-rw-r--r--.gitignore1
-rw-r--r--Documentation/ABI/stable/sysfs-driver-dma-idxd6
-rw-r--r--Documentation/ABI/stable/sysfs-driver-firmware-zynqmp103
-rw-r--r--Documentation/ABI/testing/debugfs-driver-habanalabs17
-rw-r--r--Documentation/ABI/testing/sysfs-block-rnbd46
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme104
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x721
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-proximity10
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-sx931010
-rw-r--r--Documentation/ABI/testing/sysfs-bus-most104
-rw-r--r--Documentation/ABI/testing/sysfs-bus-soundwire-master23
-rw-r--r--Documentation/ABI/testing/sysfs-bus-soundwire-slave91
-rw-r--r--Documentation/ABI/testing/sysfs-class-power45
-rw-r--r--Documentation/ABI/testing/sysfs-class-power-mp26298
-rw-r--r--Documentation/ABI/testing/sysfs-class-rnbd-client111
-rw-r--r--Documentation/ABI/testing/sysfs-class-rnbd-server50
-rw-r--r--Documentation/ABI/testing/sysfs-class-rtrs-client131
-rw-r--r--Documentation/ABI/testing/sysfs-class-rtrs-server53
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu40
-rw-r--r--Documentation/ABI/testing/sysfs-driver-habanalabs17
-rw-r--r--Documentation/ABI/testing/sysfs-driver-w1_therm116
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs24
-rw-r--r--Documentation/COPYING-logo2
-rw-r--r--Documentation/PCI/endpoint/pci-endpoint.rst16
-rw-r--r--Documentation/admin-guide/LSM/tomoyo.rst16
-rw-r--r--Documentation/admin-guide/README.rst11
-rw-r--r--Documentation/admin-guide/acpi/initrd_table_override.rst2
-rw-r--r--Documentation/admin-guide/bcache.rst4
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst23
-rw-r--r--Documentation/admin-guide/device-mapper/dm-ebs.rst51
-rw-r--r--Documentation/admin-guide/device-mapper/dm-integrity.rst8
-rw-r--r--Documentation/admin-guide/device-mapper/dm-zoned.rst62
-rw-r--r--Documentation/admin-guide/devices.rst2
-rw-r--r--Documentation/admin-guide/dynamic-debug-howto.rst5
-rw-r--r--Documentation/admin-guide/gpio/gpio-aggregator.rst111
-rw-r--r--Documentation/admin-guide/gpio/index.rst1
-rw-r--r--Documentation/admin-guide/hw-vuln/index.rst1
-rw-r--r--Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst149
-rw-r--r--Documentation/admin-guide/initrd.rst2
-rw-r--r--Documentation/admin-guide/kdump/kdump.rst8
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt61
-rw-r--r--Documentation/admin-guide/md.rst2
-rw-r--r--Documentation/admin-guide/mm/numa_memory_policy.rst10
-rw-r--r--Documentation/admin-guide/mm/userfaultfd.rst2
-rw-r--r--Documentation/admin-guide/mono.rst4
-rw-r--r--Documentation/admin-guide/reporting-bugs.rst2
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst37
-rw-r--r--Documentation/admin-guide/sysrq.rst10
-rw-r--r--Documentation/admin-guide/unicode.rst4
-rw-r--r--Documentation/arm/microchip.rst2
-rw-r--r--Documentation/conf.py2
-rw-r--r--Documentation/core-api/pin_user_pages.rst51
-rw-r--r--Documentation/core-api/rbtree.rst4
-rw-r--r--Documentation/dev-tools/coccinelle.rst8
-rw-r--r--Documentation/dev-tools/gdb-kernel-debugging.rst2
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kcov.rst17
-rw-r--r--Documentation/dev-tools/kcsan.rst321
-rw-r--r--Documentation/dev-tools/kselftest.rst23
-rw-r--r--Documentation/dev-tools/kunit/start.rst13
-rw-r--r--Documentation/dev-tools/kunit/usage.rst4
-rw-r--r--Documentation/devicetree/bindings/ABI.rst42
-rw-r--r--Documentation/devicetree/bindings/ABI.txt39
-rw-r--r--Documentation/devicetree/bindings/Makefile14
-rw-r--r--Documentation/devicetree/bindings/arm/altera.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/arm,scmi.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml34
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.yaml7
-rw-r--r--Documentation/devicetree/bindings/arm/bitmain.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/calxeda/hb-sregs.yaml49
-rw-r--r--Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/calxeda/l2ecc.yaml42
-rw-r--r--Documentation/devicetree/bindings/arm/coresight-cti.yaml20
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml86
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml4
-rw-r--r--Documentation/devicetree/bindings/arm/l2c2x0.yaml87
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek.yaml22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt28
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt8
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt27
-rw-r--r--Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml9
-rw-r--r--Documentation/devicetree/bindings/arm/psci.yaml16
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml7
-rw-r--r--Documentation/devicetree/bindings/arm/realtek.yaml21
-rw-r--r--Documentation/devicetree/bindings/arm/renesas,prr.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/renesas.yaml10
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml5
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml5
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/socionext/uniphier.yaml27
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml6
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/stm32.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml5
-rw-r--r--Documentation/devicetree/bindings/arm/syna.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml2
-rw-r--r--Documentation/devicetree/bindings/ata/faraday,ftide010.yaml4
-rw-r--r--Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml1
-rw-r--r--Documentation/devicetree/bindings/ata/sata_highbank.txt44
-rw-r--r--Documentation/devicetree/bindings/ata/sata_highbank.yaml95
-rw-r--r--Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt45
-rw-r--r--Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml96
-rw-r--r--Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml5
-rw-r--r--Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml4
-rw-r--r--Documentation/devicetree/bindings/bus/arm,integrator-ap-lm.yaml83
-rw-r--r--Documentation/devicetree/bindings/bus/baikal,bt1-apb.yaml90
-rw-r--r--Documentation/devicetree/bindings/bus/baikal,bt1-axi.yaml107
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml8
-rw-r--r--Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml188
-rw-r--r--Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml131
-rw-r--r--Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/calxeda.txt17
-rw-r--r--Documentation/devicetree/bindings/clock/calxeda.yaml82
-rw-r--r--Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt94
-rw-r--r--Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml78
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml5
-rw-r--r--Documentation/devicetree/bindings/clock/fsl,plldig.yaml19
-rw-r--r--Documentation/devicetree/bindings/clock/idt,versaclock5.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/imx1-clock.txt26
-rw-r--r--Documentation/devicetree/bindings/clock/imx1-clock.yaml51
-rw-r--r--Documentation/devicetree/bindings/clock/imx21-clock.txt27
-rw-r--r--Documentation/devicetree/bindings/clock/imx21-clock.yaml51
-rw-r--r--Documentation/devicetree/bindings/clock/imx23-clock.txt70
-rw-r--r--Documentation/devicetree/bindings/clock/imx23-clock.yaml92
-rw-r--r--Documentation/devicetree/bindings/clock/imx25-clock.txt160
-rw-r--r--Documentation/devicetree/bindings/clock/imx25-clock.yaml186
-rw-r--r--Documentation/devicetree/bindings/clock/imx27-clock.txt27
-rw-r--r--Documentation/devicetree/bindings/clock/imx27-clock.yaml55
-rw-r--r--Documentation/devicetree/bindings/clock/imx28-clock.txt93
-rw-r--r--Documentation/devicetree/bindings/clock/imx28-clock.yaml115
-rw-r--r--Documentation/devicetree/bindings/clock/imx31-clock.txt90
-rw-r--r--Documentation/devicetree/bindings/clock/imx31-clock.yaml120
-rw-r--r--Documentation/devicetree/bindings/clock/imx35-clock.txt114
-rw-r--r--Documentation/devicetree/bindings/clock/imx35-clock.yaml139
-rw-r--r--Documentation/devicetree/bindings/clock/imx5-clock.txt28
-rw-r--r--Documentation/devicetree/bindings/clock/imx5-clock.yaml65
-rw-r--r--Documentation/devicetree/bindings/clock/imx6q-clock.txt41
-rw-r--r--Documentation/devicetree/bindings/clock/imx6q-clock.yaml72
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sl-clock.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sl-clock.yaml48
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sll-clock.txt36
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sll-clock.yaml66
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sx-clock.txt13
-rw-r--r--Documentation/devicetree/bindings/clock/imx6sx-clock.yaml70
-rw-r--r--Documentation/devicetree/bindings/clock/imx6ul-clock.txt13
-rw-r--r--Documentation/devicetree/bindings/clock/imx6ul-clock.yaml66
-rw-r--r--Documentation/devicetree/bindings/clock/imx7d-clock.txt13
-rw-r--r--Documentation/devicetree/bindings/clock/imx7d-clock.yaml65
-rw-r--r--Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt51
-rw-r--r--Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml73
-rw-r--r--Documentation/devicetree/bindings/clock/ingenic,cgu.txt57
-rw-r--r--Documentation/devicetree/bindings/clock/ingenic,cgu.yaml124
-rw-r--r--Documentation/devicetree/bindings/clock/intel,agilex.yaml46
-rw-r--r--Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml44
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml75
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml5
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,a53pll.txt22
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,a53pll.yaml40
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.yaml3
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,mmcc.yaml20
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml60
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt40
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt60
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml82
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt4
-rw-r--r--Documentation/devicetree/bindings/clock/silabs,si5341.txt11
-rw-r--r--Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml30
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.yaml73
-rw-r--r--Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt56
-rw-r--r--Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml14
-rw-r--r--Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml16
-rw-r--r--Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml4
-rw-r--r--Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml9
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml40
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml63
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml28
-rw-r--r--Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml10
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml119
-rw-r--r--Documentation/devicetree/bindings/display/bridge/anx7814.txt42
-rw-r--r--Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-common.yaml11
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-timing.yaml120
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/renesas,cmm.yaml18
-rw-r--r--Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt73
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml19
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml34
-rw-r--r--Documentation/devicetree/bindings/dma/dma-common.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/ingenic,dma.yaml80
-rw-r--r--Documentation/devicetree/bindings/dma/jz4780-dma.txt64
-rw-r--r--Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt3
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt117
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml150
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt55
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml102
-rw-r--r--Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/st,stm32-dma.yaml5
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-udma.yaml29
-rw-r--r--Documentation/devicetree/bindings/dsp/fsl,dsp.yaml2
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml13
-rw-r--r--Documentation/devicetree/bindings/example-schema.yaml17
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-arizona.txt76
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml3
-rw-r--r--Documentation/devicetree/bindings/extcon/wlf,arizona.yaml125
-rw-r--r--Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt2
-rw-r--r--Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt3
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt35
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml68
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.txt88
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.yaml136
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml70
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt94
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml144
-rw-r--r--Documentation/devicetree/bindings/gpio/sifive,gpio.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml134
-rw-r--r--Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt65
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml6
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml20
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml6
-rw-r--r--Documentation/devicetree/bindings/gpu/vivante,gc.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml3
-rw-r--r--Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml32
-rw-r--r--Documentation/devicetree/bindings/hwmon/adt7475.yaml18
-rw-r--r--Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt26
-rw-r--r--Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.yaml35
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml21
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml58
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-cadence.txt28
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-jz4780.txt33
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml10
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-xiic.txt25
-rw-r--r--Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml88
-rw-r--r--Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt6
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,i2c.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,iic.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml15
-rw-r--r--Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml49
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bma180.txt8
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml9
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml65
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml62
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml9
-rw-r--r--Documentation/devicetree/bindings/iio/adc/maxim,max1241.yaml63
-rw-r--r--Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt37
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml80
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml35
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml27
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/ams,ccs811.yaml53
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/atlas,sensor.yaml8
-rw-r--r--Documentation/devicetree/bindings/iio/common.yaml35
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ltc2632.txt8
-rw-r--r--Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt63
-rw-r--r--Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml110
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml137
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bmi160.txt37
-rw-r--r--Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml75
-rw-r--r--Documentation/devicetree/bindings/iio/light/amstaos,tsl2563.yaml49
-rw-r--r--Documentation/devicetree/bindings/iio/light/tsl2563.txt19
-rw-r--r--Documentation/devicetree/bindings/iio/light/tsl2772.yaml13
-rw-r--r--Documentation/devicetree/bindings/iio/light/vcnl4000.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/light/vishay,vcnl4000.yaml50
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt4
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/vishay,vcnl3020.yaml62
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml60
-rw-r--r--Documentation/devicetree/bindings/index.rst12
-rw-r--r--Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml9
-rw-r--r--Documentation/devicetree/bindings/input/elants_i2c.txt34
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys-polled.txt45
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt58
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.yaml149
-rw-r--r--Documentation/devicetree/bindings/input/input.yaml9
-rw-r--r--Documentation/devicetree/bindings/input/iqs269a.yaml581
-rw-r--r--Documentation/devicetree/bindings/input/iqs62x-keys.yaml7
-rw-r--r--Documentation/devicetree/bindings/input/msm-vibrator.txt36
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml72
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml30
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml69
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/goodix.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/mms114.txt3
-rw-r--r--Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml101
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml4
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml4
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml4
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml6
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml12
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml39
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml9
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt35
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml89
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt28
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml63
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml8
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml8
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt62
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.yaml107
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml12
-rw-r--r--Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml61
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml8
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt73
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml98
-rw-r--r--Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml10
-rw-r--r--Documentation/devicetree/bindings/ipmi/ipmi-smic.txt25
-rw-r--r--Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml63
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt154
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml261
-rw-r--r--Documentation/devicetree/bindings/leds/common.yaml13
-rw-r--r--Documentation/devicetree/bindings/leds/leds-aw2013.yaml91
-rw-r--r--Documentation/devicetree/bindings/leds/leds-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/leds/leds-sgm3140.yaml62
-rw-r--r--Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml9
-rw-r--r--Documentation/devicetree/bindings/mailbox/fsl,mu.txt58
-rw-r--r--Documentation/devicetree/bindings/mailbox/fsl,mu.yaml91
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt88
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml86
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml80
-rw-r--r--Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml60
-rw-r--r--Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml7
-rw-r--r--Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml28
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml20
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml5
-rw-r--r--Documentation/devicetree/bindings/media/i2c/imx219.yaml3
-rw-r--r--Documentation/devicetree/bindings/media/marvell,mmp2-ccic.txt50
-rw-r--r--Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml99
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm845-venus.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/rc.yaml265
-rw-r--r--Documentation/devicetree/bindings/media/renesas,ceu.yaml39
-rw-r--r--Documentation/devicetree/bindings/media/renesas,csi2.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/renesas,vin.yaml21
-rw-r--r--Documentation/devicetree/bindings/media/ti,vpe.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/baikal,bt1-l2-ctl.yaml63
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.txt16
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml42
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml13
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml6
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt76
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml126
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml13
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml5
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra210-emc.yaml82
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml9
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml3
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.txt44
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.yaml56
-rw-r--r--Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml20
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt101
-rw-r--r--Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt85
-rw-r--r--Documentation/devicetree/bindings/mfd/cirrus,lochnagar.yaml352
-rw-r--r--Documentation/devicetree/bindings/mfd/cirrus,madera.yaml299
-rw-r--r--Documentation/devicetree/bindings/mfd/madera.txt114
-rw-r--r--Documentation/devicetree/bindings/mfd/mps,mp2629.yaml62
-rw-r--r--Documentation/devicetree/bindings/mfd/mt6397.txt19
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml37
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stpmic1.yaml9
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml17
-rw-r--r--Documentation/devicetree/bindings/mfd/wlf,arizona.yaml280
-rw-r--r--Documentation/devicetree/bindings/mips/ingenic/devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml4
-rw-r--r--Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml79
-rw-r--r--Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml79
-rw-r--r--Documentation/devicetree/bindings/mmc/jz4740.txt41
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-controller.yaml37
-rw-r--r--Documentation/devicetree/bindings/mmc/owl-mmc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml24
-rw-r--r--Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml14
-rw-r--r--Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml14
-rw-r--r--Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml13
-rw-r--r--Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml63
-rw-r--r--Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/denali,nand.yaml4
-rw-r--r--Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt92
-rw-r--r--Documentation/devicetree/bindings/mtd/ingenic,nand.yaml132
-rw-r--r--Documentation/devicetree/bindings/mtd/nand-controller.yaml27
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt3
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/calxeda-xgmac.txt18
-rw-r--r--Documentation/devicetree/bindings/net/calxeda-xgmac.yaml49
-rw-r--r--Documentation/devicetree/bindings/net/can/bosch,m_can.yaml111
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-controller.yaml34
-rw-r--r--Documentation/devicetree/bindings/net/qca,ar803x.yaml17
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipa.yaml12
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ether.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt1
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml30
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.txt44
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.yaml148
-rw-r--r--Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml18
-rw-r--r--Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml34
-rw-r--r--Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml98
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-iim.txt22
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-iim.yaml57
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt50
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml95
-rw-r--r--Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt24
-rw-r--r--Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml50
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt54
-rw-r--r--Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml70
-rw-r--r--Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml17
-rw-r--r--Documentation/devicetree/bindings/opp/opp.txt17
-rw-r--r--Documentation/devicetree/bindings/pci/aardvark-pci.txt4
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml25
-rw-r--r--Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml13
-rw-r--r--Documentation/devicetree/bindings/pci/cdns-pcie.yaml8
-rw-r--r--Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml7
-rw-r--r--Documentation/devicetree/bindings/pci/pci-ep.yaml9
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml77
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml92
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson8b-usb2-phy.yaml64
-rw-r--r--Documentation/devicetree/bindings/phy/calxeda-combophy.txt17
-rw-r--r--Documentation/devicetree/bindings/phy/calxeda-combophy.yaml51
-rw-r--r--Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml52
-rw-r--r--Documentation/devicetree/bindings/phy/intel,combo-phy.yaml101
-rw-r--r--Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/meson-gxl-usb3-phy.txt31
-rw-r--r--Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt28
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml59
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml313
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml136
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml65
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml80
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt242
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-usb-ipq4019-phy.yaml50
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt3
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt70
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt52
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml117
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml79
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml77
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml85
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml103
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml96
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt36
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt45
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt69
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt58
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml12
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml37
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml46
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml108
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt141
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml190
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt99
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml122
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml31
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml31
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml31
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml31
-rw-r--r--Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml147
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml57
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml105
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpc.txt91
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpc.yaml124
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt77
-rw-r--r--Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml108
-rw-r--r--Documentation/devicetree/bindings/power/qcom,rpmpd.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/renesas,apmu.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt35
-rw-r--r--Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml55
-rw-r--r--Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml15
-rw-r--r--Documentation/devicetree/bindings/power/supply/battery.txt6
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq27xxx.txt56
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq27xxx.yaml91
-rw-r--r--Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml82
-rw-r--r--Documentation/devicetree/bindings/power/supply/power-supply.yaml40
-rw-r--r--Documentation/devicetree/bindings/power/supply/power_supply.txt25
-rw-r--r--Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml155
-rw-r--r--Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml83
-rw-r--r--Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt27
-rw-r--r--Documentation/devicetree/bindings/property-units.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-pwm.txt27
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-pwm.yaml66
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt22
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml55
-rw-r--r--Documentation/devicetree/bindings/pwm/mxs-pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/mxs-pwm.yaml43
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-samsung.yaml27
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/arizona-regulator.txt18
-rw-r--r--Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt82
-rw-r--r--Documentation/devicetree/bindings/regulator/gpio-regulator.yaml35
-rw-r--r--Documentation/devicetree/bindings/regulator/mps,mp5416.yaml6
-rw-r--r--Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml28
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.yaml5
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml34
-rw-r--r--Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.yaml27
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml3
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml3
-rw-r--r--Documentation/devicetree/bindings/regulator/wlf,arizona.yaml37
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml77
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt12
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt25
-rw-r--r--Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml11
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt2
-rw-r--r--Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml4
-rw-r--r--Documentation/devicetree/bindings/reset/fsl,imx7-src.txt6
-rw-r--r--Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml3
-rw-r--r--Documentation/devicetree/bindings/reset/renesas,rst.yaml1
-rw-r--r--Documentation/devicetree/bindings/riscv/cpus.yaml20
-rw-r--r--Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml5
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mxc.txt26
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mxc.yaml57
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mxc_v2.txt17
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-mxc_v2.yaml46
-rw-r--r--Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml47
-rw-r--r--Documentation/devicetree/bindings/serial/8250.txt100
-rw-r--r--Documentation/devicetree/bindings/serial/8250.yaml233
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml16
-rw-r--r--Documentation/devicetree/bindings/serial/ingenic,uart.txt28
-rw-r--r--Documentation/devicetree/bindings/serial/ingenic,uart.yaml94
-rw-r--r--Documentation/devicetree/bindings/serial/mrvl-serial.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.yaml10
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt31
-rw-r--r--Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml50
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,em-uart.yaml49
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,hscif.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scifa.yaml15
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scifb.yaml1
-rw-r--r--Documentation/devicetree/bindings/serial/rs485.yaml47
-rw-r--r--Documentation/devicetree/bindings/serial/samsung_uart.yaml13
-rw-r--r--Documentation/devicetree/bindings/serial/serial.yaml8
-rw-r--r--Documentation/devicetree/bindings/serial/sifive-serial.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/st,stm32-uart.yaml14
-rw-r--r--Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml10
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt20
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt94
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml225
-rw-r--r--Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml40
-rw-r--r--Documentation/devicetree/bindings/sound/adi,adau7118.yaml20
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml51
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,aiu.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,t9015.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt39
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,lochnagar.yaml52
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,madera.yaml113
-rw-r--r--Documentation/devicetree/bindings/sound/da7213.txt8
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,asrc.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,easrc.yaml101
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,esai.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/madera.txt67
-rw-r--r--Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml122
-rw-r--r--Documentation/devicetree/bindings/sound/nau8810.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/nau8825.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt25
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,q6adm.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,q6afe.txt46
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,q6asm.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,q6core.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,fsi.yaml41
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-i2s.yaml18
-rw-r--r--Documentation/devicetree/bindings/sound/rt1016.txt17
-rw-r--r--[-rwxr-xr-x]Documentation/devicetree/bindings/sound/rt1308.txt0
-rw-r--r--Documentation/devicetree/bindings/sound/simple-card.txt351
-rw-r--r--Documentation/devicetree/bindings/sound/simple-card.yaml484
-rw-r--r--Documentation/devicetree/bindings/sound/tdm-slot.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320adcx140.yaml59
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,arizona.txt53
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,arizona.yaml114
-rw-r--r--Documentation/devicetree/bindings/sound/wm8994.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/zl38060.yaml69
-rw-r--r--Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml58
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml10
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,hspi.yaml4
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml44
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml14
-rw-r--r--Documentation/devicetree/bindings/spi/spi-pl022.yaml57
-rw-r--r--Documentation/devicetree/bindings/spi/spi-pxa2xx.txt27
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sifive.yaml25
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml4
-rw-r--r--Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml64
-rw-r--r--Documentation/devicetree/bindings/sram/rockchip-pmu-sram.txt16
-rw-r--r--Documentation/devicetree/bindings/sram/sram.yaml28
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.rst91
-rw-r--r--Documentation/devicetree/bindings/submitting-patches.txt85
-rw-r--r--Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml10
-rw-r--r--Documentation/devicetree/bindings/thermal/imx-thermal.txt61
-rw-r--r--Documentation/devicetree/bindings/thermal/imx-thermal.yaml102
-rw-r--r--Documentation/devicetree/bindings/thermal/imx8mm-thermal.txt15
-rw-r--r--Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml58
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.yaml7
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt60
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml99
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.yaml7
-rw-r--r--Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml59
-rw-r--r--Documentation/devicetree/bindings/thermal/sprd-thermal.yaml2
-rw-r--r--Documentation/devicetree/bindings/thermal/uniphier-thermal.txt65
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer.yaml10
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml11
-rw-r--r--Documentation/devicetree/bindings/timer/cadence,ttc-timer.txt21
-rw-r--r--Documentation/devicetree/bindings/timer/cdns,ttc.yaml48
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,imxgpt.txt45
-rw-r--r--Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml72
-rw-r--r--Documentation/devicetree/bindings/timer/ingenic,tcu.txt138
-rw-r--r--Documentation/devicetree/bindings/timer/ingenic,tcu.yaml280
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,sysctr-timer.txt25
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml54
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt28
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml61
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.txt110
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.yaml182
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,mtu2.txt42
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,mtu2.yaml76
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.txt31
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.yaml59
-rw-r--r--Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml65
-rw-r--r--Documentation/devicetree/bindings/usb/amlogic,dwc3.txt42
-rw-r--r--Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml76
-rw-r--r--Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml82
-rw-r--r--Documentation/devicetree/bindings/usb/atmel-usb.txt56
-rw-r--r--Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml59
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml11
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/ehci-mv.txt23
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ehci.yaml27
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ohci.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/ingenic,musb.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/keystone-usb.txt56
-rw-r--r--Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml62
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml8
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.txt104
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml174
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/renesas,usbhs.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml54
-rw-r--r--Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml77
-rw-r--r--Documentation/devicetree/bindings/usb/ti,tps6598x.yaml64
-rw-r--r--Documentation/devicetree/bindings/usb/usb-conn-gpio.txt30
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt2
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml30
-rw-r--r--Documentation/devicetree/bindings/watchdog/arm-smc-wdt.yaml37
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt24
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml54
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt22
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml60
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.txt50
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml101
-rw-r--r--Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml36
-rw-r--r--Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/uniphier-wdt.txt20
-rw-r--r--Documentation/devicetree/bindings/writing-bindings.rst67
-rw-r--r--Documentation/devicetree/bindings/writing-bindings.txt60
-rw-r--r--Documentation/devicetree/bindings/xilinx.txt143
-rw-r--r--Documentation/devicetree/changesets.rst37
-rw-r--r--Documentation/devicetree/changesets.txt31
-rw-r--r--Documentation/devicetree/dynamic-resolution-notes.rst27
-rw-r--r--Documentation/devicetree/dynamic-resolution-notes.txt24
-rw-r--r--Documentation/devicetree/index.rst17
-rw-r--r--Documentation/devicetree/of_unittest.rst205
-rw-r--r--Documentation/devicetree/of_unittest.txt197
-rw-r--r--Documentation/devicetree/overlay-notes.rst128
-rw-r--r--Documentation/devicetree/overlay-notes.txt139
-rw-r--r--Documentation/devicetree/usage-model.rst420
-rw-r--r--Documentation/devicetree/usage-model.txt415
-rw-r--r--Documentation/doc-guide/parse-headers.rst2
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/driver-api/acpi/linuxized-acpica.rst6
-rw-r--r--Documentation/driver-api/driver-model/devres.rst9
-rw-r--r--Documentation/driver-api/driver-model/driver.rst32
-rw-r--r--Documentation/driver-api/gpio/board.rst15
-rw-r--r--Documentation/driver-api/iio/triggers.rst2
-rw-r--r--Documentation/driver-api/infiniband.rst3
-rw-r--r--Documentation/driver-api/mtdnand.rst6
-rw-r--r--Documentation/driver-api/soundwire/stream.rst89
-rw-r--r--Documentation/driver-api/soundwire/summary.rst7
-rw-r--r--Documentation/driver-api/usb/bulk-streams.rst4
-rw-r--r--Documentation/driver-api/usb/writing_musb_glue_layer.rst6
-rw-r--r--Documentation/features/debug/debug-vm-pgtable/arch-support.txt34
-rw-r--r--Documentation/filesystems/f2fs.rst2
-rw-r--r--Documentation/filesystems/fiemap.rst12
-rw-r--r--Documentation/filesystems/gfs2-glocks.rst253
-rw-r--r--Documentation/filesystems/gfs2-glocks.txt232
-rw-r--r--Documentation/filesystems/index.rst1
-rw-r--r--Documentation/filesystems/locking.rst4
-rw-r--r--Documentation/filesystems/overlayfs.rst7
-rw-r--r--Documentation/filesystems/path-lookup.txt2
-rw-r--r--Documentation/filesystems/porting.rst7
-rw-r--r--Documentation/filesystems/proc.rst92
-rw-r--r--Documentation/filesystems/seq_file.rst4
-rw-r--r--Documentation/filesystems/virtiofs.rst14
-rw-r--r--Documentation/firmware-guide/acpi/intel-pmc-mux.rst153
-rw-r--r--Documentation/fpga/dfl.rst84
-rw-r--r--Documentation/index.rst3
-rw-r--r--Documentation/infiniband/core_locking.rst2
-rw-r--r--Documentation/kbuild/makefiles.rst187
-rw-r--r--Documentation/kbuild/modules.rst12
-rw-r--r--Documentation/livepatch/module-elf-format.rst15
-rw-r--r--Documentation/lzo.txt8
-rw-r--r--Documentation/misc-devices/c2port.txt6
-rw-r--r--Documentation/powerpc/bootwrapper.rst28
-rw-r--r--Documentation/powerpc/index.rst1
-rw-r--r--Documentation/powerpc/transactional_memory.rst27
-rw-r--r--Documentation/powerpc/vas-api.rst292
-rw-r--r--Documentation/process/3.Early-stage.rst4
-rw-r--r--Documentation/process/7.AdvancedTopics.rst8
-rw-r--r--Documentation/process/8.Conclusion.rst14
-rw-r--r--Documentation/process/adding-syscalls.rst4
-rw-r--r--Documentation/process/applying-patches.rst4
-rw-r--r--Documentation/process/changes.rst2
-rw-r--r--Documentation/process/coding-style.rst2
-rw-r--r--Documentation/process/submitting-patches.rst2
-rw-r--r--Documentation/process/volatile-considered-harmful.rst4
-rw-r--r--Documentation/s390/index.rst1
-rw-r--r--Documentation/s390/pci.rst125
-rw-r--r--Documentation/s390/vfio-ccw.rst100
-rw-r--r--Documentation/s390/zfcpdump.rst4
-rw-r--r--Documentation/security/SCTP.rst2
-rw-r--r--Documentation/security/keys/core.rst8
-rw-r--r--Documentation/sphinx/kfigure.py6
-rw-r--r--Documentation/static-keys.txt2
-rw-r--r--Documentation/trace/coresight/coresight-ect.rst5
-rw-r--r--Documentation/trace/coresight/coresight.rst85
-rw-r--r--Documentation/trace/events-msr.rst2
-rw-r--r--Documentation/trace/ftrace.rst2
-rw-r--r--Documentation/trace/histogram-design.rst2115
-rw-r--r--Documentation/trace/mmiotrace.rst2
-rw-r--r--Documentation/translations/it_IT/kernel-hacking/hacking.rst4
-rw-r--r--Documentation/translations/it_IT/process/email-clients.rst2
-rw-r--r--Documentation/translations/it_IT/process/management-style.rst2
-rw-r--r--Documentation/translations/it_IT/process/submitting-patches.rst2
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt15
-rw-r--r--Documentation/translations/zh_CN/filesystems/debugfs.rst2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst1
-rw-r--r--Documentation/vm/hmm.rst6
-rw-r--r--Documentation/vm/ksm.rst2
-rw-r--r--Documentation/vm/transhuge.rst4
-rw-r--r--Documentation/w1/slaves/w1_therm.rst50
-rw-r--r--Documentation/xz.txt6
-rw-r--r--Kconfig2
-rw-r--r--MAINTAINERS213
-rw-r--r--Makefile220
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/boot/bootp.c1
-rw-r--r--arch/alpha/boot/bootpz.c1
-rw-r--r--arch/alpha/boot/main.c1
-rw-r--r--arch/alpha/include/asm/cacheflush.h32
-rw-r--r--arch/alpha/include/asm/io.h1
-rw-r--r--arch/alpha/include/asm/pgtable.h16
-rw-r--r--arch/alpha/kernel/binfmt_loader.c11
-rw-r--r--arch/alpha/kernel/process.c1
-rw-r--r--arch/alpha/kernel/proto.h2
-rw-r--r--arch/alpha/kernel/ptrace.c1
-rw-r--r--arch/alpha/kernel/setup.c19
-rw-r--r--arch/alpha/kernel/smp.c3
-rw-r--r--arch/alpha/kernel/sys_alcor.c1
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c1
-rw-r--r--arch/alpha/kernel/sys_dp264.c1
-rw-r--r--arch/alpha/kernel/sys_eb64p.c1
-rw-r--r--arch/alpha/kernel/sys_eiger.c1
-rw-r--r--arch/alpha/kernel/sys_jensen.c1
-rw-r--r--arch/alpha/kernel/sys_marvel.c1
-rw-r--r--arch/alpha/kernel/sys_miata.c1
-rw-r--r--arch/alpha/kernel/sys_mikasa.c1
-rw-r--r--arch/alpha/kernel/sys_nautilus.c1
-rw-r--r--arch/alpha/kernel/sys_noritake.c1
-rw-r--r--arch/alpha/kernel/sys_rawhide.c1
-rw-r--r--arch/alpha/kernel/sys_ruffian.c1
-rw-r--r--arch/alpha/kernel/sys_rx164.c1
-rw-r--r--arch/alpha/kernel/sys_sable.c1
-rw-r--r--arch/alpha/kernel/sys_sio.c1
-rw-r--r--arch/alpha/kernel/sys_sx164.c1
-rw-r--r--arch/alpha/kernel/sys_takara.c1
-rw-r--r--arch/alpha/kernel/sys_titan.c1
-rw-r--r--arch/alpha/kernel/sys_wildfire.c1
-rw-r--r--arch/alpha/kernel/traps.c26
-rw-r--r--arch/alpha/mm/fault.c12
-rw-r--r--arch/alpha/mm/init.c1
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/include/asm/bug.h3
-rw-r--r--arch/arc/include/asm/highmem.h18
-rw-r--r--arch/arc/include/asm/pgtable.h24
-rw-r--r--arch/arc/kernel/process.c4
-rw-r--r--arch/arc/kernel/stacktrace.c17
-rw-r--r--arch/arc/kernel/troubleshoot.c6
-rw-r--r--arch/arc/mm/fault.c6
-rw-r--r--arch/arc/mm/highmem.c40
-rw-r--r--arch/arc/mm/tlbex.S2
-rw-r--r--arch/arm/Kconfig9
-rw-r--r--arch/arm/Kconfig.debug10
-rw-r--r--arch/arm/Makefile5
-rwxr-xr-xarch/arm/boot/deflate_xip_data.sh2
-rw-r--r--arch/arm/boot/dts/Makefile19
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi6
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi20
-rw-r--r--arch/arm/boot/dts/am3517.dtsi24
-rw-r--r--arch/arm/boot/dts/am4372.dtsi20
-rw-r--r--arch/arm/boot/dts/am437x-l4.dtsi7
-rw-r--r--arch/arm/boot/dts/am571x-idk.dts48
-rw-r--r--arch/arm/boot/dts/am5729-beagleboneai.dts731
-rw-r--r--arch/arm/boot/dts/am572x-idk-common.dtsi63
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi63
-rw-r--r--arch/arm/boot/dts/am57xx-idk-common.dtsi58
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-375.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-39x.dtsi4
-rw-r--r--arch/arm/boot/dts/aspeed-ast2600-evb.dts4
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts78
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-yosemitev2.dts231
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts202
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts310
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts326
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts35
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts112
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts34
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts37
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi10
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi43
-rw-r--r--arch/arm/boot/dts/aspeed-g6.dtsi64
-rw-r--r--arch/arm/boot/dts/at91-dvk_su60_somc.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-kizbox3-hs.dts4
-rw-r--r--arch/arm/boot/dts/at91-kizbox3_common.dtsi48
-rw-r--r--arch/arm/boot/dts/at91-sam9x60ek.dts23
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi54
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1_ek.dts64
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi16
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts12
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_icp.dts767
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts25
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts118
-rw-r--r--arch/arm/boot/dts/at91-wb50n.dtsi4
-rw-r--r--arch/arm/boot/dts/at91rm9200.dtsi296
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi392
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi324
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi54
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi54
-rw-r--r--arch/arm/boot/dts/bcm2711-rpi-4-b.dts13
-rw-r--r--arch/arm/boot/dts/bcm2835-common.dtsi1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-common.dtsi12
-rw-r--r--arch/arm/boot/dts/bcm2835.dtsi1
-rw-r--r--arch/arm/boot/dts/bcm2836.dtsi1
-rw-r--r--arch/arm/boot/dts/bcm2837.dtsi1
-rw-r--r--arch/arm/boot/dts/berlin2.dtsi6
-rw-r--r--arch/arm/boot/dts/berlin2cd.dtsi2
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi6
-rw-r--r--arch/arm/boot/dts/dm814x.dtsi74
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi78
-rw-r--r--arch/arm/boot/dts/dove.dtsi3
-rw-r--r--arch/arm/boot/dts/dra7-evm-common.dtsi1
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts54
-rw-r--r--arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi39
-rw-r--r--arch/arm/boot/dts/dra7-l4.dtsi57
-rw-r--r--arch/arm/boot/dts/dra7.dtsi46
-rw-r--r--arch/arm/boot/dts/dra71-evm.dts42
-rw-r--r--arch/arm/boot/dts/dra72-evm-common.dtsi18
-rw-r--r--arch/arm/boot/dts/dra72-evm-revc.dts42
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts42
-rw-r--r--arch/arm/boot/dts/dra72x.dtsi6
-rw-r--r--arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi18
-rw-r--r--arch/arm/boot/dts/dra74x.dtsi21
-rw-r--r--arch/arm/boot/dts/dra76-evm.dts54
-rw-r--r--arch/arm/boot/dts/e60k02.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos3250-monk.dts3
-rw-r--r--arch/arm/boot/dts/exynos3250-rinato.dts48
-rw-r--r--arch/arm/boot/dts/exynos4210-i9100.dts768
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts7
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts41
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts33
-rw-r--r--arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos4412-midas.dtsi17
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi8
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts14
-rw-r--r--arch/arm/boot/dts/exynos5250-arndale.dts13
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts2
-rw-r--r--arch/arm/boot/dts/imx50.dtsi8
-rw-r--r--arch/arm/boot/dts/imx51.dtsi3
-rw-r--r--arch/arm/boot/dts/imx53-cx9020.dts25
-rw-r--r--arch/arm/boot/dts/imx53.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6dl-colibri-v1_1-eval-v3.dts31
-rw-r--r--arch/arm/boot/dts/imx6q-dhcom-pdk2.dts115
-rw-r--r--arch/arm/boot/dts/imx6qdl-colibri-v1_1-uhs.dtsi44
-rw-r--r--arch/arm/boot/dts/imx6qdl-colibri.dtsi11
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw552x.dtsi14
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw560x.dtsi31
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw5904.dtsi31
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw5910.dtsi35
-rw-r--r--arch/arm/boot/dts/imx6qdl-sr-som.dtsi11
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi13
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi13
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7-tqma7.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7d-cl-som-imx7.dts4
-rw-r--r--arch/arm/boot/dts/imx7d-colibri.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7d-nitrogen7.dts4
-rw-r--r--arch/arm/boot/dts/imx7d-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts4
-rw-r--r--arch/arm/boot/dts/imx7d-tqma7.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7d-zii-rmu2.dts2
-rw-r--r--arch/arm/boot/dts/imx7d-zii-rpu2.dts2
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi2
-rw-r--r--arch/arm/boot/dts/integratorap-im-pd1.dts270
-rw-r--r--arch/arm/boot/dts/integratorap.dts53
-rw-r--r--arch/arm/boot/dts/keystone-k2e.dtsi4
-rw-r--r--arch/arm/boot/dts/keystone-k2g-evm.dts101
-rw-r--r--arch/arm/boot/dts/keystone-k2g.dtsi26
-rw-r--r--arch/arm/boot/dts/keystone-k2hk.dtsi4
-rw-r--r--arch/arm/boot/dts/keystone-k2l.dtsi4
-rw-r--r--arch/arm/boot/dts/kirkwood-l-50.dts438
-rw-r--r--arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts14
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi2
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts14
-rw-r--r--arch/arm/boot/dts/meson.dtsi3
-rw-r--r--arch/arm/boot/dts/meson8b-odroidc1.dts3
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi5
-rw-r--r--arch/arm/boot/dts/meson8m2-mxiii-plus.dts4
-rw-r--r--arch/arm/boot/dts/meson8m2.dtsi13
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi2
-rw-r--r--arch/arm/boot/dts/mmp3.dtsi26
-rw-r--r--arch/arm/boot/dts/mt2701-evb.dts21
-rw-r--r--arch/arm/boot/dts/mt2701.dtsi33
-rw-r--r--arch/arm/boot/dts/mt7623.dtsi25
-rw-r--r--arch/arm/boot/dts/mt7623n-rfb-emmc.dts1
-rw-r--r--arch/arm/boot/dts/omap2.dtsi31
-rw-r--r--arch/arm/boot/dts/omap2420.dtsi68
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi68
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts33
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000.dts33
-rw-r--r--arch/arm/boot/dts/omap3.dtsi134
-rw-r--r--arch/arm/boot/dts/omap4-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/omap4.dtsi10
-rw-r--r--arch/arm/boot/dts/omap5-l4.dtsi35
-rw-r--r--arch/arm/boot/dts/omap5.dtsi96
-rw-r--r--arch/arm/boot/dts/pxa168.dtsi8
-rw-r--r--arch/arm/boot/dts/pxa3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/pxa910.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts405
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi11
-rw-r--r--arch/arm/boot/dts/r8a7740.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7742-iwg21d-q7.dts37
-rw-r--r--arch/arm/boot/dts/r8a7742-iwg21m.dtsi53
-rw-r--r--arch/arm/boot/dts/r8a7742.dtsi648
-rw-r--r--arch/arm/boot/dts/r8a7743.dtsi12
-rw-r--r--arch/arm/boot/dts/r8a7744.dtsi12
-rw-r--r--arch/arm/boot/dts/r8a7745.dtsi12
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi12
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi95
-rw-r--r--arch/arm/boot/dts/r8a7793.dtsi14
-rw-r--r--arch/arm/boot/dts/r8a7794.dtsi12
-rw-r--r--arch/arm/boot/dts/rk3036-kylin.dts2
-rw-r--r--arch/arm/boot/dts/rk3066a-mk808.dts2
-rw-r--r--arch/arm/boot/dts/rk3188-radxarock.dts6
-rw-r--r--arch/arm/boot/dts/rk3229-xms6.dts19
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi10
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-reload.dts12
-rw-r--r--arch/arm/boot/dts/rk3288-firefly.dtsi12
-rw-r--r--arch/arm/boot/dts/rk3288-miqi.dts2
-rw-r--r--arch/arm/boot/dts/rk3288-phycore-som.dtsi6
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-square.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-tinker.dtsi6
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi1
-rw-r--r--arch/arm/boot/dts/rtd1195-horseradish.dts32
-rw-r--r--arch/arm/boot/dts/rtd1195-mele-x1000.dts32
-rw-r--r--arch/arm/boot/dts/rtd1195.dtsi217
-rw-r--r--arch/arm/boot/dts/s5pv210-aries.dtsi359
-rw-r--r--arch/arm/boot/dts/s5pv210-fascinate4g.dts249
-rw-r--r--arch/arm/boot/dts/s5pv210-galaxys.dts292
-rw-r--r--arch/arm/boot/dts/s5pv210-pinctrl.dtsi9
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi23
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi415
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi537
-rw-r--r--arch/arm/boot/dts/sama5d3_can.dtsi20
-rw-r--r--arch/arm/boot/dts/sama5d3_emac.dtsi8
-rw-r--r--arch/arm/boot/dts/sama5d3_gmac.dtsi11
-rw-r--r--arch/arm/boot/dts/sama5d3_lcd.dtsi19
-rw-r--r--arch/arm/boot/dts/sama5d3_mci2.dtsi11
-rw-r--r--arch/arm/boot/dts/sama5d3_tcb1.dtsi12
-rw-r--r--arch/arm/boot/dts/sama5d3_uart.dtsi20
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi6
-rw-r--r--arch/arm/boot/dts/sama5d3xmb_cmp.dtsi6
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi126
-rw-r--r--arch/arm/boot/dts/sh73a0.dtsi2
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi16
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-golden.dts65
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-skomer.dts39
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi14
-rw-r--r--arch/arm/boot/dts/stih418.dtsi8
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi4
-rw-r--r--arch/arm/boot/dts/stm32mp15-pinctrl.dtsi666
-rw-r--r--arch/arm/boot/dts/stm32mp151.dtsi37
-rw-r--r--arch/arm/boot/dts/stm32mp157.dtsi8
-rw-r--r--arch/arm/boot/dts/stm32mp157a-avenger96.dts314
-rw-r--r--arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts38
-rw-r--r--arch/arm/boot/dts/stm32mp157a-iot-box.dts68
-rw-r--r--arch/arm/boot/dts/stm32mp157a-stinger96.dts12
-rw-r--r--arch/arm/boot/dts/stm32mp157a-stinger96.dtsi342
-rw-r--r--arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts265
-rw-r--r--arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi368
-rw-r--r--arch/arm/boot/dts/stm32mp157c-dk2.dts8
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ed1.dts7
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ev1.dts14
-rw-r--r--arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts252
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi337
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi361
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi401
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcor-io1v8.dtsi23
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi209
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-dkx.dtsi20
-rw-r--r--arch/arm/boot/dts/stm32mp15xx-osd32.dtsi230
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime-emmc.dts32
-rw-r--r--arch/arm/boot/dts/sun8i-a83t.dtsi10
-rw-r--r--arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi24
-rw-r--r--arch/arm/boot/dts/sunxi-h3-h5.dtsi10
-rw-r--r--arch/arm/boot/dts/tegra114-dalmore.dts3
-rw-r--r--arch/arm/boot/dts/tegra124-venice2.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-colibri-eval-v3.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-colibri-iris.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-harmony.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-medcom-wide.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-paz00.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts2
-rw-r--r--arch/arm/boot/dts/tegra20-ventana.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-apalis-eval.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-apalis-v1.1-eval.dts2
-rw-r--r--arch/arm/boot/dts/tegra30-beaver.dts40
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30-colibri-eval-v3.dts2
-rw-r--r--arch/arm/boot/dts/uniphier-ld4.dtsi2
-rw-r--r--arch/arm/boot/dts/uniphier-ld6b-ref.dts1
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-ace.dts1
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-ref.dts1
-rw-r--r--arch/arm/boot/dts/uniphier-pro4-sanji.dts1
-rw-r--r--arch/arm/boot/dts/uniphier-pro4.dtsi10
-rw-r--r--arch/arm/boot/dts/uniphier-pro5.dtsi12
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2-gentil.dts1
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2-vodka.dts1
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi12
-rw-r--r--arch/arm/boot/dts/uniphier-sld8.dtsi2
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi328
-rw-r--r--arch/arm/configs/bcm2835_defconfig1
-rw-r--r--arch/arm/configs/cm_x2xx_defconfig173
-rw-r--r--arch/arm/configs/em_x270_defconfig178
-rw-r--r--arch/arm/configs/exynos_defconfig8
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/configs/sama5_defconfig2
-rw-r--r--arch/arm/configs/shmobile_defconfig2
-rw-r--r--arch/arm/configs/sunxi_defconfig1
-rw-r--r--arch/arm/configs/u8500_defconfig14
-rw-r--r--arch/arm/crypto/Kconfig12
-rw-r--r--arch/arm/include/asm/bug.h3
-rw-r--r--arch/arm/include/asm/cacheflush.h7
-rw-r--r--arch/arm/include/asm/efi.h1
-rw-r--r--arch/arm/include/asm/fixmap.h2
-rw-r--r--arch/arm/include/asm/highmem.h9
-rw-r--r--arch/arm/include/asm/idmap.h2
-rw-r--r--arch/arm/include/asm/pgtable-2level.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h7
-rw-r--r--arch/arm/include/asm/pgtable-nommu.h3
-rw-r--r--arch/arm/include/asm/pgtable.h26
-rw-r--r--arch/arm/include/asm/traps.h3
-rw-r--r--arch/arm/include/asm/unwind.h3
-rw-r--r--arch/arm/kernel/elf.c27
-rw-r--r--arch/arm/kernel/fiq.c4
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/machine_kexec.c1
-rw-r--r--arch/arm/kernel/module.c1
-rw-r--r--arch/arm/kernel/process.c4
-rw-r--r--arch/arm/kernel/ptrace.c1
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/kernel/suspend.c2
-rw-r--r--arch/arm/kernel/swp_emulate.c4
-rw-r--r--arch/arm/kernel/time.c2
-rw-r--r--arch/arm/kernel/traps.c41
-rw-r--r--arch/arm/kernel/unwind.c5
-rw-r--r--arch/arm/kernel/vdso.c2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S2
-rw-r--r--arch/arm/lib/backtrace-clang.S9
-rw-r--r--arch/arm/lib/backtrace.S14
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c23
-rw-r--r--arch/arm/mach-actions/Kconfig1
-rw-r--r--arch/arm/mach-alpine/Kconfig1
-rw-r--r--arch/arm/mach-asm9260/Kconfig1
-rw-r--r--arch/arm/mach-aspeed/Kconfig1
-rw-r--r--arch/arm/mach-berlin/Kconfig1
-rw-r--r--arch/arm/mach-clps711x/Kconfig5
-rw-r--r--arch/arm/mach-davinci/Kconfig1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c26
-rw-r--r--arch/arm/mach-ebsa110/core.c1
-rw-r--r--arch/arm/mach-footbridge/common.c1
-rw-r--r--arch/arm/mach-imx/common.h1
-rw-r--r--arch/arm/mach-imx/cpu.c159
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c8
-rw-r--r--arch/arm/mach-imx/mach-imx6sl.c8
-rw-r--r--arch/arm/mach-imx/mach-imx6sx.c8
-rw-r--r--arch/arm/mach-imx/mach-imx6ul.c8
-rw-r--r--arch/arm/mach-imx/mach-imx7d.c6
-rw-r--r--arch/arm/mach-imx/mach-imx7ulp.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c21
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c24
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c2
-rw-r--r--arch/arm/mach-imx/mach-vf610.c47
-rw-r--r--arch/arm/mach-imx/mm-imx21.c1
-rw-r--r--arch/arm/mach-imx/mm-imx27.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c1
-rw-r--r--arch/arm/mach-imx/mxc.h22
-rw-r--r--arch/arm/mach-integrator/Kconfig9
-rw-r--r--arch/arm/mach-integrator/Makefile3
-rw-r--r--arch/arm/mach-integrator/core.c2
-rw-r--r--arch/arm/mach-integrator/impd1.c475
-rw-r--r--arch/arm/mach-integrator/impd1.h15
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c31
-rw-r--r--arch/arm/mach-integrator/lm.c96
-rw-r--r--arch/arm/mach-integrator/lm.h24
-rw-r--r--arch/arm/mach-iop32x/i2c.c1
-rw-r--r--arch/arm/mach-iop32x/iq31244.c1
-rw-r--r--arch/arm/mach-iop32x/iq80321.c1
-rw-r--r--arch/arm/mach-iop32x/n2100.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c1
-rw-r--r--arch/arm/mach-keystone/platsmp.c2
-rw-r--r--arch/arm/mach-mediatek/mediatek.c2
-rw-r--r--arch/arm/mach-mmp/Kconfig3
-rw-r--r--arch/arm/mach-mmp/Makefile6
-rw-r--r--arch/arm/mach-mmp/clock-mmp2.c114
-rw-r--r--arch/arm/mach-mmp/clock-pxa168.c94
-rw-r--r--arch/arm/mach-mmp/clock-pxa910.c70
-rw-r--r--arch/arm/mach-mmp/clock.c105
-rw-r--r--arch/arm/mach-mmp/clock.h65
-rw-r--r--arch/arm/mach-mmp/mmp-dt.c2
-rw-r--r--arch/arm/mach-mmp/mmp2-dt.c2
-rw-r--r--arch/arm/mach-mmp/pxa168.c1
-rw-r--r--arch/arm/mach-mmp/time.c1
-rw-r--r--arch/arm/mach-mvebu/Kconfig3
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/board-generic.c39
-rw-r--r--arch/arm/mach-omap2/clockdomains44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/clockdomains54xx_data.c2
-rw-r--r--arch/arm/mach-omap2/common.h7
-rw-r--r--arch/arm/mach-omap2/omap-smp.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c20
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c19
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c47
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c62
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c146
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c45
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c90
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c89
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c176
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c74
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.h3
-rw-r--r--arch/arm/mach-omap2/pm33xx-core.c2
-rw-r--r--arch/arm/mach-omap2/sleep34xx.S2
-rw-r--r--arch/arm/mach-omap2/timer.c577
-rw-r--r--arch/arm/mach-prima2/Kconfig1
-rw-r--r--arch/arm/mach-pxa/Kconfig17
-rw-r--r--arch/arm/mach-pxa/Makefile5
-rw-r--r--arch/arm/mach-pxa/cm-x255.c240
-rw-r--r--arch/arm/mach-pxa/cm-x270.c419
-rw-r--r--arch/arm/mach-pxa/cm-x2xx-pci.c196
-rw-r--r--arch/arm/mach-pxa/cm-x2xx-pci.h14
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c538
-rw-r--r--arch/arm/mach-pxa/em-x270.c1286
-rw-r--r--arch/arm/mach-pxa/include/mach/io.h18
-rw-r--r--arch/arm/mach-realtek/Kconfig11
-rw-r--r--arch/arm/mach-realtek/Makefile2
-rw-r--r--arch/arm/mach-realtek/rtd1195.c40
-rw-r--r--arch/arm/mach-realview/Kconfig10
-rw-r--r--arch/arm/mach-rockchip/platsmp.c2
-rw-r--r--arch/arm/mach-rockchip/rockchip.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c7
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/hackkit.c2
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c2
-rw-r--r--arch/arm/mach-socfpga/Kconfig1
-rw-r--r--arch/arm/mach-tegra/iomap.h2
-rw-r--r--arch/arm/mach-tegra/pm.c4
-rw-r--r--arch/arm/mach-tegra/reset-handler.S7
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S16
-rw-r--r--arch/arm/mach-tegra/tegra.c8
-rw-r--r--arch/arm/mach-versatile/Kconfig1
-rw-r--r--arch/arm/mach-versatile/versatile_dt.c5
-rw-r--r--arch/arm/mach-vexpress/Kconfig3
-rw-r--r--arch/arm/mach-vexpress/core.h1
-rw-r--r--arch/arm/mach-vexpress/dcscb.c1
-rw-r--r--arch/arm/mach-vexpress/v2m.c23
-rw-r--r--arch/arm/mach-vt8500/Kconfig1
-rw-r--r--arch/arm/mach-zynq/Kconfig1
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mm/cache-b15-rac.c3
-rw-r--r--arch/arm/mm/copypage-v4mc.c1
-rw-r--r--arch/arm/mm/copypage-v6.c1
-rw-r--r--arch/arm/mm/copypage-xscale.c1
-rw-r--r--arch/arm/mm/dump.c30
-rw-r--r--arch/arm/mm/fault-armv.c8
-rw-r--r--arch/arm/mm/fault.c31
-rw-r--r--arch/arm/mm/highmem.c39
-rw-r--r--arch/arm/mm/idmap.c5
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/ioremap.c25
-rw-r--r--arch/arm/mm/mm.h8
-rw-r--r--arch/arm/mm/mmu.c38
-rw-r--r--arch/arm/mm/pageattr.c1
-rw-r--r--arch/arm/mm/pgd.c40
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm1022.S2
-rw-r--r--arch/arm/mm/proc-arm1026.S2
-rw-r--r--arch/arm/mm/proc-arm720.S2
-rw-r--r--arch/arm/mm/proc-arm740.S2
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm922.S2
-rw-r--r--arch/arm/mm/proc-arm925.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-arm940.S2
-rw-r--r--arch/arm/mm/proc-arm946.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-fa526.S2
-rw-r--r--arch/arm/mm/proc-feroceon.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa110.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/mm/pv-fixup-asm.S2
-rw-r--r--arch/arm/plat-samsung/adc.c8
-rw-r--r--arch/arm/plat-versatile/Kconfig7
-rw-r--r--arch/arm/plat-versatile/Makefile1
-rw-r--r--arch/arm/plat-versatile/include/plat/sched_clock.h7
-rw-r--r--arch/arm/plat-versatile/sched-clock.c28
-rw-r--r--arch/arm64/Kconfig12
-rw-r--r--arch/arm64/Kconfig.platforms6
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts9
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi12
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts9
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi117
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts65
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi17
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts43
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts13
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi60
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi11
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12.dtsi32
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts125
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts145
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi18
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi15
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts377
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi423
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b.dtsi22
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi78
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi98
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi23
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi23
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts73
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts80
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905w-p281.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905w-tx3-mini.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts77
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi79
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-vega-s96.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi7
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts402
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1.dtsi24
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi11
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi142
-rw-r--r--arch/arm64/boot/dts/arm/fvp-base-revc.dts10
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi82
-rw-r--r--arch/arm64/boot/dts/arm/juno-motherboard.dtsi166
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts2
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi4
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi152
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts15
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi13
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts33
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi65
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi130
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi285
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts19
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi410
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi14
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi88
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts95
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp.dtsi18
-rw-r--r--arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi1
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi4
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi130
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi6
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-db.dts3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts10
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts22
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin-singleshot.dts4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap80x.dtsi1
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-evb.dts74
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712e.dtsi158
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6358.dtsi358
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts49
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6797.dtsi231
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi11
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm-hana-rev7.dts27
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dts14
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi70
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm.dts14
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi1173
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi80
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-evb.dts147
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183.dtsi50
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132-norrin.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi3
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi5
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi30
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi3
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi10
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts7
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi89
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile1
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi74
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi89
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi257
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi53
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074-hk01.dts112
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi474
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts25
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-pins.dtsi221
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi98
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts54
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts35
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi228
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi87
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi38
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150.dtsi14
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150b.dtsi14
-rw-r--r--arch/arm64/boot/dts/qcom/pm8150l.dtsi14
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8994.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi85
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi100
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-idp.dts66
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi959
-rw-r--r--arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts46
-rw-r--r--arch/arm64/boot/dts/qcom/sdm660.dtsi372
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts210
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi104
-rw-r--r--arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts13
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250-mtp.dts351
-rw-r--r--arch/arm64/boot/dts/qcom/sm8250.dtsi126
-rw-r--r--arch/arm64/boot/dts/realtek/Makefile6
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1293-ds418j.dts6
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1293.dtsi12
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1295-mele-v9.dts6
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1295-probox2-ava.dts6
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1295-xnano-x5.dts30
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts4
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1295.dtsi21
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1296-ds418.dts4
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1296.dtsi8
-rw-r--r--arch/arm64/boot/dts/realtek/rtd129x.dtsi221
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1395-bpi-m4.dts30
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1395-lionskin.dts36
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1395.dtsi65
-rw-r--r--arch/arm64/boot/dts/realtek/rtd139x.dtsi193
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1619-mjolnir.dts44
-rw-r--r--arch/arm64/boot/dts/realtek/rtd1619.dtsi12
-rw-r--r--arch/arm64/boot/dts/realtek/rtd16xx.dtsi229
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile3
-rw-r--r--arch/arm64/boot/dts/renesas/aistarvision-mipi-adapter-2.1.dtsi94
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi18
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi18
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts72
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi18
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77950.dtsi14
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi34
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi22
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi403
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi20
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi10
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi16
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi20
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi20
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile1
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts557
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3326.dtsi15
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-a1.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-r88.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-ficus.dts29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-firefly.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts11
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dts29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi27
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi34
-rw-r--r--arch/arm64/boot/dts/socionext/Makefile1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi12
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-akebi96.dts189
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi16
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts18
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi12
-rw-r--r--arch/arm64/boot/dts/sprd/sc9863a.dtsi66
-rw-r--r--arch/arm64/boot/dts/sprd/sharkl3.dtsi164
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi104
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am654-industrial-thermal.dtsi45
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts20
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-main.dtsi75
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi6
-rw-r--r--arch/arm64/configs/defconfig19
-rw-r--r--arch/arm64/include/asm/acpi.h5
-rw-r--r--arch/arm64/include/asm/atomic.h6
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/include/asm/cacheflush.h46
-rw-r--r--arch/arm64/include/asm/elf.h23
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h2
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h12
-rw-r--r--arch/arm64/include/asm/mmu_context.h2
-rw-r--r--arch/arm64/include/asm/pgalloc.h10
-rw-r--r--arch/arm64/include/asm/pgtable-types.h5
-rw-r--r--arch/arm64/include/asm/pgtable.h77
-rw-r--r--arch/arm64/include/asm/stacktrace.h3
-rw-r--r--arch/arm64/include/asm/stage2_pgtable.h50
-rw-r--r--arch/arm64/include/asm/vmap_stack.h2
-rw-r--r--arch/arm64/kernel/acpi.c2
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/ftrace.c3
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/hibernate.c49
-rw-r--r--arch/arm64/kernel/kaslr.c2
-rw-r--r--arch/arm64/kernel/pci.c4
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/kernel/ptrace.c1
-rw-r--r--arch/arm64/kernel/setup.c4
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/kernel/suspend.c2
-rw-r--r--arch/arm64/kernel/traps.c21
-rw-r--r--arch/arm64/kernel/vdso.c8
-rw-r--r--arch/arm64/kernel/vdso32/Makefile8
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm64/kvm/mmu.c223
-rw-r--r--arch/arm64/lib/csum.c20
-rw-r--r--arch/arm64/mm/dump.c1
-rw-r--r--arch/arm64/mm/fault.c18
-rw-r--r--arch/arm64/mm/hugetlbpage.c15
-rw-r--r--arch/arm64/mm/kasan_init.c29
-rw-r--r--arch/arm64/mm/mmu.c60
-rw-r--r--arch/arm64/mm/pageattr.c8
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/c6x/Kconfig1
-rw-r--r--arch/c6x/include/asm/cacheflush.h19
-rw-r--r--arch/c6x/include/asm/pgtable.h3
-rw-r--r--arch/c6x/kernel/traps.c16
-rw-r--r--arch/csky/include/asm/highmem.h12
-rw-r--r--arch/csky/include/asm/io.h2
-rw-r--r--arch/csky/include/asm/pgtable.h33
-rw-r--r--arch/csky/kernel/module.c1
-rw-r--r--arch/csky/kernel/ptrace.c5
-rw-r--r--arch/csky/kernel/stacktrace.c6
-rw-r--r--arch/csky/kernel/vdso.c4
-rw-r--r--arch/csky/mm/fault.c10
-rw-r--r--arch/csky/mm/highmem.c58
-rw-r--r--arch/csky/mm/init.c7
-rw-r--r--arch/csky/mm/tlb.c1
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/boot/compressed/Makefile2
-rw-r--r--arch/h8300/include/asm/pgtable.h2
-rw-r--r--arch/h8300/kernel/process.c1
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/h8300/kernel/signal.c1
-rw-r--r--arch/h8300/kernel/traps.c12
-rw-r--r--arch/h8300/mm/fault.c1
-rw-r--r--arch/h8300/mm/init.c1
-rw-r--r--arch/h8300/mm/memory.c1
-rw-r--r--arch/hexagon/Makefile2
-rw-r--r--arch/hexagon/include/asm/cacheflush.h19
-rw-r--r--arch/hexagon/include/asm/fixmap.h4
-rw-r--r--arch/hexagon/include/asm/pgtable.h56
-rw-r--r--arch/hexagon/kernel/traps.c25
-rw-r--r--arch/hexagon/kernel/vdso.c4
-rw-r--r--arch/hexagon/mm/uaccess.c2
-rw-r--r--arch/hexagon/mm/vm_fault.c9
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/include/asm/cacheflush.h30
-rw-r--r--arch/ia64/include/asm/pgalloc.h4
-rw-r--r--arch/ia64/include/asm/pgtable.h49
-rw-r--r--arch/ia64/include/asm/ptrace.h1
-rw-r--r--arch/ia64/include/asm/uaccess.h2
-rw-r--r--arch/ia64/kernel/efi.c1
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/head.S3
-rw-r--r--arch/ia64/kernel/irq_ia64.c2
-rw-r--r--arch/ia64/kernel/ivt.S2
-rw-r--r--arch/ia64/kernel/kprobes.c2
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/mca_asm.S2
-rw-r--r--arch/ia64/kernel/perfmon.c8
-rw-r--r--arch/ia64/kernel/process.c17
-rw-r--r--arch/ia64/kernel/ptrace.c1
-rw-r--r--arch/ia64/kernel/relocate_kernel.S4
-rw-r--r--arch/ia64/kernel/setup.c2
-rw-r--r--arch/ia64/kernel/smp.c1
-rw-r--r--arch/ia64/kernel/smpboot.c1
-rw-r--r--arch/ia64/kernel/uncached.c2
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S2
-rw-r--r--arch/ia64/mm/contig.c1
-rw-r--r--arch/ia64/mm/fault.c24
-rw-r--r--arch/ia64/mm/hugetlbpage.c18
-rw-r--r--arch/ia64/mm/init.c40
-rw-r--r--arch/m68k/68000/m68EZ328.c2
-rw-r--r--arch/m68k/68000/m68VZ328.c2
-rw-r--r--arch/m68k/68000/timers.c1
-rw-r--r--arch/m68k/Kconfig.cpu2
-rw-r--r--arch/m68k/Makefile8
-rw-r--r--arch/m68k/amiga/config.c1
-rw-r--r--arch/m68k/apollo/config.c1
-rw-r--r--arch/m68k/atari/atasound.c1
-rw-r--r--arch/m68k/atari/stram.c1
-rw-r--r--arch/m68k/bvme6000/config.c1
-rw-r--r--arch/m68k/coldfire/pci.c4
-rw-r--r--arch/m68k/configs/stmark2_defconfig1
-rw-r--r--arch/m68k/include/asm/cacheflush_mm.h6
-rw-r--r--arch/m68k/include/asm/cacheflush_no.h19
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h63
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h8
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h84
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h1
-rw-r--r--arch/m68k/include/asm/pgtable_no.h2
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h24
-rw-r--r--arch/m68k/include/asm/sun3xflop.h2
-rw-r--r--arch/m68k/include/asm/uaccess_no.h6
-rw-r--r--arch/m68k/kernel/head.S2
-rw-r--r--arch/m68k/kernel/process.c1
-rw-r--r--arch/m68k/kernel/ptrace.c1
-rw-r--r--arch/m68k/kernel/setup_no.c1
-rw-r--r--arch/m68k/kernel/signal.c1
-rw-r--r--arch/m68k/kernel/sys_m68k.c14
-rw-r--r--arch/m68k/kernel/traps.c13
-rw-r--r--arch/m68k/kernel/uboot.c1
-rw-r--r--arch/m68k/mac/config.c1
-rw-r--r--arch/m68k/mm/cache.c13
-rw-r--r--arch/m68k/mm/fault.c10
-rw-r--r--arch/m68k/mm/init.c2
-rw-r--r--arch/m68k/mm/mcfmmu.c1
-rw-r--r--arch/m68k/mm/motorola.c25
-rw-r--r--arch/m68k/mm/sun3kmap.c1
-rw-r--r--arch/m68k/mm/sun3mmu.c1
-rw-r--r--arch/m68k/mvme147/config.c1
-rw-r--r--arch/m68k/mvme16x/config.c1
-rw-r--r--arch/m68k/q40/config.c1
-rw-r--r--arch/m68k/sun3/config.c1
-rw-r--r--arch/m68k/sun3/dvma.c1
-rw-r--r--arch/m68k/sun3/mmu_emu.c1
-rw-r--r--arch/m68k/sun3/sun3dvma.c1
-rw-r--r--arch/m68k/sun3x/dvma.c1
-rw-r--r--arch/m68k/sun3x/prom.c1
-rw-r--r--arch/microblaze/include/asm/cacheflush.h29
-rw-r--r--arch/microblaze/include/asm/highmem.h27
-rw-r--r--arch/microblaze/include/asm/pgalloc.h2
-rw-r--r--arch/microblaze/include/asm/pgtable.h23
-rw-r--r--arch/microblaze/include/asm/uaccess.h2
-rw-r--r--arch/microblaze/include/asm/unwind.h3
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S2
-rw-r--r--arch/microblaze/kernel/module.c2
-rw-r--r--arch/microblaze/kernel/setup.c2
-rw-r--r--arch/microblaze/kernel/signal.c9
-rw-r--r--arch/microblaze/kernel/stacktrace.c4
-rw-r--r--arch/microblaze/kernel/traps.c12
-rw-r--r--arch/microblaze/kernel/unwind.c40
-rw-r--r--arch/microblaze/mm/fault.c17
-rw-r--r--arch/microblaze/mm/highmem.c21
-rw-r--r--arch/microblaze/mm/init.c12
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/fw/arc/memory.c1
-rw-r--r--arch/mips/include/asm/fixmap.h3
-rw-r--r--arch/mips/include/asm/highmem.h11
-rw-r--r--arch/mips/include/asm/mach-generic/floppy.h1
-rw-r--r--arch/mips/include/asm/mach-jazz/floppy.h1
-rw-r--r--arch/mips/include/asm/pgtable-32.h22
-rw-r--r--arch/mips/include/asm/pgtable-64.h32
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/jazz/jazzdma.c1
-rw-r--r--arch/mips/jazz/setup.c2
-rw-r--r--arch/mips/kernel/module.c1
-rw-r--r--arch/mips/kernel/process.c1
-rw-r--r--arch/mips/kernel/ptrace.c1
-rw-r--r--arch/mips/kernel/ptrace32.c1
-rw-r--r--arch/mips/kernel/smp-bmips.c1
-rw-r--r--arch/mips/kernel/sysrq.c2
-rw-r--r--arch/mips/kernel/traps.c40
-rw-r--r--arch/mips/kernel/vdso.c4
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/kvm/mmu.c20
-rw-r--r--arch/mips/kvm/tlb.c1
-rw-r--r--arch/mips/kvm/trap_emul.c2
-rw-r--r--arch/mips/lib/dump_tlb.c1
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c1
-rw-r--r--arch/mips/mm/c-octeon.c1
-rw-r--r--arch/mips/mm/c-r3k.c11
-rw-r--r--arch/mips/mm/c-r4k.c11
-rw-r--r--arch/mips/mm/c-tx39.c11
-rw-r--r--arch/mips/mm/cache.c6
-rw-r--r--arch/mips/mm/fault.c12
-rw-r--r--arch/mips/mm/highmem.c56
-rw-r--r--arch/mips/mm/init.c1
-rw-r--r--arch/mips/mm/page.c1
-rw-r--r--arch/mips/mm/pgtable-32.c1
-rw-r--r--arch/mips/mm/pgtable-64.c1
-rw-r--r--arch/mips/mm/sc-ip22.c1
-rw-r--r--arch/mips/mm/sc-mips.c1
-rw-r--r--arch/mips/mm/sc-r5k.c1
-rw-r--r--arch/mips/mm/tlb-r3k.c1
-rw-r--r--arch/mips/mm/tlb-r4k.c1
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/ralink/Kconfig4
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c1
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c1
-rw-r--r--arch/nds32/include/asm/cacheflush.h4
-rw-r--r--arch/nds32/include/asm/highmem.h10
-rw-r--r--arch/nds32/include/asm/pgtable.h22
-rw-r--r--arch/nds32/kernel/head.S2
-rw-r--r--arch/nds32/kernel/module.c2
-rw-r--r--arch/nds32/kernel/traps.c15
-rw-r--r--arch/nds32/kernel/vdso.c6
-rw-r--r--arch/nds32/mm/cacheflush.c3
-rw-r--r--arch/nds32/mm/fault.c17
-rw-r--r--arch/nds32/mm/highmem.c41
-rw-r--r--arch/nds32/mm/init.c13
-rw-r--r--arch/nds32/mm/proc.c7
-rw-r--r--arch/nios2/include/asm/pgtable.h27
-rw-r--r--arch/nios2/kernel/module.c1
-rw-r--r--arch/nios2/kernel/nios2_ksyms.c2
-rw-r--r--arch/nios2/kernel/traps.c17
-rw-r--r--arch/nios2/mm/fault.c23
-rw-r--r--arch/nios2/mm/init.c5
-rw-r--r--arch/nios2/mm/ioremap.c6
-rw-r--r--arch/nios2/mm/pgtable.c1
-rw-r--r--arch/nios2/mm/tlb.c1
-rw-r--r--arch/openrisc/include/asm/cacheflush.h31
-rw-r--r--arch/openrisc/include/asm/io.h1
-rw-r--r--arch/openrisc/include/asm/pgtable.h34
-rw-r--r--arch/openrisc/include/asm/tlbflush.h1
-rw-r--r--arch/openrisc/kernel/asm-offsets.c1
-rw-r--r--arch/openrisc/kernel/entry.S2
-rw-r--r--arch/openrisc/kernel/head.S2
-rw-r--r--arch/openrisc/kernel/or32_ksyms.c2
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/openrisc/kernel/ptrace.c1
-rw-r--r--arch/openrisc/kernel/setup.c1
-rw-r--r--arch/openrisc/kernel/traps.c13
-rw-r--r--arch/openrisc/mm/fault.c22
-rw-r--r--arch/openrisc/mm/init.c5
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/openrisc/mm/tlb.c1
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/cacheflush.h30
-rw-r--r--arch/parisc/include/asm/io.h2
-rw-r--r--arch/parisc/include/asm/mmu_context.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h33
-rw-r--r--arch/parisc/kernel/asm-offsets.c2
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/head.S2
-rw-r--r--arch/parisc/kernel/module.c1
-rw-r--r--arch/parisc/kernel/pacache.S2
-rw-r--r--arch/parisc/kernel/pci-dma.c2
-rw-r--r--arch/parisc/kernel/pdt.c2
-rw-r--r--arch/parisc/kernel/ptrace.c1
-rw-r--r--arch/parisc/kernel/smp.c1
-rw-r--r--arch/parisc/kernel/traps.c30
-rw-r--r--arch/parisc/lib/memcpy.c12
-rw-r--r--arch/parisc/mm/fault.c10
-rw-r--r--arch/parisc/mm/fixmap.c6
-rw-r--r--arch/parisc/mm/init.c1
-rw-r--r--arch/powerpc/Kconfig70
-rw-r--r--arch/powerpc/Kconfig.debug2
-rw-r--r--arch/powerpc/boot/Makefile14
-rw-r--r--arch/powerpc/boot/dts/Makefile1
-rw-r--r--arch/powerpc/boot/dts/ep405.dts230
-rw-r--r--arch/powerpc/boot/dts/pcm032.dts4
-rw-r--r--arch/powerpc/boot/dts/virtex440-ml507.dts406
-rw-r--r--arch/powerpc/boot/dts/virtex440-ml510.dts466
-rw-r--r--arch/powerpc/boot/dts/walnut.dts246
-rw-r--r--arch/powerpc/boot/ep405.c71
-rw-r--r--arch/powerpc/boot/ops.h1
-rw-r--r--arch/powerpc/boot/serial.c5
-rw-r--r--arch/powerpc/boot/treeboot-walnut.c81
-rw-r--r--arch/powerpc/boot/uartlite.c79
-rw-r--r--arch/powerpc/boot/virtex.c97
-rw-r--r--arch/powerpc/boot/virtex405-head.S31
-rwxr-xr-xarch/powerpc/boot/wrapper26
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig1
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig62
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig1
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig1
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig1
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig1
-rw-r--r--arch/powerpc/configs/40x/virtex_defconfig75
-rw-r--r--arch/powerpc/configs/44x/virtex5_defconfig74
-rw-r--r--arch/powerpc/configs/adder875_defconfig1
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig1
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig1
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig9
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig8
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig1
-rw-r--r--arch/powerpc/include/asm/asm-405.h19
-rw-r--r--arch/powerpc/include/asm/atomic.h11
-rw-r--r--arch/powerpc/include/asm/bitops.h4
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h7
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h103
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup-radix.h41
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h5
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h111
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-hash.h3
-rw-r--r--arch/powerpc/include/asm/cache.h2
-rw-r--r--arch/powerpc/include/asm/cacheflush.h42
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h11
-rw-r--r--arch/powerpc/include/asm/code-patching.h37
-rw-r--r--arch/powerpc/include/asm/cputable.h22
-rw-r--r--arch/powerpc/include/asm/debug.h2
-rw-r--r--arch/powerpc/include/asm/drmem.h1
-rw-r--r--arch/powerpc/include/asm/fadump-internal.h4
-rw-r--r--arch/powerpc/include/asm/firmware.h1
-rw-r--r--arch/powerpc/include/asm/fixmap.h6
-rw-r--r--arch/powerpc/include/asm/ftrace.h14
-rw-r--r--arch/powerpc/include/asm/futex.h3
-rw-r--r--arch/powerpc/include/asm/highmem.h28
-rw-r--r--arch/powerpc/include/asm/hugetlb.h4
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h31
-rw-r--r--arch/powerpc/include/asm/icswx.h20
-rw-r--r--arch/powerpc/include/asm/idle.h93
-rw-r--r--arch/powerpc/include/asm/inst.h131
-rw-r--r--arch/powerpc/include/asm/io.h1
-rw-r--r--arch/powerpc/include/asm/iommu.h4
-rw-r--r--arch/powerpc/include/asm/kasan.h10
-rw-r--r--arch/powerpc/include/asm/kprobes.h2
-rw-r--r--arch/powerpc/include/asm/kup.h16
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h44
-rw-r--r--arch/powerpc/include/asm/mmu.h10
-rw-r--r--arch/powerpc/include/asm/mmu_context.h30
-rw-r--r--arch/powerpc/include/asm/module.h3
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h32
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h90
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h132
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-40x.h23
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h4
-rw-r--r--arch/powerpc/include/asm/nohash/32/slice.h20
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-4k.h32
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h56
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/paca.h2
-rw-r--r--arch/powerpc/include/asm/page.h7
-rw-r--r--arch/powerpc/include/asm/pgtable.h36
-rw-r--r--arch/powerpc/include/asm/pkeys.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h3
-rw-r--r--arch/powerpc/include/asm/processor.h11
-rw-r--r--arch/powerpc/include/asm/prom.h1
-rw-r--r--arch/powerpc/include/asm/ptrace.h46
-rw-r--r--arch/powerpc/include/asm/reg.h19
-rw-r--r--arch/powerpc/include/asm/reg_booke.h54
-rw-r--r--arch/powerpc/include/asm/rtas-types.h124
-rw-r--r--arch/powerpc/include/asm/rtas.h125
-rw-r--r--arch/powerpc/include/asm/slice.h2
-rw-r--r--arch/powerpc/include/asm/spinlock.h4
-rw-r--r--arch/powerpc/include/asm/sstep.h17
-rw-r--r--arch/powerpc/include/asm/switch_to.h2
-rw-r--r--arch/powerpc/include/asm/syscall.h5
-rw-r--r--arch/powerpc/include/asm/time.h12
-rw-r--r--arch/powerpc/include/asm/tlb.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h149
-rw-r--r--arch/powerpc/include/asm/uprobes.h7
-rw-r--r--arch/powerpc/include/asm/vas.h13
-rw-r--r--arch/powerpc/include/asm/xilinx_intc.h16
-rw-r--r--arch/powerpc/include/asm/xilinx_pci.h21
-rw-r--r--arch/powerpc/include/asm/xive-regs.h8
-rw-r--r--arch/powerpc/include/asm/xive.h9
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h2
-rw-r--r--arch/powerpc/include/uapi/asm/vas-api.h24
-rw-r--r--arch/powerpc/kernel/align.c18
-rw-r--r--arch/powerpc/kernel/asm-offsets.c9
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S2
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S22
-rw-r--r--arch/powerpc/kernel/cputable.c124
-rw-r--r--arch/powerpc/kernel/crash_dump.c7
-rw-r--r--arch/powerpc/kernel/dawr.c23
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c32
-rw-r--r--arch/powerpc/kernel/eeh.c31
-rw-r--r--arch/powerpc/kernel/entry_32.S69
-rw-r--r--arch/powerpc/kernel/entry_64.S8
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c7
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S51
-rw-r--r--arch/powerpc/kernel/fadump.c155
-rw-r--r--arch/powerpc/kernel/fpu.S2
-rw-r--r--arch/powerpc/kernel/head_32.S4
-rw-r--r--arch/powerpc/kernel/head_40x.S318
-rw-r--r--arch/powerpc/kernel/head_44x.S2
-rw-r--r--arch/powerpc/kernel/head_64.S9
-rw-r--r--arch/powerpc/kernel/head_8xx.S356
-rw-r--r--arch/powerpc/kernel/head_booke.h2
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S2
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c641
-rw-r--r--arch/powerpc/kernel/idle_6xx.S1
-rw-r--r--arch/powerpc/kernel/idle_e500.S1
-rw-r--r--arch/powerpc/kernel/io-workarounds.c2
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/jump_label.c5
-rw-r--r--arch/powerpc/kernel/kgdb.c9
-rw-r--r--arch/powerpc/kernel/kprobes.c47
-rw-r--r--arch/powerpc/kernel/l2cr_6xx.S1
-rw-r--r--arch/powerpc/kernel/mce.c16
-rw-r--r--arch/powerpc/kernel/mce_power.c21
-rw-r--r--arch/powerpc/kernel/misc.S2
-rw-r--r--arch/powerpc/kernel/misc_32.S11
-rw-r--r--arch/powerpc/kernel/module_32.c17
-rw-r--r--arch/powerpc/kernel/module_64.c301
-rw-r--r--arch/powerpc/kernel/nvram_64.c4
-rw-r--r--arch/powerpc/kernel/optprobes.c99
-rw-r--r--arch/powerpc/kernel/optprobes_head.S3
-rw-r--r--arch/powerpc/kernel/paca.c34
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c2
-rw-r--r--arch/powerpc/kernel/pci_64.c6
-rw-r--r--arch/powerpc/kernel/process.c129
-rw-r--r--arch/powerpc/kernel/prom.c40
-rw-r--r--arch/powerpc/kernel/prom_init.c38
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-noadv.c72
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-tm.c2
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c2
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace32.c4
-rw-r--r--arch/powerpc/kernel/rtas.c52
-rw-r--r--arch/powerpc/kernel/rtas_pci.c2
-rw-r--r--arch/powerpc/kernel/security.c48
-rw-r--r--arch/powerpc/kernel/setup-common.c6
-rw-r--r--arch/powerpc/kernel/setup_32.c12
-rw-r--r--arch/powerpc/kernel/setup_64.c17
-rw-r--r--arch/powerpc/kernel/signal.c22
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c11
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/kernel/stacktrace.c2
-rw-r--r--arch/powerpc/kernel/swsusp_32.S2
-rw-r--r--arch/powerpc/kernel/syscall_64.c72
-rw-r--r--arch/powerpc/kernel/sysfs.c82
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c168
-rw-r--r--arch/powerpc/kernel/traps.c50
-rw-r--r--arch/powerpc/kernel/uprobes.c5
-rw-r--r--arch/powerpc/kernel/vdso.c7
-rw-r--r--arch/powerpc/kernel/vecemu.c20
-rw-r--r--arch/powerpc/kernel/vector.S1
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/kexec/core.c8
-rw-r--r--arch/powerpc/kexec/crash.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c15
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c107
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c66
-rw-r--r--arch/powerpc/kvm/book3s_hv.c21
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c41
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c60
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xive.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S23
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c18
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c6
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c3
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c2
-rw-r--r--arch/powerpc/kvm/fpu.S2
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/code-patching.c315
-rw-r--r--arch/powerpc/lib/feature-fixups-test.S69
-rw-r--r--arch/powerpc/lib/feature-fixups.c163
-rw-r--r--arch/powerpc/lib/inst.c73
-rw-r--r--arch/powerpc/lib/sstep.c460
-rw-r--r--arch/powerpc/lib/test_code-patching.S20
-rw-r--r--arch/powerpc/lib/test_emulate_step.c56
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S34
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c14
-rw-r--r--arch/powerpc/mm/book3s32/tlb.c6
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/book3s64/hash_native.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c20
-rw-r--r--arch/powerpc/mm/book3s64/hash_tlb.c22
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c74
-rw-r--r--arch/powerpc/mm/book3s64/internal.h16
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c4
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c37
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c48
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c4
-rw-r--r--arch/powerpc/mm/book3s64/slb.c168
-rw-r--r--arch/powerpc/mm/book3s64/subpage_prot.c22
-rw-r--r--arch/powerpc/mm/copro_fault.c4
-rw-r--r--arch/powerpc/mm/fault.c111
-rw-r--r--arch/powerpc/mm/highmem.c26
-rw-r--r--arch/powerpc/mm/hugetlbpage.c72
-rw-r--r--arch/powerpc/mm/init-common.c2
-rw-r--r--arch/powerpc/mm/init_32.c13
-rw-r--r--arch/powerpc/mm/init_64.c5
-rw-r--r--arch/powerpc/mm/kasan/8xx.c74
-rw-r--r--arch/powerpc/mm/kasan/Makefile2
-rw-r--r--arch/powerpc/mm/kasan/book3s_32.c57
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c96
-rw-r--r--arch/powerpc/mm/mem.c9
-rw-r--r--arch/powerpc/mm/mmu_decl.h4
-rw-r--r--arch/powerpc/mm/nohash/40x.c9
-rw-r--r--arch/powerpc/mm/nohash/8xx.c227
-rw-r--r--arch/powerpc/mm/nohash/book3e_pgtable.c15
-rw-r--r--arch/powerpc/mm/nohash/fsl_booke.c1
-rw-r--r--arch/powerpc/mm/nohash/tlb_low_64e.S2
-rw-r--r--arch/powerpc/mm/pgtable.c66
-rw-r--r--arch/powerpc/mm/pgtable_32.c25
-rw-r--r--arch/powerpc/mm/pgtable_64.c11
-rw-r--r--arch/powerpc/mm/ptdump/8xx.c7
-rw-r--r--arch/powerpc/mm/ptdump/bats.c43
-rw-r--r--arch/powerpc/mm/ptdump/book3s64.c2
-rw-r--r--arch/powerpc/mm/ptdump/hashpagetable.c21
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c80
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.h3
-rw-r--r--arch/powerpc/mm/ptdump/shared.c7
-rw-r--r--arch/powerpc/mm/slice.c2
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c6
-rw-r--r--arch/powerpc/perf/8xx-pmu.c19
-rw-r--r--arch/powerpc/perf/callchain.c1
-rw-r--r--arch/powerpc/perf/callchain_32.c1
-rw-r--r--arch/powerpc/perf/callchain_64.c45
-rw-r--r--arch/powerpc/perf/core-book3s.c4
-rw-r--r--arch/powerpc/perf/hv-24x7.c96
-rw-r--r--arch/powerpc/platforms/40x/Kconfig76
-rw-r--r--arch/powerpc/platforms/40x/Makefile3
-rw-r--r--arch/powerpc/platforms/40x/ep405.c123
-rw-r--r--arch/powerpc/platforms/40x/virtex.c54
-rw-r--r--arch/powerpc/platforms/40x/walnut.c65
-rw-r--r--arch/powerpc/platforms/44x/Kconfig40
-rw-r--r--arch/powerpc/platforms/44x/Makefile2
-rw-r--r--arch/powerpc/platforms/44x/virtex.c60
-rw-r--r--arch/powerpc/platforms/44x/virtex_ml510.c30
-rw-r--r--arch/powerpc/platforms/4xx/pci.c4
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_sleep.S2
-rw-r--r--arch/powerpc/platforms/82xx/pq2.c3
-rw-r--r--arch/powerpc/platforms/83xx/suspend-asm.S1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c2
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c2
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c2
-rw-r--r--arch/powerpc/platforms/85xx/smp.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c7
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig50
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c1
-rw-r--r--arch/powerpc/platforms/8xx/micropatch.c1
-rw-r--r--arch/powerpc/platforms/Kconfig4
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype6
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/cell/iommu.c6
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c2
-rw-r--r--arch/powerpc/platforms/cell/setup.c1
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c10
-rw-r--r--arch/powerpc/platforms/chrp/pci.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c1
-rw-r--r--arch/powerpc/platforms/chrp/smp.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c25
-rw-r--r--arch/powerpc/platforms/maple/setup.c1
-rw-r--r--arch/powerpc/platforms/maple/time.c1
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c14
-rw-r--r--arch/powerpc/platforms/powermac/cache.S2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c2
-rw-r--r--arch/powerpc/platforms/powermac/setup.c1
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S5
-rw-r--r--arch/powerpc/platforms/powermac/smp.c7
-rw-r--r--arch/powerpc/platforms/powermac/time.c1
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/idle.c2
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c117
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c28
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c299
-rw-r--r--arch/powerpc/platforms/powernv/pci.c20
-rw-r--r--arch/powerpc/platforms/powernv/pci.h28
-rw-r--r--arch/powerpc/platforms/powernv/vas-api.c278
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c2
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c382
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c238
-rw-r--r--arch/powerpc/platforms/powernv/vas.c85
-rw-r--r--arch/powerpc/platforms/powernv/vas.h59
-rw-r--r--arch/powerpc/platforms/ps3/mm.c52
-rw-r--r--arch/powerpc/platforms/ps3/setup.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c26
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c3
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c3
-rw-r--r--arch/powerpc/platforms/pseries/ras.c62
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fadump.c2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c23
-rw-r--r--arch/powerpc/platforms/pseries/smp.c2
-rw-r--r--arch/powerpc/platforms/pseries/vio.c7
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/cpm2.c1
-rw-r--r--arch/powerpc/sysdev/cpm_common.c2
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c22
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c88
-rw-r--r--arch/powerpc/sysdev/xilinx_pci.c132
-rw-r--r--arch/powerpc/sysdev/xive/common.c13
-rw-r--r--arch/powerpc/sysdev/xive/native.c6
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c7
-rw-r--r--arch/powerpc/tools/head_check.sh8
-rw-r--r--arch/powerpc/xmon/Makefile2
-rw-r--r--arch/powerpc/xmon/xmon.c259
-rw-r--r--arch/powerpc/xmon/xmon_bpts.S11
-rw-r--r--arch/powerpc/xmon/xmon_bpts.h14
-rw-r--r--arch/riscv/Kbuild1
-rw-r--r--arch/riscv/Kconfig77
-rw-r--r--arch/riscv/Kconfig.socs17
-rw-r--r--arch/riscv/boot/dts/Makefile2
-rw-r--r--arch/riscv/boot/dts/kendryte/Makefile4
-rw-r--r--arch/riscv/configs/nommu_k210_defconfig7
-rw-r--r--arch/riscv/include/asm/cacheflush.h65
-rw-r--r--arch/riscv/include/asm/cacheinfo.h15
-rw-r--r--arch/riscv/include/asm/clocksource.h7
-rw-r--r--arch/riscv/include/asm/fixmap.h2
-rw-r--r--arch/riscv/include/asm/gdb_xml.h117
-rw-r--r--arch/riscv/include/asm/io.h2
-rw-r--r--arch/riscv/include/asm/irq.h5
-rw-r--r--arch/riscv/include/asm/kasan.h2
-rw-r--r--arch/riscv/include/asm/kdebug.h12
-rw-r--r--arch/riscv/include/asm/kgdb.h112
-rw-r--r--arch/riscv/include/asm/parse_asm.h219
-rw-r--r--arch/riscv/include/asm/patch.h4
-rw-r--r--arch/riscv/include/asm/pgtable-64.h7
-rw-r--r--arch/riscv/include/asm/pgtable.h22
-rw-r--r--arch/riscv/include/asm/processor.h13
-rw-r--r--arch/riscv/include/asm/smp.h3
-rw-r--r--arch/riscv/include/asm/soc.h39
-rw-r--r--arch/riscv/include/asm/vdso.h2
-rw-r--r--arch/riscv/include/asm/vdso/clocksource.h8
-rw-r--r--arch/riscv/include/asm/vdso/gettimeofday.h79
-rw-r--r--arch/riscv/include/asm/vdso/processor.h19
-rw-r--r--arch/riscv/include/asm/vdso/vsyscall.h27
-rw-r--r--arch/riscv/kernel/Makefile1
-rw-r--r--arch/riscv/kernel/cacheinfo.c17
-rw-r--r--arch/riscv/kernel/cpu.c16
-rw-r--r--arch/riscv/kernel/entry.S4
-rw-r--r--arch/riscv/kernel/ftrace.c15
-rw-r--r--arch/riscv/kernel/head.S11
-rw-r--r--arch/riscv/kernel/irq.c33
-rw-r--r--arch/riscv/kernel/kgdb.c390
-rw-r--r--arch/riscv/kernel/module.c2
-rw-r--r--arch/riscv/kernel/patch.c47
-rw-r--r--arch/riscv/kernel/setup.c5
-rw-r--r--arch/riscv/kernel/smp.c11
-rw-r--r--arch/riscv/kernel/soc.c29
-rw-r--r--arch/riscv/kernel/stacktrace.c9
-rw-r--r--arch/riscv/kernel/time.c9
-rw-r--r--arch/riscv/kernel/traps.c7
-rw-r--r--arch/riscv/kernel/vdso.c28
-rw-r--r--arch/riscv/kernel/vdso/Makefile12
-rw-r--r--arch/riscv/kernel/vdso/clock_getres.S18
-rw-r--r--arch/riscv/kernel/vdso/clock_gettime.S18
-rw-r--r--arch/riscv/kernel/vdso/gettimeofday.S18
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S2
-rw-r--r--arch/riscv/kernel/vdso/vgettimeofday.c25
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S5
-rw-r--r--arch/riscv/mm/cacheflush.c1
-rw-r--r--arch/riscv/mm/fault.c14
-rw-r--r--arch/riscv/mm/init.c51
-rw-r--r--arch/riscv/mm/kasan_init.c2
-rw-r--r--arch/riscv/mm/pageattr.c6
-rw-r--r--arch/riscv/mm/ptdump.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/appldata/appldata_mem.c4
-rw-r--r--arch/s390/appldata/appldata_net_sum.c4
-rw-r--r--arch/s390/appldata/appldata_os.c4
-rw-r--r--arch/s390/boot/ipl_parm.c2
-rw-r--r--arch/s390/boot/kaslr.c2
-rw-r--r--arch/s390/include/asm/ccwdev.h5
-rw-r--r--arch/s390/include/asm/chsc.h62
-rw-r--r--arch/s390/include/asm/hugetlb.h2
-rw-r--r--arch/s390/include/asm/io.h2
-rw-r--r--arch/s390/include/asm/ipl.h11
-rw-r--r--arch/s390/include/asm/kasan.h2
-rw-r--r--arch/s390/include/asm/nmi.h2
-rw-r--r--arch/s390/include/asm/pci.h42
-rw-r--r--arch/s390/include/asm/pci_clp.h13
-rw-r--r--arch/s390/include/asm/pgtable.h19
-rw-r--r--arch/s390/include/asm/processor.h20
-rw-r--r--arch/s390/include/asm/qdio.h33
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/tlbflush.h1
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/include/uapi/asm/ipl.h25
-rw-r--r--arch/s390/kernel/Makefile5
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/dumpstack.c13
-rw-r--r--arch/s390/kernel/entry.S464
-rw-r--r--arch/s390/kernel/ftrace.c16
-rw-r--r--arch/s390/kernel/idle.c14
-rw-r--r--arch/s390/kernel/ipl.c209
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/module.c147
-rw-r--r--arch/s390/kernel/nmi.c23
-rw-r--r--arch/s390/kernel/ptrace.c3
-rw-r--r--arch/s390/kernel/setup.c16
-rw-r--r--arch/s390/kernel/smp.c8
-rw-r--r--arch/s390/kernel/uv.c4
-rw-r--r--arch/s390/kernel/vdso.c5
-rw-r--r--arch/s390/kvm/gaccess.c6
-rw-r--r--arch/s390/kvm/interrupt.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c33
-rw-r--r--arch/s390/kvm/priv.c36
-rw-r--r--arch/s390/kvm/vsie.c3
-rw-r--r--arch/s390/lib/delay.c4
-rw-r--r--arch/s390/mm/dump_pagetables.c1
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/mm/fault.c17
-rw-r--r--arch/s390/mm/gmap.c78
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/kasan_init.c2
-rw-r--r--arch/s390/mm/maccess.c9
-rw-r--r--arch/s390/mm/pageattr.c13
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/s390/mm/vmem.c1
-rw-r--r--arch/s390/pci/Makefile3
-rw-r--r--arch/s390/pci/pci.c225
-rw-r--r--arch/s390/pci/pci_bus.c328
-rw-r--r--arch/s390/pci/pci_bus.h31
-rw-r--r--arch/s390/pci/pci_clp.c8
-rw-r--r--arch/s390/pci/pci_event.c39
-rw-r--r--arch/s390/pci/pci_mmio.c4
-rw-r--r--arch/s390/pci/pci_sysfs.c4
-rw-r--r--arch/sh/Kconfig62
-rw-r--r--arch/sh/Kconfig.cpu9
-rw-r--r--arch/sh/Kconfig.debug13
-rw-r--r--arch/sh/Makefile29
-rw-r--r--arch/sh/boards/Kconfig5
-rw-r--r--arch/sh/boot/compressed/Makefile12
-rw-r--r--arch/sh/boot/compressed/misc.c8
-rw-r--r--arch/sh/boot/compressed/vmlinux.scr2
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig3
-rw-r--r--arch/sh/configs/kfr2r09_defconfig2
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig2
-rw-r--r--arch/sh/configs/polaris_defconfig1
-rw-r--r--arch/sh/configs/r7780mp_defconfig2
-rw-r--r--arch/sh/configs/r7785rp_defconfig2
-rw-r--r--arch/sh/configs/rsk7201_defconfig2
-rw-r--r--arch/sh/configs/rsk7203_defconfig2
-rw-r--r--arch/sh/configs/rsk7264_defconfig2
-rw-r--r--arch/sh/configs/rsk7269_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig3
-rw-r--r--arch/sh/configs/se7206_defconfig2
-rw-r--r--arch/sh/configs/se7343_defconfig1
-rw-r--r--arch/sh/configs/se7619_defconfig2
-rw-r--r--arch/sh/configs/se7705_defconfig2
-rw-r--r--arch/sh/configs/se7712_defconfig2
-rw-r--r--arch/sh/configs/se7721_defconfig2
-rw-r--r--arch/sh/configs/se7722_defconfig2
-rw-r--r--arch/sh/configs/se7780_defconfig1
-rw-r--r--arch/sh/configs/sh7710voipgw_defconfig1
-rw-r--r--arch/sh/configs/sh7757lcr_defconfig2
-rw-r--r--arch/sh/configs/shmin_defconfig2
-rw-r--r--arch/sh/configs/ul2_defconfig2
-rw-r--r--arch/sh/drivers/pci/Makefile1
-rw-r--r--arch/sh/drivers/pci/ops-sh5.c65
-rw-r--r--arch/sh/drivers/pci/pci-sh5.c217
-rw-r--r--arch/sh/drivers/pci/pci-sh5.h108
-rw-r--r--arch/sh/include/asm/barrier.h4
-rw-r--r--arch/sh/include/asm/bitops.h26
-rw-r--r--arch/sh/include/asm/bl_bit.h11
-rw-r--r--arch/sh/include/asm/bl_bit_64.h37
-rw-r--r--arch/sh/include/asm/bugs.h4
-rw-r--r--arch/sh/include/asm/cache_insns.h12
-rw-r--r--arch/sh/include/asm/cache_insns_64.h20
-rw-r--r--arch/sh/include/asm/cacheflush.h1
-rw-r--r--arch/sh/include/asm/checksum.h6
-rw-r--r--arch/sh/include/asm/elf.h23
-rw-r--r--arch/sh/include/asm/extable.h4
-rw-r--r--arch/sh/include/asm/fixmap.h4
-rw-r--r--arch/sh/include/asm/io.h8
-rw-r--r--arch/sh/include/asm/io_noioport.h34
-rw-r--r--arch/sh/include/asm/irq.h3
-rw-r--r--arch/sh/include/asm/kdebug.h6
-rw-r--r--arch/sh/include/asm/mmu_context.h12
-rw-r--r--arch/sh/include/asm/mmu_context_64.h75
-rw-r--r--arch/sh/include/asm/page.h21
-rw-r--r--arch/sh/include/asm/pgtable-2level.h1
-rw-r--r--arch/sh/include/asm/pgtable-3level.h8
-rw-r--r--arch/sh/include/asm/pgtable.h19
-rw-r--r--arch/sh/include/asm/pgtable_32.h26
-rw-r--r--arch/sh/include/asm/pgtable_64.h307
-rw-r--r--arch/sh/include/asm/posix_types.h6
-rw-r--r--arch/sh/include/asm/processor.h14
-rw-r--r--arch/sh/include/asm/processor_32.h2
-rw-r--r--arch/sh/include/asm/processor_64.h212
-rw-r--r--arch/sh/include/asm/ptrace_64.h14
-rw-r--r--arch/sh/include/asm/string.h6
-rw-r--r--arch/sh/include/asm/string_64.h21
-rw-r--r--arch/sh/include/asm/switch_to.h11
-rw-r--r--arch/sh/include/asm/switch_to_64.h32
-rw-r--r--arch/sh/include/asm/syscall.h6
-rw-r--r--arch/sh/include/asm/syscall_64.h75
-rw-r--r--arch/sh/include/asm/syscalls.h9
-rw-r--r--arch/sh/include/asm/syscalls_64.h18
-rw-r--r--arch/sh/include/asm/thread_info.h4
-rw-r--r--arch/sh/include/asm/tlb.h6
-rw-r--r--arch/sh/include/asm/tlb_64.h68
-rw-r--r--arch/sh/include/asm/traps.h4
-rw-r--r--arch/sh/include/asm/traps_64.h35
-rw-r--r--arch/sh/include/asm/types.h5
-rw-r--r--arch/sh/include/asm/uaccess.h4
-rw-r--r--arch/sh/include/asm/uaccess_64.h85
-rw-r--r--arch/sh/include/asm/unistd.h6
-rw-r--r--arch/sh/include/asm/user.h7
-rw-r--r--arch/sh/include/asm/vermagic.h4
-rw-r--r--arch/sh/include/asm/vmlinux.lds.h8
-rw-r--r--arch/sh/include/cpu-sh5/cpu/addrspace.h12
-rw-r--r--arch/sh/include/cpu-sh5/cpu/cache.h94
-rw-r--r--arch/sh/include/cpu-sh5/cpu/irq.h113
-rw-r--r--arch/sh/include/cpu-sh5/cpu/mmu_context.h22
-rw-r--r--arch/sh/include/cpu-sh5/cpu/registers.h103
-rw-r--r--arch/sh/include/cpu-sh5/cpu/rtc.h9
-rw-r--r--arch/sh/include/uapi/asm/posix_types.h8
-rw-r--r--arch/sh/include/uapi/asm/posix_types_64.h29
-rw-r--r--arch/sh/include/uapi/asm/ptrace.h5
-rw-r--r--arch/sh/include/uapi/asm/ptrace_64.h15
-rw-r--r--arch/sh/include/uapi/asm/sigcontext.h13
-rw-r--r--arch/sh/include/uapi/asm/stat.h61
-rw-r--r--arch/sh/include/uapi/asm/swab.h10
-rw-r--r--arch/sh/include/uapi/asm/unistd.h8
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h423
-rw-r--r--arch/sh/kernel/Makefile16
-rw-r--r--arch/sh/kernel/cpu/Makefile1
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/cpu/irq/Makefile3
-rw-r--r--arch/sh/kernel/cpu/irq/intc-sh5.c194
-rw-r--r--arch/sh/kernel/cpu/proc.c1
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c1
-rw-r--r--arch/sh/kernel/cpu/sh5/Makefile16
-rw-r--r--arch/sh/kernel/cpu/sh5/clock-sh5.c76
-rw-r--r--arch/sh/kernel/cpu/sh5/entry.S2000
-rw-r--r--arch/sh/kernel/cpu/sh5/fpu.c106
-rw-r--r--arch/sh/kernel/cpu/sh5/probe.c72
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c121
-rw-r--r--arch/sh/kernel/cpu/sh5/switchto.S195
-rw-r--r--arch/sh/kernel/cpu/sh5/unwind.c342
-rw-r--r--arch/sh/kernel/dumpstack.c36
-rw-r--r--arch/sh/kernel/head_64.S346
-rw-r--r--arch/sh/kernel/io_trapped.c7
-rw-r--r--arch/sh/kernel/irq_64.c48
-rw-r--r--arch/sh/kernel/machine_kexec.c1
-rw-r--r--arch/sh/kernel/module.c9
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/kernel/process_64.c461
-rw-r--r--arch/sh/kernel/ptrace_32.c1
-rw-r--r--arch/sh/kernel/ptrace_64.c576
-rw-r--r--arch/sh/kernel/reboot.c6
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c17
-rw-r--r--arch/sh/kernel/sh_ksyms_64.c51
-rw-r--r--arch/sh/kernel/signal_32.c1
-rw-r--r--arch/sh/kernel/signal_64.c567
-rw-r--r--arch/sh/kernel/sys_sh.c6
-rw-r--r--arch/sh/kernel/syscalls_64.S419
-rw-r--r--arch/sh/kernel/traps.c4
-rw-r--r--arch/sh/kernel/traps_64.c814
-rw-r--r--arch/sh/kernel/vmlinux.lds.S18
-rw-r--r--arch/sh/kernel/vsyscall/vsyscall.c4
-rw-r--r--arch/sh/lib/delay.c1
-rw-r--r--arch/sh/lib64/Makefile17
-rw-r--r--arch/sh/lib64/copy_page.S89
-rw-r--r--arch/sh/lib64/copy_user_memcpy.S218
-rw-r--r--arch/sh/lib64/memcpy.S202
-rw-r--r--arch/sh/lib64/memset.S92
-rw-r--r--arch/sh/lib64/panic.c15
-rw-r--r--arch/sh/lib64/sdivsi3.S136
-rw-r--r--arch/sh/lib64/strcpy.S98
-rw-r--r--arch/sh/lib64/strlen.S34
-rw-r--r--arch/sh/lib64/udelay.c49
-rw-r--r--arch/sh/lib64/udivdi3.S121
-rw-r--r--arch/sh/lib64/udivsi3.S60
-rw-r--r--arch/sh/mm/Kconfig16
-rw-r--r--arch/sh/mm/Makefile31
-rw-r--r--arch/sh/mm/cache-sh3.c1
-rw-r--r--arch/sh/mm/cache-sh4.c9
-rw-r--r--arch/sh/mm/cache-sh5.c621
-rw-r--r--arch/sh/mm/cache-sh7705.c1
-rw-r--r--arch/sh/mm/cache.c6
-rw-r--r--arch/sh/mm/extable_64.c84
-rw-r--r--arch/sh/mm/fault.c80
-rw-r--r--arch/sh/mm/hugetlbpage.c28
-rw-r--r--arch/sh/mm/init.c15
-rw-r--r--arch/sh/mm/kmap.c5
-rw-r--r--arch/sh/mm/nommu.c1
-rw-r--r--arch/sh/mm/pmb.c2
-rw-r--r--arch/sh/mm/tlb-sh5.c224
-rw-r--r--arch/sh/mm/tlbex_32.c6
-rw-r--r--arch/sh/mm/tlbex_64.c166
-rw-r--r--arch/sh/mm/tlbflush_64.c172
-rw-r--r--arch/sparc/include/asm/cacheflush_32.h2
-rw-r--r--arch/sparc/include/asm/cacheflush_64.h1
-rw-r--r--arch/sparc/include/asm/floppy_32.h2
-rw-r--r--arch/sparc/include/asm/highmem.h27
-rw-r--r--arch/sparc/include/asm/ide.h2
-rw-r--r--arch/sparc/include/asm/io-unit.h2
-rw-r--r--arch/sparc/include/asm/page_32.h12
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h13
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h74
-rw-r--r--arch/sparc/include/asm/pgtable_64.h32
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h36
-rw-r--r--arch/sparc/include/asm/viking.h5
-rw-r--r--arch/sparc/kernel/cpu.c2
-rw-r--r--arch/sparc/kernel/cpumap.c2
-rw-r--r--arch/sparc/kernel/ds.c8
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/head_32.S8
-rw-r--r--arch/sparc/kernel/head_64.S2
-rw-r--r--arch/sparc/kernel/ktlb.S2
-rw-r--r--arch/sparc/kernel/leon_smp.c1
-rw-r--r--arch/sparc/kernel/pci.c4
-rw-r--r--arch/sparc/kernel/process_32.c11
-rw-r--r--arch/sparc/kernel/process_64.c7
-rw-r--r--arch/sparc/kernel/ptrace_32.c234
-rw-r--r--arch/sparc/kernel/ptrace_64.c18
-rw-r--r--arch/sparc/kernel/setup_32.c1
-rw-r--r--arch/sparc/kernel/setup_64.c1
-rw-r--r--arch/sparc/kernel/signal32.c1
-rw-r--r--arch/sparc/kernel/signal_32.c1
-rw-r--r--arch/sparc/kernel/signal_64.c1
-rw-r--r--arch/sparc/kernel/smp_32.c1
-rw-r--r--arch/sparc/kernel/smp_64.c1
-rw-r--r--arch/sparc/kernel/sun4m_irq.c2
-rw-r--r--arch/sparc/kernel/sys_sparc32.c1
-rw-r--r--arch/sparc/kernel/trampoline_64.S2
-rw-r--r--arch/sparc/kernel/traps_32.c2
-rw-r--r--arch/sparc/kernel/traps_64.c10
-rw-r--r--arch/sparc/kernel/vio.c2
-rw-r--r--arch/sparc/lib/clear_page.S2
-rw-r--r--arch/sparc/lib/copy_page.S2
-rw-r--r--arch/sparc/mm/fault_32.c21
-rw-r--r--arch/sparc/mm/fault_64.c17
-rw-r--r--arch/sparc/mm/highmem.c37
-rw-r--r--arch/sparc/mm/hugetlbpage.c1
-rw-r--r--arch/sparc/mm/hypersparc.S3
-rw-r--r--arch/sparc/mm/init_32.c1
-rw-r--r--arch/sparc/mm/init_64.c17
-rw-r--r--arch/sparc/mm/io-unit.c12
-rw-r--r--arch/sparc/mm/iommu.c10
-rw-r--r--arch/sparc/mm/srmmu.c114
-rw-r--r--arch/sparc/mm/tlb.c1
-rw-r--r--arch/sparc/mm/tsb.c2
-rw-r--r--arch/sparc/mm/ultra.S2
-rw-r--r--arch/sparc/mm/viking.S5
-rw-r--r--arch/sparc/vdso/vma.c4
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/um/drivers/vector_kern.h2
-rw-r--r--arch/um/drivers/vector_user.c59
-rw-r--r--arch/um/drivers/vhost_user.h2
-rw-r--r--arch/um/drivers/virtio_uml.c2
-rw-r--r--arch/um/include/asm/mmu_context.h5
-rw-r--r--arch/um/include/asm/pgtable-3level.h4
-rw-r--r--arch/um/include/asm/pgtable.h69
-rw-r--r--arch/um/include/asm/tlb.h2
-rw-r--r--arch/um/kernel/maccess.c10
-rw-r--r--arch/um/kernel/mem.c10
-rw-r--r--arch/um/kernel/process.c1
-rw-r--r--arch/um/kernel/skas/mmu.c3
-rw-r--r--arch/um/kernel/skas/uaccess.c1
-rw-r--r--arch/um/kernel/sysrq.c23
-rw-r--r--arch/um/kernel/tlb.c5
-rw-r--r--arch/um/kernel/trap.c15
-rw-r--r--arch/um/kernel/um_arch.c17
-rw-r--r--arch/um/os-Linux/file.c3
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/include/asm/cacheflush.h11
-rw-r--r--arch/unicore32/include/asm/pgtable.h20
-rw-r--r--arch/unicore32/kernel/hibernate.c6
-rw-r--r--arch/unicore32/kernel/hibernate_asm.S2
-rw-r--r--arch/unicore32/kernel/module.c1
-rw-r--r--arch/unicore32/kernel/setup.h2
-rw-r--r--arch/unicore32/kernel/traps.c34
-rw-r--r--arch/unicore32/lib/Makefile4
-rw-r--r--arch/unicore32/lib/backtrace.S24
-rw-r--r--arch/unicore32/mm/alignment.c2
-rw-r--r--arch/unicore32/mm/fault.c9
-rw-r--r--arch/unicore32/mm/mm.h10
-rw-r--r--arch/unicore32/mm/proc-ucv2.S2
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/kaslr_64.c2
-rw-r--r--arch/x86/entry/vdso/Makefile6
-rw-r--r--arch/x86/entry/vdso/vma.c14
-rw-r--r--arch/x86/events/core.c15
-rw-r--r--arch/x86/ia32/ia32_aout.c4
-rw-r--r--arch/x86/include/asm/agp.h2
-rw-r--r--arch/x86/include/asm/asm-prototypes.h2
-rw-r--r--arch/x86/include/asm/atomic.h17
-rw-r--r--arch/x86/include/asm/atomic64_32.h9
-rw-r--r--arch/x86/include/asm/atomic64_64.h15
-rw-r--r--arch/x86/include/asm/bitops.h6
-rw-r--r--arch/x86/include/asm/cacheflush.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/elf.h22
-rw-r--r--arch/x86/include/asm/fixmap.h1
-rw-r--r--arch/x86/include/asm/highmem.h9
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/iomap.h1
-rw-r--r--arch/x86/include/asm/kaslr.h2
-rw-r--r--arch/x86/include/asm/memtype.h3
-rw-r--r--arch/x86/include/asm/mmu.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h88
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/paravirt.h12
-rw-r--r--arch/x86/include/asm/pgtable-3level.h8
-rw-r--r--arch/x86/include/asm/pgtable.h90
-rw-r--r--arch/x86/include/asm/pgtable_32.h20
-rw-r--r--arch/x86/include/asm/pgtable_64.h10
-rw-r--r--arch/x86/include/asm/pgtable_types.h44
-rw-r--r--arch/x86/include/asm/setup.h12
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/tlbflush.h441
-rw-r--r--arch/x86/include/asm/uaccess.h28
-rw-r--r--arch/x86/include/asm/uv/uv.h1
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h18
-rw-r--r--arch/x86/include/asm/xen/hypercall.h2
-rw-r--r--arch/x86/include/asm/xen/page.h1
-rw-r--r--arch/x86/kernel/Makefile5
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/alternative.c56
-rw-r--r--arch/x86/kernel/amd_gart_64.c3
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/cpu/Makefile3
-rw-r--r--arch/x86/kernel/cpu/bugs.c200
-rw-r--r--arch/x86/kernel/cpu/common.c83
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/intel.c5
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c6
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c6
-rw-r--r--arch/x86/kernel/crash_core_32.c2
-rw-r--r--arch/x86/kernel/crash_core_64.c2
-rw-r--r--arch/x86/kernel/doublefault_32.c1
-rw-r--r--arch/x86/kernel/dumpstack.c9
-rw-r--r--arch/x86/kernel/e820.c10
-rw-r--r--arch/x86/kernel/early_printk.c2
-rw-r--r--arch/x86/kernel/espfix_64.c2
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/i8259.c2
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c2
-rw-r--r--arch/x86/kernel/kprobes/opt.c2
-rw-r--r--arch/x86/kernel/ldt.c2
-rw-r--r--arch/x86/kernel/livepatch.c53
-rw-r--r--arch/x86/kernel/machine_kexec_32.c1
-rw-r--r--arch/x86/kernel/machine_kexec_64.c1
-rw-r--r--arch/x86/kernel/module.c44
-rw-r--r--arch/x86/kernel/paravirt.c23
-rw-r--r--arch/x86/kernel/process.c39
-rw-r--r--arch/x86/kernel/process_32.c1
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/ptrace.c1
-rw-r--r--arch/x86/kernel/reboot.c10
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/sys_ia32.c40
-rw-r--r--arch/x86/kernel/tboot.c3
-rw-r--r--arch/x86/kernel/time.c4
-rw-r--r--arch/x86/kernel/vm86_32.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h8
-rw-r--r--arch/x86/lib/Makefile9
-rw-r--r--arch/x86/mm/Makefile4
-rw-r--r--arch/x86/mm/cpu_entry_area.c2
-rw-r--r--arch/x86/mm/debug_pagetables.c2
-rw-r--r--arch/x86/mm/dump_pagetables.c1
-rw-r--r--arch/x86/mm/fault.c22
-rw-r--r--arch/x86/mm/highmem_32.c50
-rw-r--r--arch/x86/mm/init.c66
-rw-r--r--arch/x86/mm/init_32.c27
-rw-r--r--arch/x86/mm/init_64.c5
-rw-r--r--arch/x86/mm/ioremap.c12
-rw-r--r--arch/x86/mm/kasan_init_64.c1
-rw-r--r--arch/x86/mm/kaslr.c35
-rw-r--r--arch/x86/mm/kmmio.c2
-rw-r--r--arch/x86/mm/maccess.c28
-rw-r--r--arch/x86/mm/mem_encrypt.c2
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S2
-rw-r--r--arch/x86/mm/mmio-mod.c2
-rw-r--r--arch/x86/mm/pat/cpa-test.c1
-rw-r--r--arch/x86/mm/pat/memtype.c1
-rw-r--r--arch/x86/mm/pat/memtype_interval.c2
-rw-r--r--arch/x86/mm/pat/set_memory.c7
-rw-r--r--arch/x86/mm/pgtable.c17
-rw-r--r--arch/x86/mm/pgtable_32.c3
-rw-r--r--arch/x86/mm/pti.c1
-rw-r--r--arch/x86/mm/setup_nx.c2
-rw-r--r--arch/x86/mm/tlb.c384
-rw-r--r--arch/x86/pci/fixup.c4
-rw-r--r--arch/x86/pci/xen.c16
-rw-r--r--arch/x86/platform/efi/efi_32.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c1
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c4
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c4
-rw-r--r--arch/x86/platform/olpc/olpc_ofw.c2
-rw-r--r--arch/x86/platform/uv/tlb_uv.c4
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--arch/x86/power/hibernate.c2
-rw-r--r--arch/x86/power/hibernate_32.c2
-rw-r--r--arch/x86/power/hibernate_64.c2
-rw-r--r--arch/x86/purgatory/.gitignore1
-rw-r--r--arch/x86/purgatory/Makefile21
-rw-r--r--arch/x86/realmode/Makefile3
-rw-r--r--arch/x86/realmode/init.c2
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--arch/x86/um/vdso/vma.c4
-rw-r--r--arch/x86/xen/enlighten_pv.c1
-rw-r--r--arch/x86/xen/grant-table.c1
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/smp_pv.c2
-rw-r--r--arch/xtensa/include/asm/cacheflush.h2
-rw-r--r--arch/xtensa/include/asm/fixmap.h10
-rw-r--r--arch/xtensa/include/asm/highmem.h29
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h2
-rw-r--r--arch/xtensa/include/asm/mmu_context.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h20
-rw-r--r--arch/xtensa/kernel/entry.S2
-rw-r--r--arch/xtensa/kernel/process.c1
-rw-r--r--arch/xtensa/kernel/ptrace.c1
-rw-r--r--arch/xtensa/kernel/setup.c1
-rw-r--r--arch/xtensa/kernel/traps.c24
-rw-r--r--arch/xtensa/kernel/vectors.S2
-rw-r--r--arch/xtensa/mm/cache.c2
-rw-r--r--arch/xtensa/mm/fault.c12
-rw-r--r--arch/xtensa/mm/highmem.c24
-rw-r--r--arch/xtensa/mm/ioremap.c2
-rw-r--r--arch/xtensa/mm/kasan_init.c10
-rw-r--r--arch/xtensa/mm/misc.S2
-rw-r--r--arch/xtensa/mm/mmu.c5
-rw-r--r--block/bio-integrity.c1
-rw-r--r--block/bio.c3
-rw-r--r--block/blk-mq-tag.c39
-rw-r--r--block/blk-mq-tag.h8
-rw-r--r--block/blk-mq.c29
-rw-r--r--block/blk-mq.h1
-rw-r--r--block/blk.h2
-rw-r--r--crypto/Kconfig1
-rw-r--r--drivers/acpi/acpica/dbdisply.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c3
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/numa/srat.c1
-rw-r--r--drivers/acpi/pci_mcfg.c8
-rw-r--r--drivers/acpi/pci_root.c11
-rw-r--r--drivers/acpi/scan.c31
-rw-r--r--drivers/amba/bus.c14
-rw-r--r--drivers/android/binder_alloc.c14
-rw-r--r--drivers/android/binderfs.c4
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/base/Kconfig3
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/core.c235
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c33
-rw-r--r--drivers/base/firmware_loader/fallback.c15
-rw-r--r--drivers/base/firmware_loader/fallback.h8
-rw-r--r--drivers/base/firmware_loader/fallback_platform.c2
-rw-r--r--drivers/base/firmware_loader/fallback_table.c2
-rw-r--r--drivers/base/firmware_loader/firmware.h3
-rw-r--r--drivers/base/firmware_loader/main.c14
-rw-r--r--drivers/base/platform.c46
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/property.c13
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/base/swnode.c27
-rw-r--r--drivers/base/test/Kconfig3
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/ps3disk.c1
-rw-r--r--drivers/block/rbd.c44
-rw-r--r--drivers/block/rbd_types.h2
-rw-r--r--drivers/block/rnbd/Kconfig28
-rw-r--r--drivers/block/rnbd/Makefile15
-rw-r--r--drivers/block/rnbd/README92
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c639
-rw-r--r--drivers/block/rnbd/rnbd-clt.c1729
-rw-r--r--drivers/block/rnbd/rnbd-clt.h156
-rw-r--r--drivers/block/rnbd/rnbd-common.c23
-rw-r--r--drivers/block/rnbd/rnbd-log.h41
-rw-r--r--drivers/block/rnbd/rnbd-proto.h303
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.c134
-rw-r--r--drivers/block/rnbd/rnbd-srv-dev.h92
-rw-r--r--drivers/block/rnbd/rnbd-srv-sysfs.c215
-rw-r--r--drivers/block/rnbd/rnbd-srv.c844
-rw-r--r--drivers/block/rnbd/rnbd-srv.h78
-rw-r--r--drivers/block/umem.c2
-rw-r--r--drivers/block/z2ram.c2
-rw-r--r--drivers/block/zram/zcomp.c7
-rw-r--r--drivers/bus/Kconfig41
-rw-r--r--drivers/bus/Makefile4
-rw-r--r--drivers/bus/arm-integrator-lm.c128
-rw-r--r--drivers/bus/bt1-apb.c421
-rw-r--r--drivers/bus/bt1-axi.c314
-rw-r--r--drivers/bus/mhi/core/boot.c75
-rw-r--r--drivers/bus/mhi/core/init.c8
-rw-r--r--drivers/bus/mhi/core/internal.h9
-rw-r--r--drivers/bus/mhi/core/main.c197
-rw-r--r--drivers/bus/mhi/core/pm.c229
-rw-r--r--drivers/bus/ti-sysc.c25
-rw-r--r--drivers/bus/vexpress-config.c354
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/agp/generic.c1
-rw-r--r--drivers/char/bsr.c1
-rw-r--r--drivers/char/ipmi/bt-bmc.c21
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c9
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c2
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c24
-rw-r--r--drivers/char/mem.c101
-rw-r--r--drivers/char/mspec.c3
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/tlclk.c17
-rw-r--r--drivers/clk/Kconfig33
-rw-r--r--drivers/clk/Makefile8
-rw-r--r--drivers/clk/at91/at91rm9200.c12
-rw-r--r--drivers/clk/at91/at91sam9260.c13
-rw-r--r--drivers/clk/at91/at91sam9g45.c10
-rw-r--r--drivers/clk/at91/at91sam9n12.c12
-rw-r--r--drivers/clk/at91/at91sam9rl.c10
-rw-r--r--drivers/clk/at91/at91sam9x5.c10
-rw-r--r--drivers/clk/at91/pmc.c47
-rw-r--r--drivers/clk/at91/pmc.h8
-rw-r--r--drivers/clk/at91/sam9x60.c10
-rw-r--r--drivers/clk/at91/sama5d2.c13
-rw-r--r--drivers/clk/at91/sama5d3.c10
-rw-r--r--drivers/clk/at91/sama5d4.c10
-rw-r--r--drivers/clk/baikal-t1/Kconfig42
-rw-r--r--drivers/clk/baikal-t1/Makefile3
-rw-r--r--drivers/clk/baikal-t1/ccu-div.c602
-rw-r--r--drivers/clk/baikal-t1/ccu-div.h110
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.c558
-rw-r--r--drivers/clk/baikal-t1/ccu-pll.h64
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-div.c485
-rw-r--r--drivers/clk/baikal-t1/clk-ccu-pll.c204
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c80
-rw-r--r--drivers/clk/clk-ast2600.c31
-rw-r--r--drivers/clk/clk-hsdk-pll.c70
-rw-r--r--drivers/clk/clk-si5341.c69
-rw-r--r--drivers/clk/clk-versaclock5.c11
-rw-r--r--drivers/clk/clk.c4
-rw-r--r--drivers/clk/imx/Kconfig8
-rw-r--r--drivers/clk/imx/clk-composite-8m.c56
-rw-r--r--drivers/clk/imx/clk-gate2.c31
-rw-r--r--drivers/clk/imx/clk-imx6ul.c2
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c6
-rw-r--r--drivers/clk/imx/clk-imx8mm.c27
-rw-r--r--drivers/clk/imx/clk-imx8mn.c25
-rw-r--r--drivers/clk/imx/clk-imx8mp.c148
-rw-r--r--drivers/clk/imx/clk-imx8mq.c29
-rw-r--r--drivers/clk/imx/clk-pll14xx.c8
-rw-r--r--drivers/clk/imx/clk-pllv3.c16
-rw-r--r--drivers/clk/imx/clk-sscg-pll.c10
-rw-r--r--drivers/clk/imx/clk.h62
-rw-r--r--drivers/clk/ingenic/Kconfig10
-rw-r--r--drivers/clk/ingenic/Makefile1
-rw-r--r--drivers/clk/ingenic/cgu.c28
-rw-r--r--drivers/clk/ingenic/cgu.h4
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c4
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c4
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c8
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c3
-rw-r--r--drivers/clk/ingenic/tcu.c2
-rw-r--r--drivers/clk/ingenic/x1000-cgu.c123
-rw-r--r--drivers/clk/ingenic/x1830-cgu.c448
-rw-r--r--drivers/clk/mediatek/Kconfig93
-rw-r--r--drivers/clk/mediatek/Makefile8
-rw-r--r--drivers/clk/mediatek/clk-mt2701-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt6765-audio.c100
-rw-r--r--drivers/clk/mediatek/clk-mt6765-cam.c74
-rw-r--r--drivers/clk/mediatek/clk-mt6765-img.c70
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mipi0a.c68
-rw-r--r--drivers/clk/mediatek/clk-mt6765-mm.c96
-rw-r--r--drivers/clk/mediatek/clk-mt6765-vcodec.c70
-rw-r--r--drivers/clk/mediatek/clk-mt6765.c922
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt6797-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mt8173-mm.c146
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c104
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mm.c9
-rw-r--r--drivers/clk/mediatek/clk-mux.c2
-rw-r--r--drivers/clk/meson/g12a.c30
-rw-r--r--drivers/clk/meson/gxbb.c40
-rw-r--r--drivers/clk/meson/meson8b.c120
-rw-r--r--drivers/clk/meson/meson8b.h5
-rw-r--r--drivers/clk/mmp/Makefile3
-rw-r--r--drivers/clk/mmp/clk-audio.c443
-rw-r--r--drivers/clk/mmp/clk-frac.c27
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c104
-rw-r--r--drivers/clk/mmp/clk.h11
-rw-r--r--drivers/clk/mmp/pwr-island.c115
-rw-r--r--drivers/clk/qcom/Kconfig8
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c8
-rw-r--r--drivers/clk/qcom/gcc-msm8939.c3988
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c27
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c94
-rw-r--r--drivers/clk/qcom/gcc-sm8150.c148
-rw-r--r--drivers/clk/qcom/gdsc.c23
-rw-r--r--drivers/clk/qcom/gdsc.h4
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c2
-rw-r--r--drivers/clk/renesas/Kconfig8
-rw-r--r--drivers/clk/renesas/Makefile1
-rw-r--r--drivers/clk/renesas/r8a7742-cpg-mssr.c275
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c6
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c14
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h1
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c18
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c3
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c2
-rw-r--r--drivers/clk/socfpga/Makefile2
-rw-r--r--drivers/clk/socfpga/clk-agilex.c454
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c5
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c10
-rw-r--r--drivers/clk/socfpga/clk-pll-a10.c4
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c78
-rw-r--r--drivers/clk/socfpga/clk-pll.c4
-rw-r--r--drivers/clk/socfpga/clk-s10.c160
-rw-r--r--drivers/clk/socfpga/stratix10-clk.h10
-rw-r--r--drivers/clk/sprd/gate.c7
-rw-r--r--drivers/clk/sprd/gate.h9
-rw-r--r--drivers/clk/sprd/pll.c2
-rw-r--r--drivers/clk/sprd/sc9863a-clk.c64
-rw-r--r--drivers/clk/st/clk-flexgen.c1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c2
-rw-r--r--drivers/clk/tegra/Kconfig4
-rw-r--r--drivers/clk/tegra/Makefile4
-rw-r--r--drivers/clk/tegra/clk-pll.c12
-rw-r--r--drivers/clk/tegra/clk-tegra-super-cclk.c212
-rw-r--r--drivers/clk/tegra/clk-tegra124-emc.c (renamed from drivers/clk/tegra/clk-emc.c)0
-rw-r--r--drivers/clk/tegra/clk-tegra20.c7
-rw-r--r--drivers/clk/tegra/clk-tegra210-emc.c369
-rw-r--r--drivers/clk/tegra/clk-tegra210.c94
-rw-r--r--drivers/clk/tegra/clk-tegra30.c6
-rw-r--r--drivers/clk/tegra/clk.h24
-rw-r--r--drivers/clk/ti/clk-44xx.c14
-rw-r--r--drivers/clk/ti/clk-54xx.c14
-rw-r--r--drivers/clk/ti/clk-7xx.c15
-rw-r--r--drivers/clk/ti/clk-816x.c1
-rw-r--r--drivers/clk/ti/composite.c1
-rw-r--r--drivers/clk/versatile/Kconfig21
-rw-r--r--drivers/clk/versatile/clk-impd1.c121
-rw-r--r--drivers/clk/versatile/clk-versatile.c2
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c20
-rw-r--r--drivers/clk/x86/Kconfig8
-rw-r--r--drivers/clk/x86/Makefile1
-rw-r--r--drivers/clk/x86/clk-cgu-pll.c156
-rw-r--r--drivers/clk/x86/clk-cgu.c636
-rw-r--r--drivers/clk/x86/clk-cgu.h335
-rw-r--r--drivers/clk/x86/clk-lgm.c475
-rw-r--r--drivers/clk/zynqmp/clk-gate-zynqmp.c9
-rw-r--r--drivers/clk/zynqmp/clk-mux-zynqmp.c6
-rw-r--r--drivers/clk/zynqmp/clk-zynqmp.h1
-rw-r--r--drivers/clk/zynqmp/clkc.c41
-rw-r--r--drivers/clk/zynqmp/divider.c39
-rw-r--r--drivers/clk/zynqmp/pll.c29
-rw-r--r--drivers/clocksource/timer-riscv.c43
-rw-r--r--drivers/cpufreq/Kconfig.arm6
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c14
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c39
-rw-r--r--drivers/cpufreq/cpufreq-dt.c4
-rw-r--r--drivers/cpufreq/cpufreq.c57
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c3
-rw-r--r--drivers/cpufreq/tegra20-cpufreq.c217
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c39
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c4
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h4
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c46
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c2
-rw-r--r--drivers/crypto/nx/Makefile2
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c1062
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c1136
-rw-r--r--drivers/crypto/omap-aes-gcm.c1
-rw-r--r--drivers/crypto/omap-aes.c8
-rw-r--r--drivers/crypto/omap-crypto.c10
-rw-r--r--drivers/crypto/omap-sham.c101
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c21
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c22
-rw-r--r--drivers/dax/dax-private.h1
-rw-r--r--drivers/dax/kmem.c28
-rw-r--r--drivers/dca/dca-sysfs.c4
-rw-r--r--drivers/dma-buf/dma-resv.c5
-rw-r--r--drivers/dma/Kconfig4
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/at_xdmac.c2
-rw-r--r--drivers/dma/dmaengine.c98
-rw-r--r--drivers/dma/dmatest.c24
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c65
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h4
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c10
-rw-r--r--drivers/dma/idxd/sysfs.c11
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/ioat/dma.c85
-rw-r--r--drivers/dma/ioat/dma.h10
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/mmp_tdma.c26
-rw-r--r--drivers/dma/moxart-dma.c2
-rw-r--r--drivers/dma/qcom/bam_dma.c2
-rw-r--r--drivers/dma/qcom/hidma.c3
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c25
-rw-r--r--drivers/dma/stm32-dma.c41
-rw-r--r--drivers/dma/ti/Kconfig4
-rw-r--r--drivers/dma/ti/k3-udma.c34
-rw-r--r--drivers/extcon/extcon-adc-jack.c3
-rw-r--r--drivers/extcon/extcon-arizona.c17
-rw-r--r--drivers/extcon/extcon-max14577.c10
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firmware/Kconfig3
-rw-r--r--drivers/firmware/arm_scmi/Makefile4
-rw-r--r--drivers/firmware/arm_scmi/base.c7
-rw-r--r--drivers/firmware/arm_scmi/common.h11
-rw-r--r--drivers/firmware/arm_scmi/driver.c133
-rw-r--r--drivers/firmware/arm_scmi/mailbox.c17
-rw-r--r--drivers/firmware/arm_scmi/perf.c5
-rw-r--r--drivers/firmware/arm_scmi/power.c6
-rw-r--r--drivers/firmware/arm_scmi/sensors.c4
-rw-r--r--drivers/firmware/arm_scmi/shmem.c15
-rw-r--r--drivers/firmware/arm_scmi/smc.c153
-rw-r--r--drivers/firmware/dmi-id.c6
-rw-r--r--drivers/firmware/dmi_scan.c30
-rw-r--r--drivers/firmware/efi/arm-runtime.c2
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile2
-rw-r--r--drivers/firmware/imx/imx-scu.c64
-rw-r--r--drivers/firmware/qcom_scm-legacy.c2
-rw-r--r--drivers/firmware/qcom_scm.c11
-rw-r--r--drivers/firmware/raspberrypi.c61
-rw-r--r--drivers/firmware/stratix10-rsu.c10
-rw-r--r--drivers/firmware/stratix10-svc.c62
-rw-r--r--drivers/firmware/tegra/bpmp-tegra186.c4
-rw-r--r--drivers/firmware/tegra/bpmp.c9
-rw-r--r--drivers/firmware/trusted_foundations.c21
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c5
-rw-r--r--drivers/firmware/xilinx/zynqmp.c607
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/Makefile1
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c4
-rw-r--r--drivers/fpga/dfl-afu-main.c35
-rw-r--r--drivers/fpga/dfl-fme-main.c23
-rw-r--r--drivers/fpga/dfl-fme-perf.c1020
-rw-r--r--drivers/fpga/dfl-fme.h2
-rw-r--r--drivers/fpga/dfl.c15
-rw-r--r--drivers/fpga/dfl.h39
-rw-r--r--drivers/fpga/ice40-spi.c10
-rw-r--r--drivers/fpga/machxo2-spi.c12
-rw-r--r--drivers/fpga/stratix10-soc.c28
-rw-r--r--drivers/fpga/zynqmp-fpga.c14
-rw-r--r--drivers/gnss/serial.h2
-rw-r--r--drivers/gnss/sirf.c8
-rw-r--r--drivers/gpio/Kconfig24
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/TODO4
-rw-r--r--drivers/gpio/gpio-aggregator.c568
-rw-r--r--drivers/gpio/gpio-dwapb.c248
-rw-r--r--drivers/gpio/gpio-f7188x.c33
-rw-r--r--drivers/gpio/gpio-ftgpio010.c2
-rw-r--r--drivers/gpio/gpio-ich.c2
-rw-r--r--drivers/gpio/gpio-max730x.c12
-rw-r--r--drivers/gpio/gpio-mb86s7x.c28
-rw-r--r--drivers/gpio/gpio-merrifield.c10
-rw-r--r--drivers/gpio/gpio-mlxbf2.c5
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c96
-rw-r--r--drivers/gpio/gpio-pch.c73
-rw-r--r--drivers/gpio/gpio-pl061.c9
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-regmap.c349
-rw-r--r--drivers/gpio/gpio-tegra186.c1
-rw-r--r--drivers/gpio/gpio-xgene-sb.c14
-rw-r--r--drivers/gpio/gpiolib-acpi.c6
-rw-r--r--drivers/gpio/gpiolib-devprop.c5
-rw-r--r--drivers/gpio/gpiolib-of.c31
-rw-r--r--drivers/gpio/gpiolib.c165
-rw-r--r--drivers/gpio/gpiolib.h27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c171
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c6
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c1
-rw-r--r--drivers/gpu/drm/drm_connector.c5
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c4
-rw-r--r--drivers/gpu/drm/drm_sysfs.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c45
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c56
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c14
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c1
-rw-r--r--drivers/gpu/drm/i915/i915_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_query.c62
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_request.c359
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c16
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c259
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.h7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c45
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c4
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c16
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c83
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h14
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c418
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h37
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h48
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c70
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c123
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h50
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c35
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c95
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c129
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h100
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c58
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c80
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c21
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h15
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c31
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c232
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c42
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c49
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h5
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c32
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c56
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c17
-rw-r--r--drivers/greybus/Kconfig6
-rw-r--r--drivers/greybus/arpc.h2
-rw-r--r--drivers/hid/Kconfig11
-rw-r--r--drivers/hid/hid-alps.c3
-rw-r--r--drivers/hid/hid-apple.c30
-rw-r--r--drivers/hid/hid-asus.c122
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-mcp2221.c169
-rw-r--r--drivers/hid/hid-multitouch.c66
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-sony.c17
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c2
-rw-r--r--drivers/hwtracing/coresight/Makefile3
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-platform.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c232
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h8
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c91
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h21
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c204
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c2
-rw-r--r--drivers/hwtracing/coresight/coresight.c82
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/iio/accel/Kconfig10
-rw-r--r--drivers/iio/accel/bma180.c208
-rw-r--r--drivers/iio/accel/dmard06.c3
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c18
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c11
-rw-r--r--drivers/iio/accel/mxc4005.c4
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_buffer.c3
-rw-r--r--drivers/iio/accel/st_accel_core.c83
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/adc/Kconfig55
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad7476.c59
-rw-r--r--drivers/iio/adc/ad7780.c27
-rw-r--r--drivers/iio/adc/ad7791.c64
-rw-r--r--drivers/iio/adc/ad7793.c144
-rw-r--r--drivers/iio/adc/ad9467.c422
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c8
-rw-r--r--drivers/iio/adc/adi-axi-adc.c482
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c233
-rw-r--r--drivers/iio/adc/at91_adc.c5
-rw-r--r--drivers/iio/adc/exynos_adc.c17
-rw-r--r--drivers/iio/adc/fsl-imx25-gcq.c4
-rw-r--r--drivers/iio/adc/intel_mrfld_adc.c6
-rw-r--r--drivers/iio/adc/max1241.c227
-rw-r--r--drivers/iio/adc/max1363.c32
-rw-r--r--drivers/iio/adc/mcp3422.c5
-rw-r--r--drivers/iio/adc/mp2629_adc.c208
-rw-r--r--drivers/iio/adc/stm32-adc-core.c34
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c4
-rw-r--r--drivers/iio/adc/ti-ads124s08.c7
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c4
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c41
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c31
-rw-r--r--drivers/iio/buffer/industrialio-triggered-buffer.c11
-rw-r--r--drivers/iio/buffer/kfifo_buf.c22
-rw-r--r--drivers/iio/chemical/Kconfig11
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/atlas-ezo-sensor.c177
-rw-r--r--drivers/iio/chemical/atlas-sensor.c36
-rw-r--r--drivers/iio/chemical/bme680_core.c36
-rw-r--r--drivers/iio/chemical/ccs811.c112
-rw-r--r--drivers/iio/chemical/pms7003.c17
-rw-r--r--drivers/iio/chemical/sps30.c9
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c18
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.h3
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c13
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c4
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_spi.c6
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c13
-rw-r--r--drivers/iio/dac/Kconfig6
-rw-r--r--drivers/iio/dac/ad5360.c17
-rw-r--r--drivers/iio/dac/ad5380.c8
-rw-r--r--drivers/iio/dac/ad5421.c21
-rw-r--r--drivers/iio/dac/ad5446.c18
-rw-r--r--drivers/iio/dac/ad5449.c12
-rw-r--r--drivers/iio/dac/ad5592r-base.c30
-rw-r--r--drivers/iio/dac/ad5592r-base.h1
-rw-r--r--drivers/iio/dac/ad5592r.c4
-rw-r--r--drivers/iio/dac/ad5593r.c2
-rw-r--r--drivers/iio/dac/ad5624r_spi.c8
-rw-r--r--drivers/iio/dac/ad5686.c10
-rw-r--r--drivers/iio/dac/ad5686.h2
-rw-r--r--drivers/iio/dac/ad5755.c22
-rw-r--r--drivers/iio/dac/ad5761.c12
-rw-r--r--drivers/iio/dac/ad5764.c12
-rw-r--r--drivers/iio/dac/ltc2632.c67
-rw-r--r--drivers/iio/dac/vf610_dac.c11
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/adis16130.c4
-rw-r--r--drivers/iio/gyro/adis16136.c10
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c6
-rw-r--r--drivers/iio/gyro/bmg160_spi.c5
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c18
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c4
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c3
-rw-r--r--drivers/iio/gyro/st_gyro_core.c9
-rw-r--r--drivers/iio/health/afe4403.c14
-rw-r--r--drivers/iio/health/max30100.c7
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c12
-rw-r--r--drivers/iio/humidity/hts221_buffer.c6
-rw-r--r--drivers/iio/humidity/hts221_i2c.c6
-rw-r--r--drivers/iio/humidity/hts221_spi.c6
-rw-r--r--drivers/iio/imu/Kconfig13
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c27
-rw-r--r--drivers/iio/imu/adis16400.c21
-rw-r--r--drivers/iio/imu/adis16460.c27
-rw-r--r--drivers/iio/imu/adis16475.c1338
-rw-r--r--drivers/iio/imu/adis16480.c16
-rw-r--r--drivers/iio/imu/adis_buffer.c58
-rw-r--r--drivers/iio/imu/adis_trigger.c72
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c4
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c23
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c4
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c21
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c96
-rw-r--r--drivers/iio/industrialio-buffer.c93
-rw-r--r--drivers/iio/industrialio-core.c126
-rw-r--r--drivers/iio/industrialio-trigger.c53
-rw-r--r--drivers/iio/inkern.c27
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/bh1780.c6
-rw-r--r--drivers/iio/light/cm32181.c271
-rw-r--r--drivers/iio/light/cm3232.c3
-rw-r--r--drivers/iio/light/gp2ap002.c19
-rw-r--r--drivers/iio/light/gp2ap020a00f.c6
-rw-r--r--drivers/iio/light/hid-sensor-als.c18
-rw-r--r--drivers/iio/light/hid-sensor-prox.c18
-rw-r--r--drivers/iio/light/isl29125.c28
-rw-r--r--drivers/iio/light/ltr501.c41
-rw-r--r--drivers/iio/light/opt3001.c3
-rw-r--r--drivers/iio/light/si1133.c18
-rw-r--r--drivers/iio/light/st_uvis25_i2c.c7
-rw-r--r--drivers/iio/light/st_uvis25_spi.c7
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2772.c6
-rw-r--r--drivers/iio/light/vcnl4000.c746
-rw-r--r--drivers/iio/light/vl6180.c3
-rw-r--r--drivers/iio/light/zopt2201.c4
-rw-r--r--drivers/iio/magnetometer/ak8974.c201
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c4
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c18
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/magnetometer/rm3100-core.c5
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c3
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c18
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c18
-rw-r--r--drivers/iio/pressure/bmp280-core.c100
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c18
-rw-r--r--drivers/iio/pressure/hp206c.c8
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c4
-rw-r--r--drivers/iio/pressure/ms5611_spi.c4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c7
-rw-r--r--drivers/iio/pressure/zpa2326.c9
-rw-r--r--drivers/iio/proximity/Kconfig24
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/ping.c7
-rw-r--r--drivers/iio/proximity/sx9310.c1069
-rw-r--r--drivers/iio/proximity/vcnl3020.c258
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c12
-rw-r--r--drivers/iio/temperature/ltc2983.c4
-rw-r--r--drivers/iio/temperature/max31856.c5
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile9
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/cm.c306
-rw-r--r--drivers/infiniband/core/cma.c114
-rw-r--r--drivers/infiniband/core/cma_configfs.c13
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/core/cma_trace.h20
-rw-r--r--drivers/infiniband/core/core_priv.h3
-rw-r--r--drivers/infiniband/core/cq.c173
-rw-r--r--drivers/infiniband/core/device.c22
-rw-r--r--drivers/infiniband/core/fmr_pool.c494
-rw-r--r--drivers/infiniband/core/lag.c138
-rw-r--r--drivers/infiniband/core/mad.c255
-rw-r--r--drivers/infiniband/core/multicast.c12
-rw-r--r--drivers/infiniband/core/rdma_core.c25
-rw-r--r--drivers/infiniband/core/rdma_core.h7
-rw-r--r--drivers/infiniband/core/rw.c2
-rw-r--r--drivers/infiniband/core/sa_query.c51
-rw-r--r--drivers/infiniband/core/sysfs.c10
-rw-r--r--drivers/infiniband/core/ucma.c65
-rw-r--r--drivers/infiniband/core/ud_header.c2
-rw-r--r--drivers/infiniband/core/umem_odp.c4
-rw-r--r--drivers/infiniband/core/user_mad.c22
-rw-r--r--drivers/infiniband/core/uverbs.h21
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c76
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c24
-rw-r--r--drivers/infiniband/core/uverbs_main.c46
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c95
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c17
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c12
-rw-r--r--drivers/infiniband/core/uverbs_std_types_qp.c401
-rw-r--r--drivers/infiniband/core/uverbs_std_types_srq.c234
-rw-r--r--drivers/infiniband/core/uverbs_std_types_wq.c194
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c3
-rw-r--r--drivers/infiniband/core/verbs.c159
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c76
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h18
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c357
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h42
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c88
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h91
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h53
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h106
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/efa/efa.h6
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h63
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c5
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h3
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c18
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h11
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c52
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c19
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile4
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c12
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c303
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h5
-rw-r--r--drivers/infiniband/hw/hfi1/common.h13
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c231
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h38
-rw-r--r--drivers/infiniband/hw/hfi1/init.c13
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h171
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c309
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_rx.c95
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c828
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c2
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c36
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h7
-rw-r--r--drivers/infiniband/hw/hfi1/netdev.h118
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c481
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c18
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c42
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ctxts.h11
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c14
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h5
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c325
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c148
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c351
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h246
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c114
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c360
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1675
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c71
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c1644
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c509
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c378
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h1
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c11
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c97
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile3
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c35
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c114
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h4
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c17
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c147
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c38
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c73
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h72
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qos.c13
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c3710
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h6
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c44
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c10
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c111
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c1504
-rw-r--r--drivers/infiniband/hw/mlx5/wr.h76
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c262
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c105
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h23
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c1
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/main.c1
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c5
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c11
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c155
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h15
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c24
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c9
-rw-r--r--drivers/infiniband/sw/siw/siw.h4
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c1
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c9
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c1
-rw-r--r--drivers/infiniband/ulp/Makefile1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c37
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c23
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c3
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h79
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c19
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c188
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c126
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c5
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c12
-rw-r--r--drivers/infiniband/ulp/rtrs/Kconfig27
-rw-r--r--drivers/infiniband/ulp/rtrs/Makefile15
-rw-r--r--drivers/infiniband/ulp/rtrs/README213
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c200
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c483
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c2992
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h252
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-log.h28
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h399
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c38
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c321
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2178
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h148
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c612
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h196
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c265
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h27
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c67
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h5
-rw-r--r--drivers/input/evdev.c7
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/atkbd.c97
-rw-r--r--drivers/input/keyboard/imx_sc_key.c33
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c2
-rw-r--r--drivers/input/misc/Kconfig32
-rw-r--r--drivers/input/misc/Makefile3
-rw-r--r--drivers/input/misc/gp2ap002a00f.c281
-rw-r--r--drivers/input/misc/iqs269a.c1833
-rw-r--r--drivers/input/misc/msm-vibrator.c281
-rw-r--r--drivers/input/misc/xen-kbdfront.c2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/serio/i8042-ppcio.h57
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h1
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/i8042.h2
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c7
-rw-r--r--drivers/input/touchscreen/cy8ctma140.c353
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c5
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c198
-rw-r--r--drivers/input/touchscreen/elants_i2c.c247
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c4
-rw-r--r--drivers/input/touchscreen/mms114.c19
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c2
-rw-r--r--drivers/input/touchscreen/stmfts.c2
-rw-r--r--drivers/interconnect/Kconfig3
-rw-r--r--drivers/interconnect/Makefile1
-rw-r--r--drivers/interconnect/core.c161
-rw-r--r--drivers/interconnect/imx/Kconfig17
-rw-r--r--drivers/interconnect/imx/Makefile9
-rw-r--r--drivers/interconnect/imx/imx.c284
-rw-r--r--drivers/interconnect/imx/imx.h61
-rw-r--r--drivers/interconnect/imx/imx8mm.c105
-rw-r--r--drivers/interconnect/imx/imx8mn.c94
-rw-r--r--drivers/interconnect/imx/imx8mq.c103
-rw-r--r--drivers/interconnect/internal.h2
-rw-r--r--drivers/iommu/Kconfig9
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c369
-rw-r--r--drivers/iommu/amd_iommu.h96
-rw-r--r--drivers/iommu/amd_iommu_debugfs.c5
-rw-r--r--drivers/iommu/amd_iommu_init.c4
-rw-r--r--drivers/iommu/amd_iommu_proto.h96
-rw-r--r--drivers/iommu/amd_iommu_types.h9
-rw-r--r--drivers/iommu/amd_iommu_v2.c18
-rw-r--r--drivers/iommu/arm-smmu-impl.c8
-rw-r--r--drivers/iommu/arm-smmu-qcom.c37
-rw-r--r--drivers/iommu/arm-smmu-v3.c122
-rw-r--r--drivers/iommu/arm-smmu.c53
-rw-r--r--drivers/iommu/arm-smmu.h1
-rw-r--r--drivers/iommu/dma-iommu.c5
-rw-r--r--drivers/iommu/dmar.c99
-rw-r--r--drivers/iommu/exynos-iommu.c24
-rw-r--r--drivers/iommu/fsl_pamu_domain.c22
-rw-r--r--drivers/iommu/hyperv-iommu.c2
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c62
-rw-r--r--drivers/iommu/intel-iommu.c952
-rw-r--r--drivers/iommu/intel-pasid.c309
-rw-r--r--drivers/iommu/intel-pasid.h27
-rw-r--r--drivers/iommu/intel-svm.c452
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/iommu.c470
-rw-r--r--drivers/iommu/iova.c6
-rw-r--r--drivers/iommu/ipmmu-vmsa.c59
-rw-r--r--drivers/iommu/msm_iommu.c36
-rw-r--r--drivers/iommu/mtk_iommu.c24
-rw-r--r--drivers/iommu/mtk_iommu_v1.c68
-rw-r--r--drivers/iommu/omap-iommu.c103
-rw-r--r--drivers/iommu/qcom_iommu.c24
-rw-r--r--drivers/iommu/rockchip-iommu.c26
-rw-r--r--drivers/iommu/s390-iommu.c30
-rw-r--r--drivers/iommu/sun50i-iommu.c1023
-rw-r--r--drivers/iommu/tegra-gart.c24
-rw-r--r--drivers/iommu/tegra-smmu.c31
-rw-r--r--drivers/iommu/virtio-iommu.c41
-rw-r--r--drivers/irqchip/Kconfig13
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-riscv-intc.c138
-rw-r--r--drivers/irqchip/irq-sifive-plic.c46
-rw-r--r--drivers/leds/Kconfig29
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/leds-ariel.c133
-rw-r--r--drivers/leds/leds-aw2013.c436
-rw-r--r--drivers/leds/leds-lm355x.c1
-rw-r--r--drivers/leds/leds-lp3952.c2
-rw-r--r--drivers/leds/leds-lt3593.c1
-rw-r--r--drivers/leds/leds-netxbig.c148
-rw-r--r--drivers/leds/leds-pwm.c16
-rw-r--r--drivers/leds/leds-sgm3140.c320
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c5
-rw-r--r--drivers/leds/trigger/ledtrig-timer.c4
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/macintosh/ams/ams-input.c37
-rw-r--r--drivers/macintosh/ams/ams.h4
-rw-r--r--drivers/macintosh/macio-adb.c2
-rw-r--r--drivers/macintosh/mediabay.c2
-rw-r--r--drivers/macintosh/via-pmu.c2
-rw-r--r--drivers/macintosh/windfarm_pm112.c21
-rw-r--r--drivers/mailbox/Kconfig18
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/imx-mailbox.c117
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c61
-rw-r--r--drivers/mailbox/qcom-ipcc.c286
-rw-r--r--drivers/mailbox/sprd-mailbox.c361
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c25
-rw-r--r--drivers/md/Kconfig20
-rw-r--r--drivers/md/Makefile3
-rw-r--r--drivers/md/dm-bufio.c109
-rw-r--r--drivers/md/dm-crypt.c80
-rw-r--r--drivers/md/dm-ebs-target.c471
-rw-r--r--drivers/md/dm-historical-service-time.c561
-rw-r--r--drivers/md/dm-integrity.c6
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c123
-rw-r--r--drivers/md/dm-path-selector.h2
-rw-r--r--drivers/md/dm-queue-length.c2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-service-time.c2
-rw-r--r--drivers/md/dm-stats.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-writecache.c42
-rw-r--r--drivers/md/dm-zoned-metadata.c1046
-rw-r--r--drivers/md/dm-zoned-reclaim.c210
-rw-r--r--drivers/md/dm-zoned-target.c463
-rw-r--r--drivers/md/dm-zoned.h113
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h4
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c6
-rw-r--r--drivers/media/pci/bt8xx/bt878.c2
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c1
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c1
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c8
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c2
-rw-r--r--drivers/memory/Kconfig11
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/bt1-l2-ctl.c322
-rw-r--r--drivers/memory/samsung/exynos5422-dmc.c8
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile5
-rw-r--r--drivers/mfd/htc-i2cpld.c6
-rw-r--r--drivers/mfd/intel-lpss-pci.c2
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c1
-rw-r--r--drivers/mfd/max77620.c1
-rw-r--r--drivers/mfd/mp2629.c79
-rw-r--r--drivers/mfd/mt6358-irq.c235
-rw-r--r--drivers/mfd/mt6360-core.c424
-rw-r--r--drivers/mfd/mt6397-core.c101
-rw-r--r--drivers/mfd/mt6397-irq.c35
-rw-r--r--drivers/mfd/sm501.c24
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c1
-rw-r--r--drivers/mfd/stm32-timers.c32
-rw-r--r--drivers/mfd/stmfx.c22
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/tqmx86.c2
-rw-r--r--drivers/mfd/vexpress-sysreg.c99
-rw-r--r--drivers/mfd/wcd934x.c1
-rw-r--r--drivers/mfd/wm8994-core.c8
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/rts5249.c29
-rw-r--r--drivers/misc/cardreader/rts5260.c26
-rw-r--r--drivers/misc/cardreader/rts5261.c47
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c43
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h1
-rw-r--r--drivers/misc/cxl/Kconfig8
-rw-r--r--drivers/misc/cxl/cxllib.c9
-rw-r--r--drivers/misc/cxl/fault.c4
-rw-r--r--drivers/misc/fastrpc.c13
-rw-r--r--drivers/misc/genwqe/card_utils.c44
-rw-r--r--drivers/misc/habanalabs/Makefile3
-rw-r--r--drivers/misc/habanalabs/command_buffer.c28
-rw-r--r--drivers/misc/habanalabs/command_submission.c385
-rw-r--r--drivers/misc/habanalabs/context.c8
-rw-r--r--drivers/misc/habanalabs/debugfs.c116
-rw-r--r--drivers/misc/habanalabs/device.c53
-rw-r--r--drivers/misc/habanalabs/firmware_if.c297
-rw-r--r--drivers/misc/habanalabs/gaudi/Makefile5
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c6748
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h261
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c884
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c121
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c9090
-rw-r--r--drivers/misc/habanalabs/goya/goya.c345
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h12
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c100
-rw-r--r--drivers/misc/habanalabs/habanalabs.h187
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c14
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c118
-rw-r--r--drivers/misc/habanalabs/hwmon.c75
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h43
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h174
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h348
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h156
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h860
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h4974
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h299
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h1456
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h72
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h502
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h1062
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h114
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h56
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h896
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h82
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h2578
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h800
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h1226
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h834
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi.h59
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h310
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h694
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h367
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h36
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h458
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h212
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h27
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h3
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h56
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_reg_map.h43
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h58
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h2
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h16
-rw-r--r--drivers/misc/habanalabs/memory.c37
-rw-r--r--drivers/misc/habanalabs/pci.c63
-rw-r--r--drivers/misc/habanalabs/sysfs.c17
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/mic/Kconfig2
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c2
-rw-r--r--drivers/misc/mic/scif/scif_rma.c26
-rw-r--r--drivers/misc/ocxl/context.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c4
-rw-r--r--drivers/misc/sgi-gru/grufault.c25
-rw-r--r--drivers/misc/sgi-gru/grufile.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c10
-rw-r--r--drivers/misc/sgi-xp/xpnet.c8
-rw-r--r--drivers/misc/uacce/uacce.c172
-rw-r--r--drivers/misc/vexpress-syscfg.c280
-rw-r--r--drivers/misc/xilinx_sdfec.c61
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c38
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c13
-rw-r--r--drivers/mtd/devices/docg3.c10
-rw-r--r--drivers/mtd/maps/physmap-gemini.c5
-rw-r--r--drivers/mtd/mtdblock.c11
-rw-r--r--drivers/mtd/mtdcore.c191
-rw-r--r--drivers/mtd/mtdpart.c54
-rw-r--r--drivers/mtd/nand/raw/Kconfig12
-rw-r--r--drivers/mtd/nand/raw/Makefile2
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c5
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c1297
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c2
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c403
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/main.c6
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c164
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c17
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c16
-rw-r--r--drivers/mtd/nand/raw/cmx270_nand.c236
-rw-r--r--drivers/mtd/nand/raw/cs553x_nand.c199
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c312
-rw-r--r--drivers/mtd/nand/raw/denali.c60
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c487
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c9
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpio.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c189
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c6
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c170
-rw-r--r--drivers/mtd/nand/raw/internals.h12
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c68
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c5
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c5
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c19
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c6
-rw-r--r--drivers/mtd/nand/raw/mxic_nand.c10
-rw-r--r--drivers/mtd/nand/raw/nand_base.c445
-rw-r--r--drivers/mtd/nand/raw/nand_bch.c10
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c32
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c8
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c65
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c71
-rw-r--r--drivers/mtd/nand/raw/nand_timings.c11
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c14
-rw-r--r--drivers/mtd/nand/raw/nandsim.c438
-rw-r--r--drivers/mtd/nand/raw/ndfc.c8
-rw-r--r--drivers/mtd/nand/raw/omap2.c8
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c1
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c8
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c33
-rw-r--r--drivers/mtd/nand/raw/pasemi_nand.c9
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c8
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c13
-rw-r--r--drivers/mtd/nand/raw/r852.c6
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c3
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/raw/sharpsl.c14
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c8
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c1067
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c16
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c13
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c6
-rw-r--r--drivers/mtd/nand/raw/tmio_nand.c8
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c6
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c10
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c8
-rw-r--r--drivers/mtd/parsers/cmdlinepart.c35
-rw-r--r--drivers/mtd/parsers/ofpart.c3
-rw-r--r--drivers/mtd/spi-nor/Kconfig4
-rw-r--r--drivers/mtd/spi-nor/controllers/Kconfig4
-rw-r--r--drivers/mtd/spi-nor/controllers/aspeed-smc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/hisi-sfc.c2
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c2
-rw-r--r--drivers/mtd/spi-nor/core.c22
-rw-r--r--drivers/mtd/spi-nor/macronix.c6
-rw-r--r--drivers/mtd/spi-nor/micron-st.c6
-rw-r--r--drivers/mtd/spi-nor/sfdp.c34
-rw-r--r--drivers/mtd/spi-nor/sfdp.h11
-rw-r--r--drivers/mtd/spi-nor/spansion.c44
-rw-r--r--drivers/mtd/spi-nor/winbond.c29
-rw-r--r--drivers/mtd/ubi/build.c5
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c39
-rw-r--r--drivers/mtd/ubi/fastmap.c11
-rw-r--r--drivers/mtd/ubi/ubi.h6
-rw-r--r--drivers/mtd/ubi/wl.c28
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/dsa/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/qca8k.c3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c10
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/hplance.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c2
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/apple/mace.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c1
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c23
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c2
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/geneve.c7
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/dp83869.c5
-rw-r--r--drivers/net/phy/fixed_phy.c28
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/mscc/mscc.h2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c8
-rw-r--r--drivers/ntb/core.c9
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c4
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c6
-rw-r--r--drivers/ntb/hw/intel/Makefile2
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c49
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c13
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.h8
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c552
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h100
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.h12
-rw-r--r--drivers/ntb/test/ntb_perf.c49
-rw-r--r--drivers/ntb/test/ntb_pingpong.c14
-rw-r--r--drivers/ntb/test/ntb_tool.c9
-rw-r--r--drivers/nvdimm/pmem.c3
-rw-r--r--drivers/nvme/host/core.c4
-rw-r--r--drivers/nvme/host/fc.c5
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/nvme/host/tcp.c8
-rw-r--r--drivers/nvme/target/core.c27
-rw-r--r--drivers/nvme/target/rdma.c4
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/nvmem/core.c104
-rw-r--r--drivers/nvmem/imx-ocotp.c9
-rw-r--r--drivers/nvmem/jz4780-efuse.c4
-rw-r--r--drivers/nvmem/qfprom.c14
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c11
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c8
-rw-r--r--drivers/of/kobj.c3
-rw-r--r--drivers/of/of_reserved_mem.c51
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/of/property.c20
-rw-r--r--drivers/opp/core.c119
-rw-r--r--drivers/opp/debugfs.c42
-rw-r--r--drivers/opp/of.c205
-rw-r--r--drivers/opp/opp.h10
-rw-r--r--drivers/oprofile/buffer_sync.c12
-rw-r--r--drivers/parport/daisy.c29
-rw-r--r--drivers/parport/ieee1284.c94
-rw-r--r--drivers/parport/ieee1284_ops.c70
-rw-r--r--drivers/parport/parport_amiga.c22
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_cs.c6
-rw-r--r--drivers/parport/parport_gsc.c25
-rw-r--r--drivers/parport/parport_gsc.h21
-rw-r--r--drivers/parport/parport_ip32.c117
-rw-r--r--drivers/parport/parport_mfc3.c21
-rw-r--r--drivers/parport/parport_pc.c263
-rw-r--r--drivers/parport/parport_sunbpp.c2
-rw-r--r--drivers/parport/probe.c34
-rw-r--r--drivers/parport/procfs.c6
-rw-r--r--drivers/parport/share.c292
-rw-r--r--drivers/pci/ats.c18
-rw-r--r--drivers/pci/controller/Kconfig22
-rw-r--r--drivers/pci/controller/Makefile3
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c2
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c10
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h6
-rw-r--r--drivers/pci/controller/dwc/Kconfig17
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c4
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c22
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h3
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c19
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier-ep.c383
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c4
-rw-r--r--drivers/pci/controller/pci-aardvark.c266
-rw-r--r--drivers/pci/controller/pci-host-common.c18
-rw-r--r--drivers/pci/controller/pci-host-generic.c26
-rw-r--r--drivers/pci/controller/pci-hyperv.c82
-rw-r--r--drivers/pci/controller/pci-tegra.c7
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c14
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c16
-rw-r--r--drivers/pci/controller/pci-v3-semi.c6
-rw-r--r--drivers/pci/controller/pci-xgene.c4
-rw-r--r--drivers/pci/controller/pcie-altera.c2
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c37
-rw-r--r--drivers/pci/controller/pcie-mediatek.c3
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c563
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c1130
-rw-r--r--drivers/pci/controller/pcie-rcar.c1211
-rw-r--r--drivers/pci/controller/pcie-rcar.h140
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-tango.c13
-rw-r--r--drivers/pci/controller/vmd.c6
-rw-r--r--drivers/pci/ecam.c10
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c3
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c204
-rw-r--r--drivers/pci/hotplug/pciehp.h2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c16
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c5
-rw-r--r--drivers/pci/iov.c39
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-bridge-emul.c61
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci.c64
-rw-r--r--drivers/pci/pcie/Kconfig1
-rw-r--r--drivers/pci/pcie/aer.c340
-rw-r--r--drivers/pci/pcie/aspm.c10
-rw-r--r--drivers/pci/pcie/dpc.c3
-rw-r--r--drivers/pci/pcie/edr.c4
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv.h13
-rw-r--r--drivers/pci/pcie/ptm.c22
-rw-r--r--drivers/pci/probe.c65
-rw-r--r--drivers/pci/quirks.c50
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/setup-bus.c115
-rw-r--r--drivers/pci/setup-res.c9
-rw-r--r--drivers/pci/switch/switchtec.c2
-rw-r--r--drivers/pci/xen-pcifront.c27
-rw-r--r--drivers/pcmcia/cs_internal.h6
-rw-r--r--drivers/pcmcia/pcmcia_cis.c6
-rw-r--r--drivers/pcmcia/yenta_socket.c40
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c2
-rw-r--r--drivers/phy/amlogic/Kconfig15
-rw-r--r--drivers/phy/amlogic/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb3.c283
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c149
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c57
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c16
-rw-r--r--drivers/phy/cadence/Kconfig9
-rw-r--r--drivers/phy/cadence/Makefile1
-rw-r--r--drivers/phy/cadence/phy-cadence-salvo.c325
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c27
-rw-r--r--drivers/phy/intel/Kconfig15
-rw-r--r--drivers/phy/intel/Makefile1
-rw-r--r--drivers/phy/intel/phy-intel-combo.c632
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/Kconfig17
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c148
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c254
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h238
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c287
-rw-r--r--drivers/phy/samsung/phy-s5pv210-usb2.c4
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c104
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c65
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c60
-rw-r--r--drivers/pinctrl/Kconfig17
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c80
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c26
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8dxl.c193
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c9
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c58
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c278
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c30
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h27
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c344
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c32
-rw-r--r--drivers/pinctrl/mediatek/Kconfig13
-rw-r--r--drivers/pinctrl/mediatek/Makefile5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c9
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6765.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c28
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c14
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8505.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c2
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c1
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c21
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c514
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.h52
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c124
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c262
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c127
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c11
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c4
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c6
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c2
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c9
-rw-r--r--drivers/pinctrl/qcom/Kconfig9
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c1361
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c82
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig4
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c744
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h1
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c20
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c1
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c2
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c45
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c119
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c1
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c7
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c4
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/gpio-poweroff.c2
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c3
-rw-r--r--drivers/power/reset/mt6323-poweroff.c2
-rw-r--r--drivers/power/reset/oxnas-restart.c233
-rw-r--r--drivers/power/reset/qcom-pon.c3
-rw-r--r--drivers/power/reset/syscon-reboot.c7
-rw-r--r--drivers/power/reset/vexpress-poweroff.c8
-rw-r--r--drivers/power/supply/88pm860x_battery.c8
-rw-r--r--drivers/power/supply/Kconfig59
-rw-r--r--drivers/power/supply/Makefile3
-rw-r--r--drivers/power/supply/ab8500_fg.c2
-rw-r--r--drivers/power/supply/axp288_charger.c5
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c6
-rw-r--r--drivers/power/supply/bd70528-charger.c144
-rw-r--r--drivers/power/supply/bd99954-charger.c1142
-rw-r--r--drivers/power/supply/bd99954-charger.h1075
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq25890_charger.c200
-rw-r--r--drivers/power/supply/charger-manager.c40
-rw-r--r--drivers/power/supply/cw2015_battery.c750
-rw-r--r--drivers/power/supply/generic-adc-battery.c22
-rw-r--r--drivers/power/supply/lp8788-charger.c18
-rw-r--r--drivers/power/supply/max14577_charger.c10
-rw-r--r--drivers/power/supply/max14656_charger_detector.c5
-rw-r--r--drivers/power/supply/max17040_battery.c2
-rw-r--r--drivers/power/supply/max17042_battery.c8
-rw-r--r--drivers/power/supply/mp2629_charger.c669
-rw-r--r--drivers/power/supply/olpc_battery.c4
-rw-r--r--drivers/power/supply/power_supply_core.c8
-rw-r--r--drivers/power/supply/power_supply_hwmon.c64
-rw-r--r--drivers/power/supply/power_supply_sysfs.c484
-rw-r--r--drivers/power/supply/sbs-battery.c232
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c77
-rw-r--r--drivers/power/supply/smb347-charger.c5
-rw-r--r--drivers/ps3/ps3-lpm.c8
-rw-r--r--drivers/ps3/ps3-vuart.c5
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c27
-rw-r--r--drivers/remoteproc/Kconfig9
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/ingenic_rproc.c280
-rw-r--r--drivers/remoteproc/mtk_scp.c4
-rw-r--r--drivers/remoteproc/qcom_common.c17
-rw-r--r--drivers/remoteproc/qcom_common.h5
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c173
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c68
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c6
-rw-r--r--drivers/remoteproc/qcom_sysmon.c116
-rw-r--r--drivers/remoteproc/qcom_wcnss.c1
-rw-r--r--drivers/remoteproc/remoteproc_core.c243
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c28
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c24
-rw-r--r--drivers/remoteproc/remoteproc_internal.h17
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c15
-rw-r--r--drivers/remoteproc/st_remoteproc.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c3
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c69
-rw-r--r--drivers/reset/reset-imx7.c101
-rw-r--r--drivers/reset/reset-zynqmp.c26
-rw-r--r--drivers/rpmsg/Kconfig6
-rw-r--r--drivers/rpmsg/Makefile3
-rw-r--r--drivers/rpmsg/qcom_glink_ssr.c166
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c2
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/rtc-88pm860x.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c66
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c10
-rw-r--r--drivers/rtc/rtc-goldfish.c2
-rw-r--r--drivers/rtc/rtc-jz4740.c173
-rw-r--r--drivers/rtc/rtc-lpc24xx.c4
-rw-r--r--drivers/rtc/rtc-max77686.c22
-rw-r--r--drivers/rtc/rtc-mc13xxx.c4
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mt2712.c16
-rw-r--r--drivers/rtc/rtc-mt6397.c18
-rw-r--r--drivers/rtc/rtc-pcf2127.c31
-rw-r--r--drivers/rtc/rtc-rc5t619.c4
-rw-r--r--drivers/rtc/rtc-rv3028.c2
-rw-r--r--drivers/rtc/rtc-snvs.c59
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c40
-rw-r--r--drivers/s390/cio/chsc.h50
-rw-r--r--drivers/s390/cio/device_ops.c23
-rw-r--r--drivers/s390/cio/idset.c12
-rw-r--r--drivers/s390/cio/qdio.h16
-rw-r--r--drivers/s390/cio/qdio_main.c299
-rw-r--r--drivers/s390/cio/qdio_setup.c100
-rw-r--r--drivers/s390/cio/qdio_thinint.c61
-rw-r--r--drivers/s390/cio/vfio_ccw_chp.c148
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c19
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c165
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c65
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h16
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h30
-rw-r--r--drivers/s390/crypto/ap_bus.c94
-rw-r--r--drivers/s390/crypto/ap_bus.h25
-rw-r--r--drivers/s390/crypto/ap_card.c47
-rw-r--r--drivers/s390/crypto/ap_queue.c10
-rw-r--r--drivers/s390/net/qeth_l2_main.c198
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_diag.h6
-rw-r--r--drivers/s390/scsi/zfcp_erp.c84
-rw-r--r--drivers/s390/scsi/zfcp_ext.h11
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c76
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c19
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c131
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c16
-rw-r--r--drivers/sbus/char/flash.c1
-rw-r--r--drivers/sbus/char/oradax.c8
-rw-r--r--drivers/sbus/char/uctrl.c1
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/BusLogic.c2
-rw-r--r--drivers/scsi/a2091.c1
-rw-r--r--drivers/scsi/a3000.c1
-rw-r--r--drivers/scsi/aacraid/aachba.c1
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/aacraid/linit.c16
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c18
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c19
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c14
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/eesox.c2
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c4
-rw-r--r--drivers/scsi/bfa/bfa_svc.c7
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_attr.c4
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c18
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c7
-rw-r--r--drivers/scsi/cxlflash/main.c1
-rw-r--r--drivers/scsi/dpt_i2o.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fnic/fnic_main.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c6
-rw-r--r--drivers/scsi/fnic/vnic_dev.c12
-rw-r--r--drivers/scsi/fnic/vnic_wq.c4
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/gvp11.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c17
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c26
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/isci/isci.h6
-rw-r--r--drivers/scsi/lasi700.c1
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/lpfc/lpfc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c108
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c82
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c37
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c45
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c12
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c81
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/mpt3sas/Makefile3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c266
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_debugfs.c157
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8
-rw-r--r--drivers/scsi/mvme147.c1
-rw-r--r--drivers/scsi/mvsas/mv_init.c6
-rw-r--r--drivers/scsi/pmcraid.c4
-rw-r--r--drivers/scsi/qedf/qedf.h6
-rw-r--r--drivers/scsi/qedf/qedf_els.c10
-rw-r--r--drivers/scsi/qedf/qedf_io.c48
-rw-r--r--drivers/scsi/qedf/qedf_main.c135
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c21
-rw-r--r--drivers/scsi/qedi/qedi_main.c22
-rw-r--r--drivers/scsi/qla1280.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c866
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h443
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h728
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h768
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c380
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c287
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c123
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h64
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c208
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h36
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c133
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c323
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h232
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c16
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi_debug.c2048
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c230
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c68
-rw-r--r--drivers/scsi/sd.c19
-rw-r--r--drivers/scsi/sgiwd93.c2
-rw-r--r--drivers/scsi/sni_53c710.c1
-rw-r--r--drivers/scsi/snic/snic.h2
-rw-r--r--drivers/scsi/snic/snic_ctl.c5
-rw-r--r--drivers/scsi/sr.c26
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/ufs/ti-j721e-ufs.c13
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c30
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c10
-rw-r--r--drivers/scsi/ufs/ufs-sysfs.c61
-rw-r--r--drivers/scsi/ufs/ufs.h43
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h7
-rw-r--r--drivers/scsi/ufs/ufshcd.c515
-rw-r--r--drivers/scsi/ufs/ufshcd.h45
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/scsi/zorro_esp.c2
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c5
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c112
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c6
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c12
-rw-r--r--drivers/soc/fsl/qbman/qman.c5
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soc/imx/Makefile3
-rw-r--r--drivers/soc/imx/soc-imx.c192
-rw-r--r--drivers/soc/imx/soc-imx8m.c7
-rw-r--r--drivers/soc/kendryte/k210-sysctl.c12
-rw-r--r--drivers/soc/mediatek/Kconfig7
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c378
-rw-r--r--drivers/soc/qcom/Kconfig15
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/cmd-db.c78
-rw-r--r--drivers/soc/qcom/glink_ssr.c156
-rw-r--r--drivers/soc/qcom/pdr_interface.c4
-rw-r--r--drivers/soc/qcom/qcom_aoss.c1
-rw-r--r--drivers/soc/qcom/rpmh-internal.h59
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c746
-rw-r--r--drivers/soc/qcom/rpmh.c97
-rw-r--r--drivers/soc/qcom/rpmhpd.c24
-rw-r--r--drivers/soc/qcom/rpmpd.c5
-rw-r--r--drivers/soc/qcom/smp2p.c4
-rw-r--r--drivers/soc/qcom/socinfo.c6
-rw-r--r--drivers/soc/renesas/Kconfig11
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a7742-sysc.c42
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/sifive/sifive_l2_cache.c40
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c57
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c1
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c6
-rw-r--r--drivers/soc/tegra/fuse/fuse.h8
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c32
-rw-r--r--drivers/soc/tegra/pmc.c3
-rw-r--r--drivers/soc/ti/Kconfig10
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/k3-socinfo.c152
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c2
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c26
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c17
-rw-r--r--drivers/soundwire/Makefile8
-rw-r--r--drivers/soundwire/bus.c71
-rw-r--r--drivers/soundwire/bus.h4
-rw-r--r--drivers/soundwire/bus_type.c22
-rw-r--r--drivers/soundwire/cadence_master.c8
-rw-r--r--drivers/soundwire/debugfs.c2
-rw-r--r--drivers/soundwire/intel.c13
-rw-r--r--drivers/soundwire/intel_init.c4
-rw-r--r--drivers/soundwire/master.c172
-rw-r--r--drivers/soundwire/mipi_disco.c11
-rw-r--r--drivers/soundwire/qcom.c34
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/sysfs_local.h14
-rw-r--r--drivers/soundwire/sysfs_slave.c214
-rw-r--r--drivers/soundwire/sysfs_slave_dpn.c300
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c5
-rw-r--r--drivers/staging/android/ashmem.c4
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c4
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c12
-rw-r--r--drivers/staging/comedi/comedi_fops.c2
-rw-r--r--drivers/staging/comedi/comedi_internal.h4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c4
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c32
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c6
-rw-r--r--drivers/staging/gasket/gasket_page_table.c2
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c2
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/greybus/hid.c3
-rw-r--r--drivers/staging/greybus/light.c3
-rw-r--r--drivers/staging/greybus/loopback.c2
-rw-r--r--drivers/staging/greybus/uart.c19
-rw-r--r--drivers/staging/iio/Documentation/overview.txt2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c77
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c4
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c4
-rw-r--r--drivers/staging/media/tegra-video/Kconfig12
-rw-r--r--drivers/staging/media/tegra-video/Makefile8
-rw-r--r--drivers/staging/media/tegra-video/TODO11
-rw-r--r--drivers/staging/media/tegra-video/csi.c539
-rw-r--r--drivers/staging/media/tegra-video/csi.h147
-rw-r--r--drivers/staging/media/tegra-video/tegra210.c978
-rw-r--r--drivers/staging/media/tegra-video/vi.c1074
-rw-r--r--drivers/staging/media/tegra-video/vi.h257
-rw-r--r--drivers/staging/media/tegra-video/video.c155
-rw-r--r--drivers/staging/media/tegra-video/video.h29
-rw-r--r--drivers/staging/most/usb/Kconfig2
-rw-r--r--drivers/staging/most/usb/usb.c305
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi9
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt28
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml36
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c64
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/qlge/qlge_dbg.c7
-rw-r--r--drivers/staging/qlge/qlge_main.c476
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c99
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c33
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c19
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c54
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_hwconfig.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c62
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c3
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c116
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c24
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c18
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c126
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c158
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h2
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c2
-rw-r--r--drivers/staging/rtl8712/wifi.h9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c45
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c44
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_data.h8
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c34
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c33
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c26
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c8
-rw-r--r--drivers/staging/sm750fb/sm750.c154
-rw-r--r--drivers/staging/sm750fb/sm750.h21
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c4
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c5
-rw-r--r--drivers/staging/speakup/speakup_dummy.c4
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/speakup/spk_types.h3
-rw-r--r--drivers/staging/speakup/spkguide.txt7
-rw-r--r--drivers/staging/speakup/sysfs-driver-speakup6
-rw-r--r--drivers/staging/speakup/varhandlers.c1
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c383
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h62
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c97
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-common.h18
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h14
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h81
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c33
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c19
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h7
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c166
-rw-r--r--drivers/staging/vt6655/Makefile3
-rw-r--r--drivers/staging/vt6655/baseband.c320
-rw-r--r--drivers/staging/vt6655/baseband.h37
-rw-r--r--drivers/staging/vt6655/card.c145
-rw-r--r--drivers/staging/vt6655/card.h4
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/device_main.c37
-rw-r--r--drivers/staging/vt6655/rf.c4
-rw-r--r--drivers/staging/vt6655/rxtx.c252
-rw-r--r--drivers/staging/vt6656/Makefile6
-rw-r--r--drivers/staging/vt6656/baseband.c620
-rw-r--r--drivers/staging/vt6656/baseband.h17
-rw-r--r--drivers/staging/vt6656/card.c570
-rw-r--r--drivers/staging/vt6656/card.h20
-rw-r--r--drivers/staging/vt6656/device.h20
-rw-r--r--drivers/staging/vt6656/firmware.c106
-rw-r--r--drivers/staging/vt6656/firmware.h25
-rw-r--r--drivers/staging/vt6656/key.c47
-rw-r--r--drivers/staging/vt6656/key.h13
-rw-r--r--drivers/staging/vt6656/mac.c128
-rw-r--r--drivers/staging/vt6656/mac.h28
-rw-r--r--drivers/staging/vt6656/main_usb.c181
-rw-r--r--drivers/staging/vt6656/power.c34
-rw-r--r--drivers/staging/vt6656/power.h2
-rw-r--r--drivers/staging/vt6656/rf.c463
-rw-r--r--drivers/staging/vt6656/rf.h3
-rw-r--r--drivers/staging/vt6656/rxtx.c674
-rw-r--r--drivers/staging/vt6656/rxtx.h20
-rw-r--r--drivers/staging/vt6656/usbpipe.c70
-rw-r--r--drivers/staging/vt6656/usbpipe.h11
-rw-r--r--drivers/staging/vt6656/wcmd.c3
-rw-r--r--drivers/staging/wfx/Makefile1
-rw-r--r--drivers/staging/wfx/TODO51
-rw-r--r--drivers/staging/wfx/bh.c50
-rw-r--r--drivers/staging/wfx/bh.h1
-rw-r--r--drivers/staging/wfx/bus.h2
-rw-r--r--drivers/staging/wfx/bus_sdio.c86
-rw-r--r--drivers/staging/wfx/bus_spi.c44
-rw-r--r--drivers/staging/wfx/data_rx.c16
-rw-r--r--drivers/staging/wfx/data_rx.h3
-rw-r--r--drivers/staging/wfx/data_tx.c352
-rw-r--r--drivers/staging/wfx/data_tx.h8
-rw-r--r--drivers/staging/wfx/debug.c70
-rw-r--r--drivers/staging/wfx/fwio.c14
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h623
-rw-r--r--drivers/staging/wfx/hif_api_general.h495
-rw-r--r--drivers/staging/wfx/hif_api_mib.h671
-rw-r--r--drivers/staging/wfx/hif_rx.c221
-rw-r--r--drivers/staging/wfx/hif_tx.c119
-rw-r--r--drivers/staging/wfx/hif_tx.h10
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c386
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h436
-rw-r--r--drivers/staging/wfx/hwio.c18
-rw-r--r--drivers/staging/wfx/key.c71
-rw-r--r--drivers/staging/wfx/key.h2
-rw-r--r--drivers/staging/wfx/main.c78
-rw-r--r--drivers/staging/wfx/main.h4
-rw-r--r--drivers/staging/wfx/queue.c533
-rw-r--r--drivers/staging/wfx/queue.h42
-rw-r--r--drivers/staging/wfx/scan.c13
-rw-r--r--drivers/staging/wfx/sta.c871
-rw-r--r--drivers/staging/wfx/sta.h38
-rw-r--r--drivers/staging/wfx/traces.h31
-rw-r--r--drivers/staging/wfx/wfx.h47
-rw-r--r--drivers/staging/wilc1000/hif.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c30
-rw-r--r--drivers/target/loopback/tcm_loop.c36
-rw-r--r--drivers/target/target_core_alua.c10
-rw-r--r--drivers/target/target_core_configfs.c82
-rw-r--r--drivers/target/target_core_device.c13
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_pscsi.c6
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c6
-rw-r--r--drivers/target/target_core_user.c177
-rw-r--r--drivers/tee/Kconfig2
-rw-r--r--drivers/tee/optee/call.c10
-rw-r--r--drivers/tee/tee_core.c159
-rw-r--r--drivers/tee/tee_shm.c31
-rw-r--r--drivers/thermal/imx_sc_thermal.c2
-rw-r--r--drivers/thunderbolt/Kconfig1
-rw-r--r--drivers/thunderbolt/icm.c22
-rw-r--r--drivers/thunderbolt/nhi.c5
-rw-r--r--drivers/thunderbolt/nhi.h2
-rw-r--r--drivers/thunderbolt/switch.c11
-rw-r--r--drivers/tty/hvc/hvc_console.c23
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/mxser.c7
-rw-r--r--drivers/tty/n_gsm.c39
-rw-r--r--drivers/tty/rocket.c10
-rw-r--r--drivers/tty/serial/8250/8250_core.c18
-rw-r--r--drivers/tty/serial/8250/8250_exar.c65
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c13
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_port.c9
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/8250/serial_cs.c6
-rw-r--r--drivers/tty/serial/Kconfig16
-rw-r--r--drivers/tty/serial/amba-pl011.c1
-rw-r--r--drivers/tty/serial/ar933x_uart.c6
-rw-r--r--drivers/tty/serial/atmel_serial.c6
-rw-r--r--drivers/tty/serial/fsl_lpuart.c27
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/lantiq.c40
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c1
-rw-r--r--drivers/tty/serial/omap-serial.c52
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c7
-rw-r--r--drivers/tty/serial/samsung_tty.c84
-rw-r--r--drivers/tty/serial/sc16is7xx.c73
-rw-r--r--drivers/tty/serial/serial_core.c22
-rw-r--r--drivers/tty/serial/sh-sci.h1
-rw-r--r--drivers/tty/serial/stm32-usart.c74
-rw-r--r--drivers/tty/serial/stm32-usart.h1
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/sysrq.c70
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/keyboard.c26
-rw-r--r--drivers/tty/vt/selection.c133
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c3
-rw-r--r--drivers/uio/uio_hv_generic.c1
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c3
-rw-r--r--drivers/usb/cdns3/core.c47
-rw-r--r--drivers/usb/cdns3/core.h2
-rw-r--r--drivers/usb/cdns3/drd.c4
-rw-r--r--drivers/usb/cdns3/ep0.c7
-rw-r--r--drivers/usb/cdns3/gadget.c15
-rw-r--r--drivers/usb/chipidea/Kconfig37
-rw-r--r--drivers/usb/chipidea/Makefile13
-rw-r--r--drivers/usb/chipidea/ci.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c13
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c30
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c67
-rw-r--r--drivers/usb/chipidea/core.c48
-rw-r--r--drivers/usb/chipidea/udc.c170
-rw-r--r--drivers/usb/chipidea/udc.h6
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c334
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/usblp.c5
-rw-r--r--drivers/usb/core/hcd-pci.c7
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/otg_whitelist.h2
-rw-r--r--drivers/usb/core/sysfs.c6
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/core.c23
-rw-r--r--drivers/usb/dwc2/core.h6
-rw-r--r--drivers/usb/dwc2/core_intr.c7
-rw-r--r--drivers/usb/dwc2/debug.h2
-rw-r--r--drivers/usb/dwc2/hcd.h2
-rw-r--r--drivers/usb/dwc2/hw.h3
-rw-r--r--drivers/usb/dwc2/params.c19
-rw-r--r--drivers/usb/dwc2/platform.c39
-rw-r--r--drivers/usb/dwc3/core.c62
-rw-r--r--drivers/usb/dwc3/core.h83
-rw-r--r--drivers/usb/dwc3/debug.h4
-rw-r--r--drivers/usb/dwc3/debugfs.c14
-rw-r--r--drivers/usb/dwc3/drd.c6
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c41
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c422
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c30
-rw-r--r--drivers/usb/dwc3/gadget.c469
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/dwc3/host.c2
-rw-r--r--drivers/usb/dwc3/io.h2
-rw-r--r--drivers/usb/dwc3/trace.h2
-rw-r--r--drivers/usb/early/xhci-dbc.c1
-rw-r--r--drivers/usb/early/xhci-dbc.h2
-rw-r--r--drivers/usb/gadget/composite.c78
-rw-r--r--drivers/usb/gadget/configfs.c14
-rw-r--r--drivers/usb/gadget/function/f_acm.c16
-rw-r--r--drivers/usb/gadget/function/f_eem.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/function/f_serial.c16
-rw-r--r--drivers/usb/gadget/function/f_tcm.c3
-rw-r--r--drivers/usb/gadget/function/f_uvc.h2
-rw-r--r--drivers/usb/gadget/function/rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_audio.h2
-rw-r--r--drivers/usb/gadget/function/u_ecm.h2
-rw-r--r--drivers/usb/gadget/function/u_eem.h2
-rw-r--r--drivers/usb/gadget/function/u_ether.h2
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h2
-rw-r--r--drivers/usb/gadget/function/u_fs.h2
-rw-r--r--drivers/usb/gadget/function/u_gether.h2
-rw-r--r--drivers/usb/gadget/function/u_hid.h2
-rw-r--r--drivers/usb/gadget/function/u_midi.h2
-rw-r--r--drivers/usb/gadget/function/u_ncm.h2
-rw-r--r--drivers/usb/gadget/function/u_phonet.h2
-rw-r--r--drivers/usb/gadget/function/u_printer.h2
-rw-r--r--drivers/usb/gadget/function/u_rndis.h2
-rw-r--r--drivers/usb/gadget/function/u_serial.c57
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/u_tcm.h2
-rw-r--r--drivers/usb/gadget/function/u_uac1.h2
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.h2
-rw-r--r--drivers/usb/gadget/function/u_uac2.h2
-rw-r--r--drivers/usb/gadget/function/u_uvc.h2
-rw-r--r--drivers/usb/gadget/function/uvc.h4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c4
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.h2
-rw-r--r--drivers/usb/gadget/function/uvc_video.c76
-rw-r--r--drivers/usb/gadget/function/uvc_video.h4
-rw-r--r--drivers/usb/gadget/legacy/inode.c6
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c14
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c16
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c236
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h12
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c112
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h12
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c27
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c1
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c11
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c2
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c2
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c140
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c1
-rw-r--r--drivers/usb/gadget/usbstring.c24
-rw-r--r--drivers/usb/host/Kconfig29
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-brcm.c280
-rw-r--r--drivers/usb/host/ehci-fsl.h2
-rw-r--r--drivers/usb/host/ehci-mv.c12
-rw-r--r--drivers/usb/host/ehci-mxc.c15
-rw-r--r--drivers/usb/host/ehci-pci.c6
-rw-r--r--drivers/usb/host/ehci-platform.c4
-rw-r--r--drivers/usb/host/ehci-tegra.c1
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/fhci.h2
-rw-r--r--drivers/usb/host/imx21-hcd.h2
-rw-r--r--drivers/usb/host/ohci-pci.c9
-rw-r--r--drivers/usb/host/ohci-platform.c5
-rw-r--r--drivers/usb/host/ohci-sm501.c7
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/pci-quirks.c24
-rw-r--r--drivers/usb/host/r8a66597.h2
-rw-r--r--drivers/usb/host/u132-hcd.c10
-rw-r--r--drivers/usb/host/uhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-debugfs.h2
-rw-r--r--drivers/usb/host/xhci-ext-caps.h2
-rw-r--r--drivers/usb/host/xhci-mtk.h2
-rw-r--r--drivers/usb/host/xhci-mvebu.h2
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c645
-rw-r--r--drivers/usb/host/xhci-pci.c47
-rw-r--r--drivers/usb/host/xhci-pci.h28
-rw-r--r--drivers/usb/host/xhci-plat.c20
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-rcar.h2
-rw-r--r--drivers/usb/host/xhci-trace.h2
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/isp1760/isp1760-core.h2
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h2
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h2
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_struct.h2
-rw-r--r--drivers/usb/misc/usb_u132.h2
-rw-r--r--drivers/usb/mtu3/mtu3.h2
-rw-r--r--drivers/usb/mtu3/mtu3_debug.h2
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h2
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h2
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h2
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h2
-rw-r--r--drivers/usb/musb/davinci.h2
-rw-r--r--drivers/usb/musb/jz4740.c4
-rw-r--r--drivers/usb/musb/mediatek.c6
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_debug.h2
-rw-r--r--drivers/usb/musb/musb_debugfs.c10
-rw-r--r--drivers/usb/musb/musb_dma.h2
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_host.c10
-rw-r--r--drivers/usb/musb/musb_host.h2
-rw-r--r--drivers/usb/musb/musb_io.h2
-rw-r--r--drivers/usb/musb/musb_regs.h2
-rw-r--r--drivers/usb/musb/musb_trace.h2
-rw-r--r--drivers/usb/musb/omap2430.h2
-rw-r--r--drivers/usb/musb/tusb6010.h2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-jz4770.c12
-rw-r--r--drivers/usb/phy/phy-mv-usb.h2
-rw-r--r--drivers/usb/renesas_usbhs/common.h2
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h2
-rw-r--r--drivers/usb/renesas_usbhs/mod.h2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h2
-rw-r--r--drivers/usb/renesas_usbhs/rza.h2
-rw-r--r--drivers/usb/roles/class.c4
-rw-r--r--drivers/usb/serial/belkin_sa.h2
-rw-r--r--drivers/usb/serial/ch341.c68
-rw-r--r--drivers/usb/serial/io_16654.h2
-rw-r--r--drivers/usb/serial/io_edgeport.h2
-rw-r--r--drivers/usb/serial/io_ionsp.h2
-rw-r--r--drivers/usb/serial/io_ti.h2
-rw-r--r--drivers/usb/serial/io_usbvend.h2
-rw-r--r--drivers/usb/serial/iuu_phoenix.h2
-rw-r--r--drivers/usb/serial/mct_u232.h2
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/oti6858.h2
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/serial/visor.h2
-rw-r--r--drivers/usb/serial/whiteheat.h2
-rw-r--r--drivers/usb/storage/debug.h2
-rw-r--r--drivers/usb/storage/initializers.h2
-rw-r--r--drivers/usb/storage/protocol.h2
-rw-r--r--drivers/usb/storage/scsiglue.h2
-rw-r--r--drivers/usb/storage/sierra_ms.c4
-rw-r--r--drivers/usb/storage/transport.h2
-rw-r--r--drivers/usb/storage/unusual_alauda.h2
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_datafab.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_ene_ub6250.h2
-rw-r--r--drivers/usb/storage/unusual_freecom.h2
-rw-r--r--drivers/usb/storage/unusual_isd200.h2
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h2
-rw-r--r--drivers/usb/storage/unusual_karma.h2
-rw-r--r--drivers/usb/storage/unusual_onetouch.h2
-rw-r--r--drivers/usb/storage/unusual_realtek.h2
-rw-r--r--drivers/usb/storage/unusual_sddr09.h2
-rw-r--r--drivers/usb/storage/unusual_sddr55.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/storage/unusual_usbat.h2
-rw-r--r--drivers/usb/storage/usb.h2
-rw-r--r--drivers/usb/typec/Kconfig3
-rw-r--r--drivers/usb/typec/class.c36
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c42
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c32
-rw-r--r--drivers/usb/typec/tcpm/fusb302_reg.h2
-rw-r--r--drivers/usb/typec/tps6598x.c64
-rw-r--r--drivers/usb/typec/ucsi/Makefile4
-rw-r--r--drivers/usb/typec/ucsi/psy.c241
-rw-r--r--drivers/usb/typec/ucsi/trace.c10
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c41
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h26
-rw-r--r--drivers/vdpa/Kconfig2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c3
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h4
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c146
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c7
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c353
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c50
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c14
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h15
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c24
-rw-r--r--drivers/vfio/vfio.c13
-rw-r--r--drivers/vfio/vfio_iommu_type1.c623
-rw-r--r--drivers/vhost/Kconfig17
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/scsi.c3
-rw-r--r--drivers/vhost/test.c2
-rw-r--r--drivers/vhost/vdpa.c116
-rw-r--r--drivers/vhost/vhost.c111
-rw-r--r--drivers/vhost/vhost.h8
-rw-r--r--drivers/vhost/vringh.c6
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/backlight/backlight.c21
-rw-r--r--drivers/video/backlight/l4f00242t03.c45
-rw-r--r--drivers/video/backlight/lp855x_bl.c20
-rw-r--r--drivers/video/backlight/qcom-wled.c589
-rw-r--r--drivers/video/console/newport_con.c1
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/acornfb.c1
-rw-r--r--drivers/video/fbdev/atafb.c1
-rw-r--r--drivers/video/fbdev/cirrusfb.c1
-rw-r--r--drivers/video/fbdev/cyber2000fb.c1
-rw-r--r--drivers/video/fbdev/fb-puv3.c1
-rw-r--r--drivers/video/fbdev/hitfb.c1
-rw-r--r--drivers/video/fbdev/neofb.c1
-rw-r--r--drivers/video/fbdev/ps3fb.c4
-rw-r--r--drivers/video/fbdev/q40fb.c1
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c1
-rw-r--r--drivers/virtio/Kconfig17
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_balloon.c9
-rw-r--r--drivers/virtio/virtio_mem.c1965
-rw-r--r--drivers/virtio/virtio_mmio.c4
-rw-r--r--drivers/virtio/virtio_pci_modern.c1
-rw-r--r--drivers/visorbus/controlvmchannel.h2
-rw-r--r--drivers/visorbus/vbuschannel.h2
-rw-r--r--drivers/visorbus/visorbus_private.h2
-rw-r--r--drivers/w1/masters/omap_hdq.c82
-rw-r--r--drivers/w1/slaves/w1_ds2430.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c1624
-rw-r--r--drivers/watchdog/Kconfig14
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/arm_smc_wdt.c188
-rw-r--r--drivers/watchdog/da9062_wdt.c32
-rw-r--r--drivers/watchdog/da9063_wdt.c20
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/imx_sc_wdt.c5
-rw-r--r--drivers/watchdog/m54xx_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c1
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/xen/Kconfig4
-rw-r--r--drivers/xen/balloon.c1
-rw-r--r--drivers/xen/cpu_hotplug.c8
-rw-r--r--drivers/xen/gntdev.c6
-rw-r--r--drivers/xen/grant-table.c1
-rw-r--r--drivers/xen/platform-pci.c2
-rw-r--r--drivers/xen/privcmd.c16
-rw-r--r--drivers/xen/pvcalls-back.c5
-rw-r--r--drivers/xen/time.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.c16
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c44
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c6
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c38
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c55
-rw-r--r--drivers/xen/xen-pciback/vpci.c10
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c12
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c1
-rw-r--r--fs/Kconfig.binfmt2
-rw-r--r--fs/afs/Makefile2
-rw-r--r--fs/afs/afs.h3
-rw-r--r--fs/afs/afs_vl.h1
-rw-r--r--fs/afs/callback.c345
-rw-r--r--fs/afs/cell.c10
-rw-r--r--fs/afs/cmservice.c67
-rw-r--r--fs/afs/dir.c1253
-rw-r--r--fs/afs/dir_silly.c190
-rw-r--r--fs/afs/dynroot.c93
-rw-r--r--fs/afs/file.c62
-rw-r--r--fs/afs/flock.c114
-rw-r--r--fs/afs/fs_operation.c239
-rw-r--r--fs/afs/fs_probe.c339
-rw-r--r--fs/afs/fsclient.c1305
-rw-r--r--fs/afs/inode.c493
-rw-r--r--fs/afs/internal.h523
-rw-r--r--fs/afs/main.c6
-rw-r--r--fs/afs/proc.c43
-rw-r--r--fs/afs/protocol_yfs.h2
-rw-r--r--fs/afs/rotate.c447
-rw-r--r--fs/afs/rxrpc.c45
-rw-r--r--fs/afs/security.c8
-rw-r--r--fs/afs/server.c299
-rw-r--r--fs/afs/server_list.c40
-rw-r--r--fs/afs/super.c107
-rw-r--r--fs/afs/vl_alias.c383
-rw-r--r--fs/afs/vl_rotate.c4
-rw-r--r--fs/afs/vlclient.c146
-rw-r--r--fs/afs/volume.c154
-rw-r--r--fs/afs/write.c149
-rw-r--r--fs/afs/xattr.c300
-rw-r--r--fs/afs/yfsclient.c916
-rw-r--r--fs/aio.c5
-rw-r--r--fs/bad_inode.c1
-rw-r--r--fs/binfmt_aout.c3
-rw-r--r--fs/binfmt_elf.c27
-rw-r--r--fs/binfmt_elf_fdpic.c40
-rw-r--r--fs/binfmt_em86.c19
-rw-r--r--fs/binfmt_flat.c28
-rw-r--r--fs/binfmt_misc.c73
-rw-r--r--fs/binfmt_script.c88
-rw-r--r--fs/btrfs/extent_io.h1
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/ceph/Makefile2
-rw-r--r--fs/ceph/acl.c2
-rw-r--r--fs/ceph/addr.c20
-rw-r--r--fs/ceph/caps.c425
-rw-r--r--fs/ceph/debugfs.c100
-rw-r--r--fs/ceph/dir.c26
-rw-r--r--fs/ceph/export.c9
-rw-r--r--fs/ceph/file.c30
-rw-r--r--fs/ceph/inode.c4
-rw-r--r--fs/ceph/mds_client.c48
-rw-r--r--fs/ceph/mds_client.h15
-rw-r--r--fs/ceph/metric.c148
-rw-r--r--fs/ceph/metric.h62
-rw-r--r--fs/ceph/quota.c62
-rw-r--r--fs/ceph/super.h34
-rw-r--r--fs/ceph/xattr.c4
-rw-r--r--fs/cifs/cifs_debug.c6
-rw-r--r--fs/cifs/cifs_debug.h145
-rw-r--r--fs/cifs/cifsencrypt.c8
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h20
-rw-r--r--fs/cifs/cifsproto.h36
-rw-r--r--fs/cifs/cifsroot.c6
-rw-r--r--fs/cifs/cifssmb.c81
-rw-r--r--fs/cifs/connect.c130
-rw-r--r--fs/cifs/dfs_cache.c14
-rw-r--r--fs/cifs/file.c60
-rw-r--r--fs/cifs/inode.c18
-rw-r--r--fs/cifs/link.c8
-rw-r--r--fs/cifs/misc.c60
-rw-r--r--fs/cifs/netmisc.c6
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/sess.c55
-rw-r--r--fs/cifs/smb1ops.c2
-rw-r--r--fs/cifs/smb2inode.c137
-rw-r--r--fs/cifs/smb2misc.c20
-rw-r--r--fs/cifs/smb2ops.c174
-rw-r--r--fs/cifs/smb2pdu.c499
-rw-r--r--fs/cifs/smb2pdu.h13
-rw-r--r--fs/cifs/smb2proto.h25
-rw-r--r--fs/cifs/smbdirect.c165
-rw-r--r--fs/cifs/transport.c75
-rw-r--r--fs/compat_binfmt_elf.c5
-rw-r--r--fs/coredump.c8
-rw-r--r--fs/debugfs/internal.h2
-rw-r--r--fs/dlm/dlm_internal.h7
-rw-r--r--fs/dlm/lockspace.c18
-rw-r--r--fs/dlm/rcom.c2
-rw-r--r--fs/dlm/user.c2
-rw-r--r--fs/eventfd.c64
-rw-r--r--fs/exec.c397
-rw-r--r--fs/exfat/Kconfig7
-rw-r--r--fs/exfat/balloc.c8
-rw-r--r--fs/exfat/dir.c222
-rw-r--r--fs/exfat/exfat_fs.h48
-rw-r--r--fs/exfat/exfat_raw.h85
-rw-r--r--fs/exfat/fatent.c17
-rw-r--r--fs/exfat/file.c25
-rw-r--r--fs/exfat/inode.c57
-rw-r--r--fs/exfat/misc.c46
-rw-r--r--fs/exfat/namei.c63
-rw-r--r--fs/exfat/nls.c52
-rw-r--r--fs/exfat/super.c262
-rw-r--r--fs/ext2/file.c4
-rw-r--r--fs/ext2/inode.c1
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext2/symlink.c4
-rw-r--r--fs/ext2/xattr.h1
-rw-r--r--fs/ext4/Kconfig6
-rw-r--r--fs/ext4/acl.c5
-rw-r--r--fs/ext4/balloc.c5
-rw-r--r--fs/ext4/ext4.h40
-rw-r--r--fs/ext4/ext4_extents.h9
-rw-r--r--fs/ext4/ext4_jbd2.h11
-rw-r--r--fs/ext4/extents.c444
-rw-r--r--fs/ext4/extents_status.c2
-rw-r--r--fs/ext4/file.c17
-rw-r--r--fs/ext4/fsync.c28
-rw-r--r--fs/ext4/ialloc.c1
-rw-r--r--fs/ext4/indirect.c4
-rw-r--r--fs/ext4/inline.c6
-rw-r--r--fs/ext4/inode.c152
-rw-r--r--fs/ext4/ioctl.c8
-rw-r--r--fs/ext4/mballoc.c512
-rw-r--r--fs/ext4/mballoc.h16
-rw-r--r--fs/ext4/migrate.c12
-rw-r--r--fs/ext4/namei.c76
-rw-r--r--fs/ext4/super.c33
-rw-r--r--fs/ext4/xattr.c13
-rw-r--r--fs/f2fs/Kconfig10
-rw-r--r--fs/f2fs/acl.h2
-rw-r--r--fs/f2fs/checkpoint.c37
-rw-r--r--fs/f2fs/compress.c182
-rw-r--r--fs/f2fs/data.c166
-rw-r--r--fs/f2fs/dir.c374
-rw-r--r--fs/f2fs/f2fs.h171
-rw-r--r--fs/f2fs/file.c401
-rw-r--r--fs/f2fs/gc.c125
-rw-r--r--fs/f2fs/gc.h2
-rw-r--r--fs/f2fs/hash.c76
-rw-r--r--fs/f2fs/inline.c50
-rw-r--r--fs/f2fs/namei.c19
-rw-r--r--fs/f2fs/node.c101
-rw-r--r--fs/f2fs/node.h5
-rw-r--r--fs/f2fs/recovery.c51
-rw-r--r--fs/f2fs/segment.c40
-rw-r--r--fs/f2fs/segment.h2
-rw-r--r--fs/f2fs/super.c88
-rw-r--r--fs/f2fs/sysfs.c97
-rw-r--r--fs/f2fs/trace.h2
-rw-r--r--fs/f2fs/xattr.h8
-rw-r--r--fs/fat/fatent.c103
-rw-r--r--fs/fat/inode.c6
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/dev.c14
-rw-r--r--fs/fuse/dir.c12
-rw-r--r--fs/fuse/file.c120
-rw-r--r--fs/fuse/fuse_i.h3
-rw-r--r--fs/fuse/inode.c26
-rw-r--r--fs/fuse/virtio_fs.c115
-rw-r--r--fs/gfs2/export.c4
-rw-r--r--fs/gfs2/glock.c208
-rw-r--r--fs/gfs2/glock.h16
-rw-r--r--fs/gfs2/glops.c21
-rw-r--r--fs/gfs2/incore.h9
-rw-r--r--fs/gfs2/inode.c48
-rw-r--r--fs/gfs2/inode.h2
-rw-r--r--fs/gfs2/log.c56
-rw-r--r--fs/gfs2/main.c9
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/rgrp.c2
-rw-r--r--fs/gfs2/super.c72
-rw-r--r--fs/gfs2/trans.c21
-rw-r--r--fs/gfs2/trans.h1
-rw-r--r--fs/gfs2/util.c1
-rw-r--r--fs/gfs2/util.h1
-rw-r--r--fs/hpfs/buffer.c2
-rw-r--r--fs/hpfs/file.c1
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c112
-rw-r--r--fs/internal.h2
-rw-r--r--fs/io-wq.c25
-rw-r--r--fs/io-wq.h8
-rw-r--r--fs/io_uring.c437
-rw-r--r--fs/ioctl.c82
-rw-r--r--fs/iomap/fiemap.c11
-rw-r--r--fs/jbd2/transaction.c14
-rw-r--r--fs/kernfs/file.c6
-rw-r--r--fs/locks.c7
-rw-r--r--fs/namei.c46
-rw-r--r--fs/namespace.c21
-rw-r--r--fs/nfs/direct.c4
-rw-r--r--fs/nfs/dns_resolve.c1
-rw-r--r--fs/nfs/inode.c14
-rw-r--r--fs/nfs/nfs3proc.c2
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/nfstrace.h106
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/sysfs.h2
-rw-r--r--fs/nfsd/cache.h2
-rw-r--r--fs/nfsd/netns.h1
-rw-r--r--fs/nfsd/nfs4callback.c39
-rw-r--r--fs/nfsd/nfs4proc.c9
-rw-r--r--fs/nfsd/nfs4state.c166
-rw-r--r--fs/nfsd/nfscache.c89
-rw-r--r--fs/nfsd/nfsctl.c32
-rw-r--r--fs/nfsd/nfsd.h2
-rw-r--r--fs/nfsd/nfssvc.c6
-rw-r--r--fs/nfsd/state.h7
-rw-r--r--fs/nfsd/trace.h345
-rw-r--r--fs/nilfs2/inode.c3
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/notify/fanotify/fanotify.c9
-rw-r--r--fs/notify/fanotify/fanotify.h2
-rw-r--r--fs/notify/fanotify/fanotify_user.c8
-rw-r--r--fs/notify/fdinfo.c1
-rw-r--r--fs/notify/group.c1
-rw-r--r--fs/notify/inotify/inotify_user.c4
-rw-r--r--fs/notify/mark.c6
-rw-r--r--fs/ocfs2/Kconfig2
-rw-r--r--fs/ocfs2/extent_map.c4
-rw-r--r--fs/ocfs2/mmap.c2
-rw-r--r--fs/orangefs/orangefs-bufmap.c9
-rw-r--r--fs/orangefs/orangefs-mod.c2
-rw-r--r--fs/overlayfs/copy_up.c9
-rw-r--r--fs/overlayfs/dir.c51
-rw-r--r--fs/overlayfs/export.c24
-rw-r--r--fs/overlayfs/file.c28
-rw-r--r--fs/overlayfs/inode.c22
-rw-r--r--fs/overlayfs/namei.c138
-rw-r--r--fs/overlayfs/overlayfs.h11
-rw-r--r--fs/overlayfs/ovl_entry.h10
-rw-r--r--fs/overlayfs/readdir.c57
-rw-r--r--fs/overlayfs/super.c243
-rw-r--r--fs/overlayfs/util.c36
-rw-r--r--fs/posix_acl.c2
-rw-r--r--fs/proc/array.c11
-rw-r--r--fs/proc/base.c111
-rw-r--r--fs/proc/generic.c9
-rw-r--r--fs/proc/inode.c30
-rw-r--r--fs/proc/meminfo.c1
-rw-r--r--fs/proc/nommu.c1
-rw-r--r--fs/proc/proc_sysctl.c149
-rw-r--r--fs/proc/root.c133
-rw-r--r--fs/proc/self.c8
-rw-r--r--fs/proc/task_mmu.c34
-rw-r--r--fs/proc/task_nommu.c18
-rw-r--r--fs/proc/thread_self.c8
-rw-r--r--fs/proc/vmcore.c1
-rw-r--r--fs/proc_namespace.c14
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/select.c112
-rw-r--r--fs/seq_file.c7
-rw-r--r--fs/super.c2
-rw-r--r--fs/sync.c3
-rw-r--r--fs/sysfs/file.c1
-rw-r--r--fs/userfaultfd.c46
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_inode.c14
-rw-r--r--fs/xfs/xfs_ioctl.c108
-rw-r--r--fs/xfs/xfs_iops.c5
-rw-r--r--fs/zonefs/super.c2
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actypes.h3
-rw-r--r--include/asm-generic/5level-fixup.h59
-rw-r--r--include/asm-generic/atomic-instrumented.h711
-rw-r--r--include/asm-generic/atomic-long.h331
-rw-r--r--include/asm-generic/barrier.h16
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h14
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h10
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h16
-rw-r--r--include/asm-generic/cacheflush.h25
-rw-r--r--include/asm-generic/io.h66
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h64
-rw-r--r--include/asm-generic/pgtable-nopmd.h1
-rw-r--r--include/asm-generic/pgtable-nopud.h5
-rw-r--r--include/asm-generic/pgtable.h1322
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/dt-bindings/clock/agilex-clock.h70
-rw-r--r--include/dt-bindings/clock/at91.h4
-rw-r--r--include/dt-bindings/clock/bt1-ccu.h48
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h5
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h90
-rw-r--r--include/dt-bindings/clock/intel,lgm-clk.h165
-rw-r--r--include/dt-bindings/clock/marvell,mmp2-audio.h10
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h3
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h1
-rw-r--r--include/dt-bindings/clock/mt6765-clk.h313
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8939.h206
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7180.h1
-rw-r--r--include/dt-bindings/clock/r8a7742-cpg-mssr.h42
-rw-r--r--include/dt-bindings/clock/sprd,sc9863a-clk.h5
-rw-r--r--include/dt-bindings/clock/tegra114-car.h14
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h14
-rw-r--r--include/dt-bindings/clock/tegra20-car.h2
-rw-r--r--include/dt-bindings/clock/tegra210-car.h20
-rw-r--r--include/dt-bindings/clock/tegra30-car.h14
-rw-r--r--include/dt-bindings/clock/x1000-cgu.h64
-rw-r--r--include/dt-bindings/clock/x1830-cgu.h55
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h84
-rw-r--r--include/dt-bindings/interconnect/imx8mm.h50
-rw-r--r--include/dt-bindings/interconnect/imx8mn.h41
-rw-r--r--include/dt-bindings/interconnect/imx8mq.h48
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h33
-rw-r--r--include/dt-bindings/phy/phy.h1
-rw-r--r--include/dt-bindings/pinctrl/pads-imx8dxl.h639
-rw-r--r--include/dt-bindings/pinctrl/rockchip.h11
-rw-r--r--include/dt-bindings/power/marvell,mmp2.h11
-rw-r--r--include/dt-bindings/power/meson-gxbb-power.h13
-rw-r--r--include/dt-bindings/power/meson8-power.h13
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h12
-rw-r--r--include/dt-bindings/power/r8a7742-sysc.h29
-rw-r--r--include/dt-bindings/reset/amlogic,meson-gxbb-reset.h2
-rw-r--r--include/dt-bindings/reset/bt1-ccu.h25
-rw-r--r--include/dt-bindings/reset/imx8mp-reset.h50
-rw-r--r--include/dt-bindings/reset/imx8mq-reset.h56
-rw-r--r--include/dt-bindings/reset/qcom,gcc-msm8939.h110
-rw-r--r--include/dt-bindings/reset/realtek,rtd1195.h74
-rw-r--r--include/dt-bindings/reset/realtek,rtd1295.h3
-rw-r--r--include/keys/big_key-type.h1
-rw-r--r--include/keys/user-type.h2
-rw-r--r--include/kunit/test.h12
-rw-r--r--include/linux/atomic-arch-fallback.h2291
-rw-r--r--include/linux/atomic-fallback.h346
-rw-r--r--include/linux/atomic.h11
-rw-r--r--include/linux/backlight.h1
-rw-r--r--include/linux/bch.h11
-rw-r--r--include/linux/binfmts.h48
-rw-r--r--include/linux/bitops.h2
-rw-r--r--include/linux/cache.h10
-rw-r--r--include/linux/ceph/libceph.h13
-rw-r--r--include/linux/ceph/mon_client.h2
-rw-r--r--include/linux/ceph/osd_client.h8
-rw-r--r--include/linux/ceph/osdmap.h19
-rw-r--r--include/linux/ceph/rados.h14
-rw-r--r--include/linux/clk/tegra.h27
-rw-r--r--include/linux/compiler-clang.h11
-rw-r--r--include/linux/compiler-gcc.h11
-rw-r--r--include/linux/compiler.h157
-rw-r--r--include/linux/compiler_types.h79
-rw-r--r--include/linux/coresight.h32
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/crash_dump.h3
-rw-r--r--include/linux/crush/crush.h14
-rw-r--r--include/linux/dax.h1
-rw-r--r--include/linux/dev_printk.h6
-rw-r--r--include/linux/device-mapper.h9
-rw-r--r--include/linux/dm-bufio.h12
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma-mapping.h86
-rw-r--r--include/linux/dma-noncoherent.h2
-rw-r--r--include/linux/dynamic_debug.h2
-rw-r--r--include/linux/elfnote.h2
-rw-r--r--include/linux/ethtool_netlink.h2
-rw-r--r--include/linux/fiemap.h25
-rw-r--r--include/linux/firmware.h1
-rw-r--r--include/linux/firmware/imx/sci.h1
-rw-r--r--include/linux/firmware/imx/types.h65
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h49
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h71
-rw-r--r--include/linux/firmware/trusted_foundations.h1
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h237
-rw-r--r--include/linux/fpga/adi-axi-common.h6
-rw-r--r--include/linux/fs.h33
-rw-r--r--include/linux/fsl/bestcomm/bestcomm.h2
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/gpio/driver.h48
-rw-r--r--include/linux/gpio/machine.h17
-rw-r--r--include/linux/gpio/regmap.h86
-rw-r--r--include/linux/greybus/greybus_protocols.h44
-rw-r--r--include/linux/highmem.h81
-rw-r--r--include/linux/hmm.h2
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/hw_breakpoint.h4
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h58
-rw-r--r--include/linux/iio/adc/adi-axi-adc.h64
-rw-r--r--include/linux/iio/buffer-dma.h2
-rw-r--r--include/linux/iio/buffer-dmaengine.h3
-rw-r--r--include/linux/iio/buffer_impl.h9
-rw-r--r--include/linux/iio/consumer.h18
-rw-r--r--include/linux/iio/hw-consumer.h1
-rw-r--r--include/linux/iio/iio.h10
-rw-r--r--include/linux/iio/imu/adis.h87
-rw-r--r--include/linux/iio/kfifo_buf.h1
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/iio/triggered_buffer.h2
-rw-r--r--include/linux/ima.h7
-rw-r--r--include/linux/input/gp2ap002a00f.h23
-rw-r--r--include/linux/input/mt.h5
-rw-r--r--include/linux/instrumented.h109
-rw-r--r--include/linux/intel-iommu.h82
-rw-r--r--include/linux/intel-svm.h94
-rw-r--r--include/linux/interconnect.h31
-rw-r--r--include/linux/io-mapping.h2
-rw-r--r--include/linux/iomap.h2
-rw-r--r--include/linux/iommu.h79
-rw-r--r--include/linux/ioport.h7
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/kallsyms.h4
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kconfig.h2
-rw-r--r--include/linux/kcsan-checks.h430
-rw-r--r--include/linux/kcsan.h59
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kthread.h6
-rw-r--r--include/linux/livepatch.h17
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/lsm_hooks.h56
-rw-r--r--include/linux/memory_hotplug.h10
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mfd/core.h4
-rw-r--r--include/linux/mfd/mp2629.h26
-rw-r--r--include/linux/mfd/mt6358/core.h158
-rw-r--r--include/linux/mfd/mt6358/registers.h282
-rw-r--r--include/linux/mfd/mt6360.h240
-rw-r--r--include/linux/mfd/mt6397/core.h5
-rw-r--r--include/linux/mfd/mt6397/rtc.h9
-rw-r--r--include/linux/mfd/stmfx.h1
-rw-r--r--include/linux/mhi.h23
-rw-r--r--include/linux/mlx4/device.h22
-rw-r--r--include/linux/mlx5/mlx5_ifc.h9
-rw-r--r--include/linux/mlx5/qp.h68
-rw-r--r--include/linux/mm.h37
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mmap_lock.h90
-rw-r--r--include/linux/mmu_context.h5
-rw-r--r--include/linux/mmu_notifier.h13
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h8
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/mtd/bbm.h2
-rw-r--r--include/linux/mtd/cfi.h6
-rw-r--r--include/linux/mtd/mtd.h7
-rw-r--r--include/linux/mtd/partitions.h2
-rw-r--r--include/linux/mtd/qinfo.h2
-rw-r--r--include/linux/mtd/rawnand.h131
-rw-r--r--include/linux/mtd/spi-nor.h24
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdevice.h6
-rw-r--r--include/linux/nfs4.h4
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/ntb.h6
-rw-r--r--include/linux/of_reserved_mem.h12
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/pagemap.h2
-rw-r--r--include/linux/parport.h16
-rw-r--r--include/linux/pci-acpi.h18
-rw-r--r--include/linux/pci-ats.h3
-rw-r--r--include/linux/pci-ecam.h25
-rw-r--r--include/linux/pci-epc.h38
-rw-r--r--include/linux/pci.h51
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/pgtable.h1438
-rw-r--r--include/linux/phy/omap_usb.h69
-rw-r--r--include/linux/pid.h1
-rw-r--r--include/linux/pid_namespace.h12
-rw-r--r--include/linux/platform_data/clk-integrator.h2
-rw-r--r--include/linux/platform_data/gpio-dwapb.h1
-rw-r--r--include/linux/platform_data/mtd-davinci.h2
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h2
-rw-r--r--include/linux/pm_opp.h18
-rw-r--r--include/linux/power_supply.h13
-rw-r--r--include/linux/printk.h9
-rw-r--r--include/linux/proc_fs.h32
-rw-r--r--include/linux/property.h1
-rw-r--r--include/linux/qed/qed_if.h1
-rw-r--r--include/linux/qed/qed_rdma_if.h1
-rw-r--r--include/linux/rculist.h21
-rw-r--r--include/linux/regset.h2
-rw-r--r--include/linux/remoteproc.h19
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rpmsg/qcom_glink.h3
-rw-r--r--include/linux/rtsx_pci.h25
-rw-r--r--include/linux/scatterlist.h50
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/sched/debug.h3
-rw-r--r--include/linux/sched/mm.h10
-rw-r--r--include/linux/sched/signal.h11
-rw-r--r--include/linux/sched/sysctl.h7
-rw-r--r--include/linux/scmi_protocol.h6
-rw-r--r--include/linux/scpi_protocol.h6
-rw-r--r--include/linux/security.h15
-rw-r--r--include/linux/seq_file.h19
-rw-r--r--include/linux/seqlock.h51
-rw-r--r--include/linux/serial_core.h4
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h20
-rw-r--r--include/linux/soundwire/sdw.h32
-rw-r--r--include/linux/soundwire/sdw_type.h9
-rw-r--r--include/linux/spi/l4f00242t03.h17
-rw-r--r--include/linux/spi/mcp23s08.h18
-rw-r--r--include/linux/stacktrace.h2
-rw-r--r--include/linux/sunrpc/auth.h5
-rw-r--r--include/linux/sunrpc/gss_api.h1
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_rdma.h6
-rw-r--r--include/linux/sunrpc/svc_xprt.h6
-rw-r--r--include/linux/sunrpc/svcauth_gss.h3
-rw-r--r--include/linux/sunrpc/svcsock.h6
-rw-r--r--include/linux/sysctl.h4
-rw-r--r--include/linux/sysrq.h18
-rw-r--r--include/linux/tee_drv.h17
-rw-r--r--include/linux/thunderbolt.h2
-rw-r--r--include/linux/u64_stats_sync.h43
-rw-r--r--include/linux/uacce.h34
-rw-r--r--include/linux/uaccess.h74
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/composite.h3
-rw-r--r--include/linux/usb/gadget.h8
-rw-r--r--include/linux/usb/hcd.h3
-rw-r--r--include/linux/usb/typec.h1
-rw-r--r--include/linux/vdpa.h16
-rw-r--r--include/linux/vexpress.h30
-rw-r--r--include/linux/vfio.h4
-rw-r--r--include/linux/vringh.h6
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/linux/xarray.h4
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/neighbour.h7
-rw-r--r--include/net/seg6.h2
-rw-r--r--include/pcmcia/cistpl.h6
-rw-r--r--include/rdma/ib_cm.h9
-rw-r--r--include/rdma/ib_fmr_pool.h93
-rw-r--r--include/rdma/ib_mad.h49
-rw-r--r--include/rdma/ib_verbs.h304
-rw-r--r--include/rdma/ibta_vol1_c12.h6
-rw-r--r--include/rdma/lag.h23
-rw-r--r--include/rdma/opa_port_info.h10
-rw-r--r--include/rdma/opa_vnic.h4
-rw-r--r--include/rdma/rdma_cm.h17
-rw-r--r--include/rdma/rdmavt_qp.h31
-rw-r--r--include/rdma/uverbs_ioctl.h18
-rw-r--r--include/rdma/uverbs_std_types.h2
-rw-r--r--include/rdma/uverbs_types.h3
-rw-r--r--include/scsi/sas.h8
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h9
-rw-r--r--include/soc/fsl/qe/qe.h2
-rw-r--r--include/soc/imx/cpu.h36
-rw-r--r--include/soc/qcom/cmd-db.h1
-rw-r--r--include/sound/control.h2
-rw-r--r--include/sound/hda_codec.h11
-rw-r--r--include/sound/hdaudio.h10
-rw-r--r--include/sound/intel-nhlt.h6
-rw-r--r--include/sound/soc-acpi.h2
-rw-r--r--include/sound/soc-card.h69
-rw-r--r--include/sound/soc-component.h46
-rw-r--r--include/sound/soc-dai.h62
-rw-r--r--include/sound/soc-dapm.h2
-rw-r--r--include/sound/soc-link.h27
-rw-r--r--include/sound/soc.h104
-rw-r--r--include/sound/sof.h5
-rw-r--r--include/sound/sof/channel_map.h2
-rw-r--r--include/sound/sof/control.h2
-rw-r--r--include/sound/sof/dai-imx.h2
-rw-r--r--include/sound/sof/dai-intel.h22
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/ext_manifest.h95
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/info.h28
-rw-r--r--include/sound/sof/pm.h2
-rw-r--r--include/sound/sof/stream.h2
-rw-r--r--include/sound/sof/topology.h20
-rw-r--r--include/sound/sof/trace.h4
-rw-r--r--include/sound/sof/xtensa.h2
-rw-r--r--include/target/iscsi/iscsi_target_core.h10
-rw-r--r--include/target/target_core_backend.h4
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/afs.h111
-rw-r--r--include/trace/events/block.h6
-rw-r--r--include/trace/events/ext4.h9
-rw-r--r--include/trace/events/f2fs.h83
-rw-r--r--include/trace/events/qla.h7
-rw-r--r--include/trace/events/rpcgss.h89
-rw-r--r--include/trace/events/rpcrdma.h146
-rw-r--r--include/trace/events/sunrpc.h748
-rw-r--r--include/uapi/drm/msm_drm.h24
-rw-r--r--include/uapi/linux/fiemap.h6
-rw-r--r--include/uapi/linux/gfs2_ondisk.h6
-rw-r--r--include/uapi/linux/iommu.h5
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/rtc.h1
-rw-r--r--include/uapi/linux/tee.h9
-rw-r--r--include/uapi/linux/vfio.h322
-rw-r--r--include/uapi/linux/vfio_ccw.h19
-rw-r--r--include/uapi/linux/vhost.h4
-rw-r--r--include/uapi/linux/virtio_ids.h1
-rw-r--r--include/uapi/linux/virtio_mem.h211
-rw-r--r--include/uapi/linux/virtio_ring.h48
-rw-r--r--include/uapi/misc/habanalabs.h245
-rw-r--r--include/uapi/mtd/mtd-abi.h1
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h3
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h81
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h43
-rw-r--r--include/uapi/rdma/mlx5-abi.h9
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h6
-rw-r--r--include/uapi/rdma/rdma_user_cm.h15
-rw-r--r--include/uapi/sound/skl-tplg-interface.h2
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/uapi/sound/sof/tokens.h8
-rw-r--r--include/xen/arm/page.h2
-rw-r--r--init/Kconfig45
-rw-r--r--init/Makefile2
-rw-r--r--init/init_task.c11
-rw-r--r--init/main.c14
-rw-r--r--ipc/msg.c2
-rw-r--r--ipc/namespace.c24
-rw-r--r--ipc/shm.c8
-rw-r--r--kernel/Makefile6
-rw-r--r--kernel/acct.c6
-rw-r--r--kernel/bpf/stackmap.c17
-rw-r--r--kernel/bpf/syscall.c27
-rw-r--r--kernel/cgroup/cgroup.c10
-rw-r--r--kernel/cgroup/cpuset.c4
-rw-r--r--kernel/cgroup/rstat.c60
-rw-r--r--kernel/cpu_pm.c4
-rw-r--r--kernel/cred.c3
-rw-r--r--kernel/debug/debug_core.c14
-rw-r--r--kernel/debug/kdb/kdb_bt.c15
-rw-r--r--kernel/dma/Kconfig6
-rw-r--r--kernel/dma/Makefile1
-rw-r--r--kernel/dma/contiguous.c4
-rw-r--r--kernel/dma/debug.c2
-rw-r--r--kernel/dma/direct.c56
-rw-r--r--kernel/dma/pool.c264
-rw-r--r--kernel/dma/remap.c121
-rw-r--r--kernel/events/core.c16
-rw-r--r--kernel/events/hw_breakpoint.c16
-rw-r--r--kernel/events/uprobes.c38
-rw-r--r--kernel/exit.c11
-rw-r--r--kernel/fork.c17
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/gcov/Kconfig24
-rw-r--r--kernel/gcov/Makefile3
-rw-r--r--kernel/gcov/gcc_3_4.c573
-rwxr-xr-xkernel/gen_kheaders.sh2
-rw-r--r--kernel/hung_task.c30
-rw-r--r--kernel/kcov.c282
-rw-r--r--kernel/kcsan/Makefile14
-rw-r--r--kernel/kcsan/atomic.h20
-rw-r--r--kernel/kcsan/core.c850
-rw-r--r--kernel/kcsan/debugfs.c349
-rw-r--r--kernel/kcsan/encoding.h95
-rw-r--r--kernel/kcsan/kcsan.h142
-rw-r--r--kernel/kcsan/report.c634
-rw-r--r--kernel/kcsan/test.c131
-rw-r--r--kernel/kexec_file.c5
-rw-r--r--kernel/kprobes.c34
-rw-r--r--kernel/kthread.c78
-rw-r--r--kernel/livepatch/core.c178
-rw-r--r--kernel/locking/Makefile3
-rw-r--r--kernel/locking/lockdep.c4
-rw-r--r--kernel/locking/rtmutex-debug.c2
-rw-r--r--kernel/module.c84
-rw-r--r--kernel/panic.c45
-rw-r--r--kernel/pid.c22
-rw-r--r--kernel/power/poweroff.c2
-rw-r--r--kernel/power/snapshot.c1
-rw-r--r--kernel/power/swap.c2
-rw-r--r--kernel/rcu/tree_stall.h2
-rw-r--r--kernel/relay.c24
-rw-r--r--kernel/resource.c5
-rw-r--r--kernel/sched/Makefile6
-rw-r--r--kernel/sched/core.c8
-rw-r--r--kernel/sched/fair.c4
-rw-r--r--kernel/scs.c2
-rw-r--r--kernel/sys.c22
-rw-r--r--kernel/sysctl.c38
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/posix-cpu-timers.c111
-rw-r--r--kernel/trace/Kconfig52
-rw-r--r--kernel/trace/Makefile4
-rw-r--r--kernel/trace/blktrace.c36
-rw-r--r--kernel/trace/bpf_trace.c162
-rw-r--r--kernel/trace/ftrace.c16
-rw-r--r--kernel/trace/trace.c5
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_events.c4
-rw-r--r--kernel/trace/trace_events_hist.c2073
-rw-r--r--kernel/trace/trace_events_synth.c1789
-rw-r--r--kernel/trace/trace_kprobe.c70
-rw-r--r--kernel/trace/trace_output.c4
-rw-r--r--kernel/trace/trace_probe.c2
-rw-r--r--kernel/trace/trace_stack.c5
-rw-r--r--kernel/trace/trace_synth.h36
-rw-r--r--kernel/user.c2
-rw-r--r--kernel/watchdog.c37
-rw-r--r--kernel/workqueue.c207
-rw-r--r--lib/Kconfig.debug81
-rw-r--r--lib/Kconfig.kcsan199
-rw-r--r--lib/Kconfig.ubsan11
-rw-r--r--lib/Makefile8
-rw-r--r--lib/bch.c152
-rw-r--r--lib/bitmap.c9
-rw-r--r--lib/bug.c3
-rw-r--r--lib/dump_stack.c2
-rw-r--r--lib/dynamic_debug.c9
-rw-r--r--lib/fault-inject.c4
-rw-r--r--lib/flex_proportions.c7
-rw-r--r--lib/ioremap.c1
-rw-r--r--lib/iov_iter.c7
-rw-r--r--lib/kobject.c14
-rw-r--r--lib/kunit/Kconfig23
-rw-r--r--lib/logic_pio.c22
-rw-r--r--lib/lz4/lz4_decompress.c3
-rw-r--r--lib/lzo/lzo1x_compress.c13
-rw-r--r--lib/math/Kconfig7
-rw-r--r--lib/math/prime_numbers.c10
-rw-r--r--lib/percpu-refcount.c6
-rw-r--r--lib/rhashtable.c17
-rw-r--r--lib/strncpy_from_user.c1
-rw-r--r--lib/test_bitops.c109
-rw-r--r--lib/test_firmware.c26
-rw-r--r--lib/test_hmm.c14
-rw-r--r--lib/test_lockup.c18
-rw-r--r--lib/test_printf.c4
-rw-r--r--lib/test_sysctl.c15
-rw-r--r--lib/ubsan.c33
-rw-r--r--lib/usercopy.c7
-rw-r--r--lib/vdso/gettimeofday.c13
-rw-r--r--lib/zlib_inflate/inffast.c91
-rw-r--r--mm/Kconfig8
-rw-r--r--mm/Makefile11
-rw-r--r--mm/compaction.c2
-rw-r--r--mm/debug.c10
-rw-r--r--mm/debug_vm_pgtable.c387
-rw-r--r--mm/filemap.c48
-rw-r--r--mm/frame_vector.c13
-rw-r--r--mm/frontswap.c6
-rw-r--r--mm/gup.c146
-rw-r--r--mm/hmm.c2
-rw-r--r--mm/huge_memory.c14
-rw-r--r--mm/hugetlb.c19
-rw-r--r--mm/init-mm.c4
-rw-r--r--mm/internal.h8
-rw-r--r--mm/kasan/init.c11
-rw-r--r--mm/khugepaged.c72
-rw-r--r--mm/ksm.c58
-rw-r--r--mm/list_lru.c2
-rw-r--r--mm/maccess.c278
-rw-r--r--mm/madvise.c40
-rw-r--r--mm/memblock.c2
-rw-r--r--mm/memcontrol.c14
-rw-r--r--mm/memory-failure.c43
-rw-r--r--mm/memory.c71
-rw-r--r--mm/memory_hotplug.c240
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/migrate.c16
-rw-r--r--mm/mincore.c6
-rw-r--r--mm/mlock.c22
-rw-r--r--mm/mmap.c76
-rw-r--r--mm/mmu_context.c64
-rw-r--r--mm/mmu_gather.c2
-rw-r--r--mm/mmu_notifier.c22
-rw-r--r--mm/mprotect.c22
-rw-r--r--mm/mremap.c18
-rw-r--r--mm/msync.c8
-rw-r--r--mm/nommu.c26
-rw-r--r--mm/oom_kill.c20
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/page_alloc.c35
-rw-r--r--mm/page_idle.c7
-rw-r--r--mm/page_io.c1
-rw-r--r--mm/page_isolation.c9
-rw-r--r--mm/page_reporting.h2
-rw-r--r--mm/pagewalk.c12
-rw-r--r--mm/pgtable-generic.c6
-rw-r--r--mm/process_vm_access.c4
-rw-r--r--mm/ptdump.c4
-rw-r--r--mm/rmap.c12
-rw-r--r--mm/shmem.c5
-rw-r--r--mm/slub.c2
-rw-r--r--mm/sparse-vmemmap.c1
-rw-r--r--mm/sparse.c3
-rw-r--r--mm/swap_state.c5
-rw-r--r--mm/swapfile.c5
-rw-r--r--mm/userfaultfd.c26
-rw-r--r--mm/util.c34
-rw-r--r--mm/vmacache.c5
-rw-r--r--mm/vmalloc.c2
-rw-r--r--mm/vmscan.c6
-rw-r--r--mm/vmstat.c32
-rw-r--r--mm/zbud.c2
-rw-r--r--mm/zsmalloc.c2
-rw-r--r--net/bpfilter/Kconfig6
-rw-r--r--net/bpfilter/Makefile11
-rw-r--r--net/ceph/ceph_common.c75
-rw-r--r--net/ceph/crush/crush.c3
-rw-r--r--net/ceph/debugfs.c6
-rw-r--r--net/ceph/osd_client.c103
-rw-r--r--net/ceph/osdmap.c363
-rw-r--r--net/core/dev.c40
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/ethtool/linkinfo.c3
-rw-r--r--net/ipv4/inet_connection_sock.c1
-rw-r--r--net/ipv4/tcp.c8
-rw-r--r--net/ipv6/ip6_flowlabel.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/seg6.c16
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/seg6_local.c6
-rw-r--r--net/netfilter/core.c2
-rw-r--r--net/netlink/genetlink.c94
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/ib.c43
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/ib_cm.c8
-rw-r--r--net/rds/ib_fmr.c269
-rw-r--r--net/rds/ib_frmr.c4
-rw-r--r--net/rds/ib_mr.h14
-rw-r--r--net/rds/ib_rdma.c28
-rw-r--r--net/rxrpc/peer_event.c3
-rw-r--r--net/rxrpc/proc.c6
-rw-r--r--net/smc/smc_ib.c13
-rw-r--r--net/sunrpc/addr.c4
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c56
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c12
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c18
-rw-r--r--net/sunrpc/auth_gss/trace.c1
-rw-r--r--net/sunrpc/clnt.c54
-rw-r--r--net/sunrpc/rpcb_clnt.c6
-rw-r--r--net/sunrpc/sunrpc.h1
-rw-r--r--net/sunrpc/sunrpc_syms.c2
-rw-r--r--net/sunrpc/svc.c29
-rw-r--r--net/sunrpc/svc_xprt.c57
-rw-r--r--net/sunrpc/svcauth.c25
-rw-r--r--net/sunrpc/svcauth_unix.c9
-rw-r--r--net/sunrpc/svcsock.c400
-rw-r--r--net/sunrpc/xprt.c23
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c121
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c21
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c92
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c10
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c55
-rw-r--r--net/sunrpc/xprtrdma/transport.c10
-rw-r--r--net/sunrpc/xprtrdma/verbs.c1
-rw-r--r--net/sunrpc/xprtsock.c12
-rw-r--r--net/tipc/msg.c4
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--net/vmw_vsock/vmci_transport.c2
-rw-r--r--net/xdp/xdp_umem.c6
-rw-r--r--samples/Kconfig26
-rw-r--r--samples/Makefile5
-rw-r--r--samples/auxdisplay/Makefile11
-rw-r--r--samples/connector/Makefile12
-rw-r--r--samples/hidraw/Makefile9
-rw-r--r--samples/mei/Makefile9
-rw-r--r--samples/pidfd/Makefile8
-rw-r--r--samples/seccomp/Makefile42
-rw-r--r--samples/timers/Makefile17
-rw-r--r--samples/uhid/.gitignore2
-rw-r--r--samples/uhid/Makefile9
-rw-r--r--samples/uhid/uhid-example.c4
-rw-r--r--samples/vfs/Makefile11
-rw-r--r--samples/watchdog/Makefile10
-rw-r--r--scripts/Kbuild.include2
-rw-r--r--scripts/Makefile.build67
-rw-r--r--scripts/Makefile.clean2
-rw-r--r--scripts/Makefile.host4
-rw-r--r--scripts/Makefile.kcsan19
-rw-r--r--scripts/Makefile.lib89
-rw-r--r--scripts/Makefile.modpost75
-rw-r--r--scripts/Makefile.package8
-rw-r--r--scripts/Makefile.userprogs45
-rwxr-xr-xscripts/atomic/fallbacks/acquire6
-rwxr-xr-xscripts/atomic/fallbacks/add_negative8
-rwxr-xr-xscripts/atomic/fallbacks/add_unless8
-rwxr-xr-xscripts/atomic/fallbacks/andnot6
-rwxr-xr-xscripts/atomic/fallbacks/dec6
-rwxr-xr-xscripts/atomic/fallbacks/dec_and_test8
-rwxr-xr-xscripts/atomic/fallbacks/dec_if_positive8
-rwxr-xr-xscripts/atomic/fallbacks/dec_unless_positive8
-rwxr-xr-xscripts/atomic/fallbacks/fence6
-rwxr-xr-xscripts/atomic/fallbacks/fetch_add_unless10
-rwxr-xr-xscripts/atomic/fallbacks/inc6
-rwxr-xr-xscripts/atomic/fallbacks/inc_and_test8
-rwxr-xr-xscripts/atomic/fallbacks/inc_not_zero8
-rwxr-xr-xscripts/atomic/fallbacks/inc_unless_negative8
-rwxr-xr-xscripts/atomic/fallbacks/read_acquire4
-rwxr-xr-xscripts/atomic/fallbacks/release6
-rwxr-xr-xscripts/atomic/fallbacks/set_release4
-rwxr-xr-xscripts/atomic/fallbacks/sub_and_test8
-rwxr-xr-xscripts/atomic/fallbacks/try_cmpxchg6
-rwxr-xr-xscripts/atomic/gen-atomic-fallback.sh31
-rwxr-xr-xscripts/atomic/gen-atomic-instrumented.sh9
-rwxr-xr-xscripts/atomic/gen-atomic-long.sh3
-rw-r--r--scripts/atomic/gen-atomics.sh5
-rw-r--r--scripts/basic/fixdep.c2
-rwxr-xr-xscripts/checkpatch.pl76
-rwxr-xr-xscripts/checkstack.pl87
-rw-r--r--scripts/gcc-plugins/Kconfig2
-rw-r--r--scripts/gcc-plugins/Makefile2
-rwxr-xr-xscripts/get_maintainer.pl46
-rwxr-xr-xscripts/headers_install.sh14
-rw-r--r--scripts/kconfig/Makefile11
-rw-r--r--scripts/kconfig/menu.c3
-rw-r--r--scripts/kconfig/parser.y30
-rwxr-xr-xscripts/kconfig/streamline_config.pl21
-rw-r--r--scripts/kconfig/symbol.c2
-rw-r--r--scripts/kconfig/tests/rand_nested_choice/Kconfig35
-rw-r--r--scripts/kconfig/tests/rand_nested_choice/__init__.py17
-rw-r--r--scripts/kconfig/tests/rand_nested_choice/expected_stdout02
-rw-r--r--scripts/kconfig/tests/rand_nested_choice/expected_stdout14
-rw-r--r--scripts/kconfig/tests/rand_nested_choice/expected_stdout25
-rwxr-xr-xscripts/kernel-doc2
-rwxr-xr-xscripts/link-vmlinux.sh2
-rwxr-xr-xscripts/mkcompile_h3
-rwxr-xr-xscripts/mksysmap2
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--scripts/mod/modpost.c386
-rw-r--r--scripts/mod/modpost.h19
-rw-r--r--scripts/mod/sumversion.c117
-rwxr-xr-xscripts/modules-check.sh16
-rwxr-xr-xscripts/package/buildtar6
-rw-r--r--scripts/spelling.txt9
-rwxr-xr-xscripts/xz_wrap.sh2
-rw-r--r--security/apparmor/Kconfig3
-rw-r--r--security/apparmor/apparmorfs.c56
-rw-r--r--security/apparmor/domain.c46
-rw-r--r--security/apparmor/file.c12
-rw-r--r--security/apparmor/include/domain.h2
-rw-r--r--security/apparmor/include/label.h2
-rw-r--r--security/apparmor/include/match.h11
-rw-r--r--security/apparmor/label.c60
-rw-r--r--security/apparmor/lsm.c7
-rw-r--r--security/apparmor/match.c58
-rw-r--r--security/apparmor/path.c2
-rw-r--r--security/apparmor/policy.c1
-rw-r--r--security/apparmor/policy_unpack.c58
-rw-r--r--security/commoncap.c23
-rw-r--r--security/integrity/evm/evm_crypto.c2
-rw-r--r--security/integrity/ima/ima.h20
-rw-r--r--security/integrity/ima/ima_api.c23
-rw-r--r--security/integrity/ima/ima_crypto.c254
-rw-r--r--security/integrity/ima/ima_fs.c4
-rw-r--r--security/integrity/ima/ima_init.c24
-rw-r--r--security/integrity/ima/ima_main.c54
-rw-r--r--security/integrity/ima/ima_policy.c12
-rw-r--r--security/integrity/ima/ima_queue.c36
-rw-r--r--security/integrity/ima/ima_template.c25
-rw-r--r--security/integrity/ima/ima_template_lib.c18
-rw-r--r--security/keys/Kconfig4
-rw-r--r--security/keys/big_key.c257
-rw-r--r--security/keys/internal.h11
-rw-r--r--security/keys/keyctl.c18
-rw-r--r--security/security.c17
-rw-r--r--security/selinux/hooks.c8
-rw-r--r--security/smack/smack.h12
-rw-r--r--security/smack/smack_lsm.c51
-rw-r--r--security/smack/smackfs.c10
-rw-r--r--security/tomoyo/realpath.c4
-rw-r--r--security/tomoyo/tomoyo.c12
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/oss/pcm_plugin.h2
-rw-r--r--sound/core/pcm_native.c20
-rw-r--r--sound/core/seq/oss/seq_oss_timer.h10
-rw-r--r--sound/core/sgbuf.c1
-rw-r--r--sound/drivers/Kconfig12
-rw-r--r--sound/drivers/Makefile2
-rw-r--r--sound/drivers/ml403-ac97cr.c1298
-rw-r--r--sound/drivers/pcm-indirect2.c560
-rw-r--r--sound/drivers/pcm-indirect2.h127
-rw-r--r--sound/drivers/portman2x4.c2
-rw-r--r--sound/firewire/Kconfig8
-rw-r--r--sound/firewire/amdtp-am824.c3
-rw-r--r--sound/firewire/amdtp-stream.c326
-rw-r--r--sound/firewire/amdtp-stream.h20
-rw-r--r--sound/firewire/fireface/ff-protocol-latter.c58
-rw-r--r--sound/firewire/fireface/ff-stream.c10
-rw-r--r--sound/firewire/fireface/ff.c61
-rw-r--r--sound/firewire/fireface/ff.h11
-rw-r--r--sound/firewire/fireworks/fireworks.h2
-rw-r--r--sound/firewire/motu/amdtp-motu.c19
-rw-r--r--sound/firewire/motu/motu-pcm.c14
-rw-r--r--sound/firewire/motu/motu-proc.c20
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c314
-rw-r--r--sound/firewire/motu/motu-protocol-v3.c312
-rw-r--r--sound/firewire/motu/motu-stream.c16
-rw-r--r--sound/firewire/motu/motu.c114
-rw-r--r--sound/firewire/motu/motu.h125
-rw-r--r--sound/hda/ext/hdac_ext_bus.c2
-rw-r--r--sound/hda/hdac_bus.c6
-rw-r--r--sound/hda/hdac_controller.c13
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/hda/hdac_stream.c2
-rw-r--r--sound/hda/intel-dsp-config.c117
-rw-r--r--sound/hda/intel-nhlt.c51
-rw-r--r--sound/hda/local.h3
-rw-r--r--sound/isa/ad1816a/ad1816a.c2
-rw-r--r--sound/isa/es1688/es1688.c4
-rw-r--r--sound/isa/wavefront/wavefront_synth.c8
-rw-r--r--sound/pci/ac97/ac97_patch.c2
-rw-r--r--sound/pci/emu10k1/emu10k1x.c2
-rw-r--r--sound/pci/hda/Kconfig4
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/hda_tegra.c50
-rw-r--r--sound/pci/hda/patch_hdmi.c13
-rw-r--r--sound/pci/hda/patch_realtek.c57
-rw-r--r--sound/pci/oxygen/xonar_pcm179x.c4
-rw-r--r--sound/ppc/pmac.c4
-rw-r--r--sound/ppc/snd_ps3.c2
-rw-r--r--sound/soc/Makefile2
-rw-r--r--sound/soc/amd/Kconfig15
-rw-r--r--sound/soc/amd/Makefile1
-rw-r--r--sound/soc/amd/raven/acp3x-i2s.c6
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c49
-rw-r--r--sound/soc/amd/renoir/Makefile7
-rw-r--r--sound/soc/amd/renoir/acp3x-pdm-dma.c524
-rw-r--r--sound/soc/amd/renoir/acp3x-rn.c77
-rw-r--r--sound/soc/amd/renoir/rn-pci-acp3x.c344
-rw-r--r--sound/soc/amd/renoir/rn_acp3x.h88
-rw-r--r--sound/soc/amd/renoir/rn_chip_offset_byte.h349
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c4
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c6
-rw-r--r--sound/soc/bcm/cygnus-ssp.c4
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c4
-rw-r--r--sound/soc/codecs/Kconfig40
-rw-r--r--sound/soc/codecs/Makefile6
-rw-r--r--sound/soc/codecs/ad1980.c2
-rw-r--r--sound/soc/codecs/ad73311.c2
-rw-r--r--sound/soc/codecs/adau7118-i2c.c7
-rw-r--r--sound/soc/codecs/adav80x.c4
-rw-r--r--sound/soc/codecs/arizona.c2
-rw-r--r--sound/soc/codecs/cros_ec_codec.c22
-rw-r--r--sound/soc/codecs/cs4271.c4
-rw-r--r--sound/soc/codecs/cs47l15.c9
-rw-r--r--sound/soc/codecs/cs47l24.c8
-rw-r--r--sound/soc/codecs/cs47l35.c9
-rw-r--r--sound/soc/codecs/cs47l85.c9
-rw-r--r--sound/soc/codecs/cs47l90.c9
-rw-r--r--sound/soc/codecs/cs47l92.c9
-rw-r--r--sound/soc/codecs/da7213.c102
-rw-r--r--sound/soc/codecs/da7213.h9
-rw-r--r--sound/soc/codecs/dmic.c4
-rw-r--r--sound/soc/codecs/hdac_hda.h4
-rw-r--r--sound/soc/codecs/jz4725b.c4
-rw-r--r--sound/soc/codecs/jz4740.c4
-rw-r--r--sound/soc/codecs/jz4770.c2
-rw-r--r--sound/soc/codecs/madera.c2
-rw-r--r--sound/soc/codecs/max9768.c2
-rw-r--r--sound/soc/codecs/max98090.c6
-rw-r--r--sound/soc/codecs/max98373.c2
-rw-r--r--sound/soc/codecs/max98390.c1056
-rw-r--r--sound/soc/codecs/max98390.h664
-rw-r--r--sound/soc/codecs/max9867.c95
-rw-r--r--sound/soc/codecs/max9867.h1
-rw-r--r--sound/soc/codecs/nau8810.c39
-rw-r--r--sound/soc/codecs/nau8810.h8
-rw-r--r--sound/soc/codecs/rl6231.c33
-rw-r--r--sound/soc/codecs/rl6231.h1
-rw-r--r--sound/soc/codecs/rt1015.c29
-rw-r--r--sound/soc/codecs/rt1015.h1
-rw-r--r--sound/soc/codecs/rt1016.c695
-rw-r--r--sound/soc/codecs/rt1016.h232
-rw-r--r--sound/soc/codecs/rt1308-sdw.c15
-rw-r--r--sound/soc/codecs/rt5645.c14
-rw-r--r--sound/soc/codecs/rt5677-spi.c12
-rw-r--r--sound/soc/codecs/rt5682-i2c.c306
-rw-r--r--sound/soc/codecs/rt5682-sdw.c462
-rw-r--r--sound/soc/codecs/rt5682-sdw.h20
-rw-r--r--sound/soc/codecs/rt5682.c888
-rw-r--r--sound/soc/codecs/rt5682.h32
-rw-r--r--sound/soc/codecs/rt700-sdw.c3
-rw-r--r--sound/soc/codecs/rt700.c3
-rw-r--r--sound/soc/codecs/rt711-sdw.c3
-rw-r--r--sound/soc/codecs/rt711.c3
-rw-r--r--sound/soc/codecs/rt715-sdw.c3
-rw-r--r--sound/soc/codecs/rt715.c3
-rw-r--r--sound/soc/codecs/sta32x.c11
-rw-r--r--sound/soc/codecs/tas2552.c1
-rw-r--r--sound/soc/codecs/tlv320adcx140.c96
-rw-r--r--sound/soc/codecs/tlv320adcx140.h11
-rw-r--r--sound/soc/codecs/tlv320aic23.c2
-rw-r--r--sound/soc/codecs/tlv320dac33.c2
-rw-r--r--sound/soc/codecs/uda1380.c2
-rw-r--r--sound/soc/codecs/wcd9335.c44
-rw-r--r--sound/soc/codecs/wcd934x.c30
-rw-r--r--sound/soc/codecs/wl1273.c2
-rw-r--r--sound/soc/codecs/wm5102.c9
-rw-r--r--sound/soc/codecs/wm5110.c8
-rw-r--r--sound/soc/codecs/wm8524.c4
-rw-r--r--sound/soc/codecs/wm8711.c2
-rw-r--r--sound/soc/codecs/wm8753.c4
-rw-r--r--sound/soc/codecs/wm8782.c2
-rw-r--r--sound/soc/codecs/wm8900.c6
-rw-r--r--sound/soc/codecs/wm8962.c9
-rw-r--r--sound/soc/codecs/wm8990.c98
-rw-r--r--sound/soc/codecs/wm8991.c8
-rw-r--r--sound/soc/codecs/wm8994.c8
-rw-r--r--sound/soc/codecs/wm_adsp.c18
-rw-r--r--sound/soc/codecs/wm_adsp.h18
-rw-r--r--sound/soc/codecs/zl38060.c638
-rw-r--r--sound/soc/dwc/dwc-i2s.c2
-rw-r--r--sound/soc/fsl/Kconfig11
-rw-r--r--sound/soc/fsl/Makefile2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c26
-rw-r--r--sound/soc/fsl/fsl_asrc.c368
-rw-r--r--sound/soc/fsl/fsl_asrc.h74
-rw-r--r--sound/soc/fsl/fsl_asrc_common.h106
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c55
-rw-r--r--sound/soc/fsl/fsl_audmix.c4
-rw-r--r--sound/soc/fsl/fsl_easrc.c2117
-rw-r--r--sound/soc/fsl/fsl_easrc.h651
-rw-r--r--sound/soc/fsl/fsl_esai.c52
-rw-r--r--sound/soc/fsl/fsl_micfil.c17
-rw-r--r--sound/soc/fsl/fsl_spdif.c4
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c16
-rw-r--r--sound/soc/img/img-i2s-in.c1
-rw-r--r--sound/soc/img/img-i2s-out.c8
-rw-r--r--sound/soc/img/img-spdif-in.c4
-rw-r--r--sound/soc/img/img-spdif-out.c4
-rw-r--r--sound/soc/intel/Kconfig8
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/atom/Makefile2
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.h2
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-compress.c43
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c16
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform.h2
-rw-r--r--sound/soc/intel/atom/sst/Makefile2
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c4
-rw-r--r--sound/soc/intel/boards/Kconfig83
-rw-r--r--sound/soc/intel/boards/Makefile6
-rw-r--r--sound/soc/intel/boards/bdw-rt5650.c29
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c26
-rw-r--r--sound/soc/intel/boards/broadwell.c43
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c5
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c3
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c3
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c31
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c6
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c3
-rw-r--r--sound/soc/intel/boards/cht_bsw_nau8824.c7
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c3
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c3
-rw-r--r--sound/soc/intel/boards/cml_rt1011_rt5682.c224
-rw-r--r--sound/soc/intel/boards/ehl_rt5660.c323
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c4
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c2
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.h2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c6
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c2
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c2
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h2
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c33
-rw-r--r--sound/soc/intel/boards/sof_da7219_max98373.c23
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c2
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.h2
-rw-r--r--sound/soc/intel/boards/sof_pcm512x.c11
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c2
-rw-r--r--sound/soc/intel/boards/sof_sdw.c65
-rw-r--r--sound/soc/intel/boards/sof_sdw_common.h2
-rw-r--r--sound/soc/intel/boards/sof_sdw_dmic.c2
-rw-r--r--sound/soc/intel/boards/sof_sdw_hdmi.c11
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt1308.c4
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt5682.c5
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt700.c13
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt711.c13
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt715.c2
-rw-r--r--sound/soc/intel/boards/sof_wm8804.c298
-rw-r--r--sound/soc/intel/common/Makefile2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c4
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cfl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cml-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ehl-match.c9
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hda-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-jsl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-skl-match.c2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c2
-rw-r--r--sound/soc/intel/common/soc-intel-quirks.h2
-rw-r--r--sound/soc/intel/common/sst-dsp.c9
-rw-r--r--sound/soc/intel/common/sst-firmware.c2
-rw-r--r--sound/soc/intel/haswell/sst-haswell-dsp.c185
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c2
-rw-r--r--sound/soc/intel/skylake/Makefile2
-rw-r--r--sound/soc/intel/skylake/skl-i2s.h2
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c184
-rw-r--r--sound/soc/intel/skylake/skl-topology.h5
-rw-r--r--sound/soc/intel/skylake/skl.h2
-rw-r--r--sound/soc/jz4740/Kconfig2
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c12
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c7
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c8
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c7
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c16
-rw-r--r--sound/soc/meson/axg-fifo.c10
-rw-r--r--sound/soc/meson/axg-tdm-interface.c2
-rw-r--r--sound/soc/meson/meson-card-utils.c17
-rw-r--r--sound/soc/mxs/mxs-saif.c5
-rw-r--r--sound/soc/pxa/Kconfig25
-rw-r--r--sound/soc/pxa/mmp-sspa.c445
-rw-r--r--sound/soc/pxa/mmp-sspa.h32
-rw-r--r--sound/soc/pxa/pxa-ssp.c8
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c2
-rw-r--r--sound/soc/qcom/lpass-apq8016.c9
-rw-r--r--sound/soc/qcom/lpass-cpu.c235
-rw-r--r--sound/soc/qcom/lpass-lpaif-reg.h30
-rw-r--r--sound/soc/qcom/lpass.h4
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c55
-rw-r--r--sound/soc/sh/rcar/gen.c8
-rw-r--r--sound/soc/sh/rcar/rsnd.h9
-rw-r--r--sound/soc/sh/rcar/ssi.c145
-rw-r--r--sound/soc/soc-card.c225
-rw-r--r--sound/soc/soc-compress.c281
-rw-r--r--sound/soc/soc-core.c267
-rw-r--r--sound/soc/soc-dai.c485
-rw-r--r--sound/soc/soc-dapm.c30
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c6
-rw-r--r--sound/soc/soc-jack.c38
-rw-r--r--sound/soc/soc-link.c155
-rw-r--r--sound/soc/soc-pcm.c295
-rw-r--r--sound/soc/soc-topology.c96
-rw-r--r--sound/soc/sof/Makefile2
-rw-r--r--sound/soc/sof/compress.c9
-rw-r--r--sound/soc/sof/compress.h9
-rw-r--r--sound/soc/sof/control.c6
-rw-r--r--sound/soc/sof/core.c9
-rw-r--r--sound/soc/sof/debug.c2
-rw-r--r--sound/soc/sof/imx/Kconfig34
-rw-r--r--sound/soc/sof/imx/Makefile4
-rw-r--r--sound/soc/sof/imx/imx8.c4
-rw-r--r--sound/soc/sof/imx/imx8m.c285
-rw-r--r--sound/soc/sof/intel/Makefile2
-rw-r--r--sound/soc/sof/intel/apl.c2
-rw-r--r--sound/soc/sof/intel/bdw.c2
-rw-r--r--sound/soc/sof/intel/byt.c172
-rw-r--r--sound/soc/sof/intel/cnl.c2
-rw-r--r--sound/soc/sof/intel/hda-bus.c2
-rw-r--r--sound/soc/sof/intel/hda-codec.c58
-rw-r--r--sound/soc/sof/intel/hda-compress.c2
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c2
-rw-r--r--sound/soc/sof/intel/hda-dai.c2
-rw-r--r--sound/soc/sof/intel/hda-dsp.c10
-rw-r--r--sound/soc/sof/intel/hda-ipc.c2
-rw-r--r--sound/soc/sof/intel/hda-ipc.h2
-rw-r--r--sound/soc/sof/intel/hda-loader.c11
-rw-r--r--sound/soc/sof/intel/hda-pcm.c2
-rw-r--r--sound/soc/sof/intel/hda-stream.c2
-rw-r--r--sound/soc/sof/intel/hda-trace.c2
-rw-r--r--sound/soc/sof/intel/hda.c28
-rw-r--r--sound/soc/sof/intel/hda.h2
-rw-r--r--sound/soc/sof/intel/intel-ipc.c2
-rw-r--r--sound/soc/sof/intel/shim.h2
-rw-r--r--sound/soc/sof/ipc.c11
-rw-r--r--sound/soc/sof/loader.c213
-rw-r--r--sound/soc/sof/nocodec.c14
-rw-r--r--sound/soc/sof/ops.c2
-rw-r--r--sound/soc/sof/ops.h2
-rw-r--r--sound/soc/sof/pcm.c18
-rw-r--r--sound/soc/sof/pm.c29
-rw-r--r--sound/soc/sof/probe.c2
-rw-r--r--sound/soc/sof/probe.h2
-rw-r--r--sound/soc/sof/sof-acpi-dev.c2
-rw-r--r--sound/soc/sof/sof-audio.c2
-rw-r--r--sound/soc/sof/sof-audio.h4
-rw-r--r--sound/soc/sof/sof-of-dev.c16
-rw-r--r--sound/soc/sof/sof-pci-dev.c4
-rw-r--r--sound/soc/sof/sof-priv.h7
-rw-r--r--sound/soc/sof/topology.c485
-rw-r--r--sound/soc/sof/trace.c2
-rw-r--r--sound/soc/sof/utils.c2
-rw-r--r--sound/soc/sof/xtensa/Makefile2
-rw-r--r--sound/soc/sof/xtensa/core.c2
-rw-r--r--sound/soc/sprd/sprd-pcm-compress.c49
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.c2
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.h2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c7
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.c113
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.h1
-rw-r--r--sound/soc/tegra/tegra_max98090.c22
-rw-r--r--sound/soc/tegra/tegra_rt5640.c22
-rw-r--r--sound/soc/tegra/tegra_rt5677.c7
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c7
-rw-r--r--sound/soc/tegra/tegra_wm8753.c22
-rw-r--r--sound/soc/tegra/tegra_wm8903.c42
-rw-r--r--sound/soc/tegra/tegra_wm9712.c8
-rw-r--r--sound/soc/tegra/trimslice.c18
-rw-r--r--sound/soc/ti/davinci-mcasp.c6
-rw-r--r--sound/soc/ti/omap-dmic.c4
-rw-r--r--sound/soc/ti/omap-mcbsp.c21
-rw-r--r--sound/soc/ti/omap-mcpdm.c8
-rw-r--r--sound/soc/uniphier/aio-compress.c45
-rw-r--r--sound/soc/uniphier/aio-cpu.c4
-rw-r--r--sound/soc/uniphier/aio-dma.c2
-rw-r--r--sound/soc/uniphier/aio.h2
-rw-r--r--sound/soc/ux500/mop500.c11
-rw-r--r--sound/usb/card.c54
-rw-r--r--sound/usb/card.h5
-rw-r--r--sound/usb/endpoint.c244
-rw-r--r--sound/usb/endpoint.h1
-rw-r--r--sound/usb/line6/driver.c20
-rw-r--r--sound/usb/line6/driver.h1
-rw-r--r--sound/usb/mixer_quirks.c418
-rw-r--r--sound/usb/pcm.c11
-rw-r--r--sound/usb/proc.c53
-rw-r--r--sound/usb/quirks-table.h166
-rw-r--r--sound/usb/quirks.c41
-rw-r--r--sound/usb/usbaudio.h6
-rw-r--r--sound/usb/usx2y/usbusx2y.h2
-rw-r--r--tools/arch/sh/include/asm/barrier.h2
-rw-r--r--tools/arch/x86/include/asm/msr-index.h3
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh7
-rw-r--r--tools/build/Makefile.feature2
-rw-r--r--tools/build/feature/Makefile2
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/gpio/lsgpio.c12
-rw-r--r--tools/include/linux/compiler-gcc.h12
-rw-r--r--tools/include/linux/compiler.h3
-rw-r--r--tools/include/linux/kallsyms.h2
-rw-r--r--tools/objtool/check.c22
-rw-r--r--tools/perf/Documentation/itrace.txt6
-rw-r--r--tools/perf/Documentation/perf-c2c.txt2
-rw-r--r--tools/perf/Documentation/perf-config.txt5
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt15
-rw-r--r--tools/perf/Documentation/perf-stat.txt33
-rw-r--r--tools/perf/Documentation/perf-top.txt11
-rw-r--r--tools/perf/Documentation/security.txt237
-rw-r--r--tools/perf/Makefile.config43
-rw-r--r--tools/perf/Makefile.perf14
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c7
-rw-r--r--tools/perf/arch/arm64/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/powerpc/util/Build1
-rw-r--r--tools/perf/arch/powerpc/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c8
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c41
-rw-r--r--tools/perf/arch/x86/util/unwind-libdw.c6
-rw-r--r--tools/perf/bench/epoll-ctl.c4
-rw-r--r--tools/perf/bench/epoll-wait.c4
-rw-r--r--tools/perf/bench/sched-messaging.c2
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-bench.c6
-rw-r--r--tools/perf/builtin-c2c.c9
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-ftrace.c15
-rw-r--r--tools/perf/builtin-inject.c2
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-mem.c24
-rw-r--r--tools/perf/builtin-probe.c3
-rw-r--r--tools/perf/builtin-record.c69
-rw-r--r--tools/perf/builtin-report.c37
-rw-r--r--tools/perf/builtin-script.c41
-rw-r--r--tools/perf/builtin-stat.c181
-rw-r--r--tools/perf/builtin-timechart.c2
-rw-r--r--tools/perf/builtin-top.c12
-rw-r--r--tools/perf/builtin-trace.c92
-rwxr-xr-xtools/perf/check-headers.sh4
-rw-r--r--tools/perf/jvmti/libjvmti.c92
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/metrics.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json4
-rw-r--r--tools/perf/pmu-events/jsmn.h2
-rw-r--r--tools/perf/tests/Build2
-rw-r--r--tools/perf/tests/attr/system-wide-dummy50
-rw-r--r--tools/perf/tests/attr/test-record-C012
-rw-r--r--tools/perf/tests/builtin-test.c65
-rw-r--r--tools/perf/tests/demangle-java-test.c42
-rw-r--r--tools/perf/tests/dwarf-unwind.c11
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c5
-rw-r--r--tools/perf/tests/evsel-tp-sched.c8
-rw-r--r--tools/perf/tests/expr.c46
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_filter.c2
-rw-r--r--tools/perf/tests/hists_output.c10
-rw-r--r--tools/perf/tests/make10
-rw-r--r--tools/perf/tests/mmap-basic.c4
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c8
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c4
-rw-r--r--tools/perf/tests/openat-syscall.c2
-rw-r--r--tools/perf/tests/pfm.c203
-rw-r--r--tools/perf/tests/pmu-events.c173
-rw-r--r--tools/perf/tests/pmu.c4
-rw-r--r--tools/perf/tests/sw-clock.c2
-rw-r--r--tools/perf/tests/tests.h8
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh4
-rw-r--r--tools/perf/util/Build8
-rw-r--r--tools/perf/util/annotate.c1
-rw-r--r--tools/perf/util/annotate.h4
-rw-r--r--tools/perf/util/arm-spe-decoder/Build1
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c219
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h82
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c (renamed from tools/perf/util/arm-spe-pkt-decoder.c)0
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h59
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.h43
-rw-r--r--tools/perf/util/arm-spe.c823
-rw-r--r--tools/perf/util/auxtrace.c22
-rw-r--r--tools/perf/util/auxtrace.h15
-rw-r--r--tools/perf/util/bpf-loader.c2
-rw-r--r--tools/perf/util/branch.h2
-rw-r--r--tools/perf/util/callchain.c14
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/cloexec.c4
-rw-r--r--tools/perf/util/config.c14
-rw-r--r--tools/perf/util/counts.c10
-rw-r--r--tools/perf/util/counts.h7
-rw-r--r--tools/perf/util/cputopo.h2
-rw-r--r--tools/perf/util/demangle-java.c13
-rw-r--r--tools/perf/util/dso.c16
-rw-r--r--tools/perf/util/dso.h5
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c39
-rw-r--r--tools/perf/util/evsel.c156
-rw-r--r--tools/perf/util/evsel.h34
-rw-r--r--tools/perf/util/evsel_config.h43
-rw-r--r--tools/perf/util/evsel_fprintf.c3
-rw-r--r--tools/perf/util/evsel_fprintf.h3
-rw-r--r--tools/perf/util/expr.c130
-rw-r--r--tools/perf/util/expr.h29
-rw-r--r--tools/perf/util/expr.l16
-rw-r--r--tools/perf/util/expr.y41
-rw-r--r--tools/perf/util/genelf_debug.c4
-rw-r--r--tools/perf/util/hashmap.c238
-rw-r--r--tools/perf/util/hashmap.h176
-rw-r--r--tools/perf/util/header.c34
-rw-r--r--tools/perf/util/hist.c13
-rw-r--r--tools/perf/util/hist.h6
-rw-r--r--tools/perf/util/intel-pt.c31
-rw-r--r--tools/perf/util/jitdump.c2
-rw-r--r--tools/perf/util/jitdump.h6
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/mem-events.c15
-rw-r--r--tools/perf/util/mem-events.h2
-rw-r--r--tools/perf/util/metricgroup.c316
-rw-r--r--tools/perf/util/metricgroup.h6
-rw-r--r--tools/perf/util/ordered-events.h2
-rw-r--r--tools/perf/util/parse-events.c111
-rw-r--r--tools/perf/util/parse-events.h5
-rw-r--r--tools/perf/util/parse-events.l12
-rw-r--r--tools/perf/util/pfm.c281
-rw-r--r--tools/perf/util/pfm.h37
-rw-r--r--tools/perf/util/pmu.c33
-rw-r--r--tools/perf/util/pmu.h4
-rw-r--r--tools/perf/util/probe-event.c49
-rw-r--r--tools/perf/util/probe-finder.c1
-rw-r--r--tools/perf/util/pstack.c2
-rw-r--r--tools/perf/util/record.h6
-rw-r--r--tools/perf/util/session.c12
-rw-r--r--tools/perf/util/sideband_evlist.c2
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/stat-shadow.c53
-rw-r--r--tools/perf/util/stat.c90
-rw-r--r--tools/perf/util/stat.h7
-rw-r--r--tools/perf/util/symbol-elf.c7
-rw-r--r--tools/perf/util/symbol.c4
-rw-r--r--tools/perf/util/symbol.h2
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/syscalltbl.h14
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/unwind-libunwind-local.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c11
-rwxr-xr-xtools/testing/kunit/kunit.py307
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py63
-rw-r--r--tools/testing/selftests/Makefile9
-rw-r--r--tools/testing/selftests/exec/.gitignore1
-rw-r--r--tools/testing/selftests/exec/Makefile3
-rwxr-xr-xtools/testing/selftests/exec/binfmt_script171
-rw-r--r--tools/testing/selftests/exec/execveat.c8
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions8
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc5
-rwxr-xr-xtools/testing/selftests/gen_kselftest_tar.sh5
-rw-r--r--tools/testing/selftests/lib/config1
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh2
-rw-r--r--tools/testing/selftests/powerpc/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules1
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/Makefile8
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/README45
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gunz_test.c1028
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c433
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c316
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h56
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/crb.h155
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/nx.h38
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h95
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/nxu.h650
l---------tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h1
-rwxr-xr-xtools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh46
-rw-r--r--tools/testing/selftests/powerpc/pmu/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile8
-rw-r--r--tools/testing/selftests/powerpc/pmu/count_stcx_fail.c161
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/trace.h4
-rw-r--r--tools/testing/selftests/powerpc/pmu/loop.S35
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c174
-rw-r--r--tools/testing/selftests/proc/.gitignore2
-rw-r--r--tools/testing/selftests/proc/Makefile2
-rw-r--r--tools/testing/selftests/proc/proc-fsconfig-hidepid.c50
-rw-r--r--tools/testing/selftests/proc/proc-multiple-procfs.c48
-rw-r--r--tools/testing/selftests/sysctl/config2
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh57
-rw-r--r--tools/testing/selftests/timens/clock_nanosleep.c2
-rw-r--r--tools/testing/selftests/timens/timens.c2
-rw-r--r--tools/testing/selftests/timens/timens.h13
-rw-r--r--tools/testing/selftests/timens/timer.c5
-rw-r--r--tools/testing/selftests/timens/timerfd.c5
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh5
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh5
-rw-r--r--tools/testing/selftests/vDSO/.gitignore2
-rw-r--r--tools/testing/selftests/vDSO/Makefile5
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c24
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.h31
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c4
-rw-r--r--tools/testing/selftests/vDSO/vdso_test.c68
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getcpu.c54
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c66
-rw-r--r--tools/testing/selftests/vm/.gitignore1
-rw-r--r--tools/testing/selftests/vm/Makefile73
-rw-r--r--tools/testing/selftests/vm/khugepaged.c2
-rw-r--r--tools/testing/selftests/vm/mremap_dontunmap.c1
-rw-r--r--tools/testing/selftests/vm/pkey-helpers.h225
-rw-r--r--tools/testing/selftests/vm/pkey-powerpc.h133
-rw-r--r--tools/testing/selftests/vm/pkey-x86.h181
-rw-r--r--tools/testing/selftests/vm/protection_keys.c1580
-rw-r--r--tools/testing/selftests/x86/.gitignore1
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/pkey-helpers.h219
-rw-r--r--tools/testing/selftests/x86/protection_keys.c1506
-rw-r--r--usr/include/Makefile6
-rw-r--r--virt/kvm/async_pf.c4
-rw-r--r--virt/kvm/kvm_main.c17
6606 files changed, 336115 insertions, 118770 deletions
diff --git a/.gitignore b/.gitignore
index 2258e906f01c..87b9dd8a163b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,6 +56,7 @@ modules.order
/linux
/vmlinux
/vmlinux.32
+/vmlinux.symvers
/vmlinux-gdb.py
/vmlinuz
/System.map
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index f4be46cc6cb6..b5bebf642db6 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -1,3 +1,9 @@
+What: sys/bus/dsa/devices/dsa<m>/version
+Date: Apr 15, 2020
+KernelVersion: 5.8.0
+Contact: dmaengine@vger.kernel.org
+Description: The hardware version number.
+
What: sys/bus/dsa/devices/dsa<m>/cdev_major
Date: Oct 25, 2019
KernelVersion: 5.6.0
diff --git a/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp b/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp
new file mode 100644
index 000000000000..00fa04c76ff3
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-driver-firmware-zynqmp
@@ -0,0 +1,103 @@
+What: /sys/devices/platform/firmware\:zynqmp-firmware/ggs*
+Date: March 2020
+KernelVersion: 5.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU global general storage register value,
+ GLOBAL_GEN_STORAGE{0:3}.
+	A global general storage register that can be used
+	by the system to pass information between masters.
+
+ The register is reset during system or power-on
+ resets. Three registers are used by the FSBL and
+ other Xilinx software products: GLOBAL_GEN_STORAGE{4:6}.
+
+ Usage:
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/ggs0
+ # echo <value> > /sys/devices/platform/firmware\:zynqmp-firmware/ggs0
+
+ Example:
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/ggs0
+ # echo 0x1234ABCD > /sys/devices/platform/firmware\:zynqmp-firmware/ggs0
+
+Users: Xilinx
+
+What: /sys/devices/platform/firmware\:zynqmp-firmware/pggs*
+Date: March 2020
+KernelVersion: 5.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+ Read/Write PMU persistent global general storage register
+ value, PERS_GLOB_GEN_STORAGE{0:3}.
+	A persistent global general storage register that
+	can be used by the system to pass information
+	between masters.
+
+	This register is reset only by a power-on reset (POR)
+	and maintains its value through a system reset.
+	Four registers are used by the FSBL and other Xilinx
+	software products: PERS_GLOB_GEN_STORAGE{4:7}.
+
+ Usage:
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/pggs0
+ # echo <value> > /sys/devices/platform/firmware\:zynqmp-firmware/pggs0
+
+ Example:
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/pggs0
+ # echo 0x1234ABCD > /sys/devices/platform/firmware\:zynqmp-firmware/pggs0
+
+Users: Xilinx
+
+What: /sys/devices/platform/firmware\:zynqmp-firmware/shutdown_scope
+Date: March 2020
+KernelVersion: 5.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+	This sysfs interface allows setting the shutdown scope for the
+ next shutdown request. When the next shutdown is performed, the
+ platform specific portion of PSCI-system_off can use the chosen
+ shutdown scope.
+
+	The following shutdown scopes (subtypes) are available:
+
+ subsystem: Only the APU along with all of its peripherals
+ not used by other processing units will be
+ shut down. This may result in the FPD power
+ domain being shut down provided that no other
+ processing unit uses FPD peripherals or DRAM.
+ ps_only: The complete PS will be shut down, including the
+ RPU, PMU, etc. Only the PL domain (FPGA)
+ remains untouched.
+ system: The complete system/device is shut down.
+
+ Usage:
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/shutdown_scope
+ # echo <scope> > /sys/devices/platform/firmware\:zynqmp-firmware/shutdown_scope
+
+ Example:
+ # cat /sys/devices/platform/firmware\:zynqmp-firmware/shutdown_scope
+ # echo "subsystem" > /sys/devices/platform/firmware\:zynqmp-firmware/shutdown_scope
+
+Users: Xilinx
+
+What: /sys/devices/platform/firmware\:zynqmp-firmware/health_status
+Date: March 2020
+KernelVersion: 5.6
+Contact: "Jolly Shah" <jollys@xilinx.com>
+Description:
+	This sysfs interface allows setting the health status. If PMUFW
+	is compiled with CHECK_HEALTHY_BOOT, it checks the healthy
+	bit on FPD WDT expiration. If the healthy bit has been set by a
+	user application running in Linux, PMUFW performs an APU-only
+	restart. If the healthy bit is not set when the FPD WDT expires,
+	PMUFW performs a full system restart.
+
+ Usage:
+ Set healthy bit
+ # echo 1 > /sys/devices/platform/firmware\:zynqmp-firmware/health_status
+
+ Unset healthy bit
+ # echo 0 > /sys/devices/platform/firmware\:zynqmp-firmware/health_status
+
+Users: Xilinx
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index a73601c5121e..f6d9c2a8d528 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -8,6 +8,16 @@ Description: Sets the device address to be used for read or write through
only when the IOMMU is disabled.
The acceptable value is a string that starts with "0x"
+What: /sys/kernel/debug/habanalabs/hl<n>/clk_gate
+Date: May 2020
+KernelVersion: 5.8
+Contact: oded.gabbay@gmail.com
+Description:	Allows the root user to enable or disable the clock gating
+	mechanism in Gaudi at runtime. Due to how Gaudi is built, clock
+	gating needs to be disabled in order to access the registers of
+	the TPC and MME engines. This is sometimes needed during debug,
+	hence this option is exposed to the user.
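+
+	A usage sketch (the hl0 instance name is only an example):
+	# cat /sys/kernel/debug/habanalabs/hl0/clk_gate
+	# echo 0 > /sys/kernel/debug/habanalabs/hl0/clk_gate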
+
What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
Date: Jan 2019
KernelVersion: 5.1
@@ -150,3 +160,10 @@ KernelVersion: 5.1
Contact: oded.gabbay@gmail.com
Description: Displays a list with information about all the active virtual
address mappings per ASID
+
+What: /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
+Date: Mar 2020
+KernelVersion: 5.6
+Contact: oded.gabbay@gmail.com
+Description:	Sets the stop-on-error option for the device engines. A value
+	of "0" disables the option; any other value enables it.
diff --git a/Documentation/ABI/testing/sysfs-block-rnbd b/Documentation/ABI/testing/sysfs-block-rnbd
new file mode 100644
index 000000000000..8f070b47f361
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-rnbd
@@ -0,0 +1,46 @@
+What: /sys/block/rnbd<N>/rnbd/unmap_device
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: To unmap a volume, "normal" or "force" has to be written to:
+ /sys/block/rnbd<N>/rnbd/unmap_device
+
+ When "normal" is used, the operation will fail with EBUSY if any process
+	is using the device. When "force" is used, the device is unmapped
+	even while it is in use. All I/Os that are in progress will fail.
+
+ Example:
+
+ # echo "normal" > /sys/block/rnbd0/rnbd/unmap_device
+
+What: /sys/block/rnbd<N>/rnbd/state
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: The file contains the current state of the block device. The state file
+ returns "open" when the device is successfully mapped from the server
+ and accepting I/O requests. When the connection to the server gets
+ disconnected in case of an error (e.g. link failure), the state file
+ returns "closed" and all I/O requests submitted to it will fail with -EIO.
+
+What: /sys/block/rnbd<N>/rnbd/session
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description:	RNBD uses an RTRS session to transport the data between client
+	and server. The entry "session" contains the name of the session
+	that was used to establish the RTRS session. It is the same name
+	that was passed as the "sessname" parameter to the map_device
+	entry.
+
+What: /sys/block/rnbd<N>/rnbd/mapping_path
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains the path that was passed as "device_path" to the map_device
+ operation.
+
+What: /sys/block/rnbd<N>/rnbd/access_mode
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains the device access mode: ro, rw or migration.
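+
+	Example (the rnbd0 device name is only illustrative):
+
+	# cat /sys/block/rnbd0/rnbd/access_mode
+	rw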
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme
new file mode 100644
index 000000000000..c9278a3b3df1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme
@@ -0,0 +1,104 @@
+What: /sys/bus/event_source/devices/dfl_fmeX/format
+Date: April 2020
+KernelVersion: 5.8
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. Attribute group to describe the magic bits
+ that go into perf_event_attr.config for a particular pmu.
+ (See ABI/testing/sysfs-bus-event_source-devices-format).
+
+ Each attribute under this group defines a bit range of the
+ perf_event_attr.config. All supported attributes are listed
+ below.
+
+ event = "config:0-11" - event ID
+ evtype = "config:12-15" - event type
+ portid = "config:16-23" - event source
+
+ For example,
+
+ fab_mmio_read = "event=0x06,evtype=0x02,portid=0xff"
+
+	It shows that fab_mmio_read is a fabric-type (0x02) event with
+	local event id 0x06 for overall monitoring (portid=0xff).
+
+What: /sys/bus/event_source/devices/dfl_fmeX/cpumask
+Date: April 2020
+KernelVersion: 5.8
+Contact: Wu Hao <hao.wu@intel.com>
+Description:	Read-only. This file always returns the CPU to which the PMU is
+	bound for access to all fme pmu performance monitoring events.
+
+What: /sys/bus/event_source/devices/dfl_fmeX/events
+Date: April 2020
+KernelVersion: 5.8
+Contact: Wu Hao <hao.wu@intel.com>
+Description: Read-only. Attribute group to describe performance monitoring
+ events specific to fme. Each attribute in this group describes
+ a single performance monitoring event supported by this fme pmu.
+ The name of the file is the name of the event.
+ (See ABI/testing/sysfs-bus-event_source-devices-events).
+
+ All supported performance monitoring events are listed below.
+
+ Basic events (evtype=0x00)
+
+ clock = "event=0x00,evtype=0x00,portid=0xff"
+
+ Cache events (evtype=0x01)
+
+ cache_read_hit = "event=0x00,evtype=0x01,portid=0xff"
+ cache_read_miss = "event=0x01,evtype=0x01,portid=0xff"
+ cache_write_hit = "event=0x02,evtype=0x01,portid=0xff"
+ cache_write_miss = "event=0x03,evtype=0x01,portid=0xff"
+ cache_hold_request = "event=0x05,evtype=0x01,portid=0xff"
+ cache_data_write_port_contention =
+ "event=0x06,evtype=0x01,portid=0xff"
+ cache_tag_write_port_contention =
+ "event=0x07,evtype=0x01,portid=0xff"
+ cache_tx_req_stall = "event=0x08,evtype=0x01,portid=0xff"
+ cache_rx_req_stall = "event=0x09,evtype=0x01,portid=0xff"
+ cache_eviction = "event=0x0a,evtype=0x01,portid=0xff"
+
+ Fabric events (evtype=0x02)
+
+ fab_pcie0_read = "event=0x00,evtype=0x02,portid=0xff"
+ fab_pcie0_write = "event=0x01,evtype=0x02,portid=0xff"
+ fab_pcie1_read = "event=0x02,evtype=0x02,portid=0xff"
+ fab_pcie1_write = "event=0x03,evtype=0x02,portid=0xff"
+ fab_upi_read = "event=0x04,evtype=0x02,portid=0xff"
+ fab_upi_write = "event=0x05,evtype=0x02,portid=0xff"
+ fab_mmio_read = "event=0x06,evtype=0x02,portid=0xff"
+ fab_mmio_write = "event=0x07,evtype=0x02,portid=0xff"
+ fab_port_pcie0_read = "event=0x00,evtype=0x02,portid=?"
+ fab_port_pcie0_write = "event=0x01,evtype=0x02,portid=?"
+ fab_port_pcie1_read = "event=0x02,evtype=0x02,portid=?"
+ fab_port_pcie1_write = "event=0x03,evtype=0x02,portid=?"
+ fab_port_upi_read = "event=0x04,evtype=0x02,portid=?"
+ fab_port_upi_write = "event=0x05,evtype=0x02,portid=?"
+ fab_port_mmio_read = "event=0x06,evtype=0x02,portid=?"
+ fab_port_mmio_write = "event=0x07,evtype=0x02,portid=?"
+
+ VTD events (evtype=0x03)
+
+ vtd_port_read_transaction = "event=0x00,evtype=0x03,portid=?"
+ vtd_port_write_transaction = "event=0x01,evtype=0x03,portid=?"
+ vtd_port_devtlb_read_hit = "event=0x02,evtype=0x03,portid=?"
+ vtd_port_devtlb_write_hit = "event=0x03,evtype=0x03,portid=?"
+ vtd_port_devtlb_4k_fill = "event=0x04,evtype=0x03,portid=?"
+ vtd_port_devtlb_2m_fill = "event=0x05,evtype=0x03,portid=?"
+ vtd_port_devtlb_1g_fill = "event=0x06,evtype=0x03,portid=?"
+
+ VTD SIP events (evtype=0x04)
+
+ vtd_sip_iotlb_4k_hit = "event=0x00,evtype=0x04,portid=0xff"
+ vtd_sip_iotlb_2m_hit = "event=0x01,evtype=0x04,portid=0xff"
+ vtd_sip_iotlb_1g_hit = "event=0x02,evtype=0x04,portid=0xff"
+ vtd_sip_slpwc_l3_hit = "event=0x03,evtype=0x04,portid=0xff"
+ vtd_sip_slpwc_l4_hit = "event=0x04,evtype=0x04,portid=0xff"
+ vtd_sip_rcc_hit = "event=0x05,evtype=0x04,portid=0xff"
+ vtd_sip_iotlb_4k_miss = "event=0x06,evtype=0x04,portid=0xff"
+ vtd_sip_iotlb_2m_miss = "event=0x07,evtype=0x04,portid=0xff"
+ vtd_sip_iotlb_1g_miss = "event=0x08,evtype=0x04,portid=0xff"
+ vtd_sip_slpwc_l3_miss = "event=0x09,evtype=0x04,portid=0xff"
+ vtd_sip_slpwc_l4_miss = "event=0x0a,evtype=0x04,portid=0xff"
+ vtd_sip_rcc_miss = "event=0x0b,evtype=0x04,portid=0xff"
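+
+	An illustrative perf invocation using one of these events (the
+	dfl_fme0 PMU instance name is an assumption; the actual index
+	may differ on a given system):
+
+	# perf stat -a -e dfl_fme0/fab_mmio_read/ sleep 1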
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7 b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
index ec27c6c9e737..e8698afcd952 100644
--- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-hv_24x7
@@ -22,6 +22,27 @@ Description:
Exposes the "version" field of the 24x7 catalog. This is also
extractable from the provided binary "catalog" sysfs entry.
+What: /sys/devices/hv_24x7/interface/sockets
+Date: May 2020
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
+Description: read only
+ This sysfs interface exposes the number of sockets present in the
+ system.
+
+What: /sys/devices/hv_24x7/interface/chipspersocket
+Date: May 2020
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
+Description: read only
+ This sysfs interface exposes the number of chips per socket
+ present in the system.
+
+What: /sys/devices/hv_24x7/interface/coresperchip
+Date: May 2020
+Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
+Description: read only
+ This sysfs interface exposes the number of cores per chip
+ present in the system.
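+
+	Minimal read examples for these interfaces:
+	# cat /sys/devices/hv_24x7/interface/sockets
+	# cat /sys/devices/hv_24x7/interface/chipspersocket
+	# cat /sys/devices/hv_24x7/interface/coresperchip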
+
What: /sys/bus/event_source/devices/hv_24x7/event_descs/<event-name>
Date: February 2014
Contact: Linux on PowerPC Developer List <linuxppc-dev@lists.ozlabs.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity b/Documentation/ABI/testing/sysfs-bus-iio-proximity
new file mode 100644
index 000000000000..2172f3bb9c64
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity
@@ -0,0 +1,10 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity_nearlevel
+Date: March 2020
+KernelVersion: 5.7
+Contact: linux-iio@vger.kernel.org
+Description:
+ Near level for proximity sensors. This is a single integer
+ value that tells user space when an object should be
+ considered close to the device. If the value read from the
+	sensor is greater than or equal to the value in this file, an
+	object should typically be considered near.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-sx9310 b/Documentation/ABI/testing/sysfs-bus-iio-sx9310
new file mode 100644
index 000000000000..3ac7759013e5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-sx9310
@@ -0,0 +1,10 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_proximity3_comb_raw
+Date: February 2019
+KernelVersion: 5.6
+Contact: Daniel Campello <campello@chromium.org>
+Description:
+ Proximity measurement indicating that some object is
+ near the combined sensor. The combined sensor presents
+ proximity measurements constructed by hardware by
+ combining measurements taken from a given set of
+ physical sensors.
diff --git a/Documentation/ABI/testing/sysfs-bus-most b/Documentation/ABI/testing/sysfs-bus-most
index 6b1d06e3285e..ec0a603d804b 100644
--- a/Documentation/ABI/testing/sysfs-bus-most
+++ b/Documentation/ABI/testing/sysfs-bus-most
@@ -1,14 +1,14 @@
-What: /sys/bus/most/devices/.../description
+What: /sys/bus/most/devices/<dev>/description
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Provides information about the interface type and the physical
- location of the device. Hardware attached via USB, for instance,
+ Provides information about the physical location of the
+ device. Hardware attached via USB, for instance,
might return <1-1.1:1.0>
Users:
-What: /sys/bus/most/devices/.../interface
+What: /sys/bus/most/devices/<dev>/interface
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -16,7 +16,7 @@ Description:
Indicates the type of peripheral interface the device uses.
Users:
-What: /sys/bus/most/devices/.../dci
+What: /sys/bus/most/devices/<dev>/dci
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -26,7 +26,7 @@ Description:
write the controller's DCI registers.
Users:
-What: /sys/bus/most/devices/.../dci/arb_address
+What: /sys/bus/most/devices/<dev>/dci/arb_address
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -35,7 +35,7 @@ Description:
application wants to read from or write to.
Users:
-What: /sys/bus/most/devices/.../dci/arb_value
+What: /sys/bus/most/devices/<dev>/dci/arb_value
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -44,7 +44,7 @@ Description:
is stored in arb_address.
Users:
-What: /sys/bus/most/devices/.../dci/mep_eui48_hi
+What: /sys/bus/most/devices/<dev>/dci/mep_eui48_hi
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -52,7 +52,7 @@ Description:
This is used to check and configure the MAC address.
Users:
-What: /sys/bus/most/devices/.../dci/mep_eui48_lo
+What: /sys/bus/most/devices/<dev>/dci/mep_eui48_lo
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -60,7 +60,7 @@ Description:
This is used to check and configure the MAC address.
Users:
-What: /sys/bus/most/devices/.../dci/mep_eui48_mi
+What: /sys/bus/most/devices/<dev>/dci/mep_eui48_mi
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -68,7 +68,7 @@ Description:
This is used to check and configure the MAC address.
Users:
-What: /sys/bus/most/devices/.../dci/mep_filter
+What: /sys/bus/most/devices/<dev>/dci/mep_filter
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -76,7 +76,7 @@ Description:
This is used to check and configure the MEP filter address.
Users:
-What: /sys/bus/most/devices/.../dci/mep_hash0
+What: /sys/bus/most/devices/<dev>/dci/mep_hash0
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -84,7 +84,7 @@ Description:
This is used to check and configure the MEP hash table.
Users:
-What: /sys/bus/most/devices/.../dci/mep_hash1
+What: /sys/bus/most/devices/<dev>/dci/mep_hash1
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -92,7 +92,7 @@ Description:
This is used to check and configure the MEP hash table.
Users:
-What: /sys/bus/most/devices/.../dci/mep_hash2
+What: /sys/bus/most/devices/<dev>/dci/mep_hash2
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -100,7 +100,7 @@ Description:
This is used to check and configure the MEP hash table.
Users:
-What: /sys/bus/most/devices/.../dci/mep_hash3
+What: /sys/bus/most/devices/<dev>/dci/mep_hash3
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -108,7 +108,7 @@ Description:
This is used to check and configure the MEP hash table.
Users:
-What: /sys/bus/most/devices/.../dci/ni_state
+What: /sys/bus/most/devices/<dev>/dci/ni_state
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -116,7 +116,7 @@ Description:
Indicates the current network interface state.
Users:
-What: /sys/bus/most/devices/.../dci/node_address
+What: /sys/bus/most/devices/<dev>/dci/node_address
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -124,7 +124,7 @@ Description:
Indicates the current node address.
Users:
-What: /sys/bus/most/devices/.../dci/node_position
+What: /sys/bus/most/devices/<dev>/dci/node_position
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -132,7 +132,7 @@ Description:
Indicates the current node position.
Users:
-What: /sys/bus/most/devices/.../dci/packet_bandwidth
+What: /sys/bus/most/devices/<dev>/dci/packet_bandwidth
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -140,7 +140,7 @@ Description:
Indicates the configured packet bandwidth.
Users:
-What: /sys/bus/most/devices/.../dci/sync_ep
+What: /sys/bus/most/devices/<dev>/dci/sync_ep
Date: June 2016
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -149,7 +149,7 @@ Description:
endpoint.
Users:
-What: /sys/bus/most/devices/.../<channel>/
+What: /sys/bus/most/devices/<dev>/<channel>/
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
@@ -160,91 +160,92 @@ Description:
configure it.
Users:
-What: /sys/bus/most/devices/.../<channel>/available_datatypes
+What: /sys/bus/most/devices/<dev>/<channel>/available_datatypes
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the data types the current channel can transport.
+ Indicates the data types the channel can transport.
Users:
-What: /sys/bus/most/devices/.../<channel>/available_directions
+What: /sys/bus/most/devices/<dev>/<channel>/available_directions
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the directions the current channel is capable of.
+ Indicates the directions the channel is capable of.
Users:
-What: /sys/bus/most/devices/.../<channel>/number_of_packet_buffers
+What: /sys/bus/most/devices/<dev>/<channel>/number_of_packet_buffers
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the number of packet buffers the current channel can
+ Indicates the number of packet buffers the channel can
handle.
Users:
-What: /sys/bus/most/devices/.../<channel>/number_of_stream_buffers
+What: /sys/bus/most/devices/<dev>/<channel>/number_of_stream_buffers
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the number of streaming buffers the current channel can
+ Indicates the number of streaming buffers the channel can
handle.
Users:
-What: /sys/bus/most/devices/.../<channel>/size_of_packet_buffer
+What: /sys/bus/most/devices/<dev>/<channel>/size_of_packet_buffer
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the size of a packet buffer the current channel can
+ Indicates the size of a packet buffer the channel can
handle.
Users:
-What: /sys/bus/most/devices/.../<channel>/size_of_stream_buffer
+What: /sys/bus/most/devices/<dev>/<channel>/size_of_stream_buffer
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates the size of a streaming buffer the current channel can
+ Indicates the size of a streaming buffer the channel can
handle.
Users:
-What: /sys/bus/most/devices/.../<channel>/set_number_of_buffers
+What: /sys/bus/most/devices/<dev>/<channel>/set_number_of_buffers
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- This is to configure the number of buffers of the current channel.
+ This is to read back the configured number of buffers of
+ the channel.
Users:
-What: /sys/bus/most/devices/.../<channel>/set_buffer_size
+What: /sys/bus/most/devices/<dev>/<channel>/set_buffer_size
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- This is to configure the size of a buffer of the current channel.
+ This is to read back the configured buffer size of the channel.
Users:
-What: /sys/bus/most/devices/.../<channel>/set_direction
+What: /sys/bus/most/devices/<dev>/<channel>/set_direction
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- This is to configure the direction of the current channel.
+ This is to read back the configured direction of the channel.
The following strings will be accepted:
- 'dir_tx',
- 'dir_rx'
+ 'tx',
+ 'rx'
Users:
-What: /sys/bus/most/devices/.../<channel>/set_datatype
+What: /sys/bus/most/devices/<dev>/<channel>/set_datatype
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- This is to configure the data type of the current channel.
+ This is to read back the configured data type of the channel.
The following strings will be accepted:
'control',
'async',
@@ -252,30 +253,31 @@ Description:
'isoc_avp'
Users:
-What: /sys/bus/most/devices/.../<channel>/set_subbuffer_size
+What: /sys/bus/most/devices/<dev>/<channel>/set_subbuffer_size
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- This is to configure the subbuffer size of the current channel.
+ This is to read back the configured subbuffer size of
+ the channel.
Users:
-What: /sys/bus/most/devices/.../<channel>/set_packets_per_xact
+What: /sys/bus/most/devices/<dev>/<channel>/set_packets_per_xact
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- This is to configure the number of packets per transaction of
- the current channel. This is only needed network interface
- controller is attached via USB.
+ This is to read back the configured number of packets per
+ transaction of the channel. This is only applicable when
+ connected via USB.
Users:
-What: /sys/bus/most/devices/.../<channel>/channel_starving
+What: /sys/bus/most/devices/<dev>/<channel>/channel_starving
Date: March 2017
KernelVersion: 4.15
Contact: Christian Gromm <christian.gromm@microchip.com>
Description:
- Indicates whether current channel ran out of buffers.
+	Indicates whether the channel ran out of buffers.
Users:
What: /sys/bus/most/drivers/most_core/components
diff --git a/Documentation/ABI/testing/sysfs-bus-soundwire-master b/Documentation/ABI/testing/sysfs-bus-soundwire-master
new file mode 100644
index 000000000000..46ef038d8722
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-soundwire-master
@@ -0,0 +1,23 @@
+What: /sys/bus/soundwire/devices/sdw-master-N/revision
+ /sys/bus/soundwire/devices/sdw-master-N/clk_stop_modes
+ /sys/bus/soundwire/devices/sdw-master-N/clk_freq
+ /sys/bus/soundwire/devices/sdw-master-N/clk_gears
+ /sys/bus/soundwire/devices/sdw-master-N/default_col
+ /sys/bus/soundwire/devices/sdw-master-N/default_frame_rate
+ /sys/bus/soundwire/devices/sdw-master-N/default_row
+ /sys/bus/soundwire/devices/sdw-master-N/dynamic_shape
+ /sys/bus/soundwire/devices/sdw-master-N/err_threshold
+ /sys/bus/soundwire/devices/sdw-master-N/max_clk_freq
+
+Date: April 2020
+
+Contact: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+ Bard Liao <yung-chuan.liao@linux.intel.com>
+ Vinod Koul <vkoul@kernel.org>
+
+Description: SoundWire Master-N DisCo properties.
+ These properties are defined by MIPI DisCo Specification
+ for SoundWire. They define various properties of the Master
+ and are used by the bus to configure the Master. clk_stop_modes
+	is a bitmask that, for simplicity, combines the
+ clock-stop-mode0 and clock-stop-mode1 properties.
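+
+	A minimal read sketch (the sdw-master-0 instance name is only
+	an example; N depends on the enumerated Master):
+	# cat /sys/bus/soundwire/devices/sdw-master-0/clk_freq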
diff --git a/Documentation/ABI/testing/sysfs-bus-soundwire-slave b/Documentation/ABI/testing/sysfs-bus-soundwire-slave
new file mode 100644
index 000000000000..db4c9511d1aa
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-soundwire-slave
@@ -0,0 +1,91 @@
+What: /sys/bus/soundwire/devices/sdw:.../dev-properties/mipi_revision
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/wake_capable
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/test_mode_capable
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/clk_stop_mode1
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/simple_clk_stop_capable
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/clk_stop_timeout
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/ch_prep_timeout
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/reset_behave
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/high_PHY_capable
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/paging_support
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/bank_delay_support
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/p15_behave
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/master_count
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/source_ports
+ /sys/bus/soundwire/devices/sdw:.../dev-properties/sink_ports
+
+Date: May 2020
+
+Contact: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+ Bard Liao <yung-chuan.liao@linux.intel.com>
+ Vinod Koul <vkoul@kernel.org>
+
+Description: SoundWire Slave DisCo properties.
+ These properties are defined by MIPI DisCo Specification
+ for SoundWire. They define various properties of the
+ SoundWire Slave and are used by the bus to configure
+	the Slave.
+
+
+What: /sys/bus/soundwire/devices/sdw:.../dp0/max_word
+ /sys/bus/soundwire/devices/sdw:.../dp0/min_word
+ /sys/bus/soundwire/devices/sdw:.../dp0/words
+ /sys/bus/soundwire/devices/sdw:.../dp0/BRA_flow_controlled
+ /sys/bus/soundwire/devices/sdw:.../dp0/simple_ch_prep_sm
+ /sys/bus/soundwire/devices/sdw:.../dp0/imp_def_interrupts
+
+Date: May 2020
+
+Contact: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+ Bard Liao <yung-chuan.liao@linux.intel.com>
+ Vinod Koul <vkoul@kernel.org>
+
+Description: SoundWire Slave Data Port-0 DisCo properties.
+ These properties are defined by MIPI DisCo Specification
+	for SoundWire. They define various properties of
+	Data Port 0 and are used by the bus to configure the Data Port 0.
+
+
+What: /sys/bus/soundwire/devices/sdw:.../dpN_src/max_word
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/min_word
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/words
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/type
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/max_grouping
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/simple_ch_prep_sm
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/ch_prep_timeout
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/imp_def_interrupts
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/min_ch
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/max_ch
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/channels
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/ch_combinations
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/max_async_buffer
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/block_pack_mode
+ /sys/bus/soundwire/devices/sdw:.../dpN_src/port_encoding
+
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/max_word
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/min_word
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/words
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/type
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/max_grouping
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/simple_ch_prep_sm
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/ch_prep_timeout
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/imp_def_interrupts
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/min_ch
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/max_ch
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/channels
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/ch_combinations
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/max_async_buffer
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/block_pack_mode
+ /sys/bus/soundwire/devices/sdw:.../dpN_sink/port_encoding
+
+Date: May 2020
+
+Contact: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+ Bard Liao <yung-chuan.liao@linux.intel.com>
+ Vinod Koul <vkoul@kernel.org>
+
+Description: SoundWire Slave Data Source/Sink Port-N DisCo properties.
+ These properties are defined by MIPI DisCo Specification
+ for SoundWire. They define various properties of the
+ Source/Sink Data port N and are used by the bus to configure
+ the Data Port N.
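+
+	A minimal read sketch (the device name below is hypothetical;
+	actual sdw:... names depend on the enumerated Slave):
+	# cat /sys/bus/soundwire/devices/sdw:0:025d:0711:00/dev-properties/mipi_revision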
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index bf3b48f022dc..216d61a22f1e 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -74,6 +74,21 @@ Description:
Access: Read, Write
Valid values: 0 - 100 (percent)
+What: /sys/class/power_supply/<supply_name>/capacity_error_margin
+Date: April 2019
+Contact: linux-pm@vger.kernel.org
+Description:
+ Battery capacity measurement becomes unreliable without
+	recalibration. This value provides the maximum error
+	margin, in percent, expected by the fuel gauge.
+	Values close to 0% will be returned after (re-)calibration
+	has happened. Over time the error margin will increase.
+	100% means that the capacity-related values are essentially
+	useless.
+
+ Access: Read
+ Valid values: 0 - 100 (percent)
+
What: /sys/class/power_supply/<supply_name>/capacity_level
Date: June 2009
Contact: linux-pm@vger.kernel.org
@@ -190,7 +205,7 @@ Description:
Valid values: "Unknown", "Good", "Overheat", "Dead",
"Over voltage", "Unspecified failure", "Cold",
"Watchdog timer expire", "Safety timer expire",
- "Over current"
+ "Over current", "Calibration required"
What: /sys/class/power_supply/<supply_name>/precharge_current
Date: June 2017
@@ -665,3 +680,31 @@ Description:
Valid values:
- 1: enabled
- 0: disabled
+
+What: /sys/class/power_supply/<supply_name>/manufacture_year
+Date: January 2020
+Contact: linux-pm@vger.kernel.org
+Description:
+ Reports the year (following Gregorian calendar) when the device has been
+ manufactured.
+
+ Access: Read
+ Valid values: Reported as integer
+
+What: /sys/class/power_supply/<supply_name>/manufacture_month
+Date: January 2020
+Contact: linux-pm@vger.kernel.org
+Description:
+ Reports the month when the device has been manufactured.
+
+ Access: Read
+ Valid values: 1-12
+
+What: /sys/class/power_supply/<supply_name>/manufacture_day
+Date: January 2020
+Contact: linux-pm@vger.kernel.org
+Description:
+ Reports the day of month when the device has been manufactured.
+
+ Access: Read
+ Valid values: 1-31
diff --git a/Documentation/ABI/testing/sysfs-class-power-mp2629 b/Documentation/ABI/testing/sysfs-class-power-mp2629
new file mode 100644
index 000000000000..327a07e22805
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-power-mp2629
@@ -0,0 +1,8 @@
+What: /sys/class/power_supply/mp2629_battery/batt_impedance_compen
+Date: April 2020
+KernelVersion: 5.7
+Description:
+ Represents a battery impedance compensation to accelerate charging.
+
+ Access: Read, Write
+ Valid values: Represented in milli-ohms. Valid range is [0, 140].
diff --git a/Documentation/ABI/testing/sysfs-class-rnbd-client b/Documentation/ABI/testing/sysfs-class-rnbd-client
new file mode 100644
index 000000000000..c084f203b41e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rnbd-client
@@ -0,0 +1,111 @@
+What: /sys/class/rnbd-client
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Provide information about RNBD-client.
+ All sysfs files that are not read-only provide the usage information on read:
+
+ Example:
+ # cat /sys/class/rnbd-client/ctl/map_device
+
+ > Usage: echo "sessname=<name of the rtrs session> path=<[srcaddr,]dstaddr>
+ > [path=<[srcaddr,]dstaddr>] device_path=<full path on remote side>
+ > [access_mode=<ro|rw|migration>] > map_device
+ >
+ > addr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]
+
+What: /sys/class/rnbd-client/ctl/map_device
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Expected format is the following:
+
+ sessname=<name of the rtrs session>
+ path=<[srcaddr,]dstaddr> [path=<[srcaddr,]dstaddr> ...]
+ device_path=<full path on remote side>
+ [access_mode=<ro|rw|migration>]
+
+ Where:
+
+ sessname: accepts a string of at most 256 characters, which identifies
+ a given session on the client and on the server,
+ e.g. "clt_hostname-srv_hostname" is a natural choice.
+
+ path: describes a connection between the client and the server by
+ specifying destination and, when required, the source address.
+ The addresses are to be provided in the following format:
+
+ ip:<IPv6>
+ ip:<IPv4>
+ gid:<GID>
+
+ for example:
+
+ path=ip:10.0.0.66
+ The single addr is treated as the destination.
+ The connection will be established to this server from any client IP address.
+
+ path=ip:10.0.0.66,ip:10.0.1.66
+ First addr is the source address and the second is the destination.
+
+ If multiple "path=" options are specified multiple connection
+ will be established and data will be sent according to
+ the selected multipath policy (see RTRS mp_policy sysfs entry description).
+
+ device_path: Path to the block device on the server side. Path is specified
+ relative to the directory on the server side configured in the
+ 'dev_search_path' module parameter of the rnbd_server.
+ The rnbd_server prepends the <device_path> received from the client
+ with <dev_search_path> and tries to open the
+ <dev_search_path>/<device_path> block device. On success,
+ a /dev/rnbd<N> device file, a /sys/block/rnbd_client/rnbd<N>/
+ directory and an entry in /sys/class/rnbd-client/ctl/devices
+ will be created.
+
+ If 'dev_search_path' contains '%SESSNAME%', then each session gets
+ its own device namespace. E.g. if the server was configured with
+ "dev_search_path=/run/rnbd-devs/%SESSNAME%" and the client maps with
+ "sessname=blya device_path=sda", the server will try to open
+ /run/rnbd-devs/blya/sda.
+
+ access_mode: the access_mode parameter specifies if the device is to be
+ mapped as "ro" read-only or "rw" read-write. The server allows
+ a device to be exported in rw mode only once. The "migration"
+ access mode has to be specified if a second mapping in read-write
+ mode is desired.
+
+ By default "rw" is used.
+
+ Exit Codes:
+
+ If the device is already mapped it will fail with EEXIST. If the input
+ has an invalid format it will return EINVAL. If the device path cannot
+ be found on the server, it will fail with ENOENT.
+
+ Finding device file after mapping
+ ---------------------------------
+
+ After mapping, the device file can be found by:
+ o The symlink /sys/class/rnbd-client/ctl/devices/<device_id>
+ points to /sys/block/<dev-name>. The last part of the symlink destination
+ is the same as the device name. By extracting the last part of that
+ path, the path to the device, /dev/<dev-name>, can be built.
+
+ o /dev/block/$(cat /sys/class/rnbd-client/ctl/devices/<device_id>/dev)
+
+ How to find the <device_id> of the device is described in the next
+ section.
+
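+ Example (a minimal sketch; the session name, address and device
+ path below are made up):
+
+ # echo "sessname=clt1-srv1 path=ip:10.0.0.66 device_path=sda access_mode=ro" > \
+     /sys/class/rnbd-client/ctl/map_device
+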
+What: /sys/class/rnbd-client/ctl/devices/
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: For each device mapped on the client a new symbolic link is created as
+ /sys/class/rnbd-client/ctl/devices/<device_id>, which points
+ to the block device created by rnbd (/sys/block/rnbd<N>/).
+ The <device_id> of each device is created as follows:
+
+ - If the 'device_path' provided during mapping contains slashes ("/"),
+ they are replaced by exclamation marks ("!") and the result is used
+ as the <device_id>. Otherwise, the <device_id> will be the same as
+ the "device_path" provided.
diff --git a/Documentation/ABI/testing/sysfs-class-rnbd-server b/Documentation/ABI/testing/sysfs-class-rnbd-server
new file mode 100644
index 000000000000..ba60a90c0e45
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rnbd-server
@@ -0,0 +1,50 @@
+What: /sys/class/rnbd-server
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Provide information about RNBD-server.
+
+What: /sys/class/rnbd-server/ctl/
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: When a client maps a device, a directory entry with the name of the
+ block device is created under /sys/class/rnbd-server/ctl/devices/.
+
+What: /sys/class/rnbd-server/ctl/devices/<device_name>/block_dev
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Is a symlink to the sysfs entry of the exported device.
+
+ Example:
+ block_dev -> ../../../../class/block/ram0
+
+What: /sys/class/rnbd-server/ctl/devices/<device_name>/sessions/
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: For each client that a particular device is exported to, the following
+ directory will be created:
+
+ /sys/class/rnbd-server/ctl/devices/<device_name>/sessions/<session-name>/
+
+ When the device is unmapped by that client, the directory will be removed.
+
+What: /sys/class/rnbd-server/ctl/devices/<device_name>/sessions/<session-name>/read_only
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains '1' if device is mapped read-only, otherwise '0'.
+
+What: /sys/class/rnbd-server/ctl/devices/<device_name>/sessions/<session-name>/mapping_path
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains the relative device path provided by the user during mapping.
+
+What: /sys/class/rnbd-server/ctl/devices/<device_name>/sessions/<session-name>/access_mode
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains the device access mode: ro, rw or migration.
diff --git a/Documentation/ABI/testing/sysfs-class-rtrs-client b/Documentation/ABI/testing/sysfs-class-rtrs-client
new file mode 100644
index 000000000000..e7e718db8941
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rtrs-client
@@ -0,0 +1,131 @@
+What: /sys/class/rtrs-client
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: When a user of RTRS API creates a new session, a directory entry with
+ the name of that session is created under /sys/class/rtrs-client/<session-name>/
+
+What: /sys/class/rtrs-client/<session-name>/add_path
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RW, adds a new path (connection) to an existing session. Expected format is the
+ following:
+
+ <[source addr,]destination addr>
+ *addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]
+
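+ Example (the addresses are made up):
+
+ # echo "ip:10.0.0.1,ip:10.0.0.66" > \
+     /sys/class/rtrs-client/<session-name>/add_path
+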
+What: /sys/class/rtrs-client/<session-name>/max_reconnect_attempts
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Maximum number of reconnect attempts the client should make before
+ giving up after the connection breaks unexpectedly.
+
+What: /sys/class/rtrs-client/<session-name>/mp_policy
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Multipath policy specifies which path should be selected on each IO:
+
+ round-robin (0):
+ select path in per CPU round-robin manner.
+
+ min-inflight (1):
+ select path with minimum inflights.
+
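+ Example (a sketch, assuming the numeric values listed above are
+ accepted on write):
+
+ # echo 1 > /sys/class/rtrs-client/<session-name>/mp_policy
+ # cat /sys/class/rtrs-client/<session-name>/mp_policy
+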
+What: /sys/class/rtrs-client/<session-name>/paths/
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Each path belonging to a given session is listed here by its source and
+ destination address. When a new path is added to a session by writing to
+ the "add_path" entry, a directory <src@dst> is created.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/state
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains "connected" if the session is connected to the peer and fully
+ functional. Otherwise the file contains "disconnected"
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/reconnect
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Write "1" to the file in order to reconnect the path.
+ Operation is blocking and returns 0 if reconnect was successful.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/disconnect
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Write "1" to the file in order to disconnect the path.
+ Operation blocks until RTRS path is disconnected.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/remove_path
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Write "1" to the file in order to disconnected and remove the path
+ from the session. Operation blocks until the path is disconnected
+ and removed from the session.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/hca_name
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the name of the HCA the connection is established on.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/hca_port
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the port number of the active port the traffic is going through.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/src_addr
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the source address of the path
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/dst_addr
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the destination address of the path
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats/reset_all
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RW, Read will return usage help, write 0 will clear all the statistics.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats/cpu_migration
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RTRS expects that each HCA IRQ is pinned to a separate CPU. If that is
+ not the case, an I/O response could be processed on a different CPU
+ than the one the I/O was originally submitted on. This file shows
+ how many interrupts were generated on an unexpected CPU.
+ "from:" is the CPU on which the IRQ was expected, but not generated.
+ "to:" is the CPU on which the IRQ was generated, but not expected.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats/reconnects
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains 2 unsigned integer values: the first records the number of
+ successful reconnects in the path lifetime, the second the number of
+ failed reconnects in the path lifetime.
+
+What: /sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats/rdma
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains statistics regarding rdma operations and inflight operations.
+ The output consists of 6 values:
+
+ <read-count> <read-total-size> <write-count> <write-total-size> \
+ <inflights> <failovered>
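+
+ A minimal sketch that labels the six fields from a shell:
+
+ # cd /sys/class/rtrs-client/<session-name>/paths/<src@dst>/stats
+ # read rcnt rsize wcnt wsize infl fo < rdma
+ # echo "read-count=$rcnt read-total-size=$rsize write-count=$wcnt write-total-size=$wsize inflights=$infl failovered=$fo"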
diff --git a/Documentation/ABI/testing/sysfs-class-rtrs-server b/Documentation/ABI/testing/sysfs-class-rtrs-server
new file mode 100644
index 000000000000..3b6d5b067df0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-rtrs-server
@@ -0,0 +1,53 @@
+What: /sys/class/rtrs-server
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: When a user of RTRS API creates a new session on a client side, a
+ directory entry with the name of that session is created in here.
+
+What: /sys/class/rtrs-server/<session-name>/paths/
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: When a new path is created by writing to the "add_path" entry on the
+ client side, a directory entry named <source address>@<destination address>
+ is created on the server.
+
+What: /sys/class/rtrs-server/<session-name>/paths/<src@dst>/disconnect
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: When "1" is written to the file, the RTRS session is being disconnected.
+ Operations is non-blocking and returns control immediately to the caller.
+
+What: /sys/class/rtrs-server/<session-name>/paths/<src@dst>/hca_name
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the name of the HCA the connection is established on.
+
+What: /sys/class/rtrs-server/<session-name>/paths/<src@dst>/hca_port
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the port number of the active port the traffic is going through.
+
+What: /sys/class/rtrs-server/<session-name>/paths/<src@dst>/src_addr
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the source address of the path
+
+What: /sys/class/rtrs-server/<session-name>/paths/<src@dst>/dst_addr
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: RO, Contains the destination address of the path
+
+What: /sys/class/rtrs-server/<session-name>/paths/<src@dst>/stats/rdma
+Date: Feb 2020
+KernelVersion: 5.7
+Contact: Jack Wang <jinpu.wang@cloud.ionos.com> Danil Kipnis <danil.kipnis@cloud.ionos.com>
+Description: Contains statistics regarding rdma operations and inflight operations.
+ The output consists of 5 values:
+ <read-count> <read-total-size> <write-count> <write-total-size> <inflights>
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 6b5dafab950c..b555df825447 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -486,6 +486,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds
+ /sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
Date: January 2018
@@ -574,3 +575,42 @@ Description: Secure Virtual Machine
If 1, it means the system is using the Protected Execution
Facility in POWER9 and newer processors. i.e., it is a Secure
Virtual Machine.
+
+What: /sys/devices/system/cpu/cpuX/purr
+Date: Apr 2005
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: PURR ticks for this CPU since the system boot.
+
+ The Processor Utilization Resources Register (PURR) is
+ a 64-bit counter which provides an estimate of the
+ resources used by the CPU thread. The contents of this
+ register increase monotonically. This sysfs interface
+ exposes the number of PURR ticks for cpuX.
+
+What: /sys/devices/system/cpu/cpuX/spurr
+Date: Dec 2006
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: SPURR ticks for this CPU since the system boot.
+
+ The Scaled Processor Utilization Resources Register
+ (SPURR) is a 64-bit counter that provides a frequency
+ invariant estimate of the resources used by the CPU
+ thread. The contents of this register increase
+ monotonically. This sysfs interface exposes the number
+ of SPURR ticks for cpuX.
+
+What: /sys/devices/system/cpu/cpuX/idle_purr
+Date: Apr 2020
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: PURR ticks for cpuX when it was idle.
+
+ This sysfs interface exposes the number of PURR ticks
+ for cpuX when it was idle.
+
+What: /sys/devices/system/cpu/cpuX/idle_spurr
+Date: Apr 2020
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: SPURR ticks for cpuX when it was idle.
+
+ This sysfs interface exposes the number of SPURR ticks
+ for cpuX when it was idle.
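+
+ A minimal sketch of combining these counters from userspace to
+ estimate busy (non-idle) PURR ticks on cpu0 over roughly one
+ second (this assumes the files expose hexadecimal counter
+ values; adjust the 0x prefixes if your kernel prints them
+ differently):
+
+ # cd /sys/devices/system/cpu/cpu0
+ # p1=0x$(cat purr); i1=0x$(cat idle_purr); sleep 1
+ # p2=0x$(cat purr); i2=0x$(cat idle_purr)
+ # echo $(( (p2 - p1) - (i2 - i1) ))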
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs
index 782df74042ed..1a14bf9b22ba 100644
--- a/Documentation/ABI/testing/sysfs-driver-habanalabs
+++ b/Documentation/ABI/testing/sysfs-driver-habanalabs
@@ -10,6 +10,23 @@ KernelVersion: 5.1
Contact: oded.gabbay@gmail.com
Description: Version of the application running on the device's CPU
+What: /sys/class/habanalabs/hl<n>/clk_max_freq_mhz
+Date: Jun 2019
+KernelVersion: not yet upstreamed
+Contact: oded.gabbay@gmail.com
+Description: Allows the user to set the maximum clock frequency, in MHz.
+ The device clock might be set to a lower value than the maximum.
+ The user should read the clk_cur_freq_mhz to see the actual
+ frequency value of the device clock. This property is valid
+ only for the Gaudi ASIC family.
+
+What: /sys/class/habanalabs/hl<n>/clk_cur_freq_mhz
+Date: Jun 2019
+KernelVersion: not yet upstreamed
+Contact: oded.gabbay@gmail.com
+Description: Displays the current frequency, in MHz, of the device clock.
+ This property is valid only for the Gaudi ASIC family.
+
What: /sys/class/habanalabs/hl<n>/cpld_ver
Date: Jan 2019
KernelVersion: 5.1
diff --git a/Documentation/ABI/testing/sysfs-driver-w1_therm b/Documentation/ABI/testing/sysfs-driver-w1_therm
new file mode 100644
index 000000000000..076659d506f2
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-w1_therm
@@ -0,0 +1,116 @@
+What: /sys/bus/w1/devices/.../alarms
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (RW) read or write TH and TL (Temperature High and Low) alarms.
+ Values shall be space separated and within the device range
+ (typically -55 degC to 125 degC); if not, values will be trimmed
+ to the device min/max capabilities. Values are integers as they
+ are stored in an 8-bit register in the device. The lowest value
+ is automatically assigned to TL. Once set, alarms can be searched
+ at master level; refer to Documentation/w1/w1_generic.rst for
+ detailed information.
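+ Example (the threshold values are arbitrary; the lowest one
+ becomes TL):
+ # echo "-15 30" > /sys/bus/w1/devices/.../alarms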
+Users: any user space application which wants to communicate with
+ w1_term device
+
+
+What: /sys/bus/w1/devices/.../eeprom
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (WO) writing to that file will either trigger a save of the
+ device data to its embedded EEPROM, or restore the data
+ embedded in the device EEPROM. Be aware that devices support
+ a limited number of EEPROM write cycles (typically 50k):
+ * 'save': save device RAM to EEPROM
+ * 'restore': restore EEPROM data in device RAM
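+ Example:
+ # echo save > /sys/bus/w1/devices/.../eeprom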
+Users: any user space application which wants to communicate with
+ w1_term device
+
+
+What: /sys/bus/w1/devices/.../ext_power
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (RO) return the power status by asking the device
+ * '0': device parasite powered
+ * '1': device externally powered
+ * '-xx': xx is kernel error when reading power status
+Users: any user space application which wants to communicate with
+ w1_term device
+
+
+What: /sys/bus/w1/devices/.../resolution
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (RW) get or set the device resolution (on supported devices;
+ if not, this entry is not present). Note that the resolution
+ is changed only in device RAM, so it will be cleared when
+ power is lost. Trigger a 'save' to EEPROM command to keep
+ the value after power-on. Read or write values are:
+ * '9..12': device resolution in bits,
+ or the resolution to set in bits
+ * '-xx': xx is a kernel error when reading the resolution
+ * Anything else: do nothing
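+ Example (the value read back is illustrative):
+ # cat /sys/bus/w1/devices/.../resolution
+ 12
+ # echo 9 > /sys/bus/w1/devices/.../resolution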
+Users: any user space application which wants to communicate with
+ w1_term device
+
+
+What: /sys/bus/w1/devices/.../temperature
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (RO) return the temperature in 1/1000 degC.
+ * If a bulk read has been triggered, it will directly
+ return the temperature computed when the bulk read
+ occurred, if available. If not yet available, nothing
+ is returned (a debug kernel message is sent); you
+ should retry later on.
+ * If no bulk read has been triggered, it will trigger
+ a conversion and send the result. Note that the
+ conversion duration depends on the resolution (if
+ the device supports this feature). It takes 94ms at
+ 9-bit resolution, 750ms at 12-bit.
+Users: any user space application which wants to communicate with
+ w1_term device
+
+
+What: /sys/bus/w1/devices/.../w1_slave
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (RW) return the temperature in 1/1000 degC.
+ *read*: returns 2 lines with the hex output data sent on the
+ bus, the CRC check result and the temperature in 1/1000 degC
+ *write*:
+ * '0' : save the 2 or 3 bytes to the device EEPROM
+ (i.e. TH, TL and config register)
+ * '9..12' : set the device resolution in RAM
+ (if supported)
+ * Anything else: do nothing
+ refer to Documentation/w1/slaves/w1_therm.rst for detailed
+ information.
+Users: any user space application which wants to communicate with
+ w1_term device
+
+
+What: /sys/bus/w1/devices/w1_bus_masterXX/therm_bulk_read
+Date: May 2020
+Contact: Akira Shimahara <akira215corp@gmail.com>
+Description:
+ (RW) trigger a bulk read conversion, or read the status.
+ *read*:
+ * '-1': conversion in progress on at least 1 sensor
+ * '1' : conversion complete but at least one sensor
+ value has not been read yet
+ * '0' : no bulk operation. Reading temperature will
+ trigger a conversion on each device
+ *write*: 'trigger': trigger a bulk read on all supporting
+ devices on the bus
+ Note that if a bulk read is sent but one sensor is not read
+ immediately, the next access to temperature on this device
+ will return the temperature measured at the time of issue
+ of the bulk read command (not the current temperature).
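+ Example sequence (a sketch; the value read back depends on
+ timing):
+ # echo trigger > /sys/bus/w1/devices/w1_bus_masterXX/therm_bulk_read
+ # cat /sys/bus/w1/devices/w1_bus_masterXX/therm_bulk_read
+ 1
+ # cat /sys/bus/w1/devices/.../temperature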
+Users: any user space application which wants to communicate with
+ w1_term device
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index bd8a0d19abe6..4bb93a06d8ab 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -323,3 +323,27 @@ What: /sys/fs/f2fs/<disk>/mounted_time_sec
Date: February 2020
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Show the mounted time in secs of this partition.
+
+What: /sys/fs/f2fs/<disk>/data_io_flag
+Date: April 2020
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: Give a way to attach REQ_META|FUA to data writes
+ given temperature-based bits. Now the bits indicate:
+ * REQ_META | REQ_FUA |
+ * 5 | 4 | 3 | 2 | 1 | 0 |
+ * Cold | Warm | Hot | Cold | Warm | Hot |
+
+What: /sys/fs/f2fs/<disk>/node_io_flag
+Date: June 2020
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: Give a way to attach REQ_META|FUA to node writes
+ given temperature-based bits. Now the bits indicate:
+ * REQ_META | REQ_FUA |
+ * 5 | 4 | 3 | 2 | 1 | 0 |
+ * Cold | Warm | Hot | Cold | Warm | Hot |
+
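+ For example, based on the bit layout above, writing 56 (0x38)
+ attaches REQ_META to cold, warm and hot node writes, while 7
+ (0x7) would attach REQ_FUA instead (a sketch; <disk> is a
+ placeholder and the attribute is assumed to accept decimal
+ input):
+
+ # echo 56 > /sys/fs/f2fs/<disk>/node_io_flag
+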
+What: /sys/fs/f2fs/<disk>/iostat_period_ms
+Date: April 2020
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: Give a way to change the iostat_period time. 3 secs by default.
+ The iostat trace reports the delta of the stats over each period.
diff --git a/Documentation/COPYING-logo b/Documentation/COPYING-logo
index 296f0f7f67eb..b21c7cf7d9f6 100644
--- a/Documentation/COPYING-logo
+++ b/Documentation/COPYING-logo
@@ -9,5 +9,5 @@ scale down to smaller sizes and are better for letterheads or whatever
you want to use it for: for the full range of logos take a look at
Larry's web-page:
- http://www.isc.tamu.edu/~lewing/linux/
+ https://www.isc.tamu.edu/~lewing/linux/
diff --git a/Documentation/PCI/endpoint/pci-endpoint.rst b/Documentation/PCI/endpoint/pci-endpoint.rst
index 0e2311b5617b..7536be445db8 100644
--- a/Documentation/PCI/endpoint/pci-endpoint.rst
+++ b/Documentation/PCI/endpoint/pci-endpoint.rst
@@ -78,8 +78,8 @@ by the PCI controller driver.
Cleanup the pci_epc_mem structure allocated during pci_epc_mem_init().
-APIs for the PCI Endpoint Function Driver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+EPC APIs for the PCI Endpoint Function Driver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section lists the APIs that the PCI Endpoint core provides to be used
by the PCI endpoint function driver.
@@ -117,8 +117,8 @@ by the PCI endpoint function driver.
The PCI endpoint function driver should use pci_epc_mem_free_addr() to
free the memory space allocated using pci_epc_mem_alloc_addr().
-Other APIs
-~~~~~~~~~~
+Other EPC APIs
+~~~~~~~~~~~~~~
There are other APIs provided by the EPC library. These are used for binding
the EPF device with EPC device. pci-ep-cfs.c can be used as reference for
@@ -160,8 +160,8 @@ PCI Endpoint Function(EPF) Library
The EPF library provides APIs to be used by the function driver and the EPC
library to provide endpoint mode functionality.
-APIs for the PCI Endpoint Function Driver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+EPF APIs for the PCI Endpoint Function Driver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section lists the APIs that the PCI Endpoint core provides to be used
by the PCI endpoint function driver.
@@ -204,8 +204,8 @@ by the PCI endpoint controller library.
The PCI endpoint controller library invokes pci_epf_linkup() when the
EPC device has established the connection to the host.
-Other APIs
-~~~~~~~~~~
+Other EPF APIs
+~~~~~~~~~~~~~~
There are other APIs provided by the EPF library. These are used to notify
the function driver when the EPF device is bound to the EPC device.
diff --git a/Documentation/admin-guide/LSM/tomoyo.rst b/Documentation/admin-guide/LSM/tomoyo.rst
index e2d6b6e15082..4bc9c2b4da6f 100644
--- a/Documentation/admin-guide/LSM/tomoyo.rst
+++ b/Documentation/admin-guide/LSM/tomoyo.rst
@@ -27,29 +27,29 @@ Where is documentation?
=======================
User <-> Kernel interface documentation is available at
-http://tomoyo.osdn.jp/2.5/policy-specification/index.html .
+https://tomoyo.osdn.jp/2.5/policy-specification/index.html .
Materials we prepared for seminars and symposiums are available at
-http://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
+https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 .
Below lists are chosen from three aspects.
What is TOMOYO?
TOMOYO Linux Overview
- http://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
+ https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf
TOMOYO Linux: pragmatic and manageable security for Linux
- http://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
+ https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf
TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box
- http://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
+ https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf
What can TOMOYO do?
Deep inside TOMOYO Linux
- http://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
+ https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf
The role of "pathname based access control" in security.
- http://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
+ https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf
History of TOMOYO?
Realities of Mainlining
- http://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
+ https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf
What is future plan?
====================
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index cc6151fc0845..5fb526900023 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -209,15 +209,22 @@ Configuring the kernel
store the lsmod of that machine into a file
and pass it in as a LSMOD parameter.
+ Also, you can preserve modules in certain folders
+ or kconfig files by specifying their paths in
+ the LMC_KEEP parameter.
+
target$ lsmod > /tmp/mylsmod
target$ scp /tmp/mylsmod host:/tmp
- host$ make LSMOD=/tmp/mylsmod localmodconfig
+ host$ make LSMOD=/tmp/mylsmod \
+ LMC_KEEP="drivers/usb:drivers/gpu:fs" \
+ localmodconfig
The above also works when cross compiling.
"make localyesconfig" Similar to localmodconfig, except it will convert
- all module options to built in (=y) options.
+ all module options to built in (=y) options. You can
+ also preserve modules with LMC_KEEP.
"make kvmconfig" Enable additional options for kvm guest kernel support.
diff --git a/Documentation/admin-guide/acpi/initrd_table_override.rst b/Documentation/admin-guide/acpi/initrd_table_override.rst
index cbd768207631..bb24fa6b5fbe 100644
--- a/Documentation/admin-guide/acpi/initrd_table_override.rst
+++ b/Documentation/admin-guide/acpi/initrd_table_override.rst
@@ -102,7 +102,7 @@ Where to retrieve userspace tools
=================================
iasl and acpixtract are part of Intel's ACPICA project:
-http://acpica.org/
+https://acpica.org/
and should be packaged by distributions (for example in the acpica package
on SUSE).
diff --git a/Documentation/admin-guide/bcache.rst b/Documentation/admin-guide/bcache.rst
index c0ce64d75bbf..1eccf952876d 100644
--- a/Documentation/admin-guide/bcache.rst
+++ b/Documentation/admin-guide/bcache.rst
@@ -7,9 +7,9 @@ nice if you could use them as cache... Hence bcache.
Wiki and git repositories are at:
- - http://bcache.evilpiepirate.org
+ - https://bcache.evilpiepirate.org
- http://evilpiepirate.org/git/linux-bcache.git
- - http://evilpiepirate.org/git/bcache-tools.git
+ - https://evilpiepirate.org/git/bcache-tools.git
It's designed around the performance characteristics of SSDs - it only allocates
in erase block sized buckets, and it uses a hybrid btree/log to track cached
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index b8c0460730f3..ce3e05e41724 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -714,9 +714,7 @@ Conventions
- Settings for a single feature should be contained in a single file.
- The root cgroup should be exempt from resource control and thus
- shouldn't have resource control interface files. Also,
- informational files on the root cgroup which end up showing global
- information available elsewhere shouldn't exist.
+ shouldn't have resource control interface files.
- The default time unit is microseconds. If a different unit is ever
used, an explicit unit suffix must be present.
@@ -985,7 +983,7 @@ CPU Interface Files
All time durations are in microseconds.
cpu.stat
- A read-only flat-keyed file which exists on non-root cgroups.
+ A read-only flat-keyed file.
This file exists whether the controller is enabled or not.
It always reports the following three stats:
@@ -1172,6 +1170,13 @@ PAGE_SIZE multiple when read back.
Under certain circumstances, the usage may go over the limit
temporarily.
+ In the default configuration, regular 0-order allocations always
+ succeed unless the OOM killer chooses the current task as a victim.
+
+ Some kinds of allocations don't invoke the OOM killer.
+ The caller could retry them differently, return -ENOMEM to
+ userspace, or silently ignore the failure, as in cases like
+ disk readahead.
+
This is the ultimate protection mechanism. As long as the
high limit is used and monitored properly, this limit's
utility is limited to providing the final safety net.
@@ -1228,17 +1233,9 @@ PAGE_SIZE multiple when read back.
The number of time the cgroup's memory usage was
reached the limit and allocation was about to fail.
- Depending on context result could be invocation of OOM
- killer and retrying allocation or failing allocation.
-
- Failed allocation in its turn could be returned into
- userspace as -ENOMEM or silently ignored in cases like
- disk readahead. For now OOM in memory cgroup kills
- tasks iff shortage has happened inside page fault.
-
This event is not raised if the OOM killer is not
considered as an option, e.g. for failed high-order
- allocations.
+ allocations or if the caller asked not to retry the allocation.
oom_kill
The number of processes belonging to this cgroup
diff --git a/Documentation/admin-guide/device-mapper/dm-ebs.rst b/Documentation/admin-guide/device-mapper/dm-ebs.rst
new file mode 100644
index 000000000000..534fa38e8862
--- /dev/null
+++ b/Documentation/admin-guide/device-mapper/dm-ebs.rst
@@ -0,0 +1,51 @@
+======
+dm-ebs
+======
+
+
+This target is similar to the linear target except that it emulates
+a smaller logical block size on a device with a larger logical block
+size. Its main purpose is to provide emulation of 512 byte sectors on
+devices that do not provide this emulation (i.e. 4K native disks).
+
+Supported emulated logical block sizes are 512, 1024, 2048 and 4096 bytes.
+
+The underlying block size can be set to > 4K to test buffering of larger units.
+
+
+Table parameters
+----------------
+ <dev path> <offset> <emulated sectors> [<underlying sectors>]
+
+Mandatory parameters:
+
+ <dev path>:
+ Full pathname to the underlying block-device,
+ or a "major:minor" device-number.
+ <offset>:
+ Starting sector within the device;
+ has to be a multiple of <emulated sectors>.
+ <emulated sectors>:
+ Number of sectors defining the logical block size to be emulated;
+ 1, 2, 4, 8 sectors of 512 bytes supported.
+
+Optional parameter:
+
+ <underlying sectors>:
+ Number of sectors defining the logical block size of <dev path>.
+ 2^N supported, e.g. 8 = emulate 8 sectors of 512 bytes = 4KiB.
+ If not provided, the logical block size of <dev path> will be used.
+
+
+Examples:
+
+Emulate 1 sector = 512 bytes logical block size on /dev/sda starting at
+offset 1024 sectors, with the underlying device's block size detected automatically:
+
+ebs /dev/sda 1024 1
+
+Emulate 2 sector = 1KiB logical block size on /dev/sda starting at
+offset 128 sectors, enforce 2KiB underlying device block size.
+This presumes a logical block size of 2KiB or less on /dev/sda to work:
+
+ebs /dev/sda 128 2 4
diff --git a/Documentation/admin-guide/device-mapper/dm-integrity.rst b/Documentation/admin-guide/device-mapper/dm-integrity.rst
index 8439d2ae689b..9edd45593abd 100644
--- a/Documentation/admin-guide/device-mapper/dm-integrity.rst
+++ b/Documentation/admin-guide/device-mapper/dm-integrity.rst
@@ -193,6 +193,14 @@ should not be changed when reloading the target because the layout of disk
data depend on them and the reloaded target would be non-functional.
+Status line:
+
+1. the number of integrity mismatches
+2. provided data sectors - that is the number of sectors that the user
+ could use
+3. the current recalculating position (or '-' if we didn't recalculate)
+
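+For example, a hypothetical status line for a target with no integrity
+mismatches, 2088960 provided data sectors and no recalculation could look
+like this (the leading fields are the usual dmsetup start, length and
+target name)::
+
+	0 2088960 integrity 0 2088960 -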
+
The layout of the formatted block device:
* reserved sectors
diff --git a/Documentation/admin-guide/device-mapper/dm-zoned.rst b/Documentation/admin-guide/device-mapper/dm-zoned.rst
index 07f56ebc1730..553752ea2521 100644
--- a/Documentation/admin-guide/device-mapper/dm-zoned.rst
+++ b/Documentation/admin-guide/device-mapper/dm-zoned.rst
@@ -37,9 +37,13 @@ Algorithm
dm-zoned implements an on-disk buffering scheme to handle non-sequential
write accesses to the sequential zones of a zoned block device.
Conventional zones are used for caching as well as for storing internal
-metadata.
+metadata. It can also use a regular block device together with the zoned
+block device; in that case the regular block device will be split logically
+into zones of the same size as the zones of the zoned block device. These zones will be
+placed in front of the zones from the zoned block device and will be handled
+just like conventional zones.
-The zones of the device are separated into 2 types:
+The zones of the device(s) are separated into 2 types:
1) Metadata zones: these are conventional zones used to store metadata.
Metadata zones are not reported as useable capacity to the user.
@@ -127,6 +131,13 @@ resumed. Flushing metadata thus only temporarily delays write and
discard requests. Read requests can be processed concurrently while
metadata flush is being executed.
+If a regular device is used in conjunction with the zoned block device,
+a third set of metadata (without the zone bitmaps) is written to the
+start of the zoned block device. This metadata has a generation counter of
+'0' and will never be updated during normal operation; it just serves for
+identification purposes. The first and second copy of the metadata
+are located at the start of the regular block device.
+
Usage
=====
@@ -138,9 +149,46 @@ Ex::
dmzadm --format /dev/sdxx
-For a formatted device, the target can be created normally with the
-dmsetup utility. The only parameter that dm-zoned requires is the
-underlying zoned block device name. Ex::
- echo "0 `blockdev --getsize ${dev}` zoned ${dev}" | \
- dmsetup create dmz-`basename ${dev}`
+If two drives are to be used, both devices must be specified, with the
+regular block device as the first device.
+
+Ex::
+
+ dmzadm --format /dev/sdxx /dev/sdyy
+
+
+Formatted device(s) can be started with the dmzadm utility, too:
+
+Ex::
+
+ dmzadm --start /dev/sdxx /dev/sdyy
+
+
+Information about the internal layout and current usage of the zones can
+be obtained with the 'status' callback from dmsetup:
+
+Ex::
+
+ dmsetup status /dev/dm-X
+
+will return a line
+
+ 0 <size> zoned <nr_zones> zones <nr_unmap_rnd>/<nr_rnd> random <nr_unmap_seq>/<nr_seq> sequential
+
+where <nr_zones> is the total number of zones, <nr_unmap_rnd> is the number
+of unmapped (i.e. free) random zones, <nr_rnd> the total number of random
+zones, <nr_unmap_seq> the number of unmapped sequential zones, and <nr_seq>
+the total number of sequential zones.
+
+Normally the reclaim process will be started once fewer than 50 percent of
+the random zones are free. To start the reclaim process manually, even
+before reaching this threshold, the 'dmsetup message' function can be
+used:
+
+Ex::
+
+ dmsetup message /dev/dm-X 0 reclaim
+
+will start the reclaim process and random zones will be moved to sequential
+zones.
diff --git a/Documentation/admin-guide/devices.rst b/Documentation/admin-guide/devices.rst
index d41671aeaef0..035275fedbdd 100644
--- a/Documentation/admin-guide/devices.rst
+++ b/Documentation/admin-guide/devices.rst
@@ -17,7 +17,7 @@ Specifically explore the sections titled "CHAR and MISC DRIVERS", and
to involve for character and block devices.
This document is included by reference into the Filesystem Hierarchy
-Standard (FHS). The FHS is available from http://www.pathname.com/fhs/.
+Standard (FHS). The FHS is available from https://www.pathname.com/fhs/.
Allocations marked (68k/Amiga) apply to Linux/68k on the Amiga
platform only. Allocations marked (68k/Atari) apply to Linux/68k on
diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst
index 0dc2eb8e44e5..1012bd9305e9 100644
--- a/Documentation/admin-guide/dynamic-debug-howto.rst
+++ b/Documentation/admin-guide/dynamic-debug-howto.rst
@@ -13,6 +13,11 @@ kernel code to obtain additional kernel information. Currently, if
``print_hex_dump_debug()``/``print_hex_dump_bytes()`` calls can be dynamically
enabled per-callsite.
+If you do not want to enable dynamic debug globally (e.g. on some embedded
+systems), you may set ``CONFIG_DYNAMIC_DEBUG_CORE`` for basic dynamic debug
+support and add ``ccflags := -DDYNAMIC_DEBUG_MODULE`` to the Makefile of any
+module which you'd like to debug dynamically later.
+
If ``CONFIG_DYNAMIC_DEBUG`` is not set, ``print_hex_dump_debug()`` is just
shortcut for ``print_hex_dump(KERN_DEBUG)``.
diff --git a/Documentation/admin-guide/gpio/gpio-aggregator.rst b/Documentation/admin-guide/gpio/gpio-aggregator.rst
new file mode 100644
index 000000000000..5cd1e7221756
--- /dev/null
+++ b/Documentation/admin-guide/gpio/gpio-aggregator.rst
@@ -0,0 +1,111 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+GPIO Aggregator
+===============
+
+The GPIO Aggregator provides a mechanism to aggregate GPIOs, and expose them as
+a new gpio_chip. This supports the following use cases.
+
+
+Aggregating GPIOs using Sysfs
+-----------------------------
+
+GPIO controllers are exported to userspace using /dev/gpiochip* character
+devices. Access control to these devices is provided by standard UNIX file
+system permissions, on an all-or-nothing basis: either a GPIO controller is
+accessible for a user, or it is not.
+
+The GPIO Aggregator provides access control for a set of one or more GPIOs, by
+aggregating them into a new gpio_chip, which can be assigned to a group or user
+using standard UNIX file ownership and permissions. Furthermore, this
+simplifies and hardens exporting GPIOs to a virtual machine, as the VM can just
+grab the full GPIO controller, and no longer needs to care about which GPIOs to
+grab and which not, reducing the attack surface.
+
+Aggregated GPIO controllers are instantiated and destroyed by writing to
+write-only attribute files in sysfs.
+
+ /sys/bus/platform/drivers/gpio-aggregator/
+
+ "new_device" ...
+ Userspace may ask the kernel to instantiate an aggregated GPIO
+ controller by writing a string describing the GPIOs to
+ aggregate to the "new_device" file, using the format
+
+ .. code-block:: none
+
+ [<gpioA>] [<gpiochipB> <offsets>] ...
+
+ Where:
+
+ "<gpioA>" ...
+ is a GPIO line name,
+
+ "<gpiochipB>" ...
+ is a GPIO chip label, and
+
+ "<offsets>" ...
+ is a comma-separated list of GPIO offsets and/or
+ GPIO offset ranges denoted by dashes.
+
+ Example: Instantiate a new GPIO aggregator by aggregating GPIO
+ line 19 of "e6052000.gpio" and GPIO lines 20-21 of
+ "e6050000.gpio" into a new gpio_chip:
+
+ .. code-block:: sh
+
+ $ echo 'e6052000.gpio 19 e6050000.gpio 20-21' > new_device
+
+ "delete_device" ...
+ Userspace may ask the kernel to destroy an aggregated GPIO
+ controller after use by writing its device name to the
+ "delete_device" file.
+
+ Example: Destroy the previously-created aggregated GPIO
+ controller, assumed to be "gpio-aggregator.0":
+
+ .. code-block:: sh
+
+ $ echo gpio-aggregator.0 > delete_device
+
+
+Generic GPIO Driver
+-------------------
+
+The GPIO Aggregator can also be used as a generic driver for a simple
+GPIO-operated device described in DT, without a dedicated in-kernel driver.
+This is useful in industrial control, and is not unlike e.g. spidev, which
+allows the user to communicate with an SPI device from userspace.
+
+Binding a device to the GPIO Aggregator is performed either by modifying the
+gpio-aggregator driver, or by writing to the "driver_override" file in Sysfs.
+
+Example: If "door" is a GPIO-operated device described in DT, using its own
+compatible value::
+
+ door {
+ compatible = "myvendor,mydoor";
+
+ gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>,
+ <&gpio2 20 GPIO_ACTIVE_LOW>;
+ gpio-line-names = "open", "lock";
+ };
+
+it can be bound to the GPIO Aggregator by either:
+
+1. Adding its compatible value to ``gpio_aggregator_dt_ids[]``,
+2. Binding manually using "driver_override":
+
+.. code-block:: sh
+
+ $ echo gpio-aggregator > /sys/bus/platform/devices/door/driver_override
+ $ echo door > /sys/bus/platform/drivers/gpio-aggregator/bind
+
+After that, a new gpiochip "door" has been created:
+
+.. code-block:: sh
+
+ $ gpioinfo door
+ gpiochip12 - 2 lines:
+ line 0: "open" unused input active-high
+ line 1: "lock" unused input active-high
diff --git a/Documentation/admin-guide/gpio/index.rst b/Documentation/admin-guide/gpio/index.rst
index a244ba4e87d5..ef2838638e96 100644
--- a/Documentation/admin-guide/gpio/index.rst
+++ b/Documentation/admin-guide/gpio/index.rst
@@ -7,6 +7,7 @@ gpio
.. toctree::
:maxdepth: 1
+ gpio-aggregator
sysfs
.. only:: subproject and html
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index 0795e3c2643f..ca4dbdd9016d 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -14,3 +14,4 @@ are configurable at compile, boot or run time.
mds
tsx_async_abort
multihit.rst
+ special-register-buffer-data-sampling.rst
diff --git a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
new file mode 100644
index 000000000000..47b1b3afac99
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
@@ -0,0 +1,149 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+SRBDS - Special Register Buffer Data Sampling
+=============================================
+
+SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to
+infer values returned from special register accesses. Special register
+accesses are accesses to off-core registers. According to Intel's evaluation,
+the special register reads that have a security expectation of privacy are
+RDRAND, RDSEED and SGX EGETKEY.
+
+When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved
+to the core through the special register mechanism that is susceptible
+to MDS attacks.
+
+Affected processors
+--------------------
+Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may
+be affected.
+
+A processor is affected by SRBDS if its Family_Model and stepping are
+in the following list, with the exception of the listed processors
+exporting MDS_NO while Intel TSX is available yet not enabled. The
+latter class of processors is only affected when Intel TSX is enabled
+by software using TSX_CTRL_MSR; otherwise they are not affected.
+
+ ============= ============ ========
+ common name Family_Model Stepping
+ ============= ============ ========
+ IvyBridge 06_3AH All
+
+ Haswell 06_3CH All
+ Haswell_L 06_45H All
+ Haswell_G 06_46H All
+
+ Broadwell_G 06_47H All
+ Broadwell 06_3DH All
+
+ Skylake_L 06_4EH All
+ Skylake 06_5EH All
+
+ Kabylake_L 06_8EH <= 0xC
+ Kabylake 06_9EH <= 0xD
+ ============= ============ ========
+
+Related CVEs
+------------
+
+The following CVE entry is related to this SRBDS issue:
+
+ ============== ===== =====================================
+ CVE-2020-0543 SRBDS Special Register Buffer Data Sampling
+ ============== ===== =====================================
+
+Attack scenarios
+----------------
+An unprivileged user can extract values returned from RDRAND and RDSEED
+executed on another core or sibling thread using MDS techniques.
+
+
+Mitigation mechanism
+--------------------
+Intel will release microcode updates that modify the RDRAND, RDSEED, and
+EGETKEY instructions to overwrite secret special register data in the shared
+staging buffer before the secret data can be accessed by another logical
+processor.
+
+During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core
+accesses from other logical processors will be delayed until the special
+register read is complete and the secret data in the shared staging buffer is
+overwritten.
+
+This has three effects on performance:
+
+#. RDRAND, RDSEED, or EGETKEY instructions have higher latency.
+
+#. Executing RDRAND at the same time on multiple logical processors will be
+ serialized, resulting in an overall reduction in the maximum RDRAND
+ bandwidth.
+
+#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other
+ logical processors that miss their core caches, with an impact similar to
+ legacy locked cache-line-split accesses.
+
+The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable
+the mitigation for RDRAND and RDSEED instructions executed outside of Intel
+Software Guard Extensions (Intel SGX) enclaves. On logical processors that
+disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not
+take longer to execute and do not impact performance of sibling logical
+processors memory accesses. The opt-out mechanism does not affect Intel SGX
+enclaves (including execution of RDRAND or RDSEED inside an enclave, as well
+as EGETKEY execution).
+
+IA32_MCU_OPT_CTRL MSR Definition
+--------------------------------
+Along with the mitigation for this issue, Intel added a new thread-scope
+IA32_MCU_OPT_CTRL MSR (address 0x123). The presence of this MSR and
+RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL =
+9]==1. This MSR is introduced through the microcode update.
+
+Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor
+disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX
+enclave on that logical processor. Opting out of the mitigation for a
+particular logical processor does not affect the RDRAND and RDSEED mitigations
+for other logical processors.
+
+Note that inside of an Intel SGX enclave, the mitigation is applied regardless
+of the value of RNGDS_MITG_DIS.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The kernel command line allows control over the SRBDS mitigation at boot time
+with the option "srbds=". The option for this is:
+
+ ============= =============================================================
+ off This option disables SRBDS mitigation for RDRAND and RDSEED on
+ affected platforms.
+ ============= =============================================================
+
+SRBDS System Information
+------------------------
+The Linux kernel provides vulnerability status information through sysfs. For
+SRBDS this can be accessed by the following sysfs file:
+/sys/devices/system/cpu/vulnerabilities/srbds
+
+The possible values contained in this file are:
+
+ ============================== =============================================
+ Not affected Processor not vulnerable
+ Vulnerable Processor vulnerable and mitigation disabled
+ Vulnerable: No microcode Processor vulnerable and microcode is missing
+ mitigation
+ Mitigation: Microcode Processor is vulnerable and mitigation is in
+ effect.
+ Mitigation: TSX disabled Processor is only vulnerable when TSX is
+ enabled while this system was booted with TSX
+ disabled.
+ Unknown: Dependent on
+ hypervisor status Running on virtual guest processor that is
+ affected but with no way to know if host
+ processor is mitigated or vulnerable.
+ ============================== =============================================
+
+SRBDS Default mitigation
+------------------------
+This new microcode serializes processor access during execution of RDRAND
+and RDSEED, and ensures that the shared buffer is overwritten before it is
+released for reuse. Use the "srbds=off" kernel command line option to disable
+the mitigation for RDRAND and RDSEED.
diff --git a/Documentation/admin-guide/initrd.rst b/Documentation/admin-guide/initrd.rst
index a03dabaaf3a3..67bbad8806e8 100644
--- a/Documentation/admin-guide/initrd.rst
+++ b/Documentation/admin-guide/initrd.rst
@@ -376,7 +376,7 @@ Resources
---------
.. [#f1] Almesberger, Werner; "Booting Linux: The History and the Future"
- http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
+ https://www.almesberger.net/cv/papers/ols2k-9.ps.gz
.. [#f2] newlib package (experimental), with initrd example
https://www.sourceware.org/newlib/
.. [#f3] util-linux: Miscellaneous utilities for Linux
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index ac7e131d2935..2da65fef2a1c 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -521,6 +521,14 @@ will cause a kdump to occur at the panic() call. In cases where a user wants
to specify this during runtime, /proc/sys/kernel/panic_on_warn can be set to 1
to achieve the same behaviour.
+Trigger Kdump on add_taint()
+============================
+
+The kernel parameter panic_on_taint facilitates a conditional call to panic()
+from within add_taint() whenever the bit flag being set by add_taint()
+matches this bitmask.
+This will cause a kdump to occur at the add_taint()->panic() call.
+
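+For example, booting with the option below would panic the kernel (and
+hence trigger kdump) as soon as it gets tainted with the "bad page" taint
+flag, which corresponds to bit 5 of the taint bitmask (see
+Documentation/admin-guide/tainted-kernels.rst for the full list of taint
+bits)::
+
+	panic_on_taint=0x20
+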
Contact
=======
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 89386f6f3ab6..fb95fad81c79 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -874,6 +874,11 @@
can be useful when debugging issues that require an SLB
miss to occur.
+ stress_slb [PPC]
+ Limits the number of kernel SLB entries, and flushes
+ them frequently to increase the rate of SLB faults
+ on kernel addresses.
+
disable= [IPV6]
See Documentation/networking/ipv6.rst.
@@ -1440,7 +1445,7 @@
hardlockup_all_cpu_backtrace=
[KNL] Should the hard-lockup detector generate
backtraces on all cpus.
- Format: <integer>
+ Format: 0 | 1
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
@@ -1508,9 +1513,9 @@
hung_task_panic=
[KNL] Should the hung task detector generate panics.
- Format: <integer>
+ Format: 0 | 1
- A nonzero value instructs the kernel to panic when a
+ A value of 1 instructs the kernel to panic when a
hung task is detected. The default value is controlled
by the CONFIG_BOOTPARAM_HUNG_TASK_PANIC build-time
option. The value selected by this boot parameter can
@@ -3442,6 +3447,19 @@
bit 4: print ftrace buffer
bit 5: print all printk messages in buffer
+ panic_on_taint= Bitmask for conditionally calling panic() in add_taint()
+ Format: <hex>[,nousertaint]
+ Hexadecimal bitmask representing the set of TAINT flags
+ that will cause the kernel to panic when add_taint() is
+ called with any of the flags in this set.
+ The optional switch "nousertaint" prevents userspace
+ from forcing a crash by writing to the sysctl
+ /proc/sys/kernel/tainted any flag set matching the
+ bitmask given to panic_on_taint.
+ See Documentation/admin-guide/tainted-kernels.rst for
+ details on the taint flags that can be combined into
+ this bitmask.
+
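+ Example: panic_on_taint=0x200,nousertaint
+ (assuming the flag layout documented in
+ tainted-kernels.rst, 0x200 is TAINT_WARN, bit 9:
+ the kernel panics as soon as that taint flag
+ would be set, and userspace writes of that bit
+ to /proc/sys/kernel/tainted are rejected)
+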
panic_on_warn panic() instead of WARN(). Useful to cause kdump
on a WARN().
@@ -3710,6 +3728,8 @@
may put more devices in an IOMMU group.
force_floating [S390] Force usage of floating interrupts.
nomio [S390] Do not use MIO instructions.
+ norid [S390] Ignore the RID field and force use of
+ one PCI domain per PCI function.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -4647,9 +4667,9 @@
softlockup_panic=
[KNL] Should the soft-lockup detector generate panics.
- Format: <integer>
+ Format: 0 | 1
- A nonzero value instructs the soft-lockup detector
+ A value of 1 instructs the soft-lockup detector
to panic the machine when a soft-lockup occurs. It is
also controlled by the kernel.softlockup_panic sysctl
and CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC, which is the
@@ -4658,7 +4678,7 @@
softlockup_all_cpu_backtrace=
[KNL] Should the soft-lockup detector generate
backtraces on all cpus.
- Format: <integer>
+ Format: 0 | 1
sonypi.*= [HW] Sony Programmable I/O Control Device driver
See Documentation/admin-guide/laptops/sonypi.rst
@@ -4817,6 +4837,26 @@
the kernel will oops in either "warn" or "fatal"
mode.
+ srbds= [X86,INTEL]
+ Control the Special Register Buffer Data Sampling
+ (SRBDS) mitigation.
+
+ Certain CPUs are vulnerable to an MDS-like
+ exploit which can leak bits from the random
+ number generator.
+
+ By default, this issue is mitigated by
+ microcode. However, the microcode fix can cause
+ the RDRAND and RDSEED instructions to become
+ much slower. Among other effects, this will
+ result in reduced throughput from /dev/urandom.
+
+ The microcode mitigation can be disabled with
+ the following option:
+
+ off: Disable mitigation and remove the
+ performance impact on RDRAND and RDSEED
+
srcutree.counter_wrap_check [KNL]
Specifies how frequently to check for
grace-period sequence counter wrap for the
@@ -4951,6 +4991,15 @@
switches= [HW,M68k]
+ sysctl.*= [KNL]
+ Set a sysctl parameter, right before loading the init
+ process, as if the value was written to the respective
+ /proc/sys/... file. Both '.' and '/' are recognized as
+ separators. Unrecognized parameters and invalid values
+ are reported in the kernel log. Sysctls registered
+ later by a loaded module cannot be set this way.
+ Example: sysctl.vm.swappiness=40
+
sysfs.deprecated=0|1 [KNL]
Enable/disable old style sysfs layout for old udev
on older distributions. When this option is enabled
diff --git a/Documentation/admin-guide/md.rst b/Documentation/admin-guide/md.rst
index 3c51084ffd37..d973d469ffc4 100644
--- a/Documentation/admin-guide/md.rst
+++ b/Documentation/admin-guide/md.rst
@@ -5,7 +5,7 @@ Boot time assembly of RAID arrays
---------------------------------
Tools that manage md devices can be found at
- http://www.kernel.org/pub/linux/utils/raid/
+ https://www.kernel.org/pub/linux/utils/raid/
You can boot with your md device with the following kernel command
diff --git a/Documentation/admin-guide/mm/numa_memory_policy.rst b/Documentation/admin-guide/mm/numa_memory_policy.rst
index 8463f5538fda..067a90a1499c 100644
--- a/Documentation/admin-guide/mm/numa_memory_policy.rst
+++ b/Documentation/admin-guide/mm/numa_memory_policy.rst
@@ -364,19 +364,19 @@ follows:
2) for querying the policy, we do not need to take an extra reference on the
target task's task policy nor vma policies because we always acquire the
- task's mm's mmap_sem for read during the query. The set_mempolicy() and
- mbind() APIs [see below] always acquire the mmap_sem for write when
+ task's mm's mmap_lock for read during the query. The set_mempolicy() and
+ mbind() APIs [see below] always acquire the mmap_lock for write when
installing or replacing task or vma policies. Thus, there is no possibility
of a task or thread freeing a policy while another task or thread is
querying it.
3) Page allocation usage of task or vma policy occurs in the fault path where
- we hold them mmap_sem for read. Again, because replacing the task or vma
- policy requires that the mmap_sem be held for write, the policy can't be
+ we hold them mmap_lock for read. Again, because replacing the task or vma
+ policy requires that the mmap_lock be held for write, the policy can't be
freed out from under us while we're using it for page allocation.
4) Shared policies require special consideration. One task can replace a
- shared memory policy while another task, with a distinct mmap_sem, is
+ shared memory policy while another task, with a distinct mmap_lock, is
querying or allocating a page based on the policy. To resolve this
potential race, the shared policy infrastructure adds an extra reference
to the shared policy during lookup while holding a spin lock on the shared
diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
index 0bf49d7313ad..1dc2d5f823b4 100644
--- a/Documentation/admin-guide/mm/userfaultfd.rst
+++ b/Documentation/admin-guide/mm/userfaultfd.rst
@@ -33,7 +33,7 @@ memory ranges) provides two primary functionalities:
The real advantage of userfaults if compared to regular virtual memory
management of mremap/mprotect is that the userfaults in all their
operations never involve heavyweight structures like vmas (in fact the
-``userfaultfd`` runtime load never takes the mmap_sem for writing).
+``userfaultfd`` runtime load never takes the mmap_lock for writing).
Vmas are not suitable for page- (or hugepage) granular fault tracking
when dealing with virtual address spaces that could span
diff --git a/Documentation/admin-guide/mono.rst b/Documentation/admin-guide/mono.rst
index 59e6d59f0ed9..c6dab5680065 100644
--- a/Documentation/admin-guide/mono.rst
+++ b/Documentation/admin-guide/mono.rst
@@ -12,11 +12,11 @@ other program after you have done the following:
a binary package, a source tarball or by installing from Git. Binary
packages for several distributions can be found at:
- http://www.mono-project.com/download/
+ https://www.mono-project.com/download/
Instructions for compiling Mono can be found at:
- http://www.mono-project.com/docs/compiling-mono/linux/
+ https://www.mono-project.com/docs/compiling-mono/linux/
Once the Mono CLR support has been installed, just check that
``/usr/bin/mono`` (which could be located elsewhere, for example
diff --git a/Documentation/admin-guide/reporting-bugs.rst b/Documentation/admin-guide/reporting-bugs.rst
index 49ac8dc3594d..42481ea7b41d 100644
--- a/Documentation/admin-guide/reporting-bugs.rst
+++ b/Documentation/admin-guide/reporting-bugs.rst
@@ -75,7 +75,7 @@ Tips for reporting bugs
If you haven't reported a bug before, please read:
- http://www.chiark.greenend.org.uk/~sgtatham/bugs.html
+ https://www.chiark.greenend.org.uk/~sgtatham/bugs.html
http://www.catb.org/esr/faqs/smart-questions.html
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 1ebf68d01141..83acf5025488 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -335,6 +335,20 @@ Path for the hotplug policy agent.
Default value is "``/sbin/hotplug``".
+hung_task_all_cpu_backtrace
+===========================
+
+If this option is set, the kernel will send an NMI to all CPUs to dump
+their backtraces when a hung task is detected. This file shows up if
+CONFIG_DETECT_HUNG_TASK and CONFIG_SMP are enabled.
+
+0: Won't show all CPUs' backtraces when a hung task is detected.
+This is the default behavior.
+
+1: Will non-maskably interrupt all CPUs and dump their backtraces when
+a hung task is detected.
+
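+A minimal user-space sketch of enabling this knob (the helper name is
+arbitrary; writing ``1`` from a shell works just as well)::
+
+  #include <fcntl.h>
+  #include <unistd.h>
+
+  /* Returns 0 on success; fails if the sysctl is not available. */
+  int enable_hung_task_backtraces(void)
+  {
+      int fd = open("/proc/sys/kernel/hung_task_all_cpu_backtrace", O_WRONLY);
+
+      if (fd < 0)
+          return -1;
+      if (write(fd, "1", 1) != 1) {
+          close(fd);
+          return -1;
+      }
+      return close(fd);
+  }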
+
hung_task_panic
===============
@@ -632,6 +646,22 @@ rate for each task.
scanned for a given scan.
+oops_all_cpu_backtrace
+======================
+
+If this option is set, the kernel will send an NMI to all CPUs to dump
+their backtraces when an oops event occurs. It should be used as a last
+resort in case a panic cannot be triggered (for example, to protect running
+VMs) or a kdump can't be collected. This file shows up if CONFIG_SMP
+is enabled.
+
+0: Won't show all CPUs' backtraces when an oops is detected.
+This is the default behavior.
+
+1: Will non-maskably interrupt all CPUs and dump their backtraces when
+an oops event is detected.
+
+
osrelease, ostype & version
===========================
@@ -1239,6 +1269,13 @@ ORed together. The letters are seen in "Tainted" line of Oops reports.
See :doc:`/admin-guide/tainted-kernels` for more information.
+Note:
+ writes to this sysctl interface will fail with ``EINVAL`` if the kernel is
+ booted with the command line option ``panic_on_taint=<bitmask>,nousertaint``
+ and any of the ORed together values being written to ``tainted`` matches a
+ bit in the bitmask declared on panic_on_taint.
+ See :doc:`/admin-guide/kernel-parameters` for more details on that particular
+ kernel command line option and its optional ``nousertaint`` switch.
threads-max
===========
diff --git a/Documentation/admin-guide/sysrq.rst b/Documentation/admin-guide/sysrq.rst
index a46209f4636c..e6424d8c5846 100644
--- a/Documentation/admin-guide/sysrq.rst
+++ b/Documentation/admin-guide/sysrq.rst
@@ -231,13 +231,13 @@ prints help, and C) an action_msg string, that will print right before your
handler is called. Your handler must conform to the prototype in 'sysrq.h'.
After the ``sysrq_key_op`` is created, you can call the kernel function
-``register_sysrq_key(int key, struct sysrq_key_op *op_p);`` this will
+``register_sysrq_key(int key, const struct sysrq_key_op *op_p);`` this will
register the operation pointed to by ``op_p`` at table key 'key',
if that slot in the table is blank. At module unload time, you must call
-the function ``unregister_sysrq_key(int key, struct sysrq_key_op *op_p)``, which
-will remove the key op pointed to by 'op_p' from the key 'key', if and only if
-it is currently registered in that slot. This is in case the slot has been
-overwritten since you registered it.
+the function ``unregister_sysrq_key(int key, const struct sysrq_key_op *op_p)``,
+which will remove the key op pointed to by 'op_p' from the key 'key', if and
+only if it is currently registered in that slot. This is in case the slot has
+been overwritten since you registered it.
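+
+A minimal sketch of such a registration, assuming the prototypes currently in
+'include/linux/sysrq.h' (the key, messages and handler body are made up for
+illustration)::
+
+  #include <linux/sysrq.h>
+  #include <linux/printk.h>
+
+  static void sysrq_handle_example(int key)
+  {
+      pr_info("example sysrq handler invoked for key %d\n", key);
+  }
+
+  static const struct sysrq_key_op example_sysrq_op = {
+      .handler    = sysrq_handle_example,
+      .help_msg   = "example(x)",
+      .action_msg = "Running example sysrq handler",
+  };
+
+  /* at module init time */
+  register_sysrq_key('x', &example_sysrq_op);
+
+  /* at module unload time */
+  unregister_sysrq_key('x', &example_sysrq_op);
+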
The Magic SysRQ system works by registering key operations against a key op
lookup table, which is defined in 'drivers/tty/sysrq.c'. This key table has
diff --git a/Documentation/admin-guide/unicode.rst b/Documentation/admin-guide/unicode.rst
index 7425a3351321..290fe83ebe82 100644
--- a/Documentation/admin-guide/unicode.rst
+++ b/Documentation/admin-guide/unicode.rst
@@ -114,7 +114,7 @@ Unicode practice.
This range is now officially managed by the ConScript Unicode
Registry. The normative reference is at:
- http://www.evertype.com/standards/csur/klingon.html
+ https://www.evertype.com/standards/csur/klingon.html
Klingon has an alphabet of 26 characters, a positional numeric writing
system with 10 digits, and is written left-to-right, top-to-bottom.
@@ -178,7 +178,7 @@ fictional and artificial scripts has been established by John Cowan
<jcowan@reutershealth.com> and Michael Everson <everson@evertype.com>.
The ConScript Unicode Registry is accessible at:
- http://www.evertype.com/standards/csur/
+ https://www.evertype.com/standards/csur/
The ranges used fall at the low end of the End User Zone and can hence
not be normatively assigned, but it is recommended that people who
diff --git a/Documentation/arm/microchip.rst b/Documentation/arm/microchip.rst
index 05e5f2dfb814..9c013299fd3b 100644
--- a/Documentation/arm/microchip.rst
+++ b/Documentation/arm/microchip.rst
@@ -192,7 +192,7 @@ Device Tree files and Device Tree bindings that apply to AT91 SoCs and boards ar
considered as "Unstable". To be completely clear, any at91 binding can change at
any time. So, be sure to use a Device Tree Binary and a Kernel Image generated from
the same source tree.
-Please refer to the Documentation/devicetree/bindings/ABI.txt file for a
+Please refer to the Documentation/devicetree/bindings/ABI.rst file for a
definition of a "Stable" binding/ABI.
This statement will be removed by AT91 MAINTAINERS when appropriate.
diff --git a/Documentation/conf.py b/Documentation/conf.py
index f6a1bc07c410..c503188880d9 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -538,7 +538,7 @@ epub_exclude_files = ['search.html']
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
-# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
+# See the Sphinx chapter of https://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst
index 2e939ff10b86..6068266dd303 100644
--- a/Documentation/core-api/pin_user_pages.rst
+++ b/Documentation/core-api/pin_user_pages.rst
@@ -148,23 +148,46 @@ NOTE: Some pages, such as DAX pages, cannot be pinned with longterm pins. That's
because DAX pages do not have a separate page cache, and so "pinning" implies
locking down file system blocks, which is not (yet) supported in that way.
-CASE 3: Hardware with page faulting support
--------------------------------------------
-Here, a well-written driver doesn't normally need to pin pages at all. However,
-if the driver does choose to do so, it can register MMU notifiers for the range,
-and will be called back upon invalidation. Either way (avoiding page pinning, or
-using MMU notifiers to unpin upon request), there is proper synchronization with
-both filesystem and mm (page_mkclean(), munmap(), etc).
-
-Therefore, neither flag needs to be set.
-
-In this case, ideally, neither get_user_pages() nor pin_user_pages() should be
-called. Instead, the software should be written so that it does not pin pages.
-This allows mm and filesystems to operate more efficiently and reliably.
+CASE 3: MMU notifier registration, with or without page faulting hardware
+-------------------------------------------------------------------------
+Device drivers can pin pages via get_user_pages*(), and register for mmu
+notifier callbacks for the memory range. Then, upon receiving a notifier
+"invalidate range" callback , stop the device from using the range, and unpin
+the pages. There may be other possible schemes, such as for example explicitly
+synchronizing against pending IO, that accomplish approximately the same thing.
+
+Or, if the hardware supports replayable page faults, then the device driver can
+avoid pinning entirely (this is ideal), as follows: register for mmu notifier
+callbacks as above, but instead of stopping the device and unpinning in the
+callback, simply remove the range from the device's page tables.
+
+Either way, as long as the driver unpins the pages upon mmu notifier callback,
+then there is proper synchronization with both filesystem and mm
+(page_mkclean(), munmap(), etc). Therefore, neither flag needs to be set.
CASE 4: Pinning for struct page manipulation only
-------------------------------------------------
-Here, normal GUP calls are sufficient, so neither flag needs to be set.
+If only struct page data (as opposed to the actual memory contents that a page
+is tracking) is affected, then normal GUP calls are sufficient, and neither flag
+needs to be set.
+
+CASE 5: Pinning in order to write to the data within the page
+-------------------------------------------------------------
+Even though neither DMA nor Direct IO is involved, just a simple case of "pin,
+write to a page's data, unpin" can cause a problem. Case 5 may be considered a
+superset of Case 1, plus Case 2, plus anything that invokes that pattern. In
+other words, if the code is neither Case 1 nor Case 2, it may still require
+FOLL_PIN, for patterns like this:
+
+Correct (uses FOLL_PIN calls):
+ pin_user_pages()
+ write to the data within the pages
+ unpin_user_pages()
+
+INCORRECT (uses FOLL_GET calls):
+ get_user_pages()
+ write to the data within the pages
+ put_page()
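+
+A minimal sketch of the correct pattern above, assuming the caller already
+knows the user virtual address and page count (``user_addr`` and ``NR_PAGES``
+are placeholders, and error handling is trimmed)::
+
+  struct page *pages[NR_PAGES];
+  int rc;
+
+  rc = pin_user_pages_fast(user_addr, NR_PAGES, FOLL_WRITE, pages);
+  if (rc <= 0)
+      return rc ? rc : -EFAULT;
+
+  /* ... write to the data within the pinned pages ... */
+
+  /* Mark the pages dirty and drop the FOLL_PIN references. */
+  unpin_user_pages_dirty_lock(pages, rc, true);
+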
page_maybe_dma_pinned(): the whole point of pinning
===================================================
diff --git a/Documentation/core-api/rbtree.rst b/Documentation/core-api/rbtree.rst
index 523d54b60087..6b88837fbf82 100644
--- a/Documentation/core-api/rbtree.rst
+++ b/Documentation/core-api/rbtree.rst
@@ -36,10 +36,10 @@ This document covers use of the Linux rbtree implementation. For more
information on the nature and implementation of Red Black Trees, see:
Linux Weekly News article on red-black trees
- http://lwn.net/Articles/184495/
+ https://lwn.net/Articles/184495/
Wikipedia entry on red-black trees
- http://en.wikipedia.org/wiki/Red-black_tree
+ https://en.wikipedia.org/wiki/Red-black_tree
Linux implementation of red-black trees
---------------------------------------
diff --git a/Documentation/dev-tools/coccinelle.rst b/Documentation/dev-tools/coccinelle.rst
index 00a3409b0c28..70274c3f5f5a 100644
--- a/Documentation/dev-tools/coccinelle.rst
+++ b/Documentation/dev-tools/coccinelle.rst
@@ -14,7 +14,7 @@ many uses in kernel development, including the application of complex,
tree-wide patches and detection of problematic programming patterns.
Getting Coccinelle
--------------------
+------------------
The semantic patches included in the kernel use features and options
which are provided by Coccinelle version 1.0.0-rc11 and above.
@@ -56,7 +56,7 @@ found at:
https://github.com/coccinelle/coccinelle/blob/master/install.txt
Supplemental documentation
----------------------------
+--------------------------
For supplemental documentation refer to the wiki:
@@ -128,7 +128,7 @@ To enable verbose messages set the V= variable, for example::
make coccicheck MODE=report V=1
Coccinelle parallelization
----------------------------
+--------------------------
By default, coccicheck tries to run as parallel as possible. To change
the parallelism, set the J= variable. For example, to run across 4 CPUs::
@@ -333,7 +333,7 @@ as an example if requiring at least Coccinelle >= 1.0.5::
// Requires: 1.0.5
Proposing new semantic patches
--------------------------------
+------------------------------
New semantic patches can be proposed and submitted by kernel
developers. For sake of clarity, they should be organized in the
diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst
index 19df79286f00..4756f6b3a04e 100644
--- a/Documentation/dev-tools/gdb-kernel-debugging.rst
+++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
@@ -24,7 +24,7 @@ Setup
- Create a virtual Linux machine for QEMU/KVM (see www.linux-kvm.org and
www.qemu.org for more details). For cross-development,
- http://landley.net/aboriginal/bin keeps a pool of machine images and
+ https://landley.net/aboriginal/bin keeps a pool of machine images and
toolchains that can be helpful to start from.
- Build the kernel with CONFIG_GDB_SCRIPTS enabled, but leave
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index 09dee10d2592..f7809c7b1ba9 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -21,6 +21,7 @@ whole; patches welcome!
kasan
ubsan
kmemleak
+ kcsan
gdb-kernel-debugging
kgdb
kselftest
diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst
index 1c4e1825d769..8548b0b04e43 100644
--- a/Documentation/dev-tools/kcov.rst
+++ b/Documentation/dev-tools/kcov.rst
@@ -217,14 +217,15 @@ This allows to collect coverage from two types of kernel background
threads: the global ones, that are spawned during kernel boot in a limited
number of instances (e.g. one USB hub_event() worker thread is spawned per
USB HCD); and the local ones, that are spawned when a user interacts with
-some kernel interface (e.g. vhost workers).
+some kernel interface (e.g. vhost workers); as well as from soft
+interrupts.
-To enable collecting coverage from a global background thread, a unique
-global handle must be assigned and passed to the corresponding
-kcov_remote_start() call. Then a userspace process can pass a list of such
-handles to the KCOV_REMOTE_ENABLE ioctl in the handles array field of the
-kcov_remote_arg struct. This will attach the used kcov device to the code
-sections, that are referenced by those handles.
+To enable collecting coverage from a global background thread or from a
+softirq, a unique global handle must be assigned and passed to the
+corresponding kcov_remote_start() call. Then a userspace process can pass
+a list of such handles to the KCOV_REMOTE_ENABLE ioctl in the handles
+array field of the kcov_remote_arg struct. This will attach the used kcov
+device to the code sections that are referenced by those handles.
Since there might be many local background threads spawned from different
userspace processes, we can't use a single global handle per annotation.
@@ -242,7 +243,7 @@ handles as they don't belong to a particular subsystem. The bytes 4-7 are
currently reserved and must be zero. In the future the number of bytes
used for the subsystem or handle ids might be increased.
-When a particular userspace proccess collects coverage by via a common
+When a particular userspace process collects coverage via a common
handle, kcov will collect coverage for each code section that is annotated
to use the common handle obtained as kcov_handle from the current
task_struct. However non common handles allow to collect coverage
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
new file mode 100644
index 000000000000..ce4bbd918648
--- /dev/null
+++ b/Documentation/dev-tools/kcsan.rst
@@ -0,0 +1,321 @@
+The Kernel Concurrency Sanitizer (KCSAN)
+========================================
+
+The Kernel Concurrency Sanitizer (KCSAN) is a dynamic race detector, which
+relies on compile-time instrumentation, and uses a watchpoint-based sampling
+approach to detect races. KCSAN's primary purpose is to detect `data races`_.
+
+Usage
+-----
+
+KCSAN requires Clang version 11 or later.
+
+To enable KCSAN, configure the kernel with::
+
+  CONFIG_KCSAN=y
+
+KCSAN provides several other configuration options to customize behaviour (see
+the respective help text in ``lib/Kconfig.kcsan`` for more info).
+
+Error reports
+~~~~~~~~~~~~~
+
+A typical data race report looks like this::
+
+ ==================================================================
+ BUG: KCSAN: data-race in generic_permission / kernfs_refresh_inode
+
+ write to 0xffff8fee4c40700c of 4 bytes by task 175 on cpu 4:
+ kernfs_refresh_inode+0x70/0x170
+ kernfs_iop_permission+0x4f/0x90
+ inode_permission+0x190/0x200
+ link_path_walk.part.0+0x503/0x8e0
+ path_lookupat.isra.0+0x69/0x4d0
+ filename_lookup+0x136/0x280
+ user_path_at_empty+0x47/0x60
+ vfs_statx+0x9b/0x130
+ __do_sys_newlstat+0x50/0xb0
+ __x64_sys_newlstat+0x37/0x50
+ do_syscall_64+0x85/0x260
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ read to 0xffff8fee4c40700c of 4 bytes by task 166 on cpu 6:
+ generic_permission+0x5b/0x2a0
+ kernfs_iop_permission+0x66/0x90
+ inode_permission+0x190/0x200
+ link_path_walk.part.0+0x503/0x8e0
+ path_lookupat.isra.0+0x69/0x4d0
+ filename_lookup+0x136/0x280
+ user_path_at_empty+0x47/0x60
+ do_faccessat+0x11a/0x390
+ __x64_sys_access+0x3c/0x50
+ do_syscall_64+0x85/0x260
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 6 PID: 166 Comm: systemd-journal Not tainted 5.3.0-rc7+ #1
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ ==================================================================
+
+The header of the report provides a short summary of the functions involved in
+the race. It is followed by the access types and stack traces of the 2 threads
+involved in the data race.
+
+The other less common type of data race report looks like this::
+
+ ==================================================================
+ BUG: KCSAN: data-race in e1000_clean_rx_irq+0x551/0xb10
+
+ race at unknown origin, with read to 0xffff933db8a2ae6c of 1 bytes by interrupt on cpu 0:
+ e1000_clean_rx_irq+0x551/0xb10
+ e1000_clean+0x533/0xda0
+ net_rx_action+0x329/0x900
+ __do_softirq+0xdb/0x2db
+ irq_exit+0x9b/0xa0
+ do_IRQ+0x9c/0xf0
+ ret_from_intr+0x0/0x18
+ default_idle+0x3f/0x220
+ arch_cpu_idle+0x21/0x30
+ do_idle+0x1df/0x230
+ cpu_startup_entry+0x14/0x20
+ rest_init+0xc5/0xcb
+ arch_call_rest_init+0x13/0x2b
+ start_kernel+0x6db/0x700
+
+ Reported by Kernel Concurrency Sanitizer on:
+ CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.3.0-rc7+ #2
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ ==================================================================
+
+This report is generated where it was not possible to determine the other
+racing thread, but a race was inferred due to the data value of the watched
+memory location having changed. These can occur either due to missing
+instrumentation or e.g. DMA accesses. These reports will only be generated if
+``CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=y`` (selected by default).
+
+Selective analysis
+~~~~~~~~~~~~~~~~~~
+
+It may be desirable to disable data race detection for specific accesses,
+functions, compilation units, or entire subsystems. For static blacklisting,
+the below options are available:
+
+* KCSAN understands the ``data_race(expr)`` annotation, which tells KCSAN that
+ any data races due to accesses in ``expr`` should be ignored, and that the
+ resulting behaviour when a data race is encountered is deemed safe (see the
+ sketch after this list).
+
+* Disabling data race detection for entire functions can be accomplished by
+ using the function attribute ``__no_kcsan``::
+
+ __no_kcsan
+ void foo(void) {
+ ...
+
+ To dynamically limit for which functions to generate reports, see the
+ `DebugFS interface`_ blacklist/whitelist feature.
+
+ For ``__always_inline`` functions, replace ``__always_inline`` with
+ ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
+
+ static __no_kcsan_or_inline void foo(void) {
+ ...
+
+* To disable data race detection for a particular compilation unit, add to the
+ ``Makefile``::
+
+ KCSAN_SANITIZE_file.o := n
+
+* To disable data race detection for all compilation units listed in a
+ ``Makefile``, add to the respective ``Makefile``::
+
+ KCSAN_SANITIZE := n
+
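+As a sketch of the first two options above (``total_events`` and both
+functions are made up for illustration)::
+
+  #include <linux/compiler.h>
+  #include <linux/printk.h>
+
+  static unsigned long total_events;
+
+  /* Racy diagnostic read: any data race on total_events here is tolerated. */
+  void print_stats(void)
+  {
+      pr_info("handled %lu events\n", data_race(total_events));
+  }
+
+  /* Exclude this helper from KCSAN instrumentation entirely. */
+  __no_kcsan
+  void count_event(void)
+  {
+      total_events++;
+  }
+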
+Furthermore, it is possible to tell KCSAN to show or hide entire classes of
+data races, depending on preferences. These can be changed via the following
+Kconfig options:
+
+* ``CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY``: If enabled and a conflicting write
+ is observed via a watchpoint, but the data value of the memory location was
+ observed to remain unchanged, do not report the data race.
+
+* ``CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC``: Assume that plain aligned writes
+ up to word size are atomic by default. Assumes that such writes are not
+ subject to unsafe compiler optimizations resulting in data races. The option
+ causes KCSAN to not report data races due to conflicts where the only plain
+ accesses are aligned writes up to word size.
+
+DebugFS interface
+~~~~~~~~~~~~~~~~~
+
+The file ``/sys/kernel/debug/kcsan`` provides the following interface:
+
+* Reading ``/sys/kernel/debug/kcsan`` returns various runtime statistics.
+
+* Writing ``on`` or ``off`` to ``/sys/kernel/debug/kcsan`` allows turning KCSAN
+ on or off, respectively.
+
+* Writing ``!some_func_name`` to ``/sys/kernel/debug/kcsan`` adds
+ ``some_func_name`` to the report filter list, which (by default) blacklists
+ reporting data races where either one of the top stack frames is a function
+ in the list.
+
+* Writing either ``blacklist`` or ``whitelist`` to ``/sys/kernel/debug/kcsan``
+ changes the report filtering behaviour. For example, the blacklist feature
+ can be used to silence frequently occurring data races; the whitelist feature
+ can help with reproduction and testing of fixes.
+
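+For example, a test harness might restrict reports to a single function of
+interest by switching to whitelist mode (the function name below is made up,
+and shell redirects work just as well)::
+
+  #include <stdio.h>
+
+  static int kcsan_ctl(const char *cmd)
+  {
+      FILE *f = fopen("/sys/kernel/debug/kcsan", "w");
+
+      if (!f)
+          return -1;
+      fputs(cmd, f);
+      return fclose(f);
+  }
+
+  int main(void)
+  {
+      kcsan_ctl("whitelist");        /* report only filtered functions */
+      kcsan_ctl("!some_func_name");  /* add the function to the filter */
+      return 0;
+  }
+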
+Tuning performance
+~~~~~~~~~~~~~~~~~~
+
+Core parameters that affect KCSAN's overall performance and bug detection
+ability are exposed as kernel command-line arguments whose defaults can also be
+changed via the corresponding Kconfig options.
+
+* ``kcsan.skip_watch`` (``CONFIG_KCSAN_SKIP_WATCH``): Number of per-CPU memory
+ operations to skip, before another watchpoint is set up. Setting up
+ watchpoints more frequently increases the likelihood that races will be
+ observed. This parameter has the most significant impact on overall system
+ performance and race detection ability.
+
+* ``kcsan.udelay_task`` (``CONFIG_KCSAN_UDELAY_TASK``): For tasks, the
+ microsecond delay to stall execution after a watchpoint has been set up.
+ Larger values increase the window in which a race may be observed.
+
+* ``kcsan.udelay_interrupt`` (``CONFIG_KCSAN_UDELAY_INTERRUPT``): For
+ interrupts, the microsecond delay to stall execution after a watchpoint has
+ been set up. Interrupts have tighter latency requirements, and their delay
+ should generally be smaller than the one chosen for tasks.
+
+They may be tweaked at runtime via ``/sys/module/kcsan/parameters/``.
+
+Data Races
+----------
+
+In an execution, two memory accesses form a *data race* if they *conflict*,
+they happen concurrently in different threads, and at least one of them is a
+*plain access*; they *conflict* if both access the same memory location, and at
+least one is a write. For a more thorough discussion and definition, see `"Plain
+Accesses and Data Races" in the LKMM`_.
+
+.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922
+
+Relationship with the Linux-Kernel Memory Consistency Model (LKMM)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The LKMM defines the propagation and ordering rules of various memory
+operations, which gives developers the ability to reason about concurrent code.
+Ultimately this makes it possible to determine the possible executions of
+concurrent code, and whether that code is free from data races.
+
+KCSAN is aware of *marked atomic operations* (``READ_ONCE``, ``WRITE_ONCE``,
+``atomic_*``, etc.), but is oblivious of any ordering guarantees and simply
+assumes that memory barriers are placed correctly. In other words, KCSAN
+assumes that as long as a plain access is not observed to race with another
+conflicting access, memory operations are correctly ordered.
+
+This means that KCSAN will not report *potential* data races due to missing
+memory ordering. Developers should therefore carefully consider the memory
+ordering requirements that remain unchecked. If, however, missing
+memory ordering (that is observable with a particular compiler and
+architecture) leads to an observable data race (e.g. entering a critical
+section erroneously), KCSAN would report the resulting data race.
+
+Race Detection Beyond Data Races
+--------------------------------
+
+For code with complex concurrency design, race-condition bugs may not always
+manifest as data races. Race conditions occur if concurrently executing
+operations result in unexpected system behaviour. On the other hand, data races
+are defined at the C-language level. The following macros can be used to check
+properties of concurrent code where bugs would not manifest as data races.
+
+.. kernel-doc:: include/linux/kcsan-checks.h
+ :functions: ASSERT_EXCLUSIVE_WRITER ASSERT_EXCLUSIVE_WRITER_SCOPED
+ ASSERT_EXCLUSIVE_ACCESS ASSERT_EXCLUSIVE_ACCESS_SCOPED
+ ASSERT_EXCLUSIVE_BITS
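+
+For instance, a writer that is supposed to be the only one modifying a flags
+word under its lock might be checked as follows (a sketch; ``struct obj`` and
+``OBJ_READY`` are made up)::
+
+  #include <linux/kcsan-checks.h>
+  #include <linux/spinlock.h>
+  #include <linux/bits.h>
+
+  #define OBJ_READY BIT(0)
+
+  struct obj {
+      spinlock_t lock;
+      unsigned long flags;
+  };
+
+  void set_ready(struct obj *o)
+  {
+      spin_lock(&o->lock);
+      /* Assert that no other writer to o->flags can run concurrently. */
+      ASSERT_EXCLUSIVE_WRITER(o->flags);
+      o->flags |= OBJ_READY;
+      spin_unlock(&o->lock);
+  }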
+
+Implementation Details
+----------------------
+
+KCSAN relies on observing that two accesses happen concurrently. Crucially, we
+want to (a) increase the chances of observing races (especially for races that
+manifest rarely), and (b) be able to actually observe them. We can accomplish
+(a) by injecting various delays, and (b) by using address watchpoints (or
+breakpoints).
+
+If we deliberately stall a memory access, while we have a watchpoint for its
+address set up, and then observe the watchpoint to fire, two accesses to the
+same address just raced. Using hardware watchpoints, this is the approach taken
+in `DataCollider
+<http://usenix.org/legacy/events/osdi10/tech/full_papers/Erickson.pdf>`_.
+Unlike DataCollider, KCSAN does not use hardware watchpoints, but instead
+relies on compiler instrumentation and "soft watchpoints".
+
+In KCSAN, watchpoints are implemented using an efficient encoding that stores
+access type, size, and address in a long; the benefits of using "soft
+watchpoints" are portability and greater flexibility. KCSAN then relies on the
+compiler instrumenting plain accesses. For each instrumented plain access:
+
+1. Check if a matching watchpoint exists; if yes, and at least one access is a
+ write, then we encountered a racing access.
+
+2. Periodically, if no matching watchpoint exists, set up a watchpoint and
+ stall for a small randomized delay.
+
+3. Also check the data value before the delay, and re-check the data value
+ after the delay; if the values mismatch, we infer a race of unknown origin.
+
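+A simplified sketch of that per-access logic (this is not the actual
+implementation in ``kernel/kcsan/``; all helper names are invented)::
+
+  void check_access(const volatile void *ptr, size_t size, bool is_write)
+  {
+      struct watchpoint *wp = find_watchpoint(ptr, size);
+
+      if (wp) {
+          /* Step 1: watchpoint hit; report if at least one access writes. */
+          if (is_write || watchpoint_is_write(wp))
+              report_race(ptr, size, is_write);
+      } else if (should_watch()) {      /* only every N memory operations */
+          u64 old = read_value(ptr, size);          /* step 3 */
+          wp = insert_watchpoint(ptr, size, is_write);
+          delay();                                  /* step 2 */
+          if (read_value(ptr, size) != old)
+              report_race_of_unknown_origin(ptr, size);
+          remove_watchpoint(wp);
+      }
+  }
+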
+To detect data races between plain and marked accesses, KCSAN also annotates
+marked accesses, but only to check if a watchpoint exists; i.e. KCSAN never
+sets up a watchpoint on marked accesses. By never setting up watchpoints for
+marked operations, if all accesses to a variable that is accessed concurrently
+are properly marked, KCSAN will never trigger a watchpoint and therefore never
+report the accesses.
+
+Key Properties
+~~~~~~~~~~~~~~
+
+1. **Memory Overhead:** The overall memory overhead is only a few MiB
+ depending on configuration. The current implementation uses a small array of
+ longs to encode watchpoint information, which is negligible.
+
+2. **Performance Overhead:** KCSAN's runtime aims to be minimal, using an
+ efficient watchpoint encoding that does not require acquiring any shared
+ locks in the fast-path. For kernel boot on a system with 8 CPUs:
+
+ - 5.0x slow-down with the default KCSAN config;
+ - 2.8x slow-down from runtime fast-path overhead only (set very large
+ ``KCSAN_SKIP_WATCH`` and unset ``KCSAN_SKIP_WATCH_RANDOMIZE``).
+
+3. **Annotation Overheads:** Minimal annotations are required outside the KCSAN
+ runtime. As a result, maintenance overheads are minimal as the kernel
+ evolves.
+
+4. **Detects Racy Writes from Devices:** Due to checking data values upon
+ setting up watchpoints, racy writes from devices can also be detected.
+
+5. **Memory Ordering:** KCSAN is *not* explicitly aware of the LKMM's ordering
+ rules; this may result in missed data races (false negatives).
+
+6. **Analysis Accuracy:** For observed executions, due to using a sampling
+ strategy, the analysis is *unsound* (false negatives possible), but aims to
+ be complete (no false positives).
+
+Alternatives Considered
+-----------------------
+
+An alternative data race detection approach for the kernel can be found in the
+`Kernel Thread Sanitizer (KTSAN) <https://github.com/google/ktsan/wiki>`_.
+KTSAN is a happens-before data race detector, which explicitly establishes the
+happens-before order between memory operations, which can then be used to
+determine data races as defined in `Data Races`_.
+
+To build a correct happens-before relation, KTSAN must be aware of all ordering
+rules of the LKMM and synchronization primitives. Unfortunately, any omission
+leads to large numbers of false positives, which is especially detrimental in
+the context of the kernel which includes numerous custom synchronization
+mechanisms. To track the happens-before relation, KTSAN's implementation
+requires metadata for each memory location (shadow memory), which for each page
+corresponds to 4 pages of shadow memory, and can translate into overhead of
+tens of GiB on a large system.
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 5d1f56fcd2e7..469d115a95f1 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -151,6 +151,29 @@ note some tests will require root privileges::
$ cd kselftest
$ ./run_kselftest.sh
+Packaging selftests
+===================
+
+In some cases packaging is desired, such as when tests need to run on a
+different system. To package selftests, run::
+
+ $ make -C tools/testing/selftests gen_tar
+
+This generates a tarball in the `INSTALL_PATH/kselftest-packages` directory. By
+default, `.gz` format is used. The tar format can be overridden by specifying
+a `FORMAT` make variable. Any value recognized by `tar's auto-compress`_ option
+is supported, such as::
+
+ $ make -C tools/testing/selftests gen_tar FORMAT=.xz
+
+`make gen_tar` invokes `make install` so you can use it to package a subset of
+tests by using variables specified in the `Running a subset of selftests`_
+section::
+
+ $ make -C tools/testing/selftests gen_tar TARGETS="bpf" FORMAT=.xz
+
+.. _tar's auto-compress: https://www.gnu.org/software/tar/manual/html_node/gzip.html#auto_002dcompress
+
Contributing new tests
======================
diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst
index e1c5ce80ce12..bb112cf70624 100644
--- a/Documentation/dev-tools/kunit/start.rst
+++ b/Documentation/dev-tools/kunit/start.rst
@@ -32,15 +32,17 @@ test targets as well. The ``.kunitconfig`` should also contain any other config
options required by the tests.
A good starting point for a ``.kunitconfig`` is the KUnit defconfig:
+
.. code-block:: bash
cd $PATH_TO_LINUX_REPO
cp arch/um/configs/kunit_defconfig .kunitconfig
You can then add any other Kconfig options you wish, e.g.:
+
.. code-block:: none
- CONFIG_LIST_KUNIT_TEST=y
+ CONFIG_LIST_KUNIT_TEST=y
:doc:`kunit_tool <kunit-tool>` will ensure that all config options set in
``.kunitconfig`` are set in the kernel ``.config`` before running the tests.
@@ -54,8 +56,8 @@ using.
other tools (such as make menuconfig) to adjust other config options.
-Running the tests
------------------
+Running the tests (KUnit Wrapper)
+---------------------------------
To make sure that everything is set up correctly, simply invoke the Python
wrapper from your kernel repo:
@@ -105,8 +107,9 @@ have config options ending in ``_KUNIT_TEST``.
KUnit and KUnit tests can be compiled as modules: in this case the tests in a
module will be run when the module is loaded.
-Running the tests
------------------
+
+Running the tests (w/o KUnit Wrapper)
+-------------------------------------
Build and run your kernel as usual. Test output will be written to the kernel
log in `TAP <https://testanything.org/>`_ format.
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 473a2361ec37..3c3fe8b5fecc 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -595,7 +595,7 @@ able to run one test case per invocation.
KUnit debugfs representation
============================
When kunit test suites are initialized, they create an associated directory
-in /sys/kernel/debug/kunit/<test-suite>. The directory contains one file
+in ``/sys/kernel/debug/kunit/<test-suite>``. The directory contains one file
- results: "cat results" displays results of each test case and the results
of the entire suite for the last test run.
@@ -604,4 +604,4 @@ The debugfs representation is primarily of use when kunit test suites are
run in a native environment, either as modules or builtin. Having a way
to display results like this is valuable as otherwise results can be
intermixed with other events in dmesg output. The maximum size of each
-results file is KUNIT_LOG_SIZE bytes (defined in include/kunit/test.h).
+results file is KUNIT_LOG_SIZE bytes (defined in ``include/kunit/test.h``).
diff --git a/Documentation/devicetree/bindings/ABI.rst b/Documentation/devicetree/bindings/ABI.rst
new file mode 100644
index 000000000000..a885713cf184
--- /dev/null
+++ b/Documentation/devicetree/bindings/ABI.rst
@@ -0,0 +1,42 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Devicetree (DT) ABI
+===================
+
+I. Regarding stable bindings/ABI, we quote from the 2013 ARM mini-summit
+ summary document:
+
+ "That still leaves the question of, what does a stable binding look
+ like? Certainly a stable binding means that a newer kernel will not
+ break on an older device tree, but that doesn't mean the binding is
+ frozen for all time. Grant said there are ways to change bindings that
+ don't result in breakage. For instance, if a new property is added,
+ then default to the previous behaviour if it is missing. If a binding
+ truly needs an incompatible change, then change the compatible string
+ at the same time. The driver can bind against both the old and the
+ new. These guidelines aren't new, but they desperately need to be
+ documented."
+
+II. General binding rules
+
+ 1) Maintainers, don't let perfect be the enemy of good. Don't hold up a
+ binding because it isn't perfect.
+
+ 2) Use specific compatible strings so that if we need to add a feature (DMA)
+ in the future, we can create a new compatible string. See I.
+
+ 3) Bindings can be augmented, but the driver shouldn't break when given
+ the old binding. That is, add additional properties, but don't change the
+ meaning of an existing property. For drivers, default to the original
+ behaviour when a newly added property is missing (see the sketch after
+ these rules).
+
+ 4) Don't submit bindings for staging or unstable. That will be decided by
+ the devicetree maintainers *after* discussion on the mailinglist.
+
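+A driver-side sketch of rule II.3, assuming a hypothetical optional
+"vendor,tx-fifo-depth" property added to an existing binding::
+
+  #include <linux/of.h>
+
+  static u32 example_fifo_depth(struct device_node *np)
+  {
+      u32 depth = 16;  /* original behaviour before the property existed */
+
+      /* Newly added, optional: an absent property keeps the default. */
+      of_property_read_u32(np, "vendor,tx-fifo-depth", &depth);
+      return depth;
+  }
+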
+III. Notes
+
+ 1) This document is intended as a general familiarization with the process as
+ decided at the 2013 Kernel Summit. When in doubt, the current word of the
+ devicetree maintainers overrules this document. In that situation, a patch
+ updating this document would be appreciated.
diff --git a/Documentation/devicetree/bindings/ABI.txt b/Documentation/devicetree/bindings/ABI.txt
deleted file mode 100644
index d25f8d379680..000000000000
--- a/Documentation/devicetree/bindings/ABI.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-
- Devicetree (DT) ABI
-
-I. Regarding stable bindings/ABI, we quote from the 2013 ARM mini-summit
- summary document:
-
- "That still leaves the question of, what does a stable binding look
- like? Certainly a stable binding means that a newer kernel will not
- break on an older device tree, but that doesn't mean the binding is
- frozen for all time. Grant said there are ways to change bindings that
- don't result in breakage. For instance, if a new property is added,
- then default to the previous behaviour if it is missing. If a binding
- truly needs an incompatible change, then change the compatible string
- at the same time. The driver can bind against both the old and the
- new. These guidelines aren't new, but they desperately need to be
- documented."
-
-II. General binding rules
-
- 1) Maintainers, don't let perfect be the enemy of good. Don't hold up a
- binding because it isn't perfect.
-
- 2) Use specific compatible strings so that if we need to add a feature (DMA)
- in the future, we can create a new compatible string. See I.
-
- 3) Bindings can be augmented, but the driver shouldn't break when given
- the old binding. ie. add additional properties, but don't change the
- meaning of an existing property. For drivers, default to the original
- behaviour when a newly added property is missing.
-
- 4) Don't submit bindings for staging or unstable. That will be decided by
- the devicetree maintainers *after* discussion on the mailinglist.
-
-III. Notes
-
- 1) This document is intended as a general familiarization with the process as
- decided at the 2013 Kernel Summit. When in doubt, the current word of the
- devicetree maintainers overrules this document. In that situation, a patch
- updating this document would be appreciated.
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 7782d9985082..a63898954068 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -4,11 +4,19 @@ DT_EXTRACT_EX ?= dt-extract-example
DT_MK_SCHEMA ?= dt-mk-schema
DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u)
+DT_SCHEMA_MIN_VERSION = 2020.5
+
+PHONY += check_dtschema_version
+check_dtschema_version:
+ @{ echo $(DT_SCHEMA_MIN_VERSION); \
+ $(DT_DOC_CHECKER) --version 2>/dev/null || echo 0; } | sort -VC || \
+ { echo "ERROR: dtschema minimum version is v$(DT_SCHEMA_MIN_VERSION)" >&2; false; }
+
quiet_cmd_chk_binding = CHKDT $(patsubst $(srctree)/%,%,$<)
cmd_chk_binding = $(DT_DOC_CHECKER) -u $(srctree)/$(src) $< ; \
$(DT_EXTRACT_EX) $< > $@
-$(obj)/%.example.dts: $(src)/%.yaml FORCE
+$(obj)/%.example.dts: $(src)/%.yaml check_dtschema_version FORCE
$(call if_changed,chk_binding)
# Use full schemas when checking %.example.dts
@@ -37,11 +45,11 @@ override DTC_FLAGS := \
-Wno-avoid_unnecessary_addr_size \
-Wno-graph_child_address
-$(obj)/processed-schema-examples.yaml: $(DT_DOCS) FORCE
+$(obj)/processed-schema-examples.yaml: $(DT_DOCS) check_dtschema_version FORCE
$(call if_changed,mk_schema)
$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG)
-$(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) FORCE
+$(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) check_dtschema_version FORCE
$(call if_changed,mk_schema)
extra-y += processed-schema.yaml
diff --git a/Documentation/devicetree/bindings/arm/altera.yaml b/Documentation/devicetree/bindings/arm/altera.yaml
index 49e0362ddc11..b388c5aa7984 100644
--- a/Documentation/devicetree/bindings/arm/altera.yaml
+++ b/Documentation/devicetree/bindings/arm/altera.yaml
@@ -13,8 +13,8 @@ properties:
compatible:
items:
- enum:
- - altr,socfpga-cyclone5
- - altr,socfpga-arria5
- - altr,socfpga-arria10
+ - altr,socfpga-cyclone5
+ - altr,socfpga-arria5
+ - altr,socfpga-arria10
- const: altr,socfpga
...
diff --git a/Documentation/devicetree/bindings/arm/amlogic.yaml b/Documentation/devicetree/bindings/arm/amlogic.yaml
index f74aba48cec1..378229fa8310 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic.yaml
@@ -17,7 +17,7 @@ description: |+
any time. Be sure to use a device tree binary and a kernel image
generated from the same source tree.
- Please refer to Documentation/devicetree/bindings/ABI.txt for a definition of a
+ Please refer to Documentation/devicetree/bindings/ABI.rst for a definition of a
stable binding/ABI.
properties:
@@ -107,6 +107,7 @@ properties:
- amlogic,p231
- libretech,aml-s905d-pc
- phicomm,n1
+ - smartlabs,sml5442tw
- const: amlogic,s905d
- const: amlogic,meson-gxl
@@ -148,6 +149,8 @@ properties:
- description: Boards with the Amlogic Meson G12B S922X SoC
items:
- enum:
+ - azw,gtking
+ - azw,gtking-pro
- hardkernel,odroid-n2
- khadas,vim3
- ugoos,am6
@@ -159,6 +162,7 @@ properties:
- enum:
- seirobotics,sei610
- khadas,vim3l
+ - hardkernel,odroid-c4
- const: amlogic,sm1
- description: Boards with the Amlogic Meson A1 A113L SoC
diff --git a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
index 66213bd95e6e..6cc74523ebfd 100644
--- a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
@@ -25,7 +25,7 @@ select:
properties:
compatible:
- items:
+ items:
- const: amlogic,meson-gx-ao-secure
- const: syscon
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index dc102c4e4a78..1f293ea24cd8 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -14,7 +14,7 @@ Required properties:
The scmi node with the following properties shall be under the /firmware/ node.
-- compatible : shall be "arm,scmi"
+- compatible : shall be "arm,scmi" or "arm,scmi-smc" for smc/hvc transports
- mboxes: List of phandle and mailbox channel specifiers. It should contain
exactly one or two mailboxes, one for transmitting messages("tx")
and another optional for receiving the notifications("rx") if
@@ -25,6 +25,7 @@ The scmi node with the following properties shall be under the /firmware/ node.
protocol identifier for a given sub-node.
- #size-cells : should be '0' as 'reg' property doesn't have any size
associated with it.
+- arm,smc-id : SMC id required when using smc or hvc transports
Optional properties:
diff --git a/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml b/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
index 8c06a73f716c..a3420c81cf35 100644
--- a/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
@@ -131,26 +131,23 @@ properties:
property, describing the physical location of the children nodes.
0 means motherboard site, while 1 and 2 are daughterboard sites, and
0xf means "sisterboard" which is the site containing the main CPU tile.
- allOf:
- - $ref: '/schemas/types.yaml#/definitions/uint32'
- - minimum: 0
- maximum: 15
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ minimum: 0
+ maximum: 15
arm,vexpress,position:
description: When daughterboards are stacked on one site, their position
in the stack be be described this attribute.
- allOf:
- - $ref: '/schemas/types.yaml#/definitions/uint32'
- - minimum: 0
- maximum: 3
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ minimum: 0
+ maximum: 3
arm,vexpress,dcc:
description: When describing tiles consisting of more than one DCC, its
number can be specified with this attribute.
- allOf:
- - $ref: '/schemas/types.yaml#/definitions/uint32'
- - minimum: 0
- maximum: 3
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ minimum: 0
+ maximum: 3
patternProperties:
"^bus@[0-9a-f]+$":
@@ -162,8 +159,7 @@ patternProperties:
"simple-bus". If the compatible is placed in the "motherboard" node,
it is stricter and always has two compatibles.
type: object
- allOf:
- - $ref: '/schemas/simple-bus.yaml'
+ $ref: '/schemas/simple-bus.yaml'
properties:
compatible:
@@ -195,11 +191,11 @@ patternProperties:
- const: simple-bus
arm,v2m-memory-map:
description: This describes the memory map type.
- allOf:
- - $ref: '/schemas/types.yaml#/definitions/string'
- - enum:
- - rs1
- - rs2
+ $ref: '/schemas/types.yaml#/definitions/string'
+ enum:
+ - rs1
+ - rs2
+
required:
- compatible
required:
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.yaml b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
index 0357314076bc..31b0c54fa2cf 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.yaml
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
@@ -82,6 +82,13 @@ properties:
- const: atmel,sama5d2
- const: atmel,sama5
+ - description: Microchip SAMA5D2 Industrial Connectivity Platform
+ items:
+ - const: microchip,sama5d2-icp
+ - const: atmel,sama5d27
+ - const: atmel,sama5d2
+ - const: atmel,sama5
+
- description: SAM9X60-EK board
items:
- const: microchip,sam9x60ek
diff --git a/Documentation/devicetree/bindings/arm/bitmain.yaml b/Documentation/devicetree/bindings/arm/bitmain.yaml
index 0efdb4ac028e..5cd5b36cff2d 100644
--- a/Documentation/devicetree/bindings/arm/bitmain.yaml
+++ b/Documentation/devicetree/bindings/arm/bitmain.yaml
@@ -13,6 +13,6 @@ properties:
compatible:
items:
- enum:
- - bitmain,sophon-edge
+ - bitmain,sophon-edge
- const: bitmain,bm1880
...
diff --git a/Documentation/devicetree/bindings/arm/calxeda/hb-sregs.yaml b/Documentation/devicetree/bindings/arm/calxeda/hb-sregs.yaml
new file mode 100644
index 000000000000..dfdc97083efb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/calxeda/hb-sregs.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/calxeda/hb-sregs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Calxeda Highbank system registers
+
+description: |
+ The Calxeda Highbank system has a block of MMIO registers controlling
+ several generic system aspects. Those can be used to control some power
+ management, they also contain some gate and PLL clocks.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ compatible:
+ const: calxeda,hb-sregs
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ type: object
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ sregs@fff3c000 {
+ compatible = "calxeda,hb-sregs";
+ reg = <0xfff3c000 0x1000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc: oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt
deleted file mode 100644
index 94e642a33db0..000000000000
--- a/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Calxeda Highbank L2 cache ECC
-
-Properties:
-- compatible : Should be "calxeda,hb-sregs-l2-ecc"
-- reg : Address and size for ECC error interrupt clear registers.
-- interrupts : Should be single bit error interrupt, then double bit error
- interrupt.
-
-Example:
-
- sregs@fff3c200 {
- compatible = "calxeda,hb-sregs-l2-ecc";
- reg = <0xfff3c200 0x100>;
- interrupts = <0 71 4 0 72 4>;
- };
diff --git a/Documentation/devicetree/bindings/arm/calxeda/l2ecc.yaml b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.yaml
new file mode 100644
index 000000000000..a9fe01238a88
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/calxeda/l2ecc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Calxeda Highbank L2 cache ECC
+
+description: |
+ Binding for the Calxeda Highbank L2 cache controller ECC device.
+ This does not cover the actual L2 cache controller control registers,
+ but just the error reporting functionality.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ compatible:
+ const: "calxeda,hb-sregs-l2-ecc"
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: single bit error interrupt
+ - description: double bit error interrupt
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ sregs@fff3c200 {
+ compatible = "calxeda,hb-sregs-l2-ecc";
+ reg = <0xfff3c200 0x100>;
+ interrupts = <0 71 4>, <0 72 4>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/coresight-cti.yaml b/Documentation/devicetree/bindings/arm/coresight-cti.yaml
index 3db3642bd532..17df5cd12d8d 100644
--- a/Documentation/devicetree/bindings/arm/coresight-cti.yaml
+++ b/Documentation/devicetree/bindings/arm/coresight-cti.yaml
@@ -140,16 +140,14 @@ patternProperties:
maxItems: 1
arm,trig-in-sigs:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 32
description:
List of CTI trigger in signal numbers in use by a trig-conns node.
arm,trig-in-types:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 32
description:
@@ -159,16 +157,14 @@ patternProperties:
completely, then the types will default to GEN_IO.
arm,trig-out-sigs:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 32
description:
List of CTI trigger out signal numbers in use by a trig-conns node.
arm,trig-out-types:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 32
description:
@@ -178,8 +174,7 @@ patternProperties:
or omitted completely, then the types will default to GEN_IO.
arm,trig-filters:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
maxItems: 32
description:
@@ -187,8 +182,7 @@ patternProperties:
active, unless filtering is disabled on the driver.
arm,trig-conn-name:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string
+ $ref: /schemas/types.yaml#/definitions/string
description:
Defines a connection name that will be displayed, if the cpu or
arm,cs-dev-assoc properties are not being used in this connection.
@@ -301,7 +295,7 @@ examples:
- |
cti@20110000 {
compatible = "arm,coresight-cti", "arm,primecell";
- reg = <0 0x20110000 0 0x1000>;
+ reg = <0x20110000 0x1000>;
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index a01814765ddb..40f692c846f0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -167,53 +167,53 @@ properties:
- qcom,kryo260
- qcom,kryo280
- qcom,kryo385
+ - qcom,kryo468
- qcom,kryo485
- qcom,scorpion
enable-method:
- allOf:
- - $ref: '/schemas/types.yaml#/definitions/string'
- - oneOf:
- # On ARM v8 64-bit this property is required
- - enum:
- - psci
- - spin-table
- # On ARM 32-bit systems this property is optional
- - enum:
- - actions,s500-smp
- - allwinner,sun6i-a31
- - allwinner,sun8i-a23
- - allwinner,sun9i-a80-smp
- - allwinner,sun8i-a83t-smp
- - amlogic,meson8-smp
- - amlogic,meson8b-smp
- - arm,realview-smp
- - aspeed,ast2600-smp
- - brcm,bcm11351-cpu-method
- - brcm,bcm23550
- - brcm,bcm2836-smp
- - brcm,bcm63138
- - brcm,bcm-nsp-smp
- - brcm,brahma-b15
- - marvell,armada-375-smp
- - marvell,armada-380-smp
- - marvell,armada-390-smp
- - marvell,armada-xp-smp
- - marvell,98dx3236-smp
- - marvell,mmp3-smp
- - mediatek,mt6589-smp
- - mediatek,mt81xx-tz-smp
- - qcom,gcc-msm8660
- - qcom,kpss-acc-v1
- - qcom,kpss-acc-v2
- - renesas,apmu
- - renesas,r9a06g032-smp
- - rockchip,rk3036-smp
- - rockchip,rk3066-smp
- - socionext,milbeaut-m10v-smp
- - ste,dbx500-smp
- - ti,am3352
- - ti,am4372
+ $ref: '/schemas/types.yaml#/definitions/string'
+ oneOf:
+ # On ARM v8 64-bit this property is required
+ - enum:
+ - psci
+ - spin-table
+ # On ARM 32-bit systems this property is optional
+ - enum:
+ - actions,s500-smp
+ - allwinner,sun6i-a31
+ - allwinner,sun8i-a23
+ - allwinner,sun9i-a80-smp
+ - allwinner,sun8i-a83t-smp
+ - amlogic,meson8-smp
+ - amlogic,meson8b-smp
+ - arm,realview-smp
+ - aspeed,ast2600-smp
+ - brcm,bcm11351-cpu-method
+ - brcm,bcm23550
+ - brcm,bcm2836-smp
+ - brcm,bcm63138
+ - brcm,bcm-nsp-smp
+ - brcm,brahma-b15
+ - marvell,armada-375-smp
+ - marvell,armada-380-smp
+ - marvell,armada-390-smp
+ - marvell,armada-xp-smp
+ - marvell,98dx3236-smp
+ - marvell,mmp3-smp
+ - mediatek,mt6589-smp
+ - mediatek,mt81xx-tz-smp
+ - qcom,gcc-msm8660
+ - qcom,kpss-acc-v1
+ - qcom,kpss-acc-v2
+ - renesas,apmu
+ - renesas,r9a06g032-smp
+ - rockchip,rk3036-smp
+ - rockchip,rk3066-smp
+ - socionext,milbeaut-m10v-smp
+ - ste,dbx500-smp
+ - ti,am3352
+ - ti,am4372
cpu-release-addr:
$ref: '/schemas/types.yaml#/definitions/uint64'
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index 623fedf12180..715047444391 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -108,7 +108,8 @@ This binding uses the i.MX common pinctrl binding[3].
Required properties:
- compatible: Should be one of:
"fsl,imx8qm-iomuxc",
- "fsl,imx8qxp-iomuxc".
+ "fsl,imx8qxp-iomuxc",
+ "fsl,imx8dxl-iomuxc".
Required properties for Pinctrl sub nodes:
- fsl,pins: Each entry consists of 3 integers which represents
@@ -116,7 +117,8 @@ Required properties for Pinctrl sub nodes:
integers <pin_id mux_mode> are specified using a
PIN_FUNC_ID macro, which can be found in
<dt-bindings/pinctrl/pads-imx8qm.h>,
- <dt-bindings/pinctrl/pads-imx8qxp.h>.
+ <dt-bindings/pinctrl/pads-imx8qxp.h>,
+ <dt-bindings/pinctrl/pads-imx8dxl.h>.
The last integer CONFIG is the pad setting value like
pull-up on this pin.
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index cd3fbe7e3948..05906e291e38 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -119,6 +119,7 @@ properties:
- fsl,imx6q-sabreauto
- fsl,imx6q-sabrelite
- fsl,imx6q-sabresd
+ - kontron,imx6q-samx6i # Kontron i.MX6 Dual/Quad SMARC Module
- technexion,imx6q-pico-dwarf # TechNexion i.MX6Q Pico-Dwarf
- technexion,imx6q-pico-hobbit # TechNexion i.MX6Q Pico-Hobbit
- technexion,imx6q-pico-nymph # TechNexion i.MX6Q Pico-Nymph
@@ -170,6 +171,7 @@ properties:
- emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base
- fsl,imx6dl-sabreauto # i.MX6 DualLite/Solo SABRE Automotive Board
- fsl,imx6dl-sabresd # i.MX6 DualLite SABRE Smart Device Board
+ - kontron,imx6dl-samx6i # Kontron i.MX6 Solo SMARC Module
- technexion,imx6dl-pico-dwarf # TechNexion i.MX6DL Pico-Dwarf
- technexion,imx6dl-pico-hobbit # TechNexion i.MX6DL Pico-Hobbit
- technexion,imx6dl-pico-nymph # TechNexion i.MX6DL Pico-Nymph
@@ -177,7 +179,9 @@ properties:
- technologic,imx6dl-ts4900
- technologic,imx6dl-ts7970
- toradex,colibri_imx6dl # Colibri iMX6 Module
+ - toradex,colibri_imx6dl-v1_1 # Colibri iMX6 Module V1.1
- toradex,colibri_imx6dl-eval-v3 # Colibri iMX6 Module on Colibri Evaluation Board V3
+ - toradex,colibri_imx6dl-v1_1-eval-v3 # Colibri iMX6 Module V1.1 on Colibri Evaluation Board V3
- ysoft,imx6dl-yapp4-draco # i.MX6 DualLite Y Soft IOTA Draco board
- ysoft,imx6dl-yapp4-hydra # i.MX6 DualLite Y Soft IOTA Hydra board
- ysoft,imx6dl-yapp4-ursa # i.MX6 Solo Y Soft IOTA Ursa board
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.yaml b/Documentation/devicetree/bindings/arm/l2c2x0.yaml
index 5d1d50eea26e..6b8f4d4fa580 100644
--- a/Documentation/devicetree/bindings/arm/l2c2x0.yaml
+++ b/Documentation/devicetree/bindings/arm/l2c2x0.yaml
@@ -70,43 +70,39 @@ properties:
description: Cycles of latency for Data RAM accesses. Specifies 3 cells of
read, write and setup latencies. Minimum valid values are 1. Controllers
without setup latency control should use a value of 0.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 2
- maxItems: 3
- items:
- minimum: 0
- maximum: 8
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 8
arm,tag-latency:
description: Cycles of latency for Tag RAM accesses. Specifies 3 cells of
read, write and setup latencies. Controllers without setup latency control
should use 0. Controllers without separate read and write Tag RAM latency
values should only use the first cell.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 1
- maxItems: 3
- items:
- minimum: 0
- maximum: 8
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 8
arm,dirty-latency:
description: Cycles of latency for Dirty RAMs. This is a single cell.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 1
- maximum: 8
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 8
arm,filter-ranges:
description: <start length> Starting address and length of window to
filter. Addresses in the filter window are directed to the M1 port. Other
addresses will go to the M0 port.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- minItems: 2
- maxItems: 2
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minItems: 2
+ maxItems: 2
arm,io-coherent:
description: indicates that the system is operating in an hardware
@@ -131,36 +127,31 @@ properties:
arm,double-linefill:
description: Override double linefill enable setting. Enable if
non-zero, disable if zero.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,double-linefill-incr:
description: Override double linefill on INCR read. Enable
if non-zero, disable if zero.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,double-linefill-wrap:
description: Override double linefill on WRAP read. Enable
if non-zero, disable if zero.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,prefetch-drop:
description: Override prefetch drop enable setting. Enable if non-zero,
disable if zero.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,prefetch-offset:
description: Override prefetch offset value.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1, 2, 3, 4, 5, 6, 7, 15, 23, 31 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3, 4, 5, 6, 7, 15, 23, 31]
arm,shared-override:
description: The default behavior of the L220 or PL310 cache
@@ -193,35 +184,31 @@ properties:
description: |
Data prefetch. Value: <0> (forcibly disable), <1>
(forcibly enable), property absent (retain settings set by firmware)
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
prefetch-instr:
description: |
Instruction prefetch. Value: <0> (forcibly disable),
<1> (forcibly enable), property absent (retain settings set by
firmware)
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,dynamic-clock-gating:
description: |
L2 dynamic clock gating. Value: <0> (forcibly
disable), <1> (forcibly enable), property absent (OS specific behavior,
preferably retain firmware settings)
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,standby-mode:
description: L2 standby mode enable. Value <0> (forcibly disable),
<1> (forcibly enable), property absent (OS specific behavior,
preferably retain firmware settings)
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
arm,early-bresp-disable:
description: Disable the CA9 optimization Early BRESP (PL310)
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index 4043c5046441..abc544dde692 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -84,6 +84,28 @@ properties:
- enum:
- mediatek,mt8135-evbp1
- const: mediatek,mt8135
+ - description: Google Elm (Acer Chromebook R13)
+ items:
+ - const: google,elm-rev8
+ - const: google,elm-rev7
+ - const: google,elm-rev6
+ - const: google,elm-rev5
+ - const: google,elm-rev4
+ - const: google,elm-rev3
+ - const: google,elm
+ - const: mediatek,mt8173
+ - description: Google Hana (Lenovo Chromebook N23 Yoga, C330, 300e,...)
+ items:
+ - const: google,hana-rev6
+ - const: google,hana-rev5
+ - const: google,hana-rev4
+ - const: google,hana-rev3
+ - const: google,hana
+ - const: mediatek,mt8173
+ - description: Google Hana rev7 (Poin2 Chromebook 11C)
+ items:
+ - const: google,hana-rev7
+ - const: mediatek,mt8173
- items:
- enum:
- mediatek,mt8173-evb
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
index ff000ccade78..bd7a0fa5801b 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -8,6 +8,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2701-apmixedsys"
- "mediatek,mt2712-apmixedsys", "syscon"
+ - "mediatek,mt6765-apmixedsys", "syscon"
- "mediatek,mt6779-apmixedsys", "syscon"
- "mediatek,mt6797-apmixedsys"
- "mediatek,mt7622-apmixedsys"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
index e4ca7b703123..38309db115f5 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2701-audsys", "syscon"
+ - "mediatek,mt6765-audsys", "syscon"
- "mediatek,mt6779-audio", "syscon"
- "mediatek,mt7622-audsys", "syscon"
- "mediatek,mt7623-audsys", "mediatek,mt2701-audsys", "syscon"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
index 1f4aaa15a37e..a0ce82085ad0 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
@@ -6,6 +6,7 @@ The MediaTek camsys controller provides various clocks to the system.
Required Properties:
- compatible: Should be one of:
+ - "mediatek,mt6765-camsys", "syscon"
- "mediatek,mt6779-camsys", "syscon"
- "mediatek,mt8183-camsys", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
index 2b693e343c56..1e1f00718a7d 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
@@ -8,6 +8,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2701-imgsys", "syscon"
- "mediatek,mt2712-imgsys", "syscon"
+ - "mediatek,mt6765-imgsys", "syscon"
- "mediatek,mt6779-imgsys", "syscon"
- "mediatek,mt6797-imgsys", "syscon"
- "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
index db2f4fd754e7..49a968be1a80 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
@@ -9,6 +9,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2701-infracfg", "syscon"
- "mediatek,mt2712-infracfg", "syscon"
+ - "mediatek,mt6765-infracfg", "syscon"
- "mediatek,mt6779-infracfg_ao", "syscon"
- "mediatek,mt6797-infracfg", "syscon"
- "mediatek,mt7622-infracfg", "syscon"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt
new file mode 100644
index 000000000000..8be5978f388d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mipi0a.txt
@@ -0,0 +1,28 @@
+Mediatek mipi0a (mipi_rx_ana_csi0a) controller
+============================
+
+The Mediatek mipi0a controller provides various clocks
+to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+ - "mediatek,mt6765-mipi0a", "syscon"
+- #clock-cells: Must be 1
+
+The mipi0a controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+The mipi0a controller also uses the common power domain from
+Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+The available power domains are defined in dt-bindings/power/mt*-power.h.
+
+Example:
+
+mipi0a: clock-controller@11c10000 {
+ compatible = "mediatek,mt6765-mipi0a", "syscon";
+ reg = <0 0x11c10000 0 0x1000>;
+ power-domains = <&scpsys MT6765_POWER_DOMAIN_CAM>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
index 301eefbe1618..d8c9108c3b4a 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
@@ -1,13 +1,15 @@
Mediatek mmsys controller
============================
-The Mediatek mmsys controller provides various clocks to the system.
+The Mediatek mmsys system controller provides clock control, routing control,
+and miscellaneous control in the mmsys partition.
Required Properties:
- compatible: Should be one of:
- "mediatek,mt2701-mmsys", "syscon"
- "mediatek,mt2712-mmsys", "syscon"
+ - "mediatek,mt6765-mmsys", "syscon"
- "mediatek,mt6779-mmsys", "syscon"
- "mediatek,mt6797-mmsys", "syscon"
- "mediatek,mt7623-mmsys", "mediatek,mt2701-mmsys", "syscon"
@@ -15,13 +17,13 @@ Required Properties:
- "mediatek,mt8183-mmsys", "syscon"
- #clock-cells: Must be 1
-The mmsys controller uses the common clk binding from
+For the clock control, the mmsys controller uses the common clk binding from
Documentation/devicetree/bindings/clock/clock-bindings.txt
The available clocks are defined in dt-bindings/clock/mt*-clk.h.
Example:
-mmsys: clock-controller@14000000 {
+mmsys: syscon@14000000 {
compatible = "mediatek,mt8173-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml
index 55209a2baedc..e271c4682ebc 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.yaml
@@ -20,6 +20,7 @@ properties:
- enum:
- mediatek,mt2701-pericfg
- mediatek,mt2712-pericfg
+ - mediatek,mt6765-pericfg
- mediatek,mt7622-pericfg
- mediatek,mt7629-pericfg
- mediatek,mt8135-pericfg
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
index 0293d693ce0c..9b0394cbbdc9 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
@@ -8,6 +8,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2701-topckgen"
- "mediatek,mt2712-topckgen", "syscon"
+ - "mediatek,mt6765-topckgen", "syscon"
- "mediatek,mt6779-topckgen", "syscon"
- "mediatek,mt6797-topckgen"
- "mediatek,mt7622-topckgen"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt
new file mode 100644
index 000000000000..c877bcc1a5c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vcodecsys.txt
@@ -0,0 +1,27 @@
+Mediatek vcodecsys controller
+============================
+
+The Mediatek vcodecsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+ - "mediatek,mt6765-vcodecsys", "syscon"
+- #clock-cells: Must be 1
+
+The vcodecsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+The vcodecsys controller also uses the common power domain from
+Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+The available power domains are defined in dt-bindings/power/mt*-power.h.
+
+Example:
+
+venc_gcon: clock-controller@17000000 {
+ compatible = "mediatek,mt6765-vcodecsys", "syscon";
+ reg = <0 0x17000000 0 0x10000>;
+ power-domains = <&scpsys MT6765_POWER_DOMAIN_VCODEC>;
+ #clock-cells = <1>;
+};
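A device in the VCODEC domain would then reference one of the provided clocks by index; a minimal sketch (the consumer node, its compatible and the clock index are hypothetical, the venc_gcon label and power domain come from the example above):

    video-codec@17020000 {
        compatible = "vendor,example-encoder";  /* hypothetical consumer */
        reg = <0 0x17020000 0 0x1000>;
        clocks = <&venc_gcon 0>;                /* index as defined in dt-bindings/clock/mt*-clk.h */
        power-domains = <&scpsys MT6765_POWER_DOMAIN_VCODEC>;
    };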
diff --git a/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml b/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml
index 07f39d3eee7e..f7f024910e71 100644
--- a/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml
+++ b/Documentation/devicetree/bindings/arm/nxp/lpc32xx.yaml
@@ -17,9 +17,8 @@ properties:
- nxp,lpc3230
- nxp,lpc3240
- items:
- - enum:
- - ea,ea3250
- - phytec,phy3250
- - const: nxp,lpc3250
-
+ - enum:
+ - ea,ea3250
+ - phytec,phy3250
+ - const: nxp,lpc3250
...
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 9247b58c26fc..8b77cf83a095 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -69,13 +69,11 @@ properties:
method:
description: The method of calling the PSCI firmware.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string-array
- - enum:
- # SMC #0, with the register assignments specified in this binding.
- - smc
- # HVC #0, with the register assignments specified in this binding.
- - hvc
+ $ref: /schemas/types.yaml#/definitions/string-array
+ enum:
+ # SMC #0, with the register assignments specified in this binding.
+ - smc
+ # HVC #0, with the register assignments specified in this binding.
+ - hvc
cpu_suspend:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -107,8 +105,8 @@ properties:
patternProperties:
"^power-domain-":
- allOf:
- - $ref: "../power/power-domain.yaml#"
+ $ref: "../power/power-domain.yaml#"
+
type: object
description: |
ARM systems can have multiple cores, sometimes in an hierarchical
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index 64ddae3bd39f..6031aee0f5a8 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -37,6 +37,8 @@ description: |
msm8994
msm8996
sc7180
+ sdm630
+ sdm660
sdm845
The 'board' element must be one of the following strings:
@@ -155,6 +157,11 @@ properties:
- items:
- enum:
+ - xiaomi,lavender
+ - const: qcom,sdm660
+
+ - items:
+ - enum:
- qcom,ipq6018-cp01-c1
- const: qcom,ipq6018
diff --git a/Documentation/devicetree/bindings/arm/realtek.yaml b/Documentation/devicetree/bindings/arm/realtek.yaml
index ab59de17152d..845f9c76d6f7 100644
--- a/Documentation/devicetree/bindings/arm/realtek.yaml
+++ b/Documentation/devicetree/bindings/arm/realtek.yaml
@@ -14,6 +14,13 @@ properties:
const: '/'
compatible:
oneOf:
+ # RTD1195 SoC based boards
+ - items:
+ - enum:
+ - mele,x1000 # MeLE X1000
+ - realtek,horseradish # Realtek Horseradish EVB
+ - const: realtek,rtd1195
+
# RTD1293 SoC based boards
- items:
- enum:
@@ -25,6 +32,7 @@ properties:
- enum:
- mele,v9 # MeLE V9
- probox2,ava # ProBox2 AVA
+ - xnano,x5 # Xnano X5
- zidoo,x9s # Zidoo X9S
- const: realtek,rtd1295
@@ -33,4 +41,17 @@ properties:
- enum:
- synology,ds418 # Synology DiskStation DS418
- const: realtek,rtd1296
+
+ # RTD1395 SoC based boards
+ - items:
+ - enum:
+ - bananapi,bpi-m4 # Banana Pi BPI-M4
+ - realtek,lion-skin # Realtek Lion Skin EVB
+ - const: realtek,rtd1395
+
+ # RTD1619 SoC based boards
+ - items:
+ - enum:
+ - realtek,mjolnir # Realtek Mjolnir EVB
+ - const: realtek,rtd1619
...
diff --git a/Documentation/devicetree/bindings/arm/renesas,prr.yaml b/Documentation/devicetree/bindings/arm/renesas,prr.yaml
index dd087643a9f8..1f80767da38b 100644
--- a/Documentation/devicetree/bindings/arm/renesas,prr.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas,prr.yaml
@@ -33,5 +33,5 @@ examples:
- |
prr: chipid@ff000044 {
compatible = "renesas,prr";
- reg = <0 0xff000044 0 4>;
+ reg = <0xff000044 4>;
};
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
index 611094d9186b..b7d2e921150a 100644
--- a/Documentation/devicetree/bindings/arm/renesas.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -54,6 +54,16 @@ properties:
- description: RZ/G1H (R8A77420)
items:
+ - enum:
+ # iWave Systems RZ/G1H Qseven System On Module (iW-RainboW-G21M-Qseven)
+ - iwave,g21m
+ - const: renesas,r8a7742
+
+ - items:
+ - enum:
+ # iWave Systems RZ/G1H Qseven Development Platform (iW-RainboW-G21D-Qseven)
+ - iwave,g21d
+ - const: iwave,g21m
- const: renesas,r8a7742
- description: RZ/G1M (R8A77430)
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 715586dea9bb..d4a4045092df 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -358,6 +358,11 @@ properties:
- const: haoyu,marsboard-rk3066
- const: rockchip,rk3066a
+ - description: Hardkernel Odroid Go Advance
+ items:
+ - const: hardkernel,rk3326-odroid-go2
+ - const: rockchip,rk3326
+
- description: Hugsun X99 TV Box
items:
- const: hugsun,x99
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
index 0425d333b50d..f99c0c6df21b 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml
@@ -22,9 +22,8 @@ properties:
Adaptive Supply Voltage bin selection. This can be used
to determine the ASV bin of an SoC if respective information
is missing in the CHIPID registers or in the OTP memory.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1, 2, 3 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
required:
- compatible
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
index 63acd57c4799..eb92f9eefaba 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.yaml
@@ -52,6 +52,7 @@ properties:
items:
- enum:
- insignal,origen # Insignal Origen
+ - samsung,i9100 # Samsung Galaxy S2 (GT-I9100)
- samsung,smdkv310 # Samsung SMDKV310 eval
- samsung,trats # Samsung Tizen Reference
- samsung,universal_c210 # Samsung C210
diff --git a/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml b/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
index 65ad6d8a3c99..6caf1f9be390 100644
--- a/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
+++ b/Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
@@ -17,45 +17,46 @@ properties:
- description: LD4 SoC boards
items:
- enum:
- - socionext,uniphier-ld4-ref
+ - socionext,uniphier-ld4-ref
- const: socionext,uniphier-ld4
- description: Pro4 SoC boards
items:
- enum:
- - socionext,uniphier-pro4-ace
- - socionext,uniphier-pro4-ref
- - socionext,uniphier-pro4-sanji
+ - socionext,uniphier-pro4-ace
+ - socionext,uniphier-pro4-ref
+ - socionext,uniphier-pro4-sanji
- const: socionext,uniphier-pro4
- description: sLD8 SoC boards
items:
- enum:
- - socionext,uniphier-sld8-ref
+ - socionext,uniphier-sld8-ref
- const: socionext,uniphier-sld8
- description: PXs2 SoC boards
items:
- enum:
- - socionext,uniphier-pxs2-gentil
- - socionext,uniphier-pxs2-vodka
+ - socionext,uniphier-pxs2-gentil
+ - socionext,uniphier-pxs2-vodka
- const: socionext,uniphier-pxs2
- description: LD6b SoC boards
items:
- enum:
- - socionext,uniphier-ld6b-ref
+ - socionext,uniphier-ld6b-ref
- const: socionext,uniphier-ld6b
- description: LD11 SoC boards
items:
- enum:
- - socionext,uniphier-ld11-global
- - socionext,uniphier-ld11-ref
+ - socionext,uniphier-ld11-global
+ - socionext,uniphier-ld11-ref
- const: socionext,uniphier-ld11
- description: LD20 SoC boards
items:
- enum:
- - socionext,uniphier-ld20-global
- - socionext,uniphier-ld20-ref
+ - socionext,uniphier-ld20-akebi96
+ - socionext,uniphier-ld20-global
+ - socionext,uniphier-ld20-ref
- const: socionext,uniphier-ld20
- description: PXs3 SoC boards
items:
- enum:
- - socionext,uniphier-pxs3-ref
+ - socionext,uniphier-pxs3-ref
- const: socionext,uniphier-pxs3
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
index 55f7938c4826..9f276bc9efa0 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
@@ -20,7 +20,7 @@ description: |
[2]: https://wiki.st.com/stm32mpu/wiki/STM32MP15_RAM_mapping
allOf:
- - $ref: /schemas/simple-bus.yaml#
+ - $ref: /schemas/simple-bus.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
index baff80197d5a..cf5db5e273f3 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
@@ -14,9 +14,9 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - st,stm32mp157-syscfg
- - const: syscon
+ - enum:
+ - st,stm32mp157-syscfg
+ - const: syscon
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index 1fcf306bd2d1..790e6dd48e34 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -38,6 +38,9 @@ properties:
- items:
- enum:
- arrow,stm32mp157a-avenger96 # Avenger96
+ - lxa,stm32mp157c-mc1
+ - shiratech,stm32mp157a-iot-box # IoT Box
+ - shiratech,stm32mp157a-stinger96 # Stinger96
- st,stm32mp157c-ed1
- st,stm32mp157a-dk1
- st,stm32mp157c-dk2
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index abf2d97fb7ae..87817ff0cd35 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -561,6 +561,11 @@ properties:
- const: olimex,a20-olinuxino-lime
- const: allwinner,sun7i-a20
+ - description: Olimex A20-OlinuXino LIME (with eMMC)
+ items:
+ - const: olimex,a20-olinuxino-lime-emmc
+ - const: allwinner,sun7i-a20
+
- description: Olimex A20-OlinuXino LIME2
items:
- const: olimex,a20-olinuxino-lime2
diff --git a/Documentation/devicetree/bindings/arm/syna.txt b/Documentation/devicetree/bindings/arm/syna.txt
index 2face46a5f64..d8b48f2edf1b 100644
--- a/Documentation/devicetree/bindings/arm/syna.txt
+++ b/Documentation/devicetree/bindings/arm/syna.txt
@@ -13,7 +13,7 @@ considered "unstable". Any Marvell Berlin device tree binding may change at any
time. Be sure to use a device tree binary and a kernel image generated from the
same source tree.
-Please refer to Documentation/devicetree/bindings/ABI.txt for a definition of a
+Please refer to Documentation/devicetree/bindings/ABI.rst for a definition of a
stable binding/ABI.
---------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
index f17bb353f65e..81534d04094b 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
@@ -323,7 +323,7 @@ examples:
tegra_pmc: pmc@7000e400 {
compatible = "nvidia,tegra210-pmc";
- reg = <0x0 0x7000e400 0x0 0x400>;
+ reg = <0x7000e400 0x400>;
clocks = <&tegra_car TEGRA210_CLK_PCLK>, <&clk32k_in>;
clock-names = "pclk", "clk32k_in";
#clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml b/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml
index bfc6357476fd..6451928dd2ce 100644
--- a/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml
+++ b/Documentation/devicetree/bindings/ata/faraday,ftide010.yaml
@@ -26,8 +26,8 @@ properties:
oneOf:
- const: faraday,ftide010
- items:
- - const: cortina,gemini-pata
- - const: faraday,ftide010
+ - const: cortina,gemini-pata
+ - const: faraday,ftide010
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml b/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
index 7b69831060d8..d06096a7ba4b 100644
--- a/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
+++ b/Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
@@ -17,6 +17,7 @@ properties:
- renesas,sata-r8a7779 # R-Car H1
- items:
- enum:
+ - renesas,sata-r8a7742 # RZ/G1H
- renesas,sata-r8a7790-es1 # R-Car H2 ES1
- renesas,sata-r8a7790 # R-Car H2 other than ES1
- renesas,sata-r8a7791 # R-Car M2-W
diff --git a/Documentation/devicetree/bindings/ata/sata_highbank.txt b/Documentation/devicetree/bindings/ata/sata_highbank.txt
deleted file mode 100644
index aa83407cb7a4..000000000000
--- a/Documentation/devicetree/bindings/ata/sata_highbank.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-* Calxeda AHCI SATA Controller
-
-SATA nodes are defined to describe on-chip Serial ATA controllers.
-The Calxeda SATA controller mostly conforms to the AHCI interface
-with some special extensions to add functionality.
-Each SATA controller should have its own node.
-
-Required properties:
-- compatible : compatible list, contains "calxeda,hb-ahci"
-- interrupts : <interrupt mapping for SATA IRQ>
-- reg : <registers mapping>
-
-Optional properties:
-- dma-coherent : Present if dma operations are coherent
-- calxeda,port-phys : phandle-combophy and lane assignment, which maps each
- SATA port to a combophy and a lane within that
- combophy
-- calxeda,sgpio-gpio: phandle-gpio bank, bit offset, and default on or off,
- which indicates that the driver supports SGPIO
- indicator lights using the indicated GPIOs
-- calxeda,led-order : a u32 array that map port numbers to offsets within the
- SGPIO bitstream.
-- calxeda,tx-atten : a u32 array that contains TX attenuation override
- codes, one per port. The upper 3 bytes are always
- 0 and thus ignored.
-- calxeda,pre-clocks : a u32 that indicates the number of additional clock
- cycles to transmit before sending an SGPIO pattern
-- calxeda,post-clocks: a u32 that indicates the number of additional clock
- cycles to transmit after sending an SGPIO pattern
-
-Example:
- sata@ffe08000 {
- compatible = "calxeda,hb-ahci";
- reg = <0xffe08000 0x1000>;
- interrupts = <115>;
- dma-coherent;
- calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1
- &combophy0 2 &combophy0 3>;
- calxeda,sgpio-gpio =<&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>;
- calxeda,led-order = <4 0 1 2 3>;
- calxeda,tx-atten = <0xff 22 0xff 0xff 23>;
- calxeda,pre-clocks = <10>;
- calxeda,post-clocks = <0>;
- };
diff --git a/Documentation/devicetree/bindings/ata/sata_highbank.yaml b/Documentation/devicetree/bindings/ata/sata_highbank.yaml
new file mode 100644
index 000000000000..b195457006cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/sata_highbank.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/sata_highbank.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Calxeda AHCI SATA Controller
+
+description: |
+ The Calxeda SATA controller mostly conforms to the AHCI interface
+ with some special extensions to add functionality, to map GPIOs for
+ activity LEDs and for mapping the ComboPHYs.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ compatible:
+ const: calxeda,hb-ahci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dma-coherent: true
+
+ calxeda,pre-clocks:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Indicates the number of additional clock cycles to transmit before
+ sending an SGPIO pattern.
+
+ calxeda,post-clocks:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Indicates the number of additional clock cycles to transmit after
+ sending an SGPIO pattern.
+
+ calxeda,led-order:
+ description: Maps port numbers to offsets within the SGPIO bitstream.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 8
+
+ calxeda,port-phys:
+ description: |
+ phandle-combophy and lane assignment, which maps each SATA port to a
+ combophy and a lane within that combophy
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle-array
+ - minItems: 1
+ maxItems: 8
+
+ calxeda,tx-atten:
+ description: |
+ Contains TX attenuation override codes, one per port.
+ The upper 24 bits of each entry are always 0 and thus ignored.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 8
+
+ calxeda,sgpio-gpio:
+ description: |
+ phandle-gpio bank, bit offset, and default on or off, which indicates
+ that the driver supports SGPIO indicator lights using the indicated
+ GPIOs.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ sata@ffe08000 {
+ compatible = "calxeda,hb-ahci";
+ reg = <0xffe08000 0x1000>;
+ interrupts = <115>;
+ dma-coherent;
+ calxeda,port-phys = <&combophy5 0>, <&combophy0 0>, <&combophy0 1>,
+ <&combophy0 2>, <&combophy0 3>;
+ calxeda,sgpio-gpio = <&gpioh 5 1>, <&gpioh 6 1>, <&gpioh 7 1>;
+ calxeda,led-order = <4 0 1 2 3>;
+ calxeda,tx-atten = <0xff 22 0xff 0xff 23>;
+ calxeda,pre-clocks = <10>;
+ calxeda,post-clocks = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt
deleted file mode 100644
index 2aa24b889923..000000000000
--- a/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-DT bindings for the Hitachi HD44780 Character LCD Controller
-
-The Hitachi HD44780 Character LCD Controller is commonly used on character LCDs
-that can display one or more lines of text. It exposes an M6800 bus interface,
-which can be used in either 4-bit or 8-bit mode.
-
-Required properties:
- - compatible: Must contain "hit,hd44780",
- - data-gpios: Must contain an array of either 4 or 8 GPIO specifiers,
- referring to the GPIO pins connected to the data signal lines DB0-DB7
- (8-bit mode) or DB4-DB7 (4-bit mode) of the LCD Controller's bus interface,
- - enable-gpios: Must contain a GPIO specifier, referring to the GPIO pin
- connected to the "E" (Enable) signal line of the LCD Controller's bus
- interface,
- - rs-gpios: Must contain a GPIO specifier, referring to the GPIO pin
- connected to the "RS" (Register Select) signal line of the LCD Controller's
- bus interface,
- - display-height-chars: Height of the display, in character cells,
- - display-width-chars: Width of the display, in character cells.
-
-Optional properties:
- - rw-gpios: Must contain a GPIO specifier, referring to the GPIO pin
- connected to the "RW" (Read/Write) signal line of the LCD Controller's bus
- interface,
- - backlight-gpios: Must contain a GPIO specifier, referring to the GPIO pin
- used for enabling the LCD's backlight,
- - internal-buffer-width: Internal buffer width (default is 40 for displays
- with 1 or 2 lines, and display-width-chars for displays with more than 2
- lines).
-
-Example:
-
- auxdisplay {
- compatible = "hit,hd44780";
-
- data-gpios = <&hc595 0 GPIO_ACTIVE_HIGH>,
- <&hc595 1 GPIO_ACTIVE_HIGH>,
- <&hc595 2 GPIO_ACTIVE_HIGH>,
- <&hc595 3 GPIO_ACTIVE_HIGH>;
- enable-gpios = <&hc595 4 GPIO_ACTIVE_HIGH>;
- rs-gpios = <&hc595 5 GPIO_ACTIVE_HIGH>;
-
- display-height-chars = <2>;
- display-width-chars = <16>;
- };
diff --git a/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml
new file mode 100644
index 000000000000..9222b06e93a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/auxdisplay/hit,hd44780.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/auxdisplay/hit,hd44780.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hitachi HD44780 Character LCD Controller
+
+maintainers:
+ - Geert Uytterhoeven <geert@linux-m68k.org>
+
+description:
+ The Hitachi HD44780 Character LCD Controller is commonly used on character
+ LCDs that can display one or more lines of text. It exposes an M6800 bus
+ interface, which can be used in either 4-bit or 8-bit mode.
+
+properties:
+ compatible:
+ const: hit,hd44780
+
+ data-gpios:
+ description:
+ GPIO pins connected to the data signal lines DB0-DB7 (8-bit mode) or
+ DB4-DB7 (4-bit mode) of the LCD Controller's bus interface.
+ oneOf:
+ - maxItems: 4
+ - maxItems: 8
+
+ enable-gpios:
+ description:
+ GPIO pin connected to the "E" (Enable) signal line of the LCD
+ Controller's bus interface.
+ maxItems: 1
+
+ rs-gpios:
+ description:
+ GPIO pin connected to the "RS" (Register Select) signal line of the LCD
+ Controller's bus interface.
+ maxItems: 1
+
+ rw-gpios:
+ description:
+ GPIO pin connected to the "RW" (Read/Write) signal line of the LCD
+ Controller's bus interface.
+ maxItems: 1
+
+ backlight-gpios:
+ description: GPIO pin used for enabling the LCD's backlight.
+ maxItems: 1
+
+ display-height-chars:
+ description: Height of the display, in character cells.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4
+
+ display-width-chars:
+ description: Width of the display, in character cells.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 64
+
+ internal-buffer-width:
+ description:
+ Internal buffer width (default is 40 for displays with 1 or 2 lines, and
+ display-width-chars for displays with more than 2 lines).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 64
+
+required:
+ - compatible
+ - data-gpios
+ - enable-gpios
+ - rs-gpios
+ - display-height-chars
+ - display-width-chars
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ auxdisplay {
+ compatible = "hit,hd44780";
+
+ data-gpios = <&hc595 0 GPIO_ACTIVE_HIGH>,
+ <&hc595 1 GPIO_ACTIVE_HIGH>,
+ <&hc595 2 GPIO_ACTIVE_HIGH>,
+ <&hc595 3 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&hc595 4 GPIO_ACTIVE_HIGH>;
+ rs-gpios = <&hc595 5 GPIO_ACTIVE_HIGH>;
+
+ display-height-chars = <2>;
+ display-width-chars = <16>;
+ };
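The example above wires the controller in 4-bit mode (four data-gpios). In 8-bit mode all eight data lines DB0-DB7 are listed instead, and the optional rw-gpios line may be added; a sketch reusing the same hc595 GPIO expander from the example (pin assignments hypothetical):

    auxdisplay {
        compatible = "hit,hd44780";

        /* 8-bit mode: DB0-DB7, in order */
        data-gpios = <&hc595 0 GPIO_ACTIVE_HIGH>,
                     <&hc595 1 GPIO_ACTIVE_HIGH>,
                     <&hc595 2 GPIO_ACTIVE_HIGH>,
                     <&hc595 3 GPIO_ACTIVE_HIGH>,
                     <&hc595 4 GPIO_ACTIVE_HIGH>,
                     <&hc595 5 GPIO_ACTIVE_HIGH>,
                     <&hc595 6 GPIO_ACTIVE_HIGH>,
                     <&hc595 7 GPIO_ACTIVE_HIGH>;
        enable-gpios = <&hc595 8 GPIO_ACTIVE_HIGH>;
        rs-gpios = <&hc595 9 GPIO_ACTIVE_HIGH>;
        rw-gpios = <&hc595 10 GPIO_ACTIVE_HIGH>;

        display-height-chars = <2>;
        display-width-chars = <20>;
    };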
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
index f0b3d30fbb76..0503651cd214 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun50i-a64-de2.yaml
@@ -31,12 +31,11 @@ properties:
maxItems: 1
allwinner,sram:
- allOf:
- - $ref: /schemas/types.yaml#definitions/phandle-array
- - maxItems: 1
description:
The SRAM that needs to be claimed to access the display engine
bus.
+ $ref: /schemas/types.yaml#definitions/phandle-array
+ maxItems: 1
ranges: true
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
index 80973619342d..32d33b983d66 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
@@ -21,8 +21,8 @@ properties:
oneOf:
- const: allwinner,sun8i-a23-rsb
- items:
- - const: allwinner,sun8i-a83t-rsb
- - const: allwinner,sun8i-a23-rsb
+ - const: allwinner,sun8i-a83t-rsb
+ - const: allwinner,sun8i-a23-rsb
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/bus/arm,integrator-ap-lm.yaml b/Documentation/devicetree/bindings/bus/arm,integrator-ap-lm.yaml
new file mode 100644
index 000000000000..47227427c1c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/arm,integrator-ap-lm.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/arm,integrator-ap-lm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Integrator/AP Logic Module extension bus
+
+maintainers:
+ - Linus Walleij <linusw@kernel.org>
+
+description: The Integrator/AP is a prototyping platform and as such has a
+ site for stacking up to four logic modules (LM) designed specifically for
+ use with this platform. A special system controller register can be read to
+ determine if a logic module is connected at index 0, 1, 2 or 3. The logic
+ module connector is described in this binding. The logic modules per se
+ then have their own specific per-module bindings and they will be described
+ as subnodes under this logic module extension bus.
+
+properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ compatible:
+ items:
+ - const: arm,integrator-ap-lm
+
+ ranges: true
+ dma-ranges: true
+
+patternProperties:
+ "^bus(@[0-9a-f]*)?$":
+ description: Nodes on the Logic Module bus represent logic modules
+ and are named with bus. The first module is at 0xc0000000, the second
+ at 0xd0000000 and so on until the top of the memory of the system at
+ 0xffffffff. All information about the memory used by the module is
+ in ranges and dma-ranges.
+ type: object
+
+ required:
+ - compatible
+
+required:
+ - compatible
+
+examples:
+ - |
+ bus@c0000000 {
+ compatible = "arm,integrator-ap-lm";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0xc0000000 0xc0000000 0x40000000>;
+ dma-ranges;
+
+ bus@c0000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xc0000000 0x10000000>;
+ /* The Logic Modules sees the Core Module 0 RAM @80000000 */
+ dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ serial@100000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x00100000 0x1000>;
+ interrupts-extended = <&impd1_vic 1>;
+ };
+
+ impd1_vic: interrupt-controller@3000000 {
+ compatible = "arm,pl192-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x03000000 0x1000>;
+ valid-mask = <0x00000bff>;
+ interrupts-extended = <&pic 9>;
+ };
+ };
+ };
+
+additionalProperties: false
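Following the description, a second populated logic module site would show up as another child bus node one 256 MiB window higher; a minimal sketch of such a child inside the node above (its contents are hypothetical):

    bus@d0000000 {
        compatible = "simple-bus";
        ranges = <0x00000000 0xd0000000 0x10000000>;
        /* this module also sees the Core Module 0 RAM @80000000 */
        dma-ranges = <0x00000000 0x80000000 0x10000000>;
        #address-cells = <1>;
        #size-cells = <1>;
        /* module-specific peripherals go here */
    };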
diff --git a/Documentation/devicetree/bindings/bus/baikal,bt1-apb.yaml b/Documentation/devicetree/bindings/bus/baikal,bt1-apb.yaml
new file mode 100644
index 000000000000..d6a3b71ea835
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/baikal,bt1-apb.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/baikal,bt1-apb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 APB-bus
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description: |
+ Baikal-T1 CPU or DMAC MMIO requests are handled by the AMBA 3 AXI Interconnect
+ which routes them to the AXI-APB bridge. This interface is a single master
+ multiple slaves bus in turn serializing IO accesses and routing them to the
+ addressed APB slave devices. In case of any APB protocol collision, or if a
+ slave device does not respond within the timeout, an IRQ is raised and the
+ erroneous address is reported to the APB terminator (APB Errors Handler Block).
+
+allOf:
+ - $ref: /schemas/simple-bus.yaml#
+
+properties:
+ compatible:
+ contains:
+ const: baikal,bt1-apb
+
+ reg:
+ items:
+ - description: APB EHB MMIO registers
+ - description: APB MMIO region with no device mapped
+
+ reg-names:
+ items:
+ - const: ehb
+ - const: nodev
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: APB reference clock
+
+ clock-names:
+ items:
+ - const: pclk
+
+ resets:
+ items:
+ - description: APB domain reset line
+
+ reset-names:
+ items:
+ - const: prst
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+
+ bus@1f059000 {
+ compatible = "baikal,bt1-apb", "simple-bus";
+ reg = <0 0x1f059000 0 0x1000>,
+ <0 0x1d000000 0 0x2040000>;
+ reg-names = "ehb", "nodev";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges;
+
+ interrupts = <GIC_SHARED 16 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_sys 1>;
+ clock-names = "pclk";
+
+ resets = <&ccu_sys 1>;
+ reset-names = "prst";
+ };
+...
diff --git a/Documentation/devicetree/bindings/bus/baikal,bt1-axi.yaml b/Documentation/devicetree/bindings/bus/baikal,bt1-axi.yaml
new file mode 100644
index 000000000000..203bc0e5346b
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/baikal,bt1-axi.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/baikal,bt1-axi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 AXI-bus
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description: |
+ AXI3-bus is the main communication bus of Baikal-T1 SoC connecting all
+ high-speed peripheral IP-cores with RAM controller and with MIPS P5600
+ cores. Traffic arbitration is done by means of DW AXI Interconnect (so
+ called AXI Main Interconnect) routing IO requests from one block to
+ another: from CPU to SoC peripherals and between some SoC peripherals
+ (mostly between peripheral devices and RAM, but also between DMA and
+ some peripherals). In case of any protocol error, device not responding
+ an IRQ is raised and a faulty situation is reported to the AXI EHB
+ (Errors Handler Block) embedded on top of the DW AXI Interconnect and
+ accessible by means of the Baikal-T1 System Controller.
+
+allOf:
+ - $ref: /schemas/simple-bus.yaml#
+
+properties:
+ compatible:
+ contains:
+ const: baikal,bt1-axi
+
+ reg:
+ minItems: 1
+ items:
+ - description: Synopsys DesignWare AXI Interconnect QoS registers
+ - description: AXI EHB MMIO system controller registers
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: qos
+ - const: ehb
+
+ '#interconnect-cells':
+ const: 1
+
+ syscon:
+ $ref: /schemas/types.yaml#definitions/phandle
+ description: Phandle to the Baikal-T1 System Controller DT node
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Main Interconnect uplink reference clock
+
+ clock-names:
+ items:
+ - const: aclk
+
+ resets:
+ items:
+ - description: Main Interconnect reset line
+
+ reset-names:
+ items:
+ - const: arst
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - syscon
+ - interrupts
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/mips-gic.h>
+
+ bus@1f05a000 {
+ compatible = "baikal,bt1-axi", "simple-bus";
+ reg = <0 0x1f05a000 0 0x1000>,
+ <0 0x1f04d110 0 0x8>;
+ reg-names = "qos", "ehb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interconnect-cells = <1>;
+
+ syscon = <&syscon>;
+
+ ranges;
+
+ interrupts = <GIC_SHARED 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&ccu_axi 0>;
+ clock-names = "aclk";
+
+ resets = <&ccu_axi 0>;
+ reset-names = "arst";
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml
index ed1b2126a81b..9a37a357cb4e 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-gates-clk.yaml
@@ -52,12 +52,12 @@ properties:
- const: allwinner,sun4i-a10-dram-gates-clk
- items:
- - const: allwinner,sun5i-a13-dram-gates-clk
- - const: allwinner,sun4i-a10-gates-clk
+ - const: allwinner,sun5i-a13-dram-gates-clk
+ - const: allwinner,sun4i-a10-gates-clk
- items:
- - const: allwinner,sun8i-h3-apb0-gates-clk
- - const: allwinner,sun4i-a10-gates-clk
+ - const: allwinner,sun8i-h3-apb0-gates-clk
+ - const: allwinner,sun4i-a10-gates-clk
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
new file mode 100644
index 000000000000..2821425ee445
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-div.yaml
@@ -0,0 +1,188 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/baikal,bt1-ccu-div.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 Clock Control Unit Dividers
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description: |
+ Clocks Control Unit is the core of Baikal-T1 SoC System Controller
+ responsible for the chip subsystems clocking and resetting. The CCU is
+ connected with an external fixed rate oscillator, which signal is transformed
+ into clocks of various frequencies and then propagated to either individual
+ IP-blocks or to groups of blocks (clock domains). The transformation is done
+ by means of PLLs and gateable/non-gateable dividers embedded into the CCU.
+ The latter are described in this binding. Each clock domain can also be
+ individually reset by using the domain clocks divider configuration
+ registers. The Baikal-T1 CCU is logically divided into the following components:
+ 1) External oscillator (normally XTAL's 25 MHz crystal oscillator, but
+ in general can provide any frequency supported by the CCU PLLs).
+ 2) PLLs clocks generators (PLLs).
+ 3) AXI-bus clock dividers (AXI) - described in this binding file.
+ 4) System devices reference clock dividers (SYS) - described in this binding
+ file.
+ which are connected with each other as shown on the next figure:
+
+ +---------------+
+ | Baikal-T1 CCU |
+ | +----+------|- MIPS P5600 cores
+ | +-|PLLs|------|- DDR controller
+ | | +----+ |
+ +----+ | | | | |
+ |XTAL|--|-+ | | +---+-|
+ +----+ | | | +-|AXI|-|- AXI-bus
+ | | | +---+-|
+ | | | |
+ | | +----+---+-|- APB-bus
+ | +-------|SYS|-|- Low-speed Devices
+ | +---+-|- High-speed Devices
+ +---------------+
+
+ Each sub-block is represented as a separate DT node and has an individual
+ driver to be bound with.
+
+ In order to create signals of wide range frequencies the external oscillator
+ output is primarily connected to a set of CCU PLLs. Some of PLLs CLKOUT are
+ then passed over CCU dividers to create signals required for the target clock
+ domain (like AXI-bus or System Device consumers). The dividers have the
+ following structure:
+
+ +--------------+
+ CLKIN --|->+----+ 1|\ |
+ SETCLK--|--|/DIV|->| | |
+ CLKDIV--|--| | | |-|->CLKLOUT
+ LOCK----|--+----+ | | |
+ | |/ |
+ | | |
+ EN------|-----------+ |
+ RST-----|--------------|->RSTOUT
+ +--------------+
+
+ where CLKIN is the reference clock coming either from CCU PLLs or from an
+ external clock oscillator, SETCLK - a command to update the output clock in
+ accordance with a set divider, CLKDIV - clocks divider, LOCK - a signal of
+ the output clock stabilization, EN - enable/disable the divider block,
+ RST/RSTOUT - reset clock domain signal. Depending on the consumer IP-core
+ peculiarities the dividers may lack some of the functionality depicted in the
+ figure above (like EN, CLKDIV/LOCK/SETCLK). In this case the corresponding
+ clock provider simply doesn't expose the switching functions, the rate
+ configuration, or both.
+
+ The clock dividers whose output clocks are consumed by individual SoC
+ devices are united into a single clock provider called System Devices CCU.
+ Similarly the dividers whose output clocks are used as AXI-bus reference clocks
+ are called AXI-bus CCU. Both of them use the common clock bindings with no
+ custom properties. The list of exported clocks and reset signals can be found
+ in the files: 'include/dt-bindings/clock/bt1-ccu.h' and
+ 'include/dt-bindings/reset/bt1-ccu.h'. Since the System Devices and AXI-bus CCU
+ are part of the Baikal-T1 SoC System Controller, their DT nodes are supposed
+ to be children of the latter.
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: baikal,bt1-ccu-axi
+
+then:
+ properties:
+ clocks:
+ items:
+ - description: CCU SATA PLL output clock
+ - description: CCU PCIe PLL output clock
+ - description: CCU Ethernet PLL output clock
+
+ clock-names:
+ items:
+ - const: sata_clk
+ - const: pcie_clk
+ - const: eth_clk
+
+else:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock
+ - description: CCU SATA PLL output clock
+ - description: CCU PCIe PLL output clock
+ - description: CCU Ethernet PLL output clock
+
+ clock-names:
+ items:
+ - const: ref_clk
+ - const: sata_clk
+ - const: pcie_clk
+ - const: eth_clk
+
+properties:
+ compatible:
+ enum:
+ - baikal,bt1-ccu-axi
+ - baikal,bt1-ccu-sys
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clocks
+ - clock-names
+
+examples:
+ # AXI-bus Clock Control Unit node:
+ - |
+ #include <dt-bindings/clock/bt1-ccu.h>
+
+ clock-controller@1f04d030 {
+ compatible = "baikal,bt1-ccu-axi";
+ reg = <0x1f04d030 0x030>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ clocks = <&ccu_pll CCU_SATA_PLL>,
+ <&ccu_pll CCU_PCIE_PLL>,
+ <&ccu_pll CCU_ETH_PLL>;
+ clock-names = "sata_clk", "pcie_clk", "eth_clk";
+ };
+ # System Devices Clock Control Unit node:
+ - |
+ #include <dt-bindings/clock/bt1-ccu.h>
+
+ clock-controller@1f04d060 {
+ compatible = "baikal,bt1-ccu-sys";
+ reg = <0x1f04d060 0x0a0>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+
+ clocks = <&clk25m>,
+ <&ccu_pll CCU_SATA_PLL>,
+ <&ccu_pll CCU_PCIE_PLL>,
+ <&ccu_pll CCU_ETH_PLL>;
+ clock-names = "ref_clk", "sata_clk", "pcie_clk",
+ "eth_clk";
+ };
+ # Required Clock Control Unit PLL node:
+ - |
+ ccu_pll: clock-controller@1f04d000 {
+ compatible = "baikal,bt1-ccu-pll";
+ reg = <0x1f04d000 0x028>;
+ #clock-cells = <1>;
+
+ clocks = <&clk25m>;
+ clock-names = "ref_clk";
+ };
+...
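
The description above expects the AXI-bus and System Devices CCU nodes to be children of the Baikal-T1 System Controller, while the schema examples show them standalone. Below is a minimal nesting sketch; the parent node's compatible strings, reg and ranges layout are illustrative assumptions only (they are not defined by this binding), and the child node simply repeats the AXI CCU example.

    #include <dt-bindings/clock/bt1-ccu.h>

    syscon@1f04d000 {
        /* Hypothetical System Controller parent node; these compatible
         * strings and the reg/ranges values are assumptions, not part of
         * this schema.
         */
        compatible = "baikal,bt1-sys-con", "syscon", "simple-mfd";
        reg = <0x1f04d000 0x1000>;
        #address-cells = <1>;
        #size-cells = <1>;
        ranges;

        clock-controller@1f04d030 {
            compatible = "baikal,bt1-ccu-axi";
            reg = <0x1f04d030 0x030>;
            #clock-cells = <1>;
            #reset-cells = <1>;

            clocks = <&ccu_pll CCU_SATA_PLL>,
                     <&ccu_pll CCU_PCIE_PLL>,
                     <&ccu_pll CCU_ETH_PLL>;
            clock-names = "sata_clk", "pcie_clk", "eth_clk";
        };
    };

The System Devices CCU and the CCU PLL nodes would sit alongside the AXI node under the same parent.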
diff --git a/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml
new file mode 100644
index 000000000000..97131bfa6f87
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/baikal,bt1-ccu-pll.yaml
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/baikal,bt1-ccu-pll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 Clock Control Unit PLL
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description: |
+ The Clock Control Unit is the core of the Baikal-T1 SoC System Controller,
+ responsible for clocking and resetting the chip subsystems. The CCU is
+ connected to an external fixed-rate oscillator, whose signal is transformed
+ into clocks of various frequencies and then propagated to either individual
+ IP-blocks or to groups of blocks (clock domains). The transformation is done
+ by means of PLLs and gateable/non-gateable dividers embedded into the CCU.
+ It is logically divided into the following components:
+ 1) External oscillator (normally a 25 MHz XTAL crystal oscillator, but in
+ general it can provide any frequency supported by the CCU PLLs).
+ 2) PLL clock generators (PLLs) - described in this binding file.
+ 3) AXI-bus clock dividers (AXI).
+ 4) System devices reference clock dividers (SYS).
+ These components are connected with each other as shown in the figure below:
+
+ +---------------+
+ | Baikal-T1 CCU |
+ | +----+------|- MIPS P5600 cores
+ | +-|PLLs|------|- DDR controller
+ | | +----+ |
+ +----+ | | | | |
+ |XTAL|--|-+ | | +---+-|
+ +----+ | | | +-|AXI|-|- AXI-bus
+ | | | +---+-|
+ | | | |
+ | | +----+---+-|- APB-bus
+ | +-------|SYS|-|- Low-speed Devices
+ | +---+-|- High-speed Devices
+ +---------------+
+
+ Each CCU sub-block is represented as a separate DT node and is bound to an
+ individual driver.
+
+ In order to create signals with a wide range of frequencies, the external
+ oscillator output is first connected to a set of CCU PLLs. There are five
+ PLLs creating the clocks for the MIPS P5600 cores, the embedded DDR
+ controller, and the SATA, Ethernet and PCIe domains. Although the last three
+ domains are named after the largest system interfaces, they in fact cover
+ nearly all of the remaining SoC peripherals. Each PLL is based on the True
+ Circuits TSMC CLN28HPM core with an interface wrapper (a so-called safe PLL
+ clocks switcher) to simplify the PLL configuration procedure. The PLLs work
+ as depicted in the following diagram:
+
+ +--------------------------+
+ | |
+ +-->+---+ +---+ +---+ | +---+ 0|\
+ CLKF--->|/NF|--->|PFD|...|VCO|-+->|/OD|--->| |
+ +---+ +->+---+ +---+ /->+---+ | |--->CLKOUT
+ CLKOD---------C----------------+ 1| |
+ +--------C--------------------------->|/
+ | | ^
+ Rclk-+->+---+ | |
+ CLKR--->|/NR|-+ |
+ +---+ |
+ BYPASS--------------------------------------+
+ BWADJ--->
+
+ where Rclk is the reference clock coming from XTAL, NR is the reference clock
+ divider, NF is the PLL clock multiplier (feedback divider), OD is the VCO
+ output clock divider, CLKOUT is the output clock and BWADJ is the PLL
+ bandwidth adjustment parameter. At the moment the binding supports configuring
+ the PLL dividers in accordance with a requested rate, while bypass and
+ bandwidth adjustment settings can be added in the future if necessary.
+
+ Each PLL's CLKOUT is then either connected directly to the corresponding
+ clock consumer (like the P5600 cores or the DDR controller) or passed
+ through a CCU divider to create the signal required for the clock domain.
+
+ The CCU PLL DT node uses the common clock bindings with no custom
+ parameters. The list of exported clocks can be found in
+ 'include/dt-bindings/clock/bt1-ccu.h'. Since the CCU PLL is part of the
+ Baikal-T1 SoC System Controller, its DT node is supposed to be a child of
+ the latter.
+
+properties:
+ compatible:
+ const: baikal,bt1-ccu-pll
+
+ reg:
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ clocks:
+ description: External reference clock
+ maxItems: 1
+
+ clock-names:
+ const: ref_clk
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clocks
+ - clock-names
+
+examples:
+ # Clock Control Unit PLL node:
+ - |
+ clock-controller@1f04d000 {
+ compatible = "baikal,bt1-ccu-pll";
+ reg = <0x1f04d000 0x028>;
+ #clock-cells = <1>;
+
+ clocks = <&clk25m>;
+ clock-names = "ref_clk";
+ };
+ # Required external oscillator:
+ - |
+ clk25m: clock-oscillator-25m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "clk25m";
+ };
+...
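
The description above names the NR, NF and OD dividers but does not spell out the resulting rate. From the divider arrangement in the diagram (reference divided by NR into the PFD, NF in the feedback path, OD on the VCO output), the output rate should follow the usual integer-PLL relation; this is an inference from the figure, not a statement of the binding:

    f_{CLKOUT} = f_{ref} \cdot NF / (NR \cdot OD)

For example, a 25 MHz reference with NR = 1, NF = 48 and OD = 1 would place CLKOUT at 1.2 GHz.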
diff --git a/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml b/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
index 8559fe8f7efd..228c9313df53 100644
--- a/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/bitmain,bm1880-clk.yaml
@@ -65,7 +65,7 @@ examples:
- |
uart0: serial@58018000 {
compatible = "snps,dw-apb-uart";
- reg = <0x0 0x58018000 0x0 0x2000>;
+ reg = <0x58018000 0x2000>;
clocks = <&clk 45>, <&clk 46>;
clock-names = "baudclk", "apb_pclk";
interrupts = <0 9 4>;
diff --git a/Documentation/devicetree/bindings/clock/calxeda.txt b/Documentation/devicetree/bindings/clock/calxeda.txt
deleted file mode 100644
index 0a6ac1bdcda1..000000000000
--- a/Documentation/devicetree/bindings/clock/calxeda.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Device Tree Clock bindings for Calxeda highbank platform
-
-This binding uses the common clock binding[1].
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-
-Required properties:
-- compatible : shall be one of the following:
- "calxeda,hb-pll-clock" - for a PLL clock
- "calxeda,hb-a9periph-clock" - The A9 peripheral clock divided from the
- A9 clock.
- "calxeda,hb-a9bus-clock" - The A9 bus clock divided from the A9 clock.
- "calxeda,hb-emmc-clock" - Divided clock for MMC/SD controller.
-- reg : shall be the control register offset from SYSREGs base for the clock.
-- clocks : shall be the input parent clock phandle for the clock. This is
- either an oscillator or a pll output.
-- #clock-cells : from common clock binding; shall be set to 0.
diff --git a/Documentation/devicetree/bindings/clock/calxeda.yaml b/Documentation/devicetree/bindings/clock/calxeda.yaml
new file mode 100644
index 000000000000..a34cbf3c9aaf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/calxeda.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/calxeda.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Device Tree Clock bindings for Calxeda highbank platform
+
+description: |
+ This binding covers the Calxeda SoC internal peripheral and bus clocks
+ as used by peripherals. The clocks live inside the "system register"
+ region of the SoC, so they are typically presented as children of an
+ "hb-sregs" node.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ "#clock-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - calxeda,hb-pll-clock
+ - calxeda,hb-a9periph-clock
+ - calxeda,hb-a9bus-clock
+ - calxeda,hb-emmc-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - "#clock-cells"
+ - compatible
+ - clocks
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ sregs@3fffc000 {
+ compatible = "calxeda,hb-sregs";
+ reg = <0x3fffc000 0x1000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc: oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333000>;
+ };
+
+ ddrpll: ddrpll@108 {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-pll-clock";
+ clocks = <&osc>;
+ reg = <0x108>;
+ };
+
+ a9pll: a9pll@100 {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-pll-clock";
+ clocks = <&osc>;
+ reg = <0x100>;
+ };
+
+ a9periphclk: a9periphclk@104 {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-a9periph-clock";
+ clocks = <&a9pll>;
+ reg = <0x104>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
deleted file mode 100644
index 52a064c789ee..000000000000
--- a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-Cirrus Logic Lochnagar Audio Development Board
-
-Lochnagar is an evaluation and development board for Cirrus Logic
-Smart CODEC and Amp devices. It allows the connection of most Cirrus
-Logic devices on mini-cards, as well as allowing connection of
-various application processor systems to provide a full evaluation
-platform. Audio system topology, clocking and power can all be
-controlled through the Lochnagar, allowing the device under test
-to be used in a variety of possible use cases.
-
-This binding document describes the binding for the clock portion of
-the driver.
-
-Also see these documents for generic binding information:
- [1] Clock : ../clock/clock-bindings.txt
-
-And these for relevant defines:
- [2] include/dt-bindings/clock/lochnagar.h
-
-This binding must be part of the Lochnagar MFD binding:
- [3] ../mfd/cirrus,lochnagar.txt
-
-Required properties:
-
- - compatible : One of the following strings:
- "cirrus,lochnagar1-clk"
- "cirrus,lochnagar2-clk"
-
- - #clock-cells : Must be 1. The first cell indicates the clock
- number, see [2] for available clocks and [1].
-
-Optional properties:
-
- - clocks : Must contain an entry for each clock in clock-names.
- - clock-names : May contain entries for each of the following
- clocks:
- - ln-cdc-clkout : Output clock from CODEC card.
- - ln-dsp-clkout : Output clock from DSP card.
- - ln-gf-mclk1,ln-gf-mclk2,ln-gf-mclk3,ln-gf-mclk4 : Optional
- input audio clocks from host system.
- - ln-psia1-mclk, ln-psia2-mclk : Optional input audio clocks from
- external connector.
- - ln-spdif-mclk : Optional input audio clock from SPDIF.
- - ln-spdif-clkout : Optional input audio clock from SPDIF.
- - ln-adat-mclk : Optional input audio clock from ADAT.
- - ln-pmic-32k : On board fixed clock.
- - ln-clk-12m : On board fixed clock.
- - ln-clk-11m : On board fixed clock.
- - ln-clk-24m : On board fixed clock.
- - ln-clk-22m : On board fixed clock.
- - ln-clk-8m : On board fixed clock.
- - ln-usb-clk-24m : On board fixed clock.
- - ln-usb-clk-12m : On board fixed clock.
-
- - assigned-clocks : A list of Lochnagar clocks to be reparented, see
- [2] for available clocks.
- - assigned-clock-parents : Parents to be assigned to the clocks
- listed in "assigned-clocks".
-
-Optional nodes:
-
- - fixed-clock nodes may be registered for the following on board clocks:
- - ln-pmic-32k : 32768 Hz
- - ln-clk-12m : 12288000 Hz
- - ln-clk-11m : 11298600 Hz
- - ln-clk-24m : 24576000 Hz
- - ln-clk-22m : 22579200 Hz
- - ln-clk-8m : 8192000 Hz
- - ln-usb-clk-24m : 24576000 Hz
- - ln-usb-clk-12m : 12288000 Hz
-
-Example:
-
-lochnagar {
- lochnagar-clk {
- compatible = "cirrus,lochnagar2-clk";
-
- #clock-cells = <1>;
-
- clocks = <&clk-audio>, <&clk_pmic>;
- clock-names = "ln-gf-mclk2", "ln-pmic-32k";
-
- assigned-clocks = <&lochnagar-clk LOCHNAGAR_CDC_MCLK1>,
- <&lochnagar-clk LOCHNAGAR_CDC_MCLK2>;
- assigned-clock-parents = <&clk-audio>,
- <&clk-pmic>;
- };
-
- clk-pmic: clk-pmic {
- compatible = "fixed-clock";
- clock-cells = <0>;
- clock-frequency = <32768>;
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
new file mode 100644
index 000000000000..59de125647ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/cirrus,lochnagar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Lochnagar Audio Development Board
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ Lochnagar is an evaluation and development board for Cirrus Logic
+ Smart CODEC and Amp devices. It allows the connection of most Cirrus
+ Logic devices on mini-cards, as well as allowing connection of various
+ application processor systems to provide a full evaluation platform.
+ Audio system topology, clocking and power can all be controlled through
+ the Lochnagar, allowing the device under test to be used in a variety of
+ possible use cases.
+
+ This binding document describes the binding for the clock portion of the
+ driver.
+
+ Also see these documents for generic binding information:
+ [1] Clock : ../clock/clock-bindings.txt
+
+ And these for relevant defines:
+ [2] include/dt-bindings/clock/lochnagar.h
+
+ This binding must be part of the Lochnagar MFD binding:
+ [3] ../mfd/cirrus,lochnagar.yaml
+
+properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar1-clk
+ - cirrus,lochnagar2-clk
+
+ '#clock-cells':
+ description:
+ The first cell indicates the clock number; see [2] for the available
+ clocks and [1] for the generic clock binding.
+ const: 1
+
+ clock-names:
+ items:
+ enum:
+ - ln-cdc-clkout # Output clock from CODEC card.
+ - ln-dsp-clkout # Output clock from DSP card.
+ - ln-gf-mclk1 # Optional input clock from host system.
+ - ln-gf-mclk2 # Optional input clock from host system.
+ - ln-gf-mclk3 # Optional input clock from host system.
+ - ln-gf-mclk4 # Optional input clock from host system.
+ - ln-psia1-mclk # Optional input clock from external connector.
+ - ln-psia2-mclk # Optional input clock from external connector.
+ - ln-spdif-mclk # Optional input clock from SPDIF.
+ - ln-spdif-clkout # Optional input clock from SPDIF.
+ - ln-adat-mclk # Optional input clock from ADAT.
+ - ln-pmic-32k # On board fixed clock.
+ - ln-clk-12m # On board fixed clock.
+ - ln-clk-11m # On board fixed clock.
+ - ln-clk-24m # On board fixed clock.
+ - ln-clk-22m # On board fixed clock.
+ - ln-clk-8m # On board fixed clock.
+ - ln-usb-clk-24m # On board fixed clock.
+ - ln-usb-clk-12m # On board fixed clock.
+ minItems: 1
+ maxItems: 19
+
+ clocks: true
+ assigned-clocks: true
+ assigned-clock-parents: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - '#clock-cells'
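
Unlike the removed text binding, the schema above carries no example. A minimal sketch adapted from that removed example is shown below; the phandle labels (lochnagar_clk, clk_audio, clk_pmic) are illustrative assumptions, the defines come from include/dt-bindings/clock/lochnagar.h, and in practice the node lives as a child of the Lochnagar MFD node [3].

    #include <dt-bindings/clock/lochnagar.h>

    lochnagar_clk: lochnagar-clk {
        compatible = "cirrus,lochnagar2-clk";
        #clock-cells = <1>;

        /* Optional input clocks fed into the Lochnagar clocking fabric */
        clocks = <&clk_audio>, <&clk_pmic>;
        clock-names = "ln-gf-mclk2", "ln-pmic-32k";

        /* Reparent the CODEC MCLKs onto those inputs */
        assigned-clocks = <&lochnagar_clk LOCHNAGAR_CDC_MCLK1>,
                          <&lochnagar_clk LOCHNAGAR_CDC_MCLK2>;
        assigned-clock-parents = <&clk_audio>, <&clk_pmic>;
    };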
diff --git a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
index b567f8092f8c..f415845b38dd 100644
--- a/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/fixed-factor-clock.yaml
@@ -24,9 +24,8 @@ properties:
clock-div:
description: Fixed divider
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
clock-mult:
description: Fixed multiplier
diff --git a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
index a203d5d498db..9ac716dfa602 100644
--- a/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
+++ b/Documentation/devicetree/bindings/clock/fsl,plldig.yaml
@@ -28,15 +28,14 @@ properties:
const: 0
fsl,vco-hz:
- description: Optional for VCO frequency of the PLL in Hertz.
- The VCO frequency of this PLL cannot be changed during runtime
- only at startup. Therefore, the output frequencies are very
- limited and might not even closely match the requested frequency.
- To work around this restriction the user may specify its own
- desired VCO frequency for the PLL.
- minimum: 650000000
- maximum: 1300000000
- default: 1188000000
+ description: Optional VCO frequency of the PLL in Hertz. The VCO frequency
+ of this PLL cannot be changed during runtime, only at startup. Therefore,
+ the output frequencies are very limited and might not even closely match
+ the requested frequency. To work around this restriction the user may specify
+ their own desired VCO frequency for the PLL.
+ minimum: 650000000
+ maximum: 1300000000
+ default: 1188000000
required:
- compatible
@@ -51,7 +50,7 @@ examples:
- |
dpclk: clock-display@f1f0000 {
compatible = "fsl,ls1028a-plldig";
- reg = <0x0 0xf1f0000 0x0 0xffff>;
+ reg = <0xf1f0000 0xffff>;
#clock-cells = <0>;
clocks = <&osc_27m>;
};
diff --git a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt
index 05a245c9df08..bcff681a4bd0 100644
--- a/Documentation/devicetree/bindings/clock/idt,versaclock5.txt
+++ b/Documentation/devicetree/bindings/clock/idt,versaclock5.txt
@@ -12,6 +12,7 @@ Required properties:
"idt,5p49v5933"
"idt,5p49v5935"
"idt,5p49v6901"
+ "idt,5p49v6965"
- reg: i2c device address, shall be 0x68 or 0x6a.
- #clock-cells: from common clock binding; shall be set to 1.
- clocks: from common clock binding; list of parent clock handles,
diff --git a/Documentation/devicetree/bindings/clock/imx1-clock.txt b/Documentation/devicetree/bindings/clock/imx1-clock.txt
deleted file mode 100644
index 9823baf7acb6..000000000000
--- a/Documentation/devicetree/bindings/clock/imx1-clock.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-* Clock bindings for Freescale i.MX1 CPUs
-
-Required properties:
-- compatible: Should be "fsl,imx1-ccm".
-- reg: Address and length of the register set.
-- #clock-cells: Should be <1>.
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx1-clock.h
-for the full list of i.MX1 clock IDs.
-
-Examples:
- clks: ccm@21b000 {
- #clock-cells = <1>;
- compatible = "fsl,imx1-ccm";
- reg = <0x0021b000 0x1000>;
- };
-
- pwm: pwm@208000 {
- #pwm-cells = <2>;
- compatible = "fsl,imx1-pwm";
- reg = <0x00208000 0x1000>;
- interrupts = <34>;
- clocks = <&clks IMX1_CLK_DUMMY>, <&clks IMX1_CLK_PER1>;
- clock-names = "ipg", "per";
- };
diff --git a/Documentation/devicetree/bindings/clock/imx1-clock.yaml b/Documentation/devicetree/bindings/clock/imx1-clock.yaml
new file mode 100644
index 000000000000..f4833a29b79e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx1-clock.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx1-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX1 CPUs
+
+maintainers:
+ - Alexander Shiyan <shc_work@mail.ru>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx1-clock.h
+ for the full list of i.MX1 clock IDs.
+
+properties:
+ compatible:
+ const: fsl,imx1-ccm
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx1-clock.h>
+
+ clock-controller@21b000 {
+ #clock-cells = <1>;
+ compatible = "fsl,imx1-ccm";
+ reg = <0x0021b000 0x1000>;
+ };
+
+ pwm@208000 {
+ #pwm-cells = <2>;
+ compatible = "fsl,imx1-pwm";
+ reg = <0x00208000 0x1000>;
+ interrupts = <34>;
+ clocks = <&clks IMX1_CLK_DUMMY>, <&clks IMX1_CLK_PER1>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx21-clock.txt b/Documentation/devicetree/bindings/clock/imx21-clock.txt
deleted file mode 100644
index 806f63d628bd..000000000000
--- a/Documentation/devicetree/bindings/clock/imx21-clock.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Clock bindings for Freescale i.MX21
-
-Required properties:
-- compatible : Should be "fsl,imx21-ccm".
-- reg : Address and length of the register set.
-- interrupts : Should contain CCM interrupt.
-- #clock-cells: Should be <1>.
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx21-clock.h
-for the full list of i.MX21 clock IDs.
-
-Examples:
- clks: ccm@10027000{
- compatible = "fsl,imx21-ccm";
- reg = <0x10027000 0x800>;
- #clock-cells = <1>;
- };
-
- uart1: serial@1000a000 {
- compatible = "fsl,imx21-uart";
- reg = <0x1000a000 0x1000>;
- interrupts = <20>;
- clocks = <&clks IMX21_CLK_UART1_IPG_GATE>,
- <&clks IMX21_CLK_PER1>;
- clock-names = "ipg", "per";
- };
diff --git a/Documentation/devicetree/bindings/clock/imx21-clock.yaml b/Documentation/devicetree/bindings/clock/imx21-clock.yaml
new file mode 100644
index 000000000000..518ad9a4733c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx21-clock.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx21-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX21
+
+maintainers:
+ - Alexander Shiyan <shc_work@mail.ru>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx21-clock.h
+ for the full list of i.MX21 clock IDs.
+
+properties:
+ compatible:
+ const: fsl,imx21-ccm
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx21-clock.h>
+
+ clock-controller@10027000 {
+ compatible = "fsl,imx21-ccm";
+ reg = <0x10027000 0x800>;
+ #clock-cells = <1>;
+ };
+
+ serial@1000a000 {
+ compatible = "fsl,imx21-uart";
+ reg = <0x1000a000 0x1000>;
+ interrupts = <20>;
+ clocks = <&clks IMX21_CLK_UART1_IPG_GATE>,
+ <&clks IMX21_CLK_PER1>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx23-clock.txt b/Documentation/devicetree/bindings/clock/imx23-clock.txt
deleted file mode 100644
index 8385348d3bd9..000000000000
--- a/Documentation/devicetree/bindings/clock/imx23-clock.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-* Clock bindings for Freescale i.MX23
-
-Required properties:
-- compatible: Should be "fsl,imx23-clkctrl"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. The following is a full list of i.MX23
-clocks and IDs.
-
- Clock ID
- ------------------
- ref_xtal 0
- pll 1
- ref_cpu 2
- ref_emi 3
- ref_pix 4
- ref_io 5
- saif_sel 6
- lcdif_sel 7
- gpmi_sel 8
- ssp_sel 9
- emi_sel 10
- cpu 11
- etm_sel 12
- cpu_pll 13
- cpu_xtal 14
- hbus 15
- xbus 16
- lcdif_div 17
- ssp_div 18
- gpmi_div 19
- emi_pll 20
- emi_xtal 21
- etm_div 22
- saif_div 23
- clk32k_div 24
- rtc 25
- adc 26
- spdif_div 27
- clk32k 28
- dri 29
- pwm 30
- filt 31
- uart 32
- ssp 33
- gpmi 34
- spdif 35
- emi 36
- saif 37
- lcdif 38
- etm 39
- usb 40
- usb_phy 41
-
-Examples:
-
-clks: clkctrl@80040000 {
- compatible = "fsl,imx23-clkctrl";
- reg = <0x80040000 0x2000>;
- #clock-cells = <1>;
-};
-
-auart0: serial@8006c000 {
- compatible = "fsl,imx23-auart";
- reg = <0x8006c000 0x2000>;
- interrupts = <24 25 23>;
- clocks = <&clks 32>;
-};
diff --git a/Documentation/devicetree/bindings/clock/imx23-clock.yaml b/Documentation/devicetree/bindings/clock/imx23-clock.yaml
new file mode 100644
index 000000000000..66cb238a1040
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx23-clock.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx23-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX23
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. The following is a full list of i.MX23
+ clocks and IDs.
+
+ Clock ID
+ ------------------
+ ref_xtal 0
+ pll 1
+ ref_cpu 2
+ ref_emi 3
+ ref_pix 4
+ ref_io 5
+ saif_sel 6
+ lcdif_sel 7
+ gpmi_sel 8
+ ssp_sel 9
+ emi_sel 10
+ cpu 11
+ etm_sel 12
+ cpu_pll 13
+ cpu_xtal 14
+ hbus 15
+ xbus 16
+ lcdif_div 17
+ ssp_div 18
+ gpmi_div 19
+ emi_pll 20
+ emi_xtal 21
+ etm_div 22
+ saif_div 23
+ clk32k_div 24
+ rtc 25
+ adc 26
+ spdif_div 27
+ clk32k 28
+ dri 29
+ pwm 30
+ filt 31
+ uart 32
+ ssp 33
+ gpmi 34
+ spdif 35
+ emi 36
+ saif 37
+ lcdif 38
+ etm 39
+ usb 40
+ usb_phy 41
+
+properties:
+ compatible:
+ const: fsl,imx23-clkctrl
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@80040000 {
+ compatible = "fsl,imx23-clkctrl";
+ reg = <0x80040000 0x2000>;
+ #clock-cells = <1>;
+ };
+
+ serial@8006c000 {
+ compatible = "fsl,imx23-auart";
+ reg = <0x8006c000 0x2000>;
+ interrupts = <24 25 23>;
+ clocks = <&clks 32>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx25-clock.txt b/Documentation/devicetree/bindings/clock/imx25-clock.txt
deleted file mode 100644
index f8135ea9ca4e..000000000000
--- a/Documentation/devicetree/bindings/clock/imx25-clock.txt
+++ /dev/null
@@ -1,160 +0,0 @@
-* Clock bindings for Freescale i.MX25
-
-Required properties:
-- compatible: Should be "fsl,imx25-ccm"
-- reg: Address and length of the register set
-- interrupts: Should contain CCM interrupt
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. The following is a full list of i.MX25
-clocks and IDs.
-
- Clock ID
- ---------------------------
- dummy 0
- osc 1
- mpll 2
- upll 3
- mpll_cpu_3_4 4
- cpu_sel 5
- cpu 6
- ahb 7
- usb_div 8
- ipg 9
- per0_sel 10
- per1_sel 11
- per2_sel 12
- per3_sel 13
- per4_sel 14
- per5_sel 15
- per6_sel 16
- per7_sel 17
- per8_sel 18
- per9_sel 19
- per10_sel 20
- per11_sel 21
- per12_sel 22
- per13_sel 23
- per14_sel 24
- per15_sel 25
- per0 26
- per1 27
- per2 28
- per3 29
- per4 30
- per5 31
- per6 32
- per7 33
- per8 34
- per9 35
- per10 36
- per11 37
- per12 38
- per13 39
- per14 40
- per15 41
- csi_ipg_per 42
- epit_ipg_per 43
- esai_ipg_per 44
- esdhc1_ipg_per 45
- esdhc2_ipg_per 46
- gpt_ipg_per 47
- i2c_ipg_per 48
- lcdc_ipg_per 49
- nfc_ipg_per 50
- owire_ipg_per 51
- pwm_ipg_per 52
- sim1_ipg_per 53
- sim2_ipg_per 54
- ssi1_ipg_per 55
- ssi2_ipg_per 56
- uart_ipg_per 57
- ata_ahb 58
- reserved 59
- csi_ahb 60
- emi_ahb 61
- esai_ahb 62
- esdhc1_ahb 63
- esdhc2_ahb 64
- fec_ahb 65
- lcdc_ahb 66
- rtic_ahb 67
- sdma_ahb 68
- slcdc_ahb 69
- usbotg_ahb 70
- reserved 71
- reserved 72
- reserved 73
- reserved 74
- can1_ipg 75
- can2_ipg 76
- csi_ipg 77
- cspi1_ipg 78
- cspi2_ipg 79
- cspi3_ipg 80
- dryice_ipg 81
- ect_ipg 82
- epit1_ipg 83
- epit2_ipg 84
- reserved 85
- esdhc1_ipg 86
- esdhc2_ipg 87
- fec_ipg 88
- reserved 89
- reserved 90
- reserved 91
- gpt1_ipg 92
- gpt2_ipg 93
- gpt3_ipg 94
- gpt4_ipg 95
- reserved 96
- reserved 97
- reserved 98
- iim_ipg 99
- reserved 100
- reserved 101
- kpp_ipg 102
- lcdc_ipg 103
- reserved 104
- pwm1_ipg 105
- pwm2_ipg 106
- pwm3_ipg 107
- pwm4_ipg 108
- rngb_ipg 109
- reserved 110
- scc_ipg 111
- sdma_ipg 112
- sim1_ipg 113
- sim2_ipg 114
- slcdc_ipg 115
- spba_ipg 116
- ssi1_ipg 117
- ssi2_ipg 118
- tsc_ipg 119
- uart1_ipg 120
- uart2_ipg 121
- uart3_ipg 122
- uart4_ipg 123
- uart5_ipg 124
- reserved 125
- wdt_ipg 126
- cko_div 127
- cko_sel 128
- cko 129
-
-Examples:
-
-clks: ccm@53f80000 {
- compatible = "fsl,imx25-ccm";
- reg = <0x53f80000 0x4000>;
- interrupts = <31>;
-};
-
-uart1: serial@43f90000 {
- compatible = "fsl,imx25-uart", "fsl,imx21-uart";
- reg = <0x43f90000 0x4000>;
- interrupts = <45>;
- clocks = <&clks 79>, <&clks 50>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx25-clock.yaml b/Documentation/devicetree/bindings/clock/imx25-clock.yaml
new file mode 100644
index 000000000000..2a2b10778e72
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx25-clock.yaml
@@ -0,0 +1,186 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx25-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX25
+
+maintainers:
+ - Sascha Hauer <s.hauer@pengutronix.de>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. The following is a full list of i.MX25
+ clocks and IDs.
+
+ Clock ID
+ --------------------------
+ dummy 0
+ osc 1
+ mpll 2
+ upll 3
+ mpll_cpu_3_4 4
+ cpu_sel 5
+ cpu 6
+ ahb 7
+ usb_div 8
+ ipg 9
+ per0_sel 10
+ per1_sel 11
+ per2_sel 12
+ per3_sel 13
+ per4_sel 14
+ per5_sel 15
+ per6_sel 16
+ per7_sel 17
+ per8_sel 18
+ per9_sel 19
+ per10_sel 20
+ per11_sel 21
+ per12_sel 22
+ per13_sel 23
+ per14_sel 24
+ per15_sel 25
+ per0 26
+ per1 27
+ per2 28
+ per3 29
+ per4 30
+ per5 31
+ per6 32
+ per7 33
+ per8 34
+ per9 35
+ per10 36
+ per11 37
+ per12 38
+ per13 39
+ per14 40
+ per15 41
+ csi_ipg_per 42
+ epit_ipg_per 43
+ esai_ipg_per 44
+ esdhc1_ipg_per 45
+ esdhc2_ipg_per 46
+ gpt_ipg_per 47
+ i2c_ipg_per 48
+ lcdc_ipg_per 49
+ nfc_ipg_per 50
+ owire_ipg_per 51
+ pwm_ipg_per 52
+ sim1_ipg_per 53
+ sim2_ipg_per 54
+ ssi1_ipg_per 55
+ ssi2_ipg_per 56
+ uart_ipg_per 57
+ ata_ahb 58
+ reserved 59
+ csi_ahb 60
+ emi_ahb 61
+ esai_ahb 62
+ esdhc1_ahb 63
+ esdhc2_ahb 64
+ fec_ahb 65
+ lcdc_ahb 66
+ rtic_ahb 67
+ sdma_ahb 68
+ slcdc_ahb 69
+ usbotg_ahb 70
+ reserved 71
+ reserved 72
+ reserved 73
+ reserved 74
+ can1_ipg 75
+ can2_ipg 76
+ csi_ipg 77
+ cspi1_ipg 78
+ cspi2_ipg 79
+ cspi3_ipg 80
+ dryice_ipg 81
+ ect_ipg 82
+ epit1_ipg 83
+ epit2_ipg 84
+ reserved 85
+ esdhc1_ipg 86
+ esdhc2_ipg 87
+ fec_ipg 88
+ reserved 89
+ reserved 90
+ reserved 91
+ gpt1_ipg 92
+ gpt2_ipg 93
+ gpt3_ipg 94
+ gpt4_ipg 95
+ reserved 96
+ reserved 97
+ reserved 98
+ iim_ipg 99
+ reserved 100
+ reserved 101
+ kpp_ipg 102
+ lcdc_ipg 103
+ reserved 104
+ pwm1_ipg 105
+ pwm2_ipg 106
+ pwm3_ipg 107
+ pwm4_ipg 108
+ rngb_ipg 109
+ reserved 110
+ scc_ipg 111
+ sdma_ipg 112
+ sim1_ipg 113
+ sim2_ipg 114
+ slcdc_ipg 115
+ spba_ipg 116
+ ssi1_ipg 117
+ ssi2_ipg 118
+ tsc_ipg 119
+ uart1_ipg 120
+ uart2_ipg 121
+ uart3_ipg 122
+ uart4_ipg 123
+ uart5_ipg 124
+ reserved 125
+ wdt_ipg 126
+ cko_div 127
+ cko_sel 128
+ cko 129
+
+properties:
+ compatible:
+ const: fsl,imx25-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@53f80000 {
+ compatible = "fsl,imx25-ccm";
+ reg = <0x53f80000 0x4000>;
+ interrupts = <31>;
+ #clock-cells = <1>;
+ };
+
+ serial@43f90000 {
+ compatible = "fsl,imx25-uart", "fsl,imx21-uart";
+ reg = <0x43f90000 0x4000>;
+ interrupts = <45>;
+ clocks = <&clks 79>, <&clks 50>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.txt b/Documentation/devicetree/bindings/clock/imx27-clock.txt
deleted file mode 100644
index 4c95c048d3b2..000000000000
--- a/Documentation/devicetree/bindings/clock/imx27-clock.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Clock bindings for Freescale i.MX27
-
-Required properties:
-- compatible: Should be "fsl,imx27-ccm"
-- reg: Address and length of the register set
-- interrupts: Should contain CCM interrupt
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx27-clock.h
-for the full list of i.MX27 clock IDs.
-
-Examples:
- clks: ccm@10027000{
- compatible = "fsl,imx27-ccm";
- reg = <0x10027000 0x1000>;
- #clock-cells = <1>;
- };
-
- uart1: serial@1000a000 {
- compatible = "fsl,imx27-uart", "fsl,imx21-uart";
- reg = <0x1000a000 0x1000>;
- interrupts = <20>;
- clocks = <&clks IMX27_CLK_UART1_IPG_GATE>,
- <&clks IMX27_CLK_PER1_GATE>;
- clock-names = "ipg", "per";
- };
diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.yaml b/Documentation/devicetree/bindings/clock/imx27-clock.yaml
new file mode 100644
index 000000000000..b5f3ed084ea0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx27-clock.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx27-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX27
+
+maintainers:
+ - Fabio Estevam <fabio.estevam@freescale.com>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx27-clock.h
+ for the full list of i.MX27 clock IDs.
+
+properties:
+ compatible:
+ const: fsl,imx27-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx27-clock.h>
+
+ clock-controller@10027000 {
+ compatible = "fsl,imx27-ccm";
+ reg = <0x10027000 0x1000>;
+ interrupts = <31>;
+ #clock-cells = <1>;
+ };
+
+ serial@1000a000 {
+ compatible = "fsl,imx27-uart", "fsl,imx21-uart";
+ reg = <0x1000a000 0x1000>;
+ interrupts = <20>;
+ clocks = <&clks IMX27_CLK_UART1_IPG_GATE>,
+ <&clks IMX27_CLK_PER1_GATE>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx28-clock.txt b/Documentation/devicetree/bindings/clock/imx28-clock.txt
deleted file mode 100644
index d84a37d2885f..000000000000
--- a/Documentation/devicetree/bindings/clock/imx28-clock.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-* Clock bindings for Freescale i.MX28
-
-Required properties:
-- compatible: Should be "fsl,imx28-clkctrl"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. The following is a full list of i.MX28
-clocks and IDs.
-
- Clock ID
- ------------------
- ref_xtal 0
- pll0 1
- pll1 2
- pll2 3
- ref_cpu 4
- ref_emi 5
- ref_io0 6
- ref_io1 7
- ref_pix 8
- ref_hsadc 9
- ref_gpmi 10
- saif0_sel 11
- saif1_sel 12
- gpmi_sel 13
- ssp0_sel 14
- ssp1_sel 15
- ssp2_sel 16
- ssp3_sel 17
- emi_sel 18
- etm_sel 19
- lcdif_sel 20
- cpu 21
- ptp_sel 22
- cpu_pll 23
- cpu_xtal 24
- hbus 25
- xbus 26
- ssp0_div 27
- ssp1_div 28
- ssp2_div 29
- ssp3_div 30
- gpmi_div 31
- emi_pll 32
- emi_xtal 33
- lcdif_div 34
- etm_div 35
- ptp 36
- saif0_div 37
- saif1_div 38
- clk32k_div 39
- rtc 40
- lradc 41
- spdif_div 42
- clk32k 43
- pwm 44
- uart 45
- ssp0 46
- ssp1 47
- ssp2 48
- ssp3 49
- gpmi 50
- spdif 51
- emi 52
- saif0 53
- saif1 54
- lcdif 55
- etm 56
- fec 57
- can0 58
- can1 59
- usb0 60
- usb1 61
- usb0_phy 62
- usb1_phy 63
- enet_out 64
-
-Examples:
-
-clks: clkctrl@80040000 {
- compatible = "fsl,imx28-clkctrl";
- reg = <0x80040000 0x2000>;
- #clock-cells = <1>;
-};
-
-auart0: serial@8006a000 {
- compatible = "fsl,imx28-auart", "fsl,imx23-auart";
- reg = <0x8006a000 0x2000>;
- interrupts = <112 70 71>;
- clocks = <&clks 45>;
-};
diff --git a/Documentation/devicetree/bindings/clock/imx28-clock.yaml b/Documentation/devicetree/bindings/clock/imx28-clock.yaml
new file mode 100644
index 000000000000..72328d5ca09a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx28-clock.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx28-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX28
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. The following is a full list of i.MX28
+ clocks and IDs.
+
+ Clock ID
+ ------------------
+ ref_xtal 0
+ pll0 1
+ pll1 2
+ pll2 3
+ ref_cpu 4
+ ref_emi 5
+ ref_io0 6
+ ref_io1 7
+ ref_pix 8
+ ref_hsadc 9
+ ref_gpmi 10
+ saif0_sel 11
+ saif1_sel 12
+ gpmi_sel 13
+ ssp0_sel 14
+ ssp1_sel 15
+ ssp2_sel 16
+ ssp3_sel 17
+ emi_sel 18
+ etm_sel 19
+ lcdif_sel 20
+ cpu 21
+ ptp_sel 22
+ cpu_pll 23
+ cpu_xtal 24
+ hbus 25
+ xbus 26
+ ssp0_div 27
+ ssp1_div 28
+ ssp2_div 29
+ ssp3_div 30
+ gpmi_div 31
+ emi_pll 32
+ emi_xtal 33
+ lcdif_div 34
+ etm_div 35
+ ptp 36
+ saif0_div 37
+ saif1_div 38
+ clk32k_div 39
+ rtc 40
+ lradc 41
+ spdif_div 42
+ clk32k 43
+ pwm 44
+ uart 45
+ ssp0 46
+ ssp1 47
+ ssp2 48
+ ssp3 49
+ gpmi 50
+ spdif 51
+ emi 52
+ saif0 53
+ saif1 54
+ lcdif 55
+ etm 56
+ fec 57
+ can0 58
+ can1 59
+ usb0 60
+ usb1 61
+ usb0_phy 62
+ usb1_phy 63
+ enet_out 64
+
+properties:
+ compatible:
+ const: fsl,imx28-clkctrl
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@80040000 {
+ compatible = "fsl,imx28-clkctrl";
+ reg = <0x80040000 0x2000>;
+ #clock-cells = <1>;
+ };
+
+ serial@8006a000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
+ reg = <0x8006a000 0x2000>;
+ interrupts = <112 70 71>;
+ clocks = <&clks 45>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt
deleted file mode 100644
index 0a291090e562..000000000000
--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt
+++ /dev/null
@@ -1,90 +0,0 @@
-* Clock bindings for Freescale i.MX31
-
-Required properties:
-- compatible: Should be "fsl,imx31-ccm"
-- reg: Address and length of the register set
-- interrupts: Should contain CCM interrupt
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. The following is a full list of i.MX31
-clocks and IDs.
-
- Clock ID
- -----------------------
- dummy 0
- ckih 1
- ckil 2
- mpll 3
- spll 4
- upll 5
- mcu_main 6
- hsp 7
- ahb 8
- nfc 9
- ipg 10
- per_div 11
- per 12
- csi_sel 13
- fir_sel 14
- csi_div 15
- usb_div_pre 16
- usb_div_post 17
- fir_div_pre 18
- fir_div_post 19
- sdhc1_gate 20
- sdhc2_gate 21
- gpt_gate 22
- epit1_gate 23
- epit2_gate 24
- iim_gate 25
- ata_gate 26
- sdma_gate 27
- cspi3_gate 28
- rng_gate 29
- uart1_gate 30
- uart2_gate 31
- ssi1_gate 32
- i2c1_gate 33
- i2c2_gate 34
- i2c3_gate 35
- hantro_gate 36
- mstick1_gate 37
- mstick2_gate 38
- csi_gate 39
- rtc_gate 40
- wdog_gate 41
- pwm_gate 42
- sim_gate 43
- ect_gate 44
- usb_gate 45
- kpp_gate 46
- ipu_gate 47
- uart3_gate 48
- uart4_gate 49
- uart5_gate 50
- owire_gate 51
- ssi2_gate 52
- cspi1_gate 53
- cspi2_gate 54
- gacc_gate 55
- emi_gate 56
- rtic_gate 57
- firi_gate 58
-
-Examples:
-
-clks: ccm@53f80000{
- compatible = "fsl,imx31-ccm";
- reg = <0x53f80000 0x4000>;
- interrupts = <31>, <53>;
- #clock-cells = <1>;
-};
-
-uart1: serial@43f90000 {
- compatible = "fsl,imx31-uart", "fsl,imx21-uart";
- reg = <0x43f90000 0x4000>;
- interrupts = <45>;
- clocks = <&clks 10>, <&clks 30>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.yaml b/Documentation/devicetree/bindings/clock/imx31-clock.yaml
new file mode 100644
index 000000000000..1b6f75d3928a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx31-clock.yaml
@@ -0,0 +1,120 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx31-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX31
+
+maintainers:
+ - Fabio Estevam <fabio.estevam@freescale.com>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. The following is a full list of i.MX31
+ clocks and IDs.
+
+ Clock ID
+ -----------------------
+ dummy 0
+ ckih 1
+ ckil 2
+ mpll 3
+ spll 4
+ upll 5
+ mcu_main 6
+ hsp 7
+ ahb 8
+ nfc 9
+ ipg 10
+ per_div 11
+ per 12
+ csi_sel 13
+ fir_sel 14
+ csi_div 15
+ usb_div_pre 16
+ usb_div_post 17
+ fir_div_pre 18
+ fir_div_post 19
+ sdhc1_gate 20
+ sdhc2_gate 21
+ gpt_gate 22
+ epit1_gate 23
+ epit2_gate 24
+ iim_gate 25
+ ata_gate 26
+ sdma_gate 27
+ cspi3_gate 28
+ rng_gate 29
+ uart1_gate 30
+ uart2_gate 31
+ ssi1_gate 32
+ i2c1_gate 33
+ i2c2_gate 34
+ i2c3_gate 35
+ hantro_gate 36
+ mstick1_gate 37
+ mstick2_gate 38
+ csi_gate 39
+ rtc_gate 40
+ wdog_gate 41
+ pwm_gate 42
+ sim_gate 43
+ ect_gate 44
+ usb_gate 45
+ kpp_gate 46
+ ipu_gate 47
+ uart3_gate 48
+ uart4_gate 49
+ uart5_gate 50
+ owire_gate 51
+ ssi2_gate 52
+ cspi1_gate 53
+ cspi2_gate 54
+ gacc_gate 55
+ emi_gate 56
+ rtic_gate 57
+ firi_gate 58
+
+properties:
+ compatible:
+ const: fsl,imx31-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: The CCM provides 2 interrupt requests. Request 1 generates an
+ interrupt for DVFS when a frequency change is requested, and request 2
+ generates an interrupt for DPTC when a voltage change is requested.
+ items:
+ - description: CCM DVFS interrupt request 1
+ - description: CCM DPTC interrupt request 2
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@53f80000 {
+ compatible = "fsl,imx31-ccm";
+ reg = <0x53f80000 0x4000>;
+ interrupts = <31>, <53>;
+ #clock-cells = <1>;
+ };
+
+ serial@43f90000 {
+ compatible = "fsl,imx31-uart", "fsl,imx21-uart";
+ reg = <0x43f90000 0x4000>;
+ interrupts = <45>;
+ clocks = <&clks 10>, <&clks 30>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx35-clock.txt b/Documentation/devicetree/bindings/clock/imx35-clock.txt
deleted file mode 100644
index f49783213c56..000000000000
--- a/Documentation/devicetree/bindings/clock/imx35-clock.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-* Clock bindings for Freescale i.MX35
-
-Required properties:
-- compatible: Should be "fsl,imx35-ccm"
-- reg: Address and length of the register set
-- interrupts: Should contain CCM interrupt
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. The following is a full list of i.MX35
-clocks and IDs.
-
- Clock ID
- ---------------------------
- ckih 0
- mpll 1
- ppll 2
- mpll_075 3
- arm 4
- hsp 5
- hsp_div 6
- hsp_sel 7
- ahb 8
- ipg 9
- arm_per_div 10
- ahb_per_div 11
- ipg_per 12
- uart_sel 13
- uart_div 14
- esdhc_sel 15
- esdhc1_div 16
- esdhc2_div 17
- esdhc3_div 18
- spdif_sel 19
- spdif_div_pre 20
- spdif_div_post 21
- ssi_sel 22
- ssi1_div_pre 23
- ssi1_div_post 24
- ssi2_div_pre 25
- ssi2_div_post 26
- usb_sel 27
- usb_div 28
- nfc_div 29
- asrc_gate 30
- pata_gate 31
- audmux_gate 32
- can1_gate 33
- can2_gate 34
- cspi1_gate 35
- cspi2_gate 36
- ect_gate 37
- edio_gate 38
- emi_gate 39
- epit1_gate 40
- epit2_gate 41
- esai_gate 42
- esdhc1_gate 43
- esdhc2_gate 44
- esdhc3_gate 45
- fec_gate 46
- gpio1_gate 47
- gpio2_gate 48
- gpio3_gate 49
- gpt_gate 50
- i2c1_gate 51
- i2c2_gate 52
- i2c3_gate 53
- iomuxc_gate 54
- ipu_gate 55
- kpp_gate 56
- mlb_gate 57
- mshc_gate 58
- owire_gate 59
- pwm_gate 60
- rngc_gate 61
- rtc_gate 62
- rtic_gate 63
- scc_gate 64
- sdma_gate 65
- spba_gate 66
- spdif_gate 67
- ssi1_gate 68
- ssi2_gate 69
- uart1_gate 70
- uart2_gate 71
- uart3_gate 72
- usbotg_gate 73
- wdog_gate 74
- max_gate 75
- admux_gate 76
- csi_gate 77
- csi_div 78
- csi_sel 79
- iim_gate 80
- gpu2d_gate 81
- ckli_gate 82
-
-Examples:
-
-clks: ccm@53f80000 {
- compatible = "fsl,imx35-ccm";
- reg = <0x53f80000 0x4000>;
- interrupts = <31>;
- #clock-cells = <1>;
-};
-
-esdhc1: esdhc@53fb4000 {
- compatible = "fsl,imx35-esdhc";
- reg = <0x53fb4000 0x4000>;
- interrupts = <7>;
- clocks = <&clks 9>, <&clks 8>, <&clks 43>;
- clock-names = "ipg", "ahb", "per";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx35-clock.yaml b/Documentation/devicetree/bindings/clock/imx35-clock.yaml
new file mode 100644
index 000000000000..bd871da6fc7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx35-clock.yaml
@@ -0,0 +1,139 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx35-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX35
+
+maintainers:
+ - Steffen Trumtrar <s.trumtrar@pengutronix.de>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. The following is a full list of i.MX35
+ clocks and IDs.
+
+ Clock ID
+ ---------------------------
+ ckih 0
+ mpll 1
+ ppll 2
+ mpll_075 3
+ arm 4
+ hsp 5
+ hsp_div 6
+ hsp_sel 7
+ ahb 8
+ ipg 9
+ arm_per_div 10
+ ahb_per_div 11
+ ipg_per 12
+ uart_sel 13
+ uart_div 14
+ esdhc_sel 15
+ esdhc1_div 16
+ esdhc2_div 17
+ esdhc3_div 18
+ spdif_sel 19
+ spdif_div_pre 20
+ spdif_div_post 21
+ ssi_sel 22
+ ssi1_div_pre 23
+ ssi1_div_post 24
+ ssi2_div_pre 25
+ ssi2_div_post 26
+ usb_sel 27
+ usb_div 28
+ nfc_div 29
+ asrc_gate 30
+ pata_gate 31
+ audmux_gate 32
+ can1_gate 33
+ can2_gate 34
+ cspi1_gate 35
+ cspi2_gate 36
+ ect_gate 37
+ edio_gate 38
+ emi_gate 39
+ epit1_gate 40
+ epit2_gate 41
+ esai_gate 42
+ esdhc1_gate 43
+ esdhc2_gate 44
+ esdhc3_gate 45
+ fec_gate 46
+ gpio1_gate 47
+ gpio2_gate 48
+ gpio3_gate 49
+ gpt_gate 50
+ i2c1_gate 51
+ i2c2_gate 52
+ i2c3_gate 53
+ iomuxc_gate 54
+ ipu_gate 55
+ kpp_gate 56
+ mlb_gate 57
+ mshc_gate 58
+ owire_gate 59
+ pwm_gate 60
+ rngc_gate 61
+ rtc_gate 62
+ rtic_gate 63
+ scc_gate 64
+ sdma_gate 65
+ spba_gate 66
+ spdif_gate 67
+ ssi1_gate 68
+ ssi2_gate 69
+ uart1_gate 70
+ uart2_gate 71
+ uart3_gate 72
+ usbotg_gate 73
+ wdog_gate 74
+ max_gate 75
+ admux_gate 76
+ csi_gate 77
+ csi_div 78
+ csi_sel 79
+ iim_gate 80
+ gpu2d_gate 81
+ ckli_gate 82
+
+properties:
+ compatible:
+ const: fsl,imx35-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@53f80000 {
+ compatible = "fsl,imx35-ccm";
+ reg = <0x53f80000 0x4000>;
+ interrupts = <31>;
+ #clock-cells = <1>;
+ };
+
+ esdhc@53fb4000 {
+ compatible = "fsl,imx35-esdhc";
+ reg = <0x53fb4000 0x4000>;
+ interrupts = <7>;
+ clocks = <&clks 9>, <&clks 8>, <&clks 43>;
+ clock-names = "ipg", "ahb", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.txt b/Documentation/devicetree/bindings/clock/imx5-clock.txt
deleted file mode 100644
index a24ca9e582d2..000000000000
--- a/Documentation/devicetree/bindings/clock/imx5-clock.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Clock bindings for Freescale i.MX5
-
-Required properties:
-- compatible: Should be "fsl,<soc>-ccm" , where <soc> can be imx51 or imx53
-- reg: Address and length of the register set
-- interrupts: Should contain CCM interrupt
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx5-clock.h
-for the full list of i.MX5 clock IDs.
-
-Examples (for mx53):
-
-clks: ccm@53fd4000{
- compatible = "fsl,imx53-ccm";
- reg = <0x53fd4000 0x4000>;
- interrupts = <0 71 0x04 0 72 0x04>;
- #clock-cells = <1>;
-};
-
-can1: can@53fc8000 {
- compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
- reg = <0x53fc8000 0x4000>;
- interrupts = <82>;
- clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>, <&clks IMX5_CLK_CAN1_SERIAL_GATE>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.yaml b/Documentation/devicetree/bindings/clock/imx5-clock.yaml
new file mode 100644
index 000000000000..f5c2b3d7a910
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx5-clock.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx5-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX5
+
+maintainers:
+ - Fabio Estevam <fabio.estevam@freescale.com>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx5-clock.h
+ for the full list of i.MX5 clock IDs.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx53-ccm
+ - fsl,imx51-ccm
+ - fsl,imx50-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: The CCM provides 2 interrupt requests. Request 1 generates an
+ interrupt on a frequency or mux change, and request 2 generates an
+ interrupt on an oscillator read or PLL lock.
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx5-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@53fd4000{
+ compatible = "fsl,imx53-ccm";
+ reg = <0x53fd4000 0x4000>;
+ interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>,
+ <0 72 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
+
+ can@53fc8000 {
+ compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ reg = <0x53fc8000 0x4000>;
+ interrupts = <82>;
+ clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>, <&clks IMX5_CLK_CAN1_SERIAL_GATE>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.txt b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
deleted file mode 100644
index 13d36d4c6991..000000000000
--- a/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Clock bindings for Freescale i.MX6 Quad
-
-Required properties:
-- compatible: Should be "fsl,imx6q-ccm"
-- reg: Address and length of the register set
-- interrupts: Should contain CCM interrupt
-- #clock-cells: Should be <1>
-
-Optional properties:
-- fsl,pmic-stby-poweroff: Configure CCM to assert PMIC_STBY_REQ signal
- on power off.
- Use this property if the SoC should be powered off by external power
- management IC (PMIC) triggered via PMIC_STBY_REQ signal.
- Boards that are designed to initiate poweroff on PMIC_ON_REQ signal should
- be using "syscon-poweroff" driver instead.
-- clocks: list of clock specifiers, must contain an entry for each entry
- in clock-names
-- clock-names: valid names are "osc", "ckil", "ckih1", "anaclk1" and "anaclk2"
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6qdl-clock.h
-for the full list of i.MX6 Quad and DualLite clock IDs.
-
-Examples:
-
-#include <dt-bindings/clock/imx6qdl-clock.h>
-
-clks: ccm@20c4000 {
- compatible = "fsl,imx6q-ccm";
- reg = <0x020c4000 0x4000>;
- interrupts = <0 87 0x04 0 88 0x04>;
- #clock-cells = <1>;
-};
-
-uart1: serial@2020000 {
- compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x02020000 0x4000>;
- interrupts = <0 26 0x04>;
- clocks = <&clks IMX6QDL_CLK_UART_IPG>, <&clks IMX6QDL_CLK_UART_SERIAL>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.yaml b/Documentation/devicetree/bindings/clock/imx6q-clock.yaml
new file mode 100644
index 000000000000..429e3b62b965
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6q-clock.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx6q-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX6 Quad
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6q-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: The CCM provides 2 interrupt requests. Request 1 generates an
+ interrupt on a frequency or mux change, and request 2 generates an
+ interrupt on an oscillator read or PLL lock.
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+ maxItems: 2
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: 24m osc
+ - description: 32k osc
+ - description: ckih1 clock input
+ - description: anaclk1 clock input
+ - description: anaclk2 clock input
+
+ clock-names:
+ items:
+ - const: osc
+ - const: ckil
+ - const: ckih1
+ - const: anaclk1
+ - const: anaclk2
+
+ fsl,pmic-stby-poweroff:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ Use this property if the SoC should be powered off by external power
+ management IC (PMIC) triggered via PMIC_STBY_REQ signal.
+ Boards that are designed to initiate poweroff on PMIC_ON_REQ signal should
+ be using "syscon-poweroff" driver instead.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+examples:
+ # Clock Control Module node:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@20c4000 {
+ compatible = "fsl,imx6q-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
+ <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
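
The schema example above only shows the CCM itself. For a consumer view, the UART node from the removed text binding adapts to the sketch below; the clock IDs come from include/dt-bindings/clock/imx6qdl-clock.h and &clks is assumed to label the CCM node shown above.

    #include <dt-bindings/clock/imx6qdl-clock.h>

    serial@2020000 {
        compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
        reg = <0x02020000 0x4000>;
        interrupts = <0 26 0x04>;
        /* ipg and per clocks taken from the i.MX6Q CCM */
        clocks = <&clks IMX6QDL_CLK_UART_IPG>, <&clks IMX6QDL_CLK_UART_SERIAL>;
        clock-names = "ipg", "per";
    };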
diff --git a/Documentation/devicetree/bindings/clock/imx6sl-clock.txt b/Documentation/devicetree/bindings/clock/imx6sl-clock.txt
deleted file mode 100644
index 15e40bdf147d..000000000000
--- a/Documentation/devicetree/bindings/clock/imx6sl-clock.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-* Clock bindings for Freescale i.MX6 SoloLite
-
-Required properties:
-- compatible: Should be "fsl,imx6sl-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6sl-clock.h
-for the full list of i.MX6 SoloLite clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml b/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml
new file mode 100644
index 000000000000..135568c46350
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6sl-clock.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx6sl-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX6 SoloLite
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6sl-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: The CCM provides 2 interrupt requests. Request 1 generates an
+ interrupt on a frequency or mux change, and request 2 generates an
+ interrupt on an oscillator read or PLL lock.
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+ maxItems: 2
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+
+examples:
+ # Clock Control Module node:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@20c4000 {
+ compatible = "fsl,imx6sl-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>,
+ <0 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx6sll-clock.txt b/Documentation/devicetree/bindings/clock/imx6sll-clock.txt
deleted file mode 100644
index fee849d5fdd1..000000000000
--- a/Documentation/devicetree/bindings/clock/imx6sll-clock.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-* Clock bindings for Freescale i.MX6 SLL
-
-Required properties:
-- compatible: Should be "fsl,imx6sll-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- clocks: list of clock specifiers, must contain an entry for each required
- entry in clock-names
-- clock-names: should include entries "ckil", "osc", "ipp_di0" and "ipp_di1"
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6sll-clock.h
-for the full list of i.MX6 SLL clock IDs.
-
-Examples:
-
-#include <dt-bindings/clock/imx6sll-clock.h>
-
-clks: clock-controller@20c4000 {
- compatible = "fsl,imx6sll-ccm";
- reg = <0x020c4000 0x4000>;
- interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
- #clock-cells = <1>;
- clocks = <&ckil>, <&osc>, <&ipp_di0>, <&ipp_di1>;
- clock-names = "ckil", "osc", "ipp_di0", "ipp_di1";
-};
-
-uart1: serial@2020000 {
- compatible = "fsl,imx6sl-uart", "fsl,imx6q-uart", "fsl,imx21-uart";
- reg = <0x02020000 0x4000>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SLL_CLK_UART1_IPG>,
- <&clks IMX6SLL_CLK_UART1_SERIAL>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml b/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml
new file mode 100644
index 000000000000..fa55f1ce3e57
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6sll-clock.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx6sll-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX6 SLL
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6sll-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+    description: The CCM provides two interrupt requests. Request 1 generates an
+      interrupt on a frequency or mux change, and request 2 generates an
+      interrupt on oscillator read or PLL lock.
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+ maxItems: 2
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: 32k osc
+ - description: 24m osc
+ - description: ipp_di0 clock input
+ - description: ipp_di1 clock input
+
+ clock-names:
+ items:
+ - const: ckil
+ - const: osc
+ - const: ipp_di0
+ - const: ipp_di1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+examples:
+ # Clock Control Module node:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@20c4000 {
+ compatible = "fsl,imx6sll-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc>, <&ipp_di0>, <&ipp_di1>;
+ clock-names = "ckil", "osc", "ipp_di0", "ipp_di1";
+ };
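The removed imx6sll-clock.txt carried a consumer example that the schema above
drops; it still illustrates how a peripheral selects clocks by ID from
include/dt-bindings/clock/imx6sll-clock.h, so a sketch of that node is kept here
for reference (taken from the deleted text; it assumes the CCM node is labelled
"clks" and that the GIC binding header is included):

    #include <dt-bindings/clock/imx6sll-clock.h>

    uart1: serial@2020000 {
        compatible = "fsl,imx6sl-uart", "fsl,imx6q-uart", "fsl,imx21-uart";
        reg = <0x02020000 0x4000>;
        interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clks IMX6SLL_CLK_UART1_IPG>,
                 <&clks IMX6SLL_CLK_UART1_SERIAL>;
        clock-names = "ipg", "per";
    };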
diff --git a/Documentation/devicetree/bindings/clock/imx6sx-clock.txt b/Documentation/devicetree/bindings/clock/imx6sx-clock.txt
deleted file mode 100644
index 22362b9b7ba3..000000000000
--- a/Documentation/devicetree/bindings/clock/imx6sx-clock.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-* Clock bindings for Freescale i.MX6 SoloX
-
-Required properties:
-- compatible: Should be "fsl,imx6sx-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- clocks: list of clock specifiers, must contain an entry for each required
- entry in clock-names
-- clock-names: should include entries "ckil", "osc", "ipp_di0" and "ipp_di1"
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6sx-clock.h
-for the full list of i.MX6 SoloX clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml b/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml
new file mode 100644
index 000000000000..982d698e8c54
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6sx-clock.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx6sx-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX6 SoloX
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6sx-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+    description: The CCM provides two interrupt requests. Request 1 generates an
+      interrupt on a frequency or mux change, and request 2 generates an
+      interrupt on oscillator read or PLL lock.
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+ maxItems: 2
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: 32k osc
+ - description: 24m osc
+ - description: ipp_di0 clock input
+ - description: ipp_di1 clock input
+ - description: anaclk1 clock input
+ - description: anaclk2 clock input
+
+ clock-names:
+ items:
+ - const: ckil
+ - const: osc
+ - const: ipp_di0
+ - const: ipp_di1
+ - const: anaclk1
+ - const: anaclk2
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+examples:
+ # Clock Control Module node:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@20c4000 {
+ compatible = "fsl,imx6sx-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc>, <&ipp_di0>, <&ipp_di1>, <&anaclk1>, <&anaclk2>;
+ clock-names = "ckil", "osc", "ipp_di0", "ipp_di1", "anaclk1", "anaclk2";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx6ul-clock.txt b/Documentation/devicetree/bindings/clock/imx6ul-clock.txt
deleted file mode 100644
index 571d5039f663..000000000000
--- a/Documentation/devicetree/bindings/clock/imx6ul-clock.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-* Clock bindings for Freescale i.MX6 UltraLite
-
-Required properties:
-- compatible: Should be "fsl,imx6ul-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- clocks: list of clock specifiers, must contain an entry for each required
- entry in clock-names
-- clock-names: should include entries "ckil", "osc", "ipp_di0" and "ipp_di1"
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6ul-clock.h
-for the full list of i.MX6 UltraLite clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml b/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml
new file mode 100644
index 000000000000..3c779eea6394
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6ul-clock.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx6ul-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX6 UltraLite
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ const: fsl,imx6ul-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+    description: The CCM provides two interrupt requests. Request 1 generates an
+      interrupt on a frequency or mux change, and request 2 generates an
+      interrupt on oscillator read or PLL lock.
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+ maxItems: 2
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: 32k osc
+ - description: 24m osc
+ - description: ipp_di0 clock input
+ - description: ipp_di1 clock input
+
+ clock-names:
+ items:
+ - const: ckil
+ - const: osc
+ - const: ipp_di0
+ - const: ipp_di1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#clock-cells'
+ - clocks
+ - clock-names
+
+examples:
+ # Clock Control Module node:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@20c4000 {
+ compatible = "fsl,imx6ul-ccm";
+ reg = <0x020c4000 0x4000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc>, <&ipp_di0>, <&ipp_di1>;
+ clock-names = "ckil", "osc", "ipp_di0", "ipp_di1";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx7d-clock.txt b/Documentation/devicetree/bindings/clock/imx7d-clock.txt
deleted file mode 100644
index 9d3026d81a68..000000000000
--- a/Documentation/devicetree/bindings/clock/imx7d-clock.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-* Clock bindings for Freescale i.MX7 Dual
-
-Required properties:
-- compatible: Should be "fsl,imx7d-ccm"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-- clocks: list of clock specifiers, must contain an entry for each required
- entry in clock-names
-- clock-names: should include entries "ckil", "osc"
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx7d-clock.h
-for the full list of i.MX7 Dual clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx7d-clock.yaml b/Documentation/devicetree/bindings/clock/imx7d-clock.yaml
new file mode 100644
index 000000000000..cefb61db01a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx7d-clock.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx7d-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Clock bindings for Freescale i.MX7 Dual
+
+maintainers:
+ - Frank Li <Frank.Li@nxp.com>
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx7d-clock.h
+ for the full list of i.MX7 Dual clock IDs.
+
+properties:
+ compatible:
+ const: fsl,imx7d-ccm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: CCM interrupt request 1
+ - description: CCM interrupt request 2
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ items:
+ - description: 32k osc
+ - description: 24m osc
+
+ clock-names:
+ items:
+ - const: ckil
+ - const: osc
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@30380000 {
+ compatible = "fsl,imx7d-ccm";
+ reg = <0x30380000 0x10000>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ #clock-cells = <1>;
+ clocks = <&ckil>, <&osc>;
+ clock-names = "ckil", "osc";
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt
deleted file mode 100644
index 965cfa42e025..000000000000
--- a/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-* NXP i.MX8QXP LPCG (Low-Power Clock Gating) Clock bindings
-
-The Low-Power Clock Gate (LPCG) modules contain a local programming
-model to control the clock gates for the peripherals. An LPCG module
-is used to locally gate the clocks for the associated peripheral.
-
-Note:
-This level of clock gating is provided after the clocks are generated
-by the SCU resources and clock controls. Thus even if the clock is
-enabled by these control bits, it might still not be running based
-on the base resource.
-
-Required properties:
-- compatible: Should be one of:
- "fsl,imx8qxp-lpcg-adma",
- "fsl,imx8qxp-lpcg-conn",
- "fsl,imx8qxp-lpcg-dc",
- "fsl,imx8qxp-lpcg-dsp",
- "fsl,imx8qxp-lpcg-gpu",
- "fsl,imx8qxp-lpcg-hsio",
- "fsl,imx8qxp-lpcg-img",
- "fsl,imx8qxp-lpcg-lsio",
- "fsl,imx8qxp-lpcg-vpu"
-- reg: Address and length of the register set
-- #clock-cells: Should be <1>
-
-The clock consumer should specify the desired clock by having the clock
-ID in its "clocks" phandle cell.
-See the full list of clock IDs from:
-include/dt-bindings/clock/imx8qxp-clock.h
-
-Examples:
-
-#include <dt-bindings/clock/imx8qxp-clock.h>
-
-conn_lpcg: clock-controller@5b200000 {
- compatible = "fsl,imx8qxp-lpcg-conn";
- reg = <0x5b200000 0xb0000>;
- #clock-cells = <1>;
-};
-
-usdhc1: mmc@5b010000 {
- compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x5b010000 0x10000>;
- clocks = <&conn_lpcg IMX8QXP_CONN_LPCG_SDHC0_IPG_CLK>,
- <&conn_lpcg IMX8QXP_CONN_LPCG_SDHC0_PER_CLK>,
- <&conn_lpcg IMX8QXP_CONN_LPCG_SDHC0_HCLK>;
- clock-names = "ipg", "per", "ahb";
-};
diff --git a/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml
new file mode 100644
index 000000000000..33f3010f48c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/imx8qxp-lpcg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8QXP LPCG (Low-Power Clock Gating) Clock bindings
+
+maintainers:
+ - Aisheng Dong <aisheng.dong@nxp.com>
+
+description: |
+ The Low-Power Clock Gate (LPCG) modules contain a local programming
+ model to control the clock gates for the peripherals. An LPCG module
+ is used to locally gate the clocks for the associated peripheral.
+
+ This level of clock gating is provided after the clocks are generated
+ by the SCU resources and clock controls. Thus even if the clock is
+ enabled by these control bits, it might still not be running based
+ on the base resource.
+
+ The clock consumer should specify the desired clock by having the clock
+ ID in its "clocks" phandle cell. See the full list of clock IDs from:
+ include/dt-bindings/clock/imx8-clock.h
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8qxp-lpcg-adma
+ - fsl,imx8qxp-lpcg-conn
+ - fsl,imx8qxp-lpcg-dc
+ - fsl,imx8qxp-lpcg-dsp
+ - fsl,imx8qxp-lpcg-gpu
+ - fsl,imx8qxp-lpcg-hsio
+ - fsl,imx8qxp-lpcg-img
+ - fsl,imx8qxp-lpcg-lsio
+ - fsl,imx8qxp-lpcg-vpu
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8-clock.h>
+ #include <dt-bindings/firmware/imx/rsrc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ clock-controller@5b200000 {
+ compatible = "fsl,imx8qxp-lpcg-conn";
+ reg = <0x5b200000 0xb0000>;
+ #clock-cells = <1>;
+ };
+
+ mmc@5b010000 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC0_IPG_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC0_PER_CLK>,
+ <&conn_lpcg IMX_CONN_LPCG_SDHC0_HCLK>;
+ clock-names = "ipg", "per", "ahb";
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt b/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
deleted file mode 100644
index 75598e655067..000000000000
--- a/Documentation/devicetree/bindings/clock/ingenic,cgu.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-Ingenic SoC CGU binding
-
-The CGU in an Ingenic SoC provides all the clocks generated on-chip. It
-typically includes a variety of PLLs, multiplexers, dividers & gates in order
-to provide many different clock signals derived from only 2 external source
-clocks.
-
-Required properties:
-- compatible : Should be one of:
- * ingenic,jz4740-cgu
- * ingenic,jz4725b-cgu
- * ingenic,jz4770-cgu
- * ingenic,jz4780-cgu
- * ingenic,x1000-cgu
-- reg : The address & length of the CGU registers.
-- clocks : List of phandle & clock specifiers for clocks external to the CGU.
- Two such external clocks should be specified - first the external crystal
- "ext" and second the RTC clock source "rtc".
-- clock-names : List of name strings for the external clocks.
-- #clock-cells: Should be 1.
- Clock consumers specify this argument to identify a clock. The valid values
- may be found in <dt-bindings/clock/<soctype>-cgu.h>.
-
-Example SoC include file:
-
-/ {
- cgu: jz4740-cgu {
- compatible = "ingenic,jz4740-cgu";
- reg = <0x10000000 0x100>;
- #clock-cells = <1>;
- };
-
- uart0: serial@10030000 {
- clocks = <&cgu JZ4740_CLK_UART0>;
- };
-};
-
-Example board file:
-
-/ {
- ext: clock@0 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <12000000>;
- };
-
- rtc: clock@1 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- };
-
- &cgu {
- clocks = <&ext> <&rtc>;
- clock-names: "ext", "rtc";
- };
-};
diff --git a/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml b/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml
new file mode 100644
index 000000000000..a952d5811823
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ingenic,cgu.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/ingenic,cgu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs CGU devicetree bindings
+
+description: |
+ The CGU in an Ingenic SoC provides all the clocks generated on-chip. It
+ typically includes a variety of PLLs, multiplexers, dividers & gates in order
+ to provide many different clock signals derived from only 2 external source
+ clocks.
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ingenic,jz4740-cgu
+ - ingenic,jz4725b-cgu
+ - ingenic,jz4770-cgu
+ - ingenic,jz4780-cgu
+ - ingenic,x1000-cgu
+ - ingenic,x1830-cgu
+ required:
+ - compatible
+
+properties:
+ $nodename:
+ pattern: "^clock-controller@[0-9a-f]+$"
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ "#clock-cells":
+ const: 1
+
+ ranges: true
+
+ compatible:
+ items:
+ - enum:
+ - ingenic,jz4740-cgu
+ - ingenic,jz4725b-cgu
+ - ingenic,jz4770-cgu
+ - ingenic,jz4780-cgu
+ - ingenic,x1000-cgu
+ - ingenic,x1830-cgu
+ - const: simple-mfd
+ minItems: 1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: External oscillator clock
+ - description: Internal 32 kHz RTC clock
+
+ clock-names:
+ items:
+ - const: ext
+ - enum:
+ - rtc
+ - osc32k # Different name, same clock
+
+ assigned-clocks:
+ minItems: 1
+ maxItems: 64
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 64
+
+ assigned-clock-rates:
+ minItems: 1
+ maxItems: 64
+
+required:
+ - "#clock-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+patternProperties:
+ "^usb-phy@[a-f0-9]+$":
+ allOf: [ $ref: "../usb/ingenic,jz4770-phy.yaml#" ]
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4770-cgu.h>
+ cgu: clock-controller@10000000 {
+ compatible = "ingenic,jz4770-cgu", "simple-mfd";
+ reg = <0x10000000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10000000 0x100>;
+
+ clocks = <&ext>, <&osc32k>;
+ clock-names = "ext", "osc32k";
+
+ #clock-cells = <1>;
+
+ otg_phy: usb-phy@3c {
+ compatible = "ingenic,jz4770-phy";
+ reg = <0x3c 0x10>;
+
+ clocks = <&cgu JZ4770_CLK_OTG_PHY>;
+
+ vcc-supply = <&ldo5>;
+
+ #phy-cells = <0>;
+ };
+ };
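The new example consumes &ext and &osc32k without showing where they come from;
the removed ingenic,cgu.txt defined them as board-level fixed clocks. A sketch
along the same lines, with the frequencies taken from the deleted text and the
node names merely illustrative:

    ext: clock-ext {
        compatible = "fixed-clock";
        #clock-cells = <0>;
        clock-frequency = <12000000>;
    };

    osc32k: clock-osc32k {
        compatible = "fixed-clock";
        #clock-cells = <0>;
        clock-frequency = <32768>;
    };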
diff --git a/Documentation/devicetree/bindings/clock/intel,agilex.yaml b/Documentation/devicetree/bindings/clock/intel,agilex.yaml
new file mode 100644
index 000000000000..cf5a9eb803e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/intel,agilex.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/intel,agilex.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel SoCFPGA Agilex platform clock controller binding
+
+maintainers:
+ - Dinh Nguyen <dinguyen@kernel.org>
+
+description:
+  The Intel Agilex clock controller is an integrated clock controller that
+  generates and supplies clocks to all modules.
+
+properties:
+ compatible:
+ const: intel,agilex-clkmgr
+
+ '#clock-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ # Clock controller node:
+ - |
+ clkmgr: clock-controller@ffd10000 {
+ compatible = "intel,agilex-clkmgr";
+ reg = <0xffd10000 0x1000>;
+ clocks = <&osc1>;
+ #clock-cells = <1>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml b/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml
new file mode 100644
index 000000000000..6dc1414bfb7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/intel,cgu-lgm.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/intel,cgu-lgm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Lightning Mountain SoC's Clock Controller (CGU) Binding
+
+maintainers:
+ - Rahul Tanwar <rahul.tanwar@linux.intel.com>
+
+description: |
+  The Lightning Mountain (LGM) SoC's Clock Generation Unit (CGU) driver provides
+  the means to access the CGU hardware module in order to generate a series
+  of clocks for the whole system and individual peripherals.
+
+  Please refer to the include/dt-bindings/clock/intel,lgm-clk.h header file; it
+  defines all available clocks as macros. These macros can be used in device
+  tree sources.
+
+properties:
+ compatible:
+ const: intel,cgu-lgm
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+examples:
+ - |
+ cgu: clock-controller@e0200000 {
+ compatible = "intel,cgu-lgm";
+ reg = <0xe0200000 0x33c>;
+ #clock-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml
new file mode 100644
index 000000000000..dffa73402da9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2-audio-clock.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/marvell,mmp2-audio-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MMP2 Audio Clock Controller
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+description: |
+ The audio clock controller generates and supplies the clocks to the audio
+ codec.
+
+ Each clock is assigned an identifier and client nodes use this identifier
+ to specify the clock which they consume.
+
+  All these identifiers can be found in
+  <dt-bindings/clock/marvell,mmp2-audio.h>.
+
+properties:
+ compatible:
+ enum:
+ - marvell,mmp2-audio-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Audio subsystem clock
+ - description: The crystal oscillator clock
+ - description: First I2S clock
+ - description: Second I2S clock
+
+ clock-names:
+ items:
+ - const: audio
+ - const: vctcxo
+ - const: i2s0
+ - const: i2s1
+
+ '#clock-cells':
+ const: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/marvell,mmp2-audio.h>
+ #include <dt-bindings/clock/marvell,mmp2.h>
+ #include <dt-bindings/power/marvell,mmp2.h>
+
+ clock-controller@d42a0c30 {
+ compatible = "marvell,mmp2-audio-clock";
+ reg = <0xd42a0c30 0x10>;
+ clock-names = "audio", "vctcxo", "i2s0", "i2s1";
+ clocks = <&soc_clocks MMP2_CLK_AUDIO>,
+ <&soc_clocks MMP2_CLK_VCTCXO>,
+ <&soc_clocks MMP2_CLK_I2S0>,
+ <&soc_clocks MMP2_CLK_I2S1>;
+ power-domains = <&soc_clocks MMP2_POWER_DOMAIN_AUDIO>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml b/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml
index e2b6ac96bbcb..d68f0d196e7d 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2-clock.yaml
@@ -42,12 +42,16 @@ properties:
'#reset-cells':
const: 1
+ '#power-domain-cells':
+ const: 1
+
required:
- compatible
- reg
- reg-names
- '#clock-cells'
- '#reset-cells'
+ - '#power-domain-cells'
additionalProperties: false
@@ -61,4 +65,5 @@ examples:
reg-names = "mpmu", "apmu", "apbc";
#clock-cells = <1>;
#reset-cells = <1>;
+ #power-domain-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.txt b/Documentation/devicetree/bindings/clock/qcom,a53pll.txt
deleted file mode 100644
index e3fa8118eaee..000000000000
--- a/Documentation/devicetree/bindings/clock/qcom,a53pll.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Qualcomm MSM8916 A53 PLL Binding
---------------------------------
-The A53 PLL on MSM8916 platforms is the main CPU PLL used used for frequencies
-above 1GHz.
-
-Required properties :
-- compatible : Shall contain only one of the following:
-
- "qcom,msm8916-a53pll"
-
-- reg : shall contain base register location and length
-
-- #clock-cells : must be set to <0>
-
-Example:
-
- a53pll: clock@b016000 {
- compatible = "qcom,msm8916-a53pll";
- reg = <0xb016000 0x40>;
- #clock-cells = <0>;
- };
-
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
new file mode 100644
index 000000000000..20d2638b4cd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,a53pll.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm A53 PLL Binding
+
+maintainers:
+ - Sivaprakash Murugesan <sivaprak@codeaurora.org>
+
+description:
+  The A53 PLL found on some Qualcomm platforms is the main CPU PLL used for
+  frequencies above 1 GHz.
+
+properties:
+ compatible:
+ const: qcom,msm8916-a53pll
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+  # Example 1 - A53 PLL found on MSM8916 devices
+ - |
+ a53pll: clock@b016000 {
+ compatible = "qcom,msm8916-a53pll";
+ reg = <0xb016000 0x40>;
+ #clock-cells = <0>;
+ };
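Since '#clock-cells' is 0, consumers reference the PLL output directly with no
index argument. A hedged sketch of such a consumer, loosely modelled on the
MSM8916 APCS mux that this PLL normally feeds (node contents are illustrative
and not part of this binding):

    apcs: mailbox@b011000 {
        compatible = "qcom,msm8916-apcs-kpss-global", "syscon";
        reg = <0xb011000 0x1000>;
        #mbox-cells = <1>;
        clocks = <&a53pll>;
        #clock-cells = <0>;
    };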
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
index a345320e0e49..a404c8fbee67 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sc7180.yaml
@@ -65,7 +65,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@100000 {
compatible = "qcom,gcc-sc7180";
- reg = <0 0x00100000 0 0x1f0000>;
+ reg = <0x00100000 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&rpmhcc RPMH_CXO_CLK_A>,
<&sleep_clk>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
index 36f3b3668ced..12766a866625 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8150.yaml
@@ -63,7 +63,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@100000 {
compatible = "qcom,gcc-sm8150";
- reg = <0 0x00100000 0 0x1f0000>;
+ reg = <0x00100000 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&sleep_clk>;
clock-names = "bi_tcxo", "sleep_clk";
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
index 2c40a8aa9815..a5766ff89082 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-sm8250.yaml
@@ -61,7 +61,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@100000 {
compatible = "qcom,gcc-sm8250";
- reg = <0 0x00100000 0 0x1f0000>;
+ reg = <0x00100000 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&sleep_clk>;
clock-names = "bi_tcxo", "sleep_clk";
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
index e533bb0cfd2b..ee0467fb5e31 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.yaml
@@ -22,6 +22,8 @@ description: |
- dt-bindings/reset/qcom,gcc-ipq6018.h
- dt-bindings/clock/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
- dt-bindings/reset/qcom,gcc-ipq806x.h (qcom,gcc-ipq8064)
+ - dt-bindings/clock/qcom,gcc-msm8939.h
+ - dt-bindings/reset/qcom,gcc-msm8939.h
- dt-bindings/clock/qcom,gcc-msm8660.h
- dt-bindings/reset/qcom,gcc-msm8660.h
- dt-bindings/clock/qcom,gcc-msm8974.h
@@ -41,6 +43,7 @@ properties:
- qcom,gcc-ipq8064
- qcom,gcc-msm8660
- qcom,gcc-msm8916
+ - qcom,gcc-msm8939
- qcom,gcc-msm8960
- qcom,gcc-msm8974
- qcom,gcc-msm8974pro
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
index f684fe67db84..1b16a863b355 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.yaml
@@ -15,15 +15,15 @@ description: |
power domains.
properties:
- compatible :
+ compatible:
enum:
- - qcom,mmcc-apq8064
- - qcom,mmcc-apq8084
- - qcom,mmcc-msm8660
- - qcom,mmcc-msm8960
- - qcom,mmcc-msm8974
- - qcom,mmcc-msm8996
- - qcom,mmcc-msm8998
+ - qcom,mmcc-apq8064
+ - qcom,mmcc-apq8084
+ - qcom,mmcc-msm8660
+ - qcom,mmcc-msm8960
+ - qcom,mmcc-msm8974
+ - qcom,mmcc-msm8996
+ - qcom,mmcc-msm8998
clocks:
items:
@@ -67,6 +67,10 @@ properties:
description:
Protected clock specifier list as per common clock binding
+ vdd-gfx-supply:
+ description:
+ Regulator supply for the GPU_GX GDSC
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
index 58cdfd5924d3..e94847f92770 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-dispcc.yaml
@@ -66,7 +66,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@af00000 {
compatible = "qcom,sc7180-dispcc";
- reg = <0 0x0af00000 0 0x200000>;
+ reg = <0x0af00000 0x200000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&dsi_phy 0>,
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
index 8635e35fd3f0..fe08461fce05 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-gpucc.yaml
@@ -60,7 +60,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@5090000 {
compatible = "qcom,sc7180-gpucc";
- reg = <0 0x05090000 0 0x9000>;
+ reg = <0x05090000 0x9000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_GPU_GPLL0_CLK_SRC>,
<&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml
index 0dd5d25ae7d7..970030986a86 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-mss.yaml
@@ -50,7 +50,7 @@ examples:
#include <dt-bindings/clock/qcom,gcc-sc7180.h>
clock-controller@41a8000 {
compatible = "qcom,sc7180-mss";
- reg = <0 0x041a8000 0 0x8000>;
+ reg = <0x041a8000 0x8000>;
clocks = <&gcc GCC_MSS_MFAB_AXIS_CLK>,
<&gcc GCC_MSS_NAV_AXI_CLK>,
<&gcc GCC_MSS_CFG_AHB_CLK>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
index 0071b9701960..2feea2b91aa9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sc7180-videocc.yaml
@@ -55,7 +55,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@ab00000 {
compatible = "qcom,sc7180-videocc";
- reg = <0 0x0ab00000 0 0x10000>;
+ reg = <0x0ab00000 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>;
clock-names = "bi_tcxo";
#clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
index ad47d747a3e4..4a3be733d042 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-dispcc.yaml
@@ -75,7 +75,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@af00000 {
compatible = "qcom,sdm845-dispcc";
- reg = <0 0x0af00000 0 0x10000>;
+ reg = <0x0af00000 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&gcc GCC_DISP_GPLL0_DIV_CLK_SRC>,
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
index 7a052ac5dc00..8a0c576ba8b3 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-gpucc.yaml
@@ -60,7 +60,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@5090000 {
compatible = "qcom,sdm845-gpucc";
- reg = <0 0x05090000 0 0x9000>;
+ reg = <0x05090000 0x9000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_GPU_GPLL0_CLK_SRC>,
<&gcc GCC_GPU_GPLL0_DIV_CLK_SRC>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml b/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
index 2a6a81ab0318..f7a0cf53d5f0 100644
--- a/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,sdm845-videocc.yaml
@@ -55,7 +55,7 @@ examples:
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@ab00000 {
compatible = "qcom,sdm845-videocc";
- reg = <0 0x0ab00000 0 0x10000>;
+ reg = <0x0ab00000 0x10000>;
clocks = <&rpmhcc RPMH_CXO_CLK>;
clock-names = "bi_tcxo";
#clock-cells = <1>;
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml
new file mode 100644
index 000000000000..c55a7c494e01
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clock.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/renesas,cpg-div6-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas CPG DIV6 Clock
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+ The CPG DIV6 clocks are variable factor clocks provided by the Clock Pulse
+ Generator (CPG). Their clock input is divided by a configurable factor from 1
+ to 64.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a73a4-div6-clock # R-Mobile APE6
+ - renesas,r8a7740-div6-clock # R-Mobile A1
+ - renesas,sh73a0-div6-clock # SH-Mobile AG5
+ - const: renesas,cpg-div6-clock
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ oneOf:
+ - maxItems: 1
+ - maxItems: 4
+ - maxItems: 8
+ description:
+ For clocks with multiple parents, invalid settings must be specified as
+ "<0>".
+
+ '#clock-cells':
+ const: 0
+
+ clock-output-names: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a73a4-clock.h>
+ sdhi2_clk: sdhi2_clk@e615007c {
+ compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+ reg = <0xe615007c 4>;
+ clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>, <0>,
+ <&extal2_clk>;
+ #clock-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
deleted file mode 100644
index ae36ab842919..000000000000
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-* Renesas CPG DIV6 Clock
-
-The CPG DIV6 clocks are variable factor clocks provided by the Clock Pulse
-Generator (CPG). Their clock input is divided by a configurable factor from 1
-to 64.
-
-Required Properties:
-
- - compatible: Must be one of the following
- - "renesas,r8a73a4-div6-clock" for R8A73A4 (R-Mobile APE6) DIV6 clocks
- - "renesas,r8a7740-div6-clock" for R8A7740 (R-Mobile A1) DIV6 clocks
- - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks
- - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2-W) DIV6 clocks
- - "renesas,r8a7793-div6-clock" for R8A7793 (R-Car M2-N) DIV6 clocks
- - "renesas,r8a7794-div6-clock" for R8A7794 (R-Car E2) DIV6 clocks
- - "renesas,sh73a0-div6-clock" for SH73A0 (SH-Mobile AG5) DIV6 clocks
- and "renesas,cpg-div6-clock" as a fallback.
- - reg: Base address and length of the memory resource used by the DIV6 clock
- - clocks: Reference to the parent clock(s); either one, four, or eight
- clocks must be specified. For clocks with multiple parents, invalid
- settings must be specified as "<0>".
- - #clock-cells: Must be 0
-
-
-Optional Properties:
-
- - clock-output-names: The name of the clock as a free-form string
-
-
-Example
--------
-
- sdhi2_clk: sdhi2_clk@e615007c {
- compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
- reg = <0 0xe615007c 0 4>;
- clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
- <0>, <&extal2_clk>;
- #clock-cells = <0>;
- clock-output-names = "sdhi2ck";
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
index 9cd102e5fed5..c745bd60719a 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -25,6 +25,7 @@ properties:
compatible:
enum:
- renesas,r7s9210-cpg-mssr # RZ/A2
+ - renesas,r8a7742-cpg-mssr # RZ/G1H
- renesas,r8a7743-cpg-mssr # RZ/G1M
- renesas,r8a7744-cpg-mssr # RZ/G1N
- renesas,r8a7745-cpg-mssr # RZ/G1E
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
deleted file mode 100644
index da578ebdda28..000000000000
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-* Renesas CPG Module Stop (MSTP) Clocks
-
-The CPG can gate SoC device clocks. The gates are organized in groups of up to
-32 gates.
-
-This device tree binding describes a single 32 gate clocks group per node.
-Clocks are referenced by user nodes by the MSTP node phandle and the clock
-index in the group, from 0 to 31.
-
-Required Properties:
-
- - compatible: Must be one of the following
- - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
- - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
- - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
- - "renesas,r8a7778-mstp-clocks" for R8A7778 (R-Car M1) MSTP gate clocks
- - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
- - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
- - "renesas,r8a7791-mstp-clocks" for R8A7791 (R-Car M2-W) MSTP gate clocks
- - "renesas,r8a7792-mstp-clocks" for R8A7792 (R-Car V2H) MSTP gate clocks
- - "renesas,r8a7793-mstp-clocks" for R8A7793 (R-Car M2-N) MSTP gate clocks
- - "renesas,r8a7794-mstp-clocks" for R8A7794 (R-Car E2) MSTP gate clocks
- - "renesas,sh73a0-mstp-clocks" for SH73A0 (SH-MobileAG5) MSTP gate clocks
- and "renesas,cpg-mstp-clocks" as a fallback.
- - reg: Base address and length of the I/O mapped registers used by the MSTP
- clocks. The first register is the clock control register and is mandatory.
- The second register is the clock status register and is optional when not
- implemented in hardware.
- - clocks: Reference to the parent clocks, one per output clock. The parents
- must appear in the same order as the output clocks.
- - #clock-cells: Must be 1
- - clock-output-names: The name of the clocks as free-form strings
- - clock-indices: Indices of the gate clocks into the group (0 to 31)
-
-The clocks, clock-output-names and clock-indices properties contain one entry
-per gate clock. The MSTP groups are sparsely populated. Unimplemented gate
-clocks must not be declared.
-
-
-Example
--------
-
- #include <dt-bindings/clock/r8a7790-clock.h>
-
- mstp3_clks: mstp3_clks@e615013c {
- compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
- reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
- clocks = <&cp_clk>, <&mmc1_clk>, <&sd3_clk>, <&sd2_clk>,
- <&cpg_clocks R8A7790_CLK_SD1>, <&cpg_clocks R8A7790_CLK_SD0>,
- <&mmc0_clk>;
- #clock-cells = <1>;
- clock-output-names =
- "tpu0", "mmcif1", "sdhi3", "sdhi2",
- "sdhi1", "sdhi0", "mmcif0";
- clock-indices = <
- R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
- R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0
- R8A7790_CLK_MMCIF0
- >;
- };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml
new file mode 100644
index 000000000000..9752ac63288b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/renesas,cpg-mstp-clocks.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Clock Pulse Generator (CPG) Module Stop (MSTP) Clocks
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+ The Clock Pulse Generator (CPG) can gate SoC device clocks. The gates are
+ organized in groups of up to 32 gates.
+
+  This device tree binding describes a single group of up to 32 gate clocks per node.
+ Clocks are referenced by user nodes by the Module Stop (MSTP) node phandle
+ and the clock index in the group, from 0 to 31.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r7s72100-mstp-clocks # RZ/A1
+ - renesas,r8a73a4-mstp-clocks # R-Mobile APE6
+ - renesas,r8a7740-mstp-clocks # R-Mobile A1
+ - renesas,r8a7778-mstp-clocks # R-Car M1
+ - renesas,r8a7779-mstp-clocks # R-Car H1
+ - renesas,sh73a0-mstp-clocks # SH-Mobile AG5
+ - const: renesas,cpg-mstp-clocks
+
+ reg:
+ minItems: 1
+ items:
+ - description: Module Stop Control Register (MSTPCR)
+ - description: Module Stop Status Register (MSTPSR)
+
+ clocks:
+ minItems: 1
+ maxItems: 32
+
+ '#clock-cells':
+ const: 1
+
+ clock-indices:
+ minItems: 1
+ maxItems: 32
+
+ clock-output-names:
+ minItems: 1
+ maxItems: 32
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+ - clock-indices
+ - clock-output-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a73a4-clock.h>
+ mstp2_clks: mstp2_clks@e6150138 {
+ compatible = "renesas,r8a73a4-mstp-clocks",
+ "renesas,cpg-mstp-clocks";
+ reg = <0xe6150138 4>, <0xe6150040 4>;
+ clocks = <&mp_clk>, <&mp_clk>, <&mp_clk>, <&mp_clk>, <&mp_clk>,
+ <&mp_clk>, <&cpg_clocks R8A73A4_CLK_HP>;
+ #clock-cells = <1>;
+ clock-indices = <
+ R8A73A4_CLK_SCIFA0 R8A73A4_CLK_SCIFA1
+ R8A73A4_CLK_SCIFB0 R8A73A4_CLK_SCIFB1
+ R8A73A4_CLK_SCIFB2 R8A73A4_CLK_SCIFB3
+ R8A73A4_CLK_DMAC
+ >;
+ clock-output-names =
+ "scifa0", "scifa1", "scifb0", "scifb1", "scifb2", "scifb3",
+ "dmac";
+ };
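To show the phandle-plus-index consumption scheme described above, here is a
hypothetical serial node gated by the mstp2_clks group from the example; the
consumer compatible, address and clock-names are illustrative, only the clocks
specifier matters:

    #include <dt-bindings/clock/r8a73a4-clock.h>

    serial@e6c40000 {
        compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
        reg = <0xe6c40000 0x100>;
        clocks = <&mstp2_clks R8A73A4_CLK_SCIFA0>;
        clock-names = "fck";
    };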
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
index 4bf6f53bd95e..da92f5748dee 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-usb2-clock-sel.txt
@@ -27,7 +27,9 @@ Required properties:
- compatible: "renesas,r8a7795-rcar-usb2-clock-sel" if the device is a part of
an R8A7795 SoC.
"renesas,r8a7796-rcar-usb2-clock-sel" if the device if a part of
- an R8A7796 SoC.
+ an R8A77960 SoC.
+	      "renesas,r8a77961-rcar-usb2-clock-sel" if the device is a part of
+ an R8A77961 SoC.
"renesas,rcar-gen3-usb2-clock-sel" for a generic R-Car Gen3
compatible device.
diff --git a/Documentation/devicetree/bindings/clock/silabs,si5341.txt b/Documentation/devicetree/bindings/clock/silabs,si5341.txt
index a70c333e4cd4..504cce3abe46 100644
--- a/Documentation/devicetree/bindings/clock/silabs,si5341.txt
+++ b/Documentation/devicetree/bindings/clock/silabs,si5341.txt
@@ -1,15 +1,21 @@
-Binding for Silicon Labs Si5341 and Si5340 programmable i2c clock generator.
+Binding for Silicon Labs Si5340, Si5341 Si5342, Si5344 and Si5345 programmable
+i2c clock generator.
Reference
[1] Si5341 Data Sheet
https://www.silabs.com/documents/public/data-sheets/Si5341-40-D-DataSheet.pdf
[2] Si5341 Reference Manual
https://www.silabs.com/documents/public/reference-manuals/Si5341-40-D-RM.pdf
+[3] Si5345 Reference Manual
+ https://www.silabs.com/documents/public/reference-manuals/Si5345-44-42-D-RM.pdf
The Si5341 and Si5340 are programmable i2c clock generators with up to 10 output
clocks. The chip contains a PLL that sources 5 (or 4) multisynth clocks, which
in turn can be directed to any of the 10 (or 4) outputs through a divider.
The internal structure of the clock generators can be found in [2].
+The Si5345 is similar to the Si5341 with the addition of fractional input
+dividers and automatic input selection, as described in [3].
+The Si5342 and Si5344 are smaller versions of the Si5345, with 2 or 4 outputs.
The driver can be used in "as is" mode, reading the current settings from the
chip at boot, in case you have a (pre-)programmed device. If the PLL is not
@@ -28,6 +34,9 @@ Required properties:
- compatible: shall be one of the following:
"silabs,si5340" - Si5340 A/B/C/D
"silabs,si5341" - Si5341 A/B/C/D
+ "silabs,si5342" - Si5342 A/B/C/D
+ "silabs,si5344" - Si5344 A/B/C/D
+ "silabs,si5345" - Si5345 A/B/C/D
- reg: i2c device address, usually 0x74
- #clock-cells: from common clock binding; shall be set to 2.
The first value is "0" for outputs, "1" for synthesizers.
diff --git a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
index bb3a78d8105e..29813873cfbc 100644
--- a/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/sprd,sc9863a-clk.yaml
@@ -28,6 +28,7 @@ properties:
- sprd,sc9863a-rpll
- sprd,sc9863a-dpll
- sprd,sc9863a-mm-gate
+ - sprd,sc9863a-mm-clk
- sprd,sc9863a-apapb-gate
clocks:
@@ -76,29 +77,24 @@ examples:
- |
ap_clk: clock-controller@21500000 {
compatible = "sprd,sc9863a-ap-clk";
- reg = <0 0x21500000 0 0x1000>;
+ reg = <0x21500000 0x1000>;
clocks = <&ext_26m>, <&ext_32k>;
clock-names = "ext-26m", "ext-32k";
#clock-cells = <1>;
};
- |
- soc {
- #address-cells = <2>;
- #size-cells = <2>;
-
- ap_ahb_regs: syscon@20e00000 {
- compatible = "sprd,sc9863a-glbregs", "syscon", "simple-mfd";
- reg = <0 0x20e00000 0 0x4000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0 0x20e00000 0x4000>;
-
- apahb_gate: apahb-gate@0 {
- compatible = "sprd,sc9863a-apahb-gate";
- reg = <0x0 0x1020>;
- #clock-cells = <1>;
- };
+ syscon@20e00000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon", "simple-mfd";
+ reg = <0x20e00000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x20e00000 0x4000>;
+
+ apahb_gate: apahb-gate@0 {
+ compatible = "sprd,sc9863a-apahb-gate";
+ reg = <0x0 0x1020>;
+ #clock-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.yaml b/Documentation/devicetree/bindings/connector/usb-connector.yaml
index 4638d7adb806..9bd52e63c935 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.yaml
+++ b/Documentation/devicetree/bindings/connector/usb-connector.yaml
@@ -15,10 +15,15 @@ description:
properties:
compatible:
- enum:
- - usb-a-connector
- - usb-b-connector
- - usb-c-connector
+ oneOf:
+ - enum:
+ - usb-a-connector
+ - usb-b-connector
+ - usb-c-connector
+
+ - items:
+ - const: gpio-usb-b-connector
+ - const: usb-b-connector
label:
description: Symbolic name for the connector.
@@ -27,8 +32,8 @@ properties:
description: Size of the connector, should be specified in case of
non-fullsize 'usb-a-connector' or 'usb-b-connector' compatible
connectors.
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
+ $ref: /schemas/types.yaml#definitions/string
+
enum:
- mini
- micro
@@ -57,8 +62,8 @@ properties:
power-role:
description: Determines the power role that the Type C connector will
support. "dual" refers to Dual Role Port (DRP).
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
+ $ref: /schemas/types.yaml#definitions/string
+
enum:
- source
- sink
@@ -66,18 +71,18 @@ properties:
try-power-role:
description: Preferred power role.
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
+ $ref: /schemas/types.yaml#definitions/string
+
enum:
- - source
- - sink
- - dual
+ - source
+ - sink
+ - dual
data-role:
description: Data role if Type C connector supports USB data. "dual" refers
Dual Role Device (DRD).
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
+ $ref: /schemas/types.yaml#definitions/string
+
enum:
- host
- device
@@ -95,8 +100,7 @@ properties:
defined in dt-bindings/usb/pd.h.
minItems: 1
maxItems: 7
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
sink-pdos:
description: An array of u32 with each entry providing supported power sink
@@ -108,8 +112,7 @@ properties:
in dt-bindings/usb/pd.h.
minItems: 1
maxItems: 7
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
op-sink-microwatt:
description: Sink required operating power in microwatt, if source can't
@@ -142,9 +145,22 @@ properties:
required:
- compatible
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: gpio-usb-b-connector
+ then:
+ anyOf:
+ - required:
+ - vbus-gpios
+ - required:
+ - id-gpios
+
examples:
# Micro-USB connector with HS lines routed via controller (MUIC).
- - |+
+ - |
muic-max77843 {
usb_con1: connector {
compatible = "usb-b-connector";
@@ -156,7 +172,7 @@ examples:
# USB-C connector attached to CC controller (s2mm005), HS lines routed
# to companion PMIC (max77865), SS lines to USB3 PHY and SBU to DisplayPort.
# DisplayPort video lines are routed to the connector via SS mux in USB3 PHY.
- - |+
+ - |
ccic: s2mm005 {
usb_con2: connector {
compatible = "usb-c-connector";
@@ -190,7 +206,7 @@ examples:
# USB-C connector attached to a typec port controller(ptn5110), which has
# power delivery support and enables drp.
- - |+
+ - |
#include <dt-bindings/usb/pd.h>
typec: ptn5110 {
usb_con3: connector {
@@ -204,3 +220,16 @@ examples:
op-sink-microwatt = <10000000>;
};
};
+
+ # USB connector with GPIO control lines
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ usb {
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ id-gpios = <&pio 12 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&usb_p0_vbus>;
+ };
+ };
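The new allOf clause accepts either vbus-gpios or id-gpios for
gpio-usb-b-connector; the example above shows the id-gpios flavour, so a minimal
sketch of the vbus-gpios variant is given here for contrast (GPIO controller and
pin number are illustrative):

    #include <dt-bindings/gpio/gpio.h>

    usb {
        connector {
            compatible = "gpio-usb-b-connector", "usb-b-connector";
            type = "micro";
            vbus-gpios = <&pio 24 GPIO_ACTIVE_HIGH>;
        };
    };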
diff --git a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt
new file mode 100644
index 000000000000..daeca6ae6b76
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra20-cpufreq.txt
@@ -0,0 +1,56 @@
+Binding for NVIDIA Tegra20 CPUFreq
+==================================
+
+Required properties:
+- clocks: Must contain an entry for the CPU clock.
+ See ../clocks/clock-bindings.txt for details.
+- operating-points-v2: See ../bindings/opp/opp.txt for details.
+- #cooling-cells: Should be 2. See ../thermal/thermal.txt for details.
+
+For each opp entry in 'operating-points-v2' table:
+- opp-supported-hw: Two bitfields indicating:
+ On Tegra20:
+ 1. CPU process ID mask
+ 2. SoC speedo ID mask
+
+ On Tegra30:
+ 1. CPU process ID mask
+ 2. CPU speedo ID mask
+
+  A bitwise AND is performed against these values, and if any bit
+  matches, the OPP is enabled.
+
+- opp-microvolt: CPU voltage triplet.
+
+Optional properties:
+- cpu-supply: Phandle to the CPU power supply.
+
+Example:
+ regulators {
+ cpu_reg: regulator0 {
+ regulator-name = "vdd_cpu";
+ };
+ };
+
+ cpu0_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+
+ opp@456000000 {
+ clock-latency-ns = <125000>;
+ opp-microvolt = <825000 825000 1125000>;
+ opp-supported-hw = <0x03 0x0001>;
+ opp-hz = /bits/ 64 <456000000>;
+ };
+
+ ...
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ clocks = <&tegra_car TEGRA20_CLK_CCLK>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ cpu-supply = <&cpu_reg>;
+ #cooling-cells = <2>;
+ };
+ };
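Reading the opp-supported-hw value in the example above makes the masking rule
concrete: <0x03 0x0001> means the first mask (0x03 = 0b11) covers CPU process IDs
0 and 1 and the second mask (0x0001) covers speedo ID 0, so the 456 MHz OPP is
enabled only on chips matching both. A chip with process ID 2 (bit 2) would AND
to zero against 0x03, leaving that OPP disabled.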
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
index 8b9a8f337f16..fc823572bcff 100644
--- a/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
@@ -15,16 +15,16 @@ properties:
oneOf:
- const: allwinner,sun4i-a10-crypto
- items:
- - const: allwinner,sun5i-a13-crypto
- - const: allwinner,sun4i-a10-crypto
+ - const: allwinner,sun5i-a13-crypto
+ - const: allwinner,sun4i-a10-crypto
- items:
- - const: allwinner,sun6i-a31-crypto
- - const: allwinner,sun4i-a10-crypto
+ - const: allwinner,sun6i-a31-crypto
+ - const: allwinner,sun4i-a10-crypto
- items:
- - const: allwinner,sun7i-a20-crypto
- - const: allwinner,sun4i-a10-crypto
+ - const: allwinner,sun7i-a20-crypto
+ - const: allwinner,sun4i-a10-crypto
- items:
- - const: allwinner,sun8i-a33-crypto
+ - const: allwinner,sun8i-a33-crypto
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
index 2c459b8c76ff..7a60d84289cc 100644
--- a/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
+++ b/Documentation/devicetree/bindings/crypto/allwinner,sun8i-ce.yaml
@@ -50,16 +50,16 @@ if:
const: allwinner,sun50i-h6-crypto
then:
properties:
- clocks:
- minItems: 3
- clock-names:
- minItems: 3
+ clocks:
+ minItems: 3
+ clock-names:
+ minItems: 3
else:
properties:
- clocks:
- maxItems: 2
- clock-names:
- maxItems: 2
+ clocks:
+ maxItems: 2
+ clock-names:
+ maxItems: 2
required:
- compatible
diff --git a/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml b/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml
index 5becc60a0e28..ecf98a9e72b2 100644
--- a/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml
+++ b/Documentation/devicetree/bindings/crypto/amlogic,gxl-crypto.yaml
@@ -12,7 +12,7 @@ maintainers:
properties:
compatible:
items:
- - const: amlogic,gxl-crypto
+ - const: amlogic,gxl-crypto
reg:
maxItems: 1
@@ -45,7 +45,7 @@ examples:
crypto: crypto-engine@c883e000 {
compatible = "amlogic,gxl-crypto";
- reg = <0x0 0xc883e000 0x0 0x36>;
+ reg = <0xc883e000 0x36>;
interrupts = <GIC_SPI 188 IRQ_TYPE_EDGE_RISING>, <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
clocks = <&clkc CLKID_BLKMV>;
clock-names = "blkmv";
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
index 57ae1c0b6d18..6dd658f0912c 100644
--- a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
@@ -36,11 +36,10 @@ properties:
dma-maxburst:
description: Set number of maximum dma burst supported
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 2
- - default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 2
+ default: 0
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml
index 944ff2f1cf93..e77523b02fad 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml
@@ -66,10 +66,9 @@ properties:
- allwinner,sun50i-h6-display-engine
allwinner,pipelines:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle-array
- - minItems: 1
- maxItems: 2
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 2
description: |
Available display engine frontends (DE 1.0) or mixers (DE
2.0/3.0) available.
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
index 5d4915aed1e2..75e6479397a5 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-hdmi.yaml
@@ -21,8 +21,8 @@ properties:
- const: allwinner,sun5i-a10s-hdmi
- const: allwinner,sun6i-a31-hdmi
- items:
- - const: allwinner,sun7i-a20-hdmi
- - const: allwinner,sun5i-a10s-hdmi
+ - const: allwinner,sun7i-a20-hdmi
+ - const: allwinner,sun5i-a10s-hdmi
reg:
maxItems: 1
@@ -33,32 +33,32 @@ properties:
clocks:
oneOf:
- items:
- - description: The HDMI interface clock
- - description: The HDMI module clock
- - description: The first video PLL
- - description: The second video PLL
+ - description: The HDMI interface clock
+ - description: The HDMI module clock
+ - description: The first video PLL
+ - description: The second video PLL
- items:
- - description: The HDMI interface clock
- - description: The HDMI module clock
- - description: The HDMI DDC clock
- - description: The first video PLL
- - description: The second video PLL
+ - description: The HDMI interface clock
+ - description: The HDMI module clock
+ - description: The HDMI DDC clock
+ - description: The first video PLL
+ - description: The second video PLL
clock-names:
oneOf:
- items:
- - const: ahb
- - const: mod
- - const: pll-0
- - const: pll-1
+ - const: ahb
+ - const: mod
+ - const: pll-0
+ - const: pll-1
- items:
- - const: ahb
- - const: mod
- - const: ddc
- - const: pll-0
- - const: pll-1
+ - const: ahb
+ - const: mod
+ - const: ddc
+ - const: pll-0
+ - const: pll-1
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index e5344c4ae226..4c15a2644a7c 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -35,26 +35,26 @@ properties:
- const: allwinner,sun9i-a80-tcon-tv
- items:
- - enum:
- - allwinner,sun7i-a20-tcon0
- - allwinner,sun7i-a20-tcon1
- - const: allwinner,sun7i-a20-tcon
+ - enum:
+ - allwinner,sun7i-a20-tcon0
+ - allwinner,sun7i-a20-tcon1
+ - const: allwinner,sun7i-a20-tcon
- items:
- - enum:
- - allwinner,sun50i-a64-tcon-lcd
- - const: allwinner,sun8i-a83t-tcon-lcd
+ - enum:
+ - allwinner,sun50i-a64-tcon-lcd
+ - const: allwinner,sun8i-a83t-tcon-lcd
- items:
- - enum:
- - allwinner,sun8i-h3-tcon-tv
- - allwinner,sun50i-a64-tcon-tv
- - const: allwinner,sun8i-a83t-tcon-tv
+ - enum:
+ - allwinner,sun8i-h3-tcon-tv
+ - allwinner,sun50i-a64-tcon-tv
+ - const: allwinner,sun8i-a83t-tcon-tv
- items:
- - enum:
- - allwinner,sun50i-h6-tcon-tv
- - const: allwinner,sun8i-r40-tcon-tv
+ - enum:
+ - allwinner,sun50i-h6-tcon-tv
+ - const: allwinner,sun8i-r40-tcon-tv
reg:
maxItems: 1
@@ -71,11 +71,10 @@ properties:
maxItems: 4
clock-output-names:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string-array
- - maxItems: 1
description:
Name of the LCD pixel clock created.
+ $ref: /schemas/types.yaml#/definitions/string-array
+ maxItems: 1
dmas:
maxItems: 1
@@ -83,37 +82,37 @@ properties:
resets:
anyOf:
- items:
- - description: TCON Reset Line
+ - description: TCON Reset Line
- items:
- - description: TCON Reset Line
- - description: TCON LVDS Reset Line
+ - description: TCON Reset Line
+ - description: TCON LVDS Reset Line
- items:
- - description: TCON Reset Line
- - description: TCON eDP Reset Line
+ - description: TCON Reset Line
+ - description: TCON eDP Reset Line
- items:
- - description: TCON Reset Line
- - description: TCON eDP Reset Line
- - description: TCON LVDS Reset Line
+ - description: TCON Reset Line
+ - description: TCON eDP Reset Line
+ - description: TCON LVDS Reset Line
reset-names:
oneOf:
- const: lcd
- items:
- - const: lcd
- - const: lvds
+ - const: lcd
+ - const: lvds
- items:
- - const: lcd
- - const: edp
+ - const: lcd
+ - const: edp
- items:
- - const: lcd
- - const: edp
- - const: lvds
+ - const: lcd
+ - const: edp
+ - const: lvds
ports:
type: object
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
index e73662c8d339..63f948175239 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
@@ -76,28 +76,28 @@ required:
allOf:
- if:
properties:
- compatible:
- contains:
- const: allwinner,sun6i-a31-mipi-dsi
+ compatible:
+ contains:
+ const: allwinner,sun6i-a31-mipi-dsi
then:
- properties:
- clocks:
- minItems: 2
+ properties:
+ clocks:
+ minItems: 2
- required:
- - clock-names
+ required:
+ - clock-names
- if:
properties:
- compatible:
- contains:
- const: allwinner,sun50i-a64-mipi-dsi
+ compatible:
+ contains:
+ const: allwinner,sun50i-a64-mipi-dsi
then:
- properties:
- clocks:
- minItems: 1
+ properties:
+ clocks:
+ minItems: 1
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
index 4d6795690ac3..fa4769a0b26e 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun8i-a83t-dw-hdmi.yaml
@@ -29,11 +29,11 @@ properties:
- const: allwinner,sun50i-h6-dw-hdmi
- items:
- - enum:
- - allwinner,sun8i-h3-dw-hdmi
- - allwinner,sun8i-r40-dw-hdmi
- - allwinner,sun50i-a64-dw-hdmi
- - const: allwinner,sun8i-a83t-dw-hdmi
+ - enum:
+ - allwinner,sun8i-h3-dw-hdmi
+ - allwinner,sun8i-r40-dw-hdmi
+ - allwinner,sun50i-a64-dw-hdmi
+ - const: allwinner,sun8i-a83t-dw-hdmi
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
new file mode 100644
index 000000000000..3ba477aefdd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/analogix,anx7814.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analogix ANX7814 SlimPort (Full-HD Transmitter)
+
+maintainers:
+ - Enric Balletbo i Serra <enric.balletbo@collabora.com>
+
+properties:
+ compatible:
+ enum:
+ - analogix,anx7808
+ - analogix,anx7812
+ - analogix,anx7814
+ - analogix,anx7818
+
+ reg:
+ maxItems: 1
+ description: I2C address of the device.
+
+ interrupts:
+ maxItems: 1
+ description: Should contain the INTP interrupt.
+
+ hpd-gpios:
+ deprecated: true
+ maxItems: 1
+ description: Which GPIO to use for hpd.
+
+ pd-gpios:
+ maxItems: 1
+ description: Which GPIO to use for power down.
+
+ reset-gpios:
+ maxItems: 1
+ description: Which GPIO to use for reset.
+
+ dvdd10-supply:
+ description: Regulator for 1.0V digital core power.
+
+ ports:
+ type: object
+ description:
+ A node containing input and output port nodes with endpoint
+ definitions as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+ Documentation/devicetree/bindings/graph.txt
+
+ properties:
+ port@0:
+ type: object
+ description: Video port for HDMI input.
+
+ properties:
+ reg:
+ const: 0
+
+ port@1:
+ type: object
+ description:
+ Video port for SlimPort, DisplayPort, eDP or MyDP output.
+
+ properties:
+ reg:
+ const: 1
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ anx7814: bridge@38 {
+ compatible = "analogix,anx7814";
+ reg = <0x38>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <99 IRQ_TYPE_LEVEL_LOW>; /* INTP */
+ pd-gpios = <&pio 33 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 98 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ anx7814_in: endpoint {
+ remote-endpoint = <&hdmi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ anx7814_out: endpoint {
+ remote-endpoint = <&edp_out>;
+ };
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/bridge/anx7814.txt b/Documentation/devicetree/bindings/display/bridge/anx7814.txt
deleted file mode 100644
index 17258747fff6..000000000000
--- a/Documentation/devicetree/bindings/display/bridge/anx7814.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Analogix ANX7814 SlimPort (Full-HD Transmitter)
------------------------------------------------
-
-The ANX7814 is an ultra-low power Full-HD (1080p60) SlimPort transmitter
-designed for portable devices.
-
-Required properties:
-
- - compatible : Must be one of:
- "analogix,anx7808"
- "analogix,anx7812"
- "analogix,anx7814"
- "analogix,anx7818"
- - reg : I2C address of the device
- - interrupts : Should contain the INTP interrupt
- - hpd-gpios : Which GPIO to use for hpd
- - pd-gpios : Which GPIO to use for power down
- - reset-gpios : Which GPIO to use for reset
-
-Optional properties:
-
- - dvdd10-supply : Regulator for 1.0V digital core power.
- - Video port for HDMI input, using the DT bindings defined in [1].
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
- anx7814: anx7814@38 {
- compatible = "analogix,anx7814";
- reg = <0x38>;
- interrupt-parent = <&gpio0>;
- interrupts = <99 IRQ_TYPE_LEVEL_LOW>; /* INTP */
- hpd-gpios = <&pio 36 GPIO_ACTIVE_HIGH>;
- pd-gpios = <&pio 33 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&pio 98 GPIO_ACTIVE_HIGH>;
- port {
- anx7814_in: endpoint {
- remote-endpoint = <&hdmi0_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
index 800c63764e71..68951d56ebba 100644
--- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml
@@ -32,17 +32,17 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - ti,ds90c185 # For the TI DS90C185 FPD-Link Serializer
- - ti,ds90c187 # For the TI DS90C187 FPD-Link Serializer
- - ti,sn75lvds83 # For the TI SN75LVDS83 FlatLink transmitter
- - const: lvds-encoder # Generic LVDS encoder compatible fallback
+ - enum:
+ - ti,ds90c185 # For the TI DS90C185 FPD-Link Serializer
+ - ti,ds90c187 # For the TI DS90C187 FPD-Link Serializer
+ - ti,sn75lvds83 # For the TI SN75LVDS83 FlatLink transmitter
+ - const: lvds-encoder # Generic LVDS encoder compatible fallback
- items:
- - enum:
- - ti,ds90cf384a # For the DS90CF384A FPD-Link LVDS Receiver
- - const: lvds-decoder # Generic LVDS decoders compatible fallback
+ - enum:
+ - ti,ds90cf384a # For the DS90CF384A FPD-Link LVDS Receiver
+ - const: lvds-decoder # Generic LVDS decoders compatible fallback
- enum:
- - thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
+ - thine,thc63lvdm83d # For the THC63LVDM83D LVDS serializer
ports:
type: object
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index a747b755ad06..45fe8fe5faba 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -48,9 +48,8 @@ properties:
rotation:
description:
Display rotation in degrees counter clockwise (0,90,180,270)
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 90, 180, 270 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 90, 180, 270]
# Display Timings
panel-timing:
@@ -58,16 +57,14 @@ properties:
Most display panels are restricted to a single resolution and
require specific display timings. The panel-timing subnode expresses those
timings.
- allOf:
- - $ref: panel-timing.yaml#
+ $ref: panel-timing.yaml#
display-timings:
description:
Some display panels support several resolutions with different timings.
The display-timings bindings supports specifying several timings and
optionally specifying which is the native mode.
- allOf:
- - $ref: display-timings.yaml#
+ $ref: display-timings.yaml#
# Connectivity
port:
diff --git a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
index bd558ad7891f..182c19cb7fdd 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
@@ -72,92 +72,80 @@ properties:
hfront-porch:
description: Horizontal front porch panel timing
oneOf:
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
- items:
- description: typical number of pixels
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- description: min, typ, max number of pixels
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ items:
+ description: typical number of pixels
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
hback-porch:
description: Horizontal back porch timing
oneOf:
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
- items:
- description: typical number of pixels
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- description: min, typ, max number of pixels
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ items:
+ description: typical number of pixels
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
hsync-len:
description: Horizontal sync length panel timing
oneOf:
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
- items:
- description: typical number of pixels
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- description: min, typ, max number of pixels
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ items:
+ description: typical number of pixels
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
vfront-porch:
description: Vertical front porch panel timing
oneOf:
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
- items:
- description: typical number of lines
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- description: min, typ, max number of lines
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ items:
+ description: typical number of lines
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
vback-porch:
description: Vertical back porch panel timing
oneOf:
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
- items:
- description: typical number of lines
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- description: min, typ, max number of lines
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ items:
+ description: typical number of lines
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
vsync-len:
description: Vertical sync length panel timing
oneOf:
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
- items:
- description: typical number of lines
- - allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- description: min, typ, max number of lines
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
+ items:
+ description: typical number of lines
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
hsync-active:
description: |
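For orientation, a minimal panel-timing node combining the two forms accepted above (a single typical value, or a <min typ max> triplet) might look as follows; all figures are illustrative placeholders, not the timings of any particular panel:

    panel-timing {
        clock-frequency = <33500000>;
        hactive = <800>;
        vactive = <480>;
        hfront-porch = <40>;         /* single cell: typical value */
        hback-porch = <40 88 128>;   /* three cells: min, typ, max */
        hsync-len = <48>;
        vfront-porch = <13>;
        vback-porch = <31>;
        vsync-len = <3>;
    };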
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
index 185dcc8fd1f9..78d060097052 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx424akp.yaml
@@ -18,7 +18,7 @@ properties:
reg: true
reset-gpios: true
vddi-supply:
- description: regulator that supplies the vddi voltage
+ description: regulator that supplies the vddi voltage
enforce-video-mode: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
index 6913923df569..d5c46a3cc2b0 100644
--- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -19,9 +19,9 @@ properties:
backlight: true
reset-gpios: true
iovcc-supply:
- description: regulator that supplies the iovcc voltage
+ description: regulator that supplies the iovcc voltage
vci-supply:
- description: regulator that supplies the vci voltage
+ description: regulator that supplies the vci voltage
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/renesas,cmm.yaml b/Documentation/devicetree/bindings/display/renesas,cmm.yaml
index a57037b9e9ba..561efaaa5a91 100644
--- a/Documentation/devicetree/bindings/display/renesas,cmm.yaml
+++ b/Documentation/devicetree/bindings/display/renesas,cmm.yaml
@@ -21,15 +21,15 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - renesas,r8a7795-cmm
- - renesas,r8a7796-cmm
- - renesas,r8a77965-cmm
- - renesas,r8a77990-cmm
- - renesas,r8a77995-cmm
- - const: renesas,rcar-gen3-cmm
+ - enum:
+ - renesas,r8a7795-cmm
+ - renesas,r8a7796-cmm
+ - renesas,r8a77965-cmm
+ - renesas,r8a77990-cmm
+ - renesas,r8a77995-cmm
+ - const: renesas,rcar-gen3-cmm
- items:
- - const: renesas,rcar-gen2-cmm
+ - const: renesas,rcar-gen2-cmm
reg:
maxItems: 1
@@ -60,7 +60,7 @@ examples:
cmm0: cmm@fea40000 {
compatible = "renesas,r8a7796-cmm",
"renesas,rcar-gen3-cmm";
- reg = <0 0xfea40000 0 0x1000>;
+ reg = <0xfea40000 0x1000>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
clocks = <&cpg CPG_MOD 711>;
resets = <&cpg 711>;
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
index 9999255ac5b6..47319214b5f6 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
@@ -40,14 +40,30 @@ of the following host1x client modules:
Required properties:
- compatible: "nvidia,tegra<chip>-vi"
- - reg: Physical base address and length of the controller's registers.
+ - reg: Physical base address and length of the controller registers.
- interrupts: The interrupt outputs from the controller.
- - clocks: Must contain one entry, for the module clock.
+ - clocks: Must contain one entry, for the module clock.
See ../clocks/clock-bindings.txt for details.
- - resets: Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
- - reset-names: Must include the following entries:
- - vi
+ - Tegra20/Tegra30/Tegra114/Tegra124:
+ - resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+ - reset-names: Must include the following entries:
+ - vi
+ - Tegra210:
+ - power-domains: Must include venc powergate node as vi is in VE partition.
+ - Tegra210 has the CSI as part of the VI, sharing the same host interface and
+ register space, so the VI device node should have a CSI child node.
+
+ - csi: MIPI CSI interface to VI
+
+ Required properties:
+ - compatible: "nvidia,tegra210-csi"
+ - reg: Physical base address offset to parent and length of the controller
+ registers.
+ - clocks: Must contain the csi, cilab, cilcd, cile and csi_tpg clocks.
+ See ../clocks/clock-bindings.txt for details.
+ - power-domains: Must include sor powergate node as csicil is in
+ SOR partition.
- epp: encoder pre-processor
@@ -309,13 +325,44 @@ Example:
reset-names = "mpe";
};
- vi {
- compatible = "nvidia,tegra20-vi";
- reg = <0x54080000 0x00040000>;
- interrupts = <0 69 0x04>;
- clocks = <&tegra_car TEGRA20_CLK_VI>;
- resets = <&tegra_car 100>;
- reset-names = "vi";
+ vi@54080000 {
+ compatible = "nvidia,tegra210-vi";
+ reg = <0x0 0x54080000 0x0 0x700>;
+ interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ assigned-clocks = <&tegra_car TEGRA210_CLK_VI>;
+ assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_C4_OUT0>;
+
+ clocks = <&tegra_car TEGRA210_CLK_VI>;
+ power-domains = <&pd_venc>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x0 0x0 0x54080000 0x2000>;
+
+ csi@838 {
+ compatible = "nvidia,tegra210-csi";
+ reg = <0x838 0x1300>;
+ assigned-clocks = <&tegra_car TEGRA210_CLK_CILAB>,
+ <&tegra_car TEGRA210_CLK_CILCD>,
+ <&tegra_car TEGRA210_CLK_CILE>,
+ <&tegra_car TEGRA210_CLK_CSI_TPG>;
+ assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_P>,
+ <&tegra_car TEGRA210_CLK_PLL_P>,
+ <&tegra_car TEGRA210_CLK_PLL_P>;
+ assigned-clock-rates = <102000000>,
+ <102000000>,
+ <102000000>,
+ <972000000>;
+
+ clocks = <&tegra_car TEGRA210_CLK_CSI>,
+ <&tegra_car TEGRA210_CLK_CILAB>,
+ <&tegra_car TEGRA210_CLK_CILCD>,
+ <&tegra_car TEGRA210_CLK_CILE>,
+ <&tegra_car TEGRA210_CLK_CSI_TPG>;
+ clock-names = "csi", "cilab", "cilcd", "cile", "csi_tpg";
+ power-domains = <&pd_sor>;
+ };
};
epp {
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index eb04c2330698..4f9185462ed3 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -88,9 +88,8 @@ properties:
- "#size-cells"
ti,am65x-oldi-io-ctrl:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
- - maxItems: 1
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ maxItems: 1
description:
phandle to syscon device node mapping OLDI IO_CTRL registers.
The mapped range should point to OLDI_DAT0_IO_CTRL, map it and
@@ -123,13 +122,13 @@ examples:
dss: dss@4a00000 {
compatible = "ti,am65x-dss";
- reg = <0x0 0x04a00000 0x0 0x1000>, /* common */
- <0x0 0x04a02000 0x0 0x1000>, /* vidl1 */
- <0x0 0x04a06000 0x0 0x1000>, /* vid */
- <0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
- <0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
- <0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
- <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
+ reg = <0x04a00000 0x1000>, /* common */
+ <0x04a02000 0x1000>, /* vidl1 */
+ <0x04a06000 0x1000>, /* vid */
+ <0x04a07000 0x1000>, /* ovr1 */
+ <0x04a08000 0x1000>, /* ovr2 */
+ <0x04a0a000 0x1000>, /* vp1 */
+ <0x04a0b000 0x1000>; /* vp2 */
reg-names = "common", "vidl1", "vid",
"ovr1", "ovr2", "vp1", "vp2";
ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
index eb4b1a266210..bbd76591c180 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -156,23 +156,23 @@ examples:
dss: dss@4a00000 {
compatible = "ti,j721e-dss";
- reg = <0x00 0x04a00000 0x00 0x10000>, /* common_m */
- <0x00 0x04a10000 0x00 0x10000>, /* common_s0*/
- <0x00 0x04b00000 0x00 0x10000>, /* common_s1*/
- <0x00 0x04b10000 0x00 0x10000>, /* common_s2*/
- <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
- <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
- <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
- <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
- <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
- <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
- <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
- <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
- <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
- <0x00 0x04aa0000 0x00 0x10000>, /* vp2 */
- <0x00 0x04ac0000 0x00 0x10000>, /* vp3 */
- <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
- <0x00 0x04af0000 0x00 0x10000>; /* wb */
+ reg = <0x04a00000 0x10000>, /* common_m */
+ <0x04a10000 0x10000>, /* common_s0*/
+ <0x04b00000 0x10000>, /* common_s1*/
+ <0x04b10000 0x10000>, /* common_s2*/
+ <0x04a20000 0x10000>, /* vidl1 */
+ <0x04a30000 0x10000>, /* vidl2 */
+ <0x04a50000 0x10000>, /* vid1 */
+ <0x04a60000 0x10000>, /* vid2 */
+ <0x04a70000 0x10000>, /* ovr1 */
+ <0x04a90000 0x10000>, /* ovr2 */
+ <0x04ab0000 0x10000>, /* ovr3 */
+ <0x04ad0000 0x10000>, /* ovr4 */
+ <0x04a80000 0x10000>, /* vp1 */
+ <0x04aa0000 0x10000>, /* vp2 */
+ <0x04ac0000 0x10000>, /* vp3 */
+ <0x04ae0000 0x10000>, /* vp4 */
+ <0x04af0000 0x10000>; /* wb */
reg-names = "common_m", "common_s0",
"common_s1", "common_s2",
"vidl1", "vidl2","vid1","vid2",
diff --git a/Documentation/devicetree/bindings/dma/dma-common.yaml b/Documentation/devicetree/bindings/dma/dma-common.yaml
index 02a34ba2b49b..c36592683340 100644
--- a/Documentation/devicetree/bindings/dma/dma-common.yaml
+++ b/Documentation/devicetree/bindings/dma/dma-common.yaml
@@ -31,8 +31,7 @@ properties:
kernel. i.e. first channel corresponds to LSB.
The first item in the array is for channels 0-31, the second is for
channels 32-63, etc.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
items:
minItems: 1
# Should be enough
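As a sketch of the bitmask layout described above (LSB = channel 0, one 32-bit word per group of 32 channels), a controller whose channel 0 is reserved by firmware would advertise the remaining channels roughly like this; the node is only a fragment and the unit address is a placeholder:

    dma: dma-controller@12340000 {
        /* bit set = channel available to the kernel; bit 0 cleared masks channel 0 */
        dma-channel-mask = <0xfffffffe>;
    };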
diff --git a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
new file mode 100644
index 000000000000..92794c500589
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/ingenic,dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs DMA Controller DT bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - ingenic,jz4740-dma
+ - ingenic,jz4725b-dma
+ - ingenic,jz4770-dma
+ - ingenic,jz4780-dma
+ - ingenic,x1000-dma
+ - ingenic,x1830-dma
+
+ reg:
+ items:
+ - description: Channel-specific registers
+ - description: System control registers
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#dma-cells":
+ const: 2
+ description: >
+ DMA clients must use the format described in dma.txt, giving a phandle
+ to the DMA controller plus the following 2 integer cells:
+
+ - Request type: The DMA request type for transfers to/from the
+ device on the allocated channel, as defined in the SoC documentation.
+
+ - Channel: If set to 0xffffffff, any available channel will be allocated
+ for the client. Otherwise, the exact channel specified will be used.
+ The channel should be reserved on the DMA controller using the
+ ingenic,reserved-channels property.
+
+ ingenic,reserved-channels:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ Bitmask of channels to reserve for devices that need a specific
+ channel. These channels will only be assigned when explicitly
+ requested by a client. The primary use for this is channels 0 and
+ 1, which can be configured to have special behaviour for NAND/BCH
+ when using programmable firmware.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+ dma: dma-controller@13420000 {
+ compatible = "ingenic,jz4780-dma";
+ reg = <0x13420000 0x400>, <0x13421000 0x40>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <10>;
+
+ clocks = <&cgu JZ4780_CLK_PDMA>;
+
+ #dma-cells = <2>;
+
+ ingenic,reserved-channels = <0x3>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
deleted file mode 100644
index 3459e77be294..000000000000
--- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-* Ingenic XBurst DMA Controller
-
-Required properties:
-
-- compatible: Should be one of:
- * ingenic,jz4740-dma
- * ingenic,jz4725b-dma
- * ingenic,jz4770-dma
- * ingenic,jz4780-dma
- * ingenic,x1000-dma
- * ingenic,x1830-dma
-- reg: Should contain the DMA channel registers location and length, followed
- by the DMA controller registers location and length.
-- interrupts: Should contain the interrupt specifier of the DMA controller.
-- clocks: Should contain a clock specifier for the JZ4780/X1000/X1830 PDMA
- clock.
-- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
- DMA clients (see below).
-
-Optional properties:
-
-- ingenic,reserved-channels: Bitmask of channels to reserve for devices that
- need a specific channel. These channels will only be assigned when explicitly
- requested by a client. The primary use for this is channels 0 and 1, which
- can be configured to have special behaviour for NAND/BCH when using
- programmable firmware.
-
-Example:
-
-dma: dma-controller@13420000 {
- compatible = "ingenic,jz4780-dma";
- reg = <0x13420000 0x400
- 0x13421000 0x40>;
-
- interrupt-parent = <&intc>;
- interrupts = <10>;
-
- clocks = <&cgu JZ4780_CLK_PDMA>;
-
- #dma-cells = <2>;
-
- ingenic,reserved-channels = <0x3>;
-};
-
-DMA clients must use the format described in dma.txt, giving a phandle to the
-DMA controller plus the following 2 integer cells:
-
-1. Request type: The DMA request type for transfers to/from the device on
- the allocated channel, as defined in the SoC documentation.
-
-2. Channel: If set to 0xffffffff, any available channel will be allocated for
- the client. Otherwise, the exact channel specified will be used. The channel
- should be reserved on the DMA controller using the ingenic,reserved-channels
- property.
-
-Example:
-
-uart0: serial@10030000 {
- ...
- dmas = <&dma 0x14 0xffffffff
- &dma 0x15 0xffffffff>;
- dma-names = "tx", "rx";
- ...
-};
diff --git a/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
index 5d6f98c43e3d..2117db0ce4f2 100644
--- a/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
+++ b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
@@ -21,7 +21,8 @@ Required properties:
Examples:
apdma: dma-controller@11000400 {
- compatible = "mediatek,mt2712-uart-dma";
+ compatible = "mediatek,mt2712-uart-dma",
+ "mediatek,mt6577-uart-dma";
reg = <0 0x11000400 0 0x80>,
<0 0x11000480 0 0x80>,
<0 0x11000500 0 0x80>,
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
deleted file mode 100644
index b7f81c63be8b..000000000000
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ /dev/null
@@ -1,117 +0,0 @@
-* Renesas R-Car (RZ/G) DMA Controller Device Tree bindings
-
-Renesas R-Car (Gen 2/3) and RZ/G SoCs have multiple multi-channel DMA
-controller instances named DMAC capable of serving multiple clients. Channels
-can be dedicated to specific clients or shared between a large number of
-clients.
-
-Each DMA client is connected to one dedicated port of the DMAC, identified by
-an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
-256 clients in total. When the number of hardware channels is lower than the
-number of clients to be served, channels must be shared between multiple DMA
-clients. The association of DMA clients to DMAC channels is fully dynamic and
-not described in these device tree bindings.
-
-Required Properties:
-
-- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
- Examples with soctypes are:
- - "renesas,dmac-r8a7743" (RZ/G1M)
- - "renesas,dmac-r8a7744" (RZ/G1N)
- - "renesas,dmac-r8a7745" (RZ/G1E)
- - "renesas,dmac-r8a77470" (RZ/G1C)
- - "renesas,dmac-r8a774a1" (RZ/G2M)
- - "renesas,dmac-r8a774b1" (RZ/G2N)
- - "renesas,dmac-r8a774c0" (RZ/G2E)
- - "renesas,dmac-r8a7790" (R-Car H2)
- - "renesas,dmac-r8a7791" (R-Car M2-W)
- - "renesas,dmac-r8a7792" (R-Car V2H)
- - "renesas,dmac-r8a7793" (R-Car M2-N)
- - "renesas,dmac-r8a7794" (R-Car E2)
- - "renesas,dmac-r8a7795" (R-Car H3)
- - "renesas,dmac-r8a7796" (R-Car M3-W)
- - "renesas,dmac-r8a77961" (R-Car M3-W+)
- - "renesas,dmac-r8a77965" (R-Car M3-N)
- - "renesas,dmac-r8a77970" (R-Car V3M)
- - "renesas,dmac-r8a77980" (R-Car V3H)
- - "renesas,dmac-r8a77990" (R-Car E3)
- - "renesas,dmac-r8a77995" (R-Car D3)
-
-- reg: base address and length of the registers block for the DMAC
-
-- interrupts: interrupt specifiers for the DMAC, one for each entry in
- interrupt-names.
-- interrupt-names: one entry for the error interrupt, named "error", plus one
- entry per channel, named "ch%u", where %u is the channel number ranging from
- zero to the number of channels minus one.
-
-- clock-names: "fck" for the functional clock
-- clocks: a list of phandle + clock-specifier pairs, one for each entry
- in clock-names.
-- clock-names: must contain "fck" for the functional clock.
-
-- #dma-cells: must be <1>, the cell specifies the MID/RID of the DMAC port
- connected to the DMA client
-- dma-channels: number of DMA channels
-
-Example: R8A7790 (R-Car H2) SYS-DMACs
-
- dmac0: dma-controller@e6700000 {
- compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
- reg = <0 0xe6700000 0 0x20000>;
- interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
- 0 200 IRQ_TYPE_LEVEL_HIGH
- 0 201 IRQ_TYPE_LEVEL_HIGH
- 0 202 IRQ_TYPE_LEVEL_HIGH
- 0 203 IRQ_TYPE_LEVEL_HIGH
- 0 204 IRQ_TYPE_LEVEL_HIGH
- 0 205 IRQ_TYPE_LEVEL_HIGH
- 0 206 IRQ_TYPE_LEVEL_HIGH
- 0 207 IRQ_TYPE_LEVEL_HIGH
- 0 208 IRQ_TYPE_LEVEL_HIGH
- 0 209 IRQ_TYPE_LEVEL_HIGH
- 0 210 IRQ_TYPE_LEVEL_HIGH
- 0 211 IRQ_TYPE_LEVEL_HIGH
- 0 212 IRQ_TYPE_LEVEL_HIGH
- 0 213 IRQ_TYPE_LEVEL_HIGH
- 0 214 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "error",
- "ch0", "ch1", "ch2", "ch3",
- "ch4", "ch5", "ch6", "ch7",
- "ch8", "ch9", "ch10", "ch11",
- "ch12", "ch13", "ch14";
- clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC0>;
- clock-names = "fck";
- #dma-cells = <1>;
- dma-channels = <15>;
- };
-
- dmac1: dma-controller@e6720000 {
- compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
- reg = <0 0xe6720000 0 0x20000>;
- interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
- 0 216 IRQ_TYPE_LEVEL_HIGH
- 0 217 IRQ_TYPE_LEVEL_HIGH
- 0 218 IRQ_TYPE_LEVEL_HIGH
- 0 219 IRQ_TYPE_LEVEL_HIGH
- 0 308 IRQ_TYPE_LEVEL_HIGH
- 0 309 IRQ_TYPE_LEVEL_HIGH
- 0 310 IRQ_TYPE_LEVEL_HIGH
- 0 311 IRQ_TYPE_LEVEL_HIGH
- 0 312 IRQ_TYPE_LEVEL_HIGH
- 0 313 IRQ_TYPE_LEVEL_HIGH
- 0 314 IRQ_TYPE_LEVEL_HIGH
- 0 315 IRQ_TYPE_LEVEL_HIGH
- 0 316 IRQ_TYPE_LEVEL_HIGH
- 0 317 IRQ_TYPE_LEVEL_HIGH
- 0 318 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "error",
- "ch0", "ch1", "ch2", "ch3",
- "ch4", "ch5", "ch6", "ch7",
- "ch8", "ch9", "ch10", "ch11",
- "ch12", "ch13", "ch14";
- clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC1>;
- clock-names = "fck";
- #dma-cells = <1>;
- dma-channels = <15>;
- };
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
new file mode 100644
index 000000000000..b842dfd96a89
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
@@ -0,0 +1,150 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/renesas,rcar-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car and RZ/G DMA Controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,dmac-r8a7743 # RZ/G1M
+ - renesas,dmac-r8a7744 # RZ/G1N
+ - renesas,dmac-r8a7745 # RZ/G1E
+ - renesas,dmac-r8a77470 # RZ/G1C
+ - renesas,dmac-r8a774a1 # RZ/G2M
+ - renesas,dmac-r8a774b1 # RZ/G2N
+ - renesas,dmac-r8a774c0 # RZ/G2E
+ - renesas,dmac-r8a7790 # R-Car H2
+ - renesas,dmac-r8a7791 # R-Car M2-W
+ - renesas,dmac-r8a7792 # R-Car V2H
+ - renesas,dmac-r8a7793 # R-Car M2-N
+ - renesas,dmac-r8a7794 # R-Car E2
+ - renesas,dmac-r8a7795 # R-Car H3
+ - renesas,dmac-r8a7796 # R-Car M3-W
+ - renesas,dmac-r8a77961 # R-Car M3-W+
+ - renesas,dmac-r8a77965 # R-Car M3-N
+ - renesas,dmac-r8a77970 # R-Car V3M
+ - renesas,dmac-r8a77980 # R-Car V3H
+ - renesas,dmac-r8a77990 # R-Car E3
+ - renesas,dmac-r8a77995 # R-Car D3
+ - const: renesas,rcar-dmac
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 9
+ maxItems: 17
+
+ interrupt-names:
+ minItems: 9
+ maxItems: 17
+ items:
+ - const: error
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+ - pattern: "^ch([0-9]|1[0-5])$"
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+ items:
+ - const: fck
+
+ '#dma-cells':
+ const: 1
+ description:
+ The cell specifies the MID/RID of the DMAC port connected to
+ the DMA client.
+
+ dma-channels:
+ minimum: 8
+ maximum: 16
+
+ dma-channel-mask: true
+
+ iommus:
+ minItems: 8
+ maxItems: 16
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - '#dma-cells'
+ - dma-channels
+ - power-domains
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ dmac0: dma-controller@e6700000 {
+ compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
+ reg = <0xe6700000 0x20000>;
+ interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&cpg CPG_MOD 219>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
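On the client side, the single #dma-cells cell is the MID/RID of the DMAC port serving that client. A hedged sketch, where the node address and the MID/RID values 0x13/0x14 are placeholders rather than figures from any SoC manual:

    serial@e6e60000 {
        dmas = <&dmac0 0x13>, <&dmac0 0x14>;
        dma-names = "tx", "rx";
    };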
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
deleted file mode 100644
index e8f6c42e80f2..000000000000
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-* Renesas USB DMA Controller Device Tree bindings
-
-Required Properties:
--compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
- Examples with soctypes are:
- - "renesas,r8a7743-usb-dmac" (RZ/G1M)
- - "renesas,r8a7744-usb-dmac" (RZ/G1N)
- - "renesas,r8a7745-usb-dmac" (RZ/G1E)
- - "renesas,r8a77470-usb-dmac" (RZ/G1C)
- - "renesas,r8a774a1-usb-dmac" (RZ/G2M)
- - "renesas,r8a774b1-usb-dmac" (RZ/G2N)
- - "renesas,r8a774c0-usb-dmac" (RZ/G2E)
- - "renesas,r8a7790-usb-dmac" (R-Car H2)
- - "renesas,r8a7791-usb-dmac" (R-Car M2-W)
- - "renesas,r8a7793-usb-dmac" (R-Car M2-N)
- - "renesas,r8a7794-usb-dmac" (R-Car E2)
- - "renesas,r8a7795-usb-dmac" (R-Car H3)
- - "renesas,r8a7796-usb-dmac" (R-Car M3-W)
- - "renesas,r8a77961-usb-dmac" (R-Car M3-W+)
- - "renesas,r8a77965-usb-dmac" (R-Car M3-N)
- - "renesas,r8a77990-usb-dmac" (R-Car E3)
- - "renesas,r8a77995-usb-dmac" (R-Car D3)
-- reg: base address and length of the registers block for the DMAC
-- interrupts: interrupt specifiers for the DMAC, one for each entry in
- interrupt-names.
-- interrupt-names: one entry per channel, named "ch%u", where %u is the
- channel number ranging from zero to the number of channels minus one.
-- clocks: a list of phandle + clock-specifier pairs.
-- #dma-cells: must be <1>, the cell specifies the channel number of the DMAC
- port connected to the DMA client.
-- dma-channels: number of DMA channels
-
-Example: R8A7790 (R-Car H2) USB-DMACs
-
- usb_dmac0: dma-controller@e65a0000 {
- compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
- reg = <0 0xe65a0000 0 0x100>;
- interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
- 0 109 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "ch0", "ch1";
- clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
- #dma-cells = <1>;
- dma-channels = <2>;
- };
-
- usb_dmac1: dma-controller@e65b0000 {
- compatible = "renesas,usb-dmac";
- reg = <0 0xe65b0000 0 0x100>;
- interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
- 0 110 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "ch0", "ch1";
- clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
- #dma-cells = <1>;
- dma-channels = <2>;
- };
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml
new file mode 100644
index 000000000000..9ca6d8ddf232
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/renesas,usb-dmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas USB DMA Controller
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+allOf:
+ - $ref: "dma-controller.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a7743-usb-dmac # RZ/G1M
+ - renesas,r8a7744-usb-dmac # RZ/G1N
+ - renesas,r8a7745-usb-dmac # RZ/G1E
+ - renesas,r8a77470-usb-dmac # RZ/G1C
+ - renesas,r8a774a1-usb-dmac # RZ/G2M
+ - renesas,r8a774b1-usb-dmac # RZ/G2N
+ - renesas,r8a774c0-usb-dmac # RZ/G2E
+ - renesas,r8a7790-usb-dmac # R-Car H2
+ - renesas,r8a7791-usb-dmac # R-Car M2-W
+ - renesas,r8a7793-usb-dmac # R-Car M2-N
+ - renesas,r8a7794-usb-dmac # R-Car E2
+ - renesas,r8a7795-usb-dmac # R-Car H3
+ - renesas,r8a7796-usb-dmac # R-Car M3-W
+ - renesas,r8a77961-usb-dmac # R-Car M3-W+
+ - renesas,r8a77965-usb-dmac # R-Car M3-N
+ - renesas,r8a77990-usb-dmac # R-Car E3
+ - renesas,r8a77995-usb-dmac # R-Car D3
+ - const: renesas,usb-dmac
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - pattern: ch0
+ - pattern: ch1
+
+ clocks:
+ maxItems: 1
+
+ '#dma-cells':
+ const: 1
+ description:
+ The cell specifies the channel number of the DMAC port connected to
+ the DMA client.
+
+ dma-channels:
+ const: 2
+
+ iommus:
+ minItems: 2
+ maxItems: 2
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - '#dma-cells'
+ - dma-channels
+ - power-domains
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+
+ usb_dmac0: dma-controller@e65a0000 {
+ compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
+ reg = <0xe65a0000 0x100>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&cpg CPG_MOD 330>;
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 330>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
+ };
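A corresponding client sketch, where each cell is simply the channel number of the DMAC port used; the node address and channel assignment are assumptions:

    usb@e6590000 {
        dmas = <&usb_dmac0 0>, <&usb_dmac0 1>;
        dma-names = "ch0", "ch1";
    };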
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
index e7f2ad7dab5e..d32a71b975fe 100644
--- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
@@ -49,7 +49,7 @@ examples:
- |
dma@3000000 {
compatible = "sifive,fu540-c000-pdma";
- reg = <0x0 0x3000000 0x0 0x8000>;
+ reg = <0x3000000 0x8000>;
interrupts = <23 24 25 26 27 28 29 30>;
#dma-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
index 0c0ac11ad55f..71987878e4ae 100644
--- a/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
@@ -36,6 +36,11 @@ description: |
0x1: 1/2 full FIFO
0x2: 3/4 full FIFO
0x3: full FIFO
+ -bit 2: DMA direct mode
+ 0x0: FIFO mode with threshold selectable with bit 0-1
+ 0x1: Direct mode: each DMA request immediately initiates a transfer
+ from/to the memory; the FIFO is bypassed.
+
maintainers:
- Amelie Delaunay <amelie.delaunay@st.com>
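Assuming the usual st,stm32-dma client format (a phandle plus channel id, request line, channel configuration and the features bitfield described above; check the '#dma-cells' description in this file before relying on it), direct mode would be requested by setting bit 2 of the last cell. All other numbers are placeholders:

    serial@40011000 {
        /* last cell 0x4: bit 2 set -> direct mode, FIFO bypassed */
        dmas = <&dma2 2 4 0x400 0x4>,
               <&dma2 7 4 0x400 0x4>;
        dma-names = "rx", "tx";
    };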
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 39ea05e6e5ff..dd70ddab4fd1 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -69,34 +69,30 @@ properties:
maxItems: 3
reg-names:
- items:
- - const: gcfg
- - const: rchanrt
- - const: tchanrt
+ items:
+ - const: gcfg
+ - const: rchanrt
+ - const: tchanrt
msi-parent: true
ti,sci:
description: phandle to TI-SCI compatible System controller node
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
ti,sci-dev-id:
description: TI-SCI device id of UDMAP
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
ti,ringacc:
description: phandle to the ring accelerator node
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
ti,sci-rm-range-tchan:
description: |
Array of UDMA tchan resource subtypes for resource allocation for this
host
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
# Should be enough
maxItems: 255
@@ -105,8 +101,7 @@ properties:
description: |
Array of UDMA rchan resource subtypes for resource allocation for this
host
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
# Should be enough
maxItems: 255
@@ -115,8 +110,7 @@ properties:
description: |
Array of UDMA rflow resource subtypes for resource allocation for this
host
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 1
# Should be enough
maxItems: 255
@@ -142,8 +136,7 @@ then:
properties:
ti,udma-atype:
description: ATYPE value which should be used by non slave channels
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
required:
- ti,udma-atype
diff --git a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
index a5dc070d0ca7..3bbe9521c0bc 100644
--- a/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
+++ b/Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
@@ -17,6 +17,8 @@ properties:
compatible:
enum:
- fsl,imx8qxp-dsp
+ - fsl,imx8qm-dsp
+ - fsl,imx8mp-dsp
reg:
description: Should contain register location and length
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index a15787e504f0..4cee72d53318 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -34,7 +34,7 @@ properties:
- minItems: 1
maxItems: 2
items:
- - pattern: "^(atmel|catalyst|microchip|nxp|ramtron|renesas|rohm|st),(24(c|cs|mac)[0-9]+|spd)$"
+ - pattern: "^(atmel|catalyst|microchip|nxp|ramtron|renesas|rohm|st),(24(c|cs|lc|mac)[0-9]+|spd)$"
- pattern: "^atmel,(24(c|cs|mac)[0-9]+|spd)$"
- oneOf:
- items:
@@ -118,14 +118,13 @@ properties:
maxItems: 1
pagesize:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
The length of the pagesize for writing. Please consult the
manual of your device, that value varies a lot. A wrong value
may result in data loss! If not specified, a safety value of
'1' is used which will be very slow.
- enum: [ 1, 8, 16, 32, 64, 128, 258 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 8, 16, 32, 64, 128, 256]
default: 1
read-only:
@@ -148,18 +147,16 @@ properties:
wp-gpios: true
address-width:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
Number of address bits.
+ $ref: /schemas/types.yaml#/definitions/uint32
default: 8
enum: [ 8, 16 ]
num-addresses:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
Total number of i2c slave addresses this device takes.
+ $ref: /schemas/types.yaml#/definitions/uint32
default: 1
minimum: 1
maximum: 8
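A minimal node tying these properties together; the part number and values are illustrative, and as the pagesize description warns, the device datasheet is the authority:

    eeprom@52 {
        compatible = "microchip,24c32", "atmel,24c32";
        reg = <0x52>;
        pagesize = <32>;
        address-width = <16>;
        num-addresses = <1>;
    };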
diff --git a/Documentation/devicetree/bindings/example-schema.yaml b/Documentation/devicetree/bindings/example-schema.yaml
index 62811a1b5058..c9534d2164a2 100644
--- a/Documentation/devicetree/bindings/example-schema.yaml
+++ b/Documentation/devicetree/bindings/example-schema.yaml
@@ -138,12 +138,8 @@ properties:
# 'description'.
vendor,int-property:
description: Vendor specific properties must have a description
- # 'allOf' is the json-schema way of subclassing a schema. Here the base
- # type schema is referenced and then additional constraints on the values
- # are added.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [2, 4, 6, 8, 10]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [2, 4, 6, 8, 10]
vendor,bool-property:
description: Vendor specific properties must have a description. Boolean
@@ -154,11 +150,10 @@ properties:
vendor,string-array-property:
description: Vendor specific properties should reference a type in the
core schema.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string-array
- - items:
- - enum: [ foo, bar ]
- - enum: [ baz, boo ]
+ $ref: /schemas/types.yaml#/definitions/string-array
+ items:
+ - enum: [foo, bar]
+ - enum: [baz, boo]
vendor,property-in-standard-units-microvolt:
description: Vendor specific properties having a standard unit suffix
diff --git a/Documentation/devicetree/bindings/extcon/extcon-arizona.txt b/Documentation/devicetree/bindings/extcon/extcon-arizona.txt
deleted file mode 100644
index 208daaff0be4..000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-arizona.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-Cirrus Logic Arizona class audio SoCs
-
-These devices are audio SoCs with extensive digital capabilities and a range
-of analogue I/O.
-
-This document lists Extcon specific bindings, see the primary binding document:
- ../mfd/arizona.txt
-
-Optional properties:
-
- - wlf,hpdet-channel : Headphone detection channel.
- ARIZONA_ACCDET_MODE_HPL or 1 - Headphone detect mode is set to HPDETL
- ARIZONA_ACCDET_MODE_HPR or 2 - Headphone detect mode is set to HPDETR
- If this node is not mentioned or if the value is unknown, then
- headphone detection mode is set to HPDETL.
-
- - wlf,use-jd2 : Use the additional JD input along with JD1 for dual pin jack
- detection.
- - wlf,use-jd2-nopull : Internal pull on JD2 is disabled when used for
- jack detection.
- - wlf,jd-invert : Invert the polarity of the jack detection switch
-
- - wlf,micd-software-compare : Use a software comparison to determine mic
- presence
- - wlf,micd-detect-debounce : Additional software microphone detection
- debounce specified in milliseconds.
- - wlf,micd-pol-gpio : GPIO specifier for the GPIO controlling the headset
- polarity if one exists.
- - wlf,micd-bias-start-time : Time allowed for MICBIAS to startup prior to
- performing microphone detection, specified as per the ARIZONA_MICD_TIME_XXX
- defines.
- - wlf,micd-rate : Delay between successive microphone detection measurements,
- specified as per the ARIZONA_MICD_TIME_XXX defines.
- - wlf,micd-dbtime : Microphone detection hardware debounces specified as the
- number of measurements to take, valid values being 2 and 4.
- - wlf,micd-timeout-ms : Timeout for microphone detection, specified in
- milliseconds.
- - wlf,micd-force-micbias : Force MICBIAS continuously on during microphone
- detection.
- - wlf,micd-configs : Headset polarity configurations (generally used for
- detection of CTIA / OMTP headsets), the field can be of variable length
- but should always be a multiple of 3 cells long, each three cell group
- represents one polarity configuration.
- The first cell defines the accessory detection pin, zero will use MICDET1
- and all other values will use MICDET2.
- The second cell represents the MICBIAS to be used.
- The third cell represents the value of the micd-pol-gpio pin.
-
- - wlf,gpsw : Settings for the general purpose switch, set as one of the
- ARIZONA_GPSW_XXX defines.
-
-Example:
-
-codec: wm8280@0 {
- compatible = "wlf,wm8280";
- reg = <0>;
- ...
-
- wlf,use-jd2;
- wlf,use-jd2-nopull;
- wlf,jd-invert;
-
- wlf,micd-software-compare;
- wlf,micd-detect-debounce = <0>;
- wlf,micd-pol-gpio = <&codec 2 0>;
- wlf,micd-rate = <ARIZONA_MICD_TIME_8MS>;
- wlf,micd-dbtime = <4>;
- wlf,micd-timeout-ms = <100>;
- wlf,micd-force-micbias;
- wlf,micd-configs = <
- 0 1 0 /* MICDET1 MICBIAS1 GPIO=low */
- 1 2 1 /* MICDET2 MICBIAS2 GPIO=high */
- >;
-
- wlf,gpsw = <ARIZONA_GPSW_OPEN>;
-};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
index 9c5849b341ea..20e1ccfc8630 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
+++ b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
@@ -22,8 +22,7 @@ properties:
const: google,extcon-usbc-cros-ec
google,usb-port-id:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
description: the port id
minimum: 0
maximum: 255
diff --git a/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml b/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
new file mode 100644
index 000000000000..f9845dc2f5ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/wlf,arizona.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic/Wolfson Microelectronics Arizona class audio SoCs
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ These devices are audio SoCs with extensive digital capabilities and a
+ range of analogue I/O.
+
+ This document lists Extcon specific bindings, see the primary binding
+ document ../mfd/arizona.yaml
+
+properties:
+ wlf,hpdet-channel:
+ description:
+ Headphone detection channel. ARIZONA_ACCDET_MODE_HPL/1 sets the
+ headphone detect mode to HPDETL, ARIZONA_ACCDET_MODE_HPR/2 sets it
+ to HPDETR. If this node is not included or if the value is unknown,
+ then headphone detection mode is set to HPDETL.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 1
+ maximum: 2
+
+ wlf,use-jd2:
+ description:
+ Use the additional JD input along with JD1 for dual pin jack detection.
+ type: boolean
+
+ wlf,use-jd2-nopull:
+ description:
+ Internal pull on JD2 is disabled when used for jack detection.
+ type: boolean
+
+ wlf,jd-invert:
+ description:
+ Invert the polarity of the jack detection switch.
+ type: boolean
+
+ wlf,micd-software-compare:
+ description:
+ Use a software comparison to determine mic presence.
+ type: boolean
+
+ wlf,micd-detect-debounce:
+ description:
+ Additional software microphone detection debounce specified in
+ milliseconds.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+
+ wlf,micd-pol-gpio:
+ description:
+ GPIO specifier for the GPIO controlling the headset polarity if one
+ exists.
+ maxItems: 1
+
+ wlf,micd-bias-start-time:
+ description:
+ Time allowed for MICBIAS to startup prior to performing microphone
+ detection, specified as per the ARIZONA_MICD_TIME_XXX defines.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 12
+
+ wlf,micd-rate:
+ description:
+ Delay between successive microphone detection measurements, specified
+ as per the ARIZONA_MICD_TIME_XXX defines.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 12
+
+ wlf,micd-dbtime:
+ description:
+ Microphone detection hardware debounces specified as the number of
+ measurements to take.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [2, 4]
+
+ wlf,micd-timeout-ms:
+ description:
+ Timeout for microphone detection, specified in milliseconds.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+
+ wlf,micd-force-micbias:
+ description:
+ Force MICBIAS continuously on during microphone detection.
+ type: boolean
+
+ wlf,micd-configs:
+ description:
+ Headset polarity configurations (generally used for detection of
+ CTIA / OMTP headsets); the field can be of variable length but
+ should always be a multiple of 3 cells long. Each three-cell group
+ represents one polarity configuration.
+ $ref: "/schemas/types.yaml#/definitions/uint32-matrix"
+ items:
+ items:
+ - description:
+ The first cell defines the accessory detection pin, zero
+ will use MICDET1 and 0x2000 will use MICDET2.
+ enum: [ 0, 0x2000 ]
+ - description:
+ The second cell represents the MICBIAS to be used. Zero
+ will use MICVDD, 1-3 will use MICBIASx.
+ minimum: 0
+ maximum: 3
+ - description:
+ The third cell represents the value of the micd-pol-gpio
+ pin.
+ minimum: 0
+ maximum: 1
+
+ wlf,gpsw:
+ description:
+ Settings for the general purpose switch, set as one of the
+ ARIZONA_GPSW_XXX defines.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 3
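+
+# Illustrative sketch, not part of the original binding: how a few of these
+# extcon properties might appear inside an Arizona codec node described by
+# ../mfd/arizona.yaml. The node name, compatible and values are assumptions.
+#
+#  codec: wm8280@0 {
+#      compatible = "wlf,wm8280";
+#      reg = <0>;
+#      wlf,hpdet-channel = <1>;          /* HPDETL */
+#      wlf,use-jd2;
+#      wlf,micd-configs = <0 1 0>,       /* MICDET1, MICBIAS1, GPIO low */
+#                         <0x2000 2 1>;  /* MICDET2, MICBIAS2, GPIO high */
+#      wlf,gpsw = <0>;
+#  };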
diff --git a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
index 1fa66065acc6..6eff1afd8daf 100644
--- a/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
+++ b/Documentation/devicetree/bindings/firmware/intel,stratix10-svc.txt
@@ -23,7 +23,7 @@ Required properties:
The svc node has the following mandatory properties, must be located under
the firmware node.
-- compatible: "intel,stratix10-svc"
+- compatible: "intel,stratix10-svc" or "intel,agilex-svc"
- method: smc or hvc
smc - Secure Monitor Call
hvc - Hypervisor Call
diff --git a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
index 6e03f79287fb..0f874137ca46 100644
--- a/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
+++ b/Documentation/devicetree/bindings/fpga/intel-stratix10-soc-fpga-mgr.txt
@@ -4,7 +4,8 @@ Required properties:
The fpga_mgr node has the following mandatory property, must be located under
firmware/svc node.
-- compatible : should contain "intel,stratix10-soc-fpga-mgr"
+- compatible : should contain "intel,stratix10-soc-fpga-mgr" or
+ "intel,agilex-soc-fpga-mgr"
Example:
diff --git a/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml b/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
index 5f1ed20e43ee..4f2cbd8307a7 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/brcm,xgs-iproc-gpio.yaml
@@ -27,7 +27,7 @@ properties:
gpio-controller: true
'#gpio-cells':
- const: 2
+ const: 2
ngpios:
minimum: 0
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
deleted file mode 100644
index b4cd9f906c24..000000000000
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-* Freescale i.MX/MXC GPIO controller
-
-Required properties:
-- compatible : Should be "fsl,<soc>-gpio"
-- reg : Address and length of the register set for the device
-- interrupts : Should be the port interrupt shared by all 32 pins, if
- one number. If two numbers, the first one is the interrupt shared
- by low 16 pins and the second one is for high 16 pins.
-- gpio-controller : Marks the device node as a gpio controller.
-- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify the gpio polarity:
- 0 = active high
- 1 = active low
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells : Should be 2. The first cell is the GPIO number.
- The second cell bits[3:0] is used to specify trigger type and level flags:
- 1 = low-to-high edge triggered.
- 2 = high-to-low edge triggered.
- 4 = active high level-sensitive.
- 8 = active low level-sensitive.
-
-Optional properties:
-- clocks: the clock for clocking the GPIO silicon
-
-Example:
-
-gpio0: gpio@73f84000 {
- compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
- reg = <0x73f84000 0x4000>;
- interrupts = <50 51>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
-};
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
new file mode 100644
index 000000000000..0b223abe8cfb
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/fsl-imx-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX/MXC GPIO controller
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx1-gpio
+ - fsl,imx21-gpio
+ - fsl,imx31-gpio
+ - fsl,imx35-gpio
+ - fsl,imx7d-gpio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: |
+ Should be the port interrupt shared by all 32 pins, if one number.
+ If two numbers, the first one is the interrupt shared by low 16 pins
+ and the second one is for high 16 pins.
+ minItems: 1
+ maxItems: 2
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-controller: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+ - "#gpio-cells"
+ - gpio-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ gpio0: gpio@73f84000 {
+ compatible = "fsl,imx35-gpio";
+ reg = <0x73f84000 0x4000>;
+ interrupts = <50 51>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
deleted file mode 100644
index 1e677a47b836..000000000000
--- a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-* Freescale MXS GPIO controller
-
-The Freescale MXS GPIO controller is part of MXS PIN controller. The
-GPIOs are organized in port/bank. Each port consists of 32 GPIOs.
-
-As the GPIO controller is embedded in the PIN controller and all the
-GPIO ports share the same IO space with PIN controller, the GPIO node
-will be represented as sub-nodes of MXS pinctrl node.
-
-Required properties for GPIO node:
-- compatible : Should be "fsl,<soc>-gpio". The supported SoCs include
- imx23 and imx28.
-- interrupts : Should be the port interrupt shared by all 32 pins.
-- gpio-controller : Marks the device node as a gpio controller.
-- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify the gpio polarity:
- 0 = active high
- 1 = active low
-- interrupt-controller: Marks the device node as an interrupt controller.
-- #interrupt-cells : Should be 2. The first cell is the GPIO number.
- The second cell bits[3:0] is used to specify trigger type and level flags:
- 1 = low-to-high edge triggered.
- 2 = high-to-low edge triggered.
- 4 = active high level-sensitive.
- 8 = active low level-sensitive.
-
-Note: Each GPIO port should have an alias correctly numbered in "aliases"
-node.
-
-Examples:
-
-aliases {
- gpio0 = &gpio0;
- gpio1 = &gpio1;
- gpio2 = &gpio2;
- gpio3 = &gpio3;
- gpio4 = &gpio4;
-};
-
-pinctrl@80018000 {
- compatible = "fsl,imx28-pinctrl", "simple-bus";
- reg = <0x80018000 2000>;
-
- gpio0: gpio@0 {
- compatible = "fsl,imx28-gpio";
- interrupts = <127>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio1: gpio@1 {
- compatible = "fsl,imx28-gpio";
- interrupts = <126>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio2: gpio@2 {
- compatible = "fsl,imx28-gpio";
- interrupts = <125>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio3: gpio@3 {
- compatible = "fsl,imx28-gpio";
- interrupts = <124>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpio4: gpio@4 {
- compatible = "fsl,imx28-gpio";
- interrupts = <123>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml b/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml
new file mode 100644
index 000000000000..ccf5b50e798b
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/gpio-mxs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MXS GPIO controller
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ The Freescale MXS GPIO controller is part of MXS PIN controller.
+ The GPIOs are organized in port/bank, each port consists of 32 GPIOs.
+ As the GPIO controller is embedded in the PIN controller and all the
+ GPIO ports share the same IO space with PIN controller, the GPIO node
+ will be represented as sub-nodes of MXS pinctrl node.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx23-pinctrl
+ - fsl,imx28-pinctrl
+
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ "gpio@[0-9]+$":
+ type: object
+ properties:
+ compatible:
+ enum:
+ - fsl,imx23-gpio
+ - fsl,imx28-gpio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: Should be the port interrupt shared by all 32 pins.
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-controller: true
+
+ required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+ - "#gpio-cells"
+ - gpio-controller
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ pinctrl@80018000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-pinctrl";
+ reg = <0x80018000 0x2000>;
+
+ gpio@0 {
+ compatible = "fsl,imx28-gpio";
+ reg = <0>;
+ interrupts = <127>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio@1 {
+ compatible = "fsl,imx28-gpio";
+ reg = <1>;
+ interrupts = <126>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio@2 {
+ compatible = "fsl,imx28-gpio";
+ reg = <2>;
+ interrupts = <125>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio@3 {
+ compatible = "fsl,imx28-gpio";
+ reg = <3>;
+ interrupts = <124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio@4 {
+ compatible = "fsl,imx28-gpio";
+ reg = <4>;
+ interrupts = <123>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml b/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml
new file mode 100644
index 000000000000..8bdef812c87c
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/renesas,em-gio.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/renesas,em-gio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas EMMA Mobile General Purpose I/O Interface
+
+maintainers:
+ - Magnus Damm <magnus.damm@gmail.com>
+
+properties:
+ compatible:
+ const: renesas,em-gio
+
+ reg:
+ items:
+ - description: First set of contiguous registers
+ - description: Second set of contiguous registers
+
+ interrupts:
+ items:
+ - description: Interrupt for the first set of 16 GPIO ports
+ - description: Interrupt for the second set of 16 GPIO ports
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ ngpios:
+ minimum: 1
+ maximum: 32
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+ - ngpios
+ - interrupt-controller
+ - '#interrupt-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ gpio0: gpio@e0050000 {
+ compatible = "renesas,em-gio";
+ reg = <0xe0050000 0x2c>, <0xe0050040 0x20>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 0 32>;
+ ngpios = <32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
deleted file mode 100644
index 10dce84b1545..000000000000
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-* Renesas R-Car GPIO Controller
-
-Required Properties:
-
- - compatible: should contain one or more of the following:
- - "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
- - "renesas,gpio-r8a7744": for R8A7744 (RZ/G1N) compatible GPIO controller.
- - "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
- - "renesas,gpio-r8a77470": for R8A77470 (RZ/G1C) compatible GPIO controller.
- - "renesas,gpio-r8a774a1": for R8A774A1 (RZ/G2M) compatible GPIO controller.
- - "renesas,gpio-r8a774b1": for R8A774B1 (RZ/G2N) compatible GPIO controller.
- - "renesas,gpio-r8a774c0": for R8A774C0 (RZ/G2E) compatible GPIO controller.
- - "renesas,gpio-r8a7778": for R8A7778 (R-Car M1) compatible GPIO controller.
- - "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- - "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
- - "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
- - "renesas,gpio-r8a7792": for R8A7792 (R-Car V2H) compatible GPIO controller.
- - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
- - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
- - "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
- - "renesas,gpio-r8a7796": for R8A77960 (R-Car M3-W) compatible GPIO controller.
- - "renesas,gpio-r8a77961": for R8A77961 (R-Car M3-W+) compatible GPIO controller.
- - "renesas,gpio-r8a77965": for R8A77965 (R-Car M3-N) compatible GPIO controller.
- - "renesas,gpio-r8a77970": for R8A77970 (R-Car V3M) compatible GPIO controller.
- - "renesas,gpio-r8a77980": for R8A77980 (R-Car V3H) compatible GPIO controller.
- - "renesas,gpio-r8a77990": for R8A77990 (R-Car E3) compatible GPIO controller.
- - "renesas,gpio-r8a77995": for R8A77995 (R-Car D3) compatible GPIO controller.
- - "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
- - "renesas,rcar-gen2-gpio": for a generic R-Car Gen2 or RZ/G1 GPIO controller.
- - "renesas,rcar-gen3-gpio": for a generic R-Car Gen3 or RZ/G2 GPIO controller.
- - "renesas,gpio-rcar": deprecated.
-
- When compatible with the generic version nodes must list the
- SoC-specific version corresponding to the platform first followed by
- the generic version.
-
- - reg: Base address and length of each memory resource used by the GPIO
- controller hardware module.
-
- - interrupts: Interrupt specifier for the controllers interrupt.
-
- - gpio-controller: Marks the device node as a gpio controller.
- - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
- cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
- GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
- - gpio-ranges: See gpio.txt.
-
-Optional properties:
-
- - clocks: Must contain a reference to the functional clock. The property is
- mandatory if the hardware implements a controllable functional clock for
- the GPIO instance.
-
- - gpio-reserved-ranges: See gpio.txt.
-
-Please refer to gpio.txt in this directory for the common GPIO bindings used by
-client devices.
-
-The GPIO controller also acts as an interrupt controller. It uses the default
-two cells specifier as described in Documentation/devicetree/bindings/
-interrupt-controller/interrupts.txt.
-
-Example: R8A77470 (RZ/G1C) GPIO controller nodes
-
- gpio0: gpio@e6050000 {
- compatible = "renesas,gpio-r8a77470",
- "renesas,rcar-gen2-gpio";
- reg = <0 0xe6050000 0 0x50>;
- interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- #gpio-cells = <2>;
- gpio-controller;
- gpio-ranges = <&pfc 0 0 23>;
- #interrupt-cells = <2>;
- interrupt-controller;
- clocks = <&cpg CPG_MOD 912>;
- power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
- resets = <&cpg 912>;
- };
- ...
- gpio3: gpio@e6053000 {
- compatible = "renesas,gpio-r8a77470",
- "renesas,rcar-gen2-gpio";
- reg = <0 0xe6053000 0 0x50>;
- interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- #gpio-cells = <2>;
- gpio-controller;
- gpio-ranges = <&pfc 0 96 30>;
- gpio-reserved-ranges = <17 10>;
- #interrupt-cells = <2>;
- interrupt-controller;
- clocks = <&cpg CPG_MOD 909>;
- power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
- resets = <&cpg 909>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
new file mode 100644
index 000000000000..397d9383d15a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/renesas,rcar-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car General-Purpose Input/Output Ports (GPIO)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,gpio-r8a7778 # R-Car M1
+ - renesas,gpio-r8a7779 # R-Car H1
+ - const: renesas,rcar-gen1-gpio # R-Car Gen1
+
+ - items:
+ - enum:
+ - renesas,gpio-r8a7742 # RZ/G1H
+ - renesas,gpio-r8a7743 # RZ/G1M
+ - renesas,gpio-r8a7744 # RZ/G1N
+ - renesas,gpio-r8a7745 # RZ/G1E
+ - renesas,gpio-r8a77470 # RZ/G1C
+ - renesas,gpio-r8a7790 # R-Car H2
+ - renesas,gpio-r8a7791 # R-Car M2-W
+ - renesas,gpio-r8a7792 # R-Car V2H
+ - renesas,gpio-r8a7793 # R-Car M2-N
+ - renesas,gpio-r8a7794 # R-Car E2
+ - const: renesas,rcar-gen2-gpio # R-Car Gen2 or RZ/G1
+
+ - items:
+ - enum:
+ - renesas,gpio-r8a774a1 # RZ/G2M
+ - renesas,gpio-r8a774b1 # RZ/G2N
+ - renesas,gpio-r8a774c0 # RZ/G2E
+ - renesas,gpio-r8a7795 # R-Car H3
+ - renesas,gpio-r8a7796 # R-Car M3-W
+ - renesas,gpio-r8a77961 # R-Car M3-W+
+ - renesas,gpio-r8a77965 # R-Car M3-N
+ - renesas,gpio-r8a77970 # R-Car V3M
+ - renesas,gpio-r8a77980 # R-Car V3H
+ - renesas,gpio-r8a77990 # R-Car E3
+ - renesas,gpio-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-gpio # R-Car Gen3 or RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ gpio-reserved-ranges:
+ minItems: 1
+ maxItems: 8
+
+patternProperties:
+ "^.*$":
+ if:
+ type: object
+ then:
+ properties:
+ gpio-hog: true
+ gpios: true
+ input: true
+ output-high: true
+ output-low: true
+ line-name: true
+
+ required:
+ - gpio-hog
+ - gpios
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+ - interrupt-controller
+ - '#interrupt-cells'
+
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen1-gpio
+then:
+ required:
+ - clocks
+ - power-domains
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a77470-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a77470-sysc.h>
+ gpio3: gpio@e6053000 {
+ compatible = "renesas,gpio-r8a77470", "renesas,rcar-gen2-gpio";
+ reg = <0xe6053000 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 909>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pfc 0 96 30>;
+ gpio-reserved-ranges = <17 10>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
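+
+ /* Illustrative hog child node, not part of the original example: it
+  * exercises the gpio-hog pattern accepted by this schema. The node
+  * name, GPIO offset and line name are assumptions.
+  */
+ led-enable-hog {
+ gpio-hog;
+ gpios = <10 0>;
+ output-high;
+ line-name = "led-enable";
+ };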
+ };
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
index 418e8381e07c..a0efd8dc2538 100644
--- a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
@@ -57,7 +57,7 @@ examples:
compatible = "sifive,fu540-c000-gpio", "sifive,gpio0";
interrupt-parent = <&plic>;
interrupts = <7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22>;
- reg = <0x0 0x10060000 0x0 0x1000>;
+ reg = <0x10060000 0x1000>;
clocks = <&tlclk PRCI_CLK_TLCLK>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml b/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
new file mode 100644
index 000000000000..04a3c51e1dc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/snps,dw-apb-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare APB GPIO controller
+
+description: |
+ Synopsys DesignWare GPIO controllers have a configurable number of ports,
+ each of which is intended to be represented as a child node with the generic
+ GPIO-controller properties as described in this binding file.
+
+maintainers:
+ - Hoan Tran <hoan@os.amperecomputing.com>
+ - Serge Semin <fancer.lancer@gmail.com>
+
+properties:
+ $nodename:
+ pattern: "^gpio@[0-9a-f]+$"
+
+ compatible:
+ const: snps,dw-apb-gpio
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ items:
+ - description: APB interface clock source
+ - description: DW GPIO debounce reference clock source
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: bus
+ - const: db
+
+ resets:
+ maxItems: 1
+
+patternProperties:
+ "^gpio-(port|controller)@[0-9a-f]+$":
+ type: object
+ properties:
+ compatible:
+ const: snps,dw-apb-gpio-port
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ snps,nr-gpios:
+ description: The number of GPIO pins exported by the port.
+ default: 32
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ maximum: 32
+
+ interrupts:
+ description: |
+ The interrupts to the parent controller raised when GPIOs generate
+ the interrupts. If the controller provides one combined interrupt
+ for all GPIOs, specify a single interrupt. If the controller provides
+ one interrupt for each GPIO, provide a list of interrupts that
+ correspond to each of the GPIO pins.
+ minItems: 1
+ maxItems: 32
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+
+ dependencies:
+ interrupt-controller: [ interrupts ]
+
+ additionalProperties: false
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ gpio: gpio@20000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x20000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ porta: gpio-port@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <8>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&vic1>;
+ interrupts = <0>;
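+ /* If the controller instead raises one interrupt per GPIO line, the
+  * binding above allows listing them individually, e.g. (illustrative
+  * values only): interrupts = <0>, <1>, <2>, <3>;
+  */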
+ };
+
+ portb: gpio-port@1 {
+ compatible = "snps,dw-apb-gpio-port";
+ reg = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <8>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt b/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
deleted file mode 100644
index 839dd32ffe11..000000000000
--- a/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-* Synopsys DesignWare APB GPIO controller
-
-Required properties:
-- compatible : Should contain "snps,dw-apb-gpio"
-- reg : Address and length of the register set for the device.
-- #address-cells : should be 1 (for addressing port subnodes).
-- #size-cells : should be 0 (port subnodes).
-
-The GPIO controller has a configurable number of ports, each of which are
-represented as child nodes with the following properties:
-
-Required properties:
-- compatible : "snps,dw-apb-gpio-port"
-- gpio-controller : Marks the device node as a gpio controller.
-- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify the gpio polarity:
- 0 = active high
- 1 = active low
-- reg : The integer port index of the port, a single cell.
-
-Optional properties:
-- interrupt-controller : The first port may be configured to be an interrupt
-controller.
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt. Shall be set to 2. The first cell defines the interrupt number,
- the second encodes the triger flags encoded as described in
- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-- interrupts : The interrupts to the parent controller raised when GPIOs
- generate the interrupts. If the controller provides one combined interrupt
- for all GPIOs, specify a single interrupt. If the controller provides one
- interrupt for each GPIO, provide a list of interrupts that correspond to each
- of the GPIO pins. When specifying multiple interrupts, if any are unconnected,
- use the interrupts-extended property to specify the interrupts and set the
- interrupt controller handle for unused interrupts to 0.
-- snps,nr-gpios : The number of pins in the port, a single cell.
-- resets : Reset line for the controller.
-
-Example:
-
-gpio: gpio@20000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x20000 0x1000>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- porta: gpio@0 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <8>;
- reg = <0>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&vic1>;
- interrupts = <0>;
- };
-
- portb: gpio@1 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <8>;
- reg = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 0b229a7d4a98..b1844b9c295d 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -43,9 +43,15 @@ properties:
operating-points-v2: true
+ power-domains:
+ maxItems: 1
+
resets:
maxItems: 2
+ "#cooling-cells":
+ const: 2
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
index 0407e45eb8c4..80d519a76db2 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
@@ -16,33 +16,33 @@ properties:
oneOf:
- items:
- enum:
- - samsung,exynos5250-mali
+ - samsung,exynos5250-mali
- const: arm,mali-t604
- items:
- enum:
- - samsung,exynos5420-mali
+ - samsung,exynos5420-mali
- const: arm,mali-t628
- items:
- enum:
- - allwinner,sun50i-h6-mali
+ - allwinner,sun50i-h6-mali
- const: arm,mali-t720
- items:
- enum:
- - amlogic,meson-gxm-mali
- - realtek,rtd1295-mali
+ - amlogic,meson-gxm-mali
+ - realtek,rtd1295-mali
- const: arm,mali-t820
- items:
- enum:
- - arm,juno-mali
+ - arm,juno-mali
- const: arm,mali-t624
- items:
- enum:
- - rockchip,rk3288-mali
- - samsung,exynos5433-mali
+ - rockchip,rk3288-mali
+ - samsung,exynos5433-mali
- const: arm,mali-t760
- items:
- enum:
- - rockchip,rk3399-mali
+ - rockchip,rk3399-mali
- const: arm,mali-t860
# "arm,mali-t830"
@@ -87,6 +87,8 @@ properties:
"#cooling-cells":
const: 2
+ dma-coherent: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index f5401cc8de4a..6226d31ec4b7 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -41,6 +41,7 @@ properties:
- amlogic,meson-gxbb-mali
- amlogic,meson-gxl-mali
- hisilicon,hi6220-mali
+ - mediatek,mt7623-mali
- rockchip,rk3328-mali
- const: arm,mali-450
@@ -107,6 +108,9 @@ properties:
operating-points-v2: true
+ "#cooling-cells":
+ const: 2
+
required:
- compatible
- reg
@@ -130,6 +134,7 @@ allOf:
- amlogic,meson8-mali
- amlogic,meson8b-mali
- hisilicon,hi6220-mali
+ - mediatek,mt7623-mali
- rockchip,rk3036-mali
- rockchip,rk3066-mali
- rockchip,rk3188-mali
@@ -164,6 +169,7 @@ examples:
clocks = <&ccu 1>, <&ccu 2>;
clock-names = "bus", "core";
resets = <&ccu 1>;
+ #cooling-cells = <2>;
};
...
diff --git a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
index 0bc4b38d5cbb..e1ac6ff5a230 100644
--- a/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
+++ b/Documentation/devicetree/bindings/gpu/vivante,gc.yaml
@@ -9,7 +9,7 @@ title: Vivante GPU Bindings
description: Vivante GPU core devices
maintainers:
- - Lucas Stach <l.stach@pengutronix.de>
+ - Lucas Stach <l.stach@pengutronix.de>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml b/Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
index 7db78767c02d..af35b77053df 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,axi-fan-control.yaml
@@ -34,8 +34,7 @@ properties:
description:
Value specifying the number of pulses per revolution of the controlled
FAN.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 2, 4]
required:
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
index 44a63fffb4be..eef614962b10 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
@@ -38,20 +38,18 @@ properties:
 the accumulated values, this entry can also have two items which set
 energy1/charge1 and energy2/charge2 respectively. Check table 12 of the
datasheet for more information on the supported options.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 2
- maxItems: 2
- items:
- enum: [0, 1, 2, 3]
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ items:
+ enum: [0, 1, 2, 3]
+ default: 0
adi,accumulation-deadband-microamp:
description:
 This property controls the Accumulation Dead band which allows setting the
level of current below which no accumulation takes place.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
maximum: 255
default: 0
@@ -61,8 +59,7 @@ properties:
 active high, setting it to zero makes it active low. When this property
is present, the GPIO is automatically configured as output and set to
control a fan as a function of measured temperature.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1]
default: 0
@@ -74,13 +71,12 @@ properties:
registers. Check table 13 of the datasheet for more information on the
supported options. This property cannot be used together with
adi,gpio-out-pol.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 2
- maxItems: 2
- items:
- enum: [0, 1, 2]
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ items:
+ enum: [0, 1, 2]
+ default: 0
required:
- compatible
diff --git a/Documentation/devicetree/bindings/hwmon/adt7475.yaml b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
index 46c441574f98..dfa821c0aacc 100644
--- a/Documentation/devicetree/bindings/hwmon/adt7475.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adt7475.yaml
@@ -46,22 +46,20 @@ patternProperties:
 set to 1 the attenuator is bypassed; if set to 0 the attenuator is
 not bypassed. If the property is absent then the attenuator
 retains its configuration from the bios/bootloader.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
"^adi,pwm-active-state$":
description: |
 Integer array, represents the active state of the pwm outputs. If set to 0
the pwm uses a logic low output for 100% duty cycle. If set to 1 the pwm
uses a logic high output for 100% duty cycle.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 3
- maxItems: 3
- items:
- enum: [0, 1]
- default: 1
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 3
+ maxItems: 3
+ items:
+ enum: [0, 1]
+ default: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
deleted file mode 100644
index ffb79ccf51ee..000000000000
--- a/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-Cirrus Logic Lochnagar Audio Development Board
-
-Lochnagar is an evaluation and development board for Cirrus Logic
-Smart CODEC and Amp devices. It allows the connection of most Cirrus
-Logic devices on mini-cards, as well as allowing connection of
-various application processor systems to provide a full evaluation
-platform. Audio system topology, clocking and power can all be
-controlled through the Lochnagar, allowing the device under test
-to be used in a variety of possible use cases.
-
-This binding document describes the binding for the hardware monitor
-portion of the driver.
-
-This binding must be part of the Lochnagar MFD binding:
- [4] ../mfd/cirrus,lochnagar.txt
-
-Required properties:
-
- - compatible : One of the following strings:
- "cirrus,lochnagar2-hwmon"
-
-Example:
-
-lochnagar-hwmon {
- compatible = "cirrus,lochnagar2-hwmon";
-};
diff --git a/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.yaml
new file mode 100644
index 000000000000..cc00b97a7dac
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/cirrus,lochnagar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Lochnagar Audio Development Board
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ Lochnagar is an evaluation and development board for Cirrus Logic
+ Smart CODEC and Amp devices. It allows the connection of most Cirrus
+ Logic devices on mini-cards, as well as allowing connection of various
+ application processor systems to provide a full evaluation platform.
+ Audio system topology, clocking and power can all be controlled through
+ the Lochnagar, allowing the device under test to be used in a variety of
+ possible use cases.
+
+ This binding document describes the binding for the hardware monitor
+ portion of the driver.
+
+ This binding must be part of the Lochnagar MFD binding:
+ [1] ../mfd/cirrus,lochnagar.yaml
+
+properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2-hwmon
+
+required:
+ - compatible
+
+additionalProperties: false
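+
+# The example from the former cirrus,lochnagar.txt binding, kept here as an
+# illustrative comment; the node sits under the Lochnagar MFD node:
+#
+#  lochnagar-hwmon {
+#      compatible = "cirrus,lochnagar2-hwmon";
+#  };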
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
index 3f043e943668..90b2fa3f7752 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
@@ -45,16 +45,14 @@ properties:
The gain value for the PGA function. This is 8, 4, 2 or 1.
 The PGA gain affects the shunt voltage range.
The range will be equal to: pga-gain * 40mV
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [1, 2, 4, 8]
default: 8
ti,bus-range-microvolt:
description: |
This is the operating range of the bus voltage in microvolt
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [16000000, 32000000]
default: 32000000
@@ -63,14 +61,13 @@ properties:
 Array of three (TMP513) or two (TMP512) n-Factor values for each remote
temperature channel.
See datasheet Table 11 for n-Factor range list and value interpretation.
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32-array
- - minItems: 2
- maxItems: 3
- items:
- default: 0x00
- minimum: 0x00
- maximum: 0xFF
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 3
+ items:
+ default: 0x00
+ minimum: 0x00
+ maximum: 0xFF
required:
- compatible
diff --git a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
index c9a6587fe4bb..a8a35df41951 100644
--- a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
@@ -13,7 +13,7 @@ Recommended properties:
Example:
-i2c@20205000 {
+i2c@7e205000 {
compatible = "brcm,bcm2835-i2c";
reg = <0x7e205000 0x1000>;
interrupts = <2 21>;
diff --git a/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml b/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
new file mode 100644
index 000000000000..dc0952f3780f
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/i2c/cdns,i2c-r1p10.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence I2C controller Device Tree Bindings
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - cdns,i2c-r1p10 # cadence i2c controller version 1.0
+ - cdns,i2c-r1p14 # cadence i2c controller version 1.4
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency:
+ minimum: 1
+ maximum: 400000
+ description: |
+ Desired operating frequency, in Hz, of the bus.
+
+ clock-names:
+ const: pclk
+ description: |
+ Input clock name.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ i2c@e0004000 {
+ compatible = "cdns,i2c-r1p10";
+ clocks = <&clkc 38>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xe0004000 0x1000>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-cadence.txt b/Documentation/devicetree/bindings/i2c/i2c-cadence.txt
deleted file mode 100644
index ebaa90c58c8e..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-cadence.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Binding for the Cadence I2C controller
-
-Required properties:
- - reg: Physical base address and size of the controller's register area.
- - compatible: Should contain one of:
- * "cdns,i2c-r1p10"
- Note: Use this when cadence i2c controller version 1.0 is used.
- * "cdns,i2c-r1p14"
- Note: Use this when cadence i2c controller version 1.4 is used.
- - clocks: Input clock specifier. Refer to common clock bindings.
- - interrupts: Interrupt specifier. Refer to interrupt bindings.
- - #address-cells: Should be 1.
- - #size-cells: Should be 0.
-
-Optional properties:
- - clock-frequency: Desired operating frequency, in Hz, of the bus.
- - clock-names: Input clock name, should be 'pclk'.
-
-Example:
- i2c@e0004000 {
- compatible = "cdns,i2c-r1p10";
- clocks = <&clkc 38>;
- interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xe0004000 0x1000>;
- clock-frequency = <400000>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt b/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
deleted file mode 100644
index d229eff5ca1b..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-jz4780.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-* Ingenic JZ4780 I2C Bus controller
-
-Required properties:
-- compatible: should be one of the following:
- - "ingenic,jz4780-i2c" for the JZ4780
- - "ingenic,x1000-i2c" for the X1000
-- reg: Should contain the address & size of the I2C controller registers.
-- interrupts: Should specify the interrupt provided by parent.
-- clocks: Should contain a single clock specifier for the JZ4780 I2C clock.
-- clock-frequency: desired I2C bus clock frequency in Hz.
-
-Recommended properties:
-- pinctrl-names: should be "default";
-- pinctrl-0: phandle to pinctrl function
-
-Example
-
-/ {
- i2c4: i2c4@10054000 {
- compatible = "ingenic,jz4780-i2c";
- reg = <0x10054000 0x1000>;
-
- interrupt-parent = <&intc>;
- interrupts = <56>;
-
- clocks = <&cgu JZ4780_CLK_SMB4>;
- clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pins_i2c4_data>;
-
- };
-};
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
index 68f6d73a8b73..88b71c1b32c9 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
@@ -8,6 +8,7 @@ Required properties:
"mediatek,mt2712-i2c": for MediaTek MT2712
"mediatek,mt6577-i2c": for MediaTek MT6577
"mediatek,mt6589-i2c": for MediaTek MT6589
+ "mediatek,mt6797-i2c", "mediatek,mt6577-i2c": for MediaTek MT6797
"mediatek,mt7622-i2c": for MediaTek MT7622
"mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623
"mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
index 61eac76c84c4..790aa7218ee0 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml
@@ -28,14 +28,14 @@ properties:
- const: rockchip,rk3399-i2c
- items:
- enum:
- - rockchip,rk3036-i2c
- - rockchip,rk3368-i2c
+ - rockchip,rk3036-i2c
+ - rockchip,rk3368-i2c
- const: rockchip,rk3288-i2c
- items:
- enum:
- - rockchip,px30-i2c
- - rockchip,rk3308-i2c
- - rockchip,rk3328-i2c
+ - rockchip,px30-i2c
+ - rockchip,rk3308-i2c
+ - rockchip,rk3328-i2c
- const: rockchip,rk3399-i2c
reg:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xiic.txt b/Documentation/devicetree/bindings/i2c/i2c-xiic.txt
deleted file mode 100644
index caf42e989462..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-xiic.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Xilinx IIC controller:
-
-Required properties:
-- compatible : Must be "xlnx,xps-iic-2.00.a"
-- reg : IIC register location and length
-- interrupts : IIC controller unterrupt
-- #address-cells = <1>
-- #size-cells = <0>
-- clocks: Input clock specifier. Refer to common clock bindings.
-
-Optional properties:
-- Child nodes conforming to i2c bus binding
-- clock-names: Input clock name, should be 'pclk'.
-
-Example:
-
- axi_iic_0: i2c@40800000 {
- compatible = "xlnx,xps-iic-2.00.a";
- clocks = <&clkc 15>;
- interrupts = < 1 2 >;
- reg = < 0x40800000 0x10000 >;
-
- #size-cells = <0>;
- #address-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml b/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml
new file mode 100644
index 000000000000..682ed1bbf5c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/ingenic,i2c.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/ingenic,i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs I2C controller devicetree bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ $nodename:
+ pattern: "^i2c@[0-9a-f]+$"
+
+ compatible:
+ enum:
+ - ingenic,jz4780-i2c
+ - ingenic,x1000-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency:
+ enum: [ 100000, 400000 ]
+
+ dmas:
+ items:
+ - description: DMA controller phandle and request line for RX
+ - description: DMA controller phandle and request line for TX
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-frequency
+ - dmas
+ - dma-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+ #include <dt-bindings/dma/jz4780-dma.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c@10054000 {
+ compatible = "ingenic,jz4780-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10054000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <56>;
+
+ clocks = <&cgu JZ4780_CLK_SMB4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_i2c4_data>;
+
+ dmas = <&dma JZ4780_DMA_SMB4_RX 0xffffffff>,
+ <&dma JZ4780_DMA_SMB4_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ clock-frequency = <400000>;
+
+ rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+
+ interrupt-parent = <&gpf>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
index f64064f8bdc2..18c0de362451 100644
--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
@@ -35,6 +35,12 @@ Required properties:
Due to above changes, Tegra114 I2C driver makes incompatible with
previous hardware driver. Hence, tegra114 I2C controller is compatible
with "nvidia,tegra114-i2c".
+ nvidia,tegra210-i2c-vi: Tegra210 has one I2C controller that is part of the
+ host1x domain and typically used for camera use-cases. This VI I2C
+ controller is mostly compatible with the programming model of the
+ regular I2C controllers with a few exceptions. The I2C registers start
+ at an offset of 0xc00 (instead of 0), registers are 16 bytes apart
+ (rather than 4) and the controller does not support slave mode.
- reg: Should contain I2C controller registers physical address and length.
- interrupts: Should contain I2C controller interrupts.
- address-cells: Address cells for I2C device address.
diff --git a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
index c359965d0724..a03f9f5cb378 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,i2c.txt
@@ -2,6 +2,7 @@ I2C for R-Car platforms
Required properties:
- compatible:
+ "renesas,i2c-r8a7742" if the device is a part of a R8A7742 SoC.
"renesas,i2c-r8a7743" if the device is a part of a R8A7743 SoC.
"renesas,i2c-r8a7744" if the device is a part of a R8A7744 SoC.
"renesas,i2c-r8a7745" if the device is a part of a R8A7745 SoC.
diff --git a/Documentation/devicetree/bindings/i2c/renesas,iic.txt b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
index ffe085c9947e..89facb09337a 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,iic.txt
+++ b/Documentation/devicetree/bindings/i2c/renesas,iic.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible :
- "renesas,iic-r8a73a4" (R-Mobile APE6)
- "renesas,iic-r8a7740" (R-Mobile A1)
+ - "renesas,iic-r8a7742" (RZ/G1H)
- "renesas,iic-r8a7743" (RZ/G1M)
- "renesas,iic-r8a7744" (RZ/G1N)
- "renesas,iic-r8a7745" (RZ/G1E)
diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
index 900ec1ab6a47..7b3342354bbb 100644
--- a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
@@ -17,6 +17,7 @@ allOf:
contains:
enum:
- st,stm32f7-i2c
+ - st,stm32mp15-i2c
then:
properties:
i2c-scl-rising-time-ns:
@@ -30,11 +31,10 @@ allOf:
Fast Mode Plus speed is selected by slave.
Format is phandle to syscfg / register offset within
syscfg / register bitmask for FMP bit.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
- - items:
- minItems: 3
- maxItems: 3
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ items:
+ minItems: 3
+ maxItems: 3
- if:
properties:
@@ -52,6 +52,7 @@ properties:
enum:
- st,stm32f4-i2c
- st,stm32f7-i2c
+ - st,stm32mp15-i2c
reg:
maxItems: 1
@@ -121,12 +122,12 @@ examples:
clocks = <&rcc 1 CLK_I2C1>;
};
- //Example 3 (with st,stm32f7-i2c compatible on stm32mp)
+ //Example 3 (with st,stm32mp15-i2c compatible on stm32mp)
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
#include <dt-bindings/reset/stm32mp1-resets.h>
i2c@40013000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x40013000 0x400>;
diff --git a/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml b/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
new file mode 100644
index 000000000000..67c1c84ba3dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/i2c/xlnx,xps-iic-2.00.a.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Xilinx IIC controller Device Tree Bindings
+
+maintainers:
+ - info@mocean-labs.com
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ const: xlnx,xps-iic-2.00.a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ clock-names:
+ const: pclk
+ description: |
+ Input clock name.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ axi_iic_0: i2c@40800000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ clocks = <&clkc 15>;
+ interrupts = < 1 2 >;
+ reg = < 0x40800000 0x10000 >;
+
+ #size-cells = <0>;
+ #address-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index f53237270b32..33da4a6fdb39 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -1,15 +1,21 @@
-* Bosch BMA180 / BMA25x triaxial acceleration sensor
+* Bosch BMA023 / BMA150/ BMA180 / BMA25x / SMB380 triaxial acceleration sensor
+https://media.digikey.com/pdf/Data%20Sheets/Bosch/BMA150.pdf
http://omapworld.com/BMA180_111_1002839.pdf
http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf
Required properties:
- compatible : should be one of:
+ "bosch,bma023"
+ "bosch,bma150"
"bosch,bma180"
"bosch,bma250"
"bosch,bma254"
+ "bosch,smb380"
- reg : the I2C address of the sensor
+ - vdd-supply : regulator phandle connected to the VDD pin
+ - vddio-supply : regulator phandle connected to the VDDIO pin
Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
index f0934b295edc..deb34deff0e8 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
@@ -72,8 +72,8 @@ patternProperties:
The channel number. It can have up to 8 channels on ad7124-4
and 16 channels on ad7124-8, numbered from 0 to 15.
items:
- minimum: 0
- maximum: 15
+ minimum: 0
+ maximum: 15
adi,reference-select:
description: |
@@ -83,9 +83,8 @@ patternProperties:
1: REFIN2(+)/REFIN2(−).
3: AVDD
If this field is left empty, internal reference is selected.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1, 3]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 3]
diff-channels:
description: see Documentation/devicetree/bindings/iio/adc/adc.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
new file mode 100644
index 000000000000..c4f57fa6aad1
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad9467.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,ad9467.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD9467 High-Speed ADC
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Alexandru Ardelean <alexandru.ardelean@analog.com>
+
+description: |
+ The AD9467 is a 16-bit, monolithic, IF sampling analog-to-digital
+ converter (ADC).
+
+ https://www.analog.com/media/en/technical-documentation/data-sheets/AD9467.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ad9467
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: adc-clk
+
+ powerdown-gpios:
+ description:
+ Pin that controls the powerdown mode of the device.
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ Reset pin for the device.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "adi,ad9467";
+ reg = <0>;
+ clocks = <&adc_clk>;
+ clock-names = "adc-clk";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
new file mode 100644
index 000000000000..0924b2b4972b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/adi,axi-adc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AXI ADC IP core
+
+maintainers:
+ - Michael Hennerich <michael.hennerich@analog.com>
+ - Alexandru Ardelean <alexandru.ardelean@analog.com>
+
+description: |
+ Analog Devices Generic AXI ADC IP core for interfacing an ADC device
+ with a high speed serial (JESD204B/C) or source synchronous parallel
+ interface (LVDS/CMOS).
+ Usually, some other interface type (i.e. SPI) is used as a control
+ interface for the actual ADC, while this IP core will interface
+ to the data-lines of the ADC and handle the streaming of data into
+ memory via DMA.
+
+ https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+
+properties:
+ compatible:
+ enum:
+ - adi,axi-adc-10.0.a
+
+ reg:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ items:
+ - const: rx
+
+ adi,adc-dev:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ A reference to the actual ADC to which this FPGA ADC core interfaces.
+
+required:
+ - compatible
+ - dmas
+ - reg
+ - adi,adc-dev
+
+additionalProperties: false
+
+examples:
+ - |
+ axi-adc@44a00000 {
+ compatible = "adi,axi-adc-10.0.a";
+ reg = <0x44a00000 0x10000>;
+ dmas = <&rx_dma 0>;
+ dma-names = "rx";
+
+ adi,adc-dev = <&spi_adc>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
index 118809a03279..6a991e9f78e2 100644
--- a/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/lltc,ltc2496.yaml
@@ -7,9 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Linear Technology / Analog Devices LTC2496 ADC
maintainers:
- - Lars-Peter Clausen <lars@metafoo.de>
- - Michael Hennerich <Michael.Hennerich@analog.com>
- - Stefan Popa <stefan.popa@analog.com>
+ - Lars-Peter Clausen <lars@metafoo.de>
+ - Michael Hennerich <Michael.Hennerich@analog.com>
+ - Stefan Popa <stefan.popa@analog.com>
properties:
compatible:
@@ -18,8 +18,7 @@ properties:
vref-supply:
description: phandle to an external regulator providing the reference voltage
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
reg:
description: spi chipselect number according to the usual spi bindings
diff --git a/Documentation/devicetree/bindings/iio/adc/maxim,max1241.yaml b/Documentation/devicetree/bindings/iio/adc/maxim,max1241.yaml
new file mode 100644
index 000000000000..f562505f5ecd
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/maxim,max1241.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2020 Alexandru Lazar
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/maxim,max1241.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX1241 12-bit, single-channel analog to digital converter
+
+maintainers:
+ - Alexandru Lazar <alazar@startmail.com>
+
+description: |
+ Bindings for the max1241 12-bit, single-channel ADC device. Datasheet
+ can be found at:
+ https://datasheets.maximintegrated.com/en/ds/MAX1240-MAX1241.pdf
+
+properties:
+ compatible:
+ enum:
+ - maxim,max1241
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description:
+ Device tree identifier of the regulator that powers the ADC.
+
+ vref-supply:
+ description:
+ Device tree identifier of the regulator that provides the external
+ reference voltage.
+
+ shutdown-gpios:
+ description:
+ GPIO spec for the GPIO pin connected to the ADC's /SHDN pin. If
+ specified, the /SHDN pin will be asserted between conversions,
+ thus enabling power-down mode.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+ - vref-supply
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adc@0 {
+ compatible = "maxim,max1241";
+ reg = <0>;
+ vdd-supply = <&adc_vdd>;
+ vref-supply = <&adc_vref>;
+ spi-max-frequency = <1000000>;
+ shutdown-gpios = <&gpio 26 1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
index 8ffeceb6abae..95ab285f4eba 100644
--- a/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/microchip,mcp3911.yaml
@@ -38,10 +38,9 @@ properties:
microchip,device-addr:
description: Device address when multiple MCP3911 chips are present on the same SPI bus.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1, 2, 3]
- - default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ default: 0
vref-supply:
description: |
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
deleted file mode 100644
index c2c50b59873d..000000000000
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Rockchip Successive Approximation Register (SAR) A/D Converter bindings
-
-Required properties:
-- compatible: should be "rockchip,<name>-saradc" or "rockchip,rk3066-tsadc"
- - "rockchip,saradc": for rk3188, rk3288
- - "rockchip,rk3066-tsadc": for rk3036
- - "rockchip,rk3328-saradc", "rockchip,rk3399-saradc": for rk3328
- - "rockchip,rk3399-saradc": for rk3399
- - "rockchip,rv1108-saradc", "rockchip,rk3399-saradc": for rv1108
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: The interrupt number to the cpu. The interrupt specifier format
- depends on the interrupt controller.
-- clocks: Must contain an entry for each entry in clock-names.
-- clock-names: Shall be "saradc" for the converter-clock, and "apb_pclk" for
- the peripheral clock.
-- vref-supply: The regulator supply ADC reference voltage.
-- #io-channel-cells: Should be 1, see ../iio-bindings.txt
-
-Optional properties:
-- resets: Must contain an entry for each entry in reset-names if need support
- this option. See ../reset/reset.txt for details.
-- reset-names: Must include the name "saradc-apb".
-
-Example:
- saradc: saradc@2006c000 {
- compatible = "rockchip,saradc";
- reg = <0x2006c000 0x100>;
- interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
- clock-names = "saradc", "apb_pclk";
- resets = <&cru SRST_SARADC>;
- reset-names = "saradc-apb";
- #io-channel-cells = <1>;
- vref-supply = <&vcc18>;
- };
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
new file mode 100644
index 000000000000..bcff82a423bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/rockchip-saradc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip Successive Approximation Register (SAR) A/D Converter
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ oneOf:
+ - const: rockchip,saradc
+ - const: rockchip,rk3066-tsadc
+ - const: rockchip,rk3399-saradc
+ - items:
+ - enum:
+ - rockchip,px30-saradc
+ - rockchip,rk3308-saradc
+ - rockchip,rk3328-saradc
+ - rockchip,rv1108-saradc
+ - const: rockchip,rk3399-saradc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: converter clock
+ - description: peripheral clock
+
+ clock-names:
+ items:
+ - const: saradc
+ - const: apb_pclk
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: saradc-apb
+
+ vref-supply:
+ description:
+ The regulator supply for the ADC reference voltage.
+
+ "#io-channel-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - vref-supply
+ - "#io-channel-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ saradc: saradc@2006c000 {
+ compatible = "rockchip,saradc";
+ reg = <0x2006c000 0x100>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
+ vref-supply = <&vcc18>;
+ #io-channel-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
index dd8eb15aeb63..28417b31b558 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
@@ -76,8 +76,7 @@ properties:
description:
Phandle to system configuration controller. It can be used to control the
analog circuitry on stm32mp1.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
interrupt-controller: true
@@ -247,8 +246,7 @@ patternProperties:
Resolution (bits) to use for conversions:
- can be 6, 8, 10 or 12 on stm32f4
- can be 8, 10, 12, 14 or 16 on stm32h7 and stm32mp1
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
st,adc-channels:
description: |
@@ -256,8 +254,7 @@ patternProperties:
- 16 channels, numbered from 0 to 15 (for in0..in15) on stm32f4
- 20 channels, numbered from 0 to 19 (for in0..in19) on stm32h7 and
stm32mp1.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
st,adc-diff-channels:
description: |
@@ -270,18 +267,17 @@ patternProperties:
required. Both properties can be used together. Some channels can be
used as single-ended and some other ones as differential (mixed). But
channels can't be configured both as single-ended and differential.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- items:
- - description: |
- "vinp" indicates positive input number
- minimum: 0
- maximum: 19
- - description: |
- "vinn" indicates negative input number
- minimum: 0
- maximum: 19
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "vinp" indicates positive input number
+ minimum: 0
+ maximum: 19
+ - description: |
+ "vinn" indicates negative input number
+ minimum: 0
+ maximum: 19
st,min-sample-time-nsecs:
description:
@@ -291,8 +287,7 @@ patternProperties:
array that matches "st,adc-channels" and/or "st,adc-diff-channels"
list, to set sample time resp. for all channels, or independently for
each channel.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index b1627441a0b2..d61bc011e820 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -95,16 +95,14 @@ patternProperties:
On stm32h7 and stm32mp1:
- For st,stm32-dfsdm-adc: up to 8 channels numbered from 0 to 7.
- For st,stm32-dfsdm-dmic: 1 channel numbered from 0 to 7.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- minimum: 0
- maximum: 7
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minimum: 0
+ maximum: 7
st,adc-channel-names:
description: List of single-ended channel names.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string-array
+ $ref: /schemas/types.yaml#/definitions/string-array
st,filter-order:
description: |
@@ -112,11 +110,10 @@ patternProperties:
- 0: FastSinC
- [1-5]: order 1 to 5.
For audio purpose it is recommended to use order 3 to 5.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - items:
- minimum: 0
- maximum: 5
+ $ref: /schemas/types.yaml#/definitions/uint32
+ items:
+ minimum: 0
+ maximum: 5
"#io-channel-cells":
const: 1
@@ -130,8 +127,7 @@ patternProperties:
- "MANCH_F": manchester codec, rising edge = logic 1, falling edge = logic 0
items:
enum: [ SPI_R, SPI_F, MANCH_R, MANCH_F ]
- allOf:
- - $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
st,adc-channel-clk-src:
description: |
@@ -142,8 +138,7 @@ patternProperties:
- "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
items:
enum: [ CLKIN, CLKOUT, CLKOUT_F, CLKOUT_R ]
- allOf:
- - $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
st,adc-alt-channel:
description:
diff --git a/Documentation/devicetree/bindings/iio/chemical/ams,ccs811.yaml b/Documentation/devicetree/bindings/iio/chemical/ams,ccs811.yaml
new file mode 100644
index 000000000000..52341c8bacd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/chemical/ams,ccs811.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/chemical/ams,ccs811.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMS CCS811 VOC Sensor
+
+maintainers:
+ - Narcisa Vasile <narcisaanamaria12@gmail.com>
+
+description: |
+ Ultra-Low Power Digital Gas Sensor for Monitoring Indoor Air Quality.
+
+properties:
+ compatible:
+ enum:
+ - ams,ccs811
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ description: GPIO connected to the nRESET line. This is an active low
+ input to CCS811.
+ maxItems: 1
+
+ wakeup-gpios:
+ description: GPIO connected to the nWAKE line. This is an active low
+ input to CCS811.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ voc@5b {
+ compatible = "ams,ccs811";
+ reg = <0x5b>;
+ reset-gpios = <&gpioa 11 GPIO_ACTIVE_LOW>;
+ wakeup-gpios = <&gpioa 12 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,sensor.yaml b/Documentation/devicetree/bindings/iio/chemical/atlas,sensor.yaml
index edcd2904d50e..69e8931e0ae8 100644
--- a/Documentation/devicetree/bindings/iio/chemical/atlas,sensor.yaml
+++ b/Documentation/devicetree/bindings/iio/chemical/atlas,sensor.yaml
@@ -4,19 +4,21 @@
$id: http://devicetree.org/schemas/iio/chemical/atlas,sensor.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Atlas Scientific OEM sensors
+title: Atlas Scientific OEM + EZO sensors
maintainers:
- Matt Ranostay <matt.ranostay@konsulko.com>
description: |
- Atlas Scientific OEM sensors connected via I2C
+ Atlas Scientific OEM + EZO sensors connected via I2C
Datasheets:
http://www.atlas-scientific.com/_files/_datasheets/_oem/DO_oem_datasheet.pdf
http://www.atlas-scientific.com/_files/_datasheets/_oem/EC_oem_datasheet.pdf
http://www.atlas-scientific.com/_files/_datasheets/_oem/ORP_oem_datasheet.pdf
http://www.atlas-scientific.com/_files/_datasheets/_oem/pH_oem_datasheet.pdf
+ http://www.atlas-scientific.com/_files/_datasheets/_oem/RTD_oem_datasheet.pdf
+ http://www.atlas-scientific.com/_files/_datasheets/_probe/EZO_CO2_Datasheet.pdf
properties:
compatible:
@@ -25,6 +27,8 @@ properties:
- atlas,ec-sm
- atlas,orp-sm
- atlas,ph-sm
+ - atlas,rtd-sm
+ - atlas,co2-ezo
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/common.yaml b/Documentation/devicetree/bindings/iio/common.yaml
new file mode 100644
index 000000000000..97ffcb77043d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/common.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common properties for iio sensors
+
+maintainers:
+ - Jonathan Cameron <jic23@kernel.org>
+ - Guido Günther <agx@sigxcpu.org>
+
+description: |
+ This document defines device tree properties common to several iio
+ sensors. It doesn't constitute a device tree binding specification by itself
+ but is meant to be referenced by device tree bindings.
+
+ When referenced from a sensor's device tree binding, the properties defined
+ in this document apply as described below. The sensor binding is responsible
+ for defining whether each property is required or optional.
+
+properties:
+ proximity-near-level:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ For proximity sensors, whether an object can be considered near to the
+ device depends on parameters like sensor position, covering glass and
+ aperture. This value gives userspace an indication of the sensor
+ readings for which this is the case.
+
+ Raw proximity values equal to or above this level should be
+ considered 'near' to the device (an object is near to the
+ sensor).
+
+...
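A sensor binding that wants to permit this property references this schema and lists the property explicitly (the vishay,vcnl4000 binding converted later in this patch does exactly that). In a device tree, the consumer node then just carries the value; a sketch with an illustrative threshold of 220 counts:

    light-sensor@51 {
        compatible = "vishay,vcnl4200";
        reg = <0x51>;
        /* raw proximity readings at or above 220 are considered 'near' */
        proximity-near-level = <220>;
    };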
diff --git a/Documentation/devicetree/bindings/iio/dac/ltc2632.txt b/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
index 338c3220f01a..1ab9570cf219 100644
--- a/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
+++ b/Documentation/devicetree/bindings/iio/dac/ltc2632.txt
@@ -1,4 +1,4 @@
-Linear Technology LTC2632/2636 DAC
+Linear Technology LTC2632/2634/2636 DAC
Required properties:
- compatible: Has to contain one of the following:
@@ -8,6 +8,12 @@ Required properties:
lltc,ltc2632-h12
lltc,ltc2632-h10
lltc,ltc2632-h8
+ lltc,ltc2634-l12
+ lltc,ltc2634-l10
+ lltc,ltc2634-l8
+ lltc,ltc2634-h12
+ lltc,ltc2634-h10
+ lltc,ltc2634-h8
lltc,ltc2636-l12
lltc,ltc2636-l10
lltc,ltc2636-l8
diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
deleted file mode 100644
index bf2925c671c6..000000000000
--- a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-STMicroelectronics STM32 DAC
-
-The STM32 DAC is a 12-bit voltage output digital-to-analog converter. The DAC
-may be configured in 8 or 12-bit mode. It has two output channels, each with
-its own converter.
-It has built-in noise and triangle waveform generator and supports external
-triggers for conversions. The DAC's output buffer allows a high drive output
-current.
-
-Contents of a stm32 dac root node:
------------------------------------
-Required properties:
-- compatible: Should be one of:
- "st,stm32f4-dac-core"
- "st,stm32h7-dac-core"
-- reg: Offset and length of the device's register set.
-- clocks: Must contain an entry for pclk (which feeds the peripheral bus
- interface)
-- clock-names: Must be "pclk".
-- vref-supply: Phandle to the vref+ input analog reference supply.
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- resets: Must contain the phandle to the reset controller.
-- A pinctrl state named "default" for each DAC channel may be defined to set
- DAC_OUTx pin in mode of operation for analog output on external pin.
-
-Contents of a stm32 dac child node:
------------------------------------
-DAC core node should contain at least one subnode, representing a
-DAC instance/channel available on the machine.
-
-Required properties:
-- compatible: Must be "st,stm32-dac".
-- reg: Must be either 1 or 2, to define (single) channel in use
-- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
- Documentation/devicetree/bindings/iio/iio-bindings.txt
-
-Example:
- dac: dac@40007400 {
- compatible = "st,stm32h7-dac-core";
- reg = <0x40007400 0x400>;
- clocks = <&clk>;
- clock-names = "pclk";
- vref-supply = <&reg_vref>;
- pinctrl-names = "default";
- pinctrl-0 = <&dac_out1 &dac_out2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- dac1: dac@1 {
- compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
- reg = <1>;
- };
-
- dac2: dac@2 {
- compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
- reg = <2>;
- };
- };
diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
new file mode 100644
index 000000000000..393f7005941a
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/iio/dac/st,stm32-dac.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: STMicroelectronics STM32 DAC bindings
+
+description: |
+ The STM32 DAC is a 12-bit voltage output digital-to-analog converter. The DAC
+ may be configured in 8 or 12-bit mode. It has two output channels, each with
+ its own converter.
+ It has built-in noise and triangle waveform generator and supports external
+ triggers for conversions. The DAC's output buffer allows a high drive output
+ current.
+
+maintainers:
+ - Fabrice Gasnier <fabrice.gasnier@st.com>
+
+properties:
+ compatible:
+ enum:
+ - st,stm32f4-dac-core
+ - st,stm32h7-dac-core
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pclk
+
+ vref-supply:
+ description: Phandle to the vref input analog reference voltage.
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - vref-supply
+ - '#address-cells'
+ - '#size-cells'
+
+patternProperties:
+ "^dac@[1-2]+$":
+ type: object
+ description:
+ A DAC block node should contain at least one subnode, representing a
+ DAC instance/channel available on the machine.
+
+ properties:
+ compatible:
+ const: st,stm32-dac
+
+ reg:
+ description: Must be either 1 or 2, to define the (single) channel in use
+ enum: [1, 2]
+
+ '#io-channel-cells':
+ const: 1
+
+ additionalProperties: false
+
+ required:
+ - compatible
+ - reg
+ - '#io-channel-cells'
+
+examples:
+ - |
+ // Example on stm32mp157c
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ dac: dac@40017000 {
+ compatible = "st,stm32h7-dac-core";
+ reg = <0x40017000 0x400>;
+ clocks = <&rcc DAC12>;
+ clock-names = "pclk";
+ vref-supply = <&vref>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac@1 {
+ compatible = "st,stm32-dac";
+ #io-channel-cells = <1>;
+ reg = <1>;
+ };
+
+ dac@2 {
+ compatible = "st,stm32-dac";
+ #io-channel-cells = <1>;
+ reg = <2>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt b/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt
index 78e18a1e9c1d..bb43d1ad9c9f 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt
+++ b/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt
@@ -2,7 +2,7 @@
Required properties:
- - compatible : should be "bosch,bmg160" or "bosch,bmi055_gyro"
+ - compatible : should be "bosch,bmg160", "bosch,bmi055_gyro" or "bosch,bmi088_gyro"
- reg : the I2C address of the sensor (0x69)
Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
new file mode 100644
index 000000000000..98baecb4b98a
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/imu/adi,adis16475.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices ADIS16475 and similar IMUs
+
+maintainers:
+ - Nuno Sá <nuno.sa@analog.com>
+
+description: |
+ Analog Devices ADIS16475 and similar IMUs
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ADIS16475.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,adis16475-1
+ - adi,adis16475-2
+ - adi,adis16475-3
+ - adi,adis16477-1
+ - adi,adis16477-2
+ - adi,adis16477-3
+ - adi,adis16470
+ - adi,adis16465-1
+ - adi,adis16465-2
+ - adi,adis16465-3
+ - adi,adis16467-1
+ - adi,adis16467-2
+ - adi,adis16467-3
+ - adi,adis16500
+ - adi,adis16505-1
+ - adi,adis16505-2
+ - adi,adis16505-3
+ - adi,adis16507-1
+ - adi,adis16507-2
+ - adi,adis16507-3
+
+ reg:
+ maxItems: 1
+
+ spi-cpha: true
+
+ spi-cpol: true
+
+ spi-max-frequency:
+ maximum: 2000000
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ Must be the device tree identifier of the RESET pin. If specified,
+ it will be asserted during driver probe. As the line is active low,
+ it should be marked GPIO_ACTIVE_LOW.
+ maxItems: 1
+
+ adi,sync-mode:
+ description:
+ Configures the device SYNC pin. The following modes are supported
+ 0 - output_sync
+ 1 - direct_sync
+ 2 - scaled_sync
+ 3 - pulse_sync
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+
+ adi,scaled-output-hz:
+ description:
+ This property must be present when the scaled-sync mode is selected
+ (adi,sync-mode set to 2). In this mode, the input clock can have a range
+ of 1Hz to 128Hz, which must be scaled to produce an allowable sample
+ rate. This property specifies that rate.
+ minimum: 1900
+ maximum: 2100
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - spi-cpha
+ - spi-cpol
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,adis16500
+ - adi,adis16505-1
+ - adi,adis16505-2
+ - adi,adis16505-3
+ - adi,adis16507-1
+ - adi,adis16507-2
+ - adi,adis16507-3
+
+ then:
+ properties:
+ adi,sync-mode:
+ minimum: 0
+ maximum: 2
+
+ - if:
+ properties:
+ adi,sync-mode:
+ enum: [1, 2, 3]
+
+ then:
+ dependencies:
+ adi,sync-mode: [ clocks ]
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ adis16475: adis16475-3@0 {
+ compatible = "adi,adis16475-3";
+ reg = <0>;
+ spi-cpha;
+ spi-cpol;
+ spi-max-frequency = <2000000>;
+ interrupts = <4 IRQ_TYPE_EDGE_RISING>;
+ interrupt-parent = <&gpio>;
+ };
+ };
+...
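The example above uses the default sync configuration. When one of the sync modes that needs a clock is selected, the dependency block above requires a clocks entry, and for scaled_sync the adi,scaled-output-hz property sets the resulting sample rate. A hedged sketch of such a node, with ext_clk standing in for the external sync source:

    #include <dt-bindings/interrupt-controller/irq.h>
    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        adis16470@0 {
            compatible = "adi,adis16470";
            reg = <0>;
            spi-cpha;
            spi-cpol;
            spi-max-frequency = <2000000>;
            interrupt-parent = <&gpio>;
            interrupts = <4 IRQ_TYPE_EDGE_RISING>;

            /* scaled_sync: 1Hz-128Hz input clock scaled up to the sample rate */
            adi,sync-mode = <2>;
            clocks = <&ext_clk>;
            adi,scaled-output-hz = <2000>;
        };
    };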
diff --git a/Documentation/devicetree/bindings/iio/imu/bmi160.txt b/Documentation/devicetree/bindings/iio/imu/bmi160.txt
deleted file mode 100644
index 900c169de00f..000000000000
--- a/Documentation/devicetree/bindings/iio/imu/bmi160.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Bosch BMI160 - Inertial Measurement Unit with Accelerometer, Gyroscope
-and externally connectable Magnetometer
-
-https://www.bosch-sensortec.com/bst/products/all_products/bmi160
-
-Required properties:
- - compatible : should be "bosch,bmi160"
- - reg : the I2C address or SPI chip select number of the sensor
- - spi-max-frequency : set maximum clock frequency (only for SPI)
-
-Optional properties:
- - interrupts : interrupt mapping for IRQ
- - interrupt-names : set to "INT1" if INT1 pin should be used as interrupt
- input, set to "INT2" if INT2 pin should be used instead
- - drive-open-drain : set if the specified interrupt pin should be configured as
- open drain. If not set, defaults to push-pull.
-
-Examples:
-
-bmi160@68 {
- compatible = "bosch,bmi160";
- reg = <0x68>;
-
- interrupt-parent = <&gpio4>;
- interrupts = <12 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "INT1";
-};
-
-bmi160@0 {
- compatible = "bosch,bmi160";
- reg = <0>;
- spi-max-frequency = <10000000>;
-
- interrupt-parent = <&gpio2>;
- interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
- interrupt-names = "INT2";
-};
diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
new file mode 100644
index 000000000000..0d0ef84e22b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/bosch,bmi160.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/imu/bosch,bmi160.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Bosch BMI160
+
+maintainers:
+ - Jonathan Cameron <jic23@kernel.org>
+
+description: |
+ Inertial Measurement Unit with Accelerometer, Gyroscope and externally
+ connectable Magnetometer
+ https://www.bosch-sensortec.com/bst/products/all_products/bmi160
+
+properties:
+ compatible:
+ const: bosch,bmi160
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ enum:
+ - INT1
+ - INT2
+ description: |
+ set to "INT1" if INT1 pin should be used as interrupt input, set
+ to "INT2" if INT2 pin should be used instead
+
+ drive-open-drain:
+ description: |
+ set if the specified interrupt pin should be configured as
+ open drain. If not set, defaults to push-pull.
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ // Example for I2C
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bmi160@68 {
+ compatible = "bosch,bmi160";
+ reg = <0x68>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <12 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "INT1";
+ };
+ };
+ - |
+ // Example for SPI
+ #include <dt-bindings/interrupt-controller/irq.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bmi160@0 {
+ compatible = "bosch,bmi160";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <12 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "INT2";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/light/amstaos,tsl2563.yaml b/Documentation/devicetree/bindings/iio/light/amstaos,tsl2563.yaml
new file mode 100644
index 000000000000..efd2eba5f23c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/amstaos,tsl2563.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/amstaos,tsl2563.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AMS TAOS TSL2563 ambient light sensor
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |
+ Ambient light sensor with an i2c interface.
+
+properties:
+ compatible:
+ enum:
+ - amstaos,tsl2560
+ - amstaos,tsl2561
+ - amstaos,tsl2562
+ - amstaos,tsl2563
+
+ reg:
+ maxItems: 1
+
+ amstaos,cover-comp-gain:
+ description: Multiplier for gain compensation
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [1, 16]
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@29 {
+ compatible = "amstaos,tsl2563";
+ reg = <0x29>;
+ amstaos,cover-comp-gain = <16>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2563.txt b/Documentation/devicetree/bindings/iio/light/tsl2563.txt
deleted file mode 100644
index f91e809e736e..000000000000
--- a/Documentation/devicetree/bindings/iio/light/tsl2563.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-* AMS TAOS TSL2563 ambient light sensor
-
-Required properties:
-
- - compatible : should be "amstaos,tsl2563"
- - reg : the I2C address of the sensor
-
-Optional properties:
-
- - amstaos,cover-comp-gain : integer used as multiplier for gain
- compensation (default = 1)
-
-Example:
-
-tsl2563@29 {
- compatible = "amstaos,tsl2563";
- reg = <0x29>;
- amstaos,cover-comp-gain = <16>;
-};
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2772.yaml b/Documentation/devicetree/bindings/iio/light/tsl2772.yaml
index e8f7d1ada57b..d81229857944 100644
--- a/Documentation/devicetree/bindings/iio/light/tsl2772.yaml
+++ b/Documentation/devicetree/bindings/iio/light/tsl2772.yaml
@@ -33,13 +33,12 @@ properties:
amstaos,proximity-diodes:
description: Proximity diodes to enable
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 1
- maxItems: 2
- items:
- minimum: 0
- maximum: 1
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/iio/light/vcnl4000.txt b/Documentation/devicetree/bindings/iio/light/vcnl4000.txt
deleted file mode 100644
index 955af4555c90..000000000000
--- a/Documentation/devicetree/bindings/iio/light/vcnl4000.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-VISHAY VCNL4000 - Ambient Light and proximity sensor
-
-This driver supports the VCNL4000/10/20/40 and VCNL4200 chips
-
-Required properties:
-
- -compatible: must be one of :
- vishay,vcnl4000
- vishay,vcnl4010
- vishay,vcnl4020
- vishay,vcnl4040
- vishay,vcnl4200
-
- -reg: I2C address of the sensor, should be one from below based on the model:
- 0x13
- 0x51
- 0x60
-
-Example:
-
-light-sensor@51 {
- compatible = "vishay,vcnl4200";
- reg = <0x51>;
-};
diff --git a/Documentation/devicetree/bindings/iio/light/vishay,vcnl4000.yaml b/Documentation/devicetree/bindings/iio/light/vishay,vcnl4000.yaml
new file mode 100644
index 000000000000..da8f2e872535
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/vishay,vcnl4000.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/vishay,vcnl4000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: VISHAY VCNL4000 ambient light and proximity sensor
+
+maintainers:
+ - Peter Meerwald <pmeerw@pmeerw.net>
+
+description: |
+ Ambient light sensing with proximity detection over an i2c
+ interface.
+
+allOf:
+ - $ref: ../common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - vishay,vcnl4000
+ - vishay,vcnl4010
+ - vishay,vcnl4020
+ - vishay,vcnl4040
+ - vishay,vcnl4200
+ reg:
+ maxItems: 1
+
+ proximity-near-level: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+- |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@51 {
+ compatible = "vishay,vcnl4200";
+ reg = <0x51>;
+ proximity-near-level = <220>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt b/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
index baecc4a85197..7f06eff3b504 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/ak8974.txt
@@ -2,7 +2,9 @@
Required properties:
-- compatible : should be "asahi-kasei,ak8974"
+- compatible:
+ * "asahi-kasei,ak8974"
+ * "alps,hscdtd008a"
- reg : the I2C address of the magnetometer
Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/proximity/vishay,vcnl3020.yaml b/Documentation/devicetree/bindings/iio/proximity/vishay,vcnl3020.yaml
new file mode 100644
index 000000000000..4190253336ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/vishay,vcnl3020.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/vishay,vcnl3020.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Integrated Proximity Sensor With Infrared Emitter
+
+maintainers:
+ - Ivan Mikhaylov <i.mikhaylov@yadro.com>
+
+description: |
+ The VCNL3020 is a fully integrated proximity sensor. Fully integrated means
+ that the infrared emitter is included in the package. It has 16-bit
+ resolution, includes a signal processing IC, and provides a standard I2C
+ communication interface with an interrupt function.
+
+ Specifications about the devices can be found at:
+ https://www.vishay.com/docs/84150/vcnl3020.pdf
+
+properties:
+ compatible:
+ enum:
+ - vishay,vcnl3020
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator that provides power to the sensor
+
+ vddio-supply:
+ description: Regulator that provides power to the bus
+
+ vishay,led-current-microamp:
+ description:
+ The driver current for the LED used in proximity sensing.
+ enum: [0, 10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000,
+ 100000, 110000, 120000, 130000, 140000, 150000, 160000, 170000,
+ 180000, 190000, 200000]
+ default: 20000
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ proximity@13 {
+ compatible = "vishay,vcnl3020";
+ reg = <0x13>;
+ vishay,led-current-microamp = <200000>;
+ };
+ };
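The schema also defines optional interrupt and supply properties that the short example leaves out; a fuller sketch, with the gpio controller, regulator phandles and interrupt line used purely as placeholders, might be:

    #include <dt-bindings/interrupt-controller/irq.h>
    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        proximity@13 {
            compatible = "vishay,vcnl3020";
            reg = <0x13>;
            interrupt-parent = <&gpio>;
            interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
            vdd-supply = <&vdd_reg>;
            vddio-supply = <&vddio_reg>;
            /* LED current for proximity sensing, in microamps */
            vishay,led-current-microamp = <20000>;
        };
    };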
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index 0ef64a444479..3213599c5071 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -50,6 +50,7 @@ Accelerometers:
- st,lis3dhh
- st,lis3de
- st,lis2de12
+- st,lis2hh12
Gyroscopes:
- st,l3g4200d-gyro
diff --git a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
index 8fb46de6641d..40ccbe7b5c13 100644
--- a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
+++ b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
@@ -42,10 +42,9 @@ properties:
0 - 50/60Hz rejection
1 - 60Hz rejection
2 - 50Hz rejection
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 2
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 2
'#address-cells':
const: 1
@@ -91,8 +90,7 @@ patternProperties:
7 - Type T Thermocouple
8 - Type B Thermocouple
9 - Custom Thermocouple
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 9
@@ -121,8 +119,7 @@ patternProperties:
more details look at table 69 and 70.
Note should be signed, but dtc doesn't currently maintain the
sign.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ $ref: /schemas/types.yaml#/definitions/uint64-matrix
minItems: 3
maxItems: 64
items:
@@ -138,8 +135,7 @@ patternProperties:
properties:
adi,sensor-type:
description: Identifies the sensor as a diode.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
const: 28
adi,single-ended:
@@ -196,8 +192,7 @@ patternProperties:
16 - RTD PT-1000 (0.00375)
17 - RTD NI-120
18 - RTD Custom
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 10
maximum: 18
@@ -210,9 +205,8 @@ patternProperties:
description:
Identifies the number of wires used by the RTD. Setting this
property to 5 means 4 wires with Kelvin Rsense.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [2, 3, 4, 5]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [2, 3, 4, 5]
adi,rsense-share:
description:
@@ -237,18 +231,16 @@ patternProperties:
description:
This property set the RTD curve used and the corresponding
Callendar-VanDusen constants. Look at table 30 of the datasheet.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 3
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
adi,custom-rtd:
description:
This is a table, where each entry should be a pair of
resistance(ohm)-temperature(K). The entries added here are in uohm
and uK. For more details values look at table 74 and 75.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ $ref: /schemas/types.yaml#/definitions/uint64-matrix
items:
minItems: 3
maxItems: 64
@@ -260,7 +252,7 @@ patternProperties:
- adi,rsense-handle
dependencies:
- adi,current-rotate: [ adi,rsense-share ]
+ adi,current-rotate: [ "adi,rsense-share" ]
"^thermistor@":
type: object
@@ -280,8 +272,7 @@ patternProperties:
25 - Thermistor Spectrum 1003k 1kohm
26 - Thermistor Custom Steinhart-Hart
27 - Custom Thermistor
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 19
maximum: 27
@@ -314,10 +305,9 @@ patternProperties:
This property controls the magnitude of the excitation current
applied to the thermistor. Value 0 sets the sensor in auto-range
mode.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 250, 500, 1000, 5000, 10000, 25000, 50000, 100000,
- 250000, 500000, 1000000]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 250, 500, 1000, 5000, 10000, 25000, 50000, 100000, 250000,
+ 500000, 1000000]
adi,custom-thermistor:
description:
@@ -325,8 +315,7 @@ patternProperties:
resistance(ohm)-temperature(K). The entries added here are in uohm
and uK only for custom thermistors. For more details look at table
78 and 79.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint64-matrix
+ $ref: /schemas/types.yaml#/definitions/uint64-matrix
minItems: 3
maxItems: 64
items:
@@ -339,8 +328,7 @@ patternProperties:
be programmed into the device memory using this property. For
Steinhart sensors the coefficients are given in the raw
format. Look at table 82 for more information.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
items:
minItems: 6
maxItems: 6
@@ -349,7 +337,7 @@ patternProperties:
- adi,rsense-handle
dependencies:
- adi,current-rotate: [ adi,rsense-share ]
+ adi,current-rotate: [ "adi,rsense-share" ]
"^adc@":
type: object
@@ -358,8 +346,7 @@ patternProperties:
properties:
adi,sensor-type:
description: Identifies the sensor as a direct adc.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
const: 30
adi,single-ended:
@@ -379,8 +366,7 @@ patternProperties:
adi,sensor-type:
description: Identifies the sensor as a rsense.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
const: 29
adi,rsense-val-milli-ohms:
diff --git a/Documentation/devicetree/bindings/index.rst b/Documentation/devicetree/bindings/index.rst
new file mode 100644
index 000000000000..3837b17c234f
--- /dev/null
+++ b/Documentation/devicetree/bindings/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========
+Device Tree
+===========
+
+.. toctree::
+ :maxdepth: 1
+
+ ABI
+ submitting-patches
+ writing-bindings
diff --git a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
index 5b3b71c9c018..cffd02028d02 100644
--- a/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
+++ b/Documentation/devicetree/bindings/input/allwinner,sun4i-a10-lradc-keys.yaml
@@ -16,8 +16,8 @@ properties:
- const: allwinner,sun4i-a10-lradc-keys
- const: allwinner,sun8i-a83t-r-lradc
- items:
- - const: allwinner,sun50i-a64-lradc
- - const: allwinner,sun8i-a83t-r-lradc
+ - const: allwinner,sun50i-a64-lradc
+ - const: allwinner,sun8i-a83t-r-lradc
reg:
maxItems: 1
@@ -42,9 +42,8 @@ patternProperties:
description: Keycode to emit
channel:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
description: ADC Channel this key is attached to
voltage:
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
deleted file mode 100644
index 5edac8be0802..000000000000
--- a/Documentation/devicetree/bindings/input/elants_i2c.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Elantech I2C Touchscreen
-
-Required properties:
-- compatible: must be "elan,ekth3500".
-- reg: I2C address of the chip.
-- interrupts: interrupt to which the chip is connected (see interrupt
- binding[0]).
-
-Optional properties:
-- wakeup-source: touchscreen can be used as a wakeup source.
-- pinctrl-names: should be "default" (see pinctrl binding [1]).
-- pinctrl-0: a phandle pointing to the pin settings for the device (see
- pinctrl binding [1]).
-- reset-gpios: reset gpio the chip is connected to.
-- vcc33-supply: a phandle for the regulator supplying 3.3V power.
-- vccio-supply: a phandle for the regulator supplying IO power.
-
-[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-
-Example:
- &i2c1 {
- /* ... */
-
- touchscreen@10 {
- compatible = "elan,ekth3500";
- reg = <0x10>;
- interrupt-parent = <&gpio4>;
- interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
- wakeup-source;
- };
-
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
deleted file mode 100644
index 4d9a3717eaaf..000000000000
--- a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Device-Tree bindings for input/gpio_keys_polled.c keyboard driver
-
-Required properties:
- - compatible = "gpio-keys-polled";
- - poll-interval: Poll interval time in milliseconds
-
-Optional properties:
- - autorepeat: Boolean, Enable auto repeat feature of Linux input
- subsystem.
-
-Each button (key) is represented as a sub-node of "gpio-keys-polled":
-Subnode properties:
-
- - gpios: OF device-tree gpio specification.
- - label: Descriptive name of the key.
- - linux,code: Key / Axis code to emit.
-
-Optional subnode-properties:
- - linux,input-type: Specify event type this button/key generates.
- If not specified defaults to <1> == EV_KEY.
- - linux,input-value: If linux,input-type is EV_ABS or EV_REL then this
- value is sent for events this button generates when pressed.
- EV_ABS/EV_REL axis will generate an event with a value of 0 when
- all buttons with linux,input-type == type and linux,code == axis
- are released. This value is interpreted as a signed 32 bit value,
- e.g. to make a button generate a value of -1 use:
- linux,input-value = <0xffffffff>; /* -1 */
- - debounce-interval: Debouncing interval time in milliseconds.
- If not specified defaults to 5.
- - wakeup-source: Boolean, button can wake-up the system.
- (Legacy property supported: "gpio-key,wakeup")
-
-Example nodes:
-
- gpio_keys_polled {
- compatible = "gpio-keys-polled";
- poll-interval = <100>;
- autorepeat;
-
- button21 {
- label = "GPIO Key UP";
- linux,code = <103>;
- gpios = <&gpio1 0 1>;
- };
- ...
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
deleted file mode 100644
index 7cccc49b6bea..000000000000
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
-
-Required properties:
- - compatible = "gpio-keys";
-
-Optional properties:
- - autorepeat: Boolean, Enable auto repeat feature of Linux input
- subsystem.
- - label: String, name of the input device.
-
-Each button (key) is represented as a sub-node of "gpio-keys":
-Subnode properties:
-
- - gpios: OF device-tree gpio specification.
- - interrupts: the interrupt line for that input.
- - label: Descriptive name of the key.
- - linux,code: Keycode to emit.
-
-Note that either "interrupts" or "gpios" properties can be omitted, but not
-both at the same time. Specifying both properties is allowed.
-
-Optional subnode-properties:
- - linux,input-type: Specify event type this button/key generates.
- If not specified defaults to <1> == EV_KEY.
- - debounce-interval: Debouncing interval time in milliseconds.
- If not specified defaults to 5.
- - wakeup-source: Boolean, button can wake-up the system.
- (Legacy property supported: "gpio-key,wakeup")
- - wakeup-event-action: Specifies whether the key should wake the
- system when asserted, when deasserted, or both. This property is
- only valid for keys that wake up the system (e.g., when the
- "wakeup-source" property is also provided).
- Supported values are defined in linux-event-codes.h:
- EV_ACT_ASSERTED - asserted
- EV_ACT_DEASSERTED - deasserted
- EV_ACT_ANY - both asserted and deasserted
- - linux,can-disable: Boolean, indicates that button is connected
- to dedicated (not shared) interrupt which can be disabled to
- suppress events from the button.
-
-Example nodes:
-
- gpio-keys {
- compatible = "gpio-keys";
- autorepeat;
-
- up {
- label = "GPIO Key UP";
- linux,code = <103>;
- gpios = <&gpio1 0 1>;
- };
-
- down {
- label = "GPIO Key DOWN";
- linux,code = <108>;
- interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- };
- ...
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml
new file mode 100644
index 000000000000..6966ab009fa3
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml
@@ -0,0 +1,149 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/gpio-keys.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Device-Tree bindings for GPIO attached keys
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - gpio-keys
+ - gpio-keys-polled
+
+patternProperties:
+ ".*":
+ if:
+ type: object
+ then:
+ $ref: input.yaml#
+
+ properties:
+ gpios:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ label:
+ description: Descriptive name of the key.
+
+ linux,code:
+ description: Key / Axis code to emit.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ linux,input-type:
+ description:
+ Specify event type this button/key generates. If not specified defaults to
+ <1> == EV_KEY.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ default: 1
+
+ linux,input-value:
+ description: |
+ If linux,input-type is EV_ABS or EV_REL then this
+ value is sent for events this button generates when pressed.
+ EV_ABS/EV_REL axis will generate an event with a value of 0
+ when all buttons with linux,input-type == type and
+ linux,code == axis are released. This value is interpreted
+ as a signed 32 bit value, e.g. to make a button generate a
+ value of -1 use:
+
+ linux,input-value = <0xffffffff>; /* -1 */
+
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ debounce-interval:
+ description:
+ Debouncing interval time in milliseconds. If not specified defaults to 5.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ default: 5
+
+ wakeup-source:
+ description: Button can wake-up the system.
+
+ wakeup-event-action:
+ description: |
+ Specifies whether the key should wake the system when asserted, when
+ deasserted, or both. This property is only valid for keys that wake up the
+ system (e.g., when the "wakeup-source" property is also provided).
+
+ Supported values are defined in linux-event-codes.h:
+
+ EV_ACT_ANY - both asserted and deasserted
+ EV_ACT_ASSERTED - asserted
+ EV_ACT_DEASSERTED - deasserted
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+
+ linux,can-disable:
+ description:
+ Indicates that button is connected to dedicated (not shared) interrupt
+ which can be disabled to suppress events from the button.
+ type: boolean
+
+ pinctrl-0:
+ maxItems: 1
+
+ pinctrl-names:
+ maxItems: 1
+
+ required:
+ - linux,code
+
+ anyOf:
+ - required:
+ - interrupts
+ - required:
+ - gpios
+
+ dependencies:
+ wakeup-event-action: [ wakeup-source ]
+ linux,input-value: [ gpios ]
+
+ unevaluatedProperties: false
+
+if:
+ properties:
+ compatible:
+ const: gpio-keys-polled
+then:
+ properties:
+ poll-interval:
+ description:
+ Poll interval time in milliseconds
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - poll-interval
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ up {
+ label = "GPIO Key UP";
+ linux,code = <103>;
+ gpios = <&gpio1 0 1>;
+ };
+
+ down {
+ label = "GPIO Key DOWN";
+ linux,code = <108>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ };
+ };
+
+...
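The example above covers only the interrupt-driven "gpio-keys" flavour; for "gpio-keys-polled" the if/then clause earlier in this schema additionally makes poll-interval mandatory. A sketch mirroring the example from the gpio-keys-polled.txt file removed by this patch:

    gpio-keys-polled {
        compatible = "gpio-keys-polled";
        poll-interval = <100>;
        autorepeat;

        up {
            label = "GPIO Key UP";
            linux,code = <103>;
            gpios = <&gpio1 0 1>;
        };
    };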
diff --git a/Documentation/devicetree/bindings/input/input.yaml b/Documentation/devicetree/bindings/input/input.yaml
index 6d519046b3af..8edcb3c31270 100644
--- a/Documentation/devicetree/bindings/input/input.yaml
+++ b/Documentation/devicetree/bindings/input/input.yaml
@@ -18,11 +18,10 @@ properties:
description:
Specifies an array of numeric keycode values to be used for reporting
button presses.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- minimum: 0
- maximum: 0xff
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minimum: 0
+ maximum: 0xff
poll-interval:
description: Poll interval time in milliseconds.
diff --git a/Documentation/devicetree/bindings/input/iqs269a.yaml b/Documentation/devicetree/bindings/input/iqs269a.yaml
new file mode 100644
index 000000000000..f0242bb4be81
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/iqs269a.yaml
@@ -0,0 +1,581 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/iqs269a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Azoteq IQS269A Capacitive Touch Controller
+
+maintainers:
+ - Jeff LaBundy <jeff@labundy.com>
+
+description: |
+ The Azoteq IQS269A is an 8-channel capacitive touch controller that features
+ additional Hall-effect and inductive sensing capabilities.
+
+ Link to datasheet: https://www.azoteq.com/
+
+properties:
+ compatible:
+ const: azoteq,iqs269a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ azoteq,hall-enable:
+ type: boolean
+ description:
+ Enables Hall-effect sensing on channels 6 and 7. In this case, keycodes
+ assigned to channel 6 are ignored and keycodes assigned to channel 7 are
+ interpreted as switch codes. Refer to the datasheet for requirements
+ imposed on channels 6 and 7 by Hall-effect sensing.
+
+ azoteq,suspend-mode:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description: |
+ Specifies the power mode during suspend as follows:
+ 0: Automatic (same as normal runtime, i.e. suspend/resume disabled)
+ 1: Low power (all sensing at a reduced reporting rate)
+ 2: Ultra-low power (channel 0 proximity sensing)
+ 3: Halt (no sensing)
+
+ azoteq,clk-div:
+ type: boolean
+ description: Divides the device's core clock by a factor of 4.
+
+ azoteq,ulp-update:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 7
+ default: 3
+ description: Specifies the ultra-low-power mode update rate.
+
+ azoteq,reseed-offset:
+ type: boolean
+ description:
+ Applies an 8-count offset to all long-term averages upon either ATI or
+ reseed events.
+
+ azoteq,filt-str-lp-lta:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description:
+ Specifies the long-term average filter strength during low-power mode.
+
+ azoteq,filt-str-lp-cnt:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description:
+ Specifies the raw count filter strength during low-power mode.
+
+ azoteq,filt-str-np-lta:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description:
+ Specifies the long-term average filter strength during normal-power mode.
+
+ azoteq,filt-str-np-cnt:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description:
+ Specifies the raw count filter strength during normal-power mode.
+
+ azoteq,rate-np-ms:
+ minimum: 0
+ maximum: 255
+ default: 16
+ description: Specifies the report rate (in ms) during normal-power mode.
+
+ azoteq,rate-lp-ms:
+ minimum: 0
+ maximum: 255
+ default: 160
+ description: Specifies the report rate (in ms) during low-power mode.
+
+ azoteq,rate-ulp-ms:
+ multipleOf: 16
+ minimum: 0
+ maximum: 4080
+ default: 160
+ description: Specifies the report rate (in ms) during ultra-low-power mode.
+
+ azoteq,timeout-pwr-ms:
+ multipleOf: 512
+ minimum: 0
+ maximum: 130560
+ default: 2560
+ description:
+ Specifies the length of time (in ms) to wait for an event during normal-
+ power mode before transitioning to low-power mode.
+
+ azoteq,timeout-lta-ms:
+ multipleOf: 512
+ minimum: 0
+ maximum: 130560
+ default: 32768
+ description:
+ Specifies the length of time (in ms) to wait before resetting the long-
+ term average of all channels. Specify the maximum timeout to disable it
+ altogether.
+
+ azoteq,ati-band-disable:
+ type: boolean
+ description: Disables the ATI band check.
+
+ azoteq,ati-lp-only:
+ type: boolean
+ description: Limits automatic ATI to low-power mode.
+
+ azoteq,ati-band-tighten:
+ type: boolean
+ description: Tightens the ATI band from 1/8 to 1/16 of the desired target.
+
+ azoteq,filt-disable:
+ type: boolean
+ description: Disables all raw count filtering.
+
+ azoteq,gpio3-select:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 7
+ default: 0
+ description:
+ Selects the channel for which the GPIO3 pin represents touch state.
+
+ azoteq,dual-direction:
+ type: boolean
+ description:
+ Specifies that long-term averages are to freeze in the presence of either
+ increasing or decreasing counts, thereby permitting events to be reported
+ in either direction.
+
+ azoteq,tx-freq:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description: |
+ Specifies the inductive sensing excitation frequency as follows (parenthesized
+ numbers represent the frequency if 'azoteq,clk-div' is present):
+ 0: 16 MHz (4 MHz)
+ 1: 8 MHz (2 MHz)
+ 2: 4 MHz (1 MHz)
+ 3: 2 MHz (500 kHz)
+
+ azoteq,global-cap-increase:
+ type: boolean
+ description: Increases the global capacitance adder from 0.5 pF to 1.5 pF.
+
+ azoteq,reseed-select:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 0
+ description: |
+ Specifies the event(s) that prompt the device to reseed (i.e. reset the
+ long-term average) of an associated channel as follows:
+ 0: None
+ 1: Proximity
+ 2: Proximity or touch
+ 3: Proximity, touch or deep touch
+
+ azoteq,tracking-enable:
+ type: boolean
+ description:
+ Enables all associated channels to track their respective reference
+ channels.
+
+ azoteq,filt-str-slider:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 1
+ description: Specifies the slider coordinate filter strength.
+
+patternProperties:
+ "^channel@[0-7]$":
+ type: object
+ description:
+ Represents a single sensing channel. A channel is active if defined and
+ inactive otherwise.
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 7
+ description: Index of the channel.
+
+ azoteq,reseed-disable:
+ type: boolean
+ description:
+ Prevents the channel from being reseeded if the long-term average
+ timeout (defined in 'azoteq,timeout-lta-ms') expires.
+
+ azoteq,blocking-enable:
+ type: boolean
+ description: Specifies that the channel is a blocking channel.
+
+ azoteq,slider0-select:
+ type: boolean
+ description: Specifies that the channel participates in slider 0.
+
+ azoteq,slider1-select:
+ type: boolean
+ description: Specifies that the channel participates in slider 1.
+
+ azoteq,rx-enable:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 8
+ items:
+ minimum: 0
+ maximum: 7
+ description:
+ Specifies the CRX pin(s) associated with the channel. By default, only
+ the CRX pin corresponding to the channel's index is enabled (e.g. CRX0
+ for channel 0).
+
+ azoteq,tx-enable:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 8
+ items:
+ minimum: 0
+ maximum: 7
+ default: [0, 1, 2, 3, 4, 5, 6, 7]
+ description: Specifies the TX pin(s) associated with the channel.
+
+ azoteq,meas-cap-decrease:
+ type: boolean
+ description:
+ Decreases the internal measurement capacitance from 60 pF to 15 pF.
+
+ azoteq,rx-float-inactive:
+ type: boolean
+ description: Floats any inactive CRX pins instead of grounding them.
+
+ azoteq,local-cap-size:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2]
+ default: 0
+ description: |
+ Specifies the capacitance to be added to the channel as follows:
+ 0: None
+ 1: Global adder (based on 'azoteq,global-cap-increase')
+ 2: Global adder + 0.5 pF
+
+ azoteq,invert-enable:
+ type: boolean
+ description:
+ Inverts the polarity of the states reported for proximity, touch and
+ deep-touch events relative to their respective thresholds.
+
+ azoteq,proj-bias:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 2
+ description: |
+ Specifies the bias current applied during projected-capacitance
+ sensing as follows:
+ 0: 2.5 uA
+ 1: 5 uA
+ 2: 10 uA
+ 3: 20 uA
+
+ azoteq,sense-mode:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 9, 14, 15]
+ default: 0
+ description: |
+ Specifies the channel's sensing mode as follows:
+ 0: Self capacitance
+ 1: Projected capacitance
+ 9: Self or mutual inductance
+ 14: Hall effect
+ 15: Temperature
+
+ azoteq,sense-freq:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 1
+ description: |
+ Specifies the channel's sensing frequency as follows (parenthesized
+ numbers represent the frequency if 'azoteq,clk-div' is present):
+ 0: 4 MHz (1 MHz)
+ 1: 2 MHz (500 kHz)
+ 2: 1 MHz (250 kHz)
+ 3: 500 kHz (125 kHz)
+
+ azoteq,static-enable:
+ type: boolean
+ description: Enables the static front-end for the channel.
+
+ azoteq,ati-mode:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [0, 1, 2, 3]
+ default: 3
+ description: |
+ Specifies the channel's ATI mode as follows:
+ 0: Disabled
+ 1: Semi-partial
+ 2: Partial
+ 3: Full
+
+ azoteq,ati-base:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [75, 100, 150, 200]
+ default: 100
+ description: Specifies the channel's ATI base.
+
+ azoteq,ati-target:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - multipleOf: 32
+ minimum: 0
+ maximum: 2016
+ default: 512
+ description: Specifies the channel's ATI target.
+
+ azoteq,assoc-select:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 8
+ items:
+ minimum: 0
+ maximum: 7
+ description:
+ Specifies the associated channels for which the channel serves as a
+ reference channel. By default, no channels are selected.
+
+ azoteq,assoc-weight:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 255
+ default: 0
+ description:
+ Specifies the channel's impact weight if it acts as an associated
+ channel (0 = 0% impact, 255 = 200% impact).
+
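As a non-authoritative sketch of how these two properties pair up (the channel indices and the weight value below are illustrative only), the reference channel lists its associated channels with 'azoteq,assoc-select', while each associated channel sets its own 'azoteq,assoc-weight':

      channel@0 {
              reg = <0x0>;
              /* channel 0 acts as the reference for channel 1 */
              azoteq,assoc-select = <1>;
      };

      channel@1 {
              reg = <0x1>;
              /* roughly 100% impact when acting as an associated channel */
              azoteq,assoc-weight = <128>;
      };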
+ patternProperties:
+ "^event-prox(-alt)?$":
+ type: object
+ description:
+ Represents a proximity event reported by the channel in response to
+ a decrease in counts. Node names suffixed with '-alt' instead corre-
+ spond to an increase in counts.
+
+ By default, the long-term average tracks an increase in counts such
+ that only events corresponding to a decrease in counts are reported
+ (refer to the datasheet for more information).
+
+ Specify 'azoteq,dual-direction' to freeze the long-term average when
+ the counts increase or decrease such that events of either direction
+ can be reported. Alternatively, specify 'azoteq,invert-enable' to in-
+ vert the polarity of the states reported by the channel.
+
+ Complementary events (e.g. event-touch and event-touch-alt) can both
+ be present and specify different key or switch codes, but not differ-
+ ent thresholds or hysteresis (if applicable).
+
+ properties:
+ azoteq,thresh:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 255
+ default: 10
+ description: Specifies the threshold for the event.
+
+ linux,code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Numeric key or switch code associated with the event.
+
+ additionalProperties: false
+
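As a minimal sketch of the complementary-event case described above (the key codes are placeholders and assume 'azoteq,dual-direction' is set at the parent level), a single channel can report a decrease in counts as one key and an increase as another:

      channel@2 {
              reg = <0x2>;

              event-touch {
                      linux,code = <KEY_VOLUMEUP>;
              };

              event-touch-alt {
                      linux,code = <KEY_VOLUMEDOWN>;
              };
      };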
+ "^event-touch(-alt)?$":
+ type: object
+ description: Represents a touch event reported by the channel.
+
+ properties:
+ azoteq,thresh:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 255
+ default: 8
+ description: Specifies the threshold for the event.
+
+ azoteq,hyst:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 15
+ default: 4
+ description: Specifies the hysteresis for the event.
+
+ linux,code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Numeric key or switch code associated with the event.
+
+ additionalProperties: false
+
+ "^event-deep(-alt)?$":
+ type: object
+ description: Represents a deep-touch event reported by the channel.
+
+ properties:
+ azoteq,thresh:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 255
+ default: 26
+ description: Specifies the threshold for the event.
+
+ azoteq,hyst:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 0
+ maximum: 15
+ default: 0
+ description: Specifies the hysteresis for the event.
+
+ linux,code:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Numeric key or switch code associated with the event.
+
+ additionalProperties: false
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/input/input.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ iqs269a@44 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "azoteq,iqs269a";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+
+ azoteq,hall-enable;
+ azoteq,suspend-mode = <2>;
+
+ channel@0 {
+ reg = <0x0>;
+
+ event-prox {
+ linux,code = <KEY_POWER>;
+ };
+ };
+
+ channel@1 {
+ reg = <0x1>;
+ azoteq,slider0-select;
+ };
+
+ channel@2 {
+ reg = <0x2>;
+ azoteq,slider0-select;
+ };
+
+ channel@3 {
+ reg = <0x3>;
+ azoteq,slider0-select;
+ };
+
+ channel@4 {
+ reg = <0x4>;
+ azoteq,slider0-select;
+ };
+
+ channel@5 {
+ reg = <0x5>;
+ azoteq,slider0-select;
+ };
+
+ channel@6 {
+ reg = <0x6>;
+ azoteq,invert-enable;
+ azoteq,static-enable;
+ azoteq,reseed-disable;
+ azoteq,rx-enable = <0>;
+ azoteq,sense-freq = <0x0>;
+ azoteq,sense-mode = <0xE>;
+ azoteq,ati-mode = <0x0>;
+ azoteq,ati-base = <200>;
+ azoteq,ati-target = <320>;
+ };
+
+ channel@7 {
+ reg = <0x7>;
+ azoteq,invert-enable;
+ azoteq,static-enable;
+ azoteq,reseed-disable;
+ azoteq,rx-enable = <0>, <6>;
+ azoteq,sense-freq = <0x0>;
+ azoteq,sense-mode = <0xE>;
+ azoteq,ati-mode = <0x3>;
+ azoteq,ati-base = <200>;
+ azoteq,ati-target = <320>;
+
+ event-touch {
+ linux,code = <SW_LID>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/iqs62x-keys.yaml b/Documentation/devicetree/bindings/input/iqs62x-keys.yaml
index 5625c222903a..77fe3b545b35 100644
--- a/Documentation/devicetree/bindings/input/iqs62x-keys.yaml
+++ b/Documentation/devicetree/bindings/input/iqs62x-keys.yaml
@@ -30,10 +30,9 @@ properties:
- azoteq,iqs625-keys
linux,keycodes:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 1
- maxItems: 16
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 16
description: |
Specifies the numeric keycodes associated with each available touch or
proximity event according to the following table. An 'x' indicates the
diff --git a/Documentation/devicetree/bindings/input/msm-vibrator.txt b/Documentation/devicetree/bindings/input/msm-vibrator.txt
deleted file mode 100644
index 8dcf014ef2e5..000000000000
--- a/Documentation/devicetree/bindings/input/msm-vibrator.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-* Device tree bindings for the Qualcomm MSM vibrator
-
-Required properties:
-
- - compatible: Should be one of
- "qcom,msm8226-vibrator"
- "qcom,msm8974-vibrator"
- - reg: the base address and length of the IO memory for the registers.
- - pinctrl-names: set to default.
- - pinctrl-0: phandles pointing to pin configuration nodes. See
- Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
- - clock-names: set to pwm
- - clocks: phandle of the clock. See
- Documentation/devicetree/bindings/clock/clock-bindings.txt
- - enable-gpios: GPIO that enables the vibrator.
-
-Optional properties:
-
- - vcc-supply: phandle to the regulator that provides power to the sensor.
-
-Example from a LG Nexus 5 (hammerhead) phone:
-
-vibrator@fd8c3450 {
- reg = <0xfd8c3450 0x400>;
- compatible = "qcom,msm8974-vibrator";
-
- vcc-supply = <&pm8941_l19>;
-
- clocks = <&mmcc CAMSS_GP1_CLK>;
- clock-names = "pwm";
-
- enable-gpios = <&msmgpio 60 GPIO_ACTIVE_HIGH>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&vibrator_pin>;
-};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml
new file mode 100644
index 000000000000..8c73e5264312
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,cy8ctma140.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/cypress,cy8ctma140.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cypress CY8CTMA140 series touchscreen controller bindings
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ compatible:
+ const: cypress,cy8ctma140
+
+ reg:
+ const: 0x20
+
+ clock-frequency:
+ description: I2C client clock frequency, defined for host
+ minimum: 100000
+ maximum: 400000
+
+ interrupts:
+ maxItems: 1
+
+ vcpin-supply:
+ description: Analog power supply regulator on VCPIN pin
+
+ vdd-supply:
+ description: Digital power supply regulator on VDD pin
+
+ touchscreen-inverted-x: true
+ touchscreen-inverted-y: true
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-swapped-x-y: true
+ touchscreen-max-pressure: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - touchscreen-size-x
+ - touchscreen-size-y
+ - touchscreen-max-pressure
+
+examples:
+- |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ touchscreen@20 {
+ compatible = "cypress,cy8ctma140";
+ reg = <0x20>;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <800>;
+ touchscreen-max-pressure = <255>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&ab8500_ldo_aux2_reg>;
+ vcpin-supply = <&ab8500_ldo_aux2_reg>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index 383d64a91854..024b262a2ef7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -42,7 +42,7 @@ properties:
- focaltech,ft6236
reg:
- const: 0x38
+ maxItems: 1
interrupts:
maxItems: 1
@@ -61,33 +61,29 @@ properties:
gain:
description: Allows setting the sensitivity in the range from 0 to 31.
Note that lower values indicate higher sensitivity.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 31
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
offset:
description: Allows setting the edge compensation in the range from 0 to 31.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 31
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
offset-x:
description: Same as offset, but applies only to the horizontal position.
Range from 0 to 80, only supported by evervision,ev-ft5726 devices.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 80
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 80
offset-y:
description: Same as offset, but applies only to the vertical position.
Range from 0 to 80, only supported by evervision,ev-ft5726 devices.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 80
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 80
touchscreen-size-x: true
touchscreen-size-y: true
diff --git a/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml
new file mode 100644
index 000000000000..a792d6377b1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/elan,elants_i2c.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/input/touchscreen/elan,elants_i2c.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Elantech I2C Touchscreen
+
+maintainers:
+ - David Heidelberg <david@ixit.cz>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ compatible:
+ enum:
+ - elan,ektf3624
+ - elan,ekth3500
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ wakeup-source:
+ type: boolean
+ description: touchscreen can be used as a wakeup source.
+
+ reset-gpios:
+ description: reset gpio the chip is connected to.
+
+ vcc33-supply:
+ description: a phandle for the regulator supplying 3.3V power.
+
+ vccio-supply:
+ description: a phandle for the regulator supplying IO power.
+
+ touchscreen-inverted-x: true
+ touchscreen-inverted-y: true
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-swapped-x-y: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+
+ interrupt-parent = <&gpio4>;
+ interrupts = <0x0 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index c8ea9434c9cc..e81cfa56f25a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -63,7 +63,7 @@ required:
- interrupts
examples:
-- |
+ - |
i2c {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/mms114.txt b/Documentation/devicetree/bindings/input/touchscreen/mms114.txt
index 2cd954051d29..707234cfd7e6 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/mms114.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/mms114.txt
@@ -1,9 +1,10 @@
-* MELFAS MMS114/MMS152 touchscreen controller
+* MELFAS MMS114/MMS152/MMS345L touchscreen controller
Required properties:
- compatible: should be one of:
- "melfas,mms114"
- "melfas,mms152"
+ - "melfas,mms345l"
- reg: I2C address of the chip
- interrupts: interrupt to which the chip is connected
- touchscreen-size-x: See [1]
diff --git a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
new file mode 100644
index 000000000000..ff09550ad959
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/fsl,imx8m-noc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic i.MX bus frequency device
+
+maintainers:
+ - Leonard Crestez <leonard.crestez@nxp.com>
+
+description: |
+ The i.MX SoC family has multiple buses for which clock frequency (and
+ sometimes voltage) can be adjusted.
+
+ Some of those buses expose register areas mentioned in the memory maps as GPV
+ ("Global Programmers View"), but not all of them do. Access to this area might
+ be denied to the normal (non-secure) world.
+
+ The buses are based on externally licensed IPs such as ARM NIC-301 and
+ Arteris FlexNOC, but the DT bindings are specific to the integration of these
+ bus interconnect IPs into i.MX SoCs.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,imx8mn-nic
+ - fsl,imx8mm-nic
+ - fsl,imx8mq-nic
+ - const: fsl,imx8m-nic
+ - items:
+ - enum:
+ - fsl,imx8mn-noc
+ - fsl,imx8mm-noc
+ - fsl,imx8mq-noc
+ - const: fsl,imx8m-noc
+ - const: fsl,imx8m-nic
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ operating-points-v2: true
+ opp-table: true
+
+ fsl,ddrc:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description:
+ Phandle to DDR Controller.
+
+ '#interconnect-cells':
+ description:
+ If specified, the device also acts as an interconnect provider. Should
+ only be set once per SoC, on the main NoC.
+ const: 1
+
+required:
+ - compatible
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mm-clock.h>
+ #include <dt-bindings/interconnect/imx8mm.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ noc: interconnect@32700000 {
+ compatible = "fsl,imx8mm-noc", "fsl,imx8m-noc";
+ reg = <0x32700000 0x100000>;
+ clocks = <&clk IMX8MM_CLK_NOC>;
+ #interconnect-cells = <1>;
+ fsl,ddrc = <&ddrc>;
+
+ operating-points-v2 = <&noc_opp_table>;
+ noc_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-133M {
+ opp-hz = /bits/ 64 <133333333>;
+ };
+ opp-800M {
+ opp-hz = /bits/ 64 <800000000>;
+ };
+ };
+ };
+
+ ddrc: memory-controller@3d400000 {
+ compatible = "fsl,imx8mm-ddrc", "fsl,imx8m-ddrc";
+ reg = <0x3d400000 0x400000>;
+ clock-names = "core", "pll", "alt", "apb";
+ clocks = <&clk IMX8MM_CLK_DRAM_CORE>,
+ <&clk IMX8MM_DRAM_PLL>,
+ <&clk IMX8MM_CLK_DRAM_ALT>,
+ <&clk IMX8MM_CLK_DRAM_APB>;
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml
index 4107e60cab12..e1009ae4e8f7 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8916.yaml
@@ -10,8 +10,8 @@ maintainers:
- Georgi Djakov <georgi.djakov@linaro.org>
description: |
- The Qualcomm MSM8916 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
+ The Qualcomm MSM8916 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
index 9af3c6e59cff..8004c4baf397 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8974.yaml
@@ -10,8 +10,8 @@ maintainers:
- Brian Masney <masneyb@onstation.org>
description: |
- The Qualcomm MSM8974 interconnect providers support setting system
- bandwidth requirements between various network-on-chip fabrics.
+ The Qualcomm MSM8974 interconnect providers support setting system
+ bandwidth requirements between various network-on-chip fabrics.
properties:
reg:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
index 8d65c5f80679..3fbb8785fbc9 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,qcs404.yaml
@@ -10,8 +10,8 @@ maintainers:
- Georgi Djakov <georgi.djakov@linaro.org>
description: |
- The Qualcomm QCS404 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
+ The Qualcomm QCS404 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
properties:
reg:
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml
index 50f78f87f3fb..d01bac80d416 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sc7180.yaml
@@ -65,21 +65,21 @@ examples:
config_noc: interconnect@1500000 {
compatible = "qcom,sc7180-config-noc";
- reg = <0 0x01500000 0 0x28000>;
+ reg = <0x01500000 0x28000>;
#interconnect-cells = <1>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
system_noc: interconnect@1620000 {
compatible = "qcom,sc7180-system-noc";
- reg = <0 0x01620000 0 0x17080>;
+ reg = <0x01620000 0x17080>;
#interconnect-cells = <1>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
mmss_noc: interconnect@1740000 {
compatible = "qcom,sc7180-mmss-noc";
- reg = <0 0x01740000 0 0x1c100>;
+ reg = <0x01740000 0x1c100>;
#interconnect-cells = <1>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml
index 8b087e0b0b81..74536747b51d 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.yaml
@@ -60,14 +60,14 @@ examples:
mem_noc: interconnect@1380000 {
compatible = "qcom,sdm845-mem-noc";
- reg = <0 0x01380000 0 0x27200>;
+ reg = <0x01380000 0x27200>;
#interconnect-cells = <1>;
qcom,bcm-voters = <&apps_bcm_voter>;
};
mmss_noc: interconnect@1740000 {
compatible = "qcom,sdm845-mmss-noc";
- reg = <0 0x01740000 0 0x1c1000>;
+ reg = <0x01740000 0x1c1000>;
#interconnect-cells = <1>;
qcom,bcm-voter-names = "apps", "disp";
qcom,bcm-voters = <&apps_bcm_voter>, <&disp_bcm_voter>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
index cf09055da78b..7cd6b8bacfa0 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
@@ -27,15 +27,15 @@ properties:
deprecated: true
- const: allwinner,sun7i-a20-sc-nmi
- items:
- - const: allwinner,sun8i-a83t-r-intc
- - const: allwinner,sun6i-a31-r-intc
+ - const: allwinner,sun8i-a83t-r-intc
+ - const: allwinner,sun6i-a31-r-intc
- const: allwinner,sun9i-a80-sc-nmi
- items:
- - const: allwinner,sun50i-a64-r-intc
- - const: allwinner,sun6i-a31-r-intc
+ - const: allwinner,sun50i-a64-r-intc
+ - const: allwinner,sun6i-a31-r-intc
- items:
- - const: allwinner,sun50i-h6-r-intc
- - const: allwinner,sun6i-a31-r-intc
+ - const: allwinner,sun50i-h6-r-intc
+ - const: allwinner,sun6i-a31-r-intc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index 66aacd106503..1ecd1831cf02 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -91,18 +91,16 @@ properties:
description:
If using padding pages, specifies the stride of consecutive
redistributors. Must be a multiple of 64kB.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint64
- - multipleOf: 0x10000
- exclusiveMinimum: 0
+ $ref: /schemas/types.yaml#/definitions/uint64
+ multipleOf: 0x10000
+ exclusiveMinimum: 0
"#redistributor-regions":
description:
The number of independent contiguous regions occupied by the
redistributors. Required if more than one such region is present.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maximum: 4096 # Should be enough?
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 4096
msi-controller:
description:
@@ -114,22 +112,20 @@ properties:
A list of pairs <intid span>, where "intid" is the first SPI of a range
that can be used as an MBI, and "span" is the size of that range. Multiple
ranges can be provided.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- minItems: 2
- maxItems: 2
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ minItems: 2
+ maxItems: 2
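As a hedged illustration of the pair format described above (the SPI base and span values are placeholders, not recommendations), a provider reserving two blocks of 64 SPIs for MBIs starting at INTIDs 256 and 320 would use:

      mbi-ranges = <256 64>, <320 64>;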
mbi-alias:
description:
Address property. Base address of an alias of the GICD region containing
only the {SET,CLR}SPI registers to be used if isolation is required,
and if supported by the HW.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- minItems: 1
- maxItems: 2
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minItems: 1
+ maxItems: 2
ppi-partitions:
type: object
@@ -188,11 +184,10 @@ patternProperties:
description:
(u32, u32) tuple describing the untranslated
address and size of the pre-ITS window.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- minItems: 2
- maxItems: 2
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minItems: 2
+ maxItems: 2
required:
- compatible
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
index 9a47820ef346..96f8803ff4e6 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
@@ -40,6 +40,12 @@ properties:
- qcom,msm-qgic2
- items:
+ - const: arm,gic-400
+ - enum:
+ - arm,cortex-a15-gic
+ - arm,cortex-a7-gic
+
+ - items:
- const: arm,arm1176jzf-devchip-gic
- const: arm,arm11mp-gic
@@ -125,6 +131,9 @@ properties:
power-domains:
maxItems: 1
+ resets:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
deleted file mode 100644
index 582991c426ee..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Freescale IRQSTEER Interrupt multiplexer
-
-Required properties:
-
-- compatible: should be:
- - "fsl,imx8m-irqsteer"
- - "fsl,imx-irqsteer"
-- reg: Physical base address and size of registers.
-- interrupts: Should contain the up to 8 parent interrupt lines used to
- multiplex the input interrupts. They should be specified sequentially
- from output 0 to 7.
-- clocks: Should contain one clock for entry in clock-names
- see Documentation/devicetree/bindings/clock/clock-bindings.txt
-- clock-names:
- - "ipg": main logic clock
-- interrupt-controller: Identifies the node as an interrupt controller.
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value must be 1.
-- fsl,channel: The output channel that all input IRQs should be steered into.
-- fsl,num-irqs: Number of input interrupts of this channel.
- Should be multiple of 32 input interrupts and up to 512 interrupts.
-
-Example:
-
- interrupt-controller@32e2d000 {
- compatible = "fsl,imx8m-irqsteer", "fsl,imx-irqsteer";
- reg = <0x32e2d000 0x1000>;
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
- clock-names = "ipg";
- fsl,channel = <0>;
- fsl,num-irqs = <64>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
new file mode 100644
index 000000000000..360a575ef8b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/fsl,irqsteer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IRQSTEER Interrupt Multiplexer
+
+maintainers:
+ - Lucas Stach <l.stach@pengutronix.de>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8m-irqsteer
+ - fsl,imx-irqsteer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: |
+ should contain the up to 8 parent interrupt lines used to multiplex
+ the input interrupts. They should be specified sequentially from
+ output 0 to 7.
+ items:
+ - description: output interrupt 0
+ - description: output interrupt 1
+ - description: output interrupt 2
+ - description: output interrupt 3
+ - description: output interrupt 4
+ - description: output interrupt 5
+ - description: output interrupt 6
+ - description: output interrupt 7
+ minItems: 1
+ maxItems: 8
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: ipg
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ fsl,channel:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: |
+ u32 value representing the output channel that all input IRQs should be
+ steered into.
+
+ fsl,num-irqs:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: |
+ u32 value representing the number of input interrupts of this channel;
+ it should be a multiple of 32, up to a maximum of 512 interrupts.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - interrupt-controller
+ - "#interrupt-cells"
+ - fsl,channel
+ - fsl,num-irqs
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mq-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ interrupt-controller@32e2d000 {
+ compatible = "fsl,imx-irqsteer";
+ reg = <0x32e2d000 0x1000>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
+ clock-names = "ipg";
+ fsl,channel = <0>;
+ fsl,num-irqs = <64>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
deleted file mode 100644
index d4373d0f7121..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Ingenic SoC Interrupt Controller
-
-Required properties:
-
-- compatible : should be "ingenic,<socname>-intc". Valid strings are:
- ingenic,jz4740-intc
- ingenic,jz4725b-intc
- ingenic,jz4770-intc
- ingenic,jz4775-intc
- ingenic,jz4780-intc
-- reg : Specifies base physical address and size of the registers.
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1.
-- interrupts : Specifies the CPU interrupt the controller is connected to.
-
-Example:
-
-intc: interrupt-controller@10001000 {
- compatible = "ingenic,jz4740-intc";
- reg = <0x10001000 0x14>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&cpuintc>;
- interrupts = <2>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml
new file mode 100644
index 000000000000..28b27e1a6e9d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/ingenic,intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs interrupt controller devicetree bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ $nodename:
+ pattern: "^interrupt-controller@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-intc
+ - ingenic,jz4760-intc
+ - ingenic,jz4780-intc
+ - items:
+ - enum:
+ - ingenic,jz4775-intc
+ - ingenic,jz4770-intc
+ - const: ingenic,jz4760-intc
+ - items:
+ - const: ingenic,x1000-intc
+ - const: ingenic,jz4780-intc
+ - items:
+ - const: ingenic,jz4725b-intc
+ - const: ingenic,jz4740-intc
+
+ "#interrupt-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#interrupt-cells"
+ - interrupt-controller
+
+examples:
+ - |
+ intc: interrupt-controller@10001000 {
+ compatible = "ingenic,jz4770-intc", "ingenic,jz4760-intc";
+ reg = <0x10001000 0x40>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
index ccc507f384d2..14dced11877b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
@@ -25,10 +25,10 @@ properties:
compatible:
items:
- enum:
- - intel,ixp42x-interrupt
- - intel,ixp43x-interrupt
- - intel,ixp45x-interrupt
- - intel,ixp46x-interrupt
+ - intel,ixp42x-interrupt
+ - intel,ixp43x-interrupt
+ - intel,ixp45x-interrupt
+ - intel,ixp46x-interrupt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
index 26f1fcf0857a..b1db21ed44e9 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,liointc.yaml
@@ -54,11 +54,9 @@ properties:
and each bit in the cell refers to a child interrupt from 0 to 31.
If a CPU interrupt line isn't connected to the liointc, keep its
cell at zero.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 4
- maxItems: 4
-
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 4
+ maxItems: 4
required:
- compatible
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt
deleted file mode 100644
index 772c550d3b4b..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-DT bindings for the R-/SH-Mobile irqpin controller
-
-Required properties:
-
-- compatible: has to be "renesas,intc-irqpin-<soctype>", "renesas,intc-irqpin"
- as fallback.
- Examples with soctypes are:
- - "renesas,intc-irqpin-r8a7740" (R-Mobile A1)
- - "renesas,intc-irqpin-r8a7778" (R-Car M1A)
- - "renesas,intc-irqpin-r8a7779" (R-Car H1)
- - "renesas,intc-irqpin-sh73a0" (SH-Mobile AG5)
-
-- reg: Base address and length of each register bank used by the external
- IRQ pins driven by the interrupt controller hardware module. The base
- addresses, length and number of required register banks varies with soctype.
-- interrupt-controller: Identifies the node as an interrupt controller.
-- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
- interrupts.txt in this directory.
-- interrupts: Must contain a list of interrupt specifiers. For each interrupt
- provided by this irqpin controller instance, there must be one entry,
- referring to the corresponding parent interrupt.
-
-Optional properties:
-
-- any properties, listed in interrupts.txt, and any standard resource allocation
- properties
-- sense-bitfield-width: width of a single sense bitfield in the SENSE register,
- if different from the default 4 bits
-- control-parent: disable and enable interrupts on the parent interrupt
- controller, needed for some broken implementations
-- clocks: Must contain a reference to the functional clock. This property is
- mandatory if the hardware implements a controllable functional clock for
- the irqpin controller instance.
-- power-domains: Must contain a reference to the power domain. This property is
- mandatory if the irqpin controller instance is part of a controllable power
- domain.
-
-
-Example
--------
-
- irqpin1: interrupt-controller@e6900004 {
- compatible = "renesas,intc-irqpin-r8a7740",
- "renesas,intc-irqpin";
- #interrupt-cells = <2>;
- interrupt-controller;
- reg = <0xe6900004 4>,
- <0xe6900014 4>,
- <0xe6900024 1>,
- <0xe6900044 1>,
- <0xe6900064 1>;
- interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH
- 0 149 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
- power-domains = <&pd_a4s>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.yaml
new file mode 100644
index 000000000000..f4aae56c6469
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,intc-irqpin.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/renesas,intc-irqpin.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Interrupt Controller (INTC) for external pins
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,intc-irqpin-r8a7740 # R-Mobile A1
+ - renesas,intc-irqpin-r8a7778 # R-Car M1A
+ - renesas,intc-irqpin-r8a7779 # R-Car H1
+ - renesas,intc-irqpin-sh73a0 # SH-Mobile AG5
+ - const: renesas,intc-irqpin
+
+ reg:
+ minItems: 5
+ items:
+ - description: Interrupt control register
+ - description: Interrupt priority register
+ - description: Interrupt source register
+ - description: Interrupt mask register
+ - description: Interrupt mask clear register
+ - description: Interrupt control register for ICR0 with IRLM0 bit
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+ interrupts:
+ minItems: 1
+ maxItems: 8
+
+ sense-bitfield-width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [2, 4]
+ default: 4
+ description:
+ Width of a single sense bitfield in the SENSE register, if different from the
+ default.
+
+ control-parent:
+ type: boolean
+ description:
+ Disable and enable interrupts on the parent interrupt controller, needed for some
+ broken implementations.
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,intc-irqpin-r8a7740
+ - renesas,intc-irqpin-sh73a0
+then:
+ required:
+ - clocks
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7740-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ irqpin1: interrupt-controller@e6900004 {
+ compatible = "renesas,intc-irqpin-r8a7740", "renesas,intc-irqpin";
+ reg = <0xe6900004 4>,
+ <0xe6900014 4>,
+ <0xe6900024 1>,
+ <0xe6900044 1>,
+ <0xe6900064 1>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
+ power-domains = <&pd_a4s>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
index ee5273b6c5a3..b67b8cbd33fc 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- renesas,irqc-r8a73a4 # R-Mobile APE6
+ - renesas,irqc-r8a7742 # RZ/G1H
- renesas,irqc-r8a7743 # RZ/G1M
- renesas,irqc-r8a7744 # RZ/G1N
- renesas,irqc-r8a7745 # RZ/G1E
@@ -78,7 +79,7 @@ examples:
compatible = "renesas,irqc-r8a7790", "renesas,irqc";
#interrupt-cells = <2>;
interrupt-controller;
- reg = <0 0xe61c0000 0 0x200>;
+ reg = <0xe61c0000 0x200>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
index 9e5c6608b4e3..2a5b29567926 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.yaml
@@ -14,13 +14,13 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - st,stm32-exti
- - st,stm32h7-exti
+ - enum:
+ - st,stm32-exti
+ - st,stm32h7-exti
- items:
- - enum:
- - st,stm32mp1-exti
- - const: syscon
+ - enum:
+ - st,stm32mp1-exti
+ - const: syscon
"#interrupt-cells":
const: 2
diff --git a/Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml b/Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml
new file mode 100644
index 000000000000..5e125cf2a88b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/allwinner,sun50i-h6-iommu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H6 IOMMU Device Tree Bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <mripard@kernel.org>
+
+properties:
+ "#iommu-cells":
+ const: 1
+ description:
+ The content of the cell is the master ID.
+
+ compatible:
+ const: allwinner,sun50i-h6-iommu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - "#iommu-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ #include <dt-bindings/clock/sun50i-h6-ccu.h>
+ #include <dt-bindings/reset/sun50i-h6-ccu.h>
+
+ iommu: iommu@30f0000 {
+ compatible = "allwinner,sun50i-h6-iommu";
+ reg = <0x030f0000 0x10000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_IOMMU>;
+ resets = <&ccu RST_BUS_IOMMU>;
+ #iommu-cells = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 6515dbe47508..d7ceb4c34423 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -28,6 +28,7 @@ properties:
- enum:
- qcom,msm8996-smmu-v2
- qcom,msm8998-smmu-v2
+ - qcom,sc7180-smmu-v2
- qcom,sdm845-smmu-v2
- const: qcom,smmu-v2
@@ -41,7 +42,9 @@ properties:
- const: arm,mmu-500
- const: arm,smmu-v2
- items:
- - const: arm,mmu-401
+ - enum:
+ - arm,mmu-400
+ - arm,mmu-401
- const: arm,smmu-v1
- enum:
- arm,smmu-v1
@@ -56,8 +59,7 @@ properties:
'#global-interrupts':
description: The number of global interrupts exposed by the device.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 260 # 2 secure, 2 non-secure, and up to 256 perf counters
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
deleted file mode 100644
index 020d6f226efb..000000000000
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-* Renesas VMSA-Compatible IOMMU
-
-The IPMMU is an IOMMU implementation compatible with the ARM VMSA page tables.
-It provides address translation for bus masters outside of the CPU, each
-connected to the IPMMU through a port called micro-TLB.
-
-
-Required Properties:
-
- - compatible: Must contain SoC-specific and generic entry below in case
- the device is compatible with the R-Car Gen2 VMSA-compatible IPMMU.
-
- - "renesas,ipmmu-r8a73a4" for the R8A73A4 (R-Mobile APE6) IPMMU.
- - "renesas,ipmmu-r8a7743" for the R8A7743 (RZ/G1M) IPMMU.
- - "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
- - "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
- - "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU.
- - "renesas,ipmmu-r8a774b1" for the R8A774B1 (RZ/G2N) IPMMU.
- - "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU.
- - "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
- - "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
- - "renesas,ipmmu-r8a7793" for the R8A7793 (R-Car M2-N) IPMMU.
- - "renesas,ipmmu-r8a7794" for the R8A7794 (R-Car E2) IPMMU.
- - "renesas,ipmmu-r8a7795" for the R8A7795 (R-Car H3) IPMMU.
- - "renesas,ipmmu-r8a7796" for the R8A7796 (R-Car M3-W) IPMMU.
- - "renesas,ipmmu-r8a77965" for the R8A77965 (R-Car M3-N) IPMMU.
- - "renesas,ipmmu-r8a77970" for the R8A77970 (R-Car V3M) IPMMU.
- - "renesas,ipmmu-r8a77980" for the R8A77980 (R-Car V3H) IPMMU.
- - "renesas,ipmmu-r8a77990" for the R8A77990 (R-Car E3) IPMMU.
- - "renesas,ipmmu-r8a77995" for the R8A77995 (R-Car D3) IPMMU.
- - "renesas,ipmmu-vmsa" for generic R-Car Gen2 or RZ/G1 VMSA-compatible
- IPMMU.
-
- - reg: Base address and size of the IPMMU registers.
- - interrupts: Specifiers for the MMU fault interrupts. For instances that
- support secure mode two interrupts must be specified, for non-secure and
- secure mode, in that order. For instances that don't support secure mode a
- single interrupt must be specified. Not required for cache IPMMUs.
-
- - #iommu-cells: Must be 1.
-
-Optional properties:
-
- - renesas,ipmmu-main: reference to the main IPMMU instance in two cells.
- The first cell is a phandle to the main IPMMU and the second cell is
- the interrupt bit number associated with the particular cache IPMMU device.
- The interrupt bit number needs to match the main IPMMU IMSSTR register.
- Only used by cache IPMMU instances.
-
-
-Each bus master connected to an IPMMU must reference the IPMMU in its device
-node with the following property:
-
- - iommus: A reference to the IPMMU in two cells. The first cell is a phandle
- to the IPMMU and the second cell the number of the micro-TLB that the
- device is connected to.
-
-
-Example: R8A7791 IPMMU-MX and VSP1-D0 bus master
-
- ipmmu_mx: mmu@fe951000 {
- compatible = "renasas,ipmmu-r8a7791", "renasas,ipmmu-vmsa";
- reg = <0 0xfe951000 0 0x1000>;
- interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
- <0 221 IRQ_TYPE_LEVEL_HIGH>;
- #iommu-cells = <1>;
- };
-
- vsp@fe928000 {
- ...
- iommus = <&ipmmu_mx 13>;
- ...
- };
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
new file mode 100644
index 000000000000..39675cf4ed71
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/renesas,ipmmu-vmsa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas VMSA-Compatible IOMMU
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+description:
+ The IPMMU is an IOMMU implementation compatible with the ARM VMSA page tables.
+ It provides address translation for bus masters outside of the CPU, each
+ connected to the IPMMU through a port called micro-TLB.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,ipmmu-r8a73a4 # R-Mobile APE6
+ - renesas,ipmmu-r8a7743 # RZ/G1M
+ - renesas,ipmmu-r8a7744 # RZ/G1N
+ - renesas,ipmmu-r8a7745 # RZ/G1E
+ - renesas,ipmmu-r8a7790 # R-Car H2
+ - renesas,ipmmu-r8a7791 # R-Car M2-W
+ - renesas,ipmmu-r8a7793 # R-Car M2-N
+ - renesas,ipmmu-r8a7794 # R-Car E2
+ - const: renesas,ipmmu-vmsa # R-Mobile APE6 or R-Car Gen2 or RZ/G1
+ - items:
+ - enum:
+ - renesas,ipmmu-r8a774a1 # RZ/G2M
+ - renesas,ipmmu-r8a774b1 # RZ/G2N
+ - renesas,ipmmu-r8a774c0 # RZ/G2E
+ - renesas,ipmmu-r8a7795 # R-Car H3
+ - renesas,ipmmu-r8a7796 # R-Car M3-W
+ - renesas,ipmmu-r8a77965 # R-Car M3-N
+ - renesas,ipmmu-r8a77970 # R-Car V3M
+ - renesas,ipmmu-r8a77980 # R-Car V3H
+ - renesas,ipmmu-r8a77990 # R-Car E3
+ - renesas,ipmmu-r8a77995 # R-Car D3
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+ description:
+ Specifiers for the MMU fault interrupts. Not required for cache IPMMUs.
+ items:
+ - description: non-secure mode
+ - description: secure mode if supported
+
+ '#iommu-cells':
+ const: 1
+ description:
+ The single cell is the number of the micro-TLB that the device is connected to.
+
+ power-domains:
+ maxItems: 1
+
+ renesas,ipmmu-main:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ Reference to the main IPMMU phandle plus 1 cell. The cell is
+ the interrupt bit number associated with the particular cache IPMMU
+ device. The interrupt bit number needs to match the main IPMMU IMSSTR
+ register. Only used by cache IPMMU instances.
+
+required:
+ - compatible
+ - reg
+ - '#iommu-cells'
+ - power-domains
+
+oneOf:
+ - required:
+ - interrupts
+ - required:
+ - renesas,ipmmu-main
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7791-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7791-sysc.h>
+
+ ipmmu_mx: iommu@fe951000 {
+ compatible = "renasas,ipmmu-r8a7791", "renasas,ipmmu-vmsa";
+ reg = <0xfe951000 0x1000>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ };
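For completeness, the consumer side shown in the removed text binding still applies: a bus master references the IPMMU through an 'iommus' phandle whose cell is the micro-TLB number. A sketch reusing the node above (other VSP properties elided):

      vsp@fe928000 {
              /* ... other VSP properties ... */
              iommus = <&ipmmu_mx 13>;
      };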
diff --git a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
index 0e33cd9e010e..af51b91c893e 100644
--- a/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/samsung,sysmmu.yaml
@@ -54,13 +54,13 @@ properties:
clock-names:
oneOf:
- items:
- - const: sysmmu
+ - const: sysmmu
- items:
- - const: sysmmu
- - const: master
+ - const: sysmmu
+ - const: master
- items:
- - const: aclk
- - const: pclk
+ - const: aclk
+ - const: pclk
"#iommu-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
deleted file mode 100644
index d5f1a877ed3e..000000000000
--- a/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-IPMI device
-
-Required properties:
-- compatible: should be one of ipmi-kcs, ipmi-smic, or ipmi-bt
-- device_type: should be ipmi
-- reg: Address and length of the register set for the device
-
-Optional properties:
-- interrupts: The interrupt for the device. Without this the interface
- is polled.
-- reg-size - The size of the register. Defaults to 1
-- reg-spacing - The number of bytes between register starts. Defaults to 1
-- reg-shift - The amount to shift the registers to the right to get the data
- into bit zero.
-
-Example:
-
-smic@fff3a000 {
- compatible = "ipmi-smic";
- device_type = "ipmi";
- reg = <0xfff3a000 0x1000>;
- interrupts = <0 24 4>;
- reg-size = <4>;
- reg-spacing = <4>;
-};
diff --git a/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml b/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml
new file mode 100644
index 000000000000..f0bb157e9417
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/ipmi-smic.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ipmi/ipmi-smic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IPMI device bindings
+
+description: IPMI device bindings
+
+maintainers:
+ - Corey Minyard <cminyard@mvista.com>
+
+properties:
+ compatible:
+ enum:
+ - ipmi-kcs
+ - ipmi-smic
+ - ipmi-bt
+
+ device_type:
+ items:
+ - const: "ipmi"
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: Interface is polled if this property is omitted.
+ maxItems: 1
+
+ reg-size:
+ description: The access width of the register in bytes. Defaults to 1.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [1, 2, 4, 8]
+
+ reg-spacing:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The number of bytes between register starts. Defaults to 1.
+
+ reg-shift:
+ description: |
+ The number of bits to shift the register content to the right to get
+ the data into bit zero.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maximum: 56
+
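As an illustrative fragment (the values are hypothetical and not taken from any particular platform), a device whose IPMI data byte sits in bits 8..15 of each 32-bit register would combine:

      reg-size = <4>;
      reg-shift = <8>;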
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ smic@fff3a000 {
+ compatible = "ipmi-smic";
+ device_type = "ipmi";
+ reg = <0xfff3a000 0x1000>;
+ interrupts = <0 24 4>;
+ reg-size = <4>;
+ reg-spacing = <4>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt
deleted file mode 100644
index c06863badfbd..000000000000
--- a/Documentation/devicetree/bindings/leds/backlight/qcom-wled.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-Binding for Qualcomm Technologies, Inc. WLED driver
-
-WLED (White Light Emitting Diode) driver is used for controlling display
-backlight that is part of PMIC on Qualcomm Technologies, Inc. reference
-platforms. The PMIC is connected to the host processor via SPMI bus.
-
-- compatible
- Usage: required
- Value type: <string>
- Definition: should be one of:
- "qcom,pm8941-wled"
- "qcom,pmi8998-wled"
- "qcom,pm660l-wled"
-
-- reg
- Usage: required
- Value type: <prop encoded array>
- Definition: Base address of the WLED modules.
-
-- default-brightness
- Usage: optional
- Value type: <u32>
- Definition: brightness value on boot, value from: 0-4095.
- Default: 2048
-
-- label
- Usage: required
- Value type: <string>
- Definition: The name of the backlight device
-
-- qcom,cs-out
- Usage: optional
- Value type: <bool>
- Definition: enable current sink output.
- This property is supported only for PM8941.
-
-- qcom,cabc
- Usage: optional
- Value type: <bool>
- Definition: enable content adaptive backlight control.
-
-- qcom,ext-gen
- Usage: optional
- Value type: <bool>
- Definition: use externally generated modulator signal to dim.
- This property is supported only for PM8941.
-
-- qcom,current-limit
- Usage: optional
- Value type: <u32>
- Definition: mA; per-string current limit; value from 0 to 25 with
- 1 mA step. Default 20 mA.
- This property is supported only for pm8941.
-
-- qcom,current-limit-microamp
- Usage: optional
- Value type: <u32>
- Definition: uA; per-string current limit; value from 0 to 30000 with
- 2500 uA step. Default 25 mA.
-
-- qcom,current-boost-limit
- Usage: optional
- Value type: <u32>
- Definition: mA; boost current limit.
- For pm8941: one of: 105, 385, 525, 805, 980, 1260, 1400,
- 1680. Default: 805 mA.
- For pmi8998: one of: 105, 280, 450, 620, 970, 1150, 1300,
- 1500. Default: 970 mA.
-
-- qcom,switching-freq
- Usage: optional
- Value type: <u32>
- Definition: kHz; switching frequency; one of: 600, 640, 685, 738,
- 800, 872, 960, 1066, 1200, 1371, 1600, 1920, 2400, 3200,
- 4800, 9600.
- Default: for pm8941: 1600 kHz
- for pmi8998: 800 kHz
-
-- qcom,ovp
- Usage: optional
- Value type: <u32>
- Definition: V; Over-voltage protection limit; one of:
- 27, 29, 32, 35. Default: 29V
- This property is supported only for PM8941.
-
-- qcom,ovp-millivolt
- Usage: optional
- Value type: <u32>
- Definition: mV; Over-voltage protection limit;
- For pmi8998: one of 18100, 19600, 29600, 31100.
- Default 29600 mV.
- If this property is not specified for PM8941, it
- falls back to "qcom,ovp" property.
-
-- qcom,num-strings
- Usage: optional
- Value type: <u32>
- Definition: #; number of led strings attached;
- value: For PM8941 from 1 to 3. Default: 2
- For PMI8998 from 1 to 4.
-
-- interrupts
- Usage: optional
- Value type: <prop encoded array>
- Definition: Interrupts associated with WLED. This should be
- "short" and "ovp" interrupts. Interrupts can be
- specified as per the encoding listed under
- Documentation/devicetree/bindings/spmi/
- qcom,spmi-pmic-arb.txt.
-
-- interrupt-names
- Usage: optional
- Value type: <string>
- Definition: Interrupt names associated with the interrupts.
- Must be "short" and "ovp". The short circuit detection
- is not supported for PM8941.
-
-- qcom,enabled-strings
- Usage: optional
- Value tyoe: <u32 array>
- Definition: Array of the WLED strings numbered from 0 to 3. Each
- string of leds are operated individually. Specify the
- list of strings used by the device. Any combination of
- led strings can be used.
-
-- qcom,external-pfet
- Usage: optional
- Value type: <bool>
- Definition: Specify if external PFET control for short circuit
- protection is used. This property is supported only
- for PMI8998.
-
-- qcom,auto-string-detection
- Usage: optional
- Value type: <bool>
- Definition: Enables auto-detection of the WLED string configuration.
- This feature is not supported for PM8941.
-
-
-Example:
-
-pm8941-wled@d800 {
- compatible = "qcom,pm8941-wled";
- reg = <0xd800>;
- label = "backlight";
-
- qcom,cs-out;
- qcom,current-limit = <20>;
- qcom,current-boost-limit = <805>;
- qcom,switching-freq = <1600>;
- qcom,ovp = <29>;
- qcom,num-strings = <2>;
- qcom,enabled-strings = <0 1>;
-};
diff --git a/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml
new file mode 100644
index 000000000000..01c7d93dc658
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/qcom-wled.yaml
@@ -0,0 +1,261 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/backlight/qcom-wled.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Binding for Qualcomm Technologies, Inc. WLED driver
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+ - Kiran Gunda <kgunda@codeaurora.org>
+
+description: |
+ The WLED (White Light Emitting Diode) driver is used for controlling the
+ display backlight that is part of the PMIC on Qualcomm Technologies, Inc.
+ reference platforms. The PMIC is connected to the host processor via the
+ SPMI bus.
+
+properties:
+ compatible:
+ enum:
+ - qcom,pm8941-wled
+ - qcom,pmi8998-wled
+ - qcom,pm660l-wled
+ - qcom,pm8150l-wled
+
+ reg:
+ maxItems: 1
+
+ default-brightness:
+ description: |
+ brightness value on boot.
+
+ label: true
+
+ max-brightness:
+ description: |
+ Maximum brightness level.
+
+ qcom,cs-out:
+ description: |
+ enable current sink output.
+ This property is supported only for WLED3.
+ type: boolean
+
+ qcom,cabc:
+ description: |
+ enable content adaptive backlight control.
+ type: boolean
+
+ qcom,ext-gen:
+ description: |
+ use externally generated modulator signal to dim.
+ This property is supported only for WLED3.
+ type: boolean
+
+ qcom,current-limit:
+ description: |
+ mA; per-string current limit.
+ This property is supported only for WLED3.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ default: 20
+ minimum: 0
+ maximum: 25
+
+ qcom,current-limit-microamp:
+ description: |
+ uA; per-string current limit.
+ default: 25000
+ minimum: 0
+ maximum: 30000
+ multipleOf: 2500
+
+ qcom,current-boost-limit:
+ description: |
+ mA; boost current limit.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+
+ qcom,switching-freq:
+ description: |
+ kHz; switching frequency.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371, 1600, 1920, 2400, 3200, 4800, 9600 ]
+
+ qcom,ovp:
+ description: |
+ V; Over-voltage protection limit.
+ This property is supported only for WLED3.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 27, 29, 32, 35 ]
+ - default: 29
+
+ qcom,ovp-millivolt:
+ description: |
+ Over-voltage protection limit. This property is for WLED4 only.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 18100, 19600, 29600, 31100 ]
+ - default: 29600
+
+ qcom,num-strings:
+ description: |
+ number of led strings attached.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+
+ qcom,enabled-strings:
+ description: |
+ Array of the WLED strings numbered from 0 to 3. Each
+ string of LEDs is operated individually. Specify the
+ list of strings used by the device. Any combination of
+ LED strings can be used.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+
+ qcom,external-pfet:
+ description: |
+ Specify if external PFET control for short circuit
+ protection is used. This property is supported only
+ for WLED4.
+ type: boolean
+
+ qcom,auto-string-detection:
+ description: |
+ Enables auto-detection of the WLED string configuration.
+ This feature is not supported for WLED3.
+ type: boolean
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: over voltage protection interrupt.
+ - description: short circuit interrupt.
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: ovp
+ - const: short
+
+ qcom,modulator-sel:
+ description: |
+ Selects the modulator used for brightness modulation.
+ Allowed values are,
+ 0 - Modulator A
+ 1 - Modulator B
+ This property is applicable only to WLED5 peripheral.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1 ]
+ - default: 0
+
+ qcom,cabc-sel:
+ description: |
+ Selects the CABC pin signal used for brightness modulation.
+ Allowed values are,
+ 0 - CABC disabled
+ 1 - CABC 1
+ 2 - CABC 2
+ 3 - External signal (e.g. LPG) is used for dimming
+ This property is applicable only to WLED5 peripheral.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 1, 2, 3 ]
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: qcom,pm8941-wled
+
+ then:
+ properties:
+ qcom,current-boost-limit:
+ enum: [ 105, 385, 525, 805, 980, 1260, 1400, 1680 ]
+ default: 805
+
+ qcom,switching-freq:
+ default: 1600
+
+ qcom,num-strings:
+ enum: [ 1, 2, 3 ]
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ maxItems: 1
+
+ else:
+ properties:
+ qcom,current-boost-limit:
+ enum: [ 105, 280, 450, 620, 970, 1150, 1300, 1500 ]
+ default: 970
+
+ qcom,switching-freq:
+ default: 800
+
+ qcom,num-strings:
+ enum: [ 1, 2, 3, 4 ]
+
+ interrupts:
+ minItems: 2
+
+ interrupt-names:
+ minItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,pm8150l-wled
+
+ then:
+ properties:
+ default-brightness:
+ minimum: 0
+ maximum: 32767
+
+ max-brightness:
+ minimum: 0
+ maximum: 32767
+
+ else:
+ properties:
+ default-brightness:
+ minimum: 0
+ maximum: 4095
+
+ max-brightness:
+ minimum: 0
+ maximum: 4095
+
+required:
+ - compatible
+ - reg
+ - label
+
+additionalProperties: false
+
+examples:
+ - |
+ backlight@d800 {
+ compatible = "qcom,pm8941-wled";
+ reg = <0xd800 0x100>;
+ label = "backlight";
+
+ qcom,cs-out;
+ qcom,current-limit = <20>;
+ qcom,current-boost-limit = <805>;
+ qcom,switching-freq = <1600>;
+ qcom,ovp = <29>;
+ qcom,num-strings = <2>;
+ qcom,enabled-strings = <0 1>;
+ };
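The example above covers only the PM8941 (WLED3) case. Below is a minimal sketch of a pmi8998 (WLED4) node exercising the WLED4-only properties from this schema; the node placement and the interrupt cells are placeholders (a real specifier follows the parent SPMI PMIC arbiter's #interrupt-cells), not something taken from this commit.

    backlight@d800 {
        compatible = "qcom,pmi8998-wled";
        reg = <0xd800 0x100>;
        label = "backlight";

        qcom,current-limit-microamp = <25000>;
        qcom,ovp-millivolt = <29600>;
        qcom,num-strings = <4>;
        qcom,auto-string-detection;
        qcom,external-pfet;

        /* "ovp" first, then "short", matching interrupt-names above;
         * the cell values here are illustrative only */
        interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
                     <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
        interrupt-names = "ovp", "short";
    };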
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 4c270fde4567..a2a541bca73c 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -41,8 +41,7 @@ properties:
Color of the LED. Use one of the LED_COLOR_ID_* prefixed definitions from
the header include/dt-bindings/leds/common.h. If there is no matching
LED_COLOR_ID available, add a new one.
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32
+ $ref: /schemas/types.yaml#definitions/uint32
minimum: 0
maximum: 8
@@ -67,8 +66,7 @@ properties:
produced where the LED momentarily turns off (or on). The "keep" setting
will keep the LED at whatever its current state is, without producing a
glitch.
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
+ $ref: /schemas/types.yaml#definitions/string
enum:
- on
- off
@@ -79,8 +77,8 @@ properties:
description:
This parameter, if present, is a string defining the trigger assigned to
the LED.
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
+ $ref: /schemas/types.yaml#definitions/string
+
enum:
# LED will act as a back-light, controlled by the framebuffer system
- backlight
@@ -111,8 +109,7 @@ properties:
brightness and duration (in ms). The exact format is
described in:
Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32-matrix
+ $ref: /schemas/types.yaml#definitions/uint32-matrix
items:
minItems: 2
maxItems: 2
diff --git a/Documentation/devicetree/bindings/leds/leds-aw2013.yaml b/Documentation/devicetree/bindings/leds/leds-aw2013.yaml
new file mode 100644
index 000000000000..f118721df1e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-aw2013.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-aw2013.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AWINIC AW2013 3-channel LED Driver
+
+maintainers:
+ - Nikita Travkin <nikitos.tr@gmail.com>
+
+description: |
+ The AW2013 is a 3-channel LED driver with I2C interface. It can control
+ LED brightness with PWM output.
+
+properties:
+ compatible:
+ const: awinic,aw2013
+
+ reg:
+ maxItems: 1
+
+ vcc-supply:
+ description: Regulator providing power to the "VCC" pin.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[0-2]$":
+ type: object
+ allOf:
+ - $ref: common.yaml#
+
+ properties:
+ reg:
+ description: Index of the LED.
+ minimum: 0
+ maximum: 2
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@45 {
+ compatible = "awinic,aw2013";
+ reg = <0x45>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vcc-supply = <&pm8916_l17>;
+
+ led@0 {
+ reg = <0>;
+ led-max-microamp = <5000>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@1 {
+ reg = <1>;
+ led-max-microamp = <5000>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@2 {
+ reg = <2>;
+ led-max-microamp = <5000>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/leds/leds-gpio.yaml b/Documentation/devicetree/bindings/leds/leds-gpio.yaml
index 0e75b185dd19..7ad2baeda0b0 100644
--- a/Documentation/devicetree/bindings/leds/leds-gpio.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-gpio.yaml
@@ -24,8 +24,7 @@ patternProperties:
"(^led-[0-9a-f]$|led)":
type: object
- allOf:
- - $ref: common.yaml#
+ $ref: common.yaml#
properties:
gpios:
diff --git a/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml b/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml
new file mode 100644
index 000000000000..ecf7ac9ab067
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-sgm3140.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-sgm3140.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SGMICRO SGM3140 500mA Buck/Boost Charge Pump LED Driver
+
+maintainers:
+ - Luca Weiss <luca@z3ntu.xyz>
+
+description: |
+ The SGM3140 is a current-regulated charge pump which can regulate two current
+ levels for Flash and Torch modes.
+
+ The data sheet can be found at:
+ http://www.sg-micro.com/uploads/soft/20190626/1561535688.pdf
+
+properties:
+ compatible:
+ const: sgmicro,sgm3140
+
+ enable-gpios:
+ maxItems: 1
+ description: A connection to the 'EN' pin.
+
+ flash-gpios:
+ maxItems: 1
+ description: A connection to the 'FLASH' pin.
+
+ vin-supply:
+ description: Regulator providing power to the 'VIN' pin.
+
+ led:
+ type: object
+ allOf:
+ - $ref: common.yaml#
+
+required:
+ - compatible
+ - flash-gpios
+ - enable-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "sgmicro,sgm3140";
+ flash-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
+ enable-gpios = <&pio 2 3 GPIO_ACTIVE_HIGH>; /* PC3 */
+ vin-supply = <&reg_dcdc1>;
+
+ sgm3140_flash: led {
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ flash-max-timeout-us = <250000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
index 90edf9d33b33..86a37c92b834 100644
--- a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
+++ b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
@@ -34,11 +34,10 @@ patternProperties:
#- $ref: "common.yaml#"
rohm,led-compatible:
description: LED identification string
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum:
- - bd71828-ambled
- - bd71828-grnled
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum:
+ - bd71828-ambled
+ - bd71828-grnled
function:
description:
Purpose of LED as defined in dt-bindings/leds/common.h
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
deleted file mode 100644
index 31486c9f6443..000000000000
--- a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-NXP i.MX Messaging Unit (MU)
---------------------------------------------------------------------
-
-The Messaging Unit module enables two processors within the SoC to
-communicate and coordinate by passing messages (e.g. data, status
-and control) through the MU interface. The MU also provides the ability
-for one processor to signal the other processor using interrupts.
-
-Because the MU manages the messaging between processors, the MU uses
-different clocks (from each side of the different peripheral buses).
-Therefore, the MU must synchronize the accesses from one side to the
-other. The MU accomplishes synchronization using two sets of matching
-registers (Processor A-facing, Processor B-facing).
-
-Messaging Unit Device Node:
-=============================
-
-Required properties:
--------------------
-- compatible : should be "fsl,<chip>-mu", the supported chips include
- imx6sx, imx7s, imx8qxp, imx8qm.
- The "fsl,imx6sx-mu" compatible is seen as generic and should
- be included together with SoC specific compatible.
- There is a version 1.0 MU on imx7ulp, use "fsl,imx7ulp-mu"
- compatible to support it.
- To communicate with i.MX8 SCU, "fsl,imx8-mu-scu" could be
- used for fast IPC
-- reg : Should contain the registers location and length
-- interrupts : Interrupt number. The interrupt specifier format depends
- on the interrupt controller parent.
-- #mbox-cells: Must be 2.
- <&phandle type channel>
- phandle : Label name of controller
- type : Channel type
- channel : Channel number
-
- This MU support 4 type of unidirectional channels, each type
- has 4 channels. A total of 16 channels. Following types are
- supported:
- 0 - TX channel with 32bit transmit register and IRQ transmit
- acknowledgment support.
- 1 - RX channel with 32bit receive register and IRQ support
- 2 - TX doorbell channel. Without own register and no ACK support.
- 3 - RX doorbell channel.
-
-Optional properties:
--------------------
-- clocks : phandle to the input clock.
-- fsl,mu-side-b : Should be set for side B MU.
-
-Examples:
---------
-lsio_mu0: mailbox@5d1b0000 {
- compatible = "fsl,imx8qxp-mu";
- reg = <0x0 0x5d1b0000 0x0 0x10000>;
- interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
-};
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
new file mode 100644
index 000000000000..3b35eb5ac3f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/fsl,mu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX Messaging Unit (MU)
+
+maintainers:
+ - Dong Aisheng <aisheng.dong@nxp.com>
+
+description: |
+ The Messaging Unit module enables two processors within the SoC to
+ communicate and coordinate by passing messages (e.g. data, status
+ and control) through the MU interface. The MU also provides the ability
+ for one processor to signal the other processor using interrupts.
+
+ Because the MU manages the messaging between processors, the MU uses
+ different clocks (from each side of the different peripheral buses).
+ Therefore, the MU must synchronize the accesses from one side to the
+ other. The MU accomplishes synchronization using two sets of matching
+ registers (Processor A-facing, Processor B-facing).
+
+properties:
+ compatible:
+ oneOf:
+ - const: fsl,imx6sx-mu
+ - const: fsl,imx7ulp-mu
+ - const: fsl,imx8-mu-scu
+ - items:
+ - enum:
+ - fsl,imx7s-mu
+ - fsl,imx8mq-mu
+ - fsl,imx8mm-mu
+ - fsl,imx8mn-mu
+ - fsl,imx8mp-mu
+ - fsl,imx8qxp-mu
+ - const: fsl,imx6sx-mu
+ - description: To communicate with i.MX8 SCU with fast IPC
+ items:
+ - const: fsl,imx8qxp-mu
+ - const: fsl,imx8-mu-scu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#mbox-cells":
+ description: |
+ <&phandle type channel>
+ phandle : Label name of controller
+ type : Channel type
+ channel : Channel number
+
+ This MU supports 4 types of unidirectional channels; each type
+ has 4 channels, for a total of 16 channels. The following types are
+ supported:
+ 0 - TX channel with 32bit transmit register and IRQ transmit
+ acknowledgment support.
+ 1 - RX channel with 32bit receive register and IRQ support
+ 2 - TX doorbell channel. Without own register and no ACK support.
+ 3 - RX doorbell channel.
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+ fsl,mu-side-b:
+ description: boolean, if present, means it is for side B MU.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ mailbox@5d1b0000 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ reg = <0x5d1b0000 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
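A hedged sketch of a mailbox client using the two-cell specifier documented above; the client node, the &lsio_mu0 label and the mbox-names are hypothetical, only the cell meanings (type first, channel second) come from the binding.

    client {
        /* first cell: channel type (0 TX, 1 RX, 2 TX doorbell, 3 RX doorbell),
         * second cell: channel number (0..3) */
        mboxes = <&lsio_mu0 0 1>, /* TX channel 1 */
                 <&lsio_mu0 1 1>, /* RX channel 1 */
                 <&lsio_mu0 3 3>; /* RX doorbell channel 3 */
        mbox-names = "tx", "rx", "rxdb";
    };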
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
deleted file mode 100644
index beec612dbe6a..000000000000
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-Binding for the Qualcomm APCS global block
-==========================================
-
-This binding describes the APCS "global" block found in various Qualcomm
-platforms.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be one of:
- "qcom,msm8916-apcs-kpss-global",
- "qcom,msm8996-apcs-hmss-global"
- "qcom,msm8998-apcs-hmss-global"
- "qcom,qcs404-apcs-apps-global"
- "qcom,sc7180-apss-shared"
- "qcom,sdm845-apss-shared"
- "qcom,sm8150-apss-shared"
- "qcom,ipq8074-apcs-apps-global"
-
-- reg:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: must specify the base address and size of the global block
-
-- clocks:
- Usage: required if #clock-names property is present
- Value type: <phandle array>
- Definition: phandles to the two parent clocks of the clock driver.
-
-- #mbox-cells:
- Usage: required
- Value type: <u32>
- Definition: as described in mailbox.txt, must be 1
-
-- #clock-cells:
- Usage: optional
- Value type: <u32>
- Definition: as described in clock.txt, must be 0
-
-- clock-names:
- Usage: required if the platform data based clock driver needs to
- retrieve the parent clock names from device tree.
- This will requires two mandatory clocks to be defined.
- Value type: <string-array>
- Definition: must be "pll" and "aux"
-
-= EXAMPLE
-The following example describes the APCS HMSS found in MSM8996 and part of the
-GLINK RPM referencing the "rpm_hlos" doorbell therein.
-
- apcs_glb: mailbox@9820000 {
- compatible = "qcom,msm8996-apcs-hmss-global";
- reg = <0x9820000 0x1000>;
-
- #mbox-cells = <1>;
- };
-
- rpm-glink {
- compatible = "qcom,glink-rpm";
-
- interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
-
- qcom,rpm-msg-ram = <&rpm_msg_ram>;
-
- mboxes = <&apcs_glb 0>;
- mbox-names = "rpm_hlos";
- };
-
-Below is another example of the APCS binding on MSM8916 platforms:
-
- apcs: mailbox@b011000 {
- compatible = "qcom,msm8916-apcs-kpss-global";
- reg = <0xb011000 0x1000>;
- #mbox-cells = <1>;
- clocks = <&a53pll>;
- #clock-cells = <0>;
- };
-
-Below is another example of the APCS binding on QCS404 platforms:
-
- apcs_glb: mailbox@b011000 {
- compatible = "qcom,qcs404-apcs-apps-global", "syscon";
- reg = <0x0b011000 0x1000>;
- #mbox-cells = <1>;
- clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
- clock-names = "pll", "aux";
- #clock-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
new file mode 100644
index 000000000000..12eff942708d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mailbox/qcom,apcs-kpss-global.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm APCS global block bindings
+
+description:
+ This binding describes the APCS "global" block found in various Qualcomm
+ platforms.
+
+maintainers:
+ - Sivaprakash Murugesan <sivaprak@codeaurora.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq8074-apcs-apps-global
+ - qcom,msm8916-apcs-kpss-global
+ - qcom,msm8996-apcs-hmss-global
+ - qcom,msm8998-apcs-hmss-global
+ - qcom,qcs404-apcs-apps-global
+ - qcom,sc7180-apss-shared
+ - qcom,sdm845-apss-shared
+ - qcom,sm8150-apss-shared
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: phandles to the parent clocks of the clock driver
+ items:
+ - description: primary pll parent of the clock driver
+ - description: auxiliary parent
+
+ '#mbox-cells':
+ const: 1
+
+ '#clock-cells':
+ const: 0
+
+ clock-names:
+ items:
+ - const: pll
+ - const: aux
+
+required:
+ - compatible
+ - reg
+ - '#mbox-cells'
+
+additionalProperties: false
+
+examples:
+
+ # Example apcs with msm8996
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ mboxes = <&apcs_glb 0>;
+ mbox-names = "rpm_hlos";
+ };
+
+ # Example apcs with qcs404
+ - |
+ #define GCC_APSS_AHB_CLK_SRC 1
+ #define GCC_GPLL0_AO_OUT_MAIN 123
+ apcs: mailbox@b011000 {
+ compatible = "qcom,qcs404-apcs-apps-global";
+ reg = <0x0b011000 0x1000>;
+ #mbox-cells = <1>;
+ clocks = <&apcs_hfpll>, <&gcc GCC_GPLL0_AO_OUT_MAIN>;
+ clock-names = "pll", "aux";
+ #clock-cells = <0>;
+ };
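Because the block is also a clock provider with '#clock-cells' of 0, consumers reference it by bare phandle; a hypothetical consumer fragment (the node below is not part of this binding):

    clock-consumer {
        /* no specifier cells, per #clock-cells = <0> */
        clocks = <&apcs>;
    };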
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
new file mode 100644
index 000000000000..4ac2123d9193
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mailbox/qcom-ipcc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+ The Inter-Processor Communication Controller (IPCC) is a centralized hardware
+ block used to route interrupts across various subsystems. It involves a three-level
+ addressing scheme called protocol, client and signal. For example, consider an
+ entity on the Application Processor Subsystem (APSS) that wants to listen to
+ Modem's interrupts via Shared Memory Point to Point (SMP2P) interface. In such
+ a case, the client would be Modem (client-id is 2) and the signal would be
+ SMP2P (signal-id is 2). The SMP2P itself falls under the Multiprocessor (MPROC)
+ protocol (protocol-id is 0). Refer include/dt-bindings/mailbox/qcom-ipcc.h
+ for the list of such IDs.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,sm8250-ipcc
+ - const: qcom,ipcc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 3
+ description:
+ The first cell is the client-id, the second cell is the signal-id and the
+ third cell is the interrupt type.
+
+ "#mbox-cells":
+ const: 2
+ description:
+ The first cell is the client-id, and the second cell is the signal-id.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+ - "#mbox-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/mailbox/qcom-ipcc.h>
+
+ mailbox@408000 {
+ compatible = "qcom,sm8250-ipcc", "qcom,ipcc";
+ reg = <0x408000 0x1000>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #mbox-cells = <2>;
+ };
+
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ interrupts-extended = <&ipcc_mproc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc_mproc IPCC_CLIENT_MPSS IPCC_MPROC_SIGNAL_SMP2P>;
+
+ /* Other SMP2P fields */
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml
new file mode 100644
index 000000000000..0f7451b42d7e
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/sprd-mailbox.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mailbox/sprd-mailbox.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Spreadtrum mailbox controller bindings
+
+maintainers:
+ - Orson Zhai <orsonzhai@gmail.com>
+ - Baolin Wang <baolin.wang7@gmail.com>
+ - Chunyan Zhang <zhang.lyra@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - sprd,sc9860-mailbox
+
+ reg:
+ items:
+ - description: inbox registers' base address
+ - description: outbox registers' base address
+
+ interrupts:
+ items:
+ - description: inbox interrupt
+ - description: outbox interrupt
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: enable
+
+ "#mbox-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#mbox-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ mailbox: mailbox@400a0000 {
+ compatible = "sprd,sc9860-mailbox";
+ reg = <0 0x400a0000 0 0x8000>, <0 0x400a8000 0 0x8000>;
+ #mbox-cells = <1>;
+ clock-names = "enable";
+ clocks = <&aon_gate 53>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
index 5b13d6672996..3b7ab61a144f 100644
--- a/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
+++ b/Documentation/devicetree/bindings/mailbox/st,stm32-ipcc.yaml
@@ -24,7 +24,7 @@ properties:
maxItems: 1
clocks:
- maxItems: 1
+ maxItems: 1
interrupts:
items:
@@ -49,9 +49,8 @@ properties:
st,proc-id:
description: Processor id using the mailbox (0 or 1)
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
index 8453ee340b9f..09318830db47 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
@@ -20,11 +20,11 @@ properties:
- const: allwinner,sun4i-a10-csi1
- const: allwinner,sun7i-a20-csi0
- items:
- - const: allwinner,sun7i-a20-csi1
- - const: allwinner,sun4i-a10-csi1
+ - const: allwinner,sun7i-a20-csi1
+ - const: allwinner,sun4i-a10-csi1
- items:
- - const: allwinner,sun8i-r40-csi0
- - const: allwinner,sun7i-a20-csi0
+ - const: allwinner,sun8i-r40-csi0
+ - const: allwinner,sun7i-a20-csi0
reg:
maxItems: 1
@@ -35,24 +35,24 @@ properties:
clocks:
oneOf:
- items:
- - description: The CSI interface clock
- - description: The CSI DRAM clock
+ - description: The CSI interface clock
+ - description: The CSI DRAM clock
- items:
- - description: The CSI interface clock
- - description: The CSI ISP clock
- - description: The CSI DRAM clock
+ - description: The CSI interface clock
+ - description: The CSI ISP clock
+ - description: The CSI DRAM clock
clock-names:
oneOf:
- items:
- - const: bus
- - const: ram
+ - const: bus
+ - const: ram
- items:
- - const: bus
- - const: isp
- - const: ram
+ - const: bus
+ - const: isp
+ - const: ram
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
index 37d77e065491..b902495d278b 100644
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -29,14 +29,14 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - amlogic,gxbb-vdec # GXBB (S905)
- - amlogic,gxl-vdec # GXL (S905X, S905D)
- - amlogic,gxm-vdec # GXM (S912)
- - const: amlogic,gx-vdec
+ - enum:
+ - amlogic,gxbb-vdec # GXBB (S905)
+ - amlogic,gxl-vdec # GXL (S905X, S905D)
+ - amlogic,gxm-vdec # GXM (S912)
+ - const: amlogic,gx-vdec
- enum:
- - amlogic,g12a-vdec # G12A (S905X2, S905D2)
- - amlogic,sm1-vdec # SM1 (S905X3, S905D3)
+ - amlogic,g12a-vdec # G12A (S905X2, S905D2)
+ - amlogic,sm1-vdec # SM1 (S905X3, S905D3)
interrupts:
minItems: 2
@@ -77,13 +77,11 @@ properties:
amlogic,ao-sysctrl:
description: should point to the AOBUS sysctrl node
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
amlogic,canvas:
description: should point to a canvas provider node
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
index 95ffa8bc0533..d93aea6a0258 100644
--- a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
@@ -35,8 +35,7 @@ properties:
hdmi-phandle:
description: phandle to the HDMI controller
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
allOf:
- if:
@@ -88,7 +87,7 @@ examples:
- |
cec_AO: cec@100 {
compatible = "amlogic,meson-gx-ao-cec";
- reg = <0x0 0x00100 0x0 0x14>;
+ reg = <0x00100 0x14>;
interrupts = <199>;
clocks = <&clkc_cec>;
clock-names = "core";
diff --git a/Documentation/devicetree/bindings/media/i2c/imx219.yaml b/Documentation/devicetree/bindings/media/i2c/imx219.yaml
index 32d6b693274f..dfc4d29a4f04 100644
--- a/Documentation/devicetree/bindings/media/i2c/imx219.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/imx219.yaml
@@ -67,8 +67,7 @@ properties:
otherwise it's continuous.
link-frequencies:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint64-array
+ $ref: /schemas/types.yaml#/definitions/uint64-array
description:
Allowed data bus frequencies.
diff --git a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.txt b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.txt
deleted file mode 100644
index 7ec2c8c8a3b9..000000000000
--- a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Marvell MMP2 camera host interface
-
-Required properties:
- - compatible: Should be "marvell,mmp2-ccic".
- - reg: Register base and size.
- - interrupts: The interrupt number.
- - #clock-cells: Must be 0.
-
-Optional properties:
- - clocks: Reference to the input clock as specified by
- Documentation/devicetree/bindings/clock/clock-bindings.txt.
- - clock-names: Names of the clocks used; "axi" for the AXI bus interface,
- "func" for the peripheral clock and "phy" for the parallel
- video bus interface.
- - clock-output-names: Optional clock source for sensors. Shall be "mclk".
-
-Required subnodes:
- - port: The parallel bus interface port with a single endpoint linked to
- the sensor's endpoint as described in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Required endpoint properties:
- - bus-type: data bus type, <5> or <6> for Parallel or Bt.656 respectively
- - pclk-sample: pixel clock polarity
- - hsync-active: horizontal synchronization polarity (only required for
- parallel bus)
- - vsync-active: vertical synchronization polarity (only required for
- parallel bus)
-
-Example:
-
- camera0: camera@d420a000 {
- compatible = "marvell,mmp2-ccic";
- reg = <0xd420a000 0x800>;
- interrupts = <42>;
- clocks = <&soc_clocks MMP2_CLK_CCIC0>;
- clock-names = "axi";
- #clock-cells = <0>;
- clock-output-names = "mclk";
-
- port {
- camera0_0: endpoint {
- remote-endpoint = <&ov7670_0>;
- bus-type = <5>; /* Parallel */
- hsync-active = <1>; /* Active high */
- vsync-active = <1>; /* Active high */
- pclk-sample = <0>; /* Falling */
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
new file mode 100644
index 000000000000..49bff738aca5
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/marvell,mmp2-ccic.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019,2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/marvell,mmp2-ccic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell MMP2 camera host interface bindings
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+ $nodename:
+ pattern: '^camera@[a-f0-9]+$'
+
+ compatible:
+ const: marvell,mmp2-ccic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ port:
+ type: object
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ type: object
+ additionalProperties: false
+
+ # Properties described in
+ # Documentation/devicetree/bindings/media/video-interfaces.txt
+ properties:
+ remote-endpoint: true
+ hsync-active: true
+ vsync-active: true
+ pclk-sample: true
+ bus-type: true
+
+ required:
+ - remote-endpoint
+
+ required:
+ - endpoint
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+ items:
+ - description: AXI bus interface clock
+ - description: Peripheral clock
+ - description: Parallel video bus interface clock
+
+ clock-names:
+ const: axi
+
+ '#clock-cells':
+ const: 0
+
+ clock-output-names:
+ const: mclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/marvell,mmp2.h>
+
+ camera@d420a000 {
+ compatible = "marvell,mmp2-ccic";
+ reg = <0xd420a000 0x800>;
+ interrupts = <42>;
+ clocks = <&soc_clocks MMP2_CLK_CCIC0>;
+ clock-names = "axi";
+ #clock-cells = <0>;
+ clock-output-names = "mclk";
+
+ port {
+ camera0_0: endpoint {
+ remote-endpoint = <&ov7670_0>;
+ bus-type = <5>; /* Parallel */
+ hsync-active = <1>; /* Active high */
+ vsync-active = <1>; /* Active high */
+ pclk-sample = <0>; /* Falling */
+ };
+ };
+ };
+
+...
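To show how the parallel-bus link in the example is completed on the sensor side, a hypothetical counterpart node is sketched below; the I2C bus, the sensor address and the camera0 label on the CCIC node are assumptions, only the endpoint pairing mirrors remote-endpoint = <&ov7670_0> above.

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        camera-sensor@21 {
            compatible = "ovti,ov7670";
            reg = <0x21>;
            /* "mclk" output of the CCIC, assuming it carries a camera0
             * label; #clock-cells = <0>, so no specifier cells */
            clocks = <&camera0>;

            port {
                ov7670_0: endpoint {
                    remote-endpoint = <&camera0_0>;
                };
            };
        };
    };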
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
index 764affa4877e..55f2d67ae34e 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7180-venus.yaml
@@ -115,7 +115,7 @@ examples:
venus: video-codec@aa00000 {
compatible = "qcom,sc7180-venus";
- reg = <0 0x0aa00000 0 0xff000>;
+ reg = <0x0aa00000 0xff000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&videocc VENUS_GDSC>,
<&videocc VCODEC0_GDSC>;
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
index 8552f4ab907e..157dff8057e9 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
@@ -110,7 +110,7 @@ examples:
video-codec@aa00000 {
compatible = "qcom,sdm845-venus-v2";
- reg = <0 0x0aa00000 0 0xff000>;
+ reg = <0x0aa00000 0xff000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
<&videocc VIDEO_CC_VENUS_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm845-venus.yaml b/Documentation/devicetree/bindings/media/qcom,sdm845-venus.yaml
index 05cabe4e893a..084e45e2df62 100644
--- a/Documentation/devicetree/bindings/media/qcom,sdm845-venus.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sdm845-venus.yaml
@@ -127,7 +127,7 @@ examples:
video-codec@aa00000 {
compatible = "qcom,sdm845-venus";
- reg = <0 0x0aa00000 0 0xff000>;
+ reg = <0x0aa00000 0xff000>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
<&videocc VIDEO_CC_VENUS_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/media/rc.yaml b/Documentation/devicetree/bindings/media/rc.yaml
index b27c9385d490..ded2ac43237d 100644
--- a/Documentation/devicetree/bindings/media/rc.yaml
+++ b/Documentation/devicetree/bindings/media/rc.yaml
@@ -18,136 +18,135 @@ properties:
description:
Specifies the scancode/key mapping table defined in-kernel for
the remote controller.
- allOf:
- - $ref: '/schemas/types.yaml#/definitions/string'
- - enum:
- - rc-adstech-dvb-t-pci
- - rc-alink-dtu-m
- - rc-anysee
- - rc-apac-viewcomp
- - rc-astrometa-t2hybrid
- - rc-asus-pc39
- - rc-asus-ps3-100
- - rc-ati-tv-wonder-hd-600
- - rc-ati-x10
- - rc-avermedia
- - rc-avermedia-a16d
- - rc-avermedia-cardbus
- - rc-avermedia-dvbt
- - rc-avermedia-m135a
- - rc-avermedia-m733a-rm-k6
- - rc-avermedia-rm-ks
- - rc-avertv-303
- - rc-azurewave-ad-tu700
- - rc-beelink-gs1
- - rc-behold
- - rc-behold-columbus
- - rc-budget-ci-old
- - rc-cec
- - rc-cinergy
- - rc-cinergy-1400
- - rc-d680-dmb
- - rc-delock-61959
- - rc-dib0700-nec
- - rc-dib0700-rc5
- - rc-digitalnow-tinytwin
- - rc-digittrade
- - rc-dm1105-nec
- - rc-dntv-live-dvb-t
- - rc-dntv-live-dvbt-pro
- - rc-dtt200u
- - rc-dvbsky
- - rc-dvico-mce
- - rc-dvico-portable
- - rc-em-terratec
- - rc-empty
- - rc-encore-enltv
- - rc-encore-enltv-fm53
- - rc-encore-enltv2
- - rc-evga-indtube
- - rc-eztv
- - rc-flydvb
- - rc-flyvideo
- - rc-fusionhdtv-mce
- - rc-gadmei-rm008z
- - rc-geekbox
- - rc-genius-tvgo-a11mce
- - rc-gotview7135
- - rc-hauppauge
- - rc-hisi-poplar
- - rc-hisi-tv-demo
- - rc-imon-mce
- - rc-imon-pad
- - rc-imon-rsc
- - rc-iodata-bctv7e
- - rc-it913x-v1
- - rc-it913x-v2
- - rc-kaiomy
- - rc-khadas
- - rc-kworld-315u
- - rc-kworld-pc150u
- - rc-kworld-plus-tv-analog
- - rc-leadtek-y04g0051
- - rc-lme2510
- - rc-manli
- - rc-medion-x10
- - rc-medion-x10-digitainer
- - rc-medion-x10-or2x
- - rc-msi-digivox-ii
- - rc-msi-digivox-iii
- - rc-msi-tvanywhere
- - rc-msi-tvanywhere-plus
- - rc-nebula
- - rc-nec-terratec-cinergy-xs
- - rc-norwood
- - rc-npgtech
- - rc-odroid
- - rc-pctv-sedna
- - rc-pinnacle-color
- - rc-pinnacle-grey
- - rc-pinnacle-pctv-hd
- - rc-pixelview
- - rc-pixelview-002t
- - rc-pixelview-mk12
- - rc-pixelview-new
- - rc-powercolor-real-angel
- - rc-proteus-2309
- - rc-purpletv
- - rc-pv951
- - rc-rc5-tv
- - rc-rc6-mce
- - rc-real-audio-220-32-keys
- - rc-reddo
- - rc-snapstream-firefly
- - rc-streamzap
- - rc-su3000
- - rc-tango
- - rc-tanix-tx3mini
- - rc-tanix-tx5max
- - rc-tbs-nec
- - rc-technisat-ts35
- - rc-technisat-usb2
- - rc-terratec-cinergy-c-pci
- - rc-terratec-cinergy-s2-hd
- - rc-terratec-cinergy-xs
- - rc-terratec-slim
- - rc-terratec-slim-2
- - rc-tevii-nec
- - rc-tivo
- - rc-total-media-in-hand
- - rc-total-media-in-hand-02
- - rc-trekstor
- - rc-tt-1500
- - rc-twinhan-dtv-cab-ci
- - rc-twinhan1027
- - rc-videomate-k100
- - rc-videomate-s350
- - rc-videomate-tv-pvr
- - rc-videostrong-kii-pro
- - rc-wetek-hub
- - rc-wetek-play2
- - rc-winfast
- - rc-winfast-usbii-deluxe
- - rc-x96max
- - rc-xbox-dvd
- - rc-zx-irdec
+ $ref: '/schemas/types.yaml#/definitions/string'
+ enum:
+ - rc-adstech-dvb-t-pci
+ - rc-alink-dtu-m
+ - rc-anysee
+ - rc-apac-viewcomp
+ - rc-astrometa-t2hybrid
+ - rc-asus-pc39
+ - rc-asus-ps3-100
+ - rc-ati-tv-wonder-hd-600
+ - rc-ati-x10
+ - rc-avermedia
+ - rc-avermedia-a16d
+ - rc-avermedia-cardbus
+ - rc-avermedia-dvbt
+ - rc-avermedia-m135a
+ - rc-avermedia-m733a-rm-k6
+ - rc-avermedia-rm-ks
+ - rc-avertv-303
+ - rc-azurewave-ad-tu700
+ - rc-beelink-gs1
+ - rc-behold
+ - rc-behold-columbus
+ - rc-budget-ci-old
+ - rc-cec
+ - rc-cinergy
+ - rc-cinergy-1400
+ - rc-d680-dmb
+ - rc-delock-61959
+ - rc-dib0700-nec
+ - rc-dib0700-rc5
+ - rc-digitalnow-tinytwin
+ - rc-digittrade
+ - rc-dm1105-nec
+ - rc-dntv-live-dvb-t
+ - rc-dntv-live-dvbt-pro
+ - rc-dtt200u
+ - rc-dvbsky
+ - rc-dvico-mce
+ - rc-dvico-portable
+ - rc-em-terratec
+ - rc-empty
+ - rc-encore-enltv
+ - rc-encore-enltv-fm53
+ - rc-encore-enltv2
+ - rc-evga-indtube
+ - rc-eztv
+ - rc-flydvb
+ - rc-flyvideo
+ - rc-fusionhdtv-mce
+ - rc-gadmei-rm008z
+ - rc-geekbox
+ - rc-genius-tvgo-a11mce
+ - rc-gotview7135
+ - rc-hauppauge
+ - rc-hisi-poplar
+ - rc-hisi-tv-demo
+ - rc-imon-mce
+ - rc-imon-pad
+ - rc-imon-rsc
+ - rc-iodata-bctv7e
+ - rc-it913x-v1
+ - rc-it913x-v2
+ - rc-kaiomy
+ - rc-khadas
+ - rc-kworld-315u
+ - rc-kworld-pc150u
+ - rc-kworld-plus-tv-analog
+ - rc-leadtek-y04g0051
+ - rc-lme2510
+ - rc-manli
+ - rc-medion-x10
+ - rc-medion-x10-digitainer
+ - rc-medion-x10-or2x
+ - rc-msi-digivox-ii
+ - rc-msi-digivox-iii
+ - rc-msi-tvanywhere
+ - rc-msi-tvanywhere-plus
+ - rc-nebula
+ - rc-nec-terratec-cinergy-xs
+ - rc-norwood
+ - rc-npgtech
+ - rc-odroid
+ - rc-pctv-sedna
+ - rc-pinnacle-color
+ - rc-pinnacle-grey
+ - rc-pinnacle-pctv-hd
+ - rc-pixelview
+ - rc-pixelview-002t
+ - rc-pixelview-mk12
+ - rc-pixelview-new
+ - rc-powercolor-real-angel
+ - rc-proteus-2309
+ - rc-purpletv
+ - rc-pv951
+ - rc-rc5-tv
+ - rc-rc6-mce
+ - rc-real-audio-220-32-keys
+ - rc-reddo
+ - rc-snapstream-firefly
+ - rc-streamzap
+ - rc-su3000
+ - rc-tango
+ - rc-tanix-tx3mini
+ - rc-tanix-tx5max
+ - rc-tbs-nec
+ - rc-technisat-ts35
+ - rc-technisat-usb2
+ - rc-terratec-cinergy-c-pci
+ - rc-terratec-cinergy-s2-hd
+ - rc-terratec-cinergy-xs
+ - rc-terratec-slim
+ - rc-terratec-slim-2
+ - rc-tevii-nec
+ - rc-tivo
+ - rc-total-media-in-hand
+ - rc-total-media-in-hand-02
+ - rc-trekstor
+ - rc-tt-1500
+ - rc-twinhan-dtv-cab-ci
+ - rc-twinhan1027
+ - rc-videomate-k100
+ - rc-videomate-s350
+ - rc-videomate-tv-pvr
+ - rc-videostrong-kii-pro
+ - rc-wetek-hub
+ - rc-wetek-play2
+ - rc-winfast
+ - rc-winfast-usbii-deluxe
+ - rc-x96max
+ - rc-xbox-dvd
+ - rc-zx-irdec
diff --git a/Documentation/devicetree/bindings/media/renesas,ceu.yaml b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
index fcb5f13704a5..c7e1e4fe67e6 100644
--- a/Documentation/devicetree/bindings/media/renesas,ceu.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,ceu.yaml
@@ -27,28 +27,34 @@ properties:
interrupts:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
port:
type: object
additionalProperties: false
properties:
- endpoint:
- type: object
- additionalProperties: false
+ endpoint:
+ type: object
+ additionalProperties: false
# Properties described in
# Documentation/devicetree/bindings/media/video-interfaces.txt
- properties:
- remote-endpoint: true
- hsync-active: true
- vsync-active: true
- field-even-active: false
- bus-width:
- enum: [8, 16]
- default: 8
-
- required:
- - remote-endpoint
+ properties:
+ remote-endpoint: true
+ hsync-active: true
+ vsync-active: true
+ field-even-active: false
+ bus-width:
+ enum: [8, 16]
+ default: 8
+
+ required:
+ - remote-endpoint
required:
- endpoint
@@ -57,6 +63,8 @@ required:
- compatible
- reg
- interrupts
+ - clocks
+ - power-domains
- port
additionalProperties: false
@@ -64,11 +72,14 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/r7s72100-clock.h>
ceu: ceu@e8210000 {
reg = <0xe8210000 0x209c>;
compatible = "renesas,r7s72100-ceu";
interrupts = <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp6_clks R7S72100_CLK_CEU>;
+ power-domains = <&cpg_clocks>;
port {
ceu_in: endpoint {
diff --git a/Documentation/devicetree/bindings/media/renesas,csi2.yaml b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
index 408442a0c389..c9e068231d4b 100644
--- a/Documentation/devicetree/bindings/media/renesas,csi2.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,csi2.yaml
@@ -135,7 +135,7 @@ examples:
csi20: csi2@fea80000 {
compatible = "renesas,r8a7796-csi2";
- reg = <0 0xfea80000 0 0x10000>;
+ reg = <0xfea80000 0x10000>;
interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 714>;
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
index 1ec947b4781f..53c0a7238bac 100644
--- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
+++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
@@ -116,10 +116,9 @@ properties:
#The per-board settings for Gen3 and RZ/G2 platforms:
renesas,id:
description: VIN channel number
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 15
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
ports:
type: object
@@ -261,13 +260,13 @@ properties:
anyOf:
- required:
- - endpoint@0
+ - endpoint@0
- required:
- - endpoint@1
+ - endpoint@1
- required:
- - endpoint@2
+ - endpoint@2
- required:
- - endpoint@3
+ - endpoint@3
additionalProperties: false
@@ -307,7 +306,7 @@ examples:
vin1: vin@e6ef1000 {
compatible = "renesas,vin-r8a7790",
"renesas,rcar-gen2-vin";
- reg = <0 0xe6ef1000 0 0x1000>;
+ reg = <0xe6ef1000 0x1000>;
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 810>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
@@ -329,7 +328,7 @@ examples:
vin0: video@e6ef0000 {
compatible = "renesas,vin-r8a7795";
- reg = <0 0xe6ef0000 0 0x1000>;
+ reg = <0xe6ef0000 0x1000>;
interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 811>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
@@ -366,7 +365,7 @@ examples:
vin2: video@e6ef2000 {
compatible = "renesas,vin-r8a77970";
- reg = <0 0xe6ef2000 0 0x1000>;
+ reg = <0xe6ef2000 0x1000>;
interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 809>;
power-domains = <&sysc R8A77970_PD_ALWAYS_ON>;
diff --git a/Documentation/devicetree/bindings/media/ti,vpe.yaml b/Documentation/devicetree/bindings/media/ti,vpe.yaml
index f3a8a350e85f..ef473f287399 100644
--- a/Documentation/devicetree/bindings/media/ti,vpe.yaml
+++ b/Documentation/devicetree/bindings/media/ti,vpe.yaml
@@ -17,7 +17,7 @@ description: |-
properties:
compatible:
- const: ti,dra7-vpe
+ const: ti,dra7-vpe
reg:
items:
diff --git a/Documentation/devicetree/bindings/memory-controllers/baikal,bt1-l2-ctl.yaml b/Documentation/devicetree/bindings/memory-controllers/baikal,bt1-l2-ctl.yaml
new file mode 100644
index 000000000000..1fca282f64a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/baikal,bt1-l2-ctl.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/baikal,bt1-l2-ctl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Baikal-T1 L2-cache Control Block
+
+maintainers:
+ - Serge Semin <fancer.lancer@gmail.com>
+
+description: |
+ By means of the System Controller, the Baikal-T1 SoC exposes a few settings
+ to tune the MIPS P5600 CM2 L2 cache performance. In particular it's possible
+ to change the Tag, Data and Way-select RAM access latencies. Baikal-T1
+ L2-cache controller block is responsible for the tuning. Its DT node is
+ supposed to be a child of the system controller.
+
+properties:
+ compatible:
+ const: baikal,bt1-l2-ctl
+
+ reg:
+ maxItems: 1
+
+ baikal,l2-ws-latency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Cycles of latency for Way-select RAM accesses
+ default: 0
+ minimum: 0
+ maximum: 3
+
+ baikal,l2-tag-latency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Cycles of latency for Tag RAM accesses
+ default: 0
+ minimum: 0
+ maximum: 3
+
+ baikal,l2-data-latency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Cycles of latency for Data RAM accesses
+ default: 1
+ minimum: 0
+ maximum: 3
+
+additionalProperties: false
+
+required:
+ - compatible
+
+examples:
+ - |
+ l2@1f04d028 {
+ compatible = "baikal,bt1-l2-ctl";
+ reg = <0x1f04d028 0x004>;
+
+ baikal,l2-ws-latency = <1>;
+ baikal,l2-tag-latency = <1>;
+ baikal,l2-data-latency = <2>;
+ };
+...
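The description states the node is supposed to be a child of the system controller; a sketch of that placement follows. The parent's compatible strings and register window are assumptions for illustration only.

    syscon@1f04d000 {
        compatible = "baikal,bt1-sys-con", "syscon", "simple-mfd";
        reg = <0x1f04d000 0x1000>;
        #address-cells = <1>;
        #size-cells = <1>;
        ranges;

        l2@1f04d028 {
            compatible = "baikal,bt1-l2-ctl";
            reg = <0x1f04d028 0x004>;

            baikal,l2-ws-latency = <1>;
            baikal,l2-tag-latency = <1>;
            baikal,l2-data-latency = <2>;
        };
    };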
diff --git a/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.txt b/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.txt
deleted file mode 100644
index 049675944b78..000000000000
--- a/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Calxeda DDR memory controller
-
-Properties:
-- compatible : Should be:
- - "calxeda,hb-ddr-ctrl" for ECX-1000
- - "calxeda,ecx-2000-ddr-ctrl" for ECX-2000
-- reg : Address and size for DDR controller registers.
-- interrupts : Interrupt for DDR controller.
-
-Example:
-
- memory-controller@fff00000 {
- compatible = "calxeda,hb-ddr-ctrl";
- reg = <0xfff00000 0x1000>;
- interrupts = <0 91 4>;
- };
diff --git a/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml b/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml
new file mode 100644
index 000000000000..96d563fd61f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/calxeda-ddr-ctrlr.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/calxeda-ddr-ctrlr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Calxeda DDR memory controller binding
+
+description: |
+ The Calxeda DDR memory controller is initialised and programmed by the
+ firmware, but an OS might want to read its registers for error reporting
+ purposes and to learn about the DRAM topology.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ compatible:
+ enum:
+ - calxeda,hb-ddr-ctrl
+ - calxeda,ecx-2000-ddr-ctrl
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ memory-controller@fff00000 {
+ compatible = "calxeda,hb-ddr-ctrl";
+ reg = <0xfff00000 0x1000>;
+ interrupts = <0 91 4>;
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
index cdfe3f7f0ea9..637e24f0f73b 100644
--- a/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/exynos-srom.yaml
@@ -51,9 +51,7 @@ patternProperties:
maxItems: 1
reg-io-width:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [1, 2]
+ enum: [1, 2]
description:
Data width in bytes (1 or 2). If omitted, default of 1 is used.
@@ -64,11 +62,10 @@ patternProperties:
type: boolean
samsung,srom-timing:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- minItems: 6
- maxItems: 6
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ minItems: 6
+ maxItems: 6
description: |
Array of 6 integers, specifying bank timings in the following order:
Tacp, Tcah, Tcoh, Tacc, Tcos, Tacs.
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml b/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
index c9e6c22cb5be..445e46feda69 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/imx8m-ddrc.yaml
@@ -25,9 +25,9 @@ properties:
compatible:
items:
- enum:
- - fsl,imx8mn-ddrc
- - fsl,imx8mm-ddrc
- - fsl,imx8mq-ddrc
+ - fsl,imx8mn-ddrc
+ - fsl,imx8mm-ddrc
+ - fsl,imx8mq-ddrc
- const: fsl,imx8m-ddrc
reg:
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt b/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt
deleted file mode 100644
index 59b8dcc118ee..000000000000
--- a/Documentation/devicetree/bindings/memory-controllers/ingenic,jz4780-nemc.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-* Ingenic JZ4780 NAND/external memory controller (NEMC)
-
-This file documents the device tree bindings for the NEMC external memory
-controller in Ingenic JZ4780
-
-Required properties:
-- compatible: Should be set to one of:
- "ingenic,jz4740-nemc" (JZ4740)
- "ingenic,jz4780-nemc" (JZ4780)
-- reg: Should specify the NEMC controller registers location and length.
-- clocks: Clock for the NEMC controller.
-- #address-cells: Must be set to 2.
-- #size-cells: Must be set to 1.
-- ranges: A set of ranges for each bank describing the physical memory layout.
- Each should specify the following 4 integer values:
-
- <cs number> 0 <physical address of mapping> <size of mapping>
-
-Each child of the NEMC node describes a device connected to the NEMC.
-
-Required child node properties:
-- reg: Should contain at least one register specifier, given in the following
- format:
-
- <cs number> <offset> <size>
-
- Multiple registers can be specified across multiple banks. This is needed,
- for example, for packaged NAND devices with multiple dies. Such devices
- should be grouped into a single node.
-
-Optional child node properties:
-- ingenic,nemc-bus-width: Specifies the bus width in bits. Defaults to 8 bits.
-- ingenic,nemc-tAS: Address setup time in nanoseconds.
-- ingenic,nemc-tAH: Address hold time in nanoseconds.
-- ingenic,nemc-tBP: Burst pitch time in nanoseconds.
-- ingenic,nemc-tAW: Access wait time in nanoseconds.
-- ingenic,nemc-tSTRV: Static memory recovery time in nanoseconds.
-
-If a child node references multiple banks in its "reg" property, the same value
-for all optional parameters will be configured for all banks. If any optional
-parameters are omitted, they will be left unchanged from whatever they are
-configured to when the NEMC device is probed (which may be the reset value as
-given in the hardware reference manual, or a value configured by the boot
-loader).
-
-Example (NEMC node with a NAND child device attached at CS1):
-
-nemc: nemc@13410000 {
- compatible = "ingenic,jz4780-nemc";
- reg = <0x13410000 0x10000>;
-
- #address-cells = <2>;
- #size-cells = <1>;
-
- ranges = <1 0 0x1b000000 0x1000000
- 2 0 0x1a000000 0x1000000
- 3 0 0x19000000 0x1000000
- 4 0 0x18000000 0x1000000
- 5 0 0x17000000 0x1000000
- 6 0 0x16000000 0x1000000>;
-
- clocks = <&cgu JZ4780_CLK_NEMC>;
-
- nand: nand@1 {
- compatible = "ingenic,jz4780-nand";
- reg = <1 0 0x1000000>;
-
- ingenic,nemc-tAS = <10>;
- ingenic,nemc-tAH = <5>;
- ingenic,nemc-tBP = <10>;
- ingenic,nemc-tAW = <15>;
- ingenic,nemc-tSTRV = <100>;
-
- ...
- };
-};
diff --git a/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
new file mode 100644
index 000000000000..9b478da0c479
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ingenic,nemc.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/ingenic,nemc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs NAND / External Memory Controller (NEMC) devicetree bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ $nodename:
+ pattern: "^memory-controller@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-nemc
+ - ingenic,jz4780-nemc
+ - items:
+ - const: ingenic,jz4725b-nemc
+ - const: ingenic,jz4740-nemc
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 1
+
+ ranges: true
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+patternProperties:
+ ".*@[0-9]+$":
+ type: object
+ properties:
+ reg:
+ minItems: 1
+ maxItems: 255
+
+ ingenic,nemc-bus-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [8, 16]
+ description: Specifies the bus width in bits.
+
+ ingenic,nemc-tAS:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Address setup time in nanoseconds.
+
+ ingenic,nemc-tAH:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Address hold time in nanoseconds.
+
+ ingenic,nemc-tBP:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Burst pitch time in nanoseconds.
+
+ ingenic,nemc-tAW:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Access wait time in nanoseconds.
+
+ ingenic,nemc-tSTRV:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Static memory recovery time in nanoseconds.
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+ #include <dt-bindings/gpio/gpio.h>
+ nemc: memory-controller@13410000 {
+ compatible = "ingenic,jz4780-nemc";
+ reg = <0x13410000 0x10000>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0x1b000000 0x1000000>,
+ <2 0 0x1a000000 0x1000000>,
+ <3 0 0x19000000 0x1000000>,
+ <4 0 0x18000000 0x1000000>,
+ <5 0 0x17000000 0x1000000>,
+ <6 0 0x16000000 0x1000000>;
+
+ clocks = <&cgu JZ4780_CLK_NEMC>;
+
+ ethernet@6 {
+ compatible = "davicom,dm9000";
+ davicom,no-eeprom;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc_cs6>;
+
+ reg = <6 0 1>, /* addr */
+ <6 2 1>; /* data */
+
+ ingenic,nemc-tAS = <15>;
+ ingenic,nemc-tAH = <10>;
+ ingenic,nemc-tBP = <20>;
+ ingenic,nemc-tAW = <50>;
+ ingenic,nemc-tSTRV = <100>;
+
+ reset-gpios = <&gpf 12 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&eth0_power>;
+
+ interrupt-parent = <&gpe>;
+ interrupts = <19 4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
index 3e0a8a92d652..278549f9e051 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
@@ -73,10 +73,9 @@ patternProperties:
timings
nvidia,emc-auto-cal-interval:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
pad calibration interval in microseconds
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 2097151
@@ -136,11 +135,10 @@ patternProperties:
value of the EMC_XM2DQSPADCTRL2 register for this set of timings
nvidia,emc-zcal-cnt-long:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
number of EMC clocks to wait before issuing any commands after
clock change
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 1023
@@ -150,12 +148,11 @@ patternProperties:
value of the EMC_ZCAL_INTERVAL register for this set of timings
nvidia,emc-configuration:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
description:
EMC timing characterization data. These are the registers (see
section "15.6.2 EMC Registers" in the TRM) whose values need to
be specified, according to the board documentation.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
items:
- description: EMC_RC
- description: EMC_RFC
@@ -340,7 +337,7 @@ examples:
mc: memory-controller@70019000 {
compatible = "nvidia,tegra124-mc";
- reg = <0x0 0x70019000 0x0 0x1000>;
+ reg = <0x70019000 0x1000>;
clocks = <&tegra_car TEGRA124_CLK_MC>;
clock-names = "mc";
@@ -352,7 +349,7 @@ examples:
external-memory-controller@7001b000 {
compatible = "nvidia,tegra124-emc";
- reg = <0x0 0x7001b000 0x0 0x1000>;
+ reg = <0x7001b000 0x1000>;
clocks = <&car TEGRA124_CLK_EMC>;
clock-names = "emc";
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
index 22a94b6fdbde..84d0339505b1 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-mc.yaml
@@ -60,8 +60,7 @@ patternProperties:
maximum: 1066000000
nvidia,emem-configuration:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
description: |
Values to be written to the EMEM register block. See section
"15.6.1 MC Registers" in the TRM.
@@ -112,7 +111,7 @@ examples:
- |
memory-controller@70019000 {
compatible = "nvidia,tegra124-mc";
- reg = <0x0 0x70019000 0x0 0x1000>;
+ reg = <0x70019000 0x1000>;
clocks = <&tegra_car 32>;
clock-names = "mc";
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra210-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra210-emc.yaml
new file mode 100644
index 000000000000..49ab09252e52
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra210-emc.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/nvidia,tegra210-emc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra210 SoC External Memory Controller
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+description: |
+ The EMC interfaces with the off-chip SDRAM to service the request stream
+ sent from the memory controller.
+
+properties:
+ compatible:
+ const: nvidia,tegra210-emc
+
+ reg:
+ maxItems: 3
+
+ clocks:
+ items:
+ - description: external memory clock
+
+ clock-names:
+ items:
+ - const: emc
+
+ interrupts:
+ items:
+ - description: EMC general interrupt
+
+ memory-region:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to a reserved memory region describing the table of EMC
+ frequencies trained by the firmware
+
+ nvidia,memory-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle of the memory controller node
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - nvidia,memory-controller
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra210-car.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ emc_table: emc-table@83400000 {
+ compatible = "nvidia,tegra210-emc-table";
+ reg = <0x83400000 0x10000>;
+ };
+ };
+
+ external-memory-controller@7001b000 {
+ compatible = "nvidia,tegra210-emc";
+ reg = <0x7001b000 0x1000>,
+ <0x7001e000 0x1000>,
+ <0x7001f000 0x1000>;
+ clocks = <&tegra_car TEGRA210_CLK_EMC>;
+ clock-names = "emc";
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ memory-region = <&emc_table>;
+ nvidia,memory-controller = <&mc>;
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
index e4135bac6957..112bae2fcbbd 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-emc.yaml
@@ -56,10 +56,9 @@ patternProperties:
maximum: 900000000
nvidia,emc-auto-cal-interval:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
Pad calibration interval in microseconds.
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 2097151
@@ -79,11 +78,10 @@ patternProperties:
Mode Register 0.
nvidia,emc-zcal-cnt-long:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
Number of EMC clocks to wait before issuing any commands after
sending ZCAL_MRW_CMD.
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 1023
@@ -98,12 +96,11 @@ patternProperties:
FBIO "read" FIFO periodic resetting enabled.
nvidia,emc-configuration:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
description:
EMC timing characterization data. These are the registers
(see section "18.13.2 EMC Registers" in the TRM) whose values
need to be specified, according to the board documentation.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
items:
- description: EMC_RC
- description: EMC_RFC
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
index 4b9196c83291..84fd57bcf0dc 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra30-mc.yaml
@@ -77,8 +77,7 @@ patternProperties:
maximum: 900000000
nvidia,emem-configuration:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
description: |
Values to be written to the EMEM register block. See section
"18.13.1 MC Registers" in the TRM.
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.txt b/Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.txt
deleted file mode 100644
index 9f78e6c82740..000000000000
--- a/Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-DT bindings for Renesas R-Mobile and SH-Mobile memory controllers
-=================================================================
-
-Renesas R-Mobile and SH-Mobile SoCs contain one or more memory controllers.
-These memory controllers differ from one SoC variant to another, and are called
-by different names ("DDR Bus Controller (DBSC)", "DDR3 Bus State Controller
-(DBSC3)", "SDRAM Bus State Controller (SBSC)").
-
-Currently memory controller device nodes are used only to reference PM
-domains, and prevent these PM domains from being powered down, which would
-crash the system.
-
-As there exist no actual drivers for these controllers yet, these bindings
-should be considered EXPERIMENTAL for now.
-
-Required properties:
- - compatible: Must be one of the following SoC-specific values:
- - "renesas,dbsc-r8a73a4" (R-Mobile APE6)
- - "renesas,dbsc3-r8a7740" (R-Mobile A1)
- - "renesas,sbsc-sh73a0" (SH-Mobile AG5)
- - reg: Must contain the base address and length of the memory controller's
- registers.
-
-Optional properties:
- - interrupts: Must contain a list of interrupt specifiers for memory
- controller interrupts, if available.
- - interrupt-names: Must contain a list of interrupt names corresponding to
- the interrupts in the interrupts property, if available.
- Valid interrupt names are:
- - "sec" (secure interrupt)
- - "temp" (normal (temperature) interrupt)
- - power-domains: Must contain a reference to the PM domain that the memory
- controller belongs to, if available.
-
-Example:
-
- sbsc1: memory-controller@fe400000 {
- compatible = "renesas,sbsc-sh73a0";
- reg = <0xfe400000 0x400>;
- interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>,
- <0 36 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "sec", "temp";
- power-domains = <&pd_a4bc0>;
- };
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.yaml
new file mode 100644
index 000000000000..7056ccb7eb30
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/renesas,dbsc.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/memory-controllers/renesas,dbsc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas DDR Bus Controllers
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description: |
+ Renesas SoCs contain one or more memory controllers. These memory
+ controllers differ from one SoC variant to another, and are called by
+ different names, e.g. "DDR Bus Controller (DBSC)", "DDR3 Bus State Controller
+ (DBSC3)", or "SDRAM Bus State Controller (SBSC)").
+
+properties:
+ compatible:
+ enum:
+ - renesas,dbsc-r8a73a4 # R-Mobile APE6
+ - renesas,dbsc3-r8a7740 # R-Mobile A1
+ - renesas,sbsc-sh73a0 # SH-Mobile AG5
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: sec # secure interrupt
+ - const: temp # normal (temperature) interrupt
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ sbsc1: memory-controller@fe400000 {
+ compatible = "renesas,sbsc-sh73a0";
+ reg = <0xfe400000 0x400>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "sec", "temp";
+ power-domains = <&pd_a4bc0>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
index 39afacc447b2..f591332fc462 100644
--- a/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun4i-a10-ts.yaml
@@ -31,19 +31,19 @@ properties:
description: A touchscreen is attached to the controller
allwinner,tp-sensitive-adjust:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 15
- default: 15
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ default: 15
+
description: Sensitivity of pen down detection
allwinner,filter-type:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 3
- default: 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+ default: 1
+
description: |
Select median and averaging filter. Sample used for median /
averaging filter:
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
deleted file mode 100644
index 148ef621a5e5..000000000000
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-Cirrus Logic/Wolfson Microelectronics Arizona class audio SoCs
-
-These devices are audio SoCs with extensive digital capabilities and a range
-of analogue I/O.
-
-Required properties:
-
- - compatible : One of the following chip-specific strings:
- "cirrus,cs47l24"
- "wlf,wm5102"
- "wlf,wm5110"
- "wlf,wm8280"
- "wlf,wm8997"
- "wlf,wm8998"
- "wlf,wm1814"
- "wlf,wm1831"
-
- - reg : I2C slave address when connected using I2C, chip select number when
- using SPI.
-
- - interrupts : The interrupt line the /IRQ signal for the device is
- connected to.
- - interrupt-controller : Arizona class devices contain interrupt controllers
- and may provide interrupt services to other devices.
- - #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
- The first cell is the IRQ number.
- The second cell is the flags, encoded as the trigger masks from
- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-
- - gpio-controller : Indicates this device is a GPIO controller.
- - #gpio-cells : Must be 2. The first cell is the pin number and the
- second cell is used to specify optional parameters, see ../gpio/gpio.txt
- for details.
-
- - AVDD-supply, DBVDD1-supply, CPVDD-supply : Power supplies for the device,
- as covered in Documentation/devicetree/bindings/regulator/regulator.txt
-
- - DBVDD2-supply, DBVDD3-supply : Additional databus power supplies (wm5102,
- wm5110, wm8280, wm8998, wm1814)
-
- - SPKVDDL-supply, SPKVDDR-supply : Speaker driver power supplies (wm5102,
- wm5110, wm8280, wm8998, wm1814)
-
- - SPKVDD-supply : Speaker driver power supply (wm8997)
-
- - DCVDD-supply : Main power supply (cs47l24, wm1831)
-
- - MICVDD-supply : Microphone power supply (cs47l24, wm1831)
-
-Optional properties:
-
- - reset-gpios : GPIO specifier for the GPIO controlling /RESET
-
- - clocks: Should reference the clocks supplied on MCLK1 and MCLK2
- - clock-names: Should contains two strings:
- "mclk1" for the clock supplied on MCLK1, recommended to be a high
- quality audio reference clock
- "mclk2" for the clock supplied on MCLK2, recommended to be an always on
- 32k clock
-
- - wlf,gpio-defaults : A list of GPIO configuration register values. Defines
- for the appropriate values can found in <dt-bindings/mfd/arizona.txt>. If
- absent, no configuration of these registers is performed. If any entry has
- a value that is out of range for a 16 bit register then the chip default
- will be used. If present exactly five values must be specified.
-
- - DCVDD-supply, MICVDD-supply : Power supplies, only need to be specified if
- they are being externally supplied. As covered in
- Documentation/devicetree/bindings/regulator/regulator.txt
- (wm5102, wm5110, wm8280, wm8997, wm8998, wm1814)
-
-Deprecated properties:
-
- - wlf,reset : GPIO specifier for the GPIO controlling /RESET
-
-Also see child specific device properties:
- Regulator - ../regulator/arizona-regulator.txt
- Extcon - ../extcon/extcon-arizona.txt
- Sound - ../sound/wlf,arizona.txt
-
-Example:
-
-codec: wm5102@1a {
- compatible = "wlf,wm5102";
- reg = <0x1a>;
- interrupts = <347>;
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&gic>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- wlf,gpio-defaults = <
- ARIZONA_GP_FN_TXLRCLK
- ARIZONA_GP_DEFAULT
- ARIZONA_GP_DEFAULT
- ARIZONA_GP_DEFAULT
- ARIZONA_GP_DEFAULT
- >;
-};
diff --git a/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
deleted file mode 100644
index 3bf92ad37fa1..000000000000
--- a/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-Cirrus Logic Lochnagar Audio Development Board
-
-Lochnagar is an evaluation and development board for Cirrus Logic
-Smart CODEC and Amp devices. It allows the connection of most Cirrus
-Logic devices on mini-cards, as well as allowing connection of
-various application processor systems to provide a full evaluation
-platform. Audio system topology, clocking and power can all be
-controlled through the Lochnagar, allowing the device under test
-to be used in a variety of possible use cases.
-
-Also see these documents for generic binding information:
- [1] GPIO : ../gpio/gpio.txt
-
-And these for relevant defines:
- [2] include/dt-bindings/pinctrl/lochnagar.h
- [3] include/dt-bindings/clock/lochnagar.h
-
-And these documents for the required sub-node binding details:
- [4] Clock: ../clock/cirrus,lochnagar.txt
- [5] Pinctrl: ../pinctrl/cirrus,lochnagar.txt
- [6] Regulator: ../regulator/cirrus,lochnagar.txt
- [7] Sound: ../sound/cirrus,lochnagar.txt
- [8] Hardware Monitor: ../hwmon/cirrus,lochnagar.txt
-
-Required properties:
-
- - compatible : One of the following strings:
- "cirrus,lochnagar1"
- "cirrus,lochnagar2"
-
- - reg : I2C slave address
-
- - reset-gpios : Reset line to the Lochnagar, see [1].
-
-Required sub-nodes:
-
- - lochnagar-clk : Binding for the clocking components, see [4].
-
- - lochnagar-pinctrl : Binding for the pin control components, see [5].
-
-Optional sub-nodes:
-
- - Bindings for the regulator components, see [6]. Only available on
- Lochnagar 2.
-
- - lochnagar-sc : Binding for the sound card components, see [7].
- Only available on Lochnagar 2.
- - lochnagar-hwmon : Binding for the hardware monitor components, see [8].
- Only available on Lochnagar 2.
-
-Optional properties:
-
- - present-gpios : Host present line, indicating the presence of a
- host system, see [1]. This can be omitted if the present line is
- tied in hardware.
-
-Example:
-
-lochnagar: lochnagar@22 {
- compatible = "cirrus,lochnagar2";
- reg = <0x22>;
-
- reset-gpios = <&gpio0 55 0>;
- present-gpios = <&gpio0 60 0>;
-
- lochnagar-clk {
- compatible = "cirrus,lochnagar2-clk";
- ...
- };
-
- lochnagar-pinctrl {
- compatible = "cirrus,lochnagar-pinctrl";
- ...
- };
-
- lochnagar-sc {
- compatible = "cirrus,lochnagar2-soundcard";
- ...
- };
-
- lochnagar-hwmon {
- compatible = "cirrus,lochnagar2-hwmon";
- ...
- };
-};
diff --git a/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.yaml
new file mode 100644
index 000000000000..7a616577ac63
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.yaml
@@ -0,0 +1,352 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/cirrus,lochnagar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Lochnagar Audio Development Board
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ Lochnagar is an evaluation and development board for Cirrus Logic
+ Smart CODEC and Amp devices. It allows the connection of most Cirrus
+ Logic devices on mini-cards, as well as allowing connection of
+ various application processor systems to provide a full evaluation
+ platform. Audio system topology, clocking and power can all be
+ controlled through the Lochnagar, allowing the device under test
+ to be used in a variety of possible use cases.
+
+ Also see these documents for generic binding information:
+ [1] GPIO : ../gpio/gpio.txt
+
+ And these for relevant defines:
+ [2] include/dt-bindings/pinctrl/lochnagar.h
+ [3] include/dt-bindings/clock/lochnagar.h
+
+ And these documents for the required sub-node binding details:
+ [4] Clock: ../clock/cirrus,lochnagar.yaml
+ [5] Pinctrl: ../pinctrl/cirrus,lochnagar.yaml
+ [6] Sound: ../sound/cirrus,lochnagar.yaml
+ [7] Hardware Monitor: ../hwmon/cirrus,lochnagar.yaml
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2
+ then:
+ properties:
+ lochnagar-hwmon:
+ type: object
+ $ref: /schemas/hwmon/cirrus,lochnagar.yaml#
+
+ lochnagar-sc:
+ type: object
+ $ref: /schemas/sound/cirrus,lochnagar.yaml#
+
+properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar1
+ - cirrus,lochnagar2
+
+ reg:
+ const: 0x22
+
+ reset-gpios:
+ maxItems: 1
+
+ present-gpios:
+ description: |
+ Host present line, indicating the presence of a
+ host system, see [1]. This can be omitted if the present line is
+ tied in hardware.
+ maxItems: 1
+
+ lochnagar-clk:
+ type: object
+ $ref: /schemas/clock/cirrus,lochnagar.yaml#
+
+ lochnagar-pmic32k:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 32768
+
+ lochnagar-clk12m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 12288000
+
+ lochnagar-clk11m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 11298600
+
+ lochnagar-clk24m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 24576000
+
+ lochnagar-clk22m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 22579200
+
+ lochnagar-clk8m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 8192000
+
+ lochnagar-usb24m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 24576000
+
+ lochnagar-usb12m:
+ type: object
+ $ref: /schemas/clock/fixed-clock.yaml#
+ properties:
+ clock-frequency:
+ const: 12288000
+
+ lochnagar-pinctrl:
+ type: object
+ $ref: /schemas/pinctrl/cirrus,lochnagar.yaml#
+
+ VDDCORE:
+ description:
+ Initialisation data for the VDDCORE regulator, which supplies the
+ CODEC's digital core if not being provided by an internal regulator.
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2-vddcore
+
+ SYSVDD-supply:
+ description:
+ Primary power supply for the Lochnagar.
+ required:
+ - compatible
+
+ MICVDD:
+ description:
+ Initialisation data for the MICVDD regulator, which supplies the
+ CODEC's MICVDD.
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2-micvdd
+
+ SYSVDD-supply:
+ description:
+ Primary power supply for the Lochnagar.
+ required:
+ - compatible
+
+ MIC1VDD:
+ description:
+ Initialisation data for the MIC1VDD supplies.
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2-mic1vdd
+
+ cirrus,micbias-input:
+ description:
+ A property selecting which of the CODEC minicard micbias outputs
+ should be used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4
+
+ MICBIAS1-supply:
+ description:
+ Regulator supplies for the MIC1VDD outputs, supplying the digital
+ microphones, normally supplied from the attached CODEC.
+ required:
+ - compatible
+
+ MIC2VDD:
+ description:
+ Initialisation data for the MIC2VDD supplies.
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2-mic2vdd
+
+ cirrus,micbias-input:
+ description:
+ A property selecting which of the CODEC minicard micbias outputs
+ should be used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4
+
+ MICBIAS2-supply:
+ description:
+ Regulator supplies for the MIC2VDD outputs, supplying the digital
+ microphones, normally supplied from the attached CODEC.
+ required:
+ - compatible
+
+ VDD1V8:
+ description:
+ Recommended fixed regulator for the VDD1V8 regulator, which supplies
+ the CODEC's analog and 1.8V digital supplies.
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ properties:
+ compatible:
+ enum:
+ - regulator-fixed
+
+ regulator-min-microvolt:
+ const: 1800000
+
+ regulator-max-microvolt:
+ const: 1800000
+
+ vin-supply:
+ description:
+ Should be set to the same supply as SYSVDD
+ required:
+ - compatible
+ - regulator-min-microvolt
+ - regulator-max-microvolt
+ - regulator-boot-on
+ - regulator-always-on
+ - vin-supply
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - lochnagar-clk
+ - lochnagar-pinctrl
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clk/lochnagar.h>
+ #include <dt-bindings/pinctrl/lochnagar.h>
+ i2c@e0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe0004000 0x1000>;
+
+ lochnagar: lochnagar@22 {
+ compatible = "cirrus,lochnagar2";
+ reg = <0x22>;
+
+ reset-gpios = <&gpio0 55 0>;
+ present-gpios = <&gpio0 60 0>;
+
+ lochnagarclk: lochnagar-clk {
+ compatible = "cirrus,lochnagar2-clk";
+
+ #clock-cells = <1>;
+ clocks = <&clkaudio>, <&clkpmic>;
+ clock-names = "ln-gf-mclk2", "ln-pmic-32k";
+
+ assigned-clocks = <&lochnagarclk LOCHNAGAR_CDC_MCLK1>,
+ <&lochnagarclk LOCHNAGAR_CDC_MCLK2>;
+ assigned-clock-parents = <&clkaudio>, <&clkpmic>;
+ };
+
+ clkpmic: lochnagar-pmic32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+
+ lochnagar-pinctrl {
+ compatible = "cirrus,lochnagar-pinctrl";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&lochnagar 0 0 LOCHNAGAR2_PIN_NUM_GPIOS>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinsettings>;
+
+ pinsettings: pin-settings {
+ ap2aif-pins {
+ input-enable;
+ groups = "gf-aif1";
+ function = "codec-aif3";
+ };
+ codec2aif-pins {
+ output-enable;
+ groups = "codec-aif3";
+ function = "gf-aif1";
+ };
+ };
+ };
+
+ lochnagar-sc {
+ compatible = "cirrus,lochnagar2-soundcard";
+
+ #sound-dai-cells = <1>;
+
+ clocks = <&lochnagarclk LOCHNAGAR_SOUNDCARD_MCLK>;
+ clock-names = "mclk";
+ };
+
+ lochnagar-hwmon {
+ compatible = "cirrus,lochnagar2-hwmon";
+ };
+
+ MIC1VDD {
+ compatible = "cirrus,lochnagar2-mic1vdd";
+
+ cirrus,micbias-input = <3>;
+ };
+
+ MICVDD {
+ compatible = "cirrus,lochnagar2-micvdd";
+
+ SYSVDD-supply = <&wallvdd>;
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ VDD1V8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VDD1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+
+ vin-supply = <&wallvdd>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml b/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
new file mode 100644
index 000000000000..a5531f6caf12
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
@@ -0,0 +1,299 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/cirrus,madera.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Madera class audio CODECs Multi-Functional Device
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ These devices are audio SoCs with extensive digital capabilities and a range
+ of analogue I/O.
+
+ See also the child driver bindings in:
+
+ bindings/pinctrl/cirrus,madera.yaml
+ bindings/regulator/wlf,arizona.yaml
+ bindings/sound/cirrus,madera.yaml
+
+allOf:
+ - $ref: /schemas/pinctrl/cirrus,madera.yaml#
+ - $ref: /schemas/regulator/wlf,arizona.yaml#
+ - $ref: /schemas/sound/cirrus,madera.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l85
+ - cirrus,wm1840
+ then:
+ properties:
+ SPKVDDL-supply:
+ description:
+ Left speaker driver power supply.
+
+ SPKVDDR-supply:
+ description:
+ Right speaker driver power supply.
+
+ required:
+ - SPKVDDL-supply
+ - SPKVDDR-supply
+ else:
+ required:
+ - DCVDD-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l15
+ - cirrus,cs47l35
+ then:
+ properties:
+ SPKVDD-supply:
+ description:
+ Mono speaker driver power supply.
+
+ required:
+ - SPKVDD-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l35
+ - cirrus,cs47l85
+ - cirrus,cs47l90
+ - cirrus,cs47l91
+ - cirrus,wm1840
+ then:
+ properties:
+ DBVDD2-supply:
+ description:
+ Databus power supply.
+
+ required:
+ - DBVDD2-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l85
+ - cirrus,cs47l90
+ - cirrus,cs47l91
+ - cirrus,wm1840
+ then:
+ properties:
+ DBVDD3-supply:
+ description:
+ Databus power supply.
+
+ DBVDD4-supply:
+ description:
+ Databus power supply.
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l15
+ then:
+ required:
+ - MICVDD-supply
+ else:
+ properties:
+ CPVDD2-supply:
+ description:
+ Secondary charge pump power supply.
+
+ required:
+ - CPVDD2-supply
+
+properties:
+ compatible:
+ enum:
+ - cirrus,cs47l15
+ - cirrus,cs47l35
+ - cirrus,cs47l85
+ - cirrus,cs47l90
+ - cirrus,cs47l91
+ - cirrus,cs42l92
+ - cirrus,cs47l92
+ - cirrus,cs47l93
+ - cirrus,wm1840
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description:
+ The first cell is the pin number. The second cell is reserved for
+ future use and must be zero
+ const: 2
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description:
+ The first cell is the IRQ number.
+ The second cell is the flags, encoded as the trigger masks from
+ bindings/interrupt-controller/interrupts.txt
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ One entry specifying the GPIO controlling /RESET. As defined in
+ bindings/gpio.txt. Although optional, it is strongly recommended
+ to use a hardware reset.
+ maxItems: 1
+
+ clocks:
+ description:
+ Should reference the clocks supplied on MCLK1, MCLK2 and MCLK3.
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ description: |
+ May contain up to three strings:
+ "mclk1" For the clock supplied on MCLK1, recommended to be a
+ high quality audio reference clock.
+ "mclk2" For the clock supplied on MCLK2, required to be an
+ always on 32k clock.
+ "mclk3" For the clock supplied on MCLK3.
+ oneOf:
+ - items:
+ - const: mclk1
+ - items:
+ - const: mclk2
+ - items:
+ - const: mclk3
+ - items:
+ - const: mclk1
+ - const: mclk2
+ - items:
+ - const: mclk1
+ - const: mclk3
+ - items:
+ - const: mclk2
+ - const: mclk3
+ - items:
+ - const: mclk1
+ - const: mclk2
+ - const: mclk3
+
+ AVDD-supply:
+ description:
+ Analogue power supply.
+
+ DBVDD1-supply:
+ description:
+ Databus power supply.
+
+ CPVDD1-supply:
+ description:
+ Charge pump power supply.
+
+ DCVDD-supply:
+ description:
+ Digital power supply, optional on CS47L85, WM1840 where it can
+ be supplied internally.
+
+ MICVDD-supply:
+ description:
+ Microphone power supply, normally supplied internally except on
+ cs47l15 where it is mandatory.
+
+required:
+ - compatible
+ - gpio-controller
+ - '#gpio-cells'
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupt-parent
+ - interrupts
+ - AVDD-supply
+ - DBVDD1-supply
+ - CPVDD1-supply
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/sound/madera.h>
+ i2c@e0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe0004000 0x1000>;
+
+ cs47l85: codec@1a {
+ compatible = "cirrus,cs47l85";
+ reg = <0x1a>;
+
+ reset-gpios = <&gpio 0>;
+ wlf,ldoena = <&gpio 1>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <&host_irq1>;
+ interrupt-parent = <&gic>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ AVDD-supply = <&vdd1v8>;
+ DBVDD1-supply = <&vdd1v8>;
+ DBVDD2-supply = <&vdd1v8>;
+ DBVDD3-supply = <&vdd1v8>;
+ DBVDD4-supply = <&vdd1v8>;
+ CPVDD1-supply = <&vdd1v8>;
+ CPVDD2-supply = <&vdd1v2>;
+ SPKVDDL-supply = <&vdd5v>;
+ SPKVDDR-supply = <&vdd5v>;
+
+ clocks = <&clks 0>, <&clks 1>, <&clks 2>;
+ clock-names = "mclk1", "mclk2", "mclk3";
+
+ cirrus,dmic-ref = <0 0 MADERA_DMIC_REF_MICBIAS1>;
+ cirrus,inmode = <
+ MADERA_INMODE_SE MADERA_INMODE_SE
+ MADERA_INMODE_SE MADERA_INMODE_SE
+ MADERA_INMODE_DIFF MADERA_INMODE_DIFF
+ >;
+ cirrus,max-channels-clocked = <2 0 0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinsettings>;
+
+ pinsettings: pin-settings {
+ aif1-pins {
+ groups = "aif1";
+ function = "aif1";
+ bias-bus-hold;
+ };
+
+ aif2-pins {
+ groups = "aif2";
+ function = "aif2";
+ bias-bus-hold;
+ };
+
+ aif3-pins {
+ groups = "aif3";
+ function = "aif3";
+ bias-bus-hold;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/madera.txt b/Documentation/devicetree/bindings/mfd/madera.txt
deleted file mode 100644
index 47e2b8bc6051..000000000000
--- a/Documentation/devicetree/bindings/mfd/madera.txt
+++ /dev/null
@@ -1,114 +0,0 @@
-Cirrus Logic Madera class audio codecs Multi-Functional Device
-
-These devices are audio SoCs with extensive digital capabilities and a range
-of analogue I/O.
-
-See also the child driver bindings in:
-bindings/pinctrl/cirrus,madera-pinctrl.txt
-bindings/regulator/arizona-regulator.txt
-bindings/sound/madera.txt
-
-Required properties:
-
- - compatible : One of the following chip-specific strings:
- "cirrus,cs47l15"
- "cirrus,cs47l35"
- "cirrus,cs47l85"
- "cirrus,cs47l90"
- "cirrus,cs47l91"
- "cirrus,cs42l92"
- "cirrus,cs47l92"
- "cirrus,cs47l93"
- "cirrus,wm1840"
-
- - reg : I2C slave address when connected using I2C, chip select number when
- using SPI.
-
- - DCVDD-supply : Power supply for the device as defined in
- bindings/regulator/regulator.txt
- Mandatory on CS47L15, CS47L35, CS47L90, CS47L91, CS42L92, CS47L92, CS47L93
- Optional on CS47L85, WM1840
-
- - AVDD-supply, DBVDD1-supply, DBVDD2-supply, CPVDD1-supply, CPVDD2-supply :
- Power supplies for the device
-
- - DBVDD3-supply, DBVDD4-supply : Power supplies for the device
- (CS47L85, CS47L90, CS47L91, WM1840)
-
- - SPKVDDL-supply, SPKVDDR-supply : Power supplies for the device
- (CS47L85, WM1840)
-
- - SPKVDD-supply : Power supply for the device
- (CS47L15, CS47L35)
-
- - interrupt-controller : Indicates that this device is an interrupt controller
-
- - #interrupt-cells: the number of cells to describe an IRQ, must be 2.
- The first cell is the IRQ number.
- The second cell is the flags, encoded as the trigger masks from
- bindings/interrupt-controller/interrupts.txt
-
- - gpio-controller : Indicates this device is a GPIO controller.
-
- - #gpio-cells : Must be 2. The first cell is the pin number. The second cell
- is reserved for future use and must be zero
-
- - interrupt-parent : The parent interrupt controller.
-
- - interrupts : The interrupt line the /IRQ signal for the device is
- connected to.
-
-Optional properties:
-
- - MICVDD-supply : Power supply, only need to be specified if
- powered externally
-
- - reset-gpios : One entry specifying the GPIO controlling /RESET.
- As defined in bindings/gpio.txt.
- Although optional, it is strongly recommended to use a hardware reset
-
- - clocks: Should reference the clocks supplied on MCLK1, MCLK2 and MCLK3
- - clock-names: May contain up to three strings:
- "mclk1" for the clock supplied on MCLK1, recommended to be a high
- quality audio reference clock
- "mclk2" for the clock supplied on MCLK2, required to be an always on
- 32k clock
- "mclk3" for the clock supplied on MCLK3
-
- - MICBIASx : Initial data for the MICBIAS regulators, as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt.
- One for each MICBIAS generator (MICBIAS1, MICBIAS2, ...)
- (all codecs)
-
- One for each output pin (MICBIAS1A, MIBCIAS1B, MICBIAS2A, ...)
- (all except CS47L85, WM1840)
-
- The following following additional property is supported for the generator
- nodes:
- - cirrus,ext-cap : Set to 1 if the MICBIAS has external decoupling
- capacitors attached.
-
-Optional child nodes:
- micvdd : Node containing initialization data for the micvdd regulator
- See bindings/regulator/arizona-regulator.txt
-
- ldo1 : Node containing initialization data for the LDO1 regulator
- See bindings/regulator/arizona-regulator.txt
- (cs47l85, wm1840)
-
-Example:
-
-cs47l85@0 {
- compatible = "cirrus,cs47l85";
- reg = <0>;
-
- reset-gpios = <&gpio 0>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupts = <&host_irq1>;
- interrupt-parent = <&gic>;
-
- gpio-controller;
- #gpio-cells = <2>;
-};
diff --git a/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml b/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
new file mode 100644
index 000000000000..f91acc42d652
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/mps,mp2629.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MP2629 Battery Charger PMIC from Monolithic Power Systems
+
+maintainers:
+ - Saravanan Sekar <sravanhome@gmail.com>
+
+description: |
+ MP2629 is a PMIC providing battery charging and power supply for smartphones,
+ wireless cameras and portable devices. The chip is controlled over I2C.
+
+ The battery charge management device provides a battery charger controller
+ and an ADC IIO device for battery and system voltage monitoring.
+
+properties:
+ compatible:
+ const: mps,mp2629
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+ description:
+ The first cell is the IRQ number, the second cell is the trigger type.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - "#interrupt-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/input/linux-event-codes.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@4b {
+ compatible = "mps,mp2629";
+ reg = <0x4b>;
+
+ interrupt-controller;
+ interrupt-parent = <&gpio2>;
+ #interrupt-cells = <2>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/mt6397.txt b/Documentation/devicetree/bindings/mfd/mt6397.txt
index a9b105ac00a8..2661775a3825 100644
--- a/Documentation/devicetree/bindings/mfd/mt6397.txt
+++ b/Documentation/devicetree/bindings/mfd/mt6397.txt
@@ -18,24 +18,30 @@ See the following for pwarp node definitions:
This document describes the binding for MFD device and its sub module.
Required properties:
-compatible: "mediatek,mt6397" or "mediatek,mt6323"
+compatible:
+ "mediatek,mt6323" for PMIC MT6323
+ "mediatek,mt6358" for PMIC MT6358
+ "mediatek,mt6397" for PMIC MT6397
Optional subnodes:
- rtc
Required properties: Should be one of the following
- compatible: "mediatek,mt6323-rtc"
+ - compatible: "mediatek,mt6358-rtc"
- compatible: "mediatek,mt6397-rtc"
For details, see ../rtc/rtc-mt6397.txt
- regulators
Required properties:
- - compatible: "mediatek,mt6397-regulator"
- see ../regulator/mt6397-regulator.txt
- compatible: "mediatek,mt6323-regulator"
see ../regulator/mt6323-regulator.txt
+ - compatible: "mediatek,mt6358-regulator"
+ see ../regulator/mt6358-regulator.txt
+ - compatible: "mediatek,mt6397-regulator"
+ see ../regulator/mt6397-regulator.txt
- codec
Required properties:
- - compatible: "mediatek,mt6397-codec"
+ - compatible: "mediatek,mt6397-codec" or "mediatek,mt6358-sound"
- clk
Required properties:
- compatible: "mediatek,mt6397-clk"
@@ -54,6 +60,11 @@ Optional subnodes:
- compatible: "mediatek,mt6323-pwrc"
For details, see ../power/reset/mt6323-poweroff.txt
+- pin-controller
+ Required properties:
+ - compatible: "mediatek,mt6397-pinctrl"
+ For details, see ../pinctrl/pinctrl-mt65xx.txt
+
Example:
pwrap: pwrap@1000f000 {
compatible = "mediatek,mt8135-pwrap";
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
index ddf190cb800b..e675611f80d0 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-lptimer.yaml
@@ -66,8 +66,8 @@ patternProperties:
reg:
description: Identify trigger hardware block.
items:
- minimum: 0
- maximum: 2
+ minimum: 0
+ maximum: 2
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
index 590849ee9f32..f212fc6e1661 100644
--- a/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stm32-timers.yaml
@@ -67,23 +67,22 @@ properties:
description:
One or two <index level filter> to describe break input
configurations.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- items:
- - description: |
- "index" indicates on which break input (0 or 1) the
- configuration should be applied.
- enum: [ 0 , 1]
- - description: |
- "level" gives the active level (0=low or 1=high) of the
- input signal for this configuration
- enum: [ 0, 1 ]
- - description: |
- "filter" gives the filtering value (up to 15) to be applied.
- maximum: 15
- minItems: 1
- maxItems: 2
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "index" indicates on which break input (0 or 1) the
+ configuration should be applied.
+ enum: [0, 1]
+ - description: |
+ "level" gives the active level (0=low or 1=high) of the
+ input signal for this configuration
+ enum: [0, 1]
+ - description: |
+ "filter" gives the filtering value (up to 15) to be applied.
+ maximum: 15
+ minItems: 1
+ maxItems: 2
required:
- "#pwm-cells"
@@ -102,8 +101,8 @@ patternProperties:
reg:
description: Identify trigger hardware block.
items:
- minimum: 0
- maximum: 16
+ minimum: 0
+ maximum: 16
required:
- compatible
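
The <index level filter> triples described in the hunk above belong to the timers' break-input property under the PWM child node (st,breakinput in the current binding; treated as an assumption here since the hunk does not show the property name). A minimal sketch with illustrative values:

  pwm {
    compatible = "st,stm32-pwm";
    /* break input 0, active high, filter value 5 */
    st,breakinput = <0 1 5>;
  };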
diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
index be7faa6dc055..dd995d7dc1a6 100644
--- a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
@@ -29,8 +29,7 @@ properties:
onkey:
type: object
- allOf:
- - $ref: ../input/input.yaml
+ $ref: ../input/input.yaml
properties:
compatible:
@@ -68,8 +67,7 @@ properties:
watchdog:
type: object
- allOf:
- - $ref: ../watchdog/watchdog.yaml
+ $ref: ../watchdog/watchdog.yaml
properties:
compatible:
@@ -190,8 +188,7 @@ properties:
description: STPMIC1 voltage regulators supplies
"^(buck[1-4]|ldo[1-6]|boost|vref_ddr|pwr_sw[1-2])$":
- allOf:
- - $ref: ../regulator/regulator.yaml
+ $ref: ../regulator/regulator.yaml
"^ldo[1-2,5-6]$":
type: object
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 39375e4313d2..19bdaf781853 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -33,13 +33,13 @@ properties:
compatible:
anyOf:
- items:
- - enum:
- - allwinner,sun8i-a83t-system-controller
- - allwinner,sun8i-h3-system-controller
- - allwinner,sun8i-v3s-system-controller
- - allwinner,sun50i-a64-system-controller
+ - enum:
+ - allwinner,sun8i-a83t-system-controller
+ - allwinner,sun8i-h3-system-controller
+ - allwinner,sun8i-v3s-system-controller
+ - allwinner,sun50i-a64-system-controller
- - const: syscon
+ - const: syscon
- contains:
const: syscon
@@ -52,9 +52,8 @@ properties:
description: |
The size (in bytes) of the IO accesses that should be performed
on the device.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4, 8 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8]
hwlocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml b/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
new file mode 100644
index 000000000000..4c0106cea36d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
@@ -0,0 +1,280 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/wlf,arizona.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic/Wolfson Microelectronics Arizona class audio SoCs
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ These devices are audio SoCs with extensive digital capabilities and a
+ range of analogue I/O.
+
+allOf:
+ - $ref: /schemas/sound/wlf,arizona.yaml#
+ - $ref: /schemas/regulator/wlf,arizona.yaml#
+ - $ref: /schemas/extcon/wlf,arizona.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l24
+ - wlf,wm1831
+ then:
+ required:
+ - DCVDD-supply
+ - MICVDD-supply
+ else:
+ properties:
+ LDOVDD-supply:
+ description:
+ Digital power supply, used internally to generate DCVDD when
+ internally supplied.
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - wlf,wm1814
+ - wlf,wm5102
+ - wlf,wm5110
+ - wlf,wm8280
+ - wlf,wm8997
+ - wlf,wm8998
+ then:
+ properties:
+ DBVDD2-supply:
+ description:
+ Databus power supply.
+
+ required:
+ - DBVDD2-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - wlf,wm1814
+ - wlf,wm5102
+ - wlf,wm5110
+ - wlf,wm8280
+ - wlf,wm8998
+ then:
+ properties:
+ DBVDD3-supply:
+ description:
+ Databus power supply.
+
+ required:
+ - DBVDD3-supply
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - cirrus,cs47l24
+ - wlf,wm1831
+ - wlf,wm8997
+ then:
+ properties:
+ SPKVDD-supply:
+ description:
+ Mono speaker driver power supply.
+
+ required:
+ - SPKVDD-supply
+ else:
+ properties:
+ SPKVDDL-supply:
+ description:
+ Left speaker driver power supply.
+
+ SPKVDDR-supply:
+ description:
+ Right speaker driver power supply.
+
+ required:
+ - SPKVDDL-supply
+ - SPKVDDR-supply
+
+properties:
+ compatible:
+ enum:
+ - cirrus,cs47l24
+ - wlf,wm1814
+ - wlf,wm1831
+ - wlf,wm5102
+ - wlf,wm5110
+ - wlf,wm8280
+ - wlf,wm8997
+ - wlf,wm8998
+
+ reg:
+ maxItems: 1
+
+ AVDD-supply:
+ description:
+ Analogue power supply.
+
+ CPVDD-supply:
+ description:
+ Charge pump power supply.
+
+ DBVDD1-supply:
+ description:
+ Databus power supply.
+
+ DCVDD-supply:
+ description:
+ Digital power supply, normally supplied internally except on cs47l24,
+ wm1831 where it is mandatory.
+
+ MICVDD-supply:
+ description:
+ Microphone power supply, normally supplied internally except on
+ cs47l24, wm1831 where it is mandatory.
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description:
+ The first cell is the pin number and the second cell is used to
+ specify optional parameters.
+ const: 2
+
+ wlf,gpio-defaults:
+ description:
+ A list of GPIO configuration register values. Defines for the
+ appropriate values can be found in dt-bindings/mfd/arizona.h. If
+ absent, no configuration of these registers is performed. If any
+ entry has a value that is out of range for a 16 bit register then the
+ chip default will be used. If present exactly five values must be
+ specified.
+ $ref: "/schemas/types.yaml#/definitions/uint32-array"
+ minItems: 1
+ maxItems: 5
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description:
+ The first cell is the IRQ number. The second cell is the flags,
+ encoded as trigger masks.
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description:
+ Should reference the clocks supplied on MCLK1 and MCLK2.
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ description:
+ May contain up to two strings, "mclk1" for the clock supplied on MCLK1
+ (recommended to be a high quality audio reference clock) and "mclk2" for
+ the clock supplied on MCLK2 (recommended to be an always-on 32k clock).
+ oneOf:
+ - items:
+ - const: mclk1
+ - items:
+ - const: mclk2
+ - items:
+ - const: mclk1
+ - const: mclk2
+
+ reset-gpios:
+ maxItems: 1
+
+ wlf,reset:
+ description:
+ GPIO specifier for the GPIO controlling /RESET.
+ deprecated: true
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+
+required:
+ - compatible
+ - AVDD-supply
+ - CPVDD-supply
+ - DBVDD1-supply
+ - gpio-controller
+ - '#gpio-cells'
+ - interrupt-controller
+ - '#interrupt-cells'
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/mfd/arizona.h>
+ i2c@e0004000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe0004000 0x1000>;
+
+ wm5102: codec@1a {
+ compatible = "wlf,wm5102";
+ reg = <0x1a>;
+
+ reset-gpios = <&gpio 0>;
+ wlf,ldoena = <&gpio 1>;
+
+ AVDD-supply = <&vdd1v8>;
+ DBVDD1-supply = <&vdd1v8>;
+ DBVDD2-supply = <&vdd1v8>;
+ DBVDD3-supply = <&vdd1v8>;
+ CPVDD-supply = <&vdd1v8>;
+ LDOVDD-supply = <&vdd1v8>;
+ SPKVDDL-supply = <&vdd5v>;
+ SPKVDDR-supply = <&vdd5v>;
+
+ interrupts = <347>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gic>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ #sound-dai-cells = <1>;
+
+ wlf,gpio-defaults = <
+ ARIZONA_GP_FN_TXLRCLK
+ ARIZONA_GP_DEFAULT
+ ARIZONA_GP_DEFAULT
+ ARIZONA_GP_DEFAULT
+ ARIZONA_GP_DEFAULT
+ >;
+
+ clocks = <&clks 0>, <&clks 1>;
+ clock-names = "mclk1", "mclk2";
+
+ wlf,inmode = <ARIZONA_INMODE_DIFF ARIZONA_INMODE_DMIC>;
+ wlf,dmic-ref = <ARIZONA_DMIC_MICBIAS1 ARIZONA_DMIC_MICBIAS3>;
+
+ wlf,use-jd2;
+ wlf,use-jd2-nopull;
+ wlf,jd-invert;
+
+ wlf,micd-software-compare;
+ wlf,micd-detect-debounce = <0>;
+ wlf,micd-pol-gpio = <&codec 2 0>;
+ wlf,micd-rate = <ARIZONA_MICD_TIME_8MS>;
+ wlf,micd-dbtime = <4>;
+ wlf,micd-timeout-ms = <100>;
+ wlf,micd-force-micbias;
+ wlf,micd-configs = <0 ARIZONA_DMIC_MICBIAS1 0>,
+ <0x2000 ARIZONA_DMIC_MICBIAS2 1>;
+
+ wlf,gpsw = <ARIZONA_GPSW_OPEN>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mips/ingenic/devices.yaml b/Documentation/devicetree/bindings/mips/ingenic/devices.yaml
index 78dcf6ef3883..d1175030781a 100644
--- a/Documentation/devicetree/bindings/mips/ingenic/devices.yaml
+++ b/Documentation/devicetree/bindings/mips/ingenic/devices.yaml
@@ -20,16 +20,20 @@ properties:
- description: Qi Hardware Ben NanoNote
items:
- const: qi,lb60
+ - const: ingenic,jz4740
- description: Game Consoles Worldwide GCW Zero
items:
- const: gcw,zero
+ - const: ingenic,jz4770
- description: MIPS Creator CI20
items:
- const: img,ci20
+ - const: ingenic,jz4780
- description: YSH & ATIL General Board CU Neo
items:
- const: yna,cu1000-neo
+ - const: ingenic,x1000
...
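
With the SoC compatibles added above, each board's root node is expected to list the SoC string after the board string; a minimal root-node sketch for the CI20 entry (all other properties omitted):

  / {
    compatible = "img,ci20", "ingenic,jz4780";
  };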
diff --git a/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml b/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml
index 200de9396036..987b287f3bff 100644
--- a/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/aspeed,sdhci.yaml
@@ -41,8 +41,8 @@ properties:
patternProperties:
"^sdhci@[0-9a-f]+$":
type: object
- allOf:
- - $ref: mmc-controller.yaml
+ $ref: mmc-controller.yaml
+
properties:
compatible:
enum:
diff --git a/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml b/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml
index 2f45dd0d04db..d93f7794a85f 100644
--- a/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/cdns,sdhci.yaml
@@ -17,7 +17,7 @@ properties:
compatible:
items:
- enum:
- - socionext,uniphier-sd4hc
+ - socionext,uniphier-sd4hc
- const: cdns,sd4hc
reg:
@@ -36,91 +36,80 @@ properties:
cdns,phy-input-delay-sd-highspeed:
description: Value of the delay in the input path for SD high-speed timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-legacy:
description: Value of the delay in the input path for legacy timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-sd-uhs-sdr12:
description: Value of the delay in the input path for SD UHS SDR12 timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-sd-uhs-sdr25:
description: Value of the delay in the input path for SD UHS SDR25 timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-sd-uhs-sdr50:
description: Value of the delay in the input path for SD UHS SDR50 timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-sd-uhs-ddr50:
description: Value of the delay in the input path for SD UHS DDR50 timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-mmc-highspeed:
description: Value of the delay in the input path for MMC high-speed timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-input-delay-mmc-ddr:
description: Value of the delay in the input path for eMMC high-speed DDR timing
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x1f
# PHY DLL clock delays:
# Each delay property represents the fraction of the clock period.
# The approximate delay value will be
# (<delay property value>/128)*sdmclk_clock_period.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x1f
cdns,phy-dll-delay-sdclk:
description: |
Value of the delay introduced on the sdclk output for all modes except
HS200, HS400 and HS400_ES.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x7f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x7f
cdns,phy-dll-delay-sdclk-hsmmc:
description: |
Value of the delay introduced on the sdclk output for HS200, HS400 and
HS400_ES speed modes.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x7f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x7f
cdns,phy-dll-delay-strobe:
description: |
Value of the delay introduced on the dat_strobe input used in
HS400 / HS400_ES speed modes.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 0x7f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 0x7f
required:
- compatible
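
To make the retained PHY DLL delay comment concrete: each value is the fraction value/128 of one sdmclk period. Assuming, purely for illustration, a 200 MHz sdmclk (5 ns period):

  cdns,phy-dll-delay-sdclk = <64>;  /* (64/128) * 5 ns = 2.5 ns */

and the maximum value of 0x7f lands just under one full clock period.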
diff --git a/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
new file mode 100644
index 000000000000..e60bfe980ab3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/ingenic,mmc.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/ingenic,mmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs MMC Controller DT bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+ - $ref: mmc-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-mmc
+ - ingenic,jz4725b-mmc
+ - ingenic,jz4760-mmc
+ - ingenic,jz4780-mmc
+ - ingenic,x1000-mmc
+ - items:
+ - const: ingenic,jz4770-mmc
+ - const: ingenic,jz4760-mmc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: mmc
+
+ dmas:
+ items:
+ - description: DMA controller phandle and request line for RX
+ - description: DMA controller phandle and request line for TX
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+ #include <dt-bindings/dma/jz4780-dma.h>
+ mmc0: mmc@13450000 {
+ compatible = "ingenic,jz4780-mmc";
+ reg = <0x13450000 0x1000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <37>;
+
+ clocks = <&cgu JZ4780_CLK_MSC0>;
+ clock-names = "mmc";
+
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ cap-sdio-irq;
+ dmas = <&dma JZ4780_DMA_MSC0_RX 0xffffffff>,
+ <&dma JZ4780_DMA_MSC0_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+ };
diff --git a/Documentation/devicetree/bindings/mmc/jz4740.txt b/Documentation/devicetree/bindings/mmc/jz4740.txt
deleted file mode 100644
index 453d3b9d145d..000000000000
--- a/Documentation/devicetree/bindings/mmc/jz4740.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Ingenic XBurst MMC controllers
-
-This file documents the device tree properties used for the MMC controller in
-Ingenic JZ4740/JZ4760/JZ4780/X1000 SoCs. These are in addition to the core MMC
-properties described in mmc.txt.
-
-Required properties:
-- compatible: Should be one of the following:
- - "ingenic,jz4740-mmc" for the JZ4740
- - "ingenic,jz4725b-mmc" for the JZ4725B
- - "ingenic,jz4760-mmc" for the JZ4760
- - "ingenic,jz4780-mmc" for the JZ4780
- - "ingenic,x1000-mmc" for the X1000
-- reg: Should contain the MMC controller registers location and length.
-- interrupts: Should contain the interrupt specifier of the MMC controller.
-- clocks: Clock for the MMC controller.
-
-Optional properties:
-- dmas: List of DMA specifiers with the controller specific format
- as described in the generic DMA client binding. A tx and rx
- specifier is required.
-- dma-names: RX and TX DMA request names.
- Should be "rx" and "tx", in that order.
-
-For additional details on DMA client bindings see ../dma/dma.txt.
-
-Example:
-
-mmc0: mmc@13450000 {
- compatible = "ingenic,jz4780-mmc";
- reg = <0x13450000 0x1000>;
-
- interrupt-parent = <&intc>;
- interrupts = <37>;
-
- clocks = <&cgu JZ4780_CLK_MSC0>;
- clock-names = "mmc";
-
- dmas = <&dma JZ4780_DMA_MSC0_RX 0xffffffff>, <&dma JZ4780_DMA_MSC0_TX 0xffffffff>;
- dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index acc9f10871d4..4931fab34d81 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -76,20 +76,18 @@ properties:
# Other properties
bus-width:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [1, 4, 8]
- default: 1
description:
Number of data lines.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 4, 8]
+ default: 1
max-frequency:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 400000
- - maximum: 200000000
description:
Maximum operating frequency of the bus.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 400000
+ maximum: 200000000
disable-wp:
$ref: /schemas/types.yaml#/definitions/flag
@@ -212,13 +210,12 @@ properties:
eMMC HS400 enhanced strobe mode is supported
dsr:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 0xffff
description:
Value the card Driver Stage Register (DSR) should be programmed
with.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 0xffff
no-sdio:
$ref: /schemas/types.yaml#/definitions/flag
@@ -238,25 +235,23 @@ properties:
initialization.
fixed-emmc-driver-type:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- - maximum: 4
description:
For non-removable eMMC, enforce this driver type. The value is
the driver type as specified in the eMMC specification (table
206 in spec version 5.1)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 4
post-power-on-delay-ms:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - default: 10
description:
It was invented for MMC pwrseq-simple, which is described in
mmc-pwrseq-simple.txt. It is now reused as a tunable delay
waiting for I/O signalling and card power supply to be stable,
regardless of whether pwrseq-simple is used. Defaults to 10 ms
if not specified.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 10
supports-cqe:
$ref: /schemas/types.yaml#/definitions/flag
@@ -333,8 +328,8 @@ patternProperties:
- reg
"^clk-phase-(legacy|sd-hs|mmc-(hs|hs[24]00|ddr52)|uhs-(sdr(12|25|50|104)|ddr50))$":
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
minItems: 2
maxItems: 2
items:
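
A hypothetical consumer sketch showing the flattened core properties in use; the compatible string, address and values are illustrative only:

	mmc@12340000 {
		compatible = "vendor,example-sdhci";	/* hypothetical */
		reg = <0x12340000 0x1000>;
		bus-width = <4>;			/* 1, 4 or 8 per the schema */
		max-frequency = <50000000>;		/* within the 400 kHz - 200 MHz range */
		post-power-on-delay-ms = <10>;		/* matches the schema default */
		dsr = <0x404>;				/* 0 - 0xffff */
	};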
diff --git a/Documentation/devicetree/bindings/mmc/owl-mmc.yaml b/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
index 12b40213426d..1380501fb8f0 100644
--- a/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/owl-mmc.yaml
@@ -47,7 +47,7 @@ examples:
- |
mmc0: mmc@e0330000 {
compatible = "actions,owl-mmc";
- reg = <0x0 0xe0330000 0x0 0x4000>;
+ reg = <0xe0330000 0x4000>;
interrupts = <0 42 4>;
clocks = <&cmu 56>;
resets = <&cmu 23>;
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
index 89c3edd6a728..01316185e771 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
@@ -30,21 +30,21 @@ properties:
- items:
- enum:
# for Rockchip PX30
- - rockchip,px30-dw-mshc
+ - rockchip,px30-dw-mshc
# for Rockchip RK3036
- - rockchip,rk3036-dw-mshc
+ - rockchip,rk3036-dw-mshc
# for Rockchip RK322x
- - rockchip,rk3228-dw-mshc
+ - rockchip,rk3228-dw-mshc
# for Rockchip RK3308
- - rockchip,rk3308-dw-mshc
+ - rockchip,rk3308-dw-mshc
# for Rockchip RK3328
- - rockchip,rk3328-dw-mshc
+ - rockchip,rk3328-dw-mshc
# for Rockchip RK3368
- - rockchip,rk3368-dw-mshc
+ - rockchip,rk3368-dw-mshc
# for Rockchip RK3399
- - rockchip,rk3399-dw-mshc
+ - rockchip,rk3399-dw-mshc
# for Rockchip RV1108
- - rockchip,rv1108-dw-mshc
+ - rockchip,rv1108-dw-mshc
- const: rockchip,rk3288-dw-mshc
reg:
@@ -76,8 +76,7 @@ properties:
high speed modes.
rockchip,default-sample-phase:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 360
default: 0
@@ -87,8 +86,7 @@ properties:
If not specified 0 deg will be used.
rockchip,desired-num-phases:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 360
default: 360
@@ -111,7 +109,7 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
sdmmc: mmc@ff0c0000 {
compatible = "rockchip,rk3288-dw-mshc";
- reg = <0x0 0xff0c0000 0x0 0x4000>;
+ reg = <0xff0c0000 0x4000>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
<&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
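
A short sketch of the two Rockchip tuning properties constrained above, as they might be set from a board file; the node label and values are illustrative:

	&sdmmc {
		rockchip,default-sample-phase = <90>;	/* degrees, 0 - 360 */
		rockchip,desired-num-phases = <180>;	/* phases probed during tuning */
	};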
diff --git a/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml b/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml
index cdfac9b4411b..8d6413f48823 100644
--- a/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml
+++ b/Documentation/devicetree/bindings/mmc/socionext,uniphier-sd.yaml
@@ -35,15 +35,15 @@ properties:
oneOf:
- const: host
- items:
- - const: host
- - const: bridge
+ - const: host
+ - const: bridge
- items:
- - const: host
- - const: hw
+ - const: host
+ - const: hw
- items:
- - const: host
- - const: bridge
- - const: hw
+ - const: host
+ - const: bridge
+ - const: hw
resets:
minItems: 1
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml
index 890d47a87ac5..85bd528e9a14 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc-common.yaml
@@ -27,39 +27,35 @@ properties:
clock to this at probe time.
fifo-depth:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
The maximum size of the tx/rx fifo's. If this property is not
specified, the default value of the fifo size is determined from the
controller registers.
+ $ref: /schemas/types.yaml#/definitions/uint32
card-detect-delay:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - default: 0
description:
Delay in milli-seconds before detecting card after card
insert event. The default value is 0.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
data-addr:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
description:
Override the FIFO address with the value provided by DT. The default FIFO reg
offset is assumed to be 0x100 (version < 0x240A) and 0x200 (version >= 0x240A)
by the driver. If the controller does not follow this rule, use this
property to set the FIFO address in the device tree.
+ $ref: /schemas/types.yaml#/definitions/uint32
fifo-watermark-aligned:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/flag
description:
Data done irq is expected if data length is less than
watermark in PIO mode. But fifo watermark is requested to be aligned
with data length in some SoC so that TX/RX irq can be generated with
data done irq. Add this watermark quirk to mark this requirement and
force fifo watermark setting accordingly.
+ $ref: /schemas/types.yaml#/definitions/flag
dmas:
maxItems: 1
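
A hedged sketch of a DesignWare MSHC node using the properties above; the compatible, address and values are placeholders, and other required properties (interrupts, clocks) are elided:

	mmc@fe320000 {
		compatible = "snps,dw-mshc";		/* placeholder user of this schema */
		reg = <0xfe320000 0x4000>;
		fifo-depth = <0x100>;			/* overrides the register-probed depth */
		card-detect-delay = <200>;		/* debounce in ms after card insert */
		data-addr = <0x200>;			/* explicit FIFO register offset */
		fifo-watermark-aligned;
	};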
diff --git a/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml b/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
index 5d3fa412aabd..c033ac3f147d 100644
--- a/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
@@ -75,13 +75,12 @@ patternProperties:
allwinner,rb:
description:
Contains the native Ready/Busy IDs.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 1
- maxItems: 2
- items:
- minimum: 0
- maximum: 1
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
new file mode 100644
index 000000000000..db8f115a13ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/arasan,nand-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Arasan NAND Flash Controller with ONFI 3.1 support device tree bindings
+
+allOf:
+ - $ref: "nand-controller.yaml"
+
+maintainers:
+ - Naga Sureshkumar Relli <naga.sureshkumar.relli@xilinx.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - xlnx,zynqmp-nand-controller
+ - enum:
+ - arasan,nfc-v3p10
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Controller clock
+ - description: NAND bus clock
+
+ clock-names:
+ items:
+ - const: controller
+ - const: bus
+
+ interrupts:
+ maxItems: 1
+
+ "#address-cells": true
+ "#size-cells": true
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: true
+
+examples:
+ - |
+ nfc: nand-controller@ff100000 {
+ compatible = "xlnx,zynqmp-nand-controller", "arasan,nfc-v3p10";
+ reg = <0x0 0xff100000 0x0 0x1000>;
+ clock-names = "controller", "bus";
+ clocks = <&clk200>, <&clk100>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 14 4>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 05651a654c66..44335a4f8bfb 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -20,6 +20,8 @@ Required properties:
"brcm,brcmnand" and an appropriate version compatibility
string, like "brcm,brcmnand-v7.0"
Possible values:
+ brcm,brcmnand-v2.1
+ brcm,brcmnand-v2.2
brcm,brcmnand-v4.0
brcm,brcmnand-v5.0
brcm,brcmnand-v6.0
diff --git a/Documentation/devicetree/bindings/mtd/denali,nand.yaml b/Documentation/devicetree/bindings/mtd/denali,nand.yaml
index 46e6b6726bc0..c07b91592cbd 100644
--- a/Documentation/devicetree/bindings/mtd/denali,nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/denali,nand.yaml
@@ -54,8 +54,8 @@ properties:
reg: register reset
oneOf:
- items:
- - const: nand
- - const: reg
+ - const: nand
+ - const: reg
- const: nand
- const: reg
diff --git a/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt b/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt
deleted file mode 100644
index c02259353327..000000000000
--- a/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt
+++ /dev/null
@@ -1,92 +0,0 @@
-* Ingenic JZ4780 NAND/ECC
-
-This file documents the device tree bindings for NAND flash devices on the
-JZ4780. NAND devices are connected to the NEMC controller (described in
-memory-controllers/ingenic,jz4780-nemc.txt), and thus NAND device nodes must
-be children of the NEMC node.
-
-Required NAND controller device properties:
-- compatible: Should be one of:
- * ingenic,jz4740-nand
- * ingenic,jz4725b-nand
- * ingenic,jz4780-nand
-- reg: For each bank with a NAND chip attached, should specify a bank number,
- an offset of 0 and a size of 0x1000000 (i.e. the whole NEMC bank).
-
-Optional NAND controller device properties:
-- ecc-engine: To make use of the hardware ECC controller, this
- property must contain a phandle for the ECC controller node. The required
- properties for this node are described below. If this is not specified,
- software ECC will be used instead.
-
-Optional children nodes:
-- Individual NAND chips are children of the NAND controller node.
-
-Required children node properties:
-- reg: An integer ranging from 1 to 6 representing the CS line to use.
-
-Optional children node properties:
-- nand-ecc-step-size: ECC block size in bytes.
-- nand-ecc-strength: ECC strength (max number of correctable bits).
-- nand-ecc-mode: String, operation mode of the NAND ecc mode. "hw" by default
-- nand-on-flash-bbt: boolean to enable on flash bbt option, if not present false
-- rb-gpios: GPIO specifier for the busy pin.
-- wp-gpios: GPIO specifier for the write protect pin.
-
-Optional child node of NAND chip nodes:
-- partitions: see Documentation/devicetree/bindings/mtd/partition.txt
-
-Example:
-
-nemc: nemc@13410000 {
- ...
-
- nandc: nand-controller@1 {
- compatible = "ingenic,jz4780-nand";
- reg = <1 0 0x1000000>; /* Bank 1 */
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- ecc-engine = <&bch>;
-
- nand@1 {
- reg = <1>;
-
- nand-ecc-step-size = <1024>;
- nand-ecc-strength = <24>;
- nand-ecc-mode = "hw";
- nand-on-flash-bbt;
-
- rb-gpios = <&gpa 20 GPIO_ACTIVE_LOW>;
- wp-gpios = <&gpf 22 GPIO_ACTIVE_LOW>;
-
- partitions {
- #address-cells = <2>;
- #size-cells = <2>;
- ...
- }
- };
- };
-};
-
-The ECC controller is a separate SoC component used for error correction on
-NAND devices. The following is a description of the device properties for a
-ECC controller.
-
-Required ECC properties:
-- compatible: Should be one of:
- * ingenic,jz4740-ecc
- * ingenic,jz4725b-bch
- * ingenic,jz4780-bch
-- reg: Should specify the ECC controller registers location and length.
-- clocks: Clock for the ECC controller.
-
-Example:
-
-bch: bch@134d0000 {
- compatible = "ingenic,jz4780-bch";
- reg = <0x134d0000 0x10000>;
-
- clocks = <&cgu JZ4780_CLK_BCH>;
-};
diff --git a/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml b/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml
new file mode 100644
index 000000000000..8abb6d463cb6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/ingenic,nand.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/ingenic,nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs NAND controller devicetree bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+allOf:
+ - $ref: nand-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ingenic,jz4740-nand
+ - ingenic,jz4725b-nand
+ - ingenic,jz4780-nand
+
+ reg:
+ items:
+ - description: Bank number, offset and size of first attached NAND chip
+ - description: Bank number, offset and size of second attached NAND chip
+ - description: Bank number, offset and size of third attached NAND chip
+ - description: Bank number, offset and size of fourth attached NAND chip
+ minItems: 1
+
+ ecc-engine: true
+
+ partitions:
+ type: object
+ description:
+ Node containing description of fixed partitions.
+ See Documentation/devicetree/bindings/mtd/partition.txt
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ type: object
+ properties:
+ rb-gpios:
+ description: GPIO specifier for the busy pin.
+ maxItems: 1
+
+ wp-gpios:
+ description: GPIO specifier for the write-protect pin.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+ memory-controller@13410000 {
+ compatible = "ingenic,jz4780-nemc";
+ reg = <0x13410000 0x10000>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0x1b000000 0x1000000>,
+ <2 0 0x1a000000 0x1000000>,
+ <3 0 0x19000000 0x1000000>,
+ <4 0 0x18000000 0x1000000>,
+ <5 0 0x17000000 0x1000000>,
+ <6 0 0x16000000 0x1000000>;
+
+ clocks = <&cgu JZ4780_CLK_NEMC>;
+
+ nand-controller@1 {
+ compatible = "ingenic,jz4780-nand";
+ reg = <1 0 0x1000000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ecc-engine = <&bch>;
+
+ ingenic,nemc-tAS = <10>;
+ ingenic,nemc-tAH = <5>;
+ ingenic,nemc-tBP = <10>;
+ ingenic,nemc-tAW = <15>;
+ ingenic,nemc-tSTRV = <100>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc>;
+
+ nand@1 {
+ reg = <1>;
+
+ nand-ecc-step-size = <1024>;
+ nand-ecc-strength = <24>;
+ nand-ecc-mode = "hw";
+ nand-on-flash-bbt;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc_cs1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ partition@0 {
+ label = "u-boot-spl";
+ reg = <0x0 0x0 0x0 0x800000>;
+ };
+
+ partition@800000 {
+ label = "u-boot";
+ reg = <0x0 0x800000 0x0 0x200000>;
+ };
+
+ partition@a00000 {
+ label = "u-boot-env";
+ reg = <0x0 0xa00000 0x0 0x200000>;
+ };
+
+ partition@c00000 {
+ label = "boot";
+ reg = <0x0 0xc00000 0x0 0x4000000>;
+ };
+
+ partition@4c00000 {
+ label = "system";
+ reg = <0x0 0x4c00000 0x1 0xfb400000>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
index d261b7096c69..cde7c4d79efe 100644
--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
+++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
@@ -47,29 +47,26 @@ patternProperties:
Contains the native Ready/Busy IDs.
nand-ecc-mode:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string
- - enum: [ none, soft, hw, hw_syndrome, hw_oob_first, on-die ]
description:
Desired ECC engine, either hardware (most of the time
embedded in the NAND controller) or software correction
(Linux will handle the calculations). soft_bch is deprecated
and should be replaced by soft and nand-ecc-algo.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [none, soft, hw, hw_syndrome, hw_oob_first, on-die]
nand-ecc-algo:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string
- - enum: [ hamming, bch, rs ]
description:
Desired ECC algorithm.
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [hamming, bch, rs]
nand-bus-width:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 8, 16 ]
- - default: 8
description:
Bus width to the NAND chip
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [8, 16]
+ default: 8
nand-on-flash-bbt:
$ref: /schemas/types.yaml#/definitions/flag
@@ -83,18 +80,16 @@ patternProperties:
build a volatile BBT in RAM.
nand-ecc-strength:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 1
description:
Maximum number of bits that can be corrected per ECC step.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
nand-ecc-step-size:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 1
description:
Number of data bytes covered by a single ECC step.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
nand-ecc-maximize:
$ref: /schemas/types.yaml#/definitions/flag
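
A minimal sketch of a NAND chip node exercising the flattened ECC properties; the surrounding controller and the values are illustrative:

	nand-controller {
		#address-cells = <1>;
		#size-cells = <0>;

		nand@0 {
			reg = <0>;
			nand-bus-width = <8>;
			nand-ecc-mode = "hw";
			nand-ecc-algo = "bch";
			nand-ecc-strength = <8>;	/* correctable bits per ECC step */
			nand-ecc-step-size = <512>;	/* data bytes per ECC step */
			nand-on-flash-bbt;
		};
	};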
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index afbbd870496d..4a39698221a2 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -61,6 +61,9 @@ Optional properties:
clobbered.
- lock : Do not unlock the partition at initialization time (not supported on
all devices)
+- slc-mode: This parameter, if present, allows one to emulate SLC mode on a
+  partition attached to an MLC NAND, making this partition immune to
+  paired-page corruption
Examples:
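
A hedged sketch of the new slc-mode flag on a fixed partition; the label and offsets are placeholders:

	partitions {
		compatible = "fixed-partitions";
		#address-cells = <1>;
		#size-cells = <1>;

		partition@0 {
			label = "bootloader";		/* placeholder */
			reg = <0x0 0x100000>;
			slc-mode;			/* emulate SLC on this MLC partition */
		};
	};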
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index db36b4d86484..c7c9ad4e3f9f 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -19,8 +19,8 @@ properties:
- const: allwinner,sun8i-v3s-emac
- const: allwinner,sun50i-a64-emac
- items:
- - const: allwinner,sun50i-h6-emac
- - const: allwinner,sun50i-a64-emac
+ - const: allwinner,sun50i-h6-emac
+ - const: allwinner,sun50i-a64-emac
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/calxeda-xgmac.txt b/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
deleted file mode 100644
index c8ae996bd8f2..000000000000
--- a/Documentation/devicetree/bindings/net/calxeda-xgmac.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-* Calxeda Highbank 10Gb XGMAC Ethernet
-
-Required properties:
-- compatible : Should be "calxeda,hb-xgmac"
-- reg : Address and length of the register set for the device
-- interrupts : Should contain 3 xgmac interrupts. The 1st is main interrupt.
- The 2nd is pwr mgt interrupt. The 3rd is low power state interrupt.
-
-Optional properties:
-- dma-coherent : Present if dma operations are coherent
-
-Example:
-
-ethernet@fff50000 {
- compatible = "calxeda,hb-xgmac";
- reg = <0xfff50000 0x1000>;
- interrupts = <0 77 4 0 78 4 0 79 4>;
-};
diff --git a/Documentation/devicetree/bindings/net/calxeda-xgmac.yaml b/Documentation/devicetree/bindings/net/calxeda-xgmac.yaml
new file mode 100644
index 000000000000..c3ca26666ede
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/calxeda-xgmac.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/calxeda-xgmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Calxeda Highbank 10Gb XGMAC Ethernet controller
+
+description: |
+ The Calxeda XGMAC Ethernet controllers are directly connected to the
+ internal machine "network fabric", which is set up, initialised and
+ managed by the firmware. So there are no PHY properties in this
+ binding. Switches in the fabric take care of routing and mapping the
+ traffic to external network ports.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ compatible:
+ const: calxeda,hb-xgmac
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: |
+ Can point to at most 3 xgmac interrupts. The 1st one is the main
+ interrupt, the 2nd one is used for power management. The optional
+ 3rd one is the low power state interrupt.
+ minItems: 2
+ maxItems: 3
+
+ dma-coherent: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ ethernet@fff50000 {
+ compatible = "calxeda,hb-xgmac";
+ reg = <0xfff50000 0x1000>;
+ interrupts = <0 77 4>, <0 78 4>, <0 79 4>;
+ };
diff --git a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
index cccf8202c8f7..798fa5fb7bb2 100644
--- a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
+++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -9,7 +9,7 @@ title: Bosch MCAN controller Bindings
description: Bosch MCAN controller for CAN bus
maintainers:
- - Sriram Dash <sriram.dash@samsung.com>
+ - Sriram Dash <sriram.dash@samsung.com>
properties:
compatible:
@@ -51,61 +51,60 @@ properties:
bosch,mram-cfg:
description: |
- Message RAM configuration data.
- Multiple M_CAN instances can share the same Message RAM
- and each element(e.g Rx FIFO or Tx Buffer and etc) number
- in Message RAM is also configurable, so this property is
- telling driver how the shared or private Message RAM are
- used by this M_CAN controller.
-
- The format should be as follows:
- <offset sidf_elems xidf_elems rxf0_elems rxf1_elems rxb_elems txe_elems txb_elems>
- The 'offset' is an address offset of the Message RAM where
- the following elements start from. This is usually set to
- 0x0 if you're using a private Message RAM. The remain cells
- are used to specify how many elements are used for each FIFO/Buffer.
-
- M_CAN includes the following elements according to user manual:
- 11-bit Filter 0-128 elements / 0-128 words
- 29-bit Filter 0-64 elements / 0-128 words
- Rx FIFO 0 0-64 elements / 0-1152 words
- Rx FIFO 1 0-64 elements / 0-1152 words
- Rx Buffers 0-64 elements / 0-1152 words
- Tx Event FIFO 0-32 elements / 0-64 words
- Tx Buffers 0-32 elements / 0-576 words
-
- Please refer to 2.4.1 Message RAM Configuration in Bosch
- M_CAN user manual for details.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/int32-array
- - items:
- items:
- - description: The 'offset' is an address offset of the Message RAM
- where the following elements start from. This is usually
- set to 0x0 if you're using a private Message RAM.
- default: 0
- - description: 11-bit Filter 0-128 elements / 0-128 words
- minimum: 0
- maximum: 128
- - description: 29-bit Filter 0-64 elements / 0-128 words
- minimum: 0
- maximum: 64
- - description: Rx FIFO 0 0-64 elements / 0-1152 words
- minimum: 0
- maximum: 64
- - description: Rx FIFO 1 0-64 elements / 0-1152 words
- minimum: 0
- maximum: 64
- - description: Rx Buffers 0-64 elements / 0-1152 words
- minimum: 0
- maximum: 64
- - description: Tx Event FIFO 0-32 elements / 0-64 words
- minimum: 0
- maximum: 32
- - description: Tx Buffers 0-32 elements / 0-576 words
- minimum: 0
- maximum: 32
- maxItems: 1
+ Message RAM configuration data.
+ Multiple M_CAN instances can share the same Message RAM,
+ and the number of each element (e.g. Rx FIFO or Tx Buffer)
+ in the Message RAM is also configurable, so this property
+ tells the driver how the shared or private Message RAM is
+ used by this M_CAN controller.
+
+ The format should be as follows:
+ <offset sidf_elems xidf_elems rxf0_elems rxf1_elems rxb_elems txe_elems txb_elems>
+ The 'offset' is an address offset of the Message RAM where
+ the following elements start from. This is usually set to
+ 0x0 if you're using a private Message RAM. The remaining cells
+ are used to specify how many elements are used for each FIFO/Buffer.
+
+ M_CAN includes the following elements according to user manual:
+ 11-bit Filter 0-128 elements / 0-128 words
+ 29-bit Filter 0-64 elements / 0-128 words
+ Rx FIFO 0 0-64 elements / 0-1152 words
+ Rx FIFO 1 0-64 elements / 0-1152 words
+ Rx Buffers 0-64 elements / 0-1152 words
+ Tx Event FIFO 0-32 elements / 0-64 words
+ Tx Buffers 0-32 elements / 0-576 words
+
+ Please refer to 2.4.1 Message RAM Configuration in Bosch
+ M_CAN user manual for details.
+ $ref: /schemas/types.yaml#/definitions/int32-array
+ items:
+ items:
+ - description: The 'offset' is an address offset of the Message RAM where
+ the following elements start from. This is usually set to 0x0 if
+ you're using a private Message RAM.
+ default: 0
+ - description: 11-bit Filter 0-128 elements / 0-128 words
+ minimum: 0
+ maximum: 128
+ - description: 29-bit Filter 0-64 elements / 0-128 words
+ minimum: 0
+ maximum: 64
+ - description: Rx FIFO 0 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Rx FIFO 1 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Rx Buffers 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Tx Event FIFO 0-32 elements / 0-64 words
+ minimum: 0
+ maximum: 32
+ - description: Tx Buffers 0-32 elements / 0-576 words
+ minimum: 0
+ maximum: 32
+ maxItems: 1
can-transceiver:
$ref: can-transceiver.yaml#
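
Based on the 8-cell format described above, a hedged example of the property; other required M_CAN properties are elided and the element counts are illustrative but within the documented limits:

	can@20e8000 {
		compatible = "bosch,m_can";
		/* reg, clocks, interrupts and Message RAM phandle elided */
		bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
		/* offset, 11-bit filters, 29-bit filters, Rx FIFO 0,
		   Rx FIFO 1, Rx buffers, Tx event FIFO, Tx buffers */
	};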
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index ac471b60ed6a..1c4474036d46 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -14,25 +14,23 @@ properties:
pattern: "^ethernet(@.*)?$"
local-mac-address:
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint8-array
- - items:
- - minItems: 6
- maxItems: 6
description:
Specifies the MAC address that was assigned to the network device.
+ $ref: /schemas/types.yaml#definitions/uint8-array
+ items:
+ - minItems: 6
+ maxItems: 6
mac-address:
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint8-array
- - items:
- - minItems: 6
- maxItems: 6
description:
Specifies the MAC address that was last used by the boot
program; should be used in cases where the MAC address assigned
to the device by the boot program is different from the
local-mac-address property.
+ $ref: /schemas/types.yaml#definitions/uint8-array
+ items:
+ - minItems: 6
+ maxItems: 6
max-frame-size:
$ref: /schemas/types.yaml#definitions/uint32
@@ -133,15 +131,14 @@ properties:
is used for components that can have configurable fifo sizes.
managed:
- allOf:
- - $ref: /schemas/types.yaml#definitions/string
- - default: auto
- enum:
- - auto
- - in-band-status
description:
Specifies the PHY management type. If auto is set and fixed-link
is not specified, it uses MDIO for management.
+ $ref: /schemas/types.yaml#definitions/string
+ default: auto
+ enum:
+ - auto
+ - in-band-status
fixed-link:
allOf:
@@ -183,11 +180,10 @@ properties:
then:
properties:
speed:
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32
- - enum: [10, 100, 1000]
description:
Link speed.
+ $ref: /schemas/types.yaml#definitions/uint32
+ enum: [10, 100, 1000]
full-duplex:
$ref: /schemas/types.yaml#definitions/flag
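
A hypothetical MAC node sketch using the flattened properties; the compatible, address and MAC value are placeholders:

	ethernet@10000000 {
		compatible = "vendor,example-ethernet";		/* hypothetical */
		reg = <0x10000000 0x1000>;
		local-mac-address = [00 11 22 33 44 55];	/* placeholder MAC */

		fixed-link {
			speed = <1000>;				/* one of 10, 100, 1000 */
			full-duplex;
		};
	};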
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
index 5a6c9d20c0ba..1788884b8c28 100644
--- a/Documentation/devicetree/bindings/net/qca,ar803x.yaml
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -20,15 +20,13 @@ allOf:
properties:
qca,clk-out-frequency:
description: Clock output frequency in Hertz.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 25000000, 50000000, 62500000, 125000000 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [25000000, 50000000, 62500000, 125000000]
qca,clk-out-strength:
description: Clock output driver strength.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 0, 1, 2 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
qca,keep-pll-enabled:
description: |
@@ -52,17 +50,14 @@ properties:
type: object
description:
Initial data for the VDDIO regulator. Set this to 1.5V or 1.8V.
- allOf:
- - $ref: /schemas/regulator/regulator.yaml
+ $ref: /schemas/regulator/regulator.yaml
vddh-regulator:
type: object
description:
Dummy subnode to model the external connection of the PHY VDDH
regulator to VDDIO.
- allOf:
- - $ref: /schemas/regulator/regulator.yaml
-
+ $ref: /schemas/regulator/regulator.yaml
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index 7b749fc04c32..a3561276e609 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -93,16 +93,14 @@ properties:
- const: config
qcom,smem-states:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle-array
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: State bits used by the AP to signal the modem.
items:
- description: Whether the "ipa-clock-enabled" state bit is valid
- description: Whether the IPA clock is enabled (if valid)
qcom,smem-state-names:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string-array
+ $ref: /schemas/types.yaml#/definitions/string-array
description: The names of the state bits used for SMP2P output
items:
- const: ipa-clock-enabled-valid
@@ -172,9 +170,9 @@ examples:
modem-remoteproc = <&mss_pil>;
iommus = <&apps_smmu 0x720 0x3>;
- reg = <0 0x1e40000 0 0x7000>,
- <0 0x1e47000 0 0x2000>,
- <0 0x1e04000 0 0x2c000>;
+ reg = <0x1e40000 0x7000>,
+ <0x1e47000 0x2000>,
+ <0x1e04000 0x2c000>;
reg-names = "ipa-reg",
"ipa-shared",
"gsi";
diff --git a/Documentation/devicetree/bindings/net/renesas,ether.yaml b/Documentation/devicetree/bindings/net/renesas,ether.yaml
index 7f84df9790e2..08678af5ed93 100644
--- a/Documentation/devicetree/bindings/net/renesas,ether.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,ether.yaml
@@ -29,8 +29,9 @@ properties:
- renesas,rcar-gen1-ether # a generic R-Car Gen1 device
- items:
- enum:
- - renesas,ether-r8a7745 # device is a part of R8A7745 SoC
+ - renesas,ether-r8a7742 # device is a part of R8A7742 SoC
- renesas,ether-r8a7743 # device is a part of R8A7743 SoC
+ - renesas,ether-r8a7745 # device is a part of R8A7745 SoC
- renesas,ether-r8a7790 # device is a part of R8A7790 SoC
- renesas,ether-r8a7791 # device is a part of R8A7791 SoC
- renesas,ether-r8a7793 # device is a part of R8A7793 SoC
@@ -40,8 +41,8 @@ properties:
reg:
items:
- - description: E-DMAC/feLic registers
- - description: TSU registers
+ - description: E-DMAC/feLic registers
+ - description: TSU registers
minItems: 1
interrupts:
@@ -92,7 +93,7 @@ examples:
ethernet@ee700000 {
compatible = "renesas,ether-r8a7790", "renesas,rcar-gen2-ether";
- reg = <0 0xee700000 0 0x400>;
+ reg = <0xee700000 0x400>;
interrupt-parent = <&gic>;
interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 87dad2dd8ca0..032b76f14f4f 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -5,6 +5,7 @@ interface contains.
Required properties:
- compatible: Must contain one or more of the following:
+ - "renesas,etheravb-r8a7742" for the R8A7742 SoC.
- "renesas,etheravb-r8a7743" for the R8A7743 SoC.
- "renesas,etheravb-r8a7744" for the R8A7744 SoC.
- "renesas,etheravb-r8a7745" for the R8A7745 SoC.
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index e08cd4c4d568..30a1efd26626 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -27,6 +27,7 @@ select:
- snps,dwmac-3.710
- snps,dwmac-4.00
- snps,dwmac-4.10a
+ - snps,dwmac-4.20a
- snps,dwxgmac
- snps,dwxgmac-2.10
@@ -62,6 +63,7 @@ properties:
- snps,dwmac-3.710
- snps,dwmac-4.00
- snps,dwmac-4.10a
+ - snps,dwmac-4.20a
- snps,dwxgmac
- snps,dwxgmac-2.10
@@ -87,7 +89,8 @@ properties:
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 5
+ additionalItems: true
items:
- description: GMAC main clock
- description: Peripheral registers interface clock
@@ -97,6 +100,8 @@ properties:
clock will be used and this is fine on some platforms.
clock-names:
+ minItems: 1
+ maxItems: 5
additionalItems: true
contains:
enum:
@@ -199,14 +204,13 @@ properties:
snps,reset-delays-us:
deprecated: true
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32-array
- - minItems: 3
- maxItems: 3
description:
Triplet of delays. The 1st cell is reset pre-delay in micro
seconds. The 2nd cell is reset pulse in micro seconds. The 3rd
cell is reset post-delay in micro seconds.
+ $ref: /schemas/types.yaml#definitions/uint32-array
+ minItems: 3
+ maxItems: 3
snps,aal:
$ref: /schemas/types.yaml#definitions/flag
@@ -301,27 +305,24 @@ allOf:
then:
properties:
snps,pbl:
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32
- - enum: [2, 4, 8]
description:
Programmable Burst Length (tx and rx)
+ $ref: /schemas/types.yaml#definitions/uint32
+ enum: [2, 4, 8]
snps,txpbl:
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32
- - enum: [2, 4, 8]
description:
Tx Programmable Burst Length. If set, DMA tx will use this
value rather than snps,pbl.
+ $ref: /schemas/types.yaml#definitions/uint32
+ enum: [2, 4, 8]
snps,rxpbl:
- allOf:
- - $ref: /schemas/types.yaml#definitions/uint32
- - enum: [2, 4, 8]
description:
Rx Programmable Burst Length. If set, DMA rx will use this
value rather than snps,pbl.
+ $ref: /schemas/types.yaml#definitions/uint32
+ enum: [2, 4, 8]
snps,no-pbl-x8:
$ref: /schemas/types.yaml#definitions/flag
@@ -342,6 +343,7 @@ allOf:
- allwinner,sun50i-a64-emac
- snps,dwmac-4.00
- snps,dwmac-4.10a
+ - snps,dwmac-4.20a
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac
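
A short sketch of the burst-length properties in a board file; the node label is a placeholder:

	&gmac {
		snps,pbl = <8>;		/* burst length used for both directions */
		snps,txpbl = <4>;	/* overrides snps,pbl for Tx DMA */
		snps,rxpbl = <2>;	/* overrides snps,pbl for Rx DMA */
	};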
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.txt b/Documentation/devicetree/bindings/net/stm32-dwmac.txt
deleted file mode 100644
index a90eef11dc46..000000000000
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-STMicroelectronics STM32 / MCU DWMAC glue layer controller
-
-This file documents platform glue layer for stmmac.
-Please see stmmac.txt for the other unchanged properties.
-
-The device node has following properties.
-
-Required properties:
-- compatible: For MCU family should be "st,stm32-dwmac" to select glue, and
- "snps,dwmac-3.50a" to select IP version.
- For MPU family should be "st,stm32mp1-dwmac" to select
- glue, and "snps,dwmac-4.20a" to select IP version.
-- clocks: Must contain a phandle for each entry in clock-names.
-- clock-names: Should be "stmmaceth" for the host clock.
- Should be "mac-clk-tx" for the MAC TX clock.
- Should be "mac-clk-rx" for the MAC RX clock.
- For MPU family need to add also "ethstp" for power mode clock
-- interrupt-names: Should contain a list of interrupt names corresponding to
- the interrupts in the interrupts property, if available.
- Should be "macirq" for the main MAC IRQ
- Should be "eth_wake_irq" for the IT which wake up system
-- st,syscon : Should be phandle/offset pair. The phandle to the syscon node which
- encompases the glue register, and the offset of the control register.
-
-Optional properties:
-- clock-names: For MPU family "eth-ck" for PHY without quartz
-- st,eth-clk-sel (boolean) : set this property in RGMII PHY when you want to select RCC clock instead of ETH_CLK125.
-- st,eth-ref-clk-sel (boolean) : set this property in RMII mode when you have PHY without crystal 50MHz and want to select RCC clock instead of ETH_REF_CLK.
-
-Example:
-
- ethernet@40028000 {
- compatible = "st,stm32-dwmac", "snps,dwmac-3.50a";
- reg = <0x40028000 0x8000>;
- reg-names = "stmmaceth";
- interrupts = <0 61 0>, <0 62 0>;
- interrupt-names = "macirq", "eth_wake_irq";
- clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx";
- clocks = <&rcc 0 25>, <&rcc 0 26>, <&rcc 0 27>;
- st,syscon = <&syscfg 0x4>;
- snps,pbl = <8>;
- snps,mixed-burst;
- dma-ranges;
- };
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
new file mode 100644
index 000000000000..fafa34cebdb1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -0,0 +1,148 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/net/stm32-dwmac.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: STMicroelectronics STM32 / MCU DWMAC glue layer controller
+
+maintainers:
+ - Alexandre Torgue <alexandre.torgue@st.com>
+ - Christophe Roullier <christophe.roullier@st.com>
+
+description:
+ This file documents the platform glue layer for stmmac.
+
+# We need a select here so we don't match all nodes with 'snps,dwmac'
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - st,stm32-dwmac
+ - st,stm32mp1-dwmac
+ required:
+ - compatible
+
+allOf:
+ - $ref: "snps,dwmac.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - st,stm32mp1-dwmac
+ - const: snps,dwmac-4.20a
+ - items:
+ - enum:
+ - st,stm32-dwmac
+ - const: snps,dwmac-4.10a
+ - items:
+ - enum:
+ - st,stm32-dwmac
+ - const: snps,dwmac-3.50a
+
+ clocks:
+ minItems: 3
+ maxItems: 5
+ items:
+ - description: GMAC main clock
+ - description: MAC TX clock
+ - description: MAC RX clock
+ - description: For MPU family, used for power mode
+ - description: For MPU family, used for PHY without quartz
+
+ clock-names:
+ minItems: 3
+ maxItems: 5
+ contains:
+ enum:
+ - stmmaceth
+ - mac-clk-tx
+ - mac-clk-rx
+ - ethstp
+ - eth-ck
+
+ st,syscon:
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ description:
+ Should be phandle/offset pair. The phandle to the syscon node which
+ encompasses the glue register, and the offset of the control register
+
+ st,eth-clk-sel:
+ description:
+ Set this property for an RGMII PHY when you want to select the RCC clock instead of ETH_CLK125.
+ type: boolean
+
+ st,eth-ref-clk-sel:
+ description:
+ Set this property in RMII mode when the PHY has no 50 MHz crystal and you want to
+ select the RCC clock instead of ETH_REF_CLK.
+ type: boolean
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - st,syscon
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/stm32mp1-clks.h>
+ #include <dt-bindings/reset/stm32mp1-resets.h>
+ #include <dt-bindings/mfd/stm32h7-rcc.h>
+ //Example 1
+ ethernet0: ethernet@5800a000 {
+ compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
+ reg = <0x5800a000 0x2000>;
+ reg-names = "stmmaceth";
+ interrupts = <&intc GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc ETHMAC>,
+ <&rcc ETHTX>,
+ <&rcc ETHRX>,
+ <&rcc ETHSTP>,
+ <&rcc ETHCK_K>;
+ st,syscon = <&syscfg 0x4>;
+ snps,pbl = <2>;
+ snps,axi-config = <&stmmac_axi_config_0>;
+ snps,tso;
+ phy-mode = "rgmii";
+ };
+
+ //Example 2 (MCU example)
+ ethernet1: ethernet@40028000 {
+ compatible = "st,stm32-dwmac", "snps,dwmac-3.50a";
+ reg = <0x40028000 0x8000>;
+ reg-names = "stmmaceth";
+ interrupts = <0 61 0>, <0 62 0>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx";
+ clocks = <&rcc 0 25>, <&rcc 0 26>, <&rcc 0 27>;
+ st,syscon = <&syscfg 0x4>;
+ snps,pbl = <8>;
+ snps,mixed-burst;
+ phy-mode = "mii";
+ };
+
+ //Example 3
+ ethernet2: ethernet@40027000 {
+ compatible = "st,stm32-dwmac", "snps,dwmac-4.10a";
+ reg = <0x40028000 0x8000>;
+ reg-names = "stmmaceth";
+ interrupts = <61>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx";
+ clocks = <&rcc 62>, <&rcc 61>, <&rcc 60>;
+ st,syscon = <&syscfg 0x4>;
+ snps,pbl = <8>;
+ phy-mode = "mii";
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index 976f139bb66e..3ea0e1290dbb 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -23,14 +23,14 @@ properties:
oneOf:
- const: ti,cpsw-switch
- items:
- - const: ti,am335x-cpsw-switch
- - const: ti,cpsw-switch
+ - const: ti,am335x-cpsw-switch
+ - const: ti,cpsw-switch
- items:
- - const: ti,am4372-cpsw-switch
- - const: ti,cpsw-switch
+ - const: ti,am4372-cpsw-switch
+ - const: ti,cpsw-switch
- items:
- - const: ti,dra7-cpsw-switch
- - const: ti,cpsw-switch
+ - const: ti,dra7-cpsw-switch
+ - const: ti,cpsw-switch
reg:
maxItems: 1
@@ -105,8 +105,7 @@ properties:
description: label associated with this port
ti,dual-emac-pvid:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 1024
description:
@@ -150,10 +149,9 @@ properties:
patternProperties:
"^mdio@":
type: object
- allOf:
- - $ref: "ti,davinci-mdio.yaml#"
description:
CPSW MDIO bus.
+ $ref: "ti,davinci-mdio.yaml#"
required:
diff --git a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
index 242ac4935a4b..d454c1fab930 100644
--- a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
@@ -18,33 +18,31 @@ allOf:
properties:
compatible:
oneOf:
- - const: ti,davinci_mdio
- - items:
- - const: ti,keystone_mdio
- - const: ti,davinci_mdio
- - items:
- - const: ti,cpsw-mdio
- - const: ti,davinci_mdio
- - items:
- - const: ti,am4372-mdio
- - const: ti,cpsw-mdio
- - const: ti,davinci_mdio
+ - const: ti,davinci_mdio
+ - items:
+ - const: ti,keystone_mdio
+ - const: ti,davinci_mdio
+ - items:
+ - const: ti,cpsw-mdio
+ - const: ti,davinci_mdio
+ - items:
+ - const: ti,am4372-mdio
+ - const: ti,cpsw-mdio
+ - const: ti,davinci_mdio
reg:
maxItems: 1
bus_freq:
- maximum: 2500000
- description:
- MDIO Bus frequency
+ maximum: 2500000
+ description: MDIO Bus frequency
ti,hwmods:
description: TI hwmod name
deprecated: true
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string-array
- - items:
- const: davinci_mdio
+ $ref: /schemas/types.yaml#/definitions/string-array
+ items:
+ const: davinci_mdio
if:
properties:
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index c87395f360a6..71d9e6c1c72e 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -103,8 +103,7 @@ properties:
type: object
description: CPSW2G NUSS external ports
- allOf:
- - $ref: ethernet-controller.yaml#
+ $ref: ethernet-controller.yaml#
properties:
reg:
@@ -139,8 +138,8 @@ properties:
patternProperties:
"^mdio@[0-9a-f]+$":
type: object
- allOf:
- - $ref: "ti,davinci-mdio.yaml#"
+ $ref: "ti,davinci-mdio.yaml#"
+
description:
CPSW MDIO bus.
@@ -174,37 +173,41 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
- mcu_cpsw: ethernet@46000000 {
- compatible = "ti,am654-cpsw-nuss";
+ bus {
#address-cells = <2>;
#size-cells = <2>;
- reg = <0x0 0x46000000 0x0 0x200000>;
- reg-names = "cpsw_nuss";
- ranges = <0x0 0x0 0x46000000 0x0 0x200000>;
- dma-coherent;
- clocks = <&k3_clks 5 10>;
- clock-names = "fck";
- power-domains = <&k3_pds 5 TI_SCI_PD_EXCLUSIVE>;
- pinctrl-names = "default";
- pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
-
- dmas = <&mcu_udmap 0xf000>,
- <&mcu_udmap 0xf001>,
- <&mcu_udmap 0xf002>,
- <&mcu_udmap 0xf003>,
- <&mcu_udmap 0xf004>,
- <&mcu_udmap 0xf005>,
- <&mcu_udmap 0xf006>,
- <&mcu_udmap 0xf007>,
- <&mcu_udmap 0x7000>;
- dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
- "rx";
-
- ethernet-ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpsw_port1: port@1 {
+
+ mcu_cpsw: ethernet@46000000 {
+ compatible = "ti,am654-cpsw-nuss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0x0 0x46000000 0x0 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x0 0x0 0x0 0x46000000 0x0 0x200000>;
+ dma-coherent;
+ clocks = <&k3_clks 5 10>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 5 TI_SCI_PD_EXCLUSIVE>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
+
+ dmas = <&mcu_udmap 0xf000>,
+ <&mcu_udmap 0xf001>,
+ <&mcu_udmap 0xf002>,
+ <&mcu_udmap 0xf003>,
+ <&mcu_udmap 0xf004>,
+ <&mcu_udmap 0xf005>,
+ <&mcu_udmap 0xf006>,
+ <&mcu_udmap 0xf007>,
+ <&mcu_udmap 0x7000>;
+ dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "rx";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
reg = <1>;
ti,mac-only;
label = "port1";
@@ -213,23 +216,24 @@ examples:
phy-mode = "rgmii-rxid";
phy-handle = <&phy0>;
- };
- };
-
- davinci_mdio: mdio@f00 {
- compatible = "ti,cpsw-mdio","ti,davinci_mdio";
- reg = <0x0 0xf00 0x0 0x100>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&k3_clks 5 10>;
- clock-names = "fck";
- bus_freq = <1000000>;
-
- phy0: ethernet-phy@0 {
+ };
+ };
+
+ davinci_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x0 0xf00 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 5 10>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+
+ phy0: ethernet-phy@0 {
reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
- };
+ };
+ };
};
cpts@3d000 {
diff --git a/Documentation/devicetree/bindings/nvmem/imx-iim.txt b/Documentation/devicetree/bindings/nvmem/imx-iim.txt
deleted file mode 100644
index 1978c5bcd96d..000000000000
--- a/Documentation/devicetree/bindings/nvmem/imx-iim.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Freescale i.MX IC Identification Module (IIM) device tree bindings
-
-This binding represents the IC Identification Module (IIM) found on
-i.MX25, i.MX27, i.MX31, i.MX35, i.MX51 and i.MX53 SoCs.
-
-Required properties:
-- compatible: should be one of
- "fsl,imx25-iim", "fsl,imx27-iim",
- "fsl,imx31-iim", "fsl,imx35-iim",
- "fsl,imx51-iim", "fsl,imx53-iim",
-- reg: Should contain the register base and length.
-- interrupts: Should contain the interrupt for the IIM
-- clocks: Should contain a phandle pointing to the gated peripheral clock.
-
-Example:
-
- iim: iim@63f98000 {
- compatible = "fsl,imx53-iim", "fsl,imx27-iim";
- reg = <0x63f98000 0x4000>;
- interrupts = <69>;
- clocks = <&clks IMX5_CLK_IIM_GATE>;
- };
diff --git a/Documentation/devicetree/bindings/nvmem/imx-iim.yaml b/Documentation/devicetree/bindings/nvmem/imx-iim.yaml
new file mode 100644
index 000000000000..9cc43e7a4b38
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/imx-iim.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/imx-iim.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX IC Identification Module (IIM) device tree bindings
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ This binding represents the IC Identification Module (IIM) found on
+ i.MX25, i.MX27, i.MX31, i.MX35, i.MX51 and i.MX53 SoCs.
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx25-iim
+ - fsl,imx27-iim
+ - fsl,imx31-iim
+ - fsl,imx35-iim
+ - fsl,imx51-iim
+ - fsl,imx53-iim
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx5-clock.h>
+
+ iim: efuse@63f98000 {
+ compatible = "fsl,imx53-iim";
+ reg = <0x63f98000 0x4000>;
+ interrupts = <69>;
+ clocks = <&clks IMX5_CLK_IIM_GATE>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
deleted file mode 100644
index 6e346d5cddcf..000000000000
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
-
-This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
-i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN and i.MX8MP SoCs.
-
-Required properties:
-- compatible: should be one of
- "fsl,imx6q-ocotp" (i.MX6Q/D/DL/S),
- "fsl,imx6sl-ocotp" (i.MX6SL), or
- "fsl,imx6sx-ocotp" (i.MX6SX),
- "fsl,imx6ul-ocotp" (i.MX6UL),
- "fsl,imx6ull-ocotp" (i.MX6ULL/ULZ),
- "fsl,imx7d-ocotp" (i.MX7D/S),
- "fsl,imx6sll-ocotp" (i.MX6SLL),
- "fsl,imx7ulp-ocotp" (i.MX7ULP),
- "fsl,imx8mq-ocotp" (i.MX8MQ),
- "fsl,imx8mm-ocotp" (i.MX8MM),
- "fsl,imx8mn-ocotp" (i.MX8MN),
- "fsl,imx8mp-ocotp" (i.MX8MP),
- followed by "syscon".
-- #address-cells : Should be 1
-- #size-cells : Should be 1
-- reg: Should contain the register base and length.
-- clocks: Should contain a phandle pointing to the gated peripheral clock.
-
-Optional properties:
-- read-only: disable write access
-
-Optional Child nodes:
-
-- Data cells of ocotp:
- Detailed bindings are described in bindings/nvmem/nvmem.txt
-
-Example:
- ocotp: ocotp@21bc000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,imx6sx-ocotp", "syscon";
- reg = <0x021bc000 0x4000>;
- clocks = <&clks IMX6SX_CLK_OCOTP>;
-
- tempmon_calib: calib@38 {
- reg = <0x38 4>;
- };
-
- tempmon_temp_grade: temp-grade@20 {
- reg = <0x20 4>;
- };
- };
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
new file mode 100644
index 000000000000..fe9c7df78ea1
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/imx-ocotp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ This binding represents the on-chip eFuse OTP controller found on
+ i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
+ i.MX7D/S, i.MX7ULP, i.MX8MQ, i.MX8MM, i.MX8MN and i.MX8MP SoCs.
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - fsl,imx6q-ocotp
+ - fsl,imx6sl-ocotp
+ - fsl,imx6sx-ocotp
+ - fsl,imx6ul-ocotp
+ - fsl,imx6ull-ocotp
+ - fsl,imx7d-ocotp
+ - fsl,imx6sll-ocotp
+ - fsl,imx7ulp-ocotp
+ - fsl,imx8mq-ocotp
+ - fsl,imx8mm-ocotp
+ - fsl,imx8mn-ocotp
+ - fsl,imx8mp-ocotp
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+ - compatible
+ - reg
+
+patternProperties:
+ "^.*@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ maxItems: 1
+ description:
+ Offset and size in bytes within the storage device.
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6sx-clock.h>
+
+ ocotp: efuse@21bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,imx6sx-ocotp", "syscon";
+ reg = <0x021bc000 0x4000>;
+ clocks = <&clks IMX6SX_CLK_OCOTP>;
+
+ cpu_speed_grade: speed-grade@10 {
+ reg = <0x10 4>;
+ };
+
+ tempmon_calib: calib@38 {
+ reg = <0x38 4>;
+ };
+
+ tempmon_temp_grade: temp-grade@20 {
+ reg = <0x20 4>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt
deleted file mode 100644
index 372c72fd64dc..000000000000
--- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-On-Chip OTP Memory for Freescale i.MX23/i.MX28
-
-Required properties :
-- compatible :
- - "fsl,imx23-ocotp" for i.MX23
- - "fsl,imx28-ocotp" for i.MX28
-- #address-cells : Should be 1
-- #size-cells : Should be 1
-- reg : Address and length of OTP controller registers
-- clocks : Should contain a reference to the hbus clock
-
-= Data cells =
-Are child nodes of mxs-ocotp, bindings of which as described in
-bindings/nvmem/nvmem.txt
-
-Example for i.MX28:
-
- ocotp: ocotp@8002c000 {
- compatible = "fsl,imx28-ocotp", "fsl,ocotp";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x8002c000 0x2000>;
- clocks = <&clks 25>;
- };
diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
new file mode 100644
index 000000000000..ff317fd7c15b
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/mxs-ocotp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: On-Chip OTP Memory for Freescale i.MX23/i.MX28
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx23-ocotp
+ - fsl,imx28-ocotp
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ ocotp: efuse@8002c000 {
+ compatible = "fsl,imx28-ocotp";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x8002c000 0x2000>;
+ clocks = <&clks 25>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index 65980224d550..b459f9dba6c9 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -67,8 +67,6 @@ patternProperties:
required:
- reg
- additionalProperties: false
-
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
deleted file mode 100644
index 265bdb7dc8aa..000000000000
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-= Rockchip eFuse device tree bindings =
-
-Required properties:
-- compatible: Should be one of the following.
- - "rockchip,rk3066a-efuse" - for RK3066a SoCs.
- - "rockchip,rk3188-efuse" - for RK3188 SoCs.
- - "rockchip,rk3228-efuse" - for RK3228 SoCs.
- - "rockchip,rk3288-efuse" - for RK3288 SoCs.
- - "rockchip,rk3328-efuse" - for RK3328 SoCs.
- - "rockchip,rk3368-efuse" - for RK3368 SoCs.
- - "rockchip,rk3399-efuse" - for RK3399 SoCs.
-- reg: Should contain the registers location and exact eFuse size
-- clocks: Should be the clock id of eFuse
-- clock-names: Should be "pclk_efuse"
-
-Optional properties:
-- rockchip,efuse-size: Should be exact eFuse size in byte, the eFuse
- size in property <reg> will be invalid if define this property.
-
-Deprecated properties:
-- compatible: "rockchip,rockchip-efuse"
- Old efuse compatible value compatible to rk3066a, rk3188 and rk3288
- efuses
-
-= Data cells =
-Are child nodes of eFuse, bindings of which as described in
-bindings/nvmem/nvmem.txt
-
-Example:
-
- efuse: efuse@ffb40000 {
- compatible = "rockchip,rk3288-efuse";
- reg = <0xffb40000 0x20>;
- #address-cells = <1>;
- #size-cells = <1>;
- clocks = <&cru PCLK_EFUSE256>;
- clock-names = "pclk_efuse";
-
- /* Data cells */
- cpu_leakage: cpu_leakage {
- reg = <0x17 0x1>;
- };
- };
-
-= Data consumers =
-Are device nodes which consume nvmem data cells.
-
-Example:
-
- cpu_leakage {
- ...
- nvmem-cells = <&cpu_leakage>;
- nvmem-cell-names = "cpu_leakage";
- };
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml
new file mode 100644
index 000000000000..3ae00b0b23bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/rockchip-efuse.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip eFuse device tree bindings
+
+maintainers:
+ - Heiko Stuebner <heiko@sntech.de>
+
+allOf:
+ - $ref: "nvmem.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3066a-efuse
+ - rockchip,rk3188-efuse
+ - rockchip,rk3228-efuse
+ - rockchip,rk3288-efuse
+ - rockchip,rk3328-efuse
+ - rockchip,rk3368-efuse
+ - rockchip,rk3399-efuse
+
+ # Deprecated: old compatible value for rk3066a, rk3188 and rk3288
+ - rockchip,rockchip-efuse
+
+ reg:
+ description:
+ Location of the registers and exact eFuse size.
+ maxItems: 1
+
+ clocks:
+ description:
+ eFuse clock id.
+ maxItems: 1
+
+ clock-names:
+ const: pclk_efuse
+
+ rockchip,efuse-size:
+ description:
+ eFuse size in bytes. If this property is defined, the eFuse size given in
+ the <reg> property is ignored.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+ efuse: efuse@ffb40000 {
+ compatible = "rockchip,rk3288-efuse";
+ reg = <0xffb40000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru PCLK_EFUSE256>;
+ clock-names = "pclk_efuse";
+
+ /* Data cells */
+ cpu_leakage: cpu_leakage@17 {
+ reg = <0x17 0x1>;
+ };
+ };
+...
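The consumer example from the removed text binding is not carried over into the schema; for reference, a node consuming the cell defined above would look roughly like this (the consumer node itself is illustrative):

    cpu0: cpu@500 {
        /* reference the cpu_leakage data cell exposed by the eFuse */
        nvmem-cells = <&cpu_leakage>;
        nvmem-cell-names = "cpu_leakage";
    };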
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
index d84deb4774a4..c11c99f085d7 100644
--- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
@@ -24,6 +24,18 @@ properties:
- st,stm32f4-otp
- st,stm32mp15-bsec
+patternProperties:
+ "^.*@[0-9a-f]+$":
+ type: object
+
+ properties:
+ st,non-secure-otp:
+ description: |
+ This property marks a factory programmed area that both secure
+ and non-secure worlds can access. It is needed when, by default, the
+ related area can only be reached by the secure world.
+ type: boolean
+
required:
- "#address-cells"
- "#size-cells"
@@ -41,6 +53,11 @@ examples:
calib@22c {
reg = <0x22c 0x2>;
};
+
+ mac_addr@e4 {
+ reg = <0xe4 0x8>;
+ st,non-secure-otp;
+ };
};
...
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index 68592271461f..9d16d417e9be 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -83,9 +83,14 @@ properties.
Required properties:
- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer. This is a
- required property for all device nodes but devices like power domains. The
- power domain nodes must have another (implementation dependent) property which
- uniquely identifies the OPP nodes.
+ required property for all device nodes, unless another "required" property
+ exists to uniquely identify the OPP nodes. Devices like power domains must
+ instead have another (implementation dependent) property.
+
+- opp-peak-kBps: Peak bandwidth in kilobytes per second, expressed as an array
+ of 32-bit big-endian integers. Each element of the array represents the
+ peak bandwidth value of each interconnect path. The number of elements should
+ match the number of interconnect paths.
Optional properties:
- opp-microvolt: voltage in micro Volts.
@@ -132,6 +137,12 @@ Optional properties:
- opp-level: A value representing the performance level of the device,
expressed as a 32-bit integer.
+- opp-avg-kBps: Average bandwidth in kilobytes per second, expressed as an array
+ of 32-bit big-endian integers. Each element of the array represents the
+ average bandwidth value of each interconnect path. The number of elements
+ should match the number of interconnect paths. This property is only
+ meaningful in OPP tables where opp-peak-kBps is present.
+
- clock-latency-ns: Specifies the maximum possible transition latency (in
nanoseconds) for switching to this OPP from any other OPP.
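The hunks above describe opp-peak-kBps and opp-avg-kBps in prose only; a minimal sketch of an OPP entry carrying them, assuming a single interconnect path (node names and numeric values are illustrative, not taken from the patch):

    opp_table: opp-table {
        compatible = "operating-points-v2";

        opp-200000000 {
            opp-hz = /bits/ 64 <200000000>;
            /* one array element per interconnect path */
            opp-peak-kBps = <800000>;
            opp-avg-kBps = <400000>;
        };
    };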
diff --git a/Documentation/devicetree/bindings/pci/aardvark-pci.txt b/Documentation/devicetree/bindings/pci/aardvark-pci.txt
index 310ef7145c47..2b8ca920a7fa 100644
--- a/Documentation/devicetree/bindings/pci/aardvark-pci.txt
+++ b/Documentation/devicetree/bindings/pci/aardvark-pci.txt
@@ -19,6 +19,9 @@ contain the following properties:
- interrupt-map-mask and interrupt-map: standard PCI properties to
define the mapping of the PCIe interface to interrupt numbers.
- bus-range: PCI bus numbers covered
+ - phys: the PCIe PHY handle
+ - max-link-speed: see pci.txt
+ - reset-gpios: see pci.txt
In addition, the Device Tree describing an Aardvark PCIe controller
must include a sub-node that describes the legacy interrupt controller
@@ -48,6 +51,7 @@ Example:
<0 0 0 2 &pcie_intc 1>,
<0 0 0 3 &pcie_intc 2>,
<0 0 0 4 &pcie_intc 3>;
+ phys = <&comphy1 0>;
pcie_intc: interrupt-controller {
interrupt-controller;
#interrupt-cells = <1>;
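The phys line above is now part of the example; the other two newly listed properties are the standard ones from pci.txt and, purely for illustration, could sit alongside it as follows (the GPIO phandle and values are assumptions, with GPIO_ACTIVE_LOW coming from dt-bindings/gpio/gpio.h):

        max-link-speed = <2>;
        reset-gpios = <&gpiob 3 GPIO_ACTIVE_LOW>;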
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 77d3e81a437b..8680a0f86c5a 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -56,6 +56,8 @@ properties:
description: Indicates usage of spread-spectrum clocking.
type: boolean
+ aspm-no-l0s: true
+
required:
- reg
- dma-ranges
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
index 2996f8d4777c..50ce5d79d2c7 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
@@ -10,7 +10,7 @@ maintainers:
- Tom Joseph <tjoseph@cadence.com>
allOf:
- - $ref: "cdns-pcie.yaml#"
+ - $ref: "cdns-pcie-ep.yaml#"
- $ref: "pci-ep.yaml#"
properties:
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
index cabbe46ff578..84a8f095d031 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-host.yaml
@@ -45,8 +45,6 @@ examples:
#size-cells = <2>;
bus-range = <0x0 0xff>;
linux,pci-domain = <0>;
- cdns,max-outbound-regions = <16>;
- cdns,no-bar-match-nbits = <32>;
vendor-id = <0x17cd>;
device-id = <0x0200>;
@@ -57,6 +55,7 @@ examples:
ranges = <0x02000000 0x0 0x42000000 0x0 0x42000000 0x0 0x1000000>,
<0x01000000 0x0 0x43000000 0x0 0x43000000 0x0 0x0010000>;
+ dma-ranges = <0x02000000 0x0 0x0 0x0 0x0 0x1 0x00000000>;
#interrupt-cells = <0x1>;
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml
new file mode 100644
index 000000000000..6150a7a7bdbf
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/pci/cdns-pcie-ep.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence PCIe Device
+
+maintainers:
+ - Tom Joseph <tjoseph@cadence.com>
+
+allOf:
+ - $ref: "cdns-pcie.yaml#"
+
+properties:
+ cdns,max-outbound-regions:
+ description: maximum number of outbound regions
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+ default: 32
+
+required:
+ - cdns,max-outbound-regions
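This shared endpoint schema carries no example of its own; a sketch of a Cadence endpoint node using the property might look as follows (the node name, unit address and value are assumptions, and the remaining properties come from cdns-pcie.yaml and pci-ep.yaml):

    pcie-ep@fc000000 {
        compatible = "cdns,cdns-pcie-ep";
        /* allow up to 16 outbound address translation regions */
        cdns,max-outbound-regions = <16>;
    };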
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
index ab6e43b636ec..c87a3a36ccd2 100644
--- a/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie-host.yaml
@@ -14,14 +14,23 @@ allOf:
- $ref: "cdns-pcie.yaml#"
properties:
+ cdns,max-outbound-regions:
+ description: maximum number of outbound regions
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+ default: 32
+ deprecated: true
+
cdns,no-bar-match-nbits:
description:
Set into the no BAR match register to configure the number of least
significant bits kept during inbound (PCIe -> AXI) address translations
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 64
default: 32
+ deprecated: true
msi-parent: true
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie.yaml
index 6887ccc339cc..02553d5e6c51 100644
--- a/Documentation/devicetree/bindings/pci/cdns-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie.yaml
@@ -10,14 +10,6 @@ maintainers:
- Tom Joseph <tjoseph@cadence.com>
properties:
- cdns,max-outbound-regions:
- description: maximum number of outbound regions
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- minimum: 1
- maximum: 32
- default: 32
-
phys:
description:
One per lane if more than one in the list. If only one PHY listed it must
diff --git a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
index 48a98dae00de..64b2c64ca806 100644
--- a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
@@ -71,10 +71,9 @@ properties:
max-link-speed:
description: Specify PCI Gen for link capability.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 3, 4 ]
- - default: 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 3, 4]
+ default: 1
bus-range:
description: Range of bus numbers associated with this controller.
diff --git a/Documentation/devicetree/bindings/pci/pci-ep.yaml b/Documentation/devicetree/bindings/pci/pci-ep.yaml
index b3df100705b0..0f8e575ac01a 100644
--- a/Documentation/devicetree/bindings/pci/pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/pci-ep.yaml
@@ -18,21 +18,18 @@ properties:
max-functions:
description: Maximum number of functions that can be configured
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint8
+ $ref: /schemas/types.yaml#/definitions/uint8
minimum: 1
default: 1
maximum: 255
max-link-speed:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
enum: [ 1, 2, 3, 4 ]
num-lanes:
description: maximum number of lanes
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
default: 1
maximum: 16
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index b94078f58d8e..aeba38f0a387 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -6,7 +6,8 @@ AHB. There is one bridge instance per USB port connected to the internal
OHCI and EHCI controllers.
Required properties:
-- compatible: "renesas,pci-r8a7743" for the R8A7743 SoC;
+- compatible: "renesas,pci-r8a7742" for the R8A7742 SoC;
+ "renesas,pci-r8a7743" for the R8A7743 SoC;
"renesas,pci-r8a7744" for the R8A7744 SoC;
"renesas,pci-r8a7745" for the R8A7745 SoC;
"renesas,pci-r8a7790" for the R8A7790 SoC;
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml
new file mode 100644
index 000000000000..aa483c7f27fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/rcar-pci-ep.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Renesas Electronics Europe GmbH - https://www.renesas.com/eu/en/
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/rcar-pci-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car PCIe Endpoint
+
+maintainers:
+ - Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ items:
+ - const: renesas,r8a774c0-pcie-ep
+ - const: renesas,rcar-gen3-pcie-ep
+
+ reg:
+ maxItems: 5
+
+ reg-names:
+ items:
+ - const: apb-base
+ - const: memory0
+ - const: memory1
+ - const: memory2
+ - const: memory3
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pcie
+
+ max-functions:
+ minimum: 1
+ maximum: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - resets
+ - power-domains
+ - clocks
+ - clock-names
+ - max-functions
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
+ #include <dt-bindings/power/r8a774c0-sysc.h>
+
+ pcie0_ep: pcie-ep@fe000000 {
+ compatible = "renesas,r8a774c0-pcie-ep",
+ "renesas,rcar-gen3-pcie-ep";
+ reg = <0xfe000000 0x80000>,
+ <0xfe100000 0x100000>,
+ <0xfe200000 0x200000>,
+ <0x30000000 0x8000000>,
+ <0x38000000 0x8000000>;
+ reg-names = "apb-base", "memory0", "memory1", "memory2", "memory3";
+ resets = <&cpg 319>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ clocks = <&cpg CPG_MOD 319>;
+ clock-names = "pcie";
+ max-functions = /bits/ 8 <1>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 12702c8c46ce..1041c44a614f 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -11,7 +11,8 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7791" for the R8A7791 SoC;
"renesas,pcie-r8a7793" for the R8A7793 SoC;
"renesas,pcie-r8a7795" for the R8A7795 SoC;
- "renesas,pcie-r8a7796" for the R8A7796 SoC;
+ "renesas,pcie-r8a7796" for the R8A77960 SoC;
+ "renesas,pcie-r8a77961" for the R8A77961 SoC;
"renesas,pcie-r8a77980" for the R8A77980 SoC;
"renesas,pcie-r8a77990" for the R8A77990 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
diff --git a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
new file mode 100644
index 000000000000..f0558b9cf9e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/socionext,uniphier-pcie-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier PCIe endpoint controller
+
+description: |
+ UniPhier PCIe endpoint controller is based on the Synopsys DesignWare
+ PCI core. It shares common features with the PCIe DesignWare core and
+ inherits common properties defined in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+allOf:
+ - $ref: "pci-ep.yaml#"
+
+properties:
+ compatible:
+ const: socionext,uniphier-pro5-pcie-ep
+
+ reg:
+ maxItems: 4
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: dbi2
+ - const: link
+ - const: addr_space
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: gio
+ - const: link
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: gio
+ - const: link
+
+ num-ib-windows:
+ const: 16
+
+ num-ob-windows:
+ const: 16
+
+ num-lanes: true
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: pcie-phy
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+additionalProperties: false
+
+examples:
+ - |
+ pcie_ep: pcie-ep@66000000 {
+ compatible = "socionext,uniphier-pro5-pcie-ep";
+ reg-names = "dbi", "dbi2", "link", "addr_space";
+ reg = <0x66000000 0x1000>, <0x66001000 0x1000>,
+ <0x66010000 0x10000>, <0x67000000 0x400000>;
+ clock-names = "gio", "link";
+ clocks = <&sys_clk 12>, <&sys_clk 24>;
+ reset-names = "gio", "link";
+ resets = <&sys_rst 12>, <&sys_rst 24>;
+ num-ib-windows = <16>;
+ num-ob-windows = <16>;
+ num-lanes = <4>;
+ phy-names = "pcie-phy";
+ phys = <&pcie_phy>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
index 88683db6cf81..18c1ec5e19ad 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-mipi-pcie-analog.yaml
@@ -30,6 +30,6 @@ examples:
- |
mpphy: phy@0 {
compatible = "amlogic,axg-mipi-pcie-analog-phy";
- reg = <0x0 0x0 0x0 0xc>;
+ reg = <0x0 0xc>;
#phy-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml
index 086478aec946..45f3d72b1cca 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-axg-pcie.yaml
@@ -44,7 +44,7 @@ examples:
#include <dt-bindings/phy/phy.h>
pcie_phy: pcie-phy@ff644000 {
compatible = "amlogic,axg-pcie-phy";
- reg = <0x0 0xff644000 0x0 0x1c>;
+ reg = <0xff644000 0x1c>;
resets = <&reset RESET_PCIE_PHY>;
phys = <&mipi_analog_phy PHY_TYPE_PCIE>;
phy-names = "analog";
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson8b-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson8b-usb2-phy.yaml
new file mode 100644
index 000000000000..03c4809dbe8d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson8b-usb2-phy.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,meson8b-usb2-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY
+
+maintainers:
+ - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,meson8-usb2-phy
+ - amlogic,meson8b-usb2-phy
+ - amlogic,meson8m2-usb2-phy
+ - const: amlogic,meson-mx-usb2-phy
+ - const: amlogic,meson-gxbb-usb2-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: usb_general
+ - const: usb
+
+ resets:
+ minItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ phy-supply:
+ description:
+ Phandle to a regulator that provides power to the PHY. This
+ regulator will be managed during the PHY power on/off sequence.
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@c0000000 {
+ compatible = "amlogic,meson-gxbb-usb2-phy";
+ reg = <0xc0000000 0x20>;
+ resets = <&reset_usb_phy>;
+ clocks = <&clk_usb_general>, <&reset_usb>;
+ clock-names = "usb_general", "usb";
+ phy-supply = <&usb_vbus>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/calxeda-combophy.txt b/Documentation/devicetree/bindings/phy/calxeda-combophy.txt
deleted file mode 100644
index 6622bdb2e8bc..000000000000
--- a/Documentation/devicetree/bindings/phy/calxeda-combophy.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Calxeda Highbank Combination Phys for SATA
-
-Properties:
-- compatible : Should be "calxeda,hb-combophy"
-- #phy-cells: Should be 1.
-- reg : Address and size for Combination Phy registers.
-- phydev: device ID for programming the combophy.
-
-Example:
-
- combophy5: combo-phy@fff5d000 {
- compatible = "calxeda,hb-combophy";
- #phy-cells = <1>;
- reg = <0xfff5d000 0x1000>;
- phydev = <31>;
- };
-
diff --git a/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml b/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml
new file mode 100644
index 000000000000..16a8bd7644bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/calxeda-combophy.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/calxeda-combophy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Calxeda Highbank Combination PHYs binding for SATA
+
+description: |
+ The Calxeda Combination PHYs connect the SoC to the internal fabric
+ and to SATA connectors. The PHYs support multiple protocols (SATA,
+ SGMII, PCIe) and can be assigned to different devices (SATA or XGMAC
+ controller).
+ Programming the PHYs is typically handled by those device drivers,
+ not by a dedicated PHY driver.
+
+maintainers:
+ - Andre Przywara <andre.przywara@arm.com>
+
+properties:
+ compatible:
+ const: calxeda,hb-combophy
+
+ '#phy-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+ phydev:
+ description: device ID for programming the ComboPHY.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maximum: 31
+
+required:
+ - compatible
+ - reg
+ - phydev
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ combophy5: combo-phy@fff5d000 {
+ compatible = "calxeda,hb-combophy";
+ #phy-cells = <1>;
+ reg = <0xfff5d000 0x1000>;
+ phydev = <31>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml b/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml
new file mode 100644
index 000000000000..3a07285b5470
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/cdns,salvo-phy.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright (c) 2020 NXP
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/cdns,salvo-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Cadence SALVO PHY
+
+maintainers:
+ - Peter Chen <peter.chen@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - nxp,salvo-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: salvo_phy_clk
+
+ power-domains:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/firmware/imx/rsrc.h>
+
+ usb3phy: usb3-phy@5b160000 {
+ compatible = "nxp,salvo-phy";
+ reg = <0x5b160000 0x40000>;
+ clocks = <&usb3_lpcg 4>;
+ clock-names = "salvo_phy_clk";
+ power-domains = <&pd IMX_SC_R_USB_2_PHY>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml b/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml
new file mode 100644
index 000000000000..347d0cdfb80d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/intel,combo-phy.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/intel,combo-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel ComboPhy Subsystem
+
+maintainers:
+ - Dilip Kota <eswara.kota@linux.intel.com>
+
+description: |
+ Intel Combophy subsystem supports PHYs for PCIe, EMAC and SATA
+ controllers. A single Combophy provides two PHY instances.
+
+properties:
+ $nodename:
+ pattern: "combophy(@.*|-[0-9a-f])*$"
+
+ compatible:
+ items:
+ - const: intel,combophy-lgm
+ - const: intel,combo-phy
+
+ clocks:
+ maxItems: 1
+
+ reg:
+ items:
+ - description: ComboPhy core registers
+ - description: PCIe app core control registers
+
+ reg-names:
+ items:
+ - const: core
+ - const: app
+
+ resets:
+ maxItems: 4
+
+ reset-names:
+ items:
+ - const: phy
+ - const: core
+ - const: iphy0
+ - const: iphy1
+
+ intel,syscfg:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: Chip configuration registers handle and ComboPhy instance id
+
+ intel,hsio:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: HSIO registers handle and ComboPhy instance id on NOC
+
+ intel,aggregation:
+ type: boolean
+ description: |
+ Set this flag to configure the ComboPHY in dual lane mode.
+
+ intel,phy-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Mode of the two phys in ComboPhy.
+ See dt-bindings/phy/phy.h for values.
+
+ "#phy-cells":
+ const: 1
+
+required:
+ - compatible
+ - clocks
+ - reg
+ - reg-names
+ - intel,syscfg
+ - intel,hsio
+ - intel,phy-mode
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/phy/phy.h>
+ combophy@d0a00000 {
+ compatible = "intel,combophy-lgm", "intel,combo-phy";
+ clocks = <&cgu0 1>;
+ #phy-cells = <1>;
+ reg = <0xd0a00000 0x40000>,
+ <0xd0a40000 0x1000>;
+ reg-names = "core", "app";
+ resets = <&rcu0 0x50 6>,
+ <&rcu0 0x50 17>,
+ <&rcu0 0x50 23>,
+ <&rcu0 0x50 24>;
+ reset-names = "phy", "core", "iphy0", "iphy1";
+ intel,syscfg = <&sysconf 0>;
+ intel,hsio = <&hsiol 0>;
+ intel,phy-mode = <PHY_TYPE_PCIE>;
+ intel,aggregation;
+ };
diff --git a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
index 9a346d6290d9..77bb5309918e 100644
--- a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml
@@ -23,7 +23,7 @@ description: |+
properties:
compatible:
- const: intel,lgm-emmc-phy
+ const: intel,lgm-emmc-phy
"#phy-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/phy/meson-gxl-usb3-phy.txt b/Documentation/devicetree/bindings/phy/meson-gxl-usb3-phy.txt
deleted file mode 100644
index 114947e1de3d..000000000000
--- a/Documentation/devicetree/bindings/phy/meson-gxl-usb3-phy.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* Amlogic Meson GXL and GXM USB3 PHY and OTG detection binding
-
-Required properties:
-- compatible: Should be "amlogic,meson-gxl-usb3-phy"
-- #phys-cells: must be 0 (see phy-bindings.txt in this directory)
-- reg: The base address and length of the registers
-- interrupts: the interrupt specifier for the OTG detection
-- clocks: phandles to the clocks for
- - the USB3 PHY
- - and peripheral mode/OTG detection
-- clock-names: must contain "phy" and "peripheral"
-- resets: phandle to the reset lines for:
- - the USB3 PHY and
- - peripheral mode/OTG detection
-- reset-names: must contain "phy" and "peripheral"
-
-Optional properties:
-- phy-supply: see phy-bindings.txt in this directory
-
-
-Example:
- usb3_phy0: phy@78080 {
- compatible = "amlogic,meson-gxl-usb3-phy";
- #phy-cells = <0>;
- reg = <0x0 0x78080 0x0 0x20>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkc CLKID_USB_OTG>, <&clkc_AO CLKID_AO_CEC_32K>;
- clock-names = "phy", "peripheral";
- resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
- reset-names = "phy", "peripheral";
- };
diff --git a/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
deleted file mode 100644
index d81d73aea608..000000000000
--- a/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Amlogic Meson8, Meson8b and GXBB USB2 PHY
-
-Required properties:
-- compatible: Depending on the platform this should be one of:
- "amlogic,meson8-usb2-phy"
- "amlogic,meson8b-usb2-phy"
- "amlogic,meson-gxbb-usb2-phy"
-- reg: The base address and length of the registers
-- #phys-cells: should be 0 (see phy-bindings.txt in this directory)
-- clocks: phandle and clock identifier for the phy clocks
-- clock-names: "usb_general" and "usb"
-
-Optional properties:
-- resets: reference to the reset controller
-- phy-supply: see phy-bindings.txt in this directory
-
-
-Example:
-
-usb0_phy: usb-phy@c0000000 {
- compatible = "amlogic,meson-gxbb-usb2-phy";
- #phy-cells = <0>;
- reg = <0x0 0xc0000000 0x0 0x20>;
- resets = <&reset RESET_USB_OTG>;
- clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
- clock-names = "usb_general", "usb";
- phy-supply = <&usb_vbus>;
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
index c779a3c7d87a..4071438be2ba 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
@@ -77,24 +77,21 @@ patternProperties:
description:
Specifies the type of PHY for which the group of PHY lanes is used.
Refer include/dt-bindings/phy/phy.h. Constants from the header should be used.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [1, 2, 3, 4, 5, 6]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 3, 4, 5, 6]
cdns,num-lanes:
description:
Number of DisplayPort lanes.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [1, 2, 4]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4]
default: 4
cdns,max-bit-rate:
description:
Maximum DisplayPort link bit rate to use, in Mbps
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [2160, 2430, 2700, 3240, 4320, 5400, 8100]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [2160, 2430, 2700, 3240, 4320, 5400, 8100]
default: 8100
required:
@@ -120,24 +117,30 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/phy/phy.h>
- torrent_phy: torrent-phy@f0fb500000 {
- compatible = "cdns,torrent-phy";
- reg = <0xf0 0xfb500000 0x0 0x00100000>,
- <0xf0 0xfb030a00 0x0 0x00000040>;
- reg-names = "torrent_phy", "dptx_phy";
- resets = <&phyrst 0>;
- clocks = <&ref_clk>;
- clock-names = "refclk";
- #address-cells = <1>;
- #size-cells = <0>;
- torrent_phy_dp: phy@0 {
- reg = <0>;
- resets = <&phyrst 1>, <&phyrst 2>,
- <&phyrst 3>, <&phyrst 4>;
- #phy-cells = <0>;
- cdns,phy-type = <PHY_TYPE_DP>;
- cdns,num-lanes = <4>;
- cdns,max-bit-rate = <8100>;
- };
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ torrent-phy@f0fb500000 {
+ compatible = "cdns,torrent-phy";
+ reg = <0xf0 0xfb500000 0x0 0x00100000>,
+ <0xf0 0xfb030a00 0x0 0x00000040>;
+ reg-names = "torrent_phy", "dptx_phy";
+ resets = <&phyrst 0>;
+ clocks = <&ref_clk>;
+ clock-names = "refclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy@0 {
+ reg = <0>;
+ resets = <&phyrst 1>, <&phyrst 2>,
+ <&phyrst 3>, <&phyrst 4>;
+ #phy-cells = <0>;
+ cdns,phy-type = <PHY_TYPE_DP>;
+ cdns,num-lanes = <4>;
+ cdns,max-bit-rate = <8100>;
+ };
+ };
};
...
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
new file mode 100644
index 000000000000..973b2d196f46
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
@@ -0,0 +1,313 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/qcom,qmp-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm QMP PHY controller
+
+maintainers:
+ - Manu Gautam <mgautam@codeaurora.org>
+
+description:
+ QMP phy controller supports physical layer functionality for a number of
+ controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq8074-qmp-pcie-phy
+ - qcom,msm8996-qmp-pcie-phy
+ - qcom,msm8996-qmp-ufs-phy
+ - qcom,msm8996-qmp-usb3-phy
+ - qcom,msm8998-qmp-pcie-phy
+ - qcom,msm8998-qmp-ufs-phy
+ - qcom,msm8998-qmp-usb3-phy
+ - qcom,sdm845-qhp-pcie-phy
+ - qcom,sdm845-qmp-pcie-phy
+ - qcom,sdm845-qmp-ufs-phy
+ - qcom,sdm845-qmp-usb3-uni-phy
+ - qcom,sm8150-qmp-ufs-phy
+ - qcom,sm8250-qmp-ufs-phy
+
+ reg:
+ items:
+ - description: Address and length of PHY's common serdes block.
+
+ "#clock-cells":
+ enum: [ 1, 2 ]
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ minItems: 1
+ maxItems: 4
+
+ resets:
+ minItems: 1
+ maxItems: 3
+
+ reset-names:
+ minItems: 1
+ maxItems: 3
+
+ vdda-phy-supply:
+ description:
+ Phandle to a regulator supply to PHY core block.
+
+ vdda-pll-supply:
+ description:
+ Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+ vddp-ref-clk-supply:
+ description:
+ Phandle to a regulator supply to any specific refclk
+ pll block.
+
+#Required nodes:
+patternProperties:
+ "^phy@[0-9a-f]+$":
+ type: object
+ description:
+ Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#address-cells"
+ - "#size-cells"
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdm845-qmp-usb3-uni-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ - description: 19.2 MHz ref clk.
+ - description: Phy common block aux clock.
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ - const: com_aux
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-qmp-pcie-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ - description: 19.2 MHz ref clk.
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ - description: phy's ahb cfg block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - const: cfg
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-qmp-usb3-phy
+ - qcom,msm8998-qmp-pcie-phy
+ - qcom,msm8998-qmp-usb3-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ - description: 19.2 MHz ref clk.
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-qmp-ufs-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: 19.2 MHz ref clk.
+ clock-names:
+ items:
+ - const: ref
+ resets:
+ items:
+ - description: PHY reset in the UFS controller.
+ reset-names:
+ items:
+ - const: ufsphy
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8998-qmp-ufs-phy
+ - qcom,sdm845-qmp-ufs-phy
+ - qcom,sm8150-qmp-ufs-phy
+ - qcom,sm8250-qmp-ufs-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: 19.2 MHz ref clk.
+ - description: Phy reference aux clock.
+ clock-names:
+ items:
+ - const: ref
+ - const: ref_aux
+ resets:
+ items:
+ - description: PHY reset in the UFS controller.
+ reset-names:
+ items:
+ - const: ufsphy
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,ipq8074-qmp-pcie-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: pipe clk.
+ clock-names:
+ items:
+ - const: pipe_clk
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdm845-qhp-pcie-phy
+ - qcom,sdm845-qmp-pcie-phy
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ - description: 19.2 MHz ref clk.
+ - description: Phy refgen clk.
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ - const: refgen
+ resets:
+ items:
+ - description: reset of phy block.
+ reset-names:
+ items:
+ - const: phy
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ usb_2_qmpphy: phy-wrapper@88eb000 {
+ compatible = "qcom,sdm845-qmp-usb3-uni-phy";
+ reg = <0 0x088eb000 0 0x18c>;
+ #clock-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_SEC_CLKREF_CLK>,
+ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "com_aux";
+
+ resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
+ <&gcc GCC_USB3_PHY_SEC_BCR>;
+ reset-names = "phy", "common";
+
+ vdda-phy-supply = <&vdda_usb2_ss_1p2>;
+ vdda-pll-supply = <&vdda_usb2_ss_core>;
+
+ usb_2_ssphy: phy@88eb200 {
+ reg = <0 0x088eb200 0 0x128>,
+ <0 0x088eb400 0 0x1fc>,
+ <0 0x088eb800 0 0x218>,
+ <0 0x088eb600 0 0x70>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_uni_phy_pipe_clk_src";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
new file mode 100644
index 000000000000..b770e637df1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/qcom,qmp-usb3-dp-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm QMP USB3 DP PHY controller
+
+maintainers:
+ - Manu Gautam <mgautam@codeaurora.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,sc7180-qmp-usb3-phy
+ - qcom,sdm845-qmp-usb3-phy
+ reg:
+ items:
+ - description: Address and length of PHY's common serdes block.
+ - description: Address and length of the DP_COM control block.
+
+ reg-names:
+ items:
+ - const: reg-base
+ - const: dp_com
+
+ "#clock-cells":
+ enum: [ 1, 2 ]
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ clocks:
+ items:
+ - description: Phy aux clock.
+ - description: Phy config clock.
+ - description: 19.2 MHz ref clk.
+ - description: Phy common block aux clock.
+
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ - const: com_aux
+
+ resets:
+ items:
+ - description: reset of phy block.
+ - description: phy common block reset.
+
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+
+ vdda-phy-supply:
+ description:
+ Phandle to a regulator supply to PHY core block.
+
+ vdda-pll-supply:
+ description:
+ Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+ vddp-ref-clk-supply:
+ description:
+ Phandle to a regulator supply to any specific refclk
+ pll block.
+
+#Required nodes:
+patternProperties:
+ "^phy@[0-9a-f]+$":
+ type: object
+ description:
+ Each device node of QMP phy is required to have as many child nodes as
+ the number of lanes the PHY has.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - "#clock-cells"
+ - "#address-cells"
+ - "#size-cells"
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ usb_1_qmpphy: phy-wrapper@88e9000 {
+ compatible = "qcom,sdm845-qmp-usb3-phy";
+ reg = <0 0x088e9000 0 0x18c>,
+ <0 0x088e8000 0 0x10>;
+ reg-names = "reg-base", "dp_com";
+ #clock-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+ <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+ clock-names = "aux", "cfg_ahb", "ref", "com_aux";
+
+ resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+ vdda-phy-supply = <&vdda_usb2_ss_1p2>;
+ vdda-pll-supply = <&vdda_usb2_ss_core>;
+
+ usb_1_ssphy: phy@88e9200 {
+ reg = <0 0x088e9200 0 0x128>,
+ <0 0x088e9400 0 0x200>,
+ <0 0x088e9c00 0 0x218>,
+ <0 0x088e9600 0 0x128>,
+ <0 0x088e9800 0 0x200>,
+ <0 0x088e9a00 0 0x100>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index f8bd28ff31c1..b5a6195de7ff 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -83,31 +83,28 @@ then:
It is a 6 bit value that specifies offset to be
added to PHY refgen RESCODE via IMP_CTRL1 register. It is a PHY
tuning parameter that may vary for different boards of same SOC.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 63
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 63
+ default: 0
qcom,bias-ctrl-value:
description:
It is a 6 bit value that specifies bias-ctrl-value. It is a PHY
tuning parameter that may vary for different boards of same SOC.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 63
- default: 32
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 63
+ default: 32
qcom,charge-ctrl-value:
- description:
+ description:
It is a 2 bit value that specifies charge-ctrl-value. It is a PHY
tuning parameter that may vary for different boards of same SOC.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 3
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+ default: 0
qcom,hstx-trim-value:
description:
@@ -115,22 +112,20 @@ then:
output current.
Possible range is - 15mA to 24mA (stepsize of 600 uA).
See dt-bindings/phy/phy-qcom-qusb2.h for applicable values.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 15
- default: 3
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ default: 3
qcom,preemphasis-level:
description:
It is a 2 bit value that specifies pre-emphasis level.
Possible range is 0 to 15% (stepsize of 5%).
See dt-bindings/phy/phy-qcom-qusb2.h for applicable values.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 3
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+ default: 2
qcom,preemphasis-width:
description:
@@ -138,21 +133,19 @@ then:
pre-emphasis (specified using qcom,preemphasis-level) must be in
effect. Duration could be half-bit of full-bit.
See dt-bindings/phy/phy-qcom-qusb2.h for applicable values.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 1
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 1
+ default: 0
qcom,hsdisc-trim-value:
description:
It is a 2 bit value tuning parameter that control disconnect
threshold and may vary for different boards of same SOC.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 0
- maximum: 3
- default: 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+ default: 0
required:
- compatible
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
new file mode 100644
index 000000000000..574f890fab1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/qcom,usb-snps-femto-v2.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Synopsys Femto High-Speed USB PHY V2
+
+maintainers:
+ - Wesley Cheng <wcheng@codeaurora.org>
+
+description: |
+ Qualcomm High-Speed USB PHY
+
+properties:
+ compatible:
+ enum:
+ - qcom,usb-snps-hs-7nm-phy
+ - qcom,sm8150-usb-hs-phy
+ - qcom,usb-snps-femto-v2-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ items:
+ - description: rpmhcc ref clock
+
+ clock-names:
+ items:
+ - const: ref
+
+ resets:
+ items:
+ - description: PHY core reset
+
+ vdda-pll-supply:
+ description: phandle to the regulator VDD supply node.
+
+ vdda18-supply:
+ description: phandle to the regulator 1.8V supply node.
+
+ vdda33-supply:
+ description: phandle to the regulator 3.3V supply node.
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+ - resets
+ - vdda-pll-supply
+ - vdda18-supply
+ - vdda33-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,gcc-sm8150.h>
+ phy@88e2000 {
+ compatible = "qcom,sm8150-usb-hs-phy";
+ reg = <0 0x088e2000 0 0x400>;
+ #phy-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+
+ vdda-pll-supply = <&vdd_usb_hs_core>;
+ vdda33-supply = <&vdda_usb_hs_3p1>;
+ vdda18-supply = <&vdda_usb_hs_1p8>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
deleted file mode 100644
index 54d6f8d43508..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ /dev/null
@@ -1,242 +0,0 @@
-Qualcomm QMP PHY controller
-===========================
-
-QMP phy controller supports physical layer functionality for a number of
-controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB.
-
-Required properties:
- - compatible: compatible list, contains:
- "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
- "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
- "qcom,msm8996-qmp-ufs-phy" for 14nm UFS phy on msm8996,
- "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
- "qcom,msm8998-qmp-usb3-phy" for USB3 QMP V3 phy on msm8998,
- "qcom,msm8998-qmp-ufs-phy" for UFS QMP phy on msm8998,
- "qcom,msm8998-qmp-pcie-phy" for PCIe QMP phy on msm8998,
- "qcom,sdm845-qhp-pcie-phy" for QHP PCIe phy on sdm845,
- "qcom,sdm845-qmp-pcie-phy" for QMP PCIe phy on sdm845,
- "qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
- "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
- "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845,
- "qcom,sm8150-qmp-ufs-phy" for UFS QMP phy on sm8150.
-
-- reg:
- - index 0: address and length of register set for PHY's common
- serdes block.
- - index 1: address and length of the DP_COM control block (for
- "qcom,sdm845-qmp-usb3-phy" only).
-
-- reg-names:
- - For "qcom,sdm845-qmp-usb3-phy":
- - Should be: "reg-base", "dp_com"
- - For all others:
- - The reg-names property shouldn't be defined.
-
- - #address-cells: must be 1
- - #size-cells: must be 1
- - ranges: must be present
-
- - clocks: a list of phandles and clock-specifier pairs,
- one for each entry in clock-names.
- - clock-names: "cfg_ahb" for phy config clock,
- "aux" for phy aux clock,
- "ref" for 19.2 MHz ref clk,
- "com_aux" for phy common block aux clock,
- "ref_aux" for phy reference aux clock,
-
- For "qcom,ipq8074-qmp-pcie-phy": no clocks are listed.
- For "qcom,msm8996-qmp-pcie-phy" must contain:
- "aux", "cfg_ahb", "ref".
- For "qcom,msm8996-qmp-ufs-phy" must contain:
- "ref".
- For "qcom,msm8996-qmp-usb3-phy" must contain:
- "aux", "cfg_ahb", "ref".
- For "qcom,msm8998-qmp-usb3-phy" must contain:
- "aux", "cfg_ahb", "ref".
- For "qcom,msm8998-qmp-ufs-phy" must contain:
- "ref", "ref_aux".
- For "qcom,msm8998-qmp-pcie-phy" must contain:
- "aux", "cfg_ahb", "ref".
- For "qcom,sdm845-qhp-pcie-phy" must contain:
- "aux", "cfg_ahb", "ref", "refgen".
- For "qcom,sdm845-qmp-pcie-phy" must contain:
- "aux", "cfg_ahb", "ref", "refgen".
- For "qcom,sdm845-qmp-usb3-phy" must contain:
- "aux", "cfg_ahb", "ref", "com_aux".
- For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
- "aux", "cfg_ahb", "ref", "com_aux".
- For "qcom,sdm845-qmp-ufs-phy" must contain:
- "ref", "ref_aux".
- For "qcom,sm8150-qmp-ufs-phy" must contain:
- "ref", "ref_aux".
-
- - resets: a list of phandles and reset controller specifier pairs,
- one for each entry in reset-names.
- - reset-names: "phy" for reset of phy block,
- "common" for phy common block reset,
- "cfg" for phy's ahb cfg block reset,
- "ufsphy" for the PHY reset in the UFS controller.
-
- For "qcom,ipq8074-qmp-pcie-phy" must contain:
- "phy", "common".
- For "qcom,msm8996-qmp-pcie-phy" must contain:
- "phy", "common", "cfg".
- For "qcom,msm8996-qmp-ufs-phy": must contain:
- "ufsphy".
- For "qcom,msm8996-qmp-usb3-phy" must contain
- "phy", "common".
- For "qcom,msm8998-qmp-usb3-phy" must contain
- "phy", "common".
- For "qcom,msm8998-qmp-ufs-phy": must contain:
- "ufsphy".
- For "qcom,msm8998-qmp-pcie-phy" must contain:
- "phy", "common".
- For "qcom,sdm845-qhp-pcie-phy" must contain:
- "phy".
- For "qcom,sdm845-qmp-pcie-phy" must contain:
- "phy".
- For "qcom,sdm845-qmp-usb3-phy" must contain:
- "phy", "common".
- For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
- "phy", "common".
- For "qcom,sdm845-qmp-ufs-phy": must contain:
- "ufsphy".
- For "qcom,sm8150-qmp-ufs-phy": must contain:
- "ufsphy".
-
- - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
- - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
-
-Optional properties:
- - vddp-ref-clk-supply: Phandle to a regulator supply to any specific refclk
- pll block.
-
-Required nodes:
- - Each device node of QMP phy is required to have as many child nodes as
- the number of lanes the PHY has.
-
-Required properties for child nodes of PCIe PHYs (one child per lane):
- - reg: list of offset and length pairs of register sets for PHY blocks -
- tx, rx, pcs, and pcs_misc (optional).
- - #phy-cells: must be 0
-
-Required properties for a single "lanes" child node of non-PCIe PHYs:
- - reg: list of offset and length pairs of register sets for PHY blocks
- For 1-lane devices:
- tx, rx, pcs, and (optionally) pcs_misc
- For 2-lane devices:
- tx0, rx0, pcs, tx1, rx1, and (optionally) pcs_misc
- - #phy-cells: must be 0
-
-Required properties for child node of PCIe and USB3 qmp phys:
- - clocks: a list of phandles and clock-specifier pairs,
- one for each entry in clock-names.
- - clock-names: Must contain following:
- "pipe<lane-number>" for pipe clock specific to each lane.
- - clock-output-names: Name of the PHY clock that will be the parent for
- the above pipe clock.
- For "qcom,ipq8074-qmp-pcie-phy":
- - "pcie20_phy0_pipe_clk" Pipe Clock parent
- (or)
- "pcie20_phy1_pipe_clk"
- - #clock-cells: must be 0
- - Phy pll outputs pipe clocks for pipe based PHYs. These clocks are then
- gate-controlled by the gcc.
-
-Required properties for child node of PHYs with lane reset, AKA:
- "qcom,msm8996-qmp-pcie-phy"
- - resets: a list of phandles and reset controller specifier pairs,
- one for each entry in reset-names.
- - reset-names: Must contain following:
- "lane<lane-number>" for reset specific to each lane.
-
-Example:
- phy@34000 {
- compatible = "qcom,msm8996-qmp-pcie-phy";
- reg = <0x34000 0x488>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
- <&gcc GCC_PCIE_PHY_CFG_AHB_CLK>,
- <&gcc GCC_PCIE_CLKREF_CLK>;
- clock-names = "aux", "cfg_ahb", "ref";
-
- vdda-phy-supply = <&pm8994_l28>;
- vdda-pll-supply = <&pm8994_l12>;
-
- resets = <&gcc GCC_PCIE_PHY_BCR>,
- <&gcc GCC_PCIE_PHY_COM_BCR>,
- <&gcc GCC_PCIE_PHY_COM_NOCSR_BCR>;
- reset-names = "phy", "common", "cfg";
-
- pciephy_0: lane@35000 {
- reg = <0x35000 0x130>,
- <0x35200 0x200>,
- <0x35400 0x1dc>;
- #clock-cells = <0>;
- #phy-cells = <0>;
-
- clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "pcie_0_pipe_clk_src";
- resets = <&gcc GCC_PCIE_0_PHY_BCR>;
- reset-names = "lane0";
- };
-
- pciephy_1: lane@36000 {
- ...
- ...
- };
-
- phy@88eb000 {
- compatible = "qcom,sdm845-qmp-usb3-uni-phy";
- reg = <0x88eb000 0x18c>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
- <&gcc GCC_USB3_SEC_CLKREF_CLK>,
- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "com_aux";
-
- resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
- <&gcc GCC_USB3_PHY_SEC_BCR>;
- reset-names = "phy", "common";
-
- lane@88eb200 {
- reg = <0x88eb200 0x128>,
- <0x88eb400 0x1fc>,
- <0x88eb800 0x218>,
- <0x88eb600 0x70>;
- #clock-cells = <0>;
- #phy-cells = <0>;
- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb3_uni_phy_pipe_clk_src";
- };
- };
-
- phy@1d87000 {
- compatible = "qcom,sdm845-qmp-ufs-phy";
- reg = <0x1d87000 0x18c>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- clock-names = "ref",
- "ref_aux";
- clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
- <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
-
- lanes@1d87400 {
- reg = <0x1d87400 0x108>,
- <0x1d87600 0x1e0>,
- <0x1d87c00 0x1dc>,
- <0x1d87800 0x108>,
- <0x1d87a00 0x1e0>;
- #phy-cells = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/qcom-usb-ipq4019-phy.yaml b/Documentation/devicetree/bindings/phy/qcom-usb-ipq4019-phy.yaml
new file mode 100644
index 000000000000..1118fe69b611
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom-usb-ipq4019-phy.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/qcom-usb-ipq4019-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm IPQ40xx Dakota HS/SS USB PHY
+
+maintainers:
+ - Robert Marko <robert.marko@sartura.hr>
+
+properties:
+ compatible:
+ enum:
+ - qcom,usb-ss-ipq4019-phy
+ - qcom,usb-hs-ipq4019-phy
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: por_rst
+ - const: srif_rst
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - resets
+ - reset-names
+ - "#phy-cells"
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+
+ hsphy@a8000 {
+ #phy-cells = <0>;
+ compatible = "qcom,usb-hs-ipq4019-phy";
+ reg = <0xa8000 0x40>;
+ resets = <&gcc USB2_HSPHY_POR_ARES>,
+ <&gcc USB2_HSPHY_S_ARES>;
+ reset-names = "por_rst", "srif_rst";
+ };
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
index ac96d6481bb8..a3bd1c4499b7 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
@@ -4,7 +4,8 @@ This file provides information on what the device node for the R-Car generation
2 USB PHY contains.
Required properties:
-- compatible: "renesas,usb-phy-r8a7743" if the device is a part of R8A7743 SoC.
+- compatible: "renesas,usb-phy-r8a7742" if the device is a part of R8A7742 SoC.
+ "renesas,usb-phy-r8a7743" if the device is a part of R8A7743 SoC.
"renesas,usb-phy-r8a7744" if the device is a part of R8A7744 SoC.
"renesas,usb-phy-r8a7745" if the device is a part of R8A7745 SoC.
"renesas,usb-phy-r8a77470" if the device is a part of R8A77470 SoC.
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
deleted file mode 100644
index 7734b219d9aa..000000000000
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-* Renesas R-Car generation 3 USB 2.0 PHY
-
-This file provides information on what the device node for the R-Car generation
-3, RZ/G1C, RZ/G2 and RZ/A2 USB 2.0 PHY contain.
-
-Required properties:
-- compatible: "renesas,usb2-phy-r7s9210" if the device is a part of an R7S9210
- SoC.
- "renesas,usb2-phy-r8a77470" if the device is a part of an R8A77470
- SoC.
- "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
- SoC.
- "renesas,usb2-phy-r8a774b1" if the device is a part of an R8A774B1
- SoC.
- "renesas,usb2-phy-r8a774c0" if the device is a part of an R8A774C0
- SoC.
- "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
- SoC.
- "renesas,usb2-phy-r8a7796" if the device is a part of an R8A7796
- SoC.
- "renesas,usb2-phy-r8a77965" if the device is a part of an
- R8A77965 SoC.
- "renesas,usb2-phy-r8a77990" if the device is a part of an
- R8A77990 SoC.
- "renesas,usb2-phy-r8a77995" if the device is a part of an
- R8A77995 SoC.
- "renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3, RZ/G2 or
- RZ/A2 compatible device.
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
-- reg: offset and length of the partial USB 2.0 Host register block.
-- clocks: clock phandle and specifier pair(s).
-- #phy-cells: see phy-bindings.txt in the same directory, must be <1> (and
- using <0> is deprecated).
-
-The phandle's argument in the PHY specifier is the INT_STATUS bit of controller:
-- 1 = USBH_INTA (OHCI)
-- 2 = USBH_INTB (EHCI)
-- 3 = UCOM_INT (OTG and BC)
-
-Optional properties:
-To use a USB channel where USB 2.0 Host and HSUSB (USB 2.0 Peripheral) are
-combined, the device tree node should set interrupt properties to use the
-channel as USB OTG:
-- interrupts: interrupt specifier for the PHY.
-- vbus-supply: Phandle to a regulator that provides power to the VBUS. This
- regulator will be managed during the PHY power on/off sequence.
-- renesas,no-otg-pins: boolean, specify when a board does not provide proper
- otg pins.
-- dr_mode: string, indicates the working mode for the PHY. Can be "host",
- "peripheral", or "otg". Should be set if otg controller is not used.
-
-
-Example (R-Car H3):
-
- usb-phy@ee080200 {
- compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
- reg = <0 0xee080200 0 0x700>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 703>;
- };
-
- usb-phy@ee0a0200 {
- compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
- reg = <0 0xee0a0200 0 0x700>;
- clocks = <&cpg CPG_MOD 702>;
- };
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
deleted file mode 100644
index 0fe433b9a592..000000000000
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-* Renesas R-Car generation 3 USB 3.0 PHY
-
-This file provides information on what the device node for the R-Car generation
-3 and RZ/G2 USB 3.0 PHY contain.
-If you want to enable spread spectrum clock (ssc), you should use USB_EXTAL
-instead of USB3_CLK. However, if you don't want to these features, you don't
-need this driver.
-
-Required properties:
-- compatible: "renesas,r8a774a1-usb3-phy" if the device is a part of an R8A774A1
- SoC.
- "renesas,r8a774b1-usb3-phy" if the device is a part of an R8A774B1
- SoC.
- "renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
- SoC.
- "renesas,r8a7796-usb3-phy" if the device is a part of an R8A7796
- SoC.
- "renesas,r8a77965-usb3-phy" if the device is a part of an
- R8A77965 SoC.
- "renesas,rcar-gen3-usb3-phy" for a generic R-Car Gen3 or RZ/G2
- compatible device.
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
-
-- reg: offset and length of the USB 3.0 PHY register block.
-- clocks: A list of phandles and clock-specifier pairs.
-- clock-names: Name of the clocks.
- - The funcional clock must be "usb3-if".
- - The usb3's external clock must be "usb3s_clk".
- - The usb2's external clock must be "usb_extal". If you want to use the ssc,
- the clock-frequency must not be 0.
-- #phy-cells: see phy-bindings.txt in the same directory, must be <0>.
-
-Optional properties:
-- renesas,ssc-range: Enable/disable spread spectrum clock (ssc) by using
- the following values as u32:
- - 0 (or the property doesn't exist): disable the ssc
- - 4980: enable the ssc as -4980 ppm
- - 4492: enable the ssc as -4492 ppm
- - 4003: enable the ssc as -4003 ppm
-
-Example (R-Car H3):
-
- usb-phy@e65ee000 {
- compatible = "renesas,r8a7795-usb3-phy",
- "renesas,rcar-gen3-usb3-phy";
- reg = <0 0xe65ee000 0 0x90>;
- clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>, <&usb_extal>;
- clock-names = "usb3-if", "usb3s_clk", "usb_extal";
- };
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
new file mode 100644
index 000000000000..440f09fddf93
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/renesas,usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car generation 3 USB 2.0 PHY
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: renesas,usb2-phy-r8a77470 # RZ/G1C
+
+ - items:
+ - enum:
+ - renesas,usb2-phy-r7s9210 # RZ/A2
+ - renesas,usb2-phy-r8a774a1 # RZ/G2M
+ - renesas,usb2-phy-r8a774b1 # RZ/G2N
+ - renesas,usb2-phy-r8a774c0 # RZ/G2E
+ - renesas,usb2-phy-r8a7795 # R-Car H3
+ - renesas,usb2-phy-r8a7796 # R-Car M3-W
+ - renesas,usb2-phy-r8a77961 # R-Car M3-W+
+ - renesas,usb2-phy-r8a77965 # R-Car M3-N
+ - renesas,usb2-phy-r8a77990 # R-Car E3
+ - renesas,usb2-phy-r8a77995 # R-Car D3
+ - const: renesas,rcar-gen3-usb2-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ - const: fck
+ - const: usb_x1
+
+ '#phy-cells':
+ enum: [0, 1] # and 0 is deprecated.
+ description: |
+ The phandle's argument in the PHY specifier is the INT_STATUS bit of
+ controller.
+ - 1 = USBH_INTA (OHCI)
+ - 2 = USBH_INTB (EHCI)
+ - 3 = UCOM_INT (OTG and BC)
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: reset of USB 2.0 host side
+ - description: reset of USB 2.0 peripheral side
+
+ vbus-supply:
+ description: |
+ Phandle to a regulator that provides power to the VBUS. This regulator
+ will be managed during the PHY power on/off sequence.
+
+ renesas,no-otg-pins:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ specify when a board does not provide proper otg pins.
+
+ dr_mode: true
+
+if:
+ properties:
+ compatible:
+ items:
+ enum:
+ - renesas,usb2-phy-r7s9210
+then:
+ required:
+ - clock-names
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+
+ usb-phy@ee080200 {
+ compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
+ reg = <0xee080200 0x700>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>;
+ #phy-cells = <1>;
+ };
+
+ usb-phy@ee0a0200 {
+ compatible = "renesas,usb2-phy-r8a7795", "renesas,rcar-gen3-usb2-phy";
+ reg = <0xee0a0200 0x700>;
+ clocks = <&cpg CPG_MOD 702>;
+ #phy-cells = <1>;
+ };
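
The "#phy-cells" description above defines the PHY specifier argument as the
controller's INT_STATUS bit (1 = OHCI, 2 = EHCI, 3 = OTG/BC). A hedged consumer
sketch, assuming a generic OHCI host node and a usb2_phy0 label on the first PHY
node, neither of which appears in the example above:

    usb@ee080000 {                      /* node name and address are illustrative */
        compatible = "generic-ohci";
        reg = <0xee080000 0x100>;
        phys = <&usb2_phy0 1>;          /* 1 = USBH_INTA (OHCI) */
        phy-names = "usb";
    };
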
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml
new file mode 100644
index 000000000000..f459eaf55278
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/renesas,usb3-phy.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/renesas,usb3-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car generation 3 USB 3.0 PHY
+
+maintainers:
+ - Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r8a774a1-usb3-phy # RZ/G2M
+ - renesas,r8a774b1-usb3-phy # RZ/G2N
+ - renesas,r8a7795-usb3-phy # R-Car H3
+ - renesas,r8a7796-usb3-phy # R-Car M3-W
+ - renesas,r8a77961-usb3-phy # R-Car M3-W+
+ - renesas,r8a77965-usb3-phy # R-Car M3-N
+ - const: renesas,rcar-gen3-usb3-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+ maxItems: 3
+
+ clock-names:
+ # If you want to use the ssc, the clock-frequency of usb_extal
+ # must not be 0.
+ minItems: 2
+ maxItems: 3
+ items:
+ - const: usb3-if # The functional clock
+ - const: usb3s_clk # The usb3's external clock
+ - const: usb_extal # The usb2's external clock
+
+ '#phy-cells':
+ # see phy-bindings.txt in the same directory
+ const: 0
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ renesas,ssc-range:
+ description: |
+ Enable/disable spread spectrum clock (ssc). A value of 0, or omitting the
+ property, disables the ssc. The actual value will be -<value> ppm.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 0, 4003, 4492, 4980 ]
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+
+ usb-phy@e65ee000 {
+ compatible = "renesas,r8a7795-usb3-phy", "renesas,rcar-gen3-usb3-phy";
+ reg = <0xe65ee000 0x90>;
+ clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>, <&usb_extal>;
+ clock-names = "usb3-if", "usb3s_clk", "usb_extal";
+ #phy-cells = <0>;
+ };
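
The renesas,ssc-range property is documented above but not exercised in the
example. A sketch of the same node with ssc enabled; as noted in the clock-names
comment, usb_extal must have a non-zero clock-frequency for ssc to take effect.

    usb-phy@e65ee000 {
        compatible = "renesas,r8a7795-usb3-phy", "renesas,rcar-gen3-usb3-phy";
        reg = <0xe65ee000 0x90>;
        clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>, <&usb_extal>;
        clock-names = "usb3-if", "usb3s_clk", "usb_extal";
        #phy-cells = <0>;
        renesas,ssc-range = <4980>;     /* enable ssc at -4980 ppm */
    };
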
diff --git a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
index 72aca81e8959..8a3032a3bd73 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
@@ -59,7 +59,7 @@ examples:
- |
dsi_dphy: phy@ff2e0000 {
compatible = "rockchip,px30-dsi-dphy";
- reg = <0x0 0xff2e0000 0x0 0x10000>;
+ reg = <0xff2e0000 0x10000>;
clocks = <&pmucru 13>, <&cru 12>;
clock-names = "ref", "pclk";
resets = <&cru 12>;
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
new file mode 100644
index 000000000000..86f49093b65f
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/socionext,uniphier-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier PCIe PHY
+
+description: |
+ This describes the devicetree bindings for PHY interface built into
+ PCIe controller implemented on Socionext UniPhier SoCs.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+properties:
+ compatible:
+ enum:
+ - socionext,uniphier-pro5-pcie-phy
+ - socionext,uniphier-ld20-pcie-phy
+ - socionext,uniphier-pxs3-pcie-phy
+
+ reg:
+ description: PHY register region (offset and length)
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ oneOf:
+ - items: # for Pro5
+ - const: gio
+ - const: link
+ - const: link # for others
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ oneOf:
+ - items: # for Pro5
+ - const: gio
+ - const: link
+ - const: link # for others
+
+ socionext,syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A phandle to the system control node used to set configurations for the phy
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+additionalProperties: false
+
+examples:
+ - |
+ pcie_phy: phy@66038000 {
+ compatible = "socionext,uniphier-ld20-pcie-phy";
+ reg = <0x66038000 0x4000>;
+ #phy-cells = <0>;
+ clock-names = "link";
+ clocks = <&sys_clk 24>;
+ reset-names = "link";
+ resets = <&sys_rst 24>;
+ socionext,syscon = <&soc_glue>;
+ };
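
For Pro5, clock-names and reset-names take the two-entry "gio", "link" form. A
minimal sketch of such a node, reusing the register address from the LD20 example;
the clock and reset indices are placeholders.

    pcie_phy: phy@66038000 {
        compatible = "socionext,uniphier-pro5-pcie-phy";
        reg = <0x66038000 0x4000>;
        #phy-cells = <0>;
        clock-names = "gio", "link";
        clocks = <&sys_clk 12>, <&sys_clk 24>;  /* placeholder indices */
        reset-names = "gio", "link";
        resets = <&sys_rst 12>, <&sys_rst 24>;  /* placeholder indices */
    };
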
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml
new file mode 100644
index 000000000000..479b203f7aa6
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb2-phy.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/socionext,uniphier-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier USB2 PHY
+
+description: |
+ This describes the devicetree bindings for PHY interface built into
+ USB2 controller implemented on Socionext UniPhier SoCs.
+ The Pro4 SoC has both USB2 and USB3 host controllers; however, its USB3
+ controller doesn't include its own High-Speed PHY, so this USB2 PHY must be
+ specified instead of a USB3 HS-PHY.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+properties:
+ compatible:
+ enum:
+ - socionext,uniphier-pro4-usb2-phy
+ - socionext,uniphier-ld11-usb2-phy
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^phy@[0-9]+$":
+ type: object
+ additionalProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 3
+ description:
+ The ID number for the PHY
+
+ "#phy-cells":
+ const: 0
+
+ required:
+ - reg
+ - "#phy-cells"
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ // The UniPhier usb2-phy should be a subnode of a "syscon" compatible node.
+
+ soc-glue@5f800000 {
+ compatible = "socionext,uniphier-ld11-soc-glue", "simple-mfd", "syscon";
+ reg = <0x5f800000 0x2000>;
+
+ usb-controller {
+ compatible = "socionext,uniphier-ld11-usb2-phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_phy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ usb_phy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+
+ usb_phy2: phy@2 {
+ reg = <2>;
+ #phy-cells = <0>;
+ };
+ };
+ };
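
The removed uniphier-usb2-phy.txt (later in this diff) also showed how a host
controller consumes one of these PHYs; that consumer example still applies to the
YAML binding and is reproduced here as a sketch, with register details elided as
in the original:

    usb@5a800100 {
        compatible = "socionext,uniphier-ehci", "generic-ehci";
        phy-names = "usb";
        phys = <&usb_phy0>;
    };
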
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
new file mode 100644
index 000000000000..f88d36207b87
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/socionext,uniphier-usb3hs-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier USB3 High-Speed (HS) PHY
+
+description: |
+ This describes the devicetree bindings for PHY interfaces built into
+ USB3 controller implemented on Socionext UniPhier SoCs.
+ Although the controller includes both a High-Speed PHY and a Super-Speed PHY,
+ this document describes the High-Speed PHY.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+properties:
+ compatible:
+ enum:
+ - socionext,uniphier-pro5-usb3-hsphy
+ - socionext,uniphier-pxs2-usb3-hsphy
+ - socionext,uniphier-ld20-usb3-hsphy
+ - socionext,uniphier-pxs3-usb3-hsphy
+
+ reg:
+ description: PHY register region (offset and length)
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ oneOf:
+ - const: link # for PXs2
+ - items: # for PXs3
+ - const: link
+ - const: phy
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: link
+ - const: phy
+
+ vbus-supply:
+ description: A phandle to the regulator for USB VBUS
+
+ nvmem-cells:
+ maxItems: 3
+ description:
+ Phandles to the nvmem cells that contain the trimming data.
+ Available only for the HS-PHY implemented on LD20 and PXs3; if
+ unspecified, default values are used.
+
+ nvmem-cell-names:
+ items:
+ - const: rterm
+ - const: sel_t
+ - const: hs_i
+ description:
+ Should be the above names, each corresponding to one of the nvmem-cells.
+ All three parameters associated with these names are required for
+ each port; if any one is omitted, the trimming data of the port
+ will not be set at all.
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_hsphy0: hs-phy@200 {
+ compatible = "socionext,uniphier-ld20-usb3-hsphy";
+ reg = <0x200 0x10>;
+ #phy-cells = <0>;
+ clock-names = "link", "phy";
+ clocks = <&sys_clk 14>, <&sys_clk 16>;
+ reset-names = "link", "phy";
+ resets = <&sys_rst 14>, <&sys_rst 16>;
+ vbus-supply = <&usb_vbus0>;
+ nvmem-cell-names = "rterm", "sel_t", "hs_i";
+ nvmem-cells = <&usb_rterm0>, <&usb_sel_t0>, <&usb_hs_i0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
new file mode 100644
index 000000000000..edff2c95c9ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/socionext,uniphier-usb3ss-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier USB3 Super-Speed (SS) PHY
+
+description: |
+ This describes the devicetree bindings for PHY interfaces built into
+ USB3 controller implemented on Socionext UniPhier SoCs.
+ Although the controller includes both a High-Speed PHY and a Super-Speed PHY,
+ this document describes the Super-Speed PHY.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+properties:
+ compatible:
+ enum:
+ - socionext,uniphier-pro4-usb3-ssphy
+ - socionext,uniphier-pro5-usb3-ssphy
+ - socionext,uniphier-pxs2-usb3-ssphy
+ - socionext,uniphier-ld20-usb3-ssphy
+ - socionext,uniphier-pxs3-usb3-ssphy
+
+ reg:
+ description: PHY register region (offset and length)
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ minItems: 2
+ maxItems: 3
+
+ clock-names:
+ oneOf:
+ - items: # for Pro4, Pro5
+ - const: gio
+ - const: link
+ - items: # for PXs3 with phy-ext
+ - const: link
+ - const: phy
+ - const: phy-ext
+ - items: # for others
+ - const: link
+ - const: phy
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ oneOf:
+ - items: # for Pro4,Pro5
+ - const: gio
+ - const: link
+ - items: # for others
+ - const: link
+ - const: phy
+
+ vbus-supply:
+ description: A phandle to the regulator for USB VBUS
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - vbus-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_ssphy0: ss-phy@300 {
+ compatible = "socionext,uniphier-ld20-usb3-ssphy";
+ reg = <0x300 0x10>;
+ #phy-cells = <0>;
+ clock-names = "link", "phy";
+ clocks = <&sys_clk 14>, <&sys_clk 16>;
+ reset-names = "link", "phy";
+ resets = <&sys_rst 14>, <&sys_rst 16>;
+ vbus-supply = <&usb_vbus0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt b/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt
deleted file mode 100644
index 3cee372c5742..000000000000
--- a/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Socionext UniPhier PCIe PHY bindings
-
-This describes the devicetree bindings for PHY interface built into
-PCIe controller implemented on Socionext UniPhier SoCs.
-
-Required properties:
-- compatible: Should contain one of the following:
- "socionext,uniphier-pro5-pcie-phy" - for Pro5 PHY
- "socionext,uniphier-ld20-pcie-phy" - for LD20 PHY
- "socionext,uniphier-pxs3-pcie-phy" - for PXs3 PHY
-- reg: Specifies offset and length of the register set for the device.
-- #phy-cells: Must be zero.
-- clocks: A list of phandles to the clock gate for PCIe glue layer
- including this phy.
-- clock-names: For Pro5 only, should contain the following:
- "gio", "link" - for Pro5 SoC
-- resets: A list of phandles to the reset line for PCIe glue layer
- including this phy.
-- reset-names: For Pro5 only, should contain the following:
- "gio", "link" - for Pro5 SoC
-
-Optional properties:
-- socionext,syscon: A phandle to system control to set configurations
- for phy.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Example:
- pcie_phy: phy@66038000 {
- compatible = "socionext,uniphier-ld20-pcie-phy";
- reg = <0x66038000 0x4000>;
- #phy-cells = <0>;
- clocks = <&sys_clk 24>;
- resets = <&sys_rst 24>;
- socionext,syscon = <&soc_glue>;
- };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt
deleted file mode 100644
index b43b28250cc0..000000000000
--- a/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Socionext UniPhier USB2 PHY
-
-This describes the devicetree bindings for PHY interface built into
-USB2 controller implemented on Socionext UniPhier SoCs.
-
-Pro4 SoC has both USB2 and USB3 host controllers, however, this USB3
-controller doesn't include its own High-Speed PHY. This needs to specify
-USB2 PHY instead of USB3 HS-PHY.
-
-Required properties:
-- compatible: Should contain one of the following:
- "socionext,uniphier-pro4-usb2-phy" - for Pro4 SoC
- "socionext,uniphier-ld11-usb2-phy" - for LD11 SoC
-
-Sub-nodes:
-Each PHY should be represented as a sub-node.
-
-Sub-nodes required properties:
-- #phy-cells: Should be 0.
-- reg: The number of the PHY.
-
-Sub-nodes optional properties:
-- vbus-supply: A phandle to the regulator for USB VBUS.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Example:
- soc-glue@5f800000 {
- ...
- usb-phy {
- compatible = "socionext,uniphier-ld11-usb2-phy";
- usb_phy0: phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
- ...
- };
- };
-
- usb@5a800100 {
- compatible = "socionext,uniphier-ehci", "generic-ehci";
- ...
- phy-names = "usb";
- phys = <&usb_phy0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt
deleted file mode 100644
index 093d4f08705f..000000000000
--- a/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-Socionext UniPhier USB3 High-Speed (HS) PHY
-
-This describes the devicetree bindings for PHY interfaces built into
-USB3 controller implemented on Socionext UniPhier SoCs.
-Although the controller includes High-Speed PHY and Super-Speed PHY,
-this describes about High-Speed PHY.
-
-Required properties:
-- compatible: Should contain one of the following:
- "socionext,uniphier-pro5-usb3-hsphy" - for Pro5 SoC
- "socionext,uniphier-pxs2-usb3-hsphy" - for PXs2 SoC
- "socionext,uniphier-ld20-usb3-hsphy" - for LD20 SoC
- "socionext,uniphier-pxs3-usb3-hsphy" - for PXs3 SoC
-- reg: Specifies offset and length of the register set for the device.
-- #phy-cells: Should be 0.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
- According to the clock-names, appropriate clocks are required.
-- clock-names: Should contain the following:
- "gio", "link" - for Pro5 SoC
- "phy", "phy-ext", "link" - for PXs3 SoC, "phy-ext" is optional.
- "phy", "link" - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
- According to the reset-names, appropriate resets are required.
-- reset-names: Should contain the following:
- "gio", "link" - for Pro5 SoC
- "phy", "link" - for others
-
-Optional properties:
-- vbus-supply: A phandle to the regulator for USB VBUS.
-- nvmem-cells: Phandles to nvmem cell that contains the trimming data.
- Available only for HS-PHY implemented on LD20 and PXs3, and
- if unspecified, default value is used.
-- nvmem-cell-names: Should be the following names, which correspond to
- each nvmem-cells.
- All of the 3 parameters associated with the following names are
- required for each port, if any one is omitted, the trimming data
- of the port will not be set at all.
- "rterm", "sel_t", "hs_i" - Each cell name for phy parameters
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Example:
-
- usb-glue@65b00000 {
- compatible = "socionext,uniphier-ld20-dwc3-glue",
- "simple-mfd";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x65b00000 0x400>;
-
- usb_vbus0: regulator {
- ...
- };
-
- usb_hsphy0: hs-phy@200 {
- compatible = "socionext,uniphier-ld20-usb3-hsphy";
- reg = <0x200 0x10>;
- #phy-cells = <0>;
- clock-names = "link", "phy";
- clocks = <&sys_clk 14>, <&sys_clk 16>;
- reset-names = "link", "phy";
- resets = <&sys_rst 14>, <&sys_rst 16>;
- vbus-supply = <&usb_vbus0>;
- nvmem-cell-names = "rterm", "sel_t", "hs_i";
- nvmem-cells = <&usb_rterm0>, <&usb_sel_t0>,
- <&usb_hs_i0>;
- };
- ...
- };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt
deleted file mode 100644
index 9df2bc2f5999..000000000000
--- a/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-Socionext UniPhier USB3 Super-Speed (SS) PHY
-
-This describes the devicetree bindings for PHY interfaces built into
-USB3 controller implemented on Socionext UniPhier SoCs.
-Although the controller includes High-Speed PHY and Super-Speed PHY,
-this describes about Super-Speed PHY.
-
-Required properties:
-- compatible: Should contain one of the following:
- "socionext,uniphier-pro4-usb3-ssphy" - for Pro4 SoC
- "socionext,uniphier-pro5-usb3-ssphy" - for Pro5 SoC
- "socionext,uniphier-pxs2-usb3-ssphy" - for PXs2 SoC
- "socionext,uniphier-ld20-usb3-ssphy" - for LD20 SoC
- "socionext,uniphier-pxs3-usb3-ssphy" - for PXs3 SoC
-- reg: Specifies offset and length of the register set for the device.
-- #phy-cells: Should be 0.
-- clocks: A list of phandles to the clock gate for USB3 glue layer.
- According to the clock-names, appropriate clocks are required.
-- clock-names:
- "gio", "link" - for Pro4 and Pro5 SoC
- "phy", "phy-ext", "link" - for PXs3 SoC, "phy-ext" is optional.
- "phy", "link" - for others
-- resets: A list of phandles to the reset control for USB3 glue layer.
- According to the reset-names, appropriate resets are required.
-- reset-names:
- "gio", "link" - for Pro4 and Pro5 SoC
- "phy", "link" - for others
-
-Optional properties:
-- vbus-supply: A phandle to the regulator for USB VBUS.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Example:
-
- usb-glue@65b00000 {
- compatible = "socionext,uniphier-ld20-dwc3-glue",
- "simple-mfd";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x65b00000 0x400>;
-
- usb_vbus0: regulator {
- ...
- };
-
- usb_ssphy0: ss-phy@300 {
- compatible = "socionext,uniphier-ld20-usb3-ssphy";
- reg = <0x300 0x10>;
- #phy-cells = <0>;
- clock-names = "link", "phy";
- clocks = <&sys_clk 14>, <&sys_clk 16>;
- reset-names = "link", "phy";
- resets = <&sys_rst 14>, <&sys_rst 16>;
- vbus-supply = <&usb_vbus0>;
- };
- ...
- };
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
index bfefd09d8c1e..7556be6e2754 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -84,13 +84,12 @@ properties:
gpio-line-names: true
input-debounce:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 1
- maxItems: 5
description:
Debouncing periods in microseconds, one period per interrupt
bank found in the controller
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 5
patternProperties:
# It's pretty scary, but the basic idea is that:
@@ -115,9 +114,8 @@ patternProperties:
bias-pull-down: true
drive-strength:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 10, 20, 30, 40 ]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [10, 20, 30, 40]
required:
- pins
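
A hedged sketch of a pin configuration node using the drive-strength and bias
properties constrained above; the pin and function names are illustrative and
depend on the SoC variant.

    uart0_pins: uart0-pins {
        pins = "PB22", "PB23";          /* illustrative pin names */
        function = "uart0";             /* illustrative function name */
        drive-strength = <10>;
        bias-pull-up;
    };
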
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index 7651a675ab2d..017d9593573b 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -33,26 +33,23 @@ patternProperties:
then:
patternProperties:
"^function|groups$":
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum: [ ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14,
- ADC15, ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT,
- DDCCLK, DDCDAT, EXTRST, FLACK, FLBUSY, FLWP, GPID, GPID0, GPID2,
- GPID4, GPID6, GPIE0, GPIE2, GPIE4, GPIE6, I2C10, I2C11, I2C12,
- I2C13, I2C14, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, LPCPD,
- LPCPME, LPCRST, LPCSMI, MAC1LINK, MAC2LINK, MDIO1, MDIO2, NCTS1,
- NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2,
- NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NDTS4, NRI1, NRI2,
- NRI3, NRI4, NRTS1, NRTS2, NRTS3, OSCCLK, PWM0, PWM1, PWM2, PWM3,
- PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1, RMII2, ROM16,
- ROM8, ROMCS1, ROMCS2, ROMCS3, ROMCS4, RXD1, RXD2, RXD3, RXD4,
- SALT1, SALT2, SALT3, SALT4, SD1, SD2, SGPMCK, SGPMI, SGPMLD,
- SGPMO, SGPSCK, SGPSI0, SGPSI1, SGPSLD, SIOONCTRL, SIOPBI, SIOPBO,
- SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1DEBUG,
- SPI1PASSTHRU, SPICS1, TIMER3, TIMER4, TIMER5, TIMER6, TIMER7,
- TIMER8, TXD1, TXD2, TXD3, TXD4, UART6, USB11D1, USB11H2, USB2D1,
- USB2H1, USBCKI, VGABIOS_ROM, VGAHS, VGAVS, VPI18, VPI24, VPI30,
- VPO12, VPO24, WDTRST1, WDTRST2 ]
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum: [ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
+ ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, DDCCLK, DDCDAT,
+ EXTRST, FLACK, FLBUSY, FLWP, GPID, GPID0, GPID2, GPID4, GPID6, GPIE0,
+ GPIE2, GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13, I2C14, I2C3, I2C4,
+ I2C5, I2C6, I2C7, I2C8, I2C9, LPCPD, LPCPME, LPCRST, LPCSMI, MAC1LINK,
+ MAC2LINK, MDIO1, MDIO2, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2,
+ NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4,
+ NDTS4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, OSCCLK, PWM0,
+ PWM1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1,
+ RMII2, ROM16, ROM8, ROMCS1, ROMCS2, ROMCS3, ROMCS4, RXD1, RXD2, RXD3,
+ RXD4, SALT1, SALT2, SALT3, SALT4, SD1, SD2, SGPMCK, SGPMI, SGPMLD,
+ SGPMO, SGPSCK, SGPSI0, SGPSI1, SGPSLD, SIOONCTRL, SIOPBI, SIOPBO,
+ SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1DEBUG, SPI1PASSTHRU,
+ SPICS1, TIMER3, TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1, TXD2,
+ TXD3, TXD4, UART6, USB11D1, USB11H2, USB2D1, USB2H1, USBCKI, VGABIOS_ROM,
+ VGAHS, VGAVS, VPI18, VPI24, VPI30, VPO12, VPO24, WDTRST1, WDTRST2]
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index 36feaf5e2dff..c643d6d44415 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -29,8 +29,7 @@ properties:
aspeed,external-nodes:
minItems: 2
maxItems: 2
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle-array
+ $ref: /schemas/types.yaml#/definitions/phandle-array
description: |
A cell of phandles to external controller nodes:
0: compatible with "aspeed,ast2500-gfx", "syscon"
@@ -43,28 +42,25 @@ patternProperties:
then:
patternProperties:
"^function|groups$":
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum: [ ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14,
- ADC15, ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT,
- DDCCLK, DDCDAT, ESPI, FWSPICS1, FWSPICS2, GPID0, GPID2, GPID4,
- GPID6, GPIE0, GPIE2, GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13,
- I2C14, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, LAD0, LAD1,
- LAD2, LAD3, LCLK, LFRAME, LPCHC, LPCPD, LPCPLUS, LPCPME, LPCRST,
- LPCSMI, LSIRQ, MAC1LINK, MAC2LINK, MDIO1, MDIO2, NCTS1, NCTS2,
- NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
- NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
- NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PNOR, PWM0, PWM1, PWM2,
- PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1, RMII2, RXD1,
- RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13, SALT14,
- SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9, SCL1,
- SCL2, SD1, SD2, SDA1, SDA2, SGPS1, SGPS2, SIOONCTRL, SIOPBI,
- SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1CS1,
- SPI1DEBUG, SPI1PASSTHRU, SPI2CK, SPI2CS0, SPI2CS1, SPI2MISO,
- SPI2MOSI, TIMER3, TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1,
- TXD2, TXD3, TXD4, UART6, USB11BHID, USB2AD, USB2AH, USB2BD,
- USB2BH, USBCKI, VGABIOSROM, VGAHS, VGAVS, VPI24, VPO, WDTRST1,
- WDTRST2, ]
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum: [ACPI, ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
+ ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, DDCCLK, DDCDAT,
+ ESPI, FWSPICS1, FWSPICS2, GPID0, GPID2, GPID4, GPID6, GPIE0, GPIE2,
+ GPIE4, GPIE6, I2C10, I2C11, I2C12, I2C13, I2C14, I2C3, I2C4, I2C5,
+ I2C6, I2C7, I2C8, I2C9, LAD0, LAD1, LAD2, LAD3, LCLK, LFRAME, LPCHC,
+ LPCPD, LPCPLUS, LPCPME, LPCRST, LPCSMI, LSIRQ, MAC1LINK, MAC2LINK,
+ MDIO1, MDIO2, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
+ NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2,
+ NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PNOR, PWM0,
+ PWM1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, RGMII1, RGMII2, RMII1,
+ RMII2, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13,
+ SALT14, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9, SCL1,
+ SCL2, SD1, SD2, SDA1, SDA2, SGPS1, SGPS2, SIOONCTRL, SIOPBI, SIOPBO,
+ SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1CS1, SPI1DEBUG,
+ SPI1PASSTHRU, SPI2CK, SPI2CS0, SPI2CS1, SPI2MISO, SPI2MOSI, TIMER3,
+ TIMER4, TIMER5, TIMER6, TIMER7, TIMER8, TXD1, TXD2, TXD3, TXD4, UART6,
+ USB11BHID, USB2AD, USB2AH, USB2BD, USB2BH, USBCKI, VGABIOSROM, VGAHS,
+ VGAVS, VPI24, VPO, WDTRST1, WDTRST2]
required:
- compatible
@@ -125,7 +121,7 @@ examples:
lhc: lhc@20 {
compatible = "aspeed,ast2500-lhc";
- reg = <0x20 0x24 0x48 0x8>;
+ reg = <0x20 0x24>, <0x48 0x8>;
};
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index 45af29bc3202..1506726c7fea 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -30,64 +30,58 @@ patternProperties:
then:
properties:
function:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
- ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC,
- ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0,
- GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
- GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11,
- I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6,
- I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
- LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
- MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2,
- NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
- NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
- NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PWM0, PWM1, PWM10, PWM11,
- PWM12, PWM13, PWM14, PWM15, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7,
- PWM8, PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
- RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12,
- SALT13, SALT14, SALT15, SALT16, SALT2, SALT3, SALT4, SALT5,
- SALT6, SALT7, SALT8, SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL,
- SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
- SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
- TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
- TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
- THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
- UART6, UART7, UART8, UART9, USBAD, USBADP, USB2AH, USB2AHP,
- USB2BD, USB2BH, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
- WDTRST4, ]
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum: [ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
+ ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC, ESPI, ESPIALT,
+ FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3,
+ GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5,
+ GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16,
+ I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5,
+ I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
+ MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4,
+ NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2,
+ NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4,
+ NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE, PWM0, PWM1, PWM10, PWM11,
+ PWM12, PWM13, PWM14, PWM15, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8,
+ PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3, RMII4,
+ RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12, SALT13, SALT14,
+ SALT15, SALT16, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8,
+ SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ,
+ SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
+ SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13, TACH14,
+ TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0,
+ THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12,
+ UART13, UART6, UART7, UART8, UART9, USBAD, USBADP, USB2AH, USB2AHP,
+ USB2BD, USB2BH, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4]
+
groups:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
- ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1,
- EMMCG4, EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID,
- FWQSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5,
- GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6,
- GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14,
- I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9,
- I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD,
- LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3, MACLINK4,
- MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1,
- NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
- NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
- OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1,
- PWM12G0, PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0,
- PWM15G1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1,
- PWM9G0, PWM9G1, QSPI1, QSPI2, RGMII1, RGMII2, RGMII3, RGMII4,
- RMII1, RMII2, RMII3, RMII4, RXD1, RXD2, RXD3, RXD4, SALT1,
- SALT10G0, SALT10G1, SALT11G0, SALT11G1, SALT12G0, SALT12G1,
- SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0, SALT15G1,
- SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7,
- SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
- SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
- SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
- TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
- TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
- THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
- UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, USBA,
- USBB, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum: [ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15, ADC2,
+ ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1, EMMCG4,
+ EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP,
+ GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+ GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1, I2C10,
+ I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5,
+ I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+ LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3,
+ MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4,
+ NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+ NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+ OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0,
+ PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2,
+ PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
+ QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3, RMII4,
+ RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1, SALT11G0, SALT11G1,
+ SALT12G0, SALT12G1, SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0,
+ SALT15G1, SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6,
+ SALT7, SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+ SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR,
+ SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
+ TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6,
+ TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3,
+ TXD4, UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
+ UART7, UART8, UART9, USBA, USBB, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+ WDTRST3, WDTRST4]
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
index 3cab7336a326..5682b2010e50 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm2835-gpio.txt
@@ -9,13 +9,16 @@ Required properties:
"brcm,bcm2835-gpio" - BCM2835 compatible pinctrl
"brcm,bcm7211-gpio" - BCM7211 compatible pinctrl
"brcm,bcm2711-gpio" - BCM2711 compatible pinctrl
+ "brcm,bcm7211-gpio" - BCM7211 compatible pinctrl
- reg: Should contain the physical address of the GPIO module's registers.
- gpio-controller: Marks the device node as a GPIO controller.
- #gpio-cells : Should be two. The first cell is the pin number and the
second cell is used to specify optional parameters:
- bit 0 specifies polarity (0 for normal, 1 for inverted)
- interrupts : The interrupt outputs from the controller. One interrupt per
- individual bank followed by the "all banks" interrupt.
+ individual bank followed by the "all banks" interrupt. For BCM7211, an
+ additional set of per-bank interrupt lines and an "all banks" wake-up
+ interrupt may be specified.
- interrupt-controller: Marks the device node as an interrupt controller.
- #interrupt-cells : Should be 2.
The first cell is the GPIO number.
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
deleted file mode 100644
index a87447180e83..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
+++ /dev/null
@@ -1,141 +0,0 @@
-Cirrus Logic Lochnagar Audio Development Board
-
-Lochnagar is an evaluation and development board for Cirrus Logic
-Smart CODEC and Amp devices. It allows the connection of most Cirrus
-Logic devices on mini-cards, as well as allowing connection of
-various application processor systems to provide a full evaluation
-platform. Audio system topology, clocking and power can all be
-controlled through the Lochnagar, allowing the device under test
-to be used in a variety of possible use cases.
-
-This binding document describes the binding for the pinctrl portion
-of the driver.
-
-Also see these documents for generic binding information:
- [1] GPIO : ../gpio/gpio.txt
- [2] Pinctrl: ../pinctrl/pinctrl-bindings.txt
-
-And these for relevant defines:
- [3] include/dt-bindings/pinctrl/lochnagar.h
-
-This binding must be part of the Lochnagar MFD binding:
- [4] ../mfd/cirrus,lochnagar.txt
-
-Required properties:
-
- - compatible : One of the following strings:
- "cirrus,lochnagar-pinctrl"
-
- - gpio-controller : Indicates this device is a GPIO controller.
- - #gpio-cells : Must be 2. The first cell is the pin number, see
- [3] for available pins and the second cell is used to specify
- optional parameters, see [1].
- - gpio-ranges : Range of pins managed by the GPIO controller, see
- [1]. Both the GPIO and Pinctrl base should be set to zero and the
- count to the appropriate of the LOCHNAGARx_PIN_NUM_GPIOS define,
- see [3].
-
- - pinctrl-names : A pinctrl state named "default" must be defined.
- - pinctrl-0 : A phandle to the default pinctrl state.
-
-Required sub-nodes:
-
-The pin configurations are defined as a child of the pinctrl states
-node, see [2]. Each sub-node can have the following properties:
- - groups : A list of groups to select (either this or "pins" must be
- specified), available groups:
- codec-aif1, codec-aif2, codec-aif3, dsp-aif1, dsp-aif2, psia1,
- psia2, gf-aif1, gf-aif2, gf-aif3, gf-aif4, spdif-aif, usb-aif1,
- usb-aif2, adat-aif, soundcard-aif
- - pins : A list of pin names to select (either this or "groups" must
- be specified), available pins:
- fpga-gpio1, fpga-gpio2, fpga-gpio3, fpga-gpio4, fpga-gpio5,
- fpga-gpio6, codec-gpio1, codec-gpio2, codec-gpio3, codec-gpio4,
- codec-gpio5, codec-gpio6, codec-gpio7, codec-gpio8, dsp-gpio1,
- dsp-gpio2, dsp-gpio3, dsp-gpio4, dsp-gpio5, dsp-gpio6, gf-gpio2,
- gf-gpio3, gf-gpio7, codec-aif1-bclk, codec-aif1-rxdat,
- codec-aif1-lrclk, codec-aif1-txdat, codec-aif2-bclk,
- codec-aif2-rxdat, codec-aif2-lrclk, codec-aif2-txdat,
- codec-aif3-bclk, codec-aif3-rxdat, codec-aif3-lrclk,
- codec-aif3-txdat, dsp-aif1-bclk, dsp-aif1-rxdat, dsp-aif1-lrclk,
- dsp-aif1-txdat, dsp-aif2-bclk, dsp-aif2-rxdat,
- dsp-aif2-lrclk, dsp-aif2-txdat, psia1-bclk, psia1-rxdat,
- psia1-lrclk, psia1-txdat, psia2-bclk, psia2-rxdat, psia2-lrclk,
- psia2-txdat, gf-aif3-bclk, gf-aif3-rxdat, gf-aif3-lrclk,
- gf-aif3-txdat, gf-aif4-bclk, gf-aif4-rxdat, gf-aif4-lrclk,
- gf-aif4-txdat, gf-aif1-bclk, gf-aif1-rxdat, gf-aif1-lrclk,
- gf-aif1-txdat, gf-aif2-bclk, gf-aif2-rxdat, gf-aif2-lrclk,
- gf-aif2-txdat, dsp-uart1-rx, dsp-uart1-tx, dsp-uart2-rx,
- dsp-uart2-tx, gf-uart2-rx, gf-uart2-tx, usb-uart-rx,
- codec-pdmclk1, codec-pdmdat1, codec-pdmclk2, codec-pdmdat2,
- codec-dmicclk1, codec-dmicdat1, codec-dmicclk2, codec-dmicdat2,
- codec-dmicclk3, codec-dmicdat3, codec-dmicclk4, codec-dmicdat4,
- dsp-dmicclk1, dsp-dmicdat1, dsp-dmicclk2, dsp-dmicdat2, i2c2-scl,
- i2c2-sda, i2c3-scl, i2c3-sda, i2c4-scl, i2c4-sda, dsp-standby,
- codec-mclk1, codec-mclk2, dsp-clkin, psia1-mclk, psia2-mclk,
- gf-gpio1, gf-gpio5, dsp-gpio20, led1, led2
- - function : The mux function to select, available functions:
- aif, fpga-gpio1, fpga-gpio2, fpga-gpio3, fpga-gpio4, fpga-gpio5,
- fpga-gpio6, codec-gpio1, codec-gpio2, codec-gpio3, codec-gpio4,
- codec-gpio5, codec-gpio6, codec-gpio7, codec-gpio8, dsp-gpio1,
- dsp-gpio2, dsp-gpio3, dsp-gpio4, dsp-gpio5, dsp-gpio6, gf-gpio2,
- gf-gpio3, gf-gpio7, gf-gpio1, gf-gpio5, dsp-gpio20, codec-clkout,
- dsp-clkout, pmic-32k, spdif-clkout, clk-12m288, clk-11m2986,
- clk-24m576, clk-22m5792, xmos-mclk, gf-clkout1, gf-mclk1,
- gf-mclk3, gf-mclk2, gf-clkout2, codec-mclk1, codec-mclk2,
- dsp-clkin, psia1-mclk, psia2-mclk, spdif-mclk, codec-irq,
- codec-reset, dsp-reset, dsp-irq, dsp-standby, codec-pdmclk1,
- codec-pdmdat1, codec-pdmclk2, codec-pdmdat2, codec-dmicclk1,
- codec-dmicdat1, codec-dmicclk2, codec-dmicdat2, codec-dmicclk3,
- codec-dmicdat3, codec-dmicclk4, codec-dmicdat4, dsp-dmicclk1,
- dsp-dmicdat1, dsp-dmicclk2, dsp-dmicdat2, dsp-uart1-rx,
- dsp-uart1-tx, dsp-uart2-rx, dsp-uart2-tx, gf-uart2-rx,
- gf-uart2-tx, usb-uart-rx, usb-uart-tx, i2c2-scl, i2c2-sda,
- i2c3-scl, i2c3-sda, i2c4-scl, i2c4-sda, spdif-aif, psia1,
- psia1-bclk, psia1-lrclk, psia1-rxdat, psia1-txdat, psia2,
- psia2-bclk, psia2-lrclk, psia2-rxdat, psia2-txdat, codec-aif1,
- codec-aif1-bclk, codec-aif1-lrclk, codec-aif1-rxdat,
- codec-aif1-txdat, codec-aif2, codec-aif2-bclk, codec-aif2-lrclk,
- codec-aif2-rxdat, codec-aif2-txdat, codec-aif3, codec-aif3-bclk,
- codec-aif3-lrclk, codec-aif3-rxdat, codec-aif3-txdat, dsp-aif1,
- dsp-aif1-bclk, dsp-aif1-lrclk, dsp-aif1-rxdat, dsp-aif1-txdat,
- dsp-aif2, dsp-aif2-bclk, dsp-aif2-lrclk, dsp-aif2-rxdat,
- dsp-aif2-txdat, gf-aif3, gf-aif3-bclk, gf-aif3-lrclk,
- gf-aif3-rxdat, gf-aif3-txdat, gf-aif4, gf-aif4-bclk,
- gf-aif4-lrclk, gf-aif4-rxdat, gf-aif4-txdat, gf-aif1,
- gf-aif1-bclk, gf-aif1-lrclk, gf-aif1-rxdat, gf-aif1-txdat,
- gf-aif2, gf-aif2-bclk, gf-aif2-lrclk, gf-aif2-rxdat,
- gf-aif2-txdat, usb-aif1, usb-aif2, adat-aif, soundcard-aif,
-
- - output-enable : Specifies that an AIF group will be used as a master
- interface (either this or input-enable is required if a group is
- being muxed to an AIF)
- - input-enable : Specifies that an AIF group will be used as a slave
- interface (either this or output-enable is required if a group is
- being muxed to an AIF)
-
-Example:
-
-lochnagar-pinctrl {
- compatible = "cirrus,lochnagar-pinctrl";
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&lochnagar 0 0 LOCHNAGAR2_PIN_NUM_GPIOS>;
-
- pinctrl-names = "default";
- pinctrl-0 = <&pin-settings>;
-
- pin-settings: pin-settings {
- ap-aif {
- input-enable;
- groups = "gf-aif1";
- function = "codec-aif3";
- };
- codec-aif {
- output-enable;
- groups = "codec-aif3";
- function = "gf-aif1";
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml
new file mode 100644
index 000000000000..420d74856032
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml
@@ -0,0 +1,190 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/cirrus,lochnagar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Lochnagar Audio Development Board
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ Lochnagar is an evaluation and development board for Cirrus Logic
+ Smart CODEC and Amp devices. It allows the connection of most Cirrus
+ Logic devices on mini-cards, as well as allowing connection of various
+ application processor systems to provide a full evaluation platform.
+ Audio system topology, clocking and power can all be controlled through
+ the Lochnagar, allowing the device under test to be used in a variety of
+ possible use cases.
+
+ This binding document describes the binding for the pinctrl portion of
+ the driver.
+
+ Also see these documents for generic binding information:
+ [1] GPIO : ../gpio/gpio.txt
+ [2] Pinctrl: ../pinctrl/pinctrl-bindings.txt
+
+ And these for relevant defines:
+ [3] include/dt-bindings/pinctrl/lochnagar.h
+
+ This binding must be part of the Lochnagar MFD binding:
+ [4] ../mfd/cirrus,lochnagar.yaml
+
+properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar-pinctrl
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ description:
+ The first cell is the pin number and the second cell is used
+ to specify optional parameters.
+ const: 2
+
+ gpio-ranges:
+ description:
+ Range of pins managed by the GPIO controller, see [1]. Both the
+ GPIO and Pinctrl base should be set to zero and the count to the
+ appropriate of the LOCHNAGARx_PIN_NUM_GPIOS define, see [3].
+ maxItems: 1
+
+ pinctrl-0:
+ description:
+ A phandle to the default pinctrl state.
+
+ pinctrl-names:
+ description:
+ A pinctrl state named "default" must be defined.
+ const: default
+
+ pin-settings:
+ type: object
+ patternProperties:
+ '-pins$':
+ description:
+ The pin configurations are defined as a child of the pinctrl
+ states node, see [2]. Each sub-node can have the following
+ properties.
+ type: object
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ groups:
+ description:
+ A list of groups to select (either this or "pins" must be
+ specified), available groups.
+ enum: [ codec-aif1, codec-aif2, codec-aif3, dsp-aif1,
+ dsp-aif2, psia1, psia2, gf-aif1, gf-aif2, gf-aif3,
+ gf-aif4, spdif-aif, usb-aif1, usb-aif2, adat-aif,
+ soundcard-aif ]
+
+ pins:
+ description:
+ A list of pin names to select (either this or "groups" must
+ be specified), available pins.
+ enum: [ fpga-gpio1, fpga-gpio2, fpga-gpio3, fpga-gpio4,
+ fpga-gpio5, fpga-gpio6, codec-gpio1, codec-gpio2,
+ codec-gpio3, codec-gpio4, codec-gpio5, codec-gpio6,
+ codec-gpio7, codec-gpio8, dsp-gpio1, dsp-gpio2,
+ dsp-gpio3, dsp-gpio4, dsp-gpio5, dsp-gpio6,
+ gf-gpio2, gf-gpio3, gf-gpio7, codec-aif1-bclk,
+ codec-aif1-rxdat, codec-aif1-lrclk, codec-aif1-txdat,
+ codec-aif2-bclk, codec-aif2-rxdat, codec-aif2-lrclk,
+ codec-aif2-txdat, codec-aif3-bclk, codec-aif3-rxdat,
+ codec-aif3-lrclk, codec-aif3-txdat, dsp-aif1-bclk,
+ dsp-aif1-rxdat, dsp-aif1-lrclk, dsp-aif1-txdat,
+ dsp-aif2-bclk, dsp-aif2-rxdat, dsp-aif2-lrclk,
+ dsp-aif2-txdat, psia1-bclk, psia1-rxdat, psia1-lrclk,
+ psia1-txdat, psia2-bclk, psia2-rxdat, psia2-lrclk,
+ psia2-txdat, gf-aif3-bclk, gf-aif3-rxdat,
+ gf-aif3-lrclk, gf-aif3-txdat, gf-aif4-bclk,
+ gf-aif4-rxdat, gf-aif4-lrclk, gf-aif4-txdat,
+ gf-aif1-bclk, gf-aif1-rxdat, gf-aif1-lrclk,
+ gf-aif1-txdat, gf-aif2-bclk, gf-aif2-rxdat,
+ gf-aif2-lrclk, gf-aif2-txdat, dsp-uart1-rx,
+ dsp-uart1-tx, dsp-uart2-rx, dsp-uart2-tx,
+ gf-uart2-rx, gf-uart2-tx, usb-uart-rx, codec-pdmclk1,
+ codec-pdmdat1, codec-pdmclk2, codec-pdmdat2,
+ codec-dmicclk1, codec-dmicdat1, codec-dmicclk2,
+ codec-dmicdat2, codec-dmicclk3, codec-dmicdat3,
+ codec-dmicclk4, codec-dmicdat4, dsp-dmicclk1,
+ dsp-dmicdat1, dsp-dmicclk2, dsp-dmicdat2, i2c2-scl,
+ i2c2-sda, i2c3-scl, i2c3-sda, i2c4-scl, i2c4-sda,
+ dsp-standby, codec-mclk1, codec-mclk2, dsp-clkin,
+ psia1-mclk, psia2-mclk, gf-gpio1, gf-gpio5,
+ dsp-gpio20, led1, led2 ]
+
+ function:
+ description:
+ The mux function to select, available functions.
+ enum: [ aif, fpga-gpio1, fpga-gpio2, fpga-gpio3, fpga-gpio4,
+ fpga-gpio5, fpga-gpio6, codec-gpio1, codec-gpio2,
+ codec-gpio3, codec-gpio4, codec-gpio5, codec-gpio6,
+ codec-gpio7, codec-gpio8, dsp-gpio1, dsp-gpio2,
+ dsp-gpio3, dsp-gpio4, dsp-gpio5, dsp-gpio6,
+ gf-gpio2, gf-gpio3, gf-gpio7, gf-gpio1, gf-gpio5,
+ dsp-gpio20, codec-clkout, dsp-clkout, pmic-32k,
+ spdif-clkout, clk-12m288, clk-11m2986, clk-24m576,
+ clk-22m5792, xmos-mclk, gf-clkout1, gf-mclk1,
+ gf-mclk3, gf-mclk2, gf-clkout2, codec-mclk1,
+ codec-mclk2, dsp-clkin, psia1-mclk, psia2-mclk,
+ spdif-mclk, codec-irq, codec-reset, dsp-reset,
+ dsp-irq, dsp-standby, codec-pdmclk1, codec-pdmdat1,
+ codec-pdmclk2, codec-pdmdat2, codec-dmicclk1,
+ codec-dmicdat1, codec-dmicclk2, codec-dmicdat2,
+ codec-dmicclk3, codec-dmicdat3, codec-dmicclk4,
+ codec-dmicdat4, dsp-dmicclk1, dsp-dmicdat1,
+ dsp-dmicclk2, dsp-dmicdat2, dsp-uart1-rx,
+ dsp-uart1-tx, dsp-uart2-rx, dsp-uart2-tx,
+ gf-uart2-rx, gf-uart2-tx, usb-uart-rx, usb-uart-tx,
+ i2c2-scl, i2c2-sda, i2c3-scl, i2c3-sda, i2c4-scl,
+ i2c4-sda, spdif-aif, psia1, psia1-bclk, psia1-lrclk,
+ psia1-rxdat, psia1-txdat, psia2, psia2-bclk,
+ psia2-lrclk, psia2-rxdat, psia2-txdat, codec-aif1,
+ codec-aif1-bclk, codec-aif1-lrclk, codec-aif1-rxdat,
+ codec-aif1-txdat, codec-aif2, codec-aif2-bclk,
+ codec-aif2-lrclk, codec-aif2-rxdat, codec-aif2-txdat,
+ codec-aif3, codec-aif3-bclk, codec-aif3-lrclk,
+ codec-aif3-rxdat, codec-aif3-txdat, dsp-aif1,
+ dsp-aif1-bclk, dsp-aif1-lrclk, dsp-aif1-rxdat,
+ dsp-aif1-txdat, dsp-aif2, dsp-aif2-bclk,
+ dsp-aif2-lrclk, dsp-aif2-rxdat, dsp-aif2-txdat,
+ gf-aif3, gf-aif3-bclk, gf-aif3-lrclk, gf-aif3-rxdat,
+ gf-aif3-txdat, gf-aif4, gf-aif4-bclk, gf-aif4-lrclk,
+ gf-aif4-rxdat, gf-aif4-txdat, gf-aif1, gf-aif1-bclk,
+ gf-aif1-lrclk, gf-aif1-rxdat, gf-aif1-txdat, gf-aif2,
+ gf-aif2-bclk, gf-aif2-lrclk, gf-aif2-rxdat,
+ gf-aif2-txdat, usb-aif1, usb-aif2, adat-aif,
+ soundcard-aif ]
+
+ output-enable:
+ description:
+ Specifies that an AIF group will be used as a master
+ interface (either this or input-enable is required if a
+ group is being muxed to an AIF)
+
+ input-enable:
+ description:
+ Specifies that an AIF group will be used as a slave
+ interface (either this or output-enable is required if a
+ group is being muxed to an AIF)
+
+ additionalProperties: false
+
+ required:
+ - function
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+ - pinctrl-0
+ - pinctrl-names
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
deleted file mode 100644
index b0e36cf0d289..000000000000
--- a/Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
+++ /dev/null
@@ -1,99 +0,0 @@
-Cirrus Logic Madera class audio codecs pinctrl driver
-
-The Cirrus Logic Madera codecs provide a number of GPIO functions for
-interfacing to external hardware and to provide logic outputs to other devices.
-Certain groups of GPIO pins also have an alternate function, normally as an
-audio interface.
-
-The set of available GPIOs, functions and alternate function groups differs
-between codecs so refer to the datasheet for the codec for further information
-on what is supported on that device.
-
-The properties for this driver exist within the parent MFD driver node.
-
-See also
- the core bindings for the parent MFD driver:
- Documentation/devicetree/bindings/mfd/madera.txt
-
- the generic pinmix bindings:
- Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
-
-Required properties of parent mfd node:
- - pinctrl-names : must be "default"
- - pinctrl-0 : a phandle to the node containing the subnodes containing default
- configurations
-
-Required subnodes:
- One subnode is required to contain the default settings. It contains an
- arbitrary number of configuration subnodes, one for each group or pin
- configuration you want to apply as a default.
-
-Required properties of configuration subnodes:
- - groups : name of one pin group to configure. One of:
- aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1, pdmspk2,
- dmic4, dmic5, dmic6,
- gpio1, gpio2, ..., gpio40
- The gpioN groups select the single pin of this name for configuration
-
-Optional properties of configuration subnodes:
- Any configuration option not explicitly listed in the dts will be left at
- chip default setting.
-
- - function : name of function to assign to this group. One of:
- aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1, pdmspk2,
- dmic3, dmic4, dmic5, dmic6,
- io, dsp-gpio, irq1, irq2,
- fll1-clk, fll1-lock, fll2-clk, fll2-lock, fll3-clk, fll3-lock,
- fllao-clk, fllao-lock,
- opclk, opclk-async, pwm1, pwm2, spdif,
- asrc1-in1-lock, asrc1-in2-lock, asrc2-in1-lock, asrc2-in2-lock,
- spkl-short-circuit, spkr-short-circuit, spk-shutdown,
- spk-overheat-shutdown, spk-overheat-warn,
- timer1-sts, timer2-sts, timer3-sts, timer4-sts, timer5-sts, timer6-sts,
- timer7-sts, timer8-sts,
- log1-fifo-ne, log2-fifo-ne, log3-fifo-ne, log4-fifo-ne, log5-fifo-ne,
- log6-fifo-ne, log7-fifo-ne, log8-fifo-ne,
-
- - bias-disable : disable pull-up and pull-down
- - bias-bus-hold : enable buskeeper
- - bias-pull-up : output is pulled-up
- - bias-pull-down : output is pulled-down
- - drive-push-pull : CMOS output
- - drive-open-drain : open-drain output
- - drive-strength : drive strength in mA. Valid values are 4 or 8
- - input-schmitt-enable : enable schmitt-trigger mode
- - input-schmitt-disable : disable schmitt-trigger mode
- - input-debounce : A value of 0 disables debounce, a value !=0 enables
- debounce
- - output-low : set the pin to output mode with low level
- - output-high : set the pin to output mode with high level
-
-Example:
-
-cs47l85@0 {
- compatible = "cirrus,cs47l85";
-
- pinctrl-names = "default";
- pinctrl-0 = <&cs47l85_defaults>;
-
- cs47l85_defaults: cs47l85-gpio-defaults {
- aif1 {
- groups = "aif1";
- function = "aif1";
- bias-bus-hold;
- };
-
- aif2 {
- groups = "aif2";
- function = "aif2";
- bias-bus-hold;
- };
-
- opclk {
- groups = "gpio1";
- function = "opclk";
- bias-pull-up;
- drive-strength = <8>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml b/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
new file mode 100644
index 000000000000..6bfc25d0e1b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/cirrus,madera.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Madera class audio CODECs pinctrl driver
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ The Cirrus Logic Madera codecs provide a number of GPIO functions for
+ interfacing to external hardware and to provide logic outputs to other devices.
+ Certain groups of GPIO pins also have an alternate function, normally as an
+ audio interface.
+
+ The set of available GPIOs, functions and alternate function groups differs
+ between CODECs so refer to the datasheet for the CODEC for further information
+ on what is supported on that device.
+
+ The properties for this driver exist within the parent MFD driver node.
+
+ See also the core bindings for the parent MFD driver:
+
+ Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
+
+  And the generic pinmux bindings:
+
+ Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+properties:
+ pinctrl-0:
+ description:
+ A phandle to the node containing the subnodes containing default
+ configurations.
+
+ pinctrl-names:
+ description:
+ A pinctrl state named "default" must be defined.
+ const: default
+
+ pin-settings:
+ description:
+ One subnode is required to contain the default settings. It
+ contains an arbitrary number of configuration subnodes, one for
+ each group or pin configuration you want to apply as a default.
+ type: object
+ patternProperties:
+ '-pins$':
+ type: object
+ allOf:
+ - $ref: "pincfg-node.yaml#"
+ - $ref: "pinmux-node.yaml#"
+ properties:
+ groups:
+ description:
+ Name of one pin group to configure.
+ enum: [ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1,
+ pdmspk2, dmic4, dmic5, dmic6, gpio1, gpio2, gpio3,
+                      gpio4, gpio5, gpio6, gpio7, gpio8, gpio9, gpio10,
+                      gpio11, gpio12, gpio13, gpio14, gpio15, gpio16,
+                      gpio17, gpio18, gpio19, gpio20, gpio21, gpio22,
+                      gpio23, gpio24, gpio25, gpio26, gpio27, gpio28,
+                      gpio29, gpio30, gpio31, gpio32, gpio33, gpio34,
+                      gpio35, gpio36, gpio37, gpio38, gpio39, gpio40 ]
+
+ function:
+ description:
+ Name of function to assign to this group.
+ enum: [ aif1, aif2, aif3, aif4, mif1, mif2, mif3,
+ pdmspk1, pdmspk2, dmic3, dmic4, dmic5,
+ dmic6, io, dsp-gpio, irq1, irq2, fll1-clk,
+ fll1-lock, fll2-clk, fll2-lock, fll3-clk,
+ fll3-lock, fllao-clk, fllao-lock, opclk,
+ opclk-async, pwm1, pwm2, spdif, asrc1-in1-lock,
+ asrc1-in2-lock, asrc2-in1-lock, asrc2-in2-lock,
+ spkl-short-circuit, spkr-short-circuit,
+ spk-shutdown, spk-overheat-shutdown,
+ spk-overheat-warn, timer1-sts, timer2-sts,
+ timer3-sts, timer4-sts, timer5-sts, timer6-sts,
+ timer7-sts, timer8-sts, log1-fifo-ne,
+ log2-fifo-ne, log3-fifo-ne, log4-fifo-ne,
+ log5-fifo-ne, log6-fifo-ne, log7-fifo-ne,
+ log8-fifo-ne ]
+
+ bias-disable: true
+
+ bias-bus-hold: true
+
+ bias-pull-up: true
+
+ bias-pull-down: true
+
+ drive-push-pull: true
+
+ drive-open-drain: true
+
+ drive-strength:
+ enum: [ 4, 8 ]
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ input-debounce: true
+
+ output-low: true
+
+ output-high: true
+
+ additionalProperties: false
+
+ required:
+ - groups
+
+ additionalProperties: false
+
+required:
+ - pinctrl-0
+ - pinctrl-names
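
The worked example removed with the old text binding translates to this schema
roughly as follows; the pin-settings node name and the -pins suffixes come from
the schema above, while the group, function and bias values simply mirror the
old example:

    cs47l85: codec@0 {
        compatible = "cirrus,cs47l85";

        pinctrl-names = "default";
        pinctrl-0 = <&cs47l85_defaults>;

        cs47l85_defaults: pin-settings {
            aif1-pins {
                groups = "aif1";
                function = "aif1";
                bias-bus-hold;
            };

            opclk-pins {
                groups = "gpio1";
                function = "opclk";
                bias-pull-up;
                drive-strength = <8>;
            };
        };
    };
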
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
index d98a3866add8..6d7d162e6171 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
@@ -37,22 +37,21 @@ patternProperties:
be found in <arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h>. The last
integer CONFIG is the pad setting value like pull-up on this pin. Please
refer to i.MX8M Mini Reference Manual for detailed CONFIG settings.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- items:
- - description: |
- "mux_reg" indicates the offset of mux register.
- - description: |
- "conf_reg" indicates the offset of pad configuration register.
- - description: |
- "input_reg" indicates the offset of select input register.
- - description: |
- "mux_val" indicates the mux value to be applied.
- - description: |
- "input_val" indicates the select input value to be applied.
- - description: |
- "pad_setting" indicates the pad configuration value to be applied.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
required:
- fsl,pins
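
Boards normally do not spell out the six integers by hand; they use the macros
from imx8mm-pinfunc.h, which expand to the first five cells, and append the pad
setting. A minimal sketch (the macro names and the 0x140 pad value are
illustrative, check the header and the reference manual for real values):

    pinctrl_uart1: uart1grp {
        fsl,pins = <
            /* macro = mux_reg conf_reg input_reg mux_val input_val; last cell = pad_setting */
            MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX    0x140
            MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX    0x140
        >;
    };

The same pattern applies to the i.MX8M Nano, Plus and Quad schemas below, using
the matching imx8mn/imx8mp/imx8mq pinfunc headers.
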
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
index b9aa180e07e4..7131cfd1fc45 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
@@ -37,22 +37,21 @@ patternProperties:
be found in <arch/arm64/boot/dts/freescale/imx8mn-pinfunc.h>. The last
integer CONFIG is the pad setting value like pull-up on this pin. Please
refer to i.MX8M Nano Reference Manual for detailed CONFIG settings.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- items:
- - description: |
- "mux_reg" indicates the offset of mux register.
- - description: |
- "conf_reg" indicates the offset of pad configuration register.
- - description: |
- "input_reg" indicates the offset of select input register.
- - description: |
- "mux_val" indicates the mux value to be applied.
- - description: |
- "input_val" indicates the select input value to be applied.
- - description: |
- "pad_setting" indicates the pad configuration value to be applied.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
required:
- fsl,pins
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
index 6297e78418cf..d474bc1f393b 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
@@ -37,22 +37,21 @@ patternProperties:
be found in <arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h>. The last
integer CONFIG is the pad setting value like pull-up on this pin. Please
refer to i.MX8M Plus Reference Manual for detailed CONFIG settings.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- items:
- - description: |
- "mux_reg" indicates the offset of mux register.
- - description: |
- "conf_reg" indicates the offset of pad configuration register.
- - description: |
- "input_reg" indicates the offset of select input register.
- - description: |
- "mux_val" indicates the mux value to be applied.
- - description: |
- "input_val" indicates the select input value to be applied.
- - description: |
- "pad_setting" indicates the pad configuration value to be applied.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
required:
- fsl,pins
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
index b30c704fcfa1..0af2b6c95c17 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
@@ -37,22 +37,21 @@ patternProperties:
be found in <arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h>. The last
integer CONFIG is the pad setting value like pull-up on this pin. Please
refer to i.MX8M Quad Reference Manual for detailed CONFIG settings.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - items:
- items:
- - description: |
- "mux_reg" indicates the offset of mux register.
- - description: |
- "conf_reg" indicates the offset of pad configuration register.
- - description: |
- "input_reg" indicates the offset of select input register.
- - description: |
- "mux_val" indicates the mux value to be applied.
- - description: |
- "input_val" indicates the select input value to be applied.
- - description: |
- "pad_setting" indicates the pad configuration value to be applied.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
required:
- fsl,pins
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
index cd2b436350ef..2c0acb405e6c 100644
--- a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
@@ -24,12 +24,10 @@ properties:
patternProperties:
'-pins$':
type: object
- allOf:
- - $ref: pincfg-node.yaml#
- - $ref: pinmux-node.yaml#
description:
Pinctrl node's client devices use subnodes for desired pin configuration.
Client device subnodes use below standard properties.
+ $ref: pinmux-node.yaml#
properties:
function: true
diff --git a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
index 32a8a8fa7805..00912449237b 100644
--- a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
@@ -2,8 +2,8 @@ Microsemi Ocelot pin controller Device Tree Bindings
----------------------------------------------------
Required properties:
- - compatible : Should be "mscc,ocelot-pinctrl" or
- "mscc,jaguar2-pinctrl"
+ - compatible : Should be "mscc,ocelot-pinctrl",
+ "mscc,jaguar2-pinctrl" or "microchip,sparx5-pinctrl"
- reg : Address and length of the register set for the device
- gpio-controller : Indicates this device is a GPIO controller
- #gpio-cells : Must be 2.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
index 732d9075560b..ef8877ddb1eb 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinmux-node.yaml
@@ -122,11 +122,10 @@ properties:
this, "pins" or "pinmux" has to be specified)
pinmux:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
description:
The list of numeric pin ids and their mux settings that properties in the
node apply to (either this, "pins" or "groups" have to be specified)
+ $ref: /schemas/types.yaml#/definitions/uint32-array
pinctrl-pin-array:
$ref: /schemas/types.yaml#/definitions/uint32-array
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
index 63d1cfe86c6e..b2de3992d484 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
@@ -49,8 +49,7 @@ patternProperties:
description:
Pinctrl node's client devices use subnodes for desired pin configuration.
Client device subnodes use below standard properties.
- allOf:
- - $ref: "/schemas/pinctrl/pincfg-node.yaml"
+ $ref: "/schemas/pinctrl/pincfg-node.yaml"
properties:
pins:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
new file mode 100644
index 000000000000..6dc3b52f47cd
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
@@ -0,0 +1,147 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm8250-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SM8250 TLMM block
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer block found in the
+ SM8250 platform.
+
+properties:
+ compatible:
+ const: qcom,sm8250-pinctrl
+
+ reg:
+ minItems: 3
+ maxItems: 3
+
+ reg-names:
+ items:
+ - const: "west"
+ - const: "south"
+ - const: "north"
+
+ interrupts:
+ description: Specifies the TLMM summary IRQ
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ description:
+      Specifies the pin number and flags, as defined in
+      include/dt-bindings/interrupt-controller/irq.h
+ const: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+    description: Specifies the pin number and flags, as defined in
+ include/dt-bindings/gpio/gpio.h
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ wakeup-parent:
+ maxItems: 1
+
+#PIN CONFIGURATION NODES
+patternProperties:
+ '^.*$':
+ if:
+ type: object
+ then:
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-7][0-9])$"
+ - enum: [ sdc2_clk, sdc2_cmd, sdc2_data, ufs_reset ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+
+ enum: [ aoss_cti, atest, audio_ref, cam_mclk, cci_async, cci_i2c,
+ cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4, cri_trng,
+ cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+ ddr_pxi2, ddr_pxi3, dp_hot, dp_lcd, gcc_gp1, gcc_gp2, gcc_gp3, gpio,
+ ibi_i3c, jitter_bist, lpass_slimbus, mdp_vsync, mdp_vsync0,
+ mdp_vsync1, mdp_vsync2, mdp_vsync3, mi2s0_data0, mi2s0_data1,
+ mi2s0_sck, mi2s0_ws, mi2s1_data0, mi2s1_data1, mi2s1_sck, mi2s1_ws,
+ mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, pci_e0, pci_e1,
+ pci_e2, phase_flag, pll_bist, pll_bypassnl, pll_clk, pll_reset,
+ pri_mi2s, prng_rosc, qdss_cti, qdss_gpio, qspi0, qspi1, qspi2, qspi3,
+ qspi_clk, qspi_cs, qup0, qup1, qup10, qup11, qup12, qup13, qup14,
+ qup15, qup16, qup17, qup18, qup19, qup2, qup3, qup4, qup5, qup6,
+ qup7, qup8, qup9, qup_l4, qup_l5, qup_l6, sd_write, sdc40, sdc41,
+ sdc42, sdc43, sdc4_clk, sdc4_cmd, sec_mi2s, sp_cmu, tgu_ch0, tgu_ch1,
+ tgu_ch2, tgu_ch3, tsense_pwm1, tsense_pwm2, tsif0_clk, tsif0_data,
+ tsif0_en, tsif0_error, tsif0_sync, tsif1_clk, tsif1_data, tsif1_en,
+ tsif1_error, tsif1_sync, usb2phy_ac, usb_phy, vsense_trigger ]
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+ default: 2
+ description:
+ Selects the drive strength for the specified pins, in mA.
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pinctrl@1f00000 {
+ compatible = "qcom,sm8250-pinctrl";
+ reg = <0x0f100000 0x300000>,
+ <0x0f500000 0x300000>,
+ <0x0f900000 0x300000>;
+ reg-names = "west", "south", "north";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 180>;
+ wakeup-parent = <&pdc>;
+ };
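
A client configuration subnode under the TLMM node then combines the pins,
function and electrical properties described above. The node name and the
pin-to-function pairing here are purely illustrative, not taken from the
SM8250 pin map:

    qup12-uart-pins {
        pins = "gpio34", "gpio35";
        function = "qup12";
        drive-strength = <2>;
        bias-disable;
    };
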
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 6eada23eaa31..b68613188c19 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -13,6 +13,7 @@ Required Properties:
- "renesas,pfc-emev2": for EMEV2 (EMMA Mobile EV2) compatible pin-controller.
- "renesas,pfc-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible pin-controller.
- "renesas,pfc-r8a7740": for R8A7740 (R-Mobile A1) compatible pin-controller.
+ - "renesas,pfc-r8a7742": for R8A7742 (RZ/G1H) compatible pin-controller.
- "renesas,pfc-r8a7743": for R8A7743 (RZ/G1M) compatible pin-controller.
- "renesas,pfc-r8a7744": for R8A7744 (RZ/G1N) compatible pin-controller.
- "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index 2113cfaa26e6..d3eae61a340d 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -110,8 +110,8 @@ pinctrl@20008000 {
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <RK_GPIO1 8 1 &pcfg_pull_default>,
- <RK_GPIO1 9 1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PB0 1 &pcfg_pull_default>,
+ <1 RK_PB1 1 &pcfg_pull_default>;
};
};
};
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 46a0478cb924..0857cbeeb43c 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -36,22 +36,22 @@ properties:
pins-are-numbered: true
hwlocks: true
+ interrupts:
+ maxItems: 1
+
st,syscfg:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: Should be phandle/offset/mask
- Phandle to the syscon node which includes IRQ mux selection.
- The offset of the IRQ mux selection register.
- The field mask of IRQ mux, needed if different of 0xf.
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
st,package:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [1, 2, 4, 8]
description:
Indicates the SOC package used.
More details in include/dt-bindings/pinctrl/stm32-pinfunc.h
-
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8]
patternProperties:
'^gpio@[0-9a-f]*$':
@@ -78,33 +78,30 @@ patternProperties:
maximum: 16
st,bank-name:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum:
- - GPIOA
- - GPIOB
- - GPIOC
- - GPIOD
- - GPIOE
- - GPIOF
- - GPIOG
- - GPIOH
- - GPIOI
- - GPIOJ
- - GPIOK
- - GPIOZ
description:
Should be a name string for this bank as specified in the datasheet.
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum:
+ - GPIOA
+ - GPIOB
+ - GPIOC
+ - GPIOD
+ - GPIOE
+ - GPIOF
+ - GPIOG
+ - GPIOH
+ - GPIOI
+ - GPIOJ
+ - GPIOK
+ - GPIOZ
st,bank-ioport:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- - maximum: 11
-
description:
Should correspond to the EXTI IOport selection (EXTI line used
to select GPIOs as interrupts).
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 11
required:
- gpio-controller
@@ -125,8 +122,7 @@ patternProperties:
configuration, pullups, drive, output high/low and output speed.
properties:
pinmux:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32-array"
+ $ref: "/schemas/types.yaml#/definitions/uint32-array"
description: |
Integer array, represents gpio pin number and mux setting.
Supported pin number and mux varies for different SoCs, and are
@@ -180,9 +176,8 @@ patternProperties:
1: Medium speed
2: Fast speed
3: High speed
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1, 2, 3]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
required:
- pinmux
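
Board files usually fill the pinmux array with the STM32_PINMUX() helper from
include/dt-bindings/pinctrl/stm32-pinfunc.h. A minimal sketch with illustrative
port/line/AF values; the configuration properties come from the full schema:

    periph_pins: periph-0 {
        pins1 {
            pinmux = <STM32_PINMUX('A', 9, AF7)>;  /* output line */
            drive-push-pull;
            bias-disable;
            slew-rate = <0>;
        };
        pins2 {
            pinmux = <STM32_PINMUX('A', 10, AF7)>; /* input line */
            bias-disable;
        };
    };
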
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index 6c6079fe1351..4f524f822e84 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -23,48 +23,119 @@ description: |+
properties:
compatible:
enum:
+ - amlogic,meson8-pwrc
+ - amlogic,meson8b-pwrc
+ - amlogic,meson8m2-pwrc
+ - amlogic,meson-gxbb-pwrc
- amlogic,meson-g12a-pwrc
- amlogic,meson-sm1-pwrc
clocks:
- minItems: 2
+ minItems: 1
+ maxItems: 2
clock-names:
+ minItems: 1
+ maxItems: 2
items:
- const: vpu
- const: vapb
resets:
minItems: 11
+ maxItems: 12
reset-names:
- items:
- - const: viu
- - const: venc
- - const: vcbus
- - const: bt656
- - const: rdma
- - const: venci
- - const: vencp
- - const: vdac
- - const: vdi6
- - const: vencl
- - const: vid_lock
+ minItems: 11
+ maxItems: 12
"#power-domain-cells":
const: 1
amlogic,ao-sysctrl:
description: phandle to the AO sysctrl node
- allOf:
- - $ref: /schemas/types.yaml#/definitions/phandle
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - amlogic,meson8b-pwrc
+ - amlogic,meson8m2-pwrc
+ then:
+ properties:
+ reset-names:
+ items:
+ - const: dblk
+ - const: pic_dc
+ - const: hdmi_apb
+ - const: hdmi_system
+ - const: venci
+ - const: vencp
+ - const: vdac
+ - const: vencl
+ - const: viu
+ - const: venc
+ - const: rdma
+ required:
+ - resets
+ - reset-names
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - amlogic,meson-gxbb-pwrc
+ then:
+ properties:
+ reset-names:
+ items:
+ - const: viu
+ - const: venc
+ - const: vcbus
+ - const: bt656
+ - const: dvin
+ - const: rdma
+ - const: venci
+ - const: vencp
+ - const: vdac
+ - const: vdi6
+ - const: vencl
+ - const: vid_lock
+ required:
+ - resets
+ - reset-names
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - amlogic,meson-g12a-pwrc
+ - amlogic,meson-sm1-pwrc
+ then:
+ properties:
+ reset-names:
+ items:
+ - const: viu
+ - const: venc
+ - const: vcbus
+ - const: bt656
+ - const: rdma
+ - const: venci
+ - const: vencp
+ - const: vdac
+ - const: vdi6
+ - const: vencl
+ - const: vid_lock
+ required:
+ - resets
+ - reset-names
required:
- compatible
- clocks
- clock-names
- - resets
- - reset-names
- "#power-domain-cells"
- amlogic,ao-sysctrl
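
Because "#power-domain-cells" is 1, consumers select a domain with a one-cell
index, normally taken from the dt-bindings power headers. The consumer node,
its compatible, the &pwrc label and the macro below are assumptions for
illustration only:

    vpu: vpu@ff900000 {
        compatible = "amlogic,meson-g12a-vpu";
        /* ... */
        power-domains = <&pwrc PWRC_G12A_VPU_ID>;
    };
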
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
deleted file mode 100644
index f0f5553a9e74..000000000000
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpc.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-Freescale i.MX General Power Controller
-=======================================
-
-The i.MX6 General Power Control (GPC) block contains DVFS load tracking
-counters and Power Gating Control (PGC).
-
-Required properties:
-- compatible: Should be one of the following:
- - fsl,imx6q-gpc
- - fsl,imx6qp-gpc
- - fsl,imx6sl-gpc
- - fsl,imx6sx-gpc
-- reg: should be register base and length as documented in the
- datasheet
-- interrupts: Should contain one interrupt specifier for the GPC interrupt
-- clocks: Must contain an entry for each entry in clock-names.
- See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
-- clock-names: Must include the following entries:
- - ipg
-
-The power domains are generic power domain providers as documented in
-Documentation/devicetree/bindings/power/power-domain.yaml. They are described as
-subnodes of the power gating controller 'pgc' node of the GPC and should
-contain the following:
-
-Required properties:
-- reg: Must contain the DOMAIN_INDEX of this power domain
- The following DOMAIN_INDEX values are valid for i.MX6Q:
- ARM_DOMAIN 0
- PU_DOMAIN 1
- The following additional DOMAIN_INDEX value is valid for i.MX6SL:
- DISPLAY_DOMAIN 2
- The following additional DOMAIN_INDEX value is valid for i.MX6SX:
- PCI_DOMAIN 3
-
-- #power-domain-cells: Should be 0
-
-Optional properties:
-- clocks: a number of phandles to clocks that need to be enabled during domain
- power-up sequencing to ensure reset propagation into devices located inside
- this power domain
-- power-supply: a phandle to the regulator powering this domain
-
-Example:
-
- gpc: gpc@20dc000 {
- compatible = "fsl,imx6q-gpc";
- reg = <0x020dc000 0x4000>;
- interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
- <0 90 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6QDL_CLK_IPG>;
- clock-names = "ipg";
-
- pgc {
- #address-cells = <1>;
- #size-cells = <0>;
-
- power-domain@0 {
- reg = <0>;
- #power-domain-cells = <0>;
- };
-
- pd_pu: power-domain@1 {
- reg = <1>;
- #power-domain-cells = <0>;
- power-supply = <&reg_pu>;
- clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
- <&clks IMX6QDL_CLK_GPU3D_SHADER>,
- <&clks IMX6QDL_CLK_GPU2D_CORE>,
- <&clks IMX6QDL_CLK_GPU2D_AXI>,
- <&clks IMX6QDL_CLK_OPENVG_AXI>,
- <&clks IMX6QDL_CLK_VPU_AXI>;
- };
- };
- };
-
-
-Specifying power domain for IP modules
-======================================
-
-IP cores belonging to a power domain should contain a 'power-domains' property
-that is a phandle pointing to the power domain the device belongs to.
-
-Example of a device that is part of the PU power domain:
-
- vpu: vpu@2040000 {
- reg = <0x02040000 0x3c000>;
- /* ... */
- power-domains = <&pd_pu>;
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpc.yaml b/Documentation/devicetree/bindings/power/fsl,imx-gpc.yaml
new file mode 100644
index 000000000000..a055b3e819d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpc.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/fsl,imx-gpc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX General Power Controller
+
+maintainers:
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+description: |
+ The i.MX6 General Power Control (GPC) block contains DVFS load tracking
+ counters and Power Gating Control (PGC).
+
+ The power domains are generic power domain providers as documented in
+ Documentation/devicetree/bindings/power/power-domain.yaml. They are
+ described as subnodes of the power gating controller 'pgc' node of the GPC.
+
+ IP cores belonging to a power domain should contain a 'power-domains'
+ property that is a phandle pointing to the power domain the device belongs
+ to.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx6q-gpc
+ - fsl,imx6qp-gpc
+ - fsl,imx6sl-gpc
+ - fsl,imx6sx-gpc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: ipg
+
+ pgc:
+ type: object
+ description: list of power domains provided by this controller.
+
+ patternProperties:
+ "power-domain@[0-9]$":
+ type: object
+ properties:
+
+ '#power-domain-cells':
+ const: 0
+
+ reg:
+ description: |
+ The following DOMAIN_INDEX values are valid for i.MX6Q:
+ ARM_DOMAIN 0
+ PU_DOMAIN 1
+ The following additional DOMAIN_INDEX value is valid for i.MX6SL:
+ DISPLAY_DOMAIN 2
+ The following additional DOMAIN_INDEX value is valid for i.MX6SX:
+ PCI_DOMAIN 3
+ maxItems: 1
+
+ clocks:
+ description: |
+ A number of phandles to clocks that need to be enabled during domain
+ power-up sequencing to ensure reset propagation into devices located
+ inside this power domain.
+ minItems: 1
+ maxItems: 7
+
+ power-supply: true
+
+ required:
+ - '#power-domain-cells'
+ - reg
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - pgc
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gpc@20dc000 {
+ compatible = "fsl,imx6q-gpc";
+ reg = <0x020dc000 0x4000>;
+ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_IPG>;
+ clock-names = "ipg";
+
+ pgc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ power-domain@0 {
+ reg = <0>;
+ #power-domain-cells = <0>;
+ };
+
+ pd_pu: power-domain@1 {
+ reg = <1>;
+ #power-domain-cells = <0>;
+ power-supply = <&reg_pu>;
+ clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
+ <&clks IMX6QDL_CLK_GPU3D_SHADER>,
+ <&clks IMX6QDL_CLK_GPU2D_CORE>,
+ <&clks IMX6QDL_CLK_GPU2D_AXI>,
+ <&clks IMX6QDL_CLK_OPENVG_AXI>,
+ <&clks IMX6QDL_CLK_VPU_AXI>;
+ };
+ };
+ };
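
The consumer-side convention from the removed text binding is unchanged: a
device inside the PU domain references the pgc subnode by phandle, for
example:

    vpu: vpu@2040000 {
        reg = <0x02040000 0x3c000>;
        /* ... */
        power-domains = <&pd_pu>;
    };
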
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
deleted file mode 100644
index 61649202f6f5..000000000000
--- a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-Freescale i.MX General Power Controller v2
-==========================================
-
-The i.MX7S/D General Power Control (GPC) block contains Power Gating
-Control (PGC) for various power domains.
-
-Required properties:
-
-- compatible: Should be one of:
- - "fsl,imx7d-gpc"
- - "fsl,imx8mq-gpc"
-
-- reg: should be register base and length as documented in the
- datasheet
-
-- interrupts: Should contain GPC interrupt request 1
-
-Power domains contained within GPC node are generic power domain
-providers, documented in
-Documentation/devicetree/bindings/power/power-domain.yaml, which are
-described as subnodes of the power gating controller 'pgc' node,
-which, in turn, is expected to contain the following:
-
-Required properties:
-
-- reg: Power domain index. Valid values are defined in
- include/dt-bindings/power/imx7-power.h for fsl,imx7d-gpc and
- include/dt-bindings/power/imx8m-power.h for fsl,imx8mq-gpc
-
-- #power-domain-cells: Should be 0
-
-Optional properties:
-
-- power-supply: Power supply used to power the domain
-- clocks: a number of phandles to clocks that need to be enabled during
- domain power-up sequencing to ensure reset propagation into devices
- located inside this power domain
-
-Example:
-
- gpc: gpc@303a0000 {
- compatible = "fsl,imx7d-gpc";
- reg = <0x303a0000 0x1000>;
- interrupt-controller;
- interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
- #interrupt-cells = <3>;
- interrupt-parent = <&intc>;
-
- pgc {
- #address-cells = <1>;
- #size-cells = <0>;
-
- pgc_pcie_phy: power-domain@1 {
- #power-domain-cells = <0>;
-
- reg = <1>;
- power-supply = <&reg_1p0d>;
- };
- };
- };
-
-
-Specifying power domain for IP modules
-======================================
-
-IP cores belonging to a power domain should contain a 'power-domains'
-property that is a phandle for PGC node representing the domain.
-
-Example of a device that is part of the PCIE_PHY power domain:
-
- pcie: pcie@33800000 {
- reg = <0x33800000 0x4000>,
- <0x4ff00000 0x80000>;
- /* ... */
- power-domains = <&pgc_pcie_phy>;
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml
new file mode 100644
index 000000000000..bde09a0b2da3
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/fsl,imx-gpcv2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX General Power Controller v2
+
+maintainers:
+ - Andrey Smirnov <andrew.smirnov@gmail.com>
+
+description: |
+ The i.MX7S/D General Power Control (GPC) block contains Power Gating
+ Control (PGC) for various power domains.
+
+ Power domains contained within GPC node are generic power domain
+ providers, documented in
+ Documentation/devicetree/bindings/power/power-domain.yaml, which are
+ described as subnodes of the power gating controller 'pgc' node.
+
+ IP cores belonging to a power domain should contain a 'power-domains'
+ property that is a phandle for PGC node representing the domain.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx7d-gpc
+ - fsl,imx8mq-gpc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ pgc:
+ type: object
+ description: list of power domains provided by this controller.
+
+ patternProperties:
+ "power-domain@[0-9]$":
+ type: object
+ properties:
+
+ '#power-domain-cells':
+ const: 0
+
+ reg:
+ description: |
+ Power domain index. Valid values are defined in
+ include/dt-bindings/power/imx7-power.h for fsl,imx7d-gpc and
+ include/dt-bindings/power/imx8m-power.h for fsl,imx8mq-gpc
+ maxItems: 1
+
+ clocks:
+ description: |
+ A number of phandles to clocks that need to be enabled during domain
+ power-up sequencing to ensure reset propagation into devices located
+ inside this power domain.
+ minItems: 1
+ maxItems: 5
+
+ power-supply: true
+
+ required:
+ - '#power-domain-cells'
+ - reg
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - pgc
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ gpc@303a0000 {
+ compatible = "fsl,imx7d-gpc";
+ reg = <0x303a0000 0x1000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+
+ pgc {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pgc_mipi_phy: power-domain@0 {
+ #power-domain-cells = <0>;
+ reg = <0>;
+ power-supply = <&reg_1p0d>;
+ };
+
+ pgc_pcie_phy: power-domain@1 {
+ #power-domain-cells = <0>;
+ reg = <1>;
+ power-supply = <&reg_1p0d>;
+ };
+
+ pgc_hsic_phy: power-domain@2 {
+ #power-domain-cells = <0>;
+ reg = <2>;
+ power-supply = <&reg_1p2>;
+ };
+ };
+ };
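
As in the removed text binding, a device inside the PCIE_PHY domain references
its pgc subnode by phandle, for example:

    pcie: pcie@33800000 {
        reg = <0x33800000 0x4000>,
              <0x4ff00000 0x80000>;
        /* ... */
        power-domains = <&pgc_pcie_phy>;
    };
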
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index ba605310abeb..8058955fb3b9 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -23,6 +23,7 @@ properties:
- qcom,sc7180-rpmhpd
- qcom,sdm845-rpmhpd
- qcom,sm8150-rpmhpd
+ - qcom,sm8250-rpmhpd
'#power-domain-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.yaml b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
index 078b2cb40fe3..60a23b3beb40 100644
--- a/Documentation/devicetree/bindings/power/renesas,apmu.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
items:
- enum:
+ - renesas,r8a7742-apmu # RZ/G1H
- renesas,r8a7743-apmu # RZ/G1M
- renesas,r8a7744-apmu # RZ/G1N
- renesas,r8a7745-apmu # RZ/G1E
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
index e59331e1d944..55b6ab2d8784 100644
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.yaml
@@ -17,6 +17,7 @@ description:
properties:
compatible:
enum:
+ - renesas,r8a7742-sysc # RZ/G1H
- renesas,r8a7743-sysc # RZ/G1M
- renesas,r8a7744-sysc # RZ/G1N
- renesas,r8a7745-sysc # RZ/G1E
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt
deleted file mode 100644
index f7ce1d8af04a..000000000000
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-SYSCON reboot mode driver
-
-This driver gets reboot mode magic value form reboot-mode driver
-and stores it in a SYSCON mapped register. Then the bootloader
-can read it and take different action according to the magic
-value stored.
-
-This DT node should be represented as a sub-node of a "syscon", "simple-mfd"
-node.
-
-Required properties:
-- compatible: should be "syscon-reboot-mode"
-- offset: offset in the register map for the storage register (in bytes)
-
-Optional property:
-- mask: bits mask of the bits in the register to store the reboot mode magic value,
- default set to 0xffffffff if missing.
-
-The rest of the properties should follow the generic reboot-mode description
-found in reboot-mode.txt
-
-Example:
- pmu: pmu@20004000 {
- compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd";
- reg = <0x20004000 0x100>;
-
- reboot-mode {
- compatible = "syscon-reboot-mode";
- offset = <0x40>;
- mode-normal = <BOOT_NORMAL>;
- mode-recovery = <BOOT_RECOVERY>;
- mode-bootloader = <BOOT_FASTBOOT>;
- mode-loader = <BOOT_BL_DOWNLOAD>;
- };
- };
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml
new file mode 100644
index 000000000000..9b1ffceefe3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot-mode.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/syscon-reboot-mode.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic SYSCON reboot mode driver
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |
+  This driver gets the reboot mode magic value from the reboot-mode
+  driver and stores it in a SYSCON mapped register. The bootloader can
+  then read it and take different actions according to the magic value
+  stored. The SYSCON mapped register is retrieved from the parent DT
+  node plus the offset, so the SYSCON reboot-mode node should be
+  represented as a sub-node of a "syscon", "simple-mfd" node.
+
+properties:
+ compatible:
+ const: syscon-reboot-mode
+
+ mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Update only the register bits defined by the mask (32 bit)
+
+ offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Offset in the register map for the mode register (in bytes)
+
+patternProperties:
+ "^mode-.+":
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Vendor-specific mode value written to the mode register
+
+additionalProperties: false
+
+required:
+ - compatible
+ - offset
+
+examples:
+ - |
+ #include <dt-bindings/soc/rockchip,boot-mode.h>
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x40>;
+ mode-normal = <BOOT_NORMAL>;
+ mode-recovery = <BOOT_RECOVERY>;
+ mode-bootloader = <BOOT_FASTBOOT>;
+ mode-loader = <BOOT_BL_DOWNLOAD>;
+ };
+...
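
Since the node must sit under a "syscon", "simple-mfd" parent, the complete
arrangement looks like the example carried by the removed text binding:

    pmu: pmu@20004000 {
        compatible = "rockchip,rk3066-pmu", "syscon", "simple-mfd";
        reg = <0x20004000 0x100>;

        reboot-mode {
            compatible = "syscon-reboot-mode";
            offset = <0x40>;
            mode-normal = <BOOT_NORMAL>;
            mode-recovery = <BOOT_RECOVERY>;
            mode-bootloader = <BOOT_FASTBOOT>;
            mode-loader = <BOOT_BL_DOWNLOAD>;
        };
    };
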
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
index b80772cb9f06..da2509724812 100644
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml
@@ -12,9 +12,12 @@ maintainers:
description: |+
This is a generic reset driver using syscon to map the reset register.
The reset is generally performed with a write to the reset register
- defined by the register map pointed by syscon reference plus the offset
- with the value and mask defined in the reboot node.
- Default will be little endian mode, 32 bit access only.
+  defined by the SYSCON register map base plus the offset, with the value and
+  mask defined in the reboot node. The default is little-endian mode, 32-bit
+  access only. The SYSCON register map is normally retrieved from the parent
+  DT node, so the SYSCON reboot node should be represented as a sub-node of a
+  "syscon", "simple-mfd" node. The regmap property pointing to the system
+  controller node is, however, also still supported.
properties:
compatible:
@@ -30,7 +33,10 @@ properties:
regmap:
$ref: /schemas/types.yaml#/definitions/phandle
- description: Phandle to the register map node.
+ deprecated: true
+ description: |
+    Phandle to the register map node. This property is deprecated in favor of
+    making the syscon-reboot node a child of a system controller node.
value:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -38,7 +44,6 @@ properties:
required:
- compatible
- - regmap
- offset
additionalProperties: false
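
With regmap now optional, the preferred form is a reboot node placed directly
under the system controller. The parent compatible, register offset and mask
below are illustrative only:

    sysctrl: syscon@1c00000 {
        compatible = "vendor,soc-sysctrl", "syscon", "simple-mfd";
        reg = <0x01c00000 0x1000>;

        reboot {
            compatible = "syscon-reboot";
            offset = <0x64>;
            mask = <0x1>;
        };
    };
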
diff --git a/Documentation/devicetree/bindings/power/supply/battery.txt b/Documentation/devicetree/bindings/power/supply/battery.txt
index 3049cf88bdcf..5e29595edd74 100644
--- a/Documentation/devicetree/bindings/power/supply/battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/battery.txt
@@ -11,15 +11,21 @@ different type. This prevents unpredictable, potentially harmful,
behavior should a replacement that changes the battery type occur
without a corresponding update to the dtb.
+Please note that not all charger drivers respect all of the properties.
+
Required Properties:
- compatible: Must be "simple-battery"
Optional Properties:
+ - over-voltage-threshold-microvolt: battery over-voltage limit
+ - re-charge-voltage-microvolt: limit to automatically start charging again
- voltage-min-design-microvolt: drained battery voltage
- voltage-max-design-microvolt: fully charged battery voltage
- energy-full-design-microwatt-hours: battery design energy
- charge-full-design-microamp-hours: battery design capacity
+ - trickle-charge-current-microamp: current for trickle-charge phase
- precharge-current-microamp: current for pre-charge phase
+ - precharge-upper-limit-microvolt: limit when to change to constant charging
- charge-term-current-microamp: current for charge termination phase
- constant-charge-current-max-microamp: maximum constant input current
- constant-charge-voltage-max-microvolt: maximum constant input voltage
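
A simple-battery node exercising the newly documented properties could look as
follows; all values are illustrative and must come from the battery datasheet:

    battery: battery {
        compatible = "simple-battery";
        voltage-min-design-microvolt = <3200000>;
        voltage-max-design-microvolt = <4200000>;
        charge-full-design-microamp-hours = <3000000>;
        over-voltage-threshold-microvolt = <4500000>;
        re-charge-voltage-microvolt = <4100000>;
        trickle-charge-current-microamp = <64000>;
        precharge-current-microamp = <256000>;
        precharge-upper-limit-microvolt = <3000000>;
        charge-term-current-microamp = <128000>;
    };
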
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
deleted file mode 100644
index 4fa8e08df2b6..000000000000
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-TI BQ27XXX fuel gauge family
-
-Required properties:
-- compatible: contains one of the following:
- * "ti,bq27200" - BQ27200
- * "ti,bq27210" - BQ27210
- * "ti,bq27500" - deprecated, use revision specific property below
- * "ti,bq27510" - deprecated, use revision specific property below
- * "ti,bq27520" - deprecated, use revision specific property below
- * "ti,bq27500-1" - BQ27500/1
- * "ti,bq27510g1" - BQ27510-g1
- * "ti,bq27510g2" - BQ27510-g2
- * "ti,bq27510g3" - BQ27510-g3
- * "ti,bq27520g1" - BQ27520-g1
- * "ti,bq27520g2" - BQ27520-g2
- * "ti,bq27520g3" - BQ27520-g3
- * "ti,bq27520g4" - BQ27520-g4
- * "ti,bq27521" - BQ27521
- * "ti,bq27530" - BQ27530
- * "ti,bq27531" - BQ27531
- * "ti,bq27541" - BQ27541
- * "ti,bq27542" - BQ27542
- * "ti,bq27546" - BQ27546
- * "ti,bq27742" - BQ27742
- * "ti,bq27545" - BQ27545
- * "ti,bq27411" - BQ27411
- * "ti,bq27421" - BQ27421
- * "ti,bq27425" - BQ27425
- * "ti,bq27426" - BQ27426
- * "ti,bq27441" - BQ27441
- * "ti,bq27621" - BQ27621
-- reg: integer, I2C address of the fuel gauge.
-
-Optional properties:
-- monitored-battery: phandle of battery characteristics node
- The fuel gauge uses the following battery properties:
- + energy-full-design-microwatt-hours
- + charge-full-design-microamp-hours
- + voltage-min-design-microvolt
- Both or neither of the *-full-design-*-hours properties must be set.
- See Documentation/devicetree/bindings/power/supply/battery.txt
-
-Example:
-
- bat: battery {
- compatible = "simple-battery";
- voltage-min-design-microvolt = <3200000>;
- energy-full-design-microwatt-hours = <5290000>;
- charge-full-design-microamp-hours = <1430000>;
- };
-
- bq27510g3: fuel-gauge@55 {
- compatible = "ti,bq27510g3";
- reg = <0x55>;
- monitored-battery = <&bat>;
- };
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml
new file mode 100644
index 000000000000..03d1020a2e47
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2020 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/supply/bq27xxx.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TI BQ27XXX fuel gauge family
+
+maintainers:
+ - Pali Rohár <pali@kernel.org>
+ - Andrew F. Davis <afd@ti.com>
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |
+ Support various Texas Instruments fuel gauge devices that share similar
+ register maps and power supply properties
+
+allOf:
+ - $ref: power-supply.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,bq27200
+ - ti,bq27210
+ - ti,bq27500 # deprecated, use revision specific property below
+ - ti,bq27510 # deprecated, use revision specific property below
+ - ti,bq27520 # deprecated, use revision specific property below
+ - ti,bq27500-1
+ - ti,bq27510g1
+ - ti,bq27510g2
+ - ti,bq27510g3
+ - ti,bq27520g1
+ - ti,bq27520g2
+ - ti,bq27520g3
+ - ti,bq27520g4
+ - ti,bq27521
+ - ti,bq27530
+ - ti,bq27531
+ - ti,bq27541
+ - ti,bq27542
+ - ti,bq27546
+ - ti,bq27742
+ - ti,bq27545
+ - ti,bq27411
+ - ti,bq27421
+ - ti,bq27425
+ - ti,bq27426
+ - ti,bq27441
+ - ti,bq27621
+
+ reg:
+ maxItems: 1
+ description: integer, I2C address of the fuel gauge.
+
+ monitored-battery:
+ description: |
+ phandle of battery characteristics node.
+ The fuel gauge uses the following battery properties:
+ - energy-full-design-microwatt-hours
+ - charge-full-design-microamp-hours
+ - voltage-min-design-microvolt
+ Both or neither of the *-full-design-*-hours properties must be set.
+ See Documentation/devicetree/bindings/power/supply/battery.txt
+
+ power-supplies: true
+
+required:
+ - compatible
+ - reg
+additionalProperties: false
+
+examples:
+ - |
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bat: battery {
+ compatible = "simple-battery";
+ voltage-min-design-microvolt = <3200000>;
+ energy-full-design-microwatt-hours = <5290000>;
+ charge-full-design-microamp-hours = <1430000>;
+ };
+
+ bq27510g3: fuel-gauge@55 {
+ compatible = "ti,bq27510g3";
+ reg = <0x55>;
+ monitored-battery = <&bat>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml b/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml
new file mode 100644
index 000000000000..4a265d4234b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/cw2015_battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Battery driver for CW2015 shuntless fuel gauge by CellWise.
+
+maintainers:
+ - Tobias Schramm <t.schramm@manjaro.org>
+
+description: |
+ The driver can utilize information from a simple-battery linked via a
+ phandle in monitored-battery. If specified the driver uses the
+ charge-full-design-microamp-hours property of the battery.
+
+properties:
+ compatible:
+ const: cellwise,cw2015
+
+ reg:
+ maxItems: 1
+
+ cellwise,battery-profile:
+ description: |
+ This property specifies characteristics of the battery used. The format
+ of this binary blob is kept secret by CellWise. The only way to obtain
+ it is to mail two batteries to a test facility of CellWise and receive
+ back a test report with the binary blob.
+    $ref: /schemas/types.yaml#/definitions/uint8-array
+    minItems: 64
+    maxItems: 64
+
+ cellwise,monitor-interval-ms:
+ description:
+      Specifies the interval in milliseconds at which gauge values are polled
+ minimum: 250
+
+ power-supplies:
+ description:
+ Specifies supplies used for charging the battery connected to this gauge
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle-array
+ - minItems: 1
+ maxItems: 8 # Should be enough
+
+ monitored-battery:
+ description:
+ Specifies the phandle of a simple-battery connected to this gauge
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cw2015@62 {
+           compatible = "cellwise,cw2015";
+ reg = <0x62>;
+ cellwise,battery-profile = /bits/ 8 <
+ 0x17 0x67 0x80 0x73 0x6E 0x6C 0x6B 0x63
+ 0x77 0x51 0x5C 0x58 0x50 0x4C 0x48 0x36
+ 0x15 0x0C 0x0C 0x19 0x5B 0x7D 0x6F 0x69
+ 0x69 0x5B 0x0C 0x29 0x20 0x40 0x52 0x59
+ 0x57 0x56 0x54 0x4F 0x3B 0x1F 0x7F 0x17
+ 0x06 0x1A 0x30 0x5A 0x85 0x93 0x96 0x2D
+ 0x48 0x77 0x9C 0xB3 0x80 0x52 0x94 0xCB
+ 0x2F 0x00 0x64 0xA5 0xB5 0x11 0xF0 0x11
+ >;
+ cellwise,monitor-interval-ms = <5000>;
+ monitored-battery = <&bat>;
+ power-supplies = <&mains_charger>, <&usb_charger>;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/power/supply/power-supply.yaml b/Documentation/devicetree/bindings/power/supply/power-supply.yaml
new file mode 100644
index 000000000000..3bb02bb3a2d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/power-supply.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/power/supply/power-supply.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Power Supply Core Support
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+properties:
+ power-supplies:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ This property is added to a supply in order to list the devices which
+ supply it power, referenced by their phandles.
+
+examples:
+ - |
+ power {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_charger:charger@e {
+ compatible = "some,usb-charger";
+ reg = <0xe>;
+ };
+
+ ac_charger:charger@c {
+ compatible = "some,ac-charger";
+ reg = <0xc>;
+ };
+
+ battery:battery@b {
+ compatible = "some,battery";
+ reg = <0xb>;
+ power-supplies = <&usb_charger>, <&ac_charger>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/power_supply.txt b/Documentation/devicetree/bindings/power/supply/power_supply.txt
index 8391bfa0edac..d9693e054509 100644
--- a/Documentation/devicetree/bindings/power/supply/power_supply.txt
+++ b/Documentation/devicetree/bindings/power/supply/power_supply.txt
@@ -1,23 +1,2 @@
-Power Supply Core Support
-
-Optional Properties:
- - power-supplies : This property is added to a supply in order to list the
- devices which supply it power, referenced by their phandles.
-
-Example:
-
- usb-charger: power@e {
- compatible = "some,usb-charger";
- ...
- };
-
- ac-charger: power@c {
- compatible = "some,ac-charger";
- ...
- };
-
- battery@b {
- compatible = "some,battery";
- ...
- power-supplies = <&usb-charger>, <&ac-charger>;
- };
+This binding has been converted to YAML; please see power-supply.yaml in this
+directory.
diff --git a/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml b/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml
new file mode 100644
index 000000000000..7e0f73a898c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/rohm,bd99954.yaml
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/rohm,bd99954.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BD99954 Battery charger
+
+maintainers:
+ - Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ - Markus Laine <markus.laine@fi.rohmeurope.com>
+ - Mikko Mutanen <mikko.mutanen@fi.rohmeurope.com>
+
+description: |
+ The ROHM BD99954 is a Battery Management LSI for 1-4 cell Lithium-Ion
+  secondary batteries, intended for use in space-constrained equipment such
+  as low-profile notebook PCs, tablets and other applications. The BD99954
+  provides a dual-source battery charger, two-port BC1.2 detection and a
+  battery monitor.
+
+
+properties:
+ compatible:
+ const: rohm,bd99954
+#
+# The battery charging profile of BD99954.
+#
+# Curve (1) represents charging current.
+# Curve (2) represents battery voltage.
+#
+# The BD99954 data sheet divides charging to three phases.
+# a) Trickle-charge with constant current (8).
+# b) pre-charge with constant current (6)
+# c) fast-charge with:
+# First a constant current (5) phase (CC)
+# Then constant voltage (CV) phase (after the battery voltage has reached
+# target level - until charging current has dropped to termination
+# level (7)
+#
+# V ^ ^ I
+# . .
+# . .
+# (4)- -.- - - - - - - - - - - - - - +++++++++++++++++++++++++++.
+# . / .
+# . ++++++/++ - - - - - - - - - - - - -.- - (5)
+# . + / + .
+# . + - -- .
+# . + - + .
+# . +.- -: .
+# . .+ +` .
+# . .- + | `/ .
+# . .." + .: .
+# . -" + -- .
+# . (2) ..." + | :- .
+# . ..."" + -: .
+# (3)- -.-.""- - - - -+++++++++ - - - - - - -.:- - - - - - - - - .- - (6)
+# . + `:. .
+# . + | -: .
+# . + -: .
+# . + .. .
+# . (1) + | "+++- - - -.- - (7)
+# -++++++++++++++- - - - - - - - - - - - - - - - - + - - - .- - (8)
+# . + -
+# -------------------------------------------------+++++++++-->
+# | | | CC | CV |
+# | --trickle-- | -pre- | ---------fast----------- |
+#
+# The charger uses the following battery properties
+# - trickle-charge-current-microamp:
+# Current used at trickle-charge phase (8 in above chart)
+# minimum: 64000
+# maximum: 1024000
+# multipleOf: 64000
+# - precharge-current-microamp:
+# Current used at pre-charge phase (6 in above chart)
+# minimum: 64000
+# maximum: 1024000
+# multipleOf: 64000
+# - constant-charge-current-max-microamp
+# Current used at fast charge constant current phase (5 in above chart)
+# minimum: 64000
+# maximum: 1024000
+# multipleOf: 64000
+# - constant-charge-voltage-max-microvolt
+# The constant voltage used in fast charging phase (4 in above chart)
+# minimum: 2560000
+# maximum: 19200000
+# multipleOf: 16000
+# - precharge-upper-limit-microvolt
+# charging mode is changed from trickle charging to pre-charging
+# when the battery voltage exceeds this limit voltage (3 in above chart)
+# minimum: 2048000
+# maximum: 19200000
+# multipleOf: 64000
+# - re-charge-voltage-microvolt
+# minimum: 2560000
+# maximum: 19200000
+# multipleOf: 16000
+# re-charging is automatically started when the battery has discharged
+# to the point where the battery voltage drops below this limit
+# - over-voltage-threshold-microvolt
+# the battery is expected to be faulty if the battery voltage exceeds this
+# limit. The charger will then enter a "battery faulty" state
+# minimum: 2560000
+# maximum: 19200000
+# multipleOf: 16000
+# - charge-term-current-microamp
+# minimum: 0
+# maximum: 1024000
+# multipleOf: 64000
+# a charge cycle terminates when the battery voltage is above the recharge
+# threshold and the current is below this setting (7 in above chart)
+# See also Documentation/devicetree/bindings/power/supply/battery.txt
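+#
+# As an illustration only, a battery node providing these properties might
+# look like the sketch below (the label and all values are hypothetical,
+# merely chosen to satisfy the ranges listed above):
+#
+#    battery: battery {
+#        compatible = "simple-battery";
+#        trickle-charge-current-microamp = <64000>;
+#        precharge-current-microamp = <256000>;
+#        precharge-upper-limit-microvolt = <2752000>;
+#        constant-charge-current-max-microamp = <1024000>;
+#        constant-charge-voltage-max-microvolt = <4208000>;
+#        charge-term-current-microamp = <128000>;
+#    };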
+
+ monitored-battery:
+ description:
+ phandle of battery characteristics devicetree node
+
+ rohm,vsys-regulation-microvolt:
+ description: System-specific lower limit for the system voltage.
+ minimum: 2560000
+ maximum: 19200000
+ multipleOf: 64000
+
+ rohm,vbus-input-current-limit-microamp:
+ description: System-specific VBUS input current limit (in microamps).
+ minimum: 32000
+ maximum: 16352000
+ multipleOf: 32000
+
+ rohm,vcc-input-current-limit-microamp:
+ description: System-specific VCC/VACP input current limit (in microamps).
+ minimum: 32000
+ maximum: 16352000
+ multipleOf: 32000
+
+required:
+ - compatible
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ charger@9 {
+ compatible = "rohm,bd99954";
+ monitored-battery = <&battery>;
+ reg = <0x9>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <29 8>;
+ rohm,vsys-regulation-microvolt = <8960000>;
+ rohm,vbus-input-current-limit-microamp = <1472000>;
+ rohm,vcc-input-current-limit-microamp = <1472000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml b/Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml
new file mode 100644
index 000000000000..205bc826bd20
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/sbs,sbs-battery.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/supply/sbs,sbs-battery.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SBS compliant battery
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: |
+ Battery compatible with the Smart Battery System specifications
+
+properties:
+
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - ti,bq20z65
+ - ti,bq20z75
+ - enum:
+ - sbs,sbs-battery
+ - items:
+ - const: sbs,sbs-battery
+
+ reg:
+ maxItems: 1
+
+ sbs,i2c-retry-count:
+ description:
+ The number of times to retry I2C transactions on I2C IO failure.
+ default: 0
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+
+ sbs,poll-retry-count:
+ description:
+ The number of times to try looking for new status after an external
+ change notification.
+ default: 0
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+
+ sbs,battery-detect-gpios:
+ description:
+ GPIO which signals battery detection. If this is not supplied, the bus
+ needs to be polled to detect the battery.
+ maxItems: 1
+
+ sbs,disable-charger-broadcasts:
+ description:
+ SBS batteries by default send broadcast messages to SBS-compliant chargers
+ to configure the maximum charge current/voltage. If the hardware does not
+ have an SBS-compliant charger, broadcasting should be disabled via this
+ property to avoid blocking the bus. Some SBS battery fuel gauges are also
+ known to have a buggy multi-master implementation.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery@b {
+ compatible = "ti,bq20z75", "sbs,sbs-battery";
+ reg = <0xb>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <10>;
+ sbs,battery-detect-gpios = <&gpio 122 GPIO_ACTIVE_HIGH>;
+ sbs,disable-charger-broadcasts;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt b/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
deleted file mode 100644
index 4e78e51018eb..000000000000
--- a/Documentation/devicetree/bindings/power/supply/sbs_sbs-battery.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-SBS sbs-battery
-~~~~~~~~~~
-
-Required properties :
- - compatible: "<vendor>,<part-number>", "sbs,sbs-battery" as fallback. The
- part number compatible string might be used in order to take care of
- vendor specific registers.
- Known <vendor>,<part-number>:
- ti,bq20z75
-
-Optional properties :
- - sbs,i2c-retry-count : The number of times to retry i2c transactions on i2c
- IO failure.
- - sbs,poll-retry-count : The number of times to try looking for new status
- after an external change notification.
- - sbs,battery-detect-gpios : The gpio which signals battery detection and
- a flag specifying its polarity.
-
-Example:
-
- battery@b {
- compatible = "ti,bq20z75", "sbs,sbs-battery";
- reg = <0xb>;
- sbs,i2c-retry-count = <2>;
- sbs,poll-retry-count = <10>;
- sbs,battery-detect-gpios = <&gpio-controller 122 1>;
- }
diff --git a/Documentation/devicetree/bindings/property-units.txt b/Documentation/devicetree/bindings/property-units.txt
index e9b8360b3288..c80a110c1e26 100644
--- a/Documentation/devicetree/bindings/property-units.txt
+++ b/Documentation/devicetree/bindings/property-units.txt
@@ -41,3 +41,7 @@ Temperature
Pressure
----------------------------------------
-kpascal : kilopascal
+
+Throughput
+----------------------------------------
+-kBps : kilobytes per second
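+
+For instance, a hypothetical property using this suffix could be written as
+follows (the property name and value are illustrative only):
+
+	example-bandwidth-kBps = <100000>;	/* 100 MB per second */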
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
deleted file mode 100644
index 22f1c3d8b773..000000000000
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Freescale i.MX PWM controller
-
-Required properties:
-- compatible : should be "fsl,<soc>-pwm" and one of the following
- compatible strings:
- - "fsl,imx1-pwm" for PWM compatible with the one integrated on i.MX1
- - "fsl,imx27-pwm" for PWM compatible with the one integrated on i.MX27
-- reg: physical base address and length of the controller's registers
-- #pwm-cells: 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.yaml
- in this directory for a description of the cells format.
-- clocks : Clock specifiers for both ipg and per clocks.
-- clock-names : Clock names should include both "ipg" and "per"
-See the clock consumer binding,
- Documentation/devicetree/bindings/clock/clock-bindings.txt
-- interrupts: The interrupt for the pwm controller
-
-Example:
-
-pwm1: pwm@53fb4000 {
- #pwm-cells = <3>;
- compatible = "fsl,imx53-pwm", "fsl,imx27-pwm";
- reg = <0x53fb4000 0x4000>;
- clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>,
- <&clks IMX5_CLK_PWM1_HF_GATE>;
- clock-names = "ipg", "per";
- interrupts = <61>;
-};
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.yaml b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
new file mode 100644
index 000000000000..4b62af27d4b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/imx-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX PWM controller
+
+maintainers:
+ - Philipp Zabel <p.zabel@pengutronix.de>
+
+properties:
+ "#pwm-cells":
+ description: |
+ Should be 2 for i.MX1 and 3 for i.MX27 and newer SoCs. See pwm.yaml
+ in this directory for a description of the cells format.
+ enum:
+ - 2
+ - 3
+
+ compatible:
+ enum:
+ - fsl,imx1-pwm
+ - fsl,imx27-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SoC PWM ipg clock
+ - description: SoC PWM per clock
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: per
+ maxItems: 2
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - "#pwm-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx5-clock.h>
+
+ pwm@53fb4000 {
+ #pwm-cells = <3>;
+ compatible = "fsl,imx27-pwm";
+ reg = <0x53fb4000 0x4000>;
+ clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>,
+ <&clks IMX5_CLK_PWM1_HF_GATE>;
+ clock-names = "ipg", "per";
+ interrupts = <61>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
deleted file mode 100644
index 5bf20950a24e..000000000000
--- a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Freescale i.MX TPM PWM controller
-
-Required properties:
-- compatible : Should be "fsl,imx7ulp-pwm".
-- reg: Physical base address and length of the controller's registers.
-- #pwm-cells: Should be 3. See pwm.yaml in this directory for a description of the cells format.
-- clocks : The clock provided by the SoC to drive the PWM.
-- interrupts: The interrupt for the PWM controller.
-
-Note: The TPM counter and period counter are shared between multiple channels, so all channels
-should use same period setting.
-
-Example:
-
-tpm4: pwm@40250000 {
- compatible = "fsl,imx7ulp-pwm";
- reg = <0x40250000 0x1000>;
- assigned-clocks = <&pcc2 IMX7ULP_CLK_LPTPM4>;
- assigned-clock-parents = <&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>;
- clocks = <&pcc2 IMX7ULP_CLK_LPTPM4>;
- #pwm-cells = <3>;
-};
diff --git a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml
new file mode 100644
index 000000000000..fe9ef42544f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/imx-tpm-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX TPM PWM controller
+
+maintainers:
+ - Anson Huang <anson.huang@nxp.com>
+
+description: |
+ The TPM counter and period counter are shared between multiple
+ channels, so all channels should use the same period setting.
+
+properties:
+ "#pwm-cells":
+ const: 3
+
+ compatible:
+ enum:
+ - fsl,imx7ulp-pwm
+
+ reg:
+ maxItems: 1
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-parents:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - "#pwm-cells"
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx7ulp-clock.h>
+
+ pwm@40250000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x40250000 0x1000>;
+ assigned-clocks = <&pcc2 IMX7ULP_CLK_LPTPM4>;
+ assigned-clock-parents = <&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>;
+ clocks = <&pcc2 IMX7ULP_CLK_LPTPM4>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
deleted file mode 100644
index a1b8a482f873..000000000000
--- a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Freescale MXS PWM controller
-
-Required properties:
-- compatible: should be "fsl,imx23-pwm"
-- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 3. See pwm.yaml in this directory for a description of
- the cells format.
-- fsl,pwm-number: the number of PWM devices
-
-Example:
-
-pwm: pwm@80064000 {
- compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
- reg = <0x80064000 0x2000>;
- #pwm-cells = <3>;
- fsl,pwm-number = <8>;
-};
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml b/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
new file mode 100644
index 000000000000..da68f4a25dd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/mxs-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale MXS PWM controller
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+ - Anson Huang <anson.huang@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx23-pwm
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+ fsl,pwm-number:
+ $ref: '/schemas/types.yaml#/definitions/uint32'
+ description: u32 value representing the number of PWM devices
+
+required:
+ - compatible
+ - reg
+ - "#pwm-cells"
+ - fsl,pwm-number
+
+additionalProperties: false
+
+examples:
+ - |
+ pwm@80064000 {
+ compatible = "fsl,imx23-pwm";
+ reg = <0x80064000 0x2000>;
+ #pwm-cells = <3>;
+ fsl,pwm-number = <8>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml b/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml
index ea7f32905172..fc799b0577d4 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-samsung.yaml
@@ -49,17 +49,17 @@ properties:
are available.
oneOf:
- items:
- - const: timers
+ - const: timers
- items:
- - const: timers
- - const: pwm-tclk0
+ - const: timers
+ - const: pwm-tclk0
- items:
- - const: timers
- - const: pwm-tclk1
+ - const: timers
+ - const: pwm-tclk1
- items:
- - const: timers
- - const: pwm-tclk0
- - const: pwm-tclk1
+ - const: timers
+ - const: pwm-tclk0
+ - const: pwm-tclk1
interrupts:
description:
@@ -78,12 +78,11 @@ properties:
A list of PWM channels used as PWM outputs on particular platform.
It is an array of up to 5 elements being indices of PWM channels
(from 0 to 4), the order does not matter.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - uniqueItems: true
- - items:
- minimum: 0
- maximum: 4
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ uniqueItems: true
+ items:
+ minimum: 0
+ maximum: 4
required:
- clocks
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
index 945c14e1be35..461afb4c1f5d 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.yaml
@@ -68,7 +68,7 @@ examples:
pwm0: pwm@e6e30000 {
compatible = "renesas,pwm-r8a7743", "renesas,pwm-rcar";
- reg = <0 0xe6e30000 0 0x8>;
+ reg = <0xe6e30000 0x8>;
clocks = <&cpg CPG_MOD 523>;
power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
resets = <&cpg 523>;
diff --git a/Documentation/devicetree/bindings/regulator/arizona-regulator.txt b/Documentation/devicetree/bindings/regulator/arizona-regulator.txt
deleted file mode 100644
index 69bf41949b01..000000000000
--- a/Documentation/devicetree/bindings/regulator/arizona-regulator.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Cirrus Logic Arizona class audio SoCs
-
-These devices are audio SoCs with extensive digital capabilities and a range
-of analogue I/O.
-
-This document lists regulator specific bindings, see the primary binding
-document:
- For Wolfson Microelectronic Arizona codecs: ../mfd/arizona.txt
- For Cirrus Logic Madera codecs: ../mfd/madera.txt
-
-Optional properties:
- - wlf,ldoena : GPIO specifier for the GPIO controlling LDOENA
-
-Optional subnodes:
- - ldo1 : Initial data for the LDO1 regulator, as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt
- - micvdd : Initial data for the MICVDD regulator, as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
deleted file mode 100644
index 91974e6ee251..000000000000
--- a/Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-Cirrus Logic Lochnagar Audio Development Board
-
-Lochnagar is an evaluation and development board for Cirrus Logic
-Smart CODEC and Amp devices. It allows the connection of most Cirrus
-Logic devices on mini-cards, as well as allowing connection of
-various application processor systems to provide a full evaluation
-platform. Audio system topology, clocking and power can all be
-controlled through the Lochnagar, allowing the device under test
-to be used in a variety of possible use cases.
-
-This binding document describes the binding for the regulator portion
-of the driver.
-
-Also see these documents for generic binding information:
- [1] Regulator: ../regulator/regulator.txt
-
-This binding must be part of the Lochnagar MFD binding:
- [2] ../mfd/cirrus,lochnagar.txt
-
-Optional sub-nodes:
-
- - VDDCORE : Initialisation data for the VDDCORE regulator, which
- supplies the CODECs digital core if it has no build regulator for that
- purpose.
- Required Properties:
- - compatible : One of the following strings:
- "cirrus,lochnagar2-vddcore"
- - SYSVDD-supply: Primary power supply for the Lochnagar.
-
- - MICVDD : Initialisation data for the MICVDD regulator, which
- supplies the CODECs MICVDD.
- Required Properties:
- - compatible : One of the following strings:
- "cirrus,lochnagar2-micvdd"
- - SYSVDD-supply: Primary power supply for the Lochnagar.
-
- - MIC1VDD, MIC2VDD : Initialisation data for the MICxVDD supplies.
- Required Properties:
- - compatible : One of the following strings:
- "cirrus,lochnagar2-mic1vdd", "cirrus,lochnagar2-mic2vdd"
- Optional Properties:
- - cirrus,micbias-input : A property selecting which of the CODEC
- minicard micbias outputs should be used, valid values are 1 - 4.
- - MICBIAS1-supply, MICBIAS2-supply: Regulator supplies for the
- MICxVDD outputs, supplying the digital microphones, normally
- supplied from the attached CODEC.
-
- - VDD1V8 : Recommended fixed regulator for the VDD1V8 regulator, which supplies the
- CODECs analog and 1.8V digital supplies.
- Required Properties:
- - compatible : Should be set to "regulator-fixed"
- - regulator-min-microvolt : Should be set to 1.8V
- - regulator-max-microvolt : Should be set to 1.8V
- - regulator-boot-on
- - regulator-always-on
- - vin-supply : Should be set to same supply as SYSVDD
-
-Example:
-
-lochnagar {
- lochnagar-micvdd: MICVDD {
- compatible = "cirrus,lochnagar2-micvdd";
-
- SYSVDD-supply = <&wallvdd>;
-
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
-
- lochnagar-vdd1v8: VDD1V8 {
- compatible = "regulator-fixed";
-
- regulator-name = "VDD1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-boot-on;
- regulator-always-on;
-
- vin-supply = <&wallvdd>;
- };
-};
-
diff --git a/Documentation/devicetree/bindings/regulator/gpio-regulator.yaml b/Documentation/devicetree/bindings/regulator/gpio-regulator.yaml
index 9d3b28417fb6..605590384b48 100644
--- a/Documentation/devicetree/bindings/regulator/gpio-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/gpio-regulator.yaml
@@ -46,24 +46,22 @@ properties:
0: LOW
1: HIGH
Default is LOW if nothing else is specified.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - maxItems: 8
- items:
- enum: [ 0, 1 ]
- default: 0
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ maxItems: 8
+ items:
+ enum: [0, 1]
+ default: 0
states:
description: Selection of available voltages/currents provided by this
regulator and matching GPIO configurations to achieve them. If there are
no states in the "states" array, use a fixed regulator instead.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-matrix
- - maxItems: 8
- items:
- items:
- - description: Voltage in microvolts
- - description: GPIO group state value
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ maxItems: 8
+ items:
+ items:
+ - description: Voltage in microvolts
+ - description: GPIO group state value
startup-delay-us:
description: startup time in microseconds
@@ -81,12 +79,11 @@ properties:
regulator-type:
description: Specifies what is being regulated.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/string
- - enum:
- - voltage
- - current
- default: voltage
+ $ref: /schemas/types.yaml#/definitions/string
+ enum:
+ - voltage
+ - current
+ default: voltage
required:
- compatible
diff --git a/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml b/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
index 3b019fa6db31..90727fdc1283 100644
--- a/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
+++ b/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
@@ -27,13 +27,11 @@ properties:
patternProperties:
"^buck[1-4]$":
- allOf:
- - $ref: "regulator.yaml#"
+ $ref: "regulator.yaml#"
type: object
"^ldo[1-4]$":
- allOf:
- - $ref: "regulator.yaml#"
+ $ref: "regulator.yaml#"
type: object
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
index ae6e7ab36c58..12b8963615c3 100644
--- a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
+++ b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
@@ -21,17 +21,16 @@ properties:
regulators:
type: object
- allOf:
- - $ref: regulator.yaml#
+ $ref: regulator.yaml#
+
description: |
list of regulators provided by this controller, must be named
after their hardware counterparts BUCK[1-4], one LDORTC, and LDO[2-5]
properties:
mps,switch-freq:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint8"
- enum: [ 0, 1, 2, 3 ]
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
default: 2
description: |
switching frequency must be one of following corresponding value
@@ -40,32 +39,27 @@ properties:
patternProperties:
"^ldo[1-4]$":
type: object
- allOf:
- - $ref: regulator.yaml#
+ $ref: regulator.yaml#
"^ldortc$":
type: object
- allOf:
- - $ref: regulator.yaml#
+ $ref: regulator.yaml#
"^buck[1-4]$":
type: object
- allOf:
- - $ref: regulator.yaml#
+ $ref: regulator.yaml#
properties:
mps,buck-softstart:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint8"
- enum: [ 0, 1, 2, 3 ]
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
description: |
defines the soft start time of this buck, must be one of the following
corresponding values 150us, 300us, 610us, 920us
mps,buck-phase-delay:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint8"
- enum: [ 0, 1, 2, 3 ]
+ $ref: "/schemas/types.yaml#/definitions/uint8"
+ enum: [0, 1, 2, 3]
description: |
defines the phase delay of this buck, must be one of the following
corresponding values 0deg, 90deg, 180deg, 270deg
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 91a39a33000b..ec505dbbf87c 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -123,9 +123,8 @@ properties:
0: Disable active discharge.
1: Enable active discharge.
Absence of this property will leave configuration to default.
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum: [ 0, 1 ]
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [0, 1]
regulator-coupled-with:
description: Regulators with which the regulator is coupled. The linkage
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml
index 1e52dafcb5c9..5ce587fff961 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml
@@ -24,10 +24,9 @@ description: |
patternProperties:
"^LDO[1-7]$":
type: object
- allOf:
- - $ref: regulator.yaml#
description:
Properties for single LDO regulator.
+ $ref: regulator.yaml#
properties:
regulator-name:
@@ -39,10 +38,9 @@ patternProperties:
"^BUCK[1-7]$":
type: object
- allOf:
- - $ref: regulator.yaml#
description:
Properties for single BUCK regulator.
+ $ref: regulator.yaml#
properties:
regulator-name:
@@ -51,40 +49,36 @@ patternProperties:
should be "buck1", ..., "buck7"
rohm,dvs-run-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 3300000
description:
PMIC default "RUN" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 3300000
rohm,dvs-idle-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 3300000
description:
PMIC default "IDLE" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 3300000
rohm,dvs-suspend-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 3300000
description:
PMIC default "SUSPEND" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 3300000
rohm,dvs-lpsr-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 3300000
description:
PMIC default "LPSR" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 3300000
# Supported default DVS states:
# buck | run | idle | suspend | lpsr
diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.yaml b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.yaml
index 543d4b52397e..19d9408d9c3b 100644
--- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.yaml
@@ -30,8 +30,7 @@ description: |
patternProperties:
"^LDO[1-7]$":
type: object
- allOf:
- - $ref: regulator.yaml#
+ $ref: regulator.yaml#
description:
Properties for single LDO regulator.
@@ -45,8 +44,7 @@ patternProperties:
"^BUCK[1-8]$":
type: object
- allOf:
- - $ref: regulator.yaml#
+ $ref: regulator.yaml#
description:
Properties for single BUCK regulator.
@@ -57,28 +55,25 @@ patternProperties:
should be "buck1", ..., "buck8"
rohm,dvs-run-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 1300000
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 1300000
description:
PMIC default "RUN" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
rohm,dvs-idle-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 1300000
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 1300000
description:
PMIC default "IDLE" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
rohm,dvs-suspend-voltage:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 1300000
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 1300000
description:
PMIC default "SUSPEND" state voltage in uV. See below table for
bucks which support this. 0 means disabled.
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml b/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml
index 64f1183ce841..cb336b2c16af 100644
--- a/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml
+++ b/Documentation/devicetree/bindings/regulator/st,stm32-booster.yaml
@@ -23,8 +23,7 @@ properties:
- st,stm32mp1-booster
st,syscfg:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: phandle to system configuration controller.
vdda-supply:
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
index 8d8f38fe85dc..e6322bc3e447 100644
--- a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
+++ b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
@@ -26,8 +26,7 @@ patternProperties:
"^(reg11|reg18|usb33)$":
type: object
- allOf:
- - $ref: "regulator.yaml#"
+ $ref: "regulator.yaml#"
required:
- compatible
diff --git a/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml b/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
new file mode 100644
index 000000000000..a0aea73bf412
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/wlf,arizona.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic/Wolfson Microelectronics Arizona/Madera class audio SoCs
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ These devices are audio SoCs with extensive digital capabilities and a
+ range of analogue I/O.
+
+ This document lists regulator-specific bindings; see the primary binding
+ documents: ../mfd/wlf,arizona.yaml for Wolfson Microelectronics Arizona
+ codecs and ../mfd/madera.txt for Cirrus Logic Madera codecs.
+
+properties:
+ wlf,ldoena:
+ description:
+ GPIO specifier for the GPIO controlling LDOENA.
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ maxItems: 1
+
+ ldo1:
+ description:
+ Initial data for the LDO1 regulator.
+ $ref: "regulator.yaml#"
+ type: object
+
+ micvdd:
+ description:
+ Initial data for the MICVDD regulator.
+ $ref: "regulator.yaml#"
+ type: object
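+
+# As a non-authoritative sketch only, these properties would typically sit
+# inside the codec node described by the primary MFD binding; the GPIO
+# phandle, pin number and regulator constraints below are made up for
+# illustration:
+#
+#    codec {
+#        wlf,ldoena = <&gpio0 5 0>;
+#
+#        ldo1 {
+#            regulator-min-microvolt = <1175000>;
+#        };
+#
+#        micvdd {
+#            regulator-min-microvolt = <1800000>;
+#            regulator-max-microvolt = <3300000>;
+#        };
+#    };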
diff --git a/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml b/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml
new file mode 100644
index 000000000000..c019f9fbe916
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/ingenic,vpu.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/remoteproc/ingenic,vpu.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Ingenic Video Processing Unit bindings
+
+description:
+ Inside the Video Processing Unit (VPU) of the recent JZ47xx SoCs from
+ Ingenic is a second Xburst MIPS CPU very similar to the main core.
+ This document describes the devicetree bindings for this auxiliary
+ processor.
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ compatible:
+ const: ingenic,jz4770-vpu-rproc
+
+ reg:
+ items:
+ - description: aux registers
+ - description: tcsm0 registers
+ - description: tcsm1 registers
+ - description: sram registers
+
+ reg-names:
+ items:
+ - const: aux
+ - const: tcsm0
+ - const: tcsm1
+ - const: sram
+
+ clocks:
+ items:
+ - description: aux clock
+ - description: vpu clock
+
+ clock-names:
+ items:
+ - const: aux
+ - const: vpu
+
+ interrupts:
+ description: VPU hardware interrupt
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4770-cgu.h>
+
+ vpu: video-decoder@132a0000 {
+ compatible = "ingenic,jz4770-vpu-rproc";
+
+ reg = <0x132a0000 0x20>, /* AUX */
+ <0x132b0000 0x4000>, /* TCSM0 */
+ <0x132c0000 0xc000>, /* TCSM1 */
+ <0x132f0000 0x7000>; /* SRAM */
+ reg-names = "aux", "tcsm0", "tcsm1", "sram";
+
+ clocks = <&cgu JZ4770_CLK_AUX>, <&cgu JZ4770_CLK_VPU>;
+ clock-names = "aux", "vpu";
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
index 9938918b2fea..54737024da20 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.txt
@@ -15,12 +15,16 @@ on the Qualcomm ADSP Hexagon core.
"qcom,qcs404-adsp-pas"
"qcom,qcs404-cdsp-pas"
"qcom,qcs404-wcss-pas"
+ "qcom,sc7180-mpss-pas"
"qcom,sdm845-adsp-pas"
"qcom,sdm845-cdsp-pas"
"qcom,sm8150-adsp-pas"
"qcom,sm8150-cdsp-pas"
"qcom,sm8150-mpss-pas"
"qcom,sm8150-slpi-pas"
+ "qcom,sm8250-adsp-pas"
+ "qcom,sm8250-cdsp-pas"
+ "qcom,sm8250-slpi-pas"
- interrupts-extended:
Usage: required
@@ -44,8 +48,12 @@ on the Qualcomm ADSP Hexagon core.
qcom,sm8150-adsp-pas:
qcom,sm8150-cdsp-pas:
qcom,sm8150-slpi-pas:
+ qcom,sm8250-adsp-pas:
+ qcom,sm8250-cdsp-pas:
+ qcom,sm8250-slpi-pas:
must be "wdog", "fatal", "ready", "handover", "stop-ack"
qcom,qcs404-wcss-pas:
+ qcom,sc7180-mpss-pas:
qcom,sm8150-mpss-pas:
must be "wdog", "fatal", "ready", "handover", "stop-ack",
"shutdown-ack"
@@ -105,10 +113,14 @@ on the Qualcomm ADSP Hexagon core.
qcom,sdm845-cdsp-pas:
qcom,sm8150-adsp-pas:
qcom,sm8150-cdsp-pas:
+ qcom,sm8250-cdsp-pas:
must be "cx", "load_state"
+ qcom,sc7180-mpss-pas:
qcom,sm8150-mpss-pas:
must be "cx", "load_state", "mss"
+ qcom,sm8250-adsp-pas:
qcom,sm8150-slpi-pas:
+ qcom,sm8250-slpi-pas:
must be "lcx", "lmx", "load_state"
- memory-region:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
index 88dfa3fc15f7..1f9a62e13ebe 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt
@@ -79,7 +79,7 @@ on the Qualcomm Hexagon core.
"snoc_axi", "mnoc_axi", "qdss"
qcom,sc7180-mss-pil:
must be "iface", "bus", "xo", "snoc_axi", "mnoc_axi",
- "mss_crypto", "mss_nav", "nav"
+ "nav"
qcom,sdm845-mss-pil:
must be "iface", "bus", "mem", "xo", "gpll0_mss",
"snoc_axi", "mnoc_axi", "prng"
@@ -102,6 +102,14 @@ on the Qualcomm Hexagon core.
must be "mss_restart", "pdc_reset" for the modem
sub-system on SC7180, SDM845 SoCs
+For devices where the mba and mpss sub-nodes are not specified, the mba and
+mpss regions should be referenced as follows (see the example below):
+- memory-region:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the reserved-memory for the mba region followed
+ by the mpss region
+
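+As a sketch only (the reserved-memory labels are hypothetical):
+
+	memory-region = <&mba_mem>, <&mpss_mem>;
+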
For the compatible strings below the following supplies are required:
"qcom,q6v5-pil"
"qcom,msm8916-mss-pil",
@@ -173,16 +181,15 @@ For the compatible string below the following supplies are required:
For the compatible strings below the following phandle references are required:
"qcom,sc7180-mss-pil"
-- qcom,halt-nav-regs:
+- qcom,spare-regs:
Usage: required
Value type: <prop-encoded-array>
- Definition: reference to a list of 2 phandles with one offset each for
- the modem sub-system running on SC7180 SoC. The first
- phandle reference is to the mss clock node followed by the
- offset within register space for nav halt register. The
- second phandle reference is to a syscon representing TCSR
- followed by the offset within syscon for conn_box_spare0
- register.
+ Definition: a phandle reference to a syscon representing TCSR followed
+ by the offset within syscon for conn_box_spare0 register
+ used by the modem sub-system running on SC7180 SoC.
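+
+	As an illustration only (both the syscon label and the register
+	offset below are hypothetical):
+
+		qcom,spare-regs = <&tcsr_regs 0xb3e4>;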
+
+On platforms which do not have TrustZone, the Hexagon node must contain an
+iommus property as described in ../iommu/iommu.txt.
= SUBNODES:
The Hexagon node must contain two subnodes, named "mba" and "mpss" representing
diff --git a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
index c0d83865e933..4ffa25268fcc 100644
--- a/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/st,stm32-rproc.yaml
@@ -25,25 +25,23 @@ properties:
maxItems: 3
resets:
- maxItems: 1
+ maxItems: 1
st,syscfg-holdboot:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: remote processor reset hold boot
- Phandle of syscon block.
- The offset of the hold boot setting register.
- The field mask of the hold boot.
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
maxItems: 1
st,syscfg-tz:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
description:
Reference to the system configuration which holds the RCC trust zone mode
- Phandle of syscon block.
- The offset of the RCC trust zone mode register.
- The field mask of the RCC trust zone mode.
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
maxItems: 1
interrupts:
@@ -90,8 +88,7 @@ properties:
(see ../reserved-memory/reserved-memory.txt)
st,syscfg-pdds:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
description: |
Reference to the system configuration which holds the remote
1st cell: phandle to syscon block
diff --git a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
index bac4afa3b197..4dd20de6977f 100644
--- a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+++ b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
@@ -77,6 +77,8 @@ Regions in the /reserved-memory node may be referenced by other device
nodes by adding a memory-region property to the device node.
memory-region (optional) - phandle, specifier pairs to children of /reserved-memory
+memory-region-names (optional) - a list of names, one for each corresponding
+ entry in the memory-region property
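+
+As a sketch only (the labels and names are hypothetical), the two properties
+pair up as:
+
+	memory-region = <&multimedia_reserved>, <&dma_pool>;
+	memory-region-names = "multimedia", "dma";
+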
Example
-------
diff --git a/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml b/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
index 512a33bdb208..dfce6738b033 100644
--- a/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
+++ b/Documentation/devicetree/bindings/reset/brcm,bcm7216-pcie-sata-rescal.yaml
@@ -7,7 +7,9 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: BCM7216 RESCAL reset controller
-description: This document describes the BCM7216 RESCAL reset controller which is responsible for controlling the reset of the SATA and PCIe0/1 instances on BCM7216.
+description: This document describes the BCM7216 RESCAL reset controller
+ which is responsible for controlling the reset of the SATA and PCIe0/1
+ instances on BCM7216.
maintainers:
- Florian Fainelli <f.fainelli@gmail.com>
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
index c2489e41a801..e10502d9153e 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
@@ -9,6 +9,8 @@ Required properties:
- For i.MX7 SoCs should be "fsl,imx7d-src", "syscon"
- For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon"
- For i.MX8MM SoCs should be "fsl,imx8mm-src", "fsl,imx8mq-src", "syscon"
+ - For i.MX8MN SoCs should be "fsl,imx8mn-src", "fsl,imx8mq-src", "syscon"
+ - For i.MX8MP SoCs should be "fsl,imx8mp-src", "syscon"
- reg: should be register base and length as documented in the
datasheet
- interrupts: Should contain SRC interrupt
@@ -49,4 +51,6 @@ Example:
For list of all valid reset indices see
<dt-bindings/reset/imx7-reset.h> for i.MX7,
<dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ and
-<dt-bindings/reset/imx8mq-reset.h> for i.MX8MM
+<dt-bindings/reset/imx8mq-reset.h> for i.MX8MM and
+<dt-bindings/reset/imx8mq-reset.h> for i.MX8MN and
+<dt-bindings/reset/imx8mp-reset.h> for i.MX8MP
diff --git a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
index 8ac437282659..6b2d56cc3f38 100644
--- a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
+++ b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
@@ -21,8 +21,7 @@ properties:
intel,global-reset:
description: Global reset register offset and bit offset.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
+ $ref: /schemas/types.yaml#/definitions/uint32-array
items:
- description: Register offset
- description: Register bit offset
diff --git a/Documentation/devicetree/bindings/reset/renesas,rst.yaml b/Documentation/devicetree/bindings/reset/renesas,rst.yaml
index b5de1d196a13..4c2b429ac702 100644
--- a/Documentation/devicetree/bindings/reset/renesas,rst.yaml
+++ b/Documentation/devicetree/bindings/reset/renesas,rst.yaml
@@ -23,6 +23,7 @@ description: |
properties:
compatible:
enum:
+ - renesas,r8a7742-rst # RZ/G1H
- renesas,r8a7743-rst # RZ/G1M
- renesas,r8a7744-rst # RZ/G1N
- renesas,r8a7745-rst # RZ/G1E
diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml
index 04819ad379c2..f80ba2c66f71 100644
--- a/Documentation/devicetree/bindings/riscv/cpus.yaml
+++ b/Documentation/devicetree/bindings/riscv/cpus.yaml
@@ -40,24 +40,18 @@ properties:
and identifies the type of the hart.
mmu-type:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum:
- - riscv,sv32
- - riscv,sv39
- - riscv,sv48
description:
Identifies the MMU address translation mode used on this
hart. These values originate from the RISC-V Privileged
Specification document, available from
https://riscv.org/specifications/
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum:
+ - riscv,sv32
+ - riscv,sv39
+ - riscv,sv48
riscv,isa:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/string"
- - enum:
- - rv64imac
- - rv64imafdc
description:
Identifies the specific RISC-V instruction set architecture
supported by the hart. These are documented in the RISC-V
@@ -67,6 +61,10 @@ properties:
While the isa strings in ISA specification are case
insensitive, letters in the riscv,isa string must be all
lowercase to simplify parsing.
+ $ref: "/schemas/types.yaml#/definitions/string"
+ enum:
+ - rv64imac
+ - rv64imafdc
# RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
timebase-frequency: false
diff --git a/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml b/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
index b95cb017f469..eff9df4b856a 100644
--- a/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/renesas,sh-rtc.yaml
@@ -43,6 +43,9 @@ properties:
items:
enum: [ fck, rtc_x1, rtc_x3, extal ]
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -50,6 +53,7 @@ required:
- interrupt-names
- clocks
- clock-names
+ - power-domains
additionalProperties: false
@@ -68,5 +72,6 @@ examples:
interrupt-names = "alarm", "period", "carry";
clocks = <&mstp6_clks R7S72100_CLK_RTC>, <&rtc_x1_clk>,
<&rtc_x3_clk>, <&extal_clk>;
+ power-domains = <&cpg_clocks>;
clock-names = "fck", "rtc_x1", "rtc_x3", "extal";
};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mxc.txt b/Documentation/devicetree/bindings/rtc/rtc-mxc.txt
deleted file mode 100644
index 5bcd31d995b0..000000000000
--- a/Documentation/devicetree/bindings/rtc/rtc-mxc.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-* Real Time Clock of the i.MX SoCs
-
-RTC controller for the i.MX SoCs
-
-Required properties:
-- compatible: Should be "fsl,imx1-rtc" or "fsl,imx21-rtc".
-- reg: physical base address of the controller and length of memory mapped
- region.
-- interrupts: IRQ line for the RTC.
-- clocks: should contain two entries:
- * one for the input reference
- * one for the the SoC RTC
-- clock-names: should contain:
- * "ref" for the input reference clock
- * "ipg" for the SoC RTC clock
-
-Example:
-
-rtc@10007000 {
- compatible = "fsl,imx21-rtc";
- reg = <0x10007000 0x1000>;
- interrupts = <22>;
- clocks = <&clks IMX27_CLK_CKIL>,
- <&clks IMX27_CLK_RTC_IPG_GATE>;
- clock-names = "ref", "ipg";
-};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml b/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml
new file mode 100644
index 000000000000..4f263fa6fd0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-mxc.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/rtc-mxc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Real Time Clock of the i.MX SoCs
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+maintainers:
+ - Philippe Reynes <tremyfr@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx1-rtc
+ - fsl,imx21-rtc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: input reference
+ - description: the SoC RTC clock
+
+ clock-names:
+ items:
+ - const: ref
+ - const: ipg
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx27-clock.h>
+
+ rtc@10007000 {
+ compatible = "fsl,imx21-rtc";
+ reg = <0x10007000 0x1000>;
+ interrupts = <22>;
+ clocks = <&clks IMX27_CLK_CKIL>,
+ <&clks IMX27_CLK_RTC_IPG_GATE>;
+ clock-names = "ref", "ipg";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mxc_v2.txt b/Documentation/devicetree/bindings/rtc/rtc-mxc_v2.txt
deleted file mode 100644
index 79d7e87b0d91..000000000000
--- a/Documentation/devicetree/bindings/rtc/rtc-mxc_v2.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* i.MX53 Secure Real Time Clock (SRTC)
-
-Required properties:
-- compatible: should be: "fsl,imx53-rtc"
-- reg: physical base address of the controller and length of memory mapped
- region.
-- clocks: should contain the phandle for the rtc clock
-- interrupts: rtc alarm interrupt
-
-Example:
-
-rtc@53fa4000 {
- compatible = "fsl,imx53-rtc";
- reg = <0x53fa4000 0x4000>;
- interrupts = <24>;
- clocks = <&clks IMX5_CLK_SRTC_GATE>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mxc_v2.yaml b/Documentation/devicetree/bindings/rtc/rtc-mxc_v2.yaml
new file mode 100644
index 000000000000..2d1a30663d72
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-mxc_v2.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/rtc-mxc_v2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: i.MX53 Secure Real Time Clock (SRTC)
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+maintainers:
+ - Patrick Bruenn <p.bruenn@beckhoff.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx53-rtc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx5-clock.h>
+
+ rtc@53fa4000 {
+ compatible = "fsl,imx53-rtc";
+ reg = <0x53fa4000 0x4000>;
+ interrupts = <24>;
+ clocks = <&clks IMX5_CLK_SRTC_GATE>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
index 48c6cafca90c..5456604b1c14 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -32,16 +32,15 @@ properties:
maxItems: 1
st,syscfg:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/phandle-array"
- - items:
- minItems: 3
- maxItems: 3
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ items:
+ minItems: 3
+ maxItems: 3
description: |
- Phandle/offset/mask triplet. The phandle to pwrcfg used to
- access control register at offset, and change the dbp (Disable Backup
- Protection) bit represented by the mask, mandatory to disable/enable backup
- domain (RTC registers) write protection.
+ Phandle/offset/mask triplet. The phandle to pwrcfg used to
+ access control register at offset, and change the dbp (Disable Backup
+ Protection) bit represented by the mask, mandatory to disable/enable backup
+ domain (RTC registers) write protection.
assigned-clocks:
description: |
@@ -78,14 +77,14 @@ allOf:
const: st,stm32h7-rtc
then:
- properties:
- clocks:
- minItems: 2
- maxItems: 2
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
- required:
- - clock-names
- - st,syscfg
+ required:
+ - clock-names
+ - st,syscfg
- if:
properties:
@@ -94,16 +93,16 @@ allOf:
const: st,stm32mp1-rtc
then:
- properties:
- clocks:
- minItems: 2
- maxItems: 2
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
- assigned-clocks: false
- assigned-clock-parents: false
+ assigned-clocks: false
+ assigned-clock-parents: false
- required:
- - clock-names
+ required:
+ - clock-names
required:
- compatible
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
deleted file mode 100644
index 55700f20f6ee..000000000000
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-* UART (Universal Asynchronous Receiver/Transmitter)
-
-Required properties:
-- compatible : one of:
- - "ns8250"
- - "ns16450"
- - "ns16550a"
- - "ns16550"
- - "ns16750"
- - "ns16850"
- - For Tegra20, must contain "nvidia,tegra20-uart"
- - For other Tegra, must contain '"nvidia,<chip>-uart",
- "nvidia,tegra20-uart"' where <chip> is tegra30, tegra114, tegra124,
- tegra132, or tegra210.
- - "nxp,lpc3220-uart"
- - "ralink,rt2880-uart"
- - For MediaTek BTIF, must contain '"mediatek,<chip>-btif",
- "mediatek,mtk-btif"' where <chip> is mt7622, mt7623.
- - "altr,16550-FIFO32"
- - "altr,16550-FIFO64"
- - "altr,16550-FIFO128"
- - "fsl,16550-FIFO64"
- - "fsl,ns16550"
- - "intel,xscale-uart"
- - "ti,da830-uart"
- - "aspeed,ast2400-vuart"
- - "aspeed,ast2500-vuart"
- - "nuvoton,npcm750-uart"
- - "serial" if the port type is unknown.
-- reg : offset and length of the register set for the device.
-- interrupts : should contain uart interrupt.
-- clock-frequency : the input clock frequency for the UART
- or
- clocks phandle to refer to the clk used as per Documentation/devicetree
- /bindings/clock/clock-bindings.txt
-
-Optional properties:
-- current-speed : the current active speed of the UART.
-- reg-offset : offset to apply to the mapbase from the start of the registers.
-- reg-shift : quantity to shift the register offsets by.
-- reg-io-width : the size (in bytes) of the IO accesses that should be
- performed on the device. There are some systems that require 32-bit
- accesses to the UART (e.g. TI davinci).
-- used-by-rtas : set to indicate that the port is in use by the OpenFirmware
- RTAS and should not be registered.
-- no-loopback-test: set to indicate that the port does not implements loopback
- test mode
-- fifo-size: the fifo size of the UART.
-- auto-flow-control: one way to enable automatic flow control support. The
- driver is allowed to detect support for the capability even without this
- property.
-- tx-threshold: Specify the TX FIFO low water indication for parts with
- programmable TX FIFO thresholds.
-- resets : phandle + reset specifier pairs
-- overrun-throttle-ms : how long to pause uart rx when input overrun is encountered.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
- line respectively. It will use specified GPIO instead of the peripheral
- function pin for the UART feature. If unsure, don't specify this property.
-- aspeed,sirq-polarity-sense: Only applicable to aspeed,ast2500-vuart.
- phandle to aspeed,ast2500-scu compatible syscon alongside register offset
- and bit number to identify how the SIRQ polarity should be configured.
- One possible data source is the LPC/eSPI mode bit.
- Example: aspeed,sirq-polarity-sense = <&syscon 0x70 25>
-
-Note:
-* fsl,ns16550:
- ------------
- Freescale DUART is very similar to the PC16552D (and to a
- pair of NS16550A), albeit with some nonstandard behavior such as
- erratum A-004737 (relating to incorrect BRK handling).
-
- Represents a single port that is compatible with the DUART found
- on many Freescale chips (examples include mpc8349, mpc8548,
- mpc8641d, p4080 and ls2085a).
-
-Example:
-
- uart@80230000 {
- compatible = "ns8250";
- reg = <0x80230000 0x100>;
- clock-frequency = <3686400>;
- interrupts = <10>;
- reg-shift = <2>;
- };
-
-Example for OMAP UART using GPIO-based modem control signals:
-
- uart4: serial@49042000 {
- compatible = "ti,omap3-uart";
- reg = <0x49042000 0x400>;
- interrupts = <80>;
- ti,hwmods = "uart4";
- clock-frequency = <48000000>;
- cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
- rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
- dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
- dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
- dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
- rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
- };
diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
new file mode 100644
index 000000000000..c1d4c196f005
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/8250.yaml
@@ -0,0 +1,233 @@
+# Copyright 2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/8250.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: UART (Universal Asynchronous Receiver/Transmitter) bindings
+
+maintainers:
+ - devicetree@vger.kernel.org
+
+allOf:
+ - $ref: /schemas/serial.yaml#
+ - if:
+ required:
+ - aspeed,sirq-polarity-sense
+ then:
+ properties:
+ compatible:
+ const: aspeed,ast2500-vuart
+ - if:
+ properties:
+ compatible:
+ const: mrvl,mmp-uart
+ then:
+ properties:
+ reg-shift:
+ const: 2
+ required:
+ - reg-shift
+ - if:
+ not:
+ properties:
+ compatible:
+ items:
+ - enum:
+ - ns8250
+ - ns16450
+ - ns16550
+ - ns16550a
+ then:
+ anyOf:
+ - required: [ clock-frequency ]
+ - required: [ clocks ]
+
+properties:
+ compatible:
+ oneOf:
+ - const: ns8250
+ - const: ns16450
+ - const: ns16550
+ - const: ns16550a
+ - const: ns16850
+ - const: aspeed,ast2400-vuart
+ - const: aspeed,ast2500-vuart
+ - const: intel,xscale-uart
+ - const: mrvl,pxa-uart
+ - const: nuvoton,npcm750-uart
+ - const: nvidia,tegra20-uart
+ - const: nxp,lpc3220-uart
+ - items:
+ - enum:
+ - altr,16550-FIFO32
+ - altr,16550-FIFO64
+ - altr,16550-FIFO128
+ - fsl,16550-FIFO64
+ - fsl,ns16550
+ - andestech,uart16550
+ - nxp,lpc1850-uart
+ - opencores,uart16550-rtlsvn105
+ - ti,da830-uart
+ - const: ns16550a
+ - items:
+ - enum:
+ - ns16750
+ - cavium,octeon-3860-uart
+ - xlnx,xps-uart16550-2.00.b
+ - ralink,rt2880-uart
+ - enum:
+ - ns16550 # Deprecated, unless the FIFO really is broken
+ - ns16550a
+ - items:
+ - enum:
+ - ralink,mt7620a-uart
+ - ralink,rt3052-uart
+ - ralink,rt3883-uart
+ - const: ralink,rt2880-uart
+ - enum:
+ - ns16550 # Deprecated, unless the FIFO really is broken
+ - ns16550a
+ - items:
+ - enum:
+ - mediatek,mt7622-btif
+ - mediatek,mt7623-btif
+ - const: mediatek,mtk-btif
+ - items:
+ - const: mrvl,mmp-uart
+ - const: intel,xscale-uart
+ - items:
+ - enum:
+ - nvidia,tegra30-uart
+ - nvidia,tegra114-uart
+ - nvidia,tegra124-uart
+ - nvidia,tegra186-uart
+ - nvidia,tegra194-uart
+ - nvidia,tegra210-uart
+ - const: nvidia,tegra20-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency: true
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ current-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The current active speed of the UART.
+
+ reg-offset:
+ description: |
+ Offset to apply to the mapbase from the start of the registers.
+
+ reg-shift:
+ description: Quantity to shift the register offsets by.
+
+ reg-io-width:
+ description: |
+ The size (in bytes) of the IO accesses that should be performed on the
+ device. There are some systems that require 32-bit accesses to the
+ UART (e.g. TI davinci).
+
+ used-by-rtas:
+ type: boolean
+ description: |
+ Set to indicate that the port is in use by the OpenFirmware RTAS and
+ should not be registered.
+
+ no-loopback-test:
+ type: boolean
+ description: |
+ Set to indicate that the port does not implement loopback test mode.
+
+ fifo-size:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: The fifo size of the UART.
+
+ auto-flow-control:
+ type: boolean
+ description: |
+ One way to enable automatic flow control support. The driver is
+ allowed to detect support for the capability even without this
+ property.
+
+ tx-threshold:
+    $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Specify the TX FIFO low water indication for parts with programmable
+ TX FIFO thresholds.
+
+ overrun-throttle-ms:
+ description: |
+      How long to pause UART RX when an input overrun is encountered.
+
+ rts-gpios: true
+ cts-gpios: true
+ dtr-gpios: true
+ dsr-gpios: true
+ rng-gpios: true
+ dcd-gpios: true
+
+ aspeed,sirq-polarity-sense:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ Phandle to aspeed,ast2500-scu compatible syscon alongside register
+ offset and bit number to identify how the SIRQ polarity should be
+ configured. One possible data source is the LPC/eSPI mode bit. Only
+ applicable to aspeed,ast2500-vuart.
+
+required:
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ serial@80230000 {
+ compatible = "ns8250";
+ reg = <0x80230000 0x100>;
+ interrupts = <10>;
+ reg-shift = <2>;
+ clock-frequency = <48000000>;
+ };
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ serial@49042000 {
+ compatible = "andestech,uart16550", "ns16550a";
+ reg = <0x49042000 0x400>;
+ interrupts = <80>;
+ clock-frequency = <48000000>;
+ cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
+ rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+ dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+ dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
+ dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+ rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+ };
+ - |
+ #include <dt-bindings/clock/aspeed-clock.h>
+ serial@1e787000 {
+ compatible = "aspeed,ast2500-vuart";
+ reg = <0x1e787000 0x40>;
+ reg-shift = <2>;
+ interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ no-loopback-test;
+ aspeed,sirq-polarity-sense = <&syscon 0x70 25>;
+ };
+
+...
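A minimal sketch of how the reg-shift and reg-io-width properties above combine for parts that need 32-bit register accesses (e.g. TI DaVinci); the unit address, interrupt number and clock rate here are illustrative assumptions rather than values taken from this schema:

    serial@1d0d000 {
        compatible = "ti,da830-uart", "ns16550a";
        reg = <0x1d0d000 0x100>;
        interrupts = <25>;
        clock-frequency = <24000000>;
        reg-shift = <2>;        /* registers spaced on 4-byte boundaries */
        reg-io-width = <4>;     /* perform 32-bit MMIO accesses */
    };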
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index d4178ab0d675..75ebc9952a99 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -24,18 +24,18 @@ properties:
oneOf:
- description: Always-on power domain UART controller
items:
- - enum:
+ - enum:
+ - amlogic,meson6-uart
+ - amlogic,meson8-uart
+ - amlogic,meson8b-uart
+ - amlogic,meson-gx-uart
+ - const: amlogic,meson-ao-uart
+ - description: Everything-Else power domain UART controller
+ enum:
- amlogic,meson6-uart
- amlogic,meson8-uart
- amlogic,meson8b-uart
- amlogic,meson-gx-uart
- - const: amlogic,meson-ao-uart
- - description: Everything-Else power domain UART controller
- enum:
- - amlogic,meson6-uart
- - amlogic,meson8-uart
- - amlogic,meson8b-uart
- - amlogic,meson-gx-uart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.txt b/Documentation/devicetree/bindings/serial/ingenic,uart.txt
deleted file mode 100644
index 24ed8769f4af..000000000000
--- a/Documentation/devicetree/bindings/serial/ingenic,uart.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Ingenic SoC UART
-
-Required properties:
-- compatible : One of:
- - "ingenic,jz4740-uart",
- - "ingenic,jz4760-uart",
- - "ingenic,jz4770-uart",
- - "ingenic,jz4775-uart",
- - "ingenic,jz4780-uart",
- - "ingenic,x1000-uart".
-- reg : offset and length of the register set for the device.
-- interrupts : should contain uart interrupt.
-- clocks : phandles to the module & baud clocks.
-- clock-names: tuple listing input clock names.
- Required elements: "baud", "module"
-
-Example:
-
-uart0: serial@10030000 {
- compatible = "ingenic,jz4740-uart";
- reg = <0x10030000 0x100>;
-
- interrupt-parent = <&intc>;
- interrupts = <9>;
-
- clocks = <&ext>, <&cgu JZ4740_CLK_UART0>;
- clock-names = "baud", "module";
-};
diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.yaml b/Documentation/devicetree/bindings/serial/ingenic,uart.yaml
new file mode 100644
index 000000000000..c023d650e9c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/ingenic,uart.yaml
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/ingenic,uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs UART controller devicetree bindings
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+properties:
+ $nodename:
+ pattern: "^serial@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-uart
+ - ingenic,jz4760-uart
+ - ingenic,jz4780-uart
+ - ingenic,x1000-uart
+ - items:
+ - enum:
+ - ingenic,jz4770-uart
+ - ingenic,jz4775-uart
+ - const: ingenic,jz4760-uart
+ - items:
+ - const: ingenic,jz4725b-uart
+ - const: ingenic,jz4740-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Baud clock
+ - description: UART module clock
+
+ clock-names:
+ items:
+ - const: baud
+ - const: module
+
+ dmas:
+ items:
+ - description: DMA controller phandle and request line for RX
+ - description: DMA controller phandle and request line for TX
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4780-cgu.h>
+ #include <dt-bindings/dma/jz4780-dma.h>
+ #include <dt-bindings/gpio/gpio.h>
+ serial@10032000 {
+ compatible = "ingenic,jz4780-uart";
+ reg = <0x10032000 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <49>;
+
+ clocks = <&ext>, <&cgu JZ4780_CLK_UART2>;
+ clock-names = "baud", "module";
+
+ dmas = <&dma JZ4780_DMA_UART2_RX 0xffffffff>,
+ <&dma JZ4780_DMA_UART2_TX 0xffffffff>;
+ dma-names = "rx", "tx";
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ reset-gpios = <&gpf 8 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&wlan0_power>;
+ device-wakeup-gpios = <&gpf 5 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpf 6 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpf 4 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/serial/mrvl-serial.txt b/Documentation/devicetree/bindings/serial/mrvl-serial.txt
deleted file mode 100644
index d744340de887..000000000000
--- a/Documentation/devicetree/bindings/serial/mrvl-serial.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-PXA UART controller
-
-Required properties:
-- compatible : should be "mrvl,mmp-uart" or "mrvl,pxa-uart".
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
index c1091a923a89..0fa8e3e43bf8 100644
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
@@ -21,6 +21,8 @@ Optional properties:
the second cell is used to specify the GPIO polarity:
0 = active high,
1 = active low.
+- irda-mode-ports: An array that lists the indices of the ports that
+  should operate in IrDA mode.
Example:
sc16is750: sc16is750@51 {
@@ -55,6 +57,8 @@ Optional properties:
the second cell is used to specify the GPIO polarity:
0 = active high,
1 = active low.
+- irda-mode-ports: An array that lists the indices of the ports that
+  should operate in IrDA mode.
Example:
sc16is750: sc16is750@0 {
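A minimal sketch of the new irda-mode-ports property in such a node; the I2C address, clock phandle and interrupt wiring below are illustrative assumptions, not taken from this patch:

    sc16is752: sc16is752@4d {
        compatible = "nxp,sc16is752";
        reg = <0x4d>;
        clocks = <&clk20m>;
        interrupt-parent = <&gpio3>;
        interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
        /* only channel B (index 1) drives an IrDA transceiver */
        irda-mode-ports = <1>;
    };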
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
index 1a64d59152aa..c23c93b400f0 100644
--- a/Documentation/devicetree/bindings/serial/pl011.yaml
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -88,17 +88,15 @@ properties:
description:
Rate at which poll occurs when auto-poll is set.
default 100ms.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - default: 100
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 100
poll-timeout-ms:
description:
Poll timeout when auto-poll is set, default
3000ms.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - default: 3000
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 3000
required:
- compatible
diff --git a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
deleted file mode 100644
index 7d65126bd1d7..000000000000
--- a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* Qualcomm Atheros AR9330 High-Speed UART
-
-Required properties:
-
-- compatible: Must be "qca,ar9330-uart"
-
-- reg: Specifies the physical base address of the controller and
- the length of the memory mapped region.
-
-- interrupts: Specifies the interrupt source of the parent interrupt
- controller. The format of the interrupt specifier depends on the
- parent interrupt controller.
-
-Additional requirements:
-
- Each UART port must have an alias correctly numbered in "aliases"
- node.
-
-Example:
-
- aliases {
- serial0 = &uart0;
- };
-
- uart0: uart@18020000 {
- compatible = "qca,ar9330-uart";
- reg = <0x18020000 0x14>;
-
- interrupt-parent = <&intc>;
- interrupts = <3>;
- };
diff --git a/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml
new file mode 100644
index 000000000000..a344369285b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/qca,ar9330-uart.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/serial/qca,ar9330-uart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Atheros AR9330 High-Speed UART
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+allOf:
+ - $ref: /schemas/serial.yaml#
+
+properties:
+ compatible:
+ const: qca,ar9330-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: uart
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ serial@18020000 {
+ compatible = "qca,ar9330-uart";
+ reg = <0x18020000 0x14>;
+ clocks = <&ref>;
+ clock-names = "uart";
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
new file mode 100644
index 000000000000..82aefdb0d45e
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/renesas,em-uart.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/serial/renesas,em-uart.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas EMMA Mobile UART Interface
+
+maintainers:
+ - Magnus Damm <magnus.damm@gmail.com>
+
+allOf:
+ - $ref: serial.yaml#
+
+properties:
+ compatible:
+ const: renesas,em-uart
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: sclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ uart0: serial@e1020000 {
+ compatible = "renesas,em-uart";
+ reg = <0xe1020000 0x38>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&usia_u0_sclk>;
+ clock-names = "sclk";
+ };
diff --git a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
index 91101521ef07..6b04c0451d41 100644
--- a/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,hscif.yaml
@@ -24,6 +24,7 @@ properties:
- items:
- enum:
+ - renesas,hscif-r8a7742 # RZ/G1H
- renesas,hscif-r8a7743 # RZ/G1M
- renesas,hscif-r8a7744 # RZ/G1N
- renesas,hscif-r8a7745 # RZ/G1E
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 70392b9bd977..570b379f9f19 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -33,6 +33,7 @@ properties:
- items:
- enum:
+ - renesas,scif-r8a7742 # RZ/G1H
- renesas,scif-r8a7743 # RZ/G1M
- renesas,scif-r8a7744 # RZ/G1N
- renesas,scif-r8a7745 # RZ/G1E
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
index b28bcb268854..78b8e20dd34d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scifa.yaml
@@ -24,13 +24,14 @@ properties:
- items:
- enum:
- - renesas,scifa-r8a7743 # R8A7743 RZ/G1M
- - renesas,scifa-r8a7744 # R8A7744 RZ/G1N
- - renesas,scifa-r8a7745 # R8A7745 RZ/G1E
- - renesas,scifa-r8a7790 # R8A7790 R-Car H2
- - renesas,scifa-r8a7791 # R8A7791 R-Car M2-W
- - renesas,scifa-r8a7793 # R8A7793 R-Car M2-N
- - renesas,scifa-r8a7794 # R8A7794 R-Car E2
+ - renesas,scifa-r8a7742 # RZ/G1H
+ - renesas,scifa-r8a7743 # RZ/G1M
+ - renesas,scifa-r8a7744 # RZ/G1N
+ - renesas,scifa-r8a7745 # RZ/G1E
+ - renesas,scifa-r8a7790 # R-Car H2
+ - renesas,scifa-r8a7791 # R-Car M2-W
+ - renesas,scifa-r8a7793 # R-Car M2-N
+ - renesas,scifa-r8a7794 # R-Car E2
- const: renesas,rcar-gen2-scifa # R-Car Gen2 and RZ/G1
- const: renesas,scifa # generic SCIFA compatible UART
diff --git a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
index 57205cb1dcd4..b083970c16a9 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scifb.yaml
@@ -24,6 +24,7 @@ properties:
- items:
- enum:
+ - renesas,scifb-r8a7742 # RZ/G1H
- renesas,scifb-r8a7743 # RZ/G1M
- renesas,scifb-r8a7744 # RZ/G1N
- renesas,scifb-r8a7745 # RZ/G1E
diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml
index d4beaf11222d..fe90569475e1 100644
--- a/Documentation/devicetree/bindings/serial/rs485.yaml
+++ b/Documentation/devicetree/bindings/serial/rs485.yaml
@@ -6,40 +6,43 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: RS485 serial communications Bindings
-description: The RTS signal is capable of automatically controlling
- line direction for the built-in half-duplex mode.
- The properties described hereafter shall be given to a
- half-duplex capable UART node.
+description: The RTS signal is capable of automatically controlling line
+ direction for the built-in half-duplex mode. The properties described
+ hereafter shall be given to a half-duplex capable UART node.
maintainers:
- - Rob Herring <robh@kernel.org>
+ - Rob Herring <robh@kernel.org>
properties:
rs485-rts-delay:
description: prop-encoded-array <a b>
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - items:
- items:
- - description:
- Delay between rts signal and beginning of data sent in milliseconds.
- It corresponds to the delay before sending data.
- default: 0
- maximum: 1000
- - description:
- Delay between end of data sent and rts signal in milliseconds.
- It corresponds to the delay after sending data and actual release of the line.
- default: 0
- maximum: 1000
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+ items:
+ - description: Delay between rts signal and beginning of data sent in
+ milliseconds. It corresponds to the delay before sending data.
+ default: 0
+ maximum: 1000
+ - description: Delay between end of data sent and rts signal in milliseconds.
+ It corresponds to the delay after sending data and actual release
+ of the line.
+ default: 0
+ maximum: 1000
rs485-rts-active-low:
description: drive RTS low when sending (default is high).
$ref: /schemas/types.yaml#/definitions/flag
linux,rs485-enabled-at-boot-time:
- description: enables the rs485 feature at boot time. It can be disabled later with proper ioctl.
+ description: enables the rs485 feature at boot time. It can be disabled
+ later with proper ioctl.
$ref: /schemas/types.yaml#/definitions/flag
rs485-rx-during-tx:
- description: enables the receiving of data even while sending data.
- $ref: /schemas/types.yaml#/definitions/flag
+ description: enables the receiving of data even while sending data.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ rs485-term-gpios:
+ description: GPIO pin to enable RS485 bus termination.
+ maxItems: 1
+...
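A minimal sketch of these RS485 properties, including the new rs485-term-gpios, applied to a half-duplex capable UART node; the UART label, GPIO controller and pin number are illustrative assumptions:

    &uart4 {
        linux,rs485-enabled-at-boot-time;
        rs485-rts-delay = <10 20>;      /* 10 ms before, 20 ms after the data */
        rs485-rts-active-low;
        rs485-rx-during-tx;
        rs485-term-gpios = <&gpioe 4 GPIO_ACTIVE_HIGH>;
    };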
diff --git a/Documentation/devicetree/bindings/serial/samsung_uart.yaml b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
index 9d2ce347875b..32a5e1e30833 100644
--- a/Documentation/devicetree/bindings/serial/samsung_uart.yaml
+++ b/Documentation/devicetree/bindings/serial/samsung_uart.yaml
@@ -29,6 +29,14 @@ properties:
reg:
maxItems: 1
+ reg-io-width:
+ description: |
+ The size (in bytes) of the IO accesses that should be performed
+ on the device.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 1, 4 ]
+
clocks:
minItems: 2
maxItems: 5
@@ -51,9 +59,8 @@ properties:
samsung,uart-fifosize:
description: The fifo size supported by the UART channel.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [16, 64, 256]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [16, 64, 256]
required:
- compatible
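A minimal sketch of the new reg-io-width property on a Samsung UART node; the compatible string, unit address, interrupt and clock wiring are assumptions based on typical Exynos users of this binding, not part of this hunk:

    serial@12c00000 {
        compatible = "samsung,exynos4210-uart";
        reg = <0x12c00000 0x100>;
        interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clock CLK_UART0>, <&clock CLK_SCLK_UART0>;
        clock-names = "uart", "clk_uart_baud0";
        reg-io-width = <4>;             /* registers require 32-bit accesses */
        samsung,uart-fifosize = <64>;
    };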
diff --git a/Documentation/devicetree/bindings/serial/serial.yaml b/Documentation/devicetree/bindings/serial/serial.yaml
index 53204d90d0c7..8645d0e526b4 100644
--- a/Documentation/devicetree/bindings/serial/serial.yaml
+++ b/Documentation/devicetree/bindings/serial/serial.yaml
@@ -67,6 +67,14 @@ properties:
(wired and enabled by pinmux configuration). This depends on both the
UART hardware and the board wiring.
+ rx-tx-swap:
+ type: boolean
+ description: RX and TX pins are swapped.
+
+ cts-rts-swap:
+ type: boolean
+ description: CTS and RTS pins are swapped.
+
if:
required:
- uart-has-rtscts
diff --git a/Documentation/devicetree/bindings/serial/sifive-serial.yaml b/Documentation/devicetree/bindings/serial/sifive-serial.yaml
index e8d3aeda1202..92283f693de0 100644
--- a/Documentation/devicetree/bindings/serial/sifive-serial.yaml
+++ b/Documentation/devicetree/bindings/serial/sifive-serial.yaml
@@ -55,7 +55,7 @@ examples:
compatible = "sifive,fu540-c000-uart", "sifive,uart0";
interrupt-parent = <&plic0>;
interrupts = <80>;
- reg = <0x0 0x10010000 0x0 0x1000>;
+ reg = <0x10010000 0x1000>;
clocks = <&prci PRCI_CLK_TLCLK>;
};
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
index 238c44192d31..75b8521eb7cb 100644
--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
@@ -48,6 +48,12 @@ properties:
minItems: 1
maxItems: 2
+ cts-gpios:
+ maxItems: 1
+
+ rts-gpios:
+ maxItems: 1
+
wakeup-source: true
rs485-rts-delay: true
@@ -55,6 +61,14 @@ properties:
linux,rs485-enabled-at-boot-time: true
rs485-rx-during-tx: true
+if:
+ required:
+ - st,hw-flow-ctrl
+then:
+ properties:
+ cts-gpios: false
+ rts-gpios: false
+
required:
- compatible
- reg
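A minimal sketch of the new cts-gpios/rts-gpios properties, which the if/then block above makes mutually exclusive with st,hw-flow-ctrl; the USART label, GPIO bank and pin numbers are illustrative assumptions:

    &usart2 {
        /* GPIO flow control; not allowed together with st,hw-flow-ctrl */
        cts-gpios = <&gpioa 11 GPIO_ACTIVE_LOW>;
        rts-gpios = <&gpioa 12 GPIO_ACTIVE_LOW>;
    };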
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
index cb008fd188d8..02b2d5ba01d6 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
@@ -26,11 +26,11 @@ properties:
compatible:
oneOf:
- items:
- - enum:
- - amlogic,meson8-canvas
- - amlogic,meson8b-canvas
- - amlogic,meson8m2-canvas
- - const: amlogic,canvas
+ - enum:
+ - amlogic,meson8-canvas
+ - amlogic,meson8b-canvas
+ - amlogic,meson8m2-canvas
+ - const: amlogic,canvas
- const: amlogic,canvas # GXBB and newer SoCs
reg:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
index 4fc571e78f01..953add19e937 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
@@ -19,6 +19,7 @@ power-domains.
"qcom,sc7180-aoss-qmp"
"qcom,sdm845-aoss-qmp"
"qcom,sm8150-aoss-qmp"
+ "qcom,sm8250-aoss-qmp"
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
index f8fa71f5d84b..2e2f6dc351c0 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
@@ -65,30 +65,30 @@ which uses apr as communication between Apps and QDSP.
compatible = "qcom,apr-v2";
qcom,apr-domain = <APR_DOMAIN_ADSP>;
- q6core@3 {
+ apr-service@3 {
compatible = "qcom,q6core";
reg = <APR_SVC_ADSP_CORE>;
};
- q6afe@4 {
+ apr-service@4 {
compatible = "qcom,q6afe";
reg = <APR_SVC_AFE>;
dais {
#sound-dai-cells = <1>;
- hdmi@1 {
- reg = <1>;
+ dai@1 {
+ reg = <HDMI_RX>;
};
};
};
- q6asm@7 {
+ apr-service@7 {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
...
};
- q6adm@8 {
+ apr-service@8 {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
...
@@ -106,26 +106,26 @@ have no such dependency.
qcom,glink-channels = "apr_audio_svc";
qcom,apr-domain = <APR_DOMAIN_ADSP>;
- q6core {
+ apr-service@3 {
compatible = "qcom,q6core";
reg = <APR_SVC_ADSP_CORE>;
};
- q6afe: q6afe {
+ q6afe: apr-service@4 {
compatible = "qcom,q6afe";
reg = <APR_SVC_AFE>;
qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
...
};
- q6asm: q6asm {
+ q6asm: apr-service@7 {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd";
...
};
- q6adm: q6adm {
+ q6adm: apr-service@8 {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
deleted file mode 100644
index dab7ca9f250c..000000000000
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-Qualcomm Technologies, Inc. GENI Serial Engine QUP Wrapper Controller
-
-Generic Interface (GENI) based Qualcomm Universal Peripheral (QUP) wrapper
-is a programmable module for supporting a wide range of serial interfaces
-like UART, SPI, I2C, I3C, etc. A single QUP module can provide upto 8 Serial
-Interfaces, using its internal Serial Engines. The GENI Serial Engine QUP
-Wrapper controller is modeled as a node with zero or more child nodes each
-representing a serial engine.
-
-Required properties:
-- compatible: Must be "qcom,geni-se-qup".
-- reg: Must contain QUP register address and length.
-- clock-names: Must contain "m-ahb" and "s-ahb".
-- clocks: AHB clocks needed by the device.
-
-Required properties if child node exists:
-- #address-cells: Must be <1> for Serial Engine Address
-- #size-cells: Must be <1> for Serial Engine Address Size
-- ranges: Must be present
-
-Properties for children:
-
-A GENI based QUP wrapper controller node can contain 0 or more child nodes
-representing serial devices. These serial devices can be a QCOM UART, I2C
-controller, SPI controller, or some combination of aforementioned devices.
-Please refer below the child node definitions for the supported serial
-interface protocols.
-
-Qualcomm Technologies Inc. GENI Serial Engine based I2C Controller
-
-Required properties:
-- compatible: Must be "qcom,geni-i2c".
-- reg: Must contain QUP register address and length.
-- interrupts: Must contain I2C interrupt.
-- clock-names: Must contain "se".
-- clocks: Serial engine core clock needed by the device.
-- #address-cells: Must be <1> for I2C device address.
-- #size-cells: Must be <0> as I2C addresses have no size component.
-
-Optional property:
-- clock-frequency: Desired I2C bus clock frequency in Hz.
- When missing default to 100000Hz.
-
-Child nodes should conform to I2C bus binding as described in i2c.txt.
-
-Qualcomm Technologies Inc. GENI Serial Engine based UART Controller
-
-Required properties:
-- compatible: Must be "qcom,geni-debug-uart" or "qcom,geni-uart".
-- reg: Must contain UART register location and length.
-- interrupts: Must contain UART core interrupts.
-- clock-names: Must contain "se".
-- clocks: Serial engine core clock needed by the device.
-
-Qualcomm Technologies Inc. GENI Serial Engine based SPI Controller
-node binding is described in
-Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt.
-
-Example:
- geniqup@8c0000 {
- compatible = "qcom,geni-se-qup";
- reg = <0x8c0000 0x6000>;
- clock-names = "m-ahb", "s-ahb";
- clocks = <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
- <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- i2c0: i2c@a94000 {
- compatible = "qcom,geni-i2c";
- reg = <0xa94000 0x4000>;
- interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "se";
- clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qup_1_i2c_5_active>;
- pinctrl-1 = <&qup_1_i2c_5_sleep>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- uart0: serial@a88000 {
- compatible = "qcom,geni-debug-uart";
- reg = <0xa88000 0x7000>;
- interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "se";
- clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qup_1_uart_3_active>;
- pinctrl-1 = <&qup_1_uart_3_sleep>;
- };
-
- }
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
new file mode 100644
index 000000000000..dee8bb2b69fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/soc/qcom/qcom,geni-se.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: GENI Serial Engine QUP Wrapper Controller
+
+maintainers:
+ - Mukesh Savaliya <msavaliy@codeaurora.org>
+ - Akash Asthana <akashast@codeaurora.org>
+
+description: |
+ Generic Interface (GENI) based Qualcomm Universal Peripheral (QUP) wrapper
+ is a programmable module for supporting a wide range of serial interfaces
+  like UART, SPI, I2C, I3C, etc. A single QUP module can provide up to 8 Serial
+ Interfaces, using its internal Serial Engines. The GENI Serial Engine QUP
+ Wrapper controller is modeled as a node with zero or more child nodes each
+ representing a serial engine.
+
+properties:
+ compatible:
+ enum:
+ - qcom,geni-se-qup
+
+ reg:
+ description: QUP wrapper common register address and length.
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: m-ahb
+ - const: s-ahb
+
+ clocks:
+ items:
+ - description: Master AHB Clock
+ - description: Slave AHB Clock
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 2
+
+ ranges: true
+
+ interconnects:
+ maxItems: 1
+
+ interconnect-names:
+ const: qup-core
+
+required:
+ - compatible
+ - reg
+ - clock-names
+ - clocks
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+
+patternProperties:
+ "^.*@[0-9a-f]+$":
+ type: object
+ description: Common properties for GENI Serial Engine based I2C, SPI and
+ UART controller.
+
+ properties:
+ reg:
+ description: GENI Serial Engine register address and length.
+ maxItems: 1
+
+ clock-names:
+ const: se
+
+ clocks:
+ description: Serial engine core clock needed by the device.
+ maxItems: 1
+
+ interconnects:
+ minItems: 2
+ maxItems: 3
+
+ interconnect-names:
+ minItems: 2
+ items:
+ - const: qup-core
+ - const: qup-config
+ - const: qup-memory
+
+ required:
+ - reg
+ - clock-names
+ - clocks
+
+ "spi@[0-9a-f]+$":
+ type: object
+ description: GENI serial engine based SPI controller. SPI in master mode
+ supports up to 50MHz, up to four chip selects, programmable
+ data path from 4 bits to 32 bits and numerous protocol
+ variants.
+ allOf:
+      - $ref: /schemas/spi/spi-controller.yaml#
+
+ properties:
+ compatible:
+ enum:
+ - qcom,geni-spi
+
+ interrupts:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ required:
+ - compatible
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+
+ "i2c@[0-9a-f]+$":
+ type: object
+ description: GENI serial engine based I2C controller.
+ allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+ properties:
+ compatible:
+ enum:
+ - qcom,geni-i2c
+
+ interrupts:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ clock-frequency:
+ description: Desired I2C bus clock frequency in Hz.
+ default: 100000
+
+ required:
+ - compatible
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+
+ "serial@[0-9a-f]+$":
+ type: object
+ description: GENI Serial Engine based UART Controller.
+ allOf:
+ - $ref: /schemas/serial.yaml#
+
+ properties:
+ compatible:
+ enum:
+ - qcom,geni-uart
+ - qcom,geni-debug-uart
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: UART core irq
+ - description: Wakeup irq (RX GPIO)
+
+ required:
+ - compatible
+ - interrupts
+
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ geniqup@8c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0 0x008c0000 0 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ i2c0: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0xa94000 0 0x4000>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_1_i2c_5_active>;
+ pinctrl-1 = <&qup_1_i2c_5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ uart0: serial@a88000 {
+ compatible = "qcom,geni-uart";
+ reg = <0 0xa88000 0 0x7000>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qup_1_uart_3_active>;
+ pinctrl-1 = <&qup_1_uart_3_sleep>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml b/Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml
new file mode 100644
index 000000000000..a1a8423b2e2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/ti/k3-socinfo.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/ti/k3-socinfo.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments K3 Multicore SoC platforms chipid module
+
+maintainers:
+ - Tero Kristo <t-kristo@ti.com>
+ - Nishanth Menon <nm@ti.com>
+
+description: |
+  Texas Instruments (ARM64) K3 Multicore SoC platforms chipid module is
+  represented by the CTRLMMR_xxx_JTAGID register, which contains information
+  about the SoC ID and revision.
+
+properties:
+ $nodename:
+ pattern: "^chipid@[0-9a-f]+$"
+
+ compatible:
+ items:
+ - const: ti,am654-chipid
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ chipid@43000014 {
+ compatible = "ti,am654-chipid";
+ reg = <0x43000014 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/adi,adau7118.yaml b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
index 76ee695097bf..fb78967ee17b 100644
--- a/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
+++ b/Documentation/devicetree/bindings/sound/adi,adau7118.yaml
@@ -35,23 +35,21 @@ properties:
adi,decimation-ratio:
description: |
This property sets the decimation ratio of PDM to PCM audio data.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [64, 32, 16]
- default: 64
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [64, 32, 16]
+ default: 64
adi,pdm-clk-map:
description: |
The ADAU7118 has two PDM clocks for the four inputs. Each input must be
assigned to one of these two clocks. This property sets the mapping
between the clocks and the inputs.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- - minItems: 4
- maxItems: 4
- items:
- maximum: 1
- default: [0, 0, 1, 1]
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 4
+ maxItems: 4
+ items:
+ maximum: 1
+ default: [0, 0, 1, 1]
required:
- "#sound-dai-cells"
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
index ea1d2efb2aaa..be390accdd07 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -57,32 +57,31 @@ properties:
A list of the connections between audio components. Each entry
is a pair of strings, the first being the connection's sink, the
second being the connection's source.
- allOf:
- - $ref: /schemas/types.yaml#definitions/non-unique-string-array
- - minItems: 2
- maxItems: 18
- items:
- enum:
- # Audio Pins on the SoC
- - HP
- - HPCOM
- - LINEIN
- - LINEOUT
- - MIC1
- - MIC2
- - MIC3
-
- # Microphone Biases from the SoC
- - HBIAS
- - MBIAS
-
- # Board Connectors
- - Headphone
- - Headset Mic
- - Line In
- - Line Out
- - Mic
- - Speaker
+    $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ minItems: 2
+ maxItems: 18
+ items:
+ enum:
+ # Audio Pins on the SoC
+ - HP
+ - HPCOM
+ - LINEIN
+ - LINEOUT
+ - MIC1
+ - MIC2
+ - MIC3
+
+ # Microphone Biases from the SoC
+ - HBIAS
+ - MBIAS
+
+ # Board Connectors
+ - Headphone
+ - Headset Mic
+ - Line In
+ - Line Out
+ - Mic
+ - Speaker
allwinner,codec-analog-controls:
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml b/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
index a61bccf915d8..f9344adaf6c2 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
@@ -86,7 +86,7 @@ examples:
aiu: audio-controller@5400 {
compatible = "amlogic,aiu-gxl", "amlogic,aiu";
#sound-dai-cells = <2>;
- reg = <0x0 0x5400 0x0 0x2ac>;
+ reg = <0x5400 0x2ac>;
interrupts = <GIC_SPI 48 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 50 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "i2s", "spdif";
@@ -110,4 +110,3 @@ examples:
"spdif_mclk_sel";
resets = <&reset RESET_AIU>;
};
-
diff --git a/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml b/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
index f778d3371fde..51a0c30e10f9 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
@@ -45,7 +45,7 @@ examples:
toacodec: audio-controller@740 {
compatible = "amlogic,g12a-toacodec";
- reg = <0x0 0x740 0x0 0x4>;
+ reg = <0x740 0x4>;
#sound-dai-cells = <1>;
resets = <&clkc_audio AUD_RESET_TOACODEC>;
};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml b/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
index b7c38c2b5b54..04014e658c90 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
@@ -49,10 +49,9 @@ examples:
acodec: audio-controller@32000 {
compatible = "amlogic,t9015";
- reg = <0x0 0x32000 0x0 0x14>;
+ reg = <0x32000 0x14>;
#sound-dai-cells = <0>;
clocks = <&clkc CLKID_AUDIO_CODEC>;
clock-names = "pclk";
resets = <&reset RESET_AUDIO_CODEC>;
};
-
diff --git a/Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
deleted file mode 100644
index 41ae2699f07a..000000000000
--- a/Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Cirrus Logic Lochnagar Audio Development Board
-
-Lochnagar is an evaluation and development board for Cirrus Logic
-Smart CODEC and Amp devices. It allows the connection of most Cirrus
-Logic devices on mini-cards, as well as allowing connection of
-various application processor systems to provide a full evaluation
-platform. Audio system topology, clocking and power can all be
-controlled through the Lochnagar, allowing the device under test
-to be used in a variety of possible use cases.
-
-This binding document describes the binding for the audio portion
-of the driver.
-
-This binding must be part of the Lochnagar MFD binding:
- [4] ../mfd/cirrus,lochnagar.txt
-
-Required properties:
-
- - compatible : One of the following strings:
- "cirrus,lochnagar2-soundcard"
-
- - #sound-dai-cells : Must be set to 1.
-
- - clocks : Contains an entry for each entry in clock-names.
- - clock-names : Must include the following clocks:
- "mclk" Master clock source for the sound card, should normally
- be set to LOCHNAGAR_SOUNDCARD_MCLK provided by the Lochnagar
- clock driver.
-
-Example:
-
-lochnagar-sc {
- compatible = "cirrus,lochnagar2-soundcard";
-
- #sound-dai-cells = <1>;
-
- clocks = <&lochnagar_clk LOCHNAGAR_SOUNDCARD_MCLK>;
- clock-names = "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/sound/cirrus,lochnagar.yaml
new file mode 100644
index 000000000000..cea612d3d4a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,lochnagar.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,lochnagar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Lochnagar Audio Development Board
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ Lochnagar is an evaluation and development board for Cirrus Logic
+ Smart CODEC and Amp devices. It allows the connection of most Cirrus
+ Logic devices on mini-cards, as well as allowing connection of various
+ application processor systems to provide a full evaluation platform.
+ Audio system topology, clocking and power can all be controlled through
+ the Lochnagar, allowing the device under test to be used in a variety of
+ possible use cases.
+
+ This binding document describes the binding for the audio portion of the
+ driver.
+
+ This binding must be part of the Lochnagar MFD binding:
+ [1] ../mfd/cirrus,lochnagar.yaml
+
+properties:
+ compatible:
+ enum:
+ - cirrus,lochnagar2-soundcard
+
+ '#sound-dai-cells':
+ description:
+ The first cell indicating the audio interface.
+ const: 1
+
+ clocks:
+ description:
+      Master clock source for the sound card; it should normally be set to
+ LOCHNAGAR_SOUNDCARD_MCLK provided by the Lochnagar clock driver.
+ maxItems: 1
+
+ clock-names:
+ const: mclk
+
+required:
+ - compatible
+ - '#sound-dai-cells'
+ - clocks
+ - clock-names
+
+additionalProperties: false
diff --git a/Documentation/devicetree/bindings/sound/cirrus,madera.yaml b/Documentation/devicetree/bindings/sound/cirrus,madera.yaml
new file mode 100644
index 000000000000..c4cd58b5acd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,madera.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,madera.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic Madera class audio CODECs
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ This describes audio configuration bindings for these codecs.
+
+ See also the core bindings for the parent MFD driver:
+
+ Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
+
+ and defines for values used in these bindings:
+
+ include/dt-bindings/sound/madera.h
+
+ The properties are all contained in the parent MFD node.
+
+properties:
+ '#sound-dai-cells':
+ description:
+ The first cell indicating the audio interface.
+ const: 1
+
+ cirrus,inmode:
+ description:
+ A list of input mode settings for each input. A maximum
+ of 24 cells, with four cells per input in the order INnAL,
+      INnAR, INnBL, INnBR. For non-muxed inputs the first two cells
+ for that input set the mode for the left and right channel
+ and the second two cells must be 0. For muxed inputs the
+ first two cells for that input set the mode of the left and
+ right A inputs and the second two cells set the mode of the
+ left and right B inputs. Valid mode values are one of the
+ MADERA_INMODE_xxx. If the array is shorter than the number
+ of inputs the unspecified inputs default to MADERA_INMODE_DIFF.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 24
+ items:
+ minimum: 0
+ maximum: 1
+ default: 0
+
+ cirrus,out-mono:
+ description:
+      Mono bit for each output, a maximum of six cells. If the array
+      is shorter, the remaining outputs will be set to stereo.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 6
+ items:
+ minimum: 0
+ maximum: 1
+ default: 0
+
+ cirrus,dmic-ref:
+ description: |
+ Indicates how the MICBIAS pins have been externally connected
+ to DMICs on each input, one cell per input.
+
+ <IN1 IN2 IN3 ...>
+
+      A value of 0 indicates MICVDD and is the default; other values
+      depend on the codec: for CS47L35 one of the CS47L35_DMIC_REF_xxx
+      values, for all other codecs one of the MADERA_DMIC_REF_xxx
+      values. Also see the datasheet for a description of the
+      INn_DMIC_SUP field.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 6
+ items:
+ minimum: 0
+ maximum: 3
+ default: 0
+
+ cirrus,max-channels-clocked:
+ description:
+ Maximum number of channels that I2S clocks will be generated
+      for. Useful when acting as clock master on systems where the
+      I2S bus has multiple data lines. One cell for each AIF; use a
+      value of zero for AIFs that should be handled normally.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ items:
+ default: 0
+
+ cirrus,pdm-fmt:
+ description:
+ PDM speaker data format, must contain 2 cells (OUT5 and
+ OUT6). See the PDM_SPKn_FMT field in the datasheet for a
+ description of this value. The second cell is ignored for
+ codecs that do not have OUT6.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+
+ cirrus,pdm-mute:
+ description: |
+ PDM mute format, must contain 2 cells (OUT5 and OUT6). See the
+ PDM_SPKn_CTRL_1 register in the datasheet for a description
+ of this value. The second cell is ignored for codecs that
+ do not have OUT6.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
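The YAML conversion above carries no example block; the following sketch mirrors the example being removed from madera.txt later in this patch (defines from include/dt-bindings/sound/madera.h), so the values are grounded there rather than invented:

    cs47l35@0 {
        compatible = "cirrus,cs47l35";

        cirrus,dmic-ref = <0 0 CS47L35_DMIC_REF_MICBIAS1B 0>;
        cirrus,inmode = <
            MADERA_INMODE_DMIC MADERA_INMODE_DMIC  /* IN1A digital */
            MADERA_INMODE_SE   MADERA_INMODE_SE    /* IN1B single-ended */
            MADERA_INMODE_DIFF MADERA_INMODE_DIFF  /* IN2 differential */
            0 0                                    /* not used on this codec */
        >;
        cirrus,out-mono = <0 0 0 0 0 0>;
        cirrus,max-channels-clocked = <2 0 0>;
    };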
diff --git a/Documentation/devicetree/bindings/sound/da7213.txt b/Documentation/devicetree/bindings/sound/da7213.txt
index 58902802d56c..94584c96c4ae 100644
--- a/Documentation/devicetree/bindings/sound/da7213.txt
+++ b/Documentation/devicetree/bindings/sound/da7213.txt
@@ -1,9 +1,9 @@
-Dialog Semiconductor DA7213 Audio Codec bindings
+Dialog Semiconductor DA7212/DA7213 Audio Codec bindings
======
Required properties:
-- compatible : Should be "dlg,da7213"
+- compatible : Should be "dlg,da7212" or "dlg,da7213"
- reg: Specifies the I2C slave address
Optional properties:
@@ -21,6 +21,10 @@ Optional properties:
- dlg,dmic-clkrate : DMIC clock frequency (Hz).
[<1500000>, <3000000>]
+ - VDDA-supply : Regulator phandle for Analogue power supply
+ - VDDMIC-supply : Regulator phandle for Mic Bias
+ - VDDIO-supply : Regulator phandle for I/O power supply
+
======
Example:
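A hedged sketch of a node using the newly documented supply properties; the I2C address and regulator labels are illustrative assumptions, not part of the original example:

    da7213: codec@1a {
        compatible = "dlg,da7213";
        reg = <0x1a>;
        VDDA-supply = <&reg_audio_1v8>;
        VDDMIC-supply = <&reg_mic_bias>;
        VDDIO-supply = <&reg_io_3v3>;
    };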
diff --git a/Documentation/devicetree/bindings/sound/fsl,asrc.txt b/Documentation/devicetree/bindings/sound/fsl,asrc.txt
index cb9a25165503..998b4c8a7f78 100644
--- a/Documentation/devicetree/bindings/sound/fsl,asrc.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,asrc.txt
@@ -51,6 +51,10 @@ Optional properties:
will be in use as default. Otherwise, the big endian
mode will be in use for all the device registers.
+ - fsl,asrc-format : Defines a mutual sample format used by DPCM Back
+    Ends, which can replace fsl,asrc-width.
+    The value is 2 (S16_LE) or 6 (S24_LE).
+
Example:
asrc: asrc@2034000 {
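Inside a node like the example above, the new property is a single line (a sketch assuming S16_LE as the mutual Back End format):

        fsl,asrc-format = <2>;  /* 2 = S16_LE, 6 = S24_LE */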
diff --git a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
new file mode 100644
index 000000000000..73cdcf053a9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,easrc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP Asynchronous Sample Rate Converter (ASRC) Controller
+
+maintainers:
+ - Shengjiu Wang <shengjiu.wang@nxp.com>
+
+properties:
+ $nodename:
+ pattern: "^easrc@.*"
+
+ compatible:
+ const: fsl,imx8mn-easrc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Peripheral clock
+
+ clock-names:
+ items:
+ - const: mem
+
+ dmas:
+ maxItems: 8
+
+ dma-names:
+ items:
+ - const: ctx0_rx
+ - const: ctx0_tx
+ - const: ctx1_rx
+ - const: ctx1_tx
+ - const: ctx2_rx
+ - const: ctx2_tx
+ - const: ctx3_rx
+ - const: ctx3_tx
+
+ firmware-name:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
+ - const: imx/easrc/easrc-imx8mn.bin
+ description: The coefficient table for the filters
+
+ fsl,asrc-rate:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 8000
+ - maximum: 192000
+ description: Defines a mutual sample rate used by DPCM Back Ends
+
+ fsl,asrc-format:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [2, 6, 10, 32, 36]
+ default: 2
+ description:
+ Defines a mutual sample format used by DPCM Back Ends
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+ - firmware-name
+ - fsl,asrc-rate
+ - fsl,asrc-format
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mn-clock.h>
+
+ easrc: easrc@300c0000 {
+ compatible = "fsl,imx8mn-easrc";
+ reg = <0x0 0x300c0000 0x0 0x10000>;
+ interrupts = <0x0 122 0x4>;
+ clocks = <&clk IMX8MN_CLK_ASRC_ROOT>;
+ clock-names = "mem";
+ dmas = <&sdma2 16 23 0> , <&sdma2 17 23 0>,
+ <&sdma2 18 23 0> , <&sdma2 19 23 0>,
+ <&sdma2 20 23 0> , <&sdma2 21 23 0>,
+ <&sdma2 22 23 0> , <&sdma2 23 23 0>;
+ dma-names = "ctx0_rx", "ctx0_tx",
+ "ctx1_rx", "ctx1_tx",
+ "ctx2_rx", "ctx2_tx",
+ "ctx3_rx", "ctx3_tx";
+ firmware-name = "imx/easrc/easrc-imx8mn.bin";
+ fsl,asrc-rate = <8000>;
+ fsl,asrc-format = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,esai.txt b/Documentation/devicetree/bindings/sound/fsl,esai.txt
index 0e6e2166f76c..0a2480aeecf0 100644
--- a/Documentation/devicetree/bindings/sound/fsl,esai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,esai.txt
@@ -12,6 +12,7 @@ Required properties:
"fsl,imx35-esai",
"fsl,vf610-esai",
"fsl,imx6ull-esai",
+ "fsl,imx8qm-esai",
- reg : Offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/sound/madera.txt b/Documentation/devicetree/bindings/sound/madera.txt
deleted file mode 100644
index 5e669ce552f4..000000000000
--- a/Documentation/devicetree/bindings/sound/madera.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-Cirrus Logic Madera class audio codecs
-
-This describes audio configuration bindings for these codecs.
-
-See also the core bindings for the parent MFD driver:
-See Documentation/devicetree/bindings/mfd/madera.txt
-
-and defines for values used in these bindings:
-include/dt-bindings/sound/madera.h
-
-These properties are all contained in the parent MFD node.
-
-Optional properties:
- - cirrus,dmic-ref : Indicates how the MICBIAS pins have been externally
- connected to DMICs on each input, one cell per input.
- <IN1 IN2 IN3 ...>
- A value of 0 indicates MICVDD and is the default, other values depend on the
- codec:
- For CS47L35 one of the CS47L35_DMIC_REF_xxx values
- For all other codecs one of the MADERA_DMIC_REF_xxx values
- Also see the datasheet for a description of the INn_DMIC_SUP field.
-
- - cirrus,inmode : A list of input mode settings for each input. A maximum of
- 16 cells, with four cells per input in the order INnAL, INnAR INnBL INnBR.
- For non-muxed inputs the first two cells for that input set the mode for
- the left and right channel and the second two cells must be 0.
- For muxed inputs the first two cells for that input set the mode of the
- left and right A inputs and the second two cells set the mode of the left
- and right B inputs.
- Valid mode values are one of the MADERA_INMODE_xxx. If the array is shorter
- than the number of inputs the unspecified inputs default to
- MADERA_INMODE_DIFF.
-
- - cirrus,out-mono : Mono bit for each output, maximum of six cells if the
- array is shorter outputs will be set to stereo.
-
- - cirrus,max-channels-clocked : Maximum number of channels that I2S clocks
- will be generated for. Useful when clock master for systems where the I2S
- bus has multiple data lines.
- One cell for each AIF, use a value of zero for AIFs that should be handled
- normally.
-
- - cirrus,pdm-fmt : PDM speaker data format, must contain 2 cells
- (OUT5 and OUT6). See the PDM_SPKn_FMT field in the datasheet for a
- description of this value.
- The second cell is ignored for codecs that do not have OUT6.
-
- - cirrus,pdm-mute : PDM mute format, must contain 2 cells
- (OUT5 and OUT6). See the PDM_SPKn_CTRL_1 register in the datasheet for a
- description of this value.
- The second cell is ignored for codecs that do not have OUT6.
-
-Example:
-
-cs47l35@0 {
- compatible = "cirrus,cs47l35";
-
- cirrus,dmic-ref = <0 0 CS47L35_DMIC_REF_MICBIAS1B 0>;
- cirrus,inmode = <
- MADERA_INMODE_DMIC MADERA_INMODE_DMIC /* IN1A digital */
- MADERA_INMODE_SE MADERA_INMODE_SE /* IN1B single-ended */
- MADERA_INMODE_DIFF MADERA_INMODE_DIFF /* IN2 differential */
- 0 0 /* not used on this codec */
- >;
- cirrus,out-mono = <0 0 0 0 0 0>;
- cirrus,max-channels-clocked = <2 0 0>;
-};
diff --git a/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml b/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml
new file mode 100644
index 000000000000..6d20a24a2ae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/marvell,mmp-sspa.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/marvell,mmp-sspa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell SSPA Digital Audio Interface Bindings
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+properties:
+ $nodename:
+ pattern: "^audio-controller(@.*)?$"
+
+ compatible:
+ const: marvell,mmp-sspa
+
+ reg:
+ items:
+ - description: RX block
+ - description: TX block
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Clock for the Audio block
+ - description: I2S bit clock
+
+ clock-names:
+ items:
+ - const: audio
+ - const: bitclk
+
+ power-domains:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 0
+
+ dmas:
+ items:
+ - description: TX DMA Channel
+ - description: RX DMA Channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ port:
+ type: object
+
+ properties:
+ endpoint:
+ type: object
+
+ properties:
+ remote-endpoint: true
+
+ frame-master:
+ type: boolean
+ description: SoC generates the frame clock
+
+ bitclock-master:
+ type: boolean
+ description: SoC generates the bit clock
+
+ dai-format:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The digital audio format
+ const: i2s
+
+ required:
+ - remote-endpoint
+
+ required:
+ - endpoint
+
+ additionalProperties: false
+
+required:
+ - "#sound-dai-cells"
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/marvell,mmp2.h>
+
+ audio-controller@d42a0c00 {
+ compatible = "marvell,mmp-sspa";
+ reg = <0xd42a0c00 0x30>,
+ <0xd42a0c80 0x30>;
+ interrupts = <2>;
+ clock-names = "audio", "bitclk";
+ clocks = <&soc_clocks 127>,
+ <&audio_clk 1>;
+ #sound-dai-cells = <0>;
+ dmas = <&adma0 0>, <&adma0 1>;
+ dma-names = "tx", "rx";
+ port {
+ endpoint {
+ remote-endpoint = <&rt5631_0>;
+ frame-master;
+ bitclock-master;
+ dai-format = "i2s";
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nau8810.txt b/Documentation/devicetree/bindings/sound/nau8810.txt
index 05830e477acd..7deaa452b200 100644
--- a/Documentation/devicetree/bindings/sound/nau8810.txt
+++ b/Documentation/devicetree/bindings/sound/nau8810.txt
@@ -1,10 +1,11 @@
-NAU8810 audio CODEC
+NAU8810/NAU8812/NAU8814 audio CODEC
This device supports I2C only.
Required properties:
- - compatible : "nuvoton,nau8810"
+ - compatible : One of "nuvoton,nau8810", "nuvoton,nau8812" or
+ "nuvoton,nau8814"
- reg : the I2C address of the device.
diff --git a/Documentation/devicetree/bindings/sound/nau8825.txt b/Documentation/devicetree/bindings/sound/nau8825.txt
index d16d96839bcb..388a7bc60b1f 100644
--- a/Documentation/devicetree/bindings/sound/nau8825.txt
+++ b/Documentation/devicetree/bindings/sound/nau8825.txt
@@ -101,5 +101,5 @@ Example:
nuvoton,crosstalk-enable;
clock-names = "mclk";
- clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
+ clocks = <&tegra_pmc TEGRA_PMC_CLK_OUT_2>;
};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
index a8f2b0c56c79..bbd581a8c5bc 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
@@ -29,6 +29,7 @@ Optional properties:
- nvidia,hp-det-gpios : The GPIO that detect headphones are plugged in
- nvidia,int-mic-en-gpios : The GPIO that enables the internal microphone
- nvidia,ext-mic-en-gpios : The GPIO that enables the external microphone
+- nvidia,headset : The Mic Jack represents the state of the headset microphone pin
Example:
diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt
index 21c648328be9..32c2cdb3d32f 100644
--- a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.txt
@@ -30,6 +30,8 @@ Required properties:
- reg : Must contain an address for each entry in reg-names.
- reg-names : A list which must include the following entries:
* "lpass-lpaif"
+- #address-cells : Must be 1
+- #size-cells : Must be 0
@@ -37,6 +39,20 @@ Optional properties:
- qcom,adsp : Phandle for the audio DSP node
+By default, the driver uses up to 4 MI2S SD lines, for a total of 8 channels.
+The SD lines to use can be configured by adding subnodes for each of the DAIs.
+
+Required properties for each DAI (represented by a subnode):
+- reg : Must be one of the DAI IDs
+ (usually part of dt-bindings header)
+- qcom,playback-sd-lines: List of serial data lines to use for playback
+ Each SD line should be represented by a number from 0-3.
+- qcom,capture-sd-lines : List of serial data lines to use for capture
+ Each SD line should be represented by a number from 0-3.
+
+Note that adding a subnode changes the default to "no lines configured",
+so both playback and capture lines should be configured when a subnode is added.
+
Example:
lpass@28100000 {
@@ -51,4 +67,13 @@ lpass@28100000 {
reg = <0x28100000 0x10000>;
reg-names = "lpass-lpaif";
qcom,adsp = <&adsp>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Optional to set different MI2S SD lines */
+ dai@3 {
+ reg = <MI2S_QUATERNARY>;
+ qcom,playback-sd-lines = <0 1>;
+ };
};
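Since the note above states that adding a subnode clears the default line configuration, a fuller sketch would configure both directions; the capture line split here is an illustrative assumption:

    dai@3 {
        reg = <MI2S_QUATERNARY>;
        qcom,playback-sd-lines = <0 1>;
        qcom,capture-sd-lines = <2 3>;
    };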
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6adm.txt b/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
index bbae426cdfb1..15c353a20de8 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6adm.txt
@@ -29,7 +29,7 @@ used by the apr service device.
Definition: Must be 0
= EXAMPLE
-q6adm@8 {
+apr-service@8 {
compatible = "qcom,q6adm";
reg = <APR_SVC_ADM>;
q6routing: routing {
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
index d74888b9f1bb..4916dd6a0896 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
@@ -100,7 +100,7 @@ configuration of each dai. Must contain the following properties.
= EXAMPLE
-q6afe@4 {
+apr-service@4 {
compatible = "qcom,q6afe";
reg = <APR_SVC_AFE>;
@@ -110,12 +110,12 @@ q6afe@4 {
#address-cells = <1>;
#size-cells = <0>;
- hdmi@1 {
- reg = <1>;
+ dai@1 {
+ reg = <HDMI_RX>;
};
- tdm@24 {
- reg = <24>;
+ dai@24 {
+ reg = <PRIMARY_TDM_RX_0>;
qcom,tdm-sync-mode = <1>;
qcom,tdm-sync-src = <1>;
qcom,tdm-data-out = <0>;
@@ -125,8 +125,8 @@ q6afe@4 {
};
- tdm@25 {
- reg = <25>;
+ dai@25 {
+ reg = <PRIMARY_TDM_TX_0>;
qcom,tdm-sync-mode = <1>;
qcom,tdm-sync-src = <1>;
qcom,tdm-data-out = <0>;
@@ -135,43 +135,43 @@ q6afe@4 {
qcom,tdm-data-align = <0>;
};
- prim-mi2s-rx@16 {
- reg = <16>;
+ dai@16 {
+ reg = <PRIMARY_MI2S_RX>;
qcom,sd-lines = <0 2>;
};
- prim-mi2s-tx@17 {
- reg = <17>;
+ dai@17 {
+ reg = <PRIMARY_MI2S_TX>;
qcom,sd-lines = <1>;
};
- sec-mi2s-rx@18 {
- reg = <18>;
+ dai@18 {
+ reg = <SECONDARY_MI2S_RX>;
qcom,sd-lines = <0 3>;
};
- sec-mi2s-tx@19 {
- reg = <19>;
+ dai@19 {
+ reg = <SECONDARY_MI2S_TX>;
qcom,sd-lines = <1>;
};
- tert-mi2s-rx@20 {
- reg = <20>;
+ dai@20 {
+ reg = <TERTIARY_MI2S_RX>;
qcom,sd-lines = <1 3>;
};
- tert-mi2s-tx@21 {
- reg = <21>;
+ dai@21 {
+ reg = <TERTIARY_MI2S_TX>;
qcom,sd-lines = <0>;
};
- quat-mi2s-rx@22 {
- reg = <22>;
+ dai@22 {
+ reg = <QUATERNARY_MI2S_RX>;
qcom,sd-lines = <0>;
};
- quat-mi2s-tx@23 {
- reg = <23>;
+ dai@23 {
+ reg = <QUATERNARY_MI2S_TX>;
qcom,sd-lines = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
index 9f5378c51686..6b9a88d0ea3f 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
@@ -51,13 +51,16 @@ configuration of each dai. Must contain the following properties.
= EXAMPLE
-q6asm@7 {
+apr-service@7 {
compatible = "qcom,q6asm";
reg = <APR_SVC_ASM>;
q6asmdai: dais {
compatible = "qcom,q6asm-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
#sound-dai-cells = <1>;
- mm@0 {
+
+ dai@0 {
reg = <0>;
direction = <2>;
is-compress-dai;
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6core.txt b/Documentation/devicetree/bindings/sound/qcom,q6core.txt
index 7f36ff8bec18..5cd4cc9b1fde 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6core.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6core.txt
@@ -15,7 +15,7 @@ used by the apr service device.
example "qcom,q6core-v2.0"
= EXAMPLE
-q6core@3 {
+apr-service@3 {
compatible = "qcom,q6core";
reg = <APR_SVC_ADSP_CORE>;
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
index a495d5fc0d23..e8f716b5f875 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd934x.yaml
@@ -102,8 +102,7 @@ properties:
gpio@42:
type: object
- allOf:
- - $ref: ../gpio/qcom,wcd934x-gpio.yaml#
+ $ref: ../gpio/qcom,wcd934x-gpio.yaml#
patternProperties:
"^.*@[0-9a-f]+$":
diff --git a/Documentation/devicetree/bindings/sound/renesas,fsi.yaml b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
index d1b65554e681..8a4406be387a 100644
--- a/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
+++ b/Documentation/devicetree/bindings/sound/renesas,fsi.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/sound/renesas,fsi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Renesas FSI Sound Driver Device Tree Bindings
+title: Renesas FIFO-buffered Serial Interface (FSI)
maintainers:
- Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
@@ -17,16 +17,16 @@ properties:
oneOf:
# for FSI2 SoC
- items:
- - enum:
- - renesas,fsi2-sh73a0
- - renesas,fsi2-r8a7740
- - enum:
- - renesas,sh_fsi2
+ - enum:
+ - renesas,fsi2-sh73a0 # SH-Mobile AG5
+ - renesas,fsi2-r8a7740 # R-Mobile A1
+ - enum:
+ - renesas,sh_fsi2
# for Generic
- items:
- - enum:
- - renesas,sh_fsi
- - renesas,sh_fsi2
+ - enum:
+ - renesas,sh_fsi
+ - renesas,sh_fsi2
reg:
maxItems: 1
@@ -34,6 +34,15 @@ properties:
interrupts:
maxItems: 1
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 1
+
fsia,spdif-connection:
$ref: /schemas/types.yaml#/definitions/flag
description: FSI is connected by S/PDIF
@@ -62,16 +71,24 @@ required:
- compatible
- reg
- interrupts
+ - clocks
+ - power-domains
+ - '#sound-dai-cells'
additionalProperties: false
examples:
- |
- sh_fsi2: sound@ec230000 {
+ #include <dt-bindings/clock/r8a7740-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ sh_fsi2: sound@fe1f0000 {
compatible = "renesas,fsi2-r8a7740", "renesas,sh_fsi2";
- reg = <0xec230000 0x400>;
- interrupts = <0 146 0x4>;
+ reg = <0xfe1f0000 0x400>;
+ interrupts = <GIC_SPI 9 0x4>;
+ clocks = <&mstp3_clks R8A7740_CLK_FSI>;
+ power-domains = <&pd_a4mp>;
+ #sound-dai-cells = <1>;
fsia,spdif-connection;
fsia,stream-mode-support;
fsia,use-internal-clock;
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 797fd035434c..1596f0d1e2fe 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -263,6 +263,7 @@ Required properties:
"renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
"renesas,rcar_sound-gen3" if generation3 (or RZ/G2)
Examples with soctypes are:
+ - "renesas,rcar_sound-r8a7742" (RZ/G1H)
- "renesas,rcar_sound-r8a7743" (RZ/G1M)
- "renesas,rcar_sound-r8a7744" (RZ/G1N)
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml b/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
index a3ba2186d6a1..acb2b888dbfc 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.yaml
@@ -24,6 +24,7 @@ properties:
- rockchip,rk3188-i2s
- rockchip,rk3228-i2s
- rockchip,rk3288-i2s
+ - rockchip,rk3308-i2s
- rockchip,rk3328-i2s
- rockchip,rk3366-i2s
- rockchip,rk3368-i2s
@@ -47,28 +48,27 @@ properties:
- const: i2s_hclk
dmas:
- items:
- - description: TX DMA Channel
- - description: RX DMA Channel
+ minItems: 1
+ maxItems: 2
dma-names:
- items:
- - const: tx
+ oneOf:
- const: rx
+ - items:
+ - const: tx
+ - const: rx
power-domains:
maxItems: 1
rockchip,capture-channels:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
default: 2
description:
Max capture channels, if not set, 2 channels default.
rockchip,playback-channels:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint32
default: 8
description:
Max playback channels, if not set, 8 channels default.
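A sketch of the two dmas/dma-names layouts the relaxed schema now accepts; the DMA controller phandle and request numbers are assumptions:

    /* playback and capture */
    dmas = <&dmac1s 3>, <&dmac1s 2>;
    dma-names = "tx", "rx";

    /* capture only */
    dmas = <&dmac1s 2>;
    dma-names = "rx";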
diff --git a/Documentation/devicetree/bindings/sound/rt1016.txt b/Documentation/devicetree/bindings/sound/rt1016.txt
new file mode 100644
index 000000000000..2310f8ff259b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt1016.txt
@@ -0,0 +1,17 @@
+RT1016 Stereo Class D Audio Amplifier
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt1016".
+
+- reg : The I2C address of the device.
+
+
+Example:
+
+rt1016: codec@1a {
+ compatible = "realtek,rt1016";
+ reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt1308.txt b/Documentation/devicetree/bindings/sound/rt1308.txt
index 2d46084afce4..2d46084afce4 100755..100644
--- a/Documentation/devicetree/bindings/sound/rt1308.txt
+++ b/Documentation/devicetree/bindings/sound/rt1308.txt
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
deleted file mode 100644
index 79954cd6e37b..000000000000
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ /dev/null
@@ -1,351 +0,0 @@
-Simple-Card:
-
-Simple-Card specifies audio DAI connections of SoC <-> codec.
-
-Required properties:
-
-- compatible : "simple-audio-card"
-
-Optional properties:
-
-- simple-audio-card,name : User specified audio sound card name, one string
- property.
-- simple-audio-card,widgets : Please refer to widgets.txt.
-- simple-audio-card,routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source.
-- simple-audio-card,mclk-fs : Multiplication factor between stream rate and codec
- mclk. When defined, mclk-fs property defined in
- dai-link sub nodes are ignored.
-- simple-audio-card,hp-det-gpio : Reference to GPIO that signals when
- headphones are attached.
-- simple-audio-card,mic-det-gpio : Reference to GPIO that signals when
- a microphone is attached.
-- simple-audio-card,aux-devs : List of phandles pointing to auxiliary devices, such
- as amplifiers, to be added to the sound card.
-- simple-audio-card,pin-switches : List of strings containing the widget names for
- which pin switches must be created.
-
-Optional subnodes:
-
-- simple-audio-card,dai-link : Container for dai-link level
- properties and the CPU and CODEC
- sub-nodes. This container may be
- omitted when the card has only one
- DAI link. See the examples and the
- section below.
-
-Dai-link subnode properties and subnodes:
-
-If dai-link subnode is omitted and the subnode properties are directly
-under "sound"-node the subnode property and subnode names have to be
-prefixed with "simple-audio-card,"-prefix.
-
-Required dai-link subnodes:
-
-- cpu : CPU sub-node
-- codec : CODEC sub-node
-
-Optional dai-link subnode properties:
-
-- format : CPU/CODEC common audio format.
- "i2s", "right_j", "left_j" , "dsp_a"
- "dsp_b", "ac97", "pdm", "msb", "lsb"
-- frame-master : Indicates dai-link frame master.
- phandle to a cpu or codec subnode.
-- bitclock-master : Indicates dai-link bit clock master.
- phandle to a cpu or codec subnode.
-- bitclock-inversion : bool property. Add this if the
- dai-link uses bit clock inversion.
-- frame-inversion : bool property. Add this if the
- dai-link uses frame clock inversion.
-- mclk-fs : Multiplication factor between stream
- rate and codec mclk, applied only for
- the dai-link.
-
-For backward compatibility the frame-master and bitclock-master
-properties can be used as booleans in codec subnode to indicate if the
-codec is the dai-link frame or bit clock master. In this case there
-should be no dai-link node, the same properties should not be present
-at sound-node level, and the bitclock-inversion and frame-inversion
-properties should also be placed in the codec node if needed.
-
-Required CPU/CODEC subnodes properties:
-
-- sound-dai : phandle and port of CPU/CODEC
-
-Optional CPU/CODEC subnodes properties:
-
-- dai-tdm-slot-num : Please refer to tdm-slot.txt.
-- dai-tdm-slot-width : Please refer to tdm-slot.txt.
-- clocks / system-clock-frequency : specify subnode's clock if needed.
- it can be specified via "clocks" if system has
- clock node (= common clock), or "system-clock-frequency"
- (if system doens't support common clock)
- If a clock is specified, it is
- enabled with clk_prepare_enable()
- in dai startup() and disabled with
- clk_disable_unprepare() in dai
- shutdown().
- If a clock is specified and a
- multiplication factor is given with
- mclk-fs, the clock will be set to the
- calculated mclk frequency when the
- stream starts.
-- system-clock-direction-out : specifies clock direction as 'out' on
- initialization. It is useful for some aCPUs with
- fixed clocks.
-
--------------------------------------------
-Example 1 - single DAI link:
--------------------------------------------
-
-sound {
- compatible = "simple-audio-card";
- simple-audio-card,name = "VF610-Tower-Sound-Card";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&dailink0_master>;
- simple-audio-card,frame-master = <&dailink0_master>;
- simple-audio-card,widgets =
- "Microphone", "Microphone Jack",
- "Headphone", "Headphone Jack",
- "Speaker", "External Speaker";
- simple-audio-card,routing =
- "MIC_IN", "Microphone Jack",
- "Headphone Jack", "HP_OUT",
- "External Speaker", "LINE_OUT";
-
- simple-audio-card,cpu {
- sound-dai = <&sh_fsi2 0>;
- };
-
- dailink0_master: simple-audio-card,codec {
- sound-dai = <&ak4648>;
- clocks = <&osc>;
- };
-};
-
-&i2c0 {
- ak4648: ak4648@12 {
- #sound-dai-cells = <0>;
- compatible = "asahi-kasei,ak4648";
- reg = <0x12>;
- };
-};
-
-sh_fsi2: sh_fsi2@ec230000 {
- #sound-dai-cells = <1>;
- compatible = "renesas,sh_fsi2";
- reg = <0xec230000 0x400>;
- interrupt-parent = <&gic>;
- interrupts = <0 146 0x4>;
-};
-
--------------------------------------------
-Example 2 - many DAI links:
--------------------------------------------
-
-sound {
- compatible = "simple-audio-card";
- simple-audio-card,name = "Cubox Audio";
-
- simple-audio-card,dai-link@0 { /* I2S - HDMI */
- reg = <0>;
- format = "i2s";
- cpu {
- sound-dai = <&audio1 0>;
- };
- codec {
- sound-dai = <&tda998x 0>;
- };
- };
-
- simple-audio-card,dai-link@1 { /* S/PDIF - HDMI */
- reg = <1>;
- cpu {
- sound-dai = <&audio1 1>;
- };
- codec {
- sound-dai = <&tda998x 1>;
- };
- };
-
- simple-audio-card,dai-link@2 { /* S/PDIF - S/PDIF */
- reg = <2>;
- cpu {
- sound-dai = <&audio1 1>;
- };
- codec {
- sound-dai = <&spdif_codec>;
- };
- };
-};
-
--------------------------------------------
-Example 3 - route audio from IMX6 SSI2 through TLV320DAC3100 codec
-through TPA6130A2 amplifier to headphones:
--------------------------------------------
-
-&i2c0 {
- codec: tlv320dac3100@18 {
- compatible = "ti,tlv320dac3100";
- ...
- }
-
- amp: tpa6130a2@60 {
- compatible = "ti,tpa6130a2";
- ...
- }
-}
-
-sound {
- compatible = "simple-audio-card";
- ...
- simple-audio-card,widgets =
- "Headphone", "Headphone Jack";
- simple-audio-card,routing =
- "Headphone Jack", "HPLEFT",
- "Headphone Jack", "HPRIGHT",
- "LEFTIN", "HPL",
- "RIGHTIN", "HPR";
- simple-audio-card,aux-devs = <&amp>;
- simple-audio-card,cpu {
- sound-dai = <&ssi2>;
- };
- simple-audio-card,codec {
- sound-dai = <&codec>;
- clocks = ...
- };
-};
-
--------------------------------------------
-Example 4. Sampling Rate Conversion
--------------------------------------------
-
-sound {
- compatible = "simple-audio-card";
-
- simple-audio-card,name = "rsnd-ak4643";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&sndcodec>;
- simple-audio-card,frame-master = <&sndcodec>;
-
- simple-audio-card,convert-rate = <48000>;
-
- simple-audio-card,prefix = "ak4642";
- simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
- "DAI0 Capture", "ak4642 Capture";
-
- sndcpu: simple-audio-card,cpu {
- sound-dai = <&rcar_sound>;
- };
-
- sndcodec: simple-audio-card,codec {
- sound-dai = <&ak4643>;
- system-clock-frequency = <11289600>;
- };
-};
-
--------------------------------------------
-Example 5. 2 CPU 1 Codec (Mixing)
--------------------------------------------
-sound {
- compatible = "simple-audio-card";
-
- simple-audio-card,name = "rsnd-ak4643";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&dpcmcpu>;
- simple-audio-card,frame-master = <&dpcmcpu>;
-
- simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
- "ak4642 Playback", "DAI1 Playback";
-
- dpcmcpu: cpu@0 {
- sound-dai = <&rcar_sound 0>;
- };
-
- cpu@1 {
- sound-dai = <&rcar_sound 1>;
- };
-
- codec {
- prefix = "ak4642";
- sound-dai = <&ak4643>;
- clocks = <&audio_clock>;
- };
-};
-
--------------------------------------------
-Example 6 - many DAI links with DPCM:
--------------------------------------------
-
-CPU0 ------ ak4613
-CPU1 ------ PCM3168A-p /* DPCM 1ch/2ch */
-CPU2 --/ /* DPCM 3ch/4ch */
-CPU3 --/ /* DPCM 5ch/6ch */
-CPU4 --/ /* DPCM 7ch/8ch */
-CPU5 ------ PCM3168A-c
-
-sound {
- compatible = "simple-audio-card";
-
- simple-audio-card,routing =
- "pcm3168a Playback", "DAI1 Playback",
- "pcm3168a Playback", "DAI2 Playback",
- "pcm3168a Playback", "DAI3 Playback",
- "pcm3168a Playback", "DAI4 Playback";
-
- simple-audio-card,dai-link@0 {
- format = "left_j";
- bitclock-master = <&sndcpu0>;
- frame-master = <&sndcpu0>;
-
- sndcpu0: cpu {
- sound-dai = <&rcar_sound 0>;
- };
- codec {
- sound-dai = <&ak4613>;
- };
- };
- simple-audio-card,dai-link@1 {
- format = "i2s";
- bitclock-master = <&sndcpu1>;
- frame-master = <&sndcpu1>;
-
- convert-channels = <8>; /* TDM Split */
-
- sndcpu1: cpu@0 {
- sound-dai = <&rcar_sound 1>;
- };
- cpu@1 {
- sound-dai = <&rcar_sound 2>;
- };
- cpu@2 {
- sound-dai = <&rcar_sound 3>;
- };
- cpu@3 {
- sound-dai = <&rcar_sound 4>;
- };
- codec {
- mclk-fs = <512>;
- prefix = "pcm3168a";
- dai-tdm-slot-num = <8>;
- sound-dai = <&pcm3168a 0>;
- };
- };
- simple-audio-card,dai-link@2 {
- format = "i2s";
- bitclock-master = <&sndcpu2>;
- frame-master = <&sndcpu2>;
-
- sndcpu2: cpu {
- sound-dai = <&rcar_sound 5>;
- };
- codec {
- mclk-fs = <512>;
- prefix = "pcm3168a";
- sound-dai = <&pcm3168a 1>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/simple-card.yaml b/Documentation/devicetree/bindings/sound/simple-card.yaml
new file mode 100644
index 000000000000..cb2bb5fac0e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/simple-card.yaml
@@ -0,0 +1,484 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/simple-card.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple Audio Card Driver Device Tree Bindings
+
+maintainers:
+ - Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+
+definitions:
+
+ frame-master:
+ description: Indicates dai-link frame master.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle-array
+ - maxItems: 1
+
+ bitclock-master:
+ description: Indicates dai-link bit clock master
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/phandle-array
+ - maxItems: 1
+
+ frame-inversion:
+ description: dai-link uses frame clock inversion
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ bitclock-inversion:
+ description: dai-link uses bit clock inversion
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ dai-tdm-slot-num:
+ description: see tdm-slot.txt.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ dai-tdm-slot-width:
+ description: see tdm-slot.txt.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ system-clock-frequency:
+ description: |
+ If a clock is specified and a multiplication factor is given with
+ mclk-fs, the clock will be set to the calculated mclk frequency
+ when the stream starts.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ system-clock-direction-out:
+ description: |
+ specifies clock direction as 'out' on initialization.
+ It is useful for some aCPUs with fixed clocks.
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ mclk-fs:
+ description: |
+ Multiplication factor between stream rate and codec mclk.
+ When defined, mclk-fs property defined in dai-link sub nodes are ignored.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ aux-devs:
+ description: |
+ List of phandles pointing to auxiliary devices, such
+ as amplifiers, to be added to the sound card.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+ convert-rate:
+    description: CPU to Codec sample rate conversion.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ convert-channels:
+    description: CPU to Codec channel count conversion.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ prefix:
+ description: "device name prefix"
+ $ref: /schemas/types.yaml#/definitions/string
+
+ label:
+ maxItems: 1
+
+ routing:
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's source.
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+
+ widgets:
+ description: User specified audio sound widgets.
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+
+ pin-switches:
+ description: the widget names for which pin switches must be created.
+ $ref: /schemas/types.yaml#/definitions/string-array
+
+ format:
+ description: audio format.
+ items:
+ enum:
+ - i2s
+ - right_j
+ - left_j
+ - dsp_a
+ - dsp_b
+ - ac97
+ - pdm
+ - msb
+ - lsb
+
+ dai:
+ type: object
+ properties:
+ sound-dai:
+ maxItems: 1
+
+ # common properties
+ mclk-fs:
+ $ref: "#/definitions/mclk-fs"
+ prefix:
+ $ref: "#/definitions/prefix"
+ frame-inversion:
+ $ref: "#/definitions/frame-inversion"
+ bitclock-inversion:
+ $ref: "#/definitions/bitclock-inversion"
+ frame-master:
+ $ref: /schemas/types.yaml#/definitions/flag
+ bitclock-master:
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ dai-tdm-slot-num:
+ $ref: "#/definitions/dai-tdm-slot-num"
+ dai-tdm-slot-width:
+ $ref: "#/definitions/dai-tdm-slot-width"
+ clocks:
+ maxItems: 1
+ system-clock-frequency:
+ $ref: "#/definitions/system-clock-frequency"
+ system-clock-direction-out:
+ $ref: "#/definitions/system-clock-direction-out"
+ required:
+ - sound-dai
+
+properties:
+ compatible:
+ contains:
+ enum:
+ - simple-audio-card
+ - simple-scu-audio-card
+
+ "#address-cells":
+ const: 1
+ "#size-cells":
+ const: 0
+
+ label:
+ $ref: "#/definitions/label"
+
+ simple-audio-card,name:
+ description: User specified audio sound card name.
+ $ref: /schemas/types.yaml#/definitions/string
+
+# use patternProperties to avoid naming "xxx,yyy" issue
+patternProperties:
+ "^simple-audio-card,widgets$":
+ $ref: "#/definitions/widgets"
+ "^simple-audio-card,routing$":
+ $ref: "#/definitions/routing"
+ "^simple-audio-card,cpu(@[0-9a-f]+)?":
+ $ref: "#/definitions/dai"
+ "^simple-audio-card,codec(@[0-9a-f]+)?":
+ $ref: "#/definitions/dai"
+
+ # common properties
+ "^simple-audio-card,frame-master$":
+ $ref: "#/definitions/frame-master"
+ "^simple-audio-card,bitclock-master$":
+ $ref: "#/definitions/bitclock-master"
+ "^simple-audio-card,frame-inversion$":
+ $ref: "#/definitions/frame-inversion"
+ "^simple-audio-card,bitclock-inversion$":
+ $ref: "#/definitions/bitclock-inversion"
+ "^simple-audio-card,format$":
+ $ref: "#/definitions/format"
+ "^simple-audio-card,mclk-fs$":
+ $ref: "#/definitions/mclk-fs"
+ "^simple-audio-card,aux-devs$":
+ $ref: "#/definitions/aux-devs"
+ "^simple-audio-card,convert-rate$":
+ $ref: "#/definitions/convert-rate"
+ "^simple-audio-card,convert-channels$":
+ $ref: "#/definitions/convert-channels"
+ "^simple-audio-card,prefix$":
+ $ref: "#/definitions/prefix"
+ "^simple-audio-card,pin-switches$":
+ $ref: "#/definitions/pin-switches"
+ "^simple-audio-card,hp-det-gpio$":
+ maxItems: 1
+ "^simple-audio-card,mic-det-gpio$":
+ maxItems: 1
+
+ "^simple-audio-card,dai-link(@[0-9a-f]+)?$":
+ description: |
+ Container for dai-link level properties and the CPU and CODEC sub-nodes.
+ This container may be omitted when the card has only one DAI link.
+ type: object
+ properties:
+ reg:
+ maxItems: 1
+
+ # common properties
+ frame-master:
+ $ref: "#/definitions/frame-master"
+ bitclock-master:
+ $ref: "#/definitions/bitclock-master"
+ frame-inversion:
+ $ref: "#/definitions/frame-inversion"
+ bitclock-inversion:
+ $ref: "#/definitions/bitclock-inversion"
+ format:
+ $ref: "#/definitions/format"
+ mclk-fs:
+ $ref: "#/definitions/mclk-fs"
+ aux-devs:
+ $ref: "#/definitions/aux-devs"
+ convert-rate:
+ $ref: "#/definitions/convert-rate"
+ convert-channels:
+ $ref: "#/definitions/convert-channels"
+ prefix:
+ $ref: "#/definitions/prefix"
+ pin-switches:
+ $ref: "#/definitions/pin-switches"
+ hp-det-gpio:
+ maxItems: 1
+ mic-det-gpio:
+ maxItems: 1
+
+ patternProperties:
+ "^cpu(@[0-9a-f]+)?":
+ $ref: "#/definitions/dai"
+ "^codec(@[0-9a-f]+)?":
+ $ref: "#/definitions/dai"
+ additionalProperties: false
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+#--------------------
+# single DAI link
+#--------------------
+ - |
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "VF610-Tower-Sound-Card";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&dailink0_master>;
+ simple-audio-card,frame-master = <&dailink0_master>;
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "External Speaker";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "Headphone Jack", "HP_OUT",
+ "External Speaker", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sh_fsi2 0>;
+ };
+
+ dailink0_master: simple-audio-card,codec {
+ sound-dai = <&ak4648>;
+ clocks = <&osc>;
+ };
+ };
+
+#--------------------
+# Multi DAI links
+#--------------------
+ - |
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Cubox Audio";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ simple-audio-card,dai-link@0 { /* I2S - HDMI */
+ reg = <0>;
+ format = "i2s";
+ cpu {
+ sound-dai = <&audio0>;
+ };
+ codec {
+ sound-dai = <&tda998x0>;
+ };
+ };
+
+ simple-audio-card,dai-link@1 { /* S/PDIF - HDMI */
+ reg = <1>;
+ cpu {
+ sound-dai = <&audio1>;
+ };
+ codec {
+ sound-dai = <&tda998x1>;
+ };
+ };
+
+ simple-audio-card,dai-link@2 { /* S/PDIF - S/PDIF */
+ reg = <2>;
+ cpu {
+ sound-dai = <&audio2>;
+ };
+ codec {
+ sound-dai = <&spdif_codec>;
+ };
+ };
+ };
+
+#--------------------
+# route audio from IMX6 SSI2 through TLV320DAC3100 codec
+# through TPA6130A2 amplifier to headphones:
+#--------------------
+ - |
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&amp>;
+ simple-audio-card,cpu {
+ sound-dai = <&ssi2>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&codec>;
+ clocks = <&clocks>;
+ };
+ };
+
+#--------------------
+# Sampling Rate Conversion
+#--------------------
+ - |
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "rsnd-ak4643";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&sndcodec>;
+ simple-audio-card,frame-master = <&sndcodec>;
+
+ simple-audio-card,convert-rate = <48000>;
+
+ simple-audio-card,prefix = "ak4642";
+ simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
+ "DAI0 Capture", "ak4642 Capture";
+
+ sndcpu: simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ sndcodec: simple-audio-card,codec {
+ sound-dai = <&ak4643>;
+ system-clock-frequency = <11289600>;
+ };
+ };
+
+#--------------------
+# 2 CPU 1 Codec (Mixing)
+#--------------------
+ - |
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "rsnd-ak4643";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&dpcmcpu>;
+ simple-audio-card,frame-master = <&dpcmcpu>;
+
+ simple-audio-card,convert-rate = <48000>;
+ simple-audio-card,convert-channels = <2>;
+
+ simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
+ "ak4642 Playback", "DAI1 Playback";
+
+ dpcmcpu: simple-audio-card,cpu@0 {
+ sound-dai = <&rcar_sound 0>;
+ };
+
+ simple-audio-card,cpu@1 {
+ sound-dai = <&rcar_sound 1>;
+ };
+
+ simple-audio-card,codec {
+ prefix = "ak4642";
+ sound-dai = <&ak4643>;
+ clocks = <&audio_clock>;
+ };
+ };
+
+#--------------------
+# Multi DAI links with DPCM:
+#
+# CPU0 ------ ak4613
+# CPU1 ------ PCM3168A-p /* DPCM 1ch/2ch */
+# CPU2 --/ /* DPCM 3ch/4ch */
+# CPU3 --/ /* DPCM 5ch/6ch */
+# CPU4 --/ /* DPCM 7ch/8ch */
+# CPU5 ------ PCM3168A-c
+#--------------------
+ - |
+ sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,routing =
+ "pcm3168a Playback", "DAI1 Playback",
+ "pcm3168a Playback", "DAI2 Playback",
+ "pcm3168a Playback", "DAI3 Playback",
+ "pcm3168a Playback", "DAI4 Playback";
+
+ simple-audio-card,dai-link@0 {
+ format = "left_j";
+ bitclock-master = <&sndcpu0>;
+ frame-master = <&sndcpu0>;
+
+ sndcpu0: cpu {
+ sound-dai = <&rcar_sound 0>;
+ };
+ codec {
+ sound-dai = <&ak4613>;
+ };
+ };
+
+ simple-audio-card,dai-link@1 {
+ format = "i2s";
+ bitclock-master = <&sndcpu1>;
+ frame-master = <&sndcpu1>;
+
+ convert-channels = <8>; /* TDM Split */
+
+ sndcpu1: cpu@0 {
+ sound-dai = <&rcar_sound 1>;
+ };
+ cpu@1 {
+ sound-dai = <&rcar_sound 2>;
+ };
+ cpu@2 {
+ sound-dai = <&rcar_sound 3>;
+ };
+ cpu@3 {
+ sound-dai = <&rcar_sound 4>;
+ };
+ codec {
+ mclk-fs = <512>;
+ prefix = "pcm3168a";
+ dai-tdm-slot-num = <8>;
+ sound-dai = <&pcm3168a 0>;
+ };
+ };
+
+ simple-audio-card,dai-link@2 {
+ format = "i2s";
+ bitclock-master = <&sndcpu2>;
+ frame-master = <&sndcpu2>;
+
+ sndcpu2: cpu {
+ sound-dai = <&rcar_sound 5>;
+ };
+ codec {
+ mclk-fs = <512>;
+ prefix = "pcm3168a";
+ sound-dai = <&pcm3168a 1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/tdm-slot.txt b/Documentation/devicetree/bindings/sound/tdm-slot.txt
index 34cf70e2cbc4..4bb513ae62fc 100644
--- a/Documentation/devicetree/bindings/sound/tdm-slot.txt
+++ b/Documentation/devicetree/bindings/sound/tdm-slot.txt
@@ -14,8 +14,8 @@ For instance:
dai-tdm-slot-tx-mask = <0 1>;
dai-tdm-slot-rx-mask = <1 0>;
-And for each spcified driver, there could be one .of_xlate_tdm_slot_mask()
-to specify a explicit mapping of the channels and the slots. If it's absent
+And for each specified driver, there could be one .of_xlate_tdm_slot_mask()
+to specify an explicit mapping of the channels and the slots. If it's absent
the default snd_soc_of_xlate_tdm_slot_mask() will be used to generate the
tx and rx masks.
diff --git a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
index ab2268c0ee67..c5b5b4260496 100644
--- a/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
+++ b/Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
@@ -49,9 +49,8 @@ properties:
0 - Mic bias is set to VREF
1 - Mic bias is set to VREF × 1.096
6 - Mic bias is set to AVDD
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1, 6]
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 6]
ti,vref-source:
description: |
@@ -59,9 +58,57 @@ properties:
0 - Set VREF to 2.75V
1 - Set VREF to 2.5V
2 - Set VREF to 1.375V
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+
+ ti,pdm-edge-select:
+ description: |
+ Defines the PDMCLK sampling edge configuration for the PDM inputs. This
+ array is defined as <PDMIN1 PDMIN2 PDMIN3 PDMIN4>.
+
+ 0 - (default) Odd channel is latched on the negative edge and even
+          channel is latched on the positive edge.
+ 1 - Odd channel is latched on the positive edge and even channel is
+          latched on the negative edge.
+
+ PDMIN1 - PDMCLK latching edge used for channel 1 and 2 data
+ PDMIN2 - PDMCLK latching edge used for channel 3 and 4 data
+ PDMIN3 - PDMCLK latching edge used for channel 5 and 6 data
+ PDMIN4 - PDMCLK latching edge used for channel 7 and 8 data
+
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 4
+ items:
+ maximum: 1
+ default: [0, 0, 0, 0]
+
+ ti,gpi-config:
+ description: |
+ Defines the configuration for the general purpose input pins (GPI).
+ The array is defined as <GPI1 GPI2 GPI3 GPI4>.
+
+ 0 - (default) disabled
+ 1 - GPIX is configured as a general-purpose input (GPI)
+ 2 - GPIX is configured as a master clock input (MCLK)
+ 3 - GPIX is configured as an ASI input for daisy-chain (SDIN)
+      4 - GPIX is configured as a PDM data input for channel 1 and channel 2
+          (PDMDIN1)
+      5 - GPIX is configured as a PDM data input for channel 3 and channel 4
+          (PDMDIN2)
+      6 - GPIX is configured as a PDM data input for channel 5 and channel 6
+          (PDMDIN3)
+      7 - GPIX is configured as a PDM data input for channel 7 and channel 8
+          (PDMDIN4)
+
allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [0, 1, 2]
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 4
+ items:
+ maximum: 7
+ default: [0, 0, 0, 0]
required:
- compatible
@@ -77,6 +124,8 @@ examples:
compatible = "ti,tlv320adc5140";
reg = <0x4c>;
ti,mic-bias-source = <6>;
+ ti,pdm-edge-select = <0 1 0 1>;
+ ti,gpi-config = <4 5 6 7>;
reset-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/wlf,arizona.txt b/Documentation/devicetree/bindings/sound/wlf,arizona.txt
deleted file mode 100644
index e172c62dc2df..000000000000
--- a/Documentation/devicetree/bindings/sound/wlf,arizona.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-Cirrus Logic Arizona class audio SoCs
-
-These devices are audio SoCs with extensive digital capabilities and a range
-of analogue I/O.
-
-This document lists sound specific bindings, see the primary binding
-document:
- ../mfd/arizona.txt
-
-Optional properties:
-
- - wlf,inmode : A list of INn_MODE register values, where n is the number
- of input signals. Valid values are 0 (Differential), 1 (Single-ended) and
- 2 (Digital Microphone). If absent, INn_MODE registers set to 0 by default.
- If present, values must be specified less than or equal to the number of
- input signals. If values less than the number of input signals, elements
- that have not been specified are set to 0 by default. Entries are:
- <IN1, IN2, IN3, IN4> (wm5102, wm5110, wm8280, wm8997)
- <IN1A, IN2A, IN1B, IN2B> (wm8998, wm1814)
- - wlf,out-mono : A list of boolean values indicating whether each output is
- mono or stereo. Position within the list indicates the output affected
- (eg. First entry in the list corresponds to output 1). A non-zero value
- indicates a mono output. If present, the number of values should be less
- than or equal to the number of outputs, if less values are supplied the
- additional outputs will be treated as stereo.
-
- - wlf,dmic-ref : DMIC reference voltage source for each input, can be
- selected from either MICVDD or one of the MICBIAS's, defines
- (ARIZONA_DMIC_xxxx) are provided in <dt-bindings/mfd/arizona.txt>. If
- present, the number of values should be less than or equal to the
- number of inputs, unspecified inputs will use the chip default.
-
- - wlf,max-channels-clocked : The maximum number of channels to be clocked on
- each AIF, useful for I2S systems with multiple data lines being mastered.
- Specify one cell for each AIF to be configured, specify zero for AIFs that
- should be handled normally.
- If present, number of cells must be less than or equal to the number of
- AIFs. If less than the number of AIFs, for cells that have not been
- specified the corresponding AIFs will be treated as default setting.
-
- - wlf,spk-fmt : PDM speaker data format, must contain 2 cells (OUT5 and OUT6).
- See the datasheet for values.
- The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
- wm8998, wm1814)
-
- - wlf,spk-mute : PDM speaker mute setting, must contain 2 cells (OUT5 and OUT6).
- See the datasheet for values.
- The second cell is ignored for codecs that do not have OUT6 (wm5102, wm8997,
- wm8998, wm1814)
-
- - wlf,out-volume-limit : The volume limit value that should be applied to each
- output channel. See the datasheet for exact values. Channels are specified
- in the order OUT1L, OUT1R, OUT2L, OUT2R, etc.
diff --git a/Documentation/devicetree/bindings/sound/wlf,arizona.yaml b/Documentation/devicetree/bindings/sound/wlf,arizona.yaml
new file mode 100644
index 000000000000..22d54be7900a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wlf,arizona.yaml
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/wlf,arizona.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic/Wolfson Microelectronics Arizona class audio SoCs
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ These devices are audio SoCs with extensive digital capabilities and a range
+ of analogue I/O.
+
+ This document lists sound specific bindings, see the primary binding
+ document ../mfd/arizona.yaml
+
+properties:
+ '#sound-dai-cells':
+ description:
+ The first cell indicating the audio interface.
+ const: 1
+
+ wlf,inmode:
+ description:
+ A list of INn_MODE register values, where n is the number of input
+ signals. Valid values are 0 (Differential), 1 (Single-ended) and
+      2 (Digital Microphone). If absent, INn_MODE registers are set to 0 by
+      default. If present, the number of values must be less than or equal
+      to the number of input signals. If fewer values than input signals are
+      supplied, elements that have not been specified are set to 0 by
+ default. Entries are <IN1, IN2, IN3, IN4> (wm5102, wm5110, wm8280,
+ wm8997) and <IN1A, IN2A, IN1B, IN2B> (wm8998, wm1814)
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 2
+ default: 0
+
+ wlf,out-mono:
+ description:
+ A list of boolean values indicating whether each output is mono
+ or stereo. Position within the list indicates the output affected
+ (eg. First entry in the list corresponds to output 1). A non-zero
+ value indicates a mono output. If present, the number of values
+      should be less than or equal to the number of outputs; if fewer values
+      are supplied, the additional outputs will be treated as stereo.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 6
+ items:
+ minimum: 0
+ maximum: 1
+ default: 0
+
+ wlf,dmic-ref:
+ description:
+ DMIC reference voltage source for each input, can be selected from
+ either MICVDD or one of the MICBIAS's, defines (ARIZONA_DMIC_xxxx)
+ are provided in dt-bindings/mfd/arizona.h. If present, the number
+ of values should be less than or equal to the number of inputs,
+ unspecified inputs will use the chip default.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 3
+ default: 0
+
+ wlf,max-channels-clocked:
+ description:
+ The maximum number of channels to be clocked on each AIF, useful for
+ I2S systems with multiple data lines being mastered. Specify one
+ cell for each AIF to be configured, specify zero for AIFs that should
+ be handled normally. If present, number of cells must be less than
+ or equal to the number of AIFs. If less than the number of AIFs, for
+ cells that have not been specified the corresponding AIFs will be
+ treated as default setting.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 3
+ items:
+ default: 0
+
+ wlf,spk-fmt:
+ description:
+ PDM speaker data format, must contain 2 cells (OUT5 and OUT6). See
+ the datasheet for values. The second cell is ignored for codecs that
+ do not have OUT6 (wm5102, wm8997, wm8998, wm1814)
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+
+ wlf,spk-mute:
+ description:
+ PDM speaker mute setting, must contain 2 cells (OUT5 and OUT6). See
+ the datasheet for values. The second cell is ignored for codecs that
+ do not have OUT6 (wm5102, wm8997, wm8998, wm1814)
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+
+ wlf,out-volume-limit:
+ description:
+ The volume limit value that should be applied to each output
+ channel. See the datasheet for exact values. Channels are specified
+ in the order OUT1L, OUT1R, OUT2L, OUT2R, etc.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 12
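The schema carries no examples block; purely as a sketch, the properties above might be set on an Arizona codec node roughly as follows (the compatible, register value and property values are assumptions, and the full device node is described by ../mfd/arizona.yaml):

    wm5102: codec@1 {
            compatible = "wlf,wm5102";
            reg = <1>;
            #sound-dai-cells = <1>;

            wlf,inmode = <2 0 2 0>;             /* IN1/IN3 are digital mics */
            wlf,dmic-ref = <1 0 1 0>;
            wlf,out-mono = <0 0 1>;             /* OUT3 is mono */
            wlf,max-channels-clocked = <2 0 0>;
            wlf,out-volume-limit = <0x50 0x50>;
    };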
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
index 68cccc4653ba..367b58ce1bb9 100644
--- a/Documentation/devicetree/bindings/sound/wm8994.txt
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -14,9 +14,15 @@ Required properties:
- #gpio-cells : Must be 2. The first cell is the pin number and the
second cell is used to specify optional parameters (currently unused).
- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
- in Documentation/devicetree/bindings/regulator/regulator.txt
+ - power supplies for the device, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt, depending
+ on compatible:
+ - for wlf,wm1811 and wlf,wm8958:
+ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
+ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
+ - for wlf,wm8994:
+ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
+ SPKVDD1-supply, SPKVDD2-supply
Optional properties:
@@ -73,11 +79,11 @@ wm8994: codec@1a {
lineout1-se;
+ AVDD1-supply = <&regulator>;
AVDD2-supply = <&regulator>;
CPVDD-supply = <&regulator>;
- DBVDD1-supply = <&regulator>;
- DBVDD2-supply = <&regulator>;
- DBVDD3-supply = <&regulator>;
+ DBVDD-supply = <&regulator>;
+ DCVDD-supply = <&regulator>;
SPKVDD1-supply = <&regulator>;
SPKVDD2-supply = <&regulator>;
};
diff --git a/Documentation/devicetree/bindings/sound/zl38060.yaml b/Documentation/devicetree/bindings/sound/zl38060.yaml
new file mode 100644
index 000000000000..338e2a13c775
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/zl38060.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/zl38060.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ZL38060 Connected Home Audio Processor from Microsemi.
+
+description: |
+ The ZL38060 is a "Connected Home Audio Processor" from Microsemi,
+ which consists of a Digital Signal Processor (DSP), several Digital
+ Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs.
+
+maintainers:
+ - Jaroslav Kysela <perex@perex.cz>
+ - Takashi Iwai <tiwai@suse.com>
+
+properties:
+ compatible:
+ const: mscc,zl38060
+
+ reg:
+ description:
+ SPI device address.
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 24000000
+
+ reset-gpios:
+ description:
+ A GPIO line handling reset of the chip. As the line is active low,
+ it should be marked GPIO_ACTIVE_LOW (see ../gpio/gpio.txt)
+ maxItems: 1
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-controller: true
+
+ '#sound-dai-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - '#gpio-cells'
+ - gpio-controller
+ - '#sound-dai-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ codec: zl38060@0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ #sound-dai-cells = <0>;
+ compatible = "mscc,zl38060";
+ reg = <0>;
+ spi-max-frequency = <12000000>;
+ reset-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml b/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml
new file mode 100644
index 000000000000..0abcac385e7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019,2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/marvell,mmp2-ssp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PXA2xx SSP SPI Controller bindings
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ const: marvell,mmp2-ssp
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ ready-gpios:
+ description: |
+ GPIO used to signal a SPI master that the FIFO is filled and we're
+ ready to service a transfer. Only useful in slave mode.
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+dependencies:
+ ready-gpios: [ spi-slave ]
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/marvell,mmp2.h>
+ spi@d4035000 {
+ compatible = "marvell,mmp2-ssp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xd4035000 0x1000>;
+ clocks = <&soc_clocks MMP2_CLK_SSP0>;
+ interrupts = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
index 5c16cf59ca00..0178831b0662 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
@@ -8,12 +8,12 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Qualcomm Quad Serial Peripheral Interface (QSPI)
maintainers:
- - Mukesh Savaliya <msavaliy@codeaurora.org>
- - Akash Asthana <akashast@codeaurora.org>
+ - Mukesh Savaliya <msavaliy@codeaurora.org>
+ - Akash Asthana <akashast@codeaurora.org>
-description:
- The QSPI controller allows SPI protocol communication in single, dual, or quad
- wire transmission modes for read/write access to slaves such as NOR flash.
+description: The QSPI controller allows SPI protocol communication in single,
+ dual, or quad wire transmission modes for read/write access to slaves such
+ as NOR flash.
allOf:
- $ref: /spi/spi-controller.yaml#
diff --git a/Documentation/devicetree/bindings/spi/renesas,hspi.yaml b/Documentation/devicetree/bindings/spi/renesas,hspi.yaml
index c429cf4bea5b..f492cb9fea12 100644
--- a/Documentation/devicetree/bindings/spi/renesas,hspi.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,hspi.yaml
@@ -16,8 +16,8 @@ properties:
compatible:
items:
- enum:
- - renesas,hspi-r8a7778 # R-Car M1A
- - renesas,hspi-r8a7779 # R-Car H1
+ - renesas,hspi-r8a7778 # R-Car M1A
+ - renesas,hspi-r8a7779 # R-Car H1
- const: renesas,hspi
reg:
diff --git a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
index b6c1dd2a9c5e..e84edcf8b332 100644
--- a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
@@ -96,43 +96,39 @@ properties:
renesas,dtdl:
description: delay sync signal (setup) in transmit mode.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum:
- - 0 # no bit delay
- - 50 # 0.5-clock-cycle delay
- - 100 # 1-clock-cycle delay
- - 150 # 1.5-clock-cycle delay
- - 200 # 2-clock-cycle delay
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # no bit delay
+ - 50 # 0.5-clock-cycle delay
+ - 100 # 1-clock-cycle delay
+ - 150 # 1.5-clock-cycle delay
+ - 200 # 2-clock-cycle delay
renesas,syncdl:
description: delay sync signal (hold) in transmit mode
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum:
- - 0 # no bit delay
- - 50 # 0.5-clock-cycle delay
- - 100 # 1-clock-cycle delay
- - 150 # 1.5-clock-cycle delay
- - 200 # 2-clock-cycle delay
- - 300 # 3-clock-cycle delay
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # no bit delay
+ - 50 # 0.5-clock-cycle delay
+ - 100 # 1-clock-cycle delay
+ - 150 # 1.5-clock-cycle delay
+ - 200 # 2-clock-cycle delay
+ - 300 # 3-clock-cycle delay
renesas,tx-fifo-size:
# deprecated for soctype-specific bindings
description: |
Override the default TX fifo size. Unit is words. Ignored if 0.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
default: 64
renesas,rx-fifo-size:
# deprecated for soctype-specific bindings
description: |
Override the default RX fifo size. Unit is words. Ignored if 0.
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - maxItems: 1
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maxItems: 1
default: 64
required:
@@ -149,7 +145,7 @@ examples:
msiof0: spi@e6e20000 {
compatible = "renesas,msiof-r8a7791", "renesas,rcar-gen2-msiof";
- reg = <0 0xe6e20000 0 0x0064>;
+ reg = <0xe6e20000 0x0064>;
interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
dmas = <&dmac0 0x51>, <&dmac0 0x52>;
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index d8e5509a7081..c6a2f543648b 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -115,24 +115,22 @@ patternProperties:
Maximum SPI clocking speed of the device in Hz.
spi-rx-bus-width:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4, 8 ]
- - default: 1
description:
Bus width to the SPI bus used for read transfers.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8]
+ default: 1
spi-rx-delay-us:
description:
Delay, in microseconds, after a read transfer.
spi-tx-bus-width:
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - enum: [ 1, 2, 4, 8 ]
- - default: 1
description:
Bus width to the SPI bus used for write transfers.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 4, 8]
+ default: 1
spi-tx-delay-us:
description:
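For reference, a peripheral node using the bus-width properties could be sketched like this; the flash compatible and clock rate are assumptions:

    flash@0 {
            compatible = "jedec,spi-nor";
            reg = <0>;
            spi-max-frequency = <50000000>;
            spi-tx-bus-width = <1>;
            spi-rx-bus-width = <4>;
    };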
diff --git a/Documentation/devicetree/bindings/spi/spi-pl022.yaml b/Documentation/devicetree/bindings/spi/spi-pl022.yaml
index dfb697c69341..22999024477f 100644
--- a/Documentation/devicetree/bindings/spi/spi-pl022.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-pl022.yaml
@@ -51,7 +51,7 @@ properties:
pl022,rt:
description: indicates the controller should run the message pump with realtime
- priority to minimise the transfer latency on the bus (boolean)
+ priority to minimise the transfer latency on the bus (boolean)
type: boolean
dmas:
@@ -80,55 +80,48 @@ patternProperties:
properties:
pl022,interface:
description: SPI interface type
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum:
- - 0 # SPI
- - 1 # Texas Instruments Synchronous Serial Frame Format
- - 2 # Microwire (Half Duplex)
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum:
+ - 0 # SPI
+ - 1 # Texas Instruments Synchronous Serial Frame Format
+ - 2 # Microwire (Half Duplex)
pl022,com-mode:
description: Specifies the transfer mode
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum:
- - 0 # interrupt mode
- - 1 # polling mode
- - 2 # DMA mode
- default: 1
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum:
+ - 0 # interrupt mode
+ - 1 # polling mode
+ - 2 # DMA mode
+ default: 1
pl022,rx-level-trig:
description: Rx FIFO watermark level
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 4
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 4
pl022,tx-level-trig:
description: Tx FIFO watermark level
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 4
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 4
pl022,ctrl-len:
description: Microwire interface - Control length
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0x03
- maximum: 0x1f
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0x03
+ maximum: 0x1f
pl022,wait-state:
description: Microwire interface - Wait state
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum: [ 0, 1 ]
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [0, 1]
pl022,duplex:
description: Microwire interface - Full/Half duplex
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum: [ 0, 1 ]
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [0, 1]
required:
- compatible
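A controller with one child device using the per-device pl022 properties might be sketched as follows; the controller address, interrupt cells and the child device are assumptions:

    spi@80002000 {
            compatible = "arm,pl022", "arm,primecell";
            reg = <0x80002000 0x1000>;
            interrupts = <0 8 4>;
            #address-cells = <1>;
            #size-cells = <0>;

            flash@0 {
                    compatible = "jedec,spi-nor";
                    reg = <0>;
                    spi-max-frequency = <12000000>;
                    pl022,interface = <0>;      /* SPI */
                    pl022,com-mode = <2>;       /* DMA mode */
                    pl022,rx-level-trig = <0>;
                    pl022,tx-level-trig = <0>;
            };
    };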
diff --git a/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt b/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
deleted file mode 100644
index e30e0c2a4bce..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-PXA2xx SSP SPI Controller
-
-Required properties:
-- compatible: Must be "marvell,mmp2-ssp".
-- reg: Offset and length of the device's register set.
-- interrupts: Should be the interrupt number.
-- clocks: Should contain a single entry describing the clock input.
-- #address-cells: Number of cells required to define a chip select address.
-- #size-cells: Should be zero.
-
-Optional properties:
-- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
- Documentation/devicetree/bindings/spi/spi-bus.txt
-- spi-slave: Empty property indicating the SPI controller is used in slave mode.
-- ready-gpios: GPIO used to signal a SPI master that the FIFO is filled
- and we're ready to service a transfer. Only useful in slave mode.
-
-Child nodes represent devices on the SPI bus
- See ../spi/spi-bus.txt
-
-Example:
- ssp1: spi@d4035000 {
- compatible = "marvell,mmp2-ssp";
- reg = <0xd4035000 0x1000>;
- clocks = <&soc_clocks MMP2_CLK_SSP0>;
- interrupts = <0>;
- };
diff --git a/Documentation/devicetree/bindings/spi/spi-sifive.yaml b/Documentation/devicetree/bindings/spi/spi-sifive.yaml
index 140e4351a19f..4932205d1cba 100644
--- a/Documentation/devicetree/bindings/spi/spi-sifive.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-sifive.yaml
@@ -32,11 +32,10 @@ properties:
https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi
reg:
- maxItems: 1
-
- description:
- Physical base address and size of SPI registers map
- A second (optional) range can indicate memory mapped flash
+ minItems: 1
+ items:
+ - description: SPI registers region
+ - description: Memory mapped flash region
interrupts:
maxItems: 1
@@ -50,18 +49,16 @@ properties:
sifive,fifo-depth:
description:
Depth of hardware queues; defaults to 8
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum: [ 8 ]
- - default: 8
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [8]
+ default: 8
sifive,max-bits-per-word:
description:
Maximum bits per word; defaults to 8
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - enum: [ 0, 1, 2, 3, 4, 5, 6, 7, 8 ]
- - default: 8
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ enum: [0, 1, 2, 3, 4, 5, 6, 7, 8]
+ default: 8
required:
- compatible
@@ -73,7 +70,7 @@ examples:
- |
spi: spi@10040000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0";
- reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>;
+ reg = <0x10040000 0x1000>, <0x20000000 0x10000000>;
interrupt-parent = <&plic>;
interrupts = <51>;
clocks = <&tlclk>;
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
index 3665a5fe6b7f..1a342ce1f798 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-qspi.yaml
@@ -24,8 +24,8 @@ properties:
reg-names:
items:
- - const: qspi
- - const: qspi_mm
+ - const: qspi
+ - const: qspi_mm
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index 4b5509436588..f5825935fd22 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -29,8 +29,8 @@ properties:
- const: allwinner,sun4i-a10-system-control
- const: allwinner,sun5i-a13-system-control
- items:
- - const: allwinner,sun7i-a20-system-control
- - const: allwinner,sun4i-a10-system-control
+ - const: allwinner,sun7i-a20-system-control
+ - const: allwinner,sun4i-a10-system-control
- const: allwinner,sun8i-a23-system-control
- const: allwinner,sun8i-h3-system-control
- const: allwinner,sun50i-a64-sram-controller
@@ -38,11 +38,11 @@ properties:
- const: allwinner,sun50i-a64-system-control
- const: allwinner,sun50i-h5-system-control
- items:
- - const: allwinner,sun50i-h6-system-control
- - const: allwinner,sun50i-a64-system-control
+ - const: allwinner,sun50i-h6-system-control
+ - const: allwinner,sun50i-a64-system-control
- items:
- - const: allwinner,suniv-f1c100s-system-control
- - const: allwinner,sun4i-a10-system-control
+ - const: allwinner,suniv-f1c100s-system-control
+ - const: allwinner,sun4i-a10-system-control
reg:
maxItems: 1
@@ -69,44 +69,44 @@ patternProperties:
- const: allwinner,sun4i-a10-sram-d
- const: allwinner,sun50i-a64-sram-c
- items:
- - const: allwinner,sun5i-a13-sram-a3-a4
- - const: allwinner,sun4i-a10-sram-a3-a4
+ - const: allwinner,sun5i-a13-sram-a3-a4
+ - const: allwinner,sun4i-a10-sram-a3-a4
- items:
- - const: allwinner,sun7i-a20-sram-a3-a4
- - const: allwinner,sun4i-a10-sram-a3-a4
+ - const: allwinner,sun7i-a20-sram-a3-a4
+ - const: allwinner,sun4i-a10-sram-a3-a4
- items:
- - const: allwinner,sun5i-a13-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun5i-a13-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun7i-a20-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun7i-a20-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun8i-a23-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun8i-a23-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun8i-h3-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun8i-h3-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun50i-a64-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun50i-a64-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun50i-h5-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun50i-h5-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun50i-h6-sram-c1
- - const: allwinner,sun4i-a10-sram-c1
+ - const: allwinner,sun50i-h6-sram-c1
+ - const: allwinner,sun4i-a10-sram-c1
- items:
- - const: allwinner,sun5i-a13-sram-d
- - const: allwinner,sun4i-a10-sram-d
+ - const: allwinner,sun5i-a13-sram-d
+ - const: allwinner,sun4i-a10-sram-d
- items:
- - const: allwinner,sun7i-a20-sram-d
- - const: allwinner,sun4i-a10-sram-d
+ - const: allwinner,sun7i-a20-sram-d
+ - const: allwinner,sun4i-a10-sram-d
- items:
- - const: allwinner,suniv-f1c100s-sram-d
- - const: allwinner,sun4i-a10-sram-d
+ - const: allwinner,suniv-f1c100s-sram-d
+ - const: allwinner,sun4i-a10-sram-d
- items:
- - const: allwinner,sun50i-h6-sram-c
- - const: allwinner,sun50i-a64-sram-c
+ - const: allwinner,sun50i-h6-sram-c
+ - const: allwinner,sun50i-a64-sram-c
required:
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/sram/rockchip-pmu-sram.txt b/Documentation/devicetree/bindings/sram/rockchip-pmu-sram.txt
deleted file mode 100644
index 6b42fda306ff..000000000000
--- a/Documentation/devicetree/bindings/sram/rockchip-pmu-sram.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Rockchip SRAM for pmu:
-------------------------------
-
-The sram of pmu is used to store the function of resume from maskrom(the 1st
-level loader). This is a common use of the "pmu-sram" because it keeps power
-even in low power states in the system.
-
-Required node properties:
-- compatible : should be "rockchip,rk3288-pmu-sram"
-- reg : physical base address and the size of the registers window
-
-Example:
- sram@ff720000 {
- compatible = "rockchip,rk3288-pmu-sram", "mmio-sram";
- reg = <0xff720000 0x1000>;
- };
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
index 7b83cc6c9bfa..19d116ff9ddc 100644
--- a/Documentation/devicetree/bindings/sram/sram.yaml
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -29,6 +29,7 @@ properties:
enum:
- mmio-sram
- atmel,sama5d2-securam
+ - rockchip,rk3288-pmu-sram
reg:
maxItems: 1
@@ -73,6 +74,8 @@ patternProperties:
- allwinner,sun50i-a64-sram-c
- amlogic,meson8-smp-sram
- amlogic,meson8b-smp-sram
+ - amlogic,meson-gxbb-scp-shmem
+ - amlogic,meson-axg-scp-shmem
- renesas,smp-sram
- rockchip,rk3066-smp-sram
- samsung,exynos4210-sysram
@@ -118,9 +121,18 @@ patternProperties:
required:
- compatible
- reg
- - "#address-cells"
- - "#size-cells"
- - ranges
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3288-pmu-sram
+
+else:
+ required:
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
additionalProperties: false
@@ -224,6 +236,16 @@ examples:
};
- |
+ // Rockchip's rk3288 SoC uses the sram of pmu to store the function of
+ // resume from maskrom(the 1st level loader). This is a common use of
+ // the "pmu-sram" because it keeps power even in low power states
+ // in the system.
+ sram@ff720000 {
+ compatible = "rockchip,rk3288-pmu-sram", "mmio-sram";
+ reg = <0xff720000 0x1000>;
+ };
+
+ - |
// Allwinner's A80 SoC uses part of the secure sram for hotplugging of the
// primary core (cpu0). Once the core gets powered up it checks if a magic
// value is set at a specific location. If it is then the BROM will jump
diff --git a/Documentation/devicetree/bindings/submitting-patches.rst b/Documentation/devicetree/bindings/submitting-patches.rst
new file mode 100644
index 000000000000..0aab2b3f16d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/submitting-patches.rst
@@ -0,0 +1,91 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================================
+Submitting devicetree (DT) binding patches
+==========================================
+
+I. For patch submitters
+=======================
+
+ 0) Normal patch submission rules from Documentation/process/submitting-patches.rst
+ apply.
+
+ 1) The Documentation/ and include/dt-bindings/ portion of the patch should
+ be a separate patch. The preferred subject prefix for binding patches is::
+
+ "dt-bindings: <binding dir>: ..."
+
+ The 80 characters of the subject are precious. It is recommended to not
+ use "Documentation" or "doc" because that is implied. All bindings are
+ docs. Repeating "binding" again should also be avoided.
+
+ 2) DT binding files are written in DT schema format using json-schema
+ vocabulary and YAML file format. The DT binding files must pass validation
+ by running::
+
+ make dt_binding_check
+
+ See ../writing-schema.rst for more details about schema and tools setup.
+
+ 3) DT binding files should be dual licensed. The preferred license tag is
+ (GPL-2.0-only OR BSD-2-Clause).
+
+ 4) Submit the entire series to the devicetree mailinglist at
+
+ devicetree@vger.kernel.org
+
+ and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
+ all of the DT maintainers.
+
+ 5) The Documentation/ portion of the patch should come in the series before
+ the code implementing the binding.
+
+ 6) Any compatible strings used in a chip or board DTS file must be
+ previously documented in the corresponding DT binding text file
+ in Documentation/devicetree/bindings. This rule applies even if
+ the Linux device driver does not yet match on the compatible
+ string. [ checkpatch will emit warnings if this step is not
+ followed as of commit bff5da4335256513497cc8c79f9a9d1665e09864
+ ("checkpatch: add DT compatible string documentation checks"). ]
+
+ 7) The wildcard "<chip>" may be used in compatible strings, as in
+ the following example:
+
+ - compatible: Must contain '"nvidia,<chip>-pcie",
+ "nvidia,tegra20-pcie"' where <chip> is tegra30, tegra132, ...
+
+ As in the above example, the known values of "<chip>" should be
+ documented if it is used.
+
+ 8) If a documented compatible string is not yet matched by the
+ driver, the documentation should also include a compatible
+ string that is matched by the driver (as in the "nvidia,tegra20-pcie"
+ example above).
+
+
+II. For kernel maintainers
+==========================
+
+ 1) If you aren't comfortable reviewing a given binding, reply to it and ask
+ the devicetree maintainers for guidance. This will help them prioritize
+ which ones to review and which ones are ok to let go.
+
+ 2) For driver (not subsystem) bindings: If you are comfortable with the
+ binding, and it hasn't received an Acked-by from the devicetree
+ maintainers after a few weeks, go ahead and take it.
+
+ For subsystem bindings (anything affecting more than a single device),
+ getting a devicetree maintainer to review it is required.
+
+ 3) For a series going through multiple trees, the binding patch should be
+ kept with the driver using the binding.
+
+III. Notes
+==========
+
+ 0) Please see ...bindings/ABI.txt for details regarding devicetree ABI.
+
+ 1) This document is intended as a general familiarization with the process as
+ decided at the 2013 Kernel Summit. When in doubt, the current word of the
+ devicetree maintainers overrules this document. In that situation, a patch
+ updating this document would be appreciated.
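To make rules 7 and 8 above concrete, here is a minimal DTS sketch (not part of the binding examples) of a node that lists a wildcard-expanded, SoC-specific compatible first and the driver-matched fallback "nvidia,tegra20-pcie" second; the node name, unit address and reg value are placeholders rather than values taken from any binding.

    pcie@3000 {
            /* SoC-specific string first, then the fallback the driver matches */
            compatible = "nvidia,tegra30-pcie", "nvidia,tegra20-pcie";
            reg = <0x3000 0x800>;   /* placeholder address and size */
    };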
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
deleted file mode 100644
index 98bee6240b65..000000000000
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-
- Submitting devicetree (DT) binding patches
-
-I. For patch submitters
-
- 0) Normal patch submission rules from Documentation/process/submitting-patches.rst
- applies.
-
- 1) The Documentation/ and include/dt-bindings/ portion of the patch should
- be a separate patch. The preferred subject prefix for binding patches is:
-
- "dt-bindings: <binding dir>: ..."
-
- The 80 characters of the subject are precious. It is recommended to not
- use "Documentation" or "doc" because that is implied. All bindings are
- docs. Repeating "binding" again should also be avoided.
-
- 2) DT binding files are written in DT schema format using json-schema
- vocabulary and YAML file format. The DT binding files must pass validation
- by running:
-
- make dt_binding_check
-
- See ../writing-schema.rst for more details about schema and tools setup.
-
- 3) DT binding files should be dual licensed. The preferred license tag is
- (GPL-2.0-only OR BSD-2-Clause).
-
- 4) Submit the entire series to the devicetree mailinglist at
-
- devicetree@vger.kernel.org
-
- and Cc: the DT maintainers. Use scripts/get_maintainer.pl to identify
- all of the DT maintainers.
-
- 5) The Documentation/ portion of the patch should come in the series before
- the code implementing the binding.
-
- 6) Any compatible strings used in a chip or board DTS file must be
- previously documented in the corresponding DT binding text file
- in Documentation/devicetree/bindings. This rule applies even if
- the Linux device driver does not yet match on the compatible
- string. [ checkpatch will emit warnings if this step is not
- followed as of commit bff5da4335256513497cc8c79f9a9d1665e09864
- ("checkpatch: add DT compatible string documentation checks"). ]
-
- 7) The wildcard "<chip>" may be used in compatible strings, as in
- the following example:
-
- - compatible: Must contain '"nvidia,<chip>-pcie",
- "nvidia,tegra20-pcie"' where <chip> is tegra30, tegra132, ...
-
- As in the above example, the known values of "<chip>" should be
- documented if it is used.
-
- 8) If a documented compatible string is not yet matched by the
- driver, the documentation should also include a compatible
- string that is matched by the driver (as in the "nvidia,tegra20-pcie"
- example above).
-
-
-II. For kernel maintainers
-
- 1) If you aren't comfortable reviewing a given binding, reply to it and ask
- the devicetree maintainers for guidance. This will help them prioritize
- which ones to review and which ones are ok to let go.
-
- 2) For driver (not subsystem) bindings: If you are comfortable with the
- binding, and it hasn't received an Acked-by from the devicetree
- maintainers after a few weeks, go ahead and take it.
-
- Subsystem bindings (anything affecting more than a single device)
- then getting a devicetree maintainer to review it is required.
-
- 3) For a series going though multiple trees, the binding patch should be
- kept with the driver using the binding.
-
-III. Notes
-
- 0) Please see ...bindings/ABI.txt for details regarding devicetree ABI.
-
- 1) This document is intended as a general familiarization with the process as
- decided at the 2013 Kernel Summit. When in doubt, the current word of the
- devicetree maintainers overrules this document. In that situation, a patch
- updating this document would be appreciated.
diff --git a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
index e43ec50bda37..999c6b365f1d 100644
--- a/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/amlogic,thermal.yaml
@@ -13,11 +13,11 @@ description: Binding for Amlogic Thermal
properties:
compatible:
- items:
- - enum:
- - amlogic,g12a-cpu-thermal
- - amlogic,g12a-ddr-thermal
- - const: amlogic,g12a-thermal
+ items:
+ - enum:
+ - amlogic,g12a-cpu-thermal
+ - amlogic,g12a-ddr-thermal
+ - const: amlogic,g12a-thermal
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
deleted file mode 100644
index 823e4176eef8..000000000000
--- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-* Temperature Monitor (TEMPMON) on Freescale i.MX SoCs
-
-Required properties:
-- compatible : must be one of following:
- - "fsl,imx6q-tempmon" for i.MX6Q,
- - "fsl,imx6sx-tempmon" for i.MX6SX,
- - "fsl,imx7d-tempmon" for i.MX7S/D.
-- interrupts : the interrupt output of the controller:
- i.MX6Q has one IRQ which will be triggered when temperature is higher than high threshold,
- i.MX6SX and i.MX7S/D have two more IRQs than i.MX6Q, one is IRQ_LOW and the other is IRQ_PANIC,
- when temperature is below than low threshold, IRQ_LOW will be triggered, when temperature
- is higher than panic threshold, system will auto reboot by SRC module.
-- fsl,tempmon : phandle pointer to system controller that contains TEMPMON
- control registers, e.g. ANATOP on imx6q.
-- nvmem-cells: A phandle to the calibration cells provided by ocotp.
-- nvmem-cell-names: Should be "calib", "temp_grade".
-
-Deprecated properties:
-- fsl,tempmon-data : phandle pointer to fuse controller that contains TEMPMON
- calibration data, e.g. OCOTP on imx6q. The details about calibration data
- can be found in SoC Reference Manual.
-
-Direct access to OCOTP via fsl,tempmon-data is incorrect on some newer chips
-because it does not handle OCOTP clock requirements.
-
-Optional properties:
-- clocks : thermal sensor's clock source.
-
-Example:
-ocotp: ocotp@21bc000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,imx6sx-ocotp", "syscon";
- reg = <0x021bc000 0x4000>;
- clocks = <&clks IMX6SX_CLK_OCOTP>;
-
- tempmon_calib: calib@38 {
- reg = <0x38 4>;
- };
-
- tempmon_temp_grade: temp-grade@20 {
- reg = <0x20 4>;
- };
-};
-
-tempmon: tempmon {
- compatible = "fsl,imx6sx-tempmon", "fsl,imx6q-tempmon";
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- fsl,tempmon = <&anatop>;
- nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
- nvmem-cell-names = "calib", "temp_grade";
- clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
-};
-
-Legacy method (Deprecated):
-tempmon {
- compatible = "fsl,imx6q-tempmon";
- fsl,tempmon = <&anatop>;
- fsl,tempmon-data = <&ocotp>;
- clocks = <&clks 172>;
-};
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml
new file mode 100644
index 000000000000..aedac1669998
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/imx-thermal.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/imx-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX Thermal Binding
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+ - Anson Huang <Anson.Huang@nxp.com>
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx6q-tempmon
+ - fsl,imx6sx-tempmon
+ - fsl,imx7d-tempmon
+
+ interrupts:
+ description: |
+ The interrupt output of the controller. The i.MX6Q has IRQ_HIGH,
+ which is triggered when the temperature rises above the high
+ threshold. The i.MX6SX and i.MX7S/D have two more IRQs than the
+ i.MX6Q: IRQ_LOW, triggered when the temperature falls below the
+ low threshold, and IRQ_PANIC, triggered when the temperature rises
+ above the panic threshold; the system can be configured to reboot
+ automatically via the SRC module on IRQ_PANIC. IRQ_HIGH, IRQ_LOW
+ and IRQ_PANIC share the same interrupt output of the controller.
+ maxItems: 1
+
+ nvmem-cells:
+ items:
+ - description: Phandle to the calibration data provided by ocotp
+ - description: Phandle to the temperature grade provided by ocotp
+
+ nvmem-cell-names:
+ items:
+ - const: calib
+ - const: temp_grade
+
+ fsl,tempmon:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: Phandle to anatop system controller node.
+
+ fsl,tempmon-data:
+ $ref: '/schemas/types.yaml#/definitions/phandle'
+ description: |
+ Deprecated property, phandle pointer to fuse controller that contains
+ TEMPMON calibration data, e.g. OCOTP on imx6q. The details about
+ calibration data can be found in SoC Reference Manual.
+ deprecated: true
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - interrupts
+ - fsl,tempmon
+ - nvmem-cells
+ - nvmem-cell-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6sx-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ efuse@21bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,imx6sx-ocotp", "syscon";
+ reg = <0x021bc000 0x4000>;
+ clocks = <&clks IMX6SX_CLK_OCOTP>;
+
+ tempmon_calib: calib@38 {
+ reg = <0x38 4>;
+ };
+
+ tempmon_temp_grade: temp-grade@20 {
+ reg = <0x20 4>;
+ };
+ };
+
+ anatop@20c8000 {
+ compatible = "fsl,imx6q-anatop", "syscon", "simple-mfd";
+ reg = <0x020c8000 0x1000>;
+ interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>,
+ <0 54 IRQ_TYPE_LEVEL_HIGH>,
+ <0 127 IRQ_TYPE_LEVEL_HIGH>;
+
+ tempmon {
+ compatible = "fsl,imx6sx-tempmon";
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,tempmon = <&anatop>;
+ nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+ nvmem-cell-names = "calib", "temp_grade";
+ clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.txt b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.txt
deleted file mode 100644
index 3629d3c7e76a..000000000000
--- a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-* Thermal Monitoring Unit (TMU) on Freescale i.MX8MM SoC
-
-Required properties:
-- compatible : Must be "fsl,imx8mm-tmu" or "fsl,imx8mp-tmu".
-- reg : Address range of TMU registers.
-- clocks : TMU's clock source.
-- #thermal-sensor-cells : Should be 0 or 1. See ./thermal.txt for a description.
-
-Example:
-tmu: tmu@30260000 {
- compatible = "fsl,imx8mm-tmu";
- reg = <0x30260000 0x10000>;
- clocks = <&clk IMX8MM_CLK_TMU_ROOT>;
- #thermal-sensor-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
new file mode 100644
index 000000000000..38852877b8e3
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/imx8mm-thermal.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/imx8mm-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP i.MX8M Mini Thermal Binding
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+description: |
+ The i.MX8MM has a TMU IP block for temperature measurement. There
+ are currently two distinct major versions of the IP, both supported
+ by a single driver. The IP versions are named v1 and v2: v1 is used
+ on the i.MX8MM, which has only one sensor, and v2 on the i.MX8MP,
+ which has two sensors.
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx8mm-tmu
+ - fsl,imx8mp-tmu
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ "#thermal-sensor-cells":
+ description: |
+ Number of cells required to uniquely identify the thermal
+ sensors; 0 if there is only one sensor, 1 if there are multiple.
+ enum:
+ - 0
+ - 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#thermal-sensor-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mm-clock.h>
+
+ thermal-sensor@30260000 {
+ compatible = "fsl,imx8mm-tmu";
+ reg = <0x30260000 0x10000>;
+ clocks = <&clk IMX8MM_CLK_TMU_ROOT>;
+ #thermal-sensor-cells = <0>;
+ };
+
+...
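As a companion to the i.MX8MM example above, a hedged sketch of the v2 (i.MX8MP) case described in the schema, which has two sensors and therefore uses "#thermal-sensor-cells = <1>"; the register address is simply reused from the example above and the clock phandle is a placeholder, not taken from the binding.

    tmu: thermal-sensor@30260000 {
            compatible = "fsl,imx8mp-tmu";
            reg = <0x30260000 0x10000>;     /* address reused from the example above */
            clocks = <&tmu_clk>;            /* placeholder clock phandle */
            #thermal-sensor-cells = <1>;    /* two sensors, so one cell selects the sensor */
    };

A consumer thermal zone would then select a sensor with the extra cell, e.g. "thermal-sensors = <&tmu 1>;".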
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
index 2ddd39d96766..d7be931b42d2 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
@@ -73,12 +73,11 @@ properties:
- const: calib_sel
"#qcom,sensors":
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - minimum: 1
- - maximum: 16
description:
Number of sensors enabled on this platform
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 16
"#thermal-sensor-cells":
const: 1
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
deleted file mode 100644
index 2993fa720195..000000000000
--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-* DT bindings for Renesas R-Car Gen3 Thermal Sensor driver
-
-On R-Car Gen3 SoCs, the thermal sensor controllers (TSC) control the thermal
-sensors (THS) which are the analog circuits for measuring temperature (Tj)
-inside the LSI.
-
-Required properties:
-- compatible : "renesas,<soctype>-thermal",
- Examples with soctypes are:
- - "renesas,r8a774a1-thermal" (RZ/G2M)
- - "renesas,r8a774b1-thermal" (RZ/G2N)
- - "renesas,r8a7795-thermal" (R-Car H3)
- - "renesas,r8a7796-thermal" (R-Car M3-W)
- - "renesas,r8a77961-thermal" (R-Car M3-W+)
- - "renesas,r8a77965-thermal" (R-Car M3-N)
- - "renesas,r8a77980-thermal" (R-Car V3H)
-- reg : Address ranges of the thermal registers. Each sensor
- needs one address range. Sorting must be done in
- increasing order according to datasheet, i.e.
- TSC1, TSC2, ...
-- clocks : Must contain a reference to the functional clock.
-- #thermal-sensor-cells : must be <1>.
-
-Optional properties:
-
-- interrupts : interrupts routed to the TSC (must be 3).
-- power-domain : Must contain a reference to the power domain. This
- property is mandatory if the thermal sensor instance
- is part of a controllable power domain.
-
-Example:
-
- tsc: thermal@e6198000 {
- compatible = "renesas,r8a7795-thermal";
- reg = <0 0xe6198000 0 0x100>,
- <0 0xe61a0000 0 0x100>,
- <0 0xe61a8000 0 0x100>;
- interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 522>;
- power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
- #thermal-sensor-cells = <1>;
- };
-
- thermal-zones {
- sensor_thermal1: sensor-thermal1 {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
- thermal-sensors = <&tsc 0>;
-
- trips {
- sensor1_crit: sensor1-crit {
- temperature = <90000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
new file mode 100644
index 000000000000..b1a55ae497de
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2020 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/rcar-gen3-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas R-Car Gen3 Thermal Sensor
+
+description:
+ On R-Car Gen3 SoCs, the thermal sensor controllers (TSC) control the thermal
+ sensors (THS) which are the analog circuits for measuring temperature (Tj)
+ inside the LSI.
+
+maintainers:
+ - Niklas Söderlund <niklas.soderlund@ragnatech.se>
+
+properties:
+ compatible:
+ enum:
+ - renesas,r8a774a1-thermal # RZ/G2M
+ - renesas,r8a774b1-thermal # RZ/G2N
+ - renesas,r8a7795-thermal # R-Car H3
+ - renesas,r8a7796-thermal # R-Car M3-W
+ - renesas,r8a77961-thermal # R-Car M3-W+
+ - renesas,r8a77965-thermal # R-Car M3-N
+ - renesas,r8a77980-thermal # R-Car V3H
+ reg:
+ minItems: 2
+ maxItems: 3
+ items:
+ - description: TSC1 registers
+ - description: TSC2 registers
+ - description: TSC3 registers
+
+ interrupts:
+ items:
+ - description: TEMP1 interrupt
+ - description: TEMP2 interrupt
+ - description: TEMP3 interrupt
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ "#thermal-sensor-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - resets
+ - "#thermal-sensor-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+
+ tsc: thermal@e6198000 {
+ compatible = "renesas,r8a7795-thermal";
+ reg = <0xe6198000 0x100>,
+ <0xe61a0000 0x100>,
+ <0xe61a8000 0x100>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 522>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 522>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal-zones {
+ sensor_thermal: sensor-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 0>;
+
+ trips {
+ sensor1_crit: sensor1-crit {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
index d2f4f1b063ac..0994693d240f 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
@@ -20,6 +20,7 @@ properties:
- const: renesas,rcar-thermal # Generic without thermal-zone
- items:
- enum:
+ - renesas,thermal-r8a7742 # RZ/G1H
- renesas,thermal-r8a7743 # RZ/G1M
- renesas,thermal-r8a7744 # RZ/G1N
- const: renesas,rcar-gen2-thermal # Generic thermal-zone
@@ -94,8 +95,8 @@ examples:
thermal@e61f0000 {
compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
- reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>,
- <0 0xe61f0200 0 0x38>, <0 0xe61f0300 0 0x38>;
+ reg = <0xe61f0000 0x14>, <0xe61f0100 0x38>,
+ <0xe61f0200 0x38>, <0xe61f0300 0x38>;
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
power-domains = <&pd_c5>;
@@ -111,7 +112,7 @@ examples:
compatible = "renesas,thermal-r8a7790",
"renesas,rcar-gen2-thermal",
"renesas,rcar-thermal";
- reg = <0 0xe61f0000 0 0x10>, <0 0xe61f0100 0 0x38>;
+ reg = <0xe61f0000 0x10>, <0xe61f0100 0x38>;
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 522>;
power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
diff --git a/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml b/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml
new file mode 100644
index 000000000000..bb9594bb2cf1
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/socionext,uniphier-thermal.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/socionext,uniphier-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier thermal monitor
+
+description: |
+ This describes the devicetree bindings for the thermal monitor supported
+ by the PVT (Process, Voltage and Temperature) monitoring unit implemented
+ on Socionext UniPhier SoCs.
+
+maintainers:
+ - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+
+properties:
+ compatible:
+ enum:
+ - socionext,uniphier-pxs2-thermal
+ - socionext,uniphier-ld20-thermal
+ - socionext,uniphier-pxs3-thermal
+
+ interrupts:
+ maxItems: 1
+
+ "#thermal-sensor-cells":
+ const: 0
+
+ socionext,tmod-calibration:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - maxItems: 2
+ description:
+ A pair of calibration values referenced by the PVT, for the case where
+ the values are not set on the SoC, such as on a reference board.
+
+required:
+ - compatible
+ - interrupts
+ - "#thermal-sensor-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ // The UniPhier thermal should be a subnode of a "syscon" compatible node.
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-ld20-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x10000>;
+
+ pvtctl: thermal {
+ compatible = "socionext,uniphier-ld20-thermal";
+ interrupts = <0 3 1>;
+ #thermal-sensor-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml b/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml
index 058c4cc06ba6..af2ff930646a 100644
--- a/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/sprd-thermal.yaml
@@ -83,7 +83,7 @@ examples:
- |
ap_thm0: thermal@32200000 {
compatible = "sprd,ums512-thermal";
- reg = <0 0x32200000 0 0x10000>;
+ reg = <0x32200000 0x10000>;
clock-names = "enable";
clocks = <&aonapb_gate 32>;
#thermal-sensor-cells = <1>;
diff --git a/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt b/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt
deleted file mode 100644
index ceb92a95727a..000000000000
--- a/Documentation/devicetree/bindings/thermal/uniphier-thermal.txt
+++ /dev/null
@@ -1,65 +0,0 @@
-* UniPhier Thermal bindings
-
-This describes the devicetree bindings for thermal monitor supported by
-PVT(Process, Voltage and Temperature) monitoring unit implemented on Socionext
-UniPhier SoCs.
-
-Required properties:
-- compatible :
- - "socionext,uniphier-pxs2-thermal" : For UniPhier PXs2 SoC
- - "socionext,uniphier-ld20-thermal" : For UniPhier LD20 SoC
- - "socionext,uniphier-pxs3-thermal" : For UniPhier PXs3 SoC
-- interrupts : IRQ for the temperature alarm
-- #thermal-sensor-cells : Should be 0. See ./thermal.txt for details.
-
-Optional properties:
-- socionext,tmod-calibration: A pair of calibrated values referred from PVT,
- in case that the values aren't set on SoC,
- like a reference board.
-
-Example:
-
- sysctrl@61840000 {
- compatible = "socionext,uniphier-ld20-sysctrl",
- "simple-mfd", "syscon";
- reg = <0x61840000 0x10000>;
- ...
- pvtctl: pvtctl {
- compatible = "socionext,uniphier-ld20-thermal";
- interrupts = <0 3 1>;
- #thermal-sensor-cells = <0>;
- };
- ...
- };
-
- thermal-zones {
- cpu_thermal {
- polling-delay-passive = <250>; /* 250ms */
- polling-delay = <1000>; /* 1000ms */
- thermal-sensors = <&pvtctl>;
-
- trips {
- cpu_crit: cpu_crit {
- temperature = <110000>; /* 110C */
- hysteresis = <2000>;
- type = "critical";
- };
- cpu_alert: cpu_alert {
- temperature = <100000>; /* 100C */
- hysteresis = <2000>;
- type = "passive";
- };
- };
-
- cooling-maps {
- map0 {
- trip = <&cpu_alert>;
- cooling-device = <&cpu0 (-1) (-1)>;
- };
- map1 {
- trip = <&cpu_alert>;
- cooling-device = <&cpu2 (-1) (-1)>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
index fa255672e8e5..2c75105c1398 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer.yaml
@@ -28,10 +28,10 @@ properties:
- arm,armv7-timer
- items:
- enum:
- - arm,armv7-timer
+ - arm,armv7-timer
- items:
- enum:
- - arm,armv8-timer
+ - arm,armv8-timer
interrupts:
items:
@@ -51,6 +51,12 @@ properties:
description: If present, the timer is powered through an always-on power
domain, therefore it never loses context.
+ allwinner,erratum-unknown1:
+ type: boolean
+ description: Indicates the presence of an erratum found in Allwinner SoCs,
+ where reading certain values from the counter is unreliable. This also
+ affects writes to the tval register, due to the implicit counter read.
+
fsl,erratum-a008585:
type: boolean
description: Indicates the presence of QorIQ erratum A-008585, which says
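A minimal DTS sketch (not part of the patch) of how the new "allwinner,erratum-unknown1" flag would sit on the architected timer node of an affected SoC; the PPI numbers follow the usual secure/non-secure/virtual/hypervisor ordering of the per-CPU timer interrupts and are illustrative only, as is the interrupt flag.

    timer {
            compatible = "arm,armv8-timer";
            allwinner,erratum-unknown1;
            interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
                         <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
                         <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
                         <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
    };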
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index 582bbef62b95..d83a1f97f911 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -20,7 +20,7 @@ properties:
compatible:
items:
- enum:
- - arm,armv7-timer-mem
+ - arm,armv7-timer-mem
reg:
maxItems: 1
@@ -65,10 +65,9 @@ patternProperties:
description: A timer node has up to 8 frame sub-nodes, each with the following properties.
properties:
frame-number:
- allOf:
- - $ref: "/schemas/types.yaml#/definitions/uint32"
- - minimum: 0
- maximum: 7
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 7
interrupts:
minItems: 1
@@ -77,7 +76,7 @@ patternProperties:
- description: physical timer irq
- description: virtual timer irq
- reg :
+ reg:
minItems: 1
maxItems: 2
items:
diff --git a/Documentation/devicetree/bindings/timer/cadence,ttc-timer.txt b/Documentation/devicetree/bindings/timer/cadence,ttc-timer.txt
deleted file mode 100644
index eeee6cd51e5c..000000000000
--- a/Documentation/devicetree/bindings/timer/cadence,ttc-timer.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Cadence TTC - Triple Timer Counter
-
-Required properties:
-- compatible : Should be "cdns,ttc".
-- reg : Specifies base physical address and size of the registers.
-- interrupts : A list of 3 interrupts; one per timer channel.
-- clocks: phandle to the source clock
-
-Optional properties:
-- timer-width: Bit width of the timer, necessary if not 16.
-
-Example:
-
-ttc0: ttc0@f8001000 {
- interrupt-parent = <&intc>;
- interrupts = < 0 10 4 0 11 4 0 12 4 >;
- compatible = "cdns,ttc";
- reg = <0xF8001000 0x1000>;
- clocks = <&cpu_clk 3>;
- timer-width = <32>;
-};
diff --git a/Documentation/devicetree/bindings/timer/cdns,ttc.yaml b/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
new file mode 100644
index 000000000000..c532b60b9c63
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/cdns,ttc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence TTC - Triple Timer Counter
+
+maintainers:
+ - Michal Simek <michal.simek@xilinx.com>
+
+properties:
+ compatible:
+ const: cdns,ttc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 3
+ maxItems: 3
+ description: |
+ A list of 3 interrupts; one per timer channel.
+
+ clocks:
+ maxItems: 1
+
+ timer-width:
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ description: |
+ Bit width of the timer, necessary if not 16.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ ttc0: ttc0@f8001000 {
+ interrupt-parent = <&intc>;
+ interrupts = <0 10 4>, <0 11 4>, <0 12 4>;
+ compatible = "cdns,ttc";
+ reg = <0xF8001000 0x1000>;
+ clocks = <&cpu_clk 3>;
+ timer-width = <32>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt b/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt
deleted file mode 100644
index 5d8fd5b52598..000000000000
--- a/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-Freescale i.MX General Purpose Timer (GPT)
-
-Required properties:
-
-- compatible : should be one of following:
- for i.MX1:
- - "fsl,imx1-gpt";
- for i.MX21:
- - "fsl,imx21-gpt";
- for i.MX27:
- - "fsl,imx27-gpt", "fsl,imx21-gpt";
- for i.MX31:
- - "fsl,imx31-gpt";
- for i.MX25:
- - "fsl,imx25-gpt", "fsl,imx31-gpt";
- for i.MX50:
- - "fsl,imx50-gpt", "fsl,imx31-gpt";
- for i.MX51:
- - "fsl,imx51-gpt", "fsl,imx31-gpt";
- for i.MX53:
- - "fsl,imx53-gpt", "fsl,imx31-gpt";
- for i.MX6Q:
- - "fsl,imx6q-gpt", "fsl,imx31-gpt";
- for i.MX6DL:
- - "fsl,imx6dl-gpt";
- for i.MX6SL:
- - "fsl,imx6sl-gpt", "fsl,imx6dl-gpt";
- for i.MX6SX:
- - "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
-- reg : specifies base physical address and size of the registers.
-- interrupts : should be the gpt interrupt.
-- clocks : the clocks provided by the SoC to drive the timer, must contain
- an entry for each entry in clock-names.
-- clock-names : must include "ipg" entry first, then "per" entry.
-
-Example:
-
-gpt1: timer@10003000 {
- compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
- reg = <0x10003000 0x1000>;
- interrupts = <26>;
- clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
- <&clks IMX27_CLK_PER1_GATE>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml b/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml
new file mode 100644
index 000000000000..883f7f46650b
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/fsl,imxgpt.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/fsl,imxgpt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX General Purpose Timer (GPT)
+
+maintainers:
+ - Sascha Hauer <s.hauer@pengutronix.de>
+
+properties:
+ compatible:
+ oneOf:
+ - const: fsl,imx1-gpt
+ - const: fsl,imx21-gpt
+ - items:
+ - const: fsl,imx27-gpt
+ - const: fsl,imx21-gpt
+ - const: fsl,imx31-gpt
+ - items:
+ - enum:
+ - fsl,imx25-gpt
+ - fsl,imx50-gpt
+ - fsl,imx51-gpt
+ - fsl,imx53-gpt
+ - fsl,imx6q-gpt
+ - const: fsl,imx31-gpt
+ - const: fsl,imx6dl-gpt
+ - items:
+ - enum:
+ - fsl,imx6sl-gpt
+ - fsl,imx6sx-gpt
+ - const: fsl,imx6dl-gpt
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SoC GPT ipg clock
+ - description: SoC GPT per clock
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: per
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx27-clock.h>
+
+ timer@10003000 {
+ compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
+ reg = <0x10003000 0x1000>;
+ interrupts = <26>;
+ clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
+ <&clks IMX27_CLK_PER1_GATE>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/timer/ingenic,tcu.txt b/Documentation/devicetree/bindings/timer/ingenic,tcu.txt
deleted file mode 100644
index 91f704951845..000000000000
--- a/Documentation/devicetree/bindings/timer/ingenic,tcu.txt
+++ /dev/null
@@ -1,138 +0,0 @@
-Ingenic JZ47xx SoCs Timer/Counter Unit devicetree bindings
-==========================================================
-
-For a description of the TCU hardware and drivers, have a look at
-Documentation/mips/ingenic-tcu.rst.
-
-Required properties:
-
-- compatible: Must be one of:
- * ingenic,jz4740-tcu
- * ingenic,jz4725b-tcu
- * ingenic,jz4770-tcu
- * ingenic,x1000-tcu
- followed by "simple-mfd".
-- reg: Should be the offset/length value corresponding to the TCU registers
-- clocks: List of phandle & clock specifiers for clocks external to the TCU.
- The "pclk", "rtc" and "ext" clocks should be provided. The "tcu" clock
- should be provided if the SoC has it.
-- clock-names: List of name strings for the external clocks.
-- #clock-cells: Should be <1>;
- Clock consumers specify this argument to identify a clock. The valid values
- may be found in <dt-bindings/clock/ingenic,tcu.h>.
-- interrupt-controller : Identifies the node as an interrupt controller
-- #interrupt-cells : Specifies the number of cells needed to encode an
- interrupt source. The value should be 1.
-- interrupts : Specifies the interrupt the controller is connected to.
-
-Optional properties:
-
-- ingenic,pwm-channels-mask: Bitmask of TCU channels reserved for PWM use.
- Default value is 0xfc.
-
-
-Children nodes
-==========================================================
-
-
-PWM node:
----------
-
-Required properties:
-
-- compatible: Must be one of:
- * ingenic,jz4740-pwm
- * ingenic,jz4725b-pwm
-- #pwm-cells: Should be 3. See ../pwm/pwm.yaml for a description of the cell
- format.
-- clocks: List of phandle & clock specifiers for the TCU clocks.
-- clock-names: List of name strings for the TCU clocks.
-
-
-Watchdog node:
---------------
-
-Required properties:
-
-- compatible: Must be "ingenic,jz4740-watchdog"
-- clocks: phandle to the WDT clock
-- clock-names: should be "wdt"
-
-
-OS Timer node:
----------
-
-Required properties:
-
-- compatible: Must be one of:
- * ingenic,jz4725b-ost
- * ingenic,jz4770-ost
-- clocks: phandle to the OST clock
-- clock-names: should be "ost"
-- interrupts : Specifies the interrupt the OST is connected to.
-
-
-Example
-==========================================================
-
-#include <dt-bindings/clock/jz4770-cgu.h>
-#include <dt-bindings/clock/ingenic,tcu.h>
-
-/ {
- tcu: timer@10002000 {
- compatible = "ingenic,jz4770-tcu", "simple-mfd";
- reg = <0x10002000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x10002000 0x1000>;
-
- #clock-cells = <1>;
-
- clocks = <&cgu JZ4770_CLK_RTC
- &cgu JZ4770_CLK_EXT
- &cgu JZ4770_CLK_PCLK>;
- clock-names = "rtc", "ext", "pclk";
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&intc>;
- interrupts = <27 26 25>;
-
- watchdog: watchdog@0 {
- compatible = "ingenic,jz4740-watchdog";
- reg = <0x0 0xc>;
-
- clocks = <&tcu TCU_CLK_WDT>;
- clock-names = "wdt";
- };
-
- pwm: pwm@40 {
- compatible = "ingenic,jz4740-pwm";
- reg = <0x40 0x80>;
-
- #pwm-cells = <3>;
-
- clocks = <&tcu TCU_CLK_TIMER0
- &tcu TCU_CLK_TIMER1
- &tcu TCU_CLK_TIMER2
- &tcu TCU_CLK_TIMER3
- &tcu TCU_CLK_TIMER4
- &tcu TCU_CLK_TIMER5
- &tcu TCU_CLK_TIMER6
- &tcu TCU_CLK_TIMER7>;
- clock-names = "timer0", "timer1", "timer2", "timer3",
- "timer4", "timer5", "timer6", "timer7";
- };
-
- ost: timer@e0 {
- compatible = "ingenic,jz4770-ost";
- reg = <0xe0 0x20>;
-
- clocks = <&tcu TCU_CLK_OST>;
- clock-names = "ost";
-
- interrupts = <15>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml b/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml
new file mode 100644
index 000000000000..03893e6a2f57
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ingenic,tcu.yaml
@@ -0,0 +1,280 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/ingenic,tcu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ingenic SoCs Timer/Counter Unit (TCU) devicetree bindings
+
+description: |
+ For a description of the TCU hardware and drivers, have a look at
+ Documentation/mips/ingenic-tcu.rst.
+
+maintainers:
+ - Paul Cercueil <paul@crapouillou.net>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ingenic,jz4740-tcu
+ - ingenic,jz4725b-tcu
+ - ingenic,jz4770-tcu
+ - ingenic,jz4780-tcu
+ - ingenic,x1000-tcu
+ required:
+ - compatible
+
+properties:
+ $nodename:
+ pattern: "^timer@[0-9a-f]+$"
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ "#clock-cells":
+ const: 1
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupt-controller: true
+
+ ranges: true
+
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - ingenic,jz4740-tcu
+ - ingenic,jz4725b-tcu
+ - ingenic,jz4770-tcu
+ - ingenic,x1000-tcu
+ - const: simple-mfd
+ - items:
+ - const: ingenic,jz4780-tcu
+ - const: ingenic,jz4770-tcu
+ - const: simple-mfd
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: RTC clock
+ - description: EXT clock
+ - description: PCLK clock
+ - description: TCU clock
+ minItems: 3
+
+ clock-names:
+ items:
+ - const: rtc
+ - const: ext
+ - const: pclk
+ - const: tcu
+ minItems: 3
+
+ interrupts:
+ items:
+ - description: TCU0 interrupt
+ - description: TCU1 interrupt
+ - description: TCU2 interrupt
+ minItems: 1
+
+ assigned-clocks:
+ minItems: 1
+ maxItems: 8
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 8
+
+ assigned-clock-rates:
+ minItems: 1
+ maxItems: 8
+
+ ingenic,pwm-channels-mask:
+ description: Bitmask of TCU channels reserved for PWM use.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0x00
+ maximum: 0xff
+ default: 0xfc
+
+patternProperties:
+ "^watchdog@[a-f0-9]+$":
+ type: object
+ $ref: ../watchdog/watchdog.yaml#
+ properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-watchdog
+ - ingenic,jz4780-watchdog
+ - items:
+ - const: ingenic,jz4770-watchdog
+ - const: ingenic,jz4740-watchdog
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: wdt
+
+ required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+ "^pwm@[a-f0-9]+$":
+ type: object
+ $ref: ../pwm/pwm.yaml#
+ properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4740-pwm
+ - items:
+ - enum:
+ - ingenic,jz4770-pwm
+ - ingenic,jz4780-pwm
+ - const: ingenic,jz4740-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 6
+ maxItems: 8
+
+ clock-names:
+ items:
+ - const: timer0
+ - const: timer1
+ - const: timer2
+ - const: timer3
+ - const: timer4
+ - const: timer5
+ - const: timer6
+ - const: timer7
+ minItems: 6
+
+ required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+ "^timer@[a-f0-9]+$":
+ type: object
+ properties:
+ compatible:
+ oneOf:
+ - enum:
+ - ingenic,jz4725b-ost
+ - ingenic,jz4770-ost
+ - items:
+ - const: ingenic,jz4780-ost
+ - const: ingenic,jz4770-ost
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: ost
+
+ interrupts:
+ maxItems: 1
+
+ required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+ additionalProperties: false
+
+required:
+ - "#clock-cells"
+ - "#interrupt-cells"
+ - interrupt-controller
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/jz4770-cgu.h>
+ #include <dt-bindings/clock/ingenic,tcu.h>
+ tcu: timer@10002000 {
+ compatible = "ingenic,jz4770-tcu", "simple-mfd";
+ reg = <0x10002000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x10002000 0x1000>;
+
+ #clock-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_RTC>,
+ <&cgu JZ4770_CLK_EXT>,
+ <&cgu JZ4770_CLK_PCLK>;
+ clock-names = "rtc", "ext", "pclk";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27 26 25>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4770-watchdog", "ingenic,jz4740-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
+
+ pwm: pwm@40 {
+ compatible = "ingenic,jz4770-pwm", "ingenic,jz4740-pwm";
+ reg = <0x40 0x80>;
+
+ #pwm-cells = <3>;
+
+ clocks = <&tcu TCU_CLK_TIMER0>,
+ <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>,
+ <&tcu TCU_CLK_TIMER3>,
+ <&tcu TCU_CLK_TIMER4>,
+ <&tcu TCU_CLK_TIMER5>,
+ <&tcu TCU_CLK_TIMER6>,
+ <&tcu TCU_CLK_TIMER7>;
+ clock-names = "timer0", "timer1", "timer2", "timer3",
+ "timer4", "timer5", "timer6", "timer7";
+ };
+
+ ost: timer@e0 {
+ compatible = "ingenic,jz4770-ost";
+ reg = <0xe0 0x20>;
+
+ clocks = <&tcu TCU_CLK_OST>;
+ clock-names = "ost";
+
+ interrupts = <15>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.txt b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.txt
deleted file mode 100644
index d57659996d62..000000000000
--- a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-NXP System Counter Module(sys_ctr)
-
-The system counter(sys_ctr) is a programmable system counter which provides
-a shared time base to Cortex A15, A7, A53, A73, etc. it is intended for use in
-applications where the counter is always powered and support multiple,
-unrelated clocks. The compare frame inside can be used for timer purpose.
-
-Required properties:
-
-- compatible : should be "nxp,sysctr-timer"
-- reg : Specifies the base physical address and size of the comapre
- frame and the counter control, read & compare.
-- interrupts : should be the first compare frames' interrupt
-- clocks : Specifies the counter clock.
-- clock-names: Specifies the clock's name of this module
-
-Example:
-
- system_counter: timer@306a0000 {
- compatible = "nxp,sysctr-timer";
- reg = <0x306a0000 0x20000>;/* system-counter-rd & compare */
- clocks = <&clk_8m>;
- clock-names = "per";
- interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
- };
diff --git a/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
new file mode 100644
index 000000000000..830211c55b4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nxp,sysctr-timer.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/nxp,sysctr-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP System Counter Module(sys_ctr)
+
+maintainers:
+ - Bai Ping <ping.bai@nxp.com>
+
+description: |
+ The system counter (sys_ctr) is a programmable system counter
+ that provides a shared time base to Cortex-A15, A7, A53, A73,
+ etc. It is intended for use in applications where the counter
+ is always powered and supports multiple, unrelated clocks. The
+ compare frames inside can be used for timer purposes.
+
+properties:
+ compatible:
+ const: nxp,sysctr-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: per
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@306a0000 {
+ compatible = "nxp,sysctr-timer";
+ reg = <0x306a0000 0x20000>;
+ clocks = <&clk_8m>;
+ clock-names = "per";
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
deleted file mode 100644
index f82087b220f4..000000000000
--- a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-NXP Low Power Timer/Pulse Width Modulation Module (TPM)
-
-The Timer/PWM Module (TPM) supports input capture, output compare,
-and the generation of PWM signals to control electric motor and power
-management applications. The counter, compare and capture registers
-are clocked by an asynchronous clock that can remain enabled in low
-power modes. TPM can support global counter bus where one TPM drives
-the counter bus for the others, provided bit width is the same.
-
-Required properties:
-
-- compatible : should be "fsl,imx7ulp-tpm"
-- reg : Specifies base physical address and size of the register sets
- for the clock event device and clock source device.
-- interrupts : Should be the clock event device interrupt.
-- clocks : The clocks provided by the SoC to drive the timer, must contain
- an entry for each entry in clock-names.
-- clock-names : Must include the following entries: "ipg" and "per".
-
-Example:
-tpm5: tpm@40260000 {
- compatible = "fsl,imx7ulp-tpm";
- reg = <0x40260000 0x1000>;
- interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7ULP_CLK_NIC1_BUS_DIV>,
- <&clks IMX7ULP_CLK_LPTPM5>;
- clock-names = "ipg", "per";
-};
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml
new file mode 100644
index 000000000000..edd9585f6726
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/nxp,tpm-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP Low Power Timer/Pulse Width Modulation Module (TPM)
+
+maintainers:
+ - Dong Aisheng <aisheng.dong@nxp.com>
+
+description: |
+ The Timer/PWM Module (TPM) supports input capture, output compare,
+ and the generation of PWM signals to control electric motor and power
+ management applications. The counter, compare and capture registers
+ are clocked by an asynchronous clock that can remain enabled in low
+ power modes. The TPM can support a global counter bus, where one TPM
+ drives the counter bus for the others, provided the bit width is the same.
+
+properties:
+ compatible:
+ const: fsl,imx7ulp-tpm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SoC TPM ipg clock
+ - description: SoC TPM per clock
+
+ clock-names:
+ items:
+ - const: ipg
+ - const: per
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx7ulp-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@40260000 {
+ compatible = "fsl,imx7ulp-tpm";
+ reg = <0x40260000 0x1000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>,
+ <&pcc2 IMX7ULP_CLK_LPTPM5>;
+ clock-names = "ipg", "per";
+ };
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
deleted file mode 100644
index a747fabab7d3..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt
+++ /dev/null
@@ -1,110 +0,0 @@
-* Renesas R-Car Compare Match Timer (CMT)
-
-The CMT is a multi-channel 16/32/48-bit timer/counter with configurable clock
-inputs and programmable compare match.
-
-Channels share hardware resources but their counter and compare match value
-are independent. A particular CMT instance can implement only a subset of the
-channels supported by the CMT model. Channel indices represent the hardware
-position of the channel in the CMT and don't match the channel numbers in the
-datasheets.
-
-Required Properties:
-
- - compatible: must contain one or more of the following:
- - "renesas,r8a73a4-cmt0" for the 32-bit CMT0 device included in r8a73a4.
- - "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4.
- - "renesas,r8a7740-cmt0" for the 32-bit CMT0 device included in r8a7740.
- - "renesas,r8a7740-cmt1" for the 48-bit CMT1 device included in r8a7740.
- - "renesas,r8a7740-cmt2" for the 32-bit CMT2 device included in r8a7740.
- - "renesas,r8a7740-cmt3" for the 32-bit CMT3 device included in r8a7740.
- - "renesas,r8a7740-cmt4" for the 32-bit CMT4 device included in r8a7740.
- - "renesas,r8a7743-cmt0" for the 32-bit CMT0 device included in r8a7743.
- - "renesas,r8a7743-cmt1" for the 48-bit CMT1 device included in r8a7743.
- - "renesas,r8a7744-cmt0" for the 32-bit CMT0 device included in r8a7744.
- - "renesas,r8a7744-cmt1" for the 48-bit CMT1 device included in r8a7744.
- - "renesas,r8a7745-cmt0" for the 32-bit CMT0 device included in r8a7745.
- - "renesas,r8a7745-cmt1" for the 48-bit CMT1 device included in r8a7745.
- - "renesas,r8a77470-cmt0" for the 32-bit CMT0 device included in r8a77470.
- - "renesas,r8a77470-cmt1" for the 48-bit CMT1 device included in r8a77470.
- - "renesas,r8a774a1-cmt0" for the 32-bit CMT0 device included in r8a774a1.
- - "renesas,r8a774a1-cmt1" for the 48-bit CMT devices included in r8a774a1.
- - "renesas,r8a774b1-cmt0" for the 32-bit CMT0 device included in r8a774b1.
- - "renesas,r8a774b1-cmt1" for the 48-bit CMT devices included in r8a774b1.
- - "renesas,r8a774c0-cmt0" for the 32-bit CMT0 device included in r8a774c0.
- - "renesas,r8a774c0-cmt1" for the 48-bit CMT devices included in r8a774c0.
- - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790.
- - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790.
- - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791.
- - "renesas,r8a7791-cmt1" for the 48-bit CMT1 device included in r8a7791.
- - "renesas,r8a7792-cmt0" for the 32-bit CMT0 device included in r8a7792.
- - "renesas,r8a7792-cmt1" for the 48-bit CMT1 device included in r8a7792.
- - "renesas,r8a7793-cmt0" for the 32-bit CMT0 device included in r8a7793.
- - "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793.
- - "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794.
- - "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794.
- - "renesas,r8a7795-cmt0" for the 32-bit CMT0 device included in r8a7795.
- - "renesas,r8a7795-cmt1" for the 48-bit CMT devices included in r8a7795.
- - "renesas,r8a7796-cmt0" for the 32-bit CMT0 device included in r8a7796.
- - "renesas,r8a7796-cmt1" for the 48-bit CMT devices included in r8a7796.
- - "renesas,r8a77965-cmt0" for the 32-bit CMT0 device included in r8a77965.
- - "renesas,r8a77965-cmt1" for the 48-bit CMT devices included in r8a77965.
- - "renesas,r8a77970-cmt0" for the 32-bit CMT0 device included in r8a77970.
- - "renesas,r8a77970-cmt1" for the 48-bit CMT devices included in r8a77970.
- - "renesas,r8a77980-cmt0" for the 32-bit CMT0 device included in r8a77980.
- - "renesas,r8a77980-cmt1" for the 48-bit CMT devices included in r8a77980.
- - "renesas,r8a77990-cmt0" for the 32-bit CMT0 device included in r8a77990.
- - "renesas,r8a77990-cmt1" for the 48-bit CMT devices included in r8a77990.
- - "renesas,r8a77995-cmt0" for the 32-bit CMT0 device included in r8a77995.
- - "renesas,r8a77995-cmt1" for the 48-bit CMT devices included in r8a77995.
- - "renesas,sh73a0-cmt0" for the 32-bit CMT0 device included in sh73a0.
- - "renesas,sh73a0-cmt1" for the 48-bit CMT1 device included in sh73a0.
- - "renesas,sh73a0-cmt2" for the 32-bit CMT2 device included in sh73a0.
- - "renesas,sh73a0-cmt3" for the 32-bit CMT3 device included in sh73a0.
- - "renesas,sh73a0-cmt4" for the 32-bit CMT4 device included in sh73a0.
-
- - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2
- and RZ/G1.
- - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2
- and RZ/G1.
- These are fallbacks for r8a73a4, R-Car Gen2 and RZ/G1 entries
- listed above.
- - "renesas,rcar-gen3-cmt0" for 32-bit CMT0 devices included in R-Car Gen3
- and RZ/G2.
- - "renesas,rcar-gen3-cmt1" for 48-bit CMT devices included in R-Car Gen3
- and RZ/G2.
- These are fallbacks for R-Car Gen3 and RZ/G2 entries listed
- above.
-
- - reg: base address and length of the registers block for the timer module.
- - interrupts: interrupt-specifier for the timer, one per channel.
- - clocks: a list of phandle + clock-specifier pairs, one for each entry
- in clock-names.
- - clock-names: must contain "fck" for the functional clock.
-
-
-Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes
-
- cmt0: timer@ffca0000 {
- compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0";
- reg = <0 0xffca0000 0 0x1004>;
- interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
- <0 142 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
- clock-names = "fck";
- };
-
- cmt1: timer@e6130000 {
- compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1";
- reg = <0 0xe6130000 0 0x1004>;
- interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
- <0 121 IRQ_TYPE_LEVEL_HIGH>,
- <0 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 124 IRQ_TYPE_LEVEL_HIGH>,
- <0 125 IRQ_TYPE_LEVEL_HIGH>,
- <0 126 IRQ_TYPE_LEVEL_HIGH>,
- <0 127 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&mstp3_clks R8A7790_CLK_CMT1>;
- clock-names = "fck";
- };
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.yaml b/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
new file mode 100644
index 000000000000..7e4dc5623da8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
@@ -0,0 +1,182 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/renesas,cmt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Compare Match Timer (CMT)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description:
+ The CMT is a multi-channel 16/32/48-bit timer/counter with configurable clock
+ inputs and programmable compare match.
+
+ Channels share hardware resources but their counter and compare match values
+ are independent. A particular CMT instance can implement only a subset of the
+ channels supported by the CMT model. Channel indices represent the hardware
+ position of the channel in the CMT and don't match the channel numbers in the
+ datasheets.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r8a7740-cmt0 # 32-bit CMT0 on R-Mobile A1
+ - renesas,r8a7740-cmt1 # 48-bit CMT1 on R-Mobile A1
+ - renesas,r8a7740-cmt2 # 32-bit CMT2 on R-Mobile A1
+ - renesas,r8a7740-cmt3 # 32-bit CMT3 on R-Mobile A1
+ - renesas,r8a7740-cmt4 # 32-bit CMT4 on R-Mobile A1
+ - renesas,sh73a0-cmt0 # 32-bit CMT0 on SH-Mobile AG5
+ - renesas,sh73a0-cmt1 # 48-bit CMT1 on SH-Mobile AG5
+ - renesas,sh73a0-cmt2 # 32-bit CMT2 on SH-Mobile AG5
+ - renesas,sh73a0-cmt3 # 32-bit CMT3 on SH-Mobile AG5
+ - renesas,sh73a0-cmt4 # 32-bit CMT4 on SH-Mobile AG5
+
+ - items:
+ - enum:
+ - renesas,r8a73a4-cmt0 # 32-bit CMT0 on R-Mobile APE6
+ - renesas,r8a7743-cmt0 # 32-bit CMT0 on RZ/G1M
+ - renesas,r8a7744-cmt0 # 32-bit CMT0 on RZ/G1N
+ - renesas,r8a7745-cmt0 # 32-bit CMT0 on RZ/G1E
+ - renesas,r8a77470-cmt0 # 32-bit CMT0 on RZ/G1C
+ - renesas,r8a7790-cmt0 # 32-bit CMT0 on R-Car H2
+ - renesas,r8a7791-cmt0 # 32-bit CMT0 on R-Car M2-W
+ - renesas,r8a7792-cmt0 # 32-bit CMT0 on R-Car V2H
+ - renesas,r8a7793-cmt0 # 32-bit CMT0 on R-Car M2-N
+ - renesas,r8a7794-cmt0 # 32-bit CMT0 on R-Car E2
+ - const: renesas,rcar-gen2-cmt0 # 32-bit CMT0 on R-Mobile APE6, R-Car Gen2 and RZ/G1
+
+ - items:
+ - enum:
+ - renesas,r8a73a4-cmt1 # 48-bit CMT1 on R-Mobile APE6
+ - renesas,r8a7743-cmt1 # 48-bit CMT1 on RZ/G1M
+ - renesas,r8a7744-cmt1 # 48-bit CMT1 on RZ/G1N
+ - renesas,r8a7745-cmt1 # 48-bit CMT1 on RZ/G1E
+ - renesas,r8a77470-cmt1 # 48-bit CMT1 on RZ/G1C
+ - renesas,r8a7790-cmt1 # 48-bit CMT1 on R-Car H2
+ - renesas,r8a7791-cmt1 # 48-bit CMT1 on R-Car M2-W
+ - renesas,r8a7792-cmt1 # 48-bit CMT1 on R-Car V2H
+ - renesas,r8a7793-cmt1 # 48-bit CMT1 on R-Car M2-N
+ - renesas,r8a7794-cmt1 # 48-bit CMT1 on R-Car E2
+ - const: renesas,rcar-gen2-cmt1 # 48-bit CMT1 on R-Mobile APE6, R-Car Gen2 and RZ/G1
+
+ - items:
+ - enum:
+ - renesas,r8a774a1-cmt0 # 32-bit CMT0 on RZ/G2M
+ - renesas,r8a774b1-cmt0 # 32-bit CMT0 on RZ/G2N
+ - renesas,r8a774c0-cmt0 # 32-bit CMT0 on RZ/G2E
+ - renesas,r8a7795-cmt0 # 32-bit CMT0 on R-Car H3
+ - renesas,r8a7796-cmt0 # 32-bit CMT0 on R-Car M3-W
+ - renesas,r8a77965-cmt0 # 32-bit CMT0 on R-Car M3-N
+ - renesas,r8a77970-cmt0 # 32-bit CMT0 on R-Car V3M
+ - renesas,r8a77980-cmt0 # 32-bit CMT0 on R-Car V3H
+ - renesas,r8a77990-cmt0 # 32-bit CMT0 on R-Car E3
+ - renesas,r8a77995-cmt0 # 32-bit CMT0 on R-Car D3
+ - const: renesas,rcar-gen3-cmt0 # 32-bit CMT0 on R-Car Gen3 and RZ/G2
+
+ - items:
+ - enum:
+ - renesas,r8a774a1-cmt1 # 48-bit CMT on RZ/G2M
+ - renesas,r8a774b1-cmt1 # 48-bit CMT on RZ/G2N
+ - renesas,r8a774c0-cmt1 # 48-bit CMT on RZ/G2E
+ - renesas,r8a7795-cmt1 # 48-bit CMT on R-Car H3
+ - renesas,r8a7796-cmt1 # 48-bit CMT on R-Car M3-W
+ - renesas,r8a77965-cmt1 # 48-bit CMT on R-Car M3-N
+ - renesas,r8a77970-cmt1 # 48-bit CMT on R-Car V3M
+ - renesas,r8a77980-cmt1 # 48-bit CMT on R-Car V3H
+ - renesas,r8a77990-cmt1 # 48-bit CMT on R-Car E3
+ - renesas,r8a77995-cmt1 # 48-bit CMT on R-Car D3
+ - const: renesas,rcar-gen3-cmt1 # 48-bit CMT on R-Car Gen3 and RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 8
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: fck
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - power-domains
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-cmt0
+ - renesas,rcar-gen3-cmt0
+ then:
+ properties:
+ interrupts:
+ minItems: 2
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rcar-gen2-cmt1
+ - renesas,rcar-gen3-cmt1
+ then:
+ properties:
+ interrupts:
+ minItems: 8
+ maxItems: 8
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7790-sysc.h>
+ cmt0: timer@ffca0000 {
+ compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0";
+ reg = <0xffca0000 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1";
+ reg = <0xe6130000 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 329>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 329>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/renesas,mtu2.txt b/Documentation/devicetree/bindings/timer/renesas,mtu2.txt
deleted file mode 100644
index ba0a34d97eb8..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,mtu2.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* Renesas Multi-Function Timer Pulse Unit 2 (MTU2)
-
-The MTU2 is a multi-purpose, multi-channel timer/counter with configurable
-clock inputs and programmable compare match.
-
-Channels share hardware resources but their counter and compare match value
-are independent. The MTU2 hardware supports five channels indexed from 0 to 4.
-
-Required Properties:
-
- - compatible: must be one or more of the following:
- - "renesas,mtu2-r7s72100" for the r7s72100 MTU2
- - "renesas,mtu2" for any MTU2
- This is a fallback for the above renesas,mtu2-* entries
-
- - reg: base address and length of the registers block for the timer module.
-
- - interrupts: interrupt specifiers for the timer, one for each entry in
- interrupt-names.
- - interrupt-names: must contain one entry named "tgi?a" for each enabled
- channel, where "?" is the channel index expressed as one digit from "0" to
- "4".
-
- - clocks: a list of phandle + clock-specifier pairs, one for each entry
- in clock-names.
- - clock-names: must contain "fck" for the functional clock.
-
-
-Example: R7S72100 (RZ/A1H) MTU2 node
-
- mtu2: timer@fcff0000 {
- compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
- reg = <0xfcff0000 0x400>;
- interrupts = <0 139 IRQ_TYPE_LEVEL_HIGH>,
- <0 146 IRQ_TYPE_LEVEL_HIGH>,
- <0 150 IRQ_TYPE_LEVEL_HIGH>,
- <0 154 IRQ_TYPE_LEVEL_HIGH>,
- <0 159 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tgi0a", "tgi1a", "tgi2a", "tgi3a", "tgi4a";
- clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
- clock-names = "fck";
- };
diff --git a/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml b/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml
new file mode 100644
index 000000000000..15d8dddf4ae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/renesas,mtu2.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Multi-Function Timer Pulse Unit 2 (MTU2)
+
+maintainers:
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+ - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+
+description:
+ The MTU2 is a multi-purpose, multi-channel timer/counter with configurable clock inputs
+ and programmable compare match.
+
+ Channels share hardware resources but their counter and compare match values are
+ independent. The MTU2 hardware supports five channels indexed from 0 to 4.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,mtu2-r7s72100 # RZ/A1H
+ - const: renesas,mtu2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 5
+ description: One entry for each enabled channel.
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: tgi0a
+ - const: tgi1a
+ - const: tgi2a
+ - const: tgi3a
+ - const: tgi4a
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: fck
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r7s72100-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ mtu2: timer@fcff0000 {
+ compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
+ reg = <0xfcff0000 0x400>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tgi0a";
+ clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
+ clock-names = "fck";
+ power-domains = <&cpg_clocks>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.txt b/Documentation/devicetree/bindings/timer/renesas,ostm.txt
deleted file mode 100644
index 81a78f8bcf17..000000000000
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-* Renesas OS Timer (OSTM)
-
-The OSTM is a multi-channel 32-bit timer/counter with fixed clock
-source that can operate in either interval count down timer or free-running
-compare match mode.
-
-Channels are independent from each other.
-
-Required Properties:
-
- - compatible: must be one or more of the following:
- - "renesas,r7s72100-ostm" for the R7S72100 (RZ/A1) OSTM
- - "renesas,r7s9210-ostm" for the R7S9210 (RZ/A2) OSTM
- - "renesas,ostm" for any OSTM
- This is a fallback for the above renesas,*-ostm entries
-
- - reg: base address and length of the register block for a timer channel.
-
- - interrupts: interrupt specifier for the timer channel.
-
- - clocks: clock specifier for the timer channel.
-
-Example: R7S72100 (RZ/A1H) OSTM node
-
- ostm0: timer@fcfec000 {
- compatible = "renesas,r7s72100-ostm", "renesas,ostm";
- reg = <0xfcfec000 0x30>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_EDGE_RISING>;
- clocks = <&mstp5_clks R7S72100_CLK_OSTM0>;
- power-domains = <&cpg_clocks>;
- };
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
new file mode 100644
index 000000000000..600d47ab7d58
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/renesas,ostm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas OS Timer (OSTM)
+
+maintainers:
+ - Chris Brandt <chris.brandt@renesas.com>
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+description:
+ The OSTM is a multi-channel 32-bit timer/counter with fixed clock source that
+ can operate in either interval count down timer or free-running compare match
+ mode.
+
+ Channels are independent from each other.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - renesas,r7s72100-ostm # RZ/A1H
+ - renesas,r7s9210-ostm # RZ/A2M
+ - const: renesas,ostm # Generic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r7s72100-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ ostm0: timer@fcfec000 {
+ compatible = "renesas,r7s72100-ostm", "renesas,ostm";
+ reg = <0xfcfec000 0x30>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&mstp5_clks R7S72100_CLK_OSTM0>;
+ power-domains = <&cpg_clocks>;
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml b/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
index c8a2a92074df..4d13e6bc1c50 100644
--- a/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/ti,j721e-ufs.yaml
@@ -25,6 +25,20 @@ properties:
power-domains:
maxItems: 1
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-parents:
+ maxItems: 1
+
+ "#address-cells":
+ const: 2
+
+ "#size-cells":
+ const: 2
+
+ ranges: true
+
required:
- compatible
- reg
@@ -39,30 +53,39 @@ patternProperties:
Documentation/devicetree/bindings/ufs/cdns,ufshc.txt for binding
documentation of child node
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
- ufs_wrapper: ufs-wrapper@4e80000 {
- compatible = "ti,j721e-ufs";
- reg = <0x0 0x4e80000 0x0 0x100>;
- power-domains = <&k3_pds 277>;
- clocks = <&k3_clks 277 1>;
- assigned-clocks = <&k3_clks 277 1>;
- assigned-clock-parents = <&k3_clks 277 4>;
- #address-cells = <2>;
- #size-cells = <2>;
-
- ufs@4e84000 {
- compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
- reg = <0x0 0x4e84000 0x0 0x10000>;
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- freq-table-hz = <19200000 19200000>;
- power-domains = <&k3_pds 277>;
- clocks = <&k3_clks 277 1>;
- assigned-clocks = <&k3_clks 277 1>;
- assigned-clock-parents = <&k3_clks 277 4>;
- clock-names = "core_clk";
- };
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ufs-wrapper@4e80000 {
+ compatible = "ti,j721e-ufs";
+ reg = <0x0 0x4e80000 0x0 0x100>;
+ power-domains = <&k3_pds 277>;
+ clocks = <&k3_clks 277 1>;
+ assigned-clocks = <&k3_clks 277 1>;
+ assigned-clock-parents = <&k3_clks 277 4>;
+
+ ranges = <0x0 0x0 0x0 0x4e80000 0x0 0x14000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ufs@4000 {
+ compatible = "cdns,ufshc-m31-16nm", "jedec,ufs-2.0";
+ reg = <0x0 0x4000 0x0 0x10000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ freq-table-hz = <19200000 19200000>;
+ power-domains = <&k3_pds 277>;
+ clocks = <&k3_clks 277 1>;
+ assigned-clocks = <&k3_clks 277 1>;
+ assigned-clock-parents = <&k3_clks 277 4>;
+ clock-names = "core_clk";
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
deleted file mode 100644
index 9a8b631904fd..000000000000
--- a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Amlogic Meson GX DWC3 USB SoC controller
-
-Required properties:
-- compatible: depending on the SoC this should contain one of:
- * amlogic,meson-axg-dwc3
- * amlogic,meson-gxl-dwc3
-- clocks: a handle for the "USB general" clock
-- clock-names: must be "usb_general"
-- resets: a handle for the shared "USB OTG" reset line
-- reset-names: must be "usb_otg"
-
-Required child node:
-A child node must exist to represent the core DWC3 IP block. The name of
-the node is not important. The content of the node is defined in dwc3.txt.
-
-PHY documentation is provided in the following places:
-- Documentation/devicetree/bindings/phy/meson-gxl-usb2-phy.txt
-- Documentation/devicetree/bindings/phy/meson-gxl-usb3-phy.txt
-
-Example device nodes:
- usb0: usb@ff500000 {
- compatible = "amlogic,meson-axg-dwc3";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- clocks = <&clkc CLKID_USB>;
- clock-names = "usb_general";
- resets = <&reset RESET_USB_OTG>;
- reset-names = "usb_otg";
-
- dwc3: dwc3@ff500000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xff500000 0x0 0x100000>;
- interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
- dr_mode = "host";
- maximum-speed = "high-speed";
- snps,dis_u2_susphy_quirk;
- phys = <&usb3_phy>, <&usb2_phy0>;
- phy-names = "usb2-phy", "usb3-phy";
- };
- };
diff --git a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
index b0e5e0fe9386..5b04a7dfa018 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
+++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
@@ -25,9 +25,13 @@ description: |
The Amlogic A1 embeds a DWC3 USB IP Core configured for USB2 in
host-only mode.
+ The Amlogic GXL & GXM SoCs don't embed a USB3 PHY.
+
properties:
compatible:
enum:
+ - amlogic,meson-gxl-usb-ctrl
+ - amlogic,meson-gxm-usb-ctrl
- amlogic,meson-g12a-usb-ctrl
- amlogic,meson-a1-usb-ctrl
@@ -41,6 +45,11 @@ properties:
clocks:
minItems: 1
+ maxItems: 3
+
+ clock-names:
+ minItems: 1
+ maxItems: 3
resets:
minItems: 1
@@ -52,10 +61,8 @@ properties:
maxItems: 1
phy-names:
- items:
- - const: usb2-phy0 # USB2 PHY0 if USBHOST_A port is used
- - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used
- - const: usb3-phy0 # USB3 PHY if USB3_0 is used
+ minItems: 1
+ maxItems: 3
phys:
minItems: 1
@@ -93,10 +100,68 @@ allOf:
properties:
compatible:
enum:
+ - amlogic,meson-g12a-usb-ctrl
+
+ then:
+ properties:
+ phy-names:
+ items:
+ - const: usb2-phy0 # USB2 PHY0 if USBHOST_A port is used
+ - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used
+ - const: usb3-phy0 # USB3 PHY if USB3_0 is used
+ - if:
+ properties:
+ compatible:
+ enum:
+ - amlogic,meson-gxl-usb-ctrl
+
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ clock-names:
+ items:
+ - const: usb_ctrl
+ - const: ddr
+ phy-names:
+ items:
+ - const: usb2-phy0 # USB2 PHY0 if USBHOST_A port is used
+ - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used
+ required:
+ - clock-names
+ - if:
+ properties:
+ compatible:
+ enum:
+ - amlogic,meson-gxm-usb-ctrl
+
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ clock-names:
+ items:
+ - const: usb_ctrl
+ - const: ddr
+ phy-names:
+ items:
+ - const: usb2-phy0 # USB2 PHY0 if USBHOST_A port is used
+ - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used
+ - const: usb2-phy2 # USB2 PHY2 if USBOTG_C port is used
+
+ required:
+ - clock-names
+ - if:
+ properties:
+ compatible:
+ enum:
- amlogic,meson-a1-usb-ctrl
then:
properties:
+ phy-names:
+ items:
+ - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used
clocks:
minItems: 3
clock-names:
@@ -111,7 +176,7 @@ examples:
- |
usb: usb@ffe09000 {
compatible = "amlogic,meson-g12a-usb-ctrl";
- reg = <0x0 0xffe09000 0x0 0xa0>;
+ reg = <0xffe09000 0xa0>;
interrupts = <16>;
#address-cells = <1>;
#size-cells = <1>;
@@ -147,4 +212,3 @@ examples:
snps,quirk-frame-length-adjustment;
};
};
-
diff --git a/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml b/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
index 06399ba0d9e4..ccc67d03d4bb 100644
--- a/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
+++ b/Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
@@ -38,19 +38,70 @@ properties:
aspeed,vhub-downstream-ports:
description: Number of downstream ports supported by the Virtual Hub
- allOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- - default: 5
- minimum: 1
- maximum: 7
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 5
+ minimum: 1
+ maximum: 7
aspeed,vhub-generic-endpoints:
description: Number of generic endpoints supported by the Virtual Hub
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 15
+ minimum: 1
+ maximum: 21
+
+ vhub-vendor-id:
+ description: vhub Vendor ID
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maximum: 65535
+
+ vhub-product-id:
+ description: vhub Product ID
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32
- - default: 15
- minimum: 1
- maximum: 21
+ - maximum: 65535
+
+ vhub-device-revision:
+ description: vhub Device Revision in binary-coded decimal
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maximum: 65535
+
+ vhub-strings:
+ type: object
+
+ properties:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ '^string@[0-9a-f]+$':
+ type: object
+ description: string descriptors of the specific language
+
+ properties:
+ reg:
+ maxItems: 1
+ description: 16-bit Language Identifier defined by USB-IF
+
+ manufacturer:
+ description: vhub manufacturer
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
+
+ product:
+ description: vhub product name
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
+
+ serial-number:
+ description: vhub device serial number
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
required:
- compatible
@@ -74,4 +125,19 @@ examples:
aspeed,vhub-generic-endpoints = <15>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb2ad_default>;
+
+ vhub-vendor-id = <0x1d6b>;
+ vhub-product-id = <0x0107>;
+ vhub-device-revision = <0x0100>;
+ vhub-strings {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ string@0409 {
+ reg = <0x0409>;
+ manufacturer = "ASPEED";
+ product = "USB Virtual Hub";
+ serial-number = "0000";
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
index 44e80153b148..423b99a8fd97 100644
--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
+++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
@@ -88,13 +88,15 @@ Required properties:
- clock-names: Should contain two strings
"pclk" for the peripheral clock
"hclk" for the host clock
+
+Deprecated property:
- ep childnode: To specify the number of endpoints and their properties.
Optional properties:
- atmel,vbus-gpio: If present, specifies a gpio that allows to detect whether
vbus is present (USB is connected).
-Required child node properties:
+Deprecated child node properties:
- name: Name of the endpoint.
- reg: Num of the endpoint.
- atmel,fifo-size: Size of the fifo.
@@ -112,56 +114,4 @@ usb2: gadget@fff78000 {
clocks = <&utmi>, <&udphs_clk>;
clock-names = "hclk", "pclk";
atmel,vbus-gpio = <&pioB 19 0>;
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
diff --git a/Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml b/Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
new file mode 100644
index 000000000000..2a9acf2b5a64
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/brcm,bcm7445-ehci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB USB EHCI Controller Device Tree Bindings
+
+allOf:
+ - $ref: "usb-hcd.yaml"
+
+maintainers:
+ - Al Cooper <alcooperx@gmail.com>
+
+properties:
+ compatible:
+ const: brcm,bcm7445-ehci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: Clock specifier for the EHCI clock
+
+ clock-names:
+ const: sw_usb
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: usbphy
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - phys
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ usb@f0b00300 {
+ compatible = "brcm,bcm7445-ehci";
+ reg = <0xf0b00300 0xa8>;
+ interrupts = <0x0 0x5a 0x0>;
+ phys = <&usbphy_0 0x0>;
+ phy-names = "usbphy";
+ clocks = <&usb20>;
+ clock-names = "sw_usb";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index 0d6d850a7f17..9352a8ef60a6 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -62,14 +62,14 @@ properties:
resets:
items:
- - description: common reset
- - description: ecc reset
+ - description: common reset
+ - description: ecc reset
minItems: 1
reset-names:
items:
- - const: dwc2
- - const: dwc2-ecc
+ - const: dwc2
+ - const: dwc2-ecc
minItems: 1
phys:
@@ -78,6 +78,9 @@ properties:
phy-names:
const: usb2-phy
+ power-domains:
+ maxItems: 1
+
vbus-supply:
description: reference to the VBUS regulator. Depending on the current mode
 this is enabled (in "host" mode) or disabled (in "peripheral" mode). The
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 9946ff9ba735..d03edf9d3935 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -15,8 +15,6 @@ Required properties:
Exception for clocks:
clocks are optional if the parent node (i.e. glue-layer) is compatible to
one of the following:
- "amlogic,meson-axg-dwc3"
- "amlogic,meson-gxl-dwc3"
"cavium,octeon-7130-usb-uctl"
"qcom,dwc3"
"samsung,exynos5250-dwusb3"
diff --git a/Documentation/devicetree/bindings/usb/ehci-mv.txt b/Documentation/devicetree/bindings/usb/ehci-mv.txt
deleted file mode 100644
index 335589895763..000000000000
--- a/Documentation/devicetree/bindings/usb/ehci-mv.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* Marvell PXA/MMP EHCI controller.
-
-Required properties:
-
-- compatible: must be "marvell,pxau2o-ehci"
-- reg: physical base addresses of the controller and length of memory mapped region
-- interrupts: one EHCI controller interrupt should be described here
-- clocks: phandle list of usb clocks
-- clock-names: should be "USBCLK"
-- phys: phandle for the PHY device
-- phy-names: should be "usb"
-
-Example:
-
- ehci0: usb-ehci@d4208000 {
- compatible = "marvell,pxau2o-ehci";
- reg = <0xd4208000 0x200>;
- interrupts = <44>;
- clocks = <&soc_clocks MMP2_CLK_USB>;
- clock-names = "USBCLK";
- phys = <&usb_otg_phy>;
- phy-names = "usb";
- };
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index 10edd05872ea..69f3f26d1207 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -6,19 +6,30 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: USB EHCI Controller Device Tree Bindings
-allOf:
- - $ref: "usb-hcd.yaml"
-
maintainers:
- Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+allOf:
+ - $ref: "usb-hcd.yaml"
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ const: ibm,usb-ehci-440epx
+ then:
+ properties:
+ reg:
+ maxItems: 1
+
properties:
compatible:
contains:
const: generic-ehci
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
interrupts:
maxItems: 1
@@ -36,6 +47,9 @@ properties:
- if a USB DRD channel: first clock should be host and second
one should be peripheral
+ power-domains:
+ maxItems: 1
+
big-endian:
$ref: /schemas/types.yaml#/definitions/flag
description:
@@ -74,6 +88,9 @@ properties:
phy-names:
const: usb
+ iommus:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -87,7 +104,7 @@ examples:
compatible = "ibm,usb-ehci-440epx", "generic-ehci";
interrupt-parent = <&UIC0>;
interrupts = <0x1a 4>;
- reg = <0 0xe0000300 90 0 0xe0000390 70>;
+ reg = <0xe0000300 90>, <0xe0000390 70>;
big-endian;
};
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
index bcffec1f1341..2178bcc401bc 100644
--- a/Documentation/devicetree/bindings/usb/generic-ohci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -36,6 +36,9 @@ properties:
- if a USB DRD channel: first clock should be host and second
one should be peripheral
+ power-domains:
+ maxItems: 1
+
big-endian:
$ref: /schemas/types.yaml#/definitions/flag
description:
@@ -73,6 +76,9 @@ properties:
phy-names:
const: usb
+ iommus:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/usb/ingenic,musb.yaml b/Documentation/devicetree/bindings/usb/ingenic,musb.yaml
index c2d2ee43ba67..c334aea6b59d 100644
--- a/Documentation/devicetree/bindings/usb/ingenic,musb.yaml
+++ b/Documentation/devicetree/bindings/usb/ingenic,musb.yaml
@@ -42,6 +42,9 @@ properties:
phys:
description: PHY specifier for the USB PHY
+ usb-role-switch:
+ type: boolean
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/usb/keystone-usb.txt b/Documentation/devicetree/bindings/usb/keystone-usb.txt
deleted file mode 100644
index 77df82e36138..000000000000
--- a/Documentation/devicetree/bindings/usb/keystone-usb.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-TI Keystone Soc USB Controller
-
-DWC3 GLUE
-
-Required properties:
- - compatible: should be
- "ti,keystone-dwc3" for Keystone 2 SoCs
- "ti,am654-dwc3" for AM654 SoC
- - #address-cells, #size-cells : should be '1' if the device has sub-nodes
- with 'reg' property.
- - reg : Address and length of the register set for the USB subsystem on
- the SOC.
- - interrupts : The irq number of this device that is used to interrupt the
- MPU.
- - ranges: allows valid 1:1 translation between child's address space and
- parent's address space.
-
-SoC-specific Required Properties:
-The following are mandatory properties for Keystone 2 66AK2HK, 66AK2L and 66AK2E
-SoCs only:
-
-- clocks: Clock ID for USB functional clock.
-- clock-names: Must be "usb".
-
-
-The following are mandatory properties for 66AK2G and AM654:
-
-- power-domains: Should contain a phandle to a PM domain provider node
- and an args specifier containing the USB device id
- value. This property is as per the binding,
- Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
-
-Sub-nodes:
-The dwc3 core should be added as subnode to Keystone DWC3 glue.
-- dwc3 :
- The binding details of dwc3 can be found in:
- Documentation/devicetree/bindings/usb/dwc3.txt
-
-Example:
- usb: usb@2680000 {
- compatible = "ti,keystone-dwc3";
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x2680000 0x10000>;
- clocks = <&clkusb>;
- clock-names = "usb";
- interrupts = <GIC_SPI 393 IRQ_TYPE_EDGE_RISING>;
- ranges;
-
- dwc3@2690000 {
- compatible = "synopsys,dwc3";
- reg = <0x2690000 0x70000>;
- interrupts = <GIC_SPI 393 IRQ_TYPE_EDGE_RISING>;
- usb-phy = <&usb_phy>, <&usb_phy>;
- };
- };
diff --git a/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml b/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml
new file mode 100644
index 000000000000..3cf93dd45eb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/marvell,pxau2o-ehci.yaml
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2019,2020 Lubomir Rintel <lkundrak@v3.sk>
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/marvell,pxau2o-ehci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell PXA/MMP EHCI bindings
+
+maintainers:
+ - Lubomir Rintel <lkundrak@v3.sk>
+
+allOf:
+ - $ref: usb-hcd.yaml#
+
+properties:
+ compatible:
+ const: marvell,pxau2o-ehci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: USBCLK
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: usb
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - phys
+ - phy-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/marvell,mmp2.h>
+ usb@d4208000 {
+ compatible = "marvell,pxau2o-ehci";
+ reg = <0xd4208000 0x200>;
+ interrupts = <44>;
+ clocks = <&soc_clocks MMP2_CLK_USB>;
+ clock-names = "USBCLK";
+ phys = <&usb_otg_phy>;
+ phy-names = "usb";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
index b84ed8ee8cfc..c4ddc0adf101 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra-xudc.yaml
@@ -21,6 +21,7 @@ properties:
- enum:
- nvidia,tegra210-xudc # For Tegra210
- nvidia,tegra186-xudc # For Tegra186
+ - nvidia,tegra194-xudc # For Tegra194
reg:
minItems: 2
@@ -144,6 +145,7 @@ allOf:
contains:
enum:
- nvidia,tegra186-xudc
+ - nvidia,tegra194-xudc
then:
properties:
reg:
@@ -163,9 +165,9 @@ examples:
usb@700d0000 {
compatible = "nvidia,tegra210-xudc";
- reg = <0x0 0x700d0000 0x0 0x8000>,
- <0x0 0x700d8000 0x0 0x1000>,
- <0x0 0x700d9000 0x0 0x1000>;
+ reg = <0x700d0000 0x8000>,
+ <0x700d8000 0x1000>,
+ <0x700d9000 0x1000>;
reg-names = "base", "fpci", "ipfs";
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt
deleted file mode 100644
index fbdd01756752..000000000000
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-Qualcomm SuperSpeed DWC3 USB SoC controller
-
-Required properties:
-- compatible: Compatible list, contains
- "qcom,dwc3"
- "qcom,msm8996-dwc3" for msm8996 SOC.
- "qcom,msm8998-dwc3" for msm8998 SOC.
- "qcom,sdm845-dwc3" for sdm845 SOC.
-- reg: Offset and length of register set for QSCRATCH wrapper
-- power-domains: specifies a phandle to PM domain provider node
-- clocks: A list of phandle + clock-specifier pairs for the
- clocks listed in clock-names
-- clock-names: Should contain the following:
- "core" Master/Core clock, have to be >= 125 MHz for SS
- operation and >= 60MHz for HS operation
- "mock_utmi" Mock utmi clock needed for ITP/SOF generation in
- host mode. Its frequency should be 19.2MHz.
- "sleep" Sleep clock, used for wakeup when USB3 core goes
- into low power mode (U3).
-
-Optional clocks:
- "iface" System bus AXI clock.
- Not present on "qcom,msm8996-dwc3" compatible.
- "cfg_noc" System Config NOC clock.
- Not present on "qcom,msm8996-dwc3" compatible.
-- assigned-clocks: Should be:
- MOCK_UTMI_CLK
- MASTER_CLK
-- assigned-clock-rates: Should be:
- 19.2Mhz (192000000) for MOCK_UTMI_CLK
- >=125Mhz (125000000) for MASTER_CLK in SS mode
- >=60Mhz (60000000) for MASTER_CLK in HS mode
-
-Optional properties:
-- resets: Phandle to reset control that resets core and wrapper.
-- interrupts: specifies interrupts from controller wrapper used
- to wakeup from low power/susepnd state. Must contain
- one or more entry for interrupt-names property
-- interrupt-names: Must include the following entries:
- - "hs_phy_irq": The interrupt that is asserted when a
- wakeup event is received on USB2 bus
- - "ss_phy_irq": The interrupt that is asserted when a
- wakeup event is received on USB3 bus
- - "dm_hs_phy_irq" and "dp_hs_phy_irq": Separate
- interrupts for any wakeup event on DM and DP lines
-- qcom,select-utmi-as-pipe-clk: if present, disable USB3 pipe_clk requirement.
- Used when dwc3 operates without SSPHY and only
- HS/FS/LS modes are supported.
-
-Required child node:
-A child node must exist to represent the core DWC3 IP block. The name of
-the node is not important. The content of the node is defined in dwc3.txt.
-
-Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt - USB3 QMP PHY
-Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml - USB2 QUSB2 PHY
-
-Example device nodes:
-
- hs_phy: phy@100f8800 {
- compatible = "qcom,qusb2-v2-phy";
- ...
- };
-
- ss_phy: phy@100f8830 {
- compatible = "qcom,qmp-v3-usb3-phy";
- ...
- };
-
- usb3_0: usb30@a6f8800 {
- compatible = "qcom,dwc3";
- reg = <0xa6f8800 0x400>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- interrupts = <0 131 0>, <0 486 0>, <0 488 0>, <0 489 0>;
- interrupt-names = "hs_phy_irq", "ss_phy_irq",
- "dm_hs_phy_irq", "dp_hs_phy_irq";
-
- clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>,
- <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
- clock-names = "core", "mock_utmi", "sleep";
-
- assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
- <&gcc GCC_USB30_PRIM_MASTER_CLK>;
- assigned-clock-rates = <19200000>, <133000000>;
-
- resets = <&gcc GCC_USB30_PRIM_BCR>;
- reset-names = "core_reset";
- power-domains = <&gcc USB30_PRIM_GDSC>;
- qcom,select-utmi-as-pipe-clk;
-
- dwc3@10000000 {
- compatible = "snps,dwc3";
- reg = <0x10000000 0xcd00>;
- interrupts = <0 205 0x4>;
- phys = <&hs_phy>, <&ss_phy>;
- phy-names = "usb2-phy", "usb3-phy";
- dr_mode = "host";
- };
- };
-
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
new file mode 100644
index 000000000000..dac10848dd7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -0,0 +1,174 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/qcom,dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SuperSpeed DWC3 USB SoC controller
+
+maintainers:
+ - Manu Gautam <mgautam@codeaurora.org>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,msm8996-dwc3
+ - qcom,msm8998-dwc3
+ - qcom,sc7180-dwc3
+ - qcom,sdm845-dwc3
+ - const: qcom,dwc3
+
+ reg:
+ description: Offset and length of register set for QSCRATCH wrapper
+ maxItems: 1
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ power-domains:
+ description: specifies a phandle to PM domain provider node
+ maxItems: 1
+
+ clocks:
+ description:
+ A list of phandle and clock-specifier pairs for the clocks
+ listed in clock-names.
+ items:
+ - description: System Config NOC clock.
+ - description: Master/Core clock, has to be >= 125 MHz
+ for SS operation and >= 60 MHz for HS operation.
+ - description: System bus AXI clock.
+ - description: Mock utmi clock needed for ITP/SOF generation
+ in host mode. Its frequency should be 19.2 MHz.
+ - description: Sleep clock, used for wakeup when
+ USB3 core goes into low power mode (U3).
+
+ clock-names:
+ items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: mock_utmi
+ - const: sleep
+
+ assigned-clocks:
+ items:
+ - description: Phandle and clock specifier of MOCK_UTMI_CLK.
+ - description: Phandle and clock specifier of MASTER_CLK.
+
+ assigned-clock-rates:
+ items:
+ - description: Must be 19.2 MHz (19200000).
+ - description: Must be >= 60 MHz in HS mode, >= 125 MHz in SS mode.
+ resets:
+ maxItems: 1
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: usb-ddr
+ - const: apps-usb
+
+ interrupts:
+ items:
+ - description: The interrupt that is asserted
+ when a wakeup event is received on USB2 bus.
+ - description: The interrupt that is asserted
+ when a wakeup event is received on USB3 bus.
+ - description: Wakeup event on DM line.
+ - description: Wakeup event on DP line.
+
+ interrupt-names:
+ items:
+ - const: hs_phy_irq
+ - const: ss_phy_irq
+ - const: dm_hs_phy_irq
+ - const: dp_hs_phy_irq
+
+ qcom,select-utmi-as-pipe-clk:
+ description:
+ If present, disable USB3 pipe_clk requirement.
+ Used when dwc3 operates without SSPHY and only
+ HS/FS/LS modes are supported.
+ type: boolean
+
+# Required child node:
+
+patternProperties:
+ "^dwc3@[0-9a-f]+$":
+ type: object
+ description:
+ A child node must exist to represent the core DWC3 IP block.
+ The content of the node is defined in dwc3.txt.
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+ - power-domains
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb@a6f8800 {
+ compatible = "qcom,sdm845-dwc3", "qcom,dwc3";
+ reg = <0 0x0a6f8800 0 0x400>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
+ clock-names = "cfg_noc", "core", "iface", "mock_utmi",
+ "sleep";
+
+ assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <150000000>;
+
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "ss_phy_irq",
+ "dm_hs_phy_irq", "dp_hs_phy_irq";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+
+ dwc3@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a600000 0 0xcd00>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x740 0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
index 031452aa25bc..e3cdeab1199f 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
@@ -73,7 +73,7 @@ examples:
usb3_peri0: usb@ee020000 {
compatible = "renesas,r8a774c0-usb3-peri", "renesas,rcar-gen3-usb3-peri";
- reg = <0 0xee020000 0 0x400>;
+ reg = <0xee020000 0x400>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 328>;
companion = <&xhci0>;
diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
index a7ae95598ccb..af4826fb6824 100644
--- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
+++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
@@ -22,6 +22,7 @@ properties:
- items:
- enum:
+ - renesas,usbhs-r8a7742 # RZ/G1H
- renesas,usbhs-r8a7743 # RZ/G1M
- renesas,usbhs-r8a7744 # RZ/G1N
- renesas,usbhs-r8a7745 # RZ/G1E
@@ -121,7 +122,7 @@ examples:
usbhs: usb@e6590000 {
compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
- reg = <0 0xe6590000 0 0x100>;
+ reg = <0xe6590000 0x100>;
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 704>;
};
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index 5f5264b2e9ad..90750255792f 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -57,30 +57,36 @@ examples:
- |
#include <dt-bindings/soc/ti,sci_pm_domain.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
- cdns_usb@4104000 {
- compatible = "ti,j721e-usb";
- reg = <0x00 0x4104000 0x00 0x100>;
- power-domains = <&k3_pds 288 TI_SCI_PD_EXCLUSIVE>;
- clocks = <&k3_clks 288 15>, <&k3_clks 288 3>;
- clock-names = "ref", "lpm";
- assigned-clocks = <&k3_clks 288 15>; /* USB2_REFCLK */
- assigned-clock-parents = <&k3_clks 288 16>; /* HFOSC0 */
- #address-cells = <2>;
- #size-cells = <2>;
- usb@6000000 {
- compatible = "cdns,usb3";
- reg = <0x00 0x6000000 0x00 0x10000>,
- <0x00 0x6010000 0x00 0x10000>,
- <0x00 0x6020000 0x00 0x10000>;
- reg-names = "otg", "xhci", "dev";
- interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
- <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
- <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
- interrupt-names = "host",
- "peripheral",
- "otg";
- maximum-speed = "super-speed";
- dr_mode = "otg";
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cdns_usb@4104000 {
+ compatible = "ti,j721e-usb";
+ reg = <0x00 0x4104000 0x00 0x100>;
+ power-domains = <&k3_pds 288 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 288 15>, <&k3_clks 288 3>;
+ clock-names = "ref", "lpm";
+ assigned-clocks = <&k3_clks 288 15>; /* USB2_REFCLK */
+ assigned-clock-parents = <&k3_clks 288 16>; /* HFOSC0 */
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb@6000000 {
+ compatible = "cdns,usb3";
+ reg = <0x00 0x6000000 0x00 0x10000>,
+ <0x00 0x6010000 0x00 0x10000>,
+ <0x00 0x6020000 0x00 0x10000>;
+ reg-names = "otg", "xhci", "dev";
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; /* otgirq.0 */
+ interrupt-names = "host",
+ "peripheral",
+ "otg";
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+ };
};
};
diff --git a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
new file mode 100644
index 000000000000..f127535feb0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/ti,keystone-dwc3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI Keystone Soc USB Controller
+
+maintainers:
+ - Roger Quadros <rogerq@ti.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: "ti,keystone-dwc3"
+ - const: "ti,am654-dwc3"
+
+ reg:
+ maxItems: 1
+ description: Address and length of the register set for the USB subsystem on
+ the SOC.
+
+ interrupts:
+ maxItems: 1
+ description: The irq number of this device that is used to interrupt the MPU.
+
+
+ clocks:
+ description: Clock ID for USB functional clock.
+
+ power-domains:
+ description: Should contain a phandle to a PM domain provider node
+ and an args specifier containing the USB device id
+ value. This property is as per the binding,
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
+
+ phys:
+ description:
+ PHY specifier for the USB3.0 PHY. Some SoCs need the USB3.0 PHY
+ to be turned on before the controller.
+ Documentation/devicetree/bindings/phy/phy-bindings.txt
+
+ phy-names:
+ items:
+ - const: "usb3-phy"
+
+ dwc3:
+ description: This is the node representing the DWC3 controller instance
+ Documentation/devicetree/bindings/usb/dwc3.txt
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ usb: usb@2680000 {
+ compatible = "ti,keystone-dwc3";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x2680000 0x10000>;
+ clocks = <&clkusb>;
+ clock-names = "usb";
+ interrupts = <GIC_SPI 393 IRQ_TYPE_EDGE_RISING>;
+ ranges;
+
+ dwc3@2690000 {
+ compatible = "synopsys,dwc3";
+ reg = <0x2690000 0x70000>;
+ interrupts = <GIC_SPI 393 IRQ_TYPE_EDGE_RISING>;
+ usb-phy = <&usb_phy>, <&usb_phy>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml b/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml
new file mode 100644
index 000000000000..8eaf4b6c4735
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ti,tps6598x.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/ti,tps6598x.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments 6598x Type-C Port Switch and Power Delivery controller DT bindings
+
+maintainers:
+ - Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+description: |
+ Texas Instruments 6598x Type-C Port Switch and Power Delivery controller
+
+properties:
+ compatible:
+ enum:
+ - ti,tps6598x
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: irq
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ tps6598x: tps6598x@38 {
+ compatible = "ti,tps6598x";
+ reg = <0x38>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <107 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "irq";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec_pins>;
+
+ typec_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ port {
+ typec_ep: endpoint {
+ remote-endpoint = <&otg_ep>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt b/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
deleted file mode 100644
index ec80641208a5..000000000000
--- a/Documentation/devicetree/bindings/usb/usb-conn-gpio.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-USB GPIO Based Connection Detection
-
-This is typically used to switch dual role mode from the USB ID pin connected
-to an input GPIO, and also used to enable/disable device mode from the USB
-Vbus pin connected to an input GPIO.
-
-Required properties:
-- compatible : should include "gpio-usb-b-connector" and "usb-b-connector".
-- id-gpios, vbus-gpios : input gpios, either one of them must be present,
- and both can be present as well.
- see connector/usb-connector.yaml
-
-Optional properties:
-- vbus-supply : can be present if needed when supports dual role mode.
- see connector/usb-connector.yaml
-
-- Sub-nodes:
- - port : can be present.
- see graph.txt
-
-Example:
-
-&mtu3 {
- connector {
- compatible = "gpio-usb-b-connector", "usb-b-connector";
- type = "micro";
- id-gpios = <&pio 12 GPIO_ACTIVE_HIGH>;
- vbus-supply = <&usb_p0_vbus>;
- };
-};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index dc025f126d71..b120dd6612a2 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -7,6 +7,7 @@ Required properties:
- "marvell,armada3700-xhci" for Armada 37xx SoCs
- "marvell,armada-375-xhci" for Armada 375 SoCs
- "marvell,armada-380-xhci" for Armada 38x SoCs
+ - "renesas,xhci-r8a7742" for r8a7742 SoC
- "renesas,xhci-r8a7743" for r8a7743 SoC
- "renesas,xhci-r8a7744" for r8a7744 SoC
- "renesas,xhci-r8a774a1" for r8a774a1 SoC
@@ -24,6 +25,7 @@ Required properties:
device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 or RZ/G2 compatible
device
+ - "brcm,bcm7445-xhci" for Broadcom STB SoCs with XHCI
- "xhci-platform" (deprecated)
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 997934c58f9a..9aeab66be85f 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -59,6 +59,8 @@ patternProperties:
description: Allwinner Technology Co., Ltd.
"^alphascale,.*":
description: AlphaScale Integrated Circuits Systems, Inc.
+ "^alps,.*":
+ description: Alps Electric Co., Ltd.
"^altr,.*":
description: Altera Corp.
"^amarula,.*":
@@ -131,6 +133,8 @@ patternProperties:
description: Shanghai AVIC Optoelectronics Co., Ltd.
"^avnet,.*":
description: Avnet, Inc.
+ "^awinic,.*":
+ description: Shanghai Awinic Technology Co., Ltd.
"^axentia,.*":
description: Axentia Technologies AB
"^axis,.*":
@@ -139,10 +143,14 @@ patternProperties:
description: Azoteq (Pty) Ltd
"^azw,.*":
description: Shenzhen AZW Technology Co., Ltd.
+ "^baikal,.*":
+ description: BAIKAL ELECTRONICS, JSC
"^bananapi,.*":
description: BIPAI KEJI LIMITED
"^beacon,.*":
description: Compass Electronics Group, LLC
+ "^beagle,.*":
+ description: BeagleBoard.org Foundation
"^bhf,.*":
description: Beckhoff Automation GmbH & Co. KG
"^bitmain,.*":
@@ -179,8 +187,12 @@ patternProperties:
description: Cadence Design Systems Inc.
"^cdtech,.*":
description: CDTech(H.K.) Electronics Limited
+ "^cellwise,.*":
+ description: CellWise Microelectronics Co., Ltd
"^ceva,.*":
description: Ceva, Inc.
+ "^checkpoint,.*":
+ description: Check Point Software Technologies Ltd.
"^chipidea,.*":
description: Chipidea, Inc
"^chipone,.*":
@@ -589,6 +601,8 @@ patternProperties:
description: LSI Corp. (LSI Logic)
"^lwn,.*":
description: Liebherr-Werk Nenzing GmbH
+ "^lxa,.*":
+ description: Linux Automation GmbH
"^macnica,.*":
description: Macnica Americas
"^mapleboard,.*":
@@ -814,6 +828,8 @@ patternProperties:
description: Primux Trading, S.L.
"^probox2,.*":
description: PROBOX2 (by W2COMP Co., Ltd.)
+ "^prt,.*":
+ description: Protonic Holland
"^pulsedlight,.*":
description: PulsedLight, Inc
"^purism,.*":
@@ -906,6 +922,8 @@ patternProperties:
description: Sharp Corporation
"^shimafuji,.*":
description: Shimafuji Electric, Inc.
+ "^shiratech,.*":
+ description: Shiratech Solutions
"^si-en,.*":
description: Si-En Technology Ltd.
"^si-linux,.*":
@@ -924,6 +942,8 @@ patternProperties:
description: Silead Inc.
"^silergy,.*":
description: Silergy Corp.
+ "^silex-insight,.*":
+ description: Silex Insight
"^siliconmitus,.*":
description: Silicon Mitus, Inc.
"^simtek,.*":
@@ -942,6 +962,8 @@ patternProperties:
description: Sitronix Technology Corporation
"^skyworks,.*":
description: Skyworks Solutions, Inc.
+ "^smartlabs,.*":
+ description: SmartLabs LLC
"^smsc,.*":
description: Standard Microsystems Corporation
"^snps,.*":
@@ -1053,6 +1075,8 @@ patternProperties:
description: Tyan Computer Corporation
"^u-blox,.*":
description: u-blox
+ "^u-boot,.*":
+ description: U-Boot bootloader
"^ucrobotics,.*":
description: uCRobotics
"^ubnt,.*":
@@ -1073,6 +1097,8 @@ patternProperties:
description: Aigo Digital Technology Co., Ltd.
"^v3,.*":
description: V3 Semiconductor
+ "^vaisala,.*":
+ description: Vaisala
"^vamrs,.*":
description: Vamrs Ltd.
"^variscite,.*":
@@ -1101,6 +1127,8 @@ patternProperties:
description: Waveshare Electronics
"^wd,.*":
description: Western Digital Corp.
+ "^we,.*":
+ description: Würth Elektronik GmbH.
"^wetek,.*":
description: WeTek Electronics, limited.
"^wexler,.*":
@@ -1133,6 +1161,8 @@ patternProperties:
description: Shenzhen Xinpeng Technology Co., Ltd
"^xlnx,.*":
description: Xilinx
+ "^xnano,.*":
+ description: Xnano
"^xunlong,.*":
description: Shenzhen Xunlong Software CO.,Limited
"^xylon,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/arm-smc-wdt.yaml b/Documentation/devicetree/bindings/watchdog/arm-smc-wdt.yaml
new file mode 100644
index 000000000000..bec651541e0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/arm-smc-wdt.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/arm-smc-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Secure Monitor Call based watchdog
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+maintainers:
+ - Julius Werner <jwerner@chromium.org>
+
+properties:
+ compatible:
+ enum:
+ - arm,smc-wdt
+ arm,smc-id:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ The ATF smc function id used by the firmware.
+ Defaults to 0x82003D06 if unset.
+
+required:
+ - compatible
+
+examples:
+ - |
+ watchdog {
+ compatible = "arm,smc-wdt";
+ arm,smc-id = <0x82003D06>;
+ timeout-sec = <15>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
deleted file mode 100644
index adc6b76fcb3a..000000000000
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-* Freescale i.MX Watchdog Timer (WDT) Controller
-
-Required properties:
-- compatible : Should be "fsl,<soc>-wdt"
-- reg : Should contain WDT registers location and length
-- interrupts : Should contain WDT interrupt
-
-Optional properties:
-- big-endian: If present the watchdog device's registers are implemented
- in big endian mode, otherwise in native mode(same with CPU), for more
- detail please see: Documentation/devicetree/bindings/regmap/regmap.txt.
-- fsl,ext-reset-output: If present the watchdog device is configured to
- assert its external reset (WDOG_B) instead of issuing a software reset.
-- timeout-sec : Contains the watchdog timeout in seconds
-
-Examples:
-
-wdt@73f98000 {
- compatible = "fsl,imx51-wdt", "fsl,imx21-wdt";
- reg = <0x73f98000 0x4000>;
- interrupts = <58>;
- big-endian;
- timeout-sec = <20>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml
new file mode 100644
index 000000000000..d96b93b11fad
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/fsl-imx-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX Watchdog Timer (WDT) Controller
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx21-wdt
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ fsl,ext-reset-output:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: |
+ If present, the watchdog device is configured to assert its
+ external reset (WDOG_B) instead of issuing a software reset.
+
+required:
+ - compatible
+ - interrupts
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx6qdl-clock.h>
+
+ watchdog@20bc000 {
+ compatible = "fsl,imx21-wdt";
+ reg = <0x020bc000 0x4000>;
+ interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_IPG>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt
deleted file mode 100644
index f902508d6cac..000000000000
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-* Freescale i.MX7ULP Watchdog Timer (WDT) Controller
-
-Required properties:
-- compatible : Should be "fsl,imx7ulp-wdt"
-- reg : Should contain WDT registers location and length
-- interrupts : Should contain WDT interrupt
-- clocks: Should contain a phandle pointing to the gated peripheral clock.
-
-Optional properties:
-- timeout-sec : Contains the watchdog timeout in seconds
-
-Examples:
-
-wdog1: watchdog@403d0000 {
- compatible = "fsl,imx7ulp-wdt";
- reg = <0x403d0000 0x10000>;
- interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
- assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
- assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
- timeout-sec = <40>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
new file mode 100644
index 000000000000..51d6d482bbc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/fsl-imx7ulp-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX7ULP Watchdog Timer (WDT) Controller
+
+maintainers:
+ - Anson Huang <Anson.Huang@nxp.com>
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+properties:
+ compatible:
+ enum:
+ - fsl,imx7ulp-wdt
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clocks-parents:
+ maxItems: 1
+
+ timeout-sec: true
+
+required:
+ - compatible
+ - interrupts
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx7ulp-clock.h>
+
+ watchdog@403d0000 {
+ compatible = "fsl,imx7ulp-wdt";
+ reg = <0x403d0000 0x10000>;
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
+ assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
+ assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
+ timeout-sec = <40>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
deleted file mode 100644
index 79b3c62f183d..000000000000
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Renesas Watchdog Timer (WDT) Controller
-
-Required properties:
- - compatible : Must be "renesas,<soctype>-wdt", followed by a generic
- fallback compatible string when compatible with the generic
- version.
- Examples with soctypes are:
- - "renesas,r8a7743-wdt" (RZ/G1M)
- - "renesas,r8a7744-wdt" (RZ/G1N)
- - "renesas,r8a7745-wdt" (RZ/G1E)
- - "renesas,r8a77470-wdt" (RZ/G1C)
- - "renesas,r8a774a1-wdt" (RZ/G2M)
- - "renesas,r8a774b1-wdt" (RZ/G2N)
- - "renesas,r8a774c0-wdt" (RZ/G2E)
- - "renesas,r8a7790-wdt" (R-Car H2)
- - "renesas,r8a7791-wdt" (R-Car M2-W)
- - "renesas,r8a7792-wdt" (R-Car V2H)
- - "renesas,r8a7793-wdt" (R-Car M2-N)
- - "renesas,r8a7794-wdt" (R-Car E2)
- - "renesas,r8a7795-wdt" (R-Car H3)
- - "renesas,r8a7796-wdt" (R-Car M3-W)
- - "renesas,r8a77961-wdt" (R-Car M3-W+)
- - "renesas,r8a77965-wdt" (R-Car M3-N)
- - "renesas,r8a77970-wdt" (R-Car V3M)
- - "renesas,r8a77990-wdt" (R-Car E3)
- - "renesas,r8a77995-wdt" (R-Car D3)
- - "renesas,r7s72100-wdt" (RZ/A1)
- - "renesas,r7s9210-wdt" (RZ/A2)
- The generic compatible string must be:
- - "renesas,rza-wdt" for RZ/A
- - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
- - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
-
-- reg : Should contain WDT registers location and length
-- clocks : the clock feeding the watchdog timer.
-
-Optional properties:
-- timeout-sec : Contains the watchdog timeout in seconds
-- power-domains : the power domain the WDT belongs to
-- interrupts: Some WDTs have an interrupt when used in interval timer mode
-
-Examples:
-
- wdt0: watchdog@e6020000 {
- compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt";
- reg = <0 0xe6020000 0 0x0c>;
- clocks = <&cpg CPG_MOD 402>;
- power-domains = <&cpg>;
- timeout-sec = <60>;
- };
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
new file mode 100644
index 000000000000..572f4c912fef
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/renesas,wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Watchdog Timer (WDT) Controller
+
+maintainers:
+ - Wolfram Sang <wsa+renesas@sang-engineering.com>
+ - Geert Uytterhoeven <geert+renesas@glider.be>
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r7s72100-wdt # RZ/A1
+ - renesas,r7s9210-wdt # RZ/A2
+ - const: renesas,rza-wdt # RZ/A
+
+ - items:
+ - enum:
+ - renesas,r8a7742-wdt # RZ/G1H
+ - renesas,r8a7743-wdt # RZ/G1M
+ - renesas,r8a7744-wdt # RZ/G1N
+ - renesas,r8a7745-wdt # RZ/G1E
+ - renesas,r8a77470-wdt # RZ/G1C
+ - renesas,r8a7790-wdt # R-Car H2
+ - renesas,r8a7791-wdt # R-Car M2-W
+ - renesas,r8a7792-wdt # R-Car V2H
+ - renesas,r8a7793-wdt # R-Car M2-N
+ - renesas,r8a7794-wdt # R-Car E2
+ - const: renesas,rcar-gen2-wdt # R-Car Gen2 and RZ/G1
+
+ - items:
+ - enum:
+ - renesas,r8a774a1-wdt # RZ/G2M
+ - renesas,r8a774b1-wdt # RZ/G2N
+ - renesas,r8a774c0-wdt # RZ/G2E
+ - renesas,r8a7795-wdt # R-Car H3
+ - renesas,r8a7796-wdt # R-Car M3-W
+ - renesas,r8a77961-wdt # R-Car M3-W+
+ - renesas,r8a77965-wdt # R-Car M3-N
+ - renesas,r8a77970-wdt # R-Car V3M
+ - renesas,r8a77980-wdt # R-Car V3H
+ - renesas,r8a77990-wdt # R-Car E3
+ - renesas,r8a77995-wdt # R-Car D3
+ - const: renesas,rcar-gen3-wdt # R-Car Gen3 and RZ/G2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ timeout-sec: true
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rza-wdt
+then:
+ required:
+ - power-domains
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+ wdt0: watchdog@e6020000 {
+ compatible = "renesas,r8a7795-wdt", "renesas,rcar-gen3-wdt";
+ reg = <0xe6020000 0x0c>;
+ clocks = <&cpg CPG_MOD 402>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 402>;
+ timeout-sec = <60>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml b/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml
new file mode 100644
index 000000000000..a059d16cb4f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/socionext,uniphier-wdt.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/socionext,uniphier-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Socionext UniPhier watchdog timer
+
+maintainers:
+ - Keiji Hayashibara <hayashibara.keiji@socionext.com>
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+properties:
+ compatible:
+ const: socionext,uniphier-wdt
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ // The UniPhier watchdog should be a subnode of a "syscon" compatible node.
+
+ sysctrl@61840000 {
+ compatible = "socionext,uniphier-ld11-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x61840000 0x10000>;
+
+ watchdog {
+ compatible = "socionext,uniphier-wdt";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
index e83026fef2e9..f0452791c598 100644
--- a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
@@ -57,7 +57,7 @@ examples:
watchdog0: rti@2200000 {
compatible = "ti,rti-wdt";
- reg = <0x0 0x2200000 0x0 0x100>;
+ reg = <0x2200000 0x100>;
clocks = <&k3_clks 252 1>;
power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 252 1>;
diff --git a/Documentation/devicetree/bindings/watchdog/uniphier-wdt.txt b/Documentation/devicetree/bindings/watchdog/uniphier-wdt.txt
deleted file mode 100644
index bf6337546dd1..000000000000
--- a/Documentation/devicetree/bindings/watchdog/uniphier-wdt.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-UniPhier watchdog timer controller
-
-This UniPhier watchdog timer controller must be under sysctrl node.
-
-Required properties:
-- compatible: should be "socionext,uniphier-wdt"
-
-Example:
-
- sysctrl@61840000 {
- compatible = "socionext,uniphier-ld11-sysctrl",
- "simple-mfd", "syscon";
- reg = <0x61840000 0x4000>;
-
- watchdog {
- compatible = "socionext,uniphier-wdt";
- }
-
- other nodes ...
- };
diff --git a/Documentation/devicetree/bindings/writing-bindings.rst b/Documentation/devicetree/bindings/writing-bindings.rst
new file mode 100644
index 000000000000..45ff426d0019
--- /dev/null
+++ b/Documentation/devicetree/bindings/writing-bindings.rst
@@ -0,0 +1,67 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================================
+DOs and DON'Ts for designing and writing Devicetree bindings
+============================================================
+
+This is a list of common review feedback items focused on binding design. With
+every rule, there are exceptions and bindings have many gray areas.
+
+For guidelines related to patches, see
+Documentation/devicetree/bindings/submitting-patches.rst
+
+
+Overall design
+==============
+
+- DO attempt to make bindings complete even if a driver doesn't support some
+  features. For example, if a device has an interrupt, then include the
+  'interrupts' property even if the driver only operates in polled mode.
+
+- DON'T refer to Linux or "device driver" in bindings. Bindings should be
+ based on what the hardware has, not what an OS and driver currently support.
+
+- DO use node names matching the class of the device. Many standard names are
+ defined in the DT Spec. If there isn't one, consider adding it.
+
+- DO check that the example matches the documentation especially after making
+ review changes.
+
+- DON'T create nodes just for the sake of instantiating drivers. Multi-function
+ devices only need child nodes when the child nodes have their own DT
+ resources. A single node can be multiple providers (e.g. clocks and resets).
+
+- DON'T use 'syscon' alone without a specific compatible string. A 'syscon'
+ hardware block should have a compatible string unique enough to infer the
+ register layout of the entire block (at a minimum).
+
+
+Properties
+==========
+
+- DO make 'compatible' properties specific. DON'T use wildcards in compatible
+ strings. DO use fallback compatibles when devices are the same as or a subset
+ of prior implementations. DO add new compatibles in case there are new
+ features or bugs.
+
+- DO use a vendor prefix on device specific property names. Consider if
+ properties could be common among devices of the same class. Check other
+ existing bindings for similar devices.
+
+- DON'T redefine common properties. Just reference the definition and define
+ constraints specific to the device.
+
+- DO use common property unit suffixes for properties with scientific units.
+ See property-units.txt.
+
+- DO define properties in terms of constraints. How many entries? What are
+ possible values? What is the order?
+
+
+Board/SoC .dts Files
+====================
+
+- DO put all MMIO devices under a bus node and not at the top-level.
+
+- DO use non-empty 'ranges' to limit the size of child buses/devices. 64-bit
+ platforms don't need all devices to have 64-bit address and size.
diff --git a/Documentation/devicetree/bindings/writing-bindings.txt b/Documentation/devicetree/bindings/writing-bindings.txt
deleted file mode 100644
index 27dfd2d8016e..000000000000
--- a/Documentation/devicetree/bindings/writing-bindings.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-DOs and DON'Ts for designing and writing Devicetree bindings
-
-This is a list of common review feedback items focused on binding design. With
-every rule, there are exceptions and bindings have many gray areas.
-
-For guidelines related to patches, see
-Documentation/devicetree/bindings/submitting-patches.txt
-
-
-Overall design
-
-- DO attempt to make bindings complete even if a driver doesn't support some
- features. For example, if a device has an interrupt, then include the
- 'interrupts' property even if the driver is only polled mode.
-
-- DON'T refer to Linux or "device driver" in bindings. Bindings should be
- based on what the hardware has, not what an OS and driver currently support.
-
-- DO use node names matching the class of the device. Many standard names are
- defined in the DT Spec. If there isn't one, consider adding it.
-
-- DO check that the example matches the documentation especially after making
- review changes.
-
-- DON'T create nodes just for the sake of instantiating drivers. Multi-function
- devices only need child nodes when the child nodes have their own DT
- resources. A single node can be multiple providers (e.g. clocks and resets).
-
-- DON'T use 'syscon' alone without a specific compatible string. A 'syscon'
- hardware block should have a compatible string unique enough to infer the
- register layout of the entire block (at a minimum).
-
-
-Properties
-
-- DO make 'compatible' properties specific. DON'T use wildcards in compatible
- strings. DO use fallback compatibles when devices are the same as or a subset
- of prior implementations. DO add new compatibles in case there are new
- features or bugs.
-
-- DO use a vendor prefix on device specific property names. Consider if
- properties could be common among devices of the same class. Check other
- existing bindings for similar devices.
-
-- DON'T redefine common properties. Just reference the definition and define
- constraints specific to the device.
-
-- DO use common property unit suffixes for properties with scientific units.
- See property-units.txt.
-
-- DO define properties in terms of constraints. How many entries? What are
- possible values? What is the order?
-
-
-Board/SoC .dts Files
-
-- DO put all MMIO devices under a bus node and not at the top-level.
-
-- DO use non-empty 'ranges' to limit the size of child buses/devices. 64-bit
- platforms don't need all devices to have 64-bit address and size.
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index d058ace29345..28199b31fe5e 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -86,149 +86,6 @@
xlnx,use-parity = <0>;
};
- Some IP cores actually implement 2 or more logical devices. In
- this case, the device should still describe the whole IP core with
- a single node and add a child node for each logical device. The
- ranges property can be used to translate from parent IP-core to the
- registers of each device. In addition, the parent node should be
- compatible with the bus type 'xlnx,compound', and should contain
- #address-cells and #size-cells, as with any other bus. (Note: this
- makes the assumption that both logical devices have the same bus
- binding. If this is not true, then separate nodes should be used
- for each logical device). The 'cell-index' property can be used to
- enumerate logical devices within an IP core. For example, the
- following is the system.mhs entry for the dual ps2 controller found
- on the ml403 reference design.
-
- BEGIN opb_ps2_dual_ref
- PARAMETER INSTANCE = opb_ps2_dual_ref_0
- PARAMETER HW_VER = 1.00.a
- PARAMETER C_BASEADDR = 0xA9000000
- PARAMETER C_HIGHADDR = 0xA9001FFF
- BUS_INTERFACE SOPB = opb_v20_0
- PORT Sys_Intr1 = ps2_1_intr
- PORT Sys_Intr2 = ps2_2_intr
- PORT Clkin1 = ps2_clk_rx_1
- PORT Clkin2 = ps2_clk_rx_2
- PORT Clkpd1 = ps2_clk_tx_1
- PORT Clkpd2 = ps2_clk_tx_2
- PORT Rx1 = ps2_d_rx_1
- PORT Rx2 = ps2_d_rx_2
- PORT Txpd1 = ps2_d_tx_1
- PORT Txpd2 = ps2_d_tx_2
- END
-
- It would result in the following device tree nodes:
-
- opb_ps2_dual_ref_0: opb-ps2-dual-ref@a9000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ranges = <0 a9000000 2000>;
- // If this device had extra parameters, then they would
- // go here.
- ps2@0 {
- compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
- reg = <0 40>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <3 0>;
- cell-index = <0>;
- };
- ps2@1000 {
- compatible = "xlnx,opb-ps2-dual-ref-1.00.a";
- reg = <1000 40>;
- interrupt-parent = <&opb_intc_0>;
- interrupts = <3 0>;
- cell-index = <0>;
- };
- };
-
- Also, the system.mhs file defines bus attachments from the processor
- to the devices. The device tree structure should reflect the bus
- attachments. Again an example; this system.mhs fragment:
-
- BEGIN ppc405_virtex4
- PARAMETER INSTANCE = ppc405_0
- PARAMETER HW_VER = 1.01.a
- BUS_INTERFACE DPLB = plb_v34_0
- BUS_INTERFACE IPLB = plb_v34_0
- END
-
- BEGIN opb_intc
- PARAMETER INSTANCE = opb_intc_0
- PARAMETER HW_VER = 1.00.c
- PARAMETER C_BASEADDR = 0xD1000FC0
- PARAMETER C_HIGHADDR = 0xD1000FDF
- BUS_INTERFACE SOPB = opb_v20_0
- END
-
- BEGIN opb_uart16550
- PARAMETER INSTANCE = opb_uart16550_0
- PARAMETER HW_VER = 1.00.d
- PARAMETER C_BASEADDR = 0xa0000000
- PARAMETER C_HIGHADDR = 0xa0001FFF
- BUS_INTERFACE SOPB = opb_v20_0
- END
-
- BEGIN plb_v34
- PARAMETER INSTANCE = plb_v34_0
- PARAMETER HW_VER = 1.02.a
- END
-
- BEGIN plb_bram_if_cntlr
- PARAMETER INSTANCE = plb_bram_if_cntlr_0
- PARAMETER HW_VER = 1.00.b
- PARAMETER C_BASEADDR = 0xFFFF0000
- PARAMETER C_HIGHADDR = 0xFFFFFFFF
- BUS_INTERFACE SPLB = plb_v34_0
- END
-
- BEGIN plb2opb_bridge
- PARAMETER INSTANCE = plb2opb_bridge_0
- PARAMETER HW_VER = 1.01.a
- PARAMETER C_RNG0_BASEADDR = 0x20000000
- PARAMETER C_RNG0_HIGHADDR = 0x3FFFFFFF
- PARAMETER C_RNG1_BASEADDR = 0x60000000
- PARAMETER C_RNG1_HIGHADDR = 0x7FFFFFFF
- PARAMETER C_RNG2_BASEADDR = 0x80000000
- PARAMETER C_RNG2_HIGHADDR = 0xBFFFFFFF
- PARAMETER C_RNG3_BASEADDR = 0xC0000000
- PARAMETER C_RNG3_HIGHADDR = 0xDFFFFFFF
- BUS_INTERFACE SPLB = plb_v34_0
- BUS_INTERFACE MOPB = opb_v20_0
- END
-
- Gives this device tree (some properties removed for clarity):
-
- plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v34-1.02.a";
- device_type = "ibm,plb";
- ranges; // 1:1 translation
-
- plb_bram_if_cntrl_0: bram@ffff0000 {
- reg = <ffff0000 10000>;
- }
-
- opb@20000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <20000000 20000000 20000000
- 60000000 60000000 20000000
- 80000000 80000000 40000000
- c0000000 c0000000 20000000>;
-
- opb_uart16550_0: serial@a0000000 {
- reg = <a00000000 2000>;
- };
-
- opb_intc_0: interrupt-controller@d1000fc0 {
- reg = <d1000fc0 20>;
- };
- };
- };
-
That covers the general approach to binding xilinx IP cores into the
device tree. The following are bindings for specific devices:
diff --git a/Documentation/devicetree/changesets.rst b/Documentation/devicetree/changesets.rst
new file mode 100644
index 000000000000..c7fd8cd6a270
--- /dev/null
+++ b/Documentation/devicetree/changesets.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+DT Changesets
+=============
+
+A DT changeset is a method which allows one to apply changes
+in the live tree in such a way that either the full set of changes
+will be applied, or none of them will be. If an error occurs partway
+through applying the changeset, then the tree will be rolled back to the
+previous state. A changeset can also be removed after it has been
+applied.
+
+When a changeset is applied, all of the changes get applied to the tree
+at once before emitting OF_RECONFIG notifiers. This is so that the
+receiver sees a complete and consistent state of the tree when it
+receives the notifier.
+
+The sequence of a changeset is as follows.
+
+1. of_changeset_init() - initializes a changeset
+
+2. A number of DT tree change calls, of_changeset_attach_node(),
+ of_changeset_detach_node(), of_changeset_add_property(),
+ of_changeset_remove_property, of_changeset_update_property() to prepare
+ a set of changes. No changes to the active tree are made at this point.
+ All the change operations are recorded in the of_changeset 'entries'
+ list.
+
+3. of_changeset_apply() - Apply the changes to the tree. Either the
+ entire changeset will get applied, or if there is an error the tree will
+ be restored to the previous state. The core ensures proper serialization
+ through locking. An unlocked version __of_changeset_apply is available,
+ if needed.
+
+If a successfully applied changeset needs to be removed, it can be done
+with of_changeset_revert().
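+
+As a rough illustration only (not part of the API description above), a
+minimal sketch of this sequence from kernel code could look like the
+following; the node pointer and the property used here are placeholders::
+
+    #include <linux/of.h>
+
+    static int example_changeset(struct device_node *np)
+    {
+            static struct property prop = {
+                    .name   = "status",
+                    .value  = (void *)"okay",
+                    .length = sizeof("okay"),
+            };
+            struct of_changeset ocs;
+            int ret;
+
+            of_changeset_init(&ocs);                              /* step 1 */
+            ret = of_changeset_update_property(&ocs, np, &prop);  /* step 2 */
+            if (!ret)
+                    ret = of_changeset_apply(&ocs);               /* step 3 */
+            if (ret)
+                    of_changeset_destroy(&ocs); /* drop the recorded entries */
+            return ret;
+    }
+
+A changeset applied this way can later be undone with of_changeset_revert(&ocs).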
diff --git a/Documentation/devicetree/changesets.txt b/Documentation/devicetree/changesets.txt
deleted file mode 100644
index cb488eeb6353..000000000000
--- a/Documentation/devicetree/changesets.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-A DT changeset is a method which allows one to apply changes
-in the live tree in such a way that either the full set of changes
-will be applied, or none of them will be. If an error occurs partway
-through applying the changeset, then the tree will be rolled back to the
-previous state. A changeset can also be removed after it has been
-applied.
-
-When a changeset is applied, all of the changes get applied to the tree
-at once before emitting OF_RECONFIG notifiers. This is so that the
-receiver sees a complete and consistent state of the tree when it
-receives the notifier.
-
-The sequence of a changeset is as follows.
-
-1. of_changeset_init() - initializes a changeset
-
-2. A number of DT tree change calls, of_changeset_attach_node(),
-of_changeset_detach_node(), of_changeset_add_property(),
-of_changeset_remove_property, of_changeset_update_property() to prepare
-a set of changes. No changes to the active tree are made at this point.
-All the change operations are recorded in the of_changeset 'entries'
-list.
-
-3. of_changeset_apply() - Apply the changes to the tree. Either the
-entire changeset will get applied, or if there is an error the tree will
-be restored to the previous state. The core ensures proper serialization
-through locking. An unlocked version __of_changeset_apply is available,
-if needed.
-
-If a successfully applied changeset needs to be removed, it can be done
-with of_changeset_revert().
diff --git a/Documentation/devicetree/dynamic-resolution-notes.rst b/Documentation/devicetree/dynamic-resolution-notes.rst
new file mode 100644
index 000000000000..570b7e1f39eb
--- /dev/null
+++ b/Documentation/devicetree/dynamic-resolution-notes.rst
@@ -0,0 +1,27 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Device Tree Dynamic Resolver Notes
+==================================
+
+This document describes the implementation of the in-kernel
+Device Tree resolver, residing in drivers/of/resolver.c
+
+How the resolver works
+----------------------
+
+The resolver is given as an input an arbitrary tree compiled with the
+proper dtc option and having a /plugin/ tag. This generates the
+appropriate __fixups__ & __local_fixups__ nodes.
+
+In sequence the resolver works by the following steps:
+
+1. Get the maximum device tree phandle value from the live tree + 1.
+2. Adjust all the local phandles of the tree to resolve by that amount.
+3. Using the __local_fixups__ node information adjust all local references
+ by the same amount.
+4. For each property in the __fixups__ node locate the node it references
+ in the live tree. This is the label used to tag the node.
+5. Retrieve the phandle of the target of the fixup.
+6. For each fixup in the property locate the node:property:offset location
+ and replace it with the phandle value.
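+
+The adjustment in steps 1-2 amounts to walking the overlay tree and offsetting
+every local phandle by the computed delta. A conceptual sketch of that part
+(the function name is made up; the real code lives in drivers/of/resolver.c)
+could be::
+
+    #include <linux/of.h>
+
+    static void bump_phandles_sketch(struct device_node *np, u32 delta)
+    {
+            struct device_node *child;
+
+            if (np->phandle)
+                    np->phandle += delta;
+            for (child = np->child; child; child = child->sibling)
+                    bump_phandles_sketch(child, delta);
+    }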
diff --git a/Documentation/devicetree/dynamic-resolution-notes.txt b/Documentation/devicetree/dynamic-resolution-notes.txt
deleted file mode 100644
index c24ec366c5dc..000000000000
--- a/Documentation/devicetree/dynamic-resolution-notes.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Device Tree Dynamic Resolver Notes
-----------------------------------
-
-This document describes the implementation of the in-kernel
-Device Tree resolver, residing in drivers/of/resolver.c
-
-How the resolver works
-----------------------
-
-The resolver is given as an input an arbitrary tree compiled with the
-proper dtc option and having a /plugin/ tag. This generates the
-appropriate __fixups__ & __local_fixups__ nodes.
-
-In sequence the resolver works by the following steps:
-
-1. Get the maximum device tree phandle value from the live tree + 1.
-2. Adjust all the local phandles of the tree to resolve by that amount.
-3. Using the __local__fixups__ node information adjust all local references
- by the same amount.
-4. For each property in the __fixups__ node locate the node it references
- in the live tree. This is the label used to tag the node.
-5. Retrieve the phandle of the target of the fixup.
-6. For each fixup in the property locate the node:property:offset location
- and replace it with the phandle value.
diff --git a/Documentation/devicetree/index.rst b/Documentation/devicetree/index.rst
new file mode 100644
index 000000000000..54026763916d
--- /dev/null
+++ b/Documentation/devicetree/index.rst
@@ -0,0 +1,17 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+Open Firmware and Device Tree
+=============================
+
+.. toctree::
+ :maxdepth: 1
+
+ usage-model
+ writing-schema
+ changesets
+ dynamic-resolution-notes
+ of_unittest
+ overlay-notes
+
+ bindings/index
diff --git a/Documentation/devicetree/of_unittest.rst b/Documentation/devicetree/of_unittest.rst
new file mode 100644
index 000000000000..dea05214f3ad
--- /dev/null
+++ b/Documentation/devicetree/of_unittest.rst
@@ -0,0 +1,205 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
+Open Firmware Device Tree Unittest
+==================================
+
+Author: Gaurav Minocha <gaurav.minocha.os@gmail.com>
+
+1. Introduction
+===============
+
+This document explains how the test data required for executing OF unittest
+is attached to the live tree dynamically, independent of the machine's
+architecture.
+
+It is recommended to read the following documents before moving ahead.
+
+(1) Documentation/devicetree/usage-model.rst
+(2) http://www.devicetree.org/Device_Tree_Usage
+
+OF Selftest has been designed to test the interface (include/linux/of.h)
+provided to device driver developers to fetch the device information..etc.
+from the unflattened device tree data structure. This interface is used by
+most of the device drivers in various use cases.
+
+
+2. Test-data
+============
+
+The Device Tree Source file (drivers/of/unittest-data/testcases.dts) contains
+the test data required for executing the unit tests automated in
+drivers/of/unittest.c. Currently, the following Device Tree Source Include files
+(.dtsi) are included in testcases.dts::
+
+ drivers/of/unittest-data/tests-interrupts.dtsi
+ drivers/of/unittest-data/tests-platform.dtsi
+ drivers/of/unittest-data/tests-phandle.dtsi
+ drivers/of/unittest-data/tests-match.dtsi
+
+When the kernel is built with OF_SELFTEST enabled, the following make
+rule::
+
+ $(obj)/%.dtb: $(src)/%.dts FORCE
+ $(call if_changed_dep, dtc)
+
+is used to compile the DT source file (testcases.dts) into a binary blob
+(testcases.dtb), also referred to as a flattened DT.
+
+After that, using the following rule the binary blob above is wrapped as an
+assembly file (testcases.dtb.S)::
+
+ $(obj)/%.dtb.S: $(obj)/%.dtb
+ $(call cmd, dt_S_dtb)
+
+The assembly file is compiled into an object file (testcases.dtb.o), and is
+linked into the kernel image.
+
+
+2.1. Adding the test data
+-------------------------
+
+Un-flattened device tree structure:
+
+An un-flattened device tree consists of connected device_node(s) in the form
+of a tree structure, described below::
+
+ // following struct members are used to construct the tree
+ struct device_node {
+ ...
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ ...
+ };
+
+Figure 1 describes the generic structure of a machine's un-flattened device tree,
+considering only child and sibling pointers. There exists another pointer,
+``*parent``, that is used to traverse the tree in the reverse direction. So, at
+a particular level the child node and all the sibling nodes will have a parent
+pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4's
+parent points to root node)::
+
+ root ('/')
+ |
+ child1 -> sibling2 -> sibling3 -> sibling4 -> null
+ | | | |
+ | | | null
+ | | |
+ | | child31 -> sibling32 -> null
+ | | | |
+ | | null null
+ | |
+ | child21 -> sibling22 -> sibling23 -> null
+ | | | |
+ | null null null
+ |
+ child11 -> sibling12 -> sibling13 -> sibling14 -> null
+ | | | |
+ | | | null
+ | | |
+ null null child131 -> null
+ |
+ null
+
+Figure 1: Generic structure of un-flattened device tree
+
+
+Before executing OF unittest, it is required to attach the test data to
+the machine's device tree (if present). So, when selftest_data_add() is called,
+at first it reads the flattened device tree data linked into the kernel image
+via the following kernel symbols::
+
+ __dtb_testcases_begin - address marking the start of test data blob
+ __dtb_testcases_end - address marking the end of test data blob
+
+Secondly, it calls of_fdt_unflatten_tree() to unflatten the flattened
+blob. And finally, if the machine's device tree (i.e live tree) is present,
+then it attaches the unflattened test data tree to the live tree, else it
+attaches itself as a live device tree.
+
+attach_node_and_children() uses of_attach_node() to attach the nodes into the
+live tree as explained below. To explain the same, the test data tree described
+in Figure 2 is attached to the live tree described in Figure 1::
+
+ root ('/')
+ |
+ testcase-data
+ |
+ test-child0 -> test-sibling1 -> test-sibling2 -> test-sibling3 -> null
+ | | | |
+ test-child01 null null null
+
+
+Figure 2: Example test data tree to be attached to live tree.
+
+According to the scenario above, the live tree is already present so it isn't
+required to attach the root('/') node. All other nodes are attached by calling
+of_attach_node() on each node.
+
+In the function of_attach_node(), the new node is attached as a child of the
+given parent in the live tree. But if the parent already has a child, the new
+node replaces the current child and turns it into its sibling. So, when the
+testcase data node is attached to the live tree above (Figure 1), the final
+structure is as shown in Figure 3::
+
+ root ('/')
+ |
+ testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+ | | | | |
+ (...) | | | null
+ | | child31 -> sibling32 -> null
+ | | | |
+ | | null null
+ | |
+ | child21 -> sibling22 -> sibling23 -> null
+ | | | |
+ | null null null
+ |
+ child11 -> sibling12 -> sibling13 -> sibling14 -> null
+ | | | |
+ null null | null
+ |
+ child131 -> null
+ |
+ null
+ -----------------------------------------------------------------------
+
+ root ('/')
+ |
+ testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
+ | | | | |
+ | (...) (...) (...) null
+ |
+ test-sibling3 -> test-sibling2 -> test-sibling1 -> test-child0 -> null
+ | | | |
+ null null null test-child01
+
+
+Figure 3: Live device tree structure after attaching the testcase-data.
+
+
+Astute readers will have noticed that the test-child0 node becomes the last
+sibling compared to the earlier structure (Figure 2). test-child0 is attached
+first; then test-sibling1 is attached, which pushes the existing child
+(i.e. test-child0) down to become a sibling and makes itself the child node,
+as described above.
+
+If a duplicate node is found (i.e. a node with the same full_name property is
+already present in the live tree), then the node isn't attached; rather, its
+properties are updated in the live tree's node by calling the function
+update_node_properties().
+
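+A conceptual sketch of the pointer manipulation behind this attach behaviour
+(ignoring the locking, sysfs and phandle handling that of_attach_node() also
+performs; the function name is made up) is::
+
+    #include <linux/of.h>
+
+    /* Prepend 'np' to the parent's child list: the new node becomes the
+     * first child and the previous first child becomes its sibling.
+     */
+    static void attach_node_sketch(struct device_node *np,
+                                   struct device_node *parent)
+    {
+            np->parent = parent;
+            np->sibling = parent->child;
+            parent->child = np;
+    }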
+
+2.2. Removing the test data
+---------------------------
+
+Once the test case execution is complete, selftest_data_remove() is called in
+order to remove the device nodes attached initially (first the leaf nodes are
+detached, then, moving up, the parent nodes, until eventually the whole tree
+is removed). selftest_data_remove() calls detach_node_and_children(), which
+uses of_detach_node() to detach the nodes from the live device tree.
+
+To detach a node, of_detach_node() either updates the child pointer of the given
+node's parent to its sibling or attaches the previous sibling to the given
+node's sibling, as appropriate. That is it :)
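+
+For completeness, that unlink step can be sketched as below (again a
+conceptual illustration only, with locking and flag handling omitted and a
+made-up function name)::
+
+    #include <linux/of.h>
+
+    /* Unlink 'np' from its parent's child list. */
+    static void detach_node_sketch(struct device_node *np)
+    {
+            struct device_node *parent = np->parent;
+            struct device_node *prev;
+
+            if (parent->child == np) {
+                    parent->child = np->sibling;
+                    return;
+            }
+            for (prev = parent->child; prev->sibling != np; prev = prev->sibling)
+                    ;
+            prev->sibling = np->sibling;
+    }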
diff --git a/Documentation/devicetree/of_unittest.txt b/Documentation/devicetree/of_unittest.txt
deleted file mode 100644
index 3e4e7d48ae93..000000000000
--- a/Documentation/devicetree/of_unittest.txt
+++ /dev/null
@@ -1,197 +0,0 @@
-Open Firmware Device Tree Unittest
-----------------------------------
-
-Author: Gaurav Minocha <gaurav.minocha.os@gmail.com>
-
-1. Introduction
-
-This document explains how the test data required for executing OF unittest
-is attached to the live tree dynamically, independent of the machine's
-architecture.
-
-It is recommended to read the following documents before moving ahead.
-
-[1] Documentation/devicetree/usage-model.txt
-[2] http://www.devicetree.org/Device_Tree_Usage
-
-OF Selftest has been designed to test the interface (include/linux/of.h)
-provided to device driver developers to fetch the device information..etc.
-from the unflattened device tree data structure. This interface is used by
-most of the device drivers in various use cases.
-
-
-2. Test-data
-
-The Device Tree Source file (drivers/of/unittest-data/testcases.dts) contains
-the test data required for executing the unit tests automated in
-drivers/of/unittest.c. Currently, following Device Tree Source Include files
-(.dtsi) are included in testcases.dts:
-
-drivers/of/unittest-data/tests-interrupts.dtsi
-drivers/of/unittest-data/tests-platform.dtsi
-drivers/of/unittest-data/tests-phandle.dtsi
-drivers/of/unittest-data/tests-match.dtsi
-
-When the kernel is build with OF_SELFTEST enabled, then the following make rule
-
-$(obj)/%.dtb: $(src)/%.dts FORCE
- $(call if_changed_dep, dtc)
-
-is used to compile the DT source file (testcases.dts) into a binary blob
-(testcases.dtb), also referred as flattened DT.
-
-After that, using the following rule the binary blob above is wrapped as an
-assembly file (testcases.dtb.S).
-
-$(obj)/%.dtb.S: $(obj)/%.dtb
- $(call cmd, dt_S_dtb)
-
-The assembly file is compiled into an object file (testcases.dtb.o), and is
-linked into the kernel image.
-
-
-2.1. Adding the test data
-
-Un-flattened device tree structure:
-
-Un-flattened device tree consists of connected device_node(s) in form of a tree
-structure described below.
-
-// following struct members are used to construct the tree
-struct device_node {
- ...
- struct device_node *parent;
- struct device_node *child;
- struct device_node *sibling;
- ...
- };
-
-Figure 1, describes a generic structure of machine's un-flattened device tree
-considering only child and sibling pointers. There exists another pointer,
-*parent, that is used to traverse the tree in the reverse direction. So, at
-a particular level the child node and all the sibling nodes will have a parent
-pointer pointing to a common node (e.g. child1, sibling2, sibling3, sibling4's
-parent points to root node)
-
-root ('/')
- |
-child1 -> sibling2 -> sibling3 -> sibling4 -> null
- | | | |
- | | | null
- | | |
- | | child31 -> sibling32 -> null
- | | | |
- | | null null
- | |
- | child21 -> sibling22 -> sibling23 -> null
- | | | |
- | null null null
- |
-child11 -> sibling12 -> sibling13 -> sibling14 -> null
- | | | |
- | | | null
- | | |
- null null child131 -> null
- |
- null
-
-Figure 1: Generic structure of un-flattened device tree
-
-
-Before executing OF unittest, it is required to attach the test data to
-machine's device tree (if present). So, when selftest_data_add() is called,
-at first it reads the flattened device tree data linked into the kernel image
-via the following kernel symbols:
-
-__dtb_testcases_begin - address marking the start of test data blob
-__dtb_testcases_end - address marking the end of test data blob
-
-Secondly, it calls of_fdt_unflatten_tree() to unflatten the flattened
-blob. And finally, if the machine's device tree (i.e live tree) is present,
-then it attaches the unflattened test data tree to the live tree, else it
-attaches itself as a live device tree.
-
-attach_node_and_children() uses of_attach_node() to attach the nodes into the
-live tree as explained below. To explain the same, the test data tree described
- in Figure 2 is attached to the live tree described in Figure 1.
-
-root ('/')
- |
- testcase-data
- |
- test-child0 -> test-sibling1 -> test-sibling2 -> test-sibling3 -> null
- | | | |
- test-child01 null null null
-
-
-Figure 2: Example test data tree to be attached to live tree.
-
-According to the scenario above, the live tree is already present so it isn't
-required to attach the root('/') node. All other nodes are attached by calling
-of_attach_node() on each node.
-
-In the function of_attach_node(), the new node is attached as the child of the
-given parent in live tree. But, if parent already has a child then the new node
-replaces the current child and turns it into its sibling. So, when the testcase
-data node is attached to the live tree above (Figure 1), the final structure is
- as shown in Figure 3.
-
-root ('/')
- |
-testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
- | | | | |
- (...) | | | null
- | | child31 -> sibling32 -> null
- | | | |
- | | null null
- | |
- | child21 -> sibling22 -> sibling23 -> null
- | | | |
- | null null null
- |
- child11 -> sibling12 -> sibling13 -> sibling14 -> null
- | | | |
- null null | null
- |
- child131 -> null
- |
- null
------------------------------------------------------------------------
-
-root ('/')
- |
-testcase-data -> child1 -> sibling2 -> sibling3 -> sibling4 -> null
- | | | | |
- | (...) (...) (...) null
- |
-test-sibling3 -> test-sibling2 -> test-sibling1 -> test-child0 -> null
- | | | |
- null null null test-child01
-
-
-Figure 3: Live device tree structure after attaching the testcase-data.
-
-
-Astute readers would have noticed that test-child0 node becomes the last
-sibling compared to the earlier structure (Figure 2). After attaching first
-test-child0 the test-sibling1 is attached that pushes the child node
-(i.e. test-child0) to become a sibling and makes itself a child node,
- as mentioned above.
-
-If a duplicate node is found (i.e. if a node with same full_name property is
-already present in the live tree), then the node isn't attached rather its
-properties are updated to the live tree's node by calling the function
-update_node_properties().
-
-
-2.2. Removing the test data
-
-Once the test case execution is complete, selftest_data_remove is called in
-order to remove the device nodes attached initially (first the leaf nodes are
-detached and then moving up the parent nodes are removed, and eventually the
-whole tree). selftest_data_remove() calls detach_node_and_children() that uses
-of_detach_node() to detach the nodes from the live device tree.
-
-To detach a node, of_detach_node() either updates the child pointer of given
-node's parent to its sibling or attaches the previous sibling to the given
-node's sibling, as appropriate. That is it :)
diff --git a/Documentation/devicetree/overlay-notes.rst b/Documentation/devicetree/overlay-notes.rst
new file mode 100644
index 000000000000..c67cc676bbd2
--- /dev/null
+++ b/Documentation/devicetree/overlay-notes.rst
@@ -0,0 +1,128 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+Device Tree Overlay Notes
+=========================
+
+This document describes the implementation of the in-kernel
+device tree overlay functionality residing in drivers/of/overlay.c and is a
+companion document to Documentation/devicetree/dynamic-resolution-notes.rst[1]
+
+How overlays work
+-----------------
+
+The purpose of a Device Tree overlay is to modify the kernel's live tree
+so that the state of the kernel reflects the modification.
+Since the kernel mainly deals with devices, any new device node that results
+in an active device should have that device created, while if a device node
+is disabled or removed altogether, the affected device should be deregistered.
+
+Let's take an example where we have a foo board with the following base tree::
+
+ ---- foo.dts ---------------------------------------------------------------
+ /* FOO platform */
+ /dts-v1/;
+ / {
+ compatible = "corp,foo";
+
+ /* shared resources */
+ res: res {
+ };
+
+ /* On chip peripherals */
+ ocp: ocp {
+ /* peripherals that are always instantiated */
+ peripheral1 { ... };
+ };
+ };
+ ---- foo.dts ---------------------------------------------------------------
+
+The overlay bar.dts,
+::
+
+ ---- bar.dts - overlay target location by label ----------------------------
+ /dts-v1/;
+ /plugin/;
+ &ocp {
+ /* bar peripheral */
+ bar {
+ compatible = "corp,bar";
+ ... /* various properties and child nodes */
+ };
+ };
+ ---- bar.dts ---------------------------------------------------------------
+
+when loaded (and resolved as described in [1]) should result in foo+bar.dts::
+
+ ---- foo+bar.dts -----------------------------------------------------------
+ /* FOO platform + bar peripheral */
+ / {
+ compatible = "corp,foo";
+
+ /* shared resources */
+ res: res {
+ };
+
+ /* On chip peripherals */
+ ocp: ocp {
+ /* peripherals that are always instantiated */
+ peripheral1 { ... };
+
+ /* bar peripheral */
+ bar {
+ compatible = "corp,bar";
+ ... /* various properties and child nodes */
+ };
+ };
+ };
+ ---- foo+bar.dts -----------------------------------------------------------
+
+As a result of the overlay, a new device node (bar) has been created
+so a bar platform device will be registered and if a matching device driver
+is loaded the device will be created as expected.
+
+If the base DT was not compiled with the -@ option then the "&ocp" label
+will not be available to resolve the overlay node(s) to the proper location
+in the base DT. In this case, the target path can be provided. The target
+location by label syntax is preferred because the overlay can be applied to
+any base DT containing the label, no matter where the label occurs in the DT.
+
+The above bar.dts example modified to use target path syntax is::
+
+ ---- bar.dts - overlay target location by explicit path --------------------
+ /dts-v1/;
+ /plugin/;
+ &{/ocp} {
+ /* bar peripheral */
+ bar {
+ compatible = "corp,bar";
+ ... /* various properties and child nodes */
+ }
+ };
+ ---- bar.dts ---------------------------------------------------------------
+
+
+Overlay in-kernel API
+---------------------
+
+The API is quite easy to use.
+
+1) Call of_overlay_fdt_apply() to create and apply an overlay changeset. The
+ return value is an error or a cookie identifying this overlay.
+
+2) Call of_overlay_remove() to remove and cleanup the overlay changeset
+ previously created via the call to of_overlay_fdt_apply(). Removal of an
+ overlay changeset that is stacked by another will not be permitted.
+
+Finally, if you need to remove all overlays in one go, just call
+of_overlay_remove_all() which will remove every single one in the correct
+order.
+
+In addition, there is the option to register notifiers that get called on
+overlay operations. See of_overlay_notifier_register/unregister and
+enum of_overlay_notify_action for details.
+
+Note that a notifier callback is not supposed to store pointers to a device
+tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the
+respective node it received.
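+
+A minimal, hedged sketch of this API (exact prototypes may differ between
+kernel versions, and obtaining the overlay blob itself is firmware- or
+filesystem-specific and not shown here)::
+
+    #include <linux/of.h>
+
+    static int apply_and_remove_overlay(const void *overlay_fdt, u32 size)
+    {
+            int ovcs_id = 0;
+            int ret;
+
+            /* 1) Create and apply the overlay changeset. */
+            ret = of_overlay_fdt_apply(overlay_fdt, size, &ovcs_id);
+            if (ret)
+                    return ret;
+
+            /* ... the devices created by the overlay are now live ... */
+
+            /* 2) Remove it again using the cookie returned above. */
+            return of_overlay_remove(&ovcs_id);
+    }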
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
deleted file mode 100644
index 725fb8d255c1..000000000000
--- a/Documentation/devicetree/overlay-notes.txt
+++ /dev/null
@@ -1,139 +0,0 @@
-Device Tree Overlay Notes
--------------------------
-
-This document describes the implementation of the in-kernel
-device tree overlay functionality residing in drivers/of/overlay.c and is a
-companion document to Documentation/devicetree/dynamic-resolution-notes.txt[1]
-
-How overlays work
------------------
-
-A Device Tree's overlay purpose is to modify the kernel's live tree, and
-have the modification affecting the state of the kernel in a way that
-is reflecting the changes.
-Since the kernel mainly deals with devices, any new device node that result
-in an active device should have it created while if the device node is either
-disabled or removed all together, the affected device should be deregistered.
-
-Lets take an example where we have a foo board with the following base tree:
-
----- foo.dts -----------------------------------------------------------------
- /* FOO platform */
- / {
- compatible = "corp,foo";
-
- /* shared resources */
- res: res {
- };
-
- /* On chip peripherals */
- ocp: ocp {
- /* peripherals that are always instantiated */
- peripheral1 { ... };
- }
- };
----- foo.dts -----------------------------------------------------------------
-
-The overlay bar.dts, when loaded (and resolved as described in [1]) should
-
----- bar.dts -----------------------------------------------------------------
-/plugin/; /* allow undefined label references and record them */
-/ {
- .... /* various properties for loader use; i.e. part id etc. */
- fragment@0 {
- target = <&ocp>;
- __overlay__ {
- /* bar peripheral */
- bar {
- compatible = "corp,bar";
- ... /* various properties and child nodes */
- }
- };
- };
-};
----- bar.dts -----------------------------------------------------------------
-
-result in foo+bar.dts
-
----- foo+bar.dts -------------------------------------------------------------
- /* FOO platform + bar peripheral */
- / {
- compatible = "corp,foo";
-
- /* shared resources */
- res: res {
- };
-
- /* On chip peripherals */
- ocp: ocp {
- /* peripherals that are always instantiated */
- peripheral1 { ... };
-
- /* bar peripheral */
- bar {
- compatible = "corp,bar";
- ... /* various properties and child nodes */
- }
- }
- };
----- foo+bar.dts -------------------------------------------------------------
-
-As a result of the overlay, a new device node (bar) has been created
-so a bar platform device will be registered and if a matching device driver
-is loaded the device will be created as expected.
-
-Overlay in-kernel API
---------------------------------
-
-The API is quite easy to use.
-
-1. Call of_overlay_fdt_apply() to create and apply an overlay changeset. The
-return value is an error or a cookie identifying this overlay.
-
-2. Call of_overlay_remove() to remove and cleanup the overlay changeset
-previously created via the call to of_overlay_fdt_apply(). Removal of an
-overlay changeset that is stacked by another will not be permitted.
-
-Finally, if you need to remove all overlays in one-go, just call
-of_overlay_remove_all() which will remove every single one in the correct
-order.
-
-In addition, there is the option to register notifiers that get called on
-overlay operations. See of_overlay_notifier_register/unregister and
-enum of_overlay_notify_action for details.
-
-Note that a notifier callback is not supposed to store pointers to a device
-tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the
-respective node it received.
-
-Overlay DTS Format
-------------------
-
-The DTS of an overlay should have the following format:
-
-{
- /* ignored properties by the overlay */
-
- fragment@0 { /* first child node */
-
- target=<phandle>; /* phandle target of the overlay */
- or
- target-path="/path"; /* target path of the overlay */
-
- __overlay__ {
- property-a; /* add property-a to the target */
- node-a { /* add to an existing, or create a node-a */
- ...
- };
- };
- }
- fragment@1 { /* second child node */
- ...
- };
- /* more fragments follow */
-}
-
-Using the non-phandle based target method allows one to use a base DT which does
-not contain a __symbols__ node, i.e. it was not compiled with the -@ option.
-The __symbols__ node is only required for the target=<phandle> method, since it
-contains the information required to map from a phandle to a tree location.
diff --git a/Documentation/devicetree/usage-model.rst b/Documentation/devicetree/usage-model.rst
new file mode 100644
index 000000000000..e1b42dc63f01
--- /dev/null
+++ b/Documentation/devicetree/usage-model.rst
@@ -0,0 +1,420 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+Linux and the Device Tree
+=========================
+
+The Linux usage model for device tree data
+
+:Author: Grant Likely <grant.likely@secretlab.ca>
+
+This article describes how Linux uses the device tree. An overview of
+the device tree data format can be found on the device tree usage page
+at devicetree.org\ [1]_.
+
+.. [1] https://elinux.org/Device_Tree_Usage
+
+The "Open Firmware Device Tree", or simply Device Tree (DT), is a data
+structure and language for describing hardware. More specifically, it
+is a description of hardware that is readable by an operating system
+so that the operating system doesn't need to hard code details of the
+machine.
+
+Structurally, the DT is a tree, or acyclic graph with named nodes, and
+nodes may have an arbitrary number of named properties encapsulating
+arbitrary data. A mechanism also exists to create arbitrary
+links from one node to another outside of the natural tree structure.
+
+Conceptually, a common set of usage conventions, called 'bindings',
+is defined for how data should appear in the tree to describe typical
+hardware characteristics including data busses, interrupt lines, GPIO
+connections, and peripheral devices.
+
+As much as possible, hardware is described using existing bindings to
+maximize use of existing support code, but since property and node
+names are simply text strings, it is easy to extend existing bindings
+or create new ones by defining new nodes and properties. Be wary,
+however, of creating a new binding without first doing some homework
+about what already exists. There are currently two different,
+incompatible, bindings for i2c busses that came about because the new
+binding was created without first investigating how i2c devices were
+already being enumerated in existing systems.
+
+1. History
+----------
+The DT was originally created by Open Firmware as part of the
+communication method for passing data from Open Firmware to a client
+program (like to an operating system). An operating system used the
+Device Tree to discover the topology of the hardware at runtime, and
+thereby support a majority of available hardware without hard coded
+information (assuming drivers were available for all devices).
+
+Since Open Firmware is commonly used on PowerPC and SPARC platforms,
+the Linux support for those architectures has for a long time used the
+Device Tree.
+
+In 2005, when PowerPC Linux began a major cleanup and an effort to merge 32-bit
+and 64-bit support, the decision was made to require DT support on all
+powerpc platforms, regardless of whether or not they used Open
+Firmware. To do this, a DT representation called the Flattened Device
+Tree (FDT) was created which could be passed to the kernel as a binary
+blob without requiring a real Open Firmware implementation. U-Boot,
+kexec, and other bootloaders were modified to support both passing a
+Device Tree Binary (dtb) and modifying a dtb at boot time. DT was
+also added to the PowerPC boot wrapper (``arch/powerpc/boot/*``) so that
+a dtb could be wrapped up with the kernel image to support booting
+existing non-DT aware firmware.
+
+Some time later, FDT infrastructure was generalized to be usable by
+all architectures. At the time of this writing, 6 mainlined
+architectures (arm, microblaze, mips, powerpc, sparc, and x86) and 1
+out of mainline (nios) have some level of DT support.
+
+2. Data Model
+-------------
+If you haven't already read the Device Tree Usage\ [1]_ page,
+then go read it now. It's okay, I'll wait....
+
+2.1 High Level View
+-------------------
+The most important thing to understand is that the DT is simply a data
+structure that describes the hardware. There is nothing magical about
+it, and it doesn't magically make all hardware configuration problems
+go away. What it does do is provide a language for decoupling the
+hardware configuration from the board and device driver support in the
+Linux kernel (or any other operating system for that matter). Using
+it allows board and device support to become data driven; to make
+setup decisions based on data passed into the kernel instead of on
+per-machine hard coded selections.
+
+Ideally, data driven platform setup should result in less code
+duplication and make it easier to support a wide range of hardware
+with a single kernel image.
+
+Linux uses DT data for three major purposes:
+
+1) platform identification,
+2) runtime configuration, and
+3) device population.
+
+2.2 Platform Identification
+---------------------------
+First and foremost, the kernel will use data in the DT to identify the
+specific machine. In a perfect world, the specific platform shouldn't
+matter to the kernel because all platform details would be described
+perfectly by the device tree in a consistent and reliable manner.
+Hardware is not perfect though, and so the kernel must identify the
+machine during early boot so that it has the opportunity to run
+machine-specific fixups.
+
+In the majority of cases, the machine identity is irrelevant, and the
+kernel will instead select setup code based on the machine's core
+CPU or SoC. On ARM for example, setup_arch() in
+arch/arm/kernel/setup.c will call setup_machine_fdt() in
+arch/arm/kernel/devtree.c which searches through the machine_desc
+table and selects the machine_desc which best matches the device tree
+data. It determines the best match by looking at the 'compatible'
+property in the root device tree node, and comparing it with the
+dt_compat list in struct machine_desc (which is defined in
+arch/arm/include/asm/mach/arch.h if you're curious).
+
+The 'compatible' property contains a sorted list of strings starting
+with the exact name of the machine, followed by an optional list of
+boards it is compatible with sorted from most compatible to least. For
+example, the root compatible properties for the TI BeagleBoard and its
+successor, the BeagleBoard xM board might look like, respectively::
+
+ compatible = "ti,omap3-beagleboard", "ti,omap3450", "ti,omap3";
+ compatible = "ti,omap3-beagleboard-xm", "ti,omap3450", "ti,omap3";
+
+Where "ti,omap3-beagleboard-xm" specifies the exact model, it also
+claims that it is compatible with the OMAP 3450 SoC, and the omap3 family
+of SoCs in general. You'll notice that the list is sorted from most
+specific (exact board) to least specific (SoC family).
+
+Astute readers might point out that the Beagle xM could also claim
+compatibility with the original Beagle board. However, one should be
+cautioned about doing so at the board level since there is typically a
+high level of change from one board to another, even within the same
+product line, and it is hard to nail down exactly what is meant when one
+board claims to be compatible with another. For the top level, it is
+better to err on the side of caution and not claim one board is
+compatible with another. The notable exception would be when one
+board is a carrier for another, such as a CPU module attached to a
+carrier board.
+
+One more note on compatible values. Any string used in a compatible
+property must be documented as to what it indicates. Add
+documentation for compatible strings in Documentation/devicetree/bindings.
+
+Again on ARM, for each machine_desc, the kernel looks to see if
+any of the dt_compat list entries appear in the compatible property.
+If one does, then that machine_desc is a candidate for driving the
+machine. After searching the entire table of machine_descs,
+setup_machine_fdt() returns the 'most compatible' machine_desc based
+on which entry in the compatible property each machine_desc matches
+against. If no matching machine_desc is found, then it returns NULL.
+
+The reasoning behind this scheme is the observation that in the majority
+of cases, a single machine_desc can support a large number of boards
+if they all use the same SoC, or same family of SoCs. However,
+invariably there will be some exceptions where a specific board will
+require special setup code that is not useful in the generic case.
+Special cases could be handled by explicitly checking for the
+troublesome board(s) in generic setup code, but doing so very quickly
+becomes ugly and/or unmaintainable if it is more than just a couple of
+cases.
+
+Instead, the compatible list allows a generic machine_desc to provide
+support for a wide common set of boards by specifying "less
+compatible" values in the dt_compat list. In the example above,
+generic board support can claim compatibility with "ti,omap3" or
+"ti,omap3450". If a bug was discovered on the original beagleboard
+that required special workaround code during early boot, then a new
+machine_desc could be added which implements the workarounds and only
+matches on "ti,omap3-beagleboard".
+
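+A sketch of how such a board-specific entry might look on ARM, using
+the DT_MACHINE_START()/MACHINE_END helpers from
+arch/arm/include/asm/mach/arch.h (the early fixup function here is
+hypothetical)::
+
+  static const char *const omap3_beagle_dt_compat[] = {
+      "ti,omap3-beagleboard",
+      NULL
+  };
+
+  static void __init omap3_beagle_init_early(void)
+  {
+      /* hypothetical workaround applied early in boot */
+  }
+
+  DT_MACHINE_START(OMAP3_BEAGLE, "TI OMAP3 BeagleBoard")
+      .dt_compat = omap3_beagle_dt_compat,
+      .init_early = omap3_beagle_init_early,
+  MACHINE_END
+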
+PowerPC uses a slightly different scheme where it calls the .probe()
+hook from each machine_desc, and the first one returning TRUE is used.
+However, this approach does not take into account the priority of the
+compatible list, and probably should be avoided for new architecture
+support.
+
+2.3 Runtime configuration
+-------------------------
+In most cases, a DT will be the sole method of communicating data from
+firmware to the kernel, so it also gets used to pass in runtime and
+configuration data like the kernel parameters string and the location
+of an initrd image.
+
+Most of this data is contained in the /chosen node, and when booting
+Linux it will look something like this::
+
+ chosen {
+ bootargs = "console=ttyS0,115200 loglevel=8";
+ initrd-start = <0xc8000000>;
+ initrd-end = <0xc8200000>;
+ };
+
+The bootargs property contains the kernel arguments, and the initrd-*
+properties define the address and size of an initrd blob. Note that
+initrd-end is the first address after the initrd image, so this doesn't
+match the usual semantic of struct resource. The chosen node may also
+optionally contain an arbitrary number of additional properties for
+platform-specific configuration data.
+
+During early boot, the architecture setup code calls of_scan_flat_dt()
+several times with different helper callbacks to parse device tree
+data before paging is set up. The of_scan_flat_dt() code scans through
+the device tree and uses the helpers to extract information required
+during early boot. Typically the early_init_dt_scan_chosen() helper
+is used to parse the chosen node including kernel parameters,
+early_init_dt_scan_root() to initialize the DT address space model,
+and early_init_dt_scan_memory() to determine the size and
+location of usable RAM.
+
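+A condensed sketch of that sequence (loosely modelled on
+early_init_dt_scan_nodes() in drivers/of/fdt.c; return values and
+error handling are omitted)::
+
+  void __init early_init_dt_scan_nodes(void)
+  {
+      /* Retrieve the command line and other data from /chosen */
+      of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+      /* Initialize the {size,address}-cells info from the root node */
+      of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+      /* Determine the size and location of usable RAM */
+      of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+  }
+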
+On ARM, the function setup_machine_fdt() is responsible for early
+scanning of the device tree after selecting the correct machine_desc
+that supports the board.
+
+2.4 Device population
+---------------------
+After the board has been identified, and after the early configuration data
+has been parsed, then kernel initialization can proceed in the normal
+way. At some point in this process, unflatten_device_tree() is called
+to convert the data into a more efficient runtime representation.
+This is also when machine-specific setup hooks will get called, like
+the machine_desc .init_early(), .init_irq() and .init_machine() hooks
+on ARM. The remainder of this section uses examples from the ARM
+implementation, but all architectures will do pretty much the same
+thing when using a DT.
+
+As can be guessed by the names, .init_early() is used for any machine-
+specific setup that needs to be executed early in the boot process,
+and .init_irq() is used to set up interrupt handling. Using a DT
+doesn't materially change the behaviour of either of these functions.
+If a DT is provided, then both .init_early() and .init_irq() are able
+to call any of the DT query functions (of_* in include/linux/of*.h) to
+get additional data about the platform.
+
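+For example, an .init_early() implementation might use such a query to
+apply a quirk on one specific board only (the "acme" names below are
+invented for illustration)::
+
+  static void __init acme_apply_sram_quirk(void)
+  {
+      /* hypothetical board-specific workaround */
+  }
+
+  static void __init acme_init_early(void)
+  {
+      /* of_machine_is_compatible() tests the root node's compatible list */
+      if (of_machine_is_compatible("acme,board-rev-a"))
+          acme_apply_sram_quirk();
+  }
+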
+The most interesting hook in the DT context is .init_machine() which
+is primarily responsible for populating the Linux device model with
+data about the platform. Historically this has been implemented on
+embedded platforms by defining a set of static clock structures,
+platform_devices, and other data in the board support .c file, and
+registering it en masse in .init_machine(). When DT is used, then
+instead of hard coding static devices for each platform, the list of
+devices can be obtained by parsing the DT, and allocating device
+structures dynamically.
+
+The simplest case is when .init_machine() is only responsible for
+registering a block of platform_devices. A platform_device is a concept
+used by Linux for memory or I/O mapped devices which cannot be detected
+by hardware, and for 'composite' or 'virtual' devices (more on those
+later). While there is no 'platform device' terminology for the DT,
+platform devices roughly correspond to device nodes at the root of the
+tree and children of simple memory mapped bus nodes.
+
+About now is a good time to lay out an example. Here is part of the
+device tree for the NVIDIA Tegra board::
+
+ /{
+ compatible = "nvidia,harmony", "nvidia,tegra20";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen { };
+ aliases { };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>;
+ };
+
+ soc {
+ compatible = "nvidia,tegra20-soc", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ intc: interrupt-controller@50041000 {
+ compatible = "nvidia,tegra20-gic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x50041000 0x1000>, < 0x50040100 0x0100 >;
+ };
+
+ serial@70006300 {
+ compatible = "nvidia,tegra20-uart";
+ reg = <0x70006300 0x100>;
+ interrupts = <122>;
+ };
+
+ i2s1: i2s@70002800 {
+ compatible = "nvidia,tegra20-i2s";
+ reg = <0x70002800 0x100>;
+ interrupts = <77>;
+ codec = <&wm8903>;
+ };
+
+ i2c@7000c000 {
+ compatible = "nvidia,tegra20-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x7000c000 0x100>;
+ interrupts = <70>;
+
+ wm8903: codec@1a {
+ compatible = "wlf,wm8903";
+ reg = <0x1a>;
+ interrupts = <347>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "nvidia,harmony-sound";
+ i2s-controller = <&i2s1>;
+ i2s-codec = <&wm8903>;
+ };
+ };
+
+At .init_machine() time, Tegra board support code will need to look at
+this DT and decide which nodes to create platform_devices for.
+However, looking at the tree, it is not immediately obvious what kind
+of device each node represents, or even if a node represents a device
+at all. The /chosen, /aliases, and /memory nodes are informational
+nodes that don't describe devices (although arguably memory could be
+considered a device). The children of the /soc node are memory mapped
+devices, but the codec@1a is an i2c device, and the sound node
+represents not a device, but rather how other devices are connected
+together to create the audio subsystem. I know what each device is
+because I'm familiar with the board design, but how does the kernel
+know what to do with each node?
+
+The trick is that the kernel starts at the root of the tree and looks
+for nodes that have a 'compatible' property. First, it is generally
+assumed that any node with a 'compatible' property represents a device
+of some kind, and second, it can be assumed that any node at the root
+of the tree is either directly attached to the processor bus, or is a
+miscellaneous system device that cannot be described any other way.
+For each of these nodes, Linux allocates and registers a
+platform_device, which in turn may get bound to a platform_driver.
+
+Why is using a platform_device for these nodes a safe assumption?
+Well, for the way that Linux models devices, just about all bus_types
+assume that their devices are children of a bus controller. For
+example, each i2c_client is a child of an i2c_master. Each spi_device
+is a child of an SPI bus. Similarly for USB, PCI, MDIO, etc. The
+same hierarchy is also found in the DT, where I2C device nodes only
+ever appear as children of an I2C bus node. Ditto for SPI, MDIO, USB,
+etc. The only devices which do not require a specific type of parent
+device are platform_devices (and amba_devices, but more on that
+later), which will happily live at the base of the Linux /sys/devices
+tree. Therefore, if a DT node is at the root of the tree, then it
+is probably best registered as a platform_device.
+
+Linux board support code calls of_platform_populate(NULL, NULL, NULL, NULL)
+to kick off discovery of devices at the root of the tree. The
+parameters are all NULL because when starting from the root of the
+tree, there is no need to provide a starting node (the first NULL), a
+parent struct device (the last NULL), and we're not using a match
+table (yet). For a board that only needs to register devices,
+.init_machine() can be completely empty except for the
+of_platform_populate() call.
+
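+For such a board, the whole hook might be no more than this (the board
+name is made up)::
+
+  static void __init acme_init_machine(void)
+  {
+      /* Register a platform_device for every node at the root of the tree */
+      of_platform_populate(NULL, NULL, NULL, NULL);
+  }
+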
+In the Tegra example, this accounts for the /soc and /sound nodes, but
+what about the children of the SoC node? Shouldn't they be registered
+as platform devices too? For Linux DT support, the generic behaviour
+is for child devices to be registered by the parent's device driver at
+driver .probe() time. So, an i2c bus device driver will register an
+i2c_client for each child node, an SPI bus driver will register
+its spi_device children, and similarly for other bus_types.
+According to that model, a driver could be written that binds to the
+SoC node and simply registers platform_devices for each of its
+children. The board support code would allocate and register an SoC
+device, a (theoretical) SoC device driver could bind to the SoC device,
+and register platform_devices for /soc/interrupt-controller, /soc/serial,
+/soc/i2s, and /soc/i2c in its .probe() hook. Easy, right?
+
+Actually, it turns out that registering children of some
+platform_devices as more platform_devices is a common pattern, and the
+device tree support code reflects that and makes the above example
+simpler. The second argument to of_platform_populate() is an
+of_device_id table, and any node that matches an entry in that table
+will also get its child nodes registered. In the Tegra case, the code
+can look something like this::
+
+ static void __init harmony_init_machine(void)
+ {
+ /* ... */
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ }
+
+"simple-bus" is defined in the Devicetree Specification as a property
+meaning a simple memory mapped bus, so the of_platform_populate() code
+could be written to just assume simple-bus compatible nodes will
+always be traversed. However, we pass it in as an argument so that
+board support code can always override the default behaviour.
+
+[Need to add discussion of adding i2c/spi/etc child devices]
+
+Appendix A: AMBA devices
+------------------------
+
+ARM Primecells are a certain kind of device attached to the ARM AMBA
+bus which include some support for hardware detection and power
+management. In Linux, struct amba_device and the amba_bus_type is
+used to represent Primecell devices. However, the fiddly bit is that
+not all devices on an AMBA bus are Primecells, and for Linux it is
+typical for both amba_device and platform_device instances to be
+siblings of the same bus segment.
+
+When using the DT, this creates problems for of_platform_populate()
+because it must decide whether to register each node as either a
+platform_device or an amba_device. This unfortunately complicates the
+device creation model a little bit, but the solution turns out not to
+be too invasive. If a node is compatible with "arm,primecell", then
+of_platform_populate() will register it as an amba_device instead of a
+platform_device.
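+
+A condensed sketch of the per-node test this implies (the helper name
+is invented; the real logic lives in drivers/of/platform.c and handles
+a few more corner cases)::
+
+  static bool node_is_primecell(const struct device_node *np)
+  {
+      return IS_ENABLED(CONFIG_ARM_AMBA) &&
+             of_device_is_compatible(np, "arm,primecell");
+  }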
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
deleted file mode 100644
index 33a8aaac02a8..000000000000
--- a/Documentation/devicetree/usage-model.txt
+++ /dev/null
@@ -1,415 +0,0 @@
-Linux and the Device Tree
--------------------------
-The Linux usage model for device tree data
-
-Author: Grant Likely <grant.likely@secretlab.ca>
-
-This article describes how Linux uses the device tree. An overview of
-the device tree data format can be found on the device tree usage page
-at devicetree.org[1].
-
-[1] http://devicetree.org/Device_Tree_Usage
-
-The "Open Firmware Device Tree", or simply Device Tree (DT), is a data
-structure and language for describing hardware. More specifically, it
-is a description of hardware that is readable by an operating system
-so that the operating system doesn't need to hard code details of the
-machine.
-
-Structurally, the DT is a tree, or acyclic graph with named nodes, and
-nodes may have an arbitrary number of named properties encapsulating
-arbitrary data. A mechanism also exists to create arbitrary
-links from one node to another outside of the natural tree structure.
-
-Conceptually, a common set of usage conventions, called 'bindings',
-is defined for how data should appear in the tree to describe typical
-hardware characteristics including data busses, interrupt lines, GPIO
-connections, and peripheral devices.
-
-As much as possible, hardware is described using existing bindings to
-maximize use of existing support code, but since property and node
-names are simply text strings, it is easy to extend existing bindings
-or create new ones by defining new nodes and properties. Be wary,
-however, of creating a new binding without first doing some homework
-about what already exists. There are currently two different,
-incompatible, bindings for i2c busses that came about because the new
-binding was created without first investigating how i2c devices were
-already being enumerated in existing systems.
-
-1. History
-----------
-The DT was originally created by Open Firmware as part of the
-communication method for passing data from Open Firmware to a client
-program (like to an operating system). An operating system used the
-Device Tree to discover the topology of the hardware at runtime, and
-thereby support a majority of available hardware without hard coded
-information (assuming drivers were available for all devices).
-
-Since Open Firmware is commonly used on PowerPC and SPARC platforms,
-the Linux support for those architectures has for a long time used the
-Device Tree.
-
-In 2005, when PowerPC Linux began a major cleanup and to merge 32-bit
-and 64-bit support, the decision was made to require DT support on all
-powerpc platforms, regardless of whether or not they used Open
-Firmware. To do this, a DT representation called the Flattened Device
-Tree (FDT) was created which could be passed to the kernel as a binary
-blob without requiring a real Open Firmware implementation. U-Boot,
-kexec, and other bootloaders were modified to support both passing a
-Device Tree Binary (dtb) and to modify a dtb at boot time. DT was
-also added to the PowerPC boot wrapper (arch/powerpc/boot/*) so that
-a dtb could be wrapped up with the kernel image to support booting
-existing non-DT aware firmware.
-
-Some time later, FDT infrastructure was generalized to be usable by
-all architectures. At the time of this writing, 6 mainlined
-architectures (arm, microblaze, mips, powerpc, sparc, and x86) and 1
-out of mainline (nios) have some level of DT support.
-
-2. Data Model
--------------
-If you haven't already read the Device Tree Usage[1] page,
-then go read it now. It's okay, I'll wait....
-
-2.1 High Level View
--------------------
-The most important thing to understand is that the DT is simply a data
-structure that describes the hardware. There is nothing magical about
-it, and it doesn't magically make all hardware configuration problems
-go away. What it does do is provide a language for decoupling the
-hardware configuration from the board and device driver support in the
-Linux kernel (or any other operating system for that matter). Using
-it allows board and device support to become data driven; to make
-setup decisions based on data passed into the kernel instead of on
-per-machine hard coded selections.
-
-Ideally, data driven platform setup should result in less code
-duplication and make it easier to support a wide range of hardware
-with a single kernel image.
-
-Linux uses DT data for three major purposes:
-1) platform identification,
-2) runtime configuration, and
-3) device population.
-
-2.2 Platform Identification
----------------------------
-First and foremost, the kernel will use data in the DT to identify the
-specific machine. In a perfect world, the specific platform shouldn't
-matter to the kernel because all platform details would be described
-perfectly by the device tree in a consistent and reliable manner.
-Hardware is not perfect though, and so the kernel must identify the
-machine during early boot so that it has the opportunity to run
-machine-specific fixups.
-
-In the majority of cases, the machine identity is irrelevant, and the
-kernel will instead select setup code based on the machine's core
-CPU or SoC. On ARM for example, setup_arch() in
-arch/arm/kernel/setup.c will call setup_machine_fdt() in
-arch/arm/kernel/devtree.c which searches through the machine_desc
-table and selects the machine_desc which best matches the device tree
-data. It determines the best match by looking at the 'compatible'
-property in the root device tree node, and comparing it with the
-dt_compat list in struct machine_desc (which is defined in
-arch/arm/include/asm/mach/arch.h if you're curious).
-
-The 'compatible' property contains a sorted list of strings starting
-with the exact name of the machine, followed by an optional list of
-boards it is compatible with sorted from most compatible to least. For
-example, the root compatible properties for the TI BeagleBoard and its
-successor, the BeagleBoard xM board might look like, respectively:
-
- compatible = "ti,omap3-beagleboard", "ti,omap3450", "ti,omap3";
- compatible = "ti,omap3-beagleboard-xm", "ti,omap3450", "ti,omap3";
-
-Where "ti,omap3-beagleboard-xm" specifies the exact model, it also
-claims that it compatible with the OMAP 3450 SoC, and the omap3 family
-of SoCs in general. You'll notice that the list is sorted from most
-specific (exact board) to least specific (SoC family).
-
-Astute readers might point out that the Beagle xM could also claim
-compatibility with the original Beagle board. However, one should be
-cautioned about doing so at the board level since there is typically a
-high level of change from one board to another, even within the same
-product line, and it is hard to nail down exactly what is meant when one
-board claims to be compatible with another. For the top level, it is
-better to err on the side of caution and not claim one board is
-compatible with another. The notable exception would be when one
-board is a carrier for another, such as a CPU module attached to a
-carrier board.
-
-One more note on compatible values. Any string used in a compatible
-property must be documented as to what it indicates. Add
-documentation for compatible strings in Documentation/devicetree/bindings.
-
-Again on ARM, for each machine_desc, the kernel looks to see if
-any of the dt_compat list entries appear in the compatible property.
-If one does, then that machine_desc is a candidate for driving the
-machine. After searching the entire table of machine_descs,
-setup_machine_fdt() returns the 'most compatible' machine_desc based
-on which entry in the compatible property each machine_desc matches
-against. If no matching machine_desc is found, then it returns NULL.
-
-The reasoning behind this scheme is the observation that in the majority
-of cases, a single machine_desc can support a large number of boards
-if they all use the same SoC, or same family of SoCs. However,
-invariably there will be some exceptions where a specific board will
-require special setup code that is not useful in the generic case.
-Special cases could be handled by explicitly checking for the
-troublesome board(s) in generic setup code, but doing so very quickly
-becomes ugly and/or unmaintainable if it is more than just a couple of
-cases.
-
-Instead, the compatible list allows a generic machine_desc to provide
-support for a wide common set of boards by specifying "less
-compatible" values in the dt_compat list. In the example above,
-generic board support can claim compatibility with "ti,omap3" or
-"ti,omap3450". If a bug was discovered on the original beagleboard
-that required special workaround code during early boot, then a new
-machine_desc could be added which implements the workarounds and only
-matches on "ti,omap3-beagleboard".
-
-PowerPC uses a slightly different scheme where it calls the .probe()
-hook from each machine_desc, and the first one returning TRUE is used.
-However, this approach does not take into account the priority of the
-compatible list, and probably should be avoided for new architecture
-support.
-
-2.3 Runtime configuration
--------------------------
-In most cases, a DT will be the sole method of communicating data from
-firmware to the kernel, so also gets used to pass in runtime and
-configuration data like the kernel parameters string and the location
-of an initrd image.
-
-Most of this data is contained in the /chosen node, and when booting
-Linux it will look something like this:
-
- chosen {
- bootargs = "console=ttyS0,115200 loglevel=8";
- initrd-start = <0xc8000000>;
- initrd-end = <0xc8200000>;
- };
-
-The bootargs property contains the kernel arguments, and the initrd-*
-properties define the address and size of an initrd blob. Note that
-initrd-end is the first address after the initrd image, so this doesn't
-match the usual semantic of struct resource. The chosen node may also
-optionally contain an arbitrary number of additional properties for
-platform-specific configuration data.
-
-During early boot, the architecture setup code calls of_scan_flat_dt()
-several times with different helper callbacks to parse device tree
-data before paging is setup. The of_scan_flat_dt() code scans through
-the device tree and uses the helpers to extract information required
-during early boot. Typically the early_init_dt_scan_chosen() helper
-is used to parse the chosen node including kernel parameters,
-early_init_dt_scan_root() to initialize the DT address space model,
-and early_init_dt_scan_memory() to determine the size and
-location of usable RAM.
-
-On ARM, the function setup_machine_fdt() is responsible for early
-scanning of the device tree after selecting the correct machine_desc
-that supports the board.
-
-2.4 Device population
----------------------
-After the board has been identified, and after the early configuration data
-has been parsed, then kernel initialization can proceed in the normal
-way. At some point in this process, unflatten_device_tree() is called
-to convert the data into a more efficient runtime representation.
-This is also when machine-specific setup hooks will get called, like
-the machine_desc .init_early(), .init_irq() and .init_machine() hooks
-on ARM. The remainder of this section uses examples from the ARM
-implementation, but all architectures will do pretty much the same
-thing when using a DT.
-
-As can be guessed by the names, .init_early() is used for any machine-
-specific setup that needs to be executed early in the boot process,
-and .init_irq() is used to set up interrupt handling. Using a DT
-doesn't materially change the behaviour of either of these functions.
-If a DT is provided, then both .init_early() and .init_irq() are able
-to call any of the DT query functions (of_* in include/linux/of*.h) to
-get additional data about the platform.
-
-The most interesting hook in the DT context is .init_machine() which
-is primarily responsible for populating the Linux device model with
-data about the platform. Historically this has been implemented on
-embedded platforms by defining a set of static clock structures,
-platform_devices, and other data in the board support .c file, and
-registering it en-masse in .init_machine(). When DT is used, then
-instead of hard coding static devices for each platform, the list of
-devices can be obtained by parsing the DT, and allocating device
-structures dynamically.
-
-The simplest case is when .init_machine() is only responsible for
-registering a block of platform_devices. A platform_device is a concept
-used by Linux for memory or I/O mapped devices which cannot be detected
-by hardware, and for 'composite' or 'virtual' devices (more on those
-later). While there is no 'platform device' terminology for the DT,
-platform devices roughly correspond to device nodes at the root of the
-tree and children of simple memory mapped bus nodes.
-
-About now is a good time to lay out an example. Here is part of the
-device tree for the NVIDIA Tegra board.
-
-/{
- compatible = "nvidia,harmony", "nvidia,tegra20";
- #address-cells = <1>;
- #size-cells = <1>;
- interrupt-parent = <&intc>;
-
- chosen { };
- aliases { };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x40000000>;
- };
-
- soc {
- compatible = "nvidia,tegra20-soc", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- intc: interrupt-controller@50041000 {
- compatible = "nvidia,tegra20-gic";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0x50041000 0x1000>, < 0x50040100 0x0100 >;
- };
-
- serial@70006300 {
- compatible = "nvidia,tegra20-uart";
- reg = <0x70006300 0x100>;
- interrupts = <122>;
- };
-
- i2s1: i2s@70002800 {
- compatible = "nvidia,tegra20-i2s";
- reg = <0x70002800 0x100>;
- interrupts = <77>;
- codec = <&wm8903>;
- };
-
- i2c@7000c000 {
- compatible = "nvidia,tegra20-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x7000c000 0x100>;
- interrupts = <70>;
-
- wm8903: codec@1a {
- compatible = "wlf,wm8903";
- reg = <0x1a>;
- interrupts = <347>;
- };
- };
- };
-
- sound {
- compatible = "nvidia,harmony-sound";
- i2s-controller = <&i2s1>;
- i2s-codec = <&wm8903>;
- };
-};
-
-At .init_machine() time, Tegra board support code will need to look at
-this DT and decide which nodes to create platform_devices for.
-However, looking at the tree, it is not immediately obvious what kind
-of device each node represents, or even if a node represents a device
-at all. The /chosen, /aliases, and /memory nodes are informational
-nodes that don't describe devices (although arguably memory could be
-considered a device). The children of the /soc node are memory mapped
-devices, but the codec@1a is an i2c device, and the sound node
-represents not a device, but rather how other devices are connected
-together to create the audio subsystem. I know what each device is
-because I'm familiar with the board design, but how does the kernel
-know what to do with each node?
-
-The trick is that the kernel starts at the root of the tree and looks
-for nodes that have a 'compatible' property. First, it is generally
-assumed that any node with a 'compatible' property represents a device
-of some kind, and second, it can be assumed that any node at the root
-of the tree is either directly attached to the processor bus, or is a
-miscellaneous system device that cannot be described any other way.
-For each of these nodes, Linux allocates and registers a
-platform_device, which in turn may get bound to a platform_driver.
-
-Why is using a platform_device for these nodes a safe assumption?
-Well, for the way that Linux models devices, just about all bus_types
-assume that its devices are children of a bus controller. For
-example, each i2c_client is a child of an i2c_master. Each spi_device
-is a child of an SPI bus. Similarly for USB, PCI, MDIO, etc. The
-same hierarchy is also found in the DT, where I2C device nodes only
-ever appear as children of an I2C bus node. Ditto for SPI, MDIO, USB,
-etc. The only devices which do not require a specific type of parent
-device are platform_devices (and amba_devices, but more on that
-later), which will happily live at the base of the Linux /sys/devices
-tree. Therefore, if a DT node is at the root of the tree, then it
-really probably is best registered as a platform_device.
-
-Linux board support code calls of_platform_populate(NULL, NULL, NULL, NULL)
-to kick off discovery of devices at the root of the tree. The
-parameters are all NULL because when starting from the root of the
-tree, there is no need to provide a starting node (the first NULL), a
-parent struct device (the last NULL), and we're not using a match
-table (yet). For a board that only needs to register devices,
-.init_machine() can be completely empty except for the
-of_platform_populate() call.
-
-In the Tegra example, this accounts for the /soc and /sound nodes, but
-what about the children of the SoC node? Shouldn't they be registered
-as platform devices too? For Linux DT support, the generic behaviour
-is for child devices to be registered by the parent's device driver at
-driver .probe() time. So, an i2c bus device driver will register a
-i2c_client for each child node, an SPI bus driver will register
-its spi_device children, and similarly for other bus_types.
-According to that model, a driver could be written that binds to the
-SoC node and simply registers platform_devices for each of its
-children. The board support code would allocate and register an SoC
-device, a (theoretical) SoC device driver could bind to the SoC device,
-and register platform_devices for /soc/interrupt-controller, /soc/serial,
-/soc/i2s, and /soc/i2c in its .probe() hook. Easy, right?
-
-Actually, it turns out that registering children of some
-platform_devices as more platform_devices is a common pattern, and the
-device tree support code reflects that and makes the above example
-simpler. The second argument to of_platform_populate() is an
-of_device_id table, and any node that matches an entry in that table
-will also get its child nodes registered. In the Tegra case, the code
-can look something like this:
-
-static void __init harmony_init_machine(void)
-{
- /* ... */
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
-"simple-bus" is defined in the Devicetree Specification as a property
-meaning a simple memory mapped bus, so the of_platform_populate() code
-could be written to just assume simple-bus compatible nodes will
-always be traversed. However, we pass it in as an argument so that
-board support code can always override the default behaviour.
-
-[Need to add discussion of adding i2c/spi/etc child devices]
-
-Appendix A: AMBA devices
-------------------------
-
-ARM Primecells are a certain kind of device attached to the ARM AMBA
-bus which include some support for hardware detection and power
-management. In Linux, struct amba_device and the amba_bus_type is
-used to represent Primecell devices. However, the fiddly bit is that
-not all devices on an AMBA bus are Primecells, and for Linux it is
-typical for both amba_device and platform_device instances to be
-siblings of the same bus segment.
-
-When using the DT, this creates problems for of_platform_populate()
-because it must decide whether to register each node as either a
-platform_device or an amba_device. This unfortunately complicates the
-device creation model a little bit, but the solution turns out not to
-be too invasive. If a node is compatible with "arm,amba-primecell", then
-of_platform_populate() will register it as an amba_device instead of a
-platform_device.
diff --git a/Documentation/doc-guide/parse-headers.rst b/Documentation/doc-guide/parse-headers.rst
index f7135b058246..5da0046f7059 100644
--- a/Documentation/doc-guide/parse-headers.rst
+++ b/Documentation/doc-guide/parse-headers.rst
@@ -186,7 +186,7 @@ COPYRIGHT
Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
-License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 72fc2e9e2b63..ef9519c32c55 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -251,6 +251,7 @@ vmlinux-*
vmlinux.aout
vmlinux.bin.all
vmlinux.lds
+vmlinux.symvers
vmlinuz
voffset.h
vsyscall.lds
diff --git a/Documentation/driver-api/acpi/linuxized-acpica.rst b/Documentation/driver-api/acpi/linuxized-acpica.rst
index 0ca8f1538519..6bee03383225 100644
--- a/Documentation/driver-api/acpi/linuxized-acpica.rst
+++ b/Documentation/driver-api/acpi/linuxized-acpica.rst
@@ -175,9 +175,9 @@ illustrated in the following figure::
B. acpica / master - "master" branch of the git repository at
<https://github.com/acpica/acpica.git>.
C. linux-pm / linux-next - "linux-next" branch of the git repository at
- <http://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git>.
+ <https://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git>.
D. linux / master - "master" branch of the git repository at
- <http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git>.
+ <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git>.
Before the linuxized ACPICA patches are sent to the Linux ACPI community
for review, there is a quality assurance build test process to reduce
@@ -274,6 +274,6 @@ before they become available from the ACPICA release process.
a diff file indicating the state of the current divergences::
# git clone https://github.com/acpica/acpica
- # git clone http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+ # git clone https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
# cd acpica
# generate/linux/divergences.sh -s ../linux
diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst
index fc242ed4bde5..e0b58c392e4f 100644
--- a/Documentation/driver-api/driver-model/devres.rst
+++ b/Documentation/driver-api/driver-model/devres.rst
@@ -284,21 +284,13 @@ I2C
IIO
devm_iio_device_alloc()
- devm_iio_device_free()
devm_iio_device_register()
- devm_iio_device_unregister()
devm_iio_kfifo_allocate()
- devm_iio_kfifo_free()
devm_iio_triggered_buffer_setup()
- devm_iio_triggered_buffer_cleanup()
devm_iio_trigger_alloc()
- devm_iio_trigger_free()
devm_iio_trigger_register()
- devm_iio_trigger_unregister()
devm_iio_channel_get()
- devm_iio_channel_release()
devm_iio_channel_get_all()
- devm_iio_channel_release_all()
INPUT
devm_input_allocate_device()
@@ -322,6 +314,7 @@ IOMAP
devm_platform_ioremap_resource() : calls devm_ioremap_resource() for platform device
devm_platform_ioremap_resource_wc()
devm_platform_ioremap_resource_byname()
+ devm_platform_get_and_ioremap_resource()
devm_iounmap()
pcim_iomap()
pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
diff --git a/Documentation/driver-api/driver-model/driver.rst b/Documentation/driver-api/driver-model/driver.rst
index 63887b813005..7d5040f6a3d8 100644
--- a/Documentation/driver-api/driver-model/driver.rst
+++ b/Documentation/driver-api/driver-model/driver.rst
@@ -4,7 +4,6 @@ Device Drivers
See the kerneldoc for the struct device_driver.
-
Allocation
~~~~~~~~~~
@@ -167,9 +166,26 @@ the driver to that device.
A driver's probe() may return a negative errno value to indicate that
the driver did not bind to this device, in which case it should have
-released all resources it allocated::
+released all resources it allocated.
+
+Optionally, probe() may return -EPROBE_DEFER if the driver depends on
+resources that are not yet available (e.g., supplied by a driver that
+hasn't initialized yet). The driver core will put the device onto the
+deferred probe list and will try to call it again later. If a driver
+must defer, it should return -EPROBE_DEFER as early as possible to
+reduce the amount of time spent on setup work that will need to be
+unwound and reexecuted at a later time.
+
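+For example, a platform driver's probe() might simply propagate the
+deferral it gets back from a resource lookup (the driver and clock
+names below are made up)::
+
+  static int foo_probe(struct platform_device *pdev)
+  {
+      struct clk *clk;
+
+      /* Returns ERR_PTR(-EPROBE_DEFER) if the clock provider
+       * has not been probed yet; just pass that back. */
+      clk = devm_clk_get(&pdev->dev, "bus");
+      if (IS_ERR(clk))
+          return PTR_ERR(clk);
+
+      return 0;
+  }
+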
+.. warning::
+ -EPROBE_DEFER must not be returned if probe() has already created
+ child devices, even if those child devices are removed again
+ in a cleanup path. If -EPROBE_DEFER is returned after a child
+ device has been registered, it may result in an infinite loop of
+ .probe() calls to the same driver.
+
+::
- void (*sync_state)(struct device *dev);
+ void (*sync_state) (struct device *dev);
sync_state is called only once for a device. It's called when all the consumer
devices of the device have successfully probed. The list of consumers of the
@@ -212,6 +228,8 @@ over management of devices from the bootloader, the usage of sync_state() is
not restricted to that. Use it whenever it makes sense to take an action after
all the consumers of a device have probed::
+::
+
int (*remove) (struct device *dev);
remove is called to unbind a driver from a device. This may be
@@ -224,11 +242,15 @@ not. It should free any resources allocated specifically for the
device; i.e. anything in the device's driver_data field.
If the device is still present, it should quiesce the device and place
-it into a supported low-power state::
+it into a supported low-power state.
+
+::
int (*suspend) (struct device *dev, pm_message_t state);
-suspend is called to put the device in a low power state::
+suspend is called to put the device in a low power state.
+
+::
int (*resume) (struct device *dev);
diff --git a/Documentation/driver-api/gpio/board.rst b/Documentation/driver-api/gpio/board.rst
index ce91518bf9f4..191fa867826a 100644
--- a/Documentation/driver-api/gpio/board.rst
+++ b/Documentation/driver-api/gpio/board.rst
@@ -113,13 +113,15 @@ files that desire to do so need to include the following header::
GPIOs are mapped by the means of tables of lookups, containing instances of the
gpiod_lookup structure. Two macros are defined to help declaring such mappings::
- GPIO_LOOKUP(chip_label, chip_hwnum, con_id, flags)
- GPIO_LOOKUP_IDX(chip_label, chip_hwnum, con_id, idx, flags)
+ GPIO_LOOKUP(key, chip_hwnum, con_id, flags)
+ GPIO_LOOKUP_IDX(key, chip_hwnum, con_id, idx, flags)
where
- - chip_label is the label of the gpiod_chip instance providing the GPIO
- - chip_hwnum is the hardware number of the GPIO within the chip
+ - key is either the label of the gpiod_chip instance providing the GPIO, or
+ the GPIO line name
+ - chip_hwnum is the hardware number of the GPIO within the chip, or U16_MAX
+ to indicate that key is a GPIO line name
- con_id is the name of the GPIO function from the device point of view. It
can be NULL, in which case it will match any function.
- idx is the index of the GPIO within the function.
@@ -135,7 +137,10 @@ where
In the future, these flags might be extended to support more properties.
-Note that GPIO_LOOKUP() is just a shortcut to GPIO_LOOKUP_IDX() where idx = 0.
+Note that:
+ 1. GPIO line names are not guaranteed to be globally unique, so the first
+ match found will be used.
+ 2. GPIO_LOOKUP() is just a shortcut to GPIO_LOOKUP_IDX() where idx = 0.
A lookup table can then be defined as follows, with an empty entry defining its
end. The 'dev_id' field of the table is the identifier of the device that will
diff --git a/Documentation/driver-api/iio/triggers.rst b/Documentation/driver-api/iio/triggers.rst
index 5c2156de6284..dfd7ba3eabde 100644
--- a/Documentation/driver-api/iio/triggers.rst
+++ b/Documentation/driver-api/iio/triggers.rst
@@ -4,9 +4,7 @@ Triggers
* struct :c:type:`iio_trigger` — industrial I/O trigger device
* :c:func:`devm_iio_trigger_alloc` — Resource-managed iio_trigger_alloc
-* :c:func:`devm_iio_trigger_free` — Resource-managed iio_trigger_free
* :c:func:`devm_iio_trigger_register` — Resource-managed iio_trigger_register
-* :c:func:`devm_iio_trigger_unregister` — Resource-managed
iio_trigger_unregister
* :c:func:`iio_trigger_validate_own_device` — Check if a trigger and IIO
device belong to the same device
diff --git a/Documentation/driver-api/infiniband.rst b/Documentation/driver-api/infiniband.rst
index 1a3116f32ff0..30e142ccbee9 100644
--- a/Documentation/driver-api/infiniband.rst
+++ b/Documentation/driver-api/infiniband.rst
@@ -37,9 +37,6 @@ InfiniBand core interfaces
.. kernel-doc:: drivers/infiniband/core/ud_header.c
:export:
-.. kernel-doc:: drivers/infiniband/core/fmr_pool.c
- :export:
-
.. kernel-doc:: drivers/infiniband/core/umem.c
:export:
diff --git a/Documentation/driver-api/mtdnand.rst b/Documentation/driver-api/mtdnand.rst
index 55447659b81f..0bf8d6ec3f54 100644
--- a/Documentation/driver-api/mtdnand.rst
+++ b/Documentation/driver-api/mtdnand.rst
@@ -276,8 +276,10 @@ unregisters the partitions in the MTD layer.
#ifdef MODULE
static void __exit board_cleanup (void)
{
- /* Release resources, unregister device */
- nand_release (mtd_to_nand(board_mtd));
+ /* Unregister device */
+ WARN_ON(mtd_device_unregister(board_mtd));
+ /* Release resources */
+ nand_cleanup(mtd_to_nand(board_mtd));
/* unmap physical address */
iounmap(baseaddr);
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst
index 8bceece51554..1b386076402c 100644
--- a/Documentation/driver-api/soundwire/stream.rst
+++ b/Documentation/driver-api/soundwire/stream.rst
@@ -75,8 +75,33 @@ Slaves are using single port. ::
| (Data) |
+---------------+
+Example 4: Stereo Stream with L and R channels is rendered by
+Master. Both of the L and R channels are received by two different
+Slaves. Master and both Slaves are using single port handling
+L+R. Each Slave device processes the L + R data locally, typically
+based on static configuration or dynamic orientation, and may drive
+one or more speakers. ::
-Example 4: Stereo Stream with L and R channel is rendered by two different
+ +---------------+ Clock Signal +---------------+
+ | Master +---------+------------------------+ Slave |
+ | Interface | | | Interface |
+ | | | | 1 |
+ | | | Data Signal | |
+ | L + R +---+------------------------------+ L + R |
+ | (Data) | | | Data Direction | (Data) |
+ +---------------+ | | +-------------> +---------------+
+ | |
+ | |
+ | | +---------------+
+ | +----------------------> | Slave |
+ | | Interface |
+ | | 2 |
+ | | |
+ +----------------------------> | L + R |
+ | (Data) |
+ +---------------+
+
+Example 5: Stereo Stream with L and R channel is rendered by two different
Ports of the Master and is received by only single Port of the Slave
interface. ::
@@ -101,7 +126,7 @@ interface. ::
+--------------------+ | |
+----------------+
-Example 5: Stereo Stream with L and R channel is rendered by 2 Masters, each
+Example 6: Stereo Stream with L and R channel is rendered by 2 Masters, each
rendering one channel, and is received by two different Slaves, each
receiving one channel. Both Masters and both Slaves are using single port. ::
@@ -123,12 +148,70 @@ receiving one channel. Both Masters and both Slaves are using single port. ::
| (Data) | Data Direction | (Data) |
+---------------+ +-----------------------> +---------------+
-Note: In multi-link cases like above, to lock, one would acquire a global
+Example 7: Stereo Stream with L and R channel is rendered by 2
+Masters, each rendering both channels. Each Slave receives L + R. This
+is the same application as Example 4 but with Slaves placed on
+separate links. ::
+
+ +---------------+ Clock Signal +---------------+
+ | Master +----------------------------------+ Slave |
+ | Interface | | Interface |
+ | 1 | | 1 |
+ | | Data Signal | |
+ | L + R +----------------------------------+ L + R |
+ | (Data) | Data Direction | (Data) |
+ +---------------+ +-----------------------> +---------------+
+
+ +---------------+ Clock Signal +---------------+
+ | Master +----------------------------------+ Slave |
+ | Interface | | Interface |
+ | 2 | | 2 |
+ | | Data Signal | |
+ | L + R +----------------------------------+ L + R |
+ | (Data) | Data Direction | (Data) |
+ +---------------+ +-----------------------> +---------------+
+
+Example 8: 4-channel Stream is rendered by 2 Masters, each rendering
+2 channels. Each Slave receives 2 channels. ::
+
+ +---------------+ Clock Signal +---------------+
+ | Master +----------------------------------+ Slave |
+ | Interface | | Interface |
+ | 1 | | 1 |
+ | | Data Signal | |
+ | L1 + R1 +----------------------------------+ L1 + R1 |
+ | (Data) | Data Direction | (Data) |
+ +---------------+ +-----------------------> +---------------+
+
+ +---------------+ Clock Signal +---------------+
+ | Master +----------------------------------+ Slave |
+ | Interface | | Interface |
+ | 2 | | 2 |
+ | | Data Signal | |
+ | L2 + R2 +----------------------------------+ L2 + R2 |
+ | (Data) | Data Direction | (Data) |
+ +---------------+ +-----------------------> +---------------+
+
+Note1: In multi-link cases like above, to lock, one would acquire a global
lock and then go on locking bus instances. But, in this case the caller
framework(ASoC DPCM) guarantees that stream operations on a card are
always serialized. So, there is no race condition and hence no need for
global lock.
+Note2: A Slave device may be configured to receive all channels
+transmitted on a link for a given Stream (Example 4) or just a subset
+of the data (Example 3). The configuration of the Slave device is not
+handled by a SoundWire subsystem API, but instead by the
+snd_soc_dai_set_tdm_slot() API. The platform or machine driver will
+typically configure which of the slots are used. For Example 4, the
+same slots would be used by all Devices, while for Example 3 the Slave
+Device1 would use e.g. Slot 0 and Slave Device2 Slot 1.
+
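+A machine-driver sketch for the Example 3 configuration (the DAI
+handles, masks and slot width are illustrative only)::
+
+  static int card_set_tdm_slots(struct snd_soc_dai *slave1_dai,
+                                struct snd_soc_dai *slave2_dai)
+  {
+      int ret;
+
+      /* Slave 1 extracts slot 0 (L), Slave 2 extracts slot 1 (R) */
+      ret = snd_soc_dai_set_tdm_slot(slave1_dai, 0x1, 0x1, 2, 16);
+      if (ret)
+          return ret;
+
+      return snd_soc_dai_set_tdm_slot(slave2_dai, 0x2, 0x2, 2, 16);
+  }
+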
+Note3: Multiple Sink ports can extract the same information for the
+same bitSlots in the SoundWire frame, however multiple Source ports
+shall be configured with different bitSlot configurations. This is the
+same limitation as with I2S/PCM TDM usages.
+
SoundWire Stream Management flow
================================
diff --git a/Documentation/driver-api/soundwire/summary.rst b/Documentation/driver-api/soundwire/summary.rst
index 8193125a2bfb..01dcb954f6d7 100644
--- a/Documentation/driver-api/soundwire/summary.rst
+++ b/Documentation/driver-api/soundwire/summary.rst
@@ -101,10 +101,11 @@ Following is the Bus API to register the SoundWire Bus:
.. code-block:: c
- int sdw_add_bus_master(struct sdw_bus *bus)
+ int sdw_bus_master_add(struct sdw_bus *bus,
+ struct device *parent,
+ struct fwnode_handle *fwnode)
{
- if (!bus->dev)
- return -ENODEV;
+ sdw_master_device_add(bus, parent, fwnode);
mutex_init(&bus->lock);
INIT_LIST_HEAD(&bus->slaves);
diff --git a/Documentation/driver-api/usb/bulk-streams.rst b/Documentation/driver-api/usb/bulk-streams.rst
index 99b515babdeb..eeefe582f8ff 100644
--- a/Documentation/driver-api/usb/bulk-streams.rst
+++ b/Documentation/driver-api/usb/bulk-streams.rst
@@ -9,9 +9,9 @@ device driver to overload a bulk endpoint so that multiple transfers can be
queued at once.
Streams are defined in sections 4.4.6.4 and 8.12.1.4 of the Universal Serial Bus
-3.0 specification at http://www.usb.org/developers/docs/ The USB Attached SCSI
+3.0 specification at https://www.usb.org/developers/docs/ The USB Attached SCSI
Protocol, which uses streams to queue multiple SCSI commands, can be found on
-the T10 website (http://t10.org/).
+the T10 website (https://t10.org/).
Device-side implications
diff --git a/Documentation/driver-api/usb/writing_musb_glue_layer.rst b/Documentation/driver-api/usb/writing_musb_glue_layer.rst
index 5bf7152fd76f..10416cc11cd5 100644
--- a/Documentation/driver-api/usb/writing_musb_glue_layer.rst
+++ b/Documentation/driver-api/usb/writing_musb_glue_layer.rst
@@ -707,12 +707,12 @@ cheerful guidance and support.
Resources
=========
-USB Home Page: http://www.usb.org
+USB Home Page: https://www.usb.org
-linux-usb Mailing List Archives: http://marc.info/?l=linux-usb
+linux-usb Mailing List Archives: https://marc.info/?l=linux-usb
USB On-the-Go Basics:
-http://www.maximintegrated.com/app-notes/index.mvp/id/1822
+https://www.maximintegrated.com/app-notes/index.mvp/id/1822
:ref:`Writing USB Device Drivers <writing-usb-driver>`
diff --git a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
new file mode 100644
index 000000000000..c527d05c0459
--- /dev/null
+++ b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
@@ -0,0 +1,34 @@
+#
+# Feature name: debug-vm-pgtable
+# Kconfig: ARCH_HAS_DEBUG_VM_PGTABLE
+# description: arch supports pgtable tests for semantics compliance
+#
+ -----------------------
+ | arch |status|
+ -----------------------
+ | alpha: | TODO |
+ | arc: | ok |
+ | arm: | TODO |
+ | arm64: | ok |
+ | c6x: | TODO |
+ | csky: | TODO |
+ | h8300: | TODO |
+ | hexagon: | TODO |
+ | ia64: | TODO |
+ | m68k: | TODO |
+ | microblaze: | TODO |
+ | mips: | TODO |
+ | nds32: | TODO |
+ | nios2: | TODO |
+ | openrisc: | TODO |
+ | parisc: | TODO |
+ | powerpc: | ok |
+ | riscv: | TODO |
+ | s390: | ok |
+ | sh: | TODO |
+ | sparc: | TODO |
+ | um: | TODO |
+ | unicore32: | TODO |
+ | x86: | ok |
+ | xtensa: | TODO |
+ -----------------------
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 4218ac658629..099d45ac8d8f 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -248,7 +248,7 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl
would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable
This space is reclaimed once checkpoint=enable.
compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo",
- "lz4" and "zstd" algorithm.
+ "lz4", "zstd" and "lzo-rle" algorithm.
compress_log_size=%u Support configuring compress cluster size, the size will
be 4KB * (1 << %u), 16KB is minimum size, also it's
default size.
diff --git a/Documentation/filesystems/fiemap.rst b/Documentation/filesystems/fiemap.rst
index 2a572e7edc08..93fc96f760aa 100644
--- a/Documentation/filesystems/fiemap.rst
+++ b/Documentation/filesystems/fiemap.rst
@@ -206,16 +206,18 @@ EINTR once fatal signal received.
Flag checking should be done at the beginning of the ->fiemap callback via the
-fiemap_check_flags() helper::
+fiemap_prep() helper::
- int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
+ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags);
The struct fieinfo should be passed in as received from ioctl_fiemap(). The
set of fiemap flags which the fs understands should be passed via fs_flags. If
-fiemap_check_flags finds invalid user flags, it will place the bad values in
+fiemap_prep finds invalid user flags, it will place the bad values in
fieinfo->fi_flags and return -EBADR. If the file system gets -EBADR, from
-fiemap_check_flags(), it should immediately exit, returning that error back to
-ioctl_fiemap().
+fiemap_prep(), it should immediately exit, returning that error back to
+ioctl_fiemap(). Additionally, the range is validated against the supported
+maximum file size.
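+
+A hypothetical ->fiemap implementation would therefore start out along
+these lines (the "foo" names and the single supported flag are
+illustrative)::
+
+  static int foo_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+                        u64 start, u64 len)
+  {
+      int ret;
+
+      /* foo_fs only understands FIEMAP_FLAG_SYNC */
+      ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
+      if (ret)
+          return ret;
+
+      /* ... walk the extents and report them via fiemap_fill_next_extent() ... */
+      return 0;
+  }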
For each extent in the request range, the file system should call
diff --git a/Documentation/filesystems/gfs2-glocks.rst b/Documentation/filesystems/gfs2-glocks.rst
new file mode 100644
index 000000000000..d14f230f0b12
--- /dev/null
+++ b/Documentation/filesystems/gfs2-glocks.rst
@@ -0,0 +1,253 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+Glock internal locking rules
+============================
+
+This documents the basic principles of the glock state machine
+internals. Each glock (struct gfs2_glock in fs/gfs2/incore.h)
+has two main (internal) locks:
+
+ 1. A spinlock (gl_lockref.lock) which protects the internal state such
+ as gl_state, gl_target and the list of holders (gl_holders)
+ 2. A non-blocking bit lock, GLF_LOCK, which is used to prevent other
+ threads from making calls to the DLM, etc. at the same time. If a
+ thread takes this lock, it must then call run_queue (usually via the
+ workqueue) when it releases it in order to ensure any pending tasks
+ are completed.
+
+The gl_holders list contains all the queued lock requests (not
+just the holders) associated with the glock. If there are any
+held locks, then they will be contiguous entries at the head
+of the list. Locks are granted in strictly the order that they
+are queued, except for those marked LM_FLAG_PRIORITY which are
+used only during recovery, and even then only for journal locks.
+
+There are three lock states that users of the glock layer can request,
+namely shared (SH), deferred (DF) and exclusive (EX). Those translate
+to the following DLM lock modes:
+
+========== ====== =====================================================
+Glock mode DLM lock mode
+========== ====== =====================================================
+ UN IV/NL Unlocked (no DLM lock associated with glock) or NL
+ SH PR (Protected read)
+ DF CW (Concurrent write)
+ EX EX (Exclusive)
+========== ====== =====================================================
+
+Thus DF is basically a shared mode which is incompatible with the "normal"
+shared lock mode, SH. In GFS2 the DF mode is used exclusively for direct I/O
+operations. The glocks are basically a lock plus some routines which deal
+with cache management. The following rules apply for the cache:
+
+========== ========== ============== ========== ==============
+Glock mode Cache data Cache Metadata Dirty Data Dirty Metadata
+========== ========== ============== ========== ==============
+ UN No No No No
+ SH Yes Yes No No
+ DF No Yes No No
+ EX Yes Yes Yes Yes
+========== ========== ============== ========== ==============
+
+These rules are implemented using the various glock operations which
+are defined for each type of glock. Not all types of glocks use
+all the modes. Only inode glocks use the DF mode for example.
+
+Table of glock operations and per type constants:
+
+============= =============================================================
+Field Purpose
+============= =============================================================
+go_xmote_th Called before remote state change (e.g. to sync dirty data)
+go_xmote_bh Called after remote state change (e.g. to refill cache)
+go_inval Called if remote state change requires invalidating the cache
+go_demote_ok Returns boolean value of whether it's ok to demote a glock
+ (e.g. checks timeout, and that there is no cached data)
+go_lock Called for the first local holder of a lock
+go_unlock Called on the final local unlock of a lock
+go_dump Called to print content of object for debugfs file, or on
+ error to dump glock to the log.
+go_type The type of the glock, ``LM_TYPE_*``
+go_callback Called if the DLM sends a callback to drop this lock
+go_flags GLOF_ASPACE is set, if the glock has an address space
+ associated with it
+============= =============================================================
+
+The minimum hold time for each lock is the time after a remote lock
+grant for which we ignore remote demote requests. This is in order to
+prevent a situation where locks are being bounced around the cluster
+from node to node with none of the nodes making any progress. This
+tends to show up most with shared mmaped files which are being written
+to by multiple nodes. By delaying the demotion in response to a
+remote callback, that gives the userspace program time to make
+some progress before the pages are unmapped.
+
+There is a plan to try and remove the go_lock and go_unlock callbacks
+if possible, in order to try and speed up the fast path through the locking.
+Also, eventually we hope to make the glock "EX" mode locally shared
+such that any local locking will be done with the i_mutex as required
+rather than via the glock.
+
+Locking rules for glock operations:
+
+============= ====================== =============================
+Operation GLF_LOCK bit lock held gl_lockref.lock spinlock held
+============= ====================== =============================
+go_xmote_th Yes No
+go_xmote_bh Yes No
+go_inval Yes No
+go_demote_ok Sometimes Yes
+go_lock Yes No
+go_unlock Yes No
+go_dump Sometimes Yes
+go_callback Sometimes (N/A) Yes
+============= ====================== =============================
+
+.. Note::
+
+   Operations must not drop either the bit lock or the spinlock
+   if it is held on entry. go_dump and go_demote_ok must never block.
+ Note that go_dump will only be called if the glock's state
+ indicates that it is caching uptodate data.
+
+Glock locking order within GFS2:
+
+ 1. i_rwsem (if required)
+ 2. Rename glock (for rename only)
+ 3. Inode glock(s)
+ (Parents before children, inodes at "same level" with same parent in
+ lock number order)
+ 4. Rgrp glock(s) (for (de)allocation operations)
+ 5. Transaction glock (via gfs2_trans_begin) for non-read operations
+ 6. i_rw_mutex (if required)
+ 7. Page lock (always last, very important!)
+
+There are two glocks per inode. One deals with access to the inode
+itself (locking order as above), and the other, known as the iopen
+glock, is used in conjunction with the i_nlink field in the inode to
+determine the lifetime of the inode in question. Locking of inodes
+is on a per-inode basis. Locking of rgrps is on a per-rgrp basis.
+In general we prefer to lock local locks prior to cluster locks.
+
+Glock Statistics
+----------------
+
+The stats are divided into two sets: those relating to the
+super block and those relating to an individual glock. The
+super block stats are done on a per-cpu basis in order to
+reduce the overhead of gathering them. They are also
+further divided by glock type. All timings are in nanoseconds.
+
+The same information is gathered for both the super block
+and glock statistics. The super
+block timing statistics are used to provide default values for
+the glock timing statistics, so that newly created glocks
+should have, as far as possible, a sensible starting point.
+The per-glock counters are initialised to zero when the
+glock is created. The per-glock statistics are lost when
+the glock is ejected from memory.
+
+The statistics are divided into three pairs of mean and
+variance, plus two counters. The mean/variance pairs are
+smoothed exponential estimates and the algorithm used is
+one which will be very familiar to anyone used to the calculation
+of round-trip times in network code. See "TCP/IP Illustrated,
+Volume 1", W. Richard Stevens, sect 21.3, "Round-Trip Time Measurement",
+p. 299 and onwards. Also, Volume 2, Sect. 25.10, p. 838 and onwards.
+Unlike the TCP/IP Illustrated case, the mean and variance are
+not scaled, but are in units of integer nanoseconds.
+
+The three pairs of mean/variance measure the following
+things:
+
+ 1. DLM lock time (non-blocking requests)
+ 2. DLM lock time (blocking requests)
+ 3. Inter-request time (again to the DLM)
+
+A non-blocking request is one which will complete right
+away, whatever the state of the DLM lock in question. That
+currently means any request where (a) the current state of
+the lock is exclusive, i.e. a lock demotion, (b) the requested
+state is either null or unlocked (again, a demotion), or (c) the
+"try lock" flag is set. A blocking request covers all the other
+lock requests.
+
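+Expressed as code, that classification amounts to something like the
+following sketch (the LM_ST_* names are the usual glock states; the
+helper name and the exact values are made up here for illustration)::
+
+  #define LM_ST_UNLOCKED   0            /* illustrative values */
+  #define LM_ST_EXCLUSIVE  1
+
+  /* Hypothetical helper mirroring cases (a), (b) and (c) above. */
+  static int dlm_request_is_nonblocking(unsigned int cur_state,
+                                        unsigned int req_state,
+                                        int try_lock)
+  {
+      return cur_state == LM_ST_EXCLUSIVE ||  /* (a) demoting from EX */
+             req_state == LM_ST_UNLOCKED ||   /* (b) demoting to unlocked */
+             try_lock;                        /* (c) "try lock" flag is set */
+  }
+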
+There are two counters. The first is there primarily to show
+how many lock requests have been made, and thus how much data
+has gone into the mean/variance calculations. The other counter
+counts the queuing of holders at the top layer of the glock
+code. Hopefully that number will be a lot larger than the number
+of dlm lock requests issued.
+
+So why gather these statistics? There are several reasons
+we'd like to get a better idea of these timings:
+
+1. To be able to better set the glock "min hold time"
+2. To spot performance issues more easily
+3. To improve the algorithm for selecting resource groups for
+ allocation (to base it on lock wait time, rather than blindly
+ using a "try lock")
+
+Due to the smoothing action of the updates, a step change in
+some input quantity being sampled will only fully be taken
+into account after 8 samples (or 4 for the variance) and this
+needs to be carefully considered when interpreting the
+results.
+
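+As a rough sketch, the update step is the familiar low-gain filter used
+for TCP round trip time estimation. Assuming gains of 1/8 for the mean
+and 1/4 for the variance (which is what the 8 and 4 sample figures above
+suggest), and noting that, as in the TCP case, the "variance" is really a
+smoothed mean deviation, it looks something like this::
+
+  #include <stdint.h>
+
+  /* One smoothed mean/variance pair, in integer nanoseconds (sketch only). */
+  struct lkstat_pair {
+      int64_t mean;     /* smoothed estimate of the sampled quantity */
+      int64_t var;      /* smoothed estimate of its mean deviation */
+  };
+
+  static void lkstat_update(struct lkstat_pair *s, int64_t sample_ns)
+  {
+      int64_t delta = sample_ns - s->mean;
+      int64_t adelta = delta < 0 ? -delta : delta;
+
+      s->mean += delta / 8;              /* gain 1/8: converges over ~8 samples */
+      s->var += (adelta - s->var) / 4;   /* gain 1/4: converges over ~4 samples */
+  }
+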
+Knowing both the time it takes a lock request to complete and
+the average time between lock requests for a glock means we
+can compute the percentage of time for which the
+node is able to use a glock vs. the time for which the rest of the
+cluster has its share. That will be very useful when setting
+the lock min hold time.
+
+Great care has been taken to ensure that we
+measure exactly the quantities that we want, as accurately
+as possible. There are always inaccuracies in any
+measuring system, but I hope this is as accurate as we
+can reasonably make it.
+
+Per sb stats can be found here::
+
+ /sys/kernel/debug/gfs2/<fsname>/sbstats
+
+Per glock stats can be found here::
+
+ /sys/kernel/debug/gfs2/<fsname>/glstats
+
+This assumes that debugfs is mounted on /sys/kernel/debug and that
+<fsname> is replaced with the name of the gfs2 filesystem in
+question.
+
+The abbreviations used in the output are as follows:
+
+========= ================================================================
+srtt      Smoothed round trip time for non-blocking dlm requests
+srttvar Variance estimate for srtt
+srttb Smoothed round trip time for (potentially) blocking dlm requests
+srttvarb Variance estimate for srttb
+sirt      Smoothed inter-request time (for dlm requests)
+sirtvar Variance estimate for sirt
+dlm Number of dlm requests made (dcnt in glstats file)
+queue Number of glock requests queued (qcnt in glstats file)
+========= ================================================================
+
+The sbstats file contains a set of these stats for each glock type (so 8 lines
+for each type) and for each cpu (one column per cpu). The glstats file contains
+a set of these stats for each glock in a similar format to the glocks file, but
+using the format mean/variance for each of the timing stats.
+
+The gfs2_glock_lock_time tracepoint prints out the current values of the stats
+for the glock in question, along with some additional information on each dlm
+reply that is received:
+
+====== =======================================
+status The status of the dlm request
+flags The dlm request flags
+tdiff The time taken by this specific request
+====== =======================================
+
+(remaining fields as per above list)
+
+
diff --git a/Documentation/filesystems/gfs2-glocks.txt b/Documentation/filesystems/gfs2-glocks.txt
deleted file mode 100644
index 7059623635b2..000000000000
--- a/Documentation/filesystems/gfs2-glocks.txt
+++ /dev/null
@@ -1,232 +0,0 @@
- Glock internal locking rules
- ------------------------------
-
-This documents the basic principles of the glock state machine
-internals. Each glock (struct gfs2_glock in fs/gfs2/incore.h)
-has two main (internal) locks:
-
- 1. A spinlock (gl_lockref.lock) which protects the internal state such
- as gl_state, gl_target and the list of holders (gl_holders)
- 2. A non-blocking bit lock, GLF_LOCK, which is used to prevent other
- threads from making calls to the DLM, etc. at the same time. If a
- thread takes this lock, it must then call run_queue (usually via the
- workqueue) when it releases it in order to ensure any pending tasks
- are completed.
-
-The gl_holders list contains all the queued lock requests (not
-just the holders) associated with the glock. If there are any
-held locks, then they will be contiguous entries at the head
-of the list. Locks are granted in strictly the order that they
-are queued, except for those marked LM_FLAG_PRIORITY which are
-used only during recovery, and even then only for journal locks.
-
-There are three lock states that users of the glock layer can request,
-namely shared (SH), deferred (DF) and exclusive (EX). Those translate
-to the following DLM lock modes:
-
-Glock mode | DLM lock mode
-------------------------------
- UN | IV/NL Unlocked (no DLM lock associated with glock) or NL
- SH | PR (Protected read)
- DF | CW (Concurrent write)
- EX | EX (Exclusive)
-
-Thus DF is basically a shared mode which is incompatible with the "normal"
-shared lock mode, SH. In GFS2 the DF mode is used exclusively for direct I/O
-operations. The glocks are basically a lock plus some routines which deal
-with cache management. The following rules apply for the cache:
-
-Glock mode | Cache data | Cache Metadata | Dirty Data | Dirty Metadata
---------------------------------------------------------------------------
- UN | No | No | No | No
- SH | Yes | Yes | No | No
- DF | No | Yes | No | No
- EX | Yes | Yes | Yes | Yes
-
-These rules are implemented using the various glock operations which
-are defined for each type of glock. Not all types of glocks use
-all the modes. Only inode glocks use the DF mode for example.
-
-Table of glock operations and per type constants:
-
-Field | Purpose
-----------------------------------------------------------------------------
-go_xmote_th | Called before remote state change (e.g. to sync dirty data)
-go_xmote_bh | Called after remote state change (e.g. to refill cache)
-go_inval | Called if remote state change requires invalidating the cache
-go_demote_ok | Returns boolean value of whether its ok to demote a glock
- | (e.g. checks timeout, and that there is no cached data)
-go_lock | Called for the first local holder of a lock
-go_unlock | Called on the final local unlock of a lock
-go_dump | Called to print content of object for debugfs file, or on
- | error to dump glock to the log.
-go_type | The type of the glock, LM_TYPE_.....
-go_callback | Called if the DLM sends a callback to drop this lock
-go_flags | GLOF_ASPACE is set, if the glock has an address space
- | associated with it
-
-The minimum hold time for each lock is the time after a remote lock
-grant for which we ignore remote demote requests. This is in order to
-prevent a situation where locks are being bounced around the cluster
-from node to node with none of the nodes making any progress. This
-tends to show up most with shared mmaped files which are being written
-to by multiple nodes. By delaying the demotion in response to a
-remote callback, that gives the userspace program time to make
-some progress before the pages are unmapped.
-
-There is a plan to try and remove the go_lock and go_unlock callbacks
-if possible, in order to try and speed up the fast path though the locking.
-Also, eventually we hope to make the glock "EX" mode locally shared
-such that any local locking will be done with the i_mutex as required
-rather than via the glock.
-
-Locking rules for glock operations:
-
-Operation | GLF_LOCK bit lock held | gl_lockref.lock spinlock held
--------------------------------------------------------------------------
-go_xmote_th | Yes | No
-go_xmote_bh | Yes | No
-go_inval | Yes | No
-go_demote_ok | Sometimes | Yes
-go_lock | Yes | No
-go_unlock | Yes | No
-go_dump | Sometimes | Yes
-go_callback | Sometimes (N/A) | Yes
-
-N.B. Operations must not drop either the bit lock or the spinlock
-if its held on entry. go_dump and do_demote_ok must never block.
-Note that go_dump will only be called if the glock's state
-indicates that it is caching uptodate data.
-
-Glock locking order within GFS2:
-
- 1. i_rwsem (if required)
- 2. Rename glock (for rename only)
- 3. Inode glock(s)
- (Parents before children, inodes at "same level" with same parent in
- lock number order)
- 4. Rgrp glock(s) (for (de)allocation operations)
- 5. Transaction glock (via gfs2_trans_begin) for non-read operations
- 6. i_rw_mutex (if required)
- 7. Page lock (always last, very important!)
-
-There are two glocks per inode. One deals with access to the inode
-itself (locking order as above), and the other, known as the iopen
-glock is used in conjunction with the i_nlink field in the inode to
-determine the lifetime of the inode in question. Locking of inodes
-is on a per-inode basis. Locking of rgrps is on a per rgrp basis.
-In general we prefer to lock local locks prior to cluster locks.
-
- Glock Statistics
- ------------------
-
-The stats are divided into two sets: those relating to the
-super block and those relating to an individual glock. The
-super block stats are done on a per cpu basis in order to
-try and reduce the overhead of gathering them. They are also
-further divided by glock type. All timings are in nanoseconds.
-
-In the case of both the super block and glock statistics,
-the same information is gathered in each case. The super
-block timing statistics are used to provide default values for
-the glock timing statistics, so that newly created glocks
-should have, as far as possible, a sensible starting point.
-The per-glock counters are initialised to zero when the
-glock is created. The per-glock statistics are lost when
-the glock is ejected from memory.
-
-The statistics are divided into three pairs of mean and
-variance, plus two counters. The mean/variance pairs are
-smoothed exponential estimates and the algorithm used is
-one which will be very familiar to those used to calculation
-of round trip times in network code. See "TCP/IP Illustrated,
-Volume 1", W. Richard Stevens, sect 21.3, "Round-Trip Time Measurement",
-p. 299 and onwards. Also, Volume 2, Sect. 25.10, p. 838 and onwards.
-Unlike the TCP/IP Illustrated case, the mean and variance are
-not scaled, but are in units of integer nanoseconds.
-
-The three pairs of mean/variance measure the following
-things:
-
- 1. DLM lock time (non-blocking requests)
- 2. DLM lock time (blocking requests)
- 3. Inter-request time (again to the DLM)
-
-A non-blocking request is one which will complete right
-away, whatever the state of the DLM lock in question. That
-currently means any requests when (a) the current state of
-the lock is exclusive, i.e. a lock demotion (b) the requested
-state is either null or unlocked (again, a demotion) or (c) the
-"try lock" flag is set. A blocking request covers all the other
-lock requests.
-
-There are two counters. The first is there primarily to show
-how many lock requests have been made, and thus how much data
-has gone into the mean/variance calculations. The other counter
-is counting queuing of holders at the top layer of the glock
-code. Hopefully that number will be a lot larger than the number
-of dlm lock requests issued.
-
-So why gather these statistics? There are several reasons
-we'd like to get a better idea of these timings:
-
-1. To be able to better set the glock "min hold time"
-2. To spot performance issues more easily
-3. To improve the algorithm for selecting resource groups for
-allocation (to base it on lock wait time, rather than blindly
-using a "try lock")
-
-Due to the smoothing action of the updates, a step change in
-some input quantity being sampled will only fully be taken
-into account after 8 samples (or 4 for the variance) and this
-needs to be carefully considered when interpreting the
-results.
-
-Knowing both the time it takes a lock request to complete and
-the average time between lock requests for a glock means we
-can compute the total percentage of the time for which the
-node is able to use a glock vs. time that the rest of the
-cluster has its share. That will be very useful when setting
-the lock min hold time.
-
-Great care has been taken to ensure that we
-measure exactly the quantities that we want, as accurately
-as possible. There are always inaccuracies in any
-measuring system, but I hope this is as accurate as we
-can reasonably make it.
-
-Per sb stats can be found here:
-/sys/kernel/debug/gfs2/<fsname>/sbstats
-Per glock stats can be found here:
-/sys/kernel/debug/gfs2/<fsname>/glstats
-
-Assuming that debugfs is mounted on /sys/kernel/debug and also
-that <fsname> is replaced with the name of the gfs2 filesystem
-in question.
-
-The abbreviations used in the output as are follows:
-
-srtt - Smoothed round trip time for non-blocking dlm requests
-srttvar - Variance estimate for srtt
-srttb - Smoothed round trip time for (potentially) blocking dlm requests
-srttvarb - Variance estimate for srttb
-sirt - Smoothed inter-request time (for dlm requests)
-sirtvar - Variance estimate for sirt
-dlm - Number of dlm requests made (dcnt in glstats file)
-queue - Number of glock requests queued (qcnt in glstats file)
-
-The sbstats file contains a set of these stats for each glock type (so 8 lines
-for each type) and for each cpu (one column per cpu). The glstats file contains
-a set of these stats for each glock in a similar format to the glocks file, but
-using the format mean/variance for each of the timing stats.
-
-The gfs2_glock_lock_time tracepoint prints out the current values of the stats
-for the glock in question, along with some addition information on each dlm
-reply that is received:
-
-status - The status of the dlm request
-flags - The dlm request flags
-tdiff - The time taken by this specific request
-(remaining fields as per above list)
-
-
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 17795341e0a3..4c536e66dc4c 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -88,6 +88,7 @@ Documentation for filesystem implementations.
f2fs
gfs2
gfs2-uevents
+ gfs2-glocks
hfs
hfsplus
hpfs
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 0af2e0e11461..318605de83f3 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -429,6 +429,7 @@ prototypes::
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
void (*lm_break)(struct file_lock *); /* break_lease callback */
int (*lm_change)(struct file_lock **, int);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
locking rules:
@@ -439,6 +440,7 @@ lm_notify: yes yes no
lm_grant: no no no
lm_break: yes no no
lm_change yes no no
+lm_breaker_owns_lease: no no no
========== ============= ================= =========
buffer_head
@@ -615,7 +617,7 @@ prototypes::
locking rules:
============= ======== ===========================
-ops mmap_sem PageLocked(page)
+ops mmap_lock PageLocked(page)
============= ======== ===========================
open: yes
close: yes
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index c9d2bf96b02d..660dbaf0b9b8 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -365,8 +365,8 @@ pointed by REDIRECT. This should not be possible on local system as setting
"trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible
for untrusted layers like from a pen drive.
-Note: redirect_dir={off|nofollow|follow[*]} conflicts with metacopy=on, and
-results in an error.
+Note: redirect_dir={off|nofollow|follow[*]} and nfs_export=on mount options
+conflict with metacopy=on, and will result in an error.
[*] redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
given.
@@ -560,6 +560,9 @@ When the NFS export feature is enabled, all directory index entries are
verified on mount time to check that upper file handles are not stale.
This verification may cause significant overhead in some cases.
+Note: the mount options index=off,nfs_export=on are conflicting and will
+result in an error.
+
Testsuite
---------
diff --git a/Documentation/filesystems/path-lookup.txt b/Documentation/filesystems/path-lookup.txt
index 9b8930f589d9..1aa7ce099f6f 100644
--- a/Documentation/filesystems/path-lookup.txt
+++ b/Documentation/filesystems/path-lookup.txt
@@ -375,7 +375,7 @@ common path elements, the more likely they will exist in dentry cache.
Papers and other documentation on dcache locking
================================================
-1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124).
+1. Scaling dcache with RCU (https://linuxjournal.com/article.php?sid=7124).
2. http://lse.sourceforge.net/locking/dcache/dcache.html
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 26c093969573..867036aa90b8 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -858,3 +858,10 @@ be misspelled d_alloc_anon().
[should've been added in 2016] stale comment in finish_open() nonwithstanding,
failure exits in ->atomic_open() instances should *NOT* fput() the file,
no matter what. Everything is handled by the caller.
+
+---
+
+**mandatory**
+
+clone_private_mount() returns a longterm mount now, so the proper destructor of
+its result is kern_unmount() or kern_unmount_array().
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 430963e0e8c3..996f3cfe7030 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -51,6 +51,8 @@ fixes/update part 1.1 Stefani Seibold <stefani@seibold.net> June 9 2009
4 Configuring procfs
4.1 Mount options
+ 5 Filesystem behavior
+
Preface
=======
@@ -2143,28 +2145,80 @@ The following mount options are supported:
========= ========================================================
hidepid= Set /proc/<pid>/ access mode.
gid= Set the group authorized to learn processes information.
+ subset= Show only the specified subset of procfs.
========= ========================================================
-hidepid=0 means classic mode - everybody may access all /proc/<pid>/ directories
-(default).
-
-hidepid=1 means users may not access any /proc/<pid>/ directories but their
-own. Sensitive files like cmdline, sched*, status are now protected against
-other users. This makes it impossible to learn whether any user runs
-specific program (given the program doesn't reveal itself by its behaviour).
-As an additional bonus, as /proc/<pid>/cmdline is unaccessible for other users,
-poorly written programs passing sensitive information via program arguments are
-now protected against local eavesdroppers.
-
-hidepid=2 means hidepid=1 plus all /proc/<pid>/ will be fully invisible to other
-users. It doesn't mean that it hides a fact whether a process with a specific
-pid value exists (it can be learned by other means, e.g. by "kill -0 $PID"),
-but it hides process' uid and gid, which may be learned by stat()'ing
-/proc/<pid>/ otherwise. It greatly complicates an intruder's task of gathering
-information about running processes, whether some daemon runs with elevated
-privileges, whether other user runs some sensitive program, whether other users
-run any program at all, etc.
+hidepid=off or hidepid=0 means classic mode - everybody may access all
+/proc/<pid>/ directories (default).
+
+hidepid=noaccess or hidepid=1 means users may not access any /proc/<pid>/
+directories but their own. Sensitive files like cmdline, sched*, status are now
+protected against other users. This makes it impossible to learn whether any
+user runs a specific program (given the program doesn't reveal itself by its
+behaviour). As an additional bonus, as /proc/<pid>/cmdline is inaccessible to
+other users, poorly written programs passing sensitive information via program
+arguments are now protected against local eavesdroppers.
+
+hidepid=invisible or hidepid=2 means hidepid=1 plus all /proc/<pid>/ will be
+fully invisible to other users. It does not hide the fact that a process
+with a specific pid value exists (this can be learned by other means, e.g.
+by "kill -0 $PID"), but it hides the process's uid and gid, which could
+otherwise be learned by stat()'ing /proc/<pid>/. It greatly complicates an
+intruder's task of gathering information about running processes: whether
+some daemon runs with elevated privileges, whether another user runs some
+sensitive program, whether other users run any program at all, etc.
+
+hidepid=ptraceable or hidepid=4 means that procfs should only contain
+/proc/<pid>/ directories that the caller can ptrace.
gid= defines a group authorized to learn processes information otherwise
prohibited by hidepid=. If you use some daemon like identd which needs to learn
information about processes information, just add identd to this group.
+
+subset=pid hides all top level files and directories in the procfs that
+are not related to tasks.
+
+5 Filesystem behavior
+----------------------------
+
+Originally, before the advent of pid namespaces, procfs was a global file
+system. This meant that there was only one procfs instance in the system.
+
+When pid namespace was added, a separate procfs instance was mounted in
+each pid namespace. So, procfs mount options are global among all
+mountpoints within the same namespace.
+
+::
+
+# grep ^proc /proc/mounts
+proc /proc proc rw,relatime,hidepid=2 0 0
+
+# strace -e mount mount -o hidepid=1 -t proc proc /tmp/proc
+mount("proc", "/tmp/proc", "proc", 0, "hidepid=1") = 0
++++ exited with 0 +++
+
+# grep ^proc /proc/mounts
+proc /proc proc rw,relatime,hidepid=2 0 0
+proc /tmp/proc proc rw,relatime,hidepid=2 0 0
+
+Only after remounting procfs will the mount options change at all
+mountpoints.
+
+# mount -o remount,hidepid=1 -t proc proc /tmp/proc
+
+# grep ^proc /proc/mounts
+proc /proc proc rw,relatime,hidepid=1 0 0
+proc /tmp/proc proc rw,relatime,hidepid=1 0 0
+
+This behavior is different from the behavior of other filesystems.
+
+The new procfs behavior is more like that of other filesystems. Each procfs
+mount creates a new procfs instance. Mount options affect only their own
+procfs instance. This means that it is now possible to have several procfs
+instances displaying tasks with different filtering options in one pid
+namespace.
+
+# mount -o hidepid=invisible -t proc proc /proc
+# mount -o hidepid=noaccess -t proc proc /tmp/proc
+# grep ^proc /proc/mounts
+proc /proc proc rw,relatime,hidepid=invisible 0 0
+proc /tmp/proc proc rw,relatime,hidepid=noaccess 0 0
diff --git a/Documentation/filesystems/seq_file.rst b/Documentation/filesystems/seq_file.rst
index fab302046b13..7f7ee06b2693 100644
--- a/Documentation/filesystems/seq_file.rst
+++ b/Documentation/filesystems/seq_file.rst
@@ -7,7 +7,7 @@ The seq_file Interface
Copyright 2003 Jonathan Corbet <corbet@lwn.net>
This file is originally from the LWN.net Driver Porting series at
- http://lwn.net/Articles/driver-porting/
+ https://lwn.net/Articles/driver-porting/
There are numerous ways for a device driver (or other kernel component) to
@@ -57,7 +57,7 @@ Then concatenate the output files out1 and out2 and get the right
result. Yes, it is a thoroughly useless module, but the point is to show
how the mechanism works without getting lost in other details. (Those
wanting to see the full source for this module can find it at
-http://lwn.net/Articles/22359/).
+https://lwn.net/Articles/22359/).
Deprecated create_proc_entry
============================
diff --git a/Documentation/filesystems/virtiofs.rst b/Documentation/filesystems/virtiofs.rst
index e06e4951cb39..fd4d2484e949 100644
--- a/Documentation/filesystems/virtiofs.rst
+++ b/Documentation/filesystems/virtiofs.rst
@@ -39,6 +39,20 @@ Mount file system with tag ``myfs`` on ``/mnt``:
Please see https://virtio-fs.gitlab.io/ for details on how to configure QEMU
and the virtiofsd daemon.
+Mount options
+-------------
+
+virtiofs supports general VFS mount options, for example, remount,
+ro, rw, context, etc. It also supports FUSE mount options.
+
+atime behavior
+^^^^^^^^^^^^^^
+
+The atime-related mount options, for example, noatime and strictatime,
+are ignored. The atime behavior for virtiofs is the same as that of the
+underlying filesystem of the directory that has been exported
+on the host.
+
Internals
=========
Since the virtio-fs device uses the FUSE protocol for file system requests, the
diff --git a/Documentation/firmware-guide/acpi/intel-pmc-mux.rst b/Documentation/firmware-guide/acpi/intel-pmc-mux.rst
new file mode 100644
index 000000000000..99b86710f02b
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/intel-pmc-mux.rst
@@ -0,0 +1,153 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Intel North Mux-Agent
+=====================
+
+Introduction
+============
+
+North Mux-Agent is a function of the Intel PMC firmware that is supported on
+most Intel based platforms that have the PMC microcontroller. It's used for
+configuring the various USB Multiplexer/DeMultiplexers on the system. The
+platforms that allow the mux-agent to be configured from the operating system
+have an ACPI device object (node) with HID "INTC105C" that represents it.
+
+The North Mux-Agent (aka. Intel PMC Mux Control, or just mux-agent) driver
+communicates with the PMC microcontroller by using the PMC IPC method
+(drivers/platform/x86/intel_scu_ipc.c). The driver registers with the USB Type-C
+Mux Class which allows the USB Type-C Controller and Interface drivers to
+configure the cable plug orientation and mode (with Alternate Modes). The driver
+also registers with the USB Role Class in order to support both USB Host and
+Device modes. The driver is located here: drivers/usb/typec/mux/intel_pmc_mux.c.
+
+Port nodes
+==========
+
+General
+-------
+
+For every USB Type-C connector under the mux-agent control on the system, there
+is a separate child node under the PMC mux-agent device node. Those nodes do not
+represent the actual connectors, but instead the "channels" in the mux-agent
+that are associated with the connectors::
+
+ Scope (_SB.PCI0.PMC.MUX)
+ {
+ Device (CH0)
+ {
+ Name (_ADR, 0)
+ }
+
+ Device (CH1)
+ {
+ Name (_ADR, 1)
+ }
+ }
+
+_PLD (Physical Location of Device)
+----------------------------------
+
+The optional _PLD object can be used with the port (the channel) nodes. If _PLD
+is supplied, it should match the connector node _PLD::
+
+ Scope (_SB.PCI0.PMC.MUX)
+ {
+ Device (CH0)
+ {
+ Name (_ADR, 0)
+ Method (_PLD, 0, NotSerialized)
+ {
+ /* Consider this as pseudocode. */
+ Return (\_SB.USBC.CON0._PLD())
+ }
+ }
+ }
+
+Mux-agent specific _DSD Device Properties
+-----------------------------------------
+
+Port Numbers
+~~~~~~~~~~~~
+
+In order to configure the muxes behind a USB Type-C connector, the PMC firmware
+needs to know the USB2 port and the USB3 port that is associated with the
+connector. The driver extracts the correct port numbers by reading specific _DSD
+device properties named "usb2-port-number" and "usb3-port-number". These
+properties have an integer value that gives the port index. The port index
+is 1-based, and the value 0 is illegal. The driver uses the numbers extracted from
+these device properties as-is when sending the mux-agent specific messages to
+the PMC::
+
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package() {
+ Package () {"usb2-port-number", 6},
+ Package () {"usb3-port-number", 3},
+ },
+ })
+
+Orientation
+~~~~~~~~~~~
+
+Depending on the platform, the data and SBU lines coming from the connector may
+be "fixed" from the mux-agent's point of view, which means the mux-agent driver
+should not configure them according to the cable plug orientation. This can
+happen for example if a retimer on the platform handles the cable plug
+orientation. The driver uses the device properties "sbu-orientation"
+(SBU) and "hsl-orientation" (data) to know if those lines are "fixed", and to
+which orientation. These properties take a string value, which can be one of
+the values defined for the USB Type-C connector orientation: "normal"
+or "reversed"::
+
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package() {
+ Package () {"sbu-orientation", "normal"},
+ Package () {"hsl-orientation", "normal"},
+ },
+ })
+
+Example ASL
+===========
+
+The following ASL is an example that shows the mux-agent node, and two
+connectors under its control::
+
+ Scope (_SB.PCI0.PMC)
+ {
+ Device (MUX)
+ {
+ Name (_HID, "INTC105C")
+
+ Device (CH0)
+ {
+ Name (_ADR, 0)
+
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package() {
+ Package () {"usb2-port-number", 6},
+ Package () {"usb3-port-number", 3},
+ Package () {"sbu-orientation", "normal"},
+ Package () {"hsl-orientation", "normal"},
+ },
+ })
+ }
+
+ Device (CH1)
+ {
+ Name (_ADR, 1)
+
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package() {
+ Package () {"usb2-port-number", 5},
+ Package () {"usb3-port-number", 2},
+ Package () {"sbu-orientation", "normal"},
+ Package () {"hsl-orientation", "normal"},
+ },
+ })
+ }
+ }
+ }
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index 094fc8aacd8e..978c4af416a4 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -118,6 +118,11 @@ More functions are exposed through sysfs
management information (current temperature, thresholds, threshold status,
etc.).
+ Performance reporting
+ performance counters are exposed through perf PMU APIs. Standard perf tool
+ can be used to monitor all available perf events. Please see performance
+ counter section below for more detailed information.
+
FIU - PORT
==========
@@ -378,6 +383,85 @@ The device nodes used for ioctl() or mmap() can be referenced through::
/sys/class/fpga_region/<regionX>/<dfl-port.n>/dev
+Performance Counters
+====================
+Performance reporting is one private feature implemented in the FME. It can
+support several independent, system-wide, device counter sets in hardware to
+monitor and count performance events, including "basic", "cache", "fabric",
+"vtd" and "vtd_sip" counters. Users can use the standard perf tool to monitor
+the FPGA cache hit/miss rate, transaction number, interface clock counter of an
+AFU and other FPGA performance events.
+
+Different FPGA devices may have different counter sets, depending on hardware
+implementation. E.g., some discrete FPGA cards don't have any cache. Users can
+use "perf list" to check which perf events are supported by the target hardware.
+
+In order to allow users to use the standard perf API to access these performance
+counters, the driver creates a perf PMU and related sysfs interfaces in
+/sys/bus/event_source/devices/dfl_fme* to describe available perf events and
+configuration options.
+
+The "format" directory describes the format of the config field of struct
+perf_event_attr. There are 3 bitfields for config: "evtype" defines which type
+the perf event belongs to; "event" is the identity of the event within its
+category; "portid" selects whether the counter set monitors overall FPGA
+data or a specific port.
+
+The "events" directory describes the configuration templates for all available
+events which can be used with perf tool directly. For example, fab_mmio_read
+has the configuration "event=0x06,evtype=0x02,portid=0xff", which shows this
+event belongs to fabric type (0x02), the local event id is 0x06 and it is for
+overall monitoring (portid=0xff).
+
+Example usage of perf::
+
+ $# perf list |grep dfl_fme
+
+ dfl_fme0/fab_mmio_read/ [Kernel PMU event]
+ <...>
+ dfl_fme0/fab_port_mmio_read,portid=?/ [Kernel PMU event]
+ <...>
+
+ $# perf stat -a -e dfl_fme0/fab_mmio_read/ <command>
+ or
+ $# perf stat -a -e dfl_fme0/event=0x06,evtype=0x02,portid=0xff/ <command>
+ or
+ $# perf stat -a -e dfl_fme0/config=0xff2006/ <command>
+
+Another example, fab_port_mmio_read monitors mmio read of a specific port. So
+its configuration template is "event=0x06,evtype=0x01,portid=?". The portid
+should be explicitly set.
+
+Its usage of perf::
+
+ $# perf stat -a -e dfl_fme0/fab_port_mmio_read,portid=0x0/ <command>
+ or
+ $# perf stat -a -e dfl_fme0/event=0x06,evtype=0x02,portid=0x0/ <command>
+ or
+ $# perf stat -a -e dfl_fme0/config=0x2006/ <command>
+
+Please note for fabric counters, overall perf events (fab_*) and port perf
+events (fab_port_*) actually share one set of counters in hardware, so both
+cannot be monitored at the same time. If this set of counters is configured to
+monitor overall data, then per-port perf data is not supported. See the example
+below::
+
+ $# perf stat -e dfl_fme0/fab_mmio_read/,dfl_fme0/fab_port_mmio_write,\
+ portid=0/ sleep 1
+
+ Performance counter stats for 'system wide':
+
+ 3 dfl_fme0/fab_mmio_read/
+ <not supported> dfl_fme0/fab_port_mmio_write,portid=0x0/
+
+ 1.001750904 seconds time elapsed
+
+The driver also provides a "cpumask" sysfs attribute, which contains only one
+CPU id used to access these perf events. Counting on multiple CPUs is not
+allowed since these are system-wide counters on the FPGA device.
+
+The current driver does not support sampling. So "perf record" is unsupported.
+
+
Add new FIUs support
====================
It's possible that developers made some new function blocks (FIUs) under this
diff --git a/Documentation/index.rst b/Documentation/index.rst
index af2b87afebc8..71eca3171574 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. The Linux Kernel documentation master file, created by
sphinx-quickstart on Fri Feb 12 13:51:46 2016.
@@ -46,6 +48,7 @@ platform firmwares.
:maxdepth: 2
firmware-guide/index
+ devicetree/index
Application-developer documentation
-----------------------------------
diff --git a/Documentation/infiniband/core_locking.rst b/Documentation/infiniband/core_locking.rst
index 8f76a8a5a38f..efd5e7603014 100644
--- a/Documentation/infiniband/core_locking.rst
+++ b/Documentation/infiniband/core_locking.rst
@@ -22,7 +22,6 @@ Sleeping and interrupt context
- post_recv
- poll_cq
- req_notify_cq
- - map_phys_fmr
which may not sleep and must be callable from any context.
@@ -36,7 +35,6 @@ Sleeping and interrupt context
- ib_post_send
- ib_post_recv
- ib_req_notify_cq
- - ib_map_phys_fmr
are therefore safe to call from any context.
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index b80257a03830..6515ebc12b6f 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -29,31 +29,37 @@ This document describes the Linux kernel Makefiles.
--- 4.4 Controlling compiler options for host programs
--- 4.5 When host programs are actually built
- === 5 Kbuild clean infrastructure
-
- === 6 Architecture Makefiles
- --- 6.1 Set variables to tweak the build to the architecture
- --- 6.2 Add prerequisites to archheaders:
- --- 6.3 Add prerequisites to archprepare:
- --- 6.4 List directories to visit when descending
- --- 6.5 Architecture-specific boot images
- --- 6.6 Building non-kbuild targets
- --- 6.7 Commands useful for building a boot image
- --- 6.8 Custom kbuild commands
- --- 6.9 Preprocessing linker scripts
- --- 6.10 Generic header files
- --- 6.11 Post-link pass
-
- === 7 Kbuild syntax for exported headers
- --- 7.1 no-export-headers
- --- 7.2 generic-y
- --- 7.3 generated-y
- --- 7.4 mandatory-y
-
- === 8 Kbuild Variables
- === 9 Makefile language
- === 10 Credits
- === 11 TODO
+ === 5 Userspace Program support
+ --- 5.1 Simple Userspace Program
+ --- 5.2 Composite Userspace Programs
+ --- 5.3 Controlling compiler options for userspace programs
+ --- 5.4 When userspace programs are actually built
+
+ === 6 Kbuild clean infrastructure
+
+ === 7 Architecture Makefiles
+ --- 7.1 Set variables to tweak the build to the architecture
+ --- 7.2 Add prerequisites to archheaders:
+ --- 7.3 Add prerequisites to archprepare:
+ --- 7.4 List directories to visit when descending
+ --- 7.5 Architecture-specific boot images
+ --- 7.6 Building non-kbuild targets
+ --- 7.7 Commands useful for building a boot image
+ --- 7.8 Custom kbuild commands
+ --- 7.9 Preprocessing linker scripts
+ --- 7.10 Generic header files
+ --- 7.11 Post-link pass
+
+ === 8 Kbuild syntax for exported headers
+ --- 8.1 no-export-headers
+ --- 8.2 generic-y
+ --- 8.3 generated-y
+ --- 8.4 mandatory-y
+
+ === 9 Kbuild Variables
+ === 10 Makefile language
+ === 11 Credits
+ === 12 TODO
1 Overview
==========
@@ -732,7 +738,88 @@ Both possibilities are described in the following.
This will tell kbuild to build lxdialog even if not referenced in
any rule.
-5 Kbuild clean infrastructure
+5 Userspace Program support
+===========================
+
+Just like host programs, Kbuild also supports building userspace executables
+for the target architecture (i.e. the same architecture as you are building
+the kernel for).
+
+The syntax is quite similar. The difference is to use "userprogs" instead of
+"hostprogs".
+
+5.1 Simple Userspace Program
+----------------------------
+
+ The following line tells kbuild that the program bpf-direct shall be
+ built for the target architecture.
+
+ Example::
+
+ userprogs := bpf-direct
+
+ Kbuild assumes in the above example that bpf-direct is made from a
+ single C source file named bpf-direct.c located in the same directory
+ as the Makefile.
+
+5.2 Composite Userspace Programs
+--------------------------------
+
+ Userspace programs can be made up based on composite objects.
+ The syntax used to define composite objects for userspace programs is
+ similar to the syntax used for kernel objects.
+ $(<executable>-objs) lists all objects used to link the final
+ executable.
+
+ Example::
+
+ #samples/seccomp/Makefile
+ userprogs := bpf-fancy
+ bpf-fancy-objs := bpf-fancy.o bpf-helper.o
+
+ Objects with extension .o are compiled from the corresponding .c
+ files. In the above example, bpf-fancy.c is compiled to bpf-fancy.o
+ and bpf-helper.c is compiled to bpf-helper.o.
+
+ Finally, the two .o files are linked to the executable, bpf-fancy.
+ Note: The syntax <executable>-y is not permitted for userspace programs.
+
+5.3 Controlling compiler options for userspace programs
+-------------------------------------------------------
+
+ When compiling userspace programs, it is possible to set specific flags.
+ The programs will always be compiled utilising $(CC) passed
+ the options specified in $(KBUILD_USERCFLAGS).
+ To set flags that will take effect for all userspace programs created
+ in that Makefile, use the variable userccflags.
+
+ Example::
+
+ # samples/seccomp/Makefile
+ userccflags += -I usr/include
+
+ To set specific flags for a single file the following construction
+ is used:
+
+ Example::
+
+ bpf-helper-userccflags += -I user/include
+
+ It is also possible to specify additional options to the linker.
+
+ Example::
+
+ # net/bpfilter/Makefile
+ bpfilter_umh-userldflags += -static
+
+ When linking bpfilter_umh, it will be passed the extra option -static.
+
+5.4 When userspace programs are actually built
+----------------------------------------------
+
+ Same as "When host programs are actually built".
+
+6 Kbuild clean infrastructure
=============================
"make clean" deletes most generated files in the obj tree where the kernel
@@ -790,7 +877,7 @@ is not operational at that point.
Note 2: All directories listed in core-y, libs-y, drivers-y and net-y will
be visited during "make clean".
-6 Architecture Makefiles
+7 Architecture Makefiles
========================
The top level Makefile sets up the environment and does the preparation,
@@ -820,10 +907,10 @@ When kbuild executes, the following steps are followed (roughly):
- Preparing initrd images and the like
-6.1 Set variables to tweak the build to the architecture
+7.1 Set variables to tweak the build to the architecture
--------------------------------------------------------
- LDFLAGS
+ KBUILD_LDFLAGS
Generic $(LD) options
Flags used for all invocations of the linker.
@@ -832,7 +919,7 @@ When kbuild executes, the following steps are followed (roughly):
Example::
#arch/s390/Makefile
- LDFLAGS := -m elf_s390
+ KBUILD_LDFLAGS := -m elf_s390
Note: ldflags-y can be used to further customise
the flags used. See chapter 3.7.
@@ -967,7 +1054,7 @@ When kbuild executes, the following steps are followed (roughly):
KBUILD_VMLINUX_LIBS together specify all the object files used to
link vmlinux.
-6.2 Add prerequisites to archheaders
+7.2 Add prerequisites to archheaders
------------------------------------
The archheaders: rule is used to generate header files that
@@ -977,7 +1064,7 @@ When kbuild executes, the following steps are followed (roughly):
architecture itself.
-6.3 Add prerequisites to archprepare
+7.3 Add prerequisites to archprepare
------------------------------------
The archprepare: rule is used to list prerequisites that need to be
@@ -995,7 +1082,7 @@ When kbuild executes, the following steps are followed (roughly):
generating offset header files.
-6.4 List directories to visit when descending
+7.4 List directories to visit when descending
---------------------------------------------
An arch Makefile cooperates with the top Makefile to define variables
@@ -1030,7 +1117,7 @@ When kbuild executes, the following steps are followed (roughly):
drivers-$(CONFIG_OPROFILE) += arch/sparc64/oprofile/
-6.5 Architecture-specific boot images
+7.5 Architecture-specific boot images
-------------------------------------
An arch Makefile specifies goals that take the vmlinux file, compress
@@ -1085,7 +1172,7 @@ When kbuild executes, the following steps are followed (roughly):
When "make" is executed without arguments, bzImage will be built.
-6.6 Building non-kbuild targets
+7.6 Building non-kbuild targets
-------------------------------
extra-y
@@ -1108,7 +1195,7 @@ When kbuild executes, the following steps are followed (roughly):
In this example, extra-y is used to list object files that
shall be built, but shall not be linked as part of built-in.a.
-6.7 Commands useful for building a boot image
+7.7 Commands useful for building a boot image
---------------------------------------------
Kbuild provides a few macros that are useful when building a
@@ -1211,7 +1298,7 @@ When kbuild executes, the following steps are followed (roughly):
targets += $(dtb-y)
DTC_FLAGS ?= -p 1024
-6.8 Custom kbuild commands
+7.8 Custom kbuild commands
--------------------------
When kbuild is executing with KBUILD_VERBOSE=0, then only a shorthand
@@ -1241,7 +1328,7 @@ When kbuild executes, the following steps are followed (roughly):
will be displayed with "make KBUILD_VERBOSE=0".
-6.9 Preprocessing linker scripts
+7.9 Preprocessing linker scripts
--------------------------------
When the vmlinux image is built, the linker script
@@ -1274,7 +1361,7 @@ When kbuild executes, the following steps are followed (roughly):
The kbuild infrastructure for `*lds` files is used in several
architecture-specific files.
-6.10 Generic header files
+7.10 Generic header files
-------------------------
The directory include/asm-generic contains the header files
@@ -1283,7 +1370,7 @@ When kbuild executes, the following steps are followed (roughly):
to list the file in the Kbuild file.
See "7.2 generic-y" for further info on syntax etc.
-6.11 Post-link pass
+7.11 Post-link pass
-------------------
If the file arch/xxx/Makefile.postlink exists, this makefile
@@ -1299,7 +1386,7 @@ When kbuild executes, the following steps are followed (roughly):
For example, powerpc uses this to check relocation sanity of
the linked vmlinux file.
-7 Kbuild syntax for exported headers
+8 Kbuild syntax for exported headers
------------------------------------
The kernel includes a set of headers that is exported to userspace.
@@ -1319,14 +1406,14 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
arch/<arch>/include/asm/ to list asm files coming from asm-generic.
See subsequent chapter for the syntax of the Kbuild file.
-7.1 no-export-headers
+8.1 no-export-headers
---------------------
no-export-headers is essentially used by include/uapi/linux/Kbuild to
avoid exporting specific headers (e.g. kvm.h) on architectures that do
not support it. It should be avoided as much as possible.
-7.2 generic-y
+8.2 generic-y
-------------
If an architecture uses a verbatim copy of a header from
@@ -1356,7 +1443,7 @@ See subsequent chapter for the syntax of the Kbuild file.
#include <asm-generic/termios.h>
-7.3 generated-y
+8.3 generated-y
---------------
If an architecture generates other header files alongside generic-y
@@ -1370,7 +1457,7 @@ See subsequent chapter for the syntax of the Kbuild file.
#arch/x86/include/asm/Kbuild
generated-y += syscalls_32.h
-7.4 mandatory-y
+8.4 mandatory-y
---------------
mandatory-y is essentially used by include/(uapi/)asm-generic/Kbuild
@@ -1380,7 +1467,7 @@ See subsequent chapter for the syntax of the Kbuild file.
in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate
a wrapper of the asm-generic one.
-8 Kbuild Variables
+9 Kbuild Variables
==================
The top Makefile exports the following variables:
@@ -1438,8 +1525,8 @@ The top Makefile exports the following variables:
command.
-9 Makefile language
-===================
+10 Makefile language
+====================
The kernel Makefiles are designed to be run with GNU Make. The Makefiles
use only the documented features of GNU Make, but they do use many
@@ -1458,7 +1545,7 @@ time the left-hand side is used.
There are some cases where "=" is appropriate. Usually, though, ":="
is the right choice.
-10 Credits
+11 Credits
==========
- Original version made by Michael Elizabeth Chastain, <mailto:mec@shout.net>
@@ -1466,7 +1553,7 @@ is the right choice.
- Updates by Sam Ravnborg <sam@ravnborg.org>
- Language QA by Jan Engelhardt <jengelh@gmx.de>
-11 TODO
+12 TODO
=======
- Describe how kbuild supports shipped files with _shipped.
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index e0b45a257f21..a45cccff467d 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -528,18 +528,6 @@ build.
will then do the expected and compile both modules with
full knowledge of symbols from either module.
- Use an extra Module.symvers file
- When an external module is built, a Module.symvers file
- is generated containing all exported symbols which are
- not defined in the kernel. To get access to symbols
- from bar.ko, copy the Module.symvers file from the
- compilation of bar.ko to the directory where foo.ko is
- built. During the module build, kbuild will read the
- Module.symvers file in the directory of the external
- module, and when the build is finished, a new
- Module.symvers file is created containing the sum of
- all symbols defined and not part of the kernel.
-
Use "make" variable KBUILD_EXTRA_SYMBOLS
If it is impractical to add a top-level kbuild file,
you can assign a space separated list
diff --git a/Documentation/livepatch/module-elf-format.rst b/Documentation/livepatch/module-elf-format.rst
index 2a591e6f8e6c..8c6b894c4661 100644
--- a/Documentation/livepatch/module-elf-format.rst
+++ b/Documentation/livepatch/module-elf-format.rst
@@ -14,8 +14,7 @@ This document outlines the Elf format requirements that livepatch modules must f
4. Livepatch symbols
4.1 A livepatch module's symbol table
4.2 Livepatch symbol format
- 5. Architecture-specific sections
- 6. Symbol table and Elf section access
+ 5. Symbol table and Elf section access
1. Background and motivation
============================
@@ -298,17 +297,7 @@ Examples:
Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
"OS" means OS-specific.
-5. Architecture-specific sections
-=================================
-Architectures may override arch_klp_init_object_loaded() to perform
-additional arch-specific tasks when a target module loads, such as applying
-arch-specific sections. On x86 for example, we must apply per-object
-.altinstructions and .parainstructions sections when a target module loads.
-These sections must be prefixed with ".klp.arch.$objname." so that they can
-be easily identified when iterating through a patch module's Elf sections
-(See arch/x86/kernel/livepatch.c for a complete example).
-
-6. Symbol table and Elf section access
+5. Symbol table and Elf section access
======================================
A livepatch module's symbol table is accessible through module->symtab.
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
index ca983328976b..f65b51523014 100644
--- a/Documentation/lzo.txt
+++ b/Documentation/lzo.txt
@@ -159,11 +159,15 @@ Byte sequences
distance = 16384 + (H << 14) + D
state = S (copy S literals after this block)
End of stream is reached if distance == 16384
+ In version 1 only, to prevent ambiguity with the RLE case when
+ ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the
+ compressor must not emit block copies where distance and length
+ meet these conditions.
In version 1 only, this instruction is also used to encode a run of
- zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
+ zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
In this case, it is followed by a fourth byte, X.
- run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4.
+ run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4
0 0 1 L L L L L (32..63)
Copy of small block within 16kB distance (preferably less than 34B)
diff --git a/Documentation/misc-devices/c2port.txt b/Documentation/misc-devices/c2port.txt
index ea7344465610..31351b1a5a1f 100644
--- a/Documentation/misc-devices/c2port.txt
+++ b/Documentation/misc-devices/c2port.txt
@@ -28,14 +28,14 @@ where the micro controller is connected via special GPIOs pins.
References
----------
-The C2 Interface main references are at (http://www.silabs.com)
+The C2 Interface main references are at (https://www.silabs.com)
Silicon Laboratories site], see:
- AN127: FLASH Programming via the C2 Interface at
-http://www.silabs.com/Support Documents/TechnicalDocs/an127.pdf
+https://www.silabs.com/Support Documents/TechnicalDocs/an127.pdf
- C2 Specification at
-http://www.silabs.com/pages/DownloadDoc.aspx?FILEURL=Support%20Documents/TechnicalDocs/an127.pdf&src=SearchResults
+https://www.silabs.com/pages/DownloadDoc.aspx?FILEURL=Support%20Documents/TechnicalDocs/an127.pdf&src=SearchResults
however it implements a two wire serial communication protocol (bit
banging) designed to enable in-system programming, debugging, and
diff --git a/Documentation/powerpc/bootwrapper.rst b/Documentation/powerpc/bootwrapper.rst
index a6292afba573..cdfa2bc8425f 100644
--- a/Documentation/powerpc/bootwrapper.rst
+++ b/Documentation/powerpc/bootwrapper.rst
@@ -70,28 +70,6 @@ Currently, the following image format targets exist:
kernel with this image type and it depends entirely on
the embedded device tree for all information.
- The simpleImage is useful for booting systems with
- an unknown firmware interface or for booting from
- a debugger when no firmware is present (such as on
- the Xilinx Virtex platform). The only assumption that
- simpleImage makes is that RAM is correctly initialized
- and that the MMU is either off or has RAM mapped to
- base address 0.
-
- simpleImage also supports inserting special platform
- specific initialization code to the start of the bootup
- sequence. The virtex405 platform uses this feature to
- ensure that the cache is invalidated before caching
- is enabled. Platform specific initialization code is
- added as part of the wrapper script and is keyed on
- the image target name. For example, all
- simpleImage.virtex405-* targets will add the
- virtex405-head.S initialization code (This also means
- that the dts file for virtex405 targets should be
- named (virtex405-<board>.dts). Search the wrapper
- script for 'virtex405' and see the file
- arch/powerpc/boot/virtex405-head.S for details.
-
treeImage.%; Image format for used with OpenBIOS firmware found
on some ppc4xx hardware. This image embeds a device
tree blob inside the image.
@@ -116,10 +94,8 @@ Image types which embed a device tree blob (simpleImage, dtbImage, treeImage,
and cuImage) all generate the device tree blob from a file in the
arch/powerpc/boot/dts/ directory. The Makefile selects the correct device
tree source based on the name of the target. Therefore, if the kernel is
-built with 'make treeImage.walnut simpleImage.virtex405-ml403', then the
-build system will use arch/powerpc/boot/dts/walnut.dts to build
-treeImage.walnut and arch/powerpc/boot/dts/virtex405-ml403.dts to build
-the simpleImage.virtex405-ml403.
+built with 'make treeImage.walnut', then the build system will use
+arch/powerpc/boot/dts/walnut.dts to build treeImage.walnut.
Two special targets called 'zImage' and 'zImage.initrd' also exist. These
targets build all the default images as selected by the kernel configuration.
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
index 0d45f0fc8e57..afe2d5e54db6 100644
--- a/Documentation/powerpc/index.rst
+++ b/Documentation/powerpc/index.rst
@@ -30,6 +30,7 @@ powerpc
syscall64-abi
transactional_memory
ultravisor
+ vas-api
.. only:: subproject and html
diff --git a/Documentation/powerpc/transactional_memory.rst b/Documentation/powerpc/transactional_memory.rst
index 09955103acb4..b5b09bf00966 100644
--- a/Documentation/powerpc/transactional_memory.rst
+++ b/Documentation/powerpc/transactional_memory.rst
@@ -245,3 +245,30 @@ POWER9N DD2.2.
Guest migration from POWER8 to POWER9 will work with POWER9N DD2.2 and
POWER9C DD1.2. Since earlier POWER9 processors don't support TM
emulation, migration from POWER8 to POWER9 is not supported there.
+
+Kernel implementation
+=====================
+
+h/rfid mtmsrd quirk
+-------------------
+
+As defined in the ISA, rfid has a quirk which is useful in early
+exception handling. When we are in a userspace transaction and enter the
+kernel via some exception, the MSR will end up as TM=0 and TS=01 (ie. TM
+off but TM suspended). Often the kernel will want to change bits in
+the MSR and will perform an rfid to do this. In this case rfid can
+have SRR1 TM = 0 and TS = 00 (ie. TM off and non-transactional) and the
+resulting MSR will retain TM = 0 and TS=01 from before (ie. stay in
+suspend). This is a quirk in the architecture as this would normally
+be a transition from TS=01 to TS=00 (ie. suspend -> non-transactional),
+which is an illegal transition.
+
+This quirk is described in the architecture, in the definition of rfid,
+with these lines:
+
+ if (MSR 29:31 ¬ = 0b010 | SRR1 29:31 ¬ = 0b000) then
+ MSR 29:31 <- SRR1 29:31
+
+hrfid and mtmsrd have the same quirk.
+
+The Linux kernel uses this quirk in its early exception handling.
diff --git a/Documentation/powerpc/vas-api.rst b/Documentation/powerpc/vas-api.rst
new file mode 100644
index 000000000000..1217c2f1595e
--- /dev/null
+++ b/Documentation/powerpc/vas-api.rst
@@ -0,0 +1,292 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. _VAS-API:
+
+===================================================
+Virtual Accelerator Switchboard (VAS) userspace API
+===================================================
+
+Introduction
+============
+
+The Power9 processor introduced the Virtual Accelerator Switchboard (VAS),
+which allows both userspace and the kernel to communicate with a
+co-processor (hardware accelerator) referred to as the Nest Accelerator
+(NX). The NX unit comprises one or more hardware engines or co-processor
+types such as 842 compression, GZIP compression and encryption. On Power9,
+userspace applications have access to only the GZIP compression engine,
+which supports the ZLIB and GZIP compression algorithms in hardware.
+
+To communicate with NX, the kernel has to establish a channel or window;
+requests can then be submitted directly without kernel involvement.
+Requests to the GZIP engine must be formatted as a co-processor Request
+Block (CRB), and these CRBs must be submitted to the NX using COPY/PASTE
+instructions to paste the CRB to the hardware address that is associated
+with the engine's request queue.
+
+The GZIP engine provides two priority levels of requests: Normal and
+High. Only Normal requests are supported from userspace right now.
+
+This document explains the userspace API that is used to interact with
+the kernel to set up a channel / window which can then be used to send
+compression requests directly to the NX accelerator.
+
+
+Overview
+========
+
+Application access to the GZIP engine is provided through the
+/dev/crypto/nx-gzip device node implemented by the VAS/NX device driver.
+An application must open the /dev/crypto/nx-gzip device to obtain a file
+descriptor (fd) and then issue the VAS_TX_WIN_OPEN ioctl with this fd to
+establish a connection to the engine, which opens a send window on the
+GZIP engine for this process. Once a connection is established, the
+application should use the mmap() system call to map the hardware address
+of the engine's request queue into the application's virtual address space.
+
+The application can then submit one or more requests to the engine by
+using the copy/paste instructions and pasting the CRBs to the virtual
+address (aka paste_address) returned by mmap(). User space can close the
+established connection or send window by closing the file descriptor
+(close(fd)) or upon process exit.
+
+Note that applications can send several requests with the same window or
+can establish multiple windows, but there is one window per file descriptor.
+
+The following sections provide additional details and references about the
+individual steps.
+
+NX-GZIP Device Node
+===================
+
+There is one /dev/crypto/nx-gzip node in the system and it provides
+access to all GZIP engines in the system. The only valid operations on
+/dev/crypto/nx-gzip are:
+
+ * open() the device for read and write.
+ * issue VAS_TX_WIN_OPEN ioctl
+ * mmap() the engine's request queue into the application's virtual
+ address space (i.e. get a paste_address for the co-processor
+ engine).
+ * close the device node.
+
+Other file operations on this device node are undefined.
+
+Note that the copy and paste operations go directly to the hardware and
+do not go through this device. Refer to the COPY/PASTE document for more
+details.
+
+Although a system may have several instances of the NX co-processor
+engines (typically, one per P9 chip), there is just one
+/dev/crypto/nx-gzip device node in the system. When the nx-gzip device
+node is opened, the kernel opens a send window on a suitable instance of
+the NX accelerator: it finds the CPU on which the user process is
+executing and determines the NX instance for the chip to which this CPU
+belongs.
+
+Applications may choose a specific instance of the NX co-processor using
+the vas_id field in the VAS_TX_WIN_OPEN ioctl as detailed below.
+
+A userspace library, libnxz, is available but is still in development:
+ https://github.com/abalib/power-gzip
+
+Applications that use inflate / deflate calls can link with libnxz
+instead of libz and use NX GZIP compression without any modification.
+
+Open /dev/crypto/nx-gzip
+========================
+
+The nx-gzip device should be opened for read and write. No special
+privileges are needed to open the device. Each window corresponds to one
+file descriptor. So if the userspace process needs multiple windows,
+several open calls have to be issued.
+
+See open(2) system call man pages for other details such as return values,
+error codes and restrictions.
+
+VAS_TX_WIN_OPEN ioctl
+=====================
+
+Applications should use the VAS_TX_WIN_OPEN ioctl as follows to establish
+a connection with the NX co-processor engine:
+
+ ::
+ struct vas_tx_win_open_attr {
+ __u32 version;
+ __s16 vas_id; /* specific instance of vas or -1
+ for default */
+ __u16 reserved1;
+ __u64 flags; /* For future use */
+ __u64 reserved2[6];
+ };
+
+ version: The version field must currently be set to 1.
+ vas_id: If '-1' is passed, the kernel will make a best-effort attempt
+ to assign an optimal instance of NX for the process. To
+ select a specific VAS instance, refer to the
+ "Discovery of available VAS engines" section below.
+
+ The flags, reserved1 and reserved2[6] fields are for future extension
+ and must be set to 0.
+
+ The attributes attr for the VAS_TX_WIN_OPEN ioctl are defined as
+ follows:
+ #define VAS_MAGIC 'v'
+ #define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 1,
+ struct vas_tx_win_open_attr)
+
+ struct vas_tx_win_open_attr attr;
+ rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+
+ The VAS_TX_WIN_OPEN ioctl returns 0 on success. On errors, it
+ returns -1 and sets the errno variable to indicate the error.
+
+ Error conditions:
+ EINVAL fd does not refer to a valid VAS device.
+ EINVAL Invalid vas ID
+ EINVAL version is not set with proper value
+ EEXIST Window is already opened for the given fd
+ ENOMEM Memory is not available to allocate window
+ ENOSPC System has too many active windows (connections)
+ opened
+ EINVAL reserved fields are not set to 0.
+
+ See the ioctl(2) man page for more details, error codes and
+ restrictions.
+
+mmap() NX-GZIP device
+=====================
+
+The mmap() system call for an NX-GZIP device fd returns a paste_address
+that the application can use to copy/paste its CRB to the hardware engines.
+ ::
+
+ paste_addr = mmap(addr, size, prot, flags, fd, offset);
+
+ The only restrictions on mmap for an NX-GZIP device fd are:
+ * size should be PAGE_SIZE
+ * offset parameter should be 0ULL
+
+ Refer to the mmap(2) man page for additional details/restrictions.
+ In addition to the error conditions listed on the mmap(2) man
+ page, mmap() can also fail with one of the following error codes:
+
+ EINVAL fd is not associated with an open window
+ (i.e mmap() does not follow a successful call
+ to the VAS_TX_WIN_OPEN ioctl).
+ EINVAL offset field is not 0ULL.
+
+Discovery of available VAS engines
+==================================
+
+Each available VAS instance in the system will have a device tree node
+like /proc/device-tree/vas@* or /proc/device-tree/xscom@*/vas@*.
+Determine the chip or VAS instance and use the corresponding ibm,vas-id
+property value in this node to select a specific VAS instance.
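+
+As an illustration only, the ibm,vas-id property can be read from such a
+node as a single big-endian 32-bit cell. The helper below is a sketch,
+not part of any library, and the node path handling is an assumption
+based on the description above::
+
+  /* Illustrative sketch: read the ibm,vas-id property of one VAS node.
+   * Device tree property values are big endian.
+   */
+  #include <endian.h>
+  #include <limits.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  static int read_vas_id(const char *node_path, int *vas_id)
+  {
+          char path[PATH_MAX];
+          uint32_t be_id;
+          FILE *f;
+
+          snprintf(path, sizeof(path), "%s/ibm,vas-id", node_path);
+          f = fopen(path, "rb");
+          if (!f)
+                  return -1;
+          if (fread(&be_id, sizeof(be_id), 1, f) != 1) {
+                  fclose(f);
+                  return -1;
+          }
+          fclose(f);
+          *vas_id = (int)be32toh(be_id);  /* convert from big endian */
+          return 0;
+  }
+
+The value read this way can then be passed in the vas_id field of the
+VAS_TX_WIN_OPEN ioctl described above.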
+
+Copy/Paste operations
+=====================
+
+Applications should use the copy and paste instructions to send CRBs to NX.
+Refer to section 4.4 in PowerISA for the Copy/Paste instructions:
+https://openpowerfoundation.org/?resource_lib=power-isa-version-3-0
+
+CRB Specification and use of NX
+===============================
+
+Applications should format requests to the co-processor using the
+co-processor Request Block (CRB) format. Refer to the NX-GZIP user's
+manual for the CRB format and for how to use NX from userspace, such as
+sending requests and checking request status.
+
+NX Fault handling
+=================
+
+Applications send requests to NX and wait for the status by polling on the
+co-processor Status Block (CSB) flags. NX updates the status in the CSB after
+each request is processed. Refer to the NX-GZIP user's manual for the format
+of the CSB and the status flags.
+
+If NX encounters a translation error (called an NX page fault) on the CSB
+address or on any request buffer, it raises an interrupt on the CPU to handle
+the fault. A page fault can happen if an application passes invalid addresses
+or if request buffers are not in memory. The operating system handles the
+fault by updating the CSB with the following data:
+
+ csb.flags = CSB_V;
+ csb.cc = CSB_CC_TRANSLATION;
+ csb.ce = CSB_CE_TERMINATION;
+ csb.address = fault_address;
+
+When an application receives a translation error, it can touch or access
+the page at the fault address so that this page will be in memory. Then
+the application can resend this request to NX.
+
+If the OS can not update the CSB due to an invalid CSB address, it sends a
+SIGSEGV signal to the process that opened the send window on which the
+original request was issued. This signal is delivered with the following
+siginfo struct:
+
+ siginfo.si_signo = SIGSEGV;
+ siginfo.si_errno = EFAULT;
+ siginfo.si_code = SEGV_MAPERR;
+ siginfo.si_addr = CSB address;
+
+In the case of multi-threaded applications, NX send windows can be shared
+across all threads. For example, a child thread can open a send window,
+and other threads can send requests to NX using this window. These
+requests will be successful even in the case of the OS handling faults, as
+long as the CSB address is valid. If the NX request contains an invalid
+CSB address, the signal will be sent to the child thread that opened the
+window. But if that thread has exited without closing the window and a
+request is issued using this window, the signal will be issued to the
+thread group leader (tgid). It is up to the application whether to ignore
+or handle these signals.
+
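+A minimal sketch of the polling and retry-after-touch handling described
+above is shown below. The CSB layout is abridged and the CSB_* values are
+placeholders; take the real definitions from the NX-GZIP user's manual
+referenced below::
+
+  /* Illustrative sketch of the retry-after-touch handling described
+   * above. The CSB layout is abridged and the constants are
+   * placeholders; use the definitions from the NX-GZIP user's manual.
+   */
+  #include <errno.h>
+  #include <stdint.h>
+
+  #define CSB_V              0x80  /* placeholder: CSB valid flag       */
+  #define CSB_CC_TRANSLATION 5     /* placeholder: translation fault cc */
+
+  struct csb {                     /* abridged */
+          volatile uint8_t  flags;
+          volatile uint8_t  cs;
+          volatile uint8_t  cc;
+          volatile uint8_t  ce;
+          volatile uint32_t count;
+          volatile uint64_t address;
+  };
+
+  /* Returns 0 on success, -EAGAIN if the same CRB should be pasted
+   * again after the faulting page has been touched, -EIO otherwise.
+   */
+  static int wait_for_csb(struct csb *csb)
+  {
+          while (!(csb->flags & CSB_V))
+                  ;                /* a real application should time out */
+
+          if (csb->cc == CSB_CC_TRANSLATION) {
+                  /* Touch the faulting page so it becomes resident,
+                   * then let the caller resend the request.
+                   */
+                  volatile char *p = (volatile char *)(uintptr_t)csb->address;
+
+                  (void)*p;
+                  return -EAGAIN;
+          }
+          return csb->cc ? -EIO : 0;
+  }
+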
+NX-GZIP User's Manual:
+https://github.com/libnxz/power-gzip/blob/master/power_nx_gzip_um.pdf
+
+Simple example
+==============
+
+ ::
+      int use_nx_gzip()
+      {
+              int rc, fd, done = 0;
+              void *addr;
+              struct vas_tx_win_open_attr txattr;
+
+              fd = open("/dev/crypto/nx-gzip", O_RDWR);
+              if (fd < 0) {
+                      fprintf(stderr, "open nx-gzip failed\n");
+                      return -1;
+              }
+              memset(&txattr, 0, sizeof(txattr));
+              txattr.version = 1;
+              txattr.vas_id = -1;
+              rc = ioctl(fd, VAS_TX_WIN_OPEN,
+                         (unsigned long)&txattr);
+              if (rc < 0) {
+                      fprintf(stderr, "ioctl() failed, rc %d, errno %d\n",
+                              rc, errno);
+                      return rc;
+              }
+              addr = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
+                          MAP_SHARED, fd, 0ULL);
+              if (addr == MAP_FAILED) {
+                      fprintf(stderr, "mmap() failed, errno %d\n",
+                              errno);
+                      return -errno;
+              }
+              do {
+                      /* Format the CRB request for compression or
+                       * decompression; crb, vas_copy() and vas_paste()
+                       * come from the libnxz tests referenced below.
+                       */
+                      vas_copy(&crb, 0, 1);
+                      vas_paste(addr, 0, 1);
+                      /* Poll on csb.flags with a timeout; the csb
+                       * address is listed in the CRB. Set 'done' when
+                       * no more requests are to be submitted.
+                       */
+              } while (!done);
+              close(fd);   /* or the window is closed on process exit */
+              return 0;
+      }
+
+ Refer to https://github.com/abalib/power-gzip for tests and more
+ use cases.
diff --git a/Documentation/process/3.Early-stage.rst b/Documentation/process/3.Early-stage.rst
index be00716071d4..6bfd60d77d1a 100644
--- a/Documentation/process/3.Early-stage.rst
+++ b/Documentation/process/3.Early-stage.rst
@@ -46,7 +46,7 @@ and posted this:
to communicate user requirements to these people is a waste of
time. They are much too "intelligent" to listen to lesser mortals.
-(http://lwn.net/Articles/131776/).
+(https://lwn.net/Articles/131776/).
The reality of the situation was different; the kernel developers were far
more concerned about system stability, long-term maintenance, and finding
@@ -216,7 +216,7 @@ a non-disclosure agreement. The Linux Foundation operates an NDA program
designed to help with this sort of situation; more information can be found
at:
- http://www.linuxfoundation.org/en/NDA_program
+ https://www.linuxfoundation.org/nda/
This kind of review is often enough to avoid serious problems later on
without requiring public disclosure of the project.
diff --git a/Documentation/process/7.AdvancedTopics.rst b/Documentation/process/7.AdvancedTopics.rst
index 172733cff097..bf7cbfb4caa5 100644
--- a/Documentation/process/7.AdvancedTopics.rst
+++ b/Documentation/process/7.AdvancedTopics.rst
@@ -29,9 +29,9 @@ long document in its own right. Instead, the focus here will be on how git
fits into the kernel development process in particular. Developers who
wish to come up to speed with git will find more information at:
- http://git-scm.com/
+ https://git-scm.com/
- http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+ https://www.kernel.org/pub/software/scm/git/docs/user-manual.html
and on various tutorials found on the web.
@@ -55,7 +55,7 @@ server with git-daemon is relatively straightforward if you have a system
which is accessible to the Internet. Otherwise, free, public hosting sites
(Github, for example) are starting to appear on the net. Established
developers can get an account on kernel.org, but those are not easy to come
-by; see http://kernel.org/faq/ for more information.
+by; see https://kernel.org/faq/ for more information.
The normal git workflow involves the use of a lot of branches. Each line
of development can be separated into a separate "topic branch" and
@@ -125,7 +125,7 @@ can affect your ability to get trees pulled in the future. Quoting Linus:
to trust things *without* then having to go and check every
individual change by hand.
-(http://lwn.net/Articles/224135/).
+(https://lwn.net/Articles/224135/).
To avoid this kind of situation, ensure that all patches within a given
branch stick closely to the associated topic; a "driver fixes" branch
diff --git a/Documentation/process/8.Conclusion.rst b/Documentation/process/8.Conclusion.rst
index 8395aa2c1f3a..b32a40215858 100644
--- a/Documentation/process/8.Conclusion.rst
+++ b/Documentation/process/8.Conclusion.rst
@@ -16,24 +16,24 @@ distributions runs into internal limits and fails to process the documents
properly).
Various web sites discuss kernel development at all levels of detail. Your
-author would like to humbly suggest http://lwn.net/ as a source;
+author would like to humbly suggest https://lwn.net/ as a source;
information on many specific kernel topics can be found via the LWN kernel
index at:
- http://lwn.net/Kernel/Index/
+ https://lwn.net/Kernel/Index/
Beyond that, a valuable resource for kernel developers is:
- http://kernelnewbies.org/
+ https://kernelnewbies.org/
-And, of course, one should not forget http://kernel.org/, the definitive
+And, of course, one should not forget https://kernel.org/, the definitive
location for kernel release information.
There are a number of books on kernel development:
Linux Device Drivers, 3rd Edition (Jonathan Corbet, Alessandro
Rubini, and Greg Kroah-Hartman). Online at
- http://lwn.net/Kernel/LDD3/.
+ https://lwn.net/Kernel/LDD3/.
Linux Kernel Development (Robert Love).
@@ -46,9 +46,9 @@ information to be found there.
Documentation for git can be found at:
- http://www.kernel.org/pub/software/scm/git/docs/
+ https://www.kernel.org/pub/software/scm/git/docs/
- http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+ https://www.kernel.org/pub/software/scm/git/docs/user-manual.html
Conclusion
diff --git a/Documentation/process/adding-syscalls.rst b/Documentation/process/adding-syscalls.rst
index a6b4a3a5bf3f..a3ecb236576c 100644
--- a/Documentation/process/adding-syscalls.rst
+++ b/Documentation/process/adding-syscalls.rst
@@ -541,9 +541,9 @@ References and Sources
:manpage:`syscall(2)` man-page:
http://man7.org/linux/man-pages/man2/syscall.2.html#NOTES
- Collated emails from Linus Torvalds discussing the problems with ``ioctl()``:
- http://yarchive.net/comp/linux/ioctl.html
+ https://yarchive.net/comp/linux/ioctl.html
- "How to not invent kernel interfaces", Arnd Bergmann,
- http://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf
+ https://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf
- LWN article from Michael Kerrisk on avoiding new uses of CAP_SYS_ADMIN:
https://lwn.net/Articles/486306/
- Recommendation from Andrew Morton that all related information for a new
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index fbb9297e6360..2e7017bef4b8 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -229,7 +229,7 @@ Although interdiff may save you a step or two you are generally advised to
do the additional steps since interdiff can get things wrong in some cases.
Another alternative is ``ketchup``, which is a python script for automatic
-downloading and applying of patches (http://www.selenic.com/ketchup/).
+downloading and applying of patches (https://www.selenic.com/ketchup/).
Other nice tools are diffstat, which shows a summary of changes made by a
patch; lsdiff, which displays a short listing of affected files in a patch
@@ -241,7 +241,7 @@ the patch contains a given regular expression.
Where can I download the patches?
=================================
-The patches are available at http://kernel.org/
+The patches are available at https://kernel.org/
Most recent patches are linked from the front page, but they also have
specific homes.
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 91c5ff8e161e..5cfb54c2aaa6 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
Program Minimal version Command to check the version
====================== =============== ========================================
-GNU C 4.6 gcc --version
+GNU C 4.8 gcc --version
GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 17a8e584f15f..2657a55c6f12 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -90,7 +90,7 @@ Statements longer than 80 columns should be broken into sensible chunks,
unless exceeding 80 columns significantly increases readability and does
not hide information.
-Descendants are always substantially shorter than the parent and are
+Descendants are always substantially shorter than the parent and
are placed substantially to the right. A very commonly used style
is to align descendants to a function open parenthesis.
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index ba5e944c7a63..1699b7f8e63a 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -16,7 +16,7 @@ for a list of items to check before
submitting code. If you are submitting a driver, also read
:ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`;
for device tree binding patches, read
-Documentation/devicetree/bindings/submitting-patches.txt.
+Documentation/devicetree/bindings/submitting-patches.rst.
Many of these steps describe the default behavior of the ``git`` version
control system; if you use ``git`` to prepare your patches, you'll find much
diff --git a/Documentation/process/volatile-considered-harmful.rst b/Documentation/process/volatile-considered-harmful.rst
index 4934e656a6f3..7eb6bd7c9214 100644
--- a/Documentation/process/volatile-considered-harmful.rst
+++ b/Documentation/process/volatile-considered-harmful.rst
@@ -109,9 +109,9 @@ been properly thought through.
References
==========
-[1] http://lwn.net/Articles/233481/
+[1] https://lwn.net/Articles/233481/
-[2] http://lwn.net/Articles/233482/
+[2] https://lwn.net/Articles/233482/
Credits
=======
diff --git a/Documentation/s390/index.rst b/Documentation/s390/index.rst
index f7af2061e406..cf71df5776b4 100644
--- a/Documentation/s390/index.rst
+++ b/Documentation/s390/index.rst
@@ -15,6 +15,7 @@ s390 Architecture
vfio-ccw
zfcpdump
common_io
+ pci
text_files
diff --git a/Documentation/s390/pci.rst b/Documentation/s390/pci.rst
new file mode 100644
index 000000000000..492850bff316
--- /dev/null
+++ b/Documentation/s390/pci.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========
+S/390 PCI
+=========
+
+Authors:
+ - Pierre Morel
+
+Copyright, IBM Corp. 2020
+
+
+Command line parameters and debugfs entries
+===========================================
+
+Command line parameters
+-----------------------
+
+* nomio
+
+ Do not use PCI Mapped I/O (MIO) instructions.
+
+* norid
+
+ Ignore the RID field and force use of one PCI domain per PCI function.
+
+debugfs entries
+---------------
+
+The S/390 debug feature (s390dbf) generates views to hold various debug results in debugfs directories of the form:
+
+ * /sys/kernel/debug/s390dbf/pci_*/
+
+For example:
+
+ - /sys/kernel/debug/s390dbf/pci_msg/sprintf
+ Holds messages from the processing of PCI events, like machine check handling
+ and setting of global functionality, like UID checking.
+
+ Change the level of logging to be more or less verbose by piping
+ a number between 0 and 6 to /sys/kernel/debug/s390dbf/pci_*/level. For
+ details, see the documentation on the S/390 debug feature at
+ Documentation/s390/s390dbf.rst.
+
+Sysfs entries
+=============
+
+Entries specific to zPCI functions and entries that hold zPCI information.
+
+* /sys/bus/pci/slots/XXXXXXXX
+
+ The slot entries are set up using the function identifier (FID) of the
+ PCI function.
+
+ - /sys/bus/pci/slots/XXXXXXXX/power
+
+ A physical function that currently supports a virtual function cannot be
+ powered off until all virtual functions are removed with:
+ echo 0 > /sys/bus/pci/devices/XXXX:XX:XX.X/sriov_numvf
+
+* /sys/bus/pci/devices/XXXX:XX:XX.X/
+
+ - function_id
+ A zPCI function identifier that uniquely identifies the function in the Z server.
+
+ - function_handle
+ Low-level identifier used for a configured PCI function.
+ It might be useful for debugging.
+
+ - pchid
+ Model-dependent location of the I/O adapter.
+
+ - pfgid
+ PCI function group ID; functions that share identical functionality
+ use a common identifier.
+ A PCI group defines interrupts, IOMMU, IOTLB, and DMA specifics.
+
+ - vfn
+ The virtual function number, from 1 to N for virtual functions,
+ 0 for physical functions.
+
+ - pft
+ The PCI function type.
+
+ - port
+ The port corresponds to the physical port the function is attached to.
+ It also gives an indication of the physical function a virtual function
+ is attached to.
+
+ - uid
+ The unique identifier (UID) is defined when configuring an LPAR and is
+ unique in the LPAR.
+
+ - pfip/segmentX
+ The segments determine the isolation of a function.
+ They correspond to the physical path to the function.
+ The more the segments differ, the more the functions are isolated.
+
+Enumeration and hotplug
+=======================
+
+The PCI address consists of four parts: domain, bus, device and function,
+and is of this form: DDDD:BB:dd.f
+
+* When not using multi-functions (norid is set, or the firmware does not
+ support multi-functions):
+
+ - There is only one function per domain.
+
+ - The domain is set from the zPCI function's UID as defined during the
+ LPAR creation.
+
+* When using multi-functions (norid parameter is not set),
+ zPCI functions are addressed differently:
+
+ - There is still only one bus per domain.
+
+ - There can be up to 256 functions per bus.
+
+ - The domain part of the address of all functions of
+ a multi-function device is set from the zPCI function's UID, as defined
+ during the LPAR creation for function zero.
+
+ - New functions will only be ready for use after the function zero
+ (the function with devfn 0) has been enumerated.
diff --git a/Documentation/s390/vfio-ccw.rst b/Documentation/s390/vfio-ccw.rst
index fca9c4f5bd9c..8aad08a8b8a5 100644
--- a/Documentation/s390/vfio-ccw.rst
+++ b/Documentation/s390/vfio-ccw.rst
@@ -204,15 +204,44 @@ definition of the region is::
__u32 ret_code;
} __packed;
+This region is always available.
+
While starting an I/O request, orb_area should be filled with the
guest ORB, and scsw_area should be filled with the SCSW of the Virtual
Subchannel.
irb_area stores the I/O result.
-ret_code stores a return code for each access of the region.
+ret_code stores a return code for each access of the region. The following
+values may occur:
+
+``0``
+ The operation was successful.
+
+``-EOPNOTSUPP``
+ The orb specified transport mode or an unidentified IDAW format, or the
+ scsw specified a function other than the start function.
+
+``-EIO``
+ A request was issued while the device was not in a state ready to accept
+ requests, or an internal error occurred.
+
+``-EBUSY``
+ The subchannel was status pending or busy, or a request is already active.
+
+``-EAGAIN``
+ A request was being processed, and the caller should retry.
+
+``-EACCES``
+ The channel path(s) used for the I/O were found to be not operational.
+
+``-ENODEV``
+ The device was found to be not operational.
+
+``-EINVAL``
+ The orb specified a chain longer than 255 ccws, or an internal error
+ occurred.
-This region is always available.
vfio-ccw cmd region
-------------------
@@ -231,6 +260,64 @@ This region is exposed via region type VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD.
Currently, CLEAR SUBCHANNEL and HALT SUBCHANNEL use this region.
+command specifies the command to be issued; ret_code stores a return code
+for each access of the region. The following values may occur:
+
+``0``
+ The operation was successful.
+
+``-ENODEV``
+ The device was found to be not operational.
+
+``-EINVAL``
+ A command other than halt or clear was specified.
+
+``-EIO``
+ A request was issued while the device was not in a state ready to accept
+ requests.
+
+``-EAGAIN``
+ A request was being processed, and the caller should retry.
+
+``-EBUSY``
+ The subchannel was status pending or busy while processing a halt request.
+
+vfio-ccw schib region
+---------------------
+
+The vfio-ccw schib region is used to return Subchannel-Information
+Block (SCHIB) data to userspace::
+
+ struct ccw_schib_region {
+ #define SCHIB_AREA_SIZE 52
+ __u8 schib_area[SCHIB_AREA_SIZE];
+ } __packed;
+
+This region is exposed via region type VFIO_REGION_SUBTYPE_CCW_SCHIB.
+
+Reading this region triggers a STORE SUBCHANNEL to be issued to the
+associated hardware.
+
+vfio-ccw crw region
+---------------------
+
+The vfio-ccw crw region is used to return Channel Report Word (CRW)
+data to userspace::
+
+ struct ccw_crw_region {
+ __u32 crw;
+ __u32 pad;
+ } __packed;
+
+This region is exposed via region type VFIO_REGION_SUBTYPE_CCW_CRW.
+
+Reading this region returns a CRW if one that is relevant for this
+subchannel (e.g. one reporting changes in channel path state) is
+pending, or all zeroes if not. If multiple CRWs are pending (including
+possibly chained CRWs), reading this region again will return the next
+one, until no more CRWs are pending and zeroes are returned. This is
+similar to how STORE CHANNEL REPORT WORD works.
+
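+One way userspace might drain pending CRWs is sketched below. It is
+illustrative only and assumes the region's offset within the device file
+descriptor has already been obtained via VFIO_DEVICE_GET_REGION_INFO::
+
+  /* Illustrative sketch: read CRWs from the crw region until none are
+   * pending. 'crw_offset' is assumed to have been looked up beforehand
+   * with VFIO_DEVICE_GET_REGION_INFO.
+   */
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <sys/types.h>
+  #include <unistd.h>
+
+  struct ccw_crw_region {
+          uint32_t crw;
+          uint32_t pad;
+  } __attribute__((packed));
+
+  static void drain_crws(int device_fd, off_t crw_offset)
+  {
+          struct ccw_crw_region region;
+
+          for (;;) {
+                  if (pread(device_fd, &region, sizeof(region),
+                            crw_offset) != (ssize_t)sizeof(region))
+                          break;          /* read error */
+                  if (!region.crw)
+                          break;          /* no CRW pending */
+                  printf("CRW: 0x%08x\n", region.crw);
+          }
+  }
+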
vfio-ccw operation details
--------------------------
@@ -333,7 +420,14 @@ through DASD/ECKD device online in a guest now and use it as a block
device.
The current code allows the guest to start channel programs via
-START SUBCHANNEL, and to issue HALT SUBCHANNEL and CLEAR SUBCHANNEL.
+START SUBCHANNEL, and to issue HALT SUBCHANNEL, CLEAR SUBCHANNEL,
+and STORE SUBCHANNEL.
+
+Currently all channel programs are prefetched, regardless of the
+p-bit setting in the ORB. As a result, self modifying channel
+programs are not supported. For this reason, IPL has to be handled as
+a special case by a userspace/guest program; this has been implemented
+in QEMU's s390-ccw bios as of QEMU 4.1.
vfio-ccw supports classic (command mode) channel I/O only. Transport
mode (HPF) is not supported.
diff --git a/Documentation/s390/zfcpdump.rst b/Documentation/s390/zfcpdump.rst
index 54e8e7caf7e7..a61de7aa8778 100644
--- a/Documentation/s390/zfcpdump.rst
+++ b/Documentation/s390/zfcpdump.rst
@@ -46,5 +46,5 @@ initramfs with a user space application that writes the dump to a SCSI
partition.
For more information on how to use zfcpdump refer to the s390 'Using the Dump
-Tools book', which is available from
-http://www.ibm.com/developerworks/linux/linux390.
+Tools' book, which is available from IBM Knowledge Center:
+https://www.ibm.com/support/knowledgecenter/linuxonibm/liaaf/lnz_r_dt.html
diff --git a/Documentation/security/SCTP.rst b/Documentation/security/SCTP.rst
index d903eb97fcf3..0bcf6c1245ee 100644
--- a/Documentation/security/SCTP.rst
+++ b/Documentation/security/SCTP.rst
@@ -328,7 +328,7 @@ NOTES:
label (see **netlabel-config**\(8) helper script for details).
5) The NetLabel SCTP peer labeling rules apply as discussed in the following
- set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t.
+ set of posts tagged "netlabel" at: https://www.paul-moore.com/blog/t.
6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)``
CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)``
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index d9b0b859018b..9367d0fe4a02 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -920,10 +920,14 @@ The keyctl syscall functions are:
long keyctl(KEYCTL_PKEY_QUERY,
key_serial_t key_id, unsigned long reserved,
+ const char *params,
struct keyctl_pkey_query *info);
- Get information about an asymmetric key. The information is returned in
- the keyctl_pkey_query struct::
+ Get information about an asymmetric key. Specific algorithms and
+ encodings may be queried by using the ``params`` argument. This is a
+ string containing a space- or tab-separated string of key-value pairs.
+ Currently supported keys include ``enc`` and ``hash``. The information
+ is returned in the keyctl_pkey_query struct::
__u32 supported_ops;
__u32 key_size;
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index fbfe6693bb60..788704886eec 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -29,7 +29,7 @@ u"""
Used tools:
- * ``dot(1)``: Graphviz (http://www.graphviz.org). If Graphviz is not
+ * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
available, the DOT language is inserted as literal-block.
* SVG to PDF: To generate PDF, you need at least one of this tools:
@@ -41,7 +41,7 @@ u"""
* generate PDF from SVG / used by PDF (LaTeX) builder
* generate SVG (html-builder) and PDF (latex-builder) from DOT files.
- DOT: see http://www.graphviz.org/content/dot-language
+ DOT: see https://www.graphviz.org/content/dot-language
"""
@@ -182,7 +182,7 @@ def setupTools(app):
kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
else:
kernellog.warn(app, "dot(1) not found, for better output quality install "
- "graphviz from http://www.graphviz.org")
+ "graphviz from https://www.graphviz.org")
if convert_cmd:
kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
else:
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index 9803e14639bf..38290b9f25eb 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -71,7 +71,7 @@ Solution
gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label:
-http://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html
+https://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html
Using the 'asm goto', we can create branches that are either taken or not taken
by default, without the need to check memory. Then, at run-time, we can patch
diff --git a/Documentation/trace/coresight/coresight-ect.rst b/Documentation/trace/coresight/coresight-ect.rst
index a93e52abcf46..a68732c5c6d6 100644
--- a/Documentation/trace/coresight/coresight-ect.rst
+++ b/Documentation/trace/coresight/coresight-ect.rst
@@ -73,7 +73,7 @@ capable of generating or using trigger signals.::
>$ ls /sys/bus/coresight/devices/etm0/cti_cpu0
channels ctmid enable nr_trigger_cons mgmt power powered regs
- subsystem triggers0 triggers1 uevent
+ connections subsystem triggers0 triggers1 uevent
*Key file items are:-*
* ``enable``: enables/disables the CTI. Read to determine current state.
@@ -89,6 +89,9 @@ capable of generating or using trigger signals.::
* ``channels``: Contains the channel API - CTI main programming interface.
* ``regs``: Gives access to the raw programmable CTI regs.
* ``mgmt``: the standard CoreSight management registers.
+ * ``connections``: Links to connected *CoreSight* devices. The number of
+ links can be 0 to ``nr_trigger_cons``. The actual number is given by
+ ``nr_links`` in this directory.
triggers<N> directories
diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst
index 108600ee1e12..0b73acb44efa 100644
--- a/Documentation/trace/coresight/coresight.rst
+++ b/Documentation/trace/coresight/coresight.rst
@@ -241,6 +241,91 @@ to the newer scheme, to give a confirmation that what you see on your
system is not unexpected. One must use the "names" as they appear on
the system under specified locations.
+Topology Representation
+-----------------------
+
+Each CoreSight component has a ``connections`` directory which will contain
+links to other CoreSight components. This allows the user to explore the trace
+topology and for larger systems, determine the most appropriate sink for a
+given source. The connection information can also be used to establish
+which CTI devices are connected to a given component. This directory contains a
+``nr_links`` attribute detailing the number of links in the directory.
+
+For an ETM source, in this case ``etm0`` on a Juno platform, a typical
+arrangement will be::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/etm0/connections
+ <file details> cti_cpu0 -> ../../../23020000.cti/cti_cpu0
+ <file details> nr_links
+ <file details> out:0 -> ../../../230c0000.funnel/funnel2
+
+Following the out port to ``funnel2``::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/funnel2/connections
+ <file details> in:0 -> ../../../23040000.etm/etm0
+ <file details> in:1 -> ../../../23140000.etm/etm3
+ <file details> in:2 -> ../../../23240000.etm/etm4
+ <file details> in:3 -> ../../../23340000.etm/etm5
+ <file details> nr_links
+ <file details> out:0 -> ../../../20040000.funnel/funnel0
+
+And again to ``funnel0``::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/funnel0/connections
+ <file details> in:0 -> ../../../220c0000.funnel/funnel1
+ <file details> in:1 -> ../../../230c0000.funnel/funnel2
+ <file details> nr_links
+ <file details> out:0 -> ../../../20010000.etf/tmc_etf0
+
+This finds the first sink, ``tmc_etf0``, which can be used to collect data
+as a sink, or as a link to propagate further along the chain::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/tmc_etf0/connections
+ <file details> cti_sys0 -> ../../../20020000.cti/cti_sys0
+ <file details> in:0 -> ../../../20040000.funnel/funnel0
+ <file details> nr_links
+ <file details> out:0 -> ../../../20150000.funnel/funnel4
+
+via ``funnel4``::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/funnel4/connections
+ <file details> in:0 -> ../../../20010000.etf/tmc_etf0
+ <file details> in:1 -> ../../../20140000.etf/tmc_etf1
+ <file details> nr_links
+ <file details> out:0 -> ../../../20120000.replicator/replicator0
+
+and a ``replicator0``::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/replicator0/connections
+ <file details> in:0 -> ../../../20150000.funnel/funnel4
+ <file details> nr_links
+ <file details> out:0 -> ../../../20030000.tpiu/tpiu0
+ <file details> out:1 -> ../../../20070000.etr/tmc_etr0
+
+Arriving at the final sink in the chain, ``tmc_etr0``::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/tmc_etr0/connections
+ <file details> cti_sys0 -> ../../../20020000.cti/cti_sys0
+ <file details> in:0 -> ../../../20120000.replicator/replicator0
+ <file details> nr_links
+
+As described below, when using sysfs it is sufficient to enable a sink and
+a source for successful trace. The framework will correctly enable all
+intermediate links as required.
+
+Note: ``cti_sys0`` appears in two of the connections lists above.
+CTIs can connect to multiple devices and are arranged in a star topology
+via the CTM. See (:doc:`coresight-ect`) [#fourth]_ for further details.
+Looking at this device we see 4 connections::
+
+ linaro-developer:~# ls -l /sys/bus/coresight/devices/cti_sys0/connections
+ <file details> nr_links
+ <file details> stm0 -> ../../../20100000.stm/stm0
+ <file details> tmc_etf0 -> ../../../20010000.etf/tmc_etf0
+ <file details> tmc_etr0 -> ../../../20070000.etr/tmc_etr0
+ <file details> tpiu0 -> ../../../20030000.tpiu/tpiu0
+
+
How to use the tracer modules
-----------------------------
diff --git a/Documentation/trace/events-msr.rst b/Documentation/trace/events-msr.rst
index e938aa0b6f4f..810481e530b6 100644
--- a/Documentation/trace/events-msr.rst
+++ b/Documentation/trace/events-msr.rst
@@ -4,7 +4,7 @@ MSR Trace Events
The x86 kernel supports tracing most MSR (Model Specific Register) accesses.
To see the definition of the MSRs on Intel systems please see the SDM
-at http://www.intel.com/sdm (Volume 3)
+at https://www.intel.com/sdm (Volume 3)
Available trace points:
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 3b5614b1d1a5..430a16283103 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -1524,7 +1524,7 @@ display-graph option::
=> remove_vma
=> exit_mmap
=> mmput
- => flush_old_exec
+ => begin_new_exec
=> load_elf_binary
=> search_binary_handler
=> __do_execve_file.isra.32
diff --git a/Documentation/trace/histogram-design.rst b/Documentation/trace/histogram-design.rst
new file mode 100644
index 000000000000..eef840043da9
--- /dev/null
+++ b/Documentation/trace/histogram-design.rst
@@ -0,0 +1,2115 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+Histogram Design Notes
+======================
+
+:Author: Tom Zanussi <zanussi@kernel.org>
+
+This document attempts to provide a description of how the ftrace
+histograms work and how the individual pieces map to the data
+structures used to implement them in trace_events_hist.c and
+tracing_map.c.
+
+Note: All the ftrace histogram command examples assume the working
+ directory is the ftrace /tracing directory. For example::
+
+ # cd /sys/kernel/debug/tracing
+
+Also, the histogram output displayed for those commands will generally
+be truncated - only enough to make the point is displayed.
+
+'hist_debug' trace event files
+==============================
+
+If the kernel is compiled with CONFIG_HIST_TRIGGERS_DEBUG set, an
+event file named 'hist_debug' will appear in each event's
+subdirectory. This file can be read at any time and will display some
+of the hist trigger internals described in this document. Specific
+examples and output will be described in test cases below.
+
+Basic histograms
+================
+
+First, basic histograms. Below is pretty much the simplest thing you
+can do with histograms - create one with a single key on a single
+event and cat the output::
+
+ # echo 'hist:keys=pid' >> events/sched/sched_waking/trigger
+
+ # cat events/sched/sched_waking/hist
+
+ { pid: 18249 } hitcount: 1
+ { pid: 13399 } hitcount: 1
+ { pid: 17973 } hitcount: 1
+ { pid: 12572 } hitcount: 1
+ ...
+ { pid: 10 } hitcount: 921
+ { pid: 18255 } hitcount: 1444
+ { pid: 25526 } hitcount: 2055
+ { pid: 5257 } hitcount: 2055
+ { pid: 27367 } hitcount: 2055
+ { pid: 1728 } hitcount: 2161
+
+ Totals:
+ Hits: 21305
+ Entries: 183
+ Dropped: 0
+
+What this does is create a histogram on the sched_waking event using
+pid as a key and with a single value, hitcount, which even if not
+explicitly specified, exists for every histogram regardless.
+
+The hitcount value is a per-bucket value that's automatically
+incremented on every hit for the given key, which in this case is the
+pid.
+
+So in this histogram, there's a separate bucket for each pid, and each
+bucket contains a value for that bucket, counting the number of times
+sched_waking was called for that pid.
+
+Each histogram is represented by a hist_data struct.
+
+To keep track of each key and value field in the histogram, hist_data
+keeps an array of these fields named fields[]. The fields[] array is
+an array containing struct hist_field representations of each
+histogram val and key in the histogram (variables are also included
+here, but are discussed later). So for the above histogram we have one
+key and one value; in this case the one value is the hitcount value,
+which all histograms have regardless of whether they explicitly define
+it; the above histogram does not.
+
+Each struct hist_field contains a pointer to the ftrace_event_field
+from the event's trace_event_file along with various bits related to
+that such as the size, offset, type, and a hist_field_fn_t function,
+which is used to grab the field's data from the ftrace event buffer
+(in most cases - some hist_fields such as hitcount don't directly map
+to an event field in the trace buffer - in these cases the function
+implementation gets its value from somewhere else). The flags field
+indicates which type of field it is - key, value, variable, variable
+reference, etc., with value being the default.
+
+The other important hist_data data structure in addition to the
+fields[] array is the tracing_map instance created for the histogram,
+which is held in the .map member. The tracing_map implements the
+lock-free hash table used to implement histograms (see
+kernel/trace/tracing_map.h for much more discussion about the
+low-level data structures implementing the tracing_map). For the
+purposes of this discussion, the tracing_map contains a number of
+buckets, each bucket corresponding to a particular tracing_map_elt
+object hashed by a given histogram key.
+
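+To make the diagrams below a little easier to map onto code, here is a
+heavily simplified, illustrative sketch of the structures involved. It is
+not the real kernel definition (the actual structures live in
+kernel/trace/trace_events_hist.c and kernel/trace/tracing_map.h and have
+many more members); only the members discussed in this document are
+shown::
+
+  /* Illustrative sketch only; see kernel/trace/trace_events_hist.c and
+   * kernel/trace/tracing_map.h for the real definitions.
+   */
+  typedef unsigned long long u64;
+
+  /* grabs a field's data from the current trace record */
+  typedef u64 (*hist_field_fn_t)(void *hist_field, void *trace_record);
+
+  struct hist_field {
+          void            *event_field;  /* ftrace_event_field, if any  */
+          unsigned long    flags;        /* key, val, var, var ref, ... */
+          unsigned int     size;
+          unsigned int     offset;
+          hist_field_fn_t  fn;
+  };
+
+  struct tracing_map_field {             /* one entry per key or value  */
+          union {
+                  u64          sum;      /* running total for a value   */
+                  unsigned int offset;   /* key offset, used for sorting */
+          };
+  };
+
+  struct tracing_map_elt {               /* "map_elt" in the diagrams   */
+          void                     *key;
+          struct tracing_map_field  fields[8];
+  };
+
+  struct hist_trigger_data {             /* "hist_data" in the diagrams */
+          struct hist_field  *fields[8]; /* vals first, then keys       */
+          unsigned int        n_vals;
+          unsigned int        n_keys;
+          unsigned int        n_fields;  /* n_vals + n_keys             */
+          void               *map;       /* the tracing_map hash table  */
+  };
+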
+Below is a diagram the first part of which describes the hist_data and
+associated key and value fields for the histogram described above. As
+you can see, there are two fields in the fields array, one val field
+for the hitcount and one key field for the pid key.
+
+Below that is a diagram of a run-time snapshot of what the tracing_map
+might look like for a given run. It attempts to show the
+relationships between the hist_data fields and the tracing_map
+elements for a couple of hypothetical keys and values::
+
+ +------------------+
+ | hist_data |
+ +------------------+ +----------------+
+ | .fields[] |---->| val = hitcount |----------------------------+
+ +----------------+ +----------------+ |
+ | .map | | .size | |
+ +----------------+ +--------------+ |
+ | .offset | |
+ +--------------+ |
+ | .fn() | |
+ +--------------+ |
+ . |
+ . |
+ . |
+ +----------------+ <--- n_vals |
+ | key = pid |----------------------------|--+
+ +----------------+ | |
+ | .size | | |
+ +--------------+ | |
+ | .offset | | |
+ +--------------+ | |
+ | .fn() | | |
+ +----------------+ <--- n_fields | |
+ | unused | | |
+ +----------------+ | |
+ | | | |
+ +--------------+ | |
+ | | | |
+ +--------------+ | |
+ | | | |
+ +--------------+ | |
+ n_keys = n_fields - n_vals | |
+
+The hist_data n_vals and n_fields delineate the extent of the fields[] | |
+array and separate keys from values for the rest of the code. | |
+
+Below is a run-time representation of the tracing_map part of the | |
+histogram, with pointers from various parts of the fields[] array | |
+to corresponding parts of the tracing_map. | |
+
+The tracing_map consists of an array of tracing_map_entrys and a set | |
+of preallocated tracing_map_elts (abbreviated below as map_entry and | |
+map_elt). The total number of map_entrys in the hist_data.map array = | |
+map->max_elts (actually map->map_size but only max_elts of those are | |
+used. This is a property required by the map_insert() algorithm). | |
+
+If a map_entry is unused, meaning no key has yet hashed into it, its | |
+.key value is 0 and its .val pointer is NULL. Once a map_entry has | |
+been claimed, the .key value contains the key's hash value and the | |
+.val member points to a map_elt containing the full key and an entry | |
+for each key or value in the map_elt.fields[] array. There is an | |
+entry in the map_elt.fields[] array corresponding to each hist_field | |
+in the histogram, and this is where the continually aggregated sums | |
+corresponding to each histogram value are kept. | |
+
+The diagram attempts to show the relationship between the | |
+hist_data.fields[] and the map_elt.fields[] with the links drawn | |
+between diagrams::
+
+ +-----------+ | |
+ | hist_data | | |
+ +-----------+ | |
+ | .fields | | |
+ +---------+ +-----------+ | |
+ | .map |---->| map_entry | | |
+ +---------+ +-----------+ | |
+ | .key |---> 0 | |
+ +---------+ | |
+ | .val |---> NULL | |
+ +-----------+ | |
+ | map_entry | | |
+ +-----------+ | |
+ | .key |---> pid = 999 | |
+ +---------+ +-----------+ | |
+ | .val |--->| map_elt | | |
+ +---------+ +-----------+ | |
+ . | .key |---> full key * | |
+ . +---------+ +---------------+ | |
+ . | .fields |--->| .sum (val) |<-+ |
+ +-----------+ +---------+ | 2345 | | |
+ | map_entry | +---------------+ | |
+ +-----------+ | .offset (key) |<----+
+ | .key |---> 0 | 0 | | |
+ +---------+ +---------------+ | |
+ | .val |---> NULL . | |
+ +-----------+ . | |
+ | map_entry | . | |
+ +-----------+ +---------------+ | |
+ | .key | | .sum (val) or | | |
+ +---------+ +---------+ | .offset (key) | | |
+ | .val |--->| map_elt | +---------------+ | |
+ +-----------+ +---------+ | .sum (val) or | | |
+ | map_entry | | .offset (key) | | |
+ +-----------+ +---------------+ | |
+ | .key |---> pid = 4444 | |
+ +---------+ +-----------+ | |
+ | .val | | map_elt | | |
+ +---------+ +-----------+ | |
+ | .key |---> full key * | |
+ +---------+ +---------------+ | |
+ | .fields |--->| .sum (val) |<-+ |
+ +---------+ | 65523 | |
+ +---------------+ |
+ | .offset (key) |<----+
+ | 0 |
+ +---------------+
+ .
+ .
+ .
+ +---------------+
+ | .sum (val) or |
+ | .offset (key) |
+ +---------------+
+ | .sum (val) or |
+ | .offset (key) |
+ +---------------+
+
+Abbreviations used in the diagrams::
+
+ hist_data = struct hist_trigger_data
+ hist_data.fields = struct hist_field
+ fn = hist_field_fn_t
+ map_entry = struct tracing_map_entry
+ map_elt = struct tracing_map_elt
+ map_elt.fields = struct tracing_map_field
+
+Whenever a new event occurs and it has a hist trigger associated with
+it, event_hist_trigger() is called. event_hist_trigger() first deals
+with the key: for each subkey in the key (in the above example, there
+is just one subkey corresponding to pid), the hist_field that
+represents that subkey is retrieved from hist_data.fields[] and the
+hist_field_fn_t fn() associated with that field, along with the
+field's size and offset, is used to grab that subkey's data from the
+current trace record.
+
+Once the complete key has been retrieved, it's used to look that key
+up in the tracing_map. If there's no tracing_map_elt associated with
+that key, an empty one is claimed and inserted in the map for the new
+key. In either case, the tracing_map_elt associated with that key is
+returned.
+
+Once a tracing_map_elt is available, hist_trigger_elt_update() is called.
+As the name implies, this updates the element, which basically means
+updating the element's fields. There's a tracing_map_field associated
+with each key and value in the histogram, and each of these corresponds
+to the key and value hist_fields created when the histogram was
+created. hist_trigger_elt_update() goes through each value hist_field
+and, as for the keys, uses the hist_field's fn() and size and offset
+to grab the field's value from the current trace record. Once it has
+that value, it simply adds that value to that field's
+continually-updated tracing_map_field.sum member. Some hist_field
+fn()s, such as for the hitcount, don't actually grab anything from the
+trace record (the hitcount fn() just increments the counter sum by 1),
+but the idea is the same.
+
+Once all the values have been updated, hist_trigger_elt_update() is
+done and returns. Note that there are also tracing_map_fields for
+each subkey in the key, but hist_trigger_elt_update() doesn't look at
+them or update anything - those exist only for sorting, which can
+happen later.
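+
+As a rough, conceptual outline only (not the actual kernel code), the
+per-event work just described can be pictured as follows, using the
+simplified sketch structures from the "Basic histograms" section; the
+assumed helper sketch_map_insert() stands in for tracing_map_insert()::
+
+  /* Conceptual outline only; the real code is event_hist_trigger() and
+   * hist_trigger_elt_update() in kernel/trace/trace_events_hist.c.
+   */
+  #include <string.h>
+
+  /* assumed stand-in for tracing_map_insert(): looks the key up and
+   * claims an empty map_elt if the key is new
+   */
+  struct tracing_map_elt *sketch_map_insert(void *map, void *key);
+
+  static void sketch_event_hist_trigger(struct hist_trigger_data *hist_data,
+                                        void *rec)
+  {
+          char key[64];       /* compound key; assumed to fit 64 bytes */
+          unsigned int key_offset = 0;
+          struct tracing_map_elt *elt;
+          unsigned int i;
+
+          /* Build the compound key: fn(), together with the field's
+           * size and offset, grabs each subkey's data from the record.
+           */
+          for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
+                  struct hist_field *kf = hist_data->fields[i];
+                  u64 v = kf->fn(kf, rec);
+
+                  memcpy(key + key_offset, &v, kf->size);
+                  key_offset += kf->size;
+          }
+
+          /* Look the key up, claiming a new map_elt if needed. */
+          elt = sketch_map_insert(hist_data->map, key);
+
+          /* Update each value field's running sum; the hitcount fn()
+           * simply returns 1, so its sum counts hits.
+           */
+          for (i = 0; i < hist_data->n_vals; i++) {
+                  struct hist_field *vf = hist_data->fields[i];
+
+                  elt->fields[i].sum += vf->fn(vf, rec);
+          }
+  }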
+
+Basic histogram test
+--------------------
+
+This is a good example to try. It produces 3 value fields and 2 key
+fields in the output::
+
+ # echo 'hist:keys=common_pid,call_site.sym:values=bytes_req,bytes_alloc,hitcount' >> events/kmem/kmalloc/trigger
+
+To see the debug data, cat the kmem/kmalloc's 'hist_debug' file. It
+will show the trigger info of the histogram it corresponds to, along
+with the address of the hist_data associated with the histogram, which
+will become useful in later examples. It then displays the number of
+total hist_fields associated with the histogram along with a count of
+how many of those correspond to keys and how many correspond to values.
+
+It then goes on to display details for each field, including the
+field's flags and the position of each field in the hist_data's
+fields[] array, which is useful information for verifying that things
+internally appear correct or not, and which again will become even
+more useful in further examples::
+
+ # cat events/kmem/kmalloc/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=common_pid,call_site.sym:vals=hitcount,bytes_req,bytes_alloc:sort=hitcount:size=2048 [active]
+ #
+
+ hist_data: 000000005e48c9a5
+
+ n_vals: 3
+ n_keys: 2
+ n_fields: 5
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ VAL: normal u64 value
+ ftrace_event_field name: bytes_req
+ type: size_t
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[2]:
+ flags:
+ VAL: normal u64 value
+ ftrace_event_field name: bytes_alloc
+ type: size_t
+ size: 8
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[3]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: common_pid
+ type: int
+ size: 8
+ is_signed: 1
+
+ hist_data->fields[4]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: call_site
+ type: unsigned long
+ size: 8
+ is_signed: 0
+
+The commands below can be used to clean things up for the next test::
+
+ # echo '!hist:keys=common_pid,call_site.sym:values=bytes_req,bytes_alloc,hitcount' >> events/kmem/kmalloc/trigger
+
+Variables
+=========
+
+Variables allow data to be saved by one hist trigger and retrieved by
+another hist trigger. For example, a trigger on the sched_waking event
+can capture a timestamp for a particular pid, and later a sched_switch
+event that switches to that pid can grab the timestamp and use it to
+calculate a time delta between
+the two events::
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >>
+ events/sched/sched_waking/trigger
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >>
+ events/sched/sched_switch/trigger
+
+In terms of the histogram data structures, variables are implemented
+as another type of hist_field and for a given hist trigger are added
+to the hist_data.fields[] array just after all the val fields. To
+distinguish them from the existing key and val fields, they're given a
+new flag type, HIST_FIELD_FL_VAR (abbreviated FL_VAR) and they also
+make use of a new .var.idx field member in struct hist_field, which
+maps them to an index in a new map_elt.vars[] array added to the
+map_elt specifically designed to store and retrieve variable values.
+The diagram below shows those new elements and adds a new variable
+entry, ts0, corresponding to the ts0 variable in the sched_waking
+trigger above.
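+
+In terms of the simplified sketch shown earlier, the additions are roughly
+as follows (illustrative fragment only; the real members live in
+kernel/trace/trace_events_hist.c and kernel/trace/tracing_map.h)::
+
+  struct hist_field {
+          /* ... members from the earlier sketch; flags may now
+           * include FL_VAR ...
+           */
+          struct {
+                  unsigned int idx;   /* slot in the map_elt's vars[] */
+          } var;
+  };
+
+  struct tracing_map_elt {
+          /* ... members from the earlier sketch ... */
+          u64 vars[8];                /* current variable values      */
+  };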
+
+sched_waking histogram
+----------------------::
+
+ +------------------+
+ | hist_data |<-------------------------------------------------------+
+ +------------------+ +-------------------+ |
+ | .fields[] |-->| val = hitcount | |
+ +----------------+ +-------------------+ |
+ | .map | | .size | |
+ +----------------+ +-----------------+ |
+ | .offset | |
+ +-----------------+ |
+ | .fn() | |
+ +-----------------+ |
+ | .flags | |
+ +-----------------+ |
+ | .var.idx | |
+ +-------------------+ |
+ | var = ts0 | |
+ +-------------------+ |
+ | .size | |
+ +-----------------+ |
+ | .offset | |
+ +-----------------+ |
+ | .fn() | |
+ +-----------------+ |
+ | .flags & FL_VAR | |
+ +-----------------+ |
+ | .var.idx |----------------------------+-+ |
+ +-----------------+ | | |
+ . | | |
+ . | | |
+ . | | |
+ +-------------------+ <--- n_vals | | |
+ | key = pid | | | |
+ +-------------------+ | | |
+ | .size | | | |
+ +-----------------+ | | |
+ | .offset | | | |
+ +-----------------+ | | |
+ | .fn() | | | |
+ +-----------------+ | | |
+ | .flags & FL_KEY | | | |
+ +-----------------+ | | |
+ | .var.idx | | | |
+ +-------------------+ <--- n_fields | | |
+ | unused | | | |
+ +-------------------+ | | |
+ | | | | |
+ +-----------------+ | | |
+ | | | | |
+ +-----------------+ | | |
+ | | | | |
+ +-----------------+ | | |
+ | | | | |
+ +-----------------+ | | |
+ | | | | |
+ +-----------------+ | | |
+ n_keys = n_fields - n_vals | | |
+ | | |
+
+This is very similar to the basic case. In the above diagram, we can | | |
+see a new .flags member has been added to the struct hist_field | | |
+struct, and a new entry added to hist_data.fields representing the ts0 | | |
+variable. For a normal val hist_field, .flags is just 0 (modulo | | |
+modifier flags), but if the value is defined as a variable, the .flags | | |
+contains a set FL_VAR bit. | | |
+
+As you can see, the ts0 entry's .var.idx member contains the index | | |
+into the tracing_map_elts' .vars[] array containing variable values. | | |
+This idx is used whenever the value of the variable is set or read. | | |
+The map_elt.vars idx assigned to the given variable is assigned and | | |
+saved in .var.idx by create_tracing_map_fields() after it calls | | |
+tracing_map_add_var(). | | |
+
+Below is a representation of the histogram at run-time, which | | |
+populates the map, along with correspondence to the above hist_data and | | |
+hist_field data structures. | | |
+
+The diagram attempts to show the relationship between the | | |
+hist_data.fields[] and the map_elt.fields[] and map_elt.vars[] with | | |
+the links drawn between diagrams. For each of the map_elts, you can | | |
+see that the .fields[] members point to the .sum or .offset of a key | | |
+or val and the .vars[] members point to the value of a variable. The | | |
+arrows between the two diagrams show the linkages between those | | |
+tracing_map members and the field definitions in the corresponding | | |
+hist_data fields[] members.::
+
+ +-----------+ | | |
+ | hist_data | | | |
+ +-----------+ | | |
+ | .fields | | | |
+ +---------+ +-----------+ | | |
+ | .map |---->| map_entry | | | |
+ +---------+ +-----------+ | | |
+ | .key |---> 0 | | |
+ +---------+ | | |
+ | .val |---> NULL | | |
+ +-----------+ | | |
+ | map_entry | | | |
+ +-----------+ | | |
+ | .key |---> pid = 999 | | |
+ +---------+ +-----------+ | | |
+ | .val |--->| map_elt | | | |
+ +---------+ +-----------+ | | |
+ . | .key |---> full key * | | |
+ . +---------+ +---------------+ | | |
+ . | .fields |--->| .sum (val) | | | |
+ . +---------+ | 2345 | | | |
+ . +--| .vars | +---------------+ | | |
+ . | +---------+ | .offset (key) | | | |
+ . | | 0 | | | |
+ . | +---------------+ | | |
+ . | . | | |
+ . | . | | |
+ . | . | | |
+ . | +---------------+ | | |
+ . | | .sum (val) or | | | |
+ . | | .offset (key) | | | |
+ . | +---------------+ | | |
+ . | | .sum (val) or | | | |
+ . | | .offset (key) | | | |
+ . | +---------------+ | | |
+ . | | | |
+ . +---------------->+---------------+ | | |
+ . | ts0 |<--+ | |
+ . | 113345679876 | | | |
+ . +---------------+ | | |
+ . | unused | | | |
+ . | | | | |
+ . +---------------+ | | |
+ . . | | |
+ . . | | |
+ . . | | |
+ . +---------------+ | | |
+ . | unused | | | |
+ . | | | | |
+ . +---------------+ | | |
+ . | unused | | | |
+ . | | | | |
+ . +---------------+ | | |
+ . | | |
+ +-----------+ | | |
+ | map_entry | | | |
+ +-----------+ | | |
+ | .key |---> pid = 4444 | | |
+ +---------+ +-----------+ | | |
+ | .val |--->| map_elt | | | |
+ +---------+ +-----------+ | | |
+ . | .key |---> full key * | | |
+ . +---------+ +---------------+ | | |
+ . | .fields |--->| .sum (val) | | | |
+ +---------+ | 2345 | | | |
+ +--| .vars | +---------------+ | | |
+ | +---------+ | .offset (key) | | | |
+ | | 0 | | | |
+ | +---------------+ | | |
+ | . | | |
+ | . | | |
+ | . | | |
+ | +---------------+ | | |
+ | | .sum (val) or | | | |
+ | | .offset (key) | | | |
+ | +---------------+ | | |
+ | | .sum (val) or | | | |
+ | | .offset (key) | | | |
+ | +---------------+ | | |
+ | | | |
+ | +---------------+ | | |
+ +---------------->| ts0 |<--+ | |
+ | 213499240729 | | |
+ +---------------+ | |
+ | unused | | |
+ | | | |
+ +---------------+ | |
+ . | |
+ . | |
+ . | |
+ +---------------+ | |
+ | unused | | |
+ | | | |
+ +---------------+ | |
+ | unused | | |
+ | | | |
+ +---------------+ | |
+
+For each used map entry, there's a map_elt pointing to an array of | |
+.vars containing the current value of the variables associated with | |
+that histogram entry. So in the above, the timestamp associated with | |
+pid 999 is 113345679876, and the timestamp variable in the same | |
+.var.idx for pid 4444 is 213499240729. | |
+
+sched_switch histogram | |
+---------------------- | |
+
+The sched_switch histogram paired with the above sched_waking | |
+histogram is shown below. The most important aspect of the | |
+sched_switch histogram is that it references a variable on the | |
+sched_waking histogram above. | |
+
+The histogram diagram is very similar to the others so far displayed, | |
+but it adds variable references. You can see the normal hitcount and | |
+key fields along with a new wakeup_lat variable implemented in the | |
+same way as the sched_waking ts0 variable, but in addition there's an | |
+entry with the new FL_VAR_REF (short for HIST_FIELD_FL_VAR_REF) flag. | |
+
+Associated with the new var ref field are a couple of new hist_field | |
+members, var.hist_data and var_ref_idx. For a variable reference, the | |
+var.hist_data goes with the var.idx, which together uniquely identify | |
+a particular variable on a particular histogram. The var_ref_idx is | |
+just the index into the var_ref_vals[] array that caches the values of | |
+each variable whenever a hist trigger is updated. Those resulting | |
+values are then finally accessed by other code such as trace action | |
+code that uses the var_ref_idx values to assign param values. | |
+
+The diagram below describes the situation for the sched_switch | |
+histogram referred to before::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> | |
+ events/sched/sched_switch/trigger | |
+ | |
+ +------------------+ | |
+ | hist_data | | |
+ +------------------+ +-----------------------+ | |
+ | .fields[] |-->| val = hitcount | | |
+ +----------------+ +-----------------------+ | |
+ | .map | | .size | | |
+ +----------------+ +---------------------+ | |
+ +--| .var_refs[] | | .offset | | |
+ | +----------------+ +---------------------+ | |
+ | | .fn() | | |
+ | var_ref_vals[] +---------------------+ | |
+ | +-------------+ | .flags | | |
+ | | $ts0 |<---+ +---------------------+ | |
+ | +-------------+ | | .var.idx | | |
+ | | | | +---------------------+ | |
+ | +-------------+ | | .var.hist_data | | |
+ | | | | +---------------------+ | |
+ | +-------------+ | | .var_ref_idx | | |
+ | | | | +-----------------------+ | |
+ | +-------------+ | | var = wakeup_lat | | |
+ | . | +-----------------------+ | |
+ | . | | .size | | |
+ | . | +---------------------+ | |
+ | +-------------+ | | .offset | | |
+ | | | | +---------------------+ | |
+ | +-------------+ | | .fn() | | |
+ | | | | +---------------------+ | |
+ | +-------------+ | | .flags & FL_VAR | | |
+ | | +---------------------+ | |
+ | | | .var.idx | | |
+ | | +---------------------+ | |
+ | | | .var.hist_data | | |
+ | | +---------------------+ | |
+ | | | .var_ref_idx | | |
+ | | +---------------------+ | |
+ | | . | |
+ | | . | |
+ | | . | |
+ | | +-----------------------+ <--- n_vals | |
+ | | | key = pid | | |
+ | | +-----------------------+ | |
+ | | | .size | | |
+ | | +---------------------+ | |
+ | | | .offset | | |
+ | | +---------------------+ | |
+ | | | .fn() | | |
+ | | +---------------------+ | |
+ | | | .flags | | |
+ | | +---------------------+ | |
+ | | | .var.idx | | |
+ | | +-----------------------+ <--- n_fields | |
+ | | | unused | | |
+ | | +-----------------------+ | |
+ | | | | | |
+ | | +---------------------+ | |
+ | | | | | |
+ | | +---------------------+ | |
+ | | | | | |
+ | | +---------------------+ | |
+ | | | | | |
+ | | +---------------------+ | |
+ | | | | | |
+ | | +---------------------+ | |
+ | | n_keys = n_fields - n_vals | |
+ | | | |
+ | | | |
+ | | +-----------------------+ | |
+ +---------------------->| var_ref = $ts0 | | |
+ | +-----------------------+ | |
+ | | .size | | |
+ | +---------------------+ | |
+ | | .offset | | |
+ | +---------------------+ | |
+ | | .fn() | | |
+ | +---------------------+ | |
+ | | .flags & FL_VAR_REF | | |
+ | +---------------------+ | |
+ | | .var.idx |--------------------------+ |
+ | +---------------------+ |
+ | | .var.hist_data |----------------------------+
+ | +---------------------+
+ +---| .var_ref_idx |
+ +---------------------+
+
+Abbreviations used in the diagrams::
+
+ hist_data = struct hist_trigger_data
+ hist_data.fields = struct hist_field
+ fn = hist_field_fn_t
+ FL_KEY = HIST_FIELD_FL_KEY
+ FL_VAR = HIST_FIELD_FL_VAR
+ FL_VAR_REF = HIST_FIELD_FL_VAR_REF
+
+When a hist trigger makes use of a variable, a new hist_field is
+created with flag HIST_FIELD_FL_VAR_REF. For a VAR_REF field, the
+var.idx and var.hist_data take the same values as the referenced
+variable, as well as the referenced variable's size, type, and
+is_signed values. The VAR_REF field's .name is set to the name of the
+variable it references. If a variable reference was created using the
+explicit system.event.$var_ref notation, the hist_field's system and
+event_name variables are also set.
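+
+As a rough sketch of that setup (a simplified model, not the actual
+kernel code; member names follow the abbreviations used in this
+document, and var_hist_data stands for the hist_data of the histogram
+that defines the variable)::
+
+  /* Model only - simplified from the VAR_REF setup in
+   * kernel/trace/trace_events_hist.c. */
+  ref->flags |= HIST_FIELD_FL_VAR_REF;
+  ref->var.idx = var->var.idx;        /* same tracing_map_elt.vars[] slot */
+  ref->var.hist_data = var_hist_data; /* histogram owning the variable */
+  ref->size = var->size;
+  ref->type = var->type;
+  ref->is_signed = var->is_signed;
+  ref->name = var->var.name;          /* e.g. "ts0" */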
+
+So, in order to handle an event for the sched_switch histogram, we
+need to resolve all variable references first, because we have a
+reference to a variable on another histogram. This is done via the
+resolve_var_refs() calls made from event_hist_trigger(). What this
+does is grab the var_refs[] array from the hist_data representing
+the sched_switch histogram. For each of those references, the
+referenced variable's var.hist_data along with the current key is
+used to look up the corresponding tracing_map_elt in that histogram.
+Once found, the referenced variable's var.idx is used to look up the
+variable's value using tracing_map_read_var(elt, var.idx), which
+yields the value of the variable for that element, ts0 in the case
+above. Note that the hist_fields representing both the variable and
+the variable reference have the same var.idx, so this is
+straightforward.
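+
+As a hedged sketch of that sequence (a simplified model, not the
+exact resolve_var_refs() code; names follow the abbreviations used in
+this document)::
+
+  /* Model of variable-reference resolution for one histogram event. */
+  for (i = 0; i < hist_data->n_var_refs; i++) {
+          struct hist_field *ref = hist_data->var_refs[i];
+
+          /* Look up this key's element on the histogram that owns
+           * the referenced variable. */
+          elt = tracing_map_lookup(ref->var.hist_data->map, key);
+          if (!elt)
+                  return false;    /* no matching entry (yet) */
+
+          /* Read the variable's current value and cache it in the
+           * slot reserved for this reference. */
+          var_ref_vals[ref->var_ref_idx] =
+                  tracing_map_read_var(elt, ref->var.idx);
+  }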
+
+Variable and variable reference test
+------------------------------------
+
+This example creates a variable on the sched_waking event, ts0, and
+uses it in the sched_switch trigger. The sched_switch trigger also
+creates its own variable, wakeup_lat, but nothing yet uses it::
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> events/sched/sched_switch/trigger
+
+Looking at the sched_waking 'hist_debug' output, in addition to the
+normal key and value hist_fields, in the val fields section we see a
+field with the HIST_FIELD_FL_VAR flag, which indicates that that field
+represents a variable. Note that in addition to the variable name,
+contained in the var.name field, it includes the var.idx, which is the
+index into the tracing_map_elt.vars[] array of the actual variable
+location. Note also that the output shows that variables live in the
+same part of the hist_data->fields[] array as normal values::
+
+ # cat events/sched/sched_waking/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active]
+ #
+
+ hist_data: 000000009536f554
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+Moving on to the sched_switch trigger hist_debug output, in addition
+to the unused wakeup_lat variable, we see a new section displaying
+variable references. Variable references are displayed in a separate
+section because in addition to being logically separate from
+variables and values, they actually live in a separate hist_data
+array, var_refs[].
+
+In this example, the sched_switch trigger has a reference to a
+variable on the sched_waking trigger, $ts0. Looking at the details,
+we can see that the var.hist_data value of the referenced variable
+matches the previously displayed sched_waking trigger, and the var.idx
+value matches the previously displayed var.idx value for that
+variable. Also displayed is the var_ref_idx value for that variable
+reference, which is where the value for that variable is cached for
+use when the trigger is invoked::
+
+ # cat events/sched/sched_switch/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global [active]
+ #
+
+ hist_data: 00000000f4ee8006
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 0
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+ variable reference fields:
+
+ hist_data->var_refs[0]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 000000009536f554
+ var_ref_idx (into hist_data->var_refs[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+The commands below can be used to clean things up for the next test::
+
+ # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0' >> events/sched/sched_switch/trigger
+
+ # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+Actions and Handlers
+====================
+
+Adding onto the previous example, we will now do something with that
+wakeup_lat variable, namely send it and another field as a synthetic
+event.
+
+The onmatch() action below basically says that whenever we have a
+sched_switch event, if we have a matching sched_waking event, in this
+case if we have a pid in the sched_waking histogram that matches the
+next_pid field on this sched_switch event, we retrieve the
+variables specified in the wakeup_latency() trace action, and use
+them to generate a new wakeup_latency event into the trace stream.
+
+Note that because of the way the trace handlers such as
+wakeup_latency() (which could equivalently be written
+trace(wakeup_latency,$wakeup_lat,next_pid)) are implemented, the
+parameters specified to the trace handler must be
+variables. In this case, $wakeup_lat is obviously a variable, but
+next_pid isn't, since it's just naming a field in the sched_switch
+trace event. Since this is something that almost every trace() and
+save() action does, a special shortcut is implemented to allow field
+names to be used directly in those cases. How it works is that under
+the covers, a temporary variable is created for the named field, and
+this variable is what is actually passed to the trace handler. In the
+code and documentation, this type of variable is called a 'field
+variable'.
+
+Fields on other trace events' histograms can be used as well. In that
+case we have to generate a new histogram and an unfortunately named
+'synthetic_field' (the use of synthetic here has nothing to do with
+synthetic events) and use that special histogram field as a variable.
+
+The diagram below illustrates the new elements described above in the
+context of the sched_switch histogram using the onmatch() handler and
+the trace() action.
+
+First, we define the wakeup_latency synthetic event::
+
+ # echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
+
+Next, the sched_waking hist trigger as before::
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >>
+ events/sched/sched_waking/trigger
+
+Finally, we create a hist trigger on the sched_switch event that
+generates a wakeup_latency() trace event. In this case we pass
+next_pid into the wakeup_latency synthetic event invocation, which
+means it will be automatically converted into a field variable::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \
+ onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid)' >>
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+The diagram for the sched_switch event is similar to previous examples
+but shows the additional field_vars[] array for hist_data and shows
+the linkages between the field_vars and the variables and references
+created to implement the field variables. The details are discussed
+below::
+
+ +------------------+
+ | hist_data |
+ +------------------+ +-----------------------+
+ | .fields[] |-->| val = hitcount |
+ +----------------+ +-----------------------+
+ | .map | | .size |
+ +----------------+ +---------------------+
+ +---| .field_vars[] | | .offset |
+ | +----------------+ +---------------------+
+ |+--| .var_refs[] | | .offset |
+ || +----------------+ +---------------------+
+ || | .fn() |
+ || var_ref_vals[] +---------------------+
+ || +-------------+ | .flags |
+ || | $ts0 |<---+ +---------------------+
+ || +-------------+ | | .var.idx |
+ || | $next_pid |<-+ | +---------------------+
+ || +-------------+ | | | .var.hist_data |
+ ||+>| $wakeup_lat | | | +---------------------+
+ ||| +-------------+ | | | .var_ref_idx |
+ ||| | | | | +-----------------------+
+ ||| +-------------+ | | | var = wakeup_lat |
+ ||| . | | +-----------------------+
+ ||| . | | | .size |
+ ||| . | | +---------------------+
+ ||| +-------------+ | | | .offset |
+ ||| | | | | +---------------------+
+ ||| +-------------+ | | | .fn() |
+ ||| | | | | +---------------------+
+ ||| +-------------+ | | | .flags & FL_VAR |
+ ||| | | +---------------------+
+ ||| | | | .var.idx |
+ ||| | | +---------------------+
+ ||| | | | .var.hist_data |
+ ||| | | +---------------------+
+ ||| | | | .var_ref_idx |
+ ||| | | +---------------------+
+ ||| | | .
+ ||| | | .
+ ||| | | .
+ ||| | | .
+ ||| +--------------+ | | .
+ +-->| field_var | | | .
+ || +--------------+ | | .
+ || | var | | | .
+ || +------------+ | | .
+ || | val | | | .
+ || +--------------+ | | .
+ || | field_var | | | .
+ || +--------------+ | | .
+ || | var | | | .
+ || +------------+ | | .
+ || | val | | | .
+ || +------------+ | | .
+ || . | | .
+ || . | | .
+ || . | | +-----------------------+ <--- n_vals
+ || +--------------+ | | | key = pid |
+ || | field_var | | | +-----------------------+
+ || +--------------+ | | | .size |
+ || | var |--+| +---------------------+
+ || +------------+ ||| | .offset |
+ || | val |-+|| +---------------------+
+ || +------------+ ||| | .fn() |
+ || ||| +---------------------+
+ || ||| | .flags |
+ || ||| +---------------------+
+ || ||| | .var.idx |
+ || ||| +---------------------+ <--- n_fields
+ || |||
+ || ||| n_keys = n_fields - n_vals
+ || ||| +-----------------------+
+ || |+->| var = next_pid |
+ || | | +-----------------------+
+ || | | | .size |
+ || | | +---------------------+
+ || | | | .offset |
+ || | | +---------------------+
+ || | | | .flags & FL_VAR |
+ || | | +---------------------+
+ || | | | .var.idx |
+ || | | +---------------------+
+ || | | | .var.hist_data |
+ || | | +-----------------------+
+ || +-->| val for next_pid |
+ || | | +-----------------------+
+ || | | | .size |
+ || | | +---------------------+
+ || | | | .offset |
+ || | | +---------------------+
+ || | | | .fn() |
+ || | | +---------------------+
+ || | | | .flags |
+ || | | +---------------------+
+ || | | | |
+ || | | +---------------------+
+ || | |
+ || | |
+ || | | +-----------------------+
+ +|------------------|-|>| var_ref = $ts0 |
+ | | | +-----------------------+
+ | | | | .size |
+ | | | +---------------------+
+ | | | | .offset |
+ | | | +---------------------+
+ | | | | .fn() |
+ | | | +---------------------+
+ | | | | .flags & FL_VAR_REF |
+ | | | +---------------------+
+ | | +---| .var_ref_idx |
+ | | +-----------------------+
+ | | | var_ref = $next_pid |
+ | | +-----------------------+
+ | | | .size |
+ | | +---------------------+
+ | | | .offset |
+ | | +---------------------+
+ | | | .fn() |
+ | | +---------------------+
+ | | | .flags & FL_VAR_REF |
+ | | +---------------------+
+ | +-----| .var_ref_idx |
+ | +-----------------------+
+ | | var_ref = $wakeup_lat |
+ | +-----------------------+
+ | | .size |
+ | +---------------------+
+ | | .offset |
+ | +---------------------+
+ | | .fn() |
+ | +---------------------+
+ | | .flags & FL_VAR_REF |
+ | +---------------------+
+ +------------------------| .var_ref_idx |
+ +---------------------+
+
+As you can see, for a field variable, two hist_fields are created: one
+representing the variable, in this case next_pid, and one to actually
+get the value of the field from the trace stream, like a normal val
+field does. These are created separately from normal variable
+creation and are saved in the hist_data->field_vars[] array. See
+below for how these are used. In addition, a reference hist_field is
+also created, which is needed to reference field variables such as
+the $next_pid variable in the trace() action.
+
+Note that $wakeup_lat is also a variable reference, referencing the
+value of the expression common_timestamp-$ts0, and so also needs to
+have a hist field entry representing that reference created.
+
+When hist_trigger_elt_update() is called to get the normal key and
+value fields, it also calls update_field_vars(), which goes through
+each field_var created for the histogram (available from
+hist_data->field_vars), calls val->fn() to get the data from the
+current trace record, and then uses the var's var.idx to store that
+value in the appropriate tracing_map_elt's variable array, at
+elt->vars[var.idx].
+
+Once all the variables have been updated, resolve_var_refs() can be
+called from event_hist_trigger(), and not only can our $ts0 and
+$next_pid references be resolved but the $wakeup_lat reference as
+well. At this point, the trace() action can simply access the values
+assembled in the var_ref_vals[] array and generate the trace event.
+
+The same process occurs for the field variables associated with the
+save() action.
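+
+Putting the above together, a hedged sketch of the per-event flow
+(heavily simplified; the real code is spread across
+hist_trigger_elt_update(), update_field_vars() and the action code,
+and read_field_val() below is a hypothetical stand-in for calling the
+field's val->fn() accessor on the current record)::
+
+  /* 1) update_field_vars(): capture the named fields as variables. */
+  for (i = 0; i < hist_data->n_field_vars; i++) {
+          struct field_var *field_var = hist_data->field_vars[i];
+
+          /* read_field_val(): stand-in for field_var->val->fn() */
+          u64 val = read_field_val(field_var->val, rec);
+
+          elt->vars[field_var->var->var.idx] = val;
+  }
+
+  /* 2) resolve_var_refs(): gather $wakeup_lat, $next_pid, etc. into
+   *    var_ref_vals[], as sketched earlier. */
+
+  /* 3) trace() action: generate the synthetic event from the values
+   *    assembled in var_ref_vals[]. */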
+
+Abbreviations used in the diagram::
+
+ hist_data = struct hist_trigger_data
+ hist_data.fields = struct hist_field
+ field_var = struct field_var
+ fn = hist_field_fn_t
+ FL_KEY = HIST_FIELD_FL_KEY
+ FL_VAR = HIST_FIELD_FL_VAR
+ FL_VAR_REF = HIST_FIELD_FL_VAR_REF
+
+trace() action field variable test
+----------------------------------
+
+This example adds to the previous test example by finally making use
+of the wakeup_lat variable, but in addition also creates a couple of
+field variables that then are all passed to the wakeup_latency() trace
+action via the onmatch() handler.
+
+First, we create the wakeup_latency synthetic event::
+
+ # echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
+
+Next, the sched_waking trigger from previous examples::
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+Finally, as in the previous test example, we calculate and assign the
+wakeup latency using the $ts0 reference from the sched_waking trigger
+to the wakeup_lat variable, and finally use it along with a couple
+sched_switch event fields, next_pid and next_comm, to generate a
+wakeup_latency trace event. The next_pid and next_comm event fields
+are automatically converted into field variables for this purpose::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm)' >> /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+The sched_waking hist_debug output shows the same data as in the
+previous test example::
+
+ # cat events/sched/sched_waking/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active]
+ #
+
+ hist_data: 00000000d60ff61f
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+The sched_switch hist_debug output shows the same key and value fields
+as in the previous test example - note that wakeup_lat is still in the
+val fields section, but that the new field variables are not there -
+although the field variables are variables, they're held separately in
+the hist_data's field_vars[] array. Although the field variables and
+the normal variables are located in separate places, you can see that
+the actual variable locations for those variables in the
+tracing_map_elt.vars[] do have increasing indices as expected:
+wakeup_lat takes the var.idx = 0 slot, while the field variables for
+next_pid and next_comm have values var.idx = 1, and var.idx = 2. Note
+also that those are the same values displayed for the variable
+references corresponding to those variables in the variable reference
+fields section. Since there are two triggers and thus two hist_data
+addresses, those addresses also need to be accounted for when doing
+the matching - you can see that the first variable refers to the 0
+var.idx on the previous hist trigger (see the hist_data address
+associated with that trigger), while the second variable refers to the
+0 var.idx on the sched_switch hist trigger, as do all the remaining
+variable references.
+
+Finally, the action tracking variables section just shows the system
+and event name for the onmatch() handler::
+
+ # cat events/sched/sched_switch/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm) [active]
+ #
+
+ hist_data: 0000000008f551b7
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 0
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+ variable reference fields:
+
+ hist_data->var_refs[0]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 00000000d60ff61f
+ var_ref_idx (into hist_data->var_refs[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->var_refs[1]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 0000000008f551b7
+ var_ref_idx (into hist_data->var_refs[]): 1
+ type: u64
+ size: 0
+ is_signed: 0
+
+ hist_data->var_refs[2]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: next_pid
+ var.idx (into tracing_map_elt.vars[]): 1
+ var.hist_data: 0000000008f551b7
+ var_ref_idx (into hist_data->var_refs[]): 2
+ type: pid_t
+ size: 4
+ is_signed: 0
+
+ hist_data->var_refs[3]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: next_comm
+ var.idx (into tracing_map_elt.vars[]): 2
+ var.hist_data: 0000000008f551b7
+ var_ref_idx (into hist_data->var_refs[]): 3
+ type: char[16]
+ size: 256
+ is_signed: 0
+
+ field variables:
+
+ hist_data->field_vars[0]:
+
+ field_vars[0].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: next_pid
+ var.idx (into tracing_map_elt.vars[]): 1
+
+ field_vars[0].val:
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ hist_data->field_vars[1]:
+
+ field_vars[1].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: next_comm
+ var.idx (into tracing_map_elt.vars[]): 2
+
+ field_vars[1].val:
+ ftrace_event_field name: next_comm
+ type: char[16]
+ size: 256
+ is_signed: 0
+
+ action tracking variables (for onmax()/onchange()/onmatch()):
+
+ hist_data->actions[0].match_data.event_system: sched
+ hist_data->actions[0].match_data.event: sched_waking
+
+The commands below can be used to clean things up for the next test::
+
+ # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,next_comm)' >> /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+ # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+ # echo '!wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
+
+action_data and the trace() action
+----------------------------------
+
+As mentioned above, when the trace() action generates a synthetic
+event, all the parameters to the synthetic event either already are
+variables or are converted into variables (via field variables), and
+finally all those variable values are collected via references to them
+into a var_ref_vals[] array.
+
+The values in the var_ref_vals[] array, however, don't necessarily
+follow the same ordering as the synthetic event params. To address
+that, struct action_data contains another array, var_ref_idx[] that
+maps the trace action params to the var_ref_vals[] values. Below is a
+diagram illustrating that for the wakeup_latency() synthetic event::
+
+ +------------------+ wakeup_latency()
+ | action_data | event params var_ref_vals[]
+ +------------------+ +-----------------+ +-----------------+
+ | .var_ref_idx[] |--->| $wakeup_lat idx |---+ | |
+ +----------------+ +-----------------+ | +-----------------+
+ | .synth_event | | $next_pid idx |---|-+ | $wakeup_lat val |
+ +----------------+ +-----------------+ | | +-----------------+
+ . | +->| $next_pid val |
+ . | +-----------------+
+ . | .
+ +-----------------+ | .
+ | | | .
+ +-----------------+ | +-----------------+
+ +--->| $wakeup_lat val |
+ +-----------------+
+
+Basically, how this ends up getting used in the synthetic event probe
+function, trace_event_raw_event_synth(), is as follows::
+
+ for each field i in .synth_event
+ val_idx = .var_ref_idx[i]
+ val = var_ref_vals[val_idx]
+
+action_data and the onXXX() handlers
+------------------------------------
+
+The hist trigger onXXX() actions other than onmatch(), such as onmax()
+and onchange(), also make use of and internally create hidden
+variables. This information is contained in the
+action_data.track_data struct, and is also visible in the hist_debug
+output as will be described in the example below.
+
+Typically, the onmax() or onchange() handlers are used in conjunction
+with the save() and snapshot() actions. For example::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \
+ onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' >>
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+or::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \
+ onmax($wakeup_lat).snapshot()' >>
+ /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
+
+save() action field variable test
+---------------------------------
+
+For this example, instead of generating a synthetic event, the save()
+action is used to save field values whenever an onmax() handler
+detects that a new max latency has been hit. As in the previous
+example, the values being saved are also field values, but in this
+case, are kept in a separate hist_data array named save_vars[].
+
+As in previous test examples, we set up the sched_waking trigger::
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+In this case, however, we set up the sched_switch trigger to save some
+sched_switch field values whenever we hit a new maximum latency. For
+both the onmax() handler and save() action, variables will be created,
+which we can then examine using the hist_debug files::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' >> events/sched/sched_switch/trigger
+
+The sched_waking hist_debug output shows the same data as in the
+previous test examples::
+
+ # cat events/sched/sched_waking/hist_debug
+
+ #
+ # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active]
+ #
+
+ hist_data: 00000000e6290f48
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+The output of the sched_switch trigger shows the same val and key
+values as before, but also shows a couple new sections.
+
+First, the action tracking variables section now shows the
+actions[].track_data information describing the special tracking
+variables and references used to track, in this case, the running
+maximum value. The actions[].track_data.var_ref member contains the
+reference to the variable being tracked, in this case the $wakeup_lat
+variable. In order to perform the onmax() handler function, there
+also needs to be a variable that tracks the current maximum by getting
+updated whenever a new maximum is hit. In this case, we can see that
+an auto-generated variable named '__max' has been created and is
+visible in the actions[].track_data.track_var variable.
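+
+As a hedged sketch of the tracking step and the save() that follows
+(described next; this is a simplified model, not the kernel's onmax()
+implementation, and read_field_val() is again a hypothetical stand-in
+for the field's val->fn() accessor)::
+
+  /* Model of onmax($wakeup_lat).save(...) for one histogram entry. */
+  u64 track_val = var_ref_vals[track_data->var_ref->var_ref_idx];
+  u64 cur_max = elt->vars[track_data->track_var->var.idx];
+
+  if (track_val > cur_max) {
+          /* New maximum: remember it in the __max variable ... */
+          elt->vars[track_data->track_var->var.idx] = track_val;
+
+          /* ... and save the save() params for this entry. */
+          for (i = 0; i < hist_data->n_save_vars; i++) {
+                  struct field_var *save_var = hist_data->save_vars[i];
+
+                  elt->vars[save_var->var->var.idx] =
+                          read_field_val(save_var->val, rec);
+          }
+  }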
+
+Finally, in the new 'save action variables' section, we can see that
+the 4 params to the save() function have resulted in 4 field variables
+being created for the purposes of saving the values of the named
+fields when the max is hit. These variables are kept in a separate
+save_vars[] array off of hist_data, so they are displayed in a
+separate section::
+
+ # cat events/sched/sched_switch/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm) [active]
+ #
+
+ hist_data: 0000000057bcd28d
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 0
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+ variable reference fields:
+
+ hist_data->var_refs[0]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 00000000e6290f48
+ var_ref_idx (into hist_data->var_refs[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->var_refs[1]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 0000000057bcd28d
+ var_ref_idx (into hist_data->var_refs[]): 1
+ type: u64
+ size: 0
+ is_signed: 0
+
+ action tracking variables (for onmax()/onchange()/onmatch()):
+
+ hist_data->actions[0].track_data.var_ref:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 0000000057bcd28d
+ var_ref_idx (into hist_data->var_refs[]): 1
+ type: u64
+ size: 0
+ is_signed: 0
+
+ hist_data->actions[0].track_data.track_var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: __max
+ var.idx (into tracing_map_elt.vars[]): 1
+ type: u64
+ size: 8
+ is_signed: 0
+
+ save action variables (save() params):
+
+ hist_data->save_vars[0]:
+
+ save_vars[0].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: next_comm
+ var.idx (into tracing_map_elt.vars[]): 2
+
+ save_vars[0].val:
+ ftrace_event_field name: next_comm
+ type: char[16]
+ size: 256
+ is_signed: 0
+
+ hist_data->save_vars[1]:
+
+ save_vars[1].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: prev_pid
+ var.idx (into tracing_map_elt.vars[]): 3
+
+ save_vars[1].val:
+ ftrace_event_field name: prev_pid
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ hist_data->save_vars[2]:
+
+ save_vars[2].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: prev_prio
+ var.idx (into tracing_map_elt.vars[]): 4
+
+ save_vars[2].val:
+ ftrace_event_field name: prev_prio
+ type: int
+ size: 4
+ is_signed: 1
+
+ hist_data->save_vars[3]:
+
+ save_vars[3].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: prev_comm
+ var.idx (into tracing_map_elt.vars[]): 5
+
+ save_vars[3].val:
+ ftrace_event_field name: prev_comm
+ type: char[16]
+ size: 256
+ is_signed: 0
+
+The commands below can be used to clean things up for the next test::
+
+ # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmax($wakeup_lat).save(next_comm,prev_pid,prev_prio,prev_comm)' >> events/sched/sched_switch/trigger
+
+ # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+A couple special cases
+======================
+
+While the above covers the basics of the histogram internals, there
+are a couple of special cases that should be discussed, since they
+tend to create even more confusion. Those are field variables on other
+histograms, and aliases, both described below through example tests
+using the hist_debug files.
+
+Test of field variables on other histograms
+-------------------------------------------
+
+This example is similar to the previous examples, but in this case,
+the sched_switch trigger references a hist trigger field on another
+event, namely the sched_waking event. In order to accomplish this, a
+field variable is created for the other event, but since existing
+histograms are immutable and can't have new variables added to them,
+a new histogram with a matching variable is created and used, and
+we'll see that reflected in the hist_debug output shown below.
+
+First, we create the wakeup_latency synthetic event. Note the
+addition of the prio field::
+
+ # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> synthetic_events
+
+As in previous test examples, we set up the sched_waking trigger::
+
+ # echo 'hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+Here we set up a hist trigger on sched_switch to send a wakeup_latency
+event using an onmatch handler naming the sched_waking event. Note
+that the third param being passed to wakeup_latency() is prio, which
+is a field name that needs to have a field variable created for it.
+There isn't, however, any prio field on the sched_switch event, so it
+would seem that it wouldn't be possible to create a field variable
+for it. The matching sched_waking event does have a prio field, so it
+should be possible to make use of it for this purpose. The problem
+with that is that it's not currently possible to define a new variable
+on an existing histogram, so it's not possible to add a new prio field
+variable to the existing sched_waking histogram. It is however
+possible to create an additional new 'matching' sched_waking histogram
+for the same event, meaning that it uses the same key and filters, and
+define the new prio field variable on that.
+
+Here's the sched_switch trigger::
+
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,prio)' >> events/sched/sched_switch/trigger
+
+And here's the output of the hist_debug information for the
+sched_waking hist trigger. Note that there are two histograms
+displayed in the output: the first is the normal sched_waking
+histogram we've seen in the previous examples, and the second is the
+special histogram we created to provide the prio field variable.
+
+Looking at the second histogram below, we see a variable with the name
+synthetic_prio. This is the field variable created for the prio field
+on that sched_waking histogram::
+
+ # cat events/sched/sched_waking/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=pid:vals=hitcount:ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active]
+ #
+
+ hist_data: 00000000349570e4
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+
+ # event histogram
+ #
+ # trigger info: hist:keys=pid:vals=hitcount:synthetic_prio=prio:sort=hitcount:size=2048 [active]
+ #
+
+ hist_data: 000000006920cf38
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ ftrace_event_field name: prio
+ var.name: synthetic_prio
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: int
+ size: 4
+ is_signed: 1
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+Looking at the sched_switch histogram below, we can see a reference to
+the synthetic_prio variable on sched_waking, and looking at the
+associated hist_data address we see that it is indeed associated with
+the new histogram. Note also that the other references are to a
+normal variable, wakeup_lat, and to a normal field variable, next_pid,
+the details of which are in the field variables section::
+
+ # cat events/sched/sched_switch/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=next_pid:vals=hitcount:wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,prio) [active]
+ #
+
+ hist_data: 00000000a73b67df
+
+ n_vals: 2
+ n_keys: 1
+ n_fields: 3
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: u64
+ size: 0
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+ variable reference fields:
+
+ hist_data->var_refs[0]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: ts0
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 00000000349570e4
+ var_ref_idx (into hist_data->var_refs[]): 0
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->var_refs[1]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 00000000a73b67df
+ var_ref_idx (into hist_data->var_refs[]): 1
+ type: u64
+ size: 0
+ is_signed: 0
+
+ hist_data->var_refs[2]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: next_pid
+ var.idx (into tracing_map_elt.vars[]): 1
+ var.hist_data: 00000000a73b67df
+ var_ref_idx (into hist_data->var_refs[]): 2
+ type: pid_t
+ size: 4
+ is_signed: 0
+
+ hist_data->var_refs[3]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: synthetic_prio
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 000000006920cf38
+ var_ref_idx (into hist_data->var_refs[]): 3
+ type: int
+ size: 4
+ is_signed: 1
+
+ field variables:
+
+ hist_data->field_vars[0]:
+
+ field_vars[0].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: next_pid
+ var.idx (into tracing_map_elt.vars[]): 1
+
+ field_vars[0].val:
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ action tracking variables (for onmax()/onchange()/onmatch()):
+
+ hist_data->actions[0].match_data.event_system: sched
+ hist_data->actions[0].match_data.event: sched_waking
+
+The commands below can be used to clean things up for the next test::
+
+ # echo '!hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,next_pid,prio)' >> events/sched/sched_switch/trigger
+
+ # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+ # echo '!wakeup_latency u64 lat; pid_t pid; int prio' >> synthetic_events
+
+Alias test
+----------
+
+This example is very similar to previous examples, but demonstrates
+the alias flag.
+
+First, we create the wakeup_latency synthetic event::
+
+ # echo 'wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
+
+Next, we create a sched_waking trigger similar to previous examples,
+but in this case we save the pid in the waking_pid variable::
+
+ # echo 'hist:keys=pid:waking_pid=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+For the sched_switch trigger, instead of using $waking_pid directly in
+the wakeup_latency synthetic event invocation, we create an alias of
+$waking_pid named $woken_pid, and use that in the synthetic event
+invocation instead::
+
+ # echo 'hist:keys=next_pid:woken_pid=$waking_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,$woken_pid,next_comm)' >> events/sched/sched_switch/trigger
+
+Looking at the sched_waking hist_debug output, in addition to the
+normal fields, we can see the waking_pid variable::
+
+ # cat events/sched/sched_waking/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=pid:vals=hitcount:waking_pid=pid,ts0=common_timestamp.usecs:sort=hitcount:size=2048:clock=global [active]
+ #
+
+ hist_data: 00000000a250528c
+
+ n_vals: 3
+ n_keys: 1
+ n_fields: 4
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ ftrace_event_field name: pid
+ var.name: waking_pid
+ var.idx (into tracing_map_elt.vars[]): 0
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: ts0
+ var.idx (into tracing_map_elt.vars[]): 1
+ type: u64
+ size: 8
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[3]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+The sched_switch hist_debug output shows that a variable named
+woken_pid has been created but that it also has the
+HIST_FIELD_FL_ALIAS flag set. It also has the HIST_FIELD_FL_VAR flag
+set, which is why it appears in the val field section.
+
+Despite that implementation detail, an alias variable is actually more
+like a variable reference; in fact it can be thought of as a reference
+to a reference. The implementation copies the var_ref->fn() from the
+variable reference being referenced (in this case the waking_pid
+fn(), which is hist_field_var_ref()) and makes that the fn() of the
+alias. The hist_field_var_ref() fn() requires the var_ref_idx of the
+variable reference it's using, so waking_pid's var_ref_idx is also
+copied to the alias. The end result is that when the value of the
+alias is retrieved, it just does the same thing the original
+reference would have done and retrieves the same value from the
+var_ref_vals[] array. You can verify this in the output by noting
+that the var_ref_idx of the alias, in this case woken_pid, is the same
+as the var_ref_idx of the reference, waking_pid, in the variable
+reference fields section.
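+
+A hedged sketch of the alias mechanics just described (a simplified
+model, not the actual HIST_FIELD_FL_ALIAS code)::
+
+  /* Setup: the alias borrows the reference's accessor and slot. */
+  alias->fn = var_ref->fn;                   /* hist_field_var_ref() */
+  alias->var_ref_idx = var_ref->var_ref_idx; /* same var_ref_vals[] slot */
+
+  /* At event time, reading the alias therefore reads the same cached
+   * value the original reference would have read. */
+  u64 val = var_ref_vals[alias->var_ref_idx];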
+
+Additionally, once it gets that value, since it is also a variable, it
+then saves that value into its var.idx. So the var.idx of the
+woken_pid alias is 0, which it fills with the value from var_ref_idx 0
+when its fn() is called to update itself. You'll also notice that
+there's a woken_pid var_ref in the variable refs section. That is the
+reference to the woken_pid alias variable, and you can see that it
+retrieves the value from the same var.idx as the woken_pid alias, 0,
+and then in turn saves that value in its own var_ref_idx slot, 3, and
+the value at this position is finally what gets assigned to the
+$woken_pid slot in the trace event invocation::
+
+ # cat events/sched/sched_switch/hist_debug
+
+ # event histogram
+ #
+ # trigger info: hist:keys=next_pid:vals=hitcount:woken_pid=$waking_pid,wakeup_lat=common_timestamp.usecs-$ts0:sort=hitcount:size=2048:clock=global:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,$woken_pid,next_comm) [active]
+ #
+
+ hist_data: 0000000055d65ed0
+
+ n_vals: 3
+ n_keys: 1
+ n_fields: 4
+
+ val fields:
+
+ hist_data->fields[0]:
+ flags:
+ VAL: HIST_FIELD_FL_HITCOUNT
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->fields[1]:
+ flags:
+ HIST_FIELD_FL_VAR
+ HIST_FIELD_FL_ALIAS
+ var.name: woken_pid
+ var.idx (into tracing_map_elt.vars[]): 0
+ var_ref_idx (into hist_data->var_refs[]): 0
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ hist_data->fields[2]:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 1
+ type: u64
+ size: 0
+ is_signed: 0
+
+ key fields:
+
+ hist_data->fields[3]:
+ flags:
+ HIST_FIELD_FL_KEY
+ ftrace_event_field name: next_pid
+ type: pid_t
+ size: 8
+ is_signed: 1
+
+ variable reference fields:
+
+ hist_data->var_refs[0]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: waking_pid
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 00000000a250528c
+ var_ref_idx (into hist_data->var_refs[]): 0
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ hist_data->var_refs[1]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: ts0
+ var.idx (into tracing_map_elt.vars[]): 1
+ var.hist_data: 00000000a250528c
+ var_ref_idx (into hist_data->var_refs[]): 1
+ type: u64
+ size: 8
+ is_signed: 0
+
+ hist_data->var_refs[2]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: wakeup_lat
+ var.idx (into tracing_map_elt.vars[]): 1
+ var.hist_data: 0000000055d65ed0
+ var_ref_idx (into hist_data->var_refs[]): 2
+ type: u64
+ size: 0
+ is_signed: 0
+
+ hist_data->var_refs[3]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: woken_pid
+ var.idx (into tracing_map_elt.vars[]): 0
+ var.hist_data: 0000000055d65ed0
+ var_ref_idx (into hist_data->var_refs[]): 3
+ type: pid_t
+ size: 4
+ is_signed: 1
+
+ hist_data->var_refs[4]:
+ flags:
+ HIST_FIELD_FL_VAR_REF
+ name: next_comm
+ var.idx (into tracing_map_elt.vars[]): 2
+ var.hist_data: 0000000055d65ed0
+ var_ref_idx (into hist_data->var_refs[]): 4
+ type: char[16]
+ size: 256
+ is_signed: 0
+
+ field variables:
+
+ hist_data->field_vars[0]:
+
+ field_vars[0].var:
+ flags:
+ HIST_FIELD_FL_VAR
+ var.name: next_comm
+ var.idx (into tracing_map_elt.vars[]): 2
+
+ field_vars[0].val:
+ ftrace_event_field name: next_comm
+ type: char[16]
+ size: 256
+ is_signed: 0
+
+ action tracking variables (for onmax()/onchange()/onmatch()):
+
+ hist_data->actions[0].match_data.event_system: sched
+ hist_data->actions[0].match_data.event: sched_waking
+
+The commands below can be used to clean things up for the next test::
+
+ # echo '!hist:keys=next_pid:woken_pid=$waking_pid:wakeup_lat=common_timestamp.usecs-$ts0:onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,$woken_pid,next_comm)' >> events/sched/sched_switch/trigger
+
+ # echo '!hist:keys=pid:ts0=common_timestamp.usecs' >> events/sched/sched_waking/trigger
+
+ # echo '!wakeup_latency u64 lat; pid_t pid; char comm[16]' >> synthetic_events
diff --git a/Documentation/trace/mmiotrace.rst b/Documentation/trace/mmiotrace.rst
index 5116e8ca27b4..fed13eaead89 100644
--- a/Documentation/trace/mmiotrace.rst
+++ b/Documentation/trace/mmiotrace.rst
@@ -5,7 +5,7 @@ In-kernel memory-mapped I/O tracing
Home page and links to optional user space tools:
- http://nouveau.freedesktop.org/wiki/MmioTrace
+ https://nouveau.freedesktop.org/wiki/MmioTrace
MMIO tracing was originally developed by Intel around 2003 for their Fault
Injection Test Harness. In Dec 2006 - Jan 2007, using the code from Intel,
diff --git a/Documentation/translations/it_IT/kernel-hacking/hacking.rst b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
index 6aab27a8d323..e9a2e92134f0 100644
--- a/Documentation/translations/it_IT/kernel-hacking/hacking.rst
+++ b/Documentation/translations/it_IT/kernel-hacking/hacking.rst
@@ -634,7 +634,7 @@ Definita in ``include/linux/export.h``
Questa è una variate di `EXPORT_SYMBOL()` che permette di specificare uno
spazio dei nomi. Lo spazio dei nomi è documentato in
-:doc:`../core-api/symbol-namespaces`
+:doc:`../../../core-api/symbol-namespaces`
:c:func:`EXPORT_SYMBOL_NS_GPL()`
--------------------------------
@@ -643,7 +643,7 @@ Definita in ``include/linux/export.h``
Questa è una variate di `EXPORT_SYMBOL_GPL()` che permette di specificare uno
spazio dei nomi. Lo spazio dei nomi è documentato in
-:doc:`../core-api/symbol-namespaces`
+:doc:`../../../core-api/symbol-namespaces`
Procedure e convenzioni
=======================
diff --git a/Documentation/translations/it_IT/process/email-clients.rst b/Documentation/translations/it_IT/process/email-clients.rst
index 89abf6d325f2..66d3d65776f7 100644
--- a/Documentation/translations/it_IT/process/email-clients.rst
+++ b/Documentation/translations/it_IT/process/email-clients.rst
@@ -3,6 +3,8 @@
:Original: :doc:`../../../process/email-clients`
:Translator: Alessia Mantegazza <amantegazza@vaga.pv.it>
+.. _it_email_clients:
+
Informazioni sui programmi di posta elettronica per Linux
=========================================================
diff --git a/Documentation/translations/it_IT/process/management-style.rst b/Documentation/translations/it_IT/process/management-style.rst
index c709285138a7..76ed074082ea 100644
--- a/Documentation/translations/it_IT/process/management-style.rst
+++ b/Documentation/translations/it_IT/process/management-style.rst
@@ -3,6 +3,8 @@
:Original: :doc:`../../../process/management-style`
:Translator: Alessia Mantegazza <amantegazza@vaga.pv.it>
+.. _it_managementstyle:
+
Il modello di gestione del kernel Linux
=======================================
diff --git a/Documentation/translations/it_IT/process/submitting-patches.rst b/Documentation/translations/it_IT/process/submitting-patches.rst
index cba1f8cb61ed..7c23c08e4401 100644
--- a/Documentation/translations/it_IT/process/submitting-patches.rst
+++ b/Documentation/translations/it_IT/process/submitting-patches.rst
@@ -21,7 +21,7 @@ Leggete anche :ref:`Documentation/translations/it_IT/process/submit-checklist.rs
per una lista di punti da verificare prima di inviare del codice. Se state
inviando un driver, allora leggete anche :ref:`Documentation/translations/it_IT/process/submitting-drivers.rst <it_submittingdrivers>`;
per delle patch relative alle associazioni per Device Tree leggete
-Documentation/devicetree/bindings/submitting-patches.txt.
+Documentation/devicetree/bindings/submitting-patches.rst.
Molti di questi passi descrivono il comportamento di base del sistema di
controllo di versione ``git``; se utilizzate ``git`` per preparare le vostre
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index e50fe6541335..34d041d68f78 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1842,12 +1842,15 @@ Mandatory ë°°ë¦¬ì–´ë“¤ì€ SMP 시스템ì—ì„œë„ UP 시스템ì—ì„œë„ SMP 효ê³
(*) smp_mb__before_atomic();
(*) smp_mb__after_atomic();
- ì´ê²ƒë“¤ì€ ê°’ì„ ë¦¬í„´í•˜ì§€ 않는 (ë”하기, 빼기, ì¦ê°€, ê°ì†Œì™€ ê°™ì€) 어토믹
- í•¨ìˆ˜ë“¤ì„ ìœ„í•œ, 특히 ê·¸ê²ƒë“¤ì´ ë ˆí¼ëŸ°ìŠ¤ ì¹´ìš´íŒ…ì— ì‚¬ìš©ë  ë•Œë¥¼ 위한
- 함수들입니다. ì´ í•¨ìˆ˜ë“¤ì€ ë©”ëª¨ë¦¬ 배리어를 ë‚´í¬í•˜ê³  있지는 않습니다.
-
- ì´ê²ƒë“¤ì€ ê°’ì„ ë¦¬í„´í•˜ì§€ 않으며 어토믹한 (set_bit ê³¼ clear_bit ê°™ì€) 비트
- ì—°ì‚°ì—ë„ ì‚¬ìš©ë  ìˆ˜ 있습니다.
+ ì´ê²ƒë“¤ì€ 메모리 배리어를 ë‚´í¬í•˜ì§€ 않는 어토믹 RMW 함수를 사용하지만 코드ì—
+ 메모리 배리어가 필요한 경우를 위한 것들입니다. 메모리 배리어를 ë‚´í¬í•˜ì§€
+ 않는 어토믹 RMW í•¨ìˆ˜ë“¤ì˜ ì˜ˆë¡œëŠ” ë”하기, 빼기, (실패한) ì¡°ê±´ì 
+ 오í¼ë ˆì´ì…˜ë“¤, _relaxed í•¨ìˆ˜ë“¤ì´ ìžˆìœ¼ë©°, atomic_read 나 atomic_set ì€ ì´ì—
+ 해당ë˜ì§€ 않습니다. 메모리 배리어가 필요해지는 í”í•œ 예로는 어토믹
+ 오í¼ë ˆì´ì…˜ì„ 사용해 ë ˆí¼ëŸ°ìŠ¤ 카운트를 수정하는 경우를 들 수 있습니다.
+
+ ì´ê²ƒë“¤ì€ ë˜í•œ (set_bit ê³¼ clear_bit ê°™ì€) 메모리 배리어를 ë‚´í¬í•˜ì§€ 않는
+ 어토믹 RMW bitop í•¨ìˆ˜ë“¤ì„ ìœ„í•´ì„œë„ ì‚¬ìš©ë  ìˆ˜ 있습니다.
í•œ 예로, ê°ì²´ 하나를 무효한 것으로 표시하고 ê·¸ ê°ì²´ì˜ ë ˆí¼ëŸ°ìŠ¤ 카운트를
ê°ì†Œì‹œí‚¤ëŠ” ë‹¤ìŒ ì½”ë“œë¥¼ 보세요:
diff --git a/Documentation/translations/zh_CN/filesystems/debugfs.rst b/Documentation/translations/zh_CN/filesystems/debugfs.rst
index f8a28793c277..822c4d42fdf9 100644
--- a/Documentation/translations/zh_CN/filesystems/debugfs.rst
+++ b/Documentation/translations/zh_CN/filesystems/debugfs.rst
@@ -2,7 +2,7 @@
.. include:: ../disclaimer-zh_CN.rst
-:Original: :ref:`Documentation/filesystems/debugfs.txt <debugfs_index>`
+:Original: :doc:`../../../filesystems/debugfs`
=======
Debugfs
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 52bf58417653..1f3da8f32fc1 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -287,6 +287,7 @@ Code Seq# Include File Comments
'v' 00-1F linux/fs.h conflict!
'v' 00-0F linux/sonypi.h conflict!
'v' 00-0F media/v4l2-subdev.h conflict!
+'v' 20-27 arch/powerpc/include/uapi/asm/vas-api.h VAS API
'v' C0-FF linux/meye.h conflict!
'w' all CERN SCI driver
'y' 00-1F packet based user level communications
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 561969754bc0..6f9e000757fa 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -191,15 +191,15 @@ The usage pattern is::
again:
range.notifier_seq = mmu_interval_read_begin(&interval_sub);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(&range);
if (ret) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret == -EBUSY)
goto again;
return ret;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
take_lock(driver->update);
if (mmu_interval_read_retry(&ni, range.notifier_seq) {
diff --git a/Documentation/vm/ksm.rst b/Documentation/vm/ksm.rst
index d32016d9be2c..d1b7270ad55c 100644
--- a/Documentation/vm/ksm.rst
+++ b/Documentation/vm/ksm.rst
@@ -6,7 +6,7 @@ Kernel Samepage Merging
KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y,
added to the Linux kernel in 2.6.32. See ``mm/ksm.c`` for its implementation,
-and http://lwn.net/Articles/306704/ and http://lwn.net/Articles/330589/
+and http://lwn.net/Articles/306704/ and https://lwn.net/Articles/330589/
The userspace interface of KSM is described in :ref:`Documentation/admin-guide/mm/ksm.rst <admin_guide_ksm>`
diff --git a/Documentation/vm/transhuge.rst b/Documentation/vm/transhuge.rst
index 37c57ca32629..0ed23e59abe5 100644
--- a/Documentation/vm/transhuge.rst
+++ b/Documentation/vm/transhuge.rst
@@ -98,9 +98,9 @@ split_huge_page() or split_huge_pmd() has a cost.
To make pagetable walks huge pmd aware, all you need to do is to call
pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the
-mmap_sem in read (or write) mode to be sure a huge pmd cannot be
+mmap_lock in read (or write) mode to be sure a huge pmd cannot be
created from under you by khugepaged (khugepaged collapse_huge_page
-takes the mmap_sem in write mode in addition to the anon_vma lock). If
+takes the mmap_lock in write mode in addition to the anon_vma lock). If
pmd_trans_huge returns false, you just fallback in the old code
paths. If instead pmd_trans_huge returns true, you have to take the
page table lock (pmd_lock()) and re-run pmd_trans_huge. Taking the
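The paragraph above is essentially a recipe; a minimal kernel-C sketch of it (the walker function is hypothetical, and the usual mm headers are assumed) would look like:

    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    /* Caller is assumed to hold mmap_lock for read; pmd was returned by
     * pmd_offset().  pmd_lock() takes the page table lock so the huge-pmd
     * check can be redone without racing against a split.
     */
    static void walk_pmd_sketch(struct vm_area_struct *vma, pmd_t *pmd)
    {
            if (pmd_trans_huge(*pmd)) {
                    spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);

                    if (pmd_trans_huge(*pmd)) {
                            /* operate on the huge pmd while holding ptl */
                            spin_unlock(ptl);
                            return;
                    }
                    /* it was split under us: fall back to the pte paths */
                    spin_unlock(ptl);
            }
            /* old (pte-level) code paths */
    }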
diff --git a/Documentation/w1/slaves/w1_therm.rst b/Documentation/w1/slaves/w1_therm.rst
index 90531c340a07..cc4edae17751 100644
--- a/Documentation/w1/slaves/w1_therm.rst
+++ b/Documentation/w1/slaves/w1_therm.rst
@@ -26,20 +26,31 @@ W1_THERM_DS1825 0x3B
W1_THERM_DS28EA00 0x42
==================== ====
-Support is provided through the sysfs w1_slave file. Each open and
+Support is provided through the sysfs w1_slave file. Each open and
read sequence will initiate a temperature conversion then provide two
-lines of ASCII output. The first line contains the nine hex bytes
+lines of ASCII output. The first line contains the nine hex bytes
read along with a calculated crc value and YES or NO if it matched.
-If the crc matched the returned values are retained. The second line
+If the crc matched the returned values are retained. The second line
displays the retained values along with a temperature in millidegrees
Centigrade after t=.
-Parasite powered devices are limited to one slave performing a
-temperature conversion at a time. If none of the devices are parasite
-powered it would be possible to convert all the devices at the same
-time and then go back to read individual sensors. That isn't
-currently supported. The driver also doesn't support reduced
-precision (which would also reduce the conversion time) when reading values.
+Alternatively, the temperature can be read using the temperature sysfs entry,
+which returns only the temperature in millidegrees Centigrade.
+
+A bulk read of all devices on the bus can be done by writing 'trigger'
+to the therm_bulk_read sysfs entry at w1_bus_master level. This will
+send the convert command to all devices on the bus, and if parasite
+powered devices are detected on the bus (and strong pullup is enabled
+in the module), it will drive the line high during the longer conversion
+time required by parasite powered devices on the line. Reading
+therm_bulk_read will return 0 if no bulk conversion is pending,
+-1 if at least one sensor is still converting, and 1 if the conversion is
+complete but at least one sensor value has not been read yet. The resulting
+temperature is then accessed by reading the temperature sysfs entry of each
+device, which may return empty if the conversion is still in progress. Note
+that if a bulk read is sent but one sensor is not read immediately, the next
+access to temperature on this device will return the temperature measured
+at the time the bulk read command was issued (not the current temperature).
Writing a value between 9 and 12 to the sysfs w1_slave file will change the
precision of the sensor for the next readings. This value is in (volatile)
@@ -49,6 +60,27 @@ To store the current precision configuration into EEPROM, the value 0
has to be written to the sysfs w1_slave file. Since the EEPROM has a limited
amount of writes (>50k), this command should be used wisely.
+Alternatively, the resolution can be set or read (value from 9 to 12) using
+the dedicated resolution sysfs entry on each device. This sysfs entry is not
+present for devices that do not support this feature. The driver will adjust
+the conversion time for each device according to its resolution setting.
+In particular, strong pullup will be applied if required during the
+conversion duration.
+
+The write-only sysfs entry eeprom is an alternative for EEPROM operations:
+ * 'save': will save device RAM to EEPROM
+ * 'restore': will restore EEPROM data into device RAM.
+
+The ext_power sysfs entry allows checking the power status of each device.
+ * '0': device parasite powered
+ * '1': device externally powered
+
+The sysfs entry alarms allows reading or writing the TH and TL (Temperature
+High and Low) alarms. Values shall be space separated and within the device
+range (typically -55 degC to 125 degC). Values are integers as they are
+stored in an 8bit register in the device. The lowest value is automatically
+assigned to TL. Once set, alarms can be searched for at master level.
+
The module parameter strong_pullup can be set to 0 to disable the
strong pullup, 1 to enable autodetection or 2 to force strong pullup.
In case of autodetection, the driver will use the "READ POWER SUPPLY"
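For illustration, the therm_bulk_read trigger and the per-device temperature entry described above can be exercised from userspace roughly as follows (the bus-master name and the 28-* device id are placeholders, not values from this patch):

    /* Userspace sketch: start a bulk conversion, then read one sensor.
     * Paths under /sys/bus/w1/devices/ depend on the actual bus master
     * and sensor ids present on the system.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f;
            long mdegc;

            f = fopen("/sys/bus/w1/devices/w1_bus_master1/therm_bulk_read", "w");
            if (!f)
                    return 1;
            fputs("trigger", f);
            fclose(f);

            /* later, once therm_bulk_read reads back 1 ... */
            f = fopen("/sys/bus/w1/devices/28-0000075e2fdc/temperature", "r");
            if (!f)
                    return 1;
            if (fscanf(f, "%ld", &mdegc) == 1)
                    printf("%.3f degC\n", mdegc / 1000.0);
            fclose(f);
            return 0;
    }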
diff --git a/Documentation/xz.txt b/Documentation/xz.txt
index b2220d03aa50..b2f5ff12a161 100644
--- a/Documentation/xz.txt
+++ b/Documentation/xz.txt
@@ -14,13 +14,13 @@ improve compression ratio of executable data.
The XZ decompressor in Linux is called XZ Embedded. It supports
the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
for integrity checking. The home page of XZ Embedded is at
-<http://tukaani.org/xz/embedded.html>, where you can find the
+<https://tukaani.org/xz/embedded.html>, where you can find the
latest version and also information about using the code outside
the Linux kernel.
For userspace, XZ Utils provide a zlib-like compression library
and a gzip-like command line tool. XZ Utils can be downloaded from
-<http://tukaani.org/xz/>.
+<https://tukaani.org/xz/>.
XZ related components in the kernel
===================================
@@ -113,7 +113,7 @@ Reporting bugs
==============
Before reporting a bug, please check that it's not fixed already
-at upstream. See <http://tukaani.org/xz/embedded.html> to get the
+at upstream. See <https://tukaani.org/xz/embedded.html> to get the
latest code.
Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
diff --git a/Kconfig b/Kconfig
index e10b3ee084d4..745bc773f567 100644
--- a/Kconfig
+++ b/Kconfig
@@ -5,8 +5,6 @@
#
mainmenu "Linux/$(ARCH) $(KERNELVERSION) Kernel Configuration"
-comment "Compiler: $(CC_VERSION_TEXT)"
-
source "scripts/Kconfig.include"
source "init/Kconfig"
diff --git a/MAINTAINERS b/MAINTAINERS
index ddc1dc7e535e..58bc99a57d95 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -294,6 +294,7 @@ F: drivers/gpio/gpio-104-idio-16.c
ACCES 104-QUAD-8 DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
+M: Syed Nayyar Waris <syednwaris@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
@@ -1042,6 +1043,14 @@ W: http://ez.analog.com/community/linux-device-drivers
F: Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml
F: drivers/iio/imu/adis16460.c
+ANALOG DEVICES INC ADIS16475 DRIVER
+M: Nuno Sa <nuno.sa@analog.com>
+L: linux-iio@vger.kernel.org
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/iio/imu/adis16475.c
+F: Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
+
ANALOG DEVICES INC ADM1177 DRIVER
M: Beniamin Bia <beniamin.bia@analog.com>
M: Michael Hennerich <Michael.Hennerich@analog.com>
@@ -1296,6 +1305,13 @@ S: Supported
W: http://www.aquantia.com
F: drivers/net/ethernet/aquantia/atlantic/aq_ptp*
+ARASAN NAND CONTROLLER DRIVER
+M: Naga Sureshkumar Relli <nagasure@xilinx.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
+F: drivers/mtd/nand/raw/arasan-nand-controller.c
+
ARC FRAMEBUFFER DRIVER
M: Jaya Kumar <jayalk@intworks.biz>
S: Maintained
@@ -1350,10 +1366,11 @@ F: arch/arm/mach-integrator/
F: arch/arm/mach-realview/
F: arch/arm/mach-versatile/
F: arch/arm/plat-versatile/
+F: drivers/bus/arm-integrator-lm.c
F: drivers/clk/versatile/
F: drivers/i2c/busses/i2c-versatile.c
F: drivers/irqchip/irq-versatile-fpga.c
-F: drivers/mtd/maps/physmap_of_versatile.c
+F: drivers/mtd/maps/physmap-versatile.*
F: drivers/power/reset/arm-versatile-reboot.c
F: drivers/soc/versatile/
@@ -1468,6 +1485,13 @@ S: Maintained
F: Documentation/devicetree/bindings/interrupt-controller/arm,vic.txt
F: drivers/irqchip/irq-vic.c
+ARM SMC WATCHDOG DRIVER
+M: Julius Werner <jwerner@chromium.org>
+R: Evan Benn <evanbenn@chromium.org>
+S: Maintained
+F: Documentation/devicetree/bindings/watchdog/arm-smc-wdt.yaml
+F: drivers/watchdog/arm_smc_wdt.c
+
ARM SMMU DRIVERS
M: Will Deacon <will@kernel.org>
R: Robin Murphy <robin.murphy@arm.com>
@@ -1696,11 +1720,6 @@ S: Maintained
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
F: drivers/clk/clkdev.c
-ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT
-M: Mike Rapoport <mike@compulab.co.il>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-
ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
M: Baruch Siach <baruch@tkos.co.il>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2009,6 +2028,7 @@ F: arch/arm/mach-dove/
F: arch/arm/mach-mv78xx0/
F: arch/arm/mach-orion5x/
F: arch/arm/plat-orion/
+F: drivers/soc/dove/
ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K, CN9130 SOC support
M: Jason Cooper <jason@lakedaemon.net>
@@ -2177,6 +2197,7 @@ L: linux-oxnas@groups.io (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/ox8*.dts*
F: arch/arm/mach-oxnas/
+F: drivers/power/reset/oxnas-restart.c
N: oxnas
ARM/PALM TREO SUPPORT
@@ -2285,6 +2306,8 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-realtek-soc@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/realtek.yaml
+F: arch/arm/boot/dts/rtd*
+F: arch/arm/mach-realtek/
F: arch/arm64/boot/dts/realtek/
ARM/RENESAS ARM64 ARCHITECTURE
@@ -2707,8 +2730,8 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
W: http://wiki.xilinx.com
T: git https://github.com/Xilinx/linux-xlnx.git
-F: Documentation/devicetree/bindings/i2c/i2c-cadence.txt
-F: Documentation/devicetree/bindings/i2c/i2c-xiic.txt
+F: Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
+F: Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
F: arch/arm/mach-zynq/
F: drivers/block/xsysace.c
F: drivers/clocksource/timer-cadence-ttc.c
@@ -3333,7 +3356,7 @@ L: bcm-kernel-feedback-list@broadcom.com
L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://github.com/anholt/linux
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsaenz/linux-rpi.git
F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
F: drivers/pci/controller/pcie-brcmstb.c
F: drivers/staging/vc04_services
@@ -3494,6 +3517,14 @@ S: Supported
F: Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
F: drivers/i2c/busses/i2c-brcmstb.c
+BROADCOM BRCMSTB USB EHCI DRIVER
+M: Al Cooper <alcooperx@gmail.com>
+L: linux-usb@vger.kernel.org
+L: bcm-kernel-feedback-list@broadcom.com
+S: Maintained
+F: Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
+F: drivers/usb/host/ehci-brcm.*
+
BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
M: Al Cooper <alcooperx@gmail.com>
L: linux-kernel@vger.kernel.org
@@ -3754,9 +3785,8 @@ F: Documentation/devicetree/bindings/media/cdns,*.txt
F: drivers/media/platform/cadence/cdns-csi2*
CADENCE NAND DRIVER
-M: Piotr Sroka <piotrs@cadence.com>
L: linux-mtd@lists.infradead.org
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
F: drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -3955,6 +3985,12 @@ F: arch/powerpc/include/uapi/asm/spu*.h
F: arch/powerpc/oprofile/*cell*
F: arch/powerpc/platforms/cell/
+CELLWISE CW2015 BATTERY DRIVER
+M: Tobias Schrammm <t.schramm@manjaro.org>
+S: Maintained
+F: Documentation/devicetree/bindings/power/supply/cw2015_battery.yaml
+F: drivers/power/supply/cw2015_battery.c
+
CEPH COMMON CODE (LIBCEPH)
M: Ilya Dryomov <idryomov@gmail.com>
M: Jeff Layton <jlayton@kernel.org>
@@ -4082,12 +4118,11 @@ M: Charles Keepax <ckeepax@opensource.cirrus.com>
M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: patches@opensource.cirrus.com
S: Supported
-F: Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
-F: Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
-F: Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
-F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
-F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
-F: Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
+F: Documentation/devicetree/bindings/clock/cirrus,lochnagar.yaml
+F: Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.yaml
+F: Documentation/devicetree/bindings/mfd/cirrus,lochnagar.yaml
+F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml
+F: Documentation/devicetree/bindings/sound/cirrus,lochnagar.yaml
F: Documentation/hwmon/lochnagar.rst
F: drivers/clk/clk-lochnagar.c
F: drivers/hwmon/lochnagar-hwmon.c
@@ -4107,9 +4142,9 @@ L: patches@opensource.cirrus.com
S: Supported
W: https://github.com/CirrusLogic/linux-drivers/wiki
T: git https://github.com/CirrusLogic/linux-drivers.git
-F: Documentation/devicetree/bindings/mfd/madera.txt
-F: Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
-F: Documentation/devicetree/bindings/sound/madera.txt
+F: Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
+F: Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
+F: Documentation/devicetree/bindings/sound/cirrus,madera.yaml
F: drivers/gpio/gpio-madera*
F: drivers/irqchip/irq-madera*
F: drivers/mfd/cs47l*
@@ -4675,6 +4710,12 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/common/cypress_firmware*
+CYPRESS CY8CTMA140 TOUCHSCREEN DRIVER
+M: Linus Walleij <linus.walleij@linaro.org>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/input/touchscreen/cy8ctma140.c
+
CYTTSP TOUCHSCREEN DRIVER
M: Ferruh Yigit <fery@cypress.com>
L: linux-input@vger.kernel.org
@@ -7081,6 +7122,7 @@ GASKET DRIVER FRAMEWORK
M: Rob Springer <rspringer@google.com>
M: Todd Poynor <toddpoynor@google.com>
M: Ben Chan <benchan@chromium.org>
+M: Richard Yeh <rcy@google.com>
S: Maintained
F: drivers/staging/gasket/
@@ -7228,7 +7270,7 @@ L: cluster-devel@redhat.com
S: Supported
W: http://sources.redhat.com/cluster/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
-F: Documentation/filesystems/gfs2*.txt
+F: Documentation/filesystems/gfs2*
F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h
@@ -7278,6 +7320,13 @@ F: Documentation/firmware-guide/acpi/gpio-properties.rst
F: drivers/gpio/gpiolib-acpi.c
F: drivers/gpio/gpiolib-acpi.h
+GPIO AGGREGATOR
+M: Geert Uytterhoeven <geert+renesas@glider.be>
+L: linux-gpio@vger.kernel.org
+S: Supported
+F: Documentation/admin-guide/gpio/gpio-aggregator.rst
+F: drivers/gpio/gpio-aggregator.c
+
GPIO IR Transmitter
M: Sean Young <sean@mess.org>
L: linux-media@vger.kernel.org
@@ -7291,6 +7340,12 @@ S: Maintained
F: drivers/gpio/gpio-mockup.c
F: tools/testing/selftests/gpio/
+GPIO REGMAP
+R: Michael Walle <michael@walle.cc>
+S: Maintained
+F: drivers/gpio/gpio-regmap.c
+F: include/linux/gpio/regmap.h
+
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
@@ -8466,6 +8521,7 @@ F: drivers/mtd/nand/raw/ingenic/
F: drivers/pinctrl/pinctrl-ingenic.c
F: drivers/power/supply/ingenic-battery.c
F: drivers/pwm/pwm-jz4740.c
+F: drivers/remoteproc/ingenic_rproc.c
F: drivers/rtc/rtc-jz4740.c
F: drivers/tty/serial/8250/8250_ingenic.c
F: drivers/usb/musb/jz4740.c
@@ -9249,6 +9305,17 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
+KCSAN
+M: Marco Elver <elver@google.com>
+R: Dmitry Vyukov <dvyukov@google.com>
+L: kasan-dev@googlegroups.com
+S: Maintained
+F: Documentation/dev-tools/kcsan.rst
+F: include/linux/kcsan*.h
+F: kernel/kcsan/
+F: lib/Kconfig.kcsan
+F: scripts/Makefile.kcsan
+
KDUMP
M: Dave Young <dyoung@redhat.com>
M: Baoquan He <bhe@redhat.com>
@@ -9869,7 +9936,7 @@ F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
LINUX FOR POWERPC EMBEDDED PPC8XX
-M: Christophe Leroy <christophe.leroy@c-s.fr>
+M: Christophe Leroy <christophe.leroy@csgroup.eu>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: arch/powerpc/platforms/8xx/
@@ -9929,10 +9996,12 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching.git
F: Documentation/ABI/testing/sysfs-kernel-livepatch
F: Documentation/livepatch/
+F: arch/powerpc/include/asm/livepatch.h
+F: arch/s390/include/asm/livepatch.h
F: arch/x86/include/asm/livepatch.h
-F: arch/x86/kernel/livepatch.c
F: include/linux/livepatch.h
F: kernel/livepatch/
+F: lib/livepatch/
F: samples/livepatch/
F: tools/testing/selftests/livepatch/
@@ -10031,7 +10100,7 @@ F: drivers/hid/hid-lg-g15.c
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
-M: Chaitra P B <chaitra.basappa@broadcom.com>
+M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
M: Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
L: MPT-FusionLinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
@@ -10807,9 +10876,8 @@ F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
F: drivers/i2c/busses/i2c-mt7621.c
MEDIATEK NAND CONTROLLER DRIVER
-M: Xiaolei Li <xiaolei.li@mediatek.com>
L: linux-mtd@lists.infradead.org
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/mtd/mtk-nand.txt
F: drivers/mtd/nand/raw/mtk_*
@@ -11474,10 +11542,15 @@ F: kernel/module.c
MONOLITHIC POWER SYSTEM PMIC DRIVER
M: Saravanan Sekar <sravanhome@gmail.com>
S: Maintained
+F: Documentation/devicetree/bindings/mfd/mps,mp2629.yaml
F: Documentation/devicetree/bindings/regulator/mps,mp*.yaml
+F: drivers/iio/adc/mp2629_adc.c
+F: drivers/mfd/mp2629.c
+F: drivers/power/supply/mp2629_charger.c
F: drivers/regulator/mp5416.c
F: drivers/regulator/mpq7920.c
F: drivers/regulator/mpq7920.h
+F: include/linux/mfd/mp2629.h
MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER
S: Orphan
@@ -12640,8 +12713,8 @@ M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/dynamic-resolution-notes.txt
-F: Documentation/devicetree/overlay-notes.txt
+F: Documentation/devicetree/dynamic-resolution-notes.rst
+F: Documentation/devicetree/overlay-notes.rst
F: drivers/of/overlay.c
F: drivers/of/resolver.c
K: of_overlay_notifier_
@@ -12838,7 +12911,7 @@ F: include/uapi/linux/ppdev.h
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
-M: Thomas Hellstrom <thellstrom@vmware.com>
+M: Deep Shah <sdeep@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
L: virtualization@lists.linux-foundation.org
S: Supported
@@ -13049,7 +13122,7 @@ L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt
-F: drivers/pci/controller/mobibeil/pcie-layerscape-gen4.c
+F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
PCI DRIVER FOR RENESAS R-CAR
M: Marek Vasut <marek.vasut+renesas@gmail.com>
@@ -13057,6 +13130,7 @@ M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
L: linux-pci@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org
S: Maintained
+F: Documentation/devicetree/bindings/pci/*rcar*
F: drivers/pci/controller/*rcar*
PCI DRIVER FOR SAMSUNG EXYNOS
@@ -13250,8 +13324,8 @@ PCIE DRIVER FOR SOCIONEXT UNIPHIER
M: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
L: linux-pci@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/pci/uniphier-pcie.txt
-F: drivers/pci/controller/dwc/pcie-uniphier.c
+F: Documentation/devicetree/bindings/pci/uniphier-pcie*
+F: drivers/pci/controller/dwc/pcie-uniphier*
PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
@@ -13452,8 +13526,9 @@ F: drivers/pinctrl/qcom/
PIN CONTROLLER - RENESAS
M: Geert Uytterhoeven <geert+renesas@glider.be>
L: linux-renesas-soc@vger.kernel.org
-S: Maintained
+S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
+F: Documentation/devicetree/bindings/pinctrl/renesas,*
F: drivers/pinctrl/pinctrl-rz*
F: drivers/pinctrl/sh-pfc/
@@ -14123,6 +14198,14 @@ L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/iommu/qcom_iommu.c
+QUALCOMM IPCC MAILBOX DRIVER
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
+F: drivers/mailbox/qcom-ipcc.c
+F: include/dt-bindings/mailbox/qcom-ipcc.h
+
QUALCOMM RMNET DRIVER
M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
M: Sean Tranchetti <stranche@codeaurora.org>
@@ -14440,6 +14523,7 @@ M: Geert Uytterhoeven <geert+renesas@glider.be>
L: linux-renesas-soc@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git clk-renesas
+F: Documentation/devicetree/bindings/clock/renesas,*
F: drivers/clk/renesas/
RENESAS EMEV2 I2C DRIVER
@@ -14554,6 +14638,13 @@ F: arch/riscv/
N: riscv
K: riscv
+RNBD BLOCK DRIVERS
+M: Danil Kipnis <danil.kipnis@cloud.ionos.com>
+M: Jack Wang <jinpu.wang@cloud.ionos.com>
+L: linux-block@vger.kernel.org
+S: Maintained
+F: drivers/block/rnbd/
+
ROCCAT DRIVERS
M: Stefan Achatz <erazor_de@users.sourceforge.net>
S: Maintained
@@ -14691,6 +14782,13 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
F: drivers/net/wireless/realtek/rtl8xxxu/
+RTRS TRANSPORT DRIVERS
+M: Danil Kipnis <danil.kipnis@cloud.ionos.com>
+M: Jack Wang <jinpu.wang@cloud.ionos.com>
+L: linux-rdma@vger.kernel.org
+S: Maintained
+F: drivers/infiniband/ulp/rtrs/
+
RXRPC SOCKETS (AF_RXRPC)
M: David Howells <dhowells@redhat.com>
L: linux-afs@lists.infradead.org
@@ -14775,6 +14873,7 @@ S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
+F: Documentation/s390/pci.rst
S390 VFIO AP DRIVER
M: Tony Krowiak <akrowiak@linux.ibm.com>
@@ -16356,9 +16455,10 @@ F: drivers/tty/serial/8250/8250_lpss.c
SYNOPSYS DESIGNWARE APB GPIO DRIVER
M: Hoan Tran <hoan@os.amperecomputing.com>
+M: Serge Semin <fancer.lancer@gmail.com>
L: linux-gpio@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+F: Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
F: drivers/gpio/gpio-dwapb.c
SYNOPSYS DESIGNWARE AXI DMAC DRIVER
@@ -16716,6 +16816,16 @@ M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/spi/spi-tegra*
+TEGRA VIDEO DRIVER
+M: Thierry Reding <thierry.reding@gmail.com>
+M: Jonathan Hunter <jonathanh@nvidia.com>
+M: Sowjanya Komatineni <skomatineni@nvidia.com>
+L: linux-media@vger.kernel.org
+L: linux-tegra@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
+F: drivers/staging/media/tegra-video/
+
TEGRA XUSB PADCTL DRIVER
M: JC Kuo <jckuo@nvidia.com>
S: Supported
@@ -17708,6 +17818,13 @@ F: Documentation/driver-api/usb/typec.rst
F: drivers/usb/typec/
F: include/linux/usb/typec.h
+USB TYPEC INTEL PMC MUX DRIVER
+M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: Documentation/firmware-guide/acpi/intel-pmc-mux.rst
+F: drivers/usb/typec/mux/intel_pmc_mux.c
+
USB TYPEC PI3USB30532 MUX DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-usb@vger.kernel.org
@@ -18022,9 +18139,18 @@ F: drivers/virtio/
F: include/linux/vdpa.h
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
-F: mm/balloon_compaction.c
F: tools/virtio/
+VIRTIO BALLOON
+M: "Michael S. Tsirkin" <mst@redhat.com>
+M: David Hildenbrand <david@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/virtio/virtio_balloon.c
+F: include/uapi/linux/virtio_balloon.h
+F: include/linux/balloon_compaction.h
+F: mm/balloon_compaction.c
+
VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
L: virtualization@lists.linux-foundation.org
@@ -18090,6 +18216,13 @@ S: Maintained
F: drivers/iommu/virtio-iommu.c
F: include/uapi/linux/virtio_iommu.h
+VIRTIO MEM DRIVER
+M: David Hildenbrand <david@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/virtio/virtio_mem.c
+F: include/uapi/linux/virtio_mem.h
+
VIRTUAL BOX GUEST DEVICE DRIVER
M: Hans de Goede <hdegoede@redhat.com>
M: Arnd Bergmann <arnd@arndb.de>
@@ -18154,7 +18287,7 @@ S: Maintained
F: drivers/misc/vmw_balloon.c
VMWARE HYPERVISOR INTERFACE
-M: Thomas Hellstrom <thellstrom@vmware.com>
+M: Deep Shah <sdeep@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
L: virtualization@lists.linux-foundation.org
S: Supported
@@ -18387,11 +18520,11 @@ L: patches@opensource.cirrus.com
S: Supported
W: https://github.com/CirrusLogic/linux-drivers/wiki
T: git https://github.com/CirrusLogic/linux-drivers.git
-F: Documentation/devicetree/bindings/extcon/extcon-arizona.txt
-F: Documentation/devicetree/bindings/mfd/arizona.txt
+F: Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
+F: Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
F: Documentation/devicetree/bindings/mfd/wm831x.txt
-F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt
-F: Documentation/devicetree/bindings/sound/wlf,arizona.txt
+F: Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
+F: Documentation/devicetree/bindings/sound/wlf,arizona.yaml
F: Documentation/hwmon/wm83??.rst
F: arch/arm/mach-s3c64xx/mach-crag6410*
F: drivers/clk/clk-wm83*.c
diff --git a/Makefile b/Makefile
index a7bc91cbac8f..f0c1a3a8d9e4 100644
--- a/Makefile
+++ b/Makefile
@@ -11,9 +11,12 @@ NAME = Kleptomaniac Octopus
# Comments in this file are targeted only to the developer, do not
# expect to learn how to build the kernel reading this file.
+$(if $(filter __%, $(MAKECMDGOALS)), \
+ $(error targets prefixed with '__' are only for internal use))
+
# That's our default target when none is given on the command line
-PHONY := _all
-_all:
+PHONY := __all
+__all:
# We are using a recursive build, so we need to do a little thinking
# to get the ordering right.
@@ -157,12 +160,14 @@ MAKEFLAGS += --include-dir=$(abs_srctree)
need-sub-make := 1
endif
+this-makefile := $(lastword $(MAKEFILE_LIST))
+
ifneq ($(filter 3.%,$(MAKE_VERSION)),)
# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
# We need to invoke sub-make to avoid implicit rules in the top Makefile.
need-sub-make := 1
# Cancel implicit rules for this Makefile.
-$(lastword $(MAKEFILE_LIST)): ;
+$(this-makefile): ;
endif
export abs_srctree abs_objtree
@@ -170,13 +175,13 @@ export sub_make_done := 1
ifeq ($(need-sub-make),1)
-PHONY += $(MAKECMDGOALS) sub-make
+PHONY += $(MAKECMDGOALS) __sub-make
-$(filter-out _all sub-make $(lastword $(MAKEFILE_LIST)), $(MAKECMDGOALS)) _all: sub-make
+$(filter-out $(this-makefile), $(MAKECMDGOALS)) __all: __sub-make
@:
# Invoke a second make in the output directory, passing relevant variables
-sub-make:
+__sub-make:
$(Q)$(MAKE) -C $(abs_objtree) -f $(abs_srctree)/Makefile $(MAKECMDGOALS)
endif # need-sub-make
@@ -213,6 +218,9 @@ ifeq ("$(origin M)", "command line")
KBUILD_EXTMOD := $(M)
endif
+$(if $(word 2, $(KBUILD_EXTMOD)), \
+ $(error building multiple external modules is not supported))
+
export KBUILD_CHECKSRC KBUILD_EXTMOD
extmod-prefix = $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)
@@ -321,7 +329,7 @@ ifdef mixed-build
PHONY += $(MAKECMDGOALS) __build_one_by_one
-$(filter-out __build_one_by_one, $(MAKECMDGOALS)): __build_one_by_one
+$(MAKECMDGOALS): __build_one_by_one
@:
__build_one_by_one:
@@ -406,9 +414,12 @@ else
HOSTCC = gcc
HOSTCXX = g++
endif
-KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
- -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
- $(HOSTCFLAGS)
+
+export KBUILD_USERCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
+ -O2 -fomit-frame-pointer -std=gnu89
+export KBUILD_USERLDFLAGS :=
+
+KBUILD_HOSTCFLAGS := $(KBUILD_USERCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
@@ -447,6 +458,26 @@ PYTHON = python
PYTHON3 = python3
CHECK = sparse
BASH = bash
+GZIP = gzip
+BZIP2 = bzip2
+LZOP = lzop
+LZMA = lzma
+LZ4 = lz4c
+XZ = xz
+
+# GZIP, BZIP2, LZOP env vars are used by the tools. Support them as the command
+# line interface, but use _GZIP, _BZIP2, _LZOP internally.
+_GZIP := $(GZIP)
+_BZIP2 := $(BZIP2)
+_LZOP := $(LZOP)
+
+# Reset GZIP, BZIP2, LZOP in this Makefile
+override GZIP=
+override BZIP2=
+override LZOP=
+
+# Reset GZIP, BZIP2, LZOP in recursive invocations
+MAKEOVERRIDES += GZIP= BZIP2= LZOP=
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
@@ -495,11 +526,12 @@ CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
+export _GZIP _BZIP2 _LZOP LZMA LZ4 XZ
export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN CFLAGS_KCSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -593,12 +625,12 @@ else #!config-build
# targets and others. In general all targets except *config targets.
# If building an external module we do not care about the all: rule
-# but instead _all depend on modules
+# but instead __all depend on modules
PHONY += all
ifeq ($(KBUILD_EXTMOD),)
-_all: all
+__all: all
else
-_all: modules
+__all: modules
endif
# Decide whether to build built-in, modular, or both.
@@ -608,19 +640,15 @@ KBUILD_MODULES :=
KBUILD_BUILTIN := 1
# If we have only "make modules", don't compile built-in objects.
-# When we're building modules with modversions, we need to consider
-# the built-in objects during the descend as well, in order to
-# make sure the checksums are up to date before we record them.
-
ifeq ($(MAKECMDGOALS),modules)
- KBUILD_BUILTIN := $(if $(CONFIG_MODVERSIONS),1)
+ KBUILD_BUILTIN :=
endif
# If we have "make <whatever> modules", compile modules
# in addition to whatever we do anyway.
# Just "make" or "make all" shall build modules as well
-ifneq ($(filter all _all modules nsdeps,$(MAKECMDGOALS)),)
+ifneq ($(filter all modules nsdeps,$(MAKECMDGOALS)),)
KBUILD_MODULES := 1
endif
@@ -636,13 +664,11 @@ endif
ifeq ($(KBUILD_EXTMOD),)
# Objects we will link into vmlinux / subdirs we need to visit
-init-y := init/
+core-y := init/ usr/
drivers-y := drivers/ sound/
drivers-$(CONFIG_SAMPLES) += samples/
-net-y := net/
+drivers-y += net/ virt/
libs-y := lib/
-core-y := usr/
-virt-y := virt/
endif # KBUILD_EXTMOD
# The all: target is the default when no target is given on the
@@ -695,7 +721,7 @@ $(KCONFIG_CONFIG):
# This exploits the 'multi-target pattern rule' trick.
# The syncconfig should be executed only once to make all the targets.
# (Note: use the grouped target '&:' when we bump to GNU Make 4.3)
-%/auto.conf %/auto.conf.cmd: $(KCONFIG_CONFIG)
+%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else # !may-sync-config
# External modules and some install targets need include/generated/autoconf.h
@@ -814,6 +840,12 @@ DEBUG_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
$(call cc-option,-fno-var-tracking)
endif
+ifdef CONFIG_DEBUG_INFO_COMPRESSED
+DEBUG_CFLAGS += -gz=zlib
+KBUILD_AFLAGS += -Wa,--compress-debug-sections=zlib
+KBUILD_LDFLAGS += --compress-debug-sections=zlib
+endif
+
KBUILD_CFLAGS += $(DEBUG_CFLAGS)
export DEBUG_CFLAGS
@@ -933,6 +965,7 @@ endif
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
+include scripts/Makefile.kcsan
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
@@ -950,6 +983,10 @@ ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif
+# Align the bit size of userspace programs with the kernel
+KBUILD_USERCFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
@@ -1011,10 +1048,10 @@ export mod_strip_cmd
mod_compress_cmd = true
ifdef CONFIG_MODULE_COMPRESS
ifdef CONFIG_MODULE_COMPRESS_GZIP
- mod_compress_cmd = gzip -n -f
+ mod_compress_cmd = $(_GZIP) -n -f
endif # CONFIG_MODULE_COMPRESS_GZIP
ifdef CONFIG_MODULE_COMPRESS_XZ
- mod_compress_cmd = xz -f
+ mod_compress_cmd = $(XZ) -f
endif # CONFIG_MODULE_COMPRESS_XZ
endif # CONFIG_MODULE_COMPRESS
export mod_compress_cmd
@@ -1049,34 +1086,33 @@ export MODULES_NSDEPS := $(extmod-prefix)modules.nsdeps
ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
-vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+vmlinux-dirs := $(patsubst %/,%,$(filter %/, \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
- $(net-y) $(net-m) $(libs-y) $(libs-m) $(virt-y)))
+ $(libs-y) $(libs-m)))
vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
- $(patsubst %/,%,$(filter %/, $(init-) $(core-) \
- $(drivers-) $(net-) $(libs-) $(virt-))))
+ $(patsubst %/,%,$(filter %/, $(core-) \
+ $(drivers-) $(libs-))))
+
+subdir-modorder := $(addsuffix modules.order,$(filter %/, \
+ $(core-y) $(core-m) $(libs-y) $(libs-m) \
+ $(drivers-y) $(drivers-m)))
build-dirs := $(vmlinux-dirs)
clean-dirs := $(vmlinux-alldirs)
-init-y := $(patsubst %/, %/built-in.a, $(init-y))
-core-y := $(patsubst %/, %/built-in.a, $(core-y))
-drivers-y := $(patsubst %/, %/built-in.a, $(drivers-y))
-net-y := $(patsubst %/, %/built-in.a, $(net-y))
-libs-y2 := $(patsubst %/, %/built-in.a, $(filter %/, $(libs-y)))
+# Externally visible symbols (used by link-vmlinux.sh)
+KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
+KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
ifdef CONFIG_MODULES
-libs-y1 := $(filter-out %/, $(libs-y))
-libs-y2 += $(patsubst %/, %/lib.a, $(filter %/, $(libs-y)))
+KBUILD_VMLINUX_OBJS += $(patsubst %/, %/lib.a, $(filter %/, $(libs-y)))
+KBUILD_VMLINUX_LIBS := $(filter-out %/, $(libs-y))
else
-libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
+KBUILD_VMLINUX_LIBS := $(patsubst %/,%/lib.a, $(libs-y))
endif
-virt-y := $(patsubst %/, %/built-in.a, $(virt-y))
+KBUILD_VMLINUX_OBJS += $(patsubst %/,%/built-in.a, $(drivers-y))
-# Externally visible symbols (used by link-vmlinux.sh)
-export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
- $(drivers-y) $(net-y) $(virt-y)
-export KBUILD_VMLINUX_LIBS := $(libs-y1)
+export KBUILD_VMLINUX_OBJS KBUILD_VMLINUX_LIBS
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/Makefile.package
@@ -1087,16 +1123,14 @@ vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
# Recurse until adjust_autoksyms.sh is satisfied
PHONY += autoksyms_recursive
ifdef CONFIG_TRIM_UNUSED_KSYMS
-autoksyms_recursive: descend modules.order
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
- "$(MAKE) -f $(srctree)/Makefile vmlinux"
-endif
-
# For the kernel to actually contain only the needed exported symbols,
# we have to build modules as well to determine what those symbols are.
# (this can be evaluated only once include/config/auto.conf has been included)
-ifdef CONFIG_TRIM_UNUSED_KSYMS
- KBUILD_MODULES := 1
+KBUILD_MODULES := 1
+
+autoksyms_recursive: descend modules.order
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
+ "$(MAKE) -f $(srctree)/Makefile vmlinux"
endif
autoksyms_h := $(if $(CONFIG_TRIM_UNUSED_KSYMS), include/generated/autoksyms.h)
@@ -1122,7 +1156,7 @@ targets := vmlinux
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
-$(sort $(vmlinux-deps)): descend ;
+$(sort $(vmlinux-deps) $(subdir-modorder)): descend ;
filechk_kernel.release = \
echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
@@ -1147,7 +1181,8 @@ scripts: scripts_basic scripts_dtc
PHONY += prepare archprepare
archprepare: outputmakefile archheaders archscripts scripts include/config/kernel.release \
- asm-generic $(version_h) $(autoksyms_h) include/generated/utsrelease.h
+ asm-generic $(version_h) $(autoksyms_h) include/generated/utsrelease.h \
+ include/generated/autoconf.h
prepare0: archprepare
$(Q)$(MAKE) $(build)=scripts/mod
@@ -1315,12 +1350,29 @@ dt_binding_check: scripts_dtc
# ---------------------------------------------------------------------------
# Modules
+# install modules.builtin regardless of CONFIG_MODULES
+PHONY += _builtin_inst_
+_builtin_inst_:
+ @mkdir -p $(MODLIB)/
+ @cp -f modules.builtin $(MODLIB)/
+ @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
+
+PHONY += install
+install: _builtin_inst_
+
ifdef CONFIG_MODULES
# By default, build modules as well
all: modules
+# When we're building modules with modversions, we need to consider
+# the built-in objects during the descend as well, in order to
+# make sure the checksums are up to date before we record them.
+ifdef CONFIG_MODVERSIONS
+ KBUILD_BUILTIN := 1
+endif
+
# Build modules
#
# A module can be listed more than once in obj-m resulting in
@@ -1328,12 +1380,19 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
-modules: $(if $(KBUILD_BUILTIN),vmlinux) modules.order
+modules: $(if $(KBUILD_BUILTIN),vmlinux) modules_check
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh
-modules.order: descend
- $(Q)$(AWK) '!x[$$0]++' $(addsuffix /$@, $(build-dirs)) > $@
+PHONY += modules_check
+modules_check: modules.order
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh $<
+
+cmd_modules_order = $(AWK) '!x[$$0]++' $(real-prereqs) > $@
+
+modules.order: $(subdir-modorder) FORCE
+ $(call if_changed,modules_order)
+
+targets += modules.order
# Target to prepare building external modules
PHONY += modules_prepare
@@ -1344,7 +1403,7 @@ PHONY += modules_install
modules_install: _modinst_ _modinst_post
PHONY += _modinst_
-_modinst_:
+_modinst_: _builtin_inst_
@rm -rf $(MODLIB)/kernel
@rm -f $(MODLIB)/source
@mkdir -p $(MODLIB)/kernel
@@ -1354,8 +1413,6 @@ _modinst_:
ln -s $(CURDIR) $(MODLIB)/build ; \
fi
@sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order
- @cp -f modules.builtin $(MODLIB)/
- @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
# This depmod is only for convenience to give the initial
@@ -1395,14 +1452,14 @@ endif # CONFIG_MODULES
# make distclean Remove editor backup files, patch leftover files and the like
# Directories & files removed with 'make clean'
-CLEAN_DIRS += include/ksym
-CLEAN_FILES += modules.builtin modules.builtin.modinfo modules.nsdeps
+CLEAN_FILES += include/ksym vmlinux.symvers \
+ modules.builtin modules.builtin.modinfo modules.nsdeps
# Directories & files removed with 'make mrproper'
-MRPROPER_DIRS += include/config include/generated \
+MRPROPER_FILES += include/config include/generated \
arch/$(SRCARCH)/include/generated .tmp_objdiff \
- debian/ snap/ tar-install/
-MRPROPER_FILES += .config .config.old .version \
+ debian snap tar-install \
+ .config .config.old .version \
Module.symvers \
signing_key.pem signing_key.priv signing_key.x509 \
x509.genkey extra_certificates signing_key.x509.keyid \
@@ -1410,12 +1467,10 @@ MRPROPER_FILES += .config .config.old .version \
*.spec
# Directories & files removed with 'make distclean'
-DISTCLEAN_DIRS +=
DISTCLEAN_FILES += tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
# clean - Delete most, but leave enough to build external modules
#
-clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
PHONY += archclean vmlinuxclean
@@ -1428,7 +1483,6 @@ clean: archclean vmlinuxclean
# mrproper - Delete all generated files, including .config
#
-mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
mrproper-dirs := $(addprefix _mrproper_,scripts)
@@ -1437,18 +1491,15 @@ $(mrproper-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
mrproper: clean $(mrproper-dirs)
- $(call cmd,rmdirs)
$(call cmd,rmfiles)
# distclean
#
-distclean: rm-dirs := $(wildcard $(DISTCLEAN_DIRS))
distclean: rm-files := $(wildcard $(DISTCLEAN_FILES))
PHONY += distclean
distclean: mrproper
- $(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
@@ -1634,17 +1685,9 @@ else # KBUILD_EXTMOD
# We are always building modules
KBUILD_MODULES := 1
-PHONY += $(objtree)/Module.symvers
-$(objtree)/Module.symvers:
- @test -e $(objtree)/Module.symvers || ( \
- echo; \
- echo " WARNING: Symbol version dump $(objtree)/Module.symvers"; \
- echo " is missing; modules will have no dependencies and modversions."; \
- echo )
-
build-dirs := $(KBUILD_EXTMOD)
PHONY += modules
-modules: descend $(objtree)/Module.symvers
+modules: descend
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
PHONY += modules_install
@@ -1663,10 +1706,6 @@ _emodinst_post: _emodinst_
clean-dirs := $(KBUILD_EXTMOD)
clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps
-PHONY += /
-/:
- @echo >&2 '"$(MAKE) /" is no longer supported. Please use "$(MAKE) ./" instead.'
-
PHONY += help
help:
@echo ' Building external modules.'
@@ -1724,6 +1763,10 @@ build-dirs := $(foreach d, $(build-dirs), \
endif
+ifndef CONFIG_MODULES
+KBUILD_MODULES :=
+endif
+
# Handle descending into subdirectories listed in $(build-dirs)
# Preset locale variables to speed up the build process. Limit locale
# tweaks to this spot to avoid wrong language settings when running
@@ -1742,7 +1785,6 @@ $(clean-dirs):
$(Q)$(MAKE) $(clean)=$(patsubst _clean_%,%,$@)
clean: $(clean-dirs)
- $(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
\( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
@@ -1837,14 +1879,8 @@ tools/%: FORCE
$(Q)mkdir -p $(objtree)/tools
$(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
-# FIXME Should go into a make.lib or something
-# ===========================================================================
-
-quiet_cmd_rmdirs = $(if $(wildcard $(rm-dirs)),CLEAN $(wildcard $(rm-dirs)))
- cmd_rmdirs = rm -rf $(rm-dirs)
-
quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files)))
- cmd_rmfiles = rm -f $(rm-files)
+ cmd_rmfiles = rm -rf $(rm-files)
# Run depmod only if we have System.map and depmod is executable
quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
diff --git a/arch/Kconfig b/arch/Kconfig
index 2e6f843d87c4..6d2ba653fe49 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -328,12 +328,6 @@ config HAVE_FUNCTION_ARG_ACCESS_API
the API needed to access function arguments from pt_regs,
declared in asm/ptrace.h
-config HAVE_CLK
- bool
- help
- The <linux/clk.h> calls support software clock gating and
- thus are a key power management tool on many systems.
-
config HAVE_HW_BREAKPOINT
bool
depends on PERF_EVENTS
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
index 95c0359f4858..00266e6e1b71 100644
--- a/arch/alpha/boot/bootp.c
+++ b/arch/alpha/boot/bootp.c
@@ -16,7 +16,6 @@
#include <asm/console.h>
#include <asm/hwrpb.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <stdarg.h>
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
index 99b8d7dc344b..43af71835adf 100644
--- a/arch/alpha/boot/bootpz.c
+++ b/arch/alpha/boot/bootpz.c
@@ -18,7 +18,6 @@
#include <asm/console.h>
#include <asm/hwrpb.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <stdarg.h>
diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c
index 8f5ed8610970..e5347a080008 100644
--- a/arch/alpha/boot/main.c
+++ b/arch/alpha/boot/main.c
@@ -14,7 +14,6 @@
#include <asm/console.h>
#include <asm/hwrpb.h>
-#include <asm/pgtable.h>
#include <stdarg.h>
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 89128489cb59..9945ff483eaf 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -4,19 +4,6 @@
#include <linux/mm.h>
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
/* Note that the following two definitions are _highly_ dependent
on the contexts in which they are used in the kernel. I personally
think it is criminal how loosely defined these macros are. */
@@ -48,7 +35,7 @@ extern void smp_imb(void);
extern void __load_new_mm_context(struct mm_struct *);
static inline void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC) {
@@ -59,20 +46,17 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
mm->context[smp_processor_id()] = 0;
}
}
-#else
-extern void flush_icache_user_range(struct vm_area_struct *vma,
+#define flush_icache_user_page flush_icache_user_page
+#else /* CONFIG_SMP */
+extern void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
-#endif
+#define flush_icache_user_page flush_icache_user_page
+#endif /* CONFIG_SMP */
/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
- flush_icache_user_range((vma), (page), 0, 0)
+ flush_icache_user_page((vma), (page), 0, 0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ALPHA_CACHEFLUSH_H */
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index d1ed5a8133c5..13bea465f1c0 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
-#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 0267aa8a4f86..162c17b2631f 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -276,15 +276,6 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
/*
* The smp_read_barrier_depends() in the following functions are required to
* order the load of *dir (the pointer in the top level page table) with any
@@ -305,6 +296,7 @@ extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
smp_read_barrier_depends(); /* see above */
return ret;
}
+#define pmd_offset pmd_offset
/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
@@ -314,9 +306,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
smp_read_barrier_depends(); /* see above */
return ret;
}
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte) do { } while (0)
+#define pte_offset_kernel pte_offset_kernel
extern pgd_t swapper_pg_dir[1024];
@@ -355,8 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern void paging_init(void);
-#include <asm-generic/pgtable.h>
-
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/alpha/kernel/binfmt_loader.c b/arch/alpha/kernel/binfmt_loader.c
index a8d0d6e06526..e4be7a543ecf 100644
--- a/arch/alpha/kernel/binfmt_loader.c
+++ b/arch/alpha/kernel/binfmt_loader.c
@@ -19,10 +19,6 @@ static int load_binary(struct linux_binprm *bprm)
if (bprm->loader)
return -ENOEXEC;
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
loader = bprm->vma->vm_end - sizeof(void *);
file = open_exec("/sbin/loader");
@@ -33,12 +29,9 @@ static int load_binary(struct linux_binprm *bprm)
/* Remember if the application is TASO. */
bprm->taso = eh->ah.entry < 0x100000000UL;
- bprm->file = file;
+ bprm->interpreter = file;
bprm->loader = loader;
- retval = prepare_binprm(bprm);
- if (retval < 0)
- return retval;
- return search_binary_handler(bprm);
+ return 0;
}
static struct linux_binfmt loader_format = {
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 48b81d015d8a..b45f0b0d6511 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -37,7 +37,6 @@
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index f1fce942fddc..701a05090141 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -2,8 +2,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
-
/* Prototypes of functions used across modules here in this directory. */
#define vucp volatile unsigned char *
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index cb8d599e72d6..8c43212ae38e 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -19,7 +19,6 @@
#include <linux/audit.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/fpu.h>
#include "proto.h"
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index f19aa577354b..53520f8cb904 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -55,7 +55,6 @@ static struct notifier_block alpha_panic_block = {
};
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
@@ -430,6 +429,20 @@ register_cpus(void)
arch_initcall(register_cpus);
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_reboot_handler(int unused)
+{
+ machine_halt();
+}
+
+static const struct sysrq_key_op srm_sysrq_reboot_op = {
+ .handler = sysrq_reboot_handler,
+ .help_msg = "reboot(b)",
+ .action_msg = "Resetting",
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+};
+#endif
+
void __init
setup_arch(char **cmdline_p)
{
@@ -550,8 +563,8 @@ setup_arch(char **cmdline_p)
/* If we're using SRM, make sysrq-b halt back to the prom,
not auto-reboot. */
if (alpha_using_srm) {
- struct sysrq_key_op *op = __sysrq_get_key_op('b');
- op->handler = (void *) machine_halt;
+ unregister_sysrq_key('b', __sysrq_reboot_op);
+ register_sysrq_key('b', &srm_sysrq_reboot_op);
}
#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 5f90df30be20..631cc17410d1 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -36,7 +36,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -740,7 +739,7 @@ ipi_flush_icache_page(void *x)
}
void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
struct mm_struct *mm = vma->vm_mm;
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index ce5430056f65..e063b3857b3d 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -23,7 +23,6 @@
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 0aa6a27d0e2f..47459b73cdb7 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/core_lca.h>
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index d33508621820..9fb445d7dca5 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -26,7 +26,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 1cdfe55fb987..3c43fd347526 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/hwrpb.h>
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 016f79251141..bf99dcfd40c4 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index d0d44f543d77..0a2ab6cb18db 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -25,7 +25,6 @@
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "proto.h"
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 533899a4a1a1..83d6c53d6d4d 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -18,7 +18,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_marvel.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index 702292af2225..e1bee8f84c58 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 3af4f94113e1..7690dfd57cb6 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 32850e45834b..53adf43dcd44 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -40,7 +40,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_irongate.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index b106f327f765..47f3ce4f719a 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -24,7 +24,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index b76f65d0e8b5..b5846ffdadce 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -21,7 +21,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_mcpcia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index d33074011960..4b1c8d85c4f0 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 4d85eaeb44aa..94046f9aea08 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_polaris.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 3cf0d32da5d8..930005b2f630 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -21,7 +21,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_t2.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index a6bdc1da47ad..7c420d8dac53 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index 17cc203176c8..dd9de84b630c 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index e230c6864088..9e2adb69bc74 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -21,7 +21,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index c8390d8de140..b1f3b4fcf99b 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -26,7 +26,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 2191bde161fd..2c54d707142a 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -20,7 +20,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_wildfire.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index f6b9664ac504..49754e07e04f 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -121,10 +121,10 @@ dik_show_code(unsigned int *pc)
}
static void
-dik_show_trace(unsigned long *sp)
+dik_show_trace(unsigned long *sp, const char *loglvl)
{
long i = 0;
- printk("Trace:\n");
+ printk("%sTrace:\n", loglvl);
while (0x1ff8 & (unsigned long) sp) {
extern char _stext[], _etext[];
unsigned long tmp = *sp;
@@ -133,24 +133,24 @@ dik_show_trace(unsigned long *sp)
continue;
if (tmp >= (unsigned long) &_etext)
continue;
- printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
+ printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
if (i > 40) {
- printk(" ...");
+ printk("%s ...", loglvl);
break;
}
}
- printk("\n");
+ printk("%s\n", loglvl);
}
static int kstack_depth_to_print = 24;
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
unsigned long *stack;
int i;
/*
- * debugging aid: "show_stack(NULL);" prints the
+ * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
* back trace for this cpu.
*/
if(sp==NULL)
@@ -163,14 +163,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if ((i % 4) == 0) {
if (i)
pr_cont("\n");
- printk(" ");
+ printk("%s ", loglvl);
} else {
pr_cont(" ");
}
pr_cont("%016lx", *stack++);
}
pr_cont("\n");
- dik_show_trace(sp);
+ dik_show_trace(sp, loglvl);
}
void
@@ -184,7 +184,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
dik_show_regs(regs, r9_15);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- dik_show_trace((unsigned long *)(regs+1));
+ dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
dik_show_code((unsigned int *)regs->pc);
if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
@@ -625,7 +625,7 @@ got_exception:
printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
dik_show_code((unsigned int *)pc);
- dik_show_trace((unsigned long *)(regs+1));
+ dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
printk("die_if_kernel recursion detected.\n");
@@ -957,12 +957,12 @@ give_sigsegv:
si_code = SEGV_ACCERR;
else {
struct mm_struct *mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (find_vma(mm, (unsigned long)va))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
send_sig_fault(SIGSEGV, si_code, va, 0, current);
return;
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index c2d7b6d7bac7..c2303a8c2b9f 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -117,7 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -171,7 +171,7 @@ retry:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -180,14 +180,14 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/* Something tried to access memory that isn't in our memory map.
Fix it, but check if it's kernel or user first. */
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs))
goto do_sigsegv;
@@ -211,14 +211,14 @@ retry:
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 667cd21393b5..3c42b3147fd6 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -24,7 +24,6 @@
#include <linux/gfp.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ff306246d0f8..471ef22216c4 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,6 +6,7 @@
config ARC
def_bool y
select ARC_TIMERS
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 0be19fd1a412..4c453ba96c51 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -13,7 +13,8 @@
struct task_struct;
void show_regs(struct pt_regs *regs);
-void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl);
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address);
void die(const char *str, struct pt_regs *regs, unsigned long address);
diff --git a/arch/arc/include/asm/highmem.h b/arch/arc/include/asm/highmem.h
index 1af00accb37f..6e5eafb3afdd 100644
--- a/arch/arc/include/asm/highmem.h
+++ b/arch/arc/include/asm/highmem.h
@@ -25,17 +25,8 @@
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
-#define kmap_prot PAGE_KERNEL
-
-
#include <asm/cacheflush.h>
-extern void *kmap(struct page *page);
-extern void *kmap_high(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
-extern void kunmap_high(struct page *page);
-
extern void kmap_init(void);
static inline void flush_cache_kmaps(void)
@@ -43,15 +34,6 @@ static inline void flush_cache_kmaps(void)
flush_cache_all();
}
-static inline void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-
#endif
#endif
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 12be7e1b7cc0..f1ed17edb085 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -248,9 +248,6 @@
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
@@ -282,18 +279,6 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-/*
- * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
- * and returns ptr to PTE entry corresponding to @addr
- */
-#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
- __pte_index(addr))
-
-/* No mapping of Page Tables in high mem etc, so following same as above */
-#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
-#define pte_offset_map(dir, addr) pte_offset(dir, addr)
/* Zoo of pte_xxx function */
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
@@ -332,13 +317,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
}
/*
- * All kernel related VM pages are in init's mm.
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
-
-/*
* Macro to quickly access the PGD entry, utlising the fact that some
* arch may cache the pointer to Page Directory of "current" task
* in a MMU register
@@ -390,8 +368,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#include <asm/hugepage.h>
#endif
-#include <asm-generic/pgtable.h>
-
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 315528f04bc1..8c8e5172fecd 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -90,10 +90,10 @@ fault:
if (unlikely(ret != -EFAULT))
goto fail;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
FAULT_FLAG_WRITE, NULL);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (likely(!ret))
goto again;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 1e440bbfa876..feba91c9d969 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -158,9 +158,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
/* Call-back which plugs into unwinding core to dump the stack in
* case of panic/OOPs/BUG etc
*/
-static int __print_sym(unsigned int address, void *unused)
+static int __print_sym(unsigned int address, void *arg)
{
- printk(" %pS\n", (void *)address);
+ const char *loglvl = arg;
+
+ printk("%s %pS\n", loglvl, (void *)address);
return 0;
}
@@ -217,17 +219,18 @@ static int __get_first_nonsched(unsigned int address, void *unused)
*-------------------------------------------------------------------------
*/
-noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl)
{
- pr_info("\nStack Trace:\n");
- arc_unwind_core(tsk, regs, __print_sym, NULL);
+ printk("%s\nStack Trace:\n", loglvl);
+ arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);
/* Expected by sched Code */
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- show_stacktrace(tsk, NULL);
+ show_stacktrace(tsk, NULL, loglvl);
}
/* Another API expected by scheduler, shows up in "ps" as Wait Channel
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 3393558876a9..28e8bf04b253 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -89,7 +89,7 @@ static void show_faulting_vma(unsigned long address)
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
- down_read(&active_mm->mmap_sem);
+ mmap_read_lock(active_mm);
vma = find_vma(active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
@@ -111,7 +111,7 @@ static void show_faulting_vma(unsigned long address)
} else
pr_info(" @No matching VMA found\n");
- up_read(&active_mm->mmap_sem);
+ mmap_read_unlock(active_mm);
}
static void show_ecr_verbose(struct pt_regs *regs)
@@ -240,5 +240,5 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
/* Show stack trace if this Fatality happened in kernel mode */
if (!user_mode(regs))
- show_stacktrace(current, regs);
+ show_stacktrace(current, regs, KERN_DEFAULT);
}
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 92b339c7adba..72f5405a7ec5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -107,7 +107,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -141,7 +141,7 @@ retry:
}
/*
- * Fault retry nuances, mmap_sem already relinquished by core mm
+ * Fault retry nuances, mmap_lock already relinquished by core mm
*/
if (unlikely((fault & VM_FAULT_RETRY) &&
(flags & FAULT_FLAG_ALLOW_RETRY))) {
@@ -150,7 +150,7 @@ retry:
}
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Major/minor page fault accounting
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index fc8849e4f72e..1b9f473c6369 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -6,8 +6,8 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -49,38 +49,23 @@
extern pte_t * pkmap_page_table;
static pte_t * fixmap_page_table;
-void *kmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return page_address(page);
-
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
int idx, cpu_idx;
unsigned long vaddr;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
cpu_idx = kmap_atomic_idx_push();
idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
vaddr = FIXMAP_ADDR(idx);
set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
- mk_pte(page, kmap_prot));
+ mk_pte(page, prot));
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kv)
+void kunmap_atomic_high(void *kv)
{
unsigned long kvaddr = (unsigned long)kv;
@@ -102,25 +87,14 @@ void __kunmap_atomic(void *kv)
kmap_atomic_idx_pop();
}
-
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
- pgd_t *pgd_k;
- p4d_t *p4d_k;
- pud_t *pud_k;
- pmd_t *pmd_k;
+ pmd_t *pmd_k = pmd_off_k(kvaddr);
pte_t *pte_k;
- pgd_k = pgd_offset_k(kvaddr);
- p4d_k = p4d_offset(pgd_k, kvaddr);
- pud_k = pud_offset(p4d_k, kvaddr);
- pmd_k = pmd_offset(pud_k, kvaddr);
-
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_k)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 2efaf6ca0c06..31f54bdd95f2 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -33,9 +33,9 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/entry.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 16fbf74030fe..bbac2867062d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -367,6 +367,7 @@ config ARCH_EP93XX
select CPU_ARM920T
select GENERIC_CLOCKEVENTS
select GPIOLIB
+ select HAVE_LEGACY_CLK
help
This enables support for the Cirrus EP93xx series of CPUs.
@@ -438,7 +439,6 @@ config ARCH_PXA
select ARM_CPU_SUSPEND if PM
select AUTO_ZRELADDR
select COMMON_CLK
- select CLKDEV_LOOKUP
select CLKSRC_PXA
select CLKSRC_MMIO
select TIMER_OF
@@ -477,7 +477,6 @@ config ARCH_SA1100
bool "SA1100-based"
select ARCH_MTD_XIP
select ARCH_SPARSEMEM_ENABLE
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CLKSRC_PXA
select TIMER_OF if OF
@@ -498,7 +497,6 @@ config ARCH_SA1100
config ARCH_S3C24XX
bool "Samsung S3C24XX SoCs"
select ATAGS
- select CLKDEV_LOOKUP
select CLKSRC_SAMSUNG_PWM
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
@@ -528,6 +526,7 @@ config ARCH_OMAP1
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
+ select HAVE_LEGACY_CLK
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
select NEED_MACH_MEMORY_H
@@ -702,6 +701,8 @@ source "arch/arm/mach-qcom/Kconfig"
source "arch/arm/mach-rda/Kconfig"
+source "arch/arm/mach-realtek/Kconfig"
+
source "arch/arm/mach-realview/Kconfig"
source "arch/arm/mach-rockchip/Kconfig"
@@ -739,7 +740,6 @@ source "arch/arm/mach-ux500/Kconfig"
source "arch/arm/mach-versatile/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
-source "arch/arm/plat-versatile/Kconfig"
source "arch/arm/mach-vt8500/Kconfig"
@@ -1249,7 +1249,6 @@ config HAVE_ARM_ARCH_TIMER
bool "Architected timer support"
depends on CPU_V7
select ARM_ARCH_TIMER
- select GENERIC_CLOCKEVENTS
help
This option enables support for the ARM architected timer
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f46e18a77645..26a158e35e2c 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -976,6 +976,13 @@ choice
Say Y here if you want kernel low-level debugging support
via SCIF4 on Renesas RZ/G1E (R8A7745).
+ config DEBUG_RCAR_GEN2_SCIFA2
+ bool "Kernel low-level debugging messages via SCIFA2 on R8A7742"
+ depends on ARCH_R8A7742
+ help
+ Say Y here if you want kernel low-level debugging support
+ via SCIFA2 on Renesas RZ/G1H (R8A7742).
+
config DEBUG_RMOBILE_SCIFA0
bool "Kernel low-level debugging messages via SCIFA0 on R8A73A4"
depends on ARCH_R8A73A4
@@ -1577,6 +1584,7 @@ config DEBUG_LL_INCLUDE
default "debug/renesas-scif.S" if DEBUG_RCAR_GEN2_SCIF1
default "debug/renesas-scif.S" if DEBUG_RCAR_GEN2_SCIF2
default "debug/renesas-scif.S" if DEBUG_RCAR_GEN2_SCIF4
+ default "debug/renesas-scif.S" if DEBUG_RCAR_GEN2_SCIFA2
default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA0
default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA1
default "debug/renesas-scif.S" if DEBUG_RMOBILE_SCIFA4
@@ -1696,6 +1704,7 @@ config DEBUG_UART_PHYS
default 0xe4007000 if DEBUG_HIP04_UART
default 0xe6c40000 if DEBUG_RMOBILE_SCIFA0
default 0xe6c50000 if DEBUG_RMOBILE_SCIFA1
+ default 0xe6c60000 if DEBUG_RCAR_GEN2_SCIFA2
default 0xe6c80000 if DEBUG_RMOBILE_SCIFA4
default 0xe6e58000 if DEBUG_RCAR_GEN2_SCIF2
default 0xe6e60000 if DEBUG_RCAR_GEN2_SCIF0
@@ -1737,6 +1746,7 @@ config DEBUG_UART_PHYS
DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \
DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF1 || \
DEBUG_RCAR_GEN2_SCIF2 || DEBUG_RCAR_GEN2_SCIF4 || \
+ DEBUG_RCAR_GEN2_SCIFA2 || \
DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
DEBUG_S3C64XX_UART || \
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7d5cd0f85461..59fde2d598d8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -45,12 +45,10 @@ endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__ARMEB__
-AS += -EB
KBUILD_LDFLAGS += -EB
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__ARMEL__
-AS += -EL
KBUILD_LDFLAGS += -EL
endif
@@ -148,6 +146,8 @@ head-y := arch/arm/kernel/head$(MMUEXT).o
textofs-y := 0x00008000
# We don't want the htc bootloader to corrupt kernel during resume
textofs-$(CONFIG_PM_H1940) := 0x00108000
+# RTD1195 has Boot ROM at start of address space
+textofs-$(CONFIG_ARCH_REALTEK) := 0x00108000
# SA1111 DMA bug: we don't want the kernel to live in precious DMA-able memory
ifeq ($(CONFIG_ARCH_SA1100),y)
textofs-$(CONFIG_SA1111) := 0x00208000
@@ -208,6 +208,7 @@ machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
machine-$(CONFIG_ARCH_PXA) += pxa
machine-$(CONFIG_ARCH_QCOM) += qcom
machine-$(CONFIG_ARCH_RDA) += rda
+machine-$(CONFIG_ARCH_REALTEK) += realtek
machine-$(CONFIG_ARCH_REALVIEW) += realview
machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
machine-$(CONFIG_ARCH_RPC) += rpc
diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
index 40937248cebe..739f0464321e 100755
--- a/arch/arm/boot/deflate_xip_data.sh
+++ b/arch/arm/boot/deflate_xip_data.sh
@@ -56,7 +56,7 @@ trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
# substitute the data section by a compressed version
$DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
$DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes |
-gzip -9 >> "$XIPIMAGE.tmp"
+$_GZIP -9 >> "$XIPIMAGE.tmp"
# replace kernel binary
mv -f "$XIPIMAGE.tmp" "$XIPIMAGE"
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index e8dd99201397..e6a1cac0bfc7 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -55,6 +55,7 @@ dtb-$(CONFIG_SOC_SAM_V7) += \
at91-nattis-2-natte-2.dtb \
at91-sama5d27_som1_ek.dtb \
at91-sama5d27_wlsom1_ek.dtb \
+ at91-sama5d2_icp.dtb \
at91-sama5d2_ptc_ek.dtb \
at91-sama5d2_xplained.dtb \
at91-sama5d3_xplained.dtb \
@@ -181,6 +182,7 @@ dtb-$(CONFIG_ARCH_EXYNOS3) += \
exynos3250-monk.dtb \
exynos3250-rinato.dtb
dtb-$(CONFIG_ARCH_EXYNOS4) += \
+ exynos4210-i9100.dtb \
exynos4210-origen.dtb \
exynos4210-smdkv310.dtb \
exynos4210-trats.dtb \
@@ -237,6 +239,7 @@ dtb-$(CONFIG_ARCH_HIX5HD2) += \
hisi-x5hd2-dkb.dtb
dtb-$(CONFIG_ARCH_INTEGRATOR) += \
integratorap.dtb \
+ integratorap-im-pd1.dtb \
integratorcp.dtb
dtb-$(CONFIG_ARCH_IXP4XX) += \
intel-ixp42x-linksys-nslu2.dtb \
@@ -279,6 +282,7 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \
kirkwood-iomega_ix2_200.dtb \
kirkwood-is2.dtb \
kirkwood-km_kirkwood.dtb \
+ kirkwood-l-50.dtb \
kirkwood-laplug.dtb \
kirkwood-linkstation-lsqvl.dtb \
kirkwood-linkstation-lsvl.dtb \
@@ -412,6 +416,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-aristainetos2_4.dtb \
imx6dl-aristainetos2_7.dtb \
imx6dl-colibri-eval-v3.dtb \
+ imx6dl-colibri-v1_1-eval-v3.dtb \
imx6dl-cubox-i.dtb \
imx6dl-cubox-i-emmc-som-v15.dtb \
imx6dl-cubox-i-som-v15.dtb \
@@ -832,6 +837,7 @@ dtb-$(CONFIG_SOC_DRA7XX) += \
am57xx-beagle-x15.dtb \
am57xx-beagle-x15-revb1.dtb \
am57xx-beagle-x15-revc.dtb \
+ am5729-beagleboneai.dtb \
am57xx-cl-som-am57x.dtb \
am57xx-sbc-am57x.dtb \
am572x-idk.dtb \
@@ -896,6 +902,9 @@ dtb-$(CONFIG_ARCH_QCOM) += \
dtb-$(CONFIG_ARCH_RDA) += \
rda8810pl-orangepi-2g-iot.dtb \
rda8810pl-orangepi-i96.dtb
+dtb-$(CONFIG_ARCH_REALTEK) += \
+ rtd1195-horseradish.dtb \
+ rtd1195-mele-x1000.dtb
dtb-$(CONFIG_ARCH_REALVIEW) += \
arm-realview-pb1176.dtb \
arm-realview-pb11mp.dtb \
@@ -917,6 +926,7 @@ dtb-$(CONFIG_ARCH_RENESAS) += \
r7s9210-rza2mevb.dtb \
r8a73a4-ape6evm.dtb \
r8a7740-armadillo800eva.dtb \
+ r8a7742-iwg21d-q7.dtb \
r8a7743-iwg20d-q7.dtb \
r8a7743-iwg20d-q7-dbcm-ca.dtb \
r8a7743-sk-rzg1m.dtb \
@@ -1029,11 +1039,15 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32h743i-eval.dtb \
stm32h743i-disco.dtb \
stm32mp157a-avenger96.dtb \
+ stm32mp157a-dhcor-avenger96.dtb \
stm32mp157a-dk1.dtb \
+ stm32mp157a-iot-box.dtb \
+ stm32mp157a-stinger96.dtb \
stm32mp157c-dhcom-pdk2.dtb \
stm32mp157c-dk2.dtb \
stm32mp157c-ed1.dtb \
- stm32mp157c-ev1.dtb
+ stm32mp157c-ev1.dtb \
+ stm32mp157c-lxa-mc1.dtb
dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-a1000.dtb \
sun4i-a10-ba10-tvbox.dtb \
@@ -1110,6 +1124,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
sun7i-a20-olimex-som204-evb.dtb \
sun7i-a20-olimex-som204-evb-emmc.dtb \
sun7i-a20-olinuxino-lime.dtb \
+ sun7i-a20-olinuxino-lime-emmc.dtb \
sun7i-a20-olinuxino-lime2.dtb \
sun7i-a20-olinuxino-lime2-emmc.dtb \
sun7i-a20-olinuxino-micro.dtb \
@@ -1339,6 +1354,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-facebook-wedge40.dtb \
aspeed-bmc-facebook-wedge100.dtb \
aspeed-bmc-facebook-yamp.dtb \
+ aspeed-bmc-facebook-yosemitev2.dtb \
aspeed-bmc-ibm-rainier.dtb \
aspeed-bmc-intel-s2600wf.dtb \
aspeed-bmc-inspur-fp5280g2.dtb \
@@ -1347,6 +1363,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-microsoft-olympus.dtb \
aspeed-bmc-opp-lanyang.dtb \
aspeed-bmc-opp-mihawk.dtb \
+ aspeed-bmc-opp-nicole.dtb \
aspeed-bmc-opp-palmetto.dtb \
aspeed-bmc-opp-romulus.dtb \
aspeed-bmc-opp-swift.dtb \
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 5ed7f3c58c0f..7ff11d6bf0f2 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -330,9 +330,8 @@
};
};
- target-module@31000 { /* 0x44e31000, ap 25 40.0 */
+ timer1_target: target-module@31000 { /* 0x44e31000, ap 25 40.0 */
compatible = "ti,sysc-omap2-timer", "ti,sysc";
- ti,hwmods = "timer1";
reg = <0x31000 0x4>,
<0x31010 0x4>,
<0x31014 0x4>;
@@ -1117,9 +1116,8 @@
};
};
- target-module@40000 { /* 0x48040000, ap 22 1e.0 */
+ timer2_target: target-module@40000 { /* 0x48040000, ap 22 1e.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer2";
reg = <0x40000 0x4>,
<0x40010 0x4>,
<0x40014 0x4>;
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index a35f5052d76f..3b177c9c4412 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -619,3 +619,23 @@
#reset-cells = <1>;
};
};
+
+/* Preferred always-on timer for clocksource */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer1_fck>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer2_fck>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index e0b5a00e2078..dc8927f14b6c 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -169,5 +169,25 @@
status = "disabled";
};
-/include/ "am35xx-clocks.dtsi"
-/include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
+#include "am35xx-clocks.dtsi"
+#include "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
+
+/* Preferred always-on timer for clocksource */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt1_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt2_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index dba87bfaf33e..b4861f70f178 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -553,3 +553,23 @@
#reset-cells = <1>;
};
};
+
+/* Preferred always-on timer for clocksource */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer1_fck>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer2_fck>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index 49c6a872052e..0d0f9fe4a882 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -328,9 +328,8 @@
};
};
- target-module@31000 { /* 0x44e31000, ap 24 40.0 */
+ timer1_target: target-module@31000 { /* 0x44e31000, ap 24 40.0 */
compatible = "ti,sysc-omap2-timer", "ti,sysc";
- ti,hwmods = "timer1";
reg = <0x31000 0x4>,
<0x31010 0x4>,
<0x31014 0x4>;
@@ -450,7 +449,6 @@
target-module@86000 { /* 0x44e86000, ap 40 70.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
- ti,hwmods = "counter_32k";
reg = <0x86000 0x4>,
<0x86004 0x4>;
reg-names = "rev", "sysc";
@@ -868,9 +866,8 @@
};
};
- target-module@40000 { /* 0x48040000, ap 18 1e.0 */
+ timer2_target: target-module@40000 { /* 0x48040000, ap 18 1e.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer2";
reg = <0x40000 0x4>,
<0x40010 0x4>,
<0x40014 0x4>;
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index c13756fa0f55..99a408a2ec6a 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -10,6 +10,7 @@
#include "dra7-mmc-iodelay.dtsi"
#include "dra72x-mmc-iodelay.dtsi"
#include "am57xx-idk-common.dtsi"
+#include "dra7-ipu-dsp-common.dtsi"
/ {
model = "TI AM5718 IDK";
@@ -20,6 +21,33 @@
reg = <0x0 0x80000000 0x0 0x40000000>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
leds {
compatible = "gpio-leds";
cpu0-led {
@@ -148,21 +176,19 @@
load-gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
};
-&mailbox5 {
+&ipu2 {
status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
- status = "okay";
- };
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
- status = "okay";
- };
+ memory-region = <&ipu2_memory_region>;
};
-&mailbox6 {
+&ipu1 {
status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
- status = "okay";
- };
+ memory-region = <&ipu1_memory_region>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_memory_region>;
};
&pcie1_rc {
diff --git a/arch/arm/boot/dts/am5729-beagleboneai.dts b/arch/arm/boot/dts/am5729-beagleboneai.dts
new file mode 100644
index 000000000000..9877d7709d41
--- /dev/null
+++ b/arch/arm/boot/dts/am5729-beagleboneai.dts
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014-2019 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+/dts-v1/;
+
+#include "dra74x.dtsi"
+#include "am57xx-commercial-grade.dtsi"
+#include "dra74x-mmc-iodelay.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/dra.h>
+
+/ {
+ model = "BeagleBoard.org BeagleBone AI";
+ compatible = "beagle,am5729-beagleboneai", "ti,am5728",
+ "ti,dra742", "ti,dra74", "ti,dra7";
+
+ aliases {
+ rtc0 = &tps659038_rtc;
+ rtc1 = &rtc;
+ display0 = &hdmi_conn;
+ };
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp2_memory_region: dsp2-memory@9f000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9f000000 0x0 0x800000>;
+ reusable;
+ status = "okay";
+ };
+
+ };
+
+ vdd_adc: gpioregulator-vdd_adc {
+ compatible = "regulator-gpio";
+ regulator-name = "vdd_adc";
+ vin-supply = <&vdd_5v>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ gpios = <&gpio3 27 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0
+ 3300000 1>;
+ };
+
+ vdd_5v: fixedregulator-vdd_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vtt_fixed: fixedregulator-vtt {
+ /* TPS51200 */
+ compatible = "regulator-fixed";
+ regulator-name = "vtt_fixed";
+ vin-supply = <&vdd_ddr>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ label = "beaglebone:green:usr0";
+ gpios = <&gpio3 17 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led1 {
+ label = "beaglebone:green:usr1";
+ gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led2 {
+ label = "beaglebone:green:usr2";
+ gpios = <&gpio3 15 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "cpu";
+ default-state = "off";
+ };
+
+ led3 {
+ label = "beaglebone:green:usr3";
+ gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ led4 {
+ label = "beaglebone:green:usr4";
+ gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "netdev";
+ default-state = "off";
+ };
+ };
+
+ hdmi_conn: connector@0 {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_encoder_out>;
+ };
+ };
+ };
+
+ hdmi_enc: encoder@0 {
+ /* "ti,tpd12s016" software compatible with "ti,tpd12s015"
+ * no need for individual driver
+ */
+ compatible = "ti,tpd12s015";
+ gpios = <0>,
+ <0>,
+ <&gpio7 12 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+
+ port@0 {
+ reg = <0x0>;
+
+ hdmi_encoder_in: endpoint@0 {
+ remote-endpoint = <&hdmi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <0x1>;
+
+ hdmi_encoder_out: endpoint@0 {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+
+ emmc_pwrseq: emmc_pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio5 7 GPIO_ACTIVE_LOW>;
+ };
+
+ brcmf_pwrseq: brcmf_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>, /* BT-REG-ON */
+ <&gpio3 18 GPIO_ACTIVE_LOW>; /* WL-REG-ON */
+ };
+
+ extcon_usb1: extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ ti,enable-id-detection;
+ id-gpio = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ tps659038: tps659038@58 {
+ compatible = "ti,tps659038";
+ reg = <0x58>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ ti,system-power-controller;
+ ti,palmas-override-powerhold;
+
+ tps659038_pmic {
+ compatible = "ti,tps659038-pmic";
+
+ smps12-in-supply = <&vdd_5v>;
+ smps3-in-supply = <&vdd_5v>;
+ smps45-in-supply = <&vdd_5v>;
+ smps6-in-supply = <&vdd_5v>;
+ smps7-in-supply = <&vdd_5v>;
+ mps3-in-supply = <&vdd_5v>;
+ smps8-in-supply = <&vdd_5v>;
+ smps9-in-supply = <&vdd_5v>;
+ ldo1-in-supply = <&vdd_5v>;
+ ldo2-in-supply = <&vdd_5v>;
+ ldo3-in-supply = <&vdd_5v>;
+ ldo4-in-supply = <&vdd_5v>;
+ ldo9-in-supply = <&vdd_5v>;
+ ldoln-in-supply = <&vdd_5v>;
+ ldousb-in-supply = <&vdd_5v>;
+ ldortc-in-supply = <&vdd_5v>;
+
+ regulators {
+ vdd_mpu: smps12 {
+ /* VDD_MPU */
+ regulator-name = "smps12";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_ddr: smps3 {
+ /* VDD_DDR EMIF1 EMIF2 */
+ regulator-name = "smps3";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_dspeve: smps45 {
+ /* VDD_DSPEVE on AM572 */
+ regulator-name = "smps45";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_gpu: smps6 {
+ /* VDD_GPU */
+ regulator-name = "smps6";
+ regulator-min-microvolt = < 850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_core: smps7 {
+ /* VDD_CORE */
+ regulator-name = "smps7";
+ regulator-min-microvolt = < 850000>; /*** 1.15V */
+ regulator-max-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_iva: smps8 {
+ /* VDD_IVAHD */ /*** 1.06V */
+ regulator-name = "smps8";
+ };
+
+ vdd_3v3: smps9 {
+ /* VDD_3V3 */
+ regulator-name = "smps9";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_sd: ldo1 {
+ /* VDDSHV8 - VSDMMC */
+ regulator-name = "ldo1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vdd_1v8: ldo2 {
+ /* VDDSH18V */
+ regulator-name = "ldo2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8_phy_ldo3: ldo3 {
+ /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
+ regulator-name = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8_phy_ldo4: ldo4 {
+ /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
+ regulator-name = "ldo4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ /* LDO5-8 unused */
+
+ vdd_rtc: ldo9 {
+ /* VDD_RTC */
+ regulator-name = "ldo9";
+ regulator-min-microvolt = < 840000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_1v8_pll: ldoln {
+ /* VDDA_1V8_PLL */
+ regulator-name = "ldoln";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldousb_reg: ldousb {
+ /* VDDA_3V_USB: VDDA_USBHS33 */
+ regulator-name = "ldousb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ ldortc_reg: ldortc {
+ /* VDDA_RTC */
+ regulator-name = "ldortc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ regen1: regen1 {
+ /* VDD_3V3_ON */
+ regulator-name = "regen1";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ regen2: regen2 {
+ /* Needed for PMIC internal resource */
+ regulator-name = "regen2";
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+
+ tps659038_rtc: tps659038_rtc {
+ compatible = "ti,palmas-rtc";
+ interrupt-parent = <&tps659038>;
+ interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ };
+
+ tps659038_pwr_button: tps659038_pwr_button {
+ compatible = "ti,palmas-pwrbutton";
+ interrupt-parent = <&tps659038>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ wakeup-source;
+ ti,palmas-long-press-seconds = <12>;
+ };
+
+ tps659038_gpio: tps659038_gpio {
+ compatible = "ti,palmas-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+ /* STMPE811 touch screen controller */
+ stmpe811@41 {
+ compatible = "st,stmpe811";
+ reg = <0x41>;
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio2>;
+ interrupt-controller;
+ id = <0>;
+ blocks = <0x5>;
+ irq-trigger = <0x1>;
+ st,mod-12b = <1>; /* 12-bit ADC */
+ st,ref-sel = <0>; /* internal ADC reference */
+ st,adc-freq = <1>; /* 3.25 MHz ADC clock speed */
+			st,sample-time = <4>;	/* ADC conversion time: 80 clocks */
+
+ stmpe_adc {
+ compatible = "st,stmpe-adc";
+ st,norequest-mask = <0x00>; /* mask any channels to be used by touchscreen */
+ adc0: iio-device@0 {
+ #io-channel-cells = <1>;
+ iio-channels = <&adc0 4>, <&adc0 1>, <&adc0 2>, <&adc0 3>, <&adc0 4>, <&adc0 5>, <&adc0 6>;
+ iio-channel-names = "AIN0_P9_39", "AIN1_P9_40", "AIN2_P9_37", "AIN3_P9_38",
+ "AIN4_P9_33", "AIN5_P9_36", "AIN6_P9_35";
+ };
+ };
+
+ stmpe_touchscreen {
+ status = "disabled";
+ compatible = "st,stmpe-ts";
+ /* 8 sample average control */
+ st,ave-ctrl = <3>;
+ /* 7 length fractional part in z */
+ st,fraction-z = <7>;
+ /*
+ * 50 mA typical 80 mA max touchscreen drivers
+ * current limit value
+ */
+ st,i-drive = <1>;
+ /* 1 ms panel driver settling time */
+ st,settling = <3>;
+ /* 5 ms touch detect interrupt delay */
+ st,touch-det-delay = <5>;
+ };
+
+ stmpe_gpio {
+ compatible = "st,stmpe-gpio";
+ };
+
+ stmpe_pwm {
+ compatible = "st,stmpe-pwm";
+ #pwm-cells = <2>;
+ };
+ };
+};
+
+&mcspi3 {
+ status = "okay";
+ ti,pindir-d0-out-d1-in;
+
+ sn65hvs882: sn65hvs882@0 {
+ compatible = "pisosr-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpol;
+ };
+};
+
+&cpu0 {
+ vdd-supply = <&vdd_mpu>;
+ voltage-tolerance = <1>;
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&davinci_mdio {
+ reset-gpios = <&gpio2 23 GPIO_ACTIVE_LOW>;
+ reset-delay-us = <2>;
+
+ phy0: ethernet-phy@1 {
+ reg = <4>;
+ eee-broken-100tx;
+ eee-broken-1000t;
+ };
+};
+
+&mac {
+ slaves = <1>;
+ status = "okay";
+};
+
+&cpsw_emac0 {
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii";
+};
+
+&ocp {
+ pruss1_shmem: pruss_shmem@4b200000 {
+ status = "okay";
+ compatible = "ti,pruss-shmem";
+ reg = <0x4b200000 0x020000>;
+ };
+
+ pruss2_shmem: pruss_shmem@4b280000 {
+ status = "okay";
+ compatible = "ti,pruss-shmem";
+ reg = <0x4b280000 0x020000>;
+ };
+};
+
+&mmc1 {
+ status = "okay";
+ vmmc-supply = <&vdd_3v3>;
+ vqmmc-supply = <&vdd_sd>;
+ bus-width = <4>;
+ cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins_default>;
+};
+
+&mmc2 {
+ status = "okay";
+ vmmc-supply = <&vdd_1v8>;
+ vqmmc-supply = <&vdd_1v8>;
+ bus-width = <8>;
+ ti,non-removable;
+ non-removable;
+ mmc-pwrseq = <&emmc_pwrseq>;
+
+ ti,needs-special-reset;
+ dmas = <&sdma_xbar 47>, <&sdma_xbar 48>;
+ dma-names = "tx", "rx";
+
+};
+
+&mmc4 {
+ /* DS: Default speed (DS) up to 25 MHz, including 1- and 4-bit modes (3.3 V signaling). */
+ /* HS: High speed up to 50 MHz (3.3 V signaling). */
+ /* SDR12: SDR up to 25 MHz (1.8 V signaling). */
+ /* SDR25: SDR up to 50 MHz (1.8 V signaling). */
+ /* SDR50: SDR up to 100 MHz (1.8 V signaling). */
+ /* SDR104: SDR up to 208 MHz (1.8 V signaling) */
+ /* DDR50: DDR up to 50 MHz (1.8 V signaling). */
+ status = "okay";
+
+ ti,needs-special-reset;
+ vmmc-supply = <&vdd_3v3>;
+ cap-power-off-card;
+ keep-power-in-suspend;
+ bus-width = <4>;
+ ti,non-removable;
+ non-removable;
+ no-1-8-v;
+ max-frequency = <24000000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mmc-pwrseq = <&brcmf_pwrseq>;
+
+ brcmf: wifi@1 {
+ status = "okay";
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+
+ brcm,sd-head-align = <4>;
+ brcm,sd_head_align = <4>;
+ brcm,sd_sgentry_align = <512>;
+
+ interrupt-parent = <&gpio3>;
+ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "host-wake";
+ };
+};
+
+&usb2_phy1 {
+ phy-supply = <&ldousb_reg>;
+};
+
+&usb2_phy2 {
+ phy-supply = <&ldousb_reg>;
+};
+
+&usb1 {
+ status = "okay";
+ dr_mode = "otg";
+};
+
+&omap_dwc3_1 {
+ extcon = <&extcon_usb1>;
+};
+
+&usb2 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&dss {
+ status = "okay";
+ vdda_video-supply = <&vdd_1v8_pll>;
+};
+
+&hdmi {
+ status = "okay";
+ vdda-supply = <&vdd_1v8_phy_ldo4>;
+
+ port {
+ hdmi_out: endpoint {
+ remote-endpoint = <&hdmi_encoder_in>;
+ };
+ };
+};
+
+&bandgap {
+ status = "okay";
+};
+
+&mailbox1 {
+ status = "okay";
+};
+
+&mailbox2 {
+ status = "okay";
+};
+
+&mailbox3 {
+ status = "okay";
+};
+
+&mailbox4 {
+ status = "okay";
+};
+
+&mailbox5 {
+ status = "okay";
+};
+
+&mailbox6 {
+ status = "okay";
+};
+
+&mailbox7 {
+ status = "okay";
+};
+
+&mailbox8 {
+ status = "okay";
+};
+
+&mailbox9 {
+ status = "okay";
+};
+
+&mailbox10 {
+ status = "okay";
+};
+
+&mailbox11 {
+ status = "okay";
+};
+
+&mailbox12 {
+ status = "okay";
+};
+
+&mailbox13 {
+ status = "okay";
+};
+
+&cpu_alert0 {
+ temperature = <55000>; /* milliCelsius */
+};
+
+&cpu_crit {
+ temperature = <85000>; /* milliCelsius */
+};
+
+&gpu_crit {
+ temperature = <85000>; /* milliCelsius */
+};
+
+&core_crit {
+ temperature = <85000>; /* milliCelsius */
+};
+
+&dspeve_crit {
+ temperature = <85000>; /* milliCelsius */
+};
+
+&iva_crit {
+ temperature = <85000>; /* milliCelsius */
+};
+
+&sata {
+ status = "disabled";
+};
+
+&sata_phy {
+ status = "disabled";
+};
+
+/* bluetooth */
+&uart6 {
+ status = "okay";
+};
+
+/* cape header stuff */
+&i2c4 {
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&cpu0_opp_table {
+ opp_slow-500000000 {
+ opp-shared;
+ };
+};
diff --git a/arch/arm/boot/dts/am572x-idk-common.dtsi b/arch/arm/boot/dts/am572x-idk-common.dtsi
index ddf123620e96..37ce2d7c4173 100644
--- a/arch/arm/boot/dts/am572x-idk-common.dtsi
+++ b/arch/arm/boot/dts/am572x-idk-common.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include "am57xx-idk-common.dtsi"
+#include "dra74-ipu-dsp-common.dtsi"
/ {
memory@0 {
@@ -13,6 +14,40 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp2_memory_region: dsp2-memory@9f000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9f000000 0x0 0x800000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
status-leds {
compatible = "gpio-leds";
cpu0-led {
@@ -147,22 +182,22 @@
gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
};
-&mailbox5 {
+&ipu2 {
status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
- status = "okay";
- };
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
- status = "okay";
- };
+ memory-region = <&ipu2_memory_region>;
};
-&mailbox6 {
+&ipu1 {
status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
- status = "okay";
- };
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
- status = "okay";
- };
+ memory-region = <&ipu1_memory_region>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_memory_region>;
+};
+
+&dsp2 {
+ status = "okay";
+ memory-region = <&dsp2_memory_region>;
};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 565675354de4..94135fc5dd44 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -7,6 +7,7 @@
#include "am5728.dtsi"
#include "am57xx-commercial-grade.dtsi"
#include "dra74x-mmc-iodelay.dtsi"
+#include "dra74-ipu-dsp-common.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -50,6 +51,40 @@
regulator-boot-on;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp2_memory_region: dsp2-memory@9f000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9f000000 0x0 0x800000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
vdd_3v3: fixedregulator-vdd_3v3 {
compatible = "regulator-fixed";
regulator-name = "vdd_3v3";
@@ -584,22 +619,22 @@
rx-num-evt = <32>;
};
-&mailbox5 {
+&ipu2 {
status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
- status = "okay";
- };
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
- status = "okay";
- };
+ memory-region = <&ipu2_memory_region>;
};
-&mailbox6 {
+&ipu1 {
status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
- status = "okay";
- };
- mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
- status = "okay";
- };
+ memory-region = <&ipu1_memory_region>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_memory_region>;
+};
+
+&dsp2 {
+ status = "okay";
+ memory-region = <&dsp2_memory_region>;
};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index a3ff1237d1fa..2c0aab352b44 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -35,6 +35,16 @@
regulator-boot-on;
};
+ v1_2d: fixedregulator-v1_2d {
+ compatible = "regulator-fixed";
+ regulator-name = "V1_2D";
+ vin-supply = <&vmain>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vtt_fixed: fixedregulator-vtt {
/* TPS51200 */
compatible = "regulator-fixed";
@@ -139,6 +149,12 @@
};
};
};
+
+ src_clk_x1: src_clk_x1 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <20000000>;
+ };
};
&dra7_pmx_core {
@@ -378,6 +394,32 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ dsi_bridge: tc358778@e {
+ compatible = "toshiba,tc358778", "toshiba,tc358768";
+ reg = <0xe>;
+ status = "disabled";
+
+ clocks = <&src_clk_x1>;
+ clock-names = "refclk";
+
+ vddc-supply = <&v1_2d>;
+ vddmipi-supply = <&v1_2d>;
+ vddio-supply = <&v3_3d>;
+
+ dsi_bridge_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ rgb_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ data-lines = <24>;
+ };
+ };
+ };
+ };
};
&mcspi3 {
@@ -543,4 +585,20 @@
&dss {
status = "okay";
+
+ vdda_video-supply = <&ldoln_reg>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ dpi_out: endpoint {
+ remote-endpoint = <&rgb_in>;
+ data-lines = <24>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index c15f5e92f97f..0b8c2a64b36f 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -114,7 +114,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <31>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
@@ -124,7 +123,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <32>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 2932a29ae272..9805e507c695 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -236,7 +236,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
@@ -247,7 +246,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index e038abc0c6b4..348116501aa2 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -153,7 +153,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
@@ -164,7 +163,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index b1b86934c688..e0b7c2099831 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -108,7 +108,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
@@ -119,7 +118,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
@@ -130,7 +128,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
@@ -141,7 +138,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&coreclk 0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/aspeed-ast2600-evb.dts b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
index 4afa8662c4e8..8d0f4656aa05 100644
--- a/arch/arm/boot/dts/aspeed-ast2600-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2600-evb.dts
@@ -213,3 +213,7 @@
&i2c15 {
status = "okay";
};
+
+&fsim0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
index 5d7cbd9164d4..2d44d9ad4e40 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
@@ -5,6 +5,7 @@
#include "aspeed-g5.dtsi"
#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
/ {
model = "Facebook TiogaPass BMC";
@@ -112,13 +113,76 @@
&kcs2 {
// BMC KCS channel 2
status = "okay";
- kcs_addr = <0xca8>;
+ aspeed,lpc-io-reg = <0xca8>;
};
&kcs3 {
// BMC KCS channel 3
status = "okay";
- kcs_addr = <0xca2>;
+ aspeed,lpc-io-reg = <0xca2>;
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names =
+ /*A0-A7*/ "BMC_CPLD_FPGA_SEL","","","","","","","",
+ /*B0-B7*/ "","BMC_DEBUG_EN","","","","BMC_PPIN","PS_PWROK",
+ "IRQ_PVDDQ_GHJ_VRHOT_LVT3",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "BIOS_MRC_DEBUG_MSG_DIS","BOARD_REV_ID0","",
+ "BOARD_REV_ID1","IRQ_DIMM_SAVE_LVT3","BOARD_REV_ID2",
+ "CPU_ERR0_LVT3_BMC","CPU_ERR1_LVT3_BMC",
+ /*E0-E7*/ "RESET_BUTTON","RESET_OUT","POWER_BUTTON",
+ "POWER_OUT","NMI_BUTTON","","CPU0_PROCHOT_LVT3_ BMC",
+ "CPU1_PROCHOT_LVT3_ BMC",
+ /*F0-F7*/ "IRQ_PVDDQ_ABC_VRHOT_LVT3","",
+ "IRQ_PVCCIN_CPU0_VRHOT_LVC3",
+ "IRQ_PVCCIN_CPU1_VRHOT_LVC3",
+ "IRQ_PVDDQ_KLM_VRHOT_LVT3","","P3VBAT_BRIDGE_EN","",
+ /*G0-G7*/ "CPU_ERR2_LVT3","CPU_CATERR_LVT3","PCH_BMC_THERMTRIP",
+ "CPU0_SKTOCC_LVT3","","","","BIOS_SMI_ACTIVE",
+ /*H0-H7*/ "LED_POST_CODE_0","LED_POST_CODE_1","LED_POST_CODE_2",
+ "LED_POST_CODE_3","LED_POST_CODE_4","LED_POST_CODE_5",
+ "LED_POST_CODE_6","LED_POST_CODE_7",
+ /*I0-I7*/ "CPU0_FIVR_FAULT_LVT3","CPU1_FIVR_FAULT_LVT3",
+ "FORCE_ADR","UV_ADR_TRIGGER_EN","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "IRQ_UV_DETECT","IRQ_OC_DETECT","HSC_TIMER_EXP","",
+ "MEM_THERM_EVENT_PCH","PMBUS_ALERT_BUF_EN","","",
+ /*M0-M7*/ "CPU0_RC_ERROR","CPU1_RC_ERROR","","OC_DETECT_EN",
+ "CPU0_THERMTRIP_LATCH_LVT3",
+ "CPU1_THERMTRIP_LATCH_LVT3","","",
+ /*N0-N7*/ "","","","CPU_MSMI_LVT3","","BIOS_SPI_BMC_CTRL","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "BOARD_SKU_ID0","BOARD_SKU_ID1","BOARD_SKU_ID2",
+ "BOARD_SKU_ID3","BOARD_SKU_ID4","BMC_PREQ",
+ "BMC_PWR_DEBUG","RST_RSMRST",
+ /*Q0-Q7*/ "","","","","UARTSW_LSB","UARTSW_MSB",
+ "POST_CARD_PRES_BMC","PE_BMC_WAKE",
+ /*R0-R7*/ "","","BMC_TCK_MUX_SEL","BMC_PRDY",
+ "BMC_XDP_PRSNT_IN","RST_BMC_PLTRST_BUF","SLT_CFG0",
+ "SLT_CFG1",
+ /*S0-S7*/ "THROTTLE","BMC_READY","","HSC_SMBUS_SWITCH_EN","",
+ "","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","BMC_FAULT","","",
+ /*V0-V7*/ "","","","FAST_PROCHOT_EN","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","GLOBAL_RST_WARN",
+ "CPU0_MEMABC_MEMHOT_LVT3_BMC",
+ "CPU0_MEMDEF_MEMHOT_LVT3_BMC",
+ "CPU1_MEMGHJ_MEMHOT_LVT3_BMC",
+ "CPU1_MEMKLM_MEMHOT_LVT3_BMC",
+ /*Y0-Y7*/ "SIO_S3","SIO_S5","BMC_JTAG_SEL","SIO_ONCONTROL","",
+ "","","",
+ /*Z0-Z7*/ "","SIO_POWER_GOOD","IRQ_PVDDQ_DEF_VRHOT_LVT3","",
+ "","","","",
+ /*AA0-AA7*/ "CPU1_SKTOCC_LVT3","IRQ_SML1_PMBUS_ALERT",
+ "SERVER_POWER_LED","","PECI_MUX_SELECT","UV_HIGH_SET",
+ "","POST_COMPLETE",
+ /*AB0-AB7*/ "IRQ_HSC_FAULT","OCP_MEZZA_PRES","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
};
&mac0 {
@@ -368,6 +432,11 @@
&i2c4 {
status = "okay";
// BMC Debug Header
+ ipmb0@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
};
&i2c5 {
@@ -449,6 +518,11 @@
&i2c9 {
status = "okay";
//USB Debug Connector
+ ipmb0@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
};
&pwm_tacho {
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-yosemitev2.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-yosemitev2.dts
new file mode 100644
index 000000000000..8864e9c312a8
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-yosemitev2.dts
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2018 Facebook Inc.
+/dts-v1/;
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/i2c/i2c.h>
+
+/ {
+ model = "Facebook Yosemitev2 BMC";
+ compatible = "facebook,yosemitev2-bmc", "aspeed,ast2500";
+ aliases {
+ serial4 = &uart5;
+ };
+ chosen {
+ stdout-path = &uart5;
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ iio-hwmon {
+ // VOLTAGE SENSOR
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>,
+ <&adc 8>, <&adc 9>, <&adc 10>, <&adc 11>,
+ <&adc 12>, <&adc 13>, <&adc 14>, <&adc 15>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+#include "openbmc-flash-layout.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ };
+};
+&uart1 {
+ // Host1 Console
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default>;
+};
+
+&uart2 {
+ // Host2 Console
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd2_default
+ &pinctrl_rxd2_default>;
+
+};
+
+&uart3 {
+ // Host3 Console
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd3_default
+ &pinctrl_rxd3_default>;
+};
+
+&uart4 {
+ // Host4 Console
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd4_default
+ &pinctrl_rxd4_default>;
+};
+
+&uart5 {
+ // BMC Console
+ status = "okay";
+};
+
+&vuart {
+ // Virtual UART
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+ use-ncsi;
+ mlx,multi-host;
+};
+
+&adc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default
+ &pinctrl_adc1_default
+ &pinctrl_adc2_default
+ &pinctrl_adc3_default
+ &pinctrl_adc4_default
+ &pinctrl_adc5_default
+ &pinctrl_adc6_default
+ &pinctrl_adc7_default
+ &pinctrl_adc8_default
+ &pinctrl_adc9_default
+ &pinctrl_adc10_default
+ &pinctrl_adc11_default
+ &pinctrl_adc12_default
+ &pinctrl_adc13_default
+ &pinctrl_adc14_default
+ &pinctrl_adc15_default>;
+};
+
+&i2c1 {
+ //Host1 IPMB bus
+ status = "okay";
+ multi-master;
+ ipmb1@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
+};
+
+&i2c3 {
+ //Host2 IPMB bus
+ status = "okay";
+ multi-master;
+ ipmb3@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
+};
+
+&i2c5 {
+ //Host3 IPMB bus
+ status = "okay";
+ multi-master;
+ ipmb5@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
+};
+
+&i2c7 {
+ //Host4 IPMB bus
+ status = "okay";
+ multi-master;
+ ipmb7@10 {
+ compatible = "ipmb-dev";
+ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>;
+ i2c-protocol;
+ };
+};
+
+&i2c8 {
+ status = "okay";
+ //FRU EEPROM
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+};
+
+&i2c9 {
+ status = "okay";
+ tmp421@4e {
+ //INLET TEMP
+ compatible = "ti,tmp421";
+ reg = <0x4e>;
+ };
+ //OUTLET TEMP
+ tmp421@4f {
+ compatible = "ti,tmp421";
+ reg = <0x4f>;
+ };
+};
+
+&i2c10 {
+ status = "okay";
+ //HSC
+ adm1278@40 {
+ compatible = "adi,adm1278";
+ reg = <0x40>;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+ //MEZZ_TEMP_SENSOR
+ tmp421@1f {
+ compatible = "ti,tmp421";
+ reg = <0x1f>;
+ };
+};
+
+&i2c12 {
+ status = "okay";
+ //MEZZ_FRU
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+};
+
+&pwm_tacho {
+ status = "okay";
+ //FSC
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default>;
+ fan@0 {
+ reg = <0x00>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x00>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ aspeed,fan-tach-ch = /bits/ 8 <0x01>;
+ };
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index 6232cd726a7f..bdfe342bf7c5 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -4,6 +4,7 @@
#include "aspeed-g6.dtsi"
#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/leds/leds-pca955x.h>
/ {
model = "Rainier";
@@ -32,6 +33,11 @@
no-map;
reg = <0xB8000000 0x04000000>; /* 64M */
};
+
+ vga_memory: region@bf000000 {
+ no-map;
+ reg = <0xbf000000 0x01000000>; /* 16M */
+ };
};
gpio-keys {
@@ -64,6 +70,40 @@
};
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "cfam-reset","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3",
+ "","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "","","","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
&emmc_controller {
status = "okay";
};
@@ -72,6 +112,88 @@
status = "okay";
};
+&fsim0 {
+ status = "okay";
+
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cfam@0,0 {
+ reg = <0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <0>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ0: occ {
+ compatible = "ibm,p9-occ";
+ };
+ };
+
+ fsi_hub0: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+};
+
+&fsi_hub0 {
+ cfam@1,0 {
+ reg = <1 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <1>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ1: occ {
+ compatible = "ibm,p9-occ";
+ };
+ };
+
+ fsi_hub1: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+};
+
+/* Legacy OCC numbering (to get rid of when userspace is fixed) */
+&fsi_occ0 {
+ reg = <1>;
+};
+
+&fsi_occ1 {
+ reg = <2>;
+};
+
&ibt {
status = "okay";
};
@@ -269,66 +391,82 @@
gpio@0 {
reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@1 {
reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@2 {
reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@3 {
reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@4 {
reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@5 {
reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@6 {
reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@7 {
reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@8 {
reg = <8>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@9 {
reg = <9>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@10 {
reg = <10>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@11 {
reg = <11>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@12 {
reg = <12>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@13 {
reg = <13>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@14 {
reg = <14>;
+ type = <PCA955X_TYPE_GPIO>;
};
gpio@15 {
reg = <15>;
+ type = <PCA955X_TYPE_GPIO>;
};
};
@@ -386,21 +524,6 @@
&i2c9 {
status = "okay";
- ir35221@42 {
- compatible = "infineon,ir35221";
- reg = <0x42>;
- };
-
- ir35221@43 {
- compatible = "infineon,ir35221";
- reg = <0x43>;
- };
-
- ir35221@44 {
- compatible = "infineon,ir35221";
- reg = <0x44>;
- };
-
tmp423a@4c {
compatible = "ti,tmp423";
reg = <0x4c>;
@@ -411,21 +534,6 @@
reg = <0x4d>;
};
- ir35221@72 {
- compatible = "infineon,ir35221";
- reg = <0x72>;
- };
-
- ir35221@73 {
- compatible = "infineon,ir35221";
- reg = <0x73>;
- };
-
- ir35221@74 {
- compatible = "infineon,ir35221";
- reg = <0x74>;
- };
-
eeprom@50 {
compatible = "atmel,24c128";
reg = <0x50>;
@@ -435,21 +543,6 @@
&i2c10 {
status = "okay";
- ir35221@42 {
- compatible = "infineon,ir35221";
- reg = <0x42>;
- };
-
- ir35221@43 {
- compatible = "infineon,ir35221";
- reg = <0x43>;
- };
-
- ir35221@44 {
- compatible = "infineon,ir35221";
- reg = <0x44>;
- };
-
tmp423a@4c {
compatible = "ti,tmp423";
reg = <0x4c>;
@@ -460,21 +553,6 @@
reg = <0x4d>;
};
- ir35221@72 {
- compatible = "infineon,ir35221";
- reg = <0x72>;
- };
-
- ir35221@73 {
- compatible = "infineon,ir35221";
- reg = <0x73>;
- };
-
- ir35221@74 {
- compatible = "infineon,ir35221";
- reg = <0x74>;
- };
-
eeprom@50 {
compatible = "atmel,24c128";
reg = <0x50>;
@@ -540,6 +618,10 @@
status = "okay";
};
+&vuart2 {
+ status = "okay";
+};
+
&lpc_ctrl {
status = "okay";
memory-region = <&flash_memory>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
index f7e935ede919..60e545b6396f 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
@@ -8,6 +8,52 @@
model = "Mihawk BMC";
compatible = "ibm,mihawk-bmc", "aspeed,ast2500";
+ aliases {
+ i2c215 = &bus6_mux215;
+ i2c216 = &bus6_mux216;
+ i2c217 = &bus6_mux217;
+ i2c218 = &bus6_mux218;
+ i2c219 = &bus6_mux219;
+ i2c220 = &bus6_mux220;
+ i2c221 = &bus6_mux221;
+ i2c222 = &bus6_mux222;
+ i2c223 = &bus7_mux223;
+ i2c224 = &bus7_mux224;
+ i2c225 = &bus7_mux225;
+ i2c226 = &bus7_mux226;
+ i2c227 = &bus7_mux227;
+ i2c228 = &bus7_mux228;
+ i2c229 = &bus7_mux229;
+ i2c230 = &bus7_mux230;
+ i2c231 = &bus9_mux231;
+ i2c232 = &bus9_mux232;
+ i2c233 = &bus9_mux233;
+ i2c234 = &bus9_mux234;
+ i2c235 = &bus9_mux235;
+ i2c236 = &bus9_mux236;
+ i2c237 = &bus9_mux237;
+ i2c238 = &bus9_mux238;
+ i2c239 = &bus10_mux239;
+ i2c240 = &bus10_mux240;
+ i2c241 = &bus10_mux241;
+ i2c242 = &bus10_mux242;
+ i2c243 = &bus10_mux243;
+ i2c244 = &bus10_mux244;
+ i2c245 = &bus10_mux245;
+ i2c246 = &bus10_mux246;
+ i2c247 = &bus12_mux247;
+ i2c248 = &bus12_mux248;
+ i2c249 = &bus12_mux249;
+ i2c250 = &bus12_mux250;
+ i2c251 = &bus13_mux251;
+ i2c252 = &bus13_mux252;
+ i2c253 = &bus13_mux253;
+ i2c254 = &bus13_mux254;
+ i2c255 = &bus13_mux255;
+ i2c256 = &bus13_mux256;
+ i2c257 = &bus13_mux257;
+ i2c258 = &bus13_mux258;
+ };
chosen {
stdout-path = &uart5;
@@ -120,35 +166,24 @@
leds {
compatible = "gpio-leds";
- fault {
+ front-fault {
retain-state-shutdown;
default-state = "keep";
gpios = <&gpio ASPEED_GPIO(AA, 0) GPIO_ACTIVE_LOW>;
};
- power {
+ power-button {
retain-state-shutdown;
default-state = "keep";
gpios = <&gpio ASPEED_GPIO(AA, 1) GPIO_ACTIVE_LOW>;
};
- rear-id {
+ front-id {
retain-state-shutdown;
default-state = "keep";
gpios = <&gpio ASPEED_GPIO(AA, 2) GPIO_ACTIVE_LOW>;
};
- rear-g {
- retain-state-shutdown;
- default-state = "keep";
- gpios = <&gpio ASPEED_GPIO(AA, 4) GPIO_ACTIVE_LOW>;
- };
-
- rear-ok {
- retain-state-shutdown;
- default-state = "keep";
- gpios = <&gpio ASPEED_GPIO(Y, 0) GPIO_ACTIVE_LOW>;
- };
fan0 {
retain-state-shutdown;
@@ -630,6 +665,54 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+
+ bus7_mux223: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus7_mux224: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus7_mux225: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus7_mux226: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ bus7_mux227: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ bus7_mux228: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ bus7_mux229: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ bus7_mux230: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
};
};
@@ -644,6 +727,54 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+
+ bus6_mux215: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus6_mux216: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus6_mux217: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus6_mux218: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ bus6_mux219: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ bus6_mux220: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ bus6_mux221: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ bus6_mux222: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
};
};
@@ -684,6 +815,30 @@
i2c-mux-idle-disconnect;
interrupt-controller;
#interrupt-cells = <2>;
+
+ bus9_mux231: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus9_mux232: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus9_mux233: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus9_mux234: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
};
pca9545@71 {
@@ -695,6 +850,30 @@
i2c-mux-idle-disconnect;
interrupt-controller;
#interrupt-cells = <2>;
+
+ bus9_mux235: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus9_mux236: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus9_mux237: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus9_mux238: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
};
};
@@ -725,6 +904,30 @@
i2c-mux-idle-disconnect;
interrupt-controller;
#interrupt-cells = <2>;
+
+ bus10_mux239: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus10_mux240: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus10_mux241: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus10_mux242: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
};
pca9545@71 {
@@ -736,6 +939,30 @@
i2c-mux-idle-disconnect;
interrupt-controller;
#interrupt-cells = <2>;
+
+ bus10_mux243: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus10_mux244: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus10_mux245: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus10_mux246: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
};
};
@@ -796,7 +1023,7 @@
interrupt-controller;
#interrupt-cells = <2>;
- i2c@0 {
+ bus12_mux247: i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
@@ -807,7 +1034,7 @@
};
};
- i2c@1 {
+ bus12_mux248: i2c@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
@@ -818,7 +1045,7 @@
};
};
- i2c@2 {
+ bus12_mux249: i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
@@ -829,7 +1056,7 @@
};
};
- i2c@3 {
+ bus12_mux250: i2c@3 {
#address-cells = <1>;
#size-cells = <0>;
reg = <3>;
@@ -857,6 +1084,53 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+ bus13_mux251: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ bus13_mux252: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ bus13_mux253: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ bus13_mux254: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+
+ bus13_mux255: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ bus13_mux256: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ bus13_mux257: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ bus13_mux258: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
};
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts b/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts
new file mode 100644
index 000000000000..91dced7e7849
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-nicole.dts
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2019 YADRO
+/dts-v1/;
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+
+/ {
+ model = "Nicole BMC";
+ compatible = "yadro,nicole-bmc", "aspeed,ast2500";
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=ttyS4,115200 earlyprintk";
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vga_memory: framebuffer@9f000000 {
+ no-map;
+ reg = <0x9f000000 0x01000000>; /* 16M */
+ };
+
+ flash_memory: region@98000000 {
+ no-map;
+ reg = <0x98000000 0x04000000>; /* 64M */
+ };
+
+ coldfire_memory: coldfire_memory@9ef00000 {
+ reg = <0x9ef00000 0x00100000>;
+ no-map;
+ };
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+
+ video_engine_memory: jpegbuffer {
+ size = <0x02000000>; /* 32M */
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ label = "platform:green:power";
+ gpios = <&gpio ASPEED_GPIO(AA, 4) GPIO_ACTIVE_HIGH>;
+ };
+
+ identify {
+ label = "platform:blue:indicator";
+ gpios = <&gpio ASPEED_GPIO(AA, 7) GPIO_ACTIVE_HIGH>;
+ };
+
+ fault {
+ label = "platform:red:fault";
+ gpios = <&gpio ASPEED_GPIO(AA, 3) GPIO_ACTIVE_HIGH>;
+ };
+
+ attention {
+ label = "platform:yellow:alarm";
+ gpios = <&gpio ASPEED_GPIO(AA, 1) GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ fsi: gpio-fsi {
+ compatible = "aspeed,ast2500-cf-fsi-master", "fsi-master";
+ #address-cells = <2>;
+ #size-cells = <0>;
+ no-gpio-delays;
+
+ memory-region = <&coldfire_memory>;
+ aspeed,sram = <&sram>;
+ aspeed,cvic = <&cvic>;
+
+ clock-gpios = <&gpio ASPEED_GPIO(AA, 0) GPIO_ACTIVE_HIGH>;
+ data-gpios = <&gpio ASPEED_GPIO(AA, 2) GPIO_ACTIVE_HIGH>;
+ mux-gpios = <&gpio ASPEED_GPIO(A, 6) GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio ASPEED_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
+ trans-gpios = <&gpio ASPEED_GPIO(P, 1) GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ checkstop {
+ label = "checkstop";
+ gpios = <&gpio ASPEED_GPIO(J, 2) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(J, 2)>;
+ };
+ };
+
+ iio-hwmon-battery {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 12>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <50000000>;
+#include "openbmc-flash-layout.dtsi"
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi1_default>;
+
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "pnor";
+ spi-max-frequency = <100000000>;
+ };
+};
+
+&lpc_ctrl {
+ status = "okay";
+ memory-region = <&flash_memory>;
+ flash = <&spi1>;
+};
+
+&uart1 {
+ /* Rear RS-232 connector */
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_txd1_default
+ &pinctrl_rxd1_default
+ &pinctrl_nrts1_default
+ &pinctrl_ndtr1_default
+ &pinctrl_ndsr1_default
+ &pinctrl_ncts1_default
+ &pinctrl_ndcd1_default
+ &pinctrl_nri1_default>;
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&mac0 {
+ status = "okay";
+
+ use-ncsi;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rmii1_default>;
+ clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>,
+ <&syscon ASPEED_CLK_MAC1RCLK>;
+ clock-names = "MACCLK", "RCLK";
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c256";
+ reg = <0x50>;
+ pagesize = <64>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+ /* CPU0 characterization connector */
+};
+
+&i2c3 {
+ status = "okay";
+ /* CLK GEN SI5338 */
+};
+
+&i2c4 {
+ status = "okay";
+ /* Voltage regulators for CPU0 */
+};
+
+&i2c5 {
+ status = "okay";
+ /* Voltage regulators for CPU1 */
+};
+
+&i2c6 {
+ status = "okay";
+
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ };
+};
+
+&i2c7 {
+ status = "okay";
+ /* CPLD */
+};
+
+&gpio {
+ gpio-line-names =
+ /*A0-A7*/ "","cfam-reset","","","","","fsi-mux","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "fsi-enable","bmc_power_up","sys_pwrok_buf",
+ "func_mode0","func_mode1","func_mode2","","",
+ /*E0-E7*/ "","ncsi_cfg","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","checkstop","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","power-button","","","","","",
+ /*P0-P7*/ "","fsi-trans","pm_rtc_adc_en","","","","","",
+ /*Q0-Q7*/ "","","","","","","","id-button",
+ /*R0-R7*/ "","software_pwrgood","","","","","","",
+ /*S0-S7*/ "","","","","","","","seq_cont",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "fsi-clock","led-attention","fsi-data","led-fault",
+ "led-power","","","led-identify",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+
+ func_mode0 {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(D, 3) GPIO_ACTIVE_HIGH>;
+ output-low;
+ };
+ func_mode1 {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(D, 4) GPIO_ACTIVE_HIGH>;
+ output-low;
+ };
+ func_mode2 {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(D, 5) GPIO_ACTIVE_HIGH>;
+ output-low;
+ };
+ seq_cont {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(S, 7) GPIO_ACTIVE_HIGH>;
+ output-low;
+ };
+ ncsi_cfg {
+ gpio-hog;
+ input;
+ gpios = <ASPEED_GPIO(E, 1) GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&vuart {
+ status = "okay";
+};
+
+&gfx {
+ status = "okay";
+ memory-region = <&gfx_memory>;
+};
+
+&pinctrl {
+ aspeed,external-nodes = <&gfx &lhc>;
+};
+
+&ibt {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
+
+&adc {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default
+ &pinctrl_adc1_default
+ &pinctrl_adc2_default
+ &pinctrl_adc3_default
+ &pinctrl_adc4_default
+ &pinctrl_adc5_default
+ &pinctrl_adc6_default
+ &pinctrl_adc7_default
+ &pinctrl_adc8_default
+ &pinctrl_adc9_default
+ &pinctrl_adc10_default
+ &pinctrl_adc11_default
+ &pinctrl_adc12_default
+ &pinctrl_adc13_default
+ &pinctrl_adc14_default
+ &pinctrl_adc15_default>;
+};
+
+&video {
+ status = "okay";
+ memory-region = <&video_engine_memory>;
+};
+
+#include "ibm-power9-dual.dtsi"
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
index edfa44fe1f75..fd2e014dae75 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
@@ -231,23 +231,52 @@
};
&gpio {
+ gpio-line-names =
+ /*A0-A7*/ "","cfam-reset","","","","","fsi-mux","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "fsi-enable","","","nic_func_mode0","nic_func_mode1","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","power-button","","","","",
+ /*J0-J7*/ "","","checkstop","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","led-fault","",
+ "led-identify","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","id-button",
+ /*R0-R7*/ "","","fsi-trans","","","led-power","","",
+ /*S0-S7*/ "","","","","","","","seq_cont",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "fsi-clock","","fsi-data","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+
nic_func_mode0 {
gpio-hog;
gpios = <ASPEED_GPIO(D, 3) GPIO_ACTIVE_HIGH>;
output-low;
- line-name = "nic_func_mode0";
};
nic_func_mode1 {
gpio-hog;
gpios = <ASPEED_GPIO(D, 4) GPIO_ACTIVE_HIGH>;
output-low;
- line-name = "nic_func_mode1";
};
seq_cont {
gpio-hog;
gpios = <ASPEED_GPIO(S, 7) GPIO_ACTIVE_HIGH>;
output-low;
- line-name = "seq_cont";
};
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
index ff49ec76fa7c..13c4aa02f4de 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-tacoma.dts
@@ -34,6 +34,59 @@
gpio-keys {
compatible = "gpio-keys";
+ checkstop {
+ label = "checkstop";
+ gpios = <&gpio0 ASPEED_GPIO(E, 3) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(E, 3)>;
+ };
+
+ ps0-presence {
+ label = "ps0-presence";
+ gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(H, 3)>;
+ };
+
+ ps1-presence {
+ label = "ps1-presence";
+ gpios = <&gpio0 ASPEED_GPIO(E, 5) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(E, 5)>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <1000>;
+
+ fan0-presence {
+ label = "fan0-presence";
+ gpios = <&pca0 4 GPIO_ACTIVE_LOW>;
+ linux,code = <4>;
+ };
+
+ fan1-presence {
+ label = "fan1-presence";
+ gpios = <&pca0 5 GPIO_ACTIVE_LOW>;
+ linux,code = <5>;
+ };
+
+ fan2-presence {
+ label = "fan2-presence";
+ gpios = <&pca0 6 GPIO_ACTIVE_LOW>;
+ linux,code = <6>;
+ };
+
+ fan3-presence {
+ label = "fan3-presence";
+ gpios = <&pca0 7 GPIO_ACTIVE_LOW>;
+ linux,code = <7>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
air-water {
label = "air-water";
gpios = <&gpio0 ASPEED_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
@@ -89,6 +142,49 @@
linux,code = <7>;
};
};
+
+ iio-hwmon-dps310 {
+ compatible = "iio-hwmon";
+ io-channels = <&dps 0>;
+ };
+
+ iio-hwmon-bmp280 {
+ compatible = "iio-hwmon";
+ io-channels = <&bmp 1>;
+ };
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "fsi-mux","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "power-button","","","checkstop","","presence-ps1","","led-rear-fault",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","presence-ps0","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "led-rear-power","led-rear-id","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "cfam-reset","","","","","","","fsi-routing",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "","","","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
};
&fmc {
@@ -132,6 +228,10 @@
use-ncsi;
};
+&emmc_controller {
+ status = "okay";
+};
+
&emmc {
status = "okay";
};
@@ -142,6 +242,9 @@
#address-cells = <2>;
#size-cells = <0>;
+ fsi-routing-gpios = <&gpio0 ASPEED_GPIO(Q, 7) GPIO_ACTIVE_HIGH>;
+ fsi-mux-gpios = <&gpio0 ASPEED_GPIO(B, 0) GPIO_ACTIVE_HIGH>;
+
cfam@0,0 {
reg = <0 0>;
#address-cells = <1>;
@@ -394,6 +497,11 @@
&i2c1 {
status = "okay";
+
+ tpm: tpm@2e {
+ compatible = "tcg,tpm-tis-i2c";
+ reg = <0x2e>;
+ };
};
&i2c2 {
@@ -774,6 +882,10 @@
status = "okay";
};
+&vuart2 {
+ status = "okay";
+};
+
&lpc_ctrl {
status = "okay";
memory-region = <&flash_memory>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
index 421aa600148b..a0f99e34ac8e 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
@@ -191,6 +191,40 @@
};
+&gpio {
+ gpio-line-names =
+ /*A0-A7*/ "","cfam-reset","","","","","fsi-mux","",
+ /*B0-B7*/ "","","","","","air-water","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "fsi-enable","","","","","","","",
+ /*E0-E7*/ "fsi-data","","","","","","","",
+ /*F0-F7*/ "","","","","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","checkstop","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "presence-ps1","","led-rear-fault","led-rear-power",
+ "led-rear-id","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","presence-ps0",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","fsi-trans","","","power-button","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "fsi-clock","","","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
&fmc {
status = "okay";
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts b/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
index bc60ec291681..4bcc82046362 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-zaius.dts
@@ -478,32 +478,61 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpioh_unbiased>;
+ gpio-line-names =
+ /*A0-A7*/ "","cfam-reset","","","","","","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "fsi-enable","","","","","led-sys-boot-status","led-attention",
+ "led-fault",
+ /*E0-E7*/ "","","","","","","","presence-pcie-e2b",
+ /*F0-F7*/ "","","","","","","","checkstop",
+ /*G0-G7*/ "fsi-clock","fsi-data","","","","","","",
+ /*H0-H7*/ "onewire0","onewire1","onewire2","onewire3","","","","",
+ /*I0-I7*/ "","","","power-button","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","iso_u164_en","","fsi-trans","",
+ /*P0-P7*/ "ncsi_mux_en_n","bmc_i2c2_sw_rst_n","","bmc_i2c5_sw_rst_n","",
+ "","fsi-mux","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","",
+ /*AA0-AA7*/ "","","led-hdd-fault","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+
line_iso_u146_en {
gpio-hog;
gpios = <ASPEED_GPIO(O, 4) GPIO_ACTIVE_HIGH>;
output-high;
- line-name = "iso_u164_en";
};
ncsi_mux_en_n {
gpio-hog;
gpios = <ASPEED_GPIO(P, 0) GPIO_ACTIVE_HIGH>;
output-low;
- line-name = "ncsi_mux_en_n";
};
line_bmc_i2c2_sw_rst_n {
gpio-hog;
gpios = <ASPEED_GPIO(P, 1) GPIO_ACTIVE_HIGH>;
output-high;
- line-name = "bmc_i2c2_sw_rst_n";
};
line_bmc_i2c5_sw_rst_n {
gpio-hog;
gpios = <ASPEED_GPIO(P, 3) GPIO_ACTIVE_HIGH>;
output-high;
- line-name = "bmc_i2c5_sw_rst_n";
};
};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 8e04303e8514..82f0213e3a3c 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -219,6 +219,16 @@
reg = <0x1e720000 0x8000>; // 32K
};
+ video: video@1e700000 {
+ compatible = "aspeed,ast2400-video-engine";
+ reg = <0x1e700000 0x1000>;
+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+ <&syscon ASPEED_CLK_GATE_ECLK>;
+ clock-names = "vclk", "eclk";
+ interrupts = <7>;
+ status = "disabled";
+ };
+
sdmmc: sd-controller@1e740000 {
compatible = "aspeed,ast2400-sd-controller";
reg = <0x1e740000 0x100>;
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index f12ec04d3cbc..de7fd80b022a 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -224,6 +224,14 @@
#clock-cells = <1>;
#reset-cells = <1>;
+ scu_ic: interrupt-controller@18 {
+ #interrupt-cells = <1>;
+ compatible = "aspeed,ast2500-scu-ic";
+ reg = <0x18 0x4>;
+ interrupts = <21>;
+ interrupt-controller;
+ };
+
p2a: p2a-control@2c {
compatible = "aspeed,ast2500-p2a-ctrl";
reg = <0x2c 0x4>;
@@ -254,6 +262,17 @@
interrupts = <0x19>;
};
+ xdma: xdma@1e6e7000 {
+ compatible = "aspeed,ast2500-xdma";
+ reg = <0x1e6e7000 0x100>;
+ clocks = <&syscon ASPEED_CLK_GATE_BCLK>;
+ resets = <&syscon ASPEED_RESET_XDMA>;
+ interrupts-extended = <&vic 6>, <&scu_ic 2>;
+ pcie-device = "bmc";
+ aspeed,scu = <&syscon>;
+ status = "disabled";
+ };
+
adc: adc@1e6e9000 {
compatible = "aspeed,ast2500-adc";
reg = <0x1e6e9000 0xb0>;
@@ -426,22 +445,22 @@
#size-cells = <1>;
ranges = <0x0 0x0 0x80>;
- kcs1: kcs1@0 {
- compatible = "aspeed,ast2500-kcs-bmc";
+ kcs1: kcs@24 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
interrupts = <8>;
- kcs_chan = <1>;
status = "disabled";
};
- kcs2: kcs2@0 {
- compatible = "aspeed,ast2500-kcs-bmc";
+ kcs2: kcs@28 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
interrupts = <8>;
- kcs_chan = <2>;
status = "disabled";
};
- kcs3: kcs3@0 {
- compatible = "aspeed,ast2500-kcs-bmc";
+ kcs3: kcs@2c {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
interrupts = <8>;
- kcs_chan = <3>;
status = "disabled";
};
};
@@ -455,10 +474,10 @@
#size-cells = <1>;
ranges = <0x0 0x80 0x1e0>;
- kcs4: kcs4@0 {
- compatible = "aspeed,ast2500-kcs-bmc";
+ kcs4: kcs@94 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x94 0x1>, <0x98 0x1>, <0x9c 0x1>;
interrupts = <8>;
- kcs_chan = <4>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 0a29b3b57a9d..9d8d8e18bc90 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -65,6 +65,7 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
clocks = <&syscon ASPEED_CLK_HPLL>;
arm,cpu-registers-not-fw-configured;
+ always-on;
};
ahb {
@@ -313,6 +314,22 @@
compatible = "aspeed,ast2600-smpmem";
reg = <0x180 0x40>;
};
+
+ scu_ic0: interrupt-controller@560 {
+ #interrupt-cells = <1>;
+ compatible = "aspeed,ast2600-scu-ic0";
+ reg = <0x560 0x4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ };
+
+ scu_ic1: interrupt-controller@570 {
+ #interrupt-cells = <1>;
+ compatible = "aspeed,ast2600-scu-ic1";
+ reg = <0x570 0x4>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ };
};
rng: hwrng@1e6e2524 {
@@ -322,6 +339,29 @@
quality = <100>;
};
+ xdma: xdma@1e6e7000 {
+ compatible = "aspeed,ast2600-xdma";
+ reg = <0x1e6e7000 0x100>;
+ clocks = <&syscon ASPEED_CLK_GATE_BCLK>;
+ resets = <&syscon ASPEED_RESET_DEV_XDMA>, <&syscon ASPEED_RESET_RC_XDMA>;
+ reset-names = "device", "root-complex";
+ interrupts-extended = <&gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <&scu_ic0 2>;
+ pcie-device = "bmc";
+ aspeed,scu = <&syscon>;
+ status = "disabled";
+ };
+
+ video: video@1e700000 {
+ compatible = "aspeed,ast2600-video-engine";
+ reg = <0x1e700000 0x1000>;
+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+ <&syscon ASPEED_CLK_GATE_ECLK>;
+ clock-names = "vclk", "eclk";
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
gpio0: gpio@1e780000 {
#gpio-cells = <2>;
gpio-controller;
@@ -368,6 +408,7 @@
<&gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_APB1>;
clock-names = "PCLK";
+ status = "disabled";
};
uart1: serial@1e783000 {
@@ -433,22 +474,23 @@
#size-cells = <1>;
ranges = <0x0 0x0 0x80>;
- kcs1: kcs1@0 {
- compatible = "aspeed,ast2600-kcs-bmc";
+ kcs1: kcs@24 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
kcs_chan = <1>;
status = "disabled";
};
- kcs2: kcs2@0 {
- compatible = "aspeed,ast2600-kcs-bmc";
+ kcs2: kcs@28 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
- kcs_chan = <2>;
status = "disabled";
};
- kcs3: kcs3@0 {
- compatible = "aspeed,ast2600-kcs-bmc";
+ kcs3: kcs@2c {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
- kcs_chan = <3>;
status = "disabled";
};
};
@@ -462,10 +504,10 @@
#size-cells = <1>;
ranges = <0x0 0x80 0x1e0>;
- kcs4: kcs4@0 {
- compatible = "aspeed,ast2600-kcs-bmc";
+ kcs4: kcs@94 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x94 0x1>, <0x98 0x1>, <0x9c 0x1>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
- kcs_chan = <4>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi b/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
index 21876da7c442..c1c8650dafce 100644
--- a/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
+++ b/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
@@ -62,7 +62,7 @@
wm8904: wm8904@1a {
compatible = "wlf,wm8904";
reg = <0x1a>;
- clocks = <&pck2>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 10>;
clock-names = "mclk";
};
};
diff --git a/arch/arm/boot/dts/at91-kizbox3-hs.dts b/arch/arm/boot/dts/at91-kizbox3-hs.dts
index 8734e7f8939e..0da1f0557eaf 100644
--- a/arch/arm/boot/dts/at91-kizbox3-hs.dts
+++ b/arch/arm/boot/dts/at91-kizbox3-hs.dts
@@ -283,7 +283,7 @@
&flx3 {
status = "okay";
- uart6: serial@200 {
+ uart8: serial@200 {
status = "okay";
};
};
@@ -291,7 +291,7 @@
&flx4 {
status = "okay";
- i2c2: i2c@600 {
+ i2c6: i2c@600 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/at91-kizbox3_common.dtsi b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
index 299e74d23184..7c3076e245ef 100644
--- a/arch/arm/boot/dts/at91-kizbox3_common.dtsi
+++ b/arch/arm/boot/dts/at91-kizbox3_common.dtsi
@@ -28,7 +28,7 @@
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
- serial6 = &uart6;
+ serial6 = &uart8;
};
chosen {
@@ -207,7 +207,7 @@
};
};
- pinctrl_flx4_default: flx4_i2c2_default {
+ pinctrl_flx4_default: flx4_i2c6_default {
pinmux = <PIN_PD12__FLEXCOM4_IO0>, //DATA
<PIN_PD13__FLEXCOM4_IO1>; //CLK
bias-disable;
@@ -299,21 +299,8 @@
status = "disabled";
uart5: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x400>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(11))>,
- <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(12))>;
- dma-names = "tx", "rx";
- clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
- clock-names = "usart";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx0_default>;
- atmel,fifo-size = <32>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -324,22 +311,9 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
status = "disabled";
- uart6: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x400>;
- interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(17))>,
- <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(18))>;
- dma-names = "tx", "rx";
- clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
- clock-names = "usart";
+ uart8: serial@200 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx3_default>;
- atmel,fifo-size = <32>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -350,23 +324,9 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
status = "disabled";
- i2c2: i2c@600 {
- compatible = "atmel,sama5d2-i2c";
- reg = <0x600 0x200>;
- interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(19))>,
- <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
- | AT91_XDMAC_DT_PERID(20))>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
+ i2c6: i2c@600 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx4_default>;
- atmel,fifo-size = <16>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
index b484745bf2d4..a5f5718c711a 100644
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -542,6 +542,18 @@
};
};
+ sdmmc1 {
+ pinctrl_sdmmc1_default: sdmmc1 {
+ atmel,pins =
+ <AT91_PIOA 13 AT91_PERIPH_B (AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA13 CK periph B */
+ AT91_PIOA 12 AT91_PERIPH_B (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA12 CMD periph B with pullup */
+ AT91_PIOA 11 AT91_PERIPH_B (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA11 DAT0 periph B with pullup */
+ AT91_PIOA 2 AT91_PERIPH_B (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA2 DAT1 periph B with pullup */
+ AT91_PIOA 3 AT91_PERIPH_B (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA3 DAT2 periph B with pullup */
+ AT91_PIOA 4 AT91_PERIPH_B (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)>; /* PA4 DAT3 periph B with pullup */
+ };
+ };
+
gpio_keys {
pinctrl_key_gpio_default: pinctrl_key_gpio {
atmel,pins = <AT91_PIOD 18 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
@@ -568,6 +580,15 @@
disable-wp;
};
+&sdmmc1 {
+ bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdmmc1_default>;
+ no-1-8-v;
+ non-removable;
+ status = "disabled"; /* Conflict with flx4. */
+};
+
&qspi {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_qspi>;
@@ -579,6 +600,8 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
m25p,fast-read;
at91bootstrap@0 {
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 6281590150c8..b1f994c0ae79 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -13,6 +13,10 @@
model = "Atmel SAMA5D27 SoM1";
compatible = "atmel,sama5d27-som1", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5";
+ aliases {
+ i2c0 = &i2c0;
+ };
+
clocks {
slow_xtal {
clock-frequency = <32768>;
@@ -34,12 +38,44 @@
pinctrl-0 = <&pinctrl_qspi1_default>;
flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <80000000>;
spi-tx-bus-width = <4>;
spi-rx-bus-width = <4>;
m25p,fast-read;
+
+ at91bootstrap@00000000 {
+ label = "at91bootstrap";
+ reg = <0x00000000 0x00040000>;
+ };
+
+ bootloader@00040000 {
+ label = "bootloader";
+ reg = <0x00040000 0x000c0000>;
+ };
+
+ bootloaderenvred@00100000 {
+ label = "bootloader env redundant";
+ reg = <0x00100000 0x00040000>;
+ };
+
+ bootloaderenv@00140000 {
+ label = "bootloader env";
+ reg = <0x00140000 0x00040000>;
+ };
+
+ dtb@00180000 {
+ label = "device tree";
+ reg = <0x00180000 0x00080000>;
+ };
+
+ kernel@00200000 {
+ label = "kernel";
+ reg = <0x00200000 0x00600000>;
+ };
};
};
@@ -57,7 +93,25 @@
};
};
+ i2c0: i2c@f8028000 {
+ dmas = <0>, <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c0_default>;
+ status = "okay";
+
+ at24@50 {
+ compatible = "24c02";
+ reg = <0x50>;
+ pagesize = <8>;
+ };
+ };
+
pinctrl@fc038000 {
+ pinctrl_i2c0_default: i2c0_default {
+ pinmux = <PIN_PD21__TWD0>,
+ <PIN_PD22__TWCK0>;
+ bias-disable;
+ };
pinctrl_qspi1_default: qspi1_default {
sck_cs {
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index b0853bf7901c..0e159f879c15 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -21,7 +21,7 @@
serial1 = &uart4; /* mikro BUS 1 */
serial2 = &uart2; /* mikro BUS 2 */
i2c1 = &i2c1;
- i2c2 = &i2c2;
+ i2c2 = &i2c3;
};
chosen {
@@ -125,21 +125,13 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
status = "okay";
- i2c2: i2c@600 {
- compatible = "atmel,sama5d2-i2c";
- reg = <0x600 0x200>;
- interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
+ i2c3: i2c@600 {
dmas = <0>, <0>;
- dma-names = "tx", "rx";
i2c-analog-filter;
i2c-digital-filter;
i2c-digital-filter-width-ns = <35>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mikrobus_i2c>;
- atmel,fifo-size = <16>;
status = "okay";
};
};
@@ -178,27 +170,17 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_SPI>;
status = "disabled";
- uart7: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x200>;
- interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
- clock-names = "usart";
+ uart8: serial@200 {
+ dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx3_default>;
- atmel,fifo-size = <32>;
status = "disabled"; /* Conflict with isc. */
};
- spi2: spi@400 {
- compatible = "atmel,at91rm9200-spi";
- reg = <0x400 0x200>;
- interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
- clock-names = "spi_clk";
+ spi5: spi@400 {
+ dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx3_default>;
- atmel,fifo-size = <16>;
status = "disabled"; /* Conflict with isc. */
};
};
@@ -207,43 +189,25 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_SPI>;
status = "okay";
- uart6: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x200>;
- interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
- clock-names = "usart";
+ uart9: serial@200 {
+ dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx4_default>;
- atmel,fifo-size = <32>;
- status = "disabled"; /* Conflict with spi3 and i2c3. */
+ status = "disabled"; /* Conflict with spi6 and i2c6. */
};
- spi3: spi@400 {
- compatible = "atmel,at91rm9200-spi";
- reg = <0x400 0x200>;
- interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
- clock-names = "spi_clk";
+ spi6: spi@400 {
+ dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mikrobus_spi &pinctrl_mikrobus1_spi_cs &pinctrl_mikrobus2_spi_cs>;
- atmel,fifo-size = <16>;
- status = "okay"; /* Conflict with uart6 and i2c3. */
+ status = "okay"; /* Conflict with uart5 and i2c6. */
};
- i2c3: i2c@600 {
- compatible = "atmel,sama5d2-i2c";
- reg = <0x600 0x200>;
- interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+ i2c6: i2c@600 {
dmas = <0>, <0>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx4_default>;
- atmel,fifo-size = <16>;
- status = "disabled"; /* Conflict with uart6 and spi3. */
+ status = "disabled"; /* Conflict with uart5 and spi6. */
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
index db3e2239eee8..a06700e53e4c 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
@@ -17,6 +17,10 @@
model = "Microchip SAMA5D27 WLSOM1";
compatible = "microchip,sama5d27-wlsom1", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5";
+ aliases {
+ i2c0 = &i2c0;
+ };
+
clocks {
slow_xtal {
clock-frequency = <32768>;
@@ -32,18 +36,6 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
uart6: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x200>;
- interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
- AT91_XDMAC_DT_PERID(13))>,
- <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
- AT91_XDMAC_DT_PERID(14))>;
- dma-names = "tx", "rx";
- clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
- clock-names = "usart";
pinctrl-0 = <&pinctrl_flx1_default>;
pinctrl-names = "default";
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index 6b8461278950..6b38fa3f5568 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -77,18 +77,6 @@
status = "okay";
uart5: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x200>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
- dmas = <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
- AT91_XDMAC_DT_PERID(11))>,
- <&dma0
- (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
- AT91_XDMAC_DT_PERID(12))>;
- dma-names = "tx", "rx";
- clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
- clock-names = "usart";
pinctrl-0 = <&pinctrl_flx0_default>;
pinctrl-names = "default";
atmel,use-dma-rx;
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
new file mode 100644
index 000000000000..8d19925fc09e
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * at91-sama5d2_icp.dts - Device Tree file for SAMA5D2-ICP board
+ *
+ * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Radu Pirea & Razvan Stefanescu,
+ * Codrin Ciubotariu <codrin.ciubotariu@microchip.com>,
+ * Cristian Birsan <cristian.birsan@microchip.com>
+ */
+/dts-v1/;
+#include "sama5d2.dtsi"
+#include "sama5d2-pinfunc.h"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/atmel-flexcom.h>
+
+/ {
+ model = "Microchip SAMA5D2-ICP";
+ compatible = "microchip,sama5d2-icp", "atmel,sama5d27", "atmel,sama5d2", "atmel,sama5";
+
+ aliases {
+ serial0 = &uart0; /* debug uart0 + mikro BUS 1 */
+ serial1 = &uart1; /* mikro BUS 3 */
+ serial3 = &uart3; /* mikro BUS 2 */
+ serial5 = &uart7; /* flx2 */
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clocks {
+ slow_xtal {
+ clock-frequency = <32768>;
+ };
+
+ main_xtal {
+ clock-frequency = <12000000>;
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_key_gpio_default>;
+ status = "okay";
+
+ sw4 {
+ label = "USER_PB1";
+ gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
+ linux,code = <0x104>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led_gpio_default>;
+ status = "okay"; /* conflict with pwm0 */
+
+ red {
+ label = "red";
+ gpios = <&pioA PIN_PB0 GPIO_ACTIVE_HIGH>;
+ };
+
+ green {
+ label = "green";
+ gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>;
+ };
+
+ blue {
+ label = "blue";
+ gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&adc {
+ vddana-supply = <&vdd_io_reg>;
+ vref-supply = <&vdd_io_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
+ status = "okay";
+};
+
+&can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can0_default>;
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1_default>;
+ status = "okay";
+};
+
+&flx0 { /* mikrobus2 spi */
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_SPI>;
+ status = "okay";
+
+ spi2: spi@400 {
+ dmas = <0>, <0>;
+ cs-gpios = <&pioA PIN_PC0 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus2_spi &pinctrl_ksz_spi_cs>;
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch0: ksz8563@0 {
+ compatible = "microchip,ksz8563";
+ reg = <0>;
+ phy-mode = "mii";
+ reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>;
+
+ spi-max-frequency = <500000>;
+ spi-cpha;
+ spi-cpol;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "cpu";
+ ethernet = <&macb0>;
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
+};
+
+&flx2 {
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+ status = "okay";
+
+ uart7: serial@200 {
+ pinctrl-0 = <&pinctrl_flx2_default>;
+ pinctrl-names = "default";
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ status = "okay"; /* Conflict w/ qspi1. */
+ };
+};
+
+&flx3 { /* mikrobus1 spi */
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_SPI>;
+ status = "okay";
+
+ spi5: spi@400 {
+ dmas = <0>, <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus1_spi &pinctrl_mikrobus1_spi_cs>;
+ status = "okay";
+ };
+};
+
+&flx4 {
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+ status = "okay";
+
+ i2c6: i2c@600 {
+ dmas = <0>, <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flx4_default>;
+ status = "okay";
+
+ mcp16502@5b {
+ compatible = "microchip,mcp16502";
+ reg = <0x5b>;
+ status = "okay";
+ lpm-gpios = <&pioBU 7 GPIO_ACTIVE_LOW>;
+
+ regulators {
+ vdd_io_reg: VDD_IO {
+ regulator-name = "VDD_IO";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ VDD_DDR {
+ regulator-name = "VDD_DDR";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ VDD_CORE {
+ regulator-name = "VDD_CORE";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ VDD_OTHER {
+ regulator-name = "VDD_OTHER";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ LDO2 {
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ };
+ };
+ };
+};
+
+&i2c0 { /* mikrobus i2c */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus_i2c>;
+ status = "okay";
+};
+
+&i2c1 {
+ dmas = <0>, <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_default>;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ pagesize = <16>;
+ status = "okay";
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c32";
+ reg = <0x52>;
+ pagesize = <16>;
+ status = "disabled";
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c32";
+ reg = <0x53>;
+ pagesize = <16>;
+ status = "disabled";
+ };
+};
+
+&macb0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb0_default &pinctrl_macb0_phy_irq &pinctrl_macb0_rst>;
+ phy-mode = "mii";
+ status = "okay";
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+};
+
+&pioA {
+ pinctrl_adc_default: adc_default {
+ pinmux = <PIN_PD24__GPIO>,
+ <PIN_PD25__GPIO>,
+ <PIN_PD26__GPIO>;
+ bias-disable;
+ };
+
+ /*
+ * The ADTRG pin can work on any edge type.
+ * Here it is pulled up, so it must be pulled
+ * to ground to produce an edge. The trigger can
+ * be configured on the falling, rising or any
+ * edge, and the pull-up can be changed to a
+ * pull-down or left floating according to needs.
+ */
+ pinctrl_adtrg_default: adtrg_default {
+ pinmux = <PIN_PD31__ADTRG>;
+ bias-pull-up;
+ };
+
+ pinctrl_flx4_default: flx4_default {
+ pinmux = <PIN_PC28__FLEXCOM4_IO0>,
+ <PIN_PC29__FLEXCOM4_IO1>;
+ bias-disable;
+ };
+
+ pinctrl_can0_default: can0_default {
+ pinmux = <PIN_PC10__CANTX0>,
+ <PIN_PC11__CANRX0>;
+ bias-disable;
+ };
+
+ pinctrl_can1_default: can1_default {
+ pinmux = <PIN_PC26__CANTX1>,
+ <PIN_PC27__CANRX1>;
+ bias-disable;
+ };
+
+ pinctrl_i2c1_default: i2c1_default {
+ pinmux = <PIN_PD19__TWD1>,
+ <PIN_PD20__TWCK1>;
+ bias-disable;
+ };
+
+ pinctrl_key_gpio_default: key_gpio_default {
+ pinmux = <PIN_PD0__GPIO>;
+ bias-pull-up;
+ };
+
+ pinctrl_led_gpio_default: led_gpio_default {
+ pinmux = <PIN_PB0__GPIO>,
+ <PIN_PB1__GPIO>,
+ <PIN_PA31__GPIO>;
+ bias-pull-up;
+ };
+
+ pinctrl_qspi1_default: qspi1_default {
+ pinmux = <PIN_PA6__QSPI1_SCK>,
+ <PIN_PA7__QSPI1_IO0>,
+ <PIN_PA8__QSPI1_IO1>,
+ <PIN_PA9__QSPI1_IO2>,
+ <PIN_PA10__QSPI1_IO3>,
+ <PIN_PA11__QSPI1_CS>;
+ bias-disable;
+ };
+
+ pinctrl_sdmmc0_default: sdmmc0_default {
+ cmd_data {
+ pinmux = <PIN_PA1__SDMMC0_CMD>,
+ <PIN_PA2__SDMMC0_DAT0>,
+ <PIN_PA3__SDMMC0_DAT1>,
+ <PIN_PA4__SDMMC0_DAT2>,
+ <PIN_PA5__SDMMC0_DAT3>;
+ bias-disable;
+ };
+
+ ck_cd {
+ pinmux = <PIN_PA0__SDMMC0_CK>,
+ <PIN_PA13__SDMMC0_CD>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_sdmmc1_default: sdmmc1_default {
+ cmd_data {
+ pinmux = <PIN_PA18__SDMMC1_DAT0>,
+ <PIN_PA19__SDMMC1_DAT1>,
+ <PIN_PA20__SDMMC1_DAT2>,
+ <PIN_PA21__SDMMC1_DAT3>;
+ bias-disable;
+ };
+
+ ck_cd {
+ pinmux = <PIN_PA22__SDMMC1_CK>,
+ <PIN_PA28__SDMMC1_CMD>;
+ bias-disable;
+ };
+ };
+
+ pinctrl_mikrobus_i2c: mikrobus_i2c {
+ pinmux = <PIN_PD22__TWCK0>,
+ <PIN_PD21__TWD0>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_an: mikrobus1_an {
+ pinmux = <PIN_PD26__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_rst: mikrobus1_rst {
+ pinmux = <PIN_PC5__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_spi_cs: mikrobus1_spi_cs {
+ pinmux = <PIN_PC21__FLEXCOM3_IO3>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_spi: mikrobus1_spi {
+ pinmux = <PIN_PC20__FLEXCOM3_IO0>,
+ <PIN_PC19__FLEXCOM3_IO1>,
+ <PIN_PC18__FLEXCOM3_IO2>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_pwm: mikrobus1_pwm {
+ pinmux = <PIN_PC4__TIOB1>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_int: mikrobus1_int {
+ pinmux = <PIN_PC3__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus1_uart: mikrobus1_uart {
+ pinmux = <PIN_PB26__URXD0>,
+ <PIN_PB27__UTXD0>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_an: mikrobus2_an {
+ pinmux = <PIN_PD25__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_rst: mikrobus2_rst {
+ pinmux = <PIN_PB24__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_spi_cs: mikrobus2_spi_cs {
+ pinmux = <PIN_PB31__FLEXCOM0_IO3>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_spi: mikrobus2_spi {
+ pinmux = <PIN_PB28__FLEXCOM0_IO0>,
+ <PIN_PB29__FLEXCOM0_IO1>,
+ <PIN_PB30__FLEXCOM0_IO2>;
+ bias-disable;
+ };
+
+ pinctrl_ksz_spi_cs: ksz_spi_cs {
+ pinmux = <PIN_PC0__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_pwm: mikrobus2_pwm {
+ pinmux = <PIN_PB23__TIOB2>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_int: mikrobus2_int {
+ pinmux = <PIN_PB22__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus2_uart: mikrobus2_uart {
+ pinmux = <PIN_PC12__URXD3>,
+ <PIN_PC13__UTXD3>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_an: mikrobus3_an {
+ pinmux = <PIN_PD24__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_rst: mikrobus3_rst {
+ pinmux = <PIN_PB21__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_spi_cs: mikrobus3_spi_cs {
+ pinmux = <PIN_PA17__SPI0_NPCS0>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_spi: mikrobus3_spi {
+ pinmux = <PIN_PA14__SPI0_SPCK>,
+ <PIN_PA16__SPI0_MISO>,
+ <PIN_PA15__SPI0_MOSI>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_pwm: mikrobus3_pwm {
+ pinmux = <PIN_PB20__TIOB3>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_int: mikrobus3_int {
+ pinmux = <PIN_PB18__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_mikrobus3_uart: mikrobus3_uart {
+ pinmux = <PIN_PC7__URXD1>,
+ <PIN_PC8__UTXD1>;
+ bias-disable;
+ };
+
+ pinctrl_usb_default: usb_default {
+ pinmux = <PIN_PC17__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_usba_vbus: usba_vbus {
+ pinmux = <PIN_PD23__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_pwm0_pwm2_default: pwm0_pwm2_default {
+ pinmux = <PIN_PB5__PWMH2>,
+ <PIN_PB6__PWML2>;
+ bias-pull-up;
+ };
+
+ pinctrl_macb0_default: macb0_default {
+ pinmux = <PIN_PD1__GRXCK>,
+ <PIN_PD2__GTXER>,
+ <PIN_PD5__GRX2>,
+ <PIN_PD6__GRX3>,
+ <PIN_PD7__GTX2>,
+ <PIN_PD8__GTX3>,
+ <PIN_PD9__GTXCK>,
+ <PIN_PD10__GTXEN>,
+ <PIN_PD11__GRXDV>,
+ <PIN_PD12__GRXER>,
+ <PIN_PD13__GRX0>,
+ <PIN_PD14__GRX1>,
+ <PIN_PD15__GTX0>,
+ <PIN_PD16__GTX1>,
+ <PIN_PD17__GMDC>,
+ <PIN_PD18__GMDIO>;
+ bias-disable;
+ };
+
+ pinctrl_macb0_phy_irq: macb0_phy_irq {
+ pinmux = <PIN_PD3__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_macb0_rst: macb0_sw_rst {
+ pinmux = <PIN_PD4__GPIO>;
+ bias-disable;
+ };
+
+ pinctrl_flx2_default: flx2_default {
+ pinmux = <PIN_PA6__FLEXCOM2_IO0>,
+ <PIN_PA7__FLEXCOM2_IO1>,
+ <PIN_PA9__FLEXCOM2_IO3>,
+ <PIN_PA10__FLEXCOM2_IO4>;
+ bias-disable;
+ };
+};
+
+&pwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm0_pwm2_default>;
+ status = "disabled"; /* conflict with leds, HSIC */
+};
+
+&qspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi1_default>;
+ status = "disabled"; /* Conflict with wilc_pwrseq, flx2 */
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ m25p,fast-read;
+
+ at91bootstrap@0 {
+ label = "qspi: at91bootstrap";
+ reg = <0x00000000 0x00040000>;
+ };
+
+ bootloader@40000 {
+ label = "qspi: bootloader";
+ reg = <0x00040000 0x000c0000>;
+ };
+
+ bootloaderenvred@100000 {
+ label = "qspi: bootloader env redundant";
+ reg = <0x00100000 0x00040000>;
+ };
+
+ bootloaderenv@140000 {
+ label = "qspi: bootloader env";
+ reg = <0x00140000 0x00040000>;
+ };
+
+ dtb@180000 {
+ label = "qspi: device tree";
+ reg = <0x00180000 0x00080000>;
+ };
+
+ kernel@200000 {
+ label = "qspi: kernel";
+ reg = <0x00200000 0x00600000>;
+ };
+ };
+};
+
+&sdmmc0 {
+ no-1-8-v;
+ bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sdmmc0_default>;
+ status = "okay";
+};
+
+&shutdown_controller {
+ atmel,shdwc-debouncer = <976>;
+ atmel,wakeup-rtc-timer;
+
+ input@0 {
+ reg = <0>;
+ atmel,wakeup-type = "low";
+ };
+};
+
+&spi0 { /* mikrobus3 spi */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus3_spi &pinctrl_mikrobus3_spi_cs>;
+ status = "okay";
+};
+
+&tcb0 {
+ timer0: timer@0 {
+ compatible = "atmel,tcb-timer";
+ reg = <0>;
+ };
+
+ timer1: timer@1 {
+ compatible = "atmel,tcb-timer";
+ reg = <1>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus1_uart>;
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus3_uart>;
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mikrobus2_uart>;
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ status = "okay";
+};
+
+&usb0 {
+ atmel,vbus-gpio = <&pioA PIN_PD23 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usba_vbus>;
+ status = "okay";
+};
+
+&usb1 {
+ num-ports = <3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_default>;
+ status = "okay";
+};
+
+&usb2 {
+ phy_type = "hsic";
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index 1c24ac8019ba..c894c7c788a9 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -18,9 +18,9 @@
compatible = "atmel,sama5d2-ptc_ek", "atmel,sama5d2", "atmel,sama5";
aliases {
- serial0 = &uart0;
- i2c0 = &i2c0;
- i2c1 = &i2c1;
+ serial0 = &uart0; /* DBGU */
+ i2c0 = &i2c0; /* mikroBUS 1 */
+ i2c1 = &i2c1; /* XPRO EXT1 */
i2c2 = &i2c2;
};
@@ -40,7 +40,7 @@
ahb {
usb0: gadget@300000 {
- atmel,vbus-gpio = <&pioA PIN_PA27 GPIO_ACTIVE_HIGH>;
+ atmel,vbus-gpio = <&pioA PIN_PB11 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usba_vbus>;
status = "okay";
@@ -125,8 +125,6 @@
bus-width = <8>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
- non-removable;
- mmc-ddr-1_8v;
status = "okay";
};
@@ -184,7 +182,7 @@
pinctrl-0 = <&pinctrl_i2c0_default>;
pinctrl-1 = <&pinctrl_i2c0_gpio>;
sda-gpios = <&pioA PIN_PD21 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA PIN_PD22 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD22 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
@@ -193,20 +191,12 @@
status = "okay";
i2c2: i2c@600 {
- compatible = "atmel,sama5d2-i2c";
- reg = <0x600 0x200>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
dmas = <0>, <0>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_flx0_default>;
pinctrl-1 = <&pinctrl_flx0_gpio>;
sda-gpios = <&pioA PIN_PB28 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA PIN_PB29 GPIO_ACTIVE_HIGH>;
- atmel,fifo-size = <16>;
+ scl-gpios = <&pioA PIN_PB29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
};
@@ -236,7 +226,7 @@
pinctrl-0 = <&pinctrl_i2c1_default>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&pioA PIN_PC6 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA PIN_PC7 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PC7 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
at24@50 {
@@ -414,6 +404,7 @@
label = "PB_USER";
gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
linux,code = <0x104>;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 055ee53e4773..a927165ea7c2 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -16,6 +16,13 @@
model = "Atmel SAMA5D2 Xplained";
compatible = "atmel,sama5d2-xplained", "atmel,sama5d2", "atmel,sama5";
+ aliases {
+ serial0 = &uart1; /* DBGU */
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2; /* XPRO EXT2 */
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -72,6 +79,58 @@
};
apb {
+ qspi0: spi@f0020000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi0_default>;
+ status = "disabled"; /* conflict with sdmmc1 */
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ m25p,fast-read;
+
+ at91bootstrap@00000000 {
+ label = "at91bootstrap";
+ reg = <0x00000000 0x00040000>;
+ };
+
+ bootloader@00040000 {
+ label = "bootloader";
+ reg = <0x00040000 0x000c0000>;
+ };
+
+ bootloaderenvred@00100000 {
+ label = "bootloader env redundant";
+ reg = <0x00100000 0x00040000>;
+ };
+
+ bootloaderenv@00140000 {
+ label = "bootloader env";
+ reg = <0x00140000 0x00040000>;
+ };
+
+ dtb@00180000 {
+ label = "device tree";
+ reg = <0x00180000 0x00080000>;
+ };
+
+ kernel@00200000 {
+ label = "kernel";
+ reg = <0x00200000 0x00600000>;
+ };
+
+ misc@00800000 {
+ label = "misc";
+ reg = <0x00800000 0x00000000>;
+ };
+ };
+ };
+
spi0: spi@f8000000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0_default>;
@@ -133,7 +192,7 @@
pinctrl-0 = <&pinctrl_i2c0_default>;
pinctrl-1 = <&pinctrl_i2c0_gpio>;
sda-gpios = <&pioA PIN_PD21 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA PIN_PD22 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD22 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-sda-hold-time-ns = <350>;
status = "okay";
@@ -275,16 +334,25 @@
status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
uart5: serial@200 {
- compatible = "atmel,at91sam9260-usart";
- reg = <0x200 0x200>;
- interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
- clock-names = "usart";
+ dmas = <0>, <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_flx0_default>;
- atmel,fifo-size = <32>;
status = "okay";
};
+
+ i2c2: i2c@600 {
+ dmas = <0>, <0>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_flx0_default>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ sda-gpios = <&pioA PIN_PB28 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PB29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-sda-hold-time-ns = <350>;
+ i2c-analog-filter;
+ i2c-digital-filter;
+ i2c-digital-filter-width-ns = <35>;
+ status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
+ };
};
shdwc@f8048010 {
@@ -325,21 +393,13 @@
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
status = "okay";
- i2c2: i2c@600 {
- compatible = "atmel,sama5d2-i2c";
- reg = <0x600 0x200>;
- interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+ i2c6: i2c@600 {
dmas = <0>, <0>;
- dma-names = "tx", "rx";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_flx4_default>;
pinctrl-1 = <&pinctrl_flx4_gpio>;
sda-gpios = <&pioA PIN_PD12 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA PIN_PD13 GPIO_ACTIVE_HIGH>;
- atmel,fifo-size = <16>;
+ scl-gpios = <&pioA PIN_PD13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-analog-filter;
i2c-digital-filter;
i2c-digital-filter-width-ns = <35>;
@@ -356,7 +416,7 @@
i2c-digital-filter-width-ns = <35>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA PIN_PD5 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA PIN_PD5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
at24@54 {
@@ -480,6 +540,12 @@
bias-disable;
};
+ pinctrl_i2c2_gpio: i2c2_gpio {
+ pinmux = <PIN_PB28__GPIO>,
+ <PIN_PB29__GPIO>;
+ bias-disable;
+ };
+
pinctrl_i2s0_default: i2s0_default {
pinmux = <PIN_PC1__I2SC0_CK>,
<PIN_PC2__I2SC0_MCK>,
@@ -535,6 +601,22 @@
bias-disable;
};
+ pinctrl_qspi0_default: qspi0_default {
+ sck_cs {
+ pinmux = <PIN_PA22__QSPI0_SCK>,
+ <PIN_PA23__QSPI0_CS>;
+ bias-disable;
+ };
+
+ data {
+ pinmux = <PIN_PA24__QSPI0_IO0>,
+ <PIN_PA25__QSPI0_IO1>,
+ <PIN_PA26__QSPI0_IO2>,
+ <PIN_PA27__QSPI0_IO3>;
+ bias-pull-up;
+ };
+ };
+
pinctrl_sdmmc0_default: sdmmc0_default {
cmd_data {
pinmux = <PIN_PA1__SDMMC0_CMD>,
diff --git a/arch/arm/boot/dts/at91-wb50n.dtsi b/arch/arm/boot/dts/at91-wb50n.dtsi
index 4ed8500a5cb8..1487b893cfa7 100644
--- a/arch/arm/boot/dts/at91-wb50n.dtsi
+++ b/arch/arm/boot/dts/at91-wb50n.dtsi
@@ -46,10 +46,6 @@
atmel,osc-bypass;
};
-&usart1_clk {
- atmel,clk-output-range = <0 132000000>;
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_cd>;
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 44385718d9d4..a5040f5ea641 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -101,259 +101,9 @@
compatible = "atmel,at91rm9200-pmc", "syscon";
reg = <0xfffffc00 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- interrupt-controller;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
-
- main_osc: main_osc {
- compatible = "atmel,at91rm9200-clk-main-osc";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MOSCS>;
- clocks = <&main_xtal>;
- };
-
- main: mainck {
- compatible = "atmel,at91rm9200-clk-main";
- #clock-cells = <0>;
- clocks = <&main_osc>;
- };
-
- plla: pllack {
- compatible = "atmel,at91rm9200-clk-pll";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_LOCKA>;
- clocks = <&main>;
- reg = <0>;
- atmel,clk-input-range = <1000000 32000000>;
- #atmel,pll-clk-output-range-cells = <3>;
- atmel,pll-clk-output-ranges = <80000000 160000000 0>,
- <150000000 180000000 2>;
- };
-
- pllb: pllbck {
- compatible = "atmel,at91rm9200-clk-pll";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_LOCKB>;
- clocks = <&main>;
- reg = <1>;
- atmel,clk-input-range = <1000000 32000000>;
- #atmel,pll-clk-output-range-cells = <3>;
- atmel,pll-clk-output-ranges = <80000000 160000000 0>,
- <150000000 180000000 2>;
- };
-
- mck: masterck {
- compatible = "atmel,at91rm9200-clk-master";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
- clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
- atmel,clk-output-range = <0 80000000>;
- atmel,clk-divisors = <1 2 3 4>;
- };
-
- usb: usbck {
- compatible = "atmel,at91rm9200-clk-usb";
- #clock-cells = <0>;
- atmel,clk-divisors = <1 2 0 0>;
- clocks = <&pllb>;
- };
-
- prog: progck {
- compatible = "atmel,at91rm9200-clk-programmable";
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&pmc>;
- clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
-
- prog0: prog0 {
- #clock-cells = <0>;
- reg = <0>;
- interrupts = <AT91_PMC_PCKRDY(0)>;
- };
-
- prog1: prog1 {
- #clock-cells = <0>;
- reg = <1>;
- interrupts = <AT91_PMC_PCKRDY(1)>;
- };
-
- prog2: prog2 {
- #clock-cells = <0>;
- reg = <2>;
- interrupts = <AT91_PMC_PCKRDY(2)>;
- };
-
- prog3: prog3 {
- #clock-cells = <0>;
- reg = <3>;
- interrupts = <AT91_PMC_PCKRDY(3)>;
- };
- };
-
- systemck {
- compatible = "atmel,at91rm9200-clk-system";
- #address-cells = <1>;
- #size-cells = <0>;
-
- udpck: udpck {
- #clock-cells = <0>;
- reg = <2>;
- clocks = <&usb>;
- };
-
- uhpck: uhpck {
- #clock-cells = <0>;
- reg = <4>;
- clocks = <&usb>;
- };
-
- pck0: pck0 {
- #clock-cells = <0>;
- reg = <8>;
- clocks = <&prog0>;
- };
-
- pck1: pck1 {
- #clock-cells = <0>;
- reg = <9>;
- clocks = <&prog1>;
- };
-
- pck2: pck2 {
- #clock-cells = <0>;
- reg = <10>;
- clocks = <&prog2>;
- };
-
- pck3: pck3 {
- #clock-cells = <0>;
- reg = <11>;
- clocks = <&prog3>;
- };
- };
-
- periphck {
- compatible = "atmel,at91rm9200-clk-peripheral";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&mck>;
-
- pioA_clk: pioA_clk {
- #clock-cells = <0>;
- reg = <2>;
- };
-
- pioB_clk: pioB_clk {
- #clock-cells = <0>;
- reg = <3>;
- };
-
- pioC_clk: pioC_clk {
- #clock-cells = <0>;
- reg = <4>;
- };
-
- pioD_clk: pioD_clk {
- #clock-cells = <0>;
- reg = <5>;
- };
-
- usart0_clk: usart0_clk {
- #clock-cells = <0>;
- reg = <6>;
- };
-
- usart1_clk: usart1_clk {
- #clock-cells = <0>;
- reg = <7>;
- };
-
- usart2_clk: usart2_clk {
- #clock-cells = <0>;
- reg = <8>;
- };
-
- usart3_clk: usart3_clk {
- #clock-cells = <0>;
- reg = <9>;
- };
-
- mci0_clk: mci0_clk {
- #clock-cells = <0>;
- reg = <10>;
- };
-
- udc_clk: udc_clk {
- #clock-cells = <0>;
- reg = <11>;
- };
-
- twi0_clk: twi0_clk {
- reg = <12>;
- #clock-cells = <0>;
- };
-
- spi0_clk: spi0_clk {
- #clock-cells = <0>;
- reg = <13>;
- };
-
- ssc0_clk: ssc0_clk {
- #clock-cells = <0>;
- reg = <14>;
- };
-
- ssc1_clk: ssc1_clk {
- #clock-cells = <0>;
- reg = <15>;
- };
-
- ssc2_clk: ssc2_clk {
- #clock-cells = <0>;
- reg = <16>;
- };
-
- tc0_clk: tc0_clk {
- #clock-cells = <0>;
- reg = <17>;
- };
-
- tc1_clk: tc1_clk {
- #clock-cells = <0>;
- reg = <18>;
- };
-
- tc2_clk: tc2_clk {
- #clock-cells = <0>;
- reg = <19>;
- };
-
- tc3_clk: tc3_clk {
- #clock-cells = <0>;
- reg = <20>;
- };
-
- tc4_clk: tc4_clk {
- #clock-cells = <0>;
- reg = <21>;
- };
-
- tc5_clk: tc5_clk {
- #clock-cells = <0>;
- reg = <22>;
- };
-
- ohci_clk: ohci_clk {
- #clock-cells = <0>;
- reg = <23>;
- };
-
- macb0_clk: macb0_clk {
- #clock-cells = <0>;
- reg = <24>;
- };
- };
+ #clock-cells = <2>;
+ clocks = <&slow_xtal>, <&main_xtal>;
+ clock-names = "slow_xtal", "main_xtal";
};
st: timer@fffffd00 {
@@ -383,7 +133,7 @@
interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0
18 IRQ_TYPE_LEVEL_HIGH 0
19 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>, <&slow_xtal>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 17>, <&pmc PMC_TYPE_PERIPHERAL 18>, <&pmc PMC_TYPE_PERIPHERAL 19>, <&slow_xtal>;
clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
};
@@ -395,7 +145,7 @@
interrupts = <20 IRQ_TYPE_LEVEL_HIGH 0
21 IRQ_TYPE_LEVEL_HIGH 0
22 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tc3_clk>, <&tc4_clk>, <&tc5_clk>, <&slow_xtal>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>, <&pmc PMC_TYPE_PERIPHERAL 21>, <&pmc PMC_TYPE_PERIPHERAL 22>, <&slow_xtal>;
clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
};
@@ -405,7 +155,7 @@
interrupts = <12 IRQ_TYPE_LEVEL_HIGH 6>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_twi>;
- clocks = <&twi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 12>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@@ -415,7 +165,7 @@
compatible = "atmel,hsmci";
reg = <0xfffb4000 0x4000>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&mci0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
clock-names = "mci_clk";
#address-cells = <1>;
#size-cells = <0>;
@@ -429,7 +179,7 @@
interrupts = <14 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
- clocks = <&ssc0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
clock-names = "pclk";
status = "disabled";
};
@@ -440,7 +190,7 @@
interrupts = <15 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
- clocks = <&ssc1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
clock-names = "pclk";
status = "disabled";
};
@@ -451,7 +201,7 @@
interrupts = <16 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc2_tx &pinctrl_ssc2_rx>;
- clocks = <&ssc2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
clock-names = "pclk";
status = "disabled";
};
@@ -463,7 +213,7 @@
phy-mode = "rmii";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_macb_rmii>;
- clocks = <&macb0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 24>;
clock-names = "ether_clk";
status = "disabled";
};
@@ -803,7 +553,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioA_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
};
pioB: gpio@fffff600 {
@@ -814,7 +564,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioB_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 3>;
};
pioC: gpio@fffff800 {
@@ -825,7 +575,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioC_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 4>;
};
pioD: gpio@fffffa00 {
@@ -836,7 +586,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioD_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
};
};
@@ -846,7 +596,7 @@
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
clock-names = "usart";
status = "disabled";
};
@@ -859,7 +609,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
- clocks = <&usart0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
clock-names = "usart";
status = "disabled";
};
@@ -872,7 +622,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- clocks = <&usart1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
clock-names = "usart";
status = "disabled";
};
@@ -885,7 +635,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- clocks = <&usart2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
clock-names = "usart";
status = "disabled";
};
@@ -898,7 +648,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart3>;
- clocks = <&usart3_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
clock-names = "usart";
status = "disabled";
};
@@ -907,7 +657,7 @@
compatible = "atmel,at91rm9200-udc";
reg = <0xfffb0000 0x4000>;
interrupts = <11 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&udc_clk>, <&udpck>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 11>, <&pmc PMC_TYPE_SYSTEM 2>;
clock-names = "pclk", "hclk";
status = "disabled";
};
@@ -920,7 +670,7 @@
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
- clocks = <&spi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
clock-names = "spi_clk";
status = "disabled";
};
@@ -947,7 +697,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00300000 0x100000>;
interrupts = <23 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&ohci_clk>, <&ohci_clk>, <&uhpck>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_SYSTEM 4>;
clock-names = "ohci_clk", "hclk", "uhpck";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index fd179097a4bf..1fbee2a7785f 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -103,14 +103,14 @@
ramc0: ramc@ffffe400 {
compatible = "atmel,at91sam9g45-ddramc";
reg = <0xffffe400 0x200>;
- clocks = <&ddrck>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 2>;
clock-names = "ddrck";
};
ramc1: ramc@ffffe600 {
compatible = "atmel,at91sam9g45-ddramc";
reg = <0xffffe600 0x200>;
- clocks = <&ddrck>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 2>;
clock-names = "ddrck";
};
@@ -128,271 +128,9 @@
compatible = "atmel,at91sam9g45-pmc", "syscon";
reg = <0xfffffc00 0x100>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- interrupt-controller;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
-
- main_osc: main_osc {
- compatible = "atmel,at91rm9200-clk-main-osc";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MOSCS>;
- clocks = <&main_xtal>;
- };
-
- main: mainck {
- compatible = "atmel,at91rm9200-clk-main";
- #clock-cells = <0>;
- clocks = <&main_osc>;
- };
-
- plla: pllack {
- compatible = "atmel,at91rm9200-clk-pll";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_LOCKA>;
- clocks = <&main>;
- reg = <0>;
- atmel,clk-input-range = <2000000 32000000>;
- #atmel,pll-clk-output-range-cells = <4>;
- atmel,pll-clk-output-ranges = <745000000 800000000 0 0
- 695000000 750000000 1 0
- 645000000 700000000 2 0
- 595000000 650000000 3 0
- 545000000 600000000 0 1
- 495000000 555000000 1 1
- 445000000 500000000 2 1
- 400000000 450000000 3 1>;
- };
-
- plladiv: plladivck {
- compatible = "atmel,at91sam9x5-clk-plldiv";
- #clock-cells = <0>;
- clocks = <&plla>;
- };
-
- utmi: utmick {
- compatible = "atmel,at91sam9x5-clk-utmi";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_LOCKU>;
- clocks = <&main>;
- };
-
- mck: masterck {
- compatible = "atmel,at91rm9200-clk-master";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>;
- atmel,clk-output-range = <0 133333333>;
- atmel,clk-divisors = <1 2 4 3>;
- };
-
- usb: usbck {
- compatible = "atmel,at91sam9x5-clk-usb";
- #clock-cells = <0>;
- clocks = <&plladiv>, <&utmi>;
- };
-
- prog: progck {
- compatible = "atmel,at91sam9g45-clk-programmable";
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&pmc>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>;
-
- prog0: prog0 {
- #clock-cells = <0>;
- reg = <0>;
- interrupts = <AT91_PMC_PCKRDY(0)>;
- };
-
- prog1: prog1 {
- #clock-cells = <0>;
- reg = <1>;
- interrupts = <AT91_PMC_PCKRDY(1)>;
- };
- };
-
- systemck {
- compatible = "atmel,at91rm9200-clk-system";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ddrck: ddrck {
- #clock-cells = <0>;
- reg = <2>;
- clocks = <&mck>;
- };
-
- uhpck: uhpck {
- #clock-cells = <0>;
- reg = <6>;
- clocks = <&usb>;
- };
-
- pck0: pck0 {
- #clock-cells = <0>;
- reg = <8>;
- clocks = <&prog0>;
- };
-
- pck1: pck1 {
- #clock-cells = <0>;
- reg = <9>;
- clocks = <&prog1>;
- };
- };
-
- periphck {
- compatible = "atmel,at91rm9200-clk-peripheral";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&mck>;
-
- pioA_clk: pioA_clk {
- #clock-cells = <0>;
- reg = <2>;
- };
-
- pioB_clk: pioB_clk {
- #clock-cells = <0>;
- reg = <3>;
- };
-
- pioC_clk: pioC_clk {
- #clock-cells = <0>;
- reg = <4>;
- };
-
- pioDE_clk: pioDE_clk {
- #clock-cells = <0>;
- reg = <5>;
- };
-
- trng_clk: trng_clk {
- #clock-cells = <0>;
- reg = <6>;
- };
-
- usart0_clk: usart0_clk {
- #clock-cells = <0>;
- reg = <7>;
- };
-
- usart1_clk: usart1_clk {
- #clock-cells = <0>;
- reg = <8>;
- };
-
- usart2_clk: usart2_clk {
- #clock-cells = <0>;
- reg = <9>;
- };
-
- usart3_clk: usart3_clk {
- #clock-cells = <0>;
- reg = <10>;
- };
-
- mci0_clk: mci0_clk {
- #clock-cells = <0>;
- reg = <11>;
- };
-
- twi0_clk: twi0_clk {
- #clock-cells = <0>;
- reg = <12>;
- };
-
- twi1_clk: twi1_clk {
- #clock-cells = <0>;
- reg = <13>;
- };
-
- spi0_clk: spi0_clk {
- #clock-cells = <0>;
- reg = <14>;
- };
-
- spi1_clk: spi1_clk {
- #clock-cells = <0>;
- reg = <15>;
- };
-
- ssc0_clk: ssc0_clk {
- #clock-cells = <0>;
- reg = <16>;
- };
-
- ssc1_clk: ssc1_clk {
- #clock-cells = <0>;
- reg = <17>;
- };
-
- tcb0_clk: tcb0_clk {
- #clock-cells = <0>;
- reg = <18>;
- };
-
- pwm_clk: pwm_clk {
- #clock-cells = <0>;
- reg = <19>;
- };
-
- adc_clk: adc_clk {
- #clock-cells = <0>;
- reg = <20>;
- };
-
- dma0_clk: dma0_clk {
- #clock-cells = <0>;
- reg = <21>;
- };
-
- uhphs_clk: uhphs_clk {
- #clock-cells = <0>;
- reg = <22>;
- };
-
- lcd_clk: lcd_clk {
- #clock-cells = <0>;
- reg = <23>;
- };
-
- ac97_clk: ac97_clk {
- #clock-cells = <0>;
- reg = <24>;
- };
-
- macb0_clk: macb0_clk {
- #clock-cells = <0>;
- reg = <25>;
- };
-
- isi_clk: isi_clk {
- #clock-cells = <0>;
- reg = <26>;
- };
-
- udphs_clk: udphs_clk {
- #clock-cells = <0>;
- reg = <27>;
- };
-
- aestdessha_clk: aestdessha_clk {
- #clock-cells = <0>;
- reg = <28>;
- };
-
- mci1_clk: mci1_clk {
- #clock-cells = <0>;
- reg = <29>;
- };
-
- vdec_clk: vdec_clk {
- #clock-cells = <0>;
- reg = <30>;
- };
- };
+ #clock-cells = <2>;
+ clocks = <&clk32k>, <&main_xtal>;
+ clock-names = "slow_clk", "main_xtal";
};
rstc@fffffd00 {
@@ -405,7 +143,7 @@
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffd30 0xf>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
};
@@ -421,7 +159,7 @@
#size-cells = <0>;
reg = <0xfff7c000 0x100>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tcb0_clk>, <&tcb0_clk>, <&tcb0_clk>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 18>, <&pmc PMC_TYPE_PERIPHERAL 18>, <&pmc PMC_TYPE_PERIPHERAL 18>, <&clk32k>;
clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
};
@@ -431,7 +169,7 @@
#size-cells = <0>;
reg = <0xfffd4000 0x100>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tcb0_clk>, <&tcb0_clk>, <&tcb0_clk>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 18>, <&pmc PMC_TYPE_PERIPHERAL 18>, <&pmc PMC_TYPE_PERIPHERAL 18>, <&clk32k>;
clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
};
@@ -440,7 +178,7 @@
reg = <0xffffec00 0x200>;
interrupts = <21 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <2>;
- clocks = <&dma0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 21>;
clock-names = "dma_clk";
};
@@ -883,7 +621,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioA_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
};
pioB: gpio@fffff400 {
@@ -894,7 +632,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioB_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 3>;
};
pioC: gpio@fffff600 {
@@ -905,7 +643,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioC_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 4>;
};
pioD: gpio@fffff800 {
@@ -916,7 +654,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioDE_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
};
pioE: gpio@fffffa00 {
@@ -927,7 +665,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioDE_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
};
};
@@ -937,7 +675,7 @@
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
clock-names = "usart";
status = "disabled";
};
@@ -950,7 +688,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart0>;
- clocks = <&usart0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
clock-names = "usart";
status = "disabled";
};
@@ -963,7 +701,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart1>;
- clocks = <&usart1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
clock-names = "usart";
status = "disabled";
};
@@ -976,7 +714,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart2>;
- clocks = <&usart2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
clock-names = "usart";
status = "disabled";
};
@@ -989,7 +727,7 @@
atmel,use-dma-tx;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart3>;
- clocks = <&usart3_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
clock-names = "usart";
status = "disabled";
};
@@ -1000,7 +738,7 @@
interrupts = <25 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_macb_rmii>;
- clocks = <&macb0_clk>, <&macb0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 25>, <&pmc PMC_TYPE_PERIPHERAL 25>;
clock-names = "hclk", "pclk";
status = "disabled";
};
@@ -1009,7 +747,7 @@
compatible = "atmel,at91sam9g45-trng";
reg = <0xfffcc000 0x100>;
interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&trng_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
};
i2c0: i2c@fff84000 {
@@ -1020,7 +758,7 @@
pinctrl-0 = <&pinctrl_i2c0>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&twi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 12>;
status = "disabled";
};
@@ -1032,7 +770,7 @@
pinctrl-0 = <&pinctrl_i2c1>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&twi1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
status = "disabled";
};
@@ -1042,7 +780,7 @@
interrupts = <16 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
- clocks = <&ssc0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
clock-names = "pclk";
status = "disabled";
};
@@ -1053,7 +791,7 @@
interrupts = <17 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
- clocks = <&ssc1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 17>;
clock-names = "pclk";
status = "disabled";
};
@@ -1064,7 +802,7 @@
interrupts = <24 IRQ_TYPE_LEVEL_HIGH 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ac97>;
- clocks = <&ac97_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 24>;
clock-names = "ac97_clk";
status = "disabled";
};
@@ -1075,7 +813,7 @@
compatible = "atmel,at91sam9g45-adc";
reg = <0xfffb0000 0x100>;
interrupts = <20 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&adc_clk>, <&adc_op_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>, <&adc_op_clk>;
clock-names = "adc_clk", "adc_op_clk";
atmel,adc-channels-used = <0xff>;
atmel,adc-vref = <3300>;
@@ -1111,7 +849,7 @@
compatible = "atmel,at91sam9g45-isi";
reg = <0xfffb4000 0x4000>;
interrupts = <26 IRQ_TYPE_LEVEL_HIGH 5>;
- clocks = <&isi_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 26>;
clock-names = "isi_clk";
status = "disabled";
port {
@@ -1125,7 +863,7 @@
reg = <0xfffb8000 0x300>;
interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
#pwm-cells = <3>;
- clocks = <&pwm_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
status = "disabled";
};
@@ -1138,7 +876,7 @@
dma-names = "rxtx";
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&mci0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 11>;
clock-names = "mci_clk";
status = "disabled";
};
@@ -1152,7 +890,7 @@
dma-names = "rxtx";
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&mci1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 29>;
clock-names = "mci_clk";
status = "disabled";
};
@@ -1176,7 +914,7 @@
interrupts = <14 4 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
- clocks = <&spi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
clock-names = "spi_clk";
status = "disabled";
};
@@ -1189,73 +927,19 @@
interrupts = <15 4 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&spi1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
clock-names = "spi_clk";
status = "disabled";
};
usb2: gadget@fff78000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,at91sam9g45-udc";
reg = <0x00600000 0x80000
0xfff78000 0x400>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&udphs_clk>, <&utmi>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 27>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
clk32k: sckc@fffffd50 {
@@ -1294,7 +978,7 @@
interrupts = <23 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fb>;
- clocks = <&lcd_clk>, <&lcd_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_PERIPHERAL 23>;
clock-names = "hclk", "lcdc_clk";
status = "disabled";
};
@@ -1303,7 +987,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00700000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_SYSTEM 6>;
clock-names = "ohci_clk", "hclk", "uhpck";
status = "disabled";
};
@@ -1312,7 +996,7 @@
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00800000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&utmi>, <&uhphs_clk>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>, <&pmc PMC_TYPE_PERIPHERAL 22>;
clock-names = "usb_clk", "ehci_clk";
status = "disabled";
};
@@ -1330,7 +1014,7 @@
0x3 0x0 0x40000000 0x10000000
0x4 0x0 0x50000000 0x10000000
0x5 0x0 0x60000000 0x10000000>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
status = "disabled";
nand_controller: nand-controller {
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 84bed6f55fcd..a3a5c82d9f29 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -72,9 +72,9 @@
pinctrl-0 = <&pinctrl_pck1_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
resetb-gpios = <&pioD 12 GPIO_ACTIVE_LOW>;
pwdn-gpios = <&pioD 13 GPIO_ACTIVE_HIGH>;
- clocks = <&pck1>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 9>;
clock-names = "xvclk";
- assigned-clocks = <&pck1>;
+ assigned-clocks = <&pmc PMC_TYPE_SYSTEM 9>;
assigned-clock-rates = <25000000>;
port {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index ea675174432e..a994d076dc7e 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -104,7 +104,7 @@
ramc0: ramc@ffffe800 {
compatible = "atmel,at91sam9g45-ddramc";
reg = <0xffffe800 0x200>;
- clocks = <&ddrck>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 2>;
clock-names = "ddrck";
};
@@ -116,278 +116,10 @@
pmc: pmc@fffffc00 {
compatible = "atmel,at91sam9n12-pmc", "syscon";
reg = <0xfffffc00 0x200>;
+ #clock-cells = <2>;
+ clocks = <&clk32k>, <&main_xtal>;
+ clock-names = "slow_clk", "main_xtal";
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- interrupt-controller;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
-
- main_rc_osc: main_rc_osc {
- compatible = "atmel,at91sam9x5-clk-main-rc-osc";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MOSCRCS>;
- clock-frequency = <12000000>;
- clock-accuracy = <50000000>;
- };
-
- main_osc: main_osc {
- compatible = "atmel,at91rm9200-clk-main-osc";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MOSCS>;
- clocks = <&main_xtal>;
- };
-
- main: mainck {
- compatible = "atmel,at91sam9x5-clk-main";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MOSCSELS>;
- clocks = <&main_rc_osc>, <&main_osc>;
- };
-
- plla: pllack {
- compatible = "atmel,at91rm9200-clk-pll";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_LOCKA>;
- clocks = <&main>;
- reg = <0>;
- atmel,clk-input-range = <2000000 32000000>;
- #atmel,pll-clk-output-range-cells = <4>;
- atmel,pll-clk-output-ranges = <745000000 800000000 0 0>,
- <695000000 750000000 1 0>,
- <645000000 700000000 2 0>,
- <595000000 650000000 3 0>,
- <545000000 600000000 0 1>,
- <495000000 555000000 1 1>,
- <445000000 500000000 2 1>,
- <400000000 450000000 3 1>;
- };
-
- plladiv: plladivck {
- compatible = "atmel,at91sam9x5-clk-plldiv";
- #clock-cells = <0>;
- clocks = <&plla>;
- };
-
- pllb: pllbck {
- compatible = "atmel,at91rm9200-clk-pll";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_LOCKB>;
- clocks = <&main>;
- reg = <1>;
- atmel,clk-input-range = <2000000 32000000>;
- #atmel,pll-clk-output-range-cells = <3>;
- atmel,pll-clk-output-ranges = <30000000 100000000 0>;
- };
-
- mck: masterck {
- compatible = "atmel,at91sam9x5-clk-master";
- #clock-cells = <0>;
- interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&pllb>;
- atmel,clk-output-range = <0 133333333>;
- atmel,clk-divisors = <1 2 4 3>;
- atmel,master-clk-have-div3-pres;
- };
-
- usb: usbck {
- compatible = "atmel,at91sam9n12-clk-usb";
- #clock-cells = <0>;
- clocks = <&pllb>;
- };
-
- prog: progck {
- compatible = "atmel,at91sam9x5-clk-programmable";
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&pmc>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&pllb>, <&mck>;
-
- prog0: prog0 {
- #clock-cells = <0>;
- reg = <0>;
- interrupts = <AT91_PMC_PCKRDY(0)>;
- };
-
- prog1: prog1 {
- #clock-cells = <0>;
- reg = <1>;
- interrupts = <AT91_PMC_PCKRDY(1)>;
- };
- };
-
- systemck {
- compatible = "atmel,at91rm9200-clk-system";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ddrck: ddrck {
- #clock-cells = <0>;
- reg = <2>;
- clocks = <&mck>;
- };
-
- lcdck: lcdck {
- #clock-cells = <0>;
- reg = <3>;
- clocks = <&mck>;
- };
-
- uhpck: uhpck {
- #clock-cells = <0>;
- reg = <6>;
- clocks = <&usb>;
- };
-
- udpck: udpck {
- #clock-cells = <0>;
- reg = <7>;
- clocks = <&usb>;
- };
-
- pck0: pck0 {
- #clock-cells = <0>;
- reg = <8>;
- clocks = <&prog0>;
- };
-
- pck1: pck1 {
- #clock-cells = <0>;
- reg = <9>;
- clocks = <&prog1>;
- };
- };
-
- periphck {
- compatible = "atmel,at91sam9x5-clk-peripheral";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&mck>;
-
- pioAB_clk: pioAB_clk {
- #clock-cells = <0>;
- reg = <2>;
- };
-
- pioCD_clk: pioCD_clk {
- #clock-cells = <0>;
- reg = <3>;
- };
-
- fuse_clk: fuse_clk {
- #clock-cells = <0>;
- reg = <4>;
- };
-
- usart0_clk: usart0_clk {
- #clock-cells = <0>;
- reg = <5>;
- };
-
- usart1_clk: usart1_clk {
- #clock-cells = <0>;
- reg = <6>;
- };
-
- usart2_clk: usart2_clk {
- #clock-cells = <0>;
- reg = <7>;
- };
-
- usart3_clk: usart3_clk {
- #clock-cells = <0>;
- reg = <8>;
- };
-
- twi0_clk: twi0_clk {
- reg = <9>;
- #clock-cells = <0>;
- };
-
- twi1_clk: twi1_clk {
- #clock-cells = <0>;
- reg = <10>;
- };
-
- mci0_clk: mci0_clk {
- #clock-cells = <0>;
- reg = <12>;
- };
-
- spi0_clk: spi0_clk {
- #clock-cells = <0>;
- reg = <13>;
- };
-
- spi1_clk: spi1_clk {
- #clock-cells = <0>;
- reg = <14>;
- };
-
- uart0_clk: uart0_clk {
- #clock-cells = <0>;
- reg = <15>;
- };
-
- uart1_clk: uart1_clk {
- #clock-cells = <0>;
- reg = <16>;
- };
-
- tcb_clk: tcb_clk {
- #clock-cells = <0>;
- reg = <17>;
- };
-
- pwm_clk: pwm_clk {
- #clock-cells = <0>;
- reg = <18>;
- };
-
- adc_clk: adc_clk {
- #clock-cells = <0>;
- reg = <19>;
- };
-
- dma0_clk: dma0_clk {
- #clock-cells = <0>;
- reg = <20>;
- };
-
- uhphs_clk: uhphs_clk {
- #clock-cells = <0>;
- reg = <22>;
- };
-
- udphs_clk: udphs_clk {
- #clock-cells = <0>;
- reg = <23>;
- };
-
- lcdc_clk: lcdc_clk {
- #clock-cells = <0>;
- reg = <25>;
- };
-
- sha_clk: sha_clk {
- #clock-cells = <0>;
- reg = <27>;
- };
-
- ssc0_clk: ssc0_clk {
- #clock-cells = <0>;
- reg = <28>;
- };
-
- aes_clk: aes_clk {
- #clock-cells = <0>;
- reg = <29>;
- };
-
- trng_clk: trng_clk {
- #clock-cells = <0>;
- reg = <30>;
- };
- };
};
rstc@fffffe00 {
@@ -400,7 +132,7 @@
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffe30 0xf>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
};
shdwc@fffffe10 {
@@ -439,7 +171,7 @@
interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
dmas = <&dma 1 AT91_DMA_CFG_PER_ID(0)>;
dma-names = "rxtx";
- clocks = <&mci0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 12>;
clock-names = "mci_clk";
#address-cells = <1>;
#size-cells = <0>;
@@ -452,7 +184,7 @@
#size-cells = <0>;
reg = <0xf8008000 0x100>;
interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tcb_clk>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 17>, <&clk32k>;
clock-names = "t0_clk", "slow_clk";
};
@@ -462,7 +194,7 @@
#size-cells = <0>;
reg = <0xf800c000 0x100>;
interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tcb_clk>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 17>, <&clk32k>;
clock-names = "t0_clk", "slow_clk";
};
@@ -470,7 +202,7 @@
compatible = "atmel,at91sam9n12-hlcdc";
reg = <0xf8038000 0x2000>;
interrupts = <25 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 25>, <&pmc PMC_TYPE_SYSTEM 3>, <&clk32k>;
clock-names = "periph_clk", "sys_clk", "slow_clk";
status = "disabled";
@@ -499,7 +231,7 @@
reg = <0xffffec00 0x200>;
interrupts = <20 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <2>;
- clocks = <&dma0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
clock-names = "dma_clk";
};
@@ -817,7 +549,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioAB_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
};
pioB: gpio@fffff600 {
@@ -828,7 +560,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioAB_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
};
pioC: gpio@fffff800 {
@@ -839,7 +571,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioCD_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 3>;
};
pioD: gpio@fffffa00 {
@@ -850,7 +582,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioCD_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 3>;
};
};
@@ -860,7 +592,7 @@
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
clock-names = "usart";
status = "disabled";
};
@@ -874,7 +606,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
- clocks = <&ssc0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 28>;
clock-names = "pclk";
status = "disabled";
};
@@ -885,7 +617,7 @@
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart0>;
- clocks = <&usart0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
clock-names = "usart";
status = "disabled";
};
@@ -896,7 +628,7 @@
interrupts = <6 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart1>;
- clocks = <&usart1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
clock-names = "usart";
status = "disabled";
};
@@ -907,7 +639,7 @@
interrupts = <7 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart2>;
- clocks = <&usart2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
clock-names = "usart";
status = "disabled";
};
@@ -918,7 +650,7 @@
interrupts = <8 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart3>;
- clocks = <&usart3_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
clock-names = "usart";
status = "disabled";
};
@@ -934,7 +666,7 @@
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c0>;
- clocks = <&twi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
status = "disabled";
};
@@ -949,7 +681,7 @@
#size-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- clocks = <&twi1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
status = "disabled";
};
@@ -964,7 +696,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
- clocks = <&spi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
clock-names = "spi_clk";
status = "disabled";
};
@@ -980,7 +712,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&spi1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
clock-names = "spi_clk";
status = "disabled";
};
@@ -1009,7 +741,7 @@
reg = <0xf8034000 0x300>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
#pwm-cells = <3>;
- clocks = <&pwm_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
status = "disabled";
};
@@ -1017,7 +749,7 @@
compatible = "atmel,at91sam9260-udc";
reg = <0xf803c000 0x4000>;
interrupts = <23 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&udphs_clk>, <&udpck>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_SYSTEM 7>;
clock-names = "pclk", "hclk";
status = "disabled";
};
@@ -1027,7 +759,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x00100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_SYSTEM 6>;
clock-names = "ohci_clk", "hclk", "uhpck";
status = "disabled";
};
@@ -1045,7 +777,7 @@
0x3 0x0 0x40000000 0x10000000
0x4 0x0 0x50000000 0x10000000
0x5 0x0 0x60000000 0x10000000>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
status = "disabled";
nand_controller: nand-controller {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d36e162a8817..870b83ff6b97 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -59,7 +59,7 @@
wm8904: codec@1a {
compatible = "wlf,wm8904";
reg = <0x1a>;
- clocks = <&pck0>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 8>;
clock-names = "mclk";
};
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index ea024e4b6e09..4d70194fd808 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -299,8 +299,6 @@
};
usb0: gadget@fffd4000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,at91sam9rl-udc";
reg = <0x00600000 0x100000>,
<0xfffd4000 0x4000>;
@@ -308,58 +306,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
dma0: dma-controller@ffffe600 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 7c2eb93f8cac..948fe99ab6c3 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -867,8 +867,6 @@
};
usb2: gadget@f803c000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,at91sam9g45-udc";
reg = <0x00500000 0x80000
0xf803c000 0x400>;
@@ -876,58 +874,6 @@
clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>, <&pmc PMC_TYPE_PERIPHERAL 23>;
clock-names = "hclk", "pclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
watchdog: watchdog@fffffe40 {
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index e26ea9006378..c7f1d97e69bb 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -56,6 +56,16 @@
3300000 0x0>;
status = "okay";
};
+
+ sd_vcc_reg: sd_vcc_reg {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ gpio = <&expgpio 6 GPIO_ACTIVE_HIGH>;
+ };
};
&firmware {
@@ -69,7 +79,7 @@
"GLOBAL_RESET",
"VDD_SD_IO_SEL",
"CAM_GPIO",
- "",
+ "SD_PWR_ON",
"";
status = "okay";
};
@@ -174,6 +184,7 @@
/* EMMC2 is used to drive the SD card */
&emmc2 {
vqmmc-supply = <&sd_io_1v8_reg>;
+ vmmc-supply = <&sd_vcc_reg>;
broken-cd;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm2835-common.dtsi b/arch/arm/boot/dts/bcm2835-common.dtsi
index 2b1d9d4c0cde..4119271c979d 100644
--- a/arch/arm/boot/dts/bcm2835-common.dtsi
+++ b/arch/arm/boot/dts/bcm2835-common.dtsi
@@ -130,7 +130,6 @@
compatible = "brcm,bcm2835-v3d";
reg = <0x7ec00000 0x1000>;
interrupts = <1 10>;
- power-domains = <&pm BCM2835_POWER_DOMAIN_GRAFX_V3D>;
};
vc4: gpu {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-common.dtsi b/arch/arm/boot/dts/bcm2835-rpi-common.dtsi
new file mode 100644
index 000000000000..8a55b6cded59
--- /dev/null
+++ b/arch/arm/boot/dts/bcm2835-rpi-common.dtsi
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This include file covers the common peripherals and configuration between
+ * bcm2835, bcm2836 and bcm2837 implementations that interact with RPi's
+ * firmware interface.
+ */
+
+#include <dt-bindings/power/raspberrypi-power.h>
+
+&v3d {
+ power-domains = <&power RPI_POWER_DOMAIN_V3D>;
+};
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index 53bf4579cc22..0549686134ea 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcm283x.dtsi"
#include "bcm2835-common.dtsi"
+#include "bcm2835-rpi-common.dtsi"
/ {
compatible = "brcm,bcm2835";
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 82d6c4662ae4..b390006aef79 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcm283x.dtsi"
#include "bcm2835-common.dtsi"
+#include "bcm2835-rpi-common.dtsi"
/ {
compatible = "brcm,bcm2836";
diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
index 9e95fee78e19..0199ec98cd61 100644
--- a/arch/arm/boot/dts/bcm2837.dtsi
+++ b/arch/arm/boot/dts/bcm2837.dtsi
@@ -1,5 +1,6 @@
#include "bcm283x.dtsi"
#include "bcm2835-common.dtsi"
+#include "bcm2835-rpi-common.dtsi"
/ {
compatible = "brcm,bcm2837";
diff --git a/arch/arm/boot/dts/berlin2.dtsi b/arch/arm/boot/dts/berlin2.dtsi
index d2f7d984bba5..3ab3cd250da7 100644
--- a/arch/arm/boot/dts/berlin2.dtsi
+++ b/arch/arm/boot/dts/berlin2.dtsi
@@ -77,7 +77,7 @@
ranges = <0 0xf7000000 0x1000000>;
- sdhci0: sdhci@ab0000 {
+ sdhci0: mmc@ab0000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0000 0x200>;
clocks = <&chip_clk CLKID_SDIO0XIN>, <&chip_clk CLKID_SDIO0>;
@@ -86,7 +86,7 @@
status = "disabled";
};
- sdhci1: sdhci@ab0800 {
+ sdhci1: mmc@ab0800 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0800 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO1>;
@@ -95,7 +95,7 @@
status = "disabled";
};
- sdhci2: sdhci@ab1000 {
+ sdhci2: mmc@ab1000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/berlin2cd.dtsi b/arch/arm/boot/dts/berlin2cd.dtsi
index e5c1f4213ff9..7cf3e6302d75 100644
--- a/arch/arm/boot/dts/berlin2cd.dtsi
+++ b/arch/arm/boot/dts/berlin2cd.dtsi
@@ -62,7 +62,7 @@
ranges = <0 0xf7000000 0x1000000>;
- sdhci0: sdhci@ab0000 {
+ sdhci0: mmc@ab0000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0000 0x200>;
clocks = <&chip_clk CLKID_SDIO0XIN>, <&chip_clk CLKID_SDIO0>;
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 99d6872a6dfc..c44a32e873f4 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -122,7 +122,7 @@
ranges = <0 0xf7000000 0x1000000>;
interrupt-parent = <&gic>;
- sdhci0: sdhci@ab0000 {
+ sdhci0: mmc@ab0000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0000 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
@@ -131,7 +131,7 @@
status = "disabled";
};
- sdhci1: sdhci@ab0800 {
+ sdhci1: mmc@ab0800 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab0800 0x200>;
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
@@ -140,7 +140,7 @@
status = "disabled";
};
- sdhci2: sdhci@ab1000 {
+ sdhci2: mmc@ab1000 {
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index c28ca0540f03..7702e048e110 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -308,14 +308,30 @@
ti,hwmods = "mcspi4";
};
- timer1: timer@2e000 {
- compatible = "ti,dm814-timer";
- reg = <0x2e000 0x2000>;
- interrupts = <67>;
- ti,hwmods = "timer1";
- ti,timer-alwon;
+ timer1_target: target-module@2e000 {
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ reg = <0x2e000 0x4>,
+ <0x2e010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
clocks = <&timer1_fck>;
clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x2e000 0x1000>;
+
+ timer1: timer@0 {
+ compatible = "ti,am335x-timer-1ms";
+ reg = <0x0 0x400>;
+ interrupts = <67>;
+ ti,timer-alwon;
+ clocks = <&timer1_fck>;
+ clock-names = "fck";
+ };
};
uart1: uart@20000 {
@@ -348,13 +364,29 @@
dma-names = "tx", "rx";
};
- timer2: timer@40000 {
- compatible = "ti,dm814-timer";
- reg = <0x40000 0x2000>;
- interrupts = <68>;
- ti,hwmods = "timer2";
+ timer2_target: target-module@40000 {
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ reg = <0x40000 0x4>,
+ <0x40010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
clocks = <&timer2_fck>;
clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x40000 0x1000>;
+
+ timer2: timer@0 {
+ compatible = "ti,dm814-timer";
+ reg = <0 0x1000>;
+ interrupts = <68>;
+ clocks = <&timer2_fck>;
+ clock-names = "fck";
+ };
};
timer3: timer@42000 {
@@ -735,3 +767,23 @@
};
#include "dm814x-clocks.dtsi"
+
+/* Preferred always-on timer for clocksource */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer1_fck>;
+ assigned-clock-parents = <&devosc_ck>;
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer2_fck>;
+ assigned-clock-parents = <&devosc_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 2a4934b60ded..3551a64963f8 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -440,23 +440,55 @@
dma-names = "tx", "rx";
};
- timer1: timer@4802e000 {
- compatible = "ti,dm816-timer";
- reg = <0x4802e000 0x2000>;
- interrupts = <67>;
- ti,hwmods = "timer1";
- ti,timer-alwon;
- clocks = <&timer1_fck>;
+ timer1_target: target-module@4802e000 {
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ reg = <0x4802e000 0x4>,
+ <0x4802e010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ clocks = <&alwon_clkctrl DM816_TIMER1_CLKCTRL 0>;
clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4802e000 0x1000>;
+
+ timer1: timer@0 {
+ compatible = "ti,dm816-timer";
+ reg = <0 0x1000>;
+ interrupts = <67>;
+ ti,timer-alwon;
+ clocks = <&alwon_clkctrl DM816_TIMER1_CLKCTRL 0>;
+ clock-names = "fck";
+ };
};
- timer2: timer@48040000 {
- compatible = "ti,dm816-timer";
- reg = <0x48040000 0x2000>;
- interrupts = <68>;
- ti,hwmods = "timer2";
- clocks = <&timer2_fck>;
+ timer2_target: target-module@48040000 {
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ reg = <0x48040000 0x4>,
+ <0x48040010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <SYSC_OMAP4_SOFTRESET>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ clocks = <&alwon_clkctrl DM816_TIMER2_CLKCTRL 0>;
clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x48040000 0x1000>;
+
+ timer2: timer@0 {
+ compatible = "ti,dm816-timer";
+ reg = <0 0x1000>;
+ interrupts = <68>;
+ clocks = <&alwon_clkctrl DM816_TIMER2_CLKCTRL 0>;
+ clock-names = "fck";
+ };
};
timer3: timer@48042000 {
@@ -642,3 +674,23 @@
};
#include "dm816x-clocks.dtsi"
+
+/* Preferred always-on timer for clocksource */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer1_fck>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&timer2_fck>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 3081b04e8c08..89e0bdaf3a85 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -175,7 +175,6 @@
#size-cells = <0>;
interrupts = <11>;
clock-frequency = <400000>;
- timeout-ms = <1000>;
clocks = <&core_clk 0>;
status = "okay";
};
@@ -248,7 +247,7 @@
marvell,#interrupts = <5>;
};
- intc: main-interrupt-ctrl@20200 {
+ intc: interrupt-controller@20200 {
compatible = "marvell,orion-intc";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/dra7-evm-common.dtsi b/arch/arm/boot/dts/dra7-evm-common.dtsi
index 23244b5a9942..f89a64cbcd53 100644
--- a/arch/arm/boot/dts/dra7-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra7-evm-common.dtsi
@@ -3,6 +3,7 @@
* Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
*/
+#include "dra74-ipu-dsp-common.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/ti-dra7-atl.h>
#include <dt-bindings/input/input.h>
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index af06a55d1c5c..7aeb30daf3b8 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -35,6 +35,40 @@
regulator-max-microvolt = <1800000>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp2_memory_region: dsp2-memory@9f000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9f000000 0x0 0x800000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
evm_3v3_sd: fixedregulator-sd {
compatible = "regulator-fixed";
regulator-name = "evm_3v3_sd";
@@ -537,3 +571,23 @@
pinctrl-1 = <&dcan1_pins_sleep>;
pinctrl-2 = <&dcan1_pins_default>;
};
+
+&ipu2 {
+ status = "okay";
+ memory-region = <&ipu2_memory_region>;
+};
+
+&ipu1 {
+ status = "okay";
+ memory-region = <&ipu1_memory_region>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_memory_region>;
+};
+
+&dsp2 {
+ status = "okay";
+ memory-region = <&dsp2_memory_region>;
+};
diff --git a/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi b/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi
new file mode 100644
index 000000000000..a25749a1c365
--- /dev/null
+++ b/arch/arm/boot/dts/dra7-ipu-dsp-common.dtsi
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common IPU and DSP data for TI DRA7xx/AM57xx platforms
+ */
+
+&mailbox5 {
+ status = "okay";
+ mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
+ status = "okay";
+ };
+ mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
+ status = "okay";
+ };
+};
+
+&mailbox6 {
+ status = "okay";
+ mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
+ status = "okay";
+ };
+};
+
+&ipu2 {
+ mboxes = <&mailbox6 &mbox_ipu2_ipc3x>;
+ ti,timers = <&timer3>;
+ ti,watchdog-timers = <&timer4>, <&timer9>;
+};
+
+&ipu1 {
+ mboxes = <&mailbox5 &mbox_ipu1_ipc3x>;
+ ti,timers = <&timer11>;
+ ti,watchdog-timers = <&timer7>, <&timer8>;
+};
+
+&dsp1 {
+ mboxes = <&mailbox5 &mbox_dsp1_ipc3x>;
+ ti,timers = <&timer5>;
+ ti,watchdog-timers = <&timer10>;
+};
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 2119a78e9c15..62ca89551219 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1143,7 +1143,6 @@
target-module@32000 { /* 0x48032000, ap 5 3e.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer2";
reg = <0x32000 0x4>,
<0x32010 0x4>;
reg-names = "rev", "sysc";
@@ -1163,15 +1162,14 @@
timer2: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER2_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER2_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
};
};
target-module@34000 { /* 0x48034000, ap 7 46.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer3";
reg = <0x34000 0x4>,
<0x34010 0x4>;
reg-names = "rev", "sysc";
@@ -1191,15 +1189,14 @@
timer3: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
};
};
target-module@36000 { /* 0x48036000, ap 9 4e.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer4";
reg = <0x36000 0x4>,
<0x36010 0x4>;
reg-names = "rev", "sysc";
@@ -1210,8 +1207,9 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
/* Domains (P, C): l4per_pwrdm, l4per_clkdm */
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>,
+ <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x36000 0x1000>;
@@ -1219,8 +1217,8 @@
timer4: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -1246,8 +1244,8 @@
timer9: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER9_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER9_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -1853,8 +1851,8 @@
timer10: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER10_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER10_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -1880,8 +1878,8 @@
timer11: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per_clkctrl DRA7_L4PER_TIMER11_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per_clkctrl DRA7_L4PER_TIMER11_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -3354,8 +3352,8 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
/* Domains (P, C): ipu_pwrdm, ipu_clkdm */
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>;
- clock-names = "fck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x20000 0x1000>;
@@ -3381,8 +3379,9 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
/* Domains (P, C): ipu_pwrdm, ipu_clkdm */
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>;
- clock-names = "fck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>,
+ <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x22000 0x1000>;
@@ -3417,8 +3416,8 @@
timer7: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER7_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER7_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -3444,8 +3443,8 @@
timer8: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&ipu_clkctrl DRA7_IPU_TIMER8_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&ipu_clkctrl DRA7_IPU_TIMER8_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -3471,8 +3470,8 @@
timer13: timer@0 {
compatible = "ti,omap5430-timer";
reg = <0x0 0x80>;
- clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>;
- clock-names = "fck";
+ clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>, <&timer_sys_clk_div>;
+ clock-names = "fck", "timer_sys_ck";
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
ti,timer-pwm;
};
@@ -4295,7 +4294,6 @@
target-module@4000 { /* 0x4ae04000, ap 15 40.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
- ti,hwmods = "counter_32k";
reg = <0x4000 0x4>,
<0x4010 0x4>;
reg-names = "rev", "sysc";
@@ -4430,9 +4428,8 @@
};
};
- target-module@8000 { /* 0x4ae18000, ap 9 30.0 */
+ timer1_target: target-module@8000 { /* 0x4ae18000, ap 9 30.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer1";
reg = <0x8000 0x4>,
<0x8010 0x4>;
reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 7191ee6a1b82..099546be5014 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -410,6 +410,42 @@
ti,hwmods = "dmm";
};
+ ipu1: ipu@58820000 {
+ compatible = "ti,dra7-ipu";
+ reg = <0x58820000 0x10000>;
+ reg-names = "l2ram";
+ iommus = <&mmu_ipu1>;
+ status = "disabled";
+ resets = <&prm_ipu 0>, <&prm_ipu 1>;
+ clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 0>;
+ firmware-name = "dra7-ipu1-fw.xem4";
+ };
+
+ ipu2: ipu@55020000 {
+ compatible = "ti,dra7-ipu";
+ reg = <0x55020000 0x10000>;
+ reg-names = "l2ram";
+ iommus = <&mmu_ipu2>;
+ status = "disabled";
+ resets = <&prm_core 0>, <&prm_core 1>;
+ clocks = <&ipu2_clkctrl DRA7_IPU2_MMU_IPU2_CLKCTRL 0>;
+ firmware-name = "dra7-ipu2-fw.xem4";
+ };
+
+ dsp1: dsp@40800000 {
+ compatible = "ti,dra7-dsp";
+ reg = <0x40800000 0x48000>,
+ <0x40e00000 0x8000>,
+ <0x40f00000 0x8000>;
+ reg-names = "l2ram", "l1pram", "l1dram";
+ ti,bootreg = <&scm_conf 0x55c 10>;
+ iommus = <&mmu0_dsp1>, <&mmu1_dsp1>;
+ status = "disabled";
+ resets = <&prm_dsp1 0>;
+ clocks = <&dsp1_clkctrl DRA7_DSP1_MMU0_DSP1_CLKCTRL 0>;
+ firmware-name = "dra7-dsp1-fw.xe66";
+ };
+
target-module@40d01000 {
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0x40d01000 0x4>,
@@ -1044,3 +1080,13 @@
reg = <0x1c00 0x60>;
};
};
+
+/* Preferred always-on timer for clockevent */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>;
+ assigned-clock-parents = <&sys_32k_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
index fabeb7704753..a5d275ea7bd3 100644
--- a/arch/arm/boot/dts/dra71-evm.dts
+++ b/arch/arm/boot/dts/dra71-evm.dts
@@ -17,6 +17,33 @@
reg = <0x0 0x80000000 0x0 0x80000000>; /* 2GB */
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
vpo_sd_1v8_3v3: gpio-regulator-TPS74801 {
compatible = "regulator-gpio";
@@ -270,3 +297,18 @@
&extcon_usb2 {
vbus-gpio = <&pcf_lcd 15 GPIO_ACTIVE_HIGH>;
};
+
+&ipu2 {
+ status = "okay";
+ memory-region = <&ipu2_memory_region>;
+};
+
+&ipu1 {
+ status = "okay";
+ memory-region = <&ipu1_memory_region>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_memory_region>;
+};
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index 01558a86af82..c84b63bf0fc8 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -5,6 +5,7 @@
/dts-v1/;
#include "dra72x.dtsi"
+#include "dra7-ipu-dsp-common.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/ti-dra7-atl.h>
@@ -583,23 +584,6 @@
rx-num-evt = <32>;
};
-&mailbox5 {
- status = "okay";
- mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
- status = "okay";
- };
- mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
- status = "okay";
- };
-};
-
-&mailbox6 {
- status = "okay";
- mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
- status = "okay";
- };
-};
-
&pcie1_rc {
status = "okay";
};
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index 2bb2e8be6276..6e70858f6313 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -14,6 +14,33 @@
reg = <0x0 0x80000000 0x0 0x80000000>; /* 2GB */
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_cma_pool: ipu2_cma@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_cma_pool: dsp1_cma@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_cma_pool: ipu1_cma@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
evm_1v8_sw: fixedregulator-evm_1v8 {
compatible = "regulator-fixed";
regulator-name = "evm_1v8";
@@ -113,3 +140,18 @@
pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev20_conf>;
vmmc-supply = <&evm_1v8_sw>;
};
+
+&ipu2 {
+ status = "okay";
+ memory-region = <&ipu2_cma_pool>;
+};
+
+&ipu1 {
+ status = "okay";
+ memory-region = <&ipu1_cma_pool>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_cma_pool>;
+};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 9adb77585ef1..951152fe206a 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -12,6 +12,33 @@
reg = <0x0 0x80000000 0x0 0x40000000>; /* 1024 MB */
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_memory_region: ipu2-memory@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_memory_region: dsp1-memory@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_memory_region: ipu1-memory@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
evm_1v8_sw: fixedregulator-evm_1v8 {
compatible = "regulator-fixed";
regulator-name = "evm_1v8";
@@ -78,3 +105,18 @@
pinctrl-3 = <&mmc2_pins_hs200 &mmc2_iodelay_hs200_rev10_conf>;
vmmc-supply = <&evm_1v8_sw>;
};
+
+&ipu2 {
+ status = "okay";
+ memory-region = <&ipu2_memory_region>;
+};
+
+&ipu1 {
+ status = "okay";
+ memory-region = <&ipu1_memory_region>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_memory_region>;
+};
diff --git a/arch/arm/boot/dts/dra72x.dtsi b/arch/arm/boot/dts/dra72x.dtsi
index da334489b18f..ae23ec14e8fa 100644
--- a/arch/arm/boot/dts/dra72x.dtsi
+++ b/arch/arm/boot/dts/dra72x.dtsi
@@ -10,6 +10,12 @@
/ {
compatible = "ti,dra722", "ti,dra72", "ti,dra7";
+ aliases {
+ rproc0 = &ipu1;
+ rproc1 = &ipu2;
+ rproc2 = &dsp1;
+ };
+
pmu {
compatible = "arm,cortex-a15-pmu";
interrupt-parent = <&wakeupgen>;
diff --git a/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi b/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi
new file mode 100644
index 000000000000..b1147a4b77f9
--- /dev/null
+++ b/arch/arm/boot/dts/dra74-ipu-dsp-common.dtsi
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common IPU and DSP data for TI DRA74x/DRA76x/AM572x/AM574x platforms
+ */
+
+#include "dra7-ipu-dsp-common.dtsi"
+
+&mailbox6 {
+ mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
+ status = "okay";
+ };
+};
+
+&dsp2 {
+ mboxes = <&mailbox6 &mbox_dsp2_ipc3x>;
+ ti,timers = <&timer6>;
+ ti,watchdog-timers = <&timer13>;
+};
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index 7b1c61298253..46d8e7615180 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -29,6 +29,13 @@
};
};
+ aliases {
+ rproc0 = &ipu1;
+ rproc1 = &ipu2;
+ rproc2 = &dsp1;
+ rproc3 = &dsp2;
+ };
+
pmu {
compatible = "arm,cortex-a15-pmu";
interrupt-parent = <&wakeupgen>;
@@ -124,6 +131,20 @@
ti,syscon-mmuconfig = <&dsp2_system 0x1>;
};
};
+
+ dsp2: dsp@41000000 {
+ compatible = "ti,dra7-dsp";
+ reg = <0x41000000 0x48000>,
+ <0x41600000 0x8000>,
+ <0x41700000 0x8000>;
+ reg-names = "l2ram", "l1pram", "l1dram";
+ ti,bootreg = <&scm_conf 0x560 10>;
+ iommus = <&mmu0_dsp2>, <&mmu1_dsp2>;
+ status = "disabled";
+ resets = <&prm_dsp2 0>;
+ clocks = <&dsp2_clkctrl DRA7_DSP2_MMU0_DSP2_CLKCTRL 0>;
+ firmware-name = "dra7-dsp2-fw.xe66";
+ };
};
};
diff --git a/arch/arm/boot/dts/dra76-evm.dts b/arch/arm/boot/dts/dra76-evm.dts
index e958cb3d1b31..820a0ece20d4 100644
--- a/arch/arm/boot/dts/dra76-evm.dts
+++ b/arch/arm/boot/dts/dra76-evm.dts
@@ -25,6 +25,40 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ipu2_cma_pool: ipu2_cma@95800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x95800000 0x0 0x3800000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp1_cma_pool: dsp1_cma@99000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x99000000 0x0 0x4000000>;
+ reusable;
+ status = "okay";
+ };
+
+ ipu1_cma_pool: ipu1_cma@9d000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9d000000 0x0 0x2000000>;
+ reusable;
+ status = "okay";
+ };
+
+ dsp2_cma_pool: dsp2_cma@9f000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x9f000000 0x0 0x800000>;
+ reusable;
+ status = "okay";
+ };
+ };
+
vsys_12v0: fixedregulator-vsys12v0 {
/* main supply */
compatible = "regulator-fixed";
@@ -548,3 +582,23 @@
data-lanes = <1 2>;
};
};
+
+&ipu2 {
+ status = "okay";
+ memory-region = <&ipu2_cma_pool>;
+};
+
+&ipu1 {
+ status = "okay";
+ memory-region = <&ipu1_cma_pool>;
+};
+
+&dsp1 {
+ status = "okay";
+ memory-region = <&dsp1_cma_pool>;
+};
+
+&dsp2 {
+ status = "okay";
+ memory-region = <&dsp2_cma_pool>;
+};
diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
index ce50c4dc6f2a..3af1ab4458ef 100644
--- a/arch/arm/boot/dts/e60k02.dtsi
+++ b/arch/arm/boot/dts/e60k02.dtsi
@@ -117,6 +117,8 @@
ricoh619: pmic@32 {
compatible = "ricoh,rc5t619";
reg = <0x32>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
system-power-controller;
regulators {
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 248bd372fe70..ca29d7ed8216 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -57,7 +57,8 @@
i2c_max77836: i2c-gpio-0 {
compatible = "i2c-gpio";
- gpios = <&gpd0 2 GPIO_ACTIVE_HIGH>, <&gpd0 3 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpd0 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpd0 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 86c26a4edfd7..aba8350cfdaf 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -50,9 +50,15 @@
};
};
+ wlan_pwrseq: mshc1-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpe0 4 GPIO_ACTIVE_LOW>;
+ };
+
i2c_max77836: i2c-gpio-0 {
compatible = "i2c-gpio";
- gpios = <&gpd0 2 GPIO_ACTIVE_HIGH>, <&gpd0 3 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpd0 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpd0 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
@@ -605,8 +611,6 @@
};
&mshc_0 {
- #address-cells = <1>;
- #size-cells = <0>;
broken-cd;
non-removable;
cap-mmc-highspeed;
@@ -625,10 +629,48 @@
status = "okay";
};
+&mshc_1 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ non-removable;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ samsung,dw-mshc-ciu-div = <1>;
+ samsung,dw-mshc-sdr-timing = <0 1>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus1 &sd1_bus4>;
+ bus-width = <4>;
+
+ mmc-pwrseq = <&wlan_pwrseq>;
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4334-fmac";
+ reg = <1>;
+
+ interrupt-parent = <&gpx1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
&serial_0 {
assigned-clocks = <&cmu CLK_SCLK_UART0>;
assigned-clock-rates = <100000000>;
status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ max-speed = <3000000>;
+ shutdown-gpios = <&gpe0 0 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpx3 1 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
+ clocks = <&s2mps14_osc S2MPS11_CLK_BT>;
+ };
};
&serial_1 {
diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
new file mode 100644
index 000000000000..6d0c04d77a39
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4210-i9100.dts
@@ -0,0 +1,768 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Samsung's Exynos4210 based Galaxy S2 (GT-I9100 version) device tree
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2020 Stenkin Evgeniy <stenkinevgeniy@gmail.com>
+ * Copyright (c) 2020 Paul Cercueil <paul@crapouillou.net>
+ */
+
+/dts-v1/;
+#include "exynos4210.dtsi"
+#include "exynos4412-ppmu-common.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+
+/ {
+ model = "Samsung Galaxy S2 (GT-I9100)";
+ compatible = "samsung,i9100", "samsung,exynos4210", "samsung,exynos4";
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0x40000000>;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ vemmc_reg: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "VMEM_VDD_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpk0 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ tsp_reg: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "TSP_FIXED_VOLTAGES";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpl0 3 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <70000>;
+ enable-active-high;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ cam_af_28v_reg: regulator-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "8M_AF_2.8V_EN";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpk1 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ cam_io_en_reg: regulator-3 {
+ compatible = "regulator-fixed";
+ regulator-name = "CAM_IO_EN";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ gpio = <&gpe2 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ cam_io_12v_reg: regulator-4 {
+ compatible = "regulator-fixed";
+ regulator-name = "8M_1.2V_EN";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ gpio = <&gpe2 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vt_core_15v_reg: regulator-5 {
+ compatible = "regulator-fixed";
+ regulator-name = "VT_CORE_1.5V";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ gpio = <&gpe2 2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ vol-down {
+ gpios = <&gpx2 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ label = "volume down";
+ debounce-interval = <10>;
+ };
+
+ vol-up {
+ gpios = <&gpx2 0 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ label = "volume up";
+ debounce-interval = <10>;
+ };
+
+ power {
+ gpios = <&gpx2 7 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ label = "power";
+ debounce-interval = <10>;
+ wakeup-source;
+ };
+
+ ok {
+ gpios = <&gpx3 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_OK>;
+ label = "ok";
+ debounce-interval = <10>;
+ };
+ };
+
+ wlan_pwrseq: sdhci3-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpl1 2 GPIO_ACTIVE_LOW>;
+ };
+
+ i2c_max17042_fuel: i2c-gpio {
+ compatible = "i2c-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sda-gpios = <&gpy4 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpy4 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <5>;
+
+ battery@36 {
+ compatible = "maxim,max17042";
+
+ interrupt-parent = <&gpx2>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-0 = <&max17042_fuel_irq>;
+ pinctrl-names = "default";
+
+ reg = <0x36>;
+ maxim,over-heat-temp = <700>;
+ maxim,over-volt = <4500>;
+ };
+ };
+
+ spi-lcd {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ num-chipselects = <1>;
+ cs-gpios = <&gpy4 3 GPIO_ACTIVE_LOW>;
+ sck-gpios = <&gpy3 1 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&gpy3 3 GPIO_ACTIVE_HIGH>;
+
+ lcd@0 {
+ compatible = "samsung,ld9040";
+ reg = <0>;
+
+ spi-max-frequency = <1200000>;
+
+ vdd3-supply = <&vmipi_reg>;
+ vci-supply = <&vcclcd_reg>;
+
+ reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>;
+ power-on-delay = <10>;
+ reset-delay = <10>;
+
+ panel-width-mm = <90>;
+ panel-height-mm = <154>;
+
+ display-timings {
+ timing {
+ clock-frequency = <23492370>;
+ hactive = <480>;
+ vactive = <800>;
+ hback-porch = <16>;
+ hfront-porch = <16>;
+ vback-porch = <2>;
+ vfront-porch = <28>;
+ hsync-len = <2>;
+ vsync-len = <1>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <0>;
+ pixelclk-active = <0>;
+ };
+ };
+
+ port {
+ lcd_ep: endpoint {
+ remote-endpoint = <&fimd_dpi_ep>;
+ };
+ };
+ };
+ };
+
+ fixed-rate-clocks {
+ xxti {
+ compatible = "samsung,clock-xxti";
+ clock-frequency = <0>;
+ };
+
+ xusbxti {
+ compatible = "samsung,clock-xusbxti";
+ clock-frequency = <24000000>;
+ };
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ cooling-maps {
+ map0 {
+ /* Corresponds to 800MHz */
+ cooling-device = <&cpu0 2 2>;
+ };
+ map1 {
+ /* Corresponds to 200MHz */
+ cooling-device = <&cpu0 4 4>;
+ };
+ };
+ };
+ };
+};
+
+&camera {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu0-supply = <&varm_breg>;
+};
+
+&ehci {
+ status = "okay";
+
+ phys = <&exynos_usbphy 1>;
+ phy-names = "host";
+};
+
+&exynos_usbphy {
+ status = "okay";
+
+ vbus-supply = <&safe1_sreg>;
+};
+
+&fimc_0 {
+ status = "okay";
+
+ assigned-clocks = <&clock CLK_MOUT_FIMC0>, <&clock CLK_SCLK_FIMC0>;
+ assigned-clock-parents = <&clock CLK_SCLK_MPLL>;
+ assigned-clock-rates = <0>, <160000000>;
+};
+
+&fimc_1 {
+ status = "okay";
+
+ assigned-clocks = <&clock CLK_MOUT_FIMC1>, <&clock CLK_SCLK_FIMC1>;
+ assigned-clock-parents = <&clock CLK_SCLK_MPLL>;
+ assigned-clock-rates = <0>, <160000000>;
+};
+
+&fimc_2 {
+ status = "okay";
+
+ assigned-clocks = <&clock CLK_MOUT_FIMC2>, <&clock CLK_SCLK_FIMC2>;
+ assigned-clock-parents = <&clock CLK_SCLK_MPLL>;
+ assigned-clock-rates = <0>, <160000000>;
+};
+
+&fimc_3 {
+ status = "okay";
+
+ assigned-clocks = <&clock CLK_MOUT_FIMC3>, <&clock CLK_SCLK_FIMC3>;
+ assigned-clock-parents = <&clock CLK_SCLK_MPLL>;
+ assigned-clock-rates = <0>, <160000000>;
+};
+
+&fimd {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ samsung,invert-vden;
+ samsung,invert-vclk;
+
+ pinctrl-0 = <&lcd_clk>, <&lcd_data24>;
+ pinctrl-names = "default";
+
+ port@3 {
+ reg = <3>;
+
+ fimd_dpi_ep: endpoint {
+ remote-endpoint = <&lcd_ep>;
+ };
+ };
+};
+
+&gpu {
+ status = "okay";
+
+ mali-supply = <&vg3d_breg>;
+ regulator-microvolt-offset = <50000>;
+ regulator-microsecs-delay = <50>;
+};
+
+&hsotg {
+ status = "okay";
+
+ dr_mode = "otg";
+ vusb_d-supply = <&vusb_reg>;
+ vusb_a-supply = <&vusbdac_reg>;
+};
+
+&i2c_3 {
+ status = "okay";
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-slave-addr = <0x10>;
+ samsung,i2c-max-bus-freq = <100000>;
+
+ pinctrl-0 = <&i2c3_bus>;
+ pinctrl-names = "default";
+
+ mxt224-touchscreen@4a {
+ compatible = "atmel,maxtouch";
+ reg = <0x4a>;
+
+ interrupt-parent = <&gpx0>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&i2c_5 {
+ status = "okay";
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-slave-addr = <0x10>;
+ samsung,i2c-max-bus-freq = <100000>;
+
+ pinctrl-0 = <&i2c5_bus>;
+ pinctrl-names = "default";
+
+ max8997_pmic@66 {
+ compatible = "maxim,max8997-pmic";
+ reg = <0x66>;
+
+ interrupts-extended = <&gpx0 7 IRQ_TYPE_NONE>,
+ <&gpx2 3 IRQ_TYPE_EDGE_FALLING>;
+
+ max8997,pmic-buck1-uses-gpio-dvs;
+ max8997,pmic-buck2-uses-gpio-dvs;
+ max8997,pmic-buck5-uses-gpio-dvs;
+
+ max8997,pmic-ignore-gpiodvs-side-effect;
+ max8997,pmic-buck125-default-dvs-idx = <0>;
+
+ max8997,pmic-buck125-dvs-gpios = <&gpx0 5 GPIO_ACTIVE_HIGH>,
+ <&gpx0 6 GPIO_ACTIVE_HIGH>,
+ <&gpl0 0 GPIO_ACTIVE_HIGH>;
+
+ max8997,pmic-buck1-dvs-voltage = <1350000>, <1300000>,
+ <1250000>, <1200000>,
+ <1150000>, <1100000>,
+ <1000000>, <950000>;
+
+ max8997,pmic-buck2-dvs-voltage = <1100000>, <1000000>,
+ <950000>, <900000>,
+ <1100000>, <1000000>,
+ <950000>, <900000>;
+
+ max8997,pmic-buck5-dvs-voltage = <1200000>, <1200000>,
+ <1200000>, <1200000>,
+ <1200000>, <1200000>,
+ <1200000>, <1200000>;
+
+ pinctrl-0 = <&max8997_irq>, <&otg_gp>, <&usb_sel>;
+ pinctrl-names = "default";
+
+ regulators {
+ vadc_reg: LDO1 {
+ regulator-name = "VADC_3.3V_C210";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+
+ };
+ valive_reg: LDO2 {
+ regulator-name = "VALIVE_1.1V_C210";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+
+ };
+
+ vusb_reg: LDO3 {
+ regulator-name = "VUSB_1.1V_C210";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ vmipi_reg: LDO4 {
+ regulator-name = "VMIPI_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vhsic_reg: LDO5 {
+ regulator-name = "VHSIC_1.2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ vpda_reg: LDO6 {
+ regulator-name = "VCC_1.8V_PDA";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ vcam_reg: LDO7 {
+ regulator-name = "CAM_ISP_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vusbdac_reg: LDO8 {
+ regulator-name = "VUSB+VDAC_3.3V_C210";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vccpda_reg: LDO9 {
+ regulator-name = "VCC_2.8V_PDA";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ vtouch_reg: LDO11 {
+ regulator-name = "TOUCH_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ vpll_reg: LDO10 {
+ regulator-name = "VPLL_1.1V";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ vtcam_reg: LDO12 {
+ regulator-name = "VT_CAM_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vcclcd_reg: LDO13 {
+ regulator-name = "VCC_3.0V_LCD";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ vmotor_reg: LDO14 {
+ regulator-name = "VCC_2.8V_MOTOR";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ vled_reg: LDO15 {
+ regulator-name = "LED_A_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ camsensor_reg: LDO16 {
+ regulator-name = "CAM_SENSOR_IO_1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ vtf_reg: LDO17 {
+ regulator-name = "VTF_2.8V";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+
+ vtouchled_reg: LDO18 {
+ regulator-name = "TOUCH_LED_3.3V";
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vddq_reg: LDO21 {
+ regulator-name = "VDDQ_M1M2_1.2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ varm_breg: BUCK1 {
+ regulator-name = "VARM_1.2V_C210";
+ regulator-min-microvolt = <65000>;
+ regulator-max-microvolt = <2225000>;
+ regulator-always-on;
+ };
+
+ vint_breg: BUCK2 {
+ regulator-name = "VINT_1.1V_C210";
+ regulator-min-microvolt = <65000>;
+ regulator-max-microvolt = <2225000>;
+ regulator-always-on;
+ };
+
+ vg3d_breg: BUCK3 {
+ regulator-name = "G3D_1.1V";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ camisp_breg: BUCK4 {
+ regulator-name = "CAM_ISP_CORE_1.2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vmem_breg: BUCK5 {
+ regulator-name = "VMEM_1.2V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ vccsub_breg: BUCK7 {
+ regulator-name = "VCC_SUB_2.0V";
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-always-on;
+ };
+
+ safe1_sreg: ESAFEOUT1 {
+ regulator-name = "SAFEOUT1";
+ };
+
+ safe2_sreg: ESAFEOUT2 {
+ regulator-name = "SAFEOUT2";
+ regulator-boot-on;
+ };
+
+ charger_reg: CHARGER {
+ regulator-name = "CHARGER";
+ regulator-min-microamp = <60000>;
+ regulator-max-microamp = <2580000>;
+ regulator-always-on;
+ };
+
+ chargercv_reg: CHARGER_CV {
+ regulator-name = "CHARGER_CV";
+ regulator-min-microvolt = <3800000>;
+ regulator-max-microvolt = <4100000>;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c_7 {
+ status = "okay";
+
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-slave-addr = <0x10>;
+ samsung,i2c-max-bus-freq = <400000>;
+
+ pinctrl-0 = <&i2c7_bus>;
+ pinctrl-names = "default";
+
+ ak8975@c {
+ compatible = "asahi-kasei,ak8975";
+ reg = <0x0c>;
+
+ gpios = <&gpx2 2 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&pinctrl_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sleep0>;
+
+ sleep0: sleep-states {
+ gpa0-0 {
+ samsung,pins = "gpa0-0";
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_INPUT>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ gpa0-1 {
+ samsung,pins = "gpa0-1";
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_OUT0>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ gpa0-2 {
+ samsung,pins = "gpa0-2";
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_INPUT>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ gpa0-3 {
+ samsung,pins = "gpa0-3";
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_OUT1>;
+ samsung,pin-pud-pdn = <EXYNOS_PIN_PULL_NONE>;
+ };
+ };
+};
+
+&pinctrl_1 {
+ mhl_int: mhl-int {
+ samsung,pins = "gpf3-5";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ i2c_mhl_bus: i2c-mhl-bus {
+ samsung,pins = "gpf0-4", "gpf0-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ usb_sel: usb-sel {
+ samsung,pins = "gpl0-6";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ samsung,pin-val = <0>;
+ };
+
+ bt_en: bt-en {
+ samsung,pins = "gpl0-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
+ samsung,pin-val = <0>;
+ };
+
+ bt_res: bt-res {
+ samsung,pins = "gpl1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
+ samsung,pin-val = <0>;
+ };
+
+ otg_gp: otg-gp {
+ samsung,pins = "gpx3-3";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ samsung,pin-val = <0>;
+ };
+
+ mag_mhl_gpio: mag-mhl-gpio {
+ samsung,pins = "gpd0-2";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ max8997_irq: max8997-irq {
+ samsung,pins = "gpx0-7";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ max17042_fuel_irq: max17042-fuel-irq {
+ samsung,pins = "gpx2-3";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
+
+ tsp224_irq: tsp224-irq {
+ samsung,pins = "gpx0-4";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ };
+};
+
+&sdhci_0 {
+ status = "okay";
+
+ bus-width = <8>;
+ non-removable;
+ vmmc-supply = <&vemmc_reg>;
+
+ pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_bus8>;
+ pinctrl-names = "default";
+};
+
+&sdhci_2 {
+ status = "okay";
+
+ bus-width = <4>;
+ cd-gpios = <&gpx3 4 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&vtf_reg>;
+
+ pinctrl-0 = <&sd2_clk>, <&sd2_cmd>, <&sd2_bus4>;
+ pinctrl-names = "default";
+};
+
+&sdhci_3 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ non-removable;
+ bus-width = <4>;
+ mmc-pwrseq = <&wlan_pwrseq>;
+ vmmc-supply = <&vtf_reg>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd3_clk>, <&sd3_cmd>, <&sd3_bus4>;
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4330-fmac";
+ reg = <1>;
+
+ interrupt-parent = <&gpx2>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
+&serial_0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en>, <&bt_res>, <&uart0_data>, <&uart0_fctl>;
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+
+ shutdown-gpios = <&gpl0 4 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpl1 0 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpx3 1 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&serial_1 {
+ status = "okay";
+};
+
+&serial_2 {
+ status = "okay";
+};
+
+&serial_3 {
+ status = "okay";
+};
+
+&tmu {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index 0d1e1a9c2f6e..890525b10d22 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -251,12 +251,7 @@
};
buck1_reg: BUCK1 {
- /*
- * HACK: The real name is VDD_ARM_1.2V,
- * but exynos-cpufreq does not support
- * DT-based regulator lookup yet.
- */
- regulator-name = "vdd_arm";
+ regulator-name = "VDD_ARM_1.2V";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 7c39dd1c4d3a..3d791db6095c 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -121,6 +121,11 @@
};
};
+ wlan_pwrseq: sdhci3-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpl1 2 GPIO_ACTIVE_LOW>;
+ };
+
fixed-rate-clocks {
xxti {
compatible = "samsung,clock-xxti";
@@ -280,11 +285,10 @@
max8997_pmic@66 {
compatible = "maxim,max8997-pmic";
- interrupts-extended = <&gpx0 7 0>, <&gpx2 3 0>;
reg = <0x66>;
- interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts-extended = <&gpx0 7 IRQ_TYPE_LEVEL_LOW>,
+ <&gpx2 3 IRQ_TYPE_EDGE_FALLING>;
max8997,pmic-buck1-uses-gpio-dvs;
max8997,pmic-buck2-uses-gpio-dvs;
@@ -403,12 +407,7 @@
};
varm_breg: BUCK1 {
- /*
- * HACK: The real name is VARM_1.2V_C210,
- * but exynos-cpufreq does not support
- * DT-based regulator lookup yet.
- */
- regulator-name = "vdd_arm";
+ regulator-name = "VARM_1.2V_C210";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
@@ -471,6 +470,30 @@
status = "okay";
};
+&sdhci_3 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ non-removable;
+ bus-width = <4>;
+ mmc-pwrseq = <&wlan_pwrseq>;
+ vmmc-supply = <&tflash_reg>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd3_clk>, <&sd3_cmd>, <&sd3_bus4>;
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4330-fmac";
+ reg = <1>;
+
+ interrupt-parent = <&gpx2>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
&serial_0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 9dda6bdb9253..02fde1a75ebd 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -50,6 +50,11 @@
enable-active-high;
};
+ wlan_pwrseq: sdhci3-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpe3 1 GPIO_ACTIVE_LOW>;
+ };
+
gpio-keys {
compatible = "gpio-keys";
@@ -164,7 +169,8 @@
hdmi_ddc: i2c-ddc {
compatible = "i2c-gpio";
- gpios = <&gpe4 2 GPIO_ACTIVE_HIGH &gpe4 3 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpe4 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpe4 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <100>;
#address-cells = <1>;
#size-cells = <0>;
@@ -317,7 +323,7 @@
max8952,sync-freq = <0>;
max8952,ramp-speed = <0>;
- regulator-name = "vdd_arm";
+ regulator-name = "VARM_1.2V_C210";
regulator-min-microvolt = <770000>;
regulator-max-microvolt = <1400000>;
regulator-always-on;
@@ -563,6 +569,29 @@
status = "okay";
};
+&sdhci_3 {
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ non-removable;
+ bus-width = <4>;
+ mmc-pwrseq = <&wlan_pwrseq>;
+ vmmc-supply = <&ldo5_reg>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd3_clk>, <&sd3_cmd>, <&sd3_bus4>;
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4330-fmac";
+ reg = <1>;
+ interrupt-parent = <&gpx2>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
&serial_0 {
status = "okay";
/delete-property/dmas;
diff --git a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
index 44f97546dd0a..53b3ca3effab 100644
--- a/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
+++ b/arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
@@ -53,7 +53,8 @@
i2c_ak8975: i2c-gpio-0 {
compatible = "i2c-gpio";
- gpios = <&gpy2 4 GPIO_ACTIVE_HIGH>, <&gpy2 5 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpy2 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpy2 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;
@@ -68,7 +69,8 @@
i2c_cm36651: i2c-gpio-2 {
compatible = "i2c-gpio";
- gpios = <&gpf0 0 GPIO_ACTIVE_LOW>, <&gpf0 1 GPIO_ACTIVE_LOW>;
+ sda-gpios = <&gpf0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpf0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
index 3023bc3b68ce..2c8111c6b065 100644
--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
@@ -140,7 +140,8 @@
i2c_max77693: i2c-gpio-1 {
compatible = "i2c-gpio";
- gpios = <&gpm2 0 GPIO_ACTIVE_HIGH>, <&gpm2 1 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpm2 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpm2 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;
@@ -188,7 +189,8 @@
i2c_max77693_fuel: i2c-gpio-3 {
compatible = "i2c-gpio";
- gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>, <&gpf1 4 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpf1 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpf1 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;
@@ -228,7 +230,8 @@
i2c-mhl {
compatible = "i2c-gpio";
- gpios = <&gpf0 4 GPIO_ACTIVE_HIGH>, <&gpf0 6 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpf0 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpf0 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <100>;
#address-cells = <1>;
#size-cells = <0>;
@@ -820,7 +823,7 @@
};
buck1_reg: BUCK1 {
- regulator-name = "vdd_mif";
+ regulator-name = "VDD_MIF";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1100000>;
regulator-always-on;
@@ -831,7 +834,7 @@
};
buck2_reg: BUCK2 {
- regulator-name = "vdd_arm";
+ regulator-name = "VDD_ARM";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
@@ -842,7 +845,7 @@
};
buck3_reg: BUCK3 {
- regulator-name = "vdd_int";
+ regulator-name = "VDD_INT";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1150000>;
regulator-always-on;
@@ -853,7 +856,7 @@
};
buck4_reg: BUCK4 {
- regulator-name = "vdd_g3d";
+ regulator-name = "VDD_G3D";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1150000>;
regulator-boot-on;
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 73d6a71da88d..a5c1ce1e396c 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -430,7 +430,7 @@
};
buck1_reg: BUCK1 {
- regulator-name = "vdd_mif";
+ regulator-name = "VDD_MIF";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1100000>;
regulator-always-on;
@@ -438,7 +438,7 @@
};
buck2_reg: BUCK2 {
- regulator-name = "vdd_arm";
+ regulator-name = "VDD_ARM";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
@@ -446,7 +446,7 @@
};
buck3_reg: BUCK3 {
- regulator-name = "vdd_int";
+ regulator-name = "VDD_INT";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1050000>;
regulator-always-on;
@@ -454,7 +454,7 @@
};
buck4_reg: BUCK4 {
- regulator-name = "vdd_g3d";
+ regulator-name = "VDD_G3D";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1100000>;
regulator-microvolt-offset = <50000>;
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index ecd14b283a6b..dc865be40751 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -363,7 +363,7 @@
};
buck1_reg: BUCK1 {
- regulator-name = "vdd_mif";
+ regulator-name = "VDD_MIF";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1100000>;
regulator-always-on;
@@ -372,7 +372,7 @@
};
buck2_reg: BUCK2 {
- regulator-name = "vdd_arm";
+ regulator-name = "VDD_ARM";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
@@ -381,7 +381,7 @@
};
buck3_reg: BUCK3 {
- regulator-name = "vdd_int";
+ regulator-name = "VDD_INT";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
@@ -390,7 +390,7 @@
};
buck4_reg: BUCK4 {
- regulator-name = "vdd_g3d";
+ regulator-name = "VDD_G3D";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
@@ -399,7 +399,7 @@
};
buck5_reg: BUCK5 {
- regulator-name = "vdd_m12";
+ regulator-name = "VDD_M12";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
@@ -408,7 +408,7 @@
};
buck6_reg: BUCK6 {
- regulator-name = "vdd12_5m";
+ regulator-name = "VDD12_5M";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
@@ -417,7 +417,7 @@
};
buck9_reg: BUCK9 {
- regulator-name = "vddf28_emmc";
+ regulator-name = "VDDF28_EMMC";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <3000000>;
regulator-always-on;
diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts
index 6904091d4837..c4cc7611898c 100644
--- a/arch/arm/boot/dts/exynos5250-arndale.dts
+++ b/arch/arm/boot/dts/exynos5250-arndale.dts
@@ -454,7 +454,7 @@
};
buck1_reg: BUCK1 {
- regulator-name = "vdd_mif";
+ regulator-name = "VDD_MIF";
regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
@@ -463,7 +463,7 @@
};
buck2_reg: BUCK2 {
- regulator-name = "vdd_arm";
+ regulator-name = "VDD_ARM";
regulator-min-microvolt = <912500>;
regulator-max-microvolt = <1300000>;
regulator-always-on;
@@ -472,7 +472,7 @@
};
buck3_reg: BUCK3 {
- regulator-name = "vdd_int";
+ regulator-name = "VDD_INT";
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
@@ -481,7 +481,7 @@
};
buck4_reg: BUCK4 {
- regulator-name = "vdd_g3d";
+ regulator-name = "VDD_G3D";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <1300000>;
regulator-always-on;
@@ -641,9 +641,8 @@
pinctrl-0 = <&i2c2_gpio_bus>;
status = "okay";
compatible = "i2c-gpio";
- gpios = <&gpa0 6 0 /* sda */
- &gpa0 7 0 /* scl */
- >;
+ sda-gpios = <&gpa0 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpa0 7 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
i2c-gpio,delay-us = <2>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index e9a09dd0a49b..dd7f8385d81e 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -673,7 +673,7 @@
};
buck2_reg: BUCK2 {
- regulator-name = "vdd_arm";
+ regulator-name = "PVDD_ARM_1V0";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index d325658901c5..1f4ecbca5225 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -288,11 +288,6 @@
reg = <0x53fa8000 0x4000>;
};
- gpr: iomuxc-gpr@53fa8000 {
- compatible = "fsl,imx50-iomuxc-gpr", "syscon";
- reg = <0x53fa8000 0xc>;
- };
-
pwm1: pwm@53fb4000 {
#pwm-cells = <2>;
compatible = "fsl,imx50-pwm", "fsl,imx27-pwm";
@@ -333,9 +328,10 @@
status = "disabled";
};
- src: src@53fd0000 {
+ src: reset-controller@53fd0000 {
compatible = "fsl,imx50-src", "fsl,imx51-src";
reg = <0x53fd0000 0x4000>;
+ interrupts = <75>;
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 92fbb90bec57..d3583aad8323 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -439,9 +439,10 @@
status = "disabled";
};
- src: src@73fd0000 {
+ src: reset-controller@73fd0000 {
compatible = "fsl,imx51-src";
reg = <0x73fd0000 0x4000>;
+ interrupts = <75>;
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/imx53-cx9020.dts b/arch/arm/boot/dts/imx53-cx9020.dts
index 0a475c234054..cfb18849a92b 100644
--- a/arch/arm/boot/dts/imx53-cx9020.dts
+++ b/arch/arm/boot/dts/imx53-cx9020.dts
@@ -59,23 +59,26 @@
};
dvi-converter {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "ti,tfp410";
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
- tfp410_in: endpoint {
- remote-endpoint = <&display0_out>;
+ tfp410_in: endpoint {
+ remote-endpoint = <&display0_out>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- tfp410_out: endpoint {
- remote-endpoint = <&dvi_connector_in>;
+ tfp410_out: endpoint {
+ remote-endpoint = <&dvi_connector_in>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 8536f59f59e6..afa57bf7b0ed 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -588,9 +588,10 @@
status = "disabled";
};
- src: src@53fd0000 {
+ src: reset-controller@53fd0000 {
compatible = "fsl,imx53-src", "fsl,imx51-src";
reg = <0x53fd0000 0x4000>;
+ interrupts = <75>;
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/imx6dl-colibri-v1_1-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-v1_1-eval-v3.dts
new file mode 100644
index 000000000000..223275f028f1
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-colibri-v1_1-eval-v3.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright 2020 Toradex
+ */
+
+/dts-v1/;
+
+#include "imx6dl-colibri-eval-v3.dts"
+#include "imx6qdl-colibri-v1_1-uhs.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX6DL/S V1.1 on Colibri Evaluation Board V3";
+ compatible = "toradex,colibri_imx6dl-v1_1-eval-v3",
+ "toradex,colibri_imx6dl-v1_1",
+ "toradex,colibri_imx6dl-eval-v3",
+ "toradex,colibri_imx6dl",
+ "fsl,imx6dl";
+};
+
+/* Colibri MMC */
+&usdhc1 {
+ status = "okay";
+ /*
+ * Please make sure your carrier board does not pull up any of
+ * the MMC/SD signals to 3.3 V before attempting to activate
+ * UHS-I support.
+ * To allow the signaling voltage to be changed to 1.8 V,
+ * delete the no-1-8-v property (example below):
+ * /delete-property/no-1-8-v;
+ */
+};
diff --git a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts
index bb74fc62d913..a2dd7e549568 100644
--- a/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts
+++ b/arch/arm/boot/dts/imx6q-dhcom-pdk2.dts
@@ -22,6 +22,53 @@
clock-frequency = <24000000>;
};
+ display_bl: display-bl {
+ compatible = "pwm-backlight";
+ pwms = <&pwm1 0 50000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
+ default-brightness-level = <8>;
+ enable-gpios = <&gpio3 27 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ lcd_display: disp0 {
+ compatible = "fsl,imx-parallel-display";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interface-pix-fmt = "rgb24";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_lcdif>;
+ status = "okay";
+
+ port@0 {
+ reg = <0>;
+
+ lcd_display_in: endpoint {
+ remote-endpoint = <&ipu1_di0_disp0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lcd_display_out: endpoint {
+ remote-endpoint = <&lcd_panel_in>;
+ };
+ };
+ };
+
+ panel {
+ compatible = "edt,etm0700g0edh6";
+ ddc-i2c-bus = <&i2c2>;
+ backlight = <&display_bl>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
+
sound {
compatible = "fsl,imx-audio-sgtl5000";
model = "imx-sgtl5000";
@@ -65,6 +112,15 @@
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&sw2_reg>;
};
+
+ touchscreen@38 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touchscreen>;
+ compatible = "edt,edt-ft5406";
+ reg = <0x38>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
+ };
};
&iomuxc {
@@ -77,9 +133,7 @@
MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x400120b0
MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x400120b0
MX6QDL_PAD_CSI0_DAT17__GPIO6_IO03 0x400120b0
- MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x120b0
MX6QDL_PAD_DI0_PIN4__GPIO4_IO20 0x400120b0
- MX6QDL_PAD_EIM_D27__GPIO3_IO27 0x120b0
MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x120b0
MX6QDL_PAD_KEY_COL1__GPIO4_IO08 0x400120b0
MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x400120b0
@@ -132,6 +186,52 @@
>;
};
+ pinctrl_ipu1_lcdif: ipu1-lcdif-grp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x38
+ MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15 0x38
+ MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x38
+ MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x38
+ MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x38
+ MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x38
+ MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x38
+ MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x38
+ MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x38
+ MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x38
+ MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x38
+ MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x38
+ MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x38
+ MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x38
+ MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x38
+ MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x38
+ MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x38
+ MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x38
+ MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x38
+ MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x38
+ MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x38
+ MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x38
+ MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x38
+ MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x38
+ MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x38
+ MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x38
+ MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x38
+ MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x38
+ MX6QDL_PAD_EIM_D27__GPIO3_IO27 0x120b0
+ >;
+ };
+
+ pinctrl_pwm1: pwm1-grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_DAT3__PWM1_OUT 0x1b0b1
+ >;
+ };
+
+ pinctrl_touchscreen: touchscreen-grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b0b1
+ >;
+ };
+
pinctrl_pcie: pcie-grp {
fsl,pins = <
MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b0b1
@@ -139,6 +239,10 @@
};
};
+&ipu1_di0_disp0 {
+ remote-endpoint = <&lcd_display_in>;
+};
+
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
@@ -146,6 +250,13 @@
status = "okay";
};
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ #pwm-cells = <3>;
+ status = "okay";
+};
+
&ssi1 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-colibri-v1_1-uhs.dtsi b/arch/arm/boot/dts/imx6qdl-colibri-v1_1-uhs.dtsi
new file mode 100644
index 000000000000..7672fbfc29be
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-colibri-v1_1-uhs.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Copyright 2020 Toradex
+ */
+
+&iomuxc {
+ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170b1
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100b1
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170b1
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170b1
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170b1
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170b1
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170f1
+ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100f1
+ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170f1
+ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170f1
+ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170f1
+ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170f1
+ >;
+ };
+};
+
+/* Colibri MMC */
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1 &pinctrl_mmc_cd>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz &pinctrl_mmc_cd>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz &pinctrl_mmc_cd>;
+ vmmc-supply = <&reg_module_3v3>;
+ vqmmc-supply = <&vgen3_reg>;
+ wakeup-source;
+ keep-power-in-suspend;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+};
diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
index 6e3c6b4925a7..240b86d2eb71 100644
--- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
@@ -193,7 +193,16 @@
regulator-always-on;
};
- /* vgen3: unused */
+ /*
+ * +V3.3_1.8_SD1 coming off VGEN3 and supplying
+ * the i.MX 6 NVCC_SD1.
+ */
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
vgen4_reg: vgen4 {
regulator-min-microvolt = <1800000>;
diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
index dc646b72b59a..bb3597132c62 100644
--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
@@ -258,6 +258,14 @@
status = "okay";
};
+&usbotg {
+ vbus-supply = <&reg_5p0v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg>;
+ disable-over-current;
+ status = "okay";
+};
+
&wdog1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wdog>;
@@ -359,6 +367,12 @@
>;
};
+ pinctrl_usbotg: usbotggrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x13059
+ >;
+ };
+
pinctrl_wdog: wdoggrp {
fsl,pins = <
MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
index e8e36dfd0a6b..69ca70d3baa8 100644
--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
@@ -295,6 +295,15 @@
VDDIO-supply = <&reg_3p3v>;
};
+ magn@1c {
+ compatible = "st,lsm9ds1-magn";
+ reg = <0x1c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mag>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <9 IRQ_TYPE_EDGE_RISING>;
+ };
+
tca8418: keypad@34 {
compatible = "ti,tca8418";
pinctrl-names = "default";
@@ -389,6 +398,16 @@
};
};
};
+
+ imu@6a {
+ compatible = "st,lsm9ds1-imu";
+ reg = <0x6a>;
+ st,drdy-int-pin = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_imu>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
&i2c3 {
@@ -609,6 +628,12 @@
>;
};
+ pinctrl_imu: imugrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT12__GPIO5_IO06 0x1b0b0
+ >;
+ };
+
pinctrl_keypad: keypadgrp {
fsl,pins = <
MX6QDL_PAD_DISP0_DAT17__GPIO5_IO11 0x0001b0b0 /* KEYPAD_IRQ# */
@@ -616,6 +641,12 @@
>;
};
+ pinctrl_mag: maggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT15__GPIO5_IO09 0x1b0b0
+ >;
+ };
+
pinctrl_pcie: pciegrp {
fsl,pins = <
MX6QDL_PAD_DISP0_DAT10__GPIO4_IO31 0x1b0b0 /* PCI_RST# */
diff --git a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
index 6d21cc6a9d4b..76d6cf57f1c3 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
@@ -248,6 +248,15 @@
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
+ magn@1c {
+ compatible = "st,lsm9ds1-magn";
+ reg = <0x1c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mag>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <17 IRQ_TYPE_EDGE_RISING>;
+ };
+
ltc3676: pmic@3c {
compatible = "lltc,ltc3676";
reg = <0x3c>;
@@ -320,6 +329,16 @@
};
};
};
+
+ imu@6a {
+ compatible = "st,lsm9ds1-imu";
+ reg = <0x6a>;
+ st,drdy-int-pin = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_imu>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
&i2c3 {
@@ -501,6 +520,18 @@
>;
};
+ pinctrl_imu: imugrp {
+ fsl,pins = <
+ MX6QDL_PAD_DI0_PIN2__GPIO4_IO18 0x1b0b0
+ >;
+ };
+
+ pinctrl_mag: maggrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17 0x1b0b0
+ >;
+ };
+
pinctrl_pcie: pciegrp {
fsl,pins = <
MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 /* PCIE RST */
diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
index 30fe47ff64a4..0857de505192 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
@@ -81,20 +81,6 @@
enable-active-high;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- reg_bt: regulator-bt {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_reg_bt>;
- compatible = "regulator-fixed";
- regulator-name = "bt";
- gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
- startup-delay-us = <100>;
- enable-active-high;
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
};
};
@@ -231,9 +217,14 @@
/* Sterling-LWB Bluetooth */
&uart4 {
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart4>;
+ pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_bten>;
uart-has-rtscts;
status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4330-bt";
+ shutdown-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ };
};
/* GPS */
@@ -259,7 +250,7 @@
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
- vmmc-supply = <&reg_3p3v>;
+ vmmc-supply = <&reg_wl>;
non-removable;
bus-width = <4>;
status = "okay";
@@ -288,6 +279,12 @@
>;
};
+ pinctrl_bten: btengrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b1
+ >;
+ };
+
pinctrl_ecspi3: escpi3grp {
fsl,pins = <
MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
@@ -393,12 +390,6 @@
>;
};
- pinctrl_reg_bt: regbtgrp {
- fsl,pins = <
- MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b1
- >;
- };
-
pinctrl_reg_wl: regwlgrp {
fsl,pins = <
MX6QDL_PAD_GPIO_5__GPIO1_IO05 0x1b0b1
diff --git a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
index 6d7f6b9035bc..b06577808ff4 100644
--- a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
@@ -53,10 +53,21 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
+ phy-handle = <&phy>;
phy-mode = "rgmii-id";
phy-reset-duration = <2>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: ethernet-phy@0 {
+ reg = <0>;
+ qca,clk-out-frequency = <125000000>;
+ };
+ };
};
&iomuxc {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 48f50161ea21..32114cf6acee 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -74,7 +74,8 @@
interrupt-parent = <&gpc>;
interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>;
fsl,tempmon = <&anatop>;
- fsl,tempmon-data = <&ocotp>;
+ nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+ nvmem-cell-names = "calib", "temp_grade";
clocks = <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
#thermal-sensor-cells = <0>;
};
@@ -857,7 +858,7 @@
interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
};
- src: src@20d8000 {
+ src: reset-controller@20d8000 {
compatible = "fsl,imx6q-src", "fsl,imx51-src";
reg = <0x020d8000 0x4000>;
interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>,
@@ -1171,6 +1172,14 @@
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
+
+ tempmon_calib: calib@38 {
+ reg = <0x38 4>;
+ };
+
+ tempmon_temp_grade: temp-grade@20 {
+ reg = <0x20 4>;
+ };
};
tzasc@21d0000 { /* TZASC1 */
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 8230b45057a1..911d8cf77f2c 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -98,7 +98,8 @@
interrupts = <0 49 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpc>;
fsl,tempmon = <&anatop>;
- fsl,tempmon-data = <&ocotp>;
+ nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+ nvmem-cell-names = "calib", "temp_grade";
clocks = <&clks IMX6SL_CLK_PLL3_USB_OTG>;
};
@@ -677,7 +678,7 @@
interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
};
- src: src@20d8000 {
+ src: reset-controller@20d8000 {
compatible = "fsl,imx6sl-src", "fsl,imx51-src";
reg = <0x020d8000 0x4000>;
interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>,
@@ -961,6 +962,14 @@
cpu_speed_grade: speed-grade@10 {
reg = <0x10 4>;
};
+
+ tempmon_calib: calib@38 {
+ reg = <0x38 4>;
+ };
+
+ tempmon_temp_grade: temp-grade@20 {
+ reg = <0x20 4>;
+ };
};
audmux: audmux@21d8000 {
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 09f21aaee936..94e3df47d1ad 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -754,7 +754,7 @@
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
};
- src: src@20d8000 {
+ src: reset-controller@20d8000 {
compatible = "fsl,imx6sx-src", "fsl,imx51-src";
reg = <0x020d8000 0x4000>;
interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 345ae9b0db37..5379a03391bd 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -677,7 +677,7 @@
interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
};
- src: src@20d8000 {
+ src: reset-controller@20d8000 {
compatible = "fsl,imx6ul-src", "fsl,imx51-src";
reg = <0x020d8000 0x4000>;
interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/imx7-tqma7.dtsi b/arch/arm/boot/dts/imx7-tqma7.dtsi
index 9aaed85138cb..8773344b54aa 100644
--- a/arch/arm/boot/dts/imx7-tqma7.dtsi
+++ b/arch/arm/boot/dts/imx7-tqma7.dtsi
@@ -16,7 +16,7 @@
};
&cpu0 {
- arm-supply = <&sw1a_reg>;
+ cpu-supply = <&sw1a_reg>;
};
&i2c1 {
diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
index 89267cd59037..713483c39c9d 100644
--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
@@ -37,6 +37,10 @@
cpu-supply = <&sw1a_reg>;
};
+&cpu1 {
+ cpu-supply = <&sw1a_reg>;
+};
+
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/imx7d-colibri.dtsi b/arch/arm/boot/dts/imx7d-colibri.dtsi
index c59d72e50920..219a0404a058 100644
--- a/arch/arm/boot/dts/imx7d-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7d-colibri.dtsi
@@ -13,6 +13,10 @@
};
};
+&cpu1 {
+ cpu-supply = <&reg_DCDC2>;
+};
+
&gpmi {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index 6b4acea1ef79..e0751e6ba3c0 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -121,6 +121,10 @@
cpu-supply = <&sw1a_reg>;
};
+&cpu1 {
+ cpu-supply = <&sw1a_reg>;
+};
+
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/imx7d-pinfunc.h b/arch/arm/boot/dts/imx7d-pinfunc.h
index 08ca1608fdb1..69f2c1ec8254 100644
--- a/arch/arm/boot/dts/imx7d-pinfunc.h
+++ b/arch/arm/boot/dts/imx7d-pinfunc.h
@@ -592,7 +592,7 @@
#define MX7D_PAD_UART2_RX_DATA__ECSPI1_SS3 0x0130 0x03A0 0x0000 0x3 0x0
#define MX7D_PAD_UART2_RX_DATA__ENET2_1588_EVENT1_IN 0x0130 0x03A0 0x0000 0x4 0x0
#define MX7D_PAD_UART2_RX_DATA__GPIO4_IO2 0x0130 0x03A0 0x0000 0x5 0x0
-#define MX7D_PAD_UART2_RX_DATA__ENET2_MDIO 0x0130 0x03A0 0x0000 0x6 0x0
+#define MX7D_PAD_UART2_RX_DATA__ENET2_MDIO 0x0130 0x03A0 0x0574 0x6 0x1
#define MX7D_PAD_UART2_TX_DATA__UART2_DCE_TX 0x0134 0x03A4 0x0000 0x0 0x0
#define MX7D_PAD_UART2_TX_DATA__UART2_DTE_RX 0x0134 0x03A4 0x06FC 0x0 0x3
#define MX7D_PAD_UART2_TX_DATA__I2C2_SDA 0x0134 0x03A4 0x05E0 0x1 0x0
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 869efbc4af42..17cca8a9f77b 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -162,6 +162,10 @@
cpu-supply = <&sw1a_reg>;
};
+&cpu1 {
+ cpu-supply = <&sw1a_reg>;
+};
+
&ecspi3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi3>;
diff --git a/arch/arm/boot/dts/imx7d-tqma7.dtsi b/arch/arm/boot/dts/imx7d-tqma7.dtsi
index 8ad3048dac0d..598aed1ffd99 100644
--- a/arch/arm/boot/dts/imx7d-tqma7.dtsi
+++ b/arch/arm/boot/dts/imx7d-tqma7.dtsi
@@ -9,3 +9,7 @@
#include "imx7d.dtsi"
#include "imx7-tqma7.dtsi"
+
+&cpu1 {
+ cpu-supply = <&sw1a_reg>;
+};
diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
index 2b8d6cc45a53..e5e20b07f184 100644
--- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts
+++ b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
@@ -33,7 +33,7 @@
};
&cpu0 {
- arm-supply = <&sw1a_reg>;
+ cpu-supply = <&sw1a_reg>;
};
&ecspi1 {
diff --git a/arch/arm/boot/dts/imx7d-zii-rpu2.dts b/arch/arm/boot/dts/imx7d-zii-rpu2.dts
index 39812c92bf0d..cbf0dbb4c198 100644
--- a/arch/arm/boot/dts/imx7d-zii-rpu2.dts
+++ b/arch/arm/boot/dts/imx7d-zii-rpu2.dts
@@ -182,7 +182,7 @@
};
&cpu0 {
- arm-supply = <&sw1a_reg>;
+ cpu-supply = <&sw1a_reg>;
};
&clks {
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 5bf0b39fa99b..f6bb35d3ce51 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -624,7 +624,7 @@
clock-names = "ckil", "osc";
};
- src: src@30390000 {
+ src: reset-controller@30390000 {
compatible = "fsl,imx7d-src", "syscon";
reg = <0x30390000 0x10000>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/integratorap-im-pd1.dts b/arch/arm/boot/dts/integratorap-im-pd1.dts
new file mode 100644
index 000000000000..1412a1a968fc
--- /dev/null
+++ b/arch/arm/boot/dts/integratorap-im-pd1.dts
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree for the ARM Integrator/AP platform
+ * with the IM-PD1 example logical module mounted.
+ */
+
+#include "integratorap.dts"
+
+/ {
+ model = "ARM Integrator/AP with IM-PD1";
+ compatible = "arm,integrator-ap";
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ impd1_ram: vram@c2000000 {
+ /* 1 MB of designated video RAM on the IM-PD1 */
+ compatible = "shared-dma-pool";
+ reg = <0xc2000000 0x00100000>;
+ no-map;
+ };
+ };
+};
+
+&lm0 {
+ syscon@0 {
+ compatible = "arm,im-pd1-syscon", "syscon";
+ reg = <0x00000000 0x1000>;
+
+ vco1: clock@00 {
+ compatible = "arm,impd1-vco1";
+ #clock-cells = <0>;
+ lock-offset = <0x08>;
+ vco-offset = <0x00>;
+ clocks = <&sysclk>;
+ clock-output-names = "IM-PD1-VCO1";
+ };
+
+ vco2: clock@04 {
+ compatible = "arm,impd1-vco2";
+ #clock-cells = <0>;
+ lock-offset = <0x08>;
+ vco-offset = <0x04>;
+ clocks = <&sysclk>;
+ clock-output-names = "IM-PD1-VCO2";
+ };
+ };
+
+ /* Also used for the Smart Card Interface SCI */
+ impd1_uartclk: clock@1_4 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clock-div = <4>;
+ clock-mult = <1>;
+ clocks = <&vco2>;
+ clock-output-names = "VCO2_DIV4";
+ };
+
+ /* For the SSP the clock is divided by 64 */
+ impd1_sspclk: clock@1_64 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <0>;
+ clock-div = <64>;
+ clock-mult = <1>;
+ clocks = <&vco2>;
+ clock-output-names = "VCO2_DIV64";
+ };
+
+ /* Fixed regulator for the MMC */
+ impd1_3v3: regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ /* Push buttons on the IM-PD1 */
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ debounce-interval = <50>;
+ linux,code = <KEY_UP>;
+ label = "UP";
+ gpios = <&impd1_gpio1 0 GPIO_ACTIVE_HIGH>;
+ };
+ button@1 {
+ debounce-interval = <50>;
+ linux,code = <KEY_DOWN>;
+ label = "DOWN";
+ gpios = <&impd1_gpio1 1 GPIO_ACTIVE_HIGH>;
+ };
+ button@2 {
+ debounce-interval = <50>;
+ linux,code = <KEY_LEFT>;
+ label = "LEFT";
+ gpios = <&impd1_gpio1 2 GPIO_ACTIVE_HIGH>;
+ };
+ button@3 {
+ debounce-interval = <50>;
+ linux,code = <KEY_RIGHT>;
+ label = "UP";
+ gpios = <&impd1_gpio1 3 GPIO_ACTIVE_HIGH>;
+ };
+ button@4 {
+ debounce-interval = <50>;
+ linux,code = <KEY_ESC>;
+ label = "ESC";
+ gpios = <&impd1_gpio1 4 GPIO_ACTIVE_HIGH>;
+ };
+ button@5 {
+ debounce-interval = <50>;
+ linux,code = <KEY_ENTER>;
+ label = "ENTER";
+ gpios = <&impd1_gpio1 5 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ bridge {
+ compatible = "ti,ths8134b", "ti,ths8134";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ vga_bridge_in: endpoint {
+ remote-endpoint = <&clcd_pads_vga_dac>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ vga_bridge_out: endpoint {
+ remote-endpoint = <&vga_con_in>;
+ };
+ };
+ };
+ };
+
+ vga {
+ compatible = "vga-connector";
+
+ port {
+ vga_con_in: endpoint {
+ remote-endpoint = <&vga_bridge_out>;
+ };
+ };
+ };
+
+ uart@100000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x00100000 0x1000>;
+ interrupts-extended = <&impd1_vic 1>;
+ clocks = <&impd1_uartclk>, <&sysclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ uart@200000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x00200000 0x1000>;
+ interrupts-extended = <&impd1_vic 2>;
+ clocks = <&impd1_uartclk>, <&sysclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ ssp@300000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x00300000 0x1000>;
+ interrupts-extended = <&impd1_vic 3>;
+ clocks = <&impd1_sspclk>, <&sysclk>;
+ clock-names = "spiclk", "apb_pclk";
+ };
+
+ impd1_gpio0: gpio@400000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x00400000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts-extended = <&impd1_vic 4>;
+ clocks = <&sysclk>;
+ clock-names = "apb_pclk";
+ };
+
+ impd1_gpio1: gpio@500000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0x00500000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts-extended = <&impd1_vic 5>;
+ clocks = <&sysclk>;
+ clock-names = "apb_pclk";
+ };
+
+ rtc@600000 {
+ compatible = "arm,pl030", "arm,primecell";
+ reg = <0x00600000 0x1000>;
+ interrupts-extended = <&impd1_vic 6>;
+ clocks = <&sysclk>;
+ clock-names = "apb_pclk";
+ };
+
+ mmc@700000 {
+ compatible = "arm,pl181", "arm,primecell";
+ reg = <0x00700000 0x1000>;
+ interrupts-extended = <&impd1_vic 7>,
+ <&impd1_vic 8>;
+ clocks = <&sysclk>, <&sysclk>;
+ clock-names = "mclk", "apb_pclk";
+ bus-width = <1>;
+ max-frequency = <515633>;
+ vmmc-supply = <&impd1_3v3>;
+ wp-gpios = <&impd1_gpio0 3 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&impd1_gpio0 4 GPIO_ACTIVE_LOW>;
+ };
+
+ aaci@800000 {
+ compatible = "arm,pl041", "arm,primecell";
+ reg = <0x00800000 0x1000>;
+ interrupts-extended = <&impd1_vic 9>;
+ clocks = <&sysclk>;
+ clock-names = "apb_pclk";
+ };
+
+ display@1000000 {
+ compatible = "arm,pl110", "arm,primecell";
+ reg = <0x01000000 0x1000>;
+ interrupts-extended = <&impd1_vic 11>;
+ clocks = <&vco1>, <&sysclk>;
+ clock-names = "clcdclk", "apb_pclk";
+ /* 640x480 16bpp @ 25.175MHz is 36827428 bytes/s */
+ max-memory-bandwidth = <40000000>;
+ memory-region = <&impd1_ram>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clcd_pads_vga_dac: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vga_bridge_in>;
+ arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
+ };
+ };
+ };
+
+ impd1_vic: interrupt-controller@3000000 {
+ compatible = "arm,pl192-vic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x03000000 0x1000>;
+ /* Valid interrupts, 0-9 and 11 */
+ valid-mask = <0x00000bff>;
+ /* LM site 0 has IRQ 9 on the PIC */
+ interrupts-extended = <&pic 9>;
+ };
+};
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 198d66181c50..67d1f9b24a52 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -4,7 +4,9 @@
*/
/dts-v1/;
-/include/ "integrator.dtsi"
+#include "integrator.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
/ {
model = "ARM Integrator/AP";
@@ -107,9 +109,6 @@
syscon {
compatible = "arm,integrator-ap-syscon", "syscon";
reg = <0x11000000 0x100>;
- interrupt-parent = <&pic>;
- /* These are the logical module IRQs */
- interrupts = <9>, <10>, <11>, <12>;
/*
* SYSCLK clocks PCIv3 bridge, system controller and the
@@ -239,4 +238,50 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
};
+
+ /*
+ * Logic module bus: we support up to four logic modules.
+ * They appear at 0xc0000000, 0xd0000000, 0xe0000000 and 0xf0000000
+ * and use interrupts 9, 10, 11 and 12 respectively.
+ */
+ bus@c0000000 {
+ compatible = "arm,integrator-ap-lm";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0xc0000000 0xc0000000 0x40000000>;
+ dma-ranges;
+
+ lm0: bus@c0000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xc0000000 0x10000000>;
+ dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ reg = <0xc0000000 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ lm1: bus@d0000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xd0000000 0x10000000>;
+ dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ reg = <0xd0000000 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ lm2: bus@e0000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xe0000000 0x10000000>;
+ dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ reg = <0xe0000000 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ lm3: bus@f0000000 {
+ compatible = "simple-bus";
+ ranges = <0x00000000 0xf0000000 0x10000000>;
+ dma-ranges = <0x00000000 0x80000000 0x10000000>;
+ reg = <0xf0000000 0x10000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/keystone-k2e.dtsi b/arch/arm/boot/dts/keystone-k2e.dtsi
index 085e7326ea8e..2d94faf31fab 100644
--- a/arch/arm/boot/dts/keystone-k2e.dtsi
+++ b/arch/arm/boot/dts/keystone-k2e.dtsi
@@ -86,14 +86,14 @@
};
};
- msm_ram: msmram@c000000 {
+ msm_ram: sram@c000000 {
compatible = "mmio-sram";
reg = <0x0c000000 0x200000>;
ranges = <0x0 0x0c000000 0x200000>;
#address-cells = <1>;
#size-cells = <1>;
- sram-bm@1f0000 {
+ bm-sram@1f0000 {
reg = <0x001f0000 0x8000>;
};
};
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index b7f10bf94576..db640bab8c1d 100644
--- a/arch/arm/boot/dts/keystone-k2g-evm.dts
+++ b/arch/arm/boot/dts/keystone-k2g-evm.dts
@@ -45,6 +45,19 @@
regulator-max-microvolt = <1800000>;
regulator-always-on;
};
+
+ hdmi: connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&sii9022_out>;
+ };
+ };
+ };
};
&k2g_pinctrl {
@@ -89,6 +102,13 @@
>;
};
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ K2G_CORE_IOPAD(0x1384) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+ K2G_CORE_IOPAD(0x1388) (BUFFER_CLASS_B | PIN_PULLUP | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
+
ecap0_pins: ecap0_pins {
pinctrl-single,pins = <
K2G_CORE_IOPAD(0x1374) (BUFFER_CLASS_B | MUX_MODE4) /* pr1_mdio_data.ecap0_in_apwm0_out */
@@ -160,6 +180,40 @@
K2G_CORE_IOPAD(0x1188) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* MDIO_DATA.MDIO_DATA */
>;
};
+
+ vout_pins: pinmux_vout_pins {
+ pinctrl-single,pins = <
+ K2G_CORE_IOPAD(0x1078) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata23.dssdata23 */
+ K2G_CORE_IOPAD(0x107c) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata22.dssdata22 */
+ K2G_CORE_IOPAD(0x1080) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata21.dssdata21 */
+ K2G_CORE_IOPAD(0x1084) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata20.dssdata20 */
+ K2G_CORE_IOPAD(0x1088) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata19.dssdata19 */
+ K2G_CORE_IOPAD(0x108c) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata18.dssdata18 */
+ K2G_CORE_IOPAD(0x1090) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata17.dssdata17 */
+ K2G_CORE_IOPAD(0x1094) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata16.dssdata16 */
+ K2G_CORE_IOPAD(0x1098) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata15.dssdata15 */
+ K2G_CORE_IOPAD(0x109c) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata14.dssdata14 */
+ K2G_CORE_IOPAD(0x10a0) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata13.dssdata13 */
+ K2G_CORE_IOPAD(0x10a4) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata12.dssdata12 */
+ K2G_CORE_IOPAD(0x10a8) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata11.dssdata11 */
+ K2G_CORE_IOPAD(0x10ac) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata10.dssdata10 */
+ K2G_CORE_IOPAD(0x10b0) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata9.dssdata9 */
+ K2G_CORE_IOPAD(0x10b4) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata8.dssdata8 */
+ K2G_CORE_IOPAD(0x10b8) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata7.dssdata7 */
+ K2G_CORE_IOPAD(0x10bc) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata6.dssdata6 */
+ K2G_CORE_IOPAD(0x10c0) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata5.dssdata5 */
+ K2G_CORE_IOPAD(0x10c4) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata4.dssdata4 */
+ K2G_CORE_IOPAD(0x10c8) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata3.dssdata3 */
+ K2G_CORE_IOPAD(0x10cc) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata2.dssdata2 */
+ K2G_CORE_IOPAD(0x10d0) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata1.dssdata1 */
+ K2G_CORE_IOPAD(0x10d4) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssdata0.dssdata0 */
+ K2G_CORE_IOPAD(0x10d8) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssvsync.dssvsync */
+ K2G_CORE_IOPAD(0x10dc) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dsshsync.dsshsync */
+ K2G_CORE_IOPAD(0x10e0) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dsspclk.dsspclk */
+ K2G_CORE_IOPAD(0x10e4) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssde.dssde */
+ K2G_CORE_IOPAD(0x10e8) (BUFFER_CLASS_B | PULL_DISABLE | MUX_MODE0) /* dssfid.dssfid */
+ >;
+ };
};
&uart0 {
@@ -357,3 +411,50 @@
pinctrl-0 = <&emac_pins>;
status = "okay";
};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ status = "okay";
+ clock-frequency = <400000>;
+
+ sii9022: sii9022@3b {
+ #sound-dai-cells = <0>;
+ compatible = "sil,sii9022";
+ reg = <0x3b>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ sii9022_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ sii9022_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+};
+
+&dss {
+ pinctrl-names = "default";
+ pinctrl-0 = <&vout_pins>;
+ status = "ok";
+
+ port {
+ dpi_out: endpoint {
+ remote-endpoint = <&sii9022_in>;
+ data-lines = <24>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi
index 1c833105d6c5..05a75019275e 100644
--- a/arch/arm/boot/dts/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/keystone-k2g.dtsi
@@ -95,14 +95,14 @@
ranges = <0x0 0x0 0x0 0xc0000000>;
dma-ranges = <0x80000000 0x8 0x00000000 0x80000000>;
- msm_ram: msmram@c000000 {
+ msm_ram: sram@c000000 {
compatible = "mmio-sram";
reg = <0x0c000000 0x100000>;
ranges = <0x0 0x0c000000 0x100000>;
#address-cells = <1>;
#size-cells = <1>;
- sram-bm@f7000 {
+ bm-sram@f7000 {
reg = <0x000f7000 0x8000>;
};
};
@@ -324,6 +324,28 @@
clock-names = "gpio";
};
+ dss: dss@02540000 {
+ compatible = "ti,k2g-dss";
+ reg = <0x02540000 0x400>,
+ <0x02550000 0x1000>,
+ <0x02557000 0x1000>,
+ <0x0255a800 0x100>,
+ <0x0255ac00 0x100>;
+ reg-names = "cfg", "common", "vid1", "ovr1", "vp1";
+ clocks = <&k2g_clks 0x2 0>,
+ <&k2g_clks 0x2 1>;
+ clock-names = "fck", "vp1";
+ interrupts = <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>;
+
+ power-domains = <&k2g_pds 0x2>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ max-memory-bandwidth = <230000000>;
+ };
+
edma0: edma@2700000 {
compatible = "ti,k2g-edma3-tpcc", "ti,edma3-tpcc";
reg = <0x02700000 0x8000>;
diff --git a/arch/arm/boot/dts/keystone-k2hk.dtsi b/arch/arm/boot/dts/keystone-k2hk.dtsi
index ca0f198ba627..8a9447703310 100644
--- a/arch/arm/boot/dts/keystone-k2hk.dtsi
+++ b/arch/arm/boot/dts/keystone-k2hk.dtsi
@@ -57,14 +57,14 @@
&soc0 {
/include/ "keystone-k2hk-clocks.dtsi"
- msm_ram: msmram@c000000 {
+ msm_ram: sram@c000000 {
compatible = "mmio-sram";
reg = <0x0c000000 0x600000>;
ranges = <0x0 0x0c000000 0x600000>;
#address-cells = <1>;
#size-cells = <1>;
- sram-bm@5f0000 {
+ bm-sram@5f0000 {
reg = <0x5f0000 0x8000>;
};
};
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index 374c80124c4e..dff5fea72b2f 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -255,14 +255,14 @@
};
};
- msm_ram: msmram@c000000 {
+ msm_ram: sram@c000000 {
compatible = "mmio-sram";
reg = <0x0c000000 0x200000>;
ranges = <0x0 0x0c000000 0x200000>;
#address-cells = <1>;
#size-cells = <1>;
- sram-bm@1f8000 {
+ bm-sram@1f8000 {
reg = <0x001f8000 0x8000>;
};
};
diff --git a/arch/arm/boot/dts/kirkwood-l-50.dts b/arch/arm/boot/dts/kirkwood-l-50.dts
new file mode 100644
index 000000000000..0d81c43a6a73
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-l-50.dts
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check Point L-50 Board Description
+ * Copyright 2020 Pawel Dembicki <paweldembicki@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
+
+/ {
+ model = "Check Point L-50";
+ compatible = "checkpoint,l-50", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ stdout-path = &uart0;
+ };
+
+ ocp@f1000000 {
+ pinctrl: pin-controller@10000 {
+ pinctrl-0 = <&pmx_led38 &pmx_sysrst &pmx_button29>;
+ pinctrl-names = "default";
+
+ pmx_sysrst: pmx-sysrst {
+ marvell,pins = "mpp6";
+ marvell,function = "sysrst";
+ };
+
+ pmx_button29: pmx_button29 {
+ marvell,pins = "mpp29";
+ marvell,function = "gpio";
+ };
+
+ pmx_led38: pmx_led38 {
+ marvell,pins = "mpp38";
+ marvell,function = "gpio";
+ };
+
+ pmx_sdio_cd: pmx-sdio-cd {
+ marvell,pins = "mpp46";
+ marvell,function = "gpio";
+ };
+ };
+
+ serial@12000 {
+ status = "okay";
+ };
+
+ mvsdio@90000 {
+ status = "okay";
+ cd-gpios = <&gpio1 14 9>;
+ };
+
+ i2c@11000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ gpio2: gpio-expander@20 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "semtech,sx1505q";
+ reg = <0x20>;
+
+ gpio-controller;
+ };
+
+ /* Three GPIOs of the 0x21 expander are not described in this dts:
+ * 1: DSL module reset (active low)
+ * 5: mPCIE reset (active low)
+ * 6: Express card reset (active low)
+ */
+ gpio3: gpio-expander@21 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "semtech,sx1505q";
+ reg = <0x21>;
+
+ gpio-controller;
+ };
+
+ rtc@30 {
+ compatible = "s35390a";
+ reg = <0x30>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ status_green {
+ label = "l-50:green:status";
+ gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ };
+
+ status_red {
+ label = "l-50:red:status";
+ gpios = <&gpio3 2 GPIO_ACTIVE_LOW>;
+ };
+
+ wifi {
+ label = "l-50:green:wifi";
+ gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "phy0tpt";
+ };
+
+ internet_green {
+ label = "l-50:green:internet";
+ gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+ };
+
+ internet_red {
+ label = "l-50:red:internet";
+ gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+
+ usb1_green {
+ label = "l-50:green:usb1";
+ gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "usbport";
+ trigger-sources = <&hub_port3>;
+ };
+
+ usb1_red {
+ label = "l-50:red:usb1";
+ gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
+ };
+
+ usb2_green {
+ label = "l-50:green:usb2";
+ gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "usbport";
+ trigger-sources = <&hub_port1>;
+ };
+
+ usb2_red {
+ label = "l-50:red:usb2";
+ gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ usb2_pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "usb2_pwr";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 3 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ };
+
+ usb1_pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1_pwr";
+
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 4 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ };
+
+ mpcie_pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "mpcie_pwr";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ express_card_pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "express_card_pwr";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ keys {
+ compatible = "gpio-keys";
+
+ factory_defaults {
+ label = "factory_defaults";
+ gpios = <&gpio0 29 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RESTART>;
+ };
+ };
+};
+
+&mdio {
+ status = "okay";
+
+ ethphy8: ethernet-phy@8 {
+ reg = <0x08>;
+ };
+
+ switch0: switch@10 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10>;
+ dsa,member = <0 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan5";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan6";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan2";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan7";
+ };
+
+ switch0port5: port@5 {
+ reg = <5>;
+ phy-mode = "rgmii-txid";
+ link = <&switch1port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ phy-mode = "rgmii-id";
+ ethernet = <&eth1port>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+
+ switch@11 {
+ compatible = "marvell,mv88e6085";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11>;
+ dsa,member = <0 1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan3";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan8";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan4";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "dmz";
+ };
+
+ switch1port5: port@5 {
+ reg = <5>;
+ phy-mode = "rgmii-txid";
+ link = <&switch0port5>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "dsl";
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+ };
+ };
+};
+
+&eth0 {
+ status = "okay";
+ ethernet0-port@0 {
+ phy-handle = <&ethphy8>;
+ };
+};
+
+&eth1 {
+ status = "okay";
+ ethernet1-port@0 {
+ speed = <1000>;
+ duplex = <1>;
+ };
+};
+
+&nand {
+ status = "okay";
+ pinctrl-0 = <&pmx_nand>;
+ pinctrl-names = "default";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x00000000 0x000c0000>;
+ };
+
+ partition@a0000 {
+ label = "bootldr-env";
+ reg = <0x000c0000 0x00040000>;
+ };
+
+ partition@100000 {
+ label = "kernel-1";
+ reg = <0x00100000 0x00800000>;
+ };
+
+ partition@900000 {
+ label = "rootfs-1";
+ reg = <0x00900000 0x07100000>;
+ };
+
+ partition@7a00000 {
+ label = "kernel-2";
+ reg = <0x07a00000 0x00800000>;
+ };
+
+ partition@8200000 {
+ label = "rootfs-2";
+ reg = <0x08200000 0x07100000>;
+ };
+
+ partition@f300000 {
+ label = "default_sw";
+ reg = <0x0f300000 0x07900000>;
+ };
+
+ partition@16c00000 {
+ label = "logs";
+ reg = <0x16c00000 0x01800000>;
+ };
+
+ partition@18400000 {
+ label = "preset_cfg";
+ reg = <0x18400000 0x00100000>;
+ };
+
+ partition@18500000 {
+ label = "adsl";
+ reg = <0x18500000 0x00100000>;
+ };
+
+ partition@18600000 {
+ label = "storage";
+ reg = <0x18600000 0x07a00000>;
+ };
+};
+
+&rtc {
+ status = "disabled";
+};
+
+&pciec {
+ status = "okay";
+};
+
+&pcie0 {
+ status = "okay";
+};
+
+&sata_phy0 {
+ status = "disabled";
+};
+
+&sata_phy1 {
+ status = "disabled";
+};
+
+&usb0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ #trigger-source-cells = <0>;
+
+ hub_port1: port@1 {
+ reg = <1>;
+ #trigger-source-cells = <0>;
+ };
+
+ hub_port3: port@3 {
+ reg = <3>;
+ #trigger-source-cells = <0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts b/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts
index 8cc8550242ef..b13aee570804 100644
--- a/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts
+++ b/arch/arm/boot/dts/kirkwood-netgear_readynas_nv+_v2.dts
@@ -113,6 +113,20 @@
};
};
+ auxdisplay {
+ compatible = "hit,hd44780";
+ data-gpios = <&gpio0 17 GPIO_ACTIVE_HIGH>,
+ <&gpio1 1 GPIO_ACTIVE_HIGH>,
+ <&gpio1 3 GPIO_ACTIVE_HIGH>,
+ <&gpio1 17 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+ rs-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+ rw-gpios = <&gpio0 15 GPIO_ACTIVE_HIGH>;
+ backlight-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+ display-height-chars = <2>;
+ display-width-chars = <16>;
+ };
+
gpio-leds {
compatible = "gpio-leds";
pinctrl-0 = < &pmx_led_blue_power &pmx_led_blue_backup
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 2161e23bd98e..6c8d94beae78 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -228,7 +228,7 @@
reg = <0x20128 0x4>;
};
- intc: main-interrupt-ctrl@20200 {
+ intc: interrupt-controller@20200 {
compatible = "marvell,orion-intc";
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 9b1fe99d55b1..5edf001f6138 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -242,6 +242,20 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+
+ n25q128a130: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <4>;
+ };
+};
+
&sai1 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 5d198309058a..ae89deaa8c9c 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -229,6 +229,9 @@
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb0_phy>;
phy-names = "usb2-phy";
+ g-rx-fifo-size = <512>;
+ g-np-tx-fifo-size = <500>;
+ g-tx-fifo-size = <256 192 128 128 128>;
dr_mode = "host";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index a2a47804fc4a..cb21ac9f517c 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -202,9 +202,8 @@
pinctrl-0 = <&eth_rgmii_pins>;
pinctrl-names = "default";
- phy-mode = "rgmii";
phy-handle = <&eth_phy>;
- amlogic,tx-delay-ns = <4>;
+ phy-mode = "rgmii-id";
nvmem-cells = <&ethernet_mac_address>;
nvmem-cell-names = "mac-address";
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index e34b039b9357..ba36168b9c1b 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -425,8 +425,9 @@
clocks = <&clkc CLKID_ETH>,
<&clkc CLKID_MPLL2>,
- <&clkc CLKID_MPLL2>;
- clock-names = "stmmaceth", "clkin0", "clkin1";
+ <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_FCLK_DIV2>;
+ clock-names = "stmmaceth", "clkin0", "clkin1", "timing-adjustment";
rx-fifo-depth = <4096>;
tx-fifo-depth = <2048>;
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index d54477b1001c..cc498191ddd1 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -69,9 +69,7 @@
pinctrl-names = "default";
phy-handle = <&eth_phy0>;
- phy-mode = "rgmii";
-
- amlogic,tx-delay-ns = <4>;
+ phy-mode = "rgmii-id";
mdio {
compatible = "snps,dwmac-mdio";
diff --git a/arch/arm/boot/dts/meson8m2.dtsi b/arch/arm/boot/dts/meson8m2.dtsi
index 5bde7f502007..2397ba06d608 100644
--- a/arch/arm/boot/dts/meson8m2.dtsi
+++ b/arch/arm/boot/dts/meson8m2.dtsi
@@ -30,8 +30,9 @@
0xc1108140 0x8>;
clocks = <&clkc CLKID_ETH>,
<&clkc CLKID_MPLL2>,
- <&clkc CLKID_MPLL2>;
- clock-names = "stmmaceth", "clkin0", "clkin1";
+ <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_FCLK_DIV2>;
+ clock-names = "stmmaceth", "clkin0", "clkin1", "timing-adjustment";
resets = <&reset RESET_ETHERNET>;
reset-names = "stmmaceth";
};
@@ -64,6 +65,14 @@
compatible = "amlogic,meson8m2-saradc", "amlogic,meson-saradc";
};
+&usb0_phy {
+ compatible = "amlogic,meson8m2-usb2-phy", "amlogic,meson-mx-usb2-phy";
+};
+
+&usb1_phy {
+ compatible = "amlogic,meson8m2-usb2-phy", "amlogic,meson-mx-usb2-phy";
+};
+
&wdt {
compatible = "amlogic,meson8m2-wdt", "amlogic,meson8b-wdt";
};
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index da10567b5aca..4306f3a6742b 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -364,7 +364,7 @@
rtc: rtc@d4010000 {
compatible = "mrvl,mmp-rtc";
reg = <0xd4010000 0x1000>;
- interrupts = <1 0>;
+ interrupts = <1>, <0>;
interrupt-names = "rtc 1Hz", "rtc alarm";
interrupt-parent = <&intcmux5>;
clocks = <&soc_clocks MMP2_CLK_RTC>;
diff --git a/arch/arm/boot/dts/mmp3.dtsi b/arch/arm/boot/dts/mmp3.dtsi
index 826f0a577859..57231d49d938 100644
--- a/arch/arm/boot/dts/mmp3.dtsi
+++ b/arch/arm/boot/dts/mmp3.dtsi
@@ -183,14 +183,14 @@
mrvl,intc-nr-irqs = <5>;
};
- usb_otg_phy0: usb-otg-phy@d4207000 {
+ usb_otg_phy0: usb-phy@d4207000 {
compatible = "marvell,mmp3-usb-phy";
reg = <0xd4207000 0x40>;
#phy-cells = <0>;
status = "disabled";
};
- usb_otg0: usb-otg@d4208000 {
+ usb_otg0: usb@d4208000 {
compatible = "marvell,pxau2o-ehci";
reg = <0xd4208000 0x200>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
@@ -201,14 +201,14 @@
status = "disabled";
};
- hsic_phy0: hsic-phy@f0001800 {
+ hsic_phy0: usb-phy@f0001800 {
compatible = "marvell,mmp3-hsic-phy";
reg = <0xf0001800 0x40>;
#phy-cells = <0>;
status = "disabled";
};
- hsic0: hsic@f0001000 {
+ hsic0: usb@f0001000 {
compatible = "marvell,pxau2o-ehci";
reg = <0xf0001000 0x200>;
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
@@ -222,14 +222,14 @@
status = "disabled";
};
- hsic_phy1: hsic-phy@f0002800 {
+ hsic_phy1: usb-phy@f0002800 {
compatible = "marvell,mmp3-hsic-phy";
reg = <0xf0002800 0x40>;
#phy-cells = <0>;
status = "disabled";
};
- hsic1: hsic@f0002000 {
+ hsic1: usb@f0002000 {
compatible = "marvell,pxau2o-ehci";
reg = <0xf0002000 0x200>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
@@ -279,6 +279,16 @@
status = "disabled";
};
+ mmc5: mmc@d4217000 {
+ compatible = "mrvl,pxav3-mmc";
+ reg = <0xd4217000 0x120>;
+ clocks = <&soc_clocks MMP3_CLK_SDH4>;
+ clock-names = "io";
+ interrupt-parent = <&hsi1_mux>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
camera0: camera@d420a000 {
compatible = "marvell,mmp2-ccic";
reg = <0xd420a000 0x800>;
@@ -472,7 +482,7 @@
rtc: rtc@d4010000 {
compatible = "mrvl,mmp-rtc";
reg = <0xd4010000 0x1000>;
- interrupts = <1 0>;
+ interrupts = <1>, <0>;
interrupt-names = "rtc 1Hz", "rtc alarm";
interrupt-parent = <&rtc_mux>;
clocks = <&soc_clocks MMP2_CLK_RTC>;
@@ -521,7 +531,7 @@
};
};
- l2: l2-cache-controller@d0020000 {
+ l2: cache-controller@d0020000 {
compatible = "marvell,tauros3-cache", "arm,pl310-cache";
reg = <0xd0020000 0x1000>;
cache-unified;
diff --git a/arch/arm/boot/dts/mt2701-evb.dts b/arch/arm/boot/dts/mt2701-evb.dts
index 88f8fd22302a..d1535f385f36 100644
--- a/arch/arm/boot/dts/mt2701-evb.dts
+++ b/arch/arm/boot/dts/mt2701-evb.dts
@@ -6,6 +6,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "mt2701.dtsi"
/ {
@@ -61,6 +62,15 @@
>;
default-brightness-level = <9>;
};
+
+ usb_vbus: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 45 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
};
&auxadc {
@@ -230,3 +240,14 @@
&uart0 {
status = "okay";
};
+
+&usb2 {
+ status = "okay";
+ usb-role-switch;
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ id-gpios = <&pio 44 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&usb_vbus>;
+ };
+};
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 2093b38d6e6d..39b3a2f4bef4 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -671,6 +671,39 @@
};
};
+ usb2: usb@11200000 {
+ compatible = "mediatek,mt2701-musb",
+ "mediatek,mtk-musb";
+ reg = <0 0x11200000 0 0x1000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "mc";
+ phys = <&u2port2 PHY_TYPE_USB2>;
+ dr_mode = "otg";
+ clocks = <&pericfg CLK_PERI_USB0>,
+ <&pericfg CLK_PERI_USB0_MCU>,
+ <&pericfg CLK_PERI_USB_SLV>;
+ clock-names = "main","mcu","univpll";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+ status = "disabled";
+ };
+
+ u2phy0: usb-phy@11210000 {
+ compatible = "mediatek,generic-tphy-v1";
+ reg = <0 0x11210000 0 0x0800>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "okay";
+
+ u2port2: usb-phy@11210800 {
+ reg = <0 0x11210800 0 0x0100>;
+ clocks = <&topckgen CLK_TOP_USB_PHY48M>;
+ clock-names = "ref";
+ #phy-cells = <1>;
+ status = "okay";
+ };
+ };
+
ethsys: syscon@1b000000 {
compatible = "mediatek,mt2701-ethsys", "syscon";
reg = <0 0x1b000000 0 0x1000>;
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index f76b4a3c34b9..3a6b856e5b74 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -3,6 +3,7 @@
* Copyright (c) 2017-2018 MediaTek Inc.
* Author: John Crispin <john@phrozen.org>
* Sean Wang <sean.wang@mediatek.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
*
*/
@@ -733,6 +734,30 @@
#reset-cells = <1>;
};
+ mali: gpu@13040000 {
+ compatible = "mediatek,mt7623-mali", "arm,mali-450";
+ reg = <0 0x13040000 0 0x30000>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 171 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 180 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "gp", "gpmmu", "pp0", "ppmmu0", "pp1",
+ "ppmmu1", "pp2", "ppmmu2", "pp3", "ppmmu3",
+ "pp";
+ clocks = <&topckgen CLK_TOP_MMPLL>,
+ <&g3dsys CLK_G3DSYS_CORE>;
+ clock-names = "bus", "core";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_MFG>;
+ resets = <&g3dsys MT2701_G3DSYS_CORE_RST>;
+ };
+
mmsys: syscon@14000000 {
compatible = "mediatek,mt7623-mmsys",
"mediatek,mt2701-mmsys",
diff --git a/arch/arm/boot/dts/mt7623n-rfb-emmc.dts b/arch/arm/boot/dts/mt7623n-rfb-emmc.dts
index b7606130ade9..0447748f9fa0 100644
--- a/arch/arm/boot/dts/mt7623n-rfb-emmc.dts
+++ b/arch/arm/boot/dts/mt7623n-rfb-emmc.dts
@@ -138,6 +138,7 @@
mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
+ phy-mode = "rgmii";
phy-handle = <&phy5>;
};
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 0e453fec2e3a..8a5cb44bfe2f 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -201,11 +201,32 @@
clock-frequency = <48000000>;
};
- timer2: timer@4802a000 {
- compatible = "ti,omap2420-timer";
- reg = <0x4802a000 0x400>;
- interrupts = <38>;
- ti,hwmods = "timer2";
+ timer2_target: target-module@4802a000 {
+ compatible = "ti,sysc-omap2-timer", "ti,sysc";
+ reg = <0x4802a000 0x4>,
+ <0x4802a010 0x4>,
+ <0x4802a014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&gpt2_fck>, <&gpt2_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4802a000 0x1000>;
+
+ timer2: timer@0 {
+ compatible = "ti,omap2420-timer";
+ reg = <0 0x400>;
+ interrupts = <38>;
+ };
};
timer3: timer@48078000 {
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index aba542d63d6d..6c5c7c0e8b94 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -68,10 +68,23 @@
};
};
- counter32k: counter@4000 {
- compatible = "ti,omap-counter32k";
- reg = <0x4000 0x20>;
- ti,hwmods = "counter_32k";
+ target-module@4000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4000 0x4>,
+ <0x4004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>;
+ clocks = <&func_32k_ck>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4000 0x1000>;
+
+ counter32k: counter@0 {
+ compatible = "ti,omap-counter32k";
+ reg = <0 0x20>;
+ };
};
};
@@ -194,12 +207,33 @@
};
};
- timer1: timer@48028000 {
- compatible = "ti,omap2420-timer";
- reg = <0x48028000 0x400>;
- interrupts = <37>;
- ti,hwmods = "timer1";
- ti,timer-alwon;
+ timer1_target: target-module@48028000 {
+ compatible = "ti,sysc-omap2-timer", "ti,sysc";
+ reg = <0x48028000 0x4>,
+ <0x48028010 0x4>,
+ <0x48028014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&gpt1_fck>, <&gpt1_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x48028000 0x1000>;
+
+ timer1: timer@0 {
+ compatible = "ti,omap2420-timer";
+ reg = <0 0x400>;
+ interrupts = <37>;
+ ti,timer-alwon;
+ };
};
wd_timer2: wdt@48022000 {
@@ -218,5 +252,15 @@
compatible = "ti,omap2420-i2c";
};
-/include/ "omap24xx-clocks.dtsi"
-/include/ "omap2420-clocks.dtsi"
+#include "omap24xx-clocks.dtsi"
+#include "omap2420-clocks.dtsi"
+
+/* Preferred always-on timer for clockevent */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt1_fck>;
+ assigned-clock-parents = <&func_32k_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 15ef7593be12..6a1f5bb3c06a 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -81,10 +81,23 @@
};
};
- counter32k: counter@20000 {
- compatible = "ti,omap-counter32k";
- reg = <0x20000 0x20>;
- ti,hwmods = "counter_32k";
+ target-module@20000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x20000 0x4>,
+ <0x20004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>;
+ clocks = <&func_32k_ck>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x20000 0x1000>;
+
+ counter32k: counter@0 {
+ compatible = "ti,omap-counter32k";
+ reg = <0 0x20>;
+ };
};
};
@@ -277,12 +290,33 @@
};
};
- timer1: timer@49018000 {
- compatible = "ti,omap2420-timer";
- reg = <0x49018000 0x400>;
- interrupts = <37>;
- ti,hwmods = "timer1";
- ti,timer-alwon;
+ timer1_target: target-module@49018000 {
+ compatible = "ti,sysc-omap2-timer", "ti,sysc";
+ reg = <0x49018000 0x4>,
+ <0x49018010 0x4>,
+ <0x49018014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&gpt1_fck>, <&gpt1_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49018000 0x1000>;
+
+ timer1: timer@0 {
+ compatible = "ti,omap2420-timer";
+ reg = <0 0x400>;
+ interrupts = <37>;
+ ti,timer-alwon;
+ };
};
mcspi3: spi@480b8000 {
@@ -321,5 +355,15 @@
compatible = "ti,omap2430-i2c";
};
-/include/ "omap24xx-clocks.dtsi"
-/include/ "omap2430-clocks.dtsi"
+#include "omap24xx-clocks.dtsi"
+#include "omap2430-clocks.dtsi"
+
+/* Preferred always-on timer for clockevent */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt1_fck>;
+ assigned-clock-parents = <&func_32k_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 4ed3f93f5841..dfa158647d91 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -304,6 +304,39 @@
phys = <0 &hsusb2_phy>;
};
+/* Unusable as clocksource because of unreliable oscillator */
+&counter32k {
+ status = "disabled";
+};
+
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+ /delete-property/ti,no-reset-on-init;
+ /delete-property/ti,no-idle;
+ timer@0 {
+ /delete-property/ti,timer-alwon;
+ };
+};
+
+/* Preferred always-on timer for clocksource */
+&timer12_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ /* Always clocked by secure_32k_fck */
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt2_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
+
&twl_gpio {
ti,use-leds;
/* pullups: BIT(1) */
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index 162d0726b008..c2995a280729 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -14,3 +14,36 @@
display2 = &tv0;
};
};
+
+/* Unusable as clocksource because of unreliable oscillator */
+&counter32k {
+ status = "disabled";
+};
+
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+ /delete-property/ti,no-reset-on-init;
+ /delete-property/ti,no-idle;
+ timer@0 {
+ /delete-property/ti,timer-alwon;
+ };
+};
+
+/* Preferred always-on timer for clocksource */
+&timer12_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ /* Always clocked by secure_32k_fck */
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt2_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 634ea16a711e..1296d0643943 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -193,10 +193,23 @@
};
};
- counter32k: counter@48320000 {
- compatible = "ti,omap-counter32k";
- reg = <0x48320000 0x20>;
- ti,hwmods = "counter_32k";
+ target-module@48320000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x48320000 0x4>,
+ <0x48320004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>;
+ clocks = <&wkup_32k_fck>, <&omap_32ksync_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x48320000 0x1000>;
+
+ counter32k: counter@0 {
+ compatible = "ti,omap-counter32k";
+ reg = <0x0 0x20>;
+ };
};
intc: interrupt-controller@48200000 {
@@ -637,19 +650,63 @@
dma-names = "rx";
};
- timer1: timer@48318000 {
- compatible = "ti,omap3430-timer";
- reg = <0x48318000 0x400>;
- interrupts = <37>;
- ti,hwmods = "timer1";
- ti,timer-alwon;
+ timer1_target: target-module@48318000 {
+ compatible = "ti,sysc-omap2-timer", "ti,sysc";
+ reg = <0x48318000 0x4>,
+ <0x48318010 0x4>,
+ <0x48318014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&gpt1_fck>, <&gpt1_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x48318000 0x1000>;
+
+ timer1: timer@0 {
+ compatible = "ti,omap3430-timer";
+ reg = <0x0 0x80>;
+ clocks = <&gpt1_fck>;
+ clock-names = "fck";
+ interrupts = <37>;
+ ti,timer-alwon;
+ };
};
- timer2: timer@49032000 {
- compatible = "ti,omap3430-timer";
- reg = <0x49032000 0x400>;
- interrupts = <38>;
- ti,hwmods = "timer2";
+ timer2_target: target-module@49032000 {
+ compatible = "ti,sysc-omap2-timer", "ti,sysc";
+ reg = <0x49032000 0x4>,
+ <0x49032010 0x4>,
+ <0x49032014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&gpt2_fck>, <&gpt2_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x49032000 0x1000>;
+
+ timer2: timer@0 {
+ compatible = "ti,omap3430-timer";
+ reg = <0 0x400>;
+ interrupts = <38>;
+ };
};
timer3: timer@49034000 {
@@ -723,13 +780,34 @@
ti,timer-pwm;
};
- timer12: timer@48304000 {
- compatible = "ti,omap3430-timer";
- reg = <0x48304000 0x400>;
- interrupts = <95>;
- ti,hwmods = "timer12";
- ti,timer-alwon;
- ti,timer-secure;
+ timer12_target: target-module@48304000 {
+ compatible = "ti,sysc-omap2-timer", "ti,sysc";
+ reg = <0x48304000 0x4>,
+ <0x48304010 0x4>,
+ <0x48304014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ clocks = <&gpt12_fck>, <&gpt12_ick>;
+ clock-names = "fck", "ick";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x48304000 0x1000>;
+
+ timer12: timer@0 {
+ compatible = "ti,omap3430-timer";
+ reg = <0 0x400>;
+ interrupts = <95>;
+ ti,timer-alwon;
+ ti,timer-secure;
+ };
};
usbhstll: usbhstll@48062000 {
@@ -886,4 +964,14 @@
};
};
-/include/ "omap3xxx-clocks.dtsi"
+#include "omap3xxx-clocks.dtsi"
+
+/* Preferred always-on timer for clockevent. Some boards must use dmtimer12 */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt1_fck>;
+ assigned-clock-parents = <&omap_32k_fck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi
index ef59e4e97d7c..fcc52121ff09 100644
--- a/arch/arm/boot/dts/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/omap4-l4.dtsi
@@ -974,7 +974,6 @@
target-module@4000 { /* 0x4a304000, ap 17 24.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
- ti,hwmods = "counter_32k";
reg = <0x4000 0x4>,
<0x4004 0x4>;
reg-names = "rev", "sysc";
@@ -1139,9 +1138,8 @@
};
};
- target-module@8000 { /* 0x4a318000, ap 9 1c.0 */
+ timer1_target: target-module@8000 { /* 0x4a318000, ap 9 1c.0 */
compatible = "ti,sysc-omap2-timer", "ti,sysc";
- ti,hwmods = "timer1";
reg = <0x8000 0x4>,
<0x8010 0x4>,
<0x8014 0x4>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 763bdea8c829..6c2b07f0704d 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -655,3 +655,13 @@
#reset-cells = <1>;
};
};
+
+/* Preferred always-on timer for clockevent */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&l4_wkup_clkctrl OMAP4_TIMER1_CLKCTRL 24>;
+ assigned-clock-parents = <&sys_clkin_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi
index f68740abb8aa..5217805bf126 100644
--- a/arch/arm/boot/dts/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/omap5-l4.dtsi
@@ -1003,6 +1003,7 @@
<0x00090000 0x00090000 0x002000>, /* ap 55 */
<0x00092000 0x00092000 0x001000>, /* ap 56 */
<0x000a4000 0x000a4000 0x001000>, /* ap 57 */
+ <0x000a5000 0x000a5000 0x001000>,
<0x000a6000 0x000a6000 0x001000>, /* ap 58 */
<0x000a8000 0x000a8000 0x004000>, /* ap 59 */
<0x000ac000 0x000ac000 0x001000>, /* ap 60 */
@@ -1908,6 +1909,36 @@
<0x00001000 0x000a5000 0x00001000>;
};
+ des_target: target-module@a5000 { /* 0x480a5000 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0xa5030 0x4>,
+ <0xa5034 0x4>,
+ <0xa5038 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,syss-mask = <1>;
+ /* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+ clocks = <&l4sec_clkctrl OMAP5_DES3DES_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xa5000 0x00001000>;
+ status = "disabled";
+
+ des: des@0 {
+ compatible = "ti,omap4-des";
+ reg = <0 0xa0>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 117>, <&sdma 116>;
+ dma-names = "tx", "rx";
+ };
+ };
+
target-module@a8000 { /* 0x480a8000, ap 59 2a.0 */
compatible = "ti,sysc";
status = "disabled";
@@ -2150,7 +2181,6 @@
target-module@4000 { /* 0x4ae04000, ap 17 20.0 */
compatible = "ti,sysc-omap2", "ti,sysc";
- ti,hwmods = "counter_32k";
reg = <0x4000 0x4>,
<0x4010 0x4>;
reg-names = "rev", "sysc";
@@ -2336,9 +2366,8 @@
};
};
- target-module@8000 { /* 0x4ae18000, ap 9 18.0 */
+ timer1_target: target-module@8000 { /* 0x4ae18000, ap 9 18.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
- ti,hwmods = "timer1";
reg = <0x8000 0x4>,
<0x8010 0x4>;
reg-names = "rev", "sysc";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 2ac7f021c284..fb889c5b00c9 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -247,6 +247,92 @@
hw-caps-temp-alert;
};
+ aes1_target: target-module@4b501000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4b501080 0x4>,
+ <0x4b501084 0x4>,
+ <0x4b501088 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,syss-mask = <1>;
+ /* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+ clocks = <&l4sec_clkctrl OMAP5_AES1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4b501000 0x1000>;
+
+ aes1: aes@0 {
+ compatible = "ti,omap4-aes";
+ reg = <0 0xa0>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 111>, <&sdma 110>;
+ dma-names = "tx", "rx";
+ };
+ };
+
+ aes2_target: target-module@4b701000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4b701080 0x4>,
+ <0x4b701084 0x4>,
+ <0x4b701088 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,syss-mask = <1>;
+ /* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+ clocks = <&l4sec_clkctrl OMAP5_AES2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4b701000 0x1000>;
+
+ aes2: aes@0 {
+ compatible = "ti,omap4-aes";
+ reg = <0 0xa0>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 114>, <&sdma 113>;
+ dma-names = "tx", "rx";
+ };
+ };
+
+ sham_target: target-module@4b100000 {
+ compatible = "ti,sysc-omap3-sham", "ti,sysc";
+ reg = <0x4b100100 0x4>,
+ <0x4b100110 0x4>,
+ <0x4b100114 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ /* Domains (P, C): l4per_pwrdm, l4sec_clkdm */
+ clocks = <&l4sec_clkctrl OMAP5_SHA2MD5_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4b100000 0x1000>;
+
+ sham: sham@0 {
+ compatible = "ti,omap4-sham";
+ reg = <0 0x300>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 119>;
+ dma-names = "rx";
+ };
+ };
+
bandgap: bandgap@4a0021e0 {
reg = <0x4a0021e0 0xc
0x4a00232c 0xc
@@ -581,3 +667,13 @@
#reset-cells = <1>;
};
};
+
+/* Preferred always-on timer for clockevent */
+&timer1_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&wkupaon_clkctrl OMAP5_TIMER1_CLKCTRL 24>;
+ assigned-clock-parents = <&sys_32k_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/pxa168.dtsi b/arch/arm/boot/dts/pxa168.dtsi
index 9a9e38245e88..4fe7735c7c58 100644
--- a/arch/arm/boot/dts/pxa168.dtsi
+++ b/arch/arm/boot/dts/pxa168.dtsi
@@ -97,7 +97,7 @@
resets = <&soc_clocks PXA168_CLK_GPIO>;
interrupt-names = "gpio_mux";
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
ranges;
gcb0: gpio@d4019000 {
@@ -119,6 +119,8 @@
twsi1: i2c@d4011000 {
compatible = "mrvl,mmp-twsi";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0xd4011000 0x1000>;
interrupts = <7>;
clocks = <&soc_clocks PXA168_CLK_TWSI0>;
@@ -129,6 +131,8 @@
twsi2: i2c@d4025000 {
compatible = "mrvl,mmp-twsi";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0xd4025000 0x1000>;
interrupts = <58>;
clocks = <&soc_clocks PXA168_CLK_TWSI1>;
@@ -139,7 +143,7 @@
rtc: rtc@d4010000 {
compatible = "mrvl,mmp-rtc";
reg = <0xd4010000 0x1000>;
- interrupts = <5 6>;
+ interrupts = <5>, <6>;
interrupt-names = "rtc 1Hz", "rtc alarm";
clocks = <&soc_clocks PXA168_CLK_RTC>;
resets = <&soc_clocks PXA168_CLK_RTC>;
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index c237a0e4b12a..d19674812cd2 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -170,7 +170,7 @@
clocks = <&clks CLK_GPIO>;
gpio-ranges = <&pinctrl 0 0 128>;
interrupt-names = "gpio0", "gpio1", "gpio_mux";
- interrupts = <8 9 10>;
+ interrupts = <8>, <9>, <10>;
gpio-controller;
#gpio-cells = <0x2>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/pxa910.dtsi b/arch/arm/boot/dts/pxa910.dtsi
index 587a5e7f0702..352a39357810 100644
--- a/arch/arm/boot/dts/pxa910.dtsi
+++ b/arch/arm/boot/dts/pxa910.dtsi
@@ -109,7 +109,7 @@
clocks = <&soc_clocks PXA910_CLK_GPIO>;
resets = <&soc_clocks PXA910_CLK_GPIO>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
ranges;
gcb0: gpio@d4019000 {
@@ -155,7 +155,7 @@
rtc: rtc@d4010000 {
compatible = "mrvl,mmp-rtc";
reg = <0xd4010000 0x1000>;
- interrupts = <5 6>;
+ interrupts = <5>, <6>;
interrupt-names = "rtc 1Hz", "rtc alarm";
clocks = <&soc_clocks PXA910_CLK_RTC>;
resets = <&soc_clocks PXA910_CLK_RTC>;
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index b9839f86e703..74d8e2c8e4b3 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -166,6 +166,7 @@
<1 4 0xf08>,
<1 1 0xf08>;
clock-frequency = <48000000>;
+ always-on;
};
soc {
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 4021f661cd11..b912da9a3ff3 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -93,6 +93,12 @@
};
};
+ firmware {
+ scm {
+ compatible = "qcom,scm-ipq806x", "qcom,scm";
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
index eaa1001d0a46..d4dc98214225 100644
--- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include "qcom-msm8974pro.dtsi"
-#include "qcom-pm8841.dtsi"
-#include "qcom-pm8941.dtsi"
+#include "qcom-pma8084.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
model = "Samsung Galaxy S5";
@@ -14,6 +16,194 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ smd {
+ rpm {
+ rpm_requests {
+ pma8084-regulators {
+ compatible = "qcom,rpm-pma8084-regulators";
+ status = "okay";
+
+ pma8084_s1: s1 {
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pma8084_s2: s2 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pma8084_s3: s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pma8084_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_s5: s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+
+ pma8084_s6: s6 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ pma8084_l1: l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pma8084_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pma8084_l3: l3 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ pma8084_l4: l4 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pma8084_l5: l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l6: l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l7: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l8: l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l9: l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l10: l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l11: l11 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ pma8084_l12: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l13: l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l14: l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ pma8084_l15: l15 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pma8084_l16: l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+
+ pma8084_l17: l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pma8084_l18: l18 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ };
+
+ pma8084_l19: l19 {
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pma8084_l20: l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ regulator-allow-set-load;
+ regulator-system-load = <200000>;
+ };
+
+ pma8084_l21: l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ pma8084_l22: l22 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ pma8084_l23: l23 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
+ pma8084_l24: l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+
+ pma8084_l25: l25 {
+ regulator-min-microvolt = <2100000>;
+ regulator-max-microvolt = <2100000>;
+ };
+
+ pma8084_l26: l26 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2050000>;
+ };
+
+ pma8084_l27: l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ pma8084_lvs1: lvs1 {};
+ pma8084_lvs2: lvs2 {};
+ pma8084_lvs3: lvs3 {};
+ pma8084_lvs4: lvs4 {};
+
+ pma8084_5vs1: 5vs1 {};
+ };
+ };
+ };
+ };
+
+ /delete-node/ vreg-boost;
};
&soc {
@@ -21,4 +211,215 @@
status = "ok";
};
+ gpio-keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pin_a>;
+
+ volume-down {
+ label = "volume_down";
+ gpios = <&pma8084_gpios 2 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ };
+
+ home-key {
+ label = "home_key";
+ gpios = <&pma8084_gpios 3 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_HOMEPAGE>;
+ wakeup-source;
+ debounce-interval = <15>;
+ };
+
+ volume-up {
+ label = "volume_up";
+ gpios = <&pma8084_gpios 5 GPIO_ACTIVE_LOW>;
+ linux,input-type = <1>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ };
+ };
+
+ pinctrl@fd510000 {
+ sdhc1_pin_a: sdhc1-pin-active {
+ clk {
+ pins = "sdc1_clk";
+ drive-strength = <4>;
+ bias-disable;
+ };
+
+ cmd-data {
+ pins = "sdc1_cmd", "sdc1_data";
+ drive-strength = <4>;
+ bias-pull-up;
+ };
+ };
+ };
+
+ sdhci@f9824900 {
+ status = "ok";
+
+ vmmc-supply = <&pma8084_l20>;
+ vqmmc-supply = <&pma8084_s4>;
+
+ bus-width = <8>;
+ non-removable;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdhc1_pin_a>;
+ };
+
+ usb@f9a55000 {
+ status = "ok";
+
+ phys = <&usb_hs1_phy>;
+ phy-select = <&tcsr 0xb000 0>;
+ /*extcon = <&smbb>, <&usb_id>;*/
+ /*vbus-supply = <&chg_otg>;*/
+
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+
+ ulpi {
+ phy@a {
+ status = "ok";
+
+ v1p8-supply = <&pma8084_l6>;
+ v3p3-supply = <&pma8084_l24>;
+
+ /*extcon = <&smbb>;*/
+ qcom,init-seq = /bits/ 8 <0x1 0x64>;
+ };
+ };
+ };
+
+ pinctrl@fd510000 {
+ i2c6_pins: i2c6 {
+ mux {
+ pins = "gpio29", "gpio30";
+ function = "blsp_i2c6";
+
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ i2c@f9928000 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins>;
+
+ pmic@60 {
+ reg = <0x60>;
+ compatible = "maxim,max77826";
+
+ regulators {
+ max77826_ldo1: LDO1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ max77826_ldo2: LDO2 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ max77826_ldo3: LDO3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ max77826_ldo4: LDO4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo5: LDO5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo6: LDO6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo7: LDO7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo8: LDO8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo9: LDO9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_ldo10: LDO10 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ max77826_ldo11: LDO11 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ max77826_ldo12: LDO12 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo13: LDO13 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo14: LDO14 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ max77826_ldo15: LDO15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ max77826_buck: BUCK {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+
+ max77826_buckboost: BUCKBOOST {
+ regulator-min-microvolt = <3400000>;
+ regulator-max-microvolt = <3400000>;
+ };
+ };
+ };
+ };
+};
+
+&spmi_bus {
+ pma8084@0 {
+ gpios@c000 {
+ gpio_keys_pin_a: gpio-keys-active {
+ pins = "gpio2", "gpio3", "gpio5";
+ function = "normal";
+
+ bias-pull-up;
+ power-source = <PMA8084_GPIO_S4>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 2ea2308d91b3..51f5f904f9eb 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -974,6 +974,17 @@
#size-cells = <0>;
};
+ blsp_i2c6: i2c@f9928000 {
+ status = "disabled";
+ compatible = "qcom,i2c-qup-v2.1.1";
+ reg = <0xf9928000 0x1000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
blsp_i2c8: i2c@f9964000 {
status = "disabled";
compatible = "qcom,i2c-qup-v2.1.1";
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 90feb2cf9960..0588d4446f9a 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -83,7 +83,7 @@
};
cmt1: timer@e6138000 {
- compatible = "renesas,cmt-48-r8a7740", "renesas,cmt-48";
+ compatible = "renesas,r8a7740-cmt1";
reg = <0xe6138000 0x170>;
interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks R8A7740_CLK_CMT1>;
diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts b/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts
new file mode 100644
index 000000000000..1f5c35c66d91
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the iWave-RZ/G1H Qseven board
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a7742-iwg21m.dtsi"
+
+/ {
+ model = "iWave Systems RainboW-G21D-Qseven board based on RZ/G1H";
+ compatible = "iwave,g21d", "iwave,g21m", "renesas,r8a7742";
+
+ aliases {
+ serial2 = &scifa2;
+ };
+
+ chosen {
+ bootargs = "ignore_loglevel root=/dev/mmcblk0p1 rw rootwait";
+ stdout-path = "serial2:115200n8";
+ };
+};
+
+&pfc {
+ scifa2_pins: scifa2 {
+ groups = "scifa2_data_c";
+ function = "scifa2";
+ };
+};
+
+&scifa2 {
+ pinctrl-0 = <&scifa2_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7742-iwg21m.dtsi b/arch/arm/boot/dts/r8a7742-iwg21m.dtsi
new file mode 100644
index 000000000000..85aff429d408
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7742-iwg21m.dtsi
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the iWave RZ/G1H Qseven SOM
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include "r8a7742.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ compatible = "iwave,g21m", "renesas,r8a7742";
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x40000000>;
+ };
+
+ memory@200000000 {
+ device_type = "memory";
+ reg = <2 0x00000000 0 0x40000000>;
+ };
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <20000000>;
+};
+
+&pfc {
+ mmc1_pins: mmc1 {
+ groups = "mmc1_data4", "mmc1_ctrl";
+ function = "mmc1";
+ };
+};
+
+&mmcif1 {
+ pinctrl-0 = <&mmc1_pins>;
+ pinctrl-names = "default";
+
+ vmmc-supply = <&reg_3p3v>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7742.dtsi b/arch/arm/boot/dts/r8a7742.dtsi
new file mode 100644
index 000000000000..305d8086a3dd
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7742.dtsi
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the r8a7742 SoC
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <dt-bindings/clock/r8a7742-cpg-mssr.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/power/r8a7742-sysc.h>
+
+/ {
+ compatible = "renesas,r8a7742";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ clock-frequency = <1400000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
+ power-domains = <&sysc R8A7742_PD_CA15_CPU0>;
+ next-level-cache = <&L2_CA15>;
+ capacity-dmips-mhz = <1024>;
+ voltage-tolerance = <1>; /* 1% */
+ clock-latency = <300000>; /* 300 us */
+
+ /* kHz - uV - OPPs unknown yet */
+ operating-points = <1400000 1000000>,
+ <1225000 1000000>,
+ <1050000 1000000>,
+ < 875000 1000000>,
+ < 700000 1000000>,
+ < 350000 1000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ clock-frequency = <1400000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
+ power-domains = <&sysc R8A7742_PD_CA15_CPU1>;
+ next-level-cache = <&L2_CA15>;
+ capacity-dmips-mhz = <1024>;
+ voltage-tolerance = <1>; /* 1% */
+ clock-latency = <300000>; /* 300 us */
+
+ /* kHz - uV - OPPs unknown yet */
+ operating-points = <1400000 1000000>,
+ <1225000 1000000>,
+ <1050000 1000000>,
+ < 875000 1000000>,
+ < 700000 1000000>,
+ < 350000 1000000>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <2>;
+ clock-frequency = <1400000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
+ power-domains = <&sysc R8A7742_PD_CA15_CPU2>;
+ next-level-cache = <&L2_CA15>;
+ capacity-dmips-mhz = <1024>;
+ voltage-tolerance = <1>; /* 1% */
+ clock-latency = <300000>; /* 300 us */
+
+ /* kHz - uV - OPPs unknown yet */
+ operating-points = <1400000 1000000>,
+ <1225000 1000000>,
+ <1050000 1000000>,
+ < 875000 1000000>,
+ < 700000 1000000>,
+ < 350000 1000000>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <3>;
+ clock-frequency = <1400000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z>;
+ power-domains = <&sysc R8A7742_PD_CA15_CPU3>;
+ next-level-cache = <&L2_CA15>;
+ capacity-dmips-mhz = <1024>;
+ voltage-tolerance = <1>; /* 1% */
+ clock-latency = <300000>; /* 300 us */
+
+ /* kHz - uV - OPPs unknown yet */
+ operating-points = <1400000 1000000>,
+ <1225000 1000000>,
+ <1050000 1000000>,
+ < 875000 1000000>,
+ < 700000 1000000>,
+ < 350000 1000000>;
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ clock-frequency = <780000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z2>;
+ power-domains = <&sysc R8A7742_PD_CA7_CPU0>;
+ next-level-cache = <&L2_CA7>;
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ clock-frequency = <780000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z2>;
+ power-domains = <&sysc R8A7742_PD_CA7_CPU1>;
+ next-level-cache = <&L2_CA7>;
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ clock-frequency = <780000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z2>;
+ power-domains = <&sysc R8A7742_PD_CA7_CPU2>;
+ next-level-cache = <&L2_CA7>;
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x103>;
+ clock-frequency = <780000000>;
+ clocks = <&cpg CPG_CORE R8A7742_CLK_Z2>;
+ power-domains = <&sysc R8A7742_PD_CA7_CPU3>;
+ next-level-cache = <&L2_CA7>;
+ };
+
+ L2_CA15: cache-controller-0 {
+ compatible = "cache";
+ power-domains = <&sysc R8A7742_PD_CA15_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ L2_CA7: cache-controller-1 {
+ compatible = "cache";
+ power-domains = <&sysc R8A7742_PD_CA7_SCU>;
+ cache-unified;
+ cache-level = <2>;
+ };
+ };
+
+ /* External root clock */
+ extal_clk: extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ };
+
+ pmu-0 {
+ compatible = "arm,cortex-a15-pmu";
+ interrupts-extended = <&gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ pmu-1 {
+ compatible = "arm,cortex-a7-pmu";
+ interrupts-extended = <&gic GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
+ };
+
+ /* External SCIF clock */
+ scif_clk: scif {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ interrupt-parent = <&gic>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@e6050000 {
+ compatible = "renesas,gpio-r8a7742",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6050000 0 0x50>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 0 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 912>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
+ };
+
+ gpio1: gpio@e6051000 {
+ compatible = "renesas,gpio-r8a7742",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6051000 0 0x50>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 32 30>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 911>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 911>;
+ };
+
+ gpio2: gpio@e6052000 {
+ compatible = "renesas,gpio-r8a7742",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6052000 0 0x50>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 64 30>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 910>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 910>;
+ };
+
+ gpio3: gpio@e6053000 {
+ compatible = "renesas,gpio-r8a7742",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6053000 0 0x50>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 96 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 909>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 909>;
+ };
+
+ gpio4: gpio@e6054000 {
+ compatible = "renesas,gpio-r8a7742",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6054000 0 0x50>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 128 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 908>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 908>;
+ };
+
+ gpio5: gpio@e6055000 {
+ compatible = "renesas,gpio-r8a7742",
+ "renesas,rcar-gen2-gpio";
+ reg = <0 0xe6055000 0 0x50>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-ranges = <&pfc 0 160 32>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ clocks = <&cpg CPG_MOD 907>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 907>;
+ };
+
+ pfc: pin-controller@e6060000 {
+ compatible = "renesas,pfc-r8a7742";
+ reg = <0 0xe6060000 0 0x250>;
+ };
+
+ cpg: clock-controller@e6150000 {
+ compatible = "renesas,r8a7742-cpg-mssr";
+ reg = <0 0xe6150000 0 0x1000>;
+ clocks = <&extal_clk>, <&usb_extal_clk>;
+ clock-names = "extal", "usb_extal";
+ #clock-cells = <2>;
+ #power-domain-cells = <0>;
+ #reset-cells = <1>;
+ };
+
+ rst: reset-controller@e6160000 {
+ compatible = "renesas,r8a7742-rst";
+ reg = <0 0xe6160000 0 0x0100>;
+ };
+
+ sysc: system-controller@e6180000 {
+ compatible = "renesas,r8a7742-sysc";
+ reg = <0 0xe6180000 0 0x0200>;
+ #power-domain-cells = <1>;
+ };
+
+ irqc: interrupt-controller@e61c0000 {
+ compatible = "renesas,irqc-r8a7742", "renesas,irqc";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0 0xe61c0000 0 0x200>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 407>;
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 407>;
+ };
+
+ icram0: sram@e63a0000 {
+ compatible = "mmio-sram";
+ reg = <0 0xe63a0000 0 0x12000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0xe63a0000 0x12000>;
+ };
+
+ icram1: sram@e63c0000 {
+ compatible = "mmio-sram";
+ reg = <0 0xe63c0000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0xe63c0000 0x1000>;
+
+ smp-sram@0 {
+ compatible = "renesas,smp-sram";
+ reg = <0 0x100>;
+ };
+ };
+
+ icram2: sram@e6300000 {
+ compatible = "mmio-sram";
+ reg = <0 0xe6300000 0 0x40000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0xe6300000 0x40000>;
+ };
+
+ dmac0: dma-controller@e6700000 {
+ compatible = "renesas,dmac-r8a7742",
+ "renesas,rcar-dmac";
+ reg = <0 0xe6700000 0 0x20000>;
+ interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&cpg CPG_MOD 219>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 219>;
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
+ dmac1: dma-controller@e6720000 {
+ compatible = "renesas,dmac-r8a7742",
+ "renesas,rcar-dmac";
+ reg = <0 0xe6720000 0 0x20000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error",
+ "ch0", "ch1", "ch2", "ch3",
+ "ch4", "ch5", "ch6", "ch7",
+ "ch8", "ch9", "ch10", "ch11",
+ "ch12", "ch13", "ch14";
+ clocks = <&cpg CPG_MOD 218>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 218>;
+ #dma-cells = <1>;
+ dma-channels = <15>;
+ };
+
+ scifa0: serial@e6c40000 {
+ compatible = "renesas,scifa-r8a7742",
+ "renesas,rcar-gen2-scifa", "renesas,scifa";
+ reg = <0 0xe6c40000 0 0x40>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 204>;
+ clock-names = "fck";
+ dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+ <&dmac1 0x21>, <&dmac1 0x22>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
+ status = "disabled";
+ };
+
+ scifa1: serial@e6c50000 {
+ compatible = "renesas,scifa-r8a7742",
+ "renesas,rcar-gen2-scifa", "renesas,scifa";
+ reg = <0 0xe6c50000 0 0x40>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 203>;
+ clock-names = "fck";
+ dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+ <&dmac1 0x25>, <&dmac1 0x26>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
+ status = "disabled";
+ };
+
+ scifa2: serial@e6c60000 {
+ compatible = "renesas,scifa-r8a7742",
+ "renesas,rcar-gen2-scifa", "renesas,scifa";
+ reg = <0 0xe6c60000 0 0x40>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 202>;
+ clock-names = "fck";
+ dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+ <&dmac1 0x27>, <&dmac1 0x28>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
+ status = "disabled";
+ };
+
+ scifb0: serial@e6c20000 {
+ compatible = "renesas,scifb-r8a7742",
+ "renesas,rcar-gen2-scifb", "renesas,scifb";
+ reg = <0 0xe6c20000 0 0x100>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 206>;
+ clock-names = "fck";
+ dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+ <&dmac1 0x3d>, <&dmac1 0x3e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
+ status = "disabled";
+ };
+
+ scifb1: serial@e6c30000 {
+ compatible = "renesas,scifb-r8a7742",
+ "renesas,rcar-gen2-scifb", "renesas,scifb";
+ reg = <0 0xe6c30000 0 0x100>;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 207>;
+ clock-names = "fck";
+ dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+ <&dmac1 0x19>, <&dmac1 0x1a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
+ status = "disabled";
+ };
+
+ scifb2: serial@e6ce0000 {
+ compatible = "renesas,scifb-r8a7742",
+ "renesas,rcar-gen2-scifb", "renesas,scifb";
+ reg = <0 0xe6ce0000 0 0x100>;
+ interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 216>;
+ clock-names = "fck";
+ dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+ <&dmac1 0x1d>, <&dmac1 0x1e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 216>;
+ status = "disabled";
+ };
+
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a7742",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6e60000 0 0x40>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 721>,
+ <&cpg CPG_CORE R8A7742_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+ <&dmac1 0x29>, <&dmac1 0x2a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 721>;
+ status = "disabled";
+ };
+
+ scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a7742",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6e68000 0 0x40>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 720>,
+ <&cpg CPG_CORE R8A7742_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+ <&dmac1 0x2d>, <&dmac1 0x2e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 720>;
+ status = "disabled";
+ };
+
+ scif2: serial@e6e56000 {
+ compatible = "renesas,scif-r8a7742",
+ "renesas,rcar-gen2-scif", "renesas,scif";
+ reg = <0 0xe6e56000 0 0x40>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 310>,
+ <&cpg CPG_CORE R8A7742_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+ <&dmac1 0x2b>, <&dmac1 0x2c>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 310>;
+ status = "disabled";
+ };
+
+ hscif0: serial@e62c0000 {
+ compatible = "renesas,hscif-r8a7742",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62c0000 0 0x60>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 717>,
+ <&cpg CPG_CORE R8A7742_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 717>;
+ status = "disabled";
+ };
+
+ hscif1: serial@e62c8000 {
+ compatible = "renesas,hscif-r8a7742",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62c8000 0 0x60>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 716>,
+ <&cpg CPG_CORE R8A7742_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
+ status = "disabled";
+ };
+
+ mmcif1: mmc@ee220000 {
+ compatible = "renesas,mmcif-r8a7742",
+ "renesas,sh-mmcif";
+ reg = <0 0xee220000 0 0x80>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 305>;
+ dmas = <&dmac0 0xe1>, <&dmac0 0xe2>,
+ <&dmac1 0xe1>, <&dmac1 0xe2>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 305>;
+ reg-io-width = <4>;
+ status = "disabled";
+ max-frequency = <97500000>;
+ };
+
+ gic: interrupt-controller@f1001000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0xf1001000 0 0x1000>, <0 0xf1002000 0 0x2000>,
+ <0 0xf1004000 0 0x2000>, <0 0xf1006000 0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&cpg CPG_MOD 408>;
+ clock-names = "clk";
+ power-domains = <&sysc R8A7742_PD_ALWAYS_ON>;
+ resets = <&cpg 408>;
+ };
+
+ prr: chipid@ff000044 {
+ compatible = "renesas,prr";
+ reg = <0 0xff000044 0 4>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ /* External USB clock - can be overridden by the board */
+ usb_extal_clk: usb_extal {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <48000000>;
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index e8b340bb99bc..fff123753b85 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -338,7 +338,7 @@
#thermal-sensor-cells = <0>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7743",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -348,7 +348,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7743",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -357,7 +357,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7743",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -367,7 +367,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7743",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -376,7 +376,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7743",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -386,7 +386,7 @@
status = "disabled";
};
- ipmmu_gp: mmu@e62a0000 {
+ ipmmu_gp: iommu@e62a0000 {
compatible = "renesas,ipmmu-r8a7743",
"renesas,ipmmu-vmsa";
reg = <0 0xe62a0000 0 0x1000>;
diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi
index def840b8b2d3..5050ac19041d 100644
--- a/arch/arm/boot/dts/r8a7744.dtsi
+++ b/arch/arm/boot/dts/r8a7744.dtsi
@@ -338,7 +338,7 @@
#thermal-sensor-cells = <0>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7744",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -348,7 +348,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7744",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -357,7 +357,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7744",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -367,7 +367,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7744",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -376,7 +376,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7744",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -386,7 +386,7 @@
status = "disabled";
};
- ipmmu_gp: mmu@e62a0000 {
+ ipmmu_gp: iommu@e62a0000 {
compatible = "renesas,ipmmu-r8a7744",
"renesas,ipmmu-vmsa";
reg = <0 0xe62a0000 0 0x1000>;
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
index 7ab58d8bb740..b0d1fc24e97e 100644
--- a/arch/arm/boot/dts/r8a7745.dtsi
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -302,7 +302,7 @@
resets = <&cpg 407>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -312,7 +312,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -321,7 +321,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -331,7 +331,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -340,7 +340,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -350,7 +350,7 @@
status = "disabled";
};
- ipmmu_gp: mmu@e62a0000 {
+ ipmmu_gp: iommu@e62a0000 {
compatible = "renesas,ipmmu-r8a7745",
"renesas,ipmmu-vmsa";
reg = <0 0xe62a0000 0 0x1000>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index e5ef9fd4284a..166d5566229d 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -427,7 +427,7 @@
#thermal-sensor-cells = <0>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7790",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -437,7 +437,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7790",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -446,7 +446,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7790",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -456,7 +456,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7790",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -465,7 +465,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7790",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -475,7 +475,7 @@
status = "disabled";
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a7790",
"renesas,ipmmu-vmsa";
reg = <0 0xffc80000 0 0x1000>;
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 6e5bd86731cd..225676fbe312 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -291,6 +291,17 @@
reg = <0 0xe6060000 0 0x250>;
};
+ tpu: pwm@e60f0000 {
+ compatible = "renesas,tpu-r8a7791", "renesas,tpu";
+ reg = <0 0xe60f0000 0 0x148>;
+ interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 304>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 304>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a7791-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -350,7 +361,7 @@
#thermal-sensor-cells = <0>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -360,7 +371,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -369,7 +380,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -379,7 +390,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -388,7 +399,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -398,7 +409,7 @@
status = "disabled";
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xffc80000 0 0x1000>;
@@ -407,7 +418,7 @@
status = "disabled";
};
- ipmmu_gp: mmu@e62a0000 {
+ ipmmu_gp: iommu@e62a0000 {
compatible = "renesas,ipmmu-r8a7791",
"renesas,ipmmu-vmsa";
reg = <0 0xe62a0000 0 0x1000>;
@@ -1067,6 +1078,76 @@
status = "disabled";
};
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@e6e31000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e31000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@e6e32000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e32000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@e6e33000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e33000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@e6e34000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e34000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@e6e35000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e35000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@e6e36000 {
+ compatible = "renesas,pwm-r8a7791", "renesas,pwm-rcar";
+ reg = <0 0xe6e36000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
adc: adc@e6e54000 {
compatible = "renesas,r8a7791-gyroadc",
"renesas,rcar-gyroadc";
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index dadbda16161b..1b62a7e06b42 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -336,7 +336,7 @@
#thermal-sensor-cells = <0>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -346,7 +346,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -355,7 +355,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -365,7 +365,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -374,7 +374,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -384,7 +384,7 @@
status = "disabled";
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xffc80000 0 0x1000>;
@@ -393,7 +393,7 @@
status = "disabled";
};
- ipmmu_gp: mmu@e62a0000 {
+ ipmmu_gp: iommu@e62a0000 {
compatible = "renesas,ipmmu-r8a7793",
"renesas,ipmmu-vmsa";
reg = <0 0xe62a0000 0 0x1000>;
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 2c9e7a1ebfec..8d7f8798628a 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -290,7 +290,7 @@
resets = <&cpg 407>;
};
- ipmmu_sy0: mmu@e6280000 {
+ ipmmu_sy0: iommu@e6280000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
reg = <0 0xe6280000 0 0x1000>;
@@ -300,7 +300,7 @@
status = "disabled";
};
- ipmmu_sy1: mmu@e6290000 {
+ ipmmu_sy1: iommu@e6290000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
reg = <0 0xe6290000 0 0x1000>;
@@ -309,7 +309,7 @@
status = "disabled";
};
- ipmmu_ds: mmu@e6740000 {
+ ipmmu_ds: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
reg = <0 0xe6740000 0 0x1000>;
@@ -319,7 +319,7 @@
status = "disabled";
};
- ipmmu_mp: mmu@ec680000 {
+ ipmmu_mp: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
reg = <0 0xec680000 0 0x1000>;
@@ -328,7 +328,7 @@
status = "disabled";
};
- ipmmu_mx: mmu@fe951000 {
+ ipmmu_mx: iommu@fe951000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
@@ -338,7 +338,7 @@
status = "disabled";
};
- ipmmu_gp: mmu@e62a0000 {
+ ipmmu_gp: iommu@e62a0000 {
compatible = "renesas,ipmmu-r8a7794",
"renesas,ipmmu-vmsa";
reg = <0 0xe62a0000 0 0x1000>;
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index 2ff9f152d29b..7154b827ea2f 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -16,7 +16,7 @@
leds: gpio-leds {
compatible = "gpio-leds";
- work {
+ work_led: led-0 {
gpios = <&gpio2 RK_PD6 GPIO_ACTIVE_HIGH>;
label = "kylin:red:led";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts
index 365eff621113..eed9e60cffa2 100644
--- a/arch/arm/boot/dts/rk3066a-mk808.dts
+++ b/arch/arm/boot/dts/rk3066a-mk808.dts
@@ -22,7 +22,7 @@
gpio-leds {
compatible = "gpio-leds";
- blue {
+ blue_led: led-0 {
label = "mk808:blue:power";
gpios = <&gpio0 RK_PA3 GPIO_ACTIVE_HIGH>;
default-state = "off";
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index c9a7f5409960..b0fef82c0a71 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -33,19 +33,19 @@
gpio-leds {
compatible = "gpio-leds";
- green {
+ green_led: led-0 {
label = "rock:green:user1";
gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- blue {
+ blue_led: led-1 {
label = "rock:blue:user2";
gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>;
default-state = "off";
};
- sleep {
+ sleep_led: led-2 {
label = "rock:red:power";
gpios = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
default-state = "off";
diff --git a/arch/arm/boot/dts/rk3229-xms6.dts b/arch/arm/boot/dts/rk3229-xms6.dts
index 933ef69da32a..263393ac4fa6 100644
--- a/arch/arm/boot/dts/rk3229-xms6.dts
+++ b/arch/arm/boot/dts/rk3229-xms6.dts
@@ -33,12 +33,18 @@
power-led {
compatible = "gpio-leds";
- blue {
+ blue_led: led-0 {
gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
};
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio2 26 GPIO_ACTIVE_LOW>,
+ <&gpio2 29 GPIO_ACTIVE_LOW>;
+ };
+
vcc_host: vcc-host-regulator {
compatible = "regulator-fixed";
enable-active-high;
@@ -131,7 +137,6 @@
&emmc {
cap-mmc-highspeed;
- disable-wp;
non-removable;
status = "okay";
};
@@ -202,6 +207,16 @@
status = "okay";
};
+&sdio {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ vqmmc-supply = <&vccio_1v8>;
+ status = "okay";
+};
+
&sdmmc {
cap-mmc-highspeed;
disable-wp;
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 5485a9918da6..b0fd92befdeb 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -615,6 +615,16 @@
status = "disabled";
};
+ rga: rga@20060000 {
+ compatible = "rockchip,rk3228-rga", "rockchip,rk3288-rga";
+ reg = <0x20060000 0x1000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_RGA>, <&cru HCLK_RGA>, <&cru SCLK_RGA>;
+ clock-names = "aclk", "hclk", "sclk";
+ resets = <&cru SRST_RGA>, <&cru SRST_RGA_A>, <&cru SRST_RGA_H>;
+ reset-names = "core", "axi", "ahb";
+ };
+
iep_mmu: iommu@20070800 {
compatible = "rockchip,iommu";
reg = <0x20070800 0x100>;
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts
index 8c38bda21a7c..9a4a9749c405 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload.dts
+++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts
@@ -45,20 +45,20 @@
leds {
compatible = "gpio-leds";
- power {
+ power_led: led-0 {
gpios = <&gpio8 RK_PA2 GPIO_ACTIVE_LOW>;
label = "firefly:blue:power";
pinctrl-names = "default";
- pinctrl-0 = <&power_led>;
+ pinctrl-0 = <&power_led_pin>;
panic-indicator;
};
- work {
+ work_led: led-1 {
gpios = <&gpio8 RK_PA1 GPIO_ACTIVE_LOW>;
label = "firefly:blue:user";
linux,default-trigger = "rc-feedback";
pinctrl-names = "default";
- pinctrl-0 = <&work_led>;
+ pinctrl-0 = <&work_led_pin>;
};
};
@@ -334,11 +334,11 @@
};
leds {
- power_led: power-led {
+ power_led_pin: power-led-pin {
rockchip,pins = <8 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
- work_led: work-led {
+ work_led_pin: work-led-pin {
rockchip,pins = <8 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 5e0a19004e46..e5c4fd4ea67e 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -62,20 +62,20 @@
leds {
compatible = "gpio-leds";
- work {
+ work_led: led-0 {
gpios = <&gpio8 RK_PA1 GPIO_ACTIVE_LOW>;
label = "firefly:blue:user";
linux,default-trigger = "rc-feedback";
pinctrl-names = "default";
- pinctrl-0 = <&work_led>;
+ pinctrl-0 = <&work_led_pin>;
};
- power {
+ power_led: led-1 {
gpios = <&gpio8 RK_PA2 GPIO_ACTIVE_LOW>;
label = "firefly:green:power";
linux,default-trigger = "default-on";
pinctrl-names = "default";
- pinctrl-0 = <&power_led>;
+ pinctrl-0 = <&power_led_pin>;
};
};
@@ -429,11 +429,11 @@
};
leds {
- power_led: power-led {
+ power_led_pin: power-led-pin {
rockchip,pins = <8 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
- work_led: work-led {
+ work_led_pin: work-led-pin {
rockchip,pins = <8 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index c41d012c8850..213c9eb84f76 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -30,7 +30,7 @@
leds {
compatible = "gpio-leds";
- work {
+ work_led: led-0 {
gpios = <&gpio7 RK_PA2 GPIO_ACTIVE_HIGH>;
label = "miqi:green:user";
linux,default-trigger = "timer";
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
index 77a47b9b756d..e43887c9635f 100644
--- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
@@ -36,9 +36,9 @@
leds: user-leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&user_led>;
+ pinctrl-0 = <&user_led_pin>;
- user {
+ user_led: led-0 {
label = "green_led";
gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -372,7 +372,7 @@
};
leds {
- user_led: user-led {
+ user_led_pin: user-led-pin {
rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_output_high>;
};
};
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index cdcdc921ee09..3cca4d0f9b09 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -41,13 +41,13 @@
gpio-leds {
compatible = "gpio-leds";
- heartbeat {
+ heartbeat_led: led-0 {
gpios = <&gpio7 RK_PB7 GPIO_ACTIVE_LOW>;
label = "rock2:green:state1";
linux,default-trigger = "heartbeat";
};
- mmc {
+ mmc_led: led-1 {
gpios = <&gpio0 RK_PB3 GPIO_ACTIVE_LOW>;
label = "rock2:blue:state2";
linux,default-trigger = "mmc0";
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
index acfaa12ec239..90e9be443fe6 100644
--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
@@ -46,17 +46,17 @@
gpio-leds {
compatible = "gpio-leds";
- act-led {
+ act_led: led-0 {
gpios = <&gpio1 RK_PD0 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "mmc0";
};
- heartbeat-led {
+ heartbeat_led: led-1 {
gpios = <&gpio1 RK_PD1 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
- pwr-led {
+ pwr_led: led-2 {
gpios = <&gpio0 RK_PA3 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "default-on";
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 0cd88774db95..2e1edd85f04a 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -7,7 +7,6 @@
#include <dt-bindings/clock/rk3288-cru.h>
#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/thermal/thermal.h>
-#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/soc/rockchip,boot-mode.h>
/ {
diff --git a/arch/arm/boot/dts/rtd1195-horseradish.dts b/arch/arm/boot/dts/rtd1195-horseradish.dts
new file mode 100644
index 000000000000..9d06d3d34c74
--- /dev/null
+++ b/arch/arm/boot/dts/rtd1195-horseradish.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+/dts-v1/;
+
+#include "rtd1195.dtsi"
+
+/ {
+ compatible = "realtek,horseradish", "realtek,rtd1195";
+ model = "Realtek Horseradish EVB";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@a800 {
+ device_type = "memory";
+ reg = <0x0000a800 0x17ff5800>, /* boot ROM to r-bus */
+ <0x18070000 0x00090000>, /* r-bus to NOR flash */
+ <0x19100000 0x26f00000>; /* NOR flash to 1 GiB */
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rtd1195-mele-x1000.dts b/arch/arm/boot/dts/rtd1195-mele-x1000.dts
new file mode 100644
index 000000000000..c7951b9a2c97
--- /dev/null
+++ b/arch/arm/boot/dts/rtd1195-mele-x1000.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2017-2019 Andreas Färber
+ */
+
+/dts-v1/;
+
+#include "rtd1195.dtsi"
+
+/ {
+ compatible = "mele,x1000", "realtek,rtd1195";
+ model = "MeLE X1000";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@a800 {
+ device_type = "memory";
+ reg = <0x0000a800 0x17ff5800>, /* boot ROM to r-bus */
+ <0x18070000 0x00090000>, /* r-bus to NOR flash */
+ <0x19100000 0x26f00000>; /* NOR flash to 1 GiB */
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/rtd1195.dtsi b/arch/arm/boot/dts/rtd1195.dtsi
new file mode 100644
index 000000000000..21897210d9d0
--- /dev/null
+++ b/arch/arm/boot/dts/rtd1195.dtsi
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2017-2019 Andreas Färber
+ */
+
+/memreserve/ 0x00000000 0x0000a800; /* boot code */
+/memreserve/ 0x0000a800 0x000f5800;
+/memreserve/ 0x17fff000 0x00001000;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/realtek,rtd1195.h>
+
+/ {
+ compatible = "realtek,rtd1195";
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x0>;
+ clock-frequency = <1000000000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x1>;
+ clock-frequency = <1000000000>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rpc_comm: rpc@b000 {
+ reg = <0x0000b000 0x1000>;
+ };
+
+ audio@1b00000 {
+ reg = <0x01b00000 0x400000>;
+ };
+
+ rpc_ringbuf: rpc@1ffe000 {
+ reg = <0x01ffe000 0x4000>;
+ };
+
+ secure@10000000 {
+ reg = <0x10000000 0x100000>;
+ no-map;
+ };
+ };
+
+ arm-pmu {
+ compatible = "arm,cortex-a7-pmu";
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ clock-frequency = <27000000>;
+ };
+
+ osc27M: osc {
+ compatible = "fixed-clock";
+ clock-frequency = <27000000>;
+ #clock-cells = <0>;
+ clock-output-names = "osc27M";
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000000 0x0000a800>,
+ <0x18000000 0x18000000 0x00070000>,
+ <0x18100000 0x18100000 0x01000000>,
+ <0x80000000 0x80000000 0x80000000>;
+
+ rbus: bus@18000000 {
+ compatible = "simple-bus";
+ reg = <0x18000000 0x70000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x18000000 0x70000>;
+
+ crt: syscon@0 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1000>;
+ };
+
+ iso: syscon@7000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x7000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x7000 0x1000>;
+ };
+
+ sb2: syscon@1a000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1a000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1a000 0x1000>;
+ };
+
+ misc: syscon@1b000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1b000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1b000 0x1000>;
+ };
+
+ scpu_wrapper: syscon@1d000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1d000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1d000 0x1000>;
+ };
+ };
+
+ gic: interrupt-controller@ff011000 {
+ compatible = "arm,cortex-a7-gic";
+ reg = <0xff011000 0x1000>,
+ <0xff012000 0x2000>,
+ <0xff014000 0x2000>,
+ <0xff016000 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+ };
+};
+
+&crt {
+ reset1: reset-controller@0 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x0 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset2: reset-controller@4 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x4 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset3: reset-controller@8 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x8 0x4>;
+ #reset-cells = <1>;
+ };
+};
+
+&iso {
+ iso_reset: reset-controller@88 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x88 0x4>;
+ #reset-cells = <1>;
+ };
+
+ wdt: watchdog@680 {
+ compatible = "realtek,rtd1295-watchdog";
+ reg = <0x680 0x100>;
+ clocks = <&osc27M>;
+ };
+
+ uart0: serial@800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x800 0x400>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&iso_reset RTD1195_ISO_RSTN_UR0>;
+ clock-frequency = <27000000>;
+ status = "disabled";
+ };
+};
+
+&misc {
+ uart1: serial@200 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x200 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&reset2 RTD1195_RSTN_UR1>;
+ clock-frequency = <27000000>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi
index 8ff70b856334..cf858029292e 100644
--- a/arch/arm/boot/dts/s5pv210-aries.dtsi
+++ b/arch/arm/boot/dts/s5pv210-aries.dtsi
@@ -11,9 +11,15 @@
/ {
compatible = "samsung,aries", "samsung,s5pv210";
- aliases {
+ aliases: aliases {
+ i2c4 = &i2c_sound;
+ i2c5 = &i2c_accel;
i2c6 = &i2c_pmic;
+ i2c7 = &i2c_musb;
i2c9 = &i2c_fuel;
+ i2c10 = &i2c_touchkey;
+ i2c11 = &i2c_prox;
+ i2c12 = &i2c_magnetometer;
};
memory@30000000 {
@@ -46,6 +52,21 @@
regulator-name = "vibrator-en";
enable-active-high;
gpio = <&gpj1 1 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&vibrator_ena>;
+ };
+
+ touchkey_vdd: regulator-fixed-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "VTOUCH_3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&gpj3 2 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchkey_vdd_ena>;
};
wifi_pwrseq: wifi-pwrseq {
@@ -57,7 +78,71 @@
power-off-delay-us = <500>;
};
- i2c_pmic: i2c-gpio-0 {
+ i2c_sound: i2c-gpio-0 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&mp05 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&mp05 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sound_i2c_pins>;
+
+ wm8994: wm8994@1a {
+ compatible = "wlf,wm8994";
+ reg = <0x1a>;
+
+ #sound-dai-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ clocks = <&clocks MOUT_CLKOUT>;
+ clock-names = "MCLK1";
+
+ AVDD2-supply = <&buck3_reg>;
+ DBVDD-supply = <&buck3_reg>;
+ CPVDD-supply = <&buck3_reg>;
+ SPKVDD1-supply = <&buck3_reg>;
+ SPKVDD2-supply = <&buck3_reg>;
+
+ wlf,gpio-cfg = <0xa101 0x8100 0x0100 0x0100 0x8100
+ 0xa101 0x0100 0x8100 0x0100 0x0100
+ 0x0100>;
+
+ wlf,ldo1ena = <&gpf3 4 GPIO_ACTIVE_HIGH>;
+ wlf,ldo2ena = <&gpf3 4 GPIO_ACTIVE_HIGH>;
+
+ wlf,lineout1-se;
+ wlf,lineout2-se;
+
+ assigned-clocks = <&clocks MOUT_CLKOUT>;
+ assigned-clock-rates = <0>;
+ assigned-clock-parents = <&xusbxti>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&codec_ldo>;
+ };
+ };
+
+ i2c_accel: i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpj3 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpj3 7 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_i2c_pins>;
+
+ status = "disabled";
+
+ /* bma023 accelerometer, no mainline binding */
+ };
+
+ i2c_pmic: i2c-gpio-2 {
compatible = "i2c-gpio";
sda-gpios = <&gpj4 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
scl-gpios = <&gpj4 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -65,6 +150,9 @@
#address-cells = <1>;
#size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_i2c_pins>;
+
pmic@66 {
compatible = "maxim,max8998";
reg = <0x66>;
@@ -81,6 +169,9 @@
max8998,pmic-buck2-dvs-gpio = <&gph0 5 GPIO_ACTIVE_HIGH>;
max8998,pmic-buck2-dvs-voltage = <1100000>, <1000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_dvs_pins &pmic_irq>;
+
regulators {
ldo2_reg: LDO2 {
regulator-name = "VALIVE_1.2V";
@@ -107,7 +198,6 @@
regulator-name = "VADC_3.3V";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
@@ -134,8 +224,6 @@
regulator-name = "VLCD_1.8V";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- /* Till we get panel driver */
- regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
@@ -234,8 +322,6 @@
regulator-name = "VCC_3.0V_LCD";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
- /* Till we get panel driver */
- regulator-always-on;
regulator-state-mem {
regulator-off-in-suspend;
@@ -306,7 +392,29 @@
};
};
- i2c_fuel: i2c-gpio-1 {
+ i2c_musb: i2c-gpio-3 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpj3 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpj3 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&musb_i2c_pins>;
+
+ fsa9480: musb@25 {
+ compatible = "fcs,fsa9480";
+ reg = <0x25>;
+ interrupt-parent = <&gph2>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&musb_irq>;
+ };
+ };
+
+ i2c_fuel: i2c-gpio-4 {
compatible = "i2c-gpio";
sda-gpios = <&mp05 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
scl-gpios = <&mp05 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -314,6 +422,9 @@
#address-cells = <1>;
#size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fg_i2c_pins>;
+
fuelgauge@36 {
compatible = "maxim,max17040";
interrupt-parent = <&vic0>;
@@ -322,6 +433,64 @@
};
};
+ i2c_touchkey: i2c-gpio-5 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpj3 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpj3 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchkey_i2c_pins>;
+
+ touchkey@20 {
+ compatible = "cypress,aries-touchkey";
+ reg = <0x20>;
+ vdd-supply = <&touchkey_vdd>;
+ vcc-supply = <&buck3_reg>;
+ linux,keycodes = <KEY_MENU KEY_BACK
+ KEY_HOMEPAGE KEY_SEARCH>;
+ interrupt-parent = <&gpj4>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchkey_irq>;
+ };
+ };
+
+ i2c_prox: i2c-gpio-6 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpg2 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpg0 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&prox_i2c_pins>;
+
+ status = "disabled";
+
+ /* Sharp gp2a prox/light sensor, incomplete mainline binding */
+ };
+
+ i2c_magnetometer: i2c-gpio-7 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpj0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpj0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&magnetometer_i2c_pins>;
+
+ status = "disabled";
+
+ /* Yamaha yas529 magnetometer, no mainline binding */
+ };
+
vibrator: pwm-vibrator {
compatible = "pwm-vibrator";
pwms = <&pwm 1 44642 0>;
@@ -337,6 +506,45 @@
offset = <0x681c>; /* PS_HOLD_CONTROL */
value = <0x5200>;
};
+
+ spi_lcd: spi-gpio-0 {
+ compatible = "spi-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sck-gpios = <&mp04 1 GPIO_ACTIVE_HIGH>;
+ mosi-gpios = <&mp04 3 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&mp01 1 GPIO_ACTIVE_HIGH>;
+ num-chipselects = <1>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcd_spi_pins>;
+
+ panel@0 {
+ compatible = "samsung,s6e63m0";
+ reg = <0>;
+ reset-gpios = <&mp05 5 GPIO_ACTIVE_LOW>;
+ vdd3-supply = <&ldo7_reg>;
+ vci-supply = <&ldo17_reg>;
+ spi-cs-high;
+ spi-max-frequency = <1200000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_rst>;
+
+ port {
+ lcd_ep: endpoint {
+ remote-endpoint = <&fimd_ep>;
+ };
+ };
+ };
+ };
+};
+
+&adc {
+ vdd-supply = <&ldo4_reg>;
+
+ status = "okay";
};
&fimd {
@@ -347,18 +555,13 @@
samsung,invert-vden;
samsung,invert-vclk;
- display-timings {
- timing-0 {
- /* 480x800@60Hz */
- clock-frequency = <25628040>;
- hactive = <480>;
- vactive = <800>;
- hfront-porch = <16>;
- hback-porch = <16>;
- hsync-len = <2>;
- vfront-porch = <28>;
- vback-porch = <1>;
- vsync-len = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@3 {
+ reg = <3>;
+ fimd_ep: endpoint {
+ remote-endpoint = <&lcd_ep>;
};
};
};
@@ -399,12 +602,39 @@
samsung,pin-val = <1>;
};
+ codec_ldo: codec-ldo {
+ samsung,pins = "gpf3-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ };
+
+ prox_i2c_pins: gp2a-i2c-pins {
+ samsung,pins = "gpg0-2", "gpg2-2";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
wlan_gpio_rst: wlan-gpio-rst {
samsung,pins = "gpg1-2";
samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
+ pmic_dvs_pins: pmic-dvs-pins {
+ samsung,pins = "gph0-3", "gph0-4", "gph0-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ samsung,pin-val = <0>;
+ };
+
+ pmic_irq: pmic-irq {
+ samsung,pins = "gph0-7";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_F>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
wifi_host_wake: wifi-host-wake {
samsung,pins = "gph2-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
@@ -419,6 +649,13 @@
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
+ musb_irq: musb-irq {
+ samsung,pins = "gph2-7";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
tf_detect: tf-detect {
samsung,pins = "gph3-4";
samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
@@ -432,12 +669,85 @@
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
};
+ magnetometer_i2c_pins: yas529-i2c-pins {
+ samsung,pins = "gpj0-0", "gpj0-1";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
ts_irq: ts-irq {
samsung,pins = "gpj0-5";
samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
+
+ vibrator_ena: vibrator-ena {
+ samsung,pins = "gpj1-1";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ touchkey_i2c_pins: touchkey-i2c-pins {
+ samsung,pins = "gpj3-0", "gpj3-1";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ touchkey_vdd_ena: touchkey-vdd-ena {
+ samsung,pins = "gpj3-2";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ musb_i2c_pins: musb-i2c-pins {
+ samsung,pins = "gpj3-4", "gpj3-5";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ accel_i2c_pins: accel-i2c-pins {
+ samsung,pins = "gpj3-6", "gpj3-7";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ pmic_i2c_pins: pmic-i2c-pins {
+ samsung,pins = "gpj4-0", "gpj4-3";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ touchkey_irq: touchkey-irq {
+ samsung,pins = "gpj4-1";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ lcd_spi_pins: spi-lcd-pins {
+ samsung,pins = "mp01-1", "mp04-1", "mp04-3";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ fg_i2c_pins: fg-i2c-pins {
+ samsung,pins = "mp05-0", "mp05-1";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ sound_i2c_pins: sound-i2c-pins {
+ samsung,pins = "mp05-2", "mp05-3";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ panel_rst: panel-rst {
+ samsung,pins = "mp05-5";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
};
&pwm {
@@ -454,11 +764,16 @@
pinctrl-names = "default";
cap-sd-highspeed;
cap-mmc-highspeed;
+ keep-power-in-suspend;
mmc-pwrseq = <&wifi_pwrseq>;
non-removable;
status = "okay";
+ assigned-clocks = <&clocks MOUT_MMC1>, <&clocks SCLK_MMC1>;
+ assigned-clock-rates = <0>, <50000000>;
+ assigned-clock-parents = <&clocks MOUT_MPLL>;
+
wlan@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
@@ -475,6 +790,10 @@
pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus4 &tf_detect>;
pinctrl-names = "default";
status = "okay";
+
+ assigned-clocks = <&clocks MOUT_MMC2>, <&clocks SCLK_MMC2>;
+ assigned-clock-rates = <0>, <50000000>;
+ assigned-clock-parents = <&clocks MOUT_MPLL>;
};
&uart0 {
diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
index 07a8d9bbe5b8..5e1b81823a8d 100644
--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
@@ -36,3 +36,252 @@
};
};
};
+
+&pinctrl0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sleep_cfg>;
+
+ /* Based on vendor kernel v2.6.35.7 */
+ sleep_cfg: sleep-cfg {
+ PIN_SLP(gpa0-0, PREV, NONE);
+ PIN_SLP(gpa0-1, PREV, NONE);
+ PIN_SLP(gpa0-2, PREV, NONE);
+ PIN_SLP(gpa0-3, OUT1, NONE);
+ PIN_SLP(gpa0-4, PREV, NONE);
+ PIN_SLP(gpa0-5, PREV, NONE);
+ PIN_SLP(gpa0-6, PREV, NONE);
+ PIN_SLP(gpa0-7, PREV, NONE);
+
+ PIN_SLP(gpa1-0, INPUT, DOWN);
+ PIN_SLP(gpa1-1, OUT0, NONE);
+ PIN_SLP(gpa1-2, INPUT, DOWN);
+ PIN_SLP(gpa1-3, OUT0, NONE);
+
+ PIN_SLP(gpb-0, OUT0, NONE);
+ PIN_SLP(gpb-1, OUT1, NONE);
+ PIN_SLP(gpb-2, OUT0, NONE);
+ PIN_SLP(gpb-3, PREV, NONE);
+ PIN_SLP(gpb-4, INPUT, NONE);
+ PIN_SLP(gpb-5, PREV, NONE);
+ PIN_SLP(gpb-6, INPUT, DOWN);
+ PIN_SLP(gpb-7, OUT0, NONE);
+
+ PIN_SLP(gpc0-0, OUT0, NONE);
+ PIN_SLP(gpc0-1, INPUT, DOWN);
+ PIN_SLP(gpc0-2, OUT0, NONE);
+ PIN_SLP(gpc0-3, INPUT, DOWN);
+ PIN_SLP(gpc0-4, OUT0, NONE);
+
+ PIN_SLP(gpc1-0, INPUT, DOWN);
+ PIN_SLP(gpc1-1, INPUT, DOWN);
+ PIN_SLP(gpc1-2, INPUT, DOWN);
+ PIN_SLP(gpc1-3, INPUT, DOWN);
+ PIN_SLP(gpc1-4, INPUT, DOWN);
+
+ PIN_SLP(gpd0-0, INPUT, DOWN);
+ PIN_SLP(gpd0-1, OUT0, NONE);
+ PIN_SLP(gpd0-2, INPUT, DOWN);
+ PIN_SLP(gpd0-3, INPUT, DOWN);
+
+ PIN_SLP(gpd1-0, INPUT, NONE);
+ PIN_SLP(gpd1-1, INPUT, NONE);
+ PIN_SLP(gpd1-2, INPUT, DOWN);
+ PIN_SLP(gpd1-3, INPUT, DOWN);
+ PIN_SLP(gpd1-4, INPUT, DOWN);
+ PIN_SLP(gpd1-5, INPUT, DOWN);
+
+ PIN_SLP(gpe0-0, INPUT, DOWN);
+ PIN_SLP(gpe0-1, INPUT, DOWN);
+ PIN_SLP(gpe0-2, INPUT, DOWN);
+ PIN_SLP(gpe0-3, INPUT, DOWN);
+ PIN_SLP(gpe0-4, INPUT, DOWN);
+ PIN_SLP(gpe0-5, INPUT, DOWN);
+ PIN_SLP(gpe0-6, INPUT, DOWN);
+ PIN_SLP(gpe0-7, INPUT, DOWN);
+
+ PIN_SLP(gpe1-0, INPUT, DOWN);
+ PIN_SLP(gpe1-1, INPUT, DOWN);
+ PIN_SLP(gpe1-2, INPUT, DOWN);
+ PIN_SLP(gpe1-3, OUT0, NONE);
+ PIN_SLP(gpe1-4, INPUT, DOWN);
+
+ PIN_SLP(gpf0-0, OUT0, NONE);
+ PIN_SLP(gpf0-1, OUT0, NONE);
+ PIN_SLP(gpf0-2, OUT0, NONE);
+ PIN_SLP(gpf0-3, OUT0, NONE);
+ PIN_SLP(gpf0-4, OUT0, NONE);
+ PIN_SLP(gpf0-5, OUT0, NONE);
+ PIN_SLP(gpf0-6, OUT0, NONE);
+ PIN_SLP(gpf0-7, OUT0, NONE);
+
+ PIN_SLP(gpf1-0, OUT0, NONE);
+ PIN_SLP(gpf1-1, OUT0, NONE);
+ PIN_SLP(gpf1-2, OUT0, NONE);
+ PIN_SLP(gpf1-3, OUT0, NONE);
+ PIN_SLP(gpf1-4, OUT0, NONE);
+ PIN_SLP(gpf1-5, OUT0, NONE);
+ PIN_SLP(gpf1-6, OUT0, NONE);
+ PIN_SLP(gpf1-7, OUT0, NONE);
+
+ PIN_SLP(gpf2-0, OUT0, NONE);
+ PIN_SLP(gpf2-1, OUT0, NONE);
+ PIN_SLP(gpf2-2, OUT0, NONE);
+ PIN_SLP(gpf2-3, OUT0, NONE);
+ PIN_SLP(gpf2-4, OUT0, NONE);
+ PIN_SLP(gpf2-5, OUT0, NONE);
+ PIN_SLP(gpf2-6, OUT0, NONE);
+ PIN_SLP(gpf2-7, OUT0, NONE);
+
+ PIN_SLP(gpf3-0, OUT0, NONE);
+ PIN_SLP(gpf3-1, OUT0, NONE);
+ PIN_SLP(gpf3-2, OUT0, NONE);
+ PIN_SLP(gpf3-3, OUT0, NONE);
+ PIN_SLP(gpf3-4, PREV, NONE);
+ PIN_SLP(gpf3-5, INPUT, DOWN);
+
+ PIN_SLP(gpg0-0, INPUT, DOWN);
+ PIN_SLP(gpg0-1, INPUT, DOWN);
+ PIN_SLP(gpg0-2, INPUT, NONE);
+ PIN_SLP(gpg0-3, INPUT, DOWN);
+ PIN_SLP(gpg0-4, INPUT, DOWN);
+ PIN_SLP(gpg0-5, INPUT, DOWN);
+ PIN_SLP(gpg0-6, INPUT, DOWN);
+
+ PIN_SLP(gpg1-0, OUT0, NONE);
+ PIN_SLP(gpg1-1, OUT1, NONE);
+ PIN_SLP(gpg1-2, PREV, NONE);
+ PIN_SLP(gpg1-3, OUT1, NONE);
+ PIN_SLP(gpg1-4, OUT1, NONE);
+ PIN_SLP(gpg1-5, OUT1, NONE);
+ PIN_SLP(gpg1-6, OUT1, NONE);
+
+ PIN_SLP(gpg2-0, OUT0, NONE);
+ PIN_SLP(gpg2-1, OUT0, NONE);
+ PIN_SLP(gpg2-2, INPUT, NONE);
+ PIN_SLP(gpg2-3, OUT0, NONE);
+ PIN_SLP(gpg2-4, OUT0, NONE);
+ PIN_SLP(gpg2-5, OUT0, NONE);
+ PIN_SLP(gpg2-6, OUT0, NONE);
+
+ PIN_SLP(gpg3-0, PREV, UP);
+ PIN_SLP(gpg3-1, PREV, UP);
+ PIN_SLP(gpg3-2, INPUT, NONE);
+ PIN_SLP(gpg3-3, INPUT, DOWN);
+ PIN_SLP(gpg3-4, OUT0, NONE);
+ PIN_SLP(gpg3-5, OUT0, NONE);
+ PIN_SLP(gpg3-6, INPUT, DOWN);
+
+ PIN_SLP(gpi-0, PREV, NONE);
+ PIN_SLP(gpi-1, INPUT, DOWN);
+ PIN_SLP(gpi-2, PREV, NONE);
+ PIN_SLP(gpi-3, PREV, NONE);
+ PIN_SLP(gpi-4, PREV, NONE);
+ PIN_SLP(gpi-5, INPUT, DOWN);
+ PIN_SLP(gpi-6, INPUT, DOWN);
+
+ PIN_SLP(gpj0-0, INPUT, NONE);
+ PIN_SLP(gpj0-1, INPUT, NONE);
+ PIN_SLP(gpj0-2, INPUT, NONE);
+ PIN_SLP(gpj0-3, INPUT, NONE);
+ PIN_SLP(gpj0-4, INPUT, NONE);
+ PIN_SLP(gpj0-5, INPUT, DOWN);
+ PIN_SLP(gpj0-6, OUT0, NONE);
+ PIN_SLP(gpj0-7, INPUT, NONE);
+
+ PIN_SLP(gpj1-0, OUT1, NONE);
+ PIN_SLP(gpj1-1, OUT0, NONE);
+ PIN_SLP(gpj1-2, INPUT, DOWN);
+ PIN_SLP(gpj1-3, PREV, NONE);
+ PIN_SLP(gpj1-4, PREV, NONE);
+ PIN_SLP(gpj1-5, OUT0, NONE);
+
+ PIN_SLP(gpj2-0, INPUT, DOWN);
+ PIN_SLP(gpj2-1, INPUT, DOWN);
+ PIN_SLP(gpj2-2, OUT0, NONE);
+ PIN_SLP(gpj2-3, INPUT, DOWN);
+ PIN_SLP(gpj2-4, INPUT, DOWN);
+ PIN_SLP(gpj2-5, PREV, NONE);
+ PIN_SLP(gpj2-6, PREV, NONE);
+ PIN_SLP(gpj2-7, INPUT, DOWN);
+
+ PIN_SLP(gpj3-0, INPUT, NONE);
+ PIN_SLP(gpj3-1, INPUT, NONE);
+ PIN_SLP(gpj3-2, OUT0, NONE);
+ PIN_SLP(gpj3-3, INPUT, DOWN);
+ PIN_SLP(gpj3-4, INPUT, NONE);
+ PIN_SLP(gpj3-5, INPUT, NONE);
+ PIN_SLP(gpj3-6, INPUT, NONE);
+ PIN_SLP(gpj3-7, INPUT, NONE);
+
+ PIN_SLP(gpj4-0, INPUT, NONE);
+ PIN_SLP(gpj4-1, INPUT, DOWN);
+ PIN_SLP(gpj4-2, PREV, NONE);
+ PIN_SLP(gpj4-3, INPUT, NONE);
+ PIN_SLP(gpj4-4, INPUT, DOWN);
+
+ PIN_SLP(mp01-0, OUT1, NONE);
+ PIN_SLP(mp01-1, OUT0, NONE);
+ PIN_SLP(mp01-2, INPUT, DOWN);
+ PIN_SLP(mp01-3, INPUT, DOWN);
+ PIN_SLP(mp01-4, OUT1, NONE);
+ PIN_SLP(mp01-5, INPUT, DOWN);
+ PIN_SLP(mp01-6, INPUT, DOWN);
+ PIN_SLP(mp01-7, INPUT, DOWN);
+
+ PIN_SLP(mp02-0, INPUT, DOWN);
+ PIN_SLP(mp02-1, INPUT, DOWN);
+ PIN_SLP(mp02-2, INPUT, NONE);
+ PIN_SLP(mp02-3, INPUT, DOWN);
+
+ PIN_SLP(mp03-0, INPUT, DOWN);
+ PIN_SLP(mp03-1, INPUT, DOWN);
+ PIN_SLP(mp03-2, OUT1, NONE);
+ PIN_SLP(mp03-3, OUT0, NONE);
+ PIN_SLP(mp03-4, INPUT, NONE);
+ PIN_SLP(mp03-5, OUT0, NONE);
+ PIN_SLP(mp03-6, INPUT, DOWN);
+ PIN_SLP(mp03-7, INPUT, DOWN);
+
+ PIN_SLP(mp04-0, INPUT, DOWN);
+ PIN_SLP(mp04-1, OUT0, NONE);
+ PIN_SLP(mp04-2, INPUT, DOWN);
+ PIN_SLP(mp04-3, OUT0, NONE);
+ PIN_SLP(mp04-4, INPUT, DOWN);
+ PIN_SLP(mp04-5, INPUT, DOWN);
+ PIN_SLP(mp04-6, OUT0, NONE);
+ PIN_SLP(mp04-7, INPUT, DOWN);
+
+ PIN_SLP(mp05-0, INPUT, NONE);
+ PIN_SLP(mp05-1, INPUT, NONE);
+ PIN_SLP(mp05-2, INPUT, NONE);
+ PIN_SLP(mp05-3, INPUT, NONE);
+ PIN_SLP(mp05-4, INPUT, DOWN);
+ PIN_SLP(mp05-5, OUT0, NONE);
+ PIN_SLP(mp05-6, INPUT, DOWN);
+ PIN_SLP(mp05-7, PREV, NONE);
+
+ PIN_SLP(mp06-0, INPUT, DOWN);
+ PIN_SLP(mp06-1, INPUT, DOWN);
+ PIN_SLP(mp06-2, INPUT, DOWN);
+ PIN_SLP(mp06-3, INPUT, DOWN);
+ PIN_SLP(mp06-4, INPUT, DOWN);
+ PIN_SLP(mp06-5, INPUT, DOWN);
+ PIN_SLP(mp06-6, INPUT, DOWN);
+ PIN_SLP(mp06-7, INPUT, DOWN);
+
+ PIN_SLP(mp07-0, INPUT, DOWN);
+ PIN_SLP(mp07-1, INPUT, DOWN);
+ PIN_SLP(mp07-2, INPUT, DOWN);
+ PIN_SLP(mp07-3, INPUT, DOWN);
+ PIN_SLP(mp07-4, INPUT, DOWN);
+ PIN_SLP(mp07-5, INPUT, DOWN);
+ PIN_SLP(mp07-6, INPUT, DOWN);
+ PIN_SLP(mp07-7, INPUT, DOWN);
+ };
+};
+
+&wm8994 {
+ /* GPIO3 (BCLK2) and GPIO4 (LRCLK2) as outputs */
+ wlf,gpio-cfg = <0xa101 0x8100 0x8100 0x8100 0x8100 0xa101
+ 0x0100 0x8100 0x0100 0x0100 0x0100>;
+};
diff --git a/arch/arm/boot/dts/s5pv210-galaxys.dts b/arch/arm/boot/dts/s5pv210-galaxys.dts
index cf161bbfbacf..5d10dd67eacc 100644
--- a/arch/arm/boot/dts/s5pv210-galaxys.dts
+++ b/arch/arm/boot/dts/s5pv210-galaxys.dts
@@ -49,15 +49,303 @@
wakeup-source;
};
};
+
+ i2c_fmradio: i2c-gpio-8 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpd1 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpd1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&fm_i2c_pins>;
+
+ fmradio@10 {
+ compatible = "silabs,si470x";
+ reg = <0x10>;
+ interrupt-parent = <&gpj2>;
+ interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpj2 5 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&fm_irq &fm_rst>;
+ };
+ };
+};
+
+&aliases {
+ i2c8 = &i2c_fmradio;
};
&pinctrl0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&sleep_cfg>;
+
+ fm_i2c_pins: fm-i2c-pins {
+ samsung,pins = "gpd1-2", "gpd1-3";
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ fm_irq: fm-irq {
+ samsung,pins = "gpj2-4";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
+ fm_rst: fm-rst {
+ samsung,pins = "gpj2-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
+ samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+ };
+
massmemory_en: massmemory-en {
samsung,pins = "gpj2-7";
samsung,pin-function = <EXYNOS_PIN_FUNC_OUTPUT>;
samsung,pin-pud = <S3C64XX_PIN_PULL_NONE>;
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
+
+ /* Based on CyanogenMod 3.0.101 kernel */
+ sleep_cfg: sleep-cfg {
+ PIN_SLP(gpa0-0, PREV, NONE);
+ PIN_SLP(gpa0-1, PREV, NONE);
+ PIN_SLP(gpa0-2, PREV, NONE);
+ PIN_SLP(gpa0-3, OUT1, NONE);
+ PIN_SLP(gpa0-4, INPUT, DOWN);
+ PIN_SLP(gpa0-5, OUT0, NONE);
+ PIN_SLP(gpa0-6, INPUT, DOWN);
+ PIN_SLP(gpa0-7, OUT1, NONE);
+
+ PIN_SLP(gpa1-0, INPUT, DOWN);
+ PIN_SLP(gpa1-1, OUT0, NONE);
+ PIN_SLP(gpa1-2, INPUT, NONE);
+ PIN_SLP(gpa1-3, OUT0, NONE);
+
+ PIN_SLP(gpb-0, OUT0, NONE);
+ PIN_SLP(gpb-1, OUT1, NONE);
+ PIN_SLP(gpb-2, OUT0, NONE);
+ PIN_SLP(gpb-3, PREV, NONE);
+ PIN_SLP(gpb-4, INPUT, NONE);
+ PIN_SLP(gpb-5, PREV, NONE);
+ PIN_SLP(gpb-6, INPUT, DOWN);
+ PIN_SLP(gpb-7, OUT0, NONE);
+
+ PIN_SLP(gpc0-0, OUT0, NONE);
+ PIN_SLP(gpc0-1, INPUT, DOWN);
+ PIN_SLP(gpc0-2, OUT0, NONE);
+ PIN_SLP(gpc0-3, INPUT, NONE);
+ PIN_SLP(gpc0-4, OUT0, NONE);
+
+ PIN_SLP(gpc1-0, INPUT, DOWN);
+ PIN_SLP(gpc1-1, INPUT, DOWN);
+ PIN_SLP(gpc1-2, INPUT, DOWN);
+ PIN_SLP(gpc1-3, INPUT, DOWN);
+ PIN_SLP(gpc1-4, INPUT, DOWN);
+
+ PIN_SLP(gpd0-0, INPUT, DOWN);
+ PIN_SLP(gpd0-1, OUT0, NONE);
+ PIN_SLP(gpd0-2, INPUT, DOWN);
+ PIN_SLP(gpd0-3, INPUT, DOWN);
+
+ PIN_SLP(gpd1-0, INPUT, NONE);
+ PIN_SLP(gpd1-1, INPUT, NONE);
+ PIN_SLP(gpd1-2, INPUT, NONE);
+ PIN_SLP(gpd1-3, INPUT, NONE);
+ PIN_SLP(gpd1-4, INPUT, DOWN);
+ PIN_SLP(gpd1-5, INPUT, DOWN);
+
+ PIN_SLP(gpe0-0, INPUT, DOWN);
+ PIN_SLP(gpe0-1, INPUT, DOWN);
+ PIN_SLP(gpe0-2, INPUT, DOWN);
+ PIN_SLP(gpe0-3, INPUT, DOWN);
+ PIN_SLP(gpe0-4, INPUT, DOWN);
+ PIN_SLP(gpe0-5, INPUT, DOWN);
+ PIN_SLP(gpe0-6, INPUT, DOWN);
+ PIN_SLP(gpe0-7, INPUT, DOWN);
+
+ PIN_SLP(gpe1-0, INPUT, DOWN);
+ PIN_SLP(gpe1-1, INPUT, DOWN);
+ PIN_SLP(gpe1-2, INPUT, DOWN);
+ PIN_SLP(gpe1-3, OUT0, NONE);
+ PIN_SLP(gpe1-4, INPUT, DOWN);
+
+ PIN_SLP(gpf0-0, OUT0, NONE);
+ PIN_SLP(gpf0-1, OUT0, NONE);
+ PIN_SLP(gpf0-2, OUT0, NONE);
+ PIN_SLP(gpf0-3, OUT0, NONE);
+ PIN_SLP(gpf0-4, OUT0, NONE);
+ PIN_SLP(gpf0-5, OUT0, NONE);
+ PIN_SLP(gpf0-6, OUT0, NONE);
+ PIN_SLP(gpf0-7, OUT0, NONE);
+
+ PIN_SLP(gpf1-0, OUT0, NONE);
+ PIN_SLP(gpf1-1, OUT0, NONE);
+ PIN_SLP(gpf1-2, OUT0, NONE);
+ PIN_SLP(gpf1-3, OUT0, NONE);
+ PIN_SLP(gpf1-4, OUT0, NONE);
+ PIN_SLP(gpf1-5, OUT0, NONE);
+ PIN_SLP(gpf1-6, OUT0, NONE);
+ PIN_SLP(gpf1-7, OUT0, NONE);
+
+ PIN_SLP(gpf2-0, OUT0, NONE);
+ PIN_SLP(gpf2-1, OUT0, NONE);
+ PIN_SLP(gpf2-2, OUT0, NONE);
+ PIN_SLP(gpf2-3, OUT0, NONE);
+ PIN_SLP(gpf2-4, OUT0, NONE);
+ PIN_SLP(gpf2-5, OUT0, NONE);
+ PIN_SLP(gpf2-6, OUT0, NONE);
+ PIN_SLP(gpf2-7, OUT0, NONE);
+
+ PIN_SLP(gpf3-0, OUT0, NONE);
+ PIN_SLP(gpf3-1, OUT0, NONE);
+ PIN_SLP(gpf3-2, OUT0, NONE);
+ PIN_SLP(gpf3-3, OUT0, NONE);
+ PIN_SLP(gpf3-4, PREV, NONE);
+ PIN_SLP(gpf3-5, INPUT, DOWN);
+
+ PIN_SLP(gpg0-0, OUT0, NONE);
+ PIN_SLP(gpg0-1, INPUT, NONE);
+ PIN_SLP(gpg0-2, INPUT, NONE);
+ PIN_SLP(gpg0-3, INPUT, NONE);
+ PIN_SLP(gpg0-4, INPUT, NONE);
+ PIN_SLP(gpg0-5, INPUT, NONE);
+ PIN_SLP(gpg0-6, INPUT, NONE);
+
+ PIN_SLP(gpg1-0, OUT0, NONE);
+ PIN_SLP(gpg1-1, OUT1, NONE);
+ PIN_SLP(gpg1-2, PREV, NONE);
+ PIN_SLP(gpg1-3, OUT1, NONE);
+ PIN_SLP(gpg1-4, OUT1, NONE);
+ PIN_SLP(gpg1-5, OUT1, NONE);
+ PIN_SLP(gpg1-6, OUT1, NONE);
+
+ PIN_SLP(gpg2-0, OUT0, NONE);
+ PIN_SLP(gpg2-1, OUT0, NONE);
+ PIN_SLP(gpg2-2, INPUT, NONE);
+ PIN_SLP(gpg2-3, OUT0, NONE);
+ PIN_SLP(gpg2-4, OUT0, NONE);
+ PIN_SLP(gpg2-5, OUT0, NONE);
+ PIN_SLP(gpg2-6, OUT0, NONE);
+
+ PIN_SLP(gpg3-0, OUT1, NONE);
+ PIN_SLP(gpg3-1, OUT0, NONE);
+ PIN_SLP(gpg3-2, INPUT, NONE);
+ PIN_SLP(gpg3-3, INPUT, DOWN);
+ PIN_SLP(gpg3-4, OUT0, NONE);
+ PIN_SLP(gpg3-5, OUT0, NONE);
+ PIN_SLP(gpg3-6, INPUT, DOWN);
+
+ PIN_SLP(gpi-0, PREV, NONE);
+ PIN_SLP(gpi-1, INPUT, DOWN);
+ PIN_SLP(gpi-2, PREV, NONE);
+ PIN_SLP(gpi-3, PREV, NONE);
+ PIN_SLP(gpi-4, PREV, NONE);
+ PIN_SLP(gpi-5, INPUT, DOWN);
+ PIN_SLP(gpi-6, INPUT, DOWN);
+
+ PIN_SLP(gpj0-0, INPUT, NONE);
+ PIN_SLP(gpj0-1, INPUT, NONE);
+ PIN_SLP(gpj0-2, INPUT, NONE);
+ PIN_SLP(gpj0-3, INPUT, NONE);
+ PIN_SLP(gpj0-4, INPUT, NONE);
+ PIN_SLP(gpj0-5, INPUT, DOWN);
+ PIN_SLP(gpj0-6, OUT0, NONE);
+ PIN_SLP(gpj0-7, INPUT, NONE);
+
+ PIN_SLP(gpj1-0, INPUT, DOWN);
+ PIN_SLP(gpj1-1, OUT0, NONE);
+ PIN_SLP(gpj1-2, INPUT, DOWN);
+ PIN_SLP(gpj1-3, PREV, NONE);
+ PIN_SLP(gpj1-4, PREV, NONE);
+ PIN_SLP(gpj1-5, OUT0, NONE);
+
+ PIN_SLP(gpj2-0, INPUT, DOWN);
+ PIN_SLP(gpj2-1, INPUT, DOWN);
+ PIN_SLP(gpj2-2, OUT0, NONE);
+ PIN_SLP(gpj2-3, INPUT, DOWN);
+ PIN_SLP(gpj2-4, INPUT, UP);
+ PIN_SLP(gpj2-5, PREV, NONE);
+ PIN_SLP(gpj2-6, PREV, NONE);
+ PIN_SLP(gpj2-7, OUT1, NONE);
+
+ PIN_SLP(gpj3-0, INPUT, NONE);
+ PIN_SLP(gpj3-1, INPUT, NONE);
+ PIN_SLP(gpj3-2, OUT0, NONE);
+ PIN_SLP(gpj3-3, INPUT, DOWN);
+ PIN_SLP(gpj3-4, INPUT, NONE);
+ PIN_SLP(gpj3-5, INPUT, NONE);
+ PIN_SLP(gpj3-6, INPUT, NONE);
+ PIN_SLP(gpj3-7, INPUT, NONE);
+
+ PIN_SLP(gpj4-0, INPUT, NONE);
+ PIN_SLP(gpj4-1, INPUT, DOWN);
+ PIN_SLP(gpj4-2, PREV, NONE);
+ PIN_SLP(gpj4-3, INPUT, NONE);
+ PIN_SLP(gpj4-4, INPUT, DOWN);
+
+ PIN_SLP(mp01-0, INPUT, DOWN);
+ PIN_SLP(mp01-1, OUT0, NONE);
+ PIN_SLP(mp01-2, INPUT, DOWN);
+ PIN_SLP(mp01-3, INPUT, DOWN);
+ PIN_SLP(mp01-4, OUT1, NONE);
+ PIN_SLP(mp01-5, INPUT, DOWN);
+ PIN_SLP(mp01-6, INPUT, DOWN);
+ PIN_SLP(mp01-7, INPUT, DOWN);
+
+ PIN_SLP(mp02-0, INPUT, DOWN);
+ PIN_SLP(mp02-1, INPUT, DOWN);
+ PIN_SLP(mp02-2, INPUT, NONE);
+ PIN_SLP(mp02-3, INPUT, DOWN);
+
+ PIN_SLP(mp03-0, INPUT, DOWN);
+ PIN_SLP(mp03-1, INPUT, DOWN);
+ PIN_SLP(mp03-2, OUT1, NONE);
+ PIN_SLP(mp03-3, OUT0, NONE);
+ PIN_SLP(mp03-4, INPUT, NONE);
+ PIN_SLP(mp03-5, OUT1, NONE);
+ PIN_SLP(mp03-6, INPUT, DOWN);
+ PIN_SLP(mp03-7, INPUT, DOWN);
+
+ PIN_SLP(mp04-0, INPUT, DOWN);
+ PIN_SLP(mp04-1, OUT0, NONE);
+ PIN_SLP(mp04-2, INPUT, DOWN);
+ PIN_SLP(mp04-3, OUT0, NONE);
+ PIN_SLP(mp04-4, INPUT, DOWN);
+ PIN_SLP(mp04-5, INPUT, DOWN);
+ PIN_SLP(mp04-6, OUT0, NONE);
+ PIN_SLP(mp04-7, INPUT, DOWN);
+
+ PIN_SLP(mp05-0, INPUT, NONE);
+ PIN_SLP(mp05-1, INPUT, NONE);
+ PIN_SLP(mp05-2, INPUT, NONE);
+ PIN_SLP(mp05-3, INPUT, NONE);
+ PIN_SLP(mp05-4, INPUT, DOWN);
+ PIN_SLP(mp05-5, OUT0, NONE);
+ PIN_SLP(mp05-6, INPUT, DOWN);
+ PIN_SLP(mp05-7, PREV, NONE);
+
+ PIN_SLP(mp06-0, INPUT, DOWN);
+ PIN_SLP(mp06-1, INPUT, DOWN);
+ PIN_SLP(mp06-2, INPUT, DOWN);
+ PIN_SLP(mp06-3, INPUT, DOWN);
+ PIN_SLP(mp06-4, INPUT, DOWN);
+ PIN_SLP(mp06-5, INPUT, DOWN);
+ PIN_SLP(mp06-6, INPUT, DOWN);
+ PIN_SLP(mp06-7, INPUT, DOWN);
+
+ PIN_SLP(mp07-0, INPUT, DOWN);
+ PIN_SLP(mp07-1, INPUT, DOWN);
+ PIN_SLP(mp07-2, INPUT, DOWN);
+ PIN_SLP(mp07-3, INPUT, DOWN);
+ PIN_SLP(mp07-4, INPUT, DOWN);
+ PIN_SLP(mp07-5, INPUT, DOWN);
+ PIN_SLP(mp07-6, INPUT, DOWN);
+ PIN_SLP(mp07-7, INPUT, DOWN);
+ };
};
&sdhci0 {
@@ -67,4 +355,8 @@
pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4>;
pinctrl-names = "default";
status = "okay";
+
+ assigned-clocks = <&clocks MOUT_MMC0>, <&clocks SCLK_MMC0>;
+ assigned-clock-rates = <0>, <52000000>;
+ assigned-clock-parents = <&clocks MOUT_MPLL>;
};
diff --git a/arch/arm/boot/dts/s5pv210-pinctrl.dtsi b/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
index 7f0c9d447871..5e8b66281f01 100644
--- a/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
@@ -18,6 +18,13 @@
#include <dt-bindings/pinctrl/samsung.h>
+#define PIN_SLP(_pin, _mode, _pull) \
+ _pin { \
+ samsung,pins = #_pin; \
+ samsung,pin-con-pdn = <EXYNOS_PIN_PDN_ ##_mode>; \
+ samsung,pin-pud-pdn = <S3C64XX_PIN_PULL_ ##_pull>; \
+ }
+
&pinctrl0 {
gpa0: gpa0 {
gpio-controller;
@@ -195,7 +202,7 @@
#interrupt-cells = <2>;
};
- gpgi: gpgi {
+ gpi: gpi {
gpio-controller;
#gpio-cells = <2>;
};
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 2ad642f51fd9..1b0ee884e91d 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -159,6 +159,18 @@
};
};
+ adc: adc@e1700000 {
+ compatible = "samsung,s5pv210-adc";
+ reg = <0xe1700000 0x1000>;
+ interrupt-parent = <&vic2>;
+ interrupts = <23>, <24>;
+ clocks = <&clocks CLK_TSADC>;
+ clock-names = "adc";
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+ status = "disabled";
+ };
+
spi0: spi@e1300000 {
compatible = "samsung,s5pv210-spi";
reg = <0xe1300000 0x1000>;
@@ -614,7 +626,7 @@
clock-names = "fimc",
"sclk_fimc";
samsung,pix-limits = <4224 8192 1920 4224>;
- samsung,mainscaler-ext;
+ samsung,min-pix-alignment = <16 8>;
samsung,cam-if;
};
@@ -628,8 +640,10 @@
clock-names = "fimc",
"sclk_fimc";
samsung,pix-limits = <4224 8192 1920 4224>;
+ samsung,min-pix-alignment = <1 1>;
samsung,mainscaler-ext;
samsung,cam-if;
+ samsung,lcd-wb;
};
fimc2: fimc@fb400000 {
@@ -641,9 +655,10 @@
<&clocks SCLK_FIMC2>;
clock-names = "fimc",
"sclk_fimc";
- samsung,pix-limits = <4224 8192 1920 4224>;
- samsung,mainscaler-ext;
- samsung,lcd-wb;
+ samsung,pix-limits = <1920 8192 1280 1920>;
+ samsung,min-pix-alignment = <16 8>;
+ samsung,rotators = <0>;
+ samsung,cam-if;
};
};
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index ab550d69db91..31d8766ec7ef 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -21,10 +21,6 @@
aliases {
serial0 = &uart1;
serial1 = &uart3;
- tcb0 = &tcb0;
- tcb1 = &tcb1;
- i2s0 = &i2s0;
- i2s1 = &i2s1;
};
cpus {
@@ -113,8 +109,6 @@
};
usb0: gadget@300000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,sama5d3-udc";
reg = <0x00300000 0x100000
0xfc02c000 0x400>;
@@ -122,124 +116,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 42>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@7 {
- reg = <7>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@8 {
- reg = <8>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@9 {
- reg = <9>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@10 {
- reg = <10>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@11 {
- reg = <11>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@12 {
- reg = <12>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@13 {
- reg = <13>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@14 {
- reg = <14>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@15 {
- reg = <15>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
};
usb1: ohci@400000 {
@@ -635,6 +511,64 @@
#size-cells = <1>;
ranges = <0x0 0xf8034000 0x800>;
status = "disabled";
+
+ uart5: serial@200 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ clock-names = "usart";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(11))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(12))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ spi2: spi@400 {
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ clock-names = "spi_clk";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(11))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(12))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@600 {
+ compatible = "atmel,sama5d2-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(11))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(12))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
};
flx1: flexcom@f8038000 {
@@ -645,6 +579,64 @@
#size-cells = <1>;
ranges = <0x0 0xf8038000 0x800>;
status = "disabled";
+
+ uart6: serial@200 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
+ clock-names = "usart";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(13))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(14))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ spi3: spi@400 {
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
+ clock-names = "spi_clk";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(13))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(14))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@600 {
+ compatible = "atmel,sama5d2-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <20 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(13))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(14))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
};
securam: sram@f8044000 {
@@ -794,6 +786,64 @@
#size-cells = <1>;
ranges = <0x0 0xfc010000 0x800>;
status = "disabled";
+
+ uart7: serial@200 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 21>;
+ clock-names = "usart";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(15))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(16))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ spi4: spi@400 {
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 21>;
+ clock-names = "spi_clk";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(15))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(16))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@600 {
+ compatible = "atmel,sama5d2-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <21 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 21>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(15))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(16))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
};
flx3: flexcom@fc014000 {
@@ -804,6 +854,65 @@
#size-cells = <1>;
ranges = <0x0 0xfc014000 0x800>;
status = "disabled";
+
+ uart8: serial@200 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
+ clock-names = "usart";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(17))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(18))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ spi5: spi@400 {
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
+ clock-names = "spi_clk";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(17))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(18))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@600 {
+ compatible = "atmel,sama5d2-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(17))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(18))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
};
flx4: flexcom@fc018000 {
@@ -814,6 +923,64 @@
#size-cells = <1>;
ranges = <0x0 0xfc018000 0x800>;
status = "disabled";
+
+ uart9: serial@200 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0x200 0x200>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
+ clock-names = "usart";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(19))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(20))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+
+ spi6: spi@400 {
+ compatible = "atmel,at91rm9200-spi";
+ reg = <0x400 0x200>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
+ clock-names = "spi_clk";
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(19))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(20))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@600 {
+ compatible = "atmel,sama5d2-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(19))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) |
+ AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(20))>;
+ dma-names = "tx", "rx";
+ atmel,fifo-size = <16>;
+ status = "disabled";
+ };
};
trng@fc01c000 {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index f11b018e9173..0bb5b6fa0748 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -108,7 +108,7 @@
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&mci0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 21>;
clock-names = "mci_clk";
};
@@ -123,7 +123,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
- clocks = <&spi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 24>;
clock-names = "spi_clk";
status = "disabled";
};
@@ -137,7 +137,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
- clocks = <&ssc0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 38>;
clock-names = "pclk";
status = "disabled";
};
@@ -148,7 +148,7 @@
#size-cells = <0>;
reg = <0xf0010000 0x100>;
interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tcb0_clk>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 26>, <&clk32k>;
clock-names = "t0_clk", "slow_clk";
};
@@ -163,10 +163,10 @@
pinctrl-0 = <&pinctrl_i2c0>;
pinctrl-1 = <&pinctrl_i2c0_gpio>;
sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&twi0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 18>;
status = "disabled";
};
@@ -181,10 +181,10 @@
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&pioC 26 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioC 27 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioC 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&twi1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
status = "disabled";
};
@@ -197,7 +197,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart0>;
- clocks = <&usart0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 12>;
clock-names = "usart";
status = "disabled";
};
@@ -211,7 +211,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart1>;
- clocks = <&usart1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 13>;
clock-names = "usart";
status = "disabled";
};
@@ -222,7 +222,7 @@
interrupts = <16 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
- clocks = <&uart0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
clock-names = "usart";
status = "disabled";
};
@@ -232,7 +232,7 @@
reg = <0xf002c000 0x300>;
interrupts = <28 IRQ_TYPE_LEVEL_HIGH 4>;
#pwm-cells = <3>;
- clocks = <&pwm_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 28>;
status = "disabled";
};
@@ -242,7 +242,7 @@
interrupts = <37 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_isi_data_0_7>;
- clocks = <&isi_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 37>;
clock-names = "isi_clk";
status = "disabled";
port {
@@ -267,7 +267,7 @@
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&mci1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
clock-names = "mci_clk";
};
@@ -282,7 +282,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
- clocks = <&spi1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 25>;
clock-names = "spi_clk";
status = "disabled";
};
@@ -296,7 +296,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
- clocks = <&ssc1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 39>;
clock-names = "pclk";
status = "disabled";
};
@@ -323,7 +323,7 @@
&pinctrl_adc0_ad10
&pinctrl_adc0_ad11
>;
- clocks = <&adc_clk>,
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 29>,
<&adc_op_clk>;
clock-names = "adc_clk", "adc_op_clk";
atmel,adc-channels-used = <0xfff>;
@@ -367,10 +367,10 @@
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
sda-gpios = <&pioA 18 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA 19 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&twi2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 20>;
status = "disabled";
};
@@ -383,7 +383,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart2>;
- clocks = <&usart2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
clock-names = "usart";
status = "disabled";
};
@@ -397,7 +397,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usart3>;
- clocks = <&usart3_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 15>;
clock-names = "usart";
status = "disabled";
};
@@ -408,7 +408,7 @@
interrupts = <42 IRQ_TYPE_LEVEL_HIGH 0>;
dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(17)>;
dma-names = "tx";
- clocks = <&sha_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 42>;
clock-names = "sha_clk";
};
@@ -419,7 +419,7 @@
dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(18)>,
<&dma1 2 AT91_DMA_CFG_PER_ID(19)>;
dma-names = "tx", "rx";
- clocks = <&aes_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 43>;
clock-names = "aes_clk";
};
@@ -430,7 +430,7 @@
dmas = <&dma1 2 AT91_DMA_CFG_PER_ID(20)>,
<&dma1 2 AT91_DMA_CFG_PER_ID(21)>;
dma-names = "tx", "rx";
- clocks = <&tdes_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 44>;
clock-names = "tdes_clk";
};
@@ -438,14 +438,14 @@
compatible = "atmel,at91sam9g45-trng";
reg = <0xf8040000 0x100>;
interrupts = <45 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&trng_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 45>;
};
hsmc: hsmc@ffffc000 {
compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
reg = <0xffffc000 0x1000>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
- clocks = <&hsmc_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 5>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
@@ -462,7 +462,7 @@
reg = <0xffffe600 0x200>;
interrupts = <30 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <2>;
- clocks = <&dma0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 30>;
clock-names = "dma_clk";
};
@@ -471,14 +471,14 @@
reg = <0xffffe800 0x200>;
interrupts = <31 IRQ_TYPE_LEVEL_HIGH 0>;
#dma-cells = <2>;
- clocks = <&dma1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 31>;
clock-names = "dma_clk";
};
ramc0: ramc@ffffea00 {
compatible = "atmel,sama5d3-ddramc";
reg = <0xffffea00 0x200>;
- clocks = <&ddrck>, <&mpddr_clk>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
clock-names = "ddrck", "mpddr";
};
@@ -491,7 +491,7 @@
dma-names = "tx", "rx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
- clocks = <&dbgu_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 2>;
clock-names = "usart";
status = "disabled";
};
@@ -967,7 +967,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioA_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 6>;
};
pioB: gpio@fffff400 {
@@ -978,7 +978,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioB_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 7>;
};
pioC: gpio@fffff600 {
@@ -989,7 +989,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioC_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 8>;
};
pioD: gpio@fffff800 {
@@ -1000,7 +1000,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioD_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 9>;
};
pioE: gpio@fffffa00 {
@@ -1011,7 +1011,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&pioE_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 10>;
};
};
@@ -1019,353 +1019,9 @@
compatible = "atmel,sama5d3-pmc", "syscon";
reg = <0xfffffc00 0x120>;
interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
- interrupt-controller;
- #address-cells = <1>;
- #size-cells = <0>;
- #interrupt-cells = <1>;
-
- main_rc_osc: main_rc_osc {
- compatible = "atmel,at91sam9x5-clk-main-rc-osc";
- #clock-cells = <0>;
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_MOSCRCS>;
- clock-frequency = <12000000>;
- clock-accuracy = <50000000>;
- };
-
- main_osc: main_osc {
- compatible = "atmel,at91rm9200-clk-main-osc";
- #clock-cells = <0>;
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_MOSCS>;
- clocks = <&main_xtal>;
- };
-
- main: mainck {
- compatible = "atmel,at91sam9x5-clk-main";
- #clock-cells = <0>;
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_MOSCSELS>;
- clocks = <&main_rc_osc &main_osc>;
- };
-
- plla: pllack {
- compatible = "atmel,sama5d3-clk-pll";
- #clock-cells = <0>;
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_LOCKA>;
- clocks = <&main>;
- reg = <0>;
- atmel,clk-input-range = <8000000 50000000>;
- #atmel,pll-clk-output-range-cells = <4>;
- atmel,pll-clk-output-ranges = <400000000 1000000000 0 0>;
- };
-
- plladiv: plladivck {
- compatible = "atmel,at91sam9x5-clk-plldiv";
- #clock-cells = <0>;
- clocks = <&plla>;
- };
-
- utmi: utmick {
- compatible = "atmel,at91sam9x5-clk-utmi";
- #clock-cells = <0>;
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_LOCKU>;
- clocks = <&main>;
- };
-
- mck: masterck {
- compatible = "atmel,at91sam9x5-clk-master";
- #clock-cells = <0>;
- interrupt-parent = <&pmc>;
- interrupts = <AT91_PMC_MCKRDY>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>;
- atmel,clk-output-range = <0 166000000>;
- atmel,clk-divisors = <1 2 4 3>;
- };
-
- usb: usbck {
- compatible = "atmel,at91sam9x5-clk-usb";
- #clock-cells = <0>;
- clocks = <&plladiv>, <&utmi>;
- };
-
- prog: progck {
- compatible = "atmel,at91sam9x5-clk-programmable";
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&pmc>;
- clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>;
-
- prog0: prog0 {
- #clock-cells = <0>;
- reg = <0>;
- interrupts = <AT91_PMC_PCKRDY(0)>;
- };
-
- prog1: prog1 {
- #clock-cells = <0>;
- reg = <1>;
- interrupts = <AT91_PMC_PCKRDY(1)>;
- };
-
- prog2: prog2 {
- #clock-cells = <0>;
- reg = <2>;
- interrupts = <AT91_PMC_PCKRDY(2)>;
- };
- };
-
- smd: smdclk {
- compatible = "atmel,at91sam9x5-clk-smd";
- #clock-cells = <0>;
- clocks = <&plladiv>, <&utmi>;
- };
-
- systemck {
- compatible = "atmel,at91rm9200-clk-system";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ddrck: ddrck {
- #clock-cells = <0>;
- reg = <2>;
- clocks = <&mck>;
- };
-
- smdck: smdck {
- #clock-cells = <0>;
- reg = <4>;
- clocks = <&smd>;
- };
-
- uhpck: uhpck {
- #clock-cells = <0>;
- reg = <6>;
- clocks = <&usb>;
- };
-
- udpck: udpck {
- #clock-cells = <0>;
- reg = <7>;
- clocks = <&usb>;
- };
-
- pck0: pck0 {
- #clock-cells = <0>;
- reg = <8>;
- clocks = <&prog0>;
- };
-
- pck1: pck1 {
- #clock-cells = <0>;
- reg = <9>;
- clocks = <&prog1>;
- };
-
- pck2: pck2 {
- #clock-cells = <0>;
- reg = <10>;
- clocks = <&prog2>;
- };
- };
-
- periphck {
- compatible = "atmel,at91sam9x5-clk-peripheral";
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&mck>;
-
- dbgu_clk: dbgu_clk {
- #clock-cells = <0>;
- reg = <2>;
- };
-
- hsmc_clk: hsmc_clk {
- #clock-cells = <0>;
- reg = <5>;
- };
-
- pioA_clk: pioA_clk {
- #clock-cells = <0>;
- reg = <6>;
- };
-
- pioB_clk: pioB_clk {
- #clock-cells = <0>;
- reg = <7>;
- };
-
- pioC_clk: pioC_clk {
- #clock-cells = <0>;
- reg = <8>;
- };
-
- pioD_clk: pioD_clk {
- #clock-cells = <0>;
- reg = <9>;
- };
-
- pioE_clk: pioE_clk {
- #clock-cells = <0>;
- reg = <10>;
- };
-
- usart0_clk: usart0_clk {
- #clock-cells = <0>;
- reg = <12>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- usart1_clk: usart1_clk {
- #clock-cells = <0>;
- reg = <13>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- usart2_clk: usart2_clk {
- #clock-cells = <0>;
- reg = <14>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- usart3_clk: usart3_clk {
- #clock-cells = <0>;
- reg = <15>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- uart0_clk: uart0_clk {
- #clock-cells = <0>;
- reg = <16>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- twi0_clk: twi0_clk {
- reg = <18>;
- #clock-cells = <0>;
- atmel,clk-output-range = <0 41500000>;
- };
-
- twi1_clk: twi1_clk {
- #clock-cells = <0>;
- reg = <19>;
- atmel,clk-output-range = <0 41500000>;
- };
-
- twi2_clk: twi2_clk {
- #clock-cells = <0>;
- reg = <20>;
- atmel,clk-output-range = <0 41500000>;
- };
-
- mci0_clk: mci0_clk {
- #clock-cells = <0>;
- reg = <21>;
- };
-
- mci1_clk: mci1_clk {
- #clock-cells = <0>;
- reg = <22>;
- };
-
- spi0_clk: spi0_clk {
- #clock-cells = <0>;
- reg = <24>;
- atmel,clk-output-range = <0 166000000>;
- };
-
- spi1_clk: spi1_clk {
- #clock-cells = <0>;
- reg = <25>;
- atmel,clk-output-range = <0 166000000>;
- };
-
- tcb0_clk: tcb0_clk {
- #clock-cells = <0>;
- reg = <26>;
- atmel,clk-output-range = <0 166000000>;
- };
-
- pwm_clk: pwm_clk {
- #clock-cells = <0>;
- reg = <28>;
- };
-
- adc_clk: adc_clk {
- #clock-cells = <0>;
- reg = <29>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- dma0_clk: dma0_clk {
- #clock-cells = <0>;
- reg = <30>;
- };
-
- dma1_clk: dma1_clk {
- #clock-cells = <0>;
- reg = <31>;
- };
-
- uhphs_clk: uhphs_clk {
- #clock-cells = <0>;
- reg = <32>;
- };
-
- udphs_clk: udphs_clk {
- #clock-cells = <0>;
- reg = <33>;
- };
-
- isi_clk: isi_clk {
- #clock-cells = <0>;
- reg = <37>;
- };
-
- ssc0_clk: ssc0_clk {
- #clock-cells = <0>;
- reg = <38>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- ssc1_clk: ssc1_clk {
- #clock-cells = <0>;
- reg = <39>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- sha_clk: sha_clk {
- #clock-cells = <0>;
- reg = <42>;
- };
-
- aes_clk: aes_clk {
- #clock-cells = <0>;
- reg = <43>;
- };
-
- tdes_clk: tdes_clk {
- #clock-cells = <0>;
- reg = <44>;
- };
-
- trng_clk: trng_clk {
- #clock-cells = <0>;
- reg = <45>;
- };
-
- fuse_clk: fuse_clk {
- #clock-cells = <0>;
- reg = <48>;
- };
-
- mpddr_clk: mpddr_clk {
- #clock-cells = <0>;
- reg = <49>;
- };
- };
+ #clock-cells = <2>;
+ clocks = <&clk32k>, <&main_xtal>;
+ clock-names = "slow_clk", "main_xtal";
};
reset_controller: rstc@fffffe00 {
@@ -1384,7 +1040,7 @@
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffe30 0xf>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
};
watchdog: watchdog@fffffe40 {
@@ -1420,127 +1076,20 @@
};
usb0: gadget@500000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,sama5d3-udc";
reg = <0x00500000 0x100000
0xf8030000 0x4000>;
interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&udphs_clk>, <&utmi>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 33>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@7 {
- reg = <7>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@8 {
- reg = <8>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@9 {
- reg = <9>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@10 {
- reg = <10>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@11 {
- reg = <11>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@12 {
- reg = <12>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@13 {
- reg = <13>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@14 {
- reg = <14>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@15 {
- reg = <15>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
};
usb1: ohci@600000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 32>, <&pmc PMC_TYPE_PERIPHERAL 32>, <&pmc PMC_TYPE_SYSTEM 6>;
clock-names = "ohci_clk", "hclk", "uhpck";
status = "disabled";
};
@@ -1549,7 +1098,7 @@
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&utmi>, <&uhphs_clk>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>, <&pmc PMC_TYPE_PERIPHERAL 32>;
clock-names = "usb_clk", "ehci_clk";
status = "disabled";
};
@@ -1565,7 +1114,7 @@
0x1 0x0 0x40000000 0x10000000
0x2 0x0 0x50000000 0x10000000
0x3 0x0 0x60000000 0x10000000>;
- clocks = <&mck>;
+ clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
status = "disabled";
nand_controller: nand-controller {
diff --git a/arch/arm/boot/dts/sama5d3_can.dtsi b/arch/arm/boot/dts/sama5d3_can.dtsi
index 2470dd3fff25..9ac29bf3f933 100644
--- a/arch/arm/boot/dts/sama5d3_can.dtsi
+++ b/arch/arm/boot/dts/sama5d3_can.dtsi
@@ -31,29 +31,13 @@
};
- pmc: pmc@fffffc00 {
- periphck {
- can0_clk: can0_clk {
- #clock-cells = <0>;
- reg = <40>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- can1_clk: can1_clk {
- #clock-cells = <0>;
- reg = <41>;
- atmel,clk-output-range = <0 83000000>;
- };
- };
- };
-
can0: can@f000c000 {
compatible = "atmel,at91sam9x5-can";
reg = <0xf000c000 0x300>;
interrupts = <40 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_can0_rx_tx>;
- clocks = <&can0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 40>;
clock-names = "can_clk";
status = "disabled";
};
@@ -64,7 +48,7 @@
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_can1_rx_tx>;
- clocks = <&can1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 41>;
clock-names = "can_clk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 9aef414bcd2e..45226108850d 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -31,12 +31,6 @@
};
pmc: pmc@fffffc00 {
- periphck {
- macb1_clk: macb1_clk {
- #clock-cells = <0>;
- reg = <35>;
- };
- };
};
macb1: ethernet@f802c000 {
@@ -45,7 +39,7 @@
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_macb1_rmii>;
- clocks = <&macb1_clk>, <&macb1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 35>, <&pmc PMC_TYPE_PERIPHERAL 35>;
clock-names = "hclk", "pclk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sama5d3_gmac.dtsi b/arch/arm/boot/dts/sama5d3_gmac.dtsi
index 3667765a138b..884df7a54dbb 100644
--- a/arch/arm/boot/dts/sama5d3_gmac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_gmac.dtsi
@@ -63,22 +63,13 @@
};
};
- pmc: pmc@fffffc00 {
- periphck {
- macb0_clk: macb0_clk {
- #clock-cells = <0>;
- reg = <34>;
- };
- };
- };
-
macb0: ethernet@f0028000 {
compatible = "atmel,sama5d3-gem";
reg = <0xf0028000 0x100>;
interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_macb0_data_rgmii &pinctrl_macb0_signal_rgmii>;
- clocks = <&macb0_clk>, <&macb0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 34>, <&pmc PMC_TYPE_PERIPHERAL 34>;
clock-names = "hclk", "pclk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sama5d3_lcd.dtsi b/arch/arm/boot/dts/sama5d3_lcd.dtsi
index 2cf046cd4e99..308d2fc276d6 100644
--- a/arch/arm/boot/dts/sama5d3_lcd.dtsi
+++ b/arch/arm/boot/dts/sama5d3_lcd.dtsi
@@ -16,7 +16,7 @@
compatible = "atmel,sama5d3-hlcdc";
reg = <0xf0030000 0x2000>;
interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 36>, <&pmc PMC_TYPE_SYSTEM 3>, <&clk32k>;
clock-names = "periph_clk","sys_clk", "slow_clk";
status = "disabled";
@@ -192,23 +192,6 @@
};
};
};
-
- pmc: pmc@fffffc00 {
- periphck {
- lcdc_clk: lcdc_clk {
- #clock-cells = <0>;
- reg = <36>;
- };
- };
-
- systemck {
- lcdck: lcdck {
- #clock-cells = <0>;
- reg = <3>;
- clocks = <&mck>;
- };
- };
- };
};
};
};
diff --git a/arch/arm/boot/dts/sama5d3_mci2.dtsi b/arch/arm/boot/dts/sama5d3_mci2.dtsi
index 3c83c1c36ac8..7141ee97ec3e 100644
--- a/arch/arm/boot/dts/sama5d3_mci2.dtsi
+++ b/arch/arm/boot/dts/sama5d3_mci2.dtsi
@@ -30,15 +30,6 @@
};
};
- pmc: pmc@fffffc00 {
- periphck {
- mci2_clk: mci2_clk {
- #clock-cells = <0>;
- reg = <23>;
- };
- };
- };
-
mmc2: mmc@f8004000 {
compatible = "atmel,hsmci";
reg = <0xf8004000 0x600>;
@@ -47,7 +38,7 @@
dma-names = "rxtx";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc2_clk_cmd_dat0 &pinctrl_mmc2_dat1_3>;
- clocks = <&mci2_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
clock-names = "mci_clk";
status = "disabled";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
index 215802b8db30..2b18c5c2cc03 100644
--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -17,23 +17,13 @@
ahb {
apb {
- pmc: pmc@fffffc00 {
- periphck {
- tcb1_clk: tcb1_clk {
- #clock-cells = <0>;
- reg = <27>;
- atmel,clk-output-range = <0 166000000>;
- };
- };
- };
-
tcb1: timer@f8014000 {
compatible = "atmel,at91sam9x5-tcb", "simple-mfd", "syscon";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xf8014000 0x100>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&tcb1_clk>, <&clk32k>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 27>, <&clk32k>;
clock-names = "t0_clk", "slow_clk";
};
};
diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
index cb62adbd28ed..a3eaba995cf4 100644
--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
+++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
@@ -36,29 +36,13 @@
};
};
- pmc: pmc@fffffc00 {
- periphck {
- uart0_clk: uart0_clk {
- #clock-cells = <0>;
- reg = <16>;
- atmel,clk-output-range = <0 83000000>;
- };
-
- uart1_clk: uart1_clk {
- #clock-cells = <0>;
- reg = <17>;
- atmel,clk-output-range = <0 83000000>;
- };
- };
- };
-
uart0: serial@f0024000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf0024000 0x100>;
interrupts = <16 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
- clocks = <&uart0_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 16>;
clock-names = "usart";
status = "disabled";
};
@@ -69,7 +53,7 @@
interrupts = <17 IRQ_TYPE_LEVEL_HIGH 5>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
- clocks = <&uart1_clk>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 17>;
clock-names = "usart";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 35031bbc7e70..a499de8a7a64 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -46,7 +46,7 @@
wm8904: wm8904@1a {
compatible = "wlf,wm8904";
reg = <0x1a>;
- clocks = <&pck0>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 8>;
clock-names = "mclk";
};
};
@@ -60,9 +60,9 @@
resetb-gpios = <&pioE 24 GPIO_ACTIVE_LOW>;
pwdn-gpios = <&pioE 29 GPIO_ACTIVE_HIGH>;
/* use pck1 for the master clock of ov2640 */
- clocks = <&pck1>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 9>;
clock-names = "xvclk";
- assigned-clocks = <&pck1>;
+ assigned-clocks = <&pmc PMC_TYPE_SYSTEM 9>;
assigned-clock-rates = <25000000>;
port {
diff --git a/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi b/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
index 8a6916a69da4..fa9e5e2a745d 100644
--- a/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
@@ -45,7 +45,7 @@
wm8904: wm8904@1a {
compatible = "wlf,wm8904";
reg = <0x1a>;
- clocks = <&pck0>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 8>;
clock-names = "mclk";
};
};
@@ -59,9 +59,9 @@
resetb-gpios = <&pioE 24 GPIO_ACTIVE_LOW>;
pwdn-gpios = <&pioE 29 GPIO_ACTIVE_HIGH>;
/* use pck1 for the master clock of ov2640 */
- clocks = <&pck1>;
+ clocks = <&pmc PMC_TYPE_SYSTEM 9>;
clock-names = "xvclk";
- assigned-clocks = <&pck1>;
+ assigned-clocks = <&pmc PMC_TYPE_SYSTEM 9>;
assigned-clock-rates = <25000000>;
port {
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index c9c0316b5b0e..2d9f853ab15f 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -96,8 +96,6 @@
};
usb0: gadget@400000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,sama5d3-udc";
reg = <0x00400000 0x100000
0xfc02c000 0x4000>;
@@ -105,124 +103,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 47>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@7 {
- reg = <7>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@8 {
- reg = <8>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@9 {
- reg = <9>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@10 {
- reg = <10>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@11 {
- reg = <11>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@12 {
- reg = <12>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@13 {
- reg = <13>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@14 {
- reg = <14>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@15 {
- reg = <15>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
};
usb1: ohci@500000 {
@@ -462,7 +342,7 @@
pinctrl-0 = <&pinctrl_i2c0>;
pinctrl-1 = <&pinctrl_i2c0_gpio>;
sda-gpios = <&pioA 30 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioA 31 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioA 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 32>;
@@ -484,7 +364,7 @@
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&pioE 29 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioE 30 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioE 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 33>;
@@ -529,7 +409,7 @@
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
sda-gpios = <&pioB 29 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pioB 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 34>;
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index c134154bcce8..01fd06328420 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -99,7 +99,7 @@
};
cmt1: timer@e6138000 {
- compatible = "renesas,cmt-48-sh73a0", "renesas,cmt-48";
+ compatible = "renesas,sh73a0-cmt1";
reg = <0xe6138000 0x200>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp3_clks SH73A0_CLK_CMT1>;
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 4f3993cc0227..c2b54af417a2 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -531,6 +531,7 @@
reg = <0xff400000 0x100000>;
resets = <&rst LWHPS2FPGA_RESET>;
clocks = <&l4_main_clk>;
+ status = "disabled";
};
fpga_bridge1: fpga_bridge@ff500000 {
@@ -538,6 +539,21 @@
reg = <0xff500000 0x10000>;
resets = <&rst HPS2FPGA_RESET>;
clocks = <&l4_main_clk>;
+ status = "disabled";
+ };
+
+ fpga_bridge2: fpga-bridge@ff600000 {
+ compatible = "altr,socfpga-fpga2hps-bridge";
+ reg = <0xff600000 0x100000>;
+ resets = <&rst FPGA2HPS_RESET>;
+ clocks = <&l4_main_clk>;
+ status = "disabled";
+ };
+
+ fpga_bridge3: fpga-bridge@ffc25080 {
+ compatible = "altr,socfpga-fpga2sdram-bridge";
+ reg = <0xffc25080 0x4>;
+ status = "disabled";
};
fpgamgr0: fpgamgr@ff706000 {
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
index 313f0ab16866..5b499c0b2745 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
@@ -24,6 +24,26 @@
stdout-path = &serial2;
};
+ i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio4 24 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio4 23 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_gpio_1_default>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ magnetometer@c {
+ compatible = "alps,hscdtd008a";
+ reg = <0x0c>;
+
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+ dvdd-supply = <&ab8500_ldo_aux8_reg>;
+ };
+ };
+
soc {
/* External Micro SD card slot */
sdi0_per1@80126000 {
@@ -146,6 +166,32 @@
pinctrl-1 = <&u2rxtx_c_1_sleep>;
};
+ i2c@80004000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c0_a_1_default>;
+ pinctrl-1 = <&i2c0_a_1_sleep>;
+
+ proximity@44 {
+ compatible = "sharp,gp2ap002s00f";
+ reg = <0x44>;
+
+ /* GPIO146 (PS_INT) */
+ interrupt-parent = <&gpio4>;
+ interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vio-supply = <&ab8500_ldo_aux8_reg>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&proximity_default>;
+
+ sharp,proximity-far-hysteresis = <0x40>;
+ sharp,proximity-close-hysteresis = <0x0f>;
+ };
+ };
+
i2c@80128000 {
status = "okay";
@@ -357,6 +403,16 @@
};
};
+ i2c-gpio-1 {
+ i2c_gpio_1_default: i2c_gpio_1 {
+ golden_cfg1 {
+ pins = "GPIO151", /* COMP_SCL */
+ "GPIO152"; /* COMP_SDA */
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+
sdi0 {
sd_level_translator_default: sd_level_translator_default {
golden_cfg1 {
@@ -375,6 +431,15 @@
};
};
+ proximity {
+ proximity_default: proximity_default {
+ golden_cfg1 {
+ pins = "GPIO146_D13"; /* PS_INT */
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
+
imu {
imu_default: imu_default {
golden_cfg1 {
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
index 292ed5286652..8edef161613a 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
@@ -140,7 +140,12 @@
pinctrl-0 = <&i2c_gpio_1_default>;
#address-cells = <1>;
#size-cells = <0>;
- /* TODO: this should be used by the ALPS HSCDTD008A compass sensor */
+ magnetometer@c {
+ compatible = "alps,hscdtd008a";
+ reg = <0x0c>;
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+ dvdd-supply = <&ab8500_ldo_aux8_reg>;
+ };
};
soc {
@@ -362,7 +367,28 @@
pinctrl-0 = <&i2c3_c_2_default>;
pinctrl-1 = <&i2c3_c_2_sleep>;
- /* TODO: this should be used by the Cypress TMA140 touchscreen */
+ /* Cypress CY8CTMA140 touchscreen */
+ touchscreen@20 {
+ compatible = "cypress,cy8ctma140";
+ clock-frequency = <400000>;
+ reg = <0x20>;
+
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <800>;
+ touchscreen-max-pressure = <255>;
+
+ /* GPIO218 for IRQ */
+ interrupt-parent = <&gpio6>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+
+ /* VDD is "digital supply" nominally 1.71-3.6V */
+ vdd-supply = <&ab8500_ldo_aux2_reg>;
+ /* VCPIN is "analog supply", 2.7-3.6 V */
+ vcpin-supply = <&ab8500_ldo_aux2_reg>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tma140_skomer_default>;
+ };
};
mcde@a0350000 {
@@ -557,6 +583,15 @@
};
};
};
+ /* Interrupt line for the Cypress TMA140 touchscreen */
+ touchscreen {
+ tma140_skomer_default: tma140_skomer {
+ skomer_cfg1 {
+ pins = "GPIO218_AH11";
+ ste,config = <&gpio_in_nopull>;
+ };
+ };
+ };
};
&ab8505_gpio {
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 7c36c37260a4..23a1746f3baa 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -767,20 +767,6 @@
<&clk_s_c0_flexgen CLK_ETH_PHY>;
};
- rng10: rng@8a89000 {
- compatible = "st,rng";
- reg = <0x08a89000 0x1000>;
- clocks = <&clk_sysin>;
- status = "okay";
- };
-
- rng11: rng@8a8a000 {
- compatible = "st,rng";
- reg = <0x08a8a000 0x1000>;
- clocks = <&clk_sysin>;
- status = "okay";
- };
-
mailbox0: mailbox@8f00000 {
compatible = "st,stih407-mailbox";
reg = <0x8f00000 0x1000>;
diff --git a/arch/arm/boot/dts/stih418.dtsi b/arch/arm/boot/dts/stih418.dtsi
index 83411322bd92..a05e2278b448 100644
--- a/arch/arm/boot/dts/stih418.dtsi
+++ b/arch/arm/boot/dts/stih418.dtsi
@@ -50,7 +50,7 @@
ohci0: usb@9a03c00 {
compatible = "st,st-ohci-300x";
reg = <0x9a03c00 0x100>;
- interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
@@ -62,7 +62,7 @@
ehci0: usb@9a03e00 {
compatible = "st,st-ehci-300x";
reg = <0x9a03e00 0x100>;
- interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
@@ -76,7 +76,7 @@
ohci1: usb@9a83c00 {
compatible = "st,st-ohci-300x";
reg = <0x9a83c00 0x100>;
- interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
@@ -88,7 +88,7 @@
ehci1: usb@9a83e00 {
compatible = "st,st-ehci-300x";
reg = <0x9a83e00 0x100>;
- interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index d7770699feb5..393f43c85a3c 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -414,14 +414,14 @@
dac1: dac@1 {
compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
+ #io-channel-cells = <1>;
reg = <1>;
status = "disabled";
};
dac2: dac@2 {
compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
+ #io-channel-cells = <1>;
reg = <2>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 05eb02e6d083..9b7fc68380e9 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -180,14 +180,14 @@
dac1: dac@1 {
compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
+ #io-channel-cells = <1>;
reg = <1>;
status = "disabled";
};
dac2: dac@2 {
compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
+ #io-channel-cells = <1>;
reg = <2>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
index 73c07f0dfad2..7eb858732d6d 100644
--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -6,7 +6,7 @@
#include <dt-bindings/pinctrl/stm32-pinfunc.h>
&pinctrl {
- adc1_in6_pins_a: adc1-in6 {
+ adc1_in6_pins_a: adc1-in6-0 {
pins {
pinmux = <STM32_PINMUX('F', 12, ANALOG)>;
};
@@ -21,6 +21,13 @@
};
};
+ adc12_ain_pins_b: adc12-ain-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 12, ANALOG)>, /* ADC1 in6 */
+ <STM32_PINMUX('F', 13, ANALOG)>; /* ADC2 in2 */
+ };
+ };
+
adc12_usb_cc_pins_a: adc12-usb-cc-pins-0 {
pins {
pinmux = <STM32_PINMUX('A', 4, ANALOG)>, /* ADC12 in18 */
@@ -37,7 +44,7 @@
};
};
- cec_pins_sleep_a: cec-sleep-0 {
+ cec_sleep_pins_a: cec-sleep-0 {
pins {
pinmux = <STM32_PINMUX('A', 15, ANALOG)>; /* HDMI_CEC */
};
@@ -52,19 +59,19 @@
};
};
- cec_pins_sleep_b: cec-sleep-1 {
+ cec_sleep_pins_b: cec-sleep-1 {
pins {
pinmux = <STM32_PINMUX('B', 6, ANALOG)>; /* HDMI_CEC */
};
};
- dac_ch1_pins_a: dac-ch1 {
+ dac_ch1_pins_a: dac-ch1-0 {
pins {
pinmux = <STM32_PINMUX('A', 4, ANALOG)>;
};
};
- dac_ch2_pins_a: dac-ch2 {
+ dac_ch2_pins_a: dac-ch2-0 {
pins {
pinmux = <STM32_PINMUX('A', 5, ANALOG)>;
};
@@ -142,7 +149,7 @@
};
};
- ethernet0_rgmii_pins_sleep_a: rgmii-sleep-0 {
+ ethernet0_rgmii_sleep_pins_a: rgmii-sleep-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
<STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
@@ -162,6 +169,108 @@
};
};
+ ethernet0_rgmii_pins_b: rgmii-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, AF11)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('G', 13, AF11)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 2, AF11)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('C', 1, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 2, AF11)>; /* ETH_MDIO */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, AF11)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('H', 7, AF11)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH_RGMII_RX_CTL */
+ bias-disable;
+ };
+ };
+
+ ethernet0_rgmii_sleep_pins_b: rgmii-sleep-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('G', 13, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('H', 7, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH_RGMII_RX_CTL */
+ };
+ };
+
+ ethernet0_rgmii_pins_c: rgmii-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, AF11)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('B', 12, AF11)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, AF11)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 2, AF11)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('G', 11, AF11)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('C', 1, AF11)>; /* ETH_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 2, AF11)>; /* ETH_MDIO */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, AF11)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('B', 1, AF11)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH_RGMII_RX_CTL */
+ bias-disable;
+ };
+ };
+
+ ethernet0_rgmii_sleep_pins_c: rgmii-sleep-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 5, ANALOG)>, /* ETH_RGMII_CLK125 */
+ <STM32_PINMUX('G', 4, ANALOG)>, /* ETH_RGMII_GTX_CLK */
+ <STM32_PINMUX('B', 12, ANALOG)>, /* ETH_RGMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH_RGMII_TXD1 */
+ <STM32_PINMUX('C', 2, ANALOG)>, /* ETH_RGMII_TXD2 */
+ <STM32_PINMUX('E', 2, ANALOG)>, /* ETH_RGMII_TXD3 */
+ <STM32_PINMUX('G', 11, ANALOG)>, /* ETH_RGMII_TX_CTL */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH_MDIO */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH_RGMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH_RGMII_RXD1 */
+ <STM32_PINMUX('H', 6, ANALOG)>, /* ETH_RGMII_RXD2 */
+ <STM32_PINMUX('B', 1, ANALOG)>, /* ETH_RGMII_RXD3 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH_RGMII_RX_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH_RGMII_RX_CTL */
+ };
+ };
+
ethernet0_rmii_pins_a: rmii-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 13, AF11)>, /* ETH1_RMII_TXD0 */
@@ -182,7 +291,7 @@
};
};
- ethernet0_rmii_pins_sleep_a: rmii-sleep-0 {
+ ethernet0_rmii_sleep_pins_a: rmii-sleep-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 13, ANALOG)>, /* ETH1_RMII_TXD0 */
<STM32_PINMUX('G', 14, ANALOG)>, /* ETH1_RMII_TXD1 */
@@ -250,14 +359,14 @@
};
};
- i2c1_pins_sleep_a: i2c1-1 {
+ i2c1_sleep_pins_a: i2c1-sleep-0 {
pins {
pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
<STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
};
};
- i2c1_pins_b: i2c1-2 {
+ i2c1_pins_b: i2c1-1 {
pins {
pinmux = <STM32_PINMUX('F', 14, AF5)>, /* I2C1_SCL */
<STM32_PINMUX('F', 15, AF5)>; /* I2C1_SDA */
@@ -267,7 +376,7 @@
};
};
- i2c1_pins_sleep_b: i2c1-3 {
+ i2c1_sleep_pins_b: i2c1-sleep-1 {
pins {
pinmux = <STM32_PINMUX('F', 14, ANALOG)>, /* I2C1_SCL */
<STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
@@ -284,14 +393,14 @@
};
};
- i2c2_pins_sleep_a: i2c2-1 {
+ i2c2_sleep_pins_a: i2c2-sleep-0 {
pins {
pinmux = <STM32_PINMUX('H', 4, ANALOG)>, /* I2C2_SCL */
<STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
};
};
- i2c2_pins_b1: i2c2-2 {
+ i2c2_pins_b1: i2c2-1 {
pins {
pinmux = <STM32_PINMUX('H', 5, AF4)>; /* I2C2_SDA */
bias-disable;
@@ -300,12 +409,29 @@
};
};
- i2c2_pins_sleep_b1: i2c2-3 {
+ i2c2_sleep_pins_b1: i2c2-sleep-1 {
pins {
pinmux = <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
};
};
+ i2c2_pins_c: i2c2-2 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 1, AF4)>, /* I2C2_SCL */
+ <STM32_PINMUX('H', 5, AF4)>; /* I2C2_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c2_pins_sleep_c: i2c2-sleep-2 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 1, ANALOG)>, /* I2C2_SCL */
+ <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
+ };
+ };
+
i2c5_pins_a: i2c5-0 {
pins {
pinmux = <STM32_PINMUX('A', 11, AF4)>, /* I2C5_SCL */
@@ -316,7 +442,7 @@
};
};
- i2c5_pins_sleep_a: i2c5-1 {
+ i2c5_sleep_pins_a: i2c5-sleep-0 {
pins {
pinmux = <STM32_PINMUX('A', 11, ANALOG)>, /* I2C5_SCL */
<STM32_PINMUX('A', 12, ANALOG)>; /* I2C5_SDA */
@@ -324,6 +450,23 @@
};
};
+ i2c5_pins_b: i2c5-1 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 0, AF4)>, /* I2C5_SCL */
+ <STM32_PINMUX('D', 1, AF4)>; /* I2C5_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ i2c5_sleep_pins_b: i2c5-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 0, ANALOG)>, /* I2C5_SCL */
+ <STM32_PINMUX('D', 1, ANALOG)>; /* I2C5_SDA */
+ };
+ };
+
i2s2_pins_a: i2s2-0 {
pins {
pinmux = <STM32_PINMUX('I', 3, AF5)>, /* I2S2_SDO */
@@ -335,7 +478,7 @@
};
};
- i2s2_pins_sleep_a: i2s2-1 {
+ i2s2_sleep_pins_a: i2s2-sleep-0 {
pins {
pinmux = <STM32_PINMUX('I', 3, ANALOG)>, /* I2S2_SDO */
<STM32_PINMUX('B', 9, ANALOG)>, /* I2S2_WS */
@@ -343,7 +486,7 @@
};
};
- ltdc_pins_a: ltdc-a-0 {
+ ltdc_pins_a: ltdc-0 {
pins {
pinmux = <STM32_PINMUX('G', 7, AF14)>, /* LCD_CLK */
<STM32_PINMUX('I', 10, AF14)>, /* LCD_HSYNC */
@@ -379,7 +522,7 @@
};
};
- ltdc_pins_sleep_a: ltdc-a-1 {
+ ltdc_sleep_pins_a: ltdc-sleep-0 {
pins {
pinmux = <STM32_PINMUX('G', 7, ANALOG)>, /* LCD_CLK */
<STM32_PINMUX('I', 10, ANALOG)>, /* LCD_HSYNC */
@@ -412,7 +555,7 @@
};
};
- ltdc_pins_b: ltdc-b-0 {
+ ltdc_pins_b: ltdc-1 {
pins {
pinmux = <STM32_PINMUX('I', 14, AF14)>, /* LCD_CLK */
<STM32_PINMUX('I', 12, AF14)>, /* LCD_HSYNC */
@@ -448,7 +591,7 @@
};
};
- ltdc_pins_sleep_b: ltdc-b-1 {
+ ltdc_sleep_pins_b: ltdc-sleep-1 {
pins {
pinmux = <STM32_PINMUX('I', 14, ANALOG)>, /* LCD_CLK */
<STM32_PINMUX('I', 12, ANALOG)>, /* LCD_HSYNC */
@@ -481,6 +624,142 @@
};
};
+ ltdc_pins_c: ltdc-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 1, AF9)>, /* LTDC_R6 */
+ <STM32_PINMUX('B', 9, AF14)>, /* LTDC_B7 */
+ <STM32_PINMUX('C', 0, AF14)>, /* LTDC_R5 */
+ <STM32_PINMUX('D', 3, AF14)>, /* LTDC_G7 */
+ <STM32_PINMUX('D', 6, AF14)>, /* LTDC_B2 */
+ <STM32_PINMUX('D', 10, AF14)>, /* LTDC_B3 */
+ <STM32_PINMUX('E', 11, AF14)>, /* LTDC_G3 */
+ <STM32_PINMUX('E', 12, AF14)>, /* LTDC_B4 */
+ <STM32_PINMUX('E', 13, AF14)>, /* LTDC_DE */
+ <STM32_PINMUX('E', 15, AF14)>, /* LTDC_R7 */
+ <STM32_PINMUX('H', 4, AF9)>, /* LTDC_G5 */
+ <STM32_PINMUX('H', 8, AF14)>, /* LTDC_R2 */
+ <STM32_PINMUX('H', 9, AF14)>, /* LTDC_R3 */
+ <STM32_PINMUX('H', 10, AF14)>, /* LTDC_R4 */
+ <STM32_PINMUX('H', 13, AF14)>, /* LTDC_G2 */
+ <STM32_PINMUX('H', 15, AF14)>, /* LTDC_G4 */
+ <STM32_PINMUX('I', 1, AF14)>, /* LTDC_G6 */
+ <STM32_PINMUX('I', 5, AF14)>, /* LTDC_B5 */
+ <STM32_PINMUX('I', 6, AF14)>, /* LTDC_B6 */
+ <STM32_PINMUX('I', 9, AF14)>, /* LTDC_VSYNC */
+ <STM32_PINMUX('I', 10, AF14)>; /* LTDC_HSYNC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 14, AF14)>; /* LTDC_CLK */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ };
+
+ ltdc_sleep_pins_c: ltdc-sleep-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 1, ANALOG)>, /* LTDC_R6 */
+ <STM32_PINMUX('B', 9, ANALOG)>, /* LTDC_B7 */
+ <STM32_PINMUX('C', 0, ANALOG)>, /* LTDC_R5 */
+ <STM32_PINMUX('D', 3, ANALOG)>, /* LTDC_G7 */
+ <STM32_PINMUX('D', 6, ANALOG)>, /* LTDC_B2 */
+ <STM32_PINMUX('D', 10, ANALOG)>, /* LTDC_B3 */
+ <STM32_PINMUX('E', 11, ANALOG)>, /* LTDC_G3 */
+ <STM32_PINMUX('E', 12, ANALOG)>, /* LTDC_B4 */
+ <STM32_PINMUX('E', 13, ANALOG)>, /* LTDC_DE */
+ <STM32_PINMUX('E', 15, ANALOG)>, /* LTDC_R7 */
+ <STM32_PINMUX('H', 4, ANALOG)>, /* LTDC_G5 */
+ <STM32_PINMUX('H', 8, ANALOG)>, /* LTDC_R2 */
+ <STM32_PINMUX('H', 9, ANALOG)>, /* LTDC_R3 */
+ <STM32_PINMUX('H', 10, ANALOG)>, /* LTDC_R4 */
+ <STM32_PINMUX('H', 13, ANALOG)>, /* LTDC_G2 */
+ <STM32_PINMUX('H', 15, ANALOG)>, /* LTDC_G4 */
+ <STM32_PINMUX('I', 1, ANALOG)>, /* LTDC_G6 */
+ <STM32_PINMUX('I', 5, ANALOG)>, /* LTDC_B5 */
+ <STM32_PINMUX('I', 6, ANALOG)>, /* LTDC_B6 */
+ <STM32_PINMUX('I', 9, ANALOG)>, /* LTDC_VSYNC */
+ <STM32_PINMUX('I', 10, ANALOG)>, /* LTDC_HSYNC */
+ <STM32_PINMUX('E', 14, ANALOG)>; /* LTDC_CLK */
+ };
+ };
+
+ ltdc_pins_d: ltdc-3 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 7, AF14)>; /* LCD_CLK */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('I', 10, AF14)>, /* LCD_HSYNC */
+ <STM32_PINMUX('I', 9, AF14)>, /* LCD_VSYNC */
+ <STM32_PINMUX('E', 13, AF14)>, /* LCD_DE */
+ <STM32_PINMUX('G', 13, AF14)>, /* LCD_R0 */
+ <STM32_PINMUX('H', 3, AF14)>, /* LCD_R1 */
+ <STM32_PINMUX('H', 8, AF14)>, /* LCD_R2 */
+ <STM32_PINMUX('H', 9, AF14)>, /* LCD_R3 */
+ <STM32_PINMUX('A', 5, AF14)>, /* LCD_R4 */
+ <STM32_PINMUX('H', 11, AF14)>, /* LCD_R5 */
+ <STM32_PINMUX('H', 12, AF14)>, /* LCD_R6 */
+ <STM32_PINMUX('E', 15, AF14)>, /* LCD_R7 */
+ <STM32_PINMUX('E', 5, AF14)>, /* LCD_G0 */
+ <STM32_PINMUX('B', 0, AF14)>, /* LCD_G1 */
+ <STM32_PINMUX('H', 13, AF14)>, /* LCD_G2 */
+ <STM32_PINMUX('E', 11, AF14)>, /* LCD_G3 */
+ <STM32_PINMUX('H', 15, AF14)>, /* LCD_G4 */
+ <STM32_PINMUX('H', 4, AF9)>, /* LCD_G5 */
+ <STM32_PINMUX('I', 11, AF9)>, /* LCD_G6 */
+ <STM32_PINMUX('G', 8, AF14)>, /* LCD_G7 */
+ <STM32_PINMUX('D', 9, AF14)>, /* LCD_B0 */
+ <STM32_PINMUX('G', 12, AF14)>, /* LCD_B1 */
+ <STM32_PINMUX('G', 10, AF14)>, /* LCD_B2 */
+ <STM32_PINMUX('D', 10, AF14)>, /* LCD_B3 */
+ <STM32_PINMUX('E', 12, AF14)>, /* LCD_B4 */
+ <STM32_PINMUX('A', 3, AF14)>, /* LCD_B5 */
+ <STM32_PINMUX('B', 8, AF14)>, /* LCD_B6 */
+ <STM32_PINMUX('I', 7, AF14)>; /* LCD_B7 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+
+ ltdc_sleep_pins_d: ltdc-sleep-3 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 7, ANALOG)>, /* LCD_CLK */
+ <STM32_PINMUX('I', 10, ANALOG)>, /* LCD_HSYNC */
+ <STM32_PINMUX('I', 9, ANALOG)>, /* LCD_VSYNC */
+ <STM32_PINMUX('E', 13, ANALOG)>, /* LCD_DE */
+ <STM32_PINMUX('G', 13, ANALOG)>, /* LCD_R0 */
+ <STM32_PINMUX('H', 3, ANALOG)>, /* LCD_R1 */
+ <STM32_PINMUX('H', 8, ANALOG)>, /* LCD_R2 */
+ <STM32_PINMUX('H', 9, ANALOG)>, /* LCD_R3 */
+ <STM32_PINMUX('A', 5, ANALOG)>, /* LCD_R4 */
+ <STM32_PINMUX('H', 11, ANALOG)>, /* LCD_R5 */
+ <STM32_PINMUX('H', 12, ANALOG)>, /* LCD_R6 */
+ <STM32_PINMUX('E', 15, ANALOG)>, /* LCD_R7 */
+ <STM32_PINMUX('E', 5, ANALOG)>, /* LCD_G0 */
+ <STM32_PINMUX('B', 0, ANALOG)>, /* LCD_G1 */
+ <STM32_PINMUX('H', 13, ANALOG)>, /* LCD_G2 */
+ <STM32_PINMUX('E', 11, ANALOG)>, /* LCD_G3 */
+ <STM32_PINMUX('H', 15, ANALOG)>, /* LCD_G4 */
+ <STM32_PINMUX('H', 4, ANALOG)>, /* LCD_G5 */
+ <STM32_PINMUX('I', 11, ANALOG)>, /* LCD_G6 */
+ <STM32_PINMUX('G', 8, ANALOG)>, /* LCD_G7 */
+ <STM32_PINMUX('D', 9, ANALOG)>, /* LCD_B0 */
+ <STM32_PINMUX('G', 12, ANALOG)>, /* LCD_B1 */
+ <STM32_PINMUX('G', 10, ANALOG)>, /* LCD_B2 */
+ <STM32_PINMUX('D', 10, ANALOG)>, /* LCD_B3 */
+ <STM32_PINMUX('E', 12, ANALOG)>, /* LCD_B4 */
+ <STM32_PINMUX('A', 3, ANALOG)>, /* LCD_B5 */
+ <STM32_PINMUX('B', 8, ANALOG)>, /* LCD_B6 */
+ <STM32_PINMUX('I', 7, ANALOG)>; /* LCD_B7 */
+ };
+ };
+
m_can1_pins_a: m-can1-0 {
pins1 {
pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
@@ -501,6 +780,46 @@
};
};
+ m_can1_pins_b: m-can1-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
+ bias-disable;
+ };
+ };
+
+ m_can1_sleep_pins_b: m_can1-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 12, ANALOG)>, /* CAN1_TX */
+ <STM32_PINMUX('A', 11, ANALOG)>; /* CAN1_RX */
+ };
+ };
+
+ m_can2_pins_a: m-can2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
+ bias-disable;
+ };
+ };
+
+ m_can2_sleep_pins_a: m_can2-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 13, ANALOG)>, /* CAN2_TX */
+ <STM32_PINMUX('B', 5, ANALOG)>; /* CAN2_RX */
+ };
+ };
+
pwm1_pins_a: pwm1-0 {
pins {
pinmux = <STM32_PINMUX('E', 9, AF1)>, /* TIM1_CH1 */
@@ -550,6 +869,21 @@
};
};
+ pwm3_pins_b: pwm3-1 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 5, AF2)>; /* TIM3_CH2 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm3_sleep_pins_b: pwm3-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 5, ANALOG)>; /* TIM3_CH2 */
+ };
+ };
+
pwm4_pins_a: pwm4-0 {
pins {
pinmux = <STM32_PINMUX('D', 14, AF2)>, /* TIM4_CH3 */
@@ -597,6 +931,25 @@
};
};
+ pwm5_pins_b: pwm5-1 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 11, AF2)>, /* TIM5_CH2 */
+ <STM32_PINMUX('H', 12, AF2)>, /* TIM5_CH3 */
+ <STM32_PINMUX('I', 0, AF2)>; /* TIM5_CH4 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm5_sleep_pins_b: pwm5-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 11, ANALOG)>, /* TIM5_CH2 */
+ <STM32_PINMUX('H', 12, ANALOG)>, /* TIM5_CH3 */
+ <STM32_PINMUX('I', 0, ANALOG)>; /* TIM5_CH4 */
+ };
+ };
+
pwm8_pins_a: pwm8-0 {
pins {
pinmux = <STM32_PINMUX('I', 2, AF3)>; /* TIM8_CH4 */
@@ -710,7 +1063,7 @@
};
};
- sai2a_sleep_pins_a: sai2a-1 {
+ sai2a_sleep_pins_a: sai2a-sleep-0 {
pins {
pinmux = <STM32_PINMUX('I', 5, ANALOG)>, /* SAI2_SCK_A */
<STM32_PINMUX('I', 6, ANALOG)>, /* SAI2_SD_A */
@@ -720,7 +1073,7 @@
};
- sai2a_pins_b: sai2a-2 {
+ sai2a_pins_b: sai2a-1 {
pins1 {
pinmux = <STM32_PINMUX('I', 6, AF10)>, /* SAI2_SD_A */
<STM32_PINMUX('I', 7, AF10)>, /* SAI2_FS_A */
@@ -731,7 +1084,7 @@
};
};
- sai2a_sleep_pins_b: sai2a-sleep-3 {
+ sai2a_sleep_pins_b: sai2a-sleep-1 {
pins {
pinmux = <STM32_PINMUX('I', 6, ANALOG)>, /* SAI2_SD_A */
<STM32_PINMUX('I', 7, ANALOG)>, /* SAI2_FS_A */
@@ -739,6 +1092,25 @@
};
};
+ sai2a_pins_c: sai2a-4 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 13, AF10)>, /* SAI2_SCK_A */
+ <STM32_PINMUX('D', 11, AF10)>, /* SAI2_SD_A */
+ <STM32_PINMUX('D', 12, AF10)>; /* SAI2_FS_A */
+ slew-rate = <0>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sai2a_sleep_pins_c: sai2a-5 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 13, ANALOG)>, /* SAI2_SCK_A */
+ <STM32_PINMUX('D', 11, ANALOG)>, /* SAI2_SD_A */
+ <STM32_PINMUX('D', 12, ANALOG)>; /* SAI2_FS_A */
+ };
+ };
+
sai2b_pins_a: sai2b-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 12, AF10)>, /* SAI2_SCK_B */
@@ -754,7 +1126,7 @@
};
};
- sai2b_sleep_pins_a: sai2b-1 {
+ sai2b_sleep_pins_a: sai2b-sleep-0 {
pins {
pinmux = <STM32_PINMUX('F', 11, ANALOG)>, /* SAI2_SD_B */
<STM32_PINMUX('E', 12, ANALOG)>, /* SAI2_SCK_B */
@@ -763,14 +1135,27 @@
};
};
- sai2b_pins_b: sai2b-2 {
+ sai2b_pins_b: sai2b-1 {
pins {
pinmux = <STM32_PINMUX('F', 11, AF10)>; /* SAI2_SD_B */
bias-disable;
};
};
- sai2b_sleep_pins_b: sai2b-3 {
+ sai2b_sleep_pins_b: sai2b-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 11, ANALOG)>; /* SAI2_SD_B */
+ };
+ };
+
+ sai2b_pins_c: sai2a-4 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 11, AF10)>; /* SAI2_SD_B */
+ bias-disable;
+ };
+ };
+
+ sai2b_sleep_pins_c: sai2a-sleep-5 {
pins {
pinmux = <STM32_PINMUX('F', 11, ANALOG)>; /* SAI2_SD_B */
};
@@ -785,7 +1170,7 @@
};
};
- sai4a_sleep_pins_a: sai4a-1 {
+ sai4a_sleep_pins_a: sai4a-sleep-0 {
pins {
pinmux = <STM32_PINMUX('B', 5, ANALOG)>; /* SAI4_SD_A */
};
@@ -869,6 +1254,30 @@
};
};
+ sdmmc1_dir_pins_b: sdmmc1-dir-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 2, AF11)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('E', 14, AF11)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, AF11)>; /* SDMMC1_CDIR */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2{
+ pinmux = <STM32_PINMUX('E', 4, AF8)>; /* SDMMC1_CKIN */
+ bias-pull-up;
+ };
+ };
+
+ sdmmc1_dir_sleep_pins_b: sdmmc1-dir-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 2, ANALOG)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('E', 14, ANALOG)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
+ <STM32_PINMUX('E', 4, ANALOG)>; /* SDMMC1_CKIN */
+ };
+ };
+
sdmmc2_b4_pins_a: sdmmc2-b4-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
@@ -987,6 +1396,48 @@
};
};
+ sdmmc2_d47_pins_b: sdmmc2-d47-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 9, AF10)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, AF10)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, AF10)>; /* SDMMC2_D7 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sdmmc2_d47_sleep_pins_b: sdmmc2-d47-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 9, ANALOG)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, ANALOG)>; /* SDMMC2_D7 */
+ };
+ };
+
+ sdmmc2_d47_pins_c: sdmmc2-d47-2 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 15, AF9)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, AF10)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, AF10)>; /* SDMMC2_D7 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ sdmmc2_d47_sleep_pins_c: sdmmc2-d47-sleep-2 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 8, ANALOG)>, /* SDMMC2_D4 */
+ <STM32_PINMUX('A', 15, ANALOG)>, /* SDMMC2_D5 */
+ <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC2_D6 */
+ <STM32_PINMUX('C', 7, ANALOG)>; /* SDMMC2_D7 */
+ };
+ };
+
sdmmc3_b4_pins_a: sdmmc3-b4-0 {
pins1 {
pinmux = <STM32_PINMUX('F', 0, AF9)>, /* SDMMC3_D0 */
@@ -1041,6 +1492,60 @@
};
};
+ sdmmc3_b4_pins_b: sdmmc3-b4-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 0, AF9)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('F', 4, AF9)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, AF10)>, /* SDMMC3_D3 */
+ <STM32_PINMUX('D', 0, AF10)>; /* SDMMC3_CMD */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('G', 15, AF10)>; /* SDMMC3_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ sdmmc3_b4_od_pins_b: sdmmc3-b4-od-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 0, AF9)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('F', 4, AF9)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, AF10)>; /* SDMMC3_D3 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('G', 15, AF10)>; /* SDMMC3_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('D', 0, AF10)>; /* SDMMC2_CMD */
+ slew-rate = <1>;
+ drive-open-drain;
+ bias-pull-up;
+ };
+ };
+
+ sdmmc3_b4_sleep_pins_b: sdmmc3-b4-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 0, ANALOG)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('F', 4, ANALOG)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, ANALOG)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, ANALOG)>, /* SDMMC3_D3 */
+ <STM32_PINMUX('G', 15, ANALOG)>, /* SDMMC3_CK */
+ <STM32_PINMUX('D', 0, ANALOG)>; /* SDMMC3_CMD */
+ };
+ };
+
spdifrx_pins_a: spdifrx-0 {
pins {
pinmux = <STM32_PINMUX('G', 12, AF8)>; /* SPDIF_IN1 */
@@ -1048,12 +1553,75 @@
};
};
- spdifrx_sleep_pins_a: spdifrx-1 {
+ spdifrx_sleep_pins_a: spdifrx-sleep-0 {
pins {
pinmux = <STM32_PINMUX('G', 12, ANALOG)>; /* SPDIF_IN1 */
};
};
+ spi2_pins_a: spi2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 10, AF5)>, /* SPI1_SCK */
+ <STM32_PINMUX('I', 3, AF5)>; /* SPI1_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('I', 2, AF5)>; /* SPI1_MISO */
+ bias-disable;
+ };
+ };
+
+ usart2_pins_a: usart2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 5, AF7)>, /* USART2_TX */
+ <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 6, AF7)>, /* USART2_RX */
+ <STM32_PINMUX('D', 3, AF7)>; /* USART2_CTS_NSS */
+ bias-disable;
+ };
+ };
+
+ usart2_sleep_pins_a: usart2-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 5, ANALOG)>, /* USART2_TX */
+ <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
+ <STM32_PINMUX('D', 6, ANALOG)>, /* USART2_RX */
+ <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
+ };
+ };
+
+ usart2_pins_b: usart2-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 5, AF7)>, /* USART2_TX */
+ <STM32_PINMUX('A', 1, AF7)>; /* USART2_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 4, AF7)>, /* USART2_RX */
+ <STM32_PINMUX('E', 15, AF7)>; /* USART2_CTS_NSS */
+ bias-disable;
+ };
+ };
+
+ usart2_sleep_pins_b: usart2-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 5, ANALOG)>, /* USART2_TX */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* USART2_RTS */
+ <STM32_PINMUX('F', 4, ANALOG)>, /* USART2_RX */
+ <STM32_PINMUX('E', 15, ANALOG)>; /* USART2_CTS_NSS */
+ };
+ };
+
usart3_pins_a: usart3-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 10, AF7)>; /* USART3_TX */
@@ -1093,6 +1661,19 @@
};
};
+ uart4_pins_c: uart4-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 2, AF8)>; /* UART4_RX */
+ bias-disable;
+ };
+ };
+
uart7_pins_a: uart7-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 8, AF7)>; /* UART7_TX */
@@ -1108,6 +1689,19 @@
};
};
+ uart7_pins_b: uart7-1 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 7, AF7)>; /* UART7_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 6, AF7)>; /* UART7_RX */
+ bias-disable;
+ };
+ };
+
uart8_pins_a: uart8-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 1, AF8)>; /* UART8_TX */
@@ -1145,7 +1739,7 @@
};
};
- i2c2_pins_sleep_b2: i2c2-1 {
+ i2c2_sleep_pins_b2: i2c2-sleep-0 {
pins {
pinmux = <STM32_PINMUX('Z', 0, ANALOG)>; /* I2C2_SCL */
};
@@ -1161,7 +1755,7 @@
};
};
- i2c4_pins_sleep_a: i2c4-1 {
+ i2c4_sleep_pins_a: i2c4-sleep-0 {
pins {
pinmux = <STM32_PINMUX('Z', 4, ANALOG)>, /* I2C4_SCL */
<STM32_PINMUX('Z', 5, ANALOG)>; /* I2C4_SDA */
@@ -1182,4 +1776,18 @@
bias-disable;
};
};
+
+ spi4_pins_a: spi4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 12, AF5)>, /* SPI4_SCK */
+ <STM32_PINMUX('E', 6, AF5)>; /* SPI4_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 13, AF5)>; /* SPI4_MISO */
+ bias-disable;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
index 3ea05ba48215..36f38a95b4de 100644
--- a/arch/arm/boot/dts/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -24,10 +24,8 @@
};
psci {
- compatible = "arm,psci";
+ compatible = "arm,psci-1.0";
method = "smc";
- cpu_off = <0x84000002>;
- cpu_on = <0x84000003>;
};
intc: interrupt-controller@a0021000 {
@@ -475,7 +473,7 @@
};
i2c1: i2c@40012000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
reg = <0x40012000 0x400>;
interrupt-names = "event", "error";
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
@@ -484,12 +482,13 @@
resets = <&rcc I2C1_R>;
#address-cells = <1>;
#size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x1>;
wakeup-source;
status = "disabled";
};
i2c2: i2c@40013000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
reg = <0x40013000 0x400>;
interrupt-names = "event", "error";
interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
@@ -498,12 +497,13 @@
resets = <&rcc I2C2_R>;
#address-cells = <1>;
#size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x2>;
wakeup-source;
status = "disabled";
};
i2c3: i2c@40014000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
reg = <0x40014000 0x400>;
interrupt-names = "event", "error";
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
@@ -512,12 +512,13 @@
resets = <&rcc I2C3_R>;
#address-cells = <1>;
#size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x4>;
wakeup-source;
status = "disabled";
};
i2c5: i2c@40015000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
reg = <0x40015000 0x400>;
interrupt-names = "event", "error";
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
@@ -526,6 +527,7 @@
resets = <&rcc I2C5_R>;
#address-cells = <1>;
#size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x10>;
wakeup-source;
status = "disabled";
};
@@ -550,14 +552,14 @@
dac1: dac@1 {
compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
+ #io-channel-cells = <1>;
reg = <1>;
status = "disabled";
};
dac2: dac@2 {
compatible = "st,stm32-dac";
- #io-channels-cells = <1>;
+ #io-channel-cells = <1>;
reg = <2>;
status = "disabled";
};
@@ -1124,6 +1126,11 @@
};
};
+ pwr_mcu: pwr_mcu@50001014 {
+ compatible = "syscon";
+ reg = <0x50001014 0x4>;
+ };
+
exti: interrupt-controller@5000d000 {
compatible = "st,stm32mp1-exti", "syscon";
interrupt-controller;
@@ -1423,6 +1430,11 @@
clock-names = "lcd";
resets = <&rcc LTDC_R>;
status = "disabled";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
iwdg2: watchdog@5a002000 {
@@ -1476,7 +1488,7 @@
};
i2c4: i2c@5c002000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
reg = <0x5c002000 0x400>;
interrupt-names = "event", "error";
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
@@ -1485,6 +1497,7 @@
resets = <&rcc I2C4_R>;
#address-cells = <1>;
#size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x8>;
wakeup-source;
status = "disabled";
};
@@ -1512,7 +1525,7 @@
};
i2c6: i2c@5c009000 {
- compatible = "st,stm32f7-i2c";
+ compatible = "st,stm32mp15-i2c";
reg = <0x5c009000 0x400>;
interrupt-names = "event", "error";
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
@@ -1521,6 +1534,7 @@
resets = <&rcc I2C6_R>;
#address-cells = <1>;
#size-cells = <0>;
+ st,syscfg-fmp = <&syscfg 0x4 0x20>;
wakeup-source;
status = "disabled";
};
@@ -1700,6 +1714,7 @@
resets = <&rcc MCU_R>;
st,syscfg-holdboot = <&rcc 0x10C 0x1>;
st,syscfg-tz = <&rcc 0x000 0x1>;
+ st,syscfg-pdds = <&pwr_mcu 0x0 0x1>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/stm32mp157.dtsi b/arch/arm/boot/dts/stm32mp157.dtsi
index 3f0a4a91cce6..54e73ccea446 100644
--- a/arch/arm/boot/dts/stm32mp157.dtsi
+++ b/arch/arm/boot/dts/stm32mp157.dtsi
@@ -15,7 +15,6 @@
clocks = <&rcc GPU>, <&rcc GPU_K>;
clock-names = "bus" ,"core";
resets = <&rcc GPU_R>;
- status = "disabled";
};
dsi: dsi@5a000000 {
@@ -25,7 +24,14 @@
clock-names = "pclk", "ref", "px_clk";
resets = <&rcc DSI_R>;
reset-names = "apb";
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/stm32mp157a-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-avenger96.dts
index 425175f7d83c..8a6eaca994d1 100644
--- a/arch/arm/boot/dts/stm32mp157a-avenger96.dts
+++ b/arch/arm/boot/dts/stm32mp157a-avenger96.dts
@@ -2,318 +2,10 @@
/*
* Copyright (C) Linaro Ltd 2019 - All Rights Reserved
* Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
*/
/dts-v1/;
-#include "stm32mp157.dtsi"
-#include "stm32mp15-pinctrl.dtsi"
-#include "stm32mp15xxac-pinctrl.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/mfd/st,stpmic1.h>
-
-/ {
- model = "Arrow Electronics STM32MP157A Avenger96 board";
- compatible = "arrow,stm32mp157a-avenger96", "st,stm32mp157";
-
- aliases {
- ethernet0 = &ethernet0;
- mmc0 = &sdmmc1;
- serial0 = &uart4;
- serial1 = &uart7;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- memory@c0000000 {
- device_type = "memory";
- reg = <0xc0000000 0x40000000>;
- };
-
- led {
- compatible = "gpio-leds";
- led1 {
- label = "green:user1";
- gpios = <&gpioz 7 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
- default-state = "off";
- };
-
- led2 {
- label = "green:user2";
- gpios = <&gpiof 3 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc0";
- default-state = "off";
- };
-
- led3 {
- label = "green:user3";
- gpios = <&gpiog 0 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "mmc1";
- default-state = "off";
- };
-
- led4 {
- label = "green:user3";
- gpios = <&gpiog 1 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "none";
- default-state = "off";
- panic-indicator;
- };
-
- led5 {
- label = "yellow:wifi";
- gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "phy0tx";
- default-state = "off";
- };
-
- led6 {
- label = "blue:bt";
- gpios = <&gpioz 6 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "bluetooth-power";
- default-state = "off";
- };
- };
-};
-
-&ethernet0 {
- status = "okay";
- pinctrl-0 = <&ethernet0_rgmii_pins_a>;
- pinctrl-1 = <&ethernet0_rgmii_pins_sleep_a>;
- pinctrl-names = "default", "sleep";
- phy-mode = "rgmii";
- max-speed = <1000>;
- phy-handle = <&phy0>;
-
- mdio0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@7 {
- reg = <7>;
- };
- };
-};
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins_b>;
- i2c-scl-rising-time-ns = <185>;
- i2c-scl-falling-time-ns = <20>;
- status = "okay";
- /delete-property/dmas;
- /delete-property/dma-names;
-};
-
-&i2c2 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_pins_b1 &i2c2_pins_b2>;
- i2c-scl-rising-time-ns = <185>;
- i2c-scl-falling-time-ns = <20>;
- status = "okay";
- /delete-property/dmas;
- /delete-property/dma-names;
-};
-
-&i2c4 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c4_pins_a>;
- i2c-scl-rising-time-ns = <185>;
- i2c-scl-falling-time-ns = <20>;
- status = "okay";
- /delete-property/dmas;
- /delete-property/dma-names;
-
- pmic: stpmic@33 {
- compatible = "st,stpmic1";
- reg = <0x33>;
- interrupts-extended = <&exti 55 IRQ_TYPE_EDGE_FALLING>;
- interrupt-controller;
- #interrupt-cells = <2>;
- status = "okay";
-
- regulators {
- compatible = "st,stpmic1-regulators";
-
- ldo1-supply = <&v3v3>;
- ldo2-supply = <&v3v3>;
- ldo3-supply = <&vdd_ddr>;
- ldo5-supply = <&v3v3>;
- ldo6-supply = <&v3v3>;
- pwr_sw1-supply = <&bst_out>;
- pwr_sw2-supply = <&bst_out>;
-
- vddcore: buck1 {
- regulator-name = "vddcore";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-initial-mode = <0>;
- regulator-over-current-protection;
- };
-
- vdd_ddr: buck2 {
- regulator-name = "vdd_ddr";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-initial-mode = <0>;
- regulator-over-current-protection;
- };
-
- vdd: buck3 {
- regulator-name = "vdd";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-initial-mode = <0>;
- regulator-over-current-protection;
- };
-
- v3v3: buck4 {
- regulator-name = "v3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-over-current-protection;
- regulator-initial-mode = <0>;
- };
-
- vdda: ldo1 {
- regulator-name = "vdda";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- interrupts = <IT_CURLIM_LDO1 0>;
- interrupt-parent = <&pmic>;
- };
-
- v2v8: ldo2 {
- regulator-name = "v2v8";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- interrupts = <IT_CURLIM_LDO2 0>;
- interrupt-parent = <&pmic>;
- };
-
- vtt_ddr: ldo3 {
- regulator-name = "vtt_ddr";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <750000>;
- regulator-always-on;
- regulator-over-current-protection;
- };
-
- vdd_usb: ldo4 {
- regulator-name = "vdd_usb";
- interrupts = <IT_CURLIM_LDO4 0>;
- interrupt-parent = <&pmic>;
- };
-
- vdd_sd: ldo5 {
- regulator-name = "vdd_sd";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- interrupts = <IT_CURLIM_LDO5 0>;
- interrupt-parent = <&pmic>;
- regulator-boot-on;
- };
-
- v1v8: ldo6 {
- regulator-name = "v1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- interrupts = <IT_CURLIM_LDO6 0>;
- interrupt-parent = <&pmic>;
- regulator-enable-ramp-delay = <300000>;
- };
-
- vref_ddr: vref_ddr {
- regulator-name = "vref_ddr";
- regulator-always-on;
- };
-
- bst_out: boost {
- regulator-name = "bst_out";
- interrupts = <IT_OCP_BOOST 0>;
- interrupt-parent = <&pmic>;
- };
-
- vbus_otg: pwr_sw1 {
- regulator-name = "vbus_otg";
- interrupts = <IT_OCP_OTG 0>;
- interrupt-parent = <&pmic>;
- };
-
- vbus_sw: pwr_sw2 {
- regulator-name = "vbus_sw";
- interrupts = <IT_OCP_SWOUT 0>;
- interrupt-parent = <&pmic>;
- regulator-active-discharge = <1>;
- };
- };
-
- onkey {
- compatible = "st,stpmic1-onkey";
- interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 1>;
- interrupt-names = "onkey-falling", "onkey-rising";
- status = "okay";
- };
-
- watchdog {
- compatible = "st,stpmic1-wdt";
- status = "disabled";
- };
- };
-};
-
-&iwdg2 {
- timeout-sec = <32>;
- status = "okay";
-};
-
-&pwr_regulators {
- vdd-supply = <&vdd>;
- vdd_3v3_usbfs-supply = <&vdd_usb>;
-};
-
-&rng1 {
- status = "okay";
-};
-
-&rtc {
- status = "okay";
-};
-
-&sdmmc1 {
- pinctrl-names = "default", "opendrain", "sleep";
- pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
- pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
- pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
- cd-gpios = <&gpioi 8 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
- disable-wp;
- st,sig-dir;
- st,neg-edge;
- st,use-ckin;
- bus-width = <4>;
- vmmc-supply = <&vdd_sd>;
- status = "okay";
-};
-
-&uart4 {
- /* On Low speed expansion header */
- label = "LS-UART1";
- pinctrl-names = "default";
- pinctrl-0 = <&uart4_pins_b>;
- status = "okay";
-};
-
-&uart7 {
- /* On Low speed expansion header */
- label = "LS-UART0";
- pinctrl-names = "default";
- pinctrl-0 = <&uart7_pins_a>;
- status = "okay";
-};
+/* This DT is here only for backward compatibility */
+#include "stm32mp157a-dhcor-avenger96.dts"
diff --git a/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
new file mode 100644
index 000000000000..2e3c9fbb4eb3
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-dhcor-avenger96.dts
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (C) Linaro Ltd 2019 - All Rights Reserved
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
+ *
+ * DHCOR STM32MP1 variant:
+ * DHCR-STM32MP157A-C065-R102-V18-SPI-C-01LG
+ * DHCOR PCB number: 586-100 or newer
+ * Avenger96 PCB number: 588-200 or newer
+ */
+
+/dts-v1/;
+
+#include "stm32mp157.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15xx-dhcor-som.dtsi"
+#include "stm32mp15xx-dhcor-avenger96.dtsi"
+
+/ {
+ model = "Arrow Electronics STM32MP157A Avenger96 board";
+ compatible = "arrow,stm32mp157a-avenger96", "dh,stm32mp157a-dhcor-som",
+ "st,stm32mp157";
+};
+
+&m_can1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can1_pins_b>;
+ pinctrl-1 = <&m_can1_sleep_pins_b>;
+ status = "disabled";
+};
+
+&m_can2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can2_pins_a>;
+ pinctrl-1 = <&m_can2_sleep_pins_a>;
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-iot-box.dts b/arch/arm/boot/dts/stm32mp157a-iot-box.dts
new file mode 100644
index 000000000000..70f394b4d3c0
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-iot-box.dts
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2020 Manivannan Sadhasivam
+ */
+
+/dts-v1/;
+#include "stm32mp157a-stinger96.dtsi"
+
+/ {
+ model = "Shiratech STM32MP157A IoT Box";
+ compatible = "shiratech,stm32mp157a-iot-box", "st,stm32mp157";
+
+ wlan_pwr: regulator-wlan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "wl-reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpiog 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&i2c2 {
+ ccs811@5b {
+ compatible = "ams,ccs811";
+ reg = <0x5b>;
+ wakeup-gpios = <&gpioa 12 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpioa 11 GPIO_ACTIVE_LOW>;
+ };
+};
+
+/* WiFi */
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_b>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a>;
+ broken-cd;
+ non-removable;
+ st,neg-edge;
+ bus-width = <1>;
+ vmmc-supply = <&wlan_pwr>;
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+/* Bluetooth */
+&uart4 {
+ /* Note: HW flow control is broken, hence using custom CTS/RTS gpios */
+ /delete-property/st,hw-flow-ctrl;
+ cts-gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ rts-gpios = <&gpiob 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ bluetooth {
+ shutdown-gpios = <&gpiog 2 GPIO_ACTIVE_HIGH>;
+ compatible = "brcm,bcm43438-bt";
+ max-speed = <115200>;
+ };
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-stinger96.dts b/arch/arm/boot/dts/stm32mp157a-stinger96.dts
new file mode 100644
index 000000000000..249a53877512
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-stinger96.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2020 Manivannan Sadhasivam
+ */
+
+/dts-v1/;
+#include "stm32mp157a-stinger96.dtsi"
+
+/ {
+ model = "Shiratech STM32MP157A Stinger96 board";
+ compatible = "shiratech,stm32mp157a-stinger96", "st,stm32mp157";
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi b/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
new file mode 100644
index 000000000000..58275bcf9e26
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-stinger96.dtsi
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2020 Manivannan Sadhasivam
+ */
+
+/dts-v1/;
+
+#include "stm32mp157.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+ aliases {
+ mmc0 = &sdmmc1;
+ serial0 = &uart4;
+ serial1 = &uart7;
+ serial2 = &usart2;
+ spi0 = &spi4;
+ };
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x10000000>;
+ };
+
+ led {
+ compatible = "gpio-leds";
+
+ led1 {
+ label = "green:user1";
+ gpios = <&gpioa 13 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led2 {
+ label = "green:user2";
+ gpios = <&gpioh 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led3 {
+ label = "green:user3";
+ gpios = <&gpioh 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ led4 {
+ label = "green:user4";
+ gpios = <&gpiof 12 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ panic-indicator;
+ };
+ };
+
+ sd_switch: regulator-sd_switch {
+ compatible = "regulator-gpio";
+ regulator-name = "sd_switch";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-type = "voltage";
+ regulator-always-on;
+
+ gpios = <&gpioa 8 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+ states = <1800000 0x1>,
+ <2900000 0x0>;
+ };
+};
+
+/* Only headless mode is supported */
+&gpu {
+ status = "disabled";
+};
+
+/* LS-I2C0 */
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ i2c-scl-rising-time-ns = <1000>;
+ i2c-scl-falling-time-ns = <300>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo1-supply = <&v3v3>;
+ ldo2-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo5-supply = <&v3v3>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ vdda: ldo1 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ v2v9: ldo2 {
+ regulator-name = "v2v9";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdd_sd: ldo5 {
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ regulator-active-discharge;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 1>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_b>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_b>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_b>;
+ broken-cd;
+ disable-wp;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+ bus-width = <4>;
+ vmmc-supply = <&vdd_sd>;
+ vqmmc-supply = <&sd_switch>;
+ status = "okay";
+};
+
+/* LS-SPI0 */
+&spi4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi4_pins_a>;
+ cs-gpios = <&gpioe 11 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+/* BG96 */
+&usart2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&usart2_pins_b>;
+ pinctrl-1 = <&usart2_sleep_pins_b>;
+ st,hw-flow-ctrl;
+ status = "okay";
+};
+
+/* LS-UART0 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_c>;
+ st,hw-flow-ctrl;
+ status = "okay";
+};
+
+/* Debug console */
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7_pins_b>;
+ status = "okay";
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ phy-names = "usb";
+ status = "okay";
+};
+
+&usbotg_hs {
+ dr_mode = "peripheral";
+ pinctrl-0 = <&usbotg_hs_pins_a>;
+ pinctrl-names = "default";
+ phy-names = "usb2-phy";
+ phys = <&usbphyc_port1 0>;
+ vbus-supply = <&vbus_otg>;
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
index af99e132e1b1..197aa98d49e2 100644
--- a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
+++ b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
@@ -1,160 +1,23 @@
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
- * Copyright (C) 2019 Marek Vasut <marex@denx.de>
+ * Copyright (C) 2019-2020 Marek Vasut <marex@denx.de>
+ *
+ * DHCOM STM32MP1 variant:
+ * DHCM-STM32MP157C-C065-R102-F0819-SPI-E2-CAN2-SDR104-RTC-WBT-T-DSI-I-01D2
+ * DHCOR PCB number: 587-200 or newer
+ * PDK2 PCB number: 516-400 or newer
*/
+/dts-v1/;
-#include "stm32mp157c-dhcom-som.dtsi"
-#include <dt-bindings/pwm/pwm.h>
+#include "stm32mp157.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15xx-dhcom-som.dtsi"
+#include "stm32mp15xx-dhcom-pdk2.dtsi"
/ {
- model = "STMicroelectronics STM32MP157C DHCOM Premium Developer Kit (2)";
- compatible = "dh,stm32mp157c-dhcom-pdk2", "st,stm32mp157";
-
- aliases {
- serial0 = &uart4;
- serial1 = &usart3;
- serial2 = &uart8;
- ethernet0 = &ethernet0;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- clk_ext_audio_codec: clock-codec {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
-
- display_bl: display-bl {
- compatible = "pwm-backlight";
- pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
- default-brightness-level = <8>;
- enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
- status = "okay";
- };
-
- ethernet_vio: vioregulator {
- compatible = "regulator-fixed";
- regulator-name = "vio";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
- regulator-always-on;
- regulator-boot-on;
- };
-
- panel {
- compatible = "edt,etm0700g0edh6";
- backlight = <&display_bl>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
-
- sound {
- compatible = "audio-graph-card";
- routing =
- "MIC_IN", "Capture",
- "Capture", "Mic Bias",
- "Playback", "HP_OUT";
- dais = <&sai2a_port &sai2b_port>;
- status = "okay";
- };
-};
-
-&cec {
- pinctrl-names = "default";
- pinctrl-0 = <&cec_pins_a>;
- status = "okay";
-};
-
-&ethernet0 {
- status = "okay";
- pinctrl-0 = <&ethernet0_rmii_pins_a>;
- pinctrl-1 = <&ethernet0_rmii_pins_sleep_a>;
- pinctrl-names = "default", "sleep";
- phy-mode = "rmii";
- max-speed = <100>;
- phy-handle = <&phy0>;
- st,eth-ref-clk-sel;
- phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
-
- mdio0 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "snps,dwmac-mdio";
-
- phy0: ethernet-phy@1 {
- reg = <1>;
- };
- };
-};
-
-&i2c5 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c5_pins_a>;
- i2c-scl-rising-time-ns = <185>;
- i2c-scl-falling-time-ns = <20>;
- status = "okay";
- /* spare dmas for other usage */
- /delete-property/dmas;
- /delete-property/dma-names;
-
- sgtl5000: codec@a {
- compatible = "fsl,sgtl5000";
- reg = <0x0a>;
- #sound-dai-cells = <0>;
- clocks = <&clk_ext_audio_codec>;
- VDDA-supply = <&v3v3>;
- VDDIO-supply = <&vdd>;
-
- sgtl5000_port: port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- sgtl5000_tx_endpoint: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&sai2a_endpoint>;
- frame-master;
- bitclock-master;
- };
-
- sgtl5000_rx_endpoint: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&sai2b_endpoint>;
- frame-master;
- bitclock-master;
- };
- };
-
- };
-
- polytouch@38 {
- compatible = "edt,edt-ft5x06";
- reg = <0x38>;
- interrupt-parent = <&gpiog>;
- interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
- linux,wakeup;
- };
-};
-
-&ltdc {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&ltdc_pins_b>;
- pinctrl-1 = <&ltdc_pins_sleep_b>;
- status = "okay";
-
- port {
- lcd_display_out: endpoint {
- remote-endpoint = <&lcd_panel_in>;
- };
- };
+ model = "DH Electronics STM32MP157C DHCOM Premium Developer Kit (2)";
+ compatible = "dh,stm32mp157c-dhcom-pdk2", "dh,stm32mp157c-dhcom-som",
+ "st,stm32mp157";
};
&m_can1 {
@@ -163,103 +26,3 @@
pinctrl-1 = <&m_can1_sleep_pins_a>;
status = "okay";
};
-
-&sai2 {
- clocks = <&rcc SAI2>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
- clock-names = "pclk", "x8k", "x11k";
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&sai2a_pins_b &sai2b_pins_b>;
- pinctrl-1 = <&sai2a_sleep_pins_b &sai2b_sleep_pins_b>;
- status = "okay";
-
- sai2a: audio-controller@4400b004 {
- #clock-cells = <0>;
- dma-names = "tx";
- clocks = <&rcc SAI2_K>;
- clock-names = "sai_ck";
- status = "okay";
-
- sai2a_port: port {
- sai2a_endpoint: endpoint {
- remote-endpoint = <&sgtl5000_tx_endpoint>;
- format = "i2s";
- mclk-fs = <512>;
- dai-tdm-slot-num = <2>;
- dai-tdm-slot-width = <16>;
- };
- };
- };
-
- sai2b: audio-controller@4400b024 {
- dma-names = "rx";
- st,sync = <&sai2a 2>;
- clocks = <&rcc SAI2_K>, <&sai2a>;
- clock-names = "sai_ck", "MCLK";
- status = "okay";
-
- sai2b_port: port {
- sai2b_endpoint: endpoint {
- remote-endpoint = <&sgtl5000_rx_endpoint>;
- format = "i2s";
- mclk-fs = <512>;
- dai-tdm-slot-num = <2>;
- dai-tdm-slot-width = <16>;
- };
- };
- };
-};
-
-&timers2 {
- /* spare dmas for other usage (un-delete to enable pwm capture) */
- /delete-property/dmas;
- /delete-property/dma-names;
- status = "okay";
- pwm2: pwm {
- pinctrl-0 = <&pwm2_pins_a>;
- pinctrl-names = "default";
- status = "okay";
- };
- timer@1 {
- status = "okay";
- };
-};
-
-&usart3 {
- pinctrl-names = "default";
- pinctrl-0 = <&usart3_pins_a>;
- status = "okay";
-};
-
-&uart8 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart8_pins_a>;
- status = "okay";
-};
-
-&usbh_ehci {
- phys = <&usbphyc_port0>;
- status = "okay";
-};
-
-&usbotg_hs {
- dr_mode = "peripheral";
- phys = <&usbphyc_port1 0>;
- phy-names = "usb2-phy";
- status = "okay";
-};
-
-&usbphyc {
- status = "okay";
-};
-
-&usbphyc_port0 {
- phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
-};
-
-&usbphyc_port1 {
- phy-supply = <&vdd_usb>;
- vdda1v1-supply = <&reg11>;
- vdda1v8-supply = <&reg18>;
-};
diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi
deleted file mode 100644
index f81dc3134135..000000000000
--- a/arch/arm/boot/dts/stm32mp157c-dhcom-som.dtsi
+++ /dev/null
@@ -1,368 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/*
- * Copyright (C) 2019 Marek Vasut <marex@denx.de>
- */
-/dts-v1/;
-
-#include "stm32mp157.dtsi"
-#include "stm32mp15xc.dtsi"
-#include "stm32mp15-pinctrl.dtsi"
-#include "stm32mp15xxaa-pinctrl.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/mfd/st,stpmic1.h>
-
-/ {
- memory@c0000000 {
- device_type = "memory";
- reg = <0xC0000000 0x40000000>;
- };
-
- reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- mcuram2: mcuram2@10000000 {
- compatible = "shared-dma-pool";
- reg = <0x10000000 0x40000>;
- no-map;
- };
-
- vdev0vring0: vdev0vring0@10040000 {
- compatible = "shared-dma-pool";
- reg = <0x10040000 0x1000>;
- no-map;
- };
-
- vdev0vring1: vdev0vring1@10041000 {
- compatible = "shared-dma-pool";
- reg = <0x10041000 0x1000>;
- no-map;
- };
-
- vdev0buffer: vdev0buffer@10042000 {
- compatible = "shared-dma-pool";
- reg = <0x10042000 0x4000>;
- no-map;
- };
-
- mcuram: mcuram@30000000 {
- compatible = "shared-dma-pool";
- reg = <0x30000000 0x40000>;
- no-map;
- };
-
- retram: retram@38000000 {
- compatible = "shared-dma-pool";
- reg = <0x38000000 0x10000>;
- no-map;
- };
- };
-};
-
-&adc {
- vdd-supply = <&vdd>;
- vdda-supply = <&vdda>;
- vref-supply = <&vdda>;
- status = "okay";
-
- adc1: adc@0 {
- st,min-sample-time-nsecs = <5000>;
- st,adc-channels = <0>;
- status = "okay";
- };
-
- adc2: adc@100 {
- st,adc-channels = <1>;
- st,min-sample-time-nsecs = <5000>;
- status = "okay";
- };
-};
-
-&dac {
- pinctrl-names = "default";
- pinctrl-0 = <&dac_ch1_pins_a &dac_ch2_pins_a>;
- vref-supply = <&vdda>;
- status = "okay";
-
- dac1: dac@1 {
- status = "okay";
- };
- dac2: dac@2 {
- status = "okay";
- };
-};
-
-&dts {
- status = "okay";
-};
-
-&gpu {
- status = "okay";
-};
-
-&i2c4 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c4_pins_a>;
- i2c-scl-rising-time-ns = <185>;
- i2c-scl-falling-time-ns = <20>;
- status = "okay";
- /* spare dmas for other usage */
- /delete-property/dmas;
- /delete-property/dma-names;
-
- rtc@32 {
- compatible = "microcrystal,rv8803";
- reg = <0x32>;
- };
-
- pmic: stpmic@33 {
- compatible = "st,stpmic1";
- reg = <0x33>;
- interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
- interrupt-controller;
- #interrupt-cells = <2>;
- status = "okay";
-
- regulators {
- compatible = "st,stpmic1-regulators";
- ldo1-supply = <&v3v3>;
- ldo2-supply = <&v3v3>;
- ldo3-supply = <&vdd_ddr>;
- ldo5-supply = <&v3v3>;
- ldo6-supply = <&v3v3>;
- pwr_sw1-supply = <&bst_out>;
- pwr_sw2-supply = <&bst_out>;
-
- vddcore: buck1 {
- regulator-name = "vddcore";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-initial-mode = <0>;
- regulator-over-current-protection;
- };
-
- vdd_ddr: buck2 {
- regulator-name = "vdd_ddr";
- regulator-min-microvolt = <1350000>;
- regulator-max-microvolt = <1350000>;
- regulator-always-on;
- regulator-initial-mode = <0>;
- regulator-over-current-protection;
- };
-
- vdd: buck3 {
- regulator-name = "vdd";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- st,mask-reset;
- regulator-initial-mode = <0>;
- regulator-over-current-protection;
- };
-
- v3v3: buck4 {
- regulator-name = "v3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-over-current-protection;
- regulator-initial-mode = <0>;
- };
-
- vdda: ldo1 {
- regulator-name = "vdda";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- interrupts = <IT_CURLIM_LDO1 0>;
- };
-
- v2v8: ldo2 {
- regulator-name = "v2v8";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- interrupts = <IT_CURLIM_LDO2 0>;
- };
-
- vtt_ddr: ldo3 {
- regulator-name = "vtt_ddr";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <750000>;
- regulator-always-on;
- regulator-over-current-protection;
- };
-
- vdd_usb: ldo4 {
- regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- interrupts = <IT_CURLIM_LDO4 0>;
- };
-
- vdd_sd: ldo5 {
- regulator-name = "vdd_sd";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- interrupts = <IT_CURLIM_LDO5 0>;
- regulator-boot-on;
- };
-
- v1v8: ldo6 {
- regulator-name = "v1v8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- interrupts = <IT_CURLIM_LDO6 0>;
- };
-
- vref_ddr: vref_ddr {
- regulator-name = "vref_ddr";
- regulator-always-on;
- regulator-over-current-protection;
- };
-
- bst_out: boost {
- regulator-name = "bst_out";
- interrupts = <IT_OCP_BOOST 0>;
- };
-
- vbus_otg: pwr_sw1 {
- regulator-name = "vbus_otg";
- interrupts = <IT_OCP_OTG 0>;
- };
-
- vbus_sw: pwr_sw2 {
- regulator-name = "vbus_sw";
- interrupts = <IT_OCP_SWOUT 0>;
- regulator-active-discharge;
- };
- };
-
- onkey {
- compatible = "st,stpmic1-onkey";
- interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
- interrupt-names = "onkey-falling", "onkey-rising";
- power-off-time-sec = <10>;
- status = "okay";
- };
-
- watchdog {
- compatible = "st,stpmic1-wdt";
- status = "disabled";
- };
- };
-
- touchscreen@49 {
- compatible = "ti,tsc2004";
- reg = <0x49>;
- vio-supply = <&v3v3>;
- interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
- };
-
- eeprom@50 {
- compatible = "atmel,24c02";
- reg = <0x50>;
- pagesize = <16>;
- };
-};
-
-&ipcc {
- status = "okay";
-};
-
-&iwdg2 {
- timeout-sec = <32>;
- status = "okay";
-};
-
-&m4_rproc {
- memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
- <&vdev0vring1>, <&vdev0buffer>;
- mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
- mbox-names = "vq0", "vq1", "shutdown";
- interrupt-parent = <&exti>;
- interrupts = <68 1>;
- status = "okay";
-};
-
-&pwr_regulators {
- vdd-supply = <&vdd>;
- vdd_3v3_usbfs-supply = <&vdd_usb>;
-};
-
-&qspi {
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a &qspi_bk2_pins_a>;
- pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a &qspi_bk2_sleep_pins_a>;
- reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
-
- flash0: mx66l51235l@0 {
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-rx-bus-width = <4>;
- spi-max-frequency = <108000000>;
- #address-cells = <1>;
- #size-cells = <1>;
- };
-};
-
-&rng1 {
- status = "okay";
-};
-
-&rtc {
- status = "okay";
-};
-
-&sdmmc1 {
- pinctrl-names = "default", "opendrain", "sleep";
- pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
- pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
- pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
- broken-cd;
- st,sig-dir;
- st,neg-edge;
- st,use-ckin;
- bus-width = <4>;
- vmmc-supply = <&vdd_sd>;
- status = "okay";
-};
-
-&sdmmc2 {
- pinctrl-names = "default", "opendrain", "sleep";
- pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a>;
- pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_a>;
- pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_a>;
- non-removable;
- no-sd;
- no-sdio;
- st,neg-edge;
- bus-width = <8>;
- vmmc-supply = <&v3v3>;
- vqmmc-supply = <&v3v3>;
- mmc-ddr-3_3v;
- status = "okay";
-};
-
-&sdmmc3 {
- pinctrl-names = "default", "opendrain", "sleep";
- pinctrl-0 = <&sdmmc3_b4_pins_a>;
- pinctrl-1 = <&sdmmc3_b4_od_pins_a>;
- pinctrl-2 = <&sdmmc3_b4_sleep_pins_a>;
- broken-cd;
- st,neg-edge;
- bus-width = <4>;
- vmmc-supply = <&v3v3>;
- vqmmc-supply = <&v3v3>;
- mmc-ddr-3_3v;
- status = "okay";
-};
-
-&uart4 {
- pinctrl-names = "default";
- pinctrl-0 = <&uart4_pins_a>;
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/stm32mp157c-dk2.dts b/arch/arm/boot/dts/stm32mp157c-dk2.dts
index 7985b80967ca..9a8a26710ac1 100644
--- a/arch/arm/boot/dts/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/stm32mp157c-dk2.dts
@@ -27,15 +27,10 @@
};
&dsi {
- #address-cells = <1>;
- #size-cells = <0>;
status = "okay";
phy-dsi-supply = <&reg18>;
ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
port@0 {
reg = <0>;
dsi_in: endpoint {
@@ -83,9 +78,6 @@
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
ltdc_ep1_out: endpoint@1 {
reg = <1>;
remote-endpoint = <&dsi_in>;
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index 9d2592db630c..32ccd50b4144 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -126,13 +126,12 @@
&gpu {
contiguous-area = <&gpu_reserved>;
- status = "okay";
};
&i2c4 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c4_pins_a>;
- pinctrl-1 = <&i2c4_pins_sleep_a>;
+ pinctrl-1 = <&i2c4_sleep_pins_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
clock-frequency = <400000>;
@@ -320,6 +319,10 @@
bus-width = <4>;
vmmc-supply = <&vdd_sd>;
vqmmc-supply = <&sd_switch>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-ddr50;
status = "okay";
};
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index 8a4c7ff31a92..b19056557ef0 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -98,15 +98,10 @@
};
&dsi {
- #address-cells = <1>;
- #size-cells = <0>;
phy-dsi-supply = <&reg18>;
status = "okay";
ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
port@0 {
reg = <0>;
dsi_in: endpoint {
@@ -141,7 +136,7 @@
&ethernet0 {
status = "okay";
pinctrl-0 = <&ethernet0_rgmii_pins_a>;
- pinctrl-1 = <&ethernet0_rgmii_pins_sleep_a>;
+ pinctrl-1 = <&ethernet0_rgmii_sleep_pins_a>;
pinctrl-names = "default", "sleep";
phy-mode = "rgmii-id";
max-speed = <1000>;
@@ -176,7 +171,7 @@
&i2c2 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c2_pins_a>;
- pinctrl-1 = <&i2c2_pins_sleep_a>;
+ pinctrl-1 = <&i2c2_sleep_pins_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
status = "okay";
@@ -230,7 +225,7 @@
&i2c5 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c5_pins_a>;
- pinctrl-1 = <&i2c5_pins_sleep_a>;
+ pinctrl-1 = <&i2c5_sleep_pins_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
status = "okay";
@@ -240,9 +235,6 @@
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
ltdc_ep0_out: endpoint@0 {
reg = <0>;
remote-endpoint = <&dsi_in>;
diff --git a/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
new file mode 100644
index 000000000000..5700e6b700d3
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157c-lxa-mc1.dts
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ * Copyright (C) 2020 Ahmad Fatoum, Pengutronix
+ */
+
+/dts-v1/;
+
+#include "stm32mp157.dtsi"
+#include "stm32mp15xx-osd32.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ model = "Linux Automation MC-1 board";
+ compatible = "lxa,stm32mp157c-mc1", "st,stm32mp157";
+
+ aliases {
+ ethernet0 = &ethernet0;
+ mmc0 = &sdmmc1;
+ mmc1 = &sdmmc2;
+ serial0 = &uart4;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&backlight_pwm 1 100000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 31 63 95 127 159 191 223 255>;
+ default-brightness-level = <7>;
+ power-supply = <&reg_5v2>; /* 3V3_BACKLIGHT */
+ };
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ led-act {
+ compatible = "gpio-leds";
+
+ led-green {
+ label = "mc1:green:act";
+ gpios = <&gpioa 13 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ led-rgb {
+ compatible = "pwm-leds";
+
+ led-red {
+ label = "mc1:red:rgb";
+ pwms = <&leds_pwm 1 1000000 0>;
+ max-brightness = <255>;
+ active-low;
+ };
+
+ led-green {
+ label = "mc1:green:rgb";
+ pwms = <&leds_pwm 2 1000000 0>;
+ max-brightness = <255>;
+ active-low;
+ };
+
+ led-blue {
+ label = "mc1:blue:rgb";
+ pwms = <&leds_pwm 3 1000000 0>;
+ max-brightness = <255>;
+ active-low;
+ };
+ };
+
+ panel: panel {
+ compatible = "edt,etm0700g0edh6", "simple-panel";
+ backlight = <&backlight>;
+ enable-gpios = <&gpiod 4 GPIO_ACTIVE_HIGH>;
+ power-supply = <&reg_3v3>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&ltdc_ep0_out>;
+ };
+ };
+ };
+
+ reg_3v3: regulator_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ vin-supply = <&v3v3>;
+ };
+
+ /* supplied by either debug board or PoE */
+ reg_5v2: regulator_5v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "5V2";
+ regulator-min-microvolt = <5200000>;
+ regulator-max-microvolt = <5200000>;
+ regulator-always-on;
+ };
+};
+
+&ethernet0 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ethernet0_rgmii_pins_b>;
+ pinctrl-1 = <&ethernet0_rgmii_sleep_pins_b>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+ mdio0 {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@3 { /* KSZ9031RN */
+ reg = <3>;
+ reset-gpios = <&gpiog 0 GPIO_ACTIVE_LOW>; /* ETH_RST# */
+ interrupt-parent = <&gpioa>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */
+ rxc-skew-ps = <1860>;
+ txc-skew-ps = <1860>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ micrel,force-master;
+ };
+ };
+};
+
+&gpioz {
+ gpio-line-names = "HWID0", "HWID1", "HWID2", "HWID3", "", "",
+ "HWID4", "HWID5";
+};
+
+&i2c5 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c5_pins_b>;
+ pinctrl-1 = <&i2c5_sleep_pins_b>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5x06";
+ interrupt-parent = <&gpiod>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>; /* TOUCH_INT# */
+ vcc-supply = <&reg_3v3>;
+ reg = <0x38>;
+ reset-gpios = <&gpiof 8 GPIO_ACTIVE_LOW>; /* TOUCH_RESET# */
+ touchscreen-size-x = <1792>;
+ touchscreen-size-y = <1024>;
+ wakeup-source;
+ };
+};
+
+&ltdc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ltdc_pins_c>;
+ pinctrl-1 = <&ltdc_sleep_pins_c>;
+ status = "okay";
+
+ port {
+ ltdc_ep0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_input>;
+ };
+ };
+};
+
+&pmic {
+ regulators {
+ buck4-supply = <&reg_5v2>; /* VIN */
+ ldo2-supply = <&reg_5v2>; /* PMIC_LDO25IN */
+ ldo5-supply = <&reg_5v2>; /* PMIC_LDO25IN */
+ boost-supply = <&reg_5v2>; /* PMIC_BSTIN */
+ pwr_sw2-supply = <&bst_out>; /* PMIC_SWIN */
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ bus-width = <4>;
+ cd-gpios = <&gpioh 3 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ no-1-8-v;
+ st,neg-edge;
+ vmmc-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&sdmmc1_b4_pins_a {
+ /*
+ * The board lacks external pull-ups on the SDMMC lines, so Class 10
+ * SD cards refuse to work; enable the internal pull-ups instead.
+ */
+ pins1 {
+ /delete-property/ bias-disable;
+ bias-pull-up;
+ };
+ pins2 {
+ /delete-property/ bias-disable;
+ bias-pull-up;
+ };
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
+ bus-width = <8>;
+ no-1-8-v;
+ no-sd;
+ no-sdio;
+ non-removable;
+ st,neg-edge;
+ vmmc-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&timers3 {
+ status = "okay";
+
+ backlight_pwm: pwm {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm3_pins_b>;
+ pinctrl-1 = <&pwm3_sleep_pins_b>;
+ status = "okay";
+ };
+};
+
+&timers5 {
+ status = "okay";
+
+ leds_pwm: pwm {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm5_pins_b>;
+ pinctrl-1 = <&pwm5_sleep_pins_b>;
+ status = "okay";
+ };
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
new file mode 100644
index 000000000000..7c4bd615b311
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-pdk2.dtsi
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (C) 2019-2020 Marek Vasut <marex@denx.de>
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ aliases {
+ serial0 = &uart4;
+ serial1 = &usart3;
+ serial2 = &uart8;
+ ethernet0 = &ethernet0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ clk_ext_audio_codec: clock-codec {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ display_bl: display-bl {
+ compatible = "pwm-backlight";
+ pwms = <&pwm2 0 500000 PWM_POLARITY_INVERTED>;
+ brightness-levels = <0 16 22 30 40 55 75 102 138 188 255>;
+ default-brightness-level = <8>;
+ enable-gpios = <&gpioi 0 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ };
+
+ ethernet_vio: vioregulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vio";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpiog 3 GPIO_ACTIVE_LOW>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #size-cells = <0>;
+ poll-interval = <20>;
+
+ /*
+ * The EXTi IRQ line 3 is shared with touchscreen and ethernet,
+ * so mark this as a polled GPIO key.
+ */
+ button-0 {
+ label = "TA1-GPIO-A";
+ linux,code = <KEY_A>;
+ gpios = <&gpiof 3 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #size-cells = <0>;
+
+ button-1 {
+ label = "TA2-GPIO-B";
+ linux,code = <KEY_B>;
+ gpios = <&gpiod 6 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ button-2 {
+ label = "TA3-GPIO-C";
+ linux,code = <KEY_C>;
+ gpios = <&gpioi 11 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ button-3 {
+ label = "TA4-GPIO-D";
+ linux,code = <KEY_D>;
+ gpios = <&gpiod 12 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+ };
+
+ led {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "green:led5";
+ gpios = <&gpiog 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-1 {
+ label = "green:led6";
+ gpios = <&gpiod 11 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-2 {
+ label = "green:led7";
+ gpios = <&gpioi 2 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-3 {
+ label = "green:led8";
+ gpios = <&gpioi 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ panel {
+ compatible = "edt,etm0700g0edh6";
+ backlight = <&display_bl>;
+
+ port {
+ lcd_panel_in: endpoint {
+ remote-endpoint = <&lcd_display_out>;
+ };
+ };
+ };
+
+ sound {
+ compatible = "audio-graph-card";
+ routing =
+ "MIC_IN", "Capture",
+ "Capture", "Mic Bias",
+ "Playback", "HP_OUT";
+ dais = <&sai2a_port &sai2b_port>;
+ status = "okay";
+ };
+};
+
+&cec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&cec_pins_a>;
+ status = "okay";
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_rmii_pins_a>;
+ pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rmii";
+ max-speed = <100>;
+ phy-handle = <&phy0>;
+ st,eth-ref-clk-sel;
+ phy-reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ phy0: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
+};
+
+&i2c2 { /* Header X22 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&i2c5 { /* Header X21 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ sgtl5000: codec@a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ #sound-dai-cells = <0>;
+ clocks = <&clk_ext_audio_codec>;
+ VDDA-supply = <&v3v3>;
+ VDDIO-supply = <&vdd>;
+
+ sgtl5000_port: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sgtl5000_tx_endpoint: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&sai2a_endpoint>;
+ frame-master;
+ bitclock-master;
+ };
+
+ sgtl5000_rx_endpoint: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&sai2b_endpoint>;
+ frame-master;
+ bitclock-master;
+ };
+ };
+
+ };
+
+ polytouch@38 {
+ compatible = "edt,edt-ft5x06";
+ reg = <0x38>;
+ interrupt-parent = <&gpiog>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO E */
+ linux,wakeup;
+ };
+};
+
+&ltdc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ltdc_pins_b>;
+ pinctrl-1 = <&ltdc_sleep_pins_b>;
+ status = "okay";
+
+ port {
+ lcd_display_out: endpoint {
+ remote-endpoint = <&lcd_panel_in>;
+ };
+ };
+};
+
+&sai2 {
+ clocks = <&rcc SAI2>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+ clock-names = "pclk", "x8k", "x11k";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sai2a_pins_b &sai2b_pins_b>;
+ pinctrl-1 = <&sai2a_sleep_pins_b &sai2b_sleep_pins_b>;
+ status = "okay";
+
+ sai2a: audio-controller@4400b004 {
+ #clock-cells = <0>;
+ dma-names = "tx";
+ clocks = <&rcc SAI2_K>;
+ clock-names = "sai_ck";
+ status = "okay";
+
+ sai2a_port: port {
+ sai2a_endpoint: endpoint {
+ remote-endpoint = <&sgtl5000_tx_endpoint>;
+ format = "i2s";
+ mclk-fs = <512>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+ };
+ };
+
+ sai2b: audio-controller@4400b024 {
+ dma-names = "rx";
+ st,sync = <&sai2a 2>;
+ clocks = <&rcc SAI2_K>, <&sai2a>;
+ clock-names = "sai_ck", "MCLK";
+ status = "okay";
+
+ sai2b_port: port {
+ sai2b_endpoint: endpoint {
+ remote-endpoint = <&sgtl5000_rx_endpoint>;
+ format = "i2s";
+ mclk-fs = <512>;
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ };
+ };
+ };
+};
+
+&timers2 {
+ /* spare dmas for other usage (un-delete to enable pwm capture) */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+ pwm2: pwm {
+ pinctrl-0 = <&pwm2_pins_a>;
+ pinctrl-names = "default";
+ status = "okay";
+ };
+ timer@1 {
+ status = "okay";
+ };
+};
+
+&usart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usart3_pins_a>;
+ status = "okay";
+};
+
+&uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8_pins_a>;
+ status = "okay";
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ status = "okay";
+};
+
+&usbotg_hs {
+ dr_mode = "peripheral";
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
new file mode 100644
index 000000000000..ba905196fb54
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) 2019-2020 Marek Vasut <marex@denx.de>
+ */
+
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xC0000000 0x40000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x1000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10041000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10041000 0x1000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x4000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+ };
+};
+
+&adc {
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdda>;
+ vref-supply = <&vdda>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,min-sample-time-nsecs = <5000>;
+ st,adc-channels = <0>;
+ status = "okay";
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <1>;
+ st,min-sample-time-nsecs = <5000>;
+ status = "okay";
+ };
+};
+
+&dac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&dac_ch1_pins_a &dac_ch2_pins_a>;
+ vref-supply = <&vdda>;
+ status = "okay";
+
+ dac1: dac@1 {
+ status = "okay";
+ };
+ dac2: dac@2 {
+ status = "okay";
+ };
+};
+
+&dts {
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ rtc@32 {
+ compatible = "microcrystal,rv8803";
+ reg = <0x32>;
+ };
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+ ldo1-supply = <&v3v3>;
+ ldo2-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo5-supply = <&v3v3>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ vdda: ldo1 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ v2v8: ldo2 {
+ regulator-name = "v2v8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdd_sd: ldo5 {
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ power-off-time-sec = <10>;
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+
+ touchscreen@49 {
+ compatible = "ti,tsc2004";
+ reg = <0x49>;
+ vio-supply = <&v3v3>;
+ interrupts-extended = <&gpioh 3 IRQ_TYPE_EDGE_FALLING>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
+&ipcc {
+ status = "okay";
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&m4_rproc {
+ memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ <&vdev0vring1>, <&vdev0buffer>;
+ mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+ mbox-names = "vq0", "vq1", "shutdown";
+ interrupt-parent = <&exti>;
+ interrupts = <68 1>;
+ status = "okay";
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a &qspi_bk2_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a &qspi_bk2_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash0: mx66l51235l@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
+ broken-cd;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+ bus-width = <4>;
+ vmmc-supply = <&vdd_sd>;
+ status = "okay";
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_a>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_a>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ st,neg-edge;
+ bus-width = <8>;
+ vmmc-supply = <&v3v3>;
+ vqmmc-supply = <&v3v3>;
+ mmc-ddr-3_3v;
+ status = "okay";
+};
+
+&sdmmc3 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc3_b4_pins_a>;
+ pinctrl-1 = <&sdmmc3_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc3_b4_sleep_pins_a>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ vqmmc-supply = <&v3v3>;
+ mmc-ddr-3_3v;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
new file mode 100644
index 000000000000..930202742a3f
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (C) Linaro Ltd 2019 - All Rights Reserved
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
+ */
+
+/* Avenger96 uses DHCOR SoM configured for 1V8 IO operation */
+#include "stm32mp15xx-dhcor-io1v8.dtsi"
+
+/ {
+ aliases {
+ ethernet0 = &ethernet0;
+ mmc0 = &sdmmc1;
+ serial0 = &uart4;
+ serial1 = &uart7;
+ serial2 = &usart2;
+ spi0 = &qspi;
+ };
+
+ /* XTal Q1 */
+ cec_clock: clk-cec-fixed {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&adv7513_out>;
+ };
+ };
+ };
+
+ led {
+ compatible = "gpio-leds";
+ led1 {
+ label = "green:user0";
+ gpios = <&gpioz 7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led2 {
+ label = "green:user1";
+ gpios = <&gpiof 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led3 {
+ label = "green:user2";
+ gpios = <&gpiog 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ led4 {
+ label = "green:user3";
+ gpios = <&gpiog 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ panic-indicator;
+ };
+ };
+
+ sd_switch: regulator-sd_switch {
+ compatible = "regulator-gpio";
+ regulator-name = "sd_switch";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-type = "voltage";
+ regulator-always-on;
+
+ gpios = <&gpioi 5 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+ states = <1800000 0x1>,
+ <2900000 0x0>;
+ };
+
+ sound {
+ compatible = "audio-graph-card";
+ label = "STM32MP1-AV96-HDMI";
+ dais = <&sai2a_port>;
+ status = "okay";
+ };
+
+ wlan_pwr: regulator-wlan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "wl-reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpioz 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&adc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc12_ain_pins_b>;
+ vdd-supply = <&vdd>;
+ vdda-supply = <&vdda>;
+ vref-supply = <&vdda>;
+ status = "okay";
+
+ adc1: adc@0 {
+ st,adc-channels = <0 1 6>;
+ st,min-sample-time-nsecs = <5000>;
+ status = "okay";
+ };
+
+ adc2: adc@100 {
+ st,adc-channels = <0 1 2>;
+ st,min-sample-time-nsecs = <5000>;
+ status = "okay";
+ };
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_rgmii_pins_c>;
+ pinctrl-1 = <&ethernet0_rgmii_sleep_pins_c>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rgmii";
+ max-speed = <1000>;
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>;
+ reset-delay-us = <1000>;
+
+ phy0: ethernet-phy@7 {
+ reg = <7>;
+
+ rxc-skew-ps = <1500>;
+ rxdv-skew-ps = <540>;
+ rxd0-skew-ps = <420>;
+ rxd1-skew-ps = <420>;
+ rxd2-skew-ps = <420>;
+ rxd3-skew-ps = <420>;
+
+ txc-skew-ps = <1440>;
+ txen-skew-ps = <540>;
+ txd0-skew-ps = <420>;
+ txd1-skew-ps = <420>;
+ txd2-skew-ps = <420>;
+ txd3-skew-ps = <420>;
+ };
+ };
+};
+
+&i2c1 { /* X6 I2C1 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_b>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&i2c2 { /* X6 I2C2 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_c>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&i2c4 {
+ hdmi-transmitter@3d {
+ compatible = "adi,adv7513";
+ reg = <0x3d>, <0x2d>, <0x4d>, <0x5d>;
+ reg-names = "main", "cec", "edid", "packet";
+ clocks = <&cec_clock>;
+ clock-names = "cec";
+
+ avdd-supply = <&v3v3>;
+ dvdd-supply = <&v3v3>;
+ pvdd-supply = <&v3v3>;
+ dvdd-3v-supply = <&v3v3>;
+ bgvdd-supply = <&v3v3>;
+
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&gpiog>;
+
+ status = "okay";
+
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+ adi,input-style = <1>;
+ adi,input-justification = "evenly";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7513_in: endpoint {
+ remote-endpoint = <&ltdc_ep0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7513_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ adv7513_i2s0: endpoint {
+ remote-endpoint = <&sai2a_endpoint>;
+ };
+ };
+ };
+ };
+};
+
+&ltdc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ltdc_pins_d>;
+ pinctrl-1 = <&ltdc_sleep_pins_d>;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ltdc_ep0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&adv7513_in>;
+ };
+ };
+};
+
+&sai2 {
+ clocks = <&rcc SAI2>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sai2a_pins_c>;
+ pinctrl-1 = <&sai2a_sleep_pins_c>;
+ clock-names = "pclk", "x8k", "x11k";
+ status = "okay";
+
+ sai2a: audio-controller@4400b004 {
+ #clock-cells = <0>;
+ dma-names = "tx";
+ clocks = <&rcc SAI2_K>;
+ clock-names = "sai_ck";
+ status = "okay";
+
+ sai2a_port: port {
+ sai2a_endpoint: endpoint {
+ remote-endpoint = <&adv7513_i2s0>;
+ format = "i2s";
+ mclk-fs = <256>;
+ };
+ };
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_b>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_b>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_b>;
+ cd-gpios = <&gpioi 8 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+ bus-width = <4>;
+ vmmc-supply = <&vdd_sd>;
+ vqmmc-supply = <&sd_switch>;
+ status = "okay";
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
+ bus-width = <8>;
+ mmc-ddr-1_8v;
+ no-sd;
+ no-sdio;
+ non-removable;
+ st,neg-edge;
+ vmmc-supply = <&v3v3>;
+ vqmmc-supply = <&vdd_io>;
+ status = "okay";
+};
+
+&sdmmc3 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc3_b4_pins_b>;
+ pinctrl-1 = <&sdmmc3_b4_od_pins_b>;
+ pinctrl-2 = <&sdmmc3_b4_sleep_pins_b>;
+ broken-cd;
+ non-removable;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&wlan_pwr>;
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ brcmf: bcrmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&spi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ cs-gpios = <&gpioi 0 0>;
+ status = "disabled";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&uart4 {
+ /* On Low speed expansion header */
+ label = "LS-UART1";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_b>;
+ status = "okay";
+};
+
+&uart7 {
+ /* On Low speed expansion header */
+ label = "LS-UART0";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7_pins_a>;
+ status = "okay";
+};
+
+/* Bluetooth */
+&usart2 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&usart2_pins_a>;
+ pinctrl-1 = <&usart2_sleep_pins_a>;
+ st,hw-flow-ctrl;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ max-speed = <3000000>;
+ shutdown-gpios = <&gpioz 6 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ phy-names = "usb";
+ status = "okay";
+};
+
+&usbotg_hs {
+ pinctrl-0 = <&usbotg_hs_pins_a>;
+ pinctrl-names = "default";
+ phy-names = "usb2-phy";
+ phys = <&usbphyc_port1 0>;
+ status = "okay";
+ vbus-supply = <&vbus_otg>;
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+ vdda1v1-supply = <&reg11>;
+ vdda1v8-supply = <&reg18>;
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-io1v8.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-io1v8.dtsi
new file mode 100644
index 000000000000..75172314d7af
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-io1v8.dtsi
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (C) Linaro Ltd 2019 - All Rights Reserved
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
+ */
+
+/ {
+ /* Enpirion EP3A8LQI U2 on the DHCOR */
+ vdd_io: regulator-buck-io {
+ compatible = "regulator-fixed";
+ regulator-name = "buck-io";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd>;
+ };
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd_io>;
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
new file mode 100644
index 000000000000..04fbb324a541
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (C) Linaro Ltd 2019 - All Rights Reserved
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ * Copyright (C) 2020 Marek Vasut <marex@denx.de>
+ */
+
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+ aliases {
+ spi0 = &qspi;
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x40000000>;
+ };
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo1-supply = <&v3v3>;
+ ldo2-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo5-supply = <&v3v3>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ vdda: ldo1 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ v2v8: ldo2 {
+ regulator-name = "v2v8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdd_sd: ldo5 {
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ regulator-enable-ramp-delay = <300000>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ regulator-active-discharge = <1>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge = <1>;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 1>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c02";
+ reg = <0x53>;
+ pagesize = <16>;
+ };
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&qspi {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
+ pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+ reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ flash0: spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
index d946e0a02f5c..70db923a45f7 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dkx.dtsi
@@ -112,14 +112,18 @@
&cec {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&cec_pins_b>;
- pinctrl-1 = <&cec_pins_sleep_b>;
+ pinctrl-1 = <&cec_sleep_pins_b>;
+ status = "okay";
+};
+
+&dts {
status = "okay";
};

&ethernet0 {
status = "okay";
pinctrl-0 = <&ethernet0_rgmii_pins_a>;
- pinctrl-1 = <&ethernet0_rgmii_pins_sleep_a>;
+ pinctrl-1 = <&ethernet0_rgmii_sleep_pins_a>;
pinctrl-names = "default", "sleep";
phy-mode = "rgmii-id";
max-speed = <1000>;
@@ -137,13 +141,12 @@
&gpu {
contiguous-area = <&gpu_reserved>;
- status = "okay";
};

&i2c1 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c1_pins_a>;
- pinctrl-1 = <&i2c1_pins_sleep_a>;
+ pinctrl-1 = <&i2c1_sleep_pins_a>;
i2c-scl-rising-time-ns = <100>;
i2c-scl-falling-time-ns = <7>;
status = "okay";
@@ -218,7 +221,7 @@
&i2c4 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2c4_pins_a>;
- pinctrl-1 = <&i2c4_pins_sleep_a>;
+ pinctrl-1 = <&i2c4_sleep_pins_a>;
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
clock-frequency = <400000>;
@@ -367,7 +370,7 @@
clock-names = "pclk", "i2sclk", "x8k", "x11k";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&i2s2_pins_a>;
- pinctrl-1 = <&i2s2_pins_sleep_a>;
+ pinctrl-1 = <&i2s2_sleep_pins_a>;
status = "okay";
i2s2_port: port {
@@ -391,13 +394,10 @@
&ltdc {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&ltdc_pins_a>;
- pinctrl-1 = <&ltdc_pins_sleep_a>;
+ pinctrl-1 = <&ltdc_sleep_pins_a>;
status = "okay";
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
ltdc_ep0_out: endpoint@0 {
reg = <0>;
remote-endpoint = <&sii9022_in>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi b/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi
new file mode 100644
index 000000000000..713485a95795
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp15xx-osd32.dtsi
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ * Copyright (C) 2020 Ahmad Fatoum, Pengutronix
+ */
+
+#include "stm32mp15-pinctrl.dtsi"
+
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x1000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10041000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10041000 0x1000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x4000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+ };
+
+ reg_sip_eeprom: regulator_eeprom {
+ compatible = "regulator-fixed";
+ regulator-name = "sip_eeprom";
+ regulator-always-on;
+ };
+};
+
+&i2c4 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c4_pins_a>;
+ pinctrl-1 = <&i2c4_sleep_pins_a>;
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+
+ ldo1-supply = <&v3v3>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ v1v8_audio: ldo1 {
+ regulator-name = "v1v8_audio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO1 0>;
+
+ };
+
+ v3v3_hdmi: ldo2 {
+ regulator-name = "v3v3_hdmi";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO2 0>;
+
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdda: ldo5 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v2_hdmi: ldo6 {
+ regulator-name = "v1v2_hdmi";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO6 0>;
+
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ regulator-active-discharge;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 1>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ };
+
+ pmic_watchdog: watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+
+ sip_eeprom: eeprom@50 {
+ compatible = "atmel,24c32";
+ vcc-supply = <&reg_sip_eeprom>;
+ reg = <0x50>;
+ };
+};
+
+&ipcc {
+ status = "okay";
+};
+
+&m4_rproc {
+ memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ <&vdev0vring1>, <&vdev0buffer>;
+ mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+ mbox-names = "vq0", "vq1", "shutdown";
+ interrupt-parent = <&exti>;
+ interrupts = <68 1>;
+ status = "okay";
+};
+
+&rng1 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime-emmc.dts
new file mode 100644
index 000000000000..033cab3443f8
--- /dev/null
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime-emmc.dts
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2020 Olimex Ltd.
+ * Author: Stefan Mavrodiev <stefan@olimex.com>
+ */
+
+#include "sun7i-a20-olinuxino-lime.dts"
+
+/ {
+ model = "Olimex A20-OLinuXino-LIME-eMMC";
+ compatible = "olimex,a20-olinuxino-lime-emmc", "allwinner,sun7i-a20";
+
+ mmc2_pwrseq: pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&pio 2 16 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mmc2 {
+ vmmc-supply = <&reg_vcc3v3>;
+ vqmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ non-removable;
+ mmc-pwrseq = <&mmc2_pwrseq>;
+ status = "okay";
+
+ emmc: emmc@0 {
+ reg = <0>;
+ compatible = "mmc-card";
+ broken-hpi;
+ };
+};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index 655404d6d3a3..c010b27fdb6a 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -610,6 +610,16 @@
clock-names = "bus", "mod";
};

+ msgbox: mailbox@1c17000 {
+ compatible = "allwinner,sun8i-a83t-msgbox",
+ "allwinner,sun6i-a31-msgbox";
+ reg = <0x01c17000 0x1000>;
+ clocks = <&ccu CLK_BUS_MSGBOX>;
+ resets = <&ccu RST_BUS_MSGBOX>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
usb_otg: usb@1c19000 {
compatible = "allwinner,sun8i-a83t-musb",
"allwinner,sun8i-a33-musb";
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
index d277d043031b..4c6704e4c57e 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -31,7 +31,7 @@

pwr_led {
label = "bananapi-m2-zero:red:pwr";
- gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
+ gpios = <&r_pio 0 10 GPIO_ACTIVE_LOW>; /* PL10 */
default-state = "on";
};
};
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index e83aa6866e7e..4e89701df91f 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -112,6 +112,26 @@
};
};

+ gpu_opp_table: gpu-opp-table {
+ compatible = "operating-points-v2";
+
+ opp-120000000 {
+ opp-hz = /bits/ 64 <120000000>;
+ };
+
+ opp-312000000 {
+ opp-hz = /bits/ 64 <312000000>;
+ };
+
+ opp-432000000 {
+ opp-hz = /bits/ 64 <432000000>;
+ };
+
+ opp-576000000 {
+ opp-hz = /bits/ 64 <576000000>;
+ };
+ };
+
pmu {
compatible = "arm,cortex-a7-pmu";
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
@@ -205,9 +225,7 @@
clocks = <&ccu CLK_BUS_GPU>, <&ccu CLK_GPU>;
clock-names = "bus", "core";
resets = <&ccu RST_BUS_GPU>;
-
- assigned-clocks = <&ccu CLK_GPU>;
- assigned-clock-rates = <384000000>;
+ operating-points-v2 = <&gpu_opp_table>;
};

ths: thermal-sensor@1c25000 {
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 01a5df9aa71b..22d533d18992 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -239,6 +239,16 @@
};
};

+ msgbox: mailbox@1c17000 {
+ compatible = "allwinner,sun8i-h3-msgbox",
+ "allwinner,sun6i-a31-msgbox";
+ reg = <0x01c17000 0x1000>;
+ clocks = <&ccu CLK_BUS_MSGBOX>;
+ resets = <&ccu RST_BUS_MSGBOX>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
usb_otg: usb@1c19000 {
compatible = "allwinner,sun8i-h3-musb";
reg = <0x01c19000 0x400>;
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index d3e032e7d21a..08be733ee2cd 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -46,8 +46,7 @@
avdd-dsi-csi-supply = <&avdd_1v2_reg>;

panel@0 {
- compatible = "panasonic,vvx10f004b00",
- "simple-panel";
+ compatible = "panasonic,vvx10f004b00";
reg = <0>;

power-supply = <&avdd_lcd_reg>;
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 8c2ee6e7d6f1..73361dbe2e43 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -1087,7 +1087,7 @@
};

panel: panel {
- compatible = "lg,lp129qe", "simple-panel";
+ compatible = "lg,lp129qe";
backlight = <&backlight>;
ddc-i2c-bus = <&dpaux>;
diff --git a/arch/arm/boot/dts/tegra20-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra20-colibri-eval-v3.dts
index 3c0f2681fcde..37ad508b61d9 100644
--- a/arch/arm/boot/dts/tegra20-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra20-colibri-eval-v3.dts
@@ -223,7 +223,7 @@
* edt,et057090dhu: EDT 5.7" LCD TFT
* edt,et070080dh6: EDT 7.0" LCD TFT
*/
- compatible = "edt,et057090dhu", "simple-panel";
+ compatible = "edt,et057090dhu";
backlight = <&backlight>;
power-supply = <&reg_3v3>;
};
diff --git a/arch/arm/boot/dts/tegra20-colibri-iris.dts b/arch/arm/boot/dts/tegra20-colibri-iris.dts
index d8004d68efa0..af4740847769 100644
--- a/arch/arm/boot/dts/tegra20-colibri-iris.dts
+++ b/arch/arm/boot/dts/tegra20-colibri-iris.dts
@@ -205,7 +205,7 @@
* edt,et057090dhu: EDT 5.7" LCD TFT
* edt,et070080dh6: EDT 7.0" LCD TFT
*/
- compatible = "edt,et057090dhu", "simple-panel";
+ compatible = "edt,et057090dhu";
backlight = <&backlight>;
power-supply = <&reg_3v3>;
};
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index 1d96d92b72a7..02cd67ea2503 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -665,7 +665,7 @@
};

panel: panel {
- compatible = "auo,b101aw03", "simple-panel";
+ compatible = "auo,b101aw03";
power-supply = <&vdd_pnl_reg>;
enable-gpios = <&gpio TEGRA_GPIO(B, 2) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts
index cda5448c2ace..c73510cd501c 100644
--- a/arch/arm/boot/dts/tegra20-medcom-wide.dts
+++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts
@@ -57,7 +57,7 @@
};

panel: panel {
- compatible = "innolux,n156bge-l21", "simple-panel";
+ compatible = "innolux,n156bge-l21";
power-supply = <&vdd_1v8_reg>, <&vdd_3v3_reg>;
enable-gpios = <&gpio TEGRA_GPIO(B, 2) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index be0ab9b84b9a..cce3a3fb82ed 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -604,7 +604,7 @@
};

panel: panel {
- compatible = "samsung,ltn101nt05", "simple-panel";
+ compatible = "samsung,ltn101nt05";
ddc-i2c-bus = <&lvds_ddc>;
power-supply = <&vdd_pnl_reg>;
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index f91441683aad..376ecb6435f4 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -826,7 +826,7 @@
};

panel: panel {
- compatible = "chunghwa,claa101wa01a", "simple-panel";
+ compatible = "chunghwa,claa101wa01a";
power-supply = <&vdd_pnl_reg>;
enable-gpios = <&gpio TEGRA_GPIO(B, 2) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index f44551e2d9d0..022649119821 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -611,7 +611,7 @@
};

panel: panel {
- compatible = "chunghwa,claa101wa01a", "simple-panel";
+ compatible = "chunghwa,claa101wa01a";
power-supply = <&vdd_pnl_reg>;
enable-gpios = <&gpio TEGRA_GPIO(B, 2) GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts
index 749fc6d1ff70..b39c26806bf2 100644
--- a/arch/arm/boot/dts/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts
@@ -195,7 +195,7 @@
* edt,et057090dhu: EDT 5.7" LCD TFT
* edt,et070080dh6: EDT 7.0" LCD TFT
*/
- compatible = "edt,et057090dhu", "simple-panel";
+ compatible = "edt,et057090dhu";
backlight = <&backlight>;
power-supply = <&reg_3v3>;
};
diff --git a/arch/arm/boot/dts/tegra30-apalis-v1.1-eval.dts b/arch/arm/boot/dts/tegra30-apalis-v1.1-eval.dts
index 0be50e881684..e29dca92ba0a 100644
--- a/arch/arm/boot/dts/tegra30-apalis-v1.1-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-v1.1-eval.dts
@@ -196,7 +196,7 @@
* edt,et057090dhu: EDT 5.7" LCD TFT
* edt,et070080dh6: EDT 7.0" LCD TFT
*/
- compatible = "edt,et057090dhu", "simple-panel";
+ compatible = "edt,et057090dhu";
backlight = <&backlight>;
power-supply = <&reg_3v3>;
};
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index 45ef6002b225..6b6fd8a8058f 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -2,6 +2,8 @@
/dts-v1/;

#include "tegra30.dtsi"
+#include "tegra30-cpu-opp.dtsi"
+#include "tegra30-cpu-opp-microvolt.dtsi"
/ {
model = "NVIDIA Tegra30 Beaver evaluation board";
@@ -1806,9 +1808,14 @@

vddctrl_reg: vddctrl {
regulator-name = "vdd_cpu,vdd_sys";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-coupled-with = <&core_vdd_reg>;
+ regulator-coupled-max-spread = <300000>;
+ regulator-max-step-microvolt = <100000>;
regulator-always-on;
+
+ nvidia,tegra-cpu-regulator;
};

vio_reg: vio {
@@ -1868,17 +1875,22 @@
};
};

- tps62361@60 {
+ core_vdd_reg: tps62361@60 {
compatible = "ti,tps62361";
reg = <0x60>;
regulator-name = "tps62361-vout";
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1500000>;
+ regulator-coupled-with = <&vddctrl_reg>;
+ regulator-coupled-max-spread = <300000>;
+ regulator-max-step-microvolt = <100000>;
regulator-boot-on;
regulator-always-on;
ti,vsel0-state-high;
ti,vsel1-state-high;
+
+ nvidia,tegra-core-regulator;
};
};
@@ -2120,4 +2132,26 @@
assigned-clock-parents = <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
<&tegra_car TEGRA30_CLK_EXTERN1>;
};
+
+ cpus {
+ cpu0: cpu@0 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
+ cpu@1 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
+ cpu@2 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+
+ cpu@3 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 4b4f49a49394..5ee5d141bd81 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -432,7 +432,7 @@
};

panel: panel {
- compatible = "chunghwa,claa101wb01", "simple-panel";
+ compatible = "chunghwa,claa101wb01";
ddc-i2c-bus = <&panelddc>;
power-supply = <&vdd_pnl1_reg>;
diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
index 5965150ecdd2..8e106e784dce 100644
--- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
@@ -159,7 +159,7 @@
* edt,et057090dhu: EDT 5.7" LCD TFT
* edt,et070080dh6: EDT 7.0" LCD TFT
*/
- compatible = "edt,et057090dhu", "simple-panel";
+ compatible = "edt,et057090dhu";
backlight = <&backlight>;
power-supply = <&reg_3v3>;
};
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index 06e7400d2940..b52957ccda0d 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -67,6 +67,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
diff --git a/arch/arm/boot/dts/uniphier-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
index 60994b6e8b99..079cadc11e6c 100644
--- a/arch/arm/boot/dts/uniphier-ld6b-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ld6b-ref.dts
@@ -29,6 +29,7 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ ethernet0 = &eth;
};

memory@80000000 {
diff --git a/arch/arm/boot/dts/uniphier-pro4-ace.dts b/arch/arm/boot/dts/uniphier-pro4-ace.dts
index 92cc48dd86d0..64246fad325c 100644
--- a/arch/arm/boot/dts/uniphier-pro4-ace.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ace.dts
@@ -26,6 +26,7 @@
i2c3 = &i2c3;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ ethernet0 = &eth;
};

memory@80000000 {
diff --git a/arch/arm/boot/dts/uniphier-pro4-ref.dts b/arch/arm/boot/dts/uniphier-pro4-ref.dts
index 854f2eba3e72..181442c48532 100644
--- a/arch/arm/boot/dts/uniphier-pro4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-ref.dts
@@ -29,6 +29,7 @@
i2c3 = &i2c3;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ ethernet0 = &eth;
};

memory@80000000 {
diff --git a/arch/arm/boot/dts/uniphier-pro4-sanji.dts b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
index dda1a2f214a8..5396556dee58 100644
--- a/arch/arm/boot/dts/uniphier-pro4-sanji.dts
+++ b/arch/arm/boot/dts/uniphier-pro4-sanji.dts
@@ -25,6 +25,7 @@
i2c3 = &i2c3;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ ethernet0 = &eth;
};

memory@80000000 {
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index 1c866f0306fc..a53b73ee93e9 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -75,6 +75,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
@@ -426,6 +428,14 @@
};
};

+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x5300>;
+ interrupts = <0 188 4>;
+ dma-channels = <16>;
+ #dma-cells = <2>;
+ };
+
aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pro4-aidet";
reg = <0x5fc20000 0x200>;
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index 8f1ae0957f5f..feadb4a378eb 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -160,6 +160,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
@@ -171,6 +173,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
@@ -408,6 +412,14 @@
};
};

+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x5300>;
+ interrupts = <0 188 4>;
+ dma-channels = <16>;
+ #dma-cells = <2>;
+ };
+
aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pro5-aidet";
reg = <0x5fc20000 0x200>;
diff --git a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
index e27fd4f2a569..8e9ac579aa9a 100644
--- a/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-gentil.dts
@@ -26,6 +26,7 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ ethernet0 = &eth;
};

memory@80000000 {
diff --git a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
index 23fe42b7408b..8eacc7bdecb7 100644
--- a/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
+++ b/arch/arm/boot/dts/uniphier-pxs2-vodka.dts
@@ -24,6 +24,7 @@
i2c4 = &i2c4;
i2c5 = &i2c5;
i2c6 = &i2c6;
+ ethernet0 = &eth;
};

memory@80000000 {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 2f2a24994c69..b0b15c97306b 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -173,6 +173,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
@@ -184,6 +186,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
@@ -508,6 +512,14 @@
};
};

+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x5300>;
+ interrupts = <0 188 4>;
+ dma-channels = <16>;
+ #dma-cells = <2>;
+ };
+
aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pxs2-aidet";
reg = <0x5fc20000 0x200>;
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index 09992163e1f4..96a766deb8d1 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -67,6 +67,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 5c183483ec3b..e6308fb76183 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -19,8 +19,162 @@
*/
/ {
+ v2m_fixed_3v3: fixed-regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ v2m_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "v2m:clk24mhz";
+ };
+
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "v2m:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "v2m:refclk32khz";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-1 {
+ label = "v2m:green:user1";
+ gpios = <&v2m_led_gpios 0 0>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-2 {
+ label = "v2m:green:user2";
+ gpios = <&v2m_led_gpios 1 0>;
+ linux,default-trigger = "disk-activity";
+ };
+
+ led-3 {
+ label = "v2m:green:user3";
+ gpios = <&v2m_led_gpios 2 0>;
+ linux,default-trigger = "cpu0";
+ };
+
+ led-4 {
+ label = "v2m:green:user4";
+ gpios = <&v2m_led_gpios 3 0>;
+ linux,default-trigger = "cpu1";
+ };
+
+ led-5 {
+ label = "v2m:green:user5";
+ gpios = <&v2m_led_gpios 4 0>;
+ linux,default-trigger = "cpu2";
+ };
+
+ led-6 {
+ label = "v2m:green:user6";
+ gpios = <&v2m_led_gpios 5 0>;
+ linux,default-trigger = "cpu3";
+ };
+
+ led-7 {
+ label = "v2m:green:user7";
+ gpios = <&v2m_led_gpios 6 0>;
+ linux,default-trigger = "cpu4";
+ };
+
+ led-8 {
+ label = "v2m:green:user8";
+ gpios = <&v2m_led_gpios 7 0>;
+ linux,default-trigger = "cpu5";
+ };
+ };
+
+ mcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ oscclk0 {
+ /* MCC static memory clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <25000000 60000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk0";
+ };
+
+ v2m_oscclk1: oscclk1 {
+ /* CLCD clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <23750000 65000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk1";
+ };
+
+ v2m_oscclk2: oscclk2 {
+ /* IO FPGA peripheral clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <24000000 24000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk2";
+ };
+
+ volt-vio {
+ /* Logic level voltage */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "VIO";
+ regulator-always-on;
+ label = "VIO";
+ };
+
+ temp-mcc {
+ /* MCC internal operating temperature */
+ compatible = "arm,vexpress-temp";
+ arm,vexpress-sysreg,func = <4 0>;
+ label = "MCC";
+ };
+
+ reset {
+ compatible = "arm,vexpress-reset";
+ arm,vexpress-sysreg,func = <5 0>;
+ };
+
+ muxfpga {
+ compatible = "arm,vexpress-muxfpga";
+ arm,vexpress-sysreg,func = <7 0>;
+ };
+
+ shutdown {
+ compatible = "arm,vexpress-shutdown";
+ arm,vexpress-sysreg,func = <8 0>;
+ };
+
+ reboot {
+ compatible = "arm,vexpress-reboot";
+ arm,vexpress-sysreg,func = <9 0>;
+ };
+
+ dvimode {
+ compatible = "arm,vexpress-dvimode";
+ arm,vexpress-sysreg,func = <11 0>;
+ };
+ };
+
bus@8000000 {
- motherboard {
+ motherboard-bus {
model = "V2M-P1";
arm,hbi = <0x190>;
arm,vexpress,site = <0>;
@@ -31,7 +185,7 @@
#interrupt-cells = <1>;
ranges;

- nor_flash: flash@0,00000000 {
+ nor_flash: flash@0 {
compatible = "arm,vexpress-flash", "cfi-flash";
reg = <0 0x00000000 0x04000000>,
<4 0x00000000 0x04000000>;
@@ -41,13 +195,13 @@
};
};

- psram@1,00000000 {
+ psram@100000000 {
compatible = "arm,vexpress-psram", "mtd-ram";
reg = <1 0x00000000 0x02000000>;
bank-width = <4>;
};

- ethernet@2,02000000 {
+ ethernet@202000000 {
compatible = "smsc,lan9118", "smsc,lan9115";
reg = <2 0x02000000 0x10000>;
interrupts = <15>;
@@ -59,14 +213,14 @@
vddvario-supply = <&v2m_fixed_3v3>;
};

- usb@2,03000000 {
+ usb@203000000 {
compatible = "nxp,usb-isp1761";
reg = <2 0x03000000 0x20000>;
interrupts = <16>;
port1-otg;
};

- iofpga@3,00000000 {
+ iofpga-bus@300000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -162,7 +316,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};

- v2m_serial0: uart@90000 {
+ v2m_serial0: serial@90000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x090000 0x1000>;
interrupts = <5>;
@@ -170,7 +324,7 @@
clock-names = "uartclk", "apb_pclk";
};

- v2m_serial1: uart@a0000 {
+ v2m_serial1: serial@a0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0a0000 0x1000>;
interrupts = <6>;
@@ -178,7 +332,7 @@
clock-names = "uartclk", "apb_pclk";
};

- v2m_serial2: uart@b0000 {
+ v2m_serial2: serial@b0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0b0000 0x1000>;
interrupts = <7>;
@@ -186,7 +340,7 @@
clock-names = "uartclk", "apb_pclk";
};

- v2m_serial3: uart@c0000 {
+ v2m_serial3: serial@c0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0c0000 0x1000>;
interrupts = <8>;
@@ -282,160 +436,6 @@
};
};
};
-
- v2m_fixed_3v3: fixed-regulator-0 {
- compatible = "regulator-fixed";
- regulator-name = "3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- v2m_clk24mhz: clk24mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "v2m:clk24mhz";
- };
-
- v2m_refclk1mhz: refclk1mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1000000>;
- clock-output-names = "v2m:refclk1mhz";
- };
-
- v2m_refclk32khz: refclk32khz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "v2m:refclk32khz";
- };
-
- leds {
- compatible = "gpio-leds";
-
- user1 {
- label = "v2m:green:user1";
- gpios = <&v2m_led_gpios 0 0>;
- linux,default-trigger = "heartbeat";
- };
-
- user2 {
- label = "v2m:green:user2";
- gpios = <&v2m_led_gpios 1 0>;
- linux,default-trigger = "mmc0";
- };
-
- user3 {
- label = "v2m:green:user3";
- gpios = <&v2m_led_gpios 2 0>;
- linux,default-trigger = "cpu0";
- };
-
- user4 {
- label = "v2m:green:user4";
- gpios = <&v2m_led_gpios 3 0>;
- linux,default-trigger = "cpu1";
- };
-
- user5 {
- label = "v2m:green:user5";
- gpios = <&v2m_led_gpios 4 0>;
- linux,default-trigger = "cpu2";
- };
-
- user6 {
- label = "v2m:green:user6";
- gpios = <&v2m_led_gpios 5 0>;
- linux,default-trigger = "cpu3";
- };
-
- user7 {
- label = "v2m:green:user7";
- gpios = <&v2m_led_gpios 6 0>;
- linux,default-trigger = "cpu4";
- };
-
- user8 {
- label = "v2m:green:user8";
- gpios = <&v2m_led_gpios 7 0>;
- linux,default-trigger = "cpu5";
- };
- };
-
- mcc {
- compatible = "arm,vexpress,config-bus";
- arm,vexpress,config-bridge = <&v2m_sysreg>;
-
- oscclk0 {
- /* MCC static memory clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 0>;
- freq-range = <25000000 60000000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk0";
- };
-
- v2m_oscclk1: oscclk1 {
- /* CLCD clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 1>;
- freq-range = <23750000 65000000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk1";
- };
-
- v2m_oscclk2: oscclk2 {
- /* IO FPGA peripheral clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 2>;
- freq-range = <24000000 24000000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk2";
- };
-
- volt-vio {
- /* Logic level voltage */
- compatible = "arm,vexpress-volt";
- arm,vexpress-sysreg,func = <2 0>;
- regulator-name = "VIO";
- regulator-always-on;
- label = "VIO";
- };
-
- temp-mcc {
- /* MCC internal operating temperature */
- compatible = "arm,vexpress-temp";
- arm,vexpress-sysreg,func = <4 0>;
- label = "MCC";
- };
-
- reset {
- compatible = "arm,vexpress-reset";
- arm,vexpress-sysreg,func = <5 0>;
- };
-
- muxfpga {
- compatible = "arm,vexpress-muxfpga";
- arm,vexpress-sysreg,func = <7 0>;
- };
-
- shutdown {
- compatible = "arm,vexpress-shutdown";
- arm,vexpress-sysreg,func = <8 0>;
- };
-
- reboot {
- compatible = "arm,vexpress-reboot";
- arm,vexpress-sysreg,func = <9 0>;
- };
-
- dvimode {
- compatible = "arm,vexpress-dvimode";
- arm,vexpress-sysreg,func = <11 0>;
- };
- };
};
};
};
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 8e7a3ed2a4df..44ff9cd88d81 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -103,6 +103,7 @@ CONFIG_WATCHDOG=y
CONFIG_BCM2835_WDT=y
CONFIG_MFD_SYSCON=y
CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
deleted file mode 100644
index fa997ae2673e..000000000000
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ /dev/null
@@ -1,173 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_ARMCORE=y
-CONFIG_PCI=y
-CONFIG_PCCARD=m
-CONFIG_YENTA=m
-# CONFIG_YENTA_O2 is not set
-# CONFIG_YENTA_RICOH is not set
-# CONFIG_YENTA_ENE_TUNE is not set
-# CONFIG_YENTA_TOSHIBA is not set
-CONFIG_PCMCIA_PXA2XX=m
-CONFIG_NO_HZ=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=1f03 mem=32M"
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_APM_EMULATION=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_BT=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_BNEP=m
-CONFIG_BT_HIDP=m
-CONFIG_LIB80211=m
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PXA2XX=y
-CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_NAND_GPIO=m
-CONFIG_MTD_NAND_CM_X270=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=m
-# CONFIG_SATA_PMP is not set
-CONFIG_PATA_PCMCIA=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_DM9000=y
-CONFIG_DM9000_DEBUGLEVEL=1
-CONFIG_NET_PCI=y
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_INPUT_EVDEV=y
-CONFIG_KEYBOARD_PXA27x=m
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_UCB1400=m
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_PXA2XX=m
-# CONFIG_HWMON is not set
-CONFIG_UCB1400_CORE=m
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_FB_PXA_PARAMETERS=y
-CONFIG_FB_MBX=m
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_PCI is not set
-CONFIG_SND_PXA2XX_AC97=m
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-# CONFIG_SND_PCMCIA is not set
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=m
-CONFIG_MMC_PXA=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=m
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_V3020=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_VFAT_FS=m
-# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_FRAME_WARN=0
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
deleted file mode 100644
index d08f02014755..000000000000
--- a/arch/arm/configs/em_x270_defconfig
+++ /dev/null
@@ -1,178 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_EM_X270=y
-CONFIG_MACH_EXEDA=y
-CONFIG_NO_HZ=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=1f03 mem=32M"
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=m
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_APM_EMULATION=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_BT=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_BNEP=m
-CONFIG_BT_HIDP=m
-CONFIG_BT_HCIBTUSB=m
-CONFIG_LIB80211=m
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PXA2XX=y
-CONFIG_MTD_RAW_NAND=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_DM9000=y
-CONFIG_DM9000_DEBUGLEVEL=1
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_APMPOWER=y
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_PXA27x=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-# CONFIG_TOUCHSCREEN_DA9034 is not set
-CONFIG_TOUCHSCREEN_WM97XX=m
-# CONFIG_TOUCHSCREEN_WM9705 is not set
-# CONFIG_TOUCHSCREEN_WM9713 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=16
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_PXA2XX=y
-CONFIG_POWER_SUPPLY=y
-CONFIG_BATTERY_DA9030=y
-# CONFIG_HWMON is not set
-CONFIG_PMIC_DA903X=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_DA903X=y
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_FB_PXA_PARAMETERS=y
-CONFIG_FB_MBX=m
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_LCD_TDO24M=y
-# CONFIG_BACKLIGHT_GENERIC is not set
-CONFIG_BACKLIGHT_DA903X=m
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-CONFIG_SOUND=m
-CONFIG_SND=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=m
-CONFIG_SND_PXA2XX_SOC=m
-CONFIG_SND_PXA2XX_SOC_EM_X270=m
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=m
-CONFIG_MMC_PXA=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_DA903X=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_V3020=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_VFAT_FS=m
-# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_FRAME_WARN=0
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DETECT_SOFTLOCKUP is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 2c779ac13270..374fbff8eaa6 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -69,6 +69,7 @@ CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_BCSP=y
CONFIG_BT_HCIUART_ATH3K=y
CONFIG_BT_HCIUART_INTEL=y
+CONFIG_BT_HCIUART_BCM=y
CONFIG_BT_HCIUART_AG6XX=y
CONFIG_BT_HCIUART_MRVL=y
CONFIG_BT_HCIBCM203X=m
@@ -78,8 +79,8 @@ CONFIG_BT_HCIVHCI=m
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
CONFIG_BT_ATH3K=m
-CONFIG_CFG80211=y
-CONFIG_MAC80211=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
CONFIG_MAC80211_LEDS=y
CONFIG_NFC=y
CONFIG_NFC_DIGITAL=m
@@ -94,7 +95,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
@@ -133,6 +134,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
CONFIG_TCG_TPM=y
CONFIG_TCG_TIS_I2C_INFINEON=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 5a20d12d62bd..87e6400c436b 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -395,6 +395,7 @@ CONFIG_RTC_DRV_DA9063=y
CONFIG_RTC_DRV_MC13XXX=y
CONFIG_RTC_DRV_MXC=y
CONFIG_RTC_DRV_MXC_V2=y
+CONFIG_RTC_DRV_RC5T619=y
CONFIG_RTC_DRV_SNVS=y
CONFIG_DMADEVICES=y
CONFIG_FSL_EDMA=y
@@ -408,6 +409,7 @@ CONFIG_COMMON_CLK_PWM=y
CONFIG_IIO=y
CONFIG_MMA8452=y
CONFIG_IMX7D_ADC=y
+CONFIG_RN5T618_ADC=y
CONFIG_VF610_ADC=y
CONFIG_SENSORS_ISL29018=y
CONFIG_MAG3110=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 9c86b1ab2f1d..95543914d3c7 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -979,6 +979,7 @@ CONFIG_ARCH_R7S72100=y
CONFIG_ARCH_R7S9210=y
CONFIG_ARCH_R8A73A4=y
CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7742=y
CONFIG_ARCH_R8A7743=y
CONFIG_ARCH_R8A7744=y
CONFIG_ARCH_R8A7745=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index b817c57f05f1..e6559e3350e6 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -38,8 +38,6 @@ CONFIG_MACH_ARCOM_ZEUS=y
CONFIG_MACH_BALLOON3=y
CONFIG_MACH_CSB726=y
CONFIG_CSB726_CSB701=y
-CONFIG_MACH_ARMCORE=y
-CONFIG_MACH_EM_X270=y
CONFIG_MACH_EXEDA=y
CONFIG_MACH_CM_X300=y
CONFIG_MACH_CAPC7117=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index bab7861443dc..8e1f78c19920 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -128,6 +128,7 @@ CONFIG_SPI=y
CONFIG_SPI_ATMEL=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_SAMA5D2_PIOBU=m
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_ACT8945A=y
CONFIG_POWER_RESET=y
@@ -142,6 +143,7 @@ CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_ACT8865=y
CONFIG_REGULATOR_ACT8945A=y
+CONFIG_REGULATOR_MCP16502=m
CONFIG_REGULATOR_PWM=m
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 838307a9bb92..9cf3143025e1 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -125,6 +125,7 @@ CONFIG_VIDEO_ML86V7667=y
CONFIG_DRM=y
CONFIG_DRM_RCAR_DU=y
CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_LVDS_CODEC=y
CONFIG_DRM_SII902X=y
CONFIG_DRM_SIMPLE_BRIDGE=y
@@ -178,6 +179,7 @@ CONFIG_ARCH_R7S72100=y
CONFIG_ARCH_R7S9210=y
CONFIG_ARCH_R8A73A4=y
CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7742=y
CONFIG_ARCH_R8A7743=y
CONFIG_ARCH_R8A7744=y
CONFIG_ARCH_R8A7745=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 61b8be19e527..b105ce7120cc 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -107,6 +107,7 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SUN4I_CODEC=y
+CONFIG_SND_SUN8I_CODEC_ANALOG=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 4dd5c92fe3b7..28dd7cf56048 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -32,6 +32,9 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_NETFILTER=y
CONFIG_PHONET=y
+CONFIG_BT=y
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_BCM=y
CONFIG_CFG80211=y
CONFIG_CFG80211_DEBUGFS=y
CONFIG_MAC80211=y
@@ -44,6 +47,7 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_NETDEVICES=y
CONFIG_SMSC911X=y
CONFIG_SMSC_PHY=y
+CONFIG_BRCMFMAC=m
CONFIG_CW1200=y
CONFIG_CW1200_WLAN_SDIO=y
CONFIG_INPUT_EVDEV=y
@@ -54,9 +58,11 @@ CONFIG_KEYBOARD_STMPE=y
CONFIG_KEYBOARD_TC3589X=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
CONFIG_TOUCHSCREEN_BU21013=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_AB8500_PONKEY=y
+CONFIG_INPUT_GPIO_VIBRA=y
CONFIG_RMI4_CORE=y
CONFIG_RMI4_I2C=y
CONFIG_RMI4_F11=y
@@ -64,7 +70,9 @@ CONFIG_RMI4_F11=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
+CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
@@ -78,11 +86,14 @@ CONFIG_MFD_TC3589X=y
CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_GPIO=y
CONFIG_DRM=y
+CONFIG_DRM_PANEL_NOVATEK_NT35510=y
CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
+CONFIG_DRM_PANEL_SONY_ACX424AKP=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_MCDE=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_BACKLIGHT_GPIO=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
@@ -113,9 +124,12 @@ CONFIG_HWSPINLOCK=y
CONFIG_HSEM_U8500=y
CONFIG_IIO=y
CONFIG_IIO_SW_TRIGGER=y
+CONFIG_BMA180=y
CONFIG_IIO_ST_ACCEL_3AXIS=y
CONFIG_IIO_ST_GYRO_3AXIS=y
+CONFIG_INV_MPU6050_I2C=y
CONFIG_BH1780=y
+CONFIG_GP2AP002=y
CONFIG_AK8974=y
CONFIG_IIO_ST_MAGN_3AXIS=y
CONFIG_IIO_HRTIMER_TRIGGER=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 2674de6ada1f..c9bf2df85cb9 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON
config CRYPTO_SHA1_ARM_CE
tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_SHA1_ARM
select CRYPTO_HASH
help
@@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE
config CRYPTO_SHA2_ARM_CE
tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_SHA256_ARM
select CRYPTO_HASH
help
@@ -96,7 +96,7 @@ config CRYPTO_AES_ARM_BS
config CRYPTO_AES_ARM_CE
tristate "Accelerated AES using ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_SIMD
@@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE
config CRYPTO_GHASH_ARM_CE
tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_CRYPTD
select CRYPTO_GF128MUL
@@ -118,13 +118,13 @@ config CRYPTO_GHASH_ARM_CE
config CRYPTO_CRCT10DIF_ARM_CE
tristate "CRCT10DIF digest algorithm using PMULL instructions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
depends on CRC_T10DIF
select CRYPTO_HASH
config CRYPTO_CRC32_ARM_CE
tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
depends on CRC32
select CRYPTO_HASH
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index deef4d0cb3b5..673c7dd75ab9 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -82,7 +82,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
+ const char *loglvl);
struct mm_struct;
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 7114b9aa46b8..2e24e765e6d3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -258,11 +258,11 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
- * flush_cache_user_range is used when we want to ensure that the
+ * flush_icache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -318,9 +318,6 @@ extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 9383f236e795..84dc0ba822f5 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -13,7 +13,6 @@
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#ifdef CONFIG_EFI
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 472c93db5dac..fc56fc3e1931 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,8 +6,8 @@
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
+#include <linux/pgtable.h>
#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index eb4e4207cd3c..31811be38d78 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -10,8 +10,6 @@
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
-
#define flush_cache_kmaps() \
do { \
if (cache_is_vivt()) \
@@ -20,9 +18,6 @@
extern pte_t *pkmap_page_table;
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
* page usage count does not decrease to zero while we're using its
@@ -63,10 +58,6 @@ static inline void *kmap_high_get(struct page *page)
* when CONFIG_HIGHMEM is not set.
*/
#ifdef CONFIG_HIGHMEM
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
#endif
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index 73ba956e379f..aab7e8358e6a 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -3,7 +3,7 @@
#define __ASM_IDMAP_H
#include <linux/compiler.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 9e084a464a97..3502c2f746ca 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -187,6 +187,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 1933aed9f68d..fbb6693c3352 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -133,13 +133,6 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the second-level page table.. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
#define copy_pmd(pmdpd,pmdps) \
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 30fb2330f57b..d16aba48fa0a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -22,7 +22,6 @@
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
/* FIXME */
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
@@ -73,8 +72,6 @@ extern unsigned int kobjsize(const void *objp);
#define FIRST_USER_ADDRESS 0UL
-#include <asm-generic/pgtable.h>
-
#else
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index befc8fcec98f..c02f24400369 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -17,7 +17,6 @@
#else
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -167,14 +166,6 @@ extern struct page *empty_zero_page;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
#define pmd_none(pmd) (!pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
@@ -184,21 +175,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#ifndef CONFIG_HIGHPTE
-#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
-#define __pte_unmap(pte) do { } while (0)
-#else
-#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
-#define __pte_unmap(pte) kunmap_atomic(pte)
-#endif
-
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
-
-#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte)
-
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
@@ -340,8 +316,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 172b08ff3760..987fefb0a4db 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -29,7 +29,8 @@ static inline int __in_irqentry_text(unsigned long ptr)
}
extern void __init early_trap_init(void *);
-extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl);
extern void ptrace_break(struct pt_regs *regs);
extern void *vectors_page;
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index 6e282c33126b..0f8a3439902d 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -36,7 +36,8 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
unsigned long text_addr,
unsigned long text_size);
extern void unwind_table_del(struct unwind_table *tab);
-extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 182422981386..254ab7138c85 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -78,13 +78,32 @@ void elf_set_personality(const struct elf32_hdr *x)
EXPORT_SYMBOL(elf_set_personality);
/*
- * Set READ_IMPLIES_EXEC if:
- * - the binary requires an executable stack
- * - we're running on a CPU which doesn't support NX.
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the result is:
+ *
+ *                 CPU: | lacks NX*  | has NX     |
+ * ELF:                 |            |            |
+ * ---------------------|------------|------------|
+ * missing PT_GNU_STACK | exec-all   | exec-all   |
+ * PT_GNU_STACK == RWX  | exec-all   | exec-stack |
+ * PT_GNU_STACK == RW   | exec-all   | exec-none  |
+ *
+ * exec-all  : all PROT_READ user mappings are executable, except when
+ *             backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *this column has no architectural effect: NX markings are ignored by
+ * hardware, but may have behavioral effects when "wants X" collides with
+ * "cannot be X" constraints in memory permission flags, as in
+ * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
+ *
*/
int arm_elf_read_implies_exec(int executable_stack)
{
- if (executable_stack != EXSTACK_DISABLE_X)
+ if (executable_stack == EXSTACK_DEFAULT)
return 1;
if (cpu_architecture() < CPU_ARCH_ARMv6)
return 1;
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index cd1234c103fc..98ca3e3fa847 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -98,8 +98,8 @@ void set_fiq_handler(void *start, unsigned int length)
memcpy(base + offset, start, length);
if (!cache_is_vipt_nonaliasing())
- flush_icache_range((unsigned long)base + offset, offset +
- length);
+ flush_icache_range((unsigned long)base + offset,
+ (unsigned long)base + offset + length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
}
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c49b39340ddb..f8904227e7fd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/cp15.h>
@@ -18,7 +19,6 @@
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
-#include <asm/pgtable.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 76300f3813e8..974b6c64d3e6 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <linux/of_fdt.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index af0a8500a24e..e15444b25ca0 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/gfp.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 46e478fb5ea2..58eaa1f60e16 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -431,7 +431,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
npages = 1; /* for sigpage */
npages += vdso_total_pages;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
hint = sigpage_addr(mm, npages);
addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
@@ -458,7 +458,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
arm_install_vdso(mm, addr + PAGE_SIZE);
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
#endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 4cc6a7eff635..d0f7c8896c96 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/tracehook.h>
#include <linux/unistd.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 46e1be9e57a8..9a6432557871 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -37,7 +37,6 @@
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index d08099269e35..d2c9338d74e8 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -2,12 +2,12 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index e640871328c1..6166ba38bf99 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -97,12 +97,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
int si_code;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
si_code = SEGV_MAPERR;
else
si_code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs,
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index dddc7ebf4db4..09b149b09c43 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -8,7 +8,6 @@
* This file contains the ARM-specific time handling details:
* reading the RTC at bootup, etc...
*/
-#include <linux/clk-provider.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
@@ -17,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/of_clk.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/sched_clock.h>
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1e70e7227f0f..65a3b1e75480 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,21 +62,24 @@ __setup("user_debug=", user_debug_setup);
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
-void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl)
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);
#ifdef CONFIG_KALLSYMS
- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
+ loglvl, where, (void *)where, from, (void *)from);
#else
- printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
+ loglvl, where, from);
#endif
if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
- dump_mem("", "Exception stack", frame + 4, end);
+ dump_mem(loglvl, "Exception stack", frame + 4, end);
}
-void dump_backtrace_stm(u32 *stack, u32 instruction)
+void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
{
char str[80], *p;
unsigned int x;
@@ -88,12 +91,12 @@ void dump_backtrace_stm(u32 *stack, u32 instruction)
if (++x == 6) {
x = 0;
p = str;
- printk("%s\n", str);
+ printk("%s%s\n", loglvl, str);
}
}
}
if (p != str)
- printk("%s\n", str);
+ printk("%s%s\n", loglvl, str);
}
#ifndef CONFIG_ARM_UNWIND
@@ -201,17 +204,19 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
#ifdef CONFIG_ARM_UNWIND
-static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- unwind_backtrace(regs, tsk);
+ unwind_backtrace(regs, tsk, loglvl);
}
#else
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
unsigned int fp, mode;
int ok = 1;
- printk("Backtrace: ");
+ printk("%sBacktrace: ", loglvl);
if (!tsk)
tsk = current;
@@ -238,13 +243,13 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
pr_cont("\n");
if (ok)
- c_backtrace(fp, mode);
+ c_backtrace(fp, mode, loglvl);
}
#endif
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
@@ -288,7 +293,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
- dump_backtrace(regs, tsk);
+ dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
@@ -566,7 +571,7 @@ __do_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
- ret = flush_cache_user_range(start, start + chunk);
+ ret = flush_icache_user_range(start, start + chunk);
if (ret)
return ret;
@@ -663,10 +668,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
- dump_instr("", regs);
+ dump_instr(KERN_ERR, regs);
if (user_mode(regs)) {
__show_regs(regs);
- c_backtrace(frame_pointer(regs), processor_mode(regs));
+ c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
}
}
#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 11a964fd66f4..d2bd0df2318d 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -455,7 +455,8 @@ int unwind_frame(struct stackframe *frame)
return URC_OK;
}
-void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
struct stackframe frame;
@@ -493,7 +494,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
urc = unwind_frame(&frame);
if (urc < 0)
break;
- dump_backtrace_entry(where, frame.pc, frame.sp - 4);
+ dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
}
}
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index e0330a25e1c6..6bfdca4769a7 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -240,7 +240,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
return PTR_ERR_OR_ZERO(vma);
}
-/* assumes mmap_sem is write-locked */
+/* assumes mmap_lock is write-locked */
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 88a720da443b..7f24bc08403e 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,13 +8,13 @@
#include "vmlinux-xip.lds.S"
#else
+#include <linux/pgtable.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "vmlinux.lds.h"
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
index 2ff375144b55..6174c45f53a5 100644
--- a/arch/arm/lib/backtrace-clang.S
+++ b/arch/arm/lib/backtrace-clang.S
@@ -17,6 +17,7 @@
#define sv_pc r6
#define mask r7
#define sv_lr r8
+#define loglvl r9
ENTRY(c_backtrace)
@@ -99,6 +100,7 @@ ENDPROC(c_backtrace)
@ to ensure 8 byte alignment
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
moveq mask, #0xfc000003
movne mask, #0 @ mask for 32-bit
@@ -167,6 +169,7 @@ finished_setup:
mov r1, sv_lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
+ mov r3, loglvl
bl dump_backtrace_entry
/*
@@ -183,6 +186,7 @@ finished_setup:
ldr r0, [frame] @ locals are stored in
@ the preceding frame
subeq r0, r0, #4
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
/*
@@ -196,7 +200,8 @@ finished_setup:
bhi for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
+ mov r1, loglvl
+ mov r2, frame
bl printk
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
ENDPROC(c_backtrace)
@@ -209,7 +214,7 @@ ENDPROC(c_backtrace)
.long 1005b, 1006b
.popsection
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
.word 0x0b000000 @ bl if these bits are set
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 582925238d65..872f658638d9 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -18,6 +18,7 @@
#define sv_pc r6
#define mask r7
#define offset r8
+#define loglvl r9
ENTRY(c_backtrace)
@@ -25,9 +26,10 @@ ENTRY(c_backtrace)
ret lr
ENDPROC(c_backtrace)
#else
- stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
+ stmfd sp!, {r4 - r9, lr} @ Save an extra register so we have a location...
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
ARM( moveq mask, #0xfc000003 )
@@ -73,6 +75,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
ldr r1, [frame, #-4] @ get saved lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
+ mov r3, loglvl
bl dump_backtrace_entry
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
@@ -80,12 +83,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
@@ -96,9 +101,10 @@ for_each_frame: tst frame, mask @ Check for address exceptions
bhi for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
+ mov r1, loglvl
+ mov r2, frame
bl printk
-no_frame: ldmfd sp!, {r4 - r8, pc}
+no_frame: ldmfd sp!, {r4 - r9, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
@@ -109,7 +115,7 @@ ENDPROC(c_backtrace)
.long 1004b, 1006b
.popsection
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index c9450982a155..106f83a5ea6d 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -24,6 +24,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
unsigned long addr = (unsigned long)_addr;
pgd_t *pgd;
+ p4d_t *p4d;
pmd_t *pmd;
pte_t *pte;
pud_t *pud;
@@ -33,7 +34,11 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
return 0;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
+ return 0;
+
+ pud = pud_offset(p4d, addr);
if (unlikely(pud_none(*pud) || pud_bad(*pud)))
return 0;
@@ -96,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
atomic = faulthandler_disabled();
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
@@ -104,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -128,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
spin_unlock(ptl);
}
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return n;
@@ -165,17 +170,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
return 0;
}
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)addr))
goto out;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -193,7 +198,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
else
spin_unlock(ptl);
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return n;
diff --git a/arch/arm/mach-actions/Kconfig b/arch/arm/mach-actions/Kconfig
index b5e0ac965ec0..00fb4babccdd 100644
--- a/arch/arm/mach-actions/Kconfig
+++ b/arch/arm/mach-actions/Kconfig
@@ -7,7 +7,6 @@ menuconfig ARCH_ACTIONS
select ARM_GLOBAL_TIMER
select CACHE_L2X0
select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
- select COMMON_CLK
select GENERIC_IRQ_CHIP
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-alpine/Kconfig b/arch/arm/mach-alpine/Kconfig
index bc04c91294cf..6a68a162385b 100644
--- a/arch/arm/mach-alpine/Kconfig
+++ b/arch/arm/mach-alpine/Kconfig
@@ -7,7 +7,6 @@ config ARCH_ALPINE
select ARM_GIC
select GENERIC_IRQ_CHIP
select HAVE_ARM_ARCH_TIMER
- select HAVE_SMP
select MFD_SYSCON
select FORCE_PCI
select PCI_HOST_GENERIC
diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig
index e42dbaa53bc6..a2e1d0aaf252 100644
--- a/arch/arm/mach-asm9260/Kconfig
+++ b/arch/arm/mach-asm9260/Kconfig
@@ -4,6 +4,5 @@ config MACH_ASM9260
depends on ARCH_MULTI_V5
select CPU_ARM926T
select ASM9260_TIMER
- select GENERIC_CLOCKEVENTS
help
Support for Alphascale ASM9260 based platform.
diff --git a/arch/arm/mach-aspeed/Kconfig b/arch/arm/mach-aspeed/Kconfig
index e8d6e9957d65..ea96d11b8502 100644
--- a/arch/arm/mach-aspeed/Kconfig
+++ b/arch/arm/mach-aspeed/Kconfig
@@ -39,7 +39,6 @@ config MACH_ASPEED_G6
select PINCTRL_ASPEED_G6
select ARM_GIC
select HAVE_ARM_ARCH_TIMER
- select HAVE_SMP
help
Say yes if you intend to run on an Aspeed ast2600 or similar
sixth generation Aspeed BMCs.
diff --git a/arch/arm/mach-berlin/Kconfig b/arch/arm/mach-berlin/Kconfig
index 5b1f61fd7878..01861fa72c97 100644
--- a/arch/arm/mach-berlin/Kconfig
+++ b/arch/arm/mach-berlin/Kconfig
@@ -19,7 +19,6 @@ config MACH_BERLIN_BG2
select CPU_PJ4B
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select HAVE_SMP
select PINCTRL_BERLIN_BG2
config MACH_BERLIN_BG2CD
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig
index fc9188b54dd6..314de9477b84 100644
--- a/arch/arm/mach-clps711x/Kconfig
+++ b/arch/arm/mach-clps711x/Kconfig
@@ -2,15 +2,10 @@
menuconfig ARCH_CLPS711X
bool "Cirrus Logic EP721x/EP731x-based"
depends on ARCH_MULTI_V4T
- select AUTO_ZRELADDR
- select TIMER_OF
select CLPS711X_TIMER
- select COMMON_CLK
select CPU_ARM720T
- select GENERIC_CLOCKEVENTS
select GPIOLIB
select MFD_SYSCON
select OF_IRQ
- select USE_OF
help
Select this if you use ARMv4T Cirrus Logic chips.
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 02b180ad7245..d028d38a44bf 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -64,7 +64,6 @@ config MACH_DA8XX_DT
default y
depends on ARCH_DAVINCI_DA850
select PINCTRL
- select TIMER_OF
help
Say y here to include support for TI DaVinci DA850 based using
Flattened Device Tree. More information at Documentation/devicetree
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 3461d12bbfc0..a5d3708fedf6 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -655,19 +655,6 @@ static struct i2c_board_info __initdata i2c_info[] = {
},
};
-/* Fixed regulator support */
-static struct regulator_consumer_supply fixed_supplies_3_3v[] = {
- /* Baseboard 3.3V: 5V -> TPS54310PWP -> 3.3V */
- REGULATOR_SUPPLY("AVDD", "1-001b"),
- REGULATOR_SUPPLY("DRVDD", "1-001b"),
-};
-
-static struct regulator_consumer_supply fixed_supplies_1_8v[] = {
- /* Baseboard 1.8V: 5V -> TPS54310PWP -> 1.8V */
- REGULATOR_SUPPLY("IOVDD", "1-001b"),
- REGULATOR_SUPPLY("DVDD", "1-001b"),
-};
-
#define DM644X_I2C_SDA_PIN GPIO_TO_PIN(2, 12)
#define DM644X_I2C_SCL_PIN GPIO_TO_PIN(2, 11)
@@ -700,6 +687,19 @@ static void __init evm_init_i2c(void)
}
#endif
+/* Fixed regulator support */
+static struct regulator_consumer_supply fixed_supplies_3_3v[] = {
+ /* Baseboard 3.3V: 5V -> TPS54310PWP -> 3.3V */
+ REGULATOR_SUPPLY("AVDD", "1-001b"),
+ REGULATOR_SUPPLY("DRVDD", "1-001b"),
+};
+
+static struct regulator_consumer_supply fixed_supplies_1_8v[] = {
+ /* Baseboard 1.8V: 5V -> TPS54310PWP -> 1.8V */
+ REGULATOR_SUPPLY("IOVDD", "1-001b"),
+ REGULATOR_SUPPLY("DVDD", "1-001b"),
+};
+
#define VENC_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
/* venc standard timings */
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 575b2e2b6759..5960e3dfd2bf 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -17,7 +17,6 @@
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system_misc.h>
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 015f75d1c98d..eee095f0e2f6 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -14,7 +14,6 @@
#include <linux/spinlock.h>
#include <video/vga.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 5aa5796cff0e..72c3fcc32910 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -49,7 +49,6 @@ void imx_aips_allow_unprivileged_access(const char *compat);
int mxc_device_init(void);
void imx_set_soc_revision(unsigned int rev);
void imx_init_revision_from_anatop(void);
-struct device *imx_soc_device_init(void);
void imx6_enable_rbc(bool enable);
void imx_gpc_check_dt(void);
void imx_gpc_set_arm_power_in_lpm(bool power_off);
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index 06f8d64b65af..65c7224f5250 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -1,25 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-#include <linux/sys_soc.h>
#include "hardware.h"
#include "common.h"
-#define OCOTP_UID_H 0x420
-#define OCOTP_UID_L 0x410
-
-#define OCOTP_ULP_UID_1 0x4b0
-#define OCOTP_ULP_UID_2 0x4c0
-#define OCOTP_ULP_UID_3 0x4d0
-#define OCOTP_ULP_UID_4 0x4e0
-
unsigned int __mxc_cpu_type;
static unsigned int imx_soc_revision;
@@ -82,150 +70,3 @@ void __init imx_aips_allow_unprivileged_access(
imx_set_aips(aips_base_addr);
}
}
-
-struct device * __init imx_soc_device_init(void)
-{
- struct soc_device_attribute *soc_dev_attr;
- const char *ocotp_compat = NULL;
- struct soc_device *soc_dev;
- struct device_node *root;
- struct regmap *ocotp = NULL;
- const char *soc_id;
- u64 soc_uid = 0;
- u32 val;
- int ret;
-
- soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
- if (!soc_dev_attr)
- return NULL;
-
- soc_dev_attr->family = "Freescale i.MX";
-
- root = of_find_node_by_path("/");
- ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
- of_node_put(root);
- if (ret)
- goto free_soc;
-
- switch (__mxc_cpu_type) {
- case MXC_CPU_MX1:
- soc_id = "i.MX1";
- break;
- case MXC_CPU_MX21:
- soc_id = "i.MX21";
- break;
- case MXC_CPU_MX25:
- soc_id = "i.MX25";
- break;
- case MXC_CPU_MX27:
- soc_id = "i.MX27";
- break;
- case MXC_CPU_MX31:
- soc_id = "i.MX31";
- break;
- case MXC_CPU_MX35:
- soc_id = "i.MX35";
- break;
- case MXC_CPU_MX51:
- soc_id = "i.MX51";
- break;
- case MXC_CPU_MX53:
- soc_id = "i.MX53";
- break;
- case MXC_CPU_IMX6SL:
- ocotp_compat = "fsl,imx6sl-ocotp";
- soc_id = "i.MX6SL";
- break;
- case MXC_CPU_IMX6DL:
- ocotp_compat = "fsl,imx6q-ocotp";
- soc_id = "i.MX6DL";
- break;
- case MXC_CPU_IMX6SX:
- ocotp_compat = "fsl,imx6sx-ocotp";
- soc_id = "i.MX6SX";
- break;
- case MXC_CPU_IMX6Q:
- ocotp_compat = "fsl,imx6q-ocotp";
- soc_id = "i.MX6Q";
- break;
- case MXC_CPU_IMX6UL:
- ocotp_compat = "fsl,imx6ul-ocotp";
- soc_id = "i.MX6UL";
- break;
- case MXC_CPU_IMX6ULL:
- ocotp_compat = "fsl,imx6ull-ocotp";
- soc_id = "i.MX6ULL";
- break;
- case MXC_CPU_IMX6ULZ:
- ocotp_compat = "fsl,imx6ull-ocotp";
- soc_id = "i.MX6ULZ";
- break;
- case MXC_CPU_IMX6SLL:
- ocotp_compat = "fsl,imx6sll-ocotp";
- soc_id = "i.MX6SLL";
- break;
- case MXC_CPU_IMX7D:
- ocotp_compat = "fsl,imx7d-ocotp";
- soc_id = "i.MX7D";
- break;
- case MXC_CPU_IMX7ULP:
- ocotp_compat = "fsl,imx7ulp-ocotp";
- soc_id = "i.MX7ULP";
- break;
- default:
- soc_id = "Unknown";
- }
- soc_dev_attr->soc_id = soc_id;
-
- if (ocotp_compat) {
- ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
- if (IS_ERR(ocotp))
- pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
- }
-
- if (!IS_ERR_OR_NULL(ocotp)) {
- if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
- regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
- soc_uid = val & 0xffff;
- regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
- soc_uid <<= 16;
- soc_uid |= val & 0xffff;
- regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
- soc_uid <<= 16;
- soc_uid |= val & 0xffff;
- regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
- soc_uid <<= 16;
- soc_uid |= val & 0xffff;
- } else {
- regmap_read(ocotp, OCOTP_UID_H, &val);
- soc_uid = val;
- regmap_read(ocotp, OCOTP_UID_L, &val);
- soc_uid <<= 32;
- soc_uid |= val;
- }
- }
-
- soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
- (imx_soc_revision >> 4) & 0xf,
- imx_soc_revision & 0xf);
- if (!soc_dev_attr->revision)
- goto free_soc;
-
- soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
- if (!soc_dev_attr->serial_number)
- goto free_rev;
-
- soc_dev = soc_device_register(soc_dev_attr);
- if (IS_ERR(soc_dev))
- goto free_serial_number;
-
- return soc_device_to_device(soc_dev);
-
-free_serial_number:
- kfree(soc_dev_attr->serial_number);
-free_rev:
- kfree(soc_dev_attr->revision);
-free_soc:
- kfree(soc_dev_attr);
- return NULL;
-}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 284bce1112d2..85c084a716ab 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -245,21 +245,15 @@ static void __init imx6q_axi_init(void)
static void __init imx6q_init_machine(void)
{
- struct device *parent;
-
if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_2_0)
imx_print_silicon_rev("i.MX6QP", IMX_CHIP_REVISION_1_0);
else
imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
imx_get_soc_revision());
- parent = imx_soc_device_init();
- if (parent == NULL)
- pr_warn("failed to initialize soc device\n");
-
imx6q_enet_phy_init();
- of_platform_default_populate(NULL, NULL, parent);
+ of_platform_default_populate(NULL, NULL, NULL);
imx_anatop_init();
cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
diff --git a/arch/arm/mach-imx/mach-imx6sl.c b/arch/arm/mach-imx/mach-imx6sl.c
index e27a6889cc56..f6e87363d605 100644
--- a/arch/arm/mach-imx/mach-imx6sl.c
+++ b/arch/arm/mach-imx/mach-imx6sl.c
@@ -45,13 +45,7 @@ static void __init imx6sl_init_late(void)
static void __init imx6sl_init_machine(void)
{
- struct device *parent;
-
- parent = imx_soc_device_init();
- if (parent == NULL)
- pr_warn("failed to initialize soc device\n");
-
- of_platform_default_populate(NULL, NULL, parent);
+ of_platform_default_populate(NULL, NULL, NULL);
if (cpu_is_imx6sl())
imx6sl_fec_init();
diff --git a/arch/arm/mach-imx/mach-imx6sx.c b/arch/arm/mach-imx/mach-imx6sx.c
index d5310bf307ff..781e2a94fdd7 100644
--- a/arch/arm/mach-imx/mach-imx6sx.c
+++ b/arch/arm/mach-imx/mach-imx6sx.c
@@ -63,13 +63,7 @@ static inline void imx6sx_enet_init(void)
static void __init imx6sx_init_machine(void)
{
- struct device *parent;
-
- parent = imx_soc_device_init();
- if (parent == NULL)
- pr_warn("failed to initialize soc device\n");
-
- of_platform_default_populate(NULL, NULL, parent);
+ of_platform_default_populate(NULL, NULL, NULL);
imx6sx_enet_init();
imx_anatop_init();
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index 3b0e16ccd59d..e018e716735f 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -55,13 +55,7 @@ static inline void imx6ul_enet_init(void)
static void __init imx6ul_init_machine(void)
{
- struct device *parent;
-
- parent = imx_soc_device_init();
- if (parent == NULL)
- pr_warn("failed to initialize soc device\n");
-
- of_platform_default_populate(NULL, NULL, parent);
+ of_platform_default_populate(NULL, NULL, NULL);
imx6ul_enet_init();
imx_anatop_init();
imx6ul_pm_init();
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index ebb27592a9f7..879c35929a13 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -78,12 +78,6 @@ static inline void imx7d_enet_init(void)
static void __init imx7d_init_machine(void)
{
- struct device *parent;
-
- parent = imx_soc_device_init();
- if (parent == NULL)
- pr_warn("failed to initialize soc device\n");
-
imx_anatop_init();
imx7d_enet_init();
}
diff --git a/arch/arm/mach-imx/mach-imx7ulp.c b/arch/arm/mach-imx/mach-imx7ulp.c
index 11ac71aaf965..128cf4c92aab 100644
--- a/arch/arm/mach-imx/mach-imx7ulp.c
+++ b/arch/arm/mach-imx/mach-imx7ulp.c
@@ -57,7 +57,7 @@ static void __init imx7ulp_init_machine(void)
mxc_set_cpu_type(MXC_CPU_IMX7ULP);
imx7ulp_set_revision();
- of_platform_default_populate(NULL, NULL, imx_soc_device_init());
+ of_platform_default_populate(NULL, NULL, NULL);
}
static const char *const imx7ulp_dt_compat[] __initconst = {
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 7b8325fb5b41..1da5f07952ac 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/irq.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
@@ -20,8 +21,6 @@
#include <linux/mfd/mc13783.h>
#include <linux/spi/spi.h>
#include <linux/regulator/machine.h>
-#include <linux/spi/l4f00242t03.h>
-
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -351,9 +350,19 @@ static const struct imx_fb_platform_data mx27_3ds_fb_data __initconst = {
};
/* LCD */
-static struct l4f00242t03_pdata mx27_3ds_lcd_pdata = {
- .reset_gpio = LCD_RESET,
- .data_enable_gpio = LCD_ENABLE,
+static struct gpiod_lookup_table mx27_3ds_lcd_gpiod_table = {
+ .dev_id = "spi0.0", /* Bus 0 chipselect 0 */
+ .table = {
+ /*
+ * The i.MX27 has the i.MX21 GPIO controller, the GPIOs
+ * numbered IMX_GPIO_NR(1, 3) and IMX_GPIO_NR(1, 31)
+ * are in "bank 1" which is subtracted by one in the macro
+ * so these are actually bank 0 on "imx21-gpio.0".
+ */
+ GPIO_LOOKUP("imx21-gpio.0", 3, "reset", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("imx21-gpio.0", 31, "enable", GPIO_ACTIVE_HIGH),
+ { },
+ },
};
static struct spi_board_info mx27_3ds_spi_devs[] __initdata = {
@@ -370,7 +379,6 @@ static struct spi_board_info mx27_3ds_spi_devs[] __initdata = {
.max_speed_hz = 5000000,
.bus_num = 0,
.chip_select = 0, /* SS0 */
- .platform_data = &mx27_3ds_lcd_pdata,
},
};
@@ -416,6 +424,7 @@ static void __init mx27pdk_late_init(void)
if (!otg_mode_host)
imx27_add_fsl_usb2_udc(&otg_device_pdata);
+ gpiod_add_lookup_table(&mx27_3ds_lcd_gpiod_table);
mx27_3ds_spi_devs[0].irq = gpio_to_irq(PMIC_INT);
spi_register_board_info(mx27_3ds_spi_devs,
ARRAY_SIZE(mx27_3ds_spi_devs));
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 716d2ad51103..e81386190479 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -10,10 +10,10 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
#include <linux/spi/spi.h>
-#include <linux/spi/l4f00242t03.h>
#include <linux/regulator/machine.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
@@ -160,9 +160,23 @@ static struct mx3fb_platform_data mx3fb_pdata __initdata = {
};
/* LCD */
-static struct l4f00242t03_pdata mx31_3ds_l4f00242t03_pdata = {
- .reset_gpio = IOMUX_TO_GPIO(MX31_PIN_LCS1),
- .data_enable_gpio = IOMUX_TO_GPIO(MX31_PIN_SER_RS),
+static struct gpiod_lookup_table mx31_3ds_lcd_gpiod_table = {
+ .dev_id = "spi0.2", /* Bus 0 chipselect 2 */
+ .table = {
+ /*
+ * "reset" has IOMUX_TO_GPIO(IOMUX_PIN(88, 28)).
+ * The macro only shifts 88 to bits 9..16 and then
+ * masks it and shifts it back. The GPIO number is 88.
+ * 88 is 2*32+24
+ */
+ GPIO_LOOKUP("imx31-gpio.2", 24, "reset", GPIO_ACTIVE_HIGH),
+ /*
+ * Same reasoning as above for
+ * IOMUX_TO_GPIO(IOMUX_PIN(89, 27)), pin 89 is 2*32+25.
+ */
+ GPIO_LOOKUP("imx31-gpio.2", 25, "enable", GPIO_ACTIVE_HIGH),
+ { },
+ },
};
/*
@@ -387,7 +401,6 @@ static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
.max_speed_hz = 5000000,
.bus_num = 0,
.chip_select = 2, /* SS2 */
- .platform_data = &mx31_3ds_l4f00242t03_pdata,
},
};
@@ -566,6 +579,7 @@ static void __init mx31_3ds_init(void)
static void __init mx31_3ds_late(void)
{
+ gpiod_add_lookup_table(&mx31_3ds_lcd_gpiod_table);
mx31_3ds_spi_devs[0].irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(mx31_3ds_spi_devs,
ARRAY_SIZE(mx31_3ds_spi_devs));
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index bd9443fa6edc..c7d23e9d4f8b 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -404,7 +404,7 @@ static struct resource pcm970_sja1000_resources[] = {
},
};
-struct sja1000_platform_data pcm970_sja1000_platform_data = {
+static struct sja1000_platform_data pcm970_sja1000_platform_data = {
.osc_freq = 16000000,
.ocr = OCR_TX1_PULLDOWN | OCR_TX0_PUSHPULL,
.cdr = CDR_CBP,
diff --git a/arch/arm/mach-imx/mach-vf610.c b/arch/arm/mach-imx/mach-vf610.c
index 9c929b09310c..208ff640698d 100644
--- a/arch/arm/mach-imx/mach-vf610.c
+++ b/arch/arm/mach-imx/mach-vf610.c
@@ -3,11 +3,57 @@
* Copyright 2012-2013 Freescale Semiconductor, Inc.
*/
+#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/io.h>
+
#include <linux/irqchip.h>
#include <asm/mach/arch.h>
#include <asm/hardware/cache-l2x0.h>
+#include "common.h"
+#include "hardware.h"
+
+#define MSCM_CPxCOUNT 0x00c
+#define MSCM_CPxCFG1 0x014
+
+static void __init vf610_detect_cpu(void)
+{
+ struct device_node *np;
+ u32 cpxcount, cpxcfg1;
+ unsigned int cpu_type;
+ void __iomem *mscm;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,vf610-mscm-cpucfg");
+ if (WARN_ON(!np))
+ return;
+
+ mscm = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (WARN_ON(!mscm))
+ return;
+
+ cpxcount = readl_relaxed(mscm + MSCM_CPxCOUNT);
+ cpxcfg1 = readl_relaxed(mscm + MSCM_CPxCFG1);
+
+ iounmap(mscm);
+
+ cpu_type = cpxcount ? MXC_CPU_VF600 : MXC_CPU_VF500;
+
+ if (cpxcfg1)
+ cpu_type |= MXC_CPU_VFx10;
+
+ mxc_set_cpu_type(cpu_type);
+}
+
+static void __init vf610_init_machine(void)
+{
+ vf610_detect_cpu();
+
+ of_platform_default_populate(NULL, NULL, NULL);
+}
+
static const char * const vf610_dt_compat[] __initconst = {
"fsl,vf500",
"fsl,vf510",
@@ -20,5 +66,6 @@ static const char * const vf610_dt_compat[] __initconst = {
DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF5xx/VF6xx (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
+ .init_machine = vf610_init_machine,
.dt_compat = vf610_dt_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index e117d2883df9..50a2edac8513 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pinctrl/machine.h>
-#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include "common.h"
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index dcbe7ec6d543..4e4125140025 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pinctrl/machine.h>
-#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include "common.h"
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 67264c48ed68..ea2d58a63903 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/pinctrl/machine.h>
-#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index 2bfd2d59b4a6..fe2d0f5abfcc 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -8,35 +8,15 @@
#define __ASM_ARCH_MXC_H__
#include <linux/types.h>
+#include <soc/imx/cpu.h>
#ifndef __ASM_ARCH_MXC_HARDWARE_H__
#error "Do not include directly."
#endif
-#define MXC_CPU_MX1 1
-#define MXC_CPU_MX21 21
-#define MXC_CPU_MX25 25
-#define MXC_CPU_MX27 27
-#define MXC_CPU_MX31 31
-#define MXC_CPU_MX35 35
-#define MXC_CPU_MX51 51
-#define MXC_CPU_MX53 53
-#define MXC_CPU_IMX6SL 0x60
-#define MXC_CPU_IMX6DL 0x61
-#define MXC_CPU_IMX6SX 0x62
-#define MXC_CPU_IMX6Q 0x63
-#define MXC_CPU_IMX6UL 0x64
-#define MXC_CPU_IMX6ULL 0x65
-/* virtual cpu id for i.mx6ulz */
-#define MXC_CPU_IMX6ULZ 0x6b
-#define MXC_CPU_IMX6SLL 0x67
-#define MXC_CPU_IMX7D 0x72
-#define MXC_CPU_IMX7ULP 0xff
-
#define IMX_DDR_TYPE_LPDDR2 1
#ifndef __ASSEMBLY__
-extern unsigned int __mxc_cpu_type;
#ifdef CONFIG_SOC_IMX6SL
static inline bool cpu_is_imx6sl(void)
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig
index 982eabc36163..7a9808b01763 100644
--- a/arch/arm/mach-integrator/Kconfig
+++ b/arch/arm/mach-integrator/Kconfig
@@ -3,7 +3,8 @@ menuconfig ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6
select ARM_AMBA
- select COMMON_CLK_VERSATILE
+ select CMA
+ select DMA_CMA
select HAVE_TCM
select ICST
select MFD_SYSCON
@@ -12,7 +13,6 @@ menuconfig ARCH_INTEGRATOR
select POWER_RESET_VERSATILE
select POWER_SUPPLY
select SOC_INTEGRATOR_CM
- select SPARSE_IRQ
select VERSATILE_FPGA_IRQ
help
Support for ARM's Integrator platform.
@@ -35,14 +35,13 @@ config INTEGRATOR_IMPD1
select ARM_VIC
select GPIO_PL061
select GPIOLIB
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
help
The IM-PD1 is an add-on logic module for the Integrator which
allows ARM(R) Ltd PrimeCells to be developed and evaluated.
The IM-PD1 can be found on the Integrator/PP2 platform.
- To compile this driver as a module, choose M here: the
- module will be called impd1.
-
config INTEGRATOR_CM7TDMI
bool "Integrator/CM7TDMI core module"
depends on ARCH_INTEGRATOR_AP
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 71b97ffe8d32..7857a55c90b0 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -5,7 +5,6 @@
# Object file lists.
-obj-y := core.o lm.o
+obj-y := core.o
obj-$(CONFIG_ARCH_INTEGRATOR_AP) += integrator_ap.o
obj-$(CONFIG_ARCH_INTEGRATOR_CP) += integrator_cp.o
-obj-$(CONFIG_INTEGRATOR_IMPD1) += impd1.o
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 9da3ae232211..0fe5e1dc9d89 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -21,10 +21,10 @@
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/pgtable.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "cm.h"
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
deleted file mode 100644
index 6f875ded8419..000000000000
--- a/arch/arm/mach-integrator/impd1.c
+++ /dev/null
@@ -1,475 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-integrator/impd1.c
- *
- * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
- *
- * This file provides the core support for the IM-PD1 module.
- *
- * Module / boot parameters.
- * lmid=n impd1.lmid=n - set the logic module position in stack to 'n'
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/amba/mmci.h>
-#include <linux/io.h>
-#include <linux/platform_data/clk-integrator.h>
-#include <linux/slab.h>
-#include <linux/irqchip/arm-vic.h>
-#include <linux/gpio/machine.h>
-
-#include <linux/sizes.h>
-#include "lm.h"
-#include "impd1.h"
-
-static int module_id;
-
-module_param_named(lmid, module_id, int, 0444);
-MODULE_PARM_DESC(lmid, "logic module stack position");
-
-struct impd1_module {
- void __iomem *base;
- void __iomem *vic_base;
-};
-
-void impd1_tweak_control(struct device *dev, u32 mask, u32 val)
-{
- struct impd1_module *impd1 = dev_get_drvdata(dev);
- u32 cur;
-
- val &= mask;
- cur = readl(impd1->base + IMPD1_CTRL) & ~mask;
- writel(cur | val, impd1->base + IMPD1_CTRL);
-}
-
-EXPORT_SYMBOL(impd1_tweak_control);
-
-/*
- * MMC support
- */
-static struct mmci_platform_data mmc_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
-};
-
-/*
- * CLCD support
- */
-#define PANEL PROSPECTOR
-
-#define LTM10C209 1
-#define PROSPECTOR 2
-#define SVGA 3
-#define VGA 4
-
-#if PANEL == VGA
-#define PANELTYPE vga
-static struct clcd_panel vga = {
- .mode = {
- .name = "VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 39721,
- .left_margin = 40,
- .right_margin = 24,
- .upper_margin = 32,
- .lower_margin = 11,
- .hsync_len = 96,
- .vsync_len = 2,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD | TIM2_IPC,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .connector = IMPD1_CTRL_DISP_VGA,
- .bpp = 16,
- .grayscale = 0,
-};
-
-#elif PANEL == SVGA
-#define PANELTYPE svga
-static struct clcd_panel svga = {
- .mode = {
- .name = "SVGA",
- .refresh = 0,
- .xres = 800,
- .yres = 600,
- .pixclock = 27778,
- .left_margin = 20,
- .right_margin = 20,
- .upper_margin = 5,
- .lower_margin = 5,
- .hsync_len = 164,
- .vsync_len = 62,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .connector = IMPD1_CTRL_DISP_VGA,
- .caps = CLCD_CAP_5551,
- .bpp = 16,
- .grayscale = 0,
-};
-
-#elif PANEL == PROSPECTOR
-#define PANELTYPE prospector
-static struct clcd_panel prospector = {
- .mode = {
- .name = "PROSPECTOR",
- .refresh = 0,
- .xres = 640,
- .yres = 480,
- .pixclock = 40000,
- .left_margin = 33,
- .right_margin = 64,
- .upper_margin = 36,
- .lower_margin = 7,
- .hsync_len = 64,
- .vsync_len = 25,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .fixedtimings = 1,
- .connector = IMPD1_CTRL_DISP_LCD,
- .bpp = 16,
- .grayscale = 0,
-};
-
-#elif PANEL == LTM10C209
-#define PANELTYPE ltm10c209
-/*
- * Untested.
- */
-static struct clcd_panel ltm10c209 = {
- .mode = {
- .name = "LTM10C209",
- .refresh = 0,
- .xres = 640,
- .yres = 480,
- .pixclock = 40000,
- .left_margin = 20,
- .right_margin = 20,
- .upper_margin = 19,
- .lower_margin = 19,
- .hsync_len = 20,
- .vsync_len = 10,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = TIM2_BCD,
- .cntl = CNTL_LCDTFT | CNTL_LCDVCOMP(1),
- .caps = CLCD_CAP_5551,
- .fixedtimings = 1,
- .connector = IMPD1_CTRL_DISP_LCD,
- .bpp = 16,
- .grayscale = 0,
-};
-#endif
-
-/*
- * Disable all display connectors on the interface module.
- */
-static void impd1fb_clcd_disable(struct clcd_fb *fb)
-{
- impd1_tweak_control(fb->dev->dev.parent, IMPD1_CTRL_DISP_MASK, 0);
-}
-
-/*
- * Enable the relevant connector on the interface module.
- */
-static void impd1fb_clcd_enable(struct clcd_fb *fb)
-{
- impd1_tweak_control(fb->dev->dev.parent, IMPD1_CTRL_DISP_MASK,
- fb->panel->connector | IMPD1_CTRL_DISP_ENABLE);
-}
-
-static int impd1fb_clcd_setup(struct clcd_fb *fb)
-{
- unsigned long framebase = fb->dev->res.start + 0x01000000;
- unsigned long framesize = SZ_1M;
- int ret = 0;
-
- fb->panel = &PANELTYPE;
-
- if (!request_mem_region(framebase, framesize, "clcd framebuffer")) {
- printk(KERN_ERR "IM-PD1: unable to reserve framebuffer\n");
- return -EBUSY;
- }
-
- fb->fb.screen_base = ioremap(framebase, framesize);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "IM-PD1: unable to map framebuffer\n");
- ret = -ENOMEM;
- goto free_buffer;
- }
-
- fb->fb.fix.smem_start = framebase;
- fb->fb.fix.smem_len = framesize;
-
- return 0;
-
- free_buffer:
- release_mem_region(framebase, framesize);
- return ret;
-}
-
-static int impd1fb_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- unsigned long start, size;
-
- start = vma->vm_pgoff + (fb->fb.fix.smem_start >> PAGE_SHIFT);
- size = vma->vm_end - vma->vm_start;
-
- return remap_pfn_range(vma, vma->vm_start, start, size,
- vma->vm_page_prot);
-}
-
-static void impd1fb_clcd_remove(struct clcd_fb *fb)
-{
- iounmap(fb->fb.screen_base);
- release_mem_region(fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
-
-static struct clcd_board impd1_clcd_data = {
- .name = "IM-PD/1",
- .caps = CLCD_CAP_5551 | CLCD_CAP_888,
- .check = clcdfb_check,
- .decode = clcdfb_decode,
- .disable = impd1fb_clcd_disable,
- .enable = impd1fb_clcd_enable,
- .setup = impd1fb_clcd_setup,
- .mmap = impd1fb_clcd_mmap,
- .remove = impd1fb_clcd_remove,
-};
-
-struct impd1_device {
- unsigned long offset;
- unsigned int irq[2];
- unsigned int id;
- void *platform_data;
-};
-
-static struct impd1_device impd1_devs[] = {
- {
- .offset = 0x00100000,
- .irq = { 1 },
- .id = 0x00141011,
- }, {
- .offset = 0x00200000,
- .irq = { 2 },
- .id = 0x00141011,
- }, {
- .offset = 0x00300000,
- .irq = { 3 },
- .id = 0x00041022,
- }, {
- .offset = 0x00400000,
- .irq = { 4 },
- .id = 0x00041061,
- }, {
- .offset = 0x00500000,
- .irq = { 5 },
- .id = 0x00041061,
- }, {
- .offset = 0x00600000,
- .irq = { 6 },
- .id = 0x00041130,
- }, {
- .offset = 0x00700000,
- .irq = { 7, 8 },
- .id = 0x00041181,
- .platform_data = &mmc_data,
- }, {
- .offset = 0x00800000,
- .irq = { 9 },
- .id = 0x00041041,
- }, {
- .offset = 0x01000000,
- .irq = { 11 },
- .id = 0x00041110,
- .platform_data = &impd1_clcd_data,
- }
-};
-
-/*
- * Valid IRQs: 0 thru 9 and 11, 10 unused.
- */
-#define IMPD1_VALID_IRQS 0x00000bffU
-
-/*
- * As this module is bool, it is OK to have this as __ref() - no
- * probe calls will be done after the initial system bootup, as devices
- * are discovered as part of the machine startup.
- */
-static int __ref impd1_probe(struct lm_device *dev)
-{
- struct impd1_module *impd1;
- int irq_base;
- int i;
-
- if (dev->id != module_id)
- return -EINVAL;
-
- if (!devm_request_mem_region(&dev->dev, dev->resource.start,
- SZ_4K, "LM registers"))
- return -EBUSY;
-
- impd1 = devm_kzalloc(&dev->dev, sizeof(struct impd1_module),
- GFP_KERNEL);
- if (!impd1)
- return -ENOMEM;
-
- impd1->base = devm_ioremap(&dev->dev, dev->resource.start, SZ_4K);
- if (!impd1->base)
- return -ENOMEM;
-
- integrator_impd1_clk_init(impd1->base, dev->id);
-
- if (!devm_request_mem_region(&dev->dev,
- dev->resource.start + 0x03000000,
- SZ_4K, "VIC"))
- return -EBUSY;
-
- impd1->vic_base = devm_ioremap(&dev->dev,
- dev->resource.start + 0x03000000,
- SZ_4K);
- if (!impd1->vic_base)
- return -ENOMEM;
-
- irq_base = vic_init_cascaded(impd1->vic_base, dev->irq,
- IMPD1_VALID_IRQS, 0);
-
- lm_set_drvdata(dev, impd1);
-
- dev_info(&dev->dev, "IM-PD1 found at 0x%08lx\n",
- (unsigned long)dev->resource.start);
-
- for (i = 0; i < ARRAY_SIZE(impd1_devs); i++) {
- struct impd1_device *idev = impd1_devs + i;
- struct amba_device *d;
- unsigned long pc_base;
- char devname[32];
- int irq1 = idev->irq[0];
- int irq2 = idev->irq[1];
-
- /* Translate IRQs to IM-PD1 local numberspace */
- if (irq1)
- irq1 += irq_base;
- if (irq2)
- irq2 += irq_base;
-
- pc_base = dev->resource.start + idev->offset;
- snprintf(devname, 32, "lm%x:%5.5lx", dev->id, idev->offset >> 12);
-
- /* Add GPIO descriptor lookup table for the PL061 block */
- if (idev->offset == 0x00400000) {
- struct gpiod_lookup_table *lookup;
- char *chipname;
- char *mmciname;
-
- lookup = devm_kzalloc(&dev->dev,
- struct_size(lookup, table, 3),
- GFP_KERNEL);
- chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
- mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
- "lm%x:00700", dev->id);
- if (!lookup || !chipname || !mmciname)
- return -ENOMEM;
-
- lookup->dev_id = mmciname;
- /*
- * Offsets on GPIO block 1:
- * 3 = MMC WP (write protect)
- * 4 = MMC CD (card detect)
- *
- * Offsets on GPIO block 2:
- * 0 = Up key
- * 1 = Down key
- * 2 = Left key
- * 3 = Right key
- * 4 = Key lower left
- * 5 = Key lower right
- */
- /* We need the two MMCI GPIO entries */
- lookup->table[0] = (struct gpiod_lookup)
- GPIO_LOOKUP(chipname, 3, "wp", 0);
- lookup->table[1] = (struct gpiod_lookup)
- GPIO_LOOKUP(chipname, 4, "cd", GPIO_ACTIVE_LOW);
- gpiod_add_lookup_table(lookup);
- }
-
- d = amba_ahb_device_add_res(&dev->dev, devname, pc_base, SZ_4K,
- irq1, irq2,
- idev->platform_data, idev->id,
- &dev->resource);
- if (IS_ERR(d)) {
- dev_err(&dev->dev, "unable to register device: %ld\n", PTR_ERR(d));
- continue;
- }
- }
-
- return 0;
-}
-
-static int impd1_remove_one(struct device *dev, void *data)
-{
- device_unregister(dev);
- return 0;
-}
-
-static void impd1_remove(struct lm_device *dev)
-{
- device_for_each_child(&dev->dev, NULL, impd1_remove_one);
- integrator_impd1_clk_exit(dev->id);
-
- lm_set_drvdata(dev, NULL);
-}
-
-static struct lm_driver impd1_driver = {
- .drv = {
- .name = "impd1",
- /*
- * As we're dropping the probe() function, suppress driver
- * binding from sysfs.
- */
- .suppress_bind_attrs = true,
- },
- .probe = impd1_probe,
- .remove = impd1_remove,
-};
-
-static int __init impd1_init(void)
-{
- return lm_driver_register(&impd1_driver);
-}
-
-static void __exit impd1_exit(void)
-{
- lm_driver_unregister(&impd1_driver);
-}
-
-module_init(impd1_init);
-module_exit(impd1_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Integrator/IM-PD1 logic module core driver");
-MODULE_AUTHOR("Deep Blue Solutions Ltd");
diff --git a/arch/arm/mach-integrator/impd1.h b/arch/arm/mach-integrator/impd1.h
deleted file mode 100644
index 36124d34c8f7..000000000000
--- a/arch/arm/mach-integrator/impd1.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#define IMPD1_LEDS 0x0c
-#define IMPD1_INT 0x10
-#define IMPD1_SW 0x14
-#define IMPD1_CTRL 0x18
-
-#define IMPD1_CTRL_DISP_LCD (0 << 0)
-#define IMPD1_CTRL_DISP_VGA (1 << 0)
-#define IMPD1_CTRL_DISP_LCD1 (2 << 0)
-#define IMPD1_CTRL_DISP_ENABLE (1 << 2)
-#define IMPD1_CTRL_DISP_MASK (7 << 0)
-
-struct device;
-
-void impd1_tweak_control(struct device *dev, u32 mask, u32 val);
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 035069ea2c8b..58b02cbbea72 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -23,7 +23,6 @@
#include "hardware.h"
#include "cm.h"
#include "common.h"
-#include "lm.h"
/* Regmap to the AP system controller */
static struct regmap *ap_syscon_map;
@@ -174,10 +173,7 @@ static const struct of_device_id ap_syscon_match[] = {
static void __init ap_init_of(void)
{
- u32 sc_dec;
struct device_node *syscon;
- int ret;
- int i;
of_platform_default_populate(NULL, ap_auxdata_lookup, NULL);
@@ -189,33 +185,6 @@ static void __init ap_init_of(void)
pr_crit("could not find Integrator/AP system controller\n");
return;
}
-
- ret = regmap_read(ap_syscon_map,
- INTEGRATOR_SC_DEC_OFFSET,
- &sc_dec);
- if (ret) {
- pr_crit("could not read from Integrator/AP syscon\n");
- return;
- }
-
- for (i = 0; i < 4; i++) {
- struct lm_device *lmdev;
-
- if ((sc_dec & (16 << i)) == 0)
- continue;
-
- lmdev = kzalloc(sizeof(struct lm_device), GFP_KERNEL);
- if (!lmdev)
- continue;
-
- lmdev->resource.start = 0xc0000000 + 0x10000000 * i;
- lmdev->resource.end = lmdev->resource.start + 0x0fffffff;
- lmdev->resource.flags = IORESOURCE_MEM;
- lmdev->irq = irq_of_parse_and_map(syscon, i);
- lmdev->id = i;
-
- lm_device_register(lmdev);
- }
}
static const char * ap_dt_board_compat[] = {
diff --git a/arch/arm/mach-integrator/lm.c b/arch/arm/mach-integrator/lm.c
deleted file mode 100644
index 55cd173d1d76..000000000000
--- a/arch/arm/mach-integrator/lm.c
+++ /dev/null
@@ -1,96 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-integrator/lm.c
- *
- * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include "lm.h"
-
-#define to_lm_device(d) container_of(d, struct lm_device, dev)
-#define to_lm_driver(d) container_of(d, struct lm_driver, drv)
-
-static int lm_match(struct device *dev, struct device_driver *drv)
-{
- return 1;
-}
-
-static int lm_bus_probe(struct device *dev)
-{
- struct lm_device *lmdev = to_lm_device(dev);
- struct lm_driver *lmdrv = to_lm_driver(dev->driver);
-
- return lmdrv->probe(lmdev);
-}
-
-static int lm_bus_remove(struct device *dev)
-{
- struct lm_device *lmdev = to_lm_device(dev);
- struct lm_driver *lmdrv = to_lm_driver(dev->driver);
-
- if (lmdrv->remove)
- lmdrv->remove(lmdev);
- return 0;
-}
-
-static struct bus_type lm_bustype = {
- .name = "logicmodule",
- .match = lm_match,
- .probe = lm_bus_probe,
- .remove = lm_bus_remove,
-// .suspend = lm_bus_suspend,
-// .resume = lm_bus_resume,
-};
-
-static int __init lm_init(void)
-{
- return bus_register(&lm_bustype);
-}
-
-postcore_initcall(lm_init);
-
-int lm_driver_register(struct lm_driver *drv)
-{
- drv->drv.bus = &lm_bustype;
- return driver_register(&drv->drv);
-}
-
-void lm_driver_unregister(struct lm_driver *drv)
-{
- driver_unregister(&drv->drv);
-}
-
-static void lm_device_release(struct device *dev)
-{
- struct lm_device *d = to_lm_device(dev);
-
- kfree(d);
-}
-
-int lm_device_register(struct lm_device *dev)
-{
- int ret;
-
- dev->dev.release = lm_device_release;
- dev->dev.bus = &lm_bustype;
-
- ret = dev_set_name(&dev->dev, "lm%d", dev->id);
- if (ret)
- return ret;
- dev->resource.name = dev_name(&dev->dev);
-
- ret = request_resource(&iomem_resource, &dev->resource);
- if (ret == 0) {
- ret = device_register(&dev->dev);
- if (ret)
- release_resource(&dev->resource);
- }
- return ret;
-}
-
-EXPORT_SYMBOL(lm_driver_register);
-EXPORT_SYMBOL(lm_driver_unregister);
diff --git a/arch/arm/mach-integrator/lm.h b/arch/arm/mach-integrator/lm.h
deleted file mode 100644
index 172966a699bd..000000000000
--- a/arch/arm/mach-integrator/lm.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-struct lm_device {
- struct device dev;
- struct resource resource;
- unsigned int irq;
- unsigned int id;
-};
-
-struct lm_driver {
- struct device_driver drv;
- int (*probe)(struct lm_device *);
- void (*remove)(struct lm_device *);
- int (*suspend)(struct lm_device *, pm_message_t);
- int (*resume)(struct lm_device *);
-};
-
-int lm_driver_register(struct lm_driver *drv);
-void lm_driver_unregister(struct lm_driver *drv);
-
-int lm_device_register(struct lm_device *dev);
-
-#define lm_get_drvdata(lm) dev_get_drvdata(&(lm)->dev)
-#define lm_set_drvdata(lm,d) dev_set_drvdata(&(lm)->dev, d)
diff --git a/arch/arm/mach-iop32x/i2c.c b/arch/arm/mach-iop32x/i2c.c
index dc9f6a14ab1b..e422286af469 100644
--- a/arch/arm/mach-iop32x/i2c.c
+++ b/arch/arm/mach-iop32x/i2c.c
@@ -17,7 +17,6 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/gpio/machine.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
diff --git a/arch/arm/mach-iop32x/iq31244.c b/arch/arm/mach-iop32x/iq31244.c
index 04a7d389d365..49caaa703881 100644
--- a/arch/arm/mach-iop32x/iq31244.c
+++ b/arch/arm/mach-iop32x/iq31244.c
@@ -31,7 +31,6 @@
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "irqs.h"
diff --git a/arch/arm/mach-iop32x/iq80321.c b/arch/arm/mach-iop32x/iq80321.c
index 4bd596d6c9c1..b455d7073296 100644
--- a/arch/arm/mach-iop32x/iq80321.c
+++ b/arch/arm/mach-iop32x/iq80321.c
@@ -27,7 +27,6 @@
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "irqs.h"
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 5382a93ad0f8..78b9a5ee41c9 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -35,7 +35,6 @@
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "irqs.h"
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 381f452de28d..184262d660ba 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -33,7 +33,6 @@
#include <mach/hardware.h>
#include <mach/io.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/exception.h>
#include <asm/irq.h>
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index c810e23a8fa0..673fcf3b34b1 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -12,11 +12,11 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/smp_plat.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include "keystone.h"
diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c
index f6f102fa9e23..e6e9f93a1f01 100644
--- a/arch/arm/mach-mediatek/mediatek.c
+++ b/arch/arm/mach-mediatek/mediatek.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/clocksource.h>
diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig
index b58a03b18bde..0dd999212944 100644
--- a/arch/arm/mach-mmp/Kconfig
+++ b/arch/arm/mach-mmp/Kconfig
@@ -110,7 +110,6 @@ config MACH_MMP_DT
depends on ARCH_MULTI_V5
select PINCTRL
select PINCTRL_SINGLE
- select COMMON_CLK
select ARCH_HAS_RESET_CONTROLLER
select CPU_MOHAWK
help
@@ -125,6 +124,8 @@ config MACH_MMP2_DT
select PINCTRL_SINGLE
select ARCH_HAS_RESET_CONTROLLER
select CPU_PJ4
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
help
Include support for Marvell MMP2 based platforms using
the device tree.
diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile
index 7b3a7f979eec..e3758f7e1fe7 100644
--- a/arch/arm/mach-mmp/Makefile
+++ b/arch/arm/mach-mmp/Makefile
@@ -12,12 +12,6 @@ obj-$(CONFIG_CPU_PXA910) += pxa910.o
obj-$(CONFIG_CPU_MMP2) += mmp2.o
obj-$(CONFIG_MMP_SRAM) += sram.o
-ifeq ($(CONFIG_COMMON_CLK), )
-obj-y += clock.o
-obj-$(CONFIG_CPU_PXA168) += clock-pxa168.o
-obj-$(CONFIG_CPU_PXA910) += clock-pxa910.o
-obj-$(CONFIG_CPU_MMP2) += clock-mmp2.o
-endif
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_CPU_PXA910) += pm-pxa910.o
obj-$(CONFIG_CPU_MMP2) += pm-mmp2.o
diff --git a/arch/arm/mach-mmp/clock-mmp2.c b/arch/arm/mach-mmp/clock-mmp2.c
deleted file mode 100644
index 7536398bf1c1..000000000000
--- a/arch/arm/mach-mmp/clock-mmp2.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk/mmp.h>
-
-#include "addr-map.h"
-
-#include "common.h"
-#include "clock.h"
-
-/*
- * APB Clock register offsets for MMP2
- */
-#define APBC_RTC APBC_REG(0x000)
-#define APBC_TWSI1 APBC_REG(0x004)
-#define APBC_TWSI2 APBC_REG(0x008)
-#define APBC_TWSI3 APBC_REG(0x00c)
-#define APBC_TWSI4 APBC_REG(0x010)
-#define APBC_KPC APBC_REG(0x018)
-#define APBC_UART1 APBC_REG(0x02c)
-#define APBC_UART2 APBC_REG(0x030)
-#define APBC_UART3 APBC_REG(0x034)
-#define APBC_GPIO APBC_REG(0x038)
-#define APBC_PWM0 APBC_REG(0x03c)
-#define APBC_PWM1 APBC_REG(0x040)
-#define APBC_PWM2 APBC_REG(0x044)
-#define APBC_PWM3 APBC_REG(0x048)
-#define APBC_SSP0 APBC_REG(0x04c)
-#define APBC_SSP1 APBC_REG(0x050)
-#define APBC_SSP2 APBC_REG(0x054)
-#define APBC_SSP3 APBC_REG(0x058)
-#define APBC_SSP4 APBC_REG(0x05c)
-#define APBC_SSP5 APBC_REG(0x060)
-#define APBC_TWSI5 APBC_REG(0x07c)
-#define APBC_TWSI6 APBC_REG(0x080)
-#define APBC_UART4 APBC_REG(0x088)
-
-#define APMU_USB APMU_REG(0x05c)
-#define APMU_NAND APMU_REG(0x060)
-#define APMU_SDH0 APMU_REG(0x054)
-#define APMU_SDH1 APMU_REG(0x058)
-#define APMU_SDH2 APMU_REG(0x0e8)
-#define APMU_SDH3 APMU_REG(0x0ec)
-
-static void sdhc_clk_enable(struct clk *clk)
-{
- uint32_t clk_rst;
-
- clk_rst = __raw_readl(clk->clk_rst);
- clk_rst |= clk->enable_val;
- __raw_writel(clk_rst, clk->clk_rst);
-}
-
-static void sdhc_clk_disable(struct clk *clk)
-{
- uint32_t clk_rst;
-
- clk_rst = __raw_readl(clk->clk_rst);
- clk_rst &= ~clk->enable_val;
- __raw_writel(clk_rst, clk->clk_rst);
-}
-
-struct clkops sdhc_clk_ops = {
- .enable = sdhc_clk_enable,
- .disable = sdhc_clk_disable,
-};
-
-/* APB peripheral clocks */
-static APBC_CLK(uart1, UART1, 1, 26000000);
-static APBC_CLK(uart2, UART2, 1, 26000000);
-static APBC_CLK(uart3, UART3, 1, 26000000);
-static APBC_CLK(uart4, UART4, 1, 26000000);
-static APBC_CLK(twsi1, TWSI1, 0, 26000000);
-static APBC_CLK(twsi2, TWSI2, 0, 26000000);
-static APBC_CLK(twsi3, TWSI3, 0, 26000000);
-static APBC_CLK(twsi4, TWSI4, 0, 26000000);
-static APBC_CLK(twsi5, TWSI5, 0, 26000000);
-static APBC_CLK(twsi6, TWSI6, 0, 26000000);
-static APBC_CLK(gpio, GPIO, 0, 26000000);
-
-static APMU_CLK(nand, NAND, 0xbf, 100000000);
-static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops);
-static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops);
-static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops);
-static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops);
-
-static struct clk_lookup mmp2_clkregs[] = {
- INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
- INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
- INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
- INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL),
- INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL),
- INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL),
- INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL),
- INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL),
- INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
- INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
- INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_gpio, "mmp2-gpio", NULL),
- INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
-};
-
-void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
- phys_addr_t apbc_phys)
-{
- clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs));
-}
diff --git a/arch/arm/mach-mmp/clock-pxa168.c b/arch/arm/mach-mmp/clock-pxa168.c
deleted file mode 100644
index 2d4a5d96a1ff..000000000000
--- a/arch/arm/mach-mmp/clock-pxa168.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk/mmp.h>
-
-#include "addr-map.h"
-
-#include "common.h"
-#include "clock.h"
-
-/*
- * APB clock register offsets for PXA168
- */
-#define APBC_UART1 APBC_REG(0x000)
-#define APBC_UART2 APBC_REG(0x004)
-#define APBC_GPIO APBC_REG(0x008)
-#define APBC_PWM1 APBC_REG(0x00c)
-#define APBC_PWM2 APBC_REG(0x010)
-#define APBC_PWM3 APBC_REG(0x014)
-#define APBC_PWM4 APBC_REG(0x018)
-#define APBC_RTC APBC_REG(0x028)
-#define APBC_TWSI0 APBC_REG(0x02c)
-#define APBC_KPC APBC_REG(0x030)
-#define APBC_TWSI1 APBC_REG(0x06c)
-#define APBC_UART3 APBC_REG(0x070)
-#define APBC_SSP1 APBC_REG(0x81c)
-#define APBC_SSP2 APBC_REG(0x820)
-#define APBC_SSP3 APBC_REG(0x84c)
-#define APBC_SSP4 APBC_REG(0x858)
-#define APBC_SSP5 APBC_REG(0x85c)
-
-#define APMU_NAND APMU_REG(0x060)
-#define APMU_LCD APMU_REG(0x04c)
-#define APMU_ETH APMU_REG(0x0fc)
-#define APMU_USB APMU_REG(0x05c)
-
-/* APB peripheral clocks */
-static APBC_CLK(uart1, UART1, 1, 14745600);
-static APBC_CLK(uart2, UART2, 1, 14745600);
-static APBC_CLK(uart3, UART3, 1, 14745600);
-static APBC_CLK(twsi0, TWSI0, 1, 33000000);
-static APBC_CLK(twsi1, TWSI1, 1, 33000000);
-static APBC_CLK(pwm1, PWM1, 1, 13000000);
-static APBC_CLK(pwm2, PWM2, 1, 13000000);
-static APBC_CLK(pwm3, PWM3, 1, 13000000);
-static APBC_CLK(pwm4, PWM4, 1, 13000000);
-static APBC_CLK(ssp1, SSP1, 4, 0);
-static APBC_CLK(ssp2, SSP2, 4, 0);
-static APBC_CLK(ssp3, SSP3, 4, 0);
-static APBC_CLK(ssp4, SSP4, 4, 0);
-static APBC_CLK(ssp5, SSP5, 4, 0);
-static APBC_CLK(gpio, GPIO, 0, 13000000);
-static APBC_CLK(keypad, KPC, 0, 32000);
-static APBC_CLK(rtc, RTC, 8, 32768);
-
-static APMU_CLK(nand, NAND, 0x19b, 156000000);
-static APMU_CLK(lcd, LCD, 0x7f, 312000000);
-static APMU_CLK(eth, ETH, 0x09, 0);
-static APMU_CLK(usb, USB, 0x12, 0);
-
-/* device and clock bindings */
-static struct clk_lookup pxa168_clkregs[] = {
- INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
- INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
- INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
- INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
- INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
- INIT_CLKREG(&clk_pwm1, "pxa168-pwm.0", NULL),
- INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL),
- INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL),
- INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL),
- INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL),
- INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL),
- INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL),
- INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL),
- INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL),
- INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL),
- INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL),
- INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL),
- INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"),
- INIT_CLKREG(&clk_usb, NULL, "PXA168-USBCLK"),
- INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
-};
-
-void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
- phys_addr_t apbc_phys)
-{
- clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs));
-}
diff --git a/arch/arm/mach-mmp/clock-pxa910.c b/arch/arm/mach-mmp/clock-pxa910.c
deleted file mode 100644
index 3cd83ff91bb0..000000000000
--- a/arch/arm/mach-mmp/clock-pxa910.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk/mmp.h>
-
-#include "addr-map.h"
-
-#include "common.h"
-#include "clock.h"
-
-/*
- * APB Clock register offsets for PXA910
- */
-#define APBC_UART0 APBC_REG(0x000)
-#define APBC_UART1 APBC_REG(0x004)
-#define APBC_GPIO APBC_REG(0x008)
-#define APBC_PWM1 APBC_REG(0x00c)
-#define APBC_PWM2 APBC_REG(0x010)
-#define APBC_PWM3 APBC_REG(0x014)
-#define APBC_PWM4 APBC_REG(0x018)
-#define APBC_SSP1 APBC_REG(0x01c)
-#define APBC_SSP2 APBC_REG(0x020)
-#define APBC_RTC APBC_REG(0x028)
-#define APBC_TWSI0 APBC_REG(0x02c)
-#define APBC_KPC APBC_REG(0x030)
-#define APBC_SSP3 APBC_REG(0x04c)
-#define APBC_TWSI1 APBC_REG(0x06c)
-
-#define APMU_NAND APMU_REG(0x060)
-#define APMU_USB APMU_REG(0x05c)
-
-static APBC_CLK(uart1, UART0, 1, 14745600);
-static APBC_CLK(uart2, UART1, 1, 14745600);
-static APBC_CLK(twsi0, TWSI0, 1, 33000000);
-static APBC_CLK(twsi1, TWSI1, 1, 33000000);
-static APBC_CLK(pwm1, PWM1, 1, 13000000);
-static APBC_CLK(pwm2, PWM2, 1, 13000000);
-static APBC_CLK(pwm3, PWM3, 1, 13000000);
-static APBC_CLK(pwm4, PWM4, 1, 13000000);
-static APBC_CLK(gpio, GPIO, 0, 13000000);
-static APBC_CLK(rtc, RTC, 8, 32768);
-
-static APMU_CLK(nand, NAND, 0x19b, 156000000);
-static APMU_CLK(u2o, USB, 0x1b, 480000000);
-
-/* device and clock bindings */
-static struct clk_lookup pxa910_clkregs[] = {
- INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
- INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
- INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
- INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
- INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL),
- INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL),
- INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
- INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
- INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL),
- INIT_CLKREG(&clk_u2o, NULL, "U2OCLK"),
- INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
-};
-
-void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
- phys_addr_t apbc_phys, phys_addr_t apbcp_phys)
-{
- clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs));
-}
diff --git a/arch/arm/mach-mmp/clock.c b/arch/arm/mach-mmp/clock.c
deleted file mode 100644
index 291fe41e3547..000000000000
--- a/arch/arm/mach-mmp/clock.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-mmp/clock.c
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "regs-apbc.h"
-#include "clock.h"
-
-static void apbc_clk_enable(struct clk *clk)
-{
- uint32_t clk_rst;
-
- clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(clk->fnclksel);
- __raw_writel(clk_rst, clk->clk_rst);
-}
-
-static void apbc_clk_disable(struct clk *clk)
-{
- __raw_writel(0, clk->clk_rst);
-}
-
-struct clkops apbc_clk_ops = {
- .enable = apbc_clk_enable,
- .disable = apbc_clk_disable,
-};
-
-static void apmu_clk_enable(struct clk *clk)
-{
- __raw_writel(clk->enable_val, clk->clk_rst);
-}
-
-static void apmu_clk_disable(struct clk *clk)
-{
- __raw_writel(0, clk->clk_rst);
-}
-
-struct clkops apmu_clk_ops = {
- .enable = apmu_clk_enable,
- .disable = apmu_clk_disable,
-};
-
-static DEFINE_SPINLOCK(clocks_lock);
-
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clocks_lock, flags);
- if (clk->enabled++ == 0)
- clk->ops->enable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (!clk)
- return;
-
- WARN_ON(clk->enabled == 0);
-
- spin_lock_irqsave(&clocks_lock, flags);
- if (--clk->enabled == 0)
- clk->ops->disable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- unsigned long rate;
-
- if (clk->ops->getrate)
- rate = clk->ops->getrate(clk);
- else
- rate = clk->rate;
-
- return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (clk->ops->setrate) {
- spin_lock_irqsave(&clocks_lock, flags);
- ret = clk->ops->setrate(clk, rate);
- spin_unlock_irqrestore(&clocks_lock, flags);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
diff --git a/arch/arm/mach-mmp/clock.h b/arch/arm/mach-mmp/clock.h
deleted file mode 100644
index 0256c894fa11..000000000000
--- a/arch/arm/mach-mmp/clock.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <linux/clkdev.h>
-
-struct clkops {
- void (*enable)(struct clk *);
- void (*disable)(struct clk *);
- unsigned long (*getrate)(struct clk *);
- int (*setrate)(struct clk *, unsigned long);
-};
-
-struct clk {
- const struct clkops *ops;
-
- void __iomem *clk_rst; /* clock reset control register */
- int fnclksel; /* functional clock select (APBC) */
- uint32_t enable_val; /* value for clock enable (APMU) */
- unsigned long rate;
- int enabled;
-};
-
-extern struct clkops apbc_clk_ops;
-extern struct clkops apmu_clk_ops;
-
-#define APBC_CLK(_name, _reg, _fnclksel, _rate) \
-struct clk clk_##_name = { \
- .clk_rst = APBC_##_reg, \
- .fnclksel = _fnclksel, \
- .rate = _rate, \
- .ops = &apbc_clk_ops, \
-}
-
-#define APBC_CLK_OPS(_name, _reg, _fnclksel, _rate, _ops) \
-struct clk clk_##_name = { \
- .clk_rst = APBC_##_reg, \
- .fnclksel = _fnclksel, \
- .rate = _rate, \
- .ops = _ops, \
-}
-
-#define APMU_CLK(_name, _reg, _eval, _rate) \
-struct clk clk_##_name = { \
- .clk_rst = APMU_##_reg, \
- .enable_val = _eval, \
- .rate = _rate, \
- .ops = &apmu_clk_ops, \
-}
-
-#define APMU_CLK_OPS(_name, _reg, _eval, _rate, _ops) \
-struct clk clk_##_name = { \
- .clk_rst = APMU_##_reg, \
- .enable_val = _eval, \
- .rate = _rate, \
- .ops = _ops, \
-}
-
-#define INIT_CLKREG(_clk, _devname, _conname) \
- { \
- .clk = _clk, \
- .dev_id = _devname, \
- .con_id = _conname, \
- }
-
-extern struct clk clk_pxa168_gpio;
-extern struct clk clk_pxa168_timers;
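
For reference, the legacy clock macros deleted in this series are plain struct initializers; for example, the pxa168 lines "static APBC_CLK(uart1, UART1, 1, 14745600);" and "INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL)" seen above expand to roughly the following (shown for illustration only; APBC_UART1 and apbc_clk_ops come from the deleted files):

static struct clk clk_uart1 = {
	.clk_rst	= APBC_UART1,	/* APB clock/reset control register */
	.fnclksel	= 1,		/* functional clock select field */
	.rate		= 14745600,	/* fixed rate returned by clk_get_rate() */
	.ops		= &apbc_clk_ops,
};

static struct clk_lookup pxa168_clkregs[] = {
	{ .clk = &clk_uart1, .dev_id = "pxa2xx-uart.0", .con_id = NULL },
};

The common clock framework versions of these clocks now live under drivers/clk/mmp/, which is why the board-level tables can be removed here.
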
diff --git a/arch/arm/mach-mmp/mmp-dt.c b/arch/arm/mach-mmp/mmp-dt.c
index 91214996acec..3f43c0867dca 100644
--- a/arch/arm/mach-mmp/mmp-dt.c
+++ b/arch/arm/mach-mmp/mmp-dt.c
@@ -8,7 +8,7 @@
#include <linux/irqchip.h>
#include <linux/of_platform.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/mach-mmp/mmp2-dt.c b/arch/arm/mach-mmp/mmp2-dt.c
index 510c762ddc48..34a5fe4b3949 100644
--- a/arch/arm/mach-mmp/mmp2-dt.c
+++ b/arch/arm/mach-mmp/mmp2-dt.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/of_platform.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index b642e900727a..1e9389245d0e 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -19,7 +19,6 @@
#include <asm/system_misc.h>
#include "addr-map.h"
-#include "clock.h"
#include "common.h"
#include <linux/soc/mmp/cputype.h>
#include "devices.h"
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 049a65f47b42..41b2e8abc9e6 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -34,7 +34,6 @@
#include "regs-apbc.h"
#include "irqs.h"
#include <linux/soc/mmp/cputype.h>
-#include "clock.h"
#define TIMERS_VIRT_BASE TIMERS1_VIRT_BASE
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 7a5629b9bede..34dbeaab94b0 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -47,7 +47,6 @@ config MACH_ARMADA_375
select ARMADA_375_CLK
select HAVE_ARM_SCU
select HAVE_ARM_TWD if SMP
- select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_375
help
@@ -66,7 +65,6 @@ config MACH_ARMADA_38X
select ARMADA_38X_CLK
select HAVE_ARM_SCU
select HAVE_ARM_TWD if SMP
- select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X
help
@@ -82,7 +80,6 @@ config MACH_ARMADA_39X
select CACHE_L2X0
select HAVE_ARM_SCU
select HAVE_ARM_TWD if SMP
- select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_39X
help
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dca7d06c0b93..ea23205bf70f 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -66,7 +66,6 @@ config SOC_AM43XX
select ARCH_OMAP2PLUS
select ARM_GIC
select MACH_OMAP_GENERIC
- select MIGHT_HAVE_CACHE_L2X0
select HAVE_ARM_SCU
select GENERIC_CLOCKEVENTS_BROADCAST
select HAVE_ARM_TWD
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 5017a3be0ff0..732e614c56b2 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -7,7 +7,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
-I$(srctree)/arch/arm/plat-omap/include
# Common support
-obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
+obj-y := id.o io.o control.o devices.o fb.o pm.o \
common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
omap_device.o omap-headsmp.o sram.o
@@ -46,6 +46,10 @@ obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) $(smp-y) sleep44xx.o
obj-$(CONFIG_SOC_AM43XX) += $(omap-4-5-common)
obj-$(CONFIG_SOC_DRA7XX) += $(omap-4-5-common) $(smp-y) sleep44xx.o
+omap5-dra7-common-$(CONFIG_SOC_HAS_REALTIME_COUNTER) = timer.o
+obj-$(CONFIG_SOC_OMAP5) += $(omap5-dra7-common-y)
+obj-$(CONFIG_SOC_DRA7XX) += $(omap5-dra7-common-y)
+
# Functions loaded to SRAM
obj-$(CONFIG_SOC_OMAP2420) += sram242x.o
obj-$(CONFIG_SOC_OMAP2430) += sram243x.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index ff992f8895ee..334923d7652d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -12,6 +12,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
+#include <linux/clocksource.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>
@@ -31,6 +32,20 @@ static void __init __maybe_unused omap_generic_init(void)
omap_soc_device_init();
}
+/* Clocks are needed early, see drivers/clocksource for the rest */
+void __init __maybe_unused omap_init_time_of(void)
+{
+ omap_clk_init();
+ timer_probe();
+}
+
+/* Used by am437x for ARM timer in non-SMP configurations */
+#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+void tick_broadcast(const struct cpumask *mask)
+{
+}
+#endif
+
#ifdef CONFIG_SOC_OMAP2420
static const char *const omap242x_boards_compat[] __initconst = {
"ti,omap2420",
@@ -42,7 +57,7 @@ DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
.init_machine = omap_generic_init,
- .init_time = omap_init_time,
+ .init_time = omap_init_time_of,
.dt_compat = omap242x_boards_compat,
.restart = omap2xxx_restart,
MACHINE_END
@@ -59,7 +74,7 @@ DT_MACHINE_START(OMAP243X_DT, "Generic OMAP2430 (Flattened Device Tree)")
.map_io = omap243x_map_io,
.init_early = omap2430_init_early,
.init_machine = omap_generic_init,
- .init_time = omap_init_time,
+ .init_time = omap_init_time_of,
.dt_compat = omap243x_boards_compat,
.restart = omap2xxx_restart,
MACHINE_END
@@ -106,7 +121,7 @@ DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
.init_early = omap3430_init_early,
.init_machine = omap_generic_init,
.init_late = omap3_init_late,
- .init_time = omap_init_time,
+ .init_time = omap_init_time_of,
.dt_compat = n900_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
@@ -124,7 +139,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
.init_early = omap3430_init_early,
.init_machine = omap_generic_init,
.init_late = omap3_init_late,
- .init_time = omap_init_time,
+ .init_time = omap_init_time_of,
.dt_compat = omap3_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
@@ -141,7 +156,7 @@ DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
.init_early = omap3630_init_early,
.init_machine = omap_generic_init,
.init_late = omap3_init_late,
- .init_time = omap_init_time,
+ .init_time = omap_init_time_of,
.dt_compat = omap36xx_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
@@ -158,7 +173,7 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
.init_early = omap3430_init_early,
.init_machine = omap_generic_init,
.init_late = omap3_init_late,
- .init_time = omap3_secure_sync32k_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = omap3_gp_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
@@ -174,7 +189,7 @@ DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)")
.init_early = am35xx_init_early,
.init_machine = omap_generic_init,
.init_late = omap3_init_late,
- .init_time = omap3_gptimer_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = am3517_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
@@ -193,7 +208,7 @@ DT_MACHINE_START(TI814X_DT, "Generic ti814x (Flattened Device Tree)")
.init_early = ti814x_init_early,
.init_machine = omap_generic_init,
.init_late = ti81xx_init_late,
- .init_time = omap3_gptimer_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = ti814x_boards_compat,
.restart = ti81xx_restart,
MACHINE_END
@@ -210,7 +225,7 @@ DT_MACHINE_START(TI816X_DT, "Generic ti816x (Flattened Device Tree)")
.init_early = ti816x_init_early,
.init_machine = omap_generic_init,
.init_late = ti81xx_init_late,
- .init_time = omap3_gptimer_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = ti816x_boards_compat,
.restart = ti81xx_restart,
MACHINE_END
@@ -228,7 +243,7 @@ DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")
.init_early = am33xx_init_early,
.init_machine = omap_generic_init,
.init_late = am33xx_init_late,
- .init_time = omap3_gptimer_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = am33xx_boards_compat,
.restart = am33xx_restart,
MACHINE_END
@@ -253,7 +268,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
.init_late = omap4430_init_late,
- .init_time = omap4_local_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = omap4_boards_compat,
.restart = omap44xx_restart,
MACHINE_END
@@ -300,7 +315,7 @@ DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
.init_late = am43xx_init_late,
.init_irq = omap_gic_of_init,
.init_machine = omap_generic_init,
- .init_time = omap3_gptimer_timer_init,
+ .init_time = omap_init_time_of,
.dt_compat = am43_boards_compat,
.restart = omap44xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index 6005c4ed3bc6..8285be7c1eab 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -214,7 +214,7 @@ static struct clockdomain l4_secure_44xx_clkdm = {
.dep_bit = OMAP4430_L4SEC_STATDEP_SHIFT,
.wkdep_srcs = l4_secure_wkup_sleep_deps,
.sleepdep_srcs = l4_secure_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l4_per_44xx_clkdm = {
diff --git a/arch/arm/mach-omap2/clockdomains54xx_data.c b/arch/arm/mach-omap2/clockdomains54xx_data.c
index 3ab41fc89dd3..5611e08018a2 100644
--- a/arch/arm/mach-omap2/clockdomains54xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains54xx_data.c
@@ -170,7 +170,7 @@ static struct clockdomain l4sec_54xx_clkdm = {
.dep_bit = OMAP54XX_L4SEC_STATDEP_SHIFT,
.wkdep_srcs = l4sec_wkup_sleep_deps,
.sleepdep_srcs = l4sec_wkup_sleep_deps,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain iva_54xx_clkdm = {
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 75d729943958..49926eced5f1 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -111,7 +111,14 @@ static inline int omap_l2_cache_init(void)
#define OMAP_L2C_AUX_CTRL 0
#define omap4_l2c310_write_sec NULL
#endif
+
+#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
extern void omap5_realtime_timer_init(void);
+#else
+static inline void omap5_realtime_timer_init(void)
+{
+}
+#endif
void omap2420_init_early(void);
void omap2430_init_early(void);
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 6a82fce3f822..570a987e6d1a 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -72,7 +72,7 @@ void __iomem *omap4_get_scu_base(void)
}
#ifdef CONFIG_OMAP5_ERRATA_801819
-void omap5_erratum_workaround_801819(void)
+static void omap5_erratum_workaround_801819(void)
{
u32 acr, revidr;
u32 acr_mask;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index b14442cf6179..558fae4375ba 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -264,14 +264,6 @@ static struct omap_hwmod_ocp_if omap2420_l3__dsp = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> timer1 */
-static struct omap_hwmod_ocp_if omap2420_l4_wkup__timer1 = {
- .master = &omap2xxx_l4_wkup_hwmod,
- .slave = &omap2xxx_timer1_hwmod,
- .clk = "gpt1_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap2420_l4_wkup__wd_timer2 = {
.master = &omap2xxx_l4_wkup_hwmod,
@@ -352,15 +344,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__hdq1w = {
.flags = OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
};
-
-/* l4_wkup -> 32ksync_counter */
-static struct omap_hwmod_ocp_if omap2420_l4_wkup__counter_32k = {
- .master = &omap2xxx_l4_wkup_hwmod,
- .slave = &omap2xxx_counter_32k_hwmod,
- .clk = "sync_32k_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
static struct omap_hwmod_ocp_if omap2420_l3__gpmc = {
.master = &omap2xxx_l3_main_hwmod,
.slave = &omap2xxx_gpmc_hwmod,
@@ -382,8 +365,6 @@ static struct omap_hwmod_ocp_if *omap2420_hwmod_ocp_ifs[] __initdata = {
&omap2420_l4_core__i2c2,
&omap2420_l3__iva,
&omap2420_l3__dsp,
- &omap2420_l4_wkup__timer1,
- &omap2xxx_l4_core__timer2,
&omap2xxx_l4_core__timer3,
&omap2xxx_l4_core__timer4,
&omap2xxx_l4_core__timer5,
@@ -411,7 +392,6 @@ static struct omap_hwmod_ocp_if *omap2420_hwmod_ocp_ifs[] __initdata = {
&omap2xxx_l4_core__sham,
&omap2xxx_l4_core__aes,
&omap2420_l4_core__hdq1w,
- &omap2420_l4_wkup__counter_32k,
&omap2420_l3__gpmc,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 41a37c74f9a6..c93200801b34 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -436,14 +436,6 @@ static struct omap_hwmod_ocp_if omap2430_l3__iva = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> timer1 */
-static struct omap_hwmod_ocp_if omap2430_l4_wkup__timer1 = {
- .master = &omap2xxx_l4_wkup_hwmod,
- .slave = &omap2xxx_timer1_hwmod,
- .clk = "gpt1_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap2430_l4_wkup__wd_timer2 = {
.master = &omap2xxx_l4_wkup_hwmod,
@@ -548,14 +540,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__hdq1w = {
.flags = OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
};
-/* l4_wkup -> 32ksync_counter */
-static struct omap_hwmod_ocp_if omap2430_l4_wkup__counter_32k = {
- .master = &omap2xxx_l4_wkup_hwmod,
- .slave = &omap2xxx_counter_32k_hwmod,
- .clk = "sync_32k_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
static struct omap_hwmod_ocp_if omap2430_l3__gpmc = {
.master = &omap2xxx_l3_main_hwmod,
.slave = &omap2xxx_gpmc_hwmod,
@@ -581,8 +565,6 @@ static struct omap_hwmod_ocp_if *omap2430_hwmod_ocp_ifs[] __initdata = {
&omap2xxx_l4_core__mcspi2,
&omap2430_l4_core__mcspi3,
&omap2430_l3__iva,
- &omap2430_l4_wkup__timer1,
- &omap2xxx_l4_core__timer2,
&omap2xxx_l4_core__timer3,
&omap2xxx_l4_core__timer4,
&omap2xxx_l4_core__timer5,
@@ -613,7 +595,6 @@ static struct omap_hwmod_ocp_if *omap2430_hwmod_ocp_ifs[] __initdata = {
&omap2xxx_l4_core__rng,
&omap2xxx_l4_core__sham,
&omap2xxx_l4_core__aes,
- &omap2430_l4_wkup__counter_32k,
&omap2430_l3__gpmc,
NULL,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
index eef96adea411..518e877bb2a1 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
@@ -95,14 +95,6 @@ struct omap_hwmod_ocp_if omap2xxx_l4_core__mcspi2 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_core -> timer2 */
-struct omap_hwmod_ocp_if omap2xxx_l4_core__timer2 = {
- .master = &omap2xxx_l4_core_hwmod,
- .slave = &omap2xxx_timer2_hwmod,
- .clk = "gpt2_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_core -> timer3 */
struct omap_hwmod_ocp_if omap2xxx_l4_core__timer3 = {
.master = &omap2xxx_l4_core_hwmod,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index a445704d43d9..9156f2bfbc8d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -195,36 +195,6 @@ struct omap_hwmod omap2xxx_iva_hwmod = {
.class = &iva_hwmod_class,
};
-/* timer1 */
-struct omap_hwmod omap2xxx_timer1_hwmod = {
- .name = "timer1",
- .main_clk = "gpt1_fck",
- .prcm = {
- .omap2 = {
- .module_offs = WKUP_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP24XX_ST_GPT1_SHIFT,
- },
- },
- .class = &omap2xxx_timer_hwmod_class,
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
-};
-
-/* timer2 */
-struct omap_hwmod omap2xxx_timer2_hwmod = {
- .name = "timer2",
- .main_clk = "gpt2_fck",
- .prcm = {
- .omap2 = {
- .module_offs = CORE_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP24XX_ST_GPT2_SHIFT,
- },
- },
- .class = &omap2xxx_timer_hwmod_class,
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
-};
-
/* timer3 */
struct omap_hwmod omap2xxx_timer3_hwmod = {
.name = "timer3",
@@ -595,23 +565,6 @@ struct omap_hwmod omap2xxx_mcspi2_hwmod = {
.class = &omap2xxx_mcspi_class,
};
-static struct omap_hwmod_class omap2xxx_counter_hwmod_class = {
- .name = "counter",
-};
-
-struct omap_hwmod omap2xxx_counter_32k_hwmod = {
- .name = "counter_32k",
- .main_clk = "func_32k_ck",
- .prcm = {
- .omap2 = {
- .module_offs = WKUP_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP24XX_ST_32KSYNC_SHIFT,
- },
- },
- .class = &omap2xxx_counter_hwmod_class,
-};
-
/* gpmc */
struct omap_hwmod omap2xxx_gpmc_hwmod = {
.name = "gpmc",
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
index fa2ff41f84b9..5f4ab24dd60d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -44,8 +44,6 @@ extern struct omap_hwmod am33xx_smartreflex0_hwmod;
extern struct omap_hwmod am33xx_smartreflex1_hwmod;
extern struct omap_hwmod am33xx_gpmc_hwmod;
extern struct omap_hwmod am33xx_rtc_hwmod;
-extern struct omap_hwmod am33xx_timer1_hwmod;
-extern struct omap_hwmod am33xx_timer2_hwmod;
extern struct omap_hwmod_class am33xx_emif_hwmod_class;
extern struct omap_hwmod_class am33xx_l4_hwmod_class;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
index 0ebbfbb4fb1c..b389d6589c32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -106,14 +106,6 @@ struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
.user = OCP_USER_MPU,
};
-/* l4 per -> timer2 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__timer2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_timer2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l3 main -> ocmc */
struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
.master = &am33xx_l3_main_hwmod,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index dca5a3a7b97c..4b3cd590fb52 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -307,72 +307,12 @@ struct omap_hwmod am33xx_rtc_hwmod = {
},
};
-/* 'timer 2-7' class */
-static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSC_HAS_RESET_STATUS,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-struct omap_hwmod_class am33xx_timer_hwmod_class = {
- .name = "timer",
- .sysc = &am33xx_timer_sysc,
-};
-
-/* timer1 1ms */
-static struct omap_hwmod_class_sysconfig am33xx_timer1ms_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_timer1ms_hwmod_class = {
- .name = "timer",
- .sysc = &am33xx_timer1ms_sysc,
-};
-
-struct omap_hwmod am33xx_timer1_hwmod = {
- .name = "timer1",
- .class = &am33xx_timer1ms_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .main_clk = "timer1_fck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-struct omap_hwmod am33xx_timer2_hwmod = {
- .name = "timer2",
- .class = &am33xx_timer_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "timer2_fck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
static void omap_hwmod_am33xx_clkctrl(void)
{
- CLKCTRL(am33xx_timer2_hwmod, AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_smartreflex0_hwmod,
AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_smartreflex1_hwmod,
AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_timer1_hwmod, AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
@@ -399,12 +339,10 @@ void omap_hwmod_am33xx_reg(void)
static void omap_hwmod_am43xx_clkctrl(void)
{
- CLKCTRL(am33xx_timer2_hwmod, AM43XX_CM_PER_TIMER2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_smartreflex0_hwmod,
AM43XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_smartreflex1_hwmod,
AM43XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_timer1_hwmod, AM43XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_rtc_hwmod, AM43XX_CM_RTC_RTC_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpmc_hwmod, AM43XX_CM_PER_GPMC_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_ls_hwmod, AM43XX_CM_PER_L4LS_CLKCTRL_OFFSET);
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index c64b735c8acc..3cf9c4c90b18 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -265,14 +265,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__control = {
.user = OCP_USER_MPU,
};
-/* l4 wkup -> timer1 */
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__timer1 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_timer1_hwmod,
- .clk = "dpll_core_m4_div2_ck",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l3_main__emif,
&am33xx_mpu__l3_main,
@@ -291,9 +283,7 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_wkup__control,
&am33xx_l4_wkup__smartreflex0,
&am33xx_l4_wkup__smartreflex1,
- &am33xx_l4_wkup__timer1,
&am33xx_l4_wkup__rtc,
- &am33xx_l4_ls__timer2,
&am33xx_l3_s__gpmc,
&am33xx_l3_main__ocmc,
NULL,
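
The arrays being trimmed in this file are how interconnect links reach the hwmod core: each link names a master, a slave and the interface clock, and the NULL-terminated array is handed to omap_hwmod_register_links() at init time (the same call visible further down in the omap3xxx changes). A minimal sketch of that pattern follows, reusing existing am33xx symbols purely for illustration; the pairing and the init function are placeholders, not additions to the real tables.

#include <linux/init.h>

#include "omap_hwmod.h"
#include "omap_hwmod_33xx_43xx_common_data.h"

/*
 * Illustration of the link-registration pattern used by this file.
 * Master and slave are existing am33xx hwmods; the clock name is taken
 * from the deleted l4_wkup -> timer1 link.
 */
static struct omap_hwmod_ocp_if example_link = {
	.master	= &am33xx_l4_wkup_hwmod,
	.slave	= &am33xx_rtc_hwmod,
	.clk	= "dpll_core_m4_div2_ck",
	.user	= OCP_USER_MPU,
};

static struct omap_hwmod_ocp_if *example_ocp_ifs[] __initdata = {
	&example_link,
	NULL,			/* arrays are always NULL-terminated */
};

static int __init example_register_links(void)
{
	/* Returns 0 on success, as in the SoC init functions below. */
	return omap_hwmod_register_links(example_ocp_ifs);
}
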
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 3c8d2b6e887a..ca02f91237e3 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -147,36 +147,6 @@ static struct omap_hwmod_class omap3xxx_timer_hwmod_class = {
.sysc = &omap3xxx_timer_sysc,
};
-/* timer1 */
-static struct omap_hwmod omap3xxx_timer1_hwmod = {
- .name = "timer1",
- .main_clk = "gpt1_fck",
- .prcm = {
- .omap2 = {
- .module_offs = WKUP_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_ST_GPT1_SHIFT,
- },
- },
- .class = &omap3xxx_timer_hwmod_class,
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
-};
-
-/* timer2 */
-static struct omap_hwmod omap3xxx_timer2_hwmod = {
- .name = "timer2",
- .main_clk = "gpt2_fck",
- .prcm = {
- .omap2 = {
- .module_offs = OMAP3430_PER_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_ST_GPT2_SHIFT,
- },
- },
- .class = &omap3xxx_timer_hwmod_class,
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
-};
-
/* timer3 */
static struct omap_hwmod omap3xxx_timer3_hwmod = {
.name = "timer3",
@@ -312,21 +282,6 @@ static struct omap_hwmod omap3xxx_timer11_hwmod = {
.flags = HWMOD_SET_DEFAULT_CLOCKACT,
};
-/* timer12 */
-static struct omap_hwmod omap3xxx_timer12_hwmod = {
- .name = "timer12",
- .main_clk = "gpt12_fck",
- .prcm = {
- .omap2 = {
- .module_offs = WKUP_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_ST_GPT12_SHIFT,
- },
- },
- .class = &omap3xxx_timer_hwmod_class,
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
-};
-
/*
* 'wd_timer' class
* 32-bit watchdog upward counter that generates a pulse on the reset pin on
@@ -1525,38 +1480,6 @@ static struct omap_hwmod omap3xxx_sad2d_hwmod = {
};
/*
- * '32K sync counter' class
- * 32-bit ordinary counter, clocked by the falling edge of the 32 khz clock
- */
-static struct omap_hwmod_class_sysconfig omap3xxx_counter_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0004,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap3xxx_counter_hwmod_class = {
- .name = "counter",
- .sysc = &omap3xxx_counter_sysc,
-};
-
-static struct omap_hwmod omap3xxx_counter_32k_hwmod = {
- .name = "counter_32k",
- .class = &omap3xxx_counter_hwmod_class,
- .clkdm_name = "wkup_clkdm",
- .flags = HWMOD_SWSUP_SIDLE,
- .main_clk = "wkup_32k_fck",
- .prcm = {
- .omap2 = {
- .module_offs = WKUP_MOD,
- .idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_ST_32KSYNC_SHIFT,
- },
- },
-};
-
-/*
* 'gpmc' class
* general purpose memory controller
*/
@@ -1868,25 +1791,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l3__iva = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-
-/* l4_wkup -> timer1 */
-static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = {
- .master = &omap3xxx_l4_wkup_hwmod,
- .slave = &omap3xxx_timer1_hwmod,
- .clk = "gpt1_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-
-/* l4_per -> timer2 */
-static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = {
- .master = &omap3xxx_l4_per_hwmod,
- .slave = &omap3xxx_timer2_hwmod,
- .clk = "gpt2_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-
/* l4_per -> timer3 */
static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
.master = &omap3xxx_l4_per_hwmod,
@@ -1965,15 +1869,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-
-/* l4_core -> timer12 */
-static struct omap_hwmod_ocp_if omap3xxx_l4_sec__timer12 = {
- .master = &omap3xxx_l4_sec_hwmod,
- .slave = &omap3xxx_timer12_hwmod,
- .clk = "gpt12_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_wkup -> wd_timer2 */
static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
@@ -2325,16 +2220,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__hdq1w = {
.flags = OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
};
-/* l4_wkup -> 32ksync_counter */
-
-
-static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__counter_32k = {
- .master = &omap3xxx_l4_wkup_hwmod,
- .slave = &omap3xxx_counter_32k_hwmod,
- .clk = "omap_32ksync_ick",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* am35xx has Davinci MDIO & EMAC */
static struct omap_hwmod_class am35xx_mdio_class = {
.name = "davinci_mdio",
@@ -2551,8 +2436,6 @@ static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
&omap3_l4_core__i2c2,
&omap3_l4_core__i2c3,
&omap3xxx_l4_wkup__l4_sec,
- &omap3xxx_l4_wkup__timer1,
- &omap3xxx_l4_per__timer2,
&omap3xxx_l4_per__timer3,
&omap3xxx_l4_per__timer4,
&omap3xxx_l4_per__timer5,
@@ -2580,27 +2463,10 @@ static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
&omap34xx_l4_core__mcspi2,
&omap34xx_l4_core__mcspi3,
&omap34xx_l4_core__mcspi4,
- &omap3xxx_l4_wkup__counter_32k,
&omap3xxx_l3_main__gpmc,
NULL,
};
-/* GP-only hwmod links */
-static struct omap_hwmod_ocp_if *omap34xx_gp_hwmod_ocp_ifs[] __initdata = {
- &omap3xxx_l4_sec__timer12,
- NULL,
-};
-
-static struct omap_hwmod_ocp_if *omap36xx_gp_hwmod_ocp_ifs[] __initdata = {
- &omap3xxx_l4_sec__timer12,
- NULL,
-};
-
-static struct omap_hwmod_ocp_if *am35xx_gp_hwmod_ocp_ifs[] __initdata = {
- &omap3xxx_l4_sec__timer12,
- NULL,
-};
-
/* crypto hwmod links */
static struct omap_hwmod_ocp_if *omap34xx_sham_hwmod_ocp_ifs[] __initdata = {
&omap3xxx_l4_core__sham,
@@ -2774,7 +2640,7 @@ static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
int __init omap3xxx_hwmod_init(void)
{
int r;
- struct omap_hwmod_ocp_if **h = NULL, **h_gp = NULL, **h_sham = NULL;
+ struct omap_hwmod_ocp_if **h = NULL, **h_sham = NULL;
struct omap_hwmod_ocp_if **h_aes = NULL;
struct device_node *bus;
unsigned int rev;
@@ -2797,18 +2663,15 @@ int __init omap3xxx_hwmod_init(void)
rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
h = omap34xx_hwmod_ocp_ifs;
- h_gp = omap34xx_gp_hwmod_ocp_ifs;
h_sham = omap34xx_sham_hwmod_ocp_ifs;
h_aes = omap34xx_aes_hwmod_ocp_ifs;
} else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
h = am35xx_hwmod_ocp_ifs;
- h_gp = am35xx_gp_hwmod_ocp_ifs;
h_sham = am35xx_sham_hwmod_ocp_ifs;
h_aes = am35xx_aes_hwmod_ocp_ifs;
} else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
rev == OMAP3630_REV_ES1_2) {
h = omap36xx_hwmod_ocp_ifs;
- h_gp = omap36xx_gp_hwmod_ocp_ifs;
h_sham = omap36xx_sham_hwmod_ocp_ifs;
h_aes = omap36xx_aes_hwmod_ocp_ifs;
} else {
@@ -2820,13 +2683,6 @@ int __init omap3xxx_hwmod_init(void)
if (r < 0)
return r;
- /* Register GP-only hwmod links. */
- if (h_gp && omap_type() == OMAP2_DEVICE_TYPE_GP) {
- r = omap_hwmod_register_links(h_gp);
- if (r < 0)
- return r;
- }
-
/*
* Register crypto hwmod links only if they are not disabled in DT.
* If DT information is missing, enable them only for GP devices.
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index d2203f44af88..3f338732ee6c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -85,34 +85,6 @@ static struct omap_hwmod am43xx_control_hwmod = {
},
};
-static struct omap_hwmod_class_sysconfig am43xx_synctimer_sysc = {
- .rev_offs = 0x0,
- .sysc_offs = 0x4,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am43xx_synctimer_hwmod_class = {
- .name = "synctimer",
- .sysc = &am43xx_synctimer_sysc,
-};
-
-static struct omap_hwmod am43xx_synctimer_hwmod = {
- .name = "counter_32k",
- .class = &am43xx_synctimer_hwmod_class,
- .clkdm_name = "l4_wkup_aon_clkdm",
- .flags = HWMOD_SWSUP_SIDLE,
- .main_clk = "synctimer_32kclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-
static struct omap_hwmod_class_sysconfig am43xx_usb_otg_ss_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
@@ -206,20 +178,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_wkup__control = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_l4_wkup__timer1 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_timer1_hwmod,
- .clk = "sys_clkin_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__synctimer = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am43xx_synctimer_hwmod,
- .clk = "sys_clkin_ck",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss0 = {
.master = &am33xx_l3_s_hwmod,
.slave = &am43xx_usb_otg_ss0_hwmod,
@@ -235,7 +193,6 @@ static struct omap_hwmod_ocp_if am43xx_l3_s__usbotgss1 = {
};
static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
- &am33xx_l4_wkup__synctimer,
&am33xx_mpu__l3_main,
&am33xx_mpu__prcm,
&am33xx_l3_s__l4_ls,
@@ -252,8 +209,6 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_wkup__control,
&am43xx_l4_wkup__smartreflex0,
&am43xx_l4_wkup__smartreflex1,
- &am43xx_l4_wkup__timer1,
- &am33xx_l4_ls__timer2,
&am33xx_l3_s__gpmc,
&am33xx_l3_main__ocmc,
&am43xx_l3_s__usbotgss0,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 33f6596c03f7..de13c46b984f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -232,39 +232,6 @@ static struct omap_hwmod omap44xx_ocp_wp_noc_hwmod = {
*/
/*
- * 'counter' class
- * 32-bit ordinary counter, clocked by the falling edge of the 32 khz clock
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0004,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_counter_hwmod_class = {
- .name = "counter",
- .sysc = &omap44xx_counter_sysc,
-};
-
-/* counter_32k */
-static struct omap_hwmod omap44xx_counter_32k_hwmod = {
- .name = "counter_32k",
- .class = &omap44xx_counter_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_SWSUP_SIDLE,
- .main_clk = "sys_32k_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_WKUP_SYNCTIMER_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_WKUP_SYNCTIMER_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
* 'ctrl_module' class
* attila core control module + core pad control module + wkup pad control
* module + attila wkup control module
@@ -673,45 +640,6 @@ static struct omap_hwmod omap44xx_sl2if_hwmod = {
};
/*
- * 'timer' class
- * general purpose timer module with accurate 1ms tick
- * This class contains several variants: ['timer_1ms', 'timer']
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_timer_1ms_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_EMUFREE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_timer_1ms_hwmod_class = {
- .name = "timer",
- .sysc = &omap44xx_timer_1ms_sysc,
-};
-
-/* timer1 */
-static struct omap_hwmod omap44xx_timer1_hwmod = {
- .name = "timer1",
- .class = &omap44xx_timer_1ms_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dmt1_clk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_WKUP_TIMER1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_WKUP_TIMER1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'usb_host_fs' class
* full-speed usb host controller
*/
@@ -1063,14 +991,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp_wp_noc = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> counter_32k */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__counter_32k = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_counter_32k_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> ctrl_module_core */
static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ctrl_module_core = {
.master = &omap44xx_l4_cfg_hwmod,
@@ -1199,14 +1119,6 @@ static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> timer1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__timer1 = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_timer1_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> usb_host_fs */
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
.master = &omap44xx_l4_cfg_hwmod,
@@ -1273,7 +1185,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__l4_wkup,
&omap44xx_mpu__mpu_private,
&omap44xx_l4_cfg__ocp_wp_noc,
- &omap44xx_l4_wkup__counter_32k,
&omap44xx_l4_cfg__ctrl_module_core,
&omap44xx_l4_cfg__ctrl_module_pad_core,
&omap44xx_l4_wkup__ctrl_module_wkup,
@@ -1290,7 +1201,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_wkup__prm,
&omap44xx_l4_wkup__scrm,
/* &omap44xx_l3_main_2__sl2if, */
- &omap44xx_l4_wkup__timer1,
/* &omap44xx_l4_cfg__usb_host_fs, */
&omap44xx_l4_cfg__usb_host_hs,
&omap44xx_l4_cfg__usb_tll_hs,
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 08f34f4732fd..4cb194ac7a7e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -194,39 +194,6 @@ static struct omap_hwmod omap54xx_mpu_private_hwmod = {
};
/*
- * 'counter' class
- * 32-bit ordinary counter, clocked by the falling edge of the 32 khz clock
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_counter_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_counter_hwmod_class = {
- .name = "counter",
- .sysc = &omap54xx_counter_sysc,
-};
-
-/* counter_32k */
-static struct omap_hwmod omap54xx_counter_32k_hwmod = {
- .name = "counter_32k",
- .class = &omap54xx_counter_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .flags = HWMOD_SWSUP_SIDLE,
- .main_clk = "wkupaon_iclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_WKUPAON_COUNTER_32K_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_WKUPAON_COUNTER_32K_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
* 'emif' class
* external memory interface no1 (wrapper)
*/
@@ -299,44 +266,6 @@ static struct omap_hwmod omap54xx_mpu_hwmod = {
},
};
-
-/*
- * 'timer' class
- * general purpose timer module with accurate 1ms tick
- * This class contains several variants: ['timer_1ms', 'timer']
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_timer_1ms_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_timer_1ms_hwmod_class = {
- .name = "timer",
- .sysc = &omap54xx_timer_1ms_sysc,
-};
-
-/* timer1 */
-static struct omap_hwmod omap54xx_timer1_hwmod = {
- .name = "timer1",
- .class = &omap54xx_timer_1ms_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .main_clk = "timer1_gfclk_mux",
- .flags = HWMOD_SET_DEFAULT_CLOCKACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_WKUPAON_TIMER1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_WKUPAON_TIMER1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
/*
* 'usb_host_hs' class
* high-speed multi-port usb host controller
@@ -666,14 +595,6 @@ static struct omap_hwmod_ocp_if omap54xx_mpu__mpu_private = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> counter_32k */
-static struct omap_hwmod_ocp_if omap54xx_l4_wkup__counter_32k = {
- .master = &omap54xx_l4_wkup_hwmod,
- .slave = &omap54xx_counter_32k_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* mpu -> emif1 */
static struct omap_hwmod_ocp_if omap54xx_mpu__emif1 = {
.master = &omap54xx_mpu_hwmod,
@@ -698,14 +619,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> timer1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_wkup__timer1 = {
- .master = &omap54xx_l4_wkup_hwmod,
- .slave = &omap54xx_timer1_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> usb_host_hs */
static struct omap_hwmod_ocp_if omap54xx_l4_cfg__usb_host_hs = {
.master = &omap54xx_l4_cfg_hwmod,
@@ -747,11 +660,9 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l3_main_2__l4_per,
&omap54xx_l3_main_1__l4_wkup,
&omap54xx_mpu__mpu_private,
- &omap54xx_l4_wkup__counter_32k,
&omap54xx_mpu__emif1,
&omap54xx_mpu__emif2,
&omap54xx_l4_cfg__mpu,
- &omap54xx_l4_wkup__timer1,
&omap54xx_l4_cfg__usb_host_hs,
&omap54xx_l4_cfg__usb_tll_hs,
&omap54xx_l4_cfg__usb_otg_ss,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index e95668bdbc3f..07b7458deae4 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -222,40 +222,6 @@ static struct omap_hwmod dra7xx_bb2d_hwmod = {
};
/*
- * 'counter' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_counter_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_counter_hwmod_class = {
- .name = "counter",
- .sysc = &dra7xx_counter_sysc,
-};
-
-/* counter_32k */
-static struct omap_hwmod dra7xx_counter_32k_hwmod = {
- .name = "counter_32k",
- .class = &dra7xx_counter_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .flags = HWMOD_SWSUP_SIDLE,
- .main_clk = "wkupaon_iclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_WKUPAON_COUNTER_32K_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_WKUPAON_COUNTER_32K_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
* 'ctrl_module' class
*
*/
@@ -526,103 +492,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
};
/*
- * 'timer' class
- *
- * This class contains several variants: ['timer_1ms', 'timer_secure',
- * 'timer']
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_timer_1ms_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_timer_1ms_hwmod_class = {
- .name = "timer",
- .sysc = &dra7xx_timer_1ms_sysc,
-};
-
-static struct omap_hwmod_class_sysconfig dra7xx_timer_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_timer_hwmod_class = {
- .name = "timer",
- .sysc = &dra7xx_timer_sysc,
-};
-
-/* timer1 */
-static struct omap_hwmod dra7xx_timer1_hwmod = {
- .name = "timer1",
- .class = &dra7xx_timer_1ms_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .main_clk = "timer1_gfclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_WKUPAON_TIMER1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_WKUPAON_TIMER1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* timer2 */
-static struct omap_hwmod dra7xx_timer2_hwmod = {
- .name = "timer2",
- .class = &dra7xx_timer_1ms_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "timer2_gfclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_TIMER2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_TIMER2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* timer3 */
-static struct omap_hwmod dra7xx_timer3_hwmod = {
- .name = "timer3",
- .class = &dra7xx_timer_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "timer3_gfclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_TIMER3_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_TIMER3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* timer4 */
-static struct omap_hwmod dra7xx_timer4_hwmod = {
- .name = "timer4",
- .class = &dra7xx_timer_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "timer4_gfclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_TIMER4_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_TIMER4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'usb_otg_ss' class
*
*/
@@ -864,14 +733,6 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__bb2d = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> counter_32k */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__counter_32k = {
- .master = &dra7xx_l4_wkup_hwmod,
- .slave = &dra7xx_counter_32k_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_wkup -> ctrl_module_wkup */
static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = {
.master = &dra7xx_l4_wkup_hwmod,
@@ -952,38 +813,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_cfg__sata = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> timer1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__timer1 = {
- .master = &dra7xx_l4_wkup_hwmod,
- .slave = &dra7xx_timer1_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> timer2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer2 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_timer2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> timer3 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer3 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_timer3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> timer4 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__timer4 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_timer4_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_per3 -> usb_otg_ss1 */
static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss1 = {
.master = &dra7xx_l4_per3_hwmod,
@@ -1062,7 +891,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l3_main_1__l4_wkup,
&dra7xx_l4_per2__atl,
&dra7xx_l3_main_1__bb2d,
- &dra7xx_l4_wkup__counter_32k,
&dra7xx_l4_wkup__ctrl_module_wkup,
&dra7xx_l3_main_1__gpmc,
&dra7xx_l4_cfg__mpu,
@@ -1072,10 +900,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_cfg__pciess2,
&dra7xx_l3_main_1__qspi,
&dra7xx_l4_cfg__sata,
- &dra7xx_l4_wkup__timer1,
- &dra7xx_l4_per1__timer2,
- &dra7xx_l4_per1__timer3,
- &dra7xx_l4_per1__timer4,
&dra7xx_l4_per3__usb_otg_ss1,
&dra7xx_l4_per3__usb_otg_ss2,
&dra7xx_l4_per3__usb_otg_ss3,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index 6a9f1ad9d413..50fb699b163f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -690,76 +690,6 @@ static struct omap_hwmod_class dm816x_timer_hwmod_class = {
.sysc = &dm816x_timer_sysc,
};
-static struct omap_hwmod dm814x_timer1_hwmod = {
- .name = "timer1",
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "timer1_fck",
- .class = &dm816x_timer_hwmod_class,
- .flags = HWMOD_NO_IDLEST,
-};
-
-static struct omap_hwmod_ocp_if dm814x_l4_ls__timer1 = {
- .master = &dm81xx_l4_ls_hwmod,
- .slave = &dm814x_timer1_hwmod,
- .clk = "sysclk6_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod dm816x_timer1_hwmod = {
- .name = "timer1",
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "timer1_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM816X_CM_ALWON_TIMER_1_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .class = &dm816x_timer_hwmod_class,
-};
-
-static struct omap_hwmod_ocp_if dm816x_l4_ls__timer1 = {
- .master = &dm81xx_l4_ls_hwmod,
- .slave = &dm816x_timer1_hwmod,
- .clk = "sysclk6_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod dm814x_timer2_hwmod = {
- .name = "timer2",
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "timer2_fck",
- .class = &dm816x_timer_hwmod_class,
- .flags = HWMOD_NO_IDLEST,
-};
-
-static struct omap_hwmod_ocp_if dm814x_l4_ls__timer2 = {
- .master = &dm81xx_l4_ls_hwmod,
- .slave = &dm814x_timer2_hwmod,
- .clk = "sysclk6_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod dm816x_timer2_hwmod = {
- .name = "timer2",
- .clkdm_name = "alwon_l3s_clkdm",
- .main_clk = "timer2_fck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DM816X_CM_ALWON_TIMER_2_CLKCTRL,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .class = &dm816x_timer_hwmod_class,
-};
-
-static struct omap_hwmod_ocp_if dm816x_l4_ls__timer2 = {
- .master = &dm81xx_l4_ls_hwmod,
- .slave = &dm816x_timer2_hwmod,
- .clk = "sysclk6_ck",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod dm816x_timer3_hwmod = {
.name = "timer3",
.clkdm_name = "alwon_l3s_clkdm",
@@ -1288,8 +1218,6 @@ static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = {
&dm814x_l4_ls__mmc1,
&dm814x_l4_ls__mmc2,
&ti81xx_l4_ls__rtc,
- &dm814x_l4_ls__timer1,
- &dm814x_l4_ls__timer2,
&dm81xx_alwon_l3_slow__gpmc,
&dm814x_default_l3_slow__usbss,
&dm814x_alwon_l3_med__mmc3,
@@ -1318,8 +1246,6 @@ static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = {
&dm81xx_l4_ls__elm,
&ti81xx_l4_ls__rtc,
&dm816x_l4_ls__mmc1,
- &dm816x_l4_ls__timer1,
- &dm816x_l4_ls__timer2,
&dm816x_l4_ls__timer3,
&dm816x_l4_ls__timer4,
&dm816x_l4_ls__timer5,
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index c85cb8b5831c..0045e6680a63 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -21,8 +21,6 @@ extern struct omap_hwmod omap2xxx_l4_core_hwmod;
extern struct omap_hwmod omap2xxx_l4_wkup_hwmod;
extern struct omap_hwmod omap2xxx_mpu_hwmod;
extern struct omap_hwmod omap2xxx_iva_hwmod;
-extern struct omap_hwmod omap2xxx_timer1_hwmod;
-extern struct omap_hwmod omap2xxx_timer2_hwmod;
extern struct omap_hwmod omap2xxx_timer3_hwmod;
extern struct omap_hwmod omap2xxx_timer4_hwmod;
extern struct omap_hwmod omap2xxx_timer5_hwmod;
@@ -47,7 +45,6 @@ extern struct omap_hwmod omap2xxx_gpio3_hwmod;
extern struct omap_hwmod omap2xxx_gpio4_hwmod;
extern struct omap_hwmod omap2xxx_mcspi1_hwmod;
extern struct omap_hwmod omap2xxx_mcspi2_hwmod;
-extern struct omap_hwmod omap2xxx_counter_32k_hwmod;
extern struct omap_hwmod omap2xxx_gpmc_hwmod;
extern struct omap_hwmod omap2xxx_rng_hwmod;
extern struct omap_hwmod omap2xxx_sham_hwmod;
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index 5455fc98c60e..58236c7dc83e 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -267,7 +267,7 @@ static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
return NULL;
}
-void __iomem *am43xx_get_rtc_base_addr(void)
+static void __iomem *am43xx_get_rtc_base_addr(void)
{
rtc_oh = omap_hwmod_lookup("rtc");
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index ac1324c6453b..c4e97d35c310 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -72,7 +72,7 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */
mov r1, #0x1
- adrl r3, l2dis_3630_offset @ may be too distant for plain adr
+ adr r3, l2dis_3630_offset
ldr r2, [r3] @ value for offset
str r1, [r2, r3] @ write to l2dis_3630
ldmfd sp!, {pc} @ restore regs and return
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 8b09cdacc30d..620ba69c8f11 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -26,34 +26,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/dmtimer-omap.h>
-#include <linux/sched_clock.h>
-
-#include <asm/mach/time.h>
-
-#include "omap_hwmod.h"
-#include "omap_device.h"
-#include <plat/counter-32k.h>
-#include <clocksource/timer-ti-dm.h>
#include "soc.h"
#include "common.h"
#include "control.h"
-#include "powerdomain.h"
#include "omap-secure.h"
#define REALTIME_COUNTER_BASE 0x48243200
@@ -61,537 +39,12 @@
#define INCREMENTER_DENUMERATOR_RELOAD_OFFSET 0x14
#define NUMERATOR_DENUMERATOR_MASK 0xfffff000
-/* Clockevent code */
-
-static struct omap_dm_timer clkev;
-static struct clock_event_device clockevent_gpt;
-
-/* Clockevent hwmod for am335x and am437x suspend */
-static struct omap_hwmod *clockevent_gpt_hwmod;
-
-/* Clockesource hwmod for am437x suspend */
-static struct omap_hwmod *clocksource_gpt_hwmod;
-
-#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
static unsigned long arch_timer_freq;
void set_cntfreq(void)
{
omap_smc1(OMAP5_DRA7_MON_SET_CNTFRQ_INDEX, arch_timer_freq);
}
-#endif
-
-static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &clockevent_gpt;
-
- __omap_dm_timer_write_status(&clkev, OMAP_TIMER_INT_OVERFLOW);
-
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-static int omap2_gp_timer_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
-{
- __omap_dm_timer_load_start(&clkev, OMAP_TIMER_CTRL_ST,
- 0xffffffff - cycles, OMAP_TIMER_POSTED);
-
- return 0;
-}
-
-static int omap2_gp_timer_shutdown(struct clock_event_device *evt)
-{
- __omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate);
- return 0;
-}
-
-static int omap2_gp_timer_set_periodic(struct clock_event_device *evt)
-{
- u32 period;
-
- __omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate);
-
- period = clkev.rate / HZ;
- period -= 1;
- /* Looks like we need to first set the load value separately */
- __omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG, 0xffffffff - period,
- OMAP_TIMER_POSTED);
- __omap_dm_timer_load_start(&clkev,
- OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
- 0xffffffff - period, OMAP_TIMER_POSTED);
- return 0;
-}
-
-static void omap_clkevt_idle(struct clock_event_device *unused)
-{
- if (!clockevent_gpt_hwmod)
- return;
-
- omap_hwmod_idle(clockevent_gpt_hwmod);
-}
-
-static void omap_clkevt_unidle(struct clock_event_device *unused)
-{
- if (!clockevent_gpt_hwmod)
- return;
-
- omap_hwmod_enable(clockevent_gpt_hwmod);
- __omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);
-}
-
-static struct clock_event_device clockevent_gpt = {
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .rating = 300,
- .set_next_event = omap2_gp_timer_set_next_event,
- .set_state_shutdown = omap2_gp_timer_shutdown,
- .set_state_periodic = omap2_gp_timer_set_periodic,
- .set_state_oneshot = omap2_gp_timer_shutdown,
- .tick_resume = omap2_gp_timer_shutdown,
-};
-
-static const struct of_device_id omap_timer_match[] __initconst = {
- { .compatible = "ti,omap2420-timer", },
- { .compatible = "ti,omap3430-timer", },
- { .compatible = "ti,omap4430-timer", },
- { .compatible = "ti,omap5430-timer", },
- { .compatible = "ti,dm814-timer", },
- { .compatible = "ti,dm816-timer", },
- { .compatible = "ti,am335x-timer", },
- { .compatible = "ti,am335x-timer-1ms", },
- { }
-};
-
-static int omap_timer_add_disabled_property(struct device_node *np)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return -ENOMEM;
-
- prop->name = "status";
- prop->value = "disabled";
- prop->length = strlen(prop->value);
-
- return of_add_property(np, prop);
-}
-
-static int omap_timer_update_dt(struct device_node *np)
-{
- int error = 0;
-
- if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
- error = omap_timer_add_disabled_property(np);
- if (error)
- return error;
- }
-
- /* No parent interconnect target module configured? */
- if (of_get_property(np, "ti,hwmods", NULL))
- return error;
-
- /* Tag parent interconnect target module disabled */
- error = omap_timer_add_disabled_property(np->parent);
- if (error)
- return error;
-
- return 0;
-}
-
-/**
- * omap_get_timer_dt - get a timer using device-tree
- * @match - device-tree match structure for matching a device type
- * @property - optional timer property to match
- *
- * Helper function to get a timer during early boot using device-tree for use
- * as kernel system timer. Optionally, the property argument can be used to
- * select a timer with a specific property. Once a timer is found then mark
- * the timer node in device-tree as disabled, to prevent the kernel from
- * registering this timer as a platform device and so no one else can use it.
- */
-static struct device_node * __init omap_get_timer_dt(const struct of_device_id *match,
- const char *property)
-{
- struct device_node *np;
- int error;
-
- for_each_matching_node(np, match) {
- if (!of_device_is_available(np))
- continue;
-
- if (property && !of_get_property(np, property, NULL))
- continue;
-
- if (!property && (of_get_property(np, "ti,timer-alwon", NULL) ||
- of_get_property(np, "ti,timer-dsp", NULL) ||
- of_get_property(np, "ti,timer-pwm", NULL) ||
- of_get_property(np, "ti,timer-secure", NULL)))
- continue;
-
- error = omap_timer_update_dt(np);
- WARN(error, "%s: Could not update dt: %i\n", __func__, error);
-
- return np;
- }
-
- return NULL;
-}
-
-/**
- * omap_dmtimer_init - initialisation function when device tree is used
- *
- * For secure OMAP3/DRA7xx devices, timers with device type "timer-secure"
- * cannot be used by the kernel as they are reserved. Therefore, to prevent the
- * kernel registering these devices remove them dynamically from the device
- * tree on boot.
- */
-static void __init omap_dmtimer_init(void)
-{
- struct device_node *np;
-
- if (!cpu_is_omap34xx() && !soc_is_dra7xx())
- return;
-
- /* If we are a secure device, remove any secure timer nodes */
- if ((omap_type() != OMAP2_DEVICE_TYPE_GP)) {
- np = omap_get_timer_dt(omap_timer_match, "ti,timer-secure");
- of_node_put(np);
- }
-}
-
-/**
- * omap_dm_timer_get_errata - get errata flags for a timer
- *
- * Get the timer errata flags that are specific to the OMAP device being used.
- */
-static u32 __init omap_dm_timer_get_errata(void)
-{
- if (cpu_is_omap24xx())
- return 0;
-
- return OMAP_TIMER_ERRATA_I103_I767;
-}
-
-static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
- const char *fck_source,
- const char *property,
- const char **timer_name,
- int posted)
-{
- const char *oh_name = NULL;
- struct device_node *np;
- struct omap_hwmod *oh;
- struct clk *src;
- int r = 0;
-
- np = omap_get_timer_dt(omap_timer_match, property);
- if (!np)
- return -ENODEV;
-
- of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
- if (!oh_name) {
- of_property_read_string_index(np->parent, "ti,hwmods", 0,
- &oh_name);
- if (!oh_name)
- return -ENODEV;
- }
-
- timer->irq = irq_of_parse_and_map(np, 0);
- if (!timer->irq)
- return -ENXIO;
-
- timer->io_base = of_iomap(np, 0);
-
- timer->fclk = of_clk_get_by_name(np, "fck");
-
- of_node_put(np);
-
- oh = omap_hwmod_lookup(oh_name);
- if (!oh)
- return -ENODEV;
-
- *timer_name = oh->name;
-
- if (!timer->io_base)
- return -ENXIO;
-
- omap_hwmod_setup_one(oh_name);
-
- /* After the dmtimer is using hwmod these clocks won't be needed */
- if (IS_ERR_OR_NULL(timer->fclk))
- timer->fclk = clk_get(NULL, omap_hwmod_get_main_clk(oh));
- if (IS_ERR(timer->fclk))
- return PTR_ERR(timer->fclk);
-
- src = clk_get(NULL, fck_source);
- if (IS_ERR(src))
- return PTR_ERR(src);
-
- WARN(clk_set_parent(timer->fclk, src) < 0,
- "Cannot set timer parent clock, no PLL clock driver?");
-
- clk_put(src);
-
- omap_hwmod_enable(oh);
- __omap_dm_timer_init_regs(timer);
-
- if (posted)
- __omap_dm_timer_enable_posted(timer);
-
- /* Check that the intended posted configuration matches the actual */
- if (posted != timer->posted)
- return -EINVAL;
-
- timer->rate = clk_get_rate(timer->fclk);
- timer->reserved = 1;
-
- return r;
-}
-
-#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
-void tick_broadcast(const struct cpumask *mask)
-{
-}
-#endif
-
-static void __init omap2_gp_clockevent_init(int gptimer_id,
- const char *fck_source,
- const char *property)
-{
- int res;
-
- clkev.id = gptimer_id;
- clkev.errata = omap_dm_timer_get_errata();
-
- /*
- * For clock-event timers we never read the timer counter and
- * so we are not impacted by errata i103 and i767. Therefore,
- * we can safely ignore this errata for clock-event timers.
- */
- __omap_dm_timer_override_errata(&clkev, OMAP_TIMER_ERRATA_I103_I767);
-
- res = omap_dm_timer_init_one(&clkev, fck_source, property,
- &clockevent_gpt.name, OMAP_TIMER_POSTED);
- BUG_ON(res);
-
- if (request_irq(clkev.irq, omap2_gp_timer_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL, "gp_timer", &clkev))
- pr_err("Failed to request irq %d (gp_timer)\n", clkev.irq);
-
- __omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);
-
- clockevent_gpt.cpumask = cpu_possible_mask;
- clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
- clockevents_config_and_register(&clockevent_gpt, clkev.rate,
- 3, /* Timer internal resynch latency */
- 0xffffffff);
-
- if (soc_is_am33xx() || soc_is_am43xx()) {
- clockevent_gpt.suspend = omap_clkevt_idle;
- clockevent_gpt.resume = omap_clkevt_unidle;
-
- clockevent_gpt_hwmod =
- omap_hwmod_lookup(clockevent_gpt.name);
- }
-
- pr_info("OMAP clockevent source: %s at %lu Hz\n", clockevent_gpt.name,
- clkev.rate);
-}
-
-/* Clocksource code */
-static struct omap_dm_timer clksrc;
-static bool use_gptimer_clksrc __initdata;
-
-/*
- * clocksource
- */
-static u64 clocksource_read_cycles(struct clocksource *cs)
-{
- return (u64)__omap_dm_timer_read_counter(&clksrc,
- OMAP_TIMER_NONPOSTED);
-}
-
-static struct clocksource clocksource_gpt = {
- .rating = 300,
- .read = clocksource_read_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static u64 notrace dmtimer_read_sched_clock(void)
-{
- if (clksrc.reserved)
- return __omap_dm_timer_read_counter(&clksrc,
- OMAP_TIMER_NONPOSTED);
-
- return 0;
-}
-
-static const struct of_device_id omap_counter_match[] __initconst = {
- { .compatible = "ti,omap-counter32k", },
- { }
-};
-
-/* Setup free-running counter for clocksource */
-static int __init __maybe_unused omap2_sync32k_clocksource_init(void)
-{
- int ret;
- struct device_node *np = NULL;
- struct omap_hwmod *oh;
- const char *oh_name = "counter_32k";
-
- /*
- * See if the 32kHz counter is supported.
- */
- np = omap_get_timer_dt(omap_counter_match, NULL);
- if (!np)
- return -ENODEV;
-
- of_property_read_string_index(np->parent, "ti,hwmods", 0, &oh_name);
- if (!oh_name) {
- of_property_read_string_index(np, "ti,hwmods", 0, &oh_name);
- if (!oh_name)
- return -ENODEV;
- }
-
- /*
- * First check hwmod data is available for sync32k counter
- */
- oh = omap_hwmod_lookup(oh_name);
- if (!oh || oh->slaves_cnt == 0)
- return -ENODEV;
-
- omap_hwmod_setup_one(oh_name);
-
- ret = omap_hwmod_enable(oh);
- if (ret) {
- pr_warn("%s: failed to enable counter_32k module (%d)\n",
- __func__, ret);
- return ret;
- }
-
- return ret;
-}
-
-static unsigned int omap2_gptimer_clksrc_load;
-
-static void omap2_gptimer_clksrc_suspend(struct clocksource *unused)
-{
- omap2_gptimer_clksrc_load =
- __omap_dm_timer_read_counter(&clksrc, OMAP_TIMER_NONPOSTED);
-
- omap_hwmod_idle(clocksource_gpt_hwmod);
-}
-
-static void omap2_gptimer_clksrc_resume(struct clocksource *unused)
-{
- omap_hwmod_enable(clocksource_gpt_hwmod);
-
- __omap_dm_timer_load_start(&clksrc,
- OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
- omap2_gptimer_clksrc_load,
- OMAP_TIMER_NONPOSTED);
-}
-
-static void __init omap2_gptimer_clocksource_init(int gptimer_id,
- const char *fck_source,
- const char *property)
-{
- int res;
-
- clksrc.id = gptimer_id;
- clksrc.errata = omap_dm_timer_get_errata();
-
- res = omap_dm_timer_init_one(&clksrc, fck_source, property,
- &clocksource_gpt.name,
- OMAP_TIMER_NONPOSTED);
-
- if (soc_is_am43xx()) {
- clocksource_gpt.suspend = omap2_gptimer_clksrc_suspend;
- clocksource_gpt.resume = omap2_gptimer_clksrc_resume;
-
- clocksource_gpt_hwmod =
- omap_hwmod_lookup(clocksource_gpt.name);
- }
-
- BUG_ON(res);
-
- __omap_dm_timer_load_start(&clksrc,
- OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0,
- OMAP_TIMER_NONPOSTED);
- sched_clock_register(dmtimer_read_sched_clock, 32, clksrc.rate);
-
- if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
- pr_err("Could not register clocksource %s\n",
- clocksource_gpt.name);
- else
- pr_info("OMAP clocksource: %s at %lu Hz\n",
- clocksource_gpt.name, clksrc.rate);
-}
-
-static void __init __omap_sync32k_timer_init(int clkev_nr, const char *clkev_src,
- const char *clkev_prop, int clksrc_nr, const char *clksrc_src,
- const char *clksrc_prop, bool gptimer)
-{
- omap_clk_init();
- omap_dmtimer_init();
- omap2_gp_clockevent_init(clkev_nr, clkev_src, clkev_prop);
-
- /* Enable the use of clocksource="gp_timer" kernel parameter */
- if (clksrc_nr && (use_gptimer_clksrc || gptimer))
- omap2_gptimer_clocksource_init(clksrc_nr, clksrc_src,
- clksrc_prop);
- else
- omap2_sync32k_clocksource_init();
-}
-
-void __init omap_init_time(void)
-{
- __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
- 2, "timer_sys_ck", NULL, false);
-
- timer_probe();
-}
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
-void __init omap3_secure_sync32k_timer_init(void)
-{
- __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
- 2, "timer_sys_ck", NULL, false);
-
- timer_probe();
-}
-#endif /* CONFIG_ARCH_OMAP3 */
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
- defined(CONFIG_SOC_AM43XX)
-void __init omap3_gptimer_timer_init(void)
-{
- __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
- 1, "timer_sys_ck", "ti,timer-alwon", true);
- if (of_have_populated_dt())
- timer_probe();
-}
-#endif
-
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
- defined(CONFIG_SOC_DRA7XX)
-static void __init omap4_sync32k_timer_init(void)
-{
- __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
- 0, NULL, NULL, false);
-}
-
-void __init omap4_local_timer_init(void)
-{
- omap4_sync32k_timer_init();
- timer_probe();
-}
-#endif
-
-#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
/*
* The realtime counter also called master counter, is a free-running
@@ -604,7 +57,6 @@ void __init omap4_local_timer_init(void)
*/
static void __init realtime_counter_init(void)
{
-#ifdef CONFIG_SOC_HAS_REALTIME_COUNTER
void __iomem *base;
static struct clk *sys_clk;
unsigned long rate;
@@ -703,39 +155,12 @@ sysclk1_based:
set_cntfreq();
iounmap(base);
-#endif
}
void __init omap5_realtime_timer_init(void)
{
- omap4_sync32k_timer_init();
+ omap_clk_init();
realtime_counter_init();
timer_probe();
}
-#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
-
-/**
- * omap2_override_clocksource - clocksource override with user configuration
- *
- * Allows user to override default clocksource, using kernel parameter
- * clocksource="gp_timer" (For all OMAP2PLUS architectures)
- *
- * Note that, here we are using same standard kernel parameter "clocksource=",
- * and not introducing any OMAP specific interface.
- */
-static int __init omap2_override_clocksource(char *str)
-{
- if (!str)
- return 0;
- /*
- * For OMAP architecture, we only have two options
- * - sync_32k (default)
- * - gp_timer (sys_clk based)
- */
- if (!strcmp(str, "gp_timer"))
- use_gptimer_clksrc = true;
-
- return 0;
-}
-early_param("clocksource", omap2_override_clocksource);
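
The last hunk above also drops the clocksource=gp_timer command-line override. For reference, here is a minimal sketch of the early_param() mechanism it relied on; the handler and flag names are placeholders, while the parameter string and the test mirror the removed omap2_override_clocksource().

#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Sketch of the early_param() pattern removed above: the handler runs
 * while the kernel command line is parsed, well before initcalls, so
 * the flag can steer clocksource selection during timer init.
 */
static bool example_use_gptimer_clksrc __initdata;

static int __init example_override_clocksource(char *str)
{
	if (str && !strcmp(str, "gp_timer"))
		example_use_gptimer_clksrc = true;

	return 0;
}
early_param("clocksource", example_override_clocksource);
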
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 6f66785fab01..ea077f66372d 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -30,7 +30,6 @@ config ARCH_ATLAS7
select ARM_GIC
select ATLAS7_TIMER
select HAVE_ARM_SCU if SMP
- select HAVE_SMP
help
Support for CSR SiRFSoC ARM Cortex A7 Platform
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index f60bc29aef68..f7520a6cc7d4 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -123,23 +123,6 @@ config CSB726_CSB701
bool "Enable support for CSB701 baseboard"
depends on MACH_CSB726
-config MACH_ARMCORE
- bool "CompuLab CM-X255/CM-X270 modules"
- select ARCH_HAS_DMA_SET_COHERENT_MASK if PCI
- select IWMMXT
- select HAVE_PCI
- select NEED_MACH_IO_H if PCI
- select PXA25x
- select PXA27x
-
-config MACH_EM_X270
- bool "CompuLab EM-x270 platform"
- select PXA27x
-
-config MACH_EXEDA
- bool "CompuLab eXeda platform"
- select PXA27x
-
config MACH_CM_X300
bool "CompuLab CM-X300 modules"
select CPU_PXA300
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index f70728930c4f..177abe584dd5 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -40,11 +40,6 @@ obj-$(CONFIG_MACH_ARCOM_ZEUS) += zeus.o
obj-$(CONFIG_MACH_BALLOON3) += balloon3.o
obj-$(CONFIG_MACH_CSB726) += csb726.o
obj-$(CONFIG_CSB726_CSB701) += csb701.o
-obj-$(CONFIG_MACH_ARMCORE) += cm-x2xx.o cm-x255.o cm-x270.o
-ifeq ($(CONFIG_PCI),y)
-obj-$(CONFIG_MACH_ARMCORE) += cm-x2xx-pci.o
-endif
-obj-$(CONFIG_MACH_EM_X270) += em-x270.o
obj-$(CONFIG_MACH_CM_X300) += cm-x300.o
obj-$(CONFIG_MACH_CAPC7117) += capc7117.o mxm8x10.o
obj-$(CONFIG_ARCH_GUMSTIX) += gumstix.o
diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c
deleted file mode 100644
index ea1e85775759..000000000000
--- a/arch/arm/mach-pxa/cm-x255.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-pxa/cm-x255.c
- *
- * Copyright (C) 2007, 2008 CompuLab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/nand-gpio.h>
-#include <linux/gpio/machine.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/pxa2xx_spi.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach-types.h>
-#include <asm/mach/map.h>
-
-#include "pxa25x.h"
-
-#include "generic.h"
-
-#define GPIO_NAND_CS (5)
-#define GPIO_NAND_ALE (4)
-#define GPIO_NAND_CLE (3)
-#define GPIO_NAND_RB (10)
-
-static unsigned long cmx255_pin_config[] = {
- /* AC'97 */
- GPIO28_AC97_BITCLK,
- GPIO29_AC97_SDATA_IN_0,
- GPIO30_AC97_SDATA_OUT,
- GPIO31_AC97_SYNC,
-
- /* BTUART */
- GPIO42_BTUART_RXD,
- GPIO43_BTUART_TXD,
- GPIO44_BTUART_CTS,
- GPIO45_BTUART_RTS,
-
- /* STUART */
- GPIO46_STUART_RXD,
- GPIO47_STUART_TXD,
-
- /* LCD */
- GPIOxx_LCD_TFT_16BPP,
-
- /* SSP1 */
- GPIO23_SSP1_SCLK,
- GPIO24_SSP1_SFRM,
- GPIO25_SSP1_TXD,
- GPIO26_SSP1_RXD,
-
- /* SSP2 */
- GPIO81_SSP2_CLK_OUT,
- GPIO82_SSP2_FRM_OUT,
- GPIO83_SSP2_TXD,
- GPIO84_SSP2_RXD,
-
- /* PC Card */
- GPIO48_nPOE,
- GPIO49_nPWE,
- GPIO50_nPIOR,
- GPIO51_nPIOW,
- GPIO52_nPCE_1,
- GPIO53_nPCE_2,
- GPIO54_nPSKTSEL,
- GPIO55_nPREG,
- GPIO56_nPWAIT,
- GPIO57_nIOIS16,
-
- /* SDRAM and local bus */
- GPIO15_nCS_1,
- GPIO78_nCS_2,
- GPIO79_nCS_3,
- GPIO80_nCS_4,
- GPIO33_nCS_5,
- GPIO18_RDY,
-
- /* GPIO */
- GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH,
- GPIO9_GPIO, /* PC card reset */
-
- /* NAND controls */
- GPIO5_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */
- GPIO4_GPIO | MFP_LPM_DRIVE_LOW, /* NAND ALE */
- GPIO3_GPIO | MFP_LPM_DRIVE_LOW, /* NAND CLE */
- GPIO10_GPIO, /* NAND Ready/Busy */
-
- /* interrupts */
- GPIO22_GPIO, /* DM9000 interrupt */
-};
-
-#if defined(CONFIG_SPI_PXA2XX)
-static struct pxa2xx_spi_controller pxa_ssp_master_info = {
- .num_chipselect = 1,
-};
-
-static struct spi_board_info spi_board_info[] __initdata = {
- [0] = {
- .modalias = "rtc-max6902",
- .max_speed_hz = 1000000,
- .bus_num = 1,
- .chip_select = 0,
- },
-};
-
-static void __init cmx255_init_rtc(void)
-{
- pxa2xx_set_spi_info(1, &pxa_ssp_master_info);
- spi_register_board_info(ARRAY_AND_SIZE(spi_board_info));
-}
-#else
-static inline void cmx255_init_rtc(void) {}
-#endif
-
-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
-static struct mtd_partition cmx255_nor_partitions[] = {
- {
- .name = "ARMmon",
- .size = 0x00030000,
- .offset = 0,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- } , {
- .name = "ARMmon setup block",
- .size = 0x00010000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- } , {
- .name = "kernel",
- .size = 0x00160000,
- .offset = MTDPART_OFS_APPEND,
- } , {
- .name = "ramdisk",
- .size = MTDPART_SIZ_FULL,
- .offset = MTDPART_OFS_APPEND
- }
-};
-
-static struct physmap_flash_data cmx255_nor_flash_data[] = {
- {
- .width = 2, /* bankwidth in bytes */
- .parts = cmx255_nor_partitions,
- .nr_parts = ARRAY_SIZE(cmx255_nor_partitions)
- }
-};
-
-static struct resource cmx255_nor_resource = {
- .start = PXA_CS0_PHYS,
- .end = PXA_CS0_PHYS + SZ_8M - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device cmx255_nor = {
- .name = "physmap-flash",
- .id = -1,
- .dev = {
- .platform_data = cmx255_nor_flash_data,
- },
- .resource = &cmx255_nor_resource,
- .num_resources = 1,
-};
-
-static void __init cmx255_init_nor(void)
-{
- platform_device_register(&cmx255_nor);
-}
-#else
-static inline void cmx255_init_nor(void) {}
-#endif
-
-#if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE)
-
-static struct gpiod_lookup_table cmx255_nand_gpiod_table = {
- .dev_id = "gpio-nand",
- .table = {
- GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH),
- },
-};
-
-static struct resource cmx255_nand_resource[] = {
- [0] = {
- .start = PXA_CS1_PHYS,
- .end = PXA_CS1_PHYS + 11,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PXA_CS5_PHYS,
- .end = PXA_CS5_PHYS + 3,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct mtd_partition cmx255_nand_parts[] = {
- [0] = {
- .name = "cmx255-nand",
- .size = MTDPART_SIZ_FULL,
- .offset = 0,
- },
-};
-
-static struct gpio_nand_platdata cmx255_nand_platdata = {
- .parts = cmx255_nand_parts,
- .num_parts = ARRAY_SIZE(cmx255_nand_parts),
- .chip_delay = 25,
-};
-
-static struct platform_device cmx255_nand = {
- .name = "gpio-nand",
- .num_resources = ARRAY_SIZE(cmx255_nand_resource),
- .resource = cmx255_nand_resource,
- .id = -1,
- .dev = {
- .platform_data = &cmx255_nand_platdata,
- }
-};
-
-static void __init cmx255_init_nand(void)
-{
- gpiod_add_lookup_table(&cmx255_nand_gpiod_table);
- platform_device_register(&cmx255_nand);
-}
-#else
-static inline void cmx255_init_nand(void) {}
-#endif
-
-void __init cmx255_init(void)
-{
- pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx255_pin_config));
-
- cmx255_init_rtc();
- cmx255_init_nor();
- cmx255_init_nand();
-}
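
The deleted board file above (and cm-x270.c below) lean on a common PXA board-file idiom: each optional peripheral gets an __init helper that is compiled only when its driver is enabled, with an empty inline stub otherwise, so the machine init function can call the helpers unconditionally. A minimal sketch of that idiom, with CONFIG_EXAMPLE_FEATURE and the function names as placeholders:

#include <linux/init.h>

/*
 * Placeholder Kconfig symbol and names; the shape matches the
 * cmx255_init_rtc()/cmx255_init_nor()/cmx255_init_nand() helpers
 * removed above.
 */
#if defined(CONFIG_EXAMPLE_FEATURE) || defined(CONFIG_EXAMPLE_FEATURE_MODULE)
static void __init example_init_feature(void)
{
	/* register the peripheral's platform device here */
}
#else
static inline void example_init_feature(void) {}
#endif

static void __init example_board_init(void)
{
	example_init_feature();	/* safe to call whether or not the feature is built */
}
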
diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
deleted file mode 100644
index 9baad11314f2..000000000000
--- a/arch/arm/mach-pxa/cm-x270.c
+++ /dev/null
@@ -1,419 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-pxa/cm-x270.c
- *
- * Copyright (C) 2007, 2008 CompuLab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
-#include <linux/delay.h>
-
-#include <linux/platform_data/rtc-v3020.h>
-#include <video/mbxfb.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/pxa2xx_spi.h>
-#include <linux/spi/libertas_spi.h>
-
-#include "pxa27x.h"
-#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <linux/platform_data/mmc-pxamci.h>
-
-#include "generic.h"
-
-/* physical address if local-bus attached devices */
-#define RTC_PHYS_BASE (PXA_CS1_PHYS + (5 << 22))
-
-/* GPIO IRQ usage */
-#define GPIO83_MMC_IRQ (83)
-
-#define CMX270_MMC_IRQ PXA_GPIO_TO_IRQ(GPIO83_MMC_IRQ)
-
-/* MMC power enable */
-#define GPIO105_MMC_POWER (105)
-
-/* WLAN GPIOS */
-#define GPIO19_WLAN_STRAP (19)
-#define GPIO102_WLAN_RST (102)
-
-static unsigned long cmx270_pin_config[] = {
- /* AC'97 */
- GPIO28_AC97_BITCLK,
- GPIO29_AC97_SDATA_IN_0,
- GPIO30_AC97_SDATA_OUT,
- GPIO31_AC97_SYNC,
- GPIO98_AC97_SYSCLK,
- GPIO113_AC97_nRESET,
-
- /* BTUART */
- GPIO42_BTUART_RXD,
- GPIO43_BTUART_TXD,
- GPIO44_BTUART_CTS,
- GPIO45_BTUART_RTS,
-
- /* STUART */
- GPIO46_STUART_RXD,
- GPIO47_STUART_TXD,
-
- /* MCI controller */
- GPIO32_MMC_CLK,
- GPIO112_MMC_CMD,
- GPIO92_MMC_DAT_0,
- GPIO109_MMC_DAT_1,
- GPIO110_MMC_DAT_2,
- GPIO111_MMC_DAT_3,
-
- /* LCD */
- GPIOxx_LCD_TFT_16BPP,
-
- /* I2C */
- GPIO117_I2C_SCL,
- GPIO118_I2C_SDA,
-
- /* SSP1 */
- GPIO23_SSP1_SCLK,
- GPIO24_SSP1_SFRM,
- GPIO25_SSP1_TXD,
- GPIO26_SSP1_RXD,
-
- /* SSP2 */
- GPIO19_GPIO, /* SSP2 clock is used as GPIO for Libertas pin-strap */
- GPIO14_GPIO,
- GPIO87_SSP2_TXD,
- GPIO88_SSP2_RXD,
-
- /* PC Card */
- GPIO48_nPOE,
- GPIO49_nPWE,
- GPIO50_nPIOR,
- GPIO51_nPIOW,
- GPIO85_nPCE_1,
- GPIO54_nPCE_2,
- GPIO55_nPREG,
- GPIO56_nPWAIT,
- GPIO57_nIOIS16,
-
- /* SDRAM and local bus */
- GPIO15_nCS_1,
- GPIO78_nCS_2,
- GPIO79_nCS_3,
- GPIO80_nCS_4,
- GPIO33_nCS_5,
- GPIO49_nPWE,
- GPIO18_RDY,
-
- /* GPIO */
- GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH,
- GPIO105_GPIO | MFP_LPM_DRIVE_HIGH, /* MMC/SD power */
- GPIO53_GPIO, /* PC card reset */
- GPIO102_GPIO, /* WLAN reset */
-
- /* NAND controls */
- GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */
- GPIO89_GPIO, /* NAND Ready/Busy */
-
- /* interrupts */
- GPIO10_GPIO, /* DM9000 interrupt */
- GPIO83_GPIO, /* MMC card detect */
- GPIO95_GPIO, /* WLAN interrupt */
-};
-
-/* V3020 RTC */
-#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
-static struct resource cmx270_v3020_resource[] = {
- [0] = {
- .start = RTC_PHYS_BASE,
- .end = RTC_PHYS_BASE + 4,
- .flags = IORESOURCE_MEM,
- },
-};
-
-struct v3020_platform_data cmx270_v3020_pdata = {
- .leftshift = 16,
-};
-
-static struct platform_device cmx270_rtc_device = {
- .name = "v3020",
- .num_resources = ARRAY_SIZE(cmx270_v3020_resource),
- .resource = cmx270_v3020_resource,
- .id = -1,
- .dev = {
- .platform_data = &cmx270_v3020_pdata,
- }
-};
-
-static void __init cmx270_init_rtc(void)
-{
- platform_device_register(&cmx270_rtc_device);
-}
-#else
-static inline void cmx270_init_rtc(void) {}
-#endif
-
-/* 2700G graphics */
-#if defined(CONFIG_FB_MBX) || defined(CONFIG_FB_MBX_MODULE)
-static u64 fb_dma_mask = ~(u64)0;
-
-static struct resource cmx270_2700G_resource[] = {
- /* frame buffer memory including ODFB and External SDRAM */
- [0] = {
- .start = PXA_CS2_PHYS,
- .end = PXA_CS2_PHYS + 0x01ffffff,
- .flags = IORESOURCE_MEM,
- },
- /* Marathon registers */
- [1] = {
- .start = PXA_CS2_PHYS + 0x03fe0000,
- .end = PXA_CS2_PHYS + 0x03ffffff,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static unsigned long cmx270_marathon_on[] = {
- GPIO58_GPIO,
- GPIO59_GPIO,
- GPIO60_GPIO,
- GPIO61_GPIO,
- GPIO62_GPIO,
- GPIO63_GPIO,
- GPIO64_GPIO,
- GPIO65_GPIO,
- GPIO66_GPIO,
- GPIO67_GPIO,
- GPIO68_GPIO,
- GPIO69_GPIO,
- GPIO70_GPIO,
- GPIO71_GPIO,
- GPIO72_GPIO,
- GPIO73_GPIO,
- GPIO74_GPIO,
- GPIO75_GPIO,
- GPIO76_GPIO,
- GPIO77_GPIO,
-};
-
-static unsigned long cmx270_marathon_off[] = {
- GPIOxx_LCD_TFT_16BPP,
-};
-
-static int cmx270_marathon_probe(struct fb_info *fb)
-{
- int gpio, err;
-
- for (gpio = 58; gpio <= 77; gpio++) {
- err = gpio_request(gpio, "LCD");
- if (err)
- return err;
- gpio_direction_input(gpio);
- }
-
- pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx270_marathon_on));
- return 0;
-}
-
-static int cmx270_marathon_remove(struct fb_info *fb)
-{
- int gpio;
-
- pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx270_marathon_off));
-
- for (gpio = 58; gpio <= 77; gpio++)
- gpio_free(gpio);
-
- return 0;
-}
-
-static struct mbxfb_platform_data cmx270_2700G_data = {
- .xres = {
- .min = 240,
- .max = 1200,
- .defval = 640,
- },
- .yres = {
- .min = 240,
- .max = 1200,
- .defval = 480,
- },
- .bpp = {
- .min = 16,
- .max = 32,
- .defval = 16,
- },
- .memsize = 8*1024*1024,
- .probe = cmx270_marathon_probe,
- .remove = cmx270_marathon_remove,
-};
-
-static struct platform_device cmx270_2700G = {
- .name = "mbx-fb",
- .dev = {
- .platform_data = &cmx270_2700G_data,
- .dma_mask = &fb_dma_mask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(cmx270_2700G_resource),
- .resource = cmx270_2700G_resource,
- .id = -1,
-};
-
-static void __init cmx270_init_2700G(void)
-{
- platform_device_register(&cmx270_2700G);
-}
-#else
-static inline void cmx270_init_2700G(void) {}
-#endif
-
-/* PXA27x OHCI controller setup */
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-static struct pxaohci_platform_data cmx270_ohci_platform_data = {
- .port_mode = PMM_PERPORT_MODE,
- .flags = ENABLE_PORT1 | ENABLE_PORT2 | POWER_CONTROL_LOW,
-};
-
-static void __init cmx270_init_ohci(void)
-{
- pxa_set_ohci_info(&cmx270_ohci_platform_data);
-}
-#else
-static inline void cmx270_init_ohci(void) {}
-#endif
-
-#if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE)
-static struct pxamci_platform_data cmx270_mci_platform_data = {
- .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
-};
-
-static struct gpiod_lookup_table cmx270_mci_gpio_table = {
- .dev_id = "pxa2xx-mci.0",
- .table = {
- /* Card detect on GPIO 83 */
- GPIO_LOOKUP("gpio-pxa", GPIO83_MMC_IRQ, "cd", GPIO_ACTIVE_LOW),
- /* Power on GPIO 105 */
- GPIO_LOOKUP("gpio-pxa", GPIO105_MMC_POWER,
- "power", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static void __init cmx270_init_mmc(void)
-{
- gpiod_add_lookup_table(&cmx270_mci_gpio_table);
- pxa_set_mci_info(&cmx270_mci_platform_data);
-}
-#else
-static inline void cmx270_init_mmc(void) {}
-#endif
-
-#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static struct pxa2xx_spi_controller cm_x270_spi_info = {
- .num_chipselect = 1,
- .enable_dma = 1,
-};
-
-static struct pxa2xx_spi_chip cm_x270_libertas_chip = {
- .rx_threshold = 1,
- .tx_threshold = 1,
- .timeout = 1000,
- .gpio_cs = 14,
-};
-
-static unsigned long cm_x270_libertas_pin_config[] = {
- /* SSP2 */
- GPIO19_SSP2_SCLK,
- GPIO14_GPIO,
- GPIO87_SSP2_TXD,
- GPIO88_SSP2_RXD,
-};
-
-static int cm_x270_libertas_setup(struct spi_device *spi)
-{
- int err = gpio_request(GPIO19_WLAN_STRAP, "WLAN STRAP");
- if (err)
- return err;
-
- err = gpio_request(GPIO102_WLAN_RST, "WLAN RST");
- if (err)
- goto err_free_strap;
-
- err = gpio_direction_output(GPIO102_WLAN_RST, 0);
- if (err)
- goto err_free_strap;
- msleep(100);
-
- err = gpio_direction_output(GPIO19_WLAN_STRAP, 1);
- if (err)
- goto err_free_strap;
- msleep(100);
-
- pxa2xx_mfp_config(ARRAY_AND_SIZE(cm_x270_libertas_pin_config));
-
- gpio_set_value(GPIO102_WLAN_RST, 1);
- msleep(100);
-
- spi->bits_per_word = 16;
- spi_setup(spi);
-
- return 0;
-
-err_free_strap:
- gpio_free(GPIO19_WLAN_STRAP);
-
- return err;
-}
-
-static int cm_x270_libertas_teardown(struct spi_device *spi)
-{
- gpio_set_value(GPIO102_WLAN_RST, 0);
- gpio_free(GPIO102_WLAN_RST);
- gpio_free(GPIO19_WLAN_STRAP);
-
- return 0;
-}
-
-struct libertas_spi_platform_data cm_x270_libertas_pdata = {
- .use_dummy_writes = 1,
- .setup = cm_x270_libertas_setup,
- .teardown = cm_x270_libertas_teardown,
-};
-
-static struct spi_board_info cm_x270_spi_devices[] __initdata = {
- {
- .modalias = "libertas_spi",
- .max_speed_hz = 13000000,
- .bus_num = 2,
- .irq = PXA_GPIO_TO_IRQ(95),
- .chip_select = 0,
- .controller_data = &cm_x270_libertas_chip,
- .platform_data = &cm_x270_libertas_pdata,
- },
-};
-
-static void __init cmx270_init_spi(void)
-{
- pxa2xx_set_spi_info(2, &cm_x270_spi_info);
- spi_register_board_info(ARRAY_AND_SIZE(cm_x270_spi_devices));
-}
-#else
-static inline void cmx270_init_spi(void) {}
-#endif
-
-void __init cmx270_init(void)
-{
- pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx270_pin_config));
-
-#ifdef CONFIG_PM
- pxa27x_set_pwrmode(PWRMODE_DEEPSLEEP);
-#endif
-
- cmx270_init_rtc();
- cmx270_init_mmc();
- cmx270_init_ohci();
- cmx270_init_2700G();
- cmx270_init_spi();
-}
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
deleted file mode 100644
index f1c61c6b5610..000000000000
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ /dev/null
@@ -1,196 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-pxa/cm-x2xx-pci.c
- *
- * PCI bios-type initialisation for PCI machines
- *
- * Bits taken from various places.
- *
- * Copyright (C) 2007, 2008 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-
-#include <asm/mach/pci.h>
-#include <asm/mach-types.h>
-
-#include <asm/hardware/it8152.h>
-
-void __iomem *it8152_base_address;
-static int cmx2xx_it8152_irq_gpio;
-
-static void cmx2xx_it8152_irq_demux(struct irq_desc *desc)
-{
- /* clear our parent irq */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-
- it8152_irq_demux(desc);
-}
-
-void __cmx2xx_pci_init_irq(int irq_gpio)
-{
- it8152_init_irq();
-
- cmx2xx_it8152_irq_gpio = irq_gpio;
-
- irq_set_irq_type(gpio_to_irq(irq_gpio), IRQ_TYPE_EDGE_RISING);
-
- irq_set_chained_handler(gpio_to_irq(irq_gpio),
- cmx2xx_it8152_irq_demux);
-}
-
-#ifdef CONFIG_PM
-static unsigned long sleep_save_ite[10];
-
-void __cmx2xx_pci_suspend(void)
-{
- /* save ITE state */
- sleep_save_ite[0] = __raw_readl(IT8152_INTC_PDCNIMR);
- sleep_save_ite[1] = __raw_readl(IT8152_INTC_LPCNIMR);
- sleep_save_ite[2] = __raw_readl(IT8152_INTC_LPNIAR);
-
-	/* Clear ITE IRQs */
- __raw_writel((0), IT8152_INTC_PDCNIRR);
- __raw_writel((0), IT8152_INTC_LPCNIRR);
-}
-
-void __cmx2xx_pci_resume(void)
-{
- /* restore IT8152 state */
- __raw_writel((sleep_save_ite[0]), IT8152_INTC_PDCNIMR);
- __raw_writel((sleep_save_ite[1]), IT8152_INTC_LPCNIMR);
- __raw_writel((sleep_save_ite[2]), IT8152_INTC_LPNIAR);
-}
-#else
-void cmx2xx_pci_suspend(void) {}
-void cmx2xx_pci_resume(void) {}
-#endif
-
-/* PCI IRQ mapping */
-static int __init cmx2xx_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- int irq;
-
- dev_dbg(&dev->dev, "%s: slot=%x, pin=%x\n", __func__, slot, pin);
-
- irq = it8152_pci_map_irq(dev, slot, pin);
- if (irq)
- return irq;
-
- /*
-	  Here comes the ugly part. The routing is baseboard specific,
-	  but defining a platform for each possible base of the CM-X2XX is
-	  unrealistic. Here we keep the mapping for ATXBase and SB-X2XX.
- */
- /* ATXBASE PCI slot */
- if (slot == 7)
- return IT8152_PCI_INTA;
-
- /* ATXBase/SB-X2XX CardBus */
- if (slot == 8 || slot == 0)
- return IT8152_PCI_INTB;
-
- /* ATXBase Ethernet */
- if (slot == 9)
- return IT8152_PCI_INTA;
-
- /* CM-x255 Onboard Ethernet */
- if (slot == 15)
- return IT8152_PCI_INTC;
-
- /* SB-x2xx Ethernet */
- if (slot == 16)
- return IT8152_PCI_INTA;
-
- /* PC104+ interrupt routing */
- if ((slot == 17) || (slot == 19))
- return IT8152_PCI_INTA;
- if ((slot == 18) || (slot == 20))
- return IT8152_PCI_INTB;
-
-	return 0;
-}
-
-static void cmx2xx_pci_preinit(void)
-{
- pr_info("Initializing CM-X2XX PCI subsystem\n");
-
- pcibios_min_io = 0;
- pcibios_min_mem = 0;
-
- __raw_writel(0x800, IT8152_PCI_CFG_ADDR);
- if (__raw_readl(IT8152_PCI_CFG_DATA) == 0x81521283) {
- pr_info("PCI Bridge found.\n");
-
- /* set PCI I/O base at 0 */
- writel(0x848, IT8152_PCI_CFG_ADDR);
- writel(0, IT8152_PCI_CFG_DATA);
-
- /* set PCI memory base at 0 */
- writel(0x840, IT8152_PCI_CFG_ADDR);
- writel(0, IT8152_PCI_CFG_DATA);
-
- writel(0x20, IT8152_GPIO_GPDR);
-
- /* CardBus Controller on ATXbase baseboard */
- writel(0x4000, IT8152_PCI_CFG_ADDR);
- if (readl(IT8152_PCI_CFG_DATA) == 0xAC51104C) {
- pr_info("CardBus Bridge found.\n");
-
- /* Configure socket 0 */
- writel(0x408C, IT8152_PCI_CFG_ADDR);
- writel(0x1022, IT8152_PCI_CFG_DATA);
-
- writel(0x4080, IT8152_PCI_CFG_ADDR);
- writel(0x3844d060, IT8152_PCI_CFG_DATA);
-
- writel(0x4090, IT8152_PCI_CFG_ADDR);
- writel(((readl(IT8152_PCI_CFG_DATA) & 0xffff) |
- 0x60440000),
- IT8152_PCI_CFG_DATA);
-
- writel(0x4018, IT8152_PCI_CFG_ADDR);
- writel(0xb0000000, IT8152_PCI_CFG_DATA);
-
- /* Configure socket 1 */
- writel(0x418C, IT8152_PCI_CFG_ADDR);
- writel(0x1022, IT8152_PCI_CFG_DATA);
-
- writel(0x4180, IT8152_PCI_CFG_ADDR);
- writel(0x3844d060, IT8152_PCI_CFG_DATA);
-
- writel(0x4190, IT8152_PCI_CFG_ADDR);
- writel(((readl(IT8152_PCI_CFG_DATA) & 0xffff) |
- 0x60440000),
- IT8152_PCI_CFG_DATA);
-
- writel(0x4118, IT8152_PCI_CFG_ADDR);
- writel(0xb0000000, IT8152_PCI_CFG_DATA);
- }
- }
-}
-
-static struct hw_pci cmx2xx_pci __initdata = {
- .map_irq = cmx2xx_pci_map_irq,
- .nr_controllers = 1,
- .ops = &it8152_ops,
- .setup = it8152_pci_setup,
- .preinit = cmx2xx_pci_preinit,
-};
-
-static int __init cmx2xx_init_pci(void)
-{
- if (machine_is_armcore())
- pci_common_init(&cmx2xx_pci);
-
- return 0;
-}
-
-subsys_initcall(cmx2xx_init_pci);
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.h b/arch/arm/mach-pxa/cm-x2xx-pci.h
deleted file mode 100644
index 93ffaaee75d7..000000000000
--- a/arch/arm/mach-pxa/cm-x2xx-pci.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-extern void __cmx2xx_pci_init_irq(int irq_gpio);
-extern void __cmx2xx_pci_suspend(void);
-extern void __cmx2xx_pci_resume(void);
-
-#ifdef CONFIG_PCI
-#define cmx2xx_pci_init_irq(x) __cmx2xx_pci_init_irq(x)
-#define cmx2xx_pci_suspend(x) __cmx2xx_pci_suspend(x)
-#define cmx2xx_pci_resume(x) __cmx2xx_pci_resume(x)
-#else
-#define cmx2xx_pci_init_irq(x) do {} while (0)
-#define cmx2xx_pci_suspend(x) do {} while (0)
-#define cmx2xx_pci_resume(x) do {} while (0)
-#endif
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
deleted file mode 100644
index ff976d1217eb..000000000000
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ /dev/null
@@ -1,538 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-pxa/cm-x2xx.c
- *
- * Copyright (C) 2008 CompuLab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/platform_device.h>
-#include <linux/syscore_ops.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/regulator/machine.h>
-
-#include <linux/dm9000.h>
-#include <linux/leds.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach-types.h>
-#include <asm/mach/map.h>
-
-#include "pxa25x.h"
-#undef GPIO24_SSP1_SFRM
-#undef GPIO86_GPIO
-#undef GPIO87_GPIO
-#undef GPIO88_GPIO
-#undef GPIO89_GPIO
-#include "pxa27x.h"
-#undef GPIO24_SSP1_SFRM
-#undef GPIO86_GPIO
-#undef GPIO87_GPIO
-#undef GPIO88_GPIO
-#undef GPIO89_GPIO
-#include <mach/audio.h>
-#include <linux/platform_data/video-pxafb.h>
-#include <mach/smemc.h>
-
-#include <asm/hardware/it8152.h>
-
-#include "generic.h"
-#include "cm-x2xx-pci.h"
-
-extern void cmx255_init(void);
-extern void cmx270_init(void);
-
-/* reserve IRQs for IT8152 */
-#define CMX2XX_NR_IRQS (IRQ_BOARD_START + 40)
-
-/* virtual addresses for statically mapped regions */
-#define CMX2XX_VIRT_BASE (void __iomem *)(0xe8000000)
-#define CMX2XX_IT8152_VIRT (CMX2XX_VIRT_BASE)
-
-/* physical addresses of local-bus attached devices */
-#define CMX255_DM9000_PHYS_BASE (PXA_CS1_PHYS + (8 << 22))
-#define CMX270_DM9000_PHYS_BASE (PXA_CS1_PHYS + (6 << 22))
-
-/* leds */
-#define CMX255_GPIO_RED (27)
-#define CMX255_GPIO_GREEN (32)
-#define CMX270_GPIO_RED (93)
-#define CMX270_GPIO_GREEN (94)
-
-/* GPIO IRQ usage */
-#define GPIO22_ETHIRQ (22)
-#define GPIO10_ETHIRQ (10)
-#define CMX255_GPIO_IT8152_IRQ (0)
-#define CMX270_GPIO_IT8152_IRQ (22)
-
-#define CMX255_ETHIRQ PXA_GPIO_TO_IRQ(GPIO22_ETHIRQ)
-#define CMX270_ETHIRQ PXA_GPIO_TO_IRQ(GPIO10_ETHIRQ)
-
-#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
-static struct resource cmx255_dm9000_resource[] = {
- [0] = {
- .start = CMX255_DM9000_PHYS_BASE,
- .end = CMX255_DM9000_PHYS_BASE + 3,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = CMX255_DM9000_PHYS_BASE + 4,
- .end = CMX255_DM9000_PHYS_BASE + 4 + 500,
- .flags = IORESOURCE_MEM,
- },
- [2] = {
- .start = CMX255_ETHIRQ,
- .end = CMX255_ETHIRQ,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- }
-};
-
-static struct resource cmx270_dm9000_resource[] = {
- [0] = {
- .start = CMX270_DM9000_PHYS_BASE,
- .end = CMX270_DM9000_PHYS_BASE + 3,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = CMX270_DM9000_PHYS_BASE + 8,
- .end = CMX270_DM9000_PHYS_BASE + 8 + 500,
- .flags = IORESOURCE_MEM,
- },
- [2] = {
- .start = CMX270_ETHIRQ,
- .end = CMX270_ETHIRQ,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- }
-};
-
-static struct dm9000_plat_data cmx270_dm9000_platdata = {
- .flags = DM9000_PLATF_32BITONLY | DM9000_PLATF_NO_EEPROM,
-};
-
-static struct platform_device cmx2xx_dm9000_device = {
- .name = "dm9000",
- .id = 0,
- .num_resources = ARRAY_SIZE(cmx270_dm9000_resource),
- .dev = {
- .platform_data = &cmx270_dm9000_platdata,
- }
-};
-
-static void __init cmx2xx_init_dm9000(void)
-{
- if (cpu_is_pxa25x())
- cmx2xx_dm9000_device.resource = cmx255_dm9000_resource;
- else
- cmx2xx_dm9000_device.resource = cmx270_dm9000_resource;
- platform_device_register(&cmx2xx_dm9000_device);
-}
-#else
-static inline void cmx2xx_init_dm9000(void) {}
-#endif
-
-/* UCB1400 touchscreen controller */
-#if defined(CONFIG_TOUCHSCREEN_UCB1400) || defined(CONFIG_TOUCHSCREEN_UCB1400_MODULE)
-static struct platform_device cmx2xx_ts_device = {
- .name = "ucb1400_core",
- .id = -1,
-};
-
-static void __init cmx2xx_init_touchscreen(void)
-{
- platform_device_register(&cmx2xx_ts_device);
-}
-#else
-static inline void cmx2xx_init_touchscreen(void) {}
-#endif
-
-/* CM-X270 LEDs */
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
-static struct gpio_led cmx2xx_leds[] = {
- [0] = {
- .name = "cm-x2xx:red",
- .default_trigger = "nand-disk",
- .active_low = 1,
- },
- [1] = {
- .name = "cm-x2xx:green",
- .default_trigger = "heartbeat",
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data cmx2xx_gpio_led_pdata = {
- .num_leds = ARRAY_SIZE(cmx2xx_leds),
- .leds = cmx2xx_leds,
-};
-
-static struct platform_device cmx2xx_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &cmx2xx_gpio_led_pdata,
- },
-};
-
-static void __init cmx2xx_init_leds(void)
-{
- if (cpu_is_pxa25x()) {
- cmx2xx_leds[0].gpio = CMX255_GPIO_RED;
- cmx2xx_leds[1].gpio = CMX255_GPIO_GREEN;
- } else {
- cmx2xx_leds[0].gpio = CMX270_GPIO_RED;
- cmx2xx_leds[1].gpio = CMX270_GPIO_GREEN;
- }
- platform_device_register(&cmx2xx_led_device);
-}
-#else
-static inline void cmx2xx_init_leds(void) {}
-#endif
-
-#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
-/*
- Display definitions
-   keep these for backwards compatibility, although symbolic names (as
-   e.g. in lpd270.c) look better
-*/
-#define MTYPE_STN320x240 0
-#define MTYPE_TFT640x480 1
-#define MTYPE_CRT640x480 2
-#define MTYPE_CRT800x600 3
-#define MTYPE_TFT320x240 6
-#define MTYPE_STN640x480 7
-
-static struct pxafb_mode_info generic_stn_320x240_mode = {
- .pixclock = 76923,
- .bpp = 8,
- .xres = 320,
- .yres = 240,
- .hsync_len = 3,
- .vsync_len = 2,
- .left_margin = 3,
- .upper_margin = 0,
- .right_margin = 3,
- .lower_margin = 0,
- .sync = (FB_SYNC_HOR_HIGH_ACT |
- FB_SYNC_VERT_HIGH_ACT),
- .cmap_greyscale = 0,
-};
-
-static struct pxafb_mach_info generic_stn_320x240 = {
- .modes = &generic_stn_320x240_mode,
- .num_modes = 1,
- .lcd_conn = LCD_COLOR_STN_8BPP | LCD_PCLK_EDGE_FALL |\
- LCD_AC_BIAS_FREQ(0xff),
- .cmap_inverse = 0,
- .cmap_static = 0,
-};
-
-static struct pxafb_mode_info generic_tft_640x480_mode = {
- .pixclock = 38461,
- .bpp = 8,
- .xres = 640,
- .yres = 480,
- .hsync_len = 60,
- .vsync_len = 2,
- .left_margin = 70,
- .upper_margin = 10,
- .right_margin = 70,
- .lower_margin = 5,
- .sync = 0,
- .cmap_greyscale = 0,
-};
-
-static struct pxafb_mach_info generic_tft_640x480 = {
- .modes = &generic_tft_640x480_mode,
- .num_modes = 1,
- .lcd_conn = LCD_COLOR_TFT_8BPP | LCD_PCLK_EDGE_FALL |\
- LCD_AC_BIAS_FREQ(0xff),
- .cmap_inverse = 0,
- .cmap_static = 0,
-};
-
-static struct pxafb_mode_info generic_crt_640x480_mode = {
- .pixclock = 38461,
- .bpp = 8,
- .xres = 640,
- .yres = 480,
- .hsync_len = 63,
- .vsync_len = 2,
- .left_margin = 81,
- .upper_margin = 33,
- .right_margin = 16,
- .lower_margin = 10,
- .sync = (FB_SYNC_HOR_HIGH_ACT |
- FB_SYNC_VERT_HIGH_ACT),
- .cmap_greyscale = 0,
-};
-
-static struct pxafb_mach_info generic_crt_640x480 = {
- .modes = &generic_crt_640x480_mode,
- .num_modes = 1,
- .lcd_conn = LCD_COLOR_TFT_8BPP | LCD_AC_BIAS_FREQ(0xff),
- .cmap_inverse = 0,
- .cmap_static = 0,
-};
-
-static struct pxafb_mode_info generic_crt_800x600_mode = {
- .pixclock = 28846,
- .bpp = 8,
- .xres = 800,
- .yres = 600,
- .hsync_len = 63,
- .vsync_len = 2,
- .left_margin = 26,
- .upper_margin = 21,
- .right_margin = 26,
- .lower_margin = 11,
- .sync = (FB_SYNC_HOR_HIGH_ACT |
- FB_SYNC_VERT_HIGH_ACT),
- .cmap_greyscale = 0,
-};
-
-static struct pxafb_mach_info generic_crt_800x600 = {
- .modes = &generic_crt_800x600_mode,
- .num_modes = 1,
- .lcd_conn = LCD_COLOR_TFT_8BPP | LCD_AC_BIAS_FREQ(0xff),
- .cmap_inverse = 0,
- .cmap_static = 0,
-};
-
-static struct pxafb_mode_info generic_tft_320x240_mode = {
- .pixclock = 134615,
- .bpp = 16,
- .xres = 320,
- .yres = 240,
- .hsync_len = 63,
- .vsync_len = 7,
- .left_margin = 75,
- .upper_margin = 0,
- .right_margin = 15,
- .lower_margin = 15,
- .sync = 0,
- .cmap_greyscale = 0,
-};
-
-static struct pxafb_mach_info generic_tft_320x240 = {
- .modes = &generic_tft_320x240_mode,
- .num_modes = 1,
- .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_AC_BIAS_FREQ(0xff),
- .cmap_inverse = 0,
- .cmap_static = 0,
-};
-
-static struct pxafb_mode_info generic_stn_640x480_mode = {
- .pixclock = 57692,
- .bpp = 8,
- .xres = 640,
- .yres = 480,
- .hsync_len = 4,
- .vsync_len = 2,
- .left_margin = 10,
- .upper_margin = 5,
- .right_margin = 10,
- .lower_margin = 5,
- .sync = (FB_SYNC_HOR_HIGH_ACT |
- FB_SYNC_VERT_HIGH_ACT),
- .cmap_greyscale = 0,
-};
-
-static struct pxafb_mach_info generic_stn_640x480 = {
- .modes = &generic_stn_640x480_mode,
- .num_modes = 1,
- .lcd_conn = LCD_COLOR_STN_8BPP | LCD_AC_BIAS_FREQ(0xff),
- .cmap_inverse = 0,
- .cmap_static = 0,
-};
-
-static struct pxafb_mach_info *cmx2xx_display = &generic_crt_640x480;
-
-static int __init cmx2xx_set_display(char *str)
-{
- int disp_type = simple_strtol(str, NULL, 0);
- switch (disp_type) {
- case MTYPE_STN320x240:
- cmx2xx_display = &generic_stn_320x240;
- break;
- case MTYPE_TFT640x480:
- cmx2xx_display = &generic_tft_640x480;
- break;
- case MTYPE_CRT640x480:
- cmx2xx_display = &generic_crt_640x480;
- break;
- case MTYPE_CRT800x600:
- cmx2xx_display = &generic_crt_800x600;
- break;
- case MTYPE_TFT320x240:
- cmx2xx_display = &generic_tft_320x240;
- break;
- case MTYPE_STN640x480:
- cmx2xx_display = &generic_stn_640x480;
- break;
- default: /* fallback to CRT 640x480 */
- cmx2xx_display = &generic_crt_640x480;
- break;
- }
- return 1;
-}
-
-/*
-   This should be done really early to get a proper configuration for
-   the frame buffer.
-   Indeed, pxafb parameters can be used instead, but the CM-X2XX bootloader
-   has a limited line length for the kernel command line, and it would also
-   break compatibility with proprietary releases already in the field.
-*/
-__setup("monitor=", cmx2xx_set_display);
-
-static void __init cmx2xx_init_display(void)
-{
- pxa_set_fb_info(NULL, cmx2xx_display);
-}
-#else
-static inline void cmx2xx_init_display(void) {}
-#endif
-
-#ifdef CONFIG_PM
-static unsigned long sleep_save_msc[10];
-
-static int cmx2xx_suspend(void)
-{
- cmx2xx_pci_suspend();
-
- /* save MSC registers */
- sleep_save_msc[0] = __raw_readl(MSC0);
- sleep_save_msc[1] = __raw_readl(MSC1);
- sleep_save_msc[2] = __raw_readl(MSC2);
-
- /* setup power saving mode registers */
- PCFR = 0x0;
- PSLR = 0xff400000;
- PMCR = 0x00000005;
- PWER = 0x80000000;
- PFER = 0x00000000;
- PRER = 0x00000000;
- PGSR0 = 0xC0018800;
- PGSR1 = 0x004F0002;
- PGSR2 = 0x6021C000;
- PGSR3 = 0x00020000;
-
- return 0;
-}
-
-static void cmx2xx_resume(void)
-{
- cmx2xx_pci_resume();
-
- /* restore MSC registers */
- __raw_writel(sleep_save_msc[0], MSC0);
- __raw_writel(sleep_save_msc[1], MSC1);
- __raw_writel(sleep_save_msc[2], MSC2);
-}
-
-static struct syscore_ops cmx2xx_pm_syscore_ops = {
- .resume = cmx2xx_resume,
- .suspend = cmx2xx_suspend,
-};
-
-static int __init cmx2xx_pm_init(void)
-{
- register_syscore_ops(&cmx2xx_pm_syscore_ops);
-
- return 0;
-}
-#else
-static int __init cmx2xx_pm_init(void) { return 0; }
-#endif
-
-#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE)
-static void __init cmx2xx_init_ac97(void)
-{
- pxa_set_ac97_info(NULL);
-}
-#else
-static inline void cmx2xx_init_ac97(void) {}
-#endif
-
-static void __init cmx2xx_init(void)
-{
- pxa_set_ffuart_info(NULL);
- pxa_set_btuart_info(NULL);
- pxa_set_stuart_info(NULL);
-
- cmx2xx_pm_init();
-
- if (cpu_is_pxa25x())
- cmx255_init();
- else
- cmx270_init();
-
- cmx2xx_init_dm9000();
- cmx2xx_init_display();
- cmx2xx_init_ac97();
- cmx2xx_init_touchscreen();
- cmx2xx_init_leds();
-
- regulator_has_full_constraints();
-}
-
-static void __init cmx2xx_init_irq(void)
-{
- if (cpu_is_pxa25x()) {
- pxa25x_init_irq();
- cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
- } else {
- pxa27x_init_irq();
- cmx2xx_pci_init_irq(CMX270_GPIO_IT8152_IRQ);
- }
-}
-
-#ifdef CONFIG_PCI
-/* Map PCI companion statically */
-static struct map_desc cmx2xx_io_desc[] __initdata = {
- [0] = { /* PCI bridge */
- .virtual = (unsigned long)CMX2XX_IT8152_VIRT,
- .pfn = __phys_to_pfn(PXA_CS4_PHYS),
- .length = SZ_64M,
- .type = MT_DEVICE
- },
-};
-
-static void __init cmx2xx_map_io(void)
-{
- if (cpu_is_pxa25x())
- pxa25x_map_io();
-
- if (cpu_is_pxa27x())
- pxa27x_map_io();
-
- iotable_init(cmx2xx_io_desc, ARRAY_SIZE(cmx2xx_io_desc));
-
- it8152_base_address = CMX2XX_IT8152_VIRT;
-}
-#else
-static void __init cmx2xx_map_io(void)
-{
- if (cpu_is_pxa25x())
- pxa25x_map_io();
-
- if (cpu_is_pxa27x())
- pxa27x_map_io();
-}
-#endif
-
-MACHINE_START(ARMCORE, "Compulab CM-X2XX")
- .atag_offset = 0x100,
- .map_io = cmx2xx_map_io,
- .nr_irqs = CMX2XX_NR_IRQS,
- .init_irq = cmx2xx_init_irq,
- /* NOTE: pxa25x_handle_irq() works on PXA27x w/o camera support */
- .handle_irq = pxa25x_handle_irq,
- .init_time = pxa_timer_init,
- .init_machine = cmx2xx_init,
-#ifdef CONFIG_PCI
- .dma_zone_size = SZ_64M,
-#endif
- .restart = pxa_restart,
-MACHINE_END
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
deleted file mode 100644
index d8681a331030..000000000000
--- a/arch/arm/mach-pxa/em-x270.c
+++ /dev/null
@@ -1,1286 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Support for CompuLab EM-X270 platform
- *
- * Copyright (C) 2007, 2008 CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- */
-
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-
-#include <linux/dm9000.h>
-#include <linux/platform_data/rtc-v3020.h>
-#include <linux/mtd/platnand.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-#include <linux/gpio_keys.h>
-#include <linux/gpio.h>
-#include <linux/gpio/machine.h>
-#include <linux/mfd/da903x.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/tdo24m.h>
-#include <linux/spi/libertas_spi.h>
-#include <linux/spi/pxa2xx_spi.h>
-#include <linux/power_supply.h>
-#include <linux/apm-emulation.h>
-#include <linux/i2c.h>
-#include <linux/platform_data/pca953x.h>
-#include <linux/platform_data/i2c-pxa.h>
-#include <linux/regulator/userspace-consumer.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "pxa27x.h"
-#include "pxa27x-udc.h"
-#include <mach/audio.h>
-#include <linux/platform_data/video-pxafb.h>
-#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <linux/platform_data/mmc-pxamci.h>
-#include <linux/platform_data/keypad-pxa27x.h>
-#include <linux/platform_data/media/camera-pxa.h>
-
-#include "generic.h"
-#include "devices.h"
-
-/* EM-X270 specific GPIOs */
-#define GPIO13_MMC_CD (13)
-#define GPIO95_MMC_WP (95)
-#define GPIO56_NAND_RB (56)
-#define GPIO93_CAM_RESET (93)
-#define GPIO16_USB_HUB_RESET (16)
-
-/* eXeda specific GPIOs */
-#define GPIO114_MMC_CD (114)
-#define GPIO20_NAND_RB (20)
-#define GPIO38_SD_PWEN (38)
-#define GPIO37_WLAN_RST (37)
-#define GPIO95_TOUCHPAD_INT (95)
-#define GPIO130_CAM_RESET (130)
-#define GPIO10_USB_HUB_RESET (10)
-
-/* common GPIOs */
-#define GPIO11_NAND_CS (11)
-#define GPIO41_ETHIRQ (41)
-#define EM_X270_ETHIRQ PXA_GPIO_TO_IRQ(GPIO41_ETHIRQ)
-#define GPIO115_WLAN_PWEN (115)
-#define GPIO19_WLAN_STRAP (19)
-#define GPIO9_USB_VBUS_EN (9)
-
-static int mmc_cd;
-static int nand_rb;
-static int dm9000_flags;
-static int cam_reset;
-static int usb_hub_reset;
-
-static unsigned long common_pin_config[] = {
- /* AC'97 */
- GPIO28_AC97_BITCLK,
- GPIO29_AC97_SDATA_IN_0,
- GPIO30_AC97_SDATA_OUT,
- GPIO31_AC97_SYNC,
- GPIO98_AC97_SYSCLK,
- GPIO113_AC97_nRESET,
-
- /* BTUART */
- GPIO42_BTUART_RXD,
- GPIO43_BTUART_TXD,
- GPIO44_BTUART_CTS,
- GPIO45_BTUART_RTS,
-
- /* STUART */
- GPIO46_STUART_RXD,
- GPIO47_STUART_TXD,
-
- /* MCI controller */
- GPIO32_MMC_CLK,
- GPIO112_MMC_CMD,
- GPIO92_MMC_DAT_0,
- GPIO109_MMC_DAT_1,
- GPIO110_MMC_DAT_2,
- GPIO111_MMC_DAT_3,
-
- /* LCD */
- GPIOxx_LCD_TFT_16BPP,
-
- /* QCI */
- GPIO84_CIF_FV,
- GPIO25_CIF_LV,
- GPIO53_CIF_MCLK,
- GPIO54_CIF_PCLK,
- GPIO81_CIF_DD_0,
- GPIO55_CIF_DD_1,
- GPIO51_CIF_DD_2,
- GPIO50_CIF_DD_3,
- GPIO52_CIF_DD_4,
- GPIO48_CIF_DD_5,
- GPIO17_CIF_DD_6,
- GPIO12_CIF_DD_7,
-
- /* I2C */
- GPIO117_I2C_SCL,
- GPIO118_I2C_SDA,
-
- /* Keypad */
- GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
- GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH,
- GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH,
- GPIO34_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH,
- GPIO39_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH,
- GPIO99_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH,
- GPIO91_KP_MKIN_6 | WAKEUP_ON_LEVEL_HIGH,
- GPIO36_KP_MKIN_7 | WAKEUP_ON_LEVEL_HIGH,
- GPIO103_KP_MKOUT_0,
- GPIO104_KP_MKOUT_1,
- GPIO105_KP_MKOUT_2,
- GPIO106_KP_MKOUT_3,
- GPIO107_KP_MKOUT_4,
- GPIO108_KP_MKOUT_5,
- GPIO96_KP_MKOUT_6,
- GPIO22_KP_MKOUT_7,
-
- /* SSP1 */
- GPIO26_SSP1_RXD,
- GPIO23_SSP1_SCLK,
- GPIO24_SSP1_SFRM,
- GPIO57_SSP1_TXD,
-
- /* SSP2 */
- GPIO19_GPIO, /* SSP2 clock is used as GPIO for Libertas pin-strap */
- GPIO14_GPIO,
- GPIO89_SSP2_TXD,
- GPIO88_SSP2_RXD,
-
- /* SDRAM and local bus */
- GPIO15_nCS_1,
- GPIO78_nCS_2,
- GPIO79_nCS_3,
- GPIO80_nCS_4,
- GPIO49_nPWE,
- GPIO18_RDY,
-
- /* GPIO */
- GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, /* sleep/resume button */
-
- /* power controls */
- GPIO20_GPIO | MFP_LPM_DRIVE_LOW, /* GPRS_PWEN */
- GPIO115_GPIO | MFP_LPM_DRIVE_LOW, /* WLAN_PWEN */
-
- /* NAND controls */
- GPIO11_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */
-
- /* interrupts */
- GPIO41_GPIO, /* DM9000 interrupt */
-};
-
-static unsigned long em_x270_pin_config[] = {
- GPIO13_GPIO, /* MMC card detect */
- GPIO16_GPIO, /* USB hub reset */
- GPIO56_GPIO, /* NAND Ready/Busy */
- GPIO93_GPIO | MFP_LPM_DRIVE_LOW, /* Camera reset */
- GPIO95_GPIO, /* MMC Write protect */
-};
-
-static unsigned long exeda_pin_config[] = {
- GPIO10_GPIO, /* USB hub reset */
- GPIO20_GPIO, /* NAND Ready/Busy */
- GPIO38_GPIO | MFP_LPM_DRIVE_LOW, /* SD slot power */
- GPIO95_GPIO, /* touchpad IRQ */
- GPIO114_GPIO, /* MMC card detect */
-};
-
-#if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE)
-static struct resource em_x270_dm9000_resource[] = {
- [0] = {
- .start = PXA_CS2_PHYS,
- .end = PXA_CS2_PHYS + 3,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PXA_CS2_PHYS + 8,
- .end = PXA_CS2_PHYS + 8 + 0x3f,
- .flags = IORESOURCE_MEM,
- },
- [2] = {
- .start = EM_X270_ETHIRQ,
- .end = EM_X270_ETHIRQ,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- }
-};
-
-static struct dm9000_plat_data em_x270_dm9000_platdata = {
- .flags = DM9000_PLATF_NO_EEPROM,
-};
-
-static struct platform_device em_x270_dm9000 = {
- .name = "dm9000",
- .id = 0,
- .num_resources = ARRAY_SIZE(em_x270_dm9000_resource),
- .resource = em_x270_dm9000_resource,
- .dev = {
- .platform_data = &em_x270_dm9000_platdata,
- }
-};
-
-static void __init em_x270_init_dm9000(void)
-{
- em_x270_dm9000_platdata.flags |= dm9000_flags;
- platform_device_register(&em_x270_dm9000);
-}
-#else
-static inline void em_x270_init_dm9000(void) {}
-#endif
-
-/* V3020 RTC */
-#if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
-static struct resource em_x270_v3020_resource[] = {
- [0] = {
- .start = PXA_CS4_PHYS,
- .end = PXA_CS4_PHYS + 3,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct v3020_platform_data em_x270_v3020_platdata = {
- .leftshift = 0,
-};
-
-static struct platform_device em_x270_rtc = {
- .name = "v3020",
- .num_resources = ARRAY_SIZE(em_x270_v3020_resource),
- .resource = em_x270_v3020_resource,
- .id = -1,
- .dev = {
- .platform_data = &em_x270_v3020_platdata,
- }
-};
-
-static void __init em_x270_init_rtc(void)
-{
- platform_device_register(&em_x270_rtc);
-}
-#else
-static inline void em_x270_init_rtc(void) {}
-#endif
-
-/* NAND flash */
-#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
-static inline void nand_cs_on(void)
-{
- gpio_set_value(GPIO11_NAND_CS, 0);
-}
-
-static void nand_cs_off(void)
-{
- dsb();
-
- gpio_set_value(GPIO11_NAND_CS, 1);
-}
-
-/* hardware-specific access to control lines */
-static void em_x270_nand_cmd_ctl(struct nand_chip *this, int dat,
- unsigned int ctrl)
-{
- unsigned long nandaddr = (unsigned long)this->legacy.IO_ADDR_W;
-
- dsb();
-
- if (ctrl & NAND_CTRL_CHANGE) {
- if (ctrl & NAND_ALE)
- nandaddr |= (1 << 3);
- else
- nandaddr &= ~(1 << 3);
- if (ctrl & NAND_CLE)
- nandaddr |= (1 << 2);
- else
- nandaddr &= ~(1 << 2);
- if (ctrl & NAND_NCE)
- nand_cs_on();
- else
- nand_cs_off();
- }
-
- dsb();
- this->legacy.IO_ADDR_W = (void __iomem *)nandaddr;
- if (dat != NAND_CMD_NONE)
- writel(dat, this->legacy.IO_ADDR_W);
-
- dsb();
-}
-
-/* read device ready pin */
-static int em_x270_nand_device_ready(struct nand_chip *this)
-{
- dsb();
-
- return gpio_get_value(nand_rb);
-}
-
-static struct mtd_partition em_x270_partition_info[] = {
- [0] = {
- .name = "em_x270-0",
- .offset = 0,
- .size = SZ_4M,
- },
- [1] = {
- .name = "em_x270-1",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL
- },
-};
-
-struct platform_nand_data em_x270_nand_platdata = {
- .chip = {
- .nr_chips = 1,
- .chip_offset = 0,
- .nr_partitions = ARRAY_SIZE(em_x270_partition_info),
- .partitions = em_x270_partition_info,
- .chip_delay = 20,
- },
- .ctrl = {
- .dev_ready = em_x270_nand_device_ready,
- .select_chip = 0,
- .cmd_ctrl = em_x270_nand_cmd_ctl,
- },
-};
-
-static struct resource em_x270_nand_resource[] = {
- [0] = {
- .start = PXA_CS1_PHYS,
- .end = PXA_CS1_PHYS + 12,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device em_x270_nand = {
- .name = "gen_nand",
- .num_resources = ARRAY_SIZE(em_x270_nand_resource),
- .resource = em_x270_nand_resource,
- .id = -1,
- .dev = {
- .platform_data = &em_x270_nand_platdata,
- }
-};
-
-static void __init em_x270_init_nand(void)
-{
- int err;
-
- err = gpio_request(GPIO11_NAND_CS, "NAND CS");
- if (err) {
- pr_warn("EM-X270: failed to request NAND CS gpio\n");
- return;
- }
-
- gpio_direction_output(GPIO11_NAND_CS, 1);
-
- err = gpio_request(nand_rb, "NAND R/B");
- if (err) {
- pr_warn("EM-X270: failed to request NAND R/B gpio\n");
- gpio_free(GPIO11_NAND_CS);
- return;
- }
-
- gpio_direction_input(nand_rb);
-
- platform_device_register(&em_x270_nand);
-}
-#else
-static inline void em_x270_init_nand(void) {}
-#endif
-
-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
-static struct mtd_partition em_x270_nor_parts[] = {
- {
- .name = "Bootloader",
- .offset = 0x00000000,
- .size = 0x00050000,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- }, {
- .name = "Environment",
- .offset = 0x00050000,
- .size = 0x00010000,
- }, {
- .name = "Reserved",
- .offset = 0x00060000,
- .size = 0x00050000,
- .mask_flags = MTD_WRITEABLE /* force read-only */
- }, {
- .name = "Splashscreen",
- .offset = 0x000b0000,
- .size = 0x00050000,
- }
-};
-
-static struct physmap_flash_data em_x270_nor_data[] = {
- [0] = {
- .width = 2,
- .parts = em_x270_nor_parts,
- .nr_parts = ARRAY_SIZE(em_x270_nor_parts),
- },
-};
-
-static struct resource em_x270_nor_flash_resource = {
- .start = PXA_CS0_PHYS,
- .end = PXA_CS0_PHYS + SZ_1M - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device em_x270_physmap_flash = {
- .name = "physmap-flash",
- .id = 0,
- .num_resources = 1,
- .resource = &em_x270_nor_flash_resource,
- .dev = {
- .platform_data = &em_x270_nor_data,
- },
-};
-
-static void __init em_x270_init_nor(void)
-{
- platform_device_register(&em_x270_physmap_flash);
-}
-#else
-static inline void em_x270_init_nor(void) {}
-#endif
-
-/* PXA27x OHCI controller setup */
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-static struct regulator *em_x270_usb_ldo;
-
-static int em_x270_usb_hub_init(void)
-{
- int err;
-
- em_x270_usb_ldo = regulator_get(NULL, "vcc usb");
- if (IS_ERR(em_x270_usb_ldo))
- return PTR_ERR(em_x270_usb_ldo);
-
- err = gpio_request(GPIO9_USB_VBUS_EN, "vbus en");
- if (err)
- goto err_free_usb_ldo;
-
- err = gpio_request(usb_hub_reset, "hub rst");
- if (err)
- goto err_free_vbus_gpio;
-
- /* USB Hub power-on and reset */
- gpio_direction_output(usb_hub_reset, 1);
- gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
- err = regulator_enable(em_x270_usb_ldo);
- if (err)
- goto err_free_rst_gpio;
-
- gpio_set_value(usb_hub_reset, 0);
- gpio_set_value(usb_hub_reset, 1);
- regulator_disable(em_x270_usb_ldo);
- err = regulator_enable(em_x270_usb_ldo);
- if (err)
- goto err_free_rst_gpio;
-
- gpio_set_value(usb_hub_reset, 0);
- gpio_set_value(GPIO9_USB_VBUS_EN, 1);
-
- return 0;
-
-err_free_rst_gpio:
- gpio_free(usb_hub_reset);
-err_free_vbus_gpio:
- gpio_free(GPIO9_USB_VBUS_EN);
-err_free_usb_ldo:
- regulator_put(em_x270_usb_ldo);
-
- return err;
-}
-
-static int em_x270_ohci_init(struct device *dev)
-{
- int err;
-
- /* we don't want to entirely disable USB if the HUB init failed */
- err = em_x270_usb_hub_init();
- if (err)
- pr_err("USB Hub initialization failed: %d\n", err);
-
-	/* enable the port 2 transceiver */
- UP2OCR = UP2OCR_HXS | UP2OCR_HXOE;
-
- return 0;
-}
-
-static void em_x270_ohci_exit(struct device *dev)
-{
- gpio_free(usb_hub_reset);
- gpio_free(GPIO9_USB_VBUS_EN);
-
- if (!IS_ERR(em_x270_usb_ldo)) {
- if (regulator_is_enabled(em_x270_usb_ldo))
- regulator_disable(em_x270_usb_ldo);
-
- regulator_put(em_x270_usb_ldo);
- }
-}
-
-static struct pxaohci_platform_data em_x270_ohci_platform_data = {
- .port_mode = PMM_PERPORT_MODE,
- .flags = ENABLE_PORT1 | ENABLE_PORT2 | POWER_CONTROL_LOW,
- .init = em_x270_ohci_init,
- .exit = em_x270_ohci_exit,
-};
-
-static void __init em_x270_init_ohci(void)
-{
- pxa_set_ohci_info(&em_x270_ohci_platform_data);
-}
-#else
-static inline void em_x270_init_ohci(void) {}
-#endif
-
-/* MCI controller setup */
-#if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE)
-static struct regulator *em_x270_sdio_ldo;
-
-static struct gpiod_lookup_table em_x270_mci_wp_gpio_table = {
- .dev_id = "pxa2xx-mci.0",
- .table = {
- /* Write protect on GPIO 95 */
- GPIO_LOOKUP("gpio-pxa", GPIO95_MMC_WP, "wp", GPIO_ACTIVE_LOW),
- { },
- },
-};
-
-static int em_x270_mci_init(struct device *dev,
- irq_handler_t em_x270_detect_int,
- void *data)
-{
- int err;
-
- em_x270_sdio_ldo = regulator_get(dev, "vcc sdio");
- if (IS_ERR(em_x270_sdio_ldo)) {
- dev_err(dev, "can't request SDIO power supply: %ld\n",
- PTR_ERR(em_x270_sdio_ldo));
- return PTR_ERR(em_x270_sdio_ldo);
- }
-
- err = request_irq(gpio_to_irq(mmc_cd), em_x270_detect_int,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- dev_err(dev, "can't request MMC card detect IRQ: %d\n", err);
- goto err_irq;
- }
-
- if (!machine_is_em_x270()) {
- err = gpio_request(GPIO38_SD_PWEN, "sdio power");
- if (err) {
-			dev_err(dev, "can't request MMC power control: %d\n",
- err);
- goto err_gpio_wp;
- }
- gpio_direction_output(GPIO38_SD_PWEN, 1);
- }
-
- return 0;
-
-err_gpio_wp:
- free_irq(gpio_to_irq(mmc_cd), data);
-err_irq:
- regulator_put(em_x270_sdio_ldo);
-
- return err;
-}
-
-static int em_x270_mci_setpower(struct device *dev, unsigned int vdd)
-{
- struct pxamci_platform_data* p_d = dev->platform_data;
-
- if ((1 << vdd) & p_d->ocr_mask) {
- int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000;
-
- regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV);
- return regulator_enable(em_x270_sdio_ldo);
- } else {
- regulator_disable(em_x270_sdio_ldo);
- }
- return 0;
-}
-
-static void em_x270_mci_exit(struct device *dev, void *data)
-{
- free_irq(gpio_to_irq(mmc_cd), data);
- regulator_put(em_x270_sdio_ldo);
-
- if (!machine_is_em_x270())
- gpio_free(GPIO38_SD_PWEN);
-}
-
-static struct pxamci_platform_data em_x270_mci_platform_data = {
- .detect_delay_ms = 250,
- .ocr_mask = MMC_VDD_20_21|MMC_VDD_21_22|MMC_VDD_22_23|
- MMC_VDD_24_25|MMC_VDD_25_26|MMC_VDD_26_27|
- MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30|
- MMC_VDD_30_31|MMC_VDD_31_32,
- .init = em_x270_mci_init,
- .setpower = em_x270_mci_setpower,
- .exit = em_x270_mci_exit,
-};
-
-static void __init em_x270_init_mmc(void)
-{
- if (machine_is_em_x270())
- gpiod_add_lookup_table(&em_x270_mci_wp_gpio_table);
-
- pxa_set_mci_info(&em_x270_mci_platform_data);
-}
-#else
-static inline void em_x270_init_mmc(void) {}
-#endif
-
-/* LCD */
-#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
-static struct pxafb_mode_info em_x270_lcd_modes[] = {
- [0] = {
- .pixclock = 38250,
- .bpp = 16,
- .xres = 480,
- .yres = 640,
- .hsync_len = 8,
- .vsync_len = 2,
- .left_margin = 8,
- .upper_margin = 2,
- .right_margin = 24,
- .lower_margin = 4,
- .sync = 0,
- },
- [1] = {
- .pixclock = 153800,
- .bpp = 16,
- .xres = 240,
- .yres = 320,
- .hsync_len = 8,
- .vsync_len = 2,
- .left_margin = 8,
- .upper_margin = 2,
- .right_margin = 88,
- .lower_margin = 2,
- .sync = 0,
- },
-};
-
-static struct pxafb_mach_info em_x270_lcd = {
- .modes = em_x270_lcd_modes,
- .num_modes = 2,
- .lcd_conn = LCD_COLOR_TFT_16BPP,
-};
-
-static void __init em_x270_init_lcd(void)
-{
- pxa_set_fb_info(NULL, &em_x270_lcd);
-}
-#else
-static inline void em_x270_init_lcd(void) {}
-#endif
-
-#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
-static struct pxa2xx_spi_controller em_x270_spi_info = {
- .num_chipselect = 1,
-};
-
-static struct pxa2xx_spi_chip em_x270_tdo24m_chip = {
- .rx_threshold = 1,
- .tx_threshold = 1,
- .gpio_cs = -1,
-};
-
-static struct tdo24m_platform_data em_x270_tdo24m_pdata = {
- .model = TDO35S,
-};
-
-static struct pxa2xx_spi_controller em_x270_spi_2_info = {
- .num_chipselect = 1,
- .enable_dma = 1,
-};
-
-static struct pxa2xx_spi_chip em_x270_libertas_chip = {
- .rx_threshold = 1,
- .tx_threshold = 1,
- .timeout = 1000,
- .gpio_cs = 14,
-};
-
-static unsigned long em_x270_libertas_pin_config[] = {
- /* SSP2 */
- GPIO19_SSP2_SCLK,
- GPIO14_GPIO,
- GPIO89_SSP2_TXD,
- GPIO88_SSP2_RXD,
-};
-
-static int em_x270_libertas_setup(struct spi_device *spi)
-{
- int err = gpio_request(GPIO115_WLAN_PWEN, "WLAN PWEN");
- if (err)
- return err;
-
- err = gpio_request(GPIO19_WLAN_STRAP, "WLAN STRAP");
- if (err)
- goto err_free_pwen;
-
- if (machine_is_exeda()) {
- err = gpio_request(GPIO37_WLAN_RST, "WLAN RST");
- if (err)
- goto err_free_strap;
-
- gpio_direction_output(GPIO37_WLAN_RST, 1);
- msleep(100);
- }
-
- gpio_direction_output(GPIO19_WLAN_STRAP, 1);
- msleep(100);
-
- pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_libertas_pin_config));
-
- gpio_direction_output(GPIO115_WLAN_PWEN, 0);
- msleep(100);
- gpio_set_value(GPIO115_WLAN_PWEN, 1);
- msleep(100);
-
- spi->bits_per_word = 16;
- spi_setup(spi);
-
- return 0;
-
-err_free_strap:
- gpio_free(GPIO19_WLAN_STRAP);
-err_free_pwen:
- gpio_free(GPIO115_WLAN_PWEN);
-
- return err;
-}
-
-static int em_x270_libertas_teardown(struct spi_device *spi)
-{
- gpio_set_value(GPIO115_WLAN_PWEN, 0);
- gpio_free(GPIO115_WLAN_PWEN);
- gpio_free(GPIO19_WLAN_STRAP);
-
- if (machine_is_exeda()) {
- gpio_set_value(GPIO37_WLAN_RST, 0);
- gpio_free(GPIO37_WLAN_RST);
- }
-
- return 0;
-}
-
-struct libertas_spi_platform_data em_x270_libertas_pdata = {
- .use_dummy_writes = 1,
- .setup = em_x270_libertas_setup,
- .teardown = em_x270_libertas_teardown,
-};
-
-static struct spi_board_info em_x270_spi_devices[] __initdata = {
- {
- .modalias = "tdo24m",
- .max_speed_hz = 1000000,
- .bus_num = 1,
- .chip_select = 0,
- .controller_data = &em_x270_tdo24m_chip,
- .platform_data = &em_x270_tdo24m_pdata,
- },
- {
- .modalias = "libertas_spi",
- .max_speed_hz = 13000000,
- .bus_num = 2,
- .irq = PXA_GPIO_TO_IRQ(116),
- .chip_select = 0,
- .controller_data = &em_x270_libertas_chip,
- .platform_data = &em_x270_libertas_pdata,
- },
-};
-
-static void __init em_x270_init_spi(void)
-{
- pxa2xx_set_spi_info(1, &em_x270_spi_info);
- pxa2xx_set_spi_info(2, &em_x270_spi_2_info);
- spi_register_board_info(ARRAY_AND_SIZE(em_x270_spi_devices));
-}
-#else
-static inline void em_x270_init_spi(void) {}
-#endif
-
-#if defined(CONFIG_SND_PXA2XX_LIB_AC97)
-static pxa2xx_audio_ops_t em_x270_ac97_info = {
- .reset_gpio = 113,
-};
-
-static void __init em_x270_init_ac97(void)
-{
- pxa_set_ac97_info(&em_x270_ac97_info);
-}
-#else
-static inline void em_x270_init_ac97(void) {}
-#endif
-
-#if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE)
-static const unsigned int em_x270_module_matrix_keys[] = {
- KEY(0, 0, KEY_A), KEY(1, 0, KEY_UP), KEY(2, 1, KEY_B),
- KEY(0, 2, KEY_LEFT), KEY(1, 1, KEY_ENTER), KEY(2, 0, KEY_RIGHT),
- KEY(0, 1, KEY_C), KEY(1, 2, KEY_DOWN), KEY(2, 2, KEY_D),
-};
-
-static struct matrix_keymap_data em_x270_matrix_keymap_data = {
- .keymap = em_x270_module_matrix_keys,
- .keymap_size = ARRAY_SIZE(em_x270_module_matrix_keys),
-};
-
-struct pxa27x_keypad_platform_data em_x270_module_keypad_info = {
- /* code map for the matrix keys */
- .matrix_key_rows = 3,
- .matrix_key_cols = 3,
- .matrix_keymap_data = &em_x270_matrix_keymap_data,
-};
-
-static const unsigned int em_x270_exeda_matrix_keys[] = {
- KEY(0, 0, KEY_RIGHTSHIFT), KEY(0, 1, KEY_RIGHTCTRL),
- KEY(0, 2, KEY_RIGHTALT), KEY(0, 3, KEY_SPACE),
- KEY(0, 4, KEY_LEFTALT), KEY(0, 5, KEY_LEFTCTRL),
- KEY(0, 6, KEY_ENTER), KEY(0, 7, KEY_SLASH),
-
- KEY(1, 0, KEY_DOT), KEY(1, 1, KEY_M),
- KEY(1, 2, KEY_N), KEY(1, 3, KEY_B),
- KEY(1, 4, KEY_V), KEY(1, 5, KEY_C),
- KEY(1, 6, KEY_X), KEY(1, 7, KEY_Z),
-
- KEY(2, 0, KEY_LEFTSHIFT), KEY(2, 1, KEY_SEMICOLON),
- KEY(2, 2, KEY_L), KEY(2, 3, KEY_K),
- KEY(2, 4, KEY_J), KEY(2, 5, KEY_H),
- KEY(2, 6, KEY_G), KEY(2, 7, KEY_F),
-
- KEY(3, 0, KEY_D), KEY(3, 1, KEY_S),
- KEY(3, 2, KEY_A), KEY(3, 3, KEY_TAB),
- KEY(3, 4, KEY_BACKSPACE), KEY(3, 5, KEY_P),
- KEY(3, 6, KEY_O), KEY(3, 7, KEY_I),
-
- KEY(4, 0, KEY_U), KEY(4, 1, KEY_Y),
- KEY(4, 2, KEY_T), KEY(4, 3, KEY_R),
- KEY(4, 4, KEY_E), KEY(4, 5, KEY_W),
- KEY(4, 6, KEY_Q), KEY(4, 7, KEY_MINUS),
-
- KEY(5, 0, KEY_0), KEY(5, 1, KEY_9),
- KEY(5, 2, KEY_8), KEY(5, 3, KEY_7),
- KEY(5, 4, KEY_6), KEY(5, 5, KEY_5),
- KEY(5, 6, KEY_4), KEY(5, 7, KEY_3),
-
- KEY(6, 0, KEY_2), KEY(6, 1, KEY_1),
- KEY(6, 2, KEY_ENTER), KEY(6, 3, KEY_END),
- KEY(6, 4, KEY_DOWN), KEY(6, 5, KEY_UP),
- KEY(6, 6, KEY_MENU), KEY(6, 7, KEY_F1),
-
- KEY(7, 0, KEY_LEFT), KEY(7, 1, KEY_RIGHT),
- KEY(7, 2, KEY_BACK), KEY(7, 3, KEY_HOME),
- KEY(7, 4, 0), KEY(7, 5, 0),
- KEY(7, 6, 0), KEY(7, 7, 0),
-};
-
-static struct matrix_keymap_data em_x270_exeda_matrix_keymap_data = {
- .keymap = em_x270_exeda_matrix_keys,
- .keymap_size = ARRAY_SIZE(em_x270_exeda_matrix_keys),
-};
-
-struct pxa27x_keypad_platform_data em_x270_exeda_keypad_info = {
- /* code map for the matrix keys */
- .matrix_key_rows = 8,
- .matrix_key_cols = 8,
- .matrix_keymap_data = &em_x270_exeda_matrix_keymap_data,
-};
-
-static void __init em_x270_init_keypad(void)
-{
- if (machine_is_em_x270())
- pxa_set_keypad_info(&em_x270_module_keypad_info);
- else
- pxa_set_keypad_info(&em_x270_exeda_keypad_info);
-}
-#else
-static inline void em_x270_init_keypad(void) {}
-#endif
-
-#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
-static struct gpio_keys_button gpio_keys_button[] = {
- [0] = {
- .desc = "sleep/wakeup",
- .code = KEY_SUSPEND,
- .type = EV_PWR,
- .gpio = 1,
- .wakeup = 1,
- },
-};
-
-static struct gpio_keys_platform_data em_x270_gpio_keys_data = {
- .buttons = gpio_keys_button,
- .nbuttons = 1,
-};
-
-static struct platform_device em_x270_gpio_keys = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &em_x270_gpio_keys_data,
- },
-};
-
-static void __init em_x270_init_gpio_keys(void)
-{
- platform_device_register(&em_x270_gpio_keys);
-}
-#else
-static inline void em_x270_init_gpio_keys(void) {}
-#endif
-
-/* Quick Capture Interface and sensor setup */
-#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE)
-static int em_x270_sensor_init(void)
-{
- int ret;
-
- ret = gpio_request(cam_reset, "camera reset");
- if (ret)
- return ret;
-
- gpio_direction_output(cam_reset, 0);
- gpio_set_value(cam_reset, 1);
-
- return 0;
-}
-
-static struct regulator_consumer_supply camera_dummy_supplies[] = {
- REGULATOR_SUPPLY("vdd", "0-005d"),
-};
-
-static struct regulator_init_data camera_dummy_initdata = {
- .consumer_supplies = camera_dummy_supplies,
- .num_consumer_supplies = ARRAY_SIZE(camera_dummy_supplies),
- .constraints = {
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct fixed_voltage_config camera_dummy_config = {
- .supply_name = "camera_vdd",
- .input_supply = "vcc cam",
- .microvolts = 2800000,
- .init_data = &camera_dummy_initdata,
-};
-
-static struct platform_device camera_supply_dummy_device = {
- .name = "reg-fixed-voltage",
- .id = 1,
- .dev = {
- .platform_data = &camera_dummy_config,
- },
-};
-
-struct pxacamera_platform_data em_x270_camera_platform_data = {
- .flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
- PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
- .mclk_10khz = 2600,
- .sensor_i2c_adapter_id = 0,
- .sensor_i2c_address = 0x5d,
-};
-
-static void __init em_x270_init_camera(void)
-{
- if (em_x270_sensor_init() == 0)
- pxa_set_camera_info(&em_x270_camera_platform_data);
- platform_device_register(&camera_supply_dummy_device);
-}
-#else
-static inline void em_x270_init_camera(void) {}
-#endif
-
-static struct regulator_bulk_data em_x270_gps_consumer_supply = {
- .supply = "vcc gps",
-};
-
-static struct regulator_userspace_consumer_data em_x270_gps_consumer_data = {
- .name = "vcc gps",
- .num_supplies = 1,
- .supplies = &em_x270_gps_consumer_supply,
-};
-
-static struct platform_device em_x270_gps_userspace_consumer = {
- .name = "reg-userspace-consumer",
- .id = 0,
- .dev = {
- .platform_data = &em_x270_gps_consumer_data,
- },
-};
-
-static struct regulator_bulk_data em_x270_gprs_consumer_supply = {
- .supply = "vcc gprs",
-};
-
-static struct regulator_userspace_consumer_data em_x270_gprs_consumer_data = {
- .name = "vcc gprs",
- .num_supplies = 1,
- .supplies = &em_x270_gprs_consumer_supply
-};
-
-static struct platform_device em_x270_gprs_userspace_consumer = {
- .name = "reg-userspace-consumer",
- .id = 1,
- .dev = {
- .platform_data = &em_x270_gprs_consumer_data,
- }
-};
-
-static struct platform_device *em_x270_userspace_consumers[] = {
- &em_x270_gps_userspace_consumer,
- &em_x270_gprs_userspace_consumer,
-};
-
-static void __init em_x270_userspace_consumers_init(void)
-{
- platform_add_devices(ARRAY_AND_SIZE(em_x270_userspace_consumers));
-}
-
-/* DA9030 related initializations */
-#define REGULATOR_CONSUMER(_name, _dev_name, _supply) \
- static struct regulator_consumer_supply _name##_consumers[] = { \
- REGULATOR_SUPPLY(_supply, _dev_name), \
- }
-
-REGULATOR_CONSUMER(ldo3, "reg-userspace-consumer.0", "vcc gps");
-REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
-REGULATOR_CONSUMER(ldo10, "pxa2xx-mci", "vcc sdio");
-REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
-REGULATOR_CONSUMER(ldo19, "reg-userspace-consumer.1", "vcc gprs");
-REGULATOR_CONSUMER(buck2, NULL, "vcc_core");
-
-#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
- static struct regulator_init_data _ldo##_data = { \
- .constraints = { \
- .min_uV = _min_uV, \
- .max_uV = _max_uV, \
- .state_mem = { \
- .enabled = 0, \
- }, \
- .valid_ops_mask = _ops_mask, \
- .apply_uV = 1, \
- }, \
- .num_consumer_supplies = ARRAY_SIZE(_ldo##_consumers), \
- .consumer_supplies = _ldo##_consumers, \
- };
-
-REGULATOR_INIT(ldo3, 3200000, 3200000, REGULATOR_CHANGE_STATUS);
-REGULATOR_INIT(ldo5, 3000000, 3000000, REGULATOR_CHANGE_STATUS);
-REGULATOR_INIT(ldo10, 2000000, 3200000,
- REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_VOLTAGE);
-REGULATOR_INIT(ldo12, 3000000, 3000000, REGULATOR_CHANGE_STATUS);
-REGULATOR_INIT(ldo19, 3200000, 3200000, REGULATOR_CHANGE_STATUS);
-REGULATOR_INIT(buck2, 1000000, 1650000, REGULATOR_CHANGE_VOLTAGE);
-
-struct led_info em_x270_led_info = {
- .name = "em-x270:orange",
- .default_trigger = "battery-charging-or-full",
-};
-
-struct power_supply_info em_x270_psy_info = {
- .name = "battery",
- .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .voltage_max_design = 4200000,
- .voltage_min_design = 3000000,
- .use_for_apm = 1,
-};
-
-static void em_x270_battery_low(void)
-{
-#if defined(CONFIG_APM_EMULATION)
- apm_queue_event(APM_LOW_BATTERY);
-#endif
-}
-
-static void em_x270_battery_critical(void)
-{
-#if defined(CONFIG_APM_EMULATION)
- apm_queue_event(APM_CRITICAL_SUSPEND);
-#endif
-}
-
-struct da9030_battery_info em_x270_batterty_info = {
- .battery_info = &em_x270_psy_info,
-
- .charge_milliamp = 1000,
- .charge_millivolt = 4200,
-
- .vbat_low = 3600,
- .vbat_crit = 3400,
- .vbat_charge_start = 4100,
- .vbat_charge_stop = 4200,
- .vbat_charge_restart = 4000,
-
- .vcharge_min = 3200,
- .vcharge_max = 5500,
-
- .tbat_low = 197,
- .tbat_high = 78,
- .tbat_restart = 100,
-
- .batmon_interval = 0,
-
- .battery_low = em_x270_battery_low,
- .battery_critical = em_x270_battery_critical,
-};
-
-#define DA9030_SUBDEV(_name, _id, _pdata) \
- { \
- .name = "da903x-" #_name, \
- .id = DA9030_ID_##_id, \
- .platform_data = _pdata, \
- }
-
-#define DA9030_LDO(num) DA9030_SUBDEV(regulator, LDO##num, &ldo##num##_data)
-
-struct da903x_subdev_info em_x270_da9030_subdevs[] = {
- DA9030_LDO(3),
- DA9030_LDO(5),
- DA9030_LDO(10),
- DA9030_LDO(12),
- DA9030_LDO(19),
-
- DA9030_SUBDEV(regulator, BUCK2, &buck2_data),
-
- DA9030_SUBDEV(led, LED_PC, &em_x270_led_info),
- DA9030_SUBDEV(backlight, WLED, &em_x270_led_info),
- DA9030_SUBDEV(battery, BAT, &em_x270_batterty_info),
-};
-
-static struct da903x_platform_data em_x270_da9030_info = {
- .num_subdevs = ARRAY_SIZE(em_x270_da9030_subdevs),
- .subdevs = em_x270_da9030_subdevs,
-};
-
-static struct i2c_board_info em_x270_i2c_pmic_info = {
- I2C_BOARD_INFO("da9030", 0x49),
- .irq = PXA_GPIO_TO_IRQ(0),
- .platform_data = &em_x270_da9030_info,
-};
-
-static struct i2c_pxa_platform_data em_x270_pwr_i2c_info = {
- .use_pio = 1,
-};
-
-static void __init em_x270_init_da9030(void)
-{
- pxa27x_set_i2c_power_info(&em_x270_pwr_i2c_info);
- i2c_register_board_info(1, &em_x270_i2c_pmic_info, 1);
-}
-
-static struct pca953x_platform_data exeda_gpio_ext_pdata = {
- .gpio_base = 128,
-};
-
-static struct i2c_board_info exeda_i2c_info[] = {
- {
- I2C_BOARD_INFO("pca9555", 0x21),
- .platform_data = &exeda_gpio_ext_pdata,
- },
-};
-
-static struct i2c_pxa_platform_data em_x270_i2c_info = {
- .fast_mode = 1,
-};
-
-static void __init em_x270_init_i2c(void)
-{
- pxa_set_i2c_info(&em_x270_i2c_info);
-
- if (machine_is_exeda())
- i2c_register_board_info(0, ARRAY_AND_SIZE(exeda_i2c_info));
-}
-
-static void __init em_x270_module_init(void)
-{
- pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_pin_config));
-
- mmc_cd = GPIO13_MMC_CD;
- nand_rb = GPIO56_NAND_RB;
- dm9000_flags = DM9000_PLATF_32BITONLY;
- cam_reset = GPIO93_CAM_RESET;
- usb_hub_reset = GPIO16_USB_HUB_RESET;
-}
-
-static void __init em_x270_exeda_init(void)
-{
- pxa2xx_mfp_config(ARRAY_AND_SIZE(exeda_pin_config));
-
- mmc_cd = GPIO114_MMC_CD;
- nand_rb = GPIO20_NAND_RB;
- dm9000_flags = DM9000_PLATF_16BITONLY;
- cam_reset = GPIO130_CAM_RESET;
- usb_hub_reset = GPIO10_USB_HUB_RESET;
-}
-
-static void __init em_x270_init(void)
-{
- pxa2xx_mfp_config(ARRAY_AND_SIZE(common_pin_config));
-
- pxa_set_ffuart_info(NULL);
- pxa_set_btuart_info(NULL);
- pxa_set_stuart_info(NULL);
-
-#ifdef CONFIG_PM
- pxa27x_set_pwrmode(PWRMODE_DEEPSLEEP);
-#endif
-
- if (machine_is_em_x270())
- em_x270_module_init();
- else if (machine_is_exeda())
- em_x270_exeda_init();
- else
- panic("Unsupported machine: %d\n", machine_arch_type);
-
- em_x270_init_da9030();
- em_x270_init_dm9000();
- em_x270_init_rtc();
- em_x270_init_nand();
- em_x270_init_nor();
- em_x270_init_lcd();
- em_x270_init_mmc();
- em_x270_init_ohci();
- em_x270_init_keypad();
- em_x270_init_gpio_keys();
- em_x270_init_ac97();
- em_x270_init_spi();
- em_x270_init_i2c();
- em_x270_init_camera();
- em_x270_userspace_consumers_init();
-
- regulator_has_full_constraints();
-}
-
-MACHINE_START(EM_X270, "Compulab EM-X270")
- .atag_offset = 0x100,
- .map_io = pxa27x_map_io,
- .nr_irqs = PXA_NR_IRQS,
- .init_irq = pxa27x_init_irq,
- .handle_irq = pxa27x_handle_irq,
- .init_time = pxa_timer_init,
- .init_machine = em_x270_init,
- .restart = pxa_restart,
-MACHINE_END
-
-MACHINE_START(EXEDA, "Compulab eXeda")
- .atag_offset = 0x100,
- .map_io = pxa27x_map_io,
- .nr_irqs = PXA_NR_IRQS,
- .init_irq = pxa27x_init_irq,
- .handle_irq = pxa27x_handle_irq,
- .init_time = pxa_timer_init,
- .init_machine = em_x270_init,
- .restart = pxa_restart,
-MACHINE_END
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
deleted file mode 100644
index d54031c4f3df..000000000000
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/mach-pxa/include/mach/io.h
- *
- * Copied from asm/arch/sa1100/io.h
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/*
- * We don't actually have real ISA or PCI buses, but there are so many
- * drivers out there that might just work if we fake them...
- */
-#define __io(a) __typesafe_io(a)
-
-#endif
diff --git a/arch/arm/mach-realtek/Kconfig b/arch/arm/mach-realtek/Kconfig
new file mode 100644
index 000000000000..19fdcf093fd1
--- /dev/null
+++ b/arch/arm/mach-realtek/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+menuconfig ARCH_REALTEK
+ bool "Realtek SoCs"
+ depends on ARCH_MULTI_V7
+ select ARM_GIC
+ select ARM_GLOBAL_TIMER
+ select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+ select GENERIC_IRQ_CHIP
+ select RESET_CONTROLLER
+ help
+ This enables support for the Realtek RTD1195 SoC family.
diff --git a/arch/arm/mach-realtek/Makefile b/arch/arm/mach-realtek/Makefile
new file mode 100644
index 000000000000..5382d5bbdd3c
--- /dev/null
+++ b/arch/arm/mach-realtek/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+obj-y += rtd1195.o
diff --git a/arch/arm/mach-realtek/rtd1195.c b/arch/arm/mach-realtek/rtd1195.c
new file mode 100644
index 000000000000..0381a4447384
--- /dev/null
+++ b/arch/arm/mach-realtek/rtd1195.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Realtek RTD1195
+ *
+ * Copyright (c) 2017-2019 Andreas Färber
+ */
+
+#include <linux/memblock.h>
+#include <asm/mach/arch.h>
+
+static void __init rtd1195_memblock_remove(phys_addr_t base, phys_addr_t size)
+{
+ int ret;
+
+ ret = memblock_remove(base, size);
+ if (ret)
+ pr_err("Failed to remove memblock %pa (%d)\n", &base, ret);
+}
+
+static void __init rtd1195_reserve(void)
+{
+ /* Exclude boot ROM from RAM */
+ rtd1195_memblock_remove(0x00000000, 0x0000a800);
+
+ /* Exclude peripheral register spaces from RAM */
+ rtd1195_memblock_remove(0x18000000, 0x00070000);
+ rtd1195_memblock_remove(0x18100000, 0x01000000);
+}
+
+static const char *const rtd1195_dt_compat[] __initconst = {
+ "realtek,rtd1195",
+ NULL
+};
+
+DT_MACHINE_START(rtd1195, "Realtek RTD1195")
+ .dt_compat = rtd1195_dt_compat,
+ .reserve = rtd1195_reserve,
+ .l2c_aux_val = 0x0,
+ .l2c_aux_mask = ~0x0,
+MACHINE_END
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index 44ebbf9ec673..5c6031b144c8 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -6,7 +6,6 @@ menuconfig ARCH_REALVIEW
select ARM_GIC
select ARM_TIMER_SP804
select CLK_SP810
- select COMMON_CLK_VERSATILE
select GPIO_PL061 if GPIOLIB
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
@@ -16,12 +15,10 @@ menuconfig ARCH_REALVIEW
select MACH_REALVIEW_EB if ARCH_MULTI_V5
select MFD_SYSCON
select PLAT_VERSATILE
- select PLAT_VERSATILE_SCHED_CLOCK
select POWER_RESET
select POWER_RESET_VERSATILE
select POWER_SUPPLY
select SOC_REALVIEW
- select USE_OF
help
This enables support for ARM Ltd RealView boards.
@@ -56,8 +53,6 @@ config REALVIEW_EB_ARM1176
config REALVIEW_EB_A9MP
bool "Support Multicore Cortex-A9 Tile"
depends on MACH_REALVIEW_EB && ARCH_MULTI_V7
- select HAVE_SMP
- select MIGHT_HAVE_CACHE_L2X0
help
Enable support for the Cortex-A9MPCore tile fitted to the
Realview(R) Emulation Baseboard platform.
@@ -66,7 +61,6 @@ config REALVIEW_EB_ARM11MP
bool "Support ARM11MPCore Tile"
depends on MACH_REALVIEW_EB && ARCH_MULTI_V6
select HAVE_SMP
- select MIGHT_HAVE_CACHE_L2X0
help
Enable support for the ARM11MPCore tile fitted to the Realview(R)
Emulation Baseboard platform.
@@ -75,7 +69,6 @@ config MACH_REALVIEW_PB11MP
bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
depends on ARCH_MULTI_V6
select HAVE_SMP
- select MIGHT_HAVE_CACHE_L2X0
help
Include support for the ARM(R) RealView(R) Platform Baseboard for
the ARM11MPCore. This platform has an on-board ARM11MPCore and has
@@ -87,7 +80,6 @@ config MACH_REALVIEW_PB1176
depends on ARCH_MULTI_V6
select CPU_V6
select HAVE_TCM
- select MIGHT_HAVE_CACHE_L2X0
help
Include support for the ARM(R) RealView(R) Platform Baseboard for
ARM1176JZF-S.
@@ -103,8 +95,6 @@ config MACH_REALVIEW_PBA8
config MACH_REALVIEW_PBX
bool "Support RealView(R) Platform Baseboard Explore for Cortex-A9"
depends on ARCH_MULTI_V7
- select HAVE_SMP
- select MIGHT_HAVE_CACHE_L2X0
select ZONE_DMA
help
Include support for the ARM(R) RealView(R) Platform Baseboard
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index 649e0a54784c..d60856898d97 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -180,7 +180,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
rsize = resource_size(&res);
if (rsize < trampoline_sz) {
- pr_err("%s: reserved block with size 0x%x is to small for trampoline size 0x%x\n",
+ pr_err("%s: reserved block with size 0x%x is too small for trampoline size 0x%x\n",
__func__, rsize, trampoline_sz);
return -EINVAL;
}
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index f9797a2b5d0d..beea4564eed4 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -9,9 +9,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/of_clk.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
-#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 937d0a83f8fd..34f1baa10c54 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -405,10 +405,9 @@ static int wlf_gf_module_probe(struct i2c_client *i2c,
gf_mods[i].name, rev + 1);
for (j = 0; j < gf_mods[i].num_i2c_devs; j++) {
- if (!i2c_new_device(i2c->adapter,
- &(gf_mods[i].i2c_devs[j])))
- dev_err(&i2c->dev,
- "Failed to register dev: %d\n", ret);
+ if (IS_ERR(i2c_new_client_device(i2c->adapter,
+ &(gf_mods[i].i2c_devs[j]))))
+ dev_err(&i2c->dev, "Failed to register\n");
}
spi_register_board_info(gf_mods[i].spi_devs,
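
The hunk above replaces i2c_new_device(), which signalled failure with a NULL return, by i2c_new_client_device(), which reports failure as an ERR_PTR()-encoded error instead; hence the IS_ERR() check. A minimal sketch of the new convention with a hypothetical caller (the adapter and board-info arguments are assumed to come from the surrounding driver, not from this patch):

#include <linux/i2c.h>
#include <linux/err.h>

/* Illustrative only: register one child device on an existing adapter. */
static int register_child(struct i2c_adapter *adap,
			  const struct i2c_board_info *info)
{
	struct i2c_client *client;

	/* Failure is reported as an ERR_PTR(), never as NULL. */
	client = i2c_new_client_device(adap, info);
	if (IS_ERR(client))
		return PTR_ERR(client);

	return 0;
}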
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index d96a101e5504..aa265ede5730 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -34,7 +34,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
@@ -633,7 +632,7 @@ static void __init map_sa1100_gpio_regs( void )
int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
pmd_t *pmd;
- pmd = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+ pmd = pmd_off_k(virt);
*pmd = __pmd(phys | prot);
flush_pmd_entry(pmd);
}
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 6d37d263e0d2..3085f1c2e586 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -22,11 +22,11 @@
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/pgtable.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 1ee5cd2840e0..c42ff8c314c8 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -24,6 +24,7 @@
#include "rcar-gen2.h"
static const struct of_device_id cpg_matches[] __initconst = {
+ { .compatible = "renesas,r8a7742-cpg-mssr", .data = "extal" },
{ .compatible = "renesas,r8a7743-cpg-mssr", .data = "extal" },
{ .compatible = "renesas,r8a7744-cpg-mssr", .data = "extal" },
{ .compatible = "renesas,r8a7790-cpg-mssr", .data = "extal" },
@@ -209,6 +210,7 @@ DT_MACHINE_START(RCAR_GEN2_DT, "Generic R-Car Gen2 (Flattened Device Tree)")
MACHINE_END
static const char * const rz_g1_boards_compat_dt[] __initconst = {
+ "renesas,r8a7742",
"renesas,r8a7743",
"renesas,r8a7744",
"renesas,r8a7745",
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 22af5e308db6..c3bb68d57cea 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -11,7 +11,6 @@ menuconfig ARCH_SOCFPGA
select HAVE_ARM_SCU
select HAVE_ARM_TWD if SMP
select MFD_SYSCON
- select PCI_DOMAINS_GENERIC if PCI
select ARM_ERRATA_754322
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_775420
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 160cb18850f2..4cb7e5fee137 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -10,7 +10,7 @@
#ifndef __MACH_TEGRA_IOMAP_H
#define __MACH_TEGRA_IOMAP_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/sizes.h>
#define TEGRA_IRAM_BASE 0x40000000
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index d1e1a61b12cf..6452ebf68d40 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -216,6 +216,8 @@ int tegra_pm_enter_lp2(void)
restore_cpu_complex();
cpu_cluster_pm_exit();
+ call_firmware_op(prepare_idle, TF_PM_MODE_NONE);
+
return err;
}
@@ -391,6 +393,8 @@ static int tegra_suspend_enter(suspend_state_t state)
local_fiq_enable();
+ call_firmware_op(prepare_idle, TF_PM_MODE_NONE);
+
return 0;
}
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index 53123ae4ac3b..06ca44b09381 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -98,7 +98,12 @@ ENTRY(tegra_resume_trusted_foundations)
reteq lr
.arch_extension sec
- /* First call after suspend wakes firmware. No arguments required. */
+ /*
+ * First call after suspend wakes firmware. No arguments required
+ * for some firmware versions. The downstream ASUS TF300T kernel uses
+ * r0=3 for the wake-up notification.
+ */
+ mov r0, #3
smc #0
b cpu_resume
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index e7bcf7dc4675..2667bcdb5dc6 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -361,7 +361,6 @@ _no_pll_iddq_exit:
pll_enable r1, r0, CLK_RESET_PLLM_BASE, CLK_RESET_PLLM_MISC
pll_enable r1, r0, CLK_RESET_PLLC_BASE, CLK_RESET_PLLC_MISC
- pll_enable r1, r0, CLK_RESET_PLLX_BASE, CLK_RESET_PLLX_MISC
_pll_m_c_x_done:
pll_enable r1, r0, CLK_RESET_PLLP_BASE, CLK_RESET_PLLP_MISC
@@ -371,12 +370,18 @@ _pll_m_c_x_done:
pll_locked r1, r0, CLK_RESET_PLLP_BASE
pll_locked r1, r0, CLK_RESET_PLLA_BASE
pll_locked r1, r0, CLK_RESET_PLLC_BASE
- pll_locked r1, r0, CLK_RESET_PLLX_BASE
+ /*
+ * The CPUFreq driver could select another PLL for the CPU. PLLX will be
+ * enabled by the Tegra30 CLK driver on an as-needed basis, see
+ * tegra30_cpu_clock_resume().
+ */
tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
cmp r1, #TEGRA30
beq 1f
+ pll_locked r1, r0, CLK_RESET_PLLX_BASE
+
ldr r1, [r0, #CLK_RESET_PLLP_BASE]
bic r1, r1, #(1<<31) @ disable PllP bypass
str r1, [r0, #CLK_RESET_PLLP_BASE]
@@ -398,11 +403,8 @@ _pll_m_c_x_done:
ldr r4, [r5, #0x1C] @ restore SCLK_BURST
str r4, [r0, #CLK_RESET_SCLK_BURST]
- cmp r10, #TEGRA30
- movweq r4, #:lower16:((1 << 28) | (0x8)) @ burst policy is PLLX
- movteq r4, #:upper16:((1 << 28) | (0x8))
- movwne r4, #:lower16:((1 << 28) | (0xe))
- movtne r4, #:upper16:((1 << 28) | (0xe))
+ movw r4, #:lower16:((1 << 28) | (0x4)) @ burst policy is PLLP
+ movt r4, #:upper16:((1 << 28) | (0x4))
str r4, [r0, #CLK_RESET_CCLK_BURST]
/* Restore pad power state to normal */
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index f1ce2857a251..c011359bcdb4 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -96,6 +96,10 @@ static void __init tegra_dt_init_late(void)
if (IS_ENABLED(CONFIG_ARM_TEGRA_CPUIDLE) && !psci_smp_available())
platform_device_register_simple("tegra-cpuidle", -1, NULL, 0);
+
+ if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) &&
+ of_machine_is_compatible("nvidia,tegra30"))
+ platform_device_register_simple("tegra20-cpufreq", -1, NULL, 0);
}
static const char * const tegra_dt_board_compat[] = {
@@ -107,8 +111,8 @@ static const char * const tegra_dt_board_compat[] = {
};
DT_MACHINE_START(TEGRA_DT, "NVIDIA Tegra SoC (Flattened Device Tree)")
- .l2c_aux_val = 0x3c400001,
- .l2c_aux_mask = 0xc20fc3fe,
+ .l2c_aux_val = 0x3c400000,
+ .l2c_aux_mask = 0xc20fc3ff,
.smp = smp_ops(tegra_smp_ops),
.map_io = tegra_map_common_io,
.init_early = tegra_init_early,
diff --git a/arch/arm/mach-versatile/Kconfig b/arch/arm/mach-versatile/Kconfig
index f5c275434d6c..d88e7725bf99 100644
--- a/arch/arm/mach-versatile/Kconfig
+++ b/arch/arm/mach-versatile/Kconfig
@@ -6,7 +6,6 @@ config ARCH_VERSATILE
select ARM_TIMER_SP804
select ARM_VIC
select CLKSRC_VERSATILE
- select COMMON_CLK_VERSATILE
select CPU_ARM926T
select ICST
select MFD_SYSCON
diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c
index c00ea4f77af6..02ba68abe533 100644
--- a/arch/arm/mach-versatile/versatile_dt.c
+++ b/arch/arm/mach-versatile/versatile_dt.c
@@ -39,8 +39,6 @@
#define VERSATILE_MMCI0_BASE 0x10005000 /* MMC interface */
#define VERSATILE_MMCI1_BASE 0x1000B000 /* MMC Interface */
#define VERSATILE_SCTL_BASE 0x101E0000 /* System controller */
-#define VERSATILE_IB2_BASE 0x24000000 /* IB2 module */
-#define VERSATILE_IB2_CTL_BASE (VERSATILE_IB2_BASE + 0x03000000)
/*
* System controller bit assignment
@@ -54,7 +52,6 @@
#define VERSATILE_TIMER4_EnSel 21
static void __iomem *versatile_sys_base;
-static void __iomem *versatile_ib2_ctrl;
unsigned int mmc_status(struct device *dev)
{
@@ -169,8 +166,6 @@ static void __init versatile_dt_init(void)
versatile_sys_base = of_iomap(np, 0);
WARN_ON(!versatile_sys_base);
- versatile_ib2_ctrl = ioremap(VERSATILE_IB2_CTL_BASE, SZ_4K);
-
versatile_dt_pci_init();
of_platform_default_populate(NULL, versatile_auxdata_lookup, NULL);
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 726a68085c3b..065e12991663 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -7,7 +7,6 @@ menuconfig ARCH_VEXPRESS
select ARM_GIC
select ARM_GLOBAL_TIMER
select ARM_TIMER_SP804
- select COMMON_CLK_VERSATILE
select GPIOLIB
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
@@ -21,8 +20,6 @@ menuconfig ARCH_VEXPRESS
select REGULATOR if MMC_ARMMMCI
select REGULATOR_FIXED_VOLTAGE if REGULATOR
select VEXPRESS_CONFIG
- select VEXPRESS_SYSCFG
- select MFD_VEXPRESS_SYSREG
help
This option enables support for systems using Cortex processor based
ARM core and logic (FPGA) tiles on the Versatile Express motherboard,
diff --git a/arch/arm/mach-vexpress/core.h b/arch/arm/mach-vexpress/core.h
index f4a7519084f1..bda78675c55d 100644
--- a/arch/arm/mach-vexpress/core.h
+++ b/arch/arm/mach-vexpress/core.h
@@ -1,3 +1,4 @@
bool vexpress_smp_init_ops(void);
+void vexpress_flags_set(u32 data);
extern const struct smp_operations vexpress_smp_dt_ops;
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 46a903c88c6a..a0554d7d04f7 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
+#include "core.h"
#define RST_HOLD0 0x0
#define RST_HOLD1 0x4
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 95886b3bb9dd..ffe7c7a85ae9 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -1,8 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/mach/arch.h>
#include "core.h"
+#define SYS_FLAGSSET 0x030
+#define SYS_FLAGSCLR 0x034
+
+void vexpress_flags_set(u32 data)
+{
+ static void __iomem *base;
+
+ if (!base) {
+ struct device_node *node = of_find_compatible_node(NULL, NULL,
+ "arm,vexpress-sysreg");
+
+ base = of_iomap(node, 0);
+ }
+
+ if (WARN_ON(!base))
+ return;
+
+ writel(~0, base + SYS_FLAGSCLR);
+ writel(data, base + SYS_FLAGSSET);
+}
+
static const char * const v2m_dt_match[] __initconst = {
"arm,vexpress",
NULL,
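
vexpress_flags_set() above lazily ioremaps the "arm,vexpress-sysreg" node and then clears and programs the SYS_FLAGS register that secondary cores read to find their entry point. A hedged usage sketch, assuming the usual vexpress SMP bring-up where the flag is pointed at the secondary startup trampoline (the symbol name and call site are illustrative, not taken from this patch):

#include <linux/init.h>
#include <asm/memory.h>		/* __pa_symbol() */
#include "core.h"		/* vexpress_flags_set() */

extern void versatile_secondary_startup(void);	/* assumed trampoline */

/* Illustrative: publish the physical address secondaries should jump to. */
static void __init example_publish_entry_point(void)
{
	vexpress_flags_set(__pa_symbol(versatile_secondary_startup));
}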
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
index 8841199058ea..d01cdd9ad9c7 100644
--- a/arch/arm/mach-vt8500/Kconfig
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -2,7 +2,6 @@
config ARCH_VT8500
bool
select GPIOLIB
- select CLKDEV_LOOKUP
select VT8500_TIMER
select PINCTRL
help
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 1ca633e3d024..43fb941dcd07 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -10,7 +10,6 @@ config ARCH_ZYNQ
select CADENCE_TTC_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select ICST
select MFD_SYSCON
select PINCTRL
select PINCTRL_ZYNQ
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index a9dd2f71cd19..e1ca6a5732d2 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -24,13 +24,13 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/pgtable.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/smp_scu.h>
#include <asm/system_info.h>
#include <asm/hardware/cache-l2x0.h>
diff --git a/arch/arm/mm/cache-b15-rac.c b/arch/arm/mm/cache-b15-rac.c
index 3471fc64a3ae..bdc07030997b 100644
--- a/arch/arm/mm/cache-b15-rac.c
+++ b/arch/arm/mm/cache-b15-rac.c
@@ -358,8 +358,7 @@ static int __init b15_rac_init(void)
set_bit(RAC_ENABLED, &b15_rac_flags);
spin_unlock(&rac_lock);
- pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
- b15_rac_base + RAC_CONFIG0_REG);
+ pr_info("%pOF: Broadcom Brahma-B15 readahead cache\n", dn);
goto out;
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index a94bd08fdec2..44f7292ec27b 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -14,7 +14,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a6488bb6cfa9..6a769a6c314e 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 382e1c2855e8..eb5d338657d1 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -14,7 +14,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 7d6291f23251..c18d23a5e5f1 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -16,7 +16,6 @@
#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
@@ -207,6 +206,7 @@ struct pg_level {
static struct pg_level pg_level[] = {
{
}, { /* pgd */
+ }, { /* p4d */
}, { /* pud */
}, { /* pmd */
.bits = section_bits,
@@ -308,7 +308,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
addr = start + i * PAGE_SIZE;
- note_page(st, addr, 4, pte_val(*pte), domain);
+ note_page(st, addr, 5, pte_val(*pte), domain);
}
}
@@ -350,14 +350,14 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
addr += SECTION_SIZE;
pmd++;
domain = get_domain_name(pmd);
- note_page(st, addr, 3, pmd_val(*pmd), domain);
+ note_page(st, addr, 4, pmd_val(*pmd), domain);
}
}
}
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0);
+ pud_t *pud = pud_offset(p4d, 0);
unsigned long addr;
unsigned i;
@@ -366,7 +366,23 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
if (!pud_none(*pud)) {
walk_pmd(st, pud, addr);
} else {
- note_page(st, addr, 2, pud_val(*pud), NULL);
+ note_page(st, addr, 3, pud_val(*pud), NULL);
+ }
+ }
+}
+
+static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ p4d_t *p4d = p4d_offset(pgd, 0);
+ unsigned long addr;
+ unsigned i;
+
+ for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
+ addr = start + i * P4D_SIZE;
+ if (!p4d_none(*p4d)) {
+ walk_pud(st, p4d, addr);
+ } else {
+ note_page(st, addr, 2, p4d_val(*p4d), NULL);
}
}
}
@@ -381,7 +397,7 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = start + i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
- walk_pud(st, pgd, addr);
+ walk_p4d(st, pgd, addr);
} else {
note_page(st, addr, 1, pgd_val(*pgd), NULL);
}
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ae857f41f68d..0e49154454a6 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -17,7 +17,6 @@
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"
@@ -91,6 +90,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
{
spinlock_t *ptl;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -100,7 +100,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
if (pgd_none_or_clear_bad(pgd))
return 0;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none_or_clear_bad(p4d))
+ return 0;
+
+ pud = pud_offset(p4d, address);
if (pud_none_or_clear_bad(pud))
return 0;
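
adjust_pte() above gains the (folded) p4d level between pgd and pud, which is the pattern applied throughout the rest of this series. A generic sketch of the resulting five-level descent, assuming a valid mm and user address (the helper name is illustrative):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative: walk pgd -> p4d -> pud -> pmd -> pte for one address.
 * The caller must pte_unmap() the returned pointer when done.
 */
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_map(pmd, addr);
}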
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2dd5c41cbb8d..c6550eddfce1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -18,7 +18,6 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
-#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>
@@ -43,19 +42,21 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));
do {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- if (pgd_none(*pgd))
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
break;
- if (pgd_bad(*pgd)) {
+ if (p4d_bad(*p4d)) {
pr_cont("(bad)");
break;
}
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
if (PTRS_PER_PUD != 1)
pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
@@ -270,11 +271,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -292,7 +293,7 @@ retry:
fault = __do_page_fault(mm, addr, fsr, flags, tsk);
/* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because
+ * signal first. We do not need to release the mmap_lock because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
if (fault_signal_pending(fault, regs)) {
@@ -324,7 +325,7 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
@@ -405,6 +406,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
{
unsigned int index;
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -419,13 +421,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
pgd = cpu_get_pgd() + index;
pgd_k = init_mm.pgd + index;
- if (pgd_none(*pgd_k))
+ p4d = p4d_offset(pgd, addr);
+ p4d_k = p4d_offset(pgd_k, addr);
+
+ if (p4d_none(*p4d_k))
goto bad_area;
- if (!pgd_present(*pgd))
- set_pgd(pgd, *pgd_k);
+ if (!p4d_present(*p4d))
+ set_p4d(p4d, *p4d_k);
- pud = pud_offset(pgd, addr);
- pud_k = pud_offset(pgd_k, addr);
+ pud = pud_offset(p4d, addr);
+ pud_k = pud_offset(p4d_k, addr);
if (pud_none(*pud_k))
goto bad_area;
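
The fault-handler hunks above also convert direct use of mm->mmap_sem to the mmap_lock wrapper API (mmap_read_trylock(), mmap_read_lock(), mmap_read_unlock()). A minimal reader-side sketch of that pattern, assuming a valid mm pointer:

#include <linux/mmap_lock.h>
#include <linux/mm_types.h>

/* Illustrative: take the VMA read lock the way do_page_fault() now does. */
static void inspect_vmas(struct mm_struct *mm)
{
	if (!mmap_read_trylock(mm))
		mmap_read_lock(mm);	/* slow path: sleep until available */

	/* ... walk or inspect VMAs here ... */

	mmap_read_unlock(mm);
}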
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a76f8ace9ce6..187fab227b50 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,7 +18,7 @@
static inline void set_fixmap_pte(int idx, pte_t pte)
{
unsigned long vaddr = __fix_to_virt(idx);
- pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *ptep = virt_to_kpte(vaddr);
set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
@@ -26,41 +26,18 @@ static inline void set_fixmap_pte(int idx, pte_t pte)
static inline pte_t get_fixmap_pte(unsigned long vaddr)
{
- pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *ptep = virt_to_kpte(vaddr);
return *ptep;
}
-void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned int idx;
unsigned long vaddr;
void *kmap;
int type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
#ifdef CONFIG_DEBUG_HIGHMEM
/*
* There is no cache coherency issue when non VIVT, so force the
@@ -90,13 +67,13 @@ void *kmap_atomic(struct page *page)
* in place, so the contained TLB flush ensures the TLB is updated
* with the new mapping.
*/
- set_fixmap_pte(idx, mk_pte(page, kmap_prot));
+ set_fixmap_pte(idx, mk_pte(page, prot));
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int idx, type;
@@ -118,10 +95,8 @@ void __kunmap_atomic(void *kvaddr)
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
void *kmap_atomic_pfn(unsigned long pfn)
{
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index a033f6134a64..448e57c6f653 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -3,12 +3,12 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/hwcap.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
@@ -68,7 +68,8 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
unsigned long prot)
{
- pud_t *pud = pud_offset(pgd, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
unsigned long next;
do {
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4e43455fab84..01e18e43b174 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -519,7 +519,7 @@ static inline void section_update(unsigned long addr, pmdval_t mask,
{
pmd_t *pmd;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+ pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 72286f9a4d30..000e8210000b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -141,14 +141,8 @@ void __check_vmalloc_seq(struct mm_struct *mm)
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmdp;
-
- flush_cache_vunmap(addr, end);
- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmdp = pmd_offset(pud, addr);
+ pmd_t *pmdp = pmd_off_k(addr);
+
do {
pmd_t pmd = *pmdp;
@@ -189,9 +183,7 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
@@ -199,9 +191,6 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
*/
unmap_area_sections(virt, size);
- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
do {
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
@@ -221,19 +210,13 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
* sync the current kernel mapping.
*/
unmap_area_sections(virt, size);
-
- pgd = pgd_offset_k(virt);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
do {
unsigned long super_pmd_val, i;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 88c121ac14b3..9ff683612f2a 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -2,8 +2,7 @@
#ifdef CONFIG_MMU
#include <linux/list.h>
#include <linux/vmalloc.h>
-
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
@@ -36,11 +35,6 @@ static inline pte_t get_top_pte(unsigned long va)
return *ptep;
}
-static inline pmd_t *pmd_off_k(unsigned long virt)
-{
- return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
-}
-
struct mem_type {
pteval_t prot_pte;
pteval_t prot_pte_s2;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ec8d0008bfa1..628028bfbb92 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -356,11 +356,7 @@ static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
- pgd_t *pgd = pgd_offset_k(addr);
- pud_t *pud = pud_offset(pgd, addr);
- pmd_t *pmd = pmd_offset(pud, addr);
-
- return pmd;
+ return pmd_off_k(addr);
}
void __init early_fixmap_init(void)
@@ -801,12 +797,12 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
} while (pmd++, addr = next, addr != end);
}
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type,
void *(*alloc)(unsigned long sz), bool ng)
{
- pud_t *pud = pud_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
unsigned long next;
do {
@@ -816,6 +812,21 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
} while (pud++, addr = next, addr != end);
}
+static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type,
+ void *(*alloc)(unsigned long sz), bool ng)
+{
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = p4d_addr_end(addr, end);
+ alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
+ phys += next - addr;
+ } while (p4d++, addr = next, addr != end);
+}
+
#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct mm_struct *mm,
struct map_desc *md,
@@ -863,7 +874,8 @@ static void __init create_36bit_mapping(struct mm_struct *mm,
pgd = pgd_offset(mm, addr);
end = addr + length;
do {
- pud_t *pud = pud_offset(pgd, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd = pmd_offset(pud, addr);
int i;
@@ -914,7 +926,7 @@ static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
do {
unsigned long next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
+ alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);
phys += next - addr;
addr = next;
@@ -950,7 +962,13 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
bool ng)
{
#ifdef CONFIG_ARM_LPAE
- pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+ p4d_t *p4d;
+ pud_t *pud;
+
+ p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
+ if (WARN_ON(!p4d))
+ return;
+ pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))
return;
pmd_alloc(mm, pud, 0);
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index d546efad7e97..9790ae3a8c68 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -5,7 +5,6 @@
#include <linux/mm.h>
#include <linux/module.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 478bd2c6aa50..c5e1b27046a8 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -30,6 +30,7 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd, *init_pgd;
+ p4d_t *new_p4d, *init_p4d;
pud_t *new_pud, *init_pud;
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
@@ -53,8 +54,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
/*
* Allocate PMD table for modules and pkmap mappings.
*/
- new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
+ new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
MODULES_VADDR);
+ if (!new_p4d)
+ goto no_p4d;
+
+ new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
if (!new_pud)
goto no_pud;
@@ -69,7 +74,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
* contains the machine vectors. The vectors are always high
* with LPAE.
*/
- new_pud = pud_alloc(mm, new_pgd, 0);
+ new_p4d = p4d_alloc(mm, new_pgd, 0);
+ if (!new_p4d)
+ goto no_p4d;
+
+ new_pud = pud_alloc(mm, new_p4d, 0);
if (!new_pud)
goto no_pud;
@@ -91,7 +100,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif
- init_pud = pud_offset(init_pgd, 0);
+ init_p4d = p4d_offset(init_pgd, 0);
+ init_pud = pud_offset(init_p4d, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
set_pte_ext(new_pte + 0, init_pte[0], 0);
@@ -108,6 +118,8 @@ no_pte:
no_pmd:
pud_free(mm, new_pud);
no_pud:
+ p4d_free(mm, new_p4d);
+no_p4d:
__pgd_free(new_pgd);
no_pgd:
return NULL;
@@ -116,6 +128,7 @@ no_pgd:
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgtable_t pte;
@@ -127,7 +140,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
if (pgd_none_or_clear_bad(pgd))
goto no_pgd;
- pud = pud_offset(pgd, 0);
+ p4d = p4d_offset(pgd, 0);
+ if (p4d_none_or_clear_bad(p4d))
+ goto no_p4d;
+
+ pud = pud_offset(p4d, 0);
if (pud_none_or_clear_bad(pud))
goto no_pud;
@@ -144,8 +161,11 @@ no_pmd:
pmd_free(mm, pmd);
mm_dec_nr_pmds(mm);
no_pud:
- pgd_clear(pgd);
+ p4d_clear(p4d);
pud_free(mm, pud);
+no_p4d:
+ pgd_clear(pgd);
+ p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
/*
@@ -156,15 +176,21 @@ no_pgd:
continue;
if (pgd_val(*pgd) & L_PGD_SWAPPER)
continue;
- pud = pud_offset(pgd, 0);
+ p4d = p4d_offset(pgd, 0);
+ if (p4d_none_or_clear_bad(p4d))
+ continue;
+ pud = pud_offset(p4d, 0);
if (pud_none_or_clear_bad(pud))
continue;
pmd = pmd_offset(pud, 0);
pud_clear(pud);
pmd_free(mm, pmd);
mm_dec_nr_pmds(mm);
- pgd_clear(pgd);
+ p4d_clear(p4d);
pud_free(mm, pud);
+ mm_dec_nr_puds(mm);
+ pgd_clear(pgd);
+ p4d_free(mm, p4d);
}
#endif
__pgd_free(pgd_base);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2785da387c91..6837cf7a4812 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index e9ea237ed785..df49b10250b8 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 920c279e7879..e89ce467f672 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 0bdf25a95b10..7fdd1a205e8e 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 39361e196d61..3b687e6dd9fd 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -20,11 +20,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 1a94bbf6e53f..f2ec3bc60874 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -6,11 +6,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 52b66cf0259e..01bbe7576c1c 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -6,11 +6,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 31ac8acc34dc..a234cd8ba5e6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -13,10 +13,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index ca2c7ca8af21..53c029dcfd83 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -14,10 +14,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index a381a0c9f109..0bfad62ea858 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -37,10 +37,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 1ba253c2bce1..0487a2c3439b 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -13,10 +13,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 4b8a00220cc9..cf9bfcc825ca 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -6,10 +6,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 555becf9c758..6fb3898ad1cd 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -8,10 +8,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index ef517530130b..a054c0e9c034 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -6,11 +6,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index dddf833fe000..2c73e0d47d08 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -11,10 +11,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index b12b76bc8d30..61ce82aca6f0 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -8,10 +8,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index d47d6c5cee63..1645ccaffe96 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -9,10 +9,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index baba503ba816..4071f7a61cb6 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -12,12 +12,12 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 75ebacc8e4e5..e723bd4119d3 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -17,12 +17,12 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1dd0d5ca27da..a0618f3e6836 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -9,11 +9,11 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 48e0ef6f0dcc..28c9d32fa99a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,11 +9,11 @@
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/memory.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 42eaecc43cfe..a17afe7e195a 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -23,9 +23,9 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 18ac5a1f8922..d82590aa71c0 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -19,9 +19,9 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
index 769778928356..8eade0416739 100644
--- a/arch/arm/mm/pv-fixup-asm.S
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -6,10 +6,10 @@
* for Keystone 2
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
.section ".idmap.text", "ax"
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 4f7b27239bd4..55b1925f65d7 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -333,7 +333,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct adc_device *adc;
- struct resource *regs;
enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data;
int ret;
unsigned tmp;
@@ -354,10 +353,8 @@ static int s3c_adc_probe(struct platform_device *pdev)
}
adc->irq = platform_get_irq(pdev, 1);
- if (adc->irq <= 0) {
- dev_err(dev, "failed to get adc irq\n");
+ if (adc->irq <= 0)
return -ENOENT;
- }
ret = devm_request_irq(dev, adc->irq, s3c_adc_irq, 0, dev_name(dev),
adc);
@@ -372,8 +369,7 @@ static int s3c_adc_probe(struct platform_device *pdev)
return PTR_ERR(adc->clk);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adc->regs = devm_ioremap_resource(dev, regs);
+ adc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adc->regs))
return PTR_ERR(adc->regs);
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig
deleted file mode 100644
index 748238f9f10e..000000000000
--- a/arch/arm/plat-versatile/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-if PLAT_VERSATILE
-
-config PLAT_VERSATILE_SCHED_CLOCK
- bool
-
-endif
diff --git a/arch/arm/plat-versatile/Makefile b/arch/arm/plat-versatile/Makefile
index e856f0a4ac6e..5de44a57c4de 100644
--- a/arch/arm/plat-versatile/Makefile
+++ b/arch/arm/plat-versatile/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include
-obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o
obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/plat-versatile/include/plat/sched_clock.h b/arch/arm/plat-versatile/include/plat/sched_clock.h
deleted file mode 100644
index 83fdaef23c2e..000000000000
--- a/arch/arm/plat-versatile/include/plat/sched_clock.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARM_PLAT_SCHED_CLOCK_H
-#define ARM_PLAT_SCHED_CLOCK_H
-
-void versatile_sched_clock_init(void __iomem *, unsigned long);
-
-#endif
diff --git a/arch/arm/plat-versatile/sched-clock.c b/arch/arm/plat-versatile/sched-clock.c
deleted file mode 100644
index ecb7913d2f53..000000000000
--- a/arch/arm/plat-versatile/sched-clock.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * linux/arch/arm/plat-versatile/sched-clock.c
- *
- * Copyright (C) 1999 - 2003 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- */
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/sched_clock.h>
-
-#include <plat/sched_clock.h>
-
-static void __iomem *ctr;
-
-static u64 notrace versatile_read_sched_clock(void)
-{
- if (ctr)
- return readl(ctr);
-
- return 0;
-}
-
-void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
-{
- ctr = reg;
- sched_clock_register(versatile_read_sched_clock, 32, rate);
-}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d0bc8bae7c8d..8a46ed3ab429 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -12,6 +12,7 @@ config ARM64
select ARCH_HAS_DEBUG_WX
select ARCH_BINFMT_ELF_STATE
select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
@@ -1298,6 +1299,14 @@ config COMPAT_VDSO
You must have a 32-bit build of glibc 2.22 or later for programs
to seamlessly take advantage of this.
+config THUMB2_COMPAT_VDSO
+ bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
+ depends on COMPAT_VDSO
+ default y
+ help
+ Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
+ otherwise with '-marm'.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on SYSCTL
@@ -1739,8 +1748,9 @@ config ARM64_DEBUG_PRIORITY_MASKING
endif
config RELOCATABLE
- bool
+ bool "Build a relocatable kernel image" if EXPERT
select ARCH_HAS_RELR
+ default y
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 55d70cfe0f9e..8dd05b2a925c 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -235,7 +235,6 @@ config ARCH_TEGRA
bool "NVIDIA Tegra SoC Family"
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC_PM
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select TIMER_OF
select GENERIC_CLOCKEVENTS
@@ -248,7 +247,7 @@ config ARCH_TEGRA
This enables support for the NVIDIA Tegra SoC family.
config ARCH_SPRD
- tristate "Spreadtrum SoC platform"
+ bool "Spreadtrum SoC platform"
help
Support for Spreadtrum ARM based SoCs
@@ -274,12 +273,9 @@ config ARCH_UNIPHIER
config ARCH_VEXPRESS
bool "ARMv8 software model (Versatile Express)"
- select COMMON_CLK_VERSATILE
select GPIOLIB
select PM
select PM_GENERIC_DOMAINS
- select POWER_RESET_VEXPRESS
- select VEXPRESS_CONFIG
help
This enables support for the ARMv8 software model (Versatile
Express).
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 650e1185c190..76359cfb328a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -94,7 +94,6 @@ endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
-AS += -EB
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
@@ -102,7 +101,6 @@ UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
-AS += -EL
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE := aarch64
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
index 5fa9ca0191a8..f3f8e177ab61 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
@@ -32,6 +32,15 @@
};
};
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "a64-olinuxino:red:user";
+ gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */
+ };
+ };
+
reg_usb1_vbus: usb1-vbus {
compatible = "regulator-fixed";
regulator-name = "usb1-vbus";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index c26cc1fcaffd..8dfbcd144072 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -539,6 +539,16 @@
resets = <&ccu RST_BUS_CE>;
};
+ msgbox: mailbox@1c17000 {
+ compatible = "allwinner,sun50i-a64-msgbox",
+ "allwinner,sun6i-a31-msgbox";
+ reg = <0x01c17000 0x1000>;
+ clocks = <&ccu CLK_BUS_MSGBOX>;
+ resets = <&ccu RST_BUS_MSGBOX>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
usb_otg: usb@1c19000 {
compatible = "allwinner,sun8i-a33-musb";
reg = <0x01c19000 0x0400>;
@@ -1065,6 +1075,8 @@
compatible = "allwinner,sun50i-a64-mbus";
reg = <0x01c62000 0x1000>;
clocks = <&ccu 112>;
+ #address-cells = <1>;
+ #size-cells = <1>;
dma-ranges = <0x00000000 0x40000000 0xc0000000>;
#interconnect-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 8f09d209359b..3f7ceeb1a767 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -4,6 +4,7 @@
/dts-v1/;
#include "sun50i-h6.dtsi"
+#include "sun50i-h6-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
@@ -77,6 +78,10 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdca>;
+};
+
&de {
status = "okay";
};
@@ -234,7 +239,8 @@
reg_dcdca: dcdca {
regulator-always-on;
regulator-min-microvolt = <810000>;
- regulator-max-microvolt = <1080000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-ramp-delay = <2500>;
regulator-name = "vdd-cpu";
};
@@ -242,6 +248,7 @@
regulator-enable-ramp-delay = <32000>;
regulator-min-microvolt = <810000>;
regulator-max-microvolt = <1080000>;
+ regulator-ramp-delay = <2500>;
regulator-name = "vdd-gpu";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
new file mode 100644
index 000000000000..1a5eddc5a40f
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-cpu-opp.dtsi
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (C) 2020 Ondrej Jirman <megous@megous.com>
+// Copyright (C) 2020 Clément Péron <peron.clem@gmail.com>
+
+/ {
+ cpu_opp_table: cpu-opp-table {
+ compatible = "allwinner,sun50i-h6-operating-points";
+ nvmem-cells = <&cpu_speed_grade>;
+ opp-shared;
+
+ opp@480000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <480000000>;
+
+ opp-microvolt-speed0 = <880000 880000 1200000>;
+ opp-microvolt-speed1 = <820000 820000 1200000>;
+ opp-microvolt-speed2 = <820000 820000 1200000>;
+ };
+
+ opp@720000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <720000000>;
+
+ opp-microvolt-speed0 = <880000 880000 1200000>;
+ opp-microvolt-speed1 = <820000 820000 1200000>;
+ opp-microvolt-speed2 = <820000 820000 1200000>;
+ };
+
+ opp@816000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <816000000>;
+
+ opp-microvolt-speed0 = <880000 880000 1200000>;
+ opp-microvolt-speed1 = <820000 820000 1200000>;
+ opp-microvolt-speed2 = <820000 820000 1200000>;
+ };
+
+ opp@888000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <888000000>;
+
+ opp-microvolt-speed0 = <880000 880000 1200000>;
+ opp-microvolt-speed1 = <820000 820000 1200000>;
+ opp-microvolt-speed2 = <820000 820000 1200000>;
+ };
+
+ opp@1080000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1080000000>;
+
+ opp-microvolt-speed0 = <940000 940000 1200000>;
+ opp-microvolt-speed1 = <880000 880000 1200000>;
+ opp-microvolt-speed2 = <880000 880000 1200000>;
+ };
+
+ opp@1320000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1320000000>;
+
+ opp-microvolt-speed0 = <1000000 1000000 1200000>;
+ opp-microvolt-speed1 = <940000 940000 1200000>;
+ opp-microvolt-speed2 = <940000 940000 1200000>;
+ };
+
+ opp@1488000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1488000000>;
+
+ opp-microvolt-speed0 = <1060000 1060000 1200000>;
+ opp-microvolt-speed1 = <1000000 1000000 1200000>;
+ opp-microvolt-speed2 = <1000000 1000000 1200000>;
+ };
+
+ opp@1608000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1608000000>;
+
+ opp-microvolt-speed0 = <1090000 1090000 1200000>;
+ opp-microvolt-speed1 = <1030000 1030000 1200000>;
+ opp-microvolt-speed2 = <1030000 1030000 1200000>;
+ };
+
+ opp@1704000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1704000000>;
+
+ opp-microvolt-speed0 = <1120000 1120000 1200000>;
+ opp-microvolt-speed1 = <1060000 1060000 1200000>;
+ opp-microvolt-speed2 = <1060000 1060000 1200000>;
+ };
+
+ opp@1800000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1800000000>;
+
+ opp-microvolt-speed0 = <1160000 1160000 1200000>;
+ opp-microvolt-speed1 = <1100000 1100000 1200000>;
+ opp-microvolt-speed2 = <1100000 1100000 1200000>;
+ };
+ };
+};
+
+&cpu0 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu1 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu2 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu3 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index 47f579610dcc..15c9dd8c4479 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -4,6 +4,7 @@
/dts-v1/;
#include "sun50i-h6.dtsi"
+#include "sun50i-h6-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
@@ -257,6 +258,7 @@
regulator-always-on;
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1160000>;
+ regulator-ramp-delay = <2500>;
regulator-name = "vdd-cpu";
};
@@ -264,6 +266,7 @@
regulator-enable-ramp-delay = <32000>;
regulator-min-microvolt = <810000>;
regulator-max-microvolt = <1080000>;
+ regulator-ramp-delay = <2500>;
regulator-name = "vdd-gpu";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
index e7ca75c0d0f7..e8770858b5d0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-lite2.dts
@@ -6,4 +6,69 @@
/ {
model = "OrangePi Lite2";
compatible = "xunlong,orangepi-lite2", "allwinner,sun50i-h6";
+
+ aliases {
+ serial1 = &uart1; /* BT-UART */
+ };
+
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rtc 1>;
+ clock-names = "ext_clock";
+ reset-gpios = <&r_pio 1 3 GPIO_ACTIVE_LOW>; /* PM3 */
+ post-power-on-delay-ms = <200>;
+ };
+};
+
+&mmc1 {
+ vmmc-supply = <&reg_cldo2>;
+ vqmmc-supply = <&reg_bldo3>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+
+ brcm: sdio-wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ interrupt-parent = <&r_pio>;
+ interrupts = <1 0 IRQ_TYPE_LEVEL_LOW>; /* PM0 */
+ interrupt-names = "host-wake";
+ };
+};
+
+&reg_cldo2 {
+ /*
+ * This regulator is connected with CLDO3.
+ * Before the kernel can support synchronized
+ * enable of coupled regulators, keep them
+ * both always on as an ugly hack.
+ */
+ regulator-always-on;
+};
+
+&reg_cldo3 {
+ /*
+ * This regulator is connected with CLDO2.
+ * See the comments for CLDO2.
+ */
+ regulator-always-on;
+};
+
+/* The BT part of the AP6255 is connected to this UART */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm4345c5";
+ clocks = <&rtc 1>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&r_pio 1 2 GPIO_ACTIVE_HIGH>; /* PM2 */
+ host-wakeup-gpios = <&r_pio 1 1 GPIO_ACTIVE_HIGH>; /* PM1 */
+ shutdown-gpios = <&r_pio 1 4 GPIO_ACTIVE_HIGH>; /* PM4 */
+ max-speed = <1500000>;
+ };
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index 9287976c4a50..ebc120a9232f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -106,6 +106,12 @@
status = "okay";
};
+&pio {
+ vcc-pc-supply = <&reg_bldo2>;
+ vcc-pd-supply = <&reg_cldo1>;
+ vcc-pg-supply = <&reg_aldo1>;
+};
+
&r_i2c {
status = "okay";
@@ -230,6 +236,10 @@
status = "okay";
};
+&r_pio {
+ vcc-pm-supply = <&reg_bldo3>;
+};
+
&rtc {
clocks = <&ext_osc32k>;
};
@@ -241,7 +251,12 @@
};
&usb2otg {
- dr_mode = "otg";
+ /*
+ * OrangePi Lite 2 and One Plus, where this DT is used, don't
+ * have a controllable VBUS even though they do have an ID pin.
+ * Using it as anything but a USB host is unsafe.
+ */
+ dr_mode = "host";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index b0642d841933..af85b2074867 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -4,6 +4,7 @@
/dts-v1/;
#include "sun50i-h6.dtsi"
+#include "sun50i-h6-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
@@ -80,6 +81,22 @@
};
};
+&cpu0 {
+ cpu-supply = <&reg_dcdca>;
+};
+
+&de {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
&emac {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
@@ -91,17 +108,6 @@
status = "okay";
};
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
-
-&de {
- status = "okay";
-};
-
&gpu {
mali-supply = <&reg_dcdcc>;
status = "okay";
@@ -117,12 +123,11 @@
};
};
-&ehci0 {
- status = "okay";
-};
-
-&ehci3 {
- status = "okay";
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
};
&mmc0 {
@@ -238,7 +243,8 @@
reg_dcdca: dcdca {
regulator-always-on;
regulator-min-microvolt = <810000>;
- regulator-max-microvolt = <1080000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-ramp-delay = <2500>;
regulator-name = "vdd-cpu";
};
@@ -246,6 +252,7 @@
regulator-enable-ramp-delay = <32000>;
regulator-min-microvolt = <810000>;
regulator-max-microvolt = <1080000>;
+ regulator-ramp-delay = <2500>;
regulator-name = "vdd-gpu";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
index 83e6cb0e59ce..be81330db14f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix-tx6.dts
@@ -4,6 +4,7 @@
/dts-v1/;
#include "sun50i-h6.dtsi"
+#include "sun50i-h6-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
@@ -37,6 +38,17 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
+
+ reg_vdd_cpu_gpu: vdd-cpu-gpu {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-cpu-gpu";
+ regulator-min-microvolt = <1135000>;
+ regulator-max-microvolt = <1135000>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_vdd_cpu_gpu>;
};
&de {
@@ -56,6 +68,7 @@
};
&gpu {
+ mali-supply = <&reg_vdd_cpu_gpu>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index b9ab7d8fa8af..78b1361dfbb9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -25,6 +25,9 @@
device_type = "cpu";
reg = <0>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -32,6 +35,9 @@
device_type = "cpu";
reg = <1>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -39,6 +45,9 @@
device_type = "cpu";
reg = <2>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -46,6 +55,9 @@
device_type = "cpu";
reg = <3>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ #cooling-cells = <2>;
};
};
@@ -123,6 +135,7 @@
clock-names = "bus",
"mod";
resets = <&display_clocks RST_MIXER0>;
+ iommus = <&iommu 0>;
ports {
#address-cells = <1>;
@@ -231,6 +244,16 @@
#dma-cells = <1>;
};
+ msgbox: mailbox@3003000 {
+ compatible = "allwinner,sun50i-h6-msgbox",
+ "allwinner,sun6i-a31-msgbox";
+ reg = <0x03003000 0x1000>;
+ clocks = <&ccu CLK_BUS_MSGBOX>;
+ resets = <&ccu RST_BUS_MSGBOX>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
+
sid: efuse@3006000 {
compatible = "allwinner,sun50i-h6-sid";
reg = <0x03006000 0x400>;
@@ -240,6 +263,10 @@
ths_calibration: thermal-sensor-calibration@14 {
reg = <0x14 0x8>;
};
+
+ cpu_speed_grade: cpu-speed-grade@1c {
+ reg = <0x1c 0x4>;
+ };
};
watchdog: watchdog@30090a0 {
@@ -387,6 +414,15 @@
#interrupt-cells = <3>;
};
+ iommu: iommu@30f0000 {
+ compatible = "allwinner,sun50i-h6-iommu";
+ reg = <0x030f0000 0x10000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_IOMMU>;
+ resets = <&ccu RST_BUS_IOMMU>;
+ #iommu-cells = <1>;
+ };
+
mmc0: mmc@4020000 {
compatible = "allwinner,sun50i-h6-mmc",
"allwinner,sun50i-a64-mmc";
@@ -946,6 +982,30 @@
polling-delay-passive = <0>;
polling-delay = <0>;
thermal-sensors = <&ths 0>;
+
+ trips {
+ cpu_alert: cpu-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-crit {
+ temperature = <100000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
gpu-thermal {
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index eef0045320f2..5cac4d1d487d 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -3,6 +3,8 @@ dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12b-gtking-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-a311d-khadas-vim3.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-s922x-khadas-vim3.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12b-odroid-n2.dtb
@@ -27,6 +29,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-phicomm-n1.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-sml5442tw.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s805x-p241.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-p281.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-tx3-mini.dtb
@@ -40,4 +43,5 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-libretech-pc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-vega-s96.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-sei610.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-sm1-khadas-vim3l.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-sm1-odroid-c4.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-a1-ad401.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index aace3d32a3df..8e6281c685fa 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1735,18 +1735,18 @@
};
sram: sram@fffc0000 {
- compatible = "amlogic,meson-axg-sram", "mmio-sram";
+ compatible = "mmio-sram";
reg = <0x0 0xfffc0000 0x0 0x20000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x0 0xfffc0000 0x20000>;
- cpu_scp_lpri: scp-shmem@13000 {
+ cpu_scp_lpri: scp-sram@13000 {
compatible = "amlogic,meson-axg-scp-shmem";
reg = <0x13000 0x400>;
};
- cpu_scp_hpri: scp-shmem@13400 {
+ cpu_scp_hpri: scp-sram@13400 {
compatible = "amlogic,meson-axg-scp-shmem";
reg = <0x13400 0x400>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index c0aef7d69117..593a006f4b7b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -250,6 +250,17 @@
};
};
+ acodec: audio-controller@32000 {
+ compatible = "amlogic,t9015";
+ reg = <0x0 0x32000 0x0 0x14>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "ACODEC";
+ clocks = <&clkc CLKID_AUDIO_CODEC>;
+ clock-names = "pclk";
+ resets = <&reset RESET_AUDIO_CODEC>;
+ status = "disabled";
+ };
+
periphs: bus@34400 {
compatible = "simple-bus";
reg = <0x0 0x34400 0x0 0x400>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
index 55d39020ec72..6a1f4dcf6488 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12.dtsi
@@ -343,6 +343,15 @@
status = "disabled";
};
+ toacodec: audio-controller@740 {
+ compatible = "amlogic,g12a-toacodec";
+ reg = <0x0 0x740 0x0 0x4>;
+ #sound-dai-cells = <1>;
+ sound-name-prefix = "TOACODEC";
+ resets = <&clkc_audio AUD_RESET_TOACODEC>;
+ status = "disabled";
+ };
+
tohdmitx: audio-controller@744 {
compatible = "amlogic,g12a-tohdmitx";
reg = <0x0 0x744 0x0 0x4>;
@@ -354,29 +363,6 @@
};
};
-&cpu_thermal {
- cooling-maps {
- map0 {
- trip = <&cpu_passive>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu100 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu101 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu102 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu103 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- map1 {
- trip = <&cpu_hot>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu100 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu101 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu102 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu103 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
- };
-};
-
&ethmac {
power-domains = <&pwrc PWRC_G12A_ETH_ID>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
new file mode 100644
index 000000000000..f0c56a16af3d
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (c) 2019 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-g12b-w400.dtsi"
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ compatible = "azw,gtking", "amlogic,g12b";
+ model = "Beelink GT-King Pro";
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ power-button {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ white {
+ label = "power:white";
+ gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "G12B-GTKING-PRO";
+ audio-aux-devs = <&tdmout_b>;
+ audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_B IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_B IN 2", "FRDDR_C OUT 1",
+ "TDM_B Playback", "TDMOUT_B OUT";
+
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-3 {
+ sound-dai = <&tdmif_b>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+ };
+ };
+
+ dai-link-4 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+};
+
+&arb {
+ status = "okay";
+};
+
+&clkc_audio {
+ status = "okay";
+};
+
+&frddr_a {
+ status = "okay";
+};
+
+&frddr_b {
+ status = "okay";
+};
+
+&frddr_c {
+ status = "okay";
+};
+
+&tdmif_b {
+ status = "okay";
+};
+
+&tdmout_b {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
new file mode 100644
index 000000000000..eeb7bc5539ef
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (c) 2019 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-g12b-w400.dtsi"
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+ compatible = "azw,gtking", "amlogic,g12b";
+ model = "Beelink GT-King";
+
+ spdif_dit: audio-codec-1 {
+ #sound-dai-cells = <0>;
+ compatible = "linux,spdif-dit";
+ status = "okay";
+ sound-name-prefix = "DIT";
+ };
+
+ sound {
+ compatible = "amlogic,axg-sound-card";
+ model = "G12B-GTKING";
+ audio-aux-devs = <&tdmout_b>;
+ audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+ "TDMOUT_B IN 1", "FRDDR_B OUT 1",
+ "TDMOUT_B IN 2", "FRDDR_C OUT 1",
+ "TDM_B Playback", "TDMOUT_B OUT",
+ "SPDIFOUT IN 0", "FRDDR_A OUT 3",
+ "SPDIFOUT IN 1", "FRDDR_B OUT 3",
+ "SPDIFOUT IN 2", "FRDDR_C OUT 3";
+
+ assigned-clocks = <&clkc CLKID_MPLL2>,
+ <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&frddr_a>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&frddr_b>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&frddr_c>;
+ };
+
+ /* 8ch hdmi interface */
+ dai-link-3 {
+ sound-dai = <&tdmif_b>;
+ dai-format = "i2s";
+ dai-tdm-slot-tx-mask-0 = <1 1>;
+ dai-tdm-slot-tx-mask-1 = <1 1>;
+ dai-tdm-slot-tx-mask-2 = <1 1>;
+ dai-tdm-slot-tx-mask-3 = <1 1>;
+ mclk-fs = <256>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+ };
+ };
+
+ /* spdif hdmi or toslink interface */
+ dai-link-4 {
+ sound-dai = <&spdifout>;
+
+ codec-0 {
+ sound-dai = <&spdif_dit>;
+ };
+
+ codec-1 {
+ sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_A>;
+ };
+ };
+
+ /* spdif hdmi interface */
+ dai-link-5 {
+ sound-dai = <&spdifout_b>;
+
+ codec {
+ sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_B>;
+ };
+ };
+
+ /* hdmi glue */
+ dai-link-6 {
+ sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+ codec {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+ };
+};
+
+&arb {
+ status = "okay";
+};
+
+&clkc_audio {
+ status = "okay";
+};
+
+&frddr_a {
+ status = "okay";
+};
+
+&frddr_b {
+ status = "okay";
+};
+
+&frddr_c {
+ status = "okay";
+};
+
+&spdifout {
+ pinctrl-0 = <&spdif_out_h_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&spdifout_b {
+ status = "okay";
+};
+
+&tdmif_b {
+ status = "okay";
+};
+
+&tdmout_b {
+ status = "okay";
+};
+
+&tohdmitx {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
index c6c8caed8327..224c890d32d3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
@@ -51,11 +51,11 @@
sound {
compatible = "amlogic,axg-sound-card";
model = "G12B-KHADAS-VIM3";
- audio-aux-devs = <&tdmout_b>;
- audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
- "TDMOUT_B IN 1", "FRDDR_B OUT 1",
- "TDMOUT_B IN 2", "FRDDR_C OUT 1",
- "TDM_B Playback", "TDMOUT_B OUT";
+ audio-aux-devs = <&tdmout_a>;
+ audio-routing = "TDMOUT_A IN 0", "FRDDR_A OUT 0",
+ "TDMOUT_A IN 1", "FRDDR_B OUT 0",
+ "TDMOUT_A IN 2", "FRDDR_C OUT 0",
+ "TDM_A Playback", "TDMOUT_A OUT";
assigned-clocks = <&clkc CLKID_MPLL2>,
<&clkc CLKID_MPLL0>,
@@ -80,7 +80,7 @@
/* 8ch hdmi interface */
dai-link-3 {
- sound-dai = <&tdmif_b>;
+ sound-dai = <&tdmif_a>;
dai-format = "i2s";
dai-tdm-slot-tx-mask-0 = <1 1>;
dai-tdm-slot-tx-mask-1 = <1 1>;
@@ -89,7 +89,7 @@
mclk-fs = <256>;
codec {
- sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+ sound-dai = <&tohdmitx TOHDMITX_I2S_IN_A>;
};
};
@@ -182,11 +182,11 @@
status = "okay";
};
-&tdmif_b {
+&tdmif_a {
status = "okay";
};
-&tdmout_b {
+&tdmout_a {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
index 046cc332d07f..1e5d0ee5d541 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
@@ -65,6 +65,11 @@
opp-hz = /bits/ 64 <1896000000>;
opp-microvolt = <981000>;
};
+
+ opp-1992000000 {
+ opp-hz = /bits/ 64 <1992000000>;
+ opp-microvolt = <1001000>;
+ };
};
cpub_opp_table_1: opp-table-1 {
@@ -120,5 +125,15 @@
opp-hz = /bits/ 64 <1704000000>;
opp-microvolt = <891000>;
};
+
+ opp-1800000000 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-microvolt = <981000>;
+ };
+
+ opp-1908000000 {
+ opp-hz = /bits/ 64 <1908000000>;
+ opp-microvolt = <1022000>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
index 06c5430eb92d..b57bb0befc69 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
@@ -7,42 +7,13 @@
/dts-v1/;
-#include "meson-g12b.dtsi"
-#include "meson-g12b-s922x.dtsi"
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include "meson-g12b-w400.dtsi"
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
/ {
- compatible = "ugoos,am6", "amlogic,g12b";
+ compatible = "ugoos,am6", "amlogic,s922x", "amlogic,g12b";
model = "Ugoos AM6";
- aliases {
- serial0 = &uart_AO;
- ethernet0 = &ethmac;
- };
-
- chosen {
- stdout-path = "serial0:115200n8";
- };
-
- memory@0 {
- device_type = "memory";
- reg = <0x0 0x0 0x0 0x40000000>;
- };
-
- emmc_pwrseq: emmc-pwrseq {
- compatible = "mmc-pwrseq-emmc";
- reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
- };
-
- sdio_pwrseq: sdio-pwrseq {
- compatible = "mmc-pwrseq-simple";
- reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
- clocks = <&wifi32k>;
- clock-names = "ext_clock";
- };
-
spdif_dit: audio-codec-1 {
#sound-dai-cells = <0>;
compatible = "linux,spdif-dit";
@@ -50,154 +21,6 @@
sound-name-prefix = "DIT";
};
- flash_1v8: regulator-flash_1v8 {
- compatible = "regulator-fixed";
- regulator-name = "FLASH_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- vin-supply = <&vcc_3v3>;
- regulator-always-on;
- };
-
- main_12v: regulator-main_12v {
- compatible = "regulator-fixed";
- regulator-name = "12V";
- regulator-min-microvolt = <12000000>;
- regulator-max-microvolt = <12000000>;
- regulator-always-on;
- };
-
- vcc_5v: regulator-vcc_5v {
- compatible = "regulator-fixed";
- regulator-name = "VCC_5V";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&main_12v>;
-
- gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
- enable-active-high;
- };
-
- vcc_1v8: regulator-vcc_1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VCC_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- vin-supply = <&vcc_3v3>;
- regulator-always-on;
- };
-
- vcc_3v3: regulator-vcc_3v3 {
- compatible = "regulator-fixed";
- regulator-name = "VCC_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&vddao_3v3>;
- regulator-always-on;
- /* FIXME: actually controlled by VDDCPU_B_EN */
- };
-
- vddcpu_a: regulator-vddcpu-a {
- /*
- * MP1653 Regulator.
- */
- compatible = "pwm-regulator";
-
- regulator-name = "VDDCPU_A";
- regulator-min-microvolt = <721000>;
- regulator-max-microvolt = <1022000>;
-
- vin-supply = <&main_12v>;
-
- pwms = <&pwm_ab 0 1250 0>;
- pwm-dutycycle-range = <100 0>;
-
- regulator-boot-on;
- regulator-always-on;
- };
-
- vddcpu_b: regulator-vddcpu-b {
- /*
- * MP1652 Regulator.
- */
- compatible = "pwm-regulator";
-
- regulator-name = "VDDCPU_B";
- regulator-min-microvolt = <721000>;
- regulator-max-microvolt = <1022000>;
-
- vin-supply = <&main_12v>;
-
- pwms = <&pwm_AO_cd 1 1250 0>;
- pwm-dutycycle-range = <100 0>;
-
- regulator-boot-on;
- regulator-always-on;
- };
-
- usb1_pow: regulator-usb1-pow {
- compatible = "regulator-fixed";
- regulator-name = "USB1_POW";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&vcc_5v>;
-
- /* connected to SY6280A Power Switch */
- gpio = <&gpio GPIOA_8 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- usb_pwr_en: regulator-usb-pwr-en {
- compatible = "regulator-fixed";
- regulator-name = "USB_PWR_EN";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- vin-supply = <&vcc_5v>;
-
- /* Connected to USB3 Type-A Port power enable */
- gpio = <&gpio GPIOAO_7 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- vddao_1v8: regulator-vddao-1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VDDAO_1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- vin-supply = <&vddao_3v3>;
- regulator-always-on;
- };
-
- vddao_3v3: regulator-vddao-3v3 {
- compatible = "regulator-fixed";
- regulator-name = "VDDAO_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- vin-supply = <&main_12v>;
- regulator-always-on;
- };
-
- cvbs-connector {
- compatible = "composite-video-connector";
-
- port {
- cvbs_connector_in: endpoint {
- remote-endpoint = <&cvbs_vdac_out>;
- };
- };
- };
-
- hdmi-connector {
- compatible = "hdmi-connector";
- type = "a";
-
- port {
- hdmi_connector_in: endpoint {
- remote-endpoint = <&hdmi_tx_tmds_out>;
- };
- };
- };
-
sound {
compatible = "amlogic,axg-sound-card";
model = "G12B-UGOOS-AM6";
@@ -277,110 +100,16 @@
};
};
};
-
- wifi32k: wifi32k {
- compatible = "pwm-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
- };
};
&arb {
status = "okay";
};
-&cec_AO {
- pinctrl-0 = <&cec_ao_a_h_pins>;
- pinctrl-names = "default";
- status = "disabled";
- hdmi-phandle = <&hdmi_tx>;
-};
-
-&cecb_AO {
- pinctrl-0 = <&cec_ao_b_h_pins>;
- pinctrl-names = "default";
- status = "okay";
- hdmi-phandle = <&hdmi_tx>;
-};
-
&clkc_audio {
status = "okay";
};
-&cpu0 {
- cpu-supply = <&vddcpu_b>;
- operating-points-v2 = <&cpu_opp_table_0>;
- clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu1 {
- cpu-supply = <&vddcpu_b>;
- operating-points-v2 = <&cpu_opp_table_0>;
- clocks = <&clkc CLKID_CPU_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu100 {
- cpu-supply = <&vddcpu_a>;
- operating-points-v2 = <&cpub_opp_table_1>;
- clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu101 {
- cpu-supply = <&vddcpu_a>;
- operating-points-v2 = <&cpub_opp_table_1>;
- clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu102 {
- cpu-supply = <&vddcpu_a>;
- operating-points-v2 = <&cpub_opp_table_1>;
- clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
-};
-
-&cpu103 {
- cpu-supply = <&vddcpu_a>;
- operating-points-v2 = <&cpub_opp_table_1>;
- clocks = <&clkc CLKID_CPUB_CLK>;
- clock-latency = <50000>;
-};
-
-&cvbs_vdac_port {
- cvbs_vdac_out: endpoint {
- remote-endpoint = <&cvbs_connector_in>;
- };
-};
-
-&ext_mdio {
- external_phy: ethernet-phy@0 {
- /* Realtek RTL8211F (0x001cc916) */
- reg = <0>;
- max-speed = <1000>;
-
- reset-assert-us = <10000>;
- reset-deassert-us = <30000>;
- reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
-
- interrupt-parent = <&gpio_intc>;
- /* MAC_INTR on GPIOZ_14 */
- interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
- };
-};
-
-&ethmac {
- pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
- pinctrl-names = "default";
- status = "okay";
- phy-mode = "rgmii";
- phy-handle = <&external_phy>;
- amlogic,tx-delay-ns = <2>;
-};
-
&frddr_a {
status = "okay";
};
@@ -393,112 +122,10 @@
status = "okay";
};
-&hdmi_tx {
- status = "okay";
- pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
- pinctrl-names = "default";
- hdmi-supply = <&vcc_5v>;
-};
-
-&hdmi_tx_tmds_port {
- hdmi_tx_tmds_out: endpoint {
- remote-endpoint = <&hdmi_connector_in>;
- };
-};
-
&ir {
- status = "okay";
- pinctrl-0 = <&remote_input_ao_pins>;
- pinctrl-names = "default";
linux,rc-map-name = "rc-khadas";
};
-&pwm_ab {
- pinctrl-0 = <&pwm_a_e_pins>;
- pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
- status = "okay";
-};
-
-&pwm_AO_cd {
- pinctrl-0 = <&pwm_ao_d_e_pins>;
- pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
- status = "okay";
-};
-
-&pwm_ef {
- pinctrl-0 = <&pwm_e_pins>;
- pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
- status = "okay";
-};
-
-/* SDIO */
-&sd_emmc_a {
- status = "okay";
- pinctrl-0 = <&sdio_pins>;
- pinctrl-1 = <&sdio_clk_gate_pins>;
- pinctrl-names = "default", "clk-gate";
- #address-cells = <1>;
- #size-cells = <0>;
-
- bus-width = <4>;
- cap-sd-highspeed;
- sd-uhs-sdr50;
- max-frequency = <100000000>;
-
- non-removable;
- disable-wp;
-
- mmc-pwrseq = <&sdio_pwrseq>;
-
- vmmc-supply = <&vddao_3v3>;
- vqmmc-supply = <&vddao_1v8>;
-
- brcmf: wifi@1 {
- reg = <1>;
- compatible = "brcm,bcm4329-fmac";
- };
-};
-
-/* SD card */
-&sd_emmc_b {
- status = "okay";
- pinctrl-0 = <&sdcard_c_pins>;
- pinctrl-1 = <&sdcard_clk_gate_c_pins>;
- pinctrl-names = "default", "clk-gate";
-
- bus-width = <4>;
- cap-sd-highspeed;
- max-frequency = <50000000>;
- disable-wp;
-
- cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
- vmmc-supply = <&vddao_3v3>;
- vqmmc-supply = <&vddao_3v3>;
-};
-
-/* eMMC */
-&sd_emmc_c {
- status = "okay";
- pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
- pinctrl-1 = <&emmc_clk_gate_pins>;
- pinctrl-names = "default", "clk-gate";
-
- bus-width = <8>;
- cap-mmc-highspeed;
- max-frequency = <100000000>;
- disable-wp;
-
- mmc-pwrseq = <&emmc_pwrseq>;
- vmmc-supply = <&vcc_3v3>;
- vqmmc-supply = <&flash_1v8>;
-};
-
&spdifout {
pinctrl-0 = <&spdif_out_h_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
new file mode 100644
index 000000000000..98b70d216a6f
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (c) 2019 Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-g12b.dtsi"
+#include "meson-g12b-s922x.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+
+/ {
+ aliases {
+ serial0 = &uart_AO;
+ ethernet0 = &ethmac;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+ clocks = <&wifi32k>;
+ clock-names = "ext_clock";
+ };
+
+ flash_1v8: regulator-flash_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "FLASH_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ main_12v: regulator-main_12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ vcc_5v: regulator-vcc_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&main_12v>;
+
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ };
+
+ vcc_1v8: regulator-vcc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ /* FIXME: actually controlled by VDDCPU_B_EN */
+ };
+
+ vddcpu_a: regulator-vddcpu-a {
+ /*
+ * MP1653 Regulator.
+ */
+ compatible = "pwm-regulator";
+
+ regulator-name = "VDDCPU_A";
+ regulator-min-microvolt = <721000>;
+ regulator-max-microvolt = <1022000>;
+
+ vin-supply = <&main_12v>;
+
+ pwms = <&pwm_ab 0 1250 0>;
+ pwm-dutycycle-range = <100 0>;
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vddcpu_b: regulator-vddcpu-b {
+ /*
+ * MP1652 Regulator.
+ */
+ compatible = "pwm-regulator";
+
+ regulator-name = "VDDCPU_B";
+ regulator-min-microvolt = <721000>;
+ regulator-max-microvolt = <1022000>;
+
+ vin-supply = <&main_12v>;
+
+ pwms = <&pwm_AO_cd 1 1250 0>;
+ pwm-dutycycle-range = <100 0>;
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ usb1_pow: regulator-usb1-pow {
+ compatible = "regulator-fixed";
+ regulator-name = "USB1_POW";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ /* connected to SY6280A Power Switch */
+ gpio = <&gpio GPIOA_8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_pwr_en: regulator-usb-pwr-en {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_PWR_EN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ /* Connected to USB3 Type-A Port power enable */
+ gpio = <&gpio GPIOAO_7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vddao_1v8: regulator-vddao-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-always-on;
+ };
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ wifi32k: wifi32k {
+ compatible = "pwm-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
+ };
+};
+
+&cec_AO {
+ pinctrl-0 = <&cec_ao_a_h_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cecb_AO {
+ pinctrl-0 = <&cec_ao_b_h_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cpu0 {
+ cpu-supply = <&vddcpu_b>;
+ operating-points-v2 = <&cpu_opp_table_0>;
+ clocks = <&clkc CLKID_CPU_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu1 {
+ cpu-supply = <&vddcpu_b>;
+ operating-points-v2 = <&cpu_opp_table_0>;
+ clocks = <&clkc CLKID_CPU_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu100 {
+ cpu-supply = <&vddcpu_a>;
+ operating-points-v2 = <&cpub_opp_table_1>;
+ clocks = <&clkc CLKID_CPUB_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu101 {
+ cpu-supply = <&vddcpu_a>;
+ operating-points-v2 = <&cpub_opp_table_1>;
+ clocks = <&clkc CLKID_CPUB_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu102 {
+ cpu-supply = <&vddcpu_a>;
+ operating-points-v2 = <&cpub_opp_table_1>;
+ clocks = <&clkc CLKID_CPUB_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu103 {
+ cpu-supply = <&vddcpu_a>;
+ operating-points-v2 = <&cpub_opp_table_1>;
+ clocks = <&clkc CLKID_CPUB_CLK>;
+ clock-latency = <50000>;
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&ext_mdio {
+ external_phy: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ max-speed = <1000>;
+
+ reset-assert-us = <10000>;
+ reset-deassert-us = <30000>;
+ reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_14 */
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&ethmac {
+ pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&vcc_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
+&pwm_ab {
+ pinctrl-0 = <&pwm_a_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&xtal>;
+ clock-names = "clkin0";
+ status = "okay";
+};
+
+&pwm_AO_cd {
+ pinctrl-0 = <&pwm_ao_d_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&xtal>;
+ clock-names = "clkin1";
+ status = "okay";
+};
+
+&pwm_ef {
+ pinctrl-0 = <&pwm_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&xtal>;
+ clock-names = "clkin0";
+ status = "okay";
+};
+
+/* SDIO */
+&sd_emmc_a {
+ status = "okay";
+ pinctrl-0 = <&sdio_pins>;
+ pinctrl-1 = <&sdio_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ max-frequency = <100000000>;
+
+ non-removable;
+ disable-wp;
+
+ mmc-pwrseq = <&sdio_pwrseq>;
+
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddao_1v8>;
+
+ brcmf: wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_c_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_c_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <50000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&vddao_3v3>;
+ vqmmc-supply = <&vddao_3v3>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <100000000>;
+ disable-wp;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&flash_1v8>;
+};
+
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
+ };
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "host";
+ vbus-supply = <&usb_pwr_en>;
+};
+
+&usb2_phy0 {
+ phy-supply = <&usb1_pow>;
+};
+
+&usb2_phy1 {
+ phy-supply = <&usb1_pow>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
index 6dbc3968045b..9b8548e5f6e5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b.dtsi
@@ -113,3 +113,25 @@
compatible = "amlogic,g12b-clkc";
};
+&cpu_thermal {
+ cooling-maps {
+ map0 {
+ trip = <&cpu_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu100 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu101 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu102 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu103 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu100 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu101 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu102 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu103 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
index 248b018c83d5..c2480bab8d33 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/sound/meson-aiu.h>
/ {
adc-keys {
@@ -29,6 +30,13 @@
spi0 = &spifc;
};
+ dio2133: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ sound-name-prefix = "AU2";
+ VCC-supply = <&vcc5v>;
+ enable-gpios = <&gpio GPIOH_5 GPIO_ACTIVE_HIGH>;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -96,14 +104,14 @@
leds {
compatible = "gpio-leds";
- green {
+ led-green {
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_DISK_ACTIVITY;
gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "disk-activity";
};
- blue {
+ led-blue {
color = <LED_COLOR_ID_BLUE>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>;
@@ -175,6 +183,69 @@
regulator-settling-time-up-us = <200>;
regulator-settling-time-down-us = <50000>;
};
+
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "GXL-LIBRETECH-S9XX-PC";
+ audio-aux-devs = <&dio2133>;
+ audio-widgets = "Speaker", "7J4-14 LEFT",
+ "Speaker", "7J4-11 RIGHT";
+ audio-routing = "AU2 INL", "ACODEC LOLN",
+ "AU2 INR", "ACODEC LORN",
+ "7J4-14 LEFT", "AU2 OUTL",
+ "7J4-11 RIGHT", "AU2 OUTR";
+ assigned-clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&aiu AIU_HDMI CTRL_I2S>;
+ };
+
+ codec-1 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_I2S>;
+ };
+ };
+
+ dai-link-2 {
+ sound-dai = <&aiu AIU_HDMI CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ dai-link-3 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&acodec>;
+ };
+ };
+ };
+};
+
+&acodec {
+ AVDD-supply = <&vddio_ao18>;
+ status = "okay";
+};
+
+&aiu {
+ status = "okay";
};
&cec_AO {
@@ -360,8 +431,9 @@
status = "okay";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
&usb2_phy0 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 12d5e333e5f2..6b57e15aade3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -8,12 +8,28 @@
* the pin-compatible S912 (GXM) or S905D (GXL) SoCs.
*/
+#include <dt-bindings/sound/meson-aiu.h>
+
/ {
aliases {
serial0 = &uart_AO;
ethernet0 = &ethmac;
};
+ dio2133: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ sound-name-prefix = "AU2";
+ VCC-supply = <&hdmi_5v>;
+ enable-gpios = <&gpio GPIOH_5 GPIO_ACTIVE_HIGH>;
+ };
+
+ spdif_dit: audio-codec-0 {
+ #sound-dai-cells = <0>;
+ compatible = "linux,spdif-dit";
+ status = "okay";
+ sound-name-prefix = "DIT";
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -102,6 +118,85 @@
};
};
};
+
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "GX-P230-Q200";
+ audio-aux-devs = <&dio2133>;
+ audio-widgets = "Line", "Lineout";
+ audio-routing = "AU2 INL", "ACODEC LOLP",
+ "AU2 INR", "ACODEC LORP",
+ "AU2 INL", "ACODEC LOLN",
+ "AU2 INR", "ACODEC LORN",
+ "Lineout", "AU2 OUTL",
+ "Lineout", "AU2 OUTR";
+ assigned-clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&aiu AIU_CPU CPU_SPDIF_FIFO>;
+ };
+
+ dai-link-2 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&aiu AIU_HDMI CTRL_I2S>;
+ };
+
+ codec-1 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_I2S>;
+ };
+ };
+
+ dai-link-3 {
+ sound-dai = <&aiu AIU_CPU CPU_SPDIF_ENCODER>;
+
+ codec-0 {
+ sound-dai = <&spdif_dit>;
+ };
+ };
+
+ dai-link-4 {
+ sound-dai = <&aiu AIU_HDMI CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ dai-link-5 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&acodec>;
+ };
+ };
+ };
+};
+
+&acodec {
+ AVDD-supply = <&vddio_ao18>;
+ status = "okay";
+};
+
+&aiu {
+ status = "okay";
+ pinctrl-0 = <&spdif_out_h_pins>;
+ pinctrl-names = "default";
+
};
&cec_AO {
@@ -223,6 +318,7 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "otg";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 03f79fe045b7..ba63c36b22e0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -278,6 +278,17 @@
#reset-cells = <1>;
};
+ aiu: audio-controller@5400 {
+ compatible = "amlogic,aiu";
+ #sound-dai-cells = <2>;
+ sound-name-prefix = "AIU";
+ reg = <0x0 0x5400 0x0 0x2ac>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 50 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "i2s", "spdif";
+ status = "disabled";
+ };
+
uart_A: serial@84c0 {
compatible = "amlogic,meson-gx-uart";
reg = <0x0 0x84c0 0x0 0x18>;
@@ -398,20 +409,20 @@
};
sram: sram@c8000000 {
- compatible = "amlogic,meson-gx-sram", "amlogic,meson-gxbb-sram", "mmio-sram";
+ compatible = "mmio-sram";
reg = <0x0 0xc8000000 0x0 0x14000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x0 0xc8000000 0x14000>;
- cpu_scp_lpri: scp-shmem@0 {
- compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem";
+ cpu_scp_lpri: scp-sram@0 {
+ compatible = "amlogic,meson-gxbb-scp-shmem";
reg = <0x13000 0x400>;
};
- cpu_scp_hpri: scp-shmem@200 {
- compatible = "amlogic,meson-gx-scp-shmem", "amlogic,meson-gxbb-scp-shmem";
+ cpu_scp_hpri: scp-sram@200 {
+ compatible = "amlogic,meson-gxbb-scp-shmem";
reg = <0x13400 0x400>;
};
};
@@ -626,6 +637,8 @@
interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
#address-cells = <1>;
#size-cells = <0>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "HDMITX";
status = "disabled";
/* VPU VENC Input */
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
index 6c9cc45fb417..e8394a8269ee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
@@ -11,7 +11,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
/ {
- compatible = "videostrong,kii-pro", "amlogic,p201", "amlogic,s905", "amlogic,meson-gxbb";
+ compatible = "videostrong,kii-pro", "amlogic,meson-gxbb";
model = "Videostrong KII Pro";
leds {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index d6ca684e0e61..7be3e354093b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -29,7 +29,7 @@
leds {
compatible = "gpio-leds";
- stat {
+ led-stat {
label = "nanopi-k2:blue:stat";
gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 65ec7dea828c..67d901ed2fa3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -31,7 +31,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-blue {
label = "a95x:system-status";
gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index b46ef985bb44..70fcfb7b0683 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -49,7 +49,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-blue {
label = "c2:blue:alive";
gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 45cb83625951..222ee8069cfa 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -20,7 +20,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-blue {
label = "vega-s95:blue:on";
gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
index 1d32d1f6d032..2ab8a3d10079 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek-play2.dts
@@ -14,13 +14,13 @@
model = "WeTek Play 2";
leds {
- wifi {
+ led-wifi {
label = "wetek-play:wifi-status";
gpios = <&gpio GPIODV_26 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- ethernet {
+ led-ethernet {
label = "wetek-play:ethernet-status";
gpios = <&gpio GPIODV_27 GPIO_ACTIVE_HIGH>;
default-state = "off";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
index dee51cf95223..ad812854a107 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
@@ -25,7 +25,7 @@
leds {
compatible = "gpio-leds";
- system {
+ led-system {
label = "wetek-play:system-status";
gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_HIGH>;
default-state = "on";
@@ -149,6 +149,10 @@
reset-assert-us = <10000>;
reset-deassert-us = <30000>;
reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_15 */
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
};
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 0cb40326b0d3..234490d3ee68 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -60,6 +60,29 @@
};
};
+&aiu {
+ compatible = "amlogic,aiu-gxbb", "amlogic,aiu";
+ clocks = <&clkc CLKID_AIU_GLUE>,
+ <&clkc CLKID_I2S_OUT>,
+ <&clkc CLKID_AOCLK_GATE>,
+ <&clkc CLKID_CTS_AMCLK>,
+ <&clkc CLKID_MIXER_IFACE>,
+ <&clkc CLKID_IEC958>,
+ <&clkc CLKID_IEC958_GATE>,
+ <&clkc CLKID_CTS_MCLK_I958>,
+ <&clkc CLKID_CTS_I958>;
+ clock-names = "pclk",
+ "i2s_pclk",
+ "i2s_aoclk",
+ "i2s_mclk",
+ "i2s_mixer",
+ "spdif_pclk",
+ "spdif_aoclk",
+ "spdif_mclk",
+ "spdif_mclk_sel";
+ resets = <&reset RESET_AIU>;
+};
+
&aobus {
pinctrl_aobus: pinctrl@14 {
compatible = "amlogic,meson-gxbb-aobus-pinctrl";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
index 4d5949496596..6a226faab183 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts
@@ -8,6 +8,7 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
+#include <dt-bindings/sound/meson-aiu.h>
#include "meson-gxl-s905x.dtsi"
@@ -97,6 +98,15 @@
regulator-always-on;
};
+ vddio_ao18: regulator-vddio_ao18 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
vddio_boot: regulator-vddio_boot {
compatible = "regulator-fixed";
regulator-name = "VDDIO_BOOT";
@@ -105,6 +115,66 @@
vin-supply = <&vcc_3v3>;
regulator-always-on;
};
+
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "GXL-LIBRETECH-S805X-AC";
+ audio-widgets = "Speaker", "9J5-3 LEFT",
+ "Speaker", "9J5-2 RIGHT";
+ audio-routing = "9J5-3 LEFT", "ACODEC LOLN",
+ "9J5-2 RIGHT", "ACODEC LORN";
+ assigned-clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&aiu AIU_HDMI CTRL_I2S>;
+ };
+
+ codec-1 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_I2S>;
+ };
+ };
+
+ dai-link-2 {
+ sound-dai = <&aiu AIU_HDMI CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ dai-link-3 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&acodec>;
+ };
+ };
+ };
+};
+
+&acodec {
+ AVDD-supply = <&vddio_ao18>;
+ status = "okay";
+};
+
+&aiu {
+ status = "okay";
};
&cec_AO {
@@ -243,6 +313,7 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
index a1119cfb0280..867e30f1d62b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
@@ -216,6 +216,7 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
index b5667f1fb2c8..9ef210f17b4a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
@@ -29,3 +29,7 @@
&cvbs_vdac_port {
status = "disabled";
};
+
+&usb {
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
new file mode 100644
index 000000000000..0b95e9ecbef0
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905d.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "smartlabs,sml5442tw", "amlogic,s905d", "amlogic,meson-gxl";
+ model = "SmartLabs SML-5442TW";
+
+ leds {
+ compatible = "gpio-leds";
+
+ yellow {
+ color = <LED_COLOR_ID_YELLOW>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ green {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+ red {
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio GPIODV_27 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+};
+
+&ethmac {
+ status = "okay";
+ phy-mode = "rmii";
+ phy-handle = <&internal_phy>;
+};
+
+&i2c_A {
+ status = "okay";
+ pinctrl-0 = <&i2c_a_pins>;
+ pinctrl-names = "default";
+};
+
+&internal_phy {
+ pinctrl-0 = <&eth_link_led_pins>, <&eth_act_led_pins>;
+ pinctrl-names = "default";
+};
+
+/* This is connected to the Bluetooth module: */
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "qcom,qca9377-bt";
+ enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-p281.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-p281.dts
index 6509c4950950..ecc9df7ca023 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-p281.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-p281.dts
@@ -20,3 +20,7 @@
reg = <0x0 0x0 0x0 0x40000000>;
};
};
+
+&usb {
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-tx3-mini.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-tx3-mini.dts
index dd729ac2300d..6705c2082a78 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-tx3-mini.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-tx3-mini.dts
@@ -24,3 +24,7 @@
&ir {
linux,rc-map-name = "rc-tanix-tx3mini";
};
+
+&usb {
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
index 440bc23c7342..8bcdffdf55d0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -207,3 +207,7 @@
pinctrl-0 = <&uart_ao_b_pins>;
pinctrl-names = "default";
};
+
+&usb {
+ dr_mode = "peripheral";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index e8348b2728db..5ae7bb6209cb 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -8,6 +8,7 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
+#include <dt-bindings/sound/meson-aiu.h>
#include "meson-gxl-s905x.dtsi"
@@ -21,6 +22,13 @@
ethernet0 = &ethmac;
};
+ dio2133: analog-amplifier {
+ compatible = "simple-audio-amplifier";
+ sound-name-prefix = "AU2";
+ VCC-supply = <&hdmi_5v>;
+ enable-gpios = <&gpio GPIOH_5 GPIO_ACTIVE_HIGH>;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
@@ -54,14 +62,14 @@
leds {
compatible = "gpio-leds";
- system {
+ led-system {
label = "librecomputer:system-status";
gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
default-state = "on";
panic-indicator;
};
- blue {
+ led-blue {
label = "librecomputer:blue";
gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -124,6 +132,68 @@
regulator-max-microvolt = <1800000>;
vin-supply = <&vcc_3v3>;
};
+
+ sound {
+ compatible = "amlogic,gx-sound-card";
+ model = "GXL-LIBRETECH-S905X-CC";
+ audio-aux-devs = <&dio2133>;
+ audio-widgets = "Line", "Lineout";
+ audio-routing = "AU2 INL", "ACODEC LOLN",
+ "AU2 INR", "ACODEC LORN",
+ "Lineout", "AU2 OUTL",
+ "Lineout", "AU2 OUTR";
+ assigned-clocks = <&clkc CLKID_MPLL0>,
+ <&clkc CLKID_MPLL1>,
+ <&clkc CLKID_MPLL2>;
+ assigned-clock-parents = <0>, <0>, <0>;
+ assigned-clock-rates = <294912000>,
+ <270950400>,
+ <393216000>;
+ status = "okay";
+
+ dai-link-0 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_FIFO>;
+ };
+
+ dai-link-1 {
+ sound-dai = <&aiu AIU_CPU CPU_I2S_ENCODER>;
+ dai-format = "i2s";
+ mclk-fs = <256>;
+
+ codec-0 {
+ sound-dai = <&aiu AIU_HDMI CTRL_I2S>;
+ };
+
+ codec-1 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_I2S>;
+ };
+ };
+
+ dai-link-2 {
+ sound-dai = <&aiu AIU_HDMI CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&hdmi_tx>;
+ };
+ };
+
+ dai-link-3 {
+ sound-dai = <&aiu AIU_ACODEC CTRL_OUT>;
+
+ codec-0 {
+ sound-dai = <&acodec>;
+ };
+ };
+ };
+};
+
+&acodec {
+ AVDD-supply = <&vddio_ao18>;
+ status = "okay";
+};
+
+&aiu {
+ status = "okay";
};
&cec_AO {
@@ -272,8 +342,9 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
&usb2_phy0 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 62dd87821ce5..f1acca5c4434 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -218,6 +218,7 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 6ac678f88bd8..05cb2f5e5c36 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -195,8 +195,9 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
&usb2_phy0 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 259d86399390..fc59c8534c0f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -14,29 +14,57 @@
compatible = "amlogic,meson-gxl";
soc {
- usb0: usb@c9000000 {
- status = "disabled";
- compatible = "amlogic,meson-gxl-dwc3";
+ usb: usb@d0078080 {
+ compatible = "amlogic,meson-gxl-usb-ctrl";
+ reg = <0x0 0xd0078080 0x0 0x20>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
- clocks = <&clkc CLKID_USB>;
- clock-names = "usb_general";
+ clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB1_DDR_BRIDGE>;
+ clock-names = "usb_ctrl", "ddr";
resets = <&reset RESET_USB_OTG>;
- reset-names = "usb_otg";
- dwc3: dwc3@c9000000 {
+ dr_mode = "otg";
+
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
+
+ dwc2: usb@c9100000 {
+ compatible = "amlogic,meson-g12a-usb", "snps,dwc2";
+ reg = <0x0 0xc9100000 0x0 0x40000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_USB1>;
+ clock-names = "otg";
+ phys = <&usb2_phy1>;
+ dr_mode = "peripheral";
+ g-rx-fifo-size = <192>;
+ g-np-tx-fifo-size = <128>;
+ g-tx-fifo-size = <128 128 16 16 16>;
+ };
+
+ dwc3: usb@c9000000 {
compatible = "snps,dwc3";
reg = <0x0 0xc9000000 0x0 0x100000>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
maximum-speed = "high-speed";
snps,dis_u2_susphy_quirk;
- phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
};
};
+ acodec: audio-controller@c8832000 {
+ compatible = "amlogic,t9015";
+ reg = <0x0 0xc8832000 0x0 0x14>;
+ #sound-dai-cells = <0>;
+ sound-name-prefix = "ACODEC";
+ clocks = <&clkc CLKID_ACODEC>;
+ clock-names = "pclk";
+ resets = <&reset RESET_ACODEC>;
+ status = "disabled";
+ };
+
crypto: crypto@c883e000 {
compatible = "amlogic,gxl-crypto";
reg = <0x0 0xc883e000 0x0 0x36>;
@@ -49,6 +77,29 @@
};
};
+&aiu {
+ compatible = "amlogic,aiu-gxl", "amlogic,aiu";
+ clocks = <&clkc CLKID_AIU_GLUE>,
+ <&clkc CLKID_I2S_OUT>,
+ <&clkc CLKID_AOCLK_GATE>,
+ <&clkc CLKID_CTS_AMCLK>,
+ <&clkc CLKID_MIXER_IFACE>,
+ <&clkc CLKID_IEC958>,
+ <&clkc CLKID_IEC958_GATE>,
+ <&clkc CLKID_CTS_MCLK_I958>,
+ <&clkc CLKID_CTS_I958>;
+ clock-names = "pclk",
+ "i2s_pclk",
+ "i2s_aoclk",
+ "i2s_mclk",
+ "i2s_mixer",
+ "spdif_pclk",
+ "spdif_aoclk",
+ "spdif_mclk",
+ "spdif_mclk_sel";
+ resets = <&reset RESET_AIU>;
+};
+
&apb {
usb2_phy0: phy@78000 {
compatible = "amlogic,meson-gxl-usb2-phy";
@@ -71,18 +122,6 @@
reset-names = "phy";
status = "okay";
};
-
- usb3_phy: phy@78080 {
- compatible = "amlogic,meson-gxl-usb3-phy";
- #phy-cells = <0>;
- reg = <0x0 0x78080 0x0 0x20>;
- interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
- clock-names = "phy", "peripheral";
- resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
- reset-names = "phy", "peripheral";
- status = "okay";
- };
};
&efuse {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 27eeab71ec77..bff8ec2c1c70 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -380,6 +380,7 @@
vref-supply = <&vddio_ao18>;
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "peripheral";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index c2bd4dbbf38c..83eca3af44ce 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -179,6 +179,7 @@
pinctrl-names = "default";
};
-&usb0 {
+&usb {
status = "okay";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 420a88e9a195..c89c9f846fb1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -36,13 +36,13 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-blue {
label = "rbox-pro:blue:on";
gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- red {
+ led-red {
label = "rbox-pro:red:standby";
gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>;
default-state = "off";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-vega-s96.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-vega-s96.dts
index 0bdf51d041ae..d3fdba4da9a6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-vega-s96.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-vega-s96.dts
@@ -39,3 +39,7 @@
&ir {
linux,rc-map-name = "rc-vega-s9x";
};
+
+&usb {
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index b6f89f108e28..40e3e123e05b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -169,8 +169,11 @@
compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
};
-&dwc3 {
- phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
+&usb {
+ compatible = "amlogic,meson-gxm-usb-ctrl";
+
+ phy-names = "usb2-phy0", "usb2-phy1", "usb2-phy2";
+ phys = <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
};
&vdec {
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index 094ecf2222bb..1ef1e3672b96 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -39,13 +39,13 @@
leds {
compatible = "gpio-leds";
- white {
+ led-white {
label = "vim3:white:sys";
gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
};
- red {
+ led-red {
label = "vim3:red";
gpios = <&gpio_expander 5 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
new file mode 100644
index 000000000000..00d90b30f8b4
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 Dongjin Kim <tobetter@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-sm1.dtsi"
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "hardkernel,odroid-c4", "amlogic,sm1";
+ model = "Hardkernel ODROID-C4";
+
+ aliases {
+ serial0 = &uart_AO;
+ ethernet0 = &ethmac;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ panic-indicator;
+ };
+ };
+
+ tflash_vdd: regulator-tflash_vdd {
+ compatible = "regulator-fixed";
+
+ regulator-name = "TFLASH_VDD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ tf_io: gpio-regulator-tf_io {
+ compatible = "regulator-gpio";
+
+ regulator-name = "TF_IO";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+
+ states = <3300000 0>,
+ <1800000 1>;
+ };
+
+ flash_1v8: regulator-flash_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "FLASH_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ main_12v: regulator-main_12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ vcc_5v: regulator-vcc_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ vin-supply = <&main_12v>;
+ };
+
+ vcc_1v8: regulator-vcc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ /* FIXME: actually controlled by VDDCPU_B_EN */
+ };
+
+ vddcpu: regulator-vddcpu {
+ /*
+ * MP8756GD Regulator.
+ */
+ compatible = "pwm-regulator";
+
+ regulator-name = "VDDCPU";
+ regulator-min-microvolt = <721000>;
+ regulator-max-microvolt = <1022000>;
+
+ vin-supply = <&main_12v>;
+
+ pwms = <&pwm_AO_cd 1 1250 0>;
+ pwm-dutycycle-range = <100 0>;
+
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ hub_5v: regulator-hub_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "HUB_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ /* Connected to the Hub CHIPENABLE, LOW sets low power state */
+ gpio = <&gpio GPIOH_4 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ usb_pwr_en: regulator-usb_pwr_en {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_PWR_EN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ /* Connected to the microUSB port power enable */
+ gpio = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vddao_1v8: regulator-vddao_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-always-on;
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu1 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU1_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu2 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU2_CLK>;
+ clock-latency = <50000>;
+};
+
+&cpu3 {
+ cpu-supply = <&vddcpu>;
+ operating-points-v2 = <&cpu_opp_table>;
+ clocks = <&clkc CLKID_CPU3_CLK>;
+ clock-latency = <50000>;
+};
+
+&ext_mdio {
+ external_phy: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ max-speed = <1000>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_14 */
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&ethmac {
+ pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+};
+
+&gpio {
+ gpio-line-names =
+ /* GPIOZ */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ /* GPIOH */
+ "", "", "", "", "",
+ "PIN_36", /* GPIOH_5 */
+ "PIN_26", /* GPIOH_6 */
+ "PIN_32", /* GPIOH_7 */
+ "",
+ /* BOOT */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ /* GPIOC */
+ "", "", "", "", "", "", "", "",
+ /* GPIOA */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "",
+ "PIN_27", /* GPIOA_14 */
+ "PIN_28", /* GPIOA_15 */
+ /* GPIOX */
+ "PIN_16", /* GPIOX_0 */
+ "PIN_18", /* GPIOX_1 */
+ "PIN_22", /* GPIOX_2 */
+ "PIN_11", /* GPIOX_3 */
+ "PIN_13", /* GPIOX_4 */
+ "PIN_7", /* GPIOX_5 */
+ "PIN_33", /* GPIOX_6 */
+ "PIN_15", /* GPIOX_7 */
+ "PIN_19", /* GPIOX_8 */
+ "PIN_21", /* GPIOX_9 */
+ "PIN_24", /* GPIOX_10 */
+ "PIN_23", /* GPIOX_11 */
+ "PIN_8", /* GPIOX_12 */
+ "PIN_10", /* GPIOX_13 */
+ "PIN_29", /* GPIOX_14 */
+ "PIN_31", /* GPIOX_15 */
+ "PIN_12", /* GPIOX_16 */
+ "PIN_3", /* GPIOX_17 */
+ "PIN_5", /* GPIOX_18 */
+ "PIN_35"; /* GPIOX_19 */
+
+ /*
+ * WARNING: The USB Hub on the Odroid-C4 needs a reset signal
+	 * to be turned high in order to be detected by the USB Controller.
+	 * This signal should be handled by a USB-specific power sequence
+ * in order to reset the Hub when USB bus is powered down.
+ */
+ usb-hub {
+ gpio-hog;
+ gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "usb-hub-reset";
+ };
+};
+
+&gpio_ao {
+ gpio-line-names =
+ /* GPIOAO */
+ "", "", "", "",
+ "PIN_47", /* GPIOAO_4 */
+ "", "",
+ "PIN_45", /* GPIOAO_7 */
+ "PIN_46", /* GPIOAO_8 */
+ "PIN_44", /* GPIOAO_9 */
+ "PIN_42", /* GPIOAO_10 */
+ "",
+ /* GPIOE */
+ "", "", "";
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&vcc_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+ linux,rc-map-name = "rc-odroid";
+};
+
+&pwm_AO_cd {
+ pinctrl-0 = <&pwm_ao_d_e_pins>;
+ pinctrl-names = "default";
+ clocks = <&xtal>;
+ clock-names = "clkin1";
+ status = "okay";
+};
+
+&saradc {
+ status = "okay";
+};
+
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_c_pins>;
+ pinctrl-1 = <&sdcard_clk_gate_c_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <200000000>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ disable-wp;
+
+ cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&tflash_vdd>;
+ vqmmc-supply = <&tf_io>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
+ pinctrl-1 = <&emmc_clk_gate_pins>;
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ max-frequency = <200000000>;
+ disable-wp;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&flash_1v8>;
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+ vbus-supply = <&usb_pwr_en>;
+};
+
+&usb2_phy0 {
+ phy-supply = <&vcc_5v>;
+};
+
+&usb2_phy1 {
+ /* Enable the hub which is connected to this port */
+ phy-supply = <&hub_5v>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index dfb2438851c0..5ab139a34c01 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -104,7 +104,7 @@
leds {
compatible = "gpio-leds";
- bluetooth {
+ led-bluetooth {
label = "sei610:blue:bt";
gpios = <&gpio GPIOC_7 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
default-state = "off";
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index d4ec735fb1a5..71317f5aada1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -56,6 +56,7 @@
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&l2>;
+ #cooling-cells = <2>;
};
cpu1: cpu@1 {
@@ -64,6 +65,7 @@
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&l2>;
+ #cooling-cells = <2>;
};
cpu2: cpu@2 {
@@ -72,6 +74,7 @@
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&l2>;
+ #cooling-cells = <2>;
};
cpu3: cpu@3 {
@@ -80,6 +83,7 @@
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&l2>;
+ #cooling-cells = <2>;
};
l2: l2-cache0 {
@@ -466,6 +470,26 @@
compatible = "amlogic,sm1-clkc";
};
+&cpu_thermal {
+ cooling-maps {
+ map0 {
+ trip = <&cpu_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+
+ map1 {
+ trip = <&cpu_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
&ethmac {
power-domains = <&pwrc PWRC_SM1_ETH_ID>;
};
diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi
index 15fe81738e94..655fdcce1561 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi
@@ -6,9 +6,9 @@
/ {
gic: interrupt-controller@2c001000 {
- compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ compatible = "arm,gic-400", "arm,cortex-a15-gic";
#interrupt-cells = <3>;
- #address-cells = <2>;
+ #address-cells = <1>;
interrupt-controller;
reg = <0x0 0x2c001000 0 0x1000>,
<0x0 0x2c002000 0 0x2000>,
diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi
index f2c75c756039..e4a3c7dbcc20 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi
@@ -8,9 +8,9 @@
gic: interrupt-controller@2f000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x2f000000 0x100000>;
interrupt-controller;
reg = <0x0 0x2f000000 0x0 0x10000>,
<0x0 0x2f100000 0x0 0x200000>,
@@ -19,10 +19,11 @@
<0x0 0x2c02f000 0x0 0x2000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
- its: its@2f020000 {
+ its: msi-controller@2f020000 {
compatible = "arm,gic-v3-its";
msi-controller;
- reg = <0x0 0x2f020000 0x0 0x20000>;
+ #msi-cells = <1>;
+ reg = <0x20000 0x20000>;
};
};
};
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 12f039fa3dad..05ae893d1b2e 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -92,6 +92,27 @@
timeout-sec = <30>;
};
+ v2m_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "v2m:clk24mhz";
+ };
+
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "v2m:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "v2m:refclk32khz";
+ };
+
bus@8000000 {
compatible = "arm,vexpress,v2m-p1", "simple-bus";
arm,v2m-memory-map = "rs1";
@@ -107,78 +128,57 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 63>;
- interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-
- ethernet@2,02000000 {
+ interrupt-map = <0 0 0 &gic 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 13 &gic 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 14 &gic 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 15 &gic 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 16 &gic 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 17 &gic 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 18 &gic 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 19 &gic 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 20 &gic 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 21 &gic 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 22 &gic 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 23 &gic 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 24 &gic 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 25 &gic 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 26 &gic 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 27 &gic 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 28 &gic 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 29 &gic 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 30 &gic 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 31 &gic 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 32 &gic 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 33 &gic 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 34 &gic 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 35 &gic 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 36 &gic 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 37 &gic 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 38 &gic 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 39 &gic 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 40 &gic 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 41 &gic 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 42 &gic 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+ ethernet@202000000 {
compatible = "smsc,lan91c111";
reg = <2 0x02000000 0x10000>;
interrupts = <15>;
};
- v2m_clk24mhz: clk24mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "v2m:clk24mhz";
- };
-
- v2m_refclk1mhz: refclk1mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1000000>;
- clock-output-names = "v2m:refclk1mhz";
- };
-
- v2m_refclk32khz: refclk32khz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "v2m:refclk32khz";
- };
-
- iofpga@3,00000000 {
+ iofpga-bus@300000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -189,7 +189,7 @@
reg = <0x010000 0x1000>;
};
- v2m_serial0: uart@90000 {
+ v2m_serial0: serial@90000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x090000 0x1000>;
interrupts = <5>;
@@ -197,7 +197,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial1: uart@a0000 {
+ v2m_serial1: serial@a0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0a0000 0x1000>;
interrupts = <6>;
@@ -205,7 +205,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial2: uart@b0000 {
+ v2m_serial2: serial@b0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0b0000 0x1000>;
interrupts = <7>;
@@ -213,7 +213,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial3: uart@c0000 {
+ v2m_serial3: serial@c0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0c0000 0x1000>;
interrupts = <8>;
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 66381d89c1ce..b8a21092db4d 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -126,7 +126,7 @@
<0x0 0x2c02f000 0 0x2000>; // GICV
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
- its: its@2f020000 {
+ its: msi-controller@2f020000 {
#msi-cells = <1>;
compatible = "arm,gic-v3-its";
reg = <0x0 0x2f020000 0x0 0x20000>; // GITS
@@ -172,14 +172,14 @@
dma-coherent;
};
- smmu: smmu@2b400000 {
+ smmu: iommu@2b400000 {
compatible = "arm,smmu-v3";
reg = <0x0 0x2b400000 0x0 0x100000>;
interrupts = <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
- <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
+ <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
dma-coherent;
#iommu-cells = <1>;
msi-parent = <&its 0x10000>;
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index f5889281545f..f6c55877fbd9 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -11,14 +11,14 @@
compatible = "arm,armv7-timer-mem";
reg = <0x0 0x2a810000 0x0 0x10000>;
clock-frequency = <50000000>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x2a820000 0x20000>;
status = "disabled";
frame@2a830000 {
frame-number = <1>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x0 0x2a830000 0x0 0x10000>;
+ reg = <0x10000 0x10000>;
};
};
@@ -74,35 +74,35 @@
<0x0 0x2c02f000 0 0x2000>,
<0x0 0x2c04f000 0 0x2000>,
<0x0 0x2c06f000 0 0x2000>;
- #address-cells = <2>;
+ #address-cells = <1>;
#interrupt-cells = <3>;
- #size-cells = <2>;
+ #size-cells = <1>;
interrupt-controller;
interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
- ranges = <0 0 0 0x2c1c0000 0 0x40000>;
+ ranges = <0 0 0x2c1c0000 0x40000>;
v2m_0: v2m@0 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0 0 0 0x10000>;
+ reg = <0 0x10000>;
};
v2m@10000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0 0x10000 0 0x10000>;
+ reg = <0x10000 0x10000>;
};
v2m@20000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0 0x20000 0 0x10000>;
+ reg = <0x20000 0x10000>;
};
v2m@30000 {
compatible = "arm,gic-v2m-frame";
msi-controller;
- reg = <0 0x30000 0 0x10000>;
+ reg = <0x30000 0x10000>;
};
};
@@ -501,10 +501,10 @@
gpu: gpu@2d000000 {
compatible = "arm,juno-mali", "arm,mali-t624";
reg = <0 0x2d000000 0 0x10000>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "gpu", "job", "mmu";
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "job", "mmu", "gpu";
clocks = <&scpi_dvfs 2>;
power-domains = <&scpi_devpd 1>;
dma-coherent;
@@ -521,12 +521,12 @@
#size-cells = <1>;
ranges = <0 0x0 0x2e000000 0x8000>;
- cpu_scp_lpri: scp-shmem@0 {
+ cpu_scp_lpri: scp-sram@0 {
compatible = "arm,juno-scp-shmem";
reg = <0x0 0x200>;
};
- cpu_scp_hpri: scp-shmem@200 {
+ cpu_scp_hpri: scp-sram@200 {
compatible = "arm,juno-scp-shmem";
reg = <0x200 0x200>;
};
@@ -546,10 +546,10 @@
<0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
- interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &gic 0 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &gic 0 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &gic 0 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
msi-parent = <&v2m_0>;
status = "disabled";
iommu-map-mask = <0x0>; /* RC has no means to output PCI RID */
@@ -729,7 +729,7 @@
};
};
- soc_uart0: uart@7ff80000 {
+ soc_uart0: serial@7ff80000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0x7ff80000 0x0 0x1000>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
@@ -768,7 +768,7 @@
};
};
- ohci@7ffb0000 {
+ usb@7ffb0000 {
compatible = "generic-ohci";
reg = <0x0 0x7ffb0000 0x0 0x10000>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
@@ -776,7 +776,7 @@
clocks = <&soc_usb48mhz>;
};
- ehci@7ffc0000 {
+ usb@7ffc0000 {
compatible = "generic-ehci";
reg = <0x0 0x7ffc0000 0x0 0x10000>;
interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
@@ -813,28 +813,28 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 15>;
- interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 1 &gic 0 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 2 &gic 0 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 3 &gic 0 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 4 &gic 0 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 5 &gic 0 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 6 &gic 0 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 7 &gic 0 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 8 &gic 0 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 9 &gic 0 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 10 &gic 0 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 11 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 12 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
- };
-
- site2: tlx@60000000 {
+ interrupt-map = <0 0 0 &gic 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic 0 GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ site2: tlx-bus@60000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0x60000000 0x10000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0>;
- interrupt-map = <0 0 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 &gic 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index e3983ded3c3c..eeee51f1251b 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -8,36 +8,91 @@
*/
/ {
- bus@8000000 {
- mb_clk24mhz: clk24mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "juno_mb:clk24mhz";
- };
+ mb_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "juno_mb:clk24mhz";
+ };
- mb_clk25mhz: clk25mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <25000000>;
- clock-output-names = "juno_mb:clk25mhz";
- };
+ mb_clk25mhz: clk25mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "juno_mb:clk25mhz";
+ };
- v2m_refclk1mhz: refclk1mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1000000>;
- clock-output-names = "juno_mb:refclk1mhz";
- };
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "juno_mb:refclk1mhz";
+ };
- v2m_refclk32khz: refclk32khz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "juno_mb:refclk32khz";
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "juno_mb:refclk32khz";
+ };
+
+ mb_fixed_3v3: mcc-sb-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "MCC_SB_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ power-button {
+ debounce-interval = <50>;
+ wakeup-source;
+ linux,code = <116>;
+ label = "POWER";
+ gpios = <&iofpga_gpio0 0 0x4>;
+ };
+ home-button {
+ debounce-interval = <50>;
+ wakeup-source;
+ linux,code = <102>;
+ label = "HOME";
+ gpios = <&iofpga_gpio0 1 0x4>;
+ };
+ rlock-button {
+ debounce-interval = <50>;
+ wakeup-source;
+ linux,code = <152>;
+ label = "RLOCK";
+ gpios = <&iofpga_gpio0 2 0x4>;
+ };
+ vol-up-button {
+ debounce-interval = <50>;
+ wakeup-source;
+ linux,code = <115>;
+ label = "VOL+";
+ gpios = <&iofpga_gpio0 3 0x4>;
};
+ vol-down-button {
+ debounce-interval = <50>;
+ wakeup-source;
+ linux,code = <114>;
+ label = "VOL-";
+ gpios = <&iofpga_gpio0 4 0x4>;
+ };
+ nmi-button {
+ debounce-interval = <50>;
+ wakeup-source;
+ linux,code = <99>;
+ label = "NMI";
+ gpios = <&iofpga_gpio0 5 0x4>;
+ };
+ };
- motherboard {
+ bus@8000000 {
+ motherboard-bus {
compatible = "arm,vexpress,v2p-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
#size-cells = <1>;
@@ -48,62 +103,7 @@
arm,vexpress,site = <0>;
arm,v2m-memory-map = "rs1";
- mb_fixed_3v3: mcc-sb-3v3 {
- compatible = "regulator-fixed";
- regulator-name = "MCC_SB_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- gpio-keys {
- compatible = "gpio-keys";
-
- power-button {
- debounce-interval = <50>;
- wakeup-source;
- linux,code = <116>;
- label = "POWER";
- gpios = <&iofpga_gpio0 0 0x4>;
- };
- home-button {
- debounce-interval = <50>;
- wakeup-source;
- linux,code = <102>;
- label = "HOME";
- gpios = <&iofpga_gpio0 1 0x4>;
- };
- rlock-button {
- debounce-interval = <50>;
- wakeup-source;
- linux,code = <152>;
- label = "RLOCK";
- gpios = <&iofpga_gpio0 2 0x4>;
- };
- vol-up-button {
- debounce-interval = <50>;
- wakeup-source;
- linux,code = <115>;
- label = "VOL+";
- gpios = <&iofpga_gpio0 3 0x4>;
- };
- vol-down-button {
- debounce-interval = <50>;
- wakeup-source;
- linux,code = <114>;
- label = "VOL-";
- gpios = <&iofpga_gpio0 4 0x4>;
- };
- nmi-button {
- debounce-interval = <50>;
- wakeup-source;
- linux,code = <99>;
- label = "NMI";
- gpios = <&iofpga_gpio0 5 0x4>;
- };
- };
-
- flash@0,00000000 {
+ flash@0 {
/* 2 * 32MiB NOR Flash memory mounted on CS0 */
compatible = "arm,vexpress-flash", "cfi-flash";
reg = <0 0x00000000 0x04000000>;
@@ -120,7 +120,7 @@
};
};
- ethernet@2,00000000 {
+ ethernet@200000000 {
compatible = "smsc,lan9118", "smsc,lan9115";
reg = <2 0x00000000 0x10000>;
interrupts = <3>;
@@ -133,7 +133,7 @@
vddvario-supply = <&mb_fixed_3v3>;
};
- iofpga@3,00000000 {
+ iofpga-bus@300000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index c5d15cbd8cf6..3050f45bade4 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -95,7 +95,7 @@
};
gic: interrupt-controller@2c001000 {
- compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ compatible = "arm,gic-400", "arm,cortex-a15-gic";
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
index 60703b5763c6..b917d9d3f1c4 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
@@ -6,10 +6,10 @@
*/
/ {
bus@8000000 {
- motherboard {
+ motherboard-bus {
arm,v2m-memory-map = "rs2";
- iofpga@3,00000000 {
+ iofpga-bus@300000000 {
virtio-p9@140000 {
compatible = "virtio,mmio";
reg = <0x140000 0x200>;
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index e333c8d2d0e4..001a0a3c7f66 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -8,8 +8,76 @@
* VEMotherBoard.lisa
*/
/ {
+ v2m_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "v2m:clk24mhz";
+ };
+
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "v2m:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "v2m:refclk32khz";
+ };
+
+ v2m_fixed_3v3: v2m-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ mcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ v2m_oscclk1: oscclk1 {
+ /* CLCD clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 1>;
+ freq-range = <23750000 63500000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk1";
+ };
+
+ reset {
+ compatible = "arm,vexpress-reset";
+ arm,vexpress-sysreg,func = <5 0>;
+ };
+
+ muxfpga {
+ compatible = "arm,vexpress-muxfpga";
+ arm,vexpress-sysreg,func = <7 0>;
+ };
+
+ shutdown {
+ compatible = "arm,vexpress-shutdown";
+ arm,vexpress-sysreg,func = <8 0>;
+ };
+
+ reboot {
+ compatible = "arm,vexpress-reboot";
+ arm,vexpress-sysreg,func = <9 0>;
+ };
+
+ dvimode {
+ compatible = "arm,vexpress-dvimode";
+ arm,vexpress-sysreg,func = <11 0>;
+ };
+ };
+
bus@8000000 {
- motherboard {
+ motherboard-bus {
arm,v2m-memory-map = "rs1";
compatible = "arm,vexpress,v2m-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
@@ -17,41 +85,20 @@
#interrupt-cells = <1>;
ranges;
- flash@0,00000000 {
+ flash@0 {
compatible = "arm,vexpress-flash", "cfi-flash";
reg = <0 0x00000000 0x04000000>,
<4 0x00000000 0x04000000>;
bank-width = <4>;
};
- ethernet@2,02000000 {
+ ethernet@202000000 {
compatible = "smsc,lan91c111";
reg = <2 0x02000000 0x10000>;
interrupts = <15>;
};
- v2m_clk24mhz: clk24mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "v2m:clk24mhz";
- };
-
- v2m_refclk1mhz: refclk1mhz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <1000000>;
- clock-output-names = "v2m:refclk1mhz";
- };
-
- v2m_refclk32khz: refclk32khz {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "v2m:refclk32khz";
- };
-
- iofpga@3,00000000 {
+ iofpga-bus@300000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -111,7 +158,7 @@
clock-names = "KMIREFCLK", "apb_pclk";
};
- v2m_serial0: uart@90000 {
+ v2m_serial0: serial@90000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x090000 0x1000>;
interrupts = <5>;
@@ -119,7 +166,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial1: uart@a0000 {
+ v2m_serial1: serial@a0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0a0000 0x1000>;
interrupts = <6>;
@@ -127,7 +174,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial2: uart@b0000 {
+ v2m_serial2: serial@b0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0b0000 0x1000>;
interrupts = <7>;
@@ -135,7 +182,7 @@
clock-names = "uartclk", "apb_pclk";
};
- v2m_serial3: uart@c0000 {
+ v2m_serial3: serial@c0000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x0c0000 0x1000>;
interrupts = <8>;
@@ -198,53 +245,6 @@
};
};
};
-
- v2m_fixed_3v3: v2m-3v3 {
- compatible = "regulator-fixed";
- regulator-name = "3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
- mcc {
- compatible = "arm,vexpress,config-bus";
- arm,vexpress,config-bridge = <&v2m_sysreg>;
-
- v2m_oscclk1: oscclk1 {
- /* CLCD clock */
- compatible = "arm,vexpress-osc";
- arm,vexpress-sysreg,func = <1 1>;
- freq-range = <23750000 63500000>;
- #clock-cells = <0>;
- clock-output-names = "v2m:oscclk1";
- };
-
- reset {
- compatible = "arm,vexpress-reset";
- arm,vexpress-sysreg,func = <5 0>;
- };
-
- muxfpga {
- compatible = "arm,vexpress-muxfpga";
- arm,vexpress-sysreg,func = <7 0>;
- };
-
- shutdown {
- compatible = "arm,vexpress-shutdown";
- arm,vexpress-sysreg,func = <8 0>;
- };
-
- reboot {
- compatible = "arm,vexpress-reboot";
- arm,vexpress-sysreg,func = <9 0>;
- };
-
- dvimode {
- compatible = "arm,vexpress-dvimode";
- arm,vexpress-sysreg,func = <11 0>;
- };
- };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
index f90c040fd5e8..67702667ed8a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frdm.dts
@@ -74,6 +74,21 @@
};
};
+&qspi {
+ status = "okay";
+
+ s25fs512s0: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <2>;
+ };
+};
+
&sai2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
index 8749634c55ee..6290e2f9de6a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-frwy.dts
@@ -23,3 +23,18 @@
&i2c0 {
status = "okay";
};
+
+&qspi {
+ status = "okay";
+
+ w25q16dw0: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ m25p,fast-read;
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
index 2fb1cb1f7d8f..449475a97bf1 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
@@ -128,6 +128,21 @@
};
};
+&qspi {
+ status = "okay";
+
+ s25fs512s0: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <2>;
+ };
+};
+
&sai2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
index 5edb1e137a52..d45c17620b98 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-rdb.dts
@@ -35,6 +35,21 @@
status = "okay";
};
+&qspi {
+ status = "okay";
+
+ s25fs512s0: flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ reg = <0>;
+ spi-rx-bus-width = <2>;
+ spi-tx-bus-width = <2>;
+ };
+};
+
&sata {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 337919366dc8..006e544d1fdb 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -137,6 +137,19 @@
#size-cells = <2>;
ranges;
+ qspi: spi@1550000 {
+ compatible = "fsl,ls1021a-qspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x1550000 0x0 0x10000>,
+ <0x0 0x40000000 0x0 0x10000000>;
+ reg-names = "QuadSPI", "QuadSPI-memory";
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "qspi_en", "qspi";
+ clocks = <&clockgen 4 0>, <&clockgen 4 0>;
+ status = "disabled";
+ };
+
esdhc0: esdhc@1560000 {
compatible = "fsl,ls1012a-esdhc", "fsl,esdhc";
reg = <0x0 0x1560000 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
index 901b5b161def..dd764b720fb0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var2.dts
@@ -53,14 +53,14 @@
};
&mscc_felix_port0 {
- label = "gbe0";
+ label = "swp0";
phy-handle = <&phy0>;
phy-mode = "sgmii";
status = "okay";
};
&mscc_felix_port1 {
- label = "gbe1";
+ label = "swp1";
phy-handle = <&phy1>;
phy-mode = "sgmii";
status = "okay";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
index 1648a04ea79f..852dad8d70ab 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -17,6 +17,7 @@
crypto = &crypto;
serial0 = &duart0;
serial1 = &duart1;
+ serial2 = &lpuart1;
spi0 = &fspi;
spi1 = &dspi2;
};
@@ -185,3 +186,7 @@
pagesize = <32>;
};
};
+
+&lpuart1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 13d0570c7ed6..055f114cf848 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -298,6 +298,8 @@
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
clocks = <&clockgen 4 1>;
+ dmas = <&edma0 0 62>, <&edma0 0 60>;
+ dma-names = "tx", "rx";
spi-num-chipselects = <4>;
little-endian;
status = "disabled";
@@ -311,6 +313,8 @@
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
clocks = <&clockgen 4 1>;
+ dmas = <&edma0 0 58>, <&edma0 0 56>;
+ dma-names = "tx", "rx";
spi-num-chipselects = <4>;
little-endian;
status = "disabled";
@@ -324,6 +328,8 @@
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dspi";
clocks = <&clockgen 4 1>;
+ dmas = <&edma0 0 54>, <&edma0 0 2>;
+ dma-names = "tx", "rx";
spi-num-chipselects = <3>;
little-endian;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index dde50c88f5e3..bfa9d957e536 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -13,6 +13,7 @@
/ {
model = "LS1043A RDB Board";
+ compatible = "fsl,ls1043a-rdb", "fsl,ls1043a";
aliases {
serial0 = &duart0;
@@ -94,6 +95,22 @@
reg = <0>;
spi-max-frequency = <1000000>; /* input clock */
};
+
+ slic@2 {
+ compatible = "maxim,ds26522";
+ reg = <2>;
+ spi-max-frequency = <2000000>;
+ fsl,spi-cs-sck-delay = <100>;
+ fsl,spi-sck-cs-delay = <50>;
+ };
+
+ slic@3 {
+ compatible = "maxim,ds26522";
+ reg = <3>;
+ spi-max-frequency = <2000000>;
+ fsl,spi-cs-sck-delay = <100>;
+ fsl,spi-sck-cs-delay = <50>;
+ };
};
&duart0 {
@@ -176,3 +193,19 @@
};
};
};
+
+&uqe {
+ ucc_hdlc: ucc@2000 {
+ compatible = "fsl,ucc-hdlc";
+ rx-clock-name = "clk8";
+ tx-clock-name = "clk9";
+ fsl,rx-sync-clock = "rsync_pin";
+ fsl,tx-sync-clock = "tsync_pin";
+ fsl,tx-timeslot-mask = <0xfffffffe>;
+ fsl,rx-timeslot-mask = <0xfffffffe>;
+ fsl,tdm-framer-type = "e1";
+ fsl,tdm-id = <0>;
+ fsl,siram-entry-id = <0>;
+ fsl,tdm-interface;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index c084c7a4b6a6..3b641bd43229 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -525,6 +525,71 @@
#interrupt-cells = <2>;
};
+ uqe: uqe@2400000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe", "simple-bus";
+ ranges = <0x0 0x0 0x2400000 0x40000>;
+ reg = <0x0 0x2400000 0x0 0x480>;
+ brg-frequency = <100000000>;
+ bus-frequency = <200000000>;
+ fsl,qe-num-riscs = <1>;
+ fsl,qe-num-snums = <28>;
+
+ qeic: qeic@80 {
+ compatible = "fsl,qe-ic";
+ reg = <0x80 0x80>;
+ #address-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ si1: si@700 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,ls1043-qe-si",
+ "fsl,t1040-qe-si";
+ reg = <0x700 0x80>;
+ };
+
+ siram1: siram@1000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ls1043-qe-siram",
+ "fsl,t1040-qe-siram";
+ reg = <0x1000 0x800>;
+ };
+
+ ucc@2000 {
+ cell-index = <1>;
+ reg = <0x2000 0x200>;
+ interrupts = <32>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2200 {
+ cell-index = <3>;
+ reg = <0x2200 0x200>;
+ interrupts = <34>;
+ interrupt-parent = <&qeic>;
+ };
+
+ muram@10000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,qe-muram", "fsl,cpm-muram";
+ ranges = <0x0 0x10000 0x6000>;
+
+ data-only@0 {
+ compatible = "fsl,qe-muram-data",
+ "fsl,cpm-muram-data";
+ reg = <0x0 0x6000>;
+ };
+ };
+ };
+
lpuart0: serial@2950000 {
compatible = "fsl,ls1021a-lpuart";
reg = <0x0 0x2950000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index ae1b113ab162..abaeb587de48 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -436,19 +436,19 @@
};
thermal-zones {
- core_thermal1: core-thermal1 {
+ cluster6-7 {
polling-delay-passive = <1000>;
polling-delay = <5000>;
thermal-sensors = <&tmu 0>;
trips {
- core_cluster_alert: core-cluster-alert {
+ cluster6_7_alert: cluster6-7-alert {
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
- core_cluster_crit: core-cluster-crit {
+ cluster6_7_crit: cluster6-7-crit {
temperature = <95000>;
hysteresis = <2000>;
type = "critical";
@@ -457,7 +457,7 @@
cooling-maps {
map0 {
- trip = <&core_cluster_alert>;
+ trip = <&cluster6_7_alert>;
cooling-device =
<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -478,6 +478,126 @@
};
};
};
+
+ ddr-cluster5 {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 1>;
+
+ trips {
+ ddr-cluster5-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ ddr-cluster5-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ wriop {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 2>;
+
+ trips {
+ wriop-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ wriop-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ dce-qbman-hsio2 {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 3>;
+
+ trips {
+ dce-qbman-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ dce-qbman-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ ccn-dpaa-tbu {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 4>;
+
+ trips {
+ ccn-dpaa-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ ccn-dpaa-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cluster4-hsio3 {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 5>;
+
+ trips {
+ clust4-hsio3-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ clust4-hsio3-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cluster2-3 {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+ thermal-sensors = <&tmu 6>;
+
+ trips {
+ cluster2-3-alert {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cluster2-3-crit {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
};
soc {
@@ -549,7 +669,7 @@
/* Calibration data group 1 */
<0x00000000 0x00000035
/* Calibration data group 2 */
- 0x00010001 0x00000154>;
+ 0x00000001 0x00000154>;
little-endian;
#thermal-sensor-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
new file mode 100644
index 000000000000..baa5f997d018
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Compass Electronics Group, LLC
+ */
+
+/ {
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ label = "gen_led0";
+ gpios = <&pca6416_1 4 GPIO_ACTIVE_HIGH>;
+ default-state = "none";
+ };
+
+ led1 {
+ label = "gen_led1";
+ gpios = <&pca6416_1 5 GPIO_ACTIVE_HIGH>;
+ default-state = "none";
+ };
+
+ led2 {
+ label = "gen_led2";
+ gpios = <&pca6416_1 6 GPIO_ACTIVE_HIGH>;
+ default-state = "none";
+ };
+
+ led3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led3>;
+ label = "heartbeat";
+ gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ reg_audio: regulator-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3_aud";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca6416_1 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound {
+ compatible = "fsl,imx-audio-wm8962";
+ model = "wm8962-audio";
+ audio-cpu = <&sai3>;
+ audio-codec = <&wm8962>;
+ audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "Ext Spk", "SPKOUTL",
+ "Ext Spk", "SPKOUTR",
+ "AMIC", "MICBIAS",
+ "IN3R", "AMIC";
+ };
+};
+
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_espi2>;
+ cs-gpios = <&gpio5 9 0>;
+ status = "okay";
+
+ eeprom@0 {
+ compatible = "microchip,at25160bn", "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ spi-cpha;
+ spi-cpol;
+ pagesize = <32>;
+ size = <2048>;
+ address-width = <16>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ wm8962: audio-codec@1a {
+ compatible = "wlf,wm8962";
+ reg = <0x1a>;
+ clocks = <&clk IMX8MM_CLK_SAI3_ROOT>;
+ clock-names = "xclk";
+ DCVDD-supply = <&reg_audio>;
+ DBVDD-supply = <&reg_audio>;
+ AVDD-supply = <&reg_audio>;
+ CPVDD-supply = <&reg_audio>;
+ MICVDD-supply = <&reg_audio>;
+ PLLVDD-supply = <&reg_audio>;
+ SPKVDD1-supply = <&reg_audio>;
+ SPKVDD2-supply = <&reg_audio>;
+ gpio-cfg = <
+ 0x0000 /* 0:Default */
+ 0x0000 /* 1:Default */
+ 0x0000 /* 2:FN_DMICCLK */
+ 0x0000 /* 3:Default */
+ 0x0000 /* 4:FN_DMICCDAT */
+ 0x0000 /* 5:Default */
+ >;
+ };
+
+ pca6416_0: gpio@20 {
+ compatible = "nxp,pcal6416";
+ reg = <0x20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcal6414>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pca6416_1: gpio@21 {
+ compatible = "nxp,pcal6416";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&clk IMX8MM_CLK_SAI3>;
+ assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart2 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART3>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_espi2: espi2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
+ MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
+ MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_led3: led3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x41
+ >;
+ };
+
+ pinctrl_pcal6414: pcal6414-gpio {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x19
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0xd6
+ MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x40
+ MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x40
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2grpgpio {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CD_B_USDHC2_CD_B 0x41
+ MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
new file mode 100644
index 000000000000..74a7b0cc10c2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-kit.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Compass Electronics Group, LLC
+ */
+
+/dts-v1/;
+
+#include "imx8mm.dtsi"
+#include "imx8mm-beacon-som.dtsi"
+#include "imx8mm-beacon-baseboard.dtsi"
+
+/ {
+ model = "Beacon EmbeddedWorks i.MX8M Mini Development Kit";
+ compatible = "beacon,imx8mm-beacon-kit", "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
new file mode 100644
index 000000000000..fb0137a8611c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Compass Electronics Group, LLC
+ */
+
+/ {
+ usdhc1_pwrseq: usdhc1_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1_gpio>;
+ reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ clocks = <&osc_32k>;
+ clock-names = "ext_clock";
+ post-power-on-delay-ms = <80>;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0x80000000>;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&ddrc {
+ operating-points-v2 = <&ddrc_opp_table>;
+
+ ddrc_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-25M {
+ opp-hz = /bits/ 64 <25000000>;
+ };
+
+ opp-100M {
+ opp-hz = /bits/ 64 <100000000>;
+ };
+
+ opp-750M {
+ opp-hz = /bits/ 64 <750000000>;
+ };
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@4b {
+ compatible = "rohm,bd71847";
+ reg = <0x4b>;
+ pinctrl-0 = <&pinctrl_pmic>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 GPIO_ACTIVE_LOW>;
+ rohm,reset-snvs-powered;
+
+ regulators {
+ buck1_reg: BUCK1 {
+ regulator-name = "BUCK1";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ };
+
+ buck2_reg: BUCK2 {
+ regulator-name = "BUCK2";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <1250>;
+ rohm,dvs-run-voltage = <1000000>;
+ rohm,dvs-idle-voltage = <900000>;
+ };
+
+ buck3_reg: BUCK3 {
+ // BUCK5 in datasheet
+ regulator-name = "BUCK3";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck4_reg: BUCK4 {
+ // BUCK6 in datasheet
+ regulator-name = "BUCK4";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck5_reg: BUCK5 {
+ // BUCK7 in datasheet
+ regulator-name = "BUCK5";
+ regulator-min-microvolt = <1605000>;
+ regulator-max-microvolt = <1995000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ buck6_reg: BUCK6 {
+ // BUCK8 in datasheet
+ regulator-name = "BUCK6";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: LDO2 {
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: LDO3 {
+ regulator-name = "LDO3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: LDO4 {
+ regulator-name = "LDO4";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo6_reg: LDO6 {
+ regulator-name = "LDO6";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "microchip, at24c64d", "atmel,24c64";
+ pagesize = <32>;
+ read-only; /* Manufacturing EEPROM programmed at factory */
+ reg = <0x50>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf85263";
+ reg = <0x51>;
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ assigned-clocks = <&clk IMX8MM_CLK_UART1>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpio2 7 GPIO_ACTIVE_HIGH>;
+ clocks = <&osc_32k>;
+ clock-names = "extclk";
+ };
+};
+
+&usdhc1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <4>;
+ non-removable;
+ cap-power-off-card;
+ pm-ignore-notify;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&usdhc1_pwrseq>;
+ status = "okay";
+
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wlan>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_pmic: pmicirq {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x41
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ MX8MM_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x140
+ MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x19
+ MX8MM_IOMUXC_SD1_DATA5_GPIO2_IO7 0x19
+ MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x19
+ MX8MM_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x141
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1grpgpio {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x41
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x194
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d4
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d4
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d4
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d4
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d4
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x196
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d6
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d6
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d6
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d6
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d6
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+
+ pinctrl_wlan: wlangrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_DATA7_GPIO2_IO9 0x111
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
index 951e14a3de0e..e5ec8322796d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
@@ -82,6 +82,18 @@
cpu-supply = <&buck2_reg>;
};
+&A53_1 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2_reg>;
+};
+
&ddrc {
operating-points-v2 = <&ddrc_opp_table>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 8829628f757a..aaf6e71101a1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -270,6 +270,7 @@
ranges = <0x30000000 0x30000000 0x400000>;
sai1: sai@30010000 {
+ #sound-dai-cells = <0>;
compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
reg = <0x30010000 0x10000>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
@@ -283,6 +284,7 @@
};
sai2: sai@30020000 {
+ #sound-dai-cells = <0>;
compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
reg = <0x30020000 0x10000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
@@ -310,6 +312,7 @@
};
sai5: sai@30050000 {
+ #sound-dai-cells = <0>;
compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
reg = <0x30050000 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
@@ -323,6 +326,7 @@
};
sai6: sai@30060000 {
+ #sound-dai-cells = <0>;
compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
reg = <0x30060000 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
@@ -515,16 +519,20 @@
<&clk_ext3>, <&clk_ext4>;
clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
- assigned-clocks = <&clk IMX8MM_CLK_NOC>,
+ assigned-clocks = <&clk IMX8MM_CLK_A53_SRC>,
+ <&clk IMX8MM_CLK_A53_CORE>,
+ <&clk IMX8MM_CLK_NOC>,
<&clk IMX8MM_CLK_AUDIO_AHB>,
<&clk IMX8MM_CLK_IPG_AUDIO_ROOT>,
<&clk IMX8MM_SYS_PLL3>,
<&clk IMX8MM_VIDEO_PLL1>,
<&clk IMX8MM_AUDIO_PLL1>,
<&clk IMX8MM_AUDIO_PLL2>;
- assigned-clock-parents = <&clk IMX8MM_SYS_PLL3_OUT>,
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_800M>,
+ <&clk IMX8MM_ARM_PLL_OUT>,
+ <&clk IMX8MM_SYS_PLL3_OUT>,
<&clk IMX8MM_SYS_PLL1_800M>;
- assigned-clock-rates = <0>,
+ assigned-clock-rates = <0>, <0>, <0>,
<400000000>,
<400000000>,
<750000000>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index 2497eebb5739..d07e0e6a00cc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -17,6 +17,18 @@
cpu-supply = <&buck2_reg>;
};
+&A53_1 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2_reg>;
+};
+
&ddrc {
operating-points-v2 = <&ddrc_opp_table>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 43971abe218b..9a4b65a267d4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -121,7 +121,7 @@
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <950000>;
+ opp-microvolt = <850000>;
opp-supported-hw = <0xb00>, <0x7>;
clock-latency-ns = <150000>;
opp-suspend;
@@ -426,13 +426,17 @@
<&clk_ext3>, <&clk_ext4>;
clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
- assigned-clocks = <&clk IMX8MN_CLK_NOC>,
+ assigned-clocks = <&clk IMX8MN_CLK_A53_SRC>,
+ <&clk IMX8MN_CLK_A53_CORE>,
+ <&clk IMX8MN_CLK_NOC>,
<&clk IMX8MN_CLK_AUDIO_AHB>,
<&clk IMX8MN_CLK_IPG_AUDIO_ROOT>,
<&clk IMX8MN_SYS_PLL3>;
- assigned-clock-parents = <&clk IMX8MN_SYS_PLL3_OUT>,
+ assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_800M>,
+ <&clk IMX8MN_ARM_PLL_OUT>,
+ <&clk IMX8MN_SYS_PLL3_OUT>,
<&clk IMX8MN_SYS_PLL1_800M>;
- assigned-clock-rates = <0>,
+ assigned-clock-rates = <0>, <0>, <0>,
<400000000>,
<400000000>,
<600000000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 9f6ba763238d..45e2c0a4e889 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -7,6 +7,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
#include "imx8mp-pinfunc.h"
@@ -43,6 +44,7 @@
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ #cooling-cells = <2>;
};
A53_1: cpu@1 {
@@ -53,6 +55,7 @@
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ #cooling-cells = <2>;
};
A53_2: cpu@2 {
@@ -63,6 +66,7 @@
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ #cooling-cells = <2>;
};
A53_3: cpu@3 {
@@ -73,6 +77,7 @@
clocks = <&clk IMX8MP_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ #cooling-cells = <2>;
};
A53_L2: l2-cache0 {
@@ -127,6 +132,68 @@
method = "smc";
};
+ thermal-zones {
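+ /*
+  * Each zone defines a passive trip at 85 C, which engages the
+  * cooling map (throttling the A53 cores), and a critical trip at
+  * 95 C, at which the thermal core shuts the system down.
+  * Temperatures are given in millidegrees Celsius.
+  */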
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu 0>;
+ trips {
+ cpu_alert0: trip0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit0: trip1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device =
+ <&A53_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ soc-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu 1>;
+ trips {
+ soc_alert0: trip0 {
+ temperature = <85000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ soc_crit0: trip1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&soc_alert0>;
+ cooling-device =
+ <&A53_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>,
@@ -215,6 +282,13 @@
gpio-ranges = <&iomuxc 0 114 30>;
};
+ tmu: tmu@30260000 {
+ compatible = "fsl,imx8mp-tmu";
+ reg = <0x30260000 0x10000>;
+ clocks = <&clk IMX8MP_CLK_TSENSOR_ROOT>;
+ #thermal-sensor-cells = <1>;
+ };
+
wdog1: watchdog@30280000 {
compatible = "fsl,imx8mp-wdt", "fsl,imx21-wdt";
reg = <0x30280000 0x10000>;
@@ -286,7 +360,9 @@
<&clk_ext3>, <&clk_ext4>;
clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
- assigned-clocks = <&clk IMX8MP_CLK_NOC>,
+ assigned-clocks = <&clk IMX8MP_CLK_A53_SRC>,
+ <&clk IMX8MP_CLK_A53_CORE>,
+ <&clk IMX8MP_CLK_NOC>,
<&clk IMX8MP_CLK_NOC_IO>,
<&clk IMX8MP_CLK_GIC>,
<&clk IMX8MP_CLK_AUDIO_AHB>,
@@ -294,12 +370,15 @@
<&clk IMX8MP_CLK_IPG_AUDIO_ROOT>,
<&clk IMX8MP_AUDIO_PLL1>,
<&clk IMX8MP_AUDIO_PLL2>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_ARM_PLL_OUT>,
+ <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL2_500M>,
<&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL1_800M>;
- assigned-clock-rates = <1000000000>,
+ assigned-clock-rates = <0>, <0>,
+ <1000000000>,
<800000000>,
<500000000>,
<400000000>,
@@ -312,6 +391,7 @@
src: reset-controller@30390000 {
compatible = "fsl,imx8mp-src", "syscon";
reg = <0x30390000 0x10000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#reset-cells = <1>;
};
};
@@ -615,7 +695,7 @@
};
fec: ethernet@30be0000 {
- compatible = "fsl,imx8mp-fec", "fsl,imx8mq-fec";
+ compatible = "fsl,imx8mp-fec", "fsl,imx8mq-fec", "fsl,imx6sx-fec";
reg = <0x30be0000 0x10000>;
interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index 10eca94194be..6900ac274f5b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -318,7 +318,7 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
- rohm,dvs-run-voltage = <1000000>;
+ rohm,dvs-run-voltage = <900000>;
};
buck4_reg: BUCK4 {
@@ -410,7 +410,7 @@
};
};
- typec_ptn5100: usb_typec@52 {
+ typec_ptn5100: usb-typec@52 {
compatible = "nxp,ptn5110";
reg = <0x52>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index bab88369be1b..978f8122c0d2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -595,13 +595,19 @@
clock-names = "ckil", "osc_25m", "osc_27m",
"clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
- assigned-clocks = <&clk IMX8MQ_CLK_NOC>;
- assigned-clock-rates = <800000000>;
+ assigned-clocks = <&clk IMX8MQ_CLK_A53_SRC>,
+ <&clk IMX8MQ_CLK_A53_CORE>,
+ <&clk IMX8MQ_CLK_NOC>;
+ assigned-clock-rates = <0>, <0>,
+ <800000000>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_800M>,
+ <&clk IMX8MQ_ARM_PLL_OUT>;
};
src: reset-controller@30390000 {
compatible = "fsl,imx8mq-src", "syscon";
reg = <0x30390000 0x10000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#reset-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index 13460a360c6a..46437d3c7a04 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -30,31 +30,10 @@
};
};
-&adma_lpuart0 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_lpuart0>;
+&adma_dsp {
status = "okay";
};
-&fec1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fec1>;
- phy-mode = "rgmii-id";
- phy-handle = <&ethphy0>;
- fsl,magic-packet;
- status = "okay";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ethphy0: ethernet-phy@0 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0>;
- };
- };
-};
-
&adma_i2c1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -131,6 +110,68 @@
};
};
+&adma_lpuart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart0>;
+ status = "okay";
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+};
+
+&scu_key {
+ status = "okay";
+};
+
+&thermal_zones {
+ pmic-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tsens IMX_SC_R_PMIC_0>;
+
+ trips {
+ pmic_alert0: trip0 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ pmic_crit0: trip1 {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&pmic_alert0>;
+ cooling-device =
+ <&A35_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A35_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+};
+
&usdhc1 {
assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>;
assigned-clock-rates = <200000000>;
@@ -175,7 +216,7 @@
>;
};
- pinctrl_ioexp_rst: ioexp_rst_grp {
+ pinctrl_ioexp_rst: ioexprstgrp {
fsl,pins = <
IMX8QXP_SPI2_SDO_LSIO_GPIO1_IO01 0x06000021
>;
@@ -229,11 +270,3 @@
>;
};
};
-
-&adma_dsp {
- status = "okay";
-};
-
-&scu_key {
- status = "okay";
-};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
index e8ffb7590656..d1c3c98e4b39 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -141,17 +141,11 @@
scu {
compatible = "fsl,imx-scu";
- mbox-names = "tx0", "tx1", "tx2", "tx3",
- "rx0", "rx1", "rx2", "rx3",
+ mbox-names = "tx0",
+ "rx0",
"gip3";
mboxes = <&lsio_mu1 0 0
- &lsio_mu1 0 1
- &lsio_mu1 0 2
- &lsio_mu1 0 3
&lsio_mu1 1 0
- &lsio_mu1 1 1
- &lsio_mu1 1 2
- &lsio_mu1 1 3
&lsio_mu1 3 3>;
clk: clock-controller {
@@ -548,14 +542,14 @@
};
lsio_mu1: mailbox@5d1c0000 {
- compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1c0000 0x10000>;
interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <2>;
};
lsio_mu2: mailbox@5d1d0000 {
- compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1d0000 0x10000>;
interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <2>;
@@ -563,7 +557,7 @@
};
lsio_mu3: mailbox@5d1e0000 {
- compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1e0000 0x10000>;
interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <2>;
@@ -571,7 +565,7 @@
};
lsio_mu4: mailbox@5d1f0000 {
- compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1f0000 0x10000>;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <2>;
diff --git a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
index 263b972a6d1e..8bc6caa9167d 100644
--- a/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
+++ b/arch/arm64/boot/dts/freescale/qoriq-fman3-0.dtsi
@@ -81,4 +81,5 @@ ptp_timer0: ptp-timer@1afe000 {
reg = <0x0 0x1afe000 0x0 0x1000>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 3 0>;
+ fsl,extts-fifo;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 253cc345f143..c39b78989ff9 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -974,7 +974,7 @@
clocks = <&crg_ctrl HI3660_CLK_GATE_SPI2>;
clock-names = "apb_pclk";
pinctrl-names = "default";
- pinctrl-0 = <&spi2_pmx_func>;
+ pinctrl-0 = <&spi2_pmx_func &spi2_cfg_func>;
num-cs = <1>;
cs-gpios = <&gpio27 2 0>;
status = "disabled";
@@ -989,7 +989,7 @@
clocks = <&crg_ctrl HI3660_CLK_GATE_SPI3>;
clock-names = "apb_pclk";
pinctrl-names = "default";
- pinctrl-0 = <&spi3_pmx_func>;
+ pinctrl-0 = <&spi3_pmx_func &spi3_cfg_func>;
num-cs = <1>;
cs-gpios = <&gpio18 5 0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi
index 651771a73ed6..7b3010f448c5 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-coresight.dtsi
@@ -213,7 +213,7 @@
};
};
- etm@f659c000 {
+ etm0: etm@f659c000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf659c000 0 0x1000>;
@@ -232,7 +232,7 @@
};
};
- etm@f659d000 {
+ etm1: etm@f659d000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf659d000 0 0x1000>;
@@ -251,7 +251,7 @@
};
};
- etm@f659e000 {
+ etm2: etm@f659e000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf659e000 0 0x1000>;
@@ -270,7 +270,7 @@
};
};
- etm@f659f000 {
+ etm3: etm@f659f000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf659f000 0 0x1000>;
@@ -289,7 +289,7 @@
};
};
- etm@f65dc000 {
+ etm4: etm@f65dc000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf65dc000 0 0x1000>;
@@ -308,7 +308,7 @@
};
};
- etm@f65dd000 {
+ etm5: etm@f65dd000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf65dd000 0 0x1000>;
@@ -327,7 +327,7 @@
};
};
- etm@f65de000 {
+ etm6: etm@f65de000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf65de000 0 0x1000>;
@@ -346,7 +346,7 @@
};
};
- etm@f65df000 {
+ etm7: etm@f65df000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0 0xf65df000 0 0x1000>;
@@ -364,5 +364,119 @@
};
};
};
+
+ /* System CTIs */
+ /* CTI 0 - TMC and TPIU connections */
+ cti@f6403000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf6403000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+ };
+
+ /* CTI - CPU-0 */
+ cti@f6598000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf6598000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu0>;
+ arm,cs-dev-assoc = <&etm0>;
+ };
+
+ /* CTI - CPU-1 */
+ cti@f6599000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf6599000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu1>;
+ arm,cs-dev-assoc = <&etm1>;
+ };
+
+ /* CTI - CPU-2 */
+ cti@f659a000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf659a000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu2>;
+ arm,cs-dev-assoc = <&etm2>;
+ };
+
+ /* CTI - CPU-3 */
+ cti@f659b000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf659b000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu3>;
+ arm,cs-dev-assoc = <&etm3>;
+ };
+
+ /* CTI - CPU-4 */
+ cti@f65d8000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf65d8000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu4>;
+ arm,cs-dev-assoc = <&etm4>;
+ };
+
+ /* CTI - CPU-5 */
+ cti@f65d9000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf65d9000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu5>;
+ arm,cs-dev-assoc = <&etm5>;
+ };
+
+ /* CTI - CPU-6 */
+ cti@f65da000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf65da000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu6>;
+ arm,cs-dev-assoc = <&etm6>;
+ };
+
+ /* CTI - CPU-7 */
+ cti@f65db000 {
+ compatible = "arm,coresight-cti-v8-arch",
+ "arm,coresight-cti", "arm,primecell";
+ reg = <0 0xf65db000 0 0x1000>;
+
+ clocks = <&acpu_sctrl HI6220_ACPU_SFT_AT_S>;
+ clock-names = "apb_pclk";
+
+ cpu = <&cpu7>;
+ arm,cs-dev-assoc = <&etm7>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
index d11efc81958c..920a3111c66d 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey960-pinctrl.dtsi
@@ -717,7 +717,7 @@
spi3_cfg_func: spi3_cfg_func {
pinctrl-single,pins = <
0x008 0x0 /* SPI3_CLK */
- 0x0 /* SPI3_DI */
+ 0x00c 0x0 /* SPI3_DI */
0x010 0x0 /* SPI3_DO */
0x014 0x0 /* SPI3_CS0_N */
>;
@@ -734,7 +734,7 @@
PULL_UP
>;
pinctrl-single,drive-strength = <
- DRIVE7_02MA DRIVE6_MASK
+ DRIVE7_06MA DRIVE6_MASK
>;
};
};
@@ -1031,7 +1031,7 @@
PULL_UP
>;
pinctrl-single,drive-strength = <
- DRIVE7_02MA DRIVE6_MASK
+ DRIVE7_06MA DRIVE6_MASK
>;
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index d8c44d3ca15a..f52de8f7806a 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -539,12 +539,12 @@
firmware {
svc {
- compatible = "intel,stratix10-svc";
+ compatible = "intel,agilex-svc";
method = "smc";
memory-region = <&service_reserved>;
fpga_mgr: fpga-mgr {
- compatible = "intel,stratix10-soc-fpga-mgr";
+ compatible = "intel,agilex-soc-fpga-mgr";
};
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index f2cc00594d64..3e5789f37206 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -128,6 +128,9 @@
/* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
+ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
index 42e992f9c8a5..b97218c72727 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dtsi
@@ -44,9 +44,9 @@
/* J9 */
&pcie0 {
status = "okay";
- phys = <&comphy1 0>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
+ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
};
/* J6 */
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index bb42d1e6a4e9..f3a678e0fd99 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -95,7 +95,7 @@
};
sfp: sfp {
- compatible = "sff,sfp+";
+ compatible = "sff,sfp";
i2c-bus = <&i2c0>;
los-gpio = <&moxtet_sfp 0 GPIO_ACTIVE_HIGH>;
tx-fault-gpio = <&moxtet_sfp 1 GPIO_ACTIVE_HIGH>;
@@ -128,17 +128,11 @@
};
};
-&pcie_reset_pins {
- function = "gpio";
-};
-
&pcie0 {
pinctrl-names = "default";
pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
status = "okay";
- max-link-speed = <2>;
reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
- phys = <&comphy1 0>;
/* enabled by U-Boot if PCIe module is present */
status = "disabled";
@@ -179,6 +173,8 @@
marvell,pad-type = "sd";
vqmmc-supply = <&vsdio_reg>;
mmc-pwrseq = <&sdhci1_pwrseq>;
+ /* forbid SDR104 for FCC purposes */
+ sdhci-caps-mask = <0x2 0x0>;
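+ /* first cell = caps[63:32]; 0x2 clears bit 33, the SDR104 capability */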
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
index 7eb6c1796cef..95d46e8d081c 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
@@ -117,18 +117,36 @@
};
};
+&pinctrl_nb {
+ i2c1_recovery_pins: i2c1-recovery-pins {
+ groups = "i2c1";
+ function = "gpio";
+ };
+
+ i2c2_recovery_pins: i2c2-recovery-pins {
+ groups = "i2c2";
+ function = "gpio";
+ };
+};
+
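+/*
+ * The second ("recovery") pinctrl state switches the I2C pads to GPIO
+ * mode so the controller driver can bit-bang SCL/SDA through the
+ * scl-gpios/sda-gpios properties and clock a stuck slave off the bus,
+ * then return to the "default" state.
+ */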
&i2c0 {
status = "okay";
- pinctrl-names = "default";
+ pinctrl-names = "default", "recovery";
pinctrl-0 = <&i2c1_pins>;
+ pinctrl-1 = <&i2c1_recovery_pins>;
/delete-property/mrvl,i2c-fast-mode;
+ scl-gpios = <&gpionb 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpionb 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
};
&i2c1 {
status = "okay";
- pinctrl-names = "default";
+ pinctrl-names = "default", "recovery";
pinctrl-0 = <&i2c2_pins>;
+ pinctrl-1 = <&i2c2_recovery_pins>;
/delete-property/mrvl,i2c-fast-mode;
+ scl-gpios = <&gpionb 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpionb 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
lm75@48 {
status = "okay";
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 000c135e39b7..2bbc69b4dc99 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -317,7 +317,7 @@
pcie_reset_pins: pcie-reset-pins {
groups = "pcie1";
- function = "pcie";
+ function = "gpio";
};
pcie_clkreq_pins: pcie-clkreq-pins {
@@ -493,6 +493,8 @@
<0 0 0 2 &pcie_intc 1>,
<0 0 0 3 &pcie_intc 2>,
<0 0 0 4 &pcie_intc 3>;
+ max-link-speed = <2>;
+ phys = <&comphy1 0>;
pcie_intc: interrupt-controller {
interrupt-controller;
#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index b90d78a5724b..c8243da71041 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -276,7 +276,7 @@
/* SFP */
&cp0_eth0 {
status = "okay";
- phy-mode = "10gbase-kr";
+ phy-mode = "10gbase-r";
managed = "in-band-status";
phys = <&cp0_comphy2 0>;
sfp = <&sfp_cp0_eth0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin-singleshot.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin-singleshot.dts
index c3e18fd5bc27..2e6832d02a59 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin-singleshot.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin-singleshot.dts
@@ -16,14 +16,14 @@
&cp0_eth0 {
status = "okay";
- phy-mode = "10gbase-kr";
+ phy-mode = "10gbase-r";
managed = "in-band-status";
sfp = <&sfp_eth0>;
};
&cp1_eth0 {
status = "okay";
- phy-mode = "10gbase-kr";
+ phy-mode = "10gbase-r";
managed = "in-band-status";
sfp = <&sfp_eth1>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
index d06f5ab7ddab..1766cf58101b 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
@@ -34,12 +34,12 @@
status = "okay";
/* Network PHY */
phy = <&phy0>;
- phy-mode = "10gbase-kr";
+ phy-mode = "10gbase-r";
};
&cp1_eth0 {
status = "okay";
/* Network PHY */
phy = <&phy8>;
- phy-mode = "10gbase-kr";
+ phy-mode = "10gbase-r";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
index e7438c21ccee..7f9b9a647717 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
@@ -201,7 +201,6 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- timeout-ms = <1000>;
clocks = <&ap_clk 3>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 2f8967cb8717..a57af9da9f5c 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -6,6 +6,9 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt6797-x20-dev.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-rfb1.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt7622-bananapi-bpi-r64.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana-rev7.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index 2b91daf5c1a6..7d369fdd3117 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -105,7 +105,81 @@
proc-supply = <&cpus_fixed_vproc1>;
};
+&eth {
+ phy-mode ="rgmii-rxid";
+ phy-handle = <&ethernet_phy0>;
+ mediatek,tx-delay-ps = <1530>;
+ snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&eth_default>;
+ pinctrl-1 = <&eth_sleep>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ethernet_phy0: ethernet-phy@5 {
+ compatible = "ethernet-phy-id0243.0d90";
+ reg = <0x5>;
+ };
+ };
+};
+
&pio {
+ eth_default: eth_default {
+ tx_pins {
+ pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
+ <MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
+ <MT2712_PIN_73_GBE_TXD1__FUNC_GBE_TXD1>,
+ <MT2712_PIN_74_GBE_TXD0__FUNC_GBE_TXD0>,
+ <MT2712_PIN_75_GBE_TXC__FUNC_GBE_TXC>,
+ <MT2712_PIN_76_GBE_TXEN__FUNC_GBE_TXEN>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ };
+ rx_pins {
+ pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GBE_RXD3>,
+ <MT2712_PIN_79_GBE_RXD2__FUNC_GBE_RXD2>,
+ <MT2712_PIN_80_GBE_RXD1__FUNC_GBE_RXD1>,
+ <MT2712_PIN_81_GBE_RXD0__FUNC_GBE_RXD0>,
+ <MT2712_PIN_82_GBE_RXDV__FUNC_GBE_RXDV>,
+ <MT2712_PIN_84_GBE_RXC__FUNC_GBE_RXC>;
+ input-enable;
+ };
+ mdio_pins {
+ pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GBE_MDC>,
+ <MT2712_PIN_86_GBE_MDIO__FUNC_GBE_MDIO>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ input-enable;
+ };
+ };
+
+ eth_sleep: eth_sleep {
+ tx_pins {
+ pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
+ <MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
+ <MT2712_PIN_73_GBE_TXD1__FUNC_GPIO73>,
+ <MT2712_PIN_74_GBE_TXD0__FUNC_GPIO74>,
+ <MT2712_PIN_75_GBE_TXC__FUNC_GPIO75>,
+ <MT2712_PIN_76_GBE_TXEN__FUNC_GPIO76>;
+ };
+ rx_pins {
+ pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GPIO78>,
+ <MT2712_PIN_79_GBE_RXD2__FUNC_GPIO79>,
+ <MT2712_PIN_80_GBE_RXD1__FUNC_GPIO80>,
+ <MT2712_PIN_81_GBE_RXD0__FUNC_GPIO81>,
+ <MT2712_PIN_82_GBE_RXDV__FUNC_GPIO82>,
+ <MT2712_PIN_84_GBE_RXC__FUNC_GPIO84>;
+ input-disable;
+ };
+ mdio_pins {
+ pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GPIO85>,
+ <MT2712_PIN_86_GBE_MDIO__FUNC_GPIO86>;
+ input-disable;
+ bias-disable;
+ };
+ };
+
usb0_id_pins_float: usb0_iddig {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 2cd8b33886e5..db17d0a4ed57 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -300,6 +300,9 @@
interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_LOW>;
clocks = <&baud_clk>, <&sys_clk>;
clock-names = "baud", "bus";
+ dmas = <&apdma 10
+ &apdma 11>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -375,6 +378,39 @@
(GIC_CPU_MASK_RAW(0x13) | IRQ_TYPE_LEVEL_HIGH)>;
};
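+ /*
+  * Twelve APDMA channels: one TX/RX pair per UART, referenced from
+  * the UART nodes through their dmas/dma-names properties.
+  */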
+ apdma: dma-controller@11000400 {
+ compatible = "mediatek,mt2712-uart-dma",
+ "mediatek,mt6577-uart-dma";
+ reg = <0 0x11000400 0 0x80>,
+ <0 0x11000480 0 0x80>,
+ <0 0x11000500 0 0x80>,
+ <0 0x11000580 0 0x80>,
+ <0 0x11000600 0 0x80>,
+ <0 0x11000680 0 0x80>,
+ <0 0x11000700 0 0x80>,
+ <0 0x11000780 0 0x80>,
+ <0 0x11000800 0 0x80>,
+ <0 0x11000880 0 0x80>,
+ <0 0x11000900 0 0x80>,
+ <0 0x11000980 0 0x80>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
+ dma-requests = <12>;
+ clocks = <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "apdma";
+ #dma-cells = <1>;
+ };
+
auxadc: adc@11001000 {
compatible = "mediatek,mt2712-auxadc";
reg = <0 0x11001000 0 0x1000>;
@@ -391,6 +427,9 @@
interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
clocks = <&baud_clk>, <&sys_clk>;
clock-names = "baud", "bus";
+ dmas = <&apdma 0
+ &apdma 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -401,6 +440,9 @@
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
clocks = <&baud_clk>, <&sys_clk>;
clock-names = "baud", "bus";
+ dmas = <&apdma 2
+ &apdma 3>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -411,6 +453,9 @@
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
clocks = <&baud_clk>, <&sys_clk>;
clock-names = "baud", "bus";
+ dmas = <&apdma 4
+ &apdma 5>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -421,6 +466,9 @@
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_LOW>;
clocks = <&baud_clk>, <&sys_clk>;
clock-names = "baud", "bus";
+ dmas = <&apdma 6
+ &apdma 7>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -635,6 +683,74 @@
interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_LOW>;
clocks = <&baud_clk>, <&sys_clk>;
clock-names = "baud", "bus";
+ dmas = <&apdma 8
+ &apdma 9>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
+ stmmac_axi_setup: stmmac-axi-config {
+ snps,wr_osr_lmt = <0x7>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <1>;
+ snps,rx-sched-sp;
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ snps,priority = <0x0>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <3>;
+ snps,tx-sched-wrr;
+ queue0 {
+ snps,weight = <0x10>;
+ snps,dcb-algorithm;
+ snps,priority = <0x0>;
+ };
+ queue1 {
+ snps,weight = <0x11>;
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ };
+ queue2 {
+ snps,weight = <0x12>;
+ snps,dcb-algorithm;
+ snps,priority = <0x2>;
+ };
+ };
+
+ eth: ethernet@1101c000 {
+ compatible = "mediatek,mt2712-gmac";
+ reg = <0 0x1101c000 0 0x1300>;
+ interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq";
+ mac-address = [00 55 7b b5 7d f7];
+ clock-names = "axi",
+ "apb",
+ "mac_main",
+ "ptp_ref";
+ clocks = <&pericfg CLK_PERI_GMAC>,
+ <&pericfg CLK_PERI_GMAC_PCLK>,
+ <&topckgen CLK_TOP_ETHER_125M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
+ <&topckgen CLK_TOP_APLL1_D3>;
+ power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
+ mediatek,pericfg = <&pericfg>;
+ snps,axi-config = <&stmmac_axi_setup>;
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ snps,txpbl = <1>;
+ snps,rxpbl = <1>;
+ clk_csr = <0>;
status = "disabled";
};
@@ -703,30 +819,31 @@
};
u3phy0: usb-phy@11290000 {
- compatible = "mediatek,mt2712-u3phy";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ compatible = "mediatek,mt2712-tphy",
+ "mediatek,generic-tphy-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x11290000 0x9000>;
status = "okay";
- u2port0: usb-phy@11290000 {
- reg = <0 0x11290000 0 0x700>;
+ u2port0: usb-phy@0 {
+ reg = <0x0 0x700>;
clocks = <&clk26m>;
clock-names = "ref";
#phy-cells = <1>;
status = "okay";
};
- u2port1: usb-phy@11298000 {
- reg = <0 0x11298000 0 0x700>;
+ u2port1: usb-phy@8000 {
+ reg = <0x8000 0x700>;
clocks = <&clk26m>;
clock-names = "ref";
#phy-cells = <1>;
status = "okay";
};
- u3port0: usb-phy@11298700 {
- reg = <0 0x11298700 0 0x900>;
+ u3port0: usb-phy@8700 {
+ reg = <0x8700 0x900>;
clocks = <&clk26m>;
clock-names = "ref";
#phy-cells = <1>;
@@ -766,30 +883,31 @@
};
u3phy1: usb-phy@112e0000 {
- compatible = "mediatek,mt2712-u3phy";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ compatible = "mediatek,mt2712-tphy",
+ "mediatek,generic-tphy-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x112e0000 0x9000>;
status = "okay";
- u2port2: usb-phy@112e0000 {
- reg = <0 0x112e0000 0 0x700>;
+ u2port2: usb-phy@0 {
+ reg = <0x0 0x700>;
clocks = <&clk26m>;
clock-names = "ref";
#phy-cells = <1>;
status = "okay";
};
- u2port3: usb-phy@112e8000 {
- reg = <0 0x112e8000 0 0x700>;
+ u2port3: usb-phy@8000 {
+ reg = <0x8000 0x700>;
clocks = <&clk26m>;
clock-names = "ref";
#phy-cells = <1>;
status = "okay";
};
- u3port1: usb-phy@112e8700 {
- reg = <0 0x112e8700 0 0x900>;
+ u3port1: usb-phy@8700 {
+ reg = <0x8700 0x900>;
clocks = <&clk26m>;
clock-names = "ref";
#phy-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt6358.dtsi b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
new file mode 100644
index 000000000000..9361ada0c497
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6358.dtsi
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+&pwrap {
+ pmic: mt6358 {
+ compatible = "mediatek,mt6358";
+ interrupt-controller;
+ interrupt-parent = <&pio>;
+ interrupts = <182 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+
+ mt6358codec: mt6358codec {
+ compatible = "mediatek,mt6358-sound";
+ };
+
+ mt6358regulator: mt6358regulator {
+ mt6358_vdram1_reg: buck_vdram1 {
+ regulator-name = "vdram1";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2087500>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vcore_reg: buck_vcore {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vpa_reg: buck_vpa {
+ regulator-name = "vpa";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <3650000>;
+ regulator-ramp-delay = <50000>;
+ regulator-enable-ramp-delay = <250>;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vproc11_reg: buck_vproc11 {
+ regulator-name = "vproc11";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vproc12_reg: buck_vproc12 {
+ regulator-name = "vproc12";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vgpu_reg: buck_vgpu {
+ regulator-name = "vgpu";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <200>;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vs2_reg: buck_vs2 {
+ regulator-name = "vs2";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2087500>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+
+ mt6358_vmodem_reg: buck_vmodem {
+ regulator-name = "vmodem";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <900>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6358_vs1_reg: buck_vs1 {
+ regulator-name = "vs1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <2587500>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <0>;
+ regulator-always-on;
+ };
+
+ mt6358_vdram2_reg: ldo_vdram2 {
+ regulator-name = "vdram2";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <3300>;
+ };
+
+ mt6358_vsim1_reg: ldo_vsim1 {
+ regulator-name = "vsim1";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <540>;
+ };
+
+ mt6358_vibr_reg: ldo_vibr {
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vrf12_reg: ldo_vrf12 {
+ compatible = "regulator-fixed";
+ regulator-name = "vrf12";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ mt6358_vio18_reg: ldo_vio18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <2700>;
+ regulator-always-on;
+ };
+
+ mt6358_vusb_reg: ldo_vusb {
+ regulator-name = "vusb";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <270>;
+ regulator-always-on;
+ };
+
+ mt6358_vcamio_reg: ldo_vcamio {
+ compatible = "regulator-fixed";
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <325>;
+ };
+
+ mt6358_vcamd_reg: ldo_vcamd {
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <325>;
+ };
+
+ mt6358_vcn18_reg: ldo_vcn18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcn18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vfe28_reg: ldo_vfe28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vfe28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsram_proc11_reg: ldo_vsram_proc11 {
+ regulator-name = "vsram_proc11";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+
+ mt6358_vcn28_reg: ldo_vcn28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcn28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsram_others_reg: ldo_vsram_others {
+ regulator-name = "vsram_others";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+
+ mt6358_vsram_gpu_reg: ldo_vsram_gpu {
+ regulator-name = "vsram_gpu";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6358_vxo22_reg: ldo_vxo22 {
+ compatible = "regulator-fixed";
+ regulator-name = "vxo22";
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <2200000>;
+ regulator-enable-ramp-delay = <120>;
+ regulator-always-on;
+ };
+
+ mt6358_vefuse_reg: ldo_vefuse {
+ regulator-name = "vefuse";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vaux18_reg: ldo_vaux18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vaux18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vmch_reg: ldo_vmch {
+ regulator-name = "vmch";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vbif28_reg: ldo_vbif28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbif28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsram_proc12_reg: ldo_vsram_proc12 {
+ regulator-name = "vsram_proc12";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1293750>;
+ regulator-ramp-delay = <6250>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-always-on;
+ };
+
+ mt6358_vcama1_reg: ldo_vcama1 {
+ regulator-name = "vcama1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <325>;
+ };
+
+ mt6358_vemc_reg: ldo_vemc {
+ regulator-name = "vemc";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vio28_reg: ldo_vio28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_va12_reg: ldo_va12 {
+ compatible = "regulator-fixed";
+ regulator-name = "va12";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <270>;
+ regulator-always-on;
+ };
+
+ mt6358_vrf18_reg: ldo_vrf18 {
+ compatible = "regulator-fixed";
+ regulator-name = "vrf18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <120>;
+ };
+
+ mt6358_vcn33_bt_reg: ldo_vcn33_bt {
+ regulator-name = "vcn33_bt";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3500000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vcn33_wifi_reg: ldo_vcn33_wifi {
+ regulator-name = "vcn33_wifi";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3500000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vcama2_reg: ldo_vcama2 {
+ regulator-name = "vcama2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <325>;
+ };
+
+ mt6358_vmc_reg: ldo_vmc {
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <60>;
+ };
+
+ mt6358_vldo28_reg: ldo_vldo28 {
+ regulator-name = "vldo28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vaud28_reg: ldo_vaud28 {
+ compatible = "regulator-fixed";
+ regulator-name = "vaud28";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <270>;
+ };
+
+ mt6358_vsim2_reg: ldo_vsim2 {
+ regulator-name = "vsim2";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <3100000>;
+ regulator-enable-ramp-delay = <540>;
+ };
+ };
+
+ mt6358rtc: mt6358rtc {
+ compatible = "mediatek,mt6358-rtc";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts b/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts
index 13939d55b85b..eff9e8dbd076 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts
+++ b/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts
@@ -28,6 +28,55 @@
};
};
+/* HDMI */
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ status = "okay";
+};
+
+/* HS - I2C2 */
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ status = "okay";
+};
+
+/* HS - I2C3 */
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins_a>;
+ status = "okay";
+};
+
+/* LS - I2C0 */
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ status = "okay";
+};
+
+/* LS - I2C1 */
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins_a>;
+ status = "okay";
+};
+
+/* POWER_VPROC */
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins_a>;
+ status = "okay";
+};
+
+/* FAN53555 */
+&i2c7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c7_pins_a>;
+ status = "okay";
+};
+
&uart1 {
status = "okay";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
index 136ef9527a0d..15616231022a 100644
--- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 MediaTek Inc.
* Author: Mars.C <mars.cheng@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <dt-bindings/clock/mt6797-clk.h>
@@ -155,6 +148,62 @@
<MT6797_GPIO233__FUNC_UTXD1>;
};
};
+
+ i2c0_pins_a: i2c0 {
+ pins0 {
+ pinmux = <MT6797_GPIO37__FUNC_SCL0_0>,
+ <MT6797_GPIO38__FUNC_SDA0_0>;
+ };
+ };
+
+ i2c1_pins_a: i2c1 {
+ pins1 {
+ pinmux = <MT6797_GPIO55__FUNC_SCL1_0>,
+ <MT6797_GPIO56__FUNC_SDA1_0>;
+ };
+ };
+
+ i2c2_pins_a: i2c2 {
+ pins2 {
+ pinmux = <MT6797_GPIO96__FUNC_SCL2_0>,
+ <MT6797_GPIO95__FUNC_SDA2_0>;
+ };
+ };
+
+ i2c3_pins_a: i2c3 {
+ pins3 {
+ pinmux = <MT6797_GPIO75__FUNC_SDA3_0>,
+ <MT6797_GPIO74__FUNC_SCL3_0>;
+ };
+ };
+
+ i2c4_pins_a: i2c4 {
+ pins4 {
+ pinmux = <MT6797_GPIO238__FUNC_SDA4_0>,
+ <MT6797_GPIO239__FUNC_SCL4_0>;
+ };
+ };
+
+ i2c5_pins_a: i2c5 {
+ pins5 {
+ pinmux = <MT6797_GPIO240__FUNC_SDA5_0>,
+ <MT6797_GPIO241__FUNC_SCL5_0>;
+ };
+ };
+
+ i2c6_pins_a: i2c6 {
+ pins6 {
+ pinmux = <MT6797_GPIO152__FUNC_SDA6_0>,
+ <MT6797_GPIO151__FUNC_SCL6_0>;
+ };
+ };
+
+ i2c7_pins_a: i2c7 {
+ pins7 {
+ pinmux = <MT6797_GPIO154__FUNC_SDA7_0>,
+ <MT6797_GPIO153__FUNC_SCL7_0>;
+ };
+ };
};
scpsys: power-controller@10006000 {
@@ -233,7 +282,171 @@
status = "disabled";
};
- mmsys: mmsys_config@14000000 {
+ i2c0: i2c@11007000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <0>;
+ reg = <0 0x11007000 0 0x1000>,
+ <0 0x11000100 0 0x80>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C0>,
+ <&infrasys CLK_INFRA_AP_DMA>;
+ clock-names = "main", "dma";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@11008000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <1>;
+ reg = <0 0x11008000 0 0x1000>,
+ <0 0x11000180 0 0x80>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C1>,
+ <&infrasys CLK_INFRA_AP_DMA>;
+ clock-names = "main", "dma";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@11009000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <8>;
+ reg = <0 0x11009000 0 0x1000>,
+ <0 0x11000200 0 0x80>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C2>,
+ <&infrasys CLK_INFRA_AP_DMA>,
+ <&infrasys CLK_INFRA_I2C2_ARB>;
+ clock-names = "main", "dma", "arb";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c9: i2c@1100d000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <9>;
+ reg = <0 0x1100d000 0 0x1000>,
+ <0 0x11000280 0 0x80>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C3>,
+ <&infrasys CLK_INFRA_AP_DMA>,
+ <&infrasys CLK_INFRA_I2C3_ARB>;
+ clock-names = "main", "dma", "arb";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@1100e000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <6>;
+ reg = <0 0x1100e000 0 0x1000>,
+ <0 0x11000500 0 0x80>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C_APPM>,
+ <&infrasys CLK_INFRA_AP_DMA>;
+ clock-names = "main", "dma";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@11010000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <7>;
+ reg = <0 0x11010000 0 0x1000>,
+ <0 0x11000580 0 0x80>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C_GPUPM>,
+ <&infrasys CLK_INFRA_AP_DMA>;
+ clock-names = "main", "dma";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@11011000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <4>;
+ reg = <0 0x11011000 0 0x1000>,
+ <0 0x11000300 0 0x80>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C4>,
+ <&infrasys CLK_INFRA_AP_DMA>;
+ clock-names = "main", "dma";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@11013000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <2>;
+ reg = <0 0x11013000 0 0x1000>,
+ <0 0x11000400 0 0x80>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C2_IMM>,
+ <&infrasys CLK_INFRA_AP_DMA>,
+ <&infrasys CLK_INFRA_I2C2_ARB>;
+ clock-names = "main", "dma", "arb";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@11014000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <3>;
+ reg = <0 0x11014000 0 0x1000>,
+ <0 0x11000480 0 0x80>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C3_IMM>,
+ <&infrasys CLK_INFRA_AP_DMA>,
+ <&infrasys CLK_INFRA_I2C3_ARB>;
+ clock-names = "main", "dma", "arb";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@1101c000 {
+ compatible = "mediatek,mt6797-i2c",
+ "mediatek,mt6577-i2c";
+ id = <5>;
+ reg = <0 0x1101c000 0 0x1000>,
+ <0 0x11000380 0 0x80>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&infrasys CLK_INFRA_I2C5>,
+ <&infrasys CLK_INFRA_AP_DMA>;
+ clock-names = "main", "dma";
+ clock-div = <10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ mmsys: syscon@14000000 {
compatible = "mediatek,mt6797-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index 83e10591e0e5..d174ad214857 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -543,3 +543,7 @@
pinctrl-0 = <&watchdog_pins>;
status = "okay";
};
+
+&wmac {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 3f783348c66a..0b4de627f96e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -506,3 +506,7 @@
pinctrl-0 = <&watchdog_pins>;
status = "okay";
};
+
+&wmac {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 339dc9f88f43..1a39e0ef776b 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -699,6 +699,17 @@
status = "disabled";
};
+ wmac: wmac@18000000 {
+ compatible = "mediatek,mt7622-wmac";
+ reg = <0 0x18000000 0 0x100000>;
+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
+
+ mediatek,infracfg = <&infracfg>;
+ status = "disabled";
+
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+ };
+
ssusbsys: ssusbsys@1a000000 {
compatible = "mediatek,mt7622-ssusbsys",
"syscon";
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana-rev7.dts b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana-rev7.dts
new file mode 100644
index 000000000000..44f6149c1307
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana-rev7.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2019 MediaTek Inc.
+ */
+
+/dts-v1/;
+#include "mt8173-elm-hana.dtsi"
+
+/ {
+ model = "Google Hanawl";
+ compatible = "google,hana-rev7", "mediatek,mt8173";
+};
+
+&cpu_thermal {
+ trips {
+ cpu_crit: cpu_crit0 {
+ temperature = <100000>;
+ type = "critical";
+ };
+ };
+};
+
+&gpio_keys {
+ /delete-node/tablet_mode;
+ /delete-node/volume_down;
+ /delete-node/volume_up;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dts b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dts
new file mode 100644
index 000000000000..c234296755e1
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 MediaTek Inc.
+ */
+
+/dts-v1/;
+#include "mt8173-elm-hana.dtsi"
+
+/ {
+ model = "Google Hana";
+ compatible = "google,hana-rev6", "google,hana-rev5",
+ "google,hana-rev4", "google,hana-rev3",
+ "google,hana", "mediatek,mt8173";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
new file mode 100644
index 000000000000..bdcd35cecad9
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 MediaTek Inc.
+ */
+
+#include "mt8173-elm.dtsi"
+
+&i2c0 {
+ clock-frequency = <200000>;
+};
+
+&i2c3 {
+ touchscreen2: touchscreen@34 {
+ compatible = "melfas,mip4_ts";
+ reg = <0x34>;
+ interrupt-parent = <&pio>;
+ interrupts = <88 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ /*
+ * Lenovo 100e Chromebook 2nd Gen (MTK) and Lenovo 300e Chromebook 2nd
+ * Gen (MTK) are using synaptics touchscreen (hid-over-i2c driver) as a
+ * second source touchscreen.
+ */
+ touchscreen3: touchscreen@20 {
+ compatible = "hid-over-i2c";
+ reg = <0x20>;
+ hid-descr-addr = <0x0020>;
+ interrupt-parent = <&pio>;
+ interrupts = <88 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&i2c4 {
+ /*
+ * Lenovo 100e Chromebook 2nd Gen (MTK) and Lenovo 300e Chromebook 2nd
+ * Gen (MTK) are using synaptics trackpad (hid-over-i2c driver) as a
+ * second source trackpad.
+ */
+ trackpad2: trackpad@2c {
+ compatible = "hid-over-i2c";
+ interrupt-parent = <&pio>;
+ interrupts = <117 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x2c>;
+ hid-descr-addr = <0x0020>;
+ wakeup-source;
+ };
+};
+
+&mmc1 {
+ wp-gpios = <&pio 42 GPIO_ACTIVE_HIGH>;
+};
+
+&pio {
+ hdmi_mux_pins: hdmi_mux_pins {
+ pins2 {
+ pinmux = <MT8173_PIN_98_URTS1__FUNC_GPIO98>;
+ bias-pull-up;
+ output-high;
+ };
+ };
+
+ mmc1_pins_default: mmc1default {
+ pins_wp {
+ pinmux = <MT8173_PIN_42_DSI_TE__FUNC_GPIO42>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dts b/arch/arm64/boot/dts/mediatek/mt8173-elm.dts
new file mode 100644
index 000000000000..e9e4ac0b74b2
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 MediaTek Inc.
+ */
+
+/dts-v1/;
+#include "mt8173-elm.dtsi"
+
+/ {
+ model = "Google Elm";
+ compatible = "google,elm-rev8", "google,elm-rev7", "google,elm-rev6",
+ "google,elm-rev5", "google,elm-rev4", "google,elm-rev3",
+ "google,elm", "mediatek,mt8173";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
new file mode 100644
index 000000000000..a5a12b2599a4
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016 MediaTek Inc.
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "mt8173.dtsi"
+
+/ {
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x80000000>;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm0 0 1000000>;
+ power-supply = <&bl_fixed_reg>;
+ enable-gpios = <&pio 95 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&disp_pwm0_pins>;
+ status = "okay";
+ };
+
+ bl_fixed_reg: fixedregulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "bl_fixed";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ startup-delay-us = <1000>;
+ enable-active-high;
+ gpio = <&pio 32 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bl_fixed_pins>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio_keys: gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_keys_pins>;
+
+ lid {
+ label = "Lid";
+ gpios = <&pio 69 GPIO_ACTIVE_LOW>;
+ linux,code = <SW_LID>;
+ linux,input-type = <EV_SW>;
+ gpio-key,wakeup;
+ };
+
+ power {
+ label = "Power";
+ gpios = <&pio 14 GPIO_ACTIVE_HIGH>;
+ linux,code = <KEY_POWER>;
+ debounce-interval = <30>;
+ gpio-key,wakeup;
+ };
+
+ tablet_mode {
+ label = "Tablet_mode";
+ gpios = <&pio 121 GPIO_ACTIVE_HIGH>;
+ linux,code = <SW_TABLET_MODE>;
+ linux,input-type = <EV_SW>;
+ gpio-key,wakeup;
+ };
+
+ volume_down {
+ label = "Volume_down";
+ gpios = <&pio 123 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ volume_up {
+ label = "Volume_up";
+ gpios = <&pio 124 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ };
+ };
+
+ panel: panel {
+ compatible = "lg,lp120up1";
+ power-supply = <&panel_fixed_3v3>;
+ ddc-i2c-bus = <&i2c0>;
+ backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&ps8640_out>;
+ };
+ };
+ };
+
+ panel_fixed_3v3: regulator1 {
+ compatible = "regulator-fixed";
+ regulator-name = "PANEL_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&pio 41 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_fixed_pins>;
+ };
+
+ ps8640_fixed_1v2: regulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "PS8640_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <2000>;
+ enable-active-high;
+ regulator-boot-on;
+ gpio = <&pio 30 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ps8640_fixed_pins>;
+ };
+
+ sdio_fixed_3v3: fixedregulator0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 85 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio_fixed_3v3_pins>;
+ };
+
+ sound: sound {
+ compatible = "mediatek,mt8173-rt5650";
+ mediatek,audio-codec = <&rt5650 &hdmi0>;
+ mediatek,platform = <&afe>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&aud_i2s2>;
+
+ mediatek,mclk = <1>;
+ codec-capture {
+ sound-dai = <&rt5650 1>;
+ };
+ };
+
+ hdmicon: connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "a";
+ ddc-i2c-bus = <&hdmiddc0>;
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi0_out>;
+ };
+ };
+ };
+};
+
+&cec {
+ status = "okay";
+};
+
+&cpu0 {
+ proc-supply = <&mt6397_vpca15_reg>;
+};
+
+&cpu1 {
+ proc-supply = <&mt6397_vpca15_reg>;
+};
+
+&cpu2 {
+ proc-supply = <&da9211_vcpu_reg>;
+ sram-supply = <&mt6397_vsramca7_reg>;
+};
+
+&cpu3 {
+ proc-supply = <&da9211_vcpu_reg>;
+ sram-supply = <&mt6397_vsramca7_reg>;
+};
+
+&cpu_thermal {
+ sustainable-power = <4500>; /* milliwatts */
+ trips {
+ threshold: trip-point0 {
+ temperature = <60000>;
+ };
+
+ target: trip-point1 {
+ temperature = <65000>;
+ };
+ };
+};
+
+&dsi0 {
+ status = "okay";
+ ports {
+ port {
+ dsi0_out: endpoint {
+ remote-endpoint = <&ps8640_in>;
+ };
+ };
+ };
+};
+
+&dpi0 {
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+ ports {
+ port@1 {
+ reg = <1>;
+
+ hdmi0_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+};
+
+&hdmi_phy {
+ status = "okay";
+ mediatek,ibias = <0xc>;
+};
+
+&i2c0 {
+ status = "okay";
+
+ rt5650: audio-codec@1a {
+ compatible = "realtek,rt5650";
+ reg = <0x1a>;
+ avdd-supply = <&mt6397_vgp1_reg>;
+ cpvdd-supply = <&mt6397_vcama_reg>;
+ interrupt-parent = <&pio>;
+ interrupts = <3 IRQ_TYPE_EDGE_BOTH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rt5650_irq>;
+ #sound-dai-cells = <1>;
+ realtek,dmic1-data-pin = <2>;
+ realtek,jd-mode = <2>;
+ };
+
+ ps8640: edp-bridge@8 {
+ compatible = "parade,ps8640";
+ reg = <0x8>;
+ powerdown-gpios = <&pio 127 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&pio 115 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ps8640_pins>;
+ vdd12-supply = <&ps8640_fixed_1v2>;
+ vdd33-supply = <&mt6397_vgp2_reg>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ps8640_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ps8640_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <1500000>;
+ status = "okay";
+
+ da9211: da9211@68 {
+ compatible = "dlg,da9211";
+ reg = <0x68>;
+ interrupt-parent = <&pio>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+
+ regulators {
+ da9211_vcpu_reg: BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1310000>;
+ regulator-min-microamp = <2000000>;
+ regulator-max-microamp = <4400000>;
+ regulator-ramp-delay = <10000>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ da9211_vgpu_reg: BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1310000>;
+ regulator-min-microamp = <2000000>;
+ regulator-max-microamp = <3000000>;
+ regulator-ramp-delay = <10000>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ tpm: tpm@20 {
+ compatible = "infineon,slb9645tt";
+ reg = <0x20>;
+ powered-while-suspended;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchscreen: touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+ interrupt-parent = <&pio>;
+ interrupts = <88 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_irq>;
+
+ trackpad: trackpad@15 {
+ compatible = "elan,ekth3000";
+ interrupt-parent = <&pio>;
+ interrupts = <117 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x15>;
+ vcc-supply = <&mt6397_vgp6_reg>;
+ wakeup-source;
+ };
+};
+
+&mipi_tx0 {
+ status = "okay";
+};
+
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ cap-mmc-hw-reset;
+ hs400-ds-delay = <0x14015>;
+ mediatek,hs200-cmd-int-delay=<30>;
+ mediatek,hs400-cmd-int-delay=<14>;
+ mediatek,hs400-cmd-resp-sel-rising;
+ vmmc-supply = <&mt6397_vemc_3v3_reg>;
+ vqmmc-supply = <&mt6397_vio18_reg>;
+ assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ cd-gpios = <&pio 1 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&mt6397_vmch_reg>;
+ vqmmc-supply = <&mt6397_vmc_reg>;
+};
+
+&mmc3 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc3_pins_default>;
+ pinctrl-1 = <&mmc3_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ cap-sdio-irq;
+ vmmc-supply = <&sdio_fixed_3v3>;
+ vqmmc-supply = <&mt6397_vgp3_reg>;
+ non-removable;
+ cap-power-off-card;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ btmrvl: btmrvl@2 {
+ compatible = "marvell,sd8897-bt";
+ reg = <2>;
+ interrupt-parent = <&pio>;
+ interrupts = <119 IRQ_TYPE_LEVEL_LOW>;
+ marvell,wakeup-pin = /bits/ 16 <0x0d>;
+ marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
+ };
+
+ mwifiex: mwifiex@1 {
+ compatible = "marvell,sd8897";
+ reg = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <38 IRQ_TYPE_LEVEL_LOW>;
+ marvell,wakeup-pin = <3>;
+ };
+};
+
+&nor_flash {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nor_gpio1_pins>;
+ bus-width = <8>;
+ max-frequency = <50000000>;
+ non-removable;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ };
+};
+
+&pio {
+ gpio-line-names = "EC_INT_1V8",
+ "SD_CD_L",
+ "ALC5514_IRQ",
+ "ALC5650_IRQ",
+ /*
+ * AP_FLASH_WP_L is crossystem ABI. Schematics
+ * call it SFWP_B.
+ */
+ "AP_FLASH_WP_L",
+ "SFIN",
+ "SFCS0",
+ "SFHOLD",
+ "SFOUT",
+ "SFCK",
+ "WRAP_EVENT_S_EINT10",
+ "PMU_INT",
+ "I2S2_WS_ALC5650",
+ "I2S2_BCK_ALC5650",
+ "PWR_BTN_1V8",
+ "DA9212_IRQ",
+ "IDDIG",
+ "WATCHDOG",
+ "CEC",
+ "HDMISCK",
+ "HDMISD",
+ "HTPLG",
+ "MSDC3_DAT0",
+ "MSDC3_DAT1",
+ "MSDC3_DAT2",
+ "MSDC3_DAT3",
+ "MSDC3_CLK",
+ "MSDC3_CMD",
+ "USB_C0_OC_FLAGB",
+ "USBA_OC1_L",
+ "PS8640_1V2_ENABLE",
+ "THERM_ALERT_N",
+ "PANEL_LCD_POWER_EN",
+ "ANX7688_CHIP_PD_C",
+ "EC_IN_RW_1V8",
+ "ANX7688_1V_EN_C",
+ "USB_DP_HPD_C",
+ "TPM_DAVINT_N",
+ "MARVELL8897_IRQ",
+ "EN_USB_A0_PWR",
+ "USBA_A0_OC_L",
+ "EN_PP3300_DX_EDP",
+ "",
+ "SOC_I2C2_1V8_SDA_400K",
+ "SOC_I2C2_1V8_SCL_400K",
+ "SOC_I2C0_1V8_SDA_400K",
+ "SOC_I2C0_1V8_SCL_400K",
+ "EMMC_ID1",
+ "EMMC_ID0",
+ "MEM_CONFIG3",
+ "EMMC_ID2",
+ "MEM_CONFIG1",
+ "MEM_CONFIG2",
+ "BRD_ID2",
+ "MEM_CONFIG0",
+ "BRD_ID0",
+ "BRD_ID1",
+ "EMMC_DAT0",
+ "EMMC_DAT1",
+ "EMMC_DAT2",
+ "EMMC_DAT3",
+ "EMMC_DAT4",
+ "EMMC_DAT5",
+ "EMMC_DAT6",
+ "EMMC_DAT7",
+ "EMMC_CLK",
+ "EMMC_CMD",
+ "EMMC_RCLK",
+ "PLT_RST_L",
+ "LID_OPEN_1V8_L",
+ "AUDIO_SPI_MISO_R",
+ "",
+ "AC_OK_1V8",
+ "SD_DATA0",
+ "SD_DATA1",
+ "SD_DATA2",
+ "SD_DATA3",
+ "SD_CLK",
+ "SD_CMD",
+ "PWRAP_SPI0_MI",
+ "PWRAP_SPI0_MO",
+ "PWRAP_SPI0_CK",
+ "PWRAP_SPI0_CSN",
+ "",
+ "",
+ "WIFI_PDN",
+ "RTC32K_1V8",
+ "DISP_PWM0",
+ "TOUCHSCREEN_INT_L",
+ "",
+ "SRCLKENA0",
+ "SRCLKENA1",
+ "PS8640_MODE_CONF",
+ "TOUCHSCREEN_RESET_R",
+ "PLATFORM_PROCHOT_L",
+ "PANEL_POWER_EN",
+ "REC_MODE_L",
+ "EC_FW_UPDATE_L",
+ "ACCEL2_INT_L",
+ "HDMI_DP_INT",
+ "ACCELGYRO3_INT_L",
+ "ACCELGYRO4_INT_L",
+ "SPI_EC_CLK",
+ "SPI_EC_MI",
+ "SPI_EC_MO",
+ "SPI_EC_CSN",
+ "SOC_I2C3_1V8_SDA_400K",
+ "SOC_I2C3_1V8_SCL_400K",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "PS8640_SYSRSTN_1V8",
+ "APIN_MAX98090_DOUT2",
+ "TP_INT_1V8_L_R",
+ "RST_USB_HUB_R",
+ "BT_WAKE_L",
+ "ACCEL1_INT_L",
+ "TABLET_MODE_L",
+ "",
+ "V_UP_IN_L_R",
+ "V_DOWN_IN_L_R",
+ "SOC_I2C1_1V8_SDA_1M",
+ "SOC_I2C1_1V8_SCL_1M",
+ "PS8640_PDN_1V8",
+ "MAX98090_LRCLK",
+ "MAX98090_BCLK",
+ "MAX98090_MCLK",
+ "APOUT_MAX98090_DIN",
+ "APIN_MAX98090_DOUT",
+ "SOC_I2C4_1V8_SDA_400K",
+ "SOC_I2C4_1V8_SCL_400K";
+
+ aud_i2s2: aud_i2s2 {
+ pins1 {
+ pinmux = <MT8173_PIN_128_I2S0_LRCK__FUNC_I2S1_WS>,
+ <MT8173_PIN_129_I2S0_BCK__FUNC_I2S1_BCK>,
+ <MT8173_PIN_130_I2S0_MCK__FUNC_I2S1_MCK>,
+ <MT8173_PIN_131_I2S0_DATA0__FUNC_I2S1_DO_1>,
+ <MT8173_PIN_12_EINT12__FUNC_I2S2_WS>,
+ <MT8173_PIN_13_EINT13__FUNC_I2S2_BCK>,
+ <MT8173_PIN_132_I2S0_DATA1__FUNC_I2S2_DI_2>;
+ bias-pull-down;
+ };
+ };
+
+ bl_fixed_pins: bl_fixed_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_32_UTXD2__FUNC_GPIO32>;
+ output-low;
+ };
+ };
+
+ bt_wake_pins: bt_wake_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_119_KPROW0__FUNC_GPIO119>;
+ bias-pull-up;
+ };
+ };
+
+ disp_pwm0_pins: disp_pwm0_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_87_DISP_PWM0__FUNC_DISP_PWM0>;
+ output-low;
+ };
+ };
+
+ gpio_keys_pins: gpio_keys_pins {
+ volume_pins {
+ pinmux = <MT8173_PIN_123_KPCOL1__FUNC_GPIO123>,
+ <MT8173_PIN_124_KPCOL2__FUNC_GPIO124>;
+ bias-pull-up;
+ };
+
+ tablet_mode_pins {
+ pinmux = <MT8173_PIN_121_KPROW2__FUNC_GPIO121>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_mux_pins: hdmi_mux_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_36_DAISYNC__FUNC_GPIO36>;
+ };
+ };
+
+ i2c1_pins_a: i2c1 {
+ da9211_pins {
+ pinmux = <MT8173_PIN_15_EINT15__FUNC_GPIO15>;
+ bias-pull-up;
+ };
+ };
+
+ mmc0_pins_default: mmc0default {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+ <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+ <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+ <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+ <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+ <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+ <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+ <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+ <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+ bias-pull-down;
+ };
+
+ pins_rst {
+ pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_default: mmc1default {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+ <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+ <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+ <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+ <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+ bias-pull-down;
+ drive-strength = <MTK_DRIVE_4mA>;
+ };
+
+ pins_insert {
+ pinmux = <MT8173_PIN_1_EINT1__FUNC_GPIO1>;
+ bias-pull-up;
+ };
+ };
+
+ mmc3_pins_default: mmc3default {
+ pins_dat {
+ pinmux = <MT8173_PIN_22_MSDC3_DAT0__FUNC_MSDC3_DAT0>,
+ <MT8173_PIN_23_MSDC3_DAT1__FUNC_MSDC3_DAT1>,
+ <MT8173_PIN_24_MSDC3_DAT2__FUNC_MSDC3_DAT2>,
+ <MT8173_PIN_25_MSDC3_DAT3__FUNC_MSDC3_DAT3>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_cmd {
+ pinmux = <MT8173_PIN_27_MSDC3_CMD__FUNC_MSDC3_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_26_MSDC3_CLK__FUNC_MSDC3_CLK>;
+ bias-pull-down;
+ drive-strength = <MTK_DRIVE_8mA>;
+ };
+ };
+
+ mmc0_pins_uhs: mmc0 {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+ <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+ <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+ <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+ <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+ <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+ <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+ <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+ <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_ds {
+ pinmux = <MT8173_PIN_67_MSDC0_DSL__FUNC_MSDC0_DSL>;
+ drive-strength = <MTK_DRIVE_10mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_rst {
+ pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_uhs: mmc1 {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+ <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+ <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+ <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+ <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+
+ mmc3_pins_uhs: mmc3 {
+ pins_dat {
+ pinmux = <MT8173_PIN_22_MSDC3_DAT0__FUNC_MSDC3_DAT0>,
+ <MT8173_PIN_23_MSDC3_DAT1__FUNC_MSDC3_DAT1>,
+ <MT8173_PIN_24_MSDC3_DAT2__FUNC_MSDC3_DAT2>,
+ <MT8173_PIN_25_MSDC3_DAT3__FUNC_MSDC3_DAT3>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_cmd {
+ pinmux = <MT8173_PIN_27_MSDC3_CMD__FUNC_MSDC3_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_26_MSDC3_CLK__FUNC_MSDC3_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+
+ nor_gpio1_pins: nor {
+ pins1 {
+ pinmux = <MT8173_PIN_6_EINT6__FUNC_SFCS0>,
+ <MT8173_PIN_7_EINT7__FUNC_SFHOLD>,
+ <MT8173_PIN_8_EINT8__FUNC_SFIN>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up;
+ };
+
+ pins2 {
+ pinmux = <MT8173_PIN_5_EINT5__FUNC_SFOUT>;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_9_EINT9__FUNC_SFCK>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up;
+ };
+ };
+
+ panel_fixed_pins: panel_fixed_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_41_CMMCLK__FUNC_GPIO41>;
+ };
+ };
+
+ ps8640_pins: ps8640_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_92_PCM_CLK__FUNC_GPIO92>,
+ <MT8173_PIN_115_URTS0__FUNC_GPIO115>,
+ <MT8173_PIN_127_LCM_RST__FUNC_GPIO127>;
+ };
+ };
+
+ ps8640_fixed_pins: ps8640_fixed_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_30_URTS2__FUNC_GPIO30>;
+ };
+ };
+
+ rt5650_irq: rt5650_irq {
+ pins1 {
+ pinmux = <MT8173_PIN_3_EINT3__FUNC_GPIO3>;
+ bias-pull-down;
+ };
+ };
+
+ sdio_fixed_3v3_pins: sdio_fixed_3v3_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_85_AUD_DAT_MOSI__FUNC_GPIO85>;
+ output-low;
+ };
+ };
+
+ spi_pins_a: spi1 {
+ pins1 {
+ pinmux = <MT8173_PIN_0_EINT0__FUNC_GPIO0>;
+ bias-pull-up;
+ };
+
+ pins_spi {
+ pinmux = <MT8173_PIN_102_MSDC2_DAT2__FUNC_SPI_CK_1_>,
+ <MT8173_PIN_103_MSDC2_DAT3__FUNC_SPI_MI_1_>,
+ <MT8173_PIN_104_MSDC2_CLK__FUNC_SPI_MO_1_>,
+ <MT8173_PIN_105_MSDC2_CMD__FUNC_SPI_CS_1_>;
+ bias-disable;
+ };
+ };
+
+ trackpad_irq: trackpad_irq {
+ pins1 {
+ pinmux = <MT8173_PIN_117_URXD3__FUNC_GPIO117>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+
+ usb_pins: usb {
+ pins1 {
+ pinmux = <MT8173_PIN_101_MSDC2_DAT1__FUNC_GPIO101>;
+ output-high;
+ bias-disable;
+ };
+ };
+
+ wifi_wake_pins: wifi_wake_pins {
+ pins1 {
+ pinmux = <MT8173_PIN_38_CONN_RST__FUNC_GPIO38>;
+ bias-pull-up;
+ };
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwrap {
+ pmic: mt6397 {
+ compatible = "mediatek,mt6397";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ clock: mt6397clock {
+ compatible = "mediatek,mt6397-clk";
+ #clock-cells = <1>;
+ };
+
+ pio6397: pinctrl {
+ compatible = "mediatek,mt6397-pinctrl";
+ pins-are-numbered;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ regulator: mt6397regulator {
+ compatible = "mediatek,mt6397-regulator";
+
+ mt6397_vpca15_reg: buck_vpca15 {
+ regulator-compatible = "buck_vpca15";
+ regulator-name = "vpca15";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ regulator-allowed-modes = <0 1>;
+ };
+
+ mt6397_vpca7_reg: buck_vpca7 {
+ regulator-compatible = "buck_vpca7";
+ regulator-name = "vpca7";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ regulator-always-on;
+ };
+
+ mt6397_vsramca15_reg: buck_vsramca15 {
+ regulator-compatible = "buck_vsramca15";
+ regulator-name = "vsramca15";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vsramca7_reg: buck_vsramca7 {
+ regulator-compatible = "buck_vsramca7";
+ regulator-name = "vsramca7";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vcore_reg: buck_vcore {
+ regulator-compatible = "buck_vcore";
+ regulator-name = "vcore";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vgpu_reg: buck_vgpu {
+ regulator-compatible = "buck_vgpu";
+ regulator-name = "vgpu";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vdrm_reg: buck_vdrm {
+ regulator-compatible = "buck_vdrm";
+ regulator-name = "vdrm";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vio18_reg: buck_vio18 {
+ regulator-compatible = "buck_vio18";
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vtcxo_reg: ldo_vtcxo {
+ regulator-compatible = "ldo_vtcxo";
+ regulator-name = "vtcxo";
+ regulator-always-on;
+ };
+
+ mt6397_va28_reg: ldo_va28 {
+ regulator-compatible = "ldo_va28";
+ regulator-name = "va28";
+ };
+
+ mt6397_vcama_reg: ldo_vcama {
+ regulator-compatible = "ldo_vcama";
+ regulator-name = "vcama";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vio28_reg: ldo_vio28 {
+ regulator-compatible = "ldo_vio28";
+ regulator-name = "vio28";
+ regulator-always-on;
+ };
+
+ mt6397_vusb_reg: ldo_vusb {
+ regulator-compatible = "ldo_vusb";
+ regulator-name = "vusb";
+ };
+
+ mt6397_vmc_reg: ldo_vmc {
+ regulator-compatible = "ldo_vmc";
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmch_reg: ldo_vmch {
+ regulator-compatible = "ldo_vmch";
+ regulator-name = "vmch";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ regulator-compatible = "ldo_vemc3v3";
+ regulator-name = "vemc_3v3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp1_reg: ldo_vgp1 {
+ regulator-compatible = "ldo_vgp1";
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_vgp2_reg: ldo_vgp2 {
+ regulator-compatible = "ldo_vgp2";
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp3_reg: ldo_vgp3 {
+ regulator-compatible = "ldo_vgp3";
+ regulator-name = "vcamaf";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp4_reg: ldo_vgp4 {
+ regulator-compatible = "ldo_vgp4";
+ regulator-name = "vgp4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp5_reg: ldo_vgp5 {
+ regulator-compatible = "ldo_vgp5";
+ regulator-name = "vgp5";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp6_reg: ldo_vgp6 {
+ regulator-compatible = "ldo_vgp6";
+ regulator-name = "vgp6";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ regulator-always-on;
+ };
+
+ mt6397_vibr_reg: ldo_vibr {
+ regulator-compatible = "ldo_vibr";
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+ };
+
+ rtc: mt6397rtc {
+ compatible = "mediatek,mt6397-rtc";
+ };
+
+ syscfg_pctl_pmic: syscfg_pctl_pmic@c000 {
+ compatible = "mediatek,mt6397-pctl-pmic-syscfg",
+ "syscon";
+ reg = <0 0x0000c000 0 0x0108>;
+ };
+ };
+};
+
+&spi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_pins_a>;
+ mediatek,pad-select = <1>;
+ status = "okay";
+ /* clients */
+ cros_ec: ec@0 {
+ compatible = "google,cros-ec-spi";
+ reg = <0x0>;
+ spi-max-frequency = <12000000>;
+ interrupt-parent = <&pio>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ google,cros-ec-spi-msg-delay = <500>;
+
+ i2c_tunnel: i2c-tunnel0 {
+ compatible = "google,cros-ec-i2c-tunnel";
+ google,remote-bus = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ battery: sbs-battery@b {
+ compatible = "sbs,sbs-battery";
+ reg = <0xb>;
+ sbs,i2c-retry-count = <2>;
+ sbs,poll-retry-count = <1>;
+ };
+ };
+ };
+};
+
+&ssusb {
+ dr_mode = "host";
+ wakeup-source;
+ vusb33-supply = <&mt6397_vusb_reg>;
+ status = "okay";
+};
+
+&thermal {
+ bank0-supply = <&mt6397_vpca15_reg>;
+ bank1-supply = <&da9211_vcpu_reg>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&usb_host {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_pins>;
+ vusb33-supply = <&mt6397_vusb_reg>;
+ status = "okay";
+};
+
+#include <arm/cros-ec-keyboard.dtsi>
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index d819e44d94a8..70b1ffcab7f0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -19,6 +19,7 @@
#include <dt-bindings/power/mt8173-power.h>
#include <dt-bindings/reset/mt8173-resets.h>
#include <dt-bindings/gce/mt8173-gce.h>
+#include <dt-bindings/thermal/thermal.h>
#include "mt8173-pinfunc.h"
/ {
@@ -42,14 +43,18 @@
dpi0 = &dpi0;
dsi0 = &dsi0;
dsi1 = &dsi1;
- mdp_rdma0 = &mdp_rdma0;
- mdp_rdma1 = &mdp_rdma1;
- mdp_rsz0 = &mdp_rsz0;
- mdp_rsz1 = &mdp_rsz1;
- mdp_rsz2 = &mdp_rsz2;
- mdp_wdma0 = &mdp_wdma0;
- mdp_wrot0 = &mdp_wrot0;
- mdp_wrot1 = &mdp_wrot1;
+ mdp-rdma0 = &mdp_rdma0;
+ mdp-rdma1 = &mdp_rdma1;
+ mdp-rsz0 = &mdp_rsz0;
+ mdp-rsz1 = &mdp_rsz1;
+ mdp-rsz2 = &mdp_rsz2;
+ mdp-wdma0 = &mdp_wdma0;
+ mdp-wrot0 = &mdp_wrot0;
+ mdp-wrot1 = &mdp_wrot1;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
};
cluster0_opp: opp_table0 {
@@ -162,6 +167,7 @@
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster0_opp>;
+ capacity-dmips-mhz = <526>;
};
cpu1: cpu@1 {
@@ -176,6 +182,7 @@
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster0_opp>;
+ capacity-dmips-mhz = <526>;
};
cpu2: cpu@100 {
@@ -190,6 +197,7 @@
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster1_opp>;
+ capacity-dmips-mhz = <1024>;
};
cpu3: cpu@101 {
@@ -204,6 +212,7 @@
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster1_opp>;
+ capacity-dmips-mhz = <1024>;
};
idle-states {
@@ -242,21 +251,21 @@
cpu_on = <0x84000003>;
};
- clk26m: oscillator@0 {
+ clk26m: oscillator0 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <26000000>;
clock-output-names = "clk26m";
};
- clk32k: oscillator@1 {
+ clk32k: oscillator1 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32000>;
clock-output-names = "clk32k";
};
- cpum_ck: oscillator@2 {
+ cpum_ck: oscillator2 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <0>;
@@ -272,19 +281,19 @@
sustainable-power = <1500>; /* milliwatts */
trips {
- threshold: trip-point@0 {
+ threshold: trip-point0 {
temperature = <68000>;
hysteresis = <2000>;
type = "passive";
};
- target: trip-point@1 {
+ target: trip-point1 {
temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit: cpu_crit@0 {
+ cpu_crit: cpu_crit0 {
temperature = <115000>;
hysteresis = <2000>;
type = "critical";
@@ -292,16 +301,20 @@
};
cooling-maps {
- map@0 {
+ map0 {
trip = <&target>;
- cooling-device = <&cpu0 0 0>,
- <&cpu1 0 0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
contribution = <3072>;
};
- map@1 {
+ map1 {
trip = <&target>;
- cooling-device = <&cpu2 0 0>,
- <&cpu3 0 0>;
+ cooling-device = <&cpu2 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
contribution = <1024>;
};
};
@@ -312,7 +325,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
- vpu_dma_reserved: vpu_dma_mem_region {
+ vpu_dma_reserved: vpu_dma_mem_region@b7000000 {
compatible = "shared-dma-pool";
reg = <0 0xb7000000 0 0x500000>;
alignment = <0x1000>;
@@ -365,7 +378,7 @@
reg = <0 0x10005000 0 0x1000>;
};
- pio: pinctrl@10005000 {
+ pio: pinctrl@1000b000 {
compatible = "mediatek,mt8173-pinctrl";
reg = <0 0x1000b000 0 0x1000>;
mediatek,pctl-regmap = <&syscfg_pctl_a>;
@@ -549,7 +562,7 @@
interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_LOW>;
clocks = <&infracfg CLK_INFRA_GCE>;
clock-names = "gce";
- #mbox-cells = <3>;
+ #mbox-cells = <2>;
};
mipi_tx0: mipi-dphy@10215000 {
@@ -572,7 +585,7 @@
status = "disabled";
};
- gic: interrupt-controller@10220000 {
+ gic: interrupt-controller@10221000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
@@ -909,13 +922,16 @@
};
};
- mmsys: clock-controller@14000000 {
+ mmsys: syscon@14000000 {
compatible = "mediatek,mt8173-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
assigned-clocks = <&topckgen CLK_TOP_MM_SEL>;
assigned-clock-rates = <400000000>;
#clock-cells = <1>;
+ mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
+ <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
};
mdp_rdma0: rdma@14001000 {
@@ -996,6 +1012,7 @@
clocks = <&mmsys CLK_MM_DISP_OVL0>;
iommus = <&iommu M4U_PORT_DISP_OVL0>;
mediatek,larb = <&larb0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xc000 0x1000>;
};
ovl1: ovl@1400d000 {
@@ -1006,6 +1023,7 @@
clocks = <&mmsys CLK_MM_DISP_OVL1>;
iommus = <&iommu M4U_PORT_DISP_OVL1>;
mediatek,larb = <&larb4>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xd000 0x1000>;
};
rdma0: rdma@1400e000 {
@@ -1016,6 +1034,7 @@
clocks = <&mmsys CLK_MM_DISP_RDMA0>;
iommus = <&iommu M4U_PORT_DISP_RDMA0>;
mediatek,larb = <&larb0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xe000 0x1000>;
};
rdma1: rdma@1400f000 {
@@ -1026,6 +1045,7 @@
clocks = <&mmsys CLK_MM_DISP_RDMA1>;
iommus = <&iommu M4U_PORT_DISP_RDMA1>;
mediatek,larb = <&larb4>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
};
rdma2: rdma@14010000 {
@@ -1036,6 +1056,7 @@
clocks = <&mmsys CLK_MM_DISP_RDMA2>;
iommus = <&iommu M4U_PORT_DISP_RDMA2>;
mediatek,larb = <&larb4>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
};
wdma0: wdma@14011000 {
@@ -1046,6 +1067,7 @@
clocks = <&mmsys CLK_MM_DISP_WDMA0>;
iommus = <&iommu M4U_PORT_DISP_WDMA0>;
mediatek,larb = <&larb0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
};
wdma1: wdma@14012000 {
@@ -1056,6 +1078,7 @@
clocks = <&mmsys CLK_MM_DISP_WDMA1>;
iommus = <&iommu M4U_PORT_DISP_WDMA1>;
mediatek,larb = <&larb4>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
};
color0: color@14013000 {
@@ -1064,6 +1087,7 @@
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_COLOR0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x3000 0x1000>;
};
color1: color@14014000 {
@@ -1072,6 +1096,7 @@
interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_COLOR1>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x4000 0x1000>;
};
aal@14015000 {
@@ -1080,6 +1105,7 @@
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_AAL>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x5000 0x1000>;
};
gamma@14016000 {
@@ -1088,6 +1114,7 @@
interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_DISP_GAMMA>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x6000 0x1000>;
};
merge@14017000 {
@@ -1193,6 +1220,8 @@
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
clocks = <&mmsys CLK_MM_MUTEX_32K>;
+ mediatek,gce-events = <CMDQ_EVENT_MUTEX0_STREAM_EOF>,
+ <CMDQ_EVENT_MUTEX1_STREAM_EOF>;
};
larb0: larb@14021000 {
@@ -1437,4 +1466,3 @@
};
};
};
-
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index 1fb195c683c3..afd6ddbcbdf2 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include "mt8183.dtsi"
+#include "mt6358.dtsi"
/ {
model = "MediaTek MT8183 evaluation board";
@@ -72,6 +73,47 @@
clock-frequency = <1000000>;
};
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ cap-mmc-hw-reset;
+ no-sdio;
+ no-sd;
+ hs400-ds-delay = <0x12814>;
+ vmmc-supply = <&mt6358_vemc_reg>;
+ vqmmc-supply = <&mt6358_vio18_reg>;
+ assigned-clocks = <&topckgen CLK_TOP_MUX_MSDC50_0>;
+ assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_CK>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ cap-sdio-irq;
+ no-mmc;
+ no-sd;
+ vmmc-supply = <&mt6358_vmch_reg>;
+ vqmmc-supply = <&mt6358_vmc_reg>;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ non-removable;
+};
+
&pio {
i2c_pins_0: i2c0{
pins_i2c{
@@ -137,6 +179,111 @@
};
};
+ mmc0_pins_default: mmc0default {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO132__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO126__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO129__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO127__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO130__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO122__FUNC_MSDC0_CMD>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ bias-pull-down;
+ };
+
+ pins_rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc0_pins_uhs: mmc0@0{
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO132__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO126__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO129__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO127__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO130__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO122__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_10mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_10mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_ds {
+ pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
+ drive-strength = <MTK_DRIVE_10mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_10mA>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_default: mmc1default {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+ <PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
+ <PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ input-enable;
+ bias-pull-down;
+ };
+
+ pins_pmu {
+ pinmux = <PINMUX_GPIO178__FUNC_GPIO178>,
+ <PINMUX_GPIO166__FUNC_GPIO166>;
+ output-high;
+ };
+ };
+
+ mmc1_pins_uhs: mmc1@0{
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+ <PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
+ <PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
+ drive-strength = <MTK_DRIVE_6mA>;
+ input-enable;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_6mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ input-enable;
+ };
+ };
+
spi_pins_1: spi1{
pins_spi{
pinmux = <PINMUX_GPIO161__FUNC_SPI1_A_MI>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 97863adb7bc0..1e03c849dc5d 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -74,7 +74,7 @@
reg = <0x000>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP0>;
dynamic-power-coefficient = <84>;
#cooling-cells = <2>;
};
@@ -85,7 +85,7 @@
reg = <0x001>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP0>;
dynamic-power-coefficient = <84>;
#cooling-cells = <2>;
};
@@ -96,7 +96,7 @@
reg = <0x002>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP0>;
dynamic-power-coefficient = <84>;
#cooling-cells = <2>;
};
@@ -107,7 +107,7 @@
reg = <0x003>;
enable-method = "psci";
capacity-dmips-mhz = <741>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP0>;
dynamic-power-coefficient = <84>;
#cooling-cells = <2>;
};
@@ -118,7 +118,7 @@
reg = <0x100>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP1>;
dynamic-power-coefficient = <211>;
#cooling-cells = <2>;
};
@@ -129,7 +129,7 @@
reg = <0x101>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP1>;
dynamic-power-coefficient = <211>;
#cooling-cells = <2>;
};
@@ -140,7 +140,7 @@
reg = <0x102>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP1>;
dynamic-power-coefficient = <211>;
#cooling-cells = <2>;
};
@@ -151,7 +151,7 @@
reg = <0x103>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP>;
+ cpu-idle-states = <&CPU_SLEEP &CLUSTER_SLEEP1>;
dynamic-power-coefficient = <211>;
#cooling-cells = <2>;
};
@@ -168,7 +168,15 @@
min-residency-us = <800>;
};
- CLUSTER_SLEEP: cluster-sleep {
+ CLUSTER_SLEEP0: cluster-sleep@0 {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ arm,psci-suspend-param = <0x01010001>;
+ entry-latency-us = <250>;
+ exit-latency-us = <400>;
+ min-residency-us = <1000>;
+ };
+ CLUSTER_SLEEP1: cluster-sleep@1 {
compatible = "arm,idle-state";
local-timer-stop;
arm,psci-suspend-param = <0x01010001>;
@@ -640,6 +648,30 @@
#clock-cells = <1>;
};
+ mmc0: mmc@11230000 {
+ compatible = "mediatek,mt8183-mmc";
+ reg = <0 0x11230000 0 0x1000>,
+ <0 0x11f50000 0 0x1000>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_MUX_MSDC50_0>,
+ <&infracfg CLK_INFRA_MSDC0>,
+ <&infracfg CLK_INFRA_MSDC0_SCK>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
+ mmc1: mmc@11240000 {
+ compatible = "mediatek,mt8183-mmc";
+ reg = <0 0x11240000 0 0x1000>,
+ <0 0x11e10000 0 0x1000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_MUX_MSDC30_1>,
+ <&infracfg CLK_INFRA_MSDC1>,
+ <&infracfg CLK_INFRA_MSDC1_SCK>;
+ clock-names = "source", "hclk", "source_cg";
+ status = "disabled";
+ };
+
efuse: efuse@11f10000 {
compatible = "mediatek,mt8183-efuse",
"mediatek,efuse";
diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
index a0385a386a3f..9f3206c63900 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
@@ -990,7 +990,7 @@
};
panel: panel {
- compatible = "innolux,n116bge", "simple-panel";
+ compatible = "innolux,n116bge";
backlight = <&backlight>;
ddc-i2c-bus = <&dpaux>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index da96de04d003..2fcaa2e64370 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -221,7 +221,8 @@
compatible = "maxim,max77620";
reg = <0x3c>;
- interrupts = <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pmc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_LOW>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index 623f7d7d216b..b96eb4e14556 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -33,7 +33,7 @@
phy-reset-gpios = <&gpio TEGRA194_MAIN_GPIO(G, 5) GPIO_ACTIVE_LOW>;
phy-handle = <&phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
mdio {
#address-cells = <1>;
@@ -111,7 +111,8 @@
compatible = "maxim,max20024";
reg = <0x3c>;
- interrupts = <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&pmc>;
+ interrupts = <24 IRQ_TYPE_LEVEL_LOW>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index f4ede86e32b4..4bc187a4eacd 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -644,6 +644,24 @@
};
};
+ usb@3550000 {
+ compatible = "nvidia,tegra194-xudc";
+ reg = <0x03550000 0x8000>,
+ <0x03558000 0x1000>;
+ reg-names = "base", "fpci";
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&bpmp TEGRA194_CLK_XUSB_CORE_DEV>,
+ <&bpmp TEGRA194_CLK_XUSB_CORE_SS>,
+ <&bpmp TEGRA194_CLK_XUSB_SS>,
+ <&bpmp TEGRA194_CLK_XUSB_FS>;
+ clock-names = "dev", "ss", "ss_src", "fs_src";
+ power-domains = <&bpmp TEGRA194_POWER_DOMAIN_XUSBB>,
+ <&bpmp TEGRA194_POWER_DOMAIN_XUSBA>;
+ power-domain-names = "dev", "ss";
+ nvidia,xusb-padctl = <&xusb_padctl>;
+ status = "disabled";
+ };
+
usb@3610000 {
compatible = "nvidia,tegra194-xusb";
reg = <0x03610000 0x40000>,
@@ -1387,7 +1405,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x30100000 0x0 0x30100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0xc2000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */
+ 0xc3000000 0x12 0x00000000 0x12 0x00000000 0x0 0x30000000 /* prefetchable memory (768MB) */
0x82000000 0x0 0x40000000 0x12 0x30000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */
};
@@ -1432,7 +1450,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x32100000 0x0 0x32100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0xc2000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */
+ 0xc3000000 0x12 0x40000000 0x12 0x40000000 0x0 0x30000000 /* prefetchable memory (768MB) */
0x82000000 0x0 0x40000000 0x12 0x70000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */
};
@@ -1477,7 +1495,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x34100000 0x0 0x34100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0xc2000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */
+ 0xc3000000 0x12 0x80000000 0x12 0x80000000 0x0 0x30000000 /* prefetchable memory (768MB) */
0x82000000 0x0 0x40000000 0x12 0xb0000000 0x0 0x10000000>; /* non-prefetchable memory (256MB) */
};
@@ -1522,7 +1540,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x36100000 0x0 0x36100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0xc2000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
+ 0xc3000000 0x14 0x00000000 0x14 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
0x82000000 0x0 0x40000000 0x17 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
};
@@ -1567,7 +1585,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x38100000 0x0 0x38100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0xc2000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
+ 0xc3000000 0x18 0x00000000 0x18 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
0x82000000 0x0 0x40000000 0x1b 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
};
@@ -1616,7 +1634,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x3a100000 0x0 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */
- 0xc2000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
+ 0xc3000000 0x1c 0x00000000 0x1c 0x00000000 0x3 0x40000000 /* prefetchable memory (13GB) */
0x82000000 0x0 0x40000000 0x1f 0x40000000 0x0 0xc0000000>; /* non-prefetchable memory (3GB) */
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index f87d2437d11c..cc6ed45a2b48 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -38,7 +38,8 @@
pmic: pmic@3c {
compatible = "maxim,max77620";
reg = <0x3c>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&tegra_pmc>;
+ interrupts = <51 IRQ_TYPE_LEVEL_LOW>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index 313a4c29d37a..b57d837d5fc7 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -14,6 +14,16 @@
status = "okay";
};
+ vi@54080000 {
+ status = "okay";
+
+ avdd-dsi-csi-supply = <&vdd_dsi_csi>;
+
+ csi@838 {
+ status = "okay";
+ };
+ };
+
sor@54580000 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index 21ed1756b889..9bc52fdb393c 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -90,6 +90,10 @@
dpaux@545c0000 {
status = "okay";
};
+
+ i2c@546c0000 {
+ status = "okay";
+ };
};
gpu@57000000 {
@@ -145,7 +149,8 @@
pmic: pmic@3c {
compatible = "maxim,max77620";
reg = <0x3c>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&tegra_pmc>;
+ interrupts = <51 IRQ_TYPE_LEVEL_LOW>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 64c46ce3849d..08655081f72d 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -137,9 +137,44 @@
vi@54080000 {
compatible = "nvidia,tegra210-vi";
- reg = <0x0 0x54080000 0x0 0x00040000>;
+ reg = <0x0 0x54080000 0x0 0x700>;
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
+ assigned-clocks = <&tegra_car TEGRA210_CLK_VI>;
+ assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_C4_OUT0>;
+
+ clocks = <&tegra_car TEGRA210_CLK_VI>;
+ power-domains = <&pd_venc>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x0 0x0 0x54080000 0x2000>;
+
+ csi@838 {
+ compatible = "nvidia,tegra210-csi";
+ reg = <0x838 0x1300>;
+ status = "disabled";
+ assigned-clocks = <&tegra_car TEGRA210_CLK_CILAB>,
+ <&tegra_car TEGRA210_CLK_CILCD>,
+ <&tegra_car TEGRA210_CLK_CILE>,
+ <&tegra_car TEGRA210_CLK_CSI_TPG>;
+ assigned-clock-parents = <&tegra_car TEGRA210_CLK_PLL_P>,
+ <&tegra_car TEGRA210_CLK_PLL_P>,
+ <&tegra_car TEGRA210_CLK_PLL_P>;
+ assigned-clock-rates = <102000000>,
+ <102000000>,
+ <102000000>,
+ <972000000>;
+
+ clocks = <&tegra_car TEGRA210_CLK_CSI>,
+ <&tegra_car TEGRA210_CLK_CILAB>,
+ <&tegra_car TEGRA210_CLK_CILCD>,
+ <&tegra_car TEGRA210_CLK_CILE>,
+ <&tegra_car TEGRA210_CLK_CSI_TPG>;
+ clock-names = "csi", "cilab", "cilcd", "cile", "csi_tpg";
+ power-domains = <&pd_sor>;
+ };
};
tsec@54100000 {
@@ -796,7 +831,9 @@
pd_sor: sor {
clocks = <&tegra_car TEGRA210_CLK_SOR0>,
<&tegra_car TEGRA210_CLK_SOR1>,
- <&tegra_car TEGRA210_CLK_CSI>,
+ <&tegra_car TEGRA210_CLK_CILAB>,
+ <&tegra_car TEGRA210_CLK_CILCD>,
+ <&tegra_car TEGRA210_CLK_CILE>,
<&tegra_car TEGRA210_CLK_DSIA>,
<&tegra_car TEGRA210_CLK_DSIB>,
<&tegra_car TEGRA210_CLK_DPAUX>,
@@ -804,7 +841,6 @@
<&tegra_car TEGRA210_CLK_MIPI_CAL>;
resets = <&tegra_car TEGRA210_CLK_SOR0>,
<&tegra_car TEGRA210_CLK_SOR1>,
- <&tegra_car TEGRA210_CLK_CSI>,
<&tegra_car TEGRA210_CLK_DSIA>,
<&tegra_car TEGRA210_CLK_DSIB>,
<&tegra_car TEGRA210_CLK_DPAUX>,
@@ -838,6 +874,15 @@
reset-names = "vic";
#power-domain-cells = <0>;
};
+
+ pd_venc: venc {
+ clocks = <&tegra_car TEGRA210_CLK_VI>,
+ <&tegra_car TEGRA210_CLK_CSI>;
+ resets = <&mc TEGRA210_MC_RESET_VI>,
+ <&tegra_car 20>,
+ <&tegra_car 52>;
+ #power-domain-cells = <0>;
+ };
};
sdmmc1_3v3: sdmmc1-3v3 {
@@ -893,6 +938,19 @@
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ emc: external-memory-controller@7001b000 {
+ compatible = "nvidia,tegra210-emc";
+ reg = <0x0 0x7001b000 0x0 0x1000>,
+ <0x0 0x7001e000 0x0 0x1000>,
+ <0x0 0x7001f000 0x0 0x1000>;
+ clocks = <&tegra_car TEGRA210_CLK_EMC>;
+ clock-names = "emc";
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ nvidia,memory-controller = <&mc>;
+ #cooling-cells = <2>;
};
sata@70020000 {
@@ -1550,6 +1608,18 @@
<&soctherm TEGRA124_SOCTHERM_SENSOR_MEM>;
trips {
+ dram_nominal: mem-nominal-trip {
+ temperature = <50000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+
+ dram_throttle: mem-throttle-trip {
+ temperature = <70000>;
+ hysteresis = <1000>;
+ type = "active";
+ };
+
mem-shutdown-trip {
temperature = <103000>;
hysteresis = <0>;
@@ -1558,10 +1628,15 @@
};
cooling-maps {
- /*
- * There are currently no cooling maps,
- * because there are no cooling devices.
- */
+ dram-passive {
+ cooling-device = <&emc 0 0>;
+ trip = <&dram_nominal>;
+ };
+
+ dram-active {
+ cooling-device = <&emc 1 1>;
+ trip = <&dram_throttle>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index cc103f7020fd..0f2c33d611df 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -16,6 +16,7 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8998-hp-envy-x2.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-lenovo-miix-630.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sdm660-xiaomi-lavender.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r2.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r3.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
deleted file mode 100644
index aff218c1b7b6..000000000000
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
-#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
-
-&pm8916_gpios {
-
- usb_hub_reset_pm: usb_hub_reset_pm {
- pinconf {
- pins = "gpio3";
- function = PMIC_GPIO_FUNC_NORMAL;
- input-disable;
- output-high;
- };
- };
-
- usb_hub_reset_pm_device: usb_hub_reset_pm_device {
- pinconf {
- pins = "gpio3";
- function = PMIC_GPIO_FUNC_NORMAL;
- output-low;
- };
- };
-
- usb_sw_sel_pm: usb_sw_sel_pm {
- pinconf {
- pins = "gpio4";
- function = PMIC_GPIO_FUNC_NORMAL;
- power-source = <PM8916_GPIO_VPH>;
- input-disable;
- output-high;
- };
- };
-
- usb_sw_sel_pm_device: usb_sw_sel_pm_device {
- pinconf {
- pins = "gpio4";
- function = PMIC_GPIO_FUNC_NORMAL;
- power-source = <PM8916_GPIO_VPH>;
- input-disable;
- output-low;
- };
- };
-
- pm8916_gpios_leds: pm8916_gpios_leds {
- pinconf {
- pins = "gpio1", "gpio2";
- function = PMIC_GPIO_FUNC_NORMAL;
- output-low;
- };
- };
-};
-
-&pm8916_mpps {
-
- pinctrl-names = "default";
- pinctrl-0 = <&ls_exp_gpio_f>;
-
- ls_exp_gpio_f: pm8916_mpp4 {
- pinconf {
- pins = "mpp4";
- function = "digital";
- output-low;
- power-source = <PM8916_MPP_L5>; // 1.8V
- };
- };
-
- pm8916_mpps_leds: pm8916_mpps_leds {
- pinconf {
- pins = "mpp2", "mpp3";
- function = "digital";
- output-low;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
deleted file mode 100644
index 21d0822f1ca6..000000000000
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <dt-bindings/gpio/gpio.h>
-
-&msmgpio {
-
- msmgpio_leds: msmgpio_leds {
- pinconf {
- pins = "gpio21", "gpio120";
- function = "gpio";
- output-low;
- };
- };
-
- usb_id_default: usb-id-default {
- pinmux {
- function = "gpio";
- pins = "gpio121";
- };
-
- pinconf {
- pins = "gpio121";
- drive-strength = <8>;
- input-enable;
- bias-pull-up;
- };
- };
-
- adv7533_int_active: adv533_int_active {
- pinmux {
- function = "gpio";
- pins = "gpio31";
- };
- pinconf {
- pins = "gpio31";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- adv7533_int_suspend: adv7533_int_suspend {
- pinmux {
- function = "gpio";
- pins = "gpio31";
- };
- pinconf {
- pins = "gpio31";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- adv7533_switch_active: adv7533_switch_active {
- pinmux {
- function = "gpio";
- pins = "gpio32";
- };
- pinconf {
- pins = "gpio32";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- adv7533_switch_suspend: adv7533_switch_suspend {
- pinmux {
- function = "gpio";
- pins = "gpio32";
- };
- pinconf {
- pins = "gpio32";
- drive-strength = <2>;
- bias-disable;
- };
- };
-
- msm_key_volp_n_default: msm_key_volp_n_default {
- pinmux {
- function = "gpio";
- pins = "gpio107";
- };
- pinconf {
- pins = "gpio107";
- drive-strength = <8>;
- input-enable;
- bias-pull-up;
- };
- };
-};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 06aab44d798c..8a4b790aa7ff 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -5,10 +5,10 @@
#include "msm8916.dtsi"
#include "pm8916.dtsi"
-#include "apq8016-sbc-soc-pins.dtsi"
-#include "apq8016-sbc-pmic-pins.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
#include <dt-bindings/sound/apq8016-lpass.h>
/*
@@ -51,6 +51,30 @@
stdout-path = "serial0";
};
+ camera_vdddo_1v8: camera-vdddo-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vdddo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ camera_vdda_2v8: camera-vdda-2v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vdda";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ camera_vddd_1v5: camera-vddd-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vddd";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+
reserved-memory {
ramoops@bff00000{
compatible = "ramoops";
@@ -495,6 +519,27 @@
wcnss@a21b000 {
status = "okay";
};
+
+ tpiu@820000 { status = "okay"; };
+ funnel@821000 { status = "okay"; };
+ replicator@824000 { status = "okay"; };
+ etf@825000 { status = "okay"; };
+ etr@826000 { status = "okay"; };
+ funnel@841000 { status = "okay"; };
+ debug@850000 { status = "okay"; };
+ debug@852000 { status = "okay"; };
+ debug@854000 { status = "okay"; };
+ debug@856000 { status = "okay"; };
+ etm@85c000 { status = "okay"; };
+ etm@85d000 { status = "okay"; };
+ etm@85e000 { status = "okay"; };
+ etm@85f000 { status = "okay"; };
+ cti@810000 { status = "okay"; };
+ cti@811000 { status = "okay"; };
+ cti@858000 { status = "okay"; };
+ cti@859000 { status = "okay"; };
+ cti@85a000 { status = "okay"; };
+ cti@85b000 { status = "okay"; };
};
usb2513 {
@@ -521,7 +566,7 @@
};
};
- gpio_keys {
+ gpio-keys {
compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
@@ -538,6 +583,58 @@
};
};
+&camss {
+ status = "ok";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ csiphy0_ep: endpoint {
+ clock-lanes = <1>;
+ data-lanes = <0 2>;
+ remote-endpoint = <&ov5640_ep>;
+ status = "okay";
+ };
+ };
+ };
+};
+
+&cci {
+ status = "ok";
+};
+
+&cci_i2c0 {
+ camera_rear@3b {
+ compatible = "ovti,ov5640";
+ reg = <0x3b>;
+
+ enable-gpios = <&msmgpio 34 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&msmgpio 35 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&camera_rear_default>;
+
+ clocks = <&gcc GCC_CAMSS_MCLK0_CLK>;
+ clock-names = "xclk";
+ clock-frequency = <23880000>;
+
+ vdddo-supply = <&camera_vdddo_1v8>;
+ vdda-supply = <&camera_vdda_2v8>;
+ vddd-supply = <&camera_vddd_1v5>;
+
+ /* No camera mezzanine by default */
+ status = "disabled";
+
+ port {
+ ov5640_ep: endpoint {
+ clock-lanes = <1>;
+ data-lanes = <0 2>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
+};
+
&spmi_bus {
pm8916_0: pm8916@0 {
pon@800 {
@@ -680,3 +777,157 @@
regulator-max-microvolt = <3337000>;
};
};
+
+&msmgpio {
+ msmgpio_leds: msmgpio-leds {
+ pinconf {
+ pins = "gpio21", "gpio120";
+ function = "gpio";
+ output-low;
+ };
+ };
+
+ usb_id_default: usb-id-default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio121";
+ };
+
+ pinconf {
+ pins = "gpio121";
+ drive-strength = <8>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+
+ adv7533_int_active: adv533-int-active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_int_suspend: adv7533-int-suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_active: adv7533-switch-active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_suspend: adv7533-switch-suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ msm_key_volp_n_default: msm-key-volp-n-default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio107";
+ };
+ pinconf {
+ pins = "gpio107";
+ drive-strength = <8>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+};
+
+&pm8916_gpios {
+ usb_hub_reset_pm: usb-hub-reset-pm {
+ pinconf {
+ pins = "gpio3";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-disable;
+ output-high;
+ };
+ };
+
+ usb_hub_reset_pm_device: usb-hub-reset-pm-device {
+ pinconf {
+ pins = "gpio3";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ };
+ };
+
+ usb_sw_sel_pm: usb-sw-sel-pm {
+ pinconf {
+ pins = "gpio4";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <PM8916_GPIO_VPH>;
+ input-disable;
+ output-high;
+ };
+ };
+
+ usb_sw_sel_pm_device: usb-sw-sel-pm-device {
+ pinconf {
+ pins = "gpio4";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <PM8916_GPIO_VPH>;
+ input-disable;
+ output-low;
+ };
+ };
+
+ pm8916_gpios_leds: pm8916-gpios-leds {
+ pinconf {
+ pins = "gpio1", "gpio2";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ };
+ };
+};
+
+&pm8916_mpps {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f>;
+
+ ls_exp_gpio_f: pm8916-mpp4 {
+ pinconf {
+ pins = "mpp4";
+ function = "digital";
+ output-low;
+ power-source = <PM8916_MPP_L5>; // 1.8V
+ };
+ };
+
+ pm8916_mpps_leds: pm8916-mpps-leds {
+ pinconf {
+ pins = "mpp2", "mpp3";
+ function = "digital";
+ output-low;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index c4abbccf2bed..defcbd15edf9 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -117,16 +117,6 @@
regulator-max-microvolt = <3700000>;
};
- vreg_s8a_l3a_input: vreg-s8a-l3a-input {
- compatible = "regulator-fixed";
- regulator-name = "vreg_s8a_l3a_input";
- regulator-always-on;
- regulator-boot-on;
-
- regulator-min-microvolt = <0>;
- regulator-max-microvolt = <0>;
- };
-
wlan_en: wlan-en-1-8v {
pinctrl-names = "default";
pinctrl-0 = <&wlan_en_gpios>;
@@ -251,6 +241,10 @@
status = "okay";
};
+&mmcc {
+ vdd-gfx-supply = <&vdd_gfx>;
+};
+
&msmgpio {
gpio-line-names =
"[SPI0_DOUT]", /* GPIO_0, BLSP1_SPI_MOSI, LSEC pin 14 */
@@ -688,6 +682,15 @@
};
};
+&pmi8994_spmi_regulators {
+ vdd_gfx: s2@1700 {
+ reg = <0x1700 0x100>;
+ regulator-name = "VDD_GFX";
+ regulator-min-microvolt = <980000>;
+ regulator-max-microvolt = <980000>;
+ };
+};
+
&rpm_requests {
pm8994-regulators {
compatible = "qcom,rpm-pm8994-regulators";
@@ -704,15 +707,20 @@
vdd_s10-supply = <&vph_pwr>;
vdd_s11-supply = <&vph_pwr>;
vdd_s12-supply = <&vph_pwr>;
+ vdd_l1-supply = <&vreg_s1b_1p025>;
vdd_l2_l26_l28-supply = <&vreg_s3a_1p3>;
- vdd_l3_l11-supply = <&vreg_s8a_l3a_input>;
+ vdd_l3_l11-supply = <&vreg_s3a_1p3>;
vdd_l4_l27_l31-supply = <&vreg_s3a_1p3>;
vdd_l5_l7-supply = <&vreg_s5a_2p15>;
vdd_l6_l12_l32-supply = <&vreg_s5a_2p15>;
vdd_l8_l16_l30-supply = <&vph_pwr>;
+ vdd_l9_l10_l18_l22-supply = <&vph_pwr_bbyp>;
+ vdd_l13_l19_l23_l24-supply = <&vph_pwr_bbyp>;
vdd_l14_l15-supply = <&vreg_s5a_2p15>;
+ vdd_l17_l29-supply = <&vph_pwr_bbyp>;
+ vdd_l20_l21-supply = <&vph_pwr_bbyp>;
vdd_l25-supply = <&vreg_s3a_1p3>;
- vdd_lvs1_2-supply = <&vreg_s4a_1p8>;
+ vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>;
vreg_s3a_1p3: s3 {
regulator-name = "vreg_s3a_1p3";
@@ -895,6 +903,27 @@
regulator-name = "vreg_lvs2a_1p8";
};
};
+
+ pmi8994-regulators {
+ compatible = "qcom,rpm-pmi8994-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_bst_byp-supply = <&vph_pwr>;
+
+ vph_pwr_bbyp: boost-bypass {
+ regulator-name = "vph_pwr_bbyp";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vreg_s1b_1p025: s1 {
+ regulator-name = "vreg_s1b_1p025";
+ regulator-min-microvolt = <1025000>;
+ regulator-max-microvolt = <1025000>;
+ };
+ };
};
&sdhc2 {
diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
index 70be3f95209b..6754cb0638f4 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
@@ -24,63 +24,61 @@
device_type = "memory";
reg = <0x0 0x40000000 0x0 0x20000000>;
};
+};
+
+&blsp1_i2c2 {
+ status = "ok";
+};
+
+&blsp1_spi1 {
+ status = "ok";
+
+ m25p80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&blsp1_uart3 {
+ status = "ok";
+};
+
+&blsp1_uart5 {
+ status = "ok";
+};
+
+&pcie0 {
+ status = "ok";
+ perst-gpio = <&tlmm 61 0x1>;
+};
+
+&pcie1 {
+ status = "ok";
+ perst-gpio = <&tlmm 58 0x1>;
+};
+
+&pcie_phy0 {
+ status = "ok";
+};
+
+&pcie_phy1 {
+ status = "ok";
+};
+
+&qpic_bam {
+ status = "ok";
+};
+
+&qpic_nand {
+ status = "ok";
- soc {
- serial@78b3000 {
- status = "ok";
- };
-
- spi@78b5000 {
- status = "ok";
-
- m25p80@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "jedec,spi-nor";
- reg = <0>;
- spi-max-frequency = <50000000>;
- };
- };
-
- serial@78b1000 {
- status = "ok";
- };
-
- i2c@78b6000 {
- status = "ok";
- };
-
- dma@7984000 {
- status = "ok";
- };
-
- nand@79b0000 {
- status = "ok";
-
- nand@0 {
- reg = <0>;
- nand-ecc-strength = <4>;
- nand-ecc-step-size = <512>;
- nand-bus-width = <8>;
- };
- };
-
- phy@86000 {
- status = "ok";
- };
-
- phy@8e000 {
- status = "ok";
- };
-
- pci@20000000 {
- status = "ok";
- perst-gpio = <&tlmm 58 0x1>;
- };
-
- pci@10000000 {
- status = "ok";
- perst-gpio = <&tlmm 61 0x1>;
- };
+ nand@0 {
+ reg = <0>;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ nand-bus-width = <8>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 2b31823d3ccd..5303821300b4 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -10,15 +10,111 @@
model = "Qualcomm Technologies, Inc. IPQ8074";
compatible = "qcom,ipq8074";
+ clocks {
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+
+ xo: xo {
+ compatible = "fixed-clock";
+ clock-frequency = <19200000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ cpus {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x1>;
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x2>;
+ next-level-cache = <&L2_0>;
+ };
+
+ CPU3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x3>;
+ next-level-cache = <&L2_0>;
+ };
+
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <0x2>;
+ };
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
soc: soc {
#address-cells = <0x1>;
#size-cells = <0x1>;
ranges = <0 0 0 0xffffffff>;
compatible = "simple-bus";
+ pcie_phy0: phy@86000 {
+ compatible = "qcom,ipq8074-qmp-pcie-phy";
+ reg = <0x00086000 0x1000>;
+ #phy-cells = <0>;
+ clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
+ clock-names = "pipe_clk";
+ clock-output-names = "pcie20_phy0_pipe_clk";
+
+ resets = <&gcc GCC_PCIE0_PHY_BCR>,
+ <&gcc GCC_PCIE0PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
+ status = "disabled";
+ };
+
+ pcie_phy1: phy@8e000 {
+ compatible = "qcom,ipq8074-qmp-pcie-phy";
+ reg = <0x0008e000 0x1000>;
+ #phy-cells = <0>;
+ clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
+ clock-names = "pipe_clk";
+ clock-output-names = "pcie20_phy1_pipe_clk";
+
+ resets = <&gcc GCC_PCIE1_PHY_BCR>,
+ <&gcc GCC_PCIE1PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
+ status = "disabled";
+ };
+
tlmm: pinctrl@1000000 {
compatible = "qcom,ipq8074-pinctrl";
- reg = <0x1000000 0x300000>;
+ reg = <0x01000000 0x300000>;
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
gpio-controller;
gpio-ranges = <&tlmm 0 0 70>;
@@ -66,102 +162,16 @@
};
};
- intc: interrupt-controller@b000000 {
- compatible = "qcom,msm-qgic2";
- interrupt-controller;
- #interrupt-cells = <0x3>;
- reg = <0xb000000 0x1000>, <0xb002000 0x1000>;
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- };
-
- timer@b120000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- compatible = "arm,armv7-timer-mem";
- reg = <0xb120000 0x1000>;
- clock-frequency = <19200000>;
-
- frame@b120000 {
- frame-number = <0>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb121000 0x1000>,
- <0xb122000 0x1000>;
- };
-
- frame@b123000 {
- frame-number = <1>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb123000 0x1000>;
- status = "disabled";
- };
-
- frame@b124000 {
- frame-number = <2>;
- interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb124000 0x1000>;
- status = "disabled";
- };
-
- frame@b125000 {
- frame-number = <3>;
- interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb125000 0x1000>;
- status = "disabled";
- };
-
- frame@b126000 {
- frame-number = <4>;
- interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb126000 0x1000>;
- status = "disabled";
- };
-
- frame@b127000 {
- frame-number = <5>;
- interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb127000 0x1000>;
- status = "disabled";
- };
-
- frame@b128000 {
- frame-number = <6>;
- interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xb128000 0x1000>;
- status = "disabled";
- };
- };
-
gcc: gcc@1800000 {
compatible = "qcom,gcc-ipq8074";
- reg = <0x1800000 0x80000>;
+ reg = <0x01800000 0x80000>;
#clock-cells = <0x1>;
#reset-cells = <0x1>;
};
- blsp1_uart5: serial@78b3000 {
- compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x78b3000 0x200>;
- interrupts = <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP1_UART5_APPS_CLK>,
- <&gcc GCC_BLSP1_AHB_CLK>;
- clock-names = "core", "iface";
- pinctrl-0 = <&serial_4_pins>;
- pinctrl-names = "default";
- status = "disabled";
- };
-
blsp_dma: dma@7884000 {
compatible = "qcom,bam-v1.7.0";
- reg = <0x7884000 0x2b000>;
+ reg = <0x07884000 0x2b000>;
interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "bam_clk";
@@ -171,7 +181,7 @@
blsp1_uart1: serial@78af000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x78af000 0x200>;
+ reg = <0x078af000 0x200>;
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
@@ -181,7 +191,7 @@
blsp1_uart3: serial@78b1000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
- reg = <0x78b1000 0x200>;
+ reg = <0x078b1000 0x200>;
interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>,
<&gcc GCC_BLSP1_AHB_CLK>;
@@ -194,11 +204,23 @@
status = "disabled";
};
+ blsp1_uart5: serial@78b3000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x078b3000 0x200>;
+ interrupts = <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART5_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ pinctrl-0 = <&serial_4_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
blsp1_spi1: spi@78b5000 {
compatible = "qcom,spi-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x78b5000 0x600>;
+ reg = <0x078b5000 0x600>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
spi-max-frequency = <50000000>;
clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
@@ -215,7 +237,7 @@
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x78b6000 0x600>;
+ reg = <0x078b6000 0x600>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
@@ -232,7 +254,7 @@
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;
#size-cells = <0>;
- reg = <0x78b7000 0x600>;
+ reg = <0x078b7000 0x600>;
interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_AHB_CLK>,
<&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>;
@@ -245,7 +267,7 @@
qpic_bam: dma@7984000 {
compatible = "qcom,bam-v1.7.0";
- reg = <0x7984000 0x1a000>;
+ reg = <0x07984000 0x1a000>;
interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_QPIC_AHB_CLK>;
clock-names = "bam_clk";
@@ -256,7 +278,7 @@
qpic_nand: nand@79b0000 {
compatible = "qcom,ipq8074-nand";
- reg = <0x79b0000 0x10000>;
+ reg = <0x079b0000 0x10000>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&gcc GCC_QPIC_CLK>,
@@ -272,104 +294,85 @@
status = "disabled";
};
- pcie_phy0: phy@86000 {
- compatible = "qcom,ipq8074-qmp-pcie-phy";
- reg = <0x86000 0x1000>;
- #phy-cells = <0>;
- clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
- clock-names = "pipe_clk";
- clock-output-names = "pcie20_phy0_pipe_clk";
+ intc: interrupt-controller@b000000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <0x3>;
+ reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
+ };
- resets = <&gcc GCC_PCIE0_PHY_BCR>,
- <&gcc GCC_PCIE0PHY_PHY_BCR>;
- reset-names = "phy",
- "common";
- status = "disabled";
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
- pcie0: pci@20000000 {
- compatible = "qcom,pcie-ipq8074";
- reg = <0x20000000 0xf1d
- 0x20000f20 0xa8
- 0x80000 0x2000
- 0x20100000 0x1000>;
- reg-names = "dbi", "elbi", "parf", "config";
- device_type = "pci";
- linux,pci-domain = <0>;
- bus-range = <0x00 0xff>;
- num-lanes = <1>;
- #address-cells = <3>;
- #size-cells = <2>;
+ timer@b120000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x0b120000 0x1000>;
+ clock-frequency = <19200000>;
- phys = <&pcie_phy0>;
- phy-names = "pciephy";
+ frame@b120000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b121000 0x1000>,
+ <0x0b122000 0x1000>;
+ };
- ranges = <0x81000000 0 0x20200000 0x20200000
- 0 0x100000 /* downstream I/O */
- 0x82000000 0 0x20300000 0x20300000
- 0 0xd00000>; /* non-prefetchable memory */
+ frame@b123000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b123000 0x1000>;
+ status = "disabled";
+ };
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 75
- IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 78
- IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 79
- IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 83
- IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ frame@b124000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b124000 0x1000>;
+ status = "disabled";
+ };
- clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
- <&gcc GCC_PCIE0_AXI_M_CLK>,
- <&gcc GCC_PCIE0_AXI_S_CLK>,
- <&gcc GCC_PCIE0_AHB_CLK>,
- <&gcc GCC_PCIE0_AUX_CLK>;
+ frame@b125000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b125000 0x1000>;
+ status = "disabled";
+ };
- clock-names = "iface",
- "axi_m",
- "axi_s",
- "ahb",
- "aux";
- resets = <&gcc GCC_PCIE0_PIPE_ARES>,
- <&gcc GCC_PCIE0_SLEEP_ARES>,
- <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
- <&gcc GCC_PCIE0_AXI_MASTER_ARES>,
- <&gcc GCC_PCIE0_AXI_SLAVE_ARES>,
- <&gcc GCC_PCIE0_AHB_ARES>,
- <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>;
- reset-names = "pipe",
- "sleep",
- "sticky",
- "axi_m",
- "axi_s",
- "ahb",
- "axi_m_sticky";
- status = "disabled";
- };
+ frame@b126000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b126000 0x1000>;
+ status = "disabled";
+ };
- pcie_phy1: phy@8e000 {
- compatible = "qcom,ipq8074-qmp-pcie-phy";
- reg = <0x8e000 0x1000>;
- #phy-cells = <0>;
- clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
- clock-names = "pipe_clk";
- clock-output-names = "pcie20_phy1_pipe_clk";
+ frame@b127000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b127000 0x1000>;
+ status = "disabled";
+ };
- resets = <&gcc GCC_PCIE1_PHY_BCR>,
- <&gcc GCC_PCIE1PHY_PHY_BCR>;
- reset-names = "phy",
- "common";
- status = "disabled";
+ frame@b128000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x0b128000 0x1000>;
+ status = "disabled";
+ };
};
pcie1: pci@10000000 {
compatible = "qcom,pcie-ipq8074";
reg = <0x10000000 0xf1d
0x10000f20 0xa8
- 0x88000 0x2000
+ 0x00088000 0x2000
0x10100000 0x1000>;
reg-names = "dbi", "elbi", "parf", "config";
device_type = "pci";
@@ -426,71 +429,68 @@
"axi_m_sticky";
status = "disabled";
};
- };
-
- cpus {
- #address-cells = <0x1>;
- #size-cells = <0x0>;
-
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x0>;
- next-level-cache = <&L2_0>;
- enable-method = "psci";
- };
-
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- enable-method = "psci";
- reg = <0x1>;
- next-level-cache = <&L2_0>;
- };
- CPU2: cpu@2 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- enable-method = "psci";
- reg = <0x2>;
- next-level-cache = <&L2_0>;
- };
-
- CPU3: cpu@3 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- enable-method = "psci";
- reg = <0x3>;
- next-level-cache = <&L2_0>;
- };
+ pcie0: pci@20000000 {
+ compatible = "qcom,pcie-ipq8074";
+ reg = <0x20000000 0xf1d
+ 0x20000f20 0xa8
+ 0x00080000 0x2000
+ 0x20100000 0x1000>;
+ reg-names = "dbi", "elbi", "parf", "config";
+ device_type = "pci";
+ linux,pci-domain = <0>;
+ bus-range = <0x00 0xff>;
+ num-lanes = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
- L2_0: l2-cache {
- compatible = "cache";
- cache-level = <0x2>;
- };
- };
+ phys = <&pcie_phy0>;
+ phy-names = "pciephy";
- psci {
- compatible = "arm,psci-1.0";
- method = "smc";
- };
+ ranges = <0x81000000 0 0x20200000 0x20200000
+ 0 0x100000 /* downstream I/O */
+ 0x82000000 0 0x20300000 0x20300000
+ 0 0xd00000>; /* non-prefetchable memory */
- pmu {
- compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
- };
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc 0 75
+ IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 78
+ IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 79
+ IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 83
+ IRQ_TYPE_LEVEL_HIGH>; /* int_d */
- clocks {
- sleep_clk: sleep_clk {
- compatible = "fixed-clock";
- clock-frequency = <32000>;
- #clock-cells = <0>;
- };
+ clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+ <&gcc GCC_PCIE0_AXI_M_CLK>,
+ <&gcc GCC_PCIE0_AXI_S_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_AUX_CLK>;
- xo: xo {
- compatible = "fixed-clock";
- clock-frequency = <19200000>;
- #clock-cells = <0>;
+ clock-names = "iface",
+ "axi_m",
+ "axi_s",
+ "ahb",
+ "aux";
+ resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+ <&gcc GCC_PCIE0_SLEEP_ARES>,
+ <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+ <&gcc GCC_PCIE0_AXI_MASTER_ARES>,
+ <&gcc GCC_PCIE0_AXI_SLAVE_ARES>,
+ <&gcc GCC_PCIE0_AHB_ARES>,
+ <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>;
+ reset-names = "pipe",
+ "sleep",
+ "sticky",
+ "axi_m",
+ "axi_s",
+ "ahb",
+ "axi_m_sticky";
+ status = "disabled";
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
index d1ccb9472c8b..d5230cb76eb1 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-longcheer-l8150.dts
@@ -82,29 +82,6 @@
wcnss@a21b000 {
status = "okay";
};
-
- /*
- * Attempting to enable these devices causes a "synchronous
- * external abort". Suspected cause is that the debug power
- * domain is not enabled by default on this device.
- * Disable these devices for now to avoid the crash.
- *
- * See: https://lore.kernel.org/linux-arm-msm/20190618202623.GA53651@gerhold.net/
- */
- tpiu@820000 { status = "disabled"; };
- funnel@821000 { status = "disabled"; };
- replicator@824000 { status = "disabled"; };
- etf@825000 { status = "disabled"; };
- etr@826000 { status = "disabled"; };
- funnel@841000 { status = "disabled"; };
- debug@850000 { status = "disabled"; };
- debug@852000 { status = "disabled"; };
- debug@854000 { status = "disabled"; };
- debug@856000 { status = "disabled"; };
- etm@85c000 { status = "disabled"; };
- etm@85d000 { status = "disabled"; };
- etm@85e000 { status = "disabled"; };
- etm@85f000 { status = "disabled"; };
};
// FIXME: Use extcon device provided by charger driver when available
@@ -132,7 +109,7 @@
};
&msmgpio {
- gpio_keys_default: gpio_keys_default {
+ gpio_keys_default: gpio-keys-default {
pinmux {
function = "gpio";
pins = "gpio107";
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
index 242aaea68804..e9c00367f7fd 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -5,7 +5,7 @@
&msmgpio {
- blsp1_uart1_default: blsp1_uart1_default {
+ blsp1_uart1_default: blsp1-uart1-default {
pinmux {
function = "blsp_uart1";
// TX, RX, CTS_N, RTS_N
@@ -20,7 +20,7 @@
};
};
- blsp1_uart1_sleep: blsp1_uart1_sleep {
+ blsp1_uart1_sleep: blsp1-uart1-sleep {
pinmux {
function = "gpio";
pins = "gpio0", "gpio1",
@@ -34,7 +34,7 @@
};
};
- blsp1_uart2_default: blsp1_uart2_default {
+ blsp1_uart2_default: blsp1-uart2-default {
pinmux {
function = "blsp_uart2";
pins = "gpio4", "gpio5";
@@ -46,7 +46,7 @@
};
};
- blsp1_uart2_sleep: blsp1_uart2_sleep {
+ blsp1_uart2_sleep: blsp1-uart2-sleep {
pinmux {
function = "gpio";
pins = "gpio4", "gpio5";
@@ -58,12 +58,12 @@
};
};
- spi1_default: spi1_default {
+ spi1_default: spi1-default {
pinmux {
function = "blsp_spi1";
pins = "gpio0", "gpio1", "gpio3";
};
- pinmux_cs {
+ pinmux-cs {
function = "gpio";
pins = "gpio2";
};
@@ -72,7 +72,7 @@
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ pinconf-cs {
pins = "gpio2";
drive-strength = <16>;
bias-disable;
@@ -80,7 +80,7 @@
};
};
- spi1_sleep: spi1_sleep {
+ spi1_sleep: spi1-sleep {
pinmux {
function = "gpio";
pins = "gpio0", "gpio1", "gpio2", "gpio3";
@@ -92,12 +92,12 @@
};
};
- spi2_default: spi2_default {
+ spi2_default: spi2-default {
pinmux {
function = "blsp_spi2";
pins = "gpio4", "gpio5", "gpio7";
};
- pinmux_cs {
+ pinmux-cs {
function = "gpio";
pins = "gpio6";
};
@@ -106,7 +106,7 @@
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ pinconf-cs {
pins = "gpio6";
drive-strength = <16>;
bias-disable;
@@ -114,7 +114,7 @@
};
};
- spi2_sleep: spi2_sleep {
+ spi2_sleep: spi2-sleep {
pinmux {
function = "gpio";
pins = "gpio4", "gpio5", "gpio6", "gpio7";
@@ -126,12 +126,12 @@
};
};
- spi3_default: spi3_default {
+ spi3_default: spi3-default {
pinmux {
function = "blsp_spi3";
pins = "gpio8", "gpio9", "gpio11";
};
- pinmux_cs {
+ pinmux-cs {
function = "gpio";
pins = "gpio10";
};
@@ -140,7 +140,7 @@
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ pinconf-cs {
pins = "gpio10";
drive-strength = <16>;
bias-disable;
@@ -148,7 +148,7 @@
};
};
- spi3_sleep: spi3_sleep {
+ spi3_sleep: spi3-sleep {
pinmux {
function = "gpio";
pins = "gpio8", "gpio9", "gpio10", "gpio11";
@@ -160,12 +160,12 @@
};
};
- spi4_default: spi4_default {
+ spi4_default: spi4-default {
pinmux {
function = "blsp_spi4";
pins = "gpio12", "gpio13", "gpio15";
};
- pinmux_cs {
+ pinmux-cs {
function = "gpio";
pins = "gpio14";
};
@@ -174,7 +174,7 @@
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ pinconf-cs {
pins = "gpio14";
drive-strength = <16>;
bias-disable;
@@ -182,7 +182,7 @@
};
};
- spi4_sleep: spi4_sleep {
+ spi4_sleep: spi4-sleep {
pinmux {
function = "gpio";
pins = "gpio12", "gpio13", "gpio14", "gpio15";
@@ -194,12 +194,12 @@
};
};
- spi5_default: spi5_default {
+ spi5_default: spi5-default {
pinmux {
function = "blsp_spi5";
pins = "gpio16", "gpio17", "gpio19";
};
- pinmux_cs {
+ pinmux-cs {
function = "gpio";
pins = "gpio18";
};
@@ -208,7 +208,7 @@
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ pinconf-cs {
pins = "gpio18";
drive-strength = <16>;
bias-disable;
@@ -216,7 +216,7 @@
};
};
- spi5_sleep: spi5_sleep {
+ spi5_sleep: spi5-sleep {
pinmux {
function = "gpio";
pins = "gpio16", "gpio17", "gpio18", "gpio19";
@@ -228,12 +228,12 @@
};
};
- spi6_default: spi6_default {
+ spi6_default: spi6-default {
pinmux {
function = "blsp_spi6";
pins = "gpio20", "gpio21", "gpio23";
};
- pinmux_cs {
+ pinmux-cs {
function = "gpio";
pins = "gpio22";
};
@@ -242,7 +242,7 @@
drive-strength = <12>;
bias-disable;
};
- pinconf_cs {
+ pinconf-cs {
pins = "gpio22";
drive-strength = <16>;
bias-disable;
@@ -250,7 +250,7 @@
};
};
- spi6_sleep: spi6_sleep {
+ spi6_sleep: spi6-sleep {
pinmux {
function = "gpio";
pins = "gpio20", "gpio21", "gpio22", "gpio23";
@@ -262,7 +262,31 @@
};
};
- i2c2_default: i2c2_default {
+ i2c1_default: i2c1-default {
+ pinmux {
+ function = "blsp_i2c1";
+ pins = "gpio2", "gpio3";
+ };
+ pinconf {
+ pins = "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c1_sleep: i2c1-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio2", "gpio3";
+ };
+ pinconf {
+ pins = "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c2_default: i2c2-default {
pinmux {
function = "blsp_i2c2";
pins = "gpio6", "gpio7";
@@ -274,7 +298,7 @@
};
};
- i2c2_sleep: i2c2_sleep {
+ i2c2_sleep: i2c2-sleep {
pinmux {
function = "gpio";
pins = "gpio6", "gpio7";
@@ -286,7 +310,7 @@
};
};
- i2c4_default: i2c4_default {
+ i2c4_default: i2c4-default {
pinmux {
function = "blsp_i2c4";
pins = "gpio14", "gpio15";
@@ -298,7 +322,7 @@
};
};
- i2c4_sleep: i2c4_sleep {
+ i2c4_sleep: i2c4-sleep {
pinmux {
function = "gpio";
pins = "gpio14", "gpio15";
@@ -310,7 +334,31 @@
};
};
- i2c6_default: i2c6_default {
+ i2c5_default: i2c5-default {
+ pinmux {
+ function = "blsp_i2c5";
+ pins = "gpio18", "gpio19";
+ };
+ pinconf {
+ pins = "gpio18", "gpio19";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c5_sleep: i2c5-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio18", "gpio19";
+ };
+ pinconf {
+ pins = "gpio18", "gpio19";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c6_default: i2c6-default {
pinmux {
function = "blsp_i2c6";
pins = "gpio22", "gpio23";
@@ -322,7 +370,7 @@
};
};
- i2c6_sleep: i2c6_sleep {
+ i2c6_sleep: i2c6-sleep {
pinmux {
function = "gpio";
pins = "gpio22", "gpio23";
@@ -334,8 +382,8 @@
};
};
- pmx_sdc1_clk {
- sdc1_clk_on: clk_on {
+ pmx-sdc1-clk {
+ sdc1_clk_on: clk-on {
pinmux {
pins = "sdc1_clk";
};
@@ -345,7 +393,7 @@
drive-strength = <16>;
};
};
- sdc1_clk_off: clk_off {
+ sdc1_clk_off: clk-off {
pinmux {
pins = "sdc1_clk";
};
@@ -357,8 +405,8 @@
};
};
- pmx_sdc1_cmd {
- sdc1_cmd_on: cmd_on {
+ pmx-sdc1-cmd {
+ sdc1_cmd_on: cmd-on {
pinmux {
pins = "sdc1_cmd";
};
@@ -368,7 +416,7 @@
drive-strength = <10>;
};
};
- sdc1_cmd_off: cmd_off {
+ sdc1_cmd_off: cmd-off {
pinmux {
pins = "sdc1_cmd";
};
@@ -380,8 +428,8 @@
};
};
- pmx_sdc1_data {
- sdc1_data_on: data_on {
+ pmx-sdc1-data {
+ sdc1_data_on: data-on {
pinmux {
pins = "sdc1_data";
};
@@ -391,7 +439,7 @@
drive-strength = <10>;
};
};
- sdc1_data_off: data_off {
+ sdc1_data_off: data-off {
pinmux {
pins = "sdc1_data";
};
@@ -403,8 +451,8 @@
};
};
- pmx_sdc2_clk {
- sdc2_clk_on: clk_on {
+ pmx-sdc2-clk {
+ sdc2_clk_on: clk-on {
pinmux {
pins = "sdc2_clk";
};
@@ -414,7 +462,7 @@
drive-strength = <16>;
};
};
- sdc2_clk_off: clk_off {
+ sdc2_clk_off: clk-off {
pinmux {
pins = "sdc2_clk";
};
@@ -426,8 +474,8 @@
};
};
- pmx_sdc2_cmd {
- sdc2_cmd_on: cmd_on {
+ pmx-sdc2-cmd {
+ sdc2_cmd_on: cmd-on {
pinmux {
pins = "sdc2_cmd";
};
@@ -437,7 +485,7 @@
drive-strength = <10>;
};
};
- sdc2_cmd_off: cmd_off {
+ sdc2_cmd_off: cmd-off {
pinmux {
pins = "sdc2_cmd";
};
@@ -449,8 +497,8 @@
};
};
- pmx_sdc2_data {
- sdc2_data_on: data_on {
+ pmx-sdc2-data {
+ sdc2_data_on: data-on {
pinmux {
pins = "sdc2_data";
};
@@ -460,7 +508,7 @@
drive-strength = <10>;
};
};
- sdc2_data_off: data_off {
+ sdc2_data_off: data-off {
pinmux {
pins = "sdc2_data";
};
@@ -472,8 +520,8 @@
};
};
- pmx_sdc2_cd_pin {
- sdc2_cd_on: cd_on {
+ pmx-sdc2-cd-pin {
+ sdc2_cd_on: cd-on {
pinmux {
function = "gpio";
pins = "gpio38";
@@ -484,7 +532,7 @@
bias-pull-up;
};
};
- sdc2_cd_off: cd_off {
+ sdc2_cd_off: cd-off {
pinmux {
function = "gpio";
pins = "gpio38";
@@ -498,7 +546,7 @@
};
cdc-pdm-lines {
- cdc_pdm_lines_act: pdm_lines_on {
+ cdc_pdm_lines_act: pdm-lines-on {
pinmux {
function = "cdc_pdm0";
pins = "gpio63", "gpio64", "gpio65", "gpio66",
@@ -511,7 +559,7 @@
bias-pull-none;
};
};
- cdc_pdm_lines_sus: pdm_lines_off {
+ cdc_pdm_lines_sus: pdm-lines-off {
pinmux {
function = "cdc_pdm0";
pins = "gpio63", "gpio64", "gpio65", "gpio66",
@@ -527,7 +575,7 @@
};
ext-pri-tlmm-lines {
- ext_pri_tlmm_lines_act: ext_pa_on {
+ ext_pri_tlmm_lines_act: ext-pa-on {
pinmux {
function = "pri_mi2s";
pins = "gpio113", "gpio114", "gpio115",
@@ -541,7 +589,7 @@
};
};
- ext_pri_tlmm_lines_sus: ext_pa_off {
+ ext_pri_tlmm_lines_sus: ext-pa-off {
pinmux {
function = "pri_mi2s";
pins = "gpio113", "gpio114", "gpio115",
@@ -557,7 +605,7 @@
};
ext-pri-ws-line {
- ext_pri_ws_act: ext_pa_on {
+ ext_pri_ws_act: ext-pa-on {
pinmux {
function = "pri_mi2s_ws";
pins = "gpio110";
@@ -569,7 +617,7 @@
};
};
- ext_pri_ws_sus: ext_pa_off {
+ ext_pri_ws_sus: ext-pa-off {
pinmux {
function = "pri_mi2s_ws";
pins = "gpio110";
@@ -583,7 +631,7 @@
};
ext-mclk-tlmm-lines {
- ext_mclk_tlmm_lines_act: mclk_lines_on {
+ ext_mclk_tlmm_lines_act: mclk-lines-on {
pinmux {
function = "pri_mi2s";
pins = "gpio116";
@@ -594,7 +642,7 @@
bias-pull-none;
};
};
- ext_mclk_tlmm_lines_sus: mclk_lines_off {
+ ext_mclk_tlmm_lines_sus: mclk-lines-off {
pinmux {
function = "pri_mi2s";
pins = "gpio116";
@@ -609,7 +657,7 @@
/* secondary Mi2S */
ext-sec-tlmm-lines {
- ext_sec_tlmm_lines_act: tlmm_lines_on {
+ ext_sec_tlmm_lines_act: tlmm-lines-on {
pinmux {
function = "sec_mi2s";
pins = "gpio112", "gpio117", "gpio118",
@@ -622,7 +670,7 @@
bias-pull-none;
};
};
- ext_sec_tlmm_lines_sus: tlmm_lines_off {
+ ext_sec_tlmm_lines_sus: tlmm-lines-off {
pinmux {
function = "sec_mi2s";
pins = "gpio112", "gpio117", "gpio118",
@@ -638,12 +686,12 @@
};
cdc-dmic-lines {
- cdc_dmic_lines_act: dmic_lines_on {
- pinmux_dmic0_clk {
+ cdc_dmic_lines_act: dmic-lines-on {
+ pinmux-dmic0-clk {
function = "dmic0_clk";
pins = "gpio0";
};
- pinmux_dmic0_data {
+ pinmux-dmic0-data {
function = "dmic0_data";
pins = "gpio1";
};
@@ -652,12 +700,12 @@
drive-strength = <8>;
};
};
- cdc_dmic_lines_sus: dmic_lines_off {
- pinmux_dmic0_clk {
+ cdc_dmic_lines_sus: dmic-lines-off {
+ pinmux-dmic0-clk {
function = "dmic0_clk";
pins = "gpio0";
};
- pinmux_dmic0_data {
+ pinmux-dmic0-data {
function = "dmic0_data";
pins = "gpio1";
};
@@ -674,7 +722,6 @@
pins = "gpio40", "gpio41", "gpio42", "gpio43", "gpio44";
function = "wcss_wlan";
};
-
pinconf {
pins = "gpio40", "gpio41", "gpio42", "gpio43", "gpio44";
drive-strength = <6>;
@@ -682,7 +729,7 @@
};
};
- cci0_default: cci0_default {
+ cci0_default: cci0-default {
pinmux {
function = "cci_i2c";
pins = "gpio29", "gpio30";
@@ -694,64 +741,64 @@
};
};
- camera_front_default: camera_front_default {
- pinmux_pwdn {
+ camera_front_default: camera-front-default {
+ pinmux-pwdn {
function = "gpio";
pins = "gpio33";
};
- pinconf_pwdn {
+ pinconf-pwdn {
pins = "gpio33";
drive-strength = <16>;
bias-disable;
};
- pinmux_rst {
+ pinmux-rst {
function = "gpio";
pins = "gpio28";
};
- pinconf_rst {
+ pinconf-rst {
pins = "gpio28";
drive-strength = <16>;
bias-disable;
};
- pinmux_mclk1 {
+ pinmux-mclk1 {
function = "cam_mclk1";
pins = "gpio27";
};
- pinconf_mclk1 {
+ pinconf-mclk1 {
pins = "gpio27";
drive-strength = <16>;
bias-disable;
};
};
- camera_rear_default: camera_rear_default {
- pinmux_pwdn {
+ camera_rear_default: camera-rear-default {
+ pinmux-pwdn {
function = "gpio";
pins = "gpio34";
};
- pinconf_pwdn {
+ pinconf-pwdn {
pins = "gpio34";
drive-strength = <16>;
bias-disable;
};
- pinmux_rst {
+ pinmux-rst {
function = "gpio";
pins = "gpio35";
};
- pinconf_rst {
+ pinconf-rst {
pins = "gpio35";
drive-strength = <16>;
bias-disable;
};
- pinmux_mclk0 {
+ pinmux-mclk0 {
function = "cam_mclk0";
pins = "gpio26";
};
- pinconf_mclk0 {
+ pinconf-mclk0 {
pins = "gpio26";
drive-strength = <16>;
bias-disable;
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
index 43c5e0f882f1..ea52adf07a4b 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a2015-common.dtsi
@@ -72,32 +72,27 @@
};
};
+ mdss@1a00000 {
+ dsi@1a98000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdda-supply = <&pm8916_l2>;
+ vddio-supply = <&pm8916_l6>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mdss_default>;
+ pinctrl-1 = <&mdss_sleep>;
+ };
+
+ dsi-phy@1a98300 {
+ vddio-supply = <&pm8916_l6>;
+ };
+ };
+
wcnss@a21b000 {
status = "okay";
};
-
- /*
- * Attempting to enable these devices causes a "synchronous
- * external abort". Suspected cause is that the debug power
- * domain is not enabled by default on this device.
- * Disable these devices for now to avoid the crash.
- *
- * See: https://lore.kernel.org/linux-arm-msm/20190618202623.GA53651@gerhold.net/
- */
- tpiu@820000 { status = "disabled"; };
- funnel@821000 { status = "disabled"; };
- replicator@824000 { status = "disabled"; };
- etf@825000 { status = "disabled"; };
- etr@826000 { status = "disabled"; };
- funnel@841000 { status = "disabled"; };
- debug@850000 { status = "disabled"; };
- debug@852000 { status = "disabled"; };
- debug@854000 { status = "disabled"; };
- debug@856000 { status = "disabled"; };
- etm@85c000 { status = "disabled"; };
- etm@85d000 { status = "disabled"; };
- etm@85e000 { status = "disabled"; };
- etm@85f000 { status = "disabled"; };
};
gpio-keys {
@@ -138,6 +133,19 @@
};
};
+ reg_vdd_tsp: regulator-vdd-tsp {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_tsp";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&msmgpio 73 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&tsp_en_default>;
+ };
+
i2c-muic {
compatible = "i2c-gpio";
sda-gpios = <&msmgpio 105 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
@@ -160,7 +168,7 @@
};
&msmgpio {
- gpio_keys_default: gpio_keys_default {
+ gpio_keys_default: gpio-keys-default {
pinmux {
function = "gpio";
pins = "gpio107", "gpio109";
@@ -172,7 +180,7 @@
};
};
- gpio_hall_sensor_default: gpio_hall_sensor_default {
+ gpio_hall_sensor_default: gpio-hall-sensor-default {
pinmux {
function = "gpio";
pins = "gpio52";
@@ -184,7 +192,7 @@
};
};
- muic_int_default: muic_int_default {
+ muic_int_default: muic-int-default {
pinmux {
function = "gpio";
pins = "gpio12";
@@ -195,6 +203,44 @@
bias-disable;
};
};
+
+ tsp_en_default: tsp-en-default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio73";
+ };
+ pinconf {
+ pins = "gpio73";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ pmx-mdss {
+ mdss_default: mdss-default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio25";
+ };
+ pinconf {
+ pins = "gpio25";
+ drive-strength = <8>;
+ bias-disable;
+ };
+ };
+
+ mdss_sleep: mdss-sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio25";
+ };
+ pinconf {
+ pins = "gpio25";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
};
&smd_rpm_regulators {
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
index d10f7ac5089f..b46c87289033 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a3u-eur.dts
@@ -7,4 +7,58 @@
/ {
model = "Samsung Galaxy A3U (EUR)";
compatible = "samsung,a3u-eur", "qcom,msm8916";
+
+ reg_panel_vdd3: regulator-panel-vdd3 {
+ compatible = "regulator-fixed";
+ regulator-name = "panel_vdd3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ gpio = <&msmgpio 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_vdd3_default>;
+ };
+};
+
+&dsi0 {
+ panel@0 {
+ reg = <0>;
+
+ compatible = "samsung,s6e88a0-ams452ef01";
+
+ vdd3-supply = <&reg_panel_vdd3>;
+ vci-supply = <&pm8916_l17>;
+ reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+
+ ports {
+ port@1 {
+ dsi0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ data-lanes = <0 1>;
+ };
+ };
+ };
+};
+
+&msmgpio {
+ panel_vdd3_default: panel-vdd3-default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio9";
+ };
+ pinconf {
+ pins = "gpio9";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
index 6629a621139c..a555db8f6b34 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-samsung-a5u-eur.dts
@@ -9,8 +9,43 @@
compatible = "samsung,a5u-eur", "qcom,msm8916";
};
+&blsp_i2c5 {
+ status = "okay";
+
+ touchscreen@48 {
+ compatible = "melfas,mms345l";
+
+ reg = <0x48>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+
+ touchscreen-size-x = <720>;
+ touchscreen-size-y = <1280>;
+
+ avdd-supply = <&reg_vdd_tsp>;
+ vdd-supply = <&pm8916_l6>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_default>;
+ };
+};
+
&pronto {
iris {
compatible = "qcom,wcn3680";
};
};
+
+&msmgpio {
+ ts_int_default: ts-int-default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio13";
+ };
+ pinconf {
+ pins = "gpio13";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index a88a15f2352b..32bd140ac9fd 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -3,6 +3,7 @@
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/arm/coresight-cti-dt.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8916.h>
#include <dt-bindings/reset/qcom,gcc-msm8916.h>
@@ -165,6 +166,9 @@
min-residency-us = <2000>;
local-timer-stop;
};
+ };
+
+ domain-idle-states {
CLUSTER_RET: cluster-retention {
compatible = "domain-idle-state";
@@ -188,31 +192,31 @@
compatible = "arm,psci-1.0";
method = "smc";
- CPU_PD0: cpu-pd0 {
+ CPU_PD0: power-domain-cpu0 {
#power-domain-cells = <0>;
power-domains = <&CLUSTER_PD>;
domain-idle-states = <&CPU_SLEEP_0>;
};
- CPU_PD1: cpu-pd1 {
+ CPU_PD1: power-domain-cpu1 {
#power-domain-cells = <0>;
power-domains = <&CLUSTER_PD>;
domain-idle-states = <&CPU_SLEEP_0>;
};
- CPU_PD2: cpu-pd2 {
+ CPU_PD2: power-domain-cpu2 {
#power-domain-cells = <0>;
power-domains = <&CLUSTER_PD>;
domain-idle-states = <&CPU_SLEEP_0>;
};
- CPU_PD3: cpu-pd3 {
+ CPU_PD3: power-domain-cpu3 {
#power-domain-cells = <0>;
power-domains = <&CLUSTER_PD>;
domain-idle-states = <&CPU_SLEEP_0>;
};
- CLUSTER_PD: cluster-pd {
+ CLUSTER_PD: power-domain-cluster {
#power-domain-cells = <0>;
domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
};
@@ -261,7 +265,7 @@
thermal-sensors = <&tsens 4>;
trips {
- cpu2_3_alert0: trip-point@0 {
+ cpu2_3_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -291,7 +295,7 @@
thermal-sensors = <&tsens 2>;
trips {
- gpu_alert0: trip-point@0 {
+ gpu_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -311,7 +315,7 @@
thermal-sensors = <&tsens 1>;
trips {
- cam_alert0: trip-point@0 {
+ cam_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "hot";
@@ -326,7 +330,7 @@
thermal-sensors = <&tsens 0>;
trips {
- modem_alert0: trip-point@0 {
+ modem_alert0: trip-point0 {
temperature = <85000>;
hysteresis = <2000>;
type = "hot";
@@ -336,7 +340,7 @@
};
- cpu_opp_table: cpu_opp_table {
+ cpu_opp_table: cpu-opp-table {
compatible = "operating-points-v2";
opp-shared;
@@ -354,17 +358,6 @@
};
};
- gpu_opp_table: opp_table {
- compatible = "operating-points-v2";
-
- opp-400000000 {
- opp-hz = /bits/ 64 <400000000>;
- };
- opp-19200000 {
- opp-hz = /bits/ 64 <19200000>;
- };
- };
-
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -374,13 +367,13 @@
};
clocks {
- xo_board: xo_board {
+ xo_board: xo-board {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <19200000>;
};
- sleep_clk: sleep_clk {
+ sleep_clk: sleep-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
@@ -608,6 +601,21 @@
status = "disabled";
};
+ blsp_i2c1: i2c@78b5000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b5000 0x500>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_default>;
+ pinctrl-1 = <&i2c1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
blsp_i2c2: i2c@78b6000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x078b6000 0x500>;
@@ -638,6 +646,21 @@
status = "disabled";
};
+ blsp_i2c5: i2c@78b9000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x078b9000 0x500>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c5_default>;
+ pinctrl-1 = <&i2c5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
blsp_i2c6: i2c@78ba000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x078ba000 0x500>;
@@ -955,6 +978,17 @@
power-domains = <&gcc OXILI_GDSC>;
operating-points-v2 = <&gpu_opp_table>;
iommus = <&gpu_iommu 1>, <&gpu_iommu 2>;
+
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ };
+ opp-19200000 {
+ opp-hz = /bits/ 64 <19200000>;
+ };
+ };
};
mdss: mdss@1a00000 {
@@ -1224,6 +1258,8 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ status = "disabled";
+
in-ports {
port {
tpiu_in: endpoint {
@@ -1240,6 +1276,8 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ status = "disabled";
+
in-ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -1279,6 +1317,8 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ status = "disabled";
+
out-ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -1313,6 +1353,8 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ status = "disabled";
+
in-ports {
port {
etf_in: endpoint {
@@ -1337,6 +1379,8 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ status = "disabled";
+
in-ports {
port {
etr_in: endpoint {
@@ -1353,6 +1397,8 @@
clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "atclk";
+ status = "disabled";
+
in-ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -1398,6 +1444,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>;
clock-names = "apb_pclk";
cpu = <&CPU0>;
+ status = "disabled";
};
debug@852000 {
@@ -1406,6 +1453,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>;
clock-names = "apb_pclk";
cpu = <&CPU1>;
+ status = "disabled";
};
debug@854000 {
@@ -1414,6 +1462,7 @@
clocks = <&rpmcc RPM_QDSS_CLK>;
clock-names = "apb_pclk";
cpu = <&CPU2>;
+ status = "disabled";
};
debug@856000 {
@@ -1422,9 +1471,10 @@
clocks = <&rpmcc RPM_QDSS_CLK>;
clock-names = "apb_pclk";
cpu = <&CPU3>;
+ status = "disabled";
};
- etm@85c000 {
+ etm0: etm@85c000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0x85c000 0x1000>;
@@ -1434,6 +1484,8 @@
cpu = <&CPU0>;
+ status = "disabled";
+
out-ports {
port {
etm0_out: endpoint {
@@ -1443,7 +1495,7 @@
};
};
- etm@85d000 {
+ etm1: etm@85d000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0x85d000 0x1000>;
@@ -1453,6 +1505,8 @@
cpu = <&CPU1>;
+ status = "disabled";
+
out-ports {
port {
etm1_out: endpoint {
@@ -1462,7 +1516,7 @@
};
};
- etm@85e000 {
+ etm2: etm@85e000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0x85e000 0x1000>;
@@ -1472,6 +1526,8 @@
cpu = <&CPU2>;
+ status = "disabled";
+
out-ports {
port {
etm2_out: endpoint {
@@ -1481,7 +1537,7 @@
};
};
- etm@85f000 {
+ etm3: etm@85f000 {
compatible = "arm,coresight-etm4x", "arm,primecell";
reg = <0x85f000 0x1000>;
@@ -1491,6 +1547,8 @@
cpu = <&CPU3>;
+ status = "disabled";
+
out-ports {
port {
etm3_out: endpoint {
@@ -1500,6 +1558,93 @@
};
};
+ /* System CTIs */
+ /* CTI 0 - TMC connections */
+ cti@810000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x810000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ status = "disabled";
+ };
+
+ /* CTI 1 - TPIU connections */
+ cti@811000 {
+ compatible = "arm,coresight-cti", "arm,primecell";
+ reg = <0x811000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ status = "disabled";
+ };
+
+ /* CTIs 2-11 - no information - not instantiated */
+
+ /* Core CTIs; CTIs 12-15 */
+ /* CTI - CPU-0 */
+ cti@858000 {
+ compatible = "arm,coresight-cti-v8-arch", "arm,coresight-cti",
+ "arm,primecell";
+ reg = <0x858000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU0>;
+ arm,cs-dev-assoc = <&etm0>;
+
+ status = "disabled";
+ };
+
+ /* CTI - CPU-1 */
+ cti@859000 {
+ compatible = "arm,coresight-cti-v8-arch", "arm,coresight-cti",
+ "arm,primecell";
+ reg = <0x859000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU1>;
+ arm,cs-dev-assoc = <&etm1>;
+
+ status = "disabled";
+ };
+
+ /* CTI - CPU-2 */
+ cti@85a000 {
+ compatible = "arm,coresight-cti-v8-arch", "arm,coresight-cti",
+ "arm,primecell";
+ reg = <0x85a000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU2>;
+ arm,cs-dev-assoc = <&etm2>;
+
+ status = "disabled";
+ };
+
+ /* CTI - CPU-3 */
+ cti@85b000 {
+ compatible = "arm,coresight-cti-v8-arch", "arm,coresight-cti",
+ "arm,primecell";
+ reg = <0x85b000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU3>;
+ arm,cs-dev-assoc = <&etm3>;
+
+ status = "disabled";
+ };
+
venus: video-codec@1d00000 {
compatible = "qcom,msm8916-venus";
reg = <0x01d00000 0xff000>;
@@ -1601,6 +1746,33 @@
#size-cells = <0>;
};
};
+
+ cci: cci@1b0c000 {
+ compatible = "qcom,msm8916-cci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1b0c000 0x1000>;
+ interrupts = <GIC_SPI 50 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&gcc GCC_CAMSS_TOP_AHB_CLK>,
+ <&gcc GCC_CAMSS_CCI_AHB_CLK>,
+ <&gcc GCC_CAMSS_CCI_CLK>,
+ <&gcc GCC_CAMSS_AHB_CLK>;
+ clock-names = "camss_top_ahb", "cci_ahb",
+ "cci", "camss_ahb";
+ assigned-clocks = <&gcc GCC_CAMSS_CCI_AHB_CLK>,
+ <&gcc GCC_CAMSS_CCI_CLK>;
+ assigned-clock-rates = <80000000>, <19200000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cci0_default>;
+ status = "disabled";
+
+ cci_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
};
smd {
@@ -1611,7 +1783,7 @@
qcom,ipc = <&apcs 8 0>;
qcom,smd-edge = <15>;
- rpm_requests {
+ rpm-requests {
compatible = "qcom,rpm-msm8916";
qcom,smd-channels = "rpm_requests";
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 98634d5c4440..9951286db775 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -639,7 +639,7 @@
"mem",
"mem_iface";
- power-domains = <&mmcc GPU_GDSC>;
+ power-domains = <&mmcc GPU_GX_GDSC>;
iommus = <&adreno_smmu 0>;
nvmem-cells = <&gpu_speed_bin>;
@@ -989,16 +989,16 @@
"csi_clk_mux",
"vfe0",
"vfe1";
- interrupts = <GIC_SPI 78 0>,
- <GIC_SPI 79 0>,
- <GIC_SPI 80 0>,
- <GIC_SPI 296 0>,
- <GIC_SPI 297 0>,
- <GIC_SPI 298 0>,
- <GIC_SPI 299 0>,
- <GIC_SPI 309 0>,
- <GIC_SPI 314 0>,
- <GIC_SPI 315 0>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 80 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 296 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 297 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 298 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 299 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 309 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 314 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 315 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "csiphy0",
"csiphy1",
"csiphy2",
@@ -1093,6 +1093,43 @@
};
};
+ cci: cci@a0c000 {
+ compatible = "qcom,msm8996-cci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xa0c000 0x1000>;
+ interrupts = <GIC_SPI 295 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&mmcc CAMSS_GDSC>;
+ clocks = <&mmcc CAMSS_TOP_AHB_CLK>,
+ <&mmcc CAMSS_CCI_AHB_CLK>,
+ <&mmcc CAMSS_CCI_CLK>,
+ <&mmcc CAMSS_AHB_CLK>;
+ clock-names = "camss_top_ahb",
+ "cci_ahb",
+ "cci",
+ "camss_ahb";
+ assigned-clocks = <&mmcc CAMSS_CCI_AHB_CLK>,
+ <&mmcc CAMSS_CCI_CLK>;
+ assigned-clock-rates = <80000000>, <37500000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cci0_default &cci1_default>;
+ status = "disabled";
+
+ cci_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
adreno_smmu: iommu@b40000 {
compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
reg = <0x00b40000 0x10000>;
@@ -2180,7 +2217,7 @@
thermal-sensors = <&tsens0 3>;
trips {
- cpu0_alert0: trip-point@0 {
+ cpu0_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -2201,7 +2238,7 @@
thermal-sensors = <&tsens0 5>;
trips {
- cpu1_alert0: trip-point@0 {
+ cpu1_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -2222,7 +2259,7 @@
thermal-sensors = <&tsens0 8>;
trips {
- cpu2_alert0: trip-point@0 {
+ cpu2_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -2243,7 +2280,7 @@
thermal-sensors = <&tsens0 10>;
trips {
- cpu3_alert0: trip-point@0 {
+ cpu3_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -2264,7 +2301,7 @@
thermal-sensors = <&tsens1 6>;
trips {
- gpu1_alert0: trip-point@0 {
+ gpu1_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2279,7 +2316,7 @@
thermal-sensors = <&tsens1 7>;
trips {
- gpu2_alert0: trip-point@0 {
+ gpu2_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2294,7 +2331,7 @@
thermal-sensors = <&tsens0 1>;
trips {
- m4m_alert0: trip-point@0 {
+ m4m_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2309,7 +2346,7 @@
thermal-sensors = <&tsens0 2>;
trips {
- l3_or_venus_alert0: trip-point@0 {
+ l3_or_venus_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2324,7 +2361,7 @@
thermal-sensors = <&tsens0 7>;
trips {
- cluster0_l2_alert0: trip-point@0 {
+ cluster0_l2_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2339,7 +2376,7 @@
thermal-sensors = <&tsens0 12>;
trips {
- cluster1_l2_alert0: trip-point@0 {
+ cluster1_l2_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2354,7 +2391,7 @@
thermal-sensors = <&tsens1 1>;
trips {
- camera_alert0: trip-point@0 {
+ camera_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2369,7 +2406,7 @@
thermal-sensors = <&tsens1 2>;
trips {
- q6_dsp_alert0: trip-point@0 {
+ q6_dsp_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2384,7 +2421,7 @@
thermal-sensors = <&tsens1 3>;
trips {
- mem_alert0: trip-point@0 {
+ mem_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -2399,7 +2436,7 @@
thermal-sensors = <&tsens1 4>;
trips {
- modemtx_alert0: trip-point@0 {
+ modemtx_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index c07fee6fd7eb..c45870600909 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -500,7 +500,7 @@
thermal-sensors = <&tsens0 1>;
trips {
- cpu0_alert0: trip-point@0 {
+ cpu0_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -521,7 +521,7 @@
thermal-sensors = <&tsens0 2>;
trips {
- cpu1_alert0: trip-point@0 {
+ cpu1_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -542,7 +542,7 @@
thermal-sensors = <&tsens0 3>;
trips {
- cpu2_alert0: trip-point@0 {
+ cpu2_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -563,7 +563,7 @@
thermal-sensors = <&tsens0 4>;
trips {
- cpu3_alert0: trip-point@0 {
+ cpu3_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -584,7 +584,7 @@
thermal-sensors = <&tsens0 7>;
trips {
- cpu4_alert0: trip-point@0 {
+ cpu4_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -605,7 +605,7 @@
thermal-sensors = <&tsens0 8>;
trips {
- cpu5_alert0: trip-point@0 {
+ cpu5_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -626,7 +626,7 @@
thermal-sensors = <&tsens0 9>;
trips {
- cpu6_alert0: trip-point@0 {
+ cpu6_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -647,7 +647,7 @@
thermal-sensors = <&tsens0 10>;
trips {
- cpu7_alert0: trip-point@0 {
+ cpu7_alert0: trip-point0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
@@ -668,7 +668,7 @@
thermal-sensors = <&tsens0 12>;
trips {
- gpu1_alert0: trip-point@0 {
+ gpu1_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -683,7 +683,7 @@
thermal-sensors = <&tsens0 13>;
trips {
- gpu2_alert0: trip-point@0 {
+ gpu2_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -698,7 +698,7 @@
thermal-sensors = <&tsens0 5>;
trips {
- cluster0_mhm_alert0: trip-point@0 {
+ cluster0_mhm_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -713,7 +713,7 @@
thermal-sensors = <&tsens0 6>;
trips {
- cluster1_mhm_alert0: trip-point@0 {
+ cluster1_mhm_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -728,7 +728,7 @@
thermal-sensors = <&tsens0 11>;
trips {
- cluster1_l2_alert0: trip-point@0 {
+ cluster1_l2_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -743,7 +743,7 @@
thermal-sensors = <&tsens1 1>;
trips {
- modem_alert0: trip-point@0 {
+ modem_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -758,7 +758,7 @@
thermal-sensors = <&tsens1 2>;
trips {
- mem_alert0: trip-point@0 {
+ mem_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -773,7 +773,7 @@
thermal-sensors = <&tsens1 3>;
trips {
- wlan_alert0: trip-point@0 {
+ wlan_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -788,7 +788,7 @@
thermal-sensors = <&tsens1 4>;
trips {
- q6_dsp_alert0: trip-point@0 {
+ q6_dsp_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -803,7 +803,7 @@
thermal-sensors = <&tsens1 5>;
trips {
- camera_alert0: trip-point@0 {
+ camera_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
@@ -818,7 +818,7 @@
thermal-sensors = <&tsens1 6>;
trips {
- multimedia_alert0: trip-point@0 {
+ multimedia_alert0: trip-point0 {
temperature = <90000>;
hysteresis = <2000>;
type = "hot";
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index b6e304748a57..c0b197458665 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -73,18 +73,8 @@
reg = <0xc000>;
gpio-controller;
#gpio-cells = <2>;
- interrupts = <0x0 0xc0 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc1 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc2 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc3 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc4 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc5 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc6 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc7 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc8 0x0 IRQ_TYPE_NONE>,
- <0x0 0xc9 0x0 IRQ_TYPE_NONE>,
- <0x0 0xca 0x0 IRQ_TYPE_NONE>,
- <0x0 0xcb 0x0 IRQ_TYPE_NONE>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index 322379d5c31f..40b5d75a4a1d 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -62,18 +62,8 @@
reg = <0xc000>;
gpio-controller;
#gpio-cells = <2>;
- interrupts = <0x2 0xc0 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc1 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc2 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc3 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc4 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc5 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc6 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc7 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc8 0x0 IRQ_TYPE_NONE>,
- <0x2 0xc9 0x0 IRQ_TYPE_NONE>,
- <0x2 0xca 0x0 IRQ_TYPE_NONE>,
- <0x2 0xcb 0x0 IRQ_TYPE_NONE>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index eb0e9a090e42..cf05e0685d10 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -56,18 +56,8 @@
reg = <0xc000>;
gpio-controller;
#gpio-cells = <2>;
- interrupts = <0x4 0xc0 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc1 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc2 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc3 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc4 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc5 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc6 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc7 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc8 0x0 IRQ_TYPE_NONE>,
- <0x4 0xc9 0x0 IRQ_TYPE_NONE>,
- <0x4 0xca 0x0 IRQ_TYPE_NONE>,
- <0x4 0xcb 0x0 IRQ_TYPE_NONE>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
index 21e05215abe4..e5ed28ab9b2d 100644
--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
@@ -26,5 +26,11 @@
reg = <0x3 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
+
+ pmi8994_spmi_regulators: regulators {
+ compatible = "qcom,pmi8994-regulators";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index afe69e8f3114..6422cf9d5855 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -4,6 +4,8 @@
#include <dt-bindings/gpio/gpio.h>
#include "qcs404.dtsi"
#include "pms405.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
aliases {
@@ -31,6 +33,21 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
+
+ usb3_vbus_reg: regulator-usb3-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "VBUS_BOOST_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pms405_gpios 3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_vbus_boost_pin>;
+ vin-supply = <&vph_pwr>;
+ enable-active-high;
+
+ /* TODO: Drop this when introducing role switching */
+ regulator-always-on;
+ };
};
&blsp1_uart3 {
@@ -186,7 +203,7 @@
};
vreg_l12_3p3: l12 {
- regulator-min-microvolt = <2968000>;
+ regulator-min-microvolt = <3050000>;
regulator-max-microvolt = <3300000>;
};
@@ -270,6 +287,72 @@
bias-pull-down;
};
};
+
+ usb3_id_pin: usb3-id-pin {
+ pinmux {
+ pins = "gpio116";
+ function = "gpio";
+ };
+
+ pinconf {
+ pins = "gpio116";
+ drive-strength = <2>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+};
+
+&pms405_gpios {
+ usb_vbus_boost_pin: usb-vbus-boost-pin {
+ pinconf {
+ pins = "gpio3";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ power-source = <1>;
+ };
+ };
+ usb3_vbus_pin: usb3-vbus-pin {
+ pinconf {
+ pins = "gpio12";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ input-enable;
+ bias-pull-down;
+ power-source = <1>;
+ };
+ };
+};
+
+&usb2 {
+ status = "okay";
+};
+
+&usb2_phy_sec {
+ vdd-supply = <&vreg_l4_1p2>;
+ vdda1p8-supply = <&vreg_l5_1p8>;
+ vdda3p3-supply = <&vreg_l12_3p3>;
+ status = "okay";
+};
+
+&usb3 {
+ status = "okay";
+
+ dwc3@7580000 {
+ dr_mode = "host";
+ };
+};
+
+&usb2_phy_prim {
+ vdd-supply = <&vreg_l4_1p2>;
+ vdda1p8-supply = <&vreg_l5_1p8>;
+ vdda3p3-supply = <&vreg_l12_3p3>;
+ status = "okay";
+};
+
+&usb3_phy {
+ vdd-supply = <&vreg_l3_1p05>;
+ vdda1p8-supply = <&vreg_l5_1p8>;
+ status = "okay";
};
&wifi {
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index f149a538c1cc..c685a1664810 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -323,6 +323,48 @@
reg = <0x00060000 0x6000>;
};
+ usb3_phy: phy@78000 {
+ compatible = "qcom,usb-ss-28nm-phy";
+ reg = <0x00078000 0x400>;
+ #phy-cells = <0>;
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_USB_HS_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_USB3_PHY_PIPE_CLK>;
+ clock-names = "ref", "ahb", "pipe";
+ resets = <&gcc GCC_USB3_PHY_BCR>,
+ <&gcc GCC_USB3PHY_PHY_BCR>;
+ reset-names = "com", "phy";
+ status = "disabled";
+ };
+
+ usb2_phy_prim: phy@7a000 {
+ compatible = "qcom,usb-hs-28nm-femtophy";
+ reg = <0x0007a000 0x200>;
+ #phy-cells = <0>;
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_USB_HS_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+ clock-names = "ref", "ahb", "sleep";
+ resets = <&gcc GCC_USB_HS_PHY_CFG_AHB_BCR>,
+ <&gcc GCC_USB2A_PHY_BCR>;
+ reset-names = "phy", "por";
+ status = "disabled";
+ };
+
+ usb2_phy_sec: phy@7c000 {
+ compatible = "qcom,usb-hs-28nm-femtophy";
+ reg = <0x0007c000 0x200>;
+ #phy-cells = <0>;
+ clocks = <&rpmcc RPM_SMD_LN_BB_CLK>,
+ <&gcc GCC_USB_HS_PHY_CFG_AHB_CLK>,
+ <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+ clock-names = "ref", "ahb", "sleep";
+ resets = <&gcc GCC_QUSB2_PHY_BCR>,
+ <&gcc GCC_USB2_HS_PHY_ONLY_BCR>;
+ reset-names = "phy", "por";
+ status = "disabled";
+ };
+
qfprom: qfprom@a4000 {
compatible = "qcom,qfprom";
reg = <0x000a4000 0x1000>;
@@ -486,6 +528,64 @@
};
};
+ usb3: usb@7678800 {
+ compatible = "qcom,dwc3";
+ reg = <0x07678800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&gcc GCC_USB30_MASTER_CLK>,
+ <&gcc GCC_SYS_NOC_USB3_CLK>,
+ <&gcc GCC_USB30_SLEEP_CLK>,
+ <&gcc GCC_USB30_MOCK_UTMI_CLK>;
+ clock-names = "core", "iface", "sleep", "mock_utmi";
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+ status = "disabled";
+
+ dwc3@7580000 {
+ compatible = "snps,dwc3";
+ reg = <0x07580000 0xcd00>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb2_phy_sec>, <&usb3_phy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+ snps,usb3_lpm_capable;
+ dr_mode = "otg";
+ };
+ };
+
+ usb2: usb@79b8800 {
+ compatible = "qcom,dwc3";
+ reg = <0x079b8800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ clocks = <&gcc GCC_USB_HS_SYSTEM_CLK>,
+ <&gcc GCC_PCNOC_USB2_CLK>,
+ <&gcc GCC_USB_HS_INACTIVITY_TIMERS_CLK>,
+ <&gcc GCC_USB20_MOCK_UTMI_CLK>;
+ clock-names = "core", "iface", "sleep", "mock_utmi";
+ assigned-clocks = <&gcc GCC_USB20_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB_HS_SYSTEM_CLK>;
+ assigned-clock-rates = <19200000>, <133333333>;
+ status = "disabled";
+
+ dwc3@78c0000 {
+ compatible = "snps,dwc3";
+ reg = <0x078c0000 0xcc00>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb2_phy_prim>;
+ phy-names = "usb2-phy";
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+ snps,usb3_lpm_capable;
+ dr_mode = "peripheral";
+ };
+ };
+
tlmm: pinctrl@1000000 {
compatible = "qcom,qcs404-pinctrl";
reg = <0x01000000 0x200000>,
diff --git a/arch/arm64/boot/dts/qcom/sc7180-idp.dts b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
index 043c9b9b5024..4e9149d82d09 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-idp.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-idp.dts
@@ -28,6 +28,59 @@
};
};
+/*
+ * Reserved memory changes
+ *
+ * Delete all unused memory nodes and define the peripheral memory regions
+ * required by the board dts.
+ *
+ */
+
+/delete-node/ &hyp_mem;
+/delete-node/ &xbl_mem;
+/delete-node/ &aop_mem;
+/delete-node/ &sec_apps_mem;
+/delete-node/ &tz_mem;
+
+/* Increase the size from 2MB to 8MB */
+&rmtfs_mem {
+ reg = <0x0 0x84400000 0x0 0x800000>;
+};
+
+/ {
+ reserved-memory {
+ atf_mem: memory@80b00000 {
+ reg = <0x0 0x80b00000 0x0 0x100000>;
+ no-map;
+ };
+
+ mpss_mem: memory@86000000 {
+ reg = <0x0 0x86000000 0x0 0x8c00000>;
+ no-map;
+ };
+
+ camera_mem: memory@8ec00000 {
+ reg = <0x0 0x8ec00000 0x0 0x500000>;
+ no-map;
+ };
+
+ venus_mem: memory@8f600000 {
+ reg = <0 0x8f600000 0 0x500000>;
+ no-map;
+ };
+
+ wlan_mem: memory@94100000 {
+ reg = <0x0 0x94100000 0x0 0x200000>;
+ no-map;
+ };
+
+ mba_mem: memory@94400000 {
+ reg = <0x0 0x94400000 0x0 0x200000>;
+ no-map;
+ };
+ };
+};
+
&apps_rsc {
pm6150-rpmh-regulators {
compatible = "qcom,pm6150-rpmh-regulators";
@@ -256,6 +309,13 @@
status = "okay";
};
+&remoteproc_mpss {
+ status = "okay";
+ compatible = "qcom,sc7180-mss-pil";
+ iommus = <&apps_smmu 0x460 0x1>, <&apps_smmu 0x444 0x3>;
+ memory-region = <&mba_mem &mpss_mem>;
+};
+
&sdhc_1 {
status = "okay";
@@ -310,9 +370,11 @@
vdda-pll-supply = <&vreg_l11a_1p8>;
vdda-phy-dpdm-supply = <&vreg_l17a_3p0>;
qcom,imp-res-offset-value = <8>;
- qcom,hstx-trim-value = <QUSB2_V2_HSTX_TRIM_21_6_MA>;
- qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_5_PERCENT>;
+ qcom,preemphasis-level = <QUSB2_V2_PREEMPHASIS_15_PERCENT>;
qcom,preemphasis-width = <QUSB2_V2_PREEMPHASIS_WIDTH_HALF_BIT>;
+ qcom,bias-ctrl-value = <0x22>;
+ qcom,charge-ctrl-value = <3>;
+ qcom,hsdisc-trim-value = <0>;
};
&usb_1_qmpphy {
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 998f101ad623..31b9217bb5bf 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -10,6 +10,7 @@
#include <dt-bindings/clock/qcom,gpucc-sc7180.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,videocc-sc7180.h>
+#include <dt-bindings/interconnect/qcom,sc7180.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy-qcom-qusb2.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
@@ -69,9 +70,30 @@
#size-cells = <2>;
ranges;
+ hyp_mem: memory@80000000 {
+ reg = <0x0 0x80000000 0x0 0x600000>;
+ no-map;
+ };
+
+ xbl_mem: memory@80600000 {
+ reg = <0x0 0x80600000 0x0 0x200000>;
+ no-map;
+ };
+
+ aop_mem: memory@80800000 {
+ reg = <0x0 0x80800000 0x0 0x20000>;
+ no-map;
+ };
+
aop_cmd_db_mem: memory@80820000 {
reg = <0x0 0x80820000 0x0 0x20000>;
compatible = "qcom,cmd-db";
+ no-map;
+ };
+
+ sec_apps_mem: memory@808ff000 {
+ reg = <0x0 0x808ff000 0x0 0x1000>;
+ no-map;
};
smem_mem: memory@80900000 {
@@ -79,9 +101,18 @@
no-map;
};
- venus_mem: memory@8f600000 {
- reg = <0 0x8f600000 0 0x500000>;
+ tz_mem: memory@80b00000 {
+ reg = <0x0 0x80b00000 0x0 0x3900000>;
+ no-map;
+ };
+
+ rmtfs_mem: memory@84400000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x0 0x84400000 0x0 0x200000>;
no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
};
};
@@ -91,9 +122,12 @@
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x0>;
enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
next-level-cache = <&L2_0>;
@@ -110,9 +144,12 @@
CPU1: cpu@100 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x100>;
enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
next-level-cache = <&L2_100>;
@@ -126,9 +163,12 @@
CPU2: cpu@200 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x200>;
enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
next-level-cache = <&L2_200>;
@@ -142,9 +182,12 @@
CPU3: cpu@300 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x300>;
enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
next-level-cache = <&L2_300>;
@@ -158,9 +201,12 @@
CPU4: cpu@400 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x400>;
enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
next-level-cache = <&L2_400>;
@@ -174,9 +220,12 @@
CPU5: cpu@500 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x500>;
enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <100>;
next-level-cache = <&L2_500>;
@@ -190,9 +239,12 @@
CPU6: cpu@600 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x600>;
enable-method = "psci";
+ cpu-idle-states = <&BIG_CPU_SLEEP_0
+ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1740>;
dynamic-power-coefficient = <405>;
next-level-cache = <&L2_600>;
@@ -206,9 +258,12 @@
CPU7: cpu@700 {
device_type = "cpu";
- compatible = "arm,armv8";
+ compatible = "qcom,kryo468";
reg = <0x0 0x700>;
enable-method = "psci";
+ cpu-idle-states = <&BIG_CPU_SLEEP_0
+ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
capacity-dmips-mhz = <1740>;
dynamic-power-coefficient = <405>;
next-level-cache = <&L2_700>;
@@ -255,6 +310,60 @@
};
};
};
+
+ idle-states {
+ entry-method = "psci";
+
+ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-power-down";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <549>;
+ exit-latency-us = <901>;
+ min-residency-us = <1774>;
+ local-timer-stop;
+ };
+
+ LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-rail-power-down";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <702>;
+ exit-latency-us = <915>;
+ min-residency-us = <4001>;
+ local-timer-stop;
+ };
+
+ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-power-down";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <523>;
+ exit-latency-us = <1244>;
+ min-residency-us = <2207>;
+ local-timer-stop;
+ };
+
+ BIG_CPU_SLEEP_1: cpu-sleep-1-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-rail-power-down";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <526>;
+ exit-latency-us = <1854>;
+ min-residency-us = <5555>;
+ local-timer-stop;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "cluster-power-down";
+ arm,psci-suspend-param = <0x40003444>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9926>;
+ local-timer-stop;
+ };
+ };
};
memory@80000000 {
@@ -352,6 +461,17 @@
interrupt-controller;
#interrupt-cells = <2>;
};
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
psci {
@@ -898,11 +1018,56 @@
qcom,bcm-voters = <&apps_bcm_voter>;
};
+ ipa: ipa@1e40000 {
+ compatible = "qcom,sc7180-ipa";
+
+ iommus = <&apps_smmu 0x440 0x3>;
+ reg = <0 0x1e40000 0 0x7000>,
+ <0 0x1e47000 0 0x2000>,
+ <0 0x1e04000 0 0x2c000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc 0 311 IRQ_TYPE_EDGE_RISING>,
+ <&intc 0 432 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects = <&aggre2_noc MASTER_IPA &mc_virt SLAVE_EBI1>,
+ <&aggre2_noc MASTER_IPA &system_noc SLAVE_IMEM>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_IPA_CFG>;
+ interconnect-names = "memory",
+ "imem",
+ "config";
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+
+ modem-remoteproc = <&remoteproc_mpss>;
+
+ status = "disabled";
+ };
+
tcsr_mutex_regs: syscon@1f40000 {
compatible = "syscon";
reg = <0 0x01f40000 0 0x40000>;
};
+ tcsr_regs: syscon@1fc0000 {
+ compatible = "syscon";
+ reg = <0 0x01fc0000 0 0x40000>;
+ };
+
tlmm: pinctrl@3500000 {
compatible = "qcom,sc7180-pinctrl";
reg = <0 0x03500000 0 0x300000>,
@@ -1294,22 +1459,106 @@
};
};
- sdhc_2: sdhci@8804000 {
- compatible = "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
- reg = <0 0x08804000 0 0x1000>;
+ gpu: gpu@5000000 {
+ compatible = "qcom,adreno-618.0", "qcom,adreno";
+ #stream-id-cells = <16>;
+ reg = <0 0x05000000 0 0x40000>, <0 0x0509e000 0 0x1000>,
+ <0 0x05061000 0 0x800>;
+ reg-names = "kgsl_3d0_reg_memory", "cx_mem", "cx_dbgc";
+ interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&adreno_smmu 0>;
+ operating-points-v2 = <&gpu_opp_table>;
+ qcom,gmu = <&gmu>;
- iommus = <&apps_smmu 0x80 0>;
- interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "hc_irq", "pwr_irq";
+ gpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
- clocks = <&gcc GCC_SDCC2_APPS_CLK>,
- <&gcc GCC_SDCC2_AHB_CLK>;
- clock-names = "core", "iface";
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
- bus-width = <4>;
+ opp-650000000 {
+ opp-hz = /bits/ 64 <650000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
- status = "disabled";
+ opp-565000000 {
+ opp-hz = /bits/ 64 <565000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ opp-430000000 {
+ opp-hz = /bits/ 64 <430000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ opp-355000000 {
+ opp-hz = /bits/ 64 <355000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ opp-267000000 {
+ opp-hz = /bits/ 64 <267000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ opp-180000000 {
+ opp-hz = /bits/ 64 <180000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+ };
+ };
+
+ adreno_smmu: iommu@5040000 {
+ compatible = "qcom,sc7180-smmu-v2", "qcom,smmu-v2";
+ reg = <0 0x05040000 0 0x10000>;
+ #iommu-cells = <1>;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+
+ clocks = <&gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&gcc GCC_GPU_CFG_AHB_CLK>;
+ clock-names = "bus", "iface";
+
+ power-domains = <&gpucc CX_GDSC>;
+ };
+
+ gmu: gmu@506a000 {
+ compatible = "qcom,adreno-gmu-618.0", "qcom,adreno-gmu";
+ reg = <0 0x0506a000 0 0x31000>, <0 0x0b290000 0 0x10000>,
+ <0 0x0b490000 0 0x10000>;
+ reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq";
+ interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hfi", "gmu";
+ clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
+ <&gpucc GPU_CC_CXO_CLK>,
+ <&gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&gcc GCC_GPU_MEMNOC_GFX_CLK>;
+ clock-names = "gmu", "cxo", "axi", "memnoc";
+ power-domains = <&gpucc CX_GDSC>, <&gpucc GX_GDSC>;
+ power-domain-names = "cx", "gx";
+ iommus = <&adreno_smmu 5>;
+ operating-points-v2 = <&gmu_opp_table>;
+
+ gmu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+ };
};
gpucc: clock-controller@5090000 {
@@ -1326,6 +1575,554 @@
#power-domain-cells = <1>;
};
+ stm@6002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0 0x06002000 0 0x1000>,
+ <0 0x16280000 0 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out: endpoint {
+ remote-endpoint = <&funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ funnel@6041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06041000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ funnel0_out: endpoint {
+ remote-endpoint = <&merge_funnel_in0>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+ funnel0_in7: endpoint {
+ remote-endpoint = <&stm_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06042000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ funnel1_out: endpoint {
+ remote-endpoint = <&merge_funnel_in1>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ funnel1_in4: endpoint {
+ remote-endpoint = <&apss_merge_funnel_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06045000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ merge_funnel_out: endpoint {
+ remote-endpoint = <&swao_funnel_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ merge_funnel_in0: endpoint {
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ merge_funnel_in1: endpoint {
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ };
+ };
+
+ replicator@6046000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0 0x06046000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ replicator_out: endpoint {
+ remote-endpoint = <&etr_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ replicator_in: endpoint {
+ remote-endpoint = <&swao_replicator_out>;
+ };
+ };
+ };
+ };
+
+ etr@6048000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x06048000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,scatter-gather;
+
+ in-ports {
+ port {
+ etr_in: endpoint {
+ remote-endpoint = <&replicator_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06b04000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ swao_funnel_out: endpoint {
+ remote-endpoint = <&etf_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+ swao_funnel_in: endpoint {
+ remote-endpoint = <&merge_funnel_out>;
+ };
+ };
+ };
+ };
+
+ etf@6b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x06b05000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ etf_out: endpoint {
+ remote-endpoint = <&swao_replicator_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ etf_in: endpoint {
+ remote-endpoint = <&swao_funnel_out>;
+ };
+ };
+ };
+ };
+
+ replicator@6b06000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0 0x06b06000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ swao_replicator_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ swao_replicator_in: endpoint {
+ remote-endpoint = <&etf_out>;
+ };
+ };
+ };
+ };
+
+ etm@7040000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07040000 0 0x1000>;
+
+ cpu = <&CPU0>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&apss_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ etm@7140000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07140000 0 0x1000>;
+
+ cpu = <&CPU1>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&apss_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ etm@7240000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07240000 0 0x1000>;
+
+ cpu = <&CPU2>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&apss_funnel_in2>;
+ };
+ };
+ };
+ };
+
+ etm@7340000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07340000 0 0x1000>;
+
+ cpu = <&CPU3>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&apss_funnel_in3>;
+ };
+ };
+ };
+ };
+
+ etm@7440000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07440000 0 0x1000>;
+
+ cpu = <&CPU4>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm4_out: endpoint {
+ remote-endpoint = <&apss_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ etm@7540000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07540000 0 0x1000>;
+
+ cpu = <&CPU5>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm5_out: endpoint {
+ remote-endpoint = <&apss_funnel_in5>;
+ };
+ };
+ };
+ };
+
+ etm@7640000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07640000 0 0x1000>;
+
+ cpu = <&CPU6>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm6_out: endpoint {
+ remote-endpoint = <&apss_funnel_in6>;
+ };
+ };
+ };
+ };
+
+ etm@7740000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07740000 0 0x1000>;
+
+ cpu = <&CPU7>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+
+ out-ports {
+ port {
+ etm7_out: endpoint {
+ remote-endpoint = <&apss_funnel_in7>;
+ };
+ };
+ };
+ };
+
+ funnel@7800000 { /* APSS Funnel */
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x07800000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_funnel_out: endpoint {
+ remote-endpoint = <&apss_merge_funnel_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ apss_funnel_in0: endpoint {
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ apss_funnel_in1: endpoint {
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ apss_funnel_in2: endpoint {
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ apss_funnel_in3: endpoint {
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ apss_funnel_in4: endpoint {
+ remote-endpoint = <&etm4_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ apss_funnel_in5: endpoint {
+ remote-endpoint = <&etm5_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ apss_funnel_in6: endpoint {
+ remote-endpoint = <&etm6_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ apss_funnel_in7: endpoint {
+ remote-endpoint = <&etm7_out>;
+ };
+ };
+ };
+ };
+
+ funnel@7810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x07810000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_merge_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in4>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ apss_merge_funnel_in: endpoint {
+ remote-endpoint = <&apss_funnel_out>;
+ };
+ };
+ };
+ };
+
+ remoteproc_mpss: remoteproc@4080000 {
+ compatible = "qcom,sc7180-mpss-pas";
+ reg = <0 0x04080000 0 0x4040>, <0 0x04180000 0 0x48>;
+ reg-names = "qdsp6", "rmb";
+
+ interrupts-extended = <&intc GIC_SPI 266 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover",
+ "stop-ack", "shutdown-ack";
+
+ clocks = <&gcc GCC_MSS_CFG_AHB_CLK>,
+ <&gcc GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+ <&gcc GCC_MSS_NAV_AXI_CLK>,
+ <&gcc GCC_MSS_SNOC_AXI_CLK>,
+ <&gcc GCC_MSS_MFAB_AXIS_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "bus", "nav", "snoc_axi",
+ "mnoc_axi", "xo";
+
+ power-domains = <&aoss_qmp AOSS_QMP_LS_MODEM>,
+ <&rpmhpd SC7180_CX>,
+ <&rpmhpd SC7180_MX>,
+ <&rpmhpd SC7180_MSS>;
+ power-domain-names = "load_state", "cx", "mx", "mss";
+
+ memory-region = <&mpss_mem>;
+
+ qcom,smem-states = <&modem_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ resets = <&aoss_reset AOSS_CC_MSS_RESTART>,
+ <&pdc_reset PDC_MODEM_SYNC_RESET>;
+ reset-names = "mss_restart", "pdc_reset";
+
+ qcom,halt-regs = <&tcsr_mutex_regs 0x23000 0x25000 0x24000>;
+ qcom,spare-regs = <&tcsr_regs 0xb3e4>;
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+ label = "modem";
+ qcom,remote-pid = <1>;
+ mboxes = <&apss_shared 12>;
+ };
+ };
+
+ sdhc_2: sdhci@8804000 {
+ compatible = "qcom,sc7180-sdhci", "qcom,sdhci-msm-v5";
+ reg = <0 0x08804000 0 0x1000>;
+
+ iommus = <&apps_smmu 0x80 0>;
+ interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ clocks = <&gcc GCC_SDCC2_APPS_CLK>,
+ <&gcc GCC_SDCC2_AHB_CLK>;
+ clock-names = "core", "iface";
+
+ bus-width = <4>;
+
+ status = "disabled";
+ };
+
qspi: spi@88dc000 {
compatible = "qcom,qspi-v1";
reg = <0 0x088dc000 0 0x600>;
@@ -1339,7 +2136,7 @@
};
usb_1_hsphy: phy@88e3000 {
- compatible = "qcom,sc7180-qusb2-phy";
+ compatible = "qcom,sc7180-qusb2-phy", "qcom,qusb2-v2-phy";
reg = <0 0x088e3000 0 0x400>;
status = "disabled";
#phy-cells = <0>;
@@ -1447,6 +2244,10 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
+ interconnects = <&aggre2_noc MASTER_USB3 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_USB3>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
@@ -1475,6 +2276,9 @@
"vcodec0_core", "vcodec0_bus";
iommus = <&apps_smmu 0x0c00 0x60>;
memory-region = <&venus_mem>;
+ interconnects = <&mmss_noc MASTER_VIDEO_P0 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_VENUS_CFG>;
+ interconnect-names = "video-mem", "cpu-cfg";
video-decoder {
compatible = "venus-decoder";
@@ -1544,8 +2348,12 @@
clock-names = "iface", "rot", "lut", "core",
"vsync";
assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
- <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ <&dispcc DISP_CC_MDSS_VSYNC_CLK>,
+ <&dispcc DISP_CC_MDSS_ROT_CLK>,
+ <&dispcc DISP_CC_MDSS_AHB_CLK>;
assigned-clock-rates = <300000000>,
+ <19200000>,
+ <19200000>,
<19200000>;
interrupt-parent = <&mdss>;
@@ -1657,8 +2465,7 @@
pdc: interrupt-controller@b220000 {
compatible = "qcom,sc7180-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x30000>;
- qcom,pdc-ranges = <0 480 15>, <17 497 98>,
- <119 634 4>, <124 639 1>;
+ qcom,pdc-ranges = <0 480 94>, <94 609 31>, <125 63 1>;
#interrupt-cells = <2>;
interrupt-parent = <&intc>;
interrupt-controller;
@@ -2011,8 +2818,8 @@
thermal-zones {
cpu0-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 1>;
@@ -2059,8 +2866,8 @@
};
cpu1-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 2>;
@@ -2107,8 +2914,8 @@
};
cpu2-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 3>;
@@ -2155,8 +2962,8 @@
};
cpu3-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 4>;
@@ -2203,8 +3010,8 @@
};
cpu4-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 5>;
@@ -2251,8 +3058,8 @@
};
cpu5-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 6>;
@@ -2299,8 +3106,8 @@
};
cpu6-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 9>;
@@ -2339,8 +3146,8 @@
};
cpu7-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 10>;
@@ -2379,8 +3186,8 @@
};
cpu8-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 11>;
@@ -2419,8 +3226,8 @@
};
cpu9-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 12>;
@@ -2459,8 +3266,8 @@
};
aoss0-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 0>;
@@ -2480,8 +3287,8 @@
};
cpuss0-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 7>;
@@ -2500,8 +3307,8 @@
};
cpuss1-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 8>;
@@ -2520,8 +3327,8 @@
};
gpuss0-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 13>;
@@ -2541,8 +3348,8 @@
};
gpuss1-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens0 14>;
@@ -2562,8 +3369,8 @@
};
aoss1-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 0>;
@@ -2583,8 +3390,8 @@
};
cwlan-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 1>;
@@ -2604,8 +3411,8 @@
};
audio-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 2>;
@@ -2625,8 +3432,8 @@
};
ddr-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 3>;
@@ -2646,8 +3453,8 @@
};
q6-hvx-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 4>;
@@ -2667,8 +3474,8 @@
};
camera-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 5>;
@@ -2688,8 +3495,8 @@
};
mdm-core-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 6>;
@@ -2709,8 +3516,8 @@
};
mdm-dsp-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 7>;
@@ -2730,8 +3537,8 @@
};
npu-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 8>;
@@ -2751,8 +3558,8 @@
};
video-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
thermal-sensors = <&tsens1 9>;
diff --git a/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
new file mode 100644
index 000000000000..76533e8b2092
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm660-xiaomi-lavender.dts
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, Alexey Minnekhanov <alexey.min@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "sdm660.dtsi"
+
+/ {
+ model = "Xiaomi Redmi Note 7";
+ compatible = "xiaomi,lavender", "qcom,sdm660";
+
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ramoops@a0000000 {
+ compatible = "ramoops";
+ reg = <0x0 0xa0000000 0x0 0x400000>;
+ console-size = <0x20000>;
+ record-size = <0x20000>;
+ ftrace-size = <0x0>;
+ pmsg-size = <0x20000>;
+ };
+ };
+};
+
+&blsp1_uart2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
+};
+
+&tlmm {
+ gpio-reserved-ranges = <8 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm660.dtsi b/arch/arm64/boot/dts/qcom/sdm660.dtsi
new file mode 100644
index 000000000000..4abbdd03d1e7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm660.dtsi
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018, Craig Tatlor.
+ * Copyright (c) 2020, Alexey Minnekhanov <alexey.min@gmail.com>
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-sdm660.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ next-level-cache = <&L2_1>;
+ L2_1: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ L1_I_100: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_100: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU1: cpu@101 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ next-level-cache = <&L2_1>;
+ L1_I_101: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_101: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU2: cpu@102 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x102>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ next-level-cache = <&L2_1>;
+ L1_I_102: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_102: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU3: cpu@103 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x103>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
+ next-level-cache = <&L2_1>;
+ L1_I_103: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_103: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU4: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <640>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ cache-level = <2>;
+ };
+ L1_I_0: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_0: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU5: cpu@1 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <640>;
+ next-level-cache = <&L2_0>;
+ L1_I_1: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_1: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU6: cpu@2 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <640>;
+ next-level-cache = <&L2_0>;
+ L1_I_2: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_2: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ CPU7: cpu@3 {
+ device_type = "cpu";
+ compatible = "qcom,kryo260";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ capacity-dmips-mhz = <640>;
+ next-level-cache = <&L2_0>;
+ L1_I_3: l1-icache {
+ compatible = "cache";
+ };
+ L1_D_3: l1-dcache {
+ compatible = "cache";
+ };
+ };
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU4>;
+ };
+
+ core1 {
+ cpu = <&CPU5>;
+ };
+
+ core2 {
+ cpu = <&CPU6>;
+ };
+
+ core3 {
+ cpu = <&CPU7>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+
+ core2 {
+ cpu = <&CPU2>;
+ };
+
+ core3 {
+ cpu = <&CPU3>;
+ };
+ };
+ };
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 1 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 2 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 3 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 0 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ soc: soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sdm660";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x00100000 0x94000>;
+ };
+
+ tlmm: pinctrl@3100000 {
+ compatible = "qcom,sdm660-pinctrl";
+ reg = <0x03100000 0x400000>,
+ <0x03500000 0x400000>,
+ <0x03900000 0x400000>;
+ reg-names = "south", "center", "north";
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ gpio-ranges = <&tlmm 0 0 114>;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart_console_active: uart_console_active {
+ pinmux {
+ pins = "gpio4", "gpio5";
+ function = "blsp_uart2";
+ };
+
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ spmi_bus: spmi@800f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0800f000 0x1000>,
+ <0x08400000 0x1000000>,
+ <0x09400000 0x1000000>,
+ <0x0a400000 0x220000>,
+ <0x0800a000 0x3000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ cell-index = <0>;
+ };
+
+ blsp1_uart2: serial@c170000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x0c170000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
+ timer@17920000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0x17920000 0x1000>;
+
+ frame@17921000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17921000 0x1000>,
+ <0x17922000 0x1000>;
+ };
+
+ frame@17923000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17923000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17924000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17924000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17925000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17925000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17926000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17926000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17927000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17927000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17928000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x17928000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ reg = <0x17a00000 0x10000>,
+ <0x17b00000 0x100000>;
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ interrupt-controller;
+ #redistributor-regions = <1>;
+ redistributor-stride = <0x0 0x20000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 9070be43a309..70466cc4b405 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -548,6 +548,8 @@ edp_brij_i2c: &i2c3 {
clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
clock-names = "refclk";
+ no-hpd;
+
ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -631,6 +633,11 @@ ap_ts_i2c: &i2c14 {
status = "okay";
};
+&mss_pil {
+ iommus = <&apps_smmu 0x780 0x1>,
+ <&apps_smmu 0x724 0x3>;
+};
+
&pm8998_pwrkey {
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 21fd6f8d5799..c00797bd3b07 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -112,6 +112,40 @@
// enable-active-high;
};
+ cam0_dvdd_1v2: reg_cam0_dvdd_1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "CAM0_DVDD_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ enable-active-high;
+ gpio = <&pm8998_gpio 12 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_dvdd_1v2_en_default>;
+ vin-supply = <&vbat>;
+ };
+
+ cam0_avdd_2v8: reg_cam0_avdd_2v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "CAM0_AVDD_2V8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ enable-active-high;
+ gpio = <&pm8998_gpio 10 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_avdd_2v8_en_default>;
+ vin-supply = <&vbat>;
+ };
+
+ /* This regulator is enabled when the VREG_LVS1A_1P8 trace is enabled */
+ cam3_avdd_2v8: reg_cam3_avdd_2v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "CAM3_AVDD_2V8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ vin-supply = <&vbat>;
+ };
+
pcie0_3p3v_dual: vldo-3v3-regulator {
compatible = "regulator-fixed";
regulator-name = "VLDO_3V3";
@@ -412,6 +446,52 @@
};
&pm8998_gpio {
+ gpio-line-names =
+ "NC",
+ "NC",
+ "WLAN_SW_CTRL",
+ "NC",
+ "PM_GPIO5_BLUE_BT_LED",
+ "VOL_UP_N",
+ "NC",
+ "ADC_IN1",
+ "PM_GPIO9_YEL_WIFI_LED",
+ "CAM0_AVDD_EN",
+ "NC",
+ "CAM0_DVDD_EN",
+ "PM_GPIO13_GREEN_U4_LED",
+ "DIV_CLK2",
+ "NC",
+ "NC",
+ "NC",
+ "SMB_STAT",
+ "NC",
+ "NC",
+ "ADC_IN2",
+ "OPTION1",
+ "WCSS_PWR_REQ",
+ "PM845_GPIO24",
+ "OPTION2",
+ "PM845_SLB";
+
+ cam0_dvdd_1v2_en_default: cam0-dvdd-1v2-en {
+ pins = "gpio12";
+ function = "normal";
+
+ bias-pull-up;
+ drive-push-pull;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ };
+
+ cam0_avdd_2v8_en_default: cam0-avdd-2v8-en {
+ pins = "gpio10";
+ function = "normal";
+
+ bias-pull-up;
+ drive-push-pull;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ };
+
vol_up_pin_a: vol-up-active {
pins = "gpio6";
function = "normal";
@@ -570,6 +650,42 @@
};
&tlmm {
+ cam0_default: cam0_default {
+ rst {
+ pins = "gpio9";
+ function = "gpio";
+
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ mclk0 {
+ pins = "gpio13";
+ function = "cam_mclk";
+
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ cam3_default: cam3_default {
+ rst {
+ function = "gpio";
+ pins = "gpio21";
+
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ mclk3 {
+ function = "cam_mclk";
+ pins = "gpio16";
+
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
pcie0_default_state: pcie0-default {
clkreq {
pins = "gpio36";
@@ -863,3 +979,97 @@
bias-pull-up;
};
};
+
+&pm8998_gpio {
+
+};
+
+&cci {
+ status = "okay";
+};
+
+&cci_i2c0 {
+ camera@10 {
+ compatible = "ovti,ov8856";
+ reg = <0x10>;
+
+ // CAM0_RST_N
+ reset-gpios = <&tlmm 9 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam0_default>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 9 0>;
+
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "xvclk";
+ clock-frequency = <19200000>;
+
+ /* The &vreg_s4a_1p8 trace is always powered on,
+ * so it is represented by a fixed regulator.
+ *
+ * The 2.8V vdda-supply and 1.2V vddd-supply regulators
+ * both have to be enabled through the power management
+ * gpios.
+ */
+ power-domains = <&clock_camcc TITAN_TOP_GDSC>;
+
+ dovdd-supply = <&vreg_lvs1a_1p8>;
+ avdd-supply = <&cam0_avdd_2v8>;
+ dvdd-supply = <&cam0_dvdd_1v2>;
+
+ status = "disabled";
+
+ port {
+ ov8856_ep: endpoint {
+ clock-lanes = <1>;
+ link-frequencies = /bits/ 64
+ <360000000 180000000>;
+ data-lanes = <1 2 3 4>;
+// remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
+};
+
+&cci_i2c1 {
+ camera@60 {
+ compatible = "ovti,ov7251";
+
+ // I2C address as per ov7251.txt linux documentation
+ reg = <0x60>;
+
+ // CAM3_RST_N
+ enable-gpios = <&tlmm 21 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cam3_default>;
+ gpios = <&tlmm 16 0>,
+ <&tlmm 21 0>;
+
+ clocks = <&clock_camcc CAM_CC_MCLK3_CLK>;
+ clock-names = "xclk";
+ clock-frequency = <24000000>;
+
+ /* The &vreg_s4a_1p8 trace is always powered on.
+ *
+ * The 2.8V vdda-supply regulator is enabled when the
+ * vreg_s4a_1p8 trace is pulled high.
+ * It too is represented by a fixed regulator.
+ *
+ * No 1.2V vddd-supply regulator is used.
+ */
+ power-domains = <&clock_camcc TITAN_TOP_GDSC>;
+
+ vdddo-supply = <&vreg_lvs1a_1p8>;
+ vdda-supply = <&cam3_avdd_2v8>;
+
+ status = "disabled";
+
+ port {
+ ov7251_ep: endpoint {
+ clock-lanes = <1>;
+ data-lanes = <0 1>;
+// remote-endpoint = <&csiphy3_ep>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 023e8b04c7f6..1372fe8601f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -13,7 +13,7 @@
/ {
model = "Qualcomm Technologies, Inc. SDM845 MTP";
- compatible = "qcom,sdm845-mtp";
+ compatible = "qcom,sdm845-mtp", "qcom,sdm845";
aliases {
serial0 = &uart9;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index de6bb86c4968..8eb5a31346d2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -5,6 +5,7 @@
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
#include <dt-bindings/clock/qcom,gcc-sdm845.h>
#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
@@ -1815,6 +1816,42 @@
gpio-ranges = <&tlmm 0 0 150>;
wakeup-parent = <&pdc_intc>;
+ cci0_default: cci0-default {
+ /* SDA, SCL */
+ pins = "gpio17", "gpio18";
+ function = "cci_i2c";
+
+ bias-pull-up;
+ drive-strength = <2>; /* 2 mA */
+ };
+
+ cci0_sleep: cci0-sleep {
+ /* SDA, SCL */
+ pins = "gpio17", "gpio18";
+ function = "cci_i2c";
+
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ };
+
+ cci1_default: cci1-default {
+ /* SDA, SCL */
+ pins = "gpio19", "gpio20";
+ function = "cci_i2c";
+
+ bias-pull-up;
+ drive-strength = <2>; /* 2 mA */
+ };
+
+ cci1_sleep: cci1-sleep {
+ /* SDA, SCL */
+ pins = "gpio19", "gpio20";
+ function = "cci_i2c";
+
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down;
+ };
+
qspi_clk: qspi-clk {
pinmux {
pins = "gpio95";
@@ -2972,7 +3009,7 @@
};
usb_1_hsphy: phy@88e2000 {
- compatible = "qcom,sdm845-qusb2-phy";
+ compatible = "qcom,sdm845-qusb2-phy", "qcom,qusb2-v2-phy";
reg = <0 0x088e2000 0 0x400>;
status = "disabled";
#phy-cells = <0>;
@@ -2987,7 +3024,7 @@
};
usb_2_hsphy: phy@88e3000 {
- compatible = "qcom,sdm845-qusb2-phy";
+ compatible = "qcom,sdm845-qusb2-phy", "qcom,qusb2-v2-phy";
reg = <0 0x088e3000 0 0x400>;
status = "disabled";
#phy-cells = <0>;
@@ -3099,6 +3136,10 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
+ interconnects = <&aggre2_noc MASTER_USB3_0 &mem_noc SLAVE_EBI1>,
+ <&gladiator_noc MASTER_APPSS_PROC &config_noc SLAVE_USB3_0>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
@@ -3143,6 +3184,10 @@
resets = <&gcc GCC_USB30_SEC_BCR>;
+ interconnects = <&aggre2_noc MASTER_USB3_1 &mem_noc SLAVE_EBI1>,
+ <&gladiator_noc MASTER_APPSS_PROC &config_noc SLAVE_USB3_1>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
usb_2_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0 0x0a800000 0 0xcd00>;
@@ -3196,6 +3241,61 @@
#reset-cells = <1>;
};
+ cci: cci@ac4a000 {
+ compatible = "qcom,sdm845-cci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0 0x0ac4a000 0 0x4000>;
+ interrupts = <GIC_SPI 460 IRQ_TYPE_EDGE_RISING>;
+ power-domains = <&clock_camcc TITAN_TOP_GDSC>;
+
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK_SRC>;
+ clock-names = "camnoc_axi",
+ "soc_ahb",
+ "slow_ahb_src",
+ "cpas_ahb",
+ "cci",
+ "cci_src";
+
+ assigned-clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_CCI_CLK>;
+ assigned-clock-rates = <80000000>, <37500000>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cci0_default &cci1_default>;
+ pinctrl-1 = <&cci0_sleep &cci1_sleep>;
+
+ status = "disabled";
+
+ cci_i2c0: i2c-bus@0 {
+ reg = <0>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ cci_i2c1: i2c-bus@1 {
+ reg = <1>;
+ clock-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ clock_camcc: clock-controller@ad00000 {
+ compatible = "qcom,sdm845-camcc";
+ reg = <0 0x0ad00000 0 0x10000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
mdss: mdss@ae00000 {
compatible = "qcom,sdm845-mdss";
reg = <0 0x0ae00000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index 51a670ad15b2..d03ca3190746 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -480,6 +480,8 @@
&ufs_mem_hc {
status = "okay";
+ reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
+
vcc-supply = <&vreg_l20a_2p95>;
vcc-max-microamp = <600000>;
};
@@ -577,3 +579,14 @@
};
};
};
+
+&wifi {
+ status = "okay";
+
+ vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+
+ qcom,snoc-host-cap-8bit-quirk;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index 224d0f1ea6f9..cff7a85890ee 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -5,6 +5,7 @@
/dts-v1/;
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "sm8250.dtsi"
/ {
@@ -18,6 +19,336 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ vreg_s4a_1p8: pm8150-s4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_s6c_0p88: smpc6-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s6c_0p88";
+
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-always-on;
+ vin-supply = <&vph_pwr>;
+ };
+};
+
+&apps_rsc {
+ pm8150-rpmh-regulators {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-l1-l8-l11-supply = <&vreg_s6c_0p88>;
+ vdd-l2-l10-supply = <&vreg_bob>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p95>;
+ vdd-l6-l9-supply = <&vreg_s8c_1p3>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_1p9>;
+ vdd-l13-l16-l17-supply = <&vreg_bob>;
+
+ vreg_s5a_1p9: smps5 {
+ regulator-name = "vreg_s5a_1p9";
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6a_0p95: smps6 {
+ regulator-name = "vreg_s6a_0p95";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_3p1: ldo2 {
+ regulator-name = "vreg_l2a_3p1";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p9: ldo3 {
+ regulator-name = "vreg_l3a_0p9";
+ regulator-min-microvolt = <928000>;
+ regulator-max-microvolt = <932000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a_0p875: ldo5 {
+ regulator-name = "vreg_l5a_0p875";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_1p2: ldo6 {
+ regulator-name = "vreg_l6a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p7: ldo7 {
+ regulator-name = "vreg_l7a_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9a_1p2: ldo9 {
+ regulator-name = "vreg_l9a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a_1p8: ldo10 {
+ regulator-name = "vreg_l10a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11a_0p75: ldo11 {
+ regulator-name = "vreg_l11a_0p75";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12a_1p8: ldo12 {
+ regulator-name = "vreg_l12a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13a_ts_3p0: ldo13 {
+ regulator-name = "vreg_l13a_ts_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-name = "vreg_l14a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_11ad_io_1p8: ldo15 {
+ regulator-name = "vreg_l15a_11ad_io_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a_2p7: ldo16 {
+ regulator-name = "vreg_l16a_2p7";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a_3p0: ldo17 {
+ regulator-name = "vreg_l17a_3p0";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8150l-rpmh-regulators {
+ compatible = "qcom,pm8150l-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-l1-l8-supply = <&vreg_s4a_1p8>;
+ vdd-l2-l3-supply = <&vreg_s8c_1p3>;
+ vdd-l4-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-l11-supply = <&vreg_bob>;
+ vdd-l9-l10-supply = <&vreg_bob>;
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
+ vreg_s8c_1p3: smps8 {
+ regulator-name = "vreg_s8c_1p3";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p2: ldo2 {
+ regulator-name = "vreg_l2c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p92: ldo3 {
+ regulator-name = "vreg_l3c_0p92";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c_1p7: ldo4 {
+ regulator-name = "vreg_l4c_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c_1p8: ldo5 {
+ regulator-name = "vreg_l5c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_2p9: ldo6 {
+ regulator-name = "vreg_l6c_2p9";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c_cam_vcm0_2p85: ldo7 {
+ regulator-name = "vreg_l7c_cam_vcm0_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-name = "vreg_l8c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-name = "vreg_l9c_2p9";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c_3p0: ldo10 {
+ regulator-name = "vreg_l10c_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_3p3: ldo11 {
+ regulator-name = "vreg_l11c_3p3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8009-rpmh-regulators {
+ compatible = "qcom,pm8009-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vreg_bob>;
+ vdd-l2-supply = <&vreg_s8c_1p3>;
+ vdd-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_s4a_1p8>;
+
+ vreg_l1f_cam_dvdd1_1p1: ldo1 {
+ regulator-name = "vreg_l1f_cam_dvdd1_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_cam_dvdd0_1p2: ldo2 {
+ regulator-name = "vreg_l2f_cam_dvdd0_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_cam_dvdd2_1p05: ldo3 {
+ regulator-name = "vreg_l3f_cam_dvdd2_1p05";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5f_cam_avdd0_2p85: ldo5 {
+ regulator-name = "vreg_l5f_cam_avdd0_2p85";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6f_cam_avdd1_2p85: ldo6 {
+ regulator-name = "vreg_l6f_cam_avdd1_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <2856000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7f_1p8: ldo7 {
+ regulator-name = "vreg_l7f_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
};
&qupv3_id_1 {
@@ -27,3 +358,23 @@
&uart2 {
status = "okay";
};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ vcc-supply = <&vreg_l17a_3p0>;
+ vcc-max-microamp = <750000>;
+ vccq-supply = <&vreg_l6a_1p2>;
+ vccq-max-microamp = <700000>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5a_0p875>;
+ vdda-max-microamp = <90200>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+ vdda-pll-max-microamp = <19000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 891d83b2afea..7050adba7995 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -4,7 +4,9 @@
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
/ {
@@ -304,6 +306,76 @@
};
};
+ ufs_mem_hc: ufs@1d84000 {
+ compatible = "qcom,sm8250-ufshc", "qcom,ufshc",
+ "jedec,ufs-2.0";
+ reg = <0 0x01d84000 0 0x3000>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufs_mem_phy_lanes>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ #reset-cells = <1>;
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "rst";
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <37500000 300000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ status = "disabled";
+ };
+
+ ufs_mem_phy: phy@1d87000 {
+ compatible = "qcom,sm8250-qmp-ufs-phy";
+ reg = <0 0x01d87000 0 0x1c0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ clock-names = "ref",
+ "ref_aux";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+ status = "disabled";
+
+ ufs_mem_phy_lanes: lanes@1d87400 {
+ reg = <0 0x01d87400 0 0x108>,
+ <0 0x01d87600 0 0x1e0>,
+ <0 0x01d87c00 0 0x1dc>,
+ <0 0x01d87800 0 0x108>,
+ <0 0x01d87a00 0 0x1e0>;
+ #phy-cells = <0>;
+ };
+ };
+
intc: interrupt-controller@17a00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
@@ -314,8 +386,8 @@
};
pdc: interrupt-controller@b220000 {
- compatible = "qcom,sm8250-pdc";
- reg = <0x0b220000 0x30000>, <0x17c000f0 0x60>;
+ compatible = "qcom,sm8250-pdc", "qcom,pdc";
+ reg = <0 0x0b220000 0 0x30000>, <0 0x17c000f0 0 0x60>;
qcom,pdc-ranges = <0 480 94>, <94 609 31>,
<125 63 1>, <126 716 12>;
#interrupt-cells = <2>;
@@ -362,6 +434,56 @@
clock-names = "xo";
clocks = <&xo_board>;
};
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sm8250-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs: opp3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp10 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
};
tcsr_mutex_regs: syscon@1f40000 {
diff --git a/arch/arm64/boot/dts/realtek/Makefile b/arch/arm64/boot/dts/realtek/Makefile
index 555638ada721..ef8d8fcbaa05 100644
--- a/arch/arm64/boot/dts/realtek/Makefile
+++ b/arch/arm64/boot/dts/realtek/Makefile
@@ -4,6 +4,12 @@ dtb-$(CONFIG_ARCH_REALTEK) += rtd1293-ds418j.dtb
dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-mele-v9.dtb
dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-probox2-ava.dtb
+dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-xnano-x5.dtb
dtb-$(CONFIG_ARCH_REALTEK) += rtd1295-zidoo-x9s.dtb
dtb-$(CONFIG_ARCH_REALTEK) += rtd1296-ds418.dtb
+
+dtb-$(CONFIG_ARCH_REALTEK) += rtd1395-bpi-m4.dtb
+dtb-$(CONFIG_ARCH_REALTEK) += rtd1395-lionskin.dtb
+
+dtb-$(CONFIG_ARCH_REALTEK) += rtd1619-mjolnir.dtb
diff --git a/arch/arm64/boot/dts/realtek/rtd1293-ds418j.dts b/arch/arm64/boot/dts/realtek/rtd1293-ds418j.dts
index b2dd583146b4..b2e44c6c2d22 100644
--- a/arch/arm64/boot/dts/realtek/rtd1293-ds418j.dts
+++ b/arch/arm64/boot/dts/realtek/rtd1293-ds418j.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
/*
- * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2017-2019 Andreas Färber
*/
/dts-v1/;
@@ -11,9 +11,9 @@
compatible = "synology,ds418j", "realtek,rtd1293";
model = "Synology DiskStation DS418j";
- memory@0 {
+ memory@1f000 {
device_type = "memory";
- reg = <0x0 0x40000000>;
+ reg = <0x1f000 0x3ffe1000>; /* boot ROM to 1 GiB */
};
aliases {
diff --git a/arch/arm64/boot/dts/realtek/rtd1293.dtsi b/arch/arm64/boot/dts/realtek/rtd1293.dtsi
index bd4e22723f7b..2d92b56ac94d 100644
--- a/arch/arm64/boot/dts/realtek/rtd1293.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd1293.dtsi
@@ -36,16 +36,20 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>;
+ (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
};
};
&arm_pmu {
interrupt-affinity = <&cpu0>, <&cpu1>;
};
+
+&gic {
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1295-mele-v9.dts b/arch/arm64/boot/dts/realtek/rtd1295-mele-v9.dts
index bd584e99fff9..cf4a57c012a8 100644
--- a/arch/arm64/boot/dts/realtek/rtd1295-mele-v9.dts
+++ b/arch/arm64/boot/dts/realtek/rtd1295-mele-v9.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2017-2019 Andreas Färber
*
* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
*/
@@ -12,9 +12,9 @@
compatible = "mele,v9", "realtek,rtd1295";
model = "MeLE V9";
- memory@0 {
+ memory@1f000 {
device_type = "memory";
- reg = <0x0 0x80000000>;
+ reg = <0x1f000 0x7ffe1000>; /* boot ROM to 2 GiB */
};
aliases {
diff --git a/arch/arm64/boot/dts/realtek/rtd1295-probox2-ava.dts b/arch/arm64/boot/dts/realtek/rtd1295-probox2-ava.dts
index 8e2b0e75298a..14161c3f304d 100644
--- a/arch/arm64/boot/dts/realtek/rtd1295-probox2-ava.dts
+++ b/arch/arm64/boot/dts/realtek/rtd1295-probox2-ava.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2017-2019 Andreas Färber
*
* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
*/
@@ -12,9 +12,9 @@
compatible = "probox2,ava", "realtek,rtd1295";
model = "PROBOX2 AVA";
- memory@0 {
+ memory@1f000 {
device_type = "memory";
- reg = <0x0 0x80000000>;
+ reg = <0x1f000 0x7ffe1000>; /* boot ROM to 2 GiB */
};
aliases {
diff --git a/arch/arm64/boot/dts/realtek/rtd1295-xnano-x5.dts b/arch/arm64/boot/dts/realtek/rtd1295-xnano-x5.dts
new file mode 100644
index 000000000000..d7878ff942e6
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1295-xnano-x5.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2017-2019 Andreas Färber
+ */
+
+/dts-v1/;
+
+#include "rtd1295.dtsi"
+
+/ {
+ compatible = "xnano,x5", "realtek,rtd1295";
+ model = "Xnano X5";
+
+ memory@1f000 {
+ device_type = "memory";
+ reg = <0x1f000 0x3ffe1000>; /* boot ROM to 1 GiB or 2 GiB */
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts b/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts
index e98e508b9514..4beb37bb9522 100644
--- a/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts
+++ b/arch/arm64/boot/dts/realtek/rtd1295-zidoo-x9s.dts
@@ -11,9 +11,9 @@
compatible = "zidoo,x9s", "realtek,rtd1295";
model = "Zidoo X9S";
- memory@0 {
+ memory@1f000 {
device_type = "memory";
- reg = <0x0 0x80000000>;
+ reg = <0x1f000 0x7ffe1000>; /* boot ROM to 2 GiB */
};
aliases {
diff --git a/arch/arm64/boot/dts/realtek/rtd1295.dtsi b/arch/arm64/boot/dts/realtek/rtd1295.dtsi
index 93f0e1d97721..1402abe80ea1 100644
--- a/arch/arm64/boot/dts/realtek/rtd1295.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd1295.dtsi
@@ -2,7 +2,7 @@
/*
* Realtek RTD1295 SoC
*
- * Copyright (c) 2016-2017 Andreas Färber
+ * Copyright (c) 2016-2019 Andreas Färber
*/
#include "rtd129x.dtsi"
@@ -47,27 +47,16 @@
};
};
- reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- tee@10100000 {
- reg = <0x10100000 0xf00000>;
- no-map;
- };
- };
-
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>;
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
};
diff --git a/arch/arm64/boot/dts/realtek/rtd1296-ds418.dts b/arch/arm64/boot/dts/realtek/rtd1296-ds418.dts
index 5a051a52bf88..cc706d13da8b 100644
--- a/arch/arm64/boot/dts/realtek/rtd1296-ds418.dts
+++ b/arch/arm64/boot/dts/realtek/rtd1296-ds418.dts
@@ -11,9 +11,9 @@
compatible = "synology,ds418", "realtek,rtd1296";
model = "Synology DiskStation DS418";
- memory@0 {
+ memory@1f000 {
device_type = "memory";
- reg = <0x0 0x80000000>;
+ reg = <0x1f000 0x7ffe1000>; /* boot ROM to 2 GiB */
};
aliases {
diff --git a/arch/arm64/boot/dts/realtek/rtd1296.dtsi b/arch/arm64/boot/dts/realtek/rtd1296.dtsi
index 0f9e59cac086..fb864a139c97 100644
--- a/arch/arm64/boot/dts/realtek/rtd1296.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd1296.dtsi
@@ -50,13 +50,13 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>,
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
- (GIC_CPU_MASK_RAW(0xf) | IRQ_TYPE_LEVEL_LOW)>;
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
};
diff --git a/arch/arm64/boot/dts/realtek/rtd129x.dtsi b/arch/arm64/boot/dts/realtek/rtd129x.dtsi
index 4433114476f5..39aefe66a794 100644
--- a/arch/arm64/boot/dts/realtek/rtd129x.dtsi
+++ b/arch/arm64/boot/dts/realtek/rtd129x.dtsi
@@ -2,14 +2,12 @@
/*
* Realtek RTD1293/RTD1295/RTD1296 SoC
*
- * Copyright (c) 2016-2017 Andreas Färber
+ * Copyright (c) 2016-2019 Andreas Färber
*/
-/memreserve/ 0x0000000000000000 0x0000000000030000;
-/memreserve/ 0x000000000001f000 0x0000000000001000;
-/memreserve/ 0x0000000000030000 0x00000000000d0000;
+/memreserve/ 0x0000000000000000 0x000000000001f000;
+/memreserve/ 0x000000000001f000 0x00000000000e1000;
/memreserve/ 0x0000000001b00000 0x00000000004be000;
-/memreserve/ 0x0000000001ffe000 0x0000000000004000;
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/reset/realtek,rtd1295.h>
@@ -19,6 +17,25 @@
#address-cells = <1>;
#size-cells = <1>;
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rpc_comm: rpc@1f000 {
+ reg = <0x1f000 0x1000>;
+ };
+
+ rpc_ringbuf: rpc@1ffe000 {
+ reg = <0x1ffe000 0x4000>;
+ };
+
+ tee: tee@10100000 {
+ reg = <0x10100000 0xf00000>;
+ no-map;
+ };
+ };
+
arm_pmu: arm-pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
@@ -35,73 +52,61 @@
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- /* Exclude up to 2 GiB of RAM */
- ranges = <0x80000000 0x80000000 0x80000000>;
-
- reset1: reset-controller@98000000 {
- compatible = "snps,dw-low-reset";
- reg = <0x98000000 0x4>;
- #reset-cells = <1>;
- };
-
- reset2: reset-controller@98000004 {
- compatible = "snps,dw-low-reset";
- reg = <0x98000004 0x4>;
- #reset-cells = <1>;
- };
-
- reset3: reset-controller@98000008 {
- compatible = "snps,dw-low-reset";
- reg = <0x98000008 0x4>;
- #reset-cells = <1>;
- };
-
- reset4: reset-controller@98000050 {
- compatible = "snps,dw-low-reset";
- reg = <0x98000050 0x4>;
- #reset-cells = <1>;
- };
-
- iso_reset: reset-controller@98007088 {
- compatible = "snps,dw-low-reset";
- reg = <0x98007088 0x4>;
- #reset-cells = <1>;
- };
-
- wdt: watchdog@98007680 {
- compatible = "realtek,rtd1295-watchdog";
- reg = <0x98007680 0x100>;
- clocks = <&osc27M>;
- };
-
- uart0: serial@98007800 {
- compatible = "snps,dw-apb-uart";
- reg = <0x98007800 0x400>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clock-frequency = <27000000>;
- resets = <&iso_reset RTD1295_ISO_RSTN_UR0>;
- status = "disabled";
- };
-
- uart1: serial@9801b200 {
- compatible = "snps,dw-apb-uart";
- reg = <0x9801b200 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clock-frequency = <432000000>;
- resets = <&reset2 RTD1295_RSTN_UR1>;
- status = "disabled";
- };
-
- uart2: serial@9801b400 {
- compatible = "snps,dw-apb-uart";
- reg = <0x9801b400 0x100>;
- reg-shift = <2>;
- reg-io-width = <4>;
- clock-frequency = <432000000>;
- resets = <&reset2 RTD1295_RSTN_UR2>;
- status = "disabled";
+ ranges = <0x00000000 0x00000000 0x0001f000>, /* boot ROM */
+ /* Exclude up to 2 GiB of RAM */
+ <0x80000000 0x80000000 0x80000000>;
+
+ rbus: bus@98000000 {
+ compatible = "simple-bus";
+ reg = <0x98000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x98000000 0x200000>;
+
+ crt: syscon@0 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x1800>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1800>;
+ };
+
+ iso: syscon@7000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x7000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x7000 0x1000>;
+ };
+
+ sb2: syscon@1a000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1a000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1a000 0x1000>;
+ };
+
+ misc: syscon@1b000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1b000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1b000 0x1000>;
+ };
+
+ scpu_wrapper: syscon@1d000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1d000 0x2000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1d000 0x2000>;
+ };
};
gic: interrupt-controller@ff011000 {
@@ -116,3 +121,75 @@
};
};
};
+
+&crt {
+ reset1: reset-controller@0 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x0 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset2: reset-controller@4 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x4 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset3: reset-controller@8 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x8 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset4: reset-controller@50 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x50 0x4>;
+ #reset-cells = <1>;
+ };
+};
+
+&iso {
+ iso_reset: reset-controller@88 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x88 0x4>;
+ #reset-cells = <1>;
+ };
+
+ wdt: watchdog@680 {
+ compatible = "realtek,rtd1295-watchdog";
+ reg = <0x680 0x100>;
+ clocks = <&osc27M>;
+ };
+
+ uart0: serial@800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x800 0x400>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <27000000>;
+ resets = <&iso_reset RTD1295_ISO_RSTN_UR0>;
+ status = "disabled";
+ };
+};
+
+&misc {
+ uart1: serial@200 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x200 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <432000000>;
+ resets = <&reset2 RTD1295_RSTN_UR1>;
+ status = "disabled";
+ };
+
+ uart2: serial@400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x400 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <432000000>;
+ resets = <&reset2 RTD1295_RSTN_UR2>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1395-bpi-m4.dts b/arch/arm64/boot/dts/realtek/rtd1395-bpi-m4.dts
new file mode 100644
index 000000000000..9891967d1315
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1395-bpi-m4.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+/dts-v1/;
+
+#include "rtd1395.dtsi"
+
+/ {
+ compatible = "bananapi,bpi-m4", "realtek,rtd1395";
+ model = "Banana Pi BPI-M4";
+
+ memory@2f000 {
+ device_type = "memory";
+ reg = <0x2f000 0x3ffd1000>; /* boot ROM to 1 GiB or 2 GiB */
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1395-lionskin.dts b/arch/arm64/boot/dts/realtek/rtd1395-lionskin.dts
new file mode 100644
index 000000000000..83f9b536cdea
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1395-lionskin.dts
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+/dts-v1/;
+
+#include "rtd1395.dtsi"
+
+/ {
+ compatible = "realtek,lion-skin", "realtek,rtd1395";
+ model = "Realtek Lion Skin EVB";
+
+ memory@2f000 {
+ device_type = "memory";
+ reg = <0x2f000 0x3ffd1000>; /* boot ROM to 1 GiB or 2 GiB */
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+/* debug console (J1) */
+&uart0 {
+ status = "okay";
+};
+
+/* M.2 slot (CON1) */
+&uart1 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1395.dtsi b/arch/arm64/boot/dts/realtek/rtd1395.dtsi
new file mode 100644
index 000000000000..05c9216a87ee
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1395.dtsi
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Realtek RTD1395 SoC
+ *
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+#include "rtd139x.dtsi"
+
+/ {
+ compatible = "realtek,rtd1395";
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x0>;
+ next-level-cache = <&l2>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x1>;
+ next-level-cache = <&l2>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x2>;
+ next-level-cache = <&l2>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0 0x3>;
+ next-level-cache = <&l2>;
+ };
+
+ l2: l2-cache {
+ compatible = "cache";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
+
+&arm_pmu {
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd139x.dtsi b/arch/arm64/boot/dts/realtek/rtd139x.dtsi
new file mode 100644
index 000000000000..a3c10ceeb586
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd139x.dtsi
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Realtek RTD1395 SoC family
+ *
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+/memreserve/ 0x0000000000000000 0x000000000002f000;
+/memreserve/ 0x000000000002f000 0x00000000000d1000;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/realtek,rtd1295.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rpc_comm: rpc@2f000 {
+ reg = <0x2f000 0x1000>;
+ };
+
+ rpc_ringbuf: rpc@1ffe000 {
+ reg = <0x1ffe000 0x4000>;
+ };
+
+ tee: tee@10100000 {
+ reg = <0x10100000 0xf00000>;
+ no-map;
+ };
+ };
+
+ arm_pmu: arm-pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ osc27M: osc {
+ compatible = "fixed-clock";
+ clock-frequency = <27000000>;
+ #clock-cells = <0>;
+ clock-output-names = "osc27M";
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000000 0x0002f000>, /* boot ROM */
+ <0x98000000 0x98000000 0x68000000>;
+
+ rbus: bus@98000000 {
+ compatible = "simple-bus";
+ reg = <0x98000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x98000000 0x200000>;
+
+ crt: syscon@0 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1000>;
+ };
+
+ iso: syscon@7000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x7000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x7000 0x1000>;
+ };
+
+ sb2: syscon@1a000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1a000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1a000 0x1000>;
+ };
+
+ misc: syscon@1b000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1b000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1b000 0x1000>;
+ };
+
+ scpu_wrapper: syscon@1d000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1d000 0x2000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1d000 0x2000>;
+ };
+ };
+
+ gic: interrupt-controller@ff011000 {
+ compatible = "arm,gic-400";
+ reg = <0xff011000 0x1000>,
+ <0xff012000 0x2000>,
+ <0xff014000 0x2000>,
+ <0xff016000 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+ };
+};
+
+&crt {
+ reset1: reset-controller@0 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x0 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset2: reset-controller@4 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x4 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset3: reset-controller@8 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x8 0x4>;
+ #reset-cells = <1>;
+ };
+
+ reset4: reset-controller@50 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x50 0x4>;
+ #reset-cells = <1>;
+ };
+};
+
+&iso {
+ iso_reset: reset-controller@88 {
+ compatible = "snps,dw-low-reset";
+ reg = <0x88 0x4>;
+ #reset-cells = <1>;
+ };
+
+ wdt: watchdog@680 {
+ compatible = "realtek,rtd1295-watchdog";
+ reg = <0x680 0x100>;
+ clocks = <&osc27M>;
+ };
+
+ uart0: serial@800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x800 0x400>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <27000000>;
+ resets = <&iso_reset RTD1295_ISO_RSTN_UR0>;
+ status = "disabled";
+ };
+};
+
+&misc {
+ uart1: serial@200 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x200 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <432000000>;
+ resets = <&reset2 RTD1295_RSTN_UR1>;
+ status = "disabled";
+ };
+
+ uart2: serial@400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x400 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <432000000>;
+ resets = <&reset2 RTD1295_RSTN_UR2>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1619-mjolnir.dts b/arch/arm64/boot/dts/realtek/rtd1619-mjolnir.dts
new file mode 100644
index 000000000000..90ed6681468f
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1619-mjolnir.dts
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Copyright (c) 2019 Realtek Semiconductor Corp.
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+/dts-v1/;
+
+#include "rtd1619.dtsi"
+
+/ {
+ compatible = "realtek,mjolnir", "realtek,rtd1619";
+ model = "Realtek Mjolnir EVB";
+
+ memory@2e000 {
+ device_type = "memory";
+ reg = <0x2e000 0x7ffd2000>; /* boot ROM to 2 GiB */
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ };
+};
+
+/* debug console (J1) */
+&uart0 {
+ status = "okay";
+};
+
+/* M.2 slot (CON4) */
+&uart1 {
+ status = "disabled";
+};
+
+/* GPIO connector (T1) */
+&uart2 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd1619.dtsi b/arch/arm64/boot/dts/realtek/rtd1619.dtsi
new file mode 100644
index 000000000000..e52bf708b04e
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd1619.dtsi
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Realtek RTD1619 SoC
+ *
+ * Copyright (c) 2019 Realtek Semiconductor Corp.
+ */
+
+#include "rtd16xx.dtsi"
+
+/ {
+ compatible = "realtek,rtd1619";
+};
diff --git a/arch/arm64/boot/dts/realtek/rtd16xx.dtsi b/arch/arm64/boot/dts/realtek/rtd16xx.dtsi
new file mode 100644
index 000000000000..afba5f04c8ec
--- /dev/null
+++ b/arch/arm64/boot/dts/realtek/rtd16xx.dtsi
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Realtek RTD16xx SoC family
+ *
+ * Copyright (c) 2019 Realtek Semiconductor Corp.
+ * Copyright (c) 2019 Andreas Färber
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rpc_comm: rpc@2f000 {
+ reg = <0x2f000 0x1000>;
+ };
+
+ rpc_ringbuf: rpc@1ffe000 {
+ reg = <0x1ffe000 0x4000>;
+ };
+
+ tee: tee@10100000 {
+ reg = <0x10100000 0xf00000>;
+ no-map;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x0>;
+ enable-method = "psci";
+ next-level-cache = <&l2>;
+ };
+
+ cpu1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x100>;
+ enable-method = "psci";
+ next-level-cache = <&l3>;
+ };
+
+ cpu2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x200>;
+ enable-method = "psci";
+ next-level-cache = <&l3>;
+ };
+
+ cpu3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x300>;
+ enable-method = "psci";
+ next-level-cache = <&l3>;
+ };
+
+ cpu4: cpu@400 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x400>;
+ enable-method = "psci";
+ next-level-cache = <&l3>;
+ };
+
+ cpu5: cpu@500 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a55";
+ reg = <0x500>;
+ enable-method = "psci";
+ next-level-cache = <&l3>;
+ };
+
+ l2: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&l3>;
+ };
+
+ l3: l3-cache {
+ compatible = "cache";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ arm_pmu: pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>,
+ <&cpu3>, <&cpu4>, <&cpu5>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ osc27M: osc {
+ compatible = "fixed-clock";
+ clock-frequency = <27000000>;
+ clock-output-names = "osc27M";
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000000 0x0002e000>, /* boot ROM */
+ <0x98000000 0x98000000 0x68000000>;
+
+ rbus: bus@98000000 {
+ compatible = "simple-bus";
+ reg = <0x98000000 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x98000000 0x200000>;
+
+ crt: syscon@0 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x1000>;
+ };
+
+ iso: syscon@7000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x7000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x7000 0x1000>;
+ };
+
+ sb2: syscon@1a000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1a000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1a000 0x1000>;
+ };
+
+ misc: syscon@1b000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1b000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1b000 0x1000>;
+ };
+
+ scpu_wrapper: syscon@1d000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x1d000 0x1000>;
+ reg-io-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x1d000 0x1000>;
+ };
+ };
+
+ gic: interrupt-controller@ff100000 {
+ compatible = "arm,gic-v3";
+ reg = <0xff100000 0x10000>,
+ <0xff140000 0xc0000>;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+ };
+};
+
+&iso {
+ uart0: serial0@800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x800 0x400>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <27000000>;
+ status = "disabled";
+ };
+};
+
+&misc {
+ uart1: serial1@200 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x200 0x400>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <432000000>;
+ status = "disabled";
+ };
+
+ uart2: serial2@400 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x400 0x400>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <432000000>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index a7ec7a7065d5..d17351cdbce0 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -5,7 +5,8 @@ dtb-$(CONFIG_ARCH_R8A774A1) += r8a774a1-hihope-rzg2m-ex-idk-1110wr.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n.dtb
dtb-$(CONFIG_ARCH_R8A774B1) += r8a774b1-hihope-rzg2n-ex.dtb
dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-cat874.dtb r8a774c0-ek874.dtb \
- r8a774c0-ek874-idk-2121wr.dtb
+ r8a774c0-ek874-idk-2121wr.dtb \
+ r8a774c0-ek874-mipi-2.1.dtb
dtb-$(CONFIG_ARCH_R8A77950) += r8a77950-salvator-x.dtb
dtb-$(CONFIG_ARCH_R8A77950) += r8a77950-ulcb.dtb r8a77950-ulcb-kf.dtb
dtb-$(CONFIG_ARCH_R8A77951) += r8a77951-salvator-x.dtb r8a77951-salvator-xs.dtb
diff --git a/arch/arm64/boot/dts/renesas/aistarvision-mipi-adapter-2.1.dtsi b/arch/arm64/boot/dts/renesas/aistarvision-mipi-adapter-2.1.dtsi
new file mode 100644
index 000000000000..dac6ff49020f
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/aistarvision-mipi-adapter-2.1.dtsi
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the AISTARVISION MIPI Adapter V2.1
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+/ {
+ ov5645_vdddo_1v8: 1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vdddo";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ ov5645_vdda_2v8: 2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vdda";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ ov5645_vddd_1v5: 1p5v {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vddd";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+
+ imx219_vana_2v8: 2p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vana";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ };
+
+ imx219_vdig_1v8: 1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vdig";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ };
+
+ imx219_vddl_1v2: 1p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "camera_vddl";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ osc25250_clk: osc25250_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+};
+
+&MIPI_PARENT_I2C {
+ ov5645: ov5645@3c {
+ compatible = "ovti,ov5645";
+ reg = <0x3c>;
+ clock-names = "xclk";
+ clocks = <&osc25250_clk>;
+ clock-frequency = <24000000>;
+ vdddo-supply = <&ov5645_vdddo_1v8>;
+ vdda-supply = <&ov5645_vdda_2v8>;
+ vddd-supply = <&ov5645_vddd_1v5>;
+
+ port {
+ ov5645_ep: endpoint {
+ };
+ };
+ };
+
+ imx219: imx219@10 {
+ compatible = "sony,imx219";
+ reg = <0x10>;
+ clocks = <&osc25250_clk>;
+ VANA-supply = <&imx219_vana_2v8>;
+ VDIG-supply = <&imx219_vdig_1v8>;
+ VDDL-supply = <&imx219_vddl_1v2>;
+
+ port {
+ imx219_ep: endpoint {
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index 79023433a740..a603d947970e 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -1000,7 +1000,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -1008,7 +1008,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -1016,7 +1016,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -1024,7 +1024,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -1033,7 +1033,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -1041,7 +1041,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 5>;
@@ -1049,7 +1049,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv1: mmu@fd950000 {
+ ipmmu_pv1: iommu@fd950000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xfd950000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -1057,7 +1057,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 8>;
@@ -1065,7 +1065,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a774a1";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 9>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 3137f735974b..1e51855c7cd3 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -874,7 +874,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -882,7 +882,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -890,7 +890,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -898,7 +898,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -907,7 +907,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -915,7 +915,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -923,7 +923,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -931,7 +931,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -939,7 +939,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp0: mmu@fe990000 {
+ ipmmu_vp0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a774b1";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 16>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
new file mode 100644
index 000000000000..f0829e905506
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Silicon Linux RZ/G2E 96board platform (CAT874)
+ * connected with aistarvision-mipi-v2-adapter board
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+#include "r8a774c0-ek874.dts"
+#define MIPI_PARENT_I2C i2c3
+#include "aistarvision-mipi-adapter-2.1.dtsi"
+
+/ {
+ model = "Silicon Linux RZ/G2E evaluation kit EK874 (CAT874 + CAT875) with aistarvision-mipi-v2-adapter board";
+ compatible = "si-linux,cat875", "si-linux,cat874", "renesas,r8a774c0";
+};
+
+&i2c3 {
+ status = "okay";
+};
+
+&vin4 {
+ status = "okay";
+};
+
+&vin5 {
+ status = "okay";
+};
+
+&csi40 {
+ status = "okay";
+
+ ports {
+ port {
+ csi40_in: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&ov5645_ep>;
+ };
+ };
+ };
+};
+
+&ov5645 {
+ enable-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio5 3 GPIO_ACTIVE_LOW>;
+
+ port {
+ ov5645_ep: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ remote-endpoint = <&csi40_in>;
+ };
+ };
+};
+
+&imx219 {
+ port {
+ imx219_ep: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ link-frequencies = /bits/ 64 <456000000>;
+			/* To tie the imx219 to CSI2 instead, uncomment the
+			 * remote-endpoint property below, comment out the
+			 * remote-endpoint property of the ov5645 endpoint, and
+			 * set the remote-endpoint phandle in csi40_in to imx219_ep.
+			 */
+ /* remote-endpoint = <&csi40_in>; */
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 22785cbddff5..5c72a7efbb03 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -847,7 +847,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -855,7 +855,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -863,7 +863,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -871,7 +871,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -880,7 +880,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -888,7 +888,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -896,7 +896,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -904,7 +904,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -912,7 +912,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp0: mmu@fe990000 {
+ ipmmu_vp0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a774c0";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 16>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
index 3975eecd50c4..d716c4386ae9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -77,7 +77,7 @@
/delete-node/ dma-controller@e6460000;
/delete-node/ dma-controller@e6470000;
- ipmmu_mp1: mmu@ec680000 {
+ ipmmu_mp1: iommu@ec680000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xec680000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 5>;
@@ -85,7 +85,7 @@
#iommu-cells = <1>;
};
- ipmmu_sy: mmu@e7730000 {
+ ipmmu_sy: iommu@e7730000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xe7730000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 8>;
@@ -93,11 +93,11 @@
#iommu-cells = <1>;
};
- /delete-node/ mmu@fd950000;
- /delete-node/ mmu@fd960000;
- /delete-node/ mmu@fd970000;
- /delete-node/ mmu@febe0000;
- /delete-node/ mmu@fe980000;
+ /delete-node/ iommu@fd950000;
+ /delete-node/ iommu@fd960000;
+ /delete-node/ iommu@fd970000;
+ /delete-node/ iommu@febe0000;
+ /delete-node/ iommu@fe980000;
xhci1: usb@ee040000 {
compatible = "renesas,xhci-r8a7795", "renesas,rcar-gen3-xhci";
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 52229546454c..61d67d9714ab 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -1073,7 +1073,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -1081,7 +1081,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -1089,7 +1089,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -1097,7 +1097,7 @@
#iommu-cells = <1>;
};
- ipmmu_ir: mmu@ff8b0000 {
+ ipmmu_ir: iommu@ff8b0000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xff8b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 3>;
@@ -1105,7 +1105,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -1114,7 +1114,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp0: mmu@ec670000 {
+ ipmmu_mp0: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -1122,7 +1122,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -1130,7 +1130,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv1: mmu@fd950000 {
+ ipmmu_pv1: iommu@fd950000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfd950000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 7>;
@@ -1138,7 +1138,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv2: mmu@fd960000 {
+ ipmmu_pv2: iommu@fd960000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfd960000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 8>;
@@ -1146,7 +1146,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv3: mmu@fd970000 {
+ ipmmu_pv3: iommu@fd970000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfd970000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 9>;
@@ -1154,7 +1154,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 10>;
@@ -1162,7 +1162,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -1170,7 +1170,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc1: mmu@fe6f0000 {
+ ipmmu_vc1: iommu@fe6f0000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfe6f0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 13>;
@@ -1178,7 +1178,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -1186,7 +1186,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi1: mmu@febe0000 {
+ ipmmu_vi1: iommu@febe0000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfebe0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 15>;
@@ -1194,7 +1194,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp0: mmu@fe990000 {
+ ipmmu_vp0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 16>;
@@ -1202,7 +1202,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp1: mmu@fe980000 {
+ ipmmu_vp1: iommu@fe980000 {
compatible = "renesas,ipmmu-r8a7795";
reg = <0 0xfe980000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 17>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 31282367d3ac..33bf62acffbb 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -997,7 +997,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -1005,7 +1005,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -1013,7 +1013,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -1021,7 +1021,7 @@
#iommu-cells = <1>;
};
- ipmmu_ir: mmu@ff8b0000 {
+ ipmmu_ir: iommu@ff8b0000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xff8b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 3>;
@@ -1029,7 +1029,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -1038,7 +1038,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -1046,7 +1046,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 5>;
@@ -1054,7 +1054,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv1: mmu@fd950000 {
+ ipmmu_pv1: iommu@fd950000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xfd950000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -1062,7 +1062,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 7>;
@@ -1070,7 +1070,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 8>;
@@ -1078,7 +1078,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a7796";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 9>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index 0d96f2d3492b..760e738b75b3 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -626,21 +626,150 @@
status = "disabled";
};
+ hscif0: serial@e6540000 {
+ compatible = "renesas,hscif-r8a77961",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6540000 0 0x60>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 520>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x31>, <&dmac1 0x30>,
+ <&dmac2 0x31>, <&dmac2 0x30>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 520>;
+ status = "disabled";
+ };
hscif1: serial@e6550000 {
+ compatible = "renesas,hscif-r8a77961",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
reg = <0 0xe6550000 0 0x60>;
- /* placeholder */
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 519>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x33>, <&dmac1 0x32>,
+ <&dmac2 0x33>, <&dmac2 0x32>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 519>;
+ status = "disabled";
+ };
+
+ hscif2: serial@e6560000 {
+ compatible = "renesas,hscif-r8a77961",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe6560000 0 0x60>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 518>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x35>, <&dmac1 0x34>,
+ <&dmac2 0x35>, <&dmac2 0x34>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 518>;
+ status = "disabled";
+ };
+
+ hscif3: serial@e66a0000 {
+ compatible = "renesas,hscif-r8a77961",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe66a0000 0 0x60>;
+ interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 517>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x37>, <&dmac0 0x36>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 517>;
+ status = "disabled";
+ };
+
+ hscif4: serial@e66b0000 {
+ compatible = "renesas,hscif-r8a77961",
+ "renesas,rcar-gen3-hscif",
+ "renesas,hscif";
+ reg = <0 0xe66b0000 0 0x60>;
+ interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 516>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x39>, <&dmac0 0x38>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 516>;
+ status = "disabled";
};
hsusb: usb@e6590000 {
+ compatible = "renesas,usbhs-r8a77961",
+ "renesas,rcar-gen3-usbhs";
reg = <0 0xe6590000 0 0x200>;
- /* placeholder */
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>;
+ dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
+ <&usb_dmac1 0>, <&usb_dmac1 1>;
+ dma-names = "ch0", "ch1", "ch2", "ch3";
+ renesas,buswait = <11>;
+ phys = <&usb2_phy0 3>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 704>, <&cpg 703>;
+ status = "disabled";
+ };
+
+ usb_dmac0: dma-controller@e65a0000 {
+ compatible = "renesas,r8a77961-usb-dmac",
+ "renesas,usb-dmac";
+ reg = <0 0xe65a0000 0 0x100>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&cpg CPG_MOD 330>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 330>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
+ };
+
+ usb_dmac1: dma-controller@e65b0000 {
+ compatible = "renesas,r8a77961-usb-dmac",
+ "renesas,usb-dmac";
+ reg = <0 0xe65b0000 0 0x100>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1";
+ clocks = <&cpg CPG_MOD 331>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 331>;
+ #dma-cells = <1>;
+ dma-channels = <2>;
};
usb3_phy0: usb-phy@e65ee000 {
+ compatible = "renesas,r8a77961-usb3-phy",
+ "renesas,rcar-gen3-usb3-phy";
reg = <0 0xe65ee000 0 0x90>;
+ clocks = <&cpg CPG_MOD 328>, <&usb3s0_clk>,
+ <&usb_extal_clk>;
+ clock-names = "usb3-if", "usb3s_clk", "usb_extal";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
#phy-cells = <0>;
- /* placeholder */
+ status = "disabled";
};
arm_cc630p: crypto@e6601000 {
@@ -799,15 +928,108 @@
status = "disabled";
};
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 8>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
pwm1: pwm@e6e31000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
reg = <0 0xe6e31000 0 8>;
#pwm-cells = <2>;
- /* placeholder */
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@e6e32000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
+ reg = <0 0xe6e32000 0 8>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@e6e33000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
+ reg = <0 0xe6e33000 0 8>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@e6e34000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
+ reg = <0 0xe6e34000 0 8>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@e6e35000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
+ reg = <0 0xe6e35000 0 8>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@e6e36000 {
+ compatible = "renesas,pwm-r8a77961", "renesas,pwm-rcar";
+ reg = <0 0xe6e36000 0 8>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 523>;
+ resets = <&cpg 523>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ status = "disabled";
+ };
+
+ scif0: serial@e6e60000 {
+ compatible = "renesas,scif-r8a77961",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6e60000 0 64>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 207>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x51>, <&dmac1 0x50>,
+ <&dmac2 0x51>, <&dmac2 0x50>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 207>;
+ status = "disabled";
};
scif1: serial@e6e68000 {
+ compatible = "renesas,scif-r8a77961",
+ "renesas,rcar-gen3-scif", "renesas,scif";
reg = <0 0xe6e68000 0 64>;
- /* placeholder */
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 206>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x53>, <&dmac1 0x52>,
+ <&dmac2 0x53>, <&dmac2 0x52>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 206>;
+ status = "disabled";
};
scif2: serial@e6e88000 {
@@ -819,11 +1041,63 @@
<&cpg CPG_CORE R8A77961_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
};
+ scif3: serial@e6c50000 {
+ compatible = "renesas,scif-r8a77961",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c50000 0 64>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 204>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x57>, <&dmac0 0x56>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 204>;
+ status = "disabled";
+ };
+
+ scif4: serial@e6c40000 {
+ compatible = "renesas,scif-r8a77961",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6c40000 0 64>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 203>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x59>, <&dmac0 0x58>;
+ dma-names = "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 203>;
+ status = "disabled";
+ };
+
+ scif5: serial@e6f30000 {
+ compatible = "renesas,scif-r8a77961",
+ "renesas,rcar-gen3-scif", "renesas,scif";
+ reg = <0 0xe6f30000 0 64>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 202>,
+ <&cpg CPG_CORE R8A77961_CLK_S3D1>,
+ <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
+ <&dmac2 0x5b>, <&dmac2 0x5a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 202>;
+ status = "disabled";
+ };
+
vin0: video@e6ef0000 {
reg = <0 0xe6ef0000 0 0x1000>;
/* placeholder */
@@ -889,43 +1163,98 @@
};
xhci0: usb@ee000000 {
+ compatible = "renesas,xhci-r8a77961",
+ "renesas,rcar-gen3-xhci";
reg = <0 0xee000000 0 0xc00>;
- /* placeholder */
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 328>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
+ status = "disabled";
};
usb3_peri0: usb@ee020000 {
+ compatible = "renesas,r8a77961-usb3-peri",
+ "renesas,rcar-gen3-usb3-peri";
reg = <0 0xee020000 0 0x400>;
- /* placeholder */
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 328>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 328>;
+ status = "disabled";
};
ohci0: usb@ee080000 {
+ compatible = "generic-ohci";
reg = <0 0xee080000 0 0x100>;
- /* placeholder */
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ phys = <&usb2_phy0 1>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ status = "disabled";
};
ohci1: usb@ee0a0000 {
+ compatible = "generic-ohci";
reg = <0 0xee0a0000 0 0x100>;
- /* placeholder */
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 702>;
+ phys = <&usb2_phy1 1>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
+ status = "disabled";
};
ehci0: usb@ee080100 {
+ compatible = "generic-ehci";
reg = <0 0xee080100 0 0x100>;
- /* placeholder */
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ phys = <&usb2_phy0 2>;
+ phy-names = "usb";
+ companion = <&ohci0>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ status = "disabled";
};
ehci1: usb@ee0a0100 {
+ compatible = "generic-ehci";
reg = <0 0xee0a0100 0 0x100>;
- /* placeholder */
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 702>;
+ phys = <&usb2_phy1 2>;
+ phy-names = "usb";
+ companion = <&ohci1>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
+ status = "disabled";
};
usb2_phy0: usb-phy@ee080200 {
+ compatible = "renesas,usb2-phy-r8a77961",
+ "renesas,rcar-gen3-usb2-phy";
reg = <0 0xee080200 0 0x700>;
- /* placeholder */
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 703>, <&cpg 704>;
+ #phy-cells = <1>;
+ status = "disabled";
};
usb2_phy1: usb-phy@ee0a0200 {
+ compatible = "renesas,usb2-phy-r8a77961",
+ "renesas,rcar-gen3-usb2-phy";
reg = <0 0xee0a0200 0 0x700>;
- /* placeholder */
+ clocks = <&cpg CPG_MOD 702>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 702>;
+ #phy-cells = <1>;
+ status = "disabled";
};
sdhi0: sd@ee100000 {
@@ -994,13 +1323,57 @@
};
pciec0: pcie@fe000000 {
+ compatible = "renesas,pcie-r8a77961",
+ "renesas,pcie-rcar-gen3";
reg = <0 0xfe000000 0 0x80000>;
- /* placeholder */
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000>,
+ <0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000>,
+ <0x02000000 0 0x30000000 0 0x30000000 0 0x08000000>,
+ <0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>;
+ /* Map all possible DDR as inbound ranges */
+ dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>;
+ clock-names = "pcie", "pcie_bus";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 319>;
+ status = "disabled";
};
pciec1: pcie@ee800000 {
+ compatible = "renesas,pcie-r8a77961",
+ "renesas,pcie-rcar-gen3";
reg = <0 0xee800000 0 0x80000>;
- /* placeholder */
+ #address-cells = <3>;
+ #size-cells = <2>;
+ bus-range = <0x00 0xff>;
+ device_type = "pci";
+ ranges = <0x01000000 0 0x00000000 0 0xee900000 0 0x00100000>,
+ <0x02000000 0 0xeea00000 0 0xeea00000 0 0x00200000>,
+ <0x02000000 0 0xc0000000 0 0xc0000000 0 0x08000000>,
+ <0x42000000 0 0xc8000000 0 0xc8000000 0 0x08000000>;
+ /* Map all possible DDR as inbound ranges */
+ dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 318>, <&pcie_bus_clk>;
+ clock-names = "pcie", "pcie_bus";
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 318>;
+ status = "disabled";
};
csi20: csi2@fea80000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index d82dd4e67b62..6f7ab39fd282 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -867,7 +867,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -875,7 +875,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -883,7 +883,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -891,7 +891,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -900,7 +900,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -908,7 +908,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -916,7 +916,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 10>;
@@ -924,7 +924,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -932,7 +932,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -940,7 +940,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp0: mmu@fe990000 {
+ ipmmu_vp0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a77965";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 16>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index a009c0ebc8b4..bd95ecb1b40d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -985,7 +985,7 @@
<&ipmmu_ds1 22>, <&ipmmu_ds1 23>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a77970";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -993,7 +993,7 @@
#iommu-cells = <1>;
};
- ipmmu_ir: mmu@ff8b0000 {
+ ipmmu_ir: iommu@ff8b0000 {
compatible = "renesas,ipmmu-r8a77970";
reg = <0 0xff8b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 3>;
@@ -1001,7 +1001,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a77970";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -1010,7 +1010,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a77970";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 7>;
@@ -1018,7 +1018,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a77970";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 9>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index d672b320bc14..387e6d99f2f3 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -1266,7 +1266,7 @@
status = "disabled";
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -1274,7 +1274,7 @@
#iommu-cells = <1>;
};
- ipmmu_ir: mmu@ff8b0000 {
+ ipmmu_ir: iommu@ff8b0000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xff8b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 3>;
@@ -1282,7 +1282,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -1291,7 +1291,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 10>;
@@ -1299,7 +1299,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe990000 {
+ ipmmu_vc0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -1307,7 +1307,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -1315,7 +1315,7 @@
#iommu-cells = <1>;
};
- ipmmu_vip0: mmu@e7b00000 {
+ ipmmu_vip0: iommu@e7b00000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7b00000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -1323,7 +1323,7 @@
#iommu-cells = <1>;
};
- ipmmu_vip1: mmu@e7960000 {
+ ipmmu_vip1: iommu@e7960000 {
compatible = "renesas,ipmmu-r8a77980";
reg = <0 0xe7960000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 11>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 1543f18e834f..cd11f24744d4 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -817,7 +817,7 @@
<&ipmmu_ds1 30>, <&ipmmu_ds1 31>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -825,7 +825,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -833,7 +833,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -841,7 +841,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -850,7 +850,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -858,7 +858,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -866,7 +866,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 10>;
@@ -874,7 +874,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -882,7 +882,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -890,7 +890,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp0: mmu@fe990000 {
+ ipmmu_vp0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a77990";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 16>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index e8d2290fe79d..e5617ec0f49c 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -507,7 +507,7 @@
<&ipmmu_ds1 22>, <&ipmmu_ds1 23>;
};
- ipmmu_ds0: mmu@e6740000 {
+ ipmmu_ds0: iommu@e6740000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xe6740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 0>;
@@ -515,7 +515,7 @@
#iommu-cells = <1>;
};
- ipmmu_ds1: mmu@e7740000 {
+ ipmmu_ds1: iommu@e7740000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xe7740000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 1>;
@@ -523,7 +523,7 @@
#iommu-cells = <1>;
};
- ipmmu_hc: mmu@e6570000 {
+ ipmmu_hc: iommu@e6570000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xe6570000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 2>;
@@ -531,7 +531,7 @@
#iommu-cells = <1>;
};
- ipmmu_mm: mmu@e67b0000 {
+ ipmmu_mm: iommu@e67b0000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xe67b0000 0 0x1000>;
interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
@@ -540,7 +540,7 @@
#iommu-cells = <1>;
};
- ipmmu_mp: mmu@ec670000 {
+ ipmmu_mp: iommu@ec670000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xec670000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 4>;
@@ -548,7 +548,7 @@
#iommu-cells = <1>;
};
- ipmmu_pv0: mmu@fd800000 {
+ ipmmu_pv0: iommu@fd800000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xfd800000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 6>;
@@ -556,7 +556,7 @@
#iommu-cells = <1>;
};
- ipmmu_rt: mmu@ffc80000 {
+ ipmmu_rt: iommu@ffc80000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xffc80000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 10>;
@@ -564,7 +564,7 @@
#iommu-cells = <1>;
};
- ipmmu_vc0: mmu@fe6b0000 {
+ ipmmu_vc0: iommu@fe6b0000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xfe6b0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 12>;
@@ -572,7 +572,7 @@
#iommu-cells = <1>;
};
- ipmmu_vi0: mmu@febd0000 {
+ ipmmu_vi0: iommu@febd0000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xfebd0000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 14>;
@@ -580,7 +580,7 @@
#iommu-cells = <1>;
};
- ipmmu_vp0: mmu@fe990000 {
+ ipmmu_vp0: iommu@fe990000 {
compatible = "renesas,ipmmu-r8a77995";
reg = <0 0xfe990000 0 0x1000>;
renesas,ipmmu-main = <&ipmmu_mm 16>;
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index ae7621309e92..b87b1f773083 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -2,6 +2,7 @@
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-roc-cc.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3326-odroid-go2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-a1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3328-rock64.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index adc9b8bf5eaa..a6b8427156d5 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -931,6 +931,7 @@
clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
<&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ bus-width = <4>;
fifo-depth = <0x100>;
max-frequency = <150000000>;
pinctrl-names = "default";
@@ -946,6 +947,7 @@
clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
<&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ bus-width = <4>;
fifo-depth = <0x100>;
max-frequency = <150000000>;
pinctrl-names = "default";
@@ -961,6 +963,7 @@
clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
<&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ bus-width = <8>;
fifo-depth = <0x100>;
max-frequency = <150000000>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index aa256350b18f..7a96be10eaf0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -28,14 +28,14 @@
leds {
compatible = "gpio-leds";
- power {
+ power_led: led-0 {
label = "firefly:red:power";
linux,default-trigger = "ir-power-click";
default-state = "on";
gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
};
- user {
+ user_led: led-1 {
label = "firefly:blue:user";
linux,default-trigger = "ir-user-click";
default-state = "off";
@@ -123,9 +123,7 @@
};
&emmc {
- bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
mmc-hs200-1_8v;
non-removable;
status = "okay";
@@ -171,7 +169,6 @@
};
&sdmmc {
- bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
card-detect-delay = <300>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
new file mode 100644
index 000000000000..b3a8f936578f
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
@@ -0,0 +1,557 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 Hardkernel Co., Ltd
+ * Copyright (c) 2020 Theobroma Systems Design und Consulting GmbH
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rk3326.dtsi"
+
+/ {
+ model = "ODROID-GO Advance";
+ compatible = "hardkernel,rk3326-odroid-go2", "rockchip,rk3326";
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&vcc_bl>;
+ pwms = <&pwm1 0 25000 0>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&btn_pins>;
+
+ /*
+		 *   *** ODROID-GO Advance switch layout ***
+ * |------------------------------------------------|
+ * | sw15 sw16 |
+ * |------------------------------------------------|
+ * | sw1 |-------------------| sw8 |
+ * | sw3 sw4 | | sw7 sw5 |
+ * | sw2 | LCD Display | sw6 |
+ * | | | |
+ * | |-------------------| |
+ * | sw9 sw10 sw11 sw12 sw13 sw14 |
+ * |------------------------------------------------|
+ */
+
+ sw1 {
+ gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_LOW>;
+ label = "DPAD-UP";
+ linux,code = <BTN_DPAD_UP>;
+ };
+ sw2 {
+ gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_LOW>;
+ label = "DPAD-DOWN";
+ linux,code = <BTN_DPAD_DOWN>;
+ };
+ sw3 {
+ gpios = <&gpio1 RK_PB6 GPIO_ACTIVE_LOW>;
+ label = "DPAD-LEFT";
+ linux,code = <BTN_DPAD_LEFT>;
+ };
+ sw4 {
+ gpios = <&gpio1 RK_PB7 GPIO_ACTIVE_LOW>;
+ label = "DPAD-RIGHT";
+ linux,code = <BTN_DPAD_RIGHT>;
+ };
+ sw5 {
+ gpios = <&gpio1 RK_PA2 GPIO_ACTIVE_LOW>;
+ label = "BTN-A";
+ linux,code = <BTN_EAST>;
+ };
+ sw6 {
+ gpios = <&gpio1 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "BTN-B";
+ linux,code = <BTN_SOUTH>;
+ };
+ sw7 {
+ gpios = <&gpio1 RK_PA6 GPIO_ACTIVE_LOW>;
+ label = "BTN-Y";
+ linux,code = <BTN_WEST>;
+ };
+ sw8 {
+ gpios = <&gpio1 RK_PA7 GPIO_ACTIVE_LOW>;
+ label = "BTN-X";
+ linux,code = <BTN_NORTH>;
+ };
+ sw9 {
+ gpios = <&gpio2 RK_PA0 GPIO_ACTIVE_LOW>;
+ label = "F1";
+ linux,code = <BTN_TRIGGER_HAPPY1>;
+ };
+ sw10 {
+ gpios = <&gpio2 RK_PA1 GPIO_ACTIVE_LOW>;
+ label = "F2";
+ linux,code = <BTN_TRIGGER_HAPPY2>;
+ };
+ sw11 {
+ gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>;
+ label = "F3";
+ linux,code = <BTN_TRIGGER_HAPPY3>;
+ };
+ sw12 {
+ gpios = <&gpio2 RK_PA3 GPIO_ACTIVE_LOW>;
+ label = "F4";
+ linux,code = <BTN_TRIGGER_HAPPY4>;
+ };
+ sw13 {
+ gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_LOW>;
+ label = "F5";
+ linux,code = <BTN_TRIGGER_HAPPY5>;
+ };
+ sw14 {
+ gpios = <&gpio2 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "F6";
+ linux,code = <BTN_TRIGGER_HAPPY6>;
+ };
+ sw15 {
+ gpios = <&gpio2 RK_PA6 GPIO_ACTIVE_LOW>;
+ label = "TOP-LEFT";
+ linux,code = <BTN_TL>;
+ };
+ sw16 {
+ gpios = <&gpio2 RK_PA7 GPIO_ACTIVE_LOW>;
+ label = "TOP-RIGHT";
+ linux,code = <BTN_TR>;
+ };
+ };
+
+ leds: gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&blue_led_pin>;
+
+ blue_led: led-0 {
+ label = "blue:heartbeat";
+ gpios = <&gpio0 RK_PC1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vccsys: vccsys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v8_sys";
+ regulator-always-on;
+ regulator-min-microvolt = <3800000>;
+ regulator-max-microvolt = <3800000>;
+ };
+
+ vcc_host: vcc_host {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_host";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio0 RK_PB7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&vccsys>;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cru {
+ assigned-clocks = <&cru PLL_NPLL>,
+ <&cru ACLK_BUS_PRE>, <&cru ACLK_PERI_PRE>,
+ <&cru HCLK_BUS_PRE>, <&cru HCLK_PERI_PRE>,
+ <&cru PCLK_BUS_PRE>, <&cru SCLK_GPU>,
+ <&cru PLL_CPLL>;
+
+ assigned-clock-rates = <1188000000>,
+ <200000000>, <200000000>,
+ <150000000>, <150000000>,
+ <100000000>, <200000000>,
+ <17000000>;
+};
+
+&display_subsystem {
+ status = "okay";
+};
+
+&dsi {
+ status = "okay";
+
+ ports {
+ mipi_out: port@1 {
+ reg = <1>;
+
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+ };
+ };
+
+ panel@0 {
+ compatible = "elida,kd35t133";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_lcd>;
+ reset-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&vcc_lcd>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_dphy {
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_logic>;
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-falling-time-ns = <16>;
+ i2c-scl-rising-time-ns = <280>;
+ status = "okay";
+
+ rk817: pmic@20 {
+ compatible = "rockchip,rk817";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ rockchip,system-power-controller;
+ wakeup-source;
+ #clock-cells = <1>;
+ clock-output-names = "rk808-clkout1", "xin32k";
+
+ vcc1-supply = <&vccsys>;
+ vcc2-supply = <&vccsys>;
+ vcc3-supply = <&vccsys>;
+ vcc4-supply = <&vccsys>;
+ vcc5-supply = <&vccsys>;
+ vcc6-supply = <&vccsys>;
+ vcc7-supply = <&vccsys>;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vdd_arm: DCDC_REG2 {
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_3v3: DCDC_REG4 {
+ regulator-name = "vcc_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_1v8: LDO_REG2 {
+ regulator-name = "vcc_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_1v0: LDO_REG3 {
+ regulator-name = "vdd_1v0";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1000000>;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG4 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_sd: LDO_REG6 {
+ regulator-name = "vcc_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_bl: LDO_REG7 {
+ regulator-name = "vcc_bl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcc_lcd: LDO_REG8 {
+ regulator-name = "vcc_lcd";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <2800000>;
+ };
+ };
+
+ vcc_cam: LDO_REG9 {
+ regulator-name = "vcc_cam";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+ };
+ };
+};
+
+/* EXT Header(P2): 7(SCL:GPIO0.C2), 8(SDA:GPIO0.C3) */
+&i2c1 {
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+/* I2S 1 Channel Used */
+&i2s1_2ch {
+ status = "okay";
+};
+
+&io_domains {
+ vccio1-supply = <&vcc_3v3>;
+ vccio2-supply = <&vccio_sd>;
+ vccio3-supply = <&vcc_3v3>;
+ vccio4-supply = <&vcc_3v3>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+	cd-gpios = <&gpio0 RK_PA3 GPIO_ACTIVE_LOW>; /* CD GPIO */
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy {
+ status = "okay";
+
+ u2phy_host: host-port {
+ status = "okay";
+ };
+
+ u2phy_otg: otg-port {
+ status = "disabled";
+ };
+};
+
+&usb20_otg {
+ status = "okay";
+};
+
+/* EXT Header(P2): 2(RXD:GPIO1.C0),3(TXD:.C1),4(CTS:.C2),5(RTS:.C3) */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_xfer &uart1_cts>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2m1_xfer>;
+ status = "okay";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&pinctrl {
+ btns {
+ btn_pins: btn-pins {
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <1 RK_PB7 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <2 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ headphone {
+ hp_det: hp-det {
+ rockchip,pins = <2 RK_PC6 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+
+ leds {
+ blue_led_pin: blue-led-pin {
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ dc_det: dc-det {
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pmic_int: pmic-int {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ soc_slppin_gpio: soc_slppin_gpio {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_output_low>;
+ };
+
+ soc_slppin_rst: soc_slppin_rst {
+ rockchip,pins = <0 RK_PA4 2 &pcfg_pull_none>;
+ };
+
+ soc_slppin_slp: soc_slppin_slp {
+ rockchip,pins = <0 RK_PA4 1 &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3326.dtsi b/arch/arm64/boot/dts/rockchip/rk3326.dtsi
new file mode 100644
index 000000000000..2ba6da125137
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3326.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2020 Fuzhou Rockchip Electronics Co., Ltd
+ */
+
+#include "px30.dtsi"
+
+&display_subsystem {
+ ports = <&vopb_out>;
+};
+
+/delete-node/ &dsi_in_vopl;
+/delete-node/ &lvds_vopl_in;
+/delete-node/ &vopl;
+/delete-node/ &vopl_mmu;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
index 797e90a3ac92..37f307cfa4cc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
@@ -115,7 +115,7 @@
#address-cells = <1>;
#size-cells = <0>;
- rtl8211f: phy@0 {
+ rtl8211f: ethernet-phy@0 {
reg = <0>;
reset-assert-us = <10000>;
reset-deassert-us = <30000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 8d553c92182a..34db48c274e5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -86,7 +86,7 @@
leds {
compatible = "gpio-leds";
- power {
+ power_led: led-0 {
label = "firefly:blue:power";
linux,default-trigger = "heartbeat";
gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
@@ -94,7 +94,7 @@
mode = <0x23>;
};
- user {
+ user_led: led-1 {
label = "firefly:yellow:user";
linux,default-trigger = "mmc1";
gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index ebf3eb222e1f..6e09c223ed57 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -73,12 +73,12 @@
leds {
compatible = "gpio-leds";
- power {
+ power_led: led-0 {
gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
linux,default-trigger = "mmc0";
};
- standby {
+ standby_led: led-1 {
gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
linux,default-trigger = "heartbeat";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index a4d591d91533..d399883d4b75 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -934,7 +934,7 @@
#address-cells = <1>;
#size-cells = <0>;
- phy: phy@0 {
+ phy: ethernet-phy@0 {
compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
reg = <0>;
clocks = <&cru SCLK_MAC2PHY_OUT>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 1d0778ff217c..46357d1d77cd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -50,13 +50,13 @@
leds: gpio-leds {
compatible = "gpio-leds";
- blue {
+ blue_led: led-0 {
gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_HIGH>;
label = "geekbox:blue:led";
default-state = "on";
};
- red {
+ red_led: led-1 {
gpios = <&gpio2 RK_PA3 GPIO_ACTIVE_HIGH>;
label = "geekbox:red:led";
default-state = "off";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 6cc310255da8..b058ce999e3b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -50,7 +50,7 @@
leds: gpio-leds {
compatible = "gpio-leds";
- red {
+ red_led: led-0 {
gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
label = "orion:red:led";
pinctrl-names = "default";
@@ -58,7 +58,7 @@
default-state = "on";
};
- blue {
+ blue_led: led-1 {
gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>;
label = "orion:blue:led";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index 006a1fb6a816..236ab0f1b206 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -43,7 +43,7 @@
leds: gpio-leds {
compatible = "gpio-leds";
- work {
+ work_led: led-0 {
gpios = <&gpio3 RK_PD5 GPIO_ACTIVE_HIGH>;
label = "r88:green:led";
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
index ebe2ee77ba1f..1ce85a5816e4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
@@ -27,42 +27,43 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&user_led1>, <&user_led2>, <&user_led3>,
- <&user_led4>, <&wlan_led>, <&bt_led>;
+ pinctrl-0 = <&user_led1_pin>, <&user_led2_pin>,
+ <&user_led3_pin>, <&user_led4_pin>,
+ <&wlan_led_pin>, <&bt_led_pin>;
- user_led1 {
+ user_led1: led-1 {
label = "red:user1";
gpios = <&gpio4 25 0>;
linux,default-trigger = "heartbeat";
};
- user_led2 {
+ user_led2: led-2 {
label = "red:user2";
gpios = <&gpio4 26 0>;
linux,default-trigger = "mmc0";
};
- user_led3 {
+ user_led3: led-3 {
label = "red:user3";
gpios = <&gpio4 30 0>;
linux,default-trigger = "mmc1";
};
- user_led4 {
+ user_led4: led-4 {
label = "red:user4";
gpios = <&gpio1 0 0>;
panic-indicator;
linux,default-trigger = "none";
};
- wlan_active_led {
+ wlan_active_led: led-5 {
label = "red:wlan";
gpios = <&gpio1 1 0>;
linux,default-trigger = "phy0tx";
default-state = "off";
};
- bt_active_led {
+ bt_active_led: led-6 {
label = "red:bt";
gpios = <&gpio1 4 0>;
linux,default-trigger = "hci0-power";
@@ -114,32 +115,32 @@
};
leds {
- user_led1: user_led1 {
+ user_led1_pin: user-led1-pin {
rockchip,pins =
<4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
};
- user_led2: user_led2 {
+ user_led2_pin: user-led2-pin {
rockchip,pins =
<4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
- user_led3: user_led3 {
+ user_led3_pin: user-led3-pin {
rockchip,pins =
<4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
};
- user_led4: user_led4 {
+ user_led4_pin: user-led4-pin {
rockchip,pins =
<1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
};
- wlan_led: wlan_led {
+ wlan_led_pin: wlan-led-pin {
rockchip,pins =
<1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
- bt_led: bt_led {
+ bt_led_pin: bt-led-pin {
rockchip,pins =
<1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index d63faf38cc81..20b5599f5e78 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -91,15 +91,15 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&work_led_gpio>, <&diy_led_gpio>;
+ pinctrl-0 = <&work_led_pin>, <&diy_led_pin>;
- work-led {
+ work_led: led-0 {
label = "work";
default-state = "on";
gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>;
};
- diy-led {
+ diy_led: led-1 {
label = "diy";
default-state = "off";
gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
@@ -629,11 +629,11 @@
};
leds {
- work_led_gpio: work_led-gpio {
+ work_led_pin: work-led-pin {
rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
};
- diy_led_gpio: diy_led-gpio {
+ diy_led_pin: diy-led-pin {
rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
index aee484a05181..bf87fa32d3b1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
@@ -39,9 +39,9 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&power_led_gpio>;
+ pinctrl-0 = <&power_led_pin>;
- led-0 {
+ power_led: led-0 {
label = "blue:power";
gpios = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
default-state = "on";
@@ -510,7 +510,7 @@
};
leds {
- power_led_gpio: power-led-gpio {
+ power_led_pin: power-led-pin {
rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -633,7 +633,6 @@
&spdif {
status = "okay";
pinctrl-0 = <&spdif_bus_1>;
- #sound-dai-cells = <0>;
};
&spi1 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index c88018a0ef35..1d246c2caa3c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -182,7 +182,7 @@
#address-cells = <1>;
#size-cells = <0>;
- rtl8211e: phy@1 {
+ rtl8211e: ethernet-phy@1 {
reg = <1>;
interrupt-parent = <&gpio3>;
interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
@@ -525,7 +525,7 @@
};
};
- phy {
+ gmac {
phy_intb: phy-intb {
rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
index f9f7246d4d2f..6163ae8063a7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -214,7 +214,7 @@
#address-cells = <1>;
#size-cells = <0>;
- rtl8211e: phy@1 {
+ rtl8211e: ethernet-phy@1 {
reg = <1>;
interrupt-parent = <&gpio3>;
interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
@@ -554,7 +554,7 @@
};
};
- phy {
+ gmac {
phy_intb: phy-intb {
rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index c49982dfd8fc..cb0245d2226d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -90,9 +90,9 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&pwrled_gpio &slpled_gpio>;
+ pinctrl-0 = <&pwr_led_pin &slp_led_pin>;
- green-led {
+ green_led: led-0 {
color = <LED_COLOR_ID_GREEN>;
default-state = "on";
function = LED_FUNCTION_POWER;
@@ -100,7 +100,7 @@
label = "green:power";
};
- red-led {
+ red_led: led-1 {
color = <LED_COLOR_ID_RED>;
default-state = "off";
function = LED_FUNCTION_STANDBY;
@@ -744,7 +744,6 @@
};
&i2s1 {
- #sound-dai-cells = <0>;
pinctrl-names = "default";
pinctrl-0 = <&i2s_8ch_mclk_gpio>, <&i2s1_2ch_bus>;
rockchip,capture-channels = <8>;
@@ -826,11 +825,11 @@
};
leds {
- pwrled_gpio: pwrled_gpio {
+ pwr_led_pin: pwr-led-pin {
rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
- slpled_gpio: slpled_gpio {
+ slp_led_pin: slp-led-pin {
rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts
index 437a75f31ad4..c88295782e7b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dts
@@ -17,42 +17,43 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
- pinctrl-0 = <&user_led1>, <&user_led2>, <&user_led3>,
- <&user_led4>, <&wlan_led>, <&bt_led>;
+ pinctrl-0 = <&user_led1_pin>, <&user_led2_pin>,
+ <&user_led3_pin>, <&user_led4_pin>,
+ <&wlan_led_pin>, <&bt_led_pin>;
- user_led1 {
+ user_led1: led-1 {
label = "green:user1";
gpios = <&gpio4 RK_PC2 0>;
linux,default-trigger = "heartbeat";
};
- user_led2 {
+ user_led2: led-2 {
label = "green:user2";
gpios = <&gpio4 RK_PC6 0>;
linux,default-trigger = "mmc0";
};
- user_led3 {
+ user_led3: led-3 {
label = "green:user3";
gpios = <&gpio4 RK_PD0 0>;
linux,default-trigger = "mmc1";
};
- user_led4 {
+ user_led4: led-4 {
label = "green:user4";
gpios = <&gpio4 RK_PD4 0>;
panic-indicator;
linux,default-trigger = "none";
};
- wlan_active_led {
+ wlan_active_led: led-5 {
label = "yellow:wlan";
gpios = <&gpio4 RK_PD5 0>;
linux,default-trigger = "phy0tx";
default-state = "off";
};
- bt_active_led {
+ bt_active_led: led-6 {
label = "blue:bt";
gpios = <&gpio4 RK_PD6 0>;
linux,default-trigger = "hci0-power";
@@ -68,32 +69,32 @@
&pinctrl {
leds {
- user_led1: user_led1 {
+ user_led1_pin: user-led1-pin {
rockchip,pins =
<4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
- user_led2: user_led2 {
+ user_led2_pin: user-led2-pin {
rockchip,pins =
<4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
};
- user_led3: user_led3 {
+ user_led3_pin: user-led3-pin {
rockchip,pins =
<4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
};
- user_led4: user_led4 {
+ user_led4_pin: user-led4-pin {
rockchip,pins =
<4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
- wlan_led: wlan_led {
+ wlan_led_pin: wlan-led-pin {
rockchip,pins =
<4 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
};
- bt_led: bt_led {
+ bt_led_pin: bt-led-pin {
rockchip,pins =
<4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index 9bca25801260..6788ab28f89a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -96,6 +96,24 @@
vin-supply = <&vcc_1v8>;
};
+ /* micro SD card power */
+ vcc3v0_sd: vcc3v0-sd {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_pwr_h>;
+ regulator-name = "vcc3v0_sd";
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
vcc3v3_pcie: vcc3v3-pcie-regulator {
compatible = "regulator-fixed";
enable-active-high;
@@ -603,6 +621,13 @@
};
};
+ sdcard {
+ sdmmc0_pwr_h: sdmmc0-pwr-h {
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ };
+
sdio-pwrseq {
wifi_enable_h: wifi-enable-h {
rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -661,6 +686,8 @@
max-frequency = <150000000>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
+ vmmc-supply = <&vcc3v0_sd>;
+ vqmmc-supply = <&vcc_sdio>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 1448f358ed0a..2581e9cc7a1d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1056,6 +1056,16 @@
clocks = <&cru HCLK_SDIO>;
pm_qos = <&qos_sdioaudio>;
};
+ pd_tcpc0@RK3399_PD_TCPD0 {
+ reg = <RK3399_PD_TCPD0>;
+ clocks = <&cru SCLK_UPHY0_TCPDCORE>,
+ <&cru SCLK_UPHY0_TCPDPHY_REF>;
+ };
+ pd_tcpc1@RK3399_PD_TCPD1 {
+ reg = <RK3399_PD_TCPD1>;
+ clocks = <&cru SCLK_UPHY1_TCPDCORE>,
+ <&cru SCLK_UPHY1_TCPDPHY_REF>;
+ };
pd_usb3@RK3399_PD_USB3 {
reg = <RK3399_PD_USB3>;
clocks = <&cru ACLK_USB3>;
@@ -1088,16 +1098,6 @@
pm_qos = <&qos_isp1_m0>,
<&qos_isp1_m1>;
};
- pd_tcpc0@RK3399_PD_TCPC0 {
- reg = <RK3399_PD_TCPD0>;
- clocks = <&cru SCLK_UPHY0_TCPDCORE>,
- <&cru SCLK_UPHY0_TCPDPHY_REF>;
- };
- pd_tcpc1@RK3399_PD_TCPC1 {
- reg = <RK3399_PD_TCPD1>;
- clocks = <&cru SCLK_UPHY1_TCPDCORE>,
- <&cru SCLK_UPHY1_TCPDPHY_REF>;
- };
pd_vo@RK3399_PD_VO {
reg = <RK3399_PD_VO>;
#address-cells = <1>;
@@ -1269,6 +1269,18 @@
power-domains = <&power RK3399_PD_VCODEC>;
};
+ vdec: video-codec@ff660000 {
+ compatible = "rockchip,rk3399-vdec";
+ reg = <0x0 0xff660000 0x0 0x400>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "vdpu";
+ clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
+ <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
+ clock-names = "axi", "ahb", "cabac", "core";
+ iommus = <&vdec_mmu>;
+ power-domains = <&power RK3399_PD_VDU>;
+ };
+
vdec_mmu: iommu@ff660480 {
compatible = "rockchip,iommu";
reg = <0x0 0xff660480 0x0 0x40>, <0x0 0xff6604c0 0x0 0x40>;
@@ -1276,8 +1288,8 @@
interrupt-names = "vdec_mmu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>;
clock-names = "aclk", "iface";
+ power-domains = <&power RK3399_PD_VDU>;
#iommu-cells = <0>;
- status = "disabled";
};
iep_mmu: iommu@ff670800 {
diff --git a/arch/arm64/boot/dts/socionext/Makefile b/arch/arm64/boot/dts/socionext/Makefile
index d45441249cb5..dda3da33614b 100644
--- a/arch/arm64/boot/dts/socionext/Makefile
+++ b/arch/arm64/boot/dts/socionext/Makefile
@@ -2,6 +2,7 @@
dtb-$(CONFIG_ARCH_UNIPHIER) += \
uniphier-ld11-global.dtb \
uniphier-ld11-ref.dtb \
+ uniphier-ld20-akebi96.dtb \
uniphier-ld20-global.dtb \
uniphier-ld20-ref.dtb \
uniphier-pxs3-ref.dtb
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
index f72f048a0c9d..816ac25fa1eb 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
@@ -30,6 +30,7 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
+ ethernet0 = &eth;
};
memory@80000000 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
index b8f627348448..693171f82ff1 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
@@ -29,6 +29,7 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
+ ethernet0 = &eth;
};
memory@80000000 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index 2ca2d3dc8d6c..15dcfc259854 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -129,6 +129,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
@@ -140,6 +142,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
@@ -566,6 +570,14 @@
};
};
+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x5300>;
+ interrupts = <0 188 4>;
+ dma-channels = <16>;
+ #dma-cells = <2>;
+ };
+
aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-ld11-aidet";
reg = <0x5fc20000 0x200>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-akebi96.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-akebi96.dts
new file mode 100644
index 000000000000..816919b42d2e
--- /dev/null
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-akebi96.dts
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Device Tree Source for Akebi96 Development Board
+//
+// Derived from uniphier-ld20-global.dts.
+//
+// Copyright (C) 2015-2017 Socionext Inc.
+// Copyright (C) 2019-2020 Linaro Ltd.
+
+/dts-v1/;
+#include <dt-bindings/gpio/uniphier-gpio.h>
+#include "uniphier-ld20.dtsi"
+
+/ {
+ model = "Akebi96";
+ compatible = "socionext,uniphier-ld20-akebi96",
+ "socionext,uniphier-ld20";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ spi3 = &spi3;
+ ethernet0 = &eth;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0xc0000000>;
+ };
+
+ framebuffer@c0000000 {
+ compatible = "simple-framebuffer";
+ reg = <0 0xc0000000 0 0x02000000>;
+ width = <1920>;
+ height = <1080>;
+ stride = <7680>;
+ format = "a8r8g8b8";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ memory@c0000000 {
+ reg = <0 0xc0000000 0 0x02000000>;
+ no-map;
+ };
+ };
+
+ sound {
+ compatible = "audio-graph-card";
+ label = "UniPhier LD20";
+ dais = <&spdif_port0
+ &comp_spdif_port0>;
+ };
+
+ spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+
+ port@0 {
+ spdif_tx: endpoint {
+ remote-endpoint = <&spdif_hiecout1>;
+ };
+ };
+ };
+
+ comp-spdif-out {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+
+ port@0 {
+ comp_spdif_tx: endpoint {
+ remote-endpoint = <&comp_spdif_hiecout1>;
+ };
+ };
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+};
+
+&spi3 {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ usb-over-spi@0 {
+ compatible = "maxim,max3421-udc";
+ reg = <0>;
+ spi-max-frequency = <12500000>;
+ interrupt-parent = <&gpio>;
+ interrupt-names = "udc";
+ interrupts = <0 2>;
+ };
+};
+
+&serial0 {
+ /* Onboard USB-UART */
+ status = "okay";
+};
+
+&serial2 {
+ /* LS connector UART1 */
+ status = "okay";
+};
+
+&serial3 {
+ /* LS connector UART0 */
+ status = "okay";
+};
+
+&spdif_hiecout1 {
+ remote-endpoint = <&spdif_tx>;
+};
+
+&comp_spdif_hiecout1 {
+ remote-endpoint = <&comp_spdif_tx>;
+};
+
+&i2c0 {
+ /* LS connector I2C0 */
+ status = "okay";
+};
+
+&i2c1 {
+ /* LS connector I2C1 */
+ status = "okay";
+};
+
+&eth {
+ status = "okay";
+ phy-handle = <&ethphy>;
+};
+
+&mdio {
+ ethphy: ethphy@0 {
+ reg = <0>;
+ };
+};
+
+&usb {
+ status = "okay";
+};
+
+&pcie {
+ status = "okay";
+};
+
+&gpio {
+ /* IRQs for Max3421 */
+ xirq0 {
+ gpio-hog;
+ gpios = <UNIPHIER_GPIO_IRQ(0) 1>;
+ input;
+ };
+ xirq10 {
+ gpio-hog;
+ gpios = <UNIPHIER_GPIO_IRQ(10) 1>;
+ input;
+ };
+};
+
+&pinctrl_aout1 {
+ groups = "aout1b";
+};
+
+&pinctrl_uart3 {
+ groups = "uart3", "uart3_ctsrts";
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
index 9ca692ed1b2b..2c000082667c 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
@@ -30,6 +30,7 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
+ ethernet0 = &eth;
};
memory@80000000 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
index 406244a5c8e8..eeb976e7892d 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
@@ -29,6 +29,7 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
+ ethernet0 = &eth;
};
memory@80000000 {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index a93148c2088f..f4a56b208837 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -234,6 +234,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
@@ -245,6 +247,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
@@ -256,6 +260,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006200 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 229 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi2>;
@@ -267,6 +273,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006300 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 230 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi3>;
@@ -664,6 +672,14 @@
};
};
+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x5300>;
+ interrupts = <0 188 4>;
+ dma-channels = <16>;
+ #dma-cells = <2>;
+ };
+
aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-ld20-aidet";
reg = <0x5fc20000 0x200>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
index 4d00ff9548e1..7c30c6b56b57 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
@@ -29,6 +29,8 @@
i2c6 = &i2c6;
spi0 = &spi0;
spi1 = &spi1;
+ ethernet0 = &eth0;
+ ethernet1 = &eth1;
};
memory@80000000 {
@@ -130,3 +132,19 @@
reg = <0>;
};
};
+
+&pinctrl_ether_rgmii {
+ tx {
+ pins = "RGMII0_TXCLK", "RGMII0_TXD0", "RGMII0_TXD1",
+ "RGMII0_TXD2", "RGMII0_TXD3", "RGMII0_TXCTL";
+ drive-strength = <9>;
+ };
+};
+
+&pinctrl_ether1_rgmii {
+ tx {
+ pins = "RGMII1_TXCLK", "RGMII1_TXD0", "RGMII1_TXD1",
+ "RGMII1_TXD2", "RGMII1_TXD3", "RGMII1_TXCTL";
+ drive-strength = <9>;
+ };
+};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 616835b38106..72f16881cf53 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -193,6 +193,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 39 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0>;
@@ -204,6 +206,8 @@
compatible = "socionext,uniphier-scssi";
status = "disabled";
reg = <0x54006100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <0 216 4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1>;
@@ -498,6 +502,14 @@
};
};
+ xdmac: dma-controller@5fc10000 {
+ compatible = "socionext,uniphier-xdmac";
+ reg = <0x5fc10000 0x5300>;
+ interrupts = <0 188 4>;
+ dma-channels = <16>;
+ #dma-cells = <2>;
+ };
+
aidet: interrupt-controller@5fc20000 {
compatible = "socionext,uniphier-pxs3-aidet";
reg = <0x5fc20000 0x200>;
diff --git a/arch/arm64/boot/dts/sprd/sc9863a.dtsi b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
index 2c590ca1d079..8cf4a6575980 100644
--- a/arch/arm64/boot/dts/sprd/sc9863a.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9863a.dtsi
@@ -5,6 +5,7 @@
* Copyright (C) 2019, Unisoc Inc.
*/
+#include <dt-bindings/clock/sprd,sc9863a-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "sharkl3.dtsi"
@@ -159,6 +160,30 @@
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
+ ap_clk: clock-controller@21500000 {
+ compatible = "sprd,sc9863a-ap-clk";
+ reg = <0 0x21500000 0 0x1000>;
+ clocks = <&ext_32k>, <&ext_26m>;
+ clock-names = "ext-32k", "ext-26m";
+ #clock-cells = <1>;
+ };
+
+ aon_clk: clock-controller@402d0000 {
+ compatible = "sprd,sc9863a-aon-clk";
+ reg = <0 0x402d0000 0 0x1000>;
+ clocks = <&ext_26m>, <&rco_100m>,
+ <&ext_32k>, <&ext_4m>;
+ clock-names = "ext-26m", "rco-100m",
+ "ext-32k", "ext-4m";
+ #clock-cells = <1>;
+ };
+
+ mm_clk: clock-controller@60900000 {
+ compatible = "sprd,sc9863a-mm-clk";
+ reg = <0 0x60900000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
funnel@10001000 {
compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
reg = <0 0x10001000 0 0x1000>;
@@ -519,5 +544,46 @@
};
};
};
+
+ ap-ahb {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ sdio0: sdio@20300000 {
+ compatible = "sprd,sdhci-r11";
+ reg = <0 0x20300000 0 0x1000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "sdio", "enable";
+ clocks = <&aon_clk CLK_SDIO0_2X>,
+ <&apahb_gate CLK_SDIO0_EB>;
+ assigned-clocks = <&aon_clk CLK_SDIO0_2X>;
+ assigned-clock-parents = <&rpll CLK_RPLL_390M>;
+
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+ };
+
+ sdio3: sdio@20600000 {
+ compatible = "sprd,sdhci-r11";
+ reg = <0 0x20600000 0 0x1000>;
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+
+ clock-names = "sdio", "enable";
+ clocks = <&aon_clk CLK_EMMC_2X>,
+ <&apahb_gate CLK_EMMC_EB>;
+ assigned-clocks = <&aon_clk CLK_EMMC_2X>;
+ assigned-clock-parents = <&rpll CLK_RPLL_390M>;
+
+ bus-width = <8>;
+ non-removable;
+ no-sdio;
+ no-sd;
+ cap-mmc-hw-reset;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/sprd/sharkl3.dtsi b/arch/arm64/boot/dts/sprd/sharkl3.dtsi
index 0222128b10f7..206a4afdab1c 100644
--- a/arch/arm64/boot/dts/sprd/sharkl3.dtsi
+++ b/arch/arm64/boot/dts/sprd/sharkl3.dtsi
@@ -16,6 +16,149 @@
#size-cells = <2>;
ranges;
+ ap_ahb_regs: syscon@20e00000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x20e00000 0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x20e00000 0x4000>;
+
+ apahb_gate: apahb-gate {
+ compatible = "sprd,sc9863a-apahb-gate";
+ reg = <0x0 0x1020>;
+ #clock-cells = <1>;
+ };
+ };
+
+ pmu_regs: syscon@402b0000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x402b0000 0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x402b0000 0x4000>;
+
+ pmu_gate: pmu-gate {
+ compatible = "sprd,sc9863a-pmu-gate";
+ reg = <0 0x1200>;
+ clocks = <&ext_26m>;
+ clock-names = "ext-26m";
+ #clock-cells = <1>;
+ };
+ };
+
+ aon_apb_regs: syscon@402e0000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x402e0000 0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x402e0000 0x4000>;
+
+ aonapb_gate: aonapb-gate {
+ compatible = "sprd,sc9863a-aonapb-gate";
+ reg = <0 0x1100>;
+ #clock-cells = <1>;
+ };
+ };
+
+ anlg_phy_g2_regs: syscon@40353000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x40353000 0 0x3000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x40353000 0x3000>;
+
+ pll: pll {
+ compatible = "sprd,sc9863a-pll";
+ reg = <0 0x100>;
+ clocks = <&ext_26m>;
+ clock-names = "ext-26m";
+ #clock-cells = <1>;
+ };
+ };
+
+ anlg_phy_g4_regs: syscon@40359000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x40359000 0 0x3000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x40359000 0x3000>;
+
+ mpll: mpll {
+ compatible = "sprd,sc9863a-mpll";
+ reg = <0 0x100>;
+ #clock-cells = <1>;
+ };
+ };
+
+ anlg_phy_g5_regs: syscon@4035c000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x4035c000 0 0x3000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x4035c000 0x3000>;
+
+ rpll: rpll {
+ compatible = "sprd,sc9863a-rpll";
+ reg = <0 0x100>;
+ clocks = <&ext_26m>;
+ clock-names = "ext-26m";
+ #clock-cells = <1>;
+ };
+ };
+
+ anlg_phy_g7_regs: syscon@40363000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x40363000 0 0x3000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x40363000 0x3000>;
+
+ dpll: dpll {
+ compatible = "sprd,sc9863a-dpll";
+ reg = <0 0x100>;
+ #clock-cells = <1>;
+ };
+ };
+
+ mm_ahb_regs: syscon@60800000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x60800000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x60800000 0x3000>;
+
+ mm_gate: mm-gate {
+ compatible = "sprd,sc9863a-mm-gate";
+ reg = <0 0x1100>;
+ #clock-cells = <1>;
+ };
+ };
+
+ ap_apb_regs: syscon@71300000 {
+ compatible = "sprd,sc9863a-glbregs", "syscon",
+ "simple-mfd";
+ reg = <0 0x71300000 0 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x71300000 0x4000>;
+
+ apapb_gate: apapb-gate {
+ compatible = "sprd,sc9863a-apapb-gate";
+ reg = <0 0x1000>;
+ clocks = <&ext_26m>;
+ clock-names = "ext-26m";
+ #clock-cells = <1>;
+ };
+ };
+
apb@70000000 {
compatible = "simple-bus";
#address-cells = <1>;
@@ -75,4 +218,25 @@
clock-frequency = <26000000>;
clock-output-names = "ext-26m";
};
+
+ ext_32k: ext-32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "ext-32k";
+ };
+
+ ext_4m: ext-4m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <4000000>;
+ clock-output-names = "ext-4m";
+ };
+
+ rco_100m: rco-100m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "rco-100m";
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 0d533d52fcda..61815228e230 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -287,6 +287,17 @@
mux-reg-masks = <0x4080 0x3>, /* SERDES0 lane select */
<0x4090 0x3>; /* SERDES1 lane select */
};
+
+ dss_oldi_io_ctrl: dss_oldi_io_ctrl@41E0 {
+ compatible = "syscon";
+ reg = <0x0000041E0 0x14>;
+ };
+
+ ehrpwm_tbclk: syscon@4140 {
+ compatible = "ti,am654-ehrpwm-tbclk", "syscon";
+ reg = <0x4140 0x18>;
+ #clock-cells = <1>;
+ };
};
dwc3_0: dwc3@4000000 {
@@ -768,4 +779,97 @@
};
};
};
+
+ dss: dss@04a00000 {
+ compatible = "ti,am65x-dss";
+ reg = <0x0 0x04a00000 0x0 0x1000>, /* common */
+ <0x0 0x04a02000 0x0 0x1000>, /* vidl1 */
+ <0x0 0x04a06000 0x0 0x1000>, /* vid */
+ <0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
+ <0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
+ <0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
+ <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
+ reg-names = "common", "vidl1", "vid",
+ "ovr1", "ovr2", "vp1", "vp2";
+
+ ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+
+ power-domains = <&k3_pds 67 TI_SCI_PD_EXCLUSIVE>;
+
+ clocks = <&k3_clks 67 1>,
+ <&k3_clks 216 1>,
+ <&k3_clks 67 2>;
+ clock-names = "fck", "vp1", "vp2";
+
+ /*
+ * Set vp2 clk (DPI_1_IN_CLK) mux to PLL4 via
+ * DIV1. See "Figure 12-3365. DSS Integration"
+ * in AM65x TRM for details.
+ */
+ assigned-clocks = <&k3_clks 67 2>;
+ assigned-clock-parents = <&k3_clks 67 5>;
+
+ interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+
+ status = "disabled";
+
+ dss_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
+ ehrpwm0: pwm@3000000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x0 0x3000000 0x0 0x100>;
+ power-domains = <&k3_pds 40 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&ehrpwm_tbclk 0>, <&k3_clks 40 0>;
+ clock-names = "tbclk", "fck";
+ };
+
+ ehrpwm1: pwm@3010000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x0 0x3010000 0x0 0x100>;
+ power-domains = <&k3_pds 41 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&ehrpwm_tbclk 1>, <&k3_clks 41 0>;
+ clock-names = "tbclk", "fck";
+ };
+
+ ehrpwm2: pwm@3020000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x0 0x3020000 0x0 0x100>;
+ power-domains = <&k3_pds 42 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&ehrpwm_tbclk 2>, <&k3_clks 42 0>;
+ clock-names = "tbclk", "fck";
+ };
+
+ ehrpwm3: pwm@3030000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x0 0x3030000 0x0 0x100>;
+ power-domains = <&k3_pds 43 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&ehrpwm_tbclk 3>, <&k3_clks 43 0>;
+ clock-names = "tbclk", "fck";
+ };
+
+ ehrpwm4: pwm@3040000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x0 0x3040000 0x0 0x100>;
+ power-domains = <&k3_pds 44 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&ehrpwm_tbclk 4>, <&k3_clks 44 0>;
+ clock-names = "tbclk", "fck";
+ };
+
+ ehrpwm5: pwm@3050000 {
+ compatible = "ti,am654-ehrpwm", "ti,am3352-ehrpwm";
+ #pwm-cells = <3>;
+ reg = <0x0 0x3050000 0x0 0x100>;
+ power-domains = <&k3_pds 45 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&ehrpwm_tbclk 5>, <&k3_clks 45 0>;
+ clock-names = "tbclk", "fck";
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index f4227e2743f2..54a133fa1bf2 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -89,4 +89,15 @@
clocks = <&k3_clks 59 0>;
clock-names = "gpio";
};
+
+ wkup_vtm0: thermal@42050000 {
+ compatible = "ti,am654-vtm";
+ reg = <0x42050000 0x25c>;
+ power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ thermal_zones: thermal-zones {
+ #include "k3-am654-industrial-thermal.dtsi"
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am654-industrial-thermal.dtsi b/arch/arm64/boot/dts/ti/k3-am654-industrial-thermal.dtsi
new file mode 100644
index 000000000000..cdc3d40c3f60
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am654-industrial-thermal.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <dt-bindings/thermal/thermal.h>
+
+mpu0_thermal: mpu0_thermal {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <500>; /* milliseconds */
+ thermal-sensors = <&wkup_vtm0 0>;
+
+ trips {
+ mpu0_crit: mpu0_crit {
+ temperature = <125000>; /* milliCelsius */
+ hysteresis = <2000>; /* milliCelsius */
+ type = "critical";
+ };
+ };
+};
+
+mpu1_thermal: mpu1_thermal {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <500>; /* milliseconds */
+ thermal-sensors = <&wkup_vtm0 1>;
+
+ trips {
+ mpu1_crit: mpu1_crit {
+ temperature = <125000>; /* milliCelsius */
+ hysteresis = <2000>; /* milliCelsius */
+ type = "critical";
+ };
+ };
+};
+
+mcu_thermal: mcu_thermal {
+ polling-delay-passive = <250>; /* milliseconds */
+ polling-delay = <500>; /* milliseconds */
+ thermal-sensors = <&wkup_vtm0 2>;
+
+ trips {
+ mcu_crit: mcu_crit {
+ temperature = <125000>; /* milliCelsius */
+ hysteresis = <2000>; /* milliCelsius */
+ type = "critical";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 98e5e17e3ff7..6df823aaa37c 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -472,3 +472,23 @@
phy-mode = "rgmii-rxid";
phy-handle = <&phy0>;
};
+
+&dss {
+ /*
+ * These clock assignments are chosen to enable the following outputs:
+ *
+ * VP0 - DisplayPort SST
+ * VP1 - DPI0
+ * VP2 - DSI
+ * VP3 - DPI1
+ */
+
+ assigned-clocks = <&k3_clks 152 1>,
+ <&k3_clks 152 4>,
+ <&k3_clks 152 9>,
+ <&k3_clks 152 13>;
+ assigned-clock-parents = <&k3_clks 152 2>, /* PLL16_HSDIV0 */
+ <&k3_clks 152 6>, /* PLL19_HSDIV0 */
+ <&k3_clks 152 11>, /* PLL18_HSDIV0 */
+ <&k3_clks 152 18>; /* PLL23_HSDIV0 */
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index 844a5b50cf09..96c929da639d 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -748,6 +748,63 @@
};
};
+ dss: dss@04a00000 {
+ compatible = "ti,j721e-dss";
+ reg =
+ <0x00 0x04a00000 0x00 0x10000>, /* common_m */
+ <0x00 0x04a10000 0x00 0x10000>, /* common_s0*/
+ <0x00 0x04b00000 0x00 0x10000>, /* common_s1*/
+ <0x00 0x04b10000 0x00 0x10000>, /* common_s2*/
+
+ <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
+ <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
+ <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
+ <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
+
+ <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
+ <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
+ <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
+ <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
+
+ <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
+ <0x00 0x04aa0000 0x00 0x10000>, /* vp2 */
+ <0x00 0x04ac0000 0x00 0x10000>, /* vp3 */
+ <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
+ <0x00 0x04af0000 0x00 0x10000>; /* wb */
+
+ reg-names = "common_m", "common_s0",
+ "common_s1", "common_s2",
+ "vidl1", "vidl2","vid1","vid2",
+ "ovr1", "ovr2", "ovr3", "ovr4",
+ "vp1", "vp2", "vp3", "vp4",
+ "wb";
+
+ clocks = <&k3_clks 152 0>,
+ <&k3_clks 152 1>,
+ <&k3_clks 152 4>,
+ <&k3_clks 152 9>,
+ <&k3_clks 152 13>;
+ clock-names = "fck", "vp1", "vp2", "vp3", "vp4";
+
+ power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common_m",
+ "common_s0",
+ "common_s1",
+ "common_s2";
+
+ status = "disabled";
+
+ dss_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
mcasp0: mcasp@2b00000 {
compatible = "ti,am33xx-mcasp-audio";
reg = <0x0 0x02b00000 0x0 0x2000>,
@@ -975,4 +1032,22 @@
status = "disabled";
};
+
+ watchdog0: watchdog@2200000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x0 0x2200000 0x0 0x100>;
+ clocks = <&k3_clks 252 1>;
+ power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 252 1>;
+ assigned-clock-parents = <&k3_clks 252 5>;
+ };
+
+ watchdog1: watchdog@2210000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x0 0x2210000 0x0 0x100>;
+ clocks = <&k3_clks 253 1>;
+ power-domains = <&k3_pds 253 TI_SCI_PD_EXCLUSIVE>;
+ assigned-clocks = <&k3_clks 253 1>;
+ assigned-clock-parents = <&k3_clks 253 5>;
+ };
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 26d926eb1431..9174ddc76bdc 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -158,6 +158,10 @@
zynqmp_pcap: pcap {
compatible = "xlnx,zynqmp-pcap-fpga";
};
+
+ xlnx_aes: zynqmp-aes {
+ compatible = "xlnx,zynqmp-aes";
+ };
};
};
@@ -185,7 +189,7 @@
ranges = <0 0 0 0 0xffffffff>;
gic: interrupt-controller@f9010000 {
- compatible = "arm,gic-400", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
#interrupt-cells = <3>;
reg = <0x0 0xf9010000 0x10000>,
<0x0 0xf9020000 0x20000>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 03d0189f7d68..883e8bace3ed 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -84,6 +84,7 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPUFREQ_DT=y
CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM=m
CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_ARM_IMX_CPUFREQ_DT=m
@@ -188,6 +189,7 @@ CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_IOV=y
+CONFIG_PCI_PASID=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=y
CONFIG_PCI_AARDVARK=y
@@ -241,6 +243,7 @@ CONFIG_BLK_DEV_NVME=m
CONFIG_SRAM=y
CONFIG_EEPROM_AT24=m
CONFIG_EEPROM_AT25=m
+CONFIG_UACCE=m
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SAS_ATA=y
@@ -305,6 +308,7 @@ CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
CONFIG_QCOM_EMAC=m
+CONFIG_RMNET=m
CONFIG_RAVB=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
@@ -312,6 +316,7 @@ CONFIG_SNI_AVE=y
CONFIG_SNI_NETSEC=y
CONFIG_STMMAC_ETH=m
CONFIG_TI_K3_AM65_CPSW_NUSS=y
+CONFIG_QCOM_IPA=m
CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_MDIO_BUS_MUX_MULTIPLEXER=y
CONFIG_AQUANTIA_PHY=y
@@ -410,6 +415,7 @@ CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
CONFIG_I2C_OWL=y
CONFIG_I2C_PXA=y
+CONFIG_I2C_QCOM_CCI=m
CONFIG_I2C_QCOM_GENI=m
CONFIG_I2C_QUP=y
CONFIG_I2C_RK3X=y
@@ -450,6 +456,7 @@ CONFIG_PINCTRL_IMX8MN=y
CONFIG_PINCTRL_IMX8MP=y
CONFIG_PINCTRL_IMX8MQ=y
CONFIG_PINCTRL_IMX8QXP=y
+CONFIG_PINCTRL_IMX8DXL=y
CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_IPQ6018=y
CONFIG_PINCTRL_MSM8916=y
@@ -459,6 +466,7 @@ CONFIG_PINCTRL_MSM8998=y
CONFIG_PINCTRL_QCS404=y
CONFIG_PINCTRL_QDF2XXX=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_SC7180=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SM8150=y
CONFIG_GPIO_ALTERA=m
@@ -513,6 +521,7 @@ CONFIG_UNIPHIER_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
CONFIG_ARM_SBSA_WATCHDOG=y
+CONFIG_ARM_SMC_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=m
@@ -584,6 +593,7 @@ CONFIG_VIDEO_RENESAS_FCP=m
CONFIG_VIDEO_RENESAS_VSP1=m
CONFIG_SDR_PLATFORM_DRIVERS=y
CONFIG_VIDEO_RCAR_DRIF=m
+CONFIG_VIDEO_QCOM_CAMSS=m
CONFIG_DRM=m
CONFIG_DRM_I2C_NXP_TDA998X=m
CONFIG_DRM_MALI_DISPLAY=m
@@ -644,6 +654,7 @@ CONFIG_SND_HDA_CODEC_HDMI=m
CONFIG_SND_SOC=y
CONFIG_SND_BCM2835_SOC_I2S=m
CONFIG_SND_MESON_AXG_SOUND_CARD=m
+CONFIG_SND_MESON_GX_SOUND_CARD=m
CONFIG_SND_SOC_SDM845=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
@@ -656,6 +667,7 @@ CONFIG_SND_SOC_AK4613=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
CONFIG_SND_SOC_PCM3168A_I2C=m
+CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
CONFIG_SND_SOC_TAS571X=m
CONFIG_SND_SOC_WCD934X=m
CONFIG_SND_SOC_WSA881X=m
@@ -725,6 +737,7 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
@@ -804,10 +817,13 @@ CONFIG_MSM_GCC_8994=y
CONFIG_MSM_MMCC_8996=y
CONFIG_MSM_GCC_8998=y
CONFIG_QCS_GCC_404=y
+CONFIG_SC_GCC_7180=y
+CONFIG_SDM_CAMCC_845=m
CONFIG_SDM_GCC_845=y
CONFIG_SDM_GPUCC_845=y
CONFIG_SDM_DISPCC_845=y
CONFIG_SM_GCC_8150=y
+CONFIG_SM_GCC_8250=y
CONFIG_QCOM_HFPLL=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
@@ -837,7 +853,6 @@ CONFIG_FSL_MC_DPIO=y
CONFIG_IMX_SCU_SOC=y
CONFIG_QCOM_AOSS_QMP=y
CONFIG_QCOM_GENI_SE=y
-CONFIG_QCOM_GLINK_SSR=m
CONFIG_QCOM_RMTFS_MEM=m
CONFIG_QCOM_RPMH=y
CONFIG_QCOM_RPMHPD=y
@@ -971,7 +986,9 @@ CONFIG_CRYPTO_DEV_FSL_CAAM=m
CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=m
CONFIG_CRYPTO_DEV_QCOM_RNG=m
CONFIG_CRYPTO_DEV_CCREE=m
+CONFIG_CRYPTO_DEV_HISI_SEC2=m
CONFIG_CRYPTO_DEV_HISI_ZIP=m
+CONFIG_CRYPTO_DEV_HISI_HPRE=m
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index b263e239cb59..a45366c3909b 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
+#include <linux/stddef.h>
#include <asm/cputype.h>
#include <asm/io.h>
@@ -31,14 +32,14 @@
* is therefore used to delimit the MADT GICC structure minimum length
* appropriately.
*/
-#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \
+#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \
struct acpi_madt_generic_interrupt, efficiency_class)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
(unsigned long)(entry) + (entry)->header.length > (end))
-#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
+#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
spe_interrupt) + sizeof(u16))
/* Basic configuration for ACPI */
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9543b5e0534d..a08890da696c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define ATOMIC_INIT(i) { (i) }
-#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 7d9cc5ec4971..fb4c27506ef4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
#define __smp_store_release(p, v) \
do { \
typeof(p) __p = (p); \
- union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
+ union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force __unqual_scalar_typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
kasan_check_write(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
@@ -110,7 +110,7 @@ do { \
#define __smp_load_acquire(p) \
({ \
- union { typeof(*p) __val; char __c[1]; } __u; \
+ union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u; \
typeof(p) __p = (p); \
compiletime_assert_atomic_type(*p); \
kasan_check_read(__p, sizeof(*p)); \
@@ -136,33 +136,33 @@ do { \
: "Q" (*__p) : "memory"); \
break; \
} \
- __u.__val; \
+ (typeof(*p))__u.__val; \
})
#define smp_cond_load_relaxed(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
__cmpwait_relaxed(__PTR, VAL); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#define smp_cond_load_acquire(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = smp_load_acquire(__PTR); \
if (cond_expr) \
break; \
__cmpwait_relaxed(__PTR, VAL); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#include <asm-generic/barrier.h>
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index ce50c1f1f1ea..9384fd8fc13c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -94,20 +94,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
kick_all_cpus_sync();
}
-
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_page(struct vm_area_struct *vma,
- unsigned long user_addr, unsigned long pfn)
-{
-}
-
-static inline void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
+#define flush_icache_range flush_icache_range
/*
* Cache maintenance functions used by the DMA API. No to be used directly.
@@ -123,12 +110,7 @@ extern void __dma_flush_area(const void *, size_t);
*/
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
unsigned long, void *, const void *, unsigned long);
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- } while (0)
-
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define copy_to_user_page copy_to_user_page
/*
* flush_dcache_page is used when the kernel has written to the page
@@ -154,29 +136,11 @@ static __always_inline void __flush_icache_all(void)
dsb(ish);
}
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
-
-/*
- * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
- */
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
-{
-}
-
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
-{
-}
-
int set_memory_valid(unsigned long addr, int numpages, int enable);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
-#endif
+#include <asm-generic/cacheflush.h>
+
+#endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 4f00d50585a4..8d1c8dcb87fd 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -96,7 +96,28 @@
*/
#define elf_check_arch(x) ((x)->e_machine == EM_AARCH64)
-#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the results are:
+ *
+ *               CPU*: | arm32      | arm64      |
+ * ELF:                |            |            |
+ * ---------------------|------------|------------|
+ * missing PT_GNU_STACK | exec-all   | exec-none  |
+ * PT_GNU_STACK == RWX  | exec-stack | exec-stack |
+ * PT_GNU_STACK == RW   | exec-none  | exec-none  |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *all arm64 CPUs support NX, so there is no "lacks NX" column.
+ *
+ */
+#define compat_elf_read_implies_exec(ex, stk) (stk == EXSTACK_DEFAULT)
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 6facd1308e7c..ff50dd731852 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -9,11 +9,11 @@
#define __ASM_IO_H
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index a6e5da755359..3bf626f6fe0c 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -8,7 +8,7 @@
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/sparsemem.h>
/*
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0027d34fbb4b..b12bfc1f051a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -83,11 +83,11 @@ alternative_cb_end
#else
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -152,8 +152,8 @@ void kvm_clear_hyp_idmap(void);
__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp) \
__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
-#define kvm_mk_pgd(pudp) \
- __pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)
+#define kvm_mk_p4d(pmdp) \
+ __p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)
#define kvm_set_pud(pudp, pud) set_pud(pudp, pud)
@@ -279,6 +279,12 @@ static inline bool kvm_s2pud_young(pud_t pud)
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif
+#ifdef __PAGETABLE_P4D_FOLDED
+#define hyp_p4d_table_empty(p4dp) (0)
+#else
+#define hyp_p4d_table_empty(p4dp) kvm_page_empty(p4dp)
+#endif
+
struct kvm;
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index ab46187c6300..b0bd9b55594c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -14,13 +14,13 @@
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
-#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 172d76fa0245..58e93583ddb6 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -73,17 +73,17 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
free_page((unsigned long)pudp);
}
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
+static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
- set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
+ set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
{
- __pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
+ __p4d_populate(p4dp, __pa(pudp), PUD_TYPE_TABLE);
}
#else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
+static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
BUILD_BUG();
}
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index acb0751a6606..b8f158ae2527 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -14,6 +14,7 @@
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
+typedef u64 p4dval_t;
typedef u64 pgdval_t;
/*
@@ -44,13 +45,11 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x) ((pgprot_t) { (x) } )
#if CONFIG_PGTABLE_LEVELS == 2
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 4
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
#endif
#endif /* __ASM_PGTABLE_TYPES_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9ce000f22d9e..6dbd267ab931 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -298,6 +298,11 @@ static inline pte_t pgd_pte(pgd_t pgd)
return __pte(pgd_val(pgd));
}
+static inline pte_t p4d_pte(p4d_t p4d)
+{
+ return __pte(p4d_val(p4d));
+}
+
static inline pte_t pud_pte(pud_t pud)
{
return __pte(pud_val(pud));
@@ -335,7 +340,7 @@ static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
#ifdef CONFIG_NUMA_BALANCING
/*
- * See the comment in include/asm-generic/pgtable.h
+ * See the comment in include/linux/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
@@ -401,6 +406,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
+#define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
+
#define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)
@@ -498,15 +506,13 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return __pmd_to_phys(pmd);
}
-static inline void pte_unmap(pte_t *pte) { }
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_page_paddr(pmd));
+}
/* Find an entry in the third-level page table. */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
-#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
@@ -560,11 +566,13 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
return __pud_to_phys(pud);
}
-/* Find an entry in the second-level page table. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long)__va(pud_page_paddr(pud));
+}
+/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
-#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
@@ -592,49 +600,52 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
#define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud))
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (!(pgd_val(pgd) & 2))
-#define pgd_present(pgd) (pgd_val(pgd))
+#define p4d_none(p4d) (!p4d_val(p4d))
+#define p4d_bad(p4d) (!(p4d_val(p4d) & 2))
+#define p4d_present(p4d) (p4d_val(p4d))
-static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
- if (in_swapper_pgdir(pgdp)) {
- set_swapper_pgd(pgdp, pgd);
+ if (in_swapper_pgdir(p4dp)) {
+ set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d)));
return;
}
- WRITE_ONCE(*pgdp, pgd);
+ WRITE_ONCE(*p4dp, p4d);
dsb(ishst);
isb();
}
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
{
- set_pgd(pgdp, __pgd(0));
+ set_p4d(p4dp, __p4d(0));
}
-static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
+static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
{
- return __pgd_to_phys(pgd);
+ return __p4d_to_phys(p4d);
}
-/* Find an entry in the first-level page table. */

-#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+{
+ return (unsigned long)__va(p4d_page_paddr(p4d));
+}
-#define pud_offset_phys(dir, addr) (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
-#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))
+/* Find an entry in the first-level page table. */
+#define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
-#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_set_fixmap_offset(p4d, addr) pud_set_fixmap(pud_offset_phys(p4d, addr))
#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
-#define pgd_page(pgd) phys_to_page(__pgd_to_phys(pgd))
+#define p4d_page(p4d) pfn_to_page(__phys_to_pfn(__p4d_to_phys(p4d)))
/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
#else
+#define p4d_page_paddr(p4d) ({ BUILD_BUG(); 0;})
#define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;})
/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
@@ -648,16 +659,6 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
-
-#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
@@ -844,8 +845,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
extern int kern_addr_valid(unsigned long addr);
-#include <asm-generic/pgtable.h>
-
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
*/
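The arm64 <asm/pgtable.h> hunks above drop the open-coded pte/pmd/pud/pgd offset macros and the trailing <asm-generic/pgtable.h> include: the generic definitions now come from <linux/pgtable.h>, and every walk gains an explicit p4d step between pgd and pud. A minimal sketch of the resulting five-level walk, assuming <linux/pgtable.h>; walk_kernel_pte is a hypothetical helper name:

static pte_t *walk_kernel_pte(unsigned long addr)
{
        pgd_t *pgdp = pgd_offset_k(addr);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        if (pgd_none(READ_ONCE(*pgdp)))
                return NULL;
        p4dp = p4d_offset(pgdp, addr);          /* the new explicit level */
        if (p4d_none(READ_ONCE(*p4dp)))
                return NULL;
        pudp = pud_offset(p4dp, addr);          /* was pud_offset(pgdp, addr) */
        if (pud_none(READ_ONCE(*pudp)))
                return NULL;
        pmdp = pmd_offset(pudp, addr);
        if (pmd_none(READ_ONCE(*pmdp)))
                return NULL;
        return pte_offset_kernel(pmdp, addr);   /* leaf entry */
}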
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 5017b531a415..fc7613023c19 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -64,7 +64,8 @@ struct stackframe {
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
-extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 326aac658b9d..b767904f28b1 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -9,7 +9,7 @@
#define __ARM64_S2_PGTABLE_H_
#include <linux/hugetlb.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map
@@ -68,41 +68,67 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK (~(S2_PUD_SIZE - 1))
-static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
+#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
+#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
+#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
+#define stage2_pgd_populate(kvm, pgd, p4d) pgd_populate(NULL, pgd, p4d)
+
+static inline p4d_t *stage2_p4d_offset(struct kvm *kvm,
+ pgd_t *pgd, unsigned long address)
+{
+ return p4d_offset(pgd, address);
+}
+
+static inline void stage2_p4d_free(struct kvm *kvm, p4d_t *p4d)
+{
+}
+
+static inline bool stage2_p4d_table_empty(struct kvm *kvm, p4d_t *p4dp)
+{
+ return false;
+}
+
+static inline phys_addr_t stage2_p4d_addr_end(struct kvm *kvm,
+ phys_addr_t addr, phys_addr_t end)
+{
+ return end;
+}
+
+static inline bool stage2_p4d_none(struct kvm *kvm, p4d_t p4d)
{
if (kvm_stage2_has_pud(kvm))
- return pgd_none(pgd);
+ return p4d_none(p4d);
else
return 0;
}
-static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
+static inline void stage2_p4d_clear(struct kvm *kvm, p4d_t *p4dp)
{
if (kvm_stage2_has_pud(kvm))
- pgd_clear(pgdp);
+ p4d_clear(p4dp);
}
-static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
+static inline bool stage2_p4d_present(struct kvm *kvm, p4d_t p4d)
{
if (kvm_stage2_has_pud(kvm))
- return pgd_present(pgd);
+ return p4d_present(p4d);
else
return 1;
}
-static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
+static inline void stage2_p4d_populate(struct kvm *kvm, p4d_t *p4d, pud_t *pud)
{
if (kvm_stage2_has_pud(kvm))
- pgd_populate(NULL, pgd, pud);
+ p4d_populate(NULL, p4d, pud);
}
static inline pud_t *stage2_pud_offset(struct kvm *kvm,
- pgd_t *pgd, unsigned long address)
+ p4d_t *p4d, unsigned long address)
{
if (kvm_stage2_has_pud(kvm))
- return pud_offset(pgd, address);
+ return pud_offset(p4d, address);
else
- return (pud_t *)pgd;
+ return (pud_t *)p4d;
}
static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
index 0cc6636e3f15..894e031b28d2 100644
--- a/arch/arm64/include/asm/vmap_stack.h
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -7,8 +7,8 @@
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/thread_info.h>
/*
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 46ec402e97ed..a7586a4db142 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -24,12 +24,12 @@
#include <linux/of_fdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
+#include <linux/pgtable.h>
#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
-#include <asm/pgtable.h>
#include <asm/smp_plat.h>
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 15e80c876d46..5df49366e9ab 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -130,7 +130,7 @@ static int clear_os_lock(unsigned int cpu)
return 0;
}
-static int debug_monitors_init(void)
+static int __init debug_monitors_init(void)
{
return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
"arm64/debug_monitors:starting",
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 8618faa82e6d..86a5cf9bc19a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -69,7 +69,8 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
- if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_FTRACE_WITH_REGS))
+ if (addr == FTRACE_REGS_ADDR &&
+ IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
return &plt[FTRACE_REGS_PLT_IDX];
#endif
return NULL;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 632702146813..037421c66b14 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/pgtable.h>
#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
@@ -26,7 +27,6 @@
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 5b73e92c99e3..68e14152d6e9 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -32,7 +32,6 @@
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
@@ -184,11 +183,12 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
pgprot_t pgprot)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- pgdp = pgd_offset_raw(trans_pgd, dst_addr);
+ pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
pudp = (void *)get_safe_page(GFP_ATOMIC);
if (!pudp)
@@ -196,7 +196,15 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
pgd_populate(&init_mm, pgdp, pudp);
}
- pudp = pud_offset(pgdp, dst_addr);
+ p4dp = p4d_offset(pgdp, dst_addr);
+ if (p4d_none(READ_ONCE(*p4dp))) {
+ pudp = (void *)get_safe_page(GFP_ATOMIC);
+ if (!pudp)
+ return -ENOMEM;
+ p4d_populate(&init_mm, p4dp, pudp);
+ }
+
+ pudp = pud_offset(p4dp, dst_addr);
if (pud_none(READ_ONCE(*pudp))) {
pmdp = (void *)get_safe_page(GFP_ATOMIC);
if (!pmdp)
@@ -419,7 +427,7 @@ static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
return 0;
}
-static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
+static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
unsigned long end)
{
pud_t *dst_pudp;
@@ -427,15 +435,15 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
unsigned long next;
unsigned long addr = start;
- if (pgd_none(READ_ONCE(*dst_pgdp))) {
+ if (p4d_none(READ_ONCE(*dst_p4dp))) {
dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!dst_pudp)
return -ENOMEM;
- pgd_populate(&init_mm, dst_pgdp, dst_pudp);
+ p4d_populate(&init_mm, dst_p4dp, dst_pudp);
}
- dst_pudp = pud_offset(dst_pgdp, start);
+ dst_pudp = pud_offset(dst_p4dp, start);
- src_pudp = pud_offset(src_pgdp, start);
+ src_pudp = pud_offset(src_p4dp, start);
do {
pud_t pud = READ_ONCE(*src_pudp);
@@ -454,6 +462,27 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
return 0;
}
+static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
+ unsigned long end)
+{
+ p4d_t *dst_p4dp;
+ p4d_t *src_p4dp;
+ unsigned long next;
+ unsigned long addr = start;
+
+ dst_p4dp = p4d_offset(dst_pgdp, start);
+ src_p4dp = p4d_offset(src_pgdp, start);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none(READ_ONCE(*src_p4dp)))
+ continue;
+ if (copy_pud(dst_p4dp, src_p4dp, addr, next))
+ return -ENOMEM;
+ } while (dst_p4dp++, src_p4dp++, addr = next, addr != end);
+
+ return 0;
+}
+
static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
unsigned long end)
{
@@ -461,12 +490,12 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
unsigned long addr = start;
pgd_t *src_pgdp = pgd_offset_k(start);
- dst_pgdp = pgd_offset_raw(dst_pgdp, start);
+ dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
do {
next = pgd_addr_end(addr, end);
if (pgd_none(READ_ONCE(*src_pgdp)))
continue;
- if (copy_pud(dst_pgdp, src_pgdp, addr, next))
+ if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
return -ENOMEM;
} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
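copy_p4d() slots into the same per-level iteration idiom the hibernation copier already uses for pud/pmd/pte: clamp the sub-range with *_addr_end(), skip empty entries, descend, then advance. A generic sketch of that shape (walk_p4d_range and fn are hypothetical names, assuming kernel headers):

static int walk_p4d_range(pgd_t *pgdp, unsigned long addr, unsigned long end,
                          int (*fn)(p4d_t *p4dp, unsigned long addr,
                                    unsigned long next))
{
        p4d_t *p4dp = p4d_offset(pgdp, addr);
        unsigned long next;

        do {
                next = p4d_addr_end(addr, end); /* end of this p4d entry */
                if (p4d_none(READ_ONCE(*p4dp)))
                        continue;               /* nothing mapped here */
                if (fn(p4dp, addr, next))
                        return -ENOMEM;
        } while (p4dp++, addr = next, addr != end);

        return 0;
}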
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 91a83104c6e8..07c4c8cc4a67 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -10,6 +10,7 @@
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/archrandom.h>
#include <asm/cacheflush.h>
@@ -17,7 +18,6 @@
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
enum kaslr_status {
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 570988c7a7ff..1006ed2d7c60 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -117,7 +117,7 @@ pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
struct device *dev = &root->device->dev;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
- struct pci_ecam_ops *ecam_ops;
+ const struct pci_ecam_ops *ecam_ops;
struct resource cfgres;
struct acpi_device *adev;
struct pci_config_window *cfg;
@@ -185,7 +185,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
root_ops->release_info = pci_acpi_generic_release_info;
root_ops->prepare_resources = pci_acpi_root_prepare_resources;
- root_ops->pci_ops = &ri->cfg->ops->pci_ops;
+ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
if (!bus)
return NULL;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index eade7807e819..6089638c7d43 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -306,7 +306,7 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
__show_regs(regs);
- dump_backtrace(regs, NULL);
+ dump_backtrace(regs, NULL, KERN_DEFAULT);
}
static void tls_thread_flush(void)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 76790a5f2a0d..68b7f34a08f5 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -34,7 +34,6 @@
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
-#include <asm/pgtable.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3fd2c11c09fc..93b3844cf442 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -319,6 +319,10 @@ void __init setup_arch(char **cmdline_p)
xen_early_init();
efi_init();
+
+ if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
+ pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");
+
arm64_memblock_init();
paging_init();
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4b6f4999d06a..e43a8ff19f0f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -43,7 +43,6 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 9405d1b7f4b0..c1dee9066ff9 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -3,13 +3,13 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
-#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d332590f5978..50cc30acf106 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -53,9 +53,9 @@ static const char *handler[]= {
int show_unhandled_signals = 0;
-static void dump_backtrace_entry(unsigned long where)
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
- printk(" %pS\n", (void *)where);
+ printk("%s %pS\n", loglvl, (void *)where);
}
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
@@ -83,7 +83,8 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
printk("%sCode: %s\n", lvl, str);
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
struct stackframe frame;
int skip = 0;
@@ -115,11 +116,11 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
thread_saved_pc(tsk));
}
- printk("Call trace:\n");
+ printk("%sCall trace:\n", loglvl);
do {
/* skip until specified stack frame */
if (!skip) {
- dump_backtrace_entry(frame.pc);
+ dump_backtrace_entry(frame.pc, loglvl);
} else if (frame.fp == regs->regs[29]) {
skip = 0;
/*
@@ -129,16 +130,16 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
* at which an exception has taken place, use regs->pc
* instead.
*/
- dump_backtrace_entry(regs->pc);
+ dump_backtrace_entry(regs->pc, loglvl);
}
} while (!unwind_frame(tsk, &frame));
put_task_stack(tsk);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
@@ -447,12 +448,12 @@ void arm64_notify_segfault(unsigned long addr)
{
int code;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
code = SEGV_MAPERR;
else
code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
force_signal_inject(SIGSEGV, code, addr);
}
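dump_backtrace(), dump_backtrace_entry() and show_stack() now thread a const char *loglvl from the caller down to every printk(), so the same backtrace code prints at KERN_DEFAULT from show_regs() and at whatever severity other callers choose. A minimal sketch of the plumbing, not the kernel's exact code, assuming <linux/printk.h> and <asm/ptrace.h>:

static void dump_entry(unsigned long where, const char *loglvl)
{
        printk("%s %pS\n", loglvl, (void *)where);      /* prefix selects severity */
}

static void dump_trace(struct pt_regs *regs, const char *loglvl)
{
        printk("%sCall trace:\n", loglvl);
        dump_entry(instruction_pointer(regs), loglvl);
}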
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d51a898fd60f..4e016574bd91 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -340,7 +340,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = aarch32_kuser_helpers_setup(mm);
@@ -357,7 +357,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
#endif /* CONFIG_COMPAT_VDSO */
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
#endif /* CONFIG_COMPAT */
@@ -398,7 +398,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct mm_struct *mm = current->mm;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = __setup_additional_pages(VDSO_ABI_AA64,
@@ -406,7 +406,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
bprm,
uses_interp);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
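The mmap_sem churn in vdso.c, and throughout this series, is a mechanical switch to the mmap locking API: down_read()/up_read() and down_write()/up_write() on mm->mmap_sem become mmap_read_lock()/mmap_read_unlock() and mmap_write_lock()/mmap_write_unlock(), with _killable variants where the old code used the killable rwsem helpers. A sketch of the pattern, assuming <linux/mmap_lock.h>; touch_mm is a hypothetical caller:

static int touch_mm(struct mm_struct *mm)
{
        if (mmap_write_lock_killable(mm))       /* was down_write_killable(&mm->mmap_sem) */
                return -EINTR;
        /* ... modify the address space ... */
        mmap_write_unlock(mm);                  /* was up_write(&mm->mmap_sem) */

        mmap_read_lock(mm);                     /* was down_read(&mm->mmap_sem) */
        /* ... read-only VMA walk ... */
        mmap_read_unlock(mm);                   /* was up_read(&mm->mmap_sem) */
        return 0;
}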
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 3964738ebbde..7ea1e827e505 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -105,6 +105,14 @@ VDSO_CFLAGS += -D__uint128_t='void*'
VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
VDSO_CFLAGS += -Wno-int-to-pointer-cast
+# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
+# unreliable.
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y)
+VDSO_CFLAGS += -mthumb -fomit-frame-pointer
+else
+VDSO_CFLAGS += -marm
+endif
+
VDSO_AFLAGS := $(VDSO_CAFLAGS)
VDSO_AFLAGS += -D__ASSEMBLY__
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3be632177631..6827da7f3aa5 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,7 +13,6 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "image.h"
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index a1f6bc70c4e4..8c0035cab6b6 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -158,13 +158,22 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
- pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
+ p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
stage2_pgd_clear(kvm, pgd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pud_free(kvm, pud_table);
+ stage2_p4d_free(kvm, p4d_table);
put_page(virt_to_page(pgd));
}
+static void clear_stage2_p4d_entry(struct kvm *kvm, p4d_t *p4d, phys_addr_t addr)
+{
+ pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
+ stage2_p4d_clear(kvm, p4d);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ stage2_pud_free(kvm, pud_table);
+ put_page(virt_to_page(p4d));
+}
+
static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
@@ -208,12 +217,20 @@ static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
dsb(ishst);
}
-static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
+static inline void kvm_p4d_populate(p4d_t *p4dp, pud_t *pudp)
{
- WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
+ WRITE_ONCE(*p4dp, kvm_mk_p4d(pudp));
dsb(ishst);
}
+static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
+{
+#ifndef __PAGETABLE_P4D_FOLDED
+ WRITE_ONCE(*pgdp, kvm_mk_pgd(p4dp));
+ dsb(ishst);
+#endif
+}
+
/*
* Unmapping vs dcache management:
*
@@ -293,13 +310,13 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
clear_stage2_pud_entry(kvm, pud, start_addr);
}
-static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
+static void unmap_stage2_puds(struct kvm *kvm, p4d_t *p4d,
phys_addr_t addr, phys_addr_t end)
{
phys_addr_t next, start_addr = addr;
pud_t *pud, *start_pud;
- start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
+ start_pud = pud = stage2_pud_offset(kvm, p4d, addr);
do {
next = stage2_pud_addr_end(kvm, addr, end);
if (!stage2_pud_none(kvm, *pud)) {
@@ -317,6 +334,23 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
} while (pud++, addr = next, addr != end);
if (stage2_pud_table_empty(kvm, start_pud))
+ clear_stage2_p4d_entry(kvm, p4d, start_addr);
+}
+
+static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t next, start_addr = addr;
+ p4d_t *p4d, *start_p4d;
+
+ start_p4d = p4d = stage2_p4d_offset(kvm, pgd, addr);
+ do {
+ next = stage2_p4d_addr_end(kvm, addr, end);
+ if (!stage2_p4d_none(kvm, *p4d))
+ unmap_stage2_puds(kvm, p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+
+ if (stage2_p4d_table_empty(kvm, start_p4d))
clear_stage2_pgd_entry(kvm, pgd, start_addr);
}
@@ -351,7 +385,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
break;
next = stage2_pgd_addr_end(kvm, addr, end);
if (!stage2_pgd_none(kvm, *pgd))
- unmap_stage2_puds(kvm, pgd, addr, next);
+ unmap_stage2_p4ds(kvm, pgd, addr, next);
/*
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
@@ -391,13 +425,13 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
} while (pmd++, addr = next, addr != end);
}
-static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+static void stage2_flush_puds(struct kvm *kvm, p4d_t *p4d,
phys_addr_t addr, phys_addr_t end)
{
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(kvm, pgd, addr);
+ pud = stage2_pud_offset(kvm, p4d, addr);
do {
next = stage2_pud_addr_end(kvm, addr, end);
if (!stage2_pud_none(kvm, *pud)) {
@@ -409,6 +443,20 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
} while (pud++, addr = next, addr != end);
}
+static void stage2_flush_p4ds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
+{
+ p4d_t *p4d;
+ phys_addr_t next;
+
+ p4d = stage2_p4d_offset(kvm, pgd, addr);
+ do {
+ next = stage2_p4d_addr_end(kvm, addr, end);
+ if (!stage2_p4d_none(kvm, *p4d))
+ stage2_flush_puds(kvm, p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+}
+
static void stage2_flush_memslot(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
@@ -421,7 +469,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
do {
next = stage2_pgd_addr_end(kvm, addr, end);
if (!stage2_pgd_none(kvm, *pgd))
- stage2_flush_puds(kvm, pgd, addr, next);
+ stage2_flush_p4ds(kvm, pgd, addr, next);
if (next != end)
cond_resched_lock(&kvm->mmu_lock);
@@ -454,12 +502,21 @@ static void stage2_flush_vm(struct kvm *kvm)
static void clear_hyp_pgd_entry(pgd_t *pgd)
{
- pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
+ p4d_t *p4d_table __maybe_unused = p4d_offset(pgd, 0UL);
pgd_clear(pgd);
- pud_free(NULL, pud_table);
+ p4d_free(NULL, p4d_table);
put_page(virt_to_page(pgd));
}
+static void clear_hyp_p4d_entry(p4d_t *p4d)
+{
+ pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL);
+ VM_BUG_ON(p4d_huge(*p4d));
+ p4d_clear(p4d);
+ pud_free(NULL, pud_table);
+ put_page(virt_to_page(p4d));
+}
+
static void clear_hyp_pud_entry(pud_t *pud)
{
pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
@@ -511,12 +568,12 @@ static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
clear_hyp_pud_entry(pud);
}
-static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+static void unmap_hyp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t next;
pud_t *pud, *start_pud;
- start_pud = pud = pud_offset(pgd, addr);
+ start_pud = pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
/* Hyp doesn't use huge puds */
@@ -525,6 +582,23 @@ static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
} while (pud++, addr = next, addr != end);
if (hyp_pud_table_empty(start_pud))
+ clear_hyp_p4d_entry(p4d);
+}
+
+static void unmap_hyp_p4ds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+{
+ phys_addr_t next;
+ p4d_t *p4d, *start_p4d;
+
+ start_p4d = p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ /* Hyp doesn't use huge p4ds */
+ if (!p4d_none(*p4d))
+ unmap_hyp_puds(p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+
+ if (hyp_p4d_table_empty(start_p4d))
clear_hyp_pgd_entry(pgd);
}
@@ -548,7 +622,7 @@ static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
do {
next = pgd_addr_end(addr, end);
if (!pgd_none(*pgd))
- unmap_hyp_puds(pgd, addr, next);
+ unmap_hyp_p4ds(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -658,7 +732,7 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
return 0;
}
-static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
+static int create_hyp_pud_mappings(p4d_t *p4d, unsigned long start,
unsigned long end, unsigned long pfn,
pgprot_t prot)
{
@@ -669,7 +743,7 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
addr = start;
do {
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
if (pud_none_or_clear_bad(pud)) {
pmd = pmd_alloc_one(NULL, addr);
@@ -691,12 +765,45 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
return 0;
}
+static int create_hyp_p4d_mappings(pgd_t *pgd, unsigned long start,
+ unsigned long end, unsigned long pfn,
+ pgprot_t prot)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+ unsigned long addr, next;
+ int ret;
+
+ addr = start;
+ do {
+ p4d = p4d_offset(pgd, addr);
+
+ if (p4d_none(*p4d)) {
+ pud = pud_alloc_one(NULL, addr);
+ if (!pud) {
+ kvm_err("Cannot allocate Hyp pud\n");
+ return -ENOMEM;
+ }
+ kvm_p4d_populate(p4d, pud);
+ get_page(virt_to_page(p4d));
+ }
+
+ next = p4d_addr_end(addr, end);
+ ret = create_hyp_pud_mappings(p4d, addr, next, pfn, prot);
+ if (ret)
+ return ret;
+ pfn += (next - addr) >> PAGE_SHIFT;
+ } while (addr = next, addr != end);
+
+ return 0;
+}
+
static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
pgd_t *pgd;
- pud_t *pud;
+ p4d_t *p4d;
unsigned long addr, next;
int err = 0;
@@ -707,18 +814,18 @@ static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
- if (!pud) {
- kvm_err("Cannot allocate Hyp pud\n");
+ p4d = p4d_alloc_one(NULL, addr);
+ if (!p4d) {
+ kvm_err("Cannot allocate Hyp p4d\n");
err = -ENOMEM;
goto out;
}
- kvm_pgd_populate(pgd, pud);
+ kvm_pgd_populate(pgd, p4d);
get_page(virt_to_page(pgd));
}
next = pgd_addr_end(addr, end);
- err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
+ err = create_hyp_p4d_mappings(pgd, addr, next, pfn, prot);
if (err)
goto out;
pfn += (next - addr) >> PAGE_SHIFT;
@@ -977,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -985,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1015,22 +1122,40 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
free_pages_exact(pgd, stage2_pgd_size(kvm));
}
-static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
phys_addr_t addr)
{
pgd_t *pgd;
- pud_t *pud;
+ p4d_t *p4d;
pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
if (stage2_pgd_none(kvm, *pgd)) {
if (!cache)
return NULL;
- pud = mmu_memory_cache_alloc(cache);
- stage2_pgd_populate(kvm, pgd, pud);
+ p4d = mmu_memory_cache_alloc(cache);
+ stage2_pgd_populate(kvm, pgd, p4d);
get_page(virt_to_page(pgd));
}
- return stage2_pud_offset(kvm, pgd, addr);
+ return stage2_p4d_offset(kvm, pgd, addr);
+}
+
+static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr)
+{
+ p4d_t *p4d;
+ pud_t *pud;
+
+ p4d = stage2_get_p4d(kvm, cache, addr);
+ if (stage2_p4d_none(kvm, *p4d)) {
+ if (!cache)
+ return NULL;
+ pud = mmu_memory_cache_alloc(cache);
+ stage2_p4d_populate(kvm, p4d, pud);
+ get_page(virt_to_page(p4d));
+ }
+
+ return stage2_pud_offset(kvm, p4d, addr);
}
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1423,18 +1548,18 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
}
/**
- * stage2_wp_puds - write protect PGD range
+ * stage2_wp_puds - write protect P4D range
* @pgd: pointer to pgd entry
* @addr: range start address
* @end: range end address
*/
-static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
+static void stage2_wp_puds(struct kvm *kvm, p4d_t *p4d,
phys_addr_t addr, phys_addr_t end)
{
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(kvm, pgd, addr);
+ pud = stage2_pud_offset(kvm, p4d, addr);
do {
next = stage2_pud_addr_end(kvm, addr, end);
if (!stage2_pud_none(kvm, *pud)) {
@@ -1449,6 +1574,26 @@ static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
}
/**
+ * stage2_wp_p4ds - write protect PGD range
+ * @pgd: pointer to pgd entry
+ * @addr: range start address
+ * @end: range end address
+ */
+static void stage2_wp_p4ds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
+{
+ p4d_t *p4d;
+ phys_addr_t next;
+
+ p4d = stage2_p4d_offset(kvm, pgd, addr);
+ do {
+ next = stage2_p4d_addr_end(kvm, addr, end);
+ if (!stage2_p4d_none(kvm, *p4d))
+ stage2_wp_puds(kvm, p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+}
+
+/**
* stage2_wp_range() - write protect stage2 memory region range
* @kvm: The KVM pointer
* @addr: Start address of range
@@ -1475,7 +1620,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
break;
next = stage2_pgd_addr_end(kvm, addr, end);
if (stage2_pgd_present(kvm, *pgd))
- stage2_wp_puds(kvm, pgd, addr, next);
+ stage2_wp_p4ds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -1703,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
@@ -1734,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2311,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -2370,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
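Several of the KVM hunks above repeat one allocate-on-demand idiom whenever a new intermediate level is inserted: if the upper entry is empty, allocate the next-level table, populate the entry, and take a page reference so teardown can balance it. A condensed sketch of that shape (get_or_alloc_pud is a hypothetical name; the NULL mm mirrors how the hyp/stage2 code calls the allocators):

static pud_t *get_or_alloc_pud(p4d_t *p4dp, unsigned long addr)
{
        pud_t *new;

        if (p4d_none(*p4dp)) {
                new = pud_alloc_one(NULL, addr);        /* next-level table */
                if (!new)
                        return NULL;
                p4d_populate(NULL, p4dp, new);          /* hook it into the p4d */
                get_page(virt_to_page(p4dp));           /* account the new entry */
        }
        return pud_offset(p4dp, addr);
}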
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 60eccae2abad..78b87a64ca0a 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data)
return tmp + (tmp >> 64);
}
-unsigned int do_csum(const unsigned char *buff, int len)
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
{
unsigned int offset, shift, sum;
const u64 *ptr;
@@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
* odd/even alignment, and means we can ignore it until the very end.
*/
shift = offset * 8;
- data = READ_ONCE_NOCHECK(*ptr++);
+ data = *ptr++;
#ifdef __LITTLE_ENDIAN
data = (data >> shift) << shift;
#else
@@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len)
while (unlikely(len > 64)) {
__uint128_t tmp1, tmp2, tmp3, tmp4;
- tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
- tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
- tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
- tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+ tmp1 = *(__uint128_t *)ptr;
+ tmp2 = *(__uint128_t *)(ptr + 2);
+ tmp3 = *(__uint128_t *)(ptr + 4);
+ tmp4 = *(__uint128_t *)(ptr + 6);
len -= 64;
ptr += 8;
@@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
__uint128_t tmp;
sum64 = accumulate(sum64, data);
- tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+ tmp = *(__uint128_t *)ptr;
len -= 16;
ptr += 2;
@@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
}
if (len > 0) {
sum64 = accumulate(sum64, data);
- data = READ_ONCE_NOCHECK(*ptr);
+ data = *ptr;
len -= 8;
}
/*
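do_csum() intentionally reads whole words past the end of the buffer, which KASAN would flag on every access. Rather than wrapping each load in READ_ONCE_NOCHECK(), the function is now compiled with address-sanitizer instrumentation disabled and the loads become plain dereferences. Roughly, __no_sanitize_address boils down to a function attribute like the one sketched below; my_no_asan and sum_words are illustrative names only:

#define my_no_asan __attribute__((no_sanitize_address))

static unsigned int my_no_asan sum_words(const unsigned long *p, int n)
{
        unsigned int s = 0;

        while (n--)
                s += *p++;      /* loads here are not KASAN-instrumented */
        return s;
}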
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 0da020c563e6..0b8da1cc1c07 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -22,7 +22,6 @@
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index dff2d72b0883..8afb238ff335 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -36,7 +36,6 @@
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
@@ -145,6 +144,7 @@ static void show_pte(unsigned long addr)
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
do {
+ p4d_t *p4dp, p4d;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep, pte;
@@ -152,7 +152,13 @@ static void show_pte(unsigned long addr)
if (pgd_none(pgd) || pgd_bad(pgd))
break;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = READ_ONCE(*p4dp);
+ pr_cont(", p4d=%016llx", p4d_val(p4d));
+ if (p4d_none(p4d) || p4d_bad(p4d))
+ break;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
pr_cont(", pud=%016llx", pud_val(pud));
if (pud_none(pud) || pud_bad(pud))
@@ -491,11 +497,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -504,7 +510,7 @@ retry:
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
goto no_context;
}
#endif
@@ -526,7 +532,7 @@ retry:
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Handle the "normal" (no error) case first.
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 07f154b8b84a..0a52ce46f020 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -67,11 +67,13 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
pgd_t *pgdp = pgd_offset(mm, addr);
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
*pgsize = PAGE_SIZE;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
if ((pte_t *)pmdp == ptep) {
*pgsize = PMD_SIZE;
@@ -217,12 +219,14 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep = NULL;
pgdp = pgd_offset(mm, addr);
- pudp = pud_alloc(mm, pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ pudp = pud_alloc(mm, p4dp, addr);
if (!pudp)
return NULL;
@@ -261,6 +265,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
@@ -268,7 +273,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (!pgd_present(READ_ONCE(*pgdp)))
return NULL;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (!p4d_present(READ_ONCE(*p4dp)))
+ return NULL;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
if (sz != PUD_SIZE && pud_none(pud))
return NULL;
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index f87a32484ea8..7291b26ce788 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -18,7 +18,6 @@
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -84,17 +83,17 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}
-static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
bool early)
{
- if (pgd_none(READ_ONCE(*pgdp))) {
+ if (p4d_none(READ_ONCE(*p4dp))) {
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud)
: kasan_alloc_zeroed_page(node);
- __pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
+ __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
}
- return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
+ return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
@@ -126,11 +125,11 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}
-static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
+static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
unsigned long end, int node, bool early)
{
unsigned long next;
- pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
+ pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
do {
next = pud_addr_end(addr, end);
@@ -138,6 +137,18 @@ static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}
+static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_pud_populate(p4dp, addr, next, node, early);
+ } while (p4dp++, addr = next, addr != end);
+}
+
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
int node, bool early)
{
@@ -147,7 +158,7 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
pgdp = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- kasan_pud_populate(pgdp, addr, next, node, early);
+ kasan_p4d_populate(pgdp, addr, next, node, early);
} while (pgdp++, addr = next, addr != end);
}
@@ -179,7 +190,7 @@ void __init kasan_copy_shadow(pgd_t *pgdir)
pgdp = pgd_offset_k(KASAN_SHADOW_START);
pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
- pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+ pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
do {
set_pgd(pgdp_new, READ_ONCE(*pgdp));
} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c299b73dd5e4..990929c8837e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -290,18 +290,19 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
{
unsigned long next;
pud_t *pudp;
- pgd_t pgd = READ_ONCE(*pgdp);
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t p4d = READ_ONCE(*p4dp);
- if (pgd_none(pgd)) {
+ if (p4d_none(p4d)) {
phys_addr_t pud_phys;
BUG_ON(!pgtable_alloc);
pud_phys = pgtable_alloc(PUD_SHIFT);
- __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
- pgd = READ_ONCE(*pgdp);
+ __p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+ p4d = READ_ONCE(*p4dp);
}
- BUG_ON(pgd_bad(pgd));
+ BUG_ON(p4d_bad(p4d));
- pudp = pud_set_fixmap_offset(pgdp, addr);
+ pudp = pud_set_fixmap_offset(p4dp, addr);
do {
pud_t old_pud = READ_ONCE(*pudp);
@@ -340,7 +341,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
int flags)
{
unsigned long addr, end, next;
- pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
+ pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
/*
* If the virtual and physical address don't have the same offset
@@ -662,16 +663,17 @@ static void __init map_kernel(pgd_t *pgdp)
&vmlinux_initdata, 0, VM_NO_GUARD);
map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
- if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
+ if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
/*
* The fixmap falls in a separate pgd to the kernel, and doesn't
* live in the carveout for the swapper_pg_dir. We can simply
* re-use the existing dir for the fixmap.
*/
- set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
+ set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
READ_ONCE(*pgd_offset_k(FIXADDR_START)));
} else if (CONFIG_PGTABLE_LEVELS > 3) {
pgd_t *bm_pgdp;
+ p4d_t *bm_p4dp;
pud_t *bm_pudp;
/*
* The fixmap shares its top level pgd entry with the kernel
@@ -680,8 +682,9 @@ static void __init map_kernel(pgd_t *pgdp)
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
- bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
+ bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
+ bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
+ bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
pud_clear_fixmap();
} else {
@@ -715,6 +718,7 @@ void __init paging_init(void)
int kern_addr_valid(unsigned long addr)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep, pte;
@@ -726,7 +730,11 @@ int kern_addr_valid(unsigned long addr)
if (pgd_none(READ_ONCE(*pgdp)))
return 0;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none(READ_ONCE(*p4dp)))
+ return 0;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
return 0;
@@ -1069,6 +1077,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
unsigned long addr = start;
unsigned long next;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
@@ -1079,7 +1088,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
if (!pgdp)
return -ENOMEM;
- pudp = vmemmap_pud_populate(pgdp, addr, node);
+ p4dp = vmemmap_p4d_populate(pgdp, addr, node);
+ if (!p4dp)
+ return -ENOMEM;
+
+ pudp = vmemmap_pud_populate(p4dp, addr, node);
if (!pudp)
return -ENOMEM;
@@ -1114,11 +1127,12 @@ void vmemmap_free(unsigned long start, unsigned long end,
static inline pud_t * fixmap_pud(unsigned long addr)
{
pgd_t *pgdp = pgd_offset_k(addr);
- pgd_t pgd = READ_ONCE(*pgdp);
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+ p4d_t p4d = READ_ONCE(*p4dp);
- BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
+ BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
- return pud_offset_kimg(pgdp, addr);
+ return pud_offset_kimg(p4dp, addr);
}
static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -1144,25 +1158,27 @@ static inline pte_t * fixmap_pte(unsigned long addr)
*/
void __init early_fixmap_init(void)
{
- pgd_t *pgdp, pgd;
+ pgd_t *pgdp;
+ p4d_t *p4dp, p4d;
pud_t *pudp;
pmd_t *pmdp;
unsigned long addr = FIXADDR_START;
pgdp = pgd_offset_k(addr);
- pgd = READ_ONCE(*pgdp);
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = READ_ONCE(*p4dp);
if (CONFIG_PGTABLE_LEVELS > 3 &&
- !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
+ !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
/*
* We only end up here if the kernel mapping and the fixmap
* share the top level pgd entry, which should only happen on
* 16k/4 levels configurations.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- pudp = pud_offset_kimg(pgdp, addr);
+ pudp = pud_offset_kimg(p4dp, addr);
} else {
- if (pgd_none(pgd))
- __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+ if (p4d_none(p4d))
+ __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
pudp = fixmap_pud(addr);
}
if (pud_none(READ_ONCE(*pudp)))
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index bde08090b838..23f648c2a199 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -8,7 +8,6 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
-#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
@@ -198,6 +197,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
bool kernel_page_present(struct page *page)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp, pud;
pmd_t *pmdp, pmd;
pte_t *ptep;
@@ -210,7 +210,11 @@ bool kernel_page_present(struct page *page)
if (pgd_none(READ_ONCE(*pgdp)))
return false;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none(READ_ONCE(*p4dp)))
+ return false;
+
+ pudp = pud_offset(p4dp, addr);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
return false;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b7bebb12a56d..796e47a571e6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -9,11 +9,11 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index e65e8d82442a..6444ebfd06a6 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -11,6 +11,7 @@ config C6X
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/c6x/include/asm/cacheflush.h b/arch/c6x/include/asm/cacheflush.h
index 4540b40475e6..10922d528de6 100644
--- a/arch/c6x/include/asm/cacheflush.h
+++ b/arch/c6x/include/asm/cacheflush.h
@@ -17,21 +17,6 @@
#include <asm/string.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
- */
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
* physically-indexed cache management
*/
#define flush_icache_range(s, e) \
@@ -49,14 +34,12 @@ do { \
(unsigned long) page_address(page) + PAGE_SIZE)); \
} while (0)
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_C6X_CACHEFLUSH_H */
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 197c473b796a..8a91ceda39fa 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -26,7 +26,6 @@
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
@@ -64,6 +63,4 @@ extern unsigned long empty_zero_page;
*/
#define pgprot_writecombine pgprot_noncached
-#include <asm-generic/pgtable.h>
-
#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
index ec61034fdf56..2b9121c755be 100644
--- a/arch/c6x/kernel/traps.c
+++ b/arch/c6x/kernel/traps.c
@@ -344,12 +344,13 @@ asmlinkage int process_exception(struct pt_regs *regs)
static int kstack_depth_to_print = 48;
-static void show_trace(unsigned long *stack, unsigned long *endstack)
+static void show_trace(unsigned long *stack, unsigned long *endstack,
+ const char *loglvl)
{
unsigned long addr;
int i;
- pr_debug("Call trace:");
+ printk("%sCall trace:", loglvl);
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
@@ -364,16 +365,17 @@ static void show_trace(unsigned long *stack, unsigned long *endstack)
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
- pr_debug("\n ");
+ printk("%s\n ", loglvl);
#endif
- pr_debug(" [<%08lx>] %pS\n", addr, (void *)addr);
+ printk("%s [<%08lx>] %pS\n", loglvl, addr, (void *)addr);
i++;
}
}
- pr_debug("\n");
+ printk("%s\n", loglvl);
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
unsigned long *p, *endstack;
int i;
@@ -398,7 +400,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
- show_trace(stack, endstack);
+ show_trace(stack, endstack, loglvl);
}
int is_valid_bugaddr(unsigned long addr)
diff --git a/arch/csky/include/asm/highmem.h b/arch/csky/include/asm/highmem.h
index a345a2f2c22e..14645e3d5cd5 100644
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
@@ -30,22 +30,14 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
extern void *kmap_atomic_pfn(unsigned long pfn);
-extern struct page *kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() do {} while (0)
extern void kmap_init(void);
-#define kmap_prot PAGE_KERNEL
-
#endif /* __KERNEL__ */
#endif /* __ASM_CSKY_HIGHMEM_H */
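csky's <asm/highmem.h> sheds its private kmap()/kmap_atomic() prototypes because the architecture now relies on the consolidated highmem code and only supplies small hooks: ARCH_HAS_KMAP_FLUSH_TLB plus kmap_flush_tlb(), and kmap_atomic_high_prot()/kunmap_atomic_high() for the highmem-only part. A simplified sketch of how the common code is expected to call into such a hook; kmap_atomic_sketch is an illustrative stand-in, not the real include/linux/highmem.h definition:

static inline void *kmap_atomic_sketch(struct page *page, pgprot_t prot)
{
        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);        /* lowmem: direct mapping */
        return kmap_atomic_high_prot(page, prot); /* arch-provided hook */
}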
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index 332f51bc68fb..e909587f24c5 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -4,7 +4,7 @@
#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/types.h>
#include <linux/version.h>
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 9ab4a445ad99..2002cb7f1053 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -32,13 +32,6 @@
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-/* Find an entry in the third-level page table.. */
-#define __pte_offset_t(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- (pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep) set_pte((ptep), \
(((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
@@ -54,8 +47,6 @@
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
_CACHE_MASK)
-#define pte_unmap(pte) ((void)(pte))
-
#define __swp_type(x) (((x).val >> 4) & 0xff)
#define __swp_offset(x) ((x).val >> 12)
#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \
@@ -229,15 +220,6 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
-#define __pgd_offset(address) pgd_index(address)
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -281,19 +263,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
(pgprot_val(newprot)));
}
-/* to find an entry in a page-table-directory */
-static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
-/* Find an entry in the third-level page table.. */
-static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
-{
- return (pte_t *) (pmd_page_vaddr(*dir)) +
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-}
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
@@ -306,6 +275,4 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#include <asm-generic/pgtable.h>
-
#endif /* __ASM_CSKY_PGTABLE_H */
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index b5ad7d9de18c..6cd82d69c655 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_CPU_CK810
#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 5a82230bddf9..944ca2fdcdd9 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -18,7 +18,6 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
@@ -344,7 +343,7 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
trace_sys_exit(regs, syscall_get_return_value(current, regs));
}
-extern void show_stack(struct task_struct *task, unsigned long *stack);
+extern void show_stack(struct task_struct *task, unsigned long *stack, const char *loglvl);
void show_regs(struct pt_regs *fp)
{
unsigned long *sp;
@@ -420,6 +419,6 @@ void show_regs(struct pt_regs *fp)
}
pr_cont("\n");
- show_stack(NULL, (unsigned long *)fp->regs[4]);
+ show_stack(NULL, (unsigned long *)fp->regs[4], KERN_INFO);
return;
}
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 92809e1da723..16ae20a0af34 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -91,14 +91,14 @@ static void notrace walk_stackframe(struct task_struct *task,
static bool print_trace_address(unsigned long pc, void *arg)
{
- print_ip_sym(pc);
+ print_ip_sym((const char *)arg, pc);
return false;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}
static bool save_wchan(unsigned long pc, void *arg)
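For reference, a minimal sketch of a caller using the reworked show_stack() signature, where the log level is passed explicitly instead of being hard-coded in the arch code (example_dump_current_stack() is a hypothetical helper, not part of this series):

#include <linux/kernel.h>
#include <linux/sched/debug.h>

/* Dump the current task's backtrace at KERN_INFO severity. */
static void example_dump_current_stack(void)
{
	/* NULL task and NULL sp mean "current task, current stack". */
	show_stack(NULL, NULL, KERN_INFO);
}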
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index 60ff7adfad1d..abc3dbc658d4 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -50,7 +50,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
struct mm_struct *mm = current->mm;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(addr)) {
@@ -70,7 +70,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.vdso = (void *)addr;
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 4e6dc68f3258..0b9cbf2cf6a9 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
- int offset = __pgd_offset(address);
+ int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -120,7 +120,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -170,7 +170,7 @@ good_area:
address);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -178,7 +178,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -217,7 +217,7 @@ out_of_memory:
do_sigbus:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
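As a rough sketch of the locking pattern these hunks convert to, with mmap_read_lock()/mmap_read_unlock() replacing the open-coded rwsem calls (example_addr_is_mapped() is hypothetical, error handling trimmed):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Report whether addr falls inside an existing VMA of mm. */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* was down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);
	mapped = vma && vma->vm_start <= addr;
	mmap_read_unlock(mm);		/* was up_read(&mm->mmap_sem) */

	return mapped;
}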
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 813129145f3d..89ec32e602a1 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -13,59 +13,39 @@ static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
-void *kmap(struct page *page)
+void kmap_flush_tlb(unsigned long addr)
{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- addr = kmap_high(page);
- flush_tlb_one((unsigned long)addr);
-
- return addr;
+ flush_tlb_one(addr);
}
-EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kmap_flush_tlb);
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
+EXPORT_SYMBOL(kmap);
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
- set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
flush_tlb_one((unsigned long)vaddr);
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int idx;
if (vaddr < FIXADDR_START)
- goto out;
+ return;
#ifdef CONFIG_DEBUG_HIGHMEM
idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
@@ -78,11 +58,8 @@ void __kunmap_atomic(void *kvaddr)
(void) idx; /* to kill a warning */
#endif
kmap_atomic_idx_pop();
-out:
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
@@ -104,19 +81,6 @@ void *kmap_atomic_pfn(unsigned long pfn)
return (void *) vaddr;
}
-struct page *kmap_atomic_to_page(void *ptr)
-{
- unsigned long idx, vaddr = (unsigned long)ptr;
- pte_t *pte;
-
- if (vaddr < FIXADDR_START)
- return virt_to_page(ptr);
-
- idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
- return pte_page(*pte);
-}
-
static void __init kmap_pages_init(void)
{
unsigned long vaddr;
@@ -128,7 +92,7 @@ static void __init kmap_pages_init(void)
vaddr = PKMAP_BASE;
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
+ pgd = swapper_pg_dir + pgd_index(vaddr);
pud = (pud_t *)pgd;
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
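With the arch now supplying only kmap_flush_tlb() and kmap_atomic_high_prot(), the common kmap_atomic()/kunmap_atomic() entry points in linux/highmem.h handle the !PageHighMem short-circuit and the preempt/pagefault bookkeeping. A minimal sketch of a generic caller (example_zero_page() is hypothetical):

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero one page that may live in highmem. */
static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables pagefaults and preemption */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}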
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index cb64d8647a78..af627128314f 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -24,7 +24,6 @@
#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
@@ -158,9 +157,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
unsigned long vaddr;
vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index eb3ba6c9c927..ed1512381112 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -7,7 +7,6 @@
#include <linux/sched.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
/*
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index ec800e9d5aad..d11666d538fe 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -13,7 +13,6 @@ config H8300
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_CLOCKEVENTS
- select CLKDEV_LOOKUP
select COMMON_CLK
select ARCH_WANT_FRAME_POINTERS
select OF
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
index 9e2701069bbe..5942793f77a0 100644
--- a/arch/h8300/boot/compressed/Makefile
+++ b/arch/h8300/boot/compressed/Makefile
@@ -18,7 +18,7 @@ CONFIG_MEMORY_START ?= 0x00400000
CONFIG_BOOT_LINK_OFFSET ?= 0x00280000
IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET))))
-LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null)
LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup -T $(obj)/vmlinux.lds \
--defsym output=$(CONFIG_MEMORY_START)
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index 4d00152fab58..ea833a5d8bcf 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -1,9 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _H8300_PGTABLE_H
#define _H8300_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
-#include <asm-generic/pgtable.h>
extern void paging_init(void);
#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index e35cdf092e07..0ef55e3052c9 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -45,7 +45,6 @@
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 23a979a85f14..28ac88358a89 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -31,7 +31,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/page.h>
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ef7489b7c459..38d335488a54 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -43,7 +43,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index e47a9e0dc278..5d8b969cd8f3 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -115,7 +115,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
static int kstack_depth_to_print = 24;
-void show_stack(struct task_struct *task, unsigned long *esp)
+void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
{
unsigned long *stack, addr;
int i;
@@ -125,17 +125,17 @@ void show_stack(struct task_struct *task, unsigned long *esp)
stack = esp;
- pr_info("Stack from %08lx:", (unsigned long)stack);
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (((unsigned long)stack & (THREAD_SIZE - 1)) >=
THREAD_SIZE-4)
break;
if (i % 8 == 0)
- pr_info(" ");
+ printk("%s ", loglvl);
pr_cont(" %08lx", *stack++);
}
- pr_info("\nCall Trace:\n");
+ printk("%s\nCall Trace:\n", loglvl);
i = 0;
stack = esp;
while (((unsigned long)stack & (THREAD_SIZE - 1)) < THREAD_SIZE-4) {
@@ -150,10 +150,10 @@ void show_stack(struct task_struct *task, unsigned long *esp)
*/
if (check_kernel_text(addr)) {
if (i % 4 == 0)
- pr_info(" ");
+ printk("%s ", loglvl);
pr_cont(" [<%08lx>]", addr);
i++;
}
}
- pr_info("\n");
+ printk("%s\n", loglvl);
}
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
index fabffb83930a..d4bc9c16f2df 100644
--- a/arch/h8300/mm/fault.c
+++ b/arch/h8300/mm/fault.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/ptrace.h>
-#include <asm/pgtable.h>
void die(const char *str, struct pt_regs *fp, unsigned long err);
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 27a0020e3771..1f3b345d68b9 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -36,7 +36,6 @@
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
/*
diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c
index 3785f72bf3fc..4a60e2b5eb96 100644
--- a/arch/h8300/mm/memory.c
+++ b/arch/h8300/mm/memory.c
@@ -26,7 +26,6 @@
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/io.h>
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
index 4c5858b80f0e..c168c6980d05 100644
--- a/arch/hexagon/Makefile
+++ b/arch/hexagon/Makefile
@@ -30,7 +30,7 @@ TIR_NAME := r19
KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null)
libs-y += $(LIBGCC)
head-y := arch/hexagon/kernel/head.o
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index fb447de45d54..6eff0730e6ef 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -25,29 +25,17 @@
#define LINESIZE 32
#define LINEBITS 5
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
/*
* Flush Dcache range through current map.
*/
extern void flush_dcache_range(unsigned long start, unsigned long end);
+#define flush_dcache_range flush_dcache_range
/*
* Flush Icache range through current map.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
/*
* Memory-management related flushes are there to ensure in non-physically
@@ -78,6 +66,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
+#define copy_to_user_page copy_to_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
@@ -85,4 +74,6 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
+#include <asm-generic/cacheflush.h>
+
#endif
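The idiom here is that asm-generic/cacheflush.h supplies an empty fallback only when the arch has not defined a given hook; defining the macro name to itself (for example "#define flush_icache_range flush_icache_range") suppresses the fallback. A simplified sketch of that guard, assuming the generic header's usual #ifndef shape:

/* Simplified shape of the asm-generic fallback (illustrative only): */
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif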
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h
index 933dac167504..920660a04aa4 100644
--- a/arch/hexagon/include/asm/fixmap.h
+++ b/arch/hexagon/include/asm/fixmap.h
@@ -15,8 +15,4 @@
#include <asm-generic/fixmap.h>
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), \
- (vaddr)), (vaddr)), (vaddr))
-
#endif
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index d383e8bea5b2..dbb22b80b8c4 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -12,7 +12,6 @@
* Page table definitions for Qualcomm Hexagon processor.
*/
#include <asm/page.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* A handy thing to have if one has the RAM. Declared in head.S */
@@ -207,33 +206,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_val(*ptep) = _NULL_PTE;
}
-#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
-/**
- * pmd_index - returns the index of the entry in the PMD page
- * which would control the given virtual address
- */
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#endif
-
-/**
- * pgd_index - returns the index of the entry in the PGD page
- * which would control the given virtual address
- *
- * This returns the *index* for the address in the pgd_t
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset - find an offset in a page-table-directory
- */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/*
- * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/**
* pmd_none - check if pmd_entry is mapped
* @pmd_entry: pmd entry
@@ -404,31 +376,14 @@ static inline int pte_exec(pte_t pte)
*/
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
-/*
- * May need to invoke the virtual machine as well...
- */
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-/*
- * pte_offset_map - returns the linear address of the page table entry
- * corresponding to an address
- */
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-
-#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
-
-/* pte_offset_kernel - kernel version of pte_offset */
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
- + __pte_offset(address))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
+}
/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
-#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
/*
* Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
* interpreted as swap information. The remaining free bits are interpreted as
@@ -461,7 +416,4 @@ static inline int pte_exec(pte_t pte)
((type << 1) | \
((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
-/* Oh boy. There are a lot of possible arch overrides found in this file. */
-#include <asm-generic/pgtable.h>
-
#endif
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 69c623b14ddd..904134b37232 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -79,7 +79,7 @@ static const char *ex_name(int ex)
}
static void do_show_stack(struct task_struct *task, unsigned long *fp,
- unsigned long ip)
+ unsigned long ip, const char *loglvl)
{
int kstack_depth_to_print = 24;
unsigned long offset, size;
@@ -93,9 +93,8 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
if (task == NULL)
task = current;
- printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
- raw_smp_processor_id(), task->comm,
- task_pid_nr(task));
+ printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
+ task->comm, task_pid_nr(task));
if (fp == NULL) {
if (task == current) {
@@ -108,7 +107,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
}
if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
- printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
+ printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
return;
}
@@ -125,8 +124,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
- printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
- offset);
+ printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
if (((unsigned long) fp < low) || (high < (unsigned long) fp))
printk(KERN_CONT " (FP out of bounds!)");
if (modname)
@@ -136,8 +134,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
newfp = (unsigned long *) *fp;
if (((unsigned long) newfp) & 0x3) {
- printk(KERN_INFO "-- Corrupt frame pointer %p\n",
- newfp);
+ printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
break;
}
@@ -147,7 +144,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
+ 8);
if (regs->syscall_nr != -1) {
- printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
+ printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
regs->syscall_nr);
printk(KERN_CONT " psp: %lx elr: %lx\n",
pt_psp(regs), pt_elr(regs));
@@ -155,7 +152,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
} else {
/* really want to see more ... */
kstack_depth_to_print += 6;
- printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n",
+ printk("%s-- %s (0x%lx) badva: %lx\n", loglvl,
ex_name(pt_cause(regs)), pt_cause(regs),
pt_badva(regs));
}
@@ -178,10 +175,10 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
}
}
-void show_stack(struct task_struct *task, unsigned long *fp)
+void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
/* Saved link reg is one word above FP */
- do_show_stack(task, fp, 0);
+ do_show_stack(task, fp, 0, loglvl);
}
int die(const char *str, struct pt_regs *regs, long err)
@@ -207,7 +204,7 @@ int die(const char *str, struct pt_regs *regs, long err)
print_modules();
show_regs(regs);
- do_show_stack(current, &regs->r30, pt_elr(regs));
+ do_show_stack(current, &regs->r30, pt_elr(regs), KERN_EMERG);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index 25a1d9cfd4cc..b70970ac809f 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -52,7 +52,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long vdso_base;
struct mm_struct *mm = current->mm;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
/* Try to get it loaded right near ld.so/glibc. */
@@ -76,7 +76,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.vdso = (void *)vdso_base;
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
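The write side follows the same conversion; a minimal sketch of the pattern used around an address-space update, with mmap_write_lock_killable() standing in for down_write_killable(&mm->mmap_sem) (example_update_mm() is hypothetical, the update itself elided):

#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/errno.h>

/* Take the mmap lock for writing, interruptibly, around a mapping change. */
static int example_update_mm(struct mm_struct *mm)
{
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* ... insert or adjust a mapping here ... */

	mmap_write_unlock(mm);
	return 0;
}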
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index f8ddc35cf159..650bca92f0b7 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -11,7 +11,7 @@
*/
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/*
* For clear_user(), exploit previously defined copy_to_user function
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 72334b26317a..cd3808f96b93 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -11,7 +11,6 @@
* execptions.
*/
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
@@ -55,7 +54,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -107,11 +106,11 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Handle copyin/out exception cases */
if (!user_mode(regs))
@@ -138,7 +137,7 @@ good_area:
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 32240000dc0c..f817f3d5e758 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -40,7 +40,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from
endif
quiet_cmd_gzip = GZIP $@
-cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
+cmd_gzip = cat $(real-prereqs) | $(_GZIP) -n -f -9 > $@
quiet_cmd_objcopy = OBJCOPY $@
cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index 6d3478f8abc8..708c0fa5d975 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -12,44 +12,22 @@
#include <asm/page.h>
-/*
- * Cache flushing routines. This is the kind of stuff that can be very expensive, so try
- * to avoid them whenever possible.
- */
-
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma,page) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &(page)->flags); \
} while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
extern void clflush_cache_range(void *addr, int size);
-
-#define flush_icache_user_range(vma, page, user_addr, len) \
+#define flush_icache_user_page(vma, page, user_addr, len) \
do { \
unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index f4c491044882..2a3050345099 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -36,9 +36,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#if CONFIG_PGTABLE_LEVELS == 4
static inline void
-pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud)
{
- pgd_val(*pgd_entry) = __pa(pud);
+ p4d_val(*p4d_entry) = __pa(pud);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 0e7b645b76c6..10850897a91c 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -283,12 +283,12 @@ extern unsigned long VMALLOC_END;
#define pud_page(pud) virt_to_page((pud_val(pud) + PAGE_OFFSET))
#if CONFIG_PGTABLE_LEVELS == 4
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
-#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
-#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
-#define pgd_page(pgd) virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#define p4d_none(p4d) (!p4d_val(p4d))
+#define p4d_bad(p4d) (!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d) (p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)
+#define p4d_page_vaddr(p4d) ((unsigned long) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d) virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
#endif
/*
@@ -364,44 +364,13 @@ pgd_index (unsigned long address)
return (region << (PAGE_SHIFT - 6)) | l1index;
}
-
-/* The offset in the 1-level directory is given by the 3 region bits
- (61..63) and the level-1 bits. */
-static inline pgd_t*
-pgd_offset (const struct mm_struct *mm, unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
-/* In the kernel's mapped region we completely ignore the region number
- (since we know it's in region number 5). */
-#define pgd_offset_k(addr) \
- (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+#define pgd_index pgd_index
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-#if CONFIG_PGTABLE_LEVELS == 4
-/* Find an entry in the second-level page table.. */
-#define pud_offset(dir,addr) \
- ((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pmd_offset(dir,addr) \
- ((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-/*
- * Find an entry in the third-level page table. This looks more complicated than it
- * should be because some platforms place page tables in high memory.
- */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_unmap(pte) do { } while (0)
-
/* atomic versions of the some PTE manipulations: */
static inline int
@@ -580,10 +549,8 @@ extern struct page *zero_page_memmap_ptr;
#if CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#endif
-#include <asm-generic/5level-fixup.h>
-#include <asm-generic/pgtable.h>
+#include <asm-generic/pgtable-nop4d.h>
#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 7ff574d56429..b3aa46090101 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -114,7 +114,6 @@ static inline long regs_return_value(struct pt_regs *regs)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
- extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 5c7e79eccaee..8aa473a4b0f4 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -37,7 +37,7 @@
#include <linux/page-flags.h>
#include <asm/intrinsics.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/extable.h>
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a54eacbc61a9..f932b25fb817 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -37,7 +37,6 @@
#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/setup.h>
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 2ac926331500..c5efac285bc3 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -38,12 +38,12 @@
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index e6f45170a4b9..30f1ef760136 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -21,18 +21,19 @@
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/fpu.h>
#include <asm/kregs.h>
#include <asm/mmu_context.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/mca_asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/export.h>
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index e7862e4cb1e7..6fff934150eb 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/pgtable.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
@@ -37,7 +38,6 @@
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 1efcbe5f0c78..d6d4229b28db 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -48,11 +48,11 @@
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index a6d6a0556f08..7a7df944d798 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -17,8 +17,8 @@
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/exception.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6fb54dfa1350..2703f7795672 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1631,7 +1631,7 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
if (read_trylock(&tasklist_lock)) {
do_each_thread (g, t) {
printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t, NULL);
+ show_stack(t, NULL, KERN_DEFAULT);
} while_each_thread (g, t);
read_unlock(&tasklist_lock);
}
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 086cfa4999fd..0d6b8cf9d1d0 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -25,9 +25,9 @@
* Use per cpu MCA/INIT stacks for all data.
*/
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index df257002950e..971f166873aa 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2260,13 +2260,13 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
* now we atomically find some area in the address space and
* remap the buffer in it.
*/
- down_write(&task->mm->mmap_sem);
+ mmap_write_lock(task->mm);
/* find some free area in address space, must have mmap sem held */
vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
if (IS_ERR_VALUE(vma->vm_start)) {
DPRINT(("Cannot find unmapped area for size %ld\n", size));
- up_write(&task->mm->mmap_sem);
+ mmap_write_unlock(task->mm);
goto error;
}
vma->vm_end = vma->vm_start + size;
@@ -2277,7 +2277,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
/* can only be applied to current task, need to have the mm semaphore held when called */
if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
DPRINT(("Can't remap buffer\n"));
- up_write(&task->mm->mmap_sem);
+ mmap_write_unlock(task->mm);
goto error;
}
@@ -2288,7 +2288,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
insert_vm_struct(mm, vma);
vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
- up_write(&task->mm->mmap_sem);
+ mmap_write_unlock(task->mm);
/*
* keep track of user level virtual address
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 10cb9382ab76..96dfb9e4b16f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -64,12 +64,13 @@ EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
-void
+static void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
unsigned long ip, sp, bsp;
+ const char *loglvl = arg;
- printk("\nCall Trace:\n");
+ printk("%s\nCall Trace:\n", loglvl);
do {
unw_get_ip(info, &ip);
if (ip == 0)
@@ -77,22 +78,22 @@ ia64_do_show_stack (struct unw_frame_info *info, void *arg)
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
- printk(" [<%016lx>] %pS\n"
+ printk("%s [<%016lx>] %pS\n"
" sp=%016lx bsp=%016lx\n",
- ip, (void *)ip, sp, bsp);
+ loglvl, ip, (void *)ip, sp, bsp);
} while (unw_unwind(info) >= 0);
}
void
-show_stack (struct task_struct *task, unsigned long *sp)
+show_stack (struct task_struct *task, unsigned long *sp, const char *loglvl)
{
if (!task)
- unw_init_running(ia64_do_show_stack, NULL);
+ unw_init_running(ia64_do_show_stack, (void *)loglvl);
else {
struct unw_frame_info info;
unw_init_from_blocked_task(&info, task);
- ia64_do_show_stack(&info, NULL);
+ ia64_do_show_stack(&info, (void *)loglvl);
}
}
@@ -150,7 +151,7 @@ show_regs (struct pt_regs *regs)
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
} else
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_DEFAULT);
}
/* local support for deprecated console_print */
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index bf9c24d9ce84..82aaacf64583 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/elf.h>
#include <linux/tracehook.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 7124fe7bec7c..527a7b896a6e 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -8,10 +8,10 @@
* Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
* Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mca_asm.h>
/* Must be relocatable PIC code callable as a C function
@@ -319,5 +319,3 @@ GLOBAL_ENTRY(ia64_dump_cpu_regs)
;;
br.ret.sptk.many rp
END(ia64_dump_cpu_regs)
-
-
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4009383453f7..d2d440fe855b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -25,6 +25,7 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <linux/acpi.h>
#include <linux/console.h>
@@ -56,7 +57,6 @@
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index de35c54f033d..bbfd421e6deb 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -40,7 +40,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6501d9a9a21b..016683b743c2 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -50,7 +50,6 @@
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 3776ef225125..0750f367837d 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -19,9 +19,9 @@
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/pal.h>
-#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 6b5652ee76f9..d259690eb91a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/thread_info.h>
#define EMITS_PT_NOTE
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 8786fa5c7612..d7d31c718d2d 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -22,7 +22,6 @@
#include <asm/meminit.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 30d0c1fca99e..3a4dec334cc5 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -15,7 +15,6 @@
#include <linux/prefetch.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>
@@ -29,6 +28,7 @@ static int
mapped_kernel_page_is_present (unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
@@ -37,7 +37,11 @@ mapped_kernel_page_is_present (unsigned long address)
if (pgd_none(*pgd) || pgd_bad(*pgd))
return 0;
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d) || p4d_bad(*p4d))
+ return 0;
+
+ pud = pud_offset(p4d, address);
if (pud_none(*pud) || pud_bad(*pud))
return 0;
@@ -70,8 +74,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
- /* mmap_sem is performance critical.... */
- prefetchw(&mm->mmap_sem);
+ /* mmap_lock is performance critical.... */
+ prefetchw(&mm->mmap_lock);
/*
* If we're in an interrupt or have no user context, we must not take the fault..
@@ -82,7 +86,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
* If fault is in region 5 and we are in the kernel, we may already
- * have the mmap_sem (pfn_valid macro is called during mmap). There
+ * have the mmap_lock (pfn_valid macro is called during mmap). There
* is no vma for region 5 addr's anyway, so skip getting the semaphore
* and go directly to the exception handling code.
*/
@@ -102,7 +106,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (mask & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma && !prev_vma )
@@ -169,7 +173,7 @@ retry:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -178,7 +182,7 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
check_expansion:
@@ -209,7 +213,7 @@ retry:
goto good_area;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
@@ -275,7 +279,7 @@ retry:
return;
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
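The p4d conversion adds one explicit level to every page-table walk; on architectures with fewer levels the p4d step is folded away by pgtable-nop4d.h and compiles to nothing. A sketch of the resulting walk for a kernel virtual address (example_walk_kernel_pte() is hypothetical):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Walk init_mm down to the PTE for a kernel virtual address, if mapped. */
static pte_t *example_walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* new level; folded on <5-level configs */
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}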
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index d16e419fd712..32352a73df0c 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -30,12 +30,14 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, taddr);
- pud = pud_alloc(mm, pgd, taddr);
+ p4d = p4d_offset(pgd, taddr);
+ pud = pud_alloc(mm, p4d, taddr);
if (pud) {
pmd = pmd_alloc(mm, pud, taddr);
if (pmd)
@@ -49,17 +51,21 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, taddr);
if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, taddr);
- if (pud_present(*pud)) {
- pmd = pmd_offset(pud, taddr);
- if (pmd_present(*pmd))
- pte = pte_offset_map(pmd, taddr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, taddr);
+ if (pud_present(*pud)) {
+ pmd = pmd_offset(pud, taddr);
+ if (pmd_present(*pmd))
+ pte = pte_offset_map(pmd, taddr);
+ }
}
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d637b4ea3147..0b3fb4c7af29 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -118,13 +118,13 @@ ia64_init_addr_space (void)
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -136,13 +136,13 @@ ia64_init_addr_space (void)
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
}
}
@@ -208,6 +208,7 @@ static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -215,7 +216,10 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
{
- pud = pud_alloc(&init_mm, pgd, address);
+ p4d = p4d_alloc(&init_mm, pgd, address);
+ if (!p4d)
+ goto out;
+ pud = pud_alloc(&init_mm, p4d, address);
if (!pud)
goto out;
pmd = pmd_alloc(&init_mm, pud, address);
@@ -382,6 +386,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)
do {
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -392,7 +397,13 @@ int vmemmap_find_next_valid_pfn(int node, int i)
continue;
}
- pud = pud_offset(pgd, end_address);
+ p4d = p4d_offset(pgd, end_address);
+ if (p4d_none(*p4d)) {
+ end_address += P4D_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(p4d, end_address);
if (pud_none(*pud)) {
end_address += PUD_SIZE;
continue;
@@ -430,6 +441,7 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
struct page *map_start, *map_end;
int node;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -444,12 +456,20 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
if (pgd_none(*pgd)) {
+ p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!p4d)
+ goto err_alloc;
+ pgd_populate(&init_mm, pgd, p4d);
+ }
+ p4d = p4d_offset(pgd, address);
+
+ if (p4d_none(*p4d)) {
pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
if (!pud)
goto err_alloc;
- pgd_populate(&init_mm, pgd, pud);
+ p4d_populate(&init_mm, p4d, pud);
}
- pud = pud_offset(pgd, address);
+ pud = pud_offset(p4d, address);
if (pud_none(*pud)) {
pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
diff --git a/arch/m68k/68000/m68EZ328.c b/arch/m68k/68000/m68EZ328.c
index 6a309a3cfbfc..05f137dc257e 100644
--- a/arch/m68k/68000/m68EZ328.c
+++ b/arch/m68k/68000/m68EZ328.c
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68EZ328.h>
#ifdef CONFIG_UCSIMM
diff --git a/arch/m68k/68000/m68VZ328.c b/arch/m68k/68000/m68VZ328.c
index 81b5491685a4..ada87b23afdc 100644
--- a/arch/m68k/68000/m68VZ328.c
+++ b/arch/m68k/68000/m68VZ328.c
@@ -22,8 +22,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/rtc.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
#include <asm/bootstd.h>
diff --git a/arch/m68k/68000/timers.c b/arch/m68k/68000/timers.c
index 1c8e8a83c325..e8dfdd2556a5 100644
--- a/arch/m68k/68000/timers.c
+++ b/arch/m68k/68000/timers.c
@@ -22,7 +22,6 @@
#include <linux/clocksource.h>
#include <linux/rtc.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 60ac1cd8b96f..bd2d29c22a10 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,7 +28,7 @@ config COLDFIRE
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
select GPIOLIB
- select HAVE_CLK
+ select HAVE_LEGACY_CLK
endchoice
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 5d9288384096..ce6db5e5a5a3 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -135,10 +135,10 @@ vmlinux.gz: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
- gzip -9c vmlinux.tmp >vmlinux.gz
+ $(_GZIP) -9c vmlinux.tmp >vmlinux.gz
rm vmlinux.tmp
else
- gzip -9c vmlinux >vmlinux.gz
+ $(_GZIP) -9c vmlinux >vmlinux.gz
endif
bzImage: vmlinux.bz2
@@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
- bzip2 -1c vmlinux.tmp >vmlinux.bz2
+ $(_BZIP2) -1c vmlinux.tmp >vmlinux.bz2
rm vmlinux.tmp
else
- bzip2 -1c vmlinux >vmlinux.bz2
+ $(_BZIP2) -1c vmlinux >vmlinux.bz2
endif
archclean:
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 4eb911d64e8d..8f23b2fab64c 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -32,7 +32,6 @@
#include <asm/bootinfo-amiga.h>
#include <asm/byteorder.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 7d168e6dfb01..762da5d7a415 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -13,7 +13,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-apollo.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/apollohw.h>
#include <asm/irq.h>
#include <asm/machdep.h>
diff --git a/arch/m68k/atari/atasound.c b/arch/m68k/atari/atasound.c
index 1c1181ebb947..a8724d998c39 100644
--- a/arch/m68k/atari/atasound.c
+++ b/arch/m68k/atari/atasound.c
@@ -26,7 +26,6 @@
#include <asm/atarihw.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/atariints.h>
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6152f9f631d2..ce79b322a99c 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -26,7 +26,6 @@
#include <asm/setup.h>
#include <asm/machdep.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/atarihw.h>
#include <asm/atari_stram.h>
#include <asm/io.h>
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 8ebaabc931cd..50f4d01363df 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -31,7 +31,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index 62b0eb6cf69a..84eab0f5e00a 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -216,8 +216,10 @@ static int __init mcf_pci_init(void)
/* Keep a virtual mapping to IO/config space active */
iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
- if (iospace == 0)
+ if (iospace == 0) {
+ pci_free_host_bridge(bridge);
return -ENODEV;
+ }
pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
(u32) iospace);
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 27fa9465d19d..2b746f55f419 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -48,7 +48,6 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_ROM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 1e2544ecaf88..1ac55e7b47f0 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -254,9 +254,11 @@ static inline void __flush_page_to_ram(void *vaddr)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
-extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+extern void flush_icache_user_range(unsigned long address,
+ unsigned long endaddr);
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
@@ -264,7 +266,7 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
{
flush_cache_page(vma, vaddr, page_to_pfn(page));
memcpy(dst, src, len);
- flush_icache_user_range(vma, page, vaddr, len);
+ flush_icache_user_page(vma, page, vaddr, len);
}
static inline void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 11e9a9dcbfb2..2731f07e7be8 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -9,25 +9,8 @@
#include <asm/mcfsim.h>
#define flush_cache_all() __flush_cache_all()
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_range(start, len) __flush_dcache_all()
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, len) __flush_icache_all()
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
void mcf_cache_push(void);
@@ -98,4 +81,6 @@ static inline void cache_clear(unsigned long paddr, int len)
__clear_cache_all();
}
+#include <asm-generic/cacheflush.h>
+
#endif /* _M68KNOMMU_CACHEFLUSH_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 0031cd387b75..8d4ec05996c5 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -170,7 +170,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
}
#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
+#define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd)))
static inline int pte_none(pte_t pte)
{
@@ -311,64 +311,6 @@ static inline pte_t pte_mkcache(pte_t pte)
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
/*
- * Find an entry in a pagetable directory.
- */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/*
- * Find an entry in a kernel pagetable directory.
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * Find an entry in the third-level pagetable.
- */
-#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
-
-/*
- * Disable caching for page at given kernel virtual address.
- */
-static inline void nocache_page(void *vaddr)
-{
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long addr = (unsigned long) vaddr;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mknocache(*ptep);
-}
-
-/*
- * Enable caching for page at given kernel virtual address.
- */
-static inline void cache_page(void *vaddr)
-{
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long addr = (unsigned long) vaddr;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mkcache(*ptep);
-}
-
-/*
* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
*/
#define __swp_type(x) ((x).val & 0xFF)
@@ -380,9 +322,6 @@ static inline void cache_page(void *vaddr)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
- __pte_offset(addr))
-#define pte_unmap(pte) ((void) 0)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index c66e42917912..b4fc3b4f6bb3 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -18,6 +18,12 @@ extern void init_pointer_table(void *table, int type);
extern void *get_pointer_table(int type);
extern int free_pointer_table(void *table, int type);
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return get_pointer_table(TABLE_PTE);
@@ -82,7 +88,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_set(pmd, page);
}
-#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 48f19f0ab1e7..8076467eff4b 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -128,7 +128,7 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
}
#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
+#define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK))
@@ -192,91 +192,9 @@ static inline pte_t pte_mkcache(pte_t pte)
return pte;
}
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-static inline pgd_t *pgd_offset(const struct mm_struct *mm,
- unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];
-static inline pgd_t *pgd_offset_k(unsigned long address)
-{
- return kernel_pg_dir + (address >> PGDIR_SHIFT);
-}
-
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
-{
- return (pmd_t *)pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
-}
-
-/* Find an entry in the third-level page table.. */
-static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
-{
- return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-}
-
-#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_unmap(pte) ((void)0)
-
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-
-/* Prior to calling these routines, the page should have been flushed
- * from both the cache and ATC, or the CPU might not notice that the
- * cache setting for the page has been changed. -jskov
- */
-static inline void nocache_page(void *vaddr)
-{
- unsigned long addr = (unsigned long)vaddr;
-
- if (CPU_IS_040_OR_060) {
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mknocache(*ptep);
- }
-}
-
-static inline void cache_page(void *vaddr)
-{
- unsigned long addr = (unsigned long)vaddr;
-
- if (CPU_IS_040_OR_060) {
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mkcache(*ptep);
- }
-}
-
/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define __swp_type(x) (((x).val >> 4) & 0xff)
#define __swp_offset(x) ((x).val >> 12)
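The walkers deleted from this header are not lost: once <asm-generic/pgtable.h> stops being included (see the pgtable_mm.h hunk below), m68k picks up the common definitions from <linux/pgtable.h>, which only need the pmd_page_vaddr() accessor kept above. A simplified sketch of the generic shape, assuming the usual PAGE_SHIFT/PTRS_PER_PTE constants (not the literal header text; folded-level handling omitted):

/* Simplified sketch of the generic helpers that replace the removed copies. */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	/* built on the arch-provided pmd_page_vaddr() accessor */
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}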
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index f0e5167de834..aca22c2c1ee2 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -176,7 +176,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
#define pgprot_dmacoherent(prot) pgprot_dmacoherent(prot)
#endif /* CONFIG_COLDFIRE */
-#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
#endif /* _M68K_PGTABLE_H */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index ccc4568299e5..87151d67d91e 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -53,6 +53,4 @@ extern void paging_init(void);
#define KMAP_START 0
#define KMAP_END 0xffffffff
-#include <asm-generic/pgtable.h>
-
#endif /* _M68KNOMMU_PGTABLE_H */
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 0caa18a08437..5b24283a0a42 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -112,8 +112,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_page(pte) \
((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
-#define __pmd_page(pmd) \
-((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
+}
static inline int pte_none (pte_t pte) { return !pte_val (pte); }
static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; }
@@ -127,7 +130,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
#define pte_page(pte) virt_to_page(__pte_page(pte))
-#define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
+#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
@@ -171,21 +174,6 @@ static inline pte_t pte_mkcache(pte_t pte) { return pte; }
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
-/* Find an entry in a pagetable directory. */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, address) \
-((mm)->pgd + pgd_index(address))
-
-/* Find an entry in a kernel pagetable directory. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the third-level pagetable. */
-#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
-#define pte_offset_map(pmd, address) ((pte_t *)page_address(pmd_page(*pmd)) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
-
/* Macros to (de)construct the fake PTEs representing swap pages. */
#define __swp_type(x) ((x).val & 0x7F)
#define __swp_offset(x) (((x).val) >> 7)
diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h
index ef04c43acd13..93f2a8431c0e 100644
--- a/arch/m68k/include/asm/sun3xflop.h
+++ b/arch/m68k/include/asm/sun3xflop.h
@@ -10,8 +10,8 @@
#ifndef __ASM_SUN3X_FLOPPY_H
#define __ASM_SUN3X_FLOPPY_H
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/sun3x.h>
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index a24cfe4a0d32..dcfb69361408 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -42,7 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
__put_user_asm(__pu_err, __pu_val, ptr, l); \
break; \
case 8: \
- memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
+ memcpy((void __force *)ptr, &__pu_val, sizeof(*(ptr))); \
break; \
default: \
__pu_err = __put_user_bad(); \
@@ -60,7 +60,7 @@ extern int __put_user_bad(void);
* aliasing issues.
*/
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __user *)(x))
#define __put_user_asm(err,x,ptr,bwl) \
__asm__ ("move" #bwl " %0,%1" \
@@ -85,7 +85,7 @@ extern int __put_user_bad(void);
u64 l; \
__typeof__(*(ptr)) t; \
} __gu_val; \
- memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l)); \
+ memcpy(&__gu_val.l, (const void __force *)ptr, sizeof(__gu_val.l)); \
(x) = __gu_val.t; \
break; \
} \
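The uaccess_no.h change is about static checking rather than runtime behaviour: the 8-byte paths cast through __force so sparse accepts the deliberate crossing from the __user address space into a plain memcpy(). A minimal illustration of the idiom (copy_u64_to_user() is a made-up helper, not part of the patch):

/* Illustration only: __force marks the address-space crossing as intentional. */
static inline void copy_u64_to_user(u64 __user *dst, const u64 *src)
{
	memcpy((void __force *)dst, src, sizeof(*src));	/* nommu: direct copy */
}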
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index a54788458ca3..29de2b3108ea 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -255,6 +255,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-amiga.h>
#include <asm/bootinfo-atari.h>
@@ -264,7 +265,6 @@
#include <asm/bootinfo-vme.h>
#include <asm/setup.h>
#include <asm/entry.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_MAC
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 8f0d9140700f..90ae376b7ab1 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -36,7 +36,6 @@
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
asmlinkage void ret_from_fork(void);
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 748c63bd0081..94b3b274186d 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -23,7 +23,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
/*
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index a63483de7a42..e779b19e0193 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -38,7 +38,6 @@
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
unsigned long memory_start;
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 05610e6924c1..b3ff39588f36 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -47,7 +47,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 18a4de7d5934..1c235d8f53f3 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -399,7 +399,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* Verify that the specified address region actually belongs
* to this process.
*/
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, addr);
if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
goto out_unlock;
@@ -450,7 +450,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
}
}
out_unlock:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return ret;
}
@@ -472,7 +472,7 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
spinlock_t *ptl;
unsigned long mem_value;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
@@ -501,11 +501,11 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
__put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return mem_value;
bad_access:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* This is not necessarily a bad access, we can get here if
a memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page stuff, then re-iterate.
@@ -545,13 +545,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
struct mm_struct *mm = current->mm;
unsigned long mem_value;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return mem_value;
}
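The mmap_sem conversions throughout this diff go through the new wrappers from <linux/mmap_lock.h>, with the rwsem itself renamed to mm->mmap_lock; roughly (simplified sketch, not the full header):

/* Simplified sketch of the wrappers behind the conversions above. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_lock);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}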
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 344f93d36a9a..df6fc782754f 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -811,13 +811,13 @@ asmlinkage void buserr_c(struct frame *fp)
static int kstack_depth_to_print = 48;
-void show_trace(unsigned long *stack)
+static void show_trace(unsigned long *stack, const char *loglvl)
{
unsigned long *endstack;
unsigned long addr;
int i;
- pr_info("Call Trace:");
+ printk("%sCall Trace:", loglvl);
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
i = 0;
@@ -916,7 +916,7 @@ void show_registers(struct pt_regs *regs)
default:
pr_cont("\n");
}
- show_stack(NULL, (unsigned long *)addr);
+ show_stack(NULL, (unsigned long *)addr, KERN_INFO);
pr_info("Code:");
set_fs(KERNEL_DS);
@@ -935,7 +935,8 @@ void show_registers(struct pt_regs *regs)
pr_cont("\n");
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
unsigned long *p;
unsigned long *endstack;
@@ -949,7 +950,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
- pr_info("Stack from %08lx:", (unsigned long)stack);
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
@@ -959,7 +960,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
- show_trace(stack);
+ show_trace(stack, loglvl);
}
/*
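With the extra loglvl argument, the severity is chosen at the call site instead of being hard-wired to pr_info(); for example (hypothetical caller, not part of the patch):

/* Hypothetical caller: the log level is now picked explicitly. */
static void dump_stack_on_panic(void)
{
	show_stack(NULL, NULL, KERN_EMERG);	/* previously implicit KERN_INFO */
}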
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index 1b4c562753da..928dbd33fc4a 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -26,7 +26,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index d0126ab01360..5c9f3a2d6538 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -36,7 +36,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/macintosh.h>
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 079e64898e6a..5ecb3310e874 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -73,7 +73,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-void flush_icache_range(unsigned long address, unsigned long endaddr)
+void flush_icache_user_range(unsigned long address, unsigned long endaddr)
{
if (CPU_IS_COLDFIRE) {
unsigned long start, end;
@@ -104,9 +104,18 @@ void flush_icache_range(unsigned long address, unsigned long endaddr)
: "di" (FLUSH_I));
}
}
+
+void flush_icache_range(unsigned long address, unsigned long endaddr)
+{
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ flush_icache_user_range(address, endaddr);
+ set_fs(old_fs);
+}
EXPORT_SYMBOL(flush_icache_range);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (CPU_IS_COLDFIRE) {
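The rename separates flushes of kernel text from flushes done on behalf of user mappings; kernel-side callers keep using flush_icache_range(), which now wraps the user variant under KERNEL_DS as shown above. A hypothetical caller, for illustration only:

/* Hypothetical caller flushing freshly written kernel instructions. */
static void flush_new_kernel_insns(void *insns, size_t len)
{
	unsigned long addr = (unsigned long)insns;

	flush_icache_range(addr, addr + len);
}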
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 3bfb5c8ac3c7..a94a814ad6ad 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -86,7 +86,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -165,7 +165,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -174,7 +174,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return 0;
/*
@@ -182,7 +182,7 @@ good_area:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
@@ -211,6 +211,6 @@ acc_err:
current->thread.faddr = address;
send_sig:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return send_fault_sig(regs);
}
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 6d3147662ff2..53040857a9ed 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -141,7 +141,7 @@ static inline void init_pointer_tables(void)
if (!pmd_present(*pmd))
continue;
- pte_dir = (pte_t *)__pmd_page(*pmd);
+ pte_dir = (pte_t *)pmd_page_vaddr(*pmd);
init_pointer_table(pte_dir, TABLE_PTE);
}
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 80064e6d064f..29f47923aa46 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -17,7 +17,6 @@
#include <asm/setup.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 904c2a663977..2bb006bdc31c 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -45,6 +45,31 @@ unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
+/* Prior to calling these routines, the page should have been flushed
+ * from both the cache and ATC, or the CPU might not notice that the
+ * cache setting for the page has been changed. -jskov
+ */
+static inline void nocache_page(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ if (CPU_IS_040_OR_060) {
+ pte_t *ptep = virt_to_kpte(addr);
+
+ *ptep = pte_mknocache(*ptep);
+ }
+}
+
+static inline void cache_page(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ if (CPU_IS_040_OR_060) {
+ pte_t *ptep = virt_to_kpte(addr);
+
+ *ptep = pte_mkcache(*ptep);
+ }
+}
/*
* Motorola 680x0 user's manual recommends using uncached memory for address
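virt_to_kpte() is one of the kernel-address shortcuts introduced by this series; conceptually it is the five-step walk the old m68k helpers open-coded (sketch under that assumption; the real helper also bails out on pmd_none()):

/* Sketch: what virt_to_kpte(addr) expands to conceptually. */
static inline pte_t *virt_to_kpte_sketch(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}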
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c
index ae03555449b8..4f2a7ef8348b 100644
--- a/arch/m68k/mm/sun3kmap.c
+++ b/arch/m68k/mm/sun3kmap.c
@@ -15,7 +15,6 @@
#include <linux/vmalloc.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/sun3mmu.h>
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 5d8d956d9329..dad494224497 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -21,7 +21,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 545a1fe0e119..490700aa2212 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -29,7 +29,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 9bc2da69f80c..5b86d10e0f84 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -32,7 +32,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index f31890078197..4627de3c0603 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -29,7 +29,6 @@
#include <asm/io.h>
#include <asm/bootinfo.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 229ea37dfe1b..7204c0ea0dc7 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -22,7 +22,6 @@
#include <asm/setup.h>
#include <asm/contregs.h>
#include <asm/movs.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sun3-head.h>
#include <asm/sun3mmu.h>
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index a2c1c9304895..f15ff16b9997 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -14,7 +14,6 @@
#include <linux/memblock.h>
#include <linux/list.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sun3mmu.h>
#include <asm/dvma.h>
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 582a1284059a..7aa879b7c7ff 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -22,7 +22,6 @@
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sun3mmu.h>
#include <asm/segment.h>
#include <asm/oplib.h>
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 399f3d06125f..4b560f4d3960 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -16,7 +16,6 @@
#include <linux/list.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/dvma.h>
#undef DVMA_DEBUG
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index c4b8aa1d80f4..fef52d222d46 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -22,7 +22,6 @@
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/* IOMMU support */
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
index be14c899ab7d..74d2fe57524b 100644
--- a/arch/m68k/sun3x/prom.c
+++ b/arch/m68k/sun3x/prom.c
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/sun3xprom.h>
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 11f56c85056b..39f8fb6768d8 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -57,9 +57,6 @@ void microblaze_cache_init(void);
#define invalidate_icache() mbc->iin();
#define invalidate_icache_range(start, end) mbc->iinr(start, end);
-#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
-#define flush_icache_page(vma, pg) do { } while (0)
-
#define enable_dcache() mbc->de();
#define disable_dcache() mbc->dd();
/* FIXME for LL-temac driver */
@@ -77,27 +74,9 @@ do { \
flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \
} while (0);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-
#define flush_cache_page(vma, vmaddr, pfn) \
flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE);
-/* MS: kgdb code use this macro, wrong len with FLASH */
-#if 0
-#define flush_cache_range(vma, start, len) { \
- flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
- flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
-}
-#endif
-
-#define flush_cache_range(vma, start, len) do { } while (0)
-
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, void *src, int len)
@@ -109,12 +88,8 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
flush_dcache_range(addr, addr + PAGE_SIZE);
}
}
+#define copy_to_user_page copy_to_user_page
-static inline void copy_from_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, void *src, int len)
-{
- memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
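The new self-referencing define is what keeps the local copy_to_user_page() in use: <asm-generic/cacheflush.h> only supplies a fallback when the name is not already defined, roughly along these lines (sketch of the pattern, not the exact header body):

/* Sketch of the #ifndef override pattern used by <asm-generic/cacheflush.h>. */
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif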
diff --git a/arch/microblaze/include/asm/highmem.h b/arch/microblaze/include/asm/highmem.h
index 332c78e15198..284ca8fb54c1 100644
--- a/arch/microblaze/include/asm/highmem.h
+++ b/arch/microblaze/include/asm/highmem.h
@@ -26,7 +26,6 @@
#include <asm/fixmap.h>
extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
@@ -51,32 +50,6 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-extern void __kunmap_atomic(void *kvaddr);
-
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
- return kmap_atomic_prot(page, kmap_prot);
-}
-
#define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
#endif /* __KERNEL__ */
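The kmap()/kunmap() bodies deleted here now live once in <linux/highmem.h>; a simplified view of the consolidated kmap() (the generic version additionally calls an optional kmap_flush_tlb() hook, a no-op unless the arch sets ARCH_HAS_KMAP_FLUSH_TLB):

/* Simplified sketch of the consolidated generic kmap(). */
static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);	/* arch kmap_flush_tlb() hook omitted here */
}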
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 1d7a91252d03..ebb6b7939bb8 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -12,11 +12,11 @@
#include <linux/kernel.h> /* For min/max macros */
#include <linux/highmem.h>
+#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>
-#include <asm/pgtable.h>
#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 6b056f6545d8..3fa1df90925e 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -21,7 +21,6 @@ extern int mem_init_done;
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *) 0)
#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */
@@ -438,27 +437,15 @@ static inline void ptep_mkdirty(struct mm_struct *mm,
/* Convert pmd entry to page */
/* our pmd entry is an effective address of pte table*/
/* returns effective address of the pmd entry*/
-#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
+}
/* returns struct *page of the pmd entry*/
#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-
-#define pte_unmap(pte) kunmap_atomic(pte)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -507,8 +494,6 @@ void __init *early_get_page(void);
#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
-#include <asm-generic/pgtable.h>
-
extern unsigned long ioremap_bot, ioremap_base;
void setup_memory(void);
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 070ba6139a62..6723c56ec378 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -12,7 +12,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/extable.h>
#include <linux/string.h>
diff --git a/arch/microblaze/include/asm/unwind.h b/arch/microblaze/include/asm/unwind.h
index c327d673622a..3db81777a887 100644
--- a/arch/microblaze/include/asm/unwind.h
+++ b/arch/microblaze/include/asm/unwind.h
@@ -20,7 +20,8 @@ extern struct trap_handler_info microblaze_trap_handlers;
extern const char _hw_exception_handler;
extern const char ex_handler_unhandled;
-void microblaze_unwind(struct task_struct *task, struct stack_trace *trace);
+void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
+ const char *loglvl);
#endif /* __MICROBLAZE_UNWIND_H */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 95558f32d60a..54411de22fa6 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -68,9 +68,9 @@
#include <asm/entry.h>
#include <asm/current.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/registers.h>
#include <asm/asm-offsets.h>
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index d9a2014a222f..9f12e3c2bb42 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -11,8 +11,8 @@
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index dd121e33b8e3..2310daff1f8a 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -18,6 +18,7 @@
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/of_fdt.h>
+#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -33,7 +34,6 @@
#include <asm/entry.h>
#include <asm/cpuinfo.h>
-#include <asm/pgtable.h>
DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index c9125c328949..bdd6d0c86e16 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -35,7 +35,6 @@
#include <asm/entry.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
@@ -160,9 +159,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
int err = 0, sig = ksig->sig;
unsigned long address = 0;
#ifdef CONFIG_MMU
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
#endif
@@ -198,10 +194,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
- pgdp = pgd_offset(current->mm, address);
- p4dp = p4d_offset(pgdp, address);
- pudp = pud_offset(p4dp, address);
- pmdp = pmd_offset(pudp, address);
+ pmdp = pmd_off(current->mm, address);
preempt_disable();
ptep = pte_offset_map(pmdp, address);
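pmd_off() is another shortcut added by this series; under the usual folded-level rules it amounts to the chained walk the removed local variables performed (sketch, not the literal helper):

/* Sketch: pmd_off(mm, addr) as the equivalent chained walk. */
static inline pmd_t *pmd_off_sketch(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}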
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
index b4debe283a79..b266c4d6ed9d 100644
--- a/arch/microblaze/kernel/stacktrace.c
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -20,12 +20,12 @@ void save_stack_trace(struct stack_trace *trace)
{
/* Exclude our helper functions from the trace*/
trace->skip += 2;
- microblaze_unwind(NULL, trace);
+ microblaze_unwind(NULL, trace, "");
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- microblaze_unwind(tsk, trace);
+ microblaze_unwind(tsk, trace, "");
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 45bbba9d919f..94b6fe93147d 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -31,7 +31,7 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
unsigned long words_to_show;
u32 fp = (u32) sp;
@@ -50,7 +50,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
words_to_show = kstack_depth_to_print;
- pr_info("Kernel Stack:\n");
+ printk("%sKernel Stack:\n", loglvl);
/*
* Make the first line an 'odd' size if necessary to get
@@ -65,11 +65,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
words_to_show -= line1_words;
}
}
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
+ print_hex_dump(loglvl, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
words_to_show << 2, 0);
- pr_info("\n\nCall Trace:\n");
- microblaze_unwind(task, NULL);
- pr_info("\n");
+ printk("%s\n\nCall Trace:\n", loglvl);
+ microblaze_unwind(task, NULL, loglvl);
+ printk("%s\n", loglvl);
if (!task)
task = current;
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 34c270cb11fc..778a761af0a7 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -154,7 +154,8 @@ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
static void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
- struct stack_trace *trace);
+ struct stack_trace *trace,
+ const char *loglvl);
/**
* unwind_trap - Unwind through a system trap, that stored previous state
@@ -162,16 +163,18 @@ static void microblaze_unwind_inner(struct task_struct *task,
*/
#ifdef CONFIG_MMU
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
- unsigned long fp, struct stack_trace *trace)
+ unsigned long fp, struct stack_trace *trace,
+ const char *loglvl)
{
/* To be implemented */
}
#else
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
- unsigned long fp, struct stack_trace *trace)
+ unsigned long fp, struct stack_trace *trace,
+ const char *loglvl)
{
const struct pt_regs *regs = (const struct pt_regs *) fp;
- microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace);
+ microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace, loglvl);
}
#endif
@@ -184,11 +187,13 @@ static inline void unwind_trap(struct task_struct *task, unsigned long pc,
* the caller's return address.
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
+ * @loglvl : Used for printk log level if (trace == NULL).
*/
static void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
- struct stack_trace *trace)
+ struct stack_trace *trace,
+ const char *loglvl)
{
int ofs = 0;
@@ -214,11 +219,11 @@ static void microblaze_unwind_inner(struct task_struct *task,
const struct pt_regs *regs =
(const struct pt_regs *) fp;
#endif
- pr_info("HW EXCEPTION\n");
+ printk("%sHW EXCEPTION\n", loglvl);
#ifndef CONFIG_MMU
microblaze_unwind_inner(task, regs->r17 - 4,
fp + EX_HANDLER_STACK_SIZ,
- regs->r15, trace);
+ regs->r15, trace, loglvl);
#endif
return;
}
@@ -228,8 +233,8 @@ static void microblaze_unwind_inner(struct task_struct *task,
if ((return_to >= handler->start_addr)
&& (return_to <= handler->end_addr)) {
if (!trace)
- pr_info("%s\n", handler->trap_name);
- unwind_trap(task, pc, fp, trace);
+ printk("%s%s\n", loglvl, handler->trap_name);
+ unwind_trap(task, pc, fp, trace, loglvl);
return;
}
}
@@ -248,13 +253,13 @@ static void microblaze_unwind_inner(struct task_struct *task,
} else {
/* Have we reached userland? */
if (unlikely(pc == task_pt_regs(task)->pc)) {
- pr_info("[<%p>] PID %lu [%s]\n",
- (void *) pc,
+ printk("%s[<%p>] PID %lu [%s]\n",
+ loglvl, (void *) pc,
(unsigned long) task->pid,
task->comm);
break;
} else
- print_ip_sym(pc);
+ print_ip_sym(loglvl, pc);
}
/* Stop when we reach anything not part of the kernel */
@@ -282,14 +287,16 @@ static void microblaze_unwind_inner(struct task_struct *task,
* @task : Task whose stack we are to unwind (NULL == current)
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
+ * @loglvl : Used for printk log level if (trace == NULL).
*/
-void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
+void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
+ const char *loglvl)
{
if (task) {
if (task == current) {
const struct pt_regs *regs = task_pt_regs(task);
microblaze_unwind_inner(task, regs->pc, regs->r1,
- regs->r15, trace);
+ regs->r15, trace, loglvl);
} else {
struct thread_info *thread_info =
(struct thread_info *)(task->stack);
@@ -299,7 +306,8 @@ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
microblaze_unwind_inner(task,
(unsigned long) &_switch_to,
cpu_context->r1,
- cpu_context->r15, trace);
+ cpu_context->r15,
+ trace, loglvl);
}
} else {
unsigned long pc, fp;
@@ -314,7 +322,7 @@ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
);
/* Since we are not a leaf function, use leaf_return = 0 */
- microblaze_unwind_inner(current, pc, fp, 0, trace);
+ microblaze_unwind_inner(current, pc, fp, 0, trace, loglvl);
}
}
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 3248141f8ed5..a2bfe587b491 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -30,7 +30,6 @@
#include <linux/interrupt.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
@@ -125,7 +124,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -137,12 +136,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -239,7 +238,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -248,7 +247,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* keep track of tlb+htab misses that are good addrs but
@@ -259,7 +258,7 @@ good_area:
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
pte_errors++;
@@ -278,7 +277,7 @@ bad_area_nosemaphore:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -286,7 +285,7 @@ out_of_memory:
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
return;
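The fault path's opportunistic locking uses the same new API; the trylock wrapper is a thin layer over the rwsem, roughly:

/* Simplified sketch of the trylock wrapper used in the fault path above. */
static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_lock) != 0;
}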
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index d7569f77fa15..92e0890416c9 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -32,18 +32,12 @@
*/
#include <asm/tlbflush.h>
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -55,19 +49,16 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
return (void *) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
unsigned int idx;
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- preempt_enable();
+ if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
- }
type = kmap_atomic_idx();
@@ -83,7 +74,5 @@ void __kunmap_atomic(void *kvaddr)
local_flush_tlb_page(NULL, vaddr);
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
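The preempt/pagefault bookkeeping stripped from the arch code above is now done once by the generic wrappers, which only call into the renamed _high entry points for genuine highmem pages (the matching kunmap_atomic() macro re-enables pagefaults and preemption after kunmap_atomic_high()); simplified:

/* Simplified sketch of the generic wrapper calling the arch _high hook. */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}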
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index dcaa53d11339..521b59ba716c 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -49,17 +49,6 @@ unsigned long lowmem_size;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
-
- return pte_offset_kernel(pmd_offset(pud, vaddr), vaddr);
-}
static void __init highmem_init(void)
{
@@ -68,7 +57,6 @@ static void __init highmem_init(void)
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
- kmap_prot = PAGE_KERNEL;
}
static void highmem_setup(void)
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 68c26cacd930..38ccb909bc9d 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -32,8 +32,8 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1ad50c01c970..26c63e8161f0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -184,7 +184,7 @@ config AR7
select SYS_SUPPORTS_ZBOOT_UART16550
select GPIOLIB
select VLYNQ
- select HAVE_CLK
+ select HAVE_LEGACY_CLK
help
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
@@ -212,9 +212,7 @@ config ATH79
select DMA_NONCOHERENT
select GPIOLIB
select PINCTRL
- select HAVE_CLK
select COMMON_CLK
- select CLKDEV_LOOKUP
select IRQ_MIPS_CPU
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_EARLY_PRINTK
@@ -301,9 +299,9 @@ config BCM63XX
select SYS_HAS_EARLY_PRINTK
select SWAP_IO_SPACE
select GPIOLIB
- select HAVE_CLK
select MIPS_L1_CACHE_SHIFT_4
select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
help
Support for BCM63XX based boards
@@ -424,6 +422,7 @@ config LANTIQ
select SWAP_IO_SPACE
select BOOT_RAW
select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
select USE_OF
select PINCTRL
select PINCTRL_LANTIQ
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 89fa6e62a3b3..da0712ad85f5 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -22,7 +22,6 @@
#include <asm/sgialib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#undef DEBUG
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 1784d4348c36..743535be7528 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -69,9 +69,6 @@ enum fixed_addresses {
#include <asm-generic/fixmap.h>
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), (vaddr))
-
/*
* Called from pgtable_init()
*/
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 9d84aafc33d0..f1f788b57166 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -46,21 +46,14 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void * kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+#define ARCH_HAS_KMAP_FLUSH_TLB
+extern void kmap_flush_tlb(unsigned long addr);
extern void *kmap_atomic_pfn(unsigned long pfn);
#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
extern void kmap_init(void);
-#define kmap_prot PAGE_KERNEL
-
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index e3f446d54827..e0c9cd41f9b9 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -21,7 +21,6 @@
#include <asm/floppy.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
/*
* How to access the FDC's registers.
diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h
index 095000c290e5..294ebb834632 100644
--- a/arch/mips/include/asm/mach-jazz/floppy.h
+++ b/arch/mips/include/asm/mach-jazz/floppy.h
@@ -15,7 +15,6 @@
#include <asm/addrspace.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
-#include <asm/pgtable.h>
static inline unsigned char fd_inb(unsigned int base, unsigned int reg)
{
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 1945c8970141..a950fc1ddb4d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -195,28 +195,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_unmap(pte) ((void)(pte))
-
#if defined(CONFIG_CPU_R3K_TLB)
/* Swap entries must have VALID bit cleared. */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index ee5dc0c145b9..1e7d6ce9d8d6 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -172,8 +172,6 @@
extern pte_t invalid_pte_table[PTRS_PER_PTE];
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-
#ifndef __PAGETABLE_PUD_FOLDED
/*
* For 4-level pagetables we defines these ourselves, for 3-level the
@@ -222,11 +220,6 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
-}
-
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
*p4d = p4dval;
@@ -320,15 +313,6 @@ static inline void pud_clear(pud_t *pudp)
#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
@@ -337,24 +321,8 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
-{
- return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
-}
#endif
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_unmap(pte) ((void)(pte))
-
/*
* Initialize a new pgd / pmd table with invalid pointers.
*/
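As with the 32-bit header, the removed MIPS walkers are replaced by the generic ones, which build on the *_page_vaddr() accessors this file still provides together with the generic index helpers; a simplified sketch of the generic mid-level shape:

/* Simplified sketch of the generic mid-level walkers. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}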
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 32760b41aa31..dd7a0f552cac 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -736,8 +736,6 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 04b9c4068493..495ba7cc56ec 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -14,12 +14,12 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
+#include <linux/pgtable.h>
#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
-#include <asm/pgtable.h>
#include <asm/tlbmisc.h>
static DEFINE_RAW_SPINLOCK(r4030_lock);
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index c64a297e82b3..014773f0bfcd 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/jazzdma.h>
-#include <asm/pgtable.h>
/*
* Set this to one to enable additional vdma debug code.
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 1b5e121c3f0d..04aab419a0fc 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -17,11 +17,11 @@
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/reboot.h>
-#include <asm/pgtable.h>
#include <asm/tlbmisc.h>
extern asmlinkage void jazz_handle_int(void);
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 8713b69c5048..3c0c3d1260c1 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/jump_label.h>
-#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b2a797557825..ff5320b79100 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -42,7 +42,6 @@
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/msa.h>
-#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 414b6e9c900b..2a61641c680b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -39,7 +39,6 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 2525eca9c962..afcf27a877cb 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -30,7 +30,6 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/reg.h>
#include <asm/syscall.h>
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 9058e9dcf080..2f513506a3d5 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -28,7 +28,6 @@
#include <linux/kexec.h>
#include <asm/time.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index e5a2a6ab71ac..9c1a2019113b 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -52,7 +52,7 @@ static void sysrq_handle_tlbdump(int key)
#endif
}
-static struct sysrq_key_op sysrq_tlbdump_op = {
+static const struct sysrq_key_op sysrq_tlbdump_op = {
.handler = sysrq_handle_tlbdump,
.help_msg = "show-tlbs(x)",
.action_msg = "Show TLB entries",
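Constifying the key_op matches the sysrq registration interface taking a const pointer; a hypothetical registration for illustration (assuming the usual register_sysrq_key() entry point, not shown in this hunk):

/* Hypothetical: registering a const sysrq handler on its key. */
static int __init example_sysrq_init(void)
{
	return register_sysrq_key('x', &sysrq_tlbdump_op);
}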
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 22f805a73921..7c32c956156a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -57,7 +57,6 @@
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
@@ -108,26 +107,26 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
-static void show_raw_backtrace(unsigned long reg29)
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
- printk("Call Trace:");
+ printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
- printk("\n");
+ printk("%s\n", loglvl);
#endif
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
- printk(" (Bad stack address)");
+ printk("%s (Bad stack address)", loglvl);
break;
}
if (__kernel_text_address(addr))
- print_ip_sym(addr);
+ print_ip_sym(loglvl, addr);
}
- printk("\n");
+ printk("%s\n", loglvl);
}
#ifdef CONFIG_KALLSYMS
@@ -140,7 +139,8 @@ static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
#endif
-static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
@@ -150,12 +150,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
task = current;
if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
- show_raw_backtrace(sp);
+ show_raw_backtrace(sp, loglvl);
return;
}
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
- print_ip_sym(pc);
+ print_ip_sym(loglvl, pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
pr_cont("\n");
@@ -166,19 +166,19 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
- const struct pt_regs *regs)
+ const struct pt_regs *regs, const char *loglvl)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
- printk("Stack :");
+ printk("%sStack :", loglvl);
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
- printk(" ");
+ printk("%s ", loglvl);
}
if (i > 39) {
pr_cont(" ...");
@@ -194,10 +194,10 @@ static void show_stacktrace(struct task_struct *task,
i++;
}
pr_cont("\n");
- show_backtrace(task, regs);
+ show_backtrace(task, regs, loglvl);
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
@@ -221,7 +221,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
* the stack in the kernel (not user) address space.
*/
set_fs(KERNEL_DS);
- show_stacktrace(task, &regs);
+ show_stacktrace(task, &regs, loglvl);
set_fs(old_fs);
}
@@ -373,7 +373,7 @@ void show_registers(struct pt_regs *regs)
if (!user_mode(regs))
/* Necessary for getting the correct stack content */
set_fs(KERNEL_DS);
- show_stacktrace(current, regs);
+ show_stacktrace(current, regs, KERN_DEFAULT);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
set_fs(old_fs);
@@ -793,13 +793,13 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 1;
case SIGSEGV:
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)fault_addr);
if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
force_sig_fault(SIGSEGV, si_code, fault_addr);
return 1;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 3adb7354bc01..242dc5e83847 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -94,7 +94,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct vm_area_struct *vma;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
@@ -187,6 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
ret = 0;
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
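The VDSO mapping path takes the lock for writing and stays interruptible; the killable write-lock wrapper is again a thin layer over the rwsem, roughly:

/* Simplified sketch of the killable write-lock wrapper used above. */
static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_lock);
}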
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index f62667bb54f3..521bd5891e84 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -19,13 +19,13 @@
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <linux/kvm_host.h>
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae..49bd160f4d85 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -168,7 +168,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
- return pte_offset(pmd, addr);
+ return pte_offset_kernel(pmd, addr);
}
/* Caller must hold kvm->mm_lock */
@@ -187,8 +187,8 @@ static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
unsigned long end_gpa)
{
- int i_min = __pte_offset(start_gpa);
- int i_max = __pte_offset(end_gpa);
+ int i_min = pte_index(start_gpa);
+ int i_max = pte_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -215,7 +215,7 @@ static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gpa;
@@ -312,8 +312,8 @@ static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
- int i_min = __pte_offset(start); \
- int i_max = __pte_offset(end); \
+ int i_min = pte_index(start); \
+ int i_max = pte_index(end); \
int i; \
pte_t old, new; \
\
@@ -346,7 +346,7 @@ static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
if (!pmd_present(pmd[i])) \
continue; \
\
- pte = pte_offset(pmd + i, 0); \
+ pte = pte_offset_kernel(pmd + i, 0); \
if (i == i_max) \
cur_end = end; \
\
@@ -842,8 +842,8 @@ void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
unsigned long end_gva)
{
- int i_min = __pte_offset(start_gva);
- int i_max = __pte_offset(end_gva);
+ int i_min = pte_index(start_gva);
+ int i_max = pte_index(end_gva);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -877,7 +877,7 @@ static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gva;
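The KVM MMU hunks drop the MIPS-private __pte_offset()/pte_offset() helpers in favour of the generic pte_index() and pte_offset_kernel() from <linux/pgtable.h>. A minimal sketch of a lookup with the generic helpers (illustrative only; example_lookup_pte() is not a function from the patch):

#include <linux/pgtable.h>

static pte_t *example_lookup_pte(pmd_t *pmd, unsigned long addr)
{
	if (!pmd_present(*pmd))
		return NULL;

	/* pte_index(addr) selects slot 0..PTRS_PER_PTE-1 inside the PTE
	 * page; pte_offset_kernel() returns a pointer to that slot. */
	return pte_offset_kernel(pmd, addr);
}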
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 2ecf4f05a980..1c1fbce3f566 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -22,7 +22,6 @@
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index f464506b1fea..34ad0b46e610 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -597,7 +597,7 @@ static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
pmd_va = pud_va | (k << PMD_SHIFT);
if (pmd_va >= end)
break;
- pte = pte_offset(pmd + k, 0);
+ pte = pte_offset_kernel(pmd + k, 0);
pte_free_kernel(NULL, pte);
}
pmd_free(NULL, pmd);
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 83ed37298e66..5a418ba5e75f 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
void dump_tlb_regs(void)
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index b97d9c5d8323..10b4bf7f70a3 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
extern int r3k_have_wired_reg;
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 4baf965e6fe8..8ae181e08311 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -20,7 +20,6 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 780dd2a567c1..df6755ca1892 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
@@ -240,9 +239,6 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -253,11 +249,8 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
if (cpu_context(smp_processor_id(), mm) == 0)
return;
- pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/* Invalid => no such page in the cache. */
if (!(pte_val(*ptep) & _PAGE_PRESENT))
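Here and in the c-r4k/c-tx39 hunks below, the open-coded pgd_offset() -> p4d_offset() -> pud_offset() -> pmd_offset() descent is collapsed into pmd_off(), and pte_offset() becomes pte_offset_kernel(). A sketch of the resulting shape, with a made-up function name and no arch specifics:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Illustration only: fetch the PTE that maps addr in a given mm. */
static pte_t *example_pte_for(struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmdp = pmd_off(mm, addr);	/* pgd -> p4d -> pud -> pmd in one call */

	return pte_offset_kernel(pmdp, addr);
}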
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6fb83ac7c475..49569e5666d7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -29,7 +29,6 @@
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
@@ -653,9 +652,6 @@ static inline void local_r4k_flush_cache_page(void *args)
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
void *vaddr;
@@ -668,11 +664,8 @@ static inline void local_r4k_flush_cache_page(void *args)
return;
addr &= PAGE_MASK;
- pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 2d479cc7e66b..03dfbb40ec73 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -17,7 +17,6 @@
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
@@ -169,9 +168,6 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
{
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -183,11 +179,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
return;
page &= PAGE_MASK;
- pgdp = pgd_offset(mm, page);
- p4dp = p4d_offset(pgdp, page);
- pudp = pud_offset(p4dp, page);
- pmdp = pmd_offset(pudp, page);
- ptep = pte_offset(pmdp, page);
+ pmdp = pmd_off(mm, page);
+ ptep = pte_offset_kernel(pmdp, page);
/*
* If the page isn't marked valid, the page cannot possibly be
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index ad6df1cea866..3e81ba000096 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -14,9 +14,9 @@
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
-#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -103,7 +103,7 @@ void __flush_dcache_page(struct page *page)
flush_data_cache_page(addr);
if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ kunmap_atomic((void *)addr);
}
EXPORT_SYMBOL(__flush_dcache_page);
@@ -146,7 +146,7 @@ void __update_cache(unsigned long address, pte_t pte)
flush_data_cache_page(addr);
if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ kunmap_atomic((void *)addr);
ClearPageDcacheDirty(page);
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f8d62cd83b36..01b168a90434 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -181,7 +181,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -190,7 +190,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -198,7 +198,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -250,14 +250,14 @@ out_of_memory:
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index d08e6d7d533b..5fec7f45d79a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -12,71 +12,37 @@ static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
-void *kmap(struct page *page)
+void kmap_flush_tlb(unsigned long addr)
{
- void *addr;
-
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- addr = kmap_high(page);
- flush_tlb_one((unsigned long)addr);
-
- return addr;
+ flush_tlb_one(addr);
}
-EXPORT_SYMBOL(kmap);
+EXPORT_SYMBOL(kmap_flush_tlb);
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
- set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
local_flush_tlb_one((unsigned long)vaddr);
return (void*) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type __maybe_unused;
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
+ if (vaddr < FIXADDR_START)
return;
- }
type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
@@ -94,10 +60,8 @@ void __kunmap_atomic(void *kvaddr)
}
#endif
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
@@ -126,5 +90,5 @@ void __init kmap_init(void)
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_pte = virt_to_kpte(kmap_vstart);
}
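With this change the generic kmap code owns the common parts: kmap_atomic() disables pagefaults and preemption and short-circuits lowmem pages, while the architecture only installs and tears down the highmem fixmap mapping via kmap_atomic_high_prot() and kunmap_atomic_high(). From a caller's point of view nothing changes; a small, illustrative user of the unchanged public API (example_zero_page() is a made-up name):

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: clear one page through a temporary atomic mapping. */
static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* generic: preempt/pagefault off */

	memset(vaddr, 0, PAGE_SIZE);

	kunmap_atomic(vaddr);			/* generic: undo, arch hook only for highmem */
}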
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 7c9f0c0a6cd3..336b58173dc7 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -40,7 +40,6 @@
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index c5578897a4fa..cd805b005509 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -20,7 +20,6 @@
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 37c7a01427d2..bd4b0656add3 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -10,7 +10,6 @@
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 6fd6e96fdebb..183ff9f9c026 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -10,7 +10,6 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index ea059cd86496..d7238687d790 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index eedad47df24f..97dc0511e63f 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/mips-cps.h>
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index c7b94c951d98..736615d68f7a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 50f207591b6d..a36622ebea55 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d7a9d5f211f0..6677dcb72580 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -21,7 +21,6 @@
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 38c204204529..14f8ba93367f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -28,11 +28,11 @@
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 35c2ebd8f094..c10d8b233ab1 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -27,18 +27,22 @@ choice
config SOC_RT288X
bool "RT288x"
select MIPS_L1_CACHE_SHIFT_4
+ select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_RT305X
bool "RT305x"
+ select HAVE_LEGACY_CLK
config SOC_RT3883
bool "RT3883"
+ select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7620
bool "MT7620/8"
select CPU_MIPSR2_IRQ_VI
+ select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7621
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 32bcb8d1dd88..a4daf8ccd16c 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -16,7 +16,6 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/sgialib.h>
#include <asm/time.h>
#include <asm/sn/agent.h>
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index c0e33632bc37..79c434fece52 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <asm/time.h>
-#include <asm/pgtable.h>
#include <asm/sgialib.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/arch.h>
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index 828ce131c228..be1b2cfc4c3e 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -14,7 +14,6 @@
#include <asm/ip32/crime.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
extern void crime_init(void);
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index caddded56e77..7d6824f7c0e8 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -44,9 +44,9 @@ void invalidate_kernel_vmap_range(void *addr, int size);
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
#else
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len);
-#define flush_icache_user_range flush_icache_user_range
+#define flush_icache_user_page flush_icache_user_page
#include <asm-generic/cacheflush.h>
#endif
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
index b3a82c97ded3..fe986d0e6e3f 100644
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
@@ -7,7 +7,6 @@
#include <asm/proc-fns.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
/*
* Right now we initialize only a single pte table. It can be extended
@@ -32,7 +31,6 @@
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) (((virt) - (PKMAP_BASE)) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-#define kmap_prot PAGE_KERNEL
static inline void flush_cache_kmaps(void)
{
@@ -44,9 +42,6 @@ extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *pkmap_page_table;
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
extern void kmap_init(void);
/*
@@ -54,12 +49,7 @@ extern void kmap_init(void);
* when CONFIG_HIGHMEM is not set.
*/
#ifdef CONFIG_HIGHMEM
-extern void *kmap(struct page *page);
-extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
-extern struct page *kmap_atomic_to_page(void *ptr);
#endif
#endif
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 476cc4dd1709..419f984eef70 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -186,16 +186,10 @@ extern void paging_init(void);
#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
+}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
@@ -346,12 +340,6 @@ static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot)
*
*/
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = 0xfff;
@@ -374,8 +362,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index fcefb62606ca..7347f00451a9 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -3,10 +3,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/sizes.h>
#include <asm/thread_info.h>
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
index 1e31829cbc2a..3897fd14a21d 100644
--- a/arch/nds32/kernel/module.c
+++ b/arch/nds32/kernel/module.c
@@ -5,7 +5,7 @@
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/moduleloader.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void *module_alloc(unsigned long size)
{
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index f4d386b52622..6a9772ba7392 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -97,18 +97,19 @@ static void dump_instr(struct pt_regs *regs)
}
#define LOOP_TIMES (100)
-static void __dump(struct task_struct *tsk, unsigned long *base_reg)
+static void __dump(struct task_struct *tsk, unsigned long *base_reg,
+ const char *loglvl)
{
unsigned long ret_addr;
int cnt = LOOP_TIMES, graph = 0;
- pr_emerg("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
while (!kstack_end(base_reg)) {
ret_addr = *base_reg++;
if (__kernel_text_address(ret_addr)) {
ret_addr = ftrace_graph_ret_addr(
tsk, &graph, ret_addr, NULL);
- print_ip_sym(ret_addr);
+ print_ip_sym(loglvl, ret_addr);
}
if (--cnt < 0)
break;
@@ -124,17 +125,17 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
ret_addr = ftrace_graph_ret_addr(
tsk, &graph, ret_addr, NULL);
- print_ip_sym(ret_addr);
+ print_ip_sym(loglvl, ret_addr);
}
if (--cnt < 0)
break;
base_reg = (unsigned long *)next_fp;
}
}
- pr_emerg("\n");
+ printk("%s\n", loglvl);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
unsigned long *base_reg;
@@ -151,7 +152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
else
__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
}
- __dump(tsk, base_reg);
+ __dump(tsk, base_reg, loglvl);
barrier();
}
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index 90bcae6f8554..e16009a07971 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -130,7 +130,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
#endif
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = vdso_random_addr(vdso_mapping_len);
@@ -185,12 +185,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
up_fail:
mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 254703653b6f..6eb98a7ad27d 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -35,9 +35,8 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
kunmap_atomic((void *)kaddr);
local_irq_restore(flags);
}
-EXPORT_SYMBOL(flush_icache_page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
unsigned long kaddr;
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index f331e533edc2..8fb73f6401a0 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
extern void die(const char *str, struct pt_regs *regs, long err);
@@ -127,12 +126,12 @@ void do_page_fault(unsigned long entry, unsigned long addr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!user_mode(regs) &&
!search_exception_tables(instruction_pointer(regs)))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -211,7 +210,7 @@ good_area:
/*
* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs)) {
@@ -248,7 +247,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -256,7 +255,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -264,7 +263,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
@@ -324,14 +323,14 @@ no_context:
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c
index 022779af6148..4284cd59e21a 100644
--- a/arch/nds32/mm/highmem.c
+++ b/arch/nds32/mm/highmem.c
@@ -10,45 +10,18 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
-void *kmap(struct page *page)
-{
- unsigned long vaddr;
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- vaddr = (unsigned long)kmap_high(page);
- return (void *)vaddr;
-}
-
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-EXPORT_SYMBOL(kunmap);
-
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned int idx;
unsigned long vaddr, pte;
int type;
pte_t *ptep;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL);
+ pte = (page_to_pfn(page) << PAGE_SHIFT) | prot;
ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
set_pte(ptep, pte);
@@ -58,10 +31,9 @@ void *kmap_atomic(struct page *page)
__nds32__isb();
return (void *)vaddr;
}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
if (kvaddr >= (void *)FIXADDR_START) {
unsigned long vaddr = (unsigned long)kvaddr;
@@ -72,8 +44,5 @@ void __kunmap_atomic(void *kvaddr)
ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
set_pte(ptep, 0);
}
- pagefault_enable();
- preempt_enable();
}
-
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 91147cca4b64..fa86f7b2f416 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -98,9 +98,6 @@ static pmd_t *fixmap_pmd_p;
static void __init fixedrange_init(void)
{
unsigned long vaddr;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
pte_t *pte;
@@ -110,10 +107,7 @@ static void __init fixedrange_init(void)
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
+ pmd = pmd_off_k(vaddr);
fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!fixmap_pmd_p)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -126,10 +120,7 @@ static void __init fixedrange_init(void)
*/
vaddr = PKMAP_BASE;
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
+ pmd = pmd_off_k(vaddr);
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c
index 837ae7728830..848c845f5f33 100644
--- a/arch/nds32/mm/proc.c
+++ b/arch/nds32/mm/proc.c
@@ -5,7 +5,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
@@ -16,14 +15,10 @@ extern struct cache_info L1_cache_info[2];
int va_kernel_present(unsigned long addr)
{
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
- p4d = p4d_offset(pgd_offset_k(addr), addr);
- pud = pud_offset(p4d, addr);
- pmd = pmd_offset(pud, addr);
+ pmd = pmd_off_k(addr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index f98b7f4519ba..2600d76c310c 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -22,7 +22,6 @@
#include <asm/tlbflush.h>
#include <asm/pgtable-bits.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#define FIRST_USER_ADDRESS 0UL
@@ -100,13 +99,9 @@ extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
*/
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
- pmdptr->pud.pgd.pgd = pmdval.pud.pgd.pgd;
+ *pmdptr = pmdval;
}
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
static inline int pte_write(pte_t pte) \
{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) \
@@ -237,27 +232,17 @@ static inline void pte_clear(struct mm_struct *mm,
*/
#define mk_pte(page, prot) (pfn_pte(page_to_pfn(page), prot))
-#define pte_unmap(pte) do { } while (0)
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
-#define pmd_page_vaddr(pmd) pmd_val(pmd)
-
-#define pte_offset_map(dir, addr) \
- ((pte_t *) page_address(pmd_page(*dir)) + \
- (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-/* Get the address to the PTE for a vaddr in specific directory */
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + \
- (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return pmd_val(pmd);
+}
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", \
@@ -286,8 +271,6 @@ static inline void pte_clear(struct mm_struct *mm,
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
extern void __init paging_init(void);
extern void __init mmu_init(void);
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index e2e3f13f98d5..76e0a42d6e36 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
index 4e704046a150..54f7b23df1bf 100644
--- a/arch/nios2/kernel/nios2_ksyms.c
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -8,9 +8,9 @@
#include <linux/export.h>
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
/* string functions */
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
index 486db793923c..b172da4eb1a9 100644
--- a/arch/nios2/kernel/traps.c
+++ b/arch/nios2/kernel/traps.c
@@ -52,12 +52,13 @@ void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
}
/*
- * The show_stack is an external API which we do not use ourselves.
+ * The show_stack() is external API which we do not use ourselves.
*/
int kstack_depth_to_print = 48;
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
unsigned long *endstack, addr;
int i;
@@ -72,16 +73,16 @@ void show_stack(struct task_struct *task, unsigned long *stack)
addr = (unsigned long) stack;
endstack = (unsigned long *) PAGE_ALIGN(addr);
- pr_emerg("Stack from %08lx:", (unsigned long)stack);
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (stack + 1 > endstack)
break;
if (i % 8 == 0)
- pr_emerg("\n ");
- pr_emerg(" %08lx", *stack++);
+ printk("%s\n ", loglvl);
+ printk("%s %08lx", loglvl, *stack++);
}
- pr_emerg("\nCall Trace:");
+ printk("%s\nCall Trace:", loglvl);
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
@@ -97,11 +98,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
(addr <= (unsigned long) _etext))) {
if (i % 4 == 0)
pr_emerg("\n ");
- pr_emerg(" [<%08lx>]", addr);
+ printk("%s [<%08lx>]", loglvl, addr);
i++;
}
}
- pr_emerg("\n");
+ printk("%s\n", loglvl);
}
void __init trap_init(void)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index ec9d8a9c426f..4112ef0e247e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -83,11 +83,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->ea))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -160,7 +160,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -169,7 +169,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -177,7 +177,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -215,14 +215,14 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
@@ -242,6 +242,7 @@ vmalloc_fault:
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -253,8 +254,12 @@ vmalloc_fault:
goto no_context;
set_pgd(pgd, *pgd_k);
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
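With __ARCH_USE_5LEVEL_HACK removed from the nios2 headers, the vmalloc-fault walk has to step through the p4d level explicitly and check it before descending, as the hunk above does. A compact sketch of the same five-level descent against the kernel page tables (illustrative, not lifted from the patch; example_walk_kernel() is an invented name):

#include <linux/mm.h>
#include <linux/pgtable.h>

static pte_t *example_walk_kernel(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}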
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 9afca77d10b1..61862dbb0e32 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -29,7 +29,6 @@
#include <asm/setup.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
@@ -110,14 +109,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
int ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* Map kuser helpers to user space address */
ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYEXEC, kuser_page);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/nios2/mm/ioremap.c b/arch/nios2/mm/ioremap.c
index 819bdfcc2e71..fe821efb9a99 100644
--- a/arch/nios2/mm/ioremap.c
+++ b/arch/nios2/mm/ioremap.c
@@ -86,11 +86,15 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
if (address >= end)
BUG();
do {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
error = -ENOMEM;
- pud = pud_alloc(&init_mm, dir, address);
+ p4d = p4d_alloc(&init_mm, dir, address);
+ if (!p4d)
+ break;
+ pud = pud_alloc(&init_mm, p4d, address);
if (!pud)
break;
pmd = pmd_alloc(&init_mm, pud, address);
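The allocation side mirrors the lookup side: remap_area_pages() now allocates a p4d entry before asking for the pud. A sketch of that chain against init_mm (made-up wrapper name, error handling reduced to NULL returns; not code from the patch):

#include <linux/mm.h>

static pmd_t *example_alloc_pmd(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	p4d_t *p4d = p4d_alloc(&init_mm, pgd, address);
	pud_t *pud;

	if (!p4d)
		return NULL;
	pud = pud_alloc(&init_mm, p4d, address);
	if (!pud)
		return NULL;
	return pmd_alloc(&init_mm, pud, address);
}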
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 61e24a25f71a..9b587fd592dd 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
/* pteaddr:
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index 7fea59e53f94..f90ac35f05f3 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -16,7 +16,6 @@
#include <asm/tlb.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#define TLB_INDEX_MASK \
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 79d5d7753fe4..eeac40d4a854 100644
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -62,31 +62,12 @@ static inline void flush_dcache_page(struct page *page)
clear_bit(PG_dc_clean, &page->flags);
}
-/*
- * Other interfaces are not required since we do not have virtually
- * indexed or tagged caches. So we can use the default here.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- if (vma->vm_flags & VM_EXEC) \
- sync_icache_dcache(page); \
- } while (0)
+#define flush_icache_user_page(vma, page, addr, len) \
+do { \
+ if (vma->vm_flags & VM_EXEC) \
+ sync_icache_dcache(page); \
+} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* __ASM_CACHEFLUSH_H */
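Rather than spelling out every no-op flush macro by hand, the openrisc header now defines only the hooks that differ and includes <asm-generic/cacheflush.h>, which supplies default no-op versions of anything not already #defined. The same pattern for a hypothetical architecture header (all example_* names below are invented for illustration):

#ifndef __ASM_EXAMPLE_CACHEFLUSH_H
#define __ASM_EXAMPLE_CACHEFLUSH_H

#include <linux/mm.h>

/* Arch-provided routine that really synchronizes I-cache with D-cache. */
void example_arch_sync_icache(struct page *page);

/* Override just this hook; defining the macro stops the generic header
 * from installing its no-op fallback of the same name. */
#define flush_icache_user_page(vma, page, addr, len)	\
do {							\
	if ((vma)->vm_flags & VM_EXEC)			\
		example_arch_sync_icache(page);		\
} while (0)

/* Everything not defined above falls back to the generic no-ops. */
#include <asm-generic/cacheflush.h>

#endif /* __ASM_EXAMPLE_CACHEFLUSH_H */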
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index e18f038b2a6d..db02fb2077d9 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -26,7 +26,6 @@
#define PIO_MASK 0
#include <asm-generic/io.h>
-#include <asm/pgtable.h>
void __iomem *ioremap(phys_addr_t offset, unsigned long size);
extern void iounmap(void *addr);
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 7f3fb9ceb083..9425bedab4fc 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -21,7 +21,6 @@
#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
@@ -364,38 +363,15 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
}
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-#define __pgd_offset(address) pgd_index(address)
-
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
+}
#define __pmd_offset(address) \
(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- pte_offset_map(dir, address)
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
@@ -439,8 +415,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
typedef pte_t *pte_addr_t;
#endif /* __ASSEMBLY__ */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index e9a7f0b35a15..4a4639c65cbb 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/current.h>
#include <linux/sched.h>
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index e435ae01c600..18c703d1d761 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -32,7 +32,6 @@
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
int main(void)
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index e4a78571f883..a1d25c3e28d1 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/unistd.h>
@@ -21,7 +22,6 @@
#include <asm/spr_defs.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/asm-offsets.h>
#define DISABLE_INTERRUPTS(t1,t2) \
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index b0dc974f9a74..af355e3f4619 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -16,10 +16,10 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 7d6a62eee2ef..277ac7a55752 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/semaphore.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -26,7 +27,6 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 6bcdca424e11..d7010e72450c 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -36,7 +36,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/spr_defs.h>
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index 6a5a91c76338..c8f47a623754 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -27,7 +27,6 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
/*
* Copy the thread state to a regset that can be interpreted by userspace.
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index c0a774b51e45..8aa438e1f51f 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -35,7 +35,6 @@
#include <linux/device.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/io.h>
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index c11aa2e17ce0..206e5325e61b 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -31,7 +31,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
@@ -41,18 +40,20 @@ unsigned long __user *lwa_addr;
void print_trace(void *data, unsigned long addr, int reliable)
{
- pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ",
+ const char *loglvl = data;
+
+ printk("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ",
(void *) addr);
}
/* displays a short stack trace */
-void show_stack(struct task_struct *task, unsigned long *esp)
+void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
{
if (esp == NULL)
esp = (unsigned long *)&esp;
- pr_emerg("Call trace:\n");
- unwind_stack(NULL, esp, print_trace);
+ printk("%sCall trace:\n", loglvl);
+ unwind_stack((void *)loglvl, esp, print_trace);
}
void show_registers(struct pt_regs *regs)
@@ -96,7 +97,7 @@ void show_registers(struct pt_regs *regs)
if (in_kernel) {
printk("\nStack: ");
- show_stack(NULL, (unsigned long *)esp);
+ show_stack(NULL, (unsigned long *)esp, KERN_EMERG);
printk("\nCode: ");
if (regs->pc < PAGE_OFFSET)
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 8af1cc78c4fb..d2224ccca294 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -104,7 +104,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -183,7 +183,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -192,7 +192,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -201,7 +201,7 @@ good_area:
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
@@ -260,14 +260,14 @@ out_of_memory:
__asm__ __volatile__("l.nop 42");
__asm__ __volatile__("l.nop 1");
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
@@ -295,6 +295,7 @@ vmalloc_fault:
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
@@ -321,8 +322,13 @@ vmalloc_fault:
* it exists.
*/
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ goto no_context;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index f94fe6d3f499..3d7c79c7745d 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -29,7 +29,6 @@
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
@@ -68,6 +67,7 @@ static void __init map_ram(void)
unsigned long v, p, e;
pgprot_t prot;
pgd_t *pge;
+ p4d_t *p4e;
pud_t *pue;
pmd_t *pme;
pte_t *pte;
@@ -87,7 +87,8 @@ static void __init map_ram(void)
while (p < e) {
int j;
- pue = pud_offset(pge, v);
+ p4e = p4d_offset(pge, v);
+ pue = pud_offset(p4e, v);
pme = pmd_offset(pue, v);
if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 8f8e97f7eac9..a978590d802d 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -13,11 +13,11 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
-#include <asm/pgtable.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index dd4f2007f7c9..4b680aed8f5f 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/spr_defs.h>
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index fadbbd010337..182a5bca3e2c 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -162,7 +162,7 @@ vmlinuz: bzImage
$(OBJCOPY) $(boot)/bzImage $@
else
vmlinuz: vmlinux
- @gzip -cf -9 $< > $@
+ @$(_GZIP) -cf -9 $< > $@
endif
install:
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 0c83644bfa5c..99663fc1f997 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -100,37 +100,11 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
}
}
-#include <asm/kmap_types.h>
-
-#define ARCH_HAS_KMAP
-
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- return page_address(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- flush_kernel_dcache_page_addr(page_address(page));
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
- preempt_disable();
- pagefault_disable();
- return page_address(page);
-}
-
-static inline void __kunmap_atomic(void *addr)
+#define ARCH_HAS_FLUSH_ON_KUNMAP
+static inline void kunmap_flush_on_unmap(void *addr)
{
flush_kernel_dcache_page_addr(addr);
- pagefault_enable();
- preempt_enable();
}
-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
-
#endif /* _PARISC_CACHEFLUSH_H */
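parisc has no highmem, so the generic kmap()/kunmap() paths reduce to page_address(); the only architecture-specific behaviour left is flushing the kernel dcache when a mapping is dropped, which the header now requests with ARCH_HAS_FLUSH_ON_KUNMAP and the kunmap_flush_on_unmap() hook. A simplified paraphrase of how the generic side is expected to consume that hook (the real logic lives in <linux/highmem.h> and also covers CONFIG_HIGHMEM; example_kunmap() is an invented name):

#include <linux/mm.h>

/* Sketch only: generic unmap path calling the optional arch flush hook. */
static inline void example_kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}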
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index cab8f64ca4a2..116effe26143 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -3,7 +3,7 @@
#define _ASM_IO_H
#include <linux/types.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 697a906ab1b0..07b89c74abeb 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -6,7 +6,6 @@
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index cd7df48dc874..75cf84070fc9 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -427,40 +427,16 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_address(pmd)));
+}
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, address) \
-((mm)->pgd + ((address) >> PGDIR_SHIFT))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* Find an entry in the second-level page table.. */
-#if CONFIG_PGTABLE_LEVELS == 3
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir,address) \
-((pmd_t *) pud_page_vaddr(*(dir)) + pmd_index(address))
-#else
-#define pmd_offset(dir,addr) ((pmd_t *) dir)
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) \
- ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
extern void paging_init (void);
/* Used for deferring calls to flush_dcache_page() */
@@ -571,6 +547,5 @@ extern void arch_report_meminfo(struct seq_file *m);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
#endif /* _PARISC_PGTABLE_H */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index aa79d35dedfa..305768a40773 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -20,8 +20,8 @@
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 9a03e29c8733..4b484ec7c7da 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -19,7 +19,6 @@
#include <asm/psw.h>
#include <asm/cache.h> /* for L1_CACHE_SHIFT */
#include <asm/assembly.h> /* for LDREG/STREG defines */
-#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
@@ -28,6 +27,7 @@
#include <asm/alternative.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_64BIT
.level 2.0w
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 951a339369dd..aa93d775c34d 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -17,10 +17,10 @@
#include <asm/pdc.h>
#include <asm/assembly.h>
-#include <asm/pgtable.h>
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
.level PA_ASM_LEVEL
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index fac18c623d16..7df140545b22 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -50,7 +50,6 @@
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <asm/unwind.h>
#include <asm/sections.h>
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index fa092ed1e837..b2ba6d633065 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -21,12 +21,12 @@
#include <asm/psw.h>
#include <asm/assembly.h>
-#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <asm/alternative.h>
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
.section .text.hot
.align 16
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 0f1b460ee715..70cd24bdcfec 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -201,7 +201,7 @@ static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
pgd_clear(dir);
return;
}
- pmd = pmd_offset(dir, vaddr);
+ pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index 749c4579db0d..6e8550fefad6 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -17,11 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kthread.h>
#include <linux/initrd.h>
+#include <linux/pgtable.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
enum pdt_access_type {
PDT_NONE,
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index f8c07dcbfb49..b51418ad8655 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -26,7 +26,6 @@
#include <linux/audit.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index e202c37e56af..f8a842ddd82d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -39,7 +39,6 @@
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 82fc01189488..5400e23a77a1 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -49,7 +49,7 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
static void parisc_show_stack(struct task_struct *task,
- struct pt_regs *regs);
+ struct pt_regs *regs, const char *loglvl);
static int printbinary(char *buf, unsigned long x, int nbits)
{
@@ -155,7 +155,7 @@ void show_regs(struct pt_regs *regs)
printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
- parisc_show_stack(current, regs);
+ parisc_show_stack(current, regs, KERN_DEFAULT);
}
}
@@ -170,37 +170,37 @@ static DEFINE_RATELIMIT_STATE(_hppa_rs,
}
-static void do_show_stack(struct unwind_frame_info *info)
+static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
int i = 1;
- printk(KERN_CRIT "Backtrace:\n");
+ printk("%sBacktrace:\n", loglvl);
while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
- printk(KERN_CRIT " [<" RFMT ">] %pS\n",
- info->ip, (void *) info->ip);
+ printk("%s [<" RFMT ">] %pS\n",
+ loglvl, info->ip, (void *) info->ip);
i++;
}
}
- printk(KERN_CRIT "\n");
+ printk("%s\n", loglvl);
}
static void parisc_show_stack(struct task_struct *task,
- struct pt_regs *regs)
+ struct pt_regs *regs, const char *loglvl)
{
struct unwind_frame_info info;
unwind_frame_init_task(&info, task, regs);
- do_show_stack(&info);
+ do_show_stack(&info, loglvl);
}
-void show_stack(struct task_struct *t, unsigned long *sp)
+void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
- parisc_show_stack(t, NULL);
+ parisc_show_stack(t, NULL, loglvl);
}
int is_valid_bugaddr(unsigned long iaoq)
@@ -446,7 +446,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
struct unwind_frame_info info;
unwind_frame_init(&info, current, regs);
- do_show_stack(&info);
+ do_show_stack(&info, KERN_CRIT);
}
printk("\n");
@@ -717,7 +717,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (user_mode(regs)) {
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
@@ -725,10 +725,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
break; /* call do_page_fault() */
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
/* Fall Through */
case 27:
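The traps.c hunks above thread a caller-chosen printk level through the backtrace helpers instead of hard-coding KERN_CRIT. A minimal sketch of that pattern, with a hypothetical function name:

	/* The caller picks the level (KERN_CRIT, KERN_DEFAULT, ...) and every
	 * line of output is prefixed with it via the "%s" format. */
	static void example_show_frame(const char *loglvl, unsigned long ip)
	{
		printk("%s [<%08lx>] %pS\n", loglvl, ip, (void *)ip);
	}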
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index beceaab34ecb..94a9fe2702c2 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -57,14 +57,10 @@ void * memcpy(void * dst,const void *src, size_t count)
EXPORT_SYMBOL(raw_copy_in_user);
EXPORT_SYMBOL(memcpy);
-long probe_kernel_read(void *dst, const void *src, size_t size)
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
- unsigned long addr = (unsigned long)src;
-
- if (addr < PAGE_SIZE)
- return -EFAULT;
-
+ if ((unsigned long)unsafe_src < PAGE_SIZE)
+ return false;
/* check for I/O space F_EXTEND(0xfff00000) access as well? */
-
- return __probe_kernel_read(dst, src, size);
+ return true;
}
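With the change above the arch no longer performs the copy itself: probe_kernel_read_allowed() only vetoes source ranges that are clearly bad (here, the first page), and the generic code is expected to call it before attempting a fault-safe copy. A hedged sketch of such a caller, not the actual mm/maccess.c implementation:

	/* Hypothetical caller: reject disallowed ranges, then copy with
	 * page faults disabled so a bad kernel address cannot oops. */
	long example_probe_kernel_read(void *dst, const void *src, size_t size)
	{
		if (!probe_kernel_read_allowed(src, size))
			return -ERANGE;

		pagefault_disable();
		/* ... arch-independent, fault-tolerant copy of size bytes ... */
		pagefault_enable();
		return 0;
	}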
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 86e8c848f3d7..66ac0719bd49 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -282,7 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
if (acc_type & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma || address < vma->vm_start)
goto check_expansion;
@@ -329,7 +329,7 @@ good_area:
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -337,7 +337,7 @@ good_area:
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
check_expansion:
@@ -349,7 +349,7 @@ check_expansion:
* Something tried to access memory that isn't in our memory map..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
int signo, si_code;
@@ -421,7 +421,7 @@ no_context:
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
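The fault.c and traps.c conversions above replace direct use of mm->mmap_sem with the mmap locking API (mmap_read_lock()/mmap_read_unlock()). The pattern they follow, shown as a small self-contained sketch with a hypothetical helper name:

	/* Take the mmap lock for reading around the VMA lookup, exactly as
	 * the handle_interruption() hunk above does. */
	static bool example_addr_is_executable(struct mm_struct *mm,
					       unsigned long addr)
	{
		struct vm_area_struct *vma;
		bool ok;

		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		ok = vma && addr >= vma->vm_start && (vma->vm_flags & VM_EXEC);
		mmap_read_unlock(mm);
		return ok;
	}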
diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c
index e2d8b0a857ee..24426a7e1a5e 100644
--- a/arch/parisc/mm/fixmap.c
+++ b/arch/parisc/mm/fixmap.c
@@ -33,11 +33,7 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
void notrace clear_fixmap(enum fixed_addresses idx)
{
unsigned long vaddr = __fix_to_virt(idx);
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
- pte_t *pte = pte_offset_kernel(pmd, vaddr);
+ pte_t *pte = virt_to_kpte(vaddr);
if (WARN_ON(pte_none(*pte)))
return;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index cdd760d39e7c..48d628a1a0af 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -26,7 +26,6 @@
#include <linux/compat.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8eee7a64add..9fa23eb320ff 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -116,6 +116,7 @@ config PPC
#
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
@@ -170,8 +171,8 @@ config PPC
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KASAN if PPC32
- select HAVE_ARCH_KASAN_VMALLOC if PPC32
+ select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
+ select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -476,7 +477,7 @@ config LD_HEAD_STUB_CATCH
If unsure, say "N".
config MPROFILE_KERNEL
- depends on PPC64 && CPU_LITTLE_ENDIAN
+ depends on PPC64 && CPU_LITTLE_ENDIAN && FUNCTION_TRACER
def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__)
config HOTPLUG_CPU
@@ -763,41 +764,18 @@ config THREAD_SHIFT
range 13 15
default "15" if PPC_256K_PAGES
default "14" if PPC64
+ default "14" if KASAN
default "13"
help
Used to define the stack size. The default is almost always what you
want. Only change this if you know what you are doing.
-config ETEXT_SHIFT_BOOL
- bool "Set custom etext alignment" if STRICT_KERNEL_RWX && \
- (PPC_BOOK3S_32 || PPC_8xx)
- depends on ADVANCED_OPTIONS
- help
- This option allows you to set the kernel end of text alignment. When
- RAM is mapped by blocks, the alignment needs to fit the size and
- number of possible blocks. The default should be OK for most configs.
-
- Say N here unless you know what you are doing.
-
-config ETEXT_SHIFT
- int "_etext shift" if ETEXT_SHIFT_BOOL
- range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- range 19 23 if STRICT_KERNEL_RWX && PPC_8xx
- default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- default 19 if STRICT_KERNEL_RWX && PPC_8xx
- default PPC_PAGE_SHIFT
- help
- On Book3S 32 (603+), IBATs are used to map kernel text.
- Smaller is the alignment, greater is the number of necessary IBATs.
-
- On 8xx, large pages (512kb or 8M) are used to map kernel linear
- memory. Aligning to 8M reduces TLB misses as only 8M pages are used
- in that case.
-
config DATA_SHIFT_BOOL
- bool "Set custom data alignment" if STRICT_KERNEL_RWX && \
- (PPC_BOOK3S_32 || PPC_8xx)
+ bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
+ depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
+ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && \
+ (!PIN_TLB_TEXT || !STRICT_KERNEL_RWX))
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -808,10 +786,13 @@ config DATA_SHIFT_BOOL
config DATA_SHIFT
int "Data shift" if DATA_SHIFT_BOOL
default 24 if STRICT_KERNEL_RWX && PPC64
- range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- range 19 23 if STRICT_KERNEL_RWX && PPC_8xx
+ range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32
+ range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
+ default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
+ default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA
+ default 19 if DEBUG_PAGEALLOC && PPC_8xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
@@ -819,7 +800,8 @@ config DATA_SHIFT
On 8xx, large pages (512kb or 8M) are used to map kernel linear
memory. Aligning to 8M reduces TLB misses as only 8M pages are used
- in that case.
+ in that case. If PIN_TLB is selected, it must be aligned to 8M as
+ 8M pages will be pinned.
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
@@ -1217,26 +1199,6 @@ config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
default "0xc0000000"
-
-config PIN_TLB
- bool "Pinned Kernel TLBs (860 ONLY)"
- depends on ADVANCED_OPTIONS && PPC_8xx && \
- !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX
-
-config PIN_TLB_DATA
- bool "Pinned TLB for DATA"
- depends on PIN_TLB
- default y
-
-config PIN_TLB_IMMR
- bool "Pinned TLB for IMMR"
- depends on PIN_TLB || PPC_EARLY_DEBUG_CPM
- default y
-
-config PIN_TLB_TEXT
- bool "Pinned TLB for TEXT"
- depends on PIN_TLB
- default y
endmenu
if PPC64
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 0b063830eea8..b88900f4832f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -230,7 +230,7 @@ config PPC_EARLY_DEBUG_40x
help
Select this to enable early debugging for IBM 40x chips via the
inbuilt serial port. This works on chips with a 16550 compatible
- UART. Xilinx chips with uartlite cannot use this option.
+ UART.
config PPC_EARLY_DEBUG_CPM
bool "Early serial debugging for Freescale CPM-based serial ports"
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index c53a1b8bba8b..63d7456b9518 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -75,11 +75,9 @@ $(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
-$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-akebono.o: BOOTCFLAGS += -mcpu=405
-$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
# The pre-boot decompressors pull in a lot of kernel headers and other source
# files. This creates a bit of a dependency headache since we need to copy
@@ -129,14 +127,12 @@ src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
src-wlib-$(CONFIG_EMBEDDED6xx) += ugecon.c fsl-soc.c
-src-wlib-$(CONFIG_XILINX_VIRTEX) += uartlite.c
src-wlib-$(CONFIG_CPM) += cpm-serial.c
src-plat-y := of.c epapr.c
-src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
- treeboot-walnut.c cuboot-acadia.c \
- cuboot-kilauea.c simpleboot.c \
- virtex405-head.S virtex.c
+src-plat-$(CONFIG_40x) += fixed-head.S cuboot-hotfoot.c \
+ cuboot-acadia.c \
+ cuboot-kilauea.c simpleboot.c
src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \
cuboot-bamboo.c cuboot-sam440ep.c \
cuboot-sequoia.c cuboot-rainier.c \
@@ -144,7 +140,7 @@ src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \
cuboot-warp.c cuboot-yosemite.c \
treeboot-iss4xx.c treeboot-currituck.c \
treeboot-akebono.c \
- simpleboot.c fixed-head.S virtex.c
+ simpleboot.c fixed-head.S
src-plat-$(CONFIG_PPC_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c
src-plat-$(CONFIG_PPC_MPC52xx) += cuboot-52xx.c
src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c
@@ -279,9 +275,7 @@ image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
#
# Board ports in arch/powerpc/platform/40x/Kconfig
-image-$(CONFIG_EP405) += dtbImage.ep405
image-$(CONFIG_HOTFOOT) += cuImage.hotfoot
-image-$(CONFIG_WALNUT) += treeImage.walnut
image-$(CONFIG_ACADIA) += cuImage.acadia
image-$(CONFIG_OBS600) += uImage.obs600
diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile
index 1cbc0e4ce857..fb335d05aae8 100644
--- a/arch/powerpc/boot/dts/Makefile
+++ b/arch/powerpc/boot/dts/Makefile
@@ -4,4 +4,3 @@ subdir-y += fsl
dtstree := $(srctree)/$(src)
dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-dtb-$(CONFIG_XILINX_VIRTEX440_GENERIC_BOARD) += virtex440-ml507.dtb virtex440-ml510.dtb
diff --git a/arch/powerpc/boot/dts/ep405.dts b/arch/powerpc/boot/dts/ep405.dts
deleted file mode 100644
index 4ac9c5ab6e6b..000000000000
--- a/arch/powerpc/boot/dts/ep405.dts
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Device Tree Source for EP405
- *
- * Copyright 2007 IBM Corp.
- * Benjamin Herrenschmidt <benh@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without
- * any warranty of any kind, whether express or implied.
- */
-
-/dts-v1/;
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- model = "ep405";
- compatible = "ep405";
- dcr-parent = <&{/cpus/cpu@0}>;
-
- aliases {
- ethernet0 = &EMAC;
- serial0 = &UART0;
- serial1 = &UART1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- model = "PowerPC,405GP";
- reg = <0x00000000>;
- clock-frequency = <200000000>; /* Filled in by zImage */
- timebase-frequency = <0>; /* Filled in by zImage */
- i-cache-line-size = <32>;
- d-cache-line-size = <32>;
- i-cache-size = <16384>;
- d-cache-size = <16384>;
- dcr-controller;
- dcr-access-method = "native";
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x00000000>; /* Filled in by zImage */
- };
-
- UIC0: interrupt-controller {
- compatible = "ibm,uic";
- interrupt-controller;
- cell-index = <0>;
- dcr-reg = <0x0c0 0x009>;
- #address-cells = <0>;
- #size-cells = <0>;
- #interrupt-cells = <2>;
- };
-
- plb {
- compatible = "ibm,plb3";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- clock-frequency = <0>; /* Filled in by zImage */
-
- SDRAM0: memory-controller {
- compatible = "ibm,sdram-405gp";
- dcr-reg = <0x010 0x002>;
- };
-
- MAL: mcmal {
- compatible = "ibm,mcmal-405gp", "ibm,mcmal";
- dcr-reg = <0x180 0x062>;
- num-tx-chans = <1>;
- num-rx-chans = <1>;
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xb 0x4 /* TXEOB */
- 0xc 0x4 /* RXEOB */
- 0xa 0x4 /* SERR */
- 0xd 0x4 /* TXDE */
- 0xe 0x4 /* RXDE */>;
- };
-
- POB0: opb {
- compatible = "ibm,opb-405gp", "ibm,opb";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xef600000 0xef600000 0x00a00000>;
- dcr-reg = <0x0a0 0x005>;
- clock-frequency = <0>; /* Filled in by zImage */
-
- UART0: serial@ef600300 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600300 0x00000008>;
- virtual-reg = <0xef600300>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x0 0x4>;
- };
-
- UART1: serial@ef600400 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600400 0x00000008>;
- virtual-reg = <0xef600400>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x1 0x4>;
- };
-
- IIC: i2c@ef600500 {
- compatible = "ibm,iic-405gp", "ibm,iic";
- reg = <0xef600500 0x00000011>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x2 0x4>;
- };
-
- GPIO: gpio@ef600700 {
- compatible = "ibm,gpio-405gp";
- reg = <0xef600700 0x00000020>;
- };
-
- EMAC: ethernet@ef600800 {
- linux,network-index = <0x0>;
- device_type = "network";
- compatible = "ibm,emac-405gp", "ibm,emac";
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xf 0x4 /* Ethernet */
- 0x9 0x4 /* Ethernet Wake Up */>;
- local-mac-address = [000000000000]; /* Filled in by zImage */
- reg = <0xef600800 0x00000070>;
- mal-device = <&MAL>;
- mal-tx-channel = <0>;
- mal-rx-channel = <0>;
- cell-index = <0>;
- max-frame-size = <1500>;
- rx-fifo-size = <4096>;
- tx-fifo-size = <2048>;
- phy-mode = "rmii";
- phy-map = <0x00000000>;
- };
-
- };
-
- EBC0: ebc {
- compatible = "ibm,ebc-405gp", "ibm,ebc";
- dcr-reg = <0x012 0x002>;
- #address-cells = <2>;
- #size-cells = <1>;
-
-
- /* The ranges property is supplied by the bootwrapper
- * and is based on the firmware's configuration of the
- * EBC bridge
- */
- clock-frequency = <0>; /* Filled in by zImage */
-
- /* NVRAM and RTC */
- nvrtc@4,200000 {
- compatible = "ds1742";
- reg = <0x00000004 0x00200000 0x00000000>; /* size fixed up by zImage */
- };
-
- /* "BCSR" CPLD contains a PCI irq controller */
- bcsr@4,0 {
- compatible = "ep405-bcsr";
- reg = <0x00000004 0x00000000 0x00000010>;
- interrupt-controller;
- /* Routing table */
- irq-routing = [ 00 /* SYSERR */
- 01 /* STTM */
- 01 /* RTC */
- 01 /* FENET */
- 02 /* NB PCIIRQ mux ? */
- 03 /* SB Winbond 8259 ? */
- 04 /* Serial Ring */
- 05 /* USB (ep405pc) */
- 06 /* XIRQ 0 */
- 06 /* XIRQ 1 */
- 06 /* XIRQ 2 */
- 06 /* XIRQ 3 */
- 06 /* XIRQ 4 */
- 06 /* XIRQ 5 */
- 06 /* XIRQ 6 */
- 07]; /* Reserved */
- };
- };
-
- PCI0: pci@ec000000 {
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "ibm,plb405gp-pci", "ibm,plb-pci";
- primary;
- reg = <0xeec00000 0x00000008 /* Config space access */
- 0xeed80000 0x00000004 /* IACK */
- 0xeed80000 0x00000004 /* Special cycle */
- 0xef480000 0x00000040>; /* Internal registers */
-
- /* Outbound ranges, one memory and one IO,
- * later cannot be changed. Chip supports a second
- * IO range but we don't use it for now
- */
- ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000
- 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
-
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>;
-
- /* That's all I know about IRQs on that thing ... */
- interrupt-map-mask = <0xf800 0x0 0x0 0x0>;
- interrupt-map = <
- /* USB */
- 0x7000 0x0 0x0 0x0 &UIC0 0x1e 0x8 /* IRQ5 */
- >;
- };
- };
-
- chosen {
- stdout-path = "/plb/opb/serial@ef600300";
- };
-};
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
index c259c6b3ac5a..780e13d99e7b 100644
--- a/arch/powerpc/boot/dts/pcm032.dts
+++ b/arch/powerpc/boot/dts/pcm032.dts
@@ -3,9 +3,7 @@
* phyCORE-MPC5200B-IO (pcm032) board Device Tree Source
*
* Copyright (C) 2006-2009 Pengutronix
- * Sascha Hauer <s.hauer@pengutronix.de>
- * Juergen Beisert <j.beisert@pengutronix.de>
- * Wolfram Sang <w.sang@pengutronix.de>
+ * Sascha Hauer, Juergen Beisert, Wolfram Sang <kernel@pengutronix.de>
*/
/include/ "mpc5200b.dtsi"
diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts
deleted file mode 100644
index 66f1c6312de6..000000000000
--- a/arch/powerpc/boot/dts/virtex440-ml507.dts
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * This file supports the Xilinx ML507 board with the 440 processor.
- * A reference design for the FPGA is provided at http://git.xilinx.com.
- *
- * (C) Copyright 2008 Xilinx, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- * ---
- *
- * Device Tree Generator version: 1.1
- *
- * CAUTION: This file is automatically generated by libgen.
- * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
- *
- * XPS project directory: ml507_ppc440_emb_ref
- */
-
-/dts-v1/;
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,virtex440";
- dcr-parent = <&ppc440_0>;
- model = "testing";
- DDR2_SDRAM: memory@0 {
- device_type = "memory";
- reg = < 0 0x10000000 >;
- } ;
- chosen {
- bootargs = "console=ttyS0 root=/dev/ram";
- stdout-path = &RS232_Uart_1;
- } ;
- cpus {
- #address-cells = <1>;
- #cpus = <1>;
- #size-cells = <0>;
- ppc440_0: cpu@0 {
- clock-frequency = <400000000>;
- compatible = "PowerPC,440", "ibm,ppc440";
- d-cache-line-size = <0x20>;
- d-cache-size = <0x8000>;
- dcr-access-method = "native";
- dcr-controller ;
- device_type = "cpu";
- i-cache-line-size = <0x20>;
- i-cache-size = <0x8000>;
- model = "PowerPC,440";
- reg = <0>;
- timebase-frequency = <400000000>;
- xlnx,apu-control = <1>;
- xlnx,apu-udi-0 = <0>;
- xlnx,apu-udi-1 = <0>;
- xlnx,apu-udi-10 = <0>;
- xlnx,apu-udi-11 = <0>;
- xlnx,apu-udi-12 = <0>;
- xlnx,apu-udi-13 = <0>;
- xlnx,apu-udi-14 = <0>;
- xlnx,apu-udi-15 = <0>;
- xlnx,apu-udi-2 = <0>;
- xlnx,apu-udi-3 = <0>;
- xlnx,apu-udi-4 = <0>;
- xlnx,apu-udi-5 = <0>;
- xlnx,apu-udi-6 = <0>;
- xlnx,apu-udi-7 = <0>;
- xlnx,apu-udi-8 = <0>;
- xlnx,apu-udi-9 = <0>;
- xlnx,dcr-autolock-enable = <1>;
- xlnx,dcu-rd-ld-cache-plb-prio = <0>;
- xlnx,dcu-rd-noncache-plb-prio = <0>;
- xlnx,dcu-rd-touch-plb-prio = <0>;
- xlnx,dcu-rd-urgent-plb-prio = <0>;
- xlnx,dcu-wr-flush-plb-prio = <0>;
- xlnx,dcu-wr-store-plb-prio = <0>;
- xlnx,dcu-wr-urgent-plb-prio = <0>;
- xlnx,dma0-control = <0>;
- xlnx,dma0-plb-prio = <0>;
- xlnx,dma0-rxchannelctrl = <0x1010000>;
- xlnx,dma0-rxirqtimer = <0x3ff>;
- xlnx,dma0-txchannelctrl = <0x1010000>;
- xlnx,dma0-txirqtimer = <0x3ff>;
- xlnx,dma1-control = <0>;
- xlnx,dma1-plb-prio = <0>;
- xlnx,dma1-rxchannelctrl = <0x1010000>;
- xlnx,dma1-rxirqtimer = <0x3ff>;
- xlnx,dma1-txchannelctrl = <0x1010000>;
- xlnx,dma1-txirqtimer = <0x3ff>;
- xlnx,dma2-control = <0>;
- xlnx,dma2-plb-prio = <0>;
- xlnx,dma2-rxchannelctrl = <0x1010000>;
- xlnx,dma2-rxirqtimer = <0x3ff>;
- xlnx,dma2-txchannelctrl = <0x1010000>;
- xlnx,dma2-txirqtimer = <0x3ff>;
- xlnx,dma3-control = <0>;
- xlnx,dma3-plb-prio = <0>;
- xlnx,dma3-rxchannelctrl = <0x1010000>;
- xlnx,dma3-rxirqtimer = <0x3ff>;
- xlnx,dma3-txchannelctrl = <0x1010000>;
- xlnx,dma3-txirqtimer = <0x3ff>;
- xlnx,endian-reset = <0>;
- xlnx,generate-plb-timespecs = <1>;
- xlnx,icu-rd-fetch-plb-prio = <0>;
- xlnx,icu-rd-spec-plb-prio = <0>;
- xlnx,icu-rd-touch-plb-prio = <0>;
- xlnx,interconnect-imask = <0xffffffff>;
- xlnx,mplb-allow-lock-xfer = <1>;
- xlnx,mplb-arb-mode = <0>;
- xlnx,mplb-awidth = <0x20>;
- xlnx,mplb-counter = <0x500>;
- xlnx,mplb-dwidth = <0x80>;
- xlnx,mplb-max-burst = <8>;
- xlnx,mplb-native-dwidth = <0x80>;
- xlnx,mplb-p2p = <0>;
- xlnx,mplb-prio-dcur = <2>;
- xlnx,mplb-prio-dcuw = <3>;
- xlnx,mplb-prio-icu = <4>;
- xlnx,mplb-prio-splb0 = <1>;
- xlnx,mplb-prio-splb1 = <0>;
- xlnx,mplb-read-pipe-enable = <1>;
- xlnx,mplb-sync-tattribute = <0>;
- xlnx,mplb-wdog-enable = <1>;
- xlnx,mplb-write-pipe-enable = <1>;
- xlnx,mplb-write-post-enable = <1>;
- xlnx,num-dma = <1>;
- xlnx,pir = <0xf>;
- xlnx,ppc440mc-addr-base = <0>;
- xlnx,ppc440mc-addr-high = <0xfffffff>;
- xlnx,ppc440mc-arb-mode = <0>;
- xlnx,ppc440mc-bank-conflict-mask = <0xc00000>;
- xlnx,ppc440mc-control = <0xf810008f>;
- xlnx,ppc440mc-max-burst = <8>;
- xlnx,ppc440mc-prio-dcur = <2>;
- xlnx,ppc440mc-prio-dcuw = <3>;
- xlnx,ppc440mc-prio-icu = <4>;
- xlnx,ppc440mc-prio-splb0 = <1>;
- xlnx,ppc440mc-prio-splb1 = <0>;
- xlnx,ppc440mc-row-conflict-mask = <0x3ffe00>;
- xlnx,ppcdm-asyncmode = <0>;
- xlnx,ppcds-asyncmode = <0>;
- xlnx,user-reset = <0>;
- DMA0: sdma@80 {
- compatible = "xlnx,ll-dma-1.00.a";
- dcr-reg = < 0x80 0x11 >;
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 10 2 11 2 >;
- } ;
- } ;
- } ;
- plb_v46_0: plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
- ranges ;
- DIP_Switches_8Bit: gpio@81460000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 7 2 >;
- reg = < 0x81460000 0x10000 >;
- xlnx,all-inputs = <1>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <8>;
- xlnx,interrupt-present = <1>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- FLASH: flash@fc000000 {
- bank-width = <2>;
- compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
- reg = < 0xfc000000 0x2000000 >;
- xlnx,family = "virtex5";
- xlnx,include-datawidth-matching-0 = <0x1>;
- xlnx,include-datawidth-matching-1 = <0x0>;
- xlnx,include-datawidth-matching-2 = <0x0>;
- xlnx,include-datawidth-matching-3 = <0x0>;
- xlnx,include-negedge-ioregs = <0x0>;
- xlnx,include-plb-ipif = <0x1>;
- xlnx,include-wrbuf = <0x1>;
- xlnx,max-mem-width = <0x10>;
- xlnx,mch-native-dwidth = <0x20>;
- xlnx,mch-plb-clk-period-ps = <0x2710>;
- xlnx,mch-splb-awidth = <0x20>;
- xlnx,mch0-accessbuf-depth = <0x10>;
- xlnx,mch0-protocol = <0x0>;
- xlnx,mch0-rddatabuf-depth = <0x10>;
- xlnx,mch1-accessbuf-depth = <0x10>;
- xlnx,mch1-protocol = <0x0>;
- xlnx,mch1-rddatabuf-depth = <0x10>;
- xlnx,mch2-accessbuf-depth = <0x10>;
- xlnx,mch2-protocol = <0x0>;
- xlnx,mch2-rddatabuf-depth = <0x10>;
- xlnx,mch3-accessbuf-depth = <0x10>;
- xlnx,mch3-protocol = <0x0>;
- xlnx,mch3-rddatabuf-depth = <0x10>;
- xlnx,mem0-width = <0x10>;
- xlnx,mem1-width = <0x20>;
- xlnx,mem2-width = <0x20>;
- xlnx,mem3-width = <0x20>;
- xlnx,num-banks-mem = <0x1>;
- xlnx,num-channels = <0x2>;
- xlnx,priority-mode = <0x0>;
- xlnx,synch-mem-0 = <0x0>;
- xlnx,synch-mem-1 = <0x0>;
- xlnx,synch-mem-2 = <0x0>;
- xlnx,synch-mem-3 = <0x0>;
- xlnx,synch-pipedelay-0 = <0x2>;
- xlnx,synch-pipedelay-1 = <0x2>;
- xlnx,synch-pipedelay-2 = <0x2>;
- xlnx,synch-pipedelay-3 = <0x2>;
- xlnx,tavdv-ps-mem-0 = <0x1adb0>;
- xlnx,tavdv-ps-mem-1 = <0x3a98>;
- xlnx,tavdv-ps-mem-2 = <0x3a98>;
- xlnx,tavdv-ps-mem-3 = <0x3a98>;
- xlnx,tcedv-ps-mem-0 = <0x1adb0>;
- xlnx,tcedv-ps-mem-1 = <0x3a98>;
- xlnx,tcedv-ps-mem-2 = <0x3a98>;
- xlnx,tcedv-ps-mem-3 = <0x3a98>;
- xlnx,thzce-ps-mem-0 = <0x88b8>;
- xlnx,thzce-ps-mem-1 = <0x1b58>;
- xlnx,thzce-ps-mem-2 = <0x1b58>;
- xlnx,thzce-ps-mem-3 = <0x1b58>;
- xlnx,thzoe-ps-mem-0 = <0x1b58>;
- xlnx,thzoe-ps-mem-1 = <0x1b58>;
- xlnx,thzoe-ps-mem-2 = <0x1b58>;
- xlnx,thzoe-ps-mem-3 = <0x1b58>;
- xlnx,tlzwe-ps-mem-0 = <0x88b8>;
- xlnx,tlzwe-ps-mem-1 = <0x0>;
- xlnx,tlzwe-ps-mem-2 = <0x0>;
- xlnx,tlzwe-ps-mem-3 = <0x0>;
- xlnx,twc-ps-mem-0 = <0x2af8>;
- xlnx,twc-ps-mem-1 = <0x3a98>;
- xlnx,twc-ps-mem-2 = <0x3a98>;
- xlnx,twc-ps-mem-3 = <0x3a98>;
- xlnx,twp-ps-mem-0 = <0x11170>;
- xlnx,twp-ps-mem-1 = <0x2ee0>;
- xlnx,twp-ps-mem-2 = <0x2ee0>;
- xlnx,twp-ps-mem-3 = <0x2ee0>;
- xlnx,xcl0-linesize = <0x4>;
- xlnx,xcl0-writexfer = <0x1>;
- xlnx,xcl1-linesize = <0x4>;
- xlnx,xcl1-writexfer = <0x1>;
- xlnx,xcl2-linesize = <0x4>;
- xlnx,xcl2-writexfer = <0x1>;
- xlnx,xcl3-linesize = <0x4>;
- xlnx,xcl3-writexfer = <0x1>;
- } ;
- Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ethernet@81c00000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "xlnx,xps-ll-temac-1.01.b";
- device_type = "network";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 5 2 >;
- llink-connected = <&DMA0>;
- local-mac-address = [ 02 00 00 00 00 00 ];
- reg = < 0x81c00000 0x40 >;
- xlnx,bus2core-clk-ratio = <1>;
- xlnx,phy-type = <1>;
- xlnx,phyaddr = <1>;
- xlnx,rxcsum = <1>;
- xlnx,rxfifo = <0x1000>;
- xlnx,temac-type = <0>;
- xlnx,txcsum = <1>;
- xlnx,txfifo = <0x1000>;
- phy-handle = <&phy7>;
- clock-frequency = <100000000>;
- phy7: phy@7 {
- compatible = "marvell,88e1111";
- reg = <7>;
- } ;
- } ;
- } ;
- IIC_EEPROM: i2c@81600000 {
- compatible = "xlnx,xps-iic-2.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 6 2 >;
- reg = < 0x81600000 0x10000 >;
- xlnx,clk-freq = <0x5f5e100>;
- xlnx,family = "virtex5";
- xlnx,gpo-width = <0x1>;
- xlnx,iic-freq = <0x186a0>;
- xlnx,scl-inertial-delay = <0x0>;
- xlnx,sda-inertial-delay = <0x0>;
- xlnx,ten-bit-adr = <0x0>;
- } ;
- LEDs_8Bit: gpio@81400000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81400000 0x10000 >;
- xlnx,all-inputs = <0>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <8>;
- xlnx,interrupt-present = <0>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- LEDs_Positions: gpio@81420000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81420000 0x10000 >;
- xlnx,all-inputs = <0>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <5>;
- xlnx,interrupt-present = <0>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- Push_Buttons_5Bit: gpio@81440000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 8 2 >;
- reg = < 0x81440000 0x10000 >;
- xlnx,all-inputs = <1>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <5>;
- xlnx,interrupt-present = <1>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- RS232_Uart_1: serial@83e00000 {
- clock-frequency = <100000000>;
- compatible = "xlnx,xps-uart16550-2.00.b", "ns16550";
- current-speed = <9600>;
- device_type = "serial";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 9 2 >;
- reg = < 0x83e00000 0x10000 >;
- reg-offset = <0x1003>;
- reg-shift = <2>;
- xlnx,family = "virtex5";
- xlnx,has-external-rclk = <0>;
- xlnx,has-external-xin = <0>;
- xlnx,is-a-16550 = <1>;
- } ;
- SysACE_CompactFlash: sysace@83600000 {
- compatible = "xlnx,xps-sysace-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 4 2 >;
- reg = < 0x83600000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,mem-width = <0x10>;
- } ;
- xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 {
- compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
- reg = < 0xffff0000 0x10000 >;
- xlnx,family = "virtex5";
- } ;
- xps_intc_0: interrupt-controller@81800000 {
- #interrupt-cells = <2>;
- compatible = "xlnx,xps-intc-1.00.a";
- interrupt-controller ;
- reg = < 0x81800000 0x10000 >;
- xlnx,num-intr-inputs = <0xc>;
- } ;
- xps_timebase_wdt_1: xps-timebase-wdt@83a00000 {
- compatible = "xlnx,xps-timebase-wdt-1.00.b";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 2 0 1 2 >;
- reg = < 0x83a00000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,wdt-enable-once = <0>;
- xlnx,wdt-interval = <0x1e>;
- } ;
- xps_timer_1: timer@83c00000 {
- compatible = "xlnx,xps-timer-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 3 2 >;
- reg = < 0x83c00000 0x10000 >;
- xlnx,count-width = <0x20>;
- xlnx,family = "virtex5";
- xlnx,gen0-assert = <1>;
- xlnx,gen1-assert = <1>;
- xlnx,one-timer-only = <1>;
- xlnx,trig0-assert = <1>;
- xlnx,trig1-assert = <1>;
- } ;
- } ;
-} ;
diff --git a/arch/powerpc/boot/dts/virtex440-ml510.dts b/arch/powerpc/boot/dts/virtex440-ml510.dts
deleted file mode 100644
index 3b736ca26ddc..000000000000
--- a/arch/powerpc/boot/dts/virtex440-ml510.dts
+++ /dev/null
@@ -1,466 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xilinx ML510 Reference Design support
- *
- * This DTS file was created for the ml510_bsb1_pcores_ppc440 reference design.
- * The reference design contains a bug which prevent PCI DMA from working
- * properly. A description of the bug is given in the plbv46_pci section. It
- * needs to be fixed by the user until Xilinx updates their reference design.
- *
- * Copyright 2009, Roderick Colenbrander
- */
-
-/dts-v1/;
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,ml510-ref-design", "xlnx,virtex440";
- dcr-parent = <&ppc440_0>;
- DDR2_SDRAM_DIMM0: memory@0 {
- device_type = "memory";
- reg = < 0x0 0x20000000 >;
- } ;
- alias {
- ethernet0 = &Hard_Ethernet_MAC;
- serial0 = &RS232_Uart_1;
- } ;
- chosen {
- bootargs = "console=ttyS0 root=/dev/ram";
- stdout-path = "/plb@0/serial@83e00000";
- } ;
- cpus {
- #address-cells = <1>;
- #cpus = <0x1>;
- #size-cells = <0>;
- ppc440_0: cpu@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- clock-frequency = <300000000>;
- compatible = "PowerPC,440", "ibm,ppc440";
- d-cache-line-size = <0x20>;
- d-cache-size = <0x8000>;
- dcr-access-method = "native";
- dcr-controller ;
- device_type = "cpu";
- i-cache-line-size = <0x20>;
- i-cache-size = <0x8000>;
- model = "PowerPC,440";
- reg = <0>;
- timebase-frequency = <300000000>;
- xlnx,apu-control = <0x2000>;
- xlnx,apu-udi-0 = <0x0>;
- xlnx,apu-udi-1 = <0x0>;
- xlnx,apu-udi-10 = <0x0>;
- xlnx,apu-udi-11 = <0x0>;
- xlnx,apu-udi-12 = <0x0>;
- xlnx,apu-udi-13 = <0x0>;
- xlnx,apu-udi-14 = <0x0>;
- xlnx,apu-udi-15 = <0x0>;
- xlnx,apu-udi-2 = <0x0>;
- xlnx,apu-udi-3 = <0x0>;
- xlnx,apu-udi-4 = <0x0>;
- xlnx,apu-udi-5 = <0x0>;
- xlnx,apu-udi-6 = <0x0>;
- xlnx,apu-udi-7 = <0x0>;
- xlnx,apu-udi-8 = <0x0>;
- xlnx,apu-udi-9 = <0x0>;
- xlnx,dcr-autolock-enable = <0x1>;
- xlnx,dcu-rd-ld-cache-plb-prio = <0x0>;
- xlnx,dcu-rd-noncache-plb-prio = <0x0>;
- xlnx,dcu-rd-touch-plb-prio = <0x0>;
- xlnx,dcu-rd-urgent-plb-prio = <0x0>;
- xlnx,dcu-wr-flush-plb-prio = <0x0>;
- xlnx,dcu-wr-store-plb-prio = <0x0>;
- xlnx,dcu-wr-urgent-plb-prio = <0x0>;
- xlnx,dma0-control = <0x0>;
- xlnx,dma0-plb-prio = <0x0>;
- xlnx,dma0-rxchannelctrl = <0x1010000>;
- xlnx,dma0-rxirqtimer = <0x3ff>;
- xlnx,dma0-txchannelctrl = <0x1010000>;
- xlnx,dma0-txirqtimer = <0x3ff>;
- xlnx,dma1-control = <0x0>;
- xlnx,dma1-plb-prio = <0x0>;
- xlnx,dma1-rxchannelctrl = <0x1010000>;
- xlnx,dma1-rxirqtimer = <0x3ff>;
- xlnx,dma1-txchannelctrl = <0x1010000>;
- xlnx,dma1-txirqtimer = <0x3ff>;
- xlnx,dma2-control = <0x0>;
- xlnx,dma2-plb-prio = <0x0>;
- xlnx,dma2-rxchannelctrl = <0x1010000>;
- xlnx,dma2-rxirqtimer = <0x3ff>;
- xlnx,dma2-txchannelctrl = <0x1010000>;
- xlnx,dma2-txirqtimer = <0x3ff>;
- xlnx,dma3-control = <0x0>;
- xlnx,dma3-plb-prio = <0x0>;
- xlnx,dma3-rxchannelctrl = <0x1010000>;
- xlnx,dma3-rxirqtimer = <0x3ff>;
- xlnx,dma3-txchannelctrl = <0x1010000>;
- xlnx,dma3-txirqtimer = <0x3ff>;
- xlnx,endian-reset = <0x0>;
- xlnx,generate-plb-timespecs = <0x1>;
- xlnx,icu-rd-fetch-plb-prio = <0x0>;
- xlnx,icu-rd-spec-plb-prio = <0x0>;
- xlnx,icu-rd-touch-plb-prio = <0x0>;
- xlnx,interconnect-imask = <0xffffffff>;
- xlnx,mplb-allow-lock-xfer = <0x1>;
- xlnx,mplb-arb-mode = <0x0>;
- xlnx,mplb-awidth = <0x20>;
- xlnx,mplb-counter = <0x500>;
- xlnx,mplb-dwidth = <0x80>;
- xlnx,mplb-max-burst = <0x8>;
- xlnx,mplb-native-dwidth = <0x80>;
- xlnx,mplb-p2p = <0x0>;
- xlnx,mplb-prio-dcur = <0x2>;
- xlnx,mplb-prio-dcuw = <0x3>;
- xlnx,mplb-prio-icu = <0x4>;
- xlnx,mplb-prio-splb0 = <0x1>;
- xlnx,mplb-prio-splb1 = <0x0>;
- xlnx,mplb-read-pipe-enable = <0x1>;
- xlnx,mplb-sync-tattribute = <0x0>;
- xlnx,mplb-wdog-enable = <0x1>;
- xlnx,mplb-write-pipe-enable = <0x1>;
- xlnx,mplb-write-post-enable = <0x1>;
- xlnx,num-dma = <0x0>;
- xlnx,pir = <0xf>;
- xlnx,ppc440mc-addr-base = <0x0>;
- xlnx,ppc440mc-addr-high = <0x1fffffff>;
- xlnx,ppc440mc-arb-mode = <0x0>;
- xlnx,ppc440mc-bank-conflict-mask = <0x1800000>;
- xlnx,ppc440mc-control = <0xf810008f>;
- xlnx,ppc440mc-max-burst = <0x8>;
- xlnx,ppc440mc-prio-dcur = <0x2>;
- xlnx,ppc440mc-prio-dcuw = <0x3>;
- xlnx,ppc440mc-prio-icu = <0x4>;
- xlnx,ppc440mc-prio-splb0 = <0x1>;
- xlnx,ppc440mc-prio-splb1 = <0x0>;
- xlnx,ppc440mc-row-conflict-mask = <0x7ffe00>;
- xlnx,ppcdm-asyncmode = <0x0>;
- xlnx,ppcds-asyncmode = <0x0>;
- xlnx,user-reset = <0x0>;
- } ;
- } ;
- plb_v46_0: plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
- ranges ;
- FLASH: flash@fc000000 {
- bank-width = <2>;
- compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
- reg = < 0xfc000000 0x2000000 >;
- xlnx,family = "virtex5";
- xlnx,include-datawidth-matching-0 = <0x1>;
- xlnx,include-datawidth-matching-1 = <0x0>;
- xlnx,include-datawidth-matching-2 = <0x0>;
- xlnx,include-datawidth-matching-3 = <0x0>;
- xlnx,include-negedge-ioregs = <0x0>;
- xlnx,include-plb-ipif = <0x1>;
- xlnx,include-wrbuf = <0x1>;
- xlnx,max-mem-width = <0x10>;
- xlnx,mch-native-dwidth = <0x20>;
- xlnx,mch-plb-clk-period-ps = <0x2710>;
- xlnx,mch-splb-awidth = <0x20>;
- xlnx,mch0-accessbuf-depth = <0x10>;
- xlnx,mch0-protocol = <0x0>;
- xlnx,mch0-rddatabuf-depth = <0x10>;
- xlnx,mch1-accessbuf-depth = <0x10>;
- xlnx,mch1-protocol = <0x0>;
- xlnx,mch1-rddatabuf-depth = <0x10>;
- xlnx,mch2-accessbuf-depth = <0x10>;
- xlnx,mch2-protocol = <0x0>;
- xlnx,mch2-rddatabuf-depth = <0x10>;
- xlnx,mch3-accessbuf-depth = <0x10>;
- xlnx,mch3-protocol = <0x0>;
- xlnx,mch3-rddatabuf-depth = <0x10>;
- xlnx,mem0-width = <0x10>;
- xlnx,mem1-width = <0x20>;
- xlnx,mem2-width = <0x20>;
- xlnx,mem3-width = <0x20>;
- xlnx,num-banks-mem = <0x1>;
- xlnx,num-channels = <0x2>;
- xlnx,priority-mode = <0x0>;
- xlnx,synch-mem-0 = <0x0>;
- xlnx,synch-mem-1 = <0x0>;
- xlnx,synch-mem-2 = <0x0>;
- xlnx,synch-mem-3 = <0x0>;
- xlnx,synch-pipedelay-0 = <0x2>;
- xlnx,synch-pipedelay-1 = <0x2>;
- xlnx,synch-pipedelay-2 = <0x2>;
- xlnx,synch-pipedelay-3 = <0x2>;
- xlnx,tavdv-ps-mem-0 = <0x1adb0>;
- xlnx,tavdv-ps-mem-1 = <0x3a98>;
- xlnx,tavdv-ps-mem-2 = <0x3a98>;
- xlnx,tavdv-ps-mem-3 = <0x3a98>;
- xlnx,tcedv-ps-mem-0 = <0x1adb0>;
- xlnx,tcedv-ps-mem-1 = <0x3a98>;
- xlnx,tcedv-ps-mem-2 = <0x3a98>;
- xlnx,tcedv-ps-mem-3 = <0x3a98>;
- xlnx,thzce-ps-mem-0 = <0x88b8>;
- xlnx,thzce-ps-mem-1 = <0x1b58>;
- xlnx,thzce-ps-mem-2 = <0x1b58>;
- xlnx,thzce-ps-mem-3 = <0x1b58>;
- xlnx,thzoe-ps-mem-0 = <0x1b58>;
- xlnx,thzoe-ps-mem-1 = <0x1b58>;
- xlnx,thzoe-ps-mem-2 = <0x1b58>;
- xlnx,thzoe-ps-mem-3 = <0x1b58>;
- xlnx,tlzwe-ps-mem-0 = <0x88b8>;
- xlnx,tlzwe-ps-mem-1 = <0x0>;
- xlnx,tlzwe-ps-mem-2 = <0x0>;
- xlnx,tlzwe-ps-mem-3 = <0x0>;
- xlnx,twc-ps-mem-0 = <0x1adb0>;
- xlnx,twc-ps-mem-1 = <0x3a98>;
- xlnx,twc-ps-mem-2 = <0x3a98>;
- xlnx,twc-ps-mem-3 = <0x3a98>;
- xlnx,twp-ps-mem-0 = <0x11170>;
- xlnx,twp-ps-mem-1 = <0x2ee0>;
- xlnx,twp-ps-mem-2 = <0x2ee0>;
- xlnx,twp-ps-mem-3 = <0x2ee0>;
- xlnx,xcl0-linesize = <0x4>;
- xlnx,xcl0-writexfer = <0x1>;
- xlnx,xcl1-linesize = <0x4>;
- xlnx,xcl1-writexfer = <0x1>;
- xlnx,xcl2-linesize = <0x4>;
- xlnx,xcl2-writexfer = <0x1>;
- xlnx,xcl3-linesize = <0x4>;
- xlnx,xcl3-writexfer = <0x1>;
- } ;
- Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ethernet@81c00000 {
- compatible = "xlnx,xps-ll-temac-1.01.b";
- device_type = "network";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 8 2 >;
- llink-connected = <&Hard_Ethernet_MAC_fifo>;
- local-mac-address = [ 02 00 00 00 00 00 ];
- reg = < 0x81c00000 0x40 >;
- xlnx,bus2core-clk-ratio = <0x1>;
- xlnx,phy-type = <0x3>;
- xlnx,phyaddr = <0x1>;
- xlnx,rxcsum = <0x0>;
- xlnx,rxfifo = <0x8000>;
- xlnx,temac-type = <0x0>;
- xlnx,txcsum = <0x0>;
- xlnx,txfifo = <0x8000>;
- } ;
- } ;
- Hard_Ethernet_MAC_fifo: xps-ll-fifo@81a00000 {
- compatible = "xlnx,xps-ll-fifo-1.01.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 6 2 >;
- reg = < 0x81a00000 0x10000 >;
- xlnx,family = "virtex5";
- } ;
- IIC_EEPROM: i2c@81600000 {
- compatible = "xlnx,xps-iic-2.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 9 2 >;
- reg = < 0x81600000 0x10000 >;
- xlnx,clk-freq = <0x5f5e100>;
- xlnx,family = "virtex5";
- xlnx,gpo-width = <0x1>;
- xlnx,iic-freq = <0x186a0>;
- xlnx,scl-inertial-delay = <0x5>;
- xlnx,sda-inertial-delay = <0x5>;
- xlnx,ten-bit-adr = <0x0>;
- } ;
- LCD_OPTIONAL: gpio@81420000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81420000 0x10000 >;
- xlnx,all-inputs = <0x0>;
- xlnx,all-inputs-2 = <0x0>;
- xlnx,dout-default = <0x0>;
- xlnx,dout-default-2 = <0x0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <0xb>;
- xlnx,interrupt-present = <0x0>;
- xlnx,is-bidir = <0x1>;
- xlnx,is-bidir-2 = <0x1>;
- xlnx,is-dual = <0x0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- LEDs_4Bit: gpio@81400000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81400000 0x10000 >;
- xlnx,all-inputs = <0x0>;
- xlnx,all-inputs-2 = <0x0>;
- xlnx,dout-default = <0x0>;
- xlnx,dout-default-2 = <0x0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <0x4>;
- xlnx,interrupt-present = <0x0>;
- xlnx,is-bidir = <0x1>;
- xlnx,is-bidir-2 = <0x1>;
- xlnx,is-dual = <0x0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- RS232_Uart_1: serial@83e00000 {
- clock-frequency = <100000000>;
- compatible = "xlnx,xps-uart16550-2.00.b", "ns16550";
- current-speed = <9600>;
- device_type = "serial";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 11 2 >;
- reg = < 0x83e00000 0x10000 >;
- reg-offset = <0x1003>;
- reg-shift = <2>;
- xlnx,family = "virtex5";
- xlnx,has-external-rclk = <0x0>;
- xlnx,has-external-xin = <0x0>;
- xlnx,is-a-16550 = <0x1>;
- } ;
- SPI_EEPROM: xps-spi@feff8000 {
- compatible = "xlnx,xps-spi-2.00.b";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 10 2 >;
- reg = < 0xfeff8000 0x80 >;
- xlnx,family = "virtex5";
- xlnx,fifo-exist = <0x1>;
- xlnx,num-ss-bits = <0x1>;
- xlnx,num-transfer-bits = <0x8>;
- xlnx,sck-ratio = <0x80>;
- } ;
- SysACE_CompactFlash: sysace@83600000 {
- compatible = "xlnx,xps-sysace-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 7 2 >;
- reg = < 0x83600000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,mem-width = <0x10>;
- } ;
- plbv46_pci_0: plbv46-pci@85e00000 {
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "xlnx,plbv46-pci-1.03.a";
- device_type = "pci";
- reg = < 0x85e00000 0x10000 >;
-
- /*
- * The default ML510 BSB has C_IPIFBAR2PCIBAR_0 set to
- * 0 which means that a read/write to the memory mapped
- * i/o region (which starts at 0xa0000000) for pci
- * bar 0 on the plb side translates to 0.
- * It is important to set this value to 0xa0000000, so
- * that inbound and outbound pci transactions work
- * properly including DMA.
- */
- ranges = <0x02000000 0 0xa0000000 0xa0000000 0 0x20000000
- 0x01000000 0 0x00000000 0xf0000000 0 0x00010000>;
-
- #interrupt-cells = <1>;
- interrupt-parent = <&xps_intc_0>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
- interrupt-map = <
- /* IRQ mapping for pci slots and ALI M1533
- * periperhals. In total there are 5 interrupt
- * lines connected to a xps_intc controller.
- * Four of them are PCI IRQ A, B, C, D and
- * which correspond to respectively xpx_intc
- * 5, 4, 3 and 2. The fifth interrupt line is
- * connected to the south bridge and this one
- * uses irq 1 and is active high instead of
- * active low.
- *
- * The M1533 contains various peripherals
- * including AC97 audio, a modem, USB, IDE and
- * some power management stuff. The modem
- * isn't connected on the ML510 and the power
- * management core also isn't used.
- */
-
- /* IDSEL 0x16 / dev=6, bus=0 / PCI slot 3 */
- 0x3000 0 0 1 &xps_intc_0 3 2
- 0x3000 0 0 2 &xps_intc_0 2 2
- 0x3000 0 0 3 &xps_intc_0 5 2
- 0x3000 0 0 4 &xps_intc_0 4 2
-
- /* IDSEL 0x13 / dev=3, bus=1 / PCI slot 4 */
- /*
- 0x11800 0 0 1 &xps_intc_0 5 0 2
- 0x11800 0 0 2 &xps_intc_0 4 0 2
- 0x11800 0 0 3 &xps_intc_0 3 0 2
- 0x11800 0 0 4 &xps_intc_0 2 0 2
- */
-
- /* According to the datasheet + schematic
- * ABCD [FPGA] of slot 5 is mapped to DABC.
- * Testing showed that at least A maps to B,
- * the mapping of the other pins is a guess
- * and for that reason the lines have been
- * commented out.
- */
- /* IDSEL 0x15 / dev=5, bus=0 / PCI slot 5 */
- 0x2800 0 0 1 &xps_intc_0 4 2
- /*
- 0x2800 0 0 2 &xps_intc_0 3 2
- 0x2800 0 0 3 &xps_intc_0 2 2
- 0x2800 0 0 4 &xps_intc_0 5 2
- */
-
- /* IDSEL 0x12 / dev=2, bus=1 / PCI slot 6 */
- /*
- 0x11000 0 0 1 &xps_intc_0 4 0 2
- 0x11000 0 0 2 &xps_intc_0 3 0 2
- 0x11000 0 0 3 &xps_intc_0 2 0 2
- 0x11000 0 0 4 &xps_intc_0 5 0 2
- */
-
- /* IDSEL 0x11 / dev=1, bus=0 / AC97 audio */
- 0x0800 0 0 1 &i8259 7 2
-
- /* IDSEL 0x1b / dev=11, bus=0 / IDE */
- 0x5800 0 0 1 &i8259 14 2
-
- /* IDSEL 0x1f / dev 15, bus=0 / 2x USB 1.1 */
- 0x7800 0 0 1 &i8259 7 2
- >;
- ali_m1533 {
- #size-cells = <1>;
- #address-cells = <2>;
- i8259: interrupt-controller@20 {
- reg = <1 0x20 2
- 1 0xa0 2
- 1 0x4d0 2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
-
- /* south bridge irq is active high */
- interrupts = <1 3>;
- interrupt-parent = <&xps_intc_0>;
- };
- };
- } ;
- xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 {
- compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
- reg = < 0xffff0000 0x10000 >;
- xlnx,family = "virtex5";
- } ;
- xps_intc_0: interrupt-controller@81800000 {
- #interrupt-cells = <0x2>;
- compatible = "xlnx,xps-intc-1.00.a";
- interrupt-controller ;
- reg = < 0x81800000 0x10000 >;
- xlnx,num-intr-inputs = <0xc>;
- } ;
- xps_tft_0: tft@86e00000 {
- compatible = "xlnx,xps-tft-1.00.a";
- reg = < 0x86e00000 0x10000 >;
- xlnx,dcr-splb-slave-if = <0x1>;
- xlnx,default-tft-base-addr = <0x0>;
- xlnx,family = "virtex5";
- xlnx,i2c-slave-addr = <0x76>;
- xlnx,mplb-awidth = <0x20>;
- xlnx,mplb-dwidth = <0x80>;
- xlnx,mplb-native-dwidth = <0x40>;
- xlnx,mplb-smallest-slave = <0x20>;
- xlnx,tft-interface = <0x1>;
- } ;
- } ;
-} ;
diff --git a/arch/powerpc/boot/dts/walnut.dts b/arch/powerpc/boot/dts/walnut.dts
deleted file mode 100644
index 0872862c9363..000000000000
--- a/arch/powerpc/boot/dts/walnut.dts
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Device Tree Source for IBM Walnut
- *
- * Copyright 2007 IBM Corp.
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without
- * any warranty of any kind, whether express or implied.
- */
-
-/dts-v1/;
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- model = "ibm,walnut";
- compatible = "ibm,walnut";
- dcr-parent = <&{/cpus/cpu@0}>;
-
- aliases {
- ethernet0 = &EMAC;
- serial0 = &UART0;
- serial1 = &UART1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- model = "PowerPC,405GP";
- reg = <0x00000000>;
- clock-frequency = <200000000>; /* Filled in by zImage */
- timebase-frequency = <0>; /* Filled in by zImage */
- i-cache-line-size = <32>;
- d-cache-line-size = <32>;
- i-cache-size = <16384>;
- d-cache-size = <16384>;
- dcr-controller;
- dcr-access-method = "native";
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x00000000>; /* Filled in by zImage */
- };
-
- UIC0: interrupt-controller {
- compatible = "ibm,uic";
- interrupt-controller;
- cell-index = <0>;
- dcr-reg = <0x0c0 0x009>;
- #address-cells = <0>;
- #size-cells = <0>;
- #interrupt-cells = <2>;
- };
-
- plb {
- compatible = "ibm,plb3";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- clock-frequency = <0>; /* Filled in by zImage */
-
- SDRAM0: memory-controller {
- compatible = "ibm,sdram-405gp";
- dcr-reg = <0x010 0x002>;
- };
-
- MAL: mcmal {
- compatible = "ibm,mcmal-405gp", "ibm,mcmal";
- dcr-reg = <0x180 0x062>;
- num-tx-chans = <1>;
- num-rx-chans = <1>;
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xb 0x4 /* TXEOB */
- 0xc 0x4 /* RXEOB */
- 0xa 0x4 /* SERR */
- 0xd 0x4 /* TXDE */
- 0xe 0x4 /* RXDE */>;
- };
-
- POB0: opb {
- compatible = "ibm,opb-405gp", "ibm,opb";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xef600000 0xef600000 0x00a00000>;
- dcr-reg = <0x0a0 0x005>;
- clock-frequency = <0>; /* Filled in by zImage */
-
- UART0: serial@ef600300 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600300 0x00000008>;
- virtual-reg = <0xef600300>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x0 0x4>;
- };
-
- UART1: serial@ef600400 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600400 0x00000008>;
- virtual-reg = <0xef600400>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x1 0x4>;
- };
-
- IIC: i2c@ef600500 {
- compatible = "ibm,iic-405gp", "ibm,iic";
- reg = <0xef600500 0x00000011>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x2 0x4>;
- };
-
- GPIO: gpio@ef600700 {
- compatible = "ibm,gpio-405gp";
- reg = <0xef600700 0x00000020>;
- };
-
- EMAC: ethernet@ef600800 {
- device_type = "network";
- compatible = "ibm,emac-405gp", "ibm,emac";
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xf 0x4 /* Ethernet */
- 0x9 0x4 /* Ethernet Wake Up */>;
- local-mac-address = [000000000000]; /* Filled in by zImage */
- reg = <0xef600800 0x00000070>;
- mal-device = <&MAL>;
- mal-tx-channel = <0>;
- mal-rx-channel = <0>;
- cell-index = <0>;
- max-frame-size = <1500>;
- rx-fifo-size = <4096>;
- tx-fifo-size = <2048>;
- phy-mode = "rmii";
- phy-map = <0x00000001>;
- };
-
- };
-
- EBC0: ebc {
- compatible = "ibm,ebc-405gp", "ibm,ebc";
- dcr-reg = <0x012 0x002>;
- #address-cells = <2>;
- #size-cells = <1>;
- /* The ranges property is supplied by the bootwrapper
- * and is based on the firmware's configuration of the
- * EBC bridge
- */
- clock-frequency = <0>; /* Filled in by zImage */
-
- sram@0,0 {
- reg = <0x00000000 0x00000000 0x00080000>;
- };
-
- flash@0,80000 {
- compatible = "jedec-flash";
- bank-width = <1>;
- reg = <0x00000000 0x00080000 0x00080000>;
- #address-cells = <1>;
- #size-cells = <1>;
- partition@0 {
- label = "OpenBIOS";
- reg = <0x00000000 0x00080000>;
- read-only;
- };
- };
-
- nvram@1,0 {
- /* NVRAM and RTC */
- compatible = "ds1743-nvram";
- #bytes = <0x2000>;
- reg = <0x00000001 0x00000000 0x00002000>;
- };
-
- keyboard@2,0 {
- compatible = "intel,82C42PC";
- reg = <0x00000002 0x00000000 0x00000002>;
- };
-
- ir@3,0 {
- compatible = "ti,TIR2000PAG";
- reg = <0x00000003 0x00000000 0x00000010>;
- };
-
- fpga@7,0 {
- compatible = "Walnut-FPGA";
- reg = <0x00000007 0x00000000 0x00000010>;
- virtual-reg = <0xf0300005>;
- };
- };
-
- PCI0: pci@ec000000 {
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "ibm,plb405gp-pci", "ibm,plb-pci";
- primary;
- reg = <0xeec00000 0x00000008 /* Config space access */
- 0xeed80000 0x00000004 /* IACK */
- 0xeed80000 0x00000004 /* Special cycle */
- 0xef480000 0x00000040>; /* Internal registers */
-
- /* Outbound ranges, one memory and one IO,
- * later cannot be changed. Chip supports a second
- * IO range but we don't use it for now
- */
- ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000
- 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
-
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>;
-
- /* Walnut has all 4 IRQ pins tied together per slot */
- interrupt-map-mask = <0xf800 0x0 0x0 0x0>;
- interrupt-map = <
- /* IDSEL 1 */
- 0x800 0x0 0x0 0x0 &UIC0 0x1c 0x8
-
- /* IDSEL 2 */
- 0x1000 0x0 0x0 0x0 &UIC0 0x1d 0x8
-
- /* IDSEL 3 */
- 0x1800 0x0 0x0 0x0 &UIC0 0x1e 0x8
-
- /* IDSEL 4 */
- 0x2000 0x0 0x0 0x0 &UIC0 0x1f 0x8
- >;
- };
- };
-
- chosen {
- stdout-path = "/plb/opb/serial@ef600300";
- };
-};
diff --git a/arch/powerpc/boot/ep405.c b/arch/powerpc/boot/ep405.c
deleted file mode 100644
index f9ad1e6a844e..000000000000
--- a/arch/powerpc/boot/ep405.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Embedded Planet EP405 with PlanetCore firmware
- *
- * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\
- *
- * Based on ep88xc.c by
- *
- * Scott Wood <scottwood@freescale.com>
- *
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- */
-
-#include "ops.h"
-#include "stdio.h"
-#include "planetcore.h"
-#include "dcr.h"
-#include "4xx.h"
-#include "io.h"
-
-static char *table;
-static u64 mem_size;
-
-static void platform_fixups(void)
-{
- u64 val;
- void *nvrtc;
-
- dt_fixup_memory(0, mem_size);
- planetcore_set_mac_addrs(table);
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
- printf("No PlanetCore crystal frequency key.\r\n");
- return;
- }
- ibm405gp_fixup_clocks(val, 0xa8c000);
- ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
- ibm4xx_fixup_ebc_ranges("/plb/ebc");
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_KB_NVRAM, &val)) {
- printf("No PlanetCore NVRAM size key.\r\n");
- return;
- }
- nvrtc = finddevice("/plb/ebc/nvrtc@4,200000");
- if (nvrtc != NULL) {
- u32 reg[3] = { 4, 0x200000, 0};
- getprop(nvrtc, "reg", reg, 3);
- reg[2] = (val << 10) & 0xffffffff;
- setprop(nvrtc, "reg", reg, 3);
- }
-}
-
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- table = (char *)r3;
- planetcore_prepare_table(table);
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
- return;
-
- mem_size *= 1024 * 1024;
- simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
-
- fdt_init(_dtb_start);
-
- planetcore_set_stdout_path(table);
-
- serial_console_init();
- platform_ops.fixups = platform_fixups;
-}
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index e0606766480f..6455fc9a244f 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -88,7 +88,6 @@ int serial_console_init(void);
int ns16550_console_init(void *devp, struct serial_console_data *scdp);
int cpm_console_init(void *devp, struct serial_console_data *scdp);
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp);
-int uartlite_console_init(void *devp, struct serial_console_data *scdp);
int opal_console_init(void *devp, struct serial_console_data *scdp);
void *simple_alloc_init(char *base, unsigned long heap_size,
unsigned long granularity, unsigned long max_allocs);
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 9457863147f9..0bfa7e87e546 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -132,11 +132,6 @@ int serial_console_init(void)
else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
rc = mpc5200_psc_console_init(devp, &serial_cd);
#endif
-#ifdef CONFIG_XILINX_VIRTEX
- else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") ||
- dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a"))
- rc = uartlite_console_init(devp, &serial_cd);
-#endif
#ifdef CONFIG_PPC64_BOOT_WRAPPER
else if (dt_is_compatible(devp, "ibm,opal-console-raw"))
rc = opal_console_init(devp, &serial_cd);
diff --git a/arch/powerpc/boot/treeboot-walnut.c b/arch/powerpc/boot/treeboot-walnut.c
deleted file mode 100644
index 623f58e7f7c9..000000000000
--- a/arch/powerpc/boot/treeboot-walnut.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Old U-boot compatibility for Walnut
- *
- * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * Copyright 2007 IBM Corporation
- * Based on cuboot-83xx.c, which is:
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- */
-
-#include "ops.h"
-#include "stdio.h"
-#include "dcr.h"
-#include "4xx.h"
-#include "io.h"
-
-BSS_STACK(4096);
-
-static void walnut_flashsel_fixup(void)
-{
- void *devp, *sram;
- u32 reg_flash[3] = {0x0, 0x0, 0x80000};
- u32 reg_sram[3] = {0x0, 0x0, 0x80000};
- u8 *fpga;
- u8 fpga_brds1 = 0x0;
-
- devp = finddevice("/plb/ebc/fpga");
- if (!devp)
- fatal("Couldn't locate FPGA node\n\r");
-
- if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga))
- fatal("no virtual-reg property\n\r");
-
- fpga_brds1 = in_8(fpga);
-
- devp = finddevice("/plb/ebc/flash");
- if (!devp)
- fatal("Couldn't locate flash node\n\r");
-
- if (getprop(devp, "reg", reg_flash, sizeof(reg_flash)) != sizeof(reg_flash))
- fatal("flash reg property has unexpected size\n\r");
-
- sram = finddevice("/plb/ebc/sram");
- if (!sram)
- fatal("Couldn't locate sram node\n\r");
-
- if (getprop(sram, "reg", reg_sram, sizeof(reg_sram)) != sizeof(reg_sram))
- fatal("sram reg property has unexpected size\n\r");
-
- if (fpga_brds1 & 0x1) {
- reg_flash[1] ^= 0x80000;
- reg_sram[1] ^= 0x80000;
- }
-
- setprop(devp, "reg", reg_flash, sizeof(reg_flash));
- setprop(sram, "reg", reg_sram, sizeof(reg_sram));
-}
-
-#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b
-static void walnut_fixups(void)
-{
- ibm4xx_sdram_fixup_memsize();
- ibm405gp_fixup_clocks(33330000, 0xa8c000);
- ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
- ibm4xx_fixup_ebc_ranges("/plb/ebc");
- walnut_flashsel_fixup();
- dt_fixup_mac_address_by_alias("ethernet0", (u8 *) WALNUT_OPENBIOS_MAC_OFF);
-}
-
-void platform_init(void)
-{
- unsigned long end_of_ram = 0x2000000;
- unsigned long avail_ram = end_of_ram - (unsigned long) _end;
-
- simple_alloc_init(_end, avail_ram, 32, 32);
- platform_ops.fixups = walnut_fixups;
- platform_ops.exit = ibm40x_dbcr_reset;
- fdt_init(_dtb_start);
- serial_console_init();
-}
diff --git a/arch/powerpc/boot/uartlite.c b/arch/powerpc/boot/uartlite.c
deleted file mode 100644
index 46bed69b4169..000000000000
--- a/arch/powerpc/boot/uartlite.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Xilinx UARTLITE bootloader driver
- *
- * Copyright (C) 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <stdarg.h>
-#include <stddef.h>
-#include "types.h"
-#include "string.h"
-#include "stdio.h"
-#include "io.h"
-#include "ops.h"
-
-#define ULITE_RX 0x00
-#define ULITE_TX 0x04
-#define ULITE_STATUS 0x08
-#define ULITE_CONTROL 0x0c
-
-#define ULITE_STATUS_RXVALID 0x01
-#define ULITE_STATUS_TXFULL 0x08
-
-#define ULITE_CONTROL_RST_RX 0x02
-
-static void * reg_base;
-
-static int uartlite_open(void)
-{
- /* Clear the RX FIFO */
- out_be32(reg_base + ULITE_CONTROL, ULITE_CONTROL_RST_RX);
- return 0;
-}
-
-static void uartlite_putc(unsigned char c)
-{
- u32 reg = ULITE_STATUS_TXFULL;
- while (reg & ULITE_STATUS_TXFULL) /* spin on TXFULL bit */
- reg = in_be32(reg_base + ULITE_STATUS);
- out_be32(reg_base + ULITE_TX, c);
-}
-
-static unsigned char uartlite_getc(void)
-{
- u32 reg = 0;
- while (!(reg & ULITE_STATUS_RXVALID)) /* spin waiting for RXVALID bit */
- reg = in_be32(reg_base + ULITE_STATUS);
- return in_be32(reg_base + ULITE_RX);
-}
-
-static u8 uartlite_tstc(void)
-{
- u32 reg = in_be32(reg_base + ULITE_STATUS);
- return reg & ULITE_STATUS_RXVALID;
-}
-
-int uartlite_console_init(void *devp, struct serial_console_data *scdp)
-{
- int n;
- unsigned long reg_phys;
-
- n = getprop(devp, "virtual-reg", &reg_base, sizeof(reg_base));
- if (n != sizeof(reg_base)) {
- if (!dt_xlate_reg(devp, 0, &reg_phys, NULL))
- return -1;
-
- reg_base = (void *)reg_phys;
- }
-
- scdp->open = uartlite_open;
- scdp->putc = uartlite_putc;
- scdp->getc = uartlite_getc;
- scdp->tstc = uartlite_tstc;
- scdp->close = NULL;
- return 0;
-}
diff --git a/arch/powerpc/boot/virtex.c b/arch/powerpc/boot/virtex.c
deleted file mode 100644
index f731cbb4bff0..000000000000
--- a/arch/powerpc/boot/virtex.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * The platform specific code for virtex devices since a boot loader is not
- * always used.
- *
- * (C) Copyright 2008 Xilinx, Inc.
- */
-
-#include "ops.h"
-#include "io.h"
-#include "stdio.h"
-
-#define UART_DLL 0 /* Out: Divisor Latch Low */
-#define UART_DLM 1 /* Out: Divisor Latch High */
-#define UART_FCR 2 /* Out: FIFO Control Register */
-#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
-#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
-#define UART_LCR 3 /* Out: Line Control Register */
-#define UART_MCR 4 /* Out: Modem Control Register */
-#define UART_MCR_RTS 0x02 /* RTS complement */
-#define UART_MCR_DTR 0x01 /* DTR complement */
-#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
-#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
-
-static int virtex_ns16550_console_init(void *devp)
-{
- unsigned char *reg_base;
- u32 reg_shift, reg_offset, clk, spd;
- u16 divisor;
- int n;
-
- if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
- return -1;
-
- n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
- if (n == sizeof(reg_offset))
- reg_base += reg_offset;
-
- n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
- if (n != sizeof(reg_shift))
- reg_shift = 0;
-
- n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd));
- if (n != sizeof(spd))
- spd = 9600;
-
- /* should there be a default clock rate?*/
- n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk));
- if (n != sizeof(clk))
- return -1;
-
- divisor = clk / (16 * spd);
-
- /* Access baud rate */
- out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB);
-
- /* Baud rate based on input clock */
- out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF);
- out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8);
-
- /* 8 data, 1 stop, no parity */
- out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8);
-
- /* RTS/DTR */
- out_8(reg_base + (UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR);
-
- /* Clear transmitter and receiver */
- out_8(reg_base + (UART_FCR << reg_shift),
- UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
- return 0;
-}
-
-/* For virtex, the kernel may be loaded without using a bootloader and if so
- some UARTs need more setup than is provided in the normal console init
-*/
-int platform_specific_init(void)
-{
- void *devp;
- char devtype[MAX_PROP_LEN];
- char path[MAX_PATH_LEN];
-
- devp = finddevice("/chosen");
- if (devp == NULL)
- return -1;
-
- if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
- devp = finddevice(path);
- if (devp == NULL)
- return -1;
-
- if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
- && !strcmp(devtype, "serial")
- && (dt_is_compatible(devp, "ns16550")))
- virtex_ns16550_console_init(devp);
- }
- return 0;
-}
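The deleted virtex.c above programmed the ns16550 divisor latch from the device-tree clock and baud rate using the usual divisor = clk / (16 * baud) relation. A minimal standalone C sketch of that arithmetic, for illustration only — the 100 MHz clock is an assumed example value, not something taken from the patch:

/* Illustrative only; mirrors the divisor math in the removed virtex.c. */
#include <stdio.h>

int main(void)
{
	unsigned int clk = 100000000;	/* assumed example clock-frequency */
	unsigned int spd = 9600;	/* default baud used by the old code */
	unsigned short divisor = clk / (16 * spd);

	printf("divisor = %u (DLL = 0x%02x, DLM = 0x%02x)\n",
	       divisor, divisor & 0xFF, divisor >> 8);
	return 0;
}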
diff --git a/arch/powerpc/boot/virtex405-head.S b/arch/powerpc/boot/virtex405-head.S
deleted file mode 100644
index 00bab7d7c48c..000000000000
--- a/arch/powerpc/boot/virtex405-head.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include "ppc_asm.h"
-
- .text
- .global _zimage_start
-_zimage_start:
-
- /* PPC errata 213: needed by Virtex-4 FX */
- mfccr0 0
- oris 0,0,0x50000000@h
- mtccr0 0
-
- /*
- * Invalidate the data cache if the data cache is turned off.
- * - The 405 core does not invalidate the data cache on power-up
- * or reset but does turn off the data cache. We cannot assume
- * that the cache contents are valid.
- * - If the data cache is turned on this must have been done by
- * a bootloader and we assume that the cache contents are
- * valid.
- */
- mfdccr r9
- cmplwi r9,0
- bne 2f
- lis r9,0
- li r8,256
- mtctr r8
-1: dccci r0,r9
- addi r9,r9,0x20
- bdnz 1b
-2: b _zimage_start_lib
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ed6266367bc0..cd58a62e810d 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -29,6 +29,7 @@ set -e
# Allow for verbose output
if [ "$V" = 1 ]; then
set -x
+ map="-Map wrapper.map"
fi
# defaults
@@ -323,14 +324,6 @@ adder875-redboot)
platformo="$object/fixed-head.o $object/redboot-8xx.o"
binary=y
;;
-simpleboot-virtex405-*)
- platformo="$object/virtex405-head.o $object/simpleboot.o $object/virtex.o"
- binary=y
- ;;
-simpleboot-virtex440-*)
- platformo="$object/fixed-head.o $object/simpleboot.o $object/virtex.o"
- binary=y
- ;;
simpleboot-*)
platformo="$object/fixed-head.o $object/simpleboot.o"
binary=y
@@ -500,7 +493,7 @@ if [ "$platform" != "miboot" ]; then
text_start="-Ttext $link_address"
fi
#link everything
- ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" \
+ ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" $map \
$platformo $tmp $object/wrapper.a
rm $tmp
fi
@@ -570,7 +563,18 @@ ps3)
count=$overlay_size bs=1
odir="$(dirname "$ofile.bin")"
- rm -f "$odir/otheros.bld"
- gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
+
+ # The ps3's flash loader has a size limit of 16 MiB for the uncompressed
+ # image. If a compressed image that exceeded this limit is written to
+ # flash the loader will decompress that image until the 16 MiB limit is
+ # reached, then enter the system reset vector of the partially decompressed
+ # image. No warning is issued.
+ rm -f "$odir"/{otheros,otheros-too-big}.bld
+ size=$(${CROSS}nm --no-sort --radix=d "$ofile" | egrep ' _end$' | cut -d' ' -f1)
+ bld="otheros.bld"
+ if [ $size -gt $((0x1000000)) ]; then
+ bld="otheros-too-big.bld"
+ fi
+ gzip -n --force -9 --stdout "$ofile.bin" > "$odir/$bld"
;;
esac
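The wrapper change above writes the PS3 image under a different name when its uncompressed size exceeds the flash loader's 16 MiB limit, so an over-size image is never silently booted half-decompressed. A minimal C sketch of that decision — the filenames and the 0x1000000 limit come from the hunk, everything else is illustrative:

/* Illustrative only; not part of the patch. */
#include <stdio.h>

static const char *ps3_bld_name(unsigned long long uncompressed_size)
{
	const unsigned long long limit = 0x1000000ULL;	/* 16 MiB loader limit */

	return uncompressed_size > limit ? "otheros-too-big.bld" : "otheros.bld";
}

int main(void)
{
	printf("%s\n", ps3_bld_name(15ULL << 20));	/* within limit */
	printf("%s\n", ps3_bld_name(17ULL << 20));	/* over limit */
	return 0;
}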
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index db93c117be36..25eed86ec528 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -9,7 +9,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ACADIA=y
-# CONFIG_WALNUT is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
deleted file mode 100644
index a3854cf65f8d..000000000000
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ /dev/null
@@ -1,62 +0,0 @@
-CONFIG_40x=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_EP405=y
-# CONFIG_WALNUT is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_CONNECTOR=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_NETDEVICES=y
-CONFIG_IBM_EMAC=y
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_THERMAL=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_EXT2_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index edc22464dfb5..3549c9e950e8 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_KILAUEA=y
-# CONFIG_WALNUT is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index 579fa846839c..6a735ee75715 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -8,7 +8,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_WALNUT is not set
CONFIG_APM8018X=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 188789b9aa4c..4563f88acf0c 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -9,7 +9,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MAKALU=y
-# CONFIG_WALNUT is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 5bf6af7ef093..2a2bb3f46847 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -10,7 +10,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_WALNUT is not set
CONFIG_OBS600=y
CONFIG_MATH_EMULATION=y
CONFIG_NET=y
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
deleted file mode 100644
index 5e7c61d1d7d0..000000000000
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ /dev/null
@@ -1,75 +0,0 @@
-CONFIG_40x=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_WALNUT is not set
-CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y
-CONFIG_PREEMPT=y
-CONFIG_MATH_EMULATION=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-CONFIG_PCI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NETFILTER=y
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_XILINX_SYSACE=y
-CONFIG_NETDEVICES=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_XILINX_HWICAP=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_XILINX=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_XILINX=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
deleted file mode 100644
index 1f74079e1703..000000000000
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ /dev/null
@@ -1,74 +0,0 @@
-CONFIG_44x=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_EBONY is not set
-CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y
-CONFIG_PREEMPT=y
-CONFIG_MATH_EMULATION=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NETFILTER=y
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_XILINX_SYSACE=y
-CONFIG_NETDEVICES=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_XILINX_HWICAP=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_XILINX=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_XILINX=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index f55e23cb176c..5326bc739279 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -10,7 +10,6 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_ADDER875=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_1000=y
# CONFIG_SECCOMP is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 0e2e5e81a359..f5c3e72da719 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_EP88XC=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 5320735395e7..5c56d36cdfc5 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MPC86XADS=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_1000=y
CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 82a008c04eae..949ff9ccda5e 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -11,7 +11,6 @@ CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index df8bdbaa5d8f..2de9aadf0f50 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -347,3 +347,4 @@ CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index a5f683aed328..25f6c91e843a 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -10,11 +10,9 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PPC4xx_GPIO=y
CONFIG_ACADIA=y
-CONFIG_EP405=y
CONFIG_HOTFOOT=y
CONFIG_KILAUEA=y
CONFIG_MAKALU=y
-CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,33 +35,26 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_XILINX_SYSACE=m
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
CONFIG_SERIO=m
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=m
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_XILINX_HWICAP=m
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_IBM_IIC=m
-CONFIG_GPIO_XILINX=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_FB=m
-CONFIG_FB_XILINX=m
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=m
CONFIG_VFAT_FS=m
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index a41eedfe0a5f..8b595f67068c 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -22,7 +22,6 @@ CONFIG_GLACIER=y
CONFIG_REDWOOD=y
CONFIG_EIGER=y
CONFIG_YOSEMITE=y
-CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y
CONFIG_PPC4xx_GPIO=y
CONFIG_MATH_EMULATION=y
CONFIG_NET=y
@@ -46,7 +45,6 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_XILINX_SYSACE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
@@ -57,7 +55,6 @@ CONFIG_IBM_EMAC=y
CONFIG_SERIO=m
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=m
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -65,18 +62,13 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_XILINX_HWICAP=m
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_IBM_IIC=m
-CONFIG_GPIO_XILINX=y
# CONFIG_HWMON is not set
CONFIG_FB=m
-CONFIG_FB_XILINX=m
CONFIG_USB=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index bae8170d7401..8d7e3e98856d 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -281,6 +281,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_LIBNVDIMM=y
CONFIG_RAS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -358,6 +359,7 @@ CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_CRYPTO_DEV_VMX=y
CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 0bea4d3ffb85..dfa4a726333b 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -322,3 +322,4 @@ CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index eda8bfb2d0a3..77857d513022 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -15,7 +15,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_TQM8XX=y
-CONFIG_8xx_COPYBACK=y
# CONFIG_8xx_CPU15 is not set
CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h
deleted file mode 100644
index 7270d3ae7c8e..000000000000
--- a/arch/powerpc/include/asm/asm-405.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_POWERPC_ASM_405_H
-#define _ASM_POWERPC_ASM_405_H
-
-#include <asm/asm-const.h>
-
-#ifdef __KERNEL__
-#ifdef CONFIG_IBM405_ERR77
-/* Erratum #77 on the 405 means we need a sync or dcbt before every
- * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
-#define PPC405_ERR77_SYNC stringify_in_c(sync;)
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-#endif
-
-#endif /* _ASM_POWERPC_ASM_405_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 31c231ea56b7..498785ffc25f 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#include <asm/asm-405.h>
#define ATOMIC_INIT(i) { (i) }
@@ -47,7 +46,6 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
#asm_op " %0,%2,%0\n" \
- PPC405_ERR77(0,%3) \
" stwcx. %0,0,%3 \n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
@@ -63,7 +61,6 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
#asm_op " %0,%2,%0\n" \
- PPC405_ERR77(0, %3) \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
@@ -81,7 +78,6 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
#asm_op " %1,%3,%0\n" \
- PPC405_ERR77(0, %4) \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
@@ -130,7 +126,6 @@ static __inline__ void atomic_inc(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n\
addic %0,%0,1\n"
- PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
@@ -146,7 +141,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
" addic %0,%0,1\n"
- PPC405_ERR77(0, %2)
" stwcx. %0,0,%2\n"
" bne- 1b"
: "=&r" (t), "+m" (v->counter)
@@ -163,7 +157,6 @@ static __inline__ void atomic_dec(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n\
addic %0,%0,-1\n"
- PPC405_ERR77(0,%2)\
" stwcx. %0,0,%2\n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
@@ -179,7 +172,6 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
" addic %0,%0,-1\n"
- PPC405_ERR77(0, %2)
" stwcx. %0,0,%2\n"
" bne- 1b"
: "=&r" (t), "+m" (v->counter)
@@ -220,7 +212,6 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
cmpw 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
- PPC405_ERR77(0,%2)
" stwcx. %0,0,%1 \n\
bne- 1b \n"
PPC_ATOMIC_EXIT_BARRIER
@@ -251,7 +242,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
cmpwi 0,%0,0\n\
beq- 2f\n\
addic %1,%0,1\n"
- PPC405_ERR77(0,%2)
" stwcx. %1,0,%2\n\
bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
@@ -280,7 +270,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
cmpwi %0,1\n\
addi %0,%0,-1\n\
blt- 2f\n"
- PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 28dcf8222943..4a4d3afd5340 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -41,7 +41,6 @@
#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>
-#include <asm/asm-405.h>
/* PPC bit number conversion */
#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
@@ -73,7 +72,6 @@ static inline void fn(unsigned long mask, \
prefix \
"1:" PPC_LLARX(%0,0,%3,0) "\n" \
stringify_in_c(op) "%0,%0,%2\n" \
- PPC405_ERR77(0,%3) \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
: "=&r" (old), "+m" (*p) \
@@ -119,7 +117,6 @@ static inline unsigned long fn( \
prefix \
"1:" PPC_LLARX(%0,0,%3,eh) "\n" \
stringify_in_c(op) "%1,%0,%2\n" \
- PPC405_ERR77(0,%3) \
PPC_STLCX "%1,0,%3\n" \
"bne- 1b\n" \
postfix \
@@ -175,7 +172,6 @@ clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
PPC_RELEASE_BARRIER
"1:" PPC_LLARX(%0,0,%3,0) "\n"
"andc %1,%0,%2\n"
- PPC405_ERR77(0,%3)
PPC_STLCX "%1,0,%3\n"
"bne- 1b\n"
: "=&r" (old), "=&r" (t)
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index db0a1c281587..32fd4452e960 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H
+#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#ifdef __ASSEMBLY__
@@ -75,7 +76,7 @@
.macro kuap_check current, gpr
#ifdef CONFIG_PPC_KUAP_DEBUG
- lwz \gpr, KUAP(thread)
+ lwz \gpr, THREAD + KUAP(\current)
999: twnei \gpr, 0
EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
@@ -108,7 +109,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
u32 addr, end;
BUILD_BUG_ON(!__builtin_constant_p(dir));
- BUILD_BUG_ON(dir == KUAP_CURRENT);
+ BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
if (!(dir & KUAP_WRITE))
return;
@@ -131,7 +132,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __us
BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (dir == KUAP_CURRENT) {
+ if (dir & KUAP_CURRENT_WRITE) {
u32 kuap = current->thread.kuap;
if (unlikely(!kuap))
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 7549393c4c43..224912432821 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -2,7 +2,6 @@
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/book3s/32/hash.h>
@@ -113,6 +112,9 @@ static inline bool pte_user(pte_t pte)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -188,14 +190,14 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
* memory shall not share segments.
*/
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
-#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
+#define VMALLOC_START ((ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
~(VMALLOC_OFFSET - 1))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#ifdef CONFIG_KASAN_VMALLOC
-#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
@@ -218,7 +220,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define pte_clear(mm, addr, ptep) \
- do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+ do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -253,84 +255,68 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
* and the PTE may be either 32 or 64 bit wide. In the later case,
* when using atomic updates, only the low part of the PTE is
* accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
*/
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
-{
- unsigned long old, tmp;
-
- __asm__ __volatile__("\
-1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
-" stwcx. %1,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-
- return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
- unsigned long long old;
+ pte_basic_t old;
unsigned long tmp;
- __asm__ __volatile__("\
-1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
-" stwcx. %1,0,%4\n\
- bne- 1b"
+ __asm__ __volatile__(
+#ifndef CONFIG_PTE_64BIT
+"1: lwarx %0, 0, %3\n"
+" andc %1, %0, %4\n"
+#else
+"1: lwarx %L0, 0, %3\n"
+" lwz %0, -4(%3)\n"
+" andc %1, %L0, %4\n"
+#endif
+" or %1, %1, %5\n"
+" stwcx. %1, 0, %3\n"
+" bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+#ifndef CONFIG_PTE_64BIT
+ : "r" (p),
+#else
+ : "b" ((unsigned long)(p) + 4),
+#endif
+ "r" (clr), "r" (set), "m" (*p)
: "cc" );
return old;
}
-#endif /* CONFIG_PTE_64BIT */
/*
* 2.6 calls this without flushing the TLB entry; this is wrong
* for our hash-based implementation, we fix that up here.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
- old = pte_update(ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
if (old & _PAGE_HASHPTE) {
unsigned long ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(context, addr, ptephys, 1);
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
}
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+ return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(ptep, _PAGE_RW, 0);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -341,7 +327,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- pte_update(ptep, 0, set);
+ pte_update(vma->vm_mm, address, ptep, 0, set, 0);
flush_tlb_page(vma, address);
}
@@ -349,26 +335,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-static inline void pte_unmap(pte_t *pte) { }
-
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
@@ -539,7 +508,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
| (pte_val(pte) & ~_PAGE_HASHPTE));
else
- pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+ pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
#elif defined(CONFIG_PTE_64BIT)
/* Second case is 32-bit with 64-bit PTE. In this case, we
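The reworked pte_update() above is a clear-then-set read-modify-write implemented with lwarx/stwcx. A portable C11 sketch of the same loop, purely for illustration — pte_update_sketch and the use of unsigned long in place of pte_basic_t are assumptions, not kernel code:

/* Illustrative only; shows the shape of the lwarx/stwcx. loop. */
#include <stdatomic.h>

static unsigned long pte_update_sketch(_Atomic unsigned long *p,
				       unsigned long clr, unsigned long set)
{
	unsigned long old = atomic_load_explicit(p, memory_order_relaxed);
	unsigned long tmp;

	do {
		tmp = (old & ~clr) | set;	/* andc ...; or ... */
	} while (!atomic_compare_exchange_weak_explicit(p, &old, tmp,
							memory_order_relaxed,
							memory_order_relaxed));
	return old;	/* caller inspects the old flags, e.g. _PAGE_HASHPTE */
}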
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 6fc4520092c7..73ad038ed10b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea)
#define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS)
-static inline int hash__pgd_bad(pgd_t pgd)
+static inline int hash__p4d_bad(p4d_t p4d)
{
- return (pgd_val(pgd) == 0);
+ return (p4d_val(p4d) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 3bcef989a35d..3ee1ec60be84 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -12,11 +12,17 @@
#ifdef __ASSEMBLY__
-.macro kuap_restore_amr gpr
+.macro kuap_restore_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP
BEGIN_MMU_FTR_SECTION_NESTED(67)
- ld \gpr, STACK_REGS_KUAP(r1)
- mtspr SPRN_AMR, \gpr
+ mfspr \gpr1, SPRN_AMR
+ ld \gpr2, STACK_REGS_KUAP(r1)
+ cmpd \gpr1, \gpr2
+ beq 998f
+ isync
+ mtspr SPRN_AMR, \gpr2
+ /* No isync required, see kuap_restore_amr() */
+998:
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
@@ -60,10 +66,28 @@
#include <asm/mmu.h>
#include <asm/ptrace.h>
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
{
- if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
+ if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
+ isync();
mtspr(SPRN_AMR, regs->kuap);
+ /*
+ * No isync required here because we are about to RFI back to
+ * previous context before any user accesses would be made,
+ * which is a CSI.
+ */
+ }
+}
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+ if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+ unsigned long amr = mfspr(SPRN_AMR);
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+ WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+ return amr;
+ }
+ return 0;
}
static inline void kuap_check_amr(void)
@@ -142,13 +166,18 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
{
}
static inline void kuap_check_amr(void)
{
}
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+ return 0;
+}
#endif /* CONFIG_PPC_KUAP */
#endif /* __ASSEMBLY__ */
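The reworked kuap_restore_amr path above only pays the isync/mtspr cost when the AMR actually needs to change. A minimal C sketch of that skip-if-unchanged shape, with the SPR accesses stubbed out — restore_amr_sketch and current_amr are illustrative stand-ins, not kernel symbols:

/* Illustrative only; not part of the patch. */
static unsigned long current_amr;	/* stand-in for mfspr(SPRN_AMR) */

static void restore_amr_sketch(unsigned long saved_amr)
{
	if (current_amr == saved_amr)
		return;			/* common path: no isync, no mtspr */

	/* isync() would go here to context-synchronise before the write */
	current_amr = saved_amr;	/* stand-in for mtspr(SPRN_AMR, ...) */
	/* per the patch comment, no trailing isync: the coming RFI is a CSI */
}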
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index bb3deb76c951..5393a535240c 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -116,6 +116,9 @@ typedef struct {
/* Number of users of the external (Nest) MMU */
atomic_t copros;
+ /* Number of user space windows opened in process mm_context */
+ atomic_t vas_windows;
+
struct hash_mm_context *hash_context;
unsigned long vdso_base;
@@ -208,7 +211,7 @@ void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
-static inline void early_init_mmu(void)
+static inline void __init early_init_mmu(void)
{
if (radix_enabled())
return radix__early_init_mmu();
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index a41e91bd0580..69c5b051734f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
- *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
+ *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index d6438659926c..25c3cb8272c0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -2,7 +2,7 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
@@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift;
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS 0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS 0xc0000000000000ffUL
+#define P4D_MASKED_BITS 0xc0000000000000ffUL
/*
* Used as an indicator for rcu callback functions
@@ -553,6 +553,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
}
#endif /* CONFIG_NUMA_BALANCING */
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
static inline int pte_present(pte_t pte)
{
/*
@@ -561,12 +567,11 @@ static inline int pte_present(pte_t pte)
* invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
* if we find _PAGE_PRESENT cleared.
*/
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
-}
-static inline bool pte_hw_valid(pte_t pte)
-{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+ if (pte_hw_valid(pte))
+ return true;
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
+ cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
}
#ifdef CONFIG_PPC_MEM_KEYS
@@ -949,81 +954,59 @@ static inline bool pud_access_permitted(pud_t pud, bool write)
return pte_access_permitted(pud_pte(pud), write);
}
-#define pgd_write(pgd) pte_write(pgd_pte(pgd))
+#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) })
+static inline __be64 p4d_raw(p4d_t x)
+{
+ return pgd_raw(x.pgd);
+}
+
+#define p4d_write(p4d) pte_write(p4d_pte(p4d))
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
{
- *pgdp = __pgd(0);
+ *p4dp = __p4d(0);
}
-static inline int pgd_none(pgd_t pgd)
+static inline int p4d_none(p4d_t p4d)
{
- return !pgd_raw(pgd);
+ return !p4d_raw(p4d);
}
-static inline int pgd_present(pgd_t pgd)
+static inline int p4d_present(p4d_t p4d)
{
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+ return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
}
-static inline pte_t pgd_pte(pgd_t pgd)
+static inline pte_t p4d_pte(p4d_t p4d)
{
- return __pte_raw(pgd_raw(pgd));
+ return __pte_raw(p4d_raw(p4d));
}
-static inline pgd_t pte_pgd(pte_t pte)
+static inline p4d_t pte_p4d(pte_t pte)
{
- return __pgd_raw(pte_raw(pte));
+ return __p4d_raw(pte_raw(pte));
}
-static inline int pgd_bad(pgd_t pgd)
+static inline int p4d_bad(p4d_t p4d)
{
if (radix_enabled())
- return radix__pgd_bad(pgd);
- return hash__pgd_bad(pgd);
+ return radix__p4d_bad(p4d);
+ return hash__p4d_bad(p4d);
}
-#define pgd_access_permitted pgd_access_permitted
-static inline bool pgd_access_permitted(pgd_t pgd, bool write)
+#define p4d_access_permitted p4d_access_permitted
+static inline bool p4d_access_permitted(p4d_t p4d, bool write)
{
- return pte_access_permitted(pgd_pte(pgd), write);
+ return pte_access_permitted(p4d_pte(p4d), write);
}
-extern struct page *pgd_page(pgd_t pgd);
+extern struct page *p4d_page(p4d_t p4d);
/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr) __pa(ptr)
-#define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
-#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
-#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pud_offset(pgdp, addr) \
- (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS)
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -1139,8 +1122,11 @@ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd);
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd)
+{
+}
+
extern int hash__has_transparent_hugepage(void);
static inline int has_transparent_hugepage(void)
{
@@ -1256,6 +1242,11 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
}
#define pmdp_collapse_flush pmdp_collapse_flush
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long addr,
+ pmd_t *pmdp, int full);
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
pmd_t *pmdp, pgtable_t pgtable)
@@ -1366,11 +1357,11 @@ static inline bool pud_is_leaf(pud_t pud)
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
-#define pgd_is_leaf pgd_is_leaf
-#define pgd_leaf pgd_is_leaf
-static inline bool pgd_is_leaf(pgd_t pgd)
+#define p4d_is_leaf p4d_is_leaf
+#define p4d_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
{
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+ return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE));
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 08c222d5b764..0cba794c4fb8 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -30,7 +30,7 @@
/* Don't have anything in the reserved bits and leaf bits */
#define RADIX_PMD_BAD_BITS 0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
-#define RADIX_PGD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_P4D_BAD_BITS 0x60000000000000e0UL
#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
@@ -227,9 +227,9 @@ static inline int radix__pud_bad(pud_t pud)
}
-static inline int radix__pgd_bad(pgd_t pgd)
+static inline int radix__p4d_bad(p4d_t p4d)
{
- return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
+ return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 64d02a704bcb..3b95769739c7 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -113,8 +113,7 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
/* Private function for use by PCI IO mapping code */
-extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
+extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr);
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 609cab1d58f2..2124b7090db9 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -6,7 +6,7 @@
/* bytes per L1 cache line */
-#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
+#if defined(CONFIG_PPC_8xx)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
#define IFETCH_ALIGN_SHIFT 2
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index e92191b390f3..de600b915a3c 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -4,23 +4,9 @@
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/mm.h>
#include <asm/cputable.h>
-/*
- * No cache flushing is required when address mappings are changed,
- * because the caches on PowerPCs are physically addressed.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Book3s has no ptesync after setting a pte, so without this ptesync it's
@@ -33,20 +19,20 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
asm volatile("ptesync" ::: "memory");
}
-#else
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-#endif
+#define flush_cache_vmap flush_cache_vmap
+#endif /* CONFIG_PPC_BOOK3S_64 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
void flush_icache_range(unsigned long start, unsigned long stop);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr,
- int len);
-extern void flush_dcache_icache_page(struct page *page);
+#define flush_icache_range flush_icache_range
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
+#define flush_icache_user_page flush_icache_user_page
+
+void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);
/**
@@ -111,14 +97,6 @@ static inline void invalidate_dcache_range(unsigned long start,
mb(); /* sync */
}
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
- } while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-#endif /* __KERNEL__ */
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 27183871eb3b..cf091c4c22e5 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>
-#include <asm/asm-405.h>
#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
@@ -29,7 +28,6 @@ static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
"1: lwarx %0,0,%3\n" \
" andc %1,%0,%5\n" \
" or %1,%1,%4\n" \
- PPC405_ERR77(0,%3) \
" stwcx. %1,0,%3\n" \
" bne- 1b\n" \
: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
@@ -60,7 +58,6 @@ u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
" bne- 2f\n" \
" andc %1,%0,%6\n" \
" or %1,%1,%5\n" \
- PPC405_ERR77(0,%3) \
" stwcx. %1,0,%3\n" \
" bne- 1b\n" \
br2 \
@@ -92,7 +89,6 @@ __xchg_u32_local(volatile void *p, unsigned long val)
__asm__ __volatile__(
"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
@@ -109,7 +105,6 @@ __xchg_u32_relaxed(u32 *p, unsigned long val)
__asm__ __volatile__(
"1: lwarx %0,0,%2\n"
- PPC405_ERR77(0, %2)
" stwcx. %3,0,%2\n"
" bne- 1b"
: "=&r" (prev), "+m" (*p)
@@ -127,7 +122,6 @@ __xchg_u64_local(volatile void *p, unsigned long val)
__asm__ __volatile__(
"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
@@ -144,7 +138,6 @@ __xchg_u64_relaxed(u64 *p, unsigned long val)
__asm__ __volatile__(
"1: ldarx %0,0,%2\n"
- PPC405_ERR77(0, %2)
" stdcx. %3,0,%2\n"
" bne- 1b"
: "=&r" (prev), "+m" (*p)
@@ -229,7 +222,6 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
- PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
@@ -252,7 +244,6 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
- PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
"\n\
@@ -273,7 +264,6 @@ __cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
"1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
" cmpw 0,%0,%3\n"
" bne- 2f\n"
- PPC405_ERR77(0, %2)
" stwcx. %4,0,%2\n"
" bne- 1b\n"
"2:"
@@ -301,7 +291,6 @@ __cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
"1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
" cmpw 0,%0,%3\n"
" bne- 2f\n"
- PPC405_ERR77(0, %2)
" stwcx. %4,0,%2\n"
" bne- 1b\n"
PPC_ACQUIRE_BARRIER
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 898b54262881..eacc9102c251 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
+#include <asm/inst.h>
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
@@ -22,33 +23,33 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
-unsigned int create_branch(const unsigned int *addr,
- unsigned long target, int flags);
-unsigned int create_cond_branch(const unsigned int *addr,
- unsigned long target, int flags);
-int patch_branch(unsigned int *addr, unsigned long target, int flags);
-int patch_instruction(unsigned int *addr, unsigned int instr);
-int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+ unsigned long target, int flags);
+int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+ unsigned long target, int flags);
+int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
+int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
+int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
static inline unsigned long patch_site_addr(s32 *site)
{
return (unsigned long)site + *site;
}
-static inline int patch_instruction_site(s32 *site, unsigned int instr)
+static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
- return patch_instruction((unsigned int *)patch_site_addr(site), instr);
+ return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
- return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
+ return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
- return patch_instruction(addr, (*addr & ~clr) | set);
+ return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -56,13 +57,13 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
-int instr_is_relative_branch(unsigned int instr);
-int instr_is_relative_link_branch(unsigned int instr);
-int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
-unsigned long branch_target(const unsigned int *instr);
-unsigned int translate_branch(const unsigned int *dest,
- const unsigned int *src);
-extern bool is_conditional_branch(unsigned int instr);
+int instr_is_relative_branch(struct ppc_inst instr);
+int instr_is_relative_link_branch(struct ppc_inst instr);
+int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
+unsigned long branch_target(const struct ppc_inst *instr);
+int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
+ const struct ppc_inst *src);
+extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
#define patch_exception(exc, name) do { \
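
Note: the code-patching API above now passes instructions around as struct ppc_inst and reports encoding failures through the return value instead of handing back a raw word. A minimal sketch of a caller of the reworked interface (the helper name is made up; only create_branch(), patch_instruction() and ppc_inst() from this header and the new asm/inst.h are assumed, and this is essentially what patch_branch() itself does):

    #include <asm/inst.h>
    #include <asm/code-patching.h>

    /* Illustrative only: overwrite the instruction at @addr with a
     * relative branch to @target using the struct ppc_inst API. */
    static int patch_branch_to(struct ppc_inst *addr, unsigned long target)
    {
            struct ppc_inst instr;
            int err;

            /* create_branch() now returns an error code and hands back
             * the encoded branch through its first argument. */
            err = create_branch(&instr, addr, target, 0);
            if (err)
                    return err;

            return patch_instruction(addr, instr);
    }
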
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 40a4d3c6fd99..bac2252c839e 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -213,6 +213,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000)
#define CPU_FTR_P9_RADIX_PREFETCH_BUG LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_ARCH_31 LONG_ASM_CONST(0x0004000000000000)
#ifndef __ASSEMBLY__
@@ -467,6 +468,17 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
CPU_FTR_P9_TM_HV_ASSIST | \
CPU_FTR_P9_TM_XER_SO_BUG)
+#define CPU_FTRS_POWER10 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
+ CPU_FTR_ARCH_31)
#define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -485,14 +497,14 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
- CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
- CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | CPU_FTRS_POWER10)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
@@ -614,7 +626,11 @@ enum {
};
#endif /* __powerpc64__ */
-#define HBP_NUM 1
+/*
+ * Maximum number of hw breakpoints supported on powerpc. The number of
+ * breakpoints supported by the actual hw may be less than this.
+ */
+#define HBP_NUM_MAX 1
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 7756026b95ca..ec57daf87f40 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -45,7 +45,7 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-void __set_breakpoint(struct arch_hw_breakpoint *brk);
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk);
bool ppc_breakpoint_available(void);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index 28c3d936fdf3..414d209f45bb 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -65,6 +65,7 @@ struct of_drconf_cell_v2 {
#define DRCONF_MEM_ASSIGNED 0x00000008
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
+#define DRCONF_MEM_HOTREMOVABLE 0x00000100
static inline u32 drmem_lmb_size(void)
{
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index c814a2b55389..8d61c8f3fec4 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -64,12 +64,14 @@ struct fadump_memory_range {
};
/* fadump memory ranges info */
+#define RNG_NAME_SZ 16
struct fadump_mrange_info {
- char name[16];
+ char name[RNG_NAME_SZ];
struct fadump_memory_range *mem_ranges;
u32 mem_ranges_sz;
u32 mem_range_cnt;
u32 max_mem_ranges;
+ bool is_static;
};
/* Platform specific callback functions */
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index ca33f4ef6cb4..6003c2e533a0 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -128,6 +128,7 @@ extern void machine_check_fwnmi(void);
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;
+extern int ibm_nmi_interlock_token;
extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 2ef155a3c821..29188810ba30 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -16,8 +16,8 @@
#ifndef __ASSEMBLY__
#include <linux/sizes.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
@@ -86,6 +86,10 @@ enum fixed_addresses {
#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+#define FIXMAP_ALIGNED_SIZE (ALIGN(FIXADDR_TOP, PGDIR_SIZE) - \
+ ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE))
+#define FIXMAP_PTE_SIZE (FIXMAP_ALIGNED_SIZE / PGDIR_SIZE * PTE_TABLE_SIZE)
+
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
#define FIXMAP_PAGE_IO PAGE_KERNEL_NCG
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index f54a08a2cd70..bc76970b6ee5 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -108,9 +108,23 @@ static inline void this_cpu_enable_ftrace(void)
{
get_paca()->ftrace_enabled = 1;
}
+
+/* Set the paca-based ftrace_enabled state for this CPU */
+static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
+{
+ get_paca()->ftrace_enabled = ftrace_enabled;
+}
+
+static inline u8 this_cpu_get_ftrace_enabled(void)
+{
+ return get_paca()->ftrace_enabled;
+}
+
#else /* CONFIG_PPC64 */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
+static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
+static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#endif /* CONFIG_PPC64 */
#endif /* !__ASSEMBLY__ */
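
Note: the new this_cpu_set/get_ftrace_enabled() accessors let the paca-based ftrace_enabled state be saved and restored around a window where tracing must stay off, with the 32-bit stubs keeping such call sites compiling. A sketch of that save/restore pattern in a hypothetical caller:

    static void do_fragile_work(void)
    {
            /* Remember the current state so nested users restore correctly. */
            u8 ftrace_saved = this_cpu_get_ftrace_enabled();

            this_cpu_set_ftrace_enabled(0);         /* no tracing on this CPU */

            /* ... work that must not recurse into the tracer ... */

            this_cpu_set_ftrace_enabled(ftrace_saved);
    }
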
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index f187bb5e524e..e93ee3202e4c 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,14 +8,12 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/synch.h>
-#include <asm/asm-405.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
PPC_ATOMIC_ENTRY_BARRIER \
"1: lwarx %0,0,%2\n" \
insn \
- PPC405_ERR77(0, %2) \
"2: stwcx. %1,0,%2\n" \
"bne- 1b\n" \
PPC_ATOMIC_EXIT_BARRIER \
@@ -82,7 +80,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
cmpw 0,%1,%4\n\
bne- 3f\n"
- PPC405_ERR77(0,%3)
"2: stwcx. %5,0,%3\n\
bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index a4b65b186ec6..104026f7d6bc 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -30,7 +30,6 @@
#include <asm/fixmap.h>
extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
@@ -59,33 +58,6 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-extern void __kunmap_atomic(void *kvaddr);
-
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
- return kmap_atomic_prot(page, kmap_prot);
-}
-
-
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index e6dfa63da552..551a9d4d3958 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -41,11 +41,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_PPC64
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
-#else
- return __pte(pte_update(ptep, ~0UL, 0));
-#endif
}
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index f2f8d8aa8e3b..cb424799da0d 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -34,15 +34,21 @@ struct arch_hw_breakpoint {
#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
HW_BRK_TYPE_HYP)
+/* Minimum granularity */
#ifdef CONFIG_PPC_8xx
-#define HW_BREAKPOINT_ALIGN 0x3
+#define HW_BREAKPOINT_SIZE 0x4
#else
-#define HW_BREAKPOINT_ALIGN 0x7
+#define HW_BREAKPOINT_SIZE 0x8
#endif
#define DABR_MAX_LEN 8
#define DAWR_MAX_LEN 512
+static inline int nr_wp_slots(void)
+{
+ return HBP_NUM_MAX;
+}
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
@@ -64,7 +70,6 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-void arch_unregister_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
@@ -73,14 +78,14 @@ extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
- struct arch_hw_breakpoint brk;
-
- brk.address = 0;
- brk.type = 0;
- brk.len = 0;
- brk.hw_len = 0;
- if (ppc_breakpoint_available())
- __set_breakpoint(&brk);
+ int i;
+ struct arch_hw_breakpoint null_brk = {0};
+
+ if (!ppc_breakpoint_available())
+ return;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ __set_breakpoint(i, &null_brk);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
int hw_breakpoint_handler(struct die_args *args);
@@ -99,10 +104,10 @@ static inline bool dawr_enabled(void)
{
return dawr_force_enable;
}
-int set_dawr(struct arch_hw_breakpoint *brk);
+int set_dawr(int nr, struct arch_hw_breakpoint *brk);
#else
static inline bool dawr_enabled(void) { return false; }
-static inline int set_dawr(struct arch_hw_breakpoint *brk) { return -1; }
+static inline int set_dawr(int nr, struct arch_hw_breakpoint *brk) { return -1; }
#endif
#endif /* __KERNEL__ */
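
Note: with __set_breakpoint() and set_dawr() taking a slot number, breakpoint state is addressed per slot even while HBP_NUM_MAX is still 1. A rough sketch of arming one slot, assuming the arch_hw_breakpoint fields shown above and the usual HW_BRK_TYPE_* flags (the helper and its policy are made up):

    /* Arm a privileged write watchpoint of @len bytes at @addr in slot @nr. */
    static void arm_watchpoint(int nr, unsigned long addr, u16 len)
    {
            struct arch_hw_breakpoint brk = {
                    .address = addr & ~(HW_BREAKPOINT_SIZE - 1UL),
                    .type    = HW_BRK_TYPE_WRITE | HW_BRK_TYPE_PRIV_ALL,
                    .len     = len,
                    .hw_len  = HW_BREAKPOINT_SIZE,
            };

            if (nr < nr_wp_slots() && ppc_breakpoint_available())
                    __set_breakpoint(nr, &brk);
    }
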
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 9872f85d356f..965b1f39b2a5 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -108,6 +108,17 @@ struct data_descriptor_entry {
__be64 address;
} __packed __aligned(DDE_ALIGN);
+/* 4.3.2 NX-stamped Fault CRB */
+
+#define NX_STAMP_ALIGN (0x10)
+
+struct nx_fault_stamp {
+ __be64 fault_storage_addr;
+ __be16 reserved;
+ __u8 flags;
+ __u8 fault_status;
+ __be32 pswid;
+} __packed __aligned(NX_STAMP_ALIGN);
/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
@@ -135,10 +146,15 @@ struct coprocessor_request_block {
struct coprocessor_completion_block ccb;
- u8 reserved[48];
+ union {
+ struct nx_fault_stamp nx;
+ u8 reserved[16];
+ } stamp;
+
+ u8 reserved[32];
struct coprocessor_status_block csb;
-} __packed __aligned(CRB_ALIGN);
+} __packed;
/* RFC02167 Initiate Coprocessor Instructions document
diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h
new file mode 100644
index 000000000000..accd1f50085a
--- /dev/null
+++ b/arch/powerpc/include/asm/idle.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_IDLE_H
+#define _ASM_POWERPC_IDLE_H
+#include <asm/runlatch.h>
+#include <asm/paca.h>
+
+#ifdef CONFIG_PPC_PSERIES
+DECLARE_PER_CPU(u64, idle_spurr_cycles);
+DECLARE_PER_CPU(u64, idle_entry_purr_snap);
+DECLARE_PER_CPU(u64, idle_entry_spurr_snap);
+
+static inline void snapshot_purr_idle_entry(void)
+{
+ *this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR);
+}
+
+static inline void snapshot_spurr_idle_entry(void)
+{
+ *this_cpu_ptr(&idle_entry_spurr_snap) = mfspr(SPRN_SPURR);
+}
+
+static inline void update_idle_purr_accounting(void)
+{
+ u64 wait_cycles;
+ u64 in_purr = *this_cpu_ptr(&idle_entry_purr_snap);
+
+ wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+ wait_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+}
+
+static inline void update_idle_spurr_accounting(void)
+{
+ u64 *idle_spurr_cycles_ptr = this_cpu_ptr(&idle_spurr_cycles);
+ u64 in_spurr = *this_cpu_ptr(&idle_entry_spurr_snap);
+
+ *idle_spurr_cycles_ptr += mfspr(SPRN_SPURR) - in_spurr;
+}
+
+static inline void pseries_idle_prolog(void)
+{
+ ppc64_runlatch_off();
+ snapshot_purr_idle_entry();
+ snapshot_spurr_idle_entry();
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ get_lppaca()->idle = 1;
+}
+
+static inline void pseries_idle_epilog(void)
+{
+ update_idle_purr_accounting();
+ update_idle_spurr_accounting();
+ get_lppaca()->idle = 0;
+ ppc64_runlatch_on();
+}
+
+static inline u64 read_this_idle_purr(void)
+{
+ /*
+ * If we are reading from an idle context, update the
+ * idle-purr cycles corresponding to the last idle period.
+ * Since the idle context is not yet over, take a fresh
+ * snapshot of the idle-purr.
+ */
+ if (unlikely(get_lppaca()->idle == 1)) {
+ update_idle_purr_accounting();
+ snapshot_purr_idle_entry();
+ }
+
+ return be64_to_cpu(get_lppaca()->wait_state_cycles);
+}
+
+static inline u64 read_this_idle_spurr(void)
+{
+ /*
+ * If we are reading from an idle context, update the
+ * idle-spurr cycles corresponding to the last idle period.
+ * Since the idle context is not yet over, take a fresh
+ * snapshot of the idle-spurr.
+ */
+ if (get_lppaca()->idle == 1) {
+ update_idle_spurr_accounting();
+ snapshot_spurr_idle_entry();
+ }
+
+ return *this_cpu_ptr(&idle_spurr_cycles);
+}
+
+#endif /* CONFIG_PPC_PSERIES */
+#endif
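
Note: the new helpers gather the PURR/SPURR snapshot and accounting work that brackets a pseries idle period. A sketch of how an idle-loop body might use them (cede_processor() stands in for whatever low-power primitive the platform actually calls):

    static void pseries_idle_once(void)
    {
            /* Runlatch off, snapshot PURR/SPURR, mark the lppaca idle. */
            pseries_idle_prolog();

            cede_processor();       /* hypothetical H_CEDE-style idle call */

            /* Fold the idle deltas into the accounting, mark busy again. */
            pseries_idle_epilog();
    }

read_this_idle_purr()/read_this_idle_spurr() can then be sampled from outside the idle loop even while a CPU is still idle, because they refresh the snapshot first.
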
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
new file mode 100644
index 000000000000..45f3ec868258
--- /dev/null
+++ b/arch/powerpc/include/asm/inst.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INST_H
+#define _ASM_POWERPC_INST_H
+
+#include <asm/ppc-opcode.h>
+
+/*
+ * Instruction data type for POWER
+ */
+
+struct ppc_inst {
+ u32 val;
+#ifdef CONFIG_PPC64
+ u32 suffix;
+#endif
+} __packed;
+
+static inline u32 ppc_inst_val(struct ppc_inst x)
+{
+ return x.val;
+}
+
+static inline int ppc_inst_primary_opcode(struct ppc_inst x)
+{
+ return ppc_inst_val(x) >> 26;
+}
+
+#ifdef CONFIG_PPC64
+#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff })
+
+#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
+
+static inline u32 ppc_inst_suffix(struct ppc_inst x)
+{
+ return x.suffix;
+}
+
+static inline bool ppc_inst_prefixed(struct ppc_inst x)
+{
+ return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff;
+}
+
+static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+{
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)),
+ swab32(ppc_inst_suffix(x)));
+}
+
+static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
+{
+ u32 val, suffix;
+
+ val = *(u32 *)ptr;
+ if ((val >> 26) == OP_PREFIX) {
+ suffix = *((u32 *)ptr + 1);
+ return ppc_inst_prefix(val, suffix);
+ } else {
+ return ppc_inst(val);
+ }
+}
+
+static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+{
+ return *(u64 *)&x == *(u64 *)&y;
+}
+
+#else
+
+#define ppc_inst(x) ((struct ppc_inst){ .val = x })
+
+static inline bool ppc_inst_prefixed(struct ppc_inst x)
+{
+ return false;
+}
+
+static inline u32 ppc_inst_suffix(struct ppc_inst x)
+{
+ return 0;
+}
+
+static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+{
+ return ppc_inst(swab32(ppc_inst_val(x)));
+}
+
+static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
+{
+ return *ptr;
+}
+
+static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+{
+ return ppc_inst_val(x) == ppc_inst_val(y);
+}
+
+#endif /* CONFIG_PPC64 */
+
+static inline int ppc_inst_len(struct ppc_inst x)
+{
+ return ppc_inst_prefixed(x) ? 8 : 4;
+}
+
+/*
+ * Return the address of the next instruction, if the instruction @value was
+ * located at @location.
+ */
+static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value)
+{
+ struct ppc_inst tmp;
+
+ tmp = ppc_inst_read(value);
+
+ return location + ppc_inst_len(tmp);
+}
+
+static inline u64 ppc_inst_as_u64(struct ppc_inst x)
+{
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
+#else
+ return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
+#endif
+}
+
+int probe_user_read_inst(struct ppc_inst *inst,
+ struct ppc_inst __user *nip);
+
+int probe_kernel_read_inst(struct ppc_inst *inst,
+ struct ppc_inst *src);
+
+#endif /* _ASM_POWERPC_INST_H */
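
Note: with ISA 3.1 prefixed instructions being eight bytes, code that walks kernel text can no longer step by a fixed four bytes. A minimal sketch of scanning a range with the helpers above (the function and its purpose are illustrative only):

    /* Count how many instructions in [start, end) are equal to @match. */
    static int count_matching_insns(void *start, void *end, struct ppc_inst match)
    {
            struct ppc_inst *p = start;
            int n = 0;

            while ((void *)p < end) {
                    if (ppc_inst_equal(ppc_inst_read(p), match))
                            n++;

                    /* Advance by 4 or 8 bytes depending on whether the
                     * instruction at @p turns out to be prefixed. */
                    p = ppc_inst_next(p, p);
            }

            return n;
    }
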
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 13f90dd03450..58635960403c 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,7 +34,6 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/mmiowb.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
-#include <asm/pgtable.h>
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 350101e11ddb..5032f1593299 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -22,11 +22,11 @@
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
-#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)
#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
-#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
/* Boot time flags */
extern int iommu_is_off;
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index fbff9ff9032e..be85c7005fb1 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -23,20 +23,20 @@
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
-#define KASAN_SHADOW_END 0UL
-
-#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
+#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN
void kasan_early_init(void);
-void kasan_mmu_init(void);
void kasan_init(void);
void kasan_late_init(void);
#else
static inline void kasan_init(void) { }
-static inline void kasan_mmu_init(void) { }
static inline void kasan_late_init(void) { }
#endif
+void kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte);
+int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end);
+int kasan_init_region(void *start, size_t size);
+
#endif /* __ASSEMBLY */
#endif
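
Note: the new KASAN_SHADOW_END derives the end of the shadow region from its start instead of hard-coding 0. As a purely numeric illustration (the KASAN_SHADOW_START value below is assumed, not taken from this configuration), with KASAN_SHADOW_START = 0xf8000000 and KASAN_SHADOW_SCALE_SHIFT = 3, 32-bit arithmetic gives:

    /* Illustrative arithmetic only, with assumed values. */
    -KASAN_SHADOW_START          == 0x08000000      /* 2^32 - 0xf8000000 */
    -KASAN_SHADOW_START >> 3     == 0x01000000
    -(-KASAN_SHADOW_START >> 3)  == 0xff000000      /* KASAN_SHADOW_END */
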
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 66b3f2983b22..4fc0e15e23a5 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -43,7 +43,7 @@ extern kprobe_opcode_t optprobe_template_ret[];
extern kprobe_opcode_t optprobe_template_end[];
/* Fixed instruction size for powerpc */
-#define MAX_INSN_SIZE 1
+#define MAX_INSN_SIZE 2
#define MAX_OPTIMIZED_LENGTH sizeof(kprobe_opcode_t) /* 4 bytes */
#define MAX_OPTINSN_SIZE (optprobe_template_end - optprobe_template_entry)
#define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 92bcd1a26d73..1d0f7d838b2e 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -10,7 +10,9 @@
* Use the current saved situation instead of the to/from/size params.
* Used on book3s/32
*/
-#define KUAP_CURRENT 4
+#define KUAP_CURRENT_READ 4
+#define KUAP_CURRENT_WRITE 8
+#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
#ifdef CONFIG_PPC64
#include <asm/book3s/64/kup-radix.h>
@@ -37,7 +39,7 @@
#else /* !__ASSEMBLY__ */
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void setup_kup(void);
@@ -101,6 +103,16 @@ static inline void prevent_current_access_user(void)
prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
}
+static inline void prevent_current_read_from_user(void)
+{
+ prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+}
+
+static inline void prevent_current_write_to_user(void)
+{
+ prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_KUAP_H_ */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e1772e4bf710..d32ec9ae73bd 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -196,7 +196,7 @@ extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
unsigned int lpid);
-extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
bool writing, unsigned long gpa,
unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 04b2b927bb5a..9bb9bb370b53 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -14,6 +14,7 @@
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
@@ -434,7 +435,7 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
continue;
}
/* If pte is not present return None */
- if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
+ if (unlikely(!pte_present(old_pte)))
return __pte(0);
new_pte = pte_mkyoung(old_pte);
@@ -634,6 +635,47 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
unsigned long gpa, unsigned long hpa,
unsigned long nbytes);
+static inline pte_t *
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
+ unsigned *hshift)
+{
+ pte_t *pte;
+
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+ return pte;
+}
+
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+ unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+ "%s called with kvm mmu_lock not held \n", __func__);
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+ return pte;
+}
+
+static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
+ unsigned long ea, unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+ "%s called with kvm mmu_lock not held \n", __func__);
+
+ if (mmu_notifier_retry(kvm, mmu_seq))
+ return NULL;
+
+ pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
+
+ return pte;
+}
+
+extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+ unsigned long ea, unsigned *hshift);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
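
Note: find_kvm_host_pte() couples a walk of the host page table with the mmu_notifier sequence check, so callers are expected to sample the notifier sequence before taking kvm->mmu_lock and to treat a NULL return as "retry the fault". A hedged sketch of that calling pattern (field names as in the 5.x KVM code; the surrounding fault handling is omitted):

    /* Illustrative only. */
    static pte_t lookup_host_pte(struct kvm *kvm, unsigned long ea)
    {
            unsigned long mmu_seq;
            unsigned int shift;
            pte_t *ptep, pte = __pte(0);

            /* Pairs with the mmu_notifier_retry() check done inside
             * find_kvm_host_pte(). */
            mmu_seq = kvm->mmu_notifier_seq;
            smp_rmb();

            spin_lock(&kvm->mmu_lock);
            ptep = find_kvm_host_pte(kvm, mmu_seq, ea, &shift);
            if (ptep)
                    pte = *ptep;
            spin_unlock(&kvm->mmu_lock);

            return pte;     /* __pte(0): not found or invalidation raced, retry */
    }
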
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 0699cfeeb8c9..f4ac25d4df05 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -122,6 +122,7 @@
#define MMU_FTRS_POWER7 MMU_FTRS_POWER6
#define MMU_FTRS_POWER8 MMU_FTRS_POWER6
#define MMU_FTRS_POWER9 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER10 MMU_FTRS_POWER6
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -291,15 +292,6 @@ static inline bool early_radix_enabled(void)
}
#endif
-#ifdef CONFIG_PPC_MEM_KEYS
-extern u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address);
-#else
-static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
-{
- return 0;
-}
-#endif /* CONFIG_PPC_MEM_KEYS */
-
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
{
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 360367c579de..1a474f6b1992 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -185,11 +185,41 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
dec_mm_active_cpus(mm);
}
}
+
+/*
+ * vas_windows counter shows number of open windows in the mm
+ * context. During context switch, use this counter to clear the
+ * foreign real address mapping (CP_ABORT) for the thread / process
+ * that intends to use COPY/PASTE. When a process closes all windows,
+ * disable CP_ABORT which is expensive to run.
+ *
+ * For user context, register a copro so that TLBIs are seen by the
+ * nest MMU. mm_context_add/remove_vas_window() are used only for user
+ * space windows.
+ */
+static inline void mm_context_add_vas_window(struct mm_struct *mm)
+{
+ atomic_inc(&mm->context.vas_windows);
+ mm_context_add_copro(mm);
+}
+
+static inline void mm_context_remove_vas_window(struct mm_struct *mm)
+{
+ int v;
+
+ mm_context_remove_copro(mm);
+ v = atomic_dec_if_positive(&mm->context.vas_windows);
+
+ /* Detect imbalance between add and remove */
+ WARN_ON(v < 0);
+}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
+static inline void mm_context_add_vas_window(struct mm_struct *mm) { }
+static inline void mm_context_remove_vas_window(struct mm_struct *mm) { }
#endif
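
Note: the vas_windows counter is meant to be consulted on the context-switch path so that CP_ABORT is only issued for processes that still have a window open. A rough sketch of both sides of that contract (the switch-side helper is hypothetical; PPC_CP_ABORT comes from ppc-opcode.h):

    /* Window open/close paths (user-space windows only). */
    static void vas_window_opened(struct mm_struct *mm)
    {
            mm_context_add_vas_window(mm);          /* count it, register a copro */
    }

    static void vas_window_closed(struct mm_struct *mm)
    {
            mm_context_remove_vas_window(mm);       /* drop it, warn on imbalance */
    }

    /* Switch side: only pay for CP_ABORT while windows are open. */
    static void maybe_clear_copy_buffer(struct mm_struct *next)
    {
            if (atomic_read(&next->context.vas_windows))
                    asm volatile(PPC_CP_ABORT);
    }
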
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 5398bfc465b4..857d9ff24295 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -72,12 +72,9 @@ struct mod_arch_specific {
# ifdef MODULE
asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
-#endif
int module_trampoline_target(struct module *mod, unsigned long trampoline,
unsigned long *target);
-
-#ifdef CONFIG_DYNAMIC_FTRACE
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
#else
static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index a46616937d20..e752a5807a59 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
- return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+ return PAGE_SHIFT_8M;
}
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned int pdshift)
{
- unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+ unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
return hugepd_page(hpd) + idx;
}
@@ -32,8 +32,12 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
- *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+ *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
}
static inline int check_and_get_huge_psize(int shift)
@@ -41,4 +45,24 @@ static inline int check_and_get_huge_psize(int shift)
return shift_to_mmu_psize(shift);
}
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 1);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
+ unsigned long set = pte_val(pte_wrprotect(__pte(0)));
+
+ pte_update(mm, addr, ptep, clr, set, 1);
+}
+
#endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */
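
Note: the clr/set masks in huge_ptep_set_wrprotect() are derived from pte_wrprotect() itself rather than hard-coded: running an all-ones PTE through it exposes the bits it clears, and running an all-zeroes PTE through it exposes the bits it sets. The same derivation in isolation, with the expected results noted as an assumption:

    /* Derive "what pte_wrprotect() does" as a pair of bitmasks. */
    static void wrprotect_masks(unsigned long *clr, unsigned long *set)
    {
            /* Bits present in ~0 but gone after wrprotect: cleared bits. */
            *clr = ~pte_val(pte_wrprotect(__pte(~0UL)));
            /* Bits that appear when starting from 0: set bits. */
            *set = pte_val(pte_wrprotect(__pte(0)));

            /* Expected (not verified here): on 8xx this gives *set == _PAGE_RO
             * and *clr == 0; on platforms with a _PAGE_RW bit it gives
             * *clr == _PAGE_RW and *set == 0. */
    }

This keeps the helper correct whichever way the platform encodes write protection.
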
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 76af5b0cb16e..1d9ac0f9c794 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -19,7 +19,6 @@
#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MI_RESETVAL 0x00000000 /* Value of register at reset */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
@@ -37,16 +36,16 @@
* Therefore, we define 2 APG groups. lsb is _PMD_USER
* 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MI_APG_INIT 0x4fffffff
+#define MI_APG_INIT 0x40000000
/*
* 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 10 (all accesses performed according to swapped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MI_APG_KUEP 0x6fffffff
+#define MI_APG_KUEP 0x60000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -95,7 +94,6 @@
#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MD_RESETVAL 0x04000000 /* Value of register at reset */
#define SPRN_M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
@@ -117,16 +115,16 @@
* Therefore, we define 2 APG groups. lsb is _PMD_USER
* 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MD_APG_INIT 0x4fffffff
+#define MD_APG_INIT 0x40000000
/*
* 0 => No user => 01 (all accesses performed according to page definition)
* 1 => User => 10 (all accesses performed according to swapped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * 2-15 => Not Used
*/
-#define MD_APG_KUAP 0x6fffffff
+#define MD_APG_KUAP 0x60000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -178,12 +176,6 @@
*/
#define SPRN_M_TW 799
-#ifdef CONFIG_PPC_MM_SLICES
-#include <asm/nohash/32/slice.h>
-#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
-#define LOW_SLICE_ARRAY_SZ SLICE_ARRAY_SIZE
-#endif
-
#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
@@ -201,71 +193,15 @@
#include <linux/mmdebug.h>
-struct slice_mask {
- u64 low_slices;
- DECLARE_BITMAP(high_slices, 0);
-};
+void mmu_pin_tlb(unsigned long top, bool readonly);
typedef struct {
unsigned int id;
unsigned int active;
unsigned long vdso_base;
-#ifdef CONFIG_PPC_MM_SLICES
- u16 user_psize; /* page size index */
- unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
- unsigned char high_slices_psize[0];
- unsigned long slb_addr_limit;
- struct slice_mask mask_base_psize; /* 4k or 16k */
- struct slice_mask mask_512k;
- struct slice_mask mask_8m;
-#endif
void *pte_frag;
} mm_context_t;
-#ifdef CONFIG_PPC_MM_SLICES
-static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
-{
- return ctx->user_psize;
-}
-
-static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
-{
- ctx->user_psize = user_psize;
-}
-
-static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
-{
- return ctx->low_slices_psize;
-}
-
-static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
-{
- return ctx->high_slices_psize;
-}
-
-static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
-{
- return ctx->slb_addr_limit;
-}
-
-static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
-{
- ctx->slb_addr_limit = limit;
-}
-
-static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
-{
- if (psize == MMU_PAGE_512K)
- return &ctx->mask_512k;
- if (psize == MMU_PAGE_8M)
- return &ctx->mask_8m;
-
- BUG_ON(psize != mmu_virtual_psize);
-
- return &ctx->mask_base_psize;
-}
-#endif /* CONFIG_PPC_MM_SLICE */
-
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
@@ -304,13 +240,7 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
}
/* patch sites */
-extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
-extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
-extern s32 patch__fixupdar_linmem_top;
-extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
-
-extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
-extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index b04ba257fddb..b56f14160ae5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -2,14 +2,12 @@
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
-#include <asm/asm-405.h>
#ifdef CONFIG_44x
extern int icache_44x_need_flush;
@@ -30,6 +28,8 @@ extern int icache_44x_need_flush;
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -110,13 +110,13 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#ifdef CONFIG_KASAN_VMALLOC
-#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
@@ -166,7 +166,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#ifndef __ASSEMBLY__
#define pte_clear(mm, addr, ptep) \
- do { pte_update(ptep, ~0, 0); } while (0)
+ do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
@@ -205,7 +205,9 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}
-
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/*
* PTE updates. This function is called whenever an existing
@@ -221,66 +223,42 @@ static inline void pmd_clear(pmd_t *pmdp)
* that an executable user mapping was modified, which is needed
* to properly flush the virtually tagged instruction cache of
* those implementations.
+ *
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if it was
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
*/
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+#ifdef CONFIG_PPC_8xx
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__("\
-1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
-" stwcx. %1,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
- unsigned long old = pte_val(*p);
- unsigned long new = (old & ~clr) | set;
-
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
- p->pte = p->pte1 = p->pte2 = p->pte3 = new;
-#else
- *p = __pte(new);
-#endif
-#endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t *entry = &p->pte;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
+ pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
+
+ if (!huge)
+ num = PAGE_SIZE / SZ_4K;
+ else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) != _PMD_PAGE_8M)
+ num = SZ_512K / SZ_4K;
+ else
+ num = 1;
+
+ for (i = 0; i < num; i++, entry++, new += SZ_4K)
+ *entry = new;
-#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
- icache_44x_need_flush = 1;
-#endif
return old;
}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+#else
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long long old;
- unsigned long tmp;
-
- __asm__ __volatile__("\
-1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
-" stwcx. %1,0,%4\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
- unsigned long long old = pte_val(*p);
- *p = __pte((old & ~(unsigned long long)clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ *p = __pte(new);
#ifdef CONFIG_44x
if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
@@ -288,23 +266,24 @@ static inline unsigned long long pte_update(pte_t *p,
#endif
return old;
}
-#endif /* CONFIG_PTE_64BIT */
+#endif
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
- old = pte_update(ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(ptep, ~0, 0));
+ return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -314,7 +293,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
unsigned long set = pte_val(pte_wrprotect(__pte(0)));
- pte_update(ptep, clr, set);
+ pte_update(mm, addr, ptep, clr, set, 0);
}
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -326,8 +305,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
unsigned long set = pte_val(entry) & pte_val(pte_set);
unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
- pte_update(ptep, clr, set);
+ pte_update(vma->vm_mm, address, ptep, clr, set, huge);
flush_tlb_page(vma, address);
}
@@ -348,8 +328,6 @@ static inline int pte_young(pte_t pte)
* of the pte page. -- paulus
*/
#ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
@@ -359,22 +337,6 @@ static inline int pte_young(pte_t pte)
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
- pte_index(addr))
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-static inline void pte_unmap(pte_t *pte) { }
-
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
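
Note: on the 8xx the new pte_update() fans one logical update out over several consecutive 4k entries: 4 identical entries for a 16k page, 128 for a 512k mapping kept in 4k-organised tables, bumping the value by SZ_4K each time so only the physical page number differs. A worked illustration with assumed values:

    /* 16k page, so num = 16k / 4k = 4; suppose the new PTE value for the
     * first 4k frame is 0x10000441 (flags in the low bits):
     *      entry[0] = 0x10000441
     *      entry[1] = 0x10001441   (+ SZ_4K)
     *      entry[2] = 0x10002441   (+ 2 * SZ_4K)
     *      entry[3] = 0x10003441   (+ 3 * SZ_4K)
     * i.e. four PTEs identical except for the physical page number. */
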
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 12c6811e344b..2d3153cfc0d7 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -44,9 +44,8 @@
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
#define _PAGE_SPECIAL 0x020 /* software: Special page */
-#define _PAGE_RW 0x040 /* software: Writes permitted */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
@@ -58,8 +57,8 @@
#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK _PMD_PRESENT
@@ -85,21 +84,5 @@
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#ifndef __ASSEMBLY__
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE));
-}
-
-#define pte_wrprotect pte_wrprotect
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
-}
-
-#define pte_mkclean pte_mkclean
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index c9e4b2d90f65..66f403a7da44 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -46,6 +46,8 @@
#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
+#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+
/* cache related flags non existing on 8xx */
#define _PAGE_COHERENT 0
#define _PAGE_WRITETHRU 0
@@ -128,7 +130,7 @@ static inline pte_t pte_mkuser(pte_t pte)
static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPS);
+ return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
}
#define pte_mkhuge pte_mkhuge
diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
deleted file mode 100644
index 39eb0154ae2d..000000000000
--- a/arch/powerpc/include/asm/nohash/32/slice.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
-#define _ASM_POWERPC_NOHASH_32_SLICE_H
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#define SLICE_LOW_SHIFT 26 /* 64 slices */
-#define SLICE_LOW_TOP (0x100000000ull)
-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
-
-#define SLICE_HIGH_SHIFT 0
-#define SLICE_NUM_HIGH 0ul
-#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
-
-#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index b9534a793293..668aee6017e7 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -15,7 +15,7 @@ struct vmemmap_backing {
};
extern struct vmemmap_backing *vmemmap_list;
-#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
+#define p4d_populate(MM, P4D, PUD) p4d_set(P4D, (unsigned long)PUD)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index c40ec32b8194..fe2f4c9acd9e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -2,7 +2,7 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
/*
* Entries per page directory level. The PTE level must use a 64b record
@@ -45,43 +45,39 @@
#define PMD_MASKED_BITS 0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS 0
-/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS 0
+/* Bits to mask out from a P4D to get to the PUD page */
+#define P4D_MASKED_BITS 0
/*
* 4-level page tables related bits
*/
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (pgd_val(pgd) == 0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0)
-#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
+#define p4d_none(p4d) (!p4d_val(p4d))
+#define p4d_bad(p4d) (p4d_val(p4d) == 0)
+#define p4d_present(p4d) (p4d_val(p4d) != 0)
+#define p4d_page_vaddr(p4d) (p4d_val(p4d) & ~P4D_MASKED_BITS)
#ifndef __ASSEMBLY__
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
{
- *pgdp = __pgd(0);
+ *p4dp = __p4d(0);
}
-static inline pte_t pgd_pte(pgd_t pgd)
+static inline pte_t p4d_pte(p4d_t p4d)
{
- return __pte(pgd_val(pgd));
+ return __pte(p4d_val(p4d));
}
-static inline pgd_t pte_pgd(pte_t pte)
+static inline p4d_t pte_p4d(pte_t pte)
{
- return __pgd(pte_val(pte));
+ return __p4d(pte_val(pte));
}
-extern struct page *pgd_page(pgd_t pgd);
+extern struct page *p4d_page(p4d_t p4d);
#endif /* !__ASSEMBLY__ */
-#define pud_offset(pgdp, addr) \
- (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
- (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
#define pud_ERROR(e) \
pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 9a33b8bd842d..6cb8aa357191 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -175,35 +175,13 @@ static inline pud_t pte_pud(pte_t pte)
return __pud(pte_val(pte));
}
#define pud_write(pud) pte_write(pud_pte(pud))
-#define pgd_write(pgd) pte_write(pgd_pte(pgd))
+#define p4d_write(pgd) pte_write(p4d_pte(p4d))
-static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+static inline void p4d_set(p4d_t *p4dp, unsigned long val)
{
- *pgdp = __pgd(val);
+ *p4dp = __p4d(val);
}
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
@@ -211,22 +189,9 @@ static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long set,
int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%3 # pte_update\n\
- andc %1,%0,%4 \n\
- or %1,%1,%6\n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
- : "cc" );
-#else
unsigned long old = pte_val(*ptep);
*ptep = __pte((old & ~clr) | set);
-#endif
+
/* huge pages use the old page table lock */
if (!huge)
assert_pte_locked(mm, addr);
@@ -310,21 +275,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%4\n\
- or %0,%3,%0\n\
- stdcx. %0,0,%4\n\
- bne- 1b"
- :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
- :"r" (bits), "r" (ptep), "m" (*ptep)
- :"cc");
-#else
unsigned long old = pte_val(*ptep);
*ptep = __pte(old | bits);
-#endif
flush_tlb_page(vma, address);
}
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 7fed9dc0f147..4b7c3472eab1 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -56,7 +56,7 @@ static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * comment in include/linux/pgtable.h . On powerpc, this will only
* work for user pages and always return true for kernel pages.
*/
static inline int pte_protnone(pte_t pte)
@@ -130,12 +130,10 @@ static inline pte_t pte_exprotect(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
-#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
-#endif
static inline pte_t pte_mkold(pte_t pte)
{
@@ -267,7 +265,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd_val(hpd) & 0x4) != 0);
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
/* We clear the top bit to indicate hugepd */
return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e3cc9eb9204d..45a839a7c6cf 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -29,6 +29,7 @@
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>
+#include <asm/rtas-types.h>
#include <asm-generic/mmiowb_types.h>
@@ -256,6 +257,7 @@ struct paca_struct {
u64 l1d_flush_size;
#endif
#ifdef CONFIG_PPC_PSERIES
+ struct rtas_args *rtas_args_reentrant;
u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */
#endif /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 3ee8df0f66e0..a63fe6f3a0ff 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -249,13 +249,6 @@ static inline bool pfn_valid(unsigned long pfn)
#include <asm/page_32.h>
#endif
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr, size) __ALIGN_KERNEL(addr, size)
-#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
-
/*
* Don't compare things with KERNELBASE or PAGE_OFFSET to test for
* "kernelness", use is_kernel_addr() - it should do what you want.
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index b1f1d5339735..f7613f43c9cf 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,25 +41,6 @@ struct mm_struct;
#ifndef __ASSEMBLY__
-#ifdef CONFIG_PPC32
-static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
-{
- return pmd_offset(pud_offset(pgd_offset(mm, va), va), va);
-}
-
-static inline pmd_t *pmd_ptr_k(unsigned long va)
-{
- return pmd_offset(pud_offset(pgd_offset_k(va), va), va);
-}
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- pmd_t *pmd = pmd_ptr_k(vaddr);
-
- return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
-}
-#endif
-
#include <asm/tlbflush.h>
/* Keep these as a macros to avoid include dependency mess */
@@ -76,6 +57,13 @@ static inline pgprot_t pte_pgprot(pte_t pte)
return __pgprot(pte_flags);
}
+#ifndef pmd_page_vaddr
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
+}
+#define pmd_page_vaddr pmd_page_vaddr
+#endif
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -96,8 +84,6 @@ extern unsigned long ioremap_bot;
*/
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
@@ -107,6 +93,8 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned int shift);
+pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
+
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
@@ -158,9 +146,9 @@ static inline bool pud_is_leaf(pud_t pud)
}
#endif
-#ifndef pgd_is_leaf
-#define pgd_is_leaf pgd_is_leaf
-static inline bool pgd_is_leaf(pgd_t pgd)
+#ifndef p4d_is_leaf
+#define p4d_is_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
{
return false;
}
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 20ebf153c871..2fe6cae14d10 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -101,7 +101,7 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
/*
* Returns a positive, 5-bit key on success, or -1 on failure.
- * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+ * Relies on the mmap_lock to protect against concurrency in mm_pkey_alloc() and
* mm_pkey_free().
*/
static inline int mm_pkey_alloc(struct mm_struct *mm)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c1df75edde44..2a39c716c343 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -158,6 +158,9 @@
/* VMX Vector Store Instructions */
#define OP_31_XOP_STVX 231
+/* Prefixed Instructions */
+#define OP_PREFIX 1
+
#define OP_31 31
#define OP_LWZ 32
#define OP_STFS 52
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index eedcbfb9a6ff..52a67835057a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -180,14 +180,14 @@ struct thread_struct {
int fpexc_mode; /* floating-point exception mode */
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- struct perf_event *ptrace_bps[HBP_NUM];
+ struct perf_event *ptrace_bps[HBP_NUM_MAX];
/*
* Helps identify source of single-step exception and subsequent
* hw-breakpoint enablement
*/
- struct perf_event *last_hit_ubp;
+ struct perf_event *last_hit_ubp[HBP_NUM_MAX];
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
- struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
+ struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */
unsigned long trap_nr; /* last trap # on this thread */
u8 load_slb; /* Ages out SLB preload cache entries */
u8 load_fp;
@@ -272,7 +272,6 @@ struct thread_struct {
unsigned mmcr0;
unsigned used_ebb;
- unsigned int used_vas;
#endif
};
@@ -301,14 +300,12 @@ struct thread_struct {
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.addr_limit = KERNEL_DS, \
.fpexc_mode = 0, \
- .fscr = FSCR_TAR | FSCR_EBB \
}
#endif
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)
+#define task_pt_regs(tsk) ((tsk)->thread.regs)
unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 94e3fd54f2c8..324a13351749 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -117,6 +117,7 @@ extern int of_read_drc_info_cell(struct property **prop,
#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
#define OV1_PPC_3_00 0x80 /* set if we support PowerPC 3.00 */
+#define OV1_PPC_3_1 0x40 /* set if we support PowerPC 3.1 */
/* Option vector 2: Open Firmware options supported */
#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index e0195e6b892b..ac3970fff0d5 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -179,6 +179,22 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
#define current_pt_regs() \
((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
+
+#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3S
+#define TRAP_FLAGS_MASK 0x10
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
+#define FULL_REGS(regs) true
+#define SET_FULL_REGS(regs) do { } while (0)
+#else
+#define TRAP_FLAGS_MASK 0x11
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
+#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
+#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
+#endif
+#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs))
+#define NV_REG_POISON 0xdeadbeefdeadbeefUL
+#else
/*
* We use the least-significant bit of the trap field to indicate
* whether we have saved the full set of registers, or only a
@@ -186,17 +202,13 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
* On 4xx we use the next bit to indicate whether the exception
* is a critical exception (1 means it is).
*/
+#define TRAP_FLAGS_MASK 0x1F
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#ifndef __powerpc64__
+#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
-#endif /* ! __powerpc64__ */
-#define TRAP(regs) ((regs)->trap & ~0xF)
-#ifdef __powerpc64__
-#define NV_REG_POISON 0xdeadbeefdeadbeefUL
-#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
-#else
#define NV_REG_POISON 0xdeadbeef
#define CHECK_FULL_REGS(regs) \
do { \
@@ -205,6 +217,26 @@ do { \
} while (0)
#endif /* __powerpc64__ */
+static inline void set_trap(struct pt_regs *regs, unsigned long val)
+{
+ regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
+}
+
+static inline bool trap_is_syscall(struct pt_regs *regs)
+{
+ return TRAP(regs) == 0xc00;
+}
+
+static inline bool trap_norestart(struct pt_regs *regs)
+{
+ return regs->trap & 0x10;
+}
+
+static inline void set_trap_norestart(struct pt_regs *regs)
+{
+ regs->trap |= 0x10;
+}
+
#define arch_has_single_step() (1)
#ifndef CONFIG_BOOK3S_601
#define arch_has_block_step() (true)
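A minimal standalone sketch of the trap-flag scheme introduced in the ptrace.h hunks above, using the 64-bit Book3S values from this patch: setting the no-restart flag leaves the reported trap vector untouched. The pt_regs structure below is a reduced stand-in, not the kernel's definition.

#include <stdbool.h>
#include <stdio.h>

/* Values and helpers mirror the PPC64 Book3S branch of the hunk above. */
#define TRAP_FLAGS_MASK	0x10
#define TRAP(regs)	((regs)->trap & ~TRAP_FLAGS_MASK)

struct pt_regs { unsigned long trap; };	/* reduced stand-in for the real struct */

static bool trap_is_syscall(struct pt_regs *regs)
{
	return TRAP(regs) == 0xc00;
}

static void set_trap_norestart(struct pt_regs *regs)
{
	regs->trap |= 0x10;	/* flag bit lives below the vector bits */
}

int main(void)
{
	struct pt_regs regs = { .trap = 0xc00 };	/* system call vector */

	set_trap_norestart(&regs);
	printf("trap=0x%lx syscall=%d\n", TRAP(&regs), trap_is_syscall(&regs));
	return 0;	/* prints trap=0xc00 syscall=1: the flag does not hide the vector */
}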
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index da5cab038e25..88e6c78100d9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -283,14 +283,16 @@
#define CTRL_CT1 0x40000000 /* thread 1 */
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
-#define SPRN_DAWR 0xB4
+#define SPRN_DAWR0 0xB4
+#define SPRN_DAWR1 0xB5
#define SPRN_RPR 0xBA /* Relative Priority Register */
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
#define CIABR_PRIV_USER 1
#define CIABR_PRIV_SUPER 2
#define CIABR_PRIV_HYPER 3
-#define SPRN_DAWRX 0xBC
+#define SPRN_DAWRX0 0xBC
+#define SPRN_DAWRX1 0xBD
#define DAWRX_USER __MASK(0)
#define DAWRX_KERNEL __MASK(1)
#define DAWRX_HYP __MASK(2)
@@ -397,6 +399,7 @@
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_PREFIX_LG 13 /* Enable Prefix Instructions */
#define FSCR_SCV_LG 12 /* Enable System Call Vectored */
#define FSCR_MSGP_LG 10 /* Enable MSGP */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
@@ -408,11 +411,13 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_PREFIX __MASK(FSCR_PREFIX_LG)
#define FSCR_SCV __MASK(FSCR_SCV_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
+#define HFSCR_PREFIX __MASK(FSCR_PREFIX_LG)
#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
@@ -476,16 +481,18 @@
#define PCR_VEC_DIS (__MASK(63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (__MASK(63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (__MASK(63-2)) /* Trans. memory disable (POWER8) */
-#define PCR_HIGH_BITS (PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
+#define PCR_MMA_DIS (__MASK(63-3)) /* Matrix-Multiply Accelerator */
+#define PCR_HIGH_BITS (PCR_MMA_DIS | PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
/*
* These bits are used in the function kvmppc_set_arch_compat() to specify and
* determine both the compatibility level which we want to emulate and the
* compatibility level which the host is capable of emulating.
*/
+#define PCR_ARCH_300 0x10 /* Architecture 3.00 */
#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
-#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)
+#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205 | PCR_ARCH_300)
#define PCR_MASK ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
@@ -759,7 +766,7 @@
#endif
#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
-#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
+#define SRR1_ISI_N_G_OR_CIP 0x10000000 /* ISI: Access is no-exec or G or CI for a prefixed instruction */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
@@ -786,6 +793,8 @@
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */
+#define SRR1_BOUNDARY 0x10000000 /* Prefixed instruction crosses 64-byte boundary */
+#define SRR1_PREFIXED 0x20000000 /* Exception caused by prefixed instruction */
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index f26fe482fbca..ff30f1076162 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -663,60 +663,6 @@
#define EPC_EPID 0x00003fff
#define EPC_EPID_SHIFT 0
-/*
- * The IBM-403 is an even more odd special case, as it is much
- * older than the IBM-405 series. We put these down here incase someone
- * wishes to support these machines again.
- */
-#ifdef CONFIG_403GCX
-/* Special Purpose Registers (SPRNs)*/
-#define SPRN_TBHU 0x3CC /* Time Base High User-mode */
-#define SPRN_TBLU 0x3CD /* Time Base Low User-mode */
-#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */
-#define SPRN_TBHI 0x3DC /* Time Base High */
-#define SPRN_TBLO 0x3DD /* Time Base Low */
-#define SPRN_DBCR 0x3F2 /* Debug Control Register */
-#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */
-#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */
-#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */
-#define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */
-
-
-/* Bit definitions for the DBCR. */
-#define DBCR_EDM DBCR0_EDM
-#define DBCR_IDM DBCR0_IDM
-#define DBCR_RST(x) (((x) & 0x3) << 28)
-#define DBCR_RST_NONE 0
-#define DBCR_RST_CORE 1
-#define DBCR_RST_CHIP 2
-#define DBCR_RST_SYSTEM 3
-#define DBCR_IC DBCR0_IC /* Instruction Completion Debug Evnt */
-#define DBCR_BT DBCR0_BT /* Branch Taken Debug Event */
-#define DBCR_EDE DBCR0_EDE /* Exception Debug Event */
-#define DBCR_TDE DBCR0_TDE /* TRAP Debug Event */
-#define DBCR_FER 0x00F80000 /* First Events Remaining Mask */
-#define DBCR_FT 0x00040000 /* Freeze Timers on Debug Event */
-#define DBCR_IA1 0x00020000 /* Instr. Addr. Compare 1 Enable */
-#define DBCR_IA2 0x00010000 /* Instr. Addr. Compare 2 Enable */
-#define DBCR_D1R 0x00008000 /* Data Addr. Compare 1 Read Enable */
-#define DBCR_D1W 0x00004000 /* Data Addr. Compare 1 Write Enable */
-#define DBCR_D1S(x) (((x) & 0x3) << 12) /* Data Adrr. Compare 1 Size */
-#define DAC_BYTE 0
-#define DAC_HALF 1
-#define DAC_WORD 2
-#define DAC_QUAD 3
-#define DBCR_D2R 0x00000800 /* Data Addr. Compare 2 Read Enable */
-#define DBCR_D2W 0x00000400 /* Data Addr. Compare 2 Write Enable */
-#define DBCR_D2S(x) (((x) & 0x3) << 8) /* Data Addr. Compare 2 Size */
-#define DBCR_SBT 0x00000040 /* Second Branch Taken Debug Event */
-#define DBCR_SED 0x00000020 /* Second Exception Debug Event */
-#define DBCR_STD 0x00000010 /* Second Trap Debug Event */
-#define DBCR_SIA 0x00000008 /* Second IAC Enable */
-#define DBCR_SDA 0x00000004 /* Second DAC Enable */
-#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
-#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
-#endif /* 403GCX */
-
/* Some 476 specific registers */
#define SPRN_SSPCR 830
#define SPRN_USPCR 831
diff --git a/arch/powerpc/include/asm/rtas-types.h b/arch/powerpc/include/asm/rtas-types.h
new file mode 100644
index 000000000000..aa420561bc10
--- /dev/null
+++ b/arch/powerpc/include/asm/rtas-types.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_RTAS_TYPES_H
+#define _ASM_POWERPC_RTAS_TYPES_H
+
+#include <linux/spinlock_types.h>
+
+typedef __be32 rtas_arg_t;
+
+struct rtas_args {
+ __be32 token;
+ __be32 nargs;
+ __be32 nret;
+ rtas_arg_t args[16];
+ rtas_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+struct rtas_t {
+ unsigned long entry; /* physical address pointer */
+ unsigned long base; /* physical address pointer */
+ unsigned long size;
+ arch_spinlock_t lock;
+ struct rtas_args args;
+ struct device_node *dev; /* virtual address pointer */
+};
+
+struct rtas_suspend_me_data {
+ atomic_t working; /* number of cpus accessing this struct */
+ atomic_t done;
+ int token; /* ibm,suspend-me */
+ atomic_t error;
+ struct completion *complete; /* wait on this until working == 0 */
+};
+
+struct rtas_error_log {
+ /* Byte 0 */
+ u8 byte0; /* Architectural version */
+
+ /* Byte 1 */
+ u8 byte1;
+ /* XXXXXXXX
+ * XXX 3: Severity level of error
+ * XX 2: Degree of recovery
+ * X 1: Extended log present?
+ * XX 2: Reserved
+ */
+
+ /* Byte 2 */
+ u8 byte2;
+ /* XXXXXXXX
+ * XXXX 4: Initiator of event
+ * XXXX 4: Target of failed operation
+ */
+ u8 byte3; /* General event or error*/
+ __be32 extended_log_length; /* length in bytes */
+ unsigned char buffer[1]; /* Start of extended log */
+ /* Variable length. */
+};
+
+/* RTAS general extended event log, Version 6. The extended log starts
+ * from "buffer" field of struct rtas_error_log defined above.
+ */
+struct rtas_ext_event_log_v6 {
+ /* Byte 0 */
+ u8 byte0;
+ /* XXXXXXXX
+ * X 1: Log valid
+ * X 1: Unrecoverable error
+ * X 1: Recoverable (correctable or successfully retried)
+ * X 1: Bypassed unrecoverable error (degraded operation)
+ * X 1: Predictive error
+ * X 1: "New" log (always 1 for data returned from RTAS)
+ * X 1: Big Endian
+ * X 1: Reserved
+ */
+
+ /* Byte 1 */
+ u8 byte1; /* reserved */
+
+ /* Byte 2 */
+ u8 byte2;
+ /* XXXXXXXX
+ * X 1: Set to 1 (indicating log is in PowerPC format)
+ * XXX 3: Reserved
+ * XXXX 4: Log format used for bytes 12-2047
+ */
+
+ /* Byte 3 */
+ u8 byte3; /* reserved */
+ /* Byte 4-11 */
+ u8 reserved[8]; /* reserved */
+ /* Byte 12-15 */
+ __be32 company_id; /* Company ID of the company */
+ /* that defines the format for */
+ /* the vendor specific log type */
+ /* Byte 16-end of log */
+ u8 vendor_log[1]; /* Start of vendor specific log */
+ /* Variable length. */
+};
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_errorlog {
+ __be16 id; /* 0x00 2-byte ASCII section ID */
+ __be16 length; /* 0x02 Section length in bytes */
+ u8 version; /* 0x04 Section version */
+ u8 subtype; /* 0x05 Section subtype */
+ __be16 creator_component; /* 0x06 Creator component ID */
+ u8 data[]; /* 0x08 Start of section data */
+};
+
+/* RTAS pseries hotplug errorlog section */
+struct pseries_hp_errorlog {
+ u8 resource;
+ u8 action;
+ u8 id_type;
+ u8 reserved;
+ union {
+ __be32 drc_index;
+ __be32 drc_count;
+ struct { __be32 count, index; } ic;
+ char drc_name[1];
+ } _drc_u;
+};
+
+#endif /* _ASM_POWERPC_RTAS_TYPES_H */
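A small sketch of decoding the byte1 layout documented in struct rtas_error_log above: the top three bits carry the severity and the next two the degree of recovery. The severity extraction matches rtas_error_severity() further down in this diff; the recovery mask is derived here from the layout comment and is an assumption, not copied from the patch.

#include <stdint.h>
#include <stdio.h>

static uint8_t error_severity(uint8_t byte1)
{
	return (byte1 & 0xE0) >> 5;	/* XXX 3: severity level, as in rtas_error_severity() */
}

static uint8_t error_recovery(uint8_t byte1)
{
	return (byte1 & 0x18) >> 3;	/* XX 2: degree of recovery (mask derived from the comment) */
}

int main(void)
{
	uint8_t byte1 = 0xA8;	/* made-up example value, not from a real log */

	printf("severity=%u recovery=%u\n", error_severity(byte1), error_recovery(byte1));
	return 0;	/* prints severity=5 recovery=1 */
}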
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 3c1887351c71..014968f25f7e 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <asm/page.h>
+#include <asm/rtas-types.h>
#include <linux/time.h>
#include <linux/cpumask.h>
@@ -42,33 +43,6 @@
*
*/
-typedef __be32 rtas_arg_t;
-
-struct rtas_args {
- __be32 token;
- __be32 nargs;
- __be32 nret;
- rtas_arg_t args[16];
- rtas_arg_t *rets; /* Pointer to return values in args[]. */
-};
-
-struct rtas_t {
- unsigned long entry; /* physical address pointer */
- unsigned long base; /* physical address pointer */
- unsigned long size;
- arch_spinlock_t lock;
- struct rtas_args args;
- struct device_node *dev; /* virtual address pointer */
-};
-
-struct rtas_suspend_me_data {
- atomic_t working; /* number of cpus accessing this struct */
- atomic_t done;
- int token; /* ibm,suspend-me */
- atomic_t error;
- struct completion *complete; /* wait on this until working == 0 */
-};
-
/* RTAS event classes */
#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
@@ -148,31 +122,6 @@ struct rtas_suspend_me_data {
/* RTAS check-exception vector offset */
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
-struct rtas_error_log {
- /* Byte 0 */
- uint8_t byte0; /* Architectural version */
-
- /* Byte 1 */
- uint8_t byte1;
- /* XXXXXXXX
- * XXX 3: Severity level of error
- * XX 2: Degree of recovery
- * X 1: Extended log present?
- * XX 2: Reserved
- */
-
- /* Byte 2 */
- uint8_t byte2;
- /* XXXXXXXX
- * XXXX 4: Initiator of event
- * XXXX 4: Target of failed operation
- */
- uint8_t byte3; /* General event or error*/
- __be32 extended_log_length; /* length in bytes */
- unsigned char buffer[1]; /* Start of extended log */
- /* Variable length. */
-};
-
static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog)
{
return (elog->byte1 & 0xE0) >> 5;
@@ -212,47 +161,6 @@ uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog)
#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
-/* RTAS general extended event log, Version 6. The extended log starts
- * from "buffer" field of struct rtas_error_log defined above.
- */
-struct rtas_ext_event_log_v6 {
- /* Byte 0 */
- uint8_t byte0;
- /* XXXXXXXX
- * X 1: Log valid
- * X 1: Unrecoverable error
- * X 1: Recoverable (correctable or successfully retried)
- * X 1: Bypassed unrecoverable error (degraded operation)
- * X 1: Predictive error
- * X 1: "New" log (always 1 for data returned from RTAS)
- * X 1: Big Endian
- * X 1: Reserved
- */
-
- /* Byte 1 */
- uint8_t byte1; /* reserved */
-
- /* Byte 2 */
- uint8_t byte2;
- /* XXXXXXXX
- * X 1: Set to 1 (indicating log is in PowerPC format)
- * XXX 3: Reserved
- * XXXX 4: Log format used for bytes 12-2047
- */
-
- /* Byte 3 */
- uint8_t byte3; /* reserved */
- /* Byte 4-11 */
- uint8_t reserved[8]; /* reserved */
- /* Byte 12-15 */
- __be32 company_id; /* Company ID of the company */
- /* that defines the format for */
- /* the vendor specific log type */
- /* Byte 16-end of log */
- uint8_t vendor_log[1]; /* Start of vendor specific log */
- /* Variable length. */
-};
-
static
inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log)
{
@@ -287,16 +195,6 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
#define PSERIES_ELOG_SECT_ID_MCE (('M' << 8) | 'C')
-/* Vendor specific Platform Event Log Format, Version 6, section header */
-struct pseries_errorlog {
- __be16 id; /* 0x00 2-byte ASCII section ID */
- __be16 length; /* 0x02 Section length in bytes */
- uint8_t version; /* 0x04 Section version */
- uint8_t subtype; /* 0x05 Section subtype */
- __be16 creator_component; /* 0x06 Creator component ID */
- uint8_t data[]; /* 0x08 Start of section data */
-};
-
static
inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect)
{
@@ -309,20 +207,6 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
return be16_to_cpu(sect->length);
}
-/* RTAS pseries hotplug errorlog section */
-struct pseries_hp_errorlog {
- u8 resource;
- u8 action;
- u8 id_type;
- u8 reserved;
- union {
- __be32 drc_index;
- __be32 drc_count;
- struct { __be32 count, index; } ic;
- char drc_name[1];
- } _drc_u;
-};
-
#define PSERIES_HP_ELOG_RESOURCE_CPU 1
#define PSERIES_HP_ELOG_RESOURCE_MEM 2
#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
@@ -352,6 +236,7 @@ extern struct rtas_t rtas;
extern int rtas_token(const char *service);
extern int rtas_service_present(const char *service);
extern int rtas_call(int token, int, int, int *, ...);
+int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...);
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
int nret, ...);
extern void __noreturn rtas_restart(char *cmd);
@@ -483,5 +368,11 @@ static inline void rtas_initialize(void) { };
extern int call_rtas(const char *, int, int, unsigned long *, ...);
+#ifdef CONFIG_HV_PERF_CTRS
+void read_24x7_sys_info(void);
+#else
+static inline void read_24x7_sys_info(void) { }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
index c6f466f4c241..0bdd9c62eca0 100644
--- a/arch/powerpc/include/asm/slice.h
+++ b/arch/powerpc/include/asm/slice.h
@@ -4,8 +4,6 @@
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/slice.h>
-#elif defined(CONFIG_PPC_MMU_NOHASH_32)
-#include <asm/nohash/32/slice.h>
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 860228e917dc..2d620896cdae 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -23,7 +23,6 @@
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#include <asm/asm-405.h>
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
@@ -210,7 +209,6 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
- PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b\n"
PPC_ACQUIRE_BARRIER
@@ -234,7 +232,6 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw)
"1: " PPC_LWARX(%0,0,%2,1) "\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
- PPC405_ERR77(0,%1)
" stwcx. %1,0,%2\n\
bne- 1b\n"
PPC_ACQUIRE_BARRIER
@@ -292,7 +289,6 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
PPC_RELEASE_BARRIER
"1: lwarx %0,0,%1\n\
addic %0,%0,-1\n"
- PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
: "=&r"(tmp)
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 769f055509c9..3b01c69a44aa 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
*/
+#include <asm/inst.h>
struct pt_regs;
@@ -15,9 +16,9 @@ struct pt_regs;
* Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
* and an mtmsrd (64-bit).
*/
-#define IS_MTMSRD(instr) (((instr) & 0xfc0007be) == 0x7c000124)
-#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024)
-#define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064)
+#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
+#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
@@ -48,6 +49,8 @@ enum instruction_type {
#define INSTR_TYPE_MASK 0x1f
+#define OP_IS_LOAD(type) ((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
+#define OP_IS_STORE(type) ((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX)
/* Compute flags, ORed in with type */
@@ -89,11 +92,15 @@ enum instruction_type {
#define VSX_LDLEFT 4 /* load VSX register from left */
#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */
+/* Prefixed flag, ORed in with type */
+#define PREFIXED 0x800
+
/* Size field in type word */
#define SIZE(n) ((n) << 12)
#define GETSIZE(w) ((w) >> 12)
#define GETTYPE(t) ((t) & INSTR_TYPE_MASK)
+#define GETLENGTH(t) (((t) & PREFIXED) ? 8 : 4)
#define MKOP(t, f, s) ((t) | (f) | SIZE(s))
@@ -132,7 +139,7 @@ union vsx_reg {
* otherwise.
*/
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- unsigned int instr);
+ struct ppc_inst instr);
/*
* Emulate an instruction that can be executed just by updating
@@ -149,7 +156,7 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
* 0 if it could not be emulated, or -1 for an instruction that
* should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
*/
-extern int emulate_step(struct pt_regs *regs, unsigned int instr);
+extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);
/*
* Emulate a load or store instruction by reading/writing the
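A self-contained sketch of how the new PREFIXED flag and GETLENGTH() compose with the existing type-word encoding in sstep.h. The macro definitions are copied from the hunk above; the LOAD value is a stand-in for the real enum entry.

#include <stdio.h>

#define INSTR_TYPE_MASK	0x1f
#define PREFIXED	0x800
#define SIZE(n)		((n) << 12)
#define GETSIZE(w)	((w) >> 12)
#define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
#define GETLENGTH(t)	(((t) & PREFIXED) ? 8 : 4)
#define MKOP(t, f, s)	((t) | (f) | SIZE(s))

enum { LOAD = 1 };	/* stand-in value; the real enum lives in sstep.h */

int main(void)
{
	unsigned int type = MKOP(LOAD, PREFIXED, 8);	/* e.g. an 8-byte prefixed load */

	printf("type=%u size=%u insn_len=%u\n",
	       GETTYPE(type), GETSIZE(type), GETLENGTH(type));
	return 0;	/* prints type=1 size=8 insn_len=8 */
}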
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index b867b58b1093..fdab93428372 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -102,8 +102,6 @@ static inline void clear_task_ebb(struct task_struct *t)
#endif
}
-extern int set_thread_uses_vas(void);
-
extern int set_thread_tidr(struct task_struct *t);
#endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 38d62acfdce7..fd1b518eed17 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -26,7 +26,10 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
* This is important for seccomp so that compat tasks can set r0 = -1
* to reject the syscall.
*/
- return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
+ if (trap_is_syscall(regs))
+ return regs->gpr[0];
+ else
+ return -1;
}
static inline void syscall_rollback(struct task_struct *task,
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 39ce95016a3a..b287cfc2dd85 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -51,24 +51,12 @@ struct div_result {
static inline unsigned long get_tbl(void)
{
-#if defined(CONFIG_403GCX)
- unsigned long tbl;
- asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
- return tbl;
-#else
return mftbl();
-#endif
}
static inline unsigned int get_tbu(void)
{
-#ifdef CONFIG_403GCX
- unsigned int tbu;
- asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
- return tbu;
-#else
return mftbu();
-#endif
}
#endif /* !CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 7f3a8b902325..862985cf5180 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -10,7 +10,7 @@
#ifdef __KERNEL__
#ifndef __powerpc64__
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#endif
#include <asm/pgalloc.h>
#ifndef __powerpc64__
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 0969285996cb..64c04ab09112 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -93,18 +93,63 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user_goto(x, ptr, label) \
+ __put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
#define __get_user_allowed(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-#define __put_user_allowed(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
#define __get_user_inatomic(x, ptr) \
__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#ifdef CONFIG_PPC64
+
+#define ___get_user_instr(gu_op, dest, ptr) \
+({ \
+ long __gui_ret = 0; \
+ unsigned long __gui_ptr = (unsigned long)ptr; \
+ struct ppc_inst __gui_inst; \
+ unsigned int __prefix, __suffix; \
+ __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
+ if (__gui_ret == 0) { \
+ if ((__prefix >> 26) == OP_PREFIX) { \
+ __gui_ret = gu_op(__suffix, \
+ (unsigned int __user *)__gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, \
+ __suffix); \
+ } else { \
+ __gui_inst = ppc_inst(__prefix); \
+ } \
+ if (__gui_ret == 0) \
+ (dest) = __gui_inst; \
+ } \
+ __gui_ret; \
+})
+
+#define get_user_instr(x, ptr) \
+ ___get_user_instr(get_user, x, ptr)
+
+#define __get_user_instr(x, ptr) \
+ ___get_user_instr(__get_user, x, ptr)
+
+#define __get_user_instr_inatomic(x, ptr) \
+ ___get_user_instr(__get_user_inatomic, x, ptr)
+
+#else /* !CONFIG_PPC64 */
+#define get_user_instr(x, ptr) \
+ get_user((x).val, (u32 __user *)(ptr))
+
+#define __get_user_instr(x, ptr) \
+ __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)
+
+#define __get_user_instr_inatomic(x, ptr) \
+ __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))
+
+#endif /* CONFIG_PPC64 */
+
extern long __put_user_bad(void);
/*
@@ -162,7 +207,7 @@ do { \
prevent_write_to_user(ptr, size); \
} while (0)
-#define __put_user_nocheck(x, ptr, size, do_allow) \
+#define __put_user_nocheck(x, ptr, size) \
({ \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
@@ -172,10 +217,7 @@ do { \
if (!is_kernel_addr((unsigned long)__pu_addr)) \
might_fault(); \
__chk_user_ptr(__pu_addr); \
- if (do_allow) \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
- else \
- __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
\
__pu_err; \
})
@@ -208,6 +250,52 @@ do { \
})
+#define __put_user_asm_goto(x, addr, label, op) \
+ asm volatile goto( \
+ "1: " op "%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), "m" (*addr) \
+ : \
+ : label)
+
+#ifdef __powerpc64__
+#define __put_user_asm2_goto(x, ptr, label) \
+ __put_user_asm_goto(x, ptr, label, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2_goto(x, addr, label) \
+ asm volatile goto( \
+ "1: stw%X1 %0, %1\n" \
+ "2: stw%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : \
+ : "r" (x), "m" (*addr) \
+ : \
+ : label)
+#endif /* __powerpc64__ */
+
+#define __put_user_size_goto(x, ptr, size, label) \
+do { \
+ switch (size) { \
+ case 1: __put_user_asm_goto(x, ptr, label, "stb"); break; \
+ case 2: __put_user_asm_goto(x, ptr, label, "sth"); break; \
+ case 4: __put_user_asm_goto(x, ptr, label, "stw"); break; \
+ case 8: __put_user_asm2_goto(x, ptr, label); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+#define __put_user_nocheck_goto(x, ptr, size, label) \
+do { \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ if (!is_kernel_addr((unsigned long)__pu_addr)) \
+ might_fault(); \
+ __chk_user_ptr(ptr); \
+ __put_user_size_goto((x), __pu_addr, (size), label); \
+} while (0)
+
+
extern long __get_user_bad(void);
/*
@@ -489,10 +577,51 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
#define user_access_save prevent_user_access_return
#define user_access_restore restore_user_access
+static __must_check inline bool
+user_read_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+ allow_read_from_user(ptr, len);
+ return true;
+}
+#define user_read_access_begin user_read_access_begin
+#define user_read_access_end prevent_current_read_from_user
+
+static __must_check inline bool
+user_write_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+ allow_write_to_user((void __user *)ptr, len);
+ return true;
+}
+#define user_write_access_begin user_write_access_begin
+#define user_write_access_end prevent_current_write_to_user
+
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+
#define unsafe_copy_to_user(d, s, l, e) \
- unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+do { \
+ u8 __user *_dst = (u8 __user *)(d); \
+ const u8 *_src = (const u8 *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
+ __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
+ if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \
+ __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
+} while (0)
#endif /* _ARCH_POWERPC_UACCESS_H */
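A userspace-style sketch of the fetch logic behind ___get_user_instr() above: read one word and, if its primary opcode is OP_PREFIX, read the following word as the suffix of a 64-bit prefixed instruction. The ppc_inst structure here is a simplified stand-in; the real type comes from asm/inst.h, which is not part of this hunk.

#include <stdint.h>
#include <stdio.h>

#define OP_PREFIX 1	/* from ppc-opcode.h earlier in this diff */

struct ppc_inst { uint32_t val; uint32_t suffix; };	/* simplified stand-in */

static struct ppc_inst read_inst(const uint32_t *mem)
{
	struct ppc_inst inst = { .val = mem[0], .suffix = 0 };

	if ((mem[0] >> 26) == OP_PREFIX)	/* top 6 bits select the prefix form */
		inst.suffix = mem[1];		/* second word completes the instruction */
	return inst;
}

int main(void)
{
	uint32_t code[2] = { 0x04000000u, 0xe4000000u };	/* made-up prefix + suffix words */
	struct ppc_inst inst = read_inst(code);

	printf("prefixed=%d length=%d\n", inst.suffix != 0, inst.suffix ? 8 : 4);
	return 0;	/* prints prefixed=1 length=8 */
}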
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 2bbdf27d09b5..5bf65f5d44a9 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -11,10 +11,11 @@
#include <linux/notifier.h>
#include <asm/probes.h>
+#include <asm/inst.h>
typedef ppc_opcode_t uprobe_opcode_t;
-#define MAX_UINSN_BYTES 4
+#define MAX_UINSN_BYTES 8
#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
/* The following alias is needed for reference from arch-agnostic code */
@@ -23,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- u32 insn;
- u32 ixol;
+ struct ppc_inst insn;
+ struct ppc_inst ixol;
};
};
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h

index f93e6b0f5c84..e33f80b0ea81 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -86,7 +86,6 @@ struct vas_tx_win_attr {
int wcreds_max;
int lpid;
int pidr; /* hardware PID (from SPRN_PID) */
- int pid; /* linux process id */
int pswid;
int rsvd_txbuf_count;
int tc_mode;
@@ -163,4 +162,16 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
+/*
+ * Register / unregister a coprocessor type with the VAS API exported
+ * to user space. Applications can use this API to open / close a window
+ * which can be used to send / receive requests directly to the coprocessor.
+ *
+ * Only the NX GZIP coprocessor type is supported now, but this API can be
+ * used for others in the future.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_coproc_api(void);
+
#endif /* __ASM_POWERPC_VAS_H */
diff --git a/arch/powerpc/include/asm/xilinx_intc.h b/arch/powerpc/include/asm/xilinx_intc.h
deleted file mode 100644
index ca9aa162fb09..000000000000
--- a/arch/powerpc/include/asm/xilinx_intc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Xilinx intc external definitions
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- */
-#ifndef _ASM_POWERPC_XILINX_INTC_H
-#define _ASM_POWERPC_XILINX_INTC_H
-
-#ifdef __KERNEL__
-
-extern void __init xilinx_intc_init_tree(void);
-extern unsigned int xintc_get_irq(void);
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_XILINX_INTC_H */
diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h
deleted file mode 100644
index 7a8275caf6af..000000000000
--- a/arch/powerpc/include/asm/xilinx_pci.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Xilinx pci external definitions
- *
- * Copyright 2009 Roderick Colenbrander
- * Copyright 2009 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef INCLUDE_XILINX_PCI
-#define INCLUDE_XILINX_PCI
-
-#ifdef CONFIG_XILINX_PCI
-extern void __init xilinx_pci_init(void);
-#else
-static inline void __init xilinx_pci_init(void) { return; }
-#endif
-
-#endif /* INCLUDE_XILINX_PCI */
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
index 33aee7490cbb..8b211faa0e42 100644
--- a/arch/powerpc/include/asm/xive-regs.h
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -37,6 +37,14 @@
#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
+/*
+ * Load-after-store ordering
+ *
+ * Adding this offset to the load address will enforce
+ * load-after-store ordering. This is required to use StoreEOI.
+ */
+#define XIVE_ESB_LD_ST_MO 0x40 /* Load-after-store ordering */
+
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
#define XIVE_ESB_INVALID 0xFF
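A loosely sketched illustration of the new XIVE_ESB_LD_ST_MO offset: ORing it into an ESB load offset is what enforces load-after-store ordering per the comment above. esb_mmio_read() below is a stub standing in for the real MMIO accessor; only the offset arithmetic reflects this patch.

#include <stdint.h>
#include <stdio.h>

#define XIVE_ESB_SET_PQ_10	0xe00
#define XIVE_ESB_LD_ST_MO	0x40	/* Load-after-store ordering */

/* Stub standing in for the real MMIO load helper. */
static uint64_t esb_mmio_read(uintptr_t addr)
{
	return (uint64_t)addr;
}

int main(void)
{
	uintptr_t esb_page = 0x10000;	/* pretend ESB page base address */
	uint32_t offset = XIVE_ESB_SET_PQ_10 | XIVE_ESB_LD_ST_MO;

	/* The ordering offset makes this load ordered after prior stores,
	 * which the comment above notes is required for StoreEOI. */
	(void)esb_mmio_read(esb_page + offset);
	printf("load issued at ESB offset 0x%x\n", offset);
	return 0;
}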
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 93f982dbb3d4..d08ea11b271c 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -5,6 +5,8 @@
#ifndef _ASM_POWERPC_XIVE_H
#define _ASM_POWERPC_XIVE_H
+#include <asm/opal-api.h>
+
#define XIVE_INVALID_VP 0xffffffff
#ifdef CONFIG_PPC_XIVE
@@ -108,7 +110,6 @@ void xive_native_free_vp_block(u32 vp_base);
int xive_native_populate_irq_data(u32 hw_irq,
struct xive_irq_data *data);
void xive_cleanup_irq_data(struct xive_irq_data *xd);
-u32 xive_native_alloc_irq(void);
void xive_native_free_irq(u32 irq);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
@@ -137,6 +138,12 @@ int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
u32 qindex);
int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
bool xive_native_has_queue_state_support(void);
+extern u32 xive_native_alloc_irq_on_chip(u32 chip_id);
+
+static inline u32 xive_native_alloc_irq(void)
+{
+ return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);
+}
#else
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 540592034740..731b97dc2d15 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -50,6 +50,8 @@
#define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */
#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
#define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000 /* TM w/out suspended state */
+#define PPC_FEATURE2_ARCH_3_1 0x00040000 /* ISA 3.1 */
+#define PPC_FEATURE2_MMA 0x00020000 /* Matrix Multiply Assist */
/*
* IMPORTANT!
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
new file mode 100644
index 000000000000..ebd4b2424785
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright 2019 IBM Corp.
+ */
+
+#ifndef _UAPI_MISC_VAS_H
+#define _UAPI_MISC_VAS_H
+
+#include <linux/types.h>
+
+#include <asm/ioctl.h>
+
+#define VAS_MAGIC 'v'
+#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+
+struct vas_tx_win_open_attr {
+ __u32 version;
+ __s16 vas_id; /* specific instance of vas or -1 for default */
+ __u16 reserved1;
+ __u64 flags; /* Future use */
+ __u64 reserved2[6];
+};
+
+#endif /* _UAPI_MISC_VAS_H */
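A hedged userspace sketch of driving the new VAS_TX_WIN_OPEN ioctl. The ioctl number and structure layout are taken from vas-api.h above; the device node path and the version value are assumptions for illustration, not defined anywhere in this diff.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VAS_MAGIC	'v'

struct vas_tx_win_open_attr {
	uint32_t version;
	int16_t  vas_id;	/* specific instance of vas or -1 for default */
	uint16_t reserved1;
	uint64_t flags;		/* Future use */
	uint64_t reserved2[6];
};

#define VAS_TX_WIN_OPEN	_IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)

int main(void)
{
	struct vas_tx_win_open_attr attr;
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&attr, 0, sizeof(attr));
	attr.version = 1;	/* assumed version value */
	attr.vas_id = -1;	/* default VAS instance */
	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
		perror("VAS_TX_WIN_OPEN");
	close(fd);
	return 0;
}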
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 92045ed64976..1f1ce8b86d5b 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -24,6 +24,7 @@
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
+#include <asm/inst.h>
struct aligninfo {
unsigned char len;
@@ -104,7 +105,7 @@ static struct aligninfo spe_aligninfo[32] = {
* so we don't need the address swizzling.
*/
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
- unsigned int instr)
+ struct ppc_inst ppc_instr)
{
int ret;
union {
@@ -115,8 +116,9 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
} data, temp;
unsigned char __user *p, *addr;
unsigned long *evr = &current->thread.evr[reg];
- unsigned int nb, flags;
+ unsigned int nb, flags, instr;
+ instr = ppc_inst_val(ppc_instr);
instr = (instr >> 1) & 0x1f;
/* DAR has the operand effective address */
@@ -293,7 +295,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
int fix_alignment(struct pt_regs *regs)
{
- unsigned int instr;
+ struct ppc_inst instr;
struct instruction_op op;
int r, type;
@@ -303,18 +305,18 @@ int fix_alignment(struct pt_regs *regs)
*/
CHECK_FULL_REGS(regs);
- if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
+ if (unlikely(__get_user_instr(instr, (void __user *)regs->nip)))
return -EFAULT;
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
/* We don't handle PPC little-endian any more... */
if (cpu_has_feature(CPU_FTR_PPC_LE))
return -EIO;
- instr = swab32(instr);
+ instr = ppc_inst_swab(instr);
}
#ifdef CONFIG_SPE
- if ((instr >> 26) == 0x4) {
- int reg = (instr >> 21) & 0x1f;
+ if (ppc_inst_primary_opcode(instr) == 0x4) {
+ int reg = (ppc_inst_val(instr) >> 21) & 0x1f;
PPC_WARN_ALIGNMENT(spe, regs);
return emulate_spe(regs, reg, instr);
}
@@ -331,7 +333,7 @@ int fix_alignment(struct pt_regs *regs)
* when pasting to a co-processor. Furthermore, paste_last is the
* synchronisation point for preceding copy/paste sequences.
*/
- if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
+ if ((ppc_inst_val(instr) & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
return -EIO;
r = analyse_instr(&op, regs, instr);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index fcf24a365fc0..6657dc6b2336 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -30,7 +30,6 @@
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
@@ -70,6 +69,10 @@
#include <asm/fixmap.h>
#endif
+#ifdef CONFIG_XMON
+#include "../xmon/xmon_bpts.h"
+#endif
+
#define STACK_PT_REGS_OFFSET(sym, val) \
DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val))
@@ -795,5 +798,9 @@ int main(void)
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
#endif
+#ifdef CONFIG_XMON
+ DEFINE(BPT_SIZE, BPT_SIZE);
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index f57712a55815..02300edc6989 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -9,13 +9,13 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f6517f67265a..f8b5ff64b604 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -288,6 +288,7 @@ _GLOBAL(__init_fpu_registers)
mtmsr r10
isync
blr
+_ASM_NOKPROBE_SYMBOL(__init_fpu_registers)
/* Definitions for the table use to save CPU states */
@@ -483,4 +484,5 @@ _GLOBAL(__restore_cpu_setup)
1:
mtcr r7
blr
+_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index a460298c7ddb..efdcfa714106 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -91,10 +91,15 @@ _GLOBAL(__restore_cpu_power8)
mtlr r11
blr
+_GLOBAL(__setup_cpu_power10)
+ mflr r11
+ bl __init_FSCR_power10
+ b 1f
+
_GLOBAL(__setup_cpu_power9)
mflr r11
bl __init_FSCR
- bl __init_PMU
+1: bl __init_PMU
bl __init_hvmode_206
mtlr r11
beqlr
@@ -116,10 +121,15 @@ _GLOBAL(__setup_cpu_power9)
mtlr r11
blr
+_GLOBAL(__restore_cpu_power10)
+ mflr r11
+ bl __init_FSCR_power10
+ b 1f
+
_GLOBAL(__restore_cpu_power9)
mflr r11
bl __init_FSCR
- bl __init_PMU
+1: bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
mtlr r11
@@ -182,9 +192,15 @@ __init_LPCR_ISA300:
isync
blr
+__init_FSCR_power10:
+ mfspr r3, SPRN_FSCR
+ ori r3, r3, FSCR_PREFIX
+ mtspr SPRN_FSCR, r3
+ // fall through
+
__init_FSCR:
mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
+ ori r3,r3,FSCR_TAR|FSCR_EBB
mtspr SPRN_FSCR,r3
blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 13eba2eb46fe..b4066354f073 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -70,6 +70,8 @@ extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power9(void);
+extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec);
+extern void __restore_cpu_power10(void);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
@@ -119,6 +121,10 @@ extern void __restore_cpu_e6500(void);
PPC_FEATURE2_ARCH_3_00 | \
PPC_FEATURE2_HAS_IEEE128 | \
PPC_FEATURE2_DARN )
+#define COMMON_USER_POWER10 COMMON_USER_POWER9
+#define COMMON_USER2_POWER10 (COMMON_USER2_POWER9 | \
+ PPC_FEATURE2_ARCH_3_1 | \
+ PPC_FEATURE2_MMA)
#ifdef CONFIG_PPC_BOOK3E_64
#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
@@ -367,6 +373,22 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_power9,
.platform = "power9",
},
+ { /* 3.1-compliant processor, i.e. Power10 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000006,
+ .cpu_name = "POWER10 (architected)",
+ .cpu_features = CPU_FTRS_POWER10,
+ .cpu_user_features = COMMON_USER_POWER10,
+ .cpu_user_features2 = COMMON_USER2_POWER10,
+ .mmu_features = MMU_FTRS_POWER10,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .oprofile_cpu_type = "ppc64/ibm-compat-v1",
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .platform = "power10",
+ },
{ /* Power7 */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003f0000,
@@ -1232,69 +1254,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
},
#endif /* CONFIG_PPC_8xx */
#ifdef CONFIG_40x
- { /* 403GC */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x00200200,
- .cpu_name = "403GC",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_4xx,
- .platform = "ppc403",
- },
- { /* 403GCX */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x00201400,
- .cpu_name = "403GCX",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_4xx,
- .platform = "ppc403",
- },
- { /* 403G ?? */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00200000,
- .cpu_name = "403G ??",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_4xx,
- .platform = "ppc403",
- },
- { /* 405GP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40110000,
- .cpu_name = "405GP",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* STB 03xxx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40130000,
- .cpu_name = "STB03xxx",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
{ /* STB 04xxx */
.pvr_mask = 0xffff0000,
.pvr_value = 0x41810000,
@@ -1385,32 +1344,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_4xx,
.platform = "ppc405",
},
- { /* Xilinx Virtex-II Pro */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x20010000,
- .cpu_name = "Virtex-II Pro",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* Xilinx Virtex-4 FX */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x20011000,
- .cpu_name = "Virtex-4 FX",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
{ /* 405EP */
.pvr_mask = 0xffff0000,
.pvr_value = 0x51210000,
@@ -1800,19 +1733,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
- { /* 440 in Xilinx Virtex-5 FXT */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x7ff21910,
- .cpu_name = "440 in Virtex-5 FXT",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440x5,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
{ /* 460EX */
.pvr_mask = 0xffff0006,
.pvr_value = 0x13020002,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 05745ddbd229..735e89337398 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -18,6 +18,7 @@
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
+#include <asm/inst.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -34,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
static void __init create_trampoline(unsigned long addr)
{
- unsigned int *p = (unsigned int *)addr;
+ struct ppc_inst *p = (struct ppc_inst *)addr;
	/* The maximum range of a single instruction branch is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
@@ -44,8 +45,8 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
- patch_instruction(p, PPC_INST_NOP);
- patch_branch(++p, addr + PHYSICAL_START, 0);
+ patch_instruction(p, ppc_inst(PPC_INST_NOP));
+ patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index cc14aa6c4a1b..500f52fa4711 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -16,7 +16,7 @@
bool dawr_force_enable;
EXPORT_SYMBOL_GPL(dawr_force_enable);
-int set_dawr(struct arch_hw_breakpoint *brk)
+int set_dawr(int nr, struct arch_hw_breakpoint *brk)
{
unsigned long dawr, dawrx, mrd;
@@ -39,15 +39,24 @@ int set_dawr(struct arch_hw_breakpoint *brk)
if (ppc_md.set_dawr)
return ppc_md.set_dawr(dawr, dawrx);
- mtspr(SPRN_DAWR, dawr);
- mtspr(SPRN_DAWRX, dawrx);
+ if (nr == 0) {
+ mtspr(SPRN_DAWR0, dawr);
+ mtspr(SPRN_DAWRX0, dawrx);
+ } else {
+ mtspr(SPRN_DAWR1, dawr);
+ mtspr(SPRN_DAWRX1, dawrx);
+ }
return 0;
}
-static void set_dawr_cb(void *info)
+static void disable_dawrs_cb(void *info)
{
- set_dawr(info);
+ struct arch_hw_breakpoint null_brk = {0};
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ set_dawr(i, &null_brk);
}
static ssize_t dawr_write_file_bool(struct file *file,
@@ -60,7 +69,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
	/* Send error to user if the hypervisor won't allow us to write DAWR */
if (!dawr_force_enable &&
firmware_has_feature(FW_FEATURE_LPAR) &&
- set_dawr(&null_brk) != H_SUCCESS)
+ set_dawr(0, &null_brk) != H_SUCCESS)
return -ENODEV;
rc = debugfs_write_file_bool(file, user_buf, count, ppos);
@@ -69,7 +78,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
/* If we are clearing, make sure all CPUs have the DAWR cleared */
if (!dawr_force_enable)
- smp_call_function(set_dawr_cb, &null_brk, 0);
+ smp_call_function(disable_dawrs_cb, NULL, 0);
return rc;
}
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 36bc0d5c4f3a..3a409517c031 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -26,6 +26,7 @@
/* Device-tree visible constants follow */
#define ISA_V2_07B 2070
#define ISA_V3_0B 3000
+#define ISA_V3_1 3100
#define USABLE_PR (1U << 0)
#define USABLE_OS (1U << 1)
@@ -74,6 +75,7 @@ static struct {
u64 lpcr_clear;
u64 hfscr;
u64 fscr;
+ u64 pcr;
} system_registers;
static void (*init_pmu_registers)(void);
@@ -101,7 +103,7 @@ static void __restore_cpu_cpufeatures(void)
if (hv_mode) {
mtspr(SPRN_LPID, 0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
- mtspr(SPRN_PCR, PCR_MASK);
+ mtspr(SPRN_PCR, system_registers.pcr);
}
mtspr(SPRN_FSCR, system_registers.fscr);
@@ -346,6 +348,14 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
u64 lpcr;
+ /*
+ * Linux relies on FSCR[DSCR] being clear, so that we can take the
+ * facility unavailable interrupt and track the task's usage of DSCR.
+ * See facility_unavailable_exception().
+ * Clear the bit here so that feat_enable() doesn't set it.
+ */
+ f->fscr_bit_nr = -1;
+
feat_enable(f);
lpcr = mfspr(SPRN_LPCR);
@@ -552,6 +562,18 @@ static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
return 1;
}
+static int __init feat_enable_mma(struct dt_cpu_feature *f)
+{
+ u64 pcr;
+
+ feat_enable(f);
+ pcr = mfspr(SPRN_PCR);
+ pcr &= ~PCR_MMA_DIS;
+ mtspr(SPRN_PCR, pcr);
+
+ return 1;
+}
+
struct dt_cpu_feature_match {
const char *name;
int (*enable)(struct dt_cpu_feature *f);
@@ -625,6 +647,8 @@ static struct dt_cpu_feature_match __initdata
{"vector-binary128", feat_enable, 0},
{"vector-binary16", feat_enable, 0},
{"wait-v3", feat_enable, 0},
+ {"prefix-instructions", feat_enable, 0},
+ {"matrix-multiply-assist", feat_enable_mma, 0},
};
static bool __initdata using_dt_cpu_ftrs;
@@ -654,6 +678,11 @@ static void __init cpufeatures_setup_start(u32 isa)
cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
}
+
+ if (isa >= 3100) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31;
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1;
+ }
}
static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
@@ -770,6 +799,7 @@ static void __init cpufeatures_setup_finished(void)
system_registers.lpcr = mfspr(SPRN_LPCR);
system_registers.hfscr = mfspr(SPRN_HFSCR);
system_registers.fscr = mfspr(SPRN_FSCR);
+ system_registers.pcr = mfspr(SPRN_PCR);
pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 7cdcb413bb44..d407981dec76 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1106,6 +1106,37 @@ static int eeh_init(void)
core_initcall_sync(eeh_init);
+static int eeh_device_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ /*
+ * Note: It's not possible to perform EEH device addition (i.e.
+ * {pseries,pnv}_pcibios_bus_add_device()) here because it depends on
+ * the device's resources, which have not yet been set up.
+ */
+ case BUS_NOTIFY_DEL_DEVICE:
+ eeh_remove_device(to_pci_dev(dev));
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_device_nb = {
+ .notifier_call = eeh_device_notifier,
+};
+
+static __init int eeh_set_bus_notifier(void)
+{
+ bus_register_notifier(&pci_bus_type, &eeh_device_nb);
+ return 0;
+}
+arch_initcall(eeh_set_bus_notifier);
+
/**
* eeh_probe_device() - Perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 8420abd4ea1c..217ebdf5b00b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -28,7 +28,6 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
-#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
@@ -51,6 +50,7 @@ mcheck_transfer_to_handler:
mfspr r0,SPRN_DSRR1
stw r0,_DSRR1(r11)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
.globl debug_transfer_to_handler
debug_transfer_to_handler:
@@ -59,6 +59,7 @@ debug_transfer_to_handler:
mfspr r0,SPRN_CSRR1
stw r0,_CSRR1(r11)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
.globl crit_transfer_to_handler
crit_transfer_to_handler:
@@ -94,6 +95,7 @@ crit_transfer_to_handler:
rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif
#ifdef CONFIG_40x
@@ -115,6 +117,7 @@ crit_transfer_to_handler:
rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif
/*
@@ -127,6 +130,7 @@ crit_transfer_to_handler:
.globl transfer_to_handler_full
transfer_to_handler_full:
SAVE_NVGPRS(r11)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
/* fall through */
.globl transfer_to_handler
@@ -227,6 +231,23 @@ transfer_to_handler_cont:
SYNC
RFI /* jump to handler, enable MMU */
+#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+4: rlwinm r12,r12,0,~_TLF_NAPPING
+ stw r12,TI_LOCAL_FLAGS(r2)
+ b power_save_ppc32_restore
+
+7: rlwinm r12,r12,0,~_TLF_SLEEPING
+ stw r12,TI_LOCAL_FLAGS(r2)
+ lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
+ rlwinm r9,r9,0,~MSR_EE
+ lwz r12,_LINK(r11) /* and return to address in LR */
+ kuap_restore r11, r2, r3, r4, r5
+ lwz r2, GPR2(r11)
+ b fast_exception_return
+#endif
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
+
#ifdef CONFIG_TRACE_IRQFLAGS
1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
* keep interrupts disabled at this point otherwise we might risk
@@ -272,21 +293,6 @@ reenable_mmu:
bctr /* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */
-#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
-4: rlwinm r12,r12,0,~_TLF_NAPPING
- stw r12,TI_LOCAL_FLAGS(r2)
- b power_save_ppc32_restore
-
-7: rlwinm r12,r12,0,~_TLF_SLEEPING
- stw r12,TI_LOCAL_FLAGS(r2)
- lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
- rlwinm r9,r9,0,~MSR_EE
- lwz r12,_LINK(r11) /* and return to address in LR */
- kuap_restore r11, r2, r3, r4, r5
- lwz r2, GPR2(r11)
- b fast_exception_return
-#endif
-
#ifndef CONFIG_VMAP_STACK
/*
* On kernel stack overflow, load up an initial stack pointer
@@ -313,6 +319,7 @@ stack_ovf:
mtspr SPRN_SRR1,r10
SYNC
RFI
+_ASM_NOKPROBE_SYMBOL(stack_ovf)
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -455,6 +462,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
lwz r7,_NIP(r1)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
+syscall_exit_finish:
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
mtspr SPRN_NRI, r0
#endif
@@ -462,6 +470,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtspr SPRN_SRR1,r8
SYNC
RFI
+_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
#ifdef CONFIG_44x
2: li r7,0
iccci r0,r0
@@ -541,9 +550,6 @@ syscall_exit_work:
addi r12,r2,TI_FLAGS
3: lwarx r8,0,r12
andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r12
-#endif
stwcx. r8,0,r12
bne- 3b
@@ -596,6 +602,7 @@ ret_from_kernel_syscall:
mtspr SPRN_SRR1, r10
SYNC
RFI
+_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
/*
* The fork/clone functions need to copy the full register set into
@@ -799,6 +806,7 @@ fast_exception_return:
lwz r11,GPR11(r11)
SYNC
RFI
+_ASM_NOKPROBE_SYMBOL(fast_exception_return)
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
@@ -918,9 +926,6 @@ resume_kernel:
addi r5,r2,TI_FLAGS
0: lwarx r8,0,r5
andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r5
-#endif
stwcx. r8,0,r5
bne- 0b
1:
@@ -997,7 +1002,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
mtspr SPRN_XER,r10
mtctr r11
- PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
lwarx r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
@@ -1038,6 +1042,8 @@ exc_exit_restart:
exc_exit_restart_end:
SYNC
RFI
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
@@ -1059,16 +1065,15 @@ exc_exit_restart_end:
exc_exit_restart:
lwz r11,_NIP(r1)
lwz r12,_MSR(r1)
-exc_exit_start:
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
REST_2GPRS(11, r1)
lwz r1,GPR1(r1)
.globl exc_exit_restart_end
exc_exit_restart_end:
- PPC405_ERR77_SYNC
rfi
b . /* prevent prefetch past rfi */
+_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
/*
* Returning from a critical interrupt in user mode doesn't need
@@ -1109,7 +1114,6 @@ exc_exit_restart_end:
lwz r11,_CTR(r1); \
mtspr SPRN_XER,r10; \
mtctr r11; \
- PPC405_ERR77(0,r1); \
stwcx. r0,0,r1; /* to clear the reservation */ \
lwz r11,_LINK(r1); \
mtlr r11; \
@@ -1129,7 +1133,6 @@ exc_exit_restart_end:
lwz r10,GPR10(r1); \
lwz r11,GPR11(r1); \
lwz r1,GPR1(r1); \
- PPC405_ERR77_SYNC; \
exc_lvl_rfi; \
b .; /* prevent prefetch past exc_lvl_rfi */
@@ -1182,6 +1185,7 @@ ret_from_crit_exc:
mtspr SPRN_SRR0,r9;
mtspr SPRN_SRR1,r10;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */
#ifdef CONFIG_BOOKE
@@ -1193,6 +1197,7 @@ ret_from_crit_exc:
RESTORE_xSRR(SRR0,SRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
.globl ret_from_debug_exc
ret_from_debug_exc:
@@ -1203,6 +1208,7 @@ ret_from_debug_exc:
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
+_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
.globl ret_from_mcheck_exc
ret_from_mcheck_exc:
@@ -1214,6 +1220,7 @@ ret_from_mcheck_exc:
RESTORE_xSRR(DSRR0,DSRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
/*
@@ -1337,6 +1344,7 @@ nonrecoverable:
bl unrecoverable_exception
/* shouldn't return */
b 4b
+_ASM_NOKPROBE_SYMBOL(nonrecoverable)
.section .bss
.align 2
@@ -1391,10 +1399,5 @@ _GLOBAL(enter_rtas)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI /* return to caller */
-
- .globl machine_check_in_rtas
-machine_check_in_rtas:
- twi 31,0,0
- /* XXX load up BATs and panic */
-
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b3c9f15089b6..9d49338e0c85 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -479,11 +479,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
kuap_check_amr r3, r4
- ld r4,_MSR(r1)
- andi. r0,r4,MSR_PR
+ ld r5,_MSR(r1)
+ andi. r0,r5,MSR_PR
bne .Lfast_user_interrupt_return
- kuap_restore_amr r3
- andi. r0,r4,MSR_RI
+ kuap_restore_amr r3, r4
+ andi. r0,r5,MSR_RI
li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
bne+ .Lfast_kernel_interrupt_return
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 9d32158ce36f..2ed14d4a47f5 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -11,6 +11,7 @@
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/machdep.h>
+#include <asm/inst.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
@@ -36,10 +37,10 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
return -1;
for (i = 0; i < (len / 4); i++) {
- u32 inst = be32_to_cpu(insts[i]);
- patch_instruction(epapr_hypercall_start + i, inst);
+ struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
+ patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
- patch_instruction(epapr_ev_idle_start + i, inst);
+ patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst);
#endif
}
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ebeebab74b56..e70ebb5c318c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -939,13 +939,13 @@ EXC_COMMON_BEGIN(system_reset_common)
* the right thing. We do not want to reconcile because that goes
* through irq tracing which we don't want in NMI.
*
- * Save PACAIRQHAPPENED to _DAR (otherwise unused), and set HARD_DIS
+ * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS
* as we are running with MSR[EE]=0.
*/
li r10,IRQS_ALL_DISABLED
stb r10,PACAIRQSOFTMASK(r13)
lbz r10,PACAIRQHAPPENED(r13)
- std r10,_DAR(r1)
+ std r10,RESULT(r1)
ori r10,r10,PACA_IRQ_HARD_DIS
stb r10,PACAIRQHAPPENED(r13)
@@ -966,12 +966,12 @@ EXC_COMMON_BEGIN(system_reset_common)
/*
* Restore soft mask settings.
*/
- ld r10,_DAR(r1)
+ ld r10,RESULT(r1)
stb r10,PACAIRQHAPPENED(r13)
ld r10,SOFTE(r1)
stb r10,PACAIRQSOFTMASK(r13)
- kuap_restore_amr r10
+ kuap_restore_amr r9, r10
EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
@@ -1117,11 +1117,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
li r10,MSR_RI
mtmsrd r10,1
+ /*
+ * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
+ * system_reset_common)
+ */
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ lbz r10,PACAIRQHAPPENED(r13)
+ std r10,RESULT(r1)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
+ /*
+ * Restore soft mask settings.
+ */
+ ld r10,RESULT(r1)
+ stb r10,PACAIRQHAPPENED(r13)
+ ld r10,SOFTE(r1)
+ stb r10,PACAIRQSOFTMASK(r13)
+
#ifdef CONFIG_PPC_P7_NAP
/*
* Check if thread was in power saving mode. We come here when any
@@ -1225,17 +1244,19 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
bl machine_check_queue_event
/*
- * We have not used any non-volatile GPRs here, and as a rule
- * most exception code including machine check does not.
- * Therefore PACA_NAPSTATELOST does not need to be set. Idle
- * wakeup will restore volatile registers.
+ * GPR-loss wakeups are relatively straightforward, because the
+ * idle sleep code has saved all non-volatile registers on its
+ * own stack, and r1 in PACAR1.
*
- * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
+ * For no-loss wakeups the r1 and lr registers used by the
+ * early machine check handler have to be restored first. r2 is
+ * the kernel TOC, so no need to restore it.
*
* Then decrement MCE nesting after finishing with the stack.
*/
ld r3,_MSR(r1)
ld r4,_LINK(r1)
+ ld r1,GPR1(r1)
lhz r11,PACA_IN_MCE(r13)
subi r11,r11,1
@@ -1244,7 +1265,7 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
mtlr r4
rlwinm r10,r3,47-31,30,31
cmpwi cr1,r10,2
- bltlr cr1 /* no state loss, return to idle caller */
+ bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
b idle_return_gpr_loss
#endif
@@ -1266,6 +1287,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
andc r10,r10,r3
mtmsrd r10
+ lhz r12,PACA_IN_MCE(r13)
+ subi r12,r12,1
+ sth r12,PACA_IN_MCE(r13)
+
/* Invoke machine_check_exception to print MCE event and panic. */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
@@ -2740,7 +2765,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
li r10,IRQS_ALL_DISABLED
stb r10,PACAIRQSOFTMASK(r13)
lbz r10,PACAIRQHAPPENED(r13)
- std r10,_DAR(r1)
+ std r10,RESULT(r1)
ori r10,r10,PACA_IRQ_HARD_DIS
stb r10,PACAIRQHAPPENED(r13)
@@ -2754,12 +2779,12 @@ EXC_COMMON_BEGIN(soft_nmi_common)
/*
* Restore soft mask settings.
*/
- ld r10,_DAR(r1)
+ ld r10,RESULT(r1)
stb r10,PACAIRQHAPPENED(r13)
ld r10,SOFTE(r1)
stb r10,PACAIRQSOFTMASK(r13)
- kuap_restore_amr r10
+ kuap_restore_amr r9, r10
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
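The machine-check hunks above bracket the call to machine_check_early() with the same soft-mask bookkeeping used by system_reset_common: save PACAIRQHAPPENED into an otherwise unused stack slot (RESULT), force IRQS_ALL_DISABLED plus PACA_IRQ_HARD_DIS, and restore afterwards (the real code reloads the interrupted context's mask from SOFTE(r1) on the exception frame). A hedged C sketch of that save/force/restore bracket, with purely hypothetical names and values:

#include <stdio.h>

#define DEMO_IRQS_ALL_DISABLED	0x03
#define DEMO_IRQ_HARD_DIS	0x10

static unsigned char demo_soft_mask;
static unsigned char demo_irq_happened = 0x01;

static void demo_nmi_bracket(void (*body)(void))
{
	unsigned char saved_happened = demo_irq_happened;	/* std r10,RESULT(r1) */
	unsigned char saved_mask = demo_soft_mask;

	demo_soft_mask = DEMO_IRQS_ALL_DISABLED;
	demo_irq_happened |= DEMO_IRQ_HARD_DIS;

	body();						/* bl machine_check_early */

	demo_irq_happened = saved_happened;		/* ld r10,RESULT(r1) */
	demo_soft_mask = saved_mask;			/* stands in for reloading SOFTE(r1) */
}

static void demo_body(void)
{
	printf("inside handler: mask=%#x\n", (unsigned int)demo_soft_mask);
}

int main(void)
{
	demo_nmi_bracket(demo_body);
	printf("after handler: mask=%#x happened=%#x\n",
	       (unsigned int)demo_soft_mask, (unsigned int)demo_irq_happened);
	return 0;
}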
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 59e60a9a9f5c..78ab9a6ee6ac 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -40,8 +40,17 @@ struct kobject *fadump_kobj;
#ifndef CONFIG_PRESERVE_FA_DUMP
static DEFINE_MUTEX(fadump_mutex);
-struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
-struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
+struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
+
+#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
+#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
+ sizeof(struct fadump_memory_range))
+static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
+struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
+ RESERVED_RNGS_SZ, 0,
+ RESERVED_RNGS_CNT, true };
+
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
#ifdef CONFIG_CMA
static struct cma *fadump_cma;
@@ -110,6 +119,11 @@ static int __init fadump_cma_init(void) { return 1; }
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data)
{
+ if (depth == 0) {
+ early_init_dt_scan_reserved_ranges(node);
+ return 0;
+ }
+
if (depth != 1)
return 0;
@@ -431,10 +445,72 @@ static int __init fadump_get_boot_mem_regions(void)
return ret;
}
+/*
+ * Returns true if the given range overlaps with the reserved memory ranges
+ * starting at idx, and updates idx to the index of the overlapping memory
+ * range. Returns false otherwise.
+ */
+static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
+{
+ bool ret = false;
+ int i;
+
+ for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
+ u64 rbase = reserved_mrange_info.mem_ranges[i].base;
+ u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
+
+ if (end <= rbase)
+ break;
+
+ if ((end > rbase) && (base < rend)) {
+ *idx = i;
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Locate a suitable memory area to reserve memory for FADump. While at it,
+ * look up reserved-ranges & avoid overlapping them, as they are used by F/W.
+ */
+static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
+{
+ struct fadump_memory_range *mrngs;
+ phys_addr_t mstart, mend;
+ int idx = 0;
+ u64 i, ret = 0;
+
+ mrngs = reserved_mrange_info.mem_ranges;
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &mstart, &mend, NULL) {
+ pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
+ i, mstart, mend, base);
+
+ if (mstart > base)
+ base = PAGE_ALIGN(mstart);
+
+ while ((mend > base) && ((mend - base) >= size)) {
+ if (!overlaps_reserved_ranges(base, base+size, &idx)) {
+ ret = base;
+ goto out;
+ }
+
+ base = mrngs[idx].base + mrngs[idx].size;
+ base = PAGE_ALIGN(base);
+ }
+ }
+
+out:
+ return ret;
+}
+
int __init fadump_reserve_mem(void)
{
- u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE;
- bool is_memblock_bottom_up = memblock_bottom_up();
+ u64 base, size, mem_boundary, bootmem_min;
int ret = 1;
if (!fw_dump.fadump_enabled)
@@ -455,9 +531,9 @@ int __init fadump_reserve_mem(void)
PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
if (!fw_dump.nocma) {
- align = FADUMP_CMA_ALIGNMENT;
fw_dump.boot_memory_size =
- ALIGN(fw_dump.boot_memory_size, align);
+ ALIGN(fw_dump.boot_memory_size,
+ FADUMP_CMA_ALIGNMENT);
}
#endif
@@ -525,13 +601,9 @@ int __init fadump_reserve_mem(void)
* Reserve memory at an offset closer to bottom of the RAM to
* minimize the impact of memory hot-remove operation.
*/
- memblock_set_bottom_up(true);
- base = memblock_find_in_range(base, mem_boundary, size, align);
+ base = fadump_locate_reserve_mem(base, size);
- /* Restore the previous allocation mode */
- memblock_set_bottom_up(is_memblock_bottom_up);
-
- if (!base) {
+ if (!base || (base + size > mem_boundary)) {
pr_err("Failed to find memory chunk for reservation!\n");
goto error_out;
}
@@ -728,10 +800,14 @@ void fadump_free_cpu_notes_buf(void)
static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
+ if (mrange_info->is_static) {
+ mrange_info->mem_range_cnt = 0;
+ return;
+ }
+
kfree(mrange_info->mem_ranges);
- mrange_info->mem_ranges = NULL;
- mrange_info->mem_ranges_sz = 0;
- mrange_info->max_mem_ranges = 0;
+ memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
+ (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}
/*
@@ -788,6 +864,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
int ret;
+ if (mrange_info->is_static) {
+ pr_err("Reached array size limit for %s memory ranges\n",
+ mrange_info->name);
+ return -ENOSPC;
+ }
+
ret = fadump_alloc_mem_ranges(mrange_info);
if (ret)
return ret;
@@ -1204,20 +1286,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
* Scan reserved-ranges to consider them while reserving/releasing
* memory for FADump.
*/
-static inline int fadump_scan_reserved_mem_ranges(void)
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
- struct device_node *root;
const __be32 *prop;
int len, ret = -1;
unsigned long i;
- root = of_find_node_by_path("/");
- if (!root)
- return ret;
+ /* reserved-ranges already scanned */
+ if (reserved_mrange_info.mem_range_cnt != 0)
+ return;
- prop = of_get_property(root, "reserved-ranges", &len);
+ prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
if (!prop)
- return ret;
+ return;
/*
* Each reserved range is an (address,size) pair, 2 cells each,
@@ -1239,7 +1320,8 @@ static inline int fadump_scan_reserved_mem_ranges(void)
}
}
- return ret;
+ /* Compact reserved ranges */
+ sort_and_merge_mem_ranges(&reserved_mrange_info);
}
/*
@@ -1253,32 +1335,21 @@ static void fadump_release_memory(u64 begin, u64 end)
u64 ra_start, ra_end, tstart;
int i, ret;
- fadump_scan_reserved_mem_ranges();
-
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
/*
- * Add reserved dump area to reserved ranges list
- * and exclude all these ranges while releasing memory.
+ * If reserved ranges array limit is hit, overwrite the last reserved
+ * memory range with reserved dump area to ensure it is excluded from
+ * the memory being released (reused for next FADump registration).
*/
- ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
- if (ret != 0) {
- /*
- * Not enough memory to setup reserved ranges but the system is
- * running shortage of memory. So, release all the memory except
- * Reserved dump area (reused for next fadump registration).
- */
- if (begin < ra_end && end > ra_start) {
- if (begin < ra_start)
- fadump_release_reserved_area(begin, ra_start);
- if (end > ra_end)
- fadump_release_reserved_area(ra_end, end);
- } else
- fadump_release_reserved_area(begin, end);
+ if (reserved_mrange_info.mem_range_cnt ==
+ reserved_mrange_info.max_mem_ranges)
+ reserved_mrange_info.mem_range_cnt--;
+ ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
+ if (ret != 0)
return;
- }
/* Get the reserved ranges list in order first. */
sort_and_merge_mem_ranges(&reserved_mrange_info);
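The fadump hunks above drop the memblock bottom-up search in favour of fadump_locate_reserve_mem(), which walks free memory and rejects any candidate window that overlaps a firmware reserved-ranges entry; overlaps_reserved_ranges() relies on those ranges being sorted, which sort_and_merge_mem_ranges() guarantees. A self-contained C sketch of that overlap test, using hypothetical demo_* names, might look like:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_range { uint64_t base, size; };

/* Ranges must be sorted by base; *idx is left pointing at the overlapping
 * range when one is found, mirroring the kernel helper above. */
static bool demo_overlaps(const struct demo_range *r, int cnt,
			  uint64_t base, uint64_t end, int *idx)
{
	int i;

	for (i = *idx; i < cnt; i++) {
		uint64_t rbase = r[i].base;
		uint64_t rend  = rbase + r[i].size;

		if (end <= rbase)	/* sorted: nothing later can overlap */
			break;
		if (end > rbase && base < rend) {
			*idx = i;
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct demo_range reserved[] = { { 0x1000, 0x1000 }, { 0x8000, 0x2000 } };
	int idx = 0;

	printf("%d\n", demo_overlaps(reserved, 2, 0x0000, 0x0800, &idx)); /* 0 */
	printf("%d\n", demo_overlaps(reserved, 2, 0x1800, 0x2800, &idx)); /* 1 */
	return 0;
}

Because the array is sorted by base, the scan can stop as soon as a reserved range starts at or beyond the candidate's end, which is the early break in both the sketch and the kernel helper.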
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 3235a8da6af7..cac22cb97a8c 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -12,7 +12,6 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
@@ -119,6 +118,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
/* restore registers and return */
/* we haven't used ctr or xer or lr */
blr
+_ASM_NOKPROBE_SYMBOL(load_up_fpu)
/*
* save_fpu(tsk)
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 97c887950c3c..705c042309d8 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -17,10 +17,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
@@ -297,7 +297,7 @@ MachineCheck:
cmpwi cr1, r4, 0
#endif
beq cr1, machine_check_tramp
- b machine_check_in_rtas
+ twi 31, 0, 0
#else
b machine_check_tramp
#endif
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 2cec543c38f0..926bfa73586a 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -26,17 +26,16 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
-#include <asm/asm-405.h>
#include "head_32.h"
@@ -176,135 +175,16 @@ _ENTRY(saved_ksp_limit)
* 0x0300 - Data Storage Exception
* This happens for just a few reasons. U0 set (but we don't do that),
* or zone protection fault (user violation, write to protected page).
- * If this is just an update of modified status, we do that quickly
- * and exit. Otherwise, we call heavywight functions to do the work.
+ * The other Data TLB exceptions bail out to this point
+ * if they can't resolve the lightweight TLB fault.
*/
START_EXCEPTION(0x0300, DataStorage)
- mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_SCRATCH1, r11
-#ifdef CONFIG_403GCX
- stw r12, 0(r0)
- stw r9, 4(r0)
- mfcr r11
- mfspr r12, SPRN_PID
- stw r11, 8(r0)
- stw r12, 12(r0)
-#else
- mtspr SPRN_SPRG_SCRATCH3, r12
- mtspr SPRN_SPRG_SCRATCH4, r9
- mfcr r11
- mfspr r12, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH6, r11
- mtspr SPRN_SPRG_SCRATCH5, r12
-#endif
-
- /* First, check if it was a zone fault (which means a user
- * tried to access a kernel or read-protected page - always
- * a SEGV). All other faults here must be stores, so no
- * need to check ESR_DST as well. */
- mfspr r10, SPRN_ESR
- andis. r10, r10, ESR_DIZ@h
- bne 2f
-
- mfspr r10, SPRN_DEAR /* Get faulting address */
-
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- lis r11, PAGE_OFFSET@h
- cmplw r10, r11
- blt+ 3f
- lis r11, swapper_pg_dir@h
- ori r11, r11, swapper_pg_dir@l
- li r9, 0
- mtspr SPRN_PID, r9 /* TLB will have 0 TID */
- b 4f
-
- /* Get the PGD for the current thread.
- */
-3:
- mfspr r11,SPRN_SPRG_THREAD
- lwz r11,PGDIR(r11)
-4:
- tophys(r11, r11)
- rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
- lwz r11, 0(r11) /* Get L1 entry */
- rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
- beq 2f /* Bail if no table */
-
- rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
- lwz r11, 0(r12) /* Get Linux PTE */
-
- andi. r9, r11, _PAGE_RW /* Is it writeable? */
- beq 2f /* Bail if not */
-
- /* Update 'changed'.
- */
- ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
- stw r11, 0(r12) /* Update Linux page table */
-
- /* Most of the Linux PTE is ready to load into the TLB LO.
- * We set ZSEL, where only the LS-bit determines user access.
- * We set execute, because we don't have the granularity to
- * properly set this at the page level (Linux problem).
- * If shared is set, we cause a zero PID->TID load.
- * Many of these bits are software only. Bits we don't set
- * here we (properly should) assume have the appropriate value.
- */
- li r12, 0x0ce2
- andc r11, r11, r12 /* Make sure 20, 21 are zero */
-
- /* find the TLB index that caused the fault. It has to be here.
- */
- tlbsx r9, 0, r10
-
- tlbwe r11, r9, TLB_DATA /* Load TLB LO */
-
- /* Done...restore registers and get out of here.
- */
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
- mfspr r9, SPRN_SPRG_SCRATCH4
- mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
- PPC405_ERR77_SYNC
- rfi /* Should sync shadow TLBs */
- b . /* prevent prefetch past rfi */
-
-2:
- /* The bailout. Restore registers to pre-exception conditions
- * and call the heavyweights to help us out.
- */
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
- mfspr r9, SPRN_SPRG_SCRATCH4
- mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
- b DataAccess
+ EXCEPTION_PROLOG
+ mfspr r5, SPRN_ESR /* Grab the ESR, save it, pass arg3 */
+ stw r5, _ESR(r11)
+ mfspr r4, SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
+ stw r4, _DEAR(r11)
+ EXC_XFER_LITE(0x300, handle_page_fault)
/*
* 0x0400 - Instruction Storage Exception
@@ -372,21 +252,11 @@ _ENTRY(saved_ksp_limit)
START_EXCEPTION(0x1100, DTLBMiss)
mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
mtspr SPRN_SPRG_SCRATCH1, r11
-#ifdef CONFIG_403GCX
- stw r12, 0(r0)
- stw r9, 4(r0)
- mfcr r11
- mfspr r12, SPRN_PID
- stw r11, 8(r0)
- stw r12, 12(r0)
-#else
mtspr SPRN_SPRG_SCRATCH3, r12
mtspr SPRN_SPRG_SCRATCH4, r9
- mfcr r11
- mfspr r12, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH6, r11
- mtspr SPRN_SPRG_SCRATCH5, r12
-#endif
+ mfcr r12
+ mfspr r9, SPRN_PID
+ mtspr SPRN_SPRG_SCRATCH5, r9
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -409,28 +279,34 @@ _ENTRY(saved_ksp_limit)
4:
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
- lwz r12, 0(r11) /* Get L1 entry */
- andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
+ lwz r11, 0(r11) /* Get L1 entry */
+ andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
- lwz r11, 0(r12) /* Get Linux PTE */
- andi. r9, r11, _PAGE_PRESENT
- beq 5f
+ rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
+ lwz r11, 0(r11) /* Get Linux PTE */
+#ifdef CONFIG_SWAP
+ li r9, _PAGE_PRESENT | _PAGE_ACCESSED
+#else
+ li r9, _PAGE_PRESENT
+#endif
+ andc. r9, r9, r11 /* Check permission */
+ bne 5f
- ori r11, r11, _PAGE_ACCESSED
- stw r11, 0(r12)
+ rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
+ and r9, r9, r11 /* hwwrite = dirty & rw */
+ rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
*/
- li r12, 0x00c0
- rlwimi r10, r12, 0, 20, 31
+ li r9, 0x00c0
+ rlwimi r10, r9, 0, 20, 31
b finish_tlb_load
2: /* Check for possible large-page pmd entry */
- rlwinm. r9, r12, 2, 22, 24
+ rlwinm. r9, r11, 2, 22, 24
beq 5f
/* Create TLB tag. This is the faulting address, plus a static
@@ -438,7 +314,6 @@ _ENTRY(saved_ksp_limit)
*/
ori r9, r9, 0x40
rlwimi r10, r9, 0, 20, 31
- mr r11, r12
b finish_tlb_load
@@ -446,24 +321,14 @@ _ENTRY(saved_ksp_limit)
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
+ mfspr r9, SPRN_SPRG_SCRATCH5
+ mtspr SPRN_PID, r9
+ mtcr r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r10, SPRN_SPRG_SCRATCH0
- b DataAccess
+ b DataStorage
/* 0x1200 - Instruction TLB Miss Exception
* Nearly the same as above, except we get our information from different
@@ -472,21 +337,11 @@ _ENTRY(saved_ksp_limit)
START_EXCEPTION(0x1200, ITLBMiss)
mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
mtspr SPRN_SPRG_SCRATCH1, r11
-#ifdef CONFIG_403GCX
- stw r12, 0(r0)
- stw r9, 4(r0)
- mfcr r11
- mfspr r12, SPRN_PID
- stw r11, 8(r0)
- stw r12, 12(r0)
-#else
mtspr SPRN_SPRG_SCRATCH3, r12
mtspr SPRN_SPRG_SCRATCH4, r9
- mfcr r11
- mfspr r12, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH6, r11
- mtspr SPRN_SPRG_SCRATCH5, r12
-#endif
+ mfcr r12
+ mfspr r9, SPRN_PID
+ mtspr SPRN_SPRG_SCRATCH5, r9
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -509,28 +364,34 @@ _ENTRY(saved_ksp_limit)
4:
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
- lwz r12, 0(r11) /* Get L1 entry */
- andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
+ lwz r11, 0(r11) /* Get L1 entry */
+ andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
- lwz r11, 0(r12) /* Get Linux PTE */
- andi. r9, r11, _PAGE_PRESENT
- beq 5f
+ rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
+ lwz r11, 0(r11) /* Get Linux PTE */
+#ifdef CONFIG_SWAP
+ li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#else
+ li r9, _PAGE_PRESENT | _PAGE_EXEC
+#endif
+ andc. r9, r9, r11 /* Check permission */
+ bne 5f
- ori r11, r11, _PAGE_ACCESSED
- stw r11, 0(r12)
+ rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
+ and r9, r9, r11 /* hwwrite = dirty & rw */
+ rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
*/
- li r12, 0x00c0
- rlwimi r10, r12, 0, 20, 31
+ li r9, 0x00c0
+ rlwimi r10, r9, 0, 20, 31
b finish_tlb_load
2: /* Check for possible large-page pmd entry */
- rlwinm. r9, r12, 2, 22, 24
+ rlwinm. r9, r11, 2, 22, 24
beq 5f
/* Create TLB tag. This is the faulting address, plus a static
@@ -538,7 +399,6 @@ _ENTRY(saved_ksp_limit)
*/
ori r9, r9, 0x40
rlwimi r10, r9, 0, 20, 31
- mr r11, r12
b finish_tlb_load
@@ -546,21 +406,11 @@ _ENTRY(saved_ksp_limit)
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
+ mfspr r9, SPRN_SPRG_SCRATCH5
+ mtspr SPRN_PID, r9
+ mtcr r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r10, SPRN_SPRG_SCRATCH0
b InstructionAccess
@@ -569,13 +419,7 @@ _ENTRY(saved_ksp_limit)
EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD)
EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
-#ifdef CONFIG_IBM405_ERR51
- /* 405GP errata 51 */
- START_EXCEPTION(0x1700, Trap_17)
- b DTLBMiss
-#else
EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
-#endif
EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD)
@@ -636,7 +480,6 @@ _ENTRY(saved_ksp_limit)
lwz r12,GPR12(r11)
lwz r10,crit_r10@l(0)
lwz r11,crit_r11@l(0)
- PPC405_ERR77_SYNC
rfci
b .
@@ -669,18 +512,6 @@ WDTException:
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)),
crit_transfer_to_handler, ret_from_crit_exc)
-/*
- * The other Data TLB exceptions bail out to this point
- * if they can't resolve the lightweight TLB fault.
- */
-DataAccess:
- EXCEPTION_PROLOG
- mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
- stw r5,_ESR(r11)
- mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
- stw r4, _DEAR(r11)
- EXC_XFER_LITE(0x300, handle_page_fault)
-
/* Other PowerPC processors, namely those derived from the 6xx-series
* have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
* However, for the 4xx-series processors these are neither defined nor
@@ -692,7 +523,7 @@ DataAccess:
* miss get to this point to load the TLB.
* r10 - TLB_TAG value
* r11 - Linux PTE
- * r12, r9 - available to use
+ * r9 - available to use
* PID - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
* Actually, it will fit now, but oh well.....a common place
@@ -701,45 +532,32 @@ DataAccess:
tlb_4xx_index:
.long 0
finish_tlb_load:
- /* load the next available TLB index.
- */
- lwz r9, tlb_4xx_index@l(0)
- addi r9, r9, 1
- andi. r9, r9, (PPC40X_TLB_SIZE-1)
- stw r9, tlb_4xx_index@l(0)
-
-6:
/*
* Clear out the software-only bits in the PTE to generate the
* TLB_DATA value. These are the bottom 2 bits of the RPM, the
* top 3 bits of the zone field, and M.
*/
- li r12, 0x0ce2
- andc r11, r11, r12
+ li r9, 0x0ce2
+ andc r11, r11, r9
+
+ /* load the next available TLB index. */
+ lwz r9, tlb_4xx_index@l(0)
+ addi r9, r9, 1
+ andi. r9, r9, PPC40X_TLB_SIZE - 1
+ stw r9, tlb_4xx_index@l(0)
tlbwe r11, r9, TLB_DATA /* Load TLB LO */
tlbwe r10, r9, TLB_TAG /* Load TLB HI */
/* Done...restore registers and get out of here.
*/
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
+ mfspr r9, SPRN_SPRG_SCRATCH5
+ mtspr SPRN_PID, r9
+ mtcr r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r10, SPRN_SPRG_SCRATCH0
- PPC405_ERR77_SYNC
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
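The reworked 40x TLB-miss handlers above collapse the present/accessed (and, for the ITLB, exec) checks into a single test: load the required-bit mask, "andc." it against the PTE, and bail to the heavyweight handler if any required bit is left over. A hedged C sketch of that bit test (the DEMO_* flag values are made up, not the real 40x PTE layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_PRESENT	0x001u
#define DEMO_PAGE_ACCESSED	0x002u
#define DEMO_PAGE_EXEC		0x004u

/* required & ~pte is non-zero iff at least one required bit is clear in the
 * PTE, which is exactly what "andc. r9, r9, r11; bne 5f" checks above. */
static bool demo_permission_ok(uint32_t pte, uint32_t required)
{
	return (required & ~pte) == 0;
}

int main(void)
{
	uint32_t required = DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED;

	printf("%d\n", demo_permission_ok(DEMO_PAGE_PRESENT | DEMO_PAGE_ACCESSED, required)); /* 1 */
	printf("%d\n", demo_permission_ok(DEMO_PAGE_PRESENT, required));                      /* 0 */
	return 0;
}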
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 51dd01a27314..8e36718f3167 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -25,10 +25,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ddfbd02140d9..0e05a9a47a4b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -947,15 +947,8 @@ start_here_multiplatform:
std r0,0(r4)
#endif
- /* The following gets the stack set up with the regs */
- /* pointing to the real addr of the kernel stack. This is */
- /* all done to support the C function call below which sets */
- /* up the htab. This is done because we have relocated the */
- /* kernel but are still running in real mode. */
-
- LOAD_REG_ADDR(r3,init_thread_union)
-
/* set up a stack pointer */
+ LOAD_REG_ADDR(r3,init_thread_union)
LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
add r1,r3,r1
li r0,0
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 073a651787df..9f359d3fba74 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -16,11 +16,12 @@
#include <linux/init.h>
#include <linux/magic.h>
+#include <linux/pgtable.h>
+#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
@@ -31,10 +32,15 @@
#include "head_32.h"
+.macro compare_to_kernel_boundary scratch, addr
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
-#define SIMPLE_KERNEL_ADDRESS 1
+ not. \scratch, \addr
+#else
+ rlwinm \scratch, \addr, 16, 0xfff8
+ cmpli cr0, \scratch, PAGE_OFFSET@h
#endif
+.endm
/*
* We need an ITLB miss handler for kernel addresses if:
@@ -196,7 +202,7 @@ SystemCall:
InstructionTLBMiss:
mtspr SPRN_SPRG_SCRATCH0, r10
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
mtspr SPRN_SPRG_SCRATCH1, r11
#endif
@@ -206,44 +212,31 @@ InstructionTLBMiss:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10)
mtspr SPRN_MD_EPN, r10
- /* Only modules will cause ITLB Misses as we always
- * pin the first 8MB of kernel memory */
#ifdef ITLB_MISS_KERNEL
mfcr r11
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
- cmpi cr0, r10, 0 /* Address >= 0x80000000 */
-#else
- rlwinm r10, r10, 16, 0xfff8
- cmpli cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_TEXT
- /* It is assumed that kernel code fits into the first 32M */
-0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h
- patch_site 0b, patch__itlbmiss_linmem_top
-#endif
-#endif
+ compare_to_kernel_boundary r10, r10
#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
#ifdef ITLB_MISS_KERNEL
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
- bge+ 3f
-#else
blt+ 3f
-#endif
-#ifndef CONFIG_PIN_TLB_TEXT
- blt cr7, ITLBMissLinear
-#endif
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
+ mtcr r11
#endif
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
+ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
+ mtspr SPRN_MD_TWC, r11
+#else
lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
mtspr SPRN_MI_TWC, r10 /* Set segment attributes */
-
mtspr SPRN_MD_TWC, r10
+#endif
mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
-#ifdef ITLB_MISS_KERNEL
- mtcr r11
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
+ rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
+ mtspr SPRN_MI_TWC, r11
#endif
#ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT
@@ -263,7 +256,7 @@ InstructionTLBMiss:
/* Restore registers */
0: mfspr r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
mfspr r11, SPRN_SPRG_SCRATCH1
#endif
rfi
@@ -281,33 +274,6 @@ InstructionTLBMiss:
rfi
#endif
-#ifndef CONFIG_PIN_TLB_TEXT
-ITLBMissLinear:
- mtcr r11
-#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23
- patch_site 0f, patch__itlbmiss_linmem_top8
-
- mfspr r10, SPRN_SRR0
-0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
- rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
- ori r11, r11, MI_PS512K | MI_SVALID
- rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
-#else
- /* Set 8M byte page and mark it valid */
- li r11, MI_PS8MEG | MI_SVALID
- rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
-#endif
- mtspr SPRN_MI_TWC, r11
- ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT
- mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
-
-0: mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
- rfi
- patch_site 0b, patch__itlbmiss_exit_2
-#endif
-
. = 0x1200
DataStoreTLBMiss:
mtspr SPRN_DAR, r10
@@ -318,21 +284,9 @@ DataStoreTLBMiss:
* kernel page tables.
*/
mfspr r10, SPRN_MD_EPN
- rlwinm r10, r10, 16, 0xfff8
- cmpli cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_IMMR
- cmpli cr6, r10, VIRT_IMMR_BASE@h
-#endif
-0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h
- patch_site 0b, patch__dtlbmiss_linmem_top
-
+ compare_to_kernel_boundary r10, r10
mfspr r10, SPRN_M_TWB /* Get level 1 table */
blt+ 3f
-#ifndef CONFIG_PIN_TLB_IMMR
-0: beq- cr6, DTLBMissIMMR
- patch_site 0b, patch__dtlbmiss_immr_jmp
-#endif
- blt cr7, DTLBMissLinear
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
@@ -350,6 +304,7 @@ DataStoreTLBMiss:
* above.
*/
rlwimi r11, r10, 0, _PAGE_GUARDED
+ rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
mtspr SPRN_MD_TWC, r11
/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
@@ -383,61 +338,16 @@ DataStoreTLBMiss:
rfi
patch_site 0b, patch__dtlbmiss_exit_1
-DTLBMissIMMR:
- mtcr r11
- /* Set 512k byte guarded page and mark it valid */
- li r10, MD_PS512K | MD_GUARDED | MD_SVALID
- mtspr SPRN_MD_TWC, r10
- mfspr r10, SPRN_IMMR /* Get current IMMR */
- rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT | _PAGE_NO_CACHE
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- li r11, RPN_PATTERN
-
-0: mfspr r10, SPRN_DAR
+#ifdef CONFIG_PERF_EVENTS
+ patch_site 0f, patch__dtlbmiss_perf
+0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ addi r10, r10, 1
+ stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ mfspr r10, SPRN_DAR
mtspr SPRN_DAR, r11 /* Tag DAR */
mfspr r11, SPRN_M_TW
rfi
- patch_site 0b, patch__dtlbmiss_exit_2
-
-DTLBMissLinear:
- mtcr r11
- rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
-#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_DATA_SHIFT < 23
- patch_site 0f, patch__dtlbmiss_romem_top8
-
-0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
- rlwinm r11, r11, 0, 0xff800000
- neg r10, r11
- or r11, r11, r10
- rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
- ori r11, r11, MI_PS512K | MI_SVALID
- mfspr r10, SPRN_MD_EPN
- rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
-#else
- /* Set 8M byte page and mark it valid */
- li r11, MD_PS8MEG | MD_SVALID
#endif
- mtspr SPRN_MD_TWC, r11
-#ifdef CONFIG_STRICT_KERNEL_RWX
- patch_site 0f, patch__dtlbmiss_romem_top
-
-0: subis r11, r10, 0
- rlwimi r10, r11, 11, _PAGE_RO
-#endif
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- li r11, RPN_PATTERN
-
-0: mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_M_TW
- rfi
- patch_site 0b, patch__dtlbmiss_exit_3
/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
@@ -485,18 +395,6 @@ DARFixed:/* Return from dcbx instruction bug workaround */
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
EXC_XFER_LITE(0x300, handle_page_fault)
-/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
-#ifdef CONFIG_PERF_EVENTS
- patch_site 0f, patch__dtlbmiss_perf
-0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- addi r10, r10, 1
- stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_M_TW
- rfi
-#endif
-
stack_overflow:
vmap_stack_overflow_exception
@@ -563,14 +461,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
cmpli cr1, r11, PAGE_OFFSET@h
mfspr r11, SPRN_M_TWB /* Get level 1 table */
blt+ cr1, 3f
- rlwinm r11, r10, 16, 0xfff8
-
-0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
- patch_site 0b, patch__fixupdar_linmem_top
/* create physical page address from effective address */
tophys(r11, r10)
- blt- cr7, 201f
mfspr r11, SPRN_M_TWB /* Get level 1 table */
rlwinm r11, r11, 0, 20, 31
oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
@@ -581,7 +474,6 @@ FixupDAR:/* Entry point for dcbx workaround. */
mfspr r11, SPRN_MD_TWC
lwz r11, 0(r11) /* Get the pte */
bt 28,200f /* bit 28 = Large page (8M) */
- bt 29,202f /* bit 29 = Large page (8M or 512K) */
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
201: lwz r11,0(r11)
@@ -608,11 +500,6 @@ FixupDAR:/* Entry point for dcbx workaround. */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
b 201b
-202:
- /* concat physical page address(r11) and page offset(r10) */
- rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
- b 201b
-
144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
@@ -747,6 +634,31 @@ start_here:
rfi
/* Load up the kernel context */
2:
+#ifdef CONFIG_PIN_TLB_IMMR
+ lis r0, MD_TWAM@h
+ oris r0, r0, 0x1f00
+ mtspr SPRN_MD_CTR, r0
+ LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
+ tlbie r0
+ mtspr SPRN_MD_EPN, r0
+ LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
+ mtspr SPRN_MD_TWC, r0
+ mfspr r0, SPRN_IMMR
+ rlwinm r0, r0, 0, 0xfff80000
+ ori r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
+ _PAGE_NO_CACHE | _PAGE_PRESENT
+ mtspr SPRN_MD_RPN, r0
+ lis r0, (MD_TWAM | MD_RSV4I)@h
+ mtspr SPRN_MD_CTR, r0
+#endif
+#ifndef CONFIG_PIN_TLB_TEXT
+ li r0, 0
+ mtspr SPRN_MI_CTR, r0
+#endif
+#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
+ lis r0, MD_TWAM@h
+ mtspr SPRN_MD_CTR, r0
+#endif
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
@@ -779,17 +691,10 @@ start_here:
initial_mmu:
li r8, 0
mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
- lis r10, MD_RESETVAL@h
-#ifndef CONFIG_8xx_COPYBACK
- oris r10, r10, MD_WTDEF@h
-#endif
+ lis r10, MD_TWAM@h
mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
tlbia /* Invalidate all TLB entries */
-#ifdef CONFIG_PIN_TLB_DATA
- oris r10, r10, MD_RSV4I@h
- mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
-#endif
lis r8, MI_APG_INIT@h /* Set protection modes */
ori r8, r8, MI_APG_INIT@l
@@ -798,55 +703,32 @@ initial_mmu:
ori r8, r8, MD_APG_INIT@l
mtspr SPRN_MD_AP, r8
- /* Map a 512k page for the IMMR to get the processor
- * internal registers (among other things).
- */
-#ifdef CONFIG_PIN_TLB_IMMR
- oris r10, r10, MD_RSV4I@h
- ori r10, r10, 0x1c00
- mtspr SPRN_MD_CTR, r10
-
- mfspr r9, 638 /* Get current IMMR */
- andis. r9, r9, 0xfff8 /* Get 512 kbytes boundary */
-
- lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */
- ori r8, r8, MD_EVALID /* Mark it valid */
- mtspr SPRN_MD_EPN, r8
- li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
- ori r8, r8, MD_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r8
- mr r8, r9 /* Create paddr for TLB */
- ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
- mtspr SPRN_MD_RPN, r8
-#endif
-
- /* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */
-#ifdef CONFIG_PIN_TLB_TEXT
+ /* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */
lis r8, MI_RSV4I@h
ori r8, r8, 0x1c00
-#endif
+ oris r12, r10, MD_RSV4I@h
+ ori r12, r12, 0x1c00
li r9, 4 /* up to 4 pages of 8M */
mtctr r9
lis r9, KERNELBASE@h /* Create vaddr for TLB */
li r10, MI_PS8MEG | MI_SVALID /* Set 8M byte page */
li r11, MI_BOOTINIT /* Create RPN for address 0 */
- lis r12, _einittext@h
- ori r12, r12, _einittext@l
1:
-#ifdef CONFIG_PIN_TLB_TEXT
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
addi r8, r8, 0x100
-#endif
-
ori r0, r9, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r0
mtspr SPRN_MI_TWC, r10
mtspr SPRN_MI_RPN, r11 /* Store TLB entry */
+ mtspr SPRN_MD_CTR, r12
+ addi r12, r12, 0x100
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r10
+ mtspr SPRN_MD_RPN, r11
addis r9, r9, 0x80
addis r11, r11, 0x80
- cmpl cr0, r9, r12
- bdnzf gt, 1b
+ bdnz 1b
/* Since the cache is enabled according to the information we
* just loaded into the TLB, invalidate and enable the caches here.
@@ -857,17 +739,7 @@ initial_mmu:
mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
mtspr SPRN_IC_CST, r8
-#ifdef CONFIG_8xx_COPYBACK
mtspr SPRN_DC_CST, r8
-#else
- /* For a debug option, I left this here to easily enable
- * the write through cache mode
- */
- lis r8, DC_SFWT@h
- mtspr SPRN_DC_CST, r8
- lis r8, IDC_ENABLE@h
- mtspr SPRN_DC_CST, r8
-#endif
/* Disable debug mode entry on breakpoints */
mfspr r8, SPRN_DER
#ifdef CONFIG_PERF_EVENTS
@@ -878,6 +750,108 @@ initial_mmu:
mtspr SPRN_DER, r8
blr
+#ifdef CONFIG_PIN_TLB
+_GLOBAL(mmu_pin_tlb)
+ lis r9, (1f - PAGE_OFFSET)@h
+ ori r9, r9, (1f - PAGE_OFFSET)@l
+ mfmsr r10
+ mflr r11
+ li r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
+ rlwinm r0, r10, 0, ~MSR_RI
+ rlwinm r0, r0, 0, ~MSR_EE
+ mtmsr r0
+ isync
+ .align 4
+ mtspr SPRN_SRR0, r9
+ mtspr SPRN_SRR1, r12
+ rfi
+1:
+ li r5, 0
+ lis r6, MD_TWAM@h
+ mtspr SPRN_MI_CTR, r5
+ mtspr SPRN_MD_CTR, r6
+ tlbia
+
+#ifdef CONFIG_PIN_TLB_TEXT
+ LOAD_REG_IMMEDIATE(r5, 28 << 8)
+ LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
+ LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+ LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+ LOAD_REG_ADDR(r9, _sinittext)
+ li r0, 4
+ mtctr r0
+
+2: ori r0, r6, MI_EVALID
+ mtspr SPRN_MI_CTR, r5
+ mtspr SPRN_MI_EPN, r0
+ mtspr SPRN_MI_TWC, r7
+ mtspr SPRN_MI_RPN, r8
+ addi r5, r5, 0x100
+ addis r6, r6, SZ_8M@h
+ addis r8, r8, SZ_8M@h
+ cmplw r6, r9
+ bdnzt lt, 2b
+ lis r0, MI_RSV4I@h
+ mtspr SPRN_MI_CTR, r0
+#endif
+ LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
+#ifdef CONFIG_PIN_TLB_DATA
+ LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
+ LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+#ifdef CONFIG_PIN_TLB_IMMR
+ li r0, 3
+#else
+ li r0, 4
+#endif
+ mtctr r0
+ cmpwi r4, 0
+ beq 4f
+ LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+ LOAD_REG_ADDR(r9, _sinittext)
+
+2: ori r0, r6, MD_EVALID
+ mtspr SPRN_MD_CTR, r5
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r7
+ mtspr SPRN_MD_RPN, r8
+ addi r5, r5, 0x100
+ addis r6, r6, SZ_8M@h
+ addis r8, r8, SZ_8M@h
+ cmplw r6, r9
+ bdnzt lt, 2b
+
+4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+2: ori r0, r6, MD_EVALID
+ mtspr SPRN_MD_CTR, r5
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r7
+ mtspr SPRN_MD_RPN, r8
+ addi r5, r5, 0x100
+ addis r6, r6, SZ_8M@h
+ addis r8, r8, SZ_8M@h
+ cmplw r6, r3
+ bdnzt lt, 2b
+#endif
+#ifdef CONFIG_PIN_TLB_IMMR
+ LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
+ LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED)
+ mfspr r8, SPRN_IMMR
+ rlwinm r8, r8, 0, 0xfff80000
+ ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
+ _PAGE_NO_CACHE | _PAGE_PRESENT
+ mtspr SPRN_MD_CTR, r5
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r7
+ mtspr SPRN_MD_RPN, r8
+#endif
+#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
+ lis r0, (MD_RSV4I | MD_TWAM)@h
+ mtspr SPRN_MI_CTR, r0
+#endif
+ mtspr SPRN_SRR1, r10
+ mtspr SPRN_SRR0, r11
+ rfi
+#endif /* CONFIG_PIN_TLB */
/*
* We put a few things here that have to be page-aligned.
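The new compare_to_kernel_boundary macro above exploits the layout guard in its #if: with user space capped at 0x80000000 and the kernel mapped above it, an effective address is a kernel address exactly when its top bit is set, so a single "not." (which sets CR0 from the signed value of the complement) is enough. A short C sketch of that trick, with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* ~addr is non-negative as a signed 32-bit value exactly when the top bit
 * of addr is set, i.e. addr >= 0x80000000. The cast mirrors the CR0 test
 * performed by "not." in the macro above. */
static bool demo_is_kernel_addr(uint32_t addr)
{
	return (int32_t)~addr >= 0;
}

int main(void)
{
	printf("%d\n", demo_is_kernel_addr(0xc0000000u)); /* 1: kernel */
	printf("%d\n", demo_is_kernel_addr(0x10000000u)); /* 0: user  */
	return 0;
}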
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index bd2e5ed8dd50..18f87bf9e32b 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -534,7 +534,7 @@ struct exception_regs {
};
/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16)
+#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)
#endif /* __ASSEMBLY__ */
#endif /* __HEAD_BOOKE_H__ */
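The head_booke.h hunk above simply switches STACK_EXC_LVL_FRAME_SIZE from the powerpc-private _ALIGN_UP() to the generic ALIGN() macro; both round sizeof(struct exception_regs) up to the next multiple of 16. A tiny worked example of that rounding (the 42-byte input is an arbitrary made-up value, and DEMO_ALIGN is a local stand-in for the kernel macro):

#include <stdio.h>

#define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("%lu\n", (unsigned long)DEMO_ALIGN(42, 16)); /* prints 48 */
	return 0;
}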
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 840af004041e..586a6ac501e9 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -28,10 +28,10 @@
#include <linux/init.h>
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 72f461bd70fb..0000daf0e1da 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -24,13 +24,14 @@
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
+#include <asm/inst.h>
#include <linux/uaccess.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for every cpu
*/
-static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
/*
* Returns total number of data or instruction breakpoints available.
@@ -38,10 +39,21 @@ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
int hw_breakpoint_slots(int type)
{
if (type == TYPE_DATA)
- return HBP_NUM;
+ return nr_wp_slots();
return 0; /* no instruction breakpoints available */
}
+static bool single_step_pending(void)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (current->thread.last_hit_ubp[i])
+ return true;
+ }
+ return false;
+}
+
/*
* Install a perf counter breakpoint.
*
@@ -54,16 +66,26 @@ int hw_breakpoint_slots(int type)
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+ struct perf_event **slot;
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ slot = this_cpu_ptr(&bp_per_reg[i]);
+ if (!*slot) {
+ *slot = bp;
+ break;
+ }
+ }
- *slot = bp;
+ if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
+ return -EBUSY;
/*
* Do not install DABR values if the instruction must be single-stepped.
* If so, DABR will be populated in single_step_dabr_instruction().
*/
- if (current->thread.last_hit_ubp != bp)
- __set_breakpoint(info);
+ if (!single_step_pending())
+ __set_breakpoint(i, info);
return 0;
}
@@ -79,15 +101,248 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
- struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+ struct arch_hw_breakpoint null_brk = {0};
+ struct perf_event **slot;
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ slot = this_cpu_ptr(&bp_per_reg[i]);
+ if (*slot == bp) {
+ *slot = NULL;
+ break;
+ }
+ }
- if (*slot != bp) {
- WARN_ONCE(1, "Can't find the breakpoint");
+ if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
return;
+
+ __set_breakpoint(i, &null_brk);
+}
+
+static bool is_ptrace_bp(struct perf_event *bp)
+{
+ return bp->overflow_handler == ptrace_triggered;
+}
+
+struct breakpoint {
+ struct list_head list;
+ struct perf_event *bp;
+ bool ptrace_bp;
+};
+
+static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static LIST_HEAD(task_bps);
+
+static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+ tmp->bp = bp;
+ tmp->ptrace_bp = is_ptrace_bp(bp);
+ return tmp;
+}
+
+static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
+{
+ __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
+
+ bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
+ bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
+ bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
+ bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);
+
+ return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
+}
+
+static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
+{
+ return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
+}
+
+static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
+{
+ return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
+}
+
+static int task_bps_add(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+
+ tmp = alloc_breakpoint(bp);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ list_add(&tmp->list, &task_bps);
+ return 0;
+}
+
+static void task_bps_remove(struct perf_event *bp)
+{
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, &task_bps) {
+ struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
+
+ if (tmp->bp == bp) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ break;
+ }
}
+}
- *slot = NULL;
- hw_breakpoint_disable();
+/*
+ * If any task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool all_task_bps_check(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+
+ list_for_each_entry(tmp, &task_bps, list) {
+ if (!can_co_exist(tmp, bp))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * If the same task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool same_task_bps_check(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+
+ list_for_each_entry(tmp, &task_bps, list) {
+ if (tmp->bp->hw.target == bp->hw.target &&
+ !can_co_exist(tmp, bp))
+ return true;
+ }
+ return false;
+}
+
+static int cpu_bps_add(struct perf_event *bp)
+{
+ struct breakpoint **cpu_bp;
+ struct breakpoint *tmp;
+ int i = 0;
+
+ tmp = alloc_breakpoint(bp);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!cpu_bp[i]) {
+ cpu_bp[i] = tmp;
+ break;
+ }
+ }
+ return 0;
+}
+
+static void cpu_bps_remove(struct perf_event *bp)
+{
+ struct breakpoint **cpu_bp;
+ int i = 0;
+
+ cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!cpu_bp[i])
+ continue;
+
+ if (cpu_bp[i]->bp == bp) {
+ kfree(cpu_bp[i]);
+ cpu_bp[i] = NULL;
+ break;
+ }
+ }
+}
+
+static bool cpu_bps_check(int cpu, struct perf_event *bp)
+{
+ struct breakpoint **cpu_bp;
+ int i;
+
+ cpu_bp = per_cpu_ptr(cpu_bps, cpu);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
+ return true;
+ }
+ return false;
+}
+
+static bool all_cpu_bps_check(struct perf_event *bp)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu_bps_check(cpu, bp))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * We don't use any locks to serialize accesses to cpu_bps or task_bps
+ * because we are already inside nr_bp_mutex.
+ */
+int arch_reserve_bp_slot(struct perf_event *bp)
+{
+ int ret;
+
+ /* ptrace breakpoint */
+ if (is_ptrace_bp(bp)) {
+ if (all_cpu_bps_check(bp))
+ return -ENOSPC;
+
+ if (same_task_bps_check(bp))
+ return -ENOSPC;
+
+ return task_bps_add(bp);
+ }
+
+ /* perf breakpoint */
+ if (is_kernel_addr(bp->attr.bp_addr))
+ return 0;
+
+ if (bp->hw.target && bp->cpu == -1) {
+ if (same_task_bps_check(bp))
+ return -ENOSPC;
+
+ return task_bps_add(bp);
+ } else if (!bp->hw.target && bp->cpu != -1) {
+ if (all_task_bps_check(bp))
+ return -ENOSPC;
+
+ return cpu_bps_add(bp);
+ }
+
+ if (same_task_bps_check(bp))
+ return -ENOSPC;
+
+ ret = cpu_bps_add(bp);
+ if (ret)
+ return ret;
+ ret = task_bps_add(bp);
+ if (ret)
+ cpu_bps_remove(bp);
+
+ return ret;
+}
+
+void arch_release_bp_slot(struct perf_event *bp)
+{
+ if (!is_kernel_addr(bp->attr.bp_addr)) {
+ if (bp->hw.target)
+ task_bps_remove(bp);
+ if (bp->cpu != -1)
+ cpu_bps_remove(bp);
+ }
}
/*
@@ -102,8 +357,14 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* restoration variables to prevent dangling pointers.
* FIXME, this should not be using bp->ctx at all! Sayeth peterz.
*/
- if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
- bp->ctx->task->thread.last_hit_ubp = NULL;
+ if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
+ bp->ctx->task->thread.last_hit_ubp[i] = NULL;
+ }
+ }
}
/*
@@ -140,10 +401,10 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
* <---8 bytes--->
*
* In this case, we should configure hw as:
- * start_addr = address & ~HW_BREAKPOINT_ALIGN
+ * start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
* len = 16 bytes
*
- * @start_addr and @end_addr are inclusive.
+ * @start_addr is inclusive but @end_addr is exclusive.
*/
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
@@ -151,14 +412,14 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
u16 hw_len;
unsigned long start_addr, end_addr;
- start_addr = hw->address & ~HW_BREAKPOINT_ALIGN;
- end_addr = (hw->address + hw->len - 1) | HW_BREAKPOINT_ALIGN;
- hw_len = end_addr - start_addr + 1;
+ start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
+ end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
+ hw_len = end_addr - start_addr;
if (dawr_enabled()) {
max_len = DAWR_MAX_LEN;
/* DAWR region can't cross 512 bytes boundary */
- if ((start_addr >> 9) != (end_addr >> 9))
+ if (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512))
return -EINVAL;
} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
/* 8xx can setup a range without limitation */
@@ -215,90 +476,209 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
struct arch_hw_breakpoint *info;
+ int i;
- if (likely(!tsk->thread.last_hit_ubp))
- return;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (unlikely(tsk->thread.last_hit_ubp[i]))
+ goto reset;
+ }
+ return;
- info = counter_arch_bp(tsk->thread.last_hit_ubp);
+reset:
regs->msr &= ~MSR_SE;
- __set_breakpoint(info);
- tsk->thread.last_hit_ubp = NULL;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
+ __set_breakpoint(i, info);
+ tsk->thread.last_hit_ubp[i] = NULL;
+ }
}
-static bool dar_within_range(unsigned long dar, struct arch_hw_breakpoint *info)
+static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
return ((info->address <= dar) && (dar - info->address < info->len));
}
-static bool
-dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
+static bool dar_user_range_overlaps(unsigned long dar, int size,
+ struct arch_hw_breakpoint *info)
+{
+ return ((dar < info->address + info->len) &&
+ (dar + size > info->address));
+}
+
+static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
+{
+ unsigned long hw_start_addr, hw_end_addr;
+
+ hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
+ hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
+
+ return ((hw_start_addr <= dar) && (hw_end_addr > dar));
+}
+
+static bool dar_hw_range_overlaps(unsigned long dar, int size,
+ struct arch_hw_breakpoint *info)
{
- return ((dar <= info->address + info->len - 1) &&
- (dar + size - 1 >= info->address));
+ unsigned long hw_start_addr, hw_end_addr;
+
+ hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
+ hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
+
+ return ((dar < hw_end_addr) && (dar + size > hw_start_addr));
}
/*
- * Handle debug exception notifications.
+ * If hw has multiple DAWR registers, we also need to check all
+ * dawrx constraint bits to confirm this is _really_ a valid event.
*/
-static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
- struct arch_hw_breakpoint *info)
+static bool check_dawrx_constraints(struct pt_regs *regs, int type,
+ struct arch_hw_breakpoint *info)
{
- unsigned int instr = 0;
- int ret, type, size;
- struct instruction_op op;
- unsigned long addr = info->address;
+ if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
+ return false;
- if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
- goto fail;
+ if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE))
+ return false;
- ret = analyse_instr(&op, regs, instr);
- type = GETTYPE(op.type);
- size = GETSIZE(op.type);
+ if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
+ return false;
- if (!ret && (type == LARX || type == STCX)) {
- printk_ratelimited("Breakpoint hit on instruction that can't be emulated."
- " Breakpoint at 0x%lx will be disabled.\n", addr);
- goto disable;
- }
+ if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
+ return false;
+
+ return true;
+}
+
+/*
+ * Return true if the event is valid wrt dawr configuration,
+ * including an extraneous exception. Otherwise return false.
+ */
+static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
+ int type, int size, struct arch_hw_breakpoint *info)
+{
+ bool in_user_range = dar_in_user_range(regs->dar, info);
+ bool dawrx_constraints;
/*
- * If it's extraneous event, we still need to emulate/single-
- * step the instruction, but we don't generate an event.
+ * 8xx supports only one breakpoint and thus we can
+ * unconditionally return true.
*/
- if (size && !dar_range_overlaps(regs->dar, size, info))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (!in_user_range)
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ return true;
+ }
- /* Do not emulate user-space instructions, instead single-step them */
- if (user_mode(regs)) {
- current->thread.last_hit_ubp = bp;
- regs->msr |= MSR_SE;
+ if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
+ if (in_user_range)
+ return true;
+
+ if (dar_in_hw_range(regs->dar, info)) {
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ return true;
+ }
return false;
}
- if (!emulate_step(regs, instr))
- goto fail;
+ dawrx_constraints = check_dawrx_constraints(regs, type, info);
- return true;
+ if (dar_user_range_overlaps(regs->dar, size, info))
+ return dawrx_constraints;
+
+ if (dar_hw_range_overlaps(regs->dar, size, info)) {
+ if (dawrx_constraints) {
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
+ int *type, int *size, bool *larx_stcx)
+{
+ struct instruction_op op;
+
+ if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
+ return;
+
+ analyse_instr(&op, regs, *instr);
-fail:
/*
- * We've failed in reliably handling the hw-breakpoint. Unregister
- * it and throw a warning message to let the user know about it.
+ * Set size = 8 if analyse_instr() fails. If it's a userspace
+ * watchpoint (valid or extraneous), we can notify the user about it.
+ * If it's a kernel watchpoint, instruction emulation will fail
+ * in stepping_handler() and the watchpoint will be disabled.
*/
- WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
- "0x%lx will be disabled.", addr);
+ *type = GETTYPE(op.type);
+ *size = !(*type == UNKNOWN) ? GETSIZE(op.type) : 8;
+ *larx_stcx = (*type == LARX || *type == STCX);
+}
-disable:
+/*
+ * We've failed to reliably handle the hw-breakpoint. Unregister
+ * it and throw a warning message to let the user know about it.
+ */
+static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
+{
+ WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
+ info->address);
perf_event_disable_inatomic(bp);
- return false;
+}
+
+static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
+{
+ printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
+ info->address);
+ perf_event_disable_inatomic(bp);
+}
+
+static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
+ struct arch_hw_breakpoint **info, int *hit,
+ struct ppc_inst instr)
+{
+ int i;
+ int stepped;
+
+ /* Do not emulate user-space instructions, instead single-step them */
+ if (user_mode(regs)) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ current->thread.last_hit_ubp[i] = bp[i];
+ info[i] = NULL;
+ }
+ regs->msr |= MSR_SE;
+ return false;
+ }
+
+ stepped = emulate_step(regs, instr);
+ if (!stepped) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ handler_error(bp[i], info[i]);
+ info[i] = NULL;
+ }
+ return false;
+ }
+ return true;
}
int hw_breakpoint_handler(struct die_args *args)
{
+ bool err = false;
int rc = NOTIFY_STOP;
- struct perf_event *bp;
+ struct perf_event *bp[HBP_NUM_MAX] = { NULL };
struct pt_regs *regs = args->regs;
- struct arch_hw_breakpoint *info;
+ struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
+ int i;
+ int hit[HBP_NUM_MAX] = {0};
+ int nr_hit = 0;
+ bool ptrace_bp = false;
+ struct ppc_inst instr = ppc_inst(0);
+ int type = 0;
+ int size = 0;
+ bool larx_stcx = false;
/* Disable breakpoints during exception handling */
hw_breakpoint_disable();
@@ -311,12 +691,40 @@ int hw_breakpoint_handler(struct die_args *args)
*/
rcu_read_lock();
- bp = __this_cpu_read(bp_per_reg);
- if (!bp) {
+ if (!IS_ENABLED(CONFIG_PPC_8xx))
+ get_instr_detail(regs, &instr, &type, &size, &larx_stcx);
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ bp[i] = __this_cpu_read(bp_per_reg[i]);
+ if (!bp[i])
+ continue;
+
+ info[i] = counter_arch_bp(bp[i]);
+ info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
+ if (check_constraints(regs, instr, type, size, info[i])) {
+ if (!IS_ENABLED(CONFIG_PPC_8xx) &&
+ ppc_inst_equal(instr, ppc_inst(0))) {
+ handler_error(bp[i], info[i]);
+ info[i] = NULL;
+ err = 1;
+ continue;
+ }
+
+ if (is_ptrace_bp(bp[i]))
+ ptrace_bp = true;
+ hit[i] = 1;
+ nr_hit++;
+ }
+ }
+
+ if (err)
+ goto reset;
+
+ if (!nr_hit) {
rc = NOTIFY_DONE;
goto out;
}
- info = counter_arch_bp(bp);
/*
* Return early after invoking user-callback function without restoring
@@ -324,29 +732,50 @@ int hw_breakpoint_handler(struct die_args *args)
* one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
* generated in do_dabr().
*/
- if (bp->overflow_handler == ptrace_triggered) {
- perf_bp_event(bp, regs);
+ if (ptrace_bp) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ perf_bp_event(bp[i], regs);
+ info[i] = NULL;
+ }
rc = NOTIFY_DONE;
- goto out;
+ goto reset;
}
- info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
- if (IS_ENABLED(CONFIG_PPC_8xx)) {
- if (!dar_within_range(regs->dar, info))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
- } else {
- if (!stepping_handler(regs, bp, info))
- goto out;
+ if (!IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (larx_stcx) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ larx_stcx_err(bp[i], info[i]);
+ info[i] = NULL;
+ }
+ goto reset;
+ }
+
+ if (!stepping_handler(regs, bp, info, hit, instr))
+ goto reset;
}
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
* fashion
*/
- if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
- perf_bp_event(bp, regs);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
+ perf_bp_event(bp[i], regs);
+ }
+
+reset:
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!info[i])
+ continue;
+ __set_breakpoint(i, info[i]);
+ }
- __set_breakpoint(info);
out:
rcu_read_unlock();
return rc;
@@ -361,26 +790,43 @@ static int single_step_dabr_instruction(struct die_args *args)
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
struct arch_hw_breakpoint *info;
+ int i;
+ bool found = false;
- bp = current->thread.last_hit_ubp;
/*
* Check if we are single-stepping as a result of a
* previous HW Breakpoint exception
*/
- if (!bp)
- return NOTIFY_DONE;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ bp = current->thread.last_hit_ubp[i];
+
+ if (!bp)
+ continue;
+
+ found = true;
+ info = counter_arch_bp(bp);
+
+ /*
+ * We shall invoke the user-defined callback function in the
+ * single-stepping handler to conform to 'trigger-after-execute'
+ * semantics
+ */
+ if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
+ perf_bp_event(bp, regs);
+ current->thread.last_hit_ubp[i] = NULL;
+ }
- info = counter_arch_bp(bp);
+ if (!found)
+ return NOTIFY_DONE;
- /*
- * We shall invoke the user-defined callback function in the single
- * stepping handler to confirm to 'trigger-after-execute' semantics
- */
- if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
- perf_bp_event(bp, regs);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ bp = __this_cpu_read(bp_per_reg[i]);
+ if (!bp)
+ continue;
- __set_breakpoint(info);
- current->thread.last_hit_ubp = NULL;
+ info = counter_arch_bp(bp);
+ __set_breakpoint(i, info);
+ }
/*
* If the process was being single-stepped by ptrace, let the
@@ -419,10 +865,13 @@ NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
+ int i;
struct thread_struct *t = &tsk->thread;
- unregister_hw_breakpoint(t->ptrace_bps[0]);
- t->ptrace_bps[0] = NULL;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ unregister_hw_breakpoint(t->ptrace_bps[i]);
+ t->ptrace_bps[i] = NULL;
+ }
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
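The hw_breakpoint.c hunks above consistently move to half-open [start, end) watch ranges: the start address is rounded down and the end address rounded up to the breakpoint granule, and two ranges overlap iff start1 < end2 && end1 > start2. Below is a minimal user-space sketch of that convention, assuming an 8-byte granule in place of HW_BREAKPOINT_SIZE; the helper names are illustrative only, not kernel APIs.

/* range_sketch.c - stand-alone illustration, not part of the patch above. */
#include <assert.h>
#include <stdbool.h>

#define GRANULE			8UL	/* stands in for HW_BREAKPOINT_SIZE */
#define ALIGN_DOWN_(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP_(x, a)		(((x) + (a) - 1) & ~((a) - 1))

struct range { unsigned long start, end; };	/* end is exclusive */

/* Mirrors how bp_addr_range_overlap()/dar_in_hw_range() widen a request. */
static struct range hw_range(unsigned long addr, unsigned long len)
{
	struct range r = {
		.start = ALIGN_DOWN_(addr, GRANULE),
		.end   = ALIGN_UP_(addr + len, GRANULE),
	};
	return r;
}

static bool overlap(struct range a, struct range b)
{
	return a.start < b.end && a.end > b.start;
}

int main(void)
{
	struct range a = hw_range(0x1006, 4);	/* widens to [0x1000, 0x1010) */
	struct range b = hw_range(0x1010, 1);	/* widens to [0x1010, 0x1018) */

	assert(a.start == 0x1000 && a.end == 0x1010);
	assert(!overlap(a, b));	/* adjacent granules do not overlap */
	return 0;
}

Adjacent granules deliberately do not count as overlapping, which is why can_co_exist() above still lets a ptrace breakpoint and a perf breakpoint sit on neighbouring blocks.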
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 433d97bea1f3..69df840f7253 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -187,6 +187,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
b transfer_to_handler_cont
+_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
.data
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 308f499e146c..72c85b6f3898 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -90,3 +90,4 @@ _GLOBAL(power_save_ppc32_restore)
#endif
b transfer_to_handler_cont
+_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 0276bc8c8969..51bbaae94ccc 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -10,10 +10,10 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h> /* for init_mm */
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <asm/io-workarounds.h>
#include <asm/pte-walk.h>
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 112d150354b2..05b1cc0e009e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -51,10 +51,10 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index ca37702bde97..144858027fa3 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -6,14 +6,15 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 *addr = (u32 *)(unsigned long)entry->code;
+ struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code;
if (type == JUMP_LABEL_JMP)
patch_branch(addr, entry->target, 0);
else
- patch_instruction(addr, PPC_INST_NOP);
+ patch_instruction(addr, ppc_inst(PPC_INST_NOP));
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7dd55eb1259d..652b2852bea3 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -26,6 +26,7 @@
#include <asm/debug.h>
#include <asm/code-patching.h>
#include <linux/slab.h>
+#include <asm/inst.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -418,13 +419,13 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr;
- unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+ struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
err = probe_kernel_address(addr, instr);
if (err)
return err;
- err = patch_instruction(addr, BREAK_INSTR);
+ err = patch_instruction(addr, ppc_inst(BREAK_INSTR));
if (err)
return -EFAULT;
@@ -437,9 +438,9 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
- unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+ struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
- err = patch_instruction(addr, instr);
+ err = patch_instruction(addr, ppc_inst(instr));
if (err)
return -EFAULT;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 81efb605113e..6f96f65ebfe8 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -23,6 +23,7 @@
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
+#include <asm/inst.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -105,7 +106,9 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
- kprobe_opcode_t insn = *p->addr;
+ struct kprobe *prev;
+ struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
+ struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
@@ -113,6 +116,17 @@ int arch_prepare_kprobe(struct kprobe *p)
} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
ret = -EINVAL;
+ } else if (ppc_inst_prefixed(prefix)) {
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ ret = -EINVAL;
+ }
+ preempt_disable();
+ prev = get_kprobe(p->addr - 1);
+ preempt_enable_no_resched();
+ if (prev &&
+ ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ ret = -EINVAL;
}
/* insn must be on a special executable page on ppc64. This is
@@ -124,11 +138,8 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
- memcpy(p->ainsn.insn, p->addr,
- MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- p->opcode = *p->addr;
- flush_icache_range((unsigned long)p->ainsn.insn,
- (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+ patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
+ p->opcode = ppc_inst_val(insn);
}
p->ainsn.boostable = 0;
@@ -138,13 +149,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+ patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, p->opcode);
+ patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
@@ -216,7 +227,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- unsigned int insn = *p->ainsn.insn;
+ struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
@@ -233,7 +244,7 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
* So, we should never get here... but, its still
* good to catch them, just in case...
*/
- printk("Can't step on instruction %x\n", insn);
+ printk("Can't step on instruction %x\n", ppc_inst_val(insn));
BUG();
} else {
/*
@@ -276,14 +287,18 @@ int kprobe_handler(struct pt_regs *regs)
p = get_kprobe(addr);
if (!p) {
- if (*addr != BREAKPOINT_INSTRUCTION) {
+ unsigned int instr;
+
+ if (probe_kernel_address(addr, instr))
+ goto no_kprobe;
+
+ if (instr != BREAKPOINT_INSTRUCTION) {
/*
* PowerPC has multiple variants of the "trap"
* instruction. If the current instruction is a
* trap variant, it could belong to someone else
*/
- kprobe_opcode_t cur_insn = *addr;
- if (is_trap(cur_insn))
+ if (is_trap(instr))
goto no_kprobe;
/*
* The breakpoint instruction was removed right
@@ -464,14 +479,16 @@ NOKPROBE_SYMBOL(trampoline_probe_handler);
*/
int kprobe_post_handler(struct pt_regs *regs)
{
+ int len;
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur || user_mode(regs))
return 0;
+ len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
/* make sure we got here for instruction we have a kprobe on */
- if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
+ if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
@@ -480,7 +497,7 @@ int kprobe_post_handler(struct pt_regs *regs)
}
/* Adjust nip to after the single-stepped instruction */
- regs->nip = (unsigned long)cur->addr + 4;
+ regs->nip = (unsigned long)cur->addr + len;
regs->msr |= kcb->kprobe_saved_msr;
/*Restore back the original saved kprobes variables and continue. */
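The kprobes hunks above stop hard-coding a 4-byte instruction length: regs->nip is advanced by ppc_inst_len(), and probes are refused on the second word of a prefixed instruction. A rough stand-alone sketch of the underlying rule, assuming the ISA v3.1 convention that a prefixed instruction is 8 bytes and its first word carries primary opcode 1; the helpers are illustrative, not the kernel's ppc_inst API.

/* nip_sketch.c - stand-alone illustration, not part of the patch above. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* ISA v3.1: primary opcode 1 (bits 0-5) marks the prefix word. */
static bool word_is_prefix(uint32_t word)
{
	return (word >> 26) == 1;
}

/* After single-stepping, nip must advance by the real instruction length. */
static uint64_t next_nip(uint64_t nip, uint32_t first_word)
{
	return nip + (word_is_prefix(first_word) ? 8 : 4);
}

int main(void)
{
	assert(next_nip(0x1000, 0x60000000) == 0x1004);	/* ori r0,r0,0 (nop) */
	assert(next_nip(0x1000, 0x06000000) == 0x1008);	/* a paddi-style prefix word */
	return 0;
}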
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2020d255585f..5f07aa5e9851 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -455,5 +455,6 @@ _GLOBAL(__inval_enable_L1)
sync
blr
+_ASM_NOKPROBE_SYMBOL(__inval_enable_L1)
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 8077b5fb18a7..fd90c0eda229 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -16,6 +16,7 @@
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
+#include <linux/ftrace.h>
#include <asm/machdep.h>
#include <asm/mce.h>
@@ -571,9 +572,16 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info);
*
* regs->nip and regs->msr contains srr0 and ssr1.
*/
-long machine_check_early(struct pt_regs *regs)
+long notrace machine_check_early(struct pt_regs *regs)
{
long handled = 0;
+ bool nested = in_nmi();
+ u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
+
+ this_cpu_set_ftrace_enabled(0);
+
+ if (!nested)
+ nmi_enter();
hv_nmi_check_nonrecoverable(regs);
@@ -582,6 +590,12 @@ long machine_check_early(struct pt_regs *regs)
*/
if (ppc_md.machine_check_early)
handled = ppc_md.machine_check_early(regs);
+
+ if (!nested)
+ nmi_exit();
+
+ this_cpu_set_ftrace_enabled(ftrace_enabled);
+
return handled;
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 067b094bfeff..c3b522bff9b4 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -12,14 +12,15 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/extable.h>
+#include <linux/pgtable.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/pte-walk.h>
#include <asm/sstep.h>
#include <asm/exception-64s.h>
#include <asm/extable.h>
+#include <asm/inst.h>
/*
* Convert an address related to an mm to a PFN. NOTE: we are in real
@@ -27,7 +28,7 @@
*/
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
{
- pte_t *ptep;
+ pte_t *ptep, pte;
unsigned int shift;
unsigned long pfn, flags;
struct mm_struct *mm;
@@ -39,19 +40,23 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
local_irq_save(flags);
ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
+ if (!ptep) {
+ pfn = ULONG_MAX;
+ goto out;
+ }
+ pte = READ_ONCE(*ptep);
- if (!ptep || pte_special(*ptep)) {
+ if (!pte_present(pte) || pte_special(pte)) {
pfn = ULONG_MAX;
goto out;
}
if (shift <= PAGE_SHIFT)
- pfn = pte_pfn(*ptep);
+ pfn = pte_pfn(pte);
else {
unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
- pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+ pfn = pte_pfn(__pte(pte_val(pte) | (addr & rpnmask)));
}
-
out:
local_irq_restore(flags);
return pfn;
@@ -365,7 +370,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
* in real-mode is tricky and can lead to recursive
* faults
*/
- int instr;
+ struct ppc_inst instr;
unsigned long pfn, instr_addr;
struct instruction_op op;
struct pt_regs tmp = *regs;
@@ -373,7 +378,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
- instr = *(unsigned int *)(instr_addr);
+ instr = ppc_inst_read((struct ppc_inst *)instr_addr);
if (!analyse_instr(&op, &tmp, instr)) {
pfn = addr_to_pfn(regs, op.ea);
*addr = op.ea;
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 65f9f731c229..5be96feccb55 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -36,6 +36,8 @@ _GLOBAL(add_reloc_offset)
add r3,r3,r5
mtlr r0
blr
+_ASM_NOKPROBE_SYMBOL(reloc_offset)
+_ASM_NOKPROBE_SYMBOL(add_reloc_offset)
.align 3
2: PPC_LONG 1b
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index d80212be8698..b24f866fef81 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -246,6 +246,7 @@ _GLOBAL(real_readb)
sync
isync
blr
+_ASM_NOKPROBE_SYMBOL(real_readb)
/*
* Do an IO access in real mode
@@ -263,6 +264,7 @@ _GLOBAL(real_writeb)
sync
isync
blr
+_ASM_NOKPROBE_SYMBOL(real_writeb)
#endif /* CONFIG_40x */
@@ -274,17 +276,8 @@ _GLOBAL(real_writeb)
#ifndef CONFIG_PPC_8xx
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_4xx)
-#ifdef CONFIG_403GCX
- li r3, 512
- mtctr r3
- lis r4, KERNELBASE@h
-1: iccci 0, r4
- addi r4, r4, 16
- bdnz 1b
-#else
lis r3, KERNELBASE@h
iccci 0,r3
-#endif
#elif defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_E200
mfspr r3,SPRN_L1CSR0
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index d7134c614c16..c27b8687b82a 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -67,21 +67,6 @@ static int relacmp(const void *_x, const void *_y)
return 0;
}
-static void relaswap(void *_x, void *_y, int size)
-{
- uint32_t *x, *y, tmp;
- int i;
-
- y = (uint32_t *)_x;
- x = (uint32_t *)_y;
-
- for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
- tmp = x[i];
- x[i] = y[i];
- y[i] = tmp;
- }
-}
-
/* Get the potential trampolines size required of the init and
non-init sections */
static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
@@ -118,7 +103,7 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
*/
sort((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela),
- sizeof(Elf32_Rela), relacmp, relaswap);
+ sizeof(Elf32_Rela), relacmp, NULL);
ret += count_relocs((void *)hdr
+ sechdrs[i].sh_offset,
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 007606a48fd9..f4c2fa190192 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
+#include <asm/inst.h>
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -144,42 +145,6 @@ static u32 ppc64_stub_insns[] = {
PPC_INST_BCTR,
};
-#ifdef CONFIG_DYNAMIC_FTRACE
-int module_trampoline_target(struct module *mod, unsigned long addr,
- unsigned long *target)
-{
- struct ppc64_stub_entry *stub;
- func_desc_t funcdata;
- u32 magic;
-
- if (!within_module_core(addr, mod)) {
- pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- stub = (struct ppc64_stub_entry *)addr;
-
- if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
- pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- if (magic != STUB_MAGIC) {
- pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
- pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- *target = stub_func_addr(funcdata);
-
- return 0;
-}
-#endif
-
/* Count how many different 24-bit relocations (different symbol,
different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
@@ -226,21 +191,6 @@ static int relacmp(const void *_x, const void *_y)
return 0;
}
-static void relaswap(void *_x, void *_y, int size)
-{
- uint64_t *x, *y, tmp;
- int i;
-
- y = (uint64_t *)_x;
- x = (uint64_t *)_y;
-
- for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
- tmp = x[i];
- x[i] = y[i];
- y[i] = tmp;
- }
-}
-
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
const Elf64_Shdr *sechdrs)
@@ -264,7 +214,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
*/
sort((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela),
- sizeof(Elf64_Rela), relacmp, relaswap);
+ sizeof(Elf64_Rela), relacmp, NULL);
relocs += count_relocs((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
@@ -384,6 +334,92 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
return 0;
}
+#ifdef CONFIG_MPROFILE_KERNEL
+
+#define PACATOC offsetof(struct paca_struct, kernel_toc)
+
+/*
+ * ld r12,PACATOC(r13)
+ * addis r12,r12,<high>
+ * addi r12,r12,<low>
+ * mtctr r12
+ * bctr
+ */
+static u32 stub_insns[] = {
+ PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
+ PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
+ PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
+ PPC_INST_MTCTR | __PPC_RS(R12),
+ PPC_INST_BCTR,
+};
+
+/*
+ * For mprofile-kernel we use a special stub for ftrace_caller() because we
+ * can't rely on r2 containing this module's TOC when we enter the stub.
+ *
+ * That can happen if the function calling us didn't need to use the TOC. In
+ * that case it won't have set up r2, and the r2 value will be either the
+ * kernel's TOC, or possibly another module's TOC.
+ *
+ * To deal with that, this stub uses the kernel TOC, which is always accessible
+ * via the paca (in r13). The target (ftrace_caller()) is responsible for
+ * saving and restoring the TOC before returning.
+ */
+static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
+ unsigned long addr,
+ struct module *me)
+{
+ long reladdr;
+
+ memcpy(entry->jump, stub_insns, sizeof(stub_insns));
+
+ /* Stub uses address relative to kernel toc (from the paca) */
+ reladdr = addr - kernel_toc_addr();
+ if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ pr_err("%s: Address of %ps out of range of kernel_toc.\n",
+ me->name, (void *)addr);
+ return 0;
+ }
+
+ entry->jump[1] |= PPC_HA(reladdr);
+ entry->jump[2] |= PPC_LO(reladdr);
+
+ /* Even though we don't use funcdata in the stub, it's needed elsewhere. */
+ entry->funcdata = func_desc(addr);
+ entry->magic = STUB_MAGIC;
+
+ return 1;
+}
+
+static bool is_mprofile_ftrace_call(const char *name)
+{
+ if (!strcmp("_mcount", name))
+ return true;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ if (!strcmp("ftrace_caller", name))
+ return true;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (!strcmp("ftrace_regs_caller", name))
+ return true;
+#endif
+#endif
+
+ return false;
+}
+#else
+static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
+ unsigned long addr,
+ struct module *me)
+{
+ return 0;
+}
+
+static bool is_mprofile_ftrace_call(const char *name)
+{
+ return false;
+}
+#endif
+
/*
* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the
* value maximum span in an instruction which uses a signed offset). Round down
@@ -399,10 +435,14 @@ static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
static inline int create_stub(const Elf64_Shdr *sechdrs,
struct ppc64_stub_entry *entry,
unsigned long addr,
- struct module *me)
+ struct module *me,
+ const char *name)
{
long reladdr;
+ if (is_mprofile_ftrace_call(name))
+ return create_ftrace_stub(entry, addr, me);
+
memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
/* Stub uses address relative to r2. */
@@ -426,7 +466,8 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
unsigned long addr,
- struct module *me)
+ struct module *me,
+ const char *name)
{
struct ppc64_stub_entry *stubs;
unsigned int i, num_stubs;
@@ -443,62 +484,19 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
return (unsigned long)&stubs[i];
}
- if (!create_stub(sechdrs, &stubs[i], addr, me))
+ if (!create_stub(sechdrs, &stubs[i], addr, me, name))
return 0;
return (unsigned long)&stubs[i];
}
-#ifdef CONFIG_MPROFILE_KERNEL
-static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
-{
- if (strcmp("_mcount", name))
- return false;
-
- /*
- * Check if this is one of the -mprofile-kernel sequences.
- */
- if (instruction[-1] == PPC_INST_STD_LR &&
- instruction[-2] == PPC_INST_MFLR)
- return true;
-
- if (instruction[-1] == PPC_INST_MFLR)
- return true;
-
- return false;
-}
-
-/*
- * In case of _mcount calls, do not save the current callee's TOC (in r2) into
- * the original caller's stack frame. If we did we would clobber the saved TOC
- * value of the original caller.
- */
-static void squash_toc_save_inst(const char *name, unsigned long addr)
-{
- struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;
-
- /* Only for calls to _mcount */
- if (strcmp("_mcount", name) != 0)
- return;
-
- stub->jump[2] = PPC_INST_NOP;
-}
-#else
-static void squash_toc_save_inst(const char *name, unsigned long addr) { }
-
-static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
-{
- return false;
-}
-#endif
-
/* We expect a noop next: if it is, replace it with instruction to
restore r2. */
static int restore_r2(const char *name, u32 *instruction, struct module *me)
{
u32 *prev_insn = instruction - 1;
- if (is_mprofile_mcount_callsite(name, prev_insn))
+ if (is_mprofile_ftrace_call(name))
return 1;
/*
@@ -506,7 +504,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
* "link" branches and they don't return, so they don't need the r2
* restore afterwards.
*/
- if (!instr_is_relative_link_branch(*prev_insn))
+ if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 1;
if (*instruction != PPC_INST_NOP) {
@@ -636,14 +634,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (sym->st_shndx == SHN_UNDEF ||
sym->st_shndx == SHN_LIVEPATCH) {
/* External: go via stub */
- value = stub_for_addr(sechdrs, value, me);
+ value = stub_for_addr(sechdrs, value, me,
+ strtab + sym->st_name);
if (!value)
return -ENOENT;
if (!restore_r2(strtab + sym->st_name,
(u32 *)location + 1, me))
return -ENOEXEC;
-
- squash_toc_save_inst(strtab + sym->st_name, value);
} else
value += local_entry_offset(sym);
@@ -745,89 +742,51 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
-
-#ifdef CONFIG_MPROFILE_KERNEL
-
-#define PACATOC offsetof(struct paca_struct, kernel_toc)
-
-/*
- * For mprofile-kernel we use a special stub for ftrace_caller() because we
- * can't rely on r2 containing this module's TOC when we enter the stub.
- *
- * That can happen if the function calling us didn't need to use the toc. In
- * that case it won't have setup r2, and the r2 value will be either the
- * kernel's toc, or possibly another modules toc.
- *
- * To deal with that this stub uses the kernel toc, which is always accessible
- * via the paca (in r13). The target (ftrace_caller()) is responsible for
- * saving and restoring the toc before returning.
- */
-static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
- struct module *me, unsigned long addr)
+int module_trampoline_target(struct module *mod, unsigned long addr,
+ unsigned long *target)
{
- struct ppc64_stub_entry *entry;
- unsigned int i, num_stubs;
- /*
- * ld r12,PACATOC(r13)
- * addis r12,r12,<high>
- * addi r12,r12,<low>
- * mtctr r12
- * bctr
- */
- static u32 stub_insns[] = {
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
- PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
- };
- long reladdr;
+ struct ppc64_stub_entry *stub;
+ func_desc_t funcdata;
+ u32 magic;
- num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);
+ if (!within_module_core(addr, mod)) {
+ pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
+ return -EFAULT;
+ }
- /* Find the next available stub entry */
- entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
- for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);
+ stub = (struct ppc64_stub_entry *)addr;
- if (i >= num_stubs) {
- pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
- return 0;
+ if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
+ pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
+ return -EFAULT;
}
- memcpy(entry->jump, stub_insns, sizeof(stub_insns));
-
- /* Stub uses address relative to kernel toc (from the paca) */
- reladdr = addr - kernel_toc_addr();
- if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
- pr_err("%s: Address of %ps out of range of kernel_toc.\n",
- me->name, (void *)addr);
- return 0;
+ if (magic != STUB_MAGIC) {
+ pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
+ return -EFAULT;
}
- entry->jump[1] |= PPC_HA(reladdr);
- entry->jump[2] |= PPC_LO(reladdr);
+ if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
+ pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
+ return -EFAULT;
+ }
- /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */
- entry->funcdata = func_desc(addr);
- entry->magic = STUB_MAGIC;
+ *target = stub_func_addr(funcdata);
- return (unsigned long)entry;
-}
-#else
-static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
- struct module *me, unsigned long addr)
-{
- return stub_for_addr(sechdrs, addr, me);
+ return 0;
}
-#endif
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
- mod->arch.tramp = create_ftrace_stub(sechdrs, mod,
- (unsigned long)ftrace_caller);
+ mod->arch.tramp = stub_for_addr(sechdrs,
+ (unsigned long)ftrace_caller,
+ mod,
+ "ftrace_caller");
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- mod->arch.tramp_regs = create_ftrace_stub(sechdrs, mod,
- (unsigned long)ftrace_regs_caller);
+ mod->arch.tramp_regs = stub_for_addr(sechdrs,
+ (unsigned long)ftrace_regs_caller,
+ mod,
+ "ftrace_regs_caller");
if (!mod->arch.tramp_regs)
return -ENOENT;
#endif
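create_ftrace_stub() above ORs PPC_HA(reladdr) and PPC_LO(reladdr) into an addis/addi pair. Because addi sign-extends its 16-bit immediate, the high half has to be "high-adjusted" by pre-adding 0x8000. A stand-alone sketch of that split, with local macros that mirror the kernel's PPC_HA()/PPC_LO() in spirit only; the int16_t casts model the sign extension the hardware performs.

/* ha_lo_sketch.c - stand-alone illustration, not part of the patch above. */
#include <assert.h>
#include <stdint.h>

#define HA(v)	((((uint32_t)(v) + 0x8000u) >> 16) & 0xffffu)
#define LO(v)	((uint32_t)(v) & 0xffffu)

int main(void)
{
	int32_t reladdr = -0x12348000;
	/* what addis (HA << 16) followed by addi (sign-extended LO) computes */
	int32_t rebuilt = (int32_t)(int16_t)HA(reladdr) * 0x10000 +
			  (int32_t)(int16_t)LO(reladdr);

	assert(rebuilt == reladdr);
	return 0;
}

The same arithmetic is why the stub rejects offsets outside the signed 32-bit window checked just before the two ORs.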
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 0cd1c88bfc8b..532f22637783 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -852,8 +852,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
/* Convert sizes from bytes to blocks */
- req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
- min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+ req_size = ALIGN(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+ min_size = ALIGN(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
/* If no minimum size specified, make it the same as the
* requested size
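The nvram hunk above only swaps the powerpc-private _ALIGN_UP() for the generic ALIGN(); both round a byte count up to the 16-byte NVRAM block size before dividing, so a partial block still costs a whole block. A trivial stand-alone check of that arithmetic, with the macro spelled locally rather than taken from the kernel headers.

/* blocks_sketch.c - stand-alone illustration, not part of the patch above. */
#include <assert.h>

#define BLOCK		16UL	/* NVRAM_BLOCK_LEN */
#define ALIGN_UP_(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	assert(ALIGN_UP_(40UL, BLOCK) / BLOCK == 3);	/* 40 bytes -> 3 blocks */
	assert(ALIGN_UP_(48UL, BLOCK) / BLOCK == 3);	/* exact multiple stays 3 */
	return 0;
}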
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 024f7aad1952..69bfe96884e2 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -16,6 +16,7 @@
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
+#include <asm/inst.h>
#define TMPL_CALL_HDLR_IDX \
(optprobe_template_call_handler - optprobe_template_entry)
@@ -99,8 +100,9 @@ static unsigned long can_optimize(struct kprobe *p)
* Ensure that the instruction is not a conditional branch,
* and that can be emulated.
*/
- if (!is_conditional_branch(*p->ainsn.insn) &&
- analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
+ if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
+ analyse_instr(&op, &regs,
+ ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
emulate_update_regs(&regs, &op);
nip = regs.nip;
}
@@ -147,50 +149,57 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
/* addis r4,0,(insn)@h */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff));
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
+ ((val >> 16) & 0xffff)));
addr++;
/* ori r4,r4,(insn)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
- ___PPC_RS(4) | (val & 0xffff));
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
+ ___PPC_RS(4) | (val & 0xffff)));
}
/*
* Generate instructions to load provided immediate 64-bit value
- * to register 'r3' and patch these instructions at 'addr'.
+ * to register 'reg' and patch these instructions at 'addr'.
*/
-void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
+void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
- /* lis r3,(op)@highest */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff));
+ /* lis reg,(op)@highest */
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
+ ((val >> 48) & 0xffff)));
addr++;
- /* ori r3,r3,(op)@higher */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 32) & 0xffff));
+ /* ori reg,reg,(op)@higher */
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
+ ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
addr++;
- /* rldicr r3,r3,32,31 */
- patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
- ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+ /* rldicr reg,reg,32,31 */
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
+ ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
addr++;
- /* oris r3,r3,(op)@h */
- patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 16) & 0xffff));
+ /* oris reg,reg,(op)@h */
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
+ ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
addr++;
- /* ori r3,r3,(op)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | (val & 0xffff));
+ /* ori reg,reg,(op)@l */
+ patch_instruction((struct ppc_inst *)addr,
+ ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
+ ___PPC_RS(reg) | (val & 0xffff)));
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
- kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
- kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
+ struct ppc_inst branch_op_callback, branch_emulate_step, temp;
+ kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
long b_offset;
unsigned long nip, size;
int rc, i;
@@ -230,7 +239,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
- rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
+ rc = patch_instruction((struct ppc_inst *)(buff + i),
+ ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
@@ -239,7 +249,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
* Fixup the template with instructions to:
* 1. load the address of the actual probepoint
*/
- patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);
+ patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);
/*
* 2. branch to optimized_callback() and emulate_step()
@@ -251,29 +261,34 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
goto error;
}
- branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
- (unsigned long)op_callback_addr,
- BRANCH_SET_LINK);
+ rc = create_branch(&branch_op_callback,
+ (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
+ (unsigned long)op_callback_addr,
+ BRANCH_SET_LINK);
- branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
- (unsigned long)emulate_step_addr,
- BRANCH_SET_LINK);
+ rc |= create_branch(&branch_emulate_step,
+ (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
+ (unsigned long)emulate_step_addr,
+ BRANCH_SET_LINK);
- if (!branch_op_callback || !branch_emulate_step)
+ if (rc)
goto error;
- patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
- patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
+ patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
+ branch_op_callback);
+ patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
+ branch_emulate_step);
/*
* 3. load instruction to be emulated into relevant register, and
*/
- patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
+ temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
+ patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX);
/*
* 4. branch back from trampoline
*/
- patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
+ patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);
flush_icache_range((unsigned long)buff,
(unsigned long)(&buff[TMPL_END_IDX]));
@@ -305,6 +320,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
void arch_optimize_kprobes(struct list_head *oplist)
{
+ struct ppc_inst instr;
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
@@ -315,9 +331,10 @@ void arch_optimize_kprobes(struct list_head *oplist)
*/
memcpy(op->optinsn.copied_insn, op->kp.addr,
RELATIVEJUMP_SIZE);
- patch_instruction(op->kp.addr,
- create_branch((unsigned int *)op->kp.addr,
- (unsigned long)op->optinsn.insn, 0));
+ create_branch(&instr,
+ (struct ppc_inst *)op->kp.addr,
+ (unsigned long)op->optinsn.insn, 0);
+ patch_instruction((struct ppc_inst *)op->kp.addr, instr);
list_del_init(&op->list);
}
}
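The optprobes rework above now loads the probed instruction as a full 64-bit value, so patch_imm64_load_insns() emits a five-instruction lis/ori/rldicr/oris/ori sequence (hence the three extra nops added to optprobe_template_insn in the optprobes_head.S diff that follows). A stand-alone C model of what that sequence computes, assuming the usual semantics of those instructions; no kernel APIs are used, and the sign extension lis performs is cleared by the rldicr step, so the model omits it.

/* imm64_sketch.c - stand-alone illustration, not part of the patch above. */
#include <assert.h>
#include <stdint.h>

static uint64_t build_imm64(uint64_t val)
{
	uint64_t r;

	r  = ((val >> 48) & 0xffff) << 16;	/* lis    reg,(val)@highest    */
	r |= (val >> 32) & 0xffff;		/* ori    reg,reg,(val)@higher */
	r <<= 32;				/* rldicr reg,reg,32,31        */
	r |= ((val >> 16) & 0xffff) << 16;	/* oris   reg,reg,(val)@h      */
	r |= val & 0xffff;			/* ori    reg,reg,(val)@l      */
	return r;
}

int main(void)
{
	assert(build_imm64(0x123456789abcdef0ULL) == 0x123456789abcdef0ULL);
	return 0;
}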
diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S
index cf383520843f..ff8ba4d3824d 100644
--- a/arch/powerpc/kernel/optprobes_head.S
+++ b/arch/powerpc/kernel/optprobes_head.S
@@ -94,6 +94,9 @@ optprobe_template_insn:
/* 2, Pass instruction to be emulated in r4 */
nop
nop
+ nop
+ nop
+ nop
.global optprobe_template_call_emulate
optprobe_template_call_emulate:
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 3f91ccaa9c74..2168372b792d 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -8,14 +8,15 @@
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
+#include <linux/pgtable.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
+#include <asm/rtas.h>
#include "setup.h"
@@ -164,6 +165,30 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_PSERIES
+/**
+ * new_rtas_args() - Allocates rtas args
+ * @cpu: CPU number
+ * @limit: Memory limit for this allocation
+ *
+ * Allocates a struct rtas_args and returns its pointer,
+ * if not in hypervisor mode
+ *
+ * Return: Pointer to the allocated rtas_args,
+ * or NULL if the CPU is in hypervisor mode
+ */
+static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit)
+{
+ limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX);
+
+ if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ return NULL;
+
+ return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES,
+ limit, cpu);
+}
+#endif /* CONFIG_PPC_PSERIES */
+
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
* hypervisor and Linux.
@@ -202,6 +227,10 @@ void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int
/* For now -- if we have threads this will be adjusted later */
new_paca->tcd_ptr = &new_paca->tcd;
#endif
+
+#ifdef CONFIG_PPC_PSERIES
+ new_paca->rtas_args_reentrant = NULL;
+#endif
}
/* Put the paca pointer into r13 and SPRG_PACA */
@@ -274,6 +303,9 @@ void __init allocate_paca(int cpu)
#ifdef CONFIG_PPC_BOOK3S_64
paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
+#ifdef CONFIG_PPC_PSERIES
+ paca->rtas_args_reentrant = new_rtas_args(cpu, limit);
+#endif
paca_struct_size += sizeof(struct paca_struct);
}
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index bf83f76563a3..2fc12198ec07 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -57,8 +57,6 @@ void pcibios_release_device(struct pci_dev *dev)
struct pci_controller *phb = pci_bus_to_host(dev->bus);
struct pci_dn *pdn = pci_get_pdn(dev);
- eeh_remove_device(dev);
-
if (phb->controller_ops.release_device)
phb->controller_ops.release_device(dev);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d9ac980c398c..9312e6eda7ff 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -100,7 +100,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
pci_name(bus->self));
#ifdef CONFIG_PPC_BOOK3S_64
- __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
+ __flush_hash_table_range(res->start + _IO_BASE,
res->end + _IO_BASE + 1);
#endif
return 0;
@@ -154,8 +154,8 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
unsigned long size_page;
unsigned long io_virt_offset;
- phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
- size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
+ phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
+ size_page = ALIGN(hose->pci_io_size, PAGE_SIZE);
/* Make sure IO area address is clear */
hose->io_base_alloc = NULL;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9c21288f8645..7bb7faf84490 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -41,7 +41,6 @@
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
@@ -629,15 +628,12 @@ void do_break (struct pt_regs *regs, unsigned long address,
if (debugger_break_match(regs))
return;
- /* Clear the breakpoint */
- hw_breakpoint_disable();
-
/* Deliver the signal to userspace */
force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
+static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
@@ -711,21 +707,49 @@ void switch_booke_debug_regs(struct debug_reg *new_debug)
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
-static void set_breakpoint(struct arch_hw_breakpoint *brk)
+static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
preempt_disable();
- __set_breakpoint(brk);
+ __set_breakpoint(i, brk);
preempt_enable();
}
static void set_debug_reg_defaults(struct thread_struct *thread)
{
- thread->hw_brk.address = 0;
- thread->hw_brk.type = 0;
- thread->hw_brk.len = 0;
- thread->hw_brk.hw_len = 0;
- if (ppc_breakpoint_available())
- set_breakpoint(&thread->hw_brk);
+ int i;
+ struct arch_hw_breakpoint null_brk = {0};
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ thread->hw_brk[i] = null_brk;
+ if (ppc_breakpoint_available())
+ set_breakpoint(i, &thread->hw_brk[i]);
+ }
+}
+
+static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
+ struct arch_hw_breakpoint *b)
+{
+ if (a->address != b->address)
+ return false;
+ if (a->type != b->type)
+ return false;
+ if (a->len != b->len)
+ return false;
+ /* no need to check hw_len. it's calculated from address and len */
+ return true;
+}
+
+static void switch_hw_breakpoint(struct task_struct *new)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
+ &new->thread.hw_brk[i])))
+ continue;
+
+ __set_breakpoint(i, &new->thread.hw_brk[i]);
+ }
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
@@ -772,12 +796,12 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
LCTRL1_CRWF_RW;
unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
- unsigned long start_addr = brk->address & ~HW_BREAKPOINT_ALIGN;
- unsigned long end_addr = (brk->address + brk->len - 1) | HW_BREAKPOINT_ALIGN;
+ unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
+ unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
if (start_addr == 0)
lctrl2 |= LCTRL2_LW0LA_F;
- else if (end_addr == ~0U)
+ else if (end_addr == 0)
lctrl2 |= LCTRL2_LW0LA_E;
else
lctrl2 |= LCTRL2_LW0LA_EandF;
@@ -793,20 +817,20 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
mtspr(SPRN_CMPE, start_addr - 1);
- mtspr(SPRN_CMPF, end_addr + 1);
+ mtspr(SPRN_CMPF, end_addr);
mtspr(SPRN_LCTRL1, lctrl1);
mtspr(SPRN_LCTRL2, lctrl2);
return 0;
}
-void __set_breakpoint(struct arch_hw_breakpoint *brk)
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
- memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
+ memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
if (dawr_enabled())
// Power8 or later
- set_dawr(brk);
+ set_dawr(nr, brk);
else if (IS_ENABLED(CONFIG_PPC_8xx))
set_breakpoint_8xx(brk);
else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
@@ -829,19 +853,6 @@ bool ppc_breakpoint_available(void)
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
-static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
- struct arch_hw_breakpoint *b)
-{
- if (a->address != b->address)
- return false;
- if (a->type != b->type)
- return false;
- if (a->len != b->len)
- return false;
- /* no need to check hw_len. it's calculated from address and len */
- return true;
-}
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_enabled(struct task_struct *tsk)
@@ -1174,8 +1185,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
- __set_breakpoint(&new->thread.hw_brk);
+ switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -1228,7 +1238,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
* mappings, we must issue a cp_abort to clear any state and
* prevent snooping, corruption or a covert channel.
*/
- if (current->thread.used_vas)
+ if (current->mm &&
+ atomic_read(&current->mm->context.vas_windows))
asm volatile(PPC_CP_ABORT);
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1412,7 +1423,7 @@ void show_regs(struct pt_regs * regs)
print_msr_bits(regs->msr);
pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
- if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
+ if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
pr_cont("CFAR: "REG" ", regs->orig_gpr3);
if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
@@ -1444,7 +1455,7 @@ void show_regs(struct pt_regs * regs)
printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
+ show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
if (!user_mode(regs))
show_instructions(regs);
}
@@ -1467,27 +1478,6 @@ void arch_setup_new_exec(void)
}
#endif
-int set_thread_uses_vas(void)
-{
-#ifdef CONFIG_PPC_BOOK3S_64
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -EINVAL;
-
- current->thread.used_vas = 1;
-
- /*
- * Even a process that has no foreign real address mapping can use
- * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
- * to clear any pending COPY and prevent a covert channel.
- *
- * __switch_to() will issue CP_ABORT on future context switches.
- */
- asm volatile(PPC_CP_ABORT);
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
- return 0;
-}
-
#ifdef CONFIG_PPC64
/**
* Assign a TIDR (thread ID) for task @t and set it in the thread
@@ -1610,6 +1600,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
void (*f)(void);
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
struct thread_info *ti = task_thread_info(p);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int i;
+#endif
klp_init_thread_info(p);
@@ -1669,7 +1662,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)end_of_stack(p);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- p->thread.ptrace_bps[0] = NULL;
+ for (i = 0; i < nr_wp_slots(); i++)
+ p->thread.ptrace_bps[i] = NULL;
#endif
p->thread.fp_save_area = NULL;
@@ -1740,7 +1734,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
* FULL_REGS(regs) return true. This is necessary to allow
* ptrace to examine the thread immediately after exec.
*/
- regs->trap &= ~1UL;
+ SET_FULL_REGS(regs);
#ifdef CONFIG_PPC32
regs->mq = 0;
@@ -2068,7 +2062,8 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack)
+void show_stack(struct task_struct *tsk, unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
@@ -2093,7 +2088,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
}
lr = 0;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
break;
@@ -2102,7 +2097,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
+ printk("%s["REG"] ["REG"] %pS",
+ loglvl, sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ret_addr = ftrace_graph_ret_addr(current,
&ftrace_idx, ip, stack);
@@ -2124,8 +2120,9 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
- regs->trap, (void *)regs->nip, (void *)lr);
+ printk("%s--- interrupt: %lx at %pS\n LR = %pS\n",
+ loglvl, regs->trap,
+ (void *)regs->nip, (void *)lr);
firstframe = 1;
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6620f37abe73..9cc49f265c86 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -30,6 +30,7 @@
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -41,7 +42,6 @@
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
-#include <asm/pgtable.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
@@ -96,8 +96,8 @@ static inline int overlaps_initrd(unsigned long start, unsigned long size)
if (!initrd_start)
return 0;
- return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
- start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+ return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
+ start <= ALIGN(initrd_end, PAGE_SIZE);
#else
return 0;
#endif
@@ -515,9 +515,14 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
size = 0x80000000ul - base;
}
+ if (!validate_mem_limit(base, &size))
+ continue;
+
DBG("Adding: %llx -> %llx\n", base, size);
- if (validate_mem_limit(base, &size))
- memblock_add(base, size);
+ memblock_add(base, size);
+
+ if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
+ memblock_mark_hotplug(base, size);
} while (--rngs);
}
#endif /* CONFIG_PPC_PSERIES */
@@ -623,9 +628,9 @@ static void __init early_reserve_mem(void)
#ifdef CONFIG_BLK_DEV_INITRD
/* Then reserve the initrd, if any */
if (initrd_start && (initrd_end > initrd_start)) {
- memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
- _ALIGN_UP(initrd_end, PAGE_SIZE) -
- _ALIGN_DOWN(initrd_start, PAGE_SIZE));
+ memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
+ ALIGN(initrd_end, PAGE_SIZE) -
+ ALIGN_DOWN(initrd_start, PAGE_SIZE));
}
#endif /* CONFIG_BLK_DEV_INITRD */
@@ -685,6 +690,23 @@ static void __init tm_init(void)
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+#ifdef CONFIG_PPC64
+static void __init save_fscr_to_task(void)
+{
+ /*
+ * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
+ * have configured via the device tree features or via __init_FSCR().
+ * That value will then be propagated to pid 1 (init) and all future
+ * processes.
+ */
+ if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ init_task.thread.fscr = mfspr(SPRN_FSCR);
+}
+#else
+static inline void save_fscr_to_task(void) {};
+#endif
+
+
void __init early_init_devtree(void *params)
{
phys_addr_t limit;
@@ -773,6 +795,8 @@ void __init early_init_devtree(void *params)
BUG();
}
+ save_fscr_to_task();
+
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
* NCPUS-1 non-boot CPUs :-)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 806be751c336..90c604d00b7d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
@@ -34,7 +35,6 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
@@ -920,7 +920,7 @@ struct option_vector6 {
} __packed;
struct ibm_arch_vec {
- struct { u32 mask, val; } pvrs[12];
+ struct { u32 mask, val; } pvrs[14];
u8 num_vectors;
@@ -974,6 +974,14 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.val = cpu_to_be32(0x004e0000),
},
{
+ .mask = cpu_to_be32(0xffff0000), /* POWER10 */
+ .val = cpu_to_be32(0x00800000),
+ },
+ {
+ .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
+ .val = cpu_to_be32(0x0f000006),
+ },
+ {
.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
.val = cpu_to_be32(0x0f000005),
},
@@ -1002,7 +1010,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.byte1 = 0,
.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
- .arch_versions3 = OV1_PPC_3_00,
+ .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
},
.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
@@ -1449,18 +1457,18 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
unsigned long addr = 0;
if (align)
- base = _ALIGN_UP(base, align);
+ base = ALIGN(base, align);
prom_debug("%s(%lx, %lx)\n", __func__, size, align);
if (ram_top == 0)
prom_panic("alloc_up() called with mem not initialized\n");
if (align)
- base = _ALIGN_UP(alloc_bottom, align);
+ base = ALIGN(alloc_bottom, align);
else
base = alloc_bottom;
for(; (base + size) <= alloc_top;
- base = _ALIGN_UP(base + 0x100000, align)) {
+ base = ALIGN(base + 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
@@ -1500,7 +1508,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
if (highmem) {
/* Carve out storage for the TCE table. */
- addr = _ALIGN_DOWN(alloc_top_high - size, align);
+ addr = ALIGN_DOWN(alloc_top_high - size, align);
if (addr <= alloc_bottom)
return 0;
/* Will we bump into the RMO ? If yes, check out that we
@@ -1518,9 +1526,9 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
goto bail;
}
- base = _ALIGN_DOWN(alloc_top - size, align);
+ base = ALIGN_DOWN(alloc_top - size, align);
for (; base > alloc_bottom;
- base = _ALIGN_DOWN(base - 0x100000, align)) {
+ base = ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
@@ -1586,8 +1594,8 @@ static void __init reserve_mem(u64 base, u64 size)
* have our terminator with "size" set to 0 since we are
* dumb and just copy this entire array to the boot params
*/
- base = _ALIGN_DOWN(base, PAGE_SIZE);
- top = _ALIGN_UP(top, PAGE_SIZE);
+ base = ALIGN_DOWN(base, PAGE_SIZE);
+ top = ALIGN(top, PAGE_SIZE);
size = top - base;
if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
@@ -2426,7 +2434,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
{
void *ret;
- *mem_start = _ALIGN(*mem_start, align);
+ *mem_start = ALIGN(*mem_start, align);
while ((*mem_start + needed) > *mem_end) {
unsigned long room, chunk;
@@ -2562,7 +2570,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
*lp++ = *p;
}
*lp = 0;
- *mem_start = _ALIGN((unsigned long)lp + 1, 4);
+ *mem_start = ALIGN((unsigned long)lp + 1, 4);
}
/* get it again for debugging */
@@ -2608,7 +2616,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
/* push property content */
valp = make_room(mem_start, mem_end, l, 4);
call_prom("getprop", 4, 1, node, pname, valp, l);
- *mem_start = _ALIGN(*mem_start, 4);
+ *mem_start = ALIGN(*mem_start, 4);
if (!prom_strcmp(pname, "phandle"))
has_phandle = 1;
@@ -2667,7 +2675,7 @@ static void __init flatten_device_tree(void)
prom_panic ("couldn't get device tree root\n");
/* Build header and make room for mem rsv map */
- mem_start = _ALIGN(mem_start, 4);
+ mem_start = ALIGN(mem_start, 4);
hdr = make_room(&mem_start, &mem_end,
sizeof(struct boot_param_header), 4);
dt_header_start = (unsigned long)hdr;
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
index f87e7c5c3bf3..697c7e4b5877 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -44,7 +44,7 @@ void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
dbginfo->version = 1;
dbginfo->num_instruction_bps = 0;
if (ppc_breakpoint_available())
- dbginfo->num_data_bps = 1;
+ dbginfo->num_data_bps = nr_wp_slots();
else
dbginfo->num_data_bps = 0;
dbginfo->num_condition_regs = 0;
@@ -67,11 +67,16 @@ int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
return -EINVAL;
- dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
- (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
+ dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
return put_user(dabr_fake, datalp);
}
+/*
+ * ptrace_set_debugreg() fakes DABR, and there is only one DABR. So even
+ * if the hardware supports more than one watchpoint, this interface
+ * exposes only one.
+ */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -137,7 +142,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
return ret;
thread->ptrace_bps[0] = bp;
- thread->hw_brk = hw_brk;
+ thread->hw_brk[0] = hw_brk;
return 0;
}
@@ -159,12 +164,37 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
if (set_bp && (!ppc_breakpoint_available()))
return -ENODEV;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
- task->thread.hw_brk = hw_brk;
+ task->thread.hw_brk[0] = hw_brk;
return 0;
}
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static int find_empty_ptrace_bp(struct thread_struct *thread)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!thread->ptrace_bps[i])
+ return i;
+ }
+ return -1;
+}
+#endif
+
+static int find_empty_hw_brk(struct thread_struct *thread)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!thread->hw_brk[i].address)
+ return i;
+ }
+ return -1;
+}
+
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
+ int i;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int len = 0;
struct thread_struct *thread = &child->thread;
@@ -186,7 +216,7 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
- brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
+ brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
brk.type = HW_BRK_TYPE_TRANSLATE;
brk.len = DABR_MAX_LEN;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
@@ -200,8 +230,9 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
len = 1;
else
return -EINVAL;
- bp = thread->ptrace_bps[0];
- if (bp)
+
+ i = find_empty_ptrace_bp(thread);
+ if (i < 0)
return -ENOSPC;
/* Create a new breakpoint request if one doesn't exist already */
@@ -211,27 +242,28 @@ long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_inf
arch_bp_generic_fields(brk.type, &attr.bp_type);
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
- thread->ptrace_bps[0] = bp;
+ thread->ptrace_bps[i] = bp;
if (IS_ERR(bp)) {
- thread->ptrace_bps[0] = NULL;
+ thread->ptrace_bps[i] = NULL;
return PTR_ERR(bp);
}
- return 1;
+ return i + 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
return -EINVAL;
- if (child->thread.hw_brk.address)
+ i = find_empty_hw_brk(&child->thread);
+ if (i < 0)
return -ENOSPC;
if (!ppc_breakpoint_available())
return -ENODEV;
- child->thread.hw_brk = brk;
+ child->thread.hw_brk[i] = brk;
- return 1;
+ return i + 1;
}
long ppc_del_hwdebug(struct task_struct *child, long data)
@@ -241,24 +273,24 @@ long ppc_del_hwdebug(struct task_struct *child, long data)
struct thread_struct *thread = &child->thread;
struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
- if (data != 1)
+ if (data < 1 || data > nr_wp_slots())
return -EINVAL;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- bp = thread->ptrace_bps[0];
+ bp = thread->ptrace_bps[data - 1];
if (bp) {
unregister_hw_breakpoint(bp);
- thread->ptrace_bps[0] = NULL;
+ thread->ptrace_bps[data - 1] = NULL;
} else {
ret = -ENOENT;
}
return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
- if (child->thread.hw_brk.address == 0)
+ if (child->thread.hw_brk[data - 1].address == 0)
return -ENOENT;
- child->thread.hw_brk.address = 0;
- child->thread.hw_brk.type = 0;
+ child->thread.hw_brk[data - 1].address = 0;
+ child->thread.hw_brk[data - 1].type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
return 0;
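
For context only (not part of this patch), a minimal user-space sketch of driving the now multi-slot interface above: PPC_PTRACE_SETHWDEBUG returns the 1-based slot handle (the i + 1 from ppc_set_hwdebug()) and PPC_PTRACE_DELHWDEBUG takes that same handle back. It assumes the tracee is already attached and stopped; pid and watch_addr are placeholders.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>		/* struct ppc_hw_breakpoint, PPC_PTRACE_*, PPC_BREAKPOINT_* */

static int watch_then_unwatch(pid_t pid, unsigned long watch_addr)
{
	struct ppc_hw_breakpoint bp;
	long slot;

	memset(&bp, 0, sizeof(bp));
	bp.version = PPC_DEBUG_CURRENT_VERSION;
	bp.trigger_type = PPC_BREAKPOINT_TRIGGER_WRITE;
	bp.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr = watch_addr;

	/* Returns a 1-based slot handle, or -1 (e.g. ENOSPC when all slots are used). */
	slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
	if (slot < 0) {
		perror("PPC_PTRACE_SETHWDEBUG");
		return -1;
	}

	/* Free the slot again by passing the same handle back. */
	if (ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot) < 0)
		perror("PPC_PTRACE_DELHWDEBUG");

	return 0;
}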
diff --git a/arch/powerpc/kernel/ptrace/ptrace-tm.c b/arch/powerpc/kernel/ptrace/ptrace-tm.c
index d75aff31f637..32d62c606681 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-tm.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-tm.c
@@ -43,7 +43,7 @@ static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
- task->thread.ckpt_regs.trap = trap & 0xfff0;
+ set_trap(&task->thread.ckpt_regs, trap);
return 0;
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index 15e3b79b6395..caeb5822a8f4 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -149,7 +149,7 @@ static int set_user_dscr(struct task_struct *task, unsigned long dscr)
*/
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
- task->thread.regs->trap = trap & 0xfff0;
+ set_trap(task->thread.regs, trap);
return 0;
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace32.c b/arch/powerpc/kernel/ptrace/ptrace32.c
index 7976ddf29c0e..7589a9665ffb 100644
--- a/arch/powerpc/kernel/ptrace/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace/ptrace32.c
@@ -259,8 +259,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
#else
dabr_fake = (
- (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
- (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
+ (child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
ret = put_user(dabr_fake, (u32 __user *)data);
#endif
break;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index c5fa251b8950..a09eba03f180 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -41,6 +41,7 @@
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/topology.h>
+#include <asm/paca.h>
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
@@ -1014,6 +1015,57 @@ out:
free_cpumask_var(offline_mask);
return atomic_read(&data.error);
}
+
+/**
+ * rtas_call_reentrant() - Used for reentrant rtas calls
+ * @token: Token for desired reentrant RTAS call
+ * @nargs: Number of Input Parameters
+ * @nret: Number of Output Parameters
+ * @outputs: Array of outputs
+ * @...: Inputs for desired RTAS call
+ *
+ * According to LoPAR documentation, only "ibm,int-on", "ibm,int-off",
+ * "ibm,get-xive" and "ibm,set-xive" are currently reentrant.
+ * Reentrant calls need their own rtas_args buffer, so they use the per-CPU
+ * buffer in the PACA rather than the global rtas.args.
+ *
+ * Return: -1 on error,
+ * first output value of the RTAS call if (nret > 0),
+ * 0 otherwise.
+ */
+int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...)
+{
+ va_list list;
+ struct rtas_args *args;
+ unsigned long flags;
+ int i, ret = 0;
+
+ if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
+ return -1;
+
+ local_irq_save(flags);
+ preempt_disable();
+
+ /* We use the per-cpu (PACA) rtas args buffer */
+ args = local_paca->rtas_args_reentrant;
+
+ va_start(list, outputs);
+ va_rtas_call_unlocked(args, token, nargs, nret, list);
+ va_end(list);
+
+ if (nret > 1 && outputs)
+ for (i = 0; i < nret - 1; ++i)
+ outputs[i] = be32_to_cpu(args->rets[i + 1]);
+
+ if (nret > 0)
+ ret = be32_to_cpu(args->rets[0]);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ return ret;
+}
+
#else /* CONFIG_PPC_PSERIES */
int rtas_ibm_suspend_me(u64 handle)
{
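
As a usage illustration only (not part of this patch): rtas_call_reentrant() is called like rtas_call(), returning the RTAS status (rets[0]) and filling the optional outputs array with rets[1..nret-1]. The sketch below assumes hw_irq is a placeholder interrupt number and uses "ibm,get-xive", one of the calls LoPAR lists as reentrant (1 input, 3 outputs).

/* Sketch: query server/priority of an interrupt source reentrantly. */
int ibm_get_xive = rtas_token("ibm,get-xive");
int outputs[2];		/* rets[1] = server#, rets[2] = priority */
int status;

status = rtas_call_reentrant(ibm_get_xive, 1, 3, outputs, hw_irq);
if (status == 0)
	pr_debug("hwirq %lu -> server %d, priority %d\n",
		 hw_irq, outputs[0], outputs[1]);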
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ae5e43eaca48..781c1869902e 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -13,9 +13,9 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index bd70f5be1c27..d86701ce116b 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -7,6 +7,8 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <asm/asm-prototypes.h>
@@ -14,6 +16,7 @@
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
+#include <asm/inst.h>
u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
@@ -353,6 +356,40 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
return sprintf(buf, "Vulnerable\n");
}
+static int ssb_prctl_get(struct task_struct *task)
+{
+ if (stf_enabled_flush_types == STF_BARRIER_NONE)
+ /*
+ * We don't have an explicit signal from firmware that we're
+ * vulnerable or not, we only have certain CPU revisions that
+ * are known to be vulnerable.
+ *
+ * We assume that if we're on another CPU, where the barrier is
+ * NONE, then we are not vulnerable.
+ */
+ return PR_SPEC_NOT_AFFECTED;
+ else
+ /*
+ * If we do have a barrier type then we are vulnerable. The
+ * barrier is not a global or per-process mitigation, so the
+ * only value we can report here is PR_SPEC_ENABLE, which
+ * appears as "vulnerable" in /proc.
+ */
+ return PR_SPEC_ENABLE;
+
+ return -EINVAL;
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
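
For reference, a small user-space sketch (not part of this patch) of how the new reporting is consumed: the generic prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, ...) path ends up in arch_prctl_spec_ctrl_get() above, so it returns either PR_SPEC_NOT_AFFECTED or PR_SPEC_ENABLE.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_GET_SPECULATION_CTRL, PR_SPEC_* */

int main(void)
{
	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (ret < 0)
		perror("prctl");
	else if (ret == PR_SPEC_NOT_AFFECTED)
		printf("store bypass: not affected\n");
	else if (ret & PR_SPEC_ENABLE)
		printf("store bypass: vulnerable (no per-task control)\n");
	else
		printf("store bypass: 0x%x\n", ret);
	return 0;
}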
@@ -403,9 +440,11 @@ static void toggle_count_cache_flush(bool enable)
enable = false;
if (!enable) {
- patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+ patch_instruction_site(&patch__call_flush_count_cache,
+ ppc_inst(PPC_INST_NOP));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
+ patch_instruction_site(&patch__call_kvm_flush_link_stack,
+ ppc_inst(PPC_INST_NOP));
#endif
pr_info("link-stack-flush: software flush disabled.\n");
link_stack_flush_enabled = false;
@@ -428,7 +467,8 @@ static void toggle_count_cache_flush(bool enable)
// If we just need to flush the link stack, patch an early return
if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
- patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+ patch_instruction_site(&patch__flush_link_stack_return,
+ ppc_inst(PPC_INST_BLR));
no_count_cache_flush();
return;
}
@@ -439,7 +479,7 @@ static void toggle_count_cache_flush(bool enable)
return;
}
- patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+ patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR));
count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f9c0d888ce8a..9d3faac53295 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -31,13 +31,13 @@
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
@@ -306,10 +306,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
} else {
switch (PVR_VER(pvr)) {
- case 0x0020: /* 403 family */
- maj = PVR_MAJ(pvr) + 1;
- min = PVR_MIN(pvr);
- break;
case 0x1008: /* 740P/750P ?? */
maj = ((pvr >> 8) & 0xFF) - 1;
min = pvr & 0xFF;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 305ca89d856f..1823706ae076 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -19,11 +19,11 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/nvram.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
@@ -74,20 +74,20 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
*/
notrace void __init machine_init(u64 dt_ptr)
{
- unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache);
- unsigned long insn;
+ struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache);
+ struct ppc_inst insn;
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
- early_ioremap_setup();
+ early_ioremap_init();
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);
+ patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP));
- insn = create_cond_branch(addr, branch_target(addr), 0x820000);
+ create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
/* Do some early initialization based on the flat device tree */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8105010b0e76..0ba1ed77dc68 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -30,13 +30,13 @@
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
@@ -711,7 +711,7 @@ void __init exc_lvl_early_init(void)
*/
void __init emergency_stack_init(void)
{
- u64 limit;
+ u64 limit, mce_limit;
unsigned int i;
/*
@@ -728,7 +728,16 @@ void __init emergency_stack_init(void)
* initialized in kernel/irq.c. These are initialized here in order
* to have emergency stacks available as early as possible.
*/
- limit = min(ppc64_bolted_size(), ppc64_rma_size);
+ limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);
+
+ /*
+ * Machine check on pseries calls RTAS, but it can't use the static
+ * rtas_args buffer because a machine check can hit while the lock is
+ * held. RTAS args must be below 4GB, so limit the machine check stack
+ * to 4GB so that the args can be placed on it.
+ */
+ if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
+ mce_limit = SZ_4G;
for_each_possible_cpu(i) {
paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
@@ -738,7 +747,7 @@ void __init emergency_stack_init(void)
paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
/* emergency stack for machine check exception handling. */
- paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
+ paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
}
}
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index a264989626fd..b4143b6ff093 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -198,7 +198,10 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
int restart = 1;
/* syscall ? */
- if (TRAP(regs) != 0x0C00)
+ if (!trap_is_syscall(regs))
+ return;
+
+ if (trap_norestart(regs))
return;
/* error signalled ? */
@@ -258,19 +261,24 @@ static void do_signal(struct task_struct *tsk)
if (ksig.sig <= 0) {
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
- tsk->thread.regs->trap = 0;
+ set_trap_norestart(tsk->thread.regs);
return; /* no signals delivered */
}
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Reenable the DABR before delivering the signal to
* user space. The DABR will have been cleared if it
* triggered inside the kernel.
*/
- if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type)
- __set_breakpoint(&tsk->thread.hw_brk);
-#endif
+ if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
+ __set_breakpoint(i, &tsk->thread.hw_brk[i]);
+ }
+ }
+
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(tsk, tsk->thread.regs);
@@ -285,7 +293,7 @@ static void do_signal(struct task_struct *tsk)
ret = handle_rt_signal64(&ksig, oldset, tsk);
}
- tsk->thread.regs->trap = 0;
+ set_trap_norestart(tsk->thread.regs);
signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 4f96d29a22bf..1415c16ab628 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -47,7 +47,6 @@
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
-#include <asm/pgtable.h>
#endif
#include "signal.h"
@@ -500,7 +499,7 @@ static long restore_user_regs(struct pt_regs *regs,
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
- regs->trap = 0;
+ set_trap_norestart(regs);
err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index adfde59cf4ba..55e5f76554da 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -25,7 +25,6 @@
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
@@ -350,8 +349,8 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
- /* skip SOFTE */
- regs->trap = 0;
+ /* Don't allow userspace to set SOFTE */
+ set_trap_norestart(regs);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
@@ -472,10 +471,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
&sc->gp_regs[PT_XER]);
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
-
- /* Don't allow userspace to set the trap value */
- regs->trap = 0;
-
+ /* Don't allow userspace to set SOFTE */
+ set_trap_norestart(regs);
/* These regs are not checkpointed; they can go in 'regs'. */
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 6d2a3a3666f0..73199470c265 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,6 +33,7 @@
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -41,7 +42,6 @@
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
@@ -1383,7 +1383,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
#ifdef CONFIG_SCHED_SMT
if (has_big_cores) {
- pr_info("Using small cores at SMT level\n");
+ pr_info("Big cores detected but using small core scheduling\n");
power9_topology[0].mask = smallcore_smt_mask;
powerpc_topology[0].mask = smallcore_smt_mask;
}
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index c477b8585a29..b6440657ef92 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -260,7 +260,7 @@ static void raise_backtrace_ipi(cpumask_t *mask)
pr_cont(" current pointer corrupt? (%px)\n", p->__current);
pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
- show_stack(p->__current, (unsigned long *)p->saved_r1);
+ show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
}
}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index cbdf86228eaa..f73f4d72fea4 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -395,6 +395,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
li r3,0
blr
+_ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
/* FIXME: This construct is actually not useful since we don't shut
* down the instruction MMU; we could just flip back MSR-DR on.
@@ -406,4 +407,5 @@ turn_on_mmu:
sync
isync
rfi
+_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 7b7c89cad901..79edba3ab312 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -102,6 +102,31 @@ notrace long system_call_exception(long r3, long r4, long r5,
}
/*
+ * local irqs must be disabled. Returns false if the caller must re-enable
+ * them, check for new work, and try again.
+ */
+static notrace inline bool prep_irq_for_enabled_exit(void)
+{
+ /* This must be done with RI=1 because tracing may touch vmaps */
+ trace_hardirqs_on();
+
+ /* This pattern matches prep_irq_for_idle */
+ __hard_EE_RI_disable();
+ if (unlikely(lazy_irq_pending_nocheck())) {
+ /* Took an interrupt, may have more exit work to do. */
+ __hard_RI_enable();
+ trace_hardirqs_off();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ return false;
+ }
+ local_paca->irq_happened = 0;
+ irq_soft_mask_set(IRQS_ENABLED);
+
+ return true;
+}
+
+/*
* This should be called after a syscall returns, with r3 the return value
* from the syscall. If this function returns non-zero, the system call
* exit assembly should additionally load all GPR registers and CTR and XER
@@ -186,21 +211,10 @@ again:
}
}
- /* This must be done with RI=1 because tracing may touch vmaps */
- trace_hardirqs_on();
-
- /* This pattern matches prep_irq_for_idle */
- __hard_EE_RI_disable();
- if (unlikely(lazy_irq_pending_nocheck())) {
- __hard_RI_enable();
- trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ if (unlikely(!prep_irq_for_enabled_exit())) {
local_irq_enable();
- /* Took an interrupt, may have more exit work to do. */
goto again;
}
- local_paca->irq_happened = 0;
- irq_soft_mask_set(IRQS_ENABLED);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
@@ -228,6 +242,10 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
BUG_ON(!FULL_REGS(regs));
BUG_ON(regs->softe != IRQS_ENABLED);
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * AMR can only have been unlocked if we interrupted the kernel.
+ */
kuap_check_amr();
local_irq_save(flags);
@@ -264,19 +282,11 @@ again:
}
}
- trace_hardirqs_on();
- __hard_EE_RI_disable();
- if (unlikely(lazy_irq_pending_nocheck())) {
- __hard_RI_enable();
- trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ if (unlikely(!prep_irq_for_enabled_exit())) {
local_irq_enable();
local_irq_disable();
- /* Took an interrupt, may have more exit work to do. */
goto again;
}
- local_paca->irq_happened = 0;
- irq_soft_mask_set(IRQS_ENABLED);
#ifdef CONFIG_PPC_BOOK3E
if (unlikely(ts->debug.dbcr0 & DBCR0_IDM)) {
@@ -307,13 +317,14 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
unsigned long *ti_flagsp = &current_thread_info()->flags;
unsigned long flags;
unsigned long ret = 0;
+ unsigned long amr;
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && unlikely(!(regs->msr & MSR_RI)))
unrecoverable_exception(regs);
BUG_ON(regs->msr & MSR_PR);
BUG_ON(!FULL_REGS(regs));
- kuap_check_amr();
+ amr = kuap_get_and_check_amr();
if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
@@ -334,13 +345,7 @@ again:
}
}
- trace_hardirqs_on();
- __hard_EE_RI_disable();
- if (unlikely(lazy_irq_pending_nocheck())) {
- __hard_RI_enable();
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ if (unlikely(!prep_irq_for_enabled_exit())) {
/*
* Can't local_irq_restore to replay if we were in
* interrupt context. Must replay directly.
@@ -354,8 +359,6 @@ again:
/* Took an interrupt, may have more exit work to do. */
goto again;
}
- local_paca->irq_happened = 0;
- irq_soft_mask_set(IRQS_ENABLED);
} else {
/* Returning to a kernel context with local irqs disabled. */
__hard_EE_RI_disable();
@@ -369,10 +372,11 @@ again:
#endif
/*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
+ * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
+ * which would cause Read-After-Write stalls. Hence, we take the AMR
+ * value from the check above.
*/
- kuap_restore_amr(regs);
+ kuap_restore_amr(regs, amr);
return ret;
}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 479c70680b76..571b3259697e 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -19,6 +19,7 @@
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>
+#include <asm/idle.h>
#include <asm/svm.h>
#include "cacheinfo.h"
@@ -760,6 +761,74 @@ static void create_svm_file(void)
}
#endif /* CONFIG_PPC_SVM */
+#ifdef CONFIG_PPC_PSERIES
+static void read_idle_purr(void *val)
+{
+ u64 *ret = val;
+
+ *ret = read_this_idle_purr();
+}
+
+static ssize_t idle_purr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ u64 val;
+
+ smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);
+ return sprintf(buf, "%llx\n", val);
+}
+static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL);
+
+static void create_idle_purr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_create_file(s, &dev_attr_idle_purr);
+}
+
+static void remove_idle_purr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_remove_file(s, &dev_attr_idle_purr);
+}
+
+static void read_idle_spurr(void *val)
+{
+ u64 *ret = val;
+
+ *ret = read_this_idle_spurr();
+}
+
+static ssize_t idle_spurr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ u64 val;
+
+ smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);
+ return sprintf(buf, "%llx\n", val);
+}
+static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL);
+
+static void create_idle_spurr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_create_file(s, &dev_attr_idle_spurr);
+}
+
+static void remove_idle_spurr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_remove_file(s, &dev_attr_idle_spurr);
+}
+
+#else /* CONFIG_PPC_PSERIES */
+#define create_idle_purr_file(s)
+#define remove_idle_purr_file(s)
+#define create_idle_spurr_file(s)
+#define remove_idle_spurr_file(s)
+#endif /* CONFIG_PPC_PSERIES */
+
static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -823,10 +892,13 @@ static int register_cpu_online(unsigned int cpu)
if (!firmware_has_feature(FW_FEATURE_LPAR))
add_write_permission_dev_attr(&dev_attr_purr);
device_create_file(s, &dev_attr_purr);
+ create_idle_purr_file(s);
}
- if (cpu_has_feature(CPU_FTR_SPURR))
+ if (cpu_has_feature(CPU_FTR_SPURR)) {
device_create_file(s, &dev_attr_spurr);
+ create_idle_spurr_file(s);
+ }
if (cpu_has_feature(CPU_FTR_DSCR))
device_create_file(s, &dev_attr_dscr);
@@ -910,11 +982,15 @@ static int unregister_cpu_online(unsigned int cpu)
device_remove_file(s, &dev_attr_mmcra);
#endif /* CONFIG_PMU_SYSFS */
- if (cpu_has_feature(CPU_FTR_PURR))
+ if (cpu_has_feature(CPU_FTR_PURR)) {
device_remove_file(s, &dev_attr_purr);
+ remove_idle_purr_file(s);
+ }
- if (cpu_has_feature(CPU_FTR_SPURR))
+ if (cpu_has_feature(CPU_FTR_SPURR)) {
device_remove_file(s, &dev_attr_spurr);
+ remove_idle_spurr_file(s);
+ }
if (cpu_has_feature(CPU_FTR_DSCR))
device_remove_file(s, &dev_attr_dscr);
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 7ea0ca044b65..5e399628f51a 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
+#include <asm/inst.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -40,23 +41,23 @@
#define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
-static unsigned int
+static struct ppc_inst
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
- unsigned int op;
+ struct ppc_inst op;
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (struct ppc_inst *)ip, addr, link ? 1 : 0);
return op;
}
static int
-ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
+ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
{
- unsigned int replaced;
+ struct ppc_inst replaced;
/*
* Note:
@@ -67,18 +68,18 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
*/
/* read the text we want to modify */
- if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ if (probe_kernel_read_inst(&replaced, (void *)ip))
return -EFAULT;
/* Make sure it is what we expect it to be */
- if (replaced != old) {
+ if (!ppc_inst_equal(replaced, old)) {
pr_err("%p: replaced (%#x) != old (%#x)",
- (void *)ip, replaced, old);
+ (void *)ip, ppc_inst_val(replaced), ppc_inst_val(old));
return -EINVAL;
}
/* replace the text with the new text */
- if (patch_instruction((unsigned int *)ip, new))
+ if (patch_instruction((struct ppc_inst *)ip, new))
return -EPERM;
return 0;
@@ -89,27 +90,28 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
+ struct ppc_inst op;
addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
- return create_branch((unsigned int *)ip, addr, 0);
+ return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0;
}
-static int is_bl_op(unsigned int op)
+static int is_bl_op(struct ppc_inst op)
{
- return (op & 0xfc000003) == 0x48000001;
+ return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
}
-static int is_b_op(unsigned int op)
+static int is_b_op(struct ppc_inst op)
{
- return (op & 0xfc000003) == 0x48000000;
+ return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
}
-static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op)
{
int offset;
- offset = (op & 0x03fffffc);
+ offset = (ppc_inst_val(op) & 0x03fffffc);
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
@@ -125,17 +127,17 @@ __ftrace_make_nop(struct module *mod,
{
unsigned long entry, ptr, tramp;
unsigned long ip = rec->ip;
- unsigned int op, pop;
+ struct ppc_inst op, pop;
/* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ if (probe_kernel_read_inst(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
/* Make sure that this is still a 24-bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op));
return -EINVAL;
}
@@ -160,16 +162,18 @@ __ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MPROFILE_KERNEL
/* When using -mkernel_profile there is no load to jump over */
- pop = PPC_INST_NOP;
+ pop = ppc_inst(PPC_INST_NOP);
- if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
+ if (probe_kernel_read_inst(&op, (void *)(ip - 4))) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4);
return -EFAULT;
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
- pr_err("Unexpected instruction %08x around bl _mcount\n", op);
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) &&
+ !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
+ pr_err("Unexpected instruction %08x around bl _mcount\n",
+ ppc_inst_val(op));
return -EINVAL;
}
#else
@@ -187,24 +191,24 @@ __ftrace_make_nop(struct module *mod,
* Use a b +8 to jump over the load.
*/
- pop = PPC_INST_BRANCH | 8; /* b +8 */
+ pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */
/*
* Check what is in the next instruction. We can see ld r2,40(r1), but
* on first pass after boot we will see mflr r0.
*/
- if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
+ if (probe_kernel_read_inst(&op, (void *)(ip + 4))) {
pr_err("Fetching op failed.\n");
return -EFAULT;
}
- if (op != PPC_INST_LD_TOC) {
- pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
+ pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, ppc_inst_val(op));
return -EINVAL;
}
#endif /* CONFIG_MPROFILE_KERNEL */
- if (patch_instruction((unsigned int *)ip, pop)) {
+ if (patch_instruction((struct ppc_inst *)ip, pop)) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -217,7 +221,7 @@ static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
+ struct ppc_inst op;
unsigned int jmp[4];
unsigned long ip = rec->ip;
unsigned long tramp;
@@ -227,7 +231,7 @@ __ftrace_make_nop(struct module *mod,
/* Make sure that this is still a 24-bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op));
return -EINVAL;
}
@@ -274,9 +278,9 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- op = PPC_INST_NOP;
+ op = ppc_inst(PPC_INST_NOP);
- if (patch_instruction((unsigned int *)ip, op))
+ if (patch_instruction((struct ppc_inst *)ip, op))
return -EPERM;
return 0;
@@ -287,6 +291,7 @@ __ftrace_make_nop(struct module *mod,
static unsigned long find_ftrace_tramp(unsigned long ip)
{
int i;
+ struct ppc_inst instr;
/*
* We have the compiler generated long_branch tramps at the end
@@ -295,7 +300,8 @@ static unsigned long find_ftrace_tramp(unsigned long ip)
for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
if (!ftrace_tramps[i])
continue;
- else if (create_branch((void *)ip, ftrace_tramps[i], 0))
+ else if (create_branch(&instr, (void *)ip,
+ ftrace_tramps[i], 0) == 0)
return ftrace_tramps[i];
return 0;
@@ -322,8 +328,10 @@ static int add_ftrace_tramp(unsigned long tramp)
*/
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
- int i, op;
+ int i;
+ struct ppc_inst op;
unsigned long ptr;
+ struct ppc_inst instr;
static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
/* Is this a known long jump tramp? */
@@ -341,7 +349,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
return -1;
/* New trampoline -- read where this goes */
- if (probe_kernel_read(&op, (void *)tramp, sizeof(int))) {
+ if (probe_kernel_read_inst(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n");
return -1;
}
@@ -366,13 +374,13 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
#else
ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
- if (!create_branch((void *)tramp, ptr, 0)) {
+ if (create_branch(&instr, (void *)tramp, ptr, 0)) {
pr_debug("%ps is not reachable from existing mcount tramp\n",
(void *)ptr);
return -1;
}
- if (patch_branch((unsigned int *)tramp, ptr, 0)) {
+ if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
return -1;
}
@@ -388,17 +396,17 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long tramp, ip = rec->ip;
- unsigned int op;
+ struct ppc_inst op;
/* Read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ if (probe_kernel_read_inst(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
/* Make sure that this is still a 24-bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op));
return -EINVAL;
}
@@ -416,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
}
- if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) {
+ if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -428,7 +436,7 @@ int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
- unsigned int old, new;
+ struct ppc_inst old, new;
/*
* If the calling address is more than 24 bits away,
@@ -438,7 +446,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = PPC_INST_NOP;
+ new = ppc_inst(PPC_INST_NOP);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
return __ftrace_make_nop_kernel(rec, addr);
@@ -481,7 +489,7 @@ int ftrace_make_nop(struct module *mod,
*/
#ifndef CONFIG_MPROFILE_KERNEL
static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
/*
* We expect to see:
@@ -492,16 +500,17 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
* The load offset is different depending on the ABI. For simplicity
* just mask it out when doing the compare.
*/
- if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
+ if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
+ (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
return 0;
return 1;
}
#else
static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (op0 != PPC_INST_NOP)
+ if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP)))
return 0;
return 1;
}
@@ -510,18 +519,22 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op[2];
+ struct ppc_inst op[2];
+ struct ppc_inst instr;
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
/* read where this goes */
- if (probe_kernel_read(op, ip, sizeof(op)))
+ if (probe_kernel_read_inst(op, ip))
+ return -EFAULT;
+
+ if (probe_kernel_read_inst(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
pr_err("Unexpected call sequence at %p: %x %x\n",
- ip, op[0], op[1]);
+ ip, ppc_inst_val(op[0]), ppc_inst_val(op[1]));
return -EINVAL;
}
@@ -557,7 +570,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* Ensure branch is within 24 bits */
- if (!create_branch(ip, tramp, BRANCH_SET_LINK)) {
+ if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
pr_err("Branch out of range\n");
return -EINVAL;
}
@@ -574,16 +587,17 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
+ int err;
+ struct ppc_inst op;
unsigned long ip = rec->ip;
/* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+ if (probe_kernel_read_inst(&op, (void *)ip))
return -EFAULT;
/* It should be pointing to a nop */
- if (op != PPC_INST_NOP) {
- pr_err("Expected NOP but have %x\n", op);
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ pr_err("Expected NOP but have %x\n", ppc_inst_val(op));
return -EINVAL;
}
@@ -594,16 +608,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* create the branch to the trampoline */
- op = create_branch((unsigned int *)ip,
- rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
- if (!op) {
+ err = create_branch(&op, (struct ppc_inst *)ip,
+ rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ if (err) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
pr_devel("write to %lx\n", rec->ip);
- if (patch_instruction((unsigned int *)ip, op))
+ if (patch_instruction((struct ppc_inst *)ip, op))
return -EPERM;
return 0;
@@ -613,7 +627,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
+ struct ppc_inst op;
void *ip = (void *)rec->ip;
unsigned long tramp, entry, ptr;
@@ -634,13 +648,13 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
/* Make sure we have a nop */
- if (probe_kernel_read(&op, ip, sizeof(op))) {
+ if (probe_kernel_read_inst(&op, ip)) {
pr_err("Unable to read ftrace location %p\n", ip);
return -EFAULT;
}
- if (op != PPC_INST_NOP) {
- pr_err("Unexpected call sequence at %p: %x\n", ip, op);
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ pr_err("Unexpected call sequence at %p: %x\n", ip, ppc_inst_val(op));
return -EINVAL;
}
@@ -661,7 +675,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
- unsigned int old, new;
+ struct ppc_inst old, new;
/*
* If the calling address is more than 24 bits away,
@@ -670,7 +684,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = PPC_INST_NOP;
+ old = ppc_inst(PPC_INST_NOP);
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
@@ -700,7 +714,7 @@ static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
- unsigned int op;
+ struct ppc_inst op;
unsigned long ip = rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
@@ -712,14 +726,14 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ if (probe_kernel_read_inst(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
/* Make sure that this is still a 24-bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %x\n", ppc_inst_val(op));
return -EINVAL;
}
@@ -748,7 +762,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
/* The new target may be within range */
if (test_24bit_addr(ip, addr)) {
/* within range */
- if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) {
+ if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -776,12 +790,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* Ensure branch is within 24 bits */
- if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
+ if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("Branch out of range\n");
return -EINVAL;
}
- if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
+ if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -794,7 +808,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned long ip = rec->ip;
- unsigned int old, new;
+ struct ppc_inst old, new;
/*
* If the calling address is more than 24 bits away,
@@ -834,10 +848,10 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned int old, new;
+ struct ppc_inst old, new;
int ret;
- old = *(unsigned int *)&ftrace_call;
+ old = ppc_inst_read((struct ppc_inst *)&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
@@ -845,7 +859,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- old = *(unsigned int *)&ftrace_regs_call;
+ old = ppc_inst_read((struct ppc_inst *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
@@ -919,7 +933,7 @@ int ftrace_enable_ftrace_graph_caller(void)
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- unsigned int old, new;
+ struct ppc_inst old, new;
old = ftrace_call_replace(ip, stub, 0);
new = ftrace_call_replace(ip, addr, 0);
@@ -932,7 +946,7 @@ int ftrace_disable_ftrace_graph_caller(void)
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- unsigned int old, new;
+ struct ppc_inst old, new;
old = ftrace_call_replace(ip, addr, 0);
new = ftrace_call_replace(ip, stub, 0);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b44dd75de517..97413a385720 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -39,7 +39,6 @@
#include <linux/kmsg_dump.h>
#include <asm/emulated_ops.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
@@ -442,6 +441,9 @@ void system_reset_exception(struct pt_regs *regs)
{
unsigned long hsrr0, hsrr1;
bool saved_hsrrs = false;
+ u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
+
+ this_cpu_set_ftrace_enabled(0);
nmi_enter();
@@ -504,11 +506,11 @@ out:
#ifdef CONFIG_PPC_BOOK3S_64
BUG_ON(get_paca()->in_nmi == 0);
if (get_paca()->in_nmi > 1)
- nmi_panic(regs, "Unrecoverable nested System Reset");
+ die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
- nmi_panic(regs, "Unrecoverable System Reset");
+ die("Unrecoverable System Reset", regs, SIGABRT);
if (saved_hsrrs) {
mtspr(SPRN_HSRR0, hsrr0);
@@ -517,6 +519,8 @@ out:
nmi_exit();
+ this_cpu_set_ftrace_enabled(ftrace_enabled);
+
/* What should we do here? We could issue a shutdown or hard reset. */
}
@@ -576,6 +580,8 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED ESR_PPR
#define REASON_TRAP ESR_PTR
+#define REASON_PREFIXED 0
+#define REASON_BOUNDARY 0
/* single-step stuff */
#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
@@ -590,12 +596,16 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_ILLEGAL SRR1_PROGILL
#define REASON_PRIVILEGED SRR1_PROGPRIV
#define REASON_TRAP SRR1_PROGTRAP
+#define REASON_PREFIXED SRR1_PREFIXED
+#define REASON_BOUNDARY SRR1_BOUNDARY
#define single_stepping(regs) ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE)
#endif
+#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
+
#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
@@ -817,7 +827,19 @@ void machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
- nmi_enter();
+ /*
+ * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
+ * enabled).
+ *
+ * This is silly. The BOOK3S_64 should just call a different function
+ * rather than expecting semantics to magically change. Something
+ * like 'non_nmi_machine_check_exception()', perhaps?
+ */
+ const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);
+
+ if (nmi) nmi_enter();
__this_cpu_inc(irq_stat.mce_exceptions);
@@ -843,18 +865,18 @@ void machine_check_exception(struct pt_regs *regs)
if (check_io_access(regs))
goto bail;
- nmi_exit();
+ if (nmi) nmi_exit();
die("Machine check", regs, SIGBUS);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
- nmi_panic(regs, "Unrecoverable Machine check");
+ die("Unrecoverable Machine check", regs, SIGBUS);
return;
bail:
- nmi_exit();
+ if (nmi) nmi_exit();
}
void SMIException(struct pt_regs *regs)
@@ -1583,11 +1605,20 @@ void alignment_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
int sig, code, fixed = 0;
+ unsigned long reason;
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
+ reason = get_reason(regs);
+
+ if (reason & REASON_BOUNDARY) {
+ sig = SIGBUS;
+ code = BUS_ADRALN;
+ goto bad;
+ }
+
if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
goto bail;
@@ -1596,7 +1627,8 @@ void alignment_exception(struct pt_regs *regs)
fixed = fix_alignment(regs);
if (fixed == 1) {
- regs->nip += 4; /* skip over emulated instruction */
+ /* skip over emulated instruction */
+ regs->nip += inst_length(reason);
emulate_single_step(regs);
goto bail;
}
@@ -1609,6 +1641,7 @@ void alignment_exception(struct pt_regs *regs)
sig = SIGBUS;
code = BUS_ADRALN;
}
+bad:
if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
else
@@ -1710,6 +1743,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TAR_LG] = "TAR",
[FSCR_MSGP_LG] = "MSGP",
[FSCR_SCV_LG] = "SCV",
+ [FSCR_PREFIX_LG] = "PREFIX",
};
char *facility = "unknown";
u64 value;
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 1cfef0e5fec5..d200e7df7167 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -14,6 +14,7 @@
#include <linux/kdebug.h>
#include <asm/sstep.h>
+#include <asm/inst.h>
#define UPROBE_TRAP_NR UINT_MAX
@@ -111,7 +112,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* support doesn't exist and have to fix-up the next instruction
* to be executed.
*/
- regs->nip = utask->vaddr + MAX_UINSN_BYTES;
+ regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, &auprobe->insn);
user_disable_single_step(current);
return 0;
@@ -173,7 +174,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, auprobe->insn);
+ ret = emulate_step(regs, ppc_inst_read(&auprobe->insn));
if (ret > 0)
return true;
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f38f26e844b6..e0f4ba45b6cc 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -18,7 +18,6 @@
#include <linux/security.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -171,7 +170,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* and end up putting it elsewhere.
* Add enough to the size so that the result can be aligned.
*/
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, vdso_base,
(vdso_pages << PAGE_SHIFT) +
@@ -211,11 +210,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto fail_mmapsem;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
fail_mmapsem:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return rc;
}
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 4acd3fb2b38e..ae632569446f 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/switch_to.h>
#include <linux/uaccess.h>
+#include <asm/inst.h>
/* Functions in vector.S */
extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
@@ -260,21 +261,24 @@ static unsigned int rfin(unsigned int x)
int emulate_altivec(struct pt_regs *regs)
{
- unsigned int instr, i;
+ struct ppc_inst instr;
+ unsigned int i, word;
unsigned int va, vb, vc, vd;
vector128 *vrs;
- if (get_user(instr, (unsigned int __user *) regs->nip))
+ if (get_user_instr(instr, (void __user *)regs->nip))
return -EFAULT;
- if ((instr >> 26) != 4)
+
+ word = ppc_inst_val(instr);
+ if (ppc_inst_primary_opcode(instr) != 4)
return -EINVAL; /* not an altivec instruction */
- vd = (instr >> 21) & 0x1f;
- va = (instr >> 16) & 0x1f;
- vb = (instr >> 11) & 0x1f;
- vc = (instr >> 6) & 0x1f;
+ vd = (word >> 21) & 0x1f;
+ va = (word >> 16) & 0x1f;
+ vb = (word >> 11) & 0x1f;
+ vc = (word >> 6) & 0x1f;
vrs = current->thread.vr_state.vr;
- switch (instr & 0x3f) {
+ switch (word & 0x3f) {
case 10:
switch (vc) {
case 0: /* vaddfp */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index d20c5e79e03c..efc5b52f95d2 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -89,6 +89,7 @@ _GLOBAL(load_up_altivec)
REST_32VRS(0,r4,r6)
/* restore registers and return */
blr
+_ASM_NOKPROBE_SYMBOL(load_up_altivec)
/*
* save_altivec(tsk)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a1706b63b82d..326e113d2e45 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -15,7 +15,6 @@
#include <asm/thread_info.h>
#define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)
-#define ETEXT_ALIGN_SIZE (1 << CONFIG_ETEXT_SHIFT)
ENTRY(_stext)
@@ -117,7 +116,7 @@ SECTIONS
} :text
- . = ALIGN(ETEXT_ALIGN_SIZE);
+ . = ALIGN(PAGE_SIZE);
_etext = .;
PROVIDE32 (etext = .);
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 078fe3d76feb..56da5eb2b923 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -115,11 +115,12 @@ void machine_kexec(struct kimage *image)
void __init reserve_crashkernel(void)
{
- unsigned long long crash_size, crash_base;
+ unsigned long long crash_size, crash_base, total_mem_sz;
int ret;
+ total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
/* use common parsing */
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, total_mem_sz,
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
@@ -178,6 +179,7 @@ void __init reserve_crashkernel(void)
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
+ total_mem_sz = memory_limit;
printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
memory_limit);
}
@@ -186,7 +188,7 @@ void __init reserve_crashkernel(void)
"for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crashk_res.start >> 20),
- (unsigned long)(memblock_phys_mem_size() >> 20));
+ (unsigned long)(total_mem_sz >> 20));
if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
memblock_reserve(crashk_res.start, crash_size)) {
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index d488311efab1..c9a889880214 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -311,6 +311,9 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
unsigned int i;
int (*old_handler)(struct pt_regs *regs);
+ /* Avoid hardlocking with an unresponsive CPU holding logbuf_lock */
+ printk_nmi_enter();
+
/*
* This function is only called after the system
* has panicked or is otherwise in a critical state.
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 36a07656ebbb..7c5a1812a1c3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -281,11 +281,10 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
{
long ret;
- /* Protect linux PTE lookup from page table destruction */
- rcu_read_lock_sched(); /* this disables preemption too */
+ preempt_disable();
ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
kvm->mm->pgd, false, pte_idx_ret);
- rcu_read_unlock_sched();
+ preempt_enable();
if (ret == H_TOO_HARD) {
/* this can't happen */
pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
@@ -582,7 +581,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
* We always ask for write permission since the common case
* is that the page is writable.
*/
- if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
write_ok = true;
} else {
/* Call KVM generic code to do the slow-path check */
@@ -602,12 +601,12 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
* Read the PTE from the process' radix tree and use that
* so we get the shift and attribute bits.
*/
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ spin_lock(&kvm->mmu_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
pte = __pte(0);
if (ptep)
- pte = *ptep;
- local_irq_enable();
+ pte = READ_ONCE(*ptep);
+ spin_unlock(&kvm->mmu_lock);
/*
* If the PTE disappeared temporarily due to a THP
* collapse, just return and let the guest try again.
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index aa41183d2a97..3cb0c9843d01 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -11,12 +11,12 @@
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
+#include <linux/pgtable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
@@ -514,13 +514,14 @@ void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
unsigned long ig;
for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
+ p4d_t *p4d = p4d_offset(pgd, 0);
pud_t *pud;
- if (!pgd_present(*pgd))
+ if (!p4d_present(*p4d))
continue;
- pud = pud_offset(pgd, 0);
+ pud = pud_offset(p4d, 0);
kvmppc_unmap_free_pud(kvm, pud, lpid);
- pgd_clear(pgd);
+ p4d_clear(p4d);
}
}
@@ -581,6 +582,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud, *new_pud = NULL;
pmd_t *pmd, *new_pmd = NULL;
pte_t *ptep, *new_ptep = NULL;
@@ -588,9 +590,11 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Traverse the guest's 2nd-level tree, allocate new levels needed */
pgd = pgtable + pgd_index(gpa);
+ p4d = p4d_offset(pgd, gpa);
+
pud = NULL;
- if (pgd_present(*pgd))
- pud = pud_offset(pgd, gpa);
+ if (p4d_present(*p4d))
+ pud = pud_offset(p4d, gpa);
else
new_pud = pud_alloc_one(kvm->mm, gpa);
@@ -611,13 +615,13 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Now traverse again under the lock and change the tree */
ret = -ENOMEM;
- if (pgd_none(*pgd)) {
+ if (p4d_none(*p4d)) {
if (!new_pud)
goto out_unlock;
- pgd_populate(kvm->mm, pgd, new_pud);
+ p4d_populate(kvm->mm, p4d, new_pud);
new_pud = NULL;
}
- pud = pud_offset(pgd, gpa);
+ pud = pud_offset(p4d, gpa);
if (pud_is_leaf(*pud)) {
unsigned long hgpa = gpa & PUD_MASK;
@@ -750,7 +754,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
return ret;
}
-bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
unsigned long gpa, unsigned int lpid)
{
unsigned long pgflags;
@@ -765,12 +769,12 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
pgflags = _PAGE_ACCESSED;
if (writing)
pgflags |= _PAGE_DIRTY;
- /*
- * We are walking the secondary (partition-scoped) page table here.
- * We can do this without disabling irq because the Linux MM
- * subsystem doesn't do THP splits and collapses on this tree.
- */
- ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
+
+ if (nested)
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
+ else
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+
if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
return true;
@@ -806,7 +810,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
* is that the page is writable.
*/
hva = gfn_to_hva_memslot(memslot, gfn);
- if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
upgrade_write = true;
} else {
unsigned long pfn;
@@ -828,12 +832,12 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
* Read the PTE from the process' radix tree and use that
* so we get the shift and attribute bits.
*/
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ spin_lock(&kvm->mmu_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
pte = __pte(0);
if (ptep)
- pte = *ptep;
- local_irq_enable();
+ pte = READ_ONCE(*ptep);
+ spin_unlock(&kvm->mmu_lock);
/*
* If the PTE disappeared temporarily due to a THP
* collapse, just return and let the guest try again.
@@ -964,8 +968,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
/* Failed to set the reference/change bits */
if (dsisr & DSISR_SET_RC) {
spin_lock(&kvm->mmu_lock);
- if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
- writing, gpa, kvm->arch.lpid))
+ if (kvmppc_hv_handle_set_rc(kvm, false, writing,
+ gpa, kvm->arch.lpid))
dsisr &= ~DSISR_SET_RC;
spin_unlock(&kvm->mmu_lock);
@@ -996,11 +1000,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
return 0;
}
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid);
- return 0;
+ return 0;
}
/* Called with kvm->mmu_lock held */
@@ -1016,7 +1020,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
gpa, shift);
@@ -1043,7 +1047,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
return ref;
@@ -1055,7 +1059,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
{
unsigned long gfn = memslot->base_gfn + pagenum;
unsigned long gpa = gfn << PAGE_SHIFT;
- pte_t *ptep;
+ pte_t *ptep, pte;
unsigned int shift;
int ret = 0;
unsigned long old, *rmapp;
@@ -1063,12 +1067,35 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ret;
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
- if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
- ret = 1;
- if (shift)
- ret = 1 << (shift - PAGE_SHIFT);
+ /*
+ * For performance reasons we don't hold kvm->mmu_lock while walking the
+ * partition scoped table.
+ */
+ ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+ if (!ptep)
+ return 0;
+
+ pte = READ_ONCE(*ptep);
+ if (pte_present(pte) && pte_dirty(pte)) {
spin_lock(&kvm->mmu_lock);
+ /*
+ * Recheck the pte under the mmu_lock
+ */
+ if (pte_val(pte) != pte_val(*ptep)) {
+ /*
+ * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+ * only find PAGE_SIZE pte entries here. We can continue
+ * to use the pte addr returned by the above page table
+ * walk.
+ */
+ if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+ spin_unlock(&kvm->mmu_lock);
+ return 0;
+ }
+ }
+
+ ret = 1;
+ VM_BUG_ON(shift);
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
@@ -1124,7 +1151,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
gpa = memslot->base_gfn << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
for (n = memslot->npages; n; --n) {
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid);
@@ -1240,7 +1267,8 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
unsigned long gpa;
pgd_t *pgt;
struct kvm_nested_guest *nested;
- pgd_t pgd, *pgdp;
+ pgd_t *pgdp;
+ p4d_t p4d, *p4dp;
pud_t pud, *pudp;
pmd_t pmd, *pmdp;
pte_t *ptep;
@@ -1313,13 +1341,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
}
pgdp = pgt + pgd_index(gpa);
- pgd = READ_ONCE(*pgdp);
- if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
- gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
+ p4dp = p4d_offset(pgdp, gpa);
+ p4d = READ_ONCE(*p4dp);
+ if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
+ gpa = (gpa & P4D_MASK) + P4D_SIZE;
continue;
}
- pudp = pud_offset(&pgd, gpa);
+ pudp = pud_offset(&p4d, gpa);
pud = READ_ONCE(*pudp);
if (!(pud_val(pud) & _PAGE_PRESENT)) {
gpa = (gpa & PUD_MASK) + PUD_SIZE;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 6fcaf1fa8e02..ac6ac192b8bb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -74,8 +74,8 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
EXPORT_SYMBOL_GPL(kvmppc_find_table);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
- unsigned long *ua, unsigned long **prmap)
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
+ unsigned long tce, unsigned long *ua)
{
unsigned long gfn = tce >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
@@ -87,9 +87,6 @@ static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
*ua = __gfn_to_hva_memslot(memslot, gfn) |
(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
- if (prmap)
- *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-
return 0;
}
@@ -116,7 +113,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
- if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
return H_TOO_HARD;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -208,7 +205,7 @@ static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
idx = (ioba >> stt->page_shift) - stt->offset;
sttpage = idx / TCES_PER_PAGE;
- sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
+ sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
TCES_PER_PAGE;
for (i = sttpage; i < sttpage + sttpages; ++i)
if (!stt->pages[i])
@@ -411,7 +408,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
return H_PARAMETER;
entry = ioba >> stt->page_shift;
@@ -437,8 +434,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return H_SUCCESS;
}
-static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
- unsigned long ua, unsigned long *phpa)
+static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+ unsigned long ua, unsigned long *phpa)
{
pte_t *ptep, pte;
unsigned shift = 0;
@@ -452,10 +449,17 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
* to exit which will again result in the below page table walk
* to finish.
*/
- ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
- if (!ptep || !pte_present(*ptep))
+ /* An rmap lock won't make this safe, because it only ensures that hash
+ * page table entries are removed with the rmap lock held. After that the
+ * mmu notifier returns and we go ahead and remove ptes from the Qemu page table.
+ */
+ ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
+ if (!ptep)
+ return -ENXIO;
+
+ pte = READ_ONCE(*ptep);
+ if (!pte_present(pte))
return -ENXIO;
- pte = *ptep;
if (!shift)
shift = PAGE_SHIFT;
@@ -477,10 +481,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unsigned long liobn, unsigned long ioba,
unsigned long tce_list, unsigned long npages)
{
+ struct kvm *kvm = vcpu->kvm;
struct kvmppc_spapr_tce_table *stt;
long i, ret = H_SUCCESS;
unsigned long tces, entry, ua = 0;
- unsigned long *rmap = NULL;
+ unsigned long mmu_seq;
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
@@ -488,6 +493,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (kvm_is_radix(vcpu->kvm))
return H_TOO_HARD;
+ /*
+ * used to check for invalidations in progress
+ */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -515,7 +526,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
struct mm_iommu_table_group_mem_t *mem;
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
return H_TOO_HARD;
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -531,23 +542,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
* We do not require memory to be preregistered in this case
* so lock rmap and do __find_linux_pte_or_hugepte().
*/
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
- return H_TOO_HARD;
-
- rmap = (void *) vmalloc_to_phys(rmap);
- if (WARN_ON_ONCE_RM(!rmap))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
return H_TOO_HARD;
- /*
- * Synchronize with the MMU notifier callbacks in
- * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
- * While we have the rmap lock, code running on other CPUs
- * cannot finish unmapping the host real page that backs
- * this guest real page, so we are OK to access the host
- * real page.
- */
- lock_rmap(rmap);
- if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
ret = H_TOO_HARD;
goto unlock_exit;
}
@@ -565,7 +564,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
ua = 0;
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
goto invalidate_exit;
}
@@ -590,9 +589,8 @@ invalidate_exit:
iommu_tce_kill_rm(stit->tbl, entry, npages);
unlock_exit:
- if (rmap)
- unlock_rmap(rmap);
-
+ if (!prereg)
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 1b84806abef6..6bf66649ab92 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -342,9 +342,6 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
vcpu->arch.pvr = pvr;
}
-/* Dummy value used in computing PCR value below */
-#define PCR_ARCH_300 (PCR_ARCH_207 << 1)
-
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
@@ -3395,8 +3392,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
int trap;
unsigned long host_hfscr = mfspr(SPRN_HFSCR);
unsigned long host_ciabr = mfspr(SPRN_CIABR);
- unsigned long host_dawr = mfspr(SPRN_DAWR);
- unsigned long host_dawrx = mfspr(SPRN_DAWRX);
+ unsigned long host_dawr = mfspr(SPRN_DAWR0);
+ unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
unsigned long host_psscr = mfspr(SPRN_PSSCR);
unsigned long host_pidr = mfspr(SPRN_PID);
@@ -3425,8 +3422,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_SPURR, vcpu->arch.spurr);
if (dawr_enabled()) {
- mtspr(SPRN_DAWR, vcpu->arch.dawr);
- mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr);
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
}
mtspr(SPRN_CIABR, vcpu->arch.ciabr);
mtspr(SPRN_IC, vcpu->arch.ic);
@@ -3478,8 +3475,8 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
(local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
mtspr(SPRN_HFSCR, host_hfscr);
mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR, host_dawr);
- mtspr(SPRN_DAWRX, host_dawrx);
+ mtspr(SPRN_DAWR0, host_dawr);
+ mtspr(SPRN_DAWRX0, host_dawrx);
mtspr(SPRN_PID, host_pidr);
/*
@@ -4629,14 +4626,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
vma = find_vma(kvm->mm, hva);
if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
/* We can handle 4k, 64k or 16M pages in the VRMA */
if (psize >= 0x1000000)
@@ -4669,7 +4666,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
return err;
up_out:
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
goto out_srcu;
}
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 2fda52b969dc..0989751c9d5e 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -11,11 +11,11 @@
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
+#include <linux/pgtable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
@@ -749,6 +749,23 @@ static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
return kvm->arch.nested_guests[lpid];
}
+pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+ unsigned long ea, unsigned *hshift)
+{
+ struct kvm_nested_guest *gp;
+ pte_t *pte;
+
+ gp = kvmhv_find_nested(kvm, lpid);
+ if (!gp)
+ return NULL;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+ "%s called with kvm mmu_lock not held \n", __func__);
+ pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
+
+ return pte;
+}
+
static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
@@ -791,19 +808,15 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
unsigned long clr, unsigned long set,
unsigned long hpa, unsigned long mask)
{
- struct kvm_nested_guest *gp;
unsigned long gpa;
unsigned int shift, lpid;
pte_t *ptep;
gpa = n_rmap & RMAP_NESTED_GPA_MASK;
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
- gp = kvmhv_find_nested(kvm, lpid);
- if (!gp)
- return;
/* Find the pte */
- ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
/*
* If the pte is present and the pfn is still the same, update the pte.
* If the pfn has changed then this is a stale rmap entry, the nested
@@ -853,7 +866,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
return;
/* Find and invalidate the pte */
- ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
/* Don't spuriously invalidate ptes if the pfn has changed */
if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
@@ -920,7 +933,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
int shift;
spin_lock(&kvm->mmu_lock);
- ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
if (!shift)
shift = PAGE_SHIFT;
if (ptep && pte_present(*ptep)) {
@@ -1168,7 +1181,7 @@ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
/* Can we execute? */
if (!gpte_p->may_execute) {
- flags |= SRR1_ISI_N_OR_G;
+ flags |= SRR1_ISI_N_G_OR_CIP;
goto forward_to_l1;
}
} else {
@@ -1211,16 +1224,16 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
spin_lock(&kvm->mmu_lock);
/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
- ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
- gpte.raddr, kvm->arch.lpid);
+ ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
+ gpte.raddr, kvm->arch.lpid);
if (!ret) {
ret = -EINVAL;
goto out_unlock;
}
/* Set the rc bit in the pte of the shadow_pgtable for the nest guest */
- ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
- gp->shadow_lpid);
+ ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
+ n_gpa, gp->shadow_lpid);
if (!ret)
ret = -EINVAL;
else
@@ -1360,7 +1373,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
/* See if can find translation in our partition scoped tables for L1 */
pte = __pte(0);
spin_lock(&kvm->mmu_lock);
- pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
if (!shift)
shift = PAGE_SHIFT;
if (pte_p)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 220305454c23..88da2764c1bb 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -210,7 +210,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pte_t *ptep;
unsigned int writing;
unsigned long mmu_seq;
- unsigned long rcbits, irq_flags = 0;
+ unsigned long rcbits;
if (kvm_is_radix(kvm))
return H_FUNCTION;
@@ -248,17 +248,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
- /*
- * If we had a page table table change after lookup, we would
- * retry via mmu_notifier_retry.
- */
- if (!realmode)
- local_irq_save(irq_flags);
- /*
- * If called in real mode we have MSR_EE = 0. Otherwise
- * we disable irq above.
- */
- ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
if (ptep) {
pte_t pte;
unsigned int host_pte_size;
@@ -272,8 +264,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* to <= host page size, if host is using hugepage
*/
if (host_pte_size < psize) {
- if (!realmode)
- local_irq_restore(flags);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return H_PARAMETER;
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -287,8 +278,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pa |= gpa & ~PAGE_MASK;
}
}
- if (!realmode)
- local_irq_restore(irq_flags);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
ptel |= pa;
@@ -888,8 +878,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
return ret;
}
-static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
- int writing, unsigned long *hpa,
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+ unsigned long gpa, int writing, unsigned long *hpa,
struct kvm_memory_slot **memslot_p)
{
struct kvm *kvm = vcpu->kvm;
@@ -908,7 +898,7 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
hva = __gfn_to_hva_memslot(memslot, gfn);
/* Try to find the host pte for that virtual address */
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
if (!ptep)
return H_TOO_HARD;
pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -943,16 +933,11 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
- ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
- if (ret != H_SUCCESS)
- return ret;
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
- /* Check if we've been invalidated */
- raw_spin_lock(&kvm->mmu_lock.rlock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
- ret = H_TOO_HARD;
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+ if (ret != H_SUCCESS)
goto out_unlock;
- }
/* Zero the page */
for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
@@ -960,7 +945,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
out_unlock:
- raw_spin_unlock(&kvm->mmu_lock.rlock);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return ret;
}
@@ -976,19 +961,14 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
- ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
- if (ret != H_SUCCESS)
- return ret;
- ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
if (ret != H_SUCCESS)
- return ret;
+ goto out_unlock;
- /* Check if we've been invalidated */
- raw_spin_lock(&kvm->mmu_lock.rlock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
- ret = H_TOO_HARD;
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+ if (ret != H_SUCCESS)
goto out_unlock;
- }
/* Copy the page */
memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
@@ -996,7 +976,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
out_unlock:
- raw_spin_unlock(&kvm->mmu_lock.rlock);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return ret;
}
@@ -1260,7 +1240,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */
if (!data) {
if (gr & (HPTE_R_N | HPTE_R_G))
- return status | SRR1_ISI_N_OR_G;
+ return status | SRR1_ISI_N_G_OR_CIP;
if (!hpte_read_permission(pp, slb_v & key))
return status | SRR1_ISI_PROT;
} else if (status & DSISR_ISSTORE) {
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 287d5911df0f..4d7e5610731a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -8,6 +8,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -15,7 +16,6 @@
#include <asm/xics.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
-#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
index 174d75e476fa..6f18632e30e9 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
@@ -3,6 +3,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -11,7 +12,6 @@
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
-#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 780a499c7114..71943892c81c 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -707,8 +707,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
mfspr r5, SPRN_CIABR
- mfspr r6, SPRN_DAWR
- mfspr r7, SPRN_DAWRX
+ mfspr r6, SPRN_DAWR0
+ mfspr r7, SPRN_DAWRX0
mfspr r8, SPRN_IAMR
std r5, STACK_SLOT_CIABR(r1)
std r6, STACK_SLOT_DAWR(r1)
@@ -803,8 +803,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
beq 1f
ld r5, VCPU_DAWR(r4)
ld r6, VCPU_DAWRX(r4)
- mtspr SPRN_DAWR, r5
- mtspr SPRN_DAWRX, r6
+ mtspr SPRN_DAWR0, r5
+ mtspr SPRN_DAWRX0, r6
1:
ld r7, VCPU_CIABR(r4)
ld r8, VCPU_TAR(r4)
@@ -1766,8 +1766,8 @@ BEGIN_FTR_SECTION
* If the DAWR doesn't work, it's ok to write these here as
* this value should always be zero
*/
- mtspr SPRN_DAWR, r6
- mtspr SPRN_DAWRX, r7
+ mtspr SPRN_DAWR0, r6
+ mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
ld r5, STACK_SLOT_TID(r1)
@@ -2577,8 +2577,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfmsr r6
andi. r6, r6, MSR_DR /* in real mode? */
bne 4f
- mtspr SPRN_DAWR, r4
- mtspr SPRN_DAWRX, r5
+ mtspr SPRN_DAWR0, r4
+ mtspr SPRN_DAWRX0, r5
4: li r3, 0
blr
@@ -2907,6 +2907,11 @@ kvm_cede_exit:
beq 4f
li r0, 0
stb r0, VCPU_CEDED(r9)
+ /*
+ * The escalation interrupts are special as we don't EOI them.
+ * There is no need to use the load-after-store ordering offset
+ * to set PQ to 10 as we won't use StoreEOI.
+ */
li r6, XIVE_ESB_SET_PQ_10
b 5f
4: li r0, 1
@@ -3329,7 +3334,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_AMR, r0
mtspr SPRN_IAMR, r0
mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX, r0
+ mtspr SPRN_DAWRX0, r0
BEGIN_MMU_FTR_SECTION
b 4f
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2cb3ce4931c..09d8119024db 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -47,7 +47,7 @@
* Locking order
*
* 1. kvm->srcu - Protects KVM memslots
- * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise
+ * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
* 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
* as sync-points for page-in/out
*/
@@ -402,13 +402,13 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
mig.dst = &dst_pfn;
/*
- * We come here with mmap_sem write lock held just for
- * ksm_madvise(), otherwise we only need read mmap_sem.
+ * We come here with mmap_lock write lock held just for
+ * ksm_madvise(), otherwise we only need read mmap_lock.
* Hence downgrade to read lock once ksm_madvise() is done.
*/
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags);
- downgrade_write(&kvm->mm->mmap_sem);
+ mmap_write_downgrade(kvm->mm);
*downgrade = true;
if (ret)
return ret;
@@ -525,7 +525,7 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_write(&kvm->mm->mmap_sem);
+ mmap_write_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
@@ -548,9 +548,9 @@ out_unlock:
mutex_unlock(&kvm->arch.uvmem_lock);
out:
if (downgrade)
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
else
- up_write(&kvm->mm->mmap_sem);
+ mmap_write_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
@@ -703,7 +703,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
goto out;
@@ -716,7 +716,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
ret = H_SUCCESS;
out:
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 6ef0151ff70a..bdea91df1497 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -31,6 +31,12 @@ static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
u64 val;
+ /*
+ * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
+ * load operation, so there is no need to enforce load-after-store
+ * ordering.
+ */
+
if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
offset |= offset << 4;
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index a8a900ace1e6..4ad3c0279458 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -58,6 +58,9 @@ static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
{
u64 val;
+ if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ offset |= XIVE_ESB_LD_ST_MO;
+
if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
offset |= offset << 4;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index df9989cf7ba3..d6c1069e9954 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -355,7 +355,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 1) {
struct vm_area_struct *vma;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
vma = find_vma(kvm->mm, hva);
if (vma && hva >= vma->vm_start &&
@@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
}
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
}
if (likely(!pfnmap)) {
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index e8a47c84d77d..48272a9b9c30 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -94,7 +94,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = vcpu->arch.shared->msr;
- if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
+ if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 3dfae0cb6228..315c94946bad 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -5,10 +5,10 @@
* Copyright (C) 2010 Alexander Graf (agraf@suse.de)
*/
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index b8de3be10eb4..5e994cda8e40 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,7 +16,7 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y += alloc.o code-patching.o feature-fixups.o pmem.o
+obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o
ifndef CONFIG_KASAN
obj-y += string.o memcmp_$(BITS).o
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 3345f039a876..0a051dfeb177 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -12,18 +12,23 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/setup.h>
+#include <asm/inst.h>
-static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
- unsigned int *patch_addr)
+static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr,
+ struct ppc_inst *patch_addr)
{
int err = 0;
- __put_user_asm(instr, patch_addr, err, "stw");
+ if (!ppc_inst_prefixed(instr)) {
+ __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw");
+ } else {
+ __put_user_asm(ppc_inst_as_u64(instr), patch_addr, err, "std");
+ }
+
if (err)
return err;
@@ -33,7 +38,7 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
return 0;
}
-int raw_patch_instruction(unsigned int *addr, unsigned int instr)
+int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
{
return __patch_instruction(addr, instr, addr);
}
@@ -107,13 +112,18 @@ static inline int unmap_patch_area(unsigned long addr)
pte_t *ptep;
pmd_t *pmdp;
pud_t *pudp;
+ p4d_t *p4dp;
pgd_t *pgdp;
pgdp = pgd_offset_k(addr);
if (unlikely(!pgdp))
return -EINVAL;
- pudp = pud_offset(pgdp, addr);
+ p4dp = p4d_offset(pgdp, addr);
+ if (unlikely(!p4dp))
+ return -EINVAL;
+
+ pudp = pud_offset(p4dp, addr);
if (unlikely(!pudp))
return -EINVAL;
@@ -136,10 +146,10 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-static int do_patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
{
int err;
- unsigned int *patch_addr = NULL;
+ struct ppc_inst *patch_addr = NULL;
unsigned long flags;
unsigned long text_poke_addr;
unsigned long kaddr = (unsigned long)addr;
@@ -160,8 +170,7 @@ static int do_patch_instruction(unsigned int *addr, unsigned int instr)
goto out;
}
- patch_addr = (unsigned int *)(text_poke_addr) +
- ((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
+ patch_addr = (struct ppc_inst *)(text_poke_addr + (kaddr & ~PAGE_MASK));
__patch_instruction(addr, instr, patch_addr);
@@ -176,14 +185,14 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-static int do_patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(unsigned int *addr, unsigned int instr)
+int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
{
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(addr, 4)) {
@@ -194,9 +203,12 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
}
NOKPROBE_SYMBOL(patch_instruction);
-int patch_branch(unsigned int *addr, unsigned long target, int flags)
+int patch_branch(struct ppc_inst *addr, unsigned long target, int flags)
{
- return patch_instruction(addr, create_branch(addr, target, flags));
+ struct ppc_inst instr;
+
+ create_branch(&instr, addr, target, flags);
+ return patch_instruction(addr, instr);
}
bool is_offset_in_branch_range(long offset)
@@ -225,14 +237,14 @@ bool is_offset_in_branch_range(long offset)
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
*/
-bool is_conditional_branch(unsigned int instr)
+bool is_conditional_branch(struct ppc_inst instr)
{
- unsigned int opcode = instr >> 26;
+ unsigned int opcode = ppc_inst_primary_opcode(instr);
if (opcode == 16) /* bc, bca, bcl, bcla */
return true;
if (opcode == 19) {
- switch ((instr >> 1) & 0x3ff) {
+ switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
case 16: /* bclr, bclrl */
case 528: /* bcctr, bcctrl */
case 560: /* bctar, bctarl */
@@ -243,30 +255,30 @@ bool is_conditional_branch(unsigned int instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-unsigned int create_branch(const unsigned int *addr,
- unsigned long target, int flags)
+int create_branch(struct ppc_inst *instr,
+ const struct ppc_inst *addr,
+ unsigned long target, int flags)
{
- unsigned int instruction;
long offset;
+ *instr = ppc_inst(0);
offset = target;
if (! (flags & BRANCH_ABSOLUTE))
offset = offset - (unsigned long)addr;
/* Check we can represent the target in the instruction format */
if (!is_offset_in_branch_range(offset))
- return 0;
+ return 1;
/* Mask out the flags and target, so they don't step on each other. */
- instruction = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC);
+ *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
- return instruction;
+ return 0;
}
-unsigned int create_cond_branch(const unsigned int *addr,
- unsigned long target, int flags)
+int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+ unsigned long target, int flags)
{
- unsigned int instruction;
long offset;
offset = target;
@@ -275,104 +287,107 @@ unsigned int create_cond_branch(const unsigned int *addr,
/* Check we can represent the target in the instruction format */
if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
- return 0;
+ return 1;
/* Mask out the flags and target, so they don't step on each other. */
- instruction = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC);
+ *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));
- return instruction;
+ return 0;
}
-static unsigned int branch_opcode(unsigned int instr)
+static unsigned int branch_opcode(struct ppc_inst instr)
{
- return (instr >> 26) & 0x3F;
+ return ppc_inst_primary_opcode(instr) & 0x3F;
}
-static int instr_is_branch_iform(unsigned int instr)
+static int instr_is_branch_iform(struct ppc_inst instr)
{
return branch_opcode(instr) == 18;
}
-static int instr_is_branch_bform(unsigned int instr)
+static int instr_is_branch_bform(struct ppc_inst instr)
{
return branch_opcode(instr) == 16;
}
-int instr_is_relative_branch(unsigned int instr)
+int instr_is_relative_branch(struct ppc_inst instr)
{
- if (instr & BRANCH_ABSOLUTE)
+ if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
return 0;
return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}
-int instr_is_relative_link_branch(unsigned int instr)
+int instr_is_relative_link_branch(struct ppc_inst instr)
{
- return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK);
+ return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
-static unsigned long branch_iform_target(const unsigned int *instr)
+static unsigned long branch_iform_target(const struct ppc_inst *instr)
{
signed long imm;
- imm = *instr & 0x3FFFFFC;
+ imm = ppc_inst_val(*instr) & 0x3FFFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x2000000)
imm -= 0x4000000;
- if ((*instr & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-static unsigned long branch_bform_target(const unsigned int *instr)
+static unsigned long branch_bform_target(const struct ppc_inst *instr)
{
signed long imm;
- imm = *instr & 0xFFFC;
+ imm = ppc_inst_val(*instr) & 0xFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x8000)
imm -= 0x10000;
- if ((*instr & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-unsigned long branch_target(const unsigned int *instr)
+unsigned long branch_target(const struct ppc_inst *instr)
{
- if (instr_is_branch_iform(*instr))
+ if (instr_is_branch_iform(ppc_inst_read(instr)))
return branch_iform_target(instr);
- else if (instr_is_branch_bform(*instr))
+ else if (instr_is_branch_bform(ppc_inst_read(instr)))
return branch_bform_target(instr);
return 0;
}
-int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
+int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr)
{
- if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
return branch_target(instr) == addr;
return 0;
}
-unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
+int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
+ const struct ppc_inst *src)
{
unsigned long target;
-
target = branch_target(src);
- if (instr_is_branch_iform(*src))
- return create_branch(dest, target, *src);
- else if (instr_is_branch_bform(*src))
- return create_cond_branch(dest, target, *src);
+ if (instr_is_branch_iform(ppc_inst_read(src)))
+ return create_branch(instr, dest, target,
+ ppc_inst_val(ppc_inst_read(src)));
+ else if (instr_is_branch_bform(ppc_inst_read(src)))
+ return create_cond_branch(instr, dest, target,
+ ppc_inst_val(ppc_inst_read(src)));
- return 0;
+ return 1;
}
#ifdef CONFIG_PPC_BOOK3E_64
@@ -387,7 +402,7 @@ void __patch_exception(int exc, unsigned long addr)
* instruction of the exception, not the first one
*/
- patch_branch(ibase + (exc / 4) + 1, addr, 0);
+ patch_branch((struct ppc_inst *)(ibase + (exc / 4) + 1), addr, 0);
}
#endif
@@ -403,165 +418,171 @@ static void __init test_trampoline(void)
static void __init test_branch_iform(void)
{
- unsigned int instr;
+ int err;
+ struct ppc_inst instr;
unsigned long addr;
addr = (unsigned long)&instr;
/* The simplest case, branch to self, no flags */
- check(instr_is_branch_iform(0x48000000));
+ check(instr_is_branch_iform(ppc_inst(0x48000000)));
/* All bits of target set, and flags */
- check(instr_is_branch_iform(0x4bffffff));
+ check(instr_is_branch_iform(ppc_inst(0x4bffffff)));
/* High bit of opcode set, which is wrong */
- check(!instr_is_branch_iform(0xcbffffff));
+ check(!instr_is_branch_iform(ppc_inst(0xcbffffff)));
/* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_iform(0x7bffffff));
+ check(!instr_is_branch_iform(ppc_inst(0x7bffffff)));
/* Simplest case, branch to self with link */
- check(instr_is_branch_iform(0x48000001));
+ check(instr_is_branch_iform(ppc_inst(0x48000001)));
/* All bits of targets set */
- check(instr_is_branch_iform(0x4bfffffd));
+ check(instr_is_branch_iform(ppc_inst(0x4bfffffd)));
/* Some bits of targets set */
- check(instr_is_branch_iform(0x4bff00fd));
+ check(instr_is_branch_iform(ppc_inst(0x4bff00fd)));
/* Must be a valid branch to start with */
- check(!instr_is_branch_iform(0x7bfffffd));
+ check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
/* Absolute branch to 0x100 */
- instr = 0x48000103;
+ instr = ppc_inst(0x48000103);
check(instr_is_branch_to_addr(&instr, 0x100));
/* Absolute branch to 0x420fc */
- instr = 0x480420ff;
+ instr = ppc_inst(0x480420ff);
check(instr_is_branch_to_addr(&instr, 0x420fc));
/* Maximum positive relative branch, + 20MB - 4B */
- instr = 0x49fffffc;
+ instr = ppc_inst(0x49fffffc);
check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
/* Smallest negative relative branch, - 4B */
- instr = 0x4bfffffc;
+ instr = ppc_inst(0x4bfffffc);
check(instr_is_branch_to_addr(&instr, addr - 4));
/* Largest negative relative branch, - 32 MB */
- instr = 0x4a000000;
+ instr = ppc_inst(0x4a000000);
check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
/* Branch to self, with link */
- instr = create_branch(&instr, addr, BRANCH_SET_LINK);
+ err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK);
check(instr_is_branch_to_addr(&instr, addr));
/* Branch to self - 0x100, with link */
- instr = create_branch(&instr, addr - 0x100, BRANCH_SET_LINK);
+ err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK);
check(instr_is_branch_to_addr(&instr, addr - 0x100));
/* Branch to self + 0x100, no link */
- instr = create_branch(&instr, addr + 0x100, 0);
+ err = create_branch(&instr, &instr, addr + 0x100, 0);
check(instr_is_branch_to_addr(&instr, addr + 0x100));
/* Maximum relative negative offset, - 32 MB */
- instr = create_branch(&instr, addr - 0x2000000, BRANCH_SET_LINK);
+ err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK);
check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
/* Out of range relative negative offset, - 32 MB + 4*/
- instr = create_branch(&instr, addr - 0x2000004, BRANCH_SET_LINK);
- check(instr == 0);
+ err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK);
+ check(err);
/* Out of range relative positive offset, + 32 MB */
- instr = create_branch(&instr, addr + 0x2000000, BRANCH_SET_LINK);
- check(instr == 0);
+ err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK);
+ check(err);
/* Unaligned target */
- instr = create_branch(&instr, addr + 3, BRANCH_SET_LINK);
- check(instr == 0);
+ err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK);
+ check(err);
/* Check flags are masked correctly */
- instr = create_branch(&instr, addr, 0xFFFFFFFC);
+ err = create_branch(&instr, &instr, addr, 0xFFFFFFFC);
check(instr_is_branch_to_addr(&instr, addr));
- check(instr == 0x48000000);
+ check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
}
static void __init test_create_function_call(void)
{
- unsigned int *iptr;
+ struct ppc_inst *iptr;
unsigned long dest;
+ struct ppc_inst instr;
/* Check we can create a function call */
- iptr = (unsigned int *)ppc_function_entry(test_trampoline);
+ iptr = (struct ppc_inst *)ppc_function_entry(test_trampoline);
dest = ppc_function_entry(test_create_function_call);
- patch_instruction(iptr, create_branch(iptr, dest, BRANCH_SET_LINK));
+ create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
check(instr_is_branch_to_addr(iptr, dest));
}
static void __init test_branch_bform(void)
{
+ int err;
unsigned long addr;
- unsigned int *iptr, instr, flags;
+ struct ppc_inst *iptr, instr;
+ unsigned int flags;
iptr = &instr;
addr = (unsigned long)iptr;
/* The simplest case, branch to self, no flags */
- check(instr_is_branch_bform(0x40000000));
+ check(instr_is_branch_bform(ppc_inst(0x40000000)));
/* All bits of target set, and flags */
- check(instr_is_branch_bform(0x43ffffff));
+ check(instr_is_branch_bform(ppc_inst(0x43ffffff)));
/* High bit of opcode set, which is wrong */
- check(!instr_is_branch_bform(0xc3ffffff));
+ check(!instr_is_branch_bform(ppc_inst(0xc3ffffff)));
/* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_bform(0x7bffffff));
+ check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
/* Absolute conditional branch to 0x100 */
- instr = 0x43ff0103;
+ instr = ppc_inst(0x43ff0103);
check(instr_is_branch_to_addr(&instr, 0x100));
/* Absolute conditional branch to 0x20fc */
- instr = 0x43ff20ff;
+ instr = ppc_inst(0x43ff20ff);
check(instr_is_branch_to_addr(&instr, 0x20fc));
/* Maximum positive relative conditional branch, + 32 KB - 4B */
- instr = 0x43ff7ffc;
+ instr = ppc_inst(0x43ff7ffc);
check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
/* Smallest negative relative conditional branch, - 4B */
- instr = 0x43fffffc;
+ instr = ppc_inst(0x43fffffc);
check(instr_is_branch_to_addr(&instr, addr - 4));
/* Largest negative relative conditional branch, - 32 KB */
- instr = 0x43ff8000;
+ instr = ppc_inst(0x43ff8000);
check(instr_is_branch_to_addr(&instr, addr - 0x8000));
/* All condition code bits set & link */
flags = 0x3ff000 | BRANCH_SET_LINK;
/* Branch to self */
- instr = create_cond_branch(iptr, addr, flags);
+ err = create_cond_branch(&instr, iptr, addr, flags);
check(instr_is_branch_to_addr(&instr, addr));
/* Branch to self - 0x100 */
- instr = create_cond_branch(iptr, addr - 0x100, flags);
+ err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
check(instr_is_branch_to_addr(&instr, addr - 0x100));
/* Branch to self + 0x100 */
- instr = create_cond_branch(iptr, addr + 0x100, flags);
+ err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
check(instr_is_branch_to_addr(&instr, addr + 0x100));
/* Maximum relative negative offset, - 32 KB */
- instr = create_cond_branch(iptr, addr - 0x8000, flags);
+ err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
check(instr_is_branch_to_addr(&instr, addr - 0x8000));
/* Out of range relative negative offset, - 32 KB + 4 */
- instr = create_cond_branch(iptr, addr - 0x8004, flags);
- check(instr == 0);
+ err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
+ check(err);
/* Out of range relative positive offset, + 32 KB */
- instr = create_cond_branch(iptr, addr + 0x8000, flags);
- check(instr == 0);
+ err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
+ check(err);
/* Unaligned target */
- instr = create_cond_branch(iptr, addr + 3, flags);
- check(instr == 0);
+ err = create_cond_branch(&instr, iptr, addr + 3, flags);
+ check(err);
/* Check flags are masked correctly */
- instr = create_cond_branch(iptr, addr, 0xFFFFFFFC);
+ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
check(instr_is_branch_to_addr(&instr, addr));
- check(instr == 0x43FF0000);
+ check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
}
static void __init test_translate_branch(void)
{
unsigned long addr;
- unsigned int *p, *q;
+ void *p, *q;
+ struct ppc_inst instr;
void *buf;
buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
@@ -574,8 +595,9 @@ static void __init test_translate_branch(void)
addr = (unsigned long)p;
patch_branch(p, addr, 0);
check(instr_is_branch_to_addr(p, addr));
- q = p + 1;
- patch_instruction(q, translate_branch(q, p));
+ q = p + 4;
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(q, addr));
/* Maximum negative case, move b . to addr + 32 MB */
@@ -583,27 +605,30 @@ static void __init test_translate_branch(void)
addr = (unsigned long)p;
patch_branch(p, addr, 0);
q = buf + 0x2000000;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x4a000000);
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
/* Maximum positive case, move x to x - 32 MB + 4 */
p = buf + 0x2000000;
addr = (unsigned long)p;
patch_branch(p, addr, 0);
q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x49fffffc);
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
/* Jump to x + 16 MB moved to x + 20 MB */
p = buf;
addr = 0x1000000 + (unsigned long)buf;
patch_branch(p, addr, BRANCH_SET_LINK);
q = buf + 0x1400000;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
@@ -612,7 +637,8 @@ static void __init test_translate_branch(void)
addr = 0x2000000 + (unsigned long)buf;
patch_branch(p, addr, 0);
q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
@@ -622,47 +648,57 @@ static void __init test_translate_branch(void)
/* Simple case, branch to self moved a little */
p = buf;
addr = (unsigned long)p;
- patch_instruction(p, create_cond_branch(p, addr, 0));
+ create_cond_branch(&instr, p, addr, 0);
+ patch_instruction(p, instr);
check(instr_is_branch_to_addr(p, addr));
- q = p + 1;
- patch_instruction(q, translate_branch(q, p));
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(q, addr));
/* Maximum negative case, move b . to addr + 32 KB */
p = buf;
addr = (unsigned long)p;
- patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ patch_instruction(p, instr);
q = buf + 0x8000;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x43ff8000);
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
/* Maximum positive case, move x to x - 32 KB + 4 */
p = buf + 0x8000;
addr = (unsigned long)p;
- patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ patch_instruction(p, instr);
q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x43ff7ffc);
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
/* Jump to x + 12 KB moved to x + 20 KB */
p = buf;
addr = 0x3000 + (unsigned long)buf;
- patch_instruction(p, create_cond_branch(p, addr, BRANCH_SET_LINK));
+ create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
+ patch_instruction(p, instr);
q = buf + 0x5000;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
/* Jump to x + 8 KB moved to x - 8 KB + 4 */
p = buf + 0x2000;
addr = 0x4000 + (unsigned long)buf;
- patch_instruction(p, create_cond_branch(p, addr, 0));
+ create_cond_branch(&instr, p, addr, 0);
+ patch_instruction(p, instr);
q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
+ translate_branch(&instr, q, p);
+ patch_instruction(q, instr);
check(instr_is_branch_to_addr(p, addr));
check(instr_is_branch_to_addr(q, addr));
@@ -670,6 +706,26 @@ static void __init test_translate_branch(void)
vfree(buf);
}
+#ifdef CONFIG_PPC64
+static void __init test_prefixed_patching(void)
+{
+ extern unsigned int code_patching_test1[];
+ extern unsigned int code_patching_test1_expected[];
+ extern unsigned int end_code_patching_test1[];
+
+ __patch_instruction((struct ppc_inst *)code_patching_test1,
+ ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
+ (struct ppc_inst *)code_patching_test1);
+
+ check(!memcmp(code_patching_test1,
+ code_patching_test1_expected,
+ sizeof(unsigned int) *
+ (end_code_patching_test1 - code_patching_test1)));
+}
+#else
+static inline void test_prefixed_patching(void) {}
+#endif
+
static int __init test_code_patching(void)
{
printk(KERN_DEBUG "Running code patching self-tests ...\n");
@@ -678,6 +734,7 @@ static int __init test_code_patching(void)
test_branch_bform();
test_create_function_call();
test_translate_branch();
+ test_prefixed_patching();
return 0;
}
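
Illustrative sketch, not taken from the patch: the self-tests above follow the reworked convention where create_branch() writes the generated instruction through its first argument and returns an error code, and the result is then applied with patch_instruction(). A minimal example of that pattern; emit_call() is a hypothetical wrapper, and patch_instruction() is assumed to return an int status as elsewhere in the kernel.

/* Hypothetical helper: emit "bl target" at dest using the API shown above. */
static int emit_call(struct ppc_inst *dest, unsigned long target)
{
        struct ppc_inst instr;
        int err;

        err = create_branch(&instr, dest, target, BRANCH_SET_LINK);
        if (err)
                return err;             /* target out of range or unaligned */
        return patch_instruction(dest, instr);
}
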
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index b12168c2447a..480172fbd024 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -7,6 +7,7 @@
#include <asm/ppc_asm.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
+#include <asm/ppc-opcode.h>
.text
@@ -791,3 +792,71 @@ globl(lwsync_fixup_test_expected_SYNC)
1: or 1,1,1
sync
+globl(ftr_fixup_prefix1)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+globl(end_ftr_fixup_prefix1)
+
+globl(ftr_fixup_prefix1_orig)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+
+globl(ftr_fixup_prefix1_expected)
+ or 1,1,1
+ nop
+ nop
+ or 2,2,2
+
+globl(ftr_fixup_prefix2)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+globl(end_ftr_fixup_prefix2)
+
+globl(ftr_fixup_prefix2_orig)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+
+globl(ftr_fixup_prefix2_alt)
+ .long OP_PREFIX << 26
+ .long 0x0000001
+
+globl(ftr_fixup_prefix2_expected)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000001
+ or 2,2,2
+
+globl(ftr_fixup_prefix3)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+ or 3,3,3
+globl(end_ftr_fixup_prefix3)
+
+globl(ftr_fixup_prefix3_orig)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+ or 3,3,3
+
+globl(ftr_fixup_prefix3_alt)
+ .long OP_PREFIX << 26
+ .long 0x0000001
+ nop
+
+globl(ftr_fixup_prefix3_expected)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000001
+ nop
+ or 3,3,3
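
A note on the new fixup test fragments above, sketched in C rather than taken from the patch: OP_PREFIX is the primary opcode reserved for prefixed instructions (assumed to be 1, per ppc-opcode.h in this series), so ".long OP_PREFIX << 26" emits the word 0x04000000 and the following ".long" is consumed as the suffix.

/* Sketch: how a word is recognised as a prefix (this is what get_op() computes). */
static bool is_prefix_word(unsigned int word)
{
        return (word >> 26) == 1;       /* assumed OP_PREFIX value */
}
/* is_prefix_word(0x04000000) is true, so the next 32-bit word is the suffix. */
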
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 4ba634b89ce5..4c0a7ee9fa00 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -21,6 +21,7 @@
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
+#include <asm/inst.h>
struct fixup_entry {
unsigned long mask;
@@ -31,30 +32,31 @@ struct fixup_entry {
long alt_end_off;
};
-static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
+static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
{
/*
* We store the offset to the code as a negative offset from
* the start of the alt_entry, to support the VDSO. This
* routine converts that back into an actual address.
*/
- return (unsigned int *)((unsigned long)fcur + offset);
+ return (struct ppc_inst *)((unsigned long)fcur + offset);
}
-static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
- unsigned int *alt_start, unsigned int *alt_end)
+static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
+ struct ppc_inst *alt_start, struct ppc_inst *alt_end)
{
- unsigned int instr;
+ int err;
+ struct ppc_inst instr;
- instr = *src;
+ instr = ppc_inst_read(src);
if (instr_is_relative_branch(*src)) {
- unsigned int *target = (unsigned int *)branch_target(src);
+ struct ppc_inst *target = (struct ppc_inst *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target > alt_end) {
- instr = translate_branch(dest, src);
- if (!instr)
+ err = translate_branch(&instr, dest, src);
+ if (err)
return 1;
}
}
@@ -66,7 +68,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
- unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
+ struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;
start = calc_addr(fcur, fcur->start_off);
end = calc_addr(fcur, fcur->end_off);
@@ -82,13 +84,15 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
src = alt_start;
dest = start;
- for (; src < alt_end; src++, dest++) {
+ for (; src < alt_end; src = ppc_inst_next(src, src),
+ dest = ppc_inst_next(dest, dest)) {
if (patch_alt_instruction(src, dest, alt_start, alt_end))
return 1;
}
- for (; dest < end; dest++)
- raw_patch_instruction(dest, PPC_INST_NOP);
+ nop = ppc_inst(PPC_INST_NOP);
+ for (; dest < end; dest = ppc_inst_next(dest, &nop))
+ raw_patch_instruction(dest, nop);
return 0;
}
@@ -145,15 +149,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instrs[0]);
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
if (types & STF_BARRIER_FALLBACK)
- patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
+ patch_branch((struct ppc_inst *)(dest + 1),
+ (unsigned long)&stf_barrier_fallback,
BRANCH_SET_LINK);
else
- patch_instruction(dest + 1, instrs[1]);
+ patch_instruction((struct ppc_inst *)(dest + 1),
+ ppc_inst(instrs[1]));
- patch_instruction(dest + 2, instrs[2]);
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
}
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
@@ -206,12 +212,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instrs[0]);
- patch_instruction(dest + 1, instrs[1]);
- patch_instruction(dest + 2, instrs[2]);
- patch_instruction(dest + 3, instrs[3]);
- patch_instruction(dest + 4, instrs[4]);
- patch_instruction(dest + 5, instrs[5]);
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
+ patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
+ patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
}
printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
@@ -259,9 +265,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instrs[0]);
- patch_instruction(dest + 1, instrs[1]);
- patch_instruction(dest + 2, instrs[2]);
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+ patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
}
printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
@@ -294,7 +300,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instr);
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -337,8 +343,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instr[0]);
- patch_instruction(dest + 1, instr[1]);
+ patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
+ patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -352,7 +358,7 @@ static void patch_btb_flush_section(long *curr)
end = (void *)curr + *(curr + 1);
for (; start < end; start++) {
pr_devel("patching dest %lx\n", (unsigned long)start);
- patch_instruction(start, PPC_INST_NOP);
+ patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
}
}
@@ -371,7 +377,7 @@ void do_btb_flush_fixups(void)
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
- unsigned int *dest;
+ struct ppc_inst *dest;
if (!(value & CPU_FTR_LWSYNC))
return ;
@@ -381,27 +387,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
for (; start < end; start++) {
dest = (void *)start + *start;
- raw_patch_instruction(dest, PPC_INST_LWSYNC);
+ raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
}
}
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- int *src, *dest;
- unsigned long length;
+ struct ppc_inst inst, *src, *dest, *end;
if (PHYSICAL_START == 0)
return;
- src = (int *)(KERNELBASE + PHYSICAL_START);
- dest = (int *)KERNELBASE;
- length = (__end_interrupts - _stext) / sizeof(int);
+ src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
+ dest = (struct ppc_inst *)KERNELBASE;
+ end = (void *)src + (__end_interrupts - _stext);
- while (length--) {
- raw_patch_instruction(dest, *src);
- src++;
- dest++;
+ while (src < end) {
+ inst = ppc_inst_read(src);
+ raw_patch_instruction(dest, inst);
+ src = ppc_inst_next(src, src);
+ dest = ppc_inst_next(dest, dest);
}
#endif
}
@@ -684,6 +690,78 @@ static void test_lwsync_macros(void)
}
}
+#ifdef CONFIG_PPC64
+static void __init test_prefix_patching(void)
+{
+ extern unsigned int ftr_fixup_prefix1[];
+ extern unsigned int end_ftr_fixup_prefix1[];
+ extern unsigned int ftr_fixup_prefix1_orig[];
+ extern unsigned int ftr_fixup_prefix1_expected[];
+ int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);
+
+ fixup.value = fixup.mask = 8;
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
+ fixup.alt_start_off = fixup.alt_end_off = 0;
+
+ /* Sanity check */
+ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);
+
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
+ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
+}
+
+static void __init test_prefix_alt_patching(void)
+{
+ extern unsigned int ftr_fixup_prefix2[];
+ extern unsigned int end_ftr_fixup_prefix2[];
+ extern unsigned int ftr_fixup_prefix2_orig[];
+ extern unsigned int ftr_fixup_prefix2_expected[];
+ extern unsigned int ftr_fixup_prefix2_alt[];
+ int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);
+
+ fixup.value = fixup.mask = 8;
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
+ /* Sanity check */
+ check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);
+
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
+ check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
+}
+
+static void __init test_prefix_word_alt_patching(void)
+{
+ extern unsigned int ftr_fixup_prefix3[];
+ extern unsigned int end_ftr_fixup_prefix3[];
+ extern unsigned int ftr_fixup_prefix3_orig[];
+ extern unsigned int ftr_fixup_prefix3_expected[];
+ extern unsigned int ftr_fixup_prefix3_alt[];
+ int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);
+
+ fixup.value = fixup.mask = 8;
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
+ /* Sanity check */
+ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);
+
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
+}
+#else
+static inline void test_prefix_patching(void) {}
+static inline void test_prefix_alt_patching(void) {}
+static inline void test_prefix_word_alt_patching(void) {}
+#endif /* CONFIG_PPC64 */
+
static int __init test_feature_fixups(void)
{
printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
@@ -698,6 +776,9 @@ static int __init test_feature_fixups(void)
test_cpu_macros();
test_fw_macros();
test_lwsync_macros();
+ test_prefix_patching();
+ test_prefix_alt_patching();
+ test_prefix_word_alt_patching();
return 0;
}
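
For context on the loop changes in patch_feature_section() above, an illustrative sketch (not from the patch): ppc_inst_next(location, value) advances location by the length of the instruction pointed to by value, which is presumably why the alt-copy loop steps by what it reads while the nop-fill loop steps by the nop it writes. Walking a region that may mix 4-byte and 8-byte prefixed instructions then looks like this; count_insns(), start and end are hypothetical.

/* Sketch: count instructions in [start, end), stepping by each one's own length. */
static int count_insns(struct ppc_inst *start, struct ppc_inst *end)
{
        struct ppc_inst *p;
        int n = 0;

        for (p = start; p < end; p = ppc_inst_next(p, p))
                n++;
        return n;
}
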
diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c
new file mode 100644
index 000000000000..aedfd6e31e53
--- /dev/null
+++ b/arch/powerpc/lib/inst.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020, IBM Corporation.
+ */
+
+#include <linux/uaccess.h>
+#include <asm/disassemble.h>
+#include <asm/inst.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_PPC64
+int probe_user_read_inst(struct ppc_inst *inst,
+ struct ppc_inst __user *nip)
+{
+ unsigned int val, suffix;
+ int err;
+
+ err = probe_user_read(&val, nip, sizeof(val));
+ if (err)
+ return err;
+ if (get_op(val) == OP_PREFIX) {
+ err = probe_user_read(&suffix, (void __user *)nip + 4, 4);
+ *inst = ppc_inst_prefix(val, suffix);
+ } else {
+ *inst = ppc_inst(val);
+ }
+ return err;
+}
+
+int probe_kernel_read_inst(struct ppc_inst *inst,
+ struct ppc_inst *src)
+{
+ unsigned int val, suffix;
+ int err;
+
+ err = probe_kernel_read(&val, src, sizeof(val));
+ if (err)
+ return err;
+ if (get_op(val) == OP_PREFIX) {
+ err = probe_kernel_read(&suffix, (void *)src + 4, 4);
+ *inst = ppc_inst_prefix(val, suffix);
+ } else {
+ *inst = ppc_inst(val);
+ }
+ return err;
+}
+#else /* !CONFIG_PPC64 */
+int probe_user_read_inst(struct ppc_inst *inst,
+ struct ppc_inst __user *nip)
+{
+ unsigned int val;
+ int err;
+
+ err = probe_user_read(&val, nip, sizeof(val));
+ if (!err)
+ *inst = ppc_inst(val);
+
+ return err;
+}
+
+int probe_kernel_read_inst(struct ppc_inst *inst,
+ struct ppc_inst *src)
+{
+ unsigned int val;
+ int err;
+
+ err = probe_kernel_read(&val, src, sizeof(val));
+ if (!err)
+ *inst = ppc_inst(val);
+
+ return err;
+}
+#endif /* CONFIG_PPC64 */
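
A usage sketch for the helpers added above (not from the patch): as with probe_kernel_read(), a zero return means the read succeeded. dump_insn() is hypothetical, and ppc_inst_val() is taken to return the first (prefix) word of a prefixed instruction.

/* Sketch: fetch one instruction, word or prefixed, from a kernel address. */
static void dump_insn(struct ppc_inst *src)
{
        struct ppc_inst inst;

        if (probe_kernel_read_inst(&inst, src))
                return;                         /* faulted while reading */
        pr_debug("insn at %px: %x\n", src, ppc_inst_val(inst));
}
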
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5f3a7bd9d90d..5abe98216dc2 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -13,6 +13,7 @@
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
+#include <asm/disassemble.h>
extern char system_call_common[];
@@ -188,6 +189,44 @@ static nokprobe_inline unsigned long xform_ea(unsigned int instr,
}
/*
+ * Calculate effective address for a MLS:D-form / 8LS:D-form
+ * prefixed instruction
+ */
+static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
+ unsigned int suffix,
+ const struct pt_regs *regs)
+{
+ int ra, prefix_r;
+ unsigned int dd;
+ unsigned long ea, d0, d1, d;
+
+ prefix_r = instr & (1ul << 20);
+ ra = (suffix >> 16) & 0x1f;
+
+ d0 = instr & 0x3ffff;
+ d1 = suffix & 0xffff;
+ d = (d0 << 16) | d1;
+
+ /*
+ * sign extend a 34 bit number
+ */
+ dd = (unsigned int)(d >> 2);
+ ea = (signed int)dd;
+ ea = (ea << 2) | (d & 0x3);
+
+ if (!prefix_r && ra)
+ ea += regs->gpr[ra];
+ else if (!prefix_r && !ra)
+ ; /* Leave ea as is */
+ else if (prefix_r && !ra)
+ ea += regs->nip;
+ else if (prefix_r && ra)
+ ; /* Invalid form. Should already be checked for by caller! */
+
+ return ea;
+}
+
+/*
* Return the largest power of 2, not greater than sizeof(unsigned long),
* such that x is a multiple of it.
*/
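
A side note on the sign extension in mlsd_8lsd_ea() above, sketched rather than taken from the patch: d is a 34-bit displacement built from d0 (18 bits of the prefix word) and d1 (16 bits of the suffix), and the dd/ea sequence sign-extends bit 33. On a 64-bit long the same result can be written as two shifts, assuming arithmetic right shift of signed values as the kernel code itself does.

/* Sketch: equivalent sign extension of the 34-bit prefixed displacement. */
static inline long sext34(unsigned long d)
{
        return (long)(d << 30) >> 30;   /* replicate bit 33 into bits 34..63 */
}
/* e.g. sext34(0x3ffffffffUL) == -1, matching what the dd/ea steps compute. */
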
@@ -1163,32 +1202,39 @@ static nokprobe_inline int trap_compare(long v1, long v2)
* otherwise.
*/
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- unsigned int instr)
+ struct ppc_inst instr)
{
+#ifdef CONFIG_PPC64
+ unsigned int suffixopcode, prefixtype, prefix_r;
+#endif
unsigned int opcode, ra, rb, rc, rd, spr, u;
unsigned long int imm;
unsigned long int val, val2;
unsigned int mb, me, sh;
+ unsigned int word, suffix;
long ival;
+ word = ppc_inst_val(instr);
+ suffix = ppc_inst_suffix(instr);
+
op->type = COMPUTE;
- opcode = instr >> 26;
+ opcode = ppc_inst_primary_opcode(instr);
switch (opcode) {
case 16: /* bc */
op->type = BRANCH;
- imm = (signed short)(instr & 0xfffc);
- if ((instr & 2) == 0)
+ imm = (signed short)(word & 0xfffc);
+ if ((word & 2) == 0)
imm += regs->nip;
op->val = truncate_if_32bit(regs->msr, imm);
- if (instr & 1)
+ if (word & 1)
op->type |= SETLK;
- if (branch_taken(instr, regs, op))
+ if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
#ifdef CONFIG_PPC64
case 17: /* sc */
- if ((instr & 0xfe2) == 2)
+ if ((word & 0xfe2) == 2)
op->type = SYSCALL;
else
op->type = UNKNOWN;
@@ -1196,21 +1242,21 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#endif
case 18: /* b */
op->type = BRANCH | BRTAKEN;
- imm = instr & 0x03fffffc;
+ imm = word & 0x03fffffc;
if (imm & 0x02000000)
imm -= 0x04000000;
- if ((instr & 2) == 0)
+ if ((word & 2) == 0)
imm += regs->nip;
op->val = truncate_if_32bit(regs->msr, imm);
- if (instr & 1)
+ if (word & 1)
op->type |= SETLK;
return 1;
case 19:
- switch ((instr >> 1) & 0x3ff) {
+ switch ((word >> 1) & 0x3ff) {
case 0: /* mcrf */
op->type = COMPUTE + SETCC;
- rd = 7 - ((instr >> 23) & 0x7);
- ra = 7 - ((instr >> 18) & 0x7);
+ rd = 7 - ((word >> 23) & 0x7);
+ ra = 7 - ((word >> 18) & 0x7);
rd *= 4;
ra *= 4;
val = (regs->ccr >> ra) & 0xf;
@@ -1220,11 +1266,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 16: /* bclr */
case 528: /* bcctr */
op->type = BRANCH;
- imm = (instr & 0x400)? regs->ctr: regs->link;
+ imm = (word & 0x400)? regs->ctr: regs->link;
op->val = truncate_if_32bit(regs->msr, imm);
- if (instr & 1)
+ if (word & 1)
op->type |= SETLK;
- if (branch_taken(instr, regs, op))
+ if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
@@ -1247,23 +1293,23 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 417: /* crorc */
case 449: /* cror */
op->type = COMPUTE + SETCC;
- ra = (instr >> 16) & 0x1f;
- rb = (instr >> 11) & 0x1f;
- rd = (instr >> 21) & 0x1f;
+ ra = (word >> 16) & 0x1f;
+ rb = (word >> 11) & 0x1f;
+ rd = (word >> 21) & 0x1f;
ra = (regs->ccr >> (31 - ra)) & 1;
rb = (regs->ccr >> (31 - rb)) & 1;
- val = (instr >> (6 + ra * 2 + rb)) & 1;
+ val = (word >> (6 + ra * 2 + rb)) & 1;
op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
(val << (31 - rd));
return 1;
}
break;
case 31:
- switch ((instr >> 1) & 0x3ff) {
+ switch ((word >> 1) & 0x3ff) {
case 598: /* sync */
op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
- switch ((instr >> 21) & 3) {
+ switch ((word >> 21) & 3) {
case 1: /* lwsync */
op->type = BARRIER + BARRIER_LWSYNC;
break;
@@ -1285,20 +1331,40 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if (!FULL_REGS(regs))
return -1;
- rd = (instr >> 21) & 0x1f;
- ra = (instr >> 16) & 0x1f;
- rb = (instr >> 11) & 0x1f;
- rc = (instr >> 6) & 0x1f;
+ rd = (word >> 21) & 0x1f;
+ ra = (word >> 16) & 0x1f;
+ rb = (word >> 11) & 0x1f;
+ rc = (word >> 6) & 0x1f;
switch (opcode) {
#ifdef __powerpc64__
+ case 1:
+ prefix_r = word & (1ul << 20);
+ ra = (suffix >> 16) & 0x1f;
+ rd = (suffix >> 21) & 0x1f;
+ op->reg = rd;
+ op->val = regs->gpr[rd];
+ suffixopcode = get_op(suffix);
+ prefixtype = (word >> 24) & 0x3;
+ switch (prefixtype) {
+ case 2:
+ if (prefix_r && ra)
+ return 0;
+ switch (suffixopcode) {
+ case 14: /* paddi */
+ op->type = COMPUTE | PREFIXED;
+ op->val = mlsd_8lsd_ea(word, suffix, regs);
+ goto compute_done;
+ }
+ }
+ break;
case 2: /* tdi */
- if (rd & trap_compare(regs->gpr[ra], (short) instr))
+ if (rd & trap_compare(regs->gpr[ra], (short) word))
goto trap;
return 1;
#endif
case 3: /* twi */
- if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
+ if (rd & trap_compare((int)regs->gpr[ra], (short) word))
goto trap;
return 1;
@@ -1307,7 +1373,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return -1;
- switch (instr & 0x3f) {
+ switch (word & 0x3f) {
case 48: /* maddhd */
asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
"=r" (op->val) : "r" (regs->gpr[ra]),
@@ -1335,16 +1401,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#endif
case 7: /* mulli */
- op->val = regs->gpr[ra] * (short) instr;
+ op->val = regs->gpr[ra] * (short) word;
goto compute_done;
case 8: /* subfic */
- imm = (short) instr;
+ imm = (short) word;
add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
return 1;
case 10: /* cmpli */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
val = regs->gpr[ra];
#ifdef __powerpc64__
if ((rd & 1) == 0)
@@ -1354,7 +1420,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 11: /* cmpi */
- imm = (short) instr;
+ imm = (short) word;
val = regs->gpr[ra];
#ifdef __powerpc64__
if ((rd & 1) == 0)
@@ -1364,35 +1430,35 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 12: /* addic */
- imm = (short) instr;
+ imm = (short) word;
add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
return 1;
case 13: /* addic. */
- imm = (short) instr;
+ imm = (short) word;
add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
set_cr0(regs, op);
return 1;
case 14: /* addi */
- imm = (short) instr;
+ imm = (short) word;
if (ra)
imm += regs->gpr[ra];
op->val = imm;
goto compute_done;
case 15: /* addis */
- imm = ((short) instr) << 16;
+ imm = ((short) word) << 16;
if (ra)
imm += regs->gpr[ra];
op->val = imm;
goto compute_done;
case 19:
- if (((instr >> 1) & 0x1f) == 2) {
+ if (((word >> 1) & 0x1f) == 2) {
/* addpcis */
- imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
- imm |= (instr >> 15) & 0x3e; /* d1 field */
+ imm = (short) (word & 0xffc1); /* d0 + d2 fields */
+ imm |= (word >> 15) & 0x3e; /* d1 field */
op->val = regs->nip + (imm << 16) + 4;
goto compute_done;
}
@@ -1400,65 +1466,65 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
case 20: /* rlwimi */
- mb = (instr >> 6) & 0x1f;
- me = (instr >> 1) & 0x1f;
+ mb = (word >> 6) & 0x1f;
+ me = (word >> 1) & 0x1f;
val = DATA32(regs->gpr[rd]);
imm = MASK32(mb, me);
op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
goto logical_done;
case 21: /* rlwinm */
- mb = (instr >> 6) & 0x1f;
- me = (instr >> 1) & 0x1f;
+ mb = (word >> 6) & 0x1f;
+ me = (word >> 1) & 0x1f;
val = DATA32(regs->gpr[rd]);
op->val = ROTATE(val, rb) & MASK32(mb, me);
goto logical_done;
case 23: /* rlwnm */
- mb = (instr >> 6) & 0x1f;
- me = (instr >> 1) & 0x1f;
+ mb = (word >> 6) & 0x1f;
+ me = (word >> 1) & 0x1f;
rb = regs->gpr[rb] & 0x1f;
val = DATA32(regs->gpr[rd]);
op->val = ROTATE(val, rb) & MASK32(mb, me);
goto logical_done;
case 24: /* ori */
- op->val = regs->gpr[rd] | (unsigned short) instr;
+ op->val = regs->gpr[rd] | (unsigned short) word;
goto logical_done_nocc;
case 25: /* oris */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
op->val = regs->gpr[rd] | (imm << 16);
goto logical_done_nocc;
case 26: /* xori */
- op->val = regs->gpr[rd] ^ (unsigned short) instr;
+ op->val = regs->gpr[rd] ^ (unsigned short) word;
goto logical_done_nocc;
case 27: /* xoris */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
op->val = regs->gpr[rd] ^ (imm << 16);
goto logical_done_nocc;
case 28: /* andi. */
- op->val = regs->gpr[rd] & (unsigned short) instr;
+ op->val = regs->gpr[rd] & (unsigned short) word;
set_cr0(regs, op);
goto logical_done_nocc;
case 29: /* andis. */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
op->val = regs->gpr[rd] & (imm << 16);
set_cr0(regs, op);
goto logical_done_nocc;
#ifdef __powerpc64__
case 30: /* rld* */
- mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
+ mb = ((word >> 6) & 0x1f) | (word & 0x20);
val = regs->gpr[rd];
- if ((instr & 0x10) == 0) {
- sh = rb | ((instr & 2) << 4);
+ if ((word & 0x10) == 0) {
+ sh = rb | ((word & 2) << 4);
val = ROTATE(val, sh);
- switch ((instr >> 2) & 3) {
+ switch ((word >> 2) & 3) {
case 0: /* rldicl */
val &= MASK64_L(mb);
break;
@@ -1478,7 +1544,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
} else {
sh = regs->gpr[rb] & 0x3f;
val = ROTATE(val, sh);
- switch ((instr >> 1) & 7) {
+ switch ((word >> 1) & 7) {
case 0: /* rldcl */
op->val = val & MASK64_L(mb);
goto logical_done;
@@ -1493,8 +1559,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 31:
/* isel occupies 32 minor opcodes */
- if (((instr >> 1) & 0x1f) == 15) {
- mb = (instr >> 6) & 0x1f; /* bc field */
+ if (((word >> 1) & 0x1f) == 15) {
+ mb = (word >> 6) & 0x1f; /* bc field */
val = (regs->ccr >> (31 - mb)) & 1;
val2 = (ra) ? regs->gpr[ra] : 0;
@@ -1502,7 +1568,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
goto compute_done;
}
- switch ((instr >> 1) & 0x3ff) {
+ switch ((word >> 1) & 0x3ff) {
case 4: /* tw */
if (rd == 0x1f ||
(rd & trap_compare((int)regs->gpr[ra],
@@ -1536,17 +1602,17 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->reg = rd;
/* only MSR_EE and MSR_RI get changed if bit 15 set */
/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
- imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
+ imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
op->val = imm;
return 0;
#endif
case 19: /* mfcr */
imm = 0xffffffffUL;
- if ((instr >> 20) & 1) {
+ if ((word >> 20) & 1) {
imm = 0xf0000000UL;
for (sh = 0; sh < 8; ++sh) {
- if (instr & (0x80000 >> sh))
+ if (word & (0x80000 >> sh))
break;
imm >>= 4;
}
@@ -1560,7 +1626,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
val = regs->gpr[rd];
op->ccval = regs->ccr;
for (sh = 0; sh < 8; ++sh) {
- if (instr & (0x80000 >> sh))
+ if (word & (0x80000 >> sh))
op->ccval = (op->ccval & ~imm) |
(val & imm);
imm >>= 4;
@@ -1568,7 +1634,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 339: /* mfspr */
- spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
+ spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
op->type = MFSPR;
op->reg = rd;
op->spr = spr;
@@ -1578,7 +1644,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
case 467: /* mtspr */
- spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
+ spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
op->type = MTSPR;
op->val = regs->gpr[rd];
op->spr = spr;
@@ -1948,7 +2014,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 826: /* sradi with sh_5 = 0 */
case 827: /* sradi with sh_5 = 1 */
op->type = COMPUTE + SETREG + SETXER;
- sh = rb | ((instr & 2) << 4);
+ sh = rb | ((word & 2) << 4);
ival = (signed long int) regs->gpr[rd];
op->val = ival >> sh;
op->xerval = regs->xer;
@@ -1964,7 +2030,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return -1;
op->type = COMPUTE + SETREG;
- sh = rb | ((instr & 2) << 4);
+ sh = rb | ((word & 2) << 4);
val = (signed int) regs->gpr[rd];
if (sh)
op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
@@ -1979,34 +2045,34 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
*/
case 54: /* dcbst */
op->type = MKOP(CACHEOP, DCBST, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
case 86: /* dcbf */
op->type = MKOP(CACHEOP, DCBF, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
case 246: /* dcbtst */
op->type = MKOP(CACHEOP, DCBTST, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
op->reg = rd;
return 0;
case 278: /* dcbt */
op->type = MKOP(CACHEOP, DCBTST, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
op->reg = rd;
return 0;
case 982: /* icbi */
op->type = MKOP(CACHEOP, ICBI, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
case 1014: /* dcbz */
op->type = MKOP(CACHEOP, DCBZ, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
}
break;
@@ -2019,14 +2085,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->update_reg = ra;
op->reg = rd;
op->val = regs->gpr[rd];
- u = (instr >> 20) & UPDATE;
+ u = (word >> 20) & UPDATE;
op->vsx_flags = 0;
switch (opcode) {
case 31:
- u = instr & UPDATE;
- op->ea = xform_ea(instr, regs);
- switch ((instr >> 1) & 0x3ff) {
+ u = word & UPDATE;
+ op->ea = xform_ea(word, regs);
+ switch ((word >> 1) & 0x3ff) {
case 20: /* lwarx */
op->type = MKOP(LARX, 0, 4);
break;
@@ -2271,25 +2337,25 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef CONFIG_VSX
case 12: /* lxsiwzx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 8;
break;
case 76: /* lxsiwax */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
op->element_size = 8;
break;
case 140: /* stxsiwx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 4);
op->element_size = 8;
break;
case 268: /* lxvx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 16;
op->vsx_flags = VSX_CHECK_VEC;
@@ -2298,33 +2364,33 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 269: /* lxvl */
case 301: { /* lxvll */
int nb;
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->ea = ra ? regs->gpr[ra] : 0;
nb = regs->gpr[rb] & 0xff;
if (nb > 16)
nb = 16;
op->type = MKOP(LOAD_VSX, 0, nb);
op->element_size = 16;
- op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
+ op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
VSX_CHECK_VEC;
break;
}
case 332: /* lxvdsx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 8);
op->element_size = 8;
op->vsx_flags = VSX_SPLAT;
break;
case 364: /* lxvwsx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 4;
op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
break;
case 396: /* stxvx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 16;
op->vsx_flags = VSX_CHECK_VEC;
@@ -2333,118 +2399,118 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 397: /* stxvl */
case 429: { /* stxvll */
int nb;
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->ea = ra ? regs->gpr[ra] : 0;
nb = regs->gpr[rb] & 0xff;
if (nb > 16)
nb = 16;
op->type = MKOP(STORE_VSX, 0, nb);
op->element_size = 16;
- op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
+ op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
VSX_CHECK_VEC;
break;
}
case 524: /* lxsspx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 8;
op->vsx_flags = VSX_FPCONV;
break;
case 588: /* lxsdx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 8);
op->element_size = 8;
break;
case 652: /* stxsspx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 4);
op->element_size = 8;
op->vsx_flags = VSX_FPCONV;
break;
case 716: /* stxsdx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 8);
op->element_size = 8;
break;
case 780: /* lxvw4x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 4;
break;
case 781: /* lxsibzx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 1);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 812: /* lxvh8x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 2;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 813: /* lxsihzx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 2);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 844: /* lxvd2x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 8;
break;
case 876: /* lxvb16x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 1;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 908: /* stxvw4x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 4;
break;
case 909: /* stxsibx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 1);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 940: /* stxvh8x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 2;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 941: /* stxsihx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 2);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 972: /* stxvd2x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 8;
break;
case 1004: /* stxvb16x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 1;
op->vsx_flags = VSX_CHECK_VEC;
@@ -2457,80 +2523,80 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 32: /* lwz */
case 33: /* lwzu */
op->type = MKOP(LOAD, u, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 34: /* lbz */
case 35: /* lbzu */
op->type = MKOP(LOAD, u, 1);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 36: /* stw */
case 37: /* stwu */
op->type = MKOP(STORE, u, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 38: /* stb */
case 39: /* stbu */
op->type = MKOP(STORE, u, 1);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 40: /* lhz */
case 41: /* lhzu */
op->type = MKOP(LOAD, u, 2);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 42: /* lha */
case 43: /* lhau */
op->type = MKOP(LOAD, SIGNEXT | u, 2);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 44: /* sth */
case 45: /* sthu */
op->type = MKOP(STORE, u, 2);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 46: /* lmw */
if (ra >= rd)
break; /* invalid form, ra in range to load */
op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 47: /* stmw */
op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
#ifdef CONFIG_PPC_FPU
case 48: /* lfs */
case 49: /* lfsu */
op->type = MKOP(LOAD_FP, u | FPCONV, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 50: /* lfd */
case 51: /* lfdu */
op->type = MKOP(LOAD_FP, u, 8);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 52: /* stfs */
case 53: /* stfsu */
op->type = MKOP(STORE_FP, u | FPCONV, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 54: /* stfd */
case 55: /* stfdu */
op->type = MKOP(STORE_FP, u, 8);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
#endif
@@ -2538,14 +2604,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 56: /* lq */
if (!((rd & 1) || (rd == ra)))
op->type = MKOP(LOAD, 0, 16);
- op->ea = dqform_ea(instr, regs);
+ op->ea = dqform_ea(word, regs);
break;
#endif
#ifdef CONFIG_VSX
case 57: /* lfdp, lxsd, lxssp */
- op->ea = dsform_ea(instr, regs);
- switch (instr & 3) {
+ op->ea = dsform_ea(word, regs);
+ switch (word & 3) {
case 0: /* lfdp */
if (rd & 1)
break; /* reg must be even */
@@ -2569,8 +2635,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 58: /* ld[u], lwa */
- op->ea = dsform_ea(instr, regs);
- switch (instr & 3) {
+ op->ea = dsform_ea(word, regs);
+ switch (word & 3) {
case 0: /* ld */
op->type = MKOP(LOAD, 0, 8);
break;
@@ -2586,16 +2652,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef CONFIG_VSX
case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
- switch (instr & 7) {
+ switch (word & 7) {
case 0: /* stfdp with LSB of DS field = 0 */
case 4: /* stfdp with LSB of DS field = 1 */
- op->ea = dsform_ea(instr, regs);
+ op->ea = dsform_ea(word, regs);
op->type = MKOP(STORE_FP, 0, 16);
break;
case 1: /* lxv */
- op->ea = dqform_ea(instr, regs);
- if (instr & 8)
+ op->ea = dqform_ea(word, regs);
+ if (word & 8)
op->reg = rd + 32;
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 16;
@@ -2604,7 +2670,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 2: /* stxsd with LSB of DS field = 0 */
case 6: /* stxsd with LSB of DS field = 1 */
- op->ea = dsform_ea(instr, regs);
+ op->ea = dsform_ea(word, regs);
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 8);
op->element_size = 8;
@@ -2613,7 +2679,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 3: /* stxssp with LSB of DS field = 0 */
case 7: /* stxssp with LSB of DS field = 1 */
- op->ea = dsform_ea(instr, regs);
+ op->ea = dsform_ea(word, regs);
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 4);
op->element_size = 8;
@@ -2621,8 +2687,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 5: /* stxv */
- op->ea = dqform_ea(instr, regs);
- if (instr & 8)
+ op->ea = dqform_ea(word, regs);
+ if (word & 8)
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 16;
@@ -2634,8 +2700,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 62: /* std[u] */
- op->ea = dsform_ea(instr, regs);
- switch (instr & 3) {
+ op->ea = dsform_ea(word, regs);
+ switch (word & 3) {
case 0: /* std */
op->type = MKOP(STORE, 0, 8);
break;
@@ -2648,6 +2714,124 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
}
break;
+ case 1: /* Prefixed instructions */
+ prefix_r = word & (1ul << 20);
+ ra = (suffix >> 16) & 0x1f;
+ op->update_reg = ra;
+ rd = (suffix >> 21) & 0x1f;
+ op->reg = rd;
+ op->val = regs->gpr[rd];
+
+ suffixopcode = get_op(suffix);
+ prefixtype = (word >> 24) & 0x3;
+ switch (prefixtype) {
+ case 0: /* Type 00 Eight-Byte Load/Store */
+ if (prefix_r && ra)
+ break;
+ op->ea = mlsd_8lsd_ea(word, suffix, regs);
+ switch (suffixopcode) {
+ case 41: /* plwa */
+ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
+ break;
+ case 42: /* plxsd */
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, PREFIXED, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 43: /* plxssp */
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, PREFIXED, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
+ break;
+ case 46: /* pstxsd */
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, PREFIXED, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 47: /* pstxssp */
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, PREFIXED, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
+ break;
+ case 51: /* plxv1 */
+ op->reg += 32;
+ fallthrough;
+ case 50: /* plxv0 */
+ op->type = MKOP(LOAD_VSX, PREFIXED, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 55: /* pstxv1 */
+ op->reg = rd + 32;
+ fallthrough;
+ case 54: /* pstxv0 */
+ op->type = MKOP(STORE_VSX, PREFIXED, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 56: /* plq */
+ op->type = MKOP(LOAD, PREFIXED, 16);
+ break;
+ case 57: /* pld */
+ op->type = MKOP(LOAD, PREFIXED, 8);
+ break;
+ case 60: /* stq */
+ op->type = MKOP(STORE, PREFIXED, 16);
+ break;
+ case 61: /* pstd */
+ op->type = MKOP(STORE, PREFIXED, 8);
+ break;
+ }
+ break;
+ case 1: /* Type 01 Eight-Byte Register-to-Register */
+ break;
+ case 2: /* Type 10 Modified Load/Store */
+ if (prefix_r && ra)
+ break;
+ op->ea = mlsd_8lsd_ea(word, suffix, regs);
+ switch (suffixopcode) {
+ case 32: /* plwz */
+ op->type = MKOP(LOAD, PREFIXED, 4);
+ break;
+ case 34: /* plbz */
+ op->type = MKOP(LOAD, PREFIXED, 1);
+ break;
+ case 36: /* pstw */
+ op->type = MKOP(STORE, PREFIXED, 4);
+ break;
+ case 38: /* pstb */
+ op->type = MKOP(STORE, PREFIXED, 1);
+ break;
+ case 40: /* plhz */
+ op->type = MKOP(LOAD, PREFIXED, 2);
+ break;
+ case 42: /* plha */
+ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
+ break;
+ case 44: /* psth */
+ op->type = MKOP(STORE, PREFIXED, 2);
+ break;
+ case 48: /* plfs */
+ op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
+ break;
+ case 50: /* plfd */
+ op->type = MKOP(LOAD_FP, PREFIXED, 8);
+ break;
+ case 52: /* pstfs */
+ op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
+ break;
+ case 54: /* pstfd */
+ op->type = MKOP(STORE_FP, PREFIXED, 8);
+ break;
+ }
+ break;
+ case 3: /* Type 11 Modified Register-to-Register */
+ break;
+ }
#endif /* __powerpc64__ */
}
@@ -2663,7 +2847,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
logical_done:
- if (instr & 1)
+ if (word & 1)
set_cr0(regs, op);
logical_done_nocc:
op->reg = ra;
@@ -2671,7 +2855,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
arith_done:
- if (instr & 1)
+ if (word & 1)
set_cr0(regs, op);
compute_done:
op->reg = rd;
@@ -2756,7 +2940,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
unsigned long next_pc;
- next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
+ next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
switch (GETTYPE(op->type)) {
case COMPUTE:
if (op->type & SETREG)
@@ -3101,7 +3285,7 @@ NOKPROBE_SYMBOL(emulate_loadstore);
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
{
struct instruction_op op;
int r, err, type;
@@ -3201,7 +3385,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
return 0;
instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
+ regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
return 1;
}
NOKPROBE_SYMBOL(emulate_step);
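
For completeness, a hedged sketch (not from the patch) of a caller adapting to the new emulate_step() signature above; try_emulate_word() is hypothetical, and the return convention (1 emulated, 0 not emulated, -1 must not be stepped) is the one documented in the comment above.

/* Sketch: wrap a raw 32-bit opcode and try to emulate it. */
static int try_emulate_word(struct pt_regs *regs, unsigned int word)
{
        return emulate_step(regs, ppc_inst(word));      /* 1, 0 or -1 as above */
}
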
diff --git a/arch/powerpc/lib/test_code-patching.S b/arch/powerpc/lib/test_code-patching.S
new file mode 100644
index 000000000000..a9be6107844e
--- /dev/null
+++ b/arch/powerpc/lib/test_code-patching.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 IBM Corporation
+ */
+#include <asm/ppc-opcode.h>
+
+ .text
+
+#define globl(x) \
+ .globl x; \
+x:
+
+globl(code_patching_test1)
+ nop
+ nop
+globl(end_code_patching_test1)
+
+globl(code_patching_test1_expected)
+ .long OP_PREFIX << 26
+ .long 0x0000000
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 53df4146dd32..46af80279ebc 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -11,6 +11,7 @@
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
@@ -19,40 +20,40 @@
* Defined with TEST_ prefix so it does not conflict with other
* definitions.
*/
-#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \
+#define TEST_LD(r, base, i) ppc_inst(PPC_INST_LD | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_DS(i))
-#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \
+#define TEST_LWZ(r, base, i) ppc_inst(PPC_INST_LWZ | ___PPC_RT(r) | \
___PPC_RA(base) | IMM_L(i))
-#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \
+#define TEST_LWZX(t, a, b) ppc_inst(PPC_INST_LWZX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \
+#define TEST_STD(r, base, i) ppc_inst(PPC_INST_STD | ___PPC_RS(r) | \
___PPC_RA(base) | IMM_DS(i))
-#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \
+#define TEST_LDARX(t, a, b, eh) ppc_inst(PPC_INST_LDARX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b) | \
__PPC_EH(eh))
-#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \
+#define TEST_STDCX(s, a, b) ppc_inst(PPC_INST_STDCX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \
+#define TEST_LFSX(t, a, b) ppc_inst(PPC_INST_LFSX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \
+#define TEST_STFSX(s, a, b) ppc_inst(PPC_INST_STFSX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \
+#define TEST_LFDX(t, a, b) ppc_inst(PPC_INST_LFDX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \
+#define TEST_STFDX(s, a, b) ppc_inst(PPC_INST_STFDX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \
+#define TEST_LVX(t, a, b) ppc_inst(PPC_INST_LVX | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \
+#define TEST_STVX(s, a, b) ppc_inst(PPC_INST_STVX | ___PPC_RS(s) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
-#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
-#define TEST_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \
+#define TEST_LXVD2X(s, a, b) ppc_inst(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_STXVD2X(s, a, b) ppc_inst(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
+#define TEST_ADD(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \
+#define TEST_ADD_DOT(t, a, b) ppc_inst(PPC_INST_ADD | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b) | 0x1)
-#define TEST_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \
+#define TEST_ADDC(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \
+#define TEST_ADDC_DOT(t, a, b) ppc_inst(PPC_INST_ADDC | ___PPC_RT(t) | \
___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define MAX_SUBTESTS 16
@@ -461,7 +462,7 @@ struct compute_test {
struct {
char *descr;
unsigned long flags;
- unsigned int instr;
+ struct ppc_inst instr;
struct pt_regs regs;
} subtests[MAX_SUBTESTS + 1];
};
@@ -472,7 +473,7 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "R0 = LONG_MAX",
- .instr = PPC_INST_NOP,
+ .instr = ppc_inst(PPC_INST_NOP),
.regs = {
.gpr[0] = LONG_MAX,
}
@@ -842,16 +843,16 @@ static struct compute_test compute_tests[] = {
};
static int __init emulate_compute_instr(struct pt_regs *regs,
- unsigned int instr)
+ struct ppc_inst instr)
{
struct instruction_op op;
- if (!regs || !instr)
+ if (!regs || !ppc_inst_val(instr))
return -EINVAL;
if (analyse_instr(&op, regs, instr) != 1 ||
GETTYPE(op.type) != COMPUTE) {
- pr_info("emulation failed, instruction = 0x%08x\n", instr);
+ pr_info("emulation failed, instruction = 0x%08x\n", ppc_inst_val(instr));
return -EFAULT;
}
@@ -860,18 +861,18 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
}
static int __init execute_compute_instr(struct pt_regs *regs,
- unsigned int instr)
+ struct ppc_inst instr)
{
extern int exec_instr(struct pt_regs *regs);
extern s32 patch__exec_instr;
- if (!regs || !instr)
+ if (!regs || !ppc_inst_val(instr))
return -EINVAL;
/* Patch the NOP with the actual instruction */
patch_instruction_site(&patch__exec_instr, instr);
if (exec_instr(regs)) {
- pr_info("execution failed, instruction = 0x%08x\n", instr);
+ pr_info("execution failed, instruction = 0x%08x\n", ppc_inst_val(instr));
return -EFAULT;
}
@@ -891,7 +892,8 @@ static void __init run_tests_compute(void)
unsigned long flags;
struct compute_test *test;
struct pt_regs *regs, exp, got;
- unsigned int i, j, k, instr;
+ unsigned int i, j, k;
+ struct ppc_inst instr;
bool ignore_gpr, ignore_xer, ignore_ccr, passed;
for (i = 0; i < ARRAY_SIZE(compute_tests); i++) {
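For reference, the conversion in this hunk treats an instruction as an opaque struct ppc_inst instead of a bare u32. A minimal sketch of the accessors it assumes (simplified; the kernel's real definitions also cover prefixed instructions on newer ISAs):

#include <linux/types.h>

struct ppc_inst {
	u32 val;
};

static inline struct ppc_inst ppc_inst(u32 x)
{
	return (struct ppc_inst){ .val = x };
}

static inline u32 ppc_inst_val(struct ppc_inst x)
{
	/* the tests above use this to print the raw word or test for zero */
	return x.val;
}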
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 877d880890fe..923ad8f374eb 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -14,9 +14,9 @@
* hash table, so this file is not used on them.)
*/
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
@@ -81,7 +81,7 @@ _GLOBAL(hash_page)
rlwinm. r8,r8,0,0,20 /* extract pt base address */
#endif
#ifdef CONFIG_SMP
- beq- hash_page_out /* return if no mapping */
+ beq- .Lhash_page_out /* return if no mapping */
#else
/* XXX it seems like the 601 will give a machine fault on the
rfi if its alignment is wrong (bottom 4 bits of address are
@@ -109,11 +109,11 @@ _GLOBAL(hash_page)
#if (PTE_FLAGS_OFFSET != 0)
addi r8,r8,PTE_FLAGS_OFFSET
#endif
-retry:
+.Lretry:
lwarx r6,0,r8 /* get linux-style pte, flag word */
andc. r5,r3,r6 /* check access & ~permission */
#ifdef CONFIG_SMP
- bne- hash_page_out /* return if access not permitted */
+ bne- .Lhash_page_out /* return if access not permitted */
#else
bnelr-
#endif
@@ -128,7 +128,7 @@ retry:
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
stwcx. r5,0,r8 /* attempt to update PTE */
- bne- retry /* retry if someone got there first */
+ bne- .Lretry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
@@ -156,13 +156,14 @@ retry:
#endif
#ifdef CONFIG_SMP
-hash_page_out:
+.Lhash_page_out:
eieio
lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0
stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
blr
#endif /* CONFIG_SMP */
+_ASM_NOKPROBE_SYMBOL(hash_page)
/*
* Add an entry for a particular page to the hash table.
@@ -267,6 +268,7 @@ _GLOBAL(add_hash_page)
lwz r0,4(r1)
mtlr r0
blr
+_ASM_NOKPROBE_SYMBOL(add_hash_page)
/*
* This routine adds a hardware PTE to the hash table.
@@ -360,7 +362,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
CMPPTE 0,r6,r5
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
- beq+ found_slot
+ beq+ .Lfound_slot
patch_site 0f, patch__hash_page_B
/* Search the secondary PTEG for a matching PTE */
@@ -372,7 +374,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
2: LDPTEu r6,HPTE_SIZE(r4)
CMPPTE 0,r6,r5
bdnzf 2,2b
- beq+ found_slot
+ beq+ .Lfound_slot
xori r5,r5,PTE_H /* clear H bit again */
/* Search the primary PTEG for an empty slot */
@@ -381,7 +383,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
TST_V(r6) /* test valid bit */
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
- beq+ found_empty
+ beq+ .Lfound_empty
/* update counter of times that the primary PTEG is full */
lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
@@ -399,7 +401,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
2: LDPTEu r6,HPTE_SIZE(r4)
TST_V(r6)
bdnzf 2,2b
- beq+ found_empty
+ beq+ .Lfound_empty
xori r5,r5,PTE_H /* clear H bit again */
/*
@@ -437,9 +439,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifndef CONFIG_SMP
/* Store PTE in PTEG */
-found_empty:
+.Lfound_empty:
STPTE r5,0(r4)
-found_slot:
+.Lfound_slot:
STPTE r8,HPTE_SIZE/2(r4)
#else /* CONFIG_SMP */
@@ -460,8 +462,8 @@ found_slot:
* We do however have to make sure that the PTE is never in an invalid
* state with the V bit set.
*/
-found_empty:
-found_slot:
+.Lfound_empty:
+.Lfound_slot:
CLR_V(r5,r0) /* clear V (valid) bit in PTE */
STPTE r5,0(r4)
sync
@@ -474,6 +476,7 @@ found_slot:
sync /* make sure pte updates get to memory */
blr
+_ASM_NOKPROBE_SYMBOL(create_hpte)
.section .bss
.align 2
@@ -630,6 +633,7 @@ _GLOBAL(flush_hash_pages)
isync
blr
EXPORT_SYMBOL(flush_hash_pages)
+_ASM_NOKPROBE_SYMBOL(flush_hash_pages)
/*
* Flush an entry from the TLB
@@ -667,6 +671,7 @@ _GLOBAL(_tlbie)
sync
#endif /* CONFIG_SMP */
blr
+_ASM_NOKPROBE_SYMBOL(_tlbie)
/*
* Flush the entire TLB. 603/603e only
@@ -708,3 +713,4 @@ _GLOBAL(_tlbia)
isync
#endif /* CONFIG_SMP */
blr
+_ASM_NOKPROBE_SYMBOL(_tlbia)
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 39ba53ca5bb5..03b6ba54460e 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -170,6 +170,12 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
pr_debug("RAM mapped without BATs\n");
return base;
}
+ if (debug_pagealloc_enabled()) {
+ if (base >= border)
+ return base;
+ if (top >= border)
+ top = border;
+ }
if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
return __mmu_mapin_ram(base, top);
@@ -187,6 +193,7 @@ void mmu_mark_initmem_nx(void)
int i;
unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
+ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
unsigned long size;
if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
@@ -201,9 +208,10 @@ void mmu_mark_initmem_nx(void)
size = block_size(base, top);
size = max(size, 128UL << 10);
if ((top - base) > size) {
- if (strict_kernel_rwx_enabled())
- pr_warn("Kernel _etext not properly aligned\n");
size <<= 1;
+ if (strict_kernel_rwx_enabled() && base + size > border)
+ pr_warn("Some RW data is getting mapped X. "
+ "Adjust CONFIG_DATA_SHIFT to avoid that.\n");
}
setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
base += size;
@@ -312,7 +320,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
if (!Hash)
return;
- pmd = pmd_ptr(mm, ea);
+ pmd = pmd_off(mm, ea);
if (!pmd_none(*pmd))
add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index dc9039a170aa..b6c7427daa6f 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -90,7 +90,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
if (start >= end)
return;
end = (end - 1) | ~PAGE_MASK;
- pmd = pmd_ptr(mm, start);
+ pmd = pmd_off(mm, start);
for (;;) {
pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (pmd_end > end)
@@ -129,7 +129,7 @@ void flush_tlb_mm(struct mm_struct *mm)
/*
* It is safe to go down the mm's list of vmas when called
- * from dup_mmap, holding mmap_sem. It would also be safe from
+ * from dup_mmap, holding mmap_lock. It would also be safe from
* unmap_region or exit_mmap, but not from vmtruncate on SMP -
* but it seems dup_mmap is the only SMP case which gets here.
*/
@@ -148,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
return;
}
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
- pmd = pmd_ptr(mm, vmaddr);
+ pmd = pmd_off(mm, vmaddr);
if (!pmd_none(*pmd))
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
index eefa89c6117b..25acb9c5ee1b 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index d2d8237ea9d5..cf20e5229ce1 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -14,11 +14,11 @@
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 64733b9cb20a..2a99167afbaf 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
@@ -148,6 +147,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -155,7 +155,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
if (slab_is_available()) {
pgdp = pgd_offset_k(ea);
- pudp = pud_alloc(&init_mm, pgdp, ea);
+ p4dp = p4d_offset(pgdp, ea);
+ pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
pmdp = pmd_alloc(&init_mm, pudp, ea);
@@ -236,7 +237,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* to hugepage, we first clear the pmd, then invalidate all
* the PTE entries. The assumption here is that any low level
* page fault will see a none pmd and take the slow path that
- * will wait on mmap_sem. But we could very well be in a
+ * will wait on mmap_lock. But we could very well be in a
* hash_page with local ptep pointer value. Such a hash page
* can result in adding new HPTE entries for normal subpages.
* That means we could be modifying the page content as we
@@ -250,7 +251,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* Now invalidate the hpte entries in the range
* covered by pmd. This make sure we take a
* fault and will find the pmd as none, which will
- * result in a major fault which takes mmap_sem and
+ * result in a major fault which takes mmap_lock and
* hence wait for collapse to complete. Without this
* the __collapse_huge_page_copy can result in copying
* the old content.
@@ -363,17 +364,6 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
* hash fault look at them.
*/
memset(pgtable, 0, PTE_FRAG_SIZE);
- /*
- * Serialize against find_current_mm_pte variants which does lock-less
- * lookup in page tables with local interrupts disabled. For huge pages
- * it casts pmd_t to pte_t. Since format of pte_t is different from
- * pmd_t we want to prevent transit from pmd pointing to page table
- * to pmd pointing to huge page (and back) while interrupts are disabled.
- * We clear pmd to possibly replace it with page table pointer in
- * different code paths. So make sure we wait for the parallel
- * find_curren_mm_pte to finish.
- */
- serialize_against_pte_lookup(mm);
return old_pmd;
}
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 4a70d8dd39cd..0fbf3dc9f2c2 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -176,7 +176,6 @@ void hash__tlb_flush(struct mmu_gather *tlb)
* from the hash table (and the TLB). But keeps
* the linux PTEs intact.
*
- * @mm : mm_struct of the target address space (generally init_mm)
* @start : starting address
* @end : ending address (not included in the flush)
*
@@ -189,17 +188,14 @@ void hash__tlb_flush(struct mmu_gather *tlb)
* Because of that usage pattern, it is implemented for small size rather
* than speed.
*/
-void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+void __flush_hash_table_range(unsigned long start, unsigned long end)
{
- bool is_thp;
int hugepage_shift;
unsigned long flags;
- start = _ALIGN_DOWN(start, PAGE_SIZE);
- end = _ALIGN_UP(end, PAGE_SIZE);
+ start = ALIGN_DOWN(start, PAGE_SIZE);
+ end = ALIGN(end, PAGE_SIZE);
- BUG_ON(!mm->pgd);
/*
* Note: Normally, we should only ever use a batch within a
@@ -212,21 +208,15 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
local_irq_save(flags);
arch_enter_lazy_mmu_mode();
for (; start < end; start += PAGE_SIZE) {
- pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
- &hugepage_shift);
+ pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
unsigned long pte;
if (ptep == NULL)
continue;
pte = pte_val(*ptep);
- if (is_thp)
- trace_hugepage_invalidate(start, pte);
if (!(pte & H_PAGE_HASHPTE))
continue;
- if (unlikely(is_thp))
- hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
- else
- hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
+ hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
}
arch_leave_lazy_mmu_mode();
local_irq_restore(flags);
@@ -238,7 +228,7 @@ void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
pte_t *start_pte;
unsigned long flags;
- addr = _ALIGN_DOWN(addr, PMD_SIZE);
+ addr = ALIGN_DOWN(addr, PMD_SIZE);
/*
* Note: Normally, we should only ever use a batch within a
* PTE locked section. This violates the rule, but will work
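The _ALIGN_DOWN()/_ALIGN_UP() to ALIGN_DOWN()/ALIGN() switch above is a rename to the generic helpers; semantics are unchanged for power-of-two sizes. A standalone illustration using local macros that mirror the kernel ones (not kernel code):

#include <stdio.h>

#define MY_ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define MY_ALIGN_UP(x, a)	MY_ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
	unsigned long page = 4096;

	printf("%#lx\n", MY_ALIGN_DOWN(0x12345UL, page));	/* 0x12000 */
	printf("%#lx\n", MY_ALIGN_UP(0x23456UL, page));		/* 0x24000 */
	return 0;
}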
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 8ed2411c3f39..468169e33c86 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -35,10 +35,10 @@
#include <linux/pkeys.h>
#include <linux/hugetlb.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
@@ -66,6 +66,9 @@
#include <mm/mmu_decl.h>
+#include "internal.h"
+
+
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
@@ -870,6 +873,9 @@ static void __init htab_initialize(void)
printk(KERN_INFO "Using 1TB segments\n");
}
+ if (stress_slb_enabled)
+ static_branch_enable(&stress_slb_key);
+
/*
* Calculate the required size of the htab. We want the number of
* PTEGs to equal one half the number of real pages.
@@ -1350,8 +1356,15 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
goto bail;
}
- /* Add _PAGE_PRESENT to the required access perm */
- access |= _PAGE_PRESENT;
+ /*
+ * Add _PAGE_PRESENT to the required access perm. If there are parallel
+ * updates to the pte that can possibly clear _PAGE_PTE, catch that too.
+ *
+ * We can safely use the returned pte address in the rest of the function
+ * because we set H_PAGE_BUSY, which prevents further updates to the pte
+ * from generic code.
+ */
+ access |= _PAGE_PRESENT | _PAGE_PTE;
/*
* Pre-check access permissions (will be re-checked atomically
@@ -1539,14 +1552,11 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
}
#endif
-static void hash_preload(struct mm_struct *mm, unsigned long ea,
+static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
bool is_exec, unsigned long trap)
{
- int hugepage_shift;
unsigned long vsid;
pgd_t *pgdir;
- pte_t *ptep;
- unsigned long flags;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
@@ -1568,30 +1578,18 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
vsid = get_user_vsid(&mm->context, ea, ssize);
if (!vsid)
return;
- /*
- * Hash doesn't like irqs. Walking linux page table with irq disabled
- * saves us from holding multiple locks.
- */
- local_irq_save(flags);
- /*
- * THP pages use update_mmu_cache_pmd. We don't do
- * hash preload there. Hence can ignore THP here
- */
- ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
- if (!ptep)
- goto out_exit;
-
- WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
* a 64K kernel), then we don't preload, hash_page() will take
* care of it once we actually try to access the page.
* That way we don't have to duplicate all of the logic for segment
* page size demotion here
+ * Called with PTL held, hence can be sure the value won't change in
+ * between.
*/
if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
- goto out_exit;
+ return;
#endif /* CONFIG_PPC_64K_PAGES */
/* Is that local to this CPU ? */
@@ -1616,8 +1614,6 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
-out_exit:
- local_irq_restore(flags);
}
/*
@@ -1638,10 +1634,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
unsigned long trap;
bool is_exec;
- if (radix_enabled()) {
- prefetch((void *)address);
+ if (radix_enabled())
return;
- }
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(*ptep) || address >= TASK_SIZE)
@@ -1668,32 +1662,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
return;
}
- hash_preload(vma->vm_mm, address, is_exec, trap);
-}
-
-#ifdef CONFIG_PPC_MEM_KEYS
-/*
- * Return the protection key associated with the given address and the
- * mm_struct.
- */
-u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
-{
- pte_t *ptep;
- u16 pkey = 0;
- unsigned long flags;
-
- if (!mm || !mm->pgd)
- return 0;
-
- local_irq_save(flags);
- ptep = find_linux_pte(mm->pgd, address, NULL, NULL);
- if (ptep)
- pkey = pte_to_pkey_bits(pte_val(READ_ONCE(*ptep)));
- local_irq_restore(flags);
-
- return pkey;
+ hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
}
-#endif /* CONFIG_PPC_MEM_KEYS */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_flush_hash_page(int local)
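The rework above can drop the irq-disabled lockless walk because the caller now hands in ptep. A fragment (hypothetical helper name) of the contract generic mm provides when it calls update_mmu_cache() with the PTE lock held:

static void example_set_pte_and_preload(struct vm_area_struct *vma, pmd_t *pmd,
					unsigned long address, pte_t entry)
{
	spinlock_t *ptl;
	pte_t *ptep;

	ptep = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
	set_pte_at(vma->vm_mm, address, ptep, entry);
	/* ends up in hash_preload(vma->vm_mm, ptep, ...) with PTL still held */
	update_mmu_cache(vma, address, ptep);
	pte_unmap_unlock(ptep, ptl);
}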
diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h
new file mode 100644
index 000000000000..7eda0d30d765
--- /dev/null
+++ b/arch/powerpc/mm/book3s64/internal.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H
+#define ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H
+
+#include <linux/jump_label.h>
+
+extern bool stress_slb_enabled;
+
+DECLARE_STATIC_KEY_FALSE(stress_slb_key);
+
+static inline bool stress_slb(void)
+{
+ return static_branch_unlikely(&stress_slb_key);
+}
+
+#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */
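The header above pairs a boot-time flag with a static key. A generic sketch of the same pattern with illustrative names (flag captured at early_param time, key flipped once jump labels are usable):

#include <linux/init.h>
#include <linux/jump_label.h>

static bool my_feature_enabled __initdata;

static int __init parse_my_feature(char *p)
{
	my_feature_enabled = true;
	return 0;
}
early_param("my_feature", parse_my_feature);

DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void __init my_feature_init(void)
{
	/* called from a suitable __init path, e.g. MMU setup */
	if (my_feature_enabled)
		static_branch_enable(&my_feature_key);
}

static inline bool my_feature(void)
{
	return static_branch_unlikely(&my_feature_key);
}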
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index fa05bbd1f682..563faa10bb66 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
goto unlock_exit;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
sizeof(struct vm_area_struct *);
chunk = min(chunk, entries);
@@ -114,7 +114,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
pinned += ret;
break;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (pinned != entries) {
if (!ret)
ret = -EFAULT;
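The mmap_sem to mmap_lock conversion in this and the following files is a mechanical switch to the wrapper API; locking behaviour is unchanged. A minimal sketch of the read-side pattern (illustrative helper, assuming <linux/mmap_lock.h>):

#include <linux/mmap_lock.h>
#include <linux/mm_types.h>

static long with_mmap_read_lock(struct mm_struct *mm,
				long (*fn)(struct mm_struct *mm))
{
	long ret;

	mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
	ret = fn(mm);
	mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */

	return ret;
}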
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index e0bb69c616e4..c58ad1049909 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -109,15 +109,25 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ return __pmd(old_pmd);
+}
+
+pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp, int full)
+{
+ pmd_t pmd;
+ VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
+ VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+ !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
+ pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
/*
- * This ensures that generic code that rely on IRQ disabling
- * to prevent a parallel THP split work as expected.
- *
- * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
- * a special case check in pmd_access_permitted.
+ * if it is not a fullmm flush, then we can possibly end up converting
+ * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
+ * Make sure we flush the tlb in this case.
*/
- serialize_against_pte_lookup(vma->vm_mm);
- return __pmd(old_pmd);
+ if (!full)
+ flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+ return pmd;
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
@@ -146,19 +156,6 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmdv &= _HPAGE_CHG_MASK;
return pmd_set_protbits(__pmd(pmdv), newprot);
}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a HUGE PMD entry in the linux page tables.
- * We use it to preload an HPTE into the hash table corresponding to
- * the updated linux HUGE PMD entry.
- */
-void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd)
-{
- if (radix_enabled())
- prefetch((void *)addr);
-}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/* For use by kexec */
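To see when the new full argument matters: generic THP teardown is assumed to pass tlb->fullmm, so only a partial unmap (full == 0) needs the extra TLB flush added above. A simplified, hypothetical caller:

static pmd_t example_clear_huge_pmd(struct mmu_gather *tlb,
				    struct vm_area_struct *vma,
				    pmd_t *pmdp, unsigned long addr)
{
	/*
	 * full == 1 only when the whole address space is being torn down,
	 * so no parallel fault can turn this PMD back into a page table
	 * and the extra flush_pmd_tlb_range() in the helper is skipped.
	 */
	return pmdp_huge_get_and_clear_full(vma, addr, pmdp, tlb->fullmm);
}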
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cab06331c0c0..c812b401b66c 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -2,7 +2,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 8f9edf07063a..bb00e0cba119 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -17,7 +17,6 @@
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
@@ -65,17 +64,19 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
{
unsigned long pfn = pa >> PAGE_SHIFT;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
pgdp = pgd_offset_k(ea);
- if (pgd_none(*pgdp)) {
+ p4dp = p4d_offset(pgdp, ea);
+ if (p4d_none(*p4dp)) {
pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
region_start, region_end);
- pgd_populate(&init_mm, pgdp, pudp);
+ p4d_populate(&init_mm, p4dp, pudp);
}
- pudp = pud_offset(pgdp, ea);
+ pudp = pud_offset(p4dp, ea);
if (map_page_size == PUD_SIZE) {
ptep = (pte_t *)pudp;
goto set_the_pte;
@@ -115,6 +116,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
{
unsigned long pfn = pa >> PAGE_SHIFT;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -137,7 +139,8 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
* boot.
*/
pgdp = pgd_offset_k(ea);
- pudp = pud_alloc(&init_mm, pgdp, ea);
+ p4dp = p4d_offset(pgdp, ea);
+ pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
if (map_page_size == PUD_SIZE) {
@@ -174,6 +177,7 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
{
unsigned long idx;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -186,7 +190,8 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
for (idx = start; idx < end; idx += PAGE_SIZE) {
pgdp = pgd_offset_k(idx);
- pudp = pud_alloc(&init_mm, pgdp, idx);
+ p4dp = p4d_offset(pgdp, idx);
+ pudp = pud_alloc(&init_mm, p4dp, idx);
if (!pudp)
continue;
if (pud_is_leaf(*pudp)) {
@@ -261,7 +266,7 @@ static int __meminit create_physical_mapping(unsigned long start,
pgprot_t prot;
int psize;
- start = _ALIGN_UP(start, PAGE_SIZE);
+ start = ALIGN(start, PAGE_SIZE);
for (addr = start; addr < end; addr += mapping_size) {
unsigned long gap, previous_size;
int rc;
@@ -850,6 +855,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
unsigned long addr, next;
pud_t *pud_base;
pgd_t *pgd;
+ p4d_t *p4d;
spin_lock(&init_mm.page_table_lock);
@@ -857,15 +863,16 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
next = pgd_addr_end(addr, end);
pgd = pgd_offset_k(addr);
- if (!pgd_present(*pgd))
+ p4d = p4d_offset(pgd, addr);
+ if (!p4d_present(*p4d))
continue;
- if (pgd_is_leaf(*pgd)) {
- split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
+ if (p4d_is_leaf(*p4d)) {
+ split_kernel_mapping(addr, end, P4D_SIZE, (pte_t *)p4d);
continue;
}
- pud_base = (pud_t *)pgd_page_vaddr(*pgd);
+ pud_base = (pud_t *)p4d_page_vaddr(*p4d);
remove_pud_table(pud_base, addr, next);
}
@@ -962,7 +969,13 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
pmd = *pmdp;
pmd_clear(pmdp);
- /*FIXME!! Verify whether we need this kick below */
+ /*
+ * pmdp_collapse_flush() needs to ensure that there are no parallel GUP
+ * walks after this call. This is needed so that we can have a stable
+ * page ref count when collapsing a page. We don't allow collapsing a page
+ * if GUP has taken a reference on it. We can ensure that by sending an IPI,
+ * because GUP walks happen with IRQs disabled.
+ */
serialize_against_pte_lookup(vma->vm_mm);
radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
@@ -1023,17 +1036,6 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
- /*
- * Serialize against find_current_mm_pte which does lock-less
- * lookup in page tables with local interrupts disabled. For huge pages
- * it casts pmd_t to pte_t. Since format of pte_t is different from
- * pmd_t we want to prevent transit from pmd pointing to page table
- * to pmd pointing to huge page (and back) while interrupts are disabled.
- * We clear pmd to possibly replace it with page table pointer in
- * different code paths. So make sure we wait for the parallel
- * find_current_mm_pte to finish.
- */
- serialize_against_pte_lookup(mm);
return old_pmd;
}
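The p4d conversion across these files inserts one extra level into every kernel page-table walk. A minimal sketch of the resulting walk for a kernel virtual address (generic accessors only, hypothetical helper name):

static pte_t *example_walk_kernel_pte(unsigned long ea)
{
	pgd_t *pgd = pgd_offset_k(ea);
	p4d_t *p4d = p4d_offset(pgd, ea);	/* new intermediate level */
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, ea);		/* was: pud_offset(pgd, ea) */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, ea);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, ea);
}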
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 758ade2c2b6e..b5cc9b23cf02 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -884,9 +884,7 @@ is_local:
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
hstart = (start + PMD_SIZE - 1) & PMD_MASK;
hend = end & PMD_MASK;
- if (hstart == hend)
- hflush = false;
- else
+ if (hstart < hend)
hflush = true;
}
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 716204aee3da..156c38f89511 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -10,7 +10,6 @@
*/
#include <asm/asm-prototypes.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
@@ -21,10 +20,14 @@
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
+#include "internal.h"
+
+
enum slb_index {
LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
KSTACK_INDEX = 1, /* Kernel stack map */
@@ -54,6 +57,17 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
+bool stress_slb_enabled __initdata;
+
+static int __init parse_stress_slb(char *p)
+{
+ stress_slb_enabled = true;
+ return 0;
+}
+early_param("stress_slb", parse_stress_slb);
+
+__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
+
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
@@ -68,7 +82,7 @@ static void assert_slb_presence(bool present, unsigned long ea)
* slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
* ignores all other bits from 0-27, so just clear them all.
*/
- ea &= ~((1UL << 28) - 1);
+ ea &= ~((1UL << SID_SHIFT) - 1);
asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
WARN_ON(present == (tmp == 0));
@@ -153,14 +167,42 @@ void slb_flush_all_realmode(void)
asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
+static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside)
+{
+ struct slb_shadow *p = get_slb_shadow();
+ unsigned long ksp_esid_data, ksp_vsid_data;
+ u32 ih;
+
+ /*
+ * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside
+ * information created with Class=0 entries, which we use for kernel
+ * SLB entries (the SLB entries themselves are still invalidated).
+ *
+ * Older processors will ignore this optimisation. Over-invalidation
+ * is fine because we never rely on lookaside information existing.
+ */
+ if (preserve_kernel_lookaside)
+ ih = 1;
+ else
+ ih = 0;
+
+ ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
+ ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
+
+ asm volatile(PPC_SLBIA(%0)" \n"
+ "slbmte %1, %2 \n"
+ :: "i" (ih),
+ "r" (ksp_vsid_data),
+ "r" (ksp_esid_data)
+ : "memory");
+}
+
/*
* This flushes non-bolted entries, it can be run in virtual mode. Must
* be called with interrupts disabled.
*/
void slb_flush_and_restore_bolted(void)
{
- struct slb_shadow *p = get_slb_shadow();
-
BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
WARN_ON(!irqs_disabled());
@@ -171,13 +213,10 @@ void slb_flush_and_restore_bolted(void)
*/
hard_irq_disable();
- asm volatile("isync\n"
- "slbia\n"
- "slbmte %0, %1\n"
- "isync\n"
- :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
- "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
- : "memory");
+ isync();
+ __slb_flush_and_restore_bolted(false);
+ isync();
+
assert_slb_presence(true, get_paca()->kstack);
get_paca()->slb_cache_ptr = 0;
@@ -400,6 +439,30 @@ void preload_new_slb_context(unsigned long start, unsigned long sp)
local_irq_enable();
}
+static void slb_cache_slbie_kernel(unsigned int index)
+{
+ unsigned long slbie_data = get_paca()->slb_cache[index];
+ unsigned long ksp = get_paca()->kstack;
+
+ slbie_data <<= SID_SHIFT;
+ slbie_data |= 0xc000000000000000ULL;
+ if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
+ return;
+ slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;
+
+ asm volatile("slbie %0" : : "r" (slbie_data));
+}
+
+static void slb_cache_slbie_user(unsigned int index)
+{
+ unsigned long slbie_data = get_paca()->slb_cache[index];
+
+ slbie_data <<= SID_SHIFT;
+ slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
+ slbie_data |= SLBIE_C; /* user slbs have C=1 */
+
+ asm volatile("slbie %0" : : "r" (slbie_data));
+}
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
@@ -414,8 +477,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* which would update the slb_cache/slb_cache_ptr fields in the PACA.
*/
hard_irq_disable();
- asm volatile("isync" : : : "memory");
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ isync();
+ if (stress_slb()) {
+ __slb_flush_and_restore_bolted(false);
+ isync();
+ get_paca()->slb_cache_ptr = 0;
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* SLBIA IH=3 invalidates all Class=1 SLBEs and their
* associated lookaside structures, which matches what
@@ -423,47 +492,29 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* cache.
*/
asm volatile(PPC_SLBIA(3));
+
} else {
unsigned long offset = get_paca()->slb_cache_ptr;
if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
offset <= SLB_CACHE_ENTRIES) {
- unsigned long slbie_data = 0;
-
- for (i = 0; i < offset; i++) {
- unsigned long ea;
-
- ea = (unsigned long)
- get_paca()->slb_cache[i] << SID_SHIFT;
- /*
- * Could assert_slb_presence(true) here, but
- * hypervisor or machine check could have come
- * in and removed the entry at this point.
- */
-
- slbie_data = ea;
- slbie_data |= user_segment_size(slbie_data)
- << SLBIE_SSIZE_SHIFT;
- slbie_data |= SLBIE_C; /* user slbs have C=1 */
- asm volatile("slbie %0" : : "r" (slbie_data));
- }
+ /*
+ * Could assert_slb_presence(true) here, but
+ * hypervisor or machine check could have come
+ * in and removed the entry at this point.
+ */
+
+ for (i = 0; i < offset; i++)
+ slb_cache_slbie_user(i);
/* Workaround POWER5 < DD2.1 issue */
if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
- asm volatile("slbie %0" : : "r" (slbie_data));
+ slb_cache_slbie_user(0);
} else {
- struct slb_shadow *p = get_slb_shadow();
- unsigned long ksp_esid_data =
- be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
- unsigned long ksp_vsid_data =
- be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
-
- asm volatile(PPC_SLBIA(1) "\n"
- "slbmte %0,%1\n"
- "isync"
- :: "r"(ksp_vsid_data),
- "r"(ksp_esid_data));
+ /* Flush but retain kernel lookaside information */
+ __slb_flush_and_restore_bolted(true);
+ isync();
get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
}
@@ -503,7 +554,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* address accesses by the kernel (user mode won't happen until
* rfid, which is safe).
*/
- asm volatile("isync" : : : "memory");
+ isync();
}
void slb_set_size(u16 size)
@@ -571,6 +622,9 @@ static void slb_cache_update(unsigned long esid_data)
if (cpu_has_feature(CPU_FTR_ARCH_300))
return; /* ISAv3.0B and later does not use slb_cache */
+ if (stress_slb())
+ return;
+
/*
* Now update slb cache entries
*/
@@ -580,7 +634,7 @@ static void slb_cache_update(unsigned long esid_data)
* We have space in slb cache for optimized switch_slb().
* Top 36 bits from esid_data as per ISA
*/
- local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
local_paca->slb_cache_ptr++;
} else {
/*
@@ -671,6 +725,28 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
* accesses user memory before it returns to userspace with rfid.
*/
assert_slb_presence(false, ea);
+ if (stress_slb()) {
+ int slb_cache_index = local_paca->slb_cache_ptr;
+
+ /*
+ * stress_slb() does not use slb cache, repurpose as a
+ * cache of inserted (non-bolted) kernel SLB entries. All
+ * non-bolted kernel entries are flushed on any user fault,
+ * or if there are already 3 non-boled kernel entries.
+ */
+ BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3);
+ if (!kernel || slb_cache_index == 3) {
+ int i;
+
+ for (i = 0; i < slb_cache_index; i++)
+ slb_cache_slbie_kernel(i);
+ slb_cache_index = 0;
+ }
+
+ if (kernel)
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
+ local_paca->slb_cache_ptr = slb_cache_index;
+ }
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
barrier();
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 2ef24a53f4c9..60c6ea16a972 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -11,7 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
/*
@@ -54,15 +54,17 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
int npages)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd))
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
return;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return;
pmd = pmd_offset(pud, addr);
@@ -92,7 +94,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
size_t nw;
unsigned long next, limit;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt)
@@ -127,7 +129,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
}
err_out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -217,13 +219,13 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
return -EFAULT;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt) {
/*
* Allocate subpage prot table if not already done.
- * Do this with mmap_sem held
+ * Do this with mmap_lock held
*/
spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
if (!spt) {
@@ -267,11 +269,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (addr + (nw << PAGE_SHIFT) > next)
nw = (next - addr) >> PAGE_SHIFT;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (__copy_from_user(spp, map, nw * sizeof(u32)))
return -EFAULT;
map += nw;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* now flush any existing HPTEs for the range */
hpte_flush_range(mm, addr, nw);
@@ -280,6 +282,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
spt->maxaddr = limit;
err = 0;
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return err;
}
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index beb060b96632..b83abbead4a2 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (mm->pgd == NULL)
return -EFAULT;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = -EFAULT;
vma = find_vma(mm, ea);
if (!vma)
@@ -82,7 +82,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
current->min_flt++;
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 84af6c8eecf7..641fc5f3d7dd 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -35,24 +35,24 @@
#include <asm/firmware.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
+#include <asm/inst.h>
/*
* Check whether the instruction inst is a store using
* an update addressing form which will update r1.
*/
-static bool store_updates_sp(unsigned int inst)
+static bool store_updates_sp(struct ppc_inst inst)
{
/* check for 1 in the rA field */
- if (((inst >> 16) & 0x1f) != 1)
+ if (((ppc_inst_val(inst) >> 16) & 0x1f) != 1)
return false;
/* check major opcode */
- switch (inst >> 26) {
+ switch (ppc_inst_primary_opcode(inst)) {
case OP_STWU:
case OP_STBU:
case OP_STHU:
@@ -60,10 +60,10 @@ static bool store_updates_sp(unsigned int inst)
case OP_STFDU:
return true;
case OP_STD: /* std or stdu */
- return (inst & 3) == 1;
+ return (ppc_inst_val(inst) & 3) == 1;
case OP_31:
/* check minor opcode */
- switch ((inst >> 1) & 0x3ff) {
+ switch ((ppc_inst_val(inst) >> 1) & 0x3ff) {
case OP_31_XOP_STDUX:
case OP_31_XOP_STWUX:
case OP_31_XOP_STBUX:
@@ -108,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return __bad_area_nosemaphore(regs, address, si_code);
}
@@ -118,9 +118,34 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
return __bad_area(regs, address, SEGV_MAPERR);
}
-static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
- int pkey)
+#ifdef CONFIG_PPC_MEM_KEYS
+static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
+ struct vm_area_struct *vma)
{
+ struct mm_struct *mm = current->mm;
+ int pkey;
+
+ /*
+ * We don't try to fetch the pkey from page table because reading
+ * page table without locking doesn't guarantee stable pte value.
+ * Hence the pkey value that we return to userspace can be different
+ * from the pkey that actually caused access error.
+ *
+ * It does *not* guarantee that the VMA we find here
+ * was the one that we faulted on.
+ *
+ * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+ * 2. T1 : set AMR to deny access to pkey=4, touches page
+ * 3. T1 : faults...
+ * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
+ * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ * faulted on a pte with its pkey=4.
+ */
+ pkey = vma_pkey(vma);
+
+ mmap_read_unlock(mm);
+
/*
* If we are in kernel mode, bail out with a SEGV, this will
* be caught by the assembly which will restore the non-volatile
@@ -133,6 +158,7 @@ static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
return 0;
}
+#endif
static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
@@ -255,7 +281,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
- unsigned int __user *nip = (unsigned int __user *)regs->nip;
+ struct ppc_inst __user *nip = (struct ppc_inst __user *)regs->nip;
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
@@ -278,9 +304,9 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
access_ok(nip, sizeof(*nip))) {
- unsigned int inst;
+ struct ppc_inst inst;
- if (!probe_user_read(&inst, nip, sizeof(inst)))
+ if (!probe_user_read_inst(&inst, nip))
return !store_updates_sp(inst);
*must_retry = true;
}
@@ -289,8 +315,23 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
return false;
}
-static bool access_error(bool is_write, bool is_exec,
- struct vm_area_struct *vma)
+#ifdef CONFIG_PPC_MEM_KEYS
+static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
+ struct vm_area_struct *vma)
+{
+ /*
+ * Make sure to check the VMA so that we do not perform
+ * faults just to hit a pkey fault as soon as we fill in a
+ * page. Only called for current mm, hence foreign == 0
+ */
+ if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
+ return true;
+
+ return false;
+}
+#endif
+
+static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
/*
* Allow execution from readable areas if the MMU does not
@@ -483,14 +524,10 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & DSISR_KEYFAULT)
- return bad_key_fault_exception(regs, address,
- get_mm_addr_key(mm, address));
-
/*
- * We want to do this outside mmap_sem, because reading code around nip
+ * We want to do this outside mmap_lock, because reading code around nip
* can result in fault, which will cause a deadlock when called with
- * mmap_sem held
+ * mmap_lock held
*/
if (is_user)
flags |= FAULT_FLAG_USER;
@@ -502,7 +539,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -514,12 +551,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!is_user && !search_exception_tables(regs->nip))
return bad_area_nosemaphore(regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -543,7 +580,7 @@ retry:
if (!must_retry)
return bad_area(regs, address);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (fault_in_pages_readable((const char __user *)regs->nip,
sizeof(unsigned int)))
return bad_area_nosemaphore(regs, address);
@@ -555,6 +592,13 @@ retry:
return bad_area(regs, address);
good_area:
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ if (unlikely(access_pkey_error(is_write, is_exec,
+ (error_code & DSISR_KEYFAULT), vma)))
+ return bad_access_pkey(regs, address, vma);
+#endif /* CONFIG_PPC_MEM_KEYS */
+
if (unlikely(access_error(is_write, is_exec, vma)))
return bad_access(regs, address);
@@ -565,28 +609,13 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags);
-#ifdef CONFIG_PPC_MEM_KEYS
- /*
- * we skipped checking for access error due to key earlier.
- * Check that using handle_mm_fault error return.
- */
- if (unlikely(fault & VM_FAULT_SIGSEGV) &&
- !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {
-
- int pkey = vma_pkey(vma);
-
- up_read(&mm->mmap_sem);
- return bad_key_fault_exception(regs, address, pkey);
- }
-#endif /* CONFIG_PPC_MEM_KEYS */
-
major |= fault & VM_FAULT_MAJOR;
if (fault_signal_pending(fault, regs))
return user_mode(regs) ? 0 : SIGBUS;
/*
- * Handle the retry right now, the mmap_sem has been released in that
+ * Handle the retry right now, the mmap_lock has been released in that
* case.
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
@@ -596,7 +625,7 @@ good_area:
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 320c1672b2ae..624b4438aff9 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -24,22 +24,11 @@
#include <linux/highmem.h>
#include <linux/module.h>
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -49,17 +38,14 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
return (void*) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- preempt_enable();
+ if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
- }
if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
int type = kmap_atomic_idx();
@@ -77,7 +63,5 @@ void __kunmap_atomic(void *kvaddr)
}
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
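The rename reflects the split between generic and arch code: the common kmap_atomic_prot() is assumed to do the preempt/pagefault bookkeeping and the lowmem shortcut, calling the arch hook only for real highmem pages. Roughly (simplified, not the exact upstream wrapper):

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);	/* arch hook above */
}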
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4d5ed1093615..e9bfbccd975d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -19,7 +19,6 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
@@ -30,7 +29,8 @@ bool hugetlb_disabled = false;
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
-#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
+#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \
+ __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
@@ -53,24 +53,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (pshift >= pdshift) {
cachep = PGT_CACHE(PTE_T_ORDER);
num_hugepd = 1 << (pshift - pdshift);
- new = NULL;
- } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
- cachep = NULL;
- num_hugepd = 1;
- new = pte_alloc_one(mm);
} else {
cachep = PGT_CACHE(pdshift - pshift);
num_hugepd = 1;
- new = NULL;
}
- if (!cachep && !new) {
+ if (!cachep) {
WARN_ONCE(1, "No page table cache created for hugetlb tables");
return -ENOMEM;
}
- if (cachep)
- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -101,10 +94,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
*hpdp = __hugepd(0);
- if (cachep)
- kmem_cache_free(cachep, new);
- else
- pte_free(mm, new);
+ kmem_cache_free(cachep, new);
} else {
kmemleak_ignore(new);
}
@@ -119,6 +109,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
pgd_t *pg;
+ p4d_t *p4;
pud_t *pu;
pmd_t *pm;
hugepd_t *hpdp = NULL;
@@ -128,20 +119,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
addr &= ~(sz-1);
pg = pgd_offset(mm, addr);
+ p4 = p4d_offset(pg, addr);
#ifdef CONFIG_PPC_BOOK3S_64
if (pshift == PGDIR_SHIFT)
/* 16GB huge page */
- return (pte_t *) pg;
+ return (pte_t *) p4;
else if (pshift > PUD_SHIFT) {
/*
* We need to use hugepd table
*/
ptl = &mm->page_table_lock;
- hpdp = (hugepd_t *)pg;
+ hpdp = (hugepd_t *)p4;
} else {
pdshift = PUD_SHIFT;
- pu = pud_alloc(mm, pg, addr);
+ pu = pud_alloc(mm, p4, addr);
if (!pu)
return NULL;
if (pshift == PUD_SHIFT)
@@ -166,10 +158,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
#else
if (pshift >= PGDIR_SHIFT) {
ptl = &mm->page_table_lock;
- hpdp = (hugepd_t *)pg;
+ hpdp = (hugepd_t *)p4;
} else {
pdshift = PUD_SHIFT;
- pu = pud_alloc(mm, pg, addr);
+ pu = pud_alloc(mm, p4, addr);
if (!pu)
return NULL;
if (pshift >= PUD_SHIFT) {
@@ -188,6 +180,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
if (!hpdp)
return NULL;
+ if (IS_ENABLED(CONFIG_PPC_8xx) && sz == SZ_512K)
+ return pte_alloc_map(mm, (pmd_t *)hpdp, addr);
+
BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
@@ -253,7 +248,7 @@ int __init alloc_bootmem_huge_page(struct hstate *h)
struct hugepd_freelist {
struct rcu_head rcu;
unsigned int index;
- void *ptes[0];
+ void *ptes[];
};
static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
@@ -330,13 +325,20 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
- else if (IS_ENABLED(CONFIG_PPC_8xx))
- pgtable_free_tlb(tlb, hugepte, 0);
else
pgtable_free_tlb(tlb, hugepte,
get_hugepd_cache_index(pdshift - shift));
}
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr)
+{
+ pgtable_t token = pmd_pgtable(*pmd);
+
+ pmd_clear(pmd);
+ pte_free_tlb(tlb, token, addr);
+ mm_dec_nr_ptes(tlb->mm);
+}
+
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
@@ -352,11 +354,17 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+
/*
* if it is not hugepd pointer, we should already find
* it cleared.
*/
- WARN_ON(!pmd_none_or_clear_bad(pmd));
+ WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));
+
+ hugetlb_free_pte_range(tlb, pmd, addr);
+
continue;
}
/*
@@ -390,7 +398,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
mm_dec_nr_pmds(tlb->mm);
}
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -400,7 +408,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
start = addr;
do {
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
next = pud_addr_end(addr, end);
if (!is_hugepd(__hugepd(pud_val(*pud)))) {
if (pud_none_or_clear_bad(pud))
@@ -435,8 +443,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
if (end - 1 > ceiling - 1)
return;
- pud = pud_offset(pgd, start);
- pgd_clear(pgd);
+ pud = pud_offset(p4d, start);
+ p4d_clear(p4d);
pud_free_tlb(tlb, pud, start);
mm_dec_nr_puds(tlb->mm);
}
@@ -449,6 +457,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
+ p4d_t *p4d;
unsigned long next;
/*
@@ -471,10 +480,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
do {
next = pgd_addr_end(addr, end);
pgd = pgd_offset(tlb->mm, addr);
+ p4d = p4d_offset(pgd, addr);
if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
- if (pgd_none_or_clear_bad(pgd))
+ if (p4d_none_or_clear_bad(p4d))
continue;
- hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
} else {
unsigned long more;
/*
@@ -487,7 +497,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
if (more > next)
next = more;
- free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
+ free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
addr, next, floor, ceiling);
}
} while (addr = next, addr != end);
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 42ef7a6e6098..8e0d792ac296 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -17,8 +17,8 @@
#undef DEBUG
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/kup.h>
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 872df48ae41b..5a5469eb3174 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -32,7 +32,6 @@
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -96,11 +95,13 @@ static void __init MMU_setup(void)
if (strstr(boot_command_line, "noltlbs")) {
__map_without_ltlbs = 1;
}
- if (debug_pagealloc_enabled()) {
- __map_without_bats = 1;
+ if (IS_ENABLED(CONFIG_PPC_8xx))
+ return;
+
+ if (debug_pagealloc_enabled())
__map_without_ltlbs = 1;
- }
- if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx))
+
+ if (strict_kernel_rwx_enabled())
__map_without_ltlbs = 1;
}
@@ -170,8 +171,6 @@ void __init MMU_init(void)
btext_unmap();
#endif
- kasan_mmu_init();
-
setup_kup();
/* Shortly after that, the entire linear mapping will be available */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 4002ced3596f..bc73abf0bc25 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -47,7 +47,6 @@
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
@@ -203,7 +202,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
- start = _ALIGN_DOWN(start, page_size);
+ start = ALIGN_DOWN(start, page_size);
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
@@ -292,7 +291,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
unsigned long alt_start = ~0, alt_end = ~0;
unsigned long base_pfn;
- start = _ALIGN_DOWN(start, page_size);
+ start = ALIGN_DOWN(start, page_size);
if (altmap) {
alt_start = altmap->base_pfn;
alt_end = altmap->base_pfn + altmap->reserve +
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
new file mode 100644
index 000000000000..569d98a41881
--- /dev/null
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <linux/hugetlb.h>
+#include <asm/pgalloc.h>
+
+static int __init
+kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
+{
+ pmd_t *pmd = pmd_off_k(k_start);
+ unsigned long k_cur, k_next;
+
+ for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
+ pte_basic_t *new;
+
+ k_next = pgd_addr_end(k_cur, k_end);
+ k_next = pgd_addr_end(k_next, k_end);
+ if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
+ continue;
+
+ new = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+ if (!new)
+ return -ENOMEM;
+
+ *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL)));
+
+ hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M);
+ hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M);
+ }
+ return 0;
+}
+
+int __init kasan_init_region(void *start, size_t size)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+ unsigned long k_cur;
+ int ret;
+ void *block;
+
+ block = memblock_alloc(k_end - k_start, SZ_8M);
+ if (!block)
+ return -ENOMEM;
+
+ if (IS_ALIGNED(k_start, SZ_8M)) {
+ kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block);
+ k_cur = ALIGN_DOWN(k_end, SZ_8M);
+ if (k_cur == k_end)
+ goto finish;
+ } else {
+ k_cur = k_start;
+ }
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+ for (; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
+ void *va = block + k_cur - k_start;
+ pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+
+ if (k_cur < ALIGN_DOWN(k_end, SZ_512K))
+ pte = pte_mkhuge(pte);
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+finish:
+ flush_tlb_kernel_range(k_start, k_end);
+ return 0;
+}
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 6577897673dd..bb1a5408b86b 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -3,3 +3,5 @@
KASAN_SANITIZE := n
obj-$(CONFIG_PPC32) += kasan_init_32.o
+obj-$(CONFIG_PPC_8xx) += 8xx.o
+obj-$(CONFIG_PPC_BOOK3S_32) += book3s_32.o
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
new file mode 100644
index 000000000000..a32b4640b9de
--- /dev/null
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <mm/mmu_decl.h>
+
+int __init kasan_init_region(void *start, size_t size)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+ unsigned long k_cur = k_start;
+ int k_size = k_end - k_start;
+ int k_size_base = 1 << (ffs(k_size) - 1);
+ int ret;
+ void *block;
+
+ block = memblock_alloc(k_size, k_size_base);
+
+ if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
+ int k_size_more = 1 << (ffs(k_size - k_size_base) - 1);
+
+ setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
+ if (k_size_more >= SZ_128K)
+ setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
+ k_size_more, PAGE_KERNEL);
+ if (v_block_mapped(k_start))
+ k_cur = k_start + k_size_base;
+ if (v_block_mapped(k_start + k_size_base))
+ k_cur = k_start + k_size_base + k_size_more;
+
+ update_bats();
+ }
+
+ if (!block)
+ block = memblock_alloc(k_size, PAGE_SIZE);
+ if (!block)
+ return -ENOMEM;
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+ kasan_update_early_region(k_start, k_cur, __pte(0));
+
+ for (; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
+ void *va = block + k_cur - k_start;
+ pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+ return 0;
+}
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index cbcad369fcb2..0760e1e754e4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -5,9 +5,7 @@
#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
-#include <linux/moduleloader.h>
#include <linux/sched/task.h>
-#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
@@ -30,40 +28,31 @@ static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}
-static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
pmd_t *pmd;
unsigned long k_cur, k_next;
- pte_t *new = NULL;
- pmd = pmd_ptr_k(k_start);
+ pmd = pmd_off_k(k_start);
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
+ pte_t *new;
+
k_next = pgd_addr_end(k_cur, k_end);
if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
continue;
- if (!new)
- new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+ new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
if (!new)
return -ENOMEM;
kasan_populate_pte(new, PAGE_KERNEL);
-
- smp_wmb(); /* See comment in __pte_alloc */
-
- spin_lock(&init_mm.page_table_lock);
- /* Has another populated it ? */
- if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
- pmd_populate_kernel(&init_mm, pmd, new);
- new = NULL;
- }
- spin_unlock(&init_mm.page_table_lock);
+ pmd_populate_kernel(&init_mm, pmd, new);
}
return 0;
}
-static int __init kasan_init_region(void *start, size_t size)
+int __init __weak kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
@@ -76,75 +65,63 @@ static int __init kasan_init_region(void *start, size_t size)
return ret;
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ if (!block)
+ return -ENOMEM;
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
- if (!va)
- return -ENOMEM;
-
__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
}
flush_tlb_kernel_range(k_start, k_end);
return 0;
}
-static void __init kasan_remap_early_shadow_ro(void)
+void __init
+kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
{
- pgprot_t prot = kasan_prot_ro();
- unsigned long k_start = KASAN_SHADOW_START;
- unsigned long k_end = KASAN_SHADOW_END;
unsigned long k_cur;
phys_addr_t pa = __pa(kasan_early_shadow_page);
- kasan_populate_pte(kasan_early_shadow_pte, prot);
-
- for (k_cur = k_start & PAGE_MASK; k_cur != k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
pte_t *ptep = pte_offset_kernel(pmd, k_cur);
if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
continue;
- __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+ __set_pte_at(&init_mm, k_cur, ptep, pte, 0);
}
- flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ flush_tlb_kernel_range(k_start, k_end);
}
-static void __init kasan_unmap_early_shadow_vmalloc(void)
+static void __init kasan_remap_early_shadow_ro(void)
{
- unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
- unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
- unsigned long k_cur;
+ pgprot_t prot = kasan_prot_ro();
phys_addr_t pa = __pa(kasan_early_shadow_page);
- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
- pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+ kasan_populate_pte(kasan_early_shadow_pte, prot);
- if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
- continue;
+ kasan_update_early_region(KASAN_SHADOW_START, KASAN_SHADOW_END,
+ pfn_pte(PHYS_PFN(pa), prot));
+}
- __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
- }
- flush_tlb_kernel_range(k_start, k_end);
+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+
+ kasan_update_early_region(k_start, k_end, __pte(0));
}
-void __init kasan_mmu_init(void)
+static void __init kasan_mmu_init(void)
{
int ret;
struct memblock_region *reg;
- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
- IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
-
- if (ret)
- panic("kasan: kasan_init_shadow_page_tables() failed");
- }
-
for_each_memblock(memory, reg) {
phys_addr_t base = reg->base;
phys_addr_t top = min(base + reg->size, total_lowmem);
@@ -156,10 +133,21 @@ void __init kasan_mmu_init(void)
if (ret)
panic("kasan: kasan_init_region() failed");
}
+
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+ IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ if (ret)
+ panic("kasan: kasan_init_shadow_page_tables() failed");
+ }
+
}
void __init kasan_init(void)
{
+ kasan_mmu_init();
+
kasan_remap_early_shadow_ro();
clear_page(kasan_early_shadow_page);
@@ -196,7 +184,7 @@ void __init kasan_early_init(void)
unsigned long addr = KASAN_SHADOW_START;
unsigned long end = KASAN_SHADOW_END;
unsigned long next;
- pmd_t *pmd = pmd_ptr_k(addr);
+ pmd_t *pmd = pmd_off_k(addr);
BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0fcea21f26b4..c2c11eb8dcfc 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -32,12 +32,12 @@
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>
+#include <linux/kprobes.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -64,8 +64,6 @@ bool init_mem_is_free;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
#endif
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -245,7 +243,6 @@ void __init paging_init(void)
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
- kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
@@ -468,6 +465,7 @@ static void flush_dcache_icache_phys(unsigned long physaddr)
: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
: "ctr", "memory");
}
+NOKPROBE_SYMBOL(flush_dcache_icache_phys)
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/*
@@ -578,7 +576,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
flush_dcache_page(pg);
}
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
unsigned long maddr;
@@ -587,7 +585,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
flush_icache_range(maddr, maddr + len);
kunmap(page);
}
-EXPORT_SYMBOL(flush_icache_user_range);
/*
* System memory should not be in /proc/iomem but various tools expect it
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 7097e07a209a..1b6d39e9baed 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -182,6 +182,10 @@ static inline void mmu_mark_initmem_nx(void) { }
static inline void mmu_mark_rodata_ro(void) { }
#endif
+#ifdef CONFIG_PPC_8xx
+void __init mmu_mapin_immr(void);
+#endif
+
#ifdef CONFIG_PPC_DEBUG_WX
void ptdump_check_wx(void);
#else
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 82862723ab42..13e74bc39ba5 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -36,7 +36,6 @@
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
@@ -102,9 +101,9 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
while (s >= LARGE_PAGE_SIZE_16M) {
pmd_t *pmdp;
- unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
+ unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;
- pmdp = pmd_ptr_k(v);
+ pmdp = pmd_off_k(v);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
@@ -117,9 +116,9 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
while (s >= LARGE_PAGE_SIZE_4M) {
pmd_t *pmdp;
- unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
+ unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;
- pmdp = pmd_ptr_k(v);
+ pmdp = pmd_off_k(v);
*pmdp = __pmd(val);
v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index d83a12c5bc7f..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -9,8 +9,11 @@
#include <linux/memblock.h>
#include <linux/mmu_context.h>
+#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
+#include <asm/pgalloc.h>
#include <mm/mmu_decl.h>
@@ -54,158 +57,148 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0;
}
-#define LARGE_PAGE_SIZE_8M (1<<23)
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-void __init MMU_init_hw(void)
+static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
- /* PIN up to the 3 first 8Mb after IMMR in DTLB table */
- if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
- unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
- unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
- int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
- unsigned long addr = 0;
- unsigned long mem = total_lowmem;
-
- for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
- mtspr(SPRN_MD_CTR, ctr | (i << 8));
- mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
- mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
- mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
- addr += LARGE_PAGE_SIZE_8M;
- mem -= LARGE_PAGE_SIZE_8M;
- }
+ if (hpd_val(*pmdp) == 0) {
+ pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+
+ if (!ptep)
+ return NULL;
+
+ hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
+ hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
}
+ return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}
-static void __init mmu_mapin_immr(void)
+static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
+ pgprot_t prot, int psize, bool new)
{
- unsigned long p = PHYS_IMMR_BASE;
- unsigned long v = VIRT_IMMR_BASE;
- int offset;
+ pmd_t *pmdp = pmd_off_k(va);
+ pte_t *ptep;
+
+ if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
+ return -EINVAL;
+
+ if (new) {
+ if (WARN_ON(slab_is_available()))
+ return -EINVAL;
+
+ if (psize == MMU_PAGE_512K)
+ ptep = early_pte_alloc_kernel(pmdp, va);
+ else
+ ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
+ } else {
+ if (psize == MMU_PAGE_512K)
+ ptep = pte_offset_kernel(pmdp, va);
+ else
+ ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+ }
+
+ if (WARN_ON(!ptep))
+ return -ENOMEM;
- for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
- map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
+ /* The PTE should never be already present */
+ if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+ return -EINVAL;
+
+ set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+
+ return 0;
}
-static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
{
- modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
-static void mmu_patch_addis(s32 *site, long simm)
+static bool immr_is_mapped __initdata;
+
+void __init mmu_mapin_immr(void)
{
- unsigned int instr = *(unsigned int *)patch_site_addr(site);
+ if (immr_is_mapped)
+ return;
+
+ immr_is_mapped = true;
- instr &= 0xffff0000;
- instr |= ((unsigned long)simm) >> 16;
- patch_instruction_site(site, instr);
+ __early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
+ PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
-static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+ pgprot_t prot, bool new)
{
- unsigned long s = offset;
- unsigned long v = PAGE_OFFSET + s;
- phys_addr_t p = memstart_addr + s;
-
- for (; s < top; s += PAGE_SIZE) {
- map_kernel_page(v, p, prot);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
- }
+ unsigned long v = PAGE_OFFSET + offset;
+ unsigned long p = offset;
+
+ WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+
+ for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+ for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+
+ if (!new)
+ flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
- unsigned long mapped;
-
- if (__map_without_ltlbs) {
- mapped = 0;
- mmu_mapin_immr();
- if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
- patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
+ unsigned long boundary = strict_boundary ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ WARN_ON(top < einittext8);
+
+ mmu_mapin_immr();
+
+ if (__map_without_ltlbs)
+ return 0;
+
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+ if (debug_pagealloc_enabled()) {
+ top = boundary;
} else {
- unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
-
- mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);
-
- /*
- * Populate page tables to:
- * - have them appear in /sys/kernel/debug/kernel_page_tables
- * - allow the BDI to find the pages when they are not PINNED
- */
- mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
- mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
- mmu_mapin_immr();
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
}
- mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
- mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
+ if (top > SZ_32M)
+ memblock_set_current_limit(top);
- /* If the size of RAM is not an exact power of two, we may not
- * have covered RAM in its entirety with 8 MiB
- * pages. Consequently, restrict the top end of RAM currently
- * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
- * coverage with normal-sized pages (or other reasons) do not
- * attempt to allocate outside the allowed range.
- */
- if (mapped)
- memblock_set_current_limit(mapped);
+ block_mapped_ram = top;
- block_mapped_ram = mapped;
-
- return mapped;
+ return top;
}
void mmu_mark_initmem_nx(void)
{
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
- mmu_patch_addis(&patch__itlbmiss_linmem_top8,
- -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
- unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
- unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
- unsigned long etext = __pa(_etext);
-
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
-
- /* Update page tables for PTDUMP and BDI */
- mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
- mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
- mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
- } else {
- mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
- mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
- }
- }
- _tlbil_all();
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+
+ if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
+ mmu_pin_tlb(block_mapped_ram, false);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
unsigned long sinittext = __pa(_sinittext);
- unsigned long etext = __pa(_etext);
-
- if (CONFIG_DATA_SHIFT < 23)
- mmu_patch_addis(&patch__dtlbmiss_romem_top8,
- -__pa(((unsigned long)_sinittext) &
- ~(LARGE_PAGE_SIZE_8M - 1)));
- mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
-
- _tlbil_all();
- /* Update page tables for PTDUMP and BDI */
- mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
- mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
- mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
+ mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
+ if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
+ mmu_pin_tlb(block_mapped_ram, true);
}
#endif
@@ -218,7 +211,7 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
BUG_ON(first_memblock_base != 0);
/* 8xx can only access 32MB at the moment */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
+ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
/*
diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index 4637fdd469cf..77884e24281d 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -73,6 +73,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -80,7 +81,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
if (slab_is_available()) {
pgdp = pgd_offset_k(ea);
- pudp = pud_alloc(&init_mm, pgdp, ea);
+ p4dp = p4d_offset(pgdp, ea);
+ pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
pmdp = pmd_alloc(&init_mm, pudp, ea);
@@ -91,13 +93,12 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
return -ENOMEM;
} else {
pgdp = pgd_offset_k(ea);
-#ifndef __PAGETABLE_PUD_FOLDED
- if (pgd_none(*pgdp)) {
- pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
- pgd_populate(&init_mm, pgdp, pudp);
+ p4dp = p4d_offset(pgdp, ea);
+ if (p4d_none(*p4dp)) {
+ pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
+ p4d_populate(&init_mm, p4dp, pmdp);
}
-#endif /* !__PAGETABLE_PUD_FOLDED */
- pudp = pud_offset(pgdp, ea);
+ pudp = pud_offset(p4dp, ea);
if (pud_none(*pudp)) {
pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
pud_populate(&init_mm, pudp, pmdp);
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_booke.c
index b4eb06ceb189..c06dfbb771f4 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/fsl_booke.c
@@ -41,7 +41,6 @@
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 1f110c3c48fb..d5e2704d0096 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -6,6 +6,7 @@
* Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
*/
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
@@ -13,7 +14,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
-#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index e3759b69f81b..1136257c3a99 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -100,7 +100,7 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
*/
-static pte_t set_pte_filter(pte_t pte)
+static inline pte_t set_pte_filter(pte_t pte)
{
struct page *pg;
@@ -249,22 +249,49 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#else
/*
- * Not used on non book3s64 platforms. But 8xx
- * can possibly use tsize derived from hstate.
+ * Not used on non-book3s64 platforms.
+ * 8xx compares it with mmu_virtual_psize to
+ * determine whether it is a huge page.
*/
- psize = 0;
+ psize = MMU_PAGE_COUNT;
#endif
__ptep_set_access_flags(vma, ptep, pte, addr, psize);
}
return changed;
#endif
}
+
+#if defined(CONFIG_PPC_8xx)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ pmd_t *pmd = pmd_off(mm, addr);
+ pte_basic_t val;
+ pte_basic_t *entry = &ptep->pte;
+ int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
+ int i;
+
+ /*
+ * Make sure the hardware valid bit is not set. We don't do
+ * a TLB flush for this update.
+ */
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+ pte = pte_mkpte(pte);
+
+ pte = set_pte_filter(pte);
+
+ val = pte_val(pte);
+ for (i = 0; i < num; i++, entry++, val += SZ_4K)
+ *entry = val;
+}
+#endif
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -272,12 +299,14 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
return;
pgd = mm->pgd + pgd_index(addr);
BUG_ON(pgd_none(*pgd));
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ BUG_ON(p4d_none(*p4d));
+ pud = pud_offset(p4d, addr);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, addr);
/*
* khugepaged to collapse normal pages to hugepage, first set
- * pmd to none to force page fault/gup to take mmap_sem. After
+ * pmd to none to force page fault/gup to take mmap_lock. After
* pmd is set to none, we do a pte_clear which does this assertion
* so if we find pmd none, return.
*/
@@ -312,12 +341,13 @@ EXPORT_SYMBOL_GPL(vmalloc_to_phys);
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *hpage_shift)
{
- pgd_t pgd, *pgdp;
+ pgd_t *pgdp;
+ p4d_t p4d, *p4dp;
pud_t pud, *pudp;
pmd_t pmd, *pmdp;
pte_t *ret_pte;
hugepd_t *hpdp = NULL;
- unsigned pdshift = PGDIR_SHIFT;
+ unsigned pdshift;
if (hpage_shift)
*hpage_shift = 0;
@@ -325,24 +355,28 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (is_thp)
*is_thp = false;
- pgdp = pgdir + pgd_index(ea);
- pgd = READ_ONCE(*pgdp);
/*
* Always operate on the local stack value. This makes sure the
* value doesn't get updated by a parallel THP split/collapse,
* page fault or a page unmap. The returned pte_t * is still not
* stable, so the above conditions should be checked there.
+ * Top level is an exception because it is folded into p4d.
*/
- if (pgd_none(pgd))
+ pgdp = pgdir + pgd_index(ea);
+ p4dp = p4d_offset(pgdp, ea);
+ p4d = READ_ONCE(*p4dp);
+ pdshift = P4D_SHIFT;
+
+ if (p4d_none(p4d))
return NULL;
- if (pgd_is_leaf(pgd)) {
- ret_pte = (pte_t *)pgdp;
+ if (p4d_is_leaf(p4d)) {
+ ret_pte = (pte_t *)p4dp;
goto out;
}
- if (is_hugepd(__hugepd(pgd_val(pgd)))) {
- hpdp = (hugepd_t *)&pgd;
+ if (is_hugepd(__hugepd(p4d_val(p4d)))) {
+ hpdp = (hugepd_t *)&p4d;
goto out_huge;
}
@@ -352,7 +386,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
* irq disabled
*/
pdshift = PUD_SHIFT;
- pudp = pud_offset(&pgd, ea);
+ pudp = pud_offset(&p4d, ea);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f62de06e3d07..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -24,16 +24,31 @@
#include <linux/memblock.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
+#include <asm/early_ioremap.h>
#include <mm/mmu_decl.h>
extern char etext[], _stext[], _sinittext[], _einittext[];
+static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;
+
+notrace void __init early_ioremap_init(void)
+{
+ unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
+ pte_t *ptep = (pte_t *)early_fixmap_pagetable;
+ pmd_t *pmdp = pmd_off_k(addr);
+
+ for (; (s32)(FIXADDR_TOP - addr) > 0;
+ addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+
+ early_ioremap_setup();
+}
+
static void __init *early_alloc_pgtable(unsigned long size)
{
void *ptr = memblock_alloc(size, size);
@@ -45,7 +60,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
return ptr;
}
-static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
if (pmd_none(*pmdp)) {
pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
@@ -63,7 +78,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
- pd = pmd_ptr_k(va);
+ pd = pmd_off_k(va);
/* Use middle 10 bits of VA to index the second-level map */
if (likely(slab_is_available()))
pg = pte_alloc_kernel(pd, va);
@@ -169,7 +184,7 @@ void mark_initmem_nx(void)
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
- if (v_block_mapped((unsigned long)_stext + 1))
+ if (v_block_mapped((unsigned long)_sinittext))
mmu_mark_initmem_nx();
else
change_page_attr(page, numpages, PAGE_KERNEL);
@@ -181,7 +196,7 @@ void mark_rodata_ro(void)
struct page *page;
unsigned long numpages;
- if (v_block_mapped((unsigned long)_sinittext)) {
+ if (v_block_mapped((unsigned long)_stext + 1)) {
mmu_mark_rodata_ro();
ptdump_check_wx();
return;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e78832dce7bb..bb43a8c04bee 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -35,7 +35,6 @@
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -101,13 +100,13 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
-struct page *pgd_page(pgd_t pgd)
+struct page *p4d_page(p4d_t p4d)
{
- if (pgd_is_leaf(pgd)) {
- VM_WARN_ON(!pgd_huge(pgd));
- return pte_page(pgd_pte(pgd));
+ if (p4d_is_leaf(p4d)) {
+ VM_WARN_ON(!p4d_huge(p4d));
+ return pte_page(p4d_pte(p4d));
}
- return virt_to_page(pgd_page_vaddr(pgd));
+ return virt_to_page(p4d_page_vaddr(p4d));
}
#endif
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index 9e2d8e847d6e..8a797dcbf475 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -5,12 +5,17 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
static const struct flag_info flag_array[] = {
{
+ .mask = _PAGE_HUGE,
+ .val = _PAGE_HUGE,
+ .set = "huge",
+ .clear = " ",
+ }, {
.mask = _PAGE_SH,
.val = 0,
.set = "user",
diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c
index d3a5d6b318d1..e29b338d499f 100644
--- a/arch/powerpc/mm/ptdump/bats.c
+++ b/arch/powerpc/mm/ptdump/bats.c
@@ -6,19 +6,21 @@
* This dumps the content of BATS
*/
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
-#include <asm/pgtable.h>
#include <asm/cpu_has_feature.h>
+#include "ptdump.h"
+
static char *pp_601(int k, int pp)
{
if (pp == 0)
- return k ? "NA" : "RWX";
+ return k ? " " : "rwx";
if (pp == 1)
- return k ? "ROX" : "RWX";
+ return k ? "r x" : "rwx";
if (pp == 2)
- return k ? "RWX" : "RWX";
- return k ? "ROX" : "ROX";
+ return "rwx";
+ return "r x";
}
static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper)
@@ -42,15 +44,13 @@ static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper)
#else
seq_printf(m, "0x%08x ", pbn);
#endif
+ pt_dump_size(m, size);
seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp));
- if (lower & _PAGE_WRITETHRU)
- seq_puts(m, "write through ");
- if (lower & _PAGE_NO_CACHE)
- seq_puts(m, "no cache ");
- if (lower & _PAGE_COHERENT)
- seq_puts(m, "coherent ");
+ seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " ");
+ seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " ");
+ seq_puts(m, lower & _PAGE_COHERENT ? "m " : " ");
seq_puts(m, "\n");
}
@@ -88,6 +88,7 @@ static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool
#else
seq_printf(m, "0x%08x ", brpn);
#endif
+ pt_dump_size(m, size);
if (k == 1)
seq_puts(m, "User ");
@@ -97,20 +98,16 @@ static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool
seq_puts(m, "Kernel/User ");
if (lower & BPP_RX)
- seq_puts(m, is_d ? "RO " : "EXEC ");
+ seq_puts(m, is_d ? "r " : " x ");
else if (lower & BPP_RW)
- seq_puts(m, is_d ? "RW " : "EXEC ");
+ seq_puts(m, is_d ? "rw " : " x ");
else
- seq_puts(m, is_d ? "NA " : "NX ");
-
- if (lower & _PAGE_WRITETHRU)
- seq_puts(m, "write through ");
- if (lower & _PAGE_NO_CACHE)
- seq_puts(m, "no cache ");
- if (lower & _PAGE_COHERENT)
- seq_puts(m, "coherent ");
- if (lower & _PAGE_GUARDED)
- seq_puts(m, "guarded ");
+ seq_puts(m, is_d ? " " : " ");
+
+ seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " ");
+ seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " ");
+ seq_puts(m, lower & _PAGE_COHERENT ? "m " : " ");
+ seq_puts(m, lower & _PAGE_GUARDED ? "g " : " ");
seq_puts(m, "\n");
}
diff --git a/arch/powerpc/mm/ptdump/book3s64.c b/arch/powerpc/mm/ptdump/book3s64.c
index 0dfca72cb9bd..14f73868db66 100644
--- a/arch/powerpc/mm/ptdump/book3s64.c
+++ b/arch/powerpc/mm/ptdump/book3s64.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index b6ed9578382f..a2c33efc7ce8 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -15,7 +15,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -417,9 +416,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
}
}
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0);
+ pud_t *pud = pud_offset(p4d, 0);
unsigned long addr;
unsigned int i;
@@ -431,6 +430,20 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
}
}
+static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ p4d_t *p4d = p4d_offset(pgd, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
+ addr = start + i * P4D_SIZE;
+ if (!p4d_none(*p4d))
+ /* p4d exists */
+ walk_pud(st, p4d, addr);
+ }
+}
+
static void walk_pagetables(struct pg_state *st)
{
pgd_t *pgd = pgd_offset_k(0UL);
@@ -445,7 +458,7 @@ static void walk_pagetables(struct pg_state *st)
addr = KERN_VIRT_START + i * PGDIR_SIZE;
if (!pgd_none(*pgd))
/* pgd exists */
- walk_pud(st, pgd, addr);
+ walk_p4d(st, pgd, addr);
}
}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index d92bb8ea229c..de6e05ef871c 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -19,10 +19,10 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include <asm/hugetlb.h>
#include <mm/mmu_decl.h>
@@ -60,6 +60,7 @@ struct pg_state {
unsigned long start_address;
unsigned long start_pa;
unsigned long last_pa;
+ unsigned long page_size;
unsigned int level;
u64 current_flags;
bool check_wx;
@@ -112,6 +113,19 @@ static struct addr_marker address_markers[] = {
seq_putc(m, c); \
})
+void pt_dump_size(struct seq_file *m, unsigned long size)
+{
+ static const char units[] = "KMGTPE";
+ const char *unit = units;
+
+ /* Work out the appropriate unit to use */
+ while (!(size & 1023) && unit[1]) {
+ size >>= 10;
+ unit++;
+ }
+ pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
+}
+
static void dump_flag_info(struct pg_state *st, const struct flag_info
*flag, u64 pte, int num)
{
@@ -146,8 +160,6 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info
static void dump_addr(struct pg_state *st, unsigned long addr)
{
- static const char units[] = "KMGTPE";
- const char *unit = units;
unsigned long delta;
#ifdef CONFIG_PPC64
@@ -157,20 +169,14 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#endif
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
- if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
+ if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
- delta = PAGE_SIZE >> 10;
+ delta = st->page_size >> 10;
} else {
pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
delta = (addr - st->start_address) >> 10;
}
- /* Work out what appropriate unit to use */
- while (!(delta & 1023) && unit[1]) {
- delta >>= 10;
- unit++;
- }
- pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
-
+ pt_dump_size(st->seq, delta);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
@@ -190,7 +196,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
}
static void note_page(struct pg_state *st, unsigned long addr,
- unsigned int level, u64 val)
+ unsigned int level, u64 val, unsigned long page_size)
{
u64 flag = val & pg_level[level].mask;
u64 pa = val & PTE_RPN_MASK;
@@ -202,6 +208,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
+ st->page_size = page_size;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
/*
* Dump the section of virtual memory when:
@@ -213,7 +220,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
*/
} else if (flag != st->current_flags || level != st->level ||
addr >= st->marker[1].start_address ||
- (pa != st->last_pa + PAGE_SIZE &&
+ (pa != st->last_pa + st->page_size &&
(pa != st->start_pa || st->start_pa != st->last_pa))) {
/* Check the PTE flags */
@@ -241,6 +248,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
+ st->page_size = page_size;
st->current_flags = flag;
st->level = level;
} else {
@@ -256,11 +264,31 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
addr = start + i * PAGE_SIZE;
- note_page(st, addr, 4, pte_val(*pte));
+ note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE);
}
}
+static void walk_hugepd(struct pg_state *st, hugepd_t *phpd, unsigned long start,
+ int pdshift, int level)
+{
+#ifdef CONFIG_ARCH_HAS_HUGEPD
+ unsigned int i;
+ int shift = hugepd_shift(*phpd);
+ int ptrs_per_hpd = pdshift - shift > 0 ? 1 << (pdshift - shift) : 1;
+
+ if (start & ((1 << shift) - 1))
+ return;
+
+ for (i = 0; i < ptrs_per_hpd; i++) {
+ unsigned long addr = start + (i << shift);
+ pte_t *pte = hugepte_offset(*phpd, addr, pdshift);
+
+ note_page(st, addr, level + 1, pte_val(*pte), 1 << shift);
+ }
+#endif
+}
+
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
pmd_t *pmd = pmd_offset(pud, 0);
@@ -273,13 +301,13 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
/* pmd exists */
walk_pte(st, pmd, addr);
else
- note_page(st, addr, 3, pmd_val(*pmd));
+ note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE);
}
}
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0);
+ pud_t *pud = pud_offset(p4d, 0);
unsigned long addr;
unsigned int i;
@@ -289,7 +317,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
/* pud exists */
walk_pmd(st, pud, addr);
else
- note_page(st, addr, 2, pud_val(*pud));
+ note_page(st, addr, 2, pud_val(*pud), PUD_SIZE);
}
}
@@ -304,11 +332,15 @@ static void walk_pagetables(struct pg_state *st)
* the hash pagetable.
*/
for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
- if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
- /* pgd exists */
- walk_pud(st, pgd, addr);
+ p4d_t *p4d = p4d_offset(pgd, 0);
+
+ if (p4d_none(*p4d) || p4d_is_leaf(*p4d))
+ note_page(st, addr, 1, p4d_val(*p4d), PGDIR_SIZE);
+ else if (is_hugepd(__hugepd(p4d_val(*p4d))))
+ walk_hugepd(st, (hugepd_t *)p4d, addr, PGDIR_SHIFT, 1);
else
- note_page(st, addr, 1, pgd_val(*pgd));
+ /* p4d exists */
+ walk_pud(st, p4d, addr);
}
}
@@ -363,7 +395,7 @@ static int ptdump_show(struct seq_file *m, void *v)
/* Traverse kernel page tables */
walk_pagetables(&st);
- note_page(&st, 0, 0, 0);
+ note_page(&st, 0, 0, 0, 0);
return 0;
}
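The pt_dump_size() helper introduced above takes a size in KiB and repeatedly divides it by 1024 while it remains an exact multiple, walking up the "KMGTPE" unit string, so an 8 MiB mapping prints as "8M" rather than "8192K". The standalone sketch below (illustration only, not part of the patch) reproduces that unit-selection loop in plain userspace C:

/*
 * Illustrative only: the same shift-by-10 unit selection used by
 * pt_dump_size(). The argument is a size expressed in KiB.
 */
#include <stdio.h>

static void print_size(unsigned long size)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;

	/* Move to a larger unit while the value stays an exact multiple of 1024 */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	printf("%9lu%c\n", size, *unit);
}

int main(void)
{
	print_size(8192);		/* 8 MiB   -> "8M"   */
	print_size(512);		/* 512 KiB -> "512K" */
	print_size(1024 * 1024);	/* 1 GiB   -> "1G"   */
	return 0;
}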
diff --git a/arch/powerpc/mm/ptdump/ptdump.h b/arch/powerpc/mm/ptdump/ptdump.h
index 5d513636de73..154efae96ae0 100644
--- a/arch/powerpc/mm/ptdump/ptdump.h
+++ b/arch/powerpc/mm/ptdump/ptdump.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/types.h>
+#include <linux/seq_file.h>
struct flag_info {
u64 mask;
@@ -17,3 +18,5 @@ struct pgtable_level {
};
extern struct pgtable_level pg_level[5];
+
+void pt_dump_size(struct seq_file *m, unsigned long size);
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index f7ed2f187cb0..c005fe041c18 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
@@ -31,6 +31,11 @@ static const struct flag_info flag_array[] = {
.set = "present",
.clear = " ",
}, {
+ .mask = _PAGE_COHERENT,
+ .val = _PAGE_COHERENT,
+ .set = "coherent",
+ .clear = " ",
+ }, {
.mask = _PAGE_GUARDED,
.val = _PAGE_GUARDED,
.set = "guarded",
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index dffe1a45b6ed..82b45b1cb973 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -478,7 +478,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* If hint, make sure it matches our alignment restrictions */
if (!fixed && addr) {
- addr = _ALIGN_UP(addr, page_size);
+ addr = ALIGN(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
if (addr > high_limit - len || addr < mmap_min_addr ||
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 0caec3d8d436..df59d0bb121f 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -332,7 +332,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
fput(exe_file);
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
continue;
@@ -349,13 +349,13 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
pr_debug("got dcookie for %pD\n", vma->vm_file);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out:
return app_cookie;
fail_no_image_cookie:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Cannot find dcookie for SPU binary\n",
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 1ad03c55c88c..e53c3c161257 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -15,6 +15,7 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
#define PERF_8xx_ID_CPU_CYCLES 1
#define PERF_8xx_ID_HW_INSTRUCTIONS 2
@@ -99,9 +100,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
unsigned long target = patch_site_addr(&patch__itlbmiss_perf);
patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
-#ifndef CONFIG_PIN_TLB_TEXT
- patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
-#endif
}
val = itlb_miss_counter;
break;
@@ -110,8 +108,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);
patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
- patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
- patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
}
val = dtlb_miss_counter;
break;
@@ -170,24 +166,19 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
case PERF_8xx_ID_ITLB_LOAD_MISS:
if (atomic_dec_return(&itlb_miss_ref) == 0) {
/* mfspr r10, SPRN_SPRG_SCRATCH0 */
- unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_SPRG_SCRATCH0);
+ struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
+ __PPC_SPR(SPRN_SPRG_SCRATCH0));
patch_instruction_site(&patch__itlbmiss_exit_1, insn);
-#ifndef CONFIG_PIN_TLB_TEXT
- patch_instruction_site(&patch__itlbmiss_exit_2, insn);
-#endif
}
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
if (atomic_dec_return(&dtlb_miss_ref) == 0) {
/* mfspr r10, SPRN_DAR */
- unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_DAR);
+ struct ppc_inst insn = ppc_inst(PPC_INST_MFSPR | __PPC_RS(R10) |
+ __PPC_SPR(SPRN_DAR));
patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
- patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
- patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
}
break;
}
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index dd5051015008..6c028ee513c0 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index 8aa951003141..f7d888d39cd3 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index df1ffd8b20f2..814d1c2c2b9c 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
@@ -26,43 +25,23 @@
*/
int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
- int ret = -EFAULT;
- pgd_t *pgdir;
- pte_t *ptep, pte;
- unsigned int shift;
+
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
- unsigned long pfn, flags;
+ struct page *page;
void *kaddr;
- pgdir = current->mm->pgd;
- if (!pgdir)
- return -EFAULT;
+ if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
+ kaddr = page_address(page);
+
+ /* align address to page boundary */
+ offset = addr & ~PAGE_MASK;
- local_irq_save(flags);
- ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
- if (!ptep)
- goto err_out;
- if (!shift)
- shift = PAGE_SHIFT;
-
- /* align address to page boundary */
- offset = addr & ((1UL << shift) - 1);
-
- pte = READ_ONCE(*ptep);
- if (!pte_present(pte) || !pte_user(pte))
- goto err_out;
- pfn = pte_pfn(pte);
- if (!page_is_ram(pfn))
- goto err_out;
-
- /* no highmem to worry about here */
- kaddr = pfn_to_kaddr(pfn);
- memcpy(buf, kaddr + offset, nb);
- ret = 0;
-err_out:
- local_irq_restore(flags);
- return ret;
+ memcpy(buf, kaddr + offset, nb);
+ put_page(page);
+ return 0;
+ }
+ return -EFAULT;
}
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3dcfecf858f3..13b9dd5e4a76 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -421,14 +421,14 @@ static __u64 power_pmu_bhrb_to(u64 addr)
if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
return 0;
- return branch_target(&instr);
+ return branch_target((struct ppc_inst *)&instr);
}
/* Userspace: need copy instruction here then translate it */
if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr)))
return 0;
- target = branch_target(&instr);
+ target = branch_target((struct ppc_inst *)&instr);
if ((!target) || (instr & BRANCH_ABSOLUTE))
return target;
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 573e0b309c0c..db213eb7cb02 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -20,6 +20,7 @@
#include <asm/io.h>
#include <linux/byteorder/generic.h>
+#include <asm/rtas.h>
#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"
@@ -57,6 +58,65 @@ static bool is_physical_domain(unsigned domain)
}
}
+/*
+ * The Processor Module Information system parameter allows the platform
+ * to pass certain processor module information to the OS.
+ * Refer to the PAPR+ document; the parameter token value is '43'.
+ */
+
+#define PROCESSOR_MODULE_INFO 43
+
+static u32 phys_sockets; /* Physical sockets */
+static u32 phys_chipspersocket; /* Physical chips per socket */
+static u32 phys_coresperchip; /* Physical cores per chip */
+
+/*
+ * read_24x7_sys_info()
+ * Retrieve the number of sockets, chips per socket and cores per chip
+ * through the get-system-parameter RTAS call.
+ */
+void read_24x7_sys_info(void)
+{
+ int call_status, len, ntypes;
+
+ spin_lock(&rtas_data_buf_lock);
+
+ /*
+ * Default the system parameters (sockets, chips per socket and
+ * cores per chip) to 1.
+ */
+ phys_sockets = 1;
+ phys_chipspersocket = 1;
+ phys_coresperchip = 1;
+
+ call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
+ NULL,
+ PROCESSOR_MODULE_INFO,
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
+
+ if (call_status != 0) {
+ pr_err("Error calling get-system-parameter %d\n",
+ call_status);
+ } else {
+ len = be16_to_cpup((__be16 *)&rtas_data_buf[0]);
+ if (len < 8)
+ goto out;
+
+ ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]);
+
+ if (!ntypes)
+ goto out;
+
+ phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]);
+ phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]);
+ phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]);
+ }
+
+out:
+ spin_unlock(&rtas_data_buf_lock);
+}
+
/* Domains for which more than one result element are returned for each event. */
static bool domain_needs_aggregation(unsigned int domain)
{
@@ -386,6 +446,24 @@ static ssize_t device_show_string(struct device *dev,
return sprintf(buf, "%s\n", (char *)d->var);
}
+static ssize_t sockets_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", phys_sockets);
+}
+
+static ssize_t chipspersocket_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", phys_chipspersocket);
+}
+
+static ssize_t coresperchip_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", phys_coresperchip);
+}
+
static struct attribute *device_str_attr_create_(char *name, char *str)
{
struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -1032,6 +1110,9 @@ PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
static DEVICE_ATTR_RO(domains);
+static DEVICE_ATTR_RO(sockets);
+static DEVICE_ATTR_RO(chipspersocket);
+static DEVICE_ATTR_RO(coresperchip);
static struct bin_attribute *if_bin_attrs[] = {
&bin_attr_catalog,
@@ -1042,6 +1123,9 @@ static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
&dev_attr_domains.attr,
+ &dev_attr_sockets.attr,
+ &dev_attr_chipspersocket.attr,
+ &dev_attr_coresperchip.attr,
NULL,
};
@@ -1400,16 +1484,6 @@ static void h_24x7_event_read(struct perf_event *event)
h24x7hw = &get_cpu_var(hv_24x7_hw);
h24x7hw->events[i] = event;
put_cpu_var(h24x7hw);
- /*
- * Clear the event count so we can compute the _change_
- * in the 24x7 raw counter value at the end of the txn.
- *
- * Note that we could alternatively read the 24x7 value
- * now and save its value in event->hw.prev_count. But
- * that would require issuing a hcall, which would then
- * defeat the purpose of using the txn interface.
- */
- local64_set(&event->count, 0);
}
put_cpu_var(hv_24x7_reqb);
@@ -1615,6 +1689,8 @@ static int hv_24x7_init(void)
if (r)
return r;
+ read_24x7_sys_info();
+
return 0;
}
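The sockets, chipspersocket and coresperchip attributes added above are read-only decimal values exported through the hv_24x7 PMU's sysfs interface group. A minimal userspace sketch of reading them is given below for illustration only; the /sys/devices/hv_24x7/interface/ directory is an assumption based on where this PMU's existing catalog attributes live, not something stated in this diff.

/*
 * Illustrative only: read the hv_24x7 topology attributes from sysfs.
 * The directory below is an assumption; adjust it to the actual PMU path.
 */
#include <stdio.h>

int main(void)
{
	const char *names[] = { "sockets", "chipspersocket", "coresperchip" };
	char path[128];
	unsigned int i;

	for (i = 0; i < 3; i++) {
		FILE *f;
		unsigned int val;

		snprintf(path, sizeof(path),
			 "/sys/devices/hv_24x7/interface/%s", names[i]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}
		if (fscanf(f, "%u", &val) == 1)
			printf("%s = %u\n", names[i], val);
		fclose(f);
	}
	return 0;
}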
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 6da813b65b42..e3e5217c9822 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -7,14 +7,6 @@ config ACADIA
help
This option enables support for the AMCC 405EZ Acadia evaluation board.
-config EP405
- bool "EP405/EP405PC"
- depends on 40x
- select 405GP
- select FORCE_PCI
- help
- This option enables support for the EP405/EP405PC boards.
-
config HOTFOOT
bool "Hotfoot"
depends on 40x
@@ -45,33 +37,6 @@ config MAKALU
help
This option enables support for the AMCC PPC405EX board.
-config WALNUT
- bool "Walnut"
- depends on 40x
- default y
- select 405GP
- select FORCE_PCI
- select OF_RTC
- help
- This option enables support for the IBM PPC405GP evaluation board.
-
-config XILINX_VIRTEX_GENERIC_BOARD
- bool "Generic Xilinx Virtex board"
- depends on 40x
- select XILINX_VIRTEX_II_PRO
- select XILINX_VIRTEX_4_FX
- select XILINX_INTC
- help
- This option enables generic support for Xilinx Virtex based boards.
-
- The generic virtex board support matches any device tree which
- specifies 'xilinx,virtex' in its compatible field. This includes
- the Xilinx ML3xx and ML4xx reference designs using the powerpc
- core.
-
- Most Virtex designs should use this unless it needs to do some
- special configuration at board probe time.
-
config OBS600
bool "OpenBlockS 600"
depends on 40x
@@ -86,18 +51,6 @@ config PPC40x_SIMPLE
help
This option enables the simple PowerPC 40x platform support.
-# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
-config 403GCX
- bool
- #depends on OAK
- select IBM405_ERR51
-
-config 405GP
- bool
- select IBM405_ERR77
- select IBM405_ERR51
- select IBM_EMAC_ZMII if IBM_EMAC
-
config 405EX
bool
select IBM_EMAC_EMAC4 if IBM_EMAC
@@ -109,25 +62,6 @@ config 405EZ
select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC
select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC
-config XILINX_VIRTEX
- bool
- select DEFAULT_UIMAGE
-
-config XILINX_VIRTEX_II_PRO
- bool
- select XILINX_VIRTEX
- select IBM405_ERR77
- select IBM405_ERR51
-
-config XILINX_VIRTEX_4_FX
- bool
- select XILINX_VIRTEX
-
-config STB03xxx
- bool
- select IBM405_ERR77
- select IBM405_ERR51
-
config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 40x
@@ -135,16 +69,6 @@ config PPC4xx_GPIO
help
Enable gpiolib support for ppc40x based boards
-# 40x errata/workaround config symbols, selected by the CPU models above
-
-# All 405-based cores up until the 405GPR and 405EP have this errata.
-config IBM405_ERR77
- bool
-
-# All 40x-based cores, up until the 405GPR and 405EP have this errata.
-config IBM405_ERR51
- bool
-
config APM8018X
bool "APM8018X"
depends on 40x
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
index 828d78340dd9..122de98527c4 100644
--- a/arch/powerpc/platforms/40x/Makefile
+++ b/arch/powerpc/platforms/40x/Makefile
@@ -1,5 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_WALNUT) += walnut.o
-obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
-obj-$(CONFIG_EP405) += ep405.o
obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
deleted file mode 100644
index 1c8aec6e9bb7..000000000000
--- a/arch/powerpc/platforms/40x/ep405.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Architecture- / platform-specific boot-time initialization code for
- * IBM PowerPC 4xx based boards. Adapted from original
- * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
- * <dan@net4x.com>.
- *
- * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Rewritten and ported to the merged powerpc tree:
- * Copyright 2007 IBM Corporation
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * Adapted to EP405 by Ben. Herrenschmidt <benh@kernel.crashing.org>
- *
- * TODO: Wire up the PCI IRQ mux and the southbridge interrupts
- *
- * 2002 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/time.h>
-#include <asm/uic.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc4xx.h>
-
-static struct device_node *bcsr_node;
-static void __iomem *bcsr_regs;
-
-/* BCSR registers */
-#define BCSR_ID 0
-#define BCSR_PCI_CTRL 1
-#define BCSR_FLASH_NV_POR_CTRL 2
-#define BCSR_FENET_UART_CTRL 3
-#define BCSR_PCI_IRQ 4
-#define BCSR_XIRQ_SELECT 5
-#define BCSR_XIRQ_ROUTING 6
-#define BCSR_XIRQ_STATUS 7
-#define BCSR_XIRQ_STATUS2 8
-#define BCSR_SW_STAT_LED_CTRL 9
-#define BCSR_GPIO_IRQ_PAR_CTRL 10
-/* there's more, can't be bothered typing them tho */
-
-
-static const struct of_device_id ep405_of_bus[] __initconst = {
- { .compatible = "ibm,plb3", },
- { .compatible = "ibm,opb", },
- { .compatible = "ibm,ebc", },
- {},
-};
-
-static int __init ep405_device_probe(void)
-{
- of_platform_bus_probe(NULL, ep405_of_bus, NULL);
-
- return 0;
-}
-machine_device_initcall(ep405, ep405_device_probe);
-
-static void __init ep405_init_bcsr(void)
-{
- const u8 *irq_routing;
- int i;
-
- /* Find the bloody thing & map it */
- bcsr_node = of_find_compatible_node(NULL, NULL, "ep405-bcsr");
- if (bcsr_node == NULL) {
- printk(KERN_ERR "EP405 BCSR not found !\n");
- return;
- }
- bcsr_regs = of_iomap(bcsr_node, 0);
- if (bcsr_regs == NULL) {
- printk(KERN_ERR "EP405 BCSR failed to map !\n");
- return;
- }
-
- /* Get the irq-routing property and apply the routing to the CPLD */
- irq_routing = of_get_property(bcsr_node, "irq-routing", NULL);
- if (irq_routing == NULL)
- return;
- for (i = 0; i < 16; i++) {
- u8 irq = irq_routing[i];
- out_8(bcsr_regs + BCSR_XIRQ_SELECT, i);
- out_8(bcsr_regs + BCSR_XIRQ_ROUTING, irq);
- }
- in_8(bcsr_regs + BCSR_XIRQ_SELECT);
- mb();
- out_8(bcsr_regs + BCSR_GPIO_IRQ_PAR_CTRL, 0xfe);
-}
-
-static void __init ep405_setup_arch(void)
-{
- /* Find & init the BCSR CPLD */
- ep405_init_bcsr();
-
- pci_set_flags(PCI_REASSIGN_ALL_RSRC);
-}
-
-static int __init ep405_probe(void)
-{
- if (!of_machine_is_compatible("ep405"))
- return 0;
-
- return 1;
-}
-
-define_machine(ep405) {
- .name = "EP405",
- .probe = ep405_probe,
- .setup_arch = ep405_setup_arch,
- .progress = udbg_progress,
- .init_IRQ = uic_init_tree,
- .get_irq = uic_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
deleted file mode 100644
index e3d5e095846b..000000000000
--- a/arch/powerpc/platforms/40x/virtex.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Xilinx Virtex (IIpro & 4FX) based board support
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/time.h>
-#include <asm/xilinx_intc.h>
-#include <asm/xilinx_pci.h>
-#include <asm/ppc4xx.h>
-
-static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
- { .compatible = "xlnx,plb-v46-1.00.a", },
- { .compatible = "xlnx,plb-v34-1.01.a", },
- { .compatible = "xlnx,plb-v34-1.02.a", },
- { .compatible = "xlnx,opb-v20-1.10.c", },
- { .compatible = "xlnx,dcr-v29-1.00.a", },
- { .compatible = "xlnx,compound", },
- {}
-};
-
-static int __init virtex_device_probe(void)
-{
- of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(virtex, virtex_device_probe);
-
-static int __init virtex_probe(void)
-{
- if (!of_machine_is_compatible("xlnx,virtex"))
- return 0;
-
- return 1;
-}
-
-define_machine(virtex) {
- .name = "Xilinx Virtex",
- .probe = virtex_probe,
- .setup_arch = xilinx_pci_init,
- .init_IRQ = xilinx_intc_init_tree,
- .get_irq = xintc_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
deleted file mode 100644
index e5797815e2f1..000000000000
--- a/arch/powerpc/platforms/40x/walnut.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Architecture- / platform-specific boot-time initialization code for
- * IBM PowerPC 4xx based boards. Adapted from original
- * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
- * <dan@net4x.com>.
- *
- * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Rewritten and ported to the merged powerpc tree:
- * Copyright 2007 IBM Corporation
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * 2002 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <linux/rtc.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/time.h>
-#include <asm/uic.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc4xx.h>
-
-static const struct of_device_id walnut_of_bus[] __initconst = {
- { .compatible = "ibm,plb3", },
- { .compatible = "ibm,opb", },
- { .compatible = "ibm,ebc", },
- {},
-};
-
-static int __init walnut_device_probe(void)
-{
- of_platform_bus_probe(NULL, walnut_of_bus, NULL);
- of_instantiate_rtc();
-
- return 0;
-}
-machine_device_initcall(walnut, walnut_device_probe);
-
-static int __init walnut_probe(void)
-{
- if (!of_machine_is_compatible("ibm,walnut"))
- return 0;
-
- pci_set_flags(PCI_REASSIGN_ALL_RSRC);
-
- return 1;
-}
-
-define_machine(walnut) {
- .name = "Walnut",
- .probe = walnut_probe,
- .progress = udbg_progress,
- .init_IRQ = uic_init_tree,
- .get_irq = uic_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 25ebe634a661..78ac6d67a935 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -167,8 +167,7 @@ config YOSEMITE
config ISS4xx
bool "ISS 4xx Simulator"
- depends on (44x || 40x)
- select 405GP if 40x
+ depends on 44x
select 440GP if 44x && !PPC_47x
select PPC_FPU
select OF_RTC
@@ -232,33 +231,6 @@ config ICON
help
This option enables support for the AMCC PPC440SPe evaluation board.
-config XILINX_VIRTEX440_GENERIC_BOARD
- bool "Generic Xilinx Virtex 5 FXT board support"
- depends on 44x
- select XILINX_VIRTEX_5_FXT
- select XILINX_INTC
- help
- This option enables generic support for Xilinx Virtex based boards
- that use a 440 based processor in the Virtex 5 FXT FPGA architecture.
-
- The generic virtex board support matches any device tree which
- specifies 'xlnx,virtex440' in its compatible field. This includes
- the Xilinx ML5xx reference designs using the powerpc core.
-
- Most Virtex 5 designs should use this unless it needs to do some
- special configuration at board probe time.
-
-config XILINX_ML510
- bool "Xilinx ML510 extra support"
- depends on XILINX_VIRTEX440_GENERIC_BOARD
- select HAVE_PCI
- select XILINX_PCI if PCI
- select PPC_INDIRECT_PCI if PCI
- select PPC_I8259 if PCI
- help
- This option enables extra support for features on the Xilinx ML510
- board. The ML510 has a PCI bus with ALI south bridge.
-
config PPC44x_SIMPLE
bool "Simple PowerPC 44x board support"
depends on 44x
@@ -354,13 +326,3 @@ config 476FPE_ERR46
config IBM440EP_ERR42
bool
-# Xilinx specific config options.
-config XILINX_VIRTEX
- bool
- select DEFAULT_UIMAGE
-
-# Xilinx Virtex 5 FXT FPGA architecture, selected by a Xilinx board above
-config XILINX_VIRTEX_5_FXT
- bool
- select XILINX_VIRTEX
-
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 1b78c6af821a..5ba031f57652 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -7,8 +7,6 @@ obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o
-obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
-obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_ISS4xx) += iss4xx.o
obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
obj-$(CONFIG_CURRITUCK) += ppc476.o
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
deleted file mode 100644
index 3eb13ed926ee..000000000000
--- a/arch/powerpc/platforms/44x/virtex.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Xilinx Virtex 5FXT based board support, derived from
- * the Xilinx Virtex (IIpro & 4FX) based board support
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- * Copyright 2008 Xilinx, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/time.h>
-#include <asm/xilinx_intc.h>
-#include <asm/xilinx_pci.h>
-#include <asm/reg.h>
-#include <asm/ppc4xx.h>
-#include "44x.h"
-
-static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
- { .compatible = "simple-bus", },
- { .compatible = "xlnx,plb-v46-1.00.a", },
- { .compatible = "xlnx,plb-v46-1.02.a", },
- { .compatible = "xlnx,plb-v34-1.01.a", },
- { .compatible = "xlnx,plb-v34-1.02.a", },
- { .compatible = "xlnx,opb-v20-1.10.c", },
- { .compatible = "xlnx,dcr-v29-1.00.a", },
- { .compatible = "xlnx,compound", },
- {}
-};
-
-static int __init virtex_device_probe(void)
-{
- of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(virtex, virtex_device_probe);
-
-static int __init virtex_probe(void)
-{
- if (!of_machine_is_compatible("xlnx,virtex440"))
- return 0;
-
- return 1;
-}
-
-define_machine(virtex) {
- .name = "Xilinx Virtex440",
- .probe = virtex_probe,
- .setup_arch = xilinx_pci_init,
- .init_IRQ = xilinx_intc_init_tree,
- .get_irq = xintc_get_irq,
- .calibrate_decr = generic_calibrate_decr,
- .restart = ppc4xx_reset_system,
-};
diff --git a/arch/powerpc/platforms/44x/virtex_ml510.c b/arch/powerpc/platforms/44x/virtex_ml510.c
deleted file mode 100644
index 349f218b335c..000000000000
--- a/arch/powerpc/platforms/44x/virtex_ml510.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <asm/i8259.h>
-#include <linux/pci.h>
-#include "44x.h"
-
-/**
- * ml510_ail_quirk
- */
-static void ml510_ali_quirk(struct pci_dev *dev)
-{
- /* Enable the IDE controller */
- pci_write_config_byte(dev, 0x58, 0x4c);
- /* Assign irq 14 to the primary ide channel */
- pci_write_config_byte(dev, 0x44, 0x0d);
- /* Assign irq 15 to the secondary ide channel */
- pci_write_config_byte(dev, 0x75, 0x0f);
- /* Set the ide controller in native mode */
- pci_write_config_byte(dev, 0x09, 0xff);
-
- /* INTB = disabled, INTA = disabled */
- pci_write_config_byte(dev, 0x48, 0x00);
- /* INTD = disabled, INTC = disabled */
- pci_write_config_byte(dev, 0x4a, 0x00);
- /* Audio = INT7, Modem = disabled. */
- pci_write_config_byte(dev, 0x4b, 0x60);
- /* USB = INT7 */
- pci_write_config_byte(dev, 0x74, 0x06);
-}
-DECLARE_PCI_FIXUP_EARLY(0x10b9, 0x1533, ml510_ali_quirk);
-
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index e6e2adcc7b64..c13d64c3b019 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1242,7 +1242,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
if (mbase == NULL) {
printk(KERN_ERR "%pOF: Can't map internal config space !",
port->node);
- goto done;
+ return;
}
while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
@@ -1252,9 +1252,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
}
if (attempt)
port->link = 1;
-done:
iounmap(mbase);
-
}
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 3a9969c429b3..70083649c9ea 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -248,6 +248,7 @@ mmu_on:
blr
+_ASM_NOKPROBE_SYMBOL(lite5200_wakeup)
/* ---------------------------------------------------------------------- */
@@ -391,6 +392,7 @@ restore_regs:
LOAD_SPRN(TBWU, 0x5b);
blr
+_ASM_NOKPROBE_SYMBOL(restore_regs)
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index 1cdd5ed9d896..3b5cb39a564c 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -10,6 +10,8 @@
* Copyright (c) 2006 MontaVista Software, Inc.
*/
+#include <linux/kprobes.h>
+
#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
@@ -29,6 +31,7 @@ void __noreturn pq2_restart(char *cmd)
panic("Restart failed\n");
}
+NOKPROBE_SYMBOL(pq2_restart)
#ifdef CONFIG_PCI
static int pq2_pci_exclude_device(struct pci_controller *hose,
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3acd7470dc5e..bc6bd4d0ae96 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -548,3 +548,4 @@ mpc83xx_deep_resume:
mtdec r0
rfi
+_ASM_NOKPROBE_SYMBOL(mpc83xx_deep_resume)
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 27ac38f7e1a9..6aa8defb5857 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -12,11 +12,11 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 915ab6710b93..172d2b7cfeb7 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -22,8 +22,8 @@
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/atomic.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index a43b8c30157c..a4127b0b161f 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -13,8 +13,8 @@
#include <linux/kernel.h>
#include <linux/of_fdt.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index dd97ef277276..e4acf5ce6b07 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -24,8 +24,8 @@
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/atomic.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 48f7d96ae37d..fda108bae95f 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -16,9 +16,9 @@
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 5b91ea5694e3..87f524e4b09c 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -10,13 +10,14 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
+#include <asm/inst.h>
#include <sysdev/fsl_soc.h>
@@ -72,7 +73,7 @@ smp_86xx_kick_cpu(int nr)
/* Setup fake reset vector to call __secondary_start_mpc86xx. */
target = (unsigned long) __secondary_start_mpc86xx;
- patch_branch(vector, target, BRANCH_SET_LINK);
+ patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
/* Kick that CPU */
smp_86xx_release_core(nr);
@@ -82,7 +83,7 @@ smp_86xx_kick_cpu(int nr)
mdelay(1);
/* Restore the exception vector */
- patch_instruction(vector, save_vector);
+ patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index e0fe670f06f6..abb2b45b2789 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -98,15 +98,6 @@ menu "MPC8xx CPM Options"
# 8xx specific questions.
comment "Generic MPC8xx Options"
-config 8xx_COPYBACK
- bool "Copy-Back Data Cache (else Writethrough)"
- help
- Saying Y here will cause the cache on an MPC8xx processor to be used
- in Copy-Back mode. If you say N here, it is used in Writethrough
- mode.
-
- If in doubt, say Y here.
-
config 8xx_GPIO
bool "GPIO API Support"
select GPIOLIB
@@ -171,4 +162,45 @@ config UCODE_PATCH
default y
depends on !NO_UCODE_PATCH
+menu "8xx advanced setup"
+ depends on PPC_8xx
+
+config PIN_TLB
+ bool "Pinned Kernel TLBs"
+ depends on ADVANCED_OPTIONS
+ help
+ On the 8xx, we have 32 instruction TLBs and 32 data TLBs. In each
+ table 4 TLBs can be pinned.
+
+ It reduces the number of usable TLBs to 28 (i.e. by 12%). That's the
+ reason why we make it selectable.
+
+ This option does nothing by itself; it just activates the selection
+ of what to pin.
+
+config PIN_TLB_DATA
+ bool "Pinned TLB for DATA"
+ depends on PIN_TLB
+ default y
+ help
+ This pins the first 32 Mbytes of memory with 8M pages.
+
+config PIN_TLB_IMMR
+ bool "Pinned TLB for IMMR"
+ depends on PIN_TLB
+ default y
+ help
+ This pins the IMMR area with a 512 kbyte page. If
+ CONFIG_PIN_TLB_DATA is also selected, the area pinned by
+ CONFIG_PIN_TLB_DATA is reduced to 24 Mbytes.
+
+config PIN_TLB_TEXT
+ bool "Pinned TLB for TEXT"
+ depends on PIN_TLB
+ default y
+ help
+ This pins kernel text with 8M pages.
+
+endmenu
+
endmenu
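The PIN_TLB help text above quotes a few figures (28 usable TLBs, 24 Mbytes of pinned data once the IMMR page is also pinned). The stand-alone sketch below only re-derives those numbers from the help text itself; the assumption that the pinned IMMR page consumes one of the four pinned slots is inferred from the 24 Mbytes figure, not taken from the 8xx manual.

/*
 * Stand-alone check of the TLB-pinning arithmetic quoted in the PIN_TLB
 * help text above. All constants come from that text; nothing here is
 * part of the patch.
 */
#include <stdio.h>

int main(void)
{
	const int total_tlbs  = 32;	/* entries per table (ITLB or DTLB) */
	const int pinned_tlbs = 4;	/* entries that can be pinned per table */
	const int page_mb     = 8;	/* pinned data uses 8M pages */

	printf("usable TLBs: %d (%.1f%% fewer)\n",
	       total_tlbs - pinned_tlbs, 100.0 * pinned_tlbs / total_tlbs);
	printf("pinned data, PIN_TLB_DATA only:     %d MB\n",
	       pinned_tlbs * page_mb);
	printf("pinned data, with PIN_TLB_IMMR too: %d MB\n",
	       (pinned_tlbs - 1) * page_mb);
	return 0;
}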
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index 4db4ca2e1222..c58b6f1c40e3 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -34,7 +34,6 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c
index c80bd7afd6c5..aed4bc75f352 100644
--- a/arch/powerpc/platforms/8xx/micropatch.c
+++ b/arch/powerpc/platforms/8xx/micropatch.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm.h>
#include <asm/cpm1.h>
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 1f8025383caa..5e6479d409a0 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -317,8 +317,4 @@ config MCU_MPC8349EMITX
also register MCU GPIOs with the generic GPIO API, so you'll able
to use MCU pins as GPIOs.
-config XILINX_PCI
- bool "Xilinx PCI host bridge support"
- depends on PCI && XILINX_VIRTEX
-
endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 27a81c291be8..d349603fb889 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -55,8 +55,8 @@ config PPC_8xx
select SYS_SUPPORTS_HUGETLBFS
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
- select PPC_MM_SLICES if HUGETLB_PAGE
select HAVE_ARCH_VMAP_STACK
+ select HUGETLBFS
config 40x
bool "AMCC 40x"
@@ -377,7 +377,7 @@ config PPC_HAVE_KUEP
config PPC_KUEP
bool "Kernel Userspace Execution Prevention"
depends on PPC_HAVE_KUEP
- default y
+ default y if !PPC_BOOK3S_32
help
Enable support for Kernel Userspace Execution Prevention (KUEP)
@@ -389,7 +389,7 @@ config PPC_HAVE_KUAP
config PPC_KUAP
bool "Kernel Userspace Access Protection"
depends on PPC_HAVE_KUAP
- default y
+ default y if !PPC_BOOK3S_32
help
Enable support for Kernel Userspace Access Protection (KUAP)
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 0be212a27254..c2a0678d85db 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -12,9 +12,9 @@
#include <linux/export.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 5927ead4aed2..c0ab62ba6f16 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -23,9 +23,9 @@
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index ca9ffc1c8685..2124831cf57c 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -943,7 +943,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
fbase = max(fbase, dbase + dsize);
}
- fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+ fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
fsize = memblock_phys_mem_size();
if ((fbase + fsize) <= 0x800000000ul)
@@ -963,8 +963,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
hend = hbase + htab_size_bytes;
/* The window must start and end on a segment boundary */
- if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
- (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+ if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+ (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
pr_debug("iommu: hash window not segment aligned\n");
return -1;
}
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 6af3a6e600a7..9068edef71f7 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -15,11 +15,11 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 855eedb8d7d7..edefa785d2ef 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -31,7 +31,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 85d795d96a27..c855a0aeb49c 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -21,12 +21,12 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 026b72c0a452..210785f59271 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -10,8 +10,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index e44427c24585..62d90a5e23d1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -325,7 +325,7 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
/*
- * Because we release the mmap_sem, the context may be destroyed while
+ * Because we release the mmap_lock, the context may be destroyed while
* we're in spu_wait. Grab an extra reference so it isn't destroyed
* in the meantime.
*/
@@ -334,8 +334,8 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
/*
* We have to wait for context to be loaded before we have
* pages to hand out to the user, but we don't want to wait
- * with the mmap_sem held.
- * It is possible to drop the mmap_sem here, but then we need
+ * with the mmap_lock held.
+ * It is possible to drop the mmap_lock here, but then we need
* to return VM_FAULT_NOPAGE because the mappings may have
* changed.
*/
@@ -343,11 +343,11 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
goto refault;
if (ctx->state == SPU_STATE_SAVED) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
} else {
area = ctx->spu->problem_phys + ps_offs;
ret = vmf_insert_pfn(vmf->vma, vmf->address,
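The comment in spufs_ps_fault() above, now phrased in terms of mmap_lock, describes a general pattern: drop a long-held reader lock before sleeping, retake it before returning, and report a retry because the protected state may have changed in between. Below is a user-space sketch of the same shape; it is purely illustrative, and the pthreads lock, the fault_path()/wait_until_loaded() names and the single-threaded main() are inventions for the example, not the kernel API.

/*
 * User-space analogue of the drop-lock-while-sleeping pattern described in
 * the spufs_ps_fault() comment above: never sleep while holding the read
 * lock, retake it before returning, and have the caller retry because the
 * protected state may have changed while the lock was dropped.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool loaded;				/* stands in for ctx->state */

static void wait_until_loaded(void)		/* stands in for spufs_wait() */
{
	loaded = true;
}

/* Called and returns with map_lock held for read, like the fault handler. */
static bool fault_path(void)
{
	if (!loaded) {
		pthread_rwlock_unlock(&map_lock);	/* don't sleep holding it */
		wait_until_loaded();
		pthread_rwlock_rdlock(&map_lock);	/* retake before returning */
		return true;				/* like VM_FAULT_NOPAGE: retry */
	}
	return false;
}

int main(void)
{
	bool retry;

	do {
		pthread_rwlock_rdlock(&map_lock);
		retry = fault_path();
		pthread_rwlock_unlock(&map_lock);
		if (retry)
			printf("retrying: mappings may have changed\n");
	} while (retry);
	return 0;
}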
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index b020c757d2bf..b2c2bf35b76c 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -8,9 +8,9 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 65a7e01a8f7d..c45435aa5e36 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -34,7 +34,6 @@
#include <linux/timer.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/dma.h>
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index f7bb6cb8d1e3..e30cd2915e54 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -16,12 +16,12 @@
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 67e48b0a164e..a802ef957d63 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -172,19 +172,6 @@ static void wii_shutdown(void)
flipper_quiesce();
}
-define_machine(wii) {
- .name = "wii",
- .probe = wii_probe,
- .setup_arch = wii_setup_arch,
- .restart = wii_restart,
- .halt = wii_halt,
- .init_IRQ = wii_pic_probe,
- .get_irq = flipper_pic_get_irq,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
- .machine_shutdown = wii_shutdown,
-};
-
static const struct of_device_id wii_of_bus[] = {
{ .compatible = "nintendo,hollywood", },
{ },
@@ -200,3 +187,15 @@ static int __init wii_device_probe(void)
}
device_initcall(wii_device_probe);
+define_machine(wii) {
+ .name = "wii",
+ .probe = wii_probe,
+ .setup_arch = wii_setup_arch,
+ .restart = wii_restart,
+ .halt = wii_halt,
+ .init_IRQ = wii_pic_probe,
+ .get_irq = flipper_pic_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .machine_shutdown = wii_shutdown,
+};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 15b2c6eb506d..f7e66a2005b4 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -42,7 +42,6 @@
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index 701c4e098fe9..78209bb7629c 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -23,7 +23,6 @@
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index af309ee99114..9d4ecd292255 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -108,7 +108,7 @@ static void * __init bootx_early_getprop(unsigned long base,
#define dt_push_token(token, mem) \
do { \
- *(mem) = _ALIGN_UP(*(mem),4); \
+ *(mem) = ALIGN(*(mem),4); \
*((u32 *)*(mem)) = token; \
*(mem) += 4; \
} while(0)
@@ -150,7 +150,7 @@ static void __init bootx_dt_add_prop(char *name, void *data, int size,
/* push property content */
if (size && data) {
memcpy((void *)*mem_end, data, size);
- *mem_end = _ALIGN_UP(*mem_end + size, 4);
+ *mem_end = ALIGN(*mem_end + size, 4);
}
}
@@ -303,7 +303,7 @@ static void __init bootx_scan_dt_build_struct(unsigned long base,
*lp++ = *p;
}
*lp = 0;
- *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
+ *mem_end = ALIGN((unsigned long)lp + 1, 4);
/* get and store all properties */
while (*ppp) {
@@ -356,11 +356,11 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
/* Start using memory after the big blob passed by BootX, get
* some space for the header
*/
- mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
+ mem_start = mem_end = ALIGN(((unsigned long)bi) + start, 4);
DBG("Boot params header at: %x\n", mem_start);
hdr = (struct boot_param_header *)mem_start;
mem_end += sizeof(struct boot_param_header);
- rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
+ rsvmap = (u64 *)(ALIGN(mem_end, 8));
hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
@@ -386,7 +386,7 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase;
/* Build structure */
- mem_end = _ALIGN(mem_end, 16);
+ mem_end = ALIGN(mem_end, 16);
DBG("Building device tree structure at: %x\n", mem_end);
hdr->off_dt_struct = mem_end - mem_start;
bootx_scan_dt_build_struct(base, 4, &mem_end);
@@ -404,7 +404,7 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
* also bump mem_reserve_cnt to cause further reservations to
* fail since it's too late.
*/
- mem_end = _ALIGN(mem_end, PAGE_SIZE);
+ mem_end = ALIGN(mem_end, PAGE_SIZE);
DBG("End of boot params: %x\n", mem_end);
rsvmap[0] = mem_start;
rsvmap[1] = mem_end;
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
index da69e0fcb4f1..ced225415486 100644
--- a/arch/powerpc/platforms/powermac/cache.S
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -184,6 +184,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtlr r10
blr
+_ASM_NOKPROBE_SYMBOL(flush_disable_75x)
/* This code is for 745x processors */
flush_disable_745x:
@@ -351,4 +352,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
mtmsr r11 /* restore DR and EE */
isync
blr
+_ASM_NOKPROBE_SYMBOL(flush_disable_745x)
#endif /* CONFIG_PPC_BOOK3S_32 */
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index dc7a5bae8f1c..853ccc4480e2 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -55,7 +55,7 @@ struct chrp_header {
u8 cksum;
u16 len;
char name[12];
- u8 data[0];
+ u8 data[];
};
struct core99_header {
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 95fb4feb6ccc..f002b0fa69b8 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -51,7 +51,6 @@
#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index bd6085b470b7..f9a680fdd9c4 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -244,7 +244,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtmsr r2
isync
b 1b
-
+_ASM_NOKPROBE_SYMBOL(low_cpu_die)
/*
* Here is the resume code.
*/
@@ -282,6 +282,7 @@ _GLOBAL(core99_wake_up)
lwz r1,0(r3)
/* Pass thru to older resume code ... */
+_ASM_NOKPROBE_SYMBOL(core99_wake_up)
/*
* Here is the resume code for older machines.
* r1 has the physical address of SL_PC(sp).
@@ -429,6 +430,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
lwz r0,4(r1)
mtlr r0
blr
+_ASM_NOKPROBE_SYMBOL(grackle_wake_up)
turn_on_mmu:
mflr r4
@@ -438,6 +440,7 @@ turn_on_mmu:
sync
isync
rfi
+_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index be2ab5b11e57..eb23264910e1 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -30,13 +30,13 @@
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <linux/compiler.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
@@ -49,6 +49,7 @@
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h>
+#include <asm/inst.h>
#include "pmac.h"
@@ -813,7 +814,7 @@ static int smp_core99_kick_cpu(int nr)
* b __secondary_start_pmac_0 + nr*8
*/
target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
- patch_branch(vector, target, BRANCH_SET_LINK);
+ patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
/* Put some life in our friend */
pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
@@ -826,7 +827,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- patch_instruction(vector, save_vector);
+ patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index b36ddee17c87..31d6213a6c8f 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -28,7 +28,6 @@
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/nvram.h>
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index c0f8120045c3..fe3f0fb5aeca 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
obj-$(CONFIG_PERF_EVENTS) += opal-imc.o
obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
-obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o
+obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o vas-api.o
obj-$(CONFIG_OCXL_BASE) += ocxl.o
obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o
obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 78599bca66c2..2dd467383a88 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -1270,7 +1270,7 @@ static int pnv_parse_cpuidle_dt(void)
/* Read residencies */
if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
temp_u32, nr_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
rc = -EINVAL;
goto out;
}
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index b95b9e3c4c98..abeaa533b976 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -15,6 +15,7 @@
#include <asm/debugfs.h>
#include <asm/powernv.h>
+#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include "pci.h"
@@ -425,9 +426,10 @@ static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
++npucomp->pe_num;
}
-struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
+static struct iommu_table_group *
+ pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
{
- struct iommu_table_group *table_group;
+ struct iommu_table_group *compound_group;
struct npu_comp *npucomp;
struct pci_dev *gpdev = NULL;
struct pci_controller *hose;
@@ -446,39 +448,52 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
hose = pci_bus_to_host(npdev->bus);
if (hose->npu) {
- table_group = &hose->npu->npucomp.table_group;
-
- if (!table_group->group) {
- table_group->ops = &pnv_npu_peers_ops;
- iommu_register_group(table_group,
- hose->global_number,
- pe->pe_number);
- }
+ /* P9 case: compound group is per-NPU (all gpus, all links) */
+ npucomp = &hose->npu->npucomp;
} else {
- /* Create a group for 1 GPU and attached NPUs for POWER8 */
- pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
- table_group = &pe->npucomp->table_group;
- table_group->ops = &pnv_npu_peers_ops;
- iommu_register_group(table_group, hose->global_number,
- pe->pe_number);
+ /* P8 case: Compound group is per-GPU (1 gpu, 2 links) */
+ npucomp = pe->npucomp = kzalloc(sizeof(*npucomp), GFP_KERNEL);
}
- /* Steal capabilities from a GPU PE */
- table_group->max_dynamic_windows_supported =
- pe->table_group.max_dynamic_windows_supported;
- table_group->tce32_start = pe->table_group.tce32_start;
- table_group->tce32_size = pe->table_group.tce32_size;
- table_group->max_levels = pe->table_group.max_levels;
- if (!table_group->pgsizes)
- table_group->pgsizes = pe->table_group.pgsizes;
+ compound_group = &npucomp->table_group;
+ if (!compound_group->group) {
+ compound_group->ops = &pnv_npu_peers_ops;
+ iommu_register_group(compound_group, hose->global_number,
+ pe->pe_number);
- npucomp = container_of(table_group, struct npu_comp, table_group);
+ /* Steal capabilities from a GPU PE */
+ compound_group->max_dynamic_windows_supported =
+ pe->table_group.max_dynamic_windows_supported;
+ compound_group->tce32_start = pe->table_group.tce32_start;
+ compound_group->tce32_size = pe->table_group.tce32_size;
+ compound_group->max_levels = pe->table_group.max_levels;
+ if (!compound_group->pgsizes)
+ compound_group->pgsizes = pe->table_group.pgsizes;
+ }
+
+ /*
+ * The gpu would have been added to the iommu group that's created
+ * for the PE. Pull it out now.
+ */
+ iommu_del_device(&gpdev->dev);
+
+ /*
+ * I'm not sure this is strictly required, but it's probably a good idea
+ * since the table_group for the PE is going to be attached to the
+ * compound table group. If we leave the PE's iommu group active then
+ * we might have the same table_group being modifiable via two separate
+ * iommu groups.
+ */
+ iommu_group_put(pe->table_group.group);
+
+ /* now put the GPU into the compound group */
pnv_comp_attach_table_group(npucomp, pe);
+ iommu_add_device(compound_group, &gpdev->dev);
- return table_group;
+ return compound_group;
}
-struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
+static struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
{
struct iommu_table_group *table_group;
struct npu_comp *npucomp;
@@ -521,6 +536,54 @@ struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
return table_group;
}
+
+void pnv_pci_npu_setup_iommu_groups(void)
+{
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
+
+ /*
+ * For non-nvlink devices the IOMMU group is registered when the PE is
+ * configured and devices are added to the group when the per-device
+ * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is
+ * only initialised for "normal" IODA PHBs.
+ *
+ * For NVLink devices we need to ensure the NVLinks and the GPU end up
+ * in the same IOMMU group, so that's handled here.
+ */
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb = hose->private_data;
+
+ if (phb->type == PNV_PHB_IODA2)
+ list_for_each_entry(pe, &phb->ioda.pe_list, list)
+ pnv_try_setup_npu_table_group(pe);
+ }
+
+ /*
+ * Now we have all PHBs discovered, time to add NPU devices to
+ * the corresponding IOMMU groups.
+ */
+ list_for_each_entry(hose, &hose_list, list_node) {
+ unsigned long pgsizes;
+
+ phb = hose->private_data;
+
+ if (phb->type != PNV_PHB_NPU_NVLINK)
+ continue;
+
+ pgsizes = pnv_ioda_parse_tce_sizes(phb);
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ /*
+ * IODA2 bridges get this set up from
+ * pci_controller_ops::setup_bridge but NPU bridges
+ * do not have this hook defined so we do it here.
+ */
+ pe->table_group.pgsizes = pgsizes;
+ pnv_npu_compound_attach(pe);
+ }
+ }
+}
#endif /* CONFIG_IOMMU_API */
int pnv_npu2_init(struct pci_controller *hose)
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c
index d361d37d975f..9a360ced663b 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.c
+++ b/arch/powerpc/platforms/powernv/opal-fadump.c
@@ -671,7 +671,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
* Firmware supports 32-bit field for size. Align it to PAGE_SIZE
* and request firmware to copy multiple kernel boot memory regions.
*/
- fadump_conf->max_copy_size = _ALIGN_DOWN(U32_MAX, PAGE_SIZE);
+ fadump_conf->max_copy_size = ALIGN_DOWN(U32_MAX, PAGE_SIZE);
/*
* Check if dump has been initiated on last reboot.
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 2b3dfd0b6cdd..d95954ad4c0a 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -811,6 +811,10 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name,
goto out;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr) {
+ rc = -ENOMEM;
+ goto out;
+ }
name = kstrdup(export_name, GFP_KERNEL);
if (!name) {
rc = -ENOMEM;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 5dc6847d5f4c..f923359d8afc 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -17,6 +17,34 @@
#include <asm/tce.h>
#include "pci.h"
+unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
+{
+ struct pci_controller *hose = phb->hose;
+ struct device_node *dn = hose->dn;
+ unsigned long mask = 0;
+ int i, rc, count;
+ u32 val;
+
+ count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
+ if (count <= 0) {
+ mask = SZ_4K | SZ_64K;
+ /* Add 16M for POWER8 by default */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ !cpu_has_feature(CPU_FTR_ARCH_300))
+ mask |= SZ_16M | SZ_256M;
+ return mask;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
+ i, &val);
+ if (rc == 0)
+ mask |= 1ULL << val;
+ }
+
+ return mask;
+}
+
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset, unsigned int page_shift)
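pnv_ioda_parse_tce_sizes(), made non-static and moved into this file by the patch, turns each "ibm,supported-tce-sizes" device-tree cell (a page shift) into one bit of a supported-page-size mask. The throwaway user-space sketch below illustrates only that arithmetic; the shift values are assumed examples, not read from a real device tree.

/*
 * Stand-alone illustration of the mask arithmetic in
 * pnv_ioda_parse_tce_sizes() above: each "ibm,supported-tce-sizes" cell is
 * a page shift, and the result has one bit set per supported page size.
 * The shift values below are made-up examples.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int shifts[] = { 12, 16, 24, 28 };  /* 4K, 64K, 16M, 256M */
	unsigned long long mask = 0;
	unsigned int i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
		mask |= 1ULL << shifts[i];

	printf("page size mask = 0x%llx\n", mask);	/* 0x11011000 */
	return 0;
}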
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 57d3a6af1d52..73a63efcf855 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -51,6 +51,7 @@ static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
"NPU_OCAPI" };
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
+static void pnv_pci_configure_bus(struct pci_bus *bus);
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
@@ -264,8 +265,8 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
if (!r->parent || !pnv_pci_is_m64(phb, r))
continue;
- start = _ALIGN_DOWN(r->start - base, sgsz);
- end = _ALIGN_UP(r->end - base, sgsz);
+ start = ALIGN_DOWN(r->start - base, sgsz);
+ end = ALIGN(r->end - base, sgsz);
for (segno = start / sgsz; segno < end / sgsz; segno++) {
if (pe_bitmap)
set_bit(segno, pe_bitmap);
@@ -361,7 +362,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
return NULL;
/* Allocate bitmap */
- size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
+ size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
pe_alloc = kzalloc(size, GFP_KERNEL);
if (!pe_alloc) {
pr_warn("%s: Out of memory !\n",
@@ -660,6 +661,16 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
return state;
}
+struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn)
+{
+ int pe_number = phb->ioda.pe_rmap[bdfn];
+
+ if (pe_number == IODA_INVALID_PE)
+ return NULL;
+
+ return &phb->ioda.pe_array[pe_number];
+}
+
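/*
 * Illustration, not part of the patch: pnv_pci_bdfn_to_pe() above (and its
 * caller pnv_pci_ioda_dma_dev_setup() later in this diff) key the PE reverse
 * map on a BDFN built as devfn | (bus << 8), i.e. bus number in bits 15:8 and
 * devfn in bits 7:0. A throwaway user-space check of that encoding, with
 * assumed example values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int bus   = 0x01;		/* example bus number (assumed) */
	unsigned int devfn = (0x02 << 3) | 0;	/* device 2, function 0 */
	unsigned int bdfn  = devfn | (bus << 8);

	printf("bdfn = 0x%04x\n", bdfn);	/* prints 0x0110 */
	return 0;
}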
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -1110,34 +1121,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
return pe;
}
-static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- struct pci_dn *pdn = pci_get_pdn(dev);
-
- if (pdn == NULL) {
- pr_warn("%s: No device node associated with device !\n",
- pci_name(dev));
- continue;
- }
-
- /*
- * In partial hotplug case, the PCI device might be still
- * associated with the PE and needn't attach it to the PE
- * again.
- */
- if (pdn->pe_number != IODA_INVALID_PE)
- continue;
-
- pe->device_count++;
- pdn->pe_number = pe->pe_number;
- if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_same_PE(dev->subordinate, pe);
- }
-}
-
/*
* There are 2 types of PCI bus sensitive PEs: one that is comprised of a
* single PCI bus, and another that contains the primary PCI bus and its
@@ -1156,15 +1139,13 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
* We should reuse it instead of allocating a new one.
*/
pe_num = phb->ioda.pe_rmap[bus->number << 8];
- if (pe_num != IODA_INVALID_PE) {
+ if (WARN_ON(pe_num != IODA_INVALID_PE)) {
pe = &phb->ioda.pe_array[pe_num];
- pnv_ioda_setup_same_PE(bus, pe);
return NULL;
}
/* PE number for root bus should have been reserved */
- if (pci_is_root_bus(bus) &&
- phb->ioda.root_pe_idx != IODA_INVALID_PE)
+ if (pci_is_root_bus(bus))
pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
/* Check if PE is determined by M64 */
@@ -1202,9 +1183,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
return NULL;
}
- /* Associate it with all child devices */
- pnv_ioda_setup_same_PE(bus, pe);
-
/* Put PE to the list */
list_add_tail(&pe->list, &phb->ioda.pe_list);
@@ -1288,7 +1266,7 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
pnv_ioda_setup_npu_PE(pdev);
}
-static void pnv_pci_ioda_setup_PEs(void)
+static void pnv_pci_ioda_setup_nvlink(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
@@ -1312,6 +1290,11 @@ static void pnv_pci_ioda_setup_PEs(void)
list_for_each_entry(pe, &phb->ioda.pe_list, list)
pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
}
+
+#ifdef CONFIG_IOMMU_API
+ /* setup iommu groups so we can do nvlink pass-thru */
+ pnv_pci_npu_setup_iommu_groups();
+#endif
}
#ifdef CONFIG_PCI_IOV
@@ -1550,11 +1533,6 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev)
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe);
-#ifdef CONFIG_IOMMU_API
-static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
- struct iommu_table_group *table_group, struct pci_bus *bus);
-
-#endif
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_bus *bus;
@@ -1619,11 +1597,6 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
}
pnv_pci_ioda2_setup_dma_pe(phb, pe);
-#ifdef CONFIG_IOMMU_API
- iommu_register_group(&pe->table_group,
- pe->phb->hose->global_number, pe->pe_number);
- pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
-#endif
}
}
@@ -1767,24 +1740,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *pe;
- /*
- * The function can be called while the PE#
- * hasn't been assigned. Do nothing for the
- * case.
- */
- if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return;
+ /* Check if the BDFN for this device is associated with a PE yet */
+ pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
+ if (!pe) {
+ /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */
+ if (WARN_ON(pdev->is_virtfn))
+ return;
+
+ pnv_pci_configure_bus(pdev->bus);
+ pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
+ pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff);
+
+
+ /*
+ * If we can't setup the IODA PE something has gone horribly
+ * wrong and we can't enable DMA for the device.
+ */
+ if (WARN_ON(!pe))
+ return;
+ } else {
+ pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number);
+ }
+
+ if (pdn)
+ pdn->pe_number = pe->pe_number;
+ pe->device_count++;
- pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
- /*
- * Note: iommu_add_device() will fail here as
- * for physical PE: the device is already added by now;
- * for virtual PE: sysfs entries are not ready yet and
- * tce_iommu_bus_notifier will add the device to a group later.
- */
+
+ /* PEs with a DMA weight of zero won't have a group */
+ if (pe->table_group.group)
+ iommu_add_device(&pe->table_group, &pdev->dev);
}
/*
@@ -2297,9 +2285,6 @@ found:
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
iommu_init_table(tbl, phb->hose->node, 0, 0);
- if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
-
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -2537,7 +2522,7 @@ unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
direct_table_size = 1UL << table_shift;
for ( ; levels; --levels) {
- bytes += _ALIGN_UP(tce_table_size, direct_table_size);
+ bytes += ALIGN(tce_table_size, direct_table_size);
tce_table_size /= direct_table_size;
tce_table_size <<= 3;
@@ -2596,137 +2581,8 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
.take_ownership = pnv_ioda2_take_ownership,
.release_ownership = pnv_ioda2_release_ownership,
};
-
-static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe,
- struct iommu_table_group *table_group,
- struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- iommu_add_device(table_group, &dev->dev);
-
- if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_iommu_group_add_devices(pe,
- table_group, dev->subordinate);
- }
-}
-
-static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
- struct iommu_table_group *table_group, struct pci_bus *bus)
-{
-
- if (pe->flags & PNV_IODA_PE_DEV)
- iommu_add_device(table_group, &pe->pdev->dev);
-
- if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus)
- pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group,
- bus);
-}
-
-static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
-
-static void pnv_pci_ioda_setup_iommu_api(void)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- /*
- * There are 4 types of PEs:
- * - PNV_IODA_PE_BUS: a downstream port with an adapter,
- * created from pnv_pci_setup_bridge();
- * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it,
- * created from pnv_pci_setup_bridge();
- * - PNV_IODA_PE_VF: a SRIOV virtual function,
- * created from pnv_pcibios_sriov_enable();
- * - PNV_IODA_PE_DEV: an NPU or OCAPI device,
- * created from pnv_pci_ioda_fixup().
- *
- * Normally a PE is represented by an IOMMU group, however for
- * devices with side channels the groups need to be more strict.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
-
- if (phb->type == PNV_PHB_NPU_NVLINK ||
- phb->type == PNV_PHB_NPU_OCAPI)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- struct iommu_table_group *table_group;
-
- table_group = pnv_try_setup_npu_table_group(pe);
- if (!table_group) {
- if (!pnv_pci_ioda_pe_dma_weight(pe))
- continue;
-
- table_group = &pe->table_group;
- iommu_register_group(&pe->table_group,
- pe->phb->hose->global_number,
- pe->pe_number);
- }
- pnv_ioda_setup_bus_iommu_group(pe, table_group,
- pe->pbus);
- }
- }
-
- /*
- * Now we have all PHBs discovered, time to add NPU devices to
- * the corresponding IOMMU groups.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- unsigned long pgsizes;
-
- phb = hose->private_data;
-
- if (phb->type != PNV_PHB_NPU_NVLINK)
- continue;
-
- pgsizes = pnv_ioda_parse_tce_sizes(phb);
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- /*
- * IODA2 bridges get this set up from
- * pci_controller_ops::setup_bridge but NPU bridges
- * do not have this hook defined so we do it here.
- */
- pe->table_group.pgsizes = pgsizes;
- pnv_npu_compound_attach(pe);
- }
- }
-}
-#else /* !CONFIG_IOMMU_API */
-static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
-static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
-{
- struct pci_controller *hose = phb->hose;
- struct device_node *dn = hose->dn;
- unsigned long mask = 0;
- int i, rc, count;
- u32 val;
-
- count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
- if (count <= 0) {
- mask = SZ_4K | SZ_64K;
- /* Add 16M for POWER8 by default */
- if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
- !cpu_has_feature(CPU_FTR_ARCH_300))
- mask |= SZ_16M | SZ_256M;
- return mask;
- }
-
- for (i = 0; i < count; i++) {
- rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
- i, &val);
- if (rc == 0)
- mask |= 1ULL << val;
- }
-
- return mask;
-}
-
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe)
{
@@ -2749,16 +2605,16 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
IOMMU_TABLE_GROUP_MAX_TABLES;
pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
-#ifdef CONFIG_IOMMU_API
- pe->table_group.ops = &pnv_pci_ioda2_ops;
-#endif
rc = pnv_pci_ioda2_setup_default_config(pe);
if (rc)
return;
- if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+#ifdef CONFIG_IOMMU_API
+ pe->table_group.ops = &pnv_pci_ioda2_ops;
+ iommu_register_group(&pe->table_group, phb->hose->global_number,
+ pe->pe_number);
+#endif
}
int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
@@ -3220,8 +3076,7 @@ static void pnv_pci_enable_bridges(void)
static void pnv_pci_ioda_fixup(void)
{
- pnv_pci_ioda_setup_PEs();
- pnv_pci_ioda_setup_iommu_api();
+ pnv_pci_ioda_setup_nvlink();
pnv_pci_ioda_create_dbgfs();
pnv_pci_enable_bridges();
@@ -3333,28 +3188,18 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
}
}
-static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+static void pnv_pci_configure_bus(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct pnv_phb *phb = hose->private_data;
struct pci_dev *bridge = bus->self;
struct pnv_ioda_pe *pe;
- bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
-
- /* Extend bridge's windows if necessary */
- pnv_pci_fixup_bridge_resources(bus, type);
+ bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
- /* The PE for root bus should be realized before any one else */
- if (!phb->ioda.root_pe_populated) {
- pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
- if (pe) {
- phb->ioda.root_pe_idx = pe->pe_number;
- phb->ioda.root_pe_populated = true;
- }
- }
+ dev_info(&bus->dev, "Configuring PE for bus\n");
/* Don't assign PE to PCI bus, which doesn't have subordinate devices */
- if (list_empty(&bus->devices))
+ if (WARN_ON(list_empty(&bus->devices)))
return;
/* Reserve PEs according to used M64 resources */
@@ -3599,6 +3444,8 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
struct pnv_phb *phb = pe->phb;
struct pnv_ioda_pe *slave, *tmp;
+ pe_info(pe, "Releasing PE\n");
+
mutex_lock(&phb->ioda.pe_list_mutex);
list_del(&pe->list);
mutex_unlock(&phb->ioda.pe_list_mutex);
@@ -3633,11 +3480,10 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
* that it can be populated again in PCI hot add path. The PE
* shouldn't be destroyed as it's the global reserved resource.
*/
- if (phb->ioda.root_pe_populated &&
- phb->ioda.root_pe_idx == pe->pe_number)
- phb->ioda.root_pe_populated = false;
- else
- pnv_ioda_free_pe(pe);
+ if (phb->ioda.root_pe_idx == pe->pe_number)
+ return;
+
+ pnv_ioda_free_pe(pe);
}
static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3715,7 +3561,7 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.enable_device_hook = pnv_pci_enable_device_hook,
.release_device = pnv_pci_release_device,
.window_alignment = pnv_pci_window_alignment,
- .setup_bridge = pnv_pci_setup_bridge,
+ .setup_bridge = pnv_pci_fixup_bridge_resources,
.reset_secondary_bus = pnv_pci_reset_secondary_bus,
.shutdown = pnv_pci_ioda_shutdown,
};
@@ -3745,6 +3591,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
struct pnv_phb *phb;
unsigned long size, m64map_off, m32map_off, pemap_off;
unsigned long iomap_off = 0, dma32map_off = 0;
+ struct pnv_ioda_pe *root_pe;
struct resource r;
const __be64 *prop64;
const __be32 *prop32;
@@ -3863,7 +3710,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
PNV_IODA1_DMA32_SEGSIZE;
/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
- size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
+ size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
sizeof(unsigned long));
m64map_off = size;
size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
@@ -3912,7 +3759,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
} else {
- phb->ioda.root_pe_idx = IODA_INVALID_PE;
+ /* otherwise just allocate one */
+ root_pe = pnv_ioda_alloc_pe(phb);
+ phb->ioda.root_pe_idx = root_pe->pe_number;
}
INIT_LIST_HEAD(&phb->ioda.pe_list);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 5bf818246339..091fe1cf386b 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -955,28 +955,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
- struct pci_dev *pdev;
- struct pci_dn *pdn;
- struct pnv_ioda_pe *pe;
- struct pci_controller *hose;
- struct pnv_phb *phb;
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- pdev = to_pci_dev(dev);
- pdn = pci_get_pdn(pdev);
- hose = pci_bus_to_host(pdev->bus);
- phb = hose->private_data;
-
- WARN_ON_ONCE(!phb);
- if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb)
- return 0;
-
- pe = &phb->ioda.pe_array[pdn->pe_number];
- if (!pe->table_group.group)
- return 0;
- iommu_add_device(&pe->table_group, dev);
- return 0;
case BUS_NOTIFY_DEL_DEVICE:
iommu_del_device(dev);
return 0;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index d3bbdeab3a32..51c254f2f3cb 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -33,6 +33,24 @@ enum pnv_phb_model {
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
+/*
+ * A brief note on PNV_IODA_PE_BUS_ALL
+ *
+ * This is needed because of the behaviour of PCIe-to-PCI bridges. The PHB uses
+ * the Requester ID field of the PCIe request header to determine the device
+ * (and PE) that initiated a DMA. In legacy PCI individual memory read/write
+ * requests aren't tagged with the RID. To work around this the PCIe-to-PCI
+ * bridge will use (secondary_bus_no << 8) | 0x00 as the RID on the PCIe side.
+ *
+ * PCIe-to-X bridges have a similar issue even though PCI-X requests also have
+ * a RID in the transaction header. The PCIe-to-X bridge is permitted to "take
+ * ownership" of a transaction by a PCI-X device when forwarding it to the PCIe
+ * side of the bridge.
+ *
+ * To work around these problems we use the BUS_ALL flag since every subordinate
+ * bus of the bridge should go into the same PE.
+ */
+
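As a concrete illustration of the aliasing described above (illustrative bus numbers, not taken from the patch): a conventional PCI device behind a PCIe-to-PCI bridge whose secondary bus is 0x05 has its DMAs forwarded upstream with RID (0x05 << 8) | 0x00 = 0x0500, i.e. they all appear to originate from 05:00.0. Because the PHB cannot tell the devices on that bus apart, the bridge and every bus subordinate to it are grouped into a single PE.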
/* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
#define PNV_IODA_STOPPED_STATE 0x8000000000000000
@@ -118,7 +136,6 @@ struct pnv_phb {
unsigned int total_pe_num;
unsigned int reserved_pe_idx;
unsigned int root_pe_idx;
- bool root_pe_populated;
/* 32-bit MMIO window */
unsigned int m32_size;
@@ -190,6 +207,7 @@ extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
+extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
@@ -209,11 +227,7 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
/* Nvlink functions */
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
-extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
-extern struct iommu_table_group *pnv_try_setup_npu_table_group(
- struct pnv_ioda_pe *pe);
-extern struct iommu_table_group *pnv_npu_compound_attach(
- struct pnv_ioda_pe *pe);
+extern void pnv_pci_npu_setup_iommu_groups(void);
/* pci-ioda-tce.c */
#define POWERNV_IOMMU_DEFAULT_LEVELS 2
@@ -244,4 +258,6 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset, unsigned int page_shift);
+extern unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
+
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/vas-api.c b/arch/powerpc/platforms/powernv/vas-api.c
new file mode 100644
index 000000000000..98ed5d8c5441
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/vas-api.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * VAS user space API for its accelerators (Only NX-GZIP is supported now)
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/vas.h>
+#include <uapi/asm/vas-api.h>
+#include "vas.h"
+
+/*
+ * The driver creates the device node that can be used as follows:
+ * For NX-GZIP
+ *
+ * fd = open("/dev/crypto/nx-gzip", O_RDWR);
+ * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+ * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
+ * vas_copy(&crb, 0, 1);
+ * vas_paste(paste_addr, 0, 1);
+ * close(fd) or exit process to close window.
+ *
+ * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
+ * copy/paste returns to user space directly, so refer to the NX hardware
+ * documentation for the exact copy/paste usage and completion / error
+ * conditions.
+ */
+
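To make the sequence above concrete, the following is a minimal, hypothetical user-space sketch (not part of this patch). It assumes the VAS_TX_WIN_OPEN ioctl and the version/vas_id fields of struct vas_tx_win_open_attr exported by the uapi header this series adds, and it leaves out the copy/paste step itself:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <asm/vas-api.h>

/* Open an NX-GZIP send window and map its paste address. */
static int open_gzip_window(int vas_id, void **paste_addr)
{
	struct vas_tx_win_open_attr attr = { .version = 1, .vas_id = vas_id };
	long pgsz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
		goto err;

	*paste_addr = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
	if (*paste_addr == MAP_FAILED)
		goto err;

	return fd;	/* closing fd closes the window */
err:
	close(fd);
	return -1;
}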
+/*
+ * Wrapper object for the nx-gzip device - there is just one instance of
+ * this node for the whole system.
+ */
+static struct coproc_dev {
+ struct cdev cdev;
+ struct device *device;
+ char *name;
+ dev_t devt;
+ struct class *class;
+ enum vas_cop_type cop_type;
+} coproc_device;
+
+struct coproc_instance {
+ struct coproc_dev *coproc;
+ struct vas_window *txwin;
+};
+
+static char *coproc_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
+}
+
+static int coproc_open(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst;
+
+ cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
+ if (!cp_inst)
+ return -ENOMEM;
+
+ cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
+ cdev);
+ fp->private_data = cp_inst;
+
+ return 0;
+}
+
+static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+{
+ void __user *uptr = (void __user *)arg;
+ struct vas_tx_win_attr txattr = {};
+ struct vas_tx_win_open_attr uattr;
+ struct coproc_instance *cp_inst;
+ struct vas_window *txwin;
+ int rc, vasid;
+
+ cp_inst = fp->private_data;
+
+ /*
+ * One window per file descriptor
+ */
+ if (cp_inst->txwin)
+ return -EEXIST;
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+ pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ if (uattr.version != 1) {
+ pr_err("Invalid version\n");
+ return -EINVAL;
+ }
+
+ vasid = uattr.vas_id;
+
+ vas_init_tx_win_attr(&txattr, cp_inst->coproc->cop_type);
+
+ txattr.lpid = mfspr(SPRN_LPID);
+ txattr.pidr = mfspr(SPRN_PID);
+ txattr.user_win = true;
+ txattr.rsvd_txbuf_count = false;
+ txattr.pswid = false;
+
+ pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
+ mfspr(SPRN_PID));
+
+ txwin = vas_tx_win_open(vasid, cp_inst->coproc->cop_type, &txattr);
+ if (IS_ERR(txwin)) {
+ pr_err("%s() vas_tx_win_open() failed, %ld\n", __func__,
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+
+ cp_inst->txwin = txwin;
+
+ return 0;
+}
+
+static int coproc_release(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+
+ if (cp_inst->txwin) {
+ vas_win_close(cp_inst->txwin);
+ cp_inst->txwin = NULL;
+ }
+
+ kfree(cp_inst);
+ fp->private_data = NULL;
+
+ /*
+ * We don't know here if user has other receive windows
+ * open, so we can't really call clear_thread_tidr().
+ * So, once the process calls set_thread_tidr(), the
+ * TIDR value sticks around until process exits, resulting
+ * in an extra copy in restore_sprs().
+ */
+
+ return 0;
+}
+
+static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+ unsigned long pfn;
+ u64 paste_addr;
+ pgprot_t prot;
+ int rc;
+
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+ pr_err("%s(): No send window open?\n", __func__);
+ return -EINVAL;
+ }
+
+ vas_win_paste_addr(txwin, &paste_addr, NULL);
+ pfn = paste_addr >> PAGE_SHIFT;
+
+ /* flags, page_prot from cxl_mmap(), except we want cachable */
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+
+ prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
+
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+ pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+ paste_addr, vma->vm_start, rc);
+
+ return rc;
+}
+
+static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VAS_TX_WIN_OPEN:
+ return coproc_ioc_tx_win_open(fp, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct file_operations coproc_fops = {
+ .open = coproc_open,
+ .release = coproc_release,
+ .mmap = coproc_mmap,
+ .unlocked_ioctl = coproc_ioctl,
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API code
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+ int rc = -EINVAL;
+ dev_t devno;
+
+ rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
+ if (rc) {
+ pr_err("Unable to allocate coproc major number: %i\n", rc);
+ return rc;
+ }
+
+ pr_devel("%s device allocated, dev [%i,%i]\n", name,
+ MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
+
+ coproc_device.class = class_create(mod, name);
+ if (IS_ERR(coproc_device.class)) {
+ rc = PTR_ERR(coproc_device.class);
+ pr_err("Unable to create %s class %d\n", name, rc);
+ goto err_class;
+ }
+ coproc_device.class->devnode = coproc_devnode;
+ coproc_device.cop_type = cop_type;
+
+ coproc_fops.owner = mod;
+ cdev_init(&coproc_device.cdev, &coproc_fops);
+
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ rc = cdev_add(&coproc_device.cdev, devno, 1);
+ if (rc) {
+ pr_err("cdev_add() failed %d\n", rc);
+ goto err_cdev;
+ }
+
+ coproc_device.device = device_create(coproc_device.class, NULL,
+ devno, NULL, name, MINOR(devno));
+ if (IS_ERR(coproc_device.device)) {
+ rc = PTR_ERR(coproc_device.device);
+ pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
+ goto err;
+ }
+
+ pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+ MINOR(devno));
+
+ return 0;
+
+err:
+ cdev_del(&coproc_device.cdev);
+err_cdev:
+ class_destroy(coproc_device.class);
+err_class:
+ unregister_chrdev_region(coproc_device.devt, 1);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vas_register_coproc_api);
+
+void vas_unregister_coproc_api(void)
+{
+ dev_t devno;
+
+ cdev_del(&coproc_device.cdev);
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ device_destroy(coproc_device.class, devno);
+
+ class_destroy(coproc_device.class);
+ unregister_chrdev_region(coproc_device.devt, 1);
+}
+EXPORT_SYMBOL_GPL(vas_unregister_coproc_api);
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 44035a3d6414..41fa90d2f4ab 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -38,7 +38,7 @@ static int info_show(struct seq_file *s, void *private)
seq_printf(s, "Type: %s, %s\n", cop_to_str(window->cop),
window->tx_win ? "Send" : "Receive");
- seq_printf(s, "Pid : %d\n", window->pid);
+ seq_printf(s, "Pid : %d\n", vas_window_pid(window));
unlock:
mutex_unlock(&vas_mutex);
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
new file mode 100644
index 000000000000..266a6ca5e15e
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VAS Fault handling.
+ * Copyright 2019, IBM Corporation
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+
+#include "vas.h"
+
+/*
+ * The maximum FIFO size for a fault window is 8MB
+ * (VAS_RX_FIFO_SIZE_MAX). Use a 4MB FIFO since each VAS
+ * instance has its own fault window.
+ * An 8MB FIFO can be used if more faults are expected for each
+ * VAS instance.
+ */
+#define VAS_FAULT_WIN_FIFO_SIZE (4 << 20)
+
+static void dump_crb(struct coprocessor_request_block *crb)
+{
+ struct data_descriptor_entry *dde;
+ struct nx_fault_stamp *nx;
+
+ dde = &crb->source;
+ pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ dde = &crb->target;
+ pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ nx = &crb->stamp.nx;
+ pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
+ be32_to_cpu(nx->pswid),
+ be64_to_cpu(crb->stamp.nx.fault_storage_addr),
+ nx->flags, nx->fault_status);
+}
+
+/*
+ * Update the CSB to indicate a translation error.
+ *
+ * User space will be polling on CSB after the request is issued.
+ * If NX can handle the request without any issues, it updates CSB.
+ * Whereas if NX encounters a page fault, the kernel will handle the
+ * fault and update the CSB with a translation error.
+ *
+ * If we are unable to update the CSB (i.e. copy_to_user failed due to
+ * an invalid csb_addr), send a signal to the process.
+ */
+static void update_csb(struct vas_window *window,
+ struct coprocessor_request_block *crb)
+{
+ struct coprocessor_status_block csb;
+ struct kernel_siginfo info;
+ struct task_struct *tsk;
+ void __user *csb_addr;
+ struct pid *pid;
+ int rc;
+
+ /*
+ * NX user space windows can not be opened for task->mm=NULL
+ * and faults will not be generated for kernel requests.
+ */
+ if (WARN_ON_ONCE(!window->mm || !window->user_win))
+ return;
+
+ csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
+
+ memset(&csb, 0, sizeof(csb));
+ csb.cc = CSB_CC_TRANSLATION;
+ csb.ce = CSB_CE_TERMINATION;
+ csb.cs = 0;
+ csb.count = 0;
+
+ /*
+ * NX operates on and returns data in BE format as defined by the CRB
+ * struct. So save fault_storage_addr in BE as NX pasted it in the FIFO
+ * and expect user space to convert it to CPU format.
+ */
+ csb.address = crb->stamp.nx.fault_storage_addr;
+ csb.flags = 0;
+
+ pid = window->pid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ /*
+ * Process closes send window after all pending NX requests are
+ * completed. In multi-thread applications, a child thread can
+ * open a window and can exit without closing it. Maybe some
+ * requests are pending or this window can be used by other
+ * threads later. We should handle faults if NX encounters
+ * page faults on these requests. Update CSB with translation
+ * error and fault address. If csb_addr passed by user space is
+ * invalid, send SEGV signal to pid saved in window. If the
+ * child thread is not running, send the signal to tgid.
+ * Parent thread (tgid) will close this window upon its exit.
+ *
+ * pid and mm references are taken when window is opened by
+ * process (pid). So tgid is used only when child thread opens
+ * a window and exits without closing it.
+ */
+ if (!tsk) {
+ pid = window->tgid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ /*
+ * Parent thread (tgid) will be closing window when it
+ * exits. So should not get here.
+ */
+ if (WARN_ON_ONCE(!tsk))
+ return;
+ }
+
+ /* Return if the task is exiting. */
+ if (tsk->flags & PF_EXITING) {
+ put_task_struct(tsk);
+ return;
+ }
+
+ kthread_use_mm(window->mm);
+ rc = copy_to_user(csb_addr, &csb, sizeof(csb));
+ /*
+ * User space polls on csb.flags (first byte). So add barrier
+ * then copy first byte with csb flags update.
+ */
+ if (!rc) {
+ csb.flags = CSB_V;
+ /* Make sure update to csb.flags is visible now */
+ smp_mb();
+ rc = copy_to_user(csb_addr, &csb, sizeof(u8));
+ }
+ kthread_unuse_mm(window->mm);
+ put_task_struct(tsk);
+
+ /* Success */
+ if (!rc)
+ return;
+
+ pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
+ csb_addr, pid_vnr(pid));
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = EFAULT;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = csb_addr;
+
+ /*
+ * The process will be polling on csb.flags after the request is sent
+ * to NX. So generally the CSB update should not fail except when an
+ * application passes an invalid csb_addr. An error message is
+ * displayed and it is left to user space whether to ignore or
+ * handle this signal.
+ */
+ rcu_read_lock();
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+ pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+ pid_vnr(pid), rc);
+}
+
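The barrier-then-flag ordering above is what the consumer relies on. A hypothetical user-space view (not part of the patch; it assumes a user-space mirror of the CSB layout and the CSB_V / CSB_CC_TRANSLATION values from asm/icswx.h, and "req" is a made-up handle holding the csb_addr that was passed to NX) might look like:

/* Spin on the first byte of the CSB until the kernel or NX marks it
 * valid, then inspect the completion code. */
volatile struct coprocessor_status_block *csb = req->csb_addr;

while (!(csb->flags & CSB_V))
	;	/* CSB_V is written last, after the rest of the CSB */

if (csb->cc == CSB_CC_TRANSLATION) {
	/* Fault: csb->address holds the faulting address reported by
	 * the kernel; touch it and resubmit the request. */
}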
+static void dump_fifo(struct vas_instance *vinst, void *entry)
+{
+ unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
+ unsigned long *fifo = entry;
+ int i;
+
+ pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,
+ vinst->fault_fifo_size / CRB_SIZE);
+
+ /* Dump 10 CRB entries or until end of FIFO */
+ pr_err("Fault FIFO Dump:\n");
+ for (i = 0; i < 10*(CRB_SIZE/8) && fifo < end; i += 4, fifo += 4) {
+ pr_err("[%.3d, %p]: 0x%.16lx 0x%.16lx 0x%.16lx 0x%.16lx\n",
+ i, fifo, *fifo, *(fifo+1), *(fifo+2), *(fifo+3));
+ }
+}
+
+/*
+ * Process valid CRBs in fault FIFO.
+ * NX processes user space requests, returns credit and updates the status
+ * in the CRB. If it encounters a translation error when accessing the CRB
+ * or request buffers, it raises an interrupt on the CPU to handle the fault.
+ * It takes credit on fault window, updates nx_fault_stamp in CRB with
+ * the following information and pastes CRB in fault FIFO.
+ *
+ * pswid - window ID of the window on which the request is sent.
+ * fault_storage_addr - fault address
+ *
+ * It can raise a single interrupt for multiple faults. The OS is expected
+ * to process all valid faults and return a credit for each fault on the
+ * user space and fault windows. The fault FIFO is flow-controlled with
+ * this credit mechanism: NX can continuously paste CRBs as long as credits
+ * are available on the fault window; otherwise it returns RMA_Reject.
+ *
+ * Total credits available on fault window: FIFO_SIZE(4MB)/CRBS_SIZE(128)
+ *
+ */
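For the 4MB FIFO chosen above this works out to (4 << 20) / 128 = 32768 credits per fault window (simple arithmetic, not spelled out in the patch); with the 8MB maximum the quotient, 65536, no longer fits in the 16-bit receive-credit field, which is why the comment in vas_setup_fault_window() notes that max creds would then be 0xffff.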
+irqreturn_t vas_fault_thread_fn(int irq, void *data)
+{
+ struct vas_instance *vinst = data;
+ struct coprocessor_request_block *crb, *entry;
+ struct coprocessor_request_block buf;
+ struct vas_window *window;
+ unsigned long flags;
+ void *fifo;
+
+ crb = &buf;
+
+ /*
+ * VAS can interrupt with multiple page faults. So process all
+ * valid CRBs within the fault FIFO until an invalid CRB is reached.
+ * We use CCW[0] and pswid to validate CRBs:
+ *
+ * CCW[0] Reserved bit. When NX pastes CRB, CCW[0]=0
+ * OS sets this bit to 1 after reading CRB.
+ * pswid NX assigns window ID. Set pswid to -1 after
+ * reading CRB from fault FIFO.
+ *
+ * We exit this function if no valid CRBs are available to process.
+ * So acquire fault_lock and reset fifo_in_progress to 0 before
+ * exit.
+ * If the kernel receives another interrupt for a different page
+ * fault, the interrupt handler returns IRQ_HANDLED if
+ * fifo_in_progress is set, meaning these new faults will be
+ * handled by the current thread. Otherwise it sets fifo_in_progress
+ * and returns IRQ_WAKE_THREAD to wake up the thread.
+ */
+ while (true) {
+ spin_lock_irqsave(&vinst->fault_lock, flags);
+ /*
+ * Advance the fault fifo pointer to next CRB.
+ * Use CRB_SIZE rather than sizeof(*crb) since the latter is
+ * aligned to CRB_ALIGN (256) but the CRB written to by VAS is
+ * only CRB_SIZE in length.
+ */
+ fifo = vinst->fault_fifo + (vinst->fault_crbs * CRB_SIZE);
+ entry = fifo;
+
+ if ((entry->stamp.nx.pswid == cpu_to_be32(FIFO_INVALID_ENTRY))
+ || (entry->ccw & cpu_to_be32(CCW0_INVALID))) {
+ vinst->fifo_in_progress = 0;
+ spin_unlock_irqrestore(&vinst->fault_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&vinst->fault_lock, flags);
+ vinst->fault_crbs++;
+ if (vinst->fault_crbs == (vinst->fault_fifo_size / CRB_SIZE))
+ vinst->fault_crbs = 0;
+
+ memcpy(crb, fifo, CRB_SIZE);
+ entry->stamp.nx.pswid = cpu_to_be32(FIFO_INVALID_ENTRY);
+ entry->ccw |= cpu_to_be32(CCW0_INVALID);
+ /*
+ * Return credit for the fault window.
+ */
+ vas_return_credit(vinst->fault_win, false);
+
+ pr_devel("VAS[%d] fault_fifo %p, fifo %p, fault_crbs %d\n",
+ vinst->vas_id, vinst->fault_fifo, fifo,
+ vinst->fault_crbs);
+
+ dump_crb(crb);
+ window = vas_pswid_to_window(vinst,
+ be32_to_cpu(crb->stamp.nx.pswid));
+
+ if (IS_ERR(window)) {
+ /*
+ * We got an interrupt about a specific send
+ * window but we can't find that window and we can't
+ * even clean it up (return credit on user space
+ * window).
+ * But we should not get here.
+ * TODO: Disable IRQ.
+ */
+ dump_fifo(vinst, (void *)entry);
+ pr_err("VAS[%d] fault_fifo %p, fifo %p, pswid 0x%x, fault_crbs %d bad CRB?\n",
+ vinst->vas_id, vinst->fault_fifo, fifo,
+ be32_to_cpu(crb->stamp.nx.pswid),
+ vinst->fault_crbs);
+
+ WARN_ON_ONCE(1);
+ } else {
+ update_csb(window, crb);
+ /*
+ * Return credit for send window after processing
+ * fault CRB.
+ */
+ vas_return_credit(window, true);
+ }
+ }
+}
+
+irqreturn_t vas_fault_handler(int irq, void *dev_id)
+{
+ struct vas_instance *vinst = dev_id;
+ irqreturn_t ret = IRQ_WAKE_THREAD;
+ unsigned long flags;
+
+ /*
+ * NX can generate an interrupt for multiple faults. So the
+ * fault handler thread processes all CRBs until it finds an invalid
+ * entry. If NX sees continuous faults, it is possible
+ * that the thread function entered on the first interrupt
+ * executes and processes all valid CRBs.
+ * So wake up the thread only if the fault thread is not in progress.
+ */
+ spin_lock_irqsave(&vinst->fault_lock, flags);
+
+ if (vinst->fifo_in_progress)
+ ret = IRQ_HANDLED;
+ else
+ vinst->fifo_in_progress = 1;
+
+ spin_unlock_irqrestore(&vinst->fault_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Fault window is opened per VAS instance. NX pastes fault CRB in fault
+ * FIFO upon page faults.
+ */
+int vas_setup_fault_window(struct vas_instance *vinst)
+{
+ struct vas_rx_win_attr attr;
+
+ vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE;
+ vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL);
+ if (!vinst->fault_fifo) {
+ pr_err("Unable to alloc %d bytes for fault_fifo\n",
+ vinst->fault_fifo_size);
+ return -ENOMEM;
+ }
+
+ /*
+ * Invalidate all CRB entries. NX pastes valid entry for each fault.
+ */
+ memset(vinst->fault_fifo, FIFO_INVALID_ENTRY, vinst->fault_fifo_size);
+ vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT);
+
+ attr.rx_fifo_size = vinst->fault_fifo_size;
+ attr.rx_fifo = vinst->fault_fifo;
+
+ /*
+ * Max creds is based on the number of CRBs that can fit in the FIFO
+ * (fault_fifo_size/CRB_SIZE). If an 8MB FIFO is used, max creds
+ * will be 0xffff since the receive creds field is 16 bits wide.
+ */
+ attr.wcreds_max = vinst->fault_fifo_size / CRB_SIZE;
+ attr.lnotify_lpid = 0;
+ attr.lnotify_pid = mfspr(SPRN_PID);
+ attr.lnotify_tid = mfspr(SPRN_PID);
+
+ vinst->fault_win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT,
+ &attr);
+
+ if (IS_ERR(vinst->fault_win)) {
+ pr_err("VAS: Error %ld opening FaultWin\n",
+ PTR_ERR(vinst->fault_win));
+ kfree(vinst->fault_fifo);
+ return PTR_ERR(vinst->fault_win);
+ }
+
+ pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n",
+ vinst->fault_win->winid, attr.lnotify_lpid,
+ attr.lnotify_pid, attr.lnotify_tid);
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 0c0d27d17976..6434f9cb5aed 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -12,6 +12,8 @@
#include <linux/log2.h>
#include <linux/rcupdate.h>
#include <linux/cred.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
#include "vas.h"
@@ -24,7 +26,7 @@
* Compute the paste address region for the window @window using the
* ->paste_base_addr and ->paste_win_id_shift we got from device tree.
*/
-static void compute_paste_address(struct vas_window *window, u64 *addr, int *len)
+void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
{
int winid;
u64 base, shift;
@@ -78,7 +80,7 @@ static void *map_paste_region(struct vas_window *txwin)
goto free_name;
txwin->paste_addr_name = name;
- compute_paste_address(txwin, &start, &len);
+ vas_win_paste_addr(txwin, &start, &len);
if (!request_mem_region(start, len, name)) {
pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
@@ -136,7 +138,7 @@ static void unmap_paste_region(struct vas_window *window)
u64 busaddr_start;
if (window->paste_kaddr) {
- compute_paste_address(window, &busaddr_start, &len);
+ vas_win_paste_addr(window, &busaddr_start, &len);
unmap_region(window->paste_kaddr, busaddr_start, len);
window->paste_kaddr = NULL;
kfree(window->paste_addr_name);
@@ -373,7 +375,7 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
init_xlate_regs(window, winctx->user_win);
val = 0ULL;
- val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0);
+ val = SET_FIELD(VAS_FAULT_TX_WIN, val, winctx->fault_win_id);
write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);
/* In PowerNV, interrupts go to HV. */
@@ -748,6 +750,8 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
winctx->min_scope = VAS_SCOPE_LOCAL;
winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
+ if (rxwin->vinst->virq)
+ winctx->irq_port = rxwin->vinst->irq_port;
}
static bool rx_win_args_valid(enum vas_cop_type cop,
@@ -768,7 +772,7 @@ static bool rx_win_args_valid(enum vas_cop_type cop,
if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
return false;
- if (attr->wcreds_max > VAS_RX_WCREDS_MAX)
+ if (!attr->wcreds_max)
return false;
if (attr->nx_win) {
@@ -813,7 +817,8 @@ void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
{
memset(rxattr, 0, sizeof(*rxattr));
- if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
+ cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
rxattr->pin_win = true;
rxattr->nx_win = true;
rxattr->fault_win = false;
@@ -827,9 +832,9 @@ void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
rxattr->fault_win = true;
rxattr->notify_disable = true;
rxattr->rx_wcred_mode = true;
- rxattr->tx_wcred_mode = true;
rxattr->rx_win_ord_mode = true;
- rxattr->tx_win_ord_mode = true;
+ rxattr->rej_no_credit = true;
+ rxattr->tc_mode = VAS_THRESH_DISABLED;
} else if (cop == VAS_COP_TYPE_FTW) {
rxattr->user_win = true;
rxattr->intr_disable = true;
@@ -873,9 +878,7 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
rxwin->nx_win = rxattr->nx_win;
rxwin->user_win = rxattr->user_win;
rxwin->cop = cop;
- rxwin->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT;
- if (rxattr->user_win)
- rxwin->pid = task_pid_vnr(current);
+ rxwin->wcreds_max = rxattr->wcreds_max;
init_winctx_for_rxwin(rxwin, rxattr, &winctx);
init_winctx_regs(rxwin, &winctx);
@@ -890,7 +893,8 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
{
memset(txattr, 0, sizeof(*txattr));
- if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
+ cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
txattr->rej_no_credit = false;
txattr->rx_wcred_mode = true;
txattr->tx_wcred_mode = true;
@@ -944,13 +948,22 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->lpid = txattr->lpid;
winctx->pidr = txattr->pidr;
winctx->rx_win_id = txwin->rxwin->winid;
+ /*
+ * IRQ and fault window setup is successful. Set the fault window
+ * for the send window so that it is ready to handle faults.
+ */
+ if (txwin->vinst->virq)
+ winctx->fault_win_id = txwin->vinst->fault_win->winid;
winctx->dma_type = VAS_DMA_TYPE_INJECT;
winctx->tc_mode = txattr->tc_mode;
winctx->min_scope = VAS_SCOPE_LOCAL;
winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
+ if (txwin->vinst->virq)
+ winctx->irq_port = txwin->vinst->irq_port;
- winctx->pswid = 0;
+ winctx->pswid = txattr->pswid ? txattr->pswid :
+ encode_pswid(txwin->vinst->vas_id, txwin->winid);
}
static bool tx_win_args_valid(enum vas_cop_type cop,
@@ -965,9 +978,14 @@ static bool tx_win_args_valid(enum vas_cop_type cop,
if (attr->wcreds_max > VAS_TX_WCREDS_MAX)
return false;
- if (attr->user_win &&
- (cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count))
- return false;
+ if (attr->user_win) {
+ if (attr->rsvd_txbuf_count)
+ return false;
+
+ if (cop != VAS_COP_TYPE_FTW && cop != VAS_COP_TYPE_GZIP &&
+ cop != VAS_COP_TYPE_GZIP_HIPRI)
+ return false;
+ }
return true;
}
@@ -1016,7 +1034,6 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
txwin->tx_win = 1;
txwin->rxwin = rxwin;
txwin->nx_win = txwin->rxwin->nx_win;
- txwin->pid = attr->pid;
txwin->user_win = attr->user_win;
txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
@@ -1040,12 +1057,59 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
}
} else {
/*
- * A user mapping must ensure that context switch issues
- * CP_ABORT for this thread.
+ * Interrupt handler or fault window setup failed, which means
+ * NX can not generate an interrupt on a page fault. So do not
+ * open a user space tx window.
*/
- rc = set_thread_uses_vas();
- if (rc)
+ if (!vinst->virq) {
+ rc = -ENODEV;
goto free_window;
+ }
+
+ /*
+ * Window opened by a child thread may not be closed when
+ * it exits. So take reference to its pid and release it
+ * when the window is freed by the parent thread.
+ * Acquire a reference to the task's pid to make sure
+ * the pid will not be re-used - needed only for multithreaded
+ * applications.
+ */
+ txwin->pid = get_task_pid(current, PIDTYPE_PID);
+ /*
+ * Acquire a reference to the task's mm.
+ */
+ txwin->mm = get_task_mm(current);
+
+ if (!txwin->mm) {
+ put_pid(txwin->pid);
+ pr_err("VAS: pid(%d): mm_struct is not found\n",
+ current->pid);
+ rc = -EPERM;
+ goto free_window;
+ }
+
+ mmgrab(txwin->mm);
+ mmput(txwin->mm);
+ mm_context_add_vas_window(txwin->mm);
+ /*
+ * A process closes its windows during exit. In the case of a
+ * multithreaded application, a child thread can open a
+ * window and can exit without closing it. The parent
+ * thread is expected to use and close the window, so there is
+ * no need to take a pid reference for the parent thread.
+ */
+ txwin->tgid = find_get_pid(task_tgid_vnr(current));
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+ * if process / thread has any open VAS window (Use
+ * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
}
set_vinst_win(vinst, txwin);
@@ -1128,6 +1192,7 @@ static void poll_window_credits(struct vas_window *window)
{
u64 val;
int creds, mode;
+ int count = 0;
val = read_hvwc_reg(window, VREG(WINCTL));
if (window->tx_win)
@@ -1146,10 +1211,27 @@ retry:
creds = GET_FIELD(VAS_LRX_WCRED, val);
}
+ /*
+ * It takes around a few milliseconds to complete all pending requests
+ * and return credits.
+ * TODO: Scan the fault FIFO, invalidate CRBs that point to this window
+ * and issue CRB Kill to stop all pending requests. Needed only
+ * if there is a bug in NX or in kernel fault handling.
+ */
if (creds < window->wcreds_max) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(10));
+ count++;
+ /*
+ * The process can not close the send window until all credits
+ * are returned.
+ */
+ if (!(count % 1000))
+ pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
+ vas_window_pid(window), window->winid,
+ creds, count);
+
goto retry;
}
}
@@ -1163,6 +1245,7 @@ static void poll_window_busy_state(struct vas_window *window)
{
int busy;
u64 val;
+ int count = 0;
retry:
val = read_hvwc_reg(window, VREG(WIN_STATUS));
@@ -1170,7 +1253,16 @@ retry:
if (busy) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(5));
+ schedule_timeout(msecs_to_jiffies(10));
+ count++;
+ /*
+ * It takes around a few milliseconds to process all pending
+ * requests.
+ */
+ if (!(count % 1000))
+ pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
+ vas_window_pid(window), window->winid, count);
+
goto retry;
}
}
@@ -1235,22 +1327,118 @@ int vas_win_close(struct vas_window *window)
unmap_paste_region(window);
- clear_vinst_win(window);
-
poll_window_busy_state(window);
unpin_close_window(window);
poll_window_credits(window);
+ clear_vinst_win(window);
+
poll_window_castout(window);
/* if send window, drop reference to matching receive window */
- if (window->tx_win)
+ if (window->tx_win) {
+ if (window->user_win) {
+ /* Drop references to pid and mm */
+ put_pid(window->pid);
+ if (window->mm) {
+ mm_context_remove_vas_window(window->mm);
+ mmdrop(window->mm);
+ }
+ }
put_rx_win(window->rxwin);
+ }
vas_window_free(window);
return 0;
}
EXPORT_SYMBOL_GPL(vas_win_close);
+
+/*
+ * Return credit for the given window.
+ * Send windows and fault window uses credit mechanism as follows:
+ *
+ * Send windows:
+ * - The default number of credits available for each send window is
+ * 1024. It means 1024 requests can be issued asynchronously at the
+ * same time. If the credit is not available, that request will be
+ * returned with RMA_Busy.
+ * - One credit is taken when NX request is issued.
+ * - This credit is returned after NX processed that request.
+ * - If NX encounters translation error, kernel will return the
+ * credit on the specific send window after processing the fault CRB.
+ *
+ * Fault window:
+ * - The total number of credits available is FIFO_SIZE/CRB_SIZE,
+ * i.e. 4MB/128 in the current implementation. If a credit is not
+ * available, RMA_Reject is returned.
+ * - A credit is taken when NX pastes CRB in fault FIFO.
+ * - The kernel will return a credit on the fault window after reading an
+ * entry from the fault FIFO.
+ */
+void vas_return_credit(struct vas_window *window, bool tx)
+{
+ uint64_t val;
+
+ val = 0ULL;
+ if (tx) { /* send window */
+ val = SET_FIELD(VAS_TX_WCRED, val, 1);
+ write_hvwc_reg(window, VREG(TX_WCRED_ADDER), val);
+ } else {
+ val = SET_FIELD(VAS_LRX_WCRED, val, 1);
+ write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), val);
+ }
+}
+
+struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+ uint32_t pswid)
+{
+ struct vas_window *window;
+ int winid;
+
+ if (!pswid) {
+ pr_devel("%s: called for pswid 0!\n", __func__);
+ return ERR_PTR(-ESRCH);
+ }
+
+ decode_pswid(pswid, NULL, &winid);
+
+ if (winid >= VAS_WINDOWS_PER_CHIP)
+ return ERR_PTR(-ESRCH);
+
+ /*
+ * If application closes the window before the hardware
+ * returns the fault CRB, we should wait in vas_win_close()
+ * for the pending requests. So the window must be active
+ * and the process alive.
+ *
+ * If it's a kernel process, we should not get any faults and
+ * should not get here.
+ */
+ window = vinst->windows[winid];
+
+ if (!window) {
+ pr_err("PSWID decode: Could not find window for winid %d pswid %d vinst 0x%p\n",
+ winid, pswid, vinst);
+ return NULL;
+ }
+
+ /*
+ * Do some sanity checks on the decoded window. Window should be
+ * NX GZIP user send window. FTW windows should not incur faults
+ * since their CRBs are ignored (not queued on FIFO or processed
+ * by NX).
+ */
+ if (!window->tx_win || !window->user_win || !window->nx_win ||
+ window->cop == VAS_COP_TYPE_FAULT ||
+ window->cop == VAS_COP_TYPE_FTW) {
+ pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
+ winid, window->tx_win, window->user_win,
+ window->nx_win, window->cop);
+ WARN_ON(1);
+ }
+
+ return window;
+}
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
index ed9cc6df329a..598e4cd563fb 100644
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -14,7 +14,10 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
#include <asm/prom.h>
+#include <asm/xive.h>
#include "vas.h"
@@ -23,12 +26,37 @@ static LIST_HEAD(vas_instances);
static DEFINE_PER_CPU(int, cpu_vas_id);
+static int vas_irq_fault_window_setup(struct vas_instance *vinst)
+{
+ char devname[64];
+ int rc = 0;
+
+ snprintf(devname, sizeof(devname), "vas-%d", vinst->vas_id);
+ rc = request_threaded_irq(vinst->virq, vas_fault_handler,
+ vas_fault_thread_fn, 0, devname, vinst);
+
+ if (rc) {
+ pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
+ vinst->vas_id, vinst->virq, rc);
+ goto out;
+ }
+
+ rc = vas_setup_fault_window(vinst);
+ if (rc)
+ free_irq(vinst->virq, vinst);
+
+out:
+ return rc;
+}
+
static int init_vas_instance(struct platform_device *pdev)
{
- int rc, cpu, vasid;
- struct resource *res;
- struct vas_instance *vinst;
struct device_node *dn = pdev->dev.of_node;
+ struct vas_instance *vinst;
+ struct xive_irq_data *xd;
+ uint32_t chipid, hwirq;
+ struct resource *res;
+ int rc, cpu, vasid;
rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
if (rc) {
@@ -36,6 +64,12 @@ static int init_vas_instance(struct platform_device *pdev)
return -ENODEV;
}
+ rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
+ if (rc) {
+ pr_err("No ibm,chip-id property for %s?\n", pdev->name);
+ return -ENODEV;
+ }
+
if (pdev->num_resources != 4) {
pr_err("Unexpected DT configuration for [%s, %d]\n",
pdev->name, vasid);
@@ -69,9 +103,32 @@ static int init_vas_instance(struct platform_device *pdev)
vinst->paste_win_id_shift = 63 - res->end;
- pr_devel("Initialized instance [%s, %d], paste_base 0x%llx, "
- "paste_win_id_shift 0x%llx\n", pdev->name, vasid,
- vinst->paste_base_addr, vinst->paste_win_id_shift);
+ hwirq = xive_native_alloc_irq_on_chip(chipid);
+ if (!hwirq) {
+ pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
+ vinst->vas_id, chipid);
+ return -ENOENT;
+ }
+
+ vinst->virq = irq_create_mapping(NULL, hwirq);
+ if (!vinst->virq) {
+ pr_err("Inst%d: Unable to map global irq %d\n",
+ vinst->vas_id, hwirq);
+ return -EINVAL;
+ }
+
+ xd = irq_get_handler_data(vinst->virq);
+ if (!xd) {
+ pr_err("Inst%d: Invalid virq %d\n",
+ vinst->vas_id, vinst->virq);
+ return -EINVAL;
+ }
+
+ vinst->irq_port = xd->trig_page;
+ pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
+ pdev->name, vasid, vinst->paste_base_addr,
+ vinst->paste_win_id_shift, vinst->virq,
+ vinst->irq_port);
for_each_possible_cpu(cpu) {
if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
@@ -82,6 +139,22 @@ static int init_vas_instance(struct platform_device *pdev)
list_add(&vinst->node, &vas_instances);
mutex_unlock(&vas_mutex);
+ spin_lock_init(&vinst->fault_lock);
+ /*
+ * IRQ and fault handling setup is needed only for user space
+ * send windows.
+ */
+ if (vinst->virq) {
+ rc = vas_irq_fault_window_setup(vinst);
+ /*
+ * Fault window is used only for user space send windows.
+ * So if vinst->virq is not set, tx_win_open returns -ENODEV
+ * for user space.
+ */
+ if (rc)
+ vinst->virq = 0;
+ }
+
vas_instance_init_dbgdir(vinst);
dev_set_drvdata(&pdev->dev, vinst);
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index 5574aec9ee88..70f793e8f6cc 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -101,11 +101,9 @@
/*
* Initial per-process credits.
* Max send window credits: 4K-1 (12-bits in VAS_TX_WCRED)
- * Max receive window credits: 64K-1 (16 bits in VAS_LRX_WCRED)
*
* TODO: Needs tuning for per-process credits
*/
-#define VAS_RX_WCREDS_MAX ((64 << 10) - 1)
#define VAS_TX_WCREDS_MAX ((4 << 10) - 1)
#define VAS_WCREDS_DEFAULT (1 << 10)
@@ -296,6 +294,22 @@ enum vas_notify_after_count {
};
/*
+ * NX can generate an interrupt for multiple faults and expects the kernel
+ * to process all of them. So read all valid CRB entries until the
+ * invalid one is found. Use the pswid, which is pasted by NX, and ccw[0]
+ * (a reserved bit in BE) to check for a valid CRB. CCW[0] will not be
+ * touched by user space; an application gets a CRB format error if it
+ * updates this bit.
+ *
+ * Invalidate the FIFO during allocation and process all entries from the
+ * last successful read until invalid pswid and ccw[0] values are found.
+ * After reading each CRB entry from the fault FIFO, the kernel invalidates
+ * it by updating pswid with FIFO_INVALID_ENTRY and CCW[0] with
+ * CCW0_INVALID.
+ */
+#define FIFO_INVALID_ENTRY 0xffffffff
+#define CCW0_INVALID 1
+
+/*
* One per instance of VAS. Each instance will have a separate set of
* receive windows, one per coprocessor type.
*
@@ -313,6 +327,15 @@ struct vas_instance {
u64 paste_base_addr;
u64 paste_win_id_shift;
+ u64 irq_port;
+ int virq;
+ int fault_crbs;
+ int fault_fifo_size;
+ int fifo_in_progress; /* To wake up thread or return IRQ_HANDLED */
+ spinlock_t fault_lock; /* Protects fifo_in_progress update */
+ void *fault_fifo;
+ struct vas_window *fault_win; /* Fault window */
+
struct mutex mutex;
struct vas_window *rxwin[VAS_COP_TYPE_MAX];
struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
@@ -333,7 +356,9 @@ struct vas_window {
bool user_win; /* True if user space window */
void *hvwc_map; /* HV window context */
void *uwc_map; /* OS/User window context */
- pid_t pid; /* Linux process id of owner */
+ struct pid *pid; /* Linux process id of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
int wcreds_max; /* Window credits */
char *dbgname;
@@ -406,6 +431,19 @@ extern void vas_init_dbgdir(void);
extern void vas_instance_init_dbgdir(struct vas_instance *vinst);
extern void vas_window_init_dbgdir(struct vas_window *win);
extern void vas_window_free_dbgdir(struct vas_window *win);
+extern int vas_setup_fault_window(struct vas_instance *vinst);
+extern irqreturn_t vas_fault_thread_fn(int irq, void *data);
+extern irqreturn_t vas_fault_handler(int irq, void *dev_id);
+extern void vas_return_credit(struct vas_window *window, bool tx);
+extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+ uint32_t pswid);
+extern void vas_win_paste_addr(struct vas_window *window, u64 *addr,
+ int *len);
+
+static inline int vas_window_pid(struct vas_window *window)
+{
+ return pid_vnr(window->pid);
+}
static inline void vas_log_write(struct vas_window *win, char *name,
void *regptr, u64 val)
@@ -444,6 +482,21 @@ static inline u64 read_hvwc_reg(struct vas_window *win,
return in_be64(win->hvwc_map+reg);
}
+/*
+ * Encode/decode the Partition Send Window ID (PSWID) for a window in
+ * a way that we can uniquely identify any window in the system. i.e.
+ * we should be able to locate the 'struct vas_window' given the PSWID.
+ *
+ * Bits Usage
+ * 0:7 VAS id (8 bits)
+ * 8:15 Unused, 0 (8 bits)
+ * 16:31 Window id (16 bits)
+ */
+static inline u32 encode_pswid(int vasid, int winid)
+{
+ return ((u32)winid | (vasid << (31 - 7)));
+}
+
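As a quick worked example of the layout (illustrative numbers): encode_pswid(2, 5) yields (5 | (2 << 24)) = 0x02000005, and decode_pswid() on that value recovers vasid = 2 and winid = 5.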
static inline void decode_pswid(u32 pswid, int *vasid, int *winid)
{
if (vasid)
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 423be34f0f5f..d094321964fb 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -200,13 +200,14 @@ void ps3_mm_vas_destroy(void)
{
int result;
- DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);
-
if (map.vas_id) {
result = lv1_select_virtual_address_space(0);
- BUG_ON(result);
- result = lv1_destruct_virtual_address_space(map.vas_id);
- BUG_ON(result);
+ result += lv1_destruct_virtual_address_space(map.vas_id);
+
+ if (result) {
+ lv1_panic(0);
+ }
+
map.vas_id = 0;
}
}
@@ -263,7 +264,7 @@ static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
int result;
u64 muid;
- r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
+ r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
@@ -304,19 +305,20 @@ static void ps3_mm_region_destroy(struct mem_region *r)
int result;
if (!r->destroy) {
- pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
- __func__, __LINE__, r->base, r->size);
return;
}
- DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
-
if (r->base) {
result = lv1_release_memory(r->base);
- BUG_ON(result);
+
+ if (result) {
+ lv1_panic(0);
+ }
+
r->size = r->base = r->offset = 0;
map.total = map.rm.size;
}
+
ps3_mm_set_repository_highmem(NULL);
}
@@ -394,8 +396,8 @@ static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
unsigned long bus_addr, unsigned long len)
{
struct dma_chunk *c;
- unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
+ unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus,
1 << r->page_size);
list_for_each_entry(c, &r->chunk_list.head, link) {
@@ -423,8 +425,8 @@ static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
unsigned long lpar_addr, unsigned long len)
{
struct dma_chunk *c;
- unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
+ unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
1 << r->page_size);
list_for_each_entry(c, &r->chunk_list.head, link) {
@@ -775,8 +777,8 @@ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
struct dma_chunk *c;
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
- unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+ unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
1 << r->page_size);
*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
@@ -830,8 +832,8 @@ static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
struct dma_chunk *c;
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
- unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+ unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
1 << r->page_size);
DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
@@ -889,9 +891,9 @@ static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
c = dma_find_chunk(r, bus_addr, len);
if (!c) {
- unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+ unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+ unsigned long aligned_len = ALIGN(len + bus_addr
- aligned_bus, 1 << r->page_size);
DBG("%s:%d: not found: bus_addr %llxh\n",
__func__, __LINE__, bus_addr);
@@ -926,9 +928,9 @@ static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
c = dma_find_chunk(r, bus_addr, len);
if (!c) {
- unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+ unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+ unsigned long aligned_len = ALIGN(len + bus_addr
- aligned_bus,
1 << r->page_size);
DBG("%s:%d: not found: bus_addr %llxh\n",
@@ -974,7 +976,7 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
pr_info("%s:%d: forcing 16M pages for linear map\n",
__func__, __LINE__);
r->page_size = PS3_DMA_16M;
- r->len = _ALIGN_UP(r->len, 1 << r->page_size);
+ r->len = ALIGN(r->len, 1 << r->page_size);
}
}
@@ -1125,7 +1127,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
r->offset = lpar_addr;
if (r->offset >= map.rm.size)
r->offset -= map.r1.offset;
- r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);
+ r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index b29368931c56..e9ae5dd03593 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -138,7 +138,7 @@ static int __init early_parse_ps3fb(char *p)
if (!p)
return 1;
- ps3fb_videomemory.size = _ALIGN_UP(memparse(p, &p),
+ ps3fb_videomemory.size = ALIGN(memparse(p, &p),
ps3fb_videomemory.align);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 845342814edc..ace117f99d94 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -664,6 +664,8 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
if (!ret)
return ret;
+ if (ret < 0)
+ break;
/*
* If RTAS returns a delay value that's above 100ms, cut it
@@ -684,7 +686,11 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);
- return ret;
+ /* PAPR defines -3 as "Parameter Error" for this function: */
+ if (ret == -3)
+ return -EINVAL;
+ else
+ return -EIO;
}
/**
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index b2cde1732301..5ace2f9a277e 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -337,39 +337,19 @@ static int pseries_remove_mem_node(struct device_node *np)
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
- int i, scns_per_block;
- bool rc = true;
- unsigned long pfn, block_sz;
- u64 phys_addr;
-
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
return false;
- block_sz = memory_block_size_bytes();
- scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
- phys_addr = lmb->base_addr;
-
#ifdef CONFIG_FA_DUMP
/*
* Don't hot-remove memory that falls in fadump boot memory area
* and memory that is reserved for capturing old kernel memory.
*/
- if (is_fadump_memory_area(phys_addr, block_sz))
+ if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
return false;
#endif
-
- for (i = 0; i < scns_per_block; i++) {
- pfn = PFN_DOWN(phys_addr);
- if (!pfn_in_present_section(pfn)) {
- phys_addr += MIN_MEMORY_BLOCK_SIZE;
- continue;
- }
-
- rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
- phys_addr += MIN_MEMORY_BLOCK_SIZE;
- }
-
- return rc;
+ /* device_offline() will determine if we can actually remove this lmb */
+ return true;
}
static int dlpar_add_lmb(struct drmem_lmb *);
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index b91eb0929ed1..a6f101c958e8 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -47,6 +47,7 @@
#include <linux/stat.h>
#include <linux/of_platform.h>
#include <asm/ibmebus.h>
+#include <asm/machdep.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
@@ -464,4 +465,4 @@ static int __init ibmebus_bus_init(void)
return 0;
}
-postcore_initcall(ibmebus_bus_init);
+machine_postcore_initcall(pseries, ibmebus_bus_init);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index e4ed5317f117..fd26f3d21d7b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -21,10 +21,10 @@
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index b571285f6c14..10d982997736 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -371,6 +371,9 @@ void post_mobility_fixup(void)
/* Possibly switch to a new RFI flush type */
pseries_setup_rfi_flush();
+ /* Reinitialise system information for hv-24x7 */
+ read_24x7_sys_info();
+
return;
}
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 1d1da639b8b7..f3736fcd98fc 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -395,16 +395,31 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
/*
* Some versions of FWNMI place the buffer inside the 4kB page starting at
* 0x7000. Other versions place it inside the rtas buffer. We check both.
+ * Minimum size of the buffer is 16 bytes.
*/
#define VALID_FWNMI_BUFFER(A) \
- ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
- (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+ ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \
+ (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16))))
static inline struct rtas_error_log *fwnmi_get_errlog(void)
{
return (struct rtas_error_log *)local_paca->mce_data_buf;
}
+static __be64 *fwnmi_get_savep(struct pt_regs *regs)
+{
+ unsigned long savep_ra;
+
+ /* Mask top two bits */
+ savep_ra = regs->gpr[3] & ~(0x3UL << 62);
+ if (!VALID_FWNMI_BUFFER(savep_ra)) {
+ printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
+ return NULL;
+ }
+
+ return __va(savep_ra);
+}
+
/*
* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
@@ -422,19 +437,14 @@ static inline struct rtas_error_log *fwnmi_get_errlog(void)
*/
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
- unsigned long *savep;
struct rtas_error_log *h;
+ __be64 *savep;
- /* Mask top two bits */
- regs->gpr[3] &= ~(0x3UL << 62);
-
- if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
- printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
+ savep = fwnmi_get_savep(regs);
+ if (!savep)
return NULL;
- }
- savep = __va(regs->gpr[3]);
- regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
h = (struct rtas_error_log *)&savep[1];
/* Use the per cpu buffer from paca to store rtas error log */
@@ -458,7 +468,15 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
*/
static void fwnmi_release_errinfo(void)
{
- int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
+ struct rtas_args rtas_args;
+ int ret;
+
+ /*
+ * On pseries, the machine check stack is limited to under 4GB, so
+ * args can be on-stack.
+ */
+ rtas_call_unlocked(&rtas_args, ibm_nmi_interlock_token, 0, 1, NULL);
+ ret = be32_to_cpu(rtas_args.rets[0]);
if (ret != 0)
printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
}
@@ -481,11 +499,21 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
#endif
if (fwnmi_active) {
- struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
- if (errhdr) {
- /* XXX Should look at FWNMI information */
- }
- fwnmi_release_errinfo();
+ __be64 *savep;
+
+ /*
+ * Firmware (PowerVM and KVM) saves r3 to a save area like
+ * machine check, which is not exactly what PAPR (2.9)
+ * suggests but there is no way to detect otherwise, so this
+ * is the interface now.
+ *
+ * System resets do not save any error log or require an
+ * "ibm,nmi-interlock" rtas call to release.
+ */
+
+ savep = fwnmi_get_savep(regs);
+ if (savep)
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
}
if (smp_handle_nmi_ipi(regs))
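The ras.c changes centralise the firmware-NMI save-area lookup: r3 carries the save-area address with its top two bits used as firmware flags, so those bits are masked off before the address is range-checked against either the fixed page at 0x7000 or the RTAS buffer, and only then converted with __va(). A standalone sketch of that validation, with hypothetical rtas.base/rtas.size values standing in for the kernel's globals:

#include <stdio.h>

#define RTAS_BASE 0x1f000000UL	/* hypothetical stand-in for rtas.base */
#define RTAS_SIZE 0x00020000UL	/* hypothetical stand-in for rtas.size */

/* Same range check as VALID_FWNMI_BUFFER(): the buffer needs at least
 * 16 bytes of room inside either region. */
static int valid_fwnmi_buffer(unsigned long a)
{
	return (a >= 0x7000 && a <= 0x8000 - 16) ||
	       (a >= RTAS_BASE && a <= RTAS_BASE + RTAS_SIZE - 16);
}

static unsigned long fwnmi_savep_sketch(unsigned long r3)
{
	unsigned long savep_ra = r3 & ~(0x3UL << 62);	/* mask firmware flag bits */

	if (!valid_fwnmi_buffer(savep_ra)) {
		fprintf(stderr, "FWNMI: corrupt r3 0x%016lx\n", r3);
		return 0;
	}
	return savep_ra;	/* the kernel converts this with __va() */
}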
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c
index 70c3013fdd07..81343908ed33 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.c
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.c
@@ -506,7 +506,7 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
fadump_conf->fadump_supported = 1;
/* Firmware supports 64-bit value for size, align it to pagesize. */
- fadump_conf->max_copy_size = _ALIGN_DOWN(U64_MAX, PAGE_SIZE);
+ fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE);
/*
* The 'ibm,kernel-dump' rtas node is present only if there is
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0c8421dd01ab..2db8469e475f 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -43,7 +43,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
@@ -68,6 +67,7 @@
#include <asm/isa-bridge.h>
#include <asm/security_features.h>
#include <asm/asm-const.h>
+#include <asm/idle.h>
#include <asm/swiotlb.h>
#include <asm/svm.h>
@@ -83,6 +83,7 @@ unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
+int ibm_nmi_interlock_token;
static void pSeries_show_cpuinfo(struct seq_file *m)
{
@@ -113,9 +114,14 @@ static void __init fwnmi_init(void)
struct slb_entry *slb_ptr;
size_t size;
#endif
+ int ibm_nmi_register_token;
- int ibm_nmi_register = rtas_token("ibm,nmi-register");
- if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
+ ibm_nmi_register_token = rtas_token("ibm,nmi-register");
+ if (ibm_nmi_register_token == RTAS_UNKNOWN_SERVICE)
+ return;
+
+ ibm_nmi_interlock_token = rtas_token("ibm,nmi-interlock");
+ if (WARN_ON(ibm_nmi_interlock_token == RTAS_UNKNOWN_SERVICE))
return;
/* If the kernel's not linked at zero we point the firmware at low
@@ -123,8 +129,8 @@ static void __init fwnmi_init(void)
system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;
- if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
- machine_check_addr))
+ if (0 == rtas_call(ibm_nmi_register_token, 2, 1, NULL,
+ system_reset_addr, machine_check_addr))
fwnmi_active = 1;
/*
@@ -317,6 +323,9 @@ static int alloc_dispatch_log_kmem_cache(void)
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
+DEFINE_PER_CPU(u64, idle_spurr_cycles);
+DEFINE_PER_CPU(u64, idle_entry_purr_snap);
+DEFINE_PER_CPU(u64, idle_entry_spurr_snap);
static void pseries_lpar_idle(void)
{
/*
@@ -328,7 +337,7 @@ static void pseries_lpar_idle(void)
return;
/* Indicate to hypervisor that we are idle. */
- get_lppaca()->idle = 1;
+ pseries_idle_prolog();
/*
* Yield the processor to the hypervisor. We return if
@@ -339,7 +348,7 @@ static void pseries_lpar_idle(void)
*/
cede_processor();
- get_lppaca()->idle = 0;
+ pseries_idle_epilog();
}
/*
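The new per-CPU variables let the idle path account PURR/SPURR cycles spent ceded to the hypervisor; pseries_idle_prolog()/pseries_idle_epilog() (from the asm/idle.h include added above) replace the bare lppaca idle-flag updates. A rough userspace-style sketch of the snapshot-and-accumulate idea, with a fake counter standing in for mfspr(SPRN_SPURR); this is not the asm/idle.h implementation:

#include <stdint.h>

static uint64_t fake_spurr;		/* stands in for mfspr(SPRN_SPURR) */
static uint64_t idle_spurr_cycles;	/* per-CPU in the real code */
static uint64_t idle_entry_spurr_snap;

static void idle_prolog_sketch(void)
{
	/* the kernel also sets lppaca->idle = 1 here */
	idle_entry_spurr_snap = fake_spurr;	/* snapshot on idle entry */
}

static void idle_epilog_sketch(void)
{
	/* accumulate the SPURR cycles that elapsed while ceded */
	idle_spurr_cycles += fake_spurr - idle_entry_spurr_snap;
	/* the kernel clears lppaca->idle here */
}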
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index ad61e90032da..6891710833be 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -20,12 +20,12 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 37f1f25ba804..0487b26f6f1a 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -31,6 +31,7 @@
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
+#include <asm/machdep.h>
static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = "vio",
@@ -1513,7 +1514,7 @@ static int __init vio_bus_init(void)
return 0;
}
-postcore_initcall(vio_bus_init);
+machine_postcore_initcall(pseries, vio_bus_init);
static int __init vio_device_init(void)
{
@@ -1522,7 +1523,7 @@ static int __init vio_device_init(void)
return 0;
}
-device_initcall(vio_device_init);
+machine_device_initcall(pseries, vio_device_init);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1703,4 +1704,4 @@ static int __init vio_init(void)
dma_debug_add_bus(&vio_bus_type);
return 0;
}
-fs_initcall(vio_init);
+machine_fs_initcall(pseries, vio_init);
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index cb5a5bd2cef5..026b3f01a991 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -31,8 +31,6 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_IPIC) += ipic.o
-obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
-obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
obj-$(CONFIG_OF_RTC) += of_rtc.o
obj-$(CONFIG_CPM) += cpm_common.o
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 07718b9a2c99..68538b8329f7 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -39,7 +39,6 @@
#include <asm/irq.h>
#include <asm/mpc8260.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cpm2.h>
#include <asm/rheap.h>
#include <asm/fs_pd.h>
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 71660bacb264..7dc1960f8bdb 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -68,6 +68,8 @@ static void udbg_putc_cpm(char c)
void __init udbg_init_cpm(void)
{
#ifdef CONFIG_PPC_8xx
+ mmu_mapin_immr();
+
cpm_udbg_txdesc = (u32 __iomem __force *)
(CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
VIRT_IMMR_BASE);
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index f6c665dac725..a3aeaa5f0f1b 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of_platform.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/fsl_85xx_cache_sram.h>
#include "fsl_85xx_cache_ctlr.h"
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index a3a72b780e67..b0426f28946a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -29,11 +29,11 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index 6aabc74688a6..4cf18000f07c 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -50,8 +50,8 @@ static void ics_rtas_unmask_irq(struct irq_data *d)
server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
- DEFAULT_PRIORITY);
+ call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq,
+ server, DEFAULT_PRIORITY);
if (call_status != 0) {
printk(KERN_ERR
"%s: ibm_set_xive irq %u server %x returned %d\n",
@@ -60,7 +60,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d)
}
/* Now unmask the interrupt (often a no-op) */
- call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
+ call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
__func__, hw_irq, call_status);
@@ -91,7 +91,7 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq)
if (hw_irq == XICS_IPI)
return;
- call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
+ call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
__func__, hw_irq, call_status);
@@ -99,8 +99,8 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq)
}
/* Have to set XIVE to 0xff to be able to remove a slot */
- call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
- xics_default_server, 0xff);
+ call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq,
+ xics_default_server, 0xff);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
__func__, hw_irq, call_status);
@@ -131,7 +131,7 @@ static int ics_rtas_set_affinity(struct irq_data *d,
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return -1;
- status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);
+ status = rtas_call_reentrant(ibm_get_xive, 1, 3, xics_status, hw_irq);
if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
@@ -146,8 +146,8 @@ static int ics_rtas_set_affinity(struct irq_data *d,
return -1;
}
- status = rtas_call(ibm_set_xive, 3, 1, NULL,
- hw_irq, irq_server, xics_status[1]);
+ status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL,
+ hw_irq, irq_server, xics_status[1]);
if (status) {
printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
@@ -179,7 +179,7 @@ static int ics_rtas_map(struct ics *ics, unsigned int virq)
return -EINVAL;
/* Check if RTAS knows about this interrupt */
- rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
+ rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq);
if (rc)
return -ENXIO;
@@ -198,7 +198,7 @@ static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
{
int rc, status[2];
- rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
+ rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec);
if (rc)
return -1;
return status[0];
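Switching these XICS paths to rtas_call_reentrant() avoids the global RTAS lock and shared argument buffer, which matters once the same firmware calls can be reached from contexts such as machine-check handling. The ras.c hunk earlier shows the underlying pattern: a caller-owned struct rtas_args passed to rtas_call_unlocked(), with the big-endian return slots decoded by hand. A hedged sketch of an ibm,get-xive query in that style, assuming the inputs follow nret as varargs exactly as that hunk suggests:

/* Sketch only: query ibm,get-xive with a caller-owned args buffer.
 * 'token' and 'hw_irq' come from the caller; rets[] is big-endian. */
static int get_xive_sketch(int token, int hw_irq, int *server, int *prio)
{
	struct rtas_args args;

	rtas_call_unlocked(&args, token, 1, 3, hw_irq);
	if (be32_to_cpu(args.rets[0]) != 0)
		return -1;

	*server = be32_to_cpu(args.rets[1]);
	*prio = be32_to_cpu(args.rets[2]);
	return 0;
}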
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
deleted file mode 100644
index 4a86dcff3fcd..000000000000
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Interrupt controller driver for Xilinx Virtex FPGAs
- *
- * Copyright (C) 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- */
-
-/*
- * This is a driver for the interrupt controller typically found in
- * Xilinx Virtex FPGA designs.
- *
- * The interrupt sense levels are hard coded into the FPGA design with
- * typically a 1:1 relationship between irq lines and devices (no shared
- * irq lines). Therefore, this driver does not attempt to handle edge
- * and level interrupts differently.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/i8259.h>
-#include <asm/irq.h>
-#include <linux/irqchip.h>
-
-#if defined(CONFIG_PPC_I8259)
-/*
- * Support code for cascading to 8259 interrupt controllers
- */
-static void xilinx_i8259_cascade(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int cascade_irq = i8259_irq();
-
- if (cascade_irq)
- generic_handle_irq(cascade_irq);
-
- /* Let xilinx_intc end the interrupt */
- chip->irq_unmask(&desc->irq_data);
-}
-
-static void __init xilinx_i8259_setup_cascade(void)
-{
- struct device_node *cascade_node;
- int cascade_irq;
-
- /* Initialize i8259 controller */
- cascade_node = of_find_compatible_node(NULL, NULL, "chrp,iic");
- if (!cascade_node)
- return;
-
- cascade_irq = irq_of_parse_and_map(cascade_node, 0);
- if (!cascade_irq) {
- pr_err("virtex_ml510: Failed to map cascade interrupt\n");
- goto out;
- }
-
- i8259_init(cascade_node, 0);
- irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade);
-
- /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
- /* This looks like a dirty hack to me --gcl */
- outb(0xc0, 0x4d0);
- outb(0xc0, 0x4d1);
-
- out:
- of_node_put(cascade_node);
-}
-#else
-static inline void xilinx_i8259_setup_cascade(void) { return; }
-#endif /* defined(CONFIG_PPC_I8259) */
-
-/*
- * Initialize master Xilinx interrupt controller
- */
-void __init xilinx_intc_init_tree(void)
-{
- irqchip_init();
- xilinx_i8259_setup_cascade();
-}
diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c
deleted file mode 100644
index fea5667699ed..000000000000
--- a/arch/powerpc/sysdev/xilinx_pci.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * PCI support for Xilinx plbv46_pci soft-core which can be used on
- * Xilinx Virtex ML410 / ML510 boards.
- *
- * Copyright 2009 Roderick Colenbrander
- * Copyright 2009 Secret Lab Technologies Ltd.
- *
- * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
- * by Benjamin Herrenschmidt.
- * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/pci.h>
-#include <mm/mmu_decl.h>
-#include <asm/io.h>
-#include <asm/xilinx_pci.h>
-
-#define XPLB_PCI_ADDR 0x10c
-#define XPLB_PCI_DATA 0x110
-#define XPLB_PCI_BUS 0x114
-
-#define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
-
-static const struct of_device_id xilinx_pci_match[] = {
- { .compatible = "xlnx,plbv46-pci-1.03.a", },
- {}
-};
-
-/**
- * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
- */
-static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
-{
- struct pci_controller *hose;
- int i;
-
- if (dev->devfn || dev->bus->self)
- return;
-
- hose = pci_bus_to_host(dev->bus);
- if (!hose)
- return;
-
- if (!of_match_node(xilinx_pci_match, hose->dn))
- return;
-
- /* Hide the PCI host BARs from the kernel as their content doesn't
- * fit well in the resource management
- */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dev->resource[i].start = 0;
- dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
- }
-
- dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
- pci_name(dev));
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
-
-/**
- * xilinx_pci_exclude_device - Don't do config access for non-root bus
- *
- * This is a hack. Config access to any bus other than bus 0 does not
- * currently work on the ML510 so we prevent it here.
- */
-static int
-xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
-{
- return (bus != 0);
-}
-
-/**
- * xilinx_pci_init - Find and register a Xilinx PCI host bridge
- */
-void __init xilinx_pci_init(void)
-{
- struct pci_controller *hose;
- struct resource r;
- void __iomem *pci_reg;
- struct device_node *pci_node;
-
- pci_node = of_find_matching_node(NULL, xilinx_pci_match);
- if(!pci_node)
- return;
-
- if (of_address_to_resource(pci_node, 0, &r)) {
- pr_err("xilinx-pci: cannot resolve base address\n");
- return;
- }
-
- hose = pcibios_alloc_controller(pci_node);
- if (!hose) {
- pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
- return;
- }
-
- /* Setup config space */
- setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
- r.start + XPLB_PCI_DATA,
- PPC_INDIRECT_TYPE_SET_CFG_TYPE);
-
- /* According to the xilinx plbv46_pci documentation the soft-core starts
- * a self-init when the bus master enable bit is set. Without this bit
- * set the pci bus can't be scanned.
- */
- early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
-
- /* Set the max latency timer to 255 */
- early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
-
- /* Set the max bus number to 255 */
- pci_reg = of_iomap(pci_node, 0);
- out_8(pci_reg + XPLB_PCI_BUS, 0xff);
- iounmap(pci_reg);
-
- /* Nothing past the root bridge is working right now. By default
- * exclude config access to anything except bus 0 */
- if (!ppc_md.pci_exclude_device)
- ppc_md.pci_exclude_device = xilinx_pci_exclude_device;
-
- /* Register the host bridge with the linux kernel! */
- pci_process_bridge_OF_ranges(hose, pci_node, 1);
-
- pr_info("xilinx-pci: Registered PCI host bridge\n");
-}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index b294f70f1a67..f591be9f01f4 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
+#include <linux/vmalloc.h>
#include <asm/debugfs.h>
#include <asm/prom.h>
@@ -196,6 +197,9 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
u64 val;
+ if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ offset |= XIVE_ESB_LD_ST_MO;
+
/* Handle HW errata */
if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
offset |= offset << 4;
@@ -1017,12 +1021,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
if (xd->eoi_mmio) {
+ unmap_kernel_range((unsigned long)xd->eoi_mmio,
+ 1u << xd->esb_shift);
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
xd->trig_mmio = NULL;
xd->eoi_mmio = NULL;
}
if (xd->trig_mmio) {
+ unmap_kernel_range((unsigned long)xd->trig_mmio,
+ 1u << xd->esb_shift);
iounmap(xd->trig_mmio);
xd->trig_mmio = NULL;
}
@@ -1656,7 +1664,8 @@ DEFINE_SHOW_ATTRIBUTE(xive_core_debug);
int xive_core_debug_init(void)
{
- debugfs_create_file("xive", 0400, powerpc_debugfs_root,
- NULL, &xive_core_debug_fops);
+ if (xive_enabled())
+ debugfs_create_file("xive", 0400, powerpc_debugfs_root,
+ NULL, &xive_core_debug_fops);
return 0;
}
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 5218fdc4b29a..71b881e554fc 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -280,12 +280,12 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
#endif /* CONFIG_SMP */
-u32 xive_native_alloc_irq(void)
+u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
s64 rc;
for (;;) {
- rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
+ rc = opal_xive_allocate_irq(chip_id);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
@@ -294,7 +294,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
-EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);
void xive_native_free_irq(u32 irq)
{
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 7ab5c6780997..f0551a2be9df 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -27,6 +27,8 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
+#include <asm/svm.h>
+#include <asm/ultravisor.h>
#include "xive-internal.h"
@@ -502,6 +504,9 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
rc = -EIO;
} else {
q->qpage = qpage;
+ if (is_secure_guest())
+ uv_share_page(PHYS_PFN(qpage_phys),
+ 1 << xive_alloc_order(order));
}
fail:
return rc;
@@ -535,6 +540,8 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
hw_cpu, prio);
alloc_order = xive_alloc_order(xive_queue_shift);
+ if (is_secure_guest())
+ uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
free_pages((unsigned long)q->qpage, alloc_order);
q->qpage = NULL;
}
diff --git a/arch/powerpc/tools/head_check.sh b/arch/powerpc/tools/head_check.sh
index ad9e57209aa4..e32d3162e5ed 100644
--- a/arch/powerpc/tools/head_check.sh
+++ b/arch/powerpc/tools/head_check.sh
@@ -31,8 +31,10 @@
# level entry code (boot, interrupt vectors, etc) until r2 is set up. This
# could cause the kernel to die in early boot.
-# Turn this on if you want more debug output:
-# set -x
+# Allow for verbose output
+if [ "$V" = "1" ]; then
+ set -x
+fi
if [ $# -lt 2 ]; then
echo "$0 [path to nm] [path to vmlinux]" 1>&2
@@ -44,7 +46,7 @@ nm="$1"
vmlinux="$2"
# gcc-4.6-era toolchain make _stext an A (absolute) symbol rather than T
-$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" -m4 > .tmp_symbols.txt
+$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" > .tmp_symbols.txt
vma=$(cat .tmp_symbols.txt | grep -e " [TA] _stext$" | cut -d' ' -f1)
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 6f9cccea54f3..89c76ca35640 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -18,7 +18,7 @@ endif
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y += xmon.o nonstdio.o spr_access.o
+obj-y += xmon.o nonstdio.o spr_access.o xmon_bpts.o
ifdef CONFIG_XMON_DISASSEMBLY
obj-y += ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 7af840c0fc93..7efe4bc3ccf6 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -35,7 +35,6 @@
#include <asm/machdep.h>
#include <asm/xmon.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/plpar_wrappers.h>
@@ -54,6 +53,7 @@
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <asm/sections.h>
+#include <asm/inst.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -62,6 +62,7 @@
#include "nonstdio.h"
#include "dis-asm.h"
+#include "xmon_bpts.h"
#ifdef CONFIG_SMP
static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
@@ -98,7 +99,7 @@ static long *xmon_fault_jmp[NR_CPUS];
/* Breakpoint stuff */
struct bpt {
unsigned long address;
- unsigned int instr[2];
+ struct ppc_inst *instr;
atomic_t ref_count;
int enabled;
unsigned long pad;
@@ -109,9 +110,8 @@ struct bpt {
#define BP_TRAP 2
#define BP_DABR 4
-#define NBPTS 256
static struct bpt bpts[NBPTS];
-static struct bpt dabr;
+static struct bpt dabr[HBP_NUM_MAX];
static struct bpt *iabr;
static unsigned bpinstr = 0x7fe00008; /* trap */
@@ -121,6 +121,7 @@ static unsigned bpinstr = 0x7fe00008; /* trap */
static int cmds(struct pt_regs *);
static int mread(unsigned long, void *, int);
static int mwrite(unsigned long, void *, int);
+static int mread_instr(unsigned long, struct ppc_inst *);
static int handle_fault(struct pt_regs *);
static void byterev(unsigned char *, int);
static void memex(void);
@@ -326,11 +327,6 @@ static inline void sync(void)
asm volatile("sync; isync");
}
-static inline void store_inst(void *p)
-{
- asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
-}
-
static inline void cflush(void *p)
{
asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
@@ -706,13 +702,13 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
- int stepped = emulate_step(regs, bp->instr[0]);
+ int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
if (stepped == 0) {
regs->nip = (unsigned long) &bp->instr[0];
atomic_inc(&bp->ref_count);
} else if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
- (IS_RFID(bp->instr[0])? "rfid": "mtmsrd"));
+ IS_RFID(ppc_inst_read(bp->instr))? "rfid": "mtmsrd");
}
}
}
@@ -761,8 +757,8 @@ static int xmon_bpt(struct pt_regs *regs)
/* Are we at the trap at bp->instr[1] for some bp? */
bp = in_breakpoint_table(regs->nip, &offset);
- if (bp != NULL && offset == 4) {
- regs->nip = bp->address + 4;
+ if (bp != NULL && (offset == 4 || offset == 8)) {
+ regs->nip = bp->address + offset;
atomic_dec(&bp->ref_count);
return 1;
}
@@ -787,10 +783,17 @@ static int xmon_sstep(struct pt_regs *regs)
static int xmon_break_match(struct pt_regs *regs)
{
+ int i;
+
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
return 0;
- if (dabr.enabled == 0)
- return 0;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (dabr[i].enabled)
+ goto found;
+ }
+ return 0;
+
+found:
xmon_core(regs, 0);
return 1;
}
@@ -859,15 +862,13 @@ static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp)
{
unsigned long off;
- off = nip - (unsigned long) bpts;
- if (off >= sizeof(bpts))
+ off = nip - (unsigned long)bpt_table;
+ if (off >= sizeof(bpt_table))
return NULL;
- off %= sizeof(struct bpt);
- if (off != offsetof(struct bpt, instr[0])
- && off != offsetof(struct bpt, instr[1]))
+ *offp = off & (BPT_SIZE - 1);
+ if (off & 3)
return NULL;
- *offp = off - offsetof(struct bpt, instr[0]);
- return (struct bpt *) (nip - off);
+ return bpts + (off / BPT_SIZE);
}
static struct bpt *new_breakpoint(unsigned long a)
@@ -882,8 +883,7 @@ static struct bpt *new_breakpoint(unsigned long a)
for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
if (!bp->enabled && atomic_read(&bp->ref_count) == 0) {
bp->address = a;
- bp->instr[1] = bpinstr;
- store_inst(&bp->instr[1]);
+ bp->instr = (void *)(bpt_table + ((bp - bpts) * BPT_WORDS));
return bp;
}
}
@@ -895,47 +895,75 @@ static struct bpt *new_breakpoint(unsigned long a)
static void insert_bpts(void)
{
int i;
- struct bpt *bp;
+ struct ppc_inst instr, instr2;
+ struct bpt *bp, *bp2;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0)
continue;
- if (mread(bp->address, &bp->instr[0], 4) != 4) {
+ if (!mread_instr(bp->address, &instr)) {
printf("Couldn't read instruction at %lx, "
"disabling breakpoint there\n", bp->address);
bp->enabled = 0;
continue;
}
- if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) {
+ if (IS_MTMSRD(instr) || IS_RFID(instr)) {
printf("Breakpoint at %lx is on an mtmsrd or rfid "
"instruction, disabling it\n", bp->address);
bp->enabled = 0;
continue;
}
- store_inst(&bp->instr[0]);
+ /*
+ * Check the address is not a suffix by looking for a prefix in
+ * front of it.
+ */
+ if (mread_instr(bp->address - 4, &instr2) == 8) {
+ printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n",
+ bp->address);
+ bp->enabled = 0;
+ continue;
+ }
+ /*
+ * The address might still be a suffix: if the prefix has already been
+ * replaced by a breakpoint, the check above will not catch it.
+ */
+ bp2 = at_breakpoint(bp->address - 4);
+ if (bp2 && ppc_inst_prefixed(ppc_inst_read(bp2->instr))) {
+ printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n",
+ bp->address);
+ bp->enabled = 0;
+ continue;
+ }
+
+ patch_instruction(bp->instr, instr);
+ patch_instruction(ppc_inst_next(bp->instr, &instr),
+ ppc_inst(bpinstr));
if (bp->enabled & BP_CIABR)
continue;
- if (patch_instruction((unsigned int *)bp->address,
- bpinstr) != 0) {
+ if (patch_instruction((struct ppc_inst *)bp->address,
+ ppc_inst(bpinstr)) != 0) {
printf("Couldn't write instruction at %lx, "
"disabling breakpoint there\n", bp->address);
bp->enabled &= ~BP_TRAP;
continue;
}
- store_inst((void *)bp->address);
}
}
static void insert_cpu_bpts(void)
{
+ int i;
struct arch_hw_breakpoint brk;
- if (dabr.enabled) {
- brk.address = dabr.address;
- brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
- brk.len = DABR_MAX_LEN;
- __set_breakpoint(&brk);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (dabr[i].enabled) {
+ brk.address = dabr[i].address;
+ brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ brk.len = 8;
+ __set_breakpoint(i, &brk);
+ }
}
if (iabr)
@@ -946,20 +974,18 @@ static void remove_bpts(void)
{
int i;
struct bpt *bp;
- unsigned instr;
+ struct ppc_inst instr;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP)
continue;
- if (mread(bp->address, &instr, 4) == 4
- && instr == bpinstr
+ if (mread_instr(bp->address, &instr)
+ && ppc_inst_equal(instr, ppc_inst(bpinstr))
&& patch_instruction(
- (unsigned int *)bp->address, bp->instr[0]) != 0)
+ (struct ppc_inst *)bp->address, ppc_inst_read(bp->instr)) != 0)
printf("Couldn't remove breakpoint at %lx\n",
bp->address);
- else
- store_inst((void *)bp->address);
}
}
@@ -1164,13 +1190,13 @@ static int do_step(struct pt_regs *regs)
*/
static int do_step(struct pt_regs *regs)
{
- unsigned int instr;
+ struct ppc_inst instr;
int stepped;
force_enable_xmon();
/* check we are in 64-bit kernel mode, translation enabled */
if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) {
- if (mread(regs->nip, &instr, 4) == 4) {
+ if (mread_instr(regs->nip, &instr)) {
stepped = emulate_step(regs, instr);
if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
@@ -1178,7 +1204,7 @@ static int do_step(struct pt_regs *regs)
return 0;
}
if (stepped > 0) {
- regs->trap = 0xd00 | (regs->trap & 1);
+ set_trap(regs, 0xd00);
printf("stepped to ");
xmon_print_symbol(regs->nip, " ", "\n");
ppc_inst_dump(regs->nip, 1, 0);
@@ -1330,14 +1356,14 @@ csum(void)
*/
static long check_bp_loc(unsigned long addr)
{
- unsigned int instr;
+ struct ppc_inst instr;
addr &= ~3;
if (!is_kernel_addr(addr)) {
printf("Breakpoints may only be placed at kernel addresses\n");
return 0;
}
- if (!mread(addr, &instr, sizeof(instr))) {
+ if (!mread_instr(addr, &instr)) {
printf("Can't read instruction at address %lx\n", addr);
return 0;
}
@@ -1349,6 +1375,35 @@ static long check_bp_loc(unsigned long addr)
return 1;
}
+static int find_free_data_bpt(void)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!dabr[i].enabled)
+ return i;
+ }
+ printf("Couldn't find free breakpoint register\n");
+ return -1;
+}
+
+static void print_data_bpts(void)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!dabr[i].enabled)
+ continue;
+
+ printf(" data "REG" [", dabr[i].address);
+ if (dabr[i].enabled & 1)
+ printf("r");
+ if (dabr[i].enabled & 2)
+ printf("w");
+ printf("]\n");
+ }
+}
+
static char *breakpoint_help_string =
"Breakpoint command usage:\n"
"b show breakpoints\n"
@@ -1382,6 +1437,9 @@ bpt_cmds(void)
printf("Hardware data breakpoint not supported on this cpu\n");
break;
}
+ i = find_free_data_bpt();
+ if (i < 0)
+ break;
mode = 7;
cmd = inchar();
if (cmd == 'r')
@@ -1390,15 +1448,15 @@ bpt_cmds(void)
mode = 6;
else
termch = cmd;
- dabr.address = 0;
- dabr.enabled = 0;
- if (scanhex(&dabr.address)) {
- if (!is_kernel_addr(dabr.address)) {
+ dabr[i].address = 0;
+ dabr[i].enabled = 0;
+ if (scanhex(&dabr[i].address)) {
+ if (!is_kernel_addr(dabr[i].address)) {
printf(badaddr);
break;
}
- dabr.address &= ~HW_BRK_TYPE_DABR;
- dabr.enabled = mode | BP_DABR;
+ dabr[i].address &= ~HW_BRK_TYPE_DABR;
+ dabr[i].enabled = mode | BP_DABR;
}
force_enable_xmon();
@@ -1437,7 +1495,9 @@ bpt_cmds(void)
for (i = 0; i < NBPTS; ++i)
bpts[i].enabled = 0;
iabr = NULL;
- dabr.enabled = 0;
+ for (i = 0; i < nr_wp_slots(); i++)
+ dabr[i].enabled = 0;
+
printf("All breakpoints cleared\n");
break;
}
@@ -1471,14 +1531,7 @@ bpt_cmds(void)
if (xmon_is_ro || !scanhex(&a)) {
/* print all breakpoints */
printf(" type address\n");
- if (dabr.enabled) {
- printf(" data "REG" [", dabr.address);
- if (dabr.enabled & 1)
- printf("r");
- if (dabr.enabled & 2)
- printf("w");
- printf("]\n");
- }
+ print_data_bpts();
for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
if (!bp->enabled)
continue;
@@ -1776,7 +1829,7 @@ static void prregs(struct pt_regs *fp)
#endif
printf("pc = ");
xmon_print_symbol(fp->nip, " ", "\n");
- if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) {
+ if (!trap_is_syscall(fp) && cpu_has_feature(CPU_FTR_CFAR)) {
printf("cfar= ");
xmon_print_symbol(fp->orig_gpr3, " ", "\n");
}
@@ -1938,8 +1991,13 @@ static void dump_207_sprs(void)
printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n",
mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR));
- printf("dawr = %.16lx dawrx = %.16lx ciabr = %.16lx\n",
- mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR));
+ printf("dawr0 = %.16lx dawrx0 = %.16lx\n",
+ mfspr(SPRN_DAWR0), mfspr(SPRN_DAWRX0));
+ if (nr_wp_slots() > 1) {
+ printf("dawr1 = %.16lx dawrx1 = %.16lx\n",
+ mfspr(SPRN_DAWR1), mfspr(SPRN_DAWRX1));
+ }
+ printf("ciabr = %.16lx\n", mfspr(SPRN_CIABR));
#endif
}
@@ -2130,6 +2188,25 @@ mwrite(unsigned long adrs, void *buf, int size)
return n;
}
+static int
+mread_instr(unsigned long adrs, struct ppc_inst *instr)
+{
+ volatile int n;
+
+ n = 0;
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+ *instr = ppc_inst_read((struct ppc_inst *)adrs);
+ sync();
+ /* wait a little while to see if we get a machine check */
+ __delay(200);
+ n = ppc_inst_len(*instr);
+ }
+ catch_memory_errors = 0;
+ return n;
+}
+
static int fault_type;
static int fault_except;
static char *fault_chars[] = { "--", "**", "##" };
@@ -2856,7 +2933,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
{
int nr, dotted;
unsigned long first_adr;
- unsigned int inst, last_inst = 0;
+ struct ppc_inst inst, last_inst = ppc_inst(0);
unsigned char val[4];
dotted = 0;
@@ -2869,8 +2946,8 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
}
break;
}
- inst = GETWORD(val);
- if (adr > first_adr && inst == last_inst) {
+ inst = ppc_inst(GETWORD(val));
+ if (adr > first_adr && ppc_inst_equal(inst, last_inst)) {
if (!dotted) {
printf(" ...\n");
dotted = 1;
@@ -2880,9 +2957,9 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
dotted = 0;
last_inst = inst;
if (praddr)
- printf(REG" %.8x", adr, inst);
+ printf(REG" %.8x", adr, ppc_inst_val(inst));
printf("\t");
- dump_func(inst, adr);
+ dump_func(ppc_inst_val(inst), adr);
printf("\n");
}
return adr - first_adr;
@@ -3107,8 +3184,8 @@ static void show_task(struct task_struct *tsk)
(tsk->exit_state & EXIT_DEAD) ? 'E' :
(tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
- printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
- tsk->thread.ksp,
+ printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
+ tsk->thread.ksp, tsk->thread.regs,
tsk->pid, rcu_dereference(tsk->parent)->pid,
state, task_cpu(tsk),
tsk->comm);
@@ -3135,7 +3212,8 @@ static void show_pte(unsigned long addr)
unsigned long tskv = 0;
struct task_struct *tsk = NULL;
struct mm_struct *mm;
- pgd_t *pgdp, *pgdir;
+ pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -3159,28 +3237,26 @@ static void show_pte(unsigned long addr)
catch_memory_errors = 1;
sync();
- if (mm == &init_mm) {
+ if (mm == &init_mm)
pgdp = pgd_offset_k(addr);
- pgdir = pgd_offset_k(0);
- } else {
+ else
pgdp = pgd_offset(mm, addr);
- pgdir = pgd_offset(mm, 0);
- }
- if (pgd_none(*pgdp)) {
- printf("no linux page table for address\n");
+ p4dp = p4d_offset(pgdp, addr);
+
+ if (p4d_none(*p4dp)) {
+ printf("No valid P4D\n");
return;
}
- printf("pgd @ 0x%px\n", pgdir);
-
- if (pgd_is_leaf(*pgdp)) {
- format_pte(pgdp, pgd_val(*pgdp));
+ if (p4d_is_leaf(*p4dp)) {
+ format_pte(p4dp, p4d_val(*p4dp));
return;
}
- printf("pgdp @ 0x%px = 0x%016lx\n", pgdp, pgd_val(*pgdp));
- pudp = pud_offset(pgdp, addr);
+ printf("p4dp @ 0x%px = 0x%016lx\n", p4dp, p4d_val(*p4dp));
+
+ pudp = pud_offset(p4dp, addr);
if (pud_none(*pudp)) {
printf("No valid PUD\n");
@@ -3231,7 +3307,7 @@ static void show_tasks(void)
unsigned long tskv;
struct task_struct *tsk = NULL;
- printf(" task_struct ->thread.ksp PID PPID S P CMD\n");
+ printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n");
if (scanhex(&tskv))
tsk = (struct task_struct *)tskv;
@@ -3842,7 +3918,7 @@ static void sysrq_handle_xmon(int key)
xmon_init(0);
}
-static struct sysrq_key_op sysrq_xmon_op = {
+static const struct sysrq_key_op sysrq_xmon_op = {
.handler = sysrq_handle_xmon,
.help_msg = "xmon(x)",
.action_msg = "Entering xmon",
@@ -3869,10 +3945,9 @@ static void clear_all_bpt(void)
bpts[i].enabled = 0;
/* Clear any data or iabr breakpoints */
- if (iabr || dabr.enabled) {
- iabr = NULL;
- dabr.enabled = 0;
- }
+ iabr = NULL;
+ for (i = 0; i < nr_wp_slots(); i++)
+ dabr[i].enabled = 0;
}
#ifdef CONFIG_DEBUG_FS
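Much of the xmon rework exists because ISA v3.1 prefixed instructions are eight bytes long: a breakpoint trap must never land on the suffix word, and each bpt slot now needs room for a full instruction plus the trap. A standalone sketch of the suffix test, assuming only that a prefix word carries primary opcode 1; the kernel uses ppc_inst_prefixed() and mread_instr() instead:

#include <stdbool.h>
#include <stdint.h>

/* Prefix words of prefixed instructions have primary opcode 1
 * (top six bits of the big-endian instruction word). */
static bool is_prefix_word(uint32_t word)
{
	return (word >> 26) == 1;
}

/* A breakpoint is refused when the word four bytes earlier is a prefix,
 * i.e. the requested address is the suffix of a prefixed instruction. */
static bool breakpoint_allowed_sketch(uint32_t word_before)
{
	return !is_prefix_word(word_before);
}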
diff --git a/arch/powerpc/xmon/xmon_bpts.S b/arch/powerpc/xmon/xmon_bpts.S
new file mode 100644
index 000000000000..69726814cd27
--- /dev/null
+++ b/arch/powerpc/xmon/xmon_bpts.S
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
+#include "xmon_bpts.h"
+
+/* Prefixed instructions cannot cross 64-byte boundaries */
+.align 6
+.global bpt_table
+bpt_table:
+ .space NBPTS * BPT_SIZE
diff --git a/arch/powerpc/xmon/xmon_bpts.h b/arch/powerpc/xmon/xmon_bpts.h
new file mode 100644
index 000000000000..57e6fb03de48
--- /dev/null
+++ b/arch/powerpc/xmon/xmon_bpts.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef XMON_BPTS_H
+#define XMON_BPTS_H
+
+#define NBPTS 256
+#ifndef __ASSEMBLY__
+#include <asm/inst.h>
+#define BPT_SIZE (sizeof(struct ppc_inst) * 2)
+#define BPT_WORDS (BPT_SIZE / sizeof(struct ppc_inst))
+
+extern unsigned int bpt_table[NBPTS * BPT_WORDS];
+#endif /* __ASSEMBLY__ */
+
+#endif /* XMON_BPTS_H */
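bpt_table replaces the in-struct instruction copies: each breakpoint owns BPT_SIZE bytes of executable table space holding the saved instruction followed by the trap, and the 64-byte alignment keeps a prefixed instruction from straddling a boundary. A small sketch of the index arithmetic used by in_breakpoint_table(), assuming BPT_SIZE is 8 purely for illustration (in the kernel it is twice sizeof(struct ppc_inst)):

#include <stdint.h>

#define NBPTS_SKETCH	256
#define BPT_SIZE_SKETCH	8	/* illustrative; the real BPT_SIZE differs */

static uint32_t bpt_table_sketch[NBPTS_SKETCH * BPT_SIZE_SKETCH / 4];

/* Map an address inside the table to its slot index; *offp receives the
 * byte offset within the slot, as in_breakpoint_table() does above. */
static int slot_for_addr(uintptr_t nip, unsigned long *offp)
{
	uintptr_t off = nip - (uintptr_t)bpt_table_sketch;

	if (off >= sizeof(bpt_table_sketch) || (off & 3))
		return -1;

	*offp = off & (BPT_SIZE_SKETCH - 1);
	return (int)(off / BPT_SIZE_SKETCH);
}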
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
index d1d0aa70fdf1..4614c01ba5b3 100644
--- a/arch/riscv/Kbuild
+++ b/arch/riscv/Kbuild
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/ net/
+obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 68418201734a..128192e14ff2 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -12,62 +12,70 @@ config 32BIT
config RISCV
def_bool y
- select OF
- select OF_EARLY_FLATTREE
- select OF_IRQ
+ select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_MMIOWB
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_DIRECT_MAP
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
select COMMON_CLK
+ select EDAC_SUPPORT
+ select GENERIC_ARCH_TOPOLOGY if SMP
+ select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
+ select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+ select GENERIC_IOREMAP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
+ select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select GENERIC_SMP_IDLE_THREAD
- select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_IOREMAP
- select GENERIC_PTDUMP if MMU
+ select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB_QXFER_PKT
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
+ select HAVE_COPY_THREAD_TLS
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EBPF_JIT if MMU
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GENERIC_VDSO if MMU && 64BIT
+ select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
- select SPARSE_IRQ
- select SYSCTL_EXCEPTION_TRACE
- select HAVE_ARCH_TRACEHOOK
- select HAVE_PCI
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
- select THREAD_INFO_IN_TASK
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_IRQ
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
+ select RISCV_INTC
select RISCV_TIMER
- select GENERIC_IRQ_MULTI_HANDLER
- select GENERIC_ARCH_TOPOLOGY if SMP
- select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_MMIOWB
- select ARCH_HAS_DEBUG_VIRTUAL if MMU
- select HAVE_EBPF_JIT if MMU
- select EDAC_SUPPORT
- select ARCH_HAS_GIGANTIC_PAGE
- select ARCH_HAS_SET_DIRECT_MAP
- select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX if MMU
- select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
- select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
- select HAVE_ARCH_MMAP_RND_BITS if MMU
- select ARCH_HAS_GCOV_PROFILE_ALL
- select HAVE_COPY_THREAD_TLS
- select HAVE_ARCH_KASAN if MMU && 64BIT
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -194,11 +202,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_FUNCTION_TRACER
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select SWIOTLB if MMU
endchoice
@@ -381,6 +389,11 @@ endchoice
endmenu
+config BUILTIN_DTB
+ def_bool n
+ depends on RISCV_M_MODE
+ depends on OF
+
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index d646332e44f1..6c88148f1b9b 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -24,11 +24,26 @@ config SOC_VIRT
config SOC_KENDRYTE
bool "Kendryte K210 SoC"
depends on !MMU
- select BUILTIN_DTB
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
select SIFIVE_PLIC
help
This enables support for Kendryte K210 SoC platform hardware.
+config SOC_KENDRYTE_K210_DTB
+ def_bool y
+ depends on SOC_KENDRYTE_K210_DTB_BUILTIN
+
+config SOC_KENDRYTE_K210_DTB_BUILTIN
+ bool "Builtin device tree for the Kendryte K210"
+ depends on SOC_KENDRYTE
+ default y
+ select OF
+ select BUILTIN_DTB
+ select SOC_KENDRYTE_K210_DTB
+ help
+ Builds a device tree for the Kendryte K210 into the Linux image.
+ This option should be selected if no bootloader is being used.
+ If unsure, say Y.
+
endmenu
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index 557f0b519c8e..ca1f8cbd78c0 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
subdir-y += sifive
subdir-y += kendryte
+
+obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/riscv/boot/dts/kendryte/Makefile b/arch/riscv/boot/dts/kendryte/Makefile
index 815444e69e89..1a88e616f18e 100644
--- a/arch/riscv/boot/dts/kendryte/Makefile
+++ b/arch/riscv/boot/dts/kendryte/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_SOC_KENDRYTE) += k210.dtb
+dtb-$(CONFIG_SOC_KENDRYTE_K210_DTB) += k210.dtb
+
+obj-$(CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index 632aa2f95e57..b48138e329ea 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -2,14 +2,12 @@
CONFIG_LOG_BUF_SHIFT=15
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
CONFIG_INITRAMFS_FORCE=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
-# CONFIG_BOOT_CONFIG is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
@@ -35,8 +33,6 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
CONFIG_CMDLINE_FORCE=y
-CONFIG_USE_BUILTIN_DTB=y
-CONFIG_BUILTIN_DTB_SOURCE="kendryte/k210"
# CONFIG_BLOCK is not set
CONFIG_BINFMT_FLAT=y
# CONFIG_COREDUMP is not set
@@ -49,8 +45,8 @@ CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
-# CONFIG_DEVMEM is not set
# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
@@ -62,6 +58,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_LSM="[]"
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_MISC is not set
+CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index c8677c75f82c..23ff70350992 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -8,65 +8,6 @@
#include <linux/mm.h>
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-
-/*
- * The cache doesn't need to be flushed when TLB entries change when
- * the cache is mapped to physical memory, not virtual memory
- */
-static inline void flush_cache_all(void)
-{
-}
-
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_dup_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void flush_cache_page(struct vm_area_struct *vma,
- unsigned long vmaddr,
- unsigned long pfn)
-{
-}
-
-static inline void flush_dcache_mmap_lock(struct address_space *mapping)
-{
-}
-
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
-{
-}
-
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
-{
-}
-
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
-{
-}
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
- } while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
static inline void local_flush_icache_all(void)
{
asm volatile ("fence.i" ::: "memory");
@@ -79,13 +20,15 @@ static inline void flush_dcache_page(struct page *page)
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+ flush_icache_mm(vma->vm_mm, 0)
#ifndef CONFIG_SMP
@@ -105,4 +48,6 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+#include <asm-generic/cacheflush.h>
+
#endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/arch/riscv/include/asm/cacheinfo.h b/arch/riscv/include/asm/cacheinfo.h
new file mode 100644
index 000000000000..5d9662e9aba8
--- /dev/null
+++ b/arch/riscv/include/asm/cacheinfo.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_CACHEINFO_H
+#define _ASM_RISCV_CACHEINFO_H
+
+#include <linux/cacheinfo.h>
+
+struct riscv_cacheinfo_ops {
+ const struct attribute_group * (*get_priv_group)(struct cacheinfo
+ *this_leaf);
+};
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops);
+
+#endif /* _ASM_RISCV_CACHEINFO_H */
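The new riscv_set_cacheinfo_ops() hook lets platform code attach vendor-specific sysfs attributes to each cache leaf. A minimal kernel-style sketch of a caller, where the ops structure name and the empty group are illustrative only:

/* Sketch: a platform driver hooking the new callback.  Returning NULL
 * simply means "no private attributes" for that cache leaf. */
static const struct attribute_group *
soc_cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;	/* a real provider would return its attribute group */
}

static struct riscv_cacheinfo_ops soc_cache_ops = {
	.get_priv_group = soc_cache_get_priv_group,
};

/* from SoC setup code: riscv_set_cacheinfo_ops(&soc_cache_ops); */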
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644
index 000000000000..482185566b0c
--- /dev/null
+++ b/arch/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 2368d49eb4ef..1ff075a8dfc7 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_MMU
/*
diff --git a/arch/riscv/include/asm/gdb_xml.h b/arch/riscv/include/asm/gdb_xml.h
new file mode 100644
index 000000000000..041b45f5b997
--- /dev/null
+++ b/arch/riscv/include/asm/gdb_xml.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GDB_XML_H_
+#define __ASM_GDB_XML_H_
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
+static const char riscv_gdb_stub_feature[64] =
+ "PacketSize=800;qXfer:features:read+;";
+
+static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
+
+#ifdef CONFIG_64BIT
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-64bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-64bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"64\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"64\" type=\"int\"/>"
+"</feature>";
+#else
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-32bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-32bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"32\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"32\" type=\"int\"/>"
+"</feature>";
+#endif
+#endif
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 0f477206a4ed..3835c3295dc5 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -12,8 +12,8 @@
#define _ASM_RISCV_IO_H
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/mmiowb.h>
-#include <asm/pgtable.h>
/*
* MMIO access functions are separated out to break dependency cycles
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 6e1b0e0325eb..9807ad164015 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -10,11 +10,6 @@
#include <linux/interrupt.h>
#include <linux/linkage.h>
-#define NR_IRQS 0
-
-void riscv_timer_interrupt(void);
-void riscv_software_interrupt(void);
-
#include <asm-generic/irq.h>
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index b47045cb85ce..b04028c6218c 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -8,8 +8,6 @@
#ifdef CONFIG_KASAN
-#include <asm/pgtable.h>
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
diff --git a/arch/riscv/include/asm/kdebug.h b/arch/riscv/include/asm/kdebug.h
new file mode 100644
index 000000000000..85ac00411f6e
--- /dev/null
+++ b/arch/riscv/include/asm/kdebug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
new file mode 100644
index 000000000000..8177a457caff
--- /dev/null
+++ b/arch/riscv/include/asm/kgdb.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_KGDB_H_
+#define __ASM_KGDB_H_
+
+#ifdef __KERNEL__
+
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+
+#define DBG_MAX_REG_NUM (36)
+#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG)
+#define CACHE_FLUSH_IS_SAFE 1
+#define BUFMAX 2048
+#ifdef CONFIG_RISCV_ISA_C
+#define BREAK_INSTR_SIZE 2
+#else
+#define BREAK_INSTR_SIZE 4
+#endif
+
+#ifndef __ASSEMBLY__
+
+extern int kgdb_has_hit_break(unsigned long addr);
+extern unsigned long kgdb_compiled_break;
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ ".option norvc\n"
+ "kgdb_compiled_break: ebreak\n"
+ ".option rvc\n");
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define DBG_REG_ZERO "zero"
+#define DBG_REG_RA "ra"
+#define DBG_REG_SP "sp"
+#define DBG_REG_GP "gp"
+#define DBG_REG_TP "tp"
+#define DBG_REG_T0 "t0"
+#define DBG_REG_T1 "t1"
+#define DBG_REG_T2 "t2"
+#define DBG_REG_FP "fp"
+#define DBG_REG_S1 "s1"
+#define DBG_REG_A0 "a0"
+#define DBG_REG_A1 "a1"
+#define DBG_REG_A2 "a2"
+#define DBG_REG_A3 "a3"
+#define DBG_REG_A4 "a4"
+#define DBG_REG_A5 "a5"
+#define DBG_REG_A6 "a6"
+#define DBG_REG_A7 "a7"
+#define DBG_REG_S2 "s2"
+#define DBG_REG_S3 "s3"
+#define DBG_REG_S4 "s4"
+#define DBG_REG_S5 "s5"
+#define DBG_REG_S6 "s6"
+#define DBG_REG_S7 "s7"
+#define DBG_REG_S8 "s8"
+#define DBG_REG_S9 "s9"
+#define DBG_REG_S10 "s10"
+#define DBG_REG_S11 "s11"
+#define DBG_REG_T3 "t3"
+#define DBG_REG_T4 "t4"
+#define DBG_REG_T5 "t5"
+#define DBG_REG_T6 "t6"
+#define DBG_REG_EPC "pc"
+#define DBG_REG_STATUS "sstatus"
+#define DBG_REG_BADADDR "stval"
+#define DBG_REG_CAUSE "scause"
+
+#define DBG_REG_ZERO_OFF 0
+#define DBG_REG_RA_OFF 1
+#define DBG_REG_SP_OFF 2
+#define DBG_REG_GP_OFF 3
+#define DBG_REG_TP_OFF 4
+#define DBG_REG_T0_OFF 5
+#define DBG_REG_T1_OFF 6
+#define DBG_REG_T2_OFF 7
+#define DBG_REG_FP_OFF 8
+#define DBG_REG_S1_OFF 9
+#define DBG_REG_A0_OFF 10
+#define DBG_REG_A1_OFF 11
+#define DBG_REG_A2_OFF 12
+#define DBG_REG_A3_OFF 13
+#define DBG_REG_A4_OFF 14
+#define DBG_REG_A5_OFF 15
+#define DBG_REG_A6_OFF 16
+#define DBG_REG_A7_OFF 17
+#define DBG_REG_S2_OFF 18
+#define DBG_REG_S3_OFF 19
+#define DBG_REG_S4_OFF 20
+#define DBG_REG_S5_OFF 21
+#define DBG_REG_S6_OFF 22
+#define DBG_REG_S7_OFF 23
+#define DBG_REG_S8_OFF 24
+#define DBG_REG_S9_OFF 25
+#define DBG_REG_S10_OFF 26
+#define DBG_REG_S11_OFF 27
+#define DBG_REG_T3_OFF 28
+#define DBG_REG_T4_OFF 29
+#define DBG_REG_T5_OFF 30
+#define DBG_REG_T6_OFF 31
+#define DBG_REG_EPC_OFF 32
+#define DBG_REG_STATUS_OFF 33
+#define DBG_REG_BADADDR_OFF 34
+#define DBG_REG_CAUSE_OFF 35
+
+#include <asm/gdb_xml.h>
+
+#endif
+#endif
diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
new file mode 100644
index 000000000000..f36368de839f
--- /dev/null
+++ b/arch/riscv/include/asm/parse_asm.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/bits.h>
+
+/* The bit field of immediate value in I-type instruction */
+#define I_IMM_SIGN_OPOFF 31
+#define I_IMM_11_0_OPOFF 20
+#define I_IMM_SIGN_OFF 12
+#define I_IMM_11_0_OFF 0
+#define I_IMM_11_0_MASK GENMASK(11, 0)
+
+/* The bit field of immediate value in J-type instruction */
+#define J_IMM_SIGN_OPOFF 31
+#define J_IMM_10_1_OPOFF 21
+#define J_IMM_11_OPOFF 20
+#define J_IMM_19_12_OPOFF 12
+#define J_IMM_SIGN_OFF 20
+#define J_IMM_10_1_OFF 1
+#define J_IMM_11_OFF 11
+#define J_IMM_19_12_OFF 12
+#define J_IMM_10_1_MASK GENMASK(9, 0)
+#define J_IMM_11_MASK GENMASK(0, 0)
+#define J_IMM_19_12_MASK GENMASK(7, 0)
+
+/* The bit field of immediate value in B-type instruction */
+#define B_IMM_SIGN_OPOFF 31
+#define B_IMM_10_5_OPOFF 25
+#define B_IMM_4_1_OPOFF 8
+#define B_IMM_11_OPOFF 7
+#define B_IMM_SIGN_OFF 12
+#define B_IMM_10_5_OFF 5
+#define B_IMM_4_1_OFF 1
+#define B_IMM_11_OFF 11
+#define B_IMM_10_5_MASK GENMASK(5, 0)
+#define B_IMM_4_1_MASK GENMASK(3, 0)
+#define B_IMM_11_MASK GENMASK(0, 0)
+
+/* The register offset in RVG instruction */
+#define RVG_RS1_OPOFF 15
+#define RVG_RS2_OPOFF 20
+#define RVG_RD_OPOFF 7
+
+/* The bit field of immediate value in RVC J instruction */
+#define RVC_J_IMM_SIGN_OPOFF 12
+#define RVC_J_IMM_4_OPOFF 11
+#define RVC_J_IMM_9_8_OPOFF 9
+#define RVC_J_IMM_10_OPOFF 8
+#define RVC_J_IMM_6_OPOFF 7
+#define RVC_J_IMM_7_OPOFF 6
+#define RVC_J_IMM_3_1_OPOFF 3
+#define RVC_J_IMM_5_OPOFF 2
+#define RVC_J_IMM_SIGN_OFF 11
+#define RVC_J_IMM_4_OFF 4
+#define RVC_J_IMM_9_8_OFF 8
+#define RVC_J_IMM_10_OFF 10
+#define RVC_J_IMM_6_OFF 6
+#define RVC_J_IMM_7_OFF 7
+#define RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+
+/* parts of opcode for RVG*/
+#define OPCODE_BRANCH 0x63
+#define OPCODE_JALR 0x67
+#define OPCODE_JAL 0x6f
+#define OPCODE_SYSTEM 0x73
+
+/* parts of opcode for RVC*/
+#define OPCODE_C_0 0x0
+#define OPCODE_C_1 0x1
+#define OPCODE_C_2 0x2
+
+/* parts of funct3 code for I, M, A extension*/
+#define FUNCT3_JALR 0x0
+#define FUNCT3_BEQ 0x0
+#define FUNCT3_BNE 0x1000
+#define FUNCT3_BLT 0x4000
+#define FUNCT3_BGE 0x5000
+#define FUNCT3_BLTU 0x6000
+#define FUNCT3_BGEU 0x7000
+
+/* parts of funct3 code for C extension*/
+#define FUNCT3_C_BEQZ 0xc000
+#define FUNCT3_C_BNEZ 0xe000
+#define FUNCT3_C_J 0xa000
+#define FUNCT3_C_JAL 0x2000
+#define FUNCT4_C_JR 0x8000
+#define FUNCT4_C_JALR 0xf000
+
+#define FUNCT12_SRET 0x10200000
+
+#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR)
+#define MATCH_JAL (OPCODE_JAL)
+#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH)
+#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH)
+#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH)
+#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH)
+#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH)
+#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH)
+#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM)
+#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1)
+#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1)
+#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1)
+#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1)
+#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2)
+#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2)
+
+#define MASK_JALR 0x707f
+#define MASK_JAL 0x7f
+#define MASK_C_JALR 0xf07f
+#define MASK_C_JR 0xf07f
+#define MASK_C_JAL 0xe003
+#define MASK_C_J 0xe003
+#define MASK_BEQ 0x707f
+#define MASK_BNE 0x707f
+#define MASK_BLT 0x707f
+#define MASK_BGE 0x707f
+#define MASK_BLTU 0x707f
+#define MASK_BGEU 0x707f
+#define MASK_C_BEQZ 0xe003
+#define MASK_C_BNEZ 0xe003
+#define MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH)
+
+/* Define a series of is_XXX_insn functions to check if the value INSN
+ * is an instance of instruction XXX.
+ */
+#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \
+static inline bool is_ ## INSN_NAME ## _insn(long insn) \
+{ \
+ return (insn & (INSN_MASK)) == (INSN_MATCH); \
+}
+
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+#define RVC_X(X, s, mask) RV_X(X, s, mask)
+
+#define EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \
+ (RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \
+ (RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \
+ (RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); })
+
+#define EXTRACT_ITYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \
+ (RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); })
+
+#define EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \
+ (RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \
+ (RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \
+ (RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); })
+
+#define EXTRACT_RVC_J_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
+
+#define EXTRACT_RVC_B_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
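
The EXTRACT_*_IMM() macros reassemble immediates whose bits are scattered across the instruction word. As a quick sanity check outside the kernel, the stand-alone sketch below re-implements the B-type extraction with the same shifts and masks and decodes 0x00b50463, which should be "beq a0, a1, +8" in the standard RV32/RV64 encoding; the helper here is a user-space copy, not the header itself.

#include <stdio.h>

/* User-space copies of the relevant helpers from parse_asm.h. */
#define RV_X(X, s, mask)	(((X) >> (s)) & (mask))
#define RV_IMM_SIGN(x)		(-(((x) >> 31) & 1))

static long extract_btype_imm(unsigned int insn)
{
	return (RV_X(insn, 8, 0xf) << 1) |	/* imm[4:1]  from insn[11:8]  */
	       (RV_X(insn, 25, 0x3f) << 5) |	/* imm[10:5] from insn[30:25] */
	       (RV_X(insn, 7, 0x1) << 11) |	/* imm[11]   from insn[7]     */
	       (RV_IMM_SIGN(insn) << 12);	/* imm[12]   sign-extended    */
}

int main(void)
{
	unsigned int insn = 0x00b50463;		/* beq a0, a1, +8 */

	printf("rs1=x%u rs2=x%u offset=%ld\n",
	       (insn >> 15) & 0x1f,		/* RVG_RS1_OPOFF */
	       (insn >> 20) & 0x1f,		/* RVG_RS2_OPOFF */
	       extract_btype_imm(insn));	/* rs1=x10 rs2=x11 offset=8 */
	return 0;
}
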
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
index b5918a6e0615..9a7d7346001e 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/patch.h
@@ -6,7 +6,7 @@
#ifndef _ASM_RISCV_PATCH_H
#define _ASM_RISCV_PATCH_H
-int riscv_patch_text_nosync(void *addr, const void *insns, size_t len);
-int riscv_patch_text(void *addr, u32 insn);
+int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text(void *addr, u32 insn);
#endif /* _ASM_RISCV_PATCH_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b15f70a1fdfa..f3b0da64c6c8 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -70,13 +70,6 @@ static inline struct page *pud_page(pud_t pud)
return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index d50706ea1c94..eaea1f717010 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -173,16 +173,6 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-/* Locate an entry in the page global directory */
-static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
-{
- return mm->pgd + pgd_index(addr);
-}
-/* Locate an entry in the kernel page global directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))
-
static inline struct page *pmd_page(pmd_t pmd)
{
return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
@@ -209,16 +199,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
-{
- return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
-}
-
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) ((void)(pte))
-
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -496,8 +476,6 @@ void paging_init(void);
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#include <asm-generic/pgtable.h>
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3ddb798264f1..bdddcd5c1b71 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -8,6 +8,8 @@
#include <linux/const.h>
+#include <vdso/processor.h>
+
#include <asm/ptrace.h>
/*
@@ -58,16 +60,6 @@ static inline void release_thread(struct task_struct *dead_task)
extern unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
-#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
-#endif
- barrier();
-}
-
static inline void wait_for_interrupt(void)
{
__asm__ __volatile__ ("wfi");
@@ -75,6 +67,7 @@ static inline void wait_for_interrupt(void)
struct device_node;
int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index f4c7cfda6b7f..40bb1c15a731 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -28,6 +28,9 @@ void show_ipi_stats(struct seq_file *p, int prec);
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
/* Hook for the generic smp_call_function_many() routine. */
void arch_send_call_function_ipi_mask(struct cpumask *mask);
diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h
index 7cec1968c8b4..136a442ef876 100644
--- a/arch/riscv/include/asm/soc.h
+++ b/arch/riscv/include/asm/soc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2020 Google, Inc
*/
#ifndef _ASM_RISCV_SOC_H
@@ -20,4 +21,42 @@ void soc_early_init(void);
extern unsigned long __soc_early_init_table_start;
extern unsigned long __soc_early_init_table_end;
+/*
+ * Allows Linux to provide a device tree, which is necessary for SOCs that
+ * don't provide a useful one on their own.
+ */
+struct soc_builtin_dtb {
+ unsigned long vendor_id;
+ unsigned long arch_id;
+ unsigned long imp_id;
+ void *(*dtb_func)(void);
+};
+
+/*
+ * The argument name must specify a valid DTS file name without the dts
+ * extension.
+ */
+#define SOC_BUILTIN_DTB_DECLARE(name, vendor, arch, impl) \
+ extern void *__dtb_##name##_begin; \
+ \
+ static __init __used \
+ void *__soc_builtin_dtb_f__##name(void) \
+ { \
+ return (void *)&__dtb_##name##_begin; \
+ } \
+ \
+ static const struct soc_builtin_dtb __soc_builtin_dtb__##name \
+ __used __section(__soc_builtin_dtb_table) = \
+ { \
+ .vendor_id = vendor, \
+ .arch_id = arch, \
+ .imp_id = impl, \
+ .dtb_func = __soc_builtin_dtb_f__##name, \
+ }
+
+extern unsigned long __soc_builtin_dtb_table_start;
+extern unsigned long __soc_builtin_dtb_table_end;
+
+void *soc_lookup_builtin_dtb(void);
+
#endif
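
SOC_BUILTIN_DTB_DECLARE() drops one descriptor per built-in DTB into the __soc_builtin_dtb_table linker section, and soc_lookup_builtin_dtb() (added to soc.c later in this series) walks the table between its start and end symbols. The same section-as-a-table idiom can be exercised in user space; the sketch below assumes a GNU-compatible toolchain, which emits __start_/__stop_ symbols for sections whose names are valid C identifiers, and every name in it is invented for the demo.

#include <stdio.h>

struct demo_dtb {
	unsigned long vendor_id;
	const char *name;
};

/* Each declaration lands in the "demo_dtb_table" section; ld collects them. */
#define DEMO_DTB_DECLARE(entry, vendor, dtbname)			\
	static const struct demo_dtb entry				\
	__attribute__((used, section("demo_dtb_table"))) =		\
		{ .vendor_id = (vendor), .name = (dtbname) }

DEMO_DTB_DECLARE(board_a, 0x111, "board-a.dtb");
DEMO_DTB_DECLARE(board_b, 0x222, "board-b.dtb");

/* Provided automatically by the linker for valid-identifier section names. */
extern const struct demo_dtb __start_demo_dtb_table[];
extern const struct demo_dtb __stop_demo_dtb_table[];

static const char *lookup(unsigned long vendor_id)
{
	const struct demo_dtb *s;

	for (s = __start_demo_dtb_table; s < __stop_demo_dtb_table; s++)
		if (s->vendor_id == vendor_id)
			return s->name;
	return NULL;
}

int main(void)
{
	printf("%s\n", lookup(0x222));	/* prints board-b.dtb */
	return 0;
}
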
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 7a7fce63c474..8454f746bbfd 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,8 +10,10 @@
#include <linux/types.h>
+#ifndef GENERIC_TIME_VSYSCALL
struct vdso_data {
};
+#endif
/*
* The VDSO symbols are mapped into Linux so we can just use regular symbol
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..df6ea65c1dec
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..c8e818688ec1
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+ * M-mode to obtain the value of CSR_TIME. Hence, unlike other
+ * architectures, no fence instructions surround the csr_read().
+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
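
The *_fallback() helpers above drop into the kernel with a raw ecall whenever the vDSO fast path cannot be used, for example when the clocksource is not exposed as VDSO_CLOCKMODE_ARCHTIMER. A rough user-space analogue of that fallback idea, written with the portable syscall(2) wrapper instead of RISC-V inline assembly (Linux and glibc assumed):

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Force the slow path: go straight to the kernel, bypassing any vDSO. */
static int clock_gettime_fallback(clockid_t clkid, struct timespec *ts)
{
	return syscall(SYS_clock_gettime, clkid, ts);
}

int main(void)
{
	struct timespec ts;

	if (clock_gettime_fallback(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
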
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..82a5693b1861
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..82fd5d83bd60
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index d8bbd3207100..b355cf485671 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -51,5 +51,6 @@ ifeq ($(CONFIG_RISCV_SBI), y)
obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
+obj-$(CONFIG_KGDB) += kgdb.o
clean:
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 4c90c07d8c39..bd0f122965c3 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -7,6 +7,23 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <asm/cacheinfo.h>
+
+static struct riscv_cacheinfo_ops *rv_cache_ops;
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
+{
+ rv_cache_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ if (rv_cache_ops && rv_cache_ops->get_priv_group)
+ return rv_cache_ops->get_priv_group(this_leaf);
+ return NULL;
+}
static void ci_leaf_init(struct cacheinfo *this_leaf,
struct device_node *node,
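
riscv_set_cacheinfo_ops() lets a platform driver attach private sysfs attribute groups to the generic cacheinfo code through a single ops pointer, and cache_get_priv_group() simply falls back to NULL when nothing is registered. A stripped-down user-space sketch of that register-then-dispatch pattern follows; every name in it is invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct demo_ops {
	const char *(*get_label)(int leaf);
};

static const struct demo_ops *registered_ops;

static void demo_set_ops(const struct demo_ops *ops)
{
	registered_ops = ops;
}

/* Dispatch through the hook if present, otherwise fall back to a default. */
static const char *demo_get_label(int leaf)
{
	if (registered_ops && registered_ops->get_label)
		return registered_ops->get_label(leaf);
	return "unknown";
}

static const char *my_label(int leaf)
{
	return leaf == 0 ? "L1" : "L2";
}

static const struct demo_ops my_ops = { .get_label = my_label };

int main(void)
{
	printf("%s\n", demo_get_label(0));	/* "unknown": nothing registered */
	demo_set_ops(&my_ops);
	printf("%s\n", demo_get_label(1));	/* "L2": hook now in place */
	return 0;
}
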
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 40a3c442ac5f..6d59e6906fdd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
return hart;
}
+/*
+ * Find the hart ID of the CPU DT node under which the given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the cpuid from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hartid(node);
+ }
+
+ return -1;
+}
+
#ifdef CONFIG_PROC_FS
static void print_isa(struct seq_file *f, const char *isa)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 56d071b2c0a1..cae7e6d4c7ef 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -106,7 +106,9 @@ _save_context:
/* Handle interrupts */
move a0, sp /* pt_regs */
- tail do_IRQ
+ la a1, handle_arch_irq
+ REG_L a1, (a1)
+ jr a1
1:
/*
* Exceptions run with interrupts enabled or disabled depending on the
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index ce69b34ff55d..08396614d6f4 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -7,10 +7,23 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
#ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+{
+ mutex_lock(&text_mutex);
+ return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+{
+ mutex_unlock(&text_mutex);
+ return 0;
+}
+
static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
@@ -51,7 +64,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
make_call(hook_pos, target, call);
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
- if (riscv_patch_text_nosync
+ if (patch_text_nosync
((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
return -EPERM;
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 98a406474e7d..7ed1b22950fd 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -161,11 +161,20 @@ ENTRY(_start_kernel)
/* Reset all registers except ra, a0, a1 */
call reset_regs
- /* Setup a PMP to permit access to all of memory. */
+ /*
+ * Setup a PMP to permit access to all of memory. Some machines may
+ * not implement PMPs, so we set up a quick trap handler to just skip
+ * touching the PMPs on any trap.
+ */
+ la a0, pmp_done
+ csrw CSR_TVEC, a0
+
li a0, -1
csrw CSR_PMPADDR0, a0
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
+.align 2
+pmp_done:
/*
* The hartid in a0 is expected later on, and we have no firmware
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 345c4f2eba13..7207fa08d78f 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <asm/smp.h>
@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
- switch (regs->cause & ~CAUSE_IRQ_FLAG) {
- case RV_IRQ_TIMER:
- riscv_timer_interrupt();
- break;
-#ifdef CONFIG_SMP
- case RV_IRQ_SOFT:
- /*
- * We only use software interrupts to pass IPIs, so if a non-SMP
- * system gets one, then we don't know what to do.
- */
- riscv_software_interrupt();
- break;
-#endif
- case RV_IRQ_EXT:
- handle_arch_irq(regs);
- break;
- default:
- pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
- BUG();
- }
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
void __init init_IRQ(void)
{
irqchip_init();
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
}
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
new file mode 100644
index 000000000000..f16ade84a11f
--- /dev/null
+++ b/arch/riscv/kernel/kgdb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kdebug.h>
+#include <linux/bug.h>
+#include <linux/kgdb.h>
+#include <linux/irqflags.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/gdb_xml.h>
+#include <asm/parse_asm.h>
+
+enum {
+ NOT_KGDB_BREAK = 0,
+ KGDB_SW_BREAK,
+ KGDB_COMPILED_BREAK,
+ KGDB_SW_SINGLE_STEP
+};
+
+static unsigned long stepped_address;
+static unsigned int stepped_opcode;
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+#else
+#define is_c_jal_insn(opcode) 0
+#endif
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+
+int decode_register_index(unsigned long opcode, int offset)
+{
+ return (opcode >> offset) & 0x1F;
+}
+
+int decode_register_index_short(unsigned long opcode, int offset)
+{
+ return ((opcode >> offset) & 0x7) + 8;
+}
+
+/* Calculate the new address for after a step */
+int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+ unsigned long pc = regs->epc;
+ unsigned long *regs_ptr = (unsigned long *)regs;
+ unsigned int rs1_num, rs2_num;
+ int op_code;
+
+ if (probe_kernel_address((void *)pc, op_code))
+ return -EINVAL;
+ if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
+ if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+ rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
+ *next_addr = regs_ptr[rs1_num];
+ } else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
+ *next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
+ } else if (is_c_beqz_insn(op_code)) {
+ rs1_num = decode_register_index_short(op_code,
+ RVC_C1_RS1_OPOFF);
+ if (!rs1_num || regs_ptr[rs1_num] == 0)
+ *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+ else
+ *next_addr = pc + 2;
+ } else if (is_c_bnez_insn(op_code)) {
+ rs1_num =
+ decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
+ if (rs1_num && regs_ptr[rs1_num] != 0)
+ *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+ else
+ *next_addr = pc + 2;
+ } else {
+ *next_addr = pc + 2;
+ }
+ } else {
+ if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
+ bool result = false;
+ long imm = EXTRACT_BTYPE_IMM(op_code);
+ unsigned long rs1_val = 0, rs2_val = 0;
+
+ rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+ rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
+ if (rs1_num)
+ rs1_val = regs_ptr[rs1_num];
+ if (rs2_num)
+ rs2_val = regs_ptr[rs2_num];
+
+ if (is_beq_insn(op_code))
+ result = (rs1_val == rs2_val) ? true : false;
+ else if (is_bne_insn(op_code))
+ result = (rs1_val != rs2_val) ? true : false;
+ else if (is_blt_insn(op_code))
+ result =
+ ((long)rs1_val <
+ (long)rs2_val) ? true : false;
+ else if (is_bge_insn(op_code))
+ result =
+ ((long)rs1_val >=
+ (long)rs2_val) ? true : false;
+ else if (is_bltu_insn(op_code))
+ result = (rs1_val < rs2_val) ? true : false;
+ else if (is_bgeu_insn(op_code))
+ result = (rs1_val >= rs2_val) ? true : false;
+ if (result)
+ *next_addr = imm + pc;
+ else
+ *next_addr = pc + 4;
+ } else if (is_jal_insn(op_code)) {
+ *next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
+ } else if (is_jalr_insn(op_code)) {
+ rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+ if (rs1_num)
+ *next_addr = ((unsigned long *)regs)[rs1_num];
+ *next_addr += EXTRACT_ITYPE_IMM(op_code);
+ } else if (is_sret_insn(op_code)) {
+ *next_addr = pc;
+ } else {
+ *next_addr = pc + 4;
+ }
+ }
+ return 0;
+}
+
+int do_single_step(struct pt_regs *regs)
+{
+ /* Determine where the target instruction will send us to */
+ unsigned long addr = 0;
+ int error = get_step_address(regs, &addr);
+
+ if (error)
+ return error;
+
+ /* Store the op code in the stepped address */
+ error = probe_kernel_address((void *)addr, stepped_opcode);
+ if (error)
+ return error;
+
+ stepped_address = addr;
+
+ /* Replace the op code with the break instruction */
+ error = probe_kernel_write((void *)stepped_address,
+ arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ /* Flush and return */
+ if (!error) {
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+ kgdb_single_step = 1;
+ atomic_set(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ } else {
+ stepped_address = 0;
+ stepped_opcode = 0;
+ }
+ return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (stepped_opcode != 0) {
+ probe_kernel_write((void *)stepped_address,
+ (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+ flush_icache_range(stepped_address,
+ stepped_address + BREAK_INSTR_SIZE);
+ }
+ stepped_address = 0;
+ stepped_opcode = 0;
+ kgdb_single_step = 0;
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ {DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
+ {DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
+ {DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+ {DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
+ {DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+ {DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
+ {DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
+ {DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
+ {DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
+ {DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+ {DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
+ {DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+ {DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
+ {DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
+ {DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
+ {DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
+ {DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
+ {DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
+ {DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
+ {DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
+ {DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
+ {DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
+ {DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
+ {DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
+ {DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
+ {DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
+ {DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
+ {DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
+ {DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
+ {DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
+ {DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
+ {DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
+ {DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
+ {DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
+ {DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
+ {DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
+ gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
+ gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
+ gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
+ gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
+ gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
+ gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
+ gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
+ gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
+ gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
+ gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
+ gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
+ gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->epc = pc;
+}
+
+void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer)
+{
+ if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
+ sizeof(gdb_xfer_read_target)))
+ strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+ else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
+ sizeof(gdb_xfer_read_cpuxml)))
+ strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+}
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->epc = addr;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ int err = 0;
+
+ undo_single_step(regs);
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ case 'D':
+ case 'k':
+ if (remcom_in_buffer[0] == 'c')
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ break;
+ case 's':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ err = do_single_step(regs);
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+int kgdb_riscv_kgdbbreak(unsigned long addr)
+{
+ if (stepped_address == addr)
+ return KGDB_SW_SINGLE_STEP;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if (addr == (unsigned long)&kgdb_compiled_break)
+ return KGDB_COMPILED_BREAK;
+
+ return kgdb_has_hit_break(addr);
+}
+
+static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long flags;
+ int type;
+
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ type = kgdb_riscv_kgdbbreak(regs->epc);
+ if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
+ return NOTIFY_DONE;
+
+ local_irq_save(flags);
+
+ if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
+ args->signr, cmd, regs))
+ return NOTIFY_DONE;
+
+ if (type == KGDB_COMPILED_BREAK)
+ regs->epc += 4;
+
+ local_irq_restore(flags);
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_riscv_notify,
+};
+
+int kgdb_arch_init(void)
+{
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * Global data
+ */
+#ifdef CONFIG_RISCV_ISA_C
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x02, 0x90}, /* c.ebreak */
+};
+#else
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00}, /* ebreak */
+};
+#endif
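
dbg_get_reg() and dbg_set_reg() above move registers between struct pt_regs and GDB's flat buffer by indexing a table of {name, size, offsetof()} descriptors, with offset -1 meaning "no backing storage" (the hardwired zero register). A tiny user-space model of that descriptor-table technique, using a made-up three-register frame:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct demo_regs { unsigned long pc, sp, ra; };

struct reg_def { const char *name; int offset; };

/* offset == -1 means "no backing storage", like DBG_REG_ZERO. */
static const struct reg_def reg_def[] = {
	{ "zero", -1 },
	{ "pc",   offsetof(struct demo_regs, pc) },
	{ "sp",   offsetof(struct demo_regs, sp) },
	{ "ra",   offsetof(struct demo_regs, ra) },
};

static void get_reg(int regno, unsigned long *out, struct demo_regs *regs)
{
	if (reg_def[regno].offset != -1)
		memcpy(out, (char *)regs + reg_def[regno].offset, sizeof(*out));
	else
		*out = 0;
}

int main(void)
{
	struct demo_regs regs = { .pc = 0x80000000UL, .sp = 0x1000, .ra = 0x42 };
	unsigned long val;

	get_reg(0, &val, &regs);
	printf("zero = %#lx\n", val);	/* 0 */
	get_reg(1, &val, &regs);
	printf("pc   = %#lx\n", val);	/* 0x80000000 */
	return 0;
}
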
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 8bbe5dbe1341..7191342c54da 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -10,7 +10,7 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/sections.h>
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 8a4fc65ee022..d4a64dfed342 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -5,22 +5,22 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/patch.h>
-struct riscv_insn_patch {
+struct patch_insn {
void *addr;
u32 insn;
atomic_t cpu_count;
};
#ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static void __kprobes *patch_map(void *addr, int fixmap)
+static void *patch_map(void *addr, int fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
@@ -37,20 +37,26 @@ static void __kprobes *patch_map(void *addr, int fixmap)
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
}
+NOKPROBE_SYMBOL(patch_map);
-static void __kprobes patch_unmap(int fixmap)
+static void patch_unmap(int fixmap)
{
clear_fixmap(fixmap);
}
+NOKPROBE_SYMBOL(patch_unmap);
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
{
void *waddr = addr;
bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
- unsigned long flags = 0;
int ret;
- raw_spin_lock_irqsave(&patch_lock, flags);
+ /*
+ * The caller is expected to already hold text_mutex before reaching
+ * here, so no additional lock is taken; holding text_mutex is what
+ * keeps concurrent patching safe across cores.
+ */
+ lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + len, FIX_TEXT_POKE1);
@@ -64,38 +70,39 @@ static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
-
return ret;
}
+NOKPROBE_SYMBOL(patch_insn_write);
#else
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
{
return probe_kernel_write(addr, insn, len);
}
+NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */
-int __kprobes riscv_patch_text_nosync(void *addr, const void *insns, size_t len)
+int patch_text_nosync(void *addr, const void *insns, size_t len)
{
u32 *tp = addr;
int ret;
- ret = riscv_insn_write(tp, insns, len);
+ ret = patch_insn_write(tp, insns, len);
if (!ret)
flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
return ret;
}
+NOKPROBE_SYMBOL(patch_text_nosync);
-static int __kprobes riscv_patch_text_cb(void *data)
+static int patch_text_cb(void *data)
{
- struct riscv_insn_patch *patch = data;
+ struct patch_insn *patch = data;
int ret = 0;
if (atomic_inc_return(&patch->cpu_count) == 1) {
ret =
- riscv_patch_text_nosync(patch->addr, &patch->insn,
+ patch_text_nosync(patch->addr, &patch->insn,
GET_INSN_LENGTH(patch->insn));
atomic_inc(&patch->cpu_count);
} else {
@@ -106,15 +113,17 @@ static int __kprobes riscv_patch_text_cb(void *data)
return ret;
}
+NOKPROBE_SYMBOL(patch_text_cb);
-int __kprobes riscv_patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 insn)
{
- struct riscv_insn_patch patch = {
+ struct patch_insn patch = {
.addr = addr,
.insn = insn,
.cpu_count = ATOMIC_INIT(0),
};
- return stop_machine_cpuslocked(riscv_patch_text_cb,
+ return stop_machine_cpuslocked(patch_text_cb,
&patch, cpu_online_mask);
}
+NOKPROBE_SYMBOL(patch_text);
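
patch_text() relies on stop_machine_cpuslocked() so that exactly one CPU rewrites the instruction while every other CPU spins on patch->cpu_count until the write is complete. The pthread sketch below imitates only that rendezvous with C11 atomics; it is an analogy, since stop_machine() additionally runs with interrupts and preemption disabled, which user space cannot reproduce.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_THREADS 4

static atomic_int cpu_count;
static int patched_value;		/* stands in for the instruction word */

/* Mirrors patch_text_cb(): first arrival patches, the rest spin until done. */
static void *patch_cb(void *arg)
{
	(void)arg;
	if (atomic_fetch_add(&cpu_count, 1) == 0) {
		patched_value = 0x9002;			/* "write the insn" */
		atomic_fetch_add(&cpu_count, 1);	/* signal completion */
	} else {
		while (atomic_load(&cpu_count) <= NR_THREADS)
			;				/* cpu_relax() stand-in */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[NR_THREADS];
	int i;

	for (i = 0; i < NR_THREADS; i++)
		pthread_create(&t[i], NULL, patch_cb, NULL);
	for (i = 0; i < NR_THREADS; i++)
		pthread_join(t[i], NULL);

	printf("patched_value = %#x\n", patched_value);	/* 0x9002 */
	return 0;
}
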
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 145128a7e560..f04373be54a6 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -22,7 +22,6 @@
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
@@ -75,7 +74,11 @@ void __init setup_arch(char **cmdline_p)
setup_bootmem();
paging_init();
+#if IS_ENABLED(CONFIG_BUILTIN_DTB)
+ unflatten_and_copy_device_tree();
+#else
unflatten_device_tree();
+#endif
clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index a65a8fa0c22d..b1d4f452f843 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -123,11 +123,14 @@ static inline void clear_ipi(void)
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
}
-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
{
+ struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
+ irq_enter();
+
clear_ipi();
while (true) {
@@ -138,7 +141,7 @@ void riscv_software_interrupt(void)
ops = xchg(pending_ipis, 0);
if (ops == 0)
- return;
+ goto done;
if (ops & (1 << IPI_RESCHEDULE)) {
stats[IPI_RESCHEDULE]++;
@@ -160,6 +163,10 @@ void riscv_software_interrupt(void)
/* Order data access and bit testing. */
mb();
}
+
+done:
+ irq_exit();
+ set_irq_regs(old_regs);
}
static const char * const ipi_names[] = {
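
handle_IPI() drains every pending IPI type in one pass: it atomically swaps the pending bitmask to zero, services each bit that was set, and repeats until no new bits arrive. The following user-space approximation shows the same grab-and-clear loop with C11 atomics; the handler bodies are stubs and it runs single-threaded, purely to show the control flow.

#include <stdatomic.h>
#include <stdio.h>

enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX };

static atomic_ulong pending_ipis;

static void send_ipi(int op)
{
	atomic_fetch_or(&pending_ipis, 1UL << op);
}

/* Drain loop modelled on handle_IPI(): grab-and-clear, then dispatch. */
static void handle_ipi(void)
{
	while (1) {
		unsigned long ops = atomic_exchange(&pending_ipis, 0);

		if (ops == 0)
			return;
		if (ops & (1UL << IPI_RESCHEDULE))
			puts("reschedule");
		if (ops & (1UL << IPI_CALL_FUNC))
			puts("call function");
		if (ops & (1UL << IPI_CPU_STOP))
			puts("cpu stop");
	}
}

int main(void)
{
	send_ipi(IPI_RESCHEDULE);
	send_ipi(IPI_CPU_STOP);
	handle_ipi();		/* prints "reschedule" then "cpu stop" */
	return 0;
}
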
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
index 0b3b3dc9ad0f..c7b0a73e382e 100644
--- a/arch/riscv/kernel/soc.c
+++ b/arch/riscv/kernel/soc.c
@@ -4,7 +4,7 @@
*/
#include <linux/init.h>
#include <linux/libfdt.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/soc.h>
/*
@@ -26,3 +26,30 @@ void __init soc_early_init(void)
}
}
}
+
+static bool soc_builtin_dtb_match(unsigned long vendor_id,
+ unsigned long arch_id, unsigned long imp_id,
+ const struct soc_builtin_dtb *entry)
+{
+ return entry->vendor_id == vendor_id &&
+ entry->arch_id == arch_id &&
+ entry->imp_id == imp_id;
+}
+
+void * __init soc_lookup_builtin_dtb(void)
+{
+ unsigned long vendor_id, arch_id, imp_id;
+ const struct soc_builtin_dtb *s;
+
+ __asm__ ("csrr %0, mvendorid" : "=r"(vendor_id));
+ __asm__ ("csrr %0, marchid" : "=r"(arch_id));
+ __asm__ ("csrr %0, mimpid" : "=r"(imp_id));
+
+ for (s = (void *)&__soc_builtin_dtb_table_start;
+ (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) {
+ if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s))
+ return s->dtb_func();
+ }
+
+ return NULL;
+}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 837b9b38f825..595342910c3f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -99,17 +99,18 @@ void notrace walk_stackframe(struct task_struct *task,
static bool print_trace_address(unsigned long pc, void *arg)
{
- print_ip_sym(pc);
+ const char *loglvl = arg;
+
+ print_ip_sym(loglvl, pc);
return false;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}
-
static bool save_wchan(unsigned long pc, void *arg)
{
if (!in_sched_functions(pc)) {
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 6a53c02e9c73..4d3a1048ad8b 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -26,3 +26,12 @@ void __init time_init(void)
lpj_fine = riscv_timebase / HZ;
timer_probe();
}
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7f58fa53033f..ecec1778e3a4 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -147,6 +147,11 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{
if (user_mode(regs))
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+#ifdef CONFIG_KGDB
+ else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+#endif
else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
regs->epc += get_break_insn_length(regs->epc);
else
@@ -178,6 +183,4 @@ void trap_init(void)
csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
- /* Enable interrupts */
- csr_write(CSR_IE, IE_SIE);
}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 484d95a70907..678204231700 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -11,8 +11,12 @@
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
-
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
#include <asm/vdso.h>
+#endif
extern char vdso_start[], vdso_end[];
@@ -26,7 +30,7 @@ static union {
struct vdso_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
static int __init vdso_init(void)
{
@@ -61,7 +65,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
@@ -75,15 +79,24 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret = install_special_mapping(mm, vdso_base, vdso_len,
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
vdso_pagelist);
- if (unlikely(ret))
+ if (unlikely(ret)) {
mm->context.vdso = NULL;
+ goto end;
+ }
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
end:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
return NULL;
}
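
After this change a process carries two special mappings for the vDSO: the code pages reported as "[vdso]" and a read-only data page reported as "[vdso_data]". One quick way to confirm they are present is to scan /proc/self/maps from user space (Linux only, of course):

#include <stdio.h>
#include <string.h>

/* Print every mapping whose name mentions the vDSO, e.g. [vdso]/[vdso_data]. */
int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "vdso"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
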
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 4c8b2a4a6a70..38ba55b0eb9d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copied from arch/tile/kernel/vdso/Makefile
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
# Symbols present in the vdso
vdso-syms = rt_sigreturn
ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
endif
vdso-syms += getcpu
vdso-syms += flush_icache
@@ -14,6 +16,10 @@ vdso-syms += flush_icache
# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
deleted file mode 100644
index 91378a52eb22..000000000000
--- a/arch/riscv/kernel/vdso/clock_getres.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__vdso_clock_getres)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_getres
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
deleted file mode 100644
index 5371fd9bc01f..000000000000
--- a/arch/riscv/kernel/vdso/clock_gettime.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__vdso_clock_gettime)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_gettime
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e6fb8af88632..000000000000
--- a/arch/riscv/kernel/vdso/gettimeofday.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__vdso_gettimeofday)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_gettimeofday
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_gettimeofday)
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index f66a091cb890..e6f558bca71b 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2012 Regents of the University of California
*/
+#include <asm/page.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..d264943e2e47
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
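
These three functions are the entry points the C library resolves as __vdso_clock_gettime() and friends, replacing the assembly stubs removed above that always trapped into the kernel. Application code never calls them directly; an ordinary clock_gettime() call is routed through the vDSO automatically when the clocksource allows it, as in this small timing loop:

#include <stdio.h>
#include <time.h>

/* glibc routes this through __vdso_clock_gettime() when available. */
int main(void)
{
	struct timespec a, b;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &a);
	for (i = 0; i < 1000000; i++)
		clock_gettime(CLOCK_MONOTONIC, &b);
	printf("1e6 calls took %.3f ms\n",
	       (b.tv_sec - a.tv_sec) * 1e3 + (b.tv_nsec - a.tv_nsec) / 1e6);
	return 0;
}
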
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 0339b6bbe11a..e6f8016b366a 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -34,6 +34,11 @@ SECTIONS
KEEP(*(__soc_early_init_table))
__soc_early_init_table_end = .;
}
+ __soc_builtin_dtb_table : {
+ __soc_builtin_dtb_table_start = .;
+ KEEP(*(__soc_builtin_dtb_table))
+ __soc_builtin_dtb_table_end = .;
+ }
/* we have to discard exit text and such at runtime, not link time */
.exit.text :
{
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 8930ab7278e6..094118663285 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 SiFive
*/
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index be84e32adc4c..ae7b7fe24658 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (unlikely(!vma))
goto bad_area;
@@ -114,7 +114,7 @@ good_area:
/*
* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs))
@@ -147,7 +147,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -155,7 +155,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -163,7 +163,7 @@ good_area:
* Fix it, but check if it's kernel or user first.
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
@@ -191,14 +191,14 @@ no_context:
* (which will retry the fault, or kill us if we got oom-killed).
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 939159b13a13..f4adb3684f3d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -17,7 +17,7 @@
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
+#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
@@ -235,12 +235,12 @@ static void __init create_pte_mapping(pte_t *ptep,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
- uintptr_t pte_index = pte_index(va);
+ uintptr_t pte_idx = pte_index(va);
BUG_ON(sz != PAGE_SIZE);
- if (pte_none(ptep[pte_index]))
- ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
+ if (pte_none(ptep[pte_idx]))
+ ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}
#ifndef __PAGETABLE_PMD_FOLDED
@@ -283,21 +283,21 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
{
pte_t *ptep;
phys_addr_t pte_phys;
- uintptr_t pmd_index = pmd_index(va);
+ uintptr_t pmd_idx = pmd_index(va);
if (sz == PMD_SIZE) {
- if (pmd_none(pmdp[pmd_index]))
- pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
+ if (pmd_none(pmdp[pmd_idx]))
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
return;
}
- if (pmd_none(pmdp[pmd_index])) {
+ if (pmd_none(pmdp[pmd_idx])) {
pte_phys = alloc_pte(va);
- pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
ptep = get_pte_virt(pte_phys);
memset(ptep, 0, PAGE_SIZE);
} else {
- pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
+ pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
ptep = get_pte_virt(pte_phys);
}
@@ -325,21 +325,21 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
{
pgd_next_t *nextp;
phys_addr_t next_phys;
- uintptr_t pgd_index = pgd_index(va);
+ uintptr_t pgd_idx = pgd_index(va);
if (sz == PGDIR_SIZE) {
- if (pgd_val(pgdp[pgd_index]) == 0)
- pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
+ if (pgd_val(pgdp[pgd_idx]) == 0)
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
return;
}
- if (pgd_val(pgdp[pgd_index]) == 0) {
+ if (pgd_val(pgdp[pgd_idx]) == 0) {
next_phys = alloc_pgd_next(va);
- pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = get_pgd_next_virt(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
- next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
+ next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
nextp = get_pgd_next_virt(next_phys);
}
@@ -480,21 +480,18 @@ static void __init setup_vm_final(void)
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
-
-void free_initmem(void)
-{
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
- /* Make the region as non-execuatble. */
- set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
+#ifdef CONFIG_BUILTIN_DTB
+ dtb_early_va = soc_lookup_builtin_dtb();
+ if (!dtb_early_va) {
+ /* Fall back to the first built-in DTB */
+ dtb_early_va = (void *) __dtb_start;
+ }
+#else
dtb_early_va = (void *)dtb_pa;
+#endif
}
static inline void setup_vm_final(void)
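
setup_vm() now prefers a DTB linked into the kernel when CONFIG_BUILTIN_DTB is set, falling back to the address handed over by the bootloader. The lookup walks the __soc_builtin_dtb_table section added to vmlinux.lds.S above. A generic, hedged sketch of that linker-table pattern; the entry layout and macro below are illustrative, not the actual asm/soc.h definitions:

#include <string.h>

struct builtin_dtb_entry {
	const char *compatible;		/* SoC the blob belongs to */
	const void *dtb;		/* start of the compiled DTB */
};

/* Start/end symbols come from the linker script, as in the lds.S hunk. */
extern struct builtin_dtb_entry __soc_builtin_dtb_table_start[];
extern struct builtin_dtb_entry __soc_builtin_dtb_table_end[];

#define DECLARE_BUILTIN_DTB(name, compat, blob)				\
	static const struct builtin_dtb_entry __dtb_entry_##name	\
	__attribute__((__used__,					\
		       __section__("__soc_builtin_dtb_table"))) = {	\
		.compatible = (compat),					\
		.dtb = (blob),						\
	}

static const void *lookup_builtin_dtb(const char *compat)
{
	struct builtin_dtb_entry *e;

	for (e = __soc_builtin_dtb_table_start;
	     e < __soc_builtin_dtb_table_end; e++)
		if (!strcmp(e->compatible, compat))
			return e->dtb;
	return NULL;
}
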
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index ec0ca90dd900..4a8b61806633 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -6,8 +6,8 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <asm/fixmap.h>
extern pgd_t early_pg_dir[PTRS_PER_PGD];
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 728759eb530a..ec2c70f84994 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -4,7 +4,7 @@
*/
#include <linux/pagewalk.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
@@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;
- down_read(&init_mm.mmap_sem);
+ mmap_read_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
- up_read(&init_mm.mmap_sem);
+ mmap_read_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 070505d79b06..0831c2e61a8f 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -9,7 +9,7 @@
#include <linux/ptdump.h>
#include <asm/ptdump.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/kasan.h>
#define pt_dump_seq_printf(m, fmt, args...) \
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d6dc6933adc2..f854faff38c3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -59,6 +59,7 @@ config KASAN_SHADOW_OFFSET
config S390
def_bool y
select ARCH_BINFMT_ELF_STATE
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index e68136c3c23a..21c3147bd92a 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -29,10 +29,6 @@
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
- *
- * The record layout is documented in the Linux for zSeries Device Drivers
- * book:
- * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
struct appldata_mem_data {
u64 timestamp;
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 8bc14b0d1def..59c282ca002f 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -25,10 +25,6 @@
* This is accessed as binary data by z/VM. If changes to it can't be avoided,
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be updated.
- *
- * The record layout is documented in the Linux for zSeries Device Drivers
- * book:
- * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
struct appldata_net_sum_data {
u64 timestamp;
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 8bf46d705957..5503217366ec 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -32,10 +32,6 @@
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
- *
- * The record layout is documented in the Linux for zSeries Device Drivers
- * book:
- * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
struct appldata_os_per_cpu {
u32 per_cpu_user; /* timer ticks spent in user mode */
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 357adad991d2..8e222a666025 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -2,12 +2,12 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
+#include <linux/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
-#include <asm/pgtable.h>
#include <asm/uv.h>
#include "boot.h"
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 5591243d673e..d4442163ffa9 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -2,8 +2,8 @@
/*
* Copyright IBM Corp. 2019
*/
+#include <linux/pgtable.h>
#include <asm/mem_detect.h>
-#include <asm/pgtable.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 865ce1cb86d5..3cfe1eb89838 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <asm/chsc.h>
#include <asm/fcx.h>
#include <asm/irq.h>
#include <asm/schid.h>
@@ -236,4 +237,8 @@ extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int);
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx);
+int ccw_device_pnso(struct ccw_device *cdev,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc);
#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
new file mode 100644
index 000000000000..36ce2d25a5fc
--- /dev/null
+++ b/arch/s390/include/asm/chsc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Alexandra Winter <wintera@linux.ibm.com>
+ *
+ * Interface for Channel Subsystem Call
+ */
+#ifndef _ASM_S390_CHSC_H
+#define _ASM_S390_CHSC_H
+
+#include <uapi/asm/chsc.h>
+
+/**
+ * struct chsc_pnso_naid_l2 - network address information descriptor
+ * @nit: Network interface token
+ * @addr_lnid: network address and logical network id (VLAN ID)
+ */
+struct chsc_pnso_naid_l2 {
+ u64 nit;
+ struct { u8 mac[6]; u16 lnid; } addr_lnid;
+} __packed;
+
+struct chsc_pnso_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_pnso_naihdr {
+ struct chsc_pnso_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_pnso_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_pnso_naihdr naihdr;
+ struct chsc_pnso_naid_l2 entries[0];
+} __packed __aligned(PAGE_SIZE);
+
+#endif /* _ASM_S390_CHSC_H */
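
Together with the ccw_device_pnso() prototype added to ccwdev.h above, this header replaces the old qdio_pnso_brinfo() interface (removed from qdio.h further down) with a plain CHSC request/response layout. A hedged sketch of a caller; the device pointer, error handling and the cnc value are illustrative:

#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/ccwdev.h>
#include <asm/chsc.h>

static int example_pnso_query(struct ccw_device *cdev)
{
	struct chsc_pnso_resume_token token = {};
	struct chsc_pnso_area *area;
	int rc;

	/* The response area is defined page sized and page aligned. */
	area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	rc = ccw_device_pnso(cdev, area, token, 0);
	if (!rc) {
		/* walk area->entries[0 .. area->naihdr.naids - 1] here */
	}

	free_page((unsigned long)area);
	return rc;
}
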
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 9ddf4a43a590..60f9241e5e4a 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -9,8 +9,8 @@
#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#define hugetlb_free_pgd_range free_pgd_range
#define hugepages_supported() (MACHINE_HAS_EDAT1)
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 5a16f500515a..da014e4f8113 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -26,7 +26,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0
-void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(phys_addr_t addr, size_t size);
void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
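
The ioremap() prototype now takes the generic phys_addr_t/size_t pair instead of unsigned long, matching the cross-architecture signature. A generic, hedged usage sketch; the EXAMPLE_* names are made up:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_PHYS_BASE	0x80000000UL	/* illustrative MMIO base */
#define EXAMPLE_SIZE		0x1000
#define EXAMPLE_REG		0x10

static u32 example_read_reg(void)
{
	void __iomem *base;
	u32 val;

	base = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE);
	if (!base)
		return 0;

	val = readl(base + EXAMPLE_REG);
	iounmap(base);
	return val;
}
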
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index b63bd66404b8..7d5cfdda5277 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -21,6 +21,7 @@ struct ipl_parameter_block {
struct ipl_pb0_common common;
struct ipl_pb0_fcp fcp;
struct ipl_pb0_ccw ccw;
+ struct ipl_pb0_nvme nvme;
char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
};
} __packed __aligned(PAGE_SIZE);
@@ -30,6 +31,11 @@ struct ipl_parameter_block {
#define IPL_BP_FCP_LEN (sizeof(struct ipl_pl_hdr) + \
sizeof(struct ipl_pb0_fcp))
#define IPL_BP0_FCP_LEN (sizeof(struct ipl_pb0_fcp))
+
+#define IPL_BP_NVME_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_nvme))
+#define IPL_BP0_NVME_LEN (sizeof(struct ipl_pb0_nvme))
+
#define IPL_BP_CCW_LEN (sizeof(struct ipl_pl_hdr) + \
sizeof(struct ipl_pb0_ccw))
#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
@@ -59,6 +65,7 @@ enum ipl_type {
IPL_TYPE_FCP = 4,
IPL_TYPE_FCP_DUMP = 8,
IPL_TYPE_NSS = 16,
+ IPL_TYPE_NVME = 32,
};
struct ipl_info
@@ -74,6 +81,10 @@ struct ipl_info
u64 lun;
} fcp;
struct {
+ u32 fid;
+ u32 nsid;
+ } nvme;
+ struct {
char name[NSS_NAME_SIZE + 1];
} nss;
} data;
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 70930fe5c496..89d6886040c8 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,8 +2,6 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#include <asm/pgtable.h>
-
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index b160da8fa14b..5afee80cff58 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -99,7 +99,7 @@ int nmi_alloc_per_cpu(struct lowcore *lc);
void nmi_free_per_cpu(struct lowcore *lc);
void s390_handle_mcck(void);
-void s390_do_machine_check(struct pt_regs *regs);
+int s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 7485ee561fec..99b92c3e46b0 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -22,12 +22,17 @@ int pci_domain_nr(struct pci_bus *);
int pci_proc_domain(struct pci_bus *);
#define ZPCI_BUS_NR 0 /* default bus number */
-#define ZPCI_DEVFN 0 /* default device number */
#define ZPCI_NR_DMA_SPACES 1
#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
#define ZPCI_DOMAIN_BITMAP_SIZE (1 << 16)
+#ifdef PCI
+#if (ZPCI_NR_DEVICES > ZPCI_DOMAIN_BITMAP_SIZE)
+# error ZPCI_NR_DEVICES can not be bigger than ZPCI_DOMAIN_BITMAP_SIZE
+#endif
+#endif /* PCI */
+
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
@@ -94,10 +99,26 @@ struct zpci_bar_struct {
struct s390_domain;
+#define ZPCI_FUNCTIONS_PER_BUS 256
+struct zpci_bus {
+ struct kref kref;
+ struct pci_bus *bus;
+ struct zpci_dev *function[ZPCI_FUNCTIONS_PER_BUS];
+ struct list_head resources;
+ struct list_head bus_next;
+ struct resource bus_resource;
+ int pchid;
+ int domain_nr;
+ bool multifunction;
+ enum pci_bus_speed max_bus_speed;
+};
+
/* Private data per function */
struct zpci_dev {
- struct pci_bus *bus;
+ struct zpci_bus *zbus;
struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
+ struct list_head bus_next;
+ struct kref kref;
struct hotplug_slot hotplug_slot;
enum zpci_state state;
@@ -107,7 +128,12 @@ struct zpci_dev {
u16 pchid; /* physical channel ID */
u8 pfgid; /* function group ID */
u8 pft; /* pci function type */
- u16 domain;
+ u8 port;
+ u8 rid_available : 1;
+ u8 has_hp_slot : 1;
+ u8 is_physfn : 1;
+ u8 reserved : 5;
+ unsigned int devfn; /* DEVFN part of the RID */
struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
@@ -167,6 +193,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
extern const struct attribute_group *zpci_attr_groups[];
extern unsigned int s390_pci_force_floating __initdata;
+extern unsigned int s390_pci_no_rid;
/* -----------------------------------------------------------------------------
Prototypes
@@ -227,7 +254,14 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
/* Helpers */
static inline struct zpci_dev *to_zpci(struct pci_dev *pdev)
{
- return pdev->sysdata;
+ struct zpci_bus *zbus = pdev->sysdata;
+
+ return zbus->function[pdev->devfn];
+}
+
+static inline struct zpci_dev *to_zpci_dev(struct device *dev)
+{
+ return to_zpci(to_pci_dev(dev));
}
struct zpci_dev *get_zdev_by_fid(u32);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index bd2cb4ea7d93..eb51272dd2cc 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -93,7 +93,10 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
- u16 : 6;
+ u16 : 3;
+ u16 rid_avail : 1;
+ u16 is_physfn : 1;
+ u16 reserved1 : 1;
u16 mio_addr_avail : 1;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
@@ -102,12 +105,16 @@ struct clp_rsp_query_pci {
u16 pchid;
__le32 bar[PCI_STD_NUM_BARS];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
- u32 : 16;
+ u16 : 12;
+ u16 port : 4;
u8 fmb_len;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
- u32 reserved[11];
+#define ZPCI_RID_MASK_DEVFN 0x00ff
+ u16 rid; /* BUS/DEVFN PCI address */
+ u16 reserved0;
+ u32 reserved[10];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
u32 reserved2[16];
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6076c8c912d2..19d603bd1f36 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1229,7 +1229,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1260,7 +1259,6 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
}
#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
@@ -1275,6 +1273,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
return (pud_t *) p4d_deref(*p4d) + pud_index(address);
return (pud_t *) p4d;
}
+#define pud_offset pud_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
@@ -1282,17 +1281,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return (pmd_t *) pud_deref(*pud) + pmd_index(address);
return (pmd_t *) pud;
}
+#define pmd_offset pmd_offset
-static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
- return (pte_t *) pmd_deref(*pmd) + pte_index(address);
+ return (unsigned long) pmd_deref(pmd);
}
-#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-
-static inline void pte_unmap(pte_t *pte) { }
-
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
return end <= current->mm->context.asce_limit;
@@ -1560,7 +1555,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmdp, int full)
{
@@ -1569,7 +1564,7 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
return pmd;
}
- return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
+ return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
@@ -1683,6 +1678,4 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#include <asm-generic/pgtable.h>
-
#endif /* _S390_PAGE_H */
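
With pmd_page_vaddr() now provided, the arch-private pte_index()/pte_offset() definitions (and the asm-generic include at the bottom) can go: the generic <linux/pgtable.h> fallbacks cover them. Roughly the shape of those generic fallbacks, shown as a simplified sketch rather than the exact header text:

static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
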
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 555d148ccf32..962da04234af 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,17 +14,15 @@
#include <linux/bits.h>
-#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
-#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
-#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */
-#define CIF_FPU 4 /* restore FPU registers */
-#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
-#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
-#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
-
-#define _CIF_MCCK_PENDING BIT(CIF_MCCK_PENDING)
+#define CIF_ASCE_PRIMARY 0 /* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY 1 /* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
+#define CIF_FPU 3 /* restore FPU registers */
+#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
+#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
+#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
+
#define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
#define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 86a3796e9be8..e69dbf438f99 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -365,34 +365,6 @@ struct qdio_initialize {
struct qdio_outbuf_state *output_sbal_state_array;
};
-/**
- * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
- * @l3_ipv6_addr: entry contains IPv6 address
- * @l3_ipv4_addr: entry contains IPv4 address
- * @l2_addr_lnid: entry contains MAC address and VLAN ID
- */
-enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
-
-/**
- * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
- * @nit: Network interface token
- * @addr: Address of one of the three types
- *
- * The struct is passed to the callback function by qdio_brinfo_desc()
- */
-struct qdio_brinfo_entry_l3_ipv6 {
- u64 nit;
- struct { unsigned char _s6_addr[16]; } addr;
-} __packed;
-struct qdio_brinfo_entry_l3_ipv4 {
- u64 nit;
- struct { uint32_t _s_addr; } addr;
-} __packed;
-struct qdio_brinfo_entry_l2 {
- u64 nit;
- struct { u8 mac[6]; u16 lnid; } addr_lnid;
-} __packed;
-
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
@@ -423,10 +395,5 @@ extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
-extern int qdio_pnso_brinfo(struct subchannel_id schid,
- int cnc, u16 *response,
- void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
- void *entry),
- void *priv);
#endif /* __QDIO_H__ */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 231a51e870fe..7326f110d48c 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -58,5 +58,6 @@ extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
+extern void schedule_mcck_handler(void);
#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 82703e03f35d..2204704840ea 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -6,7 +6,6 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
/*
* Flush all TLB entries on the local CPU.
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index a470f1fa9f2a..324438889fe1 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -276,6 +276,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
}
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
-void s390_kernel_write(void *dst, const void *src, size_t size);
+void *s390_kernel_write(void *dst, const void *src, size_t size);
#endif /* __S390_UACCESS_H */
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
index 451ba7d08905..d1ecd5d722a0 100644
--- a/arch/s390/include/uapi/asm/ipl.h
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -27,6 +27,7 @@ enum ipl_pbt {
IPL_PBT_FCP = 0,
IPL_PBT_SCP_DATA = 1,
IPL_PBT_CCW = 2,
+ IPL_PBT_NVME = 4,
};
/* IPL Parameter Block 0 with common fields */
@@ -67,6 +68,30 @@ struct ipl_pb0_fcp {
#define IPL_PB0_FCP_OPT_IPL 0x10
#define IPL_PB0_FCP_OPT_DUMP 0x20
+/* IPL Parameter Block 0 for NVMe */
+struct ipl_pb0_nvme {
+ __u32 len;
+ __u8 pbt;
+ __u8 reserved1[3];
+ __u8 loadparm[8];
+ __u8 reserved2[304];
+ __u8 opt;
+ __u8 reserved3[3];
+ __u32 fid;
+ __u8 reserved4[12];
+ __u32 nsid;
+ __u8 reserved5[4];
+ __u32 bootprog;
+ __u8 reserved6[12];
+ __u64 br_lba;
+ __u32 scp_data_len;
+ __u8 reserved7[260];
+ __u8 scp_data[];
+} __packed;
+
+#define IPL_PB0_NVME_OPT_IPL 0x10
+#define IPL_PB0_NVME_OPT_DUMP 0x20
+
/* IPL Parameter Block 0 for CCW */
struct ipl_pb0_ccw {
__u32 len;
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 75f26d775027..a8f136943deb 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -33,11 +33,6 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-#
-# Pass UTS_MACHINE for user_regset definition
-#
-CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e80f0e6f5972..165031bd3370 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -11,9 +11,9 @@
#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/purgatory.h>
+#include <linux/pgtable.h>
#include <asm/idle.h>
#include <asm/vdso.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 2c122d8bab93..0dc4b258b98d 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -126,15 +126,16 @@ unknown:
return -EINVAL;
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
struct unwind_state state;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
- printk(state.reliable ? " [<%016lx>] %pSR \n" :
- "([<%016lx>] %pSR)\n",
- state.ip, (void *) state.ip);
+ printk(state.reliable ? "%s [<%016lx>] %pSR \n" :
+ "%s([<%016lx>] %pSR)\n",
+ loglvl, state.ip, (void *) state.ip);
debug_show_held_locks(task ? : current);
}
@@ -175,7 +176,7 @@ void show_regs(struct pt_regs *regs)
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!user_mode(regs))
- show_stack(NULL, (unsigned long *) regs->gprs[15]);
+ show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT);
show_last_breaking_event(regs);
}
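
show_stack() here gains the loglvl argument that the treewide conversion threads through all backtrace printers, so callers pick the severity instead of relying on the default printk level. A minimal caller sketch; the wrapper function is illustrative:

#include <linux/kernel.h>
#include <linux/sched/debug.h>

static void example_dump_current(void)
{
	/* NULL task and stack mean "current task, current stack". */
	show_stack(NULL, NULL, KERN_WARNING);
}
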
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3ae64914bd14..50ff6dd0f995 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -55,14 +55,11 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
- _CIF_ASCE_SECONDARY | _CIF_FPU)
+_CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
_LPP_OFFSET = __LC_LPP
-#define BASED(name) name-cleanup_critical(%r13)
-
.macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
@@ -116,17 +113,39 @@ _LPP_OFFSET = __LC_LPP
.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
jnz 2f
+#if IS_ENABLED(CONFIG_KVM)
lgr %r14,%r9
- cghi %r14,__LC_RETURN_LPSWE
- je 0f
- slg %r14,BASED(.Lcritical_start)
- clg %r14,BASED(.Lcritical_length)
- jhe 1f
-0:
+ larl %r13,.Lsie_gmap
+ slgr %r14,%r13
+ lghi %r13,.Lsie_done - .Lsie_gmap
+ clgr %r14,%r13
+ jhe 0f
lghi %r11,\savearea # inside critical section, do cleanup
- brasl %r14,cleanup_critical
- tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 2f
+ brasl %r14,.Lcleanup_sie
+#endif
+0: larl %r13,.Lpsw_idle_exit
+ cgr %r13,%r9
+ jne 1f
+
+ mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER
+ # account system time going idle
+ ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
+
+ lg %r13,__LC_STEAL_TIMER
+ alg %r13,__CLOCK_IDLE_ENTER(%r2)
+ slg %r13,__LC_LAST_UPDATE_CLOCK
+ stg %r13,__LC_STEAL_TIMER
+
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
+
+ lg %r13,__LC_SYSTEM_TIMER
+ alg %r13,__LC_LAST_UPDATE_TIMER
+ slg %r13,__TIMER_IDLE_ENTER(%r2)
+ stg %r13,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
+
+ nihh %r8,0xfcfd # clear wait state and irq bits
1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
@@ -152,12 +171,30 @@ _LPP_OFFSET = __LC_LPP
mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
- .macro REENABLE_IRQS
+ .macro RESTORE_SM_CLEAR_PER
stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
ssm __LC_RETURN_PSW
.endm
+ .macro ENABLE_INTS
+ stosm __SF_EMPTY(%r15),3
+ .endm
+
+ .macro ENABLE_INTS_TRACE
+ TRACE_IRQS_ON
+ ENABLE_INTS
+ .endm
+
+ .macro DISABLE_INTS
+ stnsm __SF_EMPTY(%r15),0xfc
+ .endm
+
+ .macro DISABLE_INTS_TRACE
+ DISABLE_INTS
+ TRACE_IRQS_OFF
+ .endm
+
.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
.insn s,0xb27c0000,\savearea # store clock fast
@@ -254,8 +291,6 @@ ENTRY(__switch_to)
BR_EX %r14
ENDPROC(__switch_to)
-.L__critical_start:
-
#if IS_ENABLED(CONFIG_KVM)
/*
* sie64a calling convention:
@@ -288,7 +323,6 @@ ENTRY(sie64a)
BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
sie 0(%r14)
-.Lsie_exit:
BPOFF
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
@@ -341,7 +375,6 @@ EXPORT_SYMBOL(sie_exit)
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
BPOFF
lg %r12,__LC_CURRENT
@@ -350,7 +383,6 @@ ENTRY(system_call)
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
@@ -358,6 +390,7 @@ ENTRY(system_call)
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
+ ENABLE_INTS
.Lsysc_do_svc:
# clear user controlled register to prevent speculative use
xgr %r0,%r0
@@ -393,26 +426,26 @@ ENTRY(system_call)
jnz .Lsysc_work
TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lsysc_work # check for work
- TSTMSK __LC_CPU_FLAGS,_CIF_WORK
+ TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
jnz .Lsysc_work
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
+ DISABLE_INTS
+ TSTMSK __LC_CPU_FLAGS, _CIF_FPU
+ jz .Lsysc_skip_fpu
+ brasl %r14,load_fpu_regs
+.Lsysc_skip_fpu:
lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
-.Lsysc_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- b __LC_RETURN_LPSWE(%r0)
-.Lsysc_done:
+ lmg %r0,%r15,__PT_R0(%r11)
+ b __LC_RETURN_LPSWE
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
- TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
- jo .Lsysc_mcck_pending
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
@@ -436,11 +469,9 @@ ENTRY(system_call)
jo .Lsysc_sigpending
TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- jo .Lsysc_vxrs
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
jnz .Lsysc_asce
- j .Lsysc_return # beware of critical section cleanup
+ j .Lsysc_return
#
# _TIF_NEED_RESCHED is set, call schedule
@@ -450,13 +481,6 @@ ENTRY(system_call)
jg schedule
#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
- larl %r14,.Lsysc_return
- jg s390_handle_mcck # TIF bit will be cleared by handler
-
-#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
@@ -475,12 +499,6 @@ ENTRY(system_call)
larl %r14,.Lsysc_return
jg set_fs_fixup
-#
-# CIF_FPU is set, restore floating-point controls and floating-point registers.
-#
-.Lsysc_vxrs:
- larl %r14,.Lsysc_return
- jg load_fpu_regs
#
# _TIF_SIGPENDING is set, call do_signal
@@ -564,7 +582,6 @@ ENTRY(system_call)
jnh .Lsysc_tracenogo
sllg %r8,%r2,3
lg %r9,0(%r8,%r10)
-.Lsysc_tracego:
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
@@ -585,8 +602,6 @@ ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
- TRACE_IRQS_ON
- ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jne .Lsysc_tracenogo
# it's a kernel thread
@@ -620,15 +635,16 @@ ENTRY(pgm_check_handler)
lghi %r10,1
0: lg %r12,__LC_CURRENT
lghi %r11,0
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
jnz 3f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
- slg %r14,BASED(.Lsie_critical_start)
- clg %r14,BASED(.Lsie_critical_length)
+ larl %r13,.Lsie_gmap
+ slgr %r14,%r13
+ lghi %r13,.Lsie_done - .Lsie_gmap
+ clgr %r14,%r13
jhe 1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
@@ -680,7 +696,7 @@ ENTRY(pgm_check_handler)
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6: REENABLE_IRQS
+6: RESTORE_SM_CLEAR_PER
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
@@ -702,7 +718,7 @@ ENTRY(pgm_check_handler)
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
- REENABLE_IRQS
+ RESTORE_SM_CLEAR_PER
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_per_trap
@@ -713,11 +729,10 @@ ENTRY(pgm_check_handler)
#
.Lpgm_svcper:
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
- lghi %r13,__TASK_thread
larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ lpswe __LC_RETURN_PSW # branch to .Lsysc_per
ENDPROC(pgm_check_handler)
/*
@@ -729,7 +744,6 @@ ENTRY(io_int_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
@@ -749,7 +763,12 @@ ENTRY(io_int_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+ tmhh %r8,0x300
+ jz 1f
TRACE_IRQS_OFF
+1:
+#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
lgr %r2,%r11 # pass pointer to pt_regs
@@ -767,25 +786,27 @@ ENTRY(io_int_handler)
j .Lio_loop
.Lio_return:
LOCKDEP_SYS_EXIT
- TRACE_IRQS_ON
-.Lio_tif:
TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lio_work # there is work to do (signals etc.)
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lio_work
.Lio_restore:
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+ tm __PT_PSW(%r11),3
+ jno 0f
+ TRACE_IRQS_ON
+0:
+#endif
lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lio_exit_kernel
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
-.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
- lmg %r11,%r15,__PT_R11(%r11)
- b __LC_RETURN_LPSWE(%r0)
+ lmg %r0,%r15,__PT_R0(%r11)
+ b __LC_RETURN_LPSWE
.Lio_done:
#
@@ -813,9 +834,6 @@ ENTRY(io_int_handler)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- # TRACE_IRQS_ON already done at .Lio_return, call
- # TRACE_IRQS_OFF to keep things symmetrical
- TRACE_IRQS_OFF
brasl %r14,preempt_schedule_irq
j .Lio_return
#else
@@ -835,9 +853,6 @@ ENTRY(io_int_handler)
#
# One of the work bits is on. Find out which one.
#
-.Lio_work_tif:
- TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
- jo .Lio_mcck_pending
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lio_reschedule
#ifdef CONFIG_LIVEPATCH
@@ -854,15 +869,6 @@ ENTRY(io_int_handler)
jo .Lio_vxrs
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
jnz .Lio_asce
- j .Lio_return # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
- # TRACE_IRQS_ON already done at .Lio_return
- brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- TRACE_IRQS_OFF
j .Lio_return
#
@@ -895,23 +901,19 @@ ENTRY(io_int_handler)
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,gs_load_bc_cb
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
brasl %r14,schedule # call scheduler
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
#
@@ -928,24 +930,20 @@ ENTRY(io_int_handler)
# _TIF_SIGPENDING or is set, call do_signal
#
.Lio_sigpending:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
#
# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
.Lio_notify_resume:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_notify_resume
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
ENDPROC(io_int_handler)
@@ -958,7 +956,6 @@ ENTRY(ext_int_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
@@ -981,7 +978,12 @@ ENTRY(ext_int_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+ tmhh %r8,0x300
+ jz 1f
TRACE_IRQS_OFF
+1:
+#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
lghi %r3,EXT_INTERRUPT
@@ -990,11 +992,11 @@ ENTRY(ext_int_handler)
ENDPROC(ext_int_handler)
/*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
+ * Load idle PSW.
*/
ENTRY(psw_idle)
stg %r3,__SF_EMPTY(%r15)
- larl %r1,.Lpsw_idle_lpsw+4
+ larl %r1,.Lpsw_idle_exit
stg %r1,__SF_EMPTY+8(%r15)
larl %r1,smp_cpu_mtid
llgf %r1,0(%r1)
@@ -1006,10 +1008,9 @@ ENTRY(psw_idle)
BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
+.Lpsw_idle_exit:
BR_EX %r14
-.Lpsw_idle_end:
ENDPROC(psw_idle)
/*
@@ -1020,6 +1021,7 @@ ENDPROC(psw_idle)
* of the register contents at return from io or a system call.
*/
ENTRY(save_fpu_regs)
+ stnsm __SF_EMPTY(%r15),0xfc
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
@@ -1051,6 +1053,7 @@ ENTRY(save_fpu_regs)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
+ ssm __SF_EMPTY(%r15)
BR_EX %r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
@@ -1102,8 +1105,6 @@ load_fpu_regs:
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
-.L__critical_end:
-
/*
* Machine check handler routines
*/
@@ -1116,7 +1117,6 @@ ENTRY(mcck_int_handler)
lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
@@ -1202,15 +1202,13 @@ ENTRY(mcck_int_handler)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,s390_do_machine_check
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lmcck_return
+ cghi %r2,0
+ je .Lmcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
- jno .Lmcck_return
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
@@ -1280,265 +1278,23 @@ ENTRY(stack_overflow)
ENDPROC(stack_overflow)
#endif
-ENTRY(cleanup_critical)
- cghi %r9,__LC_RETURN_LPSWE
- je .Lcleanup_lpswe
-#if IS_ENABLED(CONFIG_KVM)
- clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
- jl 0f
- clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
- jl .Lcleanup_sie
-#endif
- clg %r9,BASED(.Lcleanup_table) # system_call
- jl 0f
- clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
- jl .Lcleanup_system_call
- clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
- jl .Lcleanup_sysc_tif
- clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
- jl .Lcleanup_sysc_restore
- clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
- jl .Lcleanup_io_tif
- clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
- jl .Lcleanup_io_restore
- clg %r9,BASED(.Lcleanup_table+64) # psw_idle
- jl 0f
- clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
- jl .Lcleanup_idle
- clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs
- jl 0f
- clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end
- jl .Lcleanup_save_fpu_regs
- clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs
- jl 0f
- clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
- jl .Lcleanup_load_fpu_regs
-0: BR_EX %r14,%r11
-ENDPROC(cleanup_critical)
-
- .align 8
-.Lcleanup_table:
- .quad system_call
- .quad .Lsysc_do_svc
- .quad .Lsysc_tif
- .quad .Lsysc_restore
- .quad .Lsysc_done
- .quad .Lio_tif
- .quad .Lio_restore
- .quad .Lio_done
- .quad psw_idle
- .quad .Lpsw_idle_end
- .quad save_fpu_regs
- .quad .Lsave_fpu_regs_end
- .quad load_fpu_regs
- .quad .Lload_fpu_regs_end
-
#if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_table_sie:
- .quad .Lsie_gmap
- .quad .Lsie_done
-
.Lcleanup_sie:
- cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt?
- je 1f
- slg %r9,BASED(.Lsie_crit_mcck_start)
- clg %r9,BASED(.Lsie_crit_mcck_length)
- jh 1f
- oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt?
+ je 1f
+ larl %r13,.Lsie_entry
+ slgr %r9,%r13
+ larl %r13,.Lsie_skip
+ clgr %r9,%r13
+ jh 1f
+ oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
BR_EX %r14,%r11
-#endif
-.Lcleanup_system_call:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn)
- jh 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stmg has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn+8)
- jh 0f
- mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
-0: # check if base register setup + TIF bit load has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+16)
- jhe 0f
- # set up saved register r12 task struct pointer
- stg %r12,32(%r11)
- # set up saved register r13 __TASK_thread offset
- mvc 40(8,%r11),BASED(.Lcleanup_system_call_const)
-0: # check if the user time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+24)
- jh 0f
- lg %r15,__LC_EXIT_TIMER
- slg %r15,__LC_SYNC_ENTER_TIMER
- alg %r15,__LC_USER_TIMER
- stg %r15,__LC_USER_TIMER
-0: # check if the system time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+32)
- jh 0f
- lg %r15,__LC_LAST_UPDATE_TIMER
- slg %r15,__LC_EXIT_TIMER
- alg %r15,__LC_SYSTEM_TIMER
- stg %r15,__LC_SYSTEM_TIMER
-0: # update accounting time stamp
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- # set up saved register r11
- lg %r15,__LC_KERNEL_STACK
- la %r9,STACK_FRAME_OVERHEAD(%r15)
- stg %r9,24(%r11) # r11 pt_regs pointer
- # fill pt_regs
- mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
- # setup saved register r15
- stg %r15,56(%r11) # r15 stack pointer
- # set new psw address and exit
- larl %r9,.Lsysc_do_svc
- BR_EX %r14,%r11
-.Lcleanup_system_call_insn:
- .quad system_call
- .quad .Lsysc_stmg
- .quad .Lsysc_per
- .quad .Lsysc_vtime+36
- .quad .Lsysc_vtime+42
-.Lcleanup_system_call_const:
- .quad __TASK_thread
-
-.Lcleanup_sysc_tif:
- larl %r9,.Lsysc_tif
- BR_EX %r14,%r11
-
-.Lcleanup_sysc_restore:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_sysc_restore_insn)
- jh 0f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
- je 1f
- lg %r9,24(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-.Lcleanup_lpswe:
-1: lmg %r8,%r9,__LC_RETURN_PSW
- BR_EX %r14,%r11
-.Lcleanup_sysc_restore_insn:
- .quad .Lsysc_exit_timer
- .quad .Lsysc_done - 4
-
-.Lcleanup_io_tif:
- larl %r9,.Lio_tif
- BR_EX %r14,%r11
-
-.Lcleanup_io_restore:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_io_restore_insn)
- jh 0f
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
- je 1f
- lg %r9,24(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-1: lmg %r8,%r9,__LC_RETURN_PSW
- BR_EX %r14,%r11
-.Lcleanup_io_restore_insn:
- .quad .Lio_exit_timer
- .quad .Lio_done - 4
-
-.Lcleanup_idle:
- ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
- # copy interrupt clock & cpu timer
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck & stpt have been executed
- clg %r9,BASED(.Lcleanup_idle_insn)
- jhe 1f
- mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # calculate idle cycles
- clg %r9,BASED(.Lcleanup_idle_insn)
- jl 3f
- larl %r1,smp_cpu_mtid
- llgf %r1,0(%r1)
- ltgr %r1,%r1
- jz 3f
- .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
- larl %r3,mt_cycles
- ag %r3,__LC_PERCPU_OFFSET
- la %r4,__SF_EMPTY+16(%r15)
-2: lg %r0,0(%r3)
- slg %r0,0(%r4)
- alg %r0,64(%r4)
- stg %r0,0(%r3)
- la %r3,8(%r3)
- la %r4,8(%r4)
- brct %r1,2b
-3: # account system time going idle
- lg %r9,__LC_STEAL_TIMER
- alg %r9,__CLOCK_IDLE_ENTER(%r2)
- slg %r9,__LC_LAST_UPDATE_CLOCK
- stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lg %r9,__LC_SYSTEM_TIMER
- alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__TIMER_IDLE_ENTER(%r2)
- stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
- # prepare return psw
- nihh %r8,0xfcfd # clear irq & wait state bits
- lg %r9,48(%r11) # return from psw_idle
- BR_EX %r14,%r11
-.Lcleanup_idle_insn:
- .quad .Lpsw_idle_lpsw
-
-.Lcleanup_save_fpu_regs:
- larl %r9,save_fpu_regs
- BR_EX %r14,%r11
-
-.Lcleanup_load_fpu_regs:
- larl %r9,load_fpu_regs
- BR_EX %r14,%r11
-
-/*
- * Integer constants
- */
- .align 8
-.Lcritical_start:
- .quad .L__critical_start
-.Lcritical_length:
- .quad .L__critical_end - .L__critical_start
-#if IS_ENABLED(CONFIG_KVM)
-.Lsie_critical_start:
- .quad .Lsie_gmap
-.Lsie_critical_length:
- .quad .Lsie_done - .Lsie_gmap
-.Lsie_crit_mcck_start:
- .quad .Lsie_entry
-.Lsie_crit_mcck_length:
- .quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 4cd9b1ada834..44e01dd1e624 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -72,22 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
#endif
}
-static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
- insn->opc = BREAKPOINT_INSTRUCTION;
- insn->disp = KPROBE_ON_FTRACE_NOP;
-#endif
-}
-
-static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
- insn->opc = BREAKPOINT_INSTRUCTION;
- insn->disp = KPROBE_ON_FTRACE_CALL;
-#endif
-}
-
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 8f8456816d83..0d7fbdfe995a 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -24,19 +24,19 @@ void enabled_wait(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long long idle_time;
- unsigned long psw_mask;
+ unsigned long psw_mask, flags;
- trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
+ local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
+ local_irq_restore(flags);
- trace_hardirqs_off();
/* Account time spent with enabled wait psw loaded as idle time. */
write_seqcount_begin(&idle->seqcount);
@@ -118,22 +118,16 @@ u64 arch_cpu_idle_time(int cpu)
void arch_cpu_idle_enter(void)
{
- local_mcck_disable();
}
void arch_cpu_idle(void)
{
- if (!test_cpu_flag(CIF_MCCK_PENDING))
- /* Halt the cpu and keep track of cpu time accounting. */
- enabled_wait();
+ enabled_wait();
local_irq_enable();
}
void arch_cpu_idle_exit(void)
{
- local_mcck_enable();
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
}
void arch_cpu_idle_dead(void)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4a71061974fd..ccea9a245867 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -39,6 +39,7 @@
#define IPL_CCW_STR "ccw"
#define IPL_FCP_STR "fcp"
#define IPL_FCP_DUMP_STR "fcp_dump"
+#define IPL_NVME_STR "nvme"
#define IPL_NSS_STR "nss"
#define DUMP_CCW_STR "ccw"
@@ -93,6 +94,8 @@ static char *ipl_type_str(enum ipl_type type)
return IPL_FCP_DUMP_STR;
case IPL_TYPE_NSS:
return IPL_NSS_STR;
+ case IPL_TYPE_NVME:
+ return IPL_NVME_STR;
case IPL_TYPE_UNKNOWN:
default:
return IPL_UNKNOWN_STR;
@@ -133,6 +136,7 @@ static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static struct ipl_parameter_block *reipl_block_fcp;
+static struct ipl_parameter_block *reipl_block_nvme;
static struct ipl_parameter_block *reipl_block_ccw;
static struct ipl_parameter_block *reipl_block_nss;
static struct ipl_parameter_block *reipl_block_actual;
@@ -261,6 +265,8 @@ static __init enum ipl_type get_ipl_type(void)
return IPL_TYPE_FCP_DUMP;
else
return IPL_TYPE_FCP;
+ case IPL_PBT_NVME:
+ return IPL_TYPE_NVME;
}
return IPL_TYPE_UNKNOWN;
}
@@ -317,6 +323,8 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
+ case IPL_TYPE_NVME:
+ return sprintf(page, "%08ux\n", ipl_block.nvme.fid);
default:
return 0;
}
@@ -345,15 +353,35 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
+
+static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ unsigned int size = ipl_block.nvme.scp_data_len;
+ void *scp_data = &ipl_block.nvme.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
static struct bin_attribute ipl_scp_data_attr =
__BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE);
+static struct bin_attribute ipl_nvme_scp_data_attr =
+ __BIN_ATTR(scp_data, S_IRUGO, ipl_nvme_scp_data_read, NULL, PAGE_SIZE);
+
static struct bin_attribute *ipl_fcp_bin_attrs[] = {
&ipl_parameter_attr,
&ipl_scp_data_attr,
NULL,
};
+static struct bin_attribute *ipl_nvme_bin_attrs[] = {
+ &ipl_parameter_attr,
+ &ipl_nvme_scp_data_attr,
+ NULL,
+};
+
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
@@ -365,6 +393,16 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
(unsigned long long)ipl_block.fcp.br_lba);
+/* NVMe ipl device attributes */
+DEFINE_IPL_ATTR_RO(ipl_nvme, fid, "0x%08llx\n",
+ (unsigned long long)ipl_block.nvme.fid);
+DEFINE_IPL_ATTR_RO(ipl_nvme, nsid, "0x%08llx\n",
+ (unsigned long long)ipl_block.nvme.nsid);
+DEFINE_IPL_ATTR_RO(ipl_nvme, bootprog, "%lld\n",
+ (unsigned long long)ipl_block.nvme.bootprog);
+DEFINE_IPL_ATTR_RO(ipl_nvme, br_lba, "%lld\n",
+ (unsigned long long)ipl_block.nvme.br_lba);
+
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
@@ -399,6 +437,24 @@ static struct attribute_group ipl_fcp_attr_group = {
.bin_attrs = ipl_fcp_bin_attrs,
};
+static struct attribute *ipl_nvme_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_nvme_fid_attr.attr,
+ &sys_ipl_nvme_nsid_attr.attr,
+ &sys_ipl_nvme_bootprog_attr.attr,
+ &sys_ipl_nvme_br_lba_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_nvme_attr_group = {
+ .attrs = ipl_nvme_attrs,
+ .bin_attrs = ipl_nvme_bin_attrs,
+};
+
+
/* CCW ipl device attributes */
static struct attribute *ipl_ccw_attrs_vm[] = {
@@ -474,6 +530,9 @@ static int __init ipl_init(void)
case IPL_TYPE_FCP_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
+ case IPL_TYPE_NVME:
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group);
+ break;
default:
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_unknown_attr_group);
@@ -727,6 +786,93 @@ static struct attribute_group reipl_fcp_attr_group = {
static struct kobj_attribute sys_reipl_fcp_clear_attr =
__ATTR(clear, 0644, reipl_fcp_clear_show, reipl_fcp_clear_store);
+/* NVME reipl device attributes */
+
+static ssize_t reipl_nvme_scpdata_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t size = reipl_block_nvme->nvme.scp_data_len;
+ void *scp_data = reipl_block_nvme->nvme.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
+static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t scpdata_len = count;
+ size_t padding;
+
+ if (off)
+ return -EINVAL;
+
+ memcpy(reipl_block_nvme->nvme.scp_data, buf, count);
+ if (scpdata_len % 8) {
+ padding = 8 - (scpdata_len % 8);
+ memset(reipl_block_nvme->nvme.scp_data + scpdata_len,
+ 0, padding);
+ scpdata_len += padding;
+ }
+
+ reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
+ reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
+ reipl_block_nvme->nvme.scp_data_len = scpdata_len;
+
+ return count;
+}
+
+static struct bin_attribute sys_reipl_nvme_scp_data_attr =
+ __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_nvme_scpdata_read,
+ reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE);
+
+static struct bin_attribute *reipl_nvme_bin_attrs[] = {
+ &sys_reipl_nvme_scp_data_attr,
+ NULL,
+};
+
+DEFINE_IPL_ATTR_RW(reipl_nvme, fid, "0x%08llx\n", "%llx\n",
+ reipl_block_nvme->nvme.fid);
+DEFINE_IPL_ATTR_RW(reipl_nvme, nsid, "0x%08llx\n", "%llx\n",
+ reipl_block_nvme->nvme.nsid);
+DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
+ reipl_block_nvme->nvme.bootprog);
+DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n",
+ reipl_block_nvme->nvme.br_lba);
+
+/* nvme wrapper */
+static ssize_t reipl_nvme_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_loadparm_show(reipl_block_nvme, page);
+}
+
+static ssize_t reipl_nvme_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_loadparm_store(reipl_block_nvme, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_nvme_loadparm_attr =
+ __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nvme_loadparm_show,
+ reipl_nvme_loadparm_store);
+
+static struct attribute *reipl_nvme_attrs[] = {
+ &sys_reipl_nvme_fid_attr.attr,
+ &sys_reipl_nvme_nsid_attr.attr,
+ &sys_reipl_nvme_bootprog_attr.attr,
+ &sys_reipl_nvme_br_lba_attr.attr,
+ &sys_reipl_nvme_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_nvme_attr_group = {
+ .attrs = reipl_nvme_attrs,
+ .bin_attrs = reipl_nvme_bin_attrs
+};
+
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
@@ -891,6 +1037,9 @@ static int reipl_set_type(enum ipl_type type)
case IPL_TYPE_FCP:
reipl_block_actual = reipl_block_fcp;
break;
+ case IPL_TYPE_NVME:
+ reipl_block_actual = reipl_block_nvme;
+ break;
case IPL_TYPE_NSS:
reipl_block_actual = reipl_block_nss;
break;
@@ -917,6 +1066,8 @@ static ssize_t reipl_type_store(struct kobject *kobj,
rc = reipl_set_type(IPL_TYPE_CCW);
else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_FCP);
+ else if (strncmp(buf, IPL_NVME_STR, strlen(IPL_NVME_STR)) == 0)
+ rc = reipl_set_type(IPL_TYPE_NVME);
else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_NSS);
return (rc != 0) ? rc : len;
@@ -927,6 +1078,7 @@ static struct kobj_attribute reipl_type_attr =
static struct kset *reipl_kset;
static struct kset *reipl_fcp_kset;
+static struct kset *reipl_nvme_kset;
static void __reipl_run(void *unused)
{
@@ -945,6 +1097,10 @@ static void __reipl_run(void *unused)
else
diag308(DIAG308_LOAD_NORMAL, NULL);
break;
+ case IPL_TYPE_NVME:
+ diag308(DIAG308_SET, reipl_block_nvme);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ break;
case IPL_TYPE_NSS:
diag308(DIAG308_SET, reipl_block_nss);
diag308(DIAG308_LOAD_CLEAR, NULL);
@@ -1093,6 +1249,49 @@ out1:
return rc;
}
+static int __init reipl_nvme_init(void)
+{
+ int rc;
+
+ reipl_block_nvme = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reipl_block_nvme)
+ return -ENOMEM;
+
+ /* sysfs: create kset for mixing attr group and bin attrs */
+ reipl_nvme_kset = kset_create_and_add(IPL_NVME_STR, NULL,
+ &reipl_kset->kobj);
+ if (!reipl_nvme_kset) {
+ free_page((unsigned long) reipl_block_nvme);
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group);
+ if (rc) {
+ kset_unregister(reipl_nvme_kset);
+ free_page((unsigned long) reipl_block_nvme);
+ return rc;
+ }
+
+ if (ipl_info.type == IPL_TYPE_NVME) {
+ memcpy(reipl_block_nvme, &ipl_block, sizeof(ipl_block));
+ /*
+ * Fix loadparm: There are systems where the (SCSI) LOADPARM
+ * is invalid in the IPL parameter block, so take it
+ * always from sclp_ipl_info.
+ */
+ memcpy(reipl_block_nvme->nvme.loadparm, sclp_ipl_info.loadparm,
+ LOADPARM_LEN);
+ } else {
+ reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN;
+ reipl_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
+ reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
+ reipl_block_nvme->nvme.pbt = IPL_PBT_NVME;
+ reipl_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_IPL;
+ }
+ reipl_capabilities |= IPL_TYPE_NVME;
+ return 0;
+}
+
static int __init reipl_type_init(void)
{
enum ipl_type reipl_type = ipl_info.type;
@@ -1108,6 +1307,9 @@ static int __init reipl_type_init(void)
if (reipl_block->pb0_hdr.pbt == IPL_PBT_FCP) {
memcpy(reipl_block_fcp, reipl_block, size);
reipl_type = IPL_TYPE_FCP;
+ } else if (reipl_block->pb0_hdr.pbt == IPL_PBT_NVME) {
+ memcpy(reipl_block_nvme, reipl_block, size);
+ reipl_type = IPL_TYPE_NVME;
} else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) {
memcpy(reipl_block_ccw, reipl_block, size);
reipl_type = IPL_TYPE_CCW;
@@ -1134,6 +1336,9 @@ static int __init reipl_init(void)
rc = reipl_fcp_init();
if (rc)
return rc;
+ rc = reipl_nvme_init();
+ if (rc)
+ return rc;
rc = reipl_nss_init();
if (rc)
return rc;
@@ -1750,6 +1955,10 @@ void __init setup_ipl(void)
ipl_info.data.fcp.wwpn = ipl_block.fcp.wwpn;
ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
+ case IPL_TYPE_NVME:
+ ipl_info.data.nvme.fid = ipl_block.nvme.fid;
+ ipl_info.data.nvme.nsid = ipl_block.nvme.nsid;
+ break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
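The ipl.c hunks above mirror the existing FCP plumbing for NVMe IPL: read-only attributes for the booted device under /sys/firmware/ipl, a writable attribute group plus an scp_data binary attribute for re-IPL, and the usual dispatch updates in reipl_set_type(), __reipl_run(), reipl_nvme_init() and setup_ipl(). The only arithmetic in the new code is the 8-byte padding of scp_data in reipl_nvme_scpdata_write(); below is a minimal user-space sketch of that logic (the struct layout, field names and the two base-length constants are placeholders, not the kernel's definitions):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: stand-in for the kernel's IPL parameter block fields. */
    struct reipl_nvme_sketch {
            unsigned int hdr_len;
            unsigned int pb0_len;
            unsigned int scp_data_len;
            unsigned char scp_data[4096];
    };

    /* Pad SCP data to a multiple of 8 bytes and update the length fields,
     * mirroring reipl_nvme_scpdata_write() above. The two base lengths are
     * made-up placeholders for the IPL_BP*_LEN constants. */
    static size_t store_scp_data(struct reipl_nvme_sketch *blk,
                                 const void *buf, size_t count)
    {
            const size_t base_hdr_len = 16, base_pb0_len = 8; /* placeholders */
            size_t len = count;

            memcpy(blk->scp_data, buf, count);
            if (len % 8) {
                    size_t pad = 8 - (len % 8);

                    memset(blk->scp_data + len, 0, pad);
                    len += pad;
            }
            blk->hdr_len = base_hdr_len + len;
            blk->pb0_len = base_pb0_len + len;
            blk->scp_data_len = len;
            return count;   /* a sysfs write returns the bytes consumed */
    }

    int main(void)
    {
            struct reipl_nvme_sketch blk = { 0 };
            size_t ret = store_scp_data(&blk, "bootarg", 7);

            printf("consumed %zu, stored %u (padded)\n", ret, blk.scp_data_len);
            return 0;
    }

As an aside, the write path above keeps using the FCP block-length constants for the header lengths; whether the NVMe parameter block intentionally shares that layout cannot be seen from these hunks alone.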
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3a854cb5a4c6..93c6b8932fbd 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -16,7 +16,6 @@
#include <linux/debug_locks.h>
#include <asm/cio.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/ipl.h>
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index ba8f19bb438b..4055f1c49814 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -19,6 +19,7 @@
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>
@@ -174,10 +175,12 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
}
static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
- int sign, int bits, int shift)
+ int sign, int bits, int shift,
+ void *(*write)(void *dest, const void *src, size_t len))
{
unsigned long umax;
long min, max;
+ void *dest = (void *)loc;
if (val & ((1UL << shift) - 1))
return -ENOEXEC;
@@ -194,26 +197,33 @@ static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
return -ENOEXEC;
}
- if (bits == 8)
- *(unsigned char *) loc = val;
- else if (bits == 12)
- *(unsigned short *) loc = (val & 0xfff) |
+ if (bits == 8) {
+ unsigned char tmp = val;
+ write(dest, &tmp, 1);
+ } else if (bits == 12) {
+ unsigned short tmp = (val & 0xfff) |
(*(unsigned short *) loc & 0xf000);
- else if (bits == 16)
- *(unsigned short *) loc = val;
- else if (bits == 20)
- *(unsigned int *) loc = (val & 0xfff) << 16 |
- (val & 0xff000) >> 4 |
- (*(unsigned int *) loc & 0xf00000ff);
- else if (bits == 32)
- *(unsigned int *) loc = val;
- else if (bits == 64)
- *(unsigned long *) loc = val;
+ write(dest, &tmp, 2);
+ } else if (bits == 16) {
+ unsigned short tmp = val;
+ write(dest, &tmp, 2);
+ } else if (bits == 20) {
+ unsigned int tmp = (val & 0xfff) << 16 |
+ (val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
+ write(dest, &tmp, 4);
+ } else if (bits == 32) {
+ unsigned int tmp = val;
+ write(dest, &tmp, 4);
+ } else if (bits == 64) {
+ unsigned long tmp = val;
+ write(dest, &tmp, 8);
+ }
return 0;
}
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- const char *strtab, struct module *me)
+ const char *strtab, struct module *me,
+ void *(*write)(void *dest, const void *src, size_t len))
{
struct mod_arch_syminfo *info;
Elf_Addr loc, val;
@@ -241,17 +251,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_64: /* Direct 64 bit. */
val += rela->r_addend;
if (r_type == R_390_8)
- rc = apply_rela_bits(loc, val, 0, 8, 0);
+ rc = apply_rela_bits(loc, val, 0, 8, 0, write);
else if (r_type == R_390_12)
- rc = apply_rela_bits(loc, val, 0, 12, 0);
+ rc = apply_rela_bits(loc, val, 0, 12, 0, write);
else if (r_type == R_390_16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_20)
- rc = apply_rela_bits(loc, val, 1, 20, 0);
+ rc = apply_rela_bits(loc, val, 1, 20, 0, write);
else if (r_type == R_390_32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_PC16: /* PC relative 16 bit. */
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
@@ -260,15 +270,15 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PC64: /* PC relative 64 bit. */
val += rela->r_addend - loc;
if (r_type == R_390_PC16)
- rc = apply_rela_bits(loc, val, 1, 16, 0);
+ rc = apply_rela_bits(loc, val, 1, 16, 0, write);
else if (r_type == R_390_PC16DBL)
- rc = apply_rela_bits(loc, val, 1, 16, 1);
+ rc = apply_rela_bits(loc, val, 1, 16, 1, write);
else if (r_type == R_390_PC32DBL)
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
else if (r_type == R_390_PC32)
- rc = apply_rela_bits(loc, val, 1, 32, 0);
+ rc = apply_rela_bits(loc, val, 1, 32, 0, write);
else if (r_type == R_390_PC64)
- rc = apply_rela_bits(loc, val, 1, 64, 0);
+ rc = apply_rela_bits(loc, val, 1, 64, 0, write);
break;
case R_390_GOT12: /* 12 bit GOT offset. */
case R_390_GOT16: /* 16 bit GOT offset. */
@@ -283,33 +293,33 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_GOTPLT64: /* 64 bit offset to jump slot. */
case R_390_GOTPLTENT: /* 32 bit rel. offset to jump slot >> 1. */
if (info->got_initialized == 0) {
- Elf_Addr *gotent;
+ Elf_Addr *gotent = me->core_layout.base +
+ me->arch.got_offset +
+ info->got_offset;
- gotent = me->core_layout.base + me->arch.got_offset +
- info->got_offset;
- *gotent = val;
+ write(gotent, &val, sizeof(*gotent));
info->got_initialized = 1;
}
val = info->got_offset + rela->r_addend;
if (r_type == R_390_GOT12 ||
r_type == R_390_GOTPLT12)
- rc = apply_rela_bits(loc, val, 0, 12, 0);
+ rc = apply_rela_bits(loc, val, 0, 12, 0, write);
else if (r_type == R_390_GOT16 ||
r_type == R_390_GOTPLT16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOT20 ||
r_type == R_390_GOTPLT20)
- rc = apply_rela_bits(loc, val, 1, 20, 0);
+ rc = apply_rela_bits(loc, val, 1, 20, 0, write);
else if (r_type == R_390_GOT32 ||
r_type == R_390_GOTPLT32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_GOT64 ||
r_type == R_390_GOTPLT64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
else if (r_type == R_390_GOTENT ||
r_type == R_390_GOTPLTENT) {
val += (Elf_Addr) me->core_layout.base - loc;
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
}
break;
case R_390_PLT16DBL: /* 16 bit PC rel. PLT shifted by 1. */
@@ -320,25 +330,29 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
- unsigned int *ip;
- ip = me->core_layout.base + me->arch.plt_offset +
- info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0 */
- ip[1] = 0x100a0004; /* lg 1,10(1) */
+ unsigned int insn[5];
+ unsigned int *ip = me->core_layout.base +
+ me->arch.plt_offset +
+ info->plt_offset;
+
+ insn[0] = 0x0d10e310; /* basr 1,0 */
+ insn[1] = 0x100a0004; /* lg 1,10(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
unsigned int *ij;
ij = me->core_layout.base +
me->arch.plt_offset +
me->arch.plt_size - PLT_ENTRY_SIZE;
- ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ insn[2] = 0xa7f40000 + /* j __jump_r1 */
(unsigned int)(u16)
(((unsigned long) ij - 8 -
(unsigned long) ip) / 2);
} else {
- ip[2] = 0x07f10000; /* br %r1 */
+ insn[2] = 0x07f10000; /* br %r1 */
}
- ip[3] = (unsigned int) (val >> 32);
- ip[4] = (unsigned int) val;
+ insn[3] = (unsigned int) (val >> 32);
+ insn[4] = (unsigned int) val;
+
+ write(ip, insn, sizeof(insn));
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
@@ -357,17 +371,17 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val += rela->r_addend - loc;
}
if (r_type == R_390_PLT16DBL)
- rc = apply_rela_bits(loc, val, 1, 16, 1);
+ rc = apply_rela_bits(loc, val, 1, 16, 1, write);
else if (r_type == R_390_PLTOFF16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_PLT32DBL)
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
else if (r_type == R_390_PLT32 ||
r_type == R_390_PLTOFF32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_PLT64 ||
r_type == R_390_PLTOFF64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_GOTOFF16: /* 16 bit offset to GOT. */
case R_390_GOTOFF32: /* 32 bit offset to GOT. */
@@ -375,20 +389,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
val = val + rela->r_addend -
((Elf_Addr) me->core_layout.base + me->arch.got_offset);
if (r_type == R_390_GOTOFF16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
+ rc = apply_rela_bits(loc, val, 0, 16, 0, write);
else if (r_type == R_390_GOTOFF32)
- rc = apply_rela_bits(loc, val, 0, 32, 0);
+ rc = apply_rela_bits(loc, val, 0, 32, 0, write);
else if (r_type == R_390_GOTOFF64)
- rc = apply_rela_bits(loc, val, 0, 64, 0);
+ rc = apply_rela_bits(loc, val, 0, 64, 0, write);
break;
case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
rela->r_addend - loc;
if (r_type == R_390_GOTPC)
- rc = apply_rela_bits(loc, val, 1, 32, 0);
+ rc = apply_rela_bits(loc, val, 1, 32, 0, write);
else if (r_type == R_390_GOTPCDBL)
- rc = apply_rela_bits(loc, val, 1, 32, 1);
+ rc = apply_rela_bits(loc, val, 1, 32, 1, write);
break;
case R_390_COPY:
case R_390_GLOB_DAT: /* Create GOT entry. */
@@ -412,9 +426,10 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
return 0;
}
-int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
- struct module *me)
+ struct module *me,
+ void *(*write)(void *dest, const void *src, size_t len))
{
Elf_Addr base;
Elf_Sym *symtab;
@@ -430,13 +445,27 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
for (i = 0; i < n; i++, rela++) {
- rc = apply_rela(rela, base, symtab, strtab, me);
+ rc = apply_rela(rela, base, symtab, strtab, me, write);
if (rc)
return rc;
}
return 0;
}
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ bool early = me->state == MODULE_STATE_UNFORMED;
+ void *(*write)(void *, const void *, size_t) = memcpy;
+
+ if (!early)
+ write = s390_kernel_write;
+
+ return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
+ write);
+}
+
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
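The module.c rework threads a write callback through apply_rela() and apply_rela_bits(): while the module is still unformed, relocations are patched with a plain memcpy(), but once the module text may already be write-protected the same code goes through s390_kernel_write() (see the maccess.c hunk further down, which makes that function memcpy-compatible). A stand-alone sketch of the callback indirection, with invented names and a logging writer standing in for s390_kernel_write():

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for s390_kernel_write(): here it just logs and copies. */
    static void *guarded_write(void *dst, const void *src, size_t len)
    {
            printf("guarded write of %zu bytes\n", len);
            return memcpy(dst, src, len);
    }

    /* The patching helper takes the writer as a parameter, so the same
     * relocation code serves both the early (memcpy) and the late
     * (write-protected) case. */
    static void patch_u32(void *loc, unsigned int val,
                          void *(*write)(void *, const void *, size_t))
    {
            write(loc, &val, sizeof(val));
    }

    int main(void)
    {
            unsigned int text[2] = { 0, 0 };
            int early = 1;  /* would be me->state == MODULE_STATE_UNFORMED */
            void *(*write)(void *, const void *, size_t) =
                    early ? memcpy : guarded_write;

            patch_u32(&text[0], 0xdeadbeef, write);
            patch_u32(&text[1], 0xcafef00d, guarded_write);
            printf("%x %x\n", text[0], text[1]);
            return 0;
    }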
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 0a487fae763e..86c8d5370e7f 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -148,7 +148,6 @@ void s390_handle_mcck(void)
local_mcck_disable();
mcck = *this_cpu_ptr(&cpu_mcck);
memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
- clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable();
local_irq_restore(flags);
@@ -333,7 +332,7 @@ NOKPROBE_SYMBOL(s390_backup_mcck_info);
/*
* machine check handler.
*/
-void notrace s390_do_machine_check(struct pt_regs *regs)
+int notrace s390_do_machine_check(struct pt_regs *regs)
{
static int ipd_count;
static DEFINE_SPINLOCK(ipd_lock);
@@ -342,6 +341,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
unsigned long long tmp;
union mci mci;
unsigned long mcck_dam_code;
+ int mcck_pending = 0;
nmi_enter();
inc_irq_stat(NMI_NMI);
@@ -400,7 +400,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
*/
mcck->kill_task = 1;
mcck->mcck_code = mci.val;
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
/*
@@ -420,8 +420,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
mcck->stp_queue |= stp_sync_check();
if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
mcck->stp_queue |= stp_island_check();
- if (mcck->stp_queue)
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
/*
@@ -442,12 +441,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
if (mci.cp) {
/* Channel report word pending */
mcck->channel_report = 1;
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
if (mci.w) {
/* Warning pending */
mcck->warning = 1;
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
/*
@@ -462,7 +461,17 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
}
clear_cpu_flag(CIF_MCCK_GUEST);
+
+ if (user_mode(regs) && mcck_pending) {
+ nmi_exit();
+ return 1;
+ }
+
+ if (mcck_pending)
+ schedule_mcck_handler();
+
nmi_exit();
+ return 0;
}
NOKPROBE_SYMBOL(s390_do_machine_check);
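s390_do_machine_check() no longer sets CIF_MCCK_PENDING for the exit path to poll. It collects the work into a local mcck_pending flag, returns 1 when the machine check interrupted user mode (presumably so the low-level entry code, not shown here, can run the handler before returning), and otherwise schedules the handler explicitly. A small sketch of that flow, with invented stand-ins for the individual checks:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the real checks and actions. */
    static bool channel_report_pending(void) { return true; }
    static bool warning_pending(void)        { return false; }
    static void schedule_handler(void)       { puts("deferred to IPI handler"); }

    /* Sketch of the reworked flow: gather all "work to do" conditions into a
     * local flag, then either tell the caller to handle it (user mode) or
     * schedule the handler explicitly, instead of setting a CPU flag that the
     * exit path has to poll. */
    static int do_machine_check_sketch(bool user_mode)
    {
            int pending = 0;

            if (channel_report_pending())
                    pending = 1;
            if (warning_pending())
                    pending = 1;

            if (user_mode && pending)
                    return 1;       /* caller runs the handler on the way out */
            if (pending)
                    schedule_handler();
            return 0;
    }

    int main(void)
    {
            printf("kernel mode -> %d\n", do_machine_check_sketch(false));
            printf("user mode   -> %d\n", do_machine_check_sketch(true));
            return 0;
    }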
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 58faa12542a1..ce60a459a143 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -1416,7 +1415,7 @@ static const struct user_regset s390_regsets[] = {
};
static const struct user_regset_view user_s390_view = {
- .name = UTS_MACHINE,
+ .name = "s390x",
.e_machine = EM_S390,
.regsets = s390_regsets,
.n = ARRAY_SIZE(s390_regsets)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0f0b140b5558..5853c9872dfe 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -381,8 +381,7 @@ static void __init setup_lowcore_dat_off(void)
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
- lc->svc_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = (unsigned long) system_call;
lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
@@ -594,9 +593,10 @@ static void __init setup_memory_end(void)
#ifdef CONFIG_CRASH_DUMP
/*
- * When kdump is enabled, we have to ensure that no memory from
- * the area [0 - crashkernel memory size] and
- * [crashk_res.start - crashk_res.end] is set offline.
+ * When kdump is enabled, we have to ensure that no memory from the area
+ * [0 - crashkernel memory size] is set offline - it will be exchanged with
+ * the crashkernel memory region when kdump is triggered. The crashkernel
+ * memory region can never get offlined (pages are unmovable).
*/
static int kdump_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
@@ -607,11 +607,7 @@ static int kdump_mem_notifier(struct notifier_block *nb,
return NOTIFY_OK;
if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
return NOTIFY_BAD;
- if (arg->start_pfn > PFN_DOWN(crashk_res.end))
- return NOTIFY_OK;
- if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
- return NOTIFY_OK;
- return NOTIFY_BAD;
+ return NOTIFY_OK;
}
static struct notifier_block kdump_mem_nb = {
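With the crashkernel region itself now unmovable, kdump_mem_notifier() only needs to keep the low range [0, crashkernel size) online, because that range is exchanged with the crashkernel region when kdump triggers; the old checks against crashk_res start/end are dropped. A tiny sketch of the remaining check, using a made-up crashkernel size:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

    /* Placeholder for resource_size(&crashk_res). */
    static unsigned long crashkernel_size = 256UL << 20;   /* 256 MiB, made up */

    /* Mirror of the simplified kdump_mem_notifier() check: refuse to offline
     * any block that starts inside the crashkernel-sized low-memory area. */
    static bool may_offline(unsigned long start_pfn)
    {
            return start_pfn >= PFN_DOWN(crashkernel_size);
    }

    int main(void)
    {
            printf("pfn 0x100   -> %s\n", may_offline(0x100) ? "OK" : "BAD");
            printf("pfn 0x20000 -> %s\n", may_offline(0x20000) ? "OK" : "BAD");
            return 0;
    }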
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 10dbb12eb14d..e6be63ff162a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -61,6 +61,7 @@ enum {
ec_schedule = 0,
ec_call_function_single,
ec_stop_cpu,
+ ec_mcck_pending,
};
enum {
@@ -403,6 +404,11 @@ int smp_find_processor_id(u16 address)
return -1;
}
+void schedule_mcck_handler(void)
+{
+ pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
+}
+
bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
@@ -497,6 +503,8 @@ static void smp_handle_ext_call(void)
scheduler_ipi();
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
+ if (test_bit(ec_mcck_pending, &bits))
+ s390_handle_mcck();
}
static void do_ext_call_interrupt(struct ext_code ext_code,
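The ec_mcck_pending emergency-call bit is the delivery half of the nmi.c change: schedule_mcck_handler() sets the request bit via pcpu_ec_call(), and s390_handle_mcck() then runs from the external-call interrupt instead of being polled on every kernel exit. A rough user-space model of that set-a-bit-and-handle-in-the-IPI pattern using C11 atomics (the names and the signalling mechanism are simplifications, not the kernel's per-cpu machinery):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { EC_SCHEDULE, EC_CALL_SINGLE, EC_STOP, EC_MCCK_PENDING };

    static atomic_ulong ec_mask;    /* stand-in for the per-cpu ec_mask */

    static void handle_mcck(void) { puts("running deferred mcck handler"); }

    /* schedule_mcck_handler() analogue: set the request bit; on the real
     * machine this is followed by signalling the CPU. */
    static void schedule_mcck(void)
    {
            atomic_fetch_or(&ec_mask, 1UL << EC_MCCK_PENDING);
    }

    /* smp_handle_ext_call() analogue: consume all pending request bits. */
    static void handle_ext_call(void)
    {
            unsigned long bits = atomic_exchange(&ec_mask, 0);

            if (bits & (1UL << EC_MCCK_PENDING))
                    handle_mcck();
    }

    int main(void)
    {
            schedule_mcck();
            handle_ext_call();
            return 0;
    }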
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 4c0677fc8904..66e89b2866d7 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -205,7 +205,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
again:
rc = -EFAULT;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
uaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(uaddr))
@@ -234,7 +234,7 @@ again:
pte_unmap_unlock(ptep, ptelock);
unlock_page(page);
out:
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
if (rc == -EAGAIN) {
wait_on_page_writeback(page);
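From here on, most of the kvm and mm hunks are the tree-wide mmap_sem to mmap_lock conversion: down_read/up_read become mmap_read_lock/mmap_read_unlock, down_write/up_write become mmap_write_lock/mmap_write_unlock, down_write_killable becomes mmap_write_lock_killable, and comments are reworded to say mmap_lock. A user-space model of what the wrappers amount to, built on a pthread rwlock rather than the kernel's rw_semaphore:

    #include <pthread.h>
    #include <stdio.h>

    /* User-space model of the new API: the lock is still a reader/writer
     * lock, but callers go through named wrappers that take the mm itself. */
    struct mm_sketch {
            pthread_rwlock_t mmap_lock;
    };

    static void mmap_read_lock(struct mm_sketch *mm)    { pthread_rwlock_rdlock(&mm->mmap_lock); }
    static void mmap_read_unlock(struct mm_sketch *mm)  { pthread_rwlock_unlock(&mm->mmap_lock); }
    static void mmap_write_lock(struct mm_sketch *mm)   { pthread_rwlock_wrlock(&mm->mmap_lock); }
    static void mmap_write_unlock(struct mm_sketch *mm) { pthread_rwlock_unlock(&mm->mmap_lock); }

    int main(void)
    {
            struct mm_sketch mm = { .mmap_lock = PTHREAD_RWLOCK_INITIALIZER };

            mmap_read_lock(&mm);    /* was: down_read(&mm->mmap_sem) */
            puts("walking VMAs under the read lock");
            mmap_read_unlock(&mm);  /* was: up_read(&mm->mmap_sem) */

            mmap_write_lock(&mm);   /* was: down_write(&mm->mmap_sem) */
            puts("changing the address space under the write lock");
            mmap_write_unlock(&mm); /* was: up_write(&mm->mmap_sem) */
            return 0;
    }

Wrapping the lock behind helpers that take the mm is what made the conversion mechanical, and it leaves a single place to change if the underlying primitive or its instrumentation ever does.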
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index bcc9bdb39ba2..c4baefaa6e34 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -21,7 +21,6 @@
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -208,7 +207,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* it at vdso_base which is the "natural" base for it, but we might
* fail and end up putting it elsewhere.
*/
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -239,7 +238,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
rc = 0;
out_up:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return rc;
}
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 47a67a958107..6d6b57059493 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -9,8 +9,8 @@
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -1173,7 +1173,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
int dat_protection, fake;
int rc;
- down_read(&sg->mm->mmap_sem);
+ mmap_read_lock(sg->mm);
/*
* We don't want any guest-2 tables to change - so the parent
* tables/pointers we read stay valid - unshadowing is however
@@ -1202,6 +1202,6 @@ shadow_page:
if (!rc)
rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
ipte_unlock(vcpu);
- up_read(&sg->mm->mmap_sem);
+ mmap_read_unlock(sg->mm);
return rc;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a4d4ca2769bd..1608fd99bbee 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2767,10 +2767,10 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
struct page *page = NULL;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
&page, NULL, NULL);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
return page;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 33fea4488ef3..d47c19718615 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -31,11 +31,11 @@
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
@@ -763,9 +763,9 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
r = -EINVAL;
else {
r = 0;
- down_write(&kvm->mm->mmap_sem);
+ mmap_write_lock(kvm->mm);
kvm->mm->context.allow_gmap_hpage_1m = 1;
- up_write(&kvm->mm->mmap_sem);
+ mmap_write_unlock(kvm->mm);
/*
* We might have to create fake 4k page
* tables. To avoid that the hardware works on
@@ -1815,7 +1815,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (!keys)
return -ENOMEM;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1829,7 +1829,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
break;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (!r) {
r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
@@ -1873,7 +1873,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
goto out;
i = 0;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
while (i < args->count) {
unlocked = false;
@@ -1900,7 +1900,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
kvfree(keys);
return r;
@@ -2089,14 +2089,14 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
if (!values)
return -ENOMEM;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
if (peek)
ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
else
ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
if (kvm->arch.migration_mode)
args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
@@ -2146,7 +2146,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
goto out;
}
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -2161,12 +2161,12 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
set_pgste_bits(kvm->mm, hva, mask, pgstev);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
if (!kvm->mm->context.uses_cmm) {
- down_write(&kvm->mm->mmap_sem);
+ mmap_write_lock(kvm->mm);
kvm->mm->context.uses_cmm = 1;
- up_write(&kvm->mm->mmap_sem);
+ mmap_write_unlock(kvm->mm);
}
out:
vfree(bits);
@@ -2239,9 +2239,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
if (r)
break;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
r = gmap_mark_unmergeable();
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (r)
break;
@@ -4000,9 +4000,6 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
if (need_resched())
schedule();
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
-
if (!kvm_is_ucontrol(vcpu->kvm)) {
rc = kvm_s390_deliver_pending_interrupts(vcpu);
if (rc)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 893893642415..96ae368aa0a2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
@@ -20,7 +21,6 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
-#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
@@ -270,18 +270,18 @@ static int handle_iske(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = get_guest_storage_key(current->mm, vmaddr, &key);
if (rc) {
rc = fixup_user_fault(current, current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto retry;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -317,17 +317,17 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = reset_guest_reference_bit(current->mm, vmaddr);
if (rc < 0) {
rc = fixup_user_fault(current, current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto retry;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -385,7 +385,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
@@ -395,7 +395,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -1091,7 +1091,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
if (rc < 0) {
@@ -1099,7 +1099,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc == -EAGAIN)
@@ -1122,7 +1122,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
/*
- * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
+ * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
*/
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
@@ -1220,9 +1220,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
* already correct, we do nothing and avoid the lock.
*/
if (vcpu->kvm->mm->context.uses_cmm == 0) {
- down_write(&vcpu->kvm->mm->mmap_sem);
+ mmap_write_lock(vcpu->kvm->mm);
vcpu->kvm->mm->context.uses_cmm = 1;
- up_write(&vcpu->kvm->mm->mmap_sem);
+ mmap_write_unlock(vcpu->kvm->mm);
}
/*
* If we are here, we are supposed to have CMMA enabled in
@@ -1239,11 +1239,11 @@ static int handle_essa(struct kvm_vcpu *vcpu)
} else {
int srcu_idx;
- down_read(&vcpu->kvm->mm->mmap_sem);
+ mmap_read_lock(vcpu->kvm->mm);
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
i = __do_essa(vcpu, orc);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- up_read(&vcpu->kvm->mm->mmap_sem);
+ mmap_read_unlock(vcpu->kvm->mm);
if (i < 0)
return i;
/* Account for the possible extra cbrl entry */
@@ -1251,10 +1251,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
}
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
for (i = 0; i < entries; ++i)
__gmap_zap(gmap, cbrlo[i]);
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return 0;
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index ef05b4e167fb..9e9056cebfcf 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1000,9 +1000,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
handle_last_fault(vcpu, vsie_page);
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
-
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
/* save current guest state of bp isolation override */
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index d4aa10795605..daca7bad66de 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);
static void __udelay_disabled(unsigned long long usecs)
{
- unsigned long cr0, cr0_new, psw_mask;
+ unsigned long cr0, cr0_new, psw_mask, flags;
struct s390_idle_data idle;
u64 end;
@@ -45,7 +45,9 @@ static void __udelay_disabled(unsigned long long usecs)
psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
set_clock_comparator(end);
set_cpu_flag(CIF_IGNORE_IRQ);
+ local_irq_save(flags);
psw_idle(&idle, psw_mask);
+ local_irq_restore(flags);
clear_cpu_flag(CIF_IGNORE_IRQ);
set_clock_comparator(S390_lowcore.clock_comparator);
__ctl_load(cr0, 0, 0);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 5d67b81c704a..c2ac9b8ae612 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -6,7 +6,6 @@
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
static unsigned long max_addr;
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index fd0dae9d10f4..9e0aa7aa03ba 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -20,9 +20,9 @@
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <linux/refcount.h>
+#include <linux/pgtable.h>
#include <asm/diag.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index dedc28be27ab..6a24751557f0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -33,7 +33,6 @@
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -434,7 +433,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
gmap = NULL;
if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
@@ -508,14 +507,14 @@ retry:
if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
(flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* FAULT_FLAG_RETRY_NOWAIT has been set,
- * mmap_sem has not been released */
+ * mmap_lock has not been released */
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
flags &= ~FAULT_FLAG_RETRY_NOWAIT;
flags |= FAULT_FLAG_TRIED;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
goto retry;
}
}
@@ -533,7 +532,7 @@ retry:
}
fault = 0;
out_up:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out:
return fault;
}
@@ -825,22 +824,22 @@ void do_secure_storage_access(struct pt_regs *regs)
switch (get_fault_type(regs)) {
case USER_FAULT:
mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
break;
}
page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
if (IS_ERR_OR_NULL(page)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
break;
}
if (arch_make_page_accessible(page))
send_sig(SIGSEGV, current, 0);
put_page(page);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
break;
case KERNEL_FAULT:
page = phys_to_page(addr);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 4b6903fbba4a..190357ff86b3 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -17,8 +17,8 @@
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
@@ -300,7 +300,7 @@ struct gmap *gmap_get_enabled(void)
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
- * gmap_alloc_table is assumed to be called with mmap_sem held
+ * gmap_alloc_table is assumed to be called with mmap_lock held
*/
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long init, unsigned long gaddr)
@@ -405,10 +405,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
return -EINVAL;
flush = 0;
- down_write(&gmap->mm->mmap_sem);
+ mmap_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE)
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
- up_write(&gmap->mm->mmap_sem);
+ mmap_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
return 0;
@@ -438,7 +438,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
return -EINVAL;
flush = 0;
- down_write(&gmap->mm->mmap_sem);
+ mmap_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE) {
/* Remove old translation */
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
@@ -448,7 +448,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
(void *) from + off))
break;
}
- up_write(&gmap->mm->mmap_sem);
+ mmap_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
if (off >= len)
@@ -466,7 +466,7 @@ EXPORT_SYMBOL_GPL(gmap_map_segment);
* Returns user space address which corresponds to the guest address or
* -EFAULT if no such mapping exists.
* This function does not establish potentially missing page table entries.
- * The mmap_sem of the mm that belongs to the address space must be held
+ * The mmap_lock of the mm that belongs to the address space must be held
* when this function gets called.
*
* Note: Can also be called for shadow gmaps.
@@ -495,9 +495,9 @@ unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
unsigned long rc;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
rc = __gmap_translate(gmap, gaddr);
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
@@ -534,7 +534,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
*
* Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
* if the vm address is already mapped to a different guest segment.
- * The mmap_sem of the mm that belongs to the address space must be held
+ * The mmap_lock of the mm that belongs to the address space must be held
* when this function gets called.
*/
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
@@ -640,7 +640,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
int rc;
bool unlocked;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
retry:
unlocked = false;
@@ -655,7 +655,7 @@ retry:
goto out_up;
}
/*
- * In the case that fixup_user_fault unlocked the mmap_sem during
+ * In the case that fixup_user_fault unlocked the mmap_lock during
* faultin redo __gmap_translate to not race with a map/unmap_segment.
*/
if (unlocked)
@@ -663,13 +663,13 @@ retry:
rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
/*
- * this function is assumed to be called with mmap_sem held
+ * this function is assumed to be called with mmap_lock held
*/
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
@@ -696,7 +696,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
unsigned long gaddr, vmaddr, size;
struct vm_area_struct *vma;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
for (gaddr = from; gaddr < to;
gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
/* Find the vm address for the guest address */
@@ -719,7 +719,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
}
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);
@@ -882,7 +882,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
return -EFAULT;
if (unlocked)
- /* lost mmap_sem, caller has to retry __gmap_translate */
+ /* lost mmap_lock, caller has to retry __gmap_translate */
return 0;
/* Connect the page tables */
return __gmap_link(gmap, gaddr, vmaddr);
@@ -953,7 +953,7 @@ static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
* -EAGAIN if a fixup is needed
* -EINVAL if unsupported notifier bits have been specified
*
- * Expected to be called with sg->mm->mmap_sem in read and
+ * Expected to be called with sg->mm->mmap_lock in read and
* guest_table_lock held.
*/
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
@@ -999,7 +999,7 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
* Returns 0 if successfully protected, -ENOMEM if out of memory and
* -EAGAIN if a fixup is needed.
*
- * Expected to be called with sg->mm->mmap_sem in read
+ * Expected to be called with sg->mm->mmap_lock in read
*/
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
pmd_t *pmdp, int prot, unsigned long bits)
@@ -1035,7 +1035,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
* Returns 0 if successfully protected, -ENOMEM if out of memory and
* -EFAULT if gaddr is invalid (or mapping for shadows is missing).
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
@@ -1106,9 +1106,9 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
return -EINVAL;
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
@@ -1124,7 +1124,7 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
* if reading using the virtual address failed. -EINVAL if called on a gmap
* shadow.
*
- * Called with gmap->mm->mmap_sem in read.
+ * Called with gmap->mm->mmap_lock in read.
*/
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
@@ -1696,11 +1696,11 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
}
spin_unlock(&parent->shadow_lock);
/* protect after insertion, so it will get properly invalidated */
- down_read(&parent->mm->mmap_sem);
+ mmap_read_lock(parent->mm);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
PROT_READ, GMAP_NOTIFY_SHADOW);
- up_read(&parent->mm->mmap_sem);
+ mmap_read_unlock(parent->mm);
spin_lock(&parent->shadow_lock);
new->initialized = true;
if (rc) {
@@ -1729,7 +1729,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
int fake)
@@ -1813,7 +1813,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
int fake)
@@ -1897,7 +1897,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
int fake)
@@ -1981,7 +1981,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
* Returns 0 if the shadow page table was found and -EAGAIN if the page
* table was not found.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
unsigned long *pgt, int *dat_protection,
@@ -2021,7 +2021,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
* shadow table structure is incomplete, -ENOMEM if out of memory,
* -EFAULT if an address in the parent gmap could not be resolved and
*
- * Called with gmap->mm->mmap_sem in read
+ * Called with gmap->mm->mmap_lock in read
*/
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
int fake)
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
@@ -2543,12 +2543,12 @@ int s390_enable_sie(void)
/* Fail if the page tables are 2K */
if (!mm_alloc_pgste(mm))
return -EINVAL;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -2617,7 +2617,7 @@ int s390_enable_skey(void)
struct mm_struct *mm = current->mm;
int rc = 0;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
if (mm_uses_skeys(mm))
goto out_up;
@@ -2630,7 +2630,7 @@ int s390_enable_skey(void)
walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
@@ -2651,9 +2651,9 @@ static const struct mm_walk_ops reset_cmma_walk_ops = {
void s390_reset_cmma(struct mm_struct *mm)
{
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
@@ -2685,9 +2685,9 @@ void s390_reset_acc(struct mm_struct *mm)
*/
if (!mmget_not_zero(mm))
return;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_acc);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index b11bcf4da531..6dc7c3b60ef6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -33,7 +33,6 @@
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 06345616a646..99dd1c63a065 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -2,8 +2,8 @@
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index de7ca4b6718f..22a0be655f27 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -55,19 +55,22 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
*/
static DEFINE_SPINLOCK(s390_kernel_write_lock);
-void notrace s390_kernel_write(void *dst, const void *src, size_t size)
+notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
+ void *tmp = dst;
unsigned long flags;
long copied;
spin_lock_irqsave(&s390_kernel_write_lock, flags);
while (size) {
- copied = s390_kernel_write_odd(dst, src, size);
- dst += copied;
+ copied = s390_kernel_write_odd(tmp, src, size);
+ tmp += copied;
src += copied;
size -= copied;
}
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
+
+ return dst;
}
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
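s390_kernel_write() now returns its destination so its signature matches memcpy() and it can serve as the relocation write callback in module.c above; internally the copy loop advances a local cursor instead of the dst argument. A minimal memcpy-compatible chunked writer, with write_chunk() as an invented stand-in for s390_kernel_write_odd():

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for s390_kernel_write_odd(): copy at most a few bytes per call
     * and report how many were written. */
    static long write_chunk(void *dst, const void *src, size_t size)
    {
            size_t n = size < 4 ? size : 4;

            memcpy(dst, src, n);
            return (long)n;
    }

    /* memcpy-compatible wrapper: loop over chunks with a local cursor and hand
     * the original destination back to the caller. */
    static void *kernel_write_sketch(void *dst, const void *src, size_t size)
    {
            void *tmp = dst;

            while (size) {
                    long copied = write_chunk(tmp, src, size);

                    tmp = (char *)tmp + copied;
                    src = (const char *)src + copied;
                    size -= copied;
            }
            return dst;
    }

    int main(void)
    {
            char buf[16] = { 0 };

            printf("%s\n", (char *)kernel_write_sketch(buf, "hello, world", 13));
            return 0;
    }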
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index e22c06d5f206..c5c52ec2b46f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -7,7 +7,6 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
@@ -86,7 +85,7 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
{
pte_t *ptep, new;
- ptep = pte_offset(pmdp, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
do {
new = *ptep;
if (pte_none(new))
@@ -338,19 +337,11 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
int nr, i, j;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pte_t *pte;
for (i = 0; i < numpages;) {
address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- p4d = p4d_offset(pgd, address);
- pud = pud_offset(p4d, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
+ pte = virt_to_kpte(address);
nr = (unsigned long)pte >> ilog2(sizeof(long));
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
nr = min(numpages - i, nr);
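__kernel_map_pages() drops its open-coded pgd/p4d/pud/pmd walk in favour of the generic virt_to_kpte() helper, which performs the same descent to the kernel PTE internally. For illustration only, a toy two-level table showing the kind of descent such a helper hides (the types, shifts and sizes are invented and unrelated to s390's real page-table format):

    #include <stdio.h>

    #define ENTRIES 16

    /* Toy two-level page table: index the top level, follow the pointer,
     * then index the leaf level. */
    struct leaf_table { unsigned long pte[ENTRIES]; };
    struct top_table  { struct leaf_table *dir[ENTRIES]; };

    static unsigned long *toy_virt_to_pte(struct top_table *top, unsigned long addr)
    {
            unsigned long top_idx  = (addr >> 8) & (ENTRIES - 1);
            unsigned long leaf_idx = (addr >> 4) & (ENTRIES - 1);
            struct leaf_table *leaf = top->dir[top_idx];

            return leaf ? &leaf->pte[leaf_idx] : NULL;
    }

    int main(void)
    {
            struct top_table top = { 0 };
            struct leaf_table leaf = { 0 };
            unsigned long *pte;

            top.dir[2] = &leaf;
            pte = toy_virt_to_pte(&top, 0x2a0);     /* top index 2, leaf index 10 */
            if (pte) {
                    *pte = 0x1234;
                    printf("pte = %lx\n", *pte);
            }
            return 0;
    }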
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index fff169d64711..11d2c8395e2a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -114,7 +114,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
spin_lock_bh(&mm->page_table_lock);
/*
- * This routine gets called with mmap_sem lock held and there is
+ * This routine gets called with mmap_lock lock held and there is
* no reason to optimize for the case of otherwise. However, if
* that would ever change, the below check will let us know.
*/
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9ebd01219812..2e0cc19f4cd7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -19,7 +19,6 @@
#include <linux/ksm.h>
#include <linux/mman.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f810930aff42..8b6282cf7d13 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 748626a33028..b4e3c84772a1 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
- pci_event.o pci_debug.o pci_insn.o pci_mmio.o
+ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
+ pci_bus.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 94ca121933de..3902c9f6f2d6 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -36,18 +36,21 @@
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
+#include "pci_bus.h"
+
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);
-static unsigned int zpci_num_domains_allocated;
#define ZPCI_IOMAP_ENTRIES \
min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
ZPCI_IOMAP_MAX_ENTRIES)
+unsigned int s390_pci_no_rid;
+
static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
@@ -88,17 +91,12 @@ void zpci_remove_reserved_devices(void)
spin_unlock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &remove, entry)
- zpci_remove_device(zdev);
-}
-
-static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
-{
- return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
+ zpci_zdev_put(zdev);
}
int pci_domain_nr(struct pci_bus *bus)
{
- return ((struct zpci_dev *) bus->sysdata)->domain;
+ return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);
@@ -228,28 +226,29 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
zpci_memcpy_toio(to, from, count);
}
-void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
+void __iomem *ioremap(phys_addr_t addr, size_t size)
{
+ unsigned long offset, vaddr;
struct vm_struct *area;
- unsigned long offset;
+ phys_addr_t last_addr;
- if (!size)
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
return NULL;
if (!static_branch_unlikely(&have_mio))
- return (void __iomem *) ioaddr;
+ return (void __iomem *) addr;
- offset = ioaddr & ~PAGE_MASK;
- ioaddr &= PAGE_MASK;
+ offset = addr & ~PAGE_MASK;
+ addr &= PAGE_MASK;
size = PAGE_ALIGN(size + offset);
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
- if (ioremap_page_range((unsigned long) area->addr,
- (unsigned long) area->addr + size,
- ioaddr, PAGE_KERNEL)) {
- vunmap(area->addr);
+ vaddr = (unsigned long) area->addr;
+ if (ioremap_page_range(vaddr, vaddr + size, addr, PAGE_KERNEL)) {
+ free_vm_area(area);
return NULL;
}
return (void __iomem *) ((unsigned long) area->addr + offset);
@@ -373,29 +372,17 @@ EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus);
- int ret;
+ struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
- if (!zdev || devfn != ZPCI_DEVFN)
- ret = -ENODEV;
- else
- ret = zpci_cfg_load(zdev, where, val, size);
-
- return ret;
+ return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus);
- int ret;
+ struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
- if (!zdev || devfn != ZPCI_DEVFN)
- ret = -ENODEV;
- else
- ret = zpci_cfg_store(zdev, where, val, size);
-
- return ret;
+ return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}
static struct pci_ops pci_root_ops = {
@@ -506,15 +493,15 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
return r;
}
-static int zpci_setup_bus_resources(struct zpci_dev *zdev,
- struct list_head *resources)
+int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ struct list_head *resources)
{
unsigned long addr, size, flags;
struct resource *res;
int i, entry;
snprintf(zdev->res_name, sizeof(zdev->res_name),
- "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);
+ "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!zdev->bars[i].size)
@@ -608,98 +595,53 @@ void pcibios_disable_device(struct pci_dev *pdev)
zpci_debug_exit_device(zdev);
}
-static int zpci_alloc_domain(struct zpci_dev *zdev)
+static int __zpci_register_domain(int domain)
{
spin_lock(&zpci_domain_lock);
- if (zpci_num_domains_allocated > (ZPCI_NR_DEVICES - 1)) {
+ if (test_bit(domain, zpci_domain)) {
spin_unlock(&zpci_domain_lock);
- pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
- zdev->fid, ZPCI_NR_DEVICES);
- return -ENOSPC;
+ pr_err("Domain %04x is already assigned\n", domain);
+ return -EEXIST;
}
+ set_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+ return domain;
+}
- if (zpci_unique_uid) {
- zdev->domain = (u16) zdev->uid;
- if (zdev->domain == 0) {
- pr_warn("UID checking is active but no UID is set for PCI function %08x, so automatic domain allocation is used instead\n",
- zdev->fid);
- update_uid_checking(false);
- goto auto_allocate;
- }
+static int __zpci_alloc_domain(void)
+{
+ int domain;
- if (test_bit(zdev->domain, zpci_domain)) {
- spin_unlock(&zpci_domain_lock);
- pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
- zdev->fid, zdev->domain);
- return -EEXIST;
- }
- set_bit(zdev->domain, zpci_domain);
- zpci_num_domains_allocated++;
- spin_unlock(&zpci_domain_lock);
- return 0;
- }
-auto_allocate:
+ spin_lock(&zpci_domain_lock);
/*
* We can always auto allocate domains below ZPCI_NR_DEVICES.
* There is either a free domain or we have reached the maximum in
* which case we would have bailed earlier.
*/
- zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
- set_bit(zdev->domain, zpci_domain);
- zpci_num_domains_allocated++;
+ domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
+ set_bit(domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
- return 0;
+ return domain;
}
-static void zpci_free_domain(struct zpci_dev *zdev)
+int zpci_alloc_domain(int domain)
{
- spin_lock(&zpci_domain_lock);
- clear_bit(zdev->domain, zpci_domain);
- zpci_num_domains_allocated--;
- spin_unlock(&zpci_domain_lock);
+ if (zpci_unique_uid) {
+ if (domain)
+ return __zpci_register_domain(domain);
+ pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
+ update_uid_checking(false);
+ }
+ return __zpci_alloc_domain();
}
-void pcibios_remove_bus(struct pci_bus *bus)
+void zpci_free_domain(int domain)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus);
-
- zpci_exit_slot(zdev);
- zpci_cleanup_bus_resources(zdev);
- zpci_destroy_iommu(zdev);
- zpci_free_domain(zdev);
-
- spin_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- spin_unlock(&zpci_list_lock);
-
- zpci_dbg(3, "rem fid:%x\n", zdev->fid);
- kfree(zdev);
+ spin_lock(&zpci_domain_lock);
+ clear_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
}
-static int zpci_scan_bus(struct zpci_dev *zdev)
-{
- LIST_HEAD(resources);
- int ret;
-
- ret = zpci_setup_bus_resources(zdev, &resources);
- if (ret)
- goto error;
-
- zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
- zdev, &resources);
- if (!zdev->bus) {
- ret = -EIO;
- goto error;
- }
- zdev->bus->max_bus_speed = zdev->max_bus_speed;
- pci_bus_add_devices(zdev->bus);
- return 0;
-
-error:
- zpci_cleanup_bus_resources(zdev);
- pci_free_resource_list(&resources);
- return ret;
-}
int zpci_enable_device(struct zpci_dev *zdev)
{
@@ -734,13 +676,15 @@ int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
- rc = zpci_alloc_domain(zdev);
- if (rc)
- goto out;
+ kref_init(&zdev->kref);
+
+ spin_lock(&zpci_list_lock);
+ list_add_tail(&zdev->entry, &zpci_list);
+ spin_unlock(&zpci_list_lock);
rc = zpci_init_iommu(zdev);
if (rc)
- goto out_free;
+ goto out;
mutex_init(&zdev->lock);
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
@@ -748,36 +692,59 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out_destroy_iommu;
}
- rc = zpci_scan_bus(zdev);
+
+ rc = zpci_bus_device_register(zdev, &pci_root_ops);
if (rc)
goto out_disable;
- spin_lock(&zpci_list_lock);
- list_add_tail(&zdev->entry, &zpci_list);
- spin_unlock(&zpci_list_lock);
-
- zpci_init_slot(zdev);
-
return 0;
out_disable:
if (zdev->state == ZPCI_FN_STATE_ONLINE)
zpci_disable_device(zdev);
+
out_destroy_iommu:
zpci_destroy_iommu(zdev);
-out_free:
- zpci_free_domain(zdev);
out:
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
return rc;
}
-void zpci_remove_device(struct zpci_dev *zdev)
+void zpci_release_device(struct kref *kref)
{
- if (!zdev->bus)
- return;
+ struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+
+ if (zdev->zbus->bus) {
+ struct pci_dev *pdev;
+
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ if (pdev)
+ pci_stop_and_remove_bus_device_locked(pdev);
+ }
+
+ switch (zdev->state) {
+ case ZPCI_FN_STATE_ONLINE:
+ case ZPCI_FN_STATE_CONFIGURED:
+ zpci_disable_device(zdev);
+ fallthrough;
+ case ZPCI_FN_STATE_STANDBY:
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+ zpci_cleanup_bus_resources(zdev);
+ zpci_bus_device_unregister(zdev);
+ zpci_destroy_iommu(zdev);
+ fallthrough;
+ default:
+ break;
+ }
- pci_stop_root_bus(zdev->bus);
- pci_remove_root_bus(zdev->bus);
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zpci_dbg(3, "rem fid:%x\n", zdev->fid);
+ kfree(zdev);
}
int zpci_report_error(struct pci_dev *pdev,
@@ -844,6 +811,10 @@ char * __init pcibios_setup(char *str)
s390_pci_force_floating = 1;
return NULL;
}
+ if (!strcmp(str, "norid")) {
+ s390_pci_no_rid = 1;
+ return NULL;
+ }
return str;
}
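The pci.c hunks above replace the old per-device domain bookkeeping with a plain bitmap: __zpci_register_domain() claims a caller-chosen number (for example one derived from the UID) and fails with -EEXIST if it is taken, while __zpci_alloc_domain() hands out the first free number. Below is a minimal standalone sketch of that claim-or-allocate pattern, assuming a pthread mutex and a single 64-bit word in place of the kernel spinlock and bitmap; all names are illustrative, not the kernel API.

/* Claim-or-allocate domain numbers from a small bitmap.
 * A pthread mutex replaces the kernel spinlock; helpers are hand-rolled. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NR_DOMAINS 64

static unsigned long long domain_map;	/* one bit per domain */
static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Claim a caller-chosen domain; fails if it is already in use. */
static int register_domain(int domain)
{
	int rc = domain;

	pthread_mutex_lock(&domain_lock);
	if (domain_map & (1ULL << domain))
		rc = -EEXIST;		/* already assigned */
	else
		domain_map |= 1ULL << domain;
	pthread_mutex_unlock(&domain_lock);
	return rc;
}

/* Hand out the lowest free domain number. */
static int alloc_domain(void)
{
	int domain;

	pthread_mutex_lock(&domain_lock);
	for (domain = 0; domain < NR_DOMAINS; domain++)
		if (!(domain_map & (1ULL << domain)))
			break;
	if (domain < NR_DOMAINS)
		domain_map |= 1ULL << domain;
	else
		domain = -ENOSPC;	/* bitmap full */
	pthread_mutex_unlock(&domain_lock);
	return domain;
}

int main(void)
{
	printf("claimed: %d\n", register_domain(5));	/* 5 */
	printf("claimed: %d\n", register_domain(5));	/* -EEXIST */
	printf("auto:    %d\n", alloc_domain());	/* 0 */
	return 0;
}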
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
new file mode 100644
index 000000000000..642a99384688
--- /dev/null
+++ b/arch/s390/pci/pci_bus.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/jump_label.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+
+#include <asm/pci_clp.h>
+#include <asm/pci_dma.h>
+
+#include "pci_bus.h"
+
+static LIST_HEAD(zbus_list);
+static DEFINE_SPINLOCK(zbus_list_lock);
+static int zpci_nb_devices;
+
+/* zpci_bus_scan
+ * @zbus: the zbus holding the zdevices
+ * @ops: the pci operations
+ *
+ * The domain number must be set before pci_scan_root_bus is called.
+ * This function can only be called once the domain is known, that is,
+ * when function 0 is discovered.
+ */
+static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops)
+{
+ struct pci_bus *bus;
+ int rc;
+
+ rc = zpci_alloc_domain(domain);
+ if (rc < 0)
+ return rc;
+ zbus->domain_nr = rc;
+
+ bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
+ if (!bus) {
+ zpci_free_domain(zbus->domain_nr);
+ return -EFAULT;
+ }
+
+ zbus->bus = bus;
+ pci_bus_add_devices(bus);
+ return 0;
+}
+
+static void zpci_bus_release(struct kref *kref)
+{
+ struct zpci_bus *zbus = container_of(kref, struct zpci_bus, kref);
+
+ if (zbus->bus) {
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(zbus->bus);
+
+ zpci_free_domain(zbus->domain_nr);
+ pci_free_resource_list(&zbus->resources);
+
+ pci_remove_root_bus(zbus->bus);
+ pci_unlock_rescan_remove();
+ }
+
+ spin_lock(&zbus_list_lock);
+ list_del(&zbus->bus_next);
+ spin_unlock(&zbus_list_lock);
+ kfree(zbus);
+}
+
+static void zpci_bus_put(struct zpci_bus *zbus)
+{
+ kref_put(&zbus->kref, zpci_bus_release);
+}
+
+static struct zpci_bus *zpci_bus_get(int pchid)
+{
+ struct zpci_bus *zbus;
+
+ spin_lock(&zbus_list_lock);
+ list_for_each_entry(zbus, &zbus_list, bus_next) {
+ if (pchid == zbus->pchid) {
+ kref_get(&zbus->kref);
+ goto out_unlock;
+ }
+ }
+ zbus = NULL;
+out_unlock:
+ spin_unlock(&zbus_list_lock);
+ return zbus;
+}
+
+static struct zpci_bus *zpci_bus_alloc(int pchid)
+{
+ struct zpci_bus *zbus;
+
+ zbus = kzalloc(sizeof(*zbus), GFP_KERNEL);
+ if (!zbus)
+ return NULL;
+
+ zbus->pchid = pchid;
+ INIT_LIST_HEAD(&zbus->bus_next);
+ spin_lock(&zbus_list_lock);
+ list_add_tail(&zbus->bus_next, &zbus_list);
+ spin_unlock(&zbus_list_lock);
+
+ kref_init(&zbus->kref);
+ INIT_LIST_HEAD(&zbus->resources);
+
+ zbus->bus_resource.start = 0;
+ zbus->bus_resource.end = ZPCI_BUS_NR;
+ zbus->bus_resource.flags = IORESOURCE_BUS;
+ pci_add_resource(&zbus->resources, &zbus->bus_resource);
+
+ return zbus;
+}
+
+#ifdef CONFIG_PCI_IOV
+static int zpci_bus_link_virtfn(struct pci_dev *pdev,
+ struct pci_dev *virtfn, int vfid)
+{
+ int rc;
+
+ virtfn->physfn = pci_dev_get(pdev);
+ rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
+ if (rc) {
+ pci_dev_put(pdev);
+ virtfn->physfn = NULL;
+ return rc;
+ }
+ return 0;
+}
+
+static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
+ struct pci_dev *virtfn, int vfn)
+{
+ int i, cand_devfn;
+ struct zpci_dev *zdev;
+ struct pci_dev *pdev;
+ int vfid = vfn - 1; /* Linux' vfids start at 0, vfns start at 1 */
+ int rc = 0;
+
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ WARN_ON(vfid < 0);
+ /* If the parent PF for the given VF is also configured in the
+ * instance, it must be on the same zbus.
+ * We can then identify the parent PF by checking what
+ * devfn the VF would have if it belonged to that PF using the PF's
+ * stride and offset. Only if this candidate devfn matches the
+ * actual devfn will we link both functions.
+ */
+ for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) {
+ zdev = zbus->function[i];
+ if (zdev && zdev->is_physfn) {
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
+ if (cand_devfn == virtfn->devfn) {
+ rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
+ break;
+ }
+ }
+ }
+ return rc;
+}
+#else
+static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
+ struct pci_dev *virtfn, int vfn)
+{
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ return 0;
+}
+#endif
+
+static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+ struct pci_bus *bus;
+ struct resource_entry *window, *n;
+ struct resource *res;
+ struct pci_dev *pdev;
+ int rc;
+
+ bus = zbus->bus;
+ if (!bus)
+ return -EINVAL;
+
+ pdev = pci_get_slot(bus, zdev->devfn);
+ if (pdev) {
+ /* Device is already known. */
+ pci_dev_put(pdev);
+ return 0;
+ }
+
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ return rc;
+ zdev->has_hp_slot = 1;
+
+ resource_list_for_each_entry_safe(window, n, &zbus->resources) {
+ res = window->res;
+ pci_bus_add_resource(bus, res, 0);
+ }
+
+ pdev = pci_scan_single_device(bus, zdev->devfn);
+ if (pdev) {
+ if (!zdev->is_physfn) {
+ rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
+ if (rc)
+ goto failed_with_pdev;
+ }
+ pci_bus_add_device(pdev);
+ }
+ return 0;
+
+failed_with_pdev:
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ return rc;
+}
+
+static void zpci_bus_add_devices(struct zpci_bus *zbus)
+{
+ int i;
+
+ for (i = 1; i < ZPCI_FUNCTIONS_PER_BUS; i++)
+ if (zbus->function[i])
+ zpci_bus_add_device(zbus, zbus->function[i]);
+
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zbus->bus);
+ pci_unlock_rescan_remove();
+}
+
+int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+{
+ struct zpci_bus *zbus = NULL;
+ int rc = -EBADF;
+
+ if (zpci_nb_devices == ZPCI_NR_DEVICES) {
+ pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
+ zdev->fid, ZPCI_NR_DEVICES);
+ return -ENOSPC;
+ }
+ zpci_nb_devices++;
+
+ if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
+ return -EINVAL;
+
+ if (!s390_pci_no_rid && zdev->rid_available)
+ zbus = zpci_bus_get(zdev->pchid);
+
+ if (!zbus) {
+ zbus = zpci_bus_alloc(zdev->pchid);
+ if (!zbus)
+ return -ENOMEM;
+ }
+
+ zdev->zbus = zbus;
+ if (zbus->function[zdev->devfn]) {
+ pr_err("devfn %04x is already assigned\n", zdev->devfn);
+ goto error; /* rc already set */
+ }
+ zbus->function[zdev->devfn] = zdev;
+
+ zpci_setup_bus_resources(zdev, &zbus->resources);
+
+ if (zbus->bus) {
+ if (!zbus->multifunction) {
+ WARN_ONCE(1, "zbus is not multifunction\n");
+ goto error_bus;
+ }
+ if (!zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set for multifunction\n");
+ goto error_bus;
+ }
+ rc = zpci_bus_add_device(zbus, zdev);
+ if (rc)
+ goto error_bus;
+ } else if (zdev->devfn == 0) {
+ if (zbus->multifunction && !zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set on function 0 for multifunction\n");
+ goto error_bus;
+ }
+ rc = zpci_bus_scan(zbus, (u16)zdev->uid, ops);
+ if (rc)
+ goto error_bus;
+ zpci_bus_add_devices(zbus);
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ goto error_bus;
+ zdev->has_hp_slot = 1;
+ zbus->multifunction = zdev->rid_available;
+ zbus->max_bus_speed = zdev->max_bus_speed;
+ } else {
+ zbus->multifunction = 1;
+ }
+
+ return 0;
+
+error_bus:
+ zpci_nb_devices--;
+ zbus->function[zdev->devfn] = NULL;
+error:
+ pr_err("Adding PCI function %08x failed\n", zdev->fid);
+ zpci_bus_put(zbus);
+ return rc;
+}
+
+void zpci_bus_device_unregister(struct zpci_dev *zdev)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+
+ zpci_nb_devices--;
+ zbus->function[zdev->devfn] = NULL;
+ zpci_bus_put(zbus);
+}
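The new pci_bus.c above shares one zpci_bus per physical channel id (pchid): zpci_bus_get() walks a locked list and takes a reference on a match, zpci_bus_alloc() creates and registers a bus on a miss, and zpci_bus_put() drops the reference so the last put tears the bus down. The standalone sketch below shows the same lookup-or-create-with-refcount shape, assuming a pthread mutex and a hand-rolled counter in place of the kernel spinlock and kref; the types and names are invented for the example.

/* Lookup-or-create of a shared, refcounted bus object per pchid. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bus {
	int pchid;
	int refcount;
	struct bus *next;
};

static struct bus *bus_list;
static pthread_mutex_t bus_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find an existing bus for this pchid and take a reference, or return NULL. */
static struct bus *bus_get(int pchid)
{
	struct bus *b;

	pthread_mutex_lock(&bus_list_lock);
	for (b = bus_list; b; b = b->next)
		if (b->pchid == pchid) {
			b->refcount++;		/* stands in for kref_get() */
			break;
		}
	pthread_mutex_unlock(&bus_list_lock);
	return b;
}

/* Create a new bus, add it to the list and return it with one reference. */
static struct bus *bus_alloc(int pchid)
{
	struct bus *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->pchid = pchid;
	b->refcount = 1;			/* stands in for kref_init() */
	pthread_mutex_lock(&bus_list_lock);
	b->next = bus_list;
	bus_list = b;
	pthread_mutex_unlock(&bus_list_lock);
	return b;
}

/* Drop a reference; the last put unlinks and frees the bus. */
static void bus_put(struct bus *b)
{
	pthread_mutex_lock(&bus_list_lock);
	if (--b->refcount == 0) {
		struct bus **pp;

		for (pp = &bus_list; *pp; pp = &(*pp)->next)
			if (*pp == b) {
				*pp = b->next;
				break;
			}
		free(b);
	}
	pthread_mutex_unlock(&bus_list_lock);
}

int main(void)
{
	struct bus *b = bus_get(0x10);		/* miss: list is empty */

	if (!b)
		b = bus_alloc(0x10);		/* create and register */
	if (!b)
		return 1;
	printf("pchid %#x, refcount %d\n", b->pchid, b->refcount);
	bus_put(b);				/* last put frees it */
	return 0;
}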
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
new file mode 100644
index 000000000000..89be3c354b7b
--- /dev/null
+++ b/arch/s390/pci/pci_bus.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+
+int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops);
+void zpci_bus_device_unregister(struct zpci_dev *zdev);
+int zpci_bus_init(void);
+
+void zpci_release_device(struct kref *kref);
+static inline void zpci_zdev_put(struct zpci_dev *zdev)
+{
+ kref_put(&zdev->kref, zpci_release_device);
+}
+
+int zpci_alloc_domain(int domain);
+void zpci_free_domain(int domain);
+int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ struct list_head *resources);
+
+static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct zpci_bus *zbus = bus->sysdata;
+
+ return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
+}
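pci_bus.h exposes zpci_zdev_put(), which drops the device reference and lets kref run zpci_release_device() on the last put; that callback recovers the zpci_dev from the embedded kref with container_of(). The sketch below reproduces that release-callback pattern in plain C with a simplified, non-atomic refcount; the struct and helper names are made up for the example, not the kernel's kref API.

/* Embedded refcount with a release callback, recovered via container_of(). */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref {
	int count;			/* simplified, non-atomic refcount */
	void (*release)(struct ref *ref);
};

struct zdev_like {
	unsigned int fid;
	struct ref kref;		/* embedded, as in struct zpci_dev */
};

/* Drop one reference; the last drop runs the release callback. */
static void ref_put(struct ref *ref)
{
	if (--ref->count == 0)
		ref->release(ref);
}

/* Recover the enclosing object from the embedded refcount and free it. */
static void zdev_release(struct ref *ref)
{
	struct zdev_like *zdev = container_of(ref, struct zdev_like, kref);

	printf("releasing fid %#x\n", zdev->fid);
	free(zdev);
}

int main(void)
{
	struct zdev_like *zdev = calloc(1, sizeof(*zdev));

	if (!zdev)
		return 1;
	zdev->fid = 0x42;
	zdev->kref.count = 1;
	zdev->kref.release = zdev_release;
	ref_put(&zdev->kref);		/* last put -> zdev_release() */
	return 0;
}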
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index ea794ae755ae..7e735f41a0a6 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -155,8 +155,13 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
zdev->pfgid = response->pfgid;
zdev->pft = response->pft;
zdev->vfn = response->vfn;
+ zdev->port = response->port;
zdev->uid = response->uid;
zdev->fmb_length = sizeof(u32) * response->fmb_len;
+ zdev->rid_available = response->rid_avail;
+ zdev->is_physfn = response->is_physfn;
+ if (!s390_pci_no_rid && zdev->rid_available)
+ zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
if (response->util_str_avail) {
@@ -309,14 +314,13 @@ out:
int clp_disable_fh(struct zpci_dev *zdev)
{
- u32 fh = zdev->fh;
int rc;
if (!zdev_enabled(zdev))
return 0;
rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
- zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
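clp_store_query_pci_fn() above now also records the port, RID availability and PF/VF role, and derives devfn by masking the routing id when RIDs are in use. The sketch below shows that masking step using the conventional PCI devfn encoding; the mask value and helper macros are assumptions for illustration, not the s390 ZPCI_RID_MASK_DEVFN definition.

/* Derive a devfn from a routing id (bus in the high byte, devfn in the low). */
#include <stdint.h>
#include <stdio.h>

#define RID_MASK_DEVFN 0x00ffU		/* low byte of the RID: device/function */
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
	uint16_t rid = 0x2310;		/* bus 0x23, device 2, function 0 */
	unsigned int devfn = rid & RID_MASK_DEVFN;

	printf("devfn %#04x -> slot %u, function %u\n",
	       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}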
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 8d6ee4af4230..08e1d619398e 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -14,6 +14,8 @@
#include <asm/pci_debug.h>
#include <asm/sclp.h>
+#include "pci_bus.h"
+
/* Content Code Description for PCI Function Error */
struct zpci_ccdf_err {
u32 reserved1;
@@ -53,7 +55,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
zpci_err_hex(ccdf, sizeof(*ccdf));
if (zdev)
- pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
@@ -78,36 +80,28 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
enum zpci_state state;
int ret;
- if (zdev)
- pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+ if (zdev && zdev->zbus && zdev->zbus->bus)
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
- pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
- pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
zpci_err("avail CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
switch (ccdf->pec) {
case 0x0301: /* Reserved|Standby -> Configured */
if (!zdev) {
- ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
- if (ret)
- break;
- zdev = get_zdev_by_fid(ccdf->fid);
- }
- if (!zdev || zdev->state != ZPCI_FN_STATE_STANDBY)
+ ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
break;
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ }
zdev->fh = ccdf->fh;
- ret = zpci_enable_device(zdev);
- if (ret)
- break;
- pci_lock_rescan_remove();
- pci_rescan_bus(zdev->bus);
- pci_unlock_rescan_remove();
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zpci_create_device(zdev);
break;
case 0x0302: /* Reserved -> Standby */
- if (!zdev)
+ if (!zdev) {
clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+ break;
+ }
+ zdev->fh = ccdf->fh;
break;
case 0x0303: /* Deconfiguration requested */
if (!zdev)
@@ -135,12 +129,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
pci_stop_and_remove_bus_device_locked(pdev);
}
- zdev->fh = ccdf->fh;
- zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
- zpci_remove_device(zdev);
+ zpci_zdev_put(zdev);
}
break;
case 0x0306: /* 0x308 or 0x302 for multiple devices */
@@ -149,12 +141,11 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
- zpci_remove_device(zdev);
+ zpci_zdev_put(zdev);
break;
default:
break;
}
- pci_dev_put(pdev);
}
void zpci_event_availability(void *data)
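The availability handler above keys its behaviour off the PEC code: 0x0301 configures the function (creating the PCI device if needed), 0x0302 adds a new function in standby or refreshes the handle of a known one, and 0x0308 drops the reference so the function can be released. A reduced dispatch sketch, with the actions replaced by prints, follows; the codes are taken from the hunk, everything else is illustrative.

/* Reduced dispatch: one action per PCI event code (PEC) from the hunk above. */
#include <stdio.h>

static void handle_availability_event(unsigned int pec, int have_zdev)
{
	switch (pec) {
	case 0x0301:	/* Reserved/Standby -> Configured */
		puts(have_zdev ? "configure known function, create PCI device"
			       : "query and add the new function first");
		break;
	case 0x0302:	/* Reserved -> Standby */
		puts(have_zdev ? "refresh the function handle"
			       : "add the function in standby state");
		break;
	case 0x0308:	/* Standby -> Reserved */
		puts("drop the reference; the function may now be released");
		break;
	default:
		break;
	}
}

int main(void)
{
	handle_availability_event(0x0301, 0);
	handle_availability_event(0x0308, 1);
	return 0;
}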
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 020a2c514d96..38efa3e852c4 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -125,7 +125,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
struct vm_area_struct *vma;
long ret;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = -EINVAL;
vma = find_vma(current->mm, user_addr);
if (!vma)
@@ -135,7 +135,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
goto out;
ret = follow_pfn(vma, user_addr, pfn);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
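The pci_mmio.c hunk swaps the open-coded mmap_sem down_read()/up_read() pair for the mmap_read_lock()/mmap_read_unlock() helpers; the VMA lookup still runs entirely under the read side of the address-space lock. The standalone analogy below makes that shape explicit, with a pthread rwlock standing in for the mm lock; the structure and names are illustrative only.

/* Read-side-only critical section around an address-space lookup. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mm_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long vma_start = 0x1000, vma_end = 0x2000;

/* Check whether an address falls inside the (single) mapped range,
 * holding only the read lock, much as get_pfn() does above. */
static int find_mapping(unsigned long addr)
{
	int found;

	pthread_rwlock_rdlock(&mm_lock);	/* mmap_read_lock()   */
	found = addr >= vma_start && addr < vma_end;
	pthread_rwlock_unlock(&mm_lock);	/* mmap_read_unlock() */
	return found;
}

int main(void)
{
	printf("0x1800 mapped: %d\n", find_mapping(0x1800));
	printf("0x3000 mapped: %d\n", find_mapping(0x3000));
	return 0;
}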
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 215f17437a4f..5c028bee91b9 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -33,6 +33,7 @@ zpci_attr(pchid, "0x%04x\n", pchid);
zpci_attr(pfgid, "0x%02x\n", pfgid);
zpci_attr(vfn, "0x%04x\n", vfn);
zpci_attr(pft, "0x%02x\n", pft);
+zpci_attr(port, "%d\n", port);
zpci_attr(uid, "0x%x\n", uid);
zpci_attr(segment0, "0x%02x\n", pfip[0]);
zpci_attr(segment1, "0x%02x\n", pfip[1]);
@@ -88,7 +89,7 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
ret = zpci_enable_device(zdev);
if (ret)
goto out;
- pci_rescan_bus(zdev->bus);
+ pci_rescan_bus(zdev->zbus->bus);
}
out:
pci_unlock_rescan_remove();
@@ -142,6 +143,7 @@ static struct attribute *zpci_dev_attrs[] = {
&dev_attr_pchid.attr,
&dev_attr_pfgid.attr,
&dev_attr_pft.attr,
+ &dev_attr_port.attr,
&dev_attr_vfn.attr,
&dev_attr_uid.attr,
&dev_attr_recover.attr,
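pci_sysfs.c gains the port attribute by adding one more zpci_attr() line; that macro stamps out a sysfs show function per field and the matching entry goes into the attribute array. The sketch below imitates the stamp-out-a-show-helper idea with an ordinary printf-based macro; it is not the kernel's zpci_attr()/DEVICE_ATTR_RO machinery, just the same token-pasting pattern.

/* Generate one show_<field>() helper per attribute via token pasting. */
#include <stdio.h>

struct pcidev {
	unsigned int pchid;
	unsigned int port;
	unsigned int uid;
};

#define pci_attr(name, fmt)					\
static void show_##name(const struct pcidev *d)			\
{								\
	printf(#name ": " fmt, d->name);			\
}

pci_attr(pchid, "0x%04x\n")
pci_attr(port, "%d\n")
pci_attr(uid, "0x%x\n")

int main(void)
{
	struct pcidev d = { .pchid = 0x3a, .port = 1, .uid = 0x9 };

	show_pchid(&d);
	show_port(&d);
	show_uid(&d);
	return 0;
}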
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0424b8f2f8d3..a7cc1464011a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -53,15 +53,6 @@ config SUPERH
select HAVE_NMI
select NEED_SG_DMA_LENGTH
select ARCH_HAS_GIGANTIC_PAGE
-
- help
- The SuperH is a RISC processor targeted for use in embedded systems
- and consumer electronics; it was also used in the Sega Dreamcast
- gaming console. The SuperH port has a home page at
- <http://www.linux-sh.org/>.
-
-config SUPERH32
- def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
select GUP_GET_PTE_LOW_HIGH if X2TLB
select HAVE_KPROBES
@@ -79,19 +70,15 @@ config SUPERH32
select ARCH_HIBERNATION_POSSIBLE if MMU
select SPARSE_IRQ
select HAVE_STACKPROTECTOR
-
-config SUPERH64
- def_bool "$(ARCH)" = "sh64"
- select HAVE_EXIT_THREAD
- select KALLSYMS
+ help
+ The SuperH is a RISC processor targeted for use in embedded systems
+ and consumer electronics; it was also used in the Sega Dreamcast
+ gaming console. The SuperH port has a home page at
+ <http://www.linux-sh.org/>.
config GENERIC_BUG
def_bool y
- depends on BUG && SUPERH32
-
-config GENERIC_CSUM
- def_bool y
- depends on SUPERH64
+ depends on BUG
config GENERIC_HWEIGHT
def_bool y
@@ -201,12 +188,6 @@ config CPU_SH4AL_DSP
select CPU_SH4A
select CPU_HAS_DSP
-config CPU_SH5
- bool
- select CPU_HAS_FPU
- select SYS_SUPPORTS_SH_TMU
- select SYS_SUPPORTS_HUGETLBFS if MMU
-
config CPU_SHX2
bool
@@ -226,8 +207,6 @@ config CPU_HAS_PMU
default y
bool
-if SUPERH32
-
choice
prompt "Processor sub-type selection"
@@ -516,27 +495,6 @@ config CPU_SUBTYPE_SH7366
endchoice
-endif
-
-if SUPERH64
-
-choice
- prompt "Processor sub-type selection"
-
-# SH-5 Processor Support
-
-config CPU_SUBTYPE_SH5_101
- bool "Support SH5-101 processor"
- select CPU_SH5
-
-config CPU_SUBTYPE_SH5_103
- bool "Support SH5-103 processor"
- select CPU_SH5
-
-endchoice
-
-endif
-
source "arch/sh/mm/Kconfig"
source "arch/sh/Kconfig.cpu"
@@ -590,7 +548,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
- depends on SUPERH32 && MMU
+ depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
@@ -608,7 +566,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
- depends on SUPERH32 && BROKEN_ON_SMP
+ depends on BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
@@ -622,7 +580,7 @@ config CRASH_DUMP
config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
- depends on SUPERH32 && KEXEC && HIBERNATION
+ depends on KEXEC && HIBERNATION
help
Jump between original kernel and kexeced kernel and invoke
code via KEXEC
@@ -699,7 +657,7 @@ config HOTPLUG_CPU
config GUSA
def_bool y
- depends on !SMP && SUPERH32
+ depends on !SMP
help
This enables support for gUSA (general UserSpace Atomicity).
This is the default implementation for both UP and non-ll/sc
diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
index 4a4edc7e03d4..97ca35f2cd37 100644
--- a/arch/sh/Kconfig.cpu
+++ b/arch/sh/Kconfig.cpu
@@ -13,7 +13,6 @@ config CPU_LITTLE_ENDIAN
config CPU_BIG_ENDIAN
bool "Big Endian"
- depends on !CPU_SH5
endchoice
@@ -27,10 +26,6 @@ config SH_FPU
This option must be set in order to enable the FPU.
-config SH64_FPU_DENORM_FLUSH
- bool "Flush floating point denorms to zero"
- depends on SH_FPU && SUPERH64
-
config SH_FPU_EMU
def_bool n
prompt "FPU emulation support"
@@ -77,10 +72,6 @@ config SPECULATIVE_EXECUTION
If unsure, say N.
-config SH64_ID2815_WORKAROUND
- bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
- depends on CPU_SUBTYPE_SH5_101
-
config CPU_HAS_INTEVT
bool
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 010b6c33bbba..28a43d63bde1 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -5,7 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT
config SH_STANDARD_BIOS
bool "Use LinuxSH standard BIOS"
- depends on SUPERH32
help
Say Y here if your target has the gdb-sh-stub
package from www.m17n.org (or any conforming standard LinuxSH BIOS)
@@ -19,7 +18,7 @@ config SH_STANDARD_BIOS
config STACK_DEBUG
bool "Check for stack overflows"
- depends on DEBUG_KERNEL && SUPERH32
+ depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit. Saying Y here will add overhead to
@@ -38,7 +37,7 @@ config 4KSTACKS
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
- depends on DEBUG_KERNEL && SUPERH32 && BROKEN
+ depends on DEBUG_KERNEL && BROKEN
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
@@ -46,7 +45,7 @@ config IRQSTACKS
config DUMP_CODE
bool "Show disassembly of nearby code in register dumps"
- depends on DEBUG_KERNEL && SUPERH32
+ depends on DEBUG_KERNEL
default y if DEBUG_BUGVERBOSE
default n
help
@@ -59,7 +58,6 @@ config DUMP_CODE
config DWARF_UNWINDER
bool "Enable the DWARF unwinder for stacktraces"
select FRAME_POINTER
- depends on SUPERH32
default n
help
Enabling this option will make stacktraces more accurate, at
@@ -77,11 +75,6 @@ config SH_NO_BSS_INIT
For all other cases, say N. If this option seems perplexing, or
you aren't sure, say N.
-config SH64_SR_WATCH
- bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
- depends on SUPERH64
-
config MCOUNT
def_bool y
- depends on SUPERH32
depends on STACK_DEBUG || FUNCTION_TRACER
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index b4a86f27e048..da9cf952f33c 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -11,7 +11,7 @@
#
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
+ CROSS_COMPILE := $(call cc-cross-prefix, sh-linux- sh-linux-gnu- sh-unknown-linux-gnu-)
endif
endif
@@ -29,12 +29,9 @@ isa-$(CONFIG_CPU_SH3) := sh3
isa-$(CONFIG_CPU_SH4) := sh4
isa-$(CONFIG_CPU_SH4A) := sh4a
isa-$(CONFIG_CPU_SH4AL_DSP) := sh4al
-isa-$(CONFIG_CPU_SH5) := shmedia
-ifeq ($(CONFIG_SUPERH32),y)
isa-$(CONFIG_SH_DSP) := $(isa-y)-dsp
isa-y := $(isa-y)-up
-endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
@@ -47,7 +44,6 @@ cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a,) \
$(call cc-option,-m4a-nofpu,)
cflags-$(CONFIG_CPU_SH4AL_DSP) += $(call cc-option,-m4al,)
-cflags-$(CONFIG_CPU_SH5) := $(call cc-option,-m5-32media-nofpu,)
ifeq ($(cflags-y),)
#
@@ -88,7 +84,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment \
-R .stab -R .stabstr -S
# Give the various platforms the opportunity to set default image types
-defaultimage-$(CONFIG_SUPERH32) := zImage
+defaultimage-y := zImage
defaultimage-$(CONFIG_SH_SH7785LCR) := uImage
defaultimage-$(CONFIG_SH_RSK) := uImage
defaultimage-$(CONFIG_SH_URQUELL) := uImage
@@ -107,31 +103,22 @@ KBUILD_IMAGE := $(boot)/$(defaultimage-y)
# Choosing incompatible machines durings configuration will result in
# error messages during linking.
#
-ifdef CONFIG_SUPERH32
UTS_MACHINE := sh
-BITS := 32
LDFLAGS_vmlinux += -e _stext
-else
-UTS_MACHINE := sh64
-BITS := 64
-LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
- --defsym phys_stext_shmedia=phys_stext+1 \
- -e phys_stext_shmedia
-endif
ifdef CONFIG_CPU_LITTLE_ENDIAN
-ld-bfd := elf32-$(UTS_MACHINE)-linux
+ld-bfd := elf32-sh-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EL
else
-ld-bfd := elf32-$(UTS_MACHINE)big-linux
+ld-bfd := elf32-shbig-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EB
endif
-export ld-bfd BITS
+export ld-bfd
-head-y := arch/sh/kernel/head_$(BITS).o
+head-y := arch/sh/kernel/head_32.o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
@@ -185,7 +172,6 @@ cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2
cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3
cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a
cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
-cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5
cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
@@ -206,8 +192,7 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
KBUILD_CFLAGS += -fasynchronous-unwind-tables
endif
-libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
-libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
+libs-y := arch/sh/lib/ $(libs-y)
BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index cee24c308337..fb0ca0c1efe1 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -7,6 +7,11 @@ config SOLUTION_ENGINE
config SH_ALPHA_BOARD
bool
+config SH_CUSTOM_CLK
+ def_bool y
+ depends on !SH_DEVICE_TREE
+ select HAVE_LEGACY_CLK
+
config SH_DEVICE_TREE
bool
select OF
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index f5e1bd779789..ad0e2403e56f 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -8,9 +8,9 @@
targets := vmlinux vmlinux.bin vmlinux.bin.gz \
vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo \
- head_$(BITS).o misc.o piggy.o
+ head_32.o misc.o piggy.o
-OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
+OBJECTS = $(obj)/head_32.o $(obj)/misc.o $(obj)/cache.o
GCOV_PROFILE := n
@@ -39,15 +39,11 @@ LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
#
# Pull in the necessary libgcc bits from the in-kernel implementation.
#
-lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
- lshrsi3.S
-lib1funcs-obj := \
+lib1funcs-y := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S lshrsi3.S
+lib1funcs-obj := \
$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
-ifeq ($(BITS),64)
- lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
-endif
KBUILD_CFLAGS += -I$(lib1funcs-dir) -DDISABLE_BRANCH_PROFILING
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index e69ec12cbbe6..a03b6680a9d9 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -116,11 +116,7 @@ void ftrace_stub(void)
{
}
-#ifdef CONFIG_SUPERH64
-#define stackalign 8
-#else
#define stackalign 4
-#endif
#define STACK_SIZE (4096)
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
@@ -130,14 +126,10 @@ void decompress_kernel(void)
{
unsigned long output_addr;
-#ifdef CONFIG_SUPERH64
- output_addr = (CONFIG_MEMORY_START + 0x2000);
-#else
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
-#endif
output = (unsigned char *)output_addr;
free_mem_ptr = (unsigned long)&_end;
diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr
index 862d74808236..dd292b4b9082 100644
--- a/arch/sh/boot/compressed/vmlinux.scr
+++ b/arch/sh/boot/compressed/vmlinux.scr
@@ -1,6 +1,6 @@
SECTIONS
{
- .rodata..compressed : {
+ .rodata..compressed : ALIGN(8) {
input_len = .;
LONG(input_data_end - input_data) input_data = .;
*(.data)
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 6dd0da73ca5a..6abd9bd70106 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -20,7 +20,8 @@ CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig
index 1dc3f670c481..833404490cfe 100644
--- a/arch/sh/configs/kfr2r09_defconfig
+++ b/arch/sh/configs/kfr2r09_defconfig
@@ -10,8 +10,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index 664c4dee6e6a..0989ed929540 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7720=y
CONFIG_MEMORY_START=0x0C000000
CONFIG_MEMORY_SIZE=0x03F00000
diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig
index e3a1d3d2694a..246408ec7462 100644
--- a/arch/sh/configs/polaris_defconfig
+++ b/arch/sh/configs/polaris_defconfig
@@ -12,7 +12,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_CPU_SUBTYPE_SH7709=y
CONFIG_MEMORY_START=0x0C000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 0a18f8011c55..c97ec60cff27 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -12,8 +12,6 @@ CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7780=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index 7226ac5a1d44..55fce65eb454 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -15,8 +15,6 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig
index 9f4f474705b7..841809b5c2dc 100644
--- a/arch/sh/configs/rsk7201_defconfig
+++ b/arch/sh/configs/rsk7201_defconfig
@@ -15,8 +15,6 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7201=y
CONFIG_MEMORY_SIZE=0x01000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 10a32bd4cf66..0055031664ad 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -16,8 +16,6 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7203=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x01000000
diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig
index 78643191c99e..f7b9c528c6df 100644
--- a/arch/sh/configs/rsk7264_defconfig
+++ b/arch/sh/configs/rsk7264_defconfig
@@ -17,8 +17,6 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_PROFILING=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7264=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig
index fb9fa7faf635..4bff14fb185d 100644
--- a/arch/sh/configs/rsk7269_defconfig
+++ b/arch/sh/configs/rsk7269_defconfig
@@ -4,8 +4,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_SWAP_IO_SPACE=y
CONFIG_CPU_SUBTYPE_SH7269=y
CONFIG_MEMORY_START=0x0c000000
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 7fa116b436c3..61bec46ebd66 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -39,7 +39,8 @@ CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_START=0x40000000
CONFIG_MEMORY_SIZE=0x20000000
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index a93402b3a319..21a43f14ffac 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -28,8 +28,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7206=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig
index 06d067c842cd..4e794e719a28 100644
--- a/arch/sh/configs/se7343_defconfig
+++ b/arch/sh/configs/se7343_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7343=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x01000000
diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig
index f54722dbc8f5..3264415a5931 100644
--- a/arch/sh/configs/se7619_defconfig
+++ b/arch/sh/configs/se7619_defconfig
@@ -11,8 +11,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_CPU_BIG_ENDIAN=y
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index ddfc69841955..4496b94b7d88 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -8,8 +8,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7705=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x02000000
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 9a527f978106..ee6d28ae08de 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -12,8 +12,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7712=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x02000000
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index 3b0e1eb6e874..bad921bc10f8 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -12,8 +12,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7721=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x02000000
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 88bf9e849008..09e455817447 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -8,8 +8,6 @@ CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7722=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_NUMA=y
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index ec32c82646ed..dcd85b858ac8 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -9,7 +9,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7780=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_SH_7780_SOLUTION_ENGINE=y
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index c86f28442a80..08426913c0e3 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7710=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x00800000
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
index 9f2aed0b3bca..d0933a9b9799 100644
--- a/arch/sh/configs/sh7757lcr_defconfig
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -36,7 +36,7 @@ CONFIG_IPV6=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig
index d589cfdfb7eb..a27b129b93c5 100644
--- a/arch/sh/configs/shmin_defconfig
+++ b/arch/sh/configs/shmin_defconfig
@@ -12,8 +12,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_SHMEM is not set
CONFIG_SLOB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7706=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x00800000
diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig
index dc2e3061130f..103b81ec1ffb 100644
--- a/arch/sh/configs/ul2_defconfig
+++ b/arch/sh/configs/ul2_defconfig
@@ -8,8 +8,6 @@ CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7366=y
CONFIG_MEMORY_SIZE=0x01f00000
CONFIG_NUMA=y
diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
index 947bfe8bb0a7..a5c1e9066f83 100644
--- a/arch/sh/drivers/pci/Makefile
+++ b/arch/sh/drivers/pci/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7763) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7786) += pcie-sh7786.o ops-sh7786.o
-obj-$(CONFIG_CPU_SH5) += pci-sh5.o ops-sh5.o
obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \
pci-dreamcast.o
diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c
deleted file mode 100644
index 9fbaf72949ab..000000000000
--- a/arch/sh/drivers/pci/ops-sh5.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support functions for the SH5 PCI hardware.
- *
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/kernel.h>
-#include <linux/rwsem.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <asm/io.h>
-#include "pci-sh5.h"
-
-static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *val)
-{
- SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
-
- switch (size) {
- case 1:
- *val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
- break;
- case 2:
- *val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
- break;
- case 4:
- *val = SH5PCI_READ(PDR);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 val)
-{
- SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
-
- switch (size) {
- case 1:
- SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
- break;
- case 2:
- SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
- break;
- case 4:
- SH5PCI_WRITE(PDR, val);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops sh5_pci_ops = {
- .read = sh5pci_read,
- .write = sh5pci_write,
-};
diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c
deleted file mode 100644
index 03225d27770b..000000000000
--- a/arch/sh/drivers/pci/pci-sh5.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- *
- * Support functions for the SH5 PCI hardware.
- */
-
-#include <linux/kernel.h>
-#include <linux/rwsem.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <cpu/irq.h>
-#include <asm/io.h>
-#include "pci-sh5.h"
-
-unsigned long pcicr_virt;
-unsigned long PCI_IO_AREA;
-
-/* Rounds a number UP to the nearest power of two. Used for
- * sizing the PCI window.
- */
-static u32 __init r2p2(u32 num)
-{
- int i = 31;
- u32 tmp = num;
-
- if (num == 0)
- return 0;
-
- do {
- if (tmp & (1 << 31))
- break;
- i--;
- tmp <<= 1;
- } while (i >= 0);
-
- tmp = 1 << i;
- /* If the original number isn't a power of 2, round it up */
- if (tmp != num)
- tmp <<= 1;
-
- return tmp;
-}
-
-static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned pci_int, pci_air, pci_cir, pci_aint;
-
- pci_int = SH5PCI_READ(INT);
- pci_cir = SH5PCI_READ(CIR);
- pci_air = SH5PCI_READ(AIR);
-
- if (pci_int) {
- printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
- printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
- printk("PCI AIR -> 0x%x\n", pci_air);
- printk("PCI CIR -> 0x%x\n", pci_cir);
- SH5PCI_WRITE(INT, ~0);
- }
-
- pci_aint = SH5PCI_READ(AINT);
- if (pci_aint) {
- printk("PCI ARB INTERRUPT!\n");
- printk("PCI AINT -> 0x%x\n", pci_aint);
- printk("PCI AIR -> 0x%x\n", pci_air);
- printk("PCI CIR -> 0x%x\n", pci_cir);
- SH5PCI_WRITE(AINT, ~0);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
-{
- printk("SERR IRQ\n");
-
- return IRQ_NONE;
-}
-
-static struct resource sh5_pci_resources[2];
-
-static struct pci_channel sh5pci_controller = {
- .pci_ops = &sh5_pci_ops,
- .resources = sh5_pci_resources,
- .nr_resources = ARRAY_SIZE(sh5_pci_resources),
- .mem_offset = 0x00000000,
- .io_offset = 0x00000000,
-};
-
-static int __init sh5pci_init(void)
-{
- unsigned long memStart = __pa(memory_start);
- unsigned long memSize = __pa(memory_end) - memStart;
- u32 lsr0;
- u32 uval;
-
- if (request_irq(IRQ_ERR, pcish5_err_irq,
- 0, "PCI Error",NULL) < 0) {
- printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
- return -EINVAL;
- }
-
- if (request_irq(IRQ_SERR, pcish5_serr_irq,
- 0, "PCI SERR interrupt", NULL) < 0) {
- printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
- return -EINVAL;
- }
-
- pcicr_virt = (unsigned long)ioremap(SH5PCI_ICR_BASE, 1024);
- if (!pcicr_virt) {
- panic("Unable to remap PCICR\n");
- }
-
- PCI_IO_AREA = (unsigned long)ioremap(SH5PCI_IO_BASE, 0x10000);
- if (!PCI_IO_AREA) {
- panic("Unable to remap PCIIO\n");
- }
-
- /* Clear snoop registers */
- SH5PCI_WRITE(CSCR0, 0);
- SH5PCI_WRITE(CSCR1, 0);
-
- /* Switch off interrupts */
- SH5PCI_WRITE(INTM, 0);
- SH5PCI_WRITE(AINTM, 0);
- SH5PCI_WRITE(PINTM, 0);
-
- /* Set bus active, take it out of reset */
- uval = SH5PCI_READ(CR);
-
- /* Set command Register */
- SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
- CR_PFCS | CR_BMAM);
-
- uval=SH5PCI_READ(CR);
-
- /* Allow it to be a master */
- /* NB - WE DISABLE I/O ACCESS to stop overlap */
- /* set WAIT bit to enable stepping, an attempt to improve stability */
- SH5PCI_WRITE_SHORT(CSR_CMD,
- PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
- PCI_COMMAND_WAIT);
-
- /*
- ** Set translation mapping memory in order to convert the address
- ** used for the main bus, to the PCI internal address.
- */
- SH5PCI_WRITE(MBR,0x40000000);
-
- /* Always set the max size 512M */
- SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
-
- /*
- ** I/O addresses are mapped at internal PCI specific address
- ** as is described into the configuration bridge table.
- ** These are changed to 0, to allow cards that have legacy
- ** io such as vga to function correctly. We set the SH5 IOBAR to
- ** 256K, which is a bit big as we can only have 64K of address space
- */
-
- SH5PCI_WRITE(IOBR,0x0);
-
- /* Set up a 256K window. Totally pointless waste of address space */
- SH5PCI_WRITE(IOBMR,0);
-
- /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
- * Ideally, we would want to map the I/O region somewhere, but it
- * is so big this is not that easy!
- */
- SH5PCI_WRITE(CSR_IBAR0,~0);
- /* Set memory size value */
- memSize = memory_end - memory_start;
-
- /* Now we set up the mbars so the PCI bus can see the memory of
- * the machine */
- if (memSize < (1024 * 1024)) {
- printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
- memSize);
- return -EINVAL;
- }
-
- /* Set LSR 0 */
- lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
- ((r2p2(memSize) - 0x100000) | 0x1);
- SH5PCI_WRITE(LSR0, lsr0);
-
- /* Set MBAR 0 */
- SH5PCI_WRITE(CSR_MBAR0, memory_start);
- SH5PCI_WRITE(LAR0, memory_start);
-
- SH5PCI_WRITE(CSR_MBAR1,0);
- SH5PCI_WRITE(LAR1,0);
- SH5PCI_WRITE(LSR1,0);
-
- /* Enable the PCI interrupts on the device */
- SH5PCI_WRITE(INTM, ~0);
- SH5PCI_WRITE(AINTM, ~0);
- SH5PCI_WRITE(PINTM, ~0);
-
- sh5_pci_resources[0].start = PCI_IO_AREA;
- sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000;
-
- sh5_pci_resources[1].start = memStart;
- sh5_pci_resources[1].end = memStart + memSize;
-
- return register_pci_controller(&sh5pci_controller);
-}
-arch_initcall(sh5pci_init);
diff --git a/arch/sh/drivers/pci/pci-sh5.h b/arch/sh/drivers/pci/pci-sh5.h
deleted file mode 100644
index 91348af0ef6c..000000000000
--- a/arch/sh/drivers/pci/pci-sh5.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- *
- * Definitions for the SH5 PCI hardware.
- */
-#ifndef __PCI_SH5_H
-#define __PCI_SH5_H
-
-/* Product ID */
-#define PCISH5_PID 0x350d
-
-/* vendor ID */
-#define PCISH5_VID 0x1054
-
-/* Configuration types */
-#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
-#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
-
-/* VCR data */
-#define PCISH5_VCR_STATUS 0x00
-#define PCISH5_VCR_VERSION 0x08
-
-/*
-** ICR register offsets and bits
-*/
-#define PCISH5_ICR_CR 0x100 /* PCI control register values */
-#define CR_PBAM (1<<12)
-#define CR_PFCS (1<<11)
-#define CR_FTO (1<<10)
-#define CR_PFE (1<<9)
-#define CR_TBS (1<<8)
-#define CR_SPUE (1<<7)
-#define CR_BMAM (1<<6)
-#define CR_HOST (1<<5)
-#define CR_CLKEN (1<<4)
-#define CR_SOCS (1<<3)
-#define CR_IOCS (1<<2)
-#define CR_RSTCTL (1<<1)
-#define CR_CFINT (1<<0)
-#define CR_LOCK_MASK 0xa5000000
-
-#define PCISH5_ICR_INT 0x114 /* Interrupt registert values */
-#define INT_MADIM (1<<2)
-
-#define PCISH5_ICR_LSR0 0X104 /* Local space register values */
-#define PCISH5_ICR_LSR1 0X108 /* Local space register values */
-#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
-#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
-#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
-#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
-#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
-#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
-#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
-#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
-#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */
-#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
-#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
-#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
-#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
-#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
-#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
-#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
-#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
-#define PCISH5_ICR_PDR 0x220 /* Pio data register values */
-
-/* These are configs space registers */
-#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
-#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
-#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
-#define PCISH5_ICR_CSR_STATUS 0x006 /* Stautus */
-#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
-#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
-#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
-
-/* Base address of registers */
-#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
-#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
-/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
-
-extern unsigned long pcicr_virt;
-/* Register selection macro */
-#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
-/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
-
-/* Write I/O functions */
-#define SH5PCI_WRITE(reg,val) __raw_writel((u32)(val),PCISH5_ICR_REG(reg))
-#define SH5PCI_WRITE_SHORT(reg,val) __raw_writew((u16)(val),PCISH5_ICR_REG(reg))
-#define SH5PCI_WRITE_BYTE(reg,val) __raw_writeb((u8)(val),PCISH5_ICR_REG(reg))
-
-/* Read I/O functions */
-#define SH5PCI_READ(reg) __raw_readl(PCISH5_ICR_REG(reg))
-#define SH5PCI_READ_SHORT(reg) __raw_readw(PCISH5_ICR_REG(reg))
-#define SH5PCI_READ_BYTE(reg) __raw_readb(PCISH5_ICR_REG(reg))
-
-/* Set PCI config bits */
-#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
-
-/* Set PCI command register */
-#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
-
-/* Size converters */
-#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
-#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
-
-extern struct pci_ops sh5_pci_ops;
-
-#endif /* __PCI_SH5_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 66faae19d254..0d58a0159aa6 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -6,7 +6,7 @@
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#if defined(CONFIG_CPU_SH4A)
#include <asm/cache_insns.h>
#endif
@@ -24,7 +24,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#if defined(CONFIG_CPU_SH4A)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index 8c3578288db5..445dd14c448a 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -26,7 +26,6 @@
#include <asm-generic/bitops/non-atomic.h>
#endif
-#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
@@ -60,31 +59,6 @@ static inline unsigned long __ffs(unsigned long word)
: "t");
return result;
}
-#else
-static inline unsigned long ffz(unsigned long word)
-{
- unsigned long result, __d2, __d3;
-
- __asm__("gettr tr0, %2\n\t"
- "pta $+32, tr0\n\t"
- "andi %1, 1, %3\n\t"
- "beq %3, r63, tr0\n\t"
- "pta $+4, tr0\n"
- "0:\n\t"
- "shlri.l %1, 1, %1\n\t"
- "addi %0, 1, %0\n\t"
- "andi %1, 1, %3\n\t"
- "beqi %3, 1, tr0\n"
- "1:\n\t"
- "ptabs %2, tr0\n\t"
- : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
- : "0" (0L), "1" (word));
-
- return result;
-}
-
-#include <asm-generic/bitops/__ffs.h>
-#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
diff --git a/arch/sh/include/asm/bl_bit.h b/arch/sh/include/asm/bl_bit.h
index 7e3d81691ad5..5d04f2c62563 100644
--- a/arch/sh/include/asm/bl_bit.h
+++ b/arch/sh/include/asm/bl_bit.h
@@ -1,11 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_BL_BIT_H
-#define __ASM_SH_BL_BIT_H
-
-#ifdef CONFIG_SUPERH32
-# include <asm/bl_bit_32.h>
-#else
-# include <asm/bl_bit_64.h>
-#endif
-
-#endif /* __ASM_SH_BL_BIT_H */
+#include <asm/bl_bit_32.h>
diff --git a/arch/sh/include/asm/bl_bit_64.h b/arch/sh/include/asm/bl_bit_64.h
deleted file mode 100644
index aac9780fe864..000000000000
--- a/arch/sh/include/asm/bl_bit_64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_BL_BIT_64_H
-#define __ASM_SH_BL_BIT_64_H
-
-#include <asm/processor.h>
-
-#define SR_BL_LL 0x0000000010000000LL
-
-static inline void set_bl_bit(void)
-{
- unsigned long long __dummy0, __dummy1 = SR_BL_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-
-}
-
-static inline void clear_bl_bit(void)
-{
- unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
-
-#endif /* __ASM_SH_BL_BIT_64_H */
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 030df56bfdb2..fe52abb69cea 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -53,10 +53,6 @@ static void __init check_bugs(void)
*p++ = 's';
*p++ = 'p';
break;
- case CPU_FAMILY_SH5:
- *p++ = '6';
- *p++ = '4';
- break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than
diff --git a/arch/sh/include/asm/cache_insns.h b/arch/sh/include/asm/cache_insns.h
index c5a4acdc53f9..d7edd5297bd0 100644
--- a/arch/sh/include/asm/cache_insns.h
+++ b/arch/sh/include/asm/cache_insns.h
@@ -1,12 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CACHE_INSNS_H
-#define __ASM_SH_CACHE_INSNS_H
-
-
-#ifdef CONFIG_SUPERH32
-# include <asm/cache_insns_32.h>
-#else
-# include <asm/cache_insns_64.h>
-#endif
-
-#endif /* __ASM_SH_CACHE_INSNS_H */
+#include <asm/cache_insns_32.h>
diff --git a/arch/sh/include/asm/cache_insns_64.h b/arch/sh/include/asm/cache_insns_64.h
deleted file mode 100644
index ed682b987b0d..000000000000
--- a/arch/sh/include/asm/cache_insns_64.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_CACHE_INSNS_64_H
-#define __ASM_SH_CACHE_INSNS_64_H
-
-#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
-#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
-#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
-#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
-
-static inline reg_size_t register_align(void *val)
-{
- return (unsigned long long)(signed long long)(signed long)val;
-}
-
-#endif /* __ASM_SH_CACHE_INSNS_64_H */
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index b932e42ef028..fe7400079b97 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -46,6 +46,7 @@ extern void flush_cache_range(struct vm_area_struct *vma,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_user_range flush_icache_range
extern void flush_icache_page(struct vm_area_struct *vma,
struct page *page);
extern void flush_cache_sigtramp(unsigned long address);
diff --git a/arch/sh/include/asm/checksum.h b/arch/sh/include/asm/checksum.h
index a460a108969d..00e39dd0d146 100644
--- a/arch/sh/include/asm/checksum.h
+++ b/arch/sh/include/asm/checksum.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_SUPERH32
-# include <asm/checksum_32.h>
-#else
-# include <asm-generic/checksum.h>
-#endif
+#include <asm/checksum_32.h>
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index 5ec8db1ddc20..7661fb5d548a 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -133,28 +133,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_PLATFORM (utsname()->machine)
-#ifdef __SH5__
-#define ELF_PLAT_INIT(_r, load_addr) \
- do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
- _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
- _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
- _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
- _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
- _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
- _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
- _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
- _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
- _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
- _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
- _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
- _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
- _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
- _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
- _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
- _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
- _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
- _r->sr = SR_FD | SR_MMU; } while (0)
-#else
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
@@ -182,7 +160,6 @@ do { \
_r->regs[14] = 0; \
_r->sr = SR_FD; \
} while (0)
-#endif
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
diff --git a/arch/sh/include/asm/extable.h b/arch/sh/include/asm/extable.h
index ed46f8bebb9f..5658d2bae372 100644
--- a/arch/sh/include/asm/extable.h
+++ b/arch/sh/include/asm/extable.h
@@ -4,8 +4,4 @@
#include <asm-generic/extable.h>
-#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
-#define ARCH_HAS_SEARCH_EXTABLE
-#endif
-
#endif
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index e30348c58073..f38adc189b83 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -83,11 +83,7 @@ extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
-#ifdef CONFIG_SUPERH32
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
-#else
-#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
-#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
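
With the sh64 branch gone, FIXADDR_TOP above is always P4SEG - PAGE_SIZE. A small standalone sketch of the resulting layout arithmetic, assuming the usual sh32 segment layout (P4SEG at 0xe0000000), 4 KiB pages, and a made-up slot count standing in for __end_of_fixed_addresses:

#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define P4SEG         0xe0000000UL          /* assumed sh32 P4 segment base */

#define NR_FIX_SLOTS  16UL                  /* stand-in for __end_of_fixed_addresses */

#define FIXADDR_TOP   (P4SEG - PAGE_SIZE)
#define FIXADDR_SIZE  (NR_FIX_SLOTS << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)

int main(void)
{
	printf("FIXADDR_TOP   = 0x%08lx\n", FIXADDR_TOP);    /* 0xdffff000 */
	printf("FIXADDR_SIZE  = 0x%08lx\n", FIXADDR_SIZE);   /* 0x00010000 */
	printf("FIXADDR_START = 0x%08lx\n", FIXADDR_START);  /* 0xdffef000 */
	return 0;
}
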
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 39c9ead489e5..26f0f9b4658b 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -17,7 +17,7 @@
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm-generic/iomap.h>
#ifdef __KERNEL__
@@ -115,12 +115,8 @@ static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
-#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#else
-__BUILD_MEMORY_STRING(__raw_, l, u32)
-#endif
__BUILD_MEMORY_STRING(__raw_, q, u64)
@@ -328,7 +324,7 @@ __ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
#else
#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
-#define iounmap(addr) do { } while (0)
+static inline void iounmap(void __iomem *addr) {}
#endif /* CONFIG_MMU */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
diff --git a/arch/sh/include/asm/io_noioport.h b/arch/sh/include/asm/io_noioport.h
index 90d6109f1622..f7938fe0f911 100644
--- a/arch/sh/include/asm/io_noioport.h
+++ b/arch/sh/include/asm/io_noioport.h
@@ -53,12 +53,34 @@ static inline void ioport_unmap(void __iomem *addr)
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
-#define insb(a, b, c) BUG()
-#define insw(a, b, c) BUG()
-#define insl(a, b, c) BUG()
+static inline void insb(unsigned long port, void *dst, unsigned long count)
+{
+ BUG();
+}
+
+static inline void insw(unsigned long port, void *dst, unsigned long count)
+{
+ BUG();
+}
+
+static inline void insl(unsigned long port, void *dst, unsigned long count)
+{
+ BUG();
+}
-#define outsb(a, b, c) BUG()
-#define outsw(a, b, c) BUG()
-#define outsl(a, b, c) BUG()
+static inline void outsb(unsigned long port, const void *src, unsigned long count)
+{
+ BUG();
+}
+
+static inline void outsw(unsigned long port, const void *src, unsigned long count)
+{
+ BUG();
+}
+
+static inline void outsl(unsigned long port, const void *src, unsigned long count)
+{
+ BUG();
+}
#endif /* __ASM_SH_IO_NOIOPORT_H */
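
The iounmap() change in io.h and the ins*/outs*() changes here follow the same pattern: empty do { } while (0) macros become typed static inline stubs, so arguments are type-checked and "set but not used" warnings disappear. A minimal standalone sketch of the difference (illustrative only, not kernel code):

#include <stdio.h>

/* Old style: the argument is never evaluated or type-checked. */
#define iounmap_macro(addr) do { } while (0)

/* New style: an empty function, so the argument must have a compatible
 * type and the variable passed in counts as used. */
static inline void iounmap_inline(volatile void *addr)
{
	(void)addr;
}

int main(void)
{
	static char fake_regs[64];
	volatile void *base = fake_regs;

	iounmap_macro(base);   /* would also "work" for an int, silently */
	iounmap_inline(base);  /* compiler checks the pointer type */

	printf("both are no-ops; only the inline version is type-checked\n");
	return 0;
}
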
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 8065a3222e19..6d44c32ef047 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -66,8 +66,5 @@ extern void irq_finish(unsigned int irq);
#endif
#include <asm-generic/irq.h>
-#ifdef CONFIG_CPU_SH5
-#include <cpu/irq.h>
-#endif
#endif /* __ASM_SH_IRQ_H */
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 5212f5fcd752..960545306afa 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -12,7 +12,9 @@ enum die_val {
};
/* arch/sh/kernel/dumpstack.c */
-extern void printk_address(unsigned long address, int reliable);
-extern void dump_mem(const char *str, unsigned long bottom, unsigned long top);
+extern void printk_address(unsigned long address, int reliable,
+ const char *loglvl);
+extern void dump_mem(const char *str, const char *loglvl,
+ unsigned long bottom, unsigned long top);
#endif /* __ASM_SH_KDEBUG_H */
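
The new loglvl parameters above follow the kernel-wide pattern of letting the caller choose the message level instead of hard-coding it in the dump helpers. A rough standalone sketch of that pattern, with printf standing in for printk and "<4>"-style prefixes assumed for the level strings:

#include <stdio.h>

static void dump_mem(const char *str, const char *loglvl,
		     unsigned long bottom, unsigned long top)
{
	/* The caller-supplied level prefix is simply prepended. */
	printf("%s%s 0x%08lx-0x%08lx\n", loglvl, str, bottom, top);
}

static void printk_address(unsigned long address, int reliable,
			   const char *loglvl)
{
	printf("%s[<%08lx>]%s\n", loglvl, address, reliable ? "" : " (unreliable)");
}

int main(void)
{
	dump_mem("Stack:", "<4>", 0x1000UL, 0x1400UL); /* "<4>" ~ warning level */
	printk_address(0xc0001234UL, 1, "<4>");
	return 0;
}
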
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 2d09650093c7..48e67d544d53 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -48,11 +48,7 @@
*/
#define MMU_VPN_MASK 0xfffff000
-#if defined(CONFIG_SUPERH32)
#include <asm/mmu_context_32.h>
-#else
-#include <asm/mmu_context_64.h>
-#endif
/*
* Get MMU context if needed.
@@ -74,14 +70,6 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
*/
local_flush_tlb_all();
-#ifdef CONFIG_SUPERH64
- /*
- * The SH-5 cache uses the ASIDs, requiring both the I and D
- * cache to be flushed when the ASID is exhausted. Weak.
- */
- flush_cache_all();
-#endif
-
/*
* Fix version; Note that we avoid version #0
* to distinguish NO_CONTEXT.
diff --git a/arch/sh/include/asm/mmu_context_64.h b/arch/sh/include/asm/mmu_context_64.h
deleted file mode 100644
index bacafe0b887d..000000000000
--- a/arch/sh/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_MMU_CONTEXT_64_H
-#define __ASM_SH_MMU_CONTEXT_64_H
-
-/*
- * sh64-specific mmu_context interface.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- */
-#include <cpu/registers.h>
-#include <asm/cacheflush.h>
-
-#define SR_ASID_MASK 0xffffffffff00ffffULL
-#define SR_ASID_SHIFT 16
-
-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
- /* Well, at least free TLB entries */
- flush_tlb_mm(mm);
-}
-
-static inline unsigned long get_asid(void)
-{
- unsigned long long sr;
-
- asm volatile ("getcon " __SR ", %0\n\t"
- : "=r" (sr));
-
- sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
- return (unsigned long) sr;
-}
-
-/* Set ASID into SR */
-static inline void set_asid(unsigned long asid)
-{
- unsigned long long sr, pc;
-
- asm volatile ("getcon " __SR ", %0" : "=r" (sr));
-
- sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
-
- /*
- * It is possible that this function may be inlined and so to avoid
- * the assembler reporting duplicate symbols we make use of the
- * gas trick of generating symbols using numerics and forward
- * reference.
- */
- asm volatile ("movi 1, %1\n\t"
- "shlli %1, 28, %1\n\t"
- "or %0, %1, %1\n\t"
- "putcon %1, " __SR "\n\t"
- "putcon %0, " __SSR "\n\t"
- "movi 1f, %1\n\t"
- "ori %1, 1 , %1\n\t"
- "putcon %1, " __SPC "\n\t"
- "rte\n"
- "1:\n\t"
- : "=r" (sr), "=r" (pc) : "0" (sr));
-}
-
-/* arch/sh/kernel/cpu/sh5/entry.S */
-extern unsigned long switch_and_save_asid(unsigned long new_asid);
-
-/* No spare register to twiddle, so use a software cache */
-extern pgd_t *mmu_pdtp_cache;
-
-#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
-#define get_TTB() (mmu_pdtp_cache)
-
-#endif /* __ASM_SH_MMU_CONTEXT_64_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index ea8d68f58e39..eca5daa43b93 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -35,8 +35,6 @@
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT 26
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
@@ -82,18 +80,12 @@ typedef struct { unsigned long long pgd; } pgd_t;
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#elif defined(CONFIG_SUPERH32)
+#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
-#else
-typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low)
-#define __pte(x) ((pte_t) { (x) } )
#endif
#define pgd_val(x) ((x).pgd)
@@ -191,15 +183,4 @@ typedef struct page *pgtable_t;
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-#ifdef CONFIG_SUPERH64
-/*
- * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
- * happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is guaranteed by
- * virtue of L1_CACHE_BYTES, requiring this to only be special cased
- * for slab caches.
- */
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
#endif /* __ASM_SH_PAGE_H */
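
The surviving X2TLB case above keeps the split pte_low/pte_high representation; only the old sh64 long-long variant is dropped. A self-contained sketch of how that split representation packs and unpacks a 64-bit PTE value (fixed-width types used here for clarity):

#include <stdio.h>
#include <stdint.h>

/* 64-bit PTE stored as two 32-bit halves, as in the X2TLB case above. */
typedef struct { uint32_t pte_low, pte_high; } pte_t;

#define pte_val(x) ((x).pte_low | ((uint64_t)(x).pte_high << 32))
#define __pte(x)   ((pte_t){ (uint32_t)(x), (uint32_t)((uint64_t)(x) >> 32) })

int main(void)
{
	pte_t pte = __pte(0x123456789abcdef0ULL);

	printf("low  = 0x%08x\n", pte.pte_low);    /* 0x9abcdef0 */
	printf("high = 0x%08x\n", pte.pte_high);   /* 0x12345678 */
	printf("val  = 0x%016llx\n", (unsigned long long)pte_val(pte));
	return 0;
}
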
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
index bf1eb51c3ee5..08bff93927ff 100644
--- a/arch/sh/include/asm/pgtable-2level.h
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -2,7 +2,6 @@
#ifndef __ASM_SH_PGTABLE_2LEVEL_H
#define __ASM_SH_PGTABLE_2LEVEL_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 779260b721ca..82d74472dfcd 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -2,7 +2,6 @@
#ifndef __ASM_SH_PGTABLE_3LEVEL_H
#define __ASM_SH_PGTABLE_3LEVEL_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/*
@@ -40,13 +39,6 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
/* only used by the stubbed out hugetlb gup code, should never be called */
#define pud_page(pud) NULL
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
-}
-
#define pud_none(x) (!pud_val(x))
#define pud_present(x) (pud_val(x))
#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0)
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index cbd0f3c55a0c..27751e9470df 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -76,18 +76,10 @@ static inline unsigned long phys_addr_mask(void)
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
-#ifdef CONFIG_SUPERH32
#define VMALLOC_START (P3SEG)
-#else
-#define VMALLOC_START (0xf0000000)
-#endif
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
-#else
-#include <asm/pgtable_64.h>
-#endif
/*
* SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
@@ -159,15 +151,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
return __pte_access_permitted(pte, prot);
}
-#elif defined(CONFIG_SUPERH64)
-static inline bool pte_access_permitted(pte_t pte, bool write)
-{
- u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
-
- if (write)
- prot |= _PAGE_WRITE;
- return __pte_access_permitted(pte, prot);
-}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{
@@ -185,6 +168,4 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#include <asm-generic/pgtable.h>
-
#endif /* __ASM_SH_PGTABLE_H */
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 29274f0e428e..41be43e99cff 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -401,28 +401,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
-#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
-#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-#define __pgd_offset(address) pgd_index(address)
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define __pte_offset(address) pte_index(address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pmd_val(pmd);
+}
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_unmap(pte) do { } while (0)
+#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
deleted file mode 100644
index 1778bc5971e7..000000000000
--- a/arch/sh/include/asm/pgtable_64.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PGTABLE_64_H
-#define __ASM_SH_PGTABLE_64_H
-
-/*
- * include/asm-sh/pgtable_64.h
- *
- * This file contains the functions and defines necessary to modify and use
- * the SuperH page table tree.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-
-/*
- * Error outputs.
- */
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * Table setting routines. Used within arch/mm only.
- */
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-
-static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
-{
- unsigned long long x = ((unsigned long long) pteval.pte_low);
- unsigned long long *xp = (unsigned long long *) pteptr;
- /*
- * Sign-extend based on NPHYS.
- */
- *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * PGD defines. Top level.
- */
-
-/* To find an entry in a generic PGD. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define __pgd_offset(address) pgd_index(address)
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* To find an entry in a kernel PGD. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/*
- * PMD level access routines. Same notes as above.
- */
-#define _PMD_EMPTY 0x0
-/* Either the PMD is empty or present, it's not paged out */
-#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
-#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
-#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
-#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pmd_page_vaddr(pmd_entry) \
- ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
-
-#define pmd_page(pmd) \
- (virt_to_page(pmd_val(pmd)))
-
-/* PMD to PTE dereferencing */
-#define pte_index(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define __pte_offset(address) pte_index(address)
-
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_unmap(pte) do { } while (0)
-
-#ifndef __ASSEMBLY__
-/*
- * PTEL coherent flags.
- * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
- */
-/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
- positions, to avoid expensive bit shuffling on every refill. The remaining
- bits are used for s/w purposes and masked out on each refill.
-
- Note, the PTE slots are used to hold data of type swp_entry_t when a page is
- swapped out. Only the _PAGE_PRESENT flag is significant when the page is
- swapped out, and it must be placed so that it doesn't overlap either the
- type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
- at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
- scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
- [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
- into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
-#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
-#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
-#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
-#define _PAGE_PRESENT 0x004 /* software: page referenced */
-#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
-#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
-#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
-#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
-#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
-#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
-#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
-#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
-#define _PAGE_ACCESSED 0x800 /* software: page referenced */
-
-/* Wrapper for extended mode pgprot twiddling */
-#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
-
-/*
- * We can use the sign-extended bits in the PTEL to get 32 bits of
- * software flags. This works for now because no implementations uses
- * anything above the PPN field.
- */
-#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
-#define _PAGE_SPECIAL _PAGE_EXT(0x002)
-
-#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_SHARED | \
- _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
-
-/* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
-
-/*
- * HugeTLB support
- */
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE (_PAGE_SIZE0)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#define _PAGE_SZHUGE (_PAGE_SIZE1)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
-#endif
-
-/*
- * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
- * to make pte_mkhuge() happy.
- */
-#ifndef _PAGE_SZHUGE
-# define _PAGE_SZHUGE (0)
-#endif
-
-/*
- * Default flags for a Kernel page.
- * This is fundametally also SHARED because the main use of this define
- * (other than for PGD/PMD entries) is for the VMALLOC pool which is
- * contextless.
- *
- * _PAGE_EXECUTE is required for modules
- *
- */
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_EXECUTE | \
- _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SHARED)
-
-/* Default flags for a User page */
-#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
-
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SPECIAL)
-
-/*
- * We have full permissions (Read/Write/Execute/Shared).
- */
-#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_CACHABLE | _PAGE_ACCESSED)
-
-#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_SHARED)
-#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
-
-/*
- * We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
- * protection mode for the stack.
- */
-#define PAGE_COPY PAGE_EXECREAD
-
-#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ)
-#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
-#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \
- _PAGE_WRITE | _PAGE_EXECUTE)
-#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
-
-#define PAGE_KERNEL_NOCACHE \
- __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_EXECUTE | _PAGE_ACCESSED | \
- _PAGE_DIRTY | _PAGE_SHARED)
-
-/* Make it a device mapping for maximum safety (e.g. for mapping device
- registers into user-space via /dev/map). */
-#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
-#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
-
-/*
- * PTE level access routines.
- *
- * Note1:
- * It's the tree walk leaf. This is physical address to be stored.
- *
- * Note 2:
- * Regarding the choice of _PTE_EMPTY:
-
- We must choose a bit pattern that cannot be valid, whether or not the page
- is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
- out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
- left for us to select. If we force bit[7]==0 when swapped out, we could use
- the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
- we force bit[7]==1 when swapped out, we can use all zeroes to indicate
- empty. This is convenient, because the page tables get cleared to zero
- when they are allocated.
-
- */
-#define _PTE_EMPTY 0x0
-#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
-#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
-
-/*
- * Some definitions to translate between mem_map, PTEs, and page
- * addresses:
- */
-
-/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE. Get the absolute physical address, make
- * a relative physical address and translate it to an index.
- */
-#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
- __MEMORY_START) >> PAGE_SHIFT)
-
-/*
- * Given a PTE, return the "struct page *".
- */
-#define pte_page(x) (mem_map + pte_pagenr(x))
-
-/*
- * Return number of (down rounded) MB corresponding to x pages.
- */
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-
-/*
- * The following have defined behavior only work if pte_present() is true.
- */
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
-
-/*
- * Conversion functions: convert a page and protection to a page entry.
- *
- * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
- */
-#define mk_pte(page,pgprot) \
-({ \
- pte_t __pte; \
- \
- set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
- __MEMORY_START | pgprot_val((pgprot)))); \
- __pte; \
-})
-
-/*
- * This takes a (absolute) physical page address that is used
- * by the remapping functions
- */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
-
-/* Encode and decode a swap entry */
-#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
-#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#endif /* !__ASSEMBLY__ */
-
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#endif /* __ASM_SH_PGTABLE_64_H */
diff --git a/arch/sh/include/asm/posix_types.h b/arch/sh/include/asm/posix_types.h
index 0d670fd94fe7..f8982b757c33 100644
--- a/arch/sh/include/asm/posix_types.h
+++ b/arch/sh/include/asm/posix_types.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-# ifdef CONFIG_SUPERH32
-# include <asm/posix_types_32.h>
-# else
-# include <asm/posix_types_64.h>
-# endif
+#include <asm/posix_types_32.h>
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 6fbf8c80e498..3820d698846e 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -39,9 +39,6 @@ enum cpu_type {
/* SH4AL-DSP types */
CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
- /* SH-5 types */
- CPU_SH5_101, CPU_SH5_103,
-
/* Unknown subtype */
CPU_SH_NONE
};
@@ -53,7 +50,6 @@ enum cpu_family {
CPU_FAMILY_SH4,
CPU_FAMILY_SH4A,
CPU_FAMILY_SH4AL_DSP,
- CPU_FAMILY_SH5,
CPU_FAMILY_UNKNOWN,
};
@@ -167,18 +163,12 @@ int vsyscall_init(void);
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
-#elif defined(CONFIG_SUPERH32)
-#define instruction_size(insn) (2)
#else
-#define instruction_size(insn) (4)
+#define instruction_size(insn) (2)
#endif
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_SUPERH32
-# include <asm/processor_32.h>
-#else
-# include <asm/processor_64.h>
-#endif
+#include <asm/processor_32.h>
#endif /* __ASM_SH_PROCESSOR_H */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 0e0ecc0132e3..d44409413418 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -171,7 +171,7 @@ static __inline__ void enable_fpu(void)
#define thread_saved_pc(tsk) (tsk->thread.pc)
void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs);
+ struct pt_regs *regs, const char *loglvl);
#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
deleted file mode 100644
index 53efc9f51ef1..000000000000
--- a/arch/sh/include/asm/processor_64.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PROCESSOR_64_H
-#define __ASM_SH_PROCESSOR_64_H
-
-/*
- * include/asm-sh/processor_64.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <cpu/registers.h>
-
-#endif
-
-/*
- * User space process size: 2GB - 4k.
- */
-#define TASK_SIZE 0x7ffff000UL
-
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
-
-/*
- * Bit of SR register
- *
- * FD-bit:
- * When it's set, it means the processor doesn't have right to use FPU,
- * and it results exception when the floating operation is executed.
- *
- * IMASK-bit:
- * Interrupt level mask
- *
- * STEP-bit:
- * Single step bit
- *
- */
-#if defined(CONFIG_SH64_SR_WATCH)
-#define SR_MMU 0x84000000
-#else
-#define SR_MMU 0x80000000
-#endif
-
-#define SR_IMASK 0x000000f0
-#define SR_FD 0x00008000
-#define SR_SSTEP 0x08000000
-
-#ifndef __ASSEMBLY__
-
-/*
- * FPU structure and data : require 8-byte alignment as we need to access it
- with fld.p, fst.p
- */
-
-struct sh_fpu_hard_struct {
- unsigned long fp_regs[64];
- unsigned int fpscr;
- /* long status; * software status information */
-};
-
-/* Dummy fpu emulator */
-struct sh_fpu_soft_struct {
- unsigned long fp_regs[64];
- unsigned int fpscr;
- unsigned char lookahead;
- unsigned long entry_pc;
-};
-
-union thread_xstate {
- struct sh_fpu_hard_struct hardfpu;
- struct sh_fpu_soft_struct softfpu;
- /*
- * The structure definitions only produce 32 bit alignment, yet we need
- * to access them using 64 bit load/store as well.
- */
- unsigned long long alignment_dummy;
-};
-
-struct thread_struct {
- unsigned long sp;
- unsigned long pc;
-
- /* Various thread flags, see SH_THREAD_xxx */
- unsigned long flags;
-
- /* This stores the address of the pt_regs built during a context
- switch, or of the register save area built for a kernel mode
- exception. It is used for backtracing the stack of a sleeping task
- or one that traps in kernel mode. */
- struct pt_regs *kregs;
- /* This stores the address of the pt_regs constructed on entry from
- user mode. It is a fixed value over the lifetime of a process, or
- NULL for a kernel thread. */
- struct pt_regs *uregs;
-
- unsigned long address;
- /* Hardware debugging registers may come here */
-
- /* floating point info */
- union thread_xstate *xstate;
-
- /*
- * fpu_counter contains the number of consecutive context switches
- * that the FPU is used. If this is over a threshold, the lazy fpu
- * saving becomes unlazy to save the trap. This is an unsigned char
- * so that after 256 times the counter wraps and the behavior turns
- * lazy again; this to deal with bursty apps that only use FPU for
- * a short time
- */
- unsigned char fpu_counter;
-};
-
-#define INIT_MMAP \
-{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-
-#define INIT_THREAD { \
- .sp = sizeof(init_stack) + \
- (long) &init_stack, \
- .pc = 0, \
- .kregs = &fake_swapper_regs, \
- .uregs = NULL, \
- .address = 0, \
- .flags = 0, \
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-#define SR_USER (SR_MMU | SR_FD)
-
-#define start_thread(_regs, new_pc, new_sp) \
- _regs->sr = SR_USER; /* User mode. */ \
- _regs->pc = new_pc - 4; /* Compensate syscall exit */ \
- _regs->pc |= 1; /* Set SHmedia ! */ \
- _regs->regs[18] = 0; \
- _regs->regs[15] = new_sp
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/*
- * FPU lazy state save handling.
- */
-
-static inline void disable_fpu(void)
-{
- unsigned long long __dummy;
-
- /* Set FD flag in SR */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (SR_FD));
-}
-
-static inline void enable_fpu(void)
-{
- unsigned long long __dummy;
-
- /* Clear out FD flag in SR */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (~SR_FD));
-}
-
-/* Round to nearest, no exceptions on inexact, overflow, underflow,
- zero-divide, invalid. Configure option for whether to flush denorms to
- zero, or except if a denorm is encountered. */
-#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
-#define FPSCR_INIT 0x00040000
-#else
-#define FPSCR_INIT 0x00000000
-#endif
-
-#ifdef CONFIG_SH_FPU
-/* Initialise the FP state of a task */
-void fpinit(struct sh_fpu_hard_struct *fpregs);
-#else
-#define fpinit(fpregs) do { } while (0)
-#endif
-
-extern struct task_struct *last_task_used_math;
-
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (tsk->thread.pc)
-
-extern unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) ((tsk)->thread.pc)
-#define KSTK_ESP(tsk) ((tsk)->thread.sp)
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_PROCESSOR_64_H */
diff --git a/arch/sh/include/asm/ptrace_64.h b/arch/sh/include/asm/ptrace_64.h
deleted file mode 100644
index 6ee08229b433..000000000000
--- a/arch/sh/include/asm/ptrace_64.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PTRACE_64_H
-#define __ASM_SH_PTRACE_64_H
-
-#include <uapi/asm/ptrace_64.h>
-
-
-#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->regs[3];
-}
-
-#endif /* __ASM_SH_PTRACE_64_H */
diff --git a/arch/sh/include/asm/string.h b/arch/sh/include/asm/string.h
index 84fc5ed9c5b3..0f6331ec28ed 100644
--- a/arch/sh/include/asm/string.h
+++ b/arch/sh/include/asm/string.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_SUPERH32
-# include <asm/string_32.h>
-#else
-# include <asm/string_64.h>
-#endif
+#include <asm/string_32.h>
diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h
deleted file mode 100644
index d51d6150a4e2..000000000000
--- a/arch/sh/include/asm/string_64.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_STRING_64_H
-#define __ASM_SH_STRING_64_H
-
-#ifdef __KERNEL__
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *__s, int __c, size_t __count);
-
-#define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *dest, const void *src, size_t count);
-
-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *__dest, const char *__src);
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_SH_STRING_64_H */
diff --git a/arch/sh/include/asm/switch_to.h b/arch/sh/include/asm/switch_to.h
index 9eec80ab5aa2..bd139bcdeec1 100644
--- a/arch/sh/include/asm/switch_to.h
+++ b/arch/sh/include/asm/switch_to.h
@@ -4,13 +4,4 @@
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
-#ifndef __ASM_SH_SWITCH_TO_H
-#define __ASM_SH_SWITCH_TO_H
-
-#ifdef CONFIG_SUPERH32
-# include <asm/switch_to_32.h>
-#else
-# include <asm/switch_to_64.h>
-#endif
-
-#endif /* __ASM_SH_SWITCH_TO_H */
+#include <asm/switch_to_32.h>
diff --git a/arch/sh/include/asm/switch_to_64.h b/arch/sh/include/asm/switch_to_64.h
deleted file mode 100644
index 2dbf2311669f..000000000000
--- a/arch/sh/include/asm/switch_to_64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_SWITCH_TO_64_H
-#define __ASM_SH_SWITCH_TO_64_H
-
-struct thread_struct;
-struct task_struct;
-
-/*
- * switch_to() should switch tasks to task nr n, first
- */
-struct task_struct *sh64_switch_to(struct task_struct *prev,
- struct thread_struct *prev_thread,
- struct task_struct *next,
- struct thread_struct *next_thread);
-
-#define switch_to(prev,next,last) \
-do { \
- if (last_task_used_math != next) { \
- struct pt_regs *regs = next->thread.uregs; \
- if (regs) regs->sr |= SR_FD; \
- } \
- last = sh64_switch_to(prev, &prev->thread, next, \
- &next->thread); \
-} while (0)
-
-
-#endif /* __ASM_SH_SWITCH_TO_64_H */
diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h
index 90ba00002626..570699eb0e58 100644
--- a/arch/sh/include/asm/syscall.h
+++ b/arch/sh/include/asm/syscall.h
@@ -4,10 +4,6 @@
extern const unsigned long sys_call_table[];
-#ifdef CONFIG_SUPERH32
-# include <asm/syscall_32.h>
-#else
-# include <asm/syscall_64.h>
-#endif
+#include <asm/syscall_32.h>
#endif /* __ASM_SH_SYSCALL_H */
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
deleted file mode 100644
index 72efcbc76f91..000000000000
--- a/arch/sh/include/asm/syscall_64.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_SYSCALL_64_H
-#define __ASM_SH_SYSCALL_64_H
-
-#include <uapi/linux/audit.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-
-/* The system call number is given by the user in R9 */
-static inline long syscall_get_nr(struct task_struct *task,
- struct pt_regs *regs)
-{
- return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L;
-}
-
-static inline void syscall_rollback(struct task_struct *task,
- struct pt_regs *regs)
-{
- /*
- * XXX: This needs some thought. On SH we don't
- * save away the original R9 value anywhere.
- */
-}
-
-static inline long syscall_get_error(struct task_struct *task,
- struct pt_regs *regs)
-{
- return IS_ERR_VALUE(regs->regs[9]) ? regs->regs[9] : 0;
-}
-
-static inline long syscall_get_return_value(struct task_struct *task,
- struct pt_regs *regs)
-{
- return regs->regs[9];
-}
-
-static inline void syscall_set_return_value(struct task_struct *task,
- struct pt_regs *regs,
- int error, long val)
-{
- if (error)
- regs->regs[9] = -error;
- else
- regs->regs[9] = val;
-}
-
-static inline void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs,
- unsigned long *args)
-{
- memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
-}
-
-static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs,
- const unsigned long *args)
-{
- memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
-}
-
-static inline int syscall_get_arch(struct task_struct *task)
-{
- int arch = AUDIT_ARCH_SH;
-
-#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
-#endif
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- arch |= __AUDIT_ARCH_LE;
-#endif
-
- return arch;
-}
-#endif /* __ASM_SH_SYSCALL_64_H */
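
The removed helpers above encode the usual Linux syscall return convention: the result register (R9 on sh64) holds either a value or a negated errno, and an IS_ERR_VALUE-style range check tells the two apart. A standalone sketch of that convention, with a simplified IS_ERR_VALUE:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

struct regs { long r9; };    /* stand-in for the sh64 result register */

static long syscall_get_error(struct regs *regs)
{
	return IS_ERR_VALUE(regs->r9) ? regs->r9 : 0;
}

static void syscall_set_return_value(struct regs *regs, int error, long val)
{
	regs->r9 = error ? -error : val;
}

int main(void)
{
	struct regs regs;

	syscall_set_return_value(&regs, ENOENT, 0);
	printf("error case:   %ld\n", syscall_get_error(&regs));  /* -2 */

	syscall_set_return_value(&regs, 0, 42);
	printf("success case: %ld\n", syscall_get_error(&regs));  /* 0 */
	return 0;
}
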
diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h
index 995ef046232c..387105316d28 100644
--- a/arch/sh/include/asm/syscalls.h
+++ b/arch/sh/include/asm/syscalls.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_SYSCALLS_H
#define __ASM_SH_SYSCALLS_H
-#ifdef __KERNEL__
-
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off);
@@ -11,11 +9,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
-#ifdef CONFIG_SUPERH32
-# include <asm/syscalls_32.h>
-#else
-# include <asm/syscalls_64.h>
-#endif
+#include <asm/syscalls_32.h>
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_H */
diff --git a/arch/sh/include/asm/syscalls_64.h b/arch/sh/include/asm/syscalls_64.h
deleted file mode 100644
index df42656cebea..000000000000
--- a/arch/sh/include/asm/syscalls_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_SYSCALLS_64_H
-#define __ASM_SH_SYSCALLS_64_H
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/types.h>
-
-struct pt_regs;
-
-/* Misc syscall related bits */
-asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
-asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_SH_SYSCALLS_64_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index cf5c792bf70b..6404be69d5fa 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -70,9 +70,7 @@ register unsigned long current_stack_pointer asm("r15") __used;
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
-#if defined(CONFIG_SUPERH64)
- __asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti));
-#elif defined(CONFIG_CPU_HAS_SR_RB)
+#if defined(CONFIG_CPU_HAS_SR_RB)
__asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti));
#else
unsigned long __dummy;
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index bc77f3dd4261..360f713d009b 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -2,10 +2,6 @@
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H
-#ifdef CONFIG_SUPERH64
-# include <asm/tlb_64.h>
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/pagemap.h>
@@ -14,7 +10,7 @@
#include <asm-generic/tlb.h>
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
+#if defined(CONFIG_CPU_SH4)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
diff --git a/arch/sh/include/asm/tlb_64.h b/arch/sh/include/asm/tlb_64.h
deleted file mode 100644
index 59fa0a23dad7..000000000000
--- a/arch/sh/include/asm/tlb_64.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * include/asm-sh/tlb_64.h
- *
- * Copyright (C) 2003 Paul Mundt
- */
-#ifndef __ASM_SH_TLB_64_H
-#define __ASM_SH_TLB_64_H
-
-/* ITLB defines */
-#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
-#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
-
-/* DTLB defines */
-#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
-#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
-
-#ifndef __ASSEMBLY__
-
-/**
- * for_each_dtlb_entry - Iterate over free (non-wired) DTLB entries
- *
- * @tlb: TLB entry
- */
-#define for_each_dtlb_entry(tlb) \
- for (tlb = cpu_data->dtlb.first; \
- tlb <= cpu_data->dtlb.last; \
- tlb += cpu_data->dtlb.step)
-
-/**
- * for_each_itlb_entry - Iterate over free (non-wired) ITLB entries
- *
- * @tlb: TLB entry
- */
-#define for_each_itlb_entry(tlb) \
- for (tlb = cpu_data->itlb.first; \
- tlb <= cpu_data->itlb.last; \
- tlb += cpu_data->itlb.step)
-
-/**
- * __flush_tlb_slot - Flushes TLB slot @slot.
- *
- * @slot: Address of TLB slot.
- */
-static inline void __flush_tlb_slot(unsigned long long slot)
-{
- __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
-}
-
-#ifdef CONFIG_MMU
-/* arch/sh64/mm/tlb.c */
-int sh64_tlb_init(void);
-unsigned long long sh64_next_free_dtlb_entry(void);
-unsigned long long sh64_get_wired_dtlb_entry(void);
-int sh64_put_wired_dtlb_entry(unsigned long long entry);
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
- unsigned long asid, unsigned long paddr);
-void sh64_teardown_tlb_slot(unsigned long long config_addr);
-#else
-#define sh64_tlb_init() do { } while (0)
-#define sh64_next_free_dtlb_entry() (0)
-#define sh64_get_wired_dtlb_entry() (0)
-#define sh64_put_wired_dtlb_entry(entry) do { } while (0)
-#define sh64_setup_tlb_slot(conf, virt, asid, phys) do { } while (0)
-#define sh64_teardown_tlb_slot(addr) do { } while (0)
-#endif /* CONFIG_MMU */
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_TLB_64_H */
diff --git a/arch/sh/include/asm/traps.h b/arch/sh/include/asm/traps.h
index 8844ed0c0fde..ba831bc7e08f 100644
--- a/arch/sh/include/asm/traps.h
+++ b/arch/sh/include/asm/traps.h
@@ -4,11 +4,7 @@
#include <linux/compiler.h>
-#ifdef CONFIG_SUPERH32
# include <asm/traps_32.h>
-#else
-# include <asm/traps_64.h>
-#endif
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
diff --git a/arch/sh/include/asm/traps_64.h b/arch/sh/include/asm/traps_64.h
deleted file mode 100644
index f28db6dfbe45..000000000000
--- a/arch/sh/include/asm/traps_64.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_TRAPS_64_H
-#define __ASM_SH_TRAPS_64_H
-
-#include <cpu/registers.h>
-
-extern void phys_stext(void);
-
-#define lookup_exception_vector() \
-({ \
- unsigned long _vec; \
- \
- __asm__ __volatile__ ( \
- "getcon " __EXPEVT ", %0\n\t" \
- : "=r" (_vec) \
- ); \
- \
- _vec; \
-})
-
-static inline void trigger_address_error(void)
-{
- phys_stext();
-}
-
-#define BUILD_TRAP_HANDLER(name) \
-asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
-#define TRAP_HANDLER_DECL
-
-#endif /* __ASM_SH_TRAPS_64_H */
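
BUILD_TRAP_HANDLER(), used in the traps.h hunk above and defined (for sh64) in the file just removed, is a token-pasting macro that turns a bare name into a full handler prototype. A compact standalone sketch of the pattern, with asmlinkage dropped and the signature borrowed from the removed 64-bit definition (the 32-bit variant differs):

#include <stdio.h>

struct pt_regs { unsigned long pc; };

/* Expand a name into a trap-handler prototype via token pasting. */
#define BUILD_TRAP_HANDLER(name) \
	void name##_trap_handler(unsigned int vec, struct pt_regs *regs)

BUILD_TRAP_HANDLER(address_error);   /* declaration */

BUILD_TRAP_HANDLER(address_error)    /* definition */
{
	printf("trap 0x%x at pc=0x%lx\n", vec, regs->pc);
}

int main(void)
{
	struct pt_regs regs = { .pc = 0x8c001000UL };

	address_error_trap_handler(0x0e0, &regs);
	return 0;
}
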
diff --git a/arch/sh/include/asm/types.h b/arch/sh/include/asm/types.h
index df96c511bb6e..68eb24ad2013 100644
--- a/arch/sh/include/asm/types.h
+++ b/arch/sh/include/asm/types.h
@@ -9,13 +9,8 @@
*/
#ifndef __ASSEMBLY__
-#ifdef CONFIG_SUPERH32
typedef u16 insn_size_t;
typedef u32 reg_size_t;
-#else
-typedef u32 insn_size_t;
-typedef u64 reg_size_t;
-#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TYPES_H */
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 5fe751ad7582..73f3b48d4a34 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -96,11 +96,7 @@ struct __large_struct { unsigned long buf[100]; };
__pu_err; \
})
-#ifdef CONFIG_SUPERH32
# include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
extern long strncpy_from_user(char *dest, const char __user *src, long count);
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
deleted file mode 100644
index 0c19d02dc566..000000000000
--- a/arch/sh/include/asm/uaccess_64.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_UACCESS_64_H
-#define __ASM_SH_UACCESS_64_H
-
-/*
- * include/asm-sh/uaccess_64.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- * User space memory access functions
- *
- * Copyright (C) 1999 Niibe Yutaka
- *
- * Based on:
- * MIPS implementation version 1.15 by
- * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
- * and i386 version.
- */
-
-#define __get_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- x = 0; \
- switch (size) { \
- case 1: \
- retval = __get_user_asm_b((void *)&x, \
- (long)ptr); \
- break; \
- case 2: \
- retval = __get_user_asm_w((void *)&x, \
- (long)ptr); \
- break; \
- case 4: \
- retval = __get_user_asm_l((void *)&x, \
- (long)ptr); \
- break; \
- case 8: \
- retval = __get_user_asm_q((void *)&x, \
- (long)ptr); \
- break; \
- default: \
- __get_user_unknown(); \
- break; \
- } \
-} while (0)
-
-extern long __get_user_asm_b(void *, long);
-extern long __get_user_asm_w(void *, long);
-extern long __get_user_asm_l(void *, long);
-extern long __get_user_asm_q(void *, long);
-extern void __get_user_unknown(void);
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- retval = __put_user_asm_b((void *)&x, \
- (__force long)ptr); \
- break; \
- case 2: \
- retval = __put_user_asm_w((void *)&x, \
- (__force long)ptr); \
- break; \
- case 4: \
- retval = __put_user_asm_l((void *)&x, \
- (__force long)ptr); \
- break; \
- case 8: \
- retval = __put_user_asm_q((void *)&x, \
- (__force long)ptr); \
- break; \
- default: \
- __put_user_unknown(); \
- } \
-} while (0)
-
-extern long __put_user_asm_b(void *, long);
-extern long __put_user_asm_w(void *, long);
-extern long __put_user_asm_l(void *, long);
-extern long __put_user_asm_q(void *, long);
-extern void __put_user_unknown(void);
-
-#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 9c7d9d9999c6..d6e126250136 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -1,9 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-# ifdef CONFIG_SUPERH32
-# include <asm/unistd_32.h>
-# else
-# include <asm/unistd_64.h>
-# endif
+#include <asm/unistd_32.h>
#define NR_syscalls __NR_syscalls
diff --git a/arch/sh/include/asm/user.h b/arch/sh/include/asm/user.h
index e97f2efed527..7dfd3f6461e6 100644
--- a/arch/sh/include/asm/user.h
+++ b/arch/sh/include/asm/user.h
@@ -28,19 +28,12 @@
* to write an integer number of pages.
*/
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-struct user_fpu_struct {
- unsigned long fp_regs[32];
- unsigned int fpscr;
-};
-#else
struct user_fpu_struct {
unsigned long fp_regs[16];
unsigned long xfp_regs[16];
unsigned long fpscr;
unsigned long fpul;
};
-#endif
struct user {
struct pt_regs regs; /* entire machine state */
diff --git a/arch/sh/include/asm/vermagic.h b/arch/sh/include/asm/vermagic.h
index 13d8eaa9188e..5b2057c39170 100644
--- a/arch/sh/include/asm/vermagic.h
+++ b/arch/sh/include/asm/vermagic.h
@@ -10,8 +10,6 @@
# define MODULE_PROC_FAMILY "SH3LE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4LE "
-# elif defined CONFIG_CPU_SH5
-# define MODULE_PROC_FAMILY "SH5LE "
# else
# error unknown processor family
# endif
@@ -22,8 +20,6 @@
# define MODULE_PROC_FAMILY "SH3BE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4BE "
-# elif defined CONFIG_CPU_SH5
-# define MODULE_PROC_FAMILY "SH5BE "
# else
# error unknown processor family
# endif
diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
index 992955685874..8d96c4f9b35b 100644
--- a/arch/sh/include/asm/vmlinux.lds.h
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -15,12 +15,4 @@
#define DWARF_EH_FRAME
#endif
-#ifdef CONFIG_SUPERH64
-#define EXTRA_TEXT \
- *(.text64) \
- *(.text..SHmedia32)
-#else
-#define EXTRA_TEXT
-#endif
-
#endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/addrspace.h b/arch/sh/include/cpu-sh5/cpu/addrspace.h
deleted file mode 100644
index 6dd1e72f31b2..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/addrspace.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_ADDRSPACE_H
-#define __ASM_SH_CPU_SH5_ADDRSPACE_H
-
-#define PHYS_PERIPHERAL_BLOCK 0x09000000
-#define PHYS_DMAC_BLOCK 0x0e000000
-#define PHYS_PCI_BLOCK 0x60000000
-#define PHYS_EMI_BLOCK 0xff000000
-
-/* No segmentation.. */
-
-#endif /* __ASM_SH_CPU_SH5_ADDRSPACE_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/cache.h b/arch/sh/include/cpu-sh5/cpu/cache.h
deleted file mode 100644
index ef49538f386f..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/cache.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_CACHE_H
-#define __ASM_SH_CPU_SH5_CACHE_H
-
-/*
- * include/asm-sh/cpu-sh5/cache.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- */
-
-#define L1_CACHE_SHIFT 5
-
-/* Valid and Dirty bits */
-#define SH_CACHE_VALID (1LL<<0)
-#define SH_CACHE_UPDATED (1LL<<57)
-
-/* Unimplemented compat bits.. */
-#define SH_CACHE_COMBINED 0
-#define SH_CACHE_ASSOC 0
-
-/* Cache flags */
-#define SH_CACHE_MODE_WT (1LL<<0)
-#define SH_CACHE_MODE_WB (1LL<<1)
-
-/*
- * Control Registers.
- */
-#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
-#define ICCR_REG0 0 /* Register 0 offset */
-#define ICCR_REG1 1 /* Register 1 offset */
-#define ICCR0 ICCR_BASE+ICCR_REG0
-#define ICCR1 ICCR_BASE+ICCR_REG1
-
-#define ICCR0_OFF 0x0 /* Set ICACHE off */
-#define ICCR0_ON 0x1 /* Set ICACHE on */
-#define ICCR0_ICI 0x2 /* Invalidate all in IC */
-
-#define ICCR1_NOLOCK 0x0 /* Set No Locking */
-
-#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
-#define OCCR_REG0 0 /* Register 0 offset */
-#define OCCR_REG1 1 /* Register 1 offset */
-#define OCCR0 OCCR_BASE+OCCR_REG0
-#define OCCR1 OCCR_BASE+OCCR_REG1
-
-#define OCCR0_OFF 0x0 /* Set OCACHE off */
-#define OCCR0_ON 0x1 /* Set OCACHE on */
-#define OCCR0_OCI 0x2 /* Invalidate all in OC */
-#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
-#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
-
-#define OCCR1_NOLOCK 0x0 /* Set No Locking */
-
-/*
- * SH-5
- * A bit of description here, for neff=32.
- *
- * |<--- tag (19 bits) --->|
- * +-----------------------------+-----------------+------+----------+------+
- * | | | ways |set index |offset|
- * +-----------------------------+-----------------+------+----------+------+
- * ^ 2 bits 8 bits 5 bits
- * +- Bit 31
- *
- * Cacheline size is based on offset: 5 bits = 32 bytes per line
- * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
- * have a broader space for registers. These are outlined by
- * CACHE_?C_*_STEP below.
- *
- */
-
-/* Instruction cache */
-#define CACHE_IC_ADDRESS_ARRAY 0x01000000
-
-/* Operand Cache */
-#define CACHE_OC_ADDRESS_ARRAY 0x01800000
-
-/* These declarations relate to cache 'synonyms' in the operand cache. A
- 'synonym' occurs where effective address bits overlap between those used for
- indexing the cache sets and those passed to the MMU for translation. In the
- case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
-
-#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
-#define CACHE_OC_SYN_SHIFT 12
-/* Mask to select synonym bit(s) */
-#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
-
-/*
- * Instruction cache can't be invalidated based on physical addresses.
- * No Instruction Cache defines required, then.
- */
-
-#endif /* __ASM_SH_CPU_SH5_CACHE_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/irq.h b/arch/sh/include/cpu-sh5/cpu/irq.h
deleted file mode 100644
index 4aa6ac54b9d6..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/irq.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_IRQ_H
-#define __ASM_SH_CPU_SH5_IRQ_H
-
-/*
- * include/asm-sh/cpu-sh5/irq.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- */
-
-
-/*
- * Encoded IRQs are not considered worth to be supported.
- * Main reason is that there's no per-encoded-interrupt
- * enable/disable mechanism (as there was in SH3/4).
- * An all enabled/all disabled is worth only if there's
- * a cascaded IC to disable/enable/ack on. Until such
- * IC is available there's no such support.
- *
- * Presumably Encoded IRQs may use extra IRQs beyond 64,
- * below. Some logic must be added to cope with IRQ_IRL?
- * in an exclusive way.
- *
- * Priorities are set at Platform level, when IRQ_IRL0-3
- * are set to 0 Encoding is allowed. Otherwise it's not
- * allowed.
- */
-
-/* Independent IRQs */
-#define IRQ_IRL0 0
-#define IRQ_IRL1 1
-#define IRQ_IRL2 2
-#define IRQ_IRL3 3
-
-#define IRQ_INTA 4
-#define IRQ_INTB 5
-#define IRQ_INTC 6
-#define IRQ_INTD 7
-
-#define IRQ_SERR 12
-#define IRQ_ERR 13
-#define IRQ_PWR3 14
-#define IRQ_PWR2 15
-#define IRQ_PWR1 16
-#define IRQ_PWR0 17
-
-#define IRQ_DMTE0 18
-#define IRQ_DMTE1 19
-#define IRQ_DMTE2 20
-#define IRQ_DMTE3 21
-#define IRQ_DAERR 22
-
-#define IRQ_TUNI0 32
-#define IRQ_TUNI1 33
-#define IRQ_TUNI2 34
-#define IRQ_TICPI2 35
-
-#define IRQ_ATI 36
-#define IRQ_PRI 37
-#define IRQ_CUI 38
-
-#define IRQ_ERI 39
-#define IRQ_RXI 40
-#define IRQ_BRI 41
-#define IRQ_TXI 42
-
-#define IRQ_ITI 63
-
-#define NR_INTC_IRQS 64
-
-#ifdef CONFIG_SH_CAYMAN
-#define NR_EXT_IRQS 32
-#define START_EXT_IRQS 64
-
-/* PCI bus 2 uses encoded external interrupts on the Cayman board */
-#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
-#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
-#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
-#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
-
-#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
-#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
-
-#define IRQ_CFCARD (START_EXT_IRQS + 7)
-#define IRQ_PCMCIA (0)
-
-#else
-#define NR_EXT_IRQS 0
-#endif
-
-/* Default IRQs, fixed */
-#define TIMER_IRQ IRQ_TUNI0
-#define RTC_IRQ IRQ_CUI
-
-/* Default Priorities, Platform may choose differently */
-#define NO_PRIORITY 0 /* Disabled */
-#define TIMER_PRIORITY 2
-#define RTC_PRIORITY TIMER_PRIORITY
-#define SCIF_PRIORITY 3
-#define INTD_PRIORITY 3
-#define IRL3_PRIORITY 4
-#define INTC_PRIORITY 6
-#define IRL2_PRIORITY 7
-#define INTB_PRIORITY 9
-#define IRL1_PRIORITY 10
-#define INTA_PRIORITY 12
-#define IRL0_PRIORITY 13
-#define TOP_PRIORITY 15
-
-extern int intc_evt_to_irq[(0xE20/0x20)+1];
-extern int platform_int_priority[NR_INTC_IRQS];
-
-#endif /* __ASM_SH_CPU_SH5_IRQ_H */
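The Cayman block above defines its external IRQs as plain offsets from START_EXT_IRQS, which itself starts right after the 64 on-chip INTC vectors. The small sketch below, using copies of those constants, just shows the concrete numbers the macros expand to; it is an illustration only.

#include <stdio.h>

/* Copies of the removed Cayman IRQ defines, for illustration only. */
#define NR_INTC_IRQS   64
#define START_EXT_IRQS 64

#define IRQ_P2INTA    (START_EXT_IRQS + (3*8) + 0)
#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
#define IRQ_CFCARD    (START_EXT_IRQS + 7)

int main(void)
{
	/* External (board-level) IRQs are numbered right after the 64
	 * on-chip INTC vectors, so every define is NR_INTC_IRQS plus
	 * a small board-specific offset. */
	printf("IRQ_P2INTA=%d I8042_KBD_IRQ=%d IRQ_CFCARD=%d\n",
	       IRQ_P2INTA, I8042_KBD_IRQ, IRQ_CFCARD);
	return 0;
}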
diff --git a/arch/sh/include/cpu-sh5/cpu/mmu_context.h b/arch/sh/include/cpu-sh5/cpu/mmu_context.h
deleted file mode 100644
index 23c53be945b7..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/mmu_context.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
-#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
-
-/* Common defines */
-#define TLB_STEP 0x00000010
-#define TLB_PTEH 0x00000000
-#define TLB_PTEL 0x00000008
-
-/* PTEH defines */
-#define PTEH_ASID_SHIFT 2
-#define PTEH_VALID 0x0000000000000001
-#define PTEH_SHARED 0x0000000000000002
-#define PTEH_MATCH_ASID 0x00000000000003ff
-
-#ifndef __ASSEMBLY__
-/* This has to be a common function because the next-location-to-fill
- * information is shared. */
-extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
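The PTEH field defines removed above are used by shifting the ASID into its field and OR-ing in the valid bit. The sketch below shows roughly how such a PTEH value is assembled; the virtual address and ASID are arbitrary example values, and the combination loosely mirrors the removed SH-5 TLB setup code rather than reproducing it.

#include <stdio.h>

/* Copies of the removed PTEH defines, for illustration only. */
#define PTEH_ASID_SHIFT 2
#define PTEH_VALID      0x0000000000000001ULL
#define PTEH_MATCH_ASID 0x00000000000003ffULL

int main(void)
{
	unsigned long long vpn  = 0x0000000000400000ULL;   /* example virtual page address */
	unsigned long long asid = 0x2aULL & PTEH_MATCH_ASID; /* 10-bit address-space ID */

	/* Fold the ASID into its field and mark the entry valid, roughly
	 * the way the removed SH-5 TLB setup path combined these constants. */
	unsigned long long pteh = vpn | (asid << PTEH_ASID_SHIFT) | PTEH_VALID;

	printf("PTEH = 0x%016llx\n", pteh);
	return 0;
}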
diff --git a/arch/sh/include/cpu-sh5/cpu/registers.h b/arch/sh/include/cpu-sh5/cpu/registers.h
deleted file mode 100644
index 372c1e1978b3..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/registers.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_REGISTERS_H
-#define __ASM_SH_CPU_SH5_REGISTERS_H
-
-/*
- * include/asm-sh/cpu-sh5/registers.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 Richard Curnow
- */
-
-#ifdef __ASSEMBLY__
-/* =====================================================================
-**
-** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
-** Assigns symbolic names to control & target registers.
-*/
-
-/*
- * Define some useful aliases for control registers.
- */
-#define SR cr0
-#define SSR cr1
-#define PSSR cr2
- /* cr3 UNDEFINED */
-#define INTEVT cr4
-#define EXPEVT cr5
-#define PEXPEVT cr6
-#define TRA cr7
-#define SPC cr8
-#define PSPC cr9
-#define RESVEC cr10
-#define VBR cr11
- /* cr12 UNDEFINED */
-#define TEA cr13
- /* cr14-cr15 UNDEFINED */
-#define DCR cr16
-#define KCR0 cr17
-#define KCR1 cr18
- /* cr19-cr31 UNDEFINED */
- /* cr32-cr61 RESERVED */
-#define CTC cr62
-#define USR cr63
-
-/*
- * ABI dependent registers (general purpose set)
- */
-#define RET r2
-#define ARG1 r2
-#define ARG2 r3
-#define ARG3 r4
-#define ARG4 r5
-#define ARG5 r6
-#define ARG6 r7
-#define SP r15
-#define LINK r18
-#define ZERO r63
-
-/*
- * Status register defines: used only by assembly sources (and
- * syntax independent)
- */
-#define SR_RESET_VAL 0x0000000050008000
-#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
-#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
-
-#if defined (CONFIG_SH64_SR_WATCH)
-#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
-#else
-#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
-#endif
-
-#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
-#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
-
-#else /* Not __ASSEMBLY__ syntax */
-
-/*
-** Stringify reg. name
-*/
-#define __str(x) #x
-
-/* Stringify control register names for use in inline assembly */
-#define __SR __str(SR)
-#define __SSR __str(SSR)
-#define __PSSR __str(PSSR)
-#define __INTEVT __str(INTEVT)
-#define __EXPEVT __str(EXPEVT)
-#define __PEXPEVT __str(PEXPEVT)
-#define __TRA __str(TRA)
-#define __SPC __str(SPC)
-#define __PSPC __str(PSPC)
-#define __RESVEC __str(RESVEC)
-#define __VBR __str(VBR)
-#define __TEA __str(TEA)
-#define __DCR __str(DCR)
-#define __KCR0 __str(KCR0)
-#define __KCR1 __str(KCR1)
-#define __CTC __str(CTC)
-#define __USR __str(USR)
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_CPU_SH5_REGISTERS_H */
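The __str()/__SR stringification helpers removed above exist so that C code can splice control-register names into inline-asm templates by string-literal concatenation, as the removed intc-sh5.c does with "getcon " __SR ", %0". The stand-alone sketch below demonstrates only the preprocessor behaviour; no SH-5 assembly is involved.

#include <stdio.h>

/* Copies of the removed stringification helpers, for illustration only. */
#define __str(x) #x
#define __SR  __str(SR)
#define __TEA __str(TEA)

int main(void)
{
	/* '#x' stringifies the token without further expansion, so __SR is
	 * simply the literal "SR".  String-literal concatenation is what
	 * lets an inline-asm template such as
	 *     "getcon " __SR ", %0"
	 * become "getcon SR, %0" at compile time. */
	const char *insn = "getcon " __SR ", %0";

	printf("__SR  = %s\n", __SR);
	printf("__TEA = %s\n", __TEA);
	printf("template: %s\n", insn);
	return 0;
}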
diff --git a/arch/sh/include/cpu-sh5/cpu/rtc.h b/arch/sh/include/cpu-sh5/cpu/rtc.h
deleted file mode 100644
index d7e25d435f4a..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/rtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_RTC_H
-#define __ASM_SH_CPU_SH5_RTC_H
-
-#define rtc_reg_size sizeof(u32)
-#define RTC_BIT_INVERTED 0 /* The SH-5 RTC is surprisingly sane! */
-#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
-
-#endif /* __ASM_SH_CPU_SH5_RTC_H */
diff --git a/arch/sh/include/uapi/asm/posix_types.h b/arch/sh/include/uapi/asm/posix_types.h
index 2644fdd444e6..adc998a64c76 100644
--- a/arch/sh/include/uapi/asm/posix_types.h
+++ b/arch/sh/include/uapi/asm/posix_types.h
@@ -1,8 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __KERNEL__
-# ifdef __SH5__
-# include <asm/posix_types_64.h>
-# else
-# include <asm/posix_types_32.h>
-# endif
-#endif /* __KERNEL__ */
+#include <asm/posix_types_32.h>
diff --git a/arch/sh/include/uapi/asm/posix_types_64.h b/arch/sh/include/uapi/asm/posix_types_64.h
deleted file mode 100644
index 3a9128d4aee3..000000000000
--- a/arch/sh/include/uapi/asm/posix_types_64.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_SH_POSIX_TYPES_64_H
-#define __ASM_SH_POSIX_TYPES_64_H
-
-typedef unsigned short __kernel_mode_t;
-#define __kernel_mode_t __kernel_mode_t
-typedef unsigned short __kernel_ipc_pid_t;
-#define __kernel_ipc_pid_t __kernel_ipc_pid_t
-typedef unsigned short __kernel_uid_t;
-#define __kernel_uid_t __kernel_uid_t
-typedef unsigned short __kernel_gid_t;
-#define __kernel_gid_t __kernel_gid_t
-typedef long unsigned int __kernel_size_t;
-#define __kernel_size_t __kernel_size_t
-typedef int __kernel_ssize_t;
-#define __kernel_ssize_t __kernel_ssize_t
-typedef int __kernel_ptrdiff_t;
-#define __kernel_ptrdiff_t __kernel_ptrdiff_t
-
-typedef unsigned short __kernel_old_uid_t;
-#define __kernel_old_uid_t __kernel_old_uid_t
-typedef unsigned short __kernel_old_gid_t;
-#define __kernel_old_gid_t __kernel_old_gid_t
-typedef unsigned short __kernel_old_dev_t;
-#define __kernel_old_dev_t __kernel_old_dev_t
-
-#include <asm-generic/posix_types.h>
-
-#endif /* __ASM_SH_POSIX_TYPES_64_H */
diff --git a/arch/sh/include/uapi/asm/ptrace.h b/arch/sh/include/uapi/asm/ptrace.h
index 4ec9c2b65fdb..5c88e46b7773 100644
--- a/arch/sh/include/uapi/asm/ptrace.h
+++ b/arch/sh/include/uapi/asm/ptrace.h
@@ -25,11 +25,6 @@
#define PT_DATA_ADDR 248 /* &(struct user)->start_data */
#define PT_TEXT_LEN 252
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-#include <asm/ptrace_64.h>
-#else
#include <asm/ptrace_32.h>
-#endif
-
#endif /* _UAPI__ASM_SH_PTRACE_H */
diff --git a/arch/sh/include/uapi/asm/ptrace_64.h b/arch/sh/include/uapi/asm/ptrace_64.h
deleted file mode 100644
index a6f84eba5277..000000000000
--- a/arch/sh/include/uapi/asm/ptrace_64.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__ASM_SH_PTRACE_64_H
-#define _UAPI__ASM_SH_PTRACE_64_H
-
-struct pt_regs {
- unsigned long long pc;
- unsigned long long sr;
- long long syscall_nr;
- unsigned long long regs[63];
- unsigned long long tregs[8];
- unsigned long long pad[2];
-};
-
-
-#endif /* _UAPI__ASM_SH_PTRACE_64_H */
diff --git a/arch/sh/include/uapi/asm/sigcontext.h b/arch/sh/include/uapi/asm/sigcontext.h
index d2b7e4f033c0..a9cc8bad0f36 100644
--- a/arch/sh/include/uapi/asm/sigcontext.h
+++ b/arch/sh/include/uapi/asm/sigcontext.h
@@ -5,18 +5,6 @@
struct sigcontext {
unsigned long oldmask;
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
- /* CPU registers */
- unsigned long long sc_regs[63];
- unsigned long long sc_tregs[8];
- unsigned long long sc_pc;
- unsigned long long sc_sr;
-
- /* FPU registers */
- unsigned long long sc_fpregs[32];
- unsigned int sc_fpscr;
- unsigned int sc_fpvalid;
-#else
/* CPU registers */
unsigned long sc_regs[16];
unsigned long sc_pc;
@@ -32,7 +20,6 @@ struct sigcontext {
unsigned int sc_fpscr;
unsigned int sc_fpul;
unsigned int sc_ownedfp;
-#endif
};
#endif /* __ASM_SH_SIGCONTEXT_H */
diff --git a/arch/sh/include/uapi/asm/stat.h b/arch/sh/include/uapi/asm/stat.h
index 659b87c7c25a..b0ca755ea08d 100644
--- a/arch/sh/include/uapi/asm/stat.h
+++ b/arch/sh/include/uapi/asm/stat.h
@@ -16,66 +16,6 @@ struct __old_kernel_stat {
unsigned long st_ctime;
};
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-struct stat {
- unsigned short st_dev;
- unsigned short __pad1;
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
- unsigned short st_dev;
- unsigned char __pad0[10];
-
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
-
- unsigned long st_uid;
- unsigned long st_gid;
-
- unsigned short st_rdev;
- unsigned char __pad3[10];
-
- long long st_size;
- unsigned long st_blksize;
-
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
- unsigned long __pad4; /* future possible st_blocks high bits */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
-
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
-
- unsigned long __unused1;
- unsigned long __unused2;
-};
-#else
struct stat {
unsigned long st_dev;
unsigned long st_ino;
@@ -134,6 +74,5 @@ struct stat64 {
};
#define STAT_HAVE_NSEC 1
-#endif
#endif /* __ASM_SH_STAT_H */
diff --git a/arch/sh/include/uapi/asm/swab.h b/arch/sh/include/uapi/asm/swab.h
index f0b02152745c..c727d381a30a 100644
--- a/arch/sh/include/uapi/asm/swab.h
+++ b/arch/sh/include/uapi/asm/swab.h
@@ -13,14 +13,9 @@
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
-#ifdef __SH5__
- "byterev %1, %0\n\t"
- "shari %0, 32, %0"
-#else
"swap.b %1, %0\n\t"
"swap.w %0, %0\n\t"
"swap.b %0, %0"
-#endif
: "=r" (x)
: "r" (x));
@@ -31,12 +26,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
-#ifdef __SH5__
- "byterev %1, %0\n\t"
- "shari %0, 32, %0"
-#else
"swap.b %1, %0"
-#endif
: "=r" (x)
: "r" (x));
diff --git a/arch/sh/include/uapi/asm/unistd.h b/arch/sh/include/uapi/asm/unistd.h
index 9e0b4e5e6da2..0f7c7772a2fb 100644
--- a/arch/sh/include/uapi/asm/unistd.h
+++ b/arch/sh/include/uapi/asm/unistd.h
@@ -1,8 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __KERNEL__
-# ifdef __SH5__
-# include <asm/unistd_64.h>
-# else
-# include <asm/unistd_32.h>
-# endif
-#endif
+#include <asm/unistd_32.h>
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
deleted file mode 100644
index 75da54851f02..000000000000
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_SH_UNISTD_64_H
-#define __ASM_SH_UNISTD_64_H
-
-/*
- * include/asm-sh/unistd_64.h
- *
- * This file contains the system call numbers.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- * Copyright (C) 2004 Sean McGoogan
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
- /* 17 was sys_break */
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
- /* 31 was sys_stty */
- /* 32 was sys_gtty */
-#define __NR_access 33
-#define __NR_nice 34
- /* 35 was sys_ftime */
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
- /* 44 was sys_prof */
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
- /* 53 was sys_lock */
-#define __NR_ioctl 54
-#define __NR_fcntl 55
- /* 56 was sys_mpx */
-#define __NR_setpgid 57
- /* 58 was sys_ulimit */
- /* 59 was sys_olduname */
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
- /* 82 was sys_select */
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
- /* 98 was sys_profil */
-#define __NR_statfs 99
-#define __NR_fstatfs 100
- /* 101 was sys_ioperm */
-#define __NR_socketcall		102		/* old implementation of the socket system call */
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
- /* 110 was sys_iopl */
-#define __NR_vhangup 111
- /* 112 was sys_idle */
- /* 113 was sys_vm86old */
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_cacheflush 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
- /* 127 was sys_create_module */
-#define __NR_init_module 128
-#define __NR_delete_module 129
- /* 130 was sys_get_kernel_syms */
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
- /* 137 was sys_afs_syscall */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
- /* 166 was sys_vm86 */
- /* 167 was sys_query_module */
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
- /* 188 reserved for getpmsg */
- /* 189 reserved for putpmsg */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-
-/* Non-multiplexed socket family */
-#define __NR_socket 220
-#define __NR_bind 221
-#define __NR_connect 222
-#define __NR_listen 223
-#define __NR_accept 224
-#define __NR_getsockname 225
-#define __NR_getpeername 226
-#define __NR_socketpair 227
-#define __NR_send 228
-#define __NR_sendto 229
-#define __NR_recv 230
-#define __NR_recvfrom 231
-#define __NR_shutdown 232
-#define __NR_setsockopt 233
-#define __NR_getsockopt 234
-#define __NR_sendmsg 235
-#define __NR_recvmsg 236
-
-/* Non-multiplexed IPC family */
-#define __NR_semop 237
-#define __NR_semget 238
-#define __NR_semctl 239
-#define __NR_msgsnd 240
-#define __NR_msgrcv 241
-#define __NR_msgget 242
-#define __NR_msgctl 243
-#define __NR_shmat 244
-#define __NR_shmdt 245
-#define __NR_shmget 246
-#define __NR_shmctl 247
-
-#define __NR_getdents64 248
-#define __NR_fcntl64 249
- /* 250 is reserved for tux */
- /* 251 is unused */
-#define __NR_gettid 252
-#define __NR_readahead 253
-#define __NR_setxattr 254
-#define __NR_lsetxattr 255
-#define __NR_fsetxattr 256
-#define __NR_getxattr 257
-#define __NR_lgetxattr 258
-#define __NR_fgetxattr 259
-#define __NR_listxattr 260
-#define __NR_llistxattr 261
-#define __NR_flistxattr 262
-#define __NR_removexattr 263
-#define __NR_lremovexattr 264
-#define __NR_fremovexattr 265
-#define __NR_tkill 266
-#define __NR_sendfile64 267
-#define __NR_futex 268
-#define __NR_sched_setaffinity 269
-#define __NR_sched_getaffinity 270
- /* 271 is reserved for set_thread_area */
- /* 272 is reserved for get_thread_area */
-#define __NR_io_setup 273
-#define __NR_io_destroy 274
-#define __NR_io_getevents 275
-#define __NR_io_submit 276
-#define __NR_io_cancel 277
-#define __NR_fadvise64 278
- /* 279 is unused */
-#define __NR_exit_group 280
-
-#define __NR_lookup_dcookie 281
-#define __NR_epoll_create 282
-#define __NR_epoll_ctl 283
-#define __NR_epoll_wait 284
-#define __NR_remap_file_pages 285
-#define __NR_set_tid_address 286
-#define __NR_timer_create 287
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 296
-#define __NR_fstatfs64 297
-#define __NR_tgkill 298
-#define __NR_utimes 299
-#define __NR_fadvise64_64 300
- /* 301 is reserved for vserver */
- /* 302 is reserved for mbind */
- /* 303 is reserved for get_mempolicy */
- /* 304 is reserved for set_mempolicy */
-#define __NR_mq_open 305
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
- /* 311 is reserved for kexec */
-#define __NR_waitid 312
-#define __NR_add_key 313
-#define __NR_request_key 314
-#define __NR_keyctl 315
-#define __NR_ioprio_set 316
-#define __NR_ioprio_get 317
-#define __NR_inotify_init 318
-#define __NR_inotify_add_watch 319
-#define __NR_inotify_rm_watch 320
- /* 321 is unused */
-#define __NR_migrate_pages 322
-#define __NR_openat 323
-#define __NR_mkdirat 324
-#define __NR_mknodat 325
-#define __NR_fchownat 326
-#define __NR_futimesat 327
-#define __NR_fstatat64 328
-#define __NR_unlinkat 329
-#define __NR_renameat 330
-#define __NR_linkat 331
-#define __NR_symlinkat 332
-#define __NR_readlinkat 333
-#define __NR_fchmodat 334
-#define __NR_faccessat 335
-#define __NR_pselect6 336
-#define __NR_ppoll 337
-#define __NR_unshare 338
-#define __NR_set_robust_list 339
-#define __NR_get_robust_list 340
-#define __NR_splice 341
-#define __NR_sync_file_range 342
-#define __NR_tee 343
-#define __NR_vmsplice 344
-#define __NR_move_pages 345
-#define __NR_getcpu 346
-#define __NR_epoll_pwait 347
-#define __NR_utimensat 348
-#define __NR_signalfd 349
-#define __NR_timerfd_create 350
-#define __NR_eventfd 351
-#define __NR_fallocate 352
-#define __NR_timerfd_settime 353
-#define __NR_timerfd_gettime 354
-#define __NR_signalfd4 355
-#define __NR_eventfd2 356
-#define __NR_epoll_create1 357
-#define __NR_dup3 358
-#define __NR_pipe2 359
-#define __NR_inotify_init1 360
-#define __NR_preadv 361
-#define __NR_pwritev 362
-#define __NR_rt_tgsigqueueinfo 363
-#define __NR_perf_event_open 364
-#define __NR_recvmmsg 365
-#define __NR_accept4 366
-#define __NR_fanotify_init 367
-#define __NR_fanotify_mark 368
-#define __NR_prlimit64 369
-#define __NR_name_to_handle_at 370
-#define __NR_open_by_handle_at 371
-#define __NR_clock_adjtime 372
-#define __NR_syncfs 373
-#define __NR_sendmmsg 374
-#define __NR_setns 375
-#define __NR_process_vm_readv 376
-#define __NR_process_vm_writev 377
-#define __NR_kcmp 378
-#define __NR_finit_module 379
-#define __NR_sched_getattr 380
-#define __NR_sched_setattr 381
-#define __NR_renameat2 382
-#define __NR_seccomp 383
-#define __NR_getrandom 384
-#define __NR_memfd_create 385
-#define __NR_bpf 386
-#define __NR_execveat 387
-#define __NR_userfaultfd 388
-#define __NR_membarrier 389
-#define __NR_mlock2 390
-#define __NR_copy_file_range 391
-#define __NR_preadv2 392
-#define __NR_pwritev2 393
-
-#ifdef __KERNEL__
-#define __NR_syscalls 394
-#endif
-
-#endif /* __ASM_SH_UNISTD_64_H */
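The timer/clock entries above are defined relative to __NR_timer_create rather than as absolute numbers; as a quick check that the relative block lines up with the next absolute number, the sketch below prints a few of them. The constants are copied from the removed header and the program is an illustration only.

#include <stdio.h>

/* Copies of the removed relative syscall-number macros, for illustration. */
#define __NR_timer_create     287
#define __NR_timer_settime    (__NR_timer_create+1)
#define __NR_clock_nanosleep  (__NR_timer_create+8)
#define __NR_statfs64         296

int main(void)
{
	/* The timer/clock block occupies 287..295, so statfs64 resumes
	 * the absolute numbering at 296 with no gap or overlap. */
	printf("timer_settime=%d clock_nanosleep=%d statfs64=%d\n",
	       __NR_timer_settime, __NR_clock_nanosleep, __NR_statfs64);
	return 0;
}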
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 59673f8a3379..b0f5574b6228 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux/SuperH kernel.
#
-extra-y := head_$(BITS).o vmlinux.lds
+extra-y := head_32.o vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -13,26 +13,26 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
obj-y := debugtraps.o dumpstack.o \
- idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
+ idle.o io.o irq.o irq_32.o kdebugfs.o \
machvec.o nmi_debug.o process.o \
- process_$(BITS).o ptrace.o ptrace_$(BITS).o \
+ process_32.o ptrace.o ptrace_32.o \
reboot.o return_address.o \
- setup.o signal_$(BITS).o sys_sh.o \
- syscalls_$(BITS).o time.o topology.o traps.o \
- traps_$(BITS).o unwinder.o
+ setup.o signal_32.o sys_sh.o \
+ syscalls_32.o time.o topology.o traps.o \
+ traps_32.o unwinder.o
ifndef CONFIG_GENERIC_IOMAP
obj-y += iomap.o
obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
endif
-obj-$(CONFIG_SUPERH32) += sys_sh32.o
+obj-y += sys_sh32.o
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index f7c22ea98b0f..46118236bf04 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
-obj-$(CONFIG_CPU_SH5) = sh5/
# Special cases for family ancestry.
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ce7291e12a30..1d008745877f 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -103,7 +103,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
/*
* Generic first-level cache init
*/
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
+#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 8b91cb96411b..e4578cde46ba 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -2,6 +2,5 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
-obj-$(CONFIG_SUPERH32) += imask.o
-obj-$(CONFIG_CPU_SH5) += intc-sh5.o
+obj-y += imask.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
deleted file mode 100644
index 1b3050facda8..000000000000
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/irq/intc-sh5.c
- *
- * Interrupt Controller support for SH5 INTC.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- *
- * Per-interrupt selective mode only. IRLM=0 (fixed priority) is not
- * supported, since it is useless without a cascaded interrupt
- * controller.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <cpu/irq.h>
-#include <asm/page.h>
-
-/*
- * Maybe the generic peripheral block definitions could move to a
- * more generic include file. The INTC block is defined here, and
- * only here, to keep the INTC support self-contained in a single
- * file.
- */
-#define INTC_BLOCK_OFFSET 0x01000000
-
-/* Base */
-#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
- INTC_BLOCK_OFFSET
-
-/* Address */
-#define INTC_ICR_SET (intc_virt + 0x0)
-#define INTC_ICR_CLEAR (intc_virt + 0x8)
-#define INTC_INTPRI_0 (intc_virt + 0x10)
-#define INTC_INTSRC_0 (intc_virt + 0x50)
-#define INTC_INTSRC_1 (intc_virt + 0x58)
-#define INTC_INTREQ_0 (intc_virt + 0x60)
-#define INTC_INTREQ_1 (intc_virt + 0x68)
-#define INTC_INTENB_0 (intc_virt + 0x70)
-#define INTC_INTENB_1 (intc_virt + 0x78)
-#define INTC_INTDSB_0 (intc_virt + 0x80)
-#define INTC_INTDSB_1 (intc_virt + 0x88)
-
-#define INTC_ICR_IRLM 0x1
-#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
-#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
-
-
-/*
- * Mapper between the vector ordinal and the IRQ number
- * passed to kernel/device drivers.
- */
-int intc_evt_to_irq[(0xE20/0x20)+1] = {
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
- 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
- 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
- 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
- -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
- -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
- 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
- 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
- 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
- -1, -1 /* 0xE00 - 0xE20 */
-};
-
-static unsigned long intc_virt;
-static int irlm; /* IRL mode */
-
-static void enable_intc_irq(struct irq_data *data)
-{
- unsigned int irq = data->irq;
- unsigned long reg;
- unsigned long bitmask;
-
- if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
- printk("Trying to use straight IRL0-3 with an encoding platform.\n");
-
- if (irq < 32) {
- reg = INTC_INTENB_0;
- bitmask = 1 << irq;
- } else {
- reg = INTC_INTENB_1;
- bitmask = 1 << (irq - 32);
- }
-
- __raw_writel(bitmask, reg);
-}
-
-static void disable_intc_irq(struct irq_data *data)
-{
- unsigned int irq = data->irq;
- unsigned long reg;
- unsigned long bitmask;
-
- if (irq < 32) {
- reg = INTC_INTDSB_0;
- bitmask = 1 << irq;
- } else {
- reg = INTC_INTDSB_1;
- bitmask = 1 << (irq - 32);
- }
-
- __raw_writel(bitmask, reg);
-}
-
-static struct irq_chip intc_irq_type = {
- .name = "INTC",
- .irq_enable = enable_intc_irq,
- .irq_disable = disable_intc_irq,
-};
-
-void __init plat_irq_setup(void)
-{
- unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
- unsigned long reg;
- int i;
-
- intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
- if (!intc_virt) {
- panic("Unable to remap INTC\n");
- }
-
-
- /* Set default: per-line enable/disable, priority driven ack/eoi */
- for (i = 0; i < NR_INTC_IRQS; i++)
- irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
-
-
- /* Disable all interrupts and set all priorities to 0 to avoid trouble */
- __raw_writel(-1, INTC_INTDSB_0);
- __raw_writel(-1, INTC_INTDSB_1);
-
- for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
- __raw_writel( NO_PRIORITY, reg);
-
-
-#ifdef CONFIG_SH_CAYMAN
- {
- unsigned long data;
-
- /* Set IRLM */
- /* If all the priorities are set to 'no priority', then
- * assume we are using encoded mode.
- */
- irlm = platform_int_priority[IRQ_IRL0] +
- platform_int_priority[IRQ_IRL1] +
- platform_int_priority[IRQ_IRL2] +
- platform_int_priority[IRQ_IRL3];
- if (irlm == NO_PRIORITY) {
- /* IRLM = 0 */
- reg = INTC_ICR_CLEAR;
- i = IRQ_INTA;
- printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
- } else {
- /* IRLM = 1 */
- reg = INTC_ICR_SET;
- i = IRQ_IRL0;
- }
- __raw_writel(INTC_ICR_IRLM, reg);
-
- /* Set interrupt priorities according to platform description */
- for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
- data |= platform_int_priority[i] <<
- ((i % INTC_INTPRI_PPREG) * 4);
- if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
- /* Upon the 7th, set Priority Register */
- __raw_writel(data, reg);
- data = 0;
- reg += 8;
- }
- }
- }
-#endif
-
- /*
- * And now let interrupts come in.
- * sti() is not enough, we need to
- * lower priority, too.
- */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
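The priority-programming loop deleted above packs eight 4-bit priorities into each 32-bit INTPRI word before writing it out. The host-side sketch below models just that packing step, with example priorities taken from the removed irq.h defaults; the actual register write (__raw_writel) is left out, so this is an illustration rather than driver code.

#include <stdio.h>
#include <stdint.h>

#define INTC_INTPRI_PPREG 8   /* 8 priorities per register, from the removed driver */

/* Pack one 4-bit priority per interrupt into 32-bit INTPRI words, mirroring
 * the accumulate-then-flush loop in the removed plat_irq_setup(). */
static void pack_priorities(const int *prio, int n, uint32_t *words)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < n; i++) {
		data |= (uint32_t)(prio[i] & 0xf) << ((i % INTC_INTPRI_PPREG) * 4);
		if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
			words[i / INTC_INTPRI_PPREG] = data;  /* would be __raw_writel() */
			data = 0;
		}
	}
}

int main(void)
{
	/* Default priorities for IRQ 0..7 (IRL0-3, INTA-D) from the removed irq.h. */
	int prio[8] = { 13, 10, 7, 4, 12, 9, 6, 3 };
	uint32_t words[1];

	pack_priorities(prio, 8, words);
	printf("INTPRI word 0 = 0x%08x\n", words[0]);
	return 0;
}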
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
index 85961b4f9c69..a306bcd6b341 100644
--- a/arch/sh/kernel/cpu/proc.c
+++ b/arch/sh/kernel/cpu/proc.c
@@ -24,7 +24,6 @@ static const char *cpu_name[] = {
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
- [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 4b0db8259e3d..74620f30b19b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -391,6 +391,7 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
&tmu0_device,
&tmu1_device,
&tmu2_device,
+ &tmu3_device,
};
static struct platform_device *sh7786_devices[] __initdata = {
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
deleted file mode 100644
index 97d23ec3005f..000000000000
--- a/arch/sh/kernel/cpu/sh5/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Linux/SuperH SH-5 backends.
-#
-obj-y := entry.o probe.o switchto.o
-
-obj-$(CONFIG_SH_FPU) += fpu.o
-obj-$(CONFIG_KALLSYMS) += unwind.o
-
-# CPU subtype setup
-obj-$(CONFIG_CPU_SH5) += setup-sh5.o
-
-# Primary on-chip clocks (common)
-clock-$(CONFIG_CPU_SH5) := clock-sh5.o
-
-obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
deleted file mode 100644
index dee6be2c2344..000000000000
--- a/arch/sh/kernel/cpu/sh5/clock-sh5.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/clock-sh5.c
- *
- * SH-5 support for the clock framework
- *
- * Copyright (C) 2008 Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <asm/clock.h>
-#include <asm/io.h>
-
-static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
-
-/* Clock, Power and Reset Controller */
-#define CPRC_BLOCK_OFF 0x01010000
-#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
-
-static unsigned long cprc_base;
-
-static void master_clk_init(struct clk *clk)
-{
- int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
- clk->rate *= ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_master_clk_ops = {
- .init = master_clk_init,
-};
-
-static unsigned long module_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_module_clk_ops = {
- .recalc = module_clk_recalc,
-};
-
-static unsigned long bus_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_bus_clk_ops = {
- .recalc = bus_clk_recalc,
-};
-
-static unsigned long cpu_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) & 0x0007);
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
-};
-
-static struct sh_clk_ops *sh5_clk_ops[] = {
- &sh5_master_clk_ops,
- &sh5_module_clk_ops,
- &sh5_bus_clk_ops,
- &sh5_cpu_clk_ops,
-};
-
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
-{
- cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
- BUG_ON(!cprc_base);
-
- if (idx < ARRAY_SIZE(sh5_clk_ops))
- *ops = sh5_clk_ops[idx];
-}
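The recalc helpers removed above all follow the same pattern: read a 3-bit field from the CPRC register and divide the parent rate by the corresponding ifc_table entry. The sketch below works one example through that arithmetic; the parent rate and register value are made-up numbers, not hardware values.

#include <stdio.h>

/* Divider table copied from the removed clock-sh5.c. */
static const int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };

/* Model of the recalc helpers: extract a 3-bit index at 'shift' and divide
 * the parent rate by the table entry it selects. */
static unsigned long recalc(unsigned long parent_rate, unsigned int cprc_val,
			    unsigned int shift)
{
	int idx = (cprc_val >> shift) & 0x0007;
	return parent_rate / ifc_table[idx];
}

int main(void)
{
	unsigned long parent = 400000000UL;  /* hypothetical 400 MHz parent clock */
	unsigned int cprc = 0x2019;          /* hypothetical CPRC register value */

	printf("module clk: %lu Hz\n", recalc(parent, cprc, 12)); /* idx 2 -> /6 */
	printf("bus clk:    %lu Hz\n", recalc(parent, cprc, 3));  /* idx 3 -> /8 */
	printf("cpu clk:    %lu Hz\n", recalc(parent, cprc, 0));  /* idx 1 -> /4 */
	return 0;
}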
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
deleted file mode 100644
index 81c8b64b977f..000000000000
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ /dev/null
@@ -1,2000 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/cpu/sh5/entry.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 - 2008 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/sys.h>
-#include <cpu/registers.h>
-#include <asm/processor.h>
-#include <asm/unistd.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-
-/*
- * SR fields.
- */
-#define SR_ASID_MASK 0x00ff0000
-#define SR_FD_MASK 0x00008000
-#define SR_SS 0x08000000
-#define SR_BL 0x10000000
-#define SR_MD 0x40000000
-
-/*
- * Event code.
- */
-#define EVENT_INTERRUPT 0
-#define EVENT_FAULT_TLB 1
-#define EVENT_FAULT_NOT_TLB 2
-#define EVENT_DEBUG 3
-
-/* EXPEVT values */
-#define RESET_CAUSE 0x20
-#define DEBUGSS_CAUSE 0x980
-
-/*
- * Frame layout. Quad index.
- */
-#define FRAME_T(x) FRAME_TBASE+(x*8)
-#define FRAME_R(x) FRAME_RBASE+(x*8)
-#define FRAME_S(x) FRAME_SBASE+(x*8)
-#define FSPC 0
-#define FSSR 1
-#define FSYSCALL_ID 2
-
-/* Arrange the save frame to be a multiple of 32 bytes long */
-#define FRAME_SBASE 0
-#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
-#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
-#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
-#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
-
-#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
-#define FP_FRAME_BASE 0
-
-#define SAVED_R2 0*8
-#define SAVED_R3 1*8
-#define SAVED_R4 2*8
-#define SAVED_R5 3*8
-#define SAVED_R18 4*8
-#define SAVED_R6 5*8
-#define SAVED_TR0 6*8
-
-/* These are the registers saved in the TLB path that aren't saved in the first
- level of the normal one. */
-#define TLB_SAVED_R25 7*8
-#define TLB_SAVED_TR1 8*8
-#define TLB_SAVED_TR2 9*8
-#define TLB_SAVED_TR3 10*8
-#define TLB_SAVED_TR4 11*8
-/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1 causing
- breakage otherwise. */
-#define TLB_SAVED_R0 12*8
-#define TLB_SAVED_R1 13*8
-
-#define CLI() \
- getcon SR, r6; \
- ori r6, 0xf0, r6; \
- putcon r6, SR;
-
-#define STI() \
- getcon SR, r6; \
- andi r6, ~0xf0, r6; \
- putcon r6, SR;
-
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop() CLI()
-#else
-# define preempt_stop()
-# define resume_kernel restore_all
-#endif
-
- .section .data, "aw"
-
-#define FAST_TLBMISS_STACK_CACHELINES 4
-#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
-
-/* Register back-up area for all exceptions */
- .balign 32
- /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
- * register saves etc. */
- .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
-/* This is 32 byte aligned by construction */
-/* Register back-up area for all exceptions */
-reg_save_area:
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
-
-/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
- * reentrancy. Note this area may be accessed via physical address.
- * Align so this fits a whole single cache line, for ease of purging.
- */
- .balign 32,0,32
-resvec_save_area:
- .quad 0
- .quad 0
- .quad 0
- .quad 0
- .quad 0
- .balign 32,0,32
-
-/* Jump table of 3rd level handlers */
-trap_jtable:
- .long do_exception_error /* 0x000 */
- .long do_exception_error /* 0x020 */
-#ifdef CONFIG_MMU
- .long tlb_miss_load /* 0x040 */
- .long tlb_miss_store /* 0x060 */
-#else
- .long do_exception_error
- .long do_exception_error
-#endif
- ! ARTIFICIAL pseudo-EXPEVT setting
- .long do_debug_interrupt /* 0x080 */
-#ifdef CONFIG_MMU
- .long tlb_miss_load /* 0x0A0 */
- .long tlb_miss_store /* 0x0C0 */
-#else
- .long do_exception_error
- .long do_exception_error
-#endif
- .long do_address_error_load /* 0x0E0 */
- .long do_address_error_store /* 0x100 */
-#ifdef CONFIG_SH_FPU
- .long do_fpu_error /* 0x120 */
-#else
- .long do_exception_error /* 0x120 */
-#endif
- .long do_exception_error /* 0x140 */
- .long system_call /* 0x160 */
- .long do_reserved_inst /* 0x180 */
- .long do_illegal_slot_inst /* 0x1A0 */
- .long do_exception_error /* 0x1C0 - NMI */
- .long do_exception_error /* 0x1E0 */
- .rept 15
- .long do_IRQ /* 0x200 - 0x3C0 */
- .endr
- .long do_exception_error /* 0x3E0 */
- .rept 32
- .long do_IRQ /* 0x400 - 0x7E0 */
- .endr
- .long fpu_error_or_IRQA /* 0x800 */
- .long fpu_error_or_IRQB /* 0x820 */
- .long do_IRQ /* 0x840 */
- .long do_IRQ /* 0x860 */
- .rept 6
- .long do_exception_error /* 0x880 - 0x920 */
- .endr
- .long breakpoint_trap_handler /* 0x940 */
- .long do_exception_error /* 0x960 */
- .long do_single_step /* 0x980 */
-
- .rept 3
- .long do_exception_error /* 0x9A0 - 0x9E0 */
- .endr
- .long do_IRQ /* 0xA00 */
- .long do_IRQ /* 0xA20 */
-#ifdef CONFIG_MMU
- .long itlb_miss_or_IRQ /* 0xA40 */
-#else
- .long do_IRQ
-#endif
- .long do_IRQ /* 0xA60 */
- .long do_IRQ /* 0xA80 */
-#ifdef CONFIG_MMU
- .long itlb_miss_or_IRQ /* 0xAA0 */
-#else
- .long do_IRQ
-#endif
- .long do_exception_error /* 0xAC0 */
- .long do_address_error_exec /* 0xAE0 */
- .rept 8
- .long do_exception_error /* 0xB00 - 0xBE0 */
- .endr
- .rept 18
- .long do_IRQ /* 0xC00 - 0xE20 */
- .endr
-
- .section .text64, "ax"
-
-/*
- * --- Exception/Interrupt/Event Handling Section
- */
-
-/*
- * VBR and RESVEC blocks.
- *
- * First level handler for VBR-based exceptions.
- *
- * To avoid waste of space, align to the maximum text block size.
- * This is assumed to be at most 128 bytes or 32 instructions.
- * DO NOT EXCEED 32 instructions on the first level handlers !
- *
- * Also note that RESVEC is contained within the VBR block
- * where the room left (1KB - TEXT_SIZE) allows placing
- * the RESVEC block (at most 512B + TEXT_SIZE).
- *
- * So first (and only) level handler for RESVEC-based exceptions.
- *
- * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
- * and interrupt), register space is very tight until the registers
- * are saved onto the stack frame, which is done in handle_exception().
- *
- */
-
-#define TEXT_SIZE 128
-#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
-
- .balign TEXT_SIZE
-LVBR_block:
- .space 256, 0 /* Power-on class handler, */
- /* not required here */
-not_a_tlb_miss:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /* Save original stack pointer into KCR1 */
- putcon SP, KCR1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for Non-debug, Not a TLB miss class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_NOT_TLB, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-
- .balign 256
- ! VBR+0x200
- nop
- .balign 256
- ! VBR+0x300
- nop
- .balign 256
- /*
- * Instead of the natural .balign 1024 place RESVEC here
- * respecting the final 1KB alignment.
- */
- .balign TEXT_SIZE
- /*
- * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
- * block making sure the final alignment is correct.
- */
-#ifdef CONFIG_MMU
-tlb_miss:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- putcon SP, KCR1
- movi reg_save_area, SP
- /* SP is guaranteed 32-byte aligned. */
- st.q SP, TLB_SAVED_R0 , r0
- st.q SP, TLB_SAVED_R1 , r1
- st.q SP, SAVED_R2 , r2
- st.q SP, SAVED_R3 , r3
- st.q SP, SAVED_R4 , r4
- st.q SP, SAVED_R5 , r5
- st.q SP, SAVED_R6 , r6
- st.q SP, SAVED_R18, r18
-
- /* Save R25 for safety; as/ld may want to use it to achieve the call to
- * the code in mm/tlbmiss.c */
- st.q SP, TLB_SAVED_R25, r25
- gettr tr0, r2
- gettr tr1, r3
- gettr tr2, r4
- gettr tr3, r5
- gettr tr4, r18
- st.q SP, SAVED_TR0 , r2
- st.q SP, TLB_SAVED_TR1 , r3
- st.q SP, TLB_SAVED_TR2 , r4
- st.q SP, TLB_SAVED_TR3 , r5
- st.q SP, TLB_SAVED_TR4 , r18
-
- pt do_fast_page_fault, tr0
- getcon SSR, r2
- getcon EXPEVT, r3
- getcon TEA, r4
- shlri r2, 30, r2
- andi r2, 1, r2 /* r2 = SSR.MD */
- blink tr0, LINK
-
- pt fixup_to_invoke_general_handler, tr1
-
- /* If the fast path handler fixed the fault, just drop through quickly
- to the restore code right away to return to the excepting context.
- */
- bnei/u r2, 0, tr1
-
-fast_tlb_miss_restore:
- ld.q SP, SAVED_TR0, r2
- ld.q SP, TLB_SAVED_TR1, r3
- ld.q SP, TLB_SAVED_TR2, r4
-
- ld.q SP, TLB_SAVED_TR3, r5
- ld.q SP, TLB_SAVED_TR4, r18
-
- ptabs r2, tr0
- ptabs r3, tr1
- ptabs r4, tr2
- ptabs r5, tr3
- ptabs r18, tr4
-
- ld.q SP, TLB_SAVED_R0, r0
- ld.q SP, TLB_SAVED_R1, r1
- ld.q SP, SAVED_R2, r2
- ld.q SP, SAVED_R3, r3
- ld.q SP, SAVED_R4, r4
- ld.q SP, SAVED_R5, r5
- ld.q SP, SAVED_R6, r6
- ld.q SP, SAVED_R18, r18
- ld.q SP, TLB_SAVED_R25, r25
-
- getcon KCR1, SP
- rte
- nop /* for safety, in case the code is run on sh5-101 cut1.x */
-
-fixup_to_invoke_general_handler:
-
- /* OK, new method. Restore stuff that's not expected to get saved into
- the 'first-level' reg save area, then just fall through to setting
- up the registers and calling the second-level handler. */
-
- /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
- r25,tr1-4 and save r6 to get into the right state. */
-
- ld.q SP, TLB_SAVED_TR1, r3
- ld.q SP, TLB_SAVED_TR2, r4
- ld.q SP, TLB_SAVED_TR3, r5
- ld.q SP, TLB_SAVED_TR4, r18
- ld.q SP, TLB_SAVED_R25, r25
-
- ld.q SP, TLB_SAVED_R0, r0
- ld.q SP, TLB_SAVED_R1, r1
-
- ptabs/u r3, tr1
- ptabs/u r4, tr2
- ptabs/u r5, tr3
- ptabs/u r18, tr4
-
- /* Set args for Non-debug, TLB miss class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_TLB, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-#else /* CONFIG_MMU */
- .balign 256
-#endif
-
-/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
- DOES END UP AT VBR+0x600 */
- nop
- nop
- nop
- nop
- nop
- nop
-
- .balign 256
- /* VBR + 0x600 */
-
-interrupt:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /* Save original stack pointer into KCR1 */
- putcon SP, KCR1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for interrupt class handler */
- getcon INTEVT, r2
- movi ret_from_irq, r3
- ori r3, 1, r3
- movi EVENT_INTERRUPT, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
- .balign TEXT_SIZE /* let's waste the bare minimum */
-
-LVBR_block_end: /* Marker. Used for total checking */
-
- .balign 256
-LRESVEC_block:
- /* Panic handler. Called with MMU off. Possible causes/actions:
- * - Reset: Jump to program start.
- * - Single Step: Turn off Single Step & return.
- * - Others: Call panic handler, passing PC as arg.
- * (this may need to be extended...)
- */
-reset_or_panic:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- putcon SP, DCR
- /* First save r0-1 and tr0, as we need to use these */
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
- st.q SP, 0, r0
- st.q SP, 8, r1
- gettr tr0, r0
- st.q SP, 32, r0
-
- /* Check cause */
- getcon EXPEVT, r0
- movi RESET_CAUSE, r1
- sub r1, r0, r1 /* r1=0 if reset */
- movi _stext-CONFIG_PAGE_OFFSET, r0
- ori r0, 1, r0
- ptabs r0, tr0
- beqi r1, 0, tr0 /* Jump to start address if reset */
-
- getcon EXPEVT, r0
- movi DEBUGSS_CAUSE, r1
- sub r1, r0, r1 /* r1=0 if single step */
- pta single_step_panic, tr0
- beqi r1, 0, tr0 /* jump if single step */
-
- /* Now jump to where we save the registers. */
- movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
- ptabs r1, tr0
- blink tr0, r63
-
-single_step_panic:
- /* We are in a handler with Single Step set. We need to resume the
- * handler, by turning on MMU & turning off Single Step. */
- getcon SSR, r0
- movi SR_MMU, r1
- or r0, r1, r0
- movi ~SR_SS, r1
- and r0, r1, r0
- putcon r0, SSR
- /* Restore EXPEVT, as the rte won't do this */
- getcon PEXPEVT, r0
- putcon r0, EXPEVT
- /* Restore regs */
- ld.q SP, 32, r0
- ptabs r0, tr0
- ld.q SP, 0, r0
- ld.q SP, 8, r1
- getcon DCR, SP
- synco
- rte
-
-
- .balign 256
-debug_exception:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /*
- * Single step/software_break_point first level handler.
- * Called with MMU off, so the first thing we do is enable it
- * by doing an rte with appropriate SSR.
- */
- putcon SP, DCR
- /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
-
- /* With the MMU off, we are bypassing the cache, so purge any
- * data that will be made stale by the following stores.
- */
- ocbp SP, 0
- synco
-
- st.q SP, 0, r0
- st.q SP, 8, r1
- getcon SPC, r0
- st.q SP, 16, r0
- getcon SSR, r0
- st.q SP, 24, r0
-
- /* Enable MMU, block exceptions, set priv mode, disable single step */
- movi SR_MMU | SR_BL | SR_MD, r1
- or r0, r1, r0
- movi ~SR_SS, r1
- and r0, r1, r0
- putcon r0, SSR
- /* Force control to debug_exception_2 when rte is executed */
-	movi	debug_exception_2, r0
- ori r0, 1, r0 /* force SHmedia, just in case */
- putcon r0, SPC
- getcon DCR, SP
- synco
- rte
-debug_exception_2:
- /* Restore saved regs */
- putcon SP, KCR1
- movi resvec_save_area, SP
- ld.q SP, 24, r0
- putcon r0, SSR
- ld.q SP, 16, r0
- putcon r0, SPC
- ld.q SP, 0, r0
- ld.q SP, 8, r1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for debug class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_DEBUG, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-
- .balign 256
-debug_interrupt:
- /* !!! WE COME HERE IN REAL MODE !!! */
- /* Hook-up debug interrupt to allow various debugging options to be
- * hooked into its handler. */
- /* Save original stack pointer into KCR1 */
- synco
- putcon SP, KCR1
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
- ocbp SP, 0
- ocbp SP, 32
- synco
-
- /* Save other original registers into reg_save_area thru real addresses */
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* move (spc,ssr)->(pspc,pssr). The rte will shift
- them back again, so that they look like the originals
- as far as the real handler code is concerned. */
- getcon spc, r6
- putcon r6, pspc
- getcon ssr, r6
- putcon r6, pssr
-
- ! construct useful SR for handle_exception
- movi 3, r6
- shlli r6, 30, r6
- getcon sr, r18
- or r18, r6, r6
- putcon r6, ssr
-
- ! SSR is now the current SR with the MD and MMU bits set
- ! i.e. the rte will switch back to priv mode and put
- ! the mmu back on
-
- ! construct spc
- movi handle_exception, r18
- ori r18, 1, r18 ! for safety (do we need this?)
- putcon r18, spc
-
- /* Set args for Non-debug, Not a TLB miss class handler */
-
- ! EXPEVT==0x80 is unused, so 'steal' this value to put the
- ! debug interrupt handler in the vectoring table
- movi 0x80, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_NOT_TLB, r4
-
- or SP, ZERO, r5
- movi CONFIG_PAGE_OFFSET, r6
- add r6, r5, r5
- getcon KCR1, SP
-
- synco ! for safety
- rte ! -> handle_exception, switch back to priv mode again
-
-LRESVEC_block_end: /* Marker. Unused. */
-
- .balign TEXT_SIZE
-
-/*
- * Second level handler for VBR-based exceptions. Pre-handler.
- * In common to all stack-frame sensitive handlers.
- *
- * Inputs:
- * (KCR0) Current [current task union]
- * (KCR1) Original SP
- * (r2) INTEVT/EXPEVT
- * (r3) appropriate return address
- * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
- * (r5) Pointer to reg_save_area
- * (SP) Original SP
- *
- * Available registers:
- * (r6)
- * (r18)
- * (tr0)
- *
- */
-handle_exception:
- /* Common 2nd level handler. */
-
- /* First thing we need an appropriate stack pointer */
- getcon SSR, r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta stack_ok, tr0
- bne r6, ZERO, tr0 /* Original stack pointer is fine */
-
- /* Set stack pointer for user fault */
- getcon KCR0, SP
- movi THREAD_SIZE, r6 /* Point to the end */
- add SP, r6, SP
-
-stack_ok:
-
-/* DEBUG : check for underflow/overflow of the kernel stack */
- pta no_underflow, tr0
- getcon KCR0, r6
- movi 1024, r18
- add r6, r18, r6
- bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
-
-/* Just panic to cause a crash. */
-bad_sp:
- ld.b r63, 0, r6
- nop
-
-no_underflow:
- pta bad_sp, tr0
- getcon kcr0, r6
- movi THREAD_SIZE, r18
- add r18, r6, r6
- bgt SP, r6, tr0 ! sp above the stack
-
- /* Make some room for the BASIC frame. */
- movi -(FRAME_SIZE), r6
- add SP, r6, SP
-
-/* Could do this with no stalling if we had another spare register, but the
- code below will be OK. */
- ld.q r5, SAVED_R2, r6
- ld.q r5, SAVED_R3, r18
- st.q SP, FRAME_R(2), r6
- ld.q r5, SAVED_R4, r6
- st.q SP, FRAME_R(3), r18
- ld.q r5, SAVED_R5, r18
- st.q SP, FRAME_R(4), r6
- ld.q r5, SAVED_R6, r6
- st.q SP, FRAME_R(5), r18
- ld.q r5, SAVED_R18, r18
- st.q SP, FRAME_R(6), r6
- ld.q r5, SAVED_TR0, r6
- st.q SP, FRAME_R(18), r18
- st.q SP, FRAME_T(0), r6
-
- /* Keep old SP around */
- getcon KCR1, r6
-
- /* Save the rest of the general purpose registers */
- st.q SP, FRAME_R(0), r0
- st.q SP, FRAME_R(1), r1
- st.q SP, FRAME_R(7), r7
- st.q SP, FRAME_R(8), r8
- st.q SP, FRAME_R(9), r9
- st.q SP, FRAME_R(10), r10
- st.q SP, FRAME_R(11), r11
- st.q SP, FRAME_R(12), r12
- st.q SP, FRAME_R(13), r13
- st.q SP, FRAME_R(14), r14
-
- /* SP is somewhere else */
- st.q SP, FRAME_R(15), r6
-
- st.q SP, FRAME_R(16), r16
- st.q SP, FRAME_R(17), r17
- /* r18 is saved earlier. */
- st.q SP, FRAME_R(19), r19
- st.q SP, FRAME_R(20), r20
- st.q SP, FRAME_R(21), r21
- st.q SP, FRAME_R(22), r22
- st.q SP, FRAME_R(23), r23
- st.q SP, FRAME_R(24), r24
- st.q SP, FRAME_R(25), r25
- st.q SP, FRAME_R(26), r26
- st.q SP, FRAME_R(27), r27
- st.q SP, FRAME_R(28), r28
- st.q SP, FRAME_R(29), r29
- st.q SP, FRAME_R(30), r30
- st.q SP, FRAME_R(31), r31
- st.q SP, FRAME_R(32), r32
- st.q SP, FRAME_R(33), r33
- st.q SP, FRAME_R(34), r34
- st.q SP, FRAME_R(35), r35
- st.q SP, FRAME_R(36), r36
- st.q SP, FRAME_R(37), r37
- st.q SP, FRAME_R(38), r38
- st.q SP, FRAME_R(39), r39
- st.q SP, FRAME_R(40), r40
- st.q SP, FRAME_R(41), r41
- st.q SP, FRAME_R(42), r42
- st.q SP, FRAME_R(43), r43
- st.q SP, FRAME_R(44), r44
- st.q SP, FRAME_R(45), r45
- st.q SP, FRAME_R(46), r46
- st.q SP, FRAME_R(47), r47
- st.q SP, FRAME_R(48), r48
- st.q SP, FRAME_R(49), r49
- st.q SP, FRAME_R(50), r50
- st.q SP, FRAME_R(51), r51
- st.q SP, FRAME_R(52), r52
- st.q SP, FRAME_R(53), r53
- st.q SP, FRAME_R(54), r54
- st.q SP, FRAME_R(55), r55
- st.q SP, FRAME_R(56), r56
- st.q SP, FRAME_R(57), r57
- st.q SP, FRAME_R(58), r58
- st.q SP, FRAME_R(59), r59
- st.q SP, FRAME_R(60), r60
- st.q SP, FRAME_R(61), r61
- st.q SP, FRAME_R(62), r62
-
- /*
- * Save the S* registers.
- */
- getcon SSR, r61
- st.q SP, FRAME_S(FSSR), r61
- getcon SPC, r62
- st.q SP, FRAME_S(FSPC), r62
- movi -1, r62 /* Reset syscall_nr */
- st.q SP, FRAME_S(FSYSCALL_ID), r62
-
- /* Save the rest of the target registers */
- gettr tr1, r6
- st.q SP, FRAME_T(1), r6
- gettr tr2, r6
- st.q SP, FRAME_T(2), r6
- gettr tr3, r6
- st.q SP, FRAME_T(3), r6
- gettr tr4, r6
- st.q SP, FRAME_T(4), r6
- gettr tr5, r6
- st.q SP, FRAME_T(5), r6
- gettr tr6, r6
- st.q SP, FRAME_T(6), r6
- gettr tr7, r6
- st.q SP, FRAME_T(7), r6
-
- ! setup FP so that unwinder can wind back through nested kernel mode
- ! exceptions
- add SP, ZERO, r14
-
- /* For syscall and debug race condition, get TRA now */
- getcon TRA, r5
-
- /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
- * Also set FD, to catch FPU usage in the kernel.
- *
- * benedict.gaster@superh.com 29/07/2002
- *
- * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
- * same time change BL from 1->0, as any pending interrupt of a level
- * higher than the previous value of IMASK will leak through and be
- * taken unexpectedly.
- *
- * To avoid this we raise the IMASK and then issue another PUTCON to
- * enable interrupts.
- */
- getcon SR, r6
- movi SR_IMASK | SR_FD, r7
- or r6, r7, r6
- putcon r6, SR
- movi SR_UNBLOCK_EXC, r7
- and r6, r7, r6
- putcon r6, SR
-
-
- /* Now call the appropriate 3rd level handler */
- or r3, ZERO, LINK
- movi trap_jtable, r3
- shlri r2, 3, r2
- ldx.l r2, r3, r3
- shlri r2, 2, r2
- ptabs r3, tr0
- or SP, ZERO, r3
- blink tr0, ZERO
-
-/*
- * Second level handler for VBR-based exceptions. Post-handlers.
- *
- * Post-handlers for interrupts (ret_from_irq), exceptions
- * (ret_from_exception) and common reentrance doors (restore_all
- * to get back to the original context, ret_from_syscall loop to
- * check kernel exiting).
- *
- * ret_with_reschedule and work_notifysig are inner labels of
- * the ret_from_syscall loop.
- *
- * In common to all stack-frame sensitive handlers.
- *
- * Inputs:
- * (SP) struct pt_regs *, original register's frame pointer (basic)
- *
- */
- .global ret_from_irq
-ret_from_irq:
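- /* Bit 30 of the saved SR is the mode bit: set means we are returning to kernel mode */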
- ld.q SP, FRAME_S(FSSR), r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta resume_kernel, tr0
- bne r6, ZERO, tr0 /* no further checks */
- STI()
- pta ret_with_reschedule, tr0
- blink tr0, ZERO /* Do not check softirqs */
-
- .global ret_from_exception
-ret_from_exception:
- preempt_stop()
-
- ld.q SP, FRAME_S(FSSR), r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta resume_kernel, tr0
- bne r6, ZERO, tr0 /* no further checks */
-
- /* Check softirqs */
-
-#ifdef CONFIG_PREEMPTION
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-resume_kernel:
- CLI()
-
- pta restore_all, tr0
-
- getcon KCR0, r6
- ld.l r6, TI_PRE_COUNT, r7
- beq/u r7, ZERO, tr0
-
-need_resched:
- ld.l r6, TI_FLAGS, r7
- movi (1 << TIF_NEED_RESCHED), r8
- and r8, r7, r8
- bne r8, ZERO, tr0
-
- getcon SR, r7
- andi r7, 0xf0, r7
- bne r7, ZERO, tr0
-
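- /* Call preempt_schedule_irq; bit 0 set so the target runs as SHmedia */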
- movi preempt_schedule_irq, r7
- ori r7, 1, r7
- ptabs r7, tr1
- blink tr1, LINK
-
- pta need_resched, tr1
- blink tr1, ZERO
-#endif
-
- .global ret_from_syscall
-ret_from_syscall:
-
-ret_with_reschedule:
- getcon KCR0, r6 ! r6 contains current_thread_info
- ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
-
- movi _TIF_NEED_RESCHED, r8
- and r8, r7, r8
- pta work_resched, tr0
- bne r8, ZERO, tr0
-
- pta restore_all, tr1
-
- movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
- and r8, r7, r8
- pta work_notifysig, tr0
- bne r8, ZERO, tr0
-
- blink tr1, ZERO
-
-work_resched:
- pta ret_from_syscall, tr0
- gettr tr0, LINK
- movi schedule, r6
- ptabs r6, tr0
- blink tr0, ZERO /* Call schedule(), return on top */
-
-work_notifysig:
- gettr tr1, LINK
-
- movi do_notify_resume, r6
- ptabs r6, tr0
- or SP, ZERO, r2
- or r7, ZERO, r3
- blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
-
-restore_all:
- /* Do prefetches */
-
- ld.q SP, FRAME_T(0), r6
- ld.q SP, FRAME_T(1), r7
- ld.q SP, FRAME_T(2), r8
- ld.q SP, FRAME_T(3), r9
- ptabs r6, tr0
- ptabs r7, tr1
- ptabs r8, tr2
- ptabs r9, tr3
- ld.q SP, FRAME_T(4), r6
- ld.q SP, FRAME_T(5), r7
- ld.q SP, FRAME_T(6), r8
- ld.q SP, FRAME_T(7), r9
- ptabs r6, tr4
- ptabs r7, tr5
- ptabs r8, tr6
- ptabs r9, tr7
-
- ld.q SP, FRAME_R(0), r0
- ld.q SP, FRAME_R(1), r1
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ld.q SP, FRAME_R(4), r4
- ld.q SP, FRAME_R(5), r5
- ld.q SP, FRAME_R(6), r6
- ld.q SP, FRAME_R(7), r7
- ld.q SP, FRAME_R(8), r8
- ld.q SP, FRAME_R(9), r9
- ld.q SP, FRAME_R(10), r10
- ld.q SP, FRAME_R(11), r11
- ld.q SP, FRAME_R(12), r12
- ld.q SP, FRAME_R(13), r13
- ld.q SP, FRAME_R(14), r14
-
- ld.q SP, FRAME_R(16), r16
- ld.q SP, FRAME_R(17), r17
- ld.q SP, FRAME_R(18), r18
- ld.q SP, FRAME_R(19), r19
- ld.q SP, FRAME_R(20), r20
- ld.q SP, FRAME_R(21), r21
- ld.q SP, FRAME_R(22), r22
- ld.q SP, FRAME_R(23), r23
- ld.q SP, FRAME_R(24), r24
- ld.q SP, FRAME_R(25), r25
- ld.q SP, FRAME_R(26), r26
- ld.q SP, FRAME_R(27), r27
- ld.q SP, FRAME_R(28), r28
- ld.q SP, FRAME_R(29), r29
- ld.q SP, FRAME_R(30), r30
- ld.q SP, FRAME_R(31), r31
- ld.q SP, FRAME_R(32), r32
- ld.q SP, FRAME_R(33), r33
- ld.q SP, FRAME_R(34), r34
- ld.q SP, FRAME_R(35), r35
- ld.q SP, FRAME_R(36), r36
- ld.q SP, FRAME_R(37), r37
- ld.q SP, FRAME_R(38), r38
- ld.q SP, FRAME_R(39), r39
- ld.q SP, FRAME_R(40), r40
- ld.q SP, FRAME_R(41), r41
- ld.q SP, FRAME_R(42), r42
- ld.q SP, FRAME_R(43), r43
- ld.q SP, FRAME_R(44), r44
- ld.q SP, FRAME_R(45), r45
- ld.q SP, FRAME_R(46), r46
- ld.q SP, FRAME_R(47), r47
- ld.q SP, FRAME_R(48), r48
- ld.q SP, FRAME_R(49), r49
- ld.q SP, FRAME_R(50), r50
- ld.q SP, FRAME_R(51), r51
- ld.q SP, FRAME_R(52), r52
- ld.q SP, FRAME_R(53), r53
- ld.q SP, FRAME_R(54), r54
- ld.q SP, FRAME_R(55), r55
- ld.q SP, FRAME_R(56), r56
- ld.q SP, FRAME_R(57), r57
- ld.q SP, FRAME_R(58), r58
-
- getcon SR, r59
- movi SR_BLOCK_EXC, r60
- or r59, r60, r59
- putcon r59, SR /* SR.BL = 1, keep nesting out */
- ld.q SP, FRAME_S(FSSR), r61
- ld.q SP, FRAME_S(FSPC), r62
- movi SR_ASID_MASK, r60
- and r59, r60, r59
- andc r61, r60, r61 /* Clear out older ASID */
- or r59, r61, r61 /* Retain current ASID */
- putcon r61, SSR
- putcon r62, SPC
-
- /* Ignore FSYSCALL_ID */
-
- ld.q SP, FRAME_R(59), r59
- ld.q SP, FRAME_R(60), r60
- ld.q SP, FRAME_R(61), r61
- ld.q SP, FRAME_R(62), r62
-
- /* Last touch */
- ld.q SP, FRAME_R(15), SP
- rte
- nop
-
-/*
- * Third level handlers for VBR-based exceptions. Adapting args to
- * and/or deflecting to fourth level handlers.
- *
- * Fourth level handlers interface.
- * Most are C-coded handlers directly pointed by the trap_jtable.
- * (Third = Fourth level)
- * Inputs:
- * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
- * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
- * (r3) struct pt_regs *, original register's frame pointer
- * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
- * (r5) TRA control register (for syscall/debug benefit only)
- * (LINK) return address
- * (SP) = r3
- *
- * Kernel TLB fault handlers will get a slightly different interface.
- * (r2) struct pt_regs *, original register's frame pointer
- * (r3) page fault error code (see asm/thread_info.h)
- * (r4) Effective Address of fault
- * (LINK) return address
- * (SP) = r2
- *
- * fpu_error_or_IRQ? is a helper to deflect to the right cause.
- *
- */
-#ifdef CONFIG_MMU
-tlb_miss_load:
- or SP, ZERO, r2
- or ZERO, ZERO, r3 /* Read */
- getcon TEA, r4
- pta call_do_page_fault, tr0
- beq ZERO, ZERO, tr0
-
-tlb_miss_store:
- or SP, ZERO, r2
- movi FAULT_CODE_WRITE, r3 /* Write */
- getcon TEA, r4
- pta call_do_page_fault, tr0
- beq ZERO, ZERO, tr0
-
-itlb_miss_or_IRQ:
- pta its_IRQ, tr0
- beqi/u r4, EVENT_INTERRUPT, tr0
-
- /* ITLB miss */
- or SP, ZERO, r2
- movi FAULT_CODE_ITLB, r3
- getcon TEA, r4
- /* Fall through */
-
-call_do_page_fault:
- movi do_page_fault, r6
- ptabs r6, tr0
- blink tr0, ZERO
-#endif /* CONFIG_MMU */
-
-fpu_error_or_IRQA:
- pta its_IRQ, tr0
- beqi/l r4, EVENT_INTERRUPT, tr0
-#ifdef CONFIG_SH_FPU
- movi fpu_state_restore_trap_handler, r6
-#else
- movi do_exception_error, r6
-#endif
- ptabs r6, tr0
- blink tr0, ZERO
-
-fpu_error_or_IRQB:
- pta its_IRQ, tr0
- beqi/l r4, EVENT_INTERRUPT, tr0
-#ifdef CONFIG_SH_FPU
- movi fpu_state_restore_trap_handler, r6
-#else
- movi do_exception_error, r6
-#endif
- ptabs r6, tr0
- blink tr0, ZERO
-
-its_IRQ:
- movi do_IRQ, r6
- ptabs r6, tr0
- blink tr0, ZERO
-
-/*
- * system_call/unknown_trap third level handler:
- *
- * Inputs:
- * (r2) fault/interrupt code, entry number (TRAP = 11)
- * (r3) struct pt_regs *, original register's frame pointer
- * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
- * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
- * (SP) = r3
- * (LINK) return address: ret_from_exception
- * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
- *
- * Outputs:
- * (*r3) Syscall reply (Saved r2)
- * (LINK) In the case of a syscall only, it can be scrapped.
- * Common second level post handler will be ret_from_syscall.
- * Common (non-trace) exit point to that is syscall_ret (saving
- * result to r2). Common bad exit point is syscall_bad (returning
- * ENOSYS then saved to r2).
- *
- */
-
-unknown_trap:
- /* Unknown Trap or User Trace */
- movi do_unknown_trapa, r6
- ptabs r6, tr0
- ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
- andi r2, 0x1ff, r2 /* r2 = syscall # */
- blink tr0, LINK
-
- pta syscall_ret, tr0
- blink tr0, ZERO
-
- /* New syscall implementation*/
-system_call:
- pta unknown_trap, tr0
- or r5, ZERO, r4 /* TRA (=r5) -> r4 */
- shlri r4, 20, r4
- bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
-
- /* It's a system call */
- st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
- andi r5, 0x1ff, r5 /* syscall # -> r5 */
-
- STI()
-
- pta syscall_allowed, tr0
- movi NR_syscalls - 1, r4 /* Last valid */
- bgeu/l r4, r5, tr0
-
-syscall_bad:
- /* Return ENOSYS ! */
- movi -(ENOSYS), r2 /* Fall-through */
-
- .global syscall_ret
-syscall_ret:
- st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-
-/* A different return path for ret_from_fork, because with later kernels
- * we now need to call schedule_tail. Since prev is loaded into r2 by
- * switch_to(), we can call it straight away.
- */
-
-.global ret_from_fork
-ret_from_fork:
-
- movi schedule_tail,r5
- ori r5, 1, r5
- ptabs r5, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-.global ret_from_kernel_thread
-ret_from_kernel_thread:
-
- movi schedule_tail,r5
- ori r5, 1, r5
- ptabs r5, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ptabs r3, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-syscall_allowed:
- /* Use LINK to deflect the exit point, default is syscall_ret */
- pta syscall_ret, tr0
- gettr tr0, LINK
- pta syscall_notrace, tr0
-
- getcon KCR0, r2
- ld.l r2, TI_FLAGS, r4
- movi _TIF_WORK_SYSCALL_MASK, r6
- and r6, r4, r6
- beq/l r6, ZERO, tr0
-
- /* Trace it by calling syscall_trace before and after */
- movi do_syscall_trace_enter, r4
- or SP, ZERO, r2
- ptabs r4, tr0
- blink tr0, LINK
-
- /* Save the retval */
- st.q SP, FRAME_R(2), r2
-
- /* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
- ld.q SP, FRAME_S(FSYSCALL_ID), r5
- andi r5, 0x1ff, r5
-
- pta syscall_ret_trace, tr0
- gettr tr0, LINK
-
-syscall_notrace:
- /* Now point to the appropriate 4th level syscall handler */
- movi sys_call_table, r4
- shlli r5, 2, r5
- ldx.l r4, r5, r5
- ptabs r5, tr0
-
- /* Prepare original args */
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ld.q SP, FRAME_R(4), r4
- ld.q SP, FRAME_R(5), r5
- ld.q SP, FRAME_R(6), r6
- ld.q SP, FRAME_R(7), r7
-
- /* And now the trick for those syscalls requiring regs * ! */
- or SP, ZERO, r8
-
- /* Call it */
- blink tr0, ZERO /* LINK is already properly set */
-
-syscall_ret_trace:
- /* We get back here only if under trace */
- st.q SP, FRAME_R(9), r2 /* Save return value */
-
- movi do_syscall_trace_leave, LINK
- or SP, ZERO, r2
- ptabs LINK, tr0
- blink tr0, LINK
-
- /* This needs to be done after any syscall tracing */
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
-
- pta ret_from_syscall, tr0
- blink tr0, ZERO /* Resume normal return sequence */
-
-/*
- * --- Switch to running under a particular ASID and return the previous ASID value
- * --- The caller is assumed to have done a cli before calling this.
- *
- * Input r2 : new ASID
- * Output r2 : old ASID
- */
-
- .global switch_and_save_asid
-switch_and_save_asid:
- getcon sr, r0
- movi 255, r4
- shlli r4, 16, r4 /* r4 = mask to select ASID */
- and r0, r4, r3 /* r3 = shifted old ASID */
- andi r2, 255, r2 /* mask down new ASID */
- shlli r2, 16, r2 /* align new ASID against SR.ASID */
- andc r0, r4, r0 /* efface old ASID from SR */
- or r0, r2, r0 /* insert the new ASID */
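- /* Install the updated SR via SSR/SPC and rte so the new ASID takes effect */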
- putcon r0, ssr
- movi 1f, r0
- putcon r0, spc
- rte
- nop
-1:
- ptabs LINK, tr0
- shlri r3, 16, r2 /* r2 = old ASID */
- blink tr0, r63
-
- .global route_to_panic_handler
-route_to_panic_handler:
- /* Switch to real mode, goto panic_handler, don't return. Useful for
- last-chance debugging, e.g. if no output wants to go to the console.
- */
-
- movi panic_handler - CONFIG_PAGE_OFFSET, r1
- ptabs r1, tr0
- pta 1f, tr1
- gettr tr1, r0
- putcon r0, spc
- getcon sr, r0
- movi 1, r1
- shlli r1, 31, r1
- andc r0, r1, r0
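- /* SR.MMU (bit 31) cleared: the rte below comes back with the MMU off */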
- putcon r0, ssr
- rte
- nop
-1: /* Now in real mode */
- blink tr0, r63
- nop
-
- .global peek_real_address_q
-peek_real_address_q:
- /* Two args:
- r2 : real mode address to peek
- r2(out) : result quadword
-
- This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to ioremap the debug
- module, and to avoid the need to ioremap the watchpoint
- controller in a way that identity maps sufficient bits to avoid the
- SH5-101 cut2 silicon defect).
-
- This code is not performance critical
- */
-
- add.l r2, r63, r2 /* sign extend address */
- getcon sr, r0 /* r0 = saved original SR */
- movi 1, r1
- shlli r1, 28, r1
- or r0, r1, r1 /* r0 with block bit set */
- putcon r1, sr /* now in critical section */
- movi 1, r36
- shlli r36, 31, r36
- andc r1, r36, r1 /* turn sr.mmu off in real mode section */
-
- putcon r1, ssr
- movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
- movi 1f, r37 /* virtual mode return addr */
- putcon r36, spc
-
- synco
- rte
- nop
-
-.peek0: /* come here in real mode, don't touch caches!!
- still in critical section (sr.bl==1) */
- putcon r0, ssr
- putcon r37, spc
- /* Here's the actual peek. If the address is bad, all bets are now off
- * what will happen (handlers invoked in real-mode = bad news) */
- ld.q r2, 0, r2
- synco
- rte /* Back to virtual mode */
- nop
-
-1:
- ptabs LINK, tr0
- blink tr0, r63
-
- .global poke_real_address_q
-poke_real_address_q:
- /* Two args:
- r2 : real mode address to poke
- r3 : quadword value to write.
-
- This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to ioremap the debug
- module, and to avoid the need to ioremap the watchpoint
- controller in a way that identity maps sufficient bits to avoid the
- SH5-101 cut2 silicon defect).
-
- This code is not performance critical
- */
-
- add.l r2, r63, r2 /* sign extend address */
- getcon sr, r0 /* r0 = saved original SR */
- movi 1, r1
- shlli r1, 28, r1
- or r0, r1, r1 /* r0 with block bit set */
- putcon r1, sr /* now in critical section */
- movi 1, r36
- shlli r36, 31, r36
- andc r1, r36, r1 /* turn sr.mmu off in real mode section */
-
- putcon r1, ssr
- movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
- movi 1f, r37 /* virtual mode return addr */
- putcon r36, spc
-
- synco
- rte
- nop
-
-.poke0: /* come here in real mode, don't touch caches!!
- still in critical section (sr.bl==1) */
- putcon r0, ssr
- putcon r37, spc
- /* Here's the actual poke. If the address is bad, all bets are now off
- * what will happen (handlers invoked in real-mode = bad news) */
- st.q r2, 0, r3
- synco
- rte /* Back to virtual mode */
- nop
-
-1:
- ptabs LINK, tr0
- blink tr0, r63
-
-#ifdef CONFIG_MMU
-/*
- * --- User Access Handling Section
- */
-
-/*
- * User Access support. It has all moved to non-inlined assembler
- * functions in here.
- *
- * __kernel_size_t __copy_user(void *__to, const void *__from,
- * __kernel_size_t __n)
- *
- * Inputs:
- * (r2) target address
- * (r3) source address
- * (r4) size in bytes
- *
- * Outputs:
- * (*r2) target data
- * (r2) non-copied bytes
- *
- * If a fault occurs on the user pointer, bail out early and return the
- * number of bytes not copied in r2.
- * Strategy : for large blocks, call a real memcpy function which can
- * move >1 byte at a time using unaligned ld/st instructions, and can
- * manipulate the cache using prefetch + alloco to improve the speed
- * further. If a fault occurs in that function, just revert to the
- * byte-by-byte approach used for small blocks; this is rare so the
- * performance hit for that case does not matter.
- *
- * For small blocks it's not worth the overhead of setting up and calling
- * the memcpy routine; do the copy a byte at a time.
- *
- */
- .global __copy_user
-__copy_user:
- pta __copy_user_byte_by_byte, tr1
- movi 16, r0 ! this value is a best guess, should tune it by benchmarking
- bge/u r0, r4, tr1
- pta copy_user_memcpy, tr0
- addi SP, -32, SP
- /* Save arguments in case we have to fix-up unhandled page fault */
- st.q SP, 0, r2
- st.q SP, 8, r3
- st.q SP, 16, r4
- st.q SP, 24, r35 ! r35 is callee-save
- /* Save LINK in a register to reduce RTS time later (otherwise
- ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
- ori LINK, 0, r35
- blink tr0, LINK
-
- /* Copy completed normally if we get back here */
- ptabs r35, tr0
- ld.q SP, 24, r35
- /* don't restore r2-r4, pointless */
- /* set result=r2 to zero as the copy must have succeeded. */
- or r63, r63, r2
- addi SP, 32, SP
- blink tr0, r63 ! RTS
-
- .global __copy_user_fixup
-__copy_user_fixup:
- /* Restore stack frame */
- ori r35, 0, LINK
- ld.q SP, 24, r35
- ld.q SP, 16, r4
- ld.q SP, 8, r3
- ld.q SP, 0, r2
- addi SP, 32, SP
- /* Fall through to original code, in the 'same' state we entered with */
-
-/* The slow byte-by-byte method is used if the fast copy traps due to a bad
- user address. In that rare case, the speed drop can be tolerated. */
-__copy_user_byte_by_byte:
- pta ___copy_user_exit, tr1
- pta ___copy_user1, tr0
- beq/u r4, r63, tr1 /* early exit for zero length copy */
- sub r2, r3, r0
- addi r0, -1, r0
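- /* r0 = dest - src - 1, so stx.b r3, r0 hits the matching dest byte once r3 has advanced */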
-
-___copy_user1:
- ld.b r3, 0, r5 /* Fault address 1 */
-
- /* Could rewrite this to use just 1 add, but the second comes 'free'
- due to load latency */
- addi r3, 1, r3
- addi r4, -1, r4 /* No real fixup required */
-___copy_user2:
- stx.b r3, r0, r5 /* Fault address 2 */
- bne r4, ZERO, tr0
-
-___copy_user_exit:
- or r4, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
- *
- * Inputs:
- * (r2) target address
- * (r3) size in bytes
- *
- * Outputs:
- * (*r2) zero-ed target data
- * (r2) non-zero-ed bytes
- */
- .global __clear_user
-__clear_user:
- pta ___clear_user_exit, tr1
- pta ___clear_user1, tr0
- beq/u r3, r63, tr1
-
-___clear_user1:
- st.b r2, 0, ZERO /* Fault address */
- addi r2, 1, r2
- addi r3, -1, r3 /* No real fixup required */
- bne r3, ZERO, tr0
-
-___clear_user_exit:
- or r3, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-#endif /* CONFIG_MMU */
-
-/*
- * extern long __get_user_asm_?(void *val, long addr)
- *
- * Inputs:
- * (r2) dest address
- * (r3) source address (in User Space)
- *
- * Outputs:
- * (r2) -EFAULT (faulting)
- * 0 (not faulting)
- */
- .global __get_user_asm_b
-__get_user_asm_b:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_b1:
- ld.b r3, 0, r5 /* r5 = data */
- st.b r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_b_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_w
-__get_user_asm_w:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_w1:
- ld.w r3, 0, r5 /* r5 = data */
- st.w r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_w_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_l
-__get_user_asm_l:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_l1:
- ld.l r3, 0, r5 /* r5 = data */
- st.l r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_l_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_q
-__get_user_asm_q:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_q1:
- ld.q r3, 0, r5 /* r5 = data */
- st.q r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_q_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * extern long __put_user_asm_?(void *pval, long addr)
- *
- * Inputs:
- * (r2) kernel pointer to value
- * (r3) dest address (in User Space)
- *
- * Outputs:
- * (r2) -EFAULT (faulting)
- * 0 (not faulting)
- */
- .global __put_user_asm_b
-__put_user_asm_b:
- ld.b r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_b1:
- st.b r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_b_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_w
-__put_user_asm_w:
- ld.w r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_w1:
- st.w r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_w_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_l
-__put_user_asm_l:
- ld.l r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_l1:
- st.l r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_l_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_q
-__put_user_asm_q:
- ld.q r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_q1:
- st.q r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_q_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-panic_stash_regs:
- /* The idea is : when we get an unhandled panic, we dump the registers
- to a known memory location, then just sit in a tight loop.
- This allows the human to look at the memory region through the GDB
- session (assuming the debug module's SHwy initiator isn't locked up
- or anything), to hopefully analyze the cause of the panic. */
-
- /* On entry, former r15 (SP) is in DCR
- former r0 is at resvec_saved_area + 0
- former r1 is at resvec_saved_area + 8
- former tr0 is at resvec_saved_area + 32
- DCR is the only register whose value is lost altogether.
- */
-
- movi 0xffffffff80000000, r0 ! phy of dump area
- ld.q SP, 0x000, r1 ! former r0
- st.q r0, 0x000, r1
- ld.q SP, 0x008, r1 ! former r1
- st.q r0, 0x008, r1
- st.q r0, 0x010, r2
- st.q r0, 0x018, r3
- st.q r0, 0x020, r4
- st.q r0, 0x028, r5
- st.q r0, 0x030, r6
- st.q r0, 0x038, r7
- st.q r0, 0x040, r8
- st.q r0, 0x048, r9
- st.q r0, 0x050, r10
- st.q r0, 0x058, r11
- st.q r0, 0x060, r12
- st.q r0, 0x068, r13
- st.q r0, 0x070, r14
- getcon dcr, r14
- st.q r0, 0x078, r14
- st.q r0, 0x080, r16
- st.q r0, 0x088, r17
- st.q r0, 0x090, r18
- st.q r0, 0x098, r19
- st.q r0, 0x0a0, r20
- st.q r0, 0x0a8, r21
- st.q r0, 0x0b0, r22
- st.q r0, 0x0b8, r23
- st.q r0, 0x0c0, r24
- st.q r0, 0x0c8, r25
- st.q r0, 0x0d0, r26
- st.q r0, 0x0d8, r27
- st.q r0, 0x0e0, r28
- st.q r0, 0x0e8, r29
- st.q r0, 0x0f0, r30
- st.q r0, 0x0f8, r31
- st.q r0, 0x100, r32
- st.q r0, 0x108, r33
- st.q r0, 0x110, r34
- st.q r0, 0x118, r35
- st.q r0, 0x120, r36
- st.q r0, 0x128, r37
- st.q r0, 0x130, r38
- st.q r0, 0x138, r39
- st.q r0, 0x140, r40
- st.q r0, 0x148, r41
- st.q r0, 0x150, r42
- st.q r0, 0x158, r43
- st.q r0, 0x160, r44
- st.q r0, 0x168, r45
- st.q r0, 0x170, r46
- st.q r0, 0x178, r47
- st.q r0, 0x180, r48
- st.q r0, 0x188, r49
- st.q r0, 0x190, r50
- st.q r0, 0x198, r51
- st.q r0, 0x1a0, r52
- st.q r0, 0x1a8, r53
- st.q r0, 0x1b0, r54
- st.q r0, 0x1b8, r55
- st.q r0, 0x1c0, r56
- st.q r0, 0x1c8, r57
- st.q r0, 0x1d0, r58
- st.q r0, 0x1d8, r59
- st.q r0, 0x1e0, r60
- st.q r0, 0x1e8, r61
- st.q r0, 0x1f0, r62
- st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
-
- ld.q SP, 0x020, r1 ! former tr0
- st.q r0, 0x200, r1
- gettr tr1, r1
- st.q r0, 0x208, r1
- gettr tr2, r1
- st.q r0, 0x210, r1
- gettr tr3, r1
- st.q r0, 0x218, r1
- gettr tr4, r1
- st.q r0, 0x220, r1
- gettr tr5, r1
- st.q r0, 0x228, r1
- gettr tr6, r1
- st.q r0, 0x230, r1
- gettr tr7, r1
- st.q r0, 0x238, r1
-
- getcon sr, r1
- getcon ssr, r2
- getcon pssr, r3
- getcon spc, r4
- getcon pspc, r5
- getcon intevt, r6
- getcon expevt, r7
- getcon pexpevt, r8
- getcon tra, r9
- getcon tea, r10
- getcon kcr0, r11
- getcon kcr1, r12
- getcon vbr, r13
- getcon resvec, r14
-
- st.q r0, 0x240, r1
- st.q r0, 0x248, r2
- st.q r0, 0x250, r3
- st.q r0, 0x258, r4
- st.q r0, 0x260, r5
- st.q r0, 0x268, r6
- st.q r0, 0x270, r7
- st.q r0, 0x278, r8
- st.q r0, 0x280, r9
- st.q r0, 0x288, r10
- st.q r0, 0x290, r11
- st.q r0, 0x298, r12
- st.q r0, 0x2a0, r13
- st.q r0, 0x2a8, r14
-
- getcon SPC,r2
- getcon SSR,r3
- getcon EXPEVT,r4
- /* Prepare to jump to C - physical address */
- movi panic_handler-CONFIG_PAGE_OFFSET, r1
- ori r1, 1, r1
- ptabs r1, tr0
- getcon DCR, SP
- blink tr0, ZERO
- nop
- nop
- nop
- nop
-
-
-
-
-/*
- * --- Signal Handling Section
- */
-
-/*
- * extern long long _sa_default_rt_restorer
- * extern long long _sa_default_restorer
- *
- * or, better,
- *
- * extern void _sa_default_rt_restorer(void)
- * extern void _sa_default_restorer(void)
- *
- * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
- * from user space. Copied into user space by signal management.
- * Both must be quad-aligned and 2 quads long (4 instructions).
- *
- */
- .balign 8
- .global sa_default_rt_restorer
-sa_default_rt_restorer:
- movi 0x10, r9
- shori __NR_rt_sigreturn, r9
- trapa r9
- nop
-
- .balign 8
- .global sa_default_restorer
-sa_default_restorer:
- movi 0x10, r9
- shori __NR_sigreturn, r9
- trapa r9
- nop
-
-/*
- * --- __ex_table Section
- */
-
-/*
- * User Access Exception Table.
- */
- .section __ex_table, "a"
-
- .global asm_uaccess_start /* Just a marker */
-asm_uaccess_start:
-
-#ifdef CONFIG_MMU
- .long ___copy_user1, ___copy_user_exit
- .long ___copy_user2, ___copy_user_exit
- .long ___clear_user1, ___clear_user_exit
-#endif
- .long ___get_user_asm_b1, ___get_user_asm_b_exit
- .long ___get_user_asm_w1, ___get_user_asm_w_exit
- .long ___get_user_asm_l1, ___get_user_asm_l_exit
- .long ___get_user_asm_q1, ___get_user_asm_q_exit
- .long ___put_user_asm_b1, ___put_user_asm_b_exit
- .long ___put_user_asm_w1, ___put_user_asm_w_exit
- .long ___put_user_asm_l1, ___put_user_asm_l_exit
- .long ___put_user_asm_q1, ___put_user_asm_q_exit
-
- .global asm_uaccess_end /* Just a marker */
-asm_uaccess_end:
-
-
-
-
-/*
- * --- .init.text Section
- */
-
- __INIT
-
-/*
- * void trap_init (void)
- *
- */
- .global trap_init
-trap_init:
- addi SP, -24, SP /* Room to save r28/r29/r30 */
- st.q SP, 0, r28
- st.q SP, 8, r29
- st.q SP, 16, r30
-
- /* Set VBR and RESVEC */
- movi LVBR_block, r19
- andi r19, -4, r19 /* reset MMUOFF + reserved */
- /* For RESVEC exceptions we force the MMU off, which means we need the
- physical address. */
- movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
- andi r20, -4, r20 /* reset reserved */
- ori r20, 1, r20 /* set MMUOFF */
- putcon r19, VBR
- putcon r20, RESVEC
-
- /* Sanity check */
- movi LVBR_block_end, r21
- andi r21, -4, r21
- movi BLOCK_SIZE, r29 /* r29 = expected size */
- or r19, ZERO, r30
- add r19, r29, r19
-
- /*
- * Ugly, but better to loop forever now than to crash afterwards.
- * We should print a message, but if we touch LVBR or
- * LRESVEC blocks we should not be surprised if we get stuck
- * in trap_init().
- */
- pta trap_init_loop, tr1
- gettr tr1, r28 /* r28 = trap_init_loop */
- sub r21, r30, r30 /* r30 = actual size */
-
- /*
- * VBR/RESVEC handlers overlap by being bigger than
- * allowed. Very bad. Just loop forever.
- * (r28) panic/loop address
- * (r29) expected size
- * (r30) actual size
- */
-trap_init_loop:
- bne r19, r21, tr1
-
- /* Now that exception vectors are set up reset SR.BL */
- getcon SR, r22
- movi SR_UNBLOCK_EXC, r23
- and r22, r23, r22
- putcon r22, SR
-
- addi SP, 24, SP
- ptabs LINK, tr0
- blink tr0, ZERO
-
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
deleted file mode 100644
index 3966b5ee8e93..000000000000
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/fpu.c
- *
- * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
- * Copyright (C) 2002 STMicroelectronics Limited
- * Author : Stuart Menefy
- *
- * Started from SH4 version:
- * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- */
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <asm/processor.h>
-
-void save_fpu(struct task_struct *tsk)
-{
- asm volatile("fst.p %0, (0*8), fp0\n\t"
- "fst.p %0, (1*8), fp2\n\t"
- "fst.p %0, (2*8), fp4\n\t"
- "fst.p %0, (3*8), fp6\n\t"
- "fst.p %0, (4*8), fp8\n\t"
- "fst.p %0, (5*8), fp10\n\t"
- "fst.p %0, (6*8), fp12\n\t"
- "fst.p %0, (7*8), fp14\n\t"
- "fst.p %0, (8*8), fp16\n\t"
- "fst.p %0, (9*8), fp18\n\t"
- "fst.p %0, (10*8), fp20\n\t"
- "fst.p %0, (11*8), fp22\n\t"
- "fst.p %0, (12*8), fp24\n\t"
- "fst.p %0, (13*8), fp26\n\t"
- "fst.p %0, (14*8), fp28\n\t"
- "fst.p %0, (15*8), fp30\n\t"
- "fst.p %0, (16*8), fp32\n\t"
- "fst.p %0, (17*8), fp34\n\t"
- "fst.p %0, (18*8), fp36\n\t"
- "fst.p %0, (19*8), fp38\n\t"
- "fst.p %0, (20*8), fp40\n\t"
- "fst.p %0, (21*8), fp42\n\t"
- "fst.p %0, (22*8), fp44\n\t"
- "fst.p %0, (23*8), fp46\n\t"
- "fst.p %0, (24*8), fp48\n\t"
- "fst.p %0, (25*8), fp50\n\t"
- "fst.p %0, (26*8), fp52\n\t"
- "fst.p %0, (27*8), fp54\n\t"
- "fst.p %0, (28*8), fp56\n\t"
- "fst.p %0, (29*8), fp58\n\t"
- "fst.p %0, (30*8), fp60\n\t"
- "fst.p %0, (31*8), fp62\n\t"
-
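- /* FPSCR: read it via fr63, then store it in the extra slot at (32*8) */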
- "fgetscr fr63\n\t"
- "fst.s %0, (32*8), fr63\n\t"
- : /* no output */
- : "r" (&tsk->thread.xstate->hardfpu)
- : "memory");
-}
-
-void restore_fpu(struct task_struct *tsk)
-{
- asm volatile("fld.p %0, (0*8), fp0\n\t"
- "fld.p %0, (1*8), fp2\n\t"
- "fld.p %0, (2*8), fp4\n\t"
- "fld.p %0, (3*8), fp6\n\t"
- "fld.p %0, (4*8), fp8\n\t"
- "fld.p %0, (5*8), fp10\n\t"
- "fld.p %0, (6*8), fp12\n\t"
- "fld.p %0, (7*8), fp14\n\t"
- "fld.p %0, (8*8), fp16\n\t"
- "fld.p %0, (9*8), fp18\n\t"
- "fld.p %0, (10*8), fp20\n\t"
- "fld.p %0, (11*8), fp22\n\t"
- "fld.p %0, (12*8), fp24\n\t"
- "fld.p %0, (13*8), fp26\n\t"
- "fld.p %0, (14*8), fp28\n\t"
- "fld.p %0, (15*8), fp30\n\t"
- "fld.p %0, (16*8), fp32\n\t"
- "fld.p %0, (17*8), fp34\n\t"
- "fld.p %0, (18*8), fp36\n\t"
- "fld.p %0, (19*8), fp38\n\t"
- "fld.p %0, (20*8), fp40\n\t"
- "fld.p %0, (21*8), fp42\n\t"
- "fld.p %0, (22*8), fp44\n\t"
- "fld.p %0, (23*8), fp46\n\t"
- "fld.p %0, (24*8), fp48\n\t"
- "fld.p %0, (25*8), fp50\n\t"
- "fld.p %0, (26*8), fp52\n\t"
- "fld.p %0, (27*8), fp54\n\t"
- "fld.p %0, (28*8), fp56\n\t"
- "fld.p %0, (29*8), fp58\n\t"
- "fld.p %0, (30*8), fp60\n\t"
-
- "fld.s %0, (32*8), fr63\n\t"
- "fputscr fr63\n\t"
-
- "fld.p %0, (31*8), fp62\n\t"
- : /* no output */
- : "r" (&tsk->thread.xstate->hardfpu)
- : "memory");
-}
-
-asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
-{
- regs->pc += 4;
-
- force_sig(SIGFPE);
-}
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
deleted file mode 100644
index 947250188065..000000000000
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/probe.c
- *
- * CPU Subtype Probing for SH-5.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/string.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/tlb.h>
-
-void cpu_probe(void)
-{
- unsigned long long cir;
-
- /*
- * Do peeks in real mode to avoid having to set up a mapping for
- * the WPC registers. On SH5-101 cut2, such a mapping would be
- * exposed to an address translation erratum which would make it
- * hard to set up correctly.
- */
- cir = peek_real_address_q(0x0d000008);
- if ((cir & 0xffff) == 0x5103)
- boot_cpu_data.type = CPU_SH5_103;
- else if (((cir >> 32) & 0xffff) == 0x51e2)
- /* CPU.VCR aliased at CIR address on SH5-101 */
- boot_cpu_data.type = CPU_SH5_101;
-
- boot_cpu_data.family = CPU_FAMILY_SH5;
-
- /*
- * First, setup some sane values for the I-cache.
- */
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.icache.sets = 256;
- boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
- boot_cpu_data.icache.way_incr = (1 << 13);
- boot_cpu_data.icache.entry_shift = 5;
- boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
- boot_cpu_data.icache.linesz;
- boot_cpu_data.icache.entry_mask = 0x1fe0;
- boot_cpu_data.icache.flags = 0;
-
- /*
- * Next, setup some sane values for the D-cache.
- *
- * On the SH5, these are pretty consistent with the I-cache settings,
- * so we just copy over the existing definitions.. these can be fixed
- * up later, especially if we add runtime CPU probing.
- *
- * Though in the meantime it saves us from having to duplicate all of
- * the above definitions..
- */
- boot_cpu_data.dcache = boot_cpu_data.icache;
-
- /*
- * Setup any cache-related flags here
- */
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
-#elif defined(CONFIG_CACHE_WRITEBACK)
- set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
-#endif
-
- /* Setup some I/D TLB defaults */
- sh64_tlb_init();
-}
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
deleted file mode 100644
index dc8476d67244..000000000000
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SH5-101/SH5-103 CPU Setup
- *
- * Copyright (C) 2009 Paul Mundt
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial.h>
-#include <linux/serial_sci.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/sh_timer.h>
-#include <asm/addrspace.h>
-#include <asm/platform_early.h>
-
-static struct plat_sci_port scif0_platform_data = {
- .flags = UPF_IOREMAP,
- .scscr = SCSCR_REIE,
- .type = PORT_SCIF,
-};
-
-static struct resource scif0_resources[] = {
- DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
- DEFINE_RES_IRQ(39),
- DEFINE_RES_IRQ(40),
- DEFINE_RES_IRQ(42),
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .resource = scif0_resources,
- .num_resources = ARRAY_SIZE(scif0_resources),
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct resource rtc_resources[] = {
- [0] = {
- .start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
- .end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
- .flags = IORESOURCE_IO,
- },
- [1] = {
- /* Period IRQ */
- .start = IRQ_PRI,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = IRQ_CUI,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = IRQ_ATI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device rtc_device = {
- .name = "sh-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(rtc_resources),
- .resource = rtc_resources,
-};
-
-#define TMU_BLOCK_OFF 0x01020000
-#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
-
-static struct sh_timer_config tmu0_platform_data = {
- .channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
- DEFINE_RES_MEM(TMU_BASE, 0x30),
- DEFINE_RES_IRQ(IRQ_TUNI0),
- DEFINE_RES_IRQ(IRQ_TUNI1),
- DEFINE_RES_IRQ(IRQ_TUNI2),
-};
-
-static struct platform_device tmu0_device = {
- .name = "sh-tmu",
- .id = 0,
- .dev = {
- .platform_data = &tmu0_platform_data,
- },
- .resource = tmu0_resources,
- .num_resources = ARRAY_SIZE(tmu0_resources),
-};
-
-static struct platform_device *sh5_early_devices[] __initdata = {
- &scif0_device,
- &tmu0_device,
-};
-
-static struct platform_device *sh5_devices[] __initdata = {
- &rtc_device,
-};
-
-static int __init sh5_devices_setup(void)
-{
- int ret;
-
- ret = platform_add_devices(sh5_early_devices,
- ARRAY_SIZE(sh5_early_devices));
- if (unlikely(ret != 0))
- return ret;
-
- return platform_add_devices(sh5_devices,
- ARRAY_SIZE(sh5_devices));
-}
-arch_initcall(sh5_devices_setup);
-
-void __init plat_early_device_setup(void)
-{
- sh_early_platform_add_devices(sh5_early_devices,
- ARRAY_SIZE(sh5_early_devices));
-}
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
deleted file mode 100644
index d1beff755632..000000000000
--- a/arch/sh/kernel/cpu/sh5/switchto.S
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/cpu/sh5/switchto.S
- *
- * sh64 context switch
- *
- * Copyright (C) 2004 Richard Curnow
-*/
-
- .section .text..SHmedia32,"ax"
- .little
-
- .balign 32
-
- .type sh64_switch_to,@function
- .global sh64_switch_to
- .global __sh64_switch_to_end
-sh64_switch_to:
-
-/* Incoming args
- r2 - prev
- r3 - &prev->thread
- r4 - next
- r5 - &next->thread
-
- Outgoing results
- r2 - last (=prev) : this just stays in r2 throughout
-
- Want to create a full (struct pt_regs) on the stack to allow backtracing
- functions to work. However, we only need to populate the callee-save
- register slots in this structure; since we're a function our ancestors must
- have themselves preserved all caller saved state in the stack. This saves
- some wasted effort since we won't need to look at the values.
-
- In particular, all caller-save registers are immediately available for
- scratch use.
-
-*/
-
-#define FRAME_SIZE (76*8 + 8)
-
- movi FRAME_SIZE, r0
- sub.l r15, r0, r15
- ! Do normal-style register save to support backtrace
-
- st.l r15, 0, r18 ! save link reg
- st.l r15, 4, r14 ! save fp
- add.l r15, r63, r14 ! setup frame pointer
-
- ! hopefully this looks normal to the backtrace now.
-
- addi.l r15, 8, r1 ! base of pt_regs
- addi.l r1, 24, r0 ! base of pt_regs.regs
- addi.l r0, (63*8), r8 ! base of pt_regs.trregs
-
- /* Note : to be fixed?
- struct pt_regs is really designed for holding the state on entry
- to an exception, i.e. pc,sr,regs etc. However, for the context
- switch state, some of this is not required. But the unwinder takes
- struct pt_regs * as an arg so we have to build this structure
- to allow unwinding switched tasks in show_state() */
-
- st.q r0, ( 9*8), r9
- st.q r0, (10*8), r10
- st.q r0, (11*8), r11
- st.q r0, (12*8), r12
- st.q r0, (13*8), r13
- st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
- ! the point where the process is left in suspended animation, i.e. current
- ! fp here, not the saved one.
- st.q r0, (16*8), r16
-
- st.q r0, (24*8), r24
- st.q r0, (25*8), r25
- st.q r0, (26*8), r26
- st.q r0, (27*8), r27
- st.q r0, (28*8), r28
- st.q r0, (29*8), r29
- st.q r0, (30*8), r30
- st.q r0, (31*8), r31
- st.q r0, (32*8), r32
- st.q r0, (33*8), r33
- st.q r0, (34*8), r34
- st.q r0, (35*8), r35
-
- st.q r0, (44*8), r44
- st.q r0, (45*8), r45
- st.q r0, (46*8), r46
- st.q r0, (47*8), r47
- st.q r0, (48*8), r48
- st.q r0, (49*8), r49
- st.q r0, (50*8), r50
- st.q r0, (51*8), r51
- st.q r0, (52*8), r52
- st.q r0, (53*8), r53
- st.q r0, (54*8), r54
- st.q r0, (55*8), r55
- st.q r0, (56*8), r56
- st.q r0, (57*8), r57
- st.q r0, (58*8), r58
- st.q r0, (59*8), r59
-
- ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
- ! Use a local label to avoid creating a symbol that will confuse the
- ! backtrace
- pta .Lsave_pc, tr0
-
- gettr tr5, r45
- gettr tr6, r46
- gettr tr7, r47
- st.q r8, (5*8), r45
- st.q r8, (6*8), r46
- st.q r8, (7*8), r47
-
- ! Now switch context
- gettr tr0, r9
- st.l r3, 0, r15 ! prev->thread.sp
- st.l r3, 8, r1 ! prev->thread.kregs
- st.l r3, 4, r9 ! prev->thread.pc
- st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
-
- ! Load PC for next task (init value or save_pc later)
- ld.l r5, 4, r18 ! next->thread.pc
- ! Switch stacks
- ld.l r5, 0, r15 ! next->thread.sp
- ptabs r18, tr0
-
- ! Update current
- ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
- putcon r9, kcr0 ! current = next->thread_info
-
- ! go to save_pc for a reschedule, or the initial thread.pc for a new process
- blink tr0, r63
-
- ! Restore (when we come back to a previously saved task)
-.Lsave_pc:
- addi.l r15, 32, r0 ! r0 = next's regs
- addi.l r0, (63*8), r8 ! r8 = next's tr_regs
-
- ld.q r8, (5*8), r45
- ld.q r8, (6*8), r46
- ld.q r8, (7*8), r47
- ptabs r45, tr5
- ptabs r46, tr6
- ptabs r47, tr7
-
- ld.q r0, ( 9*8), r9
- ld.q r0, (10*8), r10
- ld.q r0, (11*8), r11
- ld.q r0, (12*8), r12
- ld.q r0, (13*8), r13
- ld.q r0, (14*8), r14
- ld.q r0, (16*8), r16
-
- ld.q r0, (24*8), r24
- ld.q r0, (25*8), r25
- ld.q r0, (26*8), r26
- ld.q r0, (27*8), r27
- ld.q r0, (28*8), r28
- ld.q r0, (29*8), r29
- ld.q r0, (30*8), r30
- ld.q r0, (31*8), r31
- ld.q r0, (32*8), r32
- ld.q r0, (33*8), r33
- ld.q r0, (34*8), r34
- ld.q r0, (35*8), r35
-
- ld.q r0, (44*8), r44
- ld.q r0, (45*8), r45
- ld.q r0, (46*8), r46
- ld.q r0, (47*8), r47
- ld.q r0, (48*8), r48
- ld.q r0, (49*8), r49
- ld.q r0, (50*8), r50
- ld.q r0, (51*8), r51
- ld.q r0, (52*8), r52
- ld.q r0, (53*8), r53
- ld.q r0, (54*8), r54
- ld.q r0, (55*8), r55
- ld.q r0, (56*8), r56
- ld.q r0, (57*8), r57
- ld.q r0, (58*8), r58
- ld.q r0, (59*8), r59
-
- ! epilogue
- ld.l r15, 0, r18
- ld.l r15, 4, r14
- ptabs r18, tr0
- movi FRAME_SIZE, r0
- add r15, r0, r15
- blink tr0, r63
-__sh64_switch_to_end:
-.LFE1:
- .size sh64_switch_to,.LFE1-sh64_switch_to
-
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
deleted file mode 100644
index 3cb0cd9cea29..000000000000
--- a/arch/sh/kernel/cpu/sh5/unwind.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/unwind.c
- *
- * Copyright (C) 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/unwinder.h>
-#include <asm/stacktrace.h>
-
-static u8 regcache[63];
-
-/*
- * Finding the previous stack frame isn't as straightforward as it is
- * on some other platforms. In the sh64 case, we don't have "linked" stack
- * frames, so we need to do a bit of work to determine the previous frame,
- * and in turn, the previous r14/r18 pair.
- *
- * There are generally a few cases which determine where we can find out
- * the r14/r18 values. In the general case, this can be determined by poking
- * around the prologue of the symbol PC is in (note that we absolutely must
- * have frame pointer support as well as the kernel symbol table mapped,
- * otherwise we can't even get this far).
- *
- * In other cases, such as the interrupt/exception path, we can poke around
- * the sp/fp.
- *
- * Notably, this entire approach is somewhat error prone, and in the event
- * that the previous frame cannot be determined, that's all we can do.
- * Either way, this still leaves us with a more correct backtrace than what
- * we would be able to come up with by walking the stack (which is garbage
- * for anything beyond the first frame).
- * -- PFM.
- */
-static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
- unsigned long *pprev_fp, unsigned long *pprev_pc,
- struct pt_regs *regs)
-{
- const char *sym;
- char namebuf[128];
- unsigned long offset;
- unsigned long prologue = 0;
- unsigned long fp_displacement = 0;
- unsigned long fp_prev = 0;
- unsigned long offset_r14 = 0, offset_r18 = 0;
- int i, found_prologue_end = 0;
-
- sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
- if (!sym)
- return -EINVAL;
-
- prologue = pc - offset;
- if (!prologue)
- return -EINVAL;
-
- /* Validate fp, to avoid risk of dereferencing a bad pointer later.
- Assume 128Mb since that's the amount of RAM on a Cayman. Modify
- when there is an SH-5 board with more. */
- if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
- (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
- ((fp & 7) != 0)) {
- return -EINVAL;
- }
-
- /*
- * Depth to walk, depth is completely arbitrary.
- */
- for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
- unsigned long op;
- u8 major, minor;
- u8 src, dest, disp;
-
- op = *(unsigned long *)prologue;
-
- major = (op >> 26) & 0x3f;
- src = (op >> 20) & 0x3f;
- minor = (op >> 16) & 0xf;
- disp = (op >> 10) & 0x3f;
- dest = (op >> 4) & 0x3f;
-
- /*
- * Stack frame creation happens in a number of ways.. in the
- * general case when the stack frame is less than 511 bytes,
- * it's generally created by an addi or addi.l:
- *
- * addi/addi.l r15, -FRAME_SIZE, r15
- *
- * in the event that the frame size is bigger than this, it's
- * typically created using a movi/sub pair as follows:
- *
- * movi FRAME_SIZE, rX
- * sub r15, rX, r15
- */
-
- switch (major) {
- case (0x00 >> 2):
- switch (minor) {
- case 0x8: /* add.l */
- case 0x9: /* add */
- /* Look for r15, r63, r14 */
- if (src == 15 && disp == 63 && dest == 14)
- found_prologue_end = 1;
-
- break;
- case 0xa: /* sub.l */
- case 0xb: /* sub */
- if (src != 15 || dest != 15)
- continue;
-
- fp_displacement -= regcache[disp];
- fp_prev = fp - fp_displacement;
- break;
- }
- break;
- case (0xa8 >> 2): /* st.l */
- if (src != 15)
- continue;
-
- switch (dest) {
- case 14:
- if (offset_r14 || fp_displacement == 0)
- continue;
-
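- /* Sign-extend the 10-bit displacement field (bits 10..19) of the opcode */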
- offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- offset_r14 *= sizeof(unsigned long);
- offset_r14 += fp_displacement;
- break;
- case 18:
- if (offset_r18 || fp_displacement == 0)
- continue;
-
- offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- offset_r18 *= sizeof(unsigned long);
- offset_r18 += fp_displacement;
- break;
- }
-
- break;
- case (0xcc >> 2): /* movi */
- if (dest >= 63) {
- printk(KERN_NOTICE "%s: Invalid dest reg %d "
- "specified in movi handler. Failed "
- "opcode was 0x%lx: ", __func__,
- dest, op);
-
- continue;
- }
-
- /* Sign extend */
- regcache[dest] =
- sign_extend64((((u64)op >> 10) & 0xffff), 9);
- break;
- case (0xd0 >> 2): /* addi */
- case (0xd4 >> 2): /* addi.l */
- /* Look for r15, -FRAME_SIZE, r15 */
- if (src != 15 || dest != 15)
- continue;
-
- /* Sign extended frame size.. */
- fp_displacement +=
- (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- fp_prev = fp - fp_displacement;
- break;
- }
-
- if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
- break;
- }
-
- if (offset_r14 == 0 || fp_prev == 0) {
- if (!offset_r14)
- pr_debug("Unable to find r14 offset\n");
- if (!fp_prev)
- pr_debug("Unable to find previous fp\n");
-
- return -EINVAL;
- }
-
- /* For the innermost leaf function, there might not be an offset_r18 */
- if (!*pprev_pc && (offset_r18 == 0))
- return -EINVAL;
-
- *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
-
- if (offset_r18)
- *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
-
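- /* Strip the SHmedia mode bit (bit 0) from the recovered return address */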
- *pprev_pc &= ~1;
-
- return 0;
-}
-
-/*
- * Don't put this on the stack since we'll want to call in to
- * sh64_unwinder_dump() when we're close to underflowing the stack
- * anyway.
- */
-static struct pt_regs here_regs;
-
-extern const char syscall_ret;
-extern const char ret_from_syscall;
-extern const char ret_from_exception;
-extern const char ret_from_irq;
-
-static void sh64_unwind_inner(const struct stacktrace_ops *ops,
- void *data, struct pt_regs *regs);
-
-static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
- unsigned long pc, unsigned long fp)
-{
- if ((fp >= __MEMORY_START) &&
- ((fp & 7) == 0))
- sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
-}
-
-static void sh64_unwind_inner(const struct stacktrace_ops *ops,
- void *data, struct pt_regs *regs)
-{
- unsigned long pc, fp;
- int ofs = 0;
- int first_pass;
-
- pc = regs->pc & ~1;
- fp = regs->regs[14];
-
- first_pass = 1;
- for (;;) {
- int cond;
- unsigned long next_fp, next_pc;
-
- if (pc == ((unsigned long)&syscall_ret & ~1)) {
- printk("SYSCALL\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
- printk("SYSCALL (PREEMPTED)\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- /* In this case, the PC is discovered by lookup_prev_stack_frame but
- it has 4 taken off it to look like the 'caller' */
- if (pc == ((unsigned long)&ret_from_exception & ~1)) {
- printk("EXCEPTION\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- if (pc == ((unsigned long)&ret_from_irq & ~1)) {
- printk("IRQ\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
- ((pc & 3) == 0) && ((fp & 7) == 0));
-
- pc -= ofs;
-
- ops->address(data, pc, 1);
-
- if (first_pass) {
- /* If the innermost frame is a leaf function, it's
- * possible that r18 is never saved out to the stack.
- */
- next_pc = regs->regs[18];
- } else {
- next_pc = 0;
- }
-
- if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
- ofs = sizeof(unsigned long);
- pc = next_pc & ~1;
- fp = next_fp;
- } else {
- printk("Unable to lookup previous stack frame\n");
- break;
- }
- first_pass = 0;
- }
-
- printk("\n");
-}
-
-static void sh64_unwinder_dump(struct task_struct *task,
- struct pt_regs *regs,
- unsigned long *sp,
- const struct stacktrace_ops *ops,
- void *data)
-{
- if (!regs) {
- /*
- * Fetch current regs if we have no other saved state to back
- * trace from.
- */
- regs = &here_regs;
-
- __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
- __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
- __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
-
- __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
- __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
- __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
- __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
- __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
- __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
- __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
- __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
-
- __asm__ __volatile__ (
- "pta 0f, tr0\n\t"
- "blink tr0, %0\n\t"
- "0: nop"
- : "=r" (regs->pc)
- );
- }
-
- sh64_unwind_inner(ops, data, regs);
-}
-
-static struct unwinder sh64_unwinder = {
- .name = "sh64-unwinder",
- .dump = sh64_unwinder_dump,
- .rating = 150,
-};
-
-static int __init sh64_unwinder_init(void)
-{
- return unwinder_register(&sh64_unwinder);
-}
-early_initcall(sh64_unwinder_init);
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 9f1c9c11d62d..a13c045804ed 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -16,36 +16,37 @@
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
-void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+void dump_mem(const char *str, const char *loglvl,
+ unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
- printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+ printk("%s%s(0x%08lx to 0x%08lx)\n", loglvl, str, bottom, top);
for (p = bottom & ~31; p < top; ) {
- printk("%04lx: ", p & 0xffff);
+ printk("%s%04lx: ", loglvl, p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
- printk(" ");
+ printk("%s ", loglvl);
else {
if (__get_user(val, (unsigned int __user *)p)) {
- printk("\n");
+ printk("%s\n", loglvl);
return;
}
- printk("%08x ", val);
+ printk("%s%08x ", loglvl, val);
}
}
- printk("\n");
+ printk("%s\n", loglvl);
}
}
-void printk_address(unsigned long address, int reliable)
+void printk_address(unsigned long address, int reliable, const char *loglvl)
{
- printk(" [<%p>] %s%pS\n", (void *) address,
+ printk("%s [<%p>] %s%pS\n", loglvl, (void *) address,
reliable ? "" : "? ", (void *) address);
}
@@ -117,8 +118,7 @@ static int print_trace_stack(void *data, char *name)
*/
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
- printk("%s", (char *)data);
- printk_address(addr, reliable);
+ printk_address(addr, reliable, (char *)data);
}
static const struct stacktrace_ops print_trace_ops = {
@@ -127,16 +127,16 @@ static const struct stacktrace_ops print_trace_ops = {
};
void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs)
+ struct pt_regs *regs, const char *loglvl)
{
if (regs && user_mode(regs))
return;
- printk("\nCall trace:\n");
+ printk("%s\nCall trace:\n", loglvl);
- unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+ unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl);
- printk("\n");
+ printk("%s\n", loglvl);
if (!tsk)
tsk = current;
@@ -144,7 +144,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
debug_show_held_locks(tsk);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
unsigned long stack;
@@ -156,7 +156,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
- dump_mem("Stack: ", stack, THREAD_SIZE +
+ dump_mem("Stack: ", loglvl, stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
- show_trace(tsk, sp, NULL);
+ show_trace(tsk, sp, NULL, loglvl);
}
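With this change callers pass the log level explicitly. Purely as an illustrative sketch (the KERN_* macros and the local sp variable below are assumptions, not part of this patch):

	/* dump the current task's stack at debug level */
	show_stack(current, NULL, KERN_DEBUG);
	/* hex-dump an arbitrary range at the same level */
	dump_mem("Stack: ", KERN_DEBUG, (unsigned long)sp,
		 (unsigned long)sp + THREAD_SIZE);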
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
deleted file mode 100644
index 67685e1f00e1..000000000000
--- a/arch/sh/kernel/head_64.S
+++ /dev/null
@@ -1,346 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/head_64.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- */
-
-#include <linux/init.h>
-
-#include <asm/page.h>
-#include <asm/cache.h>
-#include <asm/tlb.h>
-#include <cpu/registers.h>
-#include <cpu/mmu_context.h>
-#include <asm/thread_info.h>
-
-/*
- * MMU defines: TLB boundaries.
- */
-
-#define MMUIR_FIRST ITLB_FIXED
-#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-#define MMUIR_STEP TLB_STEP
-
-#define MMUDR_FIRST DTLB_FIXED
-#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-#define MMUDR_STEP TLB_STEP
-
-/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
-#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
-#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
-#endif
-
-/*
- * MMU defines: Fixed TLBs.
- */
-/* Deal safely with the case where the base of RAM is not 512Mb aligned */
-
-#define ALIGN_512M_MASK (0xffffffffe0000000)
-#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
-#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
-
-#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
- /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-
-#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
- /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
-
-#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
- /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
- /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
-
-#ifdef CONFIG_CACHE_OFF
-#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
-#else
-#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
-#endif
-#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
-
-#if defined (CONFIG_CACHE_OFF)
-#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
-#elif defined (CONFIG_CACHE_WRITETHROUGH)
-#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
- /* WT, invalidate */
-#elif defined (CONFIG_CACHE_WRITEBACK)
-#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
- /* WB, invalidate */
-#else
-#error preprocessor flag CONFIG_CACHE_... not recognized!
-#endif
-
-#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
-
- .section .empty_zero_page, "aw"
- .global empty_zero_page
-
-empty_zero_page:
- .long 1 /* MOUNT_ROOT_RDONLY */
- .long 0 /* RAMDISK_FLAGS */
- .long 0x0200 /* ORIG_ROOT_DEV */
- .long 1 /* LOADER_TYPE */
- .long 0x00800000 /* INITRD_START */
- .long 0x00800000 /* INITRD_SIZE */
- .long 0
-
- .text
- .balign 4096,0,4096
-
- .section .data, "aw"
- .balign PAGE_SIZE
-
- .section .data, "aw"
- .balign PAGE_SIZE
-
- .global mmu_pdtp_cache
-mmu_pdtp_cache:
- .space PAGE_SIZE, 0
-
- .global fpu_in_use
-fpu_in_use: .quad 0
-
-
- __HEAD
- .balign L1_CACHE_BYTES
-/*
- * Condition at the entry of __stext:
- * . Reset state:
- * . SR.FD = 1 (FPU disabled)
- * . SR.BL = 1 (Exceptions disabled)
- * . SR.MD = 1 (Privileged Mode)
- * . SR.MMU = 0 (MMU Disabled)
- * . SR.CD = 0 (CTC User Visible)
- * . SR.IMASK = Undefined (Interrupt Mask)
- *
- * Operations supposed to be performed by __stext:
- * . prevent speculative fetch onto device memory while MMU is off
- * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
- * . first, save CPU state and set it to something harmless
- * . any CPU detection and/or endianness settings (?)
- * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
- * . set initial TLB entries for cached and uncached regions
- * (no fine granularity paging)
- * . set initial cache state
- * . enable MMU and caches
- * . set CPU to a consistent state
- * . registers (including stack pointer and current/KCR0)
- * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
- * at this stage. This is all to later Linux initialization steps.
- * . initialize FPU
- * . clear BSS
- * . jump into start_kernel()
- * . be prepared for the hopeless case where start_kernel() returns.
- *
- */
- .global _stext
-_stext:
- /*
- * Prevent speculative fetch on device memory due to
- * uninitialized target registers.
- */
- ptabs/u ZERO, tr0
- ptabs/u ZERO, tr1
- ptabs/u ZERO, tr2
- ptabs/u ZERO, tr3
- ptabs/u ZERO, tr4
- ptabs/u ZERO, tr5
- ptabs/u ZERO, tr6
- ptabs/u ZERO, tr7
- synci
-
- /*
- * Read/Set CPU state. After this block:
- * r29 = Initial SR
- */
- getcon SR, r29
- movi SR_HARMLESS, r20
- putcon r20, SR
-
- /*
- * Initialize EMI/LMI. To Be Done.
- */
-
- /*
- * CPU detection and/or endianness settings (?). To Be Done.
- * Pure PIC code here, please ! Just save state into r30.
- * After this block:
- * r30 = CPU type/Platform Endianness
- */
-
- /*
- * Set initial TLB entries for cached and uncached regions.
- * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
- */
- /* Clear ITLBs */
- pta clear_ITLB, tr1
- movi MMUIR_FIRST, r21
- movi MMUIR_END, r22
-clear_ITLB:
- putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
- addi r21, MMUIR_STEP, r21
- bne r21, r22, tr1
-
- /* Clear DTLBs */
- pta clear_DTLB, tr1
- movi MMUDR_FIRST, r21
- movi MMUDR_END, r22
-clear_DTLB:
- putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
- addi r21, MMUDR_STEP, r21
- bne r21, r22, tr1
-
- /* Map one big (512Mb) page for ITLB */
- movi MMUIR_FIRST, r21
- movi MMUIR_TEXT_L, r22 /* PTEL first */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
- movi MMUIR_TEXT_H, r22 /* PTEH last */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
-
- /* Map one big CACHED (512Mb) page for DTLB */
- movi MMUDR_FIRST, r21
- movi MMUDR_CACHED_L, r22 /* PTEL first */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
- movi MMUDR_CACHED_H, r22 /* PTEH last */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
-
- /*
- * Setup a DTLB translation for SCIF phys.
- */
- addi r21, MMUDR_STEP, r21
- movi 0x0a03, r22 /* SCIF phys */
- shori 0x0148, r22
- putcfg r21, 1, r22 /* PTEL first */
- movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
- shori 0x0003, r22
- putcfg r21, 0, r22 /* PTEH last */
-
- /*
- * Set cache behaviours.
- */
- /* ICache */
- movi ICCR_BASE, r21
- movi ICCR0_INIT_VAL, r22
- movi ICCR1_INIT_VAL, r23
- putcfg r21, ICCR_REG0, r22
- putcfg r21, ICCR_REG1, r23
-
- /* OCache */
- movi OCCR_BASE, r21
- movi OCCR0_INIT_VAL, r22
- movi OCCR1_INIT_VAL, r23
- putcfg r21, OCCR_REG0, r22
- putcfg r21, OCCR_REG1, r23
-
-
- /*
- * Enable Caches and MMU. Do the first non-PIC jump.
- * Now head.S global variables, constants and externs
- * can be used.
- */
- getcon SR, r21
- movi SR_ENABLE_MMU, r22
- or r21, r22, r21
- putcon r21, SSR
- movi hyperspace, r22
- ori r22, 1, r22 /* Make it SHmedia, not required but..*/
- putcon r22, SPC
- synco
- rte /* And now go into the hyperspace ... */
-hyperspace: /* ... that's the next instruction ! */
-
- /*
- * Set CPU to a consistent state.
- * r31 = FPU support flag
- * tr0/tr7 in use. Others give a chance to loop somewhere safe
- */
- movi start_kernel, r32
- ori r32, 1, r32
-
- ptabs r32, tr0 /* r32 = _start_kernel address */
- pta/u hopeless, tr1
- pta/u hopeless, tr2
- pta/u hopeless, tr3
- pta/u hopeless, tr4
- pta/u hopeless, tr5
- pta/u hopeless, tr6
- pta/u hopeless, tr7
- gettr tr1, r28 /* r28 = hopeless address */
-
- /* Set initial stack pointer */
- movi init_thread_union, SP
- putcon SP, KCR0 /* Set current to init_task */
- movi THREAD_SIZE, r22 /* Point to the end */
- add SP, r22, SP
-
- /*
- * Initialize FPU.
- * Keep FPU flag in r31. After this block:
- * r31 = FPU flag
- */
- movi fpu_in_use, r31 /* Temporary */
-
-#ifdef CONFIG_SH_FPU
- getcon SR, r21
- movi SR_ENABLE_FPU, r22
- and r21, r22, r22
- putcon r22, SR /* Try to enable */
- getcon SR, r22
- xor r21, r22, r21
- shlri r21, 15, r21 /* Supposedly 0/1 */
- st.q r31, 0 , r21 /* Set fpu_in_use */
-#else
- movi 0, r21
- st.q r31, 0 , r21 /* Set fpu_in_use */
-#endif
- or r21, ZERO, r31 /* Set FPU flag at last */
-
-#ifndef CONFIG_SH_NO_BSS_INIT
-/* Don't clear BSS if running on slow platforms such as an RTL simulation,
- remote memory via SHdebug link, etc. For these the memory can be guaranteed
- to be all zero on boot anyway. */
- /*
- * Clear bss
- */
- pta clear_quad, tr1
- movi __bss_start, r22
- movi _end, r23
-clear_quad:
- st.q r22, 0, ZERO
- addi r22, 8, r22
- bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
-#endif
- pta/u hopeless, tr1
-
- /* Say bye to head.S but be prepared to wrongly get back ... */
- blink tr0, LINK
-
- /* If we ever get back here through LINK/tr1-tr7 */
- pta/u hopeless, tr7
-
-hopeless:
- /*
- * Something's badly wrong here. Loop endlessly,
- * there's nothing more we can do about it.
- *
- * Note on hopeless: it can be jumped into invariably
- * before or after jumping into hyperspace. The only
- * requirement is to be PIC called (PTA) before and
- * any way (PTA/PTABS) after. According to Virtual
- * to Physical mapping a simulator/emulator can easily
- * tell where we came here from just looking at hopeless
- * (PC) address.
- *
- * For debugging purposes:
- * (r28) hopeless/loop address
- * (r29) Original SR
- * (r30) CPU type/Platform endianness
- * (r31) FPU Support
- * (r32) _start_kernel address
- */
- blink tr7, ZERO
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 60c828a2b8a2..037aab2708b7 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -136,6 +136,7 @@ EXPORT_SYMBOL_GPL(match_trapped_io_handler);
static struct trapped_io *lookup_tiop(unsigned long address)
{
pgd_t *pgd_k;
+ p4d_t *p4d_k;
pud_t *pud_k;
pmd_t *pmd_k;
pte_t *pte_k;
@@ -145,7 +146,11 @@ static struct trapped_io *lookup_tiop(unsigned long address)
if (!pgd_present(*pgd_k))
return NULL;
- pud_k = pud_offset(pgd_k, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ return NULL;
+
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
return NULL;
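
The hunk above adapts lookup_tiop() to the five-level page-table API: pud_offset() now takes a p4d entry rather than a pgd entry, so a p4d_offset()/p4d_present() step is inserted between the pgd and pud lookups. A minimal sketch of the resulting walk pattern, with a made-up helper name and only the generic helpers (not the actual remainder of lookup_tiop()):

#include <linux/pgtable.h>

/*
 * Sketch only: the generic five-level walk that the added p4d step
 * plugs into.  The helper name is hypothetical.
 */
static pte_t *walk_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_present(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);		/* new intermediate level */
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);		/* now takes a p4d, not a pgd */
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, address);
}

On configurations that fold the p4d level away, p4d_offset() effectively hands back the pgd entry, so the extra step adds no cost on two- or three-level architectures such as sh.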
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
deleted file mode 100644
index 7a1f50435e33..000000000000
--- a/arch/sh/kernel/irq_64.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SHmedia irqflags support
- *
- * Copyright (C) 2006 - 2009 Paul Mundt
- */
-#include <linux/irqflags.h>
-#include <linux/module.h>
-#include <cpu/registers.h>
-
-void notrace arch_local_irq_restore(unsigned long flags)
-{
- unsigned long long __dummy;
-
- if (flags == ARCH_IRQ_DISABLED) {
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (ARCH_IRQ_DISABLED)
- );
- } else {
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (~ARCH_IRQ_DISABLED)
- );
- }
-}
-EXPORT_SYMBOL(arch_local_irq_restore);
-
-unsigned long notrace arch_local_save_flags(void)
-{
- unsigned long flags;
-
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "and %0, %1, %0"
- : "=&r" (flags)
- : "r" (ARCH_IRQ_DISABLED)
- );
-
- return flags;
-}
-EXPORT_SYMBOL(arch_local_save_flags);
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 63d63a36f6f2..4a98980b8a07 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -14,7 +14,6 @@
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index bbc78d1d618e..b9cee98a754e 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -46,15 +46,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
-#ifdef CONFIG_SUPERH64
- /* For text addresses, bit2 of the st_other field indicates
- * whether the symbol is SHmedia (1) or SHcompact (0). If
- * SHmedia, the LSB of the symbol needs to be asserted
- * for the CPU to be in SHmedia mode when it starts executing
- * the branch target. */
- relocation |= !!(sym->st_other & 4);
-#endif
-
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_NONE:
break;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 4d1bfc848dd3..169832fcf21b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -23,9 +23,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
-#ifdef CONFIG_SUPERH32
unlazy_fpu(src, task_pt_regs(src));
-#endif
*dst = *src;
if (src->thread.xstate) {
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index a094633874c3..456cc8d171f7 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -59,7 +59,7 @@ void show_regs(struct pt_regs * regs)
printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
regs->mach, regs->macl, regs->gbr, regs->pr);
- show_trace(NULL, (unsigned long *)regs->regs[15], regs);
+ show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT);
show_code(regs);
}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
deleted file mode 100644
index c2844a2e18cd..000000000000
--- a/arch/sh/kernel/process_64.c
+++ /dev/null
@@ -1,461 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/process_64.c
- *
- * This file handles the architecture-dependent parts of process handling..
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- *
- * Started from SH3/4 version:
- * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
- *
- * In turn started from i386 version:
- * Copyright (C) 1995 Linus Torvalds
- */
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/ptrace.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <asm/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/fpu.h>
-#include <asm/switch_to.h>
-
-struct task_struct *last_task_used_math = NULL;
-struct pt_regs fake_swapper_regs = { 0, };
-
-void show_regs(struct pt_regs *regs)
-{
- unsigned long long ah, al, bh, bl, ch, cl;
-
- printk("\n");
- show_regs_print_info(KERN_DEFAULT);
-
- ah = (regs->pc) >> 32;
- al = (regs->pc) & 0xffffffff;
- bh = (regs->regs[18]) >> 32;
- bl = (regs->regs[18]) & 0xffffffff;
- ch = (regs->regs[15]) >> 32;
- cl = (regs->regs[15]) & 0xffffffff;
- printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->sr) >> 32;
- al = (regs->sr) & 0xffffffff;
- asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
- asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
- bh = (bh) >> 32;
- bl = (bl) & 0xffffffff;
- asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
- asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
- ch = (ch) >> 32;
- cl = (cl) & 0xffffffff;
- printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[0]) >> 32;
- al = (regs->regs[0]) & 0xffffffff;
- bh = (regs->regs[1]) >> 32;
- bl = (regs->regs[1]) & 0xffffffff;
- ch = (regs->regs[2]) >> 32;
- cl = (regs->regs[2]) & 0xffffffff;
- printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[3]) >> 32;
- al = (regs->regs[3]) & 0xffffffff;
- bh = (regs->regs[4]) >> 32;
- bl = (regs->regs[4]) & 0xffffffff;
- ch = (regs->regs[5]) >> 32;
- cl = (regs->regs[5]) & 0xffffffff;
- printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[6]) >> 32;
- al = (regs->regs[6]) & 0xffffffff;
- bh = (regs->regs[7]) >> 32;
- bl = (regs->regs[7]) & 0xffffffff;
- ch = (regs->regs[8]) >> 32;
- cl = (regs->regs[8]) & 0xffffffff;
- printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[9]) >> 32;
- al = (regs->regs[9]) & 0xffffffff;
- bh = (regs->regs[10]) >> 32;
- bl = (regs->regs[10]) & 0xffffffff;
- ch = (regs->regs[11]) >> 32;
- cl = (regs->regs[11]) & 0xffffffff;
- printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[12]) >> 32;
- al = (regs->regs[12]) & 0xffffffff;
- bh = (regs->regs[13]) >> 32;
- bl = (regs->regs[13]) & 0xffffffff;
- ch = (regs->regs[14]) >> 32;
- cl = (regs->regs[14]) & 0xffffffff;
- printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[16]) >> 32;
- al = (regs->regs[16]) & 0xffffffff;
- bh = (regs->regs[17]) >> 32;
- bl = (regs->regs[17]) & 0xffffffff;
- ch = (regs->regs[19]) >> 32;
- cl = (regs->regs[19]) & 0xffffffff;
- printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[20]) >> 32;
- al = (regs->regs[20]) & 0xffffffff;
- bh = (regs->regs[21]) >> 32;
- bl = (regs->regs[21]) & 0xffffffff;
- ch = (regs->regs[22]) >> 32;
- cl = (regs->regs[22]) & 0xffffffff;
- printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[23]) >> 32;
- al = (regs->regs[23]) & 0xffffffff;
- bh = (regs->regs[24]) >> 32;
- bl = (regs->regs[24]) & 0xffffffff;
- ch = (regs->regs[25]) >> 32;
- cl = (regs->regs[25]) & 0xffffffff;
- printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[26]) >> 32;
- al = (regs->regs[26]) & 0xffffffff;
- bh = (regs->regs[27]) >> 32;
- bl = (regs->regs[27]) & 0xffffffff;
- ch = (regs->regs[28]) >> 32;
- cl = (regs->regs[28]) & 0xffffffff;
- printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[29]) >> 32;
- al = (regs->regs[29]) & 0xffffffff;
- bh = (regs->regs[30]) >> 32;
- bl = (regs->regs[30]) & 0xffffffff;
- ch = (regs->regs[31]) >> 32;
- cl = (regs->regs[31]) & 0xffffffff;
- printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[32]) >> 32;
- al = (regs->regs[32]) & 0xffffffff;
- bh = (regs->regs[33]) >> 32;
- bl = (regs->regs[33]) & 0xffffffff;
- ch = (regs->regs[34]) >> 32;
- cl = (regs->regs[34]) & 0xffffffff;
- printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[35]) >> 32;
- al = (regs->regs[35]) & 0xffffffff;
- bh = (regs->regs[36]) >> 32;
- bl = (regs->regs[36]) & 0xffffffff;
- ch = (regs->regs[37]) >> 32;
- cl = (regs->regs[37]) & 0xffffffff;
- printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[38]) >> 32;
- al = (regs->regs[38]) & 0xffffffff;
- bh = (regs->regs[39]) >> 32;
- bl = (regs->regs[39]) & 0xffffffff;
- ch = (regs->regs[40]) >> 32;
- cl = (regs->regs[40]) & 0xffffffff;
- printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[41]) >> 32;
- al = (regs->regs[41]) & 0xffffffff;
- bh = (regs->regs[42]) >> 32;
- bl = (regs->regs[42]) & 0xffffffff;
- ch = (regs->regs[43]) >> 32;
- cl = (regs->regs[43]) & 0xffffffff;
- printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[44]) >> 32;
- al = (regs->regs[44]) & 0xffffffff;
- bh = (regs->regs[45]) >> 32;
- bl = (regs->regs[45]) & 0xffffffff;
- ch = (regs->regs[46]) >> 32;
- cl = (regs->regs[46]) & 0xffffffff;
- printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[47]) >> 32;
- al = (regs->regs[47]) & 0xffffffff;
- bh = (regs->regs[48]) >> 32;
- bl = (regs->regs[48]) & 0xffffffff;
- ch = (regs->regs[49]) >> 32;
- cl = (regs->regs[49]) & 0xffffffff;
- printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[50]) >> 32;
- al = (regs->regs[50]) & 0xffffffff;
- bh = (regs->regs[51]) >> 32;
- bl = (regs->regs[51]) & 0xffffffff;
- ch = (regs->regs[52]) >> 32;
- cl = (regs->regs[52]) & 0xffffffff;
- printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[53]) >> 32;
- al = (regs->regs[53]) & 0xffffffff;
- bh = (regs->regs[54]) >> 32;
- bl = (regs->regs[54]) & 0xffffffff;
- ch = (regs->regs[55]) >> 32;
- cl = (regs->regs[55]) & 0xffffffff;
- printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[56]) >> 32;
- al = (regs->regs[56]) & 0xffffffff;
- bh = (regs->regs[57]) >> 32;
- bl = (regs->regs[57]) & 0xffffffff;
- ch = (regs->regs[58]) >> 32;
- cl = (regs->regs[58]) & 0xffffffff;
- printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[59]) >> 32;
- al = (regs->regs[59]) & 0xffffffff;
- bh = (regs->regs[60]) >> 32;
- bl = (regs->regs[60]) & 0xffffffff;
- ch = (regs->regs[61]) >> 32;
- cl = (regs->regs[61]) & 0xffffffff;
- printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[62]) >> 32;
- al = (regs->regs[62]) & 0xffffffff;
- bh = (regs->tregs[0]) >> 32;
- bl = (regs->tregs[0]) & 0xffffffff;
- ch = (regs->tregs[1]) >> 32;
- cl = (regs->tregs[1]) & 0xffffffff;
- printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->tregs[2]) >> 32;
- al = (regs->tregs[2]) & 0xffffffff;
- bh = (regs->tregs[3]) >> 32;
- bl = (regs->tregs[3]) & 0xffffffff;
- ch = (regs->tregs[4]) >> 32;
- cl = (regs->tregs[4]) & 0xffffffff;
- printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->tregs[5]) >> 32;
- al = (regs->tregs[5]) & 0xffffffff;
- bh = (regs->tregs[6]) >> 32;
- bl = (regs->tregs[6]) & 0xffffffff;
- ch = (regs->tregs[7]) >> 32;
- cl = (regs->tregs[7]) & 0xffffffff;
- printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- /*
- * If we're in kernel mode, dump the stack too..
- */
- if (!user_mode(regs)) {
- void show_stack(struct task_struct *tsk, unsigned long *sp);
- unsigned long sp = regs->regs[15] & 0xffffffff;
- struct task_struct *tsk = get_current();
-
- tsk->thread.kregs = regs;
-
- show_stack(tsk, (unsigned long *)sp);
- }
-}
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(struct task_struct *tsk)
-{
- /*
- * See arch/sparc/kernel/process.c for the precedent for doing
- * this -- RPC.
- *
- * The SH-5 FPU save/restore approach relies on
- * last_task_used_math pointing to a live task_struct. When
- * another task tries to use the FPU for the 1st time, the FPUDIS
- * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
- * existing FPU state to the FP regs field within
- * last_task_used_math before re-loading the new task's FPU state
- * (or initialising it if the FPU has been used before). So if
- * last_task_used_math is stale, and its page has already been
- * re-allocated for another use, the consequences are rather
- * grim. Unless we null it here, there is no other path through
- * which it would get safely nulled.
- */
-#ifdef CONFIG_SH_FPU
- if (last_task_used_math == tsk)
- last_task_used_math = NULL;
-#endif
-}
-
-void flush_thread(void)
-{
-
- /* Called by fs/exec.c (setup_new_exec) to remove traces of a
- * previously running executable. */
-#ifdef CONFIG_SH_FPU
- if (last_task_used_math == current) {
- last_task_used_math = NULL;
- }
- /* Force FPU state to be reinitialised after exec */
- clear_used_math();
-#endif
-
- /* if we are a kernel thread, about to change to user thread,
- * update kreg
- */
- if(current->thread.kregs==&fake_swapper_regs) {
- current->thread.kregs =
- ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
- current->thread.uregs = current->thread.kregs;
- }
-}
-
-void release_thread(struct task_struct *dead_task)
-{
- /* do nothing */
-}
-
-/* Fill in the fpu structure for a core dump.. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef CONFIG_SH_FPU
- int fpvalid;
- struct task_struct *tsk = current;
-
- fpvalid = !!tsk_used_math(tsk);
- if (fpvalid) {
- if (current == last_task_used_math) {
- enable_fpu();
- save_fpu(tsk);
- disable_fpu();
- last_task_used_math = 0;
- regs->sr |= SR_FD;
- }
-
- memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
- }
-
- return fpvalid;
-#else
- return 0; /* Task didn't use the fpu at all. */
-#endif
-}
-EXPORT_SYMBOL(dump_fpu);
-
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
-
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
-{
- struct pt_regs *childregs;
-
-#ifdef CONFIG_SH_FPU
- /* can't happen for a kernel thread */
- if (last_task_used_math == current) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- current_pt_regs()->sr |= SR_FD;
- }
-#endif
- /* Copy from sh version */
- childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
- p->thread.sp = (unsigned long) childregs;
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- memset(childregs, 0, sizeof(struct pt_regs));
- childregs->regs[2] = (unsigned long)arg;
- childregs->regs[3] = (unsigned long)usp;
- childregs->sr = (1 << 30); /* not user_mode */
- childregs->sr |= SR_FD; /* Invalidate FPU flag */
- p->thread.pc = (unsigned long) ret_from_kernel_thread;
- return 0;
- }
- *childregs = *current_pt_regs();
-
- /*
- * Sign extend the edited stack.
- * Note that thread.pc and thread.pc will stay
- * 32-bit wide and context switch must take care
- * of NEFF sign extension.
- */
- if (usp)
- childregs->regs[15] = neff_sign_extend(usp);
- p->thread.uregs = childregs;
-
- childregs->regs[9] = 0; /* Set return value for child */
- childregs->sr |= SR_FD; /* Invalidate FPU flag */
-
- p->thread.pc = (unsigned long) ret_from_fork;
-
- return 0;
-}
-
-#ifdef CONFIG_FRAME_POINTER
-static int in_sh64_switch_to(unsigned long pc)
-{
- extern char __sh64_switch_to_end;
- /* For a sleeping task, the PC is somewhere in the middle of the function,
- so we don't have to worry about masking the LSB off */
- return (pc >= (unsigned long) sh64_switch_to) &&
- (pc < (unsigned long) &__sh64_switch_to_end);
-}
-#endif
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long pc;
-
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
- /*
- * The same comment as on the Alpha applies here, too ...
- */
- pc = thread_saved_pc(p);
-
-#ifdef CONFIG_FRAME_POINTER
- if (in_sh64_switch_to(pc)) {
- unsigned long schedule_fp;
- unsigned long sh64_switch_to_fp;
- unsigned long schedule_caller_pc;
-
- sh64_switch_to_fp = (long) p->thread.sp;
- /* r14 is saved at offset 4 in the sh64_switch_to frame */
- schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
-
- /* and the caller of 'schedule' is (currently!) saved at offset 24
- in the frame of schedule (from disasm) */
- schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
- return schedule_caller_pc;
- }
-#endif
- return pc;
-}
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index d5052c30a0e9..64bfb714943e 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -25,7 +25,6 @@
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
deleted file mode 100644
index 11085e48eaa6..000000000000
--- a/arch/sh/kernel/ptrace_64.c
+++ /dev/null
@@ -1,576 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/ptrace_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2008 Paul Mundt
- *
- * Started from SH3/4 version:
- * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- *
- * Original x86 implementation:
- * By Ross Biro 1/23/92
- * edited by Linus Torvalds
- */
-#include <linux/kernel.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/audit.h>
-#include <linux/seccomp.h>
-#include <linux/tracehook.h>
-#include <linux/elf.h>
-#include <linux/regset.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/mmu_context.h>
-#include <asm/syscalls.h>
-#include <asm/fpu.h>
-#include <asm/traps.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-/* This mask defines the bits of the SR which the user is not allowed to
- change, which are everything except S, Q, M, PR, SZ, FR. */
-#define SR_MASK (0xffff8cfd)
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-/*
- * This routine will get a word from the user area in the process kernel stack.
- */
-static inline int get_stack_long(struct task_struct *task, int offset)
-{
- unsigned char *stack;
-
- stack = (unsigned char *)(task->thread.uregs);
- stack += offset;
- return (*((int *)stack));
-}
-
-static inline unsigned long
-get_fpu_long(struct task_struct *task, unsigned long addr)
-{
- unsigned long tmp;
- struct pt_regs *regs;
- regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
-
- if (!tsk_used_math(task)) {
- if (addr == offsetof(struct user_fpu_struct, fpscr)) {
- tmp = FPSCR_INIT;
- } else {
- tmp = 0xffffffffUL; /* matches initial value in fpu.c */
- }
- return tmp;
- }
-
- if (last_task_used_math == task) {
- enable_fpu();
- save_fpu(task);
- disable_fpu();
- last_task_used_math = 0;
- regs->sr |= SR_FD;
- }
-
- tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
- return tmp;
-}
-
-/*
- * This routine will put a word into the user area in the process kernel stack.
- */
-static inline int put_stack_long(struct task_struct *task, int offset,
- unsigned long data)
-{
- unsigned char *stack;
-
- stack = (unsigned char *)(task->thread.uregs);
- stack += offset;
- *(unsigned long *) stack = data;
- return 0;
-}
-
-static inline int
-put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
-{
- struct pt_regs *regs;
-
- regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
-
- if (!tsk_used_math(task)) {
- init_fpu(task);
- } else if (last_task_used_math == task) {
- enable_fpu();
- save_fpu(task);
- disable_fpu();
- last_task_used_math = 0;
- regs->sr |= SR_FD;
- }
-
- ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
- return 0;
-}
-
-void user_enable_single_step(struct task_struct *child)
-{
- struct pt_regs *regs = child->thread.uregs;
-
- regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
-
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-void user_disable_single_step(struct task_struct *child)
-{
- struct pt_regs *regs = child->thread.uregs;
-
- regs->sr &= ~SR_SSTEP;
-
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-static int genregs_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- const struct pt_regs *regs = task_pt_regs(target);
- int ret;
-
- /* PC, SR, SYSCALL */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &regs->pc,
- 0, 3 * sizeof(unsigned long long));
-
- /* R1 -> R63 */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs->regs,
- offsetof(struct pt_regs, regs[0]),
- 63 * sizeof(unsigned long long));
- /* TR0 -> TR7 */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs->tregs,
- offsetof(struct pt_regs, tregs[0]),
- 8 * sizeof(unsigned long long));
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct pt_regs), -1);
-
- return ret;
-}
-
-static int genregs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- struct pt_regs *regs = task_pt_regs(target);
- int ret;
-
- /* PC, SR, SYSCALL */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs->pc,
- 0, 3 * sizeof(unsigned long long));
-
- /* R1 -> R63 */
- if (!ret && count > 0)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- regs->regs,
- offsetof(struct pt_regs, regs[0]),
- 63 * sizeof(unsigned long long));
-
- /* TR0 -> TR7 */
- if (!ret && count > 0)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- regs->tregs,
- offsetof(struct pt_regs, tregs[0]),
- 8 * sizeof(unsigned long long));
-
- if (!ret)
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- sizeof(struct pt_regs), -1);
-
- return ret;
-}
-
-#ifdef CONFIG_SH_FPU
-int fpregs_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- ret = init_fpu(target);
- if (ret)
- return ret;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->hardfpu, 0, -1);
-}
-
-static int fpregs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- ret = init_fpu(target);
- if (ret)
- return ret;
-
- set_stopped_child_used_math(target);
-
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->hardfpu, 0, -1);
-}
-
-static int fpregs_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- return tsk_used_math(target) ? regset->n : 0;
-}
-#endif
-
-const struct pt_regs_offset regoffset_table[] = {
- REG_OFFSET_NAME(pc),
- REG_OFFSET_NAME(sr),
- REG_OFFSET_NAME(syscall_nr),
- REGS_OFFSET_NAME(0),
- REGS_OFFSET_NAME(1),
- REGS_OFFSET_NAME(2),
- REGS_OFFSET_NAME(3),
- REGS_OFFSET_NAME(4),
- REGS_OFFSET_NAME(5),
- REGS_OFFSET_NAME(6),
- REGS_OFFSET_NAME(7),
- REGS_OFFSET_NAME(8),
- REGS_OFFSET_NAME(9),
- REGS_OFFSET_NAME(10),
- REGS_OFFSET_NAME(11),
- REGS_OFFSET_NAME(12),
- REGS_OFFSET_NAME(13),
- REGS_OFFSET_NAME(14),
- REGS_OFFSET_NAME(15),
- REGS_OFFSET_NAME(16),
- REGS_OFFSET_NAME(17),
- REGS_OFFSET_NAME(18),
- REGS_OFFSET_NAME(19),
- REGS_OFFSET_NAME(20),
- REGS_OFFSET_NAME(21),
- REGS_OFFSET_NAME(22),
- REGS_OFFSET_NAME(23),
- REGS_OFFSET_NAME(24),
- REGS_OFFSET_NAME(25),
- REGS_OFFSET_NAME(26),
- REGS_OFFSET_NAME(27),
- REGS_OFFSET_NAME(28),
- REGS_OFFSET_NAME(29),
- REGS_OFFSET_NAME(30),
- REGS_OFFSET_NAME(31),
- REGS_OFFSET_NAME(32),
- REGS_OFFSET_NAME(33),
- REGS_OFFSET_NAME(34),
- REGS_OFFSET_NAME(35),
- REGS_OFFSET_NAME(36),
- REGS_OFFSET_NAME(37),
- REGS_OFFSET_NAME(38),
- REGS_OFFSET_NAME(39),
- REGS_OFFSET_NAME(40),
- REGS_OFFSET_NAME(41),
- REGS_OFFSET_NAME(42),
- REGS_OFFSET_NAME(43),
- REGS_OFFSET_NAME(44),
- REGS_OFFSET_NAME(45),
- REGS_OFFSET_NAME(46),
- REGS_OFFSET_NAME(47),
- REGS_OFFSET_NAME(48),
- REGS_OFFSET_NAME(49),
- REGS_OFFSET_NAME(50),
- REGS_OFFSET_NAME(51),
- REGS_OFFSET_NAME(52),
- REGS_OFFSET_NAME(53),
- REGS_OFFSET_NAME(54),
- REGS_OFFSET_NAME(55),
- REGS_OFFSET_NAME(56),
- REGS_OFFSET_NAME(57),
- REGS_OFFSET_NAME(58),
- REGS_OFFSET_NAME(59),
- REGS_OFFSET_NAME(60),
- REGS_OFFSET_NAME(61),
- REGS_OFFSET_NAME(62),
- REGS_OFFSET_NAME(63),
- TREGS_OFFSET_NAME(0),
- TREGS_OFFSET_NAME(1),
- TREGS_OFFSET_NAME(2),
- TREGS_OFFSET_NAME(3),
- TREGS_OFFSET_NAME(4),
- TREGS_OFFSET_NAME(5),
- TREGS_OFFSET_NAME(6),
- TREGS_OFFSET_NAME(7),
- REG_OFFSET_END,
-};
-
-/*
- * These are our native regset flavours.
- */
-enum sh_regset {
- REGSET_GENERAL,
-#ifdef CONFIG_SH_FPU
- REGSET_FPU,
-#endif
-};
-
-static const struct user_regset sh_regsets[] = {
- /*
- * Format is:
- * PC, SR, SYSCALL,
- * R1 --> R63,
- * TR0 --> TR7,
- */
- [REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
- .size = sizeof(long long),
- .align = sizeof(long long),
- .get = genregs_get,
- .set = genregs_set,
- },
-
-#ifdef CONFIG_SH_FPU
- [REGSET_FPU] = {
- .core_note_type = NT_PRFPREG,
- .n = sizeof(struct user_fpu_struct) /
- sizeof(long long),
- .size = sizeof(long long),
- .align = sizeof(long long),
- .get = fpregs_get,
- .set = fpregs_set,
- .active = fpregs_active,
- },
-#endif
-};
-
-static const struct user_regset_view user_sh64_native_view = {
- .name = "sh64",
- .e_machine = EM_SH,
- .regsets = sh_regsets,
- .n = ARRAY_SIZE(sh_regsets),
-};
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
- return &user_sh64_native_view;
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- int ret;
- unsigned long __user *datap = (unsigned long __user *) data;
-
- switch (request) {
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR: {
- unsigned long tmp;
-
- ret = -EIO;
- if ((addr & 3) || addr < 0)
- break;
-
- if (addr < sizeof(struct pt_regs))
- tmp = get_stack_long(child, addr);
- else if ((addr >= offsetof(struct user, fpu)) &&
- (addr < offsetof(struct user, u_fpvalid))) {
- unsigned long index;
- ret = init_fpu(child);
- if (ret)
- break;
- index = addr - offsetof(struct user, fpu);
- tmp = get_fpu_long(child, index);
- } else if (addr == offsetof(struct user, u_fpvalid)) {
- tmp = !!tsk_used_math(child);
- } else {
- break;
- }
- ret = put_user(tmp, datap);
- break;
- }
-
- case PTRACE_POKEUSR:
- /* write the word at location addr in the USER area. We must
- disallow any changes to certain SR bits or u_fpvalid, since
- this could crash the kernel or result in a security
- loophole. */
- ret = -EIO;
- if ((addr & 3) || addr < 0)
- break;
-
- if (addr < sizeof(struct pt_regs)) {
- /* Ignore change of top 32 bits of SR */
- if (addr == offsetof (struct pt_regs, sr)+4)
- {
- ret = 0;
- break;
- }
- /* If lower 32 bits of SR, ignore non-user bits */
- if (addr == offsetof (struct pt_regs, sr))
- {
- long cursr = get_stack_long(child, addr);
- data &= ~(SR_MASK);
- data |= (cursr & SR_MASK);
- }
- ret = put_stack_long(child, addr, data);
- }
- else if ((addr >= offsetof(struct user, fpu)) &&
- (addr < offsetof(struct user, u_fpvalid))) {
- unsigned long index;
- ret = init_fpu(child);
- if (ret)
- break;
- index = addr - offsetof(struct user, fpu);
- ret = put_fpu_long(child, index, data);
- }
- break;
-
- case PTRACE_GETREGS:
- return copy_regset_to_user(child, &user_sh64_native_view,
- REGSET_GENERAL,
- 0, sizeof(struct pt_regs),
- datap);
- case PTRACE_SETREGS:
- return copy_regset_from_user(child, &user_sh64_native_view,
- REGSET_GENERAL,
- 0, sizeof(struct pt_regs),
- datap);
-#ifdef CONFIG_SH_FPU
- case PTRACE_GETFPREGS:
- return copy_regset_to_user(child, &user_sh64_native_view,
- REGSET_FPU,
- 0, sizeof(struct user_fpu_struct),
- datap);
- case PTRACE_SETFPREGS:
- return copy_regset_from_user(child, &user_sh64_native_view,
- REGSET_FPU,
- 0, sizeof(struct user_fpu_struct),
- datap);
-#endif
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
-
- return ret;
-}
-
-asmlinkage int sh64_ptrace(long request, long pid,
- unsigned long addr, unsigned long data)
-{
-#define WPC_DBRMODE 0x0d104008
- static unsigned long first_call;
-
- if (!test_and_set_bit(0, &first_call)) {
- /* Set WPC.DBRMODE to 0. This makes all debug events get
- * delivered through RESVEC, i.e. into the handlers in entry.S.
- * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
- * would normally be left set to 1, which makes debug events get
- * delivered through DBRVEC, i.e. into the remote gdb's
- * handlers. This prevents ptrace getting them, and confuses
- * the remote gdb.) */
- printk("DBRMODE set to 0 to permit native debugging\n");
- poke_real_address_q(WPC_DBRMODE, 0);
- }
-
- return sys_ptrace(request, pid, addr, data);
-}
-
-asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
-{
- long long ret = 0;
-
- secure_computing_strict(regs->regs[9]);
-
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
- * Tracing decided this syscall should not happen.
- * We'll return a bogus call number to get an ENOSYS
- * error, but leave the original number in regs->regs[0].
- */
- ret = -1LL;
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->regs[9]);
-
- audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
- regs->regs[4], regs->regs[5]);
-
- return ret ?: regs->regs[9];
-}
-
-asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
-{
- int step;
-
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[9]);
-
- step = test_thread_flag(TIF_SINGLESTEP);
- if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, step);
-}
-
-/* Called with interrupts disabled */
-asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
-{
- /* This is called after a single step exception (DEBUGSS).
- There is no need to change the PC, as it is a post-execution
- exception, as entry.S does not do anything to the PC for DEBUGSS.
- We need to clear the Single Step setting in SR to avoid
- continually stepping. */
- local_irq_enable();
- regs->sr &= ~SR_SSTEP;
- force_sig(SIGTRAP);
-}
-
-/* Called with interrupts disabled */
-BUILD_TRAP_HANDLER(breakpoint)
-{
- TRAP_HANDLER_DECL;
-
- /* We need to forward step the PC, to counteract the backstep done
- in signal.c. */
- local_irq_enable();
- force_sig(SIGTRAP);
- regs->pc += 4;
-}
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- user_disable_single_step(child);
-}
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
index 11001a8a5fe0..5c33f036418b 100644
--- a/arch/sh/kernel/reboot.c
+++ b/arch/sh/kernel/reboot.c
@@ -4,9 +4,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
-#ifdef CONFIG_SUPERH32
#include <asm/watchdog.h>
-#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
@@ -15,13 +13,11 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-#ifdef CONFIG_SUPERH32
static void watchdog_trigger_immediate(void)
{
sh_wdt_write_cnt(0xFF);
sh_wdt_write_csr(0xC2);
}
-#endif
static void native_machine_restart(char * __unused)
{
@@ -33,10 +29,8 @@ static void native_machine_restart(char * __unused)
/* Address error with SR.BL=1 first. */
trigger_address_error();
-#ifdef CONFIG_SUPERH32
/* If that fails or is unsupported, go for the watchdog next. */
watchdog_trigger_immediate();
-#endif
/*
* Give up and sleep.
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 282774472603..5858936cb431 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -38,6 +38,13 @@ DECLARE_EXPORT(__ashlsi3);
DECLARE_EXPORT(__lshrsi3_r0);
DECLARE_EXPORT(__ashrsi3_r0);
DECLARE_EXPORT(__ashlsi3_r0);
+
+DECLARE_EXPORT(__ashiftrt_r4_0);
+DECLARE_EXPORT(__ashiftrt_r4_1);
+DECLARE_EXPORT(__ashiftrt_r4_2);
+DECLARE_EXPORT(__ashiftrt_r4_3);
+DECLARE_EXPORT(__ashiftrt_r4_4);
+DECLARE_EXPORT(__ashiftrt_r4_5);
DECLARE_EXPORT(__ashiftrt_r4_6);
DECLARE_EXPORT(__ashiftrt_r4_7);
DECLARE_EXPORT(__ashiftrt_r4_8);
@@ -48,13 +55,23 @@ DECLARE_EXPORT(__ashiftrt_r4_12);
DECLARE_EXPORT(__ashiftrt_r4_13);
DECLARE_EXPORT(__ashiftrt_r4_14);
DECLARE_EXPORT(__ashiftrt_r4_15);
+DECLARE_EXPORT(__ashiftrt_r4_16);
+DECLARE_EXPORT(__ashiftrt_r4_17);
+DECLARE_EXPORT(__ashiftrt_r4_18);
+DECLARE_EXPORT(__ashiftrt_r4_19);
DECLARE_EXPORT(__ashiftrt_r4_20);
DECLARE_EXPORT(__ashiftrt_r4_21);
DECLARE_EXPORT(__ashiftrt_r4_22);
DECLARE_EXPORT(__ashiftrt_r4_23);
DECLARE_EXPORT(__ashiftrt_r4_24);
+DECLARE_EXPORT(__ashiftrt_r4_25);
+DECLARE_EXPORT(__ashiftrt_r4_26);
DECLARE_EXPORT(__ashiftrt_r4_27);
+DECLARE_EXPORT(__ashiftrt_r4_28);
+DECLARE_EXPORT(__ashiftrt_r4_29);
DECLARE_EXPORT(__ashiftrt_r4_30);
+DECLARE_EXPORT(__ashiftrt_r4_31);
+DECLARE_EXPORT(__ashiftrt_r4_32);
DECLARE_EXPORT(__movstr);
DECLARE_EXPORT(__movstrSI8);
DECLARE_EXPORT(__movstrSI12);
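
The lines added above fill in the previously missing __ashiftrt_r4_N libgcc arithmetic-shift helpers so that modules can link against the complete set. DECLARE_EXPORT is the usual ksyms idiom for these link-time symbols; the deleted sh_ksyms_64.c below carries the same definition. Roughly, as a sketch:

#include <linux/export.h>

/* Sketch of the idiom; the real macro lives in the sh_ksyms files. */
#define DECLARE_EXPORT(name)	extern void name(void); EXPORT_SYMBOL(name)

DECLARE_EXPORT(__ashiftrt_r4_16);	/* libgcc shift helper, resolved at link time */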
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
deleted file mode 100644
index 9de17065afb4..000000000000
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/sh_ksyms_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- */
-#include <linux/rwsem.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/screen_info.h>
-#include <asm/cacheflush.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-
-EXPORT_SYMBOL(__put_user_asm_b);
-EXPORT_SYMBOL(__put_user_asm_w);
-EXPORT_SYMBOL(__put_user_asm_l);
-EXPORT_SYMBOL(__put_user_asm_q);
-EXPORT_SYMBOL(__get_user_asm_b);
-EXPORT_SYMBOL(__get_user_asm_w);
-EXPORT_SYMBOL(__get_user_asm_l);
-EXPORT_SYMBOL(__get_user_asm_q);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__const_udelay);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcpy);
-
-/* Ugh. These come in from libgcc.a at link time. */
-#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
-
-DECLARE_EXPORT(__sdivsi3);
-DECLARE_EXPORT(__sdivsi3_1);
-DECLARE_EXPORT(__sdivsi3_2);
-DECLARE_EXPORT(__udivsi3);
-DECLARE_EXPORT(__div_table);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 24473fa6c3b6..a0fbb8427b39 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -28,7 +28,6 @@
#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
deleted file mode 100644
index b9aaa9266b34..000000000000
--- a/arch/sh/kernel/signal_64.c
+++ /dev/null
@@ -1,567 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/signal_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2008 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/personality.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/tracehook.h>
-#include <asm/ucontext.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/fpu.h>
-
-#define REG_RET 9
-#define REG_ARG1 2
-#define REG_ARG2 3
-#define REG_ARG3 4
-#define REG_SP 15
-#define REG_PR 18
-#define REF_REG_RET regs->regs[REG_RET]
-#define REF_REG_SP regs->regs[REG_SP]
-#define DEREF_REG_PR regs->regs[REG_PR]
-
-#define DEBUG_SIG 0
-
-static void
-handle_signal(struct ksignal *ksig, struct pt_regs *regs);
-
-static inline void
-handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
-{
- /* If we're not from a syscall, bail out */
- if (regs->syscall_nr < 0)
- return;
-
- /* check for system call restart.. */
- switch (regs->regs[REG_RET]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- no_system_call_restart:
- regs->regs[REG_RET] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(sa->sa_flags & SA_RESTART))
- goto no_system_call_restart;
- /* fallthrough */
- case -ERESTARTNOINTR:
- /* Decode syscall # */
- regs->regs[REG_RET] = regs->syscall_nr;
- regs->pc -= 4;
- break;
- }
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- *
- * Note that we go through the signals twice: once to check the signals that
- * the kernel can handle, and then we build all the user-level signal handling
- * stack-frames in one go after that.
- */
-static void do_signal(struct pt_regs *regs)
-{
- struct ksignal ksig;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- if (get_signal(&ksig)) {
- handle_syscall_restart(regs, &ksig.ka.sa);
-
- /* Whee! Actually deliver the signal. */
- handle_signal(&ksig, regs);
- return;
- }
-
- /* Did we come from a system call? */
- if (regs->syscall_nr >= 0) {
- /* Restart the system call - no handlers present */
- switch (regs->regs[REG_RET]) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- /* Decode Syscall # */
- regs->regs[REG_RET] = regs->syscall_nr;
- regs->pc -= 4;
- break;
-
- case -ERESTART_RESTARTBLOCK:
- regs->regs[REG_RET] = __NR_restart_syscall;
- regs->pc -= 4;
- break;
- }
- }
-
- /* No signal to deliver -- put the saved sigmask back */
- restore_saved_sigmask();
-}
-
-/*
- * Do a signal return; undo the signal stack.
- */
-struct sigframe {
- struct sigcontext sc;
- unsigned long extramask[_NSIG_WORDS-1];
- long long retcode[2];
-};
-
-struct rt_sigframe {
- struct siginfo __user *pinfo;
- void *puc;
- struct siginfo info;
- struct ucontext uc;
- long long retcode[2];
-};
-
-#ifdef CONFIG_SH_FPU
-static inline int
-restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- int err = 0;
- int fpvalid;
-
- err |= __get_user (fpvalid, &sc->sc_fpvalid);
- conditional_used_math(fpvalid);
- if (! fpvalid)
- return err;
-
- if (current == last_task_used_math) {
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
- (sizeof(long long) * 32) + (sizeof(int) * 1));
-
- return err;
-}
-
-static inline int
-setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- int err = 0;
- int fpvalid;
-
- fpvalid = !!used_math();
- err |= __put_user(fpvalid, &sc->sc_fpvalid);
- if (! fpvalid)
- return err;
-
- if (current == last_task_used_math) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
- (sizeof(long long) * 32) + (sizeof(int) * 1));
- clear_used_math();
-
- return err;
-}
-#else
-static inline int
-restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- return 0;
-}
-static inline int
-setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- return 0;
-}
-#endif
-
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
-{
- unsigned int err = 0;
- unsigned long long current_sr, new_sr;
-#define SR_MASK 0xffff8cfd
-
-#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
-
- COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
- COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
- COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
- COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
- COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
- COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
- COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
- COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
- COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
- COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
- COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
- COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
- COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
- COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
- COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
- COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
- COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
- COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
-
- /* Prevent the signal handler manipulating SR in a way that can
- crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
- modified */
- current_sr = regs->sr;
- err |= __get_user(new_sr, &sc->sc_sr);
- regs->sr &= SR_MASK;
- regs->sr |= (new_sr & ~SR_MASK);
-
- COPY(pc);
-
-#undef COPY
-
- /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
- * has been restored above.) */
- err |= restore_sigcontext_fpu(regs, sc);
-
- regs->syscall_nr = -1; /* disable syscall checks */
- err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
- return err;
-}
-
-asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs * regs)
-{
- struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
- sigset_t set;
- long long ret;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- if (!access_ok(frame, sizeof(*frame)))
- goto badframe;
-
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->sc, &ret))
- goto badframe;
- regs->pc -= 4;
-
- return (int) ret;
-
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs * regs)
-{
- struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
- sigset_t set;
- long long ret;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- if (!access_ok(frame, sizeof(*frame)))
- goto badframe;
-
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
- goto badframe;
- regs->pc -= 4;
-
- if (restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return (int) ret;
-
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-static int
-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
- unsigned long mask)
-{
- int err = 0;
-
- /* Do this first, otherwise is this sets sr->fd, that value isn't preserved. */
- err |= setup_sigcontext_fpu(regs, sc);
-
-#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
-
- COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
- COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
- COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
- COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
- COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
- COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
- COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
- COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
- COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
- COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
- COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
- COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
- COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
- COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
- COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
- COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
- COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
- COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
- COPY(sr); COPY(pc);
-
-#undef COPY
-
- err |= __put_user(mask, &sc->oldmask);
-
- return err;
-}
-
-/*
- * Determine which stack to use..
- */
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
-{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
- sp = current->sas_ss_sp + current->sas_ss_size;
-
- return (void __user *)((sp - frame_size) & -8ul);
-}
-
-void sa_default_restorer(void); /* See comments below */
-void sa_default_rt_restorer(void); /* See comments below */
-
-static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
-{
- struct sigframe __user *frame;
- int err = 0, sig = ksig->sig;
- int signal;
-
- frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
-
- if (!access_ok(frame, sizeof(*frame)))
- return -EFAULT;
-
- err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
-
- /* Give up earlier as i386, in case */
- if (err)
- return -EFAULT;
-
- if (_NSIG_WORDS > 1) {
- err |= __copy_to_user(frame->extramask, &set->sig[1],
- sizeof(frame->extramask)); }
-
- /* Give up earlier as i386, in case */
- if (err)
- return -EFAULT;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- /*
- * On SH5 all edited pointers are subject to NEFF
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- ksig->ka->sa.sa_restorer | 0x1);
- } else {
- /*
- * Different approach on SH5.
- * . Endianness independent asm code gets placed in entry.S .
- * This is limited to four ASM instructions corresponding
- * to two long longs in size.
- * . err checking is done on the else branch only
- * . flush_icache_range() is called upon __put_user() only
- * . all edited pointers are subject to NEFF
- * . being code, linker turns ShMedia bit on, always
- * dereference index -1.
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- frame->retcode | 0x01);
-
- if (__copy_to_user(frame->retcode,
- (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
- return -EFAULT;
-
- /* Cohere the trampoline with the I-cache. */
- flush_cache_sigtramp(DEREF_REG_PR-1);
- }
-
- /*
- * Set up registers for signal handler.
- * All edited pointers are subject to NEFF.
- */
- regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
- regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
-
- /* FIXME:
- The glibc profiling support for SH-5 needs to be passed a sigcontext
- so it can retrieve the PC. At some point during 2003 the glibc
- support was changed to receive the sigcontext through the 2nd
- argument, but there are still versions of libc.so in use that use
- the 3rd argument. Until libc.so is stabilised, pass the sigcontext
- through both 2nd and 3rd arguments.
- */
-
- regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
- regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
-
- regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
-
- /* Broken %016Lx */
- pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- sig, current->comm, current->pid, frame,
- regs->pc >> 32, regs->pc & 0xffffffff,
- DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
-
- return 0;
-}
-
-static int setup_rt_frame(struct ksignal *kig, sigset_t *set,
- struct pt_regs *regs)
-{
- struct rt_sigframe __user *frame;
- int err = 0, sig = ksig->sig;
-
- frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
-
- if (!access_ok(frame, sizeof(*frame)))
- return -EFAULT;
-
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-
- /* Give up earlier as i386, in case */
- if (err)
- return -EFAULT;
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->regs[REG_SP]);
- err |= setup_sigcontext(&frame->uc.uc_mcontext,
- regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
- /* Give up earlier as i386, in case */
- if (err)
- return -EFAULT;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- /*
- * On SH5 all edited pointers are subject to NEFF
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- ksig->ka.sa.sa_restorer | 0x1);
- } else {
- /*
- * Different approach on SH5.
- * . Endianness independent asm code gets placed in entry.S .
- * This is limited to four ASM instructions corresponding
- * to two long longs in size.
- * . err checking is done on the else branch only
- * . flush_icache_range() is called upon __put_user() only
- * . all edited pointers are subject to NEFF
- * . being code, linker turns ShMedia bit on, always
- * dereference index -1.
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- frame->retcode | 0x01);
-
- if (__copy_to_user(frame->retcode,
- (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
- return -EFAULT;
-
- /* Cohere the trampoline with the I-cache. */
- flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
- }
-
- /*
- * Set up registers for signal handler.
- * All edited pointers are subject to NEFF.
- */
- regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
- regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
- regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
- regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
- regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
-
- pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- sig, current->comm, current->pid, frame,
- regs->pc >> 32, regs->pc & 0xffffffff,
- DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
-
- return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(struct ksignal *ksig, struct pt_regs *regs)
-{
- sigset_t *oldset = sigmask_to_save();
- int ret;
-
- /* Set up the stack frame */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO)
- ret = setup_rt_frame(ksig, oldset, regs);
- else
- ret = setup_frame(ksig, oldset, regs);
-
- signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
-}
-
-asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
-{
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
-}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index f8afc014e084..a5a7b33ed81a 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -69,10 +69,10 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
if (addr + len < addr)
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma (current->mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
@@ -91,6 +91,6 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
if (op & CACHEFLUSH_I)
flush_icache_range(addr, addr+len);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return 0;
}
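
The hunk above is part of the tree-wide switch from taking mmap_sem directly to the mmap locking API wrappers. As a rough, self-contained sketch of the same read-side pattern (assuming a v5.8+ kernel that provides linux/mmap_lock.h; range_in_one_vma() is an illustrative name, not part of this patch):

#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched.h>

/* Sketch only: check that [addr, addr + len) sits inside a single VMA of
 * the current task, holding the mmap lock for reading around the lookup. */
static int range_in_one_vma(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start && addr + len <= vma->vm_end)
		ret = 0;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return ret;
}

The wrappers are one-to-one replacements for the old rwsem calls; the point of the conversion is to hide the lock's implementation behind a single API.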
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
deleted file mode 100644
index 1bcb86f0b728..000000000000
--- a/arch/sh/kernel/syscalls_64.S
+++ /dev/null
@@ -1,419 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/syscalls_64.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 - 2007 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-
-#include <linux/sys.h>
-
- .section .data, "aw"
- .balign 32
-
-/*
- * System calls jump table
- */
- .globl sys_call_table
-sys_call_table:
- .long sys_restart_syscall /* 0 - old "setup()" system call */
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_lchown16
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_stat
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid16
- .long sys_getuid16
- .long sys_stime /* 25 */
- .long sh64_ptrace
- .long sys_alarm
- .long sys_fstat
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid16
- .long sys_getgid16
- .long sys_signal
- .long sys_geteuid16
- .long sys_getegid16 /* 50 */
- .long sys_acct
- .long sys_umount /* recycled never used phys( */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_ni_syscall /* sys_olduname */
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_sigaction
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid16 /* 70 */
- .long sys_setregid16
- .long sys_sigsuspend
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_old_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups16 /* 80 */
- .long sys_setgroups16
- .long sys_ni_syscall /* sys_oldselect */
- .long sys_symlink
- .long sys_lstat
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long sys_old_readdir
- .long old_mmap /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown16 /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ni_syscall /* ioperm */
- .long sys_socketcall /* Obsolete implementation of socket syscall */
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_uname
- .long sys_ni_syscall /* 110 */ /* iopl */
- .long sys_vhangup
- .long sys_ni_syscall /* idle */
- .long sys_ni_syscall /* vm86old */
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ipc /* Obsolete ipc syscall implementation */
- .long sys_fsync
- .long sys_sigreturn
- .long sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_cacheflush /* x86: sys_modify_ldt */
- .long sys_adjtimex
- .long sys_mprotect /* 125 */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130: old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* for afs_syscall */
- .long sys_setfsuid16
- .long sys_setfsgid16
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150 */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid16
- .long sys_getresuid16 /* 165 */
- .long sys_ni_syscall /* vm86 */
- .long sys_ni_syscall /* old "query_module" */
- .long sys_poll
- .long sys_ni_syscall /* was nfsservctl */
- .long sys_setresgid16 /* 170 */
- .long sys_getresgid16
- .long sys_prctl
- .long sys_rt_sigreturn
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread64 /* 180 */
- .long sys_pwrite64
- .long sys_chown16
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_sigaltstack
- .long sys_sendfile
- .long sys_ni_syscall /* getpmsg */
- .long sys_ni_syscall /* putpmsg */
- .long sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_lchown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_chown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- /* Broken-out socket family (maintain backwards compatibility in syscall
- numbering with 2.4) */
- .long sys_socket /* 220 */
- .long sys_bind
- .long sys_connect
- .long sys_listen
- .long sys_accept
- .long sys_getsockname /* 225 */
- .long sys_getpeername
- .long sys_socketpair
- .long sys_send
- .long sys_sendto
- .long sys_recv /* 230*/
- .long sys_recvfrom
- .long sys_shutdown
- .long sys_setsockopt
- .long sys_getsockopt
- .long sys_sendmsg /* 235 */
- .long sys_recvmsg
- /* Broken-out IPC family (maintain backwards compatibility in syscall
- numbering with 2.4) */
- .long sys_semop
- .long sys_semget
- .long sys_semctl
- .long sys_msgsnd /* 240 */
- .long sys_msgrcv
- .long sys_msgget
- .long sys_msgctl
- .long sys_shmat
- .long sys_shmdt /* 245 */
- .long sys_shmget
- .long sys_shmctl
- /* Rest of syscalls listed in 2.4 i386 unistd.h */
- .long sys_getdents64
- .long sys_fcntl64
- .long sys_ni_syscall /* 250 reserved for TUX */
- .long sys_ni_syscall /* Reserved for Security */
- .long sys_gettid
- .long sys_readahead
- .long sys_setxattr
- .long sys_lsetxattr /* 255 */
- .long sys_fsetxattr
- .long sys_getxattr
- .long sys_lgetxattr
- .long sys_fgetxattr
- .long sys_listxattr /* 260 */
- .long sys_llistxattr
- .long sys_flistxattr
- .long sys_removexattr
- .long sys_lremovexattr
- .long sys_fremovexattr /* 265 */
- .long sys_tkill
- .long sys_sendfile64
- .long sys_futex
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity /* 270 */
- .long sys_ni_syscall /* reserved for set_thread_area */
- .long sys_ni_syscall /* reserved for get_thread_area */
- .long sys_io_setup
- .long sys_io_destroy
- .long sys_io_getevents /* 275 */
- .long sys_io_submit
- .long sys_io_cancel
- .long sys_fadvise64
- .long sys_ni_syscall
- .long sys_exit_group /* 280 */
- /* Rest of new 2.6 syscalls */
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl
- .long sys_epoll_wait
- .long sys_remap_file_pages /* 285 */
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime
- .long sys_timer_gettime
- .long sys_timer_getoverrun /* 290 */
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime
- .long sys_clock_getres
- .long sys_clock_nanosleep /* 295 */
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill
- .long sys_utimes
- .long sys_fadvise64_64 /* 300 */
- .long sys_ni_syscall /* Reserved for vserver */
- .long sys_ni_syscall /* Reserved for mbind */
- .long sys_ni_syscall /* get_mempolicy */
- .long sys_ni_syscall /* set_mempolicy */
- .long sys_mq_open /* 305 */
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive
- .long sys_mq_notify
- .long sys_mq_getsetattr /* 310 */
- .long sys_ni_syscall /* Reserved for kexec */
- .long sys_waitid
- .long sys_add_key
- .long sys_request_key
- .long sys_keyctl /* 315 */
- .long sys_ioprio_set
- .long sys_ioprio_get
- .long sys_inotify_init
- .long sys_inotify_add_watch
- .long sys_inotify_rm_watch /* 320 */
- .long sys_ni_syscall
- .long sys_migrate_pages
- .long sys_openat
- .long sys_mkdirat
- .long sys_mknodat /* 325 */
- .long sys_fchownat
- .long sys_futimesat
- .long sys_fstatat64
- .long sys_unlinkat
- .long sys_renameat /* 330 */
- .long sys_linkat
- .long sys_symlinkat
- .long sys_readlinkat
- .long sys_fchmodat
- .long sys_faccessat /* 335 */
- .long sys_pselect6
- .long sys_ppoll
- .long sys_unshare
- .long sys_set_robust_list
- .long sys_get_robust_list /* 340 */
- .long sys_splice
- .long sys_sync_file_range
- .long sys_tee
- .long sys_vmsplice
- .long sys_move_pages /* 345 */
- .long sys_getcpu
- .long sys_epoll_pwait
- .long sys_utimensat
- .long sys_signalfd
- .long sys_timerfd_create /* 350 */
- .long sys_eventfd
- .long sys_fallocate
- .long sys_timerfd_settime
- .long sys_timerfd_gettime
- .long sys_signalfd4 /* 355 */
- .long sys_eventfd2
- .long sys_epoll_create1
- .long sys_dup3
- .long sys_pipe2
- .long sys_inotify_init1 /* 360 */
- .long sys_preadv
- .long sys_pwritev
- .long sys_rt_tgsigqueueinfo
- .long sys_perf_event_open
- .long sys_recvmmsg /* 365 */
- .long sys_accept4
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64
- .long sys_name_to_handle_at /* 370 */
- .long sys_open_by_handle_at
- .long sys_clock_adjtime
- .long sys_syncfs
- .long sys_sendmmsg
- .long sys_setns /* 375 */
- .long sys_process_vm_readv
- .long sys_process_vm_writev
- .long sys_kcmp
- .long sys_finit_module
- .long sys_sched_getattr /* 380 */
- .long sys_sched_setattr
- .long sys_renameat2
- .long sys_seccomp
- .long sys_getrandom
- .long sys_memfd_create /* 385 */
- .long sys_bpf
- .long sys_execveat
- .long sys_userfaultfd
- .long sys_membarrier
- .long sys_mlock2 /* 390 */
- .long sys_copy_file_range
- .long sys_preadv2
- .long sys_pwritev2
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 2130381c9d57..a33025451fcd 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -38,8 +38,8 @@ void die(const char *str, struct pt_regs *regs, long err)
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
- dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
+ dump_mem("Stack: ", KERN_DEFAULT, regs->regs[15],
+ THREAD_SIZE + (unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
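
For context on the extra argument: dump_mem() now takes a printk loglevel string (KERN_DEFAULT here) so callers can choose how loudly the stack dump is emitted. The usual kernel convention for such dumpers is to prepend the level via "%s"; a minimal hedged sketch of that convention follows (dump_words() is a hypothetical helper, not the arch/sh dump_mem()):

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical dumper: one line per word, emitted at the caller's loglevel. */
static void dump_words(const char *lvl, const unsigned long *p, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		printk("%s%3u: %08lx\n", lvl, i, p[i]);
}

/* e.g. dump_words(KERN_DEFAULT, stack, 16) for a routine dump, or
 * dump_words(KERN_EMERG, stack, 16) from a panic path. */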
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
deleted file mode 100644
index 37046f3a26d3..000000000000
--- a/arch/sh/kernel/traps_64.c
+++ /dev/null
@@ -1,814 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/traps_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/sysctl.h>
-#include <linux/module.h>
-#include <linux/perf_event.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/alignment.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/fpu.h>
-
-static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
-{
- int get_user_error;
- unsigned long aligned_pc;
- insn_size_t opcode;
-
- if ((pc & 3) == 1) {
- /* SHmedia */
- aligned_pc = pc & ~3;
- if (from_user_mode) {
- if (!access_ok(aligned_pc, sizeof(insn_size_t))) {
- get_user_error = -EFAULT;
- } else {
- get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
- *result_opcode = opcode;
- }
- return get_user_error;
- } else {
- /* If the fault was in the kernel, we can either read
- * this directly, or if not, we fault.
- */
- *result_opcode = *(insn_size_t *)aligned_pc;
- return 0;
- }
- } else if ((pc & 1) == 0) {
- /* SHcompact */
- /* TODO : provide handling for this. We don't really support
- user-mode SHcompact yet, and for a kernel fault, this would
- have to come from a module built for SHcompact. */
- return -EFAULT;
- } else {
- /* misaligned */
- return -EFAULT;
- }
-}
-
-static int address_is_sign_extended(__u64 a)
-{
- __u64 b;
-#if (NEFF == 32)
- b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
- return (b == a) ? 1 : 0;
-#else
-#error "Sign extend check only works for NEFF==32"
-#endif
-}
-
-/* return -1 for fault, 0 for OK */
-static int generate_and_check_address(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- __u64 *address)
-{
- __u64 base_address, addr;
- int basereg;
-
- switch (1 << width_shift) {
- case 1: inc_unaligned_byte_access(); break;
- case 2: inc_unaligned_word_access(); break;
- case 4: inc_unaligned_dword_access(); break;
- case 8: inc_unaligned_multi_access(); break;
- }
-
- basereg = (opcode >> 20) & 0x3f;
- base_address = regs->regs[basereg];
- if (displacement_not_indexed) {
- __s64 displacement;
- displacement = (opcode >> 10) & 0x3ff;
- displacement = sign_extend64(displacement, 9);
- addr = (__u64)((__s64)base_address + (displacement << width_shift));
- } else {
- __u64 offset;
- int offsetreg;
- offsetreg = (opcode >> 10) & 0x3f;
- offset = regs->regs[offsetreg];
- addr = base_address + offset;
- }
-
- /* Check sign extended */
- if (!address_is_sign_extended(addr))
- return -1;
-
- /* Check accessible. For misaligned access in the kernel, assume the
- address is always accessible (and if not, just fault when the
- load/store gets done.) */
- if (user_mode(regs)) {
- inc_unaligned_user_access();
-
- if (addr >= TASK_SIZE)
- return -1;
- } else
- inc_unaligned_kernel_access();
-
- *address = addr;
-
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
- unaligned_fixups_notify(current, opcode, regs);
-
- return 0;
-}
-
-static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
-{
- unsigned short x;
- unsigned char *p, *q;
- p = (unsigned char *) (int) address;
- q = (unsigned char *) &x;
- q[0] = p[0];
- q[1] = p[1];
-
- if (do_sign_extend) {
- *result = (__u64)(__s64) *(short *) &x;
- } else {
- *result = (__u64) x;
- }
-}
-
-static void misaligned_kernel_word_store(__u64 address, __u64 value)
-{
- unsigned short x;
- unsigned char *p, *q;
- p = (unsigned char *) (int) address;
- q = (unsigned char *) &x;
-
- x = (__u16) value;
- p[0] = q[0];
- p[1] = q[1];
-}
-
-static int misaligned_load(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- int do_sign_extend)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int destreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- destreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- switch (width_shift) {
- case 1:
- if (do_sign_extend) {
- regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
- } else {
- regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
- }
- break;
- case 2:
- regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
- break;
- case 3:
- regs->regs[destreg] = buffer;
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- } else {
- /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
- __u64 lo, hi;
-
- switch (width_shift) {
- case 1:
- misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
- break;
- case 2:
- asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
- asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
- regs->regs[destreg] = lo | hi;
- break;
- case 3:
- asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
- asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
- regs->regs[destreg] = lo | hi;
- break;
-
- default:
- printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- }
-
- return 0;
-}
-
-static int misaligned_store(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int srcreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- srcreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- switch (width_shift) {
- case 1:
- *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
- break;
- case 2:
- *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
- break;
- case 3:
- buffer = regs->regs[srcreg];
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
-
- if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- } else {
- /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
- __u64 val = regs->regs[srcreg];
-
- switch (width_shift) {
- case 1:
- misaligned_kernel_word_store(address, val);
- break;
- case 2:
- asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
- asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
- break;
- case 3:
- asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
- asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
- break;
-
- default:
- printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- }
-
- return 0;
-}
-
-/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
- error. */
-static int misaligned_fpu_load(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- int do_paired_load)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int destreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- destreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
- __u32 buflo, bufhi;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- /* 'current' may be the current owner of the FPU state, so
- context switch the registers into memory so they can be
- indexed by register number. */
- if (last_task_used_math == current) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- buflo = *(__u32*) &buffer;
- bufhi = *(1 + (__u32*) &buffer);
-
- switch (width_shift) {
- case 2:
- current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
- break;
- case 3:
- if (do_paired_load) {
- current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
- current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
- } else {
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
- current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
-#else
- current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
- current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
-#endif
- }
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- return 0;
- } else {
- die ("Misaligned FPU load inside kernel", regs, 0);
- return -1;
- }
-}
-
-static int misaligned_fpu_store(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- int do_paired_load)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int srcreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- srcreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
- /* Initialise these to NaNs. */
- __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- /* 'current' may be the current owner of the FPU state, so
- context switch the registers into memory so they can be
- indexed by register number. */
- if (last_task_used_math == current) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- switch (width_shift) {
- case 2:
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
- break;
- case 3:
- if (do_paired_load) {
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
- bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
- } else {
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
-#else
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
- bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
-#endif
- }
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
-
- *(__u32*) &buffer = buflo;
- *(1 + (__u32*) &buffer) = bufhi;
- if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- return 0;
- } else {
- die ("Misaligned FPU load inside kernel", regs, 0);
- return -1;
- }
-}
-
-static int misaligned_fixup(struct pt_regs *regs)
-{
- insn_size_t opcode;
- int error;
- int major, minor;
- unsigned int user_action;
-
- user_action = unaligned_user_action();
- if (!(user_action & UM_FIXUP))
- return -1;
-
- error = read_opcode(regs->pc, &opcode, user_mode(regs));
- if (error < 0) {
- return error;
- }
- major = (opcode >> 26) & 0x3f;
- minor = (opcode >> 16) & 0xf;
-
- switch (major) {
- case (0x84>>2): /* LD.W */
- error = misaligned_load(regs, opcode, 1, 1, 1);
- break;
- case (0xb0>>2): /* LD.UW */
- error = misaligned_load(regs, opcode, 1, 1, 0);
- break;
- case (0x88>>2): /* LD.L */
- error = misaligned_load(regs, opcode, 1, 2, 1);
- break;
- case (0x8c>>2): /* LD.Q */
- error = misaligned_load(regs, opcode, 1, 3, 0);
- break;
-
- case (0xa4>>2): /* ST.W */
- error = misaligned_store(regs, opcode, 1, 1);
- break;
- case (0xa8>>2): /* ST.L */
- error = misaligned_store(regs, opcode, 1, 2);
- break;
- case (0xac>>2): /* ST.Q */
- error = misaligned_store(regs, opcode, 1, 3);
- break;
-
- case (0x40>>2): /* indexed loads */
- switch (minor) {
- case 0x1: /* LDX.W */
- error = misaligned_load(regs, opcode, 0, 1, 1);
- break;
- case 0x5: /* LDX.UW */
- error = misaligned_load(regs, opcode, 0, 1, 0);
- break;
- case 0x2: /* LDX.L */
- error = misaligned_load(regs, opcode, 0, 2, 1);
- break;
- case 0x3: /* LDX.Q */
- error = misaligned_load(regs, opcode, 0, 3, 0);
- break;
- default:
- error = -1;
- break;
- }
- break;
-
- case (0x60>>2): /* indexed stores */
- switch (minor) {
- case 0x1: /* STX.W */
- error = misaligned_store(regs, opcode, 0, 1);
- break;
- case 0x2: /* STX.L */
- error = misaligned_store(regs, opcode, 0, 2);
- break;
- case 0x3: /* STX.Q */
- error = misaligned_store(regs, opcode, 0, 3);
- break;
- default:
- error = -1;
- break;
- }
- break;
-
- case (0x94>>2): /* FLD.S */
- error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
- break;
- case (0x98>>2): /* FLD.P */
- error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
- break;
- case (0x9c>>2): /* FLD.D */
- error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
- break;
- case (0x1c>>2): /* floating indexed loads */
- switch (minor) {
- case 0x8: /* FLDX.S */
- error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
- break;
- case 0xd: /* FLDX.P */
- error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
- break;
- case 0x9: /* FLDX.D */
- error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
- break;
- default:
- error = -1;
- break;
- }
- break;
- case (0xb4>>2): /* FLD.S */
- error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
- break;
- case (0xb8>>2): /* FLD.P */
- error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
- break;
- case (0xbc>>2): /* FLD.D */
- error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
- break;
- case (0x3c>>2): /* floating indexed stores */
- switch (minor) {
- case 0x8: /* FSTX.S */
- error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
- break;
- case 0xd: /* FSTX.P */
- error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
- break;
- case 0x9: /* FSTX.D */
- error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
- break;
- default:
- error = -1;
- break;
- }
- break;
-
- default:
- /* Fault */
- error = -1;
- break;
- }
-
- if (error < 0) {
- return error;
- } else {
- regs->pc += 4; /* Skip the instruction that's just been emulated */
- return 0;
- }
-}
-
-static void do_unhandled_exception(int signr, char *str, unsigned long error,
- struct pt_regs *regs)
-{
- if (user_mode(regs))
- force_sig(signr);
-
- die_if_no_fixup(str, regs, error);
-}
-
-#define DO_ERROR(signr, str, name) \
-asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
-{ \
- do_unhandled_exception(signr, str, error_code, regs); \
-}
-
-DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
-DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
-
-#if defined(CONFIG_SH64_ID2815_WORKAROUND)
-
-#define OPCODE_INVALID 0
-#define OPCODE_USER_VALID 1
-#define OPCODE_PRIV_VALID 2
-
-/* getcon/putcon - requires checking which control register is referenced. */
-#define OPCODE_CTRL_REG 3
-
-/* Table of valid opcodes for SHmedia mode.
- Form a 10-bit value by concatenating the major/minor opcodes i.e.
- opcode[31:26,20:16]. The 6 MSBs of this value index into the following
- array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
- LSBs==4'b0000 etc). */
-static unsigned long shmedia_opcode_table[64] = {
- 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
- 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
- 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
-};
-
-/* Workaround SH5-101 cut2 silicon defect #2815 :
- in some situations, inter-mode branches from SHcompact -> SHmedia
- which should take ITLBMISS or EXECPROT exceptions at the target
- falsely take RESINST at the target instead. */
-void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
-{
- insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
- unsigned long pc, aligned_pc;
- unsigned long index, shift;
- unsigned long major, minor, combined;
- unsigned long reserved_field;
- int opcode_state;
- int get_user_error;
- int signr = SIGILL;
- char *exception_name = "reserved_instruction";
-
- pc = regs->pc;
-
- /* SHcompact is not handled */
- if (unlikely((pc & 3) == 0))
- goto out;
-
- /* SHmedia : check for defect. This requires executable vmas
- to be readable too. */
- aligned_pc = pc & ~3;
- if (!access_ok(aligned_pc, sizeof(insn_size_t)))
- get_user_error = -EFAULT;
- else
- get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
-
- if (get_user_error < 0) {
- /*
- * Error trying to read opcode. This typically means a
- * real fault, not a RESINST any more. So change the
- * codes.
- */
- exception_name = "address error (exec)";
- signr = SIGSEGV;
- goto out;
- }
-
- /* These bits are currently reserved as zero in all valid opcodes */
- reserved_field = opcode & 0xf;
- if (unlikely(reserved_field))
- goto out; /* invalid opcode */
-
- major = (opcode >> 26) & 0x3f;
- minor = (opcode >> 16) & 0xf;
- combined = (major << 4) | minor;
- index = major;
- shift = minor << 1;
- opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
- switch (opcode_state) {
- case OPCODE_INVALID:
- /* Trap. */
- break;
- case OPCODE_USER_VALID:
- /*
- * Restart the instruction: the branch to the instruction
- * will now be from an RTE not from SHcompact so the
- * silicon defect won't be triggered.
- */
- return;
- case OPCODE_PRIV_VALID:
- if (!user_mode(regs)) {
- /*
- * Should only ever get here if a module has
- * SHcompact code inside it. If so, the same fix
- * up is needed.
- */
- return; /* same reason */
- }
-
- /*
- * Otherwise, user mode trying to execute a privileged
- * instruction - fall through to trap.
- */
- break;
- case OPCODE_CTRL_REG:
- /* If in privileged mode, return as above. */
- if (!user_mode(regs))
- return;
-
- /* In user mode ... */
- if (combined == 0x9f) { /* GETCON */
- unsigned long regno = (opcode >> 20) & 0x3f;
-
- if (regno >= 62)
- return;
-
- /* reserved/privileged control register => trap */
- } else if (combined == 0x1bf) { /* PUTCON */
- unsigned long regno = (opcode >> 4) & 0x3f;
-
- if (regno >= 62)
- return;
-
- /* reserved/privileged control register => trap */
- }
-
- break;
- default:
- /* Fall through to trap. */
- break;
- }
-
-out:
- do_unhandled_exception(signr, exception_name, error_code, regs);
-}
-
-#else /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* If the workaround isn't needed, this is just a straightforward reserved
- instruction */
-DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
-
-#endif /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* Called with interrupts disabled */
-asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
-{
- die_if_kernel("exception", regs, ex);
-}
-
-asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
-{
- /* Syscall debug */
- printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
-
- die_if_kernel("unknown trapa", regs, scId);
-
- return -ENOSYS;
-}
-
-/* Implement misaligned load/store handling for kernel (and optionally for user
- mode too). Limitation : only SHmedia mode code is handled - there is no
- handling at all for misaligned accesses occurring in SHcompact code yet. */
-
-asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0)
- do_unhandled_exception(SIGSEGV, "address error(load)",
- error_code, regs);
-}
-
-asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0)
- do_unhandled_exception(SIGSEGV, "address error(store)",
- error_code, regs);
-}
-
-asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
-{
- u64 peek_real_address_q(u64 addr);
- u64 poke_real_address_q(u64 addr, u64 val);
- unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
- unsigned long long exp_cause;
- /* It's not worth ioremapping the debug module registers for the amount
- of access we make to them - just go direct to their physical
- addresses. */
- exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
- if (exp_cause & ~4)
- printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
- (unsigned long)(exp_cause & 0xffffffff));
- show_state();
- /* Clear all DEBUGINT causes */
- poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
-}
-
-void per_cpu_trap_init(void)
-{
- /* Nothing to do for now, VBR initialization later. */
-}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c60b19958c35..bde7a6c01aaf 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,14 +3,7 @@
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka and Paul Mundt
*/
-#ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET PAGE_OFFSET
-OUTPUT_ARCH(sh:sh5)
-#else
-#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
-#endif
-
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
@@ -28,14 +21,13 @@ SECTIONS
_text = .; /* Text and read-only data */
- .empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
+ .empty_zero_page : AT(ADDR(.empty_zero_page)) {
*(.empty_zero_page)
} = 0
- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ .text : AT(ADDR(.text)) {
HEAD_TEXT
TEXT_TEXT
- EXTRA_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -62,7 +54,7 @@ SECTIONS
INIT_DATA_SECTION(16)
. = ALIGN(4);
- .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
+ .machvec.init : AT(ADDR(.machvec.init)) {
__machvec_start = .;
*(.machvec.init)
__machvec_end = .;
@@ -74,8 +66,8 @@ SECTIONS
* .exit.text is discarded at runtime, not link time, to deal with
* references from __bug_table
*/
- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
- .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
+ .exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT }
+ .exit.data : AT(ADDR(.exit.data)) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 98494480f048..1bd85a6949c4 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
@@ -80,7 +80,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
current->mm->context.vdso = (void *)addr;
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
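
The same mmap locking API conversion, write side: down_write_killable(&mm->mmap_sem) becomes mmap_write_lock_killable(mm). A condensed sketch of the lock/unlock shape used above (map_one_page_sketch() is an illustrative name; the actual mapping step is elided):

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Sketch: take the mmap lock for writing, backing out with -EINTR if the
 * task is killed while waiting, and always unlocking on the way out. */
static int map_one_page_sketch(struct mm_struct *mm)
{
	unsigned long addr;
	int ret = 0;

	if (mmap_write_lock_killable(mm))	/* was: down_write_killable(&mm->mmap_sem) */
		return -EINTR;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto out;
	}
	/* ... install the special mapping at addr here ... */
out:
	mmap_write_unlock(mm);			/* was: up_write(&mm->mmap_sem) */
	return ret;
}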
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index dad8e6a54906..540e670dbafc 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -29,6 +29,7 @@ void __delay(unsigned long loops)
: "0" (loops)
: "t");
}
+EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
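
The one functional change in delay.c is the added EXPORT_SYMBOL(), which puts __delay() back in the symbol table that loadable modules link against. As a generic illustration of the mechanism (my_helper() is made up for the example):

#include <linux/export.h>

/* Any function that loadable modules should be able to call must be
 * exported explicitly; being built into vmlinux alone is not enough. */
void my_helper(void)
{
	/* ... do something useful ... */
}
EXPORT_SYMBOL(my_helper);	/* EXPORT_SYMBOL_GPL() restricts it to GPL modules */

/* Without the export, a module calling my_helper() fails at load time with
 * "Unknown symbol my_helper". */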
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
deleted file mode 100644
index 69779ff741df..000000000000
--- a/arch/sh/lib64/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for the SH-5 specific library files..
-#
-# Copyright (C) 2000, 2001 Paolo Alberelli
-# Copyright (C) 2003 - 2008 Paul Mundt
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-# Panic should really be compiled as PIC
-lib-y := udelay.o panic.o memcpy.o memset.o \
- copy_user_memcpy.o copy_page.o strcpy.o strlen.o
-
-# Extracted from libgcc
-lib-y += udivsi3.o udivdi3.o sdivsi3.o
diff --git a/arch/sh/lib64/copy_page.S b/arch/sh/lib64/copy_page.S
deleted file mode 100644
index 0ec6fca63b56..000000000000
--- a/arch/sh/lib64/copy_page.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
-
- This file is subject to the terms and conditions of the GNU General Public
- License. See the file "COPYING" in the main directory of this archive
- for more details.
-
- Tight version of mempy for the case of just copying a page.
- Prefetch strategy empirically optimised against RTL simulations
- of SH5-101 cut2 eval chip with Cayman board DDR memory.
-
- Parameters:
- r2 : destination effective address (start of page)
- r3 : source effective address (start of page)
-
- Always copies 4096 bytes.
-
- Points to review.
- * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
- It seems like the prefetch needs to be at at least 4 lines ahead to get
- the data into the cache in time, and the allocos contend with outstanding
- prefetches for the same cache set, so it's better to have the numbers
- different.
- */
-
- .section .text..SHmedia32,"ax"
- .little
-
- .balign 8
- .global copy_page
-copy_page:
-
- /* Copy 4096 bytes worth of data from r3 to r2.
- Do prefetches 4 lines ahead.
- Do alloco 2 lines ahead */
-
- pta 1f, tr1
- pta 2f, tr2
- pta 3f, tr3
- ptabs r18, tr0
-
-#if 0
- /* TAKum03020 */
- ld.q r3, 0x00, r63
- ld.q r3, 0x20, r63
- ld.q r3, 0x40, r63
- ld.q r3, 0x60, r63
-#endif
- alloco r2, 0x00
- synco ! TAKum03020
- alloco r2, 0x20
- synco ! TAKum03020
-
- movi 3968, r6
- add r2, r6, r6
- addi r6, 64, r7
- addi r7, 64, r8
- sub r3, r2, r60
- addi r60, 8, r61
- addi r61, 8, r62
- addi r62, 8, r23
- addi r60, 0x80, r22
-
-/* Minimal code size. The extra branches inside the loop don't cost much
- because they overlap with the time spent waiting for prefetches to
- complete. */
-1:
-#if 0
- /* TAKum03020 */
- bge/u r2, r6, tr2 ! skip prefetch for last 4 lines
- ldx.q r2, r22, r63 ! prefetch 4 lines hence
-#endif
-2:
- bge/u r2, r7, tr3 ! skip alloco for last 2 lines
- alloco r2, 0x40 ! alloc destination line 2 lines ahead
- synco ! TAKum03020
-3:
- ldx.q r2, r60, r36
- ldx.q r2, r61, r37
- ldx.q r2, r62, r38
- ldx.q r2, r23, r39
- st.q r2, 0, r36
- st.q r2, 8, r37
- st.q r2, 16, r38
- st.q r2, 24, r39
- addi r2, 32, r2
- bgt/l r8, r2, tr1
-
- blink tr0, r63 ! return
diff --git a/arch/sh/lib64/copy_user_memcpy.S b/arch/sh/lib64/copy_user_memcpy.S
deleted file mode 100644
index 515f81b00202..000000000000
--- a/arch/sh/lib64/copy_user_memcpy.S
+++ /dev/null
@@ -1,218 +0,0 @@
-! SPDX-License-Identifier: GPL-2.0
-!
-! Fast SH memcpy
-!
-! by Toshiyasu Morita (tm@netcom.com)
-! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
-! SH5 code Copyright 2002 SuperH Ltd.
-!
-! Entry: ARG0: destination pointer
-! ARG1: source pointer
-! ARG2: byte count
-!
-! Exit: RESULT: destination pointer
-! any other registers in the range r0-r7: trashed
-!
-! Notes: Usually one wants to do small reads and write a longword, but
-! unfortunately it is difficult in some cases to concatanate bytes
-! into a longword on the SH, so this does a longword read and small
-! writes.
-!
-! This implementation makes two assumptions about how it is called:
-!
-! 1.: If the byte count is nonzero, the address of the last byte to be
-! copied is unsigned greater than the address of the first byte to
-! be copied. This could be easily swapped for a signed comparison,
-! but the algorithm used needs some comparison.
-!
-! 2.: When there are two or three bytes in the last word of an 11-or-more
-! bytes memory chunk to b copied, the rest of the word can be read
-! without side effects.
-! This could be easily changed by increasing the minimum size of
-! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
-! however, this would cost a few extra cyles on average.
-! For SHmedia, the assumption is that any quadword can be read in its
-! enirety if at least one byte is included in the copy.
-
-/* Imported into Linux kernel by Richard Curnow. This is used to implement the
- __copy_user function in the general case, so it has to be a distinct
- function from intra-kernel memcpy to allow for exception fix-ups in the
- event that the user pointer is bad somewhere in the copy (e.g. due to
- running off the end of the vma).
-
- Note, this algorithm will be slightly wasteful in the case where the source
- and destination pointers are equally aligned, because the stlo/sthi pairs
- could then be merged back into single stores. If there are a lot of cache
- misses, this is probably offset by the stall lengths on the preloads.
-
-*/
-
-/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
- * erratum. The first two prefetches are nop-ed out to avoid upsetting the
- * instruction counts used in the jump address calculation.
- * */
-
- .section .text..SHmedia32,"ax"
- .little
- .balign 32
- .global copy_user_memcpy
- .global copy_user_memcpy_end
-copy_user_memcpy:
-
-#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
-#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
-#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
-#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
-
- nop ! ld.b r3,0,r63 ! TAKum03020
- pta/l Large,tr0
- movi 25,r0
- bgeu/u r4,r0,tr0
- nsb r4,r0
- shlli r0,5,r0
- movi (L1-L0+63*32 + 1) & 0xffff,r1
- sub r1, r0, r0
-L0: ptrel r0,tr0
- add r2,r4,r5
- ptabs r18,tr1
- add r3,r4,r6
- blink tr0,r63
-
-/* Rearranged to make cut2 safe */
- .balign 8
-L4_7: /* 4..7 byte memcpy cntd. */
- stlo.l r2, 0, r0
- or r6, r7, r6
- sthi.l r5, -1, r6
- stlo.l r5, -4, r6
- blink tr1,r63
-
- .balign 8
-L1: /* 0 byte memcpy */
- nop
- blink tr1,r63
- nop
- nop
- nop
- nop
-
-L2_3: /* 2 or 3 byte memcpy cntd. */
- st.b r5,-1,r6
- blink tr1,r63
-
- /* 1 byte memcpy */
- ld.b r3,0,r0
- st.b r2,0,r0
- blink tr1,r63
-
-L8_15: /* 8..15 byte memcpy cntd. */
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
- /* 2 or 3 byte memcpy */
- ld.b r3,0,r0
- nop ! ld.b r2,0,r63 ! TAKum03020
- ld.b r3,1,r1
- st.b r2,0,r0
- pta/l L2_3,tr0
- ld.b r6,-1,r6
- st.b r2,1,r1
- blink tr0, r63
-
- /* 4 .. 7 byte memcpy */
- LDUAL (r3, 0, r0, r1)
- pta L4_7, tr0
- ldlo.l r6, -4, r7
- or r0, r1, r0
- sthi.l r2, 3, r0
- ldhi.l r6, -1, r6
- blink tr0, r63
-
- /* 8 .. 15 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- pta L8_15, tr0
- ldlo.q r6, -8, r7
- or r0, r1, r0
- sthi.q r2, 7, r0
- ldhi.q r6, -1, r6
- blink tr0, r63
-
- /* 16 .. 24 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- LDUAQ (r3, 8, r8, r9)
- or r0, r1, r0
- sthi.q r2, 7, r0
- or r8, r9, r8
- sthi.q r2, 15, r8
- ldlo.q r6, -8, r7
- ldhi.q r6, -1, r6
- stlo.q r2, 8, r8
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
-Large:
- ! ld.b r2, 0, r63 ! TAKum03020
- pta/l Loop_ua, tr1
- ori r3, -8, r7
- sub r2, r7, r22
- sub r3, r2, r6
- add r2, r4, r5
- ldlo.q r3, 0, r0
- addi r5, -16, r5
- movi 64+8, r27 ! could subtract r7 from that.
- stlo.q r2, 0, r0
- sthi.q r2, 7, r0
- ldx.q r22, r6, r0
- bgtu/l r27, r4, tr1
-
- addi r5, -48, r27
- pta/l Loop_line, tr0
- addi r6, 64, r36
- addi r6, -24, r19
- addi r6, -16, r20
- addi r6, -8, r21
-
-Loop_line:
- ! ldx.q r22, r36, r63 ! TAKum03020
- alloco r22, 32
- synco
- addi r22, 32, r22
- ldx.q r22, r19, r23
- sthi.q r22, -25, r0
- ldx.q r22, r20, r24
- ldx.q r22, r21, r25
- stlo.q r22, -32, r0
- ldx.q r22, r6, r0
- sthi.q r22, -17, r23
- sthi.q r22, -9, r24
- sthi.q r22, -1, r25
- stlo.q r22, -24, r23
- stlo.q r22, -16, r24
- stlo.q r22, -8, r25
- bgeu r27, r22, tr0
-
-Loop_ua:
- addi r22, 8, r22
- sthi.q r22, -1, r0
- stlo.q r22, -8, r0
- ldx.q r22, r6, r0
- bgtu/l r5, r22, tr1
-
- add r3, r4, r7
- ldlo.q r7, -8, r1
- sthi.q r22, 7, r0
- ldhi.q r7, -1, r7
- ptabs r18,tr1
- stlo.q r22, 0, r0
- or r1, r7, r1
- sthi.q r5, 15, r1
- stlo.q r5, 8, r1
- blink tr1, r63
-copy_user_memcpy_end:
- nop
diff --git a/arch/sh/lib64/memcpy.S b/arch/sh/lib64/memcpy.S
deleted file mode 100644
index 231ea595b39a..000000000000
--- a/arch/sh/lib64/memcpy.S
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
-/* Modified by SuperH, Inc. September 2003 */
-!
-! Fast SH memcpy
-!
-! by Toshiyasu Morita (tm@netcom.com)
-! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
-! SH5 code Copyright 2002 SuperH Ltd.
-!
-! Entry: ARG0: destination pointer
-! ARG1: source pointer
-! ARG2: byte count
-!
-! Exit: RESULT: destination pointer
-! any other registers in the range r0-r7: trashed
-!
-! Notes: Usually one wants to do small reads and write a longword, but
-! unfortunately it is difficult in some cases to concatanate bytes
-! into a longword on the SH, so this does a longword read and small
-! writes.
-!
-! This implementation makes two assumptions about how it is called:
-!
-! 1.: If the byte count is nonzero, the address of the last byte to be
-! copied is unsigned greater than the address of the first byte to
-! be copied. This could be easily swapped for a signed comparison,
-! but the algorithm used needs some comparison.
-!
-! 2.: When there are two or three bytes in the last word of an 11-or-more
-! bytes memory chunk to b copied, the rest of the word can be read
-! without side effects.
-! This could be easily changed by increasing the minimum size of
-! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
-! however, this would cost a few extra cyles on average.
-! For SHmedia, the assumption is that any quadword can be read in its
-! enirety if at least one byte is included in the copy.
-!
-
- .section .text..SHmedia32,"ax"
- .globl memcpy
- .type memcpy, @function
- .align 5
-
-memcpy:
-
-#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
-#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
-#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
-#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
-
- ld.b r3,0,r63
- pta/l Large,tr0
- movi 25,r0
- bgeu/u r4,r0,tr0
- nsb r4,r0
- shlli r0,5,r0
- movi (L1-L0+63*32 + 1) & 0xffff,r1
- sub r1, r0, r0
-L0: ptrel r0,tr0
- add r2,r4,r5
- ptabs r18,tr1
- add r3,r4,r6
- blink tr0,r63
-
-/* Rearranged to make cut2 safe */
- .balign 8
-L4_7: /* 4..7 byte memcpy cntd. */
- stlo.l r2, 0, r0
- or r6, r7, r6
- sthi.l r5, -1, r6
- stlo.l r5, -4, r6
- blink tr1,r63
-
- .balign 8
-L1: /* 0 byte memcpy */
- nop
- blink tr1,r63
- nop
- nop
- nop
- nop
-
-L2_3: /* 2 or 3 byte memcpy cntd. */
- st.b r5,-1,r6
- blink tr1,r63
-
- /* 1 byte memcpy */
- ld.b r3,0,r0
- st.b r2,0,r0
- blink tr1,r63
-
-L8_15: /* 8..15 byte memcpy cntd. */
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
- /* 2 or 3 byte memcpy */
- ld.b r3,0,r0
- ld.b r2,0,r63
- ld.b r3,1,r1
- st.b r2,0,r0
- pta/l L2_3,tr0
- ld.b r6,-1,r6
- st.b r2,1,r1
- blink tr0, r63
-
- /* 4 .. 7 byte memcpy */
- LDUAL (r3, 0, r0, r1)
- pta L4_7, tr0
- ldlo.l r6, -4, r7
- or r0, r1, r0
- sthi.l r2, 3, r0
- ldhi.l r6, -1, r6
- blink tr0, r63
-
- /* 8 .. 15 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- pta L8_15, tr0
- ldlo.q r6, -8, r7
- or r0, r1, r0
- sthi.q r2, 7, r0
- ldhi.q r6, -1, r6
- blink tr0, r63
-
- /* 16 .. 24 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- LDUAQ (r3, 8, r8, r9)
- or r0, r1, r0
- sthi.q r2, 7, r0
- or r8, r9, r8
- sthi.q r2, 15, r8
- ldlo.q r6, -8, r7
- ldhi.q r6, -1, r6
- stlo.q r2, 8, r8
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
-Large:
- ld.b r2, 0, r63
- pta/l Loop_ua, tr1
- ori r3, -8, r7
- sub r2, r7, r22
- sub r3, r2, r6
- add r2, r4, r5
- ldlo.q r3, 0, r0
- addi r5, -16, r5
- movi 64+8, r27 // could subtract r7 from that.
- stlo.q r2, 0, r0
- sthi.q r2, 7, r0
- ldx.q r22, r6, r0
- bgtu/l r27, r4, tr1
-
- addi r5, -48, r27
- pta/l Loop_line, tr0
- addi r6, 64, r36
- addi r6, -24, r19
- addi r6, -16, r20
- addi r6, -8, r21
-
-Loop_line:
- ldx.q r22, r36, r63
- alloco r22, 32
- addi r22, 32, r22
- ldx.q r22, r19, r23
- sthi.q r22, -25, r0
- ldx.q r22, r20, r24
- ldx.q r22, r21, r25
- stlo.q r22, -32, r0
- ldx.q r22, r6, r0
- sthi.q r22, -17, r23
- sthi.q r22, -9, r24
- sthi.q r22, -1, r25
- stlo.q r22, -24, r23
- stlo.q r22, -16, r24
- stlo.q r22, -8, r25
- bgeu r27, r22, tr0
-
-Loop_ua:
- addi r22, 8, r22
- sthi.q r22, -1, r0
- stlo.q r22, -8, r0
- ldx.q r22, r6, r0
- bgtu/l r5, r22, tr1
-
- add r3, r4, r7
- ldlo.q r7, -8, r1
- sthi.q r22, 7, r0
- ldhi.q r7, -1, r7
- ptabs r18,tr1
- stlo.q r22, 0, r0
- or r1, r7, r1
- sthi.q r5, 15, r1
- stlo.q r5, 8, r1
- blink tr1, r63
-
- .size memcpy,.-memcpy
diff --git a/arch/sh/lib64/memset.S b/arch/sh/lib64/memset.S
deleted file mode 100644
index 453aa5f1d263..000000000000
--- a/arch/sh/lib64/memset.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
-/* Modified by SuperH, Inc. September 2003 */
-!
-! Fast SH memset
-!
-! by Toshiyasu Morita (tm@netcom.com)
-!
-! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
-! Copyright 2002 SuperH Ltd.
-!
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define SHHI shlld
-#define SHLO shlrd
-#else
-#define SHHI shlrd
-#define SHLO shlld
-#endif
-
- .section .text..SHmedia32,"ax"
- .globl memset
- .type memset, @function
-
- .align 5
-
-memset:
- pta/l multiquad, tr0
- andi r2, 7, r22
- ptabs r18, tr2
- mshflo.b r3,r3,r3
- add r4, r22, r23
- mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
-
- movi 8, r9
- bgtu/u r23, r9, tr0 // multiquad
-
- beqi/u r4, 0, tr2 // Return with size 0 - ensures no mem accesses
- ldlo.q r2, 0, r7
- shlli r4, 2, r4
- movi -1, r8
- SHHI r8, r4, r8
- SHHI r8, r4, r8
- mcmv r7, r8, r3
- stlo.q r2, 0, r3
- blink tr2, r63
-
-multiquad:
- pta/l lastquad, tr0
- stlo.q r2, 0, r3
- shlri r23, 3, r24
- add r2, r4, r5
- beqi/u r24, 1, tr0 // lastquad
- pta/l loop, tr1
- sub r2, r22, r25
- andi r5, -8, r20 // calculate end address and
- addi r20, -7*8, r8 // loop end address; This might overflow, so we need
- // to use a different test before we start the loop
- bge/u r24, r9, tr1 // loop
- st.q r25, 8, r3
- st.q r20, -8, r3
- shlri r24, 1, r24
- beqi/u r24, 1, tr0 // lastquad
- st.q r25, 16, r3
- st.q r20, -16, r3
- beqi/u r24, 2, tr0 // lastquad
- st.q r25, 24, r3
- st.q r20, -24, r3
-lastquad:
- sthi.q r5, -1, r3
- blink tr2,r63
-
-loop:
-!!! alloco r25, 32 // QQQ comment out for short-term fix to SHUK #3895.
- // QQQ commenting out is locically correct, but sub-optimal
- // QQQ Sean McGoogan - 4th April 2003.
- st.q r25, 8, r3
- st.q r25, 16, r3
- st.q r25, 24, r3
- st.q r25, 32, r3
- addi r25, 32, r25
- bgeu/l r8, r25, tr1 // loop
-
- st.q r20, -40, r3
- st.q r20, -32, r3
- st.q r20, -24, r3
- st.q r20, -16, r3
- st.q r20, -8, r3
- sthi.q r5, -1, r3
- blink tr2,r63
-
- .size memset,.-memset
diff --git a/arch/sh/lib64/panic.c b/arch/sh/lib64/panic.c
deleted file mode 100644
index 38c954e04f6a..000000000000
--- a/arch/sh/lib64/panic.c
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (C) 2003 Richard Curnow, SuperH UK Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-void
-panic_handler(unsigned long panicPC, unsigned long panicSSR,
- unsigned long panicEXPEVT)
-{
- /* Never return from the panic handler */
- for (;;) ;
-}
diff --git a/arch/sh/lib64/sdivsi3.S b/arch/sh/lib64/sdivsi3.S
deleted file mode 100644
index b422e2374430..000000000000
--- a/arch/sh/lib64/sdivsi3.S
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- .global __sdivsi3
- .global __sdivsi3_1
- .global __sdivsi3_2
- .section .text..SHmedia32,"ax"
- .align 2
-
- /* inputs: r4,r5 */
- /* clobbered: r1,r18,r19,r20,r21,r25,tr0 */
- /* result in r0 */
-__sdivsi3:
-__sdivsi3_1:
- ptb __div_table,tr0
- gettr tr0,r20
-
-__sdivsi3_2:
- nsb r5, r1
- shlld r5, r1, r25 /* normalize; [-2 ..1, 1..2) in s2.62 */
- shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */
- /* bubble */
- ldx.ub r20, r21, r19 /* u0.8 */
- shari r25, 32, r25 /* normalize to s2.30 */
- shlli r21, 1, r21
- muls.l r25, r19, r19 /* s2.38 */
- ldx.w r20, r21, r21 /* s2.14 */
- ptabs r18, tr0
- shari r19, 24, r19 /* truncate to s2.14 */
- sub r21, r19, r19 /* some 11 bit inverse in s1.14 */
- muls.l r19, r19, r21 /* u0.28 */
- sub r63, r1, r1
- addi r1, 92, r1
- muls.l r25, r21, r18 /* s2.58 */
- shlli r19, 45, r19 /* multiply by two and convert to s2.58 */
- /* bubble */
- sub r19, r18, r18
- shari r18, 28, r18 /* some 22 bit inverse in s1.30 */
- muls.l r18, r25, r0 /* s2.60 */
- muls.l r18, r4, r25 /* s32.30 */
- /* bubble */
- shari r0, 16, r19 /* s-16.44 */
- muls.l r19, r18, r19 /* s-16.74 */
- shari r25, 63, r0
- shari r4, 14, r18 /* s19.-14 */
- shari r19, 30, r19 /* s-16.44 */
- muls.l r19, r18, r19 /* s15.30 */
- xor r21, r0, r21 /* You could also use the constant 1 << 27. */
- add r21, r25, r21
- sub r21, r19, r21
- shard r21, r1, r21
- sub r21, r0, r0
- blink tr0, r63
-
-/* This table has been generated by divtab.c .
-Defects for bias -330:
- Max defect: 6.081536e-07 at -1.000000e+00
- Min defect: 2.849516e-08 at 1.030651e+00
- Max 2nd step defect: 9.606539e-12 at -1.000000e+00
- Min 2nd step defect: 0.000000e+00 at 0.000000e+00
- Defect at 1: 1.238659e-07
- Defect at -2: 1.061708e-07 */
-
- .balign 2
- .type __div_table,@object
- .size __div_table,128
-/* negative division constants */
- .word -16638
- .word -17135
- .word -17737
- .word -18433
- .word -19103
- .word -19751
- .word -20583
- .word -21383
- .word -22343
- .word -23353
- .word -24407
- .word -25582
- .word -26863
- .word -28382
- .word -29965
- .word -31800
-/* negative division factors */
- .byte 66
- .byte 70
- .byte 75
- .byte 81
- .byte 87
- .byte 93
- .byte 101
- .byte 109
- .byte 119
- .byte 130
- .byte 142
- .byte 156
- .byte 172
- .byte 192
- .byte 214
- .byte 241
- .skip 16
- .global __div_table
-__div_table:
- .skip 16
-/* positive division factors */
- .byte 241
- .byte 214
- .byte 192
- .byte 172
- .byte 156
- .byte 142
- .byte 130
- .byte 119
- .byte 109
- .byte 101
- .byte 93
- .byte 87
- .byte 81
- .byte 75
- .byte 70
- .byte 66
-/* positive division constants */
- .word 31801
- .word 29966
- .word 28383
- .word 26864
- .word 25583
- .word 24408
- .word 23354
- .word 22344
- .word 21384
- .word 20584
- .word 19752
- .word 19104
- .word 18434
- .word 17738
- .word 17136
- .word 16639
diff --git a/arch/sh/lib64/strcpy.S b/arch/sh/lib64/strcpy.S
deleted file mode 100644
index b61631e523d4..000000000000
--- a/arch/sh/lib64/strcpy.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
-/* Modified by SuperH, Inc. September 2003 */
-! Entry: arg0: destination
-! arg1: source
-! Exit: result: destination
-!
-! SH5 code Copyright 2002 SuperH Ltd.
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define SHHI shlld
-#define SHLO shlrd
-#else
-#define SHHI shlrd
-#define SHLO shlld
-#endif
-
- .section .text..SHmedia32,"ax"
- .globl strcpy
- .type strcpy, @function
- .align 5
-
-strcpy:
-
- pta/l shortstring,tr1
- ldlo.q r3,0,r4
- ptabs r18,tr4
- shlli r3,3,r7
- addi r2, 8, r0
- mcmpeq.b r4,r63,r6
- SHHI r6,r7,r6
- bnei/u r6,0,tr1 // shortstring
- pta/l no_lddst, tr2
- ori r3,-8,r23
- sub r2, r23, r0
- sub r3, r2, r21
- addi r21, 8, r20
- ldx.q r0, r21, r5
- pta/l loop, tr0
- ori r2,-8,r22
- mcmpeq.b r5, r63, r6
- bgt/u r22, r23, tr2 // no_lddst
-
- // r22 < r23 : Need to do a load from the destination.
- // r22 == r23 : Doesn't actually need to load from destination,
- // but still can be handled here.
- ldlo.q r2, 0, r9
- movi -1, r8
- SHLO r8, r7, r8
- mcmv r4, r8, r9
- stlo.q r2, 0, r9
- beqi/l r6, 0, tr0 // loop
-
- add r5, r63, r4
- addi r0, 8, r0
- blink tr1, r63 // shortstring
-no_lddst:
- // r22 > r23: note that for r22 == r23 the sthi.q would clobber
- // bytes before the destination region.
- stlo.q r2, 0, r4
- SHHI r4, r7, r4
- sthi.q r0, -1, r4
- beqi/l r6, 0, tr0 // loop
-
- add r5, r63, r4
- addi r0, 8, r0
-shortstring:
-#if __BYTE_ORDER != __LITTLE_ENDIAN
- pta/l shortstring2,tr1
- byterev r4,r4
-#endif
-shortstring2:
- st.b r0,-8,r4
- andi r4,0xff,r5
- shlri r4,8,r4
- addi r0,1,r0
- bnei/l r5,0,tr1
- blink tr4,r63 // return
-
- .balign 8
-loop:
- stlo.q r0, 0, r5
- ldx.q r0, r20, r4
- addi r0, 16, r0
- sthi.q r0, -9, r5
- mcmpeq.b r4, r63, r6
- bnei/u r6, 0, tr1 // shortstring
- ldx.q r0, r21, r5
- stlo.q r0, -8, r4
- sthi.q r0, -1, r4
- mcmpeq.b r5, r63, r6
- beqi/l r6, 0, tr0 // loop
-
- add r5, r63, r4
- addi r0, 8, r0
- blink tr1, r63 // shortstring
-
- .size strcpy,.-strcpy
diff --git a/arch/sh/lib64/strlen.S b/arch/sh/lib64/strlen.S
deleted file mode 100644
index c00b972f9999..000000000000
--- a/arch/sh/lib64/strlen.S
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Simplistic strlen() implementation for SHmedia.
- *
- * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
- */
-
- .section .text..SHmedia32,"ax"
- .globl strlen
- .type strlen,@function
-
- .balign 16
-strlen:
- ptabs r18, tr4
-
- /*
- * Note: We could easily deal with the NULL case here with a simple
- * sanity check, though it seems that the behavior we want is to fault
- * in the event that r2 == NULL, so we don't bother.
- */
-/* beqi r2, 0, tr4 */ ! Sanity check
-
- movi -1, r0
- pta/l loop, tr0
-loop:
- ld.b r2, 0, r1
- addi r2, 1, r2
- addi r0, 1, r0
- bnei/l r1, 0, tr0
-
- or r0, r63, r2
- blink tr4, r63
-
- .size strlen,.-strlen
diff --git a/arch/sh/lib64/udelay.c b/arch/sh/lib64/udelay.c
deleted file mode 100644
index f215b063da70..000000000000
--- a/arch/sh/lib64/udelay.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * arch/sh/lib64/udelay.c
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/sched.h>
-#include <asm/param.h>
-
-/*
- * Use only for very small delays (< 1 msec).
- *
- * The active part of our cycle counter is only 32-bits wide, and
- * we're treating the difference between two marks as signed. On
- * a 1GHz box, that's about 2 seconds.
- */
-
-void __delay(unsigned long loops)
-{
- long long dummy;
- __asm__ __volatile__("gettr tr0, %1\n\t"
- "pta $+4, tr0\n\t"
- "addi %0, -1, %0\n\t"
- "bne %0, r63, tr0\n\t"
- "ptabs %1, tr0\n\t":"=r"(loops),
- "=r"(dummy)
- :"0"(loops));
-}
-
-void __const_udelay(unsigned long xloops)
-{
- __delay(xloops * (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy));
-}
-
-void __udelay(unsigned long usecs)
-{
- __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
-}
-
-void __ndelay(unsigned long nsecs)
-{
- __const_udelay(nsecs * 0x00000005);
-}
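
The scaling constant in the deleted __udelay() is worth spelling out: 0x000010c6 is approximately 2**32 / 1000000, so multiplying a microsecond count by it expresses the same interval as a fraction of a second in 32.32 fixed point, which __const_udelay() then turns into delay-loop iterations using the calibrated loops_per_jiffy. A minimal stand-alone sketch of that arithmetic follows; HZ and loops_per_jiffy below are made-up illustration values, and the final >>32 is the conventional way of completing the scaling rather than a copy of the deleted routine.

    #include <stdint.h>
    #include <stdio.h>

    #define HZ_EXAMPLE              100ULL          /* illustrative only */
    #define LOOPS_PER_JIFFY_EXAMPLE 50000ULL        /* illustrative only */

    /*
     * usecs -> delay-loop iterations via 32.32 fixed point:
     * usecs * 0x10c6 ~= usecs * 2^32 / 10^6, i.e. seconds in 32.32 format;
     * multiplying by loops-per-second and keeping the integer part gives
     * the loop count.
     */
    static uint64_t usecs_to_loops(uint32_t usecs)
    {
            uint64_t xloops = (uint64_t)usecs * 0x10c6;
            uint64_t loops_per_sec = HZ_EXAMPLE * LOOPS_PER_JIFFY_EXAMPLE;

            return (xloops * loops_per_sec) >> 32;
    }

    int main(void)
    {
            printf("100 usecs -> %llu loops\n",
                   (unsigned long long)usecs_to_loops(100));
            return 0;
    }
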
diff --git a/arch/sh/lib64/udivdi3.S b/arch/sh/lib64/udivdi3.S
deleted file mode 100644
index c032cb157589..000000000000
--- a/arch/sh/lib64/udivdi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- .section .text..SHmedia32,"ax"
- .align 2
- .global __udivdi3
-__udivdi3:
- shlri r3,1,r4
- nsb r4,r22
- shlld r3,r22,r6
- shlri r6,49,r5
- movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
- sub r21,r5,r1
- mmulfx.w r1,r1,r4
- mshflo.w r1,r63,r1
- sub r63,r22,r20 // r63 == 64 % 64
- mmulfx.w r5,r4,r4
- pta large_divisor,tr0
- addi r20,32,r9
- msub.w r1,r4,r1
- madd.w r1,r1,r1
- mmulfx.w r1,r1,r4
- shlri r6,32,r7
- bgt/u r9,r63,tr0 // large_divisor
- mmulfx.w r5,r4,r4
- shlri r2,32+14,r19
- addi r22,-31,r0
- msub.w r1,r4,r1
-
- mulu.l r1,r7,r4
- addi r1,-3,r5
- mulu.l r5,r19,r5
- sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
- shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
- the case may be, %0000000000000000 000.11111111111, still */
- muls.l r1,r4,r4 /* leaving at least one sign bit. */
- mulu.l r5,r3,r8
- mshalds.l r1,r21,r1
- shari r4,26,r4
- shlld r8,r0,r8
- add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
- sub r2,r8,r2
- /* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
-
- shlri r2,22,r21
- mulu.l r21,r1,r21
- shlld r5,r0,r8
- addi r20,30-22,r0
- shlrd r21,r0,r21
- mulu.l r21,r3,r5
- add r8,r21,r8
- mcmpgt.l r21,r63,r21 // See Note 1
- addi r20,30,r0
- mshfhi.l r63,r21,r21
- sub r2,r5,r2
- andc r2,r21,r2
-
- /* small divisor: need a third divide step */
- mulu.l r2,r1,r7
- ptabs r18,tr0
- addi r2,1,r2
- shlrd r7,r0,r7
- mulu.l r7,r3,r5
- add r8,r7,r8
- sub r2,r3,r2
- cmpgt r2,r5,r5
- add r8,r5,r2
- /* could test r3 here to check for divide by zero. */
- blink tr0,r63
-
-large_divisor:
- mmulfx.w r5,r4,r4
- shlrd r2,r9,r25
- shlri r25,32,r8
- msub.w r1,r4,r1
-
- mulu.l r1,r7,r4
- addi r1,-3,r5
- mulu.l r5,r8,r5
- sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
- shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
- the case may be, %0000000000000000 000.11111111111, still */
- muls.l r1,r4,r4 /* leaving at least one sign bit. */
- shlri r5,14-1,r8
- mulu.l r8,r7,r5
- mshalds.l r1,r21,r1
- shari r4,26,r4
- add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
- sub r25,r5,r25
- /* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
-
- shlri r25,22,r21
- mulu.l r21,r1,r21
- pta no_lo_adj,tr0
- addi r22,32,r0
- shlri r21,40,r21
- mulu.l r21,r7,r5
- add r8,r21,r8
- shlld r2,r0,r2
- sub r25,r5,r25
- bgtu/u r7,r25,tr0 // no_lo_adj
- addi r8,1,r8
- sub r25,r7,r25
-no_lo_adj:
- mextr4 r2,r25,r2
-
- /* large_divisor: only needs a few adjustments. */
- mulu.l r8,r6,r5
- ptabs r18,tr0
- /* bubble */
- cmpgtu r5,r2,r5
- sub r8,r5,r2
- blink tr0,r63
-
-/* Note 1: To shift the result of the second divide stage so that the result
- always fits into 32 bits, yet we still reduce the rest sufficiently
- would require a lot of instructions to do the shifts just right. Using
- the full 64 bit shift result to multiply with the divisor would require
- four extra instructions for the upper 32 bits (shift / mulu / shift / sub).
- Fortunately, if the upper 32 bits of the shift result are nonzero, we
- know that the rest after taking this partial result into account will
- fit into 32 bits. So we just clear the upper 32 bits of the rest if the
- upper 32 bits of the partial result are nonzero. */
diff --git a/arch/sh/lib64/udivsi3.S b/arch/sh/lib64/udivsi3.S
deleted file mode 100644
index e4788fb4fe82..000000000000
--- a/arch/sh/lib64/udivsi3.S
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- .global __udivsi3
- .section .text..SHmedia32,"ax"
- .align 2
-
-/*
- inputs: r4,r5
- clobbered: r18,r19,r20,r21,r22,r25,tr0
- result in r0.
- */
-__udivsi3:
- addz.l r5,r63,r22
- nsb r22,r0
- shlld r22,r0,r25
- shlri r25,48,r25
- movi 0xffffffffffffbb0c,r20 /* shift count eqiv 76 */
- sub r20,r25,r21
- mmulfx.w r21,r21,r19
- mshflo.w r21,r63,r21
- ptabs r18,tr0
- mmulfx.w r25,r19,r19
- sub r20,r0,r0
- /* bubble */
- msub.w r21,r19,r19
-
- /*
- * It would be nice for scheduling to do this add to r21 before
- * the msub.w, but we need a different value for r19 to keep
- * errors under control.
- */
- addi r19,-2,r21
- mulu.l r4,r21,r18
- mmulfx.w r19,r19,r19
- shlli r21,15,r21
- shlrd r18,r0,r18
- mulu.l r18,r22,r20
- mmacnfx.wl r25,r19,r21
- /* bubble */
- sub r4,r20,r25
-
- mulu.l r25,r21,r19
- addi r0,14,r0
- /* bubble */
- shlrd r19,r0,r19
- mulu.l r19,r22,r20
- add r18,r19,r18
- /* bubble */
- sub.l r25,r20,r25
-
- mulu.l r25,r21,r19
- addz.l r25,r63,r25
- sub r25,r22,r25
- shlrd r19,r0,r19
- mulu.l r19,r22,r20
- addi r25,1,r25
- add r18,r19,r18
-
- cmpgt r25,r20,r25
- add.l r18,r25,r0
- blink tr0,r63
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 5c8a2ebfc720..6c39d24ad919 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -15,8 +15,7 @@ config MMU
config PAGE_OFFSET
hex
- default "0x80000000" if MMU && SUPERH32
- default "0x20000000" if MMU && SUPERH64
+ default "0x80000000" if MMU
default "0x00000000"
config FORCE_MAX_ZONEORDER
@@ -72,12 +71,11 @@ config MEMORY_SIZE
config 29BIT
def_bool !32BIT
- depends on SUPERH32
select UNCACHED_MAPPING
config 32BIT
bool
- default y if CPU_SH5 || !MMU
+ default !MMU
config PMB
bool "Support 32-bit physical addressing through PMB"
@@ -152,7 +150,7 @@ config ARCH_MEMORY_PROBE
config IOREMAP_FIXED
def_bool y
- depends on X2TLB || SUPERH64
+ depends on X2TLB
config UNCACHED_MAPPING
bool
@@ -184,7 +182,7 @@ config PAGE_SIZE_16KB
config PAGE_SIZE_64KB
bool "64kB"
- depends on !MMU || CPU_SH4 || CPU_SH5
+ depends on !MMU || CPU_SH4
help
This enables support for 64kB pages, possible on all SH-4
CPUs and later.
@@ -216,10 +214,6 @@ config HUGETLB_PAGE_SIZE_64MB
bool "64MB"
depends on X2TLB
-config HUGETLB_PAGE_SIZE_512MB
- bool "512MB"
- depends on CPU_SH5
-
endchoice
config SCHED_MC
@@ -242,7 +236,7 @@ config SH7705_CACHE_32KB
choice
prompt "Cache mode"
- default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
+ default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
config CACHE_WRITEBACK
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 5051b38fd5b6..487da0ff03b3 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,15 +10,14 @@ cacheops-$(CONFIG_CPU_SUBTYPE_SH7619) := cache-sh2.o
cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
-cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
-mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o ioremap.o kmap.o \
- pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
+mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
+ pgtable.o tlbex_32.o tlbflush_32.o
obj-y += $(mmu-y)
@@ -31,7 +30,6 @@ ifdef CONFIG_MMU
debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
-tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
@@ -46,29 +44,4 @@ obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
GCOV_PROFILE_pmb.o := n
-# Special flags for tlbex_64.o. This puts restrictions on the number of
-# caller-save registers that the compiler can target when building this file.
-# This is required because the code is called from a context in entry.S where
-# very few registers have been saved in the exception handler (for speed
-# reasons).
-# The caller save registers that have been saved and which can be used are
-# r2,r3,r4,r5 : argument passing
-# r15, r18 : SP and LINK
-# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
-# use of them, so it's probably beneficial to performance to save them
-# and have them available for it.
-#
-# The resources not listed below are callee save, i.e. the compiler is free to
-# use any of them and will spill them to the stack itself.
-
-CFLAGS_tlbex_64.o += -ffixed-r7 \
- -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
- -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
- -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
- -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
- -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
- -ffixed-r41 -ffixed-r42 -ffixed-r43 \
- -ffixed-r60 -ffixed-r61 -ffixed-r62 \
- -fomit-frame-pointer
-
ccflags-y := -Werror
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 8172a171d727..26f3bd43e850 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -12,7 +12,6 @@
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index eee911422cf9..ddfa9685f1ef 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -16,7 +16,6 @@
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
@@ -183,7 +182,7 @@ static void sh4_flush_cache_all(void *unused)
* accessed with (hence cache set) is in accord with the physical
* address (i.e. tag). It's no different here.
*
- * Caller takes mm->mmap_sem.
+ * Caller takes mm->mmap_lock.
*/
static void sh4_flush_cache_mm(void *arg)
{
@@ -208,8 +207,6 @@ static void sh4_flush_cache_page(void *args)
struct page *page;
unsigned long address, pfn, phys;
int map_coherent = 0;
- pgd_t *pgd;
- pud_t *pud;
pmd_t *pmd;
pte_t *pte;
void *vaddr;
@@ -223,9 +220,7 @@ static void sh4_flush_cache_page(void *args)
if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
return;
- pgd = pgd_offset(vma->vm_mm, address);
- pud = pud_offset(pgd, address);
- pmd = pmd_offset(pud, address);
+ pmd = pmd_off(vma->vm_mm, address);
pte = pte_offset_kernel(pmd, address);
/* If the page isn't present, there is nothing to do here. */
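
The open-coded pgd/pud/pmd descent in sh4_flush_cache_page() is replaced by the generic pmd_off() helper, which also hides the (folded) p4d level introduced elsewhere in this series. Roughly, the helper performs the walk below; this is a sketch, the real definition lives in include/linux/pgtable.h.

    /* Sketch of the descent that pmd_off(mm, addr) performs. */
    static inline pmd_t *pmd_off_sketch(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d = p4d_offset(pgd, addr);
            pud_t *pud = pud_offset(p4d, addr);

            return pmd_offset(pud, addr);
    }
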
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
deleted file mode 100644
index 445b5e69b73c..000000000000
--- a/arch/sh/mm/cache-sh5.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * arch/sh/mm/cache-sh5.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2002 Benedict Gaster
- * Copyright (C) 2003 Richard Curnow
- * Copyright (C) 2003 - 2008 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-#include <asm/mmu_context.h>
-
-extern void __weak sh4__flush_region_init(void);
-
-/* Wired TLB entry for the D-cache */
-static unsigned long long dtlb_cache_slot;
-
-/*
- * The following group of functions deal with mapping and unmapping a
- * temporary page into a DTLB slot that has been set aside for exclusive
- * use.
- */
-static inline void
-sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
- unsigned long paddr)
-{
- local_irq_disable();
- sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
-}
-
-static inline void sh64_teardown_dtlb_cache_slot(void)
-{
- sh64_teardown_tlb_slot(dtlb_cache_slot);
- local_irq_enable();
-}
-
-static inline void sh64_icache_inv_all(void)
-{
- unsigned long long addr, flag, data;
- unsigned long flags;
-
- addr = ICCR0;
- flag = ICCR0_ICI;
- data = 0;
-
- /* Make this a critical section for safety (probably not strictly necessary.) */
- local_irq_save(flags);
-
- /* Without %1 it gets unexplicably wrong */
- __asm__ __volatile__ (
- "getcfg %3, 0, %0\n\t"
- "or %0, %2, %0\n\t"
- "putcfg %3, 0, %0\n\t"
- "synci"
- : "=&r" (data)
- : "0" (data), "r" (flag), "r" (addr));
-
- local_irq_restore(flags);
-}
-
-static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
-{
- /* Invalidate range of addresses [start,end] from the I-cache, where
- * the addresses lie in the kernel superpage. */
-
- unsigned long long ullend, addr, aligned_start;
- aligned_start = (unsigned long long)(signed long long)(signed long) start;
- addr = L1_CACHE_ALIGN(aligned_start);
- ullend = (unsigned long long) (signed long long) (signed long) end;
-
- while (addr <= ullend) {
- __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
- addr += L1_CACHE_BYTES;
- }
-}
-
-static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
-{
- /* If we get called, we know that vma->vm_flags contains VM_EXEC.
- Also, eaddr is page-aligned. */
- unsigned int cpu = smp_processor_id();
- unsigned long long addr, end_addr;
- unsigned long flags = 0;
- unsigned long running_asid, vma_asid;
- addr = eaddr;
- end_addr = addr + PAGE_SIZE;
-
- /* Check whether we can use the current ASID for the I-cache
- invalidation. For example, if we're called via
- access_process_vm->flush_cache_page->here, (e.g. when reading from
- /proc), 'running_asid' will be that of the reader, not of the
- victim.
-
- Also, note the risk that we might get pre-empted between the ASID
- compare and blocking IRQs, and before we regain control, the
- pid->ASID mapping changes. However, the whole cache will get
- invalidated when the mapping is renewed, so the worst that can
- happen is that the loop below ends up invalidating somebody else's
- cache entries.
- */
-
- running_asid = get_asid();
- vma_asid = cpu_asid(cpu, vma->vm_mm);
- if (running_asid != vma_asid) {
- local_irq_save(flags);
- switch_and_save_asid(vma_asid);
- }
- while (addr < end_addr) {
- /* Worth unrolling a little */
- __asm__ __volatile__("icbi %0, 0" : : "r" (addr));
- __asm__ __volatile__("icbi %0, 32" : : "r" (addr));
- __asm__ __volatile__("icbi %0, 64" : : "r" (addr));
- __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
- addr += 128;
- }
- if (running_asid != vma_asid) {
- switch_and_save_asid(running_asid);
- local_irq_restore(flags);
- }
-}
-
-static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* Used for invalidating big chunks of I-cache, i.e. assume the range
- is whole pages. If 'start' or 'end' is not page aligned, the code
- is conservative and invalidates to the ends of the enclosing pages.
- This is functionally OK, just a performance loss. */
-
- /* See the comments below in sh64_dcache_purge_user_range() regarding
- the choice of algorithm. However, for the I-cache option (2) isn't
- available because there are no physical tags so aliases can't be
- resolved. The icbi instruction has to be used through the user
- mapping. Because icbi is cheaper than ocbp on a cache hit, it
- would be cheaper to use the selective code for a large range than is
- possible with the D-cache. Just assume 64 for now as a working
- figure.
- */
- int n_pages;
-
- if (!mm)
- return;
-
- n_pages = ((end - start) >> PAGE_SHIFT);
- if (n_pages >= 64) {
- sh64_icache_inv_all();
- } else {
- unsigned long aligned_start;
- unsigned long eaddr;
- unsigned long after_last_page_start;
- unsigned long mm_asid, current_asid;
- unsigned long flags = 0;
-
- mm_asid = cpu_asid(smp_processor_id(), mm);
- current_asid = get_asid();
-
- if (mm_asid != current_asid) {
- /* Switch ASID and run the invalidate loop under cli */
- local_irq_save(flags);
- switch_and_save_asid(mm_asid);
- }
-
- aligned_start = start & PAGE_MASK;
- after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
-
- while (aligned_start < after_last_page_start) {
- struct vm_area_struct *vma;
- unsigned long vma_end;
- vma = find_vma(mm, aligned_start);
- if (!vma || (aligned_start <= vma->vm_end)) {
- /* Avoid getting stuck in an error condition */
- aligned_start += PAGE_SIZE;
- continue;
- }
- vma_end = vma->vm_end;
- if (vma->vm_flags & VM_EXEC) {
- /* Executable */
- eaddr = aligned_start;
- while (eaddr < vma_end) {
- sh64_icache_inv_user_page(vma, eaddr);
- eaddr += PAGE_SIZE;
- }
- }
- aligned_start = vma->vm_end; /* Skip to start of next region */
- }
-
- if (mm_asid != current_asid) {
- switch_and_save_asid(current_asid);
- local_irq_restore(flags);
- }
- }
-}
-
-static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
-{
- /* The icbi instruction never raises ITLBMISS. i.e. if there's not a
- cache hit on the virtual tag the instruction ends there, without a
- TLB lookup. */
-
- unsigned long long aligned_start;
- unsigned long long ull_end;
- unsigned long long addr;
-
- ull_end = end;
-
- /* Just invalidate over the range using the natural addresses. TLB
- miss handling will be OK (TBC). Since it's for the current process,
- either we're already in the right ASID context, or the ASIDs have
- been recycled since we were last active in which case we might just
- invalidate another processes I-cache entries : no worries, just a
- performance drop for him. */
- aligned_start = L1_CACHE_ALIGN(start);
- addr = aligned_start;
- while (addr < ull_end) {
- __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- addr += L1_CACHE_BYTES;
- }
-}
-
-/* Buffer used as the target of alloco instructions to purge data from cache
- sets by natural eviction. -- RPC */
-#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
-static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-
-static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
-{
- /* Purge all ways in a particular block of sets, specified by the base
- set number and number of sets. Can handle wrap-around, if that's
- needed. */
-
- int dummy_buffer_base_set;
- unsigned long long eaddr, eaddr0, eaddr1;
- int j;
- int set_offset;
-
- dummy_buffer_base_set = ((int)&dummy_alloco_area &
- cpu_data->dcache.entry_mask) >>
- cpu_data->dcache.entry_shift;
- set_offset = sets_to_purge_base - dummy_buffer_base_set;
-
- for (j = 0; j < n_sets; j++, set_offset++) {
- set_offset &= (cpu_data->dcache.sets - 1);
- eaddr0 = (unsigned long long)dummy_alloco_area +
- (set_offset << cpu_data->dcache.entry_shift);
-
- /*
- * Do one alloco which hits the required set per cache
- * way. For write-back mode, this will purge the #ways
- * resident lines. There's little point unrolling this
- * loop because the allocos stall more if they're too
- * close together.
- */
- eaddr1 = eaddr0 + cpu_data->dcache.way_size *
- cpu_data->dcache.ways;
-
- for (eaddr = eaddr0; eaddr < eaddr1;
- eaddr += cpu_data->dcache.way_size) {
- __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
- __asm__ __volatile__ ("synco"); /* TAKum03020 */
- }
-
- eaddr1 = eaddr0 + cpu_data->dcache.way_size *
- cpu_data->dcache.ways;
-
- for (eaddr = eaddr0; eaddr < eaddr1;
- eaddr += cpu_data->dcache.way_size) {
- /*
- * Load from each address. Required because
- * alloco is a NOP if the cache is write-through.
- */
- if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
- __raw_readb((unsigned long)eaddr);
- }
- }
-
- /*
- * Don't use OCBI to invalidate the lines. That costs cycles
- * directly. If the dummy block is just left resident, it will
- * naturally get evicted as required.
- */
-}
-
-/*
- * Purge the entire contents of the dcache. The most efficient way to
- * achieve this is to use alloco instructions on a region of unused
- * memory equal in size to the cache, thereby causing the current
- * contents to be discarded by natural eviction. The alternative, namely
- * reading every tag, setting up a mapping for the corresponding page and
- * doing an OCBP for the line, would be much more expensive.
- */
-static void sh64_dcache_purge_all(void)
-{
-
- sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
-}
-
-
-/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
- anything else in the kernel */
-#define MAGIC_PAGE0_START 0xffffffffec000000ULL
-
-/* Purge the physical page 'paddr' from the cache. It's known that any
- * cache lines requiring attention have the same page colour as the the
- * address 'eaddr'.
- *
- * This relies on the fact that the D-cache matches on physical tags when
- * no virtual tag matches. So we create an alias for the original page
- * and purge through that. (Alternatively, we could have done this by
- * switching ASID to match the original mapping and purged through that,
- * but that involves ASID switching cost + probably a TLBMISS + refill
- * anyway.)
- */
-static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
- unsigned long eaddr)
-{
- unsigned long long magic_page_start;
- unsigned long long magic_eaddr, magic_eaddr_end;
-
- magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
-
- /* As long as the kernel is not pre-emptible, this doesn't need to be
- under cli/sti. */
- sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
-
- magic_eaddr = magic_page_start;
- magic_eaddr_end = magic_eaddr + PAGE_SIZE;
-
- while (magic_eaddr < magic_eaddr_end) {
- /* Little point in unrolling this loop - the OCBPs are blocking
- and won't go any quicker (i.e. the loop overhead is parallel
- to part of the OCBP execution.) */
- __asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
- magic_eaddr += L1_CACHE_BYTES;
- }
-
- sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * Purge a page given its physical start address, by creating a temporary
- * 1 page mapping and purging across that. Even if we know the virtual
- * address (& vma or mm) of the page, the method here is more elegant
- * because it avoids issues of coping with page faults on the purge
- * instructions (i.e. no special-case code required in the critical path
- * in the TLB miss handling).
- */
-static void sh64_dcache_purge_phy_page(unsigned long paddr)
-{
- unsigned long long eaddr_start, eaddr, eaddr_end;
- int i;
-
- /* As long as the kernel is not pre-emptible, this doesn't need to be
- under cli/sti. */
- eaddr_start = MAGIC_PAGE0_START;
- for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
- sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
-
- eaddr = eaddr_start;
- eaddr_end = eaddr + PAGE_SIZE;
- while (eaddr < eaddr_end) {
- __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
- eaddr += L1_CACHE_BYTES;
- }
-
- sh64_teardown_dtlb_cache_slot();
- eaddr_start += PAGE_SIZE;
- }
-}
-
-static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
- unsigned long addr, unsigned long end)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
- spinlock_t *ptl;
- unsigned long paddr;
-
- if (!mm)
- return; /* No way to find physical address of page */
-
- pgd = pgd_offset(mm, addr);
- if (pgd_bad(*pgd))
- return;
-
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud) || pud_bad(*pud))
- return;
-
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- return;
-
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- do {
- entry = *pte;
- if (pte_none(entry) || !pte_present(entry))
- continue;
- paddr = pte_val(entry) & PAGE_MASK;
- sh64_dcache_purge_coloured_phy_page(paddr, addr);
- } while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap_unlock(pte - 1, ptl);
-}
-
-/*
- * There are at least 5 choices for the implementation of this, with
- * pros (+), cons(-), comments(*):
- *
- * 1. ocbp each line in the range through the original user's ASID
- * + no lines spuriously evicted
- * - tlbmiss handling (must either handle faults on demand => extra
- * special-case code in tlbmiss critical path), or map the page in
- * advance (=> flush_tlb_range in advance to avoid multiple hits)
- * - ASID switching
- * - expensive for large ranges
- *
- * 2. temporarily map each page in the range to a special effective
- * address and ocbp through the temporary mapping; relies on the
- * fact that SH-5 OCB* always do TLB lookup and match on ptags (they
- * never look at the etags)
- * + no spurious evictions
- * - expensive for large ranges
- * * surely cheaper than (1)
- *
- * 3. walk all the lines in the cache, check the tags, if a match
- * occurs create a page mapping to ocbp the line through
- * + no spurious evictions
- * - tag inspection overhead
- * - (especially for small ranges)
- * - potential cost of setting up/tearing down page mapping for
- * every line that matches the range
- * * cost partly independent of range size
- *
- * 4. walk all the lines in the cache, check the tags, if a match
- * occurs use 4 * alloco to purge the line (+3 other probably
- * innocent victims) by natural eviction
- * + no tlb mapping overheads
- * - spurious evictions
- * - tag inspection overhead
- *
- * 5. implement like flush_cache_all
- * + no tag inspection overhead
- * - spurious evictions
- * - bad for small ranges
- *
- * (1) can be ruled out as more expensive than (2). (2) appears best
- * for small ranges. The choice between (3), (4) and (5) for large
- * ranges and the range size for the large/small boundary need
- * benchmarking to determine.
- *
- * For now use approach (2) for small ranges and (5) for large ones.
- */
-static void sh64_dcache_purge_user_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- int n_pages = ((end - start) >> PAGE_SHIFT);
-
- if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
- sh64_dcache_purge_all();
- } else {
- /* Small range, covered by a single page table page */
- start &= PAGE_MASK; /* should already be so */
- end = PAGE_ALIGN(end); /* should already be so */
- sh64_dcache_purge_user_pages(mm, start, end);
- }
-}
-
-/*
- * Invalidate the entire contents of both caches, after writing back to
- * memory any dirty data from the D-cache.
- */
-static void sh5_flush_cache_all(void *unused)
-{
- sh64_dcache_purge_all();
- sh64_icache_inv_all();
-}
-
-/*
- * Invalidate an entire user-address space from both caches, after
- * writing back dirty data (e.g. for shared mmap etc).
- *
- * This could be coded selectively by inspecting all the tags then
- * doing 4*alloco on any set containing a match (as for
- * flush_cache_range), but fork/exit/execve (where this is called from)
- * are expensive anyway.
- *
- * Have to do a purge here, despite the comments re I-cache below.
- * There could be odd-coloured dirty data associated with the mm still
- * in the cache - if this gets written out through natural eviction
- * after the kernel has reused the page there will be chaos.
- *
- * The mm being torn down won't ever be active again, so any Icache
- * lines tagged with its ASID won't be visible for the rest of the
- * lifetime of this ASID cycle. Before the ASID gets reused, there
- * will be a flush_cache_all. Hence we don't need to touch the
- * I-cache. This is similar to the lack of action needed in
- * flush_tlb_mm - see fault.c.
- */
-static void sh5_flush_cache_mm(void *unused)
-{
- sh64_dcache_purge_all();
-}
-
-/*
- * Invalidate (from both caches) the range [start,end) of virtual
- * addresses from the user address space specified by mm, after writing
- * back any dirty data.
- *
- * Note, 'end' is 1 byte beyond the end of the range to flush.
- */
-static void sh5_flush_cache_range(void *args)
-{
- struct flusher_data *data = args;
- struct vm_area_struct *vma;
- unsigned long start, end;
-
- vma = data->vma;
- start = data->addr1;
- end = data->addr2;
-
- sh64_dcache_purge_user_range(vma->vm_mm, start, end);
- sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
-}
-
-/*
- * Invalidate any entries in either cache for the vma within the user
- * address space vma->vm_mm for the page starting at virtual address
- * 'eaddr'. This seems to be used primarily in breaking COW. Note,
- * the I-cache must be searched too in case the page in question is
- * both writable and being executed from (e.g. stack trampolines.)
- *
- * Note, this is called with pte lock held.
- */
-static void sh5_flush_cache_page(void *args)
-{
- struct flusher_data *data = args;
- struct vm_area_struct *vma;
- unsigned long eaddr, pfn;
-
- vma = data->vma;
- eaddr = data->addr1;
- pfn = data->addr2;
-
- sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
-
- if (vma->vm_flags & VM_EXEC)
- sh64_icache_inv_user_page(vma, eaddr);
-}
-
-static void sh5_flush_dcache_page(void *page)
-{
- sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
- wmb();
-}
-
-/*
- * Flush the range [start,end] of kernel virtual address space from
- * the I-cache. The corresponding range must be purged from the
- * D-cache also because the SH-5 doesn't have cache snooping between
- * the caches. The addresses will be visible through the superpage
- * mapping, therefore it's guaranteed that there no cache entries for
- * the range in cache sets of the wrong colour.
- */
-static void sh5_flush_icache_range(void *args)
-{
- struct flusher_data *data = args;
- unsigned long start, end;
-
- start = data->addr1;
- end = data->addr2;
-
- __flush_purge_region((void *)start, end);
- wmb();
- sh64_icache_inv_kernel_range(start, end);
-}
-
-/*
- * For the address range [start,end), write back the data from the
- * D-cache and invalidate the corresponding region of the I-cache for the
- * current process. Used to flush signal trampolines on the stack to
- * make them executable.
- */
-static void sh5_flush_cache_sigtramp(void *vaddr)
-{
- unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
-
- __flush_wback_region(vaddr, L1_CACHE_BYTES);
- wmb();
- sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
-}
-
-void __init sh5_cache_init(void)
-{
- local_flush_cache_all = sh5_flush_cache_all;
- local_flush_cache_mm = sh5_flush_cache_mm;
- local_flush_cache_dup_mm = sh5_flush_cache_mm;
- local_flush_cache_page = sh5_flush_cache_page;
- local_flush_cache_range = sh5_flush_cache_range;
- local_flush_dcache_page = sh5_flush_dcache_page;
- local_flush_icache_range = sh5_flush_icache_range;
- local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
-
- /* Reserve a slot for dcache colouring in the DTLB */
- dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
- sh4__flush_region_init();
-}
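
The deleted cache-sh5.c comments describe the central trick: rather than inspecting tags, the whole D-cache is purged by issuing alloco accesses across a dummy buffer the size of the cache, so the live contents are pushed out by natural eviction. Stripped of the SH-5 alloco/synco instruction sequence, the idea looks roughly like the sketch below; the sizes are illustrative, not the SH-5 cache geometry.

    #define WAYS_EX         4
    #define SETS_EX         256
    #define LINE_EX         32
    #define WAY_SIZE_EX     (SETS_EX * LINE_EX)

    static unsigned char dummy_area[WAYS_EX * WAY_SIZE_EX]
            __attribute__((aligned(LINE_EX)));

    /*
     * Touch one line per way in every set; each access claims a line in
     * that set, so after the loop the previous contents have been evicted
     * (and written back first, on a write-back cache).
     */
    static void purge_dcache_by_eviction(void)
    {
            unsigned int set, way;

            for (set = 0; set < SETS_EX; set++)
                    for (way = 0; way < WAYS_EX; way++)
                            (void)*(volatile unsigned char *)
                                    (&dummy_area[way * WAY_SIZE_EX + set * LINE_EX]);
    }
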
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index ed25eba80667..48978293226c 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -16,7 +16,6 @@
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 464f160a9576..3aef78ceb820 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -355,12 +355,6 @@ void __init cpu_cache_init(void)
}
}
- if (boot_cpu_data.family == CPU_FAMILY_SH5) {
- extern void __weak sh5_cache_init(void);
-
- sh5_cache_init();
- }
-
skip:
emit_cache_params();
}
diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
deleted file mode 100644
index 7a3b4d33d2e7..000000000000
--- a/arch/sh/mm/extable_64.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * arch/sh/mm/extable_64.c
- *
- * Copyright (C) 2003 Richard Curnow
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- * Cloned from the 2.5 SH version..
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/bsearch.h>
-#include <linux/rwsem.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
-extern void __copy_user_fixup(void);
-
-static const struct exception_table_entry __copy_user_fixup_ex = {
- .fixup = (unsigned long)&__copy_user_fixup,
-};
-
-/*
- * Some functions that may trap due to a bad user-mode address have too
- * many loads and stores in them to make it at all practical to label
- * each one and put them all in the main exception table.
- *
- * In particular, the fast memcpy routine is like this. It's fix-up is
- * just to fall back to a slow byte-at-a-time copy, which is handled the
- * conventional way. So it's functionally OK to just handle any trap
- * occurring in the fast memcpy with that fixup.
- */
-static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
-{
- if ((addr >= (unsigned long)&copy_user_memcpy) &&
- (addr <= (unsigned long)&copy_user_memcpy_end))
- return &__copy_user_fixup_ex;
-
- return NULL;
-}
-
-static int cmp_ex_search(const void *key, const void *elt)
-{
- const struct exception_table_entry *_elt = elt;
- unsigned long _key = *(unsigned long *)key;
-
- /* avoid overflow */
- if (_key > _elt->insn)
- return 1;
- if (_key < _elt->insn)
- return -1;
- return 0;
-}
-
-/* Simple binary search */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
- const size_t num,
- unsigned long value)
-{
- const struct exception_table_entry *mid;
-
- mid = check_exception_ranges(value);
- if (mid)
- return mid;
-
- return bsearch(&value, base, num,
- sizeof(struct exception_table_entry), cmp_ex_search);
-}
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return 1;
- }
-
- return 0;
-}
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 5f23d7907597..fbe1f2fe9a8c 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -47,12 +47,13 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
pgd = swapper_pg_dir;
}
- printk(KERN_ALERT "pgd = %p\n", pgd);
+ pr_alert("pgd = %p\n", pgd);
pgd += pgd_index(addr);
- printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
- (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
+ pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
+ (u64)pgd_val(*pgd));
do {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -61,33 +62,46 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
break;
if (pgd_bad(*pgd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (PTRS_PER_P4D != 1)
+ pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
+ (u64)p4d_val(*p4d));
+
+ if (p4d_none(*p4d))
+ break;
+
+ if (p4d_bad(*p4d)) {
+ pr_cont("(bad)");
+ break;
+ }
+
+ pud = pud_offset(p4d, addr);
if (PTRS_PER_PUD != 1)
- printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
- (u64)pud_val(*pud));
+ pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
+ (u64)pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
- printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
- (u64)pmd_val(*pmd));
+ pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
+ (u64)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
@@ -96,17 +110,18 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pte = pte_offset_kernel(pmd, addr);
- printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
- (u64)pte_val(*pte));
+ pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
+ (u64)pte_val(*pte));
} while (0);
- printk("\n");
+ pr_cont("\n");
}
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -116,8 +131,13 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
if (!pgd_present(*pgd_k))
return NULL;
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ return NULL;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
return NULL;
@@ -188,15 +208,13 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
if (!oops_may_print())
return;
- printk(KERN_ALERT "BUG: unable to handle kernel ");
- if (address < PAGE_SIZE)
- printk(KERN_CONT "NULL pointer dereference");
- else
- printk(KERN_CONT "paging request");
-
- printk(KERN_CONT " at %08lx\n", address);
printk(KERN_ALERT "PC:");
- printk_address(regs->pc, 1);
+ pr_alert("BUG: unable to handle kernel %s at %08lx\n",
+ address < PAGE_SIZE ? "NULL pointer dereference"
+ : "paging request",
+ address);
+ pr_alert("PC:");
+ printk_address(regs->pc, 1, KERN_ALERT);
show_pte(NULL, address);
}
@@ -261,7 +279,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__bad_area_nosemaphore(regs, error_code, address, si_code);
}
@@ -285,7 +303,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs))
@@ -308,9 +326,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
return 1;
}
- /* Release mmap_sem first if necessary */
+ /* Release mmap_lock first if necessary */
if (!(fault & VM_FAULT_RETRY))
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (!(fault & VM_FAULT_ERROR))
return 0;
@@ -424,7 +442,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (unlikely(!vma)) {
@@ -484,7 +502,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -492,5 +510,5 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
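
Beyond the p4d additions, the fault path is converted from taking mm->mmap_sem directly to the mmap_lock wrapper API (mmap_read_lock()/mmap_read_unlock()), so the call sites no longer depend on how the lock is implemented. In isolation the idiom is simply the following sketch, using only APIs visible in this patch.

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /*
     * Sketch: check whether a VMA covers 'addr', under the read side of
     * the mmap lock. Replaces the old down_read(&mm->mmap_sem)/up_read()
     * pairing.
     */
    static bool vma_covers_addr(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool covered;

            mmap_read_lock(mm);
            vma = find_vma(mm, addr);
            covered = vma && vma->vm_start <= addr;
            mmap_read_unlock(mm);

            return covered;
    }
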
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 960deb1f24a1..acd5652a0de3 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -26,17 +26,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
if (pgd) {
- pud = pud_alloc(mm, pgd, addr);
- if (pud) {
- pmd = pmd_alloc(mm, pud, addr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (p4d) {
+ pud = pud_alloc(mm, p4d, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, addr);
+ }
}
}
@@ -47,17 +51,21 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
if (pgd) {
- pud = pud_offset(pgd, addr);
- if (pud) {
- pmd = pmd_offset(pud, addr);
- if (pmd)
- pte = pte_offset_map(pmd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d) {
+ pud = pud_offset(p4d, addr);
+ if (pud) {
+ pmd = pmd_offset(pud, addr);
+ if (pmd)
+ pte = pte_offset_map(pmd, addr);
+ }
}
}
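
The hugetlb helpers illustrate the general pattern of this patch: every page-table walk now descends pgd -> p4d -> pud -> pmd -> pte. On sh the p4d level is folded away, so p4d_alloc()/p4d_offset() compile down to next to nothing, but spelling the level out keeps the walk correct for any page-table depth. The lookup side of that pattern, condensed into one sketch (not a function from this patch):

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /*
     * Sketch: descend the full five-level table and return the PTE for
     * 'addr', or NULL if any intermediate level is absent.
     */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none_or_clear_bad(pgd))
                    return NULL;
            p4d = p4d_offset(pgd, addr);
            if (p4d_none_or_clear_bad(p4d))
                    return NULL;
            pud = pud_offset(p4d, addr);
            if (pud_none_or_clear_bad(pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none_or_clear_bad(pmd))
                    return NULL;

            return pte_offset_kernel(pmd, addr);
    }
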
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 628f461b8993..a70ba0fdd0b3 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -45,6 +45,7 @@ void __init __weak plat_mem_setup(void)
static pte_t *__get_pte_phys(unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -54,7 +55,13 @@ static pte_t *__get_pte_phys(unsigned long addr)
return NULL;
}
- pud = pud_alloc(NULL, pgd, addr);
+ p4d = p4d_alloc(NULL, pgd, addr);
+ if (unlikely(!p4d)) {
+ p4d_ERROR(*p4d);
+ return NULL;
+ }
+
+ pud = pud_alloc(NULL, p4d, addr);
if (unlikely(!pud)) {
pud_ERROR(*pud);
return NULL;
@@ -172,9 +179,9 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
unsigned long vaddr;
vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 9e6b38b03cf7..73fd7cc99430 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -14,9 +14,6 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
static pte_t *kmap_coherent_pte;
void __init kmap_coherent_init(void)
@@ -25,7 +22,7 @@ void __init kmap_coherent_init(void)
/* cache the first coherent kmap pte */
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
- kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+ kmap_coherent_pte = virt_to_kpte(vaddr);
}
void *kmap_coherent(struct page *page, unsigned long addr)
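
kmap_coherent_init() drops its private kmap_get_fixmap_pte() macro in favour of the generic virt_to_kpte() helper, which does the same kernel-address lookup but through the current set of page-table levels. Roughly, the helper amounts to the sketch below; the real definition is in include/linux/pgtable.h.

    /* Sketch of what virt_to_kpte(vaddr) amounts to for a kernel address. */
    static inline pte_t *virt_to_kpte_sketch(unsigned long vaddr)
    {
            pmd_t *pmd = pmd_off_k(vaddr);  /* init_mm walk down to the PMD */

            return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
    }
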
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index dca946f426c6..8b4504413c5f 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -10,7 +10,6 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/uaccess.h>
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b59bad86b31e..b20aba6e1b37 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,10 +23,10 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
deleted file mode 100644
index e4bb2a8e0a69..000000000000
--- a/arch/sh/mm/tlb-sh5.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * arch/sh/mm/tlb-sh5.c
- *
- * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
- * Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
-
-/**
- * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
- */
-int sh64_tlb_init(void)
-{
- /* Assign some sane DTLB defaults */
- cpu_data->dtlb.entries = 64;
- cpu_data->dtlb.step = 0x10;
-
- cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step;
- cpu_data->dtlb.next = cpu_data->dtlb.first;
-
- cpu_data->dtlb.last = DTLB_FIXED |
- ((cpu_data->dtlb.entries - 1) *
- cpu_data->dtlb.step);
-
- /* And again for the ITLB */
- cpu_data->itlb.entries = 64;
- cpu_data->itlb.step = 0x10;
-
- cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step;
- cpu_data->itlb.next = cpu_data->itlb.first;
- cpu_data->itlb.last = ITLB_FIXED |
- ((cpu_data->itlb.entries - 1) *
- cpu_data->itlb.step);
-
- return 0;
-}
-
-/**
- * sh64_next_free_dtlb_entry - Find the next available DTLB entry
- */
-unsigned long long sh64_next_free_dtlb_entry(void)
-{
- return cpu_data->dtlb.next;
-}
-
-/**
- * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
- */
-unsigned long long sh64_get_wired_dtlb_entry(void)
-{
- unsigned long long entry = sh64_next_free_dtlb_entry();
-
- cpu_data->dtlb.first += cpu_data->dtlb.step;
- cpu_data->dtlb.next += cpu_data->dtlb.step;
-
- return entry;
-}
-
-/**
- * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
- *
- * @entry: Address of TLB slot.
- *
- * Works like a stack, last one to allocate must be first one to free.
- */
-int sh64_put_wired_dtlb_entry(unsigned long long entry)
-{
- __flush_tlb_slot(entry);
-
- /*
- * We don't do any particularly useful tracking of wired entries,
- * so this approach works like a stack .. last one to be allocated
- * has to be the first one to be freed.
- *
- * We could potentially load wired entries into a list and work on
- * rebalancing the list periodically (which also entails moving the
- * contents of a TLB entry) .. though I have a feeling that this is
- * more trouble than it's worth.
- */
-
- /*
- * Entry must be valid .. we don't want any ITLB addresses!
- */
- if (entry <= DTLB_FIXED)
- return -EINVAL;
-
- /*
- * Next, check if we're within range to be freed. (ie, must be the
- * entry beneath the first 'free' entry!
- */
- if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
- return -EINVAL;
-
- /* If we are, then bring this entry back into the list */
- cpu_data->dtlb.first -= cpu_data->dtlb.step;
- cpu_data->dtlb.next = entry;
-
- return 0;
-}
-
-/**
- * sh64_setup_tlb_slot - Load up a translation in a wired slot.
- *
- * @config_addr: Address of TLB slot.
- * @eaddr: Virtual address.
- * @asid: Address Space Identifier.
- * @paddr: Physical address.
- *
- * Load up a virtual<->physical translation for @eaddr<->@paddr in the
- * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
- */
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
- unsigned long asid, unsigned long paddr)
-{
- unsigned long long pteh, ptel;
-
- pteh = neff_sign_extend(eaddr);
- pteh &= PAGE_MASK;
- pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
- ptel = neff_sign_extend(paddr);
- ptel &= PAGE_MASK;
- ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
-
- asm volatile("putcfg %0, 1, %1\n\t"
- "putcfg %0, 0, %2\n"
- : : "r" (config_addr), "r" (ptel), "r" (pteh));
-}
-
-/**
- * sh64_teardown_tlb_slot - Teardown a translation.
- *
- * @config_addr: Address of TLB slot.
- *
- * Teardown any existing mapping in the TLB slot @config_addr.
- */
-void sh64_teardown_tlb_slot(unsigned long long config_addr)
- __attribute__ ((alias("__flush_tlb_slot")));
-
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
- unsigned long long entry;
- unsigned long paddr, flags;
-
- BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
-
- local_irq_save(flags);
-
- entry = sh64_get_wired_dtlb_entry();
- dtlb_entries[dtlb_entry++] = entry;
-
- paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
- paddr &= ~PAGE_MASK;
-
- sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
- local_irq_restore(flags);
-}
-
-void tlb_unwire_entry(void)
-{
- unsigned long long entry;
- unsigned long flags;
-
- BUG_ON(!dtlb_entry);
-
- local_irq_save(flags);
- entry = dtlb_entries[dtlb_entry--];
-
- sh64_teardown_tlb_slot(entry);
- sh64_put_wired_dtlb_entry(entry);
-
- local_irq_restore(flags);
-}
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
- unsigned long long ptel;
- unsigned long long pteh=0;
- struct tlb_info *tlbp;
- unsigned long long next;
- unsigned int fault_code = get_thread_fault_code();
-
- /* Get PTEL first */
- ptel = pte.pte_low;
-
- /*
- * Set PTEH register
- */
- pteh = neff_sign_extend(address & MMU_VPN_MASK);
-
- /* Set the ASID. */
- pteh |= get_asid() << PTEH_ASID_SHIFT;
- pteh |= PTEH_VALID;
-
- /* Set PTEL register, set_pte has performed the sign extension */
- ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
- if (fault_code & FAULT_CODE_ITLB)
- tlbp = &cpu_data->itlb;
- else
- tlbp = &cpu_data->dtlb;
-
- next = tlbp->next;
- __flush_tlb_slot(next);
- asm volatile ("putcfg %0,1,%2\n\n\t"
- "putcfg %0,0,%1\n"
- : : "r" (next), "r" (pteh), "r" (ptel) );
-
- next += TLB_STEP;
- if (next > tlbp->last)
- next = tlbp->first;
- tlbp->next = next;
-}
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
index 382262dc0c4b..1c53868632ee 100644
--- a/arch/sh/mm/tlbex_32.c
+++ b/arch/sh/mm/tlbex_32.c
@@ -23,6 +23,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -42,7 +43,10 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
pgd = pgd_offset(current->mm, address);
}
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none_or_clear_bad(p4d))
+ return 1;
+ pud = pud_offset(p4d, address);
if (pud_none_or_clear_bad(pud))
return 1;
pmd = pmd_offset(pud, address);
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
deleted file mode 100644
index 8ff966dd0c74..000000000000
--- a/arch/sh/mm/tlbex_64.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * The SH64 TLB miss.
- *
- * Original code from fault.c
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- * Fast PTE->TLB refill path
- * Copyright (C) 2003 Richard.Curnow@superh.com
- *
- * IMPORTANT NOTES :
- * The do_fast_page_fault function is called from a context in entry.S
- * where very few registers have been saved. In particular, the code in
- * this file must be compiled not to use ANY caller-save registers that
- * are not part of the restricted save set. Also, it means that code in
- * this file must not make calls to functions elsewhere in the kernel, or
- * else the excepting context will see corruption in its caller-save
- * registers. Plus, the entry.S save area is non-reentrant, so this code
- * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
- * on any exception.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/kprobes.h>
-#include <asm/tlb.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-static int handle_tlbmiss(unsigned long long protection_flags,
- unsigned long address)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
-
- if (is_vmalloc_addr((void *)address)) {
- pgd = pgd_offset_k(address);
- } else {
- if (unlikely(address >= TASK_SIZE || !current->mm))
- return 1;
-
- pgd = pgd_offset(current->mm, address);
- }
-
- pud = pud_offset(pgd, address);
- if (pud_none(*pud) || !pud_present(*pud))
- return 1;
-
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd) || !pmd_present(*pmd))
- return 1;
-
- pte = pte_offset_kernel(pmd, address);
- entry = *pte;
- if (pte_none(entry) || !pte_present(entry))
- return 1;
-
- /*
- * If the page doesn't have sufficient protection bits set to
- * service the kind of fault being handled, there's not much
- * point doing the TLB refill. Punt the fault to the general
- * handler.
- */
- if ((pte_val(entry) & protection_flags) != protection_flags)
- return 1;
-
- update_mmu_cache(NULL, address, pte);
-
- return 0;
-}
-
-/*
- * Put all this information into one structure so that everything is just
- * arithmetic relative to a single base address. This reduces the number
- * of movi/shori pairs needed just to load addresses of static data.
- */
-struct expevt_lookup {
- unsigned short protection_flags[8];
- unsigned char is_text_access[8];
- unsigned char is_write_access[8];
-};
-
-#define PRU (1<<9)
-#define PRW (1<<8)
-#define PRX (1<<7)
-#define PRR (1<<6)
-
-/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
- the fault happened in user mode or privileged mode. */
-static struct expevt_lookup expevt_lookup_table = {
- .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
- .is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
-};
-
-static inline unsigned int
-expevt_to_fault_code(unsigned long expevt)
-{
- if (expevt == 0xa40)
- return FAULT_CODE_ITLB;
- else if (expevt == 0x060)
- return FAULT_CODE_WRITE;
-
- return 0;
-}
-
-/*
- This routine handles page faults that can be serviced just by refilling a
- TLB entry from an existing page table entry. (This case represents a very
- large majority of page faults.) Return 1 if the fault was successfully
- handled. Return 0 if the fault could not be handled. (This leads into the
- general fault handling in fault.c which deals with mapping file-backed
- pages, stack growth, segmentation faults, swapping etc etc)
- */
-asmlinkage int __kprobes
-do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
- unsigned long address)
-{
- unsigned long long protection_flags;
- unsigned long long index;
- unsigned long long expevt4;
- unsigned int fault_code;
-
- /* The next few lines implement a way of hashing EXPEVT into a
- * small array index which can be used to lookup parameters
- * specific to the type of TLBMISS being handled.
- *
- * Note:
- * ITLBMISS has EXPEVT==0xa40
- * RTLBMISS has EXPEVT==0x040
- * WTLBMISS has EXPEVT==0x060
- */
- expevt4 = (expevt >> 4);
- /* TODO : xor ssr_md into this expression too. Then we can check
- * that PRU is set when it needs to be. */
- index = expevt4 ^ (expevt4 >> 5);
- index &= 7;
-
- fault_code = expevt_to_fault_code(expevt);
-
- protection_flags = expevt_lookup_table.protection_flags[index];
-
- if (expevt_lookup_table.is_text_access[index])
- fault_code |= FAULT_CODE_ITLB;
- if (!ssr_md)
- fault_code |= FAULT_CODE_USER;
-
- set_thread_fault_code(fault_code);
-
- return handle_tlbmiss(protection_flags, address);
-}
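
The table lookup in the removed do_fast_page_fault() hinges on the small EXPEVT hash described in its comment. A quick standalone check (a sketch, not kernel code; the slot meanings follow from the expevt_lookup_table initializer above) confirms the three miss types land on distinct entries of the 8-slot table:

#include <stdio.h>

static unsigned int expevt_to_index(unsigned long expevt)
{
	unsigned long e4 = expevt >> 4;

	/* same hash as the removed code: fold bit 9 of EXPEVT>>4 back in */
	return (e4 ^ (e4 >> 5)) & 7;
}

int main(void)
{
	printf("ITLBMISS 0xa40 -> %u\n", expevt_to_index(0xa40)); /* 1: PRX */
	printf("RTLBMISS 0x040 -> %u\n", expevt_to_index(0x040)); /* 4: PRR */
	printf("WTLBMISS 0x060 -> %u\n", expevt_to_index(0x060)); /* 6: PRW */
	return 0;
}
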
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
deleted file mode 100644
index bd0715d5dca4..000000000000
--- a/arch/sh/mm/tlbflush_64.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * arch/sh/mm/tlb-flush_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2012 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/perf_event.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <asm/tlb.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
-{
- unsigned long long match, pteh=0, lpage;
- unsigned long tlb;
-
- /*
- * Sign-extend based on neff.
- */
- lpage = neff_sign_extend(page);
- match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
- match |= lpage;
-
- for_each_itlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- if (pteh == match) {
- __flush_tlb_slot(tlb);
- break;
- }
- }
-
- for_each_dtlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- if (pteh == match) {
- __flush_tlb_slot(tlb);
- break;
- }
-
- }
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- unsigned long flags;
-
- if (vma->vm_mm) {
- page &= PAGE_MASK;
- local_irq_save(flags);
- local_flush_tlb_one(get_asid(), page);
- local_irq_restore(flags);
- }
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- unsigned long flags;
- unsigned long long match, pteh=0, pteh_epn, pteh_low;
- unsigned long tlb;
- unsigned int cpu = smp_processor_id();
- struct mm_struct *mm;
-
- mm = vma->vm_mm;
- if (cpu_context(cpu, mm) == NO_CONTEXT)
- return;
-
- local_irq_save(flags);
-
- start &= PAGE_MASK;
- end &= PAGE_MASK;
-
- match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
-
- /* Flush ITLB */
- for_each_itlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- pteh_epn = pteh & PAGE_MASK;
- pteh_low = pteh & ~PAGE_MASK;
-
- if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
- __flush_tlb_slot(tlb);
- }
-
- /* Flush DTLB */
- for_each_dtlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- pteh_epn = pteh & PAGE_MASK;
- pteh_low = pteh & ~PAGE_MASK;
-
- if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
- __flush_tlb_slot(tlb);
- }
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- unsigned long flags;
- unsigned int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) == NO_CONTEXT)
- return;
-
- local_irq_save(flags);
-
- cpu_context(cpu, mm) = NO_CONTEXT;
- if (mm == current->mm)
- activate_context(mm, cpu);
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_all(void)
-{
- /* Invalidate all, including shared pages, excluding fixed TLBs */
- unsigned long flags, tlb;
-
- local_irq_save(flags);
-
- /* Flush each ITLB entry */
- for_each_itlb_entry(tlb)
- __flush_tlb_slot(tlb);
-
- /* Flush each DTLB entry */
- for_each_dtlb_entry(tlb)
- __flush_tlb_slot(tlb);
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- /* FIXME: Optimize this later.. */
- flush_tlb_all();
-}
-
-void __flush_tlb_global(void)
-{
- flush_tlb_all();
-}
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index fb66094a2c30..41c6d734a474 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -17,8 +17,6 @@
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index e7517434d1fa..b9341836597e 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -49,7 +49,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end);
void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 946dbcbf3a83..e10ab9ad3097 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -9,8 +9,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 18d776925c45..6c35f0d27ee1 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -22,14 +22,15 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
+#include <asm/pgtsrmmu.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
-extern pgprot_t kmap_prot;
+#define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
extern pte_t *pkmap_page_table;
void kmap_init(void) __init;
@@ -50,28 +51,6 @@ void kmap_init(void) __init;
#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
-void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
-
-static inline void *kmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
-
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
diff --git a/arch/sparc/include/asm/ide.h b/arch/sparc/include/asm/ide.h
index 09f026585550..499aa2e6e276 100644
--- a/arch/sparc/include/asm/ide.h
+++ b/arch/sparc/include/asm/ide.h
@@ -18,7 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/page.h>
#else
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/psr.h>
#endif
diff --git a/arch/sparc/include/asm/io-unit.h b/arch/sparc/include/asm/io-unit.h
index 3ce96e8c088f..8c38f5b9f927 100644
--- a/arch/sparc/include/asm/io-unit.h
+++ b/arch/sparc/include/asm/io-unit.h
@@ -7,8 +7,8 @@
#define _SPARC_IO_UNIT_H
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
/* The io-unit handles all virtual to physical address translations
* that occur between the SBUS and physical memory. Access by
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index 478260002836..fff8861df107 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -54,7 +54,7 @@ extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned long pmdv[16]; } pmd_t;
+typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long ctxd; } ctxd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
@@ -62,7 +62,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
#define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte)
-#define pmd_val(x) ((x).pmdv[0])
+#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define ctxd_val(x) ((x).ctxd)
#define pgprot_val(x) ((x).pgprot)
@@ -82,7 +82,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
*/
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
-typedef struct { unsigned long pmdv[16]; } pmd_t;
+typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long ctxd_t;
typedef unsigned long pgprot_t;
@@ -90,14 +90,14 @@ typedef unsigned long iopgprot_t;
#define pte_val(x) (x)
#define iopte_val(x) (x)
-#define pmd_val(x) ((x).pmdv[0])
+#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define ctxd_val(x) (x)
#define pgprot_val(x) (x)
#define iopgprot_val(x) (x)
#define __pte(x) (x)
-#define __pmd(x) ((pmd_t) { { (x) }, })
+#define __pmd(x) (x)
#define __iopte(x) (x)
#define __pgd(x) (x)
#define __ctxd(x) (x)
@@ -106,7 +106,7 @@ typedef unsigned long iopgprot_t;
#endif
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
#define TASK_UNMAPPED_BASE 0x50000000
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index eae0c92ec422..9d353e6dc5a9 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -4,9 +4,9 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/pgtable.h>
#include <asm/pgtsrmmu.h>
-#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
@@ -50,23 +50,24 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
+#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
-#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
+#define pmd_populate_kernel pmd_populate
pgtable_t pte_alloc_one(struct mm_struct *mm);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+ return srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE,
+ SRMMU_PTE_TABLE_SIZE);
}
static inline void free_pte_fast(pte_t *pte)
{
- srmmu_free_nocache(pte, PTE_SIZE);
+ srmmu_free_nocache(pte, SRMMU_PTE_TABLE_SIZE);
}
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 264e76ceccf6..a8dafc550985 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -67,7 +67,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_pgtable(PMD) ((pte_t *)__pmd_page(PMD))
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
void pgtable_free(void *table, bool is_page);
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 0de659ae0ba4..632cdb959542 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -11,6 +11,16 @@
#include <linux/const.h>
+#define PMD_SHIFT 18
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
+
+#define PGDIR_SHIFT 24
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
+
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>
@@ -34,17 +44,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()
-#define PMD_SHIFT 22
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
-#define PGDIR_SHIFT SRMMU_PGDIR_SHIFT
-#define PGDIR_SIZE SRMMU_PGDIR_SIZE
-#define PGDIR_MASK SRMMU_PGDIR_MASK
-#define PTRS_PER_PTE 1024
-#define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
-#define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
-#define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
+#define PTRS_PER_PTE 64
+#define PTRS_PER_PMD 64
+#define PTRS_PER_PGD 256
+#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
#define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4)
@@ -132,6 +135,23 @@ static inline struct page *pmd_page(pmd_t pmd)
return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}
+static inline unsigned long __pmd_page(pmd_t pmd)
+{
+ unsigned long v;
+
+ if (srmmu_device_memory(pmd_val(pmd)))
+ BUG();
+
+ v = pmd_val(pmd) & SRMMU_PTD_PMASK;
+ return (unsigned long)__nocache_va(v << 4);
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
+ return (unsigned long)__nocache_va(v << 4);
+}
+
static inline unsigned long pud_page_vaddr(pud_t pud)
{
if (srmmu_device_memory(pud_val(pud))) {
@@ -179,9 +199,7 @@ static inline int pmd_none(pmd_t pmd)
static inline void pmd_clear(pmd_t *pmdp)
{
- int i;
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+ set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}
static inline int pud_none(pud_t pud)
@@ -303,30 +321,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pgprot_val(newprot));
}
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t * dir, unsigned long address)
-{
- return (pmd_t *) pud_page_vaddr(*dir) +
- ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-}
-
-/* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);
-
-/*
- * This shortcut works on sun4m (and sun4d) because the nocache area is static.
- */
-#define pte_offset_map(d, a) pte_offset_kernel(d,a)
-#define pte_unmap(pte) do{}while(0)
-
struct seq_file;
void mmu_info(struct seq_file *m);
@@ -415,7 +409,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range io_remap_pfn_range
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
@@ -428,8 +422,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
__changed; \
})
-#include <asm-generic/pgtable.h>
-
#endif /* !(__ASSEMBLY__) */
#define VMALLOC_START _AC(0xfe600000,UL)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index da527b27cf7d..7ef6affa105e 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -835,7 +835,7 @@ static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
#define pud_set(pudp, pmdp) \
(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
-static inline unsigned long __pmd_page(pmd_t pmd)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
unsigned long pfn;
@@ -855,7 +855,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
-#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)
@@ -889,31 +889,6 @@ static inline unsigned long pud_pfn(pud_t pud)
#define p4d_set(p4dp, pudp) \
(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the third-level page table.. */
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(p4dp, address) \
- ((pud_t *) p4d_page_vaddr(*(p4dp)) + pud_index(address))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pudp, address) \
- ((pmd_t *) pud_page_vaddr(*(pudp)) + \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) __pmd_page(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) do { } while (0)
-
/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;
@@ -1078,7 +1053,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range io_remap_pfn_range
static inline unsigned long __untagged_addr(unsigned long start)
{
@@ -1122,7 +1097,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
#define pte_access_permitted pte_access_permitted
#include <asm/tlbflush.h>
-#include <asm-generic/pgtable.h>
/* We provide our own get_unmapped_area to cope with VA holes and
* SHM area cache aliasing for userland.
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 32a508897501..7708d015712b 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -17,39 +17,9 @@
/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS 65536
-/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
-#define SRMMU_REAL_PMD_SHIFT 18
-#define SRMMU_REAL_PMD_SIZE (1UL << SRMMU_REAL_PMD_SHIFT)
-#define SRMMU_REAL_PMD_MASK (~(SRMMU_REAL_PMD_SIZE-1))
-#define SRMMU_REAL_PMD_ALIGN(__addr) (((__addr)+SRMMU_REAL_PMD_SIZE-1)&SRMMU_REAL_PMD_MASK)
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define SRMMU_PGDIR_SHIFT 24
-#define SRMMU_PGDIR_SIZE (1UL << SRMMU_PGDIR_SHIFT)
-#define SRMMU_PGDIR_MASK (~(SRMMU_PGDIR_SIZE-1))
-#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)
-
-#define SRMMU_REAL_PTRS_PER_PTE 64
-#define SRMMU_REAL_PTRS_PER_PMD 64
-#define SRMMU_PTRS_PER_PGD 256
-
-#define SRMMU_REAL_PTE_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PTE*4)
-#define SRMMU_PMD_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PMD*4)
-#define SRMMU_PGD_TABLE_SIZE (SRMMU_PTRS_PER_PGD*4)
-
-/*
- * To support pagetables in highmem, Linux introduces APIs which
- * return struct page* and generally manipulate page tables when
- * they are not mapped into kernel space. Our hardware page tables
- * are smaller than pages. We lump hardware tabes into big, page sized
- * software tables.
- *
- * PMD_SHIFT determines the size of the area a second-level page table entry
- * can map, and our pmd_t is 16 times larger than normal. The values which
- * were once defined here are now generic for 4c and srmmu, so they're
- * found in pgtable.h.
- */
-#define SRMMU_PTRS_PER_PMD 4
+#define SRMMU_PTE_TABLE_SIZE (PTRS_PER_PTE*4)
+#define SRMMU_PMD_TABLE_SIZE (PTRS_PER_PMD*4)
+#define SRMMU_PGD_TABLE_SIZE (PTRS_PER_PGD*4)
/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK 0x3
diff --git a/arch/sparc/include/asm/viking.h b/arch/sparc/include/asm/viking.h
index 0bbefd184221..08ffc605035f 100644
--- a/arch/sparc/include/asm/viking.h
+++ b/arch/sparc/include/asm/viking.h
@@ -10,6 +10,7 @@
#include <asm/asi.h>
#include <asm/mxcc.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
/* Bits in the SRMMU control register for GNU/Viking modules.
@@ -227,7 +228,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val)
: "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- vaddr &= ~SRMMU_PGDIR_MASK;
+ vaddr &= ~PGDIR_MASK;
vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8);
}
@@ -237,7 +238,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val)
: "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- vaddr &= ~SRMMU_REAL_PMD_MASK;
+ vaddr &= ~PMD_MASK;
vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8);
}
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 4401dee30018..79cd6ccfeac0 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -11,9 +11,9 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/spitfire.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/page.h>
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 1cb62bfeaa1f..f07ea88a83af 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -50,7 +50,7 @@ struct cpuinfo_tree {
/* Offsets into nodes[] for each level of the tree */
struct cpuinfo_level level[CPUINFO_LVL_MAX];
- struct cpuinfo_node nodes[0];
+ struct cpuinfo_node nodes[];
};
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 75232cbd58bf..522e5b51050c 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -87,7 +87,7 @@ struct ds_reg_req {
__u64 handle;
__u16 major;
__u16 minor;
- char svc_id[0];
+ char svc_id[];
};
struct ds_reg_ack {
@@ -701,12 +701,12 @@ struct ds_var_hdr {
struct ds_var_set_msg {
struct ds_var_hdr hdr;
- char name_and_value[0];
+ char name_and_value[];
};
struct ds_var_delete_msg {
struct ds_var_hdr hdr;
- char name[0];
+ char name[];
};
struct ds_var_resp {
@@ -989,7 +989,7 @@ struct ds_queue_entry {
struct ds_info *dp;
int req_len;
int __pad;
- u64 req[0];
+ u64 req[];
};
static void process_ds_work(void)
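
The "[0]" members replaced above were GNU-style zero-length arrays; the hunks switch them to C99 flexible array members, which sizeof() ignores and which tooling can bounds-check. A minimal userspace sketch of the allocation pattern (the struct and helper below are hypothetical, for illustration only, using malloc in place of kernel allocators):

#include <stdlib.h>
#include <string.h>

struct ds_msg_example {            /* hypothetical, mirrors the shape above */
	unsigned long handle;
	unsigned short major;
	unsigned short minor;
	char svc_id[];             /* flexible array member, was svc_id[0] */
};

static struct ds_msg_example *ds_msg_alloc(const char *svc)
{
	size_t len = strlen(svc) + 1;
	/* sizeof(*m) does not count svc_id[]; add the payload explicitly */
	struct ds_msg_example *m = malloc(sizeof(*m) + len);

	if (m)
		memcpy(m->svc_id, svc, len);
	return m;
}

int main(void)
{
	free(ds_msg_alloc("dr-cpu"));
	return 0;
}
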
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 4d3696973325..f636acf3312f 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <linux/errno.h>
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
@@ -20,7 +21,6 @@
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index e55f2c075165..be30c8d4cc73 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -24,7 +24,7 @@
#include <asm/winmacro.h>
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
-#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
+#include <asm/pgtable.h> /* PGDIR_SHIFT */
#include <asm/export.h>
.data
@@ -273,7 +273,7 @@ not_a_sun4:
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */
- add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
+ add %o1, KERNBASE >> (PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS
@@ -317,7 +317,7 @@ srmmu_not_viking:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
@@ -341,7 +341,7 @@ leon_remap:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
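
The KERNBASE >> (PGDIR_SHIFT - 2) expressions above fold the multiply-by-4 (each level-1 PGD entry is 4 bytes on sparc32) into the shift that extracts the PGD index, so the assembly gets a byte offset in one operation. A small check of that arithmetic, assuming the usual sparc32 KERNBASE of 0xf0000000 (an assumed value, not taken from this patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long KERNBASE = 0xf0000000UL; /* assumed sparc32 value */
	const int PGDIR_SHIFT = 24;                  /* from this patch */

	unsigned long index  = KERNBASE >> PGDIR_SHIFT;  /* 0xf0 */
	unsigned long offset = index * 4;                /* 4-byte pgd entries */

	/* shifting by two bits less folds the *4 into the shift */
	assert(offset == (KERNBASE >> (PGDIR_SHIFT - 2)));
	printf("pgd byte offset for KERNBASE: 0x%lx\n", offset);
	return 0;
}
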
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 540bfc98472c..c5ff2472b3d9 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -12,13 +12,13 @@
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 1cf91c05e275..6bfaf73ce8a0 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -7,10 +7,10 @@
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tsb.h>
.text
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index da6f1486318e..41829c024f92 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -39,7 +39,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include <asm/asi.h>
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 5ed43828e078..5d45b6d766d6 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -21,9 +21,9 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>
@@ -593,7 +593,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char *
pdev = to_pci_dev(dev);
dp = pdev->dev.of_node;
- return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
+ return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
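
The snprintf()-to-scnprintf() switch matters because a sysfs show() routine must return the number of bytes actually placed in the PAGE_SIZE buffer, while snprintf() reports the length the output would have had without truncation. A userspace sketch of the difference (buffer size and path string chosen purely for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	int would_be = snprintf(buf, sizeof(buf), "%s\n", "/pci@1f,4000/scsi@3");

	/* snprintf() returns the untruncated length... */
	printf("snprintf returned %d\n", would_be);
	/* ...but only this much was stored, which is what scnprintf-style
	 * accounting (and a sysfs show() routine) must report. */
	printf("actually written: %zu\n", strlen(buf));
	return 0;
}
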
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 26cca65e9246..13cb5638fab8 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -35,7 +35,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
@@ -145,10 +144,10 @@ void show_regs(struct pt_regs *r)
}
/*
- * The show_stack is an external API which we do not use ourselves.
+ * The show_stack() is external API which we do not use ourselves.
* The oops is printed in die_if_kernel.
*/
-void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long pc, fp;
unsigned long task_base;
@@ -170,11 +169,11 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
break;
rw = (struct reg_window32 *) fp;
pc = rw->ins[7];
- printk("[%08lx : ", pc);
- printk("%pS ] ", (void *) pc);
+ printk("%s[%08lx : ", loglvl, pc);
+ printk("%s%pS ] ", loglvl, (void *) pc);
fp = rw->ins[6];
} while (++count < 16);
- printk("\n");
+ printk("%s\n", loglvl);
}
/*
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4282116e28e7..54945eacd3b5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -41,7 +41,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
@@ -195,7 +194,7 @@ void show_regs(struct pt_regs *regs)
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
- show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
@@ -313,7 +312,7 @@ static void sysrq_handle_globreg(int key)
trigger_all_cpu_backtrace();
}
-static struct sysrq_key_op sparc_globalreg_op = {
+static const struct sysrq_key_op sparc_globalreg_op = {
.handler = sysrq_handle_globreg,
.help_msg = "global-regs(y)",
.action_msg = "Show Global CPU Regs",
@@ -388,7 +387,7 @@ static void sysrq_handle_globpmu(int key)
pmu_snapshot_all_cpus();
}
-static struct sysrq_key_op sparc_globalpmu_op = {
+static const struct sysrq_key_op sparc_globalpmu_op = {
.handler = sysrq_handle_globpmu,
.help_msg = "global-pmu(x)",
.action_msg = "Show Global PMU Regs",
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 16b50afe7b52..47eb315d411c 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -23,7 +23,6 @@
#include <linux/elf.h>
#include <linux/tracehook.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -46,82 +45,79 @@ enum sparc_regset {
REGSET_FP,
};
+static int regwindow32_get(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
+
+ if (target == current) {
+ if (copy_from_user(uregs, (void __user *)reg_window, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int regwindow32_set(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
+
+ if (target == current) {
+ if (copy_to_user((void __user *)reg_window, uregs, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE | FOLL_WRITE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
+
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- unsigned long *k = kbuf;
- unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- *k++ = regs->u_regs[pos++];
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (put_user(regs->u_regs[pos++], u++))
- return -EFAULT;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, &reg_window[pos++]) ||
- put_user(reg, u++))
- return -EFAULT;
- }
- }
- while (count > 0) {
- switch (pos) {
- case 32: /* PSR */
- reg = regs->psr;
- break;
- case 33: /* PC */
- reg = regs->pc;
- break;
- case 34: /* NPC */
- reg = regs->npc;
- break;
- case 35: /* Y */
- reg = regs->y;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- reg = 0;
- break;
- default:
- goto finish;
- }
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- if (kbuf)
- *k++ = reg;
- else if (put_user(reg, u++))
+ if (pos < 32 * sizeof(u32)) {
+ if (regwindow32_get(target, regs, uregs))
return -EFAULT;
- pos++;
- count--;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret || !count)
+ return ret;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ uregs[0] = regs->psr;
+ uregs[1] = regs->pc;
+ uregs[2] = regs->npc;
+ uregs[3] = regs->y;
+ uregs[4] = 0; /* WIM */
+ uregs[5] = 0; /* TBR */
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 32 * sizeof(u32), 38 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
@@ -130,82 +126,58 @@ static int genregs32_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- const unsigned long *k = kbuf;
- const unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ u32 psr;
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- regs->u_regs[pos++] = *k++;
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (put_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (get_user(reg, u++))
- return -EFAULT;
- regs->u_regs[pos++] = reg;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, u++) ||
- put_user(reg, &reg_window[pos++]))
- return -EFAULT;
- }
- }
- while (count > 0) {
- unsigned long psr;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- if (kbuf)
- reg = *k++;
- else if (get_user(reg, u++))
+ if (pos < 32 * sizeof(u32)) {
+ if (regwindow32_get(target, regs, uregs))
return -EFAULT;
-
- switch (pos) {
- case 32: /* PSR */
- psr = regs->psr;
- psr &= ~(PSR_ICC | PSR_SYSCALL);
- psr |= (reg & (PSR_ICC | PSR_SYSCALL));
- regs->psr = psr;
- break;
- case 33: /* PC */
- regs->pc = reg;
- break;
- case 34: /* NPC */
- regs->npc = reg;
- break;
- case 35: /* Y */
- regs->y = reg;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- break;
- default:
- goto finish;
- }
-
- pos++;
- count--;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret)
+ return ret;
+ if (regwindow32_set(target, regs, uregs))
+ return -EFAULT;
+ if (!count)
+ return 0;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
-
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &psr,
+ 32 * sizeof(u32), 33 * sizeof(u32));
+ if (ret)
+ return ret;
+ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
+ (psr & (PSR_ICC | PSR_SYSCALL));
+ if (!count)
+ return 0;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 33 * sizeof(u32), 34 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->npc,
+ 34 * sizeof(u32), 35 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->y,
+ 35 * sizeof(u32), 36 * sizeof(u32));
+ if (ret || !count)
+ return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ 36 * sizeof(u32), 38 * sizeof(u32));
}
static int fpregs32_get(struct task_struct *target,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index c9d41a96468f..7122efb4b1cc 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -32,7 +32,6 @@
#include <linux/context_tracking.h>
#include <asm/asi.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
@@ -572,19 +571,13 @@ static int genregs32_get(struct task_struct *target,
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
- &reg_window[pos],
+ &reg_window[pos++],
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
- if (access_process_vm(target,
- (unsigned long) u,
- &reg, sizeof(reg),
- FOLL_FORCE | FOLL_WRITE)
- != sizeof(reg))
+ if (put_user(reg, u++))
return -EFAULT;
- pos++;
- u++;
}
}
}
@@ -684,12 +677,7 @@ static int genregs32_set(struct task_struct *target,
}
} else {
for (; count > 0 && pos < 32; count--) {
- if (access_process_vm(target,
- (unsigned long)
- u,
- &reg, sizeof(reg),
- FOLL_FORCE)
- != sizeof(reg))
+ if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 5d1bcfce05d8..6d07b85b9e24 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -40,7 +40,6 @@
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/vaddrs.h>
#include <asm/mbus.h>
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 75e3992203b6..f765fda871eb 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -39,7 +39,6 @@
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 2a734ecd0a40..e2c6f0abda00 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/psrcompat.h>
#include <asm/fpumacro.h>
#include <asm/visasm.h>
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 42c3de313fd6..3b005b6c3e0f 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h> /* flush_sig_insns */
#include <asm/switch_to.h>
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 69ae814b7e90..6937339a272c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -25,7 +25,6 @@
#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e078680a1768..76ce290c67cf 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -30,7 +30,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 80f20b3808ee..0085e28bf019 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -43,7 +43,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index c01767a0480e..91b61f012d19 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -12,11 +12,11 @@
#include <linux/slab.h>
#include <linux/sched/debug.h>
+#include <linux/pgtable.h>
#include <asm/timer.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index b5da3bfdc225..f84a02ab6bf9 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -22,7 +22,6 @@
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
-#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index fe59122d257d..51bf1eb92a36 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -6,6 +6,7 @@
*/
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
@@ -13,7 +14,6 @@
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 4ceecad556a9..247a0d9683b2 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -18,12 +18,12 @@
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/pgtable.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/traps.h>
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 27778b65a965..d92e5eaa4c1d 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
@@ -29,7 +30,6 @@
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/fpumacro.h>
@@ -2452,7 +2452,7 @@ static void user_instruction_dump(unsigned int __user *pc)
printk("\n");
}
-void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long fp, ksp;
struct thread_info *tp;
@@ -2476,7 +2476,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = ksp + STACK_BIAS;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
@@ -2497,14 +2497,14 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = (unsigned long)sf->fp + STACK_BIAS;
}
- printk(" [%016lx] %pS\n", pc, (void *) pc);
+ print_ip_sym(loglvl, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
struct ftrace_ret_stack *ret_stack;
ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
if (ret_stack) {
pc = ret_stack->ret;
- printk(" [%016lx] %pS\n", pc, (void *) pc);
+ print_ip_sym(loglvl, pc);
graph++;
}
}
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index c7cad9b7bba7..4f57056ed463 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -193,7 +193,7 @@ show_pciobppath_attr(struct device *dev, struct device_attribute *attr,
vdev = to_vio_dev(dev);
dp = vdev->dp;
- return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
+ return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH,
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 8a6c783a6301..302d3454a994 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -5,10 +5,10 @@
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/pgtable.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index c088e871e8e3..5ebcfd479f4f 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -8,7 +8,7 @@
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index f6e0e601f857..cfef656eda0f 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -25,7 +25,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
@@ -196,7 +195,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!from_user && address >= PAGE_OFFSET)
goto bad_area;
@@ -263,7 +262,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -272,7 +271,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -280,7 +279,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -329,7 +328,7 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (from_user) {
pagefault_out_of_memory();
return;
@@ -337,7 +336,7 @@ out_of_memory:
goto no_context;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
if (!from_user)
goto no_context;
@@ -391,7 +390,7 @@ static void force_user_fault(unsigned long address, int write)
code = SEGV_MAPERR;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -416,15 +415,15 @@ good_area:
case VM_FAULT_OOM:
goto do_sigbus;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
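
The fault handler changes above are a mechanical conversion from open-coded rw_semaphore calls on mmap_sem to the mmap locking API, which is why the replacements are one-for-one. A simplified sketch of the wrappers as assumed here (kernel types such as struct mm_struct and the rwsem primitives are taken as given; this is not the literal include/linux/mmap_lock.h):

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_lock);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_lock) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}
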
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index c0c0dd471b6b..a3806614e4dc 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -27,7 +27,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
@@ -71,7 +70,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
}
/*
- * We now make sure that mmap_sem is held in all paths that call
+ * We now make sure that mmap_lock is held in all paths that call
* this. Additionally, to prevent kswapd from ripping ptes from
* under us, raise interrupts around the time that we look at the
* pte, kswapd will have to wait to get his smp ipi response from
@@ -319,7 +318,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
@@ -327,7 +326,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
if (fault_code & FAULT_CODE_BAD_RA)
@@ -451,7 +450,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -459,7 +458,7 @@ good_area:
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -490,7 +489,7 @@ exit_exception:
*/
bad_area:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -502,7 +501,7 @@ handle_kernel_fault:
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
goto exit_exception;
@@ -515,7 +514,7 @@ intr_or_no_mm:
do_sigbus:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index d4a80adea7e5..d1fc9a7b7d78 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -32,37 +32,21 @@
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>
-pgprot_t kmap_prot;
-
static pte_t *kmap_pte;
void __init kmap_init(void)
{
- unsigned long address;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *dir;
-
- address = __fix_to_virt(FIX_KMAP_BEGIN);
- p4d = p4d_offset(pgd_offset_k(address), address);
- pud = pud_offset(p4d, address);
- dir = pmd_offset(pud, address);
+ unsigned long address = __fix_to_virt(FIX_KMAP_BEGIN);
/* cache the first kmap pte */
- kmap_pte = pte_offset_kernel(dir, address);
- kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
+ kmap_pte = virt_to_kpte(address);
}
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
long idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -77,7 +61,7 @@ void *kmap_atomic(struct page *page)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
/* XXX Fix - Anton */
#if 0
__flush_tlb_one(vaddr);
@@ -87,18 +71,15 @@ void *kmap_atomic(struct page *page)
return (void*) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
+ if (vaddr < FIXADDR_START)
return;
- }
type = kmap_atomic_idx();
@@ -131,7 +112,5 @@ void __kunmap_atomic(void *kvaddr)
#endif
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
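
The highmem.c rework follows the cross-architecture kmap consolidation: generic code now handles preemption/pagefault disabling and the lowmem short-cut, and only real highmem pages reach the architecture's kmap_atomic_high_prot()/kunmap_atomic_high() hooks defined above. An approximate sketch of the generic wrapper as assumed here (kernel types taken as given; not the literal generic header):

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);   /* lowmem: already mapped */
	return kmap_atomic_high_prot(page, prot);
}
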
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7b9fa861b67c..ec423b5f17dd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -14,7 +14,6 @@
#include <asm/mman.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 66885a8dc50a..6c2521e85a42 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/asi.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
#include <linux/init.h>
@@ -293,7 +294,7 @@ hypersparc_flush_tlb_range:
cmp %o3, -1
be hypersparc_flush_tlb_range_out
#endif
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 3cb3dffcbcdc..eb2946b1df8a 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -29,7 +29,6 @@
#include <asm/sections.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/setup.h>
#include <asm/tlb.h>
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5774529ceb43..02e6e5e0f106 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -31,7 +31,6 @@
#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
@@ -504,11 +503,7 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
if (kaddr >= PAGE_OFFSET)
paddr = kaddr & mask;
else {
- pgd_t *pgdp = pgd_offset_k(kaddr);
- p4d_t *p4dp = p4d_offset(pgdp, kaddr);
- pud_t *pudp = pud_offset(p4dp, kaddr);
- pmd_t *pmdp = pmd_offset(pudp, kaddr);
- pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+ pte_t *ptep = virt_to_kpte(kaddr);
paddr = pte_val(*ptep) & mask;
}
@@ -1649,29 +1644,29 @@ bool kern_addr_valid(unsigned long addr)
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
- return 0;
+ return false;
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
- return 0;
+ return false;
pud = pud_offset(p4d, addr);
if (pud_none(*pud))
- return 0;
+ return false;
if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
- return 0;
+ return false;
if (pmd_large(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
- return 0;
+ return false;
return pfn_valid(pte_pfn(*pte));
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 289276b99b01..bfcc04bfce54 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -10,14 +10,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
-#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
@@ -242,21 +240,15 @@ static void *iounit_alloc(struct device *dev, size_t len,
while(addr < end) {
page = va;
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
long i;
- pgdp = pgd_offset(&init_mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
+ pmdp = pmd_off_k(addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
-
+
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
iopte = iounit->page_table + i;
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index b00dde13681b..35b002eb312e 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -12,13 +12,11 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
@@ -350,9 +348,6 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
while(addr < end) {
page = va;
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -363,10 +358,7 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
else
__flush_page_to_ram(page);
- pgdp = pgd_offset(&init_mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
+ pmdp = pmd_off_k(addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 7c2278dd308d..0070f8b9a753 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -136,36 +136,8 @@ static void msi_set_sync(void)
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
- unsigned long ptp; /* Physical address, shifted right by 4 */
- int i;
-
- ptp = __nocache_pa(ptep) >> 4;
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
- ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
- }
-}
-
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
-{
- unsigned long ptp; /* Physical address, shifted right by 4 */
- int i;
-
- ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
- ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
- }
-}
-
-/* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
-{
- void *pte;
-
- pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
- return (pte_t *) pte +
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+ unsigned long ptp = __nocache_pa(ptep) >> 4;
+ set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}
/*
@@ -175,18 +147,18 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
*/
static void *__srmmu_get_nocache(int size, int align)
{
- int offset;
+ int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
unsigned long addr;
- if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+ if (size < minsz) {
printk(KERN_ERR "Size 0x%x too small for nocache request\n",
size);
- size = SRMMU_NOCACHE_BITMAP_SHIFT;
+ size = minsz;
}
- if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
- printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",
+ if (size & (minsz - 1)) {
+ printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
size);
- size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
+ size += minsz - 1;
}
BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
@@ -376,31 +348,33 @@ pgd_t *get_pgd_fast(void)
*/
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- unsigned long pte;
+ pte_t *ptep;
struct page *page;
- if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
- return NULL;
- page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
- if (!pgtable_pte_page_ctor(page)) {
- __free_page(page);
+ if ((ptep = pte_alloc_one_kernel(mm)) == 0)
return NULL;
+ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+ spin_lock(&mm->page_table_lock);
+ if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
+ page_ref_dec(page);
+ ptep = NULL;
}
- return page;
+ spin_unlock(&mm->page_table_lock);
+
+ return ptep;
}
-void pte_free(struct mm_struct *mm, pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
- unsigned long p;
+ struct page *page;
- pgtable_pte_page_dtor(pte);
- p = (unsigned long)page_address(pte); /* Cached address (for test) */
- if (p == 0)
- BUG();
- p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
+ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+ spin_lock(&mm->page_table_lock);
+ if (page_ref_dec_return(page) == 1)
+ pgtable_pte_page_dtor(page);
+ spin_unlock(&mm->page_table_lock);
- /* free non cached virtual address*/
- srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
+ srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}
/* context handling - a dynamically sized pool is used */
@@ -822,13 +796,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
what = 0;
addr = start - PAGE_SIZE;
- if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
- if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
+ if (!(start & ~(PMD_MASK))) {
+ if (srmmu_probe(addr + PMD_SIZE) == probed)
what = 1;
}
- if (!(start & ~(SRMMU_PGDIR_MASK))) {
- if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
+ if (!(start & ~(PGDIR_MASK))) {
+ if (srmmu_probe(addr + PGDIR_SIZE) == probed)
what = 2;
}
@@ -837,7 +811,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pudp = pud_offset(p4dp, start);
if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
- start += SRMMU_PGDIR_SIZE;
+ start += PGDIR_SIZE;
continue;
}
if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
@@ -849,6 +823,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
+ if (what == 1) {
+ *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
+ start += PMD_SIZE;
+ continue;
+ }
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
@@ -856,19 +835,6 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
memset(__nocache_fix(ptep), 0, PTE_SIZE);
pmd_set(__nocache_fix(pmdp), ptep);
}
- if (what == 1) {
- /* We bend the rule where all 16 PTPs in a pmd_t point
- * inside the same PTE page, and we leak a perfectly
- * good hardware PTE piece. Alternatives seem worse.
- */
- unsigned int x; /* Index of HW PMD in soft cluster */
- unsigned long *val;
- x = (start >> PMD_SHIFT) & 15;
- val = &pmdp->pmdv[x];
- *(unsigned long *)__nocache_fix(val) = probed;
- start += SRMMU_REAL_PMD_SIZE;
- continue;
- }
ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
*(pte_t *)__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE;
@@ -890,9 +856,9 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
- unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
- unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
- unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
+ unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
+ unsigned long vstart = (vbase & PGDIR_MASK);
+ unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
/* Map "low" memory only */
const unsigned long min_vaddr = PAGE_OFFSET;
const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
@@ -905,7 +871,7 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
while (vstart < vend) {
do_large_mapping(vstart, pstart);
- vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
+ vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
}
return vstart;
}
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3d72d2deb13b..a32a16c18617 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -10,7 +10,6 @@
#include <linux/swap.h>
#include <linux/preempt.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5edc28aa3a5..0dce4b7ff73e 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -8,9 +8,9 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index d220b6848746..70e658d107e0 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -5,8 +5,8 @@
* Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
*/
+#include <linux/pgtable.h>
#include <asm/asi.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index adaef6e7b8cf..48f062de7a7f 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/mxcc.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
@@ -157,7 +158,7 @@ viking_flush_tlb_range:
cmp %o3, -1
be 2f
#endif
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
@@ -243,7 +244,7 @@ sun4dsmp_flush_tlb_range:
ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index 9961b0f81693..cc19e09b0fa1 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -366,7 +366,7 @@ static int map_vdso(const struct vdso_image *image,
unsigned long text_start, addr = 0;
int ret = 0;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/*
* First, get an unmapped region: then randomize it, and make sure that
@@ -422,7 +422,7 @@ up_fail:
if (ret)
current->mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 275f5ffdf6f0..3f27aa3ec0a6 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -140,7 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
CLEAN_FILES += linux x.i gmon.out
-MRPROPER_DIRS += arch/$(SUBARCH)/include/generated
+MRPROPER_FILES += arch/$(SUBARCH)/include/generated
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index a290821e355c..2a249f619467 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -18,9 +18,9 @@ ubd-objs := ubd_kern.o ubd_user.o
port-objs := port_kern.o port_user.o
harddog-objs := harddog_kern.o harddog_user.o
-LDFLAGS_pcap.o := -r $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
+LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
-LDFLAGS_vde.o := -r $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
+LDFLAGS_vde.o = $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 30575bd92975..a2e680f7d39f 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -648,7 +648,7 @@ static void stack_proc(void *arg)
{
struct task_struct *task = arg;
- show_stack(task, NULL);
+ show_stack(task, NULL, KERN_INFO);
}
/*
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
index d0159082faf0..8fff93a75a92 100644
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -129,7 +129,7 @@ struct vector_private {
struct vector_estats estats;
struct sock_fprog *bpf;
- char user[0];
+ char user[];
};
extern int build_transport_data(struct vector_private *vp);
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index aa28e9eecb7b..c4a0f26b2824 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -29,6 +29,7 @@
#include <netdb.h>
#include <stdlib.h>
#include <os.h>
+#include <limits.h>
#include <um_malloc.h>
#include "vector_user.h"
@@ -42,6 +43,9 @@
#define TRANS_RAW "raw"
#define TRANS_RAW_LEN strlen(TRANS_RAW)
+#define TRANS_FD "fd"
+#define TRANS_FD_LEN strlen(TRANS_FD)
+
#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
@@ -347,6 +351,59 @@ unix_cleanup:
return NULL;
}
+static int strtofd(const char *nptr)
+{
+ long fd;
+ char *endptr;
+
+ if (nptr == NULL)
+ return -1;
+
+ errno = 0;
+ fd = strtol(nptr, &endptr, 10);
+ if (nptr == endptr ||
+ errno != 0 ||
+ *endptr != '\0' ||
+ fd < 0 ||
+ fd > INT_MAX) {
+ return -1;
+ }
+ return fd;
+}
+
+static struct vector_fds *user_init_fd_fds(struct arglist *ifspec)
+{
+ int fd = -1;
+ char *fdarg = NULL;
+ struct vector_fds *result = NULL;
+
+ fdarg = uml_vector_fetch_arg(ifspec, "fd");
+ fd = strtofd(fdarg);
+ if (fd == -1) {
+ printk(UM_KERN_ERR "fd open: bad or missing fd argument");
+ goto fd_cleanup;
+ }
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result == NULL) {
+ printk(UM_KERN_ERR "fd open: allocation failed");
+ goto fd_cleanup;
+ }
+
+ result->rx_fd = fd;
+ result->tx_fd = fd;
+ result->remote_addr_size = 0;
+ result->remote_addr = NULL;
+ return result;
+
+fd_cleanup:
+ if (fd >= 0)
+ os_close_file(fd);
+ if (result != NULL)
+ kfree(result);
+ return NULL;
+}
+
static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
{
int rxfd = -1, txfd = -1;
@@ -578,6 +635,8 @@ struct vector_fds *uml_vector_user_open(
return user_init_socket_fds(parsed, ID_L2TPV3);
if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
return user_init_unix_fds(parsed, ID_BESS);
+ if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0)
+ return user_init_fd_fds(parsed);
return NULL;
}
diff --git a/arch/um/drivers/vhost_user.h b/arch/um/drivers/vhost_user.h
index 6c71b6005177..6f147cd3c9f7 100644
--- a/arch/um/drivers/vhost_user.h
+++ b/arch/um/drivers/vhost_user.h
@@ -78,7 +78,7 @@ struct vhost_user_config {
u32 offset;
u32 size;
u32 flags;
- u8 payload[0]; /* Variable length */
+ u8 payload[]; /* Variable length */
} __packed;
struct vhost_user_vring_state {
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index be54d368e73d..351aee52aca6 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -74,7 +74,7 @@ struct virtio_uml_vq_info {
extern unsigned long long physmem_size, highmem;
-#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, __VA_ARGS__)
+#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
/* Vhost-user protocol */
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index b4deb1bfbb68..17ddd4edf875 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <asm/mmu.h>
@@ -47,9 +48,9 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- down_write_nested(&new->mmap_sem, 1);
+ mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
uml_setup_stubs(new);
- up_write(&new->mmap_sem);
+ mmap_write_unlock(new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 8a3b689e0f86..36f452957cef 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -89,10 +89,6 @@ static inline void pud_clear (pud_t *pud)
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
- pmd_index(address))
-
static inline unsigned long pte_pfn(pte_t pte)
{
return phys_to_pfn(pte_val(pte));
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index b5ddf5d98bd5..def376194dce 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
+/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
* Derived from include/asm-i386/pgtable.h
@@ -131,7 +131,7 @@ static inline int pte_none(pte_t pte)
* Undefined behaviour if not..
*/
static inline int pte_read(pte_t pte)
-{
+{
return((pte_get_bits(pte, _PAGE_USER)) &&
!(pte_get_bits(pte, _PAGE_PROTNONE)));
}
@@ -163,7 +163,7 @@ static inline int pte_newpage(pte_t pte)
}
static inline int pte_newprot(pte_t pte)
-{
+{
return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
@@ -185,31 +185,31 @@ static inline pte_t pte_mkclean(pte_t pte)
return(pte);
}
-static inline pte_t pte_mkold(pte_t pte)
-{
+static inline pte_t pte_mkold(pte_t pte)
+{
pte_clear_bits(pte, _PAGE_ACCESSED);
return(pte);
}
static inline pte_t pte_wrprotect(pte_t pte)
-{
+{
if (likely(pte_get_bits(pte, _PAGE_RW)))
pte_clear_bits(pte, _PAGE_RW);
else
return pte;
- return(pte_mknewprot(pte));
+ return(pte_mknewprot(pte));
}
static inline pte_t pte_mkread(pte_t pte)
-{
+{
if (unlikely(pte_get_bits(pte, _PAGE_USER)))
return pte;
pte_set_bits(pte, _PAGE_USER);
- return(pte_mknewprot(pte));
+ return(pte_mknewprot(pte));
}
static inline pte_t pte_mkdirty(pte_t pte)
-{
+{
pte_set_bits(pte, _PAGE_DIRTY);
return(pte);
}
@@ -220,20 +220,20 @@ static inline pte_t pte_mkyoung(pte_t pte)
return(pte);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte)
{
if (unlikely(pte_get_bits(pte, _PAGE_RW)))
return pte;
pte_set_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
+ return(pte_mknewprot(pte));
}
-static inline pte_t pte_mkuptodate(pte_t pte)
+static inline pte_t pte_mkuptodate(pte_t pte)
{
pte_clear_bits(pte, _PAGE_NEWPAGE);
if(pte_present(pte))
pte_clear_bits(pte, _PAGE_NEWPROT);
- return(pte);
+ return(pte);
}
static inline pte_t pte_mknewpage(pte_t pte)
@@ -288,53 +288,16 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
- return pte;
+ return pte;
}
/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
*
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
@@ -353,8 +316,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr) \
do { \
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 70ee60383900..ff9c62828962 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -2,6 +2,8 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
+#include <linux/mm.h>
+
#include <asm/tlbflush.h>
#include <asm-generic/cacheflush.h>
#include <asm-generic/tlb.h>
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
index 67b2e0fa92bb..e929c0966696 100644
--- a/arch/um/kernel/maccess.c
+++ b/arch/um/kernel/maccess.c
@@ -7,15 +7,13 @@
#include <linux/kernel.h>
#include <os.h>
-long probe_kernel_read(void *dst, const void *src, size_t size)
+bool probe_kernel_read_allowed(const void *src, size_t size)
{
void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
if ((unsigned long)src < PAGE_SIZE || size <= 0)
- return -EFAULT;
-
+ return false;
if (os_mincore(psrc, size + src - psrc) <= 0)
- return -EFAULT;
-
- return __probe_kernel_read(dst, src, size);
+ return false;
+ return true;
}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 401b22f14743..c2ff76c8981e 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -125,10 +125,6 @@ static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
long size = FIXADDR_USER_END - FIXADDR_USER_START;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pte_t *pte;
phys_t p;
unsigned long v, vaddr = FIXADDR_USER_START;
@@ -146,11 +142,7 @@ static void __init fixaddr_user_init( void)
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
p += PAGE_SIZE) {
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
+ pte = virt_to_kpte(vaddr);
pte_set_val(*pte, p, PAGE_READONLY);
}
#endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index cbe33af2a880..e3a2cf92a373 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -25,7 +25,6 @@
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 3f0d9a573fd6..d9961163da66 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
@@ -115,7 +114,7 @@ void uml_setup_stubs(struct mm_struct *mm)
mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
- /* dup_mmap already holds mmap_sem */
+ /* dup_mmap already holds mmap_lock */
err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
VM_READ | VM_MAYREAD | VM_EXEC |
VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index d617f8dc9c19..2dec915abe6f 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <kern_util.h>
#include <os.h>
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index c71b5ef7ea8c..acbc879d2773 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -17,7 +17,9 @@
static void _print_addr(void *data, unsigned long address, int reliable)
{
- pr_info(" [<%08lx>] %s%pS\n", address, reliable ? "" : "? ",
+ const char *loglvl = data;
+
+ printk("%s [<%08lx>] %s%pS\n", loglvl, address, reliable ? "" : "? ",
(void *)address);
}
@@ -25,9 +27,9 @@ static const struct stacktrace_ops stackops = {
.address = _print_addr
};
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
- unsigned long *sp = stack;
struct pt_regs *segv_regs = current->thread.segv_regs;
int i;
@@ -38,20 +40,19 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
if (!stack)
- sp = get_stack_pointer(task, segv_regs);
+ stack = get_stack_pointer(task, segv_regs);
- pr_info("Stack:\n");
- stack = sp;
+ printk("%sStack:\n", loglvl);
for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) {
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- pr_cont("\n");
+ printk("%s\n", loglvl);
pr_cont(" %08lx", *stack++);
}
- pr_cont("\n");
+ printk("%s\n", loglvl);
- pr_info("Call Trace:\n");
- dump_trace(current, &stackops, NULL);
- pr_info("\n");
+ printk("%sCall Trace:\n", loglvl);
+ dump_trace(current, &stackops, (void *)loglvl);
+ printk("%s\n", loglvl);
}
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 80a358c6d652..61776790cd67 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/sched/signal.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
@@ -349,8 +348,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if (ret) {
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_sem, release it such that current can terminate */
- up_write(&current->mm->mmap_sem);
+ /* We are under mmap_lock, release it such that current can terminate */
+ mmap_write_unlock(current->mm);
force_sig(SIGKILL);
do_signal(&current->thread.regs);
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 8f18cf56b3dd..2b3afa354a90 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
@@ -27,9 +26,6 @@ int handle_page_fault(unsigned long address, unsigned long ip,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
@@ -47,7 +43,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
if (is_user)
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto out;
@@ -103,10 +99,7 @@ good_area:
}
}
- pgd = pgd_offset(mm, address);
- p4d = p4d_offset(pgd, address);
- pud = pud_offset(p4d, address);
- pmd = pmd_offset(pud, address);
+ pmd = pmd_off(mm, address);
pte = pte_offset_kernel(pmd, address);
} while (!pte_present(*pte));
err = 0;
@@ -123,7 +116,7 @@ good_area:
#endif
flush_tlb_page(vma, address);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_nosemaphore:
return err;
@@ -132,7 +125,7 @@ out_of_memory:
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!is_user)
goto out_nosemaphore;
pagefault_out_of_memory();
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 0f40eccbd759..00141e70de56 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -14,7 +14,6 @@
#include <linux/sched/task.h>
#include <linux/kmsg_dump.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -362,3 +361,19 @@ void __init check_bugs(void)
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}
+
+void *text_poke(void *addr, const void *opcode, size_t len)
+{
+ /*
+ * In UML, the only reference to this function is in
+ * apply_relocate_add(), which shouldn't ever actually call this
+ * because UML doesn't have live patching.
+ */
+ WARN_ON(1);
+
+ return memcpy(addr, opcode, len);
+}
+
+void text_poke_sync(void)
+{
+}
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 26ecbd64c409..e4421dbc4c36 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
+#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -289,7 +290,7 @@ int os_write_file(int fd, const void *buf, int len)
int os_sync_file(int fd)
{
- int n = fsync(fd);
+ int n = fdatasync(fd);
if (n < 0)
return -errno;
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 41fe944005f8..11ba1839d198 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -70,7 +70,7 @@ config ARCH_PUV3
def_bool y
select CPU_UCV2
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
+ select HAVE_LEGACY_CLK
select GPIOLIB
# CONFIGs for ARCH_PUV3
diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h
index dc8c0b41538f..ff0be92ebc32 100644
--- a/arch/unicore32/include/asm/cacheflush.h
+++ b/arch/unicore32/include/asm/cacheflush.h
@@ -133,14 +133,6 @@ extern void flush_cache_page(struct vm_area_struct *vma,
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
- * flush_cache_user_range is used when we want to ensure that the
- * Harvard caches are synchronised for the user space address range.
- * This is used for the UniCore private sys_cacheflush system call.
- */
-#define flush_cache_user_range(vma, start, end) \
- __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
-
-/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
@@ -170,9 +162,6 @@ extern void flush_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_user_range(vma, page, addr, len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 3b8731b3a937..97f564c8ecba 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -9,7 +9,6 @@
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>
@@ -154,12 +153,6 @@ extern struct page *empty_zero_page;
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir, addr) (pmd_page_vaddr(*(dir)) \
- + __pte_index(addr))
-
-#define pte_offset_map(dir, addr) (pmd_page_vaddr(*(dir)) \
- + __pte_index(addr))
-#define pte_unmap(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep, pte)
@@ -222,17 +215,6 @@ PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG);
*/
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
-/* Find an entry in the third-level page table.. */
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
@@ -280,8 +262,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
#endif /* !__ASSEMBLY__ */
#endif /* __UNICORE_PGTABLE_H__ */
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
index f3812245cc00..4cdf3c846a2d 100644
--- a/arch/unicore32/kernel/hibernate.c
+++ b/arch/unicore32/kernel/hibernate.c
@@ -11,9 +11,9 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/suspend.h>
@@ -33,9 +33,11 @@ struct swsusp_arch_regs swsusp_arch_regs_cpu0;
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
pud_t *pud;
+ p4d_t *p4d;
pmd_t *pmd_table;
- pud = pud_offset(pgd, 0);
+ p4d = p4d_offset(pgd, 0);
+ pud = pud_offset(p4d, 0);
pmd_table = pmd_offset(pud, 0);
return pmd_table;
diff --git a/arch/unicore32/kernel/hibernate_asm.S b/arch/unicore32/kernel/hibernate_asm.S
index 7e7499c49089..a589bc189e24 100644
--- a/arch/unicore32/kernel/hibernate_asm.S
+++ b/arch/unicore32/kernel/hibernate_asm.S
@@ -11,9 +11,9 @@
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <generated/asm-offsets.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/assembler.h>
@ restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist)
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 717ee1b78350..67c89ef2d6ee 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -16,7 +16,6 @@
#include <linux/string.h>
#include <linux/gfp.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h
index e40d3603c7e7..967352323185 100644
--- a/arch/unicore32/kernel/setup.h
+++ b/arch/unicore32/kernel/setup.h
@@ -29,7 +29,7 @@ extern void kernel_thread_helper(void);
extern void __init early_signal_init(void);
extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, const char *loglvl);
extern void __show_regs(struct pt_regs *);
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index e24f67283864..a3ac01df1a2e 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -135,44 +135,42 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
set_fs(fs);
}
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- unsigned int fp, mode;
+ unsigned int fp;
int ok = 1;
- printk(KERN_DEFAULT "Backtrace: ");
+ printk("%sBacktrace: ", loglvl);
if (!tsk)
tsk = current;
- if (regs) {
+ if (regs)
fp = regs->UCreg_fp;
- mode = processor_mode(regs);
- } else if (tsk != current) {
+ else if (tsk != current)
fp = thread_saved_fp(tsk);
- mode = 0x10;
- } else {
+ else
asm("mov %0, fp" : "=r" (fp) : : "cc");
- mode = 0x10;
- }
if (!fp) {
- printk("no frame pointer");
+ printk("%sno frame pointer", loglvl);
ok = 0;
} else if (verify_stack(fp)) {
- printk("invalid frame pointer 0x%08x", fp);
+ printk("%sinvalid frame pointer 0x%08x", loglvl, fp);
ok = 0;
} else if (fp < (unsigned long)end_of_stack(tsk))
- printk("frame pointer underflow");
- printk("\n");
+ printk("%sframe pointer underflow", loglvl);
+ printk("%s\n", loglvl);
if (ok)
- c_backtrace(fp, mode);
+ c_backtrace(fp, loglvl);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp,
+ const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
@@ -200,7 +198,7 @@ static int __die(const char *str, int err, struct thread_info *thread,
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
- dump_backtrace(regs, tsk);
+ dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
diff --git a/arch/unicore32/lib/Makefile b/arch/unicore32/lib/Makefile
index 098981a01841..5af06645b8f0 100644
--- a/arch/unicore32/lib/Makefile
+++ b/arch/unicore32/lib/Makefile
@@ -10,12 +10,12 @@ lib-y += strncpy_from_user.o strnlen_user.o
lib-y += clear_user.o copy_page.o
lib-y += copy_from_user.o copy_to_user.o
-GNU_LIBC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libc.a)
+GNU_LIBC_A = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libc.a)
GNU_LIBC_A_OBJS := memchr.o memcpy.o memmove.o memset.o
GNU_LIBC_A_OBJS += strchr.o strrchr.o
GNU_LIBC_A_OBJS += rawmemchr.o # needed by strrchr.o
-GNU_LIBGCC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
+GNU_LIBGCC_A = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
GNU_LIBGCC_A_OBJS := _ashldi3.o _ashrdi3.o _lshrdi3.o
GNU_LIBGCC_A_OBJS += _divsi3.o _modsi3.o _ucmpdi2.o _umodsi3.o _udivsi3.o
diff --git a/arch/unicore32/lib/backtrace.S b/arch/unicore32/lib/backtrace.S
index f303671e2a4e..6221944b81f3 100644
--- a/arch/unicore32/lib/backtrace.S
+++ b/arch/unicore32/lib/backtrace.S
@@ -16,6 +16,7 @@
#define sv_fp v5
#define sv_pc v6
#define offset v8
+#define loglvl v9
ENTRY(__backtrace)
mov r0, fp
@@ -27,10 +28,11 @@ ENTRY(c_backtrace)
ENDPROC(__backtrace)
ENDPROC(c_backtrace)
#else
- stm.w (v4 - v8, lr), [sp-] @ Save an extra register
+ stm.w (v4 - v10, lr), [sp-] @ Save an extra register
@ so we have a location...
mov.a frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r1
1: stm.w (pc), [sp-] @ calculate offset of PC stored
ldw.w r0, [sp]+, #4 @ by stmfd for this CPU
@@ -95,9 +97,10 @@ for_each_frame:
bua for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
+ mov r1, loglvl
+ mov r2, frame
b.l printk
-no_frame: ldm.w (v4 - v8, pc), [sp]+
+no_frame: ldm.w (v4 - v10, pc), [sp]+
ENDPROC(__backtrace)
ENDPROC(c_backtrace)
@@ -128,8 +131,11 @@ ENDPROC(c_backtrace)
add v7, v7, #1
cxor.a v7, #6
cmoveq v7, #1
- cmoveq r1, #'\n'
- cmovne r1, #' '
+ bne 201f
+ adr r0, .Lcr
+ mov r1, loglvl
+ b.l printk
+201:
ldw.w r3, [stack]+, #-4
mov r2, reg
csub.a r2, #8
@@ -141,18 +147,20 @@ ENDPROC(c_backtrace)
add r2, r2, #0x10 @ so r2 need add 16
201:
adr r0, .Lfp
+ mov r1, loglvl
b.l printk
2: sub.a reg, reg, #1
bns 1b
cxor.a v7, #0
beq 201f
adr r0, .Lcr
+ mov r1, loglvl
b.l printk
201: ldm.w (instr, reg, stack, v7, pc), [sp]+
-.Lfp: .asciz "%cr%d:%08x"
-.Lcr: .asciz "\n"
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lfp: .asciz "%sr%d:%08x "
+.Lcr: .asciz "%s\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0x92eec000 >> 14 @ stm.w sp, (... fp, ip, lr, pc)
.word 0x92e10000 >> 14 @ stm.w sp, ()
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
index a07ae5cc58e5..2ea98f7a4156 100644
--- a/arch/unicore32/mm/alignment.c
+++ b/arch/unicore32/mm/alignment.c
@@ -18,8 +18,8 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/unaligned.h>
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 3022104aa613..7654bddde133 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -17,7 +17,6 @@
#include <linux/sched/signal.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
@@ -224,12 +223,12 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -247,7 +246,7 @@ retry:
fault = __do_pf(mm, addr, fsr, flags, tsk);
/* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because
+ * signal first. We do not need to release the mmap_lock because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
if (fault_signal_pending(fault, regs))
@@ -264,7 +263,7 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
index 27127abc95fb..f157f5d249ab 100644
--- a/arch/unicore32/mm/mm.h
+++ b/arch/unicore32/mm/mm.h
@@ -14,16 +14,6 @@ extern int sysctl_overcommit_memory;
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
-static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
-{
- return pmd_offset((pud_t *)pgd, virt);
-}
-
-static inline pmd_t *pmd_off_k(unsigned long virt)
-{
- return pmd_off(pgd_offset_k(virt), virt);
-}
-
struct mem_type {
unsigned int prot_pte;
unsigned int prot_l1;
diff --git a/arch/unicore32/mm/proc-ucv2.S b/arch/unicore32/mm/proc-ucv2.S
index 8cc9a1b16d60..18f8c4fb21a0 100644
--- a/arch/unicore32/mm/proc-ucv2.S
+++ b/arch/unicore32/mm/proc-ucv2.S
@@ -8,10 +8,10 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include "proc-macros.S"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 67f6a40b5e93..d41812aba393 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -59,6 +59,7 @@ config X86
select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_EARLY_DEBUG if KGDB
select ARCH_HAS_ELF_RANDOMIZE
@@ -232,6 +233,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
+ select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
@@ -1515,6 +1517,7 @@ config X86_CPA_STATISTICS
config AMD_MEM_ENCRYPT
bool "AMD Secure Memory Encryption (SME) support"
depends on X86_64 && CPU_SUP_AMD
+ select DMA_COHERENT_POOL
select DYNAMIC_PHYSICAL_MASK
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 4c5355684321..fe605205b4ce 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,7 +9,9 @@
# Changed by many, many contributors over the years.
#
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Kernel does not boot with kcov instrumentation here.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5f7c262bcc99..7619742f91c9 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -17,7 +17,9 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index 9557c5a15b91..f9c5c13d979b 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -22,8 +22,8 @@
#include "misc.h"
/* These actually do the work of building the kernel identity maps. */
+#include <linux/pgtable.h>
#include <asm/init.h>
-#include <asm/pgtable.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 54e03ab26ff3..04e65f0698f6 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -10,8 +10,11 @@ ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE
include $(srctree)/lib/vdso/Makefile
KBUILD_CFLAGS += $(DISABLE_LTO)
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
@@ -29,6 +32,9 @@ vobjs32-y += vdso32/vclock_gettime.o
# files to link into kernel
obj-y += vma.o
+KASAN_SANITIZE_vma.o := y
+UBSAN_SANITIZE_vma.o := y
+KCSAN_SANITIZE_vma.o := y
OBJECT_FILES_NON_STANDARD_vma.o := n
# vDSO images to build
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 43428cc514c8..ea7c1f0b79df 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -144,7 +144,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -154,7 +154,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
zap_page_range(vma, vma->vm_start, size);
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
}
#else
@@ -268,7 +268,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
unsigned long text_start;
int ret = 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, addr,
@@ -311,7 +311,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -373,7 +373,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/*
* Check if we have already mapped vdso blob - fail to prevent
* abusing from userspace install_speciall_mapping, which may
@@ -384,11 +384,11 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
vma_is_special_mapping(vma, &vvar_mapping)) {
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return -EEXIST;
}
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return map_vdso(image, addr);
}
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9e63ee50b19a..4103665c6e03 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2166,11 +2166,6 @@ static int x86_pmu_event_init(struct perf_event *event)
return err;
}
-static void refresh_pce(void *ignored)
-{
- load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2183,13 +2178,13 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
* userspace with CR4.PCE clear while another task is still
* doing on_each_cpu_mask() to propagate CR4.PCE.
*
- * For now, this can't happen because all callers hold mmap_sem
+ * For now, this can't happen because all callers hold mmap_lock
* for write. If this changes, we'll need a different solution.
*/
- lockdep_assert_held_write(&mm->mmap_sem);
+ mmap_assert_write_locked(mm);
if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
- on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
}
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2199,7 +2194,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *m
return;
if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
- on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+ on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
}
static int x86_pmu_event_idx(struct perf_event *event)
@@ -2257,7 +2252,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
else if (x86_pmu.attr_rdpmc == 2)
static_branch_dec(&rdpmc_always_available_key);
- on_each_cpu(refresh_pce, NULL, 1);
+ on_each_cpu(cr4_update_pce, NULL, 1);
x86_pmu.attr_rdpmc = val;
}
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 9bb71abd66bd..385d3d172ee1 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -131,7 +131,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
return -ENOMEM;
/* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
return retval;
@@ -156,8 +156,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
if (retval < 0)
return retval;
- install_exec_creds(bprm);
-
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h
index 8e25bf4f323a..62da760d6d5a 100644
--- a/arch/x86/include/asm/agp.h
+++ b/arch/x86/include/asm/agp.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
/*
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 9bf2620ce817..5a42f9206138 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
#include <asm/string.h>
#include <asm/page.h>
#include <asm/checksum.h>
#include <asm-generic/asm-prototypes.h>
-#include <asm/pgtable.h>
#include <asm/special_insns.h>
#include <asm/preempt.h>
#include <asm/asm.h>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 115127c7ad28..a9ae58826074 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
*/
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic_add_return arch_atomic_add_return
/**
* arch_atomic_sub_return - subtract integer and return
@@ -178,32 +179,37 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
+#define arch_atomic_sub_return arch_atomic_sub_return
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic_xchg arch_atomic_xchg
static inline void arch_atomic_and(int i, atomic_t *v)
{
@@ -221,6 +227,7 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
static inline void arch_atomic_or(int i, atomic_t *v)
{
@@ -238,6 +245,7 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
static inline void arch_atomic_xor(int i, atomic_t *v)
{
@@ -255,6 +263,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
@@ -262,6 +271,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 52cfaecb13f9..5efd01b548d1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
/**
* arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
: "memory");
return o;
}
+#define arch_atomic64_xchg arch_atomic64_xchg
/**
* arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_add_return arch_atomic64_add_return
/*
* Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
"S" (v) : "memory");
return (int)a;
}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 95c6ceac66b9..809bd010a751 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -19,7 +19,7 @@
*/
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*/
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic64_add_return arch_atomic64_add_return
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic64_xchg arch_atomic64_xchg
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
@@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 0367efdc5b7a..35460fef39b8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,8 +201,12 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
-static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
+ /*
+ * Because this is a plain access, we need to disable KCSAN here to
+ * avoid double instrumentation via instrumented bitops.
+ */
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63feaf2a5f93..b192d917a6d0 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H
+#include <linux/mm.h>
+
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index db189945e9b0..02dabc9e77b0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -362,6 +362,7 @@
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
@@ -407,5 +408,6 @@
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 89dcc7aa7e2c..e7d2ccfdd507 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,13 +3,13 @@
#define _ASM_X86_EFI_H
#include <asm/fpu/api.h>
-#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
+#include <linux/pgtable.h>
extern unsigned long efi_fw_vendor, efi_config_table;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 69c0f892e310..452beed7892b 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -281,9 +281,29 @@ extern u32 elf_hwcap2;
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the result is:
+ *
+ *              CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
+ * ELF:              |            |                  |                |
+ * ---------------------|------------|------------------|----------------|
+ * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
+ * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
+ * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *this column has no architectural effect: NX markings are ignored by
+ * hardware, but may have behavioral effects when "wants X" collides with
+ * "cannot be X" constraints in memory permission flags, as in
+ * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
+ *
*/
#define elf_read_implies_exec(ex, executable_stack) \
- (executable_stack != EXSTACK_DISABLE_X)
+ (mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT)
struct task_struct;
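For clarity, a minimal standalone sketch of the new policy (illustrative only, not part of the patch; it assumes EXSTACK_DEFAULT marks a missing PT_GNU_STACK header and that mmap_is_ia32() reports a 32-bit mmap layout, mirroring the macro above):

/* Sketch: READ_IMPLIES_EXEC is now only granted to ia32 binaries that
 * lack PT_GNU_STACK; all other cases fall through to exec-stack/exec-none. */
static bool read_implies_exec_sketch(bool is_ia32, int executable_stack)
{
	return is_ia32 && executable_stack == EXSTACK_DEFAULT;
}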
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 28183ee3cc42..b9527a54db99 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -152,7 +152,6 @@ extern void reserve_top_address(unsigned long reserve);
extern int fixmaps_set;
extern pte_t *kmap_pte;
-#define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index a8059930056d..0f420b24e0fc 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -58,15 +58,6 @@ extern unsigned long highstart_pfn, highend_pfn;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-
-void *kmap(struct page *page);
-void kunmap(struct page *page);
-
-void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 8f1e94f29a16..a338a6deb950 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -89,6 +89,8 @@
#define INTEL_FAM6_COMETLAKE 0xA5
#define INTEL_FAM6_COMETLAKE_L 0xA6
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index 2a7b3211ee7a..bacf68c4d70e 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
void __iomem *
diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
index db7ba2feb947..0648190467ba 100644
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -6,8 +6,10 @@ unsigned long kaslr_get_random_long(const char *purpose);
#ifdef CONFIG_RANDOMIZE_MEMORY
void kernel_randomize_memory(void);
+void init_trampoline_kaslr(void);
#else
static inline void kernel_randomize_memory(void) { }
+static inline void init_trampoline_kaslr(void) {}
#endif /* CONFIG_RANDOMIZE_MEMORY */
#endif
diff --git a/arch/x86/include/asm/memtype.h b/arch/x86/include/asm/memtype.h
index 9c2447b3555d..9ca760e430b9 100644
--- a/arch/x86/include/asm/memtype.h
+++ b/arch/x86/include/asm/memtype.h
@@ -24,4 +24,7 @@ extern void memtype_free_io(resource_size_t start, resource_size_t end);
extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
+bool x86_has_pat_wp(void);
+enum page_cache_mode pgprot2cachemode(pgprot_t pgprot);
+
#endif /* _ASM_X86_MEMTYPE_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index bdeae9291e5c..0a301ad0b02f 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -45,7 +45,7 @@ typedef struct {
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
/*
* One bit per protection key says whether userspace can
- * use it or not. protected by mmap_sem.
+ * use it or not. protected by mmap_lock.
*/
u16 pkey_allocation_map;
s16 execute_only_pkey;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4e55370e48e8..47562147e70b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -24,21 +24,9 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
#endif /* !CONFIG_PARAVIRT_XXL */
#ifdef CONFIG_PERF_EVENTS
-
DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
-
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
-{
- if (static_branch_unlikely(&rdpmc_always_available_key) ||
- (!static_branch_unlikely(&rdpmc_never_available_key) &&
- atomic_read(&mm->context.perf_rdpmc_allowed)))
- cr4_set_bits_irqsoff(X86_CR4_PCE);
- else
- cr4_clear_bits_irqsoff(X86_CR4_PCE);
-}
-#else
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
+void cr4_update_pce(void *ignored);
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
@@ -225,78 +213,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
return __pkru_allows_pkey(vma_pkey(vma), write);
}
-/*
- * This can be used from process context to figure out what the value of
- * CR3 is without needing to do a (slow) __read_cr3().
- *
- * It's intended to be used for code like KVM that sneakily changes CR3
- * and needs to restore it. It needs to be used very carefully.
- */
-static inline unsigned long __get_current_cr3_fast(void)
-{
- unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
- this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
- /* For now, be very restrictive about when this can be called. */
- VM_WARN_ON(in_nmi() || preemptible());
-
- VM_BUG_ON(cr3 != __read_cr3());
- return cr3;
-}
-
-typedef struct {
- struct mm_struct *mm;
-} temp_mm_state_t;
-
-/*
- * Using a temporary mm allows to set temporary mappings that are not accessible
- * by other CPUs. Such mappings are needed to perform sensitive memory writes
- * that override the kernel memory protections (e.g., W^X), without exposing the
- * temporary page-table mappings that are required for these write operations to
- * other CPUs. Using a temporary mm also allows to avoid TLB shootdowns when the
- * mapping is torn down.
- *
- * Context: The temporary mm needs to be used exclusively by a single core. To
- * harden security IRQs must be disabled while the temporary mm is
- * loaded, thereby preventing interrupt handler bugs from overriding
- * the kernel memory protection.
- */
-static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
-{
- temp_mm_state_t temp_state;
-
- lockdep_assert_irqs_disabled();
- temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
- switch_mm_irqs_off(NULL, mm, current);
-
- /*
- * If breakpoints are enabled, disable them while the temporary mm is
- * used. Userspace might set up watchpoints on addresses that are used
- * in the temporary mm, which would lead to wrong signals being sent or
- * crashes.
- *
- * Note that breakpoints are not disabled selectively, which also causes
- * kernel breakpoints (e.g., perf's) to be disabled. This might be
- * undesirable, but still seems reasonable as the code that runs in the
- * temporary mm should be short.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_disable();
-
- return temp_state;
-}
-
-static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
-{
- lockdep_assert_irqs_disabled();
- switch_mm_irqs_off(NULL, prev_state.mm, current);
-
- /*
- * Restore the breakpoints if they were disabled before the temporary mm
- * was loaded.
- */
- if (hw_breakpoint_active())
- hw_breakpoint_restore();
-}
+unsigned long __get_current_cr3_fast(void);
#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ef452b817f44..e8370e64a155 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -128,6 +128,10 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+/* SRBDS support */
+#define MSR_IA32_MCU_OPT_CTRL 0x00000123
+#define RNGDS_MITG_DIS BIT(0)
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 694d8daf4983..5ca5d297df75 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -47,7 +47,13 @@ static inline void slow_down_io(void)
#endif
}
-static inline void __flush_tlb(void)
+void native_flush_tlb_local(void);
+void native_flush_tlb_global(void);
+void native_flush_tlb_one_user(unsigned long addr);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info);
+
+static inline void __flush_tlb_local(void)
{
PVOP_VCALL0(mmu.flush_tlb_user);
}
@@ -62,8 +68,8 @@ static inline void __flush_tlb_one_user(unsigned long addr)
PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}
-static inline void flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+static inline void __flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
{
PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 5afb5e0fe903..e896ebef8c24 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -39,23 +39,23 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
* pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
* a "*pmdp" dereference done by GCC. Problem is, in certain places
* where pte_offset_map_lock() is called, concurrent page faults are
- * allowed, if the mmap_sem is hold for reading. An example is mincore
+ * allowed, if the mmap_lock is held for reading. An example is mincore
* vs page faults vs MADV_DONTNEED. On the page fault side
* pmd_populate() rightfully does a set_64bit(), but if we're reading the
* pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
* because GCC will not read the 64-bit value of the pmd atomically.
*
* To fix this all places running pte_offset_map_lock() while holding the
- * mmap_sem in read mode, shall read the pmdp pointer using this
+ * mmap_lock in read mode, shall read the pmdp pointer using this
* function to know if the pmd is null or not, and in turn to know if
* they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
* operations.
*
- * Without THP if the mmap_sem is held for reading, the pmd can only
+ * Without THP if the mmap_lock is held for reading, the pmd can only
* transition from null to not null while pmd_read_atomic() runs. So
* we can always return atomic pmd values with this function.
*
- * With THP if the mmap_sem is held for reading, the pmd can become
+ * With THP if the mmap_lock is held for reading, the pmd can become
* trans_huge or none or point to a pte (and in turn become "stable")
* at any time under pmd_read_atomic(). We could read it truly
* atomically here with an atomic64_read() for the THP enabled case (and
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index f51d8997ed00..76aa21e8128d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -257,6 +257,7 @@ static inline int pmd_large(pmd_t pte)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/* NOTE: when testing for a huge page, also consider pmd_devmap, or use pmd_large */
static inline int pmd_trans_huge(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
@@ -801,7 +802,7 @@ static inline int pmd_present(pmd_t pmd)
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h
+ * comment in include/linux/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
@@ -836,17 +837,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-static inline unsigned long pmd_index(unsigned long address)
-{
- return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-}
-
-/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
@@ -855,25 +845,6 @@ static inline unsigned long pmd_index(unsigned long address)
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this function returns the index of the entry in the pte page which would
- * control the given virtual address
- *
- * Also define macro so we can test if pte_index is defined for arch.
- */
-#define pte_index pte_index
-static inline unsigned long pte_index(unsigned long address)
-{
- return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-}
-
-static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
-{
- return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
-}
-
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
@@ -906,12 +877,6 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
*/
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
-}
-
#define pud_leaf pud_large
static inline int pud_large(pud_t pud)
{
@@ -931,11 +896,6 @@ static inline int pud_large(pud_t pud)
}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-static inline unsigned long pud_index(unsigned long address)
-{
- return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
-}
-
#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
@@ -958,12 +918,6 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
*/
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
-/* Find an entry in the third-level page table.. */
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
-}
-
static inline int p4d_bad(p4d_t p4d)
{
unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
@@ -1036,30 +990,6 @@ static inline int pgd_none(pgd_t pgd)
#endif /* __ASSEMBLY__ */
-/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
-/*
- * a shortcut to get a pgd_t in a given mm
- */
-#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -1070,27 +1000,14 @@ void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
+
#ifdef CONFIG_X86_64
-/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
-static inline void __meminit init_trampoline_default(void)
-{
- /* Default trampoline pgd value */
- trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
-}
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
unsigned long end, pgprot_t prot);
-
-# ifdef CONFIG_RANDOMIZE_MEMORY
-void __meminit init_trampoline(void);
-# else
-# define init_trampoline init_trampoline_default
-# endif
-#else
-static inline void init_trampoline(void) { }
#endif
/* local pte updates need not use xchg for locking */
@@ -1545,7 +1462,6 @@ static inline bool arch_faults_on_old_pte(void)
return false;
}
-#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_H */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index be7b19646897..d7acae4120d5 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,35 +32,17 @@ extern pmd_t initial_pg_pmd[];
void paging_init(void);
void sync_initial_page_table(void);
-/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok( ..)'
- */
-#undef TEST_ACCESS_OK
-
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte))
-#else
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_unmap(pte) do { } while (0)
-#endif
-
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr) \
do { \
pte_clear(&init_mm, (vaddr), (ptep)); \
- __flush_tlb_one_kernel((vaddr)); \
+ flush_tlb_one_kernel((vaddr)); \
} while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index df1373415f11..1b68d24dc6a0 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -53,6 +53,12 @@ static inline void sync_initial_page_table(void) { }
struct mm_struct;
+#define mm_p4d_folded mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+ return !pgtable_l5_enabled();
+}
+
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -180,10 +186,6 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
/* PTE - Level 1 access. */
-/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte))/* NOP */
-
/*
* Encode and de-code a swap entry
*
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 2e7c442cc618..2da1f95b88d7 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -471,9 +471,6 @@ static inline pteval_t pte_flags(pte_t pte)
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
-extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
-extern uint8_t __pte2cachemode_tbl[8];
-
#define __pte2cm_idx(cb) \
((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
(((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
@@ -483,43 +480,26 @@ extern uint8_t __pte2cachemode_tbl[8];
(((i) & 2) << (_PAGE_BIT_PCD - 1)) | \
(((i) & 1) << _PAGE_BIT_PWT))
-static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
+unsigned long cachemode2protval(enum page_cache_mode pcm);
+
+static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
- if (likely(pcm == 0))
- return 0;
- return __cachemode2pte_tbl[pcm];
+ return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
-static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
+static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
- return __pgprot(cachemode2protval(pcm));
+ return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
}
-static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
{
- unsigned long masked;
-
- masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
- if (likely(masked == 0))
- return 0;
- return __pte2cachemode_tbl[__pte2cm_idx(masked)];
-}
-static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
-{
- pgprotval_t val = pgprot_val(pgprot);
- pgprot_t new;
-
- pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
- ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
- return new;
+ return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ ((val & _PAGE_PAT_LARGE) >>
+ (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
- pgprotval_t val = pgprot_val(pgprot);
- pgprot_t new;
-
- pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
- ((val & _PAGE_PAT_LARGE) >>
- (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
- return new;
+ return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
}
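As a worked example of the new out-of-line/inline split above (illustrative sketch only; it assumes the usual x86 bit positions _PAGE_BIT_PAT == 7 and _PAGE_BIT_PAT_LARGE == 12), the helpers simply relocate the PAT bit between the 4k and large-page encodings:

/* Hypothetical demo function, not part of the patch. */
static void pat_bit_relocation_demo(void)
{
	pgprotval_t v4k = _PAGE_PRESENT | _PAGE_PAT;	/* bits 0 and 7 set  */
	pgprotval_t vlg = protval_4k_2_large(v4k);	/* bits 0 and 12 set */

	WARN_ON(protval_large_2_4k(vlg) != v4k);	/* round-trips back  */
}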
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ed8ec011a9fd..84b645cc8bc9 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -75,7 +75,17 @@ extern char _text[];
static inline bool kaslr_enabled(void)
{
- return !!(boot_params.hdr.loadflags & KASLR_FLAG);
+ return IS_ENABLED(CONFIG_RANDOMIZE_MEMORY) &&
+ !!(boot_params.hdr.loadflags & KASLR_FLAG);
+}
+
+/*
+ * Apply no randomization if KASLR was disabled at boot or if KASAN
+ * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
+ */
+static inline bool kaslr_memory_enabled(void)
+{
+ return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}
static inline unsigned long kaslr_offset(void)
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 14db05086bbf..5ae5a68e469d 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -87,7 +87,7 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, char *log_lvl);
+ unsigned long *stack, const char *log_lvl);
/* The form of the top of the frame on the stack */
struct stack_frame {
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6f66d841262d..8c87a2e0b660 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -13,140 +13,51 @@
#include <asm/pti.h>
#include <asm/processor-flags.h>
-/*
- * The x86 feature is called PCID (Process Context IDentifier). It is similar
- * to what is traditionally called ASID on the RISC processors.
- *
- * We don't use the traditional ASID implementation, where each process/mm gets
- * its own ASID and flush/restart when we run out of ASID space.
- *
- * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
- * that came by on this CPU, allowing cheaper switch_mm between processes on
- * this CPU.
- *
- * We end up with different spaces for different things. To avoid confusion we
- * use different names for each of them:
- *
- * ASID - [0, TLB_NR_DYN_ASIDS-1]
- * the canonical identifier for an mm
- *
- * kPCID - [1, TLB_NR_DYN_ASIDS]
- * the value we write into the PCID part of CR3; corresponds to the
- * ASID+1, because PCID 0 is special.
- *
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
- * for KPTI each mm has two address spaces and thus needs two
- * PCID values, but we can still do with a single ASID denomination
- * for each mm. Corresponds to kPCID + 2048.
- *
- */
-
-/* There are 12 bits of space for ASIDS in CR3 */
-#define CR3_HW_ASID_BITS 12
-
-/*
- * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
- * user/kernel switches
- */
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-# define PTI_CONSUMED_PCID_BITS 1
-#else
-# define PTI_CONSUMED_PCID_BITS 0
-#endif
+void __flush_tlb_all(void);
-#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
+#define TLB_FLUSH_ALL -1UL
-/*
- * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account
- * for them being zero-based. Another -1 is because PCID 0 is reserved for
- * use by non-PCID-aware users.
- */
-#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
+void cr4_update_irqsoff(unsigned long set, unsigned long clear);
+unsigned long cr4_read_shadow(void);
-/*
- * 6 because 6 should be plenty and struct tlb_state will fit in two cache
- * lines.
- */
-#define TLB_NR_DYN_ASIDS 6
-
-/*
- * Given @asid, compute kPCID
- */
-static inline u16 kern_pcid(u16 asid)
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
- VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
-
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- /*
- * Make sure that the dynamic ASID space does not confict with the
- * bit we are using to switch between user and kernel ASIDs.
- */
- BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
-
- /*
- * The ASID being passed in here should have respected the
- * MAX_ASID_AVAILABLE and thus never have the switch bit set.
- */
- VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
-#endif
- /*
- * The dynamically-assigned ASIDs that get passed in are small
- * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
- * so do not bother to clear it.
- *
- * If PCID is on, ASID-aware code paths put the ASID+1 into the
- * PCID bits. This serves two purposes. It prevents a nasty
- * situation in which PCID-unaware code saves CR3, loads some other
- * value (with PCID == 0), and then restores CR3, thus corrupting
- * the TLB for ASID 0 if the saved ASID was nonzero. It also means
- * that any bugs involving loading a PCID-enabled CR3 with
- * CR4.PCIDE off will trigger deterministically.
- */
- return asid + 1;
+ cr4_update_irqsoff(mask, 0);
}
-/*
- * Given @asid, compute uPCID
- */
-static inline u16 user_pcid(u16 asid)
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
- u16 ret = kern_pcid(asid);
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
-#endif
- return ret;
+ cr4_update_irqsoff(0, mask);
}
-struct pgd_t;
-static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits(unsigned long mask)
{
- if (static_cpu_has(X86_FEATURE_PCID)) {
- return __sme_pa(pgd) | kern_pcid(asid);
- } else {
- VM_WARN_ON_ONCE(asid != 0);
- return __sme_pa(pgd);
- }
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cr4_set_bits_irqsoff(mask);
+ local_irq_restore(flags);
}
-static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits(unsigned long mask)
{
- VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
- /*
- * Use boot_cpu_has() instead of this_cpu_has() as this function
- * might be called during early boot. This should work even after
- * boot because all CPU's the have same capabilities:
- */
- VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
- return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cr4_clear_bits_irqsoff(mask);
+ local_irq_restore(flags);
}
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define __flush_tlb() __native_flush_tlb()
-#define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
-#endif
+#ifndef MODULE
+/*
+ * 6 because 6 should be plenty and struct tlb_state will fit in two cache
+ * lines.
+ */
+#define TLB_NR_DYN_ASIDS 6
struct tlb_context {
u64 ctx_id;
@@ -242,38 +153,7 @@ struct tlb_state {
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
-/*
- * Blindly accessing user memory from NMI context can be dangerous
- * if we're in the middle of switching the current user task or
- * switching the loaded mm. It can also be dangerous if we
- * interrupted some kernel code that was temporarily using a
- * different mm.
- */
-static inline bool nmi_uaccess_okay(void)
-{
- struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
- struct mm_struct *current_mm = current->mm;
-
- VM_WARN_ON_ONCE(!loaded_mm);
-
- /*
- * The condition we want to check is
- * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
- * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
- * is supposed to be reasonably fast.
- *
- * Instead, we check the almost equivalent but somewhat conservative
- * condition below, and we rely on the fact that switch_mm_irqs_off()
- * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
- */
- if (loaded_mm != current_mm)
- return false;
-
- VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
-
- return true;
-}
-
+bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
/* Initialize cr4 shadow for this CPU. */
@@ -282,250 +162,12 @@ static inline void cr4_init_shadow(void)
this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
-static inline void __cr4_set(unsigned long cr4)
-{
- lockdep_assert_irqs_disabled();
- this_cpu_write(cpu_tlbstate.cr4, cr4);
- __write_cr4(cr4);
-}
-
-/* Set in this cpu's CR4. */
-static inline void cr4_set_bits_irqsoff(unsigned long mask)
-{
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- if ((cr4 | mask) != cr4)
- __cr4_set(cr4 | mask);
-}
-
-/* Clear in this cpu's CR4. */
-static inline void cr4_clear_bits_irqsoff(unsigned long mask)
-{
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- if ((cr4 & ~mask) != cr4)
- __cr4_set(cr4 & ~mask);
-}
-
-/* Set in this cpu's CR4. */
-static inline void cr4_set_bits(unsigned long mask)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- cr4_set_bits_irqsoff(mask);
- local_irq_restore(flags);
-}
-
-/* Clear in this cpu's CR4. */
-static inline void cr4_clear_bits(unsigned long mask)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- cr4_clear_bits_irqsoff(mask);
- local_irq_restore(flags);
-}
-
-static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
-{
- unsigned long cr4;
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- __cr4_set(cr4 ^ mask);
-}
-
-/* Read the CR4 shadow. */
-static inline unsigned long cr4_read_shadow(void)
-{
- return this_cpu_read(cpu_tlbstate.cr4);
-}
-
-/*
- * Mark all other ASIDs as invalid, preserves the current.
- */
-static inline void invalidate_other_asid(void)
-{
- this_cpu_write(cpu_tlbstate.invalidate_other, true);
-}
-
-/*
- * Save some of cr4 feature set we're using (e.g. Pentium 4MB
- * enable and PPro Global page enable), so that any CPU's that boot
- * up after us can get the correct flags. This should only be used
- * during boot on the boot cpu.
- */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
-static inline void cr4_set_bits_and_update_boot(unsigned long mask)
-{
- mmu_cr4_features |= mask;
- if (trampoline_cr4_features)
- *trampoline_cr4_features = mmu_cr4_features;
- cr4_set_bits(mask);
-}
-
extern void initialize_tlbstate_and_flush(void);
/*
- * Given an ASID, flush the corresponding user ASID. We can delay this
- * until the next time we switch to it.
- *
- * See SWITCH_TO_USER_CR3.
- */
-static inline void invalidate_user_asid(u16 asid)
-{
- /* There is no user ASID if address space separation is off */
- if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
- return;
-
- /*
- * We only have a single ASID if PCID is off and the CR3
- * write will have flushed it.
- */
- if (!cpu_feature_enabled(X86_FEATURE_PCID))
- return;
-
- if (!static_cpu_has(X86_FEATURE_PTI))
- return;
-
- __set_bit(kern_pcid(asid),
- (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
-}
-
-/*
- * flush the entire current user mapping
- */
-static inline void __native_flush_tlb(void)
-{
- /*
- * Preemption or interrupts must be disabled to protect the access
- * to the per CPU variable and to prevent being preempted between
- * read_cr3() and write_cr3().
- */
- WARN_ON_ONCE(preemptible());
-
- invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
-
- /* If current->mm == NULL then the read_cr3() "borrows" an mm */
- native_write_cr3(__native_read_cr3());
-}
-
-/*
- * flush everything
- */
-static inline void __native_flush_tlb_global(void)
-{
- unsigned long cr4, flags;
-
- if (static_cpu_has(X86_FEATURE_INVPCID)) {
- /*
- * Using INVPCID is considerably faster than a pair of writes
- * to CR4 sandwiched inside an IRQ flag save/restore.
- *
- * Note, this works with CR4.PCIDE=0 or 1.
- */
- invpcid_flush_all();
- return;
- }
-
- /*
- * Read-modify-write to CR4 - protect it from preemption and
- * from interrupts. (Use the raw variant because this code can
- * be called from deep inside debugging code.)
- */
- raw_local_irq_save(flags);
-
- cr4 = this_cpu_read(cpu_tlbstate.cr4);
- /* toggle PGE */
- native_write_cr4(cr4 ^ X86_CR4_PGE);
- /* write old PGE again and flush TLBs */
- native_write_cr4(cr4);
-
- raw_local_irq_restore(flags);
-}
-
-/*
- * flush one page in the user mapping
- */
-static inline void __native_flush_tlb_one_user(unsigned long addr)
-{
- u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-
- asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
-
- if (!static_cpu_has(X86_FEATURE_PTI))
- return;
-
- /*
- * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
- * Just use invalidate_user_asid() in case we are called early.
- */
- if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
- invalidate_user_asid(loaded_mm_asid);
- else
- invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
-}
-
-/*
- * flush everything
- */
-static inline void __flush_tlb_all(void)
-{
- /*
- * This is to catch users with enabled preemption and the PGE feature
- * and don't trigger the warning in __native_flush_tlb().
- */
- VM_WARN_ON_ONCE(preemptible());
-
- if (boot_cpu_has(X86_FEATURE_PGE)) {
- __flush_tlb_global();
- } else {
- /*
- * !PGE -> !PCID (setup_pcid()), thus every flush is total.
- */
- __flush_tlb();
- }
-}
-
-/*
- * flush one page in the kernel mapping
- */
-static inline void __flush_tlb_one_kernel(unsigned long addr)
-{
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
-
- /*
- * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
- * paravirt equivalent. Even with PCID, this is sufficient: we only
- * use PCID if we also use global PTEs for the kernel mapping, and
- * INVLPG flushes global translations across all address spaces.
- *
- * If PTI is on, then the kernel is mapped with non-global PTEs, and
- * __flush_tlb_one_user() will flush the given address for the current
- * kernel address space and for its usermode counterpart, but it does
- * not flush it for other address spaces.
- */
- __flush_tlb_one_user(addr);
-
- if (!static_cpu_has(X86_FEATURE_PTI))
- return;
-
- /*
- * See above. We need to propagate the flush to all other address
- * spaces. In principle, we only need to propagate it to kernelmode
- * address spaces, but the extra bookkeeping we would need is not
- * worth it.
- */
- invalidate_other_asid();
-}
-
-#define TLB_FLUSH_ALL -1UL
-
-/*
* TLB flushing:
*
* - flush_tlb_all() flushes all processes TLBs
@@ -563,7 +205,15 @@ struct flush_tlb_info {
bool freed_tables;
};
-#define local_flush_tlb() __flush_tlb()
+void flush_tlb_local(void);
+void flush_tlb_one_user(unsigned long addr);
+void flush_tlb_one_kernel(unsigned long addr);
+void flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
#define flush_tlb_mm(mm) \
flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
@@ -585,9 +235,6 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info);
-
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
/*
@@ -608,12 +255,6 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
-#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, info) \
- native_flush_tlb_others(mask, info)
-
-#define paravirt_tlb_remove_table(tlb, page) \
- tlb_remove_page(tlb, (void *)(page))
-#endif
+#endif /* !MODULE */
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d8f283b9a569..18dfa07d3ef0 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -504,12 +504,12 @@ do { \
* We want the unsafe accessors to always be inlined and use
* the error labels - thus the macro games.
*/
-#define unsafe_copy_loop(dst, src, len, type, label) \
- while (len >= sizeof(type)) { \
- unsafe_put_user(*(type *)src,(type __user *)dst,label); \
- dst += sizeof(type); \
- src += sizeof(type); \
- len -= sizeof(type); \
+#define unsafe_copy_loop(dst, src, len, type, label) \
+ while (len >= sizeof(type)) { \
+ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
}
#define unsafe_copy_to_user(_dst,_src,_len,label) \
@@ -523,5 +523,21 @@ do { \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
} while (0)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __kr_err; \
+ \
+ __get_user_size(*((type *)(dst)), (__force type __user *)(src), \
+ sizeof(type), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+ __put_user_size(*((type *)(src)), (__force type __user *)(dst), \
+ sizeof(type), err_label)
+
#endif /* _ASM_X86_UACCESS_H */
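A hedged usage sketch of the new nofault helpers defined above (the caller peek_kernel_long() is hypothetical, not from the patch): control jumps to the error label if the kernel access faults instead of oopsing.

static int peek_kernel_long(long *src, long *out)
{
	__get_kernel_nofault(out, src, long, efault);	/* faulting read -> efault */
	return 0;
efault:
	return -EFAULT;
}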
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index ae587ce544f4..3db85626048f 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -8,6 +8,7 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
struct cpumask;
struct mm_struct;
+struct flush_tlb_info;
#ifdef CONFIG_X86_UV
#include <linux/efi.h>
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 9a6dc9b4ec99..fb81fea99093 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -271,6 +271,24 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return __vdso_data;
}
+static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return true;
+}
+#define vdso_clocksource_ok arch_vdso_clocksource_ok
+
+/*
+ * Clocksource read value validation to handle PV and HyperV clocksources
+ * which can be invalidated asynchronously and indicate invalidation by
+ * returning U64_MAX, which can be effectively tested by checking for a
+ * negative value after casting it to s64.
+ */
+static inline bool arch_vdso_cycles_ok(u64 cycles)
+{
+ return (s64)cycles >= 0;
+}
+#define vdso_cycles_ok arch_vdso_cycles_ok
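For illustration (standalone sketch under the assumptions in the comment above, not part of the patch): a plain TSC-style reading passes the check, while the U64_MAX marker used by invalidated PV/Hyper-V clocksources fails it.

/* Hypothetical demo, names made up. */
static bool demo_cycles_check(void)
{
	return arch_vdso_cycles_ok(0x1234ULL) &&	/* normal value: ok             */
	       !arch_vdso_cycles_ok(U64_MAX);		/* (s64)U64_MAX == -1: rejected */
}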
+
/*
* x86 specific delta calculation.
*
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index d50c7b747d8b..ba4c1b15908b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -38,11 +38,11 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <trace/events/xen.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 790ce08e41f2..5941e18edd5a 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -11,7 +11,6 @@
#include <asm/extable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 2a7c3afa62e2..e77261db2391 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,6 +28,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
+# With some compiler versions the generated code results in boot hangs, caused
+# by several compilation units. To be safe, disable all instrumentation.
+KCSAN_SANITIZE := n
+
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y
@@ -90,7 +94,6 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 683ed9e12e6b..7bdc0239a943 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -20,11 +20,11 @@
#include <linux/pci.h>
#include <linux/efi-bgrt.h>
#include <linux/serial_core.h>
+#include <linux/pgtable.h>
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
-#include <asm/pgtable.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/io.h>
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index ed3b04483972..cc1fea76aab0 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -10,9 +10,9 @@
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
+#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/realmode.h>
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 7867dfb3963e..a9195ce8265d 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -18,7 +18,6 @@
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
@@ -783,6 +782,61 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
}
}
+typedef struct {
+ struct mm_struct *mm;
+} temp_mm_state_t;
+
+/*
+ * Using a temporary mm allows setting temporary mappings that are not accessible
+ * by other CPUs. Such mappings are needed to perform sensitive memory writes
+ * that override the kernel memory protections (e.g., W^X), without exposing the
+ * temporary page-table mappings required for these write operations to
+ * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
+ * mapping is torn down.
+ *
+ * Context: The temporary mm needs to be used exclusively by a single core. To
+ * harden security, IRQs must be disabled while the temporary mm is
+ * loaded, thereby preventing interrupt handler bugs from overriding
+ * the kernel memory protection.
+ */
+static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
+{
+ temp_mm_state_t temp_state;
+
+ lockdep_assert_irqs_disabled();
+ temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ switch_mm_irqs_off(NULL, mm, current);
+
+ /*
+ * If breakpoints are enabled, disable them while the temporary mm is
+ * used. Userspace might set up watchpoints on addresses that are used
+ * in the temporary mm, which would lead to wrong signals being sent or
+ * crashes.
+ *
+ * Note that breakpoints are not disabled selectively, which also causes
+ * kernel breakpoints (e.g., perf's) to be disabled. This might be
+ * undesirable, but still seems reasonable as the code that runs in the
+ * temporary mm should be short.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_disable();
+
+ return temp_state;
+}
+
+static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
+{
+ lockdep_assert_irqs_disabled();
+ switch_mm_irqs_off(NULL, prev_state.mm, current);
+
+ /*
+ * Restore the breakpoints if they were disabled before the temporary mm
+ * was loaded.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_restore();
+}
+
__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;
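A hypothetical usage sketch of the helpers moved in above (poke_example() and its arguments are made up; poking_mm is the mm declared just before this point): the caller disables IRQs, switches to the isolated mm, writes through an address that is mapped only in that mm, and switches back. The real text_poke() additionally establishes that mapping at poking_addr first.

static void poke_example(void *dst, const void *src, size_t len)
{
	temp_mm_state_t prev;
	unsigned long flags;

	local_irq_save(flags);
	prev = use_temporary_mm(poking_mm);	/* switch to the isolated mm      */
	memcpy(dst, src, len);			/* write via the private mapping  */
	unuse_temporary_mm(prev);		/* restore the previous mm        */
	local_irq_restore(flags);
}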
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 16133819415c..17cb5b933dcf 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -33,7 +33,6 @@
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -159,7 +158,7 @@ static void dump_leak(void)
return;
dump = 1;
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_ERR);
debug_dma_dump_mappings(NULL);
}
#endif
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4b1d31be50b4..bf4acb0b5365 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2060,7 +2060,7 @@ void __init init_apic_mappings(void)
unsigned int new_apicid;
if (apic_validate_deadline_timer())
- pr_debug("TSC deadline timer available\n");
+ pr_info("TSC deadline timer available\n");
if (x2apic_mode) {
boot_cpu_physical_apicid = read_apic_id();
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index cdf45b4700f2..35edd57f064a 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -12,11 +12,11 @@
*/
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/numachip/numachip.h>
#include <asm/numachip/numachip_csr.h>
-#include <asm/pgtable.h>
#include "local.h"
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7dc4ad68eb41..dba6a83bc349 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -13,6 +13,9 @@ endif
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
+# As above, instrumenting secondary CPU boot code causes boot hangs.
+KCSAN_SANITIZE_common.o := n
+
# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o := $(nostackp)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ed54b3b21c39..0b71970d2d3d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -15,6 +15,7 @@
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
+#include <linux/pgtable.h>
#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
@@ -26,7 +27,6 @@
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
-#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
@@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
+static void __init srbds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -108,6 +109,7 @@ void __init check_bugs(void)
l1tf_select_mitigation();
mds_select_mitigation();
taa_select_mitigation();
+ srbds_select_mitigation();
/*
* As MDS and TAA mitigations are inter-related, print MDS
@@ -398,6 +400,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "SRBDS: " fmt
+
+enum srbds_mitigations {
+ SRBDS_MITIGATION_OFF,
+ SRBDS_MITIGATION_UCODE_NEEDED,
+ SRBDS_MITIGATION_FULL,
+ SRBDS_MITIGATION_TSX_OFF,
+ SRBDS_MITIGATION_HYPERVISOR,
+};
+
+static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
+
+static const char * const srbds_strings[] = {
+ [SRBDS_MITIGATION_OFF] = "Vulnerable",
+ [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+ [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
+ [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
+ [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
+};
+
+static bool srbds_off;
+
+void update_srbds_msr(void)
+{
+ u64 mcu_ctrl;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+
+ if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
+ return;
+
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+ switch (srbds_mitigation) {
+ case SRBDS_MITIGATION_OFF:
+ case SRBDS_MITIGATION_TSX_OFF:
+ mcu_ctrl |= RNGDS_MITG_DIS;
+ break;
+ case SRBDS_MITIGATION_FULL:
+ mcu_ctrl &= ~RNGDS_MITG_DIS;
+ break;
+ default:
+ break;
+ }
+
+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+}
+
+static void __init srbds_select_mitigation(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+ /*
+ * Check to see if this is one of the MDS_NO systems supporting
+ * TSX that are only exposed to SRBDS when TSX is enabled.
+ */
+ ia32_cap = x86_read_arch_cap_msr();
+ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
+ else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
+ srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
+ else if (cpu_mitigations_off() || srbds_off)
+ srbds_mitigation = SRBDS_MITIGATION_OFF;
+
+ update_srbds_msr();
+ pr_info("%s\n", srbds_strings[srbds_mitigation]);
+}
+
+static int __init srbds_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return 0;
+
+ srbds_off = !strcmp(str, "off");
+ return 0;
+}
+early_param("srbds", srbds_parse_cmdline);
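For reference, a minimal sketch restating how update_srbds_msr() above maps the selected mitigation state onto MCU_OPT_CTRL (a set RNGDS_MITG_DIS bit means the RNG scrubbing mitigation is turned off); the helper name is illustrative only:

static u64 srbds_msr_sketch(enum srbds_mitigations mode, u64 mcu_ctrl)
{
	switch (mode) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		return mcu_ctrl | RNGDS_MITG_DIS;	/* mitigation disabled          */
	case SRBDS_MITIGATION_FULL:
		return mcu_ctrl & ~RNGDS_MITG_DIS;	/* enable the microcode fix     */
	default:
		return mcu_ctrl;			/* UCODE_NEEDED/HYPERVISOR: keep */
	}
}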
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt
enum spectre_v1_mitigation {
@@ -495,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
@@ -641,15 +736,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
break;
}
- /*
- * At this point, an STIBP mode other than "off" has been set.
- * If STIBP support is not being forced, check if STIBP always-on
- * is preferred.
- */
- if (mode != SPECTRE_V2_USER_STRICT &&
- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
/* Initialize Indirect Branch Prediction Barrier */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
@@ -672,23 +758,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
+
+ spectre_v2_user_ibpb = mode;
}
- /* If enhanced IBRS is enabled no STIBP required */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ /*
+ * If enhanced IBRS is enabled or SMT is impossible, STIBP is not
+ * required.
+ */
+ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
- * If SMT is not possible or STIBP is not available clear the STIBP
- * mode.
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
+ */
+ if (mode != SPECTRE_V2_USER_STRICT &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+ /*
+ * If STIBP is not available, clear the STIBP mode.
*/
- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;
+
+ spectre_v2_user_stibp = mode;
+
set_mode:
- spectre_v2_user = mode;
- /* Only print the STIBP mode when SMT possible */
- if (smt_possible)
- pr_info("%s\n", spectre_v2_user_strings[mode]);
+ pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
@@ -921,7 +1020,7 @@ void cpu_bugs_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
@@ -1164,14 +1263,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
/*
* Indirect branch speculation is always disabled in strict
- * mode.
+ * mode. It cannot be enabled if it was force-disabled
+ * by a previous prctl() call.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+ task_spec_ib_force_disable(task))
return -EPERM;
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
@@ -1182,10 +1286,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return 0;
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
@@ -1216,7 +1322,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -1247,22 +1354,24 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;
- switch (spectre_v2_user) {
- case SPECTRE_V2_USER_NONE:
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_PRCTL:
- case SPECTRE_V2_USER_SECCOMP:
+ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return PR_SPEC_DISABLE;
+ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_STRICT:
- case SPECTRE_V2_USER_STRICT_PREFERRED:
- return PR_SPEC_DISABLE;
- default:
+ } else
return PR_SPEC_NOT_AFFECTED;
- }
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -1501,7 +1610,7 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
@@ -1528,6 +1637,11 @@ static char *ibpb_state(void)
return "";
}
+static ssize_t srbds_show_state(char *buf)
+{
+ return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -1572,6 +1686,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_ITLB_MULTIHIT:
return itlb_multihit_show_state(buf);
+ case X86_BUG_SRBDS:
+ return srbds_show_state(buf);
+
default:
break;
}
@@ -1618,4 +1735,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}
+
+ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+}
#endif
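A minimal userspace sketch (not part of the patch) of how the split spectre_v2_user_ibpb/spectre_v2_user_stibp prctl paths and the new SRBDS reporting are typically exercised; the sysfs path for srbds is an assumption based on how the other cpu_show_*() attributes are exposed under /sys/devices/system/cpu/vulnerabilities.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query current indirect-branch speculation state for this task. */
	int ib = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
	char buf[128];
	FILE *f;

	printf("indirect branch speculation state: 0x%x\n", ib);

	/* Opt this task out of cross-process branch prediction attacks. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Assumed sysfs path for the new X86_BUG_SRBDS reporting. */
	f = fopen("/sys/devices/system/cpu/vulnerabilities/srbds", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("srbds: %s", buf);
	if (f)
		fclose(f);
	return 0;
}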
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d07809286b95..8be042df12c3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
+#include <linux/pgtable.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
@@ -35,7 +36,6 @@
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
-#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
@@ -387,7 +387,30 @@ set_register:
bits_missing);
}
}
-EXPORT_SYMBOL(native_write_cr4);
+#if IS_MODULE(CONFIG_LKDTM)
+EXPORT_SYMBOL_GPL(native_write_cr4);
+#endif
+
+void cr4_update_irqsoff(unsigned long set, unsigned long clear)
+{
+ unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
+
+ lockdep_assert_irqs_disabled();
+
+ newval = (cr4 & ~clear) | set;
+ if (newval != cr4) {
+ this_cpu_write(cpu_tlbstate.cr4, newval);
+ __write_cr4(newval);
+ }
+}
+EXPORT_SYMBOL(cr4_update_irqsoff);
+
+/* Read the CR4 shadow. */
+unsigned long cr4_read_shadow(void)
+{
+ return this_cpu_read(cpu_tlbstate.cr4);
+}
+EXPORT_SYMBOL_GPL(cr4_read_shadow);
void cr4_init(void)
{
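A small kernel-context sketch (illustrative caller, not taken from this patch; assumes the helper is visible through <asm/tlbflush.h>) of how the new cr4_update_irqsoff() is meant to be called with interrupts already disabled.

#include <linux/irqflags.h>
#include <asm/tlbflush.h>
#include <asm/processor-flags.h>

/* Toggle CR4.TSD as an example; the helper only writes CR4 when bits change. */
static void example_set_tsd(bool enable)
{
	unsigned long flags;

	local_irq_save(flags);	/* lockdep_assert_irqs_disabled() in the helper */
	if (enable)
		cr4_update_irqsoff(X86_CR4_TSD, 0);
	else
		cr4_update_irqsoff(0, X86_CR4_TSD);
	local_irq_restore(flags);
}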
@@ -1050,9 +1073,30 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
{}
};
-static bool __init cpu_matches(unsigned long which)
+#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
+ INTEL_FAM6_##model, steppings, \
+ X86_FEATURE_ANY, issues)
+
+#define SRBDS BIT(0)
+
+static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS),
+ {}
+};
+
+static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
- const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+ const struct x86_cpu_id *m = x86_match_cpu(table);
return m && !!(m->driver_data & which);
}
@@ -1072,31 +1116,34 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
u64 ia32_cap = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
- if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+ !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
- if (cpu_matches(NO_SPECULATION))
+ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- if (!cpu_matches(NO_SPECTRE_V2))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
- if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+ !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+ !(ia32_cap & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
- if (cpu_matches(MSBDS_ONLY))
+ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
}
- if (!cpu_matches(NO_SWAPGS))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
setup_force_cpu_bug(X86_BUG_SWAPGS);
/*
@@ -1114,7 +1161,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
- if (cpu_matches(NO_MELTDOWN))
+ /*
+ * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
+ * in the vulnerability blacklist.
+ */
+ if ((cpu_has(c, X86_FEATURE_RDRAND) ||
+ cpu_has(c, X86_FEATURE_RDSEED)) &&
+ cpu_matches(cpu_vuln_blacklist, SRBDS))
+ setup_force_cpu_bug(X86_BUG_SRBDS);
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
@@ -1123,7 +1179,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- if (cpu_matches(NO_L1TF))
+ if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
@@ -1551,6 +1607,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
mtrr_ap_init();
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
+ update_srbds_msr();
}
static __init int setup_noclflush(char *arg)
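A short illustrative sketch (kernel context; function names are hypothetical) of the table-plus-flag pattern that cpu_matches() now follows, and of how later code only consumes the forced bug bit rather than the table.

#include <linux/init.h>
#include <asm/cpu_device_id.h>
#include <asm/processor.h>

/* Same lookup shape as cpu_matches() above, shown standalone. */
static bool __init example_blacklisted(const struct x86_cpu_id *table,
				       unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(table);

	return m && !!(m->driver_data & which);	/* e.g. which == SRBDS */
}

/* Consumers never see the table; they test the synthetic bug bit instead. */
static bool example_srbds_affected(void)
{
	return boot_cpu_has_bug(X86_BUG_SRBDS);
}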
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 37fdefd14f28..fb538fccd24c 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -77,6 +77,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
+extern void update_srbds_msr(void);
extern u64 x86_read_arch_cap_msr(void);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 166d7c355896..c25a67a34bd3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
+#include <linux/pgtable.h>
#include <linux/string.h>
#include <linux/bitops.h>
@@ -11,7 +12,6 @@
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
-#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
@@ -1142,9 +1142,12 @@ void switch_to_sld(unsigned long tifn)
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
{}
};
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 51b9190c628b..23ad8e953dfb 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -761,7 +761,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- __flush_tlb();
+ flush_tlb_local();
/* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
@@ -778,7 +778,7 @@ static void post_set(void) __releases(set_atomicity_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- __flush_tlb();
+ flush_tlb_local();
/* Intel (P6) standard MTRRs */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 4bd28b388a1a..0daf2f1cf7a8 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1326,9 +1326,9 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* pseudo-locked region will still be here on return.
*
* The mutex has to be released temporarily to avoid a potential
- * deadlock with the mm->mmap_sem semaphore which is obtained in
- * the device_create() and debugfs_create_dir() callpath below
- * as well as before the mmap() callback is called.
+ * deadlock with the mm->mmap_lock which is obtained in the
+ * device_create() and debugfs_create_dir() callpath below as well as
+ * before the mmap() callback is called.
*/
mutex_unlock(&rdtgroup_mutex);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index d7cb5ab0d1f0..23b4b61319d3 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3199,10 +3199,10 @@ int __init rdtgroup_init(void)
* during the debugfs directory creation also &sb->s_type->i_mutex_key
* (the lockdep class of inode->i_rwsem). Other filesystem
* interactions (eg. SyS_getdents) have the lock ordering:
- * &sb->s_type->i_mutex_key --> &mm->mmap_sem
- * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
+ * &sb->s_type->i_mutex_key --> &mm->mmap_lock
+ * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
* is taken, thus creating dependency:
- * &mm->mmap_sem --> rdtgroup_mutex for the latter that can cause
+ * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause
* issues considering the other two lock dependencies.
* By creating the debugfs directory here we avoid a dependency
* that may cause deadlock (even though file operations cannot
diff --git a/arch/x86/kernel/crash_core_32.c b/arch/x86/kernel/crash_core_32.c
index c0159a7bca6d..8a89c109e20a 100644
--- a/arch/x86/kernel/crash_core_32.c
+++ b/arch/x86/kernel/crash_core_32.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/crash_core.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/crash_core_64.c b/arch/x86/kernel/crash_core_64.c
index 845a57eb4eb7..7d255f882afe 100644
--- a/arch/x86/kernel/crash_core_64.c
+++ b/arch/x86/kernel/crash_core_64.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/crash_core.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index 3793646f0fb5..2ccc57f152a4 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -6,7 +6,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/traps.h>
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index ae64ec7f752f..456511b2284e 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -65,7 +65,7 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info)
}
static void printk_stack_address(unsigned long address, int reliable,
- char *log_lvl)
+ const char *log_lvl)
{
touch_nmi_watchdog();
printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
@@ -160,7 +160,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, char *log_lvl)
+ unsigned long *stack, const char *log_lvl)
{
struct unwind_state state;
struct stack_info stack_info = {0};
@@ -279,7 +279,8 @@ next:
}
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl)
{
task = task ? : current;
@@ -290,7 +291,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp && task == current)
sp = get_stack_pointer(current, NULL);
- show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
+ show_trace_log_lvl(task, NULL, sp, loglvl);
}
void show_stack_regs(struct pt_regs *regs)
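A one-line kernel-context sketch (illustrative wrapper) of the new explicit-loglvl calling convention for show_stack().

#include <linux/sched/debug.h>
#include <linux/printk.h>

/* NULL task and NULL sp mean "current task, from the current frame". */
static void example_dump_current_stack(void)
{
	show_stack(NULL, NULL, KERN_ERR);
}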
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 4d13c57f370a..983cd53ed4c9 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -991,7 +991,15 @@ void __init e820__reserve_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ /*
+ * SETUP_EFI is supplied by kexec and does not need to be
+ * reserved.
+ */
+ if (data->type != SETUP_EFI)
+ e820__range_update_kexec(pa_data,
+ sizeof(*data) + data->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
if (data->type == SETUP_INDIRECT &&
((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 93fbdff2974f..d3c531d3b244 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -8,6 +8,7 @@
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/errno.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
@@ -15,7 +16,6 @@
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/xhci-dbgp.h>
#include <asm/pci_x86.h>
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 12e7d4406c32..4fe7af58cfe1 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -29,7 +29,7 @@
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 206a4b6144c2..cbb71c1b574f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -20,13 +20,13 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4bbc770af632..4fc33fdf0f16 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -13,8 +13,8 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/segment.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 519649ddf100..f3c76252247d 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -15,11 +15,11 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 5aa523c2d573..dd73135d7cee 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -16,11 +16,11 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4d7022a740ab..85de8fa69b24 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -41,11 +41,11 @@
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index ea13f6888284..234f58e0fe8c 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -16,11 +16,11 @@
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
+#include <linux/pgtable.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 84c3ba32f211..8748321c4486 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -8,7 +8,7 @@
*
* Lock order:
* contex.ldt_usr_sem
- * mmap_sem
+ * mmap_lock
* context.lock
*/
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
deleted file mode 100644
index 6a68e41206e7..000000000000
--- a/arch/x86/kernel/livepatch.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * livepatch.c - x86-specific Kernel Live Patching Core
- */
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/livepatch.h>
-#include <asm/text-patching.h>
-
-/* Apply per-object alternatives. Based on x86 module_finalize() */
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj)
-{
- int cnt;
- struct klp_modinfo *info;
- Elf_Shdr *s, *alt = NULL, *para = NULL;
- void *aseg, *pseg;
- const char *objname;
- char sec_objname[MODULE_NAME_LEN];
- char secname[KSYM_NAME_LEN];
-
- info = patch->mod->klp_info;
- objname = obj->name ? obj->name : "vmlinux";
-
- /* See livepatch core code for BUILD_BUG_ON() explanation */
- BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
-
- for (s = info->sechdrs; s < info->sechdrs + info->hdr.e_shnum; s++) {
- /* Apply per-object .klp.arch sections */
- cnt = sscanf(info->secstrings + s->sh_name,
- ".klp.arch.%55[^.].%127s",
- sec_objname, secname);
- if (cnt != 2)
- continue;
- if (strcmp(sec_objname, objname))
- continue;
- if (!strcmp(".altinstructions", secname))
- alt = s;
- if (!strcmp(".parainstructions", secname))
- para = s;
- }
-
- if (alt) {
- aseg = (void *) alt->sh_addr;
- apply_alternatives(aseg, aseg + alt->sh_size);
- }
-
- if (para) {
- pseg = (void *) para->sh_addr;
- apply_paravirt(pseg, pseg + para->sh_size);
- }
-}
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 02bddfc122a4..64b00b0d7fe8 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -13,7 +13,6 @@
#include <linux/gfp.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index ad5cdd6a5f23..a29a44a98e5b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -19,7 +19,6 @@
#include <linux/efi.h>
#include <asm/init.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index d5c72cb877b3..34b153cbd4ac 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -18,10 +18,10 @@
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
+#include <linux/memory.h>
#include <asm/text-patching.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>
@@ -126,11 +126,12 @@ int apply_relocate(Elf32_Shdr *sechdrs,
return 0;
}
#else /*X86_64*/
-int apply_relocate_add(Elf64_Shdr *sechdrs,
+static int __apply_relocate_add(Elf64_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
- struct module *me)
+ struct module *me,
+ void *(*write)(void *dest, const void *src, size_t len))
{
unsigned int i;
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
@@ -162,19 +163,19 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_X86_64_64:
if (*(u64 *)loc != 0)
goto invalid_relocation;
- *(u64 *)loc = val;
+ write(loc, &val, 8);
break;
case R_X86_64_32:
if (*(u32 *)loc != 0)
goto invalid_relocation;
- *(u32 *)loc = val;
+ write(loc, &val, 4);
if (val != *(u32 *)loc)
goto overflow;
break;
case R_X86_64_32S:
if (*(s32 *)loc != 0)
goto invalid_relocation;
- *(s32 *)loc = val;
+ write(loc, &val, 4);
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
@@ -183,7 +184,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (*(u32 *)loc != 0)
goto invalid_relocation;
val -= (u64)loc;
- *(u32 *)loc = val;
+ write(loc, &val, 4);
#if 0
if ((s64)val != *(s32 *)loc)
goto overflow;
@@ -193,7 +194,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (*(u64 *)loc != 0)
goto invalid_relocation;
val -= (u64)loc;
- *(u64 *)loc = val;
+ write(loc, &val, 8);
break;
default:
pr_err("%s: Unknown rela relocation: %llu\n",
@@ -215,6 +216,33 @@ overflow:
me->name);
return -ENOEXEC;
}
+
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ int ret;
+ bool early = me->state == MODULE_STATE_UNFORMED;
+ void *(*write)(void *, const void *, size_t) = memcpy;
+
+ if (!early) {
+ write = text_poke;
+ mutex_lock(&text_mutex);
+ }
+
+ ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
+ write);
+
+ if (!early) {
+ text_poke_sync();
+ mutex_unlock(&text_mutex);
+ }
+
+ return ret;
+}
+
#endif
int module_finalize(const Elf_Ehdr *hdr,
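A condensed sketch (kernel context; the helper name is hypothetical) of the writer-callback pattern used above: live module text is patched through text_poke() under text_mutex, while unformed modules keep a plain memcpy().

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/memory.h>		/* text_mutex */
#include <asm/text-patching.h>		/* text_poke(), text_poke_sync() */

static int example_write_reloc(void *loc, u64 val, bool module_is_live)
{
	if (!module_is_live) {
		memcpy(loc, &val, sizeof(val));	/* text not mapped read-only yet */
		return 0;
	}

	mutex_lock(&text_mutex);
	text_poke(loc, &val, sizeof(val));	/* patches RO text via a temporary mapping */
	text_poke_sync();			/* make all CPUs observe the new text */
	mutex_unlock(&text_mutex);
	return 0;
}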
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c131ba4e70ef..674a7d66d960 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -13,13 +13,13 @@
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
+#include <linux/pgtable.h>
#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
@@ -160,25 +160,6 @@ unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
return insn_len;
}
-static void native_flush_tlb(void)
-{
- __native_flush_tlb();
-}
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-static void native_flush_tlb_global(void)
-{
- __native_flush_tlb_global();
-}
-
-static void native_flush_tlb_one_user(unsigned long addr)
-{
- __native_flush_tlb_one_user(addr);
-}
-
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
@@ -359,7 +340,7 @@ struct paravirt_patch_template pv_ops = {
#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */
- .mmu.flush_tlb_user = native_flush_tlb,
+ .mmu.flush_tlb_user = native_flush_tlb_local,
.mmu.flush_tlb_kernel = native_flush_tlb_global,
.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
.mmu.flush_tlb_others = native_flush_tlb_others,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ce6cd220f722..f362ce0d5ac0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,28 +545,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
lockdep_assert_irqs_disabled();
- /*
- * If TIF_SSBD is different, select the proper mitigation
- * method. Note that if SSBD mitigation is disabled or permanentely
- * enabled this branch can't be taken because nothing can set
- * TIF_SSBD.
- */
- if (tif_diff & _TIF_SSBD) {
- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ /* Handle change of TIF_SSBD depending on the mitigation method. */
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- static_cpu_has(X86_FEATURE_AMD_SSBD)) {
- msr |= ssbd_tif_to_spec_ctrl(tifn);
- updmsr = true;
- }
+ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ updmsr |= !!(tif_diff & _TIF_SSBD);
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
}
- /*
- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
- * otherwise avoid the MSR write.
- */
+ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);
@@ -612,6 +604,17 @@ void speculation_ctrl_update_current(void)
preempt_enable();
}
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
+{
+ unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
+
+ newval = cr4 ^ mask;
+ if (newval != cr4) {
+ this_cpu_write(cpu_tlbstate.cr4, newval);
+ __write_cr4(newval);
+ }
+}
+
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
unsigned long tifp, tifn;
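A brief sketch (illustrative; mirrors how the context-switch path is expected to use the local helper added above) of cr4_toggle_bits_irqsoff(): valid only where interrupts are already off, and it should only flip CR4 when the TIF bit actually changed between the two tasks.

#include <asm/thread_info.h>
#include <asm/processor-flags.h>

/* X86_CR4_TSD / _TIF_NOTSC chosen as the usual example pair. */
static void example_switch_notsc(unsigned long tifp, unsigned long tifn)
{
	if ((tifp ^ tifn) & _TIF_NOTSC)		/* bit differs between prev and next */
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);
}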
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 538d4e8d6589..acfd6d2a0cbf 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -39,7 +39,6 @@
#include <linux/kdebug.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0c169a5687e1..9a97415b2139 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -40,7 +40,6 @@
#include <linux/ftrace.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index f0e1ddbc2fd7..44130588987f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -28,7 +28,6 @@
#include <linux/nospec.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3ca43be4f9cf..0ec7ced727fe 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -11,13 +11,13 @@
#include <linux/tboot.h>
#include <linux/delay.h>
#include <linux/frame.h>
+#include <linux/pgtable.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
+ { /* Handle problems with rebooting on Apple MacBook6,1 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBook6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ },
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2467f3dd35d3..ffbd9a3d78d8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -55,6 +55,7 @@
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
+#include <linux/pgtable.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -63,7 +64,6 @@
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c
index ab03fede1422..f8d65c99feb8 100644
--- a/arch/x86/kernel/sys_ia32.c
+++ b/arch/x86/kernel/sys_ia32.c
@@ -135,26 +135,30 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
typeof(ubuf->st_gid) gid = 0;
SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
- if (!access_ok(ubuf, sizeof(struct stat64)) ||
- __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
- __put_user(stat->ino, &ubuf->__st_ino) ||
- __put_user(stat->ino, &ubuf->st_ino) ||
- __put_user(stat->mode, &ubuf->st_mode) ||
- __put_user(stat->nlink, &ubuf->st_nlink) ||
- __put_user(uid, &ubuf->st_uid) ||
- __put_user(gid, &ubuf->st_gid) ||
- __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
- __put_user(stat->size, &ubuf->st_size) ||
- __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
- __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
- __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
- __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
- __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
- __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
- __put_user(stat->blksize, &ubuf->st_blksize) ||
- __put_user(stat->blocks, &ubuf->st_blocks))
+ if (!user_write_access_begin(ubuf, sizeof(struct stat64)))
return -EFAULT;
+ unsafe_put_user(huge_encode_dev(stat->dev), &ubuf->st_dev, Efault);
+ unsafe_put_user(stat->ino, &ubuf->__st_ino, Efault);
+ unsafe_put_user(stat->ino, &ubuf->st_ino, Efault);
+ unsafe_put_user(stat->mode, &ubuf->st_mode, Efault);
+ unsafe_put_user(stat->nlink, &ubuf->st_nlink, Efault);
+ unsafe_put_user(uid, &ubuf->st_uid, Efault);
+ unsafe_put_user(gid, &ubuf->st_gid, Efault);
+ unsafe_put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev, Efault);
+ unsafe_put_user(stat->size, &ubuf->st_size, Efault);
+ unsafe_put_user(stat->atime.tv_sec, &ubuf->st_atime, Efault);
+ unsafe_put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec, Efault);
+ unsafe_put_user(stat->mtime.tv_sec, &ubuf->st_mtime, Efault);
+ unsafe_put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec, Efault);
+ unsafe_put_user(stat->ctime.tv_sec, &ubuf->st_ctime, Efault);
+ unsafe_put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec, Efault);
+ unsafe_put_user(stat->blksize, &ubuf->st_blksize, Efault);
+ unsafe_put_user(stat->blocks, &ubuf->st_blocks, Efault);
+ user_access_end();
return 0;
+Efault:
+ user_write_access_end();
+ return -EFAULT;
}
COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename,
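A compact sketch (kernel context; the struct and function names are hypothetical) of the user_write_access_begin()/unsafe_put_user() pattern that replaces the long __put_user() chain above, including the Efault unwind path used in the diff.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct two_words { u32 lo; u32 hi; };

static int example_fill(struct two_words __user *dst, u32 lo, u32 hi)
{
	if (!user_write_access_begin(dst, sizeof(*dst)))
		return -EFAULT;
	unsafe_put_user(lo, &dst->lo, Efault);	/* branches to Efault on a fault */
	unsafe_put_user(hi, &dst->hi, Efault);
	user_access_end();
	return 0;
Efault:
	user_write_access_end();
	return -EFAULT;
}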
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index b2942b2dbfcf..992fb1415c0f 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -23,7 +23,6 @@
#include <asm/realmode.h>
#include <asm/processor.h>
#include <asm/bootparam.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/swiotlb.h>
#include <asm/fixmap.h>
@@ -94,7 +93,7 @@ static struct mm_struct tboot_mm = {
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+ MMAP_LOCK_INITIALIZER(init_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
};
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 371a6b348e44..e42faa792c07 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -25,10 +25,6 @@
#include <asm/hpet.h>
#include <asm/time.h>
-#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-#endif
-
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 47a8676c7395..764573de3996 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -171,7 +171,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pte_t *pte;
int i;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -197,7 +197,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1bf7e312361f..7c35556c7827 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -40,13 +40,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
-jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
-jiffies_64 = jiffies;
#endif
+jiffies = jiffies_64;
+
#if defined(CONFIG_X86_64)
/*
* On 64-bit, align RODATA to 2MB so we retain large page mappings for
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 238b78e069fe..af9cdb426dd2 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1252,7 +1252,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
* only, there can be valuable data in the rest which needs
* to be preserved e.g. on migration.
*/
- if (__clear_user((void __user *)addr, sizeof(u32)))
+ if (__put_user(0, (u32 __user *)addr))
return 1;
hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 38c576495048..a6d484ea110b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned long pfn;
unsigned long paddr;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
paddr = pfn << PAGE_SHIFT;
table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
if (!table) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
ret = CMPXCHG(&table[index], orig_pte, new_pte);
memunmap(table);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
return (ret != orig_pte);
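A minimal sketch (kernel context; the helper name is illustrative) of the mmap_read_lock()/mmap_read_unlock() wrappers that replace the open-coded down_read(&mm->mmap_sem) pairs throughout this diff.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static bool example_addr_mapped(struct mm_struct *mm, unsigned long addr)
{
	bool mapped;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	mapped = find_vma_intersection(mm, addr, addr + 1) != NULL;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */
	return mapped;
}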
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 5246db42de45..6110bce7237b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,10 +6,19 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
+# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
+KCSAN_SANITIZE_delay.o := n
+ifdef CONFIG_KCSAN
+# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
+# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
+CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
+endif
+
# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o := n
+KCSAN_SANITIZE_cmdline.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 98f7c6fa2eaa..f7fd0e868c9c 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -7,6 +7,10 @@ KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
+# Disable KCSAN entirely, because otherwise we get warnings that some functions
+# reference __initdata sections.
+KCSAN_SANITIZE := n
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 5199d8a1daf1..6f8b48f545f4 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -4,9 +4,9 @@
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
+#include <linux/pgtable.h>
#include <asm/cpu_entry_area.h>
-#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index 4a3b62f780b4..092ea436c7e6 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -3,7 +3,7 @@
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
static int ptdump_show(struct seq_file *m, void *v)
{
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ea9010113f69..e1b599ecbbc2 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -19,7 +19,6 @@
#include <linux/ptdump.h>
#include <asm/e820/types.h>
-#include <asm/pgtable.h>
/*
* The dumper groups pagetable entries of the same type into one, and for
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c5437f2964ee..0b03ae8c39cd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -811,7 +811,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}
@@ -865,7 +865,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
* 2. T1 : set PKRU to deny access to pkey=4, touches page
* 3. T1 : faults...
* 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
- * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
@@ -1231,15 +1231,15 @@ void do_user_addr_fault(struct pt_regs *regs,
* Kernel-mode access to the user address space should only occur
* on well-defined single instructions listed in the exception
* tables. But, an erroneous kernel fault occurring outside one of
- * those areas which also holds mmap_sem might deadlock attempting
+ * those areas which also holds mmap_lock might deadlock attempting
* to validate the fault against the address space.
*
* Only do the expensive exception table search when we might be at
* risk of a deadlock. This happens if we
- * 1. Failed to acquire mmap_sem, and
+ * 1. Failed to acquire mmap_lock, and
* 2. The access did not originate in userspace.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
/*
* Fault from code in kernel from
@@ -1249,7 +1249,7 @@ void do_user_addr_fault(struct pt_regs *regs,
return;
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -1289,9 +1289,9 @@ good_area:
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
- * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+ * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
*
- * Note that handle_userfault() may also release and reacquire mmap_sem
+ * Note that handle_userfault() may also release and reacquire mmap_lock
* (and not return with VM_FAULT_RETRY), when returning to userland to
* repeat the page fault later with a VM_FAULT_NOPAGE retval
* (potentially after handling any pending signal during the return to
@@ -1310,7 +1310,7 @@ good_area:
}
/*
- * If we need to retry the mmap_sem has already been released,
+ * If we need to retry the mmap_lock has already been released,
* and if there is a fatal signal pending there is no guarantee
* that we made any progress. Handle this case first.
*/
@@ -1320,7 +1320,7 @@ good_area:
goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, hw_error_code, address, fault);
return;
@@ -1359,7 +1359,7 @@ dotraplinkage void
do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
unsigned long address)
{
- prefetchw(&current->mm->mmap_sem);
+ prefetchw(&current->mm->mmap_lock);
/*
* KVM has two types of events that are, logically, interrupts, but
* are unfortunately delivered using the #PF vector. These events are
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 0a1898b8552e..075fe51317b0 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -4,44 +4,11 @@
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>
-void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
- if (in_interrupt())
- BUG();
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
-
- if (!PageHighMem(page))
- return page_address(page);
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -51,13 +18,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void *kmap_atomic(struct page *page)
-{
- return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
@@ -69,7 +30,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
@@ -99,11 +60,8 @@ void __kunmap_atomic(void *kvaddr)
BUG_ON(vaddr >= (unsigned long)high_memory);
}
#endif
-
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
void __init set_highmem_pages_init(void)
{
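A short sketch (illustrative) showing that kmap_atomic() users are unchanged by this consolidation: the generic wrapper now handles the preempt/pagefault bookkeeping and lowmem pages, and only calls the arch kmap_atomic_high_prot() hook for highmem.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

static void example_zero_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* disables preemption and pagefaults */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* re-enables them */
}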
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1decb645dac0..001dd7dc829f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -49,7 +49,7 @@
* Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
* (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
*/
-uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
+static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
[_PAGE_CACHE_MODE_WB ] = 0 | 0 ,
[_PAGE_CACHE_MODE_WC ] = 0 | _PAGE_PCD,
[_PAGE_CACHE_MODE_UC_MINUS] = 0 | _PAGE_PCD,
@@ -57,9 +57,16 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
[_PAGE_CACHE_MODE_WT ] = 0 | _PAGE_PCD,
[_PAGE_CACHE_MODE_WP ] = 0 | _PAGE_PCD,
};
-EXPORT_SYMBOL(__cachemode2pte_tbl);
-uint8_t __pte2cachemode_tbl[8] = {
+unsigned long cachemode2protval(enum page_cache_mode pcm)
+{
+ if (likely(pcm == 0))
+ return 0;
+ return __cachemode2pte_tbl[pcm];
+}
+EXPORT_SYMBOL(cachemode2protval);
+
+static uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx( 0 | 0 | 0 )] = _PAGE_CACHE_MODE_WB,
[__pte2cm_idx(_PAGE_PWT | 0 | 0 )] = _PAGE_CACHE_MODE_UC_MINUS,
[__pte2cm_idx( 0 | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC_MINUS,
@@ -69,7 +76,22 @@ uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(0 | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
-EXPORT_SYMBOL(__pte2cachemode_tbl);
+
+/* Check that the write-protect PAT entry is set for write-protect */
+bool x86_has_pat_wp(void)
+{
+ return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
+}
+
+enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+{
+ unsigned long masked;
+
+ masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
+ if (likely(masked == 0))
+ return 0;
+ return __pte2cachemode_tbl[__pte2cm_idx(masked)];
+}
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
@@ -170,6 +192,19 @@ struct map_range {
static int page_size_mask;
+/*
+ * Save some of cr4 feature set we're using (e.g. Pentium 4MB
+ * enable and PPro Global page enable), so that any CPUs that boot
+ * up after us can get the correct flags. Invoked on the boot CPU.
+ */
+static inline void cr4_set_bits_and_update_boot(unsigned long mask)
+{
+ mmu_cr4_features |= mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
+ cr4_set_bits(mask);
+}
+
static void __init probe_page_size_mask(void)
{
/*
@@ -645,6 +680,28 @@ static void __init memory_map_bottom_up(unsigned long map_start,
}
}
+/*
+ * The real mode trampoline, which is required for bootstrapping CPUs
+ * occupies only a small area under the low 1MB. See reserve_real_mode()
+ * for details.
+ *
+ * If KASLR is disabled the first PGD entry of the direct mapping is copied
+ * to map the real mode trampoline.
+ *
+ * If KASLR is enabled, copy only the PUD which covers the low 1MB
+ * area. This limits the randomization granularity to 1GB for both 4-level
+ * and 5-level paging.
+ */
+static void __init init_trampoline(void)
+{
+#ifdef CONFIG_X86_64
+ if (!kaslr_memory_enabled())
+ trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
+ else
+ init_trampoline_kaslr();
+#endif
+}
+
void __init init_mem_mapping(void)
{
unsigned long end;
@@ -955,7 +1012,6 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
-EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
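A brief x86-internal sketch (illustrative; assumes the accessors are declared in the usual x86 pgtable/memtype headers) of the cachemode2protval()/pgprot2cachemode() pair that now replaces direct use of the lookup tables made static above.

#include <asm/pgtable_types.h>

/* Merge write-combining cache bits into an existing protection value. */
static pgprot_t example_wc_prot(pgprot_t base)
{
	return __pgprot(pgprot_val(base) | cachemode2protval(_PAGE_CACHE_MODE_WC));
}

/* And the reverse direction, e.g. for checks in PAT code. */
static enum page_cache_mode example_mode_of(pgprot_t prot)
{
	return pgprot2cachemode(prot);
}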
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4222a010057a..bda909e3e37e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -35,7 +35,6 @@
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
@@ -396,15 +395,6 @@ repeat:
pte_t *kmap_pte;
-static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
-{
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
- return pte_offset_kernel(pmd, vaddr);
-}
-
static void __init kmap_init(void)
{
unsigned long kmap_vstart;
@@ -413,28 +403,17 @@ static void __init kmap_init(void)
* Cache the first kmap pte:
*/
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_pte = virt_to_kpte(kmap_vstart);
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
- unsigned long vaddr;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ unsigned long vaddr = PKMAP_BASE;
- vaddr = PKMAP_BASE;
page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
+ pkmap_page_table = virt_to_kpte(vaddr);
}
void __init add_highpages_with_active_regions(int nid,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e08f1007f776..dbae185511cd 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -37,7 +37,6 @@
#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
@@ -304,7 +303,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one_kernel(vaddr);
+ flush_tlb_one_kernel(vaddr);
}
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
@@ -373,7 +372,7 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
pgprot_t prot;
pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
- pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
+ protval_4k_2_large(cachemode2protval(cache));
BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
pgd = pgd_offset_k((unsigned long)__va(phys));
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 18c637c0dc6f..84d85dbd1dad 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -16,12 +16,12 @@
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>
+#include <linux/pgtable.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
@@ -778,10 +778,8 @@ void __init *early_memremap_encrypted(resource_size_t phys_addr,
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
unsigned long size)
{
- /* Be sure the write-protect PAT entry is set for write-protect */
- if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
+ if (!x86_has_pat_wp())
return NULL;
-
return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}
@@ -799,10 +797,8 @@ void __init *early_memremap_decrypted(resource_size_t phys_addr,
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
unsigned long size)
{
- /* Be sure the write-protect PAT entry is set for write-protect */
- if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
+ if (!x86_has_pat_wp())
return NULL;
-
return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -889,5 +885,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
else
pte_clear(&init_mm, addr, pte);
- __flush_tlb_one_kernel(addr);
+ flush_tlb_one_kernel(addr);
}
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 763e71abc0fe..1a50434c8a4d 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -17,7 +17,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>
extern struct range pfn_mapped[E820_MAX_ENTRIES];
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index dc6182eecefa..fb620fd9dae9 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -24,9 +24,9 @@
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>
@@ -61,15 +61,6 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
return (region->size_tb << TB_SHIFT);
}
-/*
- * Apply no randomization if KASLR was disabled at boot or if KASAN
- * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
- */
-static inline bool kaslr_memory_enabled(void)
-{
- return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
-}
-
/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
@@ -148,7 +139,7 @@ void __init kernel_randomize_memory(void)
}
}
-static void __meminit init_trampoline_pud(void)
+void __meminit init_trampoline_kaslr(void)
{
pud_t *pud_page_tramp, *pud, *pud_tramp;
p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
@@ -189,25 +180,3 @@ static void __meminit init_trampoline_pud(void)
__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
}
-
-/*
- * The real mode trampoline, which is required for bootstrapping CPUs
- * occupies only a small area under the low 1MB. See reserve_real_mode()
- * for details.
- *
- * If KASLR is disabled the first PGD entry of the direct mapping is copied
- * to map the real mode trampoline.
- *
- * If KASLR is enabled, copy only the PUD which covers the low 1MB
- * area. This limits the randomization granularity to 1GB for both 4-level
- * and 5-level paging.
- */
-void __meminit init_trampoline(void)
-{
- if (!kaslr_memory_enabled()) {
- init_trampoline_default();
- return;
- }
-
- init_trampoline_pud();
-}
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 22bae5828c3d..be020a7bc414 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -173,7 +173,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1;
}
- __flush_tlb_one_kernel(f->addr);
+ flush_tlb_one_kernel(f->addr);
return 0;
}
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index f5b85bdc0535..e1d7d7477c22 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -9,35 +9,21 @@ static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}
-static __always_inline bool invalid_probe_range(u64 vaddr)
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
+ unsigned long vaddr = (unsigned long)unsafe_src;
+
/*
* Range covering the highest possible canonical userspace address
* as well as non-canonical address range. For the canonical range
* we also need to include the userspace guard page.
*/
- return vaddr < TASK_SIZE_MAX + PAGE_SIZE ||
- canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr;
+ return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+ canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
}
#else
-static __always_inline bool invalid_probe_range(u64 vaddr)
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
- return vaddr < TASK_SIZE_MAX;
+ return (unsigned long)unsafe_src >= TASK_SIZE_MAX;
}
#endif
-
-long probe_kernel_read_strict(void *dst, const void *src, size_t size)
-{
- if (unlikely(invalid_probe_range((unsigned long)src)))
- return -EFAULT;
-
- return __probe_kernel_read(dst, src, size);
-}
-
-long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count)
-{
- if (unlikely(invalid_probe_range((unsigned long)unsafe_addr)))
- return -EFAULT;
-
- return __strncpy_from_unsafe(dst, unsafe_addr, count);
-}
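A small sketch (kernel context; the wrapper name is illustrative) of how callers now go through the generic probe_kernel_read(), with the arch hook above only vetting whether the source range is acceptable.

#include <linux/uaccess.h>
#include <linux/errno.h>

static int example_peek(unsigned long *out, const void *kaddr)
{
	/* Returns -EFAULT if the range is disallowed or the access faults. */
	return probe_kernel_read(out, kaddr, sizeof(*out));
}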
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index a03614bd3e1a..4a781cf99e92 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -134,7 +134,7 @@ static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
} while (size);
- __native_flush_tlb();
+ flush_tlb_local();
}
void __init sme_unmap_bootdata(char *real_mode_data)
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 106ead05bbe3..7a84fc8bc5c3 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -8,7 +8,7 @@
*/
#include <linux/linkage.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 43fd19b3f118..bd7aff5c51f7 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -17,8 +17,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
+#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c
index facce271e8b9..0612a73638a8 100644
--- a/arch/x86/mm/pat/cpa-test.c
+++ b/arch/x86/mm/pat/cpa-test.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <asm/kdebug.h>
/*
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 394be8611748..8f665c352bf0 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -46,7 +46,6 @@
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
-#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
index a07e4882bf36..645613d59942 100644
--- a/arch/x86/mm/pat/memtype_interval.c
+++ b/arch/x86/mm/pat/memtype_interval.c
@@ -14,8 +14,8 @@
#include <linux/interval_tree_generic.h>
#include <linux/sched.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/memtype.h>
#include "memtype.h"
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index b8c55a2e402d..77e04304a2a7 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -69,6 +69,11 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
+static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
+{
+ return __pgprot(cachemode2protval(pcm));
+}
+
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -341,7 +346,7 @@ static void __cpa_flush_tlb(void *data)
unsigned int i;
for (i = 0; i < cpa->numpages; i++)
- __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
+ flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}
static void cpa_flush(struct cpa_data *data, int cache)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7bd2c3a52297..dfd82f51ba66 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -3,7 +3,6 @@
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>
@@ -19,6 +18,14 @@ EXPORT_SYMBOL(physical_mask);
#define PGTABLE_HIGHMEM 0
#endif
+#ifndef CONFIG_PARAVIRT
+static inline
+void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ tlb_remove_page(tlb, table);
+}
+#endif
+
gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
pgtable_t pte_alloc_one(struct mm_struct *mm)
@@ -706,11 +713,9 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
if (pud_present(*pud) && !pud_huge(*pud))
return 0;
- prot = pgprot_4k_2_large(prot);
-
set_pte((pte_t *)pud, pfn_pte(
(u64)addr >> PAGE_SHIFT,
- __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+ __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
return 1;
}
@@ -738,11 +743,9 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
if (pmd_present(*pmd) && !pmd_huge(*pmd))
return 0;
- prot = pgprot_4k_2_large(prot);
-
set_pte((pte_t *)pmd, pfn_pte(
(u64)addr >> PAGE_SHIFT,
- __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+ __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
return 1;
}
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 0e6700eaa4f9..1953685c2ddf 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -11,7 +11,6 @@
#include <linux/spinlock.h>
#include <asm/cpu_entry_area.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
@@ -64,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
* It's enough to flush this one mapping.
* (PGE mappings get flushed as well)
*/
- __flush_tlb_one_kernel(vaddr);
+ flush_tlb_one_kernel(vaddr);
}
unsigned long __FIXADDR_TOP = 0xfffff000;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index da0fb17a1a36..a3c6757a65c7 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -34,7 +34,6 @@
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index adb3c5784dac..ed5667f5169f 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -2,8 +2,8 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cpufeature.h>
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index f3fe261e5936..1a3569b43aa5 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -18,6 +18,16 @@
#include "mm_internal.h"
+#ifdef CONFIG_PARAVIRT
+# define STATIC_NOPV
+#else
+# define STATIC_NOPV static
+# define __flush_tlb_local native_flush_tlb_local
+# define __flush_tlb_global native_flush_tlb_global
+# define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr)
+# define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info)
+#endif
+
/*
* TLB flushing, formerly SMP-only
* c/o Linus Torvalds.
@@ -39,6 +49,126 @@
#define LAST_USER_MM_IBPB 0x1UL
/*
+ * The x86 feature is called PCID (Process Context IDentifier). It is similar
+ * to what is traditionally called ASID on the RISC processors.
+ *
+ * We don't use the traditional ASID implementation, where each process/mm gets
+ * its own ASID and we flush/restart when we run out of ASID space.
+ *
+ * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
+ * that came by on this CPU, allowing cheaper switch_mm between processes on
+ * this CPU.
+ *
+ * We end up with different spaces for different things. To avoid confusion we
+ * use different names for each of them:
+ *
+ * ASID - [0, TLB_NR_DYN_ASIDS-1]
+ * the canonical identifier for an mm
+ *
+ * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * the value we write into the PCID part of CR3; corresponds to the
+ * ASID+1, because PCID 0 is special.
+ *
+ * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * for KPTI each mm has two address spaces and thus needs two
+ * PCID values, but we can still make do with a single ASID denomination
+ * for each mm. Corresponds to kPCID + 2048.
+ *
+ */
+
+/* There are 12 bits of space for ASIDS in CR3 */
+#define CR3_HW_ASID_BITS 12
+
+/*
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
+ * user/kernel switches
+ */
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define PTI_CONSUMED_PCID_BITS 1
+#else
+# define PTI_CONSUMED_PCID_BITS 0
+#endif
+
+#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
+
+/*
+ * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
+ * for them being zero-based. Another -1 is because PCID 0 is reserved for
+ * use by non-PCID-aware users.
+ */
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
+
+/*
+ * Given @asid, compute kPCID
+ */
+static inline u16 kern_pcid(u16 asid)
+{
+ VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ /*
+ * Make sure that the dynamic ASID space does not conflict with the
+ * bit we are using to switch between user and kernel ASIDs.
+ */
+ BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
+
+ /*
+ * The ASID being passed in here should have respected the
+ * MAX_ASID_AVAILABLE and thus never have the switch bit set.
+ */
+ VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
+#endif
+ /*
+ * The dynamically-assigned ASIDs that get passed in are small
+ * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
+ * so do not bother to clear it.
+ *
+ * If PCID is on, ASID-aware code paths put the ASID+1 into the
+ * PCID bits. This serves two purposes. It prevents a nasty
+ * situation in which PCID-unaware code saves CR3, loads some other
+ * value (with PCID == 0), and then restores CR3, thus corrupting
+ * the TLB for ASID 0 if the saved ASID was nonzero. It also means
+ * that any bugs involving loading a PCID-enabled CR3 with
+ * CR4.PCIDE off will trigger deterministically.
+ */
+ return asid + 1;
+}
+
+/*
+ * Given @asid, compute uPCID
+ */
+static inline u16 user_pcid(u16 asid)
+{
+ u16 ret = kern_pcid(asid);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+ ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
+#endif
+ return ret;
+}
+
+static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
+{
+ if (static_cpu_has(X86_FEATURE_PCID)) {
+ return __sme_pa(pgd) | kern_pcid(asid);
+ } else {
+ VM_WARN_ON_ONCE(asid != 0);
+ return __sme_pa(pgd);
+ }
+}
+
+static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
+{
+ VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+ /*
+ * Use boot_cpu_has() instead of this_cpu_has() as this function
+ * might be called during early boot. This should work even after
+ * boot because all CPUs have the same capabilities:
+ */
+ VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
+ return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
+}
+
+/*
* We get here when we do something requiring a TLB invalidation
* but could not go invalidate all of the contexts. We do the
* necessary invalidation by clearing out the 'ctx_id' which
@@ -110,6 +240,32 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
*need_flush = true;
}
+/*
+ * Given an ASID, flush the corresponding user ASID. We can delay this
+ * until the next time we switch to it.
+ *
+ * See SWITCH_TO_USER_CR3.
+ */
+static inline void invalidate_user_asid(u16 asid)
+{
+ /* There is no user ASID if address space separation is off */
+ if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+ return;
+
+ /*
+ * We only have a single ASID if PCID is off and the CR3
+ * write will have flushed it.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_PCID))
+ return;
+
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ __set_bit(kern_pcid(asid),
+ (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
+}
+
static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
{
unsigned long new_mm_cr3;
@@ -244,6 +400,26 @@ static void cond_ibpb(struct task_struct *next)
}
}
+#ifdef CONFIG_PERF_EVENTS
+static inline void cr4_update_pce_mm(struct mm_struct *mm)
+{
+ if (static_branch_unlikely(&rdpmc_always_available_key) ||
+ (!static_branch_unlikely(&rdpmc_never_available_key) &&
+ atomic_read(&mm->context.perf_rdpmc_allowed)))
+ cr4_set_bits_irqsoff(X86_CR4_PCE);
+ else
+ cr4_clear_bits_irqsoff(X86_CR4_PCE);
+}
+
+void cr4_update_pce(void *ignored)
+{
+ cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
+}
+
+#else
+static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
+#endif
+
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
@@ -403,7 +579,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
if (next != real_prev) {
- load_mm_cr4_irqsoff(next);
+ cr4_update_pce_mm(next);
switch_ldt(real_prev, next);
}
}
@@ -580,7 +756,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
unsigned long addr = f->start;
while (addr < f->end) {
- __flush_tlb_one_user(addr);
+ flush_tlb_one_user(addr);
addr += 1UL << f->stride_shift;
}
if (local)
@@ -588,7 +764,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
trace_tlb_flush(reason, nr_invalidate);
} else {
/* Full flush. */
- local_flush_tlb();
+ flush_tlb_local();
if (local)
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
trace_tlb_flush(reason, TLB_FLUSH_ALL);
@@ -623,8 +799,8 @@ static bool tlb_is_not_lazy(int cpu, void *data)
return !per_cpu(cpu_tlbstate.is_lazy, cpu);
}
-void native_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (info->end == TLB_FLUSH_ALL)
@@ -674,6 +850,12 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(void *)info, 1, cpumask);
}
+void flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ __flush_tlb_others(cpumask, info);
+}
+
/*
* See Documentation/x86/tlb.rst for details. We choose 33
* because it is large enough to cover the vast majority (at
@@ -784,7 +966,7 @@ static void do_kernel_range_flush(void *info)
/* flush range by one by one 'invlpg' */
for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
- __flush_tlb_one_kernel(addr);
+ flush_tlb_one_kernel(addr);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -807,6 +989,164 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
/*
+ * This can be used from process context to figure out what the value of
+ * CR3 is without needing to do a (slow) __read_cr3().
+ *
+ * It's intended to be used for code like KVM that sneakily changes CR3
+ * and needs to restore it. It needs to be used very carefully.
+ */
+unsigned long __get_current_cr3_fast(void)
+{
+ unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
+ this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+ /* For now, be very restrictive about when this can be called. */
+ VM_WARN_ON(in_nmi() || preemptible());
+
+ VM_BUG_ON(cr3 != __read_cr3());
+ return cr3;
+}
+EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
+
+/*
+ * Flush one page in the kernel mapping
+ */
+void flush_tlb_one_kernel(unsigned long addr)
+{
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
+
+ /*
+ * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
+ * paravirt equivalent. Even with PCID, this is sufficient: we only
+ * use PCID if we also use global PTEs for the kernel mapping, and
+ * INVLPG flushes global translations across all address spaces.
+ *
+ * If PTI is on, then the kernel is mapped with non-global PTEs, and
+ * __flush_tlb_one_user() will flush the given address for the current
+ * kernel address space and for its usermode counterpart, but it does
+ * not flush it for other address spaces.
+ */
+ flush_tlb_one_user(addr);
+
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ /*
+ * See above. We need to propagate the flush to all other address
+ * spaces. In principle, we only need to propagate it to kernelmode
+ * address spaces, but the extra bookkeeping we would need is not
+ * worth it.
+ */
+ this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
+/*
+ * Flush one page in the user mapping
+ */
+STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
+{
+ u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ /*
+ * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
+ * Just use invalidate_user_asid() in case we are called early.
+ */
+ if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
+ invalidate_user_asid(loaded_mm_asid);
+ else
+ invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
+}
+
+void flush_tlb_one_user(unsigned long addr)
+{
+ __flush_tlb_one_user(addr);
+}
+
+/*
+ * Flush everything
+ */
+STATIC_NOPV void native_flush_tlb_global(void)
+{
+ unsigned long cr4, flags;
+
+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
+ /*
+ * Using INVPCID is considerably faster than a pair of writes
+ * to CR4 sandwiched inside an IRQ flag save/restore.
+ *
+ * Note, this works with CR4.PCIDE=0 or 1.
+ */
+ invpcid_flush_all();
+ return;
+ }
+
+ /*
+ * Read-modify-write to CR4 - protect it from preemption and
+ * from interrupts. (Use the raw variant because this code can
+ * be called from deep inside debugging code.)
+ */
+ raw_local_irq_save(flags);
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ /* toggle PGE */
+ native_write_cr4(cr4 ^ X86_CR4_PGE);
+ /* write old PGE again and flush TLBs */
+ native_write_cr4(cr4);
+
+ raw_local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire current user mapping
+ */
+STATIC_NOPV void native_flush_tlb_local(void)
+{
+ /*
+ * Preemption or interrupts must be disabled to protect the access
+ * to the per CPU variable and to prevent being preempted between
+ * read_cr3() and write_cr3().
+ */
+ WARN_ON_ONCE(preemptible());
+
+ invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+ /* If current->mm == NULL then the read_cr3() "borrows" an mm */
+ native_write_cr3(__native_read_cr3());
+}
+
+void flush_tlb_local(void)
+{
+ __flush_tlb_local();
+}
+
+/*
+ * Flush everything
+ */
+void __flush_tlb_all(void)
+{
+ /*
+ * This is to catch users that run with preemption enabled and the PGE
+ * feature and so would not trigger the warning in native_flush_tlb_local().
+ */
+ VM_WARN_ON_ONCE(preemptible());
+
+ if (boot_cpu_has(X86_FEATURE_PGE)) {
+ __flush_tlb_global();
+ } else {
+ /*
+ * !PGE -> !PCID (setup_pcid()), thus every flush is total.
+ */
+ flush_tlb_local();
+ }
+}
+EXPORT_SYMBOL_GPL(__flush_tlb_all);
+
+/*
* arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
* This means that the 'struct flush_tlb_info' that describes which mappings to
* flush is actually fixed. We therefore set a single fixed struct and use it in
@@ -837,6 +1177,38 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
put_cpu();
}
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm. It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+bool nmi_uaccess_okay(void)
+{
+ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ struct mm_struct *current_mm = current->mm;
+
+ VM_WARN_ON_ONCE(!loaded_mm);
+
+ /*
+ * The condition we want to check is
+ * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
+ * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+ * is supposed to be reasonably fast.
+ *
+ * Instead, we check the almost equivalent but somewhat conservative
+ * condition below, and we rely on the fact that switch_mm_irqs_off()
+ * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+ */
+ if (loaded_mm != current_mm)
+ return false;
+
+ VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+ return true;
+}
+
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
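
The comment block added at the top of tlb.c defines three related ID spaces (ASID, kPCID, uPCID) and the CR3 layout that kern_pcid(), user_pcid() and build_cr3_noflush() encode. The following stand-alone sketch reproduces that arithmetic; TLB_NR_DYN_ASIDS is set to a plausible value, the PTI user bit is taken from the "uPCID = kPCID + 2048" relation in the comment, and the SME mask is ignored, so treat the numbers as illustrative rather than authoritative.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants; 2048 == 1 << 11 gives the PTI user bit. */
#define TLB_NR_DYN_ASIDS	6
#define CR3_PTI_PCID_USER_BIT	11
#define CR3_NOFLUSH		(1ULL << 63)

/* ASID -> kPCID: PCID 0 is reserved, so kernel PCIDs start at 1. */
static uint16_t kern_pcid(uint16_t asid)
{
	return asid + 1;
}

/* ASID -> uPCID: the kPCID with the PTI "user" bit set (+2048). */
static uint16_t user_pcid(uint16_t asid)
{
	return kern_pcid(asid) | (1u << CR3_PTI_PCID_USER_BIT);
}

/* CR3 = page-table root | kPCID, plus bit 63 to suppress the implicit flush. */
static uint64_t build_cr3_noflush(uint64_t pgd_pa, uint16_t asid)
{
	return pgd_pa | kern_pcid(asid) | CR3_NOFLUSH;
}

int main(void)
{
	for (uint16_t asid = 0; asid < TLB_NR_DYN_ASIDS; asid++)
		printf("ASID %u -> kPCID %u, uPCID %u\n",
		       asid, kern_pcid(asid), user_pcid(asid));

	printf("CR3 (no flush) = %#llx\n",
	       (unsigned long long)build_cr3_noflush(0x1000, 0));
	return 0;
}
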
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e723559c386a..0c67a5a94de3 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 91220cc25854..e3f1ca316068 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -60,8 +60,7 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
}
#ifdef CONFIG_ACPI
-static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
- bool set_pirq)
+static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
int rc, pirq = -1, irq = -1;
struct physdev_map_pirq map_irq;
@@ -94,9 +93,6 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
name = "ioapic-level";
}
- if (gsi_override >= 0)
- gsi = gsi_override;
-
irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
if (irq < 0)
goto out;
@@ -112,12 +108,12 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
if (!xen_hvm_domain())
return -1;
- return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
+ return xen_register_pirq(gsi, trigger,
false /* no mapping of GSI to PIRQ */);
}
#ifdef CONFIG_XEN_DOM0
-static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
int rc, irq;
struct physdev_setup_gsi setup_gsi;
@@ -128,7 +124,7 @@ static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polar
printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
gsi, triggering, polarity);
- irq = xen_register_pirq(gsi, gsi_override, triggering, true);
+ irq = xen_register_pirq(gsi, triggering, true);
setup_gsi.gsi = gsi;
setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -148,7 +144,7 @@ static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polar
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
int trigger, int polarity)
{
- return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
+ return xen_register_gsi(gsi, trigger, polarity);
}
#endif
#endif
@@ -491,7 +487,7 @@ int __init pci_xen_initial_domain(void)
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
continue;
- xen_register_pirq(irq, -1 /* no GSI override */,
+ xen_register_pirq(irq,
trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
true /* Map GSI to PIRQ */);
}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index c049c432745d..826ead67753d 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -24,11 +24,11 @@
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/efi.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index c5e393f8bb3f..8e364c4c6768 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -39,7 +39,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 933dd4fe3a97..f03a6883dcc6 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -52,7 +52,7 @@ static const char * const lid_wake_mode_names[] = {
static void battery_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-battery");
+ struct power_supply *psy = power_supply_get_by_name("olpc_battery");
if (psy) {
power_supply_changed(psy);
@@ -62,7 +62,7 @@ static void battery_status_changed(void)
static void ac_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-ac");
+ struct power_supply *psy = power_supply_get_by_name("olpc_ac");
if (psy) {
power_supply_changed(psy);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 089413cd944e..85f4638764d6 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -75,7 +75,7 @@ static struct kobj_attribute lid_wake_on_close_attr =
static void battery_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-battery");
+ struct power_supply *psy = power_supply_get_by_name("olpc_battery");
if (psy) {
power_supply_changed(psy);
@@ -85,7 +85,7 @@ static void battery_status_changed(void)
static void ac_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-ac");
+ struct power_supply *psy = power_supply_get_by_name("olpc_ac");
if (psy) {
power_supply_changed(psy);
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 20a064568463..6bab0f0aa8f3 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -3,12 +3,12 @@
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
-#include <asm/pgtable.h>
#include <asm/olpc_ofw.h>
/* address of OFW callback interface; will be NULL if OFW isn't found */
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 1fd321f37f1b..4ea69690c3e4 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -293,10 +293,10 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
* This must be a normal message, or retry of a normal message
*/
if (msg->address == TLB_FLUSH_ALL) {
- local_flush_tlb();
+ flush_tlb_local();
stat->d_alltlb++;
} else {
- __flush_tlb_one_user(msg->address);
+ flush_tlb_one_user(msg->address);
stat->d_onetlb++;
}
stat->d_requestee++;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index fc3b757afb2c..7c65102debaf 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -13,8 +13,8 @@
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index fc413717a45f..d147f1b2c925 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -12,6 +12,7 @@
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <crypto/hash.h>
@@ -19,7 +20,6 @@
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index a1061d471b73..223d5bca29b8 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -8,9 +8,9 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 0197095d9637..a595953f1d6d 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -12,6 +12,7 @@
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
+#include <linux/pgtable.h>
#include <crypto/hash.h>
@@ -19,7 +20,6 @@
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/x86/purgatory/.gitignore b/arch/x86/purgatory/.gitignore
new file mode 100644
index 000000000000..d2be1500671d
--- /dev/null
+++ b/arch/x86/purgatory/.gitignore
@@ -0,0 +1 @@
+purgatory.chk
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index fb4ee5444379..b04e6e72a592 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -14,10 +14,18 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
CFLAGS_sha256.o := -D__DISABLE_EXPORTS
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
-targets += purgatory.ro
-
+# When linking purgatory.ro with -r, unresolved symbols are not checked;
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
+
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE := n
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# These are adjustments to the compiler flags used for objects that
@@ -25,7 +33,7 @@ KCOV_INSTRUMENT := n
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
@@ -58,12 +66,15 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
+$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
+ $(call if_changed,ld)
+
targets += kexec-purgatory.c
quiet_cmd_bin2c = BIN2C $@
cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
$(call if_changed,bin2c)
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 682c895753d9..6b1f3a4eeb44 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 262f83cad355..1ed1208931e0 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -3,9 +3,9 @@
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
+#include <linux/pgtable.h>
#include <asm/set_memory.h>
-#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b11ec5d8f8ac..83f1b6a56449 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 9e7c4aba6c3a..76d9f6ce7a3d 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -58,7 +58,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!vdso_enabled)
return 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
@@ -66,7 +66,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdsop);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return err;
}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 507f4fb88fa7..c2c97faaf004 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -63,7 +63,6 @@
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index ecb0d5450334..4988e19598c8 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -21,7 +21,6 @@
#include <xen/grant_table.h>
#include <xen/xen.h>
-#include <asm/pgtable.h>
static struct gnttab_vm_area {
struct vm_struct *area;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index bbba8b17829a..a58d9c69807a 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -51,13 +51,13 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif
#include <trace/events/xen.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index f2adb63b2d7c..8fa01c545460 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -23,10 +23,10 @@
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>
#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <asm/cpu.h>
#include <xen/interface/xen.h>
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index a0d50be5a8cb..cf907e5bf2f2 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -145,6 +145,8 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#endif
+#define flush_icache_user_range flush_icache_range
+
/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end) \
do { \
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index cfb8696917e9..a06ffb0c61c7 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -13,9 +13,9 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/kmap_types.h>
#endif
@@ -76,12 +76,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#endif
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel( \
- pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
- (vaddr)), \
- (vaddr)), \
- (vaddr)), \
- (vaddr))
-
#endif
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 04e9340eac4b..eac503215f17 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -13,10 +13,10 @@
#define _XTENSA_HIGHMEM_H
#include <linux/wait.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
#define PKMAP_BASE ((FIXADDR_START - \
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
@@ -63,38 +63,11 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
extern pte_t *pkmap_page_table;
-void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
-
-static inline void *kmap(struct page *page)
-{
- /* Check if this memory layout is broken because PKMAP overlaps
- * page table.
- */
- BUILD_BUG_ON(PKMAP_BASE <
- TLBTEMP_BASE_1 + TLBTEMP_SIZE);
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
static inline void flush_cache_kmaps(void)
{
flush_cache_all();
}
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
-
void kmap_init(void);
#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e3e1d9a1ef69..9ee0c1d004f9 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -24,7 +24,7 @@
#define _XTENSA_INITIALIZE_MMU_H
#include <linux/init.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/vectors.h>
#if XCHAL_HAVE_PTP_MMU
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index de5e6cbbafe4..74923ef3b228 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -18,10 +18,10 @@
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/vectors.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8be0c0568c50..fa054a1772e1 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -267,7 +267,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t pte_wrprotect(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
@@ -359,22 +359,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
update_pte(ptep, pte_wrprotect(pte));
}
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))
-
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) \
- ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte) do { } while (0)
-
-
/*
* Encode and decode a swap and file entry.
*/
@@ -438,6 +422,4 @@ typedef pte_t *pte_addr_t;
*/
#define HAVE_ARCH_UNMAPPED_AREA
-#include <asm-generic/pgtable.h>
-
#endif /* _XTENSA_PGTABLE_H */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fae33ddcaebb..98515c24d9b2 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
@@ -22,7 +23,6 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 3edecc41ef8c..b7fe6f443b42 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/rcupdate.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 145742d70a9f..b4c07bd890fe 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -33,7 +33,6 @@
#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
static int gpr_get(struct task_struct *target,
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 3880c765d448..d9204dc2656e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -37,7 +37,6 @@
#include <asm/bootparam.h>
#include <asm/kasan.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/timex.h>
#include <asm/platform.h>
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 0976e27b8d5d..efc3a29cde80 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -34,12 +34,12 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>
@@ -479,25 +479,29 @@ void show_regs(struct pt_regs * regs)
static int show_trace_cb(struct stackframe *frame, void *data)
{
+ const char *loglvl = data;
+
if (kernel_text_address(frame->pc))
- pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
+ printk("%s [<%08lx>] %pB\n",
+ loglvl, frame->pc, (void *)frame->pc);
return 0;
}
-void show_trace(struct task_struct *task, unsigned long *sp)
+static void show_trace(struct task_struct *task, unsigned long *sp,
+ const char *loglvl)
{
if (!sp)
sp = stack_pointer(task);
- pr_info("Call Trace:\n");
- walk_stackframe(sp, show_trace_cb, NULL);
+ printk("%sCall Trace:\n", loglvl);
+ walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
size_t len;
@@ -507,11 +511,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
- pr_info("Stack:\n");
- print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
+ printk("%sStack:\n", loglvl);
+ print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
sp, len, false);
- show_trace(task, sp);
+ show_trace(task, sp, loglvl);
}
DEFINE_SPINLOCK(die_lock);
@@ -530,7 +534,7 @@ void die(const char * str, struct pt_regs * regs, long err)
pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
show_regs(regs);
if (!user_mode(regs))
- show_stack(NULL, (unsigned long*)regs->areg[1]);
+ show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
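
The traps.c changes above thread a log level through show_stack()/show_trace() and into the stack-walk callback via its opaque data pointer, instead of hard-coding pr_info(). Here is a user-space sketch of that callback pattern; the frame structure and the walk_frames() helper are invented for illustration and only stand in for xtensa's walk_stackframe().

#include <stdio.h>

/* Hypothetical frame type and walker, standing in for walk_stackframe(). */
struct frame { unsigned long pc; };

typedef int (*frame_cb)(struct frame *frame, void *data);

static void walk_frames(struct frame *frames, int n, frame_cb fn, void *data)
{
	for (int i = 0; i < n; i++)
		if (fn(&frames[i], data))
			break;
}

/* The callback receives the log level through the opaque data pointer. */
static int show_frame(struct frame *frame, void *data)
{
	const char *loglvl = data;

	printf("%s [<%08lx>]\n", loglvl, frame->pc);
	return 0;
}

static void show_trace(struct frame *frames, int n, const char *loglvl)
{
	printf("%sCall Trace:\n", loglvl);
	walk_frames(frames, n, show_frame, (void *)loglvl);
}

int main(void)
{
	struct frame frames[] = { { 0xd0001234 }, { 0xd0005678 } };

	/* In the kernel the caller would pass KERN_INFO etc.; "" works here. */
	show_trace(frames, 2, "");
	return 0;
}
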
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 95ad1e773991..1a7538ccfc5a 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -43,11 +43,11 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index b27359e2a464..2369433b734a 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -24,6 +24,7 @@
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
+#include <linux/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
@@ -31,7 +32,6 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
/*
* Note:
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index e7172bd53ced..c4decc73fd86 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -74,7 +74,7 @@ void do_page_fault(struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -130,7 +130,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -139,7 +139,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (flags & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
@@ -152,7 +152,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
current->thread.bad_vaddr = address;
current->thread.error_code = is_write;
@@ -167,7 +167,7 @@ bad_area:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -175,7 +175,7 @@ out_of_memory:
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel
* or user mode.
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 184ceadccc1a..673196fe862e 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -37,29 +37,24 @@ static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
color;
}
-void *kmap_atomic(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
enum fixed_addresses idx;
unsigned long vaddr;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
idx = kmap_idx(kmap_atomic_idx_push(),
DCACHE_ALIAS(page_to_phys(page)));
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
- set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
+ set_pte(kmap_pte + idx, mk_pte(page, prot));
return (void *)vaddr;
}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
if (kvaddr >= (void *)FIXADDR_START &&
kvaddr < (void *)FIXADDR_TOP) {
@@ -78,18 +73,19 @@ void __kunmap_atomic(void *kvaddr)
kmap_atomic_idx_pop();
}
-
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
void __init kmap_init(void)
{
unsigned long kmap_vstart;
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_pte = virt_to_kpte(kmap_vstart);
kmap_waitqueues_init();
}
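
The highmem.c hunk removes the PageHighMem short-cut and the pagefault/preempt bookkeeping from the xtensa kmap_atomic() and renames the remainder to kmap_atomic_high_prot(). The generic wrapper that now performs those steps is not part of this hunk; reconstructed from the lines deleted here, it presumably looks roughly like the kernel-style sketch below (not a stand-alone program, and not quoted from the generic header).

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	/* Only real highmem pages reach the architecture hook. */
	return kmap_atomic_high_prot(page, prot);
}
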
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index 9ea3f21d60c7..a400188c16b9 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -7,9 +7,9 @@
#include <linux/io.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
pgprot_t prot)
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index e3baa21ff24c..1fef24db2ff6 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -19,10 +19,7 @@
void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
+ pmd_t *pmd = pmd_off_k(vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
@@ -43,10 +40,7 @@ static void __init populate(void *start, void *end)
unsigned long n_pmds = n_pages / PTRS_PER_PTE;
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
+ pmd_t *pmd = pmd_off_k(vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 6aa036c427c3..25cd67debee6 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -14,8 +14,8 @@
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 37e478a27877..fd2193df8a14 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -21,10 +21,7 @@
#if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
+ pmd_t *pmd = pmd_off_k(vaddr);
pte_t *pte;
unsigned long i;
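
Both the kasan_init.c and mmu.c hunks replace the four-step pgd/p4d/pud/pmd walk with a single pmd_off_k() call. Reassembled from the lines deleted above, the helper is presumably equivalent to the following kernel-style sketch, shown only to make the shorthand readable:

static inline pmd_t *pmd_off_k(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);

	return pmd_offset(pud, vaddr);
}
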
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 3579ac0f6ec1..23632a33ed39 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -281,7 +281,6 @@ bool bio_integrity_prep(struct bio *bio)
if (ret == 0) {
printk(KERN_ERR "could not attach integrity payload\n");
- kfree(buf);
status = BLK_STS_RESOURCE;
goto err_end_io;
}
diff --git a/block/bio.c b/block/bio.c
index 5235da6434aa..a7366c02c9b5 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1434,8 +1434,7 @@ again:
}
if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
- trace_block_bio_complete(bio->bi_disk->queue, bio,
- blk_status_to_errno(bio->bi_status));
+ trace_block_bio_complete(bio->bi_disk->queue, bio);
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 96a39d0724a2..44f3d0967cb4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -191,6 +191,33 @@ found_tag:
return tag + tag_offset;
}
+bool __blk_mq_get_driver_tag(struct request *rq)
+{
+ struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
+ unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
+ bool shared = blk_mq_tag_busy(rq->mq_hctx);
+ int tag;
+
+ if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
+ bt = &rq->mq_hctx->tags->breserved_tags;
+ tag_offset = 0;
+ }
+
+ if (!hctx_may_queue(rq->mq_hctx, bt))
+ return false;
+ tag = __sbitmap_queue_get(bt);
+ if (tag == BLK_MQ_NO_TAG)
+ return false;
+
+ rq->tag = tag + tag_offset;
+ if (shared) {
+ rq->rq_flags |= RQF_MQ_INFLIGHT;
+ atomic_inc(&rq->mq_hctx->nr_active);
+ }
+ rq->mq_hctx->tags->rqs[rq->tag] = rq;
+ return true;
+}
+
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
unsigned int tag)
{
@@ -269,6 +296,7 @@ struct bt_tags_iter_data {
#define BT_TAG_ITER_RESERVED (1 << 0)
#define BT_TAG_ITER_STARTED (1 << 1)
+#define BT_TAG_ITER_STATIC_RQS (1 << 2)
static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
@@ -282,9 +310,12 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
/*
* We can hit rq == NULL here, because the tagging functions
- * test and set the bit before assining ->rqs[].
+ * test and set the bit before assigning ->rqs[].
*/
- rq = tags->rqs[bitnr];
+ if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
+ rq = tags->static_rqs[bitnr];
+ else
+ rq = tags->rqs[bitnr];
if (!rq)
return true;
if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
@@ -339,11 +370,13 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
* indicates whether or not @rq is a reserved request. Return
* true to continue iterating tags, false to stop.
* @priv: Will be passed as second argument to @fn.
+ *
+ * Caller has to pass the tag map from which requests are allocated.
*/
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv)
{
- return __blk_mq_all_tag_iter(tags, fn, priv, 0);
+ return __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}
/**
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index d38e48f2a0a4..2e4ef51cdb32 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -51,6 +51,14 @@ enum {
BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1,
};
+bool __blk_mq_get_driver_tag(struct request *rq);
+static inline bool blk_mq_get_driver_tag(struct request *rq)
+{
+ if (rq->tag != BLK_MQ_NO_TAG)
+ return true;
+ return __blk_mq_get_driver_tag(rq);
+}
+
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
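
blk_mq_get_driver_tag() is split above into an inline fast path in blk-mq-tag.h and an out-of-line __blk_mq_get_driver_tag() slow path, so requests that already carry a tag avoid a function call. A toy stand-alone sketch of that split follows; the request structure and the trivial counter-based allocator are invented for illustration and have nothing to do with the real sbitmap-based tag allocation.

#include <stdbool.h>
#include <stdio.h>

#define NO_TAG (-1)

/* Toy request; only the field the fast path looks at. */
struct request {
	int tag;
};

static int next_tag;

/* Out-of-line slow path: actually allocate a tag (toy allocator here). */
static bool slow_get_tag(struct request *rq)
{
	rq->tag = next_tag++;
	printf("slow path: allocated tag %d\n", rq->tag);
	return true;
}

/* Inline fast path: callers that already own a tag never pay for the call. */
static inline bool get_tag(struct request *rq)
{
	if (rq->tag != NO_TAG)
		return true;
	return slow_get_tag(rq);
}

int main(void)
{
	struct request rq = { .tag = NO_TAG };

	get_tag(&rq);	/* takes the slow path once */
	get_tag(&rq);	/* subsequent calls stay in the inline fast path */
	return 0;
}
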
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9a36ac1c1fa1..4f57d27bfa73 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1052,35 +1052,6 @@ static inline unsigned int queued_to_index(unsigned int queued)
return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
-bool blk_mq_get_driver_tag(struct request *rq)
-{
- struct blk_mq_alloc_data data = {
- .q = rq->q,
- .hctx = rq->mq_hctx,
- .flags = BLK_MQ_REQ_NOWAIT,
- .cmd_flags = rq->cmd_flags,
- };
- bool shared;
-
- if (rq->tag != BLK_MQ_NO_TAG)
- return true;
-
- if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
- data.flags |= BLK_MQ_REQ_RESERVED;
-
- shared = blk_mq_tag_busy(data.hctx);
- rq->tag = blk_mq_get_tag(&data);
- if (rq->tag >= 0) {
- if (shared) {
- rq->rq_flags |= RQF_MQ_INFLIGHT;
- atomic_inc(&data.hctx->nr_active);
- }
- data.hctx->tags->rqs[rq->tag] = rq;
- }
-
- return rq->tag != BLK_MQ_NO_TAG;
-}
-
static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
int flags, void *key)
{
diff --git a/block/blk-mq.h b/block/blk-mq.h
index a139b0631817..b3ce0f3a2ad2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -44,7 +44,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
-bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start);
diff --git a/block/blk.h b/block/blk.h
index aa16e524dc35..b5d1f0fc6547 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -420,9 +420,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ preempt_disable();
write_seqcount_begin(&part->nr_sects_seq);
part->nr_sects = size;
write_seqcount_end(&part->nr_sects_seq);
+ preempt_enable();
#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
part->nr_sects = size;
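
The blk.h hunk wraps the 32-bit/SMP write_seqcount_begin()/write_seqcount_end() pair in preempt_disable()/preempt_enable(): if the writer were preempted while the sequence count is odd, a reader on the same CPU could spin in its retry loop indefinitely. Below is a toy, single-threaded sketch of the odd/even retry protocol; the real seqcount_t additionally inserts the required memory barriers, which are omitted here.

#include <stdint.h>
#include <stdio.h>

static unsigned int seq;
static uint64_t nr_sects;

static void write_nr_sects(uint64_t size)
{
	/*
	 * The kernel hunk brackets this with preempt_disable()/enable():
	 * a writer preempted here, with an odd count, would leave readers
	 * on the same CPU spinning in their retry loop.
	 */
	seq++;			/* odd: write in progress */
	nr_sects = size;
	seq++;			/* even: write complete */
}

static uint64_t read_nr_sects(void)
{
	unsigned int start;
	uint64_t val;

	do {
		start = seq;
		val = nr_sects;
	} while ((start & 1) || start != seq);

	return val;
}

int main(void)
{
	write_nr_sects(123456);
	printf("nr_sects = %llu\n", (unsigned long long)read_nr_sects());
	return 0;
}
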
diff --git a/crypto/Kconfig b/crypto/Kconfig
index d5daf35431e3..091c0a0bbf26 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -316,7 +316,6 @@ config CRYPTO_AEGIS128
config CRYPTO_AEGIS128_SIMD
bool "Support SIMD acceleration for AEGIS-128"
depends on CRYPTO_AEGIS128 && ((ARM || ARM64) && KERNEL_MODE_NEON)
- depends on !ARM || CC_IS_CLANG || GCC_VERSION >= 40800
default y
config CRYPTO_AEGIS128_AESNI_SSE2
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index f2df416d0d2d..d41eb9e67500 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -51,6 +51,8 @@ static acpi_adr_space_type acpi_gbl_space_id_list[] = {
ACPI_ADR_SPACE_IPMI,
ACPI_ADR_SPACE_GPIO,
ACPI_ADR_SPACE_GSBUS,
+ ACPI_ADR_SPACE_PLATFORM_COMM,
+ ACPI_ADR_SPACE_PLATFORM_RT,
ACPI_ADR_SPACE_DATA_TABLE,
ACPI_ADR_SPACE_FIXED_HARDWARE
};
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 177ab88d95de..ed9aedf604a1 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,8 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PCC" /* 0x0A */
+ "PCC", /* 0x0A */
+ "PlatformRtMechanism" /* 0x0B */
};
const char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index b44b12a931e7..94d91c67aeae 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
* possibly drop references to the power resources in use.
*/
state = ACPI_STATE_D3_HOT;
- /* If _PR3 is not available, use D3hot as the target state. */
+ /* If D3cold is not supported, use D3hot as the target state. */
if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid)
target_state = state;
} else if (!device->power.states[state].flags.valid) {
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 47b4969d9b93..5be5a977da1b 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -35,6 +35,7 @@ int pxm_to_node(int pxm)
return NUMA_NO_NODE;
return pxm_to_node_map[pxm];
}
+EXPORT_SYMBOL(pxm_to_node);
int node_to_pxm(int node)
{
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index 6b347d9920cc..54b36b7ad47d 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -29,7 +29,7 @@ struct mcfg_fixup {
u32 oem_revision;
u16 segment;
struct resource bus_range;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
struct resource cfgres;
};
@@ -165,7 +165,7 @@ static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops)
+ const struct pci_ecam_ops **ecam_ops)
{
#ifdef CONFIG_PCI_QUIRKS
u16 segment = root->segment;
@@ -191,9 +191,9 @@ static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
static LIST_HEAD(pci_mcfg_list);
int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops)
+ const struct pci_ecam_ops **ecam_ops)
{
- struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
+ const struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
struct mcfg_entry *e;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ac8ad6cb82aa..f90e841c59f5 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -483,13 +483,8 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
if (IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
control |= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
- if (pci_aer_available()) {
- if (aer_acpi_firmware_first())
- dev_info(&device->dev,
- "PCIe AER handled by firmware\n");
- else
- control |= OSC_PCI_EXPRESS_AER_CONTROL;
- }
+ if (pci_aer_available())
+ control |= OSC_PCI_EXPRESS_AER_CONTROL;
/*
* Per the Downstream Port Containment Related Enhancements ECN to
@@ -938,7 +933,7 @@ struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
* assignments made by firmware for this host bridge.
*/
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 1,
- IGNORE_PCI_BOOT_CONFIG_DSM, NULL);
+ DSM_PCI_PRESERVE_BOOT_CONFIG, NULL);
if (obj && obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 0)
host_bridge->preserve_config = 1;
ACPI_FREE(obj);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5287ab98b8c1..8777faced51a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -15,8 +15,7 @@
#include <linux/nls.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/x86/apple.h>
-
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "internal.h"
@@ -919,12 +918,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
if (buffer.length && package
&& package->type == ACPI_TYPE_PACKAGE
- && package->package.count) {
- int err = acpi_extract_power_resources(package, 0,
- &ps->resources);
- if (!err)
- device->power.flags.power_resources = 1;
- }
+ && package->package.count)
+ acpi_extract_power_resources(package, 0, &ps->resources);
+
ACPI_FREE(buffer.pointer);
}
@@ -971,14 +967,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_init_power_state(device, i);
INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
- if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
- /* Set defaults for D0 and D3hot states (always valid) */
+ /* Set the defaults for D0 and D3hot (always supported). */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
+ /*
+ * Use power resources only if the D0 list of them is populated, because
+ * some platforms may provide _PR3 only to indicate D3cold support and
+ * in those cases the power resources list returned by it may be bogus.
+ */
+ if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
+ device->power.flags.power_resources = 1;
+ /*
+ * D3cold is supported if the D3hot list of power resources is
+ * not empty.
+ */
+ if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
+ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+ }
+
if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
}
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 8558b629880b..ecc304149067 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -505,7 +505,7 @@ static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
-static void amba_deferred_retry_func(struct work_struct *dummy)
+static int amba_deferred_retry(void)
{
struct deferred_device *ddev, *tmp;
@@ -521,11 +521,19 @@ static void amba_deferred_retry_func(struct work_struct *dummy)
kfree(ddev);
}
+ mutex_unlock(&deferred_devices_lock);
+
+ return 0;
+}
+late_initcall(amba_deferred_retry);
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+ amba_deferred_retry();
+
if (!list_empty(&deferred_devices))
schedule_delayed_work(&deferred_retry_work,
DEFERRED_DEVICE_TIMEOUT);
-
- mutex_unlock(&deferred_devices_lock);
}
/**
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 2d8b9b91dee0..42c672f1584e 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = alloc->vma;
}
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return 0;
@@ -303,7 +303,7 @@ err_page_ptr_cleared:
}
err_no_vma:
if (mm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
@@ -932,8 +932,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_read_mmap_sem_failed;
+ if (!mmap_read_trylock(mm))
+ goto err_mmap_read_lock_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -946,7 +946,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -960,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_read_mmap_sem_failed:
+err_mmap_read_lock_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed:
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 9ecad74183a3..7cf566aafe1f 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -650,7 +650,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct binderfs_info *info;
struct binderfs_mount_opts *ctx = fc->fs_private;
struct inode *inode = NULL;
- struct binderfs_device device_info = { 0 };
+ struct binderfs_device device_info = {};
const char *name;
size_t len;
@@ -747,7 +747,7 @@ static const struct fs_context_operations binderfs_fs_context_ops = {
static int binderfs_init_fs_context(struct fs_context *fc)
{
- struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_mount_opts *ctx;
ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
if (!ctx)
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 8fbd36eb8941..f4ad7ce25ae8 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/firmware.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/string.h>
#include <asm/page.h>
@@ -40,7 +41,6 @@
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
-#include <asm/pgtable.h>
#endif
#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 5f0bc74d2409..8d7001712062 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -149,8 +149,9 @@ config DEBUG_TEST_DRIVER_REMOVE
test this functionality.
config PM_QOS_KUNIT_TEST
- bool "KUnit Test for PM QoS features"
+ bool "KUnit Test for PM QoS features" if !KUNIT_ALL_TESTS
depends on KUNIT=y
+ default KUNIT_ALL_TESTS
config HMEM_REPORTING
bool
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 40fb069a8a7e..95c22c0f9036 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -153,6 +153,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
+extern void driver_deferred_probe_force_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index de808c5a187b..67d39a90b45c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -49,6 +49,9 @@ static LIST_HEAD(wait_for_suppliers);
static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
+static unsigned int defer_fw_devlink_count;
+static DEFINE_MUTEX(defer_fw_devlink_lock);
+static bool fw_devlink_is_permissive(void);
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
@@ -529,7 +532,7 @@ static void device_link_add_missing_supplier_links(void)
int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
if (!ret)
list_del_init(&dev->links.needs_suppliers);
- else if (ret != -ENODEV)
+ else if (ret != -ENODEV || fw_devlink_is_permissive())
dev->links.need_for_probe = false;
}
mutex_unlock(&wfs_lock);
@@ -643,9 +646,17 @@ static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
- list_for_each_entry(link, &dev->links.suppliers, c_node)
- if (link->status == DL_STATE_CONSUMER_PROBE)
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->status != DL_STATE_CONSUMER_PROBE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+ }
}
/**
@@ -684,11 +695,11 @@ int device_links_check_suppliers(struct device *dev)
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
- if (!(link->flags & DL_FLAG_MANAGED) ||
- link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->status != DL_STATE_AVAILABLE) {
+ if (link->status != DL_STATE_AVAILABLE &&
+ !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
ret = -EPROBE_DEFER;
break;
@@ -949,11 +960,21 @@ static void __device_links_no_driver(struct device *dev)
if (!(link->flags & DL_FLAG_MANAGED))
continue;
- if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
device_link_drop_managed(link);
- else if (link->status == DL_STATE_CONSUMER_PROBE ||
- link->status == DL_STATE_ACTIVE)
+ continue;
+ }
+
+ if (link->status != DL_STATE_CONSUMER_PROBE &&
+ link->status != DL_STATE_ACTIVE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
}
dev->links.status = DL_DEV_NO_DRIVER;
@@ -1162,6 +1183,150 @@ static void device_links_purge(struct device *dev)
device_links_write_unlock();
}
+static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+static int __init fw_devlink_setup(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "off") == 0) {
+ fw_devlink_flags = 0;
+ } else if (strcmp(arg, "permissive") == 0) {
+ fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+ } else if (strcmp(arg, "on") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
+ } else if (strcmp(arg, "rpm") == 0) {
+ fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
+ DL_FLAG_PM_RUNTIME;
+ }
+ return 0;
+}
+early_param("fw_devlink", fw_devlink_setup);
+
+u32 fw_devlink_get_flags(void)
+{
+ return fw_devlink_flags;
+}
+
+static bool fw_devlink_is_permissive(void)
+{
+ return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
+static void fw_devlink_link_device(struct device *dev)
+{
+ int fw_ret;
+
+ if (!fw_devlink_flags)
+ return;
+
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count)
+ device_link_add_missing_supplier_links();
+
+ /*
+ * The device's fwnode not having add_links() doesn't affect whether other
+ * consumers can find this device as a supplier. So, this check is
+ * intentionally placed after device_link_add_missing_supplier_links().
+ */
+ if (!fwnode_has_op(dev->fwnode, add_links))
+ goto out;
+
+ /*
+ * If fw_devlink is being deferred, assume all devices have mandatory
+ * suppliers they need to link to later. Then, when the fw_devlink is
+ * resumed, all these devices will get a chance to try and link to any
+ * suppliers they have.
+ */
+ if (!defer_fw_devlink_count) {
+ fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
+ if (fw_ret == -ENODEV && fw_devlink_is_permissive())
+ fw_ret = -EAGAIN;
+ } else {
+ fw_ret = -ENODEV;
+ }
+
+ if (fw_ret == -ENODEV)
+ device_link_wait_for_mandatory_supplier(dev);
+ else if (fw_ret)
+ device_link_wait_for_optional_supplier(dev);
+
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/**
+ * fw_devlink_pause - Pause parsing of fwnode to create device links
+ *
+ * Calling this function defers any fwnode parsing to create device links until
+ * fw_devlink_resume() is called. Both these functions are ref counted and the
+ * caller needs to match the calls.
+ *
+ * While fw_devlink is paused:
+ * - Any device that is added won't have its fwnode parsed to create device
+ * links.
+ * - The probe of the device will also be deferred during this period.
+ * - Any devices that were already added but are waiting for suppliers won't
+ * be able to link to newly added devices.
+ *
+ * Once fw_devlink_resume() is called:
+ * - All the fwnodes that were not parsed will be parsed.
+ * - All the devices whose probing was deferred will be reattempted if they
+ * aren't waiting for any more suppliers.
+ *
+ * This pair of functions is mainly meant to optimize the parsing of fwnodes
+ * when a lot of devices that need to link to each other are added in a short
+ * interval of time. For example, adding all the top level devices in a system.
+ *
+ * For example, if N devices are added and:
+ * - All the consumers are added before their suppliers
+ * - All the suppliers of the N devices are part of the N devices
+ *
+ * Then:
+ *
+ * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device
+ * will only need one parsing of its fwnode because it is guaranteed to find
+ * all the supplier devices already registered and ready to link to. It won't
+ * have to do another pass later to find one or more suppliers it couldn't
+ * find in the first parse of the fwnode. So, we'll only need O(N) fwnode
+ * parses.
+ *
+ * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would
+ * end up doing O(N^2) parses of fwnodes because every device that's added is
+ * guaranteed to trigger a parse of the fwnode of every device added before
+ * it. This O(N^2) parse is made worse by the fact that when a fwnode of a
+ * device is parsed, all its descendant devices might need to have their
+ * fwnodes parsed too (even if the devices themselves aren't added).
+ */
+void fw_devlink_pause(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ defer_fw_devlink_count++;
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/**
+ * fw_devlink_resume - Resume parsing of fwnode to create device links
+ *
+ * This function is used in conjunction with fw_devlink_pause() and is ref
+ * counted. See documentation for fw_devlink_pause() for more details.
+ */
+void fw_devlink_resume(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count) {
+ WARN(true, "Unmatched fw_devlink pause/resume!");
+ goto out;
+ }
+
+ defer_fw_devlink_count--;
+ if (defer_fw_devlink_count)
+ goto out;
+
+ device_link_add_missing_supplier_links();
+ driver_deferred_probe_force_trigger();
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+}
/* Device links support end. */
int (*platform_notify)(struct device *dev) = NULL;
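The fw_devlink_pause()/fw_devlink_resume() pair added above is intended to bracket bulk device registration. A minimal usage sketch follows (editor's illustration, not part of this patch; my_populate_devices() is a hypothetical helper that registers many top-level devices):

static int __init my_bus_init(void)
{
	int ret;

	/* Defer fwnode parsing while the devices are being added. */
	fw_devlink_pause();
	ret = my_populate_devices();	/* hypothetical: registers N devices */
	/* Parse each fwnode once and retrigger any deferred probes. */
	fw_devlink_resume();

	return ret;
}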
@@ -2364,36 +2529,6 @@ static int device_private_init(struct device *dev)
return 0;
}
-static u32 fw_devlink_flags;
-static int __init fw_devlink_setup(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (strcmp(arg, "off") == 0) {
- fw_devlink_flags = 0;
- } else if (strcmp(arg, "permissive") == 0) {
- fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
- } else if (strcmp(arg, "on") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
- } else if (strcmp(arg, "rpm") == 0) {
- fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER |
- DL_FLAG_PM_RUNTIME;
- }
- return 0;
-}
-early_param("fw_devlink", fw_devlink_setup);
-
-u32 fw_devlink_get_flags(void)
-{
- return fw_devlink_flags;
-}
-
-static bool fw_devlink_is_permissive(void)
-{
- return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
-}
-
/**
* device_add - add device to device hierarchy.
* @dev: device.
@@ -2426,9 +2561,8 @@ int device_add(struct device *dev)
struct device *parent;
struct kobject *kobj;
struct class_interface *class_intf;
- int error = -EINVAL, fw_ret;
+ int error = -EINVAL;
struct kobject *glue_dir = NULL;
- bool is_fwnode_dev = false;
dev = get_device(dev);
if (!dev)
@@ -2526,11 +2660,6 @@ int device_add(struct device *dev)
kobject_uevent(&dev->kobj, KOBJ_ADD);
- if (dev->fwnode && !dev->fwnode->dev) {
- dev->fwnode->dev = dev;
- is_fwnode_dev = true;
- }
-
/*
* Check if any of the other devices (consumers) have been waiting for
* this device (supplier) to be added so that they can create a device
@@ -2539,19 +2668,13 @@ int device_add(struct device *dev)
* This needs to happen after device_pm_add() because device_link_add()
* requires the supplier be registered before it's called.
*
- * But this also needs to happe before bus_probe_device() to make sure
+ * But this also needs to happen before bus_probe_device() to make sure
* waiting consumers can link to it before the driver is bound to the
* device and the driver sync_state callback is called for this device.
*/
- device_link_add_missing_supplier_links();
-
- if (fw_devlink_flags && is_fwnode_dev &&
- fwnode_has_op(dev->fwnode, add_links)) {
- fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
- if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
- device_link_wait_for_mandatory_supplier(dev);
- else if (fw_ret)
- device_link_wait_for_optional_supplier(dev);
+ if (dev->fwnode && !dev->fwnode->dev) {
+ dev->fwnode->dev = dev;
+ fw_devlink_link_device(dev);
}
bus_probe_device(dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 9a1c00fbbaef..d2136ab9b14a 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -562,6 +562,12 @@ ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_srbds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -570,6 +576,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -580,6 +587,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_mds.attr,
&dev_attr_tsx_async_abort.attr,
&dev_attr_itlb_multihit.attr,
+ &dev_attr_srbds.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 94037be7f5d7..9a1d940342ac 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -164,6 +164,11 @@ static void driver_deferred_probe_trigger(void)
if (!driver_deferred_probe_enable)
return;
+ driver_deferred_probe_force_trigger();
+}
+
+void driver_deferred_probe_force_trigger(void)
+{
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
@@ -254,12 +259,12 @@ __setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
int driver_deferred_probe_check_state(struct device *dev)
{
if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
- dev_warn(dev, "ignoring dependency for device, assuming no driver");
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
return -ENODEV;
}
if (!driver_deferred_probe_timeout && initcalls_done) {
- dev_warn(dev, "deferred probe timeout, ignoring dependency");
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
return -ETIMEDOUT;
}
@@ -275,7 +280,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
flush_work(&deferred_probe_work);
list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
- dev_info(private->device, "deferred probe pending");
+ dev_info(private->device, "deferred probe pending\n");
wake_up(&probe_timeout_waitqueue);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
@@ -336,7 +341,7 @@ bool device_is_bound(struct device *dev)
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- printk(KERN_WARNING "%s: device %s already bound\n",
+ pr_warn("%s: device %s already bound\n",
__func__, kobject_name(&dev->kobj));
return;
}
@@ -505,8 +510,8 @@ re_probe:
}
if (driver_sysfs_add(dev)) {
- printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ pr_err("%s: driver_sysfs_add(%s) failed\n",
+ __func__, dev_name(dev));
goto probe_failed;
}
@@ -597,9 +602,8 @@ pinctrl_bind_failed:
break;
default:
/* driver matched but the probe failed */
- printk(KERN_WARNING
- "%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ pr_warn("%s: probe of %s failed with error %d\n",
+ drv->name, dev_name(dev), ret);
}
/*
* Ignore errors returned by ->probe so that the next driver can try
@@ -624,8 +628,8 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
ret = really_probe(dev, drv);
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
- printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
- dev_name(dev), ret, (s64) ktime_to_us(delta));
+ pr_debug("probe of %s returned %d after %lld usecs\n",
+ dev_name(dev), ret, (s64) ktime_to_us(delta));
return ret;
}
@@ -713,8 +717,7 @@ static inline bool cmdline_requested_async_probing(const char *drv_name)
static int __init save_async_options(char *buf)
{
if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
- printk(KERN_WARNING
- "Too long list of driver names for 'driver_async_probe'!\n");
+ pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
return 0;
@@ -789,7 +792,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
@@ -1022,7 +1025,7 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
driver_deferred_probe_add(dev);
} else if (ret < 0) {
- dev_dbg(dev, "Bus failed to match device: %d", ret);
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
} /* ret > 0 means positive match */
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index 1e9c96e3ed63..5327bfc6ba71 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -9,6 +9,7 @@
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "fallback.h"
#include "firmware.h"
@@ -17,6 +18,8 @@
* firmware fallback mechanism
*/
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
extern struct firmware_fallback_config fw_fallback_config;
/* These getters are vetted to use int properly */
@@ -460,7 +463,7 @@ static const struct attribute_group *fw_dev_attr_groups[] = {
static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
- struct device *device, enum fw_opt opt_flags)
+ struct device *device, u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
struct device *f_dev;
@@ -493,7 +496,7 @@ exit:
* In charge of constructing a sysfs fallback interface for firmware loading.
**/
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
- enum fw_opt opt_flags, long timeout)
+ u32 opt_flags, long timeout)
{
int retval = 0;
struct device *f_dev = &fw_sysfs->dev;
@@ -547,7 +550,7 @@ err_put_dev:
static int fw_load_from_user_helper(struct firmware *firmware,
const char *name, struct device *device,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct fw_sysfs *fw_sysfs;
long timeout;
@@ -588,7 +591,7 @@ out_unlock:
return ret;
}
-static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
+static bool fw_force_sysfs_fallback(u32 opt_flags)
{
if (fw_fallback_config.force_sysfs_fallback)
return true;
@@ -597,7 +600,7 @@ static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
return true;
}
-static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
+static bool fw_run_sysfs_fallback(u32 opt_flags)
{
int ret;
@@ -640,7 +643,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
**/
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret)
{
if (!fw_run_sysfs_fallback(opt_flags))
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 06f4577733a8..2afdb6adb23f 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -33,7 +33,7 @@ struct firmware_fallback_config {
#ifdef CONFIG_FW_LOADER_USER_HELPER
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret);
void kill_pending_fw_fallback_reqs(bool only_kill_custom);
@@ -45,7 +45,7 @@ void unregister_sysfs_loader(void);
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
- enum fw_opt opt_flags,
+ u32 opt_flags,
int ret)
{
/* Keep carrying over the same error */
@@ -67,10 +67,10 @@ static inline void unregister_sysfs_loader(void)
#endif /* CONFIG_FW_LOADER_USER_HELPER */
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
-int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags);
+int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags);
#else
static inline int firmware_fallback_platform(struct fw_priv *fw_priv,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
return -ENOENT;
}
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
index c88c745590fe..cdd2c9a9f38a 100644
--- a/drivers/base/firmware_loader/fallback_platform.c
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -8,7 +8,7 @@
#include "fallback.h"
#include "firmware.h"
-int firmware_fallback_platform(struct fw_priv *fw_priv, enum fw_opt opt_flags)
+int firmware_fallback_platform(struct fw_priv *fw_priv, u32 opt_flags)
{
const u8 *data;
size_t size;
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index a182e318bd09..46a731dede6f 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -21,7 +21,7 @@ struct firmware_fallback_config fw_fallback_config = {
.loading_timeout = 60,
.old_timeout = 60,
};
-EXPORT_SYMBOL_GPL(fw_fallback_config);
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
struct ctl_table firmware_config_table[] = {
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 25836a6afc9f..933e2192fbe8 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -136,8 +136,7 @@ static inline void fw_state_done(struct fw_priv *fw_priv)
__fw_state_set(fw_priv, FW_STATUS_DONE);
}
-int assign_fw(struct firmware *fw, struct device *device,
- enum fw_opt opt_flags);
+int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
#ifdef CONFIG_FW_LOADER_PAGED_BUF
void fw_free_paged_buf(struct fw_priv *fw_priv);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 76f79913916d..ca871b13524e 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -210,7 +210,7 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
static int alloc_lookup_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
struct fw_priv **fw_priv, void *dbuf,
- size_t size, enum fw_opt opt_flags)
+ size_t size, u32 opt_flags)
{
struct fw_priv *tmp;
@@ -548,9 +548,6 @@ static void firmware_free_data(const struct firmware *fw)
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
fw->priv = fw_priv;
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- fw->pages = fw_priv->pages;
-#endif
fw->size = fw_priv->size;
fw->data = fw_priv->data;
@@ -635,8 +632,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
}
#endif
-int assign_fw(struct firmware *fw, struct device *device,
- enum fw_opt opt_flags)
+int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags)
{
struct fw_priv *fw_priv = fw->priv;
int ret;
@@ -687,7 +683,7 @@ int assign_fw(struct firmware *fw, struct device *device,
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
struct device *device, void *dbuf, size_t size,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct firmware *firmware;
struct fw_priv *fw_priv;
@@ -753,7 +749,7 @@ static void fw_abort_batch_reqs(struct firmware *fw)
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
struct device *device, void *buf, size_t size,
- enum fw_opt opt_flags)
+ u32 opt_flags)
{
struct firmware *fw = NULL;
int ret;
@@ -990,7 +986,7 @@ struct firmware_work {
struct device *device;
void *context;
void (*cont)(const struct firmware *fw, void *context);
- enum fw_opt opt_flags;
+ u32 opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 615c6b06b427..c0d0a5490ac6 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -153,23 +153,24 @@ EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
* if (irq < 0)
* return irq;
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
{
+ int ret;
#ifdef CONFIG_SPARC
/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
if (!dev || num >= dev->archdata.num_irqs)
return -ENXIO;
- return dev->archdata.irqs[num];
+ ret = dev->archdata.irqs[num];
+ goto out;
#else
struct resource *r;
- int ret;
if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
ret = of_irq_get(dev->dev.of_node, num);
if (ret > 0 || ret == -EPROBE_DEFER)
- return ret;
+ goto out;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
@@ -177,7 +178,7 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
if (r && r->flags & IORESOURCE_DISABLED) {
ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
if (ret)
- return ret;
+ goto out;
}
}
@@ -191,13 +192,17 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
struct irq_data *irqd;
irqd = irq_get_irq_data(r->start);
- if (!irqd)
- return -ENXIO;
+ if (!irqd) {
+ ret = -ENXIO;
+ goto out;
+ }
irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
}
- if (r)
- return r->start;
+ if (r) {
+ ret = r->start;
+ goto out;
+ }
/*
* For the index 0 interrupt, allow falling back to GpioInt
@@ -210,11 +215,14 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
- return ret;
+ goto out;
}
- return -ENXIO;
+ ret = -ENXIO;
#endif
+out:
+ WARN(ret == 0, "0 is an invalid IRQ number\n");
+ return ret;
}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
@@ -233,7 +241,7 @@ EXPORT_SYMBOL_GPL(platform_get_irq_optional);
* if (irq < 0)
* return irq;
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
@@ -305,8 +313,10 @@ static int __platform_get_irq_byname(struct platform_device *dev,
}
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
- if (r)
+ if (r) {
+ WARN(r->start == 0, "0 is an invalid IRQ number\n");
return r->start;
+ }
return -ENXIO;
}
@@ -318,7 +328,7 @@ static int __platform_get_irq_byname(struct platform_device *dev,
*
* Get an IRQ like platform_get_irq(), but then by name rather then by index.
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
@@ -340,7 +350,7 @@ EXPORT_SYMBOL_GPL(platform_get_irq_byname);
* Get an optional IRQ by name like platform_get_irq_byname(). Except that it
* does not print an error message if an IRQ can not be obtained.
*
- * Return: IRQ number on success, negative error number on failure.
+ * Return: non-zero IRQ number on success, negative error number on failure.
*/
int platform_get_irq_byname_optional(struct platform_device *dev,
const char *name)
@@ -672,7 +682,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo)
{
- int ret = -ENOMEM;
+ int ret;
struct platform_device *pdev;
pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
@@ -853,6 +863,8 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
retval = code = __platform_driver_register(drv, module);
+ if (retval)
+ return retval;
/*
* Fixup that section violation, being paranoid about code scanning
@@ -977,7 +989,7 @@ EXPORT_SYMBOL_GPL(__platform_register_drivers);
* @drivers: an array of drivers to unregister
* @count: the number of drivers to unregister
*
- * Unegisters platform drivers specified by an array. This is typically used
+ * Unregisters platform drivers specified by an array. This is typically used
* to complement an earlier call to platform_register_drivers(). Drivers are
* unregistered in the reverse order in which they were registered.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index bb98b813554f..9dd85bea4026 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -519,7 +519,7 @@ static void dpm_watchdog_handler(struct timer_list *t)
struct dpm_watchdog *wd = from_timer(wd, t, timer);
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
- show_stack(wd->tsk, NULL);
+ show_stack(wd->tsk, NULL, KERN_EMERG);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 5f35c0ccf5e0..1e6d75e65938 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -708,14 +708,23 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct fwnode_handle *fwnode = NULL;
+ struct fwnode_handle *fwnode = NULL, *next;
if (dev->of_node)
fwnode = &dev->of_node->fwnode;
else if (adev)
fwnode = acpi_fwnode_handle(adev);
- return fwnode_get_next_child_node(fwnode, child);
+ /* Try to find a child in primary fwnode */
+ next = fwnode_get_next_child_node(fwnode, child);
+ if (next)
+ return next;
+
+ /* When no more children in primary, continue with secondary */
+ if (!IS_ERR_OR_NULL(fwnode->secondary))
+ next = fwnode_get_next_child_node(fwnode->secondary, child);
+
+ return next;
}
EXPORT_SYMBOL_GPL(device_get_next_child_node);
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 4af11a423475..a5bae551167d 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -46,7 +46,7 @@ static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
if ((attr == &dev_attr_machine.attr)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 2079937ddb51..e5eb27375416 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -712,17 +712,18 @@ EXPORT_SYMBOL_GPL(software_node_register_nodes);
* @nodes: Zero terminated array of software nodes to be unregistered
*
* Unregister multiple software nodes at once.
+ *
+ * NOTE: Be careful using this call if the nodes had parent pointers set up in
+ * them before registering. If so, it is wiser to remove the nodes
+ * individually, in the correct order (child before parent), instead of
+ * relying on the order of the nodes in the array.
*/
void software_node_unregister_nodes(const struct software_node *nodes)
{
- struct swnode *swnode;
int i;
- for (i = 0; nodes[i].name; i++) {
- swnode = software_node_to_swnode(&nodes[i]);
- if (swnode)
- fwnode_remove_software_node(&swnode->fwnode);
- }
+ for (i = 0; nodes[i].name; i++)
+ software_node_unregister(&nodes[i]);
}
EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
@@ -789,6 +790,20 @@ int software_node_register(const struct software_node *node)
}
EXPORT_SYMBOL_GPL(software_node_register);
+/**
+ * software_node_unregister - Unregister static software node
+ * @node: The software node to be unregistered
+ */
+void software_node_unregister(const struct software_node *node)
+{
+ struct swnode *swnode;
+
+ swnode = software_node_to_swnode(node);
+ if (swnode)
+ fwnode_remove_software_node(&swnode->fwnode);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister);
+
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent)
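The note on software_node_unregister_nodes() above recommends tearing down nodes that have parent pointers one by one, child before parent, using the software_node_unregister() helper introduced here. A minimal sketch (editor's illustration, not part of this patch; the node names are made up):

static const struct software_node example_parent = { .name = "example-parent" };
static const struct software_node example_child = {
	.name = "example-child",
	.parent = &example_parent,
};

static void example_nodes_teardown(void)
{
	/* Unregister the child before its parent. */
	software_node_unregister(&example_child);
	software_node_unregister(&example_parent);
}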
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 305c7751184a..ba225eb1b761 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -9,5 +9,6 @@ config TEST_ASYNC_DRIVER_PROBE
If unsure say N.
config KUNIT_DRIVER_PE_TEST
- bool "KUnit Tests for property entry API"
+ bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
depends on KUNIT=y
+ default KUNIT_ALL_TESTS
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 025b1b77b11a..084b9efcefca 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -458,4 +458,6 @@ config BLK_DEV_RSXX
To compile this driver as a module, choose M here: the
module will be called rsxx.
+source "drivers/block/rnbd/Kconfig"
+
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 795facd8cf19..e1f63117ee94 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/
+obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
null_blk-objs := null_blk_main.o
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 2e96d8b8758b..c33bbbfd1bd9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1390,7 +1390,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
goto out_unfreeze;
/* Mask out flags that can't be set using LOOP_SET_STATUS. */
- lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS;
+ lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
/* For those flags, use the previous values instead */
lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
/* For flags that can't be cleared, use previous values too */
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 0b944ac96d6b..27a33adc41e4 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1613,7 +1613,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
disc_information di;
track_information ti;
__u32 last_track;
- int ret = -1;
+ int ret;
ret = pkt_get_disc_info(pd, &di);
if (ret)
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c5c6487a19d5..7b55811c2a81 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -454,7 +454,6 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
queue->queuedata = dev;
blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
- blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 67d65ac785e9..7420648a1de6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -836,6 +836,7 @@ enum {
Opt_lock_timeout,
/* int args above */
Opt_pool_ns,
+ Opt_compression_hint,
/* string args above */
Opt_read_only,
Opt_read_write,
@@ -844,8 +845,23 @@ enum {
Opt_notrim,
};
+enum {
+ Opt_compression_hint_none,
+ Opt_compression_hint_compressible,
+ Opt_compression_hint_incompressible,
+};
+
+static const struct constant_table rbd_param_compression_hint[] = {
+ {"none", Opt_compression_hint_none},
+ {"compressible", Opt_compression_hint_compressible},
+ {"incompressible", Opt_compression_hint_incompressible},
+ {}
+};
+
static const struct fs_parameter_spec rbd_parameters[] = {
fsparam_u32 ("alloc_size", Opt_alloc_size),
+ fsparam_enum ("compression_hint", Opt_compression_hint,
+ rbd_param_compression_hint),
fsparam_flag ("exclusive", Opt_exclusive),
fsparam_flag ("lock_on_read", Opt_lock_on_read),
fsparam_u32 ("lock_timeout", Opt_lock_timeout),
@@ -867,6 +883,8 @@ struct rbd_options {
bool lock_on_read;
bool exclusive;
bool trim;
+
+ u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
@@ -2253,7 +2271,8 @@ static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
osd_req_op_alloc_hint_init(osd_req, which++,
rbd_dev->layout.object_size,
- rbd_dev->layout.object_size);
+ rbd_dev->layout.object_size,
+ rbd_dev->opts->alloc_hint_flags);
}
if (rbd_obj_is_entire(obj_req))
@@ -6331,6 +6350,29 @@ static int rbd_parse_param(struct fs_parameter *param,
pctx->spec->pool_ns = param->string;
param->string = NULL;
break;
+ case Opt_compression_hint:
+ switch (result.uint_32) {
+ case Opt_compression_hint_none:
+ opt->alloc_hint_flags &=
+ ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
+ break;
+ case Opt_compression_hint_compressible:
+ opt->alloc_hint_flags |=
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
+ opt->alloc_hint_flags &=
+ ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
+ break;
+ case Opt_compression_hint_incompressible:
+ opt->alloc_hint_flags |=
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
+ opt->alloc_hint_flags &=
+ ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
+ break;
+ default:
+ BUG();
+ }
+ break;
case Opt_read_only:
opt->read_only = true;
break;
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index ac98ab6ccd3b..a600e0eb6b6f 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -93,7 +93,7 @@ struct rbd_image_header_ondisk {
__le32 snap_count;
__le32 reserved;
__le64 snap_names_len;
- struct rbd_image_snap_ondisk snaps[0];
+ struct rbd_image_snap_ondisk snaps[];
} __attribute__((packed));
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
new file mode 100644
index 000000000000..4b6d3d816d1f
--- /dev/null
+++ b/drivers/block/rnbd/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config BLK_DEV_RNBD
+ bool
+
+config BLK_DEV_RNBD_CLIENT
+ tristate "RDMA Network Block Device driver client"
+ depends on INFINIBAND_RTRS_CLIENT
+ select BLK_DEV_RNBD
+ help
+ RNBD client is a network block device driver that uses an RDMA transport.
+
+ RNBD client allows remote block devices to be mapped over the
+ RTRS protocol from a target system where the RNBD server is running.
+
+ If unsure, say N.
+
+config BLK_DEV_RNBD_SERVER
+ tristate "RDMA Network Block Device driver server"
+ depends on INFINIBAND_RTRS_SERVER
+ select BLK_DEV_RNBD
+ help
+ RNBD server is the server side of RNBD and uses an RDMA transport.
+
+ RNBD server allows local block devices to be exported to a remote
+ client over the RTRS protocol.
+
+ If unsure, say N.
diff --git a/drivers/block/rnbd/Makefile b/drivers/block/rnbd/Makefile
new file mode 100644
index 000000000000..5bb1a7ad1ada
--- /dev/null
+++ b/drivers/block/rnbd/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+ccflags-y := -I$(srctree)/drivers/infiniband/ulp/rtrs
+
+rnbd-client-y := rnbd-clt.o \
+ rnbd-clt-sysfs.o \
+ rnbd-common.o
+
+rnbd-server-y := rnbd-common.o \
+ rnbd-srv.o \
+ rnbd-srv-dev.o \
+ rnbd-srv-sysfs.o
+
+obj-$(CONFIG_BLK_DEV_RNBD_CLIENT) += rnbd-client.o
+obj-$(CONFIG_BLK_DEV_RNBD_SERVER) += rnbd-server.o
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
new file mode 100644
index 000000000000..1773c0aa0bd4
--- /dev/null
+++ b/drivers/block/rnbd/README
@@ -0,0 +1,92 @@
+********************************
+RDMA Network Block Device (RNBD)
+********************************
+
+Introduction
+------------
+
+RNBD (RDMA Network Block Device) is a pair of kernel modules
+(client and server) that allow remote access to a block device on
+the server over the RTRS protocol, using the RDMA (InfiniBand, RoCE, iWARP)
+transport. After being mapped, the remote block devices can be accessed
+on the client side as local block devices.
+
+I/O is transferred between client and server by the RTRS transport
+modules. The administration of RNBD and RTRS modules is done via
+sysfs entries.
+
+Requirements
+------------
+
+ RTRS kernel modules
+
+Quick Start
+-----------
+
+Server side:
+ # modprobe rnbd_server
+
+Client side:
+ # modprobe rnbd_client
+ # echo "sessname=blya path=ip:10.50.100.66 device_path=/dev/ram0" > \
+ /sys/devices/virtual/rnbd-client/ctl/map_device
+
+ Where "sessname=" is a session name, a string to identify the session
+ on client and on server sides; "path=" is a destination IP address or
+ a pair of a source and a destination IPs, separated by comma. Multiple
+ "path=" options can be specified in order to use multipath (see RTRS
+ description for details); "device_path=" is the block device to be
+ mapped from the server side. After the session to the server machine is
+ established, the mapped device will appear on the client side under
+ /dev/rnbd<N>.
+
+
+RNBD-Server Module Parameters
+=============================
+
+dev_search_path
+---------------
+
+When a device is mapped from the client, the server generates the path
+to the block device on the server side by concatenating dev_search_path
+and the "device_path" that was specified in the map_device operation.
+
+The default dev_search_path is: "/".
+
+The dev_search_path option can also contain %SESSNAME% in order to provide
+different device namespaces for different sessions. See "device_path"
+option for details.
+
+============================
+Protocol (rnbd/rnbd-proto.h)
+============================
+
+1. Before mapping the first device from a given server, the client sends an
+RNBD_MSG_SESS_INFO to the server. The server responds with
+RNBD_MSG_SESS_INFO_RSP. Currently the messages only contain the protocol
+version for backward compatibility.
+
+2. The client requests to open a device by sending an RNBD_MSG_OPEN message.
+This contains the path to the device and the access mode (read-only or
+writable). The server responds with RNBD_MSG_OPEN_RSP. This contains
+a 32-bit device id to be used for IOs and device "geometry" related
+information: size, max_hw_sectors, etc.
+
+3. The client attaches RNBD_MSG_IO to each IO message sent to a device. This
+message contains the device id provided by the server in its rnbd_msg_open_rsp,
+the sector to be accessed, read-write flags and bi_size.
+
+4. The client closes a device by sending RNBD_MSG_CLOSE, which contains only the
+device id provided by the server.
+
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
new file mode 100644
index 000000000000..4f4474eecadb
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/parser.h>
+#include <linux/module.h>
+#include <linux/in6.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <rdma/ib.h>
+#include <rdma/rdma_cm.h>
+
+#include "rnbd-clt.h"
+
+static struct device *rnbd_dev;
+static struct class *rnbd_dev_class;
+static struct kobject *rnbd_devs_kobj;
+
+enum {
+ RNBD_OPT_ERR = 0,
+ RNBD_OPT_DEST_PORT = 1 << 0,
+ RNBD_OPT_PATH = 1 << 1,
+ RNBD_OPT_DEV_PATH = 1 << 2,
+ RNBD_OPT_ACCESS_MODE = 1 << 3,
+ RNBD_OPT_SESSNAME = 1 << 6,
+};
+
+static const unsigned int rnbd_opt_mandatory[] = {
+ RNBD_OPT_PATH,
+ RNBD_OPT_DEV_PATH,
+ RNBD_OPT_SESSNAME,
+};
+
+static const match_table_t rnbd_opt_tokens = {
+ {RNBD_OPT_PATH, "path=%s" },
+ {RNBD_OPT_DEV_PATH, "device_path=%s"},
+ {RNBD_OPT_DEST_PORT, "dest_port=%d" },
+ {RNBD_OPT_ACCESS_MODE, "access_mode=%s"},
+ {RNBD_OPT_SESSNAME, "sessname=%s" },
+ {RNBD_OPT_ERR, NULL },
+};
+
+struct rnbd_map_options {
+ char *sessname;
+ struct rtrs_addr *paths;
+ size_t *path_cnt;
+ char *pathname;
+ u16 *dest_port;
+ enum rnbd_access_mode *access_mode;
+};
+
+static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
+ struct rnbd_map_options *opt)
+{
+ char *options, *sep_opt;
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int opt_mask = 0;
+ int token;
+ int ret = -EINVAL;
+ int i, dest_port;
+ int p_cnt = 0;
+
+ options = kstrdup(buf, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ sep_opt = strstrip(options);
+ while ((p = strsep(&sep_opt, " ")) != NULL) {
+ if (!*p)
+ continue;
+
+ token = match_token(p, rnbd_opt_tokens, args);
+ opt_mask |= token;
+
+ switch (token) {
+ case RNBD_OPT_SESSNAME:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) > NAME_MAX) {
+ pr_err("map_device: sessname too long\n");
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ strlcpy(opt->sessname, p, NAME_MAX);
+ kfree(p);
+ break;
+
+ case RNBD_OPT_PATH:
+ if (p_cnt >= max_path_cnt) {
+ pr_err("map_device: too many (> %zu) paths provided\n",
+ max_path_cnt);
+ ret = -ENOMEM;
+ goto out;
+ }
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rtrs_addr_to_sockaddr(p, strlen(p),
+ *opt->dest_port,
+ &opt->paths[p_cnt]);
+ if (ret) {
+ pr_err("Can't parse path %s: %d\n", p, ret);
+ kfree(p);
+ goto out;
+ }
+
+ p_cnt++;
+
+ kfree(p);
+ break;
+
+ case RNBD_OPT_DEV_PATH:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (strlen(p) > NAME_MAX) {
+ pr_err("map_device: Device path too long\n");
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+ strlcpy(opt->pathname, p, NAME_MAX);
+ kfree(p);
+ break;
+
+ case RNBD_OPT_DEST_PORT:
+ if (match_int(args, &dest_port) || dest_port < 0 ||
+ dest_port > 65535) {
+ pr_err("bad destination port number parameter '%d'\n",
+ dest_port);
+ ret = -EINVAL;
+ goto out;
+ }
+ *opt->dest_port = dest_port;
+ break;
+
+ case RNBD_OPT_ACCESS_MODE:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!strcmp(p, "ro")) {
+ *opt->access_mode = RNBD_ACCESS_RO;
+ } else if (!strcmp(p, "rw")) {
+ *opt->access_mode = RNBD_ACCESS_RW;
+ } else if (!strcmp(p, "migration")) {
+ *opt->access_mode = RNBD_ACCESS_MIGRATION;
+ } else {
+ pr_err("map_device: Invalid access_mode: '%s'\n",
+ p);
+ ret = -EINVAL;
+ kfree(p);
+ goto out;
+ }
+
+ kfree(p);
+ break;
+
+ default:
+ pr_err("map_device: Unknown parameter or missing value '%s'\n",
+ p);
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rnbd_opt_mandatory); i++) {
+ if ((opt_mask & rnbd_opt_mandatory[i])) {
+ ret = 0;
+ } else {
+ pr_err("map_device: Parameters missing\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ *opt->path_cnt = p_cnt;
+ kfree(options);
+ return ret;
+}
+
+static ssize_t state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ switch (dev->dev_state) {
+ case DEV_STATE_INIT:
+ return snprintf(page, PAGE_SIZE, "init\n");
+ case DEV_STATE_MAPPED:
+ /* TODO fix cli tool before changing to proper state */
+ return snprintf(page, PAGE_SIZE, "open\n");
+ case DEV_STATE_MAPPED_DISCONNECTED:
+ /* TODO fix cli tool before changing to proper state */
+ return snprintf(page, PAGE_SIZE, "closed\n");
+ case DEV_STATE_UNMAPPED:
+ return snprintf(page, PAGE_SIZE, "unmapped\n");
+ default:
+ return snprintf(page, PAGE_SIZE, "unknown\n");
+ }
+}
+
+static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state);
+
+static ssize_t mapping_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", dev->pathname);
+}
+
+static struct kobj_attribute rnbd_clt_mapping_path_attr =
+ __ATTR_RO(mapping_path);
+
+static ssize_t access_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ rnbd_access_mode_str(dev->access_mode));
+}
+
+static struct kobj_attribute rnbd_clt_access_mode =
+ __ATTR_RO(access_mode);
+
+static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo <normal|force> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ char *opt, *options;
+ bool force;
+ int err;
+
+ opt = kstrdup(buf, GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ options = strstrip(opt);
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+ if (sysfs_streq(options, "normal")) {
+ force = false;
+ } else if (sysfs_streq(options, "force")) {
+ force = true;
+ } else {
+ rnbd_clt_err(dev,
+ "unmap_device: Invalid value: %s\n",
+ options);
+ err = -EINVAL;
+ goto out;
+ }
+
+ rnbd_clt_info(dev, "Unmapping device, option: %s.\n",
+ force ? "force" : "normal");
+
+ /*
+ * We take an explicit module reference for only one reason: to avoid
+ * racing with the lockless rnbd_destroy_sessions().
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ err = -ENODEV;
+ goto out;
+ }
+ err = rnbd_clt_unmap_device(dev, force, &attr->attr);
+ if (err) {
+ if (err != -EALREADY)
+ rnbd_clt_err(dev, "unmap_device: %d\n", err);
+ goto module_put;
+ }
+
+ /*
+ * The device may have vanished by this point!
+ */
+
+ err = count;
+
+module_put:
+ module_put(THIS_MODULE);
+out:
+ kfree(opt);
+
+ return err;
+}
+
+static struct kobj_attribute rnbd_clt_unmap_device_attr =
+ __ATTR(unmap_device, 0644, rnbd_clt_unmap_dev_show,
+ rnbd_clt_unmap_dev_store);
+
+static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo <new size in sectors> > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long sectors;
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ ret = kstrtoul(buf, 0, &sectors);
+ if (ret)
+ return ret;
+
+ ret = rnbd_clt_resize_disk(dev, (size_t)sectors);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rnbd_clt_resize_dev_attr =
+ __ATTR(resize, 0644, rnbd_clt_resize_dev_show,
+ rnbd_clt_resize_dev_store);
+
+static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo <1> > %s\n",
+ attr->attr.name);
+}
+
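+/*
+ * Writing "1" triggers a remap attempt: rnbd_clt_remap_device() re-sends
+ * an open request for a device that is mapped but currently disconnected.
+ */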
+static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ char *opt, *options;
+ int err;
+
+ opt = kstrdup(buf, GFP_KERNEL);
+ if (!opt)
+ return -ENOMEM;
+
+ options = strstrip(opt);
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+ if (!sysfs_streq(options, "1")) {
+ rnbd_clt_err(dev,
+ "remap_device: Invalid value: %s\n",
+ options);
+ err = -EINVAL;
+ goto out;
+ }
+ err = rnbd_clt_remap_device(dev);
+ if (likely(!err))
+ err = count;
+
+out:
+ kfree(opt);
+
+ return err;
+}
+
+static struct kobj_attribute rnbd_clt_remap_device_attr =
+ __ATTR(remap_device, 0644, rnbd_clt_remap_dev_show,
+ rnbd_clt_remap_dev_store);
+
+static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_clt_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", dev->sess->sessname);
+}
+
+static struct kobj_attribute rnbd_clt_session_attr =
+ __ATTR_RO(session);
+
+static struct attribute *rnbd_dev_attrs[] = {
+ &rnbd_clt_unmap_device_attr.attr,
+ &rnbd_clt_resize_dev_attr.attr,
+ &rnbd_clt_remap_device_attr.attr,
+ &rnbd_clt_mapping_path_attr.attr,
+ &rnbd_clt_state_attr.attr,
+ &rnbd_clt_session_attr.attr,
+ &rnbd_clt_access_mode.attr,
+ NULL,
+};
+
+void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
+{
+	/*
+	 * The module unload path (rnbd_client_exit) can race with a manual
+	 * unmap of the last device via sysfs, i.e. rnbd_clt_unmap_dev_store(),
+	 * which would trigger a sysfs warning because the symlink has
+	 * already been removed.
+	 */
+ if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) {
+ sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
+ module_put(THIS_MODULE);
+ }
+}
+
+static struct kobj_type rnbd_dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = rnbd_dev_attrs,
+};
+
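+/*
+ * Create the per-device "rnbd" sysfs directory below the gendisk device
+ * and populate it with the attributes defined above.
+ */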
+static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
+{
+ int ret;
+ struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
+
+ ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
+ "rnbd");
+ if (ret)
+ rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
+ ret);
+
+ return ret;
+}
+
+static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
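+/*
+ * Build the name of the symlink created under the "devices" directory of
+ * the control device from the remote device path, replacing '/' with '!'
+ * since '/' is not allowed in a sysfs entry name.
+ */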
+static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
+ size_t len)
+{
+ int ret;
+ char pathname[NAME_MAX], *s;
+
+ strlcpy(pathname, dev->pathname, sizeof(pathname));
+ while ((s = strchr(pathname, '/')))
+ s[0] = '!';
+
+ ret = snprintf(buf, len, "%s", pathname);
+ if (ret >= len)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
+static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
+{
+ struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
+ int ret;
+
+ ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
+ sizeof(dev->blk_symlink_name));
+ if (ret) {
+ rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
+ ret);
+ goto out_err;
+ }
+
+ ret = sysfs_create_link(rnbd_devs_kobj, gd_kobj,
+ dev->blk_symlink_name);
+ if (ret) {
+ rnbd_clt_err(dev, "Creating /sys/block symlink failed, err: %d\n",
+ ret);
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ dev->blk_symlink_name[0] = '\0';
+ return ret;
+}
+
+static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rnbd_clt_dev *dev;
+ struct rnbd_map_options opt;
+ int ret;
+ char pathname[NAME_MAX];
+ char sessname[NAME_MAX];
+ enum rnbd_access_mode access_mode = RNBD_ACCESS_RW;
+ u16 port_nr = RTRS_PORT;
+
+ struct sockaddr_storage *addrs;
+ struct rtrs_addr paths[6];
+ size_t path_cnt;
+
+ opt.sessname = sessname;
+ opt.paths = paths;
+ opt.path_cnt = &path_cnt;
+ opt.pathname = pathname;
+ opt.dest_port = &port_nr;
+ opt.access_mode = &access_mode;
+ addrs = kcalloc(ARRAY_SIZE(paths) * 2, sizeof(*addrs), GFP_KERNEL);
+ if (!addrs)
+ return -ENOMEM;
+
+ for (path_cnt = 0; path_cnt < ARRAY_SIZE(paths); path_cnt++) {
+ paths[path_cnt].src = &addrs[path_cnt * 2];
+ paths[path_cnt].dst = &addrs[path_cnt * 2 + 1];
+ }
+
+ ret = rnbd_clt_parse_map_options(buf, ARRAY_SIZE(paths), &opt);
+ if (ret)
+ goto out;
+
+ pr_info("Mapping device %s on session %s, (access_mode: %s)\n",
+ pathname, sessname,
+ rnbd_access_mode_str(access_mode));
+
+ dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
+ access_mode);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto out;
+ }
+
+ ret = rnbd_clt_add_dev_kobj(dev);
+ if (ret)
+ goto unmap_dev;
+
+ ret = rnbd_clt_add_dev_symlink(dev);
+ if (ret)
+ goto unmap_dev;
+
+ kfree(addrs);
+ return count;
+
+unmap_dev:
+ rnbd_clt_unmap_device(dev, true, NULL);
+out:
+ kfree(addrs);
+ return ret;
+}
+
+static struct kobj_attribute rnbd_clt_map_device_attr =
+ __ATTR(map_device, 0644,
+ rnbd_clt_map_device_show, rnbd_clt_map_device_store);
+
+static struct attribute *default_attrs[] = {
+ &rnbd_clt_map_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group default_attr_group = {
+ .attrs = default_attrs,
+};
+
+static const struct attribute_group *default_attr_groups[] = {
+ &default_attr_group,
+ NULL,
+};
+
+int rnbd_clt_create_sysfs_files(void)
+{
+ int err;
+
+ rnbd_dev_class = class_create(THIS_MODULE, "rnbd-client");
+ if (IS_ERR(rnbd_dev_class))
+ return PTR_ERR(rnbd_dev_class);
+
+ rnbd_dev = device_create_with_groups(rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL,
+ default_attr_groups, "ctl");
+ if (IS_ERR(rnbd_dev)) {
+ err = PTR_ERR(rnbd_dev);
+ goto cls_destroy;
+ }
+ rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
+ if (!rnbd_devs_kobj) {
+ err = -ENOMEM;
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+cls_destroy:
+ class_destroy(rnbd_dev_class);
+
+ return err;
+}
+
+void rnbd_clt_destroy_default_group(void)
+{
+ sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
+}
+
+void rnbd_clt_destroy_sysfs_files(void)
+{
+ kobject_del(rnbd_devs_kobj);
+ kobject_put(rnbd_devs_kobj);
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ class_destroy(rnbd_dev_class);
+}
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
new file mode 100644
index 000000000000..cc6a4e2587ae
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -0,0 +1,1729 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/scatterlist.h>
+#include <linux/idr.h>
+
+#include "rnbd-clt.h"
+
+MODULE_DESCRIPTION("RDMA Network Block Device Client");
+MODULE_LICENSE("GPL");
+
+static int rnbd_client_major;
+static DEFINE_IDA(index_ida);
+static DEFINE_MUTEX(ida_lock);
+static DEFINE_MUTEX(sess_lock);
+static LIST_HEAD(sess_list);
+
+/*
+ * Maximum number of partitions an instance can have.
+ * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
+ */
+#define RNBD_PART_BITS 6
+
+static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
+{
+ return refcount_inc_not_zero(&sess->refcount);
+}
+
+static void free_sess(struct rnbd_clt_session *sess);
+
+static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
+{
+ might_sleep();
+
+ if (refcount_dec_and_test(&sess->refcount))
+ free_sess(sess);
+}
+
+static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
+{
+ might_sleep();
+
+ if (!refcount_dec_and_test(&dev->refcount))
+ return;
+
+ mutex_lock(&ida_lock);
+ ida_simple_remove(&index_ida, dev->clt_device_id);
+ mutex_unlock(&ida_lock);
+ kfree(dev->hw_queues);
+ rnbd_clt_put_sess(dev->sess);
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
+{
+ return refcount_inc_not_zero(&dev->refcount);
+}
+
+static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
+ const struct rnbd_msg_open_rsp *rsp)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+
+ if (!rsp->logical_block_size)
+ return -EINVAL;
+
+ dev->device_id = le32_to_cpu(rsp->device_id);
+ dev->nsectors = le64_to_cpu(rsp->nsectors);
+ dev->logical_block_size = le16_to_cpu(rsp->logical_block_size);
+ dev->physical_block_size = le16_to_cpu(rsp->physical_block_size);
+ dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
+ dev->max_discard_sectors = le32_to_cpu(rsp->max_discard_sectors);
+ dev->discard_granularity = le32_to_cpu(rsp->discard_granularity);
+ dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
+ dev->secure_discard = le16_to_cpu(rsp->secure_discard);
+ dev->rotational = rsp->rotational;
+
+ dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
+ dev->max_segments = BMAX_SEGMENTS;
+
+ dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
+ le32_to_cpu(rsp->max_hw_sectors));
+ dev->max_segments = min_t(u16, dev->max_segments,
+ le16_to_cpu(rsp->max_segments));
+
+ return 0;
+}
+
+static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
+ size_t new_nsectors)
+{
+ int err = 0;
+
+ rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
+ dev->nsectors, new_nsectors);
+ dev->nsectors = new_nsectors;
+ set_capacity(dev->gd, dev->nsectors);
+ err = revalidate_disk(dev->gd);
+ if (err)
+ rnbd_clt_err(dev,
+ "Failed to change device size from %zu to %zu, err: %d\n",
+ dev->nsectors, new_nsectors, err);
+ return err;
+}
+
+static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
+ struct rnbd_msg_open_rsp *rsp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_UNMAPPED) {
+ rnbd_clt_info(dev,
+ "Ignoring Open-Response message from server for unmapped device\n");
+ err = -ENOENT;
+ goto out;
+ }
+ if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
+ u64 nsectors = le64_to_cpu(rsp->nsectors);
+
+ /*
+ * If the device was remapped and the size changed in the
+ * meantime we need to revalidate it
+ */
+ if (dev->nsectors != nsectors)
+ rnbd_clt_change_capacity(dev, nsectors);
+ rnbd_clt_info(dev, "Device online, device remapped successfully\n");
+ }
+ err = rnbd_clt_set_dev_attr(dev, rsp);
+ if (err)
+ goto out;
+ dev->dev_state = DEV_STATE_MAPPED;
+
+out:
+ mutex_unlock(&dev->lock);
+
+ return err;
+}
+
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
+{
+ int ret = 0;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state != DEV_STATE_MAPPED) {
+ pr_err("Failed to set new size of the device, device is not opened\n");
+ ret = -ENOENT;
+ goto out;
+ }
+ ret = rnbd_clt_change_capacity(dev, newsize);
+
+out:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
+{
+ if (WARN_ON(!q->hctx))
+ return;
+
+ /* We can come here from interrupt, thus async=true */
+ blk_mq_run_hw_queue(q->hctx, true);
+}
+
+enum {
+ RNBD_DELAY_IFBUSY = -1,
+};
+
+/**
+ * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
+ * @sess: Session to find a queue for
+ * @cpu: Cpu to start the search from
+ *
+ * Description:
+ *     Each CPU has a list of HW queues which need to be rerun. If a list
+ *     is not empty, it is marked with a bit. This function finds the first
+ *     set bit in the bitmap and returns the corresponding CPU list.
+ */
+static struct rnbd_cpu_qlist *
+rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
+{
+ int bit;
+
+ /* Search from cpu to nr_cpu_ids */
+ bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
+ if (bit < nr_cpu_ids) {
+ return per_cpu_ptr(sess->cpu_queues, bit);
+ } else if (cpu != 0) {
+ /* Search from 0 to cpu */
+ bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
+ if (bit < cpu)
+ return per_cpu_ptr(sess->cpu_queues, bit);
+ }
+
+ return NULL;
+}
+
+static inline int nxt_cpu(int cpu)
+{
+ return (cpu + 1) % nr_cpu_ids;
+}
+
+/**
+ * rnbd_rerun_if_needed() - rerun next queue marked as stopped
+ * @sess: Session to rerun a queue on
+ *
+ * Description:
+ *     Each CPU has its own list of HW queues which should be rerun. This
+ *     function finds such a list, takes its lock, picks the first HW queue
+ *     off the list and requeues it.
+ *
+ * Return:
+ * True if the queue was requeued, false otherwise.
+ *
+ * Context:
+ * Does not matter.
+ */
+static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
+{
+ struct rnbd_queue *q = NULL;
+ struct rnbd_cpu_qlist *cpu_q;
+ unsigned long flags;
+ int *cpup;
+
+	/*
+	 * To keep fairness and not let other queues starve, we always try to
+	 * wake up someone else in a round-robin manner. That of course
+	 * increases latency, but queues always get a chance to be executed.
+	 */
+ cpup = get_cpu_ptr(sess->cpu_rr);
+ for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
+ cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
+ if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
+ continue;
+ if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
+ goto unlock;
+ q = list_first_entry_or_null(&cpu_q->requeue_list,
+ typeof(*q), requeue_list);
+ if (WARN_ON(!q))
+ goto clear_bit;
+ list_del_init(&q->requeue_list);
+ clear_bit_unlock(0, &q->in_list);
+
+ if (list_empty(&cpu_q->requeue_list)) {
+ /* Clear bit if nothing is left */
+clear_bit:
+ clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ }
+unlock:
+ spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
+
+ if (q)
+ break;
+ }
+
+	/*
+	 * Save the CPU that is going to be requeued in the per-cpu var. Just
+	 * incrementing it doesn't work, because rnbd_get_cpu_qlist() will
+	 * always return the first CPU with something on the queue list when
+	 * the value stored in the var is greater than the last CPU with
+	 * something on the list.
+	 */
+ if (cpu_q)
+ *cpup = cpu_q->cpu;
+ put_cpu_var(sess->cpu_rr);
+
+ if (q)
+ rnbd_clt_dev_requeue(q);
+
+ return q;
+}
+
+/**
+ * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
+ * session is idling (there are no requests
+ * in-flight).
+ * @sess: Session to rerun the queues on
+ *
+ * Description:
+ *     This function reruns all stopped queues if there are no requests
+ *     in-flight anymore. It addresses an obvious problem: when the number
+ *     of tags is smaller than the number of queues (hctxs) that are
+ *     stopped and put to sleep, and the last permit that has just been put
+ *     does not wake up all remaining queues (hctxs), IO requests hang
+ *     forever.
+ *
+ *     That can happen when all permits, say N, have been exhausted from
+ *     one CPU, and we have many block devices per session, say M. Each
+ *     block device has its own queue (hctx) for each CPU, so eventually we
+ *     can put M x nr_cpu_ids queues (hctxs) to sleep. If N < M x
+ *     nr_cpu_ids, we finally get an IO hang.
+ *
+ *     To avoid this hang, the last caller of rnbd_put_permit() (the one
+ *     who observes sess->busy == 0) must wake up all remaining queues.
+ *
+ * Context:
+ * Does not matter.
+ */
+static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
+{
+ bool requeued;
+
+ do {
+ requeued = rnbd_rerun_if_needed(sess);
+ } while (atomic_read(&sess->busy) == 0 && requeued);
+}
+
+static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait)
+{
+ struct rtrs_permit *permit;
+
+ permit = rtrs_clt_get_permit(sess->rtrs, con_type,
+ wait ? RTRS_PERMIT_WAIT :
+ RTRS_PERMIT_NOWAIT);
+ if (likely(permit))
+		/* There is a subtle rare case here, when all permits can be
+		 * consumed before the busy counter is increased. This is
+		 * safe, because the loser will get NULL as a permit, observe
+		 * a zero busy counter and immediately restart the queue
+		 * itself.
+		 */
+ atomic_inc(&sess->busy);
+
+ return permit;
+}
+
+static void rnbd_put_permit(struct rnbd_clt_session *sess,
+ struct rtrs_permit *permit)
+{
+ rtrs_clt_put_permit(sess->rtrs, permit);
+ atomic_dec(&sess->busy);
+ /* Paired with rnbd_clt_dev_add_to_requeue(). Decrement first
+ * and then check queue bits.
+ */
+ smp_mb__after_atomic();
+ rnbd_rerun_all_if_idle(sess);
+}
+
+static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait)
+{
+ struct rnbd_iu *iu;
+ struct rtrs_permit *permit;
+
+ permit = rnbd_get_permit(sess, con_type,
+ wait ? RTRS_PERMIT_WAIT :
+ RTRS_PERMIT_NOWAIT);
+ if (unlikely(!permit))
+ return NULL;
+ iu = rtrs_permit_to_pdu(permit);
+ iu->permit = permit;
+	/*
+	 * 1st reference is dropped after finishing sending a "user" message,
+	 * 2nd reference is dropped after the confirmation with the response
+	 * is returned.
+	 * 1st and 2nd can happen in any order, so the rnbd_iu should be
+	 * released (rtrs_permit returned to RTRS) only after both are
+	 * finished.
+	 */
+ atomic_set(&iu->refcount, 2);
+ init_waitqueue_head(&iu->comp.wait);
+ iu->comp.errno = INT_MAX;
+
+ return iu;
+}
+
+static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
+{
+ if (atomic_dec_and_test(&iu->refcount))
+ rnbd_put_permit(sess, iu->permit);
+}
+
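+/*
+ * Block layer completion callback: return the RTRS permit and finish the
+ * request with the status stored in the iu by msg_io_conf().
+ */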
+static void rnbd_softirq_done_fn(struct request *rq)
+{
+ struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_iu *iu;
+
+ iu = blk_mq_rq_to_pdu(rq);
+ rnbd_put_permit(sess, iu->permit);
+ blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
+}
+
+static void msg_io_conf(void *priv, int errno)
+{
+ struct rnbd_iu *iu = priv;
+ struct rnbd_clt_dev *dev = iu->dev;
+ struct request *rq = iu->rq;
+ int rw = rq_data_dir(rq);
+
+ iu->errno = errno;
+
+ blk_mq_complete_request(rq);
+
+ if (errno)
+ rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
+ rw == READ ? "read" : "write", errno);
+}
+
+static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
+{
+ iu->comp.errno = errno;
+ wake_up(&iu->comp.wait);
+}
+
+static void msg_conf(void *priv, int errno)
+{
+ struct rnbd_iu *iu = priv;
+
+ iu->errno = errno;
+ schedule_work(&iu->work);
+}
+
+enum wait_type {
+ NO_WAIT = 0,
+ WAIT = 1
+};
+
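+/*
+ * Send a user (non-IO) message over RTRS. The completion work @conf is
+ * scheduled from msg_conf() once RTRS confirms the transfer; with WAIT the
+ * caller additionally sleeps until the response has been processed and
+ * *errno has been set.
+ */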
+static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+ struct rnbd_iu *iu, struct kvec *vec, size_t nr,
+ size_t len, struct scatterlist *sg, unsigned int sg_len,
+ void (*conf)(struct work_struct *work),
+ int *errno, enum wait_type wait)
+{
+ int err;
+ struct rtrs_clt_req_ops req_ops;
+
+ INIT_WORK(&iu->work, conf);
+ req_ops = (struct rtrs_clt_req_ops) {
+ .priv = iu,
+ .conf_fn = msg_conf,
+ };
+ err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
+ vec, nr, len, sg, sg_len);
+ if (!err && wait) {
+ wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
+ *errno = iu->comp.errno;
+ } else {
+ *errno = 0;
+ }
+
+ return err;
+}
+
+static void msg_close_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_clt_dev *dev = iu->dev;
+
+ wake_up_iu_comp(iu, iu->errno);
+ rnbd_put_iu(dev->sess, iu);
+ rnbd_clt_put_dev(dev);
+}
+
+static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_msg_close msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu)
+ return -ENOMEM;
+
+ iu->buf = NULL;
+ iu->dev = dev;
+
+ sg_mark_end(&iu->sglist[0]);
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
+ msg.device_id = cpu_to_le32(device_id);
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
+ msg_close_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
+static void msg_open_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_msg_open_rsp *rsp = iu->buf;
+ struct rnbd_clt_dev *dev = iu->dev;
+ int errno = iu->errno;
+
+ if (errno) {
+ rnbd_clt_err(dev,
+ "Opening failed, server responded: %d\n",
+ errno);
+ } else {
+ errno = process_msg_open_rsp(dev, rsp);
+ if (errno) {
+ u32 device_id = le32_to_cpu(rsp->device_id);
+			/*
+			 * If the server thinks it's fine, but we fail to
+			 * process the open response, then be nice and send a
+			 * close to the server.
+			 */
+ (void)send_msg_close(dev, device_id, NO_WAIT);
+ }
+ }
+ kfree(rsp);
+ wake_up_iu_comp(iu, errno);
+ rnbd_put_iu(dev->sess, iu);
+ rnbd_clt_put_dev(dev);
+}
+
+static void msg_sess_info_conf(struct work_struct *work)
+{
+ struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
+ struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
+ struct rnbd_clt_session *sess = iu->sess;
+
+ if (!iu->errno)
+ sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
+
+ kfree(rsp);
+ wake_up_iu_comp(iu, iu->errno);
+ rnbd_put_iu(sess, iu);
+ rnbd_clt_put_sess(sess);
+}
+
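+/*
+ * Ask the server to open (map) the remote device. The response is handled
+ * asynchronously in msg_open_conf(); with WAIT the caller blocks until the
+ * device attributes from the response have been applied.
+ */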
+static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_msg_open_rsp *rsp;
+ struct rnbd_msg_open msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ kfree(rsp);
+ return -ENOMEM;
+ }
+
+ iu->buf = rsp;
+ iu->dev = dev;
+
+ sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
+ msg.access_mode = dev->access_mode;
+ strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
+
+ WARN_ON(!rnbd_clt_get_dev(dev));
+ err = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ msg_open_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_dev(dev);
+ rnbd_put_iu(sess, iu);
+ kfree(rsp);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
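+/*
+ * Exchange protocol versions with the server. The negotiated version is
+ * stored in sess->ver by msg_sess_info_conf().
+ */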
+static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
+{
+ struct rnbd_msg_sess_info_rsp *rsp;
+ struct rnbd_msg_sess_info msg;
+ struct rnbd_iu *iu;
+ struct kvec vec = {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ int err, errno;
+
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ return -ENOMEM;
+
+ iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
+ if (!iu) {
+ kfree(rsp);
+ return -ENOMEM;
+ }
+
+ iu->buf = rsp;
+ iu->sess = sess;
+
+ sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
+ msg.ver = RNBD_PROTO_VER_MAJOR;
+
+ if (!rnbd_clt_get_sess(sess)) {
+		/*
+		 * That can happen only in one case, when RTRS has
+		 * re-established the connection and link_ev() is called, but
+		 * the session is almost dead, the last reference on it has
+		 * been put and the caller is waiting for RTRS to close
+		 * everything.
+		 */
+ err = -ENODEV;
+ goto put_iu;
+ }
+ err = send_usr_msg(sess->rtrs, READ, iu,
+ &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ msg_sess_info_conf, &errno, wait);
+ if (err) {
+ rnbd_clt_put_sess(sess);
+put_iu:
+ rnbd_put_iu(sess, iu);
+ kfree(rsp);
+ } else {
+ err = errno;
+ }
+
+ rnbd_put_iu(sess, iu);
+ return err;
+}
+
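+/*
+ * Mark all mapped devices of a session as disconnected; called from the
+ * RTRS link event handler when the transport goes down.
+ */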
+static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
+{
+ struct rnbd_clt_dev *dev;
+
+ mutex_lock(&sess->lock);
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ rnbd_clt_err(dev, "Device disconnected.\n");
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_MAPPED)
+ dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
+ mutex_unlock(&dev->lock);
+ }
+ mutex_unlock(&sess->lock);
+}
+
+static void remap_devs(struct rnbd_clt_session *sess)
+{
+ struct rnbd_clt_dev *dev;
+ struct rtrs_attrs attrs;
+ int err;
+
+ /*
+ * Careful here: we are called from RTRS link event directly,
+ * thus we can't send any RTRS request and wait for response
+ * or RTRS will not be able to complete request with failure
+ * if something goes wrong (failing of outstanding requests
+ * happens exactly from the context where we are blocking now).
+ *
+ * So to avoid deadlocks each usr message sent from here must
+ * be asynchronous.
+ */
+
+ err = send_msg_sess_info(sess, NO_WAIT);
+ if (err) {
+ pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
+ return;
+ }
+
+ rtrs_clt_query(sess->rtrs, &attrs);
+ mutex_lock(&sess->lock);
+ sess->max_io_size = attrs.max_io_size;
+
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ bool skip;
+
+ mutex_lock(&dev->lock);
+ skip = (dev->dev_state == DEV_STATE_INIT);
+ mutex_unlock(&dev->lock);
+ if (skip)
+ /*
+			 * When the device is establishing a connection for
+			 * the first time, do not remap - it will be closed
+			 * soon.
+ */
+ continue;
+
+ rnbd_clt_info(dev, "session reconnected, remapping device\n");
+ err = send_msg_open(dev, NO_WAIT);
+ if (err) {
+ rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
+ break;
+ }
+ }
+ mutex_unlock(&sess->lock);
+}
+
+static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
+{
+ struct rnbd_clt_session *sess = priv;
+
+ switch (ev) {
+ case RTRS_CLT_LINK_EV_DISCONNECTED:
+ set_dev_states_to_disconnected(sess);
+ break;
+ case RTRS_CLT_LINK_EV_RECONNECTED:
+ remap_devs(sess);
+ break;
+ default:
+ pr_err("Unknown session event received (%d), session: %s\n",
+ ev, sess->sessname);
+ }
+}
+
+static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
+{
+ unsigned int cpu;
+ struct rnbd_cpu_qlist *cpu_q;
+
+ for_each_possible_cpu(cpu) {
+ cpu_q = per_cpu_ptr(cpu_queues, cpu);
+
+ cpu_q->cpu = cpu;
+ INIT_LIST_HEAD(&cpu_q->requeue_list);
+ spin_lock_init(&cpu_q->requeue_lock);
+ }
+}
+
+static void destroy_mq_tags(struct rnbd_clt_session *sess)
+{
+ if (sess->tag_set.tags)
+ blk_mq_free_tag_set(&sess->tag_set);
+}
+
+static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
+{
+ sess->rtrs_ready = true;
+ wake_up_all(&sess->rtrs_waitq);
+}
+
+static void close_rtrs(struct rnbd_clt_session *sess)
+{
+ might_sleep();
+
+ if (!IS_ERR_OR_NULL(sess->rtrs)) {
+ rtrs_clt_close(sess->rtrs);
+ sess->rtrs = NULL;
+ wake_up_rtrs_waiters(sess);
+ }
+}
+
+static void free_sess(struct rnbd_clt_session *sess)
+{
+ WARN_ON(!list_empty(&sess->devs_list));
+
+ might_sleep();
+
+ close_rtrs(sess);
+ destroy_mq_tags(sess);
+ if (!list_empty(&sess->list)) {
+ mutex_lock(&sess_lock);
+ list_del(&sess->list);
+ mutex_unlock(&sess_lock);
+ }
+ free_percpu(sess->cpu_queues);
+ free_percpu(sess->cpu_rr);
+ mutex_destroy(&sess->lock);
+ kfree(sess);
+}
+
+static struct rnbd_clt_session *alloc_sess(const char *sessname)
+{
+ struct rnbd_clt_session *sess;
+ int err, cpu;
+
+ sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
+ if (!sess)
+ return ERR_PTR(-ENOMEM);
+ strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
+ atomic_set(&sess->busy, 0);
+ mutex_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->devs_list);
+ INIT_LIST_HEAD(&sess->list);
+ bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
+ init_waitqueue_head(&sess->rtrs_waitq);
+ refcount_set(&sess->refcount, 1);
+
+ sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
+ if (!sess->cpu_queues) {
+ err = -ENOMEM;
+ goto err;
+ }
+ rnbd_init_cpu_qlists(sess->cpu_queues);
+
+	/*
+	 * That is a simple percpu variable which stores cpu indices, which
+	 * are incremented on each access. We need that for the sake of
+	 * fairness to wake up queues in a round-robin manner.
+	 */
+ sess->cpu_rr = alloc_percpu(int);
+ if (!sess->cpu_rr) {
+ err = -ENOMEM;
+ goto err;
+ }
+ for_each_possible_cpu(cpu)
+		*per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
+
+ return sess;
+
+err:
+ free_sess(sess);
+
+ return ERR_PTR(err);
+}
+
+static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
+{
+ wait_event(sess->rtrs_waitq, sess->rtrs_ready);
+ if (IS_ERR_OR_NULL(sess->rtrs))
+ return -ECONNRESET;
+
+ return 0;
+}
+
+static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
+ __releases(&sess_lock)
+ __acquires(&sess_lock)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
+ if (IS_ERR_OR_NULL(sess->rtrs)) {
+ finish_wait(&sess->rtrs_waitq, &wait);
+ return;
+ }
+ mutex_unlock(&sess_lock);
+ /* loop in caller, see __find_and_get_sess().
+ * You can't leave mutex locked and call schedule(), you will catch a
+ * deadlock with a caller of free_sess(), which has just put the last
+ * reference and is about to take the sess_lock in order to delete
+ * the session from the list.
+ */
+ schedule();
+ mutex_lock(&sess_lock);
+}
+
+static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
+ __releases(&sess_lock)
+ __acquires(&sess_lock)
+{
+ struct rnbd_clt_session *sess, *sn;
+ int err;
+
+again:
+ list_for_each_entry_safe(sess, sn, &sess_list, list) {
+ if (strcmp(sessname, sess->sessname))
+ continue;
+
+ if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
+ /*
+ * No RTRS connection, session is dying.
+ */
+ continue;
+
+ if (rnbd_clt_get_sess(sess)) {
+ /*
+ * Alive session is found, wait for RTRS connection.
+ */
+ mutex_unlock(&sess_lock);
+ err = wait_for_rtrs_connection(sess);
+ if (err)
+ rnbd_clt_put_sess(sess);
+ mutex_lock(&sess_lock);
+
+ if (err)
+ /* Session is dying, repeat the loop */
+ goto again;
+
+ return sess;
+ }
+ /*
+ * Ref is 0, session is dying, wait for RTRS disconnect
+ * in order to avoid session names clashes.
+ */
+ wait_for_rtrs_disconnection(sess);
+ /*
+ * RTRS is disconnected and soon session will be freed,
+ * so repeat a loop.
+ */
+ goto again;
+ }
+
+ return NULL;
+}
+
+static struct
+rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
+{
+ struct rnbd_clt_session *sess = NULL;
+
+ mutex_lock(&sess_lock);
+ sess = __find_and_get_sess(sessname);
+ if (!sess) {
+ sess = alloc_sess(sessname);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&sess_lock);
+ return sess;
+ }
+ list_add(&sess->list, &sess_list);
+ *first = true;
+ } else
+ *first = false;
+ mutex_unlock(&sess_lock);
+
+ return sess;
+}
+
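+/*
+ * Block device open/release callbacks: they only take/drop a device
+ * reference and enforce the read-only access mode.
+ */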
+static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
+{
+ struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+
+ if (dev->read_only && (mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (dev->dev_state == DEV_STATE_UNMAPPED ||
+ !rnbd_clt_get_dev(dev))
+ return -EIO;
+
+ return 0;
+}
+
+static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
+{
+ struct rnbd_clt_dev *dev = gen->private_data;
+
+ rnbd_clt_put_dev(dev);
+}
+
+static int rnbd_client_getgeo(struct block_device *block_device,
+ struct hd_geometry *geo)
+{
+ u64 size;
+ struct rnbd_clt_dev *dev;
+
+ dev = block_device->bd_disk->private_data;
+ size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
+ geo->cylinders = size >> 6; /* size/64 */
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->start = 0;
+
+ return 0;
+}
+
+static const struct block_device_operations rnbd_client_ops = {
+ .owner = THIS_MODULE,
+ .open = rnbd_client_open,
+ .release = rnbd_client_release,
+ .getgeo = rnbd_client_getgeo
+};
+
+/* The amount of data that belongs to an I/O and the amount of data that
+ * should be read or written to the disk (bi_size) can differ.
+ *
+ * E.g. When WRITE_SAME is used, only a small amount of data is
+ * transferred that is then written repeatedly over a lot of sectors.
+ *
+ * Get the size of data to be transferred via RTRS by summing up the size
+ * of the scatter-gather list entries.
+ */
+static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
+{
+ struct scatterlist *sg;
+ size_t tsize = 0;
+ int i;
+
+ for_each_sg(sglist, sg, len, i)
+ tsize += sg->length;
+ return tsize;
+}
+
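+/*
+ * Translate a block request into an RNBD_MSG_IO message and hand it over
+ * to RTRS together with the mapped scatter-gather list.
+ */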
+static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
+ struct request *rq,
+ struct rnbd_iu *iu)
+{
+ struct rtrs_clt *rtrs = dev->sess->rtrs;
+ struct rtrs_permit *permit = iu->permit;
+ struct rnbd_msg_io msg;
+ struct rtrs_clt_req_ops req_ops;
+ unsigned int sg_cnt = 0;
+ struct kvec vec;
+ size_t size;
+ int err;
+
+ iu->rq = rq;
+ iu->dev = dev;
+ msg.sector = cpu_to_le64(blk_rq_pos(rq));
+ msg.bi_size = cpu_to_le32(blk_rq_bytes(rq));
+ msg.rw = cpu_to_le32(rq_to_rnbd_flags(rq));
+ msg.prio = cpu_to_le16(req_get_ioprio(rq));
+
+ /*
+	 * We only support discards with a single segment for now.
+	 * See the queue limits.
+ */
+ if (req_op(rq) != REQ_OP_DISCARD)
+ sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+
+ if (sg_cnt == 0)
+ /* Do not forget to mark the end */
+ sg_mark_end(&iu->sglist[0]);
+
+ msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
+ msg.device_id = cpu_to_le32(dev->device_id);
+
+ vec = (struct kvec) {
+ .iov_base = &msg,
+ .iov_len = sizeof(msg)
+ };
+ size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+ req_ops = (struct rtrs_clt_req_ops) {
+ .priv = iu,
+ .conf_fn = msg_io_conf,
+ };
+ err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
+ &vec, 1, size, iu->sglist, sg_cnt);
+ if (unlikely(err)) {
+ rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
+ * @dev: Device to be checked
+ * @q: Queue to be added to the requeue list if required
+ *
+ * Description:
+ *     If the session is busy, someone will requeue us when resources are
+ *     freed. If the session is not doing anything, the device is not added
+ *     to the list and false is returned.
+ */
+static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
+ struct rnbd_queue *q)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ struct rnbd_cpu_qlist *cpu_q;
+ unsigned long flags;
+ bool added = true;
+ bool need_set;
+
+ cpu_q = get_cpu_ptr(sess->cpu_queues);
+ spin_lock_irqsave(&cpu_q->requeue_lock, flags);
+
+ if (likely(!test_and_set_bit_lock(0, &q->in_list))) {
+ if (WARN_ON(!list_empty(&q->requeue_list)))
+ goto unlock;
+
+ need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ if (need_set) {
+ set_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ /* Paired with rnbd_put_permit(). Set a bit first
+ * and then observe the busy counter.
+ */
+ smp_mb__before_atomic();
+ }
+ if (likely(atomic_read(&sess->busy))) {
+ list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
+ } else {
+ /* Very unlikely, but possible: busy counter was
+ * observed as zero. Drop all bits and return
+ * false to restart the queue by ourselves.
+ */
+ if (need_set)
+ clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
+ clear_bit_unlock(0, &q->in_list);
+ added = false;
+ }
+ }
+unlock:
+ spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
+ put_cpu_ptr(sess->cpu_queues);
+
+ return added;
+}
+
+static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
+ struct blk_mq_hw_ctx *hctx,
+ int delay)
+{
+ struct rnbd_queue *q = hctx->driver_data;
+
+ if (delay != RNBD_DELAY_IFBUSY)
+ blk_mq_delay_run_hw_queue(hctx, delay);
+ else if (unlikely(!rnbd_clt_dev_add_to_requeue(dev, q)))
+ /*
+ * If session is not busy we have to restart
+ * the queue ourselves.
+ */
+ blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
+}
+
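+/*
+ * blk-mq .queue_rq callback: grab an RTRS permit without sleeping and
+ * transfer the request; if no permit or resources are available, the
+ * hardware queue is requeued via rnbd_clt_dev_kick_mq_queue().
+ */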
+static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *rq = bd->rq;
+ struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
+ int err;
+
+ if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
+ return BLK_STS_IOERR;
+
+ iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
+ RTRS_PERMIT_NOWAIT);
+ if (unlikely(!iu->permit)) {
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
+ return BLK_STS_RESOURCE;
+ }
+
+ blk_mq_start_request(rq);
+ err = rnbd_client_xfer_request(dev, rq, iu);
+ if (likely(err == 0))
+ return BLK_STS_OK;
+ if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_RESOURCE;
+ }
+
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_IOERR;
+}
+
+static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
+
+ sg_init_table(iu->sglist, BMAX_SEGMENTS);
+ return 0;
+}
+
+static struct blk_mq_ops rnbd_mq_ops = {
+ .queue_rq = rnbd_queue_rq,
+ .init_request = rnbd_init_request,
+ .complete = rnbd_softirq_done_fn,
+};
+
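+/*
+ * The tag set is shared by all devices of a session; its queue depth
+ * mirrors the RTRS queue depth reported by rtrs_clt_query().
+ */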
+static int setup_mq_tags(struct rnbd_clt_session *sess)
+{
+ struct blk_mq_tag_set *tag_set = &sess->tag_set;
+
+ memset(tag_set, 0, sizeof(*tag_set));
+ tag_set->ops = &rnbd_mq_ops;
+ tag_set->queue_depth = sess->queue_depth;
+ tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_TAG_SHARED;
+ tag_set->cmd_size = sizeof(struct rnbd_iu);
+ tag_set->nr_hw_queues = num_online_cpus();
+
+ return blk_mq_alloc_tag_set(tag_set);
+}
+
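+/*
+ * Look up an existing session by name or, for the first user, establish
+ * the RTRS connection, allocate the shared tag set and exchange session
+ * info with the server.
+ */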
+static struct rnbd_clt_session *
+find_and_get_or_create_sess(const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr)
+{
+ struct rnbd_clt_session *sess;
+ struct rtrs_attrs attrs;
+ int err;
+ bool first;
+ struct rtrs_clt_ops rtrs_ops;
+
+ sess = find_or_create_sess(sessname, &first);
+ if (sess == ERR_PTR(-ENOMEM))
+ return ERR_PTR(-ENOMEM);
+ else if (!first)
+ return sess;
+
+ rtrs_ops = (struct rtrs_clt_ops) {
+ .priv = sess,
+ .link_ev = rnbd_clt_link_ev,
+ };
+ /*
+ * Nothing was found, establish rtrs connection and proceed further.
+ */
+ sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
+ paths, path_cnt, port_nr,
+ sizeof(struct rnbd_iu),
+ RECONNECT_DELAY, BMAX_SEGMENTS,
+ BLK_MAX_SEGMENT_SIZE,
+ MAX_RECONNECTS);
+ if (IS_ERR(sess->rtrs)) {
+ err = PTR_ERR(sess->rtrs);
+ goto wake_up_and_put;
+ }
+ rtrs_clt_query(sess->rtrs, &attrs);
+ sess->max_io_size = attrs.max_io_size;
+ sess->queue_depth = attrs.queue_depth;
+
+ err = setup_mq_tags(sess);
+ if (err)
+ goto close_rtrs;
+
+ err = send_msg_sess_info(sess, WAIT);
+ if (err)
+ goto close_rtrs;
+
+ wake_up_rtrs_waiters(sess);
+
+ return sess;
+
+close_rtrs:
+ close_rtrs(sess);
+put_sess:
+ rnbd_clt_put_sess(sess);
+
+ return ERR_PTR(err);
+
+wake_up_and_put:
+ wake_up_rtrs_waiters(sess);
+ goto put_sess;
+}
+
+static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
+ struct rnbd_queue *q,
+ struct blk_mq_hw_ctx *hctx)
+{
+ INIT_LIST_HEAD(&q->requeue_list);
+ q->dev = dev;
+ q->hctx = hctx;
+}
+
+static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
+{
+ int i;
+ struct blk_mq_hw_ctx *hctx;
+ struct rnbd_queue *q;
+
+ queue_for_each_hw_ctx(dev->queue, hctx, i) {
+ q = &dev->hw_queues[i];
+ rnbd_init_hw_queue(dev, q, hctx);
+ hctx->driver_data = q;
+ }
+}
+
+static int setup_mq_dev(struct rnbd_clt_dev *dev)
+{
+ dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
+ if (IS_ERR(dev->queue)) {
+ rnbd_clt_err(dev, "Initializing multiqueue queue failed, err: %ld\n",
+ PTR_ERR(dev->queue));
+ return PTR_ERR(dev->queue);
+ }
+ rnbd_init_mq_hw_queues(dev);
+ return 0;
+}
+
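+/*
+ * Apply the limits received in the open response (and the session's
+ * max_io_size) to the block request queue.
+ */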
+static void setup_request_queue(struct rnbd_clt_dev *dev)
+{
+ blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
+ blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
+ blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
+ blk_queue_max_write_same_sectors(dev->queue,
+ dev->max_write_same_sectors);
+
+	/*
+	 * We don't support discards to "discontiguous" segments
+	 * in one request.
+	 */
+ blk_queue_max_discard_segments(dev->queue, 1);
+
+ blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
+ dev->queue->limits.discard_granularity = dev->discard_granularity;
+ dev->queue->limits.discard_alignment = dev->discard_alignment;
+ if (dev->max_discard_sectors)
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
+ if (dev->secure_discard)
+ blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
+
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
+ blk_queue_max_segments(dev->queue, dev->max_segments);
+ blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
+ blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
+ blk_queue_write_cache(dev->queue, true, true);
+ dev->queue->queuedata = dev;
+}
+
+static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+{
+ dev->gd->major = rnbd_client_major;
+ dev->gd->first_minor = idx << RNBD_PART_BITS;
+ dev->gd->fops = &rnbd_client_ops;
+ dev->gd->queue = dev->queue;
+ dev->gd->private_data = dev;
+ snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
+ idx);
+ pr_debug("disk_name=%s, capacity=%zu\n",
+ dev->gd->disk_name,
+ dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
+ );
+
+ set_capacity(dev->gd, dev->nsectors);
+
+ if (dev->access_mode == RNBD_ACCESS_RO) {
+ dev->read_only = true;
+ set_disk_ro(dev->gd, true);
+ } else {
+ dev->read_only = false;
+ }
+
+ if (!dev->rotational)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
+}
+
+static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
+ struct rnbd_clt_dev *dev, int idx)
+{
+ int err;
+
+ dev->size = dev->nsectors * dev->logical_block_size;
+
+ err = setup_mq_dev(dev);
+ if (err)
+ return err;
+
+ setup_request_queue(dev);
+
+ dev->gd = alloc_disk_node(1 << RNBD_PART_BITS, NUMA_NO_NODE);
+ if (!dev->gd) {
+ blk_cleanup_queue(dev->queue);
+ return -ENOMEM;
+ }
+
+ rnbd_clt_setup_gen_disk(dev, idx);
+
+ return 0;
+}
+
+static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
+ enum rnbd_access_mode access_mode,
+ const char *pathname)
+{
+ struct rnbd_clt_dev *dev;
+ int ret;
+
+ dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->hw_queues = kcalloc(nr_cpu_ids, sizeof(*dev->hw_queues),
+ GFP_KERNEL);
+ if (!dev->hw_queues) {
+ ret = -ENOMEM;
+ goto out_alloc;
+ }
+
+ mutex_lock(&ida_lock);
+ ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
+ GFP_KERNEL);
+ mutex_unlock(&ida_lock);
+ if (ret < 0) {
+ pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+ pathname, sess->sessname, ret);
+ goto out_queues;
+ }
+ dev->clt_device_id = ret;
+ dev->sess = sess;
+ dev->access_mode = access_mode;
+ strlcpy(dev->pathname, pathname, sizeof(dev->pathname));
+ mutex_init(&dev->lock);
+ refcount_set(&dev->refcount, 1);
+ dev->dev_state = DEV_STATE_INIT;
+
+	/*
+	 * We are called from a sysfs entry here, thus clt-sysfs is
+	 * responsible for ensuring the session does not disappear.
+	 */
+ WARN_ON(!rnbd_clt_get_sess(sess));
+
+ return dev;
+
+out_queues:
+ kfree(dev->hw_queues);
+out_alloc:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
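+/* Caller must hold sess_lock. */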
+static bool __exists_dev(const char *pathname)
+{
+ struct rnbd_clt_session *sess;
+ struct rnbd_clt_dev *dev;
+ bool found = false;
+
+ list_for_each_entry(sess, &sess_list, list) {
+ mutex_lock(&sess->lock);
+ list_for_each_entry(dev, &sess->devs_list, list) {
+ if (!strncmp(dev->pathname, pathname,
+ sizeof(dev->pathname))) {
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&sess->lock);
+ if (found)
+ break;
+ }
+
+ return found;
+}
+
+static bool exists_devpath(const char *pathname)
+{
+ bool found;
+
+ mutex_lock(&sess_lock);
+ found = __exists_dev(pathname);
+ mutex_unlock(&sess_lock);
+
+ return found;
+}
+
+static bool insert_dev_if_not_exists_devpath(const char *pathname,
+ struct rnbd_clt_session *sess,
+ struct rnbd_clt_dev *dev)
+{
+ bool found;
+
+ mutex_lock(&sess_lock);
+ found = __exists_dev(pathname);
+ if (!found) {
+ mutex_lock(&sess->lock);
+ list_add_tail(&dev->list, &sess->devs_list);
+ mutex_unlock(&sess->lock);
+ }
+ mutex_unlock(&sess_lock);
+
+ return found;
+}
+
+static void delete_dev(struct rnbd_clt_dev *dev)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+
+ mutex_lock(&sess->lock);
+ list_del(&dev->list);
+ mutex_unlock(&sess->lock);
+}
+
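+/*
+ * Map a remote device: find or create the session, add the device to the
+ * session's device list, send RNBD_MSG_OPEN and, on success, set up the
+ * request queue and the gendisk.
+ */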
+struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
+ struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr,
+ const char *pathname,
+ enum rnbd_access_mode access_mode)
+{
+ struct rnbd_clt_session *sess;
+ struct rnbd_clt_dev *dev;
+ int ret;
+
+ if (exists_devpath(pathname))
+ return ERR_PTR(-EEXIST);
+
+ sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
+ if (IS_ERR(sess))
+ return ERR_CAST(sess);
+
+ dev = init_dev(sess, access_mode, pathname);
+ if (IS_ERR(dev)) {
+ pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
+ pathname, sess->sessname, PTR_ERR(dev));
+ ret = PTR_ERR(dev);
+ goto put_sess;
+ }
+ if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
+ ret = -EEXIST;
+ goto put_dev;
+ }
+ ret = send_msg_open(dev, WAIT);
+ if (ret) {
+ rnbd_clt_err(dev,
+ "map_device: failed, can't open remote device, err: %d\n",
+ ret);
+ goto del_dev;
+ }
+ mutex_lock(&dev->lock);
+ pr_debug("Opened remote device: session=%s, path='%s'\n",
+ sess->sessname, pathname);
+ ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
+ if (ret) {
+ rnbd_clt_err(dev,
+ "map_device: Failed to configure device, err: %d\n",
+ ret);
+ mutex_unlock(&dev->lock);
+ goto del_dev;
+ }
+
+ rnbd_clt_info(dev,
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+ dev->gd->disk_name, dev->nsectors,
+ dev->logical_block_size, dev->physical_block_size,
+ dev->max_write_same_sectors, dev->max_discard_sectors,
+ dev->discard_granularity, dev->discard_alignment,
+ dev->secure_discard, dev->max_segments,
+ dev->max_hw_sectors, dev->rotational);
+
+ mutex_unlock(&dev->lock);
+
+ add_disk(dev->gd);
+ rnbd_clt_put_sess(sess);
+
+ return dev;
+
+del_dev:
+ delete_dev(dev);
+put_dev:
+ rnbd_clt_put_dev(dev);
+put_sess:
+ rnbd_clt_put_sess(sess);
+
+ return ERR_PTR(ret);
+}
+
+static void destroy_gen_disk(struct rnbd_clt_dev *dev)
+{
+ del_gendisk(dev->gd);
+ blk_cleanup_queue(dev->queue);
+ put_disk(dev->gd);
+}
+
+static void destroy_sysfs(struct rnbd_clt_dev *dev,
+ const struct attribute *sysfs_self)
+{
+ rnbd_clt_remove_dev_symlink(dev);
+ if (dev->kobj.state_initialized) {
+ if (sysfs_self)
+ /* To avoid deadlock firstly remove itself */
+ sysfs_remove_file_self(&dev->kobj, sysfs_self);
+ kobject_del(&dev->kobj);
+ kobject_put(&dev->kobj);
+ }
+}
+
+int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
+ const struct attribute *sysfs_self)
+{
+ struct rnbd_clt_session *sess = dev->sess;
+ int refcount, ret = 0;
+ bool was_mapped;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_UNMAPPED) {
+ rnbd_clt_info(dev, "Device is already being unmapped\n");
+ ret = -EALREADY;
+ goto err;
+ }
+ refcount = refcount_read(&dev->refcount);
+ if (!force && refcount > 1) {
+ rnbd_clt_err(dev,
+ "Closing device failed, device is in use, (%d device users)\n",
+ refcount - 1);
+ ret = -EBUSY;
+ goto err;
+ }
+ was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
+ dev->dev_state = DEV_STATE_UNMAPPED;
+ mutex_unlock(&dev->lock);
+
+ delete_dev(dev);
+ destroy_sysfs(dev, sysfs_self);
+ destroy_gen_disk(dev);
+ if (was_mapped && sess->rtrs)
+ send_msg_close(dev, dev->device_id, WAIT);
+
+ rnbd_clt_info(dev, "Device is unmapped\n");
+
+ /* Likely last reference put */
+ rnbd_clt_put_dev(dev);
+
+	/*
+	 * Both the device and the session may have vanished by this point!
+	 */
+
+ return 0;
+err:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
+{
+ int err;
+
+ mutex_lock(&dev->lock);
+ if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
+ err = 0;
+ else if (dev->dev_state == DEV_STATE_UNMAPPED)
+ err = -ENODEV;
+ else if (dev->dev_state == DEV_STATE_MAPPED)
+ err = -EALREADY;
+ else
+ err = -EBUSY;
+ mutex_unlock(&dev->lock);
+ if (!err) {
+ rnbd_clt_info(dev, "Remapping device.\n");
+ err = send_msg_open(dev, WAIT);
+ if (err)
+ rnbd_clt_err(dev, "remap_device: %d\n", err);
+ }
+
+ return err;
+}
+
+static void unmap_device_work(struct work_struct *work)
+{
+ struct rnbd_clt_dev *dev;
+
+ dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
+ rnbd_clt_unmap_device(dev, true, NULL);
+}
+
+static void rnbd_destroy_sessions(void)
+{
+ struct rnbd_clt_session *sess, *sn;
+ struct rnbd_clt_dev *dev, *tn;
+
+ /* Firstly forbid access through sysfs interface */
+ rnbd_clt_destroy_default_group();
+ rnbd_clt_destroy_sysfs_files();
+
+	/*
+	 * At this point there is no concurrent access to the sessions list
+	 * and the devices list:
+	 *   1. New sessions or devices can't be created - the session sysfs
+	 *      files have been removed.
+	 *   2. Devices or sessions can't be removed - the module reference is
+	 *      taken into account in the unmap device sysfs callback.
+	 *   3. No IO requests are inflight - each file open of a block_dev
+	 *      increases the module reference in get_disk().
+	 *
+	 * But there can still be user requests inflight, which are sent by
+	 * the asynchronous send_msg_*() functions, thus before unmapping
+	 * devices the RTRS session must be explicitly closed.
+	 */
+
+ list_for_each_entry_safe(sess, sn, &sess_list, list) {
+ WARN_ON(!rnbd_clt_get_sess(sess));
+ close_rtrs(sess);
+ list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
+			/*
+			 * Here unmap happens in parallel for only one reason:
+			 * blk_cleanup_queue() takes around half a second, so
+			 * on a huge number of devices the whole module unload
+			 * procedure would take minutes.
+			 */
+ INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
+ queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
+ }
+ rnbd_clt_put_sess(sess);
+ }
+ /* Wait for all scheduled unmap works */
+ flush_workqueue(system_long_wq);
+ WARN_ON(!list_empty(&sess_list));
+}
+
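+/*
+ * Module init: the BUILD_BUG_ON()s pin the on-wire message sizes, then the
+ * block major is registered and the sysfs control files are created.
+ */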
+static int __init rnbd_client_init(void)
+{
+ int err = 0;
+
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
+ rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
+ if (rnbd_client_major <= 0) {
+ pr_err("Failed to load module, block device registration failed\n");
+ return -EBUSY;
+ }
+
+ err = rnbd_clt_create_sysfs_files();
+ if (err) {
+ pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
+ err);
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ }
+
+ return err;
+}
+
+static void __exit rnbd_client_exit(void)
+{
+ rnbd_destroy_sessions();
+ unregister_blkdev(rnbd_client_major, "rnbd");
+ ida_destroy(&index_ida);
+}
+
+module_init(rnbd_client_init);
+module_exit(rnbd_client_exit);
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
new file mode 100644
index 000000000000..ed33654aa486
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RNBD_CLT_H
+#define RNBD_CLT_H
+
+#include <linux/wait.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/blk-mq.h>
+#include <linux/refcount.h>
+
+#include <rtrs.h>
+#include "rnbd-proto.h"
+#include "rnbd-log.h"
+
+/* Max. number of segments per IO request: for Mellanox ConnectX through
+ * ConnectX-5, choose the minimum of 30 for all, minus 1 for the internal
+ * protocol, so 29.
+ */
+#define BMAX_SEGMENTS 29
+/* time in seconds between reconnect tries, default to 30 s */
+#define RECONNECT_DELAY 30
+/*
+ * Number of times to reconnect on error before giving up, 0 for disabled,
+ * -1 for forever.
+ */
+#define MAX_RECONNECTS -1
+
+enum rnbd_clt_dev_state {
+ DEV_STATE_INIT,
+ DEV_STATE_MAPPED,
+ DEV_STATE_MAPPED_DISCONNECTED,
+ DEV_STATE_UNMAPPED,
+};
+
+struct rnbd_iu_comp {
+ wait_queue_head_t wait;
+ int errno;
+};
+
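+/*
+ * Per-request state: used as the blk-mq PDU for block IO and as the RTRS
+ * permit PDU for user messages.
+ */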
+struct rnbd_iu {
+ union {
+ struct request *rq; /* for block io */
+ void *buf; /* for user messages */
+ };
+ struct rtrs_permit *permit;
+ union {
+ /* use to send msg associated with a dev */
+ struct rnbd_clt_dev *dev;
+ /* use to send msg associated with a sess */
+ struct rnbd_clt_session *sess;
+ };
+ struct scatterlist sglist[BMAX_SEGMENTS];
+ struct work_struct work;
+ int errno;
+ struct rnbd_iu_comp comp;
+ atomic_t refcount;
+};
+
+struct rnbd_cpu_qlist {
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ unsigned int cpu;
+};
+
+struct rnbd_clt_session {
+ struct list_head list;
+ struct rtrs_clt *rtrs;
+ wait_queue_head_t rtrs_waitq;
+ bool rtrs_ready;
+ struct rnbd_cpu_qlist __percpu
+ *cpu_queues;
+ DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
+ int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
+ atomic_t busy;
+ int queue_depth;
+ u32 max_io_size;
+ struct blk_mq_tag_set tag_set;
+ struct mutex lock; /* protects state and devs_list */
+ struct list_head devs_list; /* list of struct rnbd_clt_dev */
+ refcount_t refcount;
+ char sessname[NAME_MAX];
+ u8 ver; /* protocol version */
+};
+
+/**
+ * Submission queues.
+ */
+struct rnbd_queue {
+ struct list_head requeue_list;
+ unsigned long in_list;
+ struct rnbd_clt_dev *dev;
+ struct blk_mq_hw_ctx *hctx;
+};
+
+struct rnbd_clt_dev {
+ struct rnbd_clt_session *sess;
+ struct request_queue *queue;
+ struct rnbd_queue *hw_queues;
+ u32 device_id;
+	/* local IDA index - used to track minor number allocations. */
+ u32 clt_device_id;
+ struct mutex lock;
+ enum rnbd_clt_dev_state dev_state;
+ char pathname[NAME_MAX];
+ enum rnbd_access_mode access_mode;
+ bool read_only;
+ bool rotational;
+ u32 max_hw_sectors;
+ u32 max_write_same_sectors;
+ u32 max_discard_sectors;
+ u32 discard_granularity;
+ u32 discard_alignment;
+ u16 secure_discard;
+ u16 physical_block_size;
+ u16 logical_block_size;
+ u16 max_segments;
+ size_t nsectors;
+ u64 size; /* device size in bytes */
+ struct list_head list;
+ struct gendisk *gd;
+ struct kobject kobj;
+ char blk_symlink_name[NAME_MAX];
+ refcount_t refcount;
+ struct work_struct unmap_on_rmmod_work;
+};
+
+/* rnbd-clt.c */
+
+struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
+ struct rtrs_addr *paths,
+ size_t path_cnt, u16 port_nr,
+ const char *pathname,
+ enum rnbd_access_mode access_mode);
+int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
+ const struct attribute *sysfs_self);
+
+int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
+int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
+
+/* rnbd-clt-sysfs.c */
+
+int rnbd_clt_create_sysfs_files(void);
+
+void rnbd_clt_destroy_sysfs_files(void);
+void rnbd_clt_destroy_default_group(void);
+
+void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);
+
+#endif /* RNBD_CLT_H */
diff --git a/drivers/block/rnbd/rnbd-common.c b/drivers/block/rnbd/rnbd-common.c
new file mode 100644
index 000000000000..596c3f732403
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-common.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#include "rnbd-proto.h"
+
+const char *rnbd_access_mode_str(enum rnbd_access_mode mode)
+{
+ switch (mode) {
+ case RNBD_ACCESS_RO:
+ return "ro";
+ case RNBD_ACCESS_RW:
+ return "rw";
+ case RNBD_ACCESS_MIGRATION:
+ return "migration";
+ default:
+ return "unknown";
+ }
+}
diff --git a/drivers/block/rnbd/rnbd-log.h b/drivers/block/rnbd/rnbd-log.h
new file mode 100644
index 000000000000..136e7d6c3451
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-log.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_LOG_H
+#define RNBD_LOG_H
+
+#include "rnbd-clt.h"
+#include "rnbd-srv.h"
+
+#define rnbd_clt_log(fn, dev, fmt, ...) ( \
+ fn("<%s@%s> " fmt, (dev)->pathname, \
+ (dev)->sess->sessname, \
+ ##__VA_ARGS__))
+#define rnbd_srv_log(fn, dev, fmt, ...) ( \
+ fn("<%s@%s>: " fmt, (dev)->pathname, \
+ (dev)->sess->sessname, ##__VA_ARGS__))
+
+#define rnbd_clt_err(dev, fmt, ...) \
+ rnbd_clt_log(pr_err, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_err_rl(dev, fmt, ...) \
+ rnbd_clt_log(pr_err_ratelimited, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_info(dev, fmt, ...) \
+ rnbd_clt_log(pr_info, dev, fmt, ##__VA_ARGS__)
+#define rnbd_clt_info_rl(dev, fmt, ...) \
+ rnbd_clt_log(pr_info_ratelimited, dev, fmt, ##__VA_ARGS__)
+
+#define rnbd_srv_err(dev, fmt, ...) \
+ rnbd_srv_log(pr_err, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_err_rl(dev, fmt, ...) \
+ rnbd_srv_log(pr_err_ratelimited, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_info(dev, fmt, ...) \
+ rnbd_srv_log(pr_info, dev, fmt, ##__VA_ARGS__)
+#define rnbd_srv_info_rl(dev, fmt, ...) \
+ rnbd_srv_log(pr_info_ratelimited, dev, fmt, ##__VA_ARGS__)
+
+#endif /* RNBD_LOG_H */
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
new file mode 100644
index 000000000000..ca166241452c
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_PROTO_H
+#define RNBD_PROTO_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/limits.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib.h>
+
+#define RNBD_PROTO_VER_MAJOR 2
+#define RNBD_PROTO_VER_MINOR 0
+
+/* The default port number the RTRS server is listening on. */
+#define RTRS_PORT 1234
+
+/**
+ * enum rnbd_msg_type - RNBD message types
+ * @RNBD_MSG_SESS_INFO: initial session info from client to server
+ * @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
+ * @RNBD_MSG_OPEN: open (map) device request
+ * @RNBD_MSG_OPEN_RSP: response to an @RNBD_MSG_OPEN
+ * @RNBD_MSG_IO: block IO request operation
+ * @RNBD_MSG_CLOSE: close (unmap) device request
+ */
+enum rnbd_msg_type {
+ RNBD_MSG_SESS_INFO,
+ RNBD_MSG_SESS_INFO_RSP,
+ RNBD_MSG_OPEN,
+ RNBD_MSG_OPEN_RSP,
+ RNBD_MSG_IO,
+ RNBD_MSG_CLOSE,
+};
+
+/**
+ * struct rnbd_msg_hdr - header of RNBD messages
+ * @type: Message type, see enum rnbd_msg_type for valid values
+ */
+struct rnbd_msg_hdr {
+ __le16 type;
+ __le16 __padding;
+};
+
+/*
+ * A device may be mapped RO many times but RW only once. One additional RW
+ * mapping is allowed if MIGRATION is requested (a second RW export can be
+ * required, for example, for VM migration).
+ */
+enum rnbd_access_mode {
+ RNBD_ACCESS_RO,
+ RNBD_ACCESS_RW,
+ RNBD_ACCESS_MIGRATION,
+};
+
+/**
+ * struct rnbd_msg_sess_info - initial session info from client to server
+ * @hdr: message header
+ * @ver: RNBD protocol version
+ */
+struct rnbd_msg_sess_info {
+ struct rnbd_msg_hdr hdr;
+ u8 ver;
+ u8 reserved[31];
+};
+
+/**
+ * struct rnbd_msg_sess_info_rsp - initial session info from server to client
+ * @hdr: message header
+ * @ver: RNBD protocol version
+ */
+struct rnbd_msg_sess_info_rsp {
+ struct rnbd_msg_hdr hdr;
+ u8 ver;
+ u8 reserved[31];
+};
+
+/**
+ * struct rnbd_msg_open - request to open a remote device.
+ * @hdr: message header
+ * @access_mode: the mode in which to open the remote device; see
+ * enum rnbd_access_mode for valid values
+ * @dev_name: device path on the remote side
+ */
+struct rnbd_msg_open {
+ struct rnbd_msg_hdr hdr;
+ u8 access_mode;
+ u8 resv1;
+ s8 dev_name[NAME_MAX];
+ u8 reserved[3];
+};
+
+/**
+ * struct rnbd_msg_close - request to close a remote device.
+ * @hdr: message header
+ * @device_id: device_id on server side to identify the device
+ */
+struct rnbd_msg_close {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+};
+
+/**
+ * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
+ * @hdr: message header
+ * @device_id: device_id on server side to identify the device
+ * @nsectors: number of sectors in the usual 512b unit
+ * @max_hw_sectors: max hardware sectors in the usual 512b unit
+ * @max_write_same_sectors: max sectors for WRITE SAME in the 512b unit
+ * @max_discard_sectors: max. sectors that can be discarded at once in 512b
+ * unit.
+ * @discard_granularity: size of the internal discard allocation unit in bytes
+ * @discard_alignment: offset from internal allocation assignment in bytes
+ * @physical_block_size: physical block size device supports in bytes
+ * @logical_block_size: logical block size device supports in bytes
+ * @max_segments: max segments the hardware supports in one transfer
+ * @secure_discard: supports secure discard
+ * @rotational: is it a rotational disk?
+ */
+struct rnbd_msg_open_rsp {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+ __le64 nsectors;
+ __le32 max_hw_sectors;
+ __le32 max_write_same_sectors;
+ __le32 max_discard_sectors;
+ __le32 discard_granularity;
+ __le32 discard_alignment;
+ __le16 physical_block_size;
+ __le16 logical_block_size;
+ __le16 max_segments;
+ __le16 secure_discard;
+ u8 rotational;
+ u8 reserved[11];
+};
+
+/**
+ * struct rnbd_msg_io - message for I/O read/write
+ * @hdr: message header
+ * @device_id: device_id on server side to find the right device
+ * @sector: bi_sector attribute from struct bio
+ * @rw: valid values are defined in enum rnbd_io_flags
+ * @bi_size: number of bytes for I/O read/write
+ * @prio: priority
+ */
+struct rnbd_msg_io {
+ struct rnbd_msg_hdr hdr;
+ __le32 device_id;
+ __le64 sector;
+ __le32 rw;
+ __le32 bi_size;
+ __le16 prio;
+};
+
+#define RNBD_OP_BITS 8
+#define RNBD_OP_MASK ((1 << RNBD_OP_BITS) - 1)
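+
+/*
+ * The rw field of struct rnbd_msg_io packs the operation into the low
+ * RNBD_OP_BITS bits and the RNBD_F_* modifier flags into the bits above;
+ * see rnbd_op() and rnbd_flags() below.
+ */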
+
+/**
+ * enum rnbd_io_flags - RNBD request types from rq_flag_bits
+ * @RNBD_OP_READ: read sectors from the device
+ * @RNBD_OP_WRITE: write sectors to the device
+ * @RNBD_OP_FLUSH: flush the volatile write cache
+ * @RNBD_OP_DISCARD: discard sectors
+ * @RNBD_OP_SECURE_ERASE: securely erase sectors
+ * @RNBD_OP_WRITE_SAME: write the same sectors many times
+ *
+ * @RNBD_F_SYNC: request is sync (sync write or read)
+ * @RNBD_F_FUA: forced unit access
+ */
+enum rnbd_io_flags {
+
+ /* Operations */
+
+ RNBD_OP_READ = 0,
+ RNBD_OP_WRITE = 1,
+ RNBD_OP_FLUSH = 2,
+ RNBD_OP_DISCARD = 3,
+ RNBD_OP_SECURE_ERASE = 4,
+ RNBD_OP_WRITE_SAME = 5,
+
+ RNBD_OP_LAST,
+
+ /* Flags */
+
+ RNBD_F_SYNC = 1<<(RNBD_OP_BITS + 0),
+ RNBD_F_FUA = 1<<(RNBD_OP_BITS + 1),
+
+ RNBD_F_ALL = (RNBD_F_SYNC | RNBD_F_FUA)
+
+};
+
+static inline u32 rnbd_op(u32 flags)
+{
+ return flags & RNBD_OP_MASK;
+}
+
+static inline u32 rnbd_flags(u32 flags)
+{
+ return flags & ~RNBD_OP_MASK;
+}
+
+static inline bool rnbd_flags_supported(u32 flags)
+{
+ u32 op;
+
+ op = rnbd_op(flags);
+ flags = rnbd_flags(flags);
+
+ if (op >= RNBD_OP_LAST)
+ return false;
+ if (flags & ~RNBD_F_ALL)
+ return false;
+
+ return true;
+}
+
+static inline u32 rnbd_to_bio_flags(u32 rnbd_opf)
+{
+ u32 bio_opf;
+
+ switch (rnbd_op(rnbd_opf)) {
+ case RNBD_OP_READ:
+ bio_opf = REQ_OP_READ;
+ break;
+ case RNBD_OP_WRITE:
+ bio_opf = REQ_OP_WRITE;
+ break;
+ case RNBD_OP_FLUSH:
+ bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
+ break;
+ case RNBD_OP_DISCARD:
+ bio_opf = REQ_OP_DISCARD;
+ break;
+ case RNBD_OP_SECURE_ERASE:
+ bio_opf = REQ_OP_SECURE_ERASE;
+ break;
+ case RNBD_OP_WRITE_SAME:
+ bio_opf = REQ_OP_WRITE_SAME;
+ break;
+ default:
+ WARN(1, "Unknown RNBD type: %d (flags %d)\n",
+ rnbd_op(rnbd_opf), rnbd_opf);
+ bio_opf = 0;
+ }
+
+ if (rnbd_opf & RNBD_F_SYNC)
+ bio_opf |= REQ_SYNC;
+
+ if (rnbd_opf & RNBD_F_FUA)
+ bio_opf |= REQ_FUA;
+
+ return bio_opf;
+}
+
+static inline u32 rq_to_rnbd_flags(struct request *rq)
+{
+ u32 rnbd_opf;
+
+ switch (req_op(rq)) {
+ case REQ_OP_READ:
+ rnbd_opf = RNBD_OP_READ;
+ break;
+ case REQ_OP_WRITE:
+ rnbd_opf = RNBD_OP_WRITE;
+ break;
+ case REQ_OP_DISCARD:
+ rnbd_opf = RNBD_OP_DISCARD;
+ break;
+ case REQ_OP_SECURE_ERASE:
+ rnbd_opf = RNBD_OP_SECURE_ERASE;
+ break;
+ case REQ_OP_WRITE_SAME:
+ rnbd_opf = RNBD_OP_WRITE_SAME;
+ break;
+ case REQ_OP_FLUSH:
+ rnbd_opf = RNBD_OP_FLUSH;
+ break;
+ default:
+ WARN(1, "Unknown request type %d (flags %llu)\n",
+ req_op(rq), (unsigned long long)rq->cmd_flags);
+ rnbd_opf = 0;
+ }
+
+ if (op_is_sync(rq->cmd_flags))
+ rnbd_opf |= RNBD_F_SYNC;
+
+ if (op_is_flush(rq->cmd_flags))
+ rnbd_opf |= RNBD_F_FUA;
+
+ return rnbd_opf;
+}
+
+const char *rnbd_access_mode_str(enum rnbd_access_mode mode);
+
+#endif /* RNBD_PROTO_H */
diff --git a/drivers/block/rnbd/rnbd-srv-dev.c b/drivers/block/rnbd/rnbd-srv-dev.c
new file mode 100644
index 000000000000..5eddfd29ab64
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-dev.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rnbd-srv-dev.h"
+#include "rnbd-log.h"
+
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
+ struct bio_set *bs)
+{
+ struct rnbd_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->blk_open_flags = flags;
+ dev->bdev = blkdev_get_by_path(path, flags, THIS_MODULE);
+ ret = PTR_ERR_OR_ZERO(dev->bdev);
+ if (ret)
+ goto err;
+
+ dev->blk_open_flags = flags;
+ bdevname(dev->bdev, dev->name);
+ dev->ibd_bio_set = bs;
+
+ return dev;
+
+err:
+ kfree(dev);
+ return ERR_PTR(ret);
+}
+
+void rnbd_dev_close(struct rnbd_dev *dev)
+{
+ blkdev_put(dev->bdev, dev->blk_open_flags);
+ kfree(dev);
+}
+
+static void rnbd_dev_bi_end_io(struct bio *bio)
+{
+ struct rnbd_dev_blk_io *io = bio->bi_private;
+
+ rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
+/**
+ * rnbd_bio_map_kern - map kernel address into bio
+ * @data: pointer to buffer to map
+ * @bs: bio_set to use.
+ * @len: length in bytes
+ * @gfp_mask: allocation flags for bio allocation
+ *
+ * Map the kernel address into a bio suitable for io to a block
+ * device. Returns an error pointer in case of error.
+ */
+static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
+ unsigned int len, gfp_t gfp_mask)
+{
+ unsigned long kaddr = (unsigned long)data;
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
+ int offset, i;
+ struct bio *bio;
+
+ bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
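+ /*
+ * Attach the buffer to the bio page by page; bail out if a page
+ * cannot be added in full, since partial mappings are not supported.
+ */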
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+
+ if (len <= 0)
+ break;
+
+ if (bytes > len)
+ bytes = len;
+
+ if (bio_add_page(bio, virt_to_page(data), bytes,
+ offset) < bytes) {
+ /* we don't support partial mappings */
+ bio_put(bio);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ bio->bi_end_io = bio_put;
+ return bio;
+}
+
+int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
+ size_t len, u32 bi_size, enum rnbd_io_flags flags,
+ short prio, void *priv)
+{
+ struct rnbd_dev_blk_io *io;
+ struct bio *bio;
+
+ /* Generate bio with pages pointing to the rdma buffer */
+ bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ io = container_of(bio, struct rnbd_dev_blk_io, bio);
+
+ io->dev = dev;
+ io->priv = priv;
+
+ bio->bi_end_io = rnbd_dev_bi_end_io;
+ bio->bi_private = io;
+ bio->bi_opf = rnbd_to_bio_flags(flags);
+ bio->bi_iter.bi_sector = sector;
+ bio->bi_iter.bi_size = bi_size;
+ bio_set_prio(bio, prio);
+ bio_set_dev(bio, dev->bdev);
+
+ submit_bio(bio);
+
+ return 0;
+}
diff --git a/drivers/block/rnbd/rnbd-srv-dev.h b/drivers/block/rnbd/rnbd-srv-dev.h
new file mode 100644
index 000000000000..0f65b09a270e
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-dev.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_SRV_DEV_H
+#define RNBD_SRV_DEV_H
+
+#include <linux/fs.h>
+#include "rnbd-proto.h"
+
+struct rnbd_dev {
+ struct block_device *bdev;
+ struct bio_set *ibd_bio_set;
+ fmode_t blk_open_flags;
+ char name[BDEVNAME_SIZE];
+};
+
+struct rnbd_dev_blk_io {
+ struct rnbd_dev *dev;
+ void *priv;
+ /* have to be last member for front_pad usage of bioset_init */
+ struct bio bio;
+};
+
+/**
+ * rnbd_dev_open() - Open a device
+ * @path: path to the block device to open
+ * @flags: open flags
+ * @bs: bio_set to use during block I/O
+ */
+struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
+ struct bio_set *bs);
+
+/**
+ * rnbd_dev_close() - Close a device
+ */
+void rnbd_dev_close(struct rnbd_dev *dev);
+
+void rnbd_endio(void *priv, int error);
+
+static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
+{
+ return queue_max_segments(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
+{
+ return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
+{
+ return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
+}
+
+static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
+{
+ if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
+ return 0;
+
+ return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
+ REQ_OP_DISCARD);
+}
+
+static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
+{
+ return bdev_get_queue(dev->bdev)->limits.discard_granularity;
+}
+
+static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
+{
+ return bdev_get_queue(dev->bdev)->limits.discard_alignment;
+}
+
+/**
+ * rnbd_dev_submit_io() - Submit an I/O to the disk
+ * @dev: device to which the I/O is submitted
+ * @sector: address to read/write data to
+ * @data: I/O data to write or buffer to read I/O data into
+ * @len: length of @data
+ * @bi_size: amount of data that will be read/written
+ * @flags: RNBD I/O flags, see enum rnbd_io_flags
+ * @prio: IO priority
+ * @priv: private data passed back to rnbd_endio() on I/O completion
+ */
+int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
+ size_t len, u32 bi_size, enum rnbd_io_flags flags,
+ short prio, void *priv);
+
+#endif /* RNBD_SRV_DEV_H */
diff --git a/drivers/block/rnbd/rnbd-srv-sysfs.c b/drivers/block/rnbd/rnbd-srv-sysfs.c
new file mode 100644
index 000000000000..106775c074d1
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv-sysfs.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <uapi/linux/limits.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/genhd.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+
+#include "rnbd-srv.h"
+
+static struct device *rnbd_dev;
+static struct class *rnbd_dev_class;
+static struct kobject *rnbd_devs_kobj;
+
+static void rnbd_srv_dev_release(struct kobject *kobj)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = container_of(kobj, struct rnbd_srv_dev, dev_kobj);
+
+ kfree(dev);
+}
+
+static struct kobj_type dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rnbd_srv_dev_release
+};
+
+int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
+ struct block_device *bdev,
+ const char *dev_name)
+{
+ struct kobject *bdev_kobj;
+ int ret;
+
+ ret = kobject_init_and_add(&dev->dev_kobj, &dev_ktype,
+ rnbd_devs_kobj, dev_name);
+ if (ret)
+ return ret;
+
+ dev->dev_sessions_kobj = kobject_create_and_add("sessions",
+ &dev->dev_kobj);
+ if (!dev->dev_sessions_kobj)
+ goto put_dev_kobj;
+
+ bdev_kobj = &disk_to_dev(bdev->bd_disk)->kobj;
+ ret = sysfs_create_link(&dev->dev_kobj, bdev_kobj, "block_dev");
+ if (ret)
+ goto put_sess_kobj;
+
+ return 0;
+
+put_sess_kobj:
+ kobject_put(dev->dev_sessions_kobj);
+put_dev_kobj:
+ kobject_put(&dev->dev_kobj);
+ return ret;
+}
+
+void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev)
+{
+ sysfs_remove_link(&dev->dev_kobj, "block_dev");
+ kobject_del(dev->dev_sessions_kobj);
+ kobject_put(dev->dev_sessions_kobj);
+ kobject_del(&dev->dev_kobj);
+ kobject_put(&dev->dev_kobj);
+}
+
+static ssize_t read_only_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n",
+ !(sess_dev->open_flags & FMODE_WRITE));
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_ro_attr =
+ __ATTR_RO(read_only);
+
+static ssize_t access_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ rnbd_access_mode_str(sess_dev->access_mode));
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_access_mode_attr =
+ __ATTR_RO(access_mode);
+
+static ssize_t mapping_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess_dev->pathname);
+}
+
+static struct kobj_attribute rnbd_srv_dev_session_mapping_path_attr =
+ __ATTR_RO(mapping_path);
+
+static struct attribute *rnbd_srv_default_dev_sessions_attrs[] = {
+ &rnbd_srv_dev_session_access_mode_attr.attr,
+ &rnbd_srv_dev_session_ro_attr.attr,
+ &rnbd_srv_dev_session_mapping_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rnbd_srv_default_dev_session_attr_group = {
+ .attrs = rnbd_srv_default_dev_sessions_attrs,
+};
+
+void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
+{
+ sysfs_remove_group(&sess_dev->kobj,
+ &rnbd_srv_default_dev_session_attr_group);
+
+ kobject_del(&sess_dev->kobj);
+ kobject_put(&sess_dev->kobj);
+}
+
+static void rnbd_srv_sess_dev_release(struct kobject *kobj)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kobj, struct rnbd_srv_sess_dev, kobj);
+ rnbd_destroy_sess_dev(sess_dev);
+}
+
+static struct kobj_type rnbd_srv_sess_dev_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rnbd_srv_sess_dev_release,
+};
+
+int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&sess_dev->kobj, &rnbd_srv_sess_dev_ktype,
+ sess_dev->dev->dev_sessions_kobj, "%s",
+ sess_dev->sess->sessname);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&sess_dev->kobj,
+ &rnbd_srv_default_dev_session_attr_group);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ kobject_put(&sess_dev->kobj);
+
+ return ret;
+}
+
+int rnbd_srv_create_sysfs_files(void)
+{
+ int err;
+
+ rnbd_dev_class = class_create(THIS_MODULE, "rnbd-server");
+ if (IS_ERR(rnbd_dev_class))
+ return PTR_ERR(rnbd_dev_class);
+
+ rnbd_dev = device_create(rnbd_dev_class, NULL,
+ MKDEV(0, 0), NULL, "ctl");
+ if (IS_ERR(rnbd_dev)) {
+ err = PTR_ERR(rnbd_dev);
+ goto cls_destroy;
+ }
+ rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
+ if (!rnbd_devs_kobj) {
+ err = -ENOMEM;
+ goto dev_destroy;
+ }
+
+ return 0;
+
+dev_destroy:
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+cls_destroy:
+ class_destroy(rnbd_dev_class);
+
+ return err;
+}
+
+void rnbd_srv_destroy_sysfs_files(void)
+{
+ kobject_del(rnbd_devs_kobj);
+ kobject_put(rnbd_devs_kobj);
+ device_destroy(rnbd_dev_class, MKDEV(0, 0));
+ class_destroy(rnbd_dev_class);
+}
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
new file mode 100644
index 000000000000..86e61523907b
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+
+#include "rnbd-srv.h"
+#include "rnbd-srv-dev.h"
+
+MODULE_DESCRIPTION("RDMA Network Block Device Server");
+MODULE_LICENSE("GPL");
+
+static u16 port_nr = RTRS_PORT;
+
+module_param_named(port_nr, port_nr, ushort, 0444);
+MODULE_PARM_DESC(port_nr,
+ "The port number the server is listening on (default: "
+ __stringify(RTRS_PORT)")");
+
+#define DEFAULT_DEV_SEARCH_PATH "/"
+
+static char dev_search_path[PATH_MAX] = DEFAULT_DEV_SEARCH_PATH;
+
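+/* Accept a new dev_search_path, stripping a trailing newline if present. */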
+static int dev_search_path_set(const char *val, const struct kernel_param *kp)
+{
+ const char *p = strrchr(val, '\n') ? : val + strlen(val);
+
+ if (strlen(val) >= sizeof(dev_search_path))
+ return -EINVAL;
+
+ snprintf(dev_search_path, sizeof(dev_search_path), "%.*s",
+ (int)(p - val), val);
+
+ pr_info("dev_search_path changed to '%s'\n", dev_search_path);
+
+ return 0;
+}
+
+static struct kparam_string dev_search_path_kparam_str = {
+ .maxlen = sizeof(dev_search_path),
+ .string = dev_search_path
+};
+
+static const struct kernel_param_ops dev_search_path_ops = {
+ .set = dev_search_path_set,
+ .get = param_get_string,
+};
+
+module_param_cb(dev_search_path, &dev_search_path_ops,
+ &dev_search_path_kparam_str, 0444);
+MODULE_PARM_DESC(dev_search_path,
+ "Sets the dev_search_path. When a device is mapped this path is prepended to the device path from the map device operation. If %SESSNAME% is specified in a path, then device will be searched in a session namespace. (default: "
+ DEFAULT_DEV_SEARCH_PATH ")");
+
+static DEFINE_MUTEX(sess_lock);
+static DEFINE_SPINLOCK(dev_lock);
+
+static LIST_HEAD(sess_list);
+static LIST_HEAD(dev_list);
+
+struct rnbd_io_private {
+ struct rtrs_srv_op *id;
+ struct rnbd_srv_sess_dev *sess_dev;
+};
+
+static void rnbd_sess_dev_release(struct kref *kref)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = container_of(kref, struct rnbd_srv_sess_dev, kref);
+ complete(sess_dev->destroy_comp);
+}
+
+static inline void rnbd_put_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+{
+ kref_put(&sess_dev->kref, rnbd_sess_dev_release);
+}
+
+void rnbd_endio(void *priv, int error)
+{
+ struct rnbd_io_private *rnbd_priv = priv;
+ struct rnbd_srv_sess_dev *sess_dev = rnbd_priv->sess_dev;
+
+ rnbd_put_sess_dev(sess_dev);
+
+ rtrs_srv_resp_rdma(rnbd_priv->id, error);
+
+ kfree(priv);
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_get_sess_dev(int dev_id, struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+ int ret = 0;
+
+ rcu_read_lock();
+ sess_dev = xa_load(&srv_sess->index_idr, dev_id);
+ if (likely(sess_dev))
+ ret = kref_get_unless_zero(&sess_dev->kref);
+ rcu_read_unlock();
+
+ if (!sess_dev || !ret)
+ return ERR_PTR(-ENXIO);
+
+ return sess_dev;
+}
+
+static int process_rdma(struct rtrs_srv *sess,
+ struct rnbd_srv_session *srv_sess,
+ struct rtrs_srv_op *id, void *data, u32 datalen,
+ const void *usr, size_t usrlen)
+{
+ const struct rnbd_msg_io *msg = usr;
+ struct rnbd_io_private *priv;
+ struct rnbd_srv_sess_dev *sess_dev;
+ u32 dev_id;
+ int err;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_id = le32_to_cpu(msg->device_id);
+
+ sess_dev = rnbd_get_sess_dev(dev_id, srv_sess);
+ if (IS_ERR(sess_dev)) {
+ pr_err_ratelimited("Got I/O request on session %s for unknown device id %d\n",
+ srv_sess->sessname, dev_id);
+ err = -ENOTCONN;
+ goto err;
+ }
+
+ priv->sess_dev = sess_dev;
+ priv->id = id;
+
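+ /*
+ * Older clients (protocol version < RNBD_PROTO_VER_MAJOR) and short
+ * rnbd_msg_io messages do not carry a prio field, so fall back to 0.
+ */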
+ err = rnbd_dev_submit_io(sess_dev->rnbd_dev, le64_to_cpu(msg->sector),
+ data, datalen, le32_to_cpu(msg->bi_size),
+ le32_to_cpu(msg->rw),
+ srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
+ usrlen < sizeof(*msg) ?
+ 0 : le16_to_cpu(msg->prio), priv);
+ if (unlikely(err)) {
+ rnbd_srv_err(sess_dev, "Submitting I/O to device failed, err: %d\n",
+ err);
+ goto sess_dev_put;
+ }
+
+ return 0;
+
+sess_dev_put:
+ rnbd_put_sess_dev(sess_dev);
+err:
+ kfree(priv);
+ return err;
+}
+
+static void destroy_device(struct rnbd_srv_dev *dev)
+{
+ WARN_ONCE(!list_empty(&dev->sess_dev_list),
+ "Device %s is being destroyed but still in use!\n",
+ dev->id);
+
+ spin_lock(&dev_lock);
+ list_del(&dev->list);
+ spin_unlock(&dev_lock);
+
+ mutex_destroy(&dev->lock);
+ if (dev->dev_kobj.state_in_sysfs)
+ /*
+ * Destroy kobj only if it was really created.
+ */
+ rnbd_srv_destroy_dev_sysfs(dev);
+ else
+ kfree(dev);
+}
+
+static void destroy_device_cb(struct kref *kref)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = container_of(kref, struct rnbd_srv_dev, kref);
+
+ destroy_device(dev);
+}
+
+static void rnbd_put_srv_dev(struct rnbd_srv_dev *dev)
+{
+ kref_put(&dev->kref, destroy_device_cb);
+}
+
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev)
+{
+ DECLARE_COMPLETION_ONSTACK(dc);
+
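+ /*
+ * Remove the device from the session xarray and wait for an RCU grace
+ * period so no new references can be taken, then drop our reference
+ * and wait until all in-flight users are done.
+ */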
+ xa_erase(&sess_dev->sess->index_idr, sess_dev->device_id);
+ synchronize_rcu();
+ sess_dev->destroy_comp = &dc;
+ rnbd_put_sess_dev(sess_dev);
+ wait_for_completion(&dc); /* wait for inflights to drop to zero */
+
+ rnbd_dev_close(sess_dev->rnbd_dev);
+ list_del(&sess_dev->sess_list);
+ mutex_lock(&sess_dev->dev->lock);
+ list_del(&sess_dev->dev_list);
+ if (sess_dev->open_flags & FMODE_WRITE)
+ sess_dev->dev->open_write_cnt--;
+ mutex_unlock(&sess_dev->dev->lock);
+
+ rnbd_put_srv_dev(sess_dev->dev);
+
+ rnbd_srv_info(sess_dev, "Device closed\n");
+ kfree(sess_dev);
+}
+
+static void destroy_sess(struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev, *tmp;
+
+ if (list_empty(&srv_sess->sess_dev_list))
+ goto out;
+
+ mutex_lock(&srv_sess->lock);
+ list_for_each_entry_safe(sess_dev, tmp, &srv_sess->sess_dev_list,
+ sess_list)
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&srv_sess->lock);
+
+out:
+ xa_destroy(&srv_sess->index_idr);
+ bioset_exit(&srv_sess->sess_bio_set);
+
+ pr_info("RTRS Session %s disconnected\n", srv_sess->sessname);
+
+ mutex_lock(&sess_lock);
+ list_del(&srv_sess->list);
+ mutex_unlock(&sess_lock);
+
+ mutex_destroy(&srv_sess->lock);
+ kfree(srv_sess);
+}
+
+static int create_sess(struct rtrs_srv *rtrs)
+{
+ struct rnbd_srv_session *srv_sess;
+ char sessname[NAME_MAX];
+ int err;
+
+ err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+ if (err) {
+ pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+
+ return err;
+ }
+ srv_sess = kzalloc(sizeof(*srv_sess), GFP_KERNEL);
+ if (!srv_sess)
+ return -ENOMEM;
+
+ srv_sess->queue_depth = rtrs_srv_get_queue_depth(rtrs);
+ err = bioset_init(&srv_sess->sess_bio_set, srv_sess->queue_depth,
+ offsetof(struct rnbd_dev_blk_io, bio),
+ BIOSET_NEED_BVECS);
+ if (err) {
+ pr_err("Allocating srv_session for session %s failed\n",
+ sessname);
+ kfree(srv_sess);
+ return err;
+ }
+
+ xa_init_flags(&srv_sess->index_idr, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&srv_sess->sess_dev_list);
+ mutex_init(&srv_sess->lock);
+ mutex_lock(&sess_lock);
+ list_add(&srv_sess->list, &sess_list);
+ mutex_unlock(&sess_lock);
+
+ srv_sess->rtrs = rtrs;
+ strlcpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+
+ rtrs_srv_set_sess_priv(rtrs, srv_sess);
+
+ return 0;
+}
+
+static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+ enum rtrs_srv_link_ev ev, void *priv)
+{
+ struct rnbd_srv_session *srv_sess = priv;
+
+ switch (ev) {
+ case RTRS_SRV_LINK_EV_CONNECTED:
+ return create_sess(rtrs);
+
+ case RTRS_SRV_LINK_EV_DISCONNECTED:
+ if (WARN_ON_ONCE(!srv_sess))
+ return -EINVAL;
+
+ destroy_sess(srv_sess);
+ return 0;
+
+ default:
+ pr_warn("Received unknown RTRS session event %d from session %s\n",
+ ev, srv_sess->sessname);
+ return -EINVAL;
+ }
+}
+
+static int process_msg_close(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen)
+{
+ const struct rnbd_msg_close *close_msg = usr;
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ sess_dev = rnbd_get_sess_dev(le32_to_cpu(close_msg->device_id),
+ srv_sess);
+ if (IS_ERR(sess_dev))
+ return 0;
+
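+ /* Drop the reference taken by rnbd_get_sess_dev() above. */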
+ rnbd_put_sess_dev(sess_dev);
+ mutex_lock(&srv_sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&srv_sess->lock);
+ return 0;
+}
+
+static int process_msg_open(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen);
+
+static int process_msg_sess_info(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen);
+
+static int rnbd_srv_rdma_ev(struct rtrs_srv *rtrs, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen)
+{
+ struct rnbd_srv_session *srv_sess = priv;
+ const struct rnbd_msg_hdr *hdr = usr;
+ int ret = 0;
+ u16 type;
+
+ if (WARN_ON_ONCE(!srv_sess))
+ return -ENODEV;
+
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RNBD_MSG_IO:
+ return process_rdma(rtrs, srv_sess, id, data, datalen, usr,
+ usrlen);
+ case RNBD_MSG_CLOSE:
+ ret = process_msg_close(rtrs, srv_sess, data, datalen,
+ usr, usrlen);
+ break;
+ case RNBD_MSG_OPEN:
+ ret = process_msg_open(rtrs, srv_sess, usr, usrlen,
+ data, datalen);
+ break;
+ case RNBD_MSG_SESS_INFO:
+ ret = process_msg_sess_info(rtrs, srv_sess, usr, usrlen,
+ data, datalen);
+ break;
+ default:
+ pr_warn("Received unexpected message type %d with dir %d from session %s\n",
+ type, dir, srv_sess->sessname);
+ return -EINVAL;
+ }
+
+ rtrs_srv_resp_rdma(id, ret);
+ return 0;
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_sess_dev_alloc(struct rnbd_srv_session *srv_sess)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+ int error;
+
+ sess_dev = kzalloc(sizeof(*sess_dev), GFP_KERNEL);
+ if (!sess_dev)
+ return ERR_PTR(-ENOMEM);
+
+ error = xa_alloc(&srv_sess->index_idr, &sess_dev->device_id, sess_dev,
+ xa_limit_32b, GFP_NOWAIT);
+ if (error < 0) {
+ pr_warn("Allocating idr failed, err: %d\n", error);
+ kfree(sess_dev);
+ return ERR_PTR(error);
+ }
+
+ return sess_dev;
+}
+
+static struct rnbd_srv_dev *rnbd_srv_init_srv_dev(const char *id)
+{
+ struct rnbd_srv_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ strlcpy(dev->id, id, sizeof(dev->id));
+ kref_init(&dev->kref);
+ INIT_LIST_HEAD(&dev->sess_dev_list);
+ mutex_init(&dev->lock);
+
+ return dev;
+}
+
+static struct rnbd_srv_dev *
+rnbd_srv_find_or_add_srv_dev(struct rnbd_srv_dev *new_dev)
+{
+ struct rnbd_srv_dev *dev;
+
+ spin_lock(&dev_lock);
+ list_for_each_entry(dev, &dev_list, list) {
+ if (!strncmp(dev->id, new_dev->id, sizeof(dev->id))) {
+ if (!kref_get_unless_zero(&dev->kref))
+ /*
+ * We lost the race, device is almost dead.
+ * Continue traversing to find a valid one.
+ */
+ continue;
+ spin_unlock(&dev_lock);
+ return dev;
+ }
+ }
+ list_add(&new_dev->list, &dev_list);
+ spin_unlock(&dev_lock);
+
+ return new_dev;
+}
+
+static int rnbd_srv_check_update_open_perm(struct rnbd_srv_dev *srv_dev,
+ struct rnbd_srv_session *srv_sess,
+ enum rnbd_access_mode access_mode)
+{
+ int ret = -EPERM;
+
+ mutex_lock(&srv_dev->lock);
+
+ switch (access_mode) {
+ case RNBD_ACCESS_RO:
+ ret = 0;
+ break;
+ case RNBD_ACCESS_RW:
+ if (srv_dev->open_write_cnt == 0) {
+ srv_dev->open_write_cnt++;
+ ret = 0;
+ } else {
+ pr_err("Mapping device '%s' for session %s with RW permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
+ srv_dev->id, srv_sess->sessname,
+ srv_dev->open_write_cnt,
+ rnbd_access_mode_str(access_mode));
+ }
+ break;
+ case RNBD_ACCESS_MIGRATION:
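+ /*
+ * Migration allows one more writer on top of an already
+ * existing RW mapping (e.g. for VM live migration).
+ */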
+ if (srv_dev->open_write_cnt < 2) {
+ srv_dev->open_write_cnt++;
+ ret = 0;
+ } else {
+ pr_err("Mapping device '%s' for session %s with migration permissions failed. Device already opened as 'RW' by %d client(s), access mode %s.\n",
+ srv_dev->id, srv_sess->sessname,
+ srv_dev->open_write_cnt,
+ rnbd_access_mode_str(access_mode));
+ }
+ break;
+ default:
+ pr_err("Received mapping request for device '%s' on session %s with invalid access mode: %d\n",
+ srv_dev->id, srv_sess->sessname, access_mode);
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&srv_dev->lock);
+
+ return ret;
+}
+
+static struct rnbd_srv_dev *
+rnbd_srv_get_or_create_srv_dev(struct rnbd_dev *rnbd_dev,
+ struct rnbd_srv_session *srv_sess,
+ enum rnbd_access_mode access_mode)
+{
+ int ret;
+ struct rnbd_srv_dev *new_dev, *dev;
+
+ new_dev = rnbd_srv_init_srv_dev(rnbd_dev->name);
+ if (IS_ERR(new_dev))
+ return new_dev;
+
+ dev = rnbd_srv_find_or_add_srv_dev(new_dev);
+ if (dev != new_dev)
+ kfree(new_dev);
+
+ ret = rnbd_srv_check_update_open_perm(dev, srv_sess, access_mode);
+ if (ret) {
+ rnbd_put_srv_dev(dev);
+ return ERR_PTR(ret);
+ }
+
+ return dev;
+}
+
+static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
+ struct rnbd_srv_sess_dev *sess_dev)
+{
+ struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+
+ rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
+ rsp->device_id =
+ cpu_to_le32(sess_dev->device_id);
+ rsp->nsectors =
+ cpu_to_le64(get_capacity(rnbd_dev->bdev->bd_disk));
+ rsp->logical_block_size =
+ cpu_to_le16(bdev_logical_block_size(rnbd_dev->bdev));
+ rsp->physical_block_size =
+ cpu_to_le16(bdev_physical_block_size(rnbd_dev->bdev));
+ rsp->max_segments =
+ cpu_to_le16(rnbd_dev_get_max_segs(rnbd_dev));
+ rsp->max_hw_sectors =
+ cpu_to_le32(rnbd_dev_get_max_hw_sects(rnbd_dev));
+ rsp->max_write_same_sectors =
+ cpu_to_le32(bdev_write_same(rnbd_dev->bdev));
+ rsp->max_discard_sectors =
+ cpu_to_le32(rnbd_dev_get_max_discard_sects(rnbd_dev));
+ rsp->discard_granularity =
+ cpu_to_le32(rnbd_dev_get_discard_granularity(rnbd_dev));
+ rsp->discard_alignment =
+ cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
+ rsp->secure_discard =
+ cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
+ rsp->rotational =
+ !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+}
+
+static struct rnbd_srv_sess_dev *
+rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
+ const struct rnbd_msg_open *open_msg,
+ struct rnbd_dev *rnbd_dev, fmode_t open_flags,
+ struct rnbd_srv_dev *srv_dev)
+{
+ struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
+
+ if (IS_ERR(sdev))
+ return sdev;
+
+ kref_init(&sdev->kref);
+
+ strlcpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
+
+ sdev->rnbd_dev = rnbd_dev;
+ sdev->sess = srv_sess;
+ sdev->dev = srv_dev;
+ sdev->open_flags = open_flags;
+ sdev->access_mode = open_msg->access_mode;
+
+ return sdev;
+}
+
+static char *rnbd_srv_get_full_path(struct rnbd_srv_session *srv_sess,
+ const char *dev_name)
+{
+ char *full_path;
+ char *a, *b;
+
+ full_path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!full_path)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Replace %SESSNAME% with a real session name in order to
+ * create device namespace.
+ */
+ a = strnstr(dev_search_path, "%SESSNAME%", sizeof(dev_search_path));
+ if (a) {
+ int len = a - dev_search_path;
+
+ len = snprintf(full_path, PATH_MAX, "%.*s/%s/%s", len,
+ dev_search_path, srv_sess->sessname, dev_name);
+ if (len >= PATH_MAX) {
+ pr_err("Too long path: %s, %s, %s\n",
+ dev_search_path, srv_sess->sessname, dev_name);
+ kfree(full_path);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ snprintf(full_path, PATH_MAX, "%s/%s",
+ dev_search_path, dev_name);
+ }
+
+ /* eliminate duplicated slashes */
+ a = strchr(full_path, '/');
+ b = a;
+ while (*b != '\0') {
+ if (*b == '/' && *a == '/') {
+ b++;
+ } else {
+ a++;
+ *a = *b;
+ b++;
+ }
+ }
+ a++;
+ *a = '\0';
+
+ return full_path;
+}
+
+static int process_msg_sess_info(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen)
+{
+ const struct rnbd_msg_sess_info *sess_info_msg = msg;
+ struct rnbd_msg_sess_info_rsp *rsp = data;
+
+ srv_sess->ver = min_t(u8, sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+ pr_debug("Session %s using protocol version %d (client version: %d, server version: %d)\n",
+ srv_sess->sessname, srv_sess->ver,
+ sess_info_msg->ver, RNBD_PROTO_VER_MAJOR);
+
+ rsp->hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO_RSP);
+ rsp->ver = srv_sess->ver;
+
+ return 0;
+}
+
+/**
+ * find_srv_sess_dev() - check whether a device is already opened by a session
+ * @srv_sess: the session to search.
+ * @dev_name: string containing the name of the device.
+ *
+ * Return: the struct rnbd_srv_sess_dev if @srv_sess has already opened
+ * @dev_name, or NULL if the session did not open the device yet.
+ */
+static struct rnbd_srv_sess_dev *
+find_srv_sess_dev(struct rnbd_srv_session *srv_sess, const char *dev_name)
+{
+ struct rnbd_srv_sess_dev *sess_dev;
+
+ if (list_empty(&srv_sess->sess_dev_list))
+ return NULL;
+
+ list_for_each_entry(sess_dev, &srv_sess->sess_dev_list, sess_list)
+ if (!strcmp(sess_dev->pathname, dev_name))
+ return sess_dev;
+
+ return NULL;
+}
+
+static int process_msg_open(struct rtrs_srv *rtrs,
+ struct rnbd_srv_session *srv_sess,
+ const void *msg, size_t len,
+ void *data, size_t datalen)
+{
+ int ret;
+ struct rnbd_srv_dev *srv_dev;
+ struct rnbd_srv_sess_dev *srv_sess_dev;
+ const struct rnbd_msg_open *open_msg = msg;
+ fmode_t open_flags;
+ char *full_path;
+ struct rnbd_dev *rnbd_dev;
+ struct rnbd_msg_open_rsp *rsp = data;
+
+ pr_debug("Open message received: session='%s' path='%s' access_mode=%d\n",
+ srv_sess->sessname, open_msg->dev_name,
+ open_msg->access_mode);
+ open_flags = FMODE_READ;
+ if (open_msg->access_mode != RNBD_ACCESS_RO)
+ open_flags |= FMODE_WRITE;
+
+ mutex_lock(&srv_sess->lock);
+
+ srv_sess_dev = find_srv_sess_dev(srv_sess, open_msg->dev_name);
+ if (srv_sess_dev)
+ goto fill_response;
+
+ if ((strlen(dev_search_path) + strlen(open_msg->dev_name))
+ >= PATH_MAX) {
+ pr_err("Opening device for session %s failed, device path too long. '%s/%s' is longer than PATH_MAX (%d)\n",
+ srv_sess->sessname, dev_search_path, open_msg->dev_name,
+ PATH_MAX);
+ ret = -EINVAL;
+ goto reject;
+ }
+ if (strstr(open_msg->dev_name, "..")) {
+ pr_err("Opening device for session %s failed, device path %s contains relative path ..\n",
+ srv_sess->sessname, open_msg->dev_name);
+ ret = -EINVAL;
+ goto reject;
+ }
+ full_path = rnbd_srv_get_full_path(srv_sess, open_msg->dev_name);
+ if (IS_ERR(full_path)) {
+ ret = PTR_ERR(full_path);
+ pr_err("Opening device '%s' for client %s failed, failed to get device full path, err: %d\n",
+ open_msg->dev_name, srv_sess->sessname, ret);
+ goto reject;
+ }
+
+ rnbd_dev = rnbd_dev_open(full_path, open_flags,
+ &srv_sess->sess_bio_set);
+ if (IS_ERR(rnbd_dev)) {
+ pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(rnbd_dev));
+ ret = PTR_ERR(rnbd_dev);
+ goto free_path;
+ }
+
+ srv_dev = rnbd_srv_get_or_create_srv_dev(rnbd_dev, srv_sess,
+ open_msg->access_mode);
+ if (IS_ERR(srv_dev)) {
+ pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(srv_dev));
+ ret = PTR_ERR(srv_dev);
+ goto rnbd_dev_close;
+ }
+
+ srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
+ rnbd_dev, open_flags,
+ srv_dev);
+ if (IS_ERR(srv_sess_dev)) {
+ pr_err("Opening device '%s' on session %s failed, creating sess_dev failed, err: %ld\n",
+ full_path, srv_sess->sessname, PTR_ERR(srv_sess_dev));
+ ret = PTR_ERR(srv_sess_dev);
+ goto srv_dev_put;
+ }
+
+ /* Create the srv_dev sysfs files if they haven't been created yet. The
+ * reason to delay the creation is not to create the sysfs files before
+ * we are sure the device can be opened.
+ */
+ mutex_lock(&srv_dev->lock);
+ if (!srv_dev->dev_kobj.state_in_sysfs) {
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, rnbd_dev->bdev,
+ rnbd_dev->name);
+ if (ret) {
+ mutex_unlock(&srv_dev->lock);
+ rnbd_srv_err(srv_sess_dev,
+ "Opening device failed, failed to create device sysfs files, err: %d\n",
+ ret);
+ goto free_srv_sess_dev;
+ }
+ }
+
+ ret = rnbd_srv_create_dev_session_sysfs(srv_sess_dev);
+ if (ret) {
+ mutex_unlock(&srv_dev->lock);
+ rnbd_srv_err(srv_sess_dev,
+ "Opening device failed, failed to create dev client sysfs files, err: %d\n",
+ ret);
+ goto free_srv_sess_dev;
+ }
+
+ list_add(&srv_sess_dev->dev_list, &srv_dev->sess_dev_list);
+ mutex_unlock(&srv_dev->lock);
+
+ list_add(&srv_sess_dev->sess_list, &srv_sess->sess_dev_list);
+
+ rnbd_srv_info(srv_sess_dev, "Opened device '%s'\n", srv_dev->id);
+
+ kfree(full_path);
+
+fill_response:
+ rnbd_srv_fill_msg_open_rsp(rsp, srv_sess_dev);
+ mutex_unlock(&srv_sess->lock);
+ return 0;
+
+free_srv_sess_dev:
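+ /* Remove from the xarray and wait for RCU readers before freeing. */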
+ xa_erase(&srv_sess->index_idr, srv_sess_dev->device_id);
+ synchronize_rcu();
+ kfree(srv_sess_dev);
+srv_dev_put:
+ if (open_msg->access_mode != RNBD_ACCESS_RO) {
+ mutex_lock(&srv_dev->lock);
+ srv_dev->open_write_cnt--;
+ mutex_unlock(&srv_dev->lock);
+ }
+ rnbd_put_srv_dev(srv_dev);
+rnbd_dev_close:
+ rnbd_dev_close(rnbd_dev);
+free_path:
+ kfree(full_path);
+reject:
+ mutex_unlock(&srv_sess->lock);
+ return ret;
+}
+
+static struct rtrs_srv_ctx *rtrs_ctx;
+
+static struct rtrs_srv_ops rtrs_ops;
+static int __init rnbd_srv_init_module(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
+ BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
+ rtrs_ops = (struct rtrs_srv_ops) {
+ .rdma_ev = rnbd_srv_rdma_ev,
+ .link_ev = rnbd_srv_link_ev,
+ };
+ rtrs_ctx = rtrs_srv_open(&rtrs_ops, port_nr);
+ if (IS_ERR(rtrs_ctx)) {
+ err = PTR_ERR(rtrs_ctx);
+ pr_err("rtrs_srv_open(), err: %d\n", err);
+ return err;
+ }
+
+ err = rnbd_srv_create_sysfs_files();
+ if (err) {
+ pr_err("rnbd_srv_create_sysfs_files(), err: %d\n", err);
+ rtrs_srv_close(rtrs_ctx);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit rnbd_srv_cleanup_module(void)
+{
+ rtrs_srv_close(rtrs_ctx);
+ WARN_ON(!list_empty(&sess_list));
+ rnbd_srv_destroy_sysfs_files();
+}
+
+module_init(rnbd_srv_init_module);
+module_exit(rnbd_srv_cleanup_module);
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
new file mode 100644
index 000000000000..5a8544b5e74f
--- /dev/null
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Network Block Driver
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RNBD_SRV_H
+#define RNBD_SRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+
+#include <rtrs.h>
+#include "rnbd-proto.h"
+#include "rnbd-log.h"
+
+struct rnbd_srv_session {
+ /* Entry inside global sess_list */
+ struct list_head list;
+ struct rtrs_srv *rtrs;
+ char sessname[NAME_MAX];
+ int queue_depth;
+ struct bio_set sess_bio_set;
+
+ struct xarray index_idr;
+ /* List of struct rnbd_srv_sess_dev */
+ struct list_head sess_dev_list;
+ struct mutex lock;
+ u8 ver;
+};
+
+struct rnbd_srv_dev {
+ /* Entry inside global dev_list */
+ struct list_head list;
+ struct kobject dev_kobj;
+ struct kobject *dev_sessions_kobj;
+ struct kref kref;
+ char id[NAME_MAX];
+ /* List of rnbd_srv_sess_dev structs */
+ struct list_head sess_dev_list;
+ struct mutex lock;
+ int open_write_cnt;
+};
+
+/* Structure which binds N devices and N sessions */
+struct rnbd_srv_sess_dev {
+ /* Entry inside rnbd_srv_dev struct */
+ struct list_head dev_list;
+ /* Entry inside rnbd_srv_session struct */
+ struct list_head sess_list;
+ struct rnbd_dev *rnbd_dev;
+ struct rnbd_srv_session *sess;
+ struct rnbd_srv_dev *dev;
+ struct kobject kobj;
+ u32 device_id;
+ fmode_t open_flags;
+ struct kref kref;
+ struct completion *destroy_comp;
+ char pathname[NAME_MAX];
+ enum rnbd_access_mode access_mode;
+};
+
+/* rnbd-srv-sysfs.c */
+
+int rnbd_srv_create_dev_sysfs(struct rnbd_srv_dev *dev,
+ struct block_device *bdev,
+ const char *dir_name);
+void rnbd_srv_destroy_dev_sysfs(struct rnbd_srv_dev *dev);
+int rnbd_srv_create_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
+void rnbd_srv_destroy_dev_session_sysfs(struct rnbd_srv_sess_dev *sess_dev);
+int rnbd_srv_create_sysfs_files(void);
+void rnbd_srv_destroy_sysfs_files(void);
+void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev);
+
+#endif /* RNBD_SRV_H */
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index d84e8a878df2..1e2aa5ae2796 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -784,7 +784,7 @@ static const struct block_device_operations mm_fops = {
static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- int ret = -ENODEV;
+ int ret;
struct cardinfo *card = &cards[num_cards];
unsigned char mem_present;
unsigned char batt_status;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 600430685e28..0e734802ee7c 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -35,10 +35,10 @@
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
-#include <asm/pgtable.h>
#include <linux/zorro.h>
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 5ee8e3fae551..33e3b76c4fa9 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -29,7 +29,6 @@ static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
"zstd",
#endif
- NULL
};
static void zcomp_strm_free(struct zcomp_strm *zstrm)
@@ -64,7 +63,7 @@ bool zcomp_available_algorithm(const char *comp)
{
int i;
- i = __sysfs_match_string(backends, -1, comp);
+ i = sysfs_match_string(backends, comp);
if (i >= 0)
return true;
@@ -83,9 +82,9 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
{
bool known_algorithm = false;
ssize_t sz = 0;
- int i = 0;
+ int i;
- for (; backends[i]; i++) {
+ for (i = 0; i < ARRAY_SIZE(backends); i++) {
if (!strcmp(comp, backends[i])) {
known_algorithm = true;
sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6d4e4497b59b..c8818e3b1079 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -20,6 +20,15 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+ Say y here to enable support for the ARM Logic Module bus
+ found on the ARM Integrator AP (Application Platform).
+
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
@@ -29,6 +38,36 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+ Interconnect. In case of an APB protocol collision or a slave device
+ not responding within the timeout, an IRQ is raised and the erroneous
+ address is reported to the APB terminator (APB Errors Handler Block).
+ This driver provides the interrupt handler, which detects the
+ erroneous address, prints an error message about the address fault
+ and updates an error counter. The counter and the APB-bus operation
+ timeout can be accessed via the corresponding sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ AXI3-bus is the main communication bus connecting all high-speed
+ peripheral IP-cores with the RAM controller and with the MIPS P5600
+ cores on the Baikal-T1 SoC. Traffic arbitration is done by means of
+ the DW AMBA 3 AXI Interconnect (the so-called AXI Main Interconnect),
+ which routes IO requests from one SoC block to another. This driver
+ provides a way to detect bus protocol errors and non-responding
+ devices by means of an errors handler block (EHB) embedded on top of
+ the interconnect. AXI Interconnect QoS arbitration tuning is
+ currently unsupported.
+
config MOXTET
tristate "CZ.NIC Turris Mox module configuration bus"
depends on SPI_MASTER && OF
@@ -183,7 +222,7 @@ config UNIPHIER_SYSTEM_BUS
needed to use on-board devices connected to UniPhier SoCs.
config VEXPRESS_CONFIG
- bool "Versatile Express configuration bus"
+ tristate "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
depends on ARM || ARM64
depends on OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 05f32cd694a4..397e35392bff 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -5,7 +5,7 @@
# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
-
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_MOXTET) += moxtet.o
@@ -13,6 +13,8 @@ obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 000000000000..845b6c43fef8
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules is in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
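+
+/*
+ * Each of the four logic module slots is decoded at
+ * INTEGRATOR_AP_EXP_BASE + slot * INTEGRATOR_AP_EXP_STRIDE.
+ */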
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 000000000000..b25ff941e7c7
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/clk.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
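+/* Convert a number of APB reference clock cycles to microseconds. */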
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+ return timeout;
+}
+
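+/* Convert a timeout in microseconds to APB reference clock cycles. */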
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+ return n;
+}
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst)) {
+ dev_warn(apb->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(apb->prst);
+ }
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk)) {
+ dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
+ return PTR_ERR(apb->pclk);
+ }
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+ /* Unmask IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+ * Either perform a dummy read from the unmapped address in the APB
+ * IO area or manually set the IRQ pending flag.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 000000000000..e7a6744acc7b
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Errors IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+ * Print backtrace on each CPU. This might be pointless if the fault
+ * has happened on the same CPU as the IRQ handler is executed or
+ * the other core proceeded further execution despite the error.
+ * But if it's not, by looking at the trace we would get straight to
+ * the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst)) {
+ dev_warn(axi->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(axi->arst);
+ }
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk)) {
+ dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
+ return PTR_ERR(axi->aclk);
+ }
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+ * A sub-word (unaligned) read from the QoS registers triggers a CM2
+ * bus error, while a sub-word write triggers the AXI-bus write error
+ * that this driver handles.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index ebad5eb48e5a..0b38014d040e 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -43,10 +43,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
- if (unlikely(!sequence_id))
- sequence_id = 1;
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
@@ -121,7 +118,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
ee = mhi_get_exec_env(mhi_cntrl);
}
- dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
@@ -152,11 +150,14 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 rx_status;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
/* Wait for the image download to complete */
wait_event_timeout(mhi_cntrl->state_event,
mhi_read_reg_field(mhi_cntrl, base,
@@ -174,8 +175,10 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status, sequence_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -183,6 +186,9 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
return -EIO;
}
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+ sequence_id);
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
@@ -191,26 +197,25 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
/* Wait for the image download to complete */
- wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl, base,
- BHIE_TXVECSTATUS_OFFS,
- BHIE_TXVECSTATUS_STATUS_BMSK,
- BHIE_TXVECSTATUS_STATUS_SHFT,
- &tx_status) || tx_status,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
return -EIO;
- return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+ return (!ret) ? -ETIMEDOUT : 0;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
@@ -239,14 +244,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
goto invalid_pm_state;
}
- dev_dbg(dev, "Starting SBL download via BHI\n");
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+ session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
- session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -377,30 +383,18 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
-void mhi_fw_load_worker(struct work_struct *work)
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl;
const struct firmware *firmware = NULL;
struct image_info *image_info;
- struct device *dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
const char *fw_name;
void *buf;
dma_addr_t dma_addr;
size_t size;
int ret;
- mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
- dev = &mhi_cntrl->mhi_dev->dev;
-
- dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_IN_PBL(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
return;
}
@@ -446,7 +440,12 @@ void mhi_fw_load_worker(struct work_struct *work)
release_firmware(firmware);
/* Error or in EDL mode, we're done */
- if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ if (ret) {
+ dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
+ return;
+ }
+
+ if (mhi_cntrl->ee == MHI_EE_EDL)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -474,8 +473,10 @@ fw_load_ee_pthru:
if (!mhi_cntrl->fbc_download)
return;
- if (ret)
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
goto error_read;
+ }
/* Wait for the SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
@@ -493,6 +494,8 @@ fw_load_ee_pthru:
ret = mhi_fw_load_amss(mhi_cntrl,
/* Vector table is the last entry */
&image_info->mhi_buf[image_info->entries - 1]);
+ if (ret)
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
release_firmware(firmware);
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 1f8c82603179..e43a190a7a36 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -34,6 +34,8 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL",
[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -835,8 +837,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
spin_lock_init(&mhi_cntrl->transition_lock);
spin_lock_init(&mhi_cntrl->wlock);
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
- INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
- INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -864,6 +864,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mutex_init(&mhi_chan->mutex);
init_completion(&mhi_chan->completion);
rwlock_init(&mhi_chan->lock);
+
+ /* used in setting bei field of TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
}
if (mhi_cntrl->bounce_buf) {
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 095d95bc0e37..b1f640b75a94 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -386,6 +386,8 @@ enum dev_st_transition {
DEV_ST_TRANSITION_READY,
DEV_ST_TRANSITION_SBL,
DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
DEV_ST_TRANSITION_MAX,
};
@@ -452,6 +454,7 @@ enum mhi_pm_state {
#define PRIMARY_CMD_RING 0
#define MHI_DEV_WAKE_DB 127
#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
enum mhi_er_type {
MHI_ER_TYPE_INVALID = 0x0,
@@ -586,7 +589,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
void mhi_fw_load_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
@@ -627,6 +630,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -670,8 +674,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
-
+ struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 97e06cc586e4..1f622ce6be8b 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
-static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
}
+EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
@@ -368,30 +369,37 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
return IRQ_HANDLED;
}
-irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
- struct mhi_controller *mhi_cntrl = dev;
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state = MHI_STATE_MAX;
enum mhi_pm_state pm_state = 0;
enum mhi_ee_type ee = 0;
write_lock_irq(&mhi_cntrl->pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- state = mhi_get_mhi_state(mhi_cntrl);
- ee = mhi_cntrl->ee;
- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
if (state == MHI_STATE_SYS_ERR) {
- dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+ dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- /* If device in RDDM don't bother processing SYS error */
- if (mhi_cntrl->ee == MHI_EE_RDDM) {
- if (mhi_cntrl->ee != ee) {
+ /* If device supports RDDM don't bother processing SYS error */
+ if (mhi_cntrl->rddm_image) {
+ if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
wake_up_all(&mhi_cntrl->state_event);
}
@@ -405,7 +413,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
if (MHI_IN_PBL(ee))
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
else
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
exit_intvec:
@@ -513,7 +521,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
@@ -597,7 +608,9 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
-EOVERFLOW : 0;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
@@ -722,13 +735,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
{
enum mhi_pm_state new_state;
+ /* skip SYS_ERROR handling if RDDM supported */
+ if (mhi_cntrl->ee == MHI_EE_RDDM ||
+ mhi_cntrl->rddm_image)
+ break;
+
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
default:
@@ -774,9 +792,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
}
case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
break;
default:
dev_err(dev, "Unhandled event type: %d\n", type);
@@ -819,14 +846,23 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
-
- if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
- parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
}
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
@@ -896,7 +932,7 @@ void mhi_ctrl_ev_task(unsigned long data)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
}
@@ -918,9 +954,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
@@ -945,27 +979,15 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- buf_info->v_addr = skb->data;
- buf_info->cb_buf = skb;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- goto map_error;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
-
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -979,11 +1001,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
read_unlock_bh(&mhi_cntrl->pm_lock);
return 0;
-
-map_error:
- read_unlock_bh(&mhi_cntrl->pm_lock);
-
- return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
@@ -995,9 +1012,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
mhi_dev->dl_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
+ int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
@@ -1024,25 +1040,16 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
- WARN_ON(buf_info->used);
- buf_info->p_addr = mhi_buf->dma_addr;
- buf_info->pre_mapped = true;
- buf_info->cb_buf = mhi_buf;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -1060,7 +1067,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
EXPORT_SYMBOL_GPL(mhi_queue_dma);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+ struct mhi_buf_info *info, enum mhi_flags flags)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_tre *mhi_tre;
@@ -1072,15 +1079,22 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
tre_ring = &mhi_chan->tre_ring;
buf_info = buf_ring->wp;
- buf_info->v_addr = buf;
- buf_info->cb_buf = cb;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
- buf_info->len = buf_len;
+ buf_info->len = info->len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- return ret;
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+ }
eob = !!(flags & MHI_EOB);
eot = !!(flags & MHI_EOT);
@@ -1089,7 +1103,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
/* increment WP */
@@ -1106,6 +1120,7 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring;
+ struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;
@@ -1121,7 +1136,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret))
return ret;
@@ -1322,7 +1341,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
while (nr_el--) {
void *buf;
-
+ struct mhi_buf_info info = { };
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -1330,8 +1349,10 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
}
/* Prepare transfer descriptors */
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
- len, MHI_EOT);
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
if (ret) {
kfree(buf);
goto error_pre_alloc;
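
To recap the refactoring above in one place: every queue path now fills a struct mhi_buf_info on the stack and lets mhi_gen_tre() do the mapping and TRE write-out. A condensed sketch of the new calling convention (the helper name is hypothetical, and the locking and ring-full checks done by the real callers are omitted):

static int queue_one_buffer(struct mhi_controller *mhi_cntrl,
			    struct mhi_chan *mhi_chan,
			    void *buf, size_t len)
{
	struct mhi_buf_info info = { };

	info.v_addr = buf;	/* not pre-mapped: mhi_gen_tre() maps it */
	info.cb_buf = buf;	/* cookie handed back in the xfer callback */
	info.len = len;

	return mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
}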
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index dc83d65f7784..796098078083 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -288,14 +288,18 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->db_cfg.reset_req)
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
/* Only ring DB if ring is not empty */
if (tre_ring->base && tre_ring->wp != tre_ring->rp)
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- write_unlock_irq(&mhi_chan->lock);
+ read_unlock_irq(&mhi_chan->lock);
}
mhi_cntrl->wake_put(mhi_cntrl, false);
@@ -449,19 +453,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
to_mhi_pm_state_str(transition_state));
/* We must notify MHI control driver so it can clean up first */
- if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
- /*
- * If controller supports RDDM, we do not process
- * SYS error state, instead we will jump directly
- * to RDDM state
- */
- if (mhi_cntrl->rddm_image) {
- dev_dbg(dev,
- "Controller supports RDDM, so skip SYS_ERR\n");
- return;
- }
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS)
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
- }
mutex_lock(&mhi_cntrl->pm_mutex);
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -527,8 +520,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
- flush_work(&mhi_cntrl->st_worker);
- flush_work(&mhi_cntrl->fw_worker);
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
@@ -608,13 +599,17 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
}
/* SYS_ERR worker */
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl = container_of(work,
- struct mhi_controller,
- syserr_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
/* Device State Transition worker */
@@ -643,7 +638,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (MHI_IN_PBL(mhi_cntrl->ee))
- wake_up_all(&mhi_cntrl->state_event);
+ mhi_fw_load_handler(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SBL:
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -662,6 +657,14 @@ void mhi_pm_st_worker(struct work_struct *work)
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
break;
+ case DEV_ST_TRANSITION_SYS_ERR:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ break;
+ case DEV_ST_TRANSITION_DISABLE:
+ mhi_pm_disable_transition
+ (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ break;
default:
break;
}
@@ -669,6 +672,149 @@ void mhi_pm_st_worker(struct work_struct *work)
}
}
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Could not enter M0/M1 state");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_info(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev, "Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
@@ -760,6 +906,7 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
+ enum mhi_state state;
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -829,13 +976,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
goto error_bhi_offset;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &val) ||
+ !val,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (!ret || MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ ret = -EIO;
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_bhi_offset;
+ }
+
+ /*
+ * The device clears INTVEC as part of RESET processing,
+ * so re-program it.
+ */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
- if (next_state == DEV_ST_TRANSITION_PBL)
- schedule_work(&mhi_cntrl->fw_worker);
-
mhi_queue_state_transition(mhi_cntrl, next_state);
mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -876,7 +1046,12 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
}
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
mhi_deinit_free_irq(mhi_cntrl);
if (!mhi_cntrl->pre_init) {
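
The newly exported mhi_pm_suspend()/mhi_pm_resume() are meant to be called by controller glue drivers; a hedged sketch of how such a driver might hook them into its system PM callbacks (all my_mhi_* names are hypothetical, and the controller pointer is assumed to be stored as driver data):

static int my_mhi_suspend(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_suspend(mhi_cntrl);
}

static int my_mhi_resume(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_resume(mhi_cntrl);
}

static const struct dev_pm_ops my_mhi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(my_mhi_suspend, my_mhi_resume)
};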
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e5f5f48d69d2..3affd180baac 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1275,13 +1275,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
- 0),
- /* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
- 0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
- 0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1404,6 +1397,13 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
@@ -2744,6 +2744,17 @@ static int sysc_init_soc(struct sysc *ddata)
if (match && match->data)
sysc_soc->soc = (int)match->data;
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ default:
+ break;
+ }
+ }
+
match = soc_device_match(sysc_soc_feat_match);
if (!match)
return 0;
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index ff70575b2db6..a58ac0c8e282 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -6,10 +6,61 @@
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/vexpress.h>
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
struct vexpress_config_bridge {
struct vexpress_config_bridge_ops *ops;
@@ -18,26 +69,20 @@ struct vexpress_config_bridge {
static DEFINE_MUTEX(vexpress_config_mutex);
-static struct class *vexpress_config_class;
static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
-void vexpress_config_set_master(u32 site)
+static void vexpress_config_set_master(u32 site)
{
vexpress_config_site_master = site;
}
-u32 vexpress_config_get_master(void)
-{
- return vexpress_config_site_master;
-}
-
-void vexpress_config_lock(void *arg)
+static void vexpress_config_lock(void *arg)
{
mutex_lock(&vexpress_config_mutex);
}
-void vexpress_config_unlock(void *arg)
+static void vexpress_config_unlock(void *arg)
{
mutex_unlock(&vexpress_config_mutex);
}
@@ -59,7 +104,7 @@ static void vexpress_config_find_prop(struct device_node *node,
}
}
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
u32 *position, u32 *dcc)
{
vexpress_config_find_prop(node, "arm,vexpress,site", site);
@@ -88,9 +133,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
struct regmap *regmap;
struct regmap **res;
- if (WARN_ON(dev->parent->class != vexpress_config_class))
- return ERR_PTR(-ENODEV);
-
bridge = dev_get_drvdata(dev->parent);
if (WARN_ON(!bridge))
return ERR_PTR(-EINVAL);
@@ -113,91 +155,265 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context)
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
{
- struct device *dev;
- struct vexpress_config_bridge *bridge;
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
- if (!vexpress_config_class) {
- vexpress_config_class = class_create(THIS_MODULE,
- "vexpress-config");
- if (IS_ERR(vexpress_config_class))
- return (void *)vexpress_config_class;
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
}
- dev = device_create(vexpress_config_class, parent, 0,
- NULL, "%s.bridge", dev_name(parent));
+ return 0;
+}
+
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
- if (IS_ERR(dev))
- return dev;
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
- bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- put_device(dev);
- device_unregister(dev);
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
}
- bridge->ops = ops;
- bridge->context = context;
- dev_set_drvdata(dev, bridge);
+ vexpress_syscfg_regmap_config.max_register = num - 1;
- dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
- dev_name(dev), parent->of_node);
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
- return dev;
-}
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
-static int vexpress_config_node_match(struct device *dev, const void *data)
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
{
- const struct device_node *node = data;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
- dev_dbg(dev, "Parent node %p, looking for %p\n",
- dev->parent->of_node, node);
+ regmap_exit(regmap);
- return dev->parent->of_node == node;
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+ list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
}
-static int vexpress_config_populate(struct device_node *node)
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
+static int vexpress_syscfg_probe(struct platform_device *pdev)
{
- struct device_node *bridge;
- struct device *parent;
- int ret;
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
- bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -EINVAL;
+ return -ENOMEM;
- parent = class_find_device(vexpress_config_class, NULL, bridge,
- vexpress_config_node_match);
- of_node_put(bridge);
- if (WARN_ON(!parent))
- return -ENODEV;
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
- ret = of_platform_populate(node, NULL, NULL, parent);
+ dev_set_drvdata(&pdev->dev, bridge);
- put_device(parent);
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
- return ret;
-}
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-static int __init vexpress_config_init(void)
-{
- int err = 0;
- struct device_node *node;
+ if (WARN_ON(dt_hbi != hbi))
+ dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
+ dt_hbi, hbi);
+ }
- /* Need the config devices early, before the "normal" devices... */
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
- err = vexpress_config_populate(node);
- if (err) {
- of_node_put(node);
- break;
- }
+ struct device_node *bridge_np;
+
+ bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ if (bridge_np != pdev->dev.parent->of_node)
+ continue;
+
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
}
- return err;
+ return 0;
}
-postcore_initcall(vexpress_config_init);
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");
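
The function regmaps built here are not MMIO-backed: vexpress_syscfg_regmap_init() passes a NULL bus to regmap_init() and relies on accessor callbacks (such as the .reg_write hook above) carried in the regmap_config. A minimal sketch of that callback-backed regmap pattern; all foo_* names are hypothetical and not part of this commit:

    /* Callback-backed regmap sketch; foo_* names are made up. */
    #include <linux/regmap.h>

    static int foo_reg_read(void *context, unsigned int reg, unsigned int *val)
    {
            /* Issue whatever bus/config transaction 'reg' maps to. */
            *val = 0;
            return 0;
    }

    static int foo_reg_write(void *context, unsigned int reg, unsigned int val)
    {
            return 0;
    }

    static struct regmap_config foo_regmap_config = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_read = foo_reg_read,
            .reg_write = foo_reg_write,
            .max_register = 0,
    };

    /* In probe: map = regmap_init(dev, NULL, driver_context, &foo_regmap_config); */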
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a0a7ae705de8..d82b3b7658bd 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3535,7 +3535,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
}
static int cdrom_sysctl_info(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int pos;
char *info = cdrom_sysctl_settings.info;
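
This hunk (and the matching one in drivers/char/random.c further down) follows the sysctl core change in which proc handlers receive a kernel-space buffer, so the __user annotation is dropped. A hedged sketch of a handler on the updated signature; the names are invented and the stock proc_dointvec() helper does the actual parsing:

    #include <linux/sysctl.h>

    static int foo_threshold;

    static int foo_sysctl_handler(struct ctl_table *ctl, int write,
                                  void *buffer, size_t *lenp, loff_t *ppos)
    {
            /* 'buffer' is now a kernel pointer; the helpers use it directly. */
            return proc_dointvec(ctl, write, buffer, lenp, ppos);
    }

    static struct ctl_table foo_table[] = {
            {
                    .procname     = "foo_threshold",
                    .data         = &foo_threshold,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = foo_sysctl_handler,
            },
            { }
    };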
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4665fe9ccd2..ac25833eb19e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -209,7 +209,7 @@ config DTLK
config XILINX_HWICAP
tristate "Xilinx HWICAP Support"
- depends on XILINX_VIRTEX || MICROBLAZE
+ depends on MICROBLAZE
help
This option enables support for Xilinx Internal Configuration
Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 47098648502d..00ff5fcb808a 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -39,7 +39,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include "agp.h"
struct agp_front_data agp_fe;
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 9e84239f88d4..3ffbb1c80c5c 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -42,7 +42,6 @@
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
-#include <asm/pgtable.h>
#include "agp.h"
__u32 *agp_gatt_table;
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index e5e5333f302d..cce2af5df7b4 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
/*
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index d36aeacb290e..a395e2e70dc5 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -399,15 +399,15 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
struct device *dev = &pdev->dev;
int rc;
- bt_bmc->irq = platform_get_irq(pdev, 0);
- if (!bt_bmc->irq)
- return -ENODEV;
+ bt_bmc->irq = platform_get_irq_optional(pdev, 0);
+ if (bt_bmc->irq < 0)
+ return bt_bmc->irq;
rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
DEVICE_NAME, bt_bmc);
if (rc < 0) {
dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
- bt_bmc->irq = 0;
+ bt_bmc->irq = rc;
return rc;
}
@@ -430,9 +430,6 @@ static int bt_bmc_probe(struct platform_device *pdev)
struct device *dev;
int rc;
- if (!pdev || !pdev->dev.of_node)
- return -ENODEV;
-
dev = &pdev->dev;
dev_info(dev, "Found bt bmc device\n");
@@ -466,9 +463,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
init_waitqueue_head(&bt_bmc->queue);
bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
- bt_bmc->miscdev.name = DEVICE_NAME,
- bt_bmc->miscdev.fops = &bt_bmc_fops,
- bt_bmc->miscdev.parent = dev;
+ bt_bmc->miscdev.name = DEVICE_NAME,
+ bt_bmc->miscdev.fops = &bt_bmc_fops,
+ bt_bmc->miscdev.parent = dev;
rc = misc_register(&bt_bmc->miscdev);
if (rc) {
dev_err(dev, "Unable to register misc device\n");
@@ -477,7 +474,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
bt_bmc_config_irq(bt_bmc, pdev);
- if (bt_bmc->irq) {
+ if (bt_bmc->irq >= 0) {
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
@@ -503,7 +500,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
misc_deregister(&bt_bmc->miscdev);
- if (!bt_bmc->irq)
+ if (bt_bmc->irq < 0)
del_timer_sync(&bt_bmc->poll_timer);
return 0;
}
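
The interrupt setup now follows the platform_get_irq_optional() convention: a negative return means no IRQ is wired up, and the raw return code, not zero, is stored for later checks. A minimal sketch of that idiom with hypothetical foo_* names:

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>

    struct foo_dev {
            int irq;
    };

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(struct platform_device *pdev, struct foo_dev *foo)
    {
            int irq, rc;

            irq = platform_get_irq_optional(pdev, 0);
            if (irq < 0)
                    return irq;     /* no IRQ present: caller may poll instead */

            rc = devm_request_irq(&pdev->dev, irq, foo_irq_handler, IRQF_SHARED,
                                  dev_name(&pdev->dev), foo);
            if (rc)
                    return rc;

            foo->irq = irq;
            return 0;
    }

bt_bmc_probe() then keys its "IRQ vs. timer" decision on bt_bmc->irq >= 0, matching the sketch's negative-means-absent convention.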
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c48d8f086382..e1b22fe0916c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,6 +33,7 @@
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
+#include <linux/vmalloc.h>
#define IPMI_DRIVER_VERSION "39.2"
@@ -1153,7 +1154,7 @@ static void free_user_work(struct work_struct *work)
remove_work);
cleanup_srcu_struct(&user->release_barrier);
- kfree(user);
+ vfree(user);
}
int ipmi_create_user(unsigned int if_num,
@@ -1185,7 +1186,7 @@ int ipmi_create_user(unsigned int if_num,
if (rv)
return rv;
- new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
+ new_user = vzalloc(sizeof(*new_user));
if (!new_user)
return -ENOMEM;
@@ -1232,7 +1233,7 @@ int ipmi_create_user(unsigned int if_num,
out_kfree:
srcu_read_unlock(&ipmi_interfaces_srcu, index);
- kfree(new_user);
+ vfree(new_user);
return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
@@ -3171,7 +3172,7 @@ static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
goto out;
}
- guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1));
+ import_guid(&bmc->fetch_guid, msg->msg.data + 1);
/*
* Make sure the guid data is available before setting
* dyn_guid_set.
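
Two small API points from this hunk: the user structure now comes from vzalloc() and must therefore be released with vfree(), and the BMC GUID is imported from the raw response bytes with import_guid() instead of casting the buffer to guid_t. A combined sketch under those assumptions; the foo_* names are hypothetical:

    #include <linux/vmalloc.h>
    #include <linux/uuid.h>

    struct foo_user {
            guid_t guid;
            /* ... further, possibly large, state ... */
    };

    static struct foo_user *foo_user_create(const u8 *raw_guid)
    {
            struct foo_user *user = vzalloc(sizeof(*user));   /* zeroed */

            if (!user)
                    return NULL;
            import_guid(&user->guid, raw_guid); /* copy from raw, unaligned bytes */
            return user;
    }

    static void foo_user_destroy(struct foo_user *user)
    {
            vfree(user);    /* vzalloc()ed memory must be vfree()d, not kfree()d */
    }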
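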
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 638c693e17ad..129b5713f187 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -393,6 +393,8 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
dev_info(io.dev, "%pR regsize %d spacing %d irq %d\n",
res, io.regsize, io.regspacing, io.irq);
+ request_module("acpi_ipmi");
+
return ipmi_si_add_smi(&io);
err_free:
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 2704470e021d..198b65d45c5e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -189,8 +189,6 @@ struct ssif_addr_info {
struct device *dev;
struct i2c_client *client;
- struct i2c_client *added_client;
-
struct mutex clients_mutex;
struct list_head clients;
@@ -1472,6 +1470,7 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
if (acpi_handle) {
ssif_info->addr_source = SI_ACPI;
ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
+ request_module("acpi_ipmi");
return true;
}
#endif
@@ -1940,21 +1939,6 @@ out_remove_attr:
goto out;
}
-static int ssif_adapter_handler(struct device *adev, void *opaque)
-{
- struct ssif_addr_info *addr_info = opaque;
-
- if (adev->type != &i2c_adapter_type)
- return 0;
-
- addr_info->added_client = i2c_new_client_device(to_i2c_adapter(adev),
- &addr_info->binfo);
-
- if (!addr_info->adapter_name)
- return 1; /* Only try the first I2C adapter by default. */
- return 0;
-}
-
static int new_ssif_client(int addr, char *adapter_name,
int debug, int slave_addr,
enum ipmi_addr_src addr_src,
@@ -1998,9 +1982,7 @@ static int new_ssif_client(int addr, char *adapter_name,
list_add_tail(&addr_info->link, &ssif_infos);
- if (initialized)
- i2c_for_each_dev(addr_info, ssif_adapter_handler);
- /* Otherwise address list will get it */
+ /* Address list will get it */
out_unlock:
mutex_unlock(&ssif_infos_mutex);
@@ -2120,8 +2102,6 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- i2c_unregister_device(addr_info->added_client);
-
list_del(&addr_info->link);
kfree(addr_info);
mutex_unlock(&ssif_infos_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 43dd0891ca1e..31cae88a730b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,11 +31,15 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>
+#include <linux/pseudo_fs.h>
+#include <uapi/linux/magic.h>
+#include <linux/mount.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
+#define DEVMEM_MINOR 1
#define DEVPORT_MINOR 4
static inline unsigned long size_inside_page(unsigned long start,
@@ -805,12 +809,64 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+static struct inode *devmem_inode;
+
+#ifdef CONFIG_IO_STRICT_DEVMEM
+void revoke_devmem(struct resource *res)
+{
+ struct inode *inode = READ_ONCE(devmem_inode);
+
+ /*
+ * Check that the initialization has completed. Losing the race
+ * is ok because it means drivers are claiming resources before
+	 * the fs_initcall level of init, preventing /dev/mem from
+ * establishing mappings.
+ */
+ if (!inode)
+ return;
+
+ /*
+ * The expectation is that the driver has successfully marked
+ * the resource busy by this point, so devmem_is_allowed()
+ * should start returning false, however for performance this
+ * does not iterate the entire resource range.
+ */
+ if (devmem_is_allowed(PHYS_PFN(res->start)) &&
+ devmem_is_allowed(PHYS_PFN(res->end))) {
+ /*
+ * *cringe* iomem=relaxed says "go ahead, what's the
+ * worst that can happen?"
+ */
+ return;
+ }
+
+ unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
+}
+#endif
+
static int open_port(struct inode *inode, struct file *filp)
{
+ int rc;
+
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- return security_locked_down(LOCKDOWN_DEV_MEM);
+ rc = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (rc)
+ return rc;
+
+ if (iminor(inode) != DEVMEM_MINOR)
+ return 0;
+
+ /*
+ * Use a unified address space to have a single point to manage
+ * revocations when drivers want to take over a /dev/mem mapped
+ * range.
+ */
+ inode->i_mapping = devmem_inode->i_mapping;
+ filp->f_mapping = inode->i_mapping;
+
+ return 0;
}
#define zero_lseek null_lseek
@@ -885,7 +941,7 @@ static const struct memdev {
fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
- [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
+ [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
@@ -939,6 +995,45 @@ static char *mem_devnode(struct device *dev, umode_t *mode)
static struct class *mem_class;
+static int devmem_fs_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type devmem_fs_type = {
+ .name = "devmem",
+ .owner = THIS_MODULE,
+ .init_fs_context = devmem_fs_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int devmem_init_inode(void)
+{
+ static struct vfsmount *devmem_vfs_mount;
+ static int devmem_fs_cnt;
+ struct inode *inode;
+ int rc;
+
+ rc = simple_pin_fs(&devmem_fs_type, &devmem_vfs_mount, &devmem_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount /dev/mem pseudo filesystem: %d\n", rc);
+ return rc;
+ }
+
+ inode = alloc_anon_inode(devmem_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ pr_err("Cannot allocate inode for /dev/mem: %d\n", rc);
+ simple_release_fs(&devmem_vfs_mount, &devmem_fs_cnt);
+ return rc;
+ }
+
+ /* publish /dev/mem initialized */
+ WRITE_ONCE(devmem_inode, inode);
+
+ return 0;
+}
+
static int __init chr_dev_init(void)
{
int minor;
@@ -960,6 +1055,8 @@ static int __init chr_dev_init(void)
*/
if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
continue;
+ if ((minor == DEVMEM_MINOR) && devmem_init_inode() != 0)
+ continue;
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
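
The /dev/mem changes above route every open of the mem minor through devmem_inode's shared mapping, so revoke_devmem() can drop all established user mappings of a range with a single unmap_mapping_range() call once a driver has marked that range busy. A hedged sketch of the driver-side view, with a hypothetical foo_claim_window() helper:

    #include <linux/device.h>
    #include <linux/ioport.h>
    #include <linux/io.h>

    static void __iomem *foo_claim_window(struct device *dev,
                                          resource_size_t start,
                                          resource_size_t len)
    {
            /*
             * Marking the region busy is what the revoke_devmem() comments
             * above expect to have happened: devmem_is_allowed() then fails
             * for these pfns, and with CONFIG_IO_STRICT_DEVMEM any existing
             * /dev/mem mappings of the range are torn down on the shared
             * devmem inode.
             */
            if (!request_mem_region(start, len, dev_name(dev)))
                    return NULL;

            return ioremap(start, len);
    }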
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 7d583222e8fa..0fae33319d2e 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -39,7 +39,6 @@
#include <linux/numa.h>
#include <linux/refcount.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
@@ -65,7 +64,7 @@ enum mspec_page_type {
* This structure is shared by all vma's that are split off from the
* original vma when split_vma()'s are done.
*
- * The refcnt is incremented atomically because mm->mmap_sem does not
+ * The refcnt is incremented atomically because mm->mmap_lock does not
* protect in fork case where multiple tasks share the vma_data.
*/
struct vma_data {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a7cf6aa65908..2a41b21623ae 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2087,7 +2087,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
* Return entropy available scaled to integral bits
*/
static int proc_do_entropy(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
int entropy_count;
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 6d81bb3bb503..896a3550fba9 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -777,17 +777,21 @@ static int __init tlclk_init(void)
{
int ret;
+ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+ if (!alarm_events) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
+ kfree(alarm_events);
return ret;
}
tlclk_major = ret;
- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
- if (!alarm_events) {
- ret = -ENOMEM;
- goto out1;
- }
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
@@ -796,7 +800,6 @@ static int __init tlclk_init(void)
ret = -EBUSY;
goto out2;
}
- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
@@ -837,8 +840,8 @@ out3:
release_region(TLCLK_BASE, 8);
out2:
kfree(alarm_events);
-out1:
unregister_chrdev(tlclk_major, "telco_clock");
+out1:
return ret;
}
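
The tlclk fix reorders setup so teardown can run strictly in reverse: alarm_events is allocated (and telclk_interrupt read) before register_chrdev(), and the out1/out2 labels are swapped so a failing step never releases something it did not acquire. The same goto-unwind shape as a minimal sketch; every foo_* step is a placeholder:

    static int __init foo_init(void)
    {
            int ret;

            ret = foo_alloc_state();                /* step 1 */
            if (ret)
                    return ret;

            ret = foo_register_chrdev();            /* step 2 */
            if (ret)
                    goto err_free_state;

            ret = foo_request_region();             /* step 3 */
            if (ret)
                    goto err_unregister;

            return 0;

    err_unregister:
            foo_unregister_chrdev();                /* undo step 2 only */
    err_free_state:
            foo_free_state();                       /* undo step 1 only */
            return ret;
    }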
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index bcb257baed06..8f50a1caecba 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -1,5 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
+config HAVE_CLK
+ bool
+ help
+ The <linux/clk.h> calls support software clock gating and
+ thus are a key power management tool on many systems.
+
config CLKDEV_LOOKUP
bool
select HAVE_CLK
@@ -7,8 +13,18 @@ config CLKDEV_LOOKUP
config HAVE_CLK_PREPARE
bool
-config COMMON_CLK
+config HAVE_LEGACY_CLK # TODO: Remove once all legacy users are migrated
bool
+ select HAVE_CLK
+ help
+ Select this option when the clock API in <linux/clk.h> is implemented
+ by platform/architecture code. This method is deprecated. Modern
+ code should select COMMON_CLK instead and not define a custom
+ 'struct clk'.
+
+menuconfig COMMON_CLK
+ bool "Common Clock Framework"
+ depends on !HAVE_LEGACY_CLK
select HAVE_CLK_PREPARE
select CLKDEV_LOOKUP
select SRCU
@@ -20,8 +36,7 @@ config COMMON_CLK
Architectures utilizing the common struct clk should select
this option.
-menu "Common Clock Framework"
- depends on COMMON_CLK
+if COMMON_CLK
config COMMON_CLK_WM831X
tristate "Clock driver for WM831x/2x PMICs"
@@ -252,7 +267,7 @@ config COMMON_CLK_XGENE
default ARCH_XGENE
depends on ARM64 || COMPILE_TEST
---help---
- Sypport for the APM X-Gene SoC reference, PLL, and device clocks.
+ Support for the APM X-Gene SoC reference, PLL, and device clocks.
config COMMON_CLK_LOCHNAGAR
tristate "Cirrus Logic Lochnagar clock driver"
@@ -326,6 +341,12 @@ config COMMON_CLK_MMP2
help
Support for Marvell MMP2 and MMP3 SoC clocks
+config COMMON_CLK_MMP2_AUDIO
+ tristate "Clock driver for MMP2 Audio subsystem"
+ depends on COMMON_CLK_MMP2 || COMPILE_TEST
+ help
+ This driver supports clocks for Audio subsystem on MMP2 SoC.
+
config COMMON_CLK_BD718XX
tristate "Clock driver for 32K clk gates on ROHM PMICs"
depends on MFD_ROHM_BD718XX || MFD_ROHM_BD70528 || MFD_ROHM_BD71828
@@ -341,6 +362,7 @@ config COMMON_CLK_FIXED_MMIO
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
+source "drivers/clk/baikal-t1/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
@@ -360,6 +382,7 @@ source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
+source "drivers/clk/x86/Kconfig"
source "drivers/clk/zynqmp/Kconfig"
-endmenu
+endif
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index f4169cc2fd31..ca9af11d3391 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -75,6 +75,7 @@ obj-y += analogbits/
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
+obj-$(CONFIG_CLK_BAIKAL_T1) += baikal-t1/
obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
@@ -104,17 +105,18 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
+obj-$(CONFIG_ARCH_AGILEX) += socfpga/
+obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
-obj-$(CONFIG_ARCH_SPRD) += sprd/
+obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
-obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
-obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+obj-y += versatile/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index c44a431b6c97..38bdb4981315 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -98,9 +98,9 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91rm9200_pmc = pmc_data_allocate(PMC_PLLBCK + 1,
nck(at91rm9200_systemck),
- nck(at91rm9200_periphck), 0);
+ nck(at91rm9200_periphck), 0, 4);
if (!at91rm9200_pmc)
return;
@@ -123,12 +123,16 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91rm9200_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
&at91rm9200_pll_layout,
&rm9200_pll_characteristics);
if (IS_ERR(hw))
goto err_free;
+ at91rm9200_pmc->chws[PMC_PLLBCK] = hw;
+
parent_names[0] = slowxtal_name;
parent_names[1] = "mainck";
parent_names[2] = "pllack";
@@ -159,6 +163,8 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
&at91rm9200_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91rm9200_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) {
@@ -187,7 +193,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91rm9200_pmc);
+ kfree(at91rm9200_pmc);
}
/*
* While the TCB can be used as the clocksource, the system timer is most likely
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index a9d4234758d7..6d0723aa8b13 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -352,9 +352,10 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- at91sam9260_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91sam9260_pmc = pmc_data_allocate(PMC_PLLBCK + 1,
ndck(data->sck, data->num_sck),
- ndck(data->pck, data->num_pck), 0);
+ ndck(data->pck, data->num_pck),
+ 0, data->num_progck);
if (!at91sam9260_pmc)
return;
@@ -398,12 +399,16 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
if (IS_ERR(hw))
goto err_free;
+ at91sam9260_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
data->pllb_layout,
data->pllb_characteristics);
if (IS_ERR(hw))
goto err_free;
+ at91sam9260_pmc->chws[PMC_PLLBCK] = hw;
+
parent_names[0] = slck_name;
parent_names[1] = "mainck";
parent_names[2] = "pllack";
@@ -434,6 +439,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
&at91rm9200_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9260_pmc->pchws[i] = hw;
}
for (i = 0; i < data->num_sck; i++) {
@@ -462,7 +469,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
return;
err_free:
- pmc_data_free(at91sam9260_pmc);
+ kfree(at91sam9260_pmc);
}
static void __init at91sam9260_pmc_setup(struct device_node *np)
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 38a7d2d2df0c..9873b583c260 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -115,9 +115,9 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91sam9g45_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(at91sam9g45_systemck),
- nck(at91sam9g45_periphck), 0);
+ nck(at91sam9g45_periphck), 0, 2);
if (!at91sam9g45_pmc)
return;
@@ -143,6 +143,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91sam9g45_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -182,6 +184,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
&at91sam9g45_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9g45_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) {
@@ -210,7 +214,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91sam9g45_pmc);
+ kfree(at91sam9g45_pmc);
}
/*
* The TCB is used as the clocksource so its clock is needed early. This means
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 8bb39d2ba84b..630dc5d87171 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -128,8 +128,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9n12_systemck), 31, 0);
+ at91sam9n12_pmc = pmc_data_allocate(PMC_PLLBCK + 1,
+ nck(at91sam9n12_systemck), 31, 0, 2);
if (!at91sam9n12_pmc)
return;
@@ -162,11 +162,15 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91sam9n12_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1,
&at91rm9200_pll_layout, &pllb_characteristics);
if (IS_ERR(hw))
goto err_free;
+ at91sam9n12_pmc->chws[PMC_PLLBCK] = hw;
+
parent_names[0] = slck_name;
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
@@ -198,6 +202,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9n12_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) {
@@ -228,7 +234,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91sam9n12_pmc);
+ kfree(at91sam9n12_pmc);
}
/*
* The TCB is used as the clocksource so its clock is needed early. This means
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 77fe83a73bf4..0d1cc44b056f 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -87,9 +87,9 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- at91sam9rl_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ at91sam9rl_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(at91sam9rl_systemck),
- nck(at91sam9rl_periphck), 0);
+ nck(at91sam9rl_periphck), 0, 2);
if (!at91sam9rl_pmc)
return;
@@ -105,6 +105,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ at91sam9rl_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -138,6 +140,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
&at91rm9200_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9rl_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9rl_systemck); i++) {
@@ -166,6 +170,6 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(at91sam9rl_pmc);
+ kfree(at91sam9rl_pmc);
}
CLK_OF_DECLARE_DRIVER(at91sam9rl_pmc, "atmel,at91sam9rl-pmc", at91sam9rl_pmc_setup);
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 086cf0b4955c..0ce3da080287 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -150,8 +150,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
- nck(at91sam9x5_systemck), 31, 0);
+ at91sam9x5_pmc = pmc_data_allocate(PMC_PLLACK + 1,
+ nck(at91sam9x5_systemck), 31, 0, 2);
if (!at91sam9x5_pmc)
return;
@@ -184,6 +184,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
if (IS_ERR(hw))
goto err_free;
+ at91sam9x5_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -227,6 +229,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ at91sam9x5_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(at91sam9x5_systemck); i++) {
@@ -278,7 +282,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
err_free:
- pmc_data_free(at91sam9x5_pmc);
+ kfree(at91sam9x5_pmc);
}
static void __init at91sam9g15_pmc_setup(struct device_node *np)
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index b71515acdec1..20ee9dccee78 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -67,6 +67,10 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data)
if (idx < pmc_data->ngck)
return pmc_data->ghws[idx];
break;
+ case PMC_TYPE_PROGRAMMABLE:
+ if (idx < pmc_data->npck)
+ return pmc_data->pchws[idx];
+ break;
default:
break;
}
@@ -76,48 +80,34 @@ struct clk_hw *of_clk_hw_pmc_get(struct of_phandle_args *clkspec, void *data)
return ERR_PTR(-EINVAL);
}
-void pmc_data_free(struct pmc_data *pmc_data)
-{
- kfree(pmc_data->chws);
- kfree(pmc_data->shws);
- kfree(pmc_data->phws);
- kfree(pmc_data->ghws);
-}
-
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
- unsigned int nperiph, unsigned int ngck)
+ unsigned int nperiph, unsigned int ngck,
+ unsigned int npck)
{
- struct pmc_data *pmc_data = kzalloc(sizeof(*pmc_data), GFP_KERNEL);
+ unsigned int num_clks = ncore + nsystem + nperiph + ngck + npck;
+ struct pmc_data *pmc_data;
+ pmc_data = kzalloc(struct_size(pmc_data, hwtable, num_clks),
+ GFP_KERNEL);
if (!pmc_data)
return NULL;
pmc_data->ncore = ncore;
- pmc_data->chws = kcalloc(ncore, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->chws)
- goto err;
+ pmc_data->chws = pmc_data->hwtable;
pmc_data->nsystem = nsystem;
- pmc_data->shws = kcalloc(nsystem, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->shws)
- goto err;
+ pmc_data->shws = pmc_data->chws + ncore;
pmc_data->nperiph = nperiph;
- pmc_data->phws = kcalloc(nperiph, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->phws)
- goto err;
+ pmc_data->phws = pmc_data->shws + nsystem;
pmc_data->ngck = ngck;
- pmc_data->ghws = kcalloc(ngck, sizeof(struct clk_hw *), GFP_KERNEL);
- if (!pmc_data->ghws)
- goto err;
+ pmc_data->ghws = pmc_data->phws + nperiph;
- return pmc_data;
-
-err:
- pmc_data_free(pmc_data);
+ pmc_data->npck = npck;
+ pmc_data->pchws = pmc_data->ghws + ngck;
- return NULL;
+ return pmc_data;
}
#ifdef CONFIG_PM
@@ -274,8 +264,11 @@ static int __init pmc_register_ops(void)
struct device_node *np;
np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
+ if (!np)
+ return -ENODEV;
pmcreg = device_node_to_regmap(np);
+ of_node_put(np);
if (IS_ERR(pmcreg))
return PTR_ERR(pmcreg);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 9b8db9cdcda5..df616f2937e7 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -24,6 +24,10 @@ struct pmc_data {
struct clk_hw **phws;
unsigned int ngck;
struct clk_hw **ghws;
+ unsigned int npck;
+ struct clk_hw **pchws;
+
+ struct clk_hw *hwtable[];
};
struct clk_range {
@@ -94,8 +98,8 @@ struct clk_pcr_layout {
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
- unsigned int nperiph, unsigned int ngck);
-void pmc_data_free(struct pmc_data *pmc_data);
+ unsigned int nperiph, unsigned int ngck,
+ unsigned int npck);
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
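
pmc_data_allocate() now makes a single allocation: struct pmc_data grows the hwtable[] flexible array sized via struct_size(), and chws/shws/phws/ghws/pchws simply point at consecutive slices of it, which is why pmc_data_free() disappears and the error paths above just kfree() the structure. A minimal sketch of the same pattern on a hypothetical structure:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct foo_table {
            unsigned int na, nb;
            int *a;                 /* points into entries[0 .. na-1]       */
            int *b;                 /* points into entries[na .. na+nb-1]   */
            int entries[];          /* single flexible-array backing store  */
    };

    static struct foo_table *foo_table_alloc(unsigned int na, unsigned int nb)
    {
            struct foo_table *t;

            t = kzalloc(struct_size(t, entries, na + nb), GFP_KERNEL);
            if (!t)
                    return NULL;

            t->na = na;
            t->a = t->entries;
            t->nb = nb;
            t->b = t->a + na;

            return t;               /* one kfree(t) releases everything */
    }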
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index cc19e8fb83be..3e20aa68259f 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -182,10 +182,10 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sam9x60_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ sam9x60_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(sam9x60_systemck),
nck(sam9x60_periphck),
- nck(sam9x60_gck));
+ nck(sam9x60_gck), 8);
if (!sam9x60_pmc)
return;
@@ -214,6 +214,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sam9x60_pmc->chws[PMC_PLLACK] = hw;
+
hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck",
"main_osc", 1, &upll_characteristics);
if (IS_ERR(hw))
@@ -255,6 +257,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
&sam9x60_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sam9x60_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) {
@@ -299,7 +303,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sam9x60_pmc);
+ kfree(sam9x60_pmc);
}
/* Some clks are used for a clocksource */
CLK_OF_DECLARE(sam9x60_pmc, "microchip,sam9x60-pmc", sam9x60_pmc_setup);
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index ff7e3f727082..d69421d71daf 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -89,6 +89,7 @@ static const struct {
{ .n = "i2s1_clk", .id = 55, .r = { .min = 0, .max = 83000000 }, },
{ .n = "can0_clk", .id = 56, .r = { .min = 0, .max = 83000000 }, },
{ .n = "can1_clk", .id = 57, .r = { .min = 0, .max = 83000000 }, },
+ { .n = "ptc_clk", .id = 58, .r = { .min = 0, .max = 83000000 }, },
{ .n = "classd_clk", .id = 59, .r = { .min = 0, .max = 83000000 }, },
};
@@ -166,10 +167,10 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama5d2_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+ sama5d2_pmc = pmc_data_allocate(PMC_AUDIOPLLCK + 1,
nck(sama5d2_systemck),
nck(sama5d2_periph32ck),
- nck(sama5d2_gck));
+ nck(sama5d2_gck), 3);
if (!sama5d2_pmc)
return;
@@ -202,6 +203,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d2_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_audio_pll_frac(regmap, "audiopll_fracck",
"mainck");
if (IS_ERR(hw))
@@ -217,6 +220,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d2_pmc->chws[PMC_AUDIOPLLCK] = hw;
+
regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
if (IS_ERR(regmap_sfr))
regmap_sfr = NULL;
@@ -267,6 +272,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
&sama5d2_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sama5d2_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama5d2_systemck); i++) {
@@ -350,6 +357,6 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sama5d2_pmc);
+ kfree(sama5d2_pmc);
}
CLK_OF_DECLARE_DRIVER(sama5d2_pmc, "atmel,sama5d2-pmc", sama5d2_pmc_setup);
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 88506f909c08..5e4e44dd4c37 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -125,9 +125,9 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ sama5d3_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(sama5d3_systemck),
- nck(sama5d3_periphck), 0);
+ nck(sama5d3_periphck), 0, 3);
if (!sama5d3_pmc)
return;
@@ -158,6 +158,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d3_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -201,6 +203,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sama5d3_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) {
@@ -231,7 +235,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sama5d3_pmc);
+ kfree(sama5d3_pmc);
}
/*
* The TCB is used as the clocksource so its clock is needed early. This means
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index a6dee4a3b6e4..662ff5fa6e98 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -140,9 +140,9 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama5d4_pmc = pmc_data_allocate(PMC_MCK2 + 1,
+ sama5d4_pmc = pmc_data_allocate(PMC_PLLACK + 1,
nck(sama5d4_systemck),
- nck(sama5d4_periph32ck), 0);
+ nck(sama5d4_periph32ck), 0, 3);
if (!sama5d4_pmc)
return;
@@ -173,6 +173,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
+ sama5d4_pmc->chws[PMC_PLLACK] = hw;
+
hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck");
if (IS_ERR(hw))
goto err_free;
@@ -224,6 +226,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
&at91sam9x5_programmable_layout);
if (IS_ERR(hw))
goto err_free;
+
+ sama5d4_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama5d4_systemck); i++) {
@@ -267,6 +271,6 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
return;
err_free:
- pmc_data_free(sama5d4_pmc);
+ kfree(sama5d4_pmc);
}
CLK_OF_DECLARE_DRIVER(sama5d4_pmc, "atmel,sama5d4-pmc", sama5d4_pmc_setup);
diff --git a/drivers/clk/baikal-t1/Kconfig b/drivers/clk/baikal-t1/Kconfig
new file mode 100644
index 000000000000..03102f1094bc
--- /dev/null
+++ b/drivers/clk/baikal-t1/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CLK_BAIKAL_T1
+ bool "Baikal-T1 Clocks Control Unit interface"
+ depends on (MIPS_BAIKAL_T1 && OF) || COMPILE_TEST
+ default MIPS_BAIKAL_T1
+ help
+ Clocks Control Unit is the core of Baikal-T1 SoC System Controller
+ responsible for the chip subsystems clocking and resetting. It
+ consists of multiple global clock domains, which can be reset by
+ means of the CCU control registers. These domains and devices placed
+ in them are fed with clocks generated by a hierarchy of PLLs,
+ configurable and fixed clock dividers. Enable this option to be able
+ to select Baikal-T1 CCU PLLs and Dividers drivers.
+
+if CLK_BAIKAL_T1
+
+config CLK_BT1_CCU_PLL
+ bool "Baikal-T1 CCU PLLs support"
+ select MFD_SYSCON
+ default MIPS_BAIKAL_T1
+ help
+ Enable this to support the PLLs embedded into the Baikal-T1 SoC
+ System Controller. These are five PLLs placed at the root of the
+ clocks hierarchy, right after an external reference oscillator
+ (normally of 25MHz). They are used to generate high frequency
+ signals, which are either directly wired to the consumers (like
+ CPUs, DDR, etc.) or passed over the clock dividers to be only
+ then used as an individual reference clock of a target device.
+
+config CLK_BT1_CCU_DIV
+ bool "Baikal-T1 CCU Dividers support"
+ select RESET_CONTROLLER
+ select MFD_SYSCON
+ default MIPS_BAIKAL_T1
+ help
+ Enable this to support the CCU dividers used to distribute clocks
+ between AXI-bus and system devices coming from CCU PLLs of Baikal-T1
+ SoC. CCU dividers can be either configurable or with fixed divider,
+ either gateable or ungateable. Some of the CCU dividers can be as well
+ used to reset the domains they're supplying clock to.
+
+endif
diff --git a/drivers/clk/baikal-t1/Makefile b/drivers/clk/baikal-t1/Makefile
new file mode 100644
index 000000000000..b3b9590b95ed
--- /dev/null
+++ b/drivers/clk/baikal-t1/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CLK_BT1_CCU_PLL) += ccu-pll.o clk-ccu-pll.o
+obj-$(CONFIG_CLK_BT1_CCU_DIV) += ccu-div.o clk-ccu-div.o
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
new file mode 100644
index 000000000000..4062092d67f9
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU Dividers interface driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-div: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/time64.h>
+#include <linux/debugfs.h>
+
+#include "ccu-div.h"
+
+#define CCU_DIV_CTL 0x00
+#define CCU_DIV_CTL_EN BIT(0)
+#define CCU_DIV_CTL_RST BIT(1)
+#define CCU_DIV_CTL_SET_CLKDIV BIT(2)
+#define CCU_DIV_CTL_CLKDIV_FLD 4
+#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
+ GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
+#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
+#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
+
+#define CCU_DIV_RST_DELAY_US 1
+#define CCU_DIV_LOCK_CHECK_RETRIES 50
+
+#define CCU_DIV_CLKDIV_MIN 0
+#define CCU_DIV_CLKDIV_MAX(_mask) \
+ ((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
+
+/*
+ * Use the next two methods until there are generic field setter and
+ * getter available with non-constant mask support.
+ */
+static inline u32 ccu_div_get(u32 mask, u32 val)
+{
+ return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
+}
+
+static inline u32 ccu_div_prep(u32 mask, u32 val)
+{
+ return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
+}
+
+static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
+ unsigned long div)
+{
+ u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
+
+ do_div(ns, ref_clk);
+
+ return ns;
+}
+
+static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
+ unsigned long div)
+{
+ return ref_clk / (div ?: 1);
+}
+
+static int ccu_div_var_update_clkdiv(struct ccu_div *div,
+ unsigned long parent_rate,
+ unsigned long divider)
+{
+ unsigned long nd;
+ u32 val = 0;
+ u32 lock;
+ int count;
+
+ nd = ccu_div_lock_delay_ns(parent_rate, divider);
+
+ if (div->features & CCU_DIV_LOCK_SHIFTED)
+ lock = CCU_DIV_CTL_LOCK_SHIFTED;
+ else
+ lock = CCU_DIV_CTL_LOCK_NORMAL;
+
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);
+
+ /*
+	 * Until an nsec-resolution version of readl_poll_timeout() is
+	 * available, we have to implement the polling loop below.
+ */
+ count = CCU_DIV_LOCK_CHECK_RETRIES;
+ do {
+ ndelay(nd);
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+ if (val & lock)
+ return 0;
+ } while (--count);
+
+ return -ETIMEDOUT;
+}
+
+static int ccu_div_var_enable(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+ u32 val = 0;
+ int ret;
+
+ if (!parent_hw) {
+ pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+ if (val & CCU_DIV_CTL_EN)
+ return 0;
+
+ spin_lock_irqsave(&div->lock, flags);
+ ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
+ ccu_div_get(div->mask, val));
+ if (!ret)
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
+ spin_unlock_irqrestore(&div->lock, flags);
+ if (ret)
+ pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static int ccu_div_gate_enable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static void ccu_div_gate_disable(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+}
+
+static int ccu_div_gate_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+
+ return !!(val & CCU_DIV_CTL_EN);
+}
+
+static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long divider;
+ u32 val = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &val);
+ divider = ccu_div_get(div->mask, val);
+
+ return ccu_div_calc_freq(parent_rate, divider);
+}
+
+static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned int mask)
+{
+ unsigned long divider;
+
+ divider = parent_rate / rate;
+ return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
+ CCU_DIV_CLKDIV_MAX(mask));
+}
+
+static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long divider;
+
+ divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+
+ return ccu_div_calc_freq(*parent_rate, divider);
+}
+
+/*
+ * This method is used for the clock divider blocks, which support the
+ * on-the-fly rate change. So due to lacking the EN bit functionality
+ * they can't be gated before the rate adjustment.
+ */
+static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags, divider;
+ u32 val;
+ int ret;
+
+ divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
+ if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
+ divider = 0;
+ } else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
+ if (divider == 1 || divider == 2)
+ divider = 0;
+ else if (divider == 3)
+ divider = 4;
+ }
+
+ val = ccu_div_prep(div->mask, divider);
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
+ ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
+ spin_unlock_irqrestore(&div->lock, flags);
+ if (ret)
+ pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+/*
+ * This method is used for the clock divider blocks, which don't support
+ * the on-the-fly rate change.
+ */
+static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ unsigned long flags, divider;
+ u32 val;
+
+ divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
+ val = ccu_div_prep(div->mask, divider);
+
+ /*
+ * Also disable the clock divider block if it was enabled by default
+ * or by the bootloader.
+ */
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ div->mask | CCU_DIV_CTL_EN, val);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+
+ return ccu_div_calc_freq(parent_rate, div->divider);
+}
+
+static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+
+ return ccu_div_calc_freq(*parent_rate, div->divider);
+}
+
+static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+int ccu_div_reset_domain(struct ccu_div *div)
+{
+ unsigned long flags;
+
+ if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
+ return -EINVAL;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ /* The next delay must be enough to cover all the resets. */
+ udelay(CCU_DIV_RST_DELAY_US);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct ccu_div_dbgfs_bit {
+ struct ccu_div *div;
+ const char *name;
+ u32 mask;
+};
+
+#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) { \
+ .name = _name, \
+ .mask = _mask \
+ }
+
+static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
+ CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
+ CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
+ CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
+ CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
+};
+
+#define CCU_DIV_DBGFS_BIT_NUM ARRAY_SIZE(ccu_div_bits)
+
+/*
+ * It can be dangerous to change the divider settings behind the clock
+ * framework's back, so we don't provide any Kconfig-based compile-time
+ * option to enable this feature.
+ */
+#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
+#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS
+
+static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
+{
+ const struct ccu_div_dbgfs_bit *bit = priv;
+ struct ccu_div *div = bit->div;
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl,
+ bit->mask, val ? bit->mask : 0);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
+{
+ struct ccu_div *div = priv;
+ unsigned long flags;
+ u32 data;
+
+ val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
+ CCU_DIV_CLKDIV_MAX(div->mask));
+ data = ccu_div_prep(div->mask, val);
+
+ spin_lock_irqsave(&div->lock, flags);
+ regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
+ spin_unlock_irqrestore(&div->lock, flags);
+
+ return 0;
+}
+
+#define ccu_div_dbgfs_mode 0644
+
+#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
+
+#define ccu_div_dbgfs_bit_set NULL
+#define ccu_div_dbgfs_var_clkdiv_set NULL
+#define ccu_div_dbgfs_mode 0444
+
+#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
+
+static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
+{
+ const struct ccu_div_dbgfs_bit *bit = priv;
+ struct ccu_div *div = bit->div;
+ u32 data = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &data);
+ *val = !!(data & bit->mask);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
+ ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
+
+static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
+{
+ struct ccu_div *div = priv;
+ u32 data = 0;
+
+ regmap_read(div->sys_regs, div->reg_ctl, &data);
+ *val = ccu_div_get(div->mask, data);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
+ ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
+
+static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
+{
+ struct ccu_div *div = priv;
+
+ *val = div->divider;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
+ ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
+
+static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bits;
+ int didx, bidx, num = 2;
+ const char *name;
+
+ num += !!(div->flags & CLK_SET_RATE_GATE) +
+ !!(div->features & CCU_DIV_RESET_DOMAIN);
+
+ bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
+ if (!bits)
+ return;
+
+ for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
+ name = ccu_div_bits[bidx].name;
+ if (!(div->flags & CLK_SET_RATE_GATE) &&
+ !strcmp("div_en", name)) {
+ continue;
+ }
+
+ if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
+ !strcmp("div_rst", name)) {
+ continue;
+ }
+
+ bits[didx] = ccu_div_bits[bidx];
+ bits[didx].div = div;
+
+ if (div->features & CCU_DIV_LOCK_SHIFTED &&
+ !strcmp("div_lock", name)) {
+ bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
+ }
+
+ debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
+ dentry, &bits[didx],
+ &ccu_div_dbgfs_bit_fops);
+ ++didx;
+ }
+
+ debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
+ div, &ccu_div_dbgfs_var_clkdiv_fops);
+}
+
+static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+ struct ccu_div_dbgfs_bit *bit;
+
+ bit = kmalloc(sizeof(*bit), GFP_KERNEL);
+ if (!bit)
+ return;
+
+ *bit = ccu_div_bits[0];
+ bit->div = div;
+ debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
+ &ccu_div_dbgfs_bit_fops);
+
+ debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
+ &ccu_div_dbgfs_fixed_clkdiv_fops);
+}
+
+static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_div *div = to_ccu_div(hw);
+
+ debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
+ &ccu_div_dbgfs_fixed_clkdiv_fops);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+#define ccu_div_var_debug_init NULL
+#define ccu_div_gate_debug_init NULL
+#define ccu_div_fixed_debug_init NULL
+
+#endif /* !CONFIG_DEBUG_FS */
+
+static const struct clk_ops ccu_div_var_gate_to_set_ops = {
+ .enable = ccu_div_var_enable,
+ .disable = ccu_div_gate_disable,
+ .is_enabled = ccu_div_gate_is_enabled,
+ .recalc_rate = ccu_div_var_recalc_rate,
+ .round_rate = ccu_div_var_round_rate,
+ .set_rate = ccu_div_var_set_rate_fast,
+ .debug_init = ccu_div_var_debug_init
+};
+
+static const struct clk_ops ccu_div_var_nogate_ops = {
+ .recalc_rate = ccu_div_var_recalc_rate,
+ .round_rate = ccu_div_var_round_rate,
+ .set_rate = ccu_div_var_set_rate_slow,
+ .debug_init = ccu_div_var_debug_init
+};
+
+static const struct clk_ops ccu_div_gate_ops = {
+ .enable = ccu_div_gate_enable,
+ .disable = ccu_div_gate_disable,
+ .is_enabled = ccu_div_gate_is_enabled,
+ .recalc_rate = ccu_div_fixed_recalc_rate,
+ .round_rate = ccu_div_fixed_round_rate,
+ .set_rate = ccu_div_fixed_set_rate,
+ .debug_init = ccu_div_gate_debug_init
+};
+
+static const struct clk_ops ccu_div_fixed_ops = {
+ .recalc_rate = ccu_div_fixed_recalc_rate,
+ .round_rate = ccu_div_fixed_round_rate,
+ .set_rate = ccu_div_fixed_set_rate,
+ .debug_init = ccu_div_fixed_debug_init
+};
+
+struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
+{
+ struct clk_parent_data parent_data = { };
+ struct clk_init_data hw_init = { };
+ struct ccu_div *div;
+ int ret;
+
+ if (!div_init)
+ return ERR_PTR(-EINVAL);
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Note since Baikal-T1 System Controller registers are MMIO-backed
+ * we won't check the regmap IO operations return status, because it
+ * must be zero anyway.
+ */
+ div->hw.init = &hw_init;
+ div->id = div_init->id;
+ div->reg_ctl = div_init->base + CCU_DIV_CTL;
+ div->sys_regs = div_init->sys_regs;
+ div->flags = div_init->flags;
+ div->features = div_init->features;
+ spin_lock_init(&div->lock);
+
+ hw_init.name = div_init->name;
+ hw_init.flags = div_init->flags;
+
+ if (div_init->type == CCU_DIV_VAR) {
+ if (hw_init.flags & CLK_SET_RATE_GATE)
+ hw_init.ops = &ccu_div_var_gate_to_set_ops;
+ else
+ hw_init.ops = &ccu_div_var_nogate_ops;
+ div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
+ } else if (div_init->type == CCU_DIV_GATE) {
+ hw_init.ops = &ccu_div_gate_ops;
+ div->divider = div_init->divider;
+ } else if (div_init->type == CCU_DIV_FIXED) {
+ hw_init.ops = &ccu_div_fixed_ops;
+ div->divider = div_init->divider;
+ } else {
+ ret = -EINVAL;
+ goto err_free_div;
+ }
+
+ if (!div_init->parent_name) {
+ ret = -EINVAL;
+ goto err_free_div;
+ }
+ parent_data.fw_name = div_init->parent_name;
+ hw_init.parent_data = &parent_data;
+ hw_init.num_parents = 1;
+
+ ret = of_clk_hw_register(div_init->np, &div->hw);
+ if (ret)
+ goto err_free_div;
+
+ return div;
+
+err_free_div:
+ kfree(div);
+
+ return ERR_PTR(ret);
+}
+
+void ccu_div_hw_unregister(struct ccu_div *div)
+{
+ clk_hw_unregister(&div->hw);
+
+ kfree(div);
+}
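
ccu_div_var_update_clkdiv() above open-codes its lock polling with ndelay() because, as its comment notes, no nanosecond-resolution readl_poll_timeout() exists yet. Where microsecond resolution is good enough, the stock regmap poll helper expresses the same loop; a small sketch in which the function name and timeout values are illustrative only:

    #include <linux/regmap.h>

    /* Poll a divider control register until the lock bit appears. */
    static int foo_wait_divider_lock(struct regmap *sys_regs, unsigned int reg_ctl,
                                     u32 lock_bit)
    {
            u32 val;

            /* sleep up to 1 us between reads, give up after 1000 us */
            return regmap_read_poll_timeout(sys_regs, reg_ctl, val,
                                            val & lock_bit, 1, 1000);
    }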
diff --git a/drivers/clk/baikal-t1/ccu-div.h b/drivers/clk/baikal-t1/ccu-div.h
new file mode 100644
index 000000000000..795665caefbd
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-div.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU Dividers interface driver
+ */
+#ifndef __CLK_BT1_CCU_DIV_H__
+#define __CLK_BT1_CCU_DIV_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/of.h>
+
+/*
+ * CCU Divider private flags
+ * @CCU_DIV_SKIP_ONE: Due to some reason divider can't be set to 1.
+ * It can be 0 though, which is functionally the same.
+ * @CCU_DIV_SKIP_ONE_TO_THREE: For some reason divider can't be within [1,3].
+ * It can be either 0 or greater than 3.
+ * @CCU_DIV_LOCK_SHIFTED: Find lock-bit at non-standard position.
+ * @CCU_DIV_RESET_DOMAIN: Provide reset clock domain method.
+ */
+#define CCU_DIV_SKIP_ONE BIT(1)
+#define CCU_DIV_SKIP_ONE_TO_THREE BIT(2)
+#define CCU_DIV_LOCK_SHIFTED BIT(3)
+#define CCU_DIV_RESET_DOMAIN BIT(4)
+
+/*
+ * enum ccu_div_type - CCU Divider types
+ * @CCU_DIV_VAR: Clocks gate with variable divider.
+ * @CCU_DIV_GATE: Clocks gate with fixed divider.
+ * @CCU_DIV_FIXED: Ungateable clock with fixed divider.
+ */
+enum ccu_div_type {
+ CCU_DIV_VAR,
+ CCU_DIV_GATE,
+ CCU_DIV_FIXED
+};
+
+/*
+ * struct ccu_div_init_data - CCU Divider initialization data
+ * @id: Clocks private identifier.
+ * @name: Clocks name.
+ * @parent_name: Parent clocks name in a fw node.
+ * @base: Divider register base address with respect to the sys_regs base.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @np: Pointer to the node describing the CCU Dividers.
+ * @type: CCU divider type (variable, fixed with and without gate).
+ * @width: Divider width if it's variable.
+ * @divider: Divider fixed value.
+ * @flags: CCU Divider clock flags.
+ * @features: CCU Divider private features.
+ */
+struct ccu_div_init_data {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ struct regmap *sys_regs;
+ struct device_node *np;
+ enum ccu_div_type type;
+ union {
+ unsigned int width;
+ unsigned int divider;
+ };
+ unsigned long flags;
+ unsigned long features;
+};
+
+/*
+ * struct ccu_div - CCU Divider descriptor
+ * @hw: clk_hw of the divider.
+ * @id: Clock private identifier.
+ * @reg_ctl: Divider control register base address.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @lock: Divider state change spin-lock.
+ * @mask: Divider field mask.
+ * @divider: Divider fixed value.
+ * @flags: Divider clock flags.
+ * @features: CCU Divider private features.
+ */
+struct ccu_div {
+ struct clk_hw hw;
+ unsigned int id;
+ unsigned int reg_ctl;
+ struct regmap *sys_regs;
+ spinlock_t lock;
+ union {
+ u32 mask;
+ unsigned int divider;
+ };
+ unsigned long flags;
+ unsigned long features;
+};
+#define to_ccu_div(_hw) container_of(_hw, struct ccu_div, hw)
+
+static inline struct clk_hw *ccu_div_get_clk_hw(struct ccu_div *div)
+{
+ return div ? &div->hw : NULL;
+}
+
+struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *init);
+
+void ccu_div_hw_unregister(struct ccu_div *div);
+
+int ccu_div_reset_domain(struct ccu_div *div);
+
+#endif /* __CLK_BT1_CCU_DIV_H__ */
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
new file mode 100644
index 000000000000..13ef28001439
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU PLL interface driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/limits.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/iopoll.h>
+#include <linux/time64.h>
+#include <linux/rational.h>
+#include <linux/debugfs.h>
+
+#include "ccu-pll.h"
+
+#define CCU_PLL_CTL 0x000
+#define CCU_PLL_CTL_EN BIT(0)
+#define CCU_PLL_CTL_RST BIT(1)
+#define CCU_PLL_CTL_CLKR_FLD 2
+#define CCU_PLL_CTL_CLKR_MASK GENMASK(7, CCU_PLL_CTL_CLKR_FLD)
+#define CCU_PLL_CTL_CLKF_FLD 8
+#define CCU_PLL_CTL_CLKF_MASK GENMASK(20, CCU_PLL_CTL_CLKF_FLD)
+#define CCU_PLL_CTL_CLKOD_FLD 21
+#define CCU_PLL_CTL_CLKOD_MASK GENMASK(24, CCU_PLL_CTL_CLKOD_FLD)
+#define CCU_PLL_CTL_BYPASS BIT(30)
+#define CCU_PLL_CTL_LOCK BIT(31)
+#define CCU_PLL_CTL1 0x004
+#define CCU_PLL_CTL1_BWADJ_FLD 3
+#define CCU_PLL_CTL1_BWADJ_MASK GENMASK(14, CCU_PLL_CTL1_BWADJ_FLD)
+
+#define CCU_PLL_LOCK_CHECK_RETRIES 50
+
+#define CCU_PLL_NR_MAX \
+ ((CCU_PLL_CTL_CLKR_MASK >> CCU_PLL_CTL_CLKR_FLD) + 1)
+#define CCU_PLL_NF_MAX \
+ ((CCU_PLL_CTL_CLKF_MASK >> (CCU_PLL_CTL_CLKF_FLD + 1)) + 1)
+#define CCU_PLL_OD_MAX \
+ ((CCU_PLL_CTL_CLKOD_MASK >> CCU_PLL_CTL_CLKOD_FLD) + 1)
+#define CCU_PLL_NB_MAX \
+ ((CCU_PLL_CTL1_BWADJ_MASK >> CCU_PLL_CTL1_BWADJ_FLD) + 1)
+#define CCU_PLL_FDIV_MIN 427000UL
+#define CCU_PLL_FDIV_MAX 3500000000UL
+#define CCU_PLL_FOUT_MIN 200000000UL
+#define CCU_PLL_FOUT_MAX 2500000000UL
+#define CCU_PLL_FVCO_MIN 700000000UL
+#define CCU_PLL_FVCO_MAX 3500000000UL
+#define CCU_PLL_CLKOD_FACTOR 2
+
+static inline unsigned long ccu_pll_lock_delay_us(unsigned long ref_clk,
+ unsigned long nr)
+{
+ u64 us = 500ULL * nr * USEC_PER_SEC;
+
+ do_div(us, ref_clk);
+
+ return us;
+}
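+
+/*
+ * Editorial illustration, not part of the original patch: the poll interval
+ * above is presumably derived from a PLL lock time of about 500 reference
+ * divider cycles. For example, with a hypothetical 25 MHz reference clock and
+ * NR = 1 it evaluates to 500 * 1 * 1000000 / 25000000 = 20 us per lock-check
+ * poll, and the total timeout becomes 20 us * CCU_PLL_LOCK_CHECK_RETRIES = 1 ms.
+ */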
+
+static inline unsigned long ccu_pll_calc_freq(unsigned long ref_clk,
+ unsigned long nr,
+ unsigned long nf,
+ unsigned long od)
+{
+ u64 tmp = ref_clk;
+
+ do_div(tmp, nr);
+ tmp *= nf;
+ do_div(tmp, od);
+
+ return tmp;
+}
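+
+/*
+ * Editorial illustration, not part of the original patch: the helper above
+ * implements Fout = Fref / NR * NF / OD. For instance, with a hypothetical
+ * 25 MHz reference, NR = 1, NF = 100 and OD = 2 it returns
+ * 25000000 / 1 * 100 / 2 = 1250000000, i.e. 1.25 GHz.
+ */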
+
+static int ccu_pll_reset(struct ccu_pll *pll, unsigned long ref_clk,
+ unsigned long nr)
+{
+ unsigned long ud, ut;
+ u32 val;
+
+ ud = ccu_pll_lock_delay_us(ref_clk, nr);
+ ut = ud * CCU_PLL_LOCK_CHECK_RETRIES;
+
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl,
+ CCU_PLL_CTL_RST, CCU_PLL_CTL_RST);
+
+ return regmap_read_poll_timeout_atomic(pll->sys_regs, pll->reg_ctl, val,
+ val & CCU_PLL_CTL_LOCK, ud, ut);
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long flags;
+ u32 val = 0;
+ int ret;
+
+ if (!parent_hw) {
+ pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ regmap_read(pll->sys_regs, pll->reg_ctl, &val);
+ if (val & CCU_PLL_CTL_EN)
+ return 0;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_write(pll->sys_regs, pll->reg_ctl, val | CCU_PLL_CTL_EN);
+ ret = ccu_pll_reset(pll, clk_hw_get_rate(parent_hw),
+ FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1);
+ spin_unlock_irqrestore(&pll->lock, flags);
+ if (ret)
+ pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl, CCU_PLL_CTL_EN, 0);
+ spin_unlock_irqrestore(&pll->lock, flags);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ u32 val = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl, &val);
+
+ return !!(val & CCU_PLL_CTL_EN);
+}
+
+static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long nr, nf, od;
+ u32 val = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl, &val);
+ nr = FIELD_GET(CCU_PLL_CTL_CLKR_MASK, val) + 1;
+ nf = FIELD_GET(CCU_PLL_CTL_CLKF_MASK, val) + 1;
+ od = FIELD_GET(CCU_PLL_CTL_CLKOD_MASK, val) + 1;
+
+ return ccu_pll_calc_freq(parent_rate, nr, nf, od);
+}
+
+static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
+ unsigned long *nr, unsigned long *nf,
+ unsigned long *od)
+{
+ unsigned long err, freq, min_err = ULONG_MAX;
+ unsigned long num, denom, n1, d1, nri;
+ unsigned long nr_max, nf_max, od_max;
+
+ /*
+ * Make sure the PLL is fed with a valid input signal (Fdiv). If
+ * you want to speed the function up, just reduce CCU_PLL_NR_MAX.
+ * This will cause a worse approximation though.
+ */
+ nri = (parent_rate / CCU_PLL_FDIV_MAX) + 1;
+ nr_max = min(parent_rate / CCU_PLL_FDIV_MIN, CCU_PLL_NR_MAX);
+
+ /*
+ * Find the closest [nr;nf;od] vector taking into account the
+ * following limitations: 1) 700MHz <= Fvco <= 3.5GHz, 2) PLL Od is
+ * either 1 or an even number within the acceptable range (alas, 1
+ * is also excluded by the next loop).
+ */
+ for (; nri <= nr_max; ++nri) {
+ /* Use the Od factor to fulfill limitation 2). */
+ num = CCU_PLL_CLKOD_FACTOR * rate;
+ denom = parent_rate / nri;
+
+ /*
+ * Make sure Fvco is within the acceptable range to fulfill
+ * condition 1). Note that due to the CCU_PLL_CLKOD_FACTOR value
+ * the actual upper limit is also divided by that factor.
+ * That's not a big problem for us, since in practice there is no
+ * need for clocks of such a high frequency.
+ */
+ nf_max = min(CCU_PLL_FVCO_MAX / denom, CCU_PLL_NF_MAX);
+ od_max = CCU_PLL_OD_MAX / CCU_PLL_CLKOD_FACTOR;
+
+ /*
+ * Clamp the out-of-bounds values, which can't be properly
+ * handled by the rational fraction approximation algorithm.
+ */
+ if (num / denom >= nf_max) {
+ n1 = nf_max;
+ d1 = 1;
+ } else if (denom / num >= od_max) {
+ n1 = 1;
+ d1 = od_max;
+ } else {
+ rational_best_approximation(num, denom, nf_max, od_max,
+ &n1, &d1);
+ }
+
+ /* Select the best approximation of the target rate. */
+ freq = ccu_pll_calc_freq(parent_rate, nri, n1, d1);
+ err = abs((int64_t)freq - num);
+ if (err < min_err) {
+ min_err = err;
+ *nr = nri;
+ *nf = n1;
+ *od = CCU_PLL_CLKOD_FACTOR * d1;
+ }
+ }
+}
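+
+/*
+ * Editorial illustration, not part of the original patch: a worked example of
+ * the search above with hypothetical numbers. For parent_rate = 25 MHz and
+ * rate = 1.2 GHz the first iteration takes nri = 1, num = 2 * 1200000000 =
+ * 2400000000 and denom = 25000000; the rational approximation then yields
+ * n1 = 96, d1 = 1, so freq = 25000000 * 96 / 1 = 2400000000 matches num
+ * exactly and the result is nr = 1, nf = 96, od = 2 * 1 = 2, which programs
+ * Fout = 25 MHz / 1 * 96 / 2 = 1.2 GHz.
+ */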
+
+static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long nr = 1, nf = 1, od = 1;
+
+ ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+
+ return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+}
+
+/*
+ * This method is used for PLLs which support on-the-fly divider
+ * adjustment, so there is no need to gate such clocks.
+ */
+static int ccu_pll_set_rate_reset(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long nr, nf, od;
+ unsigned long flags;
+ u32 mask, val;
+ int ret;
+
+ ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);
+
+ mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
+ CCU_PLL_CTL_CLKOD_MASK;
+ val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
+ ret = ccu_pll_reset(pll, parent_rate, nr);
+ spin_unlock_irqrestore(&pll->lock, flags);
+ if (ret)
+ pr_err("PLL '%s' reset timed out\n", clk_hw_get_name(hw));
+
+ return ret;
+}
+
+/*
+ * This method is used for PLLs which don't support on-the-fly divider
+ * adjustment, so the corresponding clocks are supposed to be gated first.
+ */
+static int ccu_pll_set_rate_norst(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ unsigned long nr, nf, od;
+ unsigned long flags;
+ u32 mask, val;
+
+ ccu_pll_calc_factors(rate, parent_rate, &nr, &nf, &od);
+
+ /*
+ * Disable PLL if it was enabled by default or left enabled by the
+ * system bootloader.
+ */
+ mask = CCU_PLL_CTL_CLKR_MASK | CCU_PLL_CTL_CLKF_MASK |
+ CCU_PLL_CTL_CLKOD_MASK | CCU_PLL_CTL_EN;
+ val = FIELD_PREP(CCU_PLL_CTL_CLKR_MASK, nr - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKF_MASK, nf - 1) |
+ FIELD_PREP(CCU_PLL_CTL_CLKOD_MASK, od - 1);
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl, mask, val);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+struct ccu_pll_dbgfs_bit {
+ struct ccu_pll *pll;
+ const char *name;
+ unsigned int reg;
+ u32 mask;
+};
+
+struct ccu_pll_dbgfs_fld {
+ struct ccu_pll *pll;
+ const char *name;
+ unsigned int reg;
+ unsigned int lsb;
+ u32 mask;
+ u32 min;
+ u32 max;
+};
+
+#define CCU_PLL_DBGFS_BIT_ATTR(_name, _reg, _mask) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .mask = _mask \
+ }
+
+#define CCU_PLL_DBGFS_FLD_ATTR(_name, _reg, _lsb, _mask, _min, _max) \
+ { \
+ .name = _name, \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .mask = _mask, \
+ .min = _min, \
+ .max = _max \
+ }
+
+static const struct ccu_pll_dbgfs_bit ccu_pll_bits[] = {
+ CCU_PLL_DBGFS_BIT_ATTR("pll_en", CCU_PLL_CTL, CCU_PLL_CTL_EN),
+ CCU_PLL_DBGFS_BIT_ATTR("pll_rst", CCU_PLL_CTL, CCU_PLL_CTL_RST),
+ CCU_PLL_DBGFS_BIT_ATTR("pll_bypass", CCU_PLL_CTL, CCU_PLL_CTL_BYPASS),
+ CCU_PLL_DBGFS_BIT_ATTR("pll_lock", CCU_PLL_CTL, CCU_PLL_CTL_LOCK)
+};
+
+#define CCU_PLL_DBGFS_BIT_NUM ARRAY_SIZE(ccu_pll_bits)
+
+static const struct ccu_pll_dbgfs_fld ccu_pll_flds[] = {
+ CCU_PLL_DBGFS_FLD_ATTR("pll_nr", CCU_PLL_CTL, CCU_PLL_CTL_CLKR_FLD,
+ CCU_PLL_CTL_CLKR_MASK, 1, CCU_PLL_NR_MAX),
+ CCU_PLL_DBGFS_FLD_ATTR("pll_nf", CCU_PLL_CTL, CCU_PLL_CTL_CLKF_FLD,
+ CCU_PLL_CTL_CLKF_MASK, 1, CCU_PLL_NF_MAX),
+ CCU_PLL_DBGFS_FLD_ATTR("pll_od", CCU_PLL_CTL, CCU_PLL_CTL_CLKOD_FLD,
+ CCU_PLL_CTL_CLKOD_MASK, 1, CCU_PLL_OD_MAX),
+ CCU_PLL_DBGFS_FLD_ATTR("pll_nb", CCU_PLL_CTL1, CCU_PLL_CTL1_BWADJ_FLD,
+ CCU_PLL_CTL1_BWADJ_MASK, 1, CCU_PLL_NB_MAX)
+};
+
+#define CCU_PLL_DBGFS_FLD_NUM ARRAY_SIZE(ccu_pll_flds)
+
+/*
+ * It can be dangerous to change the PLL settings behind the clock framework's
+ * back, so we don't provide any kernel-config-based compile-time option to
+ * enable this feature.
+ */
+#undef CCU_PLL_ALLOW_WRITE_DEBUGFS
+#ifdef CCU_PLL_ALLOW_WRITE_DEBUGFS
+
+static int ccu_pll_dbgfs_bit_set(void *priv, u64 val)
+{
+ const struct ccu_pll_dbgfs_bit *bit = priv;
+ struct ccu_pll *pll = bit->pll;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl + bit->reg,
+ bit->mask, val ? bit->mask : 0);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return 0;
+}
+
+static int ccu_pll_dbgfs_fld_set(void *priv, u64 val)
+{
+ struct ccu_pll_dbgfs_fld *fld = priv;
+ struct ccu_pll *pll = fld->pll;
+ unsigned long flags;
+ u32 data;
+
+ val = clamp_t(u64, val, fld->min, fld->max);
+ data = ((val - 1) << fld->lsb) & fld->mask;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ regmap_update_bits(pll->sys_regs, pll->reg_ctl + fld->reg, fld->mask,
+ data);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return 0;
+}
+
+#define ccu_pll_dbgfs_mode 0644
+
+#else /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
+
+#define ccu_pll_dbgfs_bit_set NULL
+#define ccu_pll_dbgfs_fld_set NULL
+#define ccu_pll_dbgfs_mode 0444
+
+#endif /* !CCU_PLL_ALLOW_WRITE_DEBUGFS */
+
+static int ccu_pll_dbgfs_bit_get(void *priv, u64 *val)
+{
+ struct ccu_pll_dbgfs_bit *bit = priv;
+ struct ccu_pll *pll = bit->pll;
+ u32 data = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl + bit->reg, &data);
+ *val = !!(data & bit->mask);
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_bit_fops,
+ ccu_pll_dbgfs_bit_get, ccu_pll_dbgfs_bit_set, "%llu\n");
+
+static int ccu_pll_dbgfs_fld_get(void *priv, u64 *val)
+{
+ struct ccu_pll_dbgfs_fld *fld = priv;
+ struct ccu_pll *pll = fld->pll;
+ u32 data = 0;
+
+ regmap_read(pll->sys_regs, pll->reg_ctl + fld->reg, &data);
+ *val = ((data & fld->mask) >> fld->lsb) + 1;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ccu_pll_dbgfs_fld_fops,
+ ccu_pll_dbgfs_fld_get, ccu_pll_dbgfs_fld_set, "%llu\n");
+
+static void ccu_pll_debug_init(struct clk_hw *hw, struct dentry *dentry)
+{
+ struct ccu_pll *pll = to_ccu_pll(hw);
+ struct ccu_pll_dbgfs_bit *bits;
+ struct ccu_pll_dbgfs_fld *flds;
+ int idx;
+
+ bits = kcalloc(CCU_PLL_DBGFS_BIT_NUM, sizeof(*bits), GFP_KERNEL);
+ if (!bits)
+ return;
+
+ for (idx = 0; idx < CCU_PLL_DBGFS_BIT_NUM; ++idx) {
+ bits[idx] = ccu_pll_bits[idx];
+ bits[idx].pll = pll;
+
+ debugfs_create_file_unsafe(bits[idx].name, ccu_pll_dbgfs_mode,
+ dentry, &bits[idx],
+ &ccu_pll_dbgfs_bit_fops);
+ }
+
+ flds = kcalloc(CCU_PLL_DBGFS_FLD_NUM, sizeof(*flds), GFP_KERNEL);
+ if (!flds)
+ return;
+
+ for (idx = 0; idx < CCU_PLL_DBGFS_FLD_NUM; ++idx) {
+ flds[idx] = ccu_pll_flds[idx];
+ flds[idx].pll = pll;
+
+ debugfs_create_file_unsafe(flds[idx].name, ccu_pll_dbgfs_mode,
+ dentry, &flds[idx],
+ &ccu_pll_dbgfs_fld_fops);
+ }
+}
+
+#else /* !CONFIG_DEBUG_FS */
+
+#define ccu_pll_debug_init NULL
+
+#endif /* !CONFIG_DEBUG_FS */
+
+static const struct clk_ops ccu_pll_gate_to_set_ops = {
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .is_enabled = ccu_pll_is_enabled,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .set_rate = ccu_pll_set_rate_norst,
+ .debug_init = ccu_pll_debug_init
+};
+
+static const struct clk_ops ccu_pll_straight_set_ops = {
+ .enable = ccu_pll_enable,
+ .disable = ccu_pll_disable,
+ .is_enabled = ccu_pll_is_enabled,
+ .recalc_rate = ccu_pll_recalc_rate,
+ .round_rate = ccu_pll_round_rate,
+ .set_rate = ccu_pll_set_rate_reset,
+ .debug_init = ccu_pll_debug_init
+};
+
+struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *pll_init)
+{
+ struct clk_parent_data parent_data = { };
+ struct clk_init_data hw_init = { };
+ struct ccu_pll *pll;
+ int ret;
+
+ if (!pll_init)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Note that since the Baikal-T1 System Controller registers are MMIO-backed,
+ * we don't check the return status of the regmap IO operations, because it
+ * must be zero anyway.
+ */
+ pll->hw.init = &hw_init;
+ pll->reg_ctl = pll_init->base + CCU_PLL_CTL;
+ pll->reg_ctl1 = pll_init->base + CCU_PLL_CTL1;
+ pll->sys_regs = pll_init->sys_regs;
+ pll->id = pll_init->id;
+ spin_lock_init(&pll->lock);
+
+ hw_init.name = pll_init->name;
+ hw_init.flags = pll_init->flags;
+
+ if (hw_init.flags & CLK_SET_RATE_GATE)
+ hw_init.ops = &ccu_pll_gate_to_set_ops;
+ else
+ hw_init.ops = &ccu_pll_straight_set_ops;
+
+ if (!pll_init->parent_name) {
+ ret = -EINVAL;
+ goto err_free_pll;
+ }
+ parent_data.fw_name = pll_init->parent_name;
+ hw_init.parent_data = &parent_data;
+ hw_init.num_parents = 1;
+
+ ret = of_clk_hw_register(pll_init->np, &pll->hw);
+ if (ret)
+ goto err_free_pll;
+
+ return pll;
+
+err_free_pll:
+ kfree(pll);
+
+ return ERR_PTR(ret);
+}
+
+void ccu_pll_hw_unregister(struct ccu_pll *pll)
+{
+ clk_hw_unregister(&pll->hw);
+
+ kfree(pll);
+}
diff --git a/drivers/clk/baikal-t1/ccu-pll.h b/drivers/clk/baikal-t1/ccu-pll.h
new file mode 100644
index 000000000000..76cd9132a219
--- /dev/null
+++ b/drivers/clk/baikal-t1/ccu-pll.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU PLL interface driver
+ */
+#ifndef __CLK_BT1_CCU_PLL_H__
+#define __CLK_BT1_CCU_PLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/of.h>
+
+/*
+ * struct ccu_pll_init_data - CCU PLL initialization data
+ * @id: Clock private identifier.
+ * @name: Clock name.
+ * @parent_name: Parent clock name in the fw node.
+ * @base: PLL registers base address with respect to the sys_regs base.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @np: Pointer to the node describing the CCU PLLs.
+ * @flags: PLL clock flags.
+ */
+struct ccu_pll_init_data {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ struct regmap *sys_regs;
+ struct device_node *np;
+ unsigned long flags;
+};
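+
+/*
+ * Editorial note, not part of the original patch: a hypothetical example of
+ * filling this descriptor, mirroring what clk-ccu-pll.c below does at runtime
+ * (where .sys_regs and .np come from the parent syscon node):
+ *
+ * struct ccu_pll_init_data init = {
+ * .id = CCU_CPU_PLL,
+ * .name = "cpu_pll",
+ * .parent_name = "ref_clk",
+ * .base = 0x000,
+ * .flags = CLK_IS_CRITICAL,
+ * };
+ */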
+
+/*
+ * struct ccu_pll - CCU PLL descriptor
+ * @hw: clk_hw of the PLL.
+ * @id: Clock private identifier.
+ * @reg_ctl: PLL control register base.
+ * @reg_ctl1: PLL control1 register base.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @lock: PLL state change spin-lock.
+ */
+struct ccu_pll {
+ struct clk_hw hw;
+ unsigned int id;
+ unsigned int reg_ctl;
+ unsigned int reg_ctl1;
+ struct regmap *sys_regs;
+ spinlock_t lock;
+};
+#define to_ccu_pll(_hw) container_of(_hw, struct ccu_pll, hw)
+
+static inline struct clk_hw *ccu_pll_get_clk_hw(struct ccu_pll *pll)
+{
+ return pll ? &pll->hw : NULL;
+}
+
+struct ccu_pll *ccu_pll_hw_register(const struct ccu_pll_init_data *init);
+
+void ccu_pll_hw_unregister(struct ccu_pll *pll);
+
+#endif /* __CLK_BT1_CCU_PLL_H__ */
diff --git a/drivers/clk/baikal-t1/clk-ccu-div.c b/drivers/clk/baikal-t1/clk-ccu-div.c
new file mode 100644
index 000000000000..f141fda12b09
--- /dev/null
+++ b/drivers/clk/baikal-t1/clk-ccu-div.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU Dividers clock driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-div: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/reset-controller.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/ioport.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/bt1-ccu.h>
+#include <dt-bindings/reset/bt1-ccu.h>
+
+#include "ccu-div.h"
+
+#define CCU_AXI_MAIN_BASE 0x030
+#define CCU_AXI_DDR_BASE 0x034
+#define CCU_AXI_SATA_BASE 0x038
+#define CCU_AXI_GMAC0_BASE 0x03C
+#define CCU_AXI_GMAC1_BASE 0x040
+#define CCU_AXI_XGMAC_BASE 0x044
+#define CCU_AXI_PCIE_M_BASE 0x048
+#define CCU_AXI_PCIE_S_BASE 0x04C
+#define CCU_AXI_USB_BASE 0x050
+#define CCU_AXI_HWA_BASE 0x054
+#define CCU_AXI_SRAM_BASE 0x058
+
+#define CCU_SYS_SATA_REF_BASE 0x060
+#define CCU_SYS_APB_BASE 0x064
+#define CCU_SYS_GMAC0_BASE 0x068
+#define CCU_SYS_GMAC1_BASE 0x06C
+#define CCU_SYS_XGMAC_BASE 0x070
+#define CCU_SYS_USB_BASE 0x074
+#define CCU_SYS_PVT_BASE 0x078
+#define CCU_SYS_HWA_BASE 0x07C
+#define CCU_SYS_UART_BASE 0x084
+#define CCU_SYS_TIMER0_BASE 0x088
+#define CCU_SYS_TIMER1_BASE 0x08C
+#define CCU_SYS_TIMER2_BASE 0x090
+#define CCU_SYS_WDT_BASE 0x150
+
+#define CCU_DIV_VAR_INFO(_id, _name, _pname, _base, _width, _flags, _features) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_VAR, \
+ .width = _width, \
+ .flags = _flags, \
+ .features = _features \
+ }
+
+#define CCU_DIV_GATE_INFO(_id, _name, _pname, _base, _divider) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .type = CCU_DIV_GATE, \
+ .divider = _divider \
+ }
+
+#define CCU_DIV_FIXED_INFO(_id, _name, _pname, _divider) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .type = CCU_DIV_FIXED, \
+ .divider = _divider \
+ }
+
+#define CCU_DIV_RST_MAP(_rst_id, _clk_id) \
+ { \
+ .rst_id = _rst_id, \
+ .clk_id = _clk_id \
+ }
+
+struct ccu_div_info {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ enum ccu_div_type type;
+ union {
+ unsigned int width;
+ unsigned int divider;
+ };
+ unsigned long flags;
+ unsigned long features;
+};
+
+struct ccu_div_rst_map {
+ unsigned int rst_id;
+ unsigned int clk_id;
+};
+
+struct ccu_div_data {
+ struct device_node *np;
+ struct regmap *sys_regs;
+
+ unsigned int divs_num;
+ const struct ccu_div_info *divs_info;
+ struct ccu_div **divs;
+
+ unsigned int rst_num;
+ const struct ccu_div_rst_map *rst_map;
+ struct reset_controller_dev rcdev;
+};
+#define to_ccu_div_data(_rcdev) container_of(_rcdev, struct ccu_div_data, rcdev)
+
+/*
+ * AXI Main Interconnect (axi_main_clk) and DDR AXI-bus (axi_ddr_clk) clocks
+ * must be left enabled in any case, since the former is responsible for
+ * clocking the bus between the CPU cores and the rest of the SoC components,
+ * while the latter clocks the AXI-bus between the DDR controller and the Main
+ * Interconnect. Should either of these clocks be disabled, the system would
+ * literally stop working. That's why we mark them as critical.
+ */
+static const struct ccu_div_info axi_info[] = {
+ CCU_DIV_VAR_INFO(CCU_AXI_MAIN_CLK, "axi_main_clk", "pcie_clk",
+ CCU_AXI_MAIN_BASE, 4,
+ CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_DDR_CLK, "axi_ddr_clk", "sata_clk",
+ CCU_AXI_DDR_BASE, 4,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_SATA_CLK, "axi_sata_clk", "sata_clk",
+ CCU_AXI_SATA_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_GMAC0_CLK, "axi_gmac0_clk", "eth_clk",
+ CCU_AXI_GMAC0_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_GMAC1_CLK, "axi_gmac1_clk", "eth_clk",
+ CCU_AXI_GMAC1_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_XGMAC_CLK, "axi_xgmac_clk", "eth_clk",
+ CCU_AXI_XGMAC_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_PCIE_M_CLK, "axi_pcie_m_clk", "pcie_clk",
+ CCU_AXI_PCIE_M_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_PCIE_S_CLK, "axi_pcie_s_clk", "pcie_clk",
+ CCU_AXI_PCIE_S_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_USB_CLK, "axi_usb_clk", "sata_clk",
+ CCU_AXI_USB_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_HWA_CLK, "axi_hwa_clk", "sata_clk",
+ CCU_AXI_HWA_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_AXI_SRAM_CLK, "axi_sram_clk", "eth_clk",
+ CCU_AXI_SRAM_BASE, 4,
+ CLK_SET_RATE_GATE, CCU_DIV_RESET_DOMAIN)
+};
+
+static const struct ccu_div_rst_map axi_rst_map[] = {
+ CCU_DIV_RST_MAP(CCU_AXI_MAIN_RST, CCU_AXI_MAIN_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_DDR_RST, CCU_AXI_DDR_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_SATA_RST, CCU_AXI_SATA_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_GMAC0_RST, CCU_AXI_GMAC0_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_GMAC1_RST, CCU_AXI_GMAC1_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_XGMAC_RST, CCU_AXI_XGMAC_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_PCIE_M_RST, CCU_AXI_PCIE_M_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_PCIE_S_RST, CCU_AXI_PCIE_S_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_USB_RST, CCU_AXI_USB_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_HWA_RST, CCU_AXI_HWA_CLK),
+ CCU_DIV_RST_MAP(CCU_AXI_SRAM_RST, CCU_AXI_SRAM_CLK)
+};
+
+/*
+ * The APB-bus clock is marked as critical since it clocks the main
+ * communication bus used for IO operations on the SoC devices' registers.
+ */
+static const struct ccu_div_info sys_info[] = {
+ CCU_DIV_VAR_INFO(CCU_SYS_SATA_REF_CLK, "sys_sata_ref_clk",
+ "sata_clk", CCU_SYS_SATA_REF_BASE, 4,
+ CLK_SET_RATE_GATE,
+ CCU_DIV_SKIP_ONE | CCU_DIV_LOCK_SHIFTED |
+ CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_VAR_INFO(CCU_SYS_APB_CLK, "sys_apb_clk",
+ "pcie_clk", CCU_SYS_APB_BASE, 5,
+ CLK_IS_CRITICAL, CCU_DIV_RESET_DOMAIN),
+ CCU_DIV_GATE_INFO(CCU_SYS_GMAC0_TX_CLK, "sys_gmac0_tx_clk",
+ "eth_clk", CCU_SYS_GMAC0_BASE, 5),
+ CCU_DIV_FIXED_INFO(CCU_SYS_GMAC0_PTP_CLK, "sys_gmac0_ptp_clk",
+ "eth_clk", 10),
+ CCU_DIV_GATE_INFO(CCU_SYS_GMAC1_TX_CLK, "sys_gmac1_tx_clk",
+ "eth_clk", CCU_SYS_GMAC1_BASE, 5),
+ CCU_DIV_FIXED_INFO(CCU_SYS_GMAC1_PTP_CLK, "sys_gmac1_ptp_clk",
+ "eth_clk", 10),
+ CCU_DIV_GATE_INFO(CCU_SYS_XGMAC_REF_CLK, "sys_xgmac_ref_clk",
+ "eth_clk", CCU_SYS_XGMAC_BASE, 8),
+ CCU_DIV_FIXED_INFO(CCU_SYS_XGMAC_PTP_CLK, "sys_xgmac_ptp_clk",
+ "eth_clk", 10),
+ CCU_DIV_GATE_INFO(CCU_SYS_USB_CLK, "sys_usb_clk",
+ "eth_clk", CCU_SYS_USB_BASE, 10),
+ CCU_DIV_VAR_INFO(CCU_SYS_PVT_CLK, "sys_pvt_clk",
+ "ref_clk", CCU_SYS_PVT_BASE, 5,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_HWA_CLK, "sys_hwa_clk",
+ "sata_clk", CCU_SYS_HWA_BASE, 4,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_UART_CLK, "sys_uart_clk",
+ "eth_clk", CCU_SYS_UART_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_FIXED_INFO(CCU_SYS_I2C1_CLK, "sys_i2c1_clk",
+ "eth_clk", 10),
+ CCU_DIV_FIXED_INFO(CCU_SYS_I2C2_CLK, "sys_i2c2_clk",
+ "eth_clk", 10),
+ CCU_DIV_FIXED_INFO(CCU_SYS_GPIO_CLK, "sys_gpio_clk",
+ "ref_clk", 25),
+ CCU_DIV_VAR_INFO(CCU_SYS_TIMER0_CLK, "sys_timer0_clk",
+ "ref_clk", CCU_SYS_TIMER0_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_TIMER1_CLK, "sys_timer1_clk",
+ "ref_clk", CCU_SYS_TIMER1_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_TIMER2_CLK, "sys_timer2_clk",
+ "ref_clk", CCU_SYS_TIMER2_BASE, 17,
+ CLK_SET_RATE_GATE, 0),
+ CCU_DIV_VAR_INFO(CCU_SYS_WDT_CLK, "sys_wdt_clk",
+ "eth_clk", CCU_SYS_WDT_BASE, 17,
+ CLK_SET_RATE_GATE, CCU_DIV_SKIP_ONE_TO_THREE)
+};
+
+static const struct ccu_div_rst_map sys_rst_map[] = {
+ CCU_DIV_RST_MAP(CCU_SYS_SATA_REF_RST, CCU_SYS_SATA_REF_CLK),
+ CCU_DIV_RST_MAP(CCU_SYS_APB_RST, CCU_SYS_APB_CLK),
+};
+
+static struct ccu_div *ccu_div_find_desc(struct ccu_div_data *data,
+ unsigned int clk_id)
+{
+ struct ccu_div *div;
+ int idx;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ div = data->divs[idx];
+ if (div && div->id == clk_id)
+ return div;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
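+/*
+ * Editorial comment, not part of the original patch: this is the only
+ * reset_control_ops callback provided below. It translates a reset line id
+ * from the DT cell into a clock id via the rst_map table and then toggles
+ * that divider's reset domain through ccu_div_reset_domain().
+ */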
+static int ccu_div_reset(struct reset_controller_dev *rcdev,
+ unsigned long rst_id)
+{
+ struct ccu_div_data *data = to_ccu_div_data(rcdev);
+ const struct ccu_div_rst_map *map;
+ struct ccu_div *div;
+ int idx, ret;
+
+ for (idx = 0, map = data->rst_map; idx < data->rst_num; ++idx, ++map) {
+ if (map->rst_id == rst_id)
+ break;
+ }
+ if (idx == data->rst_num) {
+ pr_err("Invalid reset ID %lu specified\n", rst_id);
+ return -EINVAL;
+ }
+
+ div = ccu_div_find_desc(data, map->clk_id);
+ if (IS_ERR(div)) {
+ pr_err("Invalid clock ID %d in mapping\n", map->clk_id);
+ return PTR_ERR(div);
+ }
+
+ ret = ccu_div_reset_domain(div);
+ if (ret) {
+ pr_err("Reset isn't supported by divider %s\n",
+ clk_hw_get_name(ccu_div_get_clk_hw(div)));
+ }
+
+ return ret;
+}
+
+static const struct reset_control_ops ccu_div_rst_ops = {
+ .reset = ccu_div_reset,
+};
+
+static struct ccu_div_data *ccu_div_create_data(struct device_node *np)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->np = np;
+ if (of_device_is_compatible(np, "baikal,bt1-ccu-axi")) {
+ data->divs_num = ARRAY_SIZE(axi_info);
+ data->divs_info = axi_info;
+ data->rst_num = ARRAY_SIZE(axi_rst_map);
+ data->rst_map = axi_rst_map;
+ } else if (of_device_is_compatible(np, "baikal,bt1-ccu-sys")) {
+ data->divs_num = ARRAY_SIZE(sys_info);
+ data->divs_info = sys_info;
+ data->rst_num = ARRAY_SIZE(sys_rst_map);
+ data->rst_map = sys_rst_map;
+ } else {
+ pr_err("Incompatible DT node '%s' specified\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ goto err_kfree_data;
+ }
+
+ data->divs = kcalloc(data->divs_num, sizeof(*data->divs), GFP_KERNEL);
+ if (!data->divs) {
+ ret = -ENOMEM;
+ goto err_kfree_data;
+ }
+
+ return data;
+
+err_kfree_data:
+ kfree(data);
+
+ return ERR_PTR(ret);
+}
+
+static void ccu_div_free_data(struct ccu_div_data *data)
+{
+ kfree(data->divs);
+
+ kfree(data);
+}
+
+static int ccu_div_find_sys_regs(struct ccu_div_data *data)
+{
+ data->sys_regs = syscon_node_to_regmap(data->np->parent);
+ if (IS_ERR(data->sys_regs)) {
+ pr_err("Failed to find syscon regs for '%s'\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->sys_regs);
+ }
+
+ return 0;
+}
+
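+/*
+ * Editorial comment, not part of the original patch: this callback backs the
+ * single-cell OF clock provider registered below, so a hypothetical consumer
+ * node would reference a divider as, e.g.:
+ *
+ * clocks = <&ccu_sys CCU_SYS_UART_CLK>;
+ *
+ * where the cell value is matched against the ids taken from the divs_info
+ * tables above.
+ */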
+static struct clk_hw *ccu_div_of_clk_hw_get(struct of_phandle_args *clkspec,
+ void *priv)
+{
+ struct ccu_div_data *data = priv;
+ struct ccu_div *div;
+ unsigned int clk_id;
+
+ clk_id = clkspec->args[0];
+ div = ccu_div_find_desc(data, clk_id);
+ if (IS_ERR(div)) {
+ pr_info("Invalid clock ID %d specified\n", clk_id);
+ return ERR_CAST(div);
+ }
+
+ return ccu_div_get_clk_hw(div);
+}
+
+static int ccu_div_clk_register(struct ccu_div_data *data)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < data->divs_num; ++idx) {
+ const struct ccu_div_info *info = &data->divs_info[idx];
+ struct ccu_div_init_data init = {0};
+
+ init.id = info->id;
+ init.name = info->name;
+ init.parent_name = info->parent_name;
+ init.np = data->np;
+ init.type = info->type;
+ init.flags = info->flags;
+ init.features = info->features;
+
+ if (init.type == CCU_DIV_VAR) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.width = info->width;
+ } else if (init.type == CCU_DIV_GATE) {
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.divider = info->divider;
+ } else {
+ init.divider = info->divider;
+ }
+
+ data->divs[idx] = ccu_div_hw_register(&init);
+ if (IS_ERR(data->divs[idx])) {
+ ret = PTR_ERR(data->divs[idx]);
+ pr_err("Couldn't register divider '%s' hw\n",
+ init.name);
+ goto err_hw_unregister;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(data->np, ccu_div_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register dividers '%s' clock provider\n",
+ of_node_full_name(data->np));
+ goto err_hw_unregister;
+ }
+
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx)
+ ccu_div_hw_unregister(data->divs[idx]);
+
+ return ret;
+}
+
+static void ccu_div_clk_unregister(struct ccu_div_data *data)
+{
+ int idx;
+
+ of_clk_del_provider(data->np);
+
+ for (idx = 0; idx < data->divs_num; ++idx)
+ ccu_div_hw_unregister(data->divs[idx]);
+}
+
+static int ccu_div_rst_register(struct ccu_div_data *data)
+{
+ int ret;
+
+ data->rcdev.ops = &ccu_div_rst_ops;
+ data->rcdev.of_node = data->np;
+ data->rcdev.nr_resets = data->rst_num;
+
+ ret = reset_controller_register(&data->rcdev);
+ if (ret)
+ pr_err("Couldn't register divider '%s' reset controller\n",
+ of_node_full_name(data->np));
+
+ return ret;
+}
+
+static void ccu_div_init(struct device_node *np)
+{
+ struct ccu_div_data *data;
+ int ret;
+
+ data = ccu_div_create_data(np);
+ if (IS_ERR(data))
+ return;
+
+ ret = ccu_div_find_sys_regs(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_div_clk_register(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_div_rst_register(data);
+ if (ret)
+ goto err_clk_unregister;
+
+ return;
+
+err_clk_unregister:
+ ccu_div_clk_unregister(data);
+
+err_free_data:
+ ccu_div_free_data(data);
+}
+
+CLK_OF_DECLARE(ccu_axi, "baikal,bt1-ccu-axi", ccu_div_init);
+CLK_OF_DECLARE(ccu_sys, "baikal,bt1-ccu-sys", ccu_div_init);
diff --git a/drivers/clk/baikal-t1/clk-ccu-pll.c b/drivers/clk/baikal-t1/clk-ccu-pll.c
new file mode 100644
index 000000000000..1eec8c0b8f50
--- /dev/null
+++ b/drivers/clk/baikal-t1/clk-ccu-pll.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
+ *
+ * Baikal-T1 CCU PLL clocks driver
+ */
+
+#define pr_fmt(fmt) "bt1-ccu-pll: " fmt
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/bt1-ccu.h>
+
+#include "ccu-pll.h"
+
+#define CCU_CPU_PLL_BASE 0x000
+#define CCU_SATA_PLL_BASE 0x008
+#define CCU_DDR_PLL_BASE 0x010
+#define CCU_PCIE_PLL_BASE 0x018
+#define CCU_ETH_PLL_BASE 0x020
+
+#define CCU_PLL_INFO(_id, _name, _pname, _base, _flags) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .base = _base, \
+ .flags = _flags \
+ }
+
+#define CCU_PLL_NUM ARRAY_SIZE(pll_info)
+
+struct ccu_pll_info {
+ unsigned int id;
+ const char *name;
+ const char *parent_name;
+ unsigned int base;
+ unsigned long flags;
+};
+
+/*
+ * Mark all PLLs as critical except the Ethernet one. The CPU and DDR PLLs are
+ * the sources of the CPU cores' and DDR controller's reference clocks, so they
+ * obviously should never be gated. The SATA and PCIe PLLs are the parents of
+ * the APB-bus and DDR controller AXI-bus clocks. If they were gated, the
+ * system would be unusable.
+ */
+static const struct ccu_pll_info pll_info[] = {
+ CCU_PLL_INFO(CCU_CPU_PLL, "cpu_pll", "ref_clk", CCU_CPU_PLL_BASE,
+ CLK_IS_CRITICAL),
+ CCU_PLL_INFO(CCU_SATA_PLL, "sata_pll", "ref_clk", CCU_SATA_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CCU_PLL_INFO(CCU_DDR_PLL, "ddr_pll", "ref_clk", CCU_DDR_PLL_BASE,
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE),
+ CCU_PLL_INFO(CCU_PCIE_PLL, "pcie_pll", "ref_clk", CCU_PCIE_PLL_BASE,
+ CLK_IS_CRITICAL),
+ CCU_PLL_INFO(CCU_ETH_PLL, "eth_pll", "ref_clk", CCU_ETH_PLL_BASE,
+ CLK_SET_RATE_GATE)
+};
+
+struct ccu_pll_data {
+ struct device_node *np;
+ struct regmap *sys_regs;
+ struct ccu_pll *plls[CCU_PLL_NUM];
+};
+
+static struct ccu_pll *ccu_pll_find_desc(struct ccu_pll_data *data,
+ unsigned int clk_id)
+{
+ struct ccu_pll *pll;
+ int idx;
+
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ pll = data->plls[idx];
+ if (pll && pll->id == clk_id)
+ return pll;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ccu_pll_data *ccu_pll_create_data(struct device_node *np)
+{
+ struct ccu_pll_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ data->np = np;
+
+ return data;
+}
+
+static void ccu_pll_free_data(struct ccu_pll_data *data)
+{
+ kfree(data);
+}
+
+static int ccu_pll_find_sys_regs(struct ccu_pll_data *data)
+{
+ data->sys_regs = syscon_node_to_regmap(data->np->parent);
+ if (IS_ERR(data->sys_regs)) {
+ pr_err("Failed to find syscon regs for '%s'\n",
+ of_node_full_name(data->np));
+ return PTR_ERR(data->sys_regs);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *ccu_pll_of_clk_hw_get(struct of_phandle_args *clkspec,
+ void *priv)
+{
+ struct ccu_pll_data *data = priv;
+ struct ccu_pll *pll;
+ unsigned int clk_id;
+
+ clk_id = clkspec->args[0];
+ pll = ccu_pll_find_desc(data, clk_id);
+ if (IS_ERR(pll)) {
+ pr_info("Invalid PLL clock ID %d specified\n", clk_id);
+ return ERR_CAST(pll);
+ }
+
+ return ccu_pll_get_clk_hw(pll);
+}
+
+static int ccu_pll_clk_register(struct ccu_pll_data *data)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < CCU_PLL_NUM; ++idx) {
+ const struct ccu_pll_info *info = &pll_info[idx];
+ struct ccu_pll_init_data init = {0};
+
+ init.id = info->id;
+ init.name = info->name;
+ init.parent_name = info->parent_name;
+ init.base = info->base;
+ init.sys_regs = data->sys_regs;
+ init.np = data->np;
+ init.flags = info->flags;
+
+ data->plls[idx] = ccu_pll_hw_register(&init);
+ if (IS_ERR(data->plls[idx])) {
+ ret = PTR_ERR(data->plls[idx]);
+ pr_err("Couldn't register PLL hw '%s'\n",
+ init.name);
+ goto err_hw_unregister;
+ }
+ }
+
+ ret = of_clk_add_hw_provider(data->np, ccu_pll_of_clk_hw_get, data);
+ if (ret) {
+ pr_err("Couldn't register PLL provider of '%s'\n",
+ of_node_full_name(data->np));
+ goto err_hw_unregister;
+ }
+
+ return 0;
+
+err_hw_unregister:
+ for (--idx; idx >= 0; --idx)
+ ccu_pll_hw_unregister(data->plls[idx]);
+
+ return ret;
+}
+
+static __init void ccu_pll_init(struct device_node *np)
+{
+ struct ccu_pll_data *data;
+ int ret;
+
+ data = ccu_pll_create_data(np);
+ if (IS_ERR(data))
+ return;
+
+ ret = ccu_pll_find_sys_regs(data);
+ if (ret)
+ goto err_free_data;
+
+ ret = ccu_pll_clk_register(data);
+ if (ret)
+ goto err_free_data;
+
+ return;
+
+err_free_data:
+ ccu_pll_free_data(data);
+}
+CLK_OF_DECLARE(ccu_pll, "baikal,bt1-ccu-pll", ccu_pll_init);
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index ded13ccf768e..6bb7efa12037 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -396,8 +396,8 @@ out:
}
static void bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
- struct debugfs_reg32 *regs, size_t nregs,
- struct dentry *dentry)
+ const struct debugfs_reg32 *regs,
+ size_t nregs, struct dentry *dentry)
{
struct debugfs_regset32 *regset;
@@ -1240,7 +1240,7 @@ static u8 bcm2835_clock_get_parent(struct clk_hw *hw)
return (src & CM_SRC_MASK) >> CM_SRC_SHIFT;
}
-static struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
+static const struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
{
.name = "ctl",
.offset = 0,
@@ -1296,8 +1296,9 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
};
static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
- const struct bcm2835_pll_data *data)
+ const void *data)
{
+ const struct bcm2835_pll_data *pll_data = data;
struct bcm2835_pll *pll;
struct clk_init_data init;
int ret;
@@ -1307,7 +1308,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
/* All of the PLLs derive from the external oscillator. */
init.parent_names = &cprman->real_parent_names[0];
init.num_parents = 1;
- init.name = data->name;
+ init.name = pll_data->name;
init.ops = &bcm2835_pll_clk_ops;
init.flags = CLK_IGNORE_UNUSED;
@@ -1316,7 +1317,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
return NULL;
pll->cprman = cprman;
- pll->data = data;
+ pll->data = pll_data;
pll->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &pll->hw);
@@ -1327,35 +1328,36 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman,
static struct clk_hw *
bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
- const struct bcm2835_pll_divider_data *data)
+ const void *data)
{
+ const struct bcm2835_pll_divider_data *divider_data = data;
struct bcm2835_pll_divider *divider;
struct clk_init_data init;
const char *divider_name;
int ret;
- if (data->fixed_divider != 1) {
+ if (divider_data->fixed_divider != 1) {
divider_name = devm_kasprintf(cprman->dev, GFP_KERNEL,
- "%s_prediv", data->name);
+ "%s_prediv", divider_data->name);
if (!divider_name)
return NULL;
} else {
- divider_name = data->name;
+ divider_name = divider_data->name;
}
memset(&init, 0, sizeof(init));
- init.parent_names = &data->source_pll;
+ init.parent_names = &divider_data->source_pll;
init.num_parents = 1;
init.name = divider_name;
init.ops = &bcm2835_pll_divider_clk_ops;
- init.flags = data->flags | CLK_IGNORE_UNUSED;
+ init.flags = divider_data->flags | CLK_IGNORE_UNUSED;
divider = devm_kzalloc(cprman->dev, sizeof(*divider), GFP_KERNEL);
if (!divider)
return NULL;
- divider->div.reg = cprman->regs + data->a2w_reg;
+ divider->div.reg = cprman->regs + divider_data->a2w_reg;
divider->div.shift = A2W_PLL_DIV_SHIFT;
divider->div.width = A2W_PLL_DIV_BITS;
divider->div.flags = CLK_DIVIDER_MAX_AT_ZERO;
@@ -1364,7 +1366,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
divider->div.table = NULL;
divider->cprman = cprman;
- divider->data = data;
+ divider->data = divider_data;
ret = devm_clk_hw_register(cprman->dev, &divider->div.hw);
if (ret)
@@ -1374,20 +1376,22 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
* PLLH's channels have a fixed divide by 10 afterwards, which
* is what our consumers are actually using.
*/
- if (data->fixed_divider != 1) {
- return clk_hw_register_fixed_factor(cprman->dev, data->name,
+ if (divider_data->fixed_divider != 1) {
+ return clk_hw_register_fixed_factor(cprman->dev,
+ divider_data->name,
divider_name,
CLK_SET_RATE_PARENT,
1,
- data->fixed_divider);
+ divider_data->fixed_divider);
}
return &divider->div.hw;
}
static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
- const struct bcm2835_clock_data *data)
+ const void *data)
{
+ const struct bcm2835_clock_data *clock_data = data;
struct bcm2835_clock *clock;
struct clk_init_data init;
const char *parents[1 << CM_SRC_BITS];
@@ -1398,8 +1402,8 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
* Replace our strings referencing parent clocks with the
* actual clock-output-name of the parent.
*/
- for (i = 0; i < data->num_mux_parents; i++) {
- parents[i] = data->parents[i];
+ for (i = 0; i < clock_data->num_mux_parents; i++) {
+ parents[i] = clock_data->parents[i];
ret = match_string(cprman_parent_names,
ARRAY_SIZE(cprman_parent_names),
@@ -1410,18 +1414,18 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
memset(&init, 0, sizeof(init));
init.parent_names = parents;
- init.num_parents = data->num_mux_parents;
- init.name = data->name;
- init.flags = data->flags | CLK_IGNORE_UNUSED;
+ init.num_parents = clock_data->num_mux_parents;
+ init.name = clock_data->name;
+ init.flags = clock_data->flags | CLK_IGNORE_UNUSED;
/*
* Pass the CLK_SET_RATE_PARENT flag if we are allowed to propagate
* rate changes on at least of the parents.
*/
- if (data->set_rate_parent)
+ if (clock_data->set_rate_parent)
init.flags |= CLK_SET_RATE_PARENT;
- if (data->is_vpu_clock) {
+ if (clock_data->is_vpu_clock) {
init.ops = &bcm2835_vpu_clock_clk_ops;
} else {
init.ops = &bcm2835_clock_clk_ops;
@@ -1430,7 +1434,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
/* If the clock wasn't actually enabled at boot, it's not
* critical.
*/
- if (!(cprman_read(cprman, data->ctl_reg) & CM_ENABLE))
+ if (!(cprman_read(cprman, clock_data->ctl_reg) & CM_ENABLE))
init.flags &= ~CLK_IS_CRITICAL;
}
@@ -1439,7 +1443,7 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
return NULL;
clock->cprman = cprman;
- clock->data = data;
+ clock->data = clock_data;
clock->hw.init = &init;
ret = devm_clk_hw_register(cprman->dev, &clock->hw);
@@ -1448,25 +1452,27 @@ static struct clk_hw *bcm2835_register_clock(struct bcm2835_cprman *cprman,
return &clock->hw;
}
-static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
- const struct bcm2835_gate_data *data)
+static struct clk_hw *bcm2835_register_gate(struct bcm2835_cprman *cprman,
+ const void *data)
{
- return clk_register_gate(cprman->dev, data->name, data->parent,
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- cprman->regs + data->ctl_reg,
- CM_GATE_BIT, 0, &cprman->regs_lock);
+ const struct bcm2835_gate_data *gate_data = data;
+
+ return clk_hw_register_gate(cprman->dev, gate_data->name,
+ gate_data->parent,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ cprman->regs + gate_data->ctl_reg,
+ CM_GATE_BIT, 0, &cprman->regs_lock);
}
-typedef struct clk_hw *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman,
- const void *data);
struct bcm2835_clk_desc {
- bcm2835_clk_register clk_register;
+ struct clk_hw *(*clk_register)(struct bcm2835_cprman *cprman,
+ const void *data);
unsigned int supported;
const void *data;
};
/* assignment helper macros for different clock types */
-#define _REGISTER(f, s, ...) { .clk_register = (bcm2835_clk_register)f, \
+#define _REGISTER(f, s, ...) { .clk_register = f, \
.supported = s, \
.data = __VA_ARGS__ }
#define REGISTER_PLL(s, ...) _REGISTER(&bcm2835_register_pll, \
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 392d01705b97..99afc949925f 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -642,14 +642,22 @@ static const u32 ast2600_a0_axi_ahb_div_table[] = {
2, 2, 3, 5,
};
-static const u32 ast2600_a1_axi_ahb_div_table[] = {
- 4, 6, 2, 4,
+static const u32 ast2600_a1_axi_ahb_div0_tbl[] = {
+ 3, 2, 3, 4,
+};
+
+static const u32 ast2600_a1_axi_ahb_div1_tbl[] = {
+ 3, 4, 6, 8,
+};
+
+static const u32 ast2600_a1_axi_ahb200_tbl[] = {
+ 3, 4, 3, 4, 2, 2, 2, 2,
};
static void __init aspeed_g6_cc(struct regmap *map)
{
struct clk_hw *hw;
- u32 val, div, chip_id, axi_div, ahb_div;
+ u32 val, div, divbits, chip_id, axi_div, ahb_div;
clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);
@@ -679,11 +687,22 @@ static void __init aspeed_g6_cc(struct regmap *map)
else
axi_div = 2;
+ divbits = (val >> 11) & 0x3;
regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id);
- if (chip_id & BIT(16))
- ahb_div = ast2600_a1_axi_ahb_div_table[(val >> 11) & 0x3];
- else
+ if (chip_id & BIT(16)) {
+ if (!divbits) {
+ ahb_div = ast2600_a1_axi_ahb200_tbl[(val >> 8) & 0x3];
+ if (val & BIT(16))
+ ahb_div *= 2;
+ } else {
+ if (val & BIT(16))
+ ahb_div = ast2600_a1_axi_ahb_div1_tbl[divbits];
+ else
+ ahb_div = ast2600_a1_axi_ahb_div0_tbl[divbits];
+ }
+ } else {
ahb_div = ast2600_a0_axi_ahb_div_table[(val >> 11) & 0x3];
+ }
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, axi_div * ahb_div);
aspeed_g6_clk_data->hws[ASPEED_CLK_AHB] = hw;
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 97d1e8c35b71..b4f8852201cb 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -53,35 +53,38 @@ struct hsdk_pll_cfg {
u32 fbdiv;
u32 odiv;
u32 band;
+ u32 bypass;
};
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
- { 100000000, 0, 11, 3, 0 },
- { 133000000, 0, 15, 3, 0 },
- { 200000000, 1, 47, 3, 0 },
- { 233000000, 1, 27, 2, 0 },
- { 300000000, 1, 35, 2, 0 },
- { 333000000, 1, 39, 2, 0 },
- { 400000000, 1, 47, 2, 0 },
- { 500000000, 0, 14, 1, 0 },
- { 600000000, 0, 17, 1, 0 },
- { 700000000, 0, 20, 1, 0 },
- { 800000000, 0, 23, 1, 0 },
- { 900000000, 1, 26, 0, 0 },
- { 1000000000, 1, 29, 0, 0 },
- { 1100000000, 1, 32, 0, 0 },
- { 1200000000, 1, 35, 0, 0 },
- { 1300000000, 1, 38, 0, 0 },
- { 1400000000, 1, 41, 0, 0 },
- { 1500000000, 1, 44, 0, 0 },
- { 1600000000, 1, 47, 0, 0 },
+ { 100000000, 0, 11, 3, 0, 0 },
+ { 133000000, 0, 15, 3, 0, 0 },
+ { 200000000, 1, 47, 3, 0, 0 },
+ { 233000000, 1, 27, 2, 0, 0 },
+ { 300000000, 1, 35, 2, 0, 0 },
+ { 333000000, 1, 39, 2, 0, 0 },
+ { 400000000, 1, 47, 2, 0, 0 },
+ { 500000000, 0, 14, 1, 0, 0 },
+ { 600000000, 0, 17, 1, 0, 0 },
+ { 700000000, 0, 20, 1, 0, 0 },
+ { 800000000, 0, 23, 1, 0, 0 },
+ { 900000000, 1, 26, 0, 0, 0 },
+ { 1000000000, 1, 29, 0, 0, 0 },
+ { 1100000000, 1, 32, 0, 0, 0 },
+ { 1200000000, 1, 35, 0, 0, 0 },
+ { 1300000000, 1, 38, 0, 0, 0 },
+ { 1400000000, 1, 41, 0, 0, 0 },
+ { 1500000000, 1, 44, 0, 0, 0 },
+ { 1600000000, 1, 47, 0, 0, 0 },
{}
};
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
- { 297000000, 0, 21, 2, 0 },
- { 540000000, 0, 19, 1, 0 },
- { 594000000, 0, 21, 1, 0 },
+ { 27000000, 0, 0, 0, 0, 1 },
+ { 148500000, 0, 21, 3, 0, 0 },
+ { 297000000, 0, 21, 2, 0, 0 },
+ { 540000000, 0, 19, 1, 0, 0 },
+ { 594000000, 0, 21, 1, 0, 0 },
{}
};
@@ -134,11 +137,16 @@ static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
{
u32 val = 0;
- /* Powerdown and Bypass bits should be cleared */
- val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
- val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
- val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
- val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
+ if (cfg->bypass) {
+ val = hsdk_pll_read(clk, CGU_PLL_CTRL);
+ val |= CGU_PLL_CTRL_BYPASS;
+ } else {
+ /* Powerdown and Bypass bits should be cleared */
+ val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
+ val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
+ val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
+ val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
+ }
dev_dbg(clk->dev, "write configuration: %#x\n", val);
@@ -172,14 +180,14 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
dev_dbg(clk->dev, "current configuration: %#x\n", val);
- /* Check if PLL is disabled */
- if (val & CGU_PLL_CTRL_PD)
- return 0;
-
/* Check if PLL is bypassed */
if (val & CGU_PLL_CTRL_BYPASS)
return parent_rate;
+ /* Check if PLL is disabled */
+ if (val & CGU_PLL_CTRL_PD)
+ return 0;
+
/* input divider = reg.idiv + 1 */
idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
/* fb divider = 2*(reg.fbdiv + 1) */
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 3c228b018116..3d7acab9d280 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1,8 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for Silicon Labs Si5341/Si5340 Clock generator
+ * Driver for Silicon Labs Si5340, Si5341, Si5342, Si5344 and Si5345
* Copyright (C) 2019 Topic Embedded Products
* Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * The Si5341 has 10 outputs and 5 synthesizers.
+ * The Si5340 is a smaller version of the Si5341 with only 4 outputs.
+ * The Si5345 is similar to the Si5341, with the addition of fractional input
+ * dividers and automatic input selection.
+ * The Si5342 and Si5344 are smaller versions of the Si5345.
*/
#include <linux/clk.h>
@@ -18,11 +24,17 @@
#define SI5341_NUM_INPUTS 4
-#define SI5341_MAX_NUM_OUTPUTS 10
#define SI5340_MAX_NUM_OUTPUTS 4
+#define SI5341_MAX_NUM_OUTPUTS 10
+#define SI5342_MAX_NUM_OUTPUTS 2
+#define SI5344_MAX_NUM_OUTPUTS 4
+#define SI5345_MAX_NUM_OUTPUTS 10
-#define SI5341_NUM_SYNTH 5
#define SI5340_NUM_SYNTH 4
+#define SI5341_NUM_SYNTH 5
+#define SI5342_NUM_SYNTH 2
+#define SI5344_NUM_SYNTH 4
+#define SI5345_NUM_SYNTH 5
/* Range of the synthesizer fractional divider */
#define SI5341_SYNTH_N_MIN 10
@@ -65,6 +77,7 @@ struct clk_si5341 {
u64 freq_vco; /* 13500–14256 MHz */
u8 num_outputs;
u8 num_synth;
+ u16 chip_id;
};
#define to_clk_si5341(_hw) container_of(_hw, struct clk_si5341, hw)
@@ -142,6 +155,7 @@ static const char * const si5341_input_clock_names[] = {
};
/* Output configuration registers 0..9 are not quite logically organized */
+/* Also for si5345 */
static const u16 si5341_reg_output_offset[] = {
0x0108,
0x010D,
@@ -155,6 +169,7 @@ static const u16 si5341_reg_output_offset[] = {
0x013A,
};
+/* for si5340, si5342 and si5344 */
static const u16 si5340_reg_output_offset[] = {
0x0112,
0x0117,
@@ -974,12 +989,32 @@ static int si5341_probe_chip_id(struct clk_si5341 *data)
data->reg_output_offset = si5341_reg_output_offset;
data->reg_rdiv_offset = si5341_reg_rdiv_offset;
break;
+ case 0x5342:
+ data->num_outputs = SI5342_MAX_NUM_OUTPUTS;
+ data->num_synth = SI5342_NUM_SYNTH;
+ data->reg_output_offset = si5340_reg_output_offset;
+ data->reg_rdiv_offset = si5340_reg_rdiv_offset;
+ break;
+ case 0x5344:
+ data->num_outputs = SI5344_MAX_NUM_OUTPUTS;
+ data->num_synth = SI5344_NUM_SYNTH;
+ data->reg_output_offset = si5340_reg_output_offset;
+ data->reg_rdiv_offset = si5340_reg_rdiv_offset;
+ break;
+ case 0x5345:
+ data->num_outputs = SI5345_MAX_NUM_OUTPUTS;
+ data->num_synth = SI5345_NUM_SYNTH;
+ data->reg_output_offset = si5341_reg_output_offset;
+ data->reg_rdiv_offset = si5341_reg_rdiv_offset;
+ break;
default:
dev_err(&data->i2c_client->dev, "Model '%x' not supported\n",
model);
return -EINVAL;
}
+ data->chip_id = model;
+
return 0;
}
@@ -1054,6 +1089,11 @@ static const struct si5341_reg_default si5341_preamble[] = {
{ 0x0B4E, 0x1A },
};
+static const struct si5341_reg_default si5345_preamble[] = {
+ { 0x0B25, 0x00 },
+ { 0x0540, 0x01 },
+};
+
static int si5341_send_preamble(struct clk_si5341 *data)
{
int res;
@@ -1068,8 +1108,14 @@ static int si5341_send_preamble(struct clk_si5341 *data)
res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xD8 : 0xC0);
if (res < 0)
return res;
- res = si5341_write_multiple(data,
- si5341_preamble, ARRAY_SIZE(si5341_preamble));
+
+ /* The si5342..si5345 require a different preamble */
+ if (data->chip_id > 0x5341)
+ res = si5341_write_multiple(data,
+ si5345_preamble, ARRAY_SIZE(si5345_preamble));
+ else
+ res = si5341_write_multiple(data,
+ si5341_preamble, ARRAY_SIZE(si5341_preamble));
if (res < 0)
return res;
@@ -1095,6 +1141,13 @@ static int si5341_finalize_defaults(struct clk_si5341 *data)
if (res < 0)
return res;
+ /* The si5342..si5345 have an additional post-amble */
+ if (data->chip_id > 0x5341) {
+ res = regmap_write(data->regmap, 0x540, 0x0);
+ if (res < 0)
+ return res;
+ }
+
/* Datasheet does not explain these nameless registers */
res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xDB : 0xC3);
if (res < 0)
@@ -1499,6 +1552,9 @@ static int si5341_probe(struct i2c_client *client,
static const struct i2c_device_id si5341_id[] = {
{ "si5340", 0 },
{ "si5341", 1 },
+ { "si5342", 2 },
+ { "si5344", 4 },
+ { "si5345", 5 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si5341_id);
@@ -1506,6 +1562,9 @@ MODULE_DEVICE_TABLE(i2c, si5341_id);
static const struct of_device_id clk_si5341_of_match[] = {
{ .compatible = "silabs,si5340" },
{ .compatible = "silabs,si5341" },
+ { .compatible = "silabs,si5342" },
+ { .compatible = "silabs,si5344" },
+ { .compatible = "silabs,si5345" },
{ }
};
MODULE_DEVICE_TABLE(of, clk_si5341_of_match);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 24fef51fbcb5..fa96659f8023 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -124,6 +124,7 @@ enum vc5_model {
IDT_VC5_5P49V5933,
IDT_VC5_5P49V5935,
IDT_VC6_5P49V6901,
+ IDT_VC6_5P49V6965,
};
/* Structure to describe features of a particular VC5 model */
@@ -683,6 +684,7 @@ static int vc5_map_index_to_output(const enum vc5_model model,
case IDT_VC5_5P49V5925:
case IDT_VC5_5P49V5935:
case IDT_VC6_5P49V6901:
+ case IDT_VC6_5P49V6965:
default:
return n;
}
@@ -956,12 +958,20 @@ static const struct vc5_chip_info idt_5p49v6901_info = {
.flags = VC5_HAS_PFD_FREQ_DBL,
};
+static const struct vc5_chip_info idt_5p49v6965_info = {
+ .model = IDT_VC6_5P49V6965,
+ .clk_fod_cnt = 4,
+ .clk_out_cnt = 5,
+ .flags = 0,
+};
+
static const struct i2c_device_id vc5_id[] = {
{ "5p49v5923", .driver_data = IDT_VC5_5P49V5923 },
{ "5p49v5925", .driver_data = IDT_VC5_5P49V5925 },
{ "5p49v5933", .driver_data = IDT_VC5_5P49V5933 },
{ "5p49v5935", .driver_data = IDT_VC5_5P49V5935 },
{ "5p49v6901", .driver_data = IDT_VC6_5P49V6901 },
+ { "5p49v6965", .driver_data = IDT_VC6_5P49V6965 },
{ }
};
MODULE_DEVICE_TABLE(i2c, vc5_id);
@@ -972,6 +982,7 @@ static const struct of_device_id clk_vc5_of_match[] = {
{ .compatible = "idt,5p49v5933", .data = &idt_5p49v5933_info },
{ .compatible = "idt,5p49v5935", .data = &idt_5p49v5935_info },
{ .compatible = "idt,5p49v6901", .data = &idt_5p49v6901_info },
+ { .compatible = "idt,5p49v6965", .data = &idt_5p49v6965_info },
{ },
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 407f6919604c..3f588ed06ce3 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3299,10 +3299,6 @@ static int __init clk_debug_init(void)
late_initcall(clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
-static inline void clk_debug_reparent(struct clk_core *core,
- struct clk_core *new_parent)
-{
-}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 01eadee88d66..db0253fa3d64 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -10,25 +10,25 @@ config MXC_CLK_SCU
config CLK_IMX8MM
bool "IMX8MM CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MM CCM Clock Driver
config CLK_IMX8MN
bool "IMX8MN CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MN CCM Clock Driver
config CLK_IMX8MP
bool "IMX8MP CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MP CCM Clock Driver
config CLK_IMX8MQ
bool "IMX8MQ CCM Clock Driver"
- depends on ARCH_MXC && ARM64
+ depends on ARCH_MXC
help
Build the driver for i.MX8MQ CCM Clock Driver
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 99773519b5a5..d2b5af826f2c 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -124,6 +124,52 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = {
.set_rate = imx8m_clk_composite_divider_set_rate,
};
+static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
+{
+ return clk_mux_ops.get_parent(hw);
+}
+
+static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+
+ reg = readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
+ val = val << mux->shift;
+ reg |= val;
+ /*
+ * Write twice to make sure the non-target interface
+ * SEL_A/B points to the same clk input.
+ */
+ writel(reg, mux->reg);
+ writel(reg, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+
+ return 0;
+}
+
+static int
+imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_ops.determine_rate(hw, req);
+}
+
+
+static const struct clk_ops imx8m_clk_composite_mux_ops = {
+ .get_parent = imx8m_clk_composite_mux_get_parent,
+ .set_parent = imx8m_clk_composite_mux_set_parent,
+ .determine_rate = imx8m_clk_composite_mux_determine_rate,
+};
+
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
@@ -136,6 +182,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
const struct clk_ops *divider_ops;
+ const struct clk_ops *mux_ops;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -157,10 +204,17 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
div->shift = PCG_DIV_SHIFT;
div->width = PCG_CORE_DIV_WIDTH;
divider_ops = &clk_divider_ops;
+ mux_ops = &imx8m_clk_composite_mux_ops;
+ } else if (composite_flags & IMX_COMPOSITE_BUS) {
+ div->shift = PCG_PREDIV_SHIFT;
+ div->width = PCG_PREDIV_WIDTH;
+ divider_ops = &imx8m_clk_composite_divider_ops;
+ mux_ops = &imx8m_clk_composite_mux_ops;
} else {
div->shift = PCG_PREDIV_SHIFT;
div->width = PCG_PREDIV_WIDTH;
divider_ops = &imx8m_clk_composite_divider_ops;
+ mux_ops = &clk_mux_ops;
}
div->lock = &imx_ccm_lock;
@@ -176,7 +230,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
gate->lock = &imx_ccm_lock;
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
- mux_hw, &clk_mux_ops, div_hw,
+ mux_hw, mux_ops, div_hw,
divider_ops, gate_hw, &clk_gate_ops, flags);
if (IS_ERR(hw))
goto fail;
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index ce0060e8873e..b87ab3c3ba1e 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -41,21 +41,26 @@ static int clk_gate2_enable(struct clk_hw *hw)
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(gate->lock, flags);
if (gate->share_count && (*gate->share_count)++ > 0)
goto out;
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- reg |= gate->cgr_val << gate->bit_idx;
- writel(reg, gate->reg);
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
+ ret = clk_gate_ops.enable(hw);
+ } else {
+ reg = readl(gate->reg);
+ reg &= ~(3 << gate->bit_idx);
+ reg |= gate->cgr_val << gate->bit_idx;
+ writel(reg, gate->reg);
+ }
out:
spin_unlock_irqrestore(gate->lock, flags);
- return 0;
+ return ret;
}
static void clk_gate2_disable(struct clk_hw *hw)
@@ -73,9 +78,13 @@ static void clk_gate2_disable(struct clk_hw *hw)
goto out;
}
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- writel(reg, gate->reg);
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
+ clk_gate_ops.disable(hw);
+ } else {
+ reg = readl(gate->reg);
+ reg &= ~(3 << gate->bit_idx);
+ writel(reg, gate->reg);
+ }
out:
spin_unlock_irqrestore(gate->lock, flags);
@@ -95,6 +104,9 @@ static int clk_gate2_is_enabled(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
+ return clk_gate_ops.is_enabled(hw);
+
return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
}
@@ -104,6 +116,9 @@ static void clk_gate2_disable_unused(struct clk_hw *hw)
unsigned long flags;
u32 reg;
+ if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
+ return;
+
spin_lock_irqsave(gate->lock, flags);
if (!gate->share_count || *gate->share_count == 0) {
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index dafc8806b03e..5dbb6a937732 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -503,7 +503,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_prepare_enable(hws[IMX6UL_CLK_USBPHY2_GATE]->clk);
}
- clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_60M]->clk);
+ clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_80M]->clk);
if (clk_on_imx6ul())
clk_set_parent(hws[IMX6UL_CLK_SIM_PRE_SEL]->clk, hws[IMX6UL_CLK_PLL3_USB_OTG]->clk);
else if (clk_on_imx6ull())
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 3710aa0dee9b..634c0b6636b0 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -29,7 +29,7 @@ static const char * const ddr_sels[] = { "apll_pfd_sel", "dummy", "dummy", "dum
static const char * const nic_sels[] = { "firc", "ddr_clk", };
static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", };
static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "dummy", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
-static const char * const arm_sels[] = { "divcore", "dummy", "dummy", "hsrun_divcore", };
+static const char * const arm_sels[] = { "core", "dummy", "dummy", "hsrun_core", };
/* used by sosc/sirc/firc/ddr/spll/apll dividers */
static const struct clk_div_table ulp_div_table[] = {
@@ -121,7 +121,9 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
hws[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 2, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
hws[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
+ hws[IMX7ULP_CLK_CORE] = imx_clk_hw_cpu("core", "divcore", hws[IMX7ULP_CLK_CORE_DIV]->clk, hws[IMX7ULP_CLK_SYS_SEL]->clk, hws[IMX7ULP_CLK_SPLL_SEL]->clk, hws[IMX7ULP_CLK_FIRC]->clk);
hws[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
+ hws[IMX7ULP_CLK_HSRUN_CORE] = imx_clk_hw_cpu("hsrun_core", "hsrun_divcore", hws[IMX7ULP_CLK_HSRUN_CORE_DIV]->clk, hws[IMX7ULP_CLK_HSRUN_SYS_SEL]->clk, hws[IMX7ULP_CLK_SPLL_SEL]->clk, hws[IMX7ULP_CLK_FIRC]->clk);
hws[IMX7ULP_CLK_DDR_DIV] = imx_clk_hw_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
0, ulp_div_table, &imx_ccm_lock);
@@ -270,7 +272,7 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np)
base = of_iomap(np, 0);
WARN_ON(!base);
- hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
+ hws[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_SET_RATE_PARENT);
imx_check_clk_hws(hws, clk_data->num);
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index 925670438f23..b793264c21c6 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -416,9 +416,9 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
return PTR_ERR(base);
/* Core Slice */
- hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels));
- hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MM_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mm_a53_sels, base + 0x8000);
+ hws[IMX8MM_CLK_A53_CG] = hws[IMX8MM_CLK_A53_DIV];
+ hws[IMX8MM_CLK_A53_SRC] = hws[IMX8MM_CLK_A53_DIV];
hws[IMX8MM_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mm_m4_sels, base + 0x8080);
hws[IMX8MM_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mm_vpu_sels, base + 0x8100);
@@ -444,21 +444,21 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800);
- hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
+ hws[IMX8MM_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mm_enet_axi_sels, base + 0x8880);
hws[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900);
- hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
- hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
- hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
- hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
- hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
- hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
- hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MM_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980);
+ hws[IMX8MM_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MM_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_hw_composite_bus("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00);
+ hws[IMX8MM_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MM_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MM_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80);
hws[IMX8MM_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00);
hws[IMX8MM_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80);
/* AHB */
hws[IMX8MM_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000);
- hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100);
/* IPG */
hws[IMX8MM_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
@@ -614,9 +614,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
hws[IMX8MM_ARM_PLL_OUT]->clk,
hws[IMX8MM_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MM_CLK_A53_SRC], hws[IMX8MM_SYS_PLL1_800M]);
- clk_hw_set_parent(hws[IMX8MM_CLK_A53_CORE], hws[IMX8MM_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MM_CLK_END);
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index 0bc7070235bd..213cc37b3173 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -413,9 +413,9 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
}
/* CORE */
- hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels));
- hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28);
- hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MN_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mn_a53_sels, base + 0x8000);
+ hws[IMX8MN_CLK_A53_SRC] = hws[IMX8MN_CLK_A53_DIV];
+ hws[IMX8MN_CLK_A53_CG] = hws[IMX8MN_CLK_A53_DIV];
hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180);
hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200);
@@ -432,17 +432,17 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800);
- hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
- hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
- hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
- hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
- hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
- hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
- hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MN_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mn_enet_axi_sels, base + 0x8880);
+ hws[IMX8MN_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mn_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MN_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mn_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MN_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mn_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MN_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mn_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MN_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mn_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MN_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mn_gpu_ahb_sels, base + 0x8c80);
hws[IMX8MN_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mn_noc_sels, base + 0x8d00);
hws[IMX8MN_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mn_ahb_sels, base + 0x9000);
- hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MN_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mn_audio_ahb_sels, base + 0x9100);
hws[IMX8MN_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
hws[IMX8MN_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
hws[IMX8MN_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mn_dram_core_sels, ARRAY_SIZE(imx8mn_dram_core_sels), CLK_IS_CRITICAL);
@@ -565,9 +565,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
hws[IMX8MN_ARM_PLL_OUT]->clk,
hws[IMX8MN_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MN_CLK_A53_SRC], hws[IMX8MN_SYS_PLL1_800M]);
- clk_hw_set_parent(hws[IMX8MN_CLK_A53_CORE], hws[IMX8MN_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MN_CLK_END);
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 41469e2cc3de..b4d9db9d5bf1 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -486,16 +486,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_SYS_PLL2] = imx_clk_hw_pll14xx("sys_pll2", "sys_pll2_ref_sel", anatop_base + 0x104, &imx_1416x_pll);
hws[IMX8MP_SYS_PLL3] = imx_clk_hw_pll14xx("sys_pll3", "sys_pll3_ref_sel", anatop_base + 0x114, &imx_1416x_pll);
- hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_AUDIO_PLL1_BYPASS] = imx_clk_hw_mux_flags("audio_pll1_bypass", anatop_base, 16, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_AUDIO_PLL2_BYPASS] = imx_clk_hw_mux_flags("audio_pll2_bypass", anatop_base + 0x14, 16, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_VIDEO_PLL1_BYPASS] = imx_clk_hw_mux_flags("video_pll1_bypass", anatop_base + 0x28, 16, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_DRAM_PLL_BYPASS] = imx_clk_hw_mux_flags("dram_pll_bypass", anatop_base + 0x50, 16, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_GPU_PLL_BYPASS] = imx_clk_hw_mux_flags("gpu_pll_bypass", anatop_base + 0x64, 28, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_VPU_PLL_BYPASS] = imx_clk_hw_mux_flags("vpu_pll_bypass", anatop_base + 0x74, 28, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_ARM_PLL_BYPASS] = imx_clk_hw_mux_flags("arm_pll_bypass", anatop_base + 0x84, 28, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL1_BYPASS] = imx_clk_hw_mux_flags("sys_pll1_bypass", anatop_base + 0x94, 28, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL2_BYPASS] = imx_clk_hw_mux_flags("sys_pll2_bypass", anatop_base + 0x104, 28, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT);
+ hws[IMX8MP_SYS_PLL3_BYPASS] = imx_clk_hw_mux_flags("sys_pll3_bypass", anatop_base + 0x114, 28, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX8MP_AUDIO_PLL1_OUT] = imx_clk_hw_gate("audio_pll1_out", "audio_pll1_bypass", anatop_base, 13);
hws[IMX8MP_AUDIO_PLL2_OUT] = imx_clk_hw_gate("audio_pll2_out", "audio_pll2_bypass", anatop_base + 0x14, 13);
@@ -504,79 +504,82 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_GPU_PLL_OUT] = imx_clk_hw_gate("gpu_pll_out", "gpu_pll_bypass", anatop_base + 0x64, 11);
hws[IMX8MP_VPU_PLL_OUT] = imx_clk_hw_gate("vpu_pll_out", "vpu_pll_bypass", anatop_base + 0x74, 11);
hws[IMX8MP_ARM_PLL_OUT] = imx_clk_hw_gate("arm_pll_out", "arm_pll_bypass", anatop_base + 0x84, 11);
- hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
- hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
hws[IMX8MP_SYS_PLL3_OUT] = imx_clk_hw_gate("sys_pll3_out", "sys_pll3_bypass", anatop_base + 0x114, 11);
- hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20);
- hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10);
- hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8);
- hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6);
- hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5);
- hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4);
- hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3);
- hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2);
+ hws[IMX8MP_SYS_PLL1_40M_CG] = imx_clk_hw_gate("sys_pll1_40m_cg", "sys_pll1_bypass", anatop_base + 0x94, 27);
+ hws[IMX8MP_SYS_PLL1_80M_CG] = imx_clk_hw_gate("sys_pll1_80m_cg", "sys_pll1_bypass", anatop_base + 0x94, 25);
+ hws[IMX8MP_SYS_PLL1_100M_CG] = imx_clk_hw_gate("sys_pll1_100m_cg", "sys_pll1_bypass", anatop_base + 0x94, 23);
+ hws[IMX8MP_SYS_PLL1_133M_CG] = imx_clk_hw_gate("sys_pll1_133m_cg", "sys_pll1_bypass", anatop_base + 0x94, 21);
+ hws[IMX8MP_SYS_PLL1_160M_CG] = imx_clk_hw_gate("sys_pll1_160m_cg", "sys_pll1_bypass", anatop_base + 0x94, 19);
+ hws[IMX8MP_SYS_PLL1_200M_CG] = imx_clk_hw_gate("sys_pll1_200m_cg", "sys_pll1_bypass", anatop_base + 0x94, 17);
+ hws[IMX8MP_SYS_PLL1_266M_CG] = imx_clk_hw_gate("sys_pll1_266m_cg", "sys_pll1_bypass", anatop_base + 0x94, 15);
+ hws[IMX8MP_SYS_PLL1_400M_CG] = imx_clk_hw_gate("sys_pll1_400m_cg", "sys_pll1_bypass", anatop_base + 0x94, 13);
+ hws[IMX8MP_SYS_PLL1_OUT] = imx_clk_hw_gate("sys_pll1_out", "sys_pll1_bypass", anatop_base + 0x94, 11);
+
+ hws[IMX8MP_SYS_PLL1_40M] = imx_clk_hw_fixed_factor("sys_pll1_40m", "sys_pll1_40m_cg", 1, 20);
+ hws[IMX8MP_SYS_PLL1_80M] = imx_clk_hw_fixed_factor("sys_pll1_80m", "sys_pll1_80m_cg", 1, 10);
+ hws[IMX8MP_SYS_PLL1_100M] = imx_clk_hw_fixed_factor("sys_pll1_100m", "sys_pll1_100m_cg", 1, 8);
+ hws[IMX8MP_SYS_PLL1_133M] = imx_clk_hw_fixed_factor("sys_pll1_133m", "sys_pll1_133m_cg", 1, 6);
+ hws[IMX8MP_SYS_PLL1_160M] = imx_clk_hw_fixed_factor("sys_pll1_160m", "sys_pll1_160m_cg", 1, 5);
+ hws[IMX8MP_SYS_PLL1_200M] = imx_clk_hw_fixed_factor("sys_pll1_200m", "sys_pll1_200m_cg", 1, 4);
+ hws[IMX8MP_SYS_PLL1_266M] = imx_clk_hw_fixed_factor("sys_pll1_266m", "sys_pll1_266m_cg", 1, 3);
+ hws[IMX8MP_SYS_PLL1_400M] = imx_clk_hw_fixed_factor("sys_pll1_400m", "sys_pll1_400m_cg", 1, 2);
hws[IMX8MP_SYS_PLL1_800M] = imx_clk_hw_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1);
- hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20);
- hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10);
- hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8);
- hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6);
- hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5);
- hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4);
- hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3);
- hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2);
+ hws[IMX8MP_SYS_PLL2_50M_CG] = imx_clk_hw_gate("sys_pll2_50m_cg", "sys_pll2_bypass", anatop_base + 0x104, 27);
+ hws[IMX8MP_SYS_PLL2_100M_CG] = imx_clk_hw_gate("sys_pll2_100m_cg", "sys_pll2_bypass", anatop_base + 0x104, 25);
+ hws[IMX8MP_SYS_PLL2_125M_CG] = imx_clk_hw_gate("sys_pll2_125m_cg", "sys_pll2_bypass", anatop_base + 0x104, 23);
+ hws[IMX8MP_SYS_PLL2_166M_CG] = imx_clk_hw_gate("sys_pll2_166m_cg", "sys_pll2_bypass", anatop_base + 0x104, 21);
+ hws[IMX8MP_SYS_PLL2_200M_CG] = imx_clk_hw_gate("sys_pll2_200m_cg", "sys_pll2_bypass", anatop_base + 0x104, 19);
+ hws[IMX8MP_SYS_PLL2_250M_CG] = imx_clk_hw_gate("sys_pll2_250m_cg", "sys_pll2_bypass", anatop_base + 0x104, 17);
+ hws[IMX8MP_SYS_PLL2_333M_CG] = imx_clk_hw_gate("sys_pll2_333m_cg", "sys_pll2_bypass", anatop_base + 0x104, 15);
+ hws[IMX8MP_SYS_PLL2_500M_CG] = imx_clk_hw_gate("sys_pll2_500m_cg", "sys_pll2_bypass", anatop_base + 0x104, 13);
+ hws[IMX8MP_SYS_PLL2_OUT] = imx_clk_hw_gate("sys_pll2_out", "sys_pll2_bypass", anatop_base + 0x104, 11);
+
+ hws[IMX8MP_SYS_PLL2_50M] = imx_clk_hw_fixed_factor("sys_pll2_50m", "sys_pll2_50m_cg", 1, 20);
+ hws[IMX8MP_SYS_PLL2_100M] = imx_clk_hw_fixed_factor("sys_pll2_100m", "sys_pll2_100m_cg", 1, 10);
+ hws[IMX8MP_SYS_PLL2_125M] = imx_clk_hw_fixed_factor("sys_pll2_125m", "sys_pll2_125m_cg", 1, 8);
+ hws[IMX8MP_SYS_PLL2_166M] = imx_clk_hw_fixed_factor("sys_pll2_166m", "sys_pll2_166m_cg", 1, 6);
+ hws[IMX8MP_SYS_PLL2_200M] = imx_clk_hw_fixed_factor("sys_pll2_200m", "sys_pll2_200m_cg", 1, 5);
+ hws[IMX8MP_SYS_PLL2_250M] = imx_clk_hw_fixed_factor("sys_pll2_250m", "sys_pll2_250m_cg", 1, 4);
+ hws[IMX8MP_SYS_PLL2_333M] = imx_clk_hw_fixed_factor("sys_pll2_333m", "sys_pll2_333m_cg", 1, 3);
+ hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2);
hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1);
- hws[IMX8MP_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", ccm_base + 0x8000, 24, 3, imx8mp_a53_sels, ARRAY_SIZE(imx8mp_a53_sels));
- hws[IMX8MP_CLK_M7_SRC] = imx_clk_hw_mux2("arm_m7_src", ccm_base + 0x8080, 24, 3, imx8mp_m7_sels, ARRAY_SIZE(imx8mp_m7_sels));
- hws[IMX8MP_CLK_ML_SRC] = imx_clk_hw_mux2("ml_src", ccm_base + 0x8100, 24, 3, imx8mp_ml_sels, ARRAY_SIZE(imx8mp_ml_sels));
- hws[IMX8MP_CLK_GPU3D_CORE_SRC] = imx_clk_hw_mux2("gpu3d_core_src", ccm_base + 0x8180, 24, 3, imx8mp_gpu3d_core_sels, ARRAY_SIZE(imx8mp_gpu3d_core_sels));
- hws[IMX8MP_CLK_GPU3D_SHADER_SRC] = imx_clk_hw_mux2("gpu3d_shader_src", ccm_base + 0x8200, 24, 3, imx8mp_gpu3d_shader_sels, ARRAY_SIZE(imx8mp_gpu3d_shader_sels));
- hws[IMX8MP_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", ccm_base + 0x8280, 24, 3, imx8mp_gpu2d_sels, ARRAY_SIZE(imx8mp_gpu2d_sels));
- hws[IMX8MP_CLK_AUDIO_AXI_SRC] = imx_clk_hw_mux2("audio_axi_src", ccm_base + 0x8300, 24, 3, imx8mp_audio_axi_sels, ARRAY_SIZE(imx8mp_audio_axi_sels));
- hws[IMX8MP_CLK_HSIO_AXI_SRC] = imx_clk_hw_mux2("hsio_axi_src", ccm_base + 0x8380, 24, 3, imx8mp_hsio_axi_sels, ARRAY_SIZE(imx8mp_hsio_axi_sels));
- hws[IMX8MP_CLK_MEDIA_ISP_SRC] = imx_clk_hw_mux2("media_isp_src", ccm_base + 0x8400, 24, 3, imx8mp_media_isp_sels, ARRAY_SIZE(imx8mp_media_isp_sels));
- hws[IMX8MP_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", ccm_base + 0x8000, 28);
- hws[IMX8MP_CLK_M4_CG] = imx_clk_hw_gate3("arm_m7_cg", "arm_m7_src", ccm_base + 0x8080, 28);
- hws[IMX8MP_CLK_ML_CG] = imx_clk_hw_gate3("ml_cg", "ml_src", ccm_base + 0x8100, 28);
- hws[IMX8MP_CLK_GPU3D_CORE_CG] = imx_clk_hw_gate3("gpu3d_core_cg", "gpu3d_core_src", ccm_base + 0x8180, 28);
- hws[IMX8MP_CLK_GPU3D_SHADER_CG] = imx_clk_hw_gate3("gpu3d_shader_cg", "gpu3d_shader_src", ccm_base + 0x8200, 28);
- hws[IMX8MP_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", ccm_base + 0x8280, 28);
- hws[IMX8MP_CLK_AUDIO_AXI_CG] = imx_clk_hw_gate3("audio_axi_cg", "audio_axi_src", ccm_base + 0x8300, 28);
- hws[IMX8MP_CLK_HSIO_AXI_CG] = imx_clk_hw_gate3("hsio_axi_cg", "hsio_axi_src", ccm_base + 0x8380, 28);
- hws[IMX8MP_CLK_MEDIA_ISP_CG] = imx_clk_hw_gate3("media_isp_cg", "media_isp_src", ccm_base + 0x8400, 28);
- hws[IMX8MP_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", ccm_base + 0x8000, 0, 3);
- hws[IMX8MP_CLK_M7_DIV] = imx_clk_hw_divider2("arm_m7_div", "arm_m7_cg", ccm_base + 0x8080, 0, 3);
- hws[IMX8MP_CLK_ML_DIV] = imx_clk_hw_divider2("ml_div", "ml_cg", ccm_base + 0x8100, 0, 3);
- hws[IMX8MP_CLK_GPU3D_CORE_DIV] = imx_clk_hw_divider2("gpu3d_core_div", "gpu3d_core_cg", ccm_base + 0x8180, 0, 3);
- hws[IMX8MP_CLK_GPU3D_SHADER_DIV] = imx_clk_hw_divider2("gpu3d_shader_div", "gpu3d_shader_cg", ccm_base + 0x8200, 0, 3);
- hws[IMX8MP_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", ccm_base + 0x8280, 0, 3);
- hws[IMX8MP_CLK_AUDIO_AXI_DIV] = imx_clk_hw_divider2("audio_axi_div", "audio_axi_cg", ccm_base + 0x8300, 0, 3);
- hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3);
- hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3);
+ hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000);
+ hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV];
+ hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV];
+ hws[IMX8MP_CLK_M7_CORE] = imx8m_clk_hw_composite_core("m7_core", imx8mp_m7_sels, ccm_base + 0x8080);
+ hws[IMX8MP_CLK_ML_CORE] = imx8m_clk_hw_composite_core("ml_core", imx8mp_ml_sels, ccm_base + 0x8100);
+ hws[IMX8MP_CLK_GPU3D_CORE] = imx8m_clk_hw_composite_core("gpu3d_core", imx8mp_gpu3d_core_sels, ccm_base + 0x8180);
+ hws[IMX8MP_CLK_GPU3D_SHADER_CORE] = imx8m_clk_hw_composite("gpu3d_shader_core", imx8mp_gpu3d_shader_sels, ccm_base + 0x8200);
+ hws[IMX8MP_CLK_GPU2D_CORE] = imx8m_clk_hw_composite("gpu2d_core", imx8mp_gpu2d_sels, ccm_base + 0x8280);
+ hws[IMX8MP_CLK_AUDIO_AXI] = imx8m_clk_hw_composite("audio_axi", imx8mp_audio_axi_sels, ccm_base + 0x8300);
+ hws[IMX8MP_CLK_AUDIO_AXI_SRC] = hws[IMX8MP_CLK_AUDIO_AXI];
+ hws[IMX8MP_CLK_HSIO_AXI] = imx8m_clk_hw_composite("hsio_axi", imx8mp_hsio_axi_sels, ccm_base + 0x8380);
+ hws[IMX8MP_CLK_MEDIA_ISP] = imx8m_clk_hw_composite("media_isp", imx8mp_media_isp_sels, ccm_base + 0x8400);
/* CORE SEL */
hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels));
hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800);
- hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
+ hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880);
hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900);
- hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
- hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
- hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
- hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
- hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
- hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
- hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
+ hws[IMX8MP_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mp_vpu_bus_sels, ccm_base + 0x8980);
+ hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite_bus("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
+ hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite_bus("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
+ hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite_bus("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
+ hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite_bus("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
+ hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
+ hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
hws[IMX8MP_CLK_NOC_IO] = imx8m_clk_hw_composite_critical("noc_io", imx8mp_noc_io_sels, ccm_base + 0x8d80);
- hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
- hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
+ hws[IMX8MP_CLK_ML_AXI] = imx8m_clk_hw_composite_bus("ml_axi", imx8mp_ml_axi_sels, ccm_base + 0x8e00);
+ hws[IMX8MP_CLK_ML_AHB] = imx8m_clk_hw_composite_bus("ml_ahb", imx8mp_ml_ahb_sels, ccm_base + 0x8e80);
hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000);
- hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
- hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
+ hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100);
+ hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200);
hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1);
hws[IMX8MP_CLK_IPG_AUDIO_ROOT] = imx_clk_hw_divider2("ipg_audio_root", "audio_ahb", ccm_base + 0x9180, 0, 1);
@@ -695,8 +698,8 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
- hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
- hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
+ hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0);
+ hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0);
hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0);
hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0);
hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0);
@@ -713,7 +716,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_axi", ccm_base + 0x4570, 0);
hws[IMX8MP_CLK_VPU_VC8KE_ROOT] = imx_clk_hw_gate4("vpu_vc8ke_root_clk", "vpu_vc8000e", ccm_base + 0x4590, 0);
hws[IMX8MP_CLK_VPU_G2_ROOT] = imx_clk_hw_gate4("vpu_g2_root_clk", "vpu_g2", ccm_base + 0x45a0, 0);
- hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_div", ccm_base + 0x45b0, 0);
+ hws[IMX8MP_CLK_NPU_ROOT] = imx_clk_hw_gate4("npu_root_clk", "ml_core", ccm_base + 0x45b0, 0);
hws[IMX8MP_CLK_HSIO_ROOT] = imx_clk_hw_gate4("hsio_root_clk", "ipg_root", ccm_base + 0x45c0, 0);
hws[IMX8MP_CLK_MEDIA_APB_ROOT] = imx_clk_hw_gate2_shared2("media_apb_root_clk", "media_apb", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_AXI_ROOT] = imx_clk_hw_gate2_shared2("media_axi_root_clk", "media_axi", ccm_base + 0x45d0, 0, &share_count_media);
@@ -721,7 +724,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_cam2_pix_root_clk", "media_cam2_pix", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp1_pix_root_clk", "media_disp1_pix", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT] = imx_clk_hw_gate2_shared2("media_disp2_pix_root_clk", "media_disp2_pix", ccm_base + 0x45d0, 0, &share_count_media);
- hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp_div", ccm_base + 0x45d0, 0, &share_count_media);
+ hws[IMX8MP_CLK_MEDIA_ISP_ROOT] = imx_clk_hw_gate2_shared2("media_isp_root_clk", "media_isp", ccm_base + 0x45d0, 0, &share_count_media);
hws[IMX8MP_CLK_USDHC3_ROOT] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3", ccm_base + 0x45e0, 0);
hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
@@ -735,9 +738,6 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_ARM_PLL_OUT]->clk,
hws[IMX8MP_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MP_CLK_A53_SRC], hws[IMX8MP_SYS_PLL1_800M]);
- clk_hw_set_parent(hws[IMX8MP_CLK_A53_CORE], hws[IMX8MP_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MP_CLK_END);
of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index fdc68db68de5..a64aace213c2 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -405,9 +405,9 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
return PTR_ERR(base);
/* CORE */
- hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
- hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
- hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ hws[IMX8MQ_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mq_a53_sels, base + 0x8000);
+ hws[IMX8MQ_CLK_A53_CG] = hws[IMX8MQ_CLK_A53_DIV];
+ hws[IMX8MQ_CLK_A53_SRC] = hws[IMX8MQ_CLK_A53_DIV];
hws[IMX8MQ_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mq_arm_m4_sels, base + 0x8080);
hws[IMX8MQ_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mq_vpu_sels, base + 0x8100);
@@ -432,22 +432,22 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
/* BUS */
hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
- hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
- hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
- hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
- hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
- hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
- hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
- hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
- hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
- hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
+ hws[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_hw_composite_bus("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
+ hws[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_bus("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
+ hws[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_hw_composite_bus("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
+ hws[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_hw_composite_bus("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
+ hws[IMX8MQ_CLK_DISP_APB] = imx8m_clk_hw_composite_bus("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
+ hws[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_hw_composite_bus("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
+ hws[IMX8MQ_CLK_USB_BUS] = imx8m_clk_hw_composite_bus("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
+ hws[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_hw_composite_bus("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
+ hws[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_hw_composite_bus("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
hws[IMX8MQ_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
hws[IMX8MQ_CLK_NOC_APB] = imx8m_clk_hw_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
/* AHB */
/* AHB clock is used by the AHB bus therefore marked as critical */
hws[IMX8MQ_CLK_AHB] = imx8m_clk_hw_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
- hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+ hws[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
/* IPG */
hws[IMX8MQ_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
@@ -599,9 +599,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
hws[IMX8MQ_ARM_PLL_OUT]->clk,
hws[IMX8MQ_CLK_A53_DIV]->clk);
- clk_hw_set_parent(hws[IMX8MQ_CLK_A53_SRC], hws[IMX8MQ_SYS1_PLL_800M]);
- clk_hw_set_parent(hws[IMX8MQ_CLK_A53_CORE], hws[IMX8MQ_ARM_PLL_OUT]);
-
imx_check_clk_hws(hws, IMX8MQ_CLK_END);
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index a83bbbee77d9..f9eb189b93c0 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -378,9 +378,9 @@ static const struct clk_ops clk_pll1443x_ops = {
.set_rate = clk_pll1443x_set_rate,
};
-struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk)
+struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
+ const char *parent_name, void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
{
struct clk_pll14xx *pll;
struct clk_hw *hw;
@@ -426,7 +426,7 @@ struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
hw = &pll->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
pr_err("%s: failed to register pll %s %d\n",
__func__, name, ret);
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index df91a8244fb4..a7db93030e02 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
@@ -25,6 +26,8 @@
#define IMX7_ENET_PLL_POWER (0x1 << 5)
#define IMX7_DDR_PLL_POWER (0x1 << 20)
+#define PLL_LOCK_TIMEOUT 10000
+
/**
* struct clk_pllv3 - IMX PLL clock version 3
* @clk_hw: clock source
@@ -53,23 +56,14 @@ struct clk_pllv3 {
static int clk_pllv3_wait_lock(struct clk_pllv3 *pll)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 val = readl_relaxed(pll->base) & pll->power_bit;
/* No need to wait for lock when pll is not powered up */
if ((pll->powerup_set && !val) || (!pll->powerup_set && val))
return 0;
- /* Wait for PLL to lock */
- do {
- if (readl_relaxed(pll->base) & BM_PLL_LOCK)
- break;
- if (time_after(jiffies, timeout))
- break;
- usleep_range(50, 500);
- } while (1);
-
- return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT;
+ return readl_relaxed_poll_timeout(pll->base, val, val & BM_PLL_LOCK,
+ 500, PLL_LOCK_TIMEOUT);
}
static int clk_pllv3_prepare(struct clk_hw *hw)
diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c
index d4a2be16d132..773d8a545cdf 100644
--- a/drivers/clk/imx/clk-sscg-pll.c
+++ b/drivers/clk/imx/clk-sscg-pll.c
@@ -72,7 +72,6 @@ struct clk_sscg_pll_setup {
int divr2, divf2;
int divq;
int bypass;
-
uint64_t vco1;
uint64_t vco2;
uint64_t fout;
@@ -86,11 +85,8 @@ struct clk_sscg_pll_setup {
struct clk_sscg_pll {
struct clk_hw hw;
const struct clk_ops ops;
-
void __iomem *base;
-
struct clk_sscg_pll_setup setup;
-
u8 parent;
u8 bypass1;
u8 bypass2;
@@ -194,7 +190,6 @@ static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
struct clk_sscg_pll_setup *temp_setup,
uint64_t ref)
{
-
int ret;
if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
@@ -253,7 +248,6 @@ static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
struct clk_sscg_pll_setup *temp_setup,
uint64_t ref)
{
-
int ret;
if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
@@ -280,7 +274,6 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
temp_setup.fout_request = rate;
switch (try_bypass) {
-
case PLL_BYPASS2:
if (prate == rate) {
setup->bypass = PLL_BYPASS2;
@@ -288,11 +281,9 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
ret = 0;
}
break;
-
case PLL_BYPASS1:
ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate);
break;
-
case PLL_BYPASS_NONE:
ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate);
break;
@@ -301,7 +292,6 @@ static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
return ret;
}
-
static int clk_sscg_pll_is_prepared(struct clk_hw *hw)
{
struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index f074dd8ec42e..16adbc34e05f 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -5,6 +5,8 @@
#include <linux/spinlock.h>
#include <linux/clk-provider.h>
+#define IMX_CLK_GATE2_SINGLE_BIT 1
+
extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
@@ -131,9 +133,9 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
-struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk);
+struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
+ const char *parent_name, void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk);
struct clk_hw *imx_clk_hw_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base);
@@ -240,6 +242,13 @@ static inline struct clk *to_clk(struct clk_hw *hw)
return hw->clk;
}
+static inline struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
+ void __iomem *base,
+ const struct imx_pll14xx_clk *pll_clk)
+{
+ return imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk);
+}
+
static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
{
return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
@@ -310,6 +319,13 @@ static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *paren
shift, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_dev_clk_hw_gate(struct device *dev, const char *name,
+ const char *parent, void __iomem *reg, u8 shift)
+{
+ return clk_hw_register_gate(dev, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
static inline struct clk_hw *imx_clk_hw_gate_dis(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -355,6 +371,17 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
&imx_ccm_lock, share_count);
}
+static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
+ const char *name, const char *parent,
+ void __iomem *reg, u8 shift,
+ unsigned int *share_count)
+{
+ return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x3,
+ IMX_CLK_GATE2_SINGLE_BIT,
+ &imx_ccm_lock, share_count);
+}
+
static inline struct clk *imx_clk_gate2_cgr(const char *name,
const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
{
@@ -411,6 +438,15 @@ static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_dev_clk_hw_mux(struct device *dev,
+ const char *name, void __iomem *reg, u8 shift,
+ u8 width, const char * const *parents, int num_parents)
+{
+ return clk_hw_register_mux(dev, name, parents, num_parents,
+ CLK_SET_RATE_NO_REPARENT | CLK_SET_PARENT_GATE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
int num_parents)
@@ -473,11 +509,25 @@ static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_dev_clk_hw_mux_flags(struct device *dev,
+ const char *name,
+ void __iomem *reg, u8 shift,
+ u8 width,
+ const char * const *parents,
+ int num_parents,
+ unsigned long flags)
+{
+ return clk_hw_register_mux(dev, name, parents, num_parents,
+ flags | CLK_SET_RATE_NO_REPARENT,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
#define IMX_COMPOSITE_CORE BIT(0)
+#define IMX_COMPOSITE_BUS BIT(1)
struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
const char * const *parent_names,
@@ -486,6 +536,12 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
u32 composite_flags,
unsigned long flags);
+#define imx8m_clk_hw_composite_bus(name, parent_names, reg) \
+ imx8m_clk_hw_composite_flags(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, \
+ IMX_COMPOSITE_BUS, \
+ CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
imx8m_clk_hw_composite_flags(name, parent_names, \
ARRAY_SIZE(parent_names), reg, \
diff --git a/drivers/clk/ingenic/Kconfig b/drivers/clk/ingenic/Kconfig
index b4555b465ea6..580b0cf69ed5 100644
--- a/drivers/clk/ingenic/Kconfig
+++ b/drivers/clk/ingenic/Kconfig
@@ -55,6 +55,16 @@ config INGENIC_CGU_X1000
If building for an X1000 SoC, you want to say Y here.
+config INGENIC_CGU_X1830
+ bool "Ingenic X1830 CGU driver"
+ default MACH_X1830
+ select INGENIC_CGU_COMMON
+ help
+ Support the clocks provided by the CGU hardware on Ingenic X1830
+ and compatible SoCs.
+
+ If building for an X1830 SoC, you want to say Y here.
+
config INGENIC_TCU_CLK
bool "Ingenic JZ47xx TCU clocks driver"
default MACH_INGENIC
diff --git a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile
index 8b1dad9b74a7..aaa4bffe03c6 100644
--- a/drivers/clk/ingenic/Makefile
+++ b/drivers/clk/ingenic/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o
obj-$(CONFIG_INGENIC_CGU_JZ4780) += jz4780-cgu.o
obj-$(CONFIG_INGENIC_CGU_X1000) += x1000-cgu.o
+obj-$(CONFIG_INGENIC_CGU_X1830) += x1830-cgu.o
obj-$(CONFIG_INGENIC_TCU_CLK) += tcu.o
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 6e963031cd87..d7981b670221 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -76,16 +76,13 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
const struct ingenic_cgu_pll_info *pll_info;
unsigned m, n, od_enc, od;
bool bypass;
- unsigned long flags;
u32 ctl;
clk_info = &cgu->clock_info[ingenic_clk->idx];
BUG_ON(clk_info->type != CGU_CLK_PLL);
pll_info = &clk_info->pll;
- spin_lock_irqsave(&cgu->lock, flags);
ctl = readl(cgu->base + pll_info->reg);
- spin_unlock_irqrestore(&cgu->lock, flags);
m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
m += pll_info->m_offset;
@@ -93,6 +90,9 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
n += pll_info->n_offset;
od_enc = ctl >> pll_info->od_shift;
od_enc &= GENMASK(pll_info->od_bits - 1, 0);
+
+ ctl = readl(cgu->base + pll_info->bypass_reg);
+
bypass = !pll_info->no_bypass_bit &&
!!(ctl & BIT(pll_info->bypass_bit));
@@ -106,7 +106,8 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
BUG_ON(od == pll_info->od_max);
od++;
- return div_u64((u64)parent_rate * m, n * od);
+ return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
+ n * od);
}
static unsigned long
@@ -139,7 +140,8 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
if (pod)
*pod = od;
- return div_u64((u64)parent_rate * m, n * od);
+ return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
+ n * od);
}
static inline const struct ingenic_cgu_clk_info *to_clk_info(
@@ -212,9 +214,14 @@ static int ingenic_pll_enable(struct clk_hw *hw)
u32 ctl;
spin_lock_irqsave(&cgu->lock, flags);
- ctl = readl(cgu->base + pll_info->reg);
+ ctl = readl(cgu->base + pll_info->bypass_reg);
ctl &= ~BIT(pll_info->bypass_bit);
+
+ writel(ctl, cgu->base + pll_info->bypass_reg);
+
+ ctl = readl(cgu->base + pll_info->reg);
+
ctl |= BIT(pll_info->enable_bit);
writel(ctl, cgu->base + pll_info->reg);
@@ -259,12 +266,9 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
- unsigned long flags;
u32 ctl;
- spin_lock_irqsave(&cgu->lock, flags);
ctl = readl(cgu->base + pll_info->reg);
- spin_unlock_irqrestore(&cgu->lock, flags);
return !!(ctl & BIT(pll_info->enable_bit));
}
@@ -562,16 +566,12 @@ static int ingenic_clk_is_enabled(struct clk_hw *hw)
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
- unsigned long flags;
int enabled = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
- if (clk_info->type & CGU_CLK_GATE) {
- spin_lock_irqsave(&cgu->lock, flags);
+ if (clk_info->type & CGU_CLK_GATE)
enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
- spin_unlock_irqrestore(&cgu->lock, flags);
- }
return enabled;
}
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 0dc8004079ee..2c75ef4a36f5 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -17,6 +17,7 @@
/**
* struct ingenic_cgu_pll_info - information about a PLL
* @reg: the offset of the PLL's control register within the CGU
+ * @rate_multiplier: the multiplier needed by the PLL rate calculation
* @m_shift: the number of bits to shift the multiplier value by (ie. the
* index of the lowest bit of the multiplier value in the PLL's
* control register)
@@ -37,6 +38,7 @@
* @od_encoding: a pointer to an array mapping post-VCO divider values to
* their encoded values in the PLL control register, or -1 for
* unsupported values
+ * @bypass_reg: the offset of the bypass control register within the CGU
* @bypass_bit: the index of the bypass bit in the PLL control register
* @enable_bit: the index of the enable bit in the PLL control register
* @stable_bit: the index of the stable bit in the PLL control register
@@ -44,10 +46,12 @@
*/
struct ingenic_cgu_pll_info {
unsigned reg;
+ unsigned rate_multiplier;
const s8 *od_encoding;
u8 m_shift, m_bits, m_offset;
u8 n_shift, n_bits, n_offset;
u8 od_shift, od_bits, od_max;
+ unsigned bypass_reg;
u8 bypass_bit;
u8 enable_bit;
u8 stable_bit;
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index a3b4635f6278..8c38e72d14a7 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -9,7 +9,9 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4725b-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -54,6 +56,7 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
.parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
.pll = {
.reg = CGU_REG_CPPCR,
+ .rate_multiplier = 1,
.m_shift = 23,
.m_bits = 9,
.m_offset = 2,
@@ -65,6 +68,7 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
.od_max = 4,
.od_encoding = pll_od_encoding,
.stable_bit = 10,
+ .bypass_reg = CGU_REG_CPPCR,
.bypass_bit = 9,
.enable_bit = 8,
},
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 4f0e92c877d6..c0ac9196a581 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -10,7 +10,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4740-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -69,6 +71,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
.parents = { JZ4740_CLK_EXT, -1, -1, -1 },
.pll = {
.reg = CGU_REG_CPPCR,
+ .rate_multiplier = 1,
.m_shift = 23,
.m_bits = 9,
.m_offset = 2,
@@ -80,6 +83,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
.od_max = 4,
.od_encoding = pll_od_encoding,
.stable_bit = 10,
+ .bypass_reg = CGU_REG_CPPCR,
.bypass_bit = 9,
.enable_bit = 8,
},
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index c051ecba5cf8..9ea4490ecb7f 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -9,7 +9,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/jz4770-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -102,6 +104,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_EXT },
.pll = {
.reg = CGU_REG_CPPCR0,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -112,6 +115,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR0,
.bypass_bit = 9,
.enable_bit = 8,
.stable_bit = 10,
@@ -124,6 +128,7 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_EXT },
.pll = {
.reg = CGU_REG_CPPCR1,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -134,9 +139,10 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR1,
+ .no_bypass_bit = true,
.enable_bit = 7,
.stable_bit = 6,
- .no_bypass_bit = true,
},
},
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index c758f1643067..6c5b8029cc8a 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <dt-bindings/clock/jz4780-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -266,6 +267,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
#define DEF_PLL(name) { \
.reg = CGU_REG_ ## name, \
+ .rate_multiplier = 1, \
.m_shift = 19, \
.m_bits = 13, \
.m_offset = 1, \
@@ -277,6 +279,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.od_max = 16, \
.od_encoding = pll_od_encoding, \
.stable_bit = 6, \
+ .bypass_reg = CGU_REG_ ## name, \
.bypass_bit = 1, \
.enable_bit = 0, \
}
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 153a954b0d2f..9382dc3aa27e 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -323,7 +323,7 @@ static const struct ingenic_soc_info x1000_soc_info = {
.has_tcu_clk = false,
};
-static const struct of_device_id ingenic_tcu_of_match[] __initconst = {
+static const struct of_device_id __maybe_unused ingenic_tcu_of_match[] __initconst = {
{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
{ .compatible = "ingenic,jz4770-tcu", .data = &jz4770_soc_info, },
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index b22d87b3f555..453f3323cb99 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
* X1000 SoC CGU driver
- * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
+
#include <dt-bindings/clock/x1000-cgu.h>
+
#include "cgu.h"
#include "pm.h"
@@ -18,6 +21,9 @@
#define CGU_REG_CLKGR 0x20
#define CGU_REG_OPCR 0x24
#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_USBPCR 0x3c
+#define CGU_REG_USBPCR1 0x48
+#define CGU_REG_USBCDR 0x50
#define CGU_REG_MACCDR 0x54
#define CGU_REG_I2SCDR 0x60
#define CGU_REG_LPCDR 0x64
@@ -38,8 +44,47 @@
#define OPCR_SPENDN0 BIT(7)
#define OPCR_SPENDN1 BIT(6)
+/* bits within the USBPCR register */
+#define USBPCR_SIDDQ BIT(21)
+#define USBPCR_OTG_DISABLE BIT(20)
+
static struct ingenic_cgu *cgu;
+static int x1000_usb_phy_enable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel(readl(reg_opcr) | OPCR_SPENDN0, reg_opcr);
+ writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr);
+ return 0;
+}
+
+static void x1000_usb_phy_disable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel(readl(reg_opcr) & ~OPCR_SPENDN0, reg_opcr);
+ writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr);
+}
+
+static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ return (readl(reg_opcr) & OPCR_SPENDN0) &&
+ !(readl(reg_usbpcr) & USBPCR_SIDDQ) &&
+ !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE);
+}
+
+static const struct clk_ops x1000_otg_phy_ops = {
+ .enable = x1000_usb_phy_enable,
+ .disable = x1000_usb_phy_disable,
+ .is_enabled = x1000_usb_phy_is_enabled,
+};
+
static const s8 pll_od_encoding[8] = {
0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
};
@@ -58,6 +103,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.parents = { X1000_CLK_EXCLK, -1, -1, -1 },
.pll = {
.reg = CGU_REG_APLL,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -68,6 +114,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_APLL,
.bypass_bit = 9,
.enable_bit = 8,
.stable_bit = 10,
@@ -79,6 +126,7 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.parents = { X1000_CLK_EXCLK, -1, -1, -1 },
.pll = {
.reg = CGU_REG_MPLL,
+ .rate_multiplier = 1,
.m_shift = 24,
.m_bits = 7,
.m_offset = 1,
@@ -89,12 +137,22 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.od_bits = 2,
.od_max = 8,
.od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_MPLL,
.bypass_bit = 6,
.enable_bit = 7,
.stable_bit = 0,
},
},
+
+ /* Custom (SoC-specific) OTG PHY */
+
+ [X1000_CLK_OTGPHY] = {
+ "otg_phy", CGU_CLK_CUSTOM,
+ .parents = { -1, -1, X1000_CLK_EXCLK, -1 },
+ .custom = { &x1000_otg_phy_ops },
+ },
+
/* Muxes & dividers */
[X1000_CLK_SCLKA] = {
@@ -110,9 +168,10 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
},
[X1000_CLK_CPU] = {
- "cpu", CGU_CLK_DIV,
+ "cpu", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { X1000_CLK_CPUMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 30 },
},
[X1000_CLK_L2CACHE] = {
@@ -141,9 +200,10 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
},
[X1000_CLK_PCLK] = {
- "pclk", CGU_CLK_DIV,
+ "pclk", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { X1000_CLK_AHB2PMUX, -1, -1, -1 },
.div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 28 },
},
[X1000_CLK_DDR] = {
@@ -156,12 +216,20 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
[X1000_CLK_MAC] = {
"mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
- .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL },
.mux = { CGU_REG_MACCDR, 31, 1 },
.div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 },
.gate = { CGU_REG_CLKGR, 25 },
},
+ [X1000_CLK_LCD] = {
+ "lcd", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL },
+ .mux = { CGU_REG_LPCDR, 31, 1 },
+ .div = { CGU_REG_LPCDR, 0, 1, 8, 28, 27, 26 },
+ .gate = { CGU_REG_CLKGR, 23 },
+ },
+
[X1000_CLK_MSCMUX] = {
"msc_mux", CGU_CLK_MUX,
.parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL},
@@ -182,6 +250,15 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.gate = { CGU_REG_CLKGR, 5 },
},
+ [X1000_CLK_OTG] = {
+ "otg", CGU_CLK_DIV | CGU_CLK_GATE | CGU_CLK_MUX,
+ .parents = { X1000_CLK_EXCLK, -1,
+ X1000_CLK_APLL, X1000_CLK_MPLL },
+ .mux = { CGU_REG_USBCDR, 30, 2 },
+ .div = { CGU_REG_USBCDR, 0, 1, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR, 3 },
+ },
+
[X1000_CLK_SSIPLL] = {
"ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { X1000_CLK_SCLKA, X1000_CLK_MPLL, -1, -1 },
@@ -189,14 +266,32 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.div = { CGU_REG_SSICDR, 0, 1, 8, 29, 28, 27 },
},
+ [X1000_CLK_SSIPLL_DIV2] = {
+ "ssi_pll_div2", CGU_CLK_FIXDIV,
+ .parents = { X1000_CLK_SSIPLL },
+ .fixdiv = { 2 },
+ },
+
[X1000_CLK_SSIMUX] = {
"ssi_mux", CGU_CLK_MUX,
- .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL, -1, -1 },
+ .parents = { X1000_CLK_EXCLK, X1000_CLK_SSIPLL_DIV2, -1, -1 },
.mux = { CGU_REG_SSICDR, 30, 1 },
},
/* Gate-only clocks */
+ [X1000_CLK_EMC] = {
+ "emc", CGU_CLK_GATE,
+ .parents = { X1000_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 0 },
+ },
+
+ [X1000_CLK_EFUSE] = {
+ "efuse", CGU_CLK_GATE,
+ .parents = { X1000_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 1 },
+ },
+
[X1000_CLK_SFC] = {
"sfc", CGU_CLK_GATE,
.parents = { X1000_CLK_SSIPLL, -1, -1, -1 },
@@ -239,12 +334,24 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = {
.gate = { CGU_REG_CLKGR, 16 },
},
+ [X1000_CLK_TCU] = {
+ "tcu", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 18 },
+ },
+
[X1000_CLK_SSI] = {
"ssi", CGU_CLK_GATE,
.parents = { X1000_CLK_SSIMUX, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 19 },
},
+ [X1000_CLK_OST] = {
+ "ost", CGU_CLK_GATE,
+ .parents = { X1000_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR, 20 },
+ },
+
[X1000_CLK_PDMA] = {
"pdma", CGU_CLK_GATE,
.parents = { X1000_CLK_EXCLK, -1, -1, -1 },
@@ -271,4 +378,8 @@ static void __init x1000_cgu_init(struct device_node *np)
ingenic_cgu_register_syscore_ops(cgu);
}
-CLK_OF_DECLARE(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init);
+/*
+ * The CGU has some child devices; this is useful for probing those child
+ * devices in the case where the device node is also compatible with
+ * "simple-mfd".
+ */
+CLK_OF_DECLARE_DRIVER(x1000_cgu, "ingenic,x1000-cgu", x1000_cgu_init);
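
The new X1000 "otg_phy" clock is registered as CGU_CLK_CUSTOM, so its enable and disable paths are the x1000_usb_phy_* helpers above rather than a generic gate. A consumer would still drive it through the common clock API; the probe sketch below is hypothetical (the device, the "otg_phy" con_id and the probe function name are not part of this patch) and only shows the call path that ends up in x1000_usb_phy_enable():

/*
 * Hypothetical consumer sketch. Only devm_clk_get()/clk_prepare_enable()
 * are real kernel APIs; everything else is named for illustration.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_otg_probe(struct platform_device *pdev)
{
	struct clk *phy_clk;
	int ret;

	phy_clk = devm_clk_get(&pdev->dev, "otg_phy");
	if (IS_ERR(phy_clk))
		return PTR_ERR(phy_clk);

	/* Enabling this clock lands in x1000_usb_phy_enable() */
	ret = clk_prepare_enable(phy_clk);
	if (ret)
		return ret;

	return 0;
}
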
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
new file mode 100644
index 000000000000..a1b2ff0ee487
--- /dev/null
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * X1830 SoC CGU driver
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <dt-bindings/clock/x1830-cgu.h>
+
+#include "cgu.h"
+#include "pm.h"
+
+/* CGU register offsets */
+#define CGU_REG_CPCCR 0x00
+#define CGU_REG_CPPCR 0x0c
+#define CGU_REG_APLL 0x10
+#define CGU_REG_MPLL 0x14
+#define CGU_REG_CLKGR0 0x20
+#define CGU_REG_OPCR 0x24
+#define CGU_REG_CLKGR1 0x28
+#define CGU_REG_DDRCDR 0x2c
+#define CGU_REG_USBPCR 0x3c
+#define CGU_REG_USBRDT 0x40
+#define CGU_REG_USBVBFIL 0x44
+#define CGU_REG_USBPCR1 0x48
+#define CGU_REG_MACCDR 0x54
+#define CGU_REG_EPLL 0x58
+#define CGU_REG_I2SCDR 0x60
+#define CGU_REG_LPCDR 0x64
+#define CGU_REG_MSC0CDR 0x68
+#define CGU_REG_I2SCDR1 0x70
+#define CGU_REG_SSICDR 0x74
+#define CGU_REG_CIMCDR 0x7c
+#define CGU_REG_MSC1CDR 0xa4
+#define CGU_REG_CMP_INTR 0xb0
+#define CGU_REG_CMP_INTRE 0xb4
+#define CGU_REG_DRCG 0xd0
+#define CGU_REG_CPCSR 0xd4
+#define CGU_REG_VPLL 0xe0
+#define CGU_REG_MACPHYC 0xe8
+
+/* bits within the OPCR register */
+#define OPCR_GATE_USBPHYCLK BIT(23)
+#define OPCR_SPENDN0 BIT(7)
+#define OPCR_SPENDN1 BIT(6)
+
+/* bits within the USBPCR register */
+#define USBPCR_SIDDQ BIT(21)
+#define USBPCR_OTG_DISABLE BIT(20)
+
+static struct ingenic_cgu *cgu;
+
+static int x1830_usb_phy_enable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel((readl(reg_opcr) | OPCR_SPENDN0) & ~OPCR_GATE_USBPHYCLK, reg_opcr);
+ writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr);
+ return 0;
+}
+
+static void x1830_usb_phy_disable(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ writel((readl(reg_opcr) & ~OPCR_SPENDN0) | OPCR_GATE_USBPHYCLK, reg_opcr);
+ writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr);
+}
+
+static int x1830_usb_phy_is_enabled(struct clk_hw *hw)
+{
+ void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR;
+ void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR;
+
+ return (readl(reg_opcr) & OPCR_SPENDN0) &&
+ !(readl(reg_usbpcr) & USBPCR_SIDDQ) &&
+ !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE);
+}
+
+static const struct clk_ops x1830_otg_phy_ops = {
+ .enable = x1830_usb_phy_enable,
+ .disable = x1830_usb_phy_disable,
+ .is_enabled = x1830_usb_phy_is_enabled,
+};
+
+static const s8 pll_od_encoding[64] = {
+ 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
+ -1, -1, -1, -1, -1, -1, -1, 0x4,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 0x5,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 0x6,
+};
+
+static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = {
+
+ /* External clocks */
+
+ [X1830_CLK_EXCLK] = { "ext", CGU_CLK_EXT },
+ [X1830_CLK_RTCLK] = { "rtc", CGU_CLK_EXT },
+
+ /* PLLs */
+
+ [X1830_CLK_APLL] = {
+ "apll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_APLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 30,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ [X1830_CLK_MPLL] = {
+ "mpll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_MPLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 28,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ [X1830_CLK_EPLL] = {
+ "epll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_EPLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 24,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ [X1830_CLK_VPLL] = {
+ "vpll", CGU_CLK_PLL,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .pll = {
+ .reg = CGU_REG_VPLL,
+ .rate_multiplier = 2,
+ .m_shift = 20,
+ .m_bits = 9,
+ .m_offset = 1,
+ .n_shift = 14,
+ .n_bits = 6,
+ .n_offset = 1,
+ .od_shift = 11,
+ .od_bits = 3,
+ .od_max = 64,
+ .od_encoding = pll_od_encoding,
+ .bypass_reg = CGU_REG_CPPCR,
+ .bypass_bit = 26,
+ .enable_bit = 0,
+ .stable_bit = 3,
+ },
+ },
+
+ /* Custom (SoC-specific) OTG PHY */
+
+ [X1830_CLK_OTGPHY] = {
+ "otg_phy", CGU_CLK_CUSTOM,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .custom = { &x1830_otg_phy_ops },
+ },
+
+ /* Muxes & dividers */
+
+ [X1830_CLK_SCLKA] = {
+ "sclk_a", CGU_CLK_MUX,
+ .parents = { -1, X1830_CLK_EXCLK, X1830_CLK_APLL, -1 },
+ .mux = { CGU_REG_CPCCR, 30, 2 },
+ },
+
+ [X1830_CLK_CPUMUX] = {
+ "cpu_mux", CGU_CLK_MUX,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 28, 2 },
+ },
+
+ [X1830_CLK_CPU] = {
+ "cpu", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 15 },
+ },
+
+ [X1830_CLK_L2CACHE] = {
+ "l2cache", CGU_CLK_DIV,
+ .parents = { X1830_CLK_CPUMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
+ },
+
+ [X1830_CLK_AHB0] = {
+ "ahb0", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 26, 2 },
+ .div = { CGU_REG_CPCCR, 8, 1, 4, 21, -1, -1 },
+ },
+
+ [X1830_CLK_AHB2PMUX] = {
+ "ahb2_apb_mux", CGU_CLK_MUX,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_CPCCR, 24, 2 },
+ },
+
+ [X1830_CLK_AHB2] = {
+ "ahb2", CGU_CLK_DIV,
+ .parents = { X1830_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 12, 1, 4, 20, -1, -1 },
+ },
+
+ [X1830_CLK_PCLK] = {
+ "pclk", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_AHB2PMUX, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 16, 1, 4, 20, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 14 },
+ },
+
+ [X1830_CLK_DDR] = {
+ "ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 },
+ .mux = { CGU_REG_DDRCDR, 30, 2 },
+ .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR0, 31 },
+ },
+
+ [X1830_CLK_MAC] = {
+ "mac", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_MACCDR, 30, 2 },
+ .div = { CGU_REG_MACCDR, 0, 1, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR1, 4 },
+ },
+
+ [X1830_CLK_LCD] = {
+ "lcd", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_LPCDR, 30, 2 },
+ .div = { CGU_REG_LPCDR, 0, 1, 8, 28, 27, 26 },
+ .gate = { CGU_REG_CLKGR1, 9 },
+ },
+
+ [X1830_CLK_MSCMUX] = {
+ "msc_mux", CGU_CLK_MUX,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_MSC0CDR, 30, 2 },
+ },
+
+ [X1830_CLK_MSC0] = {
+ "msc0", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC0CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR0, 4 },
+ },
+
+ [X1830_CLK_MSC1] = {
+ "msc1", CGU_CLK_DIV | CGU_CLK_GATE,
+ .parents = { X1830_CLK_MSCMUX, -1, -1, -1 },
+ .div = { CGU_REG_MSC1CDR, 0, 2, 8, 29, 28, 27 },
+ .gate = { CGU_REG_CLKGR0, 5 },
+ },
+
+ [X1830_CLK_SSIPLL] = {
+ "ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
+ .parents = { X1830_CLK_SCLKA, X1830_CLK_MPLL,
+ X1830_CLK_VPLL, X1830_CLK_EPLL },
+ .mux = { CGU_REG_SSICDR, 30, 2 },
+ .div = { CGU_REG_SSICDR, 0, 1, 8, 28, 27, 26 },
+ },
+
+ [X1830_CLK_SSIPLL_DIV2] = {
+ "ssi_pll_div2", CGU_CLK_FIXDIV,
+ .parents = { X1830_CLK_SSIPLL },
+ .fixdiv = { 2 },
+ },
+
+ [X1830_CLK_SSIMUX] = {
+ "ssi_mux", CGU_CLK_MUX,
+ .parents = { X1830_CLK_EXCLK, X1830_CLK_SSIPLL_DIV2, -1, -1 },
+ .mux = { CGU_REG_SSICDR, 29, 1 },
+ },
+
+ /* Gate-only clocks */
+
+ [X1830_CLK_EMC] = {
+ "emc", CGU_CLK_GATE,
+ .parents = { X1830_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 0 },
+ },
+
+ [X1830_CLK_EFUSE] = {
+ "efuse", CGU_CLK_GATE,
+ .parents = { X1830_CLK_AHB2, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 1 },
+ },
+
+ [X1830_CLK_OTG] = {
+ "otg", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 3 },
+ },
+
+ [X1830_CLK_SSI0] = {
+ "ssi0", CGU_CLK_GATE,
+ .parents = { X1830_CLK_SSIMUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 6 },
+ },
+
+ [X1830_CLK_SMB0] = {
+ "smb0", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 7 },
+ },
+
+ [X1830_CLK_SMB1] = {
+ "smb1", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 8 },
+ },
+
+ [X1830_CLK_SMB2] = {
+ "smb2", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 9 },
+ },
+
+ [X1830_CLK_UART0] = {
+ "uart0", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 14 },
+ },
+
+ [X1830_CLK_UART1] = {
+ "uart1", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 15 },
+ },
+
+ [X1830_CLK_SSI1] = {
+ "ssi1", CGU_CLK_GATE,
+ .parents = { X1830_CLK_SSIMUX, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 19 },
+ },
+
+ [X1830_CLK_SFC] = {
+ "sfc", CGU_CLK_GATE,
+ .parents = { X1830_CLK_SSIPLL, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 20 },
+ },
+
+ [X1830_CLK_PDMA] = {
+ "pdma", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 21 },
+ },
+
+ [X1830_CLK_TCU] = {
+ "tcu", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR0, 30 },
+ },
+
+ [X1830_CLK_DTRNG] = {
+ "dtrng", CGU_CLK_GATE,
+ .parents = { X1830_CLK_PCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 1 },
+ },
+
+ [X1830_CLK_OST] = {
+ "ost", CGU_CLK_GATE,
+ .parents = { X1830_CLK_EXCLK, -1, -1, -1 },
+ .gate = { CGU_REG_CLKGR1, 11 },
+ },
+};
+
+static void __init x1830_cgu_init(struct device_node *np)
+{
+ int retval;
+
+ cgu = ingenic_cgu_new(x1830_cgu_clocks,
+ ARRAY_SIZE(x1830_cgu_clocks), np);
+ if (!cgu) {
+ pr_err("%s: failed to initialise CGU\n", __func__);
+ return;
+ }
+
+ retval = ingenic_cgu_register_clocks(cgu);
+ if (retval) {
+ pr_err("%s: failed to register CGU Clocks\n", __func__);
+ return;
+ }
+
+ ingenic_cgu_register_syscore_ops(cgu);
+}
+/*
+ * The CGU has some child devices; this is useful for probing those child
+ * devices in the case where the device node is also compatible with
+ * "simple-mfd".
+ */
+CLK_OF_DECLARE_DRIVER(x1830_cgu, "ingenic,x1830-cgu", x1830_cgu_init);
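
Both the X1000 and the new X1830 driver rely on sparse od_encoding tables: the register only understands a handful of post-divider values, and -1 marks dividers the hardware cannot produce. A minimal standalone sketch of the lookup follows; the index-equals-OD-minus-one convention is an assumption inferred from the table layout (OD 1, 2, 4, 8, ... map to encodings 0x0..0x6 in the 64-entry table above), not a quote of the CGU core:

/*
 * Illustrative lookup over a sparse od_encoding table (the short
 * jz4725b/x1000-style table is used here for brevity; the 64-entry X1830
 * table above follows the same scheme). Indexing by (od - 1) is an
 * assumption.
 */
#include <stdio.h>

static const signed char pll_od_encoding[8] = {
	0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3,
};

static int encode_od(unsigned int od)
{
	if (od < 1 || od > 8 || pll_od_encoding[od - 1] < 0)
		return -1;	/* divider not supported by the hardware */
	return pll_od_encoding[od - 1];
}

int main(void)
{
	printf("OD=4 -> %d, OD=3 -> %d\n", encode_od(4), encode_od(3));
	return 0;
}
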
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index ea3c70d1307e..1d2b7d717541 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -117,6 +117,92 @@ config COMMON_CLK_MT2712_VENCSYS
---help---
This driver supports MediaTek MT2712 vencsys clocks.
+config COMMON_CLK_MT6765
+ bool "Clock driver for MediaTek MT6765"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ help
+ This driver supports MediaTek MT6765 basic clocks.
+
+config COMMON_CLK_MT6765_AUDIOSYS
+ bool "Clock driver for MediaTek MT6765 audiosys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 audiosys clocks.
+
+config COMMON_CLK_MT6765_CAMSYS
+ bool "Clock driver for MediaTek MT6765 camsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 camsys clocks.
+
+config COMMON_CLK_MT6765_GCESYS
+ bool "Clock driver for MediaTek MT6765 gcesys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 gcesys clocks.
+
+config COMMON_CLK_MT6765_MMSYS
+ bool "Clock driver for MediaTek MT6765 mmsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mmsys clocks.
+
+config COMMON_CLK_MT6765_IMGSYS
+ bool "Clock driver for MediaTek MT6765 imgsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 imgsys clocks.
+
+config COMMON_CLK_MT6765_VCODECSYS
+ bool "Clock driver for MediaTek MT6765 vcodecsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 vcodecsys clocks.
+
+config COMMON_CLK_MT6765_MFGSYS
+ bool "Clock driver for MediaTek MT6765 mfgsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mfgsys clocks.
+
+config COMMON_CLK_MT6765_MIPI0ASYS
+ bool "Clock driver for MediaTek MT6765 mipi0asys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi0asys clocks.
+
+config COMMON_CLK_MT6765_MIPI0BSYS
+ bool "Clock driver for MediaTek MT6765 mipi0bsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi0bsys clocks.
+
+config COMMON_CLK_MT6765_MIPI1ASYS
+ bool "Clock driver for MediaTek MT6765 mipi1asys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi1asys clocks.
+
+config COMMON_CLK_MT6765_MIPI1BSYS
+ bool "Clock driver for MediaTek MT6765 mipi1bsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi1bsys clocks.
+
+config COMMON_CLK_MT6765_MIPI2ASYS
+ bool "Clock driver for MediaTek MT6765 mipi2asys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi2asys clocks.
+
+config COMMON_CLK_MT6765_MIPI2BSYS
+ bool "Clock driver for MediaTek MT6765 mipi2bsys"
+ depends on COMMON_CLK_MT6765
+ help
+ This driver supports MediaTek MT6765 mipi2bsys clocks.
+
config COMMON_CLK_MT6779
bool "Clock driver for MediaTek MT6779"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -274,6 +360,13 @@ config COMMON_CLK_MT8173
---help---
This driver supports MediaTek MT8173 clocks.
+config COMMON_CLK_MT8173_MMSYS
+ bool "Clock driver for MediaTek MT8173 mmsys"
+ depends on COMMON_CLK_MT8173
+ default COMMON_CLK_MT8173
+ help
+ This driver supports MediaTek MT8173 mmsys clocks.
+
config COMMON_CLK_MT8183
bool "Clock driver for MediaTek MT8183"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 8cdb76a5cd71..959b556d32ea 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,6 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
+obj-$(CONFIG_COMMON_CLK_MT6765) += clk-mt6765.o
+obj-$(CONFIG_COMMON_CLK_MT6765_AUDIOSYS) += clk-mt6765-audio.o
+obj-$(CONFIG_COMMON_CLK_MT6765_CAMSYS) += clk-mt6765-cam.o
+obj-$(CONFIG_COMMON_CLK_MT6765_IMGSYS) += clk-mt6765-img.o
+obj-$(CONFIG_COMMON_CLK_MT6765_MIPI0ASYS) += clk-mt6765-mipi0a.o
+obj-$(CONFIG_COMMON_CLK_MT6765_MMSYS) += clk-mt6765-mm.o
+obj-$(CONFIG_COMMON_CLK_MT6765_VCODECSYS) += clk-mt6765-vcodec.o
obj-$(CONFIG_COMMON_CLK_MT6779) += clk-mt6779.o
obj-$(CONFIG_COMMON_CLK_MT6779_MMSYS) += clk-mt6779-mm.o
obj-$(CONFIG_COMMON_CLK_MT6779_IMGSYS) += clk-mt6779-img.o
@@ -41,6 +48,7 @@ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8173_MMSYS) += clk-mt8173-mm.o
obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index 054b597d4a73..cb18e1849492 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -79,16 +79,12 @@ static const struct mtk_gate mm_clks[] = {
GATE_DISP1(CLK_MM_TVE_FMM, "mm_tve_fmm", "mm_sel", 14),
};
-static const struct of_device_id of_match_clk_mt2701_mm[] = {
- { .compatible = "mediatek,mt2701-mmsys", },
- {}
-};
-
static int clk_mt2701_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -108,7 +104,6 @@ static struct platform_driver clk_mt2701_mm_drv = {
.probe = clk_mt2701_mm_probe,
.driver = {
.name = "clk-mt2701-mm",
- .of_match_table = of_match_clk_mt2701_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index 1c5948be35f3..5519c3d68c1f 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -128,9 +128,10 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt2712_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -146,16 +147,10 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
return r;
}
-static const struct of_device_id of_match_clk_mt2712_mm[] = {
- { .compatible = "mediatek,mt2712-mmsys", },
- {}
-};
-
static struct platform_driver clk_mt2712_mm_drv = {
.probe = clk_mt2712_mm_probe,
.driver = {
.name = "clk-mt2712-mm",
- .of_match_table = of_match_clk_mt2712_mm,
},
};
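
Dropping the of_match_table from clk-mt2701-mm and clk-mt2712-mm, and reading the DT node via dev->parent->of_node, implies that these clock drivers are now instantiated as plain platform devices by a parent mmsys driver instead of being matched against the device tree directly. The parent side is not part of this hunk; the sketch below is a hypothetical illustration of how such a parent could spawn the child, with the probe function name invented for the example:

/*
 * Hypothetical parent-side sketch: spawn the clock driver as a child
 * platform device so it can reach the DT node through dev->parent->of_node.
 */
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_mmsys_probe(struct platform_device *pdev)
{
	struct platform_device *clks;

	clks = platform_device_register_data(&pdev->dev, "clk-mt2712-mm",
					     PLATFORM_DEVID_AUTO, NULL, 0);
	return PTR_ERR_OR_ZERO(clks);
}
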
diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
new file mode 100644
index 000000000000..4c989165d795
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-audio.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_ck", 2),
+ GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_engen1_ck", 8),
+ GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner",
+ "aud_engen1_ck", 19),
+ GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_ck", 24),
+ GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_ck", 25),
+ GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis",
+ "audio_ck", 26),
+ GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_ck", 27),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUDIO_I2S1_BCLK, "aud_i2s1_bclk",
+ "audio_ck", 4),
+ GATE_AUDIO1(CLK_AUDIO_I2S2_BCLK, "aud_i2s2_bclk",
+ "audio_ck", 5),
+ GATE_AUDIO1(CLK_AUDIO_I2S3_BCLK, "aud_i2s3_bclk",
+ "audio_ck", 6),
+ GATE_AUDIO1(CLK_AUDIO_I2S4_BCLK, "aud_i2s4_bclk",
+ "audio_ck", 7),
+};
+
+static int clk_mt6765_audio_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+ mtk_clk_register_gates(node, audio_clks,
+ ARRAY_SIZE(audio_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_audio[] = {
+ { .compatible = "mediatek,mt6765-audsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_audio_drv = {
+ .probe = clk_mt6765_audio_probe,
+ .driver = {
+ .name = "clk-mt6765-audio",
+ .of_match_table = of_match_clk_mt6765_audio,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_audio_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
new file mode 100644
index 000000000000..c96394893bcf
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-cam.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &cam_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0),
+ GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "mm_ck", 1),
+ GATE_CAM(CLK_CAM, "cam", "mm_ck", 6),
+ GATE_CAM(CLK_CAMTG, "camtg", "mm_ck", 7),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "mm_ck", 8),
+ GATE_CAM(CLK_CAMSV0, "camsv0", "mm_ck", 9),
+ GATE_CAM(CLK_CAMSV1, "camsv1", "mm_ck", 10),
+ GATE_CAM(CLK_CAMSV2, "camsv2", "mm_ck", 11),
+ GATE_CAM(CLK_CAM_CCU, "cam_ccu", "mm_ck", 12),
+};
+
+static int clk_mt6765_cam_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
+
+ mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_cam[] = {
+ { .compatible = "mediatek,mt6765-camsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_cam_drv = {
+ .probe = clk_mt6765_cam_probe,
+ .driver = {
+ .name = "clk-mt6765-cam",
+ .of_match_table = of_match_clk_mt6765_cam,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_cam_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
new file mode 100644
index 000000000000..6fd8bf8030fc
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-img.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0),
+ GATE_IMG(CLK_IMG_DIP, "img_dip", "mm_ck", 2),
+ GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_ck", 3),
+ GATE_IMG(CLK_IMG_DPE, "img_dpe", "mm_ck", 4),
+ GATE_IMG(CLK_IMG_RSC, "img_rsc", "mm_ck", 5),
+};
+
+static int clk_mt6765_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_img[] = {
+ { .compatible = "mediatek,mt6765-imgsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_img_drv = {
+ .probe = clk_mt6765_img_probe,
+ .driver = {
+ .name = "clk-mt6765-img",
+ .of_match_table = of_match_clk_mt6765_img,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
new file mode 100644
index 000000000000..81744d0f95a0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs mipi0a_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x80,
+ .sta_ofs = 0x80,
+};
+
+#define GATE_MIPI0A(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mipi0a_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate mipi0a_clks[] = {
+ GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A,
+ "mipi0a_csr_0a", "f_fseninf_ck", 1),
+};
+
+static int clk_mt6765_mipi0a_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MIPI0A_NR_CLK);
+
+ mtk_clk_register_gates(node, mipi0a_clks,
+ ARRAY_SIZE(mipi0a_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_mipi0a[] = {
+ { .compatible = "mediatek,mt6765-mipi0a", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_mipi0a_drv = {
+ .probe = clk_mt6765_mipi0a_probe,
+ .driver = {
+ .name = "clk-mt6765-mipi0a",
+ .of_match_table = of_match_clk_mt6765_mipi0a,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_mipi0a_drv);
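
The MT6765 sub-drivers above mix three gate-ops flavours: setclr (separate write-1-to-set and write-1-to-clear registers, as in camsys and imgsys), no_setclr (read-modify-write of a single register, as in audiosys), and the _inv variants where a set bit means the clock is running instead of gated (as in mipi0a). The standalone sketch below models only the set/clr scheme; the bit polarity is an assumption inferred from the ops names, not taken from the MediaTek clk-gate code:

/*
 * Toy model of a write-1-to-set / write-1-to-clear clock-gate register
 * bank. Polarity assumption: a set status bit means the clock is gated.
 */
#include <stdint.h>
#include <stdio.h>

struct cg_bank {
	uint32_t sta;				/* status: 1 = gated */
};

static void cg_write_set(struct cg_bank *b, uint32_t mask)	/* gate */
{
	b->sta |= mask;
}

static void cg_write_clr(struct cg_bank *b, uint32_t mask)	/* ungate */
{
	b->sta &= ~mask;
}

static int cg_is_enabled(const struct cg_bank *b, unsigned int shift)
{
	return !(b->sta & (1u << shift));
}

int main(void)
{
	struct cg_bank cam = { .sta = 0xffffffff };	/* everything gated */

	cg_write_clr(&cam, 1u << 6);		/* enable the bit-6 clock */
	printf("enabled: %d\n", cg_is_enabled(&cam, 6));
	cg_write_set(&cam, 1u << 6);		/* gate it again */
	printf("enabled: %d\n", cg_is_enabled(&cam, 6));
	return 0;
}
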
diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
new file mode 100644
index 000000000000..6d8214c51684
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-mm.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs mm_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+#define GATE_MM(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM */
+ GATE_MM(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_ck", 0),
+ GATE_MM(CLK_MM_MDP_CCORR0, "mm_mdp_ccorr0", "mm_ck", 1),
+ GATE_MM(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_ck", 2),
+ GATE_MM(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_ck", 3),
+ GATE_MM(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_ck", 4),
+ GATE_MM(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_ck", 5),
+ GATE_MM(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_ck", 6),
+ GATE_MM(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_ck", 7),
+ GATE_MM(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_ck", 8),
+ GATE_MM(CLK_MM_DISP_RSZ0, "mm_disp_rsz0", "mm_ck", 9),
+ GATE_MM(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_ck", 10),
+ GATE_MM(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_ck", 11),
+ GATE_MM(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_ck", 12),
+ GATE_MM(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_ck", 13),
+ GATE_MM(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_ck", 14),
+ GATE_MM(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_ck", 15),
+ GATE_MM(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_ck", 16),
+ GATE_MM(CLK_MM_DSI0, "mm_dsi0", "mm_ck", 17),
+ GATE_MM(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_ck", 18),
+ GATE_MM(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_ck", 19),
+ GATE_MM(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_ck", 20),
+ GATE_MM(CLK_MM_SMI_COMM0, "mm_smi_comm0", "mm_ck", 21),
+ GATE_MM(CLK_MM_SMI_COMM1, "mm_smi_comm1", "mm_ck", 22),
+ GATE_MM(CLK_MM_CAM_MDP, "mm_cam_mdp_ck", "mm_ck", 23),
+ GATE_MM(CLK_MM_SMI_IMG, "mm_smi_img_ck", "mm_ck", 24),
+ GATE_MM(CLK_MM_SMI_CAM, "mm_smi_cam_ck", "mm_ck", 25),
+ GATE_MM(CLK_MM_IMG_DL_RELAY, "mm_img_dl_relay", "mm_ck", 26),
+ GATE_MM(CLK_MM_IMG_DL_ASYNC_TOP, "mm_imgdl_async", "mm_ck", 27),
+ GATE_MM(CLK_MM_DIG_DSI, "mm_dig_dsi_ck", "mm_ck", 28),
+ GATE_MM(CLK_MM_F26M_HRTWT, "mm_hrtwt", "f_f26m_ck", 29),
+};
+
+static int clk_mt6765_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_mm[] = {
+ { .compatible = "mediatek,mt6765-mmsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_mm_drv = {
+ .probe = clk_mt6765_mm_probe,
+ .driver = {
+ .name = "clk-mt6765-mm",
+ .of_match_table = of_match_clk_mt6765_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
new file mode 100644
index 000000000000..baae665fab31
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &venc_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0),
+ GATE_VENC(CLK_VENC_SET1_VENC, "venc_set1_venc", "mm_ck", 4),
+ GATE_VENC(CLK_VENC_SET2_JPGENC, "jpgenc", "mm_ck", 8),
+ GATE_VENC(CLK_VENC_SET3_VDEC, "venc_set3_vdec", "mm_ck", 12),
+};
+
+static int clk_mt6765_vcodec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks,
+ ARRAY_SIZE(venc_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765_vcodec[] = {
+ { .compatible = "mediatek,mt6765-vcodecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt6765_vcodec_drv = {
+ .probe = clk_mt6765_vcodec_probe,
+ .driver = {
+ .name = "clk-mt6765-vcodec",
+ .of_match_table = of_match_clk_mt6765_vcodec,
+ },
+};
+
+builtin_platform_driver(clk_mt6765_vcodec_drv);
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
new file mode 100644
index 000000000000..db8db1b3b79d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -0,0 +1,922 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt6765-clk.h>
+
+/* fmeter div select 4 */
+#define _DIV4_ 1
+
+static DEFINE_SPINLOCK(mt6765_clk_lock);
+
+/* Total 12 subsys */
+static void __iomem *cksys_base;
+static void __iomem *apmixed_base;
+
+/* CKSYS */
+#define CLK_SCP_CFG_0 (cksys_base + 0x200)
+#define CLK_SCP_CFG_1 (cksys_base + 0x204)
+
+/* CG */
+#define AP_PLL_CON3 (apmixed_base + 0x0C)
+#define PLLON_CON0 (apmixed_base + 0x44)
+#define PLLON_CON1 (apmixed_base + 0x48)
+
+/* clk cfg update */
+#define CLK_CFG_0 0x40
+#define CLK_CFG_0_SET 0x44
+#define CLK_CFG_0_CLR 0x48
+#define CLK_CFG_1 0x50
+#define CLK_CFG_1_SET 0x54
+#define CLK_CFG_1_CLR 0x58
+#define CLK_CFG_2 0x60
+#define CLK_CFG_2_SET 0x64
+#define CLK_CFG_2_CLR 0x68
+#define CLK_CFG_3 0x70
+#define CLK_CFG_3_SET 0x74
+#define CLK_CFG_3_CLR 0x78
+#define CLK_CFG_4 0x80
+#define CLK_CFG_4_SET 0x84
+#define CLK_CFG_4_CLR 0x88
+#define CLK_CFG_5 0x90
+#define CLK_CFG_5_SET 0x94
+#define CLK_CFG_5_CLR 0x98
+#define CLK_CFG_6 0xa0
+#define CLK_CFG_6_SET 0xa4
+#define CLK_CFG_6_CLR 0xa8
+#define CLK_CFG_7 0xb0
+#define CLK_CFG_7_SET 0xb4
+#define CLK_CFG_7_CLR 0xb8
+#define CLK_CFG_8 0xc0
+#define CLK_CFG_8_SET 0xc4
+#define CLK_CFG_8_CLR 0xc8
+#define CLK_CFG_9 0xd0
+#define CLK_CFG_9_SET 0xd4
+#define CLK_CFG_9_CLR 0xd8
+#define CLK_CFG_10 0xe0
+#define CLK_CFG_10_SET 0xe4
+#define CLK_CFG_10_CLR 0xe8
+#define CLK_CFG_UPDATE 0x004
+
+static const struct mtk_fixed_clk fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_F_FRTC, "f_frtc_ck", "clk32k", 32768),
+ FIXED_CLK(CLK_TOP_CLK26M, "clk_26m_ck", "clk26m", 26000000),
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", NULL, 466000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "syspll_d3", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+ FACTOR(CLK_TOP_USB20_192M, "usb20_192m_ck", "univpll", 2, 13),
+ FACTOR(CLK_TOP_USB20_192M_D4, "usb20_192m_d4", "usb20_192m_ck", 1, 4),
+ FACTOR(CLK_TOP_USB20_192M_D8, "usb20_192m_d8", "usb20_192m_ck", 1, 8),
+ FACTOR(CLK_TOP_USB20_192M_D16,
+ "usb20_192m_d16", "usb20_192m_ck", 1, 16),
+ FACTOR(CLK_TOP_USB20_192M_D32,
+ "usb20_192m_d32", "usb20_192m_ck", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL2_D32, "univpll2_d32", "univpll_d3", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1, 2),
+ FACTOR(CLK_TOP_MPLL, "mpll_ck", "mpll", 1, 1),
+ FACTOR(CLK_TOP_DA_MPLL_104M_DIV, "mpll_104m_div", "mpll_ck", 1, 2),
+ FACTOR(CLK_TOP_DA_MPLL_52M_DIV, "mpll_52m_div", "mpll_ck", 1, 4),
+ FACTOR(CLK_TOP_MFGPLL, "mfgpll_ck", "mfgpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1, 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1, "ulposc1_ck", "ulposc1", 1, 1),
+ FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1_ck", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1_ck", 1, 4),
+ FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1_ck", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1_ck", 1, 16),
+ FACTOR(CLK_TOP_ULPOSC1_D32, "ulposc1_d32", "ulposc1_ck", 1, 32),
+ FACTOR(CLK_TOP_F_F26M, "f_f26m_ck", "clk_26m_ck", 1, 1),
+ FACTOR(CLK_TOP_AXI, "axi_ck", "axi_sel", 1, 1),
+ FACTOR(CLK_TOP_MM, "mm_ck", "mm_sel", 1, 1),
+ FACTOR(CLK_TOP_SCP, "scp_ck", "scp_sel", 1, 1),
+ FACTOR(CLK_TOP_MFG, "mfg_ck", "mfg_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FUART, "f_fuart_ck", "uart_sel", 1, 1),
+ FACTOR(CLK_TOP_SPI, "spi_ck", "spi_sel", 1, 1),
+ FACTOR(CLK_TOP_MSDC50_0, "msdc50_0_ck", "msdc50_0_sel", 1, 1),
+ FACTOR(CLK_TOP_MSDC30_1, "msdc30_1_ck", "msdc30_1_sel", 1, 1),
+ FACTOR(CLK_TOP_AUDIO, "audio_ck", "audio_sel", 1, 1),
+ FACTOR(CLK_TOP_AUD_1, "aud_1_ck", "aud_1_sel", 1, 1),
+ FACTOR(CLK_TOP_AUD_ENGEN1, "aud_engen1_ck", "aud_engen1_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FDISP_PWM, "f_fdisp_pwm_ck", "disp_pwm_sel", 1, 1),
+ FACTOR(CLK_TOP_SSPM, "sspm_ck", "sspm_sel", 1, 1),
+ FACTOR(CLK_TOP_DXCC, "dxcc_ck", "dxcc_sel", 1, 1),
+ FACTOR(CLK_TOP_I2C, "i2c_ck", "i2c_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FPWM, "f_fpwm_ck", "pwm_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FSENINF, "f_fseninf_ck", "seninf_sel", 1, 1),
+ FACTOR(CLK_TOP_AES_FDE, "aes_fde_ck", "aes_fde_sel", 1, 1),
+ FACTOR(CLK_TOP_F_BIST2FPC, "f_bist2fpc_ck", "univpll2_d2", 1, 1),
+ FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL0, "arm_div_pll0", "syspll_d2", 1, 1),
+ FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL1, "arm_div_pll1", "syspll_ck", 1, 1),
+ FACTOR(CLK_TOP_ARMPLL_DIVIDER_PLL2, "arm_div_pll2", "univpll_d2", 1, 1),
+ FACTOR(CLK_TOP_DA_USB20_48M_DIV,
+ "usb20_48m_div", "usb20_192m_d4", 1, 1),
+ FACTOR(CLK_TOP_DA_UNIV_48M_DIV, "univ_48m_div", "usb20_192m_d4", 1, 1),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll_d7",
+ "syspll1_d4",
+ "syspll3_d2"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck",
+ "apll1_ck"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll1_d2",
+ "mmpll_d2"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "univpll2_d2",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll_ck",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll1_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "usb20_192m_d8",
+ "univpll2_d8",
+ "usb20_192m_d4",
+ "univpll2_d32",
+ "usb20_192m_d16",
+ "usb20_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "syspll4_d2",
+ "syspll2_d4"
+};
+
+static const char * const msdc5hclk_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll1_d4",
+ "syspll2_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "msdcpll_d2",
+ "univpll2_d2",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d4",
+ "usb20_192m_d4",
+ "syspll2_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "ulposc1_d2",
+ "ulposc1_d8"
+};
+
+static const char * const sspm_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d3"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll1_d4",
+ "syspll1_d8"
+};
+
+static const char * const usb_top_parents[] = {
+ "clk26m",
+ "univpll3_d4"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "syspll1_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "univpll3_d2",
+ "syspll1_d8",
+ "syspll2_d8"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll3_d4",
+ "syspll1_d8"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "univpll2_d2",
+ "univpll1_d2",
+ "syspll1_d2"
+};
+
+static const char * const ulposc_parents[] = {
+ "clk26m",
+ "ulposc1_d4",
+ "ulposc1_d8",
+ "ulposc1_d16",
+ "ulposc1_d32"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+#define INVALID_UPDATE_REG 0xFFFFFFFF
+#define INVALID_UPDATE_SHIFT -1
+#define INVALID_MUX_GATE -1
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ CLK_CFG_0, CLK_CFG_0_SET, CLK_CFG_0_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE, 1, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MM_SEL, "mm_sel", mm_parents, CLK_CFG_0,
+ CLK_CFG_0_SET, CLK_CFG_0_CLR, 16, 3, 23,
+ CLK_CFG_UPDATE, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, CLK_CFG_0,
+ CLK_CFG_0_SET, CLK_CFG_0_CLR, 24, 3, 31,
+ CLK_CFG_UPDATE, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, CLK_CFG_1,
+ CLK_CFG_1_SET, CLK_CFG_1_CLR, 0, 2, 7,
+ CLK_CFG_UPDATE, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, CLK_CFG_1,
+ CLK_CFG_1_SET, CLK_CFG_1_CLR, 8, 2, 15,
+ CLK_CFG_UPDATE, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG_SEL, "camtg_sel",
+ camtg_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 3, 23, CLK_CFG_UPDATE, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1_SEL, "camtg1_sel", camtg_parents,
+ CLK_CFG_1, CLK_CFG_1_SET, CLK_CFG_1_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2_SEL, "camtg2_sel",
+ camtg_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 3, 7, CLK_CFG_UPDATE, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3_SEL, "camtg3_sel", camtg_parents,
+ CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ CLK_CFG_2, CLK_CFG_2_SET, CLK_CFG_2_CLR, 16, 1, 23,
+ CLK_CFG_UPDATE, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, CLK_CFG_2,
+ CLK_CFG_2_SET, CLK_CFG_2_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc5hclk",
+ msdc5hclk_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 0, 2, 7, CLK_CFG_UPDATE, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 8, 3, 15, CLK_CFG_UPDATE, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 3, 23, CLK_CFG_UPDATE, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ CLK_CFG_3, CLK_CFG_3_SET, CLK_CFG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2, 7, CLK_CFG_UPDATE, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents,
+ CLK_CFG_4, CLK_CFG_4_SET, CLK_CFG_4_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel",
+ aud_engen1_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 16, 2, 23, CLK_CFG_UPDATE, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM_SEL, "disp_pwm_sel",
+ disp_pwm_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 24, 2, 31, CLK_CFG_UPDATE, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSPM_SEL, "sspm_sel", sspm_parents,
+ CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 0, 2, 7,
+ CLK_CFG_UPDATE, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC_SEL, "dxcc_sel", dxcc_parents,
+ CLK_CFG_5, CLK_CFG_5_SET, CLK_CFG_5_CLR, 8, 2, 15,
+ CLK_CFG_UPDATE, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_SEL, "usb_top_sel",
+ usb_top_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 1, 23, CLK_CFG_UPDATE, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPM_SEL, "spm_sel", spm_parents, CLK_CFG_5,
+ CLK_CFG_5_SET, CLK_CFG_5_CLR, 24, 1, 31,
+ CLK_CFG_UPDATE, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, CLK_CFG_6,
+ CLK_CFG_6_SET, CLK_CFG_6_CLR, 0, 3, 7, CLK_CFG_UPDATE,
+ 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, CLK_CFG_6,
+ CLK_CFG_6_SET, CLK_CFG_6_CLR, 8, 2, 15, CLK_CFG_UPDATE,
+ 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF_SEL, "seninf_sel", seninf_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR, 16, 2, 23,
+ CLK_CFG_UPDATE, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE_SEL, "aes_fde_sel",
+ aes_fde_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 24, 3, 31, CLK_CFG_UPDATE, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_PWRAP_ULPOSC_SEL, "ulposc_sel",
+ ulposc_parents, CLK_CFG_7, CLK_CFG_7_SET,
+ CLK_CFG_7_CLR, 0, 3, 7, CLK_CFG_UPDATE, 28,
+ CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM_SEL, "camtm_sel", camtm_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR, 8, 2, 15,
+ CLK_CFG_UPDATE, 29),
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x104,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+ .set_ofs = 0x320,
+ .clr_ofs = 0x320,
+ .sta_ofs = 0x320,
+};
+
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_TOP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_MD_32K, "md_32k", "f_frtc_ck", 8),
+ GATE_TOP0(CLK_TOP_MD_26M, "md_26m", "f_f26m_ck", 9),
+ GATE_TOP0(CLK_TOP_MD2_32K, "md2_32k", "f_frtc_ck", 10),
+ GATE_TOP0(CLK_TOP_MD2_26M, "md2_26m", "f_f26m_ck", 11),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL0_EN,
+ "arm_div_pll0_en", "arm_div_pll0", 3),
+ GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL1_EN,
+ "arm_div_pll1_en", "arm_div_pll1", 4),
+ GATE_TOP1(CLK_TOP_ARMPLL_DIVIDER_PLL2_EN,
+ "arm_div_pll2_en", "arm_div_pll2", 5),
+ GATE_TOP1(CLK_TOP_FMEM_OCC_DRC_EN, "drc_en", "univpll2_d2", 6),
+ GATE_TOP1(CLK_TOP_USB20_48M_EN, "usb20_48m_en", "usb20_48m_div", 8),
+ GATE_TOP1(CLK_TOP_UNIVPLL_48M_EN, "univpll_48m_en", "univ_48m_div", 9),
+ GATE_TOP1(CLK_TOP_F_UFS_MP_SAP_CFG_EN, "ufs_sap", "f_f26m_ck", 12),
+ GATE_TOP1(CLK_TOP_F_BIST2FPC_EN, "bist2fpc", "f_bist2fpc_ck", 16),
+ /* TOP2 */
+ GATE_TOP2(CLK_TOP_APLL12_DIV0, "apll12_div0", "aud_1_ck", 2),
+ GATE_TOP2(CLK_TOP_APLL12_DIV1, "apll12_div1", "aud_1_ck", 3),
+ GATE_TOP2(CLK_TOP_APLL12_DIV2, "apll12_div2", "aud_1_ck", 4),
+ GATE_TOP2(CLK_TOP_APLL12_DIV3, "apll12_div3", "aud_1_ck", 5),
+};
+
+static const struct mtk_gate_regs ifr2_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs ifr3_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs ifr4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs ifr5_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+#define GATE_IFR2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR4(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_IFR5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ifr5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ifr_clks[] = {
+ /* INFRA_TOPAXI */
+ /* INFRA PERI */
+ /* INFRA mode 0 */
+ GATE_IFR2(CLK_IFR_ICUSB, "ifr_icusb", "axi_ck", 8),
+ GATE_IFR2(CLK_IFR_GCE, "ifr_gce", "axi_ck", 9),
+ GATE_IFR2(CLK_IFR_THERM, "ifr_therm", "axi_ck", 10),
+ GATE_IFR2(CLK_IFR_I2C_AP, "ifr_i2c_ap", "i2c_ck", 11),
+ GATE_IFR2(CLK_IFR_I2C_CCU, "ifr_i2c_ccu", "i2c_ck", 12),
+ GATE_IFR2(CLK_IFR_I2C_SSPM, "ifr_i2c_sspm", "i2c_ck", 13),
+ GATE_IFR2(CLK_IFR_I2C_RSV, "ifr_i2c_rsv", "i2c_ck", 14),
+ GATE_IFR2(CLK_IFR_PWM_HCLK, "ifr_pwm_hclk", "axi_ck", 15),
+ GATE_IFR2(CLK_IFR_PWM1, "ifr_pwm1", "f_fpwm_ck", 16),
+ GATE_IFR2(CLK_IFR_PWM2, "ifr_pwm2", "f_fpwm_ck", 17),
+ GATE_IFR2(CLK_IFR_PWM3, "ifr_pwm3", "f_fpwm_ck", 18),
+ GATE_IFR2(CLK_IFR_PWM4, "ifr_pwm4", "f_fpwm_ck", 19),
+ GATE_IFR2(CLK_IFR_PWM5, "ifr_pwm5", "f_fpwm_ck", 20),
+ GATE_IFR2(CLK_IFR_PWM, "ifr_pwm", "f_fpwm_ck", 21),
+ GATE_IFR2(CLK_IFR_UART0, "ifr_uart0", "f_fuart_ck", 22),
+ GATE_IFR2(CLK_IFR_UART1, "ifr_uart1", "f_fuart_ck", 23),
+ GATE_IFR2(CLK_IFR_GCE_26M, "ifr_gce_26m", "f_f26m_ck", 27),
+ GATE_IFR2(CLK_IFR_CQ_DMA_FPC, "ifr_dma", "axi_ck", 28),
+ GATE_IFR2(CLK_IFR_BTIF, "ifr_btif", "axi_ck", 31),
+ /* INFRA mode 1 */
+ GATE_IFR3(CLK_IFR_SPI0, "ifr_spi0", "spi_ck", 1),
+ GATE_IFR3(CLK_IFR_MSDC0, "ifr_msdc0", "msdc5hclk", 2),
+ GATE_IFR3(CLK_IFR_MSDC1, "ifr_msdc1", "axi_ck", 4),
+ GATE_IFR3(CLK_IFR_TRNG, "ifr_trng", "axi_ck", 9),
+ GATE_IFR3(CLK_IFR_AUXADC, "ifr_auxadc", "f_f26m_ck", 10),
+ GATE_IFR3(CLK_IFR_CCIF1_AP, "ifr_ccif1_ap", "axi_ck", 12),
+ GATE_IFR3(CLK_IFR_CCIF1_MD, "ifr_ccif1_md", "axi_ck", 13),
+ GATE_IFR3(CLK_IFR_AUXADC_MD, "ifr_auxadc_md", "f_f26m_ck", 14),
+ GATE_IFR3(CLK_IFR_AP_DMA, "ifr_ap_dma", "axi_ck", 18),
+ GATE_IFR3(CLK_IFR_DEVICE_APC, "ifr_dapc", "axi_ck", 20),
+ GATE_IFR3(CLK_IFR_CCIF_AP, "ifr_ccif_ap", "axi_ck", 23),
+ GATE_IFR3(CLK_IFR_AUDIO, "ifr_audio", "axi_ck", 25),
+ GATE_IFR3(CLK_IFR_CCIF_MD, "ifr_ccif_md", "axi_ck", 26),
+ /* INFRA mode 2 */
+ GATE_IFR4(CLK_IFR_RG_PWM_FBCLK6, "ifr_pwmfb", "f_f26m_ck", 0),
+ GATE_IFR4(CLK_IFR_DISP_PWM, "ifr_disp_pwm", "f_fdisp_pwm_ck", 2),
+ GATE_IFR4(CLK_IFR_CLDMA_BCLK, "ifr_cldmabclk", "axi_ck", 3),
+ GATE_IFR4(CLK_IFR_AUDIO_26M_BCLK, "ifr_audio26m", "f_f26m_ck", 4),
+ GATE_IFR4(CLK_IFR_SPI1, "ifr_spi1", "spi_ck", 6),
+ GATE_IFR4(CLK_IFR_I2C4, "ifr_i2c4", "i2c_ck", 7),
+ GATE_IFR4(CLK_IFR_SPI2, "ifr_spi2", "spi_ck", 9),
+ GATE_IFR4(CLK_IFR_SPI3, "ifr_spi3", "spi_ck", 10),
+ GATE_IFR4(CLK_IFR_I2C5, "ifr_i2c5", "i2c_ck", 18),
+ GATE_IFR4(CLK_IFR_I2C5_ARBITER, "ifr_i2c5a", "i2c_ck", 19),
+ GATE_IFR4(CLK_IFR_I2C5_IMM, "ifr_i2c5_imm", "i2c_ck", 20),
+ GATE_IFR4(CLK_IFR_I2C1_ARBITER, "ifr_i2c1a", "i2c_ck", 21),
+ GATE_IFR4(CLK_IFR_I2C1_IMM, "ifr_i2c1_imm", "i2c_ck", 22),
+ GATE_IFR4(CLK_IFR_I2C2_ARBITER, "ifr_i2c2a", "i2c_ck", 23),
+ GATE_IFR4(CLK_IFR_I2C2_IMM, "ifr_i2c2_imm", "i2c_ck", 24),
+ GATE_IFR4(CLK_IFR_SPI4, "ifr_spi4", "spi_ck", 25),
+ GATE_IFR4(CLK_IFR_SPI5, "ifr_spi5", "spi_ck", 26),
+ GATE_IFR4(CLK_IFR_CQ_DMA, "ifr_cq_dma", "axi_ck", 27),
+ GATE_IFR4(CLK_IFR_FAES_FDE, "ifr_faes_fde_ck", "aes_fde_ck", 29),
+ /* INFRA mode 3 */
+ GATE_IFR5(CLK_IFR_MSDC0_SELF, "ifr_msdc0sf", "msdc50_0_ck", 0),
+ GATE_IFR5(CLK_IFR_MSDC1_SELF, "ifr_msdc1sf", "msdc50_0_ck", 1),
+ GATE_IFR5(CLK_IFR_I2C6, "ifr_i2c6", "i2c_ck", 6),
+ GATE_IFR5(CLK_IFR_AP_MSDC0, "ifr_ap_msdc0", "msdc50_0_ck", 7),
+ GATE_IFR5(CLK_IFR_MD_MSDC0, "ifr_md_msdc0", "msdc50_0_ck", 8),
+ GATE_IFR5(CLK_IFR_MSDC0_SRC, "ifr_msdc0_clk", "msdc50_0_ck", 9),
+ GATE_IFR5(CLK_IFR_MSDC1_SRC, "ifr_msdc1_clk", "msdc30_1_ck", 10),
+ GATE_IFR5(CLK_IFR_MCU_PM_BCLK, "ifr_mcu_pm_bclk", "axi_ck", 17),
+ GATE_IFR5(CLK_IFR_CCIF2_AP, "ifr_ccif2_ap", "axi_ck", 18),
+ GATE_IFR5(CLK_IFR_CCIF2_MD, "ifr_ccif2_md", "axi_ck", 19),
+ GATE_IFR5(CLK_IFR_CCIF3_AP, "ifr_ccif3_ap", "axi_ck", 20),
+ GATE_IFR5(CLK_IFR_CCIF3_MD, "ifr_ccif3_md", "axi_ck", 21),
+};
+
+/* additional CCF control for mipi26M race condition (disp/camera) */
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &apmixed_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate apmixed_clks[] = {
+ /* AUDIO0 */
+ GATE_APMIXED(CLK_APMIXED_SSUSB26M, "apmixed_ssusb26m", "f_f26m_ck",
+ 4),
+ GATE_APMIXED(CLK_APMIXED_APPLL26M, "apmixed_appll26m", "f_f26m_ck",
+ 5),
+ GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m", "f_f26m_ck",
+ 6),
+ GATE_APMIXED(CLK_APMIXED_MDPLLGP26M, "apmixed_mdpll26m", "f_f26m_ck",
+ 7),
+ GATE_APMIXED(CLK_APMIXED_MMSYS_F26M, "apmixed_mmsys26m", "f_f26m_ck",
+ 8),
+ GATE_APMIXED(CLK_APMIXED_UFS26M, "apmixed_ufs26m", "f_f26m_ck",
+ 9),
+ GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m", "f_f26m_ck",
+ 11),
+ GATE_APMIXED(CLK_APMIXED_MEMPLL26M, "apmixed_mempll26m", "f_f26m_ck",
+ 13),
+ GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m",
+ "f_f26m_ck", 14),
+ GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m", "f_f26m_ck",
+ 16),
+};
+
+#define MT6765_PLL_FMAX (3800UL * MHZ)
+#define MT6765_PLL_FMIN (1500UL * MHZ)
+
+#define CON0_MT6765_RST_BAR BIT(23)
+
+#define PLL_INFO_NULL (0xFF)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pcwibits, _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg,\
+ _tuner_en_bit, _pcw_reg, _pcw_shift, _div_table) {\
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT6765_RST_BAR, \
+ .fmax = MT6765_PLL_FMAX, \
+ .fmin = MT6765_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = _pcwibits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pcwibits, _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pcwibits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, NULL) \
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, BIT(0),
+ PLL_AO, 22, 8, 0x0220, 24, 0, 0, 0, 0x0220, 0),
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, BIT(0),
+ PLL_AO, 22, 8, 0x0210, 24, 0, 0, 0, 0x0210, 0),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, BIT(0),
+ PLL_AO, 22, 8, 0x0230, 24, 0, 0, 0, 0x0230, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, BIT(0),
+ (HAVE_RST_BAR | PLL_AO), 22, 8, 0x0240, 24, 0, 0, 0, 0x0240,
+ 0),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, BIT(0),
+ 0, 22, 8, 0x0250, 24, 0, 0, 0, 0x0250, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, BIT(0),
+ 0, 22, 8, 0x0260, 24, 0, 0, 0, 0x0260, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, BIT(0),
+ HAVE_RST_BAR, 22, 8, 0x0270, 24, 0, 0, 0, 0x0270, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, BIT(0),
+ 0, 22, 8, 0x0280, 24, 0, 0, 0, 0x0280, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, BIT(0),
+ 0, 32, 8, 0x0290, 24, 0x0040, 0x000C, 0, 0x0294, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, BIT(0),
+ PLL_AO, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0),
+};
+
+static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ apmixed_base = base;
+ /* MPLL, CCIPLL, MAINPLL set HW mode, TDCLKSQ, CLKSQ1 */
+ writel(readl(AP_PLL_CON3) & 0xFFFFFFE1, AP_PLL_CON3);
+ writel(readl(PLLON_CON0) & 0x01041041, PLLON_CON0);
+ writel(readl(PLLON_CON1) & 0x01041041, PLLON_CON1);
+
+ return r;
+}
+
+static int clk_mt6765_top_probe(struct platform_device *pdev)
+{
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct clk_onecell_data *clk_data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt6765_clk_lock, clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ cksys_base = base;
+ /* [4]:no need */
+ writel(readl(CLK_SCP_CFG_0) | 0x3EF, CLK_SCP_CFG_0);
+ /*[1,2,3,8]: no need*/
+ writel(readl(CLK_SCP_CFG_1) | 0x1, CLK_SCP_CFG_1);
+
+ return r;
+}
+
+static int clk_mt6765_ifr_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+ mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
+ clk_data);
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt6765[] = {
+ {
+ .compatible = "mediatek,mt6765-apmixedsys",
+ .data = clk_mt6765_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt6765-topckgen",
+ .data = clk_mt6765_top_probe,
+ }, {
+ .compatible = "mediatek,mt6765-infracfg",
+ .data = clk_mt6765_ifr_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt6765_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *d);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt6765_drv = {
+ .probe = clk_mt6765_probe,
+ .driver = {
+ .name = "clk-mt6765",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_clk_mt6765,
+ },
+};
+
+static int __init clk_mt6765_init(void)
+{
+ return platform_driver_register(&clk_mt6765_drv);
+}
+
+arch_initcall(clk_mt6765_init);
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index fb5fbb8e3e41..059c1a41ac7a 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -84,15 +84,11 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM1(CLK_MM_DISP_OVL_FBDC, "mm_disp_ovl_fbdc", "mm_sel", 16),
};
-static const struct of_device_id of_match_clk_mt6779_mm[] = {
- { .compatible = "mediatek,mt6779-mmsys", },
- {}
-};
-
static int clk_mt6779_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -106,7 +102,6 @@ static struct platform_driver clk_mt6779_mm_drv = {
.probe = clk_mt6779_mm_probe,
.driver = {
.name = "clk-mt6779-mm",
- .of_match_table = of_match_clk_mt6779_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 8f05653b387d..01fdce287247 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -92,16 +92,12 @@ static const struct mtk_gate mm_clks[] = {
"clk26m", 3),
};
-static const struct of_device_id of_match_clk_mt6797_mm[] = {
- { .compatible = "mediatek,mt6797-mmsys", },
- {}
-};
-
static int clk_mt6797_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
int r;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
@@ -121,7 +117,6 @@ static struct platform_driver clk_mt6797_mm_drv = {
.probe = clk_mt6797_mm_probe,
.driver = {
.name = "clk-mt6797-mm",
- .of_match_table = of_match_clk_mt6797_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
new file mode 100644
index 000000000000..36fa20be77b6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8173-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x0104,
+ .clr_ofs = 0x0108,
+ .sta_ofs = 0x0100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x0114,
+ .clr_ofs = 0x0118,
+ .sta_ofs = 0x0110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mt8173_mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+ GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+ GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
+ GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
+ GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
+ GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
+ GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
+ GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+ GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
+ GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
+};
+
+struct clk_mt8173_mm_driver_data {
+ const struct mtk_gate *gates_clk;
+ int gates_num;
+};
+
+static const struct clk_mt8173_mm_driver_data mt8173_mmsys_driver_data = {
+ .gates_clk = mt8173_mm_clks,
+ .gates_num = ARRAY_SIZE(mt8173_mm_clks),
+};
+
+static int clk_mt8173_mm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ const struct clk_mt8173_mm_driver_data *data;
+ struct clk_onecell_data *clk_data;
+ int ret;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ data = &mt8173_mmsys_driver_data;
+
+ ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
+ clk_data);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct platform_driver clk_mt8173_mm_drv = {
+ .driver = {
+ .name = "clk-mt8173-mm",
+ },
+ .probe = clk_mt8173_mm_probe,
+};
+
+builtin_platform_driver(clk_mt8173_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 537a7f49b0f7..8f898ac476c0 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -753,93 +753,6 @@ static const struct mtk_gate img_clks[] __initconst = {
GATE_IMG(CLK_IMG_FD, "img_fd", "mm_sel", 11),
};
-static const struct mtk_gate_regs mm0_cg_regs __initconst = {
- .set_ofs = 0x0104,
- .clr_ofs = 0x0108,
- .sta_ofs = 0x0100,
-};
-
-static const struct mtk_gate_regs mm1_cg_regs __initconst = {
- .set_ofs = 0x0114,
- .clr_ofs = 0x0118,
- .sta_ofs = 0x0110,
-};
-
-#define GATE_MM0(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm0_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-#define GATE_MM1(_id, _name, _parent, _shift) { \
- .id = _id, \
- .name = _name, \
- .parent_name = _parent, \
- .regs = &mm1_cg_regs, \
- .shift = _shift, \
- .ops = &mtk_clk_gate_ops_setclr, \
- }
-
-static const struct mtk_gate mm_clks[] __initconst = {
- /* MM0 */
- GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
- GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
- GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
- GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
- GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
- GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
- GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
- GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
- GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
- GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
- GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
- GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
- GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
- GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
- GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "rtc_sel", 15),
- GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
- GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
- GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
- GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
- GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
- GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
- GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
- GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
- GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
- GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
- GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
- GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
- GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
- GATE_MM0(CLK_MM_DISP_SPLIT1, "mm_disp_split1", "mm_sel", 29),
- GATE_MM0(CLK_MM_DISP_MERGE, "mm_disp_merge", "mm_sel", 30),
- GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
- /* MM1 */
- GATE_MM1(CLK_MM_DISP_PWM0MM, "mm_disp_pwm0mm", "mm_sel", 0),
- GATE_MM1(CLK_MM_DISP_PWM026M, "mm_disp_pwm026m", "pwm_sel", 1),
- GATE_MM1(CLK_MM_DISP_PWM1MM, "mm_disp_pwm1mm", "mm_sel", 2),
- GATE_MM1(CLK_MM_DISP_PWM126M, "mm_disp_pwm126m", "pwm_sel", 3),
- GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
- GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_dig", 5),
- GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
- GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_dig", 7),
- GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "dpi0_sel", 8),
- GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
- GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "lvds_pxl", 10),
- GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
- GATE_MM1(CLK_MM_HDMI_PIXEL, "mm_hdmi_pixel", "dpi0_sel", 12),
- GATE_MM1(CLK_MM_HDMI_PLLCK, "mm_hdmi_pllck", "hdmi_sel", 13),
- GATE_MM1(CLK_MM_HDMI_AUDIO, "mm_hdmi_audio", "apll1", 14),
- GATE_MM1(CLK_MM_HDMI_SPDIF, "mm_hdmi_spdif", "apll2", 15),
- GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "lvds_pxl", 16),
- GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvds_cts", 17),
- GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
- GATE_MM1(CLK_MM_HDMI_HDCP, "mm_hdmi_hdcp", "hdcp_sel", 19),
- GATE_MM1(CLK_MM_HDMI_HDCP24M, "mm_hdmi_hdcp24m", "hdcp_24m_sel", 20),
-};
-
static const struct mtk_gate_regs vdec0_cg_regs __initconst = {
.set_ofs = 0x0000,
.clr_ofs = 0x0004,
@@ -1144,23 +1057,6 @@ static void __init mtk_imgsys_init(struct device_node *node)
}
CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init);
-static void __init mtk_mmsys_init(struct device_node *node)
-{
- struct clk_onecell_data *clk_data;
- int r;
-
- clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
-
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
-
- r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
- if (r)
- pr_err("%s(): could not register clock provider: %d\n",
- __func__, r);
-}
-CLK_OF_DECLARE(mtk_mmsys, "mediatek,mt8173-mmsys", mtk_mmsys_init);
-
static void __init mtk_vdecsys_init(struct device_node *node)
{
struct clk_onecell_data *clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 720c696b506d..9d60e09619c1 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -84,8 +84,9 @@ static const struct mtk_gate mm_clks[] = {
static int clk_mt8183_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
struct clk_onecell_data *clk_data;
- struct device_node *node = pdev->dev.of_node;
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
@@ -95,16 +96,10 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
-static const struct of_device_id of_match_clk_mt8183_mm[] = {
- { .compatible = "mediatek,mt8183-mmsys", },
- {}
-};
-
static struct platform_driver clk_mt8183_mm_drv = {
.probe = clk_mt8183_mm_probe,
.driver = {
.name = "clk-mt8183-mm",
- .of_match_table = of_match_clk_mt8183_mm,
},
};
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 76f9cd039195..14e127e9a740 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -160,7 +160,7 @@ struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
- struct clk_init_data init;
+ struct clk_init_data init = {};
struct clk *clk;
clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index fad616cac01e..30c15766ebb1 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -3702,7 +3702,9 @@ static struct clk_regmap g12a_hdmi = {
/*
* The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch.
+ * muxed by a glitch-free switch. The CCF can manage this glitch-free
+ * mux because it does top-to-bottom updates of each clock tree and
+ * switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
{ .fw_name = "xtal", },
@@ -3726,7 +3728,13 @@ static struct clk_regmap g12a_mali_0_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = g12a_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -3743,7 +3751,7 @@ static struct clk_regmap g12a_mali_0_div = {
&g12a_mali_0_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3759,7 +3767,7 @@ static struct clk_regmap g12a_mali_0 = {
&g12a_mali_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -3774,7 +3782,13 @@ static struct clk_regmap g12a_mali_1_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = g12a_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -3791,7 +3805,7 @@ static struct clk_regmap g12a_mali_1_div = {
&g12a_mali_1_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3807,7 +3821,7 @@ static struct clk_regmap g12a_mali_1 = {
&g12a_mali_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -3827,7 +3841,7 @@ static struct clk_regmap g12a_mali = {
.ops = &clk_regmap_mux_ops,
.parent_hws = g12a_mali_parent_hws,
.num_parents = 2,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 5fd6a574f8c3..0a68af6eec3d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -957,7 +957,9 @@ static struct clk_regmap gxbb_sar_adc_clk = {
/*
* The MALI IP is clocked by two identical clocks (mali_0 and mali_1)
- * muxed by a glitch-free switch.
+ * muxed by a glitch-free switch. The CCF can manage this glitch-free
+ * mux because it does top-to-bottom updates of each clock tree and
+ * switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
@@ -980,14 +982,15 @@ static struct clk_regmap gxbb_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- /*
- * bits 10:9 selects from 8 possible parents:
- * xtal, gp0_pll, mpll2, mpll1, fclk_div7,
- * fclk_div4, fclk_div3, fclk_div5
- */
.parent_data = gxbb_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -1004,7 +1007,7 @@ static struct clk_regmap gxbb_mali_0_div = {
&gxbb_mali_0_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1020,7 +1023,7 @@ static struct clk_regmap gxbb_mali_0 = {
&gxbb_mali_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -1033,14 +1036,15 @@ static struct clk_regmap gxbb_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- /*
- * bits 10:9 selects from 8 possible parents:
- * xtal, gp0_pll, mpll2, mpll1, fclk_div7,
- * fclk_div4, fclk_div3, fclk_div5
- */
.parent_data = gxbb_mali_0_1_parent_data,
.num_parents = 8,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ /*
+ * Don't request the parent to change the rate because
+ * all GPU frequencies can be derived from the fclk_*
+ * clocks and one special GP0_PLL setting. This is
+ * important because we need the MPLL clocks for audio.
+ */
+ .flags = 0,
},
};
@@ -1057,7 +1061,7 @@ static struct clk_regmap gxbb_mali_1_div = {
&gxbb_mali_1_sel.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -1073,7 +1077,7 @@ static struct clk_regmap gxbb_mali_1 = {
&gxbb_mali_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -1093,7 +1097,7 @@ static struct clk_regmap gxbb_mali = {
.ops = &clk_regmap_mux_ops,
.parent_hws = gxbb_mali_parent_hws,
.num_parents = 2,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 34a70c4b4899..edc09d050ecf 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1077,7 +1077,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
* Meson8m2: vid2_pll
*/
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_hdmi_pll_dco.hw
+ &meson8b_hdmi_pll_lvds_out.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1213,7 +1213,7 @@ static struct clk_regmap meson8b_vclk_in_en = {
static struct clk_regmap meson8b_vclk_div1_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
@@ -1243,7 +1243,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
static struct clk_regmap meson8b_vclk_div2_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
@@ -1273,7 +1273,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
static struct clk_regmap meson8b_vclk_div4_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
@@ -1303,7 +1303,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
static struct clk_regmap meson8b_vclk_div6_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
@@ -1333,7 +1333,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
static struct clk_regmap meson8b_vclk_div12_div_gate = {
.data = &(struct clk_regmap_gate_data){
- .offset = HHI_VID_CLK_DIV,
+ .offset = HHI_VID_CLK_CNTL,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
@@ -1725,7 +1725,7 @@ static struct clk_regmap meson8b_hdmi_sys_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_sys_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
/* FIXME: all other parents are unknown */
.parent_data = &(const struct clk_parent_data) {
.fw_name = "xtal",
@@ -1745,7 +1745,7 @@ static struct clk_regmap meson8b_hdmi_sys_div = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_sys_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_sys_sel.hw
},
@@ -1761,7 +1761,7 @@ static struct clk_regmap meson8b_hdmi_sys = {
},
.hw.init = &(struct clk_init_data) {
.name = "hdmi_sys",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_sys_div.hw
},
@@ -1918,6 +1918,13 @@ static struct clk_regmap meson8b_mali = {
},
};
+static const struct reg_sequence meson8m2_gp_pll_init_regs[] = {
+ { .reg = HHI_GP_PLL_CNTL2, .def = 0x59c88000 },
+ { .reg = HHI_GP_PLL_CNTL3, .def = 0xca463823 },
+ { .reg = HHI_GP_PLL_CNTL4, .def = 0x0286a027 },
+ { .reg = HHI_GP_PLL_CNTL5, .def = 0x00003000 },
+};
+
static const struct pll_params_table meson8m2_gp_pll_params_table[] = {
PLL_PARAMS(182, 3),
{ /* sentinel */ },
@@ -1951,6 +1958,8 @@ static struct clk_regmap meson8m2_gp_pll_dco = {
.width = 1,
},
.table = meson8m2_gp_pll_params_table,
+ .init_regs = meson8m2_gp_pll_init_regs,
+ .init_count = ARRAY_SIZE(meson8m2_gp_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp_pll_dco",
@@ -2063,7 +2072,7 @@ static struct clk_regmap meson8b_vpu_0 = {
&meson8b_vpu_0_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
@@ -2134,10 +2143,18 @@ static struct clk_regmap meson8b_vpu_1 = {
&meson8b_vpu_1_div.hw
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT,
},
};
+/*
+ * The VPU clock has two identical clock trees (vpu_0 and vpu_1)
+ * muxed by a glitch-free switch on Meson8b and Meson8m2. The CCF can
+ * actually manage this glitch-free mux because it does top-to-bottom
+ * updates of each clock tree and switches to the "inactive" one when
+ * CLK_SET_RATE_GATE is set.
+ * Meson8 only has vpu_0 and no glitch-free mux.
+ */
static struct clk_regmap meson8b_vpu = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2152,7 +2169,7 @@ static struct clk_regmap meson8b_vpu = {
&meson8b_vpu_1.hw,
},
.num_parents = 2,
- .flags = CLK_SET_RATE_NO_REPARENT,
+ .flags = CLK_SET_RATE_PARENT,
},
};
@@ -3506,54 +3523,87 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
static const struct meson8b_clk_reset_line {
u32 reg;
u8 bit_idx;
+ bool active_low;
} meson8b_clk_reset_bits[] = {
[CLKC_RESET_L2_CACHE_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 30
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 30,
+ .active_low = false,
},
[CLKC_RESET_AXI_64_TO_128_BRIDGE_A5_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 29
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 29,
+ .active_low = false,
},
[CLKC_RESET_SCU_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 28
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 28,
+ .active_low = false,
},
[CLKC_RESET_CPU3_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 27
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 27,
+ .active_low = false,
},
[CLKC_RESET_CPU2_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 26
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 26,
+ .active_low = false,
},
[CLKC_RESET_CPU1_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 25
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 25,
+ .active_low = false,
},
[CLKC_RESET_CPU0_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 24
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 24,
+ .active_low = false,
},
[CLKC_RESET_A5_GLOBAL_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 18
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 18,
+ .active_low = false,
},
[CLKC_RESET_A5_AXI_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 17
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 17,
+ .active_low = false,
},
[CLKC_RESET_A5_ABP_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL0, .bit_idx = 16
+ .reg = HHI_SYS_CPU_CLK_CNTL0,
+ .bit_idx = 16,
+ .active_low = false,
},
[CLKC_RESET_AXI_64_TO_128_BRIDGE_MMC_SOFT_RESET] = {
- .reg = HHI_SYS_CPU_CLK_CNTL1, .bit_idx = 30
+ .reg = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 30,
+ .active_low = false,
},
[CLKC_RESET_VID_CLK_CNTL_SOFT_RESET] = {
- .reg = HHI_VID_CLK_CNTL, .bit_idx = 15
+ .reg = HHI_VID_CLK_CNTL,
+ .bit_idx = 15,
+ .active_low = false,
},
[CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_POST] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 7
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 7,
+ .active_low = false,
},
[CLKC_RESET_VID_DIVIDER_CNTL_SOFT_RESET_PRE] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 3
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 3,
+ .active_low = false,
},
[CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_POST] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 1
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 1,
+ .active_low = true,
},
[CLKC_RESET_VID_DIVIDER_CNTL_RESET_N_PRE] = {
- .reg = HHI_VID_DIVIDER_CNTL, .bit_idx = 0
+ .reg = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 0,
+ .active_low = true,
},
};
@@ -3562,22 +3612,22 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
{
struct meson8b_clk_reset *meson8b_clk_reset =
container_of(rcdev, struct meson8b_clk_reset, reset);
- unsigned long flags;
const struct meson8b_clk_reset_line *reset;
+ unsigned int value = 0;
+ unsigned long flags;
if (id >= ARRAY_SIZE(meson8b_clk_reset_bits))
return -EINVAL;
reset = &meson8b_clk_reset_bits[id];
+ if (assert != reset->active_low)
+ value = BIT(reset->bit_idx);
+
spin_lock_irqsave(&meson_clk_lock, flags);
- if (assert)
- regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
- BIT(reset->bit_idx), BIT(reset->bit_idx));
- else
- regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
- BIT(reset->bit_idx), 0);
+ regmap_update_bits(meson8b_clk_reset->regmap, reset->reg,
+ BIT(reset->bit_idx), value);
spin_unlock_irqrestore(&meson_clk_lock, flags);
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index c889fbeec30f..cd38ae2a9cb5 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -20,6 +20,10 @@
* [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
*/
#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
+#define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */
+#define HHI_GP_PLL_CNTL3 0x48 /* 0x12 offset in data sheet */
+#define HHI_GP_PLL_CNTL4 0x4C /* 0x13 offset in data sheet */
+#define HHI_GP_PLL_CNTL5 0x50 /* 0x14 offset in data sheet */
#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
@@ -146,7 +150,6 @@
#define CLKID_CTS_VDAC0 171
#define CLKID_HDMI_SYS_SEL 172
#define CLKID_HDMI_SYS_DIV 173
-#define CLKID_HDMI_SYS 174
#define CLKID_MALI_0_SEL 175
#define CLKID_MALI_0_DIV 176
#define CLKID_MALI_0 177
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 14dc8a8a9d08..cbcc2f8430a2 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -8,7 +8,8 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
-obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o
+obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
+obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
new file mode 100644
index 000000000000..eea69d498bd2
--- /dev/null
+++ b/drivers/clk/mmp/clk-audio.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MMP Audio Clock Controller driver
+ *
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/marvell,mmp2-audio.h>
+
+/* Audio Controller Registers */
+#define SSPA_AUD_CTRL 0x04
+#define SSPA_AUD_PLL_CTRL0 0x08
+#define SSPA_AUD_PLL_CTRL1 0x0c
+
+/* SSPA Audio Control Register */
+#define SSPA_AUD_CTRL_SYSCLK_SHIFT 0
+#define SSPA_AUD_CTRL_SYSCLK_DIV_SHIFT 1
+#define SSPA_AUD_CTRL_SSPA0_MUX_SHIFT 7
+#define SSPA_AUD_CTRL_SSPA0_SHIFT 8
+#define SSPA_AUD_CTRL_SSPA0_DIV_SHIFT 9
+#define SSPA_AUD_CTRL_SSPA1_SHIFT 16
+#define SSPA_AUD_CTRL_SSPA1_DIV_SHIFT 17
+#define SSPA_AUD_CTRL_SSPA1_MUX_SHIFT 23
+#define SSPA_AUD_CTRL_DIV_MASK 0x7e
+
+/* SSPA Audio PLL Control 0 Register */
+#define SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO_MASK (0x7 << 28)
+#define SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(x) ((x) << 28)
+#define SSPA_AUD_PLL_CTRL0_FRACT_MASK (0xfffff << 8)
+#define SSPA_AUD_PLL_CTRL0_FRACT(x) ((x) << 8)
+#define SSPA_AUD_PLL_CTRL0_ENA_DITHER (1 << 7)
+#define SSPA_AUD_PLL_CTRL0_ICP_2UA (0 << 5)
+#define SSPA_AUD_PLL_CTRL0_ICP_5UA (1 << 5)
+#define SSPA_AUD_PLL_CTRL0_ICP_7UA (2 << 5)
+#define SSPA_AUD_PLL_CTRL0_ICP_10UA (3 << 5)
+#define SSPA_AUD_PLL_CTRL0_DIV_FBCCLK_MASK (0x3 << 3)
+#define SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(x) ((x) << 3)
+#define SSPA_AUD_PLL_CTRL0_DIV_MCLK_MASK (0x1 << 2)
+#define SSPA_AUD_PLL_CTRL0_DIV_MCLK(x) ((x) << 2)
+#define SSPA_AUD_PLL_CTRL0_PD_OVPROT_DIS (1 << 1)
+#define SSPA_AUD_PLL_CTRL0_PU (1 << 0)
+
+/* SSPA Audio PLL Control 1 Register */
+#define SSPA_AUD_PLL_CTRL1_SEL_FAST_CLK (1 << 24)
+#define SSPA_AUD_PLL_CTRL1_CLK_SEL_MASK (1 << 11)
+#define SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL (1 << 11)
+#define SSPA_AUD_PLL_CTRL1_CLK_SEL_VCXO (0 << 11)
+#define SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN_MASK (0x7ff << 0)
+#define SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(x) ((x) << 0)
+
+struct mmp2_audio_clk {
+ void __iomem *mmio_base;
+
+ struct clk_hw audio_pll_hw;
+ struct clk_mux sspa_mux;
+ struct clk_mux sspa1_mux;
+ struct clk_divider sysclk_div;
+ struct clk_divider sspa0_div;
+ struct clk_divider sspa1_div;
+ struct clk_gate sysclk_gate;
+ struct clk_gate sspa0_gate;
+ struct clk_gate sspa1_gate;
+
+ u32 aud_ctrl;
+ u32 aud_pll_ctrl0;
+ u32 aud_pll_ctrl1;
+
+ spinlock_t lock;
+
+ /* Must be last */
+ struct clk_hw_onecell_data clk_data;
+};
+
+static const struct {
+ unsigned long parent_rate;
+ unsigned long freq_vco;
+ unsigned char mclk;
+ unsigned char fbcclk;
+ unsigned short fract;
+} predivs[] = {
+ { 26000000, 135475200, 0, 0, 0x8a18 },
+ { 26000000, 147456000, 0, 1, 0x0da1 },
+ { 38400000, 135475200, 1, 2, 0x8208 },
+ { 38400000, 147456000, 1, 3, 0xaaaa },
+};
+
+static const struct {
+ unsigned char divisor;
+ unsigned char modulo;
+ unsigned char pattern;
+} postdivs[] = {
+ { 1, 3, 0, },
+ { 2, 5, 0, },
+ { 4, 0, 0, },
+ { 6, 1, 1, },
+ { 8, 1, 0, },
+ { 9, 1, 2, },
+ { 12, 2, 1, },
+ { 16, 2, 0, },
+ { 18, 2, 2, },
+ { 24, 4, 1, },
+ { 36, 4, 2, },
+ { 48, 6, 1, },
+ { 72, 6, 2, },
+};
+
+static unsigned long audio_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mmp2_audio_clk *priv = container_of(hw, struct mmp2_audio_clk, audio_pll_hw);
+ unsigned int prediv;
+ unsigned int postdiv;
+ u32 aud_pll_ctrl0;
+ u32 aud_pll_ctrl1;
+
+ aud_pll_ctrl0 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+ aud_pll_ctrl0 &= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO_MASK |
+ SSPA_AUD_PLL_CTRL0_FRACT_MASK |
+ SSPA_AUD_PLL_CTRL0_ENA_DITHER |
+ SSPA_AUD_PLL_CTRL0_DIV_FBCCLK_MASK |
+ SSPA_AUD_PLL_CTRL0_DIV_MCLK_MASK |
+ SSPA_AUD_PLL_CTRL0_PU;
+
+ aud_pll_ctrl1 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+ aud_pll_ctrl1 &= SSPA_AUD_PLL_CTRL1_CLK_SEL_MASK |
+ SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN_MASK;
+
+ for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
+ if (predivs[prediv].parent_rate != parent_rate)
+ continue;
+ for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
+ unsigned long freq;
+ u32 val;
+
+ val = SSPA_AUD_PLL_CTRL0_ENA_DITHER;
+ val |= SSPA_AUD_PLL_CTRL0_PU;
+ val |= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(postdivs[postdiv].modulo);
+ val |= SSPA_AUD_PLL_CTRL0_FRACT(predivs[prediv].fract);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(predivs[prediv].fbcclk);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_MCLK(predivs[prediv].mclk);
+ if (val != aud_pll_ctrl0)
+ continue;
+
+ val = SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL;
+ val |= SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(postdivs[postdiv].pattern);
+ if (val != aud_pll_ctrl1)
+ continue;
+
+ freq = predivs[prediv].freq_vco;
+ freq /= postdivs[postdiv].divisor;
+ return freq;
+ }
+ }
+
+ return 0;
+}
+
+static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int prediv;
+ unsigned int postdiv;
+ long rounded = 0;
+
+ for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
+ if (predivs[prediv].parent_rate != *parent_rate)
+ continue;
+ for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
+ long freq = predivs[prediv].freq_vco;
+
+ freq /= postdivs[postdiv].divisor;
+ if (freq == rate)
+ return rate;
+ if (freq < rate)
+ continue;
+ if (rounded && freq > rounded)
+ continue;
+ rounded = freq;
+ }
+ }
+
+ return rounded;
+}
+
+static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mmp2_audio_clk *priv = container_of(hw, struct mmp2_audio_clk, audio_pll_hw);
+ unsigned int prediv;
+ unsigned int postdiv;
+ unsigned long val;
+
+ for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
+ if (predivs[prediv].parent_rate != parent_rate)
+ continue;
+
+ for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
+ if (rate * postdivs[postdiv].divisor != predivs[prediv].freq_vco)
+ continue;
+
+ val = SSPA_AUD_PLL_CTRL0_ENA_DITHER;
+ val |= SSPA_AUD_PLL_CTRL0_PU;
+ val |= SSPA_AUD_PLL_CTRL0_DIV_OCLK_MODULO(postdivs[postdiv].modulo);
+ val |= SSPA_AUD_PLL_CTRL0_FRACT(predivs[prediv].fract);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_FBCCLK(predivs[prediv].fbcclk);
+ val |= SSPA_AUD_PLL_CTRL0_DIV_MCLK(predivs[prediv].mclk);
+ writel(val, priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+
+ val = SSPA_AUD_PLL_CTRL1_CLK_SEL_AUDIO_PLL;
+ val |= SSPA_AUD_PLL_CTRL1_DIV_OCLK_PATTERN(postdivs[postdiv].pattern);
+ writel(val, priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+
+ return 0;
+ }
+ }
+
+ return -ERANGE;
+}
+
+static const struct clk_ops audio_pll_ops = {
+ .recalc_rate = audio_pll_recalc_rate,
+ .round_rate = audio_pll_round_rate,
+ .set_rate = audio_pll_set_rate,
+};
+
+static int register_clocks(struct mmp2_audio_clk *priv, struct device *dev)
+{
+ const struct clk_parent_data sspa_mux_parents[] = {
+ { .hw = &priv->audio_pll_hw },
+ { .fw_name = "i2s0" },
+ };
+ const struct clk_parent_data sspa1_mux_parents[] = {
+ { .hw = &priv->audio_pll_hw },
+ { .fw_name = "i2s1" },
+ };
+ int ret;
+
+ priv->audio_pll_hw.init = CLK_HW_INIT_FW_NAME("audio_pll",
+ "vctcxo", &audio_pll_ops,
+ CLK_SET_RATE_PARENT);
+ ret = devm_clk_hw_register(dev, &priv->audio_pll_hw);
+ if (ret)
+ return ret;
+
+ priv->sspa_mux.hw.init = CLK_HW_INIT_PARENTS_DATA("sspa_mux",
+ sspa_mux_parents, &clk_mux_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa_mux.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa_mux.mask = 1;
+ priv->sspa_mux.shift = SSPA_AUD_CTRL_SSPA0_MUX_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa_mux.hw);
+ if (ret)
+ return ret;
+
+ priv->sysclk_div.hw.init = CLK_HW_INIT_HW("sys_div",
+ &priv->sspa_mux.hw, &clk_divider_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sysclk_div.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sysclk_div.shift = SSPA_AUD_CTRL_SYSCLK_DIV_SHIFT;
+ priv->sysclk_div.width = 6;
+ priv->sysclk_div.flags = CLK_DIVIDER_ONE_BASED;
+ priv->sysclk_div.flags |= CLK_DIVIDER_ROUND_CLOSEST;
+ priv->sysclk_div.flags |= CLK_DIVIDER_ALLOW_ZERO;
+ ret = devm_clk_hw_register(dev, &priv->sysclk_div.hw);
+ if (ret)
+ return ret;
+
+ priv->sysclk_gate.hw.init = CLK_HW_INIT_HW("sys_clk",
+ &priv->sysclk_div.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sysclk_gate.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sysclk_gate.bit_idx = SSPA_AUD_CTRL_SYSCLK_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sysclk_gate.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa0_div.hw.init = CLK_HW_INIT_HW("sspa0_div",
+ &priv->sspa_mux.hw, &clk_divider_ops, 0);
+ priv->sspa0_div.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa0_div.shift = SSPA_AUD_CTRL_SSPA0_DIV_SHIFT;
+ priv->sspa0_div.width = 6;
+ priv->sspa0_div.flags = CLK_DIVIDER_ONE_BASED;
+ priv->sspa0_div.flags |= CLK_DIVIDER_ROUND_CLOSEST;
+ priv->sspa0_div.flags |= CLK_DIVIDER_ALLOW_ZERO;
+ ret = devm_clk_hw_register(dev, &priv->sspa0_div.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa0_gate.hw.init = CLK_HW_INIT_HW("sspa0_clk",
+ &priv->sspa0_div.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa0_gate.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa0_gate.bit_idx = SSPA_AUD_CTRL_SSPA0_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa0_gate.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa1_mux.hw.init = CLK_HW_INIT_PARENTS_DATA("sspa1_mux",
+ sspa1_mux_parents, &clk_mux_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa1_mux.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa1_mux.mask = 1;
+ priv->sspa1_mux.shift = SSPA_AUD_CTRL_SSPA1_MUX_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa1_mux.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa1_div.hw.init = CLK_HW_INIT_HW("sspa1_div",
+ &priv->sspa1_mux.hw, &clk_divider_ops, 0);
+ priv->sspa1_div.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa1_div.shift = SSPA_AUD_CTRL_SSPA1_DIV_SHIFT;
+ priv->sspa1_div.width = 6;
+ priv->sspa1_div.flags = CLK_DIVIDER_ONE_BASED;
+ priv->sspa1_div.flags |= CLK_DIVIDER_ROUND_CLOSEST;
+ priv->sspa1_div.flags |= CLK_DIVIDER_ALLOW_ZERO;
+ ret = devm_clk_hw_register(dev, &priv->sspa1_div.hw);
+ if (ret)
+ return ret;
+
+ priv->sspa1_gate.hw.init = CLK_HW_INIT_HW("sspa1_clk",
+ &priv->sspa1_div.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ priv->sspa1_gate.reg = priv->mmio_base + SSPA_AUD_CTRL;
+ priv->sspa1_gate.bit_idx = SSPA_AUD_CTRL_SSPA1_SHIFT;
+ ret = devm_clk_hw_register(dev, &priv->sspa1_gate.hw);
+ if (ret)
+ return ret;
+
+ priv->clk_data.hws[MMP2_CLK_AUDIO_SYSCLK] = &priv->sysclk_gate.hw;
+ priv->clk_data.hws[MMP2_CLK_AUDIO_SSPA0] = &priv->sspa0_gate.hw;
+ priv->clk_data.hws[MMP2_CLK_AUDIO_SSPA1] = &priv->sspa1_gate.hw;
+ priv->clk_data.num = MMP2_CLK_AUDIO_NR_CLKS;
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ &priv->clk_data);
+}
+
+static int mmp2_audio_clk_probe(struct platform_device *pdev)
+{
+ struct mmp2_audio_clk *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev,
+ struct_size(priv, clk_data.hws,
+ MMP2_CLK_AUDIO_NR_CLKS),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->mmio_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->mmio_base))
+ return PTR_ERR(priv->mmio_base);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "audio");
+ if (ret)
+ goto destroy_pm_clk;
+
+ ret = register_clocks(priv, &pdev->dev);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mmp2_audio_clk_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+{
+ struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
+
+ priv->aud_ctrl = readl(priv->mmio_base + SSPA_AUD_CTRL);
+ priv->aud_pll_ctrl0 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+ priv->aud_pll_ctrl1 = readl(priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+ pm_clk_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+{
+ struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
+
+ pm_clk_resume(dev);
+ writel(priv->aud_ctrl, priv->mmio_base + SSPA_AUD_CTRL);
+ writel(priv->aud_pll_ctrl0, priv->mmio_base + SSPA_AUD_PLL_CTRL0);
+ writel(priv->aud_pll_ctrl1, priv->mmio_base + SSPA_AUD_PLL_CTRL1);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
+ SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
+};
+
+static const struct of_device_id mmp2_audio_clk_of_match[] = {
+ { .compatible = "marvell,mmp2-audio-clock" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mmp2_audio_clk_of_match);
+
+static struct platform_driver mmp2_audio_clk_driver = {
+ .driver = {
+ .name = "mmp2-audio-clock",
+ .of_match_table = of_match_ptr(mmp2_audio_clk_of_match),
+ .pm = &mmp2_audio_clk_pm_ops,
+ },
+ .probe = mmp2_audio_clk_probe,
+ .remove = mmp2_audio_clk_remove,
+};
+module_platform_driver(mmp2_audio_clk_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Clock driver for MMP2 Audio subsystem");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index fabc09aca6c4..48f592bd633d 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -28,13 +28,15 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
unsigned long *prate)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
- unsigned long rate = 0, prev_rate;
+ u64 rate = 0, prev_rate;
int i;
for (i = 0; i < factor->ftbl_cnt; i++) {
prev_rate = rate;
- rate = (((*prate / 10000) * factor->ftbl[i].den) /
- (factor->ftbl[i].num * factor->masks->factor)) * 10000;
+ rate = *prate;
+ rate *= factor->ftbl[i].den;
+ do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+
if (rate > drate)
break;
}
@@ -54,6 +56,7 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
struct mmp_clk_factor *factor = to_clk_factor(hw);
struct mmp_clk_factor_masks *masks = factor->masks;
unsigned int val, num, den;
+ u64 rate;
val = readl_relaxed(factor->base);
@@ -66,8 +69,11 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
if (!den)
return 0;
- return (((parent_rate / 10000) * den) /
- (num * factor->masks->factor)) * 10000;
+ rate = parent_rate;
+ rate *= den;
+ do_div(rate, num * factor->masks->factor);
+
+ return rate;
}
/* Configures new clock rate*/
@@ -78,12 +84,14 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
struct mmp_clk_factor_masks *masks = factor->masks;
int i;
unsigned long val;
- unsigned long rate = 0;
unsigned long flags = 0;
+ u64 rate = 0;
for (i = 0; i < factor->ftbl_cnt; i++) {
- rate = (((prate / 10000) * factor->ftbl[i].den) /
- (factor->ftbl[i].num * factor->masks->factor)) * 10000;
+ rate = prate;
+ rate *= factor->ftbl[i].den;
+ do_div(rate, factor->ftbl[i].num * factor->masks->factor);
+
if (rate > drate)
break;
}
@@ -140,7 +148,10 @@ static int clk_factor_init(struct clk_hw *hw)
val &= ~(masks->den_mask << masks->den_shift);
val |= (factor->ftbl[0].den & masks->den_mask) <<
masks->den_shift;
+ }
+ if (!(val & masks->enable_mask) || i >= factor->ftbl_cnt) {
+ val |= masks->enable_mask;
writel(val, factor->base);
}
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 52dc8b43acd9..67208aea94c5 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -17,8 +17,10 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of_address.h>
+#include <linux/clk.h>
#include <dt-bindings/clock/marvell,mmp2.h>
+#include <dt-bindings/power/marvell,mmp2.h>
#include "clk.h"
#include "reset.h"
@@ -45,6 +47,10 @@
#define APBC_SSP1 0x54
#define APBC_SSP2 0x58
#define APBC_SSP3 0x5c
+#define APBC_THERMAL0 0x90
+#define APBC_THERMAL1 0x98
+#define APBC_THERMAL2 0x9c
+#define APBC_THERMAL3 0xa0
#define APMU_SDH0 0x54
#define APMU_SDH1 0x58
#define APMU_SDH2 0xe8
@@ -55,18 +61,19 @@
#define APMU_DISP1 0x110
#define APMU_CCIC0 0x50
#define APMU_CCIC1 0xf4
-#define APBC_THERMAL0 0x90
-#define APBC_THERMAL1 0x98
-#define APBC_THERMAL2 0x9c
-#define APBC_THERMAL3 0xa0
#define APMU_USBHSIC0 0xf8
#define APMU_USBHSIC1 0xfc
#define APMU_GPU 0xcc
+#define APMU_AUDIO 0x10c
+#define APMU_CAMERA 0x1fc
#define MPMU_FCCR 0x8
#define MPMU_POSR 0x10
#define MPMU_UART_PLL 0x14
#define MPMU_PLL2_CR 0x34
+#define MPMU_I2S0_PLL 0x40
+#define MPMU_I2S1_PLL 0x44
+#define MPMU_ACGR 0x1024
/* MMP3 specific below */
#define MPMU_PLL3_CR 0x50
#define MPMU_PLL3_CTRL1 0x58
@@ -82,6 +89,8 @@ enum mmp2_clk_model {
struct mmp2_clk_unit {
struct mmp_clk_unit unit;
enum mmp2_clk_model model;
+ struct genpd_onecell_data pd_data;
+ struct generic_pm_domain *pm_domains[MMP2_NR_POWER_DOMAINS];
void __iomem *mpmu_base;
void __iomem *apmu_base;
void __iomem *apbc_base;
@@ -91,6 +100,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
{MMP2_CLK_CLK32, "clk32", NULL, 0, 32768},
{MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
{MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
+ {0, "i2s_pll", NULL, 0, 99666667},
};
static struct mmp_param_pll_clk pll_clks[] = {
@@ -139,7 +149,35 @@ static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
{.num = 3521, .den = 689}, /*19.23MHZ */
};
-static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
+static struct mmp_clk_factor_masks i2s_factor_masks = {
+ .factor = 2,
+ .num_mask = 0x7fff,
+ .den_mask = 0x1fff,
+ .num_shift = 0,
+ .den_shift = 15,
+ .enable_mask = 0xd0000000,
+};
+
+static struct mmp_clk_factor_tbl i2s_factor_tbl[] = {
+ {.num = 24868, .den = 511}, /* 2.0480 MHz */
+ {.num = 28003, .den = 793}, /* 2.8224 MHz */
+ {.num = 24941, .den = 1025}, /* 4.0960 MHz */
+ {.num = 28003, .den = 1586}, /* 5.6448 MHz */
+ {.num = 31158, .den = 2561}, /* 8.1920 MHz */
+ {.num = 16288, .den = 1845}, /* 11.2896 MHz */
+ {.num = 20772, .den = 2561}, /* 12.2880 MHz */
+ {.num = 8144, .den = 1845}, /* 22.5792 MHz */
+ {.num = 10386, .den = 2561}, /* 24.5760 MHz */
+};
+
+static DEFINE_SPINLOCK(acgr_lock);
+
+static struct mmp_param_gate_clk mpmu_gate_clks[] = {
+ {MMP2_CLK_I2S0, "i2s0_clk", "i2s0_pll", CLK_SET_RATE_PARENT, MPMU_ACGR, 0x200000, 0x200000, 0x0, 0, &acgr_lock},
+ {MMP2_CLK_I2S1, "i2s1_clk", "i2s1_pll", CLK_SET_RATE_PARENT, MPMU_ACGR, 0x100000, 0x100000, 0x0, 0, &acgr_lock},
+};
+
+static void mmp2_main_clk_init(struct mmp2_clk_unit *pxa_unit)
{
struct clk *clk;
struct mmp_clk_unit *unit = &pxa_unit->unit;
@@ -166,6 +204,20 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
&uart_factor_masks, uart_factor_tbl,
ARRAY_SIZE(uart_factor_tbl), NULL);
mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk);
+
+ mmp_clk_register_factor("i2s0_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_I2S0_PLL,
+ &i2s_factor_masks, i2s_factor_tbl,
+ ARRAY_SIZE(i2s_factor_tbl), NULL);
+ mmp_clk_register_factor("i2s1_pll", "pll1_4",
+ CLK_SET_RATE_PARENT,
+ pxa_unit->mpmu_base + MPMU_I2S1_PLL,
+ &i2s_factor_masks, i2s_factor_tbl,
+ ARRAY_SIZE(i2s_factor_tbl), NULL);
+
+ mmp_register_gate_clks(unit, mpmu_gate_clks, pxa_unit->mpmu_base,
+ ARRAY_SIZE(mpmu_gate_clks));
}
static DEFINE_SPINLOCK(uart0_lock);
@@ -271,6 +323,8 @@ static u32 mmp2_gpu_bus_parent_table[] = { 0x0000, 0x0020, 0x0030,
static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"};
static const char * const mmp3_gpu_gc_parent_names[] = {"pll1", "pll2", "pll1_p", "pll2_p"};
+static DEFINE_SPINLOCK(audio_lock);
+
static struct mmp_clk_mix_config ccic0_mix_config = {
.reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
};
@@ -326,6 +380,7 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
{MMP2_CLK_GPU_BUS, "gpu_bus_clk", "gpu_bus_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0xa, 0xa, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock},
+ {MMP2_CLK_AUDIO, "audio_clk", "audio_mix_clk", CLK_SET_RATE_PARENT, APMU_AUDIO, 0x12, 0x12, 0x0, 0, &audio_lock},
};
static struct mmp_param_gate_clk mmp2_apmu_gate_clks[] = {
@@ -423,6 +478,41 @@ static void mmp2_clk_reset_init(struct device_node *np,
mmp_clk_reset_register(np, cells, nr_resets);
}
+static void mmp2_pm_domain_init(struct device_node *np,
+ struct mmp2_clk_unit *pxa_unit)
+{
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ pxa_unit->pm_domains[MMP2_POWER_DOMAIN_GPU]
+ = mmp_pm_domain_register("gpu",
+ pxa_unit->apmu_base + APMU_GPU,
+ 0x0600, 0x40003, 0x18000c, 0, &gpu_lock);
+ } else {
+ pxa_unit->pm_domains[MMP2_POWER_DOMAIN_GPU]
+ = mmp_pm_domain_register("gpu",
+ pxa_unit->apmu_base + APMU_GPU,
+ 0x8600, 0x00003, 0x00000c,
+ MMP_PM_DOMAIN_NO_DISABLE, &gpu_lock);
+ }
+ pxa_unit->pd_data.num_domains++;
+
+ pxa_unit->pm_domains[MMP2_POWER_DOMAIN_AUDIO]
+ = mmp_pm_domain_register("audio",
+ pxa_unit->apmu_base + APMU_AUDIO,
+ 0x600, 0x2, 0, 0, &audio_lock);
+ pxa_unit->pd_data.num_domains++;
+
+ if (pxa_unit->model == CLK_MODEL_MMP3) {
+ pxa_unit->pm_domains[MMP3_POWER_DOMAIN_CAMERA]
+ = mmp_pm_domain_register("camera",
+ pxa_unit->apmu_base + APMU_CAMERA,
+ 0x600, 0, 0, 0, NULL);
+ pxa_unit->pd_data.num_domains++;
+ }
+
+ pxa_unit->pd_data.domains = pxa_unit->pm_domains;
+ of_genpd_add_provider_onecell(np, &pxa_unit->pd_data);
+}
+
static void __init mmp2_clk_init(struct device_node *np)
{
struct mmp2_clk_unit *pxa_unit;
@@ -454,9 +544,11 @@ static void __init mmp2_clk_init(struct device_node *np)
goto unmap_apmu_region;
}
+ mmp2_pm_domain_init(np, pxa_unit);
+
mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
- mmp2_pll_init(pxa_unit);
+ mmp2_main_clk_init(pxa_unit);
mmp2_apb_periph_clk_init(pxa_unit);
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index 20dc1e5dd756..55ac05379781 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -3,6 +3,7 @@
#define __MACH_MMP_CLK_H
#include <linux/clk-provider.h>
+#include <linux/pm_domain.h>
#include <linux/clkdev.h>
#define APBC_NO_BUS_CTRL BIT(0)
@@ -16,6 +17,7 @@ struct mmp_clk_factor_masks {
unsigned int den_mask;
unsigned int num_shift;
unsigned int den_shift;
+ unsigned int enable_mask;
};
struct mmp_clk_factor_tbl {
@@ -251,4 +253,13 @@ void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
int nr_clks);
void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
struct clk *clk);
+
+/* Power islands */
+#define MMP_PM_DOMAIN_NO_DISABLE BIT(0)
+
+struct generic_pm_domain *mmp_pm_domain_register(const char *name,
+ void __iomem *reg,
+ u32 power_on, u32 reset, u32 clock_enable,
+ unsigned int flags, spinlock_t *lock);
+
#endif
diff --git a/drivers/clk/mmp/pwr-island.c b/drivers/clk/mmp/pwr-island.c
new file mode 100644
index 000000000000..ab57c0e995c1
--- /dev/null
+++ b/drivers/clk/mmp/pwr-island.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MMP PMU power island support
+ *
+ * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "clk.h"
+
+#define to_mmp_pm_domain(genpd) container_of(genpd, struct mmp_pm_domain, genpd)
+
+struct mmp_pm_domain {
+ struct generic_pm_domain genpd;
+ void __iomem *reg;
+ spinlock_t *lock;
+ u32 power_on;
+ u32 reset;
+ u32 clock_enable;
+ unsigned int flags;
+};
+
+static int mmp_pm_domain_power_on(struct generic_pm_domain *genpd)
+{
+ struct mmp_pm_domain *pm_domain = to_mmp_pm_domain(genpd);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pm_domain->lock)
+ spin_lock_irqsave(pm_domain->lock, flags);
+
+ val = readl(pm_domain->reg);
+
+ /* Turn on the power island */
+ val |= pm_domain->power_on;
+ writel(val, pm_domain->reg);
+
+ /* Disable isolation */
+ val |= 0x100;
+ writel(val, pm_domain->reg);
+
+ /* Some blocks need to be reset after a power up */
+ if (pm_domain->reset || pm_domain->clock_enable) {
+ u32 after_power_on = val;
+
+ val &= ~pm_domain->reset;
+ writel(val, pm_domain->reg);
+
+ val |= pm_domain->clock_enable;
+ writel(val, pm_domain->reg);
+
+ val |= pm_domain->reset;
+ writel(val, pm_domain->reg);
+
+ writel(after_power_on, pm_domain->reg);
+ }
+
+ if (pm_domain->lock)
+ spin_unlock_irqrestore(pm_domain->lock, flags);
+
+ return 0;
+}
+
+static int mmp_pm_domain_power_off(struct generic_pm_domain *genpd)
+{
+ struct mmp_pm_domain *pm_domain = to_mmp_pm_domain(genpd);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (pm_domain->flags & MMP_PM_DOMAIN_NO_DISABLE)
+ return 0;
+
+ if (pm_domain->lock)
+ spin_lock_irqsave(pm_domain->lock, flags);
+
+ /* Turn off and isolate the power island. */
+ val = readl(pm_domain->reg);
+ val &= ~pm_domain->power_on;
+ val &= ~0x100;
+ writel(val, pm_domain->reg);
+
+ if (pm_domain->lock)
+ spin_unlock_irqrestore(pm_domain->lock, flags);
+
+ return 0;
+}
+
+struct generic_pm_domain *mmp_pm_domain_register(const char *name,
+ void __iomem *reg,
+ u32 power_on, u32 reset, u32 clock_enable,
+ unsigned int flags, spinlock_t *lock)
+{
+ struct mmp_pm_domain *pm_domain;
+
+ pm_domain = kzalloc(sizeof(*pm_domain), GFP_KERNEL);
+ if (!pm_domain)
+ return ERR_PTR(-ENOMEM);
+
+ pm_domain->reg = reg;
+ pm_domain->power_on = power_on;
+ pm_domain->reset = reset;
+ pm_domain->clock_enable = clock_enable;
+ pm_domain->flags = flags;
+ pm_domain->lock = lock;
+
+ pm_genpd_init(&pm_domain->genpd, NULL, true);
+ pm_domain->genpd.name = name;
+ pm_domain->genpd.power_on = mmp_pm_domain_power_on;
+ pm_domain->genpd.power_off = mmp_pm_domain_power_off;
+
+ return &pm_domain->genpd;
+}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index abb121f8de52..cde6ca90a06b 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -142,6 +142,14 @@ config MSM_GCC_8916
Say Y if you want to use devices such as UART, SPI, I2C, USB,
SD/eMMC, display, graphics, camera etc.
+config MSM_GCC_8939
+ tristate "MSM8939 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8939 devices.
+ Say Y if you want to use devices such as UART, SPI, I2C, USB,
+ SD/eMMC, display, graphics, camera etc.
+
config MSM_GCC_8960
tristate "APQ8064/MSM8960 Global Clock Controller"
help
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 691efbf7e81f..7ec8561a1270 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
+obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 4e329a7baf2b..17e4a5a2a9fd 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -260,7 +260,7 @@ static struct clk_pll gpll0 = {
.l_reg = 0x21004,
.m_reg = 0x21008,
.n_reg = 0x2100c,
- .config_reg = 0x21014,
+ .config_reg = 0x21010,
.mode_reg = 0x21000,
.status_reg = 0x2101c,
.status_bit = 17,
@@ -287,7 +287,7 @@ static struct clk_pll gpll1 = {
.l_reg = 0x20004,
.m_reg = 0x20008,
.n_reg = 0x2000c,
- .config_reg = 0x20014,
+ .config_reg = 0x20010,
.mode_reg = 0x20000,
.status_reg = 0x2001c,
.status_bit = 17,
@@ -314,7 +314,7 @@ static struct clk_pll gpll2 = {
.l_reg = 0x4a004,
.m_reg = 0x4a008,
.n_reg = 0x4a00c,
- .config_reg = 0x4a014,
+ .config_reg = 0x4a010,
.mode_reg = 0x4a000,
.status_reg = 0x4a01c,
.status_bit = 17,
@@ -341,7 +341,7 @@ static struct clk_pll bimc_pll = {
.l_reg = 0x23004,
.m_reg = 0x23008,
.n_reg = 0x2300c,
- .config_reg = 0x23014,
+ .config_reg = 0x23010,
.mode_reg = 0x23000,
.status_reg = 0x2301c,
.status_bit = 17,
diff --git a/drivers/clk/qcom/gcc-msm8939.c b/drivers/clk/qcom/gcc-msm8939.c
new file mode 100644
index 000000000000..778354f82b1e
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8939.c
@@ -0,0 +1,3988 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8939.h>
+#include <dt-bindings/reset/qcom,gcc-msm8939.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL0_AUX,
+ P_BIMC,
+ P_GPLL1,
+ P_GPLL1_AUX,
+ P_GPLL2,
+ P_GPLL2_AUX,
+ P_GPLL3,
+ P_GPLL3_AUX,
+ P_GPLL4,
+ P_GPLL5,
+ P_GPLL5_AUX,
+ P_GPLL5_EARLY,
+ P_GPLL6,
+ P_GPLL6_AUX,
+ P_SLEEP_CLK,
+ P_DSI0_PHYPLL_BYTE,
+ P_DSI0_PHYPLL_DSI,
+ P_EXT_PRI_I2S,
+ P_EXT_SEC_I2S,
+ P_EXT_MCLK,
+};
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21010,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll1 = {
+ .l_reg = 0x20004,
+ .m_reg = 0x20008,
+ .n_reg = 0x2000c,
+ .config_reg = 0x20010,
+ .mode_reg = 0x20000,
+ .status_reg = 0x2001c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll1_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a010,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll bimc_pll = {
+ .l_reg = 0x23004,
+ .m_reg = 0x23008,
+ .n_reg = 0x2300c,
+ .config_reg = 0x23010,
+ .mode_reg = 0x23000,
+ .status_reg = 0x2301c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap bimc_pll_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "bimc_pll_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &bimc_pll.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll3 = {
+ .l_reg = 0x22004,
+ .m_reg = 0x22008,
+ .n_reg = 0x2200c,
+ .config_reg = 0x22010,
+ .mode_reg = 0x22000,
+ .status_reg = 0x2201c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll3_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+/* GPLL3 at 1100 MHz, main output enabled. */
+static const struct pll_config gpll3_config = {
+ .l = 57,
+ .m = 7,
+ .n = 24,
+ .vco_val = 0x0,
+ .vco_mask = BIT(20),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = BIT(9) | BIT(8),
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+};
+
+static struct clk_pll gpll4 = {
+ .l_reg = 0x24004,
+ .m_reg = 0x24008,
+ .n_reg = 0x2400c,
+ .config_reg = 0x24010,
+ .mode_reg = 0x24000,
+ .status_reg = 0x2401c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll4_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+/* GPLL4 at 1200 MHz, main output enabled. */
+static struct pll_config gpll4_config = {
+ .l = 62,
+ .m = 1,
+ .n = 2,
+ .vco_val = 0x0,
+ .vco_mask = BIT(20),
+ .pre_div_val = 0x0,
+ .pre_div_mask = BIT(12),
+ .post_div_val = 0x0,
+ .post_div_mask = BIT(9) | BIT(8),
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+};
+
+static struct clk_pll gpll5 = {
+ .l_reg = 0x25004,
+ .m_reg = 0x25008,
+ .n_reg = 0x2500c,
+ .config_reg = 0x25010,
+ .mode_reg = 0x25000,
+ .status_reg = 0x2501c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll5_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll6 = {
+ .l_reg = 0x37004,
+ .m_reg = 0x37008,
+ .n_reg = 0x3700c,
+ .config_reg = 0x37010,
+ .mode_reg = 0x37000,
+ .status_reg = 0x3701c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll6_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_vote",
+ .parent_data = &(const struct clk_parent_data) {
+ .hw = &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_BIMC, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_bimc_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &bimc_pll_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll6a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL6_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll6a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2a_gpll3_gpll6a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2_AUX, 4 },
+ { P_GPLL3, 2 },
+ { P_GPLL6_AUX, 3 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll3_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 3 },
+ { P_GPLL4, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll2_gpll4_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_gpll6_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+ { P_GPLL6, 2 },
+ { P_SLEEP_CLK, 6 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .hw = &gpll6_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+};
+
+static const struct parent_map gcc_xo_dsibyte_map[] = {
+ { P_XO, 0, },
+ { P_DSI0_PHYPLL_BYTE, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_dsibyte_parent_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_xo_gpll0a_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+ { P_DSI0_PHYPLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0a_dsibyte_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pllbyte", .name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_DSI0_PHYPLL_DSI, 2 },
+ { P_GPLL6, 3 },
+ { P_GPLL3_AUX, 4 },
+ { P_GPLL0_AUX, 5 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll3_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0a_dsiphy_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+ { P_DSI0_PHYPLL_DSI, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0a_dsiphy_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .fw_name = "dsi0pll", .name = "dsi0pll" },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll5a_gpll6_bimc_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL5_AUX, 3 },
+ { P_GPLL6, 2 },
+ { P_BIMC, 4 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll5_vote.hw },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &bimc_pll_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll1_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_PRI_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_epi2s_emclk_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_pri_i2s", .name = "ext_pri_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_SEC_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_esi2s_emclk_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_sec_i2s", .name = "ext_sec_i2s" },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_MCLK, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const struct clk_parent_data gcc_xo_gpll1_emclk_sleep_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll1_vote.hw },
+ { .fw_name = "ext_mclk", .name = "ext_mclk" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL6, 1 },
+ { P_GPLL0, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll6_gpll0_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_xo_gpll6_gpll0a_map[] = {
+ { P_XO, 0 },
+ { P_GPLL6, 1 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll6_gpll0a_parent_data[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 system_noc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x26004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll6a_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_bfdcd_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll6a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_ddr_clk_src",
+ .parent_data = gcc_xo_gpll0_bimc_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ahb_clk[] = {
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133330000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_apss_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0_1_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(220000000, P_GPLL3, 5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(310000000, P_GPLL2_AUX, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2_AUX, 2, 0, 0),
+ F(550000000, P_GPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_map,
+ .freq_tbl = ftbl_gcc_oxili_gfx3d_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2a_gpll3_gpll6a_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_vfe0_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177780000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ F(480000000, P_GPLL4, 2.5, 0, 0),
+ F(600000000, P_GPLL4, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll4_map,
+ .freq_tbl = ftbl_gcc_camss_vfe0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_gpll4_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_6_apps_clk[] = {
+ F(3686400, P_GPLL0, 1, 72, 15625),
+ F(7372800, P_GPLL0, 1, 144, 15625),
+ F(14745600, P_GPLL0, 1, 288, 15625),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_map,
+ .freq_tbl = ftbl_gcc_camss_cci_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = gcc_xo_gpll0a_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_jpeg0_clk[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_camss_jpeg0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
+ F(24000000, P_GPLL0, 1, 1, 45),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_gpll6_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_gpll6_sleep_map,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_gpll6_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0_1phytimer_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_map,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cpp_clk[] = {
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(465000000, P_GPLL2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_map,
+ .freq_tbl = ftbl_gcc_camss_cpp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll2_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_crypto_clk[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+/* This is not in the documentation but is in the downstream driver */
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_crypto_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "crypto_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x08004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x09004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x0a004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1a_sleep_map,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1a_sleep_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsibyte_parent_data,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_esc_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d060,
+ .hid_width = 5,
+ .parent_map = gcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_gcc_mdss_esc_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = gcc_xo_dsibyte_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_dsibyte_map,
+ .freq_tbl = ftbl_gcc_mdss_esc_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_xo_dsibyte_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
+ F(50000000, P_GPLL0_AUX, 16, 0, 0),
+ F(80000000, P_GPLL0_AUX, 10, 0, 0),
+ F(100000000, P_GPLL0_AUX, 8, 0, 0),
+ F(160000000, P_GPLL0_AUX, 5, 0, 0),
+ F(177780000, P_GPLL0_AUX, 4.5, 0, 0),
+ F(200000000, P_GPLL0_AUX, 4, 0, 0),
+ F(266670000, P_GPLL0_AUX, 3, 0, 0),
+ F(307200000, P_GPLL1, 2, 0, 0),
+ F(366670000, P_GPLL3_AUX, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_map,
+ .freq_tbl = ftbl_gcc_mdss_mdp_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = gcc_xo_gpll1_dsiphy_gpll6_gpll3a_gpll0a_parent_data,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsiphy_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_dsiphy_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_xo_gpll0a_dsiphy_parent_data,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0a_map,
+ .freq_tbl = ftbl_gcc_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = gcc_xo_gpll0a_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+/* This is not in the documentation but is in the downstream driver */
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_pdm2_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc_apps_clk[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 10, 1, 4),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(177770000, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_sdcc_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apss_tcu_clk[] = {
+ F(154285000, P_GPLL6, 7, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_tcu_clk_src = {
+ .cmd_rcgr = 0x1207c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll5a_gpll6_bimc_map,
+ .freq_tbl = ftbl_gcc_apss_tcu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_tcu_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266500000, P_BIMC, 4, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(533000000, P_BIMC, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll5a_gpll6_bimc_map,
+ .freq_tbl = ftbl_gcc_bimc_gpu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_gpu_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll5a_gpll6_bimc_parent_data,
+ .num_parents = 5,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F(80000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_fs_system_clk[] = {
+ F(64000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_system_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_fs_system_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_system_clk_src",
+ .parent_data = gcc_xo_gpll6_gpll0_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_fs_ic_clk[] = {
+ F(60000000, P_GPLL6, 1, 1, 18),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_ic_clk_src = {
+ .cmd_rcgr = 0x3f034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_usb_fs_ic_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_ic_clk_src",
+ .parent_data = gcc_xo_gpll6_gpll0a_parent_data,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = {
+ F(3200000, P_XO, 6, 0, 0),
+ F(6400000, P_XO, 3, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
+ .cmd_rcgr = 0x1c010,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll0_gpll1_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_ahbfabric_clk_src",
+ .parent_data = gcc_xo_gpll0_gpll1_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
+ .halt_reg = 0x1c028,
+ .clkr = {
+ .enable_reg = 0x1c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
+ .halt_reg = 0x1c024,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_ahbfabric_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = {
+ F(128000, P_XO, 10, 1, 15),
+ F(256000, P_XO, 5, 1, 15),
+ F(384000, P_XO, 5, 1, 10),
+ F(512000, P_XO, 5, 2, 15),
+ F(576000, P_XO, 5, 3, 20),
+ F(705600, P_GPLL1, 16, 1, 80),
+ F(768000, P_XO, 5, 1, 5),
+ F(800000, P_XO, 5, 5, 24),
+ F(1024000, P_XO, 5, 4, 15),
+ F(1152000, P_XO, 1, 3, 50),
+ F(1411200, P_GPLL1, 16, 1, 40),
+ F(1536000, P_XO, 1, 2, 25),
+ F(1600000, P_XO, 12, 0, 0),
+ F(1728000, P_XO, 5, 9, 20),
+ F(2048000, P_XO, 5, 8, 15),
+ F(2304000, P_XO, 5, 3, 5),
+ F(2400000, P_XO, 8, 0, 0),
+ F(2822400, P_GPLL1, 16, 1, 20),
+ F(3072000, P_XO, 5, 4, 5),
+ F(4096000, P_GPLL1, 9, 2, 49),
+ F(4800000, P_XO, 4, 0, 0),
+ F(5644800, P_GPLL1, 16, 1, 10),
+ F(6144000, P_GPLL1, 7, 1, 21),
+ F(8192000, P_GPLL1, 9, 4, 49),
+ F(9600000, P_XO, 2, 0, 0),
+ F(11289600, P_GPLL1, 16, 1, 5),
+ F(12288000, P_GPLL1, 7, 2, 21),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
+ .cmd_rcgr = 0x1c054,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_data = gcc_xo_gpll1_epi2s_emclk_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
+ .halt_reg = 0x1c068,
+ .clkr = {
+ .enable_reg = 0x1c068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_pri_i2s_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_lpaif_pri_i2s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
+ .cmd_rcgr = 0x1c06c,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_data = gcc_xo_gpll1_esi2s_emclk_sleep_parent_data,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
+ .halt_reg = 0x1c080,
+ .clkr = {
+ .enable_reg = 0x1c080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_sec_i2s_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_lpaif_sec_i2s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
+ .cmd_rcgr = 0x1c084,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_data = gcc_xo_gpll1_emclk_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
+ .halt_reg = 0x1c098,
+ .clkr = {
+ .enable_reg = 0x1c098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_aux_i2s_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_lpaif_aux_i2s_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_xo_clk_src = {
+ .cmd_rcgr = 0x1c034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_xo_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_xo_clk_src",
+ .parent_data = gcc_xo_sleep_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
+ .halt_reg = 0x1c04c,
+ .clkr = {
+ .enable_reg = 0x1c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_avsync_xo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_stc_xo_clk = {
+ .halt_reg = 0x1c050,
+ .clkr = {
+ .enable_reg = 0x1c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_stc_xo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &ultaudio_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_codec_clk[] = {
+ F(9600000, P_XO, 2, 0, 0),
+ F(12288000, P_XO, 1, 16, 25),
+ F(19200000, P_XO, 1, 0, 0),
+ F(11289600, P_EXT_MCLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 codec_digcodec_clk_src = {
+ .cmd_rcgr = 0x1c09c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_codec_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "codec_digcodec_clk_src",
+ .parent_data = gcc_xo_gpll1_emclk_sleep_parent_data,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_codec_digcodec_clk = {
+ .halt_reg = 0x1c0b0,
+ .clkr = {
+ .enable_reg = 0x1c0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_codec_digcodec_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &codec_digcodec_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
+ .halt_reg = 0x1c000,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_mport_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_sway_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
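+/* Venus video codec core rates, produced by integer and half-integer division of GPLL0 */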
+static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_gcc_venus0_vcodec0_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_xo_gpll0_parent_data,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x01004,
+ .clkr = {
+ .enable_reg = 0x01004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04020,
+ .clkr = {
+ .enable_reg = 0x04020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0401c,
+ .clkr = {
+ .enable_reg = 0x0401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05020,
+ .clkr = {
+ .enable_reg = 0x05020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0501c,
+ .clkr = {
+ .enable_reg = 0x0501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06020,
+ .clkr = {
+ .enable_reg = 0x06020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0601c,
+ .clkr = {
+ .enable_reg = 0x0601c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup5_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07020,
+ .clkr = {
+ .enable_reg = 0x07020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0701c,
+ .clkr = {
+ .enable_reg = 0x0701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_qup6_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_gp0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &jpeg0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_jpeg_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x56004,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cpp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cpp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &camss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_vfe_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gmem_clk = {
+ .halt_reg = 0x59024,
+ .clkr = {
+ .enable_reg = 0x59024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gmem_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x08000,
+ .clkr = {
+ .enable_reg = 0x08000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x09000,
+ .clkr = {
+ .enable_reg = 0x09000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x0a000,
+ .clkr = {
+ .enable_reg = 0x0a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_mdp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_vsync_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_cfg_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_ddr_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_jpeg_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_cfg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpp_tbu_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpp_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_rt_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_rt_tbu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &bimc_gpu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_fs_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ic_clk = {
+ .halt_reg = 0x3f030,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_fs_ic_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &usb_fs_ic_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_system_clk = {
+ .halt_reg = 0x3f004,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_fs_system_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &usb_fs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_hs_system_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &pcnoc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &system_noc_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core1_vcodec0_clk = {
+ .halt_reg = 0x4c034,
+ .clkr = {
+ .enable_reg = 0x4c034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus0_core1_vcodec0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
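+/* GDSCs: switchable power domains for the multimedia blocks, registered as generic power domains */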
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .pd = {
+ .name = "venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .pd = {
+ .name = "mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .pd = {
+ .name = "jpeg",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .pd = {
+ .name = "vfe",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .pd = {
+ .name = "oxili",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x4c030,
+ .pd = {
+ .name = "venus_core1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
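+/* DT binding index -> clk_regmap mapping for every clock exported by this driver */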
+static struct clk_regmap *gcc_msm8939_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_VOTE] = &gpll0_vote,
+ [BIMC_PLL] = &bimc_pll.clkr,
+ [BIMC_PLL_VOTE] = &bimc_pll_vote,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL1_VOTE] = &gpll1_vote,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL2_VOTE] = &gpll2_vote,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [SYSTEM_NOC_BFDCD_CLK_SRC] = &system_noc_bfdcd_clk_src.clkr,
+ [CAMSS_AHB_CLK_SRC] = &camss_ahb_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [APSS_TCU_CLK_SRC] = &apss_tcu_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr,
+ [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr,
+ [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr,
+ [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr,
+ [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr,
+ [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr,
+ [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr,
+ [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
+ [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_VOTE] = &gpll3_vote,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_VOTE] = &gpll4_vote,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL5_VOTE] = &gpll5_vote,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_VOTE] = &gpll6_vote,
+ [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_MDP_RT_TBU_CLK] = &gcc_mdp_rt_tbu_clk.clkr,
+ [USB_FS_SYSTEM_CLK_SRC] = &usb_fs_system_clk_src.clkr,
+ [USB_FS_IC_CLK_SRC] = &usb_fs_ic_clk_src.clkr,
+ [GCC_USB_FS_AHB_CLK] = &gcc_usb_fs_ahb_clk.clkr,
+ [GCC_USB_FS_IC_CLK] = &gcc_usb_fs_ic_clk.clkr,
+ [GCC_USB_FS_SYSTEM_CLK] = &gcc_usb_fs_system_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_CORE1_VCODEC0_CLK] = &gcc_venus0_core1_vcodec0_clk.clkr,
+ [GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+};
+
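+/* GDSC index -> power-domain mapping, matching the binding header */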
+static struct gdsc *gcc_msm8939_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+};
+
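+/* Block reset (BCR) register offsets exposed through the reset controller interface */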
+static const struct qcom_reset_map gcc_msm8939_resets[] = {
+ [GCC_BLSP1_BCR] = { 0x01000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04018 },
+ [GCC_BLSP1_UART3_BCR] = { 0x04038 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05018 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06018 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07018 },
+ [GCC_IMEM_BCR] = { 0x0e000 },
+ [GCC_SMMU_BCR] = { 0x12000 },
+ [GCC_APSS_TCU_BCR] = { 0x12050 },
+ [GCC_SMMU_XPU_BCR] = { 0x12054 },
+ [GCC_PCNOC_TBU_BCR] = { 0x12058 },
+ [GCC_PRNG_BCR] = { 0x13000 },
+ [GCC_BOOT_ROM_BCR] = { 0x13008 },
+ [GCC_CRYPTO_BCR] = { 0x16000 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000 },
+ [GCC_AUDIO_CORE_BCR] = { 0x1c008 },
+ [GCC_ULT_AUDIO_BCR] = { 0x1c0b4 },
+ [GCC_DEHR_BCR] = { 0x1f000 },
+ [GCC_SYSTEM_NOC_BCR] = { 0x26000 },
+ [GCC_PCNOC_BCR] = { 0x27018 },
+ [GCC_TCSR_BCR] = { 0x28000 },
+ [GCC_QDSS_BCR] = { 0x29000 },
+ [GCC_DCD_BCR] = { 0x2a000 },
+ [GCC_MSG_RAM_BCR] = { 0x2b000 },
+ [GCC_MPM_BCR] = { 0x2c000 },
+ [GCC_SPMI_BCR] = { 0x2e000 },
+ [GCC_SPDM_BCR] = { 0x2f000 },
+ [GCC_MM_SPDM_BCR] = { 0x2f024 },
+ [GCC_BIMC_BCR] = { 0x31000 },
+ [GCC_RBCPR_BCR] = { 0x33000 },
+ [GCC_TLMM_BCR] = { 0x34000 },
+ [GCC_CAMSS_CSI2_BCR] = { 0x3c038 },
+ [GCC_CAMSS_CSI2PHY_BCR] = { 0x3c044 },
+ [GCC_CAMSS_CSI2RDI_BCR] = { 0x3c04c },
+ [GCC_CAMSS_CSI2PIX_BCR] = { 0x3c054 },
+ [GCC_USB_FS_BCR] = { 0x3f000 },
+ [GCC_USB_HS_BCR] = { 0x41000 },
+ [GCC_USB2A_PHY_BCR] = { 0x41028 },
+ [GCC_SDCC1_BCR] = { 0x42000 },
+ [GCC_SDCC2_BCR] = { 0x43000 },
+ [GCC_PDM_BCR] = { 0x44000 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048 },
+ [GCC_MMSS_BCR] = { 0x4b000 },
+ [GCC_VENUS0_BCR] = { 0x4c014 },
+ [GCC_MDSS_BCR] = { 0x4d074 },
+ [GCC_CAMSS_PHY0_BCR] = { 0x4e018 },
+ [GCC_CAMSS_CSI0_BCR] = { 0x4e038 },
+ [GCC_CAMSS_CSI0PHY_BCR] = { 0x4e044 },
+ [GCC_CAMSS_CSI0RDI_BCR] = { 0x4e04c },
+ [GCC_CAMSS_CSI0PIX_BCR] = { 0x4e054 },
+ [GCC_CAMSS_PHY1_BCR] = { 0x4f018 },
+ [GCC_CAMSS_CSI1_BCR] = { 0x4f038 },
+ [GCC_CAMSS_CSI1PHY_BCR] = { 0x4f044 },
+ [GCC_CAMSS_CSI1RDI_BCR] = { 0x4f04c },
+ [GCC_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [GCC_CAMSS_ISPIF_BCR] = { 0x50000 },
+ [GCC_BLSP1_QUP4_SPI_APPS_CBCR] = { 0x0501c },
+ [GCC_CAMSS_CCI_BCR] = { 0x51014 },
+ [GCC_CAMSS_MCLK0_BCR] = { 0x52014 },
+ [GCC_CAMSS_MCLK1_BCR] = { 0x53014 },
+ [GCC_CAMSS_GP0_BCR] = { 0x54014 },
+ [GCC_CAMSS_GP1_BCR] = { 0x55014 },
+ [GCC_CAMSS_TOP_BCR] = { 0x56000 },
+ [GCC_CAMSS_MICRO_BCR] = { 0x56008 },
+ [GCC_CAMSS_JPEG_BCR] = { 0x57018 },
+ [GCC_CAMSS_VFE_BCR] = { 0x58030 },
+ [GCC_CAMSS_CSI_VFE0_BCR] = { 0x5804c },
+ [GCC_OXILI_BCR] = { 0x59018 },
+ [GCC_GMEM_BCR] = { 0x5902c },
+ [GCC_CAMSS_AHB_BCR] = { 0x5a018 },
+ [GCC_CAMSS_MCLK2_BCR] = { 0x5c014 },
+ [GCC_MDP_TBU_BCR] = { 0x62000 },
+ [GCC_GFX_TBU_BCR] = { 0x63000 },
+ [GCC_GFX_TCU_BCR] = { 0x64000 },
+ [GCC_MSS_TBU_AXI_BCR] = { 0x65000 },
+ [GCC_MSS_TBU_GSS_AXI_BCR] = { 0x66000 },
+ [GCC_MSS_TBU_Q6_AXI_BCR] = { 0x67000 },
+ [GCC_GTCU_AHB_BCR] = { 0x68000 },
+ [GCC_SMMU_CFG_BCR] = { 0x69000 },
+ [GCC_VFE_TBU_BCR] = { 0x6a000 },
+ [GCC_VENUS_TBU_BCR] = { 0x6b000 },
+ [GCC_JPEG_TBU_BCR] = { 0x6c000 },
+ [GCC_PRONTO_TBU_BCR] = { 0x6d000 },
+ [GCC_CPP_TBU_BCR] = { 0x6e000 },
+ [GCC_MDP_RT_TBU_BCR] = { 0x6f000 },
+ [GCC_SMMU_CATS_BCR] = { 0x7c000 },
+};
+
+static const struct regmap_config gcc_msm8939_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8939_desc = {
+ .config = &gcc_msm8939_regmap_config,
+ .clks = gcc_msm8939_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8939_clocks),
+ .resets = gcc_msm8939_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8939_resets),
+ .gdscs = gcc_msm8939_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8939_gdscs),
+};
+
+static const struct of_device_id gcc_msm8939_match_table[] = {
+ { .compatible = "qcom,gcc-msm8939" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8939_match_table);
+
+static int gcc_msm8939_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8939_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true);
+ clk_pll_configure_sr_hpm_lp(&gpll4, regmap, &gpll4_config, true);
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8939_desc, regmap);
+}
+
+static struct platform_driver gcc_msm8939_driver = {
+ .probe = gcc_msm8939_probe,
+ .driver = {
+ .name = "gcc-msm8939",
+ .of_match_table = gcc_msm8939_match_table,
+ },
+};
+
+static int __init gcc_msm8939_init(void)
+{
+ return platform_driver_register(&gcc_msm8939_driver);
+}
+core_initcall(gcc_msm8939_init);
+
+static void __exit gcc_msm8939_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8939_driver);
+}
+module_exit(gcc_msm8939_exit);
+
+MODULE_DESCRIPTION("Qualcomm GCC MSM8939 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index df1d7056436c..9d7016bcd680 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -1110,6 +1110,27 @@ static struct clk_rcg2 ufs_axi_clk_src = {
},
};
+static const struct freq_tbl ftbl_ufs_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_unipro_core_clk_src = {
+ .cmd_rcgr = 0x76028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_ufs_unipro_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
@@ -2549,6 +2570,11 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "ufs_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2904,6 +2930,7 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
[TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [UFS_UNIPRO_CORE_CLK_SRC] = &ufs_unipro_core_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 6a51b5b5fc19..ca4383e3a02a 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -390,6 +390,7 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(51200000, P_GPLL6_OUT_MAIN, 7.5, 0, 0),
F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
@@ -405,8 +406,8 @@ static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.name = "gcc_qupv3_wrap0_s0_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -414,15 +415,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
.cmd_rcgr = 0x17034,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.name = "gcc_qupv3_wrap0_s1_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -430,15 +431,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
.cmd_rcgr = 0x17164,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.name = "gcc_qupv3_wrap0_s2_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -446,15 +447,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
.cmd_rcgr = 0x17294,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.name = "gcc_qupv3_wrap0_s3_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -462,15 +463,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
.cmd_rcgr = 0x173c4,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.name = "gcc_qupv3_wrap0_s4_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -478,15 +479,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
.cmd_rcgr = 0x174f4,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.name = "gcc_qupv3_wrap0_s5_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -494,15 +495,15 @@ static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
.cmd_rcgr = 0x17624,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.name = "gcc_qupv3_wrap1_s0_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -510,15 +511,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
.cmd_rcgr = 0x18018,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.name = "gcc_qupv3_wrap1_s1_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -526,15 +527,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
.cmd_rcgr = 0x18148,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.name = "gcc_qupv3_wrap1_s2_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -542,15 +543,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
.cmd_rcgr = 0x18278,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.name = "gcc_qupv3_wrap1_s3_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -558,15 +559,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
.cmd_rcgr = 0x183a8,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.name = "gcc_qupv3_wrap1_s4_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -574,15 +575,15 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
.cmd_rcgr = 0x184d8,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
};
static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.name = "gcc_qupv3_wrap1_s5_clk_src",
- .parent_data = gcc_parent_data_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
};
@@ -590,7 +591,7 @@ static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
.cmd_rcgr = 0x18608,
.mnd_width = 16,
.hid_width = 5,
- .parent_map = gcc_parent_map_0,
+ .parent_map = gcc_parent_map_1,
.freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
.clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
};
@@ -816,6 +817,26 @@ static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_sec_ctrl_clk_src[] = {
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sec_ctrl_clk_src = {
+ .cmd_rcgr = 0x3d030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sec_ctrl_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sec_ctrl_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
.halt_reg = 0x82024,
.halt_check = BRANCH_HALT_DELAY,
@@ -2406,6 +2427,7 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
[GCC_MSS_NAV_AXI_CLK] = &gcc_mss_nav_axi_clk.clkr,
[GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr,
[GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr,
+ [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr,
};
static const struct qcom_reset_map gcc_sc7180_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c
index 732bc7c937e6..72524cf11048 100644
--- a/drivers/clk/qcom/gcc-sm8150.c
+++ b/drivers/clk/qcom/gcc-sm8150.c
@@ -1616,6 +1616,36 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = {
},
};
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gpu_gpll0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gpu_iref_clk = {
.halt_reg = 0x8c010,
.halt_check = BRANCH_HALT,
@@ -1698,6 +1728,36 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = {
},
};
+static struct clk_branch gcc_npu_gpll0_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_npu_gpll0_div_clk_src = {
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_npu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_npu_gpll0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_npu_trig_clk = {
.halt_reg = 0x4d00c,
.halt_check = BRANCH_VOTED,
@@ -2812,6 +2872,45 @@ static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = {
},
};
+/* The clock is fed externally, so skip the halt check (BRANCH_HALT_SKIP) */
+static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* The clock is fed externally, so skip the halt check (BRANCH_HALT_SKIP) */
+static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x750ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* The clock is fed externally, so skip the halt check (BRANCH_HALT_SKIP) */
+static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_card_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_card_unipro_core_clk = {
.halt_reg = 0x75058,
.halt_check = BRANCH_HALT,
@@ -2992,6 +3091,45 @@ static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
},
};
+/* The clock is fed externally, so skip the halt check (BRANCH_HALT_SKIP) */
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* The clock is fed externally, so skip the halt check (BRANCH_HALT_SKIP) */
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x770ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* The clock is fed externally, so skip the halt check (BRANCH_HALT_SKIP) */
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x77018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
.halt_reg = 0x77058,
.halt_check = BRANCH_HALT,
@@ -3374,12 +3512,16 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
[GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
[GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
[GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr,
[GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
[GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr,
+ [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
+ [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
[GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr,
[GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
[GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
@@ -3484,6 +3626,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr,
[GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] =
&gcc_ufs_card_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr,
+ [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr,
+ [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr,
[GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr,
[GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] =
&gcc_ufs_card_unipro_core_clk_src.clkr,
@@ -3501,6 +3646,9 @@ static struct clk_regmap *gcc_sm8150_clocks[] = {
[GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
[GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
[GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
[GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
[GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
&gcc_ufs_phy_unipro_core_clk_src.clkr,
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index a250f59708d8..04944f11659b 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "gdsc.h"
@@ -112,6 +113,12 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
int ret;
u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;
+ if (status == GDSC_ON && sc->rsupply) {
+ ret = regulator_enable(sc->rsupply);
+ if (ret < 0)
+ return ret;
+ }
+
ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
if (ret)
return ret;
@@ -143,6 +150,13 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
ret = gdsc_poll_status(sc, status);
WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");
+
+ if (!ret && status == GDSC_OFF && sc->rsupply) {
+ ret = regulator_disable(sc->rsupply);
+ if (ret < 0)
+ return ret;
+ }
+
return ret;
}
@@ -371,6 +385,15 @@ int gdsc_register(struct gdsc_desc *desc,
if (!data->domains)
return -ENOMEM;
+ for (i = 0; i < num; i++) {
+ if (!scs[i] || !scs[i]->supply)
+ continue;
+
+ scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
+ if (IS_ERR(scs[i]->rsupply))
+ return PTR_ERR(scs[i]->rsupply);
+ }
+
data->num_domains = num;
for (i = 0; i < num; i++) {
if (!scs[i])
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 64cdc8cf0d4d..c36fc26dcdff 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -10,6 +10,7 @@
#include <linux/pm_domain.h>
struct regmap;
+struct regulator;
struct reset_controller_dev;
/**
@@ -52,6 +53,9 @@ struct gdsc {
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
+
+ const char *supply;
+ struct regulator *rsupply;
};
struct gdsc_desc {
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 6c7592ddf8bb..3b3aac07fb2d 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -3064,7 +3064,9 @@ static struct gdsc gpu_gx_gdsc = {
.name = "gpu_gx",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &gpu_gdsc.pd,
.flags = CLAMP_IO,
+ .supply = "vdd-gfx",
};
static struct clk_regmap *mmcc_msm8996_clocks[] = {
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index ac2dd92ce2ef..9eb79bf90643 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -8,6 +8,7 @@ config CLK_RENESAS
select CLK_R7S9210 if ARCH_R7S9210
select CLK_R8A73A4 if ARCH_R8A73A4
select CLK_R8A7740 if ARCH_R8A7740
+ select CLK_R8A7742 if ARCH_R8A7742
select CLK_R8A7743 if ARCH_R8A7743 || ARCH_R8A7744
select CLK_R8A7745 if ARCH_R8A7745
select CLK_R8A77470 if ARCH_R8A77470
@@ -55,6 +56,10 @@ config CLK_R8A7740
select CLK_RENESAS_CPG_MSTP
select CLK_RENESAS_DIV6
+config CLK_R8A7742
+ bool "RZ/G1H clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN2_CPG
+
config CLK_R8A7743
bool "RZ/G1M clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
@@ -90,12 +95,10 @@ config CLK_R8A7779
config CLK_R8A7790
bool "R-Car H2 clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
- select CLK_RENESAS_DIV6
config CLK_R8A7791
bool "R-Car M2-W/N clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
- select CLK_RENESAS_DIV6
config CLK_R8A7792
bool "R-Car V2H clock support" if COMPILE_TEST
@@ -104,7 +107,6 @@ config CLK_R8A7792
config CLK_R8A7794
bool "R-Car E2 clock support" if COMPILE_TEST
select CLK_RCAR_GEN2_CPG
- select CLK_RENESAS_DIV6
config CLK_R8A7795
bool "R-Car H3 clock support" if COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 4a722bc5aac7..a4066f9b34ef 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_CLK_RZA1) += clk-rz.o
obj-$(CONFIG_CLK_R7S9210) += r7s9210-cpg-mssr.o
obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o
obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o
+obj-$(CONFIG_CLK_R8A7742) += r8a7742-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o
diff --git a/drivers/clk/renesas/r8a7742-cpg-mssr.c b/drivers/clk/renesas/r8a7742-cpg-mssr.c
new file mode 100644
index 000000000000..e919828668a4
--- /dev/null
+++ b/drivers/clk/renesas/r8a7742-cpg-mssr.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a7742 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7742-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7742_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7742_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A7742_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
+ DEF_BASE("lb", R8A7742_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("sdh", R8A7742_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7742_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("sd1", R8A7742_CLK_SD1, CLK_TYPE_GEN2_SD1, CLK_PLL1),
+ DEF_BASE("qspi", R8A7742_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7742_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A7742_CLK_Z2, CLK_PLL1, 2, 1),
+ DEF_FIXED("zg", R8A7742_CLK_ZG, CLK_PLL1, 3, 1),
+ DEF_FIXED("zx", R8A7742_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7742_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7742_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("b", R8A7742_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7742_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7742_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7742_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7742_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7742_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7742_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7742_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7742_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7742_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7742_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7742_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7742_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7742_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+ DEF_DIV6P1("mmc1", R8A7742_CLK_MMC1, CLK_PLL1_DIV2, 0x244),
+};
+
+static const struct mssr_mod_clk r8a7742_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7742_CLK_MP),
+ DEF_MOD("vcp1", 100, R8A7742_CLK_ZS),
+ DEF_MOD("vcp0", 101, R8A7742_CLK_ZS),
+ DEF_MOD("vpc1", 102, R8A7742_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7742_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7742_CLK_P),
+ DEF_MOD("3dg", 112, R8A7742_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7742_CLK_ZS),
+ DEF_MOD("fdp1-2", 117, R8A7742_CLK_ZS),
+ DEF_MOD("fdp1-1", 118, R8A7742_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7742_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7742_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7742_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7742_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7742_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7742_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7742_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7742_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7742_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7742_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7742_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7742_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7742_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7742_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7742_CLK_MP),
+ DEF_MOD("msiof3", 215, R8A7742_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7742_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7742_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7742_CLK_ZS),
+ DEF_MOD("iic2", 300, R8A7742_CLK_HP),
+ DEF_MOD("tpu0", 304, R8A7742_CLK_CP),
+ DEF_MOD("mmcif1", 305, R8A7742_CLK_MMC1),
+ DEF_MOD("scif2", 310, R8A7742_CLK_P),
+ DEF_MOD("sdhi3", 311, R8A7742_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7742_CLK_SD2),
+ DEF_MOD("sdhi1", 313, R8A7742_CLK_SD1),
+ DEF_MOD("sdhi0", 314, R8A7742_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7742_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7742_CLK_HP),
+ DEF_MOD("pciec", 319, R8A7742_CLK_MP),
+ DEF_MOD("iic1", 323, R8A7742_CLK_HP),
+ DEF_MOD("usb3.0", 328, R8A7742_CLK_MP),
+ DEF_MOD("cmt1", 329, R8A7742_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7742_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7742_CLK_HP),
+ DEF_MOD("rwdt", 402, R8A7742_CLK_R),
+ DEF_MOD("irqc", 407, R8A7742_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7742_CLK_ZS),
+ DEF_MOD("audio-dmac1", 501, R8A7742_CLK_HP),
+ DEF_MOD("audio-dmac0", 502, R8A7742_CLK_HP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7742_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7742_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7742_CLK_HP),
+ DEF_MOD("hscif1", 716, R8A7742_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7742_CLK_ZS),
+ DEF_MOD("scif1", 720, R8A7742_CLK_P),
+ DEF_MOD("scif0", 721, R8A7742_CLK_P),
+ DEF_MOD("du2", 722, R8A7742_CLK_ZX),
+ DEF_MOD("du1", 723, R8A7742_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7742_CLK_ZX),
+ DEF_MOD("lvds1", 725, R8A7742_CLK_ZX),
+ DEF_MOD("lvds0", 726, R8A7742_CLK_ZX),
+ DEF_MOD("r-gp2d", 807, R8A7742_CLK_ZX),
+ DEF_MOD("vin3", 808, R8A7742_CLK_ZG),
+ DEF_MOD("vin2", 809, R8A7742_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7742_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7742_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7742_CLK_HP),
+ DEF_MOD("ether", 813, R8A7742_CLK_P),
+ DEF_MOD("sata1", 814, R8A7742_CLK_ZS),
+ DEF_MOD("sata0", 815, R8A7742_CLK_ZS),
+ DEF_MOD("imr-x2-1", 820, R8A7742_CLK_ZG),
+ DEF_MOD("imr-x2-0", 821, R8A7742_CLK_HP),
+ DEF_MOD("imr-lsx2-1", 822, R8A7742_CLK_P),
+ DEF_MOD("imr-lsx2-0", 823, R8A7742_CLK_ZS),
+ DEF_MOD("gpio5", 907, R8A7742_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7742_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7742_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7742_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7742_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7742_CLK_CP),
+ DEF_MOD("can1", 915, R8A7742_CLK_P),
+ DEF_MOD("can0", 916, R8A7742_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7742_CLK_QSPI),
+ DEF_MOD("iicdvfs", 926, R8A7742_CLK_CP),
+ DEF_MOD("i2c3", 928, R8A7742_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7742_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7742_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7742_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7742_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7742_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a7742_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(402), /* RWDT */
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x172/2 x208/2 x106
+ * 0 0 1 15 x172/2 x208/2 x88
+ * 0 1 0 20 x130/2 x156/2 x80
+ * 0 1 1 20 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult PLL3 mult */
+ { 1, 208, 106, },
+ { 1, 208, 88, },
+ { 1, 156, 80, },
+ { 1, 156, 66, },
+ { 2, 240, 122, },
+ { 2, 240, 102, },
+ { 2, 208, 106, },
+ { 2, 208, 88, },
+};
+
+static int __init r8a7742_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7742_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7742_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7742_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7742_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7742_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7742_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7742_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7742_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 1907ee195a08..d900f6bf53d0 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R9A09G032 clock driver
+ * R9A06G032 clock driver
*
* Copyright (C) 2018 Renesas Electronics Europe Limited
*
@@ -338,8 +338,8 @@ clk_rdesc_get(struct r9a06g032_priv *clocks,
}
/*
- * This implements the R9A09G032 clock gate 'driver'. We cannot use the system's
- * clock gate framework as the gates on the R9A09G032 have a special enabling
+ * This implements the R9A06G032 clock gate 'driver'. We cannot use the system's
+ * clock gate framework as the gates on the R9A06G032 have a special enabling
* sequence, therefore we use this little proxy.
*/
struct r9a06g032_clk_gate {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index a2663fbbd7a5..dcb6e2706d37 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -673,6 +673,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r7s9210_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A7742
+ {
+ .compatible = "renesas,r8a7742-cpg-mssr",
+ .data = &r8a7742_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A7743
{
.compatible = "renesas,r8a7743-cpg-mssr",
@@ -812,7 +818,8 @@ static int cpg_mssr_suspend_noirq(struct device *dev)
/* Save module registers with bits under our control */
for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
if (priv->smstpcr_saved[reg].mask)
- priv->smstpcr_saved[reg].val =
+ priv->smstpcr_saved[reg].val = priv->stbyctrl ?
+ readb(priv->base + STBCR(reg)) :
readl(priv->base + SMSTPCR(reg));
}
@@ -872,8 +879,9 @@ static int cpg_mssr_resume_noirq(struct device *dev)
}
if (!i)
- dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
- priv->base + SMSTPCR(reg), oldval & mask);
+ dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
+ priv->stbyctrl ? "STB" : "SMSTP", reg,
+ oldval & mask);
}
return 0;
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 3b852ba0ecec..55a18ef0efaf 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -155,6 +155,7 @@ struct cpg_mssr_info {
};
extern const struct cpg_mssr_info r7s9210_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7742_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77470_cpg_mssr_info;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index c9e5a1fb6653..fea33399a632 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -540,7 +540,7 @@ static const struct samsung_div_clock exynos5800_div_clks[] __initconst = {
static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
- GATE_BUS_TOP, 24, 0, 0),
+ GATE_BUS_TOP, 24, CLK_IS_CRITICAL, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
};
@@ -943,25 +943,25 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
- GATE_BUS_TOP, 5, 0, 0),
+ GATE_BUS_TOP, 5, CLK_IS_CRITICAL, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
- GATE_BUS_TOP, 8, 0, 0),
+ GATE_BUS_TOP, 8, CLK_IS_CRITICAL, 0),
GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio",
GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
- GATE_BUS_TOP, 13, 0, 0),
+ GATE_BUS_TOP, 13, CLK_IS_CRITICAL, 0),
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
- GATE_BUS_TOP, 16, 0, 0),
+ GATE_BUS_TOP, 16, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
@@ -1161,9 +1161,11 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE_IP_GSCL1, 3, 0, 0),
GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
- GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
- GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "dout_gscl_blk_333",
+ GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13,
+ CLK_IS_CRITICAL, 0),
+ GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "dout_gscl_blk_333",
GATE_IP_GSCL1, 16, 0, 0),
GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
GATE_IP_GSCL1, 17, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 4b1aa9382ad2..6f29ecd0442e 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -1706,7 +1706,8 @@ static const struct samsung_gate_clock peric_gate_clks[] __initconst = {
GATE(CLK_SCLK_PCM1, "sclk_pcm1", "sclk_pcm1_peric",
ENABLE_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_I2S1, "sclk_i2s1", "sclk_i2s1_peric",
- ENABLE_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0),
+ ENABLE_SCLK_PERIC, 6,
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
GATE(CLK_SCLK_SPI2, "sclk_spi2", "sclk_spi2_peric", ENABLE_SCLK_PERIC,
5, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI1, "sclk_spi1", "sclk_spi1_peric", ENABLE_SCLK_PERIC,
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 5f30fe72cd51..c7aba1e1af70 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -387,7 +387,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c2450_gates));
samsung_clk_register_alias(ctx, s3c2450_aliases,
ARRAY_SIZE(s3c2450_aliases));
- /* fall through - as s3c2450 extends the s3c2416 clocks */
+ fallthrough; /* as s3c2450 extends the s3c2416 clocks */
case S3C2416:
samsung_clk_register_div(ctx, s3c2416_dividers,
ARRAY_SIZE(s3c2416_dividers));
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index ce5aa7802eb8..bf736f8d201a 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_ARCH_SOCFPGA) += clk.o clk-gate.o clk-pll.o clk-periph.o
obj-$(CONFIG_ARCH_SOCFPGA) += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
obj-$(CONFIG_ARCH_STRATIX10) += clk-s10.o
obj-$(CONFIG_ARCH_STRATIX10) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
+obj-$(CONFIG_ARCH_AGILEX) += clk-agilex.o
+obj-$(CONFIG_ARCH_AGILEX) += clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
new file mode 100644
index 000000000000..699527f7e764
--- /dev/null
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/agilex-clock.h>
+
+#include "stratix10-clk.h"
+
+static const struct clk_parent_data pll_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data cntr_mux[] = {
+ { .fw_name = "main_pll",
+ .name = "main_pll", },
+ { .fw_name = "periph_pll",
+ .name = "periph_pll", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data boot_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+};
+
+static const struct clk_parent_data mpu_free_mux[] = {
+ { .fw_name = "main_pll_c0",
+ .name = "main_pll_c0", },
+ { .fw_name = "peri_pll_c0",
+ .name = "peri_pll_c0", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data noc_free_mux[] = {
+ { .fw_name = "main_pll_c1",
+ .name = "main_pll_c1", },
+ { .fw_name = "peri_pll_c1",
+ .name = "peri_pll_c1", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emaca_free_mux[] = {
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emacb_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emac_ptp_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data gpio_db_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data psi_ref_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data sdmmc_free_mux[] = {
+ { .fw_name = "main_pll_c3",
+ .name = "main_pll_c3", },
+ { .fw_name = "peri_pll_c3",
+ .name = "peri_pll_c3", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data s2f_usr0_free_mux[] = {
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data s2f_usr1_free_mux[] = {
+ { .fw_name = "main_pll_c2",
+ .name = "main_pll_c2", },
+ { .fw_name = "peri_pll_c2",
+ .name = "peri_pll_c2", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data mpu_mux[] = {
+ { .fw_name = "mpu_free_clk",
+ .name = "mpu_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data s2f_usr0_mux[] = {
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emac_mux[] = {
+ { .fw_name = "emaca_free_clk",
+ .name = "emaca_free_clk", },
+ { .fw_name = "emacb_free_clk",
+ .name = "emacb_free_clk", },
+};
+
+static const struct clk_parent_data noc_mux[] = {
+ { .fw_name = "noc_free_clk",
+ .name = "noc_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+/* clocks in AO (always on) controller */
+static const struct stratix10_pll_clock agilex_pll_clks[] = {
+ { AGILEX_BOOT_CLK, "boot_clk", boot_mux, ARRAY_SIZE(boot_mux), 0,
+ 0x0},
+ { AGILEX_MAIN_PLL_CLK, "main_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0x48},
+ { AGILEX_PERIPH_PLL_CLK, "periph_pll", pll_mux, ARRAY_SIZE(pll_mux),
+ 0, 0x9c},
+};
+
+static const struct stratix10_perip_c_clock agilex_main_perip_c_clks[] = {
+ { AGILEX_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0, 0x58},
+ { AGILEX_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0, 0x5C},
+ { AGILEX_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0, 0x64},
+ { AGILEX_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0, 0x68},
+ { AGILEX_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0, 0xAC},
+ { AGILEX_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0, 0xB0},
+ { AGILEX_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0, 0xB8},
+ { AGILEX_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0, 0xBC},
+};
+
+static const struct stratix10_perip_cnt_clock agilex_main_perip_cnt_clks[] = {
+ { AGILEX_MPU_FREE_CLK, "mpu_free_clk", NULL, mpu_free_mux, ARRAY_SIZE(mpu_free_mux),
+ 0, 0x3C, 0, 0, 0},
+ { AGILEX_NOC_FREE_CLK, "noc_free_clk", NULL, noc_free_mux, ARRAY_SIZE(noc_free_mux),
+ 0, 0x40, 0, 0, 1},
+ { AGILEX_L4_SYS_FREE_CLK, "l4_sys_free_clk", "noc_free_clk", NULL, 1, 0,
+ 0, 4, 0, 0},
+ { AGILEX_NOC_CLK, "noc_clk", NULL, noc_mux, ARRAY_SIZE(noc_mux),
+ 0, 0, 0, 0x30, 1},
+ { AGILEX_EMAC_A_FREE_CLK, "emaca_free_clk", NULL, emaca_free_mux, ARRAY_SIZE(emaca_free_mux),
+ 0, 0xD4, 0, 0x88, 0},
+ { AGILEX_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux),
+ 0, 0xD8, 0, 0x88, 1},
+ { AGILEX_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux,
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0xDC, 0, 0x88, 2},
+ { AGILEX_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3},
+ { AGILEX_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux,
+ ARRAY_SIZE(sdmmc_free_mux), 0, 0xE4, 0, 0x88, 4},
+ { AGILEX_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", NULL, s2f_usr0_free_mux,
+ ARRAY_SIZE(s2f_usr0_free_mux), 0, 0xE8, 0, 0, 0},
+ { AGILEX_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", NULL, s2f_usr1_free_mux,
+ ARRAY_SIZE(s2f_usr1_free_mux), 0, 0xEC, 0, 0x88, 5},
+ { AGILEX_PSI_REF_FREE_CLK, "psi_ref_free_clk", NULL, psi_ref_free_mux,
+ ARRAY_SIZE(psi_ref_free_mux), 0, 0xF0, 0, 0x88, 6},
+};
+
+static const struct stratix10_gate_clock agilex_gate_clks[] = {
+ { AGILEX_MPU_CLK, "mpu_clk", NULL, mpu_mux, ARRAY_SIZE(mpu_mux), 0, 0x24,
+ 0, 0, 0, 0, 0x30, 0, 0},
+ { AGILEX_MPU_PERIPH_CLK, "mpu_periph_clk", "mpu_clk", NULL, 1, 0, 0x24,
+ 0, 0, 0, 0, 0, 0, 4},
+ { AGILEX_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x24,
+ 0, 0, 0, 0, 0, 0, 2},
+ { AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 1, 0x44, 0, 2, 0, 0, 0},
+ { AGILEX_L4_MP_CLK, "l4_mp_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 2, 0x44, 8, 2, 0, 0, 0},
+ /*
+	 * The l4_sp_clk feeds a 100 MHz clock to various peripherals,
+	 * including the SP timers, and therefore must not be gated.
+ */
+ { AGILEX_L4_SP_CLK, "l4_sp_clk", "noc_clk", NULL, 1, CLK_IS_CRITICAL, 0x24,
+ 3, 0x44, 16, 2, 0, 0, 0},
+ { AGILEX_CS_AT_CLK, "cs_at_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 4, 0x44, 24, 2, 0, 0, 0},
+ { AGILEX_CS_TRACE_CLK, "cs_trace_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 4, 0x44, 26, 2, 0, 0, 0},
+ { AGILEX_CS_PDBG_CLK, "cs_pdbg_clk", "cs_at_clk", NULL, 1, 0, 0x24,
+ 4, 0x44, 28, 1, 0, 0, 0},
+ { AGILEX_CS_TIMER_CLK, "cs_timer_clk", "noc_clk", NULL, 1, 0, 0x24,
+ 5, 0, 0, 0, 0, 0, 0},
+ { AGILEX_S2F_USER0_CLK, "s2f_user0_clk", NULL, s2f_usr0_mux, ARRAY_SIZE(s2f_usr0_mux), 0, 0x24,
+ 6, 0, 0, 0, 0, 0, 0},
+ { AGILEX_EMAC0_CLK, "emac0_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 0, 0, 0, 0, 0x94, 26, 0},
+ { AGILEX_EMAC1_CLK, "emac1_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 1, 0, 0, 0, 0x94, 27, 0},
+ { AGILEX_EMAC2_CLK, "emac2_clk", NULL, emac_mux, ARRAY_SIZE(emac_mux), 0, 0x7C,
+ 2, 0, 0, 0, 0x94, 28, 0},
+ { AGILEX_EMAC_PTP_CLK, "emac_ptp_clk", "emac_ptp_free_clk", NULL, 1, 0, 0x7C,
+ 3, 0, 0, 0, 0, 0, 0},
+ { AGILEX_GPIO_DB_CLK, "gpio_db_clk", "gpio_db_free_clk", NULL, 1, 0, 0x7C,
+ 4, 0x98, 0, 16, 0, 0, 0},
+ { AGILEX_SDMMC_CLK, "sdmmc_clk", "sdmmc_free_clk", NULL, 1, 0, 0x7C,
+ 5, 0, 0, 0, 0, 0, 4},
+ { AGILEX_S2F_USER1_CLK, "s2f_user1_clk", "s2f_user1_free_clk", NULL, 1, 0, 0x7C,
+ 6, 0, 0, 0, 0, 0, 0},
+ { AGILEX_PSI_REF_CLK, "psi_ref_clk", "psi_ref_free_clk", NULL, 1, 0, 0x7C,
+ 7, 0, 0, 0, 0, 0, 0},
+ { AGILEX_USB_CLK, "usb_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
+ 8, 0, 0, 0, 0, 0, 0},
+ { AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C,
+ 9, 0, 0, 0, 0, 0, 0},
+ { AGILEX_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0x7C,
+ 10, 0, 0, 0, 0, 0, 0},
+};
+
+static int agilex_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_periph(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+ return 0;
+}
+
+static int agilex_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_cnt_periph(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int agilex_clk_register_gate(const struct stratix10_gate_clock *clks, int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = s10_register_gate(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static int agilex_clk_register_pll(const struct stratix10_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk *clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ clk = agilex_register_pll(&clks[i], base);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ continue;
+ }
+ data->clk_data.clks[clks[i].id] = clk;
+ }
+
+ return 0;
+}
+
+static struct stratix10_clock_data *__socfpga_agilex_clk_init(struct platform_device *pdev,
+ int nr_clks)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct stratix10_clock_data *clk_data;
+ struct clk **clk_table;
+ struct resource *res;
+ void __iomem *base;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ clk_data = devm_kzalloc(dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_data->base = base;
+ clk_table = devm_kcalloc(dev, nr_clks, sizeof(*clk_table), GFP_KERNEL);
+ if (!clk_table)
+ return ERR_PTR(-ENOMEM);
+
+ clk_data->clk_data.clks = clk_table;
+ clk_data->clk_data.clk_num = nr_clks;
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_data;
+}
+
+static int agilex_clkmgr_probe(struct platform_device *pdev)
+{
+ struct stratix10_clock_data *clk_data;
+
+ clk_data = __socfpga_agilex_clk_init(pdev, AGILEX_NUM_CLKS);
+ if (IS_ERR(clk_data))
+ return PTR_ERR(clk_data);
+
+ agilex_clk_register_pll(agilex_pll_clks, ARRAY_SIZE(agilex_pll_clks), clk_data);
+
+ agilex_clk_register_c_perip(agilex_main_perip_c_clks,
+ ARRAY_SIZE(agilex_main_perip_c_clks), clk_data);
+
+ agilex_clk_register_cnt_perip(agilex_main_perip_cnt_clks,
+ ARRAY_SIZE(agilex_main_perip_cnt_clks),
+ clk_data);
+
+ agilex_clk_register_gate(agilex_gate_clks, ARRAY_SIZE(agilex_gate_clks),
+ clk_data);
+ return 0;
+}
+
+static const struct of_device_id agilex_clkmgr_match_table[] = {
+ { .compatible = "intel,agilex-clkmgr",
+ .data = agilex_clkmgr_probe },
+ { }
+};
+
+static struct platform_driver agilex_clkmgr_driver = {
+ .probe = agilex_clkmgr_probe,
+ .driver = {
+ .name = "agilex-clkmgr",
+ .suppress_bind_attrs = true,
+ .of_match_table = agilex_clkmgr_match_table,
+ },
+};
+
+static int __init agilex_clk_init(void)
+{
+ return platform_driver_register(&agilex_clkmgr_driver);
+}
+core_initcall(agilex_clk_init);
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index 8be4722f6064..083b2ec21fdd 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -70,7 +70,6 @@ struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __io
struct clk *clk;
struct socfpga_gate_clk *socfpga_clk;
struct clk_init_data init;
- const char * const *parent_names = clks->parent_names;
const char *parent_name = clks->parent_name;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
@@ -108,7 +107,9 @@ struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __io
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names ? parent_names : &parent_name;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ if (init.parent_names == NULL)
+ init.parent_data = clks->parent_data;
socfpga_clk->hw.hw.init = &init;
clk = clk_register(NULL, &socfpga_clk->hw.hw);
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index dd6d4056e9de..397b77b89b16 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -81,7 +81,6 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
struct clk_init_data init;
const char *name = clks->name;
const char *parent_name = clks->parent_name;
- const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
@@ -94,7 +93,9 @@ struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks,
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names ? parent_names : &parent_name;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ if (init.parent_names == NULL)
+ init.parent_data = clks->parent_data;
periph_clk->hw.hw.init = &init;
@@ -114,7 +115,6 @@ struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks
struct clk_init_data init;
const char *name = clks->name;
const char *parent_name = clks->parent_name;
- const char * const *parent_names = clks->parent_names;
periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
if (WARN_ON(!periph_clk))
@@ -137,7 +137,9 @@ struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names ? parent_names : &parent_name;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ if (init.parent_names == NULL)
+ init.parent_data = clks->parent_data;
periph_clk->hw.hw.init = &init;
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 3816fc04b274..db54f7d806a0 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -58,7 +58,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
CLK_MGR_PLL_CLK_SRC_MASK;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
@@ -102,8 +102,6 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index a301bb22f36c..4e268953b7da 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -18,8 +18,12 @@
#define SOCFPGA_PLL_RESET_MASK 0x2
#define SOCFPGA_PLL_REFDIV_MASK 0x00003F00
#define SOCFPGA_PLL_REFDIV_SHIFT 8
+#define SOCFPGA_PLL_AREFDIV_MASK 0x00000F00
+#define SOCFPGA_PLL_DREFDIV_MASK 0x00003000
+#define SOCFPGA_PLL_DREFDIV_SHIFT 12
#define SOCFPGA_PLL_MDIV_MASK 0xFF000000
#define SOCFPGA_PLL_MDIV_SHIFT 24
+#define SOCFPGA_AGILEX_PLL_MDIV_MASK 0x000003FF
#define SWCTRLBTCLKSEL_MASK 0x200
#define SWCTRLBTCLKSEL_SHIFT 9
@@ -27,6 +31,27 @@
#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+static unsigned long agilex_clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk);
+ unsigned long arefdiv, reg, mdiv;
+ unsigned long long vco_freq;
+
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg);
+ arefdiv = (reg & SOCFPGA_PLL_AREFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+
+ vco_freq = (unsigned long long)parent_rate / arefdiv;
+
+ /* Read mdiv and fdiv from the fdbck register */
+ reg = readl(socfpgaclk->hw.reg + 0x24);
+ mdiv = reg & SOCFPGA_AGILEX_PLL_MDIV_MASK;
+
+ vco_freq = (unsigned long long)vco_freq * mdiv;
+ return (unsigned long)vco_freq;
+}
+
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -98,13 +123,19 @@ static int clk_pll_prepare(struct clk_hw *hwclk)
return 0;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops agilex_clk_pll_ops = {
+ .recalc_rate = agilex_clk_pll_recalc_rate,
+ .get_parent = clk_pll_get_parent,
+ .prepare = clk_pll_prepare,
+};
+
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
.prepare = clk_pll_prepare,
};
-static struct clk_ops clk_boot_ops = {
+static const struct clk_ops clk_boot_ops = {
.recalc_rate = clk_boot_clk_recalc_rate,
.get_parent = clk_boot_get_parent,
.prepare = clk_pll_prepare,
@@ -117,7 +148,6 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks,
struct socfpga_pll *pll_clk;
struct clk_init_data init;
const char *name = clks->name;
- const char * const *parent_names = clks->parent_names;
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (WARN_ON(!pll_clk))
@@ -134,12 +164,48 @@ struct clk *s10_register_pll(const struct stratix10_pll_clock *clks,
init.flags = clks->flags;
init.num_parents = clks->num_parents;
- init.parent_names = parent_names;
+ init.parent_names = NULL;
+ init.parent_data = clks->parent_data;
+ pll_clk->hw.hw.init = &init;
+
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+
+ clk = clk_register(NULL, &pll_clk->hw.hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(pll_clk);
+ return NULL;
+ }
+ return clk;
+}
+
+struct clk *agilex_register_pll(const struct stratix10_pll_clock *clks,
+ void __iomem *reg)
+{
+ struct clk *clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return NULL;
+
+ pll_clk->hw.reg = reg + clks->offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &agilex_clk_pll_ops;
+
+ init.name = name;
+ init.flags = clks->flags;
+
+ init.num_parents = clks->num_parents;
+ init.parent_names = NULL;
+ init.parent_data = clks->parent_data;
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
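The Agilex PLL rate calculation added above divides the reference clock by the AREFDIV field of the VCO1 register and multiplies by the 10-bit MDIV field of the feedback register at offset 0x24. Below is a minimal standalone sketch of that arithmetic; the 25 MHz reference and the register values in main() are made-up examples, not hardware defaults.

#include <stdio.h>

/* Mirrors agilex_clk_pll_recalc_rate(): rate = (parent / arefdiv) * mdiv. */
static unsigned long agilex_vco_rate(unsigned long parent_rate,
                                     unsigned long vco1, unsigned long fdbck)
{
        unsigned long arefdiv = (vco1 & 0x00000F00) >> 8;  /* AREFDIV field  */
        unsigned long mdiv = fdbck & 0x000003FF;           /* 10-bit MDIV    */
        unsigned long long vco = (unsigned long long)parent_rate / arefdiv;

        return (unsigned long)(vco * mdiv);
}

int main(void)
{
        /* e.g. 25 MHz osc1, arefdiv = 1, mdiv = 96 -> 2.4 GHz VCO */
        printf("%lu\n", agilex_vco_rate(25000000UL, 0x100, 96));
        return 0;
}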
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index dc65cc0fd3bd..e5fb786843f3 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -65,7 +65,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
CLK_MGR_PLL_CLK_SRC_MASK;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
@@ -105,8 +105,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
- clk_pll_ops.enable = clk_gate_ops.enable;
- clk_pll_ops.disable = clk_gate_ops.disable;
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index dea7c6c7d269..c1dfc9b34e4e 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,35 +12,137 @@
#include "stratix10-clk.h"
-static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk",};
-static const char * const cntr_mux[] = { "main_pll", "periph_pll",
- "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk"};
-static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
-
-static const char * const noc_free_mux[] = {"main_noc_base_clk",
- "peri_noc_base_clk",
- "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk"};
-
-static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
-static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
-static const char * const emac_ptp_free_mux[] = {"peri_emac_ptp_clk", "boot_clk"};
-static const char * const gpio_db_free_mux[] = {"peri_gpio_db_clk", "boot_clk"};
-static const char * const sdmmc_free_mux[] = {"main_sdmmc_clk", "boot_clk"};
-static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"};
-static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
-static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
-
-static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
-static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
-static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
-
-static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
- "peri_mpu_base_clk",
- "osc1", "cb-intosc-hs-div2-clk",
- "f2s-free-clk"};
+static const struct clk_parent_data pll_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1" },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk" },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk" },
+};
+
+static const struct clk_parent_data cntr_mux[] = {
+ { .fw_name = "main_pll",
+ .name = "main_pll", },
+ { .fw_name = "periph_pll",
+ .name = "periph_pll", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data boot_mux[] = {
+ { .fw_name = "osc1",
+ .name = "osc1" },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk" },
+};
+
+static const struct clk_parent_data noc_free_mux[] = {
+ { .fw_name = "main_noc_base_clk",
+ .name = "main_noc_base_clk", },
+ { .fw_name = "peri_noc_base_clk",
+ .name = "peri_noc_base_clk", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
+
+static const struct clk_parent_data emaca_free_mux[] = {
+ { .fw_name = "peri_emaca_clk",
+ .name = "peri_emaca_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emacb_free_mux[] = {
+ { .fw_name = "peri_emacb_clk",
+ .name = "peri_emacb_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emac_ptp_free_mux[] = {
+ { .fw_name = "peri_emac_ptp_clk",
+ .name = "peri_emac_ptp_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data gpio_db_free_mux[] = {
+ { .fw_name = "peri_gpio_db_clk",
+ .name = "peri_gpio_db_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data sdmmc_free_mux[] = {
+ { .fw_name = "main_sdmmc_clk",
+ .name = "main_sdmmc_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data s2f_usr1_free_mux[] = {
+ { .fw_name = "peri_s2f_usr1_clk",
+ .name = "peri_s2f_usr1_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data psi_ref_free_mux[] = {
+ { .fw_name = "peri_psi_ref_clk",
+ .name = "peri_psi_ref_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data mpu_mux[] = {
+ { .fw_name = "mpu_free_clk",
+ .name = "mpu_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data s2f_usr0_mux[] = {
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data emac_mux[] = {
+ { .fw_name = "emaca_free_clk",
+ .name = "emaca_free_clk", },
+ { .fw_name = "emacb_free_clk",
+ .name = "emacb_free_clk", },
+};
+
+static const struct clk_parent_data noc_mux[] = {
+ { .fw_name = "noc_free_clk",
+ .name = "noc_free_clk", },
+ { .fw_name = "boot_clk",
+ .name = "boot_clk", },
+};
+
+static const struct clk_parent_data mpu_free_mux[] = {
+ { .fw_name = "main_mpu_base_clk",
+ .name = "main_mpu_base_clk", },
+ { .fw_name = "peri_mpu_base_clk",
+ .name = "peri_mpu_base_clk", },
+ { .fw_name = "osc1",
+ .name = "osc1", },
+ { .fw_name = "cb-intosc-hs-div2-clk",
+ .name = "cb-intosc-hs-div2-clk", },
+ { .fw_name = "f2s-free-clk",
+ .name = "f2s-free-clk", },
+};
/* clocks in AO (always on) controller */
static const struct stratix10_pll_clock s10_pll_clks[] = {
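The tables above replace flat parent-name strings with struct clk_parent_data entries that carry both a DT-oriented fw_name and the legacy global name. The toy model below only illustrates that lookup order (DT mapping preferred, global name as fallback); it is a simplified sketch, not the common clock framework implementation, and all identifiers in it are invented.

#include <stdio.h>
#include <string.h>

struct demo_parent_data {
        const char *fw_name;    /* matched against the DT "clock-names" */
        const char *name;       /* legacy global clock name fallback    */
};

static const char *resolve_parent(const struct demo_parent_data *pd,
                                  const char *const *dt_clock_names, int n)
{
        for (int i = 0; i < n; i++)
                if (pd->fw_name && !strcmp(pd->fw_name, dt_clock_names[i]))
                        return pd->fw_name;     /* DT-provided parent wins  */
        return pd->name;                        /* fall back to global name */
}

int main(void)
{
        static const struct demo_parent_data osc = { "osc1", "osc1" };
        static const char *const with_dt[] = { "osc1", "f2s-free-clk" };

        printf("%s\n", resolve_parent(&osc, with_dt, 2));  /* via fw_name */
        printf("%s\n", resolve_parent(&osc, NULL, 0));     /* via .name   */
        return 0;
}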
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index fcabef42249c..f9d5d724c694 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -14,7 +14,7 @@ struct stratix10_clock_data {
struct stratix10_pll_clock {
unsigned int id;
const char *name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -24,7 +24,7 @@ struct stratix10_perip_c_clock {
unsigned int id;
const char *name;
const char *parent_name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -34,7 +34,7 @@ struct stratix10_perip_cnt_clock {
unsigned int id;
const char *name;
const char *parent_name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long offset;
@@ -47,7 +47,7 @@ struct stratix10_gate_clock {
unsigned int id;
const char *name;
const char *parent_name;
- const char *const *parent_names;
+ const struct clk_parent_data *parent_data;
u8 num_parents;
unsigned long flags;
unsigned long gate_reg;
@@ -62,6 +62,8 @@ struct stratix10_gate_clock {
struct clk *s10_register_pll(const struct stratix10_pll_clock *,
void __iomem *);
+struct clk *agilex_register_pll(const struct stratix10_pll_clock *,
+ void __iomem *);
struct clk *s10_register_periph(const struct stratix10_perip_c_clock *,
void __iomem *);
struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *,
diff --git a/drivers/clk/sprd/gate.c b/drivers/clk/sprd/gate.c
index 574cfc116bbc..56e1714b541e 100644
--- a/drivers/clk/sprd/gate.c
+++ b/drivers/clk/sprd/gate.c
@@ -94,8 +94,15 @@ static int sprd_gate_is_enabled(struct clk_hw *hw)
{
struct sprd_gate *sg = hw_to_sprd_gate(hw);
struct sprd_clk_common *common = &sg->common;
+ struct clk_hw *parent;
unsigned int reg;
+ if (sg->flags & SPRD_GATE_NON_AON) {
+ parent = clk_hw_get_parent(hw);
+ if (!parent || !clk_hw_is_enabled(parent))
+ return 0;
+ }
+
regmap_read(common->regmap, common->reg, &reg);
if (sg->flags & CLK_GATE_SET_TO_DISABLE)
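The hunk above makes sprd_gate_is_enabled() bail out early for gates marked with the SPRD_GATE_NON_AON flag (defined in gate.h below) when their parent is off, so the regmap read is never issued against a powered-down block. A small standalone model of that guard; the struct and field names are illustrative only, not driver API.

#include <stdbool.h>
#include <stdio.h>

struct demo_gate {
        bool non_aon;           /* SPRD_GATE_NON_AON analogue                */
        bool parent_enabled;    /* what clk_hw_is_enabled(parent) would say  */
        bool gate_bit_set;      /* what the register read would return       */
};

static bool demo_gate_is_enabled(const struct demo_gate *g)
{
        if (g->non_aon && !g->parent_enabled)
                return false;   /* skip the unsafe register access */
        return g->gate_bit_set;
}

int main(void)
{
        struct demo_gate csi = { .non_aon = true, .parent_enabled = false,
                                 .gate_bit_set = true };

        printf("%d\n", demo_gate_is_enabled(&csi));     /* prints 0 */
        return 0;
}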
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index b55817869367..e738dafa4fe9 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -19,6 +19,15 @@ struct sprd_gate {
struct sprd_clk_common common;
};
+/*
+ * sprd_gate->flags is used for:
+ * CLK_GATE_SET_TO_DISABLE BIT(0)
+ * CLK_GATE_HIWORD_MASK BIT(1)
+ * CLK_GATE_BIG_ENDIAN BIT(2)
+ * so we define new flags from BIT(3)
+ */
+#define SPRD_GATE_NON_AON BIT(3) /* not always powered on, check before read */

+
#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \
_sc_offset, _enable_mask, _flags, \
_gate_flags, _udelay, _ops, _fn) \
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 15791484388f..13a322b2535a 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -106,7 +106,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
cfg = kcalloc(regs_num, sizeof(*cfg), GFP_KERNEL);
if (!cfg)
- return -ENOMEM;
+ return parent_rate;
for (i = 0; i < regs_num; i++)
cfg[i] = sprd_pll_read(pll, i);
diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c
index 2e2dfb2d48ff..ad2e0f9f8563 100644
--- a/drivers/clk/sprd/sc9863a-clk.c
+++ b/drivers/clk/sprd/sc9863a-clk.c
@@ -23,22 +23,22 @@
#include "pll.h"
/* mpll*_gate clocks control cpu cores, they were enabled by default */
-SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94,
- 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc,
- 0x1000, BIT(0), 0, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0,
- 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4,
- 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
-SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x1e8,
- 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc,
+ 0x1000, BIT(0), 0, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4,
+ 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240);
+static SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m",
+ 0x1e8, 0x1000, BIT(0), 0, 0, 240);
static struct sprd_clk_common *sc9863a_pmu_gate_clks[] = {
/* address base is 0x402b0000 */
@@ -1615,6 +1615,36 @@ static const struct sprd_clk_desc sc9863a_mm_gate_desc = {
.hw_clks = &sc9863a_mm_gate_hws,
};
+/* camera sensor clocks */
+static SPRD_GATE_CLK_HW(mipi_csi_clk, "mipi-csi-clk", &mahb_ckg_eb.common.hw,
+ 0x20, BIT(16), 0, SPRD_GATE_NON_AON);
+static SPRD_GATE_CLK_HW(mipi_csi_s_clk, "mipi-csi-s-clk", &mahb_ckg_eb.common.hw,
+ 0x24, BIT(16), 0, SPRD_GATE_NON_AON);
+static SPRD_GATE_CLK_HW(mipi_csi_m_clk, "mipi-csi-m-clk", &mahb_ckg_eb.common.hw,
+ 0x28, BIT(16), 0, SPRD_GATE_NON_AON);
+
+static struct sprd_clk_common *sc9863a_mm_clk_clks[] = {
+ /* address base is 0x60900000 */
+ &mipi_csi_clk.common,
+ &mipi_csi_s_clk.common,
+ &mipi_csi_m_clk.common,
+};
+
+static struct clk_hw_onecell_data sc9863a_mm_clk_hws = {
+ .hws = {
+ [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
+ [CLK_MIPI_CSI_S] = &mipi_csi_s_clk.common.hw,
+ [CLK_MIPI_CSI_M] = &mipi_csi_m_clk.common.hw,
+ },
+ .num = CLK_MM_CLK_NUM,
+};
+
+static const struct sprd_clk_desc sc9863a_mm_clk_desc = {
+ .clk_clks = sc9863a_mm_clk_clks,
+ .num_clk_clks = ARRAY_SIZE(sc9863a_mm_clk_clks),
+ .hw_clks = &sc9863a_mm_clk_hws,
+};
+
static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m", 0x0,
0x1000, BIT(0), 0, 0);
static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m", 0x0,
@@ -1738,6 +1768,8 @@ static const struct of_device_id sprd_sc9863a_clk_ids[] = {
.data = &sc9863a_aonapb_gate_desc },
{ .compatible = "sprd,sc9863a-mm-gate", /* 0x60800000 */
.data = &sc9863a_mm_gate_desc },
+ { .compatible = "sprd,sc9863a-mm-clk", /* 0x60900000 */
+ .data = &sc9863a_mm_clk_desc },
{ .compatible = "sprd,sc9863a-apapb-gate", /* 0x71300000 */
.data = &sc9863a_apapb_gate_desc },
{ }
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 4413b6e04a8e..55873d4b7603 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -375,6 +375,7 @@ static void __init st_of_flexgen_setup(struct device_node *np)
break;
}
+ flex_flags &= ~CLK_IS_CRITICAL;
of_clk_detect_critical(np, i, &flex_flags);
/*
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 27201fd26e44..e1aa1fbac48a 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -90,7 +90,7 @@ static void sun6i_a31_get_pll1_factors(struct factors_request *req)
* Round down the frequency to the closest multiple of either
* 6 or 16
*/
- u32 round_freq_6 = round_down(freq_mhz, 6);
+ u32 round_freq_6 = rounddown(freq_mhz, 6);
u32 round_freq_16 = round_down(freq_mhz, 16);
if (round_freq_6 > round_freq_16)
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 4d99a8770485..deaa4605824c 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -1,8 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config TEGRA_CLK_EMC
- def_bool y
- depends on TEGRA124_EMC
-
config CLK_TEGRA_BPMP
def_bool y
depends on TEGRA_BPMP
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 1f7c30f87ece..eec2313fd37e 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -13,8 +13,8 @@ obj-y += clk-super.o
obj-y += clk-tegra-audio.o
obj-y += clk-tegra-periph.o
obj-y += clk-tegra-fixed.o
+obj-y += clk-tegra-super-cclk.o
obj-y += clk-tegra-super-gen4.o
-obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
@@ -22,8 +22,10 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra20-emc.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
+obj-$(CONFIG_TEGRA124_EMC) += clk-tegra124-emc.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210-emc.o
obj-$(CONFIG_CLK_TEGRA_BPMP) += clk-bpmp.o
obj-y += clk-utils.o
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 531c2b3d814e..0b212cf2e794 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -744,13 +744,19 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
state = clk_pll_is_enabled(hw);
+ if (state && pll->params->pre_rate_change) {
+ ret = pll->params->pre_rate_change();
+ if (WARN_ON(ret))
+ return ret;
+ }
+
_get_pll_mnp(pll, &old_cfg);
if (state && pll->params->defaults_set && pll->params->dyn_ramp &&
(cfg->m == old_cfg.m) && (cfg->p == old_cfg.p)) {
ret = pll->params->dyn_ramp(pll, cfg);
if (!ret)
- return 0;
+ goto done;
}
if (state) {
@@ -772,6 +778,10 @@ static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
pll_clk_start_ss(pll);
}
+done:
+ if (state && pll->params->post_rate_change)
+ pll->params->post_rate_change();
+
return ret;
}
diff --git a/drivers/clk/tegra/clk-tegra-super-cclk.c b/drivers/clk/tegra/clk-tegra-super-cclk.c
new file mode 100644
index 000000000000..a03119c30456
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra-super-cclk.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on clk-super.c
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Based on older tegra20-cpufreq driver by Colin Cross <ccross@google.com>
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/bits.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define PLLP_INDEX 4
+#define PLLX_INDEX 8
+
+#define SUPER_CDIV_ENB BIT(31)
+
+static struct tegra_clk_super_mux *cclk_super;
+static bool cclk_on_pllx;
+
+static u8 cclk_super_get_parent(struct clk_hw *hw)
+{
+ return tegra_clk_super_ops.get_parent(hw);
+}
+
+static int cclk_super_set_parent(struct clk_hw *hw, u8 index)
+{
+ return tegra_clk_super_ops.set_parent(hw, index);
+}
+
+static int cclk_super_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return tegra_clk_super_ops.set_rate(hw, rate, parent_rate);
+}
+
+static unsigned long cclk_super_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ if (cclk_super_get_parent(hw) == PLLX_INDEX)
+ return parent_rate;
+
+ return tegra_clk_super_ops.recalc_rate(hw, parent_rate);
+}
+
+static int cclk_super_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *pllp_hw = clk_hw_get_parent_by_index(hw, PLLP_INDEX);
+ struct clk_hw *pllx_hw = clk_hw_get_parent_by_index(hw, PLLX_INDEX);
+ struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
+ unsigned long pllp_rate;
+ long rate = req->rate;
+
+ if (WARN_ON_ONCE(!pllp_hw || !pllx_hw))
+ return -EINVAL;
+
+ /*
+ * Switch parent to PLLP for all CCLK rates that are suitable for PLLP.
+ * PLLX will be disabled in this case, saving some power.
+ */
+ pllp_rate = clk_hw_get_rate(pllp_hw);
+
+ if (rate <= pllp_rate) {
+ if (super->flags & TEGRA20_SUPER_CLK)
+ rate = pllp_rate;
+ else
+ rate = tegra_clk_super_ops.round_rate(hw, rate,
+ &pllp_rate);
+
+ req->best_parent_rate = pllp_rate;
+ req->best_parent_hw = pllp_hw;
+ req->rate = rate;
+ } else {
+ rate = clk_hw_round_rate(pllx_hw, rate);
+ req->best_parent_rate = rate;
+ req->best_parent_hw = pllx_hw;
+ req->rate = rate;
+ }
+
+ if (WARN_ON_ONCE(rate <= 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct clk_ops tegra_cclk_super_ops = {
+ .get_parent = cclk_super_get_parent,
+ .set_parent = cclk_super_set_parent,
+ .set_rate = cclk_super_set_rate,
+ .recalc_rate = cclk_super_recalc_rate,
+ .determine_rate = cclk_super_determine_rate,
+};
+
+static const struct clk_ops tegra_cclk_super_mux_ops = {
+ .get_parent = cclk_super_get_parent,
+ .set_parent = cclk_super_set_parent,
+ .determine_rate = cclk_super_determine_rate,
+};
+
+struct clk *tegra_clk_register_super_cclk(const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ spinlock_t *lock)
+{
+ struct tegra_clk_super_mux *super;
+ struct clk *clk;
+ struct clk_init_data init;
+ u32 val;
+
+ if (WARN_ON(cclk_super))
+ return ERR_PTR(-EBUSY);
+
+ super = kzalloc(sizeof(*super), GFP_KERNEL);
+ if (!super)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ super->reg = reg;
+ super->lock = lock;
+ super->width = 4;
+ super->flags = clk_super_flags;
+ super->hw.init = &init;
+
+ if (super->flags & TEGRA20_SUPER_CLK) {
+ init.ops = &tegra_cclk_super_mux_ops;
+ } else {
+ init.ops = &tegra_cclk_super_ops;
+
+ super->frac_div.reg = reg + 4;
+ super->frac_div.shift = 16;
+ super->frac_div.width = 8;
+ super->frac_div.frac_width = 1;
+ super->frac_div.lock = lock;
+ super->div_ops = &tegra_clk_frac_div_ops;
+ }
+
+ /*
+ * Tegra30+ has the following CPUG clock topology:
+ *
+ * +---+ +-------+ +-+ +-+ +-+
+ * PLLP+->+ +->+DIVIDER+->+0| +-------->+0| ------------->+0|
+ * | | +-------+ | | | +---+ | | | | |
+ * PLLC+->+MUX| | +->+ | S | | +->+ | +->+CPU
+ * ... | | | | | | K | | | | +-------+ | |
+ * PLLX+->+-->+------------>+1| +->+ I +->+1| +->+ DIV2 +->+1|
+ * +---+ +++ | P | +++ |SKIPPER| +++
+ * ^ | P | ^ +-------+ ^
+ * | | E | | |
+ * PLLX_SEL+--+ | R | | OVERHEAT+--+
+ * +---+ |
+ * |
+ * SUPER_CDIV_ENB+--+
+ *
+ * Tegra20 is similar, but simpler. It doesn't have the divider and
+ * thermal DIV2 skipper.
+ *
+ * At least for now we're not going to use clock-skipper, hence let's
+ * ensure that it is disabled.
+ */
+ val = readl_relaxed(reg + 4);
+ val &= ~SUPER_CDIV_ENB;
+ writel_relaxed(val, reg + 4);
+
+ clk = clk_register(NULL, &super->hw);
+ if (IS_ERR(clk))
+ kfree(super);
+ else
+ cclk_super = super;
+
+ return clk;
+}
+
+int tegra_cclk_pre_pllx_rate_change(void)
+{
+ if (IS_ERR_OR_NULL(cclk_super))
+ return -EINVAL;
+
+ if (cclk_super_get_parent(&cclk_super->hw) == PLLX_INDEX)
+ cclk_on_pllx = true;
+ else
+ cclk_on_pllx = false;
+
+ /*
+ * CPU needs to be temporarily re-parented away from PLLX if PLLX
+ * changes its rate. PLLP is a safe parent for CPU on all Tegra SoCs.
+ */
+ if (cclk_on_pllx)
+ cclk_super_set_parent(&cclk_super->hw, PLLP_INDEX);
+
+ return 0;
+}
+
+void tegra_cclk_post_pllx_rate_change(void)
+{
+ if (cclk_on_pllx)
+ cclk_super_set_parent(&cclk_super->hw, PLLX_INDEX);
+}
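cclk_super_determine_rate() above prefers PLLP whenever the requested CPU rate fits under the PLLP rate (so PLLX can be powered down), and on Tegra20, which has no super-clock divider, it snaps the request up to the PLLP rate. A toy model of that policy follows; the 408 MHz PLLP figure and the requested rates are illustrative inputs, not values taken from the driver.

#include <stdio.h>

/* Models the parent choice in cclk_super_determine_rate(). */
static const char *pick_cclk_parent(unsigned long rate, unsigned long pllp_rate,
                                    int tegra20, unsigned long *out)
{
        if (rate <= pllp_rate) {
                /* Tegra20 lacks the divider, so it runs at the PLLP rate */
                *out = tegra20 ? pllp_rate : rate;
                return "pll_p";
        }
        *out = rate;            /* the real code rounds this via PLLX */
        return "pll_x";
}

int main(void)
{
        unsigned long r;
        const char *p;

        p = pick_cclk_parent(300000000UL, 408000000UL, 1, &r);
        printf("%s %lu\n", p, r);       /* pll_p 408000000  */
        p = pick_cclk_parent(1200000000UL, 408000000UL, 0, &r);
        printf("%s %lu\n", p, r);       /* pll_x 1200000000 */
        return 0;
}

The tegra_cclk_pre_pllx_rate_change()/tegra_cclk_post_pllx_rate_change() pair at the end of the file is what the pll_x_params .pre_rate_change/.post_rate_change hooks added later in this patch call into, parking the CPU on PLLP while PLLX is being reprogrammed.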
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c
index 745f9faa98d8..745f9faa98d8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-tegra124-emc.c
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 085feb04e913..3efc651b42e3 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -391,6 +391,8 @@ static struct tegra_clk_pll_params pll_x_params = {
.lock_delay = 300,
.freq_table = pll_x_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .pre_rate_change = tegra_cclk_pre_pllx_rate_change,
+ .post_rate_change = tegra_cclk_post_pllx_rate_change,
};
static struct tegra_clk_pll_params pll_e_params = {
@@ -702,9 +704,10 @@ static void tegra20_super_clk_init(void)
struct clk *clk;
/* CCLK */
- clk = tegra_clk_register_super_mux("cclk", cclk_parents,
+ clk = tegra_clk_register_super_cclk("cclk", cclk_parents,
ARRAY_SIZE(cclk_parents), CLK_SET_RATE_PARENT,
- clk_base + CCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clk_base + CCLK_BURST_POLICY, TEGRA20_SUPER_CLK,
+ NULL);
clks[TEGRA20_CLK_CCLK] = clk;
/* SCLK */
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
new file mode 100644
index 000000000000..352a2c3fc374
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/tegra.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_EMC_2X_CLK_SRC GENMASK(31, 29)
+#define CLK_SOURCE_EMC_MC_EMC_SAME_FREQ BIT(16)
+#define CLK_SOURCE_EMC_2X_CLK_DIVISOR GENMASK(7, 0)
+
+#define CLK_SRC_PLLM 0
+#define CLK_SRC_PLLC 1
+#define CLK_SRC_PLLP 2
+#define CLK_SRC_CLK_M 3
+#define CLK_SRC_PLLM_UD 4
+#define CLK_SRC_PLLMB_UD 5
+#define CLK_SRC_PLLMB 6
+#define CLK_SRC_PLLP_UD 7
+
+struct tegra210_clk_emc {
+ struct clk_hw hw;
+ void __iomem *regs;
+
+ struct tegra210_clk_emc_provider *provider;
+
+ struct clk *parents[8];
+};
+
+static inline struct tegra210_clk_emc *
+to_tegra210_clk_emc(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra210_clk_emc, hw);
+}
+
+static const char *tegra210_clk_emc_parents[] = {
+ "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb_ud",
+ "pll_mb", "pll_p_ud",
+};
+
+static u8 tegra210_clk_emc_get_parent(struct clk_hw *hw)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ u32 value;
+ u8 src;
+
+ value = readl_relaxed(emc->regs + CLK_SOURCE_EMC);
+ src = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, value);
+
+ return src;
+}
+
+static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ u32 value, div;
+
+ /*
+ * CCF assumes that neither the parent nor its rate will change during
+ * ->set_rate(), so the parent rate passed in here was cached from the
+ * parent before the ->set_rate() call.
+ *
+ * This can lead to wrong results being reported for the EMC clock if
+ * the parent and/or parent rate have changed as part of the EMC rate
+ * change sequence. Fix this by overriding the parent clock with what
+ * we know to be the correct value after the rate change.
+ */
+ parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+
+ value = readl_relaxed(emc->regs + CLK_SOURCE_EMC);
+
+ div = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_DIVISOR, value);
+ div += 2;
+
+ return DIV_ROUND_UP(parent_rate * 2, div);
+}
+
+static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ struct tegra210_clk_emc_provider *provider = emc->provider;
+ unsigned int i;
+
+ if (!provider || !provider->configs || provider->num_configs == 0)
+ return clk_hw_get_rate(hw);
+
+ for (i = 0; i < provider->num_configs; i++) {
+ if (provider->configs[i].rate >= rate)
+ return provider->configs[i].rate;
+ }
+
+ return provider->configs[i - 1].rate;
+}
+
+static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc,
+ u8 index)
+{
+ struct clk_hw *parent = clk_hw_get_parent_by_index(&emc->hw, index);
+ const char *name = clk_hw_get_name(parent);
+
+ /* XXX implement cache? */
+
+ return __clk_lookup(name);
+}
+
+static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ struct tegra210_clk_emc_provider *provider = emc->provider;
+ struct tegra210_clk_emc_config *config;
+ struct device *dev = provider->dev;
+ struct clk_hw *old, *new, *parent;
+ u8 old_idx, new_idx, index;
+ struct clk *clk;
+ unsigned int i;
+ int err;
+
+ if (!provider || !provider->configs || provider->num_configs == 0)
+ return -EINVAL;
+
+ for (i = 0; i < provider->num_configs; i++) {
+ if (provider->configs[i].rate >= rate) {
+ config = &provider->configs[i];
+ break;
+ }
+ }
+
+ if (i == provider->num_configs)
+ config = &provider->configs[i - 1];
+
+ old_idx = tegra210_clk_emc_get_parent(hw);
+ new_idx = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, config->value);
+
+ old = clk_hw_get_parent_by_index(hw, old_idx);
+ new = clk_hw_get_parent_by_index(hw, new_idx);
+
+ /* if the rate has changed... */
+ if (config->parent_rate != clk_hw_get_rate(old)) {
+ /* ... but the clock source remains the same ... */
+ if (new_idx == old_idx) {
+ /* ... switch to the alternative clock source. */
+ switch (new_idx) {
+ case CLK_SRC_PLLM:
+ new_idx = CLK_SRC_PLLMB;
+ break;
+
+ case CLK_SRC_PLLM_UD:
+ new_idx = CLK_SRC_PLLMB_UD;
+ break;
+
+ case CLK_SRC_PLLMB_UD:
+ new_idx = CLK_SRC_PLLM_UD;
+ break;
+
+ case CLK_SRC_PLLMB:
+ new_idx = CLK_SRC_PLLM;
+ break;
+ }
+
+ /*
+ * This should never happen because we can't deal with
+ * it.
+ */
+ if (WARN_ON(new_idx == old_idx))
+ return -EINVAL;
+
+ new = clk_hw_get_parent_by_index(hw, new_idx);
+ }
+
+ index = new_idx;
+ parent = new;
+ } else {
+ index = old_idx;
+ parent = old;
+ }
+
+ clk = tegra210_clk_emc_find_parent(emc, index);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(dev, "failed to get parent clock for index %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ /* set the new parent clock to the required rate */
+ if (clk_get_rate(clk) != config->parent_rate) {
+ err = clk_set_rate(clk, config->parent_rate);
+ if (err < 0) {
+ dev_err(dev, "failed to set rate %lu Hz for %pC: %d\n",
+ config->parent_rate, clk, err);
+ return err;
+ }
+ }
+
+ /* enable the new parent clock */
+ if (parent != old) {
+ err = clk_prepare_enable(clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable parent clock %pC: %d\n",
+ clk, err);
+ return err;
+ }
+ }
+
+ /* update the EMC source configuration to reflect the new parent */
+ config->value &= ~CLK_SOURCE_EMC_2X_CLK_SRC;
+ config->value |= FIELD_PREP(CLK_SOURCE_EMC_2X_CLK_SRC, index);
+
+ /*
+ * Finally, switch the EMC programming with both old and new parent
+ * clocks enabled.
+ */
+ err = provider->set_rate(dev, config);
+ if (err < 0) {
+ dev_err(dev, "failed to set EMC rate to %lu Hz: %d\n", rate,
+ err);
+
+ /*
+ * If we're unable to switch to the new EMC frequency, we no
+ * longer need the new parent to be enabled.
+ */
+ if (parent != old)
+ clk_disable_unprepare(clk);
+
+ return err;
+ }
+
+ /* reparent to new parent clock and disable the old parent clock */
+ if (parent != old) {
+ clk = tegra210_clk_emc_find_parent(emc, old_idx);
+ if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
+ dev_err(dev,
+ "failed to get parent clock for index %u: %d\n",
+ old_idx, err);
+ return err;
+ }
+
+ clk_hw_reparent(hw, parent);
+ clk_disable_unprepare(clk);
+ }
+
+ return err;
+}
+
+static const struct clk_ops tegra210_clk_emc_ops = {
+ .get_parent = tegra210_clk_emc_get_parent,
+ .recalc_rate = tegra210_clk_emc_recalc_rate,
+ .round_rate = tegra210_clk_emc_round_rate,
+ .set_rate = tegra210_clk_emc_set_rate,
+};
+
+struct clk *tegra210_clk_register_emc(struct device_node *np,
+ void __iomem *regs)
+{
+ struct tegra210_clk_emc *emc;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ emc = kzalloc(sizeof(*emc), GFP_KERNEL);
+ if (!emc)
+ return ERR_PTR(-ENOMEM);
+
+ emc->regs = regs;
+
+ init.name = "emc";
+ init.ops = &tegra210_clk_emc_ops;
+ init.flags = CLK_IS_CRITICAL | CLK_GET_RATE_NOCACHE;
+ init.parent_names = tegra210_clk_emc_parents;
+ init.num_parents = ARRAY_SIZE(tegra210_clk_emc_parents);
+ emc->hw.init = &init;
+
+ clk = clk_register(NULL, &emc->hw);
+ if (IS_ERR(clk)) {
+ kfree(emc);
+ return clk;
+ }
+
+ return clk;
+}
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
+ struct device *dev = provider->dev;
+ unsigned int i;
+ int err;
+
+ if (!try_module_get(provider->owner))
+ return -ENODEV;
+
+ for (i = 0; i < provider->num_configs; i++) {
+ struct tegra210_clk_emc_config *config = &provider->configs[i];
+ struct clk_hw *parent;
+ bool same_freq;
+ u8 div, src;
+
+ div = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_DIVISOR, config->value);
+ src = FIELD_GET(CLK_SOURCE_EMC_2X_CLK_SRC, config->value);
+
+ /* do basic sanity checking on the EMC timings */
+ if (div & 0x1) {
+ dev_err(dev, "invalid odd divider %u for rate %lu Hz\n",
+ div, config->rate);
+ err = -EINVAL;
+ goto put;
+ }
+
+ same_freq = config->value & CLK_SOURCE_EMC_MC_EMC_SAME_FREQ;
+
+ if (same_freq != config->same_freq) {
+ dev_err(dev,
+ "ambiguous EMC to MC ratio for rate %lu Hz\n",
+ config->rate);
+ err = -EINVAL;
+ goto put;
+ }
+
+ parent = clk_hw_get_parent_by_index(hw, src);
+ config->parent = src;
+
+ if (src == CLK_SRC_PLLM || src == CLK_SRC_PLLM_UD) {
+ config->parent_rate = config->rate * (1 + div / 2);
+ } else {
+ unsigned long rate = config->rate * (1 + div / 2);
+
+ config->parent_rate = clk_hw_get_rate(parent);
+
+ if (config->parent_rate != rate) {
+ dev_err(dev,
+ "rate %lu Hz does not match input\n",
+ config->rate);
+ err = -EINVAL;
+ goto put;
+ }
+ }
+ }
+
+ emc->provider = provider;
+
+ return 0;
+
+put:
+ module_put(provider->owner);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_attach);
+
+void tegra210_clk_emc_detach(struct clk *clk)
+{
+ struct tegra210_clk_emc *emc = to_tegra210_clk_emc(__clk_get_hw(clk));
+
+ module_put(emc->provider->owner);
+ emc->provider = NULL;
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_detach);
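tegra210_clk_emc_recalc_rate() above treats the 8-bit divisor field as 7.1 fixed point: a register value n divides the doubled parent rate by n + 2, i.e. the effective divider is 1 + n/2, and the same field is sanity-checked for odd values in tegra210_clk_emc_attach(). A worked, standalone version of that arithmetic; the 800 MHz parent rate is a made-up example.

#include <stdio.h>

static unsigned long emc_rate(unsigned long parent_rate, unsigned int div_field)
{
        unsigned int div = div_field + 2;               /* 2 * (1 + n/2)               */

        return (parent_rate * 2 + div - 1) / div;       /* DIV_ROUND_UP(parent*2, div) */
}

int main(void)
{
        /* n = 0 -> divide by 1, n = 2 -> divide by 2, n = 4 -> divide by 3 */
        printf("%lu\n", emc_rate(800000000UL, 0));      /* 800000000 */
        printf("%lu\n", emc_rate(800000000UL, 2));      /* 400000000 */
        printf("%lu\n", emc_rate(800000000UL, 4));      /* 266666667 */
        return 0;
}

The set_rate() path above additionally handles the case where the source stays the same but its rate must change by temporarily switching to the alternate PLLM/PLLMB (or _UD) branch, so the EMC reprogramming always happens with both the old and the new parent clock enabled.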
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index defe3b7ebfa4..68cbb98af567 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -37,6 +37,7 @@
#define CLK_SOURCE_LA 0x1f8
#define CLK_SOURCE_SDMMC2 0x154
#define CLK_SOURCE_SDMMC4 0x164
+#define CLK_SOURCE_EMC_DLL 0x664
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -227,6 +228,10 @@
#define RST_DFLL_DVCO 0x2f4
#define DVFS_DFLL_RESET_SHIFT 0
+#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET 0x284
+#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR 0x288
+#define CLK_OUT_ENB_X_CLK_ENB_EMC_DLL BIT(14)
+
#define CLK_RST_CONTROLLER_RST_DEV_Y_SET 0x2a8
#define CLK_RST_CONTROLLER_RST_DEV_Y_CLR 0x2ac
#define CPU_SOFTRST_CTRL 0x380
@@ -314,12 +319,6 @@ static unsigned long tegra210_input_freq[] = {
[8] = 12000000,
};
-static const char *mux_pllmcp_clkm[] = {
- "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
- "pll_p",
-};
-#define mux_pllmcp_clkm_idx NULL
-
#define PLL_ENABLE (1 << 30)
#define PLLCX_MISC1_IDDQ (1 << 27)
@@ -555,6 +554,27 @@ void tegra210_set_sata_pll_seq_sw(bool state)
}
EXPORT_SYMBOL_GPL(tegra210_set_sata_pll_seq_sw);
+void tegra210_clk_emc_dll_enable(bool flag)
+{
+ u32 offset = flag ? CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET :
+ CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR;
+
+ writel_relaxed(CLK_OUT_ENB_X_CLK_ENB_EMC_DLL, clk_base + offset);
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_dll_enable);
+
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value)
+{
+ writel_relaxed(emc_dll_src_value, clk_base + CLK_SOURCE_EMC_DLL);
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_dll_update_setting);
+
+void tegra210_clk_emc_update_setting(u32 emc_src_value)
+{
+ writel_relaxed(emc_src_value, clk_base + CLK_SOURCE_EMC);
+}
+EXPORT_SYMBOL_GPL(tegra210_clk_emc_update_setting);
+
static void tegra210_generic_mbist_war(struct tegra210_domain_mbist_war *mbist)
{
u32 val;
@@ -2310,7 +2330,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_i2c2] = { .dt_id = TEGRA210_CLK_I2C2, .present = true },
[tegra_clk_uartc_8] = { .dt_id = TEGRA210_CLK_UARTC, .present = true },
[tegra_clk_mipi_cal] = { .dt_id = TEGRA210_CLK_MIPI_CAL, .present = true },
- [tegra_clk_emc] = { .dt_id = TEGRA210_CLK_EMC, .present = true },
[tegra_clk_usb2] = { .dt_id = TEGRA210_CLK_USB2, .present = true },
[tegra_clk_bsev] = { .dt_id = TEGRA210_CLK_BSEV, .present = true },
[tegra_clk_uartd_8] = { .dt_id = TEGRA210_CLK_UARTD, .present = true },
@@ -2953,6 +2972,27 @@ static const char * const sor1_parents[] = {
static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
+static const struct clk_div_table mc_div_table_tegra210[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 4 },
+ { .val = 2, .div = 1 },
+ { .val = 3, .div = 2 },
+ { .val = 0, .div = 0 },
+};
+
+static void tegra210_clk_register_mc(const char *name,
+ const char *parent_name)
+{
+ struct clk *clk;
+
+ clk = clk_register_divider_table(NULL, name, parent_name,
+ CLK_IS_CRITICAL,
+ clk_base + CLK_SOURCE_EMC,
+ 15, 2, CLK_DIVIDER_READ_ONLY,
+ mc_div_table_tegra210, &emc_lock);
+ clks[TEGRA210_CLK_MC] = clk;
+}
+
static const char * const sor1_out_parents[] = {
/*
* Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
@@ -2995,7 +3035,8 @@ static const char * const la_parents[] = {
static struct tegra_clk_periph tegra210_la =
TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, NULL);
-static __init void tegra210_periph_clk_init(void __iomem *clk_base,
+static __init void tegra210_periph_clk_init(struct device_node *np,
+ void __iomem *clk_base,
void __iomem *pmc_base)
{
struct clk *clk;
@@ -3035,22 +3076,19 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
periph_clk_enb_refcnt);
clks[TEGRA210_CLK_DSIB] = clk;
+ /* csi_tpg */
+ clk = clk_register_gate(NULL, "csi_tpg", "pll_d",
+ CLK_SET_RATE_PARENT, clk_base + PLLD_BASE,
+ 23, 0, &pll_d_lock);
+ clk_register_clkdev(clk, "csi_tpg", NULL);
+ clks[TEGRA210_CLK_CSI_TPG] = clk;
+
/* la */
clk = tegra_clk_register_periph("la", la_parents,
ARRAY_SIZE(la_parents), &tegra210_la, clk_base,
CLK_SOURCE_LA, 0);
clks[TEGRA210_CLK_LA] = clk;
- /* emc mux */
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm), 0,
- clk_base + CLK_SOURCE_EMC,
- 29, 3, 0, &emc_lock);
-
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
- clks[TEGRA210_CLK_MC] = clk;
-
/* cml0 */
clk = clk_register_gate(NULL, "cml0", "pll_e", 0, clk_base + PLLE_AUX,
0, 0, &pll_e_lock);
@@ -3093,6 +3131,13 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
}
tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params);
+
+ /* emc */
+ clk = tegra210_clk_register_emc(np, clk_base);
+ clks[TEGRA210_CLK_EMC] = clk;
+
+ /* mc */
+ tegra210_clk_register_mc("mc", "emc");
}
static void __init tegra210_pll_init(void __iomem *clk_base,
@@ -3153,6 +3198,17 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
clk_register_clkdev(clk, "pll_m_ud", NULL);
clks[TEGRA210_CLK_PLL_M_UD] = clk;
+ /* PLLMB_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_mb_ud", "pll_mb",
+ CLK_SET_RATE_PARENT, 1, 1);
+ clk_register_clkdev(clk, "pll_mb_ud", NULL);
+ clks[TEGRA210_CLK_PLL_MB_UD] = clk;
+
+ /* PLLP_UD */
+ clk = clk_register_fixed_factor(NULL, "pll_p_ud", "pll_p",
+ 0, 1, 1);
+ clks[TEGRA210_CLK_PLL_P_UD] = clk;
+
/* PLLU_VCO */
if (!tegra210_init_pllu()) {
clk = clk_register_fixed_rate(NULL, "pll_u_vco", "pll_ref", 0,
@@ -3680,7 +3736,7 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_fixed_clk_init(tegra210_clks);
tegra210_pll_init(clk_base, pmc_base);
- tegra210_periph_clk_init(clk_base, pmc_base);
+ tegra210_periph_clk_init(np, clk_base, pmc_base);
tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
tegra210_audio_plls,
ARRAY_SIZE(tegra210_audio_plls), 24576000);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 3255f82e61b5..37244a7e68c2 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -499,6 +499,8 @@ static struct tegra_clk_pll_params pll_x_params __ro_after_init = {
.freq_table = pll_x_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_DCCON |
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .pre_rate_change = tegra_cclk_pre_pllx_rate_change,
+ .post_rate_change = tegra_cclk_post_pllx_rate_change,
};
static struct tegra_clk_pll_params pll_e_params __ro_after_init = {
@@ -926,11 +928,11 @@ static void __init tegra30_super_clk_init(void)
clk_register_clkdev(clk, "pll_p_out4_cclkg", NULL);
/* CCLKG */
- clk = tegra_clk_register_super_mux("cclk_g", cclk_g_parents,
+ clk = tegra_clk_register_super_cclk("cclk_g", cclk_g_parents,
ARRAY_SIZE(cclk_g_parents),
CLK_SET_RATE_PARENT,
clk_base + CCLKG_BURST_POLICY,
- 0, 4, 0, 0, NULL);
+ 0, NULL);
clks[TEGRA30_CLK_CCLK_G] = clk;
/*
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 2c9a68302e02..6b565f6b5f66 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -266,6 +266,10 @@ struct tegra_clk_pll;
* disabled.
* @dyn_ramp: Callback which can be used to define a custom
* dynamic ramp function for a given PLL.
+ * @pre_rate_change: Callback which is invoked just before changing
+ * PLL's rate.
+ * @post_rate_change: Callback which is invoked right after changing
+ * PLL's rate.
*
* Flags:
* TEGRA_PLL_USE_LOCK - This flag indicated to use lock bits for
@@ -342,6 +346,8 @@ struct tegra_clk_pll_params {
void (*set_defaults)(struct tegra_clk_pll *pll);
int (*dyn_ramp)(struct tegra_clk_pll *pll,
struct tegra_clk_pll_freq_table *cfg);
+ int (*pre_rate_change)(void);
+ void (*post_rate_change)(void);
};
#define TEGRA_PLL_USE_LOCK BIT(0)
@@ -729,8 +735,10 @@ struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
* TEGRA_DIVIDER_2 - LP cluster has additional divider. This flag indicates
* that this is LP cluster clock.
* TEGRA210_CPU_CLK - This flag is used to identify CPU cluster for gen5
- * super mux parent using PLLP branches. To use PLLP branches to CPU, need
- * to configure additional bit PLLP_OUT_CPU in the clock registers.
+ * super mux parent using PLLP branches. To use PLLP branches to CPU, need
+ * to configure additional bit PLLP_OUT_CPU in the clock registers.
+ * TEGRA20_SUPER_CLK - Tegra20 doesn't have a dedicated divider for Super
+ * clocks; it only has a clock-skipper.
*/
struct tegra_clk_super_mux {
struct clk_hw hw;
@@ -748,6 +756,7 @@ struct tegra_clk_super_mux {
#define TEGRA_DIVIDER_2 BIT(0)
#define TEGRA210_CPU_CLK BIT(1)
+#define TEGRA20_SUPER_CLK BIT(2)
extern const struct clk_ops tegra_clk_super_ops;
struct clk *tegra_clk_register_super_mux(const char *name,
@@ -758,6 +767,12 @@ struct clk *tegra_clk_register_super_clk(const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 clk_super_flags,
spinlock_t *lock);
+struct clk *tegra_clk_register_super_cclk(const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags, void __iomem *reg, u8 clk_super_flags,
+ spinlock_t *lock);
+int tegra_cclk_pre_pllx_rate_change(void);
+void tegra_cclk_post_pllx_rate_change(void);
/**
* struct tegra_sdmmc_mux - switch divider with Low Jitter inputs for SDMMC
@@ -866,7 +881,7 @@ void tegra_super_clk_gen5_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *pll_params);
-#ifdef CONFIG_TEGRA_CLK_EMC
+#ifdef CONFIG_TEGRA124_EMC
struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
spinlock_t *lock);
#else
@@ -907,4 +922,7 @@ void tegra_clk_periph_resume(void);
bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw);
struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter);
+struct clk *tegra210_clk_register_emc(struct device_node *np,
+ void __iomem *regs);
+
#endif /* TEGRA_CLK_H */
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 312a20f8ec0e..a38c92153979 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -606,13 +606,13 @@ static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initcons
static const struct
omap_clkctrl_reg_data omap4_l4_secure_clkctrl_regs[] __initconst = {
- { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
- { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { OMAP4_AES1_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_AES2_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_DES3DES_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" },
+ { OMAP4_PKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_div_ck" },
+ { OMAP4_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_div_ck" },
+ { OMAP4_SHA2MD5_CLKCTRL, NULL, CLKF_SW_SUP, "l3_div_ck" },
+ { OMAP4_CRYPTODMA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_div_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 92bf2dda95b9..8694bc9f5fc7 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -303,13 +303,13 @@ static const struct omap_clkctrl_reg_data omap5_l4per_clkctrl_regs[] __initconst
static const struct
omap_clkctrl_reg_data omap5_l4_secure_clkctrl_regs[] __initconst = {
- { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "" },
- { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
- { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "" },
- { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { OMAP5_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_DES3DES_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
+ { OMAP5_FPKA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_root_clk_div" },
+ { OMAP5_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
+ { OMAP5_SHA2MD5_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
+ { OMAP5_DMA_CRYPTO_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 14b645093107..b4cf578a69e1 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -312,15 +312,6 @@ static const char * const dra7_gpu_hyd_mux_parents[] __initconst = {
NULL,
};
-static const char * const dra7_gpu_sys_clk_parents[] __initconst = {
- "sys_clkin",
- NULL,
-};
-
-static const struct omap_clkctrl_div_data dra7_gpu_sys_clk_data __initconst = {
- .max_div = 2,
-};
-
static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst = {
{ 24, TI_CLK_MUX, dra7_gpu_core_mux_parents, NULL, },
{ 26, TI_CLK_MUX, dra7_gpu_hyd_mux_parents, NULL, },
@@ -328,7 +319,7 @@ static const struct omap_clkctrl_bit_data dra7_gpu_core_bit_data[] __initconst =
};
static const struct omap_clkctrl_reg_data dra7_gpu_clkctrl_regs[] __initconst = {
- { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu_cm:clk:0000:24", },
+ { DRA7_GPU_CLKCTRL, dra7_gpu_core_bit_data, CLKF_SW_SUP, "gpu-clkctrl:0000:24", },
{ 0 },
};
@@ -644,7 +635,7 @@ static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst
{ DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
+ { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l4_root_clk_div" },
{ DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
@@ -815,7 +806,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
{ DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
- { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP, "mcan_clk" },
+ { DRA7_WKUPAON_ADC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SOC_DRA76, "mcan_clk" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 7d215cdf9dda..9daf3825f289 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -73,6 +73,7 @@ static const char *enable_init_clks[] = {
"ddr_pll_clk1",
"ddr_pll_clk2",
"ddr_pll_clk3",
+ "sysclk6_ck",
};
int __init dm816x_dt_clk_init(void)
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 6a89936ba03a..eaa43575cfa5 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -196,6 +196,7 @@ cleanup:
if (!cclk->comp_clks[i])
continue;
list_del(&cclk->comp_clks[i]->link);
+ kfree(cclk->comp_clks[i]->parent_names);
kfree(cclk->comp_clks[i]);
}
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index c2618f1477a2..8c1b0e8e8d32 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -1,33 +1,32 @@
# SPDX-License-Identifier: GPL-2.0-only
-config ICST
- bool
-config COMMON_CLK_VERSATILE
- bool "Clock driver for ARM Reference designs"
+menu "Clock driver for ARM Reference designs"
depends on ARCH_INTEGRATOR || ARCH_REALVIEW || \
- ARCH_VERSATILE || ARCH_VEXPRESS || ARM64 || \
- COMPILE_TEST
+ ARCH_VERSATILE || ARCH_VEXPRESS || COMPILE_TEST
+
+config ICST
+ bool "Clock driver for ARM Reference designs ICST"
select REGMAP_MMIO
---help---
Supports clocking on ARM Reference designs:
- Integrator/AP and Integrator/CP
- RealView PB1176, EB, PB11MP and PBX
- - Versatile Express
config CLK_SP810
bool "Clock driver for ARM SP810 System Controller"
- depends on COMMON_CLK_VERSATILE
- default y if ARCH_VEXPRESS
+ default y if (ARCH_VEXPRESS && ARM)
---help---
Supports clock muxing (REFCLK/TIMCLK to TIMERCLKEN0-3) capabilities
of the ARM SP810 System Controller cell.
config CLK_VEXPRESS_OSC
- bool "Clock driver for Versatile Express OSC clock generators"
- depends on COMMON_CLK_VERSATILE
+ tristate "Clock driver for Versatile Express OSC clock generators"
depends on VEXPRESS_CONFIG
+ select REGMAP_MMIO
default y if ARCH_VEXPRESS
---help---
Simple regmap-based driver driving clock generators on Versatile
Express platforms hidden behind its configuration infrastructure,
commonly known as OSCs.
+
+endmenu
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index f9f4babe3ca6..ca798249544d 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/clk-integrator.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -20,26 +19,6 @@
#define IMPD1_OSC2 0x04
#define IMPD1_LOCK 0x08
-struct impd1_clk {
- char *pclkname;
- struct clk *pclk;
- char *vco1name;
- struct clk *vco1clk;
- char *vco2name;
- struct clk *vco2clk;
- struct clk *mmciclk;
- char *uartname;
- struct clk *uartclk;
- char *spiname;
- struct clk *spiclk;
- char *scname;
- struct clk *scclk;
- struct clk_lookup *clks[15];
-};
-
-/* One entry for each connected IM-PD1 LM */
-static struct impd1_clk impd1_clks[4];
-
/*
* There are two VCO's on the IM-PD1
*/
@@ -80,106 +59,6 @@ static const struct clk_icst_desc impd1_icst2_desc = {
.lock_offset = IMPD1_LOCK,
};
-/**
- * integrator_impd1_clk_init() - set up the integrator clock tree
- * @base: base address of the logic module (LM)
- * @id: the ID of this LM
- */
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
-{
- struct impd1_clk *imc;
- struct clk *clk;
- struct clk *pclk;
- int i;
-
- if (id > 3) {
- pr_crit("no more than 4 LMs can be attached\n");
- return;
- }
- imc = &impd1_clks[id];
-
- /* Register the fixed rate PCLK */
- imc->pclkname = kasprintf(GFP_KERNEL, "lm%x-pclk", id);
- pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL, 0, 0);
- imc->pclk = pclk;
-
- imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
- clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL,
- base);
- imc->vco1clk = clk;
- imc->clks[0] = clkdev_alloc(pclk, "apb_pclk", "lm%x:01000", id);
- imc->clks[1] = clkdev_alloc(clk, NULL, "lm%x:01000", id);
-
- /* VCO2 is also called "CLK2" */
- imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id);
- clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL,
- base);
- imc->vco2clk = clk;
-
- /* MMCI uses CLK2 right off */
- imc->clks[2] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00700", id);
- imc->clks[3] = clkdev_alloc(clk, NULL, "lm%x:00700", id);
-
- /* UART reference clock divides CLK2 by a fixed factor 4 */
- imc->uartname = kasprintf(GFP_KERNEL, "lm%x-uartclk", id);
- clk = clk_register_fixed_factor(NULL, imc->uartname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 4);
- imc->uartclk = clk;
- imc->clks[4] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00100", id);
- imc->clks[5] = clkdev_alloc(clk, NULL, "lm%x:00100", id);
- imc->clks[6] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00200", id);
- imc->clks[7] = clkdev_alloc(clk, NULL, "lm%x:00200", id);
-
- /* SPI PL022 clock divides CLK2 by a fixed factor 64 */
- imc->spiname = kasprintf(GFP_KERNEL, "lm%x-spiclk", id);
- clk = clk_register_fixed_factor(NULL, imc->spiname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 64);
- imc->clks[8] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00300", id);
- imc->clks[9] = clkdev_alloc(clk, NULL, "lm%x:00300", id);
-
- /* The GPIO blocks and AACI have only PCLK */
- imc->clks[10] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00400", id);
- imc->clks[11] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00500", id);
- imc->clks[12] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00800", id);
-
- /* Smart Card clock divides CLK2 by a fixed factor 4 */
- imc->scname = kasprintf(GFP_KERNEL, "lm%x-scclk", id);
- clk = clk_register_fixed_factor(NULL, imc->scname, imc->vco2name,
- CLK_IGNORE_UNUSED, 1, 4);
- imc->scclk = clk;
- imc->clks[13] = clkdev_alloc(pclk, "apb_pclk", "lm%x:00600", id);
- imc->clks[14] = clkdev_alloc(clk, NULL, "lm%x:00600", id);
-
- for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
- clkdev_add(imc->clks[i]);
-}
-EXPORT_SYMBOL_GPL(integrator_impd1_clk_init);
-
-void integrator_impd1_clk_exit(unsigned int id)
-{
- int i;
- struct impd1_clk *imc;
-
- if (id > 3)
- return;
- imc = &impd1_clks[id];
-
- for (i = 0; i < ARRAY_SIZE(imc->clks); i++)
- clkdev_drop(imc->clks[i]);
- clk_unregister(imc->spiclk);
- clk_unregister(imc->uartclk);
- clk_unregister(imc->vco2clk);
- clk_unregister(imc->vco1clk);
- clk_unregister(imc->pclk);
- kfree(imc->scname);
- kfree(imc->spiname);
- kfree(imc->uartname);
- kfree(imc->vco2name);
- kfree(imc->vco1name);
- kfree(imc->pclkname);
-}
-EXPORT_SYMBOL_GPL(integrator_impd1_clk_exit);
-
static int integrator_impd1_clk_spawn(struct device *dev,
struct device_node *parent,
struct device_node *np)
diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c
index fd54d5c0251c..8ed7a179f651 100644
--- a/drivers/clk/versatile/clk-versatile.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -56,7 +56,7 @@ static const struct clk_icst_desc versatile_auxosc_desc __initconst = {
static void __init cm_osc_setup(struct device_node *np,
const struct clk_icst_desc *desc)
{
- struct clk *clk = ERR_PTR(-EINVAL);
+ struct clk *clk;
const char *clk_name = np->name;
const char *parent_name;
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 7ade146a3ea9..b2b32fa2d7c3 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -7,6 +7,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -65,8 +66,8 @@ static int vexpress_osc_probe(struct platform_device *pdev)
{
struct clk_init_data init;
struct vexpress_osc *osc;
- struct clk *clk;
u32 range[2];
+ int ret;
osc = devm_kzalloc(&pdev->dev, sizeof(*osc), GFP_KERNEL);
if (!osc)
@@ -92,11 +93,11 @@ static int vexpress_osc_probe(struct platform_device *pdev)
osc->hw.init = &init;
- clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_hw_register(&pdev->dev, &osc->hw);
+ if (ret < 0)
+ return ret;
- of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk);
+ devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, &osc->hw);
clk_hw_set_rate_range(&osc->hw, osc->rate_min, osc->rate_max);
dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name);
@@ -108,6 +109,7 @@ static const struct of_device_id vexpress_osc_of_match[] = {
{ .compatible = "arm,vexpress-osc", },
{}
};
+MODULE_DEVICE_TABLE(of, vexpress_osc_of_match);
static struct platform_driver vexpress_osc_driver = {
.driver = {
@@ -116,9 +118,5 @@ static struct platform_driver vexpress_osc_driver = {
},
.probe = vexpress_osc_probe,
};
-
-static int __init vexpress_osc_init(void)
-{
- return platform_driver_register(&vexpress_osc_driver);
-}
-core_initcall(vexpress_osc_init);
+module_platform_driver(vexpress_osc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/x86/Kconfig b/drivers/clk/x86/Kconfig
new file mode 100644
index 000000000000..69642e15fcc1
--- /dev/null
+++ b/drivers/clk/x86/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CLK_LGM_CGU
+ depends on OF && HAS_IOMEM && (X86 || COMPILE_TEST)
+ select OF_EARLY_FLATTREE
+	bool "Clock driver for Lightning Mountain (LGM) platform"
+ help
+	  Clock Generation Unit (CGU) driver for the Intel Lightning Mountain (LGM)
+ network processor SoC.
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index e3ec81e2a1c2..7c774ea7ddeb 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PMC_ATOM) += clk-pmc-atom.o
obj-$(CONFIG_X86_AMD_PLATFORM_DEVICE) += clk-st.o
clk-x86-lpss-objs := clk-lpt.o
obj-$(CONFIG_X86_INTEL_LPSS) += clk-x86-lpss.o
+obj-$(CONFIG_CLK_LGM_CGU) += clk-cgu.o clk-cgu-pll.o clk-lgm.o
diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c
new file mode 100644
index 000000000000..c03cc6b85b9f
--- /dev/null
+++ b/drivers/clk/x86/clk-cgu-pll.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+
+#include "clk-cgu.h"
+
+#define to_lgm_clk_pll(_hw) container_of(_hw, struct lgm_clk_pll, hw)
+#define PLL_REF_DIV(x) ((x) + 0x08)
+
+/*
+ * Rate calculation formula:
+ * rate = (prate * mult + (prate * frac) / frac_div) / div
+ */
+static unsigned long
+lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
+ unsigned int div, unsigned int frac, unsigned int frac_div)
+{
+ u64 crate, frate, rate64;
+
+ rate64 = prate;
+ crate = rate64 * mult;
+ frate = rate64 * frac;
+ do_div(frate, frac_div);
+ crate += frate;
+ do_div(crate, div);
+
+ return crate;
+}
+
+static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned int div, mult, frac;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
+ div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
+ frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ if (pll->type == TYPE_LJPLL)
+ div *= 4;
+
+ return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
+}
+
+static int lgm_pll_is_enabled(struct clk_hw *hw)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned long flags;
+ unsigned int ret;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+}
+
+static int lgm_pll_enable(struct clk_hw *hw)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
+ ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
+ val, (val & 0x1), 1, 100);
+ spin_unlock_irqrestore(&pll->lock, flags);
+
+ return ret;
+}
+
+static void lgm_pll_disable(struct clk_hw *hw)
+{
+ struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->lock, flags);
+ lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
+ spin_unlock_irqrestore(&pll->lock, flags);
+}
+
+static const struct clk_ops lgm_pll_ops = {
+ .recalc_rate = lgm_pll_recalc_rate,
+ .is_enabled = lgm_pll_is_enabled,
+ .enable = lgm_pll_enable,
+ .disable = lgm_pll_disable,
+};
+
+static struct clk_hw *
+lgm_clk_register_pll(struct lgm_clk_provider *ctx,
+ const struct lgm_pll_clk_data *list)
+{
+ struct clk_init_data init = {};
+ struct lgm_clk_pll *pll;
+ struct device *dev = ctx->dev;
+ struct clk_hw *hw;
+ int ret;
+
+ init.ops = &lgm_pll_ops;
+ init.name = list->name;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = list->num_parents;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->membase = ctx->membase;
+ pll->lock = ctx->lock;
+ pll->reg = list->reg;
+ pll->flags = list->flags;
+ pll->type = list->type;
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return hw;
+}
+
+int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
+ const struct lgm_pll_clk_data *list,
+ unsigned int nr_clk)
+{
+ struct clk_hw *hw;
+ int i;
+
+ for (i = 0; i < nr_clk; i++, list++) {
+ hw = lgm_clk_register_pll(ctx, list);
+ if (IS_ERR(hw)) {
+ dev_err(ctx->dev, "failed to register pll: %s\n",
+ list->name);
+ return PTR_ERR(hw);
+ }
+ ctx->clk_data.hws[list->id] = hw;
+ }
+
+ return 0;
+}
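
As a quick sanity check of the rate formula used by lgm_pll_calc_rate() above, the standalone sketch below reproduces the same arithmetic in plain C. The 40 MHz input rate and the mult/frac/div values are made-up example numbers, not values taken from this patch; only the formula itself mirrors the driver code.

#include <stdint.h>
#include <stdio.h>

/* Mirrors lgm_pll_calc_rate(): rate = (prate * mult + prate * frac / frac_div) / div */
static uint64_t pll_calc_rate(uint64_t prate, unsigned int mult,
			      unsigned int div, unsigned int frac,
			      uint64_t frac_div)
{
	uint64_t crate = prate * mult;
	uint64_t frate = prate * frac / frac_div;

	return (crate + frate) / div;
}

int main(void)
{
	/* Hypothetical example: 40 MHz reference, mult = 25, frac = 2^23 of 2^24, div = 1 */
	printf("%llu\n", (unsigned long long)
	       pll_calc_rate(40000000ULL, 25, 1, 1U << 23, 1ULL << 24));
	/* Prints 1020000000: 40 MHz * 25 plus 40 MHz * 0.5 fractional part */
	return 0;
}

Note that for the LJPLL type the driver additionally multiplies the divider by four in lgm_pll_recalc_rate() before applying this formula.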
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
new file mode 100644
index 000000000000..56af0e04ec1e
--- /dev/null
+++ b/drivers/clk/x86/clk-cgu.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/of.h>
+
+#include "clk-cgu.h"
+
+#define GATE_HW_REG_STAT(reg) ((reg) + 0x0)
+#define GATE_HW_REG_EN(reg) ((reg) + 0x4)
+#define GATE_HW_REG_DIS(reg) ((reg) + 0x8)
+#define MAX_DDIV_REG 8
+#define MAX_DIVIDER_VAL 64
+
+#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
+#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
+#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
+#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)
+
+static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags;
+
+ if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&ctx->lock, flags);
+ lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ list->div_width, list->div_val);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ }
+
+ return clk_hw_register_fixed_rate(NULL, list->name,
+ list->parent_data[0].name,
+ list->flags, list->mux_flags);
+}
+
+static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&mux->lock, flags);
+ if (mux->flags & MUX_CLK_SW)
+ val = mux->reg;
+ else
+ val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
+ mux->width);
+ spin_unlock_irqrestore(&mux->lock, flags);
+ return clk_mux_val_to_index(hw, NULL, mux->flags, val);
+}
+
+static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+
+ val = clk_mux_index_to_val(NULL, mux->flags, index);
+ spin_lock_irqsave(&mux->lock, flags);
+ if (mux->flags & MUX_CLK_SW)
+ mux->reg = val;
+ else
+ lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
+ mux->width, val);
+ spin_unlock_irqrestore(&mux->lock, flags);
+
+ return 0;
+}
+
+static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
+
+ return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
+static const struct clk_ops lgm_clk_mux_ops = {
+ .get_parent = lgm_clk_mux_get_parent,
+ .set_parent = lgm_clk_mux_set_parent,
+ .determine_rate = lgm_clk_mux_determine_rate,
+};
+
+static struct clk_hw *
+lgm_clk_register_mux(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags, cflags = list->mux_flags;
+ struct device *dev = ctx->dev;
+ u8 shift = list->mux_shift;
+ u8 width = list->mux_width;
+ struct clk_init_data init = {};
+ struct lgm_clk_mux *mux;
+ u32 reg = list->mux_off;
+ struct clk_hw *hw;
+ int ret;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = list->name;
+ init.ops = &lgm_clk_mux_ops;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = list->num_parents;
+
+ mux->membase = ctx->membase;
+ mux->lock = ctx->lock;
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->width = width;
+ mux->flags = cflags;
+ mux->hw.init = &init;
+
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&mux->lock, flags);
+ lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);
+ spin_unlock_irqrestore(&mux->lock, flags);
+ }
+
+ return hw;
+}
+
+static unsigned long
+lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&divider->lock, flags);
+ val = lgm_get_clk_val(divider->membase, divider->reg,
+ divider->shift, divider->width);
+ spin_unlock_irqrestore(&divider->lock, flags);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long
+lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static int
+lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
+ unsigned long flags;
+ int value;
+
+ value = divider_get_val(rate, prate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(&divider->lock, flags);
+ lgm_set_clk_val(divider->membase, divider->reg,
+ divider->shift, divider->width, value);
+ spin_unlock_irqrestore(&divider->lock, flags);
+
+ return 0;
+}
+
+static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
+{
+ struct lgm_clk_divider *div = to_lgm_clk_divider(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&div->lock, flags);
+ lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
+ div->width_gate, enable);
+ spin_unlock_irqrestore(&div->lock, flags);
+ return 0;
+}
+
+static int lgm_clk_divider_enable(struct clk_hw *hw)
+{
+ return lgm_clk_divider_enable_disable(hw, 1);
+}
+
+static void lgm_clk_divider_disable(struct clk_hw *hw)
+{
+ lgm_clk_divider_enable_disable(hw, 0);
+}
+
+static const struct clk_ops lgm_clk_divider_ops = {
+ .recalc_rate = lgm_clk_divider_recalc_rate,
+ .round_rate = lgm_clk_divider_round_rate,
+ .set_rate = lgm_clk_divider_set_rate,
+ .enable = lgm_clk_divider_enable,
+ .disable = lgm_clk_divider_disable,
+};
+
+static struct clk_hw *
+lgm_clk_register_divider(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags, cflags = list->div_flags;
+ struct device *dev = ctx->dev;
+ struct lgm_clk_divider *div;
+ struct clk_init_data init = {};
+ u8 shift = list->div_shift;
+ u8 width = list->div_width;
+ u8 shift_gate = list->div_shift_gate;
+ u8 width_gate = list->div_width_gate;
+ u32 reg = list->div_off;
+ struct clk_hw *hw;
+ int ret;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = list->name;
+ init.ops = &lgm_clk_divider_ops;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = 1;
+
+ div->membase = ctx->membase;
+ div->lock = ctx->lock;
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->shift_gate = shift_gate;
+ div->width_gate = width_gate;
+ div->flags = cflags;
+ div->table = list->div_table;
+ div->hw.init = &init;
+
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&div->lock, flags);
+ lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);
+ spin_unlock_irqrestore(&div->lock, flags);
+ }
+
+ return hw;
+}
+
+static struct clk_hw *
+lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags;
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
+ list->parent_data[0].name, list->flags,
+ list->mult, list->div);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
+ if (list->div_flags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&ctx->lock, flags);
+ lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
+ list->div_width, list->div_val);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ }
+
+ return hw;
+}
+
+static int lgm_clk_gate_enable(struct clk_hw *hw)
+{
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned long flags;
+ unsigned int reg;
+
+ spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_EN(gate->reg);
+ lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+ spin_unlock_irqrestore(&gate->lock, flags);
+
+ return 0;
+}
+
+static void lgm_clk_gate_disable(struct clk_hw *hw)
+{
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned long flags;
+ unsigned int reg;
+
+ spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_DIS(gate->reg);
+ lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
+ spin_unlock_irqrestore(&gate->lock, flags);
+}
+
+static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
+{
+ struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
+ unsigned int reg, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gate->lock, flags);
+ reg = GATE_HW_REG_STAT(gate->reg);
+ ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);
+ spin_unlock_irqrestore(&gate->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops lgm_clk_gate_ops = {
+ .enable = lgm_clk_gate_enable,
+ .disable = lgm_clk_gate_disable,
+ .is_enabled = lgm_clk_gate_is_enabled,
+};
+
+static struct clk_hw *
+lgm_clk_register_gate(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list)
+{
+ unsigned long flags, cflags = list->gate_flags;
+ const char *pname = list->parent_data[0].name;
+ struct device *dev = ctx->dev;
+ u8 shift = list->gate_shift;
+ struct clk_init_data init = {};
+ struct lgm_clk_gate *gate;
+ u32 reg = list->gate_off;
+ struct clk_hw *hw;
+ int ret;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = list->name;
+ init.ops = &lgm_clk_gate_ops;
+ init.flags = list->flags;
+ init.parent_names = pname ? &pname : NULL;
+ init.num_parents = pname ? 1 : 0;
+
+ gate->membase = ctx->membase;
+ gate->lock = ctx->lock;
+ gate->reg = reg;
+ gate->shift = shift;
+ gate->flags = cflags;
+ gate->hw.init = &init;
+
+ hw = &gate->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (cflags & CLOCK_FLAG_VAL_INIT) {
+ spin_lock_irqsave(&gate->lock, flags);
+ lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);
+ spin_unlock_irqrestore(&gate->lock, flags);
+ }
+
+ return hw;
+}
+
+int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list,
+ unsigned int nr_clk)
+{
+ struct clk_hw *hw;
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ switch (list->type) {
+ case CLK_TYPE_FIXED:
+ hw = lgm_clk_register_fixed(ctx, list);
+ break;
+ case CLK_TYPE_MUX:
+ hw = lgm_clk_register_mux(ctx, list);
+ break;
+ case CLK_TYPE_DIVIDER:
+ hw = lgm_clk_register_divider(ctx, list);
+ break;
+ case CLK_TYPE_FIXED_FACTOR:
+ hw = lgm_clk_register_fixed_factor(ctx, list);
+ break;
+ case CLK_TYPE_GATE:
+ hw = lgm_clk_register_gate(ctx, list);
+ break;
+ default:
+ dev_err(ctx->dev, "invalid clk type\n");
+ return -EINVAL;
+ }
+
+ if (IS_ERR(hw)) {
+ dev_err(ctx->dev,
+ "register clk: %s, type: %u failed!\n",
+ list->name, list->type);
+ return -EIO;
+ }
+ ctx->clk_data.hws[list->id] = hw;
+ }
+
+ return 0;
+}
+
+static unsigned long
+lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ unsigned int div0, div1, exdiv;
+ unsigned long flags;
+ u64 prate;
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
+ ddiv->shift0, ddiv->width0) + 1;
+ div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
+ ddiv->shift1, ddiv->width1) + 1;
+ exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
+ ddiv->shift2, ddiv->width2);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ prate = (u64)parent_rate;
+ do_div(prate, div0);
+ do_div(prate, div1);
+
+ if (exdiv) {
+ do_div(prate, ddiv->div);
+ prate *= ddiv->mult;
+ }
+
+ return prate;
+}
+
+static int lgm_clk_ddiv_enable(struct clk_hw *hw)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ ddiv->width_gate, 1);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return 0;
+}
+
+static void lgm_clk_ddiv_disable(struct clk_hw *hw)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
+ ddiv->width_gate, 0);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+}
+
+static int
+lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
+{
+ u32 idx, temp;
+
+ *ddiv1 = 1;
+ *ddiv2 = 1;
+
+ if (div > MAX_DIVIDER_VAL)
+ div = MAX_DIVIDER_VAL;
+
+ if (div > 1) {
+ for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
+ temp = DIV_ROUND_UP_ULL((u64)div, idx);
+ if (div % idx == 0 && temp <= MAX_DDIV_REG)
+ break;
+ }
+
+ if (idx > MAX_DDIV_REG)
+ return -EINVAL;
+
+ *ddiv1 = temp;
+ *ddiv2 = idx;
+ }
+
+ return 0;
+}
+
+static int
+lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ u32 div, ddiv1, ddiv2;
+ unsigned long flags;
+
+ div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);
+
+ spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ div = div * 2;
+ }
+
+ if (div <= 0) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return -EINVAL;
+ }
+
+ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2)) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return -EINVAL;
+ }
+
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
+ ddiv1 - 1);
+
+ lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
+ ddiv2 - 1);
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ return 0;
+}
+
+static long
+lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
+ u32 div, ddiv1, ddiv2;
+ unsigned long flags;
+ u64 rate64;
+
+ div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+
+ /* if predivide bit is enabled, modify div by factor of 2.5 */
+ spin_lock_irqsave(&ddiv->lock, flags);
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ div = div * 2;
+ div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
+ }
+
+ if (div <= 0) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return *prate;
+ }
+
+ if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) {
+ if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) {
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+ return -EINVAL;
+ }
+ }
+
+ rate64 = *prate;
+ do_div(rate64, ddiv1);
+ do_div(rate64, ddiv2);
+
+ /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
+ if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
+ rate64 = rate64 * 2;
+ rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
+ }
+ spin_unlock_irqrestore(&ddiv->lock, flags);
+
+ return rate64;
+}
+
+static const struct clk_ops lgm_clk_ddiv_ops = {
+ .recalc_rate = lgm_clk_ddiv_recalc_rate,
+ .enable = lgm_clk_ddiv_enable,
+ .disable = lgm_clk_ddiv_disable,
+ .set_rate = lgm_clk_ddiv_set_rate,
+ .round_rate = lgm_clk_ddiv_round_rate,
+};
+
+int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_ddiv_data *list,
+ unsigned int nr_clk)
+{
+ struct device *dev = ctx->dev;
+ struct clk_init_data init = {};
+ struct lgm_clk_ddiv *ddiv;
+ struct clk_hw *hw;
+ unsigned int idx;
+ int ret;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ ddiv = NULL;
+ ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
+ if (!ddiv)
+ return -ENOMEM;
+
+ memset(&init, 0, sizeof(init));
+ init.name = list->name;
+ init.ops = &lgm_clk_ddiv_ops;
+ init.flags = list->flags;
+ init.parent_data = list->parent_data;
+ init.num_parents = 1;
+
+ ddiv->membase = ctx->membase;
+ ddiv->lock = ctx->lock;
+ ddiv->reg = list->reg;
+ ddiv->shift0 = list->shift0;
+ ddiv->width0 = list->width0;
+ ddiv->shift1 = list->shift1;
+ ddiv->width1 = list->width1;
+ ddiv->shift_gate = list->shift_gate;
+ ddiv->width_gate = list->width_gate;
+ ddiv->shift2 = list->ex_shift;
+ ddiv->width2 = list->ex_width;
+ ddiv->flags = list->div_flags;
+ ddiv->mult = 2;
+ ddiv->div = 5;
+ ddiv->hw.init = &init;
+
+ hw = &ddiv->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "register clk: %s failed!\n", list->name);
+ return ret;
+ }
+ ctx->clk_data.hws[list->id] = hw;
+ }
+
+ return 0;
+}
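
The ddiv code above splits one requested divider into the two small fields programmed by lgm_clk_ddiv_set_rate(); when the predivider bit is set it also folds in a fixed x2/:5 (i.e. divide-by-2.5) correction. The following standalone sketch reproduces only the splitting step of lgm_clk_get_ddiv_val(), with an arbitrary example divider of 48.

#include <stdio.h>

#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

/* Mirrors lgm_clk_get_ddiv_val(): split 'div' into two factors, each <= 8 */
static int get_ddiv_val(unsigned int div, unsigned int *ddiv1, unsigned int *ddiv2)
{
	unsigned int idx, temp;

	*ddiv1 = 1;
	*ddiv2 = 1;

	if (div > MAX_DIVIDER_VAL)
		div = MAX_DIVIDER_VAL;

	if (div > 1) {
		for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
			temp = (div + idx - 1) / idx;	/* DIV_ROUND_UP */
			if (div % idx == 0 && temp <= MAX_DDIV_REG)
				break;
		}
		if (idx > MAX_DDIV_REG)
			return -1;
		*ddiv1 = temp;
		*ddiv2 = idx;
	}
	return 0;
}

int main(void)
{
	unsigned int d1, d2;

	/* A requested overall divider of 48 splits into 8 * 6 */
	if (!get_ddiv_val(48, &d1, &d2))
		printf("48 -> %u * %u\n", d1, d2);
	return 0;
}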
diff --git a/drivers/clk/x86/clk-cgu.h b/drivers/clk/x86/clk-cgu.h
new file mode 100644
index 000000000000..4e22bfb22312
--- /dev/null
+++ b/drivers/clk/x86/clk-cgu.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+
+#ifndef __CLK_CGU_H
+#define __CLK_CGU_H
+
+#include <linux/io.h>
+
+struct lgm_clk_mux {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+ unsigned long flags;
+ spinlock_t lock;
+};
+
+struct lgm_clk_divider {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift;
+ u8 width;
+ u8 shift_gate;
+ u8 width_gate;
+ unsigned long flags;
+ const struct clk_div_table *table;
+ spinlock_t lock;
+};
+
+struct lgm_clk_ddiv {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift0;
+ u8 width0;
+ u8 shift1;
+ u8 width1;
+ u8 shift2;
+ u8 width2;
+ u8 shift_gate;
+ u8 width_gate;
+ unsigned int mult;
+ unsigned int div;
+ unsigned long flags;
+ spinlock_t lock;
+};
+
+struct lgm_clk_gate {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ u8 shift;
+ unsigned long flags;
+ spinlock_t lock;
+};
+
+enum lgm_clk_type {
+ CLK_TYPE_FIXED,
+ CLK_TYPE_MUX,
+ CLK_TYPE_DIVIDER,
+ CLK_TYPE_FIXED_FACTOR,
+ CLK_TYPE_GATE,
+ CLK_TYPE_NONE,
+};
+
+/**
+ * struct lgm_clk_provider
+ * @membase: IO mem base address for CGU.
+ * @np: device node
+ * @dev: device
+ * @clk_data: array of hw clocks and clk number.
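+ * @lock: spinlock protecting CGU register access.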
+ */
+struct lgm_clk_provider {
+ void __iomem *membase;
+ struct device_node *np;
+ struct device *dev;
+ struct clk_hw_onecell_data clk_data;
+ spinlock_t lock;
+};
+
+enum pll_type {
+ TYPE_ROPLL,
+ TYPE_LJPLL,
+ TYPE_NONE,
+};
+
+struct lgm_clk_pll {
+ struct clk_hw hw;
+ void __iomem *membase;
+ unsigned int reg;
+ unsigned long flags;
+ enum pll_type type;
+ spinlock_t lock;
+};
+
+/**
+ * struct lgm_pll_clk_data
+ * @id: platform specific id of the clock.
+ * @name: name of this pll clock.
+ * @parent_data: parent clock data.
+ * @num_parents: number of parents.
+ * @flags: optional flags for basic clock.
+ * @type: platform type of pll.
+ * @reg: offset of the register.
+ */
+struct lgm_pll_clk_data {
+ unsigned int id;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ enum pll_type type;
+ int reg;
+};
+
+#define LGM_PLL(_id, _name, _pdata, _flags, \
+ _reg, _type) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = _flags, \
+ .reg = _reg, \
+ .type = _type, \
+ }
+
+struct lgm_clk_ddiv_data {
+ unsigned int id;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 flags;
+ unsigned long div_flags;
+ unsigned int reg;
+ u8 shift0;
+ u8 width0;
+ u8 shift1;
+ u8 width1;
+ u8 shift_gate;
+ u8 width_gate;
+ u8 ex_shift;
+ u8 ex_width;
+};
+
+#define LGM_DDIV(_id, _name, _pname, _flags, _reg, \
+ _shft0, _wdth0, _shft1, _wdth1, \
+ _shft_gate, _wdth_gate, _xshft, _df) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .flags = _flags, \
+ .reg = _reg, \
+ .shift0 = _shft0, \
+ .width0 = _wdth0, \
+ .shift1 = _shft1, \
+ .width1 = _wdth1, \
+ .shift_gate = _shft_gate, \
+ .width_gate = _wdth_gate, \
+ .ex_shift = _xshft, \
+ .ex_width = 1, \
+ .div_flags = _df, \
+ }
+
+struct lgm_clk_branch {
+ unsigned int id;
+ enum lgm_clk_type type;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned int mux_off;
+ u8 mux_shift;
+ u8 mux_width;
+ unsigned long mux_flags;
+ unsigned int mux_val;
+ unsigned int div_off;
+ u8 div_shift;
+ u8 div_width;
+ u8 div_shift_gate;
+ u8 div_width_gate;
+ unsigned long div_flags;
+ unsigned int div_val;
+ const struct clk_div_table *div_table;
+ unsigned int gate_off;
+ u8 gate_shift;
+ unsigned long gate_flags;
+ unsigned int gate_val;
+ unsigned int mult;
+ unsigned int div;
+};
+
+/* clock flags definition */
+#define CLOCK_FLAG_VAL_INIT BIT(16)
+#define MUX_CLK_SW BIT(17)
+
+#define LGM_MUX(_id, _name, _pdata, _f, _reg, \
+ _shift, _width, _cf, _v) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_MUX, \
+ .name = _name, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = _f, \
+ .mux_off = _reg, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .mux_flags = _cf, \
+ .mux_val = _v, \
+ }
+
+#define LGM_DIV(_id, _name, _pname, _f, _reg, _shift, _width, \
+ _shift_gate, _width_gate, _cf, _v, _dtable) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_DIVIDER, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = 1, \
+ .flags = _f, \
+ .div_off = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .div_shift_gate = _shift_gate, \
+ .div_width_gate = _width_gate, \
+ .div_flags = _cf, \
+ .div_val = _v, \
+ .div_table = _dtable, \
+ }
+
+#define LGM_GATE(_id, _name, _pname, _f, _reg, \
+ _shift, _cf, _v) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = !_pname ? 0 : 1, \
+ .flags = _f, \
+ .gate_off = _reg, \
+ .gate_shift = _shift, \
+ .gate_flags = _cf, \
+ .gate_val = _v, \
+ }
+
+#define LGM_FIXED(_id, _name, _pname, _f, _reg, \
+ _shift, _width, _cf, _freq, _v) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_FIXED, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = !_pname ? 0 : 1, \
+ .flags = _f, \
+ .div_off = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .div_flags = _cf, \
+ .div_val = _v, \
+ .mux_flags = _freq, \
+ }
+
+#define LGM_FIXED_FACTOR(_id, _name, _pname, _f, _reg, \
+ _shift, _width, _cf, _v, _m, _d) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_FIXED_FACTOR, \
+ .name = _name, \
+ .parent_data = &(const struct clk_parent_data){ \
+ .fw_name = _pname, \
+ .name = _pname, \
+ }, \
+ .num_parents = 1, \
+ .flags = _f, \
+ .div_off = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+ .div_flags = _cf, \
+ .div_val = _v, \
+ .mult = _m, \
+ .div = _d, \
+ }
+
+static inline void lgm_set_clk_val(void __iomem *membase, u32 reg,
+ u8 shift, u8 width, u32 set_val)
+{
+ u32 mask = (GENMASK(width - 1, 0) << shift);
+ u32 regval;
+
+ regval = readl(membase + reg);
+ regval = (regval & ~mask) | ((set_val << shift) & mask);
+ writel(regval, membase + reg);
+}
+
+static inline u32 lgm_get_clk_val(void __iomem *membase, u32 reg,
+ u8 shift, u8 width)
+{
+ u32 mask = (GENMASK(width - 1, 0) << shift);
+ u32 val;
+
+ val = readl(membase + reg);
+ val = (val & mask) >> shift;
+
+ return val;
+}
+
+int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_branch *list,
+ unsigned int nr_clk);
+int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
+ const struct lgm_pll_clk_data *list,
+ unsigned int nr_clk);
+int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
+ const struct lgm_clk_ddiv_data *list,
+ unsigned int nr_clk);
+#endif /* __CLK_CGU_H */
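
The two inline helpers at the end of clk-cgu.h implement a plain read-modify-write of a shifted bit field. The sketch below illustrates the same mask arithmetic on an ordinary variable rather than an ioremapped register; the field position is arbitrary and the sketch assumes width < 32.

#include <stdint.h>
#include <stdio.h>

/* Same masking as lgm_set_clk_val(), but on a local variable (width < 32 assumed) */
static void set_field(uint32_t *reg, uint8_t shift, uint8_t width, uint32_t val)
{
	uint32_t mask = ((1U << width) - 1) << shift;	/* GENMASK(width - 1, 0) << shift */

	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

/* Same masking as lgm_get_clk_val() */
static uint32_t get_field(uint32_t reg, uint8_t shift, uint8_t width)
{
	uint32_t mask = ((1U << width) - 1) << shift;

	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	set_field(&reg, 4, 3, 0x5);	/* write a 3-bit field at bit 4 */
	printf("reg=0x%08x field=%u\n", (unsigned)reg, (unsigned)get_field(reg, 4, 3));
	/* Prints reg=0xffffffdf field=5 */
	return 0;
}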
diff --git a/drivers/clk/x86/clk-lgm.c b/drivers/clk/x86/clk-lgm.c
new file mode 100644
index 000000000000..020f4e83a5cc
--- /dev/null
+++ b/drivers/clk/x86/clk-lgm.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Zhu YiXin <yixin.zhu@intel.com>
+ * Rahul Tanwar <rahul.tanwar@intel.com>
+ */
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/intel,lgm-clk.h>
+#include "clk-cgu.h"
+
+#define PLL_DIV_WIDTH 4
+#define PLL_DDIV_WIDTH 3
+
+/* Gate0 clock shift */
+#define G_C55_SHIFT 7
+#define G_QSPI_SHIFT 9
+#define G_EIP197_SHIFT 11
+#define G_VAULT130_SHIFT 12
+#define G_TOE_SHIFT 13
+#define G_SDXC_SHIFT 14
+#define G_EMMC_SHIFT 15
+#define G_SPIDBG_SHIFT 17
+#define G_DMA3_SHIFT 28
+
+/* Gate1 clock shift */
+#define G_DMA0_SHIFT 0
+#define G_LEDC0_SHIFT 1
+#define G_LEDC1_SHIFT 2
+#define G_I2S0_SHIFT 3
+#define G_I2S1_SHIFT 4
+#define G_EBU_SHIFT 5
+#define G_PWM_SHIFT 6
+#define G_I2C0_SHIFT 7
+#define G_I2C1_SHIFT 8
+#define G_I2C2_SHIFT 9
+#define G_I2C3_SHIFT 10
+
+#define G_SSC0_SHIFT 12
+#define G_SSC1_SHIFT 13
+#define G_SSC2_SHIFT 14
+#define G_SSC3_SHIFT 15
+
+#define G_GPTC0_SHIFT 17
+#define G_GPTC1_SHIFT 18
+#define G_GPTC2_SHIFT 19
+#define G_GPTC3_SHIFT 20
+
+#define G_ASC0_SHIFT 22
+#define G_ASC1_SHIFT 23
+#define G_ASC2_SHIFT 24
+#define G_ASC3_SHIFT 25
+
+#define G_PCM0_SHIFT 27
+#define G_PCM1_SHIFT 28
+#define G_PCM2_SHIFT 29
+
+/* Gate2 clock shift */
+#define G_PCIE10_SHIFT 1
+#define G_PCIE11_SHIFT 2
+#define G_PCIE30_SHIFT 3
+#define G_PCIE31_SHIFT 4
+#define G_PCIE20_SHIFT 5
+#define G_PCIE21_SHIFT 6
+#define G_PCIE40_SHIFT 7
+#define G_PCIE41_SHIFT 8
+
+#define G_XPCS0_SHIFT 10
+#define G_XPCS1_SHIFT 11
+#define G_XPCS2_SHIFT 12
+#define G_XPCS3_SHIFT 13
+#define G_SATA0_SHIFT 14
+#define G_SATA1_SHIFT 15
+#define G_SATA2_SHIFT 16
+#define G_SATA3_SHIFT 17
+
+/* Gate3 clock shift */
+#define G_ARCEM4_SHIFT 0
+#define G_IDMAR1_SHIFT 2
+#define G_IDMAT0_SHIFT 3
+#define G_IDMAT1_SHIFT 4
+#define G_IDMAT2_SHIFT 5
+
+#define G_PPV4_SHIFT 8
+#define G_GSWIPO_SHIFT 9
+#define G_CQEM_SHIFT 10
+#define G_XPCS5_SHIFT 14
+#define G_USB1_SHIFT 25
+#define G_USB2_SHIFT 26
+
+
+/* Register definition */
+#define CGU_PLL0CZ_CFG0 0x000
+#define CGU_PLL0CM0_CFG0 0x020
+#define CGU_PLL0CM1_CFG0 0x040
+#define CGU_PLL0B_CFG0 0x060
+#define CGU_PLL1_CFG0 0x080
+#define CGU_PLL2_CFG0 0x0A0
+#define CGU_PLLPP_CFG0 0x0C0
+#define CGU_LJPLL3_CFG0 0x0E0
+#define CGU_LJPLL4_CFG0 0x100
+#define CGU_C55_PCMCR 0x18C
+#define CGU_PCMCR 0x190
+#define CGU_IF_CLK1 0x1A0
+#define CGU_IF_CLK2 0x1A4
+#define CGU_GATE0 0x300
+#define CGU_GATE1 0x310
+#define CGU_GATE2 0x320
+#define CGU_GATE3 0x310
+
+#define PLL_DIV(x) ((x) + 0x04)
+#define PLL_SSC(x) ((x) + 0x10)
+
+#define CLK_NR_CLKS (LGM_GCLK_USB2 + 1)
+
+/*
+ * The table below defines the pairs of register value and effective divider.
+ * An explicit table is used because the relation between the two is
+ * non-linear.
+ */
+static const struct clk_div_table pll_div[] = {
+ { .val = 0, .div = 1 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 6, .div = 8 },
+ { .val = 7, .div = 10 },
+ { .val = 8, .div = 12 },
+ { .val = 9, .div = 16 },
+ { .val = 10, .div = 20 },
+ { .val = 11, .div = 24 },
+ { .val = 12, .div = 32 },
+ { .val = 13, .div = 40 },
+ { .val = 14, .div = 48 },
+ { .val = 15, .div = 64 },
+ {}
+};
+
+static const struct clk_div_table dcl_div[] = {
+ { .val = 0, .div = 6 },
+ { .val = 1, .div = 12 },
+ { .val = 2, .div = 24 },
+ { .val = 3, .div = 32 },
+ { .val = 4, .div = 48 },
+ { .val = 5, .div = 96 },
+ {}
+};
+
+static const struct clk_parent_data pll_p[] = {
+ { .fw_name = "osc", .name = "osc" },
+};
+static const struct clk_parent_data pllcm_p[] = {
+ { .fw_name = "cpu_cm", .name = "cpu_cm" },
+};
+static const struct clk_parent_data emmc_p[] = {
+ { .fw_name = "emmc4", .name = "emmc4" },
+ { .fw_name = "noc4", .name = "noc4" },
+};
+static const struct clk_parent_data sdxc_p[] = {
+ { .fw_name = "sdxc3", .name = "sdxc3" },
+ { .fw_name = "sdxc2", .name = "sdxc2" },
+};
+static const struct clk_parent_data pcm_p[] = {
+ { .fw_name = "v_docsis", .name = "v_docsis" },
+ { .fw_name = "dcl", .name = "dcl" },
+};
+static const struct clk_parent_data cbphy_p[] = {
+ { .fw_name = "dd_serdes", .name = "dd_serdes" },
+ { .fw_name = "dd_pcie", .name = "dd_pcie" },
+};
+
+static const struct lgm_pll_clk_data lgm_pll_clks[] = {
+ LGM_PLL(LGM_CLK_PLL0CZ, "pll0cz", pll_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0CZ_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL0CM0, "pll0cm0", pllcm_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0CM0_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL0CM1, "pll0cm1", pllcm_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0CM1_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL0B, "pll0b", pll_p, CLK_IGNORE_UNUSED,
+ CGU_PLL0B_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL1, "pll1", pll_p, 0, CGU_PLL1_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLL2, "pll2", pll_p, CLK_IGNORE_UNUSED,
+ CGU_PLL2_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_PLLPP, "pllpp", pll_p, 0, CGU_PLLPP_CFG0, TYPE_ROPLL),
+ LGM_PLL(LGM_CLK_LJPLL3, "ljpll3", pll_p, 0, CGU_LJPLL3_CFG0, TYPE_LJPLL),
+ LGM_PLL(LGM_CLK_LJPLL4, "ljpll4", pll_p, 0, CGU_LJPLL4_CFG0, TYPE_LJPLL),
+};
+
+static const struct lgm_clk_branch lgm_branch_clks[] = {
+ LGM_DIV(LGM_CLK_PP_HW, "pp_hw", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_PP_UC, "pp_uc", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_PP_FXD, "pp_fxd", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_PP_TBM, "pp_tbm", "pllpp", 0, PLL_DIV(CGU_PLLPP_CFG0),
+ 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_DDR, "ddr", "pll2", CLK_IGNORE_UNUSED,
+ PLL_DIV(CGU_PLL2_CFG0), 0, PLL_DIV_WIDTH, 24, 1, 0, 0,
+ pll_div),
+ LGM_DIV(LGM_CLK_CM, "cpu_cm", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+
+ LGM_DIV(LGM_CLK_IC, "cpu_ic", "pll0cz", CLK_IGNORE_UNUSED,
+ PLL_DIV(CGU_PLL0CZ_CFG0), 4, PLL_DIV_WIDTH, 25,
+ 1, 0, 0, pll_div),
+
+ LGM_DIV(LGM_CLK_SDXC3, "sdxc3", "pll0cz", 0, PLL_DIV(CGU_PLL0CZ_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+
+ LGM_DIV(LGM_CLK_CPU0, "cm0", "pll0cm0",
+ CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM0_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_CPU1, "cm1", "pll0cm1",
+ CLK_IGNORE_UNUSED, PLL_DIV(CGU_PLL0CM1_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+
+ /*
+ * Marking ngi_clk (next generation interconnect) and noc_clk
+ * (network on chip peripheral clk) as critical clocks because
+ * these are shared parent clock sources for many different
+ * peripherals.
+ */
+ LGM_DIV(LGM_CLK_NGI, "ngi", "pll0b",
+ (CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_NOC4, "noc4", "pll0b",
+ (CLK_IGNORE_UNUSED|CLK_IS_CRITICAL), PLL_DIV(CGU_PLL0B_CFG0),
+ 4, PLL_DIV_WIDTH, 25, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_SW, "switch", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_QSPI, "qspi", "pll0b", 0, PLL_DIV(CGU_PLL0B_CFG0),
+ 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_CT, "v_ct", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
+ 0, PLL_DIV_WIDTH, 24, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_DSP, "v_dsp", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
+ 8, PLL_DIV_WIDTH, 26, 1, 0, 0, pll_div),
+ LGM_DIV(LGM_CLK_VIF, "v_ifclk", "pll1", 0, PLL_DIV(CGU_PLL1_CFG0),
+ 12, PLL_DIV_WIDTH, 27, 1, 0, 0, pll_div),
+
+ LGM_FIXED_FACTOR(LGM_CLK_EMMC4, "emmc4", "sdxc3", 0, 0,
+ 0, 0, 0, 0, 1, 4),
+ LGM_FIXED_FACTOR(LGM_CLK_SDXC2, "sdxc2", "noc4", 0, 0,
+ 0, 0, 0, 0, 1, 4),
+ LGM_MUX(LGM_CLK_EMMC, "emmc", emmc_p, 0, CGU_IF_CLK1,
+ 0, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_SDXC, "sdxc", sdxc_p, 0, CGU_IF_CLK1,
+ 1, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_FIXED(LGM_CLK_OSC, "osc", NULL, 0, 0, 0, 0, 0, 40000000, 0),
+ LGM_FIXED(LGM_CLK_SLIC, "slic", NULL, 0, CGU_IF_CLK1,
+ 8, 2, CLOCK_FLAG_VAL_INIT, 8192000, 2),
+ LGM_FIXED(LGM_CLK_DOCSIS, "v_docsis", NULL, 0, 0, 0, 0, 0, 16000000, 0),
+ LGM_DIV(LGM_CLK_DCL, "dcl", "v_ifclk", 0, CGU_PCMCR,
+ 25, 3, 0, 0, 0, 0, dcl_div),
+ LGM_MUX(LGM_CLK_PCM, "pcm", pcm_p, 0, CGU_C55_PCMCR,
+ 0, 1, CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_FIXED_FACTOR(LGM_CLK_DDR_PHY, "ddr_phy", "ddr",
+ CLK_IGNORE_UNUSED, 0,
+ 0, 0, 0, 0, 2, 1),
+ LGM_FIXED_FACTOR(LGM_CLK_PONDEF, "pondef", "dd_pool",
+ CLK_SET_RATE_PARENT, 0, 0, 0, 0, 0, 1, 2),
+ LGM_MUX(LGM_CLK_CBPHY0, "cbphy0", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_CBPHY1, "cbphy1", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_CBPHY2, "cbphy2", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+ LGM_MUX(LGM_CLK_CBPHY3, "cbphy3", cbphy_p, 0, 0,
+ 0, 0, MUX_CLK_SW | CLK_MUX_ROUND_CLOSEST, 0),
+
+ LGM_GATE(LGM_GCLK_C55, "g_c55", NULL, 0, CGU_GATE0,
+ G_C55_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_QSPI, "g_qspi", "qspi", 0, CGU_GATE0,
+ G_QSPI_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_EIP197, "g_eip197", NULL, 0, CGU_GATE0,
+ G_EIP197_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_VAULT, "g_vault130", NULL, 0, CGU_GATE0,
+ G_VAULT130_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_TOE, "g_toe", NULL, 0, CGU_GATE0,
+ G_TOE_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SDXC, "g_sdxc", "sdxc", 0, CGU_GATE0,
+ G_SDXC_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_EMMC, "g_emmc", "emmc", 0, CGU_GATE0,
+ G_EMMC_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SPI_DBG, "g_spidbg", NULL, 0, CGU_GATE0,
+ G_SPIDBG_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_DMA3, "g_dma3", NULL, 0, CGU_GATE0,
+ G_DMA3_SHIFT, 0, 0),
+
+ LGM_GATE(LGM_GCLK_DMA0, "g_dma0", NULL, 0, CGU_GATE1,
+ G_DMA0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_LEDC0, "g_ledc0", NULL, 0, CGU_GATE1,
+ G_LEDC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_LEDC1, "g_ledc1", NULL, 0, CGU_GATE1,
+ G_LEDC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2S0, "g_i2s0", NULL, 0, CGU_GATE1,
+ G_I2S0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2S1, "g_i2s1", NULL, 0, CGU_GATE1,
+ G_I2S1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_EBU, "g_ebu", NULL, 0, CGU_GATE1,
+ G_EBU_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PWM, "g_pwm", NULL, 0, CGU_GATE1,
+ G_PWM_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C0, "g_i2c0", NULL, 0, CGU_GATE1,
+ G_I2C0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C1, "g_i2c1", NULL, 0, CGU_GATE1,
+ G_I2C1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C2, "g_i2c2", NULL, 0, CGU_GATE1,
+ G_I2C2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_I2C3, "g_i2c3", NULL, 0, CGU_GATE1,
+ G_I2C3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC0, "g_ssc0", "noc4", 0, CGU_GATE1,
+ G_SSC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC1, "g_ssc1", "noc4", 0, CGU_GATE1,
+ G_SSC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC2, "g_ssc2", "noc4", 0, CGU_GATE1,
+ G_SSC2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SSC3, "g_ssc3", "noc4", 0, CGU_GATE1,
+ G_SSC3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC0, "g_gptc0", "noc4", 0, CGU_GATE1,
+ G_GPTC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC1, "g_gptc1", "noc4", 0, CGU_GATE1,
+ G_GPTC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC2, "g_gptc2", "noc4", 0, CGU_GATE1,
+ G_GPTC2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GPTC3, "g_gptc3", "osc", 0, CGU_GATE1,
+ G_GPTC3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC0, "g_asc0", "noc4", 0, CGU_GATE1,
+ G_ASC0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC1, "g_asc1", "noc4", 0, CGU_GATE1,
+ G_ASC1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC2, "g_asc2", "noc4", 0, CGU_GATE1,
+ G_ASC2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_ASC3, "g_asc3", "osc", 0, CGU_GATE1,
+ G_ASC3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCM0, "g_pcm0", NULL, 0, CGU_GATE1,
+ G_PCM0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCM1, "g_pcm1", NULL, 0, CGU_GATE1,
+ G_PCM1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCM2, "g_pcm2", NULL, 0, CGU_GATE1,
+ G_PCM2_SHIFT, 0, 0),
+
+ LGM_GATE(LGM_GCLK_PCIE10, "g_pcie10", NULL, 0, CGU_GATE2,
+ G_PCIE10_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE11, "g_pcie11", NULL, 0, CGU_GATE2,
+ G_PCIE11_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE30, "g_pcie30", NULL, 0, CGU_GATE2,
+ G_PCIE30_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE31, "g_pcie31", NULL, 0, CGU_GATE2,
+ G_PCIE31_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE20, "g_pcie20", NULL, 0, CGU_GATE2,
+ G_PCIE20_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE21, "g_pcie21", NULL, 0, CGU_GATE2,
+ G_PCIE21_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE40, "g_pcie40", NULL, 0, CGU_GATE2,
+ G_PCIE40_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PCIE41, "g_pcie41", NULL, 0, CGU_GATE2,
+ G_PCIE41_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS0, "g_xpcs0", NULL, 0, CGU_GATE2,
+ G_XPCS0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS1, "g_xpcs1", NULL, 0, CGU_GATE2,
+ G_XPCS1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS2, "g_xpcs2", NULL, 0, CGU_GATE2,
+ G_XPCS2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS3, "g_xpcs3", NULL, 0, CGU_GATE2,
+ G_XPCS3_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA0, "g_sata0", NULL, 0, CGU_GATE2,
+ G_SATA0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA1, "g_sata1", NULL, 0, CGU_GATE2,
+ G_SATA1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA2, "g_sata2", NULL, 0, CGU_GATE2,
+ G_SATA2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_SATA3, "g_sata3", NULL, 0, CGU_GATE2,
+ G_SATA3_SHIFT, 0, 0),
+
+ LGM_GATE(LGM_GCLK_ARCEM4, "g_arcem4", NULL, 0, CGU_GATE3,
+ G_ARCEM4_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAR1, "g_idmar1", NULL, 0, CGU_GATE3,
+ G_IDMAR1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAT0, "g_idmat0", NULL, 0, CGU_GATE3,
+ G_IDMAT0_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAT1, "g_idmat1", NULL, 0, CGU_GATE3,
+ G_IDMAT1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_IDMAT2, "g_idmat2", NULL, 0, CGU_GATE3,
+ G_IDMAT2_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_PPV4, "g_ppv4", NULL, 0, CGU_GATE3,
+ G_PPV4_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_GSWIPO, "g_gswipo", "switch", 0, CGU_GATE3,
+ G_GSWIPO_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_CQEM, "g_cqem", "switch", 0, CGU_GATE3,
+ G_CQEM_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_XPCS5, "g_xpcs5", NULL, 0, CGU_GATE3,
+ G_XPCS5_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_USB1, "g_usb1", NULL, 0, CGU_GATE3,
+ G_USB1_SHIFT, 0, 0),
+ LGM_GATE(LGM_GCLK_USB2, "g_usb2", NULL, 0, CGU_GATE3,
+ G_USB2_SHIFT, 0, 0),
+};
+
+
+static const struct lgm_clk_ddiv_data lgm_ddiv_clks[] = {
+ LGM_DDIV(LGM_CLK_CML, "dd_cml", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 0, PLL_DDIV_WIDTH,
+ 3, PLL_DDIV_WIDTH, 24, 1, 29, 0),
+ LGM_DDIV(LGM_CLK_SERDES, "dd_serdes", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 6, PLL_DDIV_WIDTH,
+ 9, PLL_DDIV_WIDTH, 25, 1, 28, 0),
+ LGM_DDIV(LGM_CLK_POOL, "dd_pool", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 12, PLL_DDIV_WIDTH,
+ 15, PLL_DDIV_WIDTH, 26, 1, 28, 0),
+ LGM_DDIV(LGM_CLK_PTP, "dd_ptp", "ljpll3", 0,
+ PLL_DIV(CGU_LJPLL3_CFG0), 18, PLL_DDIV_WIDTH,
+ 21, PLL_DDIV_WIDTH, 27, 1, 28, 0),
+ LGM_DDIV(LGM_CLK_PCIE, "dd_pcie", "ljpll4", 0,
+ PLL_DIV(CGU_LJPLL4_CFG0), 0, PLL_DDIV_WIDTH,
+ 3, PLL_DDIV_WIDTH, 24, 1, 29, 0),
+};
+
+static int lgm_cgu_probe(struct platform_device *pdev)
+{
+ struct lgm_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ctx = devm_kzalloc(dev, struct_size(ctx, clk_data.hws, CLK_NR_CLKS),
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->clk_data.num = CLK_NR_CLKS;
+
+ ctx->membase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->membase))
+ return PTR_ERR(ctx->membase);
+
+ ctx->np = np;
+ ctx->dev = dev;
+ spin_lock_init(&ctx->lock);
+
+ ret = lgm_clk_register_plls(ctx, lgm_pll_clks,
+ ARRAY_SIZE(lgm_pll_clks));
+ if (ret)
+ return ret;
+
+ ret = lgm_clk_register_branches(ctx, lgm_branch_clks,
+ ARRAY_SIZE(lgm_branch_clks));
+ if (ret)
+ return ret;
+
+ ret = lgm_clk_register_ddiv(ctx, lgm_ddiv_clks,
+ ARRAY_SIZE(lgm_ddiv_clks));
+ if (ret)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &ctx->clk_data);
+}
+
+static const struct of_device_id of_lgm_cgu_match[] = {
+ { .compatible = "intel,cgu-lgm" },
+ {}
+};
+
+static struct platform_driver lgm_cgu_driver = {
+ .probe = lgm_cgu_probe,
+ .driver = {
+ .name = "cgu-lgm",
+ .of_match_table = of_lgm_cgu_match,
+ },
+};
+builtin_platform_driver(lgm_cgu_driver);
diff --git a/drivers/clk/zynqmp/clk-gate-zynqmp.c b/drivers/clk/zynqmp/clk-gate-zynqmp.c
index 83b236f20fff..10c9b889324f 100644
--- a/drivers/clk/zynqmp/clk-gate-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-gate-zynqmp.c
@@ -37,9 +37,8 @@ static int zynqmp_clk_gate_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enabled failed for %s, ret = %d\n",
@@ -58,9 +57,8 @@ static void zynqmp_clk_gate_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
@@ -79,9 +77,8 @@ static int zynqmp_clk_gate_is_enabled(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = gate->clk_id;
int state, ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 0af8f74c5fa5..06194149be83 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -47,9 +47,8 @@ static u8 zynqmp_clk_mux_get_parent(struct clk_hw *hw)
u32 clk_id = mux->clk_id;
u32 val;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getparent(clk_id, &val);
+ ret = zynqmp_pm_clock_getparent(clk_id, &val);
if (ret)
pr_warn_once("%s() getparent failed for clock: %s, ret = %d\n",
@@ -71,9 +70,8 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = mux->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_setparent(clk_id, index);
+ ret = zynqmp_pm_clock_setparent(clk_id, index);
if (ret)
pr_warn_once("%s() set parent failed for clock: %s, ret = %d\n",
diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h
index fec9a15c8786..5beeb41b29fa 100644
--- a/drivers/clk/zynqmp/clk-zynqmp.h
+++ b/drivers/clk/zynqmp/clk-zynqmp.h
@@ -30,6 +30,7 @@ struct clock_topology {
u32 type;
u32 flag;
u32 type_flag;
+ u8 custom_type_flag;
};
struct clk_hw *zynqmp_clk_register_pll(const char *name, u32 clk_id,
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 10e89f23880b..db8d0d7161ce 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -84,6 +84,7 @@ struct name_resp {
struct topology_resp {
#define CLK_TOPOLOGY_TYPE GENMASK(3, 0)
+#define CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS GENMASK(7, 4)
#define CLK_TOPOLOGY_FLAGS GENMASK(23, 8)
#define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24)
u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS];
@@ -134,7 +135,6 @@ static struct clk_hw *(* const clk_topology[]) (const char *name, u32 clk_id,
static struct zynqmp_clock *clock;
static struct clk_hw_onecell_data *zynqmp_data;
static unsigned int clock_max_idx;
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* zynqmp_is_valid_clock() - Check whether clock is valid or not
@@ -206,7 +206,7 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
*nclocks = ret_payload[1];
return ret;
@@ -231,7 +231,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_NAME;
qdata.arg1 = clock_id;
- eemi_ops->query_data(qdata, ret_payload);
+ zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, ret_payload, sizeof(*response));
return 0;
@@ -265,7 +265,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -296,7 +296,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
qdata.qid = PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS;
qdata.arg1 = clk_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
if (ret)
return ERR_PTR(ret);
@@ -339,7 +339,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index,
qdata.arg1 = clock_id;
qdata.arg2 = index;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -364,7 +364,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id,
qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
qdata.arg1 = clock_id;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
@@ -396,6 +396,9 @@ static int __zynqmp_clock_get_topology(struct clock_topology *topology,
topology[*nnodes].type_flag =
FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
response->topology[i]);
+ topology[*nnodes].custom_type_flag =
+ FIELD_GET(CLK_TOPOLOGY_CUSTOM_TYPE_FLAGS,
+ response->topology[i]);
(*nnodes)++;
}
@@ -558,7 +561,7 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
{
int j;
u32 num_nodes, clk_dev_id;
- char *clk_out = NULL;
+ char *clk_out[MAX_NODES];
struct clock_topology *nodes;
struct clk_hw *hw = NULL;
@@ -572,16 +575,16 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
* Intermediate clock names are postfixed with type of clock.
*/
if (j != (num_nodes - 1)) {
- clk_out = kasprintf(GFP_KERNEL, "%s%s", clk_name,
+ clk_out[j] = kasprintf(GFP_KERNEL, "%s%s", clk_name,
clk_type_postfix[nodes[j].type]);
} else {
- clk_out = kasprintf(GFP_KERNEL, "%s", clk_name);
+ clk_out[j] = kasprintf(GFP_KERNEL, "%s", clk_name);
}
if (!clk_topology[nodes[j].type])
continue;
- hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id,
+ hw = (*clk_topology[nodes[j].type])(clk_out[j], clk_dev_id,
parent_names,
num_parents,
&nodes[j]);
@@ -590,9 +593,12 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
__func__, clk_dev_id, clk_name,
PTR_ERR(hw));
- parent_names[0] = clk_out;
+ parent_names[0] = clk_out[j];
}
- kfree(clk_out);
+
+ for (j = 0; j < num_nodes; j++)
+ kfree(clk_out[j]);
+
return hw;
}
@@ -663,6 +669,11 @@ static void zynqmp_get_clock_info(void)
continue;
clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]);
+ /* skip the query for an invalid clock */
+ ret = zynqmp_is_valid_clock(i);
+ if (ret != CLK_ATTR_VALID)
+ continue;
+
clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ?
CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
@@ -738,10 +749,6 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
int ret;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
ret = zynqmp_clk_setup(dev->of_node);
return ret;
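
The clkc.c hunk above drops the eemi_ops indirection in favour of direct zynqmp_pm_* firmware calls and, in zynqmp_register_clk_topology(), keeps one output-name string per topology node so every kasprintf() allocation can be released after registration. Below is a minimal userspace sketch of that allocate-per-node / free-at-the-end pattern; MAX_NODES, the postfix table and the "acpu" base name are illustrative, not taken from the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NODES 6

/* mimic kasprintf(): build "<base><postfix>" in a fresh heap buffer */
static char *name_for_node(const char *base, const char *postfix)
{
	size_t len = strlen(base) + strlen(postfix) + 1;
	char *s = malloc(len);

	if (s)
		snprintf(s, len, "%s%s", base, postfix);
	return s;
}

int main(void)
{
	const char *postfix[MAX_NODES] = { "_pll", "_div", "_mux", "", "", "" };
	char *clk_out[MAX_NODES] = { NULL };
	int num_nodes = 4, j;

	for (j = 0; j < num_nodes; j++) {
		/* the last node keeps the plain clock name, as the driver does */
		clk_out[j] = name_for_node("acpu",
					   j == num_nodes - 1 ? "" : postfix[j]);
		if (clk_out[j])
			printf("register node %d as %s\n", j, clk_out[j]);
	}

	/* free everything only after all nodes have been registered */
	for (j = 0; j < num_nodes; j++)
		free(clk_out[j]);

	return 0;
}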
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 4be2cc76aa2e..66da02b83d39 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -25,7 +25,8 @@
#define to_zynqmp_clk_divider(_hw) \
container_of(_hw, struct zynqmp_clk_divider, hw)
-#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CLK_FRAC BIT(13) /* has a fractional parent */
+#define CUSTOM_FLAG_CLK_FRAC BIT(0) /* has a fractional parent in custom type flag */
/**
* struct zynqmp_clk_divider - adjustable divider clock
@@ -83,9 +84,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 div, value;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &div);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &div);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -111,23 +111,30 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
static void zynqmp_get_divider2_val(struct clk_hw *hw,
unsigned long rate,
- unsigned long parent_rate,
struct zynqmp_clk_divider *divider,
int *bestdiv)
{
int div1;
int div2;
long error = LONG_MAX;
- struct clk_hw *parent_hw = clk_hw_get_parent(hw);
- struct zynqmp_clk_divider *pdivider = to_zynqmp_clk_divider(parent_hw);
+ unsigned long div1_prate;
+ struct clk_hw *div1_parent_hw;
+ struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw);
+ struct zynqmp_clk_divider *pdivider =
+ to_zynqmp_clk_divider(div2_parent_hw);
if (!pdivider)
return;
+ div1_parent_hw = clk_hw_get_parent(div2_parent_hw);
+ if (!div1_parent_hw)
+ return;
+
+ div1_prate = clk_hw_get_rate(div1_parent_hw);
*bestdiv = 1;
for (div1 = 1; div1 <= pdivider->max_div;) {
for (div2 = 1; div2 <= divider->max_div;) {
- long new_error = ((parent_rate / div1) / div2) - rate;
+ long new_error = ((div1_prate / div1) / div2) - rate;
if (abs(new_error) < abs(error)) {
*bestdiv = div2;
@@ -163,11 +170,10 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
u32 div_type = divider->div_type;
u32 bestdiv;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
/* if read only, just return current value */
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- ret = eemi_ops->clock_getdivider(clk_id, &bestdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &bestdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
@@ -192,11 +198,13 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
*/
if (div_type == TYPE_DIV2 &&
(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
- zynqmp_get_divider2_val(hw, rate, *prate, divider, &bestdiv);
+ zynqmp_get_divider2_val(hw, rate, divider, &bestdiv);
}
if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
+
+ bestdiv = min_t(u32, bestdiv, divider->max_div);
*prate = rate * bestdiv;
return rate;
@@ -219,7 +227,6 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
u32 div_type = divider->div_type;
u32 value, div;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
value = zynqmp_divider_get_val(parent_rate, rate, divider->flags);
if (div_type == TYPE_DIV1) {
@@ -233,7 +240,7 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
div = __ffs(div);
- ret = eemi_ops->clock_setdivider(clk_id, div);
+ ret = zynqmp_pm_clock_setdivider(clk_id, div);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
@@ -256,9 +263,8 @@ static const struct clk_ops zynqmp_clk_divider_ops = {
* Return: Maximum divisor of a clock if query data is successful
* U16_MAX in case of query data is not success
*/
-u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
+static u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -266,7 +272,7 @@ u32 zynqmp_clk_get_max_divisor(u32 clk_id, u32 type)
qdata.qid = PM_QID_CLOCK_GET_MAX_DIVISOR;
qdata.arg1 = clk_id;
qdata.arg2 = type;
- ret = eemi_ops->query_data(qdata, ret_payload);
+ ret = zynqmp_pm_query_data(qdata, ret_payload);
/*
* To maintain backward compatibility return maximum possible value
* (0xFFFF) if query for max divisor is not successful.
@@ -311,7 +317,8 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
init.num_parents = 1;
/* struct clk_divider assignments */
- div->is_frac = !!(nodes->flag & CLK_FRAC);
+ div->is_frac = !!((nodes->flag & CLK_FRAC) |
+ (nodes->custom_type_flag & CUSTOM_FLAG_CLK_FRAC));
div->flags = nodes->type_flag;
div->hw.init = &init;
div->clk_id = clk_id;
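
The divider.c hunk above reworks zynqmp_get_divider2_val() to base its search on the rate of the first divider's own parent and to walk (div1, div2) pairs looking for the smallest error against the requested rate, then clamps the result to the divider's max_div. A hedged userspace sketch of that best-divisor search follows; the linear stepping and the max_div values are simplifications of what the driver does with its divider flags.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* return the second-stage divisor that minimises |div1_prate/div1/div2 - rate| */
static int best_div2(unsigned long div1_prate, unsigned long rate,
		     int max_div1, int max_div2)
{
	long error = LONG_MAX;
	int bestdiv = 1;

	for (int div1 = 1; div1 <= max_div1; div1++) {
		for (int div2 = 1; div2 <= max_div2; div2++) {
			long new_error = (long)((div1_prate / div1) / div2) -
					 (long)rate;

			if (labs(new_error) < labs(error)) {
				bestdiv = div2;
				error = new_error;
			}
		}
	}
	return bestdiv;
}

int main(void)
{
	/* 1 GHz base rate, 33 MHz requested: expect a small combined error */
	printf("best div2 = %d\n",
	       best_div2(1000000000UL, 33000000UL, 64, 64));
	return 0;
}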
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 89b599530105..92f449ed38e5 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -50,10 +50,8 @@ static inline enum pll_mode zynqmp_pll_get_mode(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_MODE, clk_id, 0,
- ret_payload);
+ ret = zynqmp_pm_get_pll_frac_mode(clk_id, ret_payload);
if (ret)
pr_warn_once("%s() PLL get frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -73,14 +71,13 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
const char *clk_name = clk_hw_get_name(hw);
int ret;
u32 mode;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (on)
mode = PLL_MODE_FRAC;
else
mode = PLL_MODE_INT;
- ret = eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode, NULL);
+ ret = zynqmp_pm_set_pll_frac_mode(clk_id, mode);
if (ret)
pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -139,17 +136,15 @@ static unsigned long zynqmp_pll_recalc_rate(struct clk_hw *hw,
unsigned long rate, frac;
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getdivider(clk_id, &fbdiv);
+ ret = zynqmp_pm_clock_getdivider(clk_id, &fbdiv);
if (ret)
pr_warn_once("%s() get divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
rate = parent_rate * fbdiv;
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
- eemi_ops->ioctl(0, IOCTL_GET_PLL_FRAC_DATA, clk_id, 0,
- ret_payload);
+ zynqmp_pm_get_pll_frac_data(clk_id, ret_payload);
data = ret_payload[1];
frac = (parent_rate * data) / FRAC_DIV;
rate = rate + frac;
@@ -177,7 +172,6 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 fbdiv;
long rate_div, frac, m, f;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
rate_div = (rate * FRAC_DIV) / parent_rate;
@@ -187,21 +181,21 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
rate = parent_rate * m;
frac = (parent_rate * f) / FRAC_DIV;
- ret = eemi_ops->clock_setdivider(clk_id, m);
+ ret = zynqmp_pm_clock_setdivider(clk_id, m);
if (ret == -EUSERS)
WARN(1, "More than allowed devices are using the %s, which is forbidden\n",
clk_name);
else if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
- eemi_ops->ioctl(0, IOCTL_SET_PLL_FRAC_DATA, clk_id, f, NULL);
+ zynqmp_pm_set_pll_frac_data(clk_id, f);
return rate + frac;
}
fbdiv = DIV_ROUND_CLOSEST(rate, parent_rate);
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- ret = eemi_ops->clock_setdivider(clk_id, fbdiv);
+ ret = zynqmp_pm_clock_setdivider(clk_id, fbdiv);
if (ret)
pr_warn_once("%s() set divider failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -222,9 +216,8 @@ static int zynqmp_pll_is_enabled(struct clk_hw *hw)
u32 clk_id = clk->clk_id;
unsigned int state;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- ret = eemi_ops->clock_getstate(clk_id, &state);
+ ret = zynqmp_pm_clock_getstate(clk_id, &state);
if (ret) {
pr_warn_once("%s() clock get state failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -246,12 +239,11 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (zynqmp_pll_is_enabled(hw))
return 0;
- ret = eemi_ops->clock_enable(clk_id);
+ ret = zynqmp_pm_clock_enable(clk_id);
if (ret)
pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
__func__, clk_name, ret);
@@ -269,12 +261,11 @@ static void zynqmp_pll_disable(struct clk_hw *hw)
const char *clk_name = clk_hw_get_name(hw);
u32 clk_id = clk->clk_id;
int ret;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
if (!zynqmp_pll_is_enabled(hw))
return;
- ret = eemi_ops->clock_disable(clk_id);
+ ret = zynqmp_pm_clock_disable(clk_id);
if (ret)
pr_warn_once("%s() clock disable failed for %s, ret = %d\n",
__func__, clk_name, ret);
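
The pll.c hunk above keeps the fractional-PLL arithmetic and only swaps the eemi_ops calls for the direct zynqmp_pm_* helpers. A hedged worked example of that arithmetic follows; FRAC_DIV is assumed here to be 1 << 16 as the fractional scale, and the parent and requested rates are arbitrary sample values.

#include <stdio.h>

#define FRAC_DIV (1UL << 16)	/* assumed fractional scale */

int main(void)
{
	unsigned long parent_rate = 50000000UL;		/* 50 MHz reference */
	unsigned long rate = 1188000000UL;		/* requested PLL output */
	unsigned long rate_div, m, f, out, frac;

	/* split the ratio into integer multiplier m and fractional part f */
	rate_div = (unsigned long)(((unsigned long long)rate * FRAC_DIV) /
				   parent_rate);
	m = rate_div / FRAC_DIV;
	f = rate_div % FRAC_DIV;

	/* rebuild the achieved rate from the two parts */
	out = parent_rate * m;
	frac = (unsigned long)(((unsigned long long)parent_rate * f) / FRAC_DIV);

	printf("m=%lu f=%lu achieved=%lu Hz (requested %lu)\n",
	       m, f, out + frac, rate);
	return 0;
}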
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c4f15c4068c0..9de1dabfb126 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -12,8 +12,11 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>
@@ -39,6 +42,7 @@ static int riscv_clock_next_event(unsigned long delta,
return 0;
}
+static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
.name = "riscv_timer_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -74,30 +78,36 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
+ ce->irq = riscv_clock_event_irq;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
- csr_set(CSR_IE, IE_TIE);
+ enable_percpu_irq(riscv_clock_event_irq,
+ irq_get_trigger_type(riscv_clock_event_irq));
return 0;
}
static int riscv_timer_dying_cpu(unsigned int cpu)
{
- csr_clear(CSR_IE, IE_TIE);
+ disable_percpu_irq(riscv_clock_event_irq);
return 0;
}
/* called directly from the low-level interrupt handler */
-void riscv_timer_interrupt(void)
+static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev);
+
+ return IRQ_HANDLED;
}
static int __init riscv_timer_init_dt(struct device_node *n)
{
int cpuid, hartid, error;
+ struct device_node *child;
+ struct irq_domain *domain;
hartid = riscv_of_processor_hartid(n);
if (hartid < 0) {
@@ -115,6 +125,25 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (cpuid != smp_processor_id())
return 0;
+ domain = NULL;
+ child = of_get_compatible_child(n, "riscv,cpu-intc");
+ if (!child) {
+ pr_err("Failed to find INTC node [%pOF]\n", n);
+ return -ENODEV;
+ }
+ domain = irq_find_host(child);
+ of_node_put(child);
+ if (!domain) {
+ pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
+ return -ENODEV;
+ }
+
+ riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
+ if (!riscv_clock_event_irq) {
+ pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
+ return -ENODEV;
+ }
+
pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
__func__, cpuid, hartid);
error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
@@ -126,6 +155,14 @@ static int __init riscv_timer_init_dt(struct device_node *n)
sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
+ error = request_percpu_irq(riscv_clock_event_irq,
+ riscv_timer_interrupt,
+ "riscv-timer", &riscv_clock_event);
+ if (error) {
+ pr_err("registering percpu irq failed [%d]\n", error);
+ return error;
+ }
+
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
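
The timer-riscv.c hunk above stops toggling CSR_IE directly and instead maps the timer interrupt through the hart-local INTC irqdomain, requests it once as a per-cpu interrupt, and enables or disables it from the CPU hotplug callbacks. Below is a hedged, condensed restatement of that flow, not a standalone driver; the hwirq number and the my_* names are placeholders.

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>

#define MY_TIMER_HWIRQ	5	/* placeholder; the driver uses RV_IRQ_TIMER */

static unsigned int my_timer_irq;
static DEFINE_PER_CPU(struct clock_event_device, my_clock_event);

/* shared handler; the per-cpu cookie selects this CPU's clock_event_device */
static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&my_clock_event);

	evdev->event_handler(evdev);
	return IRQ_HANDLED;
}

static int __init my_timer_irq_init(struct device_node *cpu_node)
{
	struct device_node *intc;
	struct irq_domain *domain;

	/* the timer hwirq lives in the per-hart interrupt controller */
	intc = of_get_compatible_child(cpu_node, "riscv,cpu-intc");
	if (!intc)
		return -ENODEV;

	domain = irq_find_host(intc);
	of_node_put(intc);
	if (!domain)
		return -ENODEV;

	my_timer_irq = irq_create_mapping(domain, MY_TIMER_HWIRQ);
	if (!my_timer_irq)
		return -ENODEV;

	/* requested once here; enabled per CPU from the hotplug callback */
	return request_percpu_irq(my_timer_irq, my_timer_handler,
				  "my-timer", &my_clock_event);
}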
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 9481292981f0..c6cbfc8baf72 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -295,11 +295,11 @@ config ARM_TANGO_CPUFREQ
default y
config ARM_TEGRA20_CPUFREQ
- tristate "Tegra20 CPUFreq support"
- depends on ARCH_TEGRA
+ tristate "Tegra20/30 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
default y
help
- This adds the CPUFreq driver support for Tegra20 SOCs.
+ This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
bool "Tegra124 CPUFreq support"
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 289e8ce3fd13..429e5a36c08a 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -126,12 +126,12 @@ static void boost_set_msr_each(void *p_en)
boost_set_msr(enable);
}
-static int set_boost(int val)
+static int set_boost(struct cpufreq_policy *policy, int val)
{
- get_online_cpus();
- on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
- put_online_cpus();
- pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+ on_each_cpu_mask(policy->cpus, boost_set_msr_each,
+ (void *)(long)val, 1);
+ pr_debug("CPU %*pbl: Core Boosting %sabled.\n",
+ cpumask_pr_args(policy->cpus), val ? "en" : "dis");
return 0;
}
@@ -162,7 +162,9 @@ static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
if (ret || val > 1)
return -EINVAL;
- set_boost(val);
+ get_online_cpus();
+ set_boost(policy, val);
+ put_online_cpus();
return count;
}
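
The acpi-cpufreq hunk above makes set_boost() per-policy: the boost MSR is toggled only on the CPUs of the given policy via on_each_cpu_mask(), and the sysfs store path takes get_online_cpus()/put_online_cpus() around the call. A hedged sketch of that shape; toggle_boost_msr() is a placeholder for boost_set_msr_each().

#include <linux/cpufreq.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* runs on each CPU of the mask; stands in for boost_set_msr_each() */
static void toggle_boost_msr(void *enable)
{
	pr_debug("cpu%d: boost %sabled\n", smp_processor_id(),
		 (long)enable ? "en" : "dis");
}

/* per-policy toggle; callers hold get_online_cpus(), as store_cpb() does */
static int policy_set_boost(struct cpufreq_policy *policy, int val)
{
	on_each_cpu_mask(policy->cpus, toggle_boost_msr,
			 (void *)(long)val, 1);
	return 0;
}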
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index bda0b2406fba..257d726a4456 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -37,6 +37,7 @@
* requested etc.
*/
static struct cppc_cpudata **all_cpu_data;
+static bool boost_supported;
struct cppc_workaround_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
@@ -310,7 +311,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* Section 8.4.7.1.1.5 of ACPI 6.1 spec)
*/
policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
- policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
+ policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
/*
* Set cpuinfo.min_freq to Lowest to make the full range of performance
@@ -318,7 +319,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* nonlinear perf
*/
policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);
+ policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.nominal_perf);
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
policy->shared_type = cpu->shared_type;
@@ -343,6 +344,13 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu->cur_policy = policy;
+ /*
+ * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
+ * is supported.
+ */
+ if (cpu->perf_caps.highest_perf > cpu->perf_caps.nominal_perf)
+ boost_supported = true;
+
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_cpufreq_perf_to_khz(cpu,
cpu->perf_caps.highest_perf);
@@ -410,6 +418,32 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}
+static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
+{
+ struct cppc_cpudata *cpudata;
+ int ret;
+
+ if (!boost_supported) {
+ pr_err("BOOST not supported by CPU or firmware\n");
+ return -EINVAL;
+ }
+
+ cpudata = all_cpu_data[policy->cpu];
+ if (state)
+ policy->max = cppc_cpufreq_perf_to_khz(cpudata,
+ cpudata->perf_caps.highest_perf);
+ else
+ policy->max = cppc_cpufreq_perf_to_khz(cpudata,
+ cpudata->perf_caps.nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
+
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
@@ -417,6 +451,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.get = cppc_cpufreq_get_rate,
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
+ .set_boost = cppc_cpufreq_set_boost,
.name = "cppc_cpufreq",
};
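
The cppc_cpufreq hunk above advertises nominal_perf as the default maximum, treats a highest_perf greater than nominal_perf as evidence that boost is supported, and lets set_boost() switch policy->max between the two before pushing it through freq_qos_update_request(). A hedged userspace sketch of that decision; the perf-to-kHz conversion is a placeholder linear scale.

#include <stdbool.h>
#include <stdio.h>

struct perf_caps { unsigned int lowest, nominal, highest; };

/* placeholder conversion; the driver derives this from firmware data */
static unsigned int perf_to_khz(unsigned int perf)
{
	return perf * 100000;
}

int main(void)
{
	struct perf_caps caps = { .lowest = 4, .nominal = 20, .highest = 28 };
	bool boost_supported = caps.highest > caps.nominal;
	bool boost_on = true;
	unsigned int max_khz;

	/* nominal is the default ceiling; highest is exposed only with boost on */
	max_khz = perf_to_khz(boost_supported && boost_on ?
			      caps.highest : caps.nominal);

	printf("boost %s, policy max = %u kHz\n",
	       boost_supported ? "supported" : "unsupported", max_khz);
	return 0;
}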
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 26fe8dfb9ce6..79742bbd221f 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -121,6 +121,10 @@ static int resources_available(void)
clk_put(cpu_clk);
+ ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (ret)
+ return ret;
+
name = find_supply_name(cpu_dev);
/* Platform doesn't require regulator */
if (!name)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d03f250f68e4..0128de3603df 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2532,34 +2532,29 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(int state)
+static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
- struct cpufreq_policy *policy;
-
- for_each_active_policy(policy) {
- int ret;
+ int ret;
- if (!policy->freq_table)
- return -ENXIO;
+ if (!policy->freq_table)
+ return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy,
- policy->freq_table);
- if (ret) {
- pr_err("%s: Policy frequency update failed\n",
- __func__);
- return ret;
- }
-
- ret = freq_qos_update_request(policy->max_freq_req, policy->max);
- if (ret < 0)
- return ret;
+ ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ if (ret) {
+ pr_err("%s: Policy frequency update failed\n", __func__);
+ return ret;
}
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+ if (ret < 0)
+ return ret;
+
return 0;
}
int cpufreq_boost_trigger_state(int state)
{
+ struct cpufreq_policy *policy;
unsigned long flags;
int ret = 0;
@@ -2570,15 +2565,25 @@ int cpufreq_boost_trigger_state(int state)
cpufreq_driver->boost_enabled = state;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_driver->set_boost(state);
- if (ret) {
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver->boost_enabled = !state;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_err("%s: Cannot %s BOOST\n",
- __func__, state ? "enable" : "disable");
+ get_online_cpus();
+ for_each_active_policy(policy) {
+ ret = cpufreq_driver->set_boost(policy, state);
+ if (ret)
+ goto err_reset_state;
}
+ put_online_cpus();
+
+ return 0;
+
+err_reset_state:
+ put_online_cpus();
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver->boost_enabled = !state;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ pr_err("%s: Cannot %s BOOST\n",
+ __func__, state ? "enable" : "disable");
return ret;
}
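
The cpufreq.c hunk above moves the policy walk into cpufreq_boost_trigger_state(): every active policy gets the driver's per-policy set_boost() under CPU hotplug protection, and the global boost_enabled flag is rolled back if any call fails. A hedged userspace sketch of that apply-then-roll-back shape; apply() and the fixed policy count are stand-ins.

#include <stdio.h>

/* stand-in for the per-policy set_boost() callback */
static int apply(int policy_idx, int state)
{
	(void)state;
	return policy_idx == 2 ? -1 : 0;	/* pretend one policy rejects it */
}

int main(void)
{
	int boost_enabled = 0, state = 1, ret = 0;

	boost_enabled = state;
	for (int i = 0; i < 4; i++) {
		ret = apply(i, state);
		if (ret)
			break;
	}
	if (ret) {
		/* first failure: restore the global flag and report */
		boost_enabled = !state;
		fprintf(stderr, "cannot %s boost\n",
			state ? "enable" : "disable");
	}

	printf("boost_enabled=%d ret=%d\n", boost_enabled, ret);
	return 0;
}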
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 2e233ad72758..3d2f143748ef 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -93,7 +93,8 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra186_cpufreq_driver = {
.name = "tegra186",
- .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
index f84ecd22f488..8c893043953e 100644
--- a/drivers/cpufreq/tegra20-cpufreq.c
+++ b/drivers/cpufreq/tegra20-cpufreq.c
@@ -7,201 +7,96 @@
* Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation
*/
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/types.h>
-static struct cpufreq_frequency_table freq_table[] = {
- { .frequency = 216000 },
- { .frequency = 312000 },
- { .frequency = 456000 },
- { .frequency = 608000 },
- { .frequency = 760000 },
- { .frequency = 816000 },
- { .frequency = 912000 },
- { .frequency = 1000000 },
- { .frequency = CPUFREQ_TABLE_END },
-};
-
-struct tegra20_cpufreq {
- struct device *dev;
- struct cpufreq_driver driver;
- struct clk *cpu_clk;
- struct clk *pll_x_clk;
- struct clk *pll_p_clk;
- bool pll_x_prepared;
-};
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
-static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
- unsigned int index)
+static bool cpu0_node_has_opp_v2_prop(void)
{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
-
- /*
- * Don't switch to intermediate freq if:
- * - we are already at it, i.e. policy->cur == ifreq
- * - index corresponds to ifreq
- */
- if (freq_table[index].frequency == ifreq || policy->cur == ifreq)
- return 0;
-
- return ifreq;
-}
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
-static int tegra_target_intermediate(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- int ret;
-
- /*
- * Take an extra reference to the main pll so it doesn't turn
- * off when we move the cpu off of it as enabling it again while we
- * switch to it from tegra_target() would take additional time.
- *
- * When target-freq is equal to intermediate freq we don't need to
- * switch to an intermediate freq and so this routine isn't called.
- * Also, we wouldn't be using pll_x anymore and must not take extra
- * reference to it, as it can be disabled now to save some power.
- */
- clk_prepare_enable(cpufreq->pll_x_clk);
-
- ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
- if (ret)
- clk_disable_unprepare(cpufreq->pll_x_clk);
- else
- cpufreq->pll_x_prepared = true;
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+ of_node_put(np);
return ret;
}
-static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
- unsigned long rate = freq_table[index].frequency;
- unsigned int ifreq = clk_get_rate(cpufreq->pll_p_clk) / 1000;
- int ret;
-
- /*
- * target freq == pll_p, don't need to take extra reference to pll_x_clk
- * as it isn't used anymore.
- */
- if (rate == ifreq)
- return clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_p_clk);
-
- ret = clk_set_rate(cpufreq->pll_x_clk, rate * 1000);
- /* Restore to earlier frequency on error, i.e. pll_x */
- if (ret)
- dev_err(cpufreq->dev, "Failed to change pll_x to %lu\n", rate);
-
- ret = clk_set_parent(cpufreq->cpu_clk, cpufreq->pll_x_clk);
- /* This shouldn't fail while changing or restoring */
- WARN_ON(ret);
-
- /*
- * Drop count to pll_x clock only if we switched to intermediate freq
- * earlier while transitioning to a target frequency.
- */
- if (cpufreq->pll_x_prepared) {
- clk_disable_unprepare(cpufreq->pll_x_clk);
- cpufreq->pll_x_prepared = false;
- }
-
- return ret;
-}
-
-static int tegra_cpu_init(struct cpufreq_policy *policy)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
-
- clk_prepare_enable(cpufreq->cpu_clk);
-
- /* FIXME: what's the actual transition time? */
- cpufreq_generic_init(policy, freq_table, 300 * 1000);
- policy->clk = cpufreq->cpu_clk;
- policy->suspend_freq = freq_table[0].frequency;
- return 0;
-}
-
-static int tegra_cpu_exit(struct cpufreq_policy *policy)
-{
- struct tegra20_cpufreq *cpufreq = cpufreq_get_driver_data();
-
- clk_disable_unprepare(cpufreq->cpu_clk);
- return 0;
-}
-
static int tegra20_cpufreq_probe(struct platform_device *pdev)
{
- struct tegra20_cpufreq *cpufreq;
+ struct platform_device *cpufreq_dt;
+ struct opp_table *opp_table;
+ struct device *cpu_dev;
+ u32 versions[2];
int err;
- cpufreq = devm_kzalloc(&pdev->dev, sizeof(*cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
+ if (!cpu0_node_has_opp_v2_prop()) {
+ dev_err(&pdev->dev, "operating points not found\n");
+ dev_err(&pdev->dev, "please update your device tree\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("nvidia,tegra20")) {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.soc_speedo_id);
+ } else {
+ versions[0] = BIT(tegra_sku_info.cpu_process_id);
+ versions[1] = BIT(tegra_sku_info.cpu_speedo_id);
+ }
+
+ dev_info(&pdev->dev, "hardware version 0x%x 0x%x\n",
+ versions[0], versions[1]);
- cpufreq->cpu_clk = clk_get_sys(NULL, "cclk");
- if (IS_ERR(cpufreq->cpu_clk))
- return PTR_ERR(cpufreq->cpu_clk);
+ cpu_dev = get_cpu_device(0);
+ if (WARN_ON(!cpu_dev))
+ return -ENODEV;
- cpufreq->pll_x_clk = clk_get_sys(NULL, "pll_x");
- if (IS_ERR(cpufreq->pll_x_clk)) {
- err = PTR_ERR(cpufreq->pll_x_clk);
- goto put_cpu;
+ opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
+ err = PTR_ERR_OR_ZERO(opp_table);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set supported hw: %d\n", err);
+ return err;
}
- cpufreq->pll_p_clk = clk_get_sys(NULL, "pll_p");
- if (IS_ERR(cpufreq->pll_p_clk)) {
- err = PTR_ERR(cpufreq->pll_p_clk);
- goto put_pll_x;
+ cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ err = PTR_ERR_OR_ZERO(cpufreq_dt);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to create cpufreq-dt device: %d\n", err);
+ goto err_put_supported_hw;
}
- cpufreq->dev = &pdev->dev;
- cpufreq->driver.get = cpufreq_generic_get;
- cpufreq->driver.attr = cpufreq_generic_attr;
- cpufreq->driver.init = tegra_cpu_init;
- cpufreq->driver.exit = tegra_cpu_exit;
- cpufreq->driver.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK;
- cpufreq->driver.verify = cpufreq_generic_frequency_table_verify;
- cpufreq->driver.suspend = cpufreq_generic_suspend;
- cpufreq->driver.driver_data = cpufreq;
- cpufreq->driver.target_index = tegra_target;
- cpufreq->driver.get_intermediate = tegra_get_intermediate;
- cpufreq->driver.target_intermediate = tegra_target_intermediate;
- snprintf(cpufreq->driver.name, CPUFREQ_NAME_LEN, "tegra");
-
- err = cpufreq_register_driver(&cpufreq->driver);
- if (err)
- goto put_pll_p;
-
- platform_set_drvdata(pdev, cpufreq);
+ platform_set_drvdata(pdev, cpufreq_dt);
return 0;
-put_pll_p:
- clk_put(cpufreq->pll_p_clk);
-put_pll_x:
- clk_put(cpufreq->pll_x_clk);
-put_cpu:
- clk_put(cpufreq->cpu_clk);
+err_put_supported_hw:
+ dev_pm_opp_put_supported_hw(opp_table);
return err;
}
static int tegra20_cpufreq_remove(struct platform_device *pdev)
{
- struct tegra20_cpufreq *cpufreq = platform_get_drvdata(pdev);
+ struct platform_device *cpufreq_dt;
+ struct opp_table *opp_table;
- cpufreq_unregister_driver(&cpufreq->driver);
+ cpufreq_dt = platform_get_drvdata(pdev);
+ platform_device_unregister(cpufreq_dt);
- clk_put(cpufreq->pll_p_clk);
- clk_put(cpufreq->pll_x_clk);
- clk_put(cpufreq->cpu_clk);
+ opp_table = dev_pm_opp_get_opp_table(get_cpu_device(0));
+ dev_pm_opp_put_supported_hw(opp_table);
+ dev_pm_opp_put_opp_table(opp_table);
return 0;
}
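
The tegra20-cpufreq rewrite above turns the driver into thin glue: it checks that cpu0 carries an operating-points-v2 table, selects the matching OPP rows with dev_pm_opp_set_supported_hw() using fuse-derived bitmasks, and registers a generic cpufreq-dt platform device to do the actual scaling. A hedged sketch of that glue shape; glue_probe() and its hw0/hw1 inputs are placeholders for the probe function and the Tegra speedo/process IDs.

#include <linux/bits.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int glue_probe(struct platform_device *pdev, u32 hw0, u32 hw1)
{
	struct platform_device *cpufreq_dt;
	struct opp_table *opp_table;
	struct device *cpu_dev = get_cpu_device(0);
	u32 versions[2] = { BIT(hw0), BIT(hw1) };
	int err;

	if (!cpu_dev)
		return -ENODEV;

	/* only OPP rows whose supported-hw masks match these bits stay enabled */
	opp_table = dev_pm_opp_set_supported_hw(cpu_dev, versions, 2);
	err = PTR_ERR_OR_ZERO(opp_table);
	if (err)
		return err;

	/* the generic cpufreq-dt driver does the actual frequency scaling */
	cpufreq_dt = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
	err = PTR_ERR_OR_ZERO(cpufreq_dt);
	if (err) {
		dev_pm_opp_put_supported_hw(opp_table);
		return err;
	}

	platform_set_drvdata(pdev, cpufreq_dt);
	return 0;
}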
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 74c247972bb3..6513ef2af66a 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -19,6 +19,7 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
+#include <asm/idle.h>
#include <asm/plpar_wrappers.h>
struct cpuidle_driver pseries_idle_driver = {
@@ -31,39 +32,15 @@ static struct cpuidle_state *cpuidle_state_table __read_mostly;
static u64 snooze_timeout __read_mostly;
static bool snooze_timeout_en __read_mostly;
-static inline void idle_loop_prolog(unsigned long *in_purr)
-{
- ppc64_runlatch_off();
- *in_purr = mfspr(SPRN_PURR);
- /*
- * Indicate to the HV that we are idle. Now would be
- * a good time to find other work to dispatch.
- */
- get_lppaca()->idle = 1;
-}
-
-static inline void idle_loop_epilog(unsigned long in_purr)
-{
- u64 wait_cycles;
-
- wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
- wait_cycles += mfspr(SPRN_PURR) - in_purr;
- get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
- get_lppaca()->idle = 0;
-
- ppc64_runlatch_on();
-}
-
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
u64 snooze_exit_time;
set_thread_flag(TIF_POLLING_NRFLAG);
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
local_irq_enable();
snooze_exit_time = get_tb() + snooze_timeout;
@@ -87,7 +64,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_disable();
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
@@ -113,9 +90,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
get_lppaca()->donate_dedicated_cpu = 1;
HMT_medium();
@@ -124,7 +100,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
local_irq_disable();
get_lppaca()->donate_dedicated_cpu = 0;
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
@@ -133,9 +109,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- unsigned long in_purr;
- idle_loop_prolog(&in_purr);
+ pseries_idle_prolog();
/*
* Yield the processor to the hypervisor. We return if
@@ -147,7 +122,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor();
local_irq_disable();
- idle_loop_epilog(in_purr);
+ pseries_idle_epilog();
return index;
}
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 313b0290e97b..150045849d78 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -365,7 +365,6 @@ static int tegra_cpuidle_probe(struct platform_device *pdev)
break;
case TEGRA30:
- tegra_cpuidle_disable_state(TEGRA_CC6);
break;
case TEGRA114:
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 788c6607078b..cee2a2713038 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
struct nitrox_device *nitrox_get_first_device(void)
{
- struct nitrox_device *ndev = NULL;
+ struct nitrox_device *ndev;
mutex_lock(&devlist_lock);
list_for_each_entry(ndev, &ndevlist, list) {
@@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
break;
}
mutex_unlock(&devlist_lock);
- if (!ndev)
+ if (&ndev->list == &ndevlist)
return NULL;
refcount_inc(&ndev->refcnt);
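
The nitrox_main.c hunk above fixes a classic list_for_each_entry() pitfall: after the loop the cursor is never NULL, it points at a bogus container computed from the list head when nothing matched, so "not found" must be detected by comparing the member pointer with the head. A hedged sketch of that idiom on a toy list.

#include <linux/list.h>

struct item {
	int ready;
	struct list_head list;
};

static LIST_HEAD(items);

static struct item *first_ready_item(void)
{
	struct item *it;

	list_for_each_entry(it, &items, list) {
		if (it->ready)
			break;
	}

	/* wrong test: "if (!it)"; right test: compare the member with the head */
	if (&it->list == &items)
		return NULL;

	return it;
}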
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f58c2b5c7fc5..d4f6e010dc79 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -389,10 +389,6 @@ static inline void copy_hash_init_values(char *key, int digestsize)
}
}
-static const u8 sgl_lengths[20] = {
- 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15
-};
-
/* Number of len fields(8) * size of one addr field */
#define PHYSDSGL_MAX_LEN_SIZE 16
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 9a642c79a657..f200fae6f7cb 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -93,8 +93,10 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
struct sock *sk)
{
struct net_device *ndev = cdev->ports[0];
+#if IS_ENABLED(CONFIG_IPV6)
struct net_device *temp;
int addr_type;
+#endif
switch (sk->sk_family) {
case PF_INET:
@@ -102,19 +104,21 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
return ndev;
ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
break;
+#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
if (likely(addr_type == IPV6_ADDR_ANY))
return ndev;
- for_each_netdev_rcu(&init_net, temp) {
- if (ipv6_chk_addr(&init_net, (struct in6_addr *)
- &sk->sk_v6_rcv_saddr, temp, 1)) {
- ndev = temp;
- break;
+ for_each_netdev_rcu(&init_net, temp) {
+ if (ipv6_chk_addr(&init_net, (struct in6_addr *)
+ &sk->sk_v6_rcv_saddr, temp, 1)) {
+ ndev = temp;
+ break;
+ }
}
- }
break;
+#endif
default:
return NULL;
}
@@ -476,8 +480,10 @@ void chtls_destroy_sock(struct sock *sk)
csk->cdev = NULL;
if (sk->sk_family == AF_INET)
sk->sk_prot = &tcp_prot;
+#if IS_ENABLED(CONFIG_IPV6)
else
sk->sk_prot = &tcpv6_prot;
+#endif
sk->sk_prot->destroy(sk);
}
@@ -629,14 +635,15 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
struct net_device *ndev;
+#if IS_ENABLED(CONFIG_IPV6)
+ bool clip_valid = false;
+#endif
struct listen_ctx *ctx;
struct adapter *adap;
struct port_info *pi;
- bool clip_valid;
+ int ret = 0;
int stid;
- int ret;
- clip_valid = false;
rcu_read_lock();
ndev = chtls_find_netdev(cdev, sk);
rcu_read_unlock();
@@ -674,6 +681,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_sport, 0,
cdev->lldi->rxq_ids[0]);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
int addr_type;
@@ -689,6 +697,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_sport,
cdev->lldi->rxq_ids[0]);
+#endif
}
if (ret > 0)
ret = net_xmit_errno(ret);
@@ -696,8 +705,10 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
goto del_hash;
return 0;
del_hash:
+#if IS_ENABLED(CONFIG_IPV6)
if (clip_valid)
cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
+#endif
listen_hash_del(cdev, sk);
free_stid:
cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
@@ -711,8 +722,6 @@ free_ctx:
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
struct listen_ctx *listen_ctx;
- struct chtls_sock *csk;
- int addr_type = 0;
int stid;
stid = listen_hash_del(cdev, sk);
@@ -725,7 +734,11 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
cxgb4_remove_server(cdev->lldi->ports[0], stid,
cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);
+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET6) {
+ struct chtls_sock *csk;
+ int addr_type = 0;
+
csk = rcu_dereference_sk_user_data(sk);
addr_type = ipv6_addr_type((const struct in6_addr *)
&sk->sk_v6_rcv_saddr);
@@ -733,6 +746,7 @@ void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
cxgb4_clip_release(csk->egress_dev, (const u32 *)
&sk->sk_v6_rcv_saddr, 1);
}
+#endif
chtls_disconnect_acceptq(sk);
}
@@ -941,9 +955,11 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tp = tcp_sk(sk);
tcpoptsz = 0;
+#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == AF_INET6)
iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
else
+#endif
iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
if (req->tcpopt.tstamp)
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
@@ -1091,13 +1107,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
struct tls_context *ctx;
struct net_device *ndev;
struct chtls_sock *csk;
struct dst_entry *dst;
- struct neighbour *n;
struct tcp_sock *tp;
struct sock *newsk;
u16 port_id;
@@ -1115,6 +1131,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
goto free_sk;
n = dst_neigh_lookup(dst, &iph->saddr);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
const struct ipv6hdr *ip6h;
struct flowi6 fl6;
@@ -1131,6 +1148,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (IS_ERR(dst))
goto free_sk;
n = dst_neigh_lookup(dst, &ip6h->saddr);
+#endif
}
if (!n)
goto free_sk;
@@ -1158,6 +1176,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
newinet->inet_daddr = iph->saddr;
newinet->inet_rcv_saddr = iph->daddr;
newinet->inet_saddr = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
} else {
struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
struct inet_request_sock *treq = inet_rsk(oreq);
@@ -1175,6 +1194,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
newinet->inet_opt = NULL;
newinet->inet_daddr = LOOPBACK4_IPV6;
newinet->inet_saddr = LOOPBACK4_IPV6;
+#endif
}
oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
@@ -1337,10 +1357,12 @@ static void chtls_pass_accept_request(struct sock *sk,
if (iph->version == 0x4) {
chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
ip_dsfield = ipv4_get_dsfield(iph);
+#if IS_ENABLED(CONFIG_IPV6)
} else {
inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb));
+#endif
}
if (req->tcpopt.wsf <= 14 &&
sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
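
The chtls_cm.c hunk above wraps every IPv6-only path in IS_ENABLED(CONFIG_IPV6) guards and moves the locals those paths need inside the same guards, so a CONFIG_IPV6=n build neither references IPv6-only symbols such as tcpv6_prot nor trips unused-variable warnings. A hedged sketch of the pattern; pick_protocol() is a placeholder, not a chtls function.

#include <linux/kconfig.h>
#include <linux/socket.h>

/* placeholder helper illustrating the guard, not a chtls function */
static const char *pick_protocol(int family)
{
	if (family == AF_INET)
		return "tcp_prot";
#if IS_ENABLED(CONFIG_IPV6)
	/* compiled only when IPv6 is built in or modular */
	if (family == AF_INET6)
		return "tcpv6_prot";
#endif
	return NULL;
}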
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 7dfffdde9593..d98b89d0fa6e 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -608,9 +608,11 @@ static void __init chtls_init_ulp_ops(void)
chtls_cpl_prot.recvmsg = chtls_recvmsg;
chtls_cpl_prot.setsockopt = chtls_setsockopt;
chtls_cpl_prot.getsockopt = chtls_getsockopt;
+#if IS_ENABLED(CONFIG_IPV6)
chtls_cpl_protv6 = chtls_cpl_prot;
chtls_init_rsk_ops(&chtls_cpl_protv6, &chtls_rsk_opsv6,
&tcpv6_prot, PF_INET6);
+#endif
}
static int __init chtls_register(void)
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index 015155da59c2..bc89a20e5d9d 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -15,4 +15,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compres
obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
nx-compress-objs := nx-842.o
nx-compress-pseries-objs := nx-842-pseries.o
-nx-compress-powernv-objs := nx-842-powernv.o
+nx-compress-powernv-objs := nx-common-powernv.o
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
deleted file mode 100644
index c037a2403b82..000000000000
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ /dev/null
@@ -1,1062 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for IBM PowerNV 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "nx-842.h"
-
-#include <linux/timer.h>
-
-#include <asm/prom.h>
-#include <asm/icswx.h>
-#include <asm/vas.h>
-#include <asm/reg.h>
-#include <asm/opal-api.h>
-#include <asm/opal.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-
-#define WORKMEM_ALIGN (CRB_ALIGN)
-#define CSB_WAIT_MAX (5000) /* ms */
-#define VAS_RETRIES (10)
-
-struct nx842_workmem {
- /* Below fields must be properly aligned */
- struct coprocessor_request_block crb; /* CRB_ALIGN align */
- struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */
- struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */
- /* Above fields must be properly aligned */
-
- ktime_t start;
-
- char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
-} __packed __aligned(WORKMEM_ALIGN);
-
-struct nx842_coproc {
- unsigned int chip_id;
- unsigned int ct;
- unsigned int ci; /* Coprocessor instance, used with icswx */
- struct {
- struct vas_window *rxwin;
- int id;
- } vas;
- struct list_head list;
-};
-
-/*
- * Send the request to NX engine on the chip for the corresponding CPU
- * where the process is executing. Use with VAS function.
- */
-static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
-
-/* no cpu hotplug on powernv, so this list never changes after init */
-static LIST_HEAD(nx842_coprocs);
-static unsigned int nx842_ct; /* used in icswx function */
-
-static int (*nx842_powernv_exec)(const unsigned char *in,
- unsigned int inlen, unsigned char *out,
- unsigned int *outlenp, void *workmem, int fc);
-
-/**
- * setup_indirect_dde - Setup an indirect DDE
- *
- * The DDE is setup with the the DDE count, byte count, and address of
- * first direct DDE in the list.
- */
-static void setup_indirect_dde(struct data_descriptor_entry *dde,
- struct data_descriptor_entry *ddl,
- unsigned int dde_count, unsigned int byte_count)
-{
- dde->flags = 0;
- dde->count = dde_count;
- dde->index = 0;
- dde->length = cpu_to_be32(byte_count);
- dde->address = cpu_to_be64(nx842_get_pa(ddl));
-}
-
-/**
- * setup_direct_dde - Setup single DDE from buffer
- *
- * The DDE is setup with the buffer and length. The buffer must be properly
- * aligned. The used length is returned.
- * Returns:
- * N Successfully set up DDE with N bytes
- */
-static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
- unsigned long pa, unsigned int len)
-{
- unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa));
-
- dde->flags = 0;
- dde->count = 0;
- dde->index = 0;
- dde->length = cpu_to_be32(l);
- dde->address = cpu_to_be64(pa);
-
- return l;
-}
-
-/**
- * setup_ddl - Setup DDL from buffer
- *
- * Returns:
- * 0 Successfully set up DDL
- */
-static int setup_ddl(struct data_descriptor_entry *dde,
- struct data_descriptor_entry *ddl,
- unsigned char *buf, unsigned int len,
- bool in)
-{
- unsigned long pa = nx842_get_pa(buf);
- int i, ret, total_len = len;
-
- if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) {
- pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n",
- in ? "input" : "output", pa, DDE_BUFFER_ALIGN);
- return -EINVAL;
- }
-
- /* only need to check last mult; since buffer must be
- * DDE_BUFFER_ALIGN aligned, and that is a multiple of
- * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers
- * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT.
- */
- if (len % DDE_BUFFER_LAST_MULT) {
- pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n",
- in ? "input" : "output", len, DDE_BUFFER_LAST_MULT);
- if (in)
- return -EINVAL;
- len = round_down(len, DDE_BUFFER_LAST_MULT);
- }
-
- /* use a single direct DDE */
- if (len <= LEN_ON_PAGE(pa)) {
- ret = setup_direct_dde(dde, pa, len);
- WARN_ON(ret < len);
- return 0;
- }
-
- /* use the DDL */
- for (i = 0; i < DDL_LEN_MAX && len > 0; i++) {
- ret = setup_direct_dde(&ddl[i], pa, len);
- buf += ret;
- len -= ret;
- pa = nx842_get_pa(buf);
- }
-
- if (len > 0) {
- pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n",
- total_len, in ? "input" : "output", len);
- if (in)
- return -EMSGSIZE;
- total_len -= len;
- }
- setup_indirect_dde(dde, ddl, i, total_len);
-
- return 0;
-}
-
-#define CSB_ERR(csb, msg, ...) \
- pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \
- ##__VA_ARGS__, (csb)->flags, \
- (csb)->cs, (csb)->cc, (csb)->ce, \
- be32_to_cpu((csb)->count))
-
-#define CSB_ERR_ADDR(csb, msg, ...) \
- CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \
- (unsigned long)be64_to_cpu((csb)->address))
-
-/**
- * wait_for_csb
- */
-static int wait_for_csb(struct nx842_workmem *wmem,
- struct coprocessor_status_block *csb)
-{
- ktime_t start = wmem->start, now = ktime_get();
- ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
-
- while (!(READ_ONCE(csb->flags) & CSB_V)) {
- cpu_relax();
- now = ktime_get();
- if (ktime_after(now, timeout))
- break;
- }
-
- /* hw has updated csb and output buffer */
- barrier();
-
- /* check CSB flags */
- if (!(csb->flags & CSB_V)) {
- CSB_ERR(csb, "CSB still not valid after %ld us, giving up",
- (long)ktime_us_delta(now, start));
- return -ETIMEDOUT;
- }
- if (csb->flags & CSB_F) {
- CSB_ERR(csb, "Invalid CSB format");
- return -EPROTO;
- }
- if (csb->flags & CSB_CH) {
- CSB_ERR(csb, "Invalid CSB chaining state");
- return -EPROTO;
- }
-
- /* verify CSB completion sequence is 0 */
- if (csb->cs) {
- CSB_ERR(csb, "Invalid CSB completion sequence");
- return -EPROTO;
- }
-
- /* check CSB Completion Code */
- switch (csb->cc) {
- /* no error */
- case CSB_CC_SUCCESS:
- break;
- case CSB_CC_TPBC_GT_SPBC:
- /* not an error, but the compressed data is
- * larger than the uncompressed data :(
- */
- break;
-
- /* input data errors */
- case CSB_CC_OPERAND_OVERLAP:
- /* input and output buffers overlap */
- CSB_ERR(csb, "Operand Overlap error");
- return -EINVAL;
- case CSB_CC_INVALID_OPERAND:
- CSB_ERR(csb, "Invalid operand");
- return -EINVAL;
- case CSB_CC_NOSPC:
- /* output buffer too small */
- return -ENOSPC;
- case CSB_CC_ABORT:
- CSB_ERR(csb, "Function aborted");
- return -EINTR;
- case CSB_CC_CRC_MISMATCH:
- CSB_ERR(csb, "CRC mismatch");
- return -EINVAL;
- case CSB_CC_TEMPL_INVALID:
- CSB_ERR(csb, "Compressed data template invalid");
- return -EINVAL;
- case CSB_CC_TEMPL_OVERFLOW:
- CSB_ERR(csb, "Compressed data template shows data past end");
- return -EINVAL;
- case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */
- /*
- * DDE byte count exceeds the limit specified in Maximum
- * byte count register.
- */
- CSB_ERR(csb, "DDE byte count exceeds the limit");
- return -EINVAL;
-
- /* these should not happen */
- case CSB_CC_INVALID_ALIGN:
- /* setup_ddl should have detected this */
- CSB_ERR_ADDR(csb, "Invalid alignment");
- return -EINVAL;
- case CSB_CC_DATA_LENGTH:
- /* setup_ddl should have detected this */
- CSB_ERR(csb, "Invalid data length");
- return -EINVAL;
- case CSB_CC_WR_TRANSLATION:
- case CSB_CC_TRANSLATION:
- case CSB_CC_TRANSLATION_DUP1:
- case CSB_CC_TRANSLATION_DUP2:
- case CSB_CC_TRANSLATION_DUP3:
- case CSB_CC_TRANSLATION_DUP4:
- case CSB_CC_TRANSLATION_DUP5:
- case CSB_CC_TRANSLATION_DUP6:
- /* should not happen, we use physical addrs */
- CSB_ERR_ADDR(csb, "Translation error");
- return -EPROTO;
- case CSB_CC_WR_PROTECTION:
- case CSB_CC_PROTECTION:
- case CSB_CC_PROTECTION_DUP1:
- case CSB_CC_PROTECTION_DUP2:
- case CSB_CC_PROTECTION_DUP3:
- case CSB_CC_PROTECTION_DUP4:
- case CSB_CC_PROTECTION_DUP5:
- case CSB_CC_PROTECTION_DUP6:
- /* should not happen, we use physical addrs */
- CSB_ERR_ADDR(csb, "Protection error");
- return -EPROTO;
- case CSB_CC_PRIVILEGE:
- /* shouldn't happen, we're in HYP mode */
- CSB_ERR(csb, "Insufficient Privilege error");
- return -EPROTO;
- case CSB_CC_EXCESSIVE_DDE:
- /* shouldn't happen, setup_ddl doesn't use many dde's */
- CSB_ERR(csb, "Too many DDEs in DDL");
- return -EINVAL;
- case CSB_CC_TRANSPORT:
- case CSB_CC_INVALID_CRB: /* P9 or later */
- /* shouldn't happen, we setup CRB correctly */
- CSB_ERR(csb, "Invalid CRB");
- return -EINVAL;
- case CSB_CC_INVALID_DDE: /* P9 or later */
- /*
- * shouldn't happen, setup_direct/indirect_dde creates
- * DDE right
- */
- CSB_ERR(csb, "Invalid DDE");
- return -EINVAL;
- case CSB_CC_SEGMENTED_DDL:
- /* shouldn't happen, setup_ddl creates DDL right */
- CSB_ERR(csb, "Segmented DDL error");
- return -EINVAL;
- case CSB_CC_DDE_OVERFLOW:
- /* shouldn't happen, setup_ddl creates DDL right */
- CSB_ERR(csb, "DDE overflow error");
- return -EINVAL;
- case CSB_CC_SESSION:
- /* should not happen with ICSWX */
- CSB_ERR(csb, "Session violation error");
- return -EPROTO;
- case CSB_CC_CHAIN:
- /* should not happen, we don't use chained CRBs */
- CSB_ERR(csb, "Chained CRB error");
- return -EPROTO;
- case CSB_CC_SEQUENCE:
- /* should not happen, we don't use chained CRBs */
- CSB_ERR(csb, "CRB sequence number error");
- return -EPROTO;
- case CSB_CC_UNKNOWN_CODE:
- CSB_ERR(csb, "Unknown subfunction code");
- return -EPROTO;
-
- /* hardware errors */
- case CSB_CC_RD_EXTERNAL:
- case CSB_CC_RD_EXTERNAL_DUP1:
- case CSB_CC_RD_EXTERNAL_DUP2:
- case CSB_CC_RD_EXTERNAL_DUP3:
- CSB_ERR_ADDR(csb, "Read error outside coprocessor");
- return -EPROTO;
- case CSB_CC_WR_EXTERNAL:
- CSB_ERR_ADDR(csb, "Write error outside coprocessor");
- return -EPROTO;
- case CSB_CC_INTERNAL:
- CSB_ERR(csb, "Internal error in coprocessor");
- return -EPROTO;
- case CSB_CC_PROVISION:
- CSB_ERR(csb, "Storage provision error");
- return -EPROTO;
- case CSB_CC_HW:
- CSB_ERR(csb, "Correctable hardware error");
- return -EPROTO;
- case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */
- CSB_ERR(csb, "Job did not finish within allowed time");
- return -EPROTO;
-
- default:
- CSB_ERR(csb, "Invalid CC %d", csb->cc);
- return -EPROTO;
- }
-
- /* check Completion Extension state */
- if (csb->ce & CSB_CE_TERMINATION) {
- CSB_ERR(csb, "CSB request was terminated");
- return -EPROTO;
- }
- if (csb->ce & CSB_CE_INCOMPLETE) {
- CSB_ERR(csb, "CSB request not complete");
- return -EPROTO;
- }
- if (!(csb->ce & CSB_CE_TPBC)) {
- CSB_ERR(csb, "TPBC not provided, unknown target length");
- return -EPROTO;
- }
-
- /* successful completion */
- pr_debug_ratelimited("Processed %u bytes in %lu us\n",
- be32_to_cpu(csb->count),
- (unsigned long)ktime_us_delta(now, start));
-
- return 0;
-}
-
-static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
- unsigned char *out, unsigned int outlen,
- struct nx842_workmem *wmem)
-{
- struct coprocessor_request_block *crb;
- struct coprocessor_status_block *csb;
- u64 csb_addr;
- int ret;
-
- crb = &wmem->crb;
- csb = &crb->csb;
-
- /* Clear any previous values */
- memset(crb, 0, sizeof(*crb));
-
- /* set up DDLs */
- ret = setup_ddl(&crb->source, wmem->ddl_in,
- (unsigned char *)in, inlen, true);
- if (ret)
- return ret;
-
- ret = setup_ddl(&crb->target, wmem->ddl_out,
- out, outlen, false);
- if (ret)
- return ret;
-
- /* set up CRB's CSB addr */
- csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
- csb_addr |= CRB_CSB_AT; /* Addrs are phys */
- crb->csb_addr = cpu_to_be64(csb_addr);
-
- return 0;
-}
-
-/**
- * nx842_exec_icswx - compress/decompress data using the 842 algorithm
- *
- * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
- * This compresses or decompresses the provided input buffer into the provided
- * output buffer.
- *
- * Upon return from this function @outlen contains the length of the
- * output data. If there is an error then @outlen will be 0 and an
- * error will be specified by the return code from this function.
- *
- * The @workmem buffer should only be used by one function call at a time.
- *
- * @in: input buffer pointer
- * @inlen: input buffer size
- * @out: output buffer pointer
- * @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
- * @fc: function code, see CCW Function Codes in nx-842.h
- *
- * Returns:
- * 0 Success, output of length @outlenp stored in the buffer at @out
- * -ENODEV Hardware unavailable
- * -ENOSPC Output buffer is to small
- * -EMSGSIZE Input buffer too large
- * -EINVAL buffer constraints do not fix nx842_constraints
- * -EPROTO hardware error during operation
- * -ETIMEDOUT hardware did not complete operation in reasonable time
- * -EINTR operation was aborted
- */
-static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
- unsigned char *out, unsigned int *outlenp,
- void *workmem, int fc)
-{
- struct coprocessor_request_block *crb;
- struct coprocessor_status_block *csb;
- struct nx842_workmem *wmem;
- int ret;
- u32 ccw;
- unsigned int outlen = *outlenp;
-
- wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
-
- *outlenp = 0;
-
- /* shoudn't happen, we don't load without a coproc */
- if (!nx842_ct) {
- pr_err_ratelimited("coprocessor CT is 0");
- return -ENODEV;
- }
-
- ret = nx842_config_crb(in, inlen, out, outlen, wmem);
- if (ret)
- return ret;
-
- crb = &wmem->crb;
- csb = &crb->csb;
-
- /* set up CCW */
- ccw = 0;
- ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
- ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
- ccw = SET_FIELD(CCW_FC_842, ccw, fc);
-
- wmem->start = ktime_get();
-
- /* do ICSWX */
- ret = icswx(cpu_to_be32(ccw), crb);
-
- pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret,
- (unsigned int)ccw,
- (unsigned int)be32_to_cpu(crb->ccw));
-
- /*
- * NX842 coprocessor sets 3rd bit in CR register with XER[S0].
- * XER[S0] is the integer summary overflow bit which is nothing
- * to do NX. Since this bit can be set with other return values,
- * mask this bit.
- */
- ret &= ~ICSWX_XERS0;
-
- switch (ret) {
- case ICSWX_INITIATED:
- ret = wait_for_csb(wmem, csb);
- break;
- case ICSWX_BUSY:
- pr_debug_ratelimited("842 Coprocessor busy\n");
- ret = -EBUSY;
- break;
- case ICSWX_REJECTED:
- pr_err_ratelimited("ICSWX rejected\n");
- ret = -EPROTO;
- break;
- }
-
- if (!ret)
- *outlenp = be32_to_cpu(csb->count);
-
- return ret;
-}
-
-/**
- * nx842_exec_vas - compress/decompress data using the 842 algorithm
- *
- * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
- * This compresses or decompresses the provided input buffer into the provided
- * output buffer.
- *
- * Upon return from this function @outlen contains the length of the
- * output data. If there is an error then @outlen will be 0 and an
- * error will be specified by the return code from this function.
- *
- * The @workmem buffer should only be used by one function call at a time.
- *
- * @in: input buffer pointer
- * @inlen: input buffer size
- * @out: output buffer pointer
- * @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
- * @fc: function code, see CCW Function Codes in nx-842.h
- *
- * Returns:
- * 0 Success, output of length @outlenp stored in the buffer
- * at @out
- * -ENODEV Hardware unavailable
- * -ENOSPC Output buffer is to small
- * -EMSGSIZE Input buffer too large
- * -EINVAL buffer constraints do not fix nx842_constraints
- * -EPROTO hardware error during operation
- * -ETIMEDOUT hardware did not complete operation in reasonable time
- * -EINTR operation was aborted
- */
-static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
- unsigned char *out, unsigned int *outlenp,
- void *workmem, int fc)
-{
- struct coprocessor_request_block *crb;
- struct coprocessor_status_block *csb;
- struct nx842_workmem *wmem;
- struct vas_window *txwin;
- int ret, i = 0;
- u32 ccw;
- unsigned int outlen = *outlenp;
-
- wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
-
- *outlenp = 0;
-
- crb = &wmem->crb;
- csb = &crb->csb;
-
- ret = nx842_config_crb(in, inlen, out, outlen, wmem);
- if (ret)
- return ret;
-
- ccw = 0;
- ccw = SET_FIELD(CCW_FC_842, ccw, fc);
- crb->ccw = cpu_to_be32(ccw);
-
- do {
- wmem->start = ktime_get();
- preempt_disable();
- txwin = this_cpu_read(cpu_txwin);
-
- /*
- * VAS copy CRB into L2 cache. Refer <asm/vas.h>.
- * @crb and @offset.
- */
- vas_copy_crb(crb, 0);
-
- /*
- * VAS paste previously copied CRB to NX.
- * @txwin, @offset and @last (must be true).
- */
- ret = vas_paste_crb(txwin, 0, 1);
- preempt_enable();
- /*
- * Retry copy/paste function for VAS failures.
- */
- } while (ret && (i++ < VAS_RETRIES));
-
- if (ret) {
- pr_err_ratelimited("VAS copy/paste failed\n");
- return ret;
- }
-
- ret = wait_for_csb(wmem, csb);
- if (!ret)
- *outlenp = be32_to_cpu(csb->count);
-
- return ret;
-}
-
-/**
- * nx842_powernv_compress - Compress data using the 842 algorithm
- *
- * Compression provided by the NX842 coprocessor on IBM PowerNV systems.
- * The input buffer is compressed and the result is stored in the
- * provided output buffer.
- *
- * Upon return from this function @outlen contains the length of the
- * compressed data. If there is an error then @outlen will be 0 and an
- * error will be specified by the return code from this function.
- *
- * @in: input buffer pointer
- * @inlen: input buffer size
- * @out: output buffer pointer
- * @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
- *
- * Returns: see @nx842_powernv_exec()
- */
-static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
- unsigned char *out, unsigned int *outlenp,
- void *wmem)
-{
- return nx842_powernv_exec(in, inlen, out, outlenp,
- wmem, CCW_FC_842_COMP_CRC);
-}
-
-/**
- * nx842_powernv_decompress - Decompress data using the 842 algorithm
- *
- * Decompression provided by the NX842 coprocessor on IBM PowerNV systems.
- * The input buffer is decompressed and the result is stored in the
- * provided output buffer.
- *
- * Upon return from this function @outlen contains the length of the
- * decompressed data. If there is an error then @outlen will be 0 and an
- * error will be specified by the return code from this function.
- *
- * @in: input buffer pointer
- * @inlen: input buffer size
- * @out: output buffer pointer
- * @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
- *
- * Returns: see @nx842_powernv_exec()
- */
-static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
- unsigned char *out, unsigned int *outlenp,
- void *wmem)
-{
- return nx842_powernv_exec(in, inlen, out, outlenp,
- wmem, CCW_FC_842_DECOMP_CRC);
-}
-
-static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
- int chipid)
-{
- coproc->chip_id = chipid;
- INIT_LIST_HEAD(&coproc->list);
- list_add(&coproc->list, &nx842_coprocs);
-}
-
-static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
-{
- struct vas_window *txwin = NULL;
- struct vas_tx_win_attr txattr;
-
- /*
- * Kernel requests will be high priority. So open send
- * windows only for high priority RxFIFO entries.
- */
- vas_init_tx_win_attr(&txattr, coproc->ct);
- txattr.lpid = 0; /* lpid is 0 for kernel requests */
- txattr.pid = 0; /* pid is 0 for kernel requests */
-
- /*
- * Open a VAS send window which is used to send request to NX.
- */
- txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
- if (IS_ERR(txwin))
- pr_err("ibm,nx-842: Can not open TX window: %ld\n",
- PTR_ERR(txwin));
-
- return txwin;
-}
-
-/*
- * Identify chip ID for each CPU, open send wndow for the corresponding NX
- * engine and save txwin in percpu cpu_txwin.
- * cpu_txwin is used in copy/paste operation for each compression /
- * decompression request.
- */
-static int nx842_open_percpu_txwins(void)
-{
- struct nx842_coproc *coproc, *n;
- unsigned int i, chip_id;
-
- for_each_possible_cpu(i) {
- struct vas_window *txwin = NULL;
-
- chip_id = cpu_to_chip_id(i);
-
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
- /*
- * Kernel requests use only high priority FIFOs. So
- * open send windows for these FIFOs.
- */
-
- if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
- continue;
-
- if (coproc->chip_id == chip_id) {
- txwin = nx842_alloc_txwin(coproc);
- if (IS_ERR(txwin))
- return PTR_ERR(txwin);
-
- per_cpu(cpu_txwin, i) = txwin;
- break;
- }
- }
-
- if (!per_cpu(cpu_txwin, i)) {
- /* shouldn't happen, Each chip will have NX engine */
- pr_err("NX engine is not available for CPU %d\n", i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
- int vasid, int *ct)
-{
- struct vas_window *rxwin = NULL;
- struct vas_rx_win_attr rxattr;
- struct nx842_coproc *coproc;
- u32 lpid, pid, tid, fifo_size;
- u64 rx_fifo;
- const char *priority;
- int ret;
-
- ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
- if (ret) {
- pr_err("Missing rx-fifo-address property\n");
- return ret;
- }
-
- ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
- if (ret) {
- pr_err("Missing rx-fifo-size property\n");
- return ret;
- }
-
- ret = of_property_read_u32(dn, "lpid", &lpid);
- if (ret) {
- pr_err("Missing lpid property\n");
- return ret;
- }
-
- ret = of_property_read_u32(dn, "pid", &pid);
- if (ret) {
- pr_err("Missing pid property\n");
- return ret;
- }
-
- ret = of_property_read_u32(dn, "tid", &tid);
- if (ret) {
- pr_err("Missing tid property\n");
- return ret;
- }
-
- ret = of_property_read_string(dn, "priority", &priority);
- if (ret) {
- pr_err("Missing priority property\n");
- return ret;
- }
-
- coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
- if (!coproc)
- return -ENOMEM;
-
- if (!strcmp(priority, "High"))
- coproc->ct = VAS_COP_TYPE_842_HIPRI;
- else if (!strcmp(priority, "Normal"))
- coproc->ct = VAS_COP_TYPE_842;
- else {
- pr_err("Invalid RxFIFO priority value\n");
- ret = -EINVAL;
- goto err_out;
- }
-
- vas_init_rx_win_attr(&rxattr, coproc->ct);
- rxattr.rx_fifo = (void *)rx_fifo;
- rxattr.rx_fifo_size = fifo_size;
- rxattr.lnotify_lpid = lpid;
- rxattr.lnotify_pid = pid;
- rxattr.lnotify_tid = tid;
- /*
- * Maximum RX window credits can not be more than #CRBs in
- * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
- */
- rxattr.wcreds_max = fifo_size / CRB_SIZE;
-
- /*
- * Open a VAS receice window which is used to configure RxFIFO
- * for NX.
- */
- rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
- if (IS_ERR(rxwin)) {
- ret = PTR_ERR(rxwin);
- pr_err("setting RxFIFO with VAS failed: %d\n",
- ret);
- goto err_out;
- }
-
- coproc->vas.rxwin = rxwin;
- coproc->vas.id = vasid;
- nx842_add_coprocs_list(coproc, chip_id);
-
- /*
- * (lpid, pid, tid) combination has to be unique for each
- * coprocessor instance in the system. So to make it
- * unique, skiboot uses coprocessor type such as 842 or
- * GZIP for pid and provides this value to kernel in pid
- * device-tree property.
- */
- *ct = pid;
-
- return 0;
-
-err_out:
- kfree(coproc);
- return ret;
-}
-
-
-static int __init nx842_powernv_probe_vas(struct device_node *pn)
-{
- struct device_node *dn;
- int chip_id, vasid, ret = 0;
- int nx_fifo_found = 0;
- int uninitialized_var(ct);
-
- chip_id = of_get_ibm_chip_id(pn);
- if (chip_id < 0) {
- pr_err("ibm,chip-id missing\n");
- return -EINVAL;
- }
-
- vasid = chip_to_vas_id(chip_id);
- if (vasid < 0) {
- pr_err("Unable to map chip_id %d to vasid\n", chip_id);
- return -EINVAL;
- }
-
- for_each_child_of_node(pn, dn) {
- if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
- ret = vas_cfg_coproc_info(dn, chip_id, vasid, &ct);
- if (ret) {
- of_node_put(dn);
- return ret;
- }
- nx_fifo_found++;
- }
- }
-
- if (!nx_fifo_found) {
- pr_err("NX842 FIFO nodes are missing\n");
- return -EINVAL;
- }
-
- /*
- * Initialize NX instance for both high and normal priority FIFOs.
- */
- if (opal_check_token(OPAL_NX_COPROC_INIT)) {
- ret = opal_nx_coproc_init(chip_id, ct);
- if (ret) {
- pr_err("Failed to initialize NX for chip(%d): %d\n",
- chip_id, ret);
- ret = opal_error_code(ret);
- }
- } else
- pr_warn("Firmware doesn't support NX initialization\n");
-
- return ret;
-}
-
-static int __init nx842_powernv_probe(struct device_node *dn)
-{
- struct nx842_coproc *coproc;
- unsigned int ct, ci;
- int chip_id;
-
- chip_id = of_get_ibm_chip_id(dn);
- if (chip_id < 0) {
- pr_err("ibm,chip-id missing\n");
- return -EINVAL;
- }
-
- if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
- pr_err("ibm,842-coprocessor-type missing\n");
- return -EINVAL;
- }
-
- if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
- pr_err("ibm,842-coprocessor-instance missing\n");
- return -EINVAL;
- }
-
- coproc = kmalloc(sizeof(*coproc), GFP_KERNEL);
- if (!coproc)
- return -ENOMEM;
-
- coproc->ct = ct;
- coproc->ci = ci;
- nx842_add_coprocs_list(coproc, chip_id);
-
- pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
-
- if (!nx842_ct)
- nx842_ct = ct;
- else if (nx842_ct != ct)
- pr_err("NX842 chip %d, CT %d != first found CT %d\n",
- chip_id, ct, nx842_ct);
-
- return 0;
-}
-
-static void nx842_delete_coprocs(void)
-{
- struct nx842_coproc *coproc, *n;
- struct vas_window *txwin;
- int i;
-
- /*
- * close percpu txwins that are opened for the corresponding coproc.
- */
- for_each_possible_cpu(i) {
- txwin = per_cpu(cpu_txwin, i);
- if (txwin)
- vas_win_close(txwin);
-
- per_cpu(cpu_txwin, i) = 0;
- }
-
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
- if (coproc->vas.rxwin)
- vas_win_close(coproc->vas.rxwin);
-
- list_del(&coproc->list);
- kfree(coproc);
- }
-}
-
-static struct nx842_constraints nx842_powernv_constraints = {
- .alignment = DDE_BUFFER_ALIGN,
- .multiple = DDE_BUFFER_LAST_MULT,
- .minimum = DDE_BUFFER_LAST_MULT,
- .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE,
-};
-
-static struct nx842_driver nx842_powernv_driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
- .workmem_size = sizeof(struct nx842_workmem),
- .constraints = &nx842_powernv_constraints,
- .compress = nx842_powernv_compress,
- .decompress = nx842_powernv_decompress,
-};
-
-static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
-{
- return nx842_crypto_init(tfm, &nx842_powernv_driver);
-}
-
-static struct crypto_alg nx842_powernv_alg = {
- .cra_name = "842",
- .cra_driver_name = "842-nx",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
- .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = nx842_powernv_crypto_init,
- .cra_exit = nx842_crypto_exit,
- .cra_u = { .compress = {
- .coa_compress = nx842_crypto_compress,
- .coa_decompress = nx842_crypto_decompress } }
-};
-
-static __init int nx842_powernv_init(void)
-{
- struct device_node *dn;
- int ret;
-
- /* verify workmem size/align restrictions */
- BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
- BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN);
- BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN);
- /* verify buffer size/align restrictions */
- BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
- BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
- BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
-
- for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
- ret = nx842_powernv_probe_vas(dn);
- if (ret) {
- nx842_delete_coprocs();
- of_node_put(dn);
- return ret;
- }
- }
-
- if (list_empty(&nx842_coprocs)) {
- for_each_compatible_node(dn, NULL, "ibm,power-nx")
- nx842_powernv_probe(dn);
-
- if (!nx842_ct)
- return -ENODEV;
-
- nx842_powernv_exec = nx842_exec_icswx;
- } else {
- ret = nx842_open_percpu_txwins();
- if (ret) {
- nx842_delete_coprocs();
- return ret;
- }
-
- nx842_powernv_exec = nx842_exec_vas;
- }
-
- ret = crypto_register_alg(&nx842_powernv_alg);
- if (ret) {
- nx842_delete_coprocs();
- return ret;
- }
-
- return 0;
-}
-module_init(nx842_powernv_init);
-
-static void __exit nx842_powernv_exit(void)
-{
- crypto_unregister_alg(&nx842_powernv_alg);
-
- nx842_delete_coprocs();
-}
-module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
new file mode 100644
index 000000000000..13c65deda8e9
--- /dev/null
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -0,0 +1,1136 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for IBM PowerNV compression accelerator
+ *
+ * Copyright (C) 2015 Dan Streetman, IBM Corp
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "nx-842.h"
+
+#include <linux/timer.h>
+
+#include <asm/prom.h>
+#include <asm/icswx.h>
+#include <asm/vas.h>
+#include <asm/reg.h>
+#include <asm/opal-api.h>
+#include <asm/opal.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
+MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
+
+#define WORKMEM_ALIGN (CRB_ALIGN)
+#define CSB_WAIT_MAX (5000) /* ms */
+#define VAS_RETRIES (10)
+
+struct nx842_workmem {
+ /* Below fields must be properly aligned */
+ struct coprocessor_request_block crb; /* CRB_ALIGN align */
+ struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */
+ struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */
+ /* Above fields must be properly aligned */
+
+ ktime_t start;
+
+ char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
+} __packed __aligned(WORKMEM_ALIGN);
+
+struct nx_coproc {
+ unsigned int chip_id;
+	unsigned int ct;	/* Can be 842 or GZIP high/normal */
+ unsigned int ci; /* Coprocessor instance, used with icswx */
+ struct {
+ struct vas_window *rxwin;
+ int id;
+ } vas;
+ struct list_head list;
+};
+
+/*
+ * Send the request to the NX engine on the chip corresponding to the CPU
+ * where the process is executing. Used with the VAS function.
+ */
+static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
+
+/* no cpu hotplug on powernv, so this list never changes after init */
+static LIST_HEAD(nx_coprocs);
+static unsigned int nx842_ct; /* used in icswx function */
+
+/*
+ * Use the same values as skiboot for the coprocessor types, as
+ * defined in the NX workbook.
+ */
+#define NX_CT_GZIP (2) /* on P9 and later */
+#define NX_CT_842 (3)
+
+static int (*nx842_powernv_exec)(const unsigned char *in,
+ unsigned int inlen, unsigned char *out,
+ unsigned int *outlenp, void *workmem, int fc);
+
+/**
+ * setup_indirect_dde - Setup an indirect DDE
+ *
+ * The DDE is set up with the DDE count, byte count, and address of the
+ * first direct DDE in the list.
+ */
+static void setup_indirect_dde(struct data_descriptor_entry *dde,
+ struct data_descriptor_entry *ddl,
+ unsigned int dde_count, unsigned int byte_count)
+{
+ dde->flags = 0;
+ dde->count = dde_count;
+ dde->index = 0;
+ dde->length = cpu_to_be32(byte_count);
+ dde->address = cpu_to_be64(nx842_get_pa(ddl));
+}
+
+/**
+ * setup_direct_dde - Setup single DDE from buffer
+ *
+ * The DDE is set up with the buffer and length. The buffer must be properly
+ * aligned. The used length is returned.
+ * Returns:
+ * N Successfully set up DDE with N bytes
+ */
+static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
+ unsigned long pa, unsigned int len)
+{
+ unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa));
+
+ dde->flags = 0;
+ dde->count = 0;
+ dde->index = 0;
+ dde->length = cpu_to_be32(l);
+ dde->address = cpu_to_be64(pa);
+
+ return l;
+}
+
+/**
+ * setup_ddl - Setup DDL from buffer
+ *
+ * Returns:
+ * 0 Successfully set up DDL
+ */
+static int setup_ddl(struct data_descriptor_entry *dde,
+ struct data_descriptor_entry *ddl,
+ unsigned char *buf, unsigned int len,
+ bool in)
+{
+ unsigned long pa = nx842_get_pa(buf);
+ int i, ret, total_len = len;
+
+ if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) {
+ pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n",
+ in ? "input" : "output", pa, DDE_BUFFER_ALIGN);
+ return -EINVAL;
+ }
+
+ /* only need to check last mult; since buffer must be
+ * DDE_BUFFER_ALIGN aligned, and that is a multiple of
+ * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers
+ * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT.
+ */
+ if (len % DDE_BUFFER_LAST_MULT) {
+ pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n",
+ in ? "input" : "output", len, DDE_BUFFER_LAST_MULT);
+ if (in)
+ return -EINVAL;
+ len = round_down(len, DDE_BUFFER_LAST_MULT);
+ }
+
+ /* use a single direct DDE */
+ if (len <= LEN_ON_PAGE(pa)) {
+ ret = setup_direct_dde(dde, pa, len);
+ WARN_ON(ret < len);
+ return 0;
+ }
+
+ /* use the DDL */
+ for (i = 0; i < DDL_LEN_MAX && len > 0; i++) {
+ ret = setup_direct_dde(&ddl[i], pa, len);
+ buf += ret;
+ len -= ret;
+ pa = nx842_get_pa(buf);
+ }
+
+ if (len > 0) {
+ pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n",
+ total_len, in ? "input" : "output", len);
+ if (in)
+ return -EMSGSIZE;
+ total_len -= len;
+ }
+ setup_indirect_dde(dde, ddl, i, total_len);
+
+ return 0;
+}
+
+#define CSB_ERR(csb, msg, ...) \
+ pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \
+ ##__VA_ARGS__, (csb)->flags, \
+ (csb)->cs, (csb)->cc, (csb)->ce, \
+ be32_to_cpu((csb)->count))
+
+#define CSB_ERR_ADDR(csb, msg, ...) \
+ CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \
+ (unsigned long)be64_to_cpu((csb)->address))
+
+/**
+ * wait_for_csb - wait for the CSB to become valid and check its completion status
+ */
+static int wait_for_csb(struct nx842_workmem *wmem,
+ struct coprocessor_status_block *csb)
+{
+ ktime_t start = wmem->start, now = ktime_get();
+ ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
+
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
+ cpu_relax();
+ now = ktime_get();
+ if (ktime_after(now, timeout))
+ break;
+ }
+
+ /* hw has updated csb and output buffer */
+ barrier();
+
+ /* check CSB flags */
+ if (!(csb->flags & CSB_V)) {
+ CSB_ERR(csb, "CSB still not valid after %ld us, giving up",
+ (long)ktime_us_delta(now, start));
+ return -ETIMEDOUT;
+ }
+ if (csb->flags & CSB_F) {
+ CSB_ERR(csb, "Invalid CSB format");
+ return -EPROTO;
+ }
+ if (csb->flags & CSB_CH) {
+ CSB_ERR(csb, "Invalid CSB chaining state");
+ return -EPROTO;
+ }
+
+ /* verify CSB completion sequence is 0 */
+ if (csb->cs) {
+ CSB_ERR(csb, "Invalid CSB completion sequence");
+ return -EPROTO;
+ }
+
+ /* check CSB Completion Code */
+ switch (csb->cc) {
+ /* no error */
+ case CSB_CC_SUCCESS:
+ break;
+ case CSB_CC_TPBC_GT_SPBC:
+ /* not an error, but the compressed data is
+ * larger than the uncompressed data :(
+ */
+ break;
+
+ /* input data errors */
+ case CSB_CC_OPERAND_OVERLAP:
+ /* input and output buffers overlap */
+ CSB_ERR(csb, "Operand Overlap error");
+ return -EINVAL;
+ case CSB_CC_INVALID_OPERAND:
+ CSB_ERR(csb, "Invalid operand");
+ return -EINVAL;
+ case CSB_CC_NOSPC:
+ /* output buffer too small */
+ return -ENOSPC;
+ case CSB_CC_ABORT:
+ CSB_ERR(csb, "Function aborted");
+ return -EINTR;
+ case CSB_CC_CRC_MISMATCH:
+ CSB_ERR(csb, "CRC mismatch");
+ return -EINVAL;
+ case CSB_CC_TEMPL_INVALID:
+ CSB_ERR(csb, "Compressed data template invalid");
+ return -EINVAL;
+ case CSB_CC_TEMPL_OVERFLOW:
+ CSB_ERR(csb, "Compressed data template shows data past end");
+ return -EINVAL;
+ case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */
+ /*
+ * DDE byte count exceeds the limit specified in Maximum
+ * byte count register.
+ */
+ CSB_ERR(csb, "DDE byte count exceeds the limit");
+ return -EINVAL;
+
+ /* these should not happen */
+ case CSB_CC_INVALID_ALIGN:
+ /* setup_ddl should have detected this */
+ CSB_ERR_ADDR(csb, "Invalid alignment");
+ return -EINVAL;
+ case CSB_CC_DATA_LENGTH:
+ /* setup_ddl should have detected this */
+ CSB_ERR(csb, "Invalid data length");
+ return -EINVAL;
+ case CSB_CC_WR_TRANSLATION:
+ case CSB_CC_TRANSLATION:
+ case CSB_CC_TRANSLATION_DUP1:
+ case CSB_CC_TRANSLATION_DUP2:
+ case CSB_CC_TRANSLATION_DUP3:
+ case CSB_CC_TRANSLATION_DUP4:
+ case CSB_CC_TRANSLATION_DUP5:
+ case CSB_CC_TRANSLATION_DUP6:
+ /* should not happen, we use physical addrs */
+ CSB_ERR_ADDR(csb, "Translation error");
+ return -EPROTO;
+ case CSB_CC_WR_PROTECTION:
+ case CSB_CC_PROTECTION:
+ case CSB_CC_PROTECTION_DUP1:
+ case CSB_CC_PROTECTION_DUP2:
+ case CSB_CC_PROTECTION_DUP3:
+ case CSB_CC_PROTECTION_DUP4:
+ case CSB_CC_PROTECTION_DUP5:
+ case CSB_CC_PROTECTION_DUP6:
+ /* should not happen, we use physical addrs */
+ CSB_ERR_ADDR(csb, "Protection error");
+ return -EPROTO;
+ case CSB_CC_PRIVILEGE:
+ /* shouldn't happen, we're in HYP mode */
+ CSB_ERR(csb, "Insufficient Privilege error");
+ return -EPROTO;
+ case CSB_CC_EXCESSIVE_DDE:
+ /* shouldn't happen, setup_ddl doesn't use many dde's */
+ CSB_ERR(csb, "Too many DDEs in DDL");
+ return -EINVAL;
+ case CSB_CC_TRANSPORT:
+ case CSB_CC_INVALID_CRB: /* P9 or later */
+ /* shouldn't happen, we setup CRB correctly */
+ CSB_ERR(csb, "Invalid CRB");
+ return -EINVAL;
+ case CSB_CC_INVALID_DDE: /* P9 or later */
+ /*
+ * shouldn't happen, setup_direct/indirect_dde creates
+ * DDE right
+ */
+ CSB_ERR(csb, "Invalid DDE");
+ return -EINVAL;
+ case CSB_CC_SEGMENTED_DDL:
+ /* shouldn't happen, setup_ddl creates DDL right */
+ CSB_ERR(csb, "Segmented DDL error");
+ return -EINVAL;
+ case CSB_CC_DDE_OVERFLOW:
+ /* shouldn't happen, setup_ddl creates DDL right */
+ CSB_ERR(csb, "DDE overflow error");
+ return -EINVAL;
+ case CSB_CC_SESSION:
+ /* should not happen with ICSWX */
+ CSB_ERR(csb, "Session violation error");
+ return -EPROTO;
+ case CSB_CC_CHAIN:
+ /* should not happen, we don't use chained CRBs */
+ CSB_ERR(csb, "Chained CRB error");
+ return -EPROTO;
+ case CSB_CC_SEQUENCE:
+ /* should not happen, we don't use chained CRBs */
+ CSB_ERR(csb, "CRB sequence number error");
+ return -EPROTO;
+ case CSB_CC_UNKNOWN_CODE:
+ CSB_ERR(csb, "Unknown subfunction code");
+ return -EPROTO;
+
+ /* hardware errors */
+ case CSB_CC_RD_EXTERNAL:
+ case CSB_CC_RD_EXTERNAL_DUP1:
+ case CSB_CC_RD_EXTERNAL_DUP2:
+ case CSB_CC_RD_EXTERNAL_DUP3:
+ CSB_ERR_ADDR(csb, "Read error outside coprocessor");
+ return -EPROTO;
+ case CSB_CC_WR_EXTERNAL:
+ CSB_ERR_ADDR(csb, "Write error outside coprocessor");
+ return -EPROTO;
+ case CSB_CC_INTERNAL:
+ CSB_ERR(csb, "Internal error in coprocessor");
+ return -EPROTO;
+ case CSB_CC_PROVISION:
+ CSB_ERR(csb, "Storage provision error");
+ return -EPROTO;
+ case CSB_CC_HW:
+ CSB_ERR(csb, "Correctable hardware error");
+ return -EPROTO;
+ case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */
+ CSB_ERR(csb, "Job did not finish within allowed time");
+ return -EPROTO;
+
+ default:
+ CSB_ERR(csb, "Invalid CC %d", csb->cc);
+ return -EPROTO;
+ }
+
+ /* check Completion Extension state */
+ if (csb->ce & CSB_CE_TERMINATION) {
+ CSB_ERR(csb, "CSB request was terminated");
+ return -EPROTO;
+ }
+ if (csb->ce & CSB_CE_INCOMPLETE) {
+ CSB_ERR(csb, "CSB request not complete");
+ return -EPROTO;
+ }
+ if (!(csb->ce & CSB_CE_TPBC)) {
+ CSB_ERR(csb, "TPBC not provided, unknown target length");
+ return -EPROTO;
+ }
+
+ /* successful completion */
+ pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+ be32_to_cpu(csb->count),
+ (unsigned long)ktime_us_delta(now, start));
+
+ return 0;
+}
+
+static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int outlen,
+ struct nx842_workmem *wmem)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ u64 csb_addr;
+ int ret;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ /* Clear any previous values */
+ memset(crb, 0, sizeof(*crb));
+
+ /* set up DDLs */
+ ret = setup_ddl(&crb->source, wmem->ddl_in,
+ (unsigned char *)in, inlen, true);
+ if (ret)
+ return ret;
+
+ ret = setup_ddl(&crb->target, wmem->ddl_out,
+ out, outlen, false);
+ if (ret)
+ return ret;
+
+ /* set up CRB's CSB addr */
+ csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
+ csb_addr |= CRB_CSB_AT; /* Addrs are phys */
+ crb->csb_addr = cpu_to_be64(csb_addr);
+
+ return 0;
+}
+
+/**
+ * nx842_exec_icswx - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * output data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0 Success, output of length @outlenp stored in the buffer at @out
+ * -ENODEV Hardware unavailable
+ * -ENOSPC Output buffer is too small
+ * -EMSGSIZE Input buffer too large
+ * -EINVAL buffer constraints do not fit nx842_constraints
+ * -EPROTO hardware error during operation
+ * -ETIMEDOUT hardware did not complete operation in reasonable time
+ * -EINTR operation was aborted
+ */
+static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *workmem, int fc)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ struct nx842_workmem *wmem;
+ int ret;
+ u32 ccw;
+ unsigned int outlen = *outlenp;
+
+ wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+ *outlenp = 0;
+
+	/* shouldn't happen, we don't load without a coproc */
+ if (!nx842_ct) {
+ pr_err_ratelimited("coprocessor CT is 0");
+ return -ENODEV;
+ }
+
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+ if (ret)
+ return ret;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ /* set up CCW */
+ ccw = 0;
+ ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
+ ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+
+ wmem->start = ktime_get();
+
+ /* do ICSWX */
+ ret = icswx(cpu_to_be32(ccw), crb);
+
+ pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret,
+ (unsigned int)ccw,
+ (unsigned int)be32_to_cpu(crb->ccw));
+
+ /*
+ * NX842 coprocessor sets 3rd bit in CR register with XER[S0].
+	 * XER[S0] is the integer summary overflow bit, which has nothing
+	 * to do with NX. Since this bit can be set with other return values,
+ * mask this bit.
+ */
+ ret &= ~ICSWX_XERS0;
+
+ switch (ret) {
+ case ICSWX_INITIATED:
+ ret = wait_for_csb(wmem, csb);
+ break;
+ case ICSWX_BUSY:
+ pr_debug_ratelimited("842 Coprocessor busy\n");
+ ret = -EBUSY;
+ break;
+ case ICSWX_REJECTED:
+ pr_err_ratelimited("ICSWX rejected\n");
+ ret = -EPROTO;
+ break;
+ }
+
+ if (!ret)
+ *outlenp = be32_to_cpu(csb->count);
+
+ return ret;
+}
+
+/**
+ * nx842_exec_vas - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * output data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0 Success, output of length @outlenp stored in the buffer
+ * at @out
+ * -ENODEV Hardware unavailable
+ * -ENOSPC Output buffer is too small
+ * -EMSGSIZE Input buffer too large
+ * -EINVAL buffer constraints do not fit nx842_constraints
+ * -EPROTO hardware error during operation
+ * -ETIMEDOUT hardware did not complete operation in reasonable time
+ * -EINTR operation was aborted
+ */
+static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *workmem, int fc)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ struct nx842_workmem *wmem;
+ struct vas_window *txwin;
+ int ret, i = 0;
+ u32 ccw;
+ unsigned int outlen = *outlenp;
+
+ wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+ *outlenp = 0;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+ if (ret)
+ return ret;
+
+ ccw = 0;
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+ crb->ccw = cpu_to_be32(ccw);
+
+ do {
+ wmem->start = ktime_get();
+ preempt_disable();
+ txwin = this_cpu_read(cpu_txwin);
+
+ /*
+		 * VAS copies the CRB into the L2 cache. Refer to <asm/vas.h>
+		 * for @crb and @offset.
+ */
+ vas_copy_crb(crb, 0);
+
+ /*
+		 * VAS pastes the previously copied CRB to NX; arguments are
+		 * @txwin, @offset and @last (must be true).
+ */
+ ret = vas_paste_crb(txwin, 0, 1);
+ preempt_enable();
+ /*
+ * Retry copy/paste function for VAS failures.
+ */
+ } while (ret && (i++ < VAS_RETRIES));
+
+ if (ret) {
+ pr_err_ratelimited("VAS copy/paste failed\n");
+ return ret;
+ }
+
+ ret = wait_for_csb(wmem, csb);
+ if (!ret)
+ *outlenp = be32_to_cpu(csb->count);
+
+ return ret;
+}
+
+/**
+ * nx842_powernv_compress - Compress data using the 842 algorithm
+ *
+ * Compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is compressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * compressed data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ *
+ * Returns: see @nx842_powernv_exec()
+ */
+static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *wmem)
+{
+ return nx842_powernv_exec(in, inlen, out, outlenp,
+ wmem, CCW_FC_842_COMP_CRC);
+}
+
+/**
+ * nx842_powernv_decompress - Decompress data using the 842 algorithm
+ *
+ * Decompression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * The input buffer is decompressed and the result is stored in the
+ * provided output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * decompressed data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ *
+ * Returns: see @nx842_powernv_exec()
+ */
+static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *wmem)
+{
+ return nx842_powernv_exec(in, inlen, out, outlenp,
+ wmem, CCW_FC_842_DECOMP_CRC);
+}
+
+static inline void nx_add_coprocs_list(struct nx_coproc *coproc,
+ int chipid)
+{
+ coproc->chip_id = chipid;
+ INIT_LIST_HEAD(&coproc->list);
+ list_add(&coproc->list, &nx_coprocs);
+}
+
+static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc)
+{
+ struct vas_window *txwin = NULL;
+ struct vas_tx_win_attr txattr;
+
+ /*
+ * Kernel requests will be high priority. So open send
+ * windows only for high priority RxFIFO entries.
+ */
+ vas_init_tx_win_attr(&txattr, coproc->ct);
+ txattr.lpid = 0; /* lpid is 0 for kernel requests */
+
+ /*
+ * Open a VAS send window which is used to send request to NX.
+ */
+ txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
+ if (IS_ERR(txwin))
+ pr_err("ibm,nx-842: Can not open TX window: %ld\n",
+ PTR_ERR(txwin));
+
+ return txwin;
+}
+
+/*
+ * Identify the chip ID for each CPU, open a send window for the corresponding NX
+ * engine and save txwin in percpu cpu_txwin.
+ * cpu_txwin is used in copy/paste operation for each compression /
+ * decompression request.
+ */
+static int nx_open_percpu_txwins(void)
+{
+ struct nx_coproc *coproc, *n;
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ struct vas_window *txwin = NULL;
+
+ chip_id = cpu_to_chip_id(i);
+
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
+ /*
+ * Kernel requests use only high priority FIFOs. So
+ * open send windows for these FIFOs.
+ * GZIP is not supported in kernel right now.
+ */
+
+ if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
+ continue;
+
+ if (coproc->chip_id == chip_id) {
+ txwin = nx_alloc_txwin(coproc);
+ if (IS_ERR(txwin))
+ return PTR_ERR(txwin);
+
+ per_cpu(cpu_txwin, i) = txwin;
+ break;
+ }
+ }
+
+ if (!per_cpu(cpu_txwin, i)) {
+			/* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority,
+ int high, int normal)
+{
+ if (!strcmp(priority, "High"))
+ coproc->ct = high;
+ else if (!strcmp(priority, "Normal"))
+ coproc->ct = normal;
+ else {
+ pr_err("Invalid RxFIFO priority value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+ int vasid, int type, int *ct)
+{
+ struct vas_window *rxwin = NULL;
+ struct vas_rx_win_attr rxattr;
+ u32 lpid, pid, tid, fifo_size;
+ struct nx_coproc *coproc;
+ u64 rx_fifo;
+ const char *priority;
+ int ret;
+
+ ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
+ if (ret) {
+ pr_err("Missing rx-fifo-address property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
+ if (ret) {
+ pr_err("Missing rx-fifo-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "lpid", &lpid);
+ if (ret) {
+ pr_err("Missing lpid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "pid", &pid);
+ if (ret) {
+ pr_err("Missing pid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "tid", &tid);
+ if (ret) {
+ pr_err("Missing tid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_string(dn, "priority", &priority);
+ if (ret) {
+ pr_err("Missing priority property\n");
+ return ret;
+ }
+
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+ if (!coproc)
+ return -ENOMEM;
+
+ if (type == NX_CT_842)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI,
+ VAS_COP_TYPE_842);
+ else if (type == NX_CT_GZIP)
+ ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI,
+ VAS_COP_TYPE_GZIP);
+
+ if (ret)
+ goto err_out;
+
+ vas_init_rx_win_attr(&rxattr, coproc->ct);
+ rxattr.rx_fifo = (void *)rx_fifo;
+ rxattr.rx_fifo_size = fifo_size;
+ rxattr.lnotify_lpid = lpid;
+ rxattr.lnotify_pid = pid;
+ rxattr.lnotify_tid = tid;
+ /*
+	 * Maximum RX window credits cannot be more than the number of CRBs in
+	 * the RxFIFO. Otherwise, we can get a checkstop if the RxFIFO overruns.
+ */
+ rxattr.wcreds_max = fifo_size / CRB_SIZE;
+
+ /*
+	 * Open a VAS receive window which is used to configure RxFIFO
+ * for NX.
+ */
+ rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
+ if (IS_ERR(rxwin)) {
+ ret = PTR_ERR(rxwin);
+ pr_err("setting RxFIFO with VAS failed: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ coproc->vas.rxwin = rxwin;
+ coproc->vas.id = vasid;
+ nx_add_coprocs_list(coproc, chip_id);
+
+ /*
+ * (lpid, pid, tid) combination has to be unique for each
+ * coprocessor instance in the system. So to make it
+	 * unique, skiboot uses the coprocessor type, such as 842 or
+	 * GZIP, for pid and provides this value to the kernel in the pid
+	 * device-tree property.
+ */
+ *ct = pid;
+
+ return 0;
+
+err_out:
+ kfree(coproc);
+ return ret;
+}
+
+static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip)
+{
+ int ret = 0;
+
+ if (opal_check_token(OPAL_NX_COPROC_INIT)) {
+ ret = opal_nx_coproc_init(chip_id, ct_842);
+
+ if (!ret)
+ ret = opal_nx_coproc_init(chip_id, ct_gzip);
+
+ if (ret) {
+ ret = opal_error_code(ret);
+ pr_err("Failed to initialize NX for chip(%d): %d\n",
+ chip_id, ret);
+ }
+ } else
+ pr_warn("Firmware doesn't support NX initialization\n");
+
+ return ret;
+}
+
+static int __init find_nx_device_tree(struct device_node *dn, int chip_id,
+ int vasid, int type, char *devname,
+ int *ct)
+{
+ int ret = 0;
+
+ if (of_device_is_compatible(dn, devname)) {
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct);
+ if (ret)
+ of_node_put(dn);
+ }
+
+ return ret;
+}
+
+static int __init nx_powernv_probe_vas(struct device_node *pn)
+{
+ int chip_id, vasid, ret = 0;
+ int ct_842 = 0, ct_gzip = 0;
+ struct device_node *dn;
+
+ chip_id = of_get_ibm_chip_id(pn);
+ if (chip_id < 0) {
+ pr_err("ibm,chip-id missing\n");
+ return -EINVAL;
+ }
+
+ vasid = chip_to_vas_id(chip_id);
+ if (vasid < 0) {
+ pr_err("Unable to map chip_id %d to vasid\n", chip_id);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(pn, dn) {
+ ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842,
+ "ibm,p9-nx-842", &ct_842);
+
+ if (!ret)
+ ret = find_nx_device_tree(dn, chip_id, vasid,
+ NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!ct_842 || !ct_gzip) {
+ pr_err("NX FIFO nodes are missing\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize NX instance for both high and normal priority FIFOs.
+ */
+ ret = nx_coproc_init(chip_id, ct_842, ct_gzip);
+
+ return ret;
+}
+
+static int __init nx842_powernv_probe(struct device_node *dn)
+{
+ struct nx_coproc *coproc;
+ unsigned int ct, ci;
+ int chip_id;
+
+ chip_id = of_get_ibm_chip_id(dn);
+ if (chip_id < 0) {
+ pr_err("ibm,chip-id missing\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) {
+ pr_err("ibm,842-coprocessor-type missing\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) {
+ pr_err("ibm,842-coprocessor-instance missing\n");
+ return -EINVAL;
+ }
+
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+ if (!coproc)
+ return -ENOMEM;
+
+ coproc->ct = ct;
+ coproc->ci = ci;
+ nx_add_coprocs_list(coproc, chip_id);
+
+ pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
+
+ if (!nx842_ct)
+ nx842_ct = ct;
+ else if (nx842_ct != ct)
+ pr_err("NX842 chip %d, CT %d != first found CT %d\n",
+ chip_id, ct, nx842_ct);
+
+ return 0;
+}
+
+static void nx_delete_coprocs(void)
+{
+ struct nx_coproc *coproc, *n;
+ struct vas_window *txwin;
+ int i;
+
+ /*
+ * close percpu txwins that are opened for the corresponding coproc.
+ */
+ for_each_possible_cpu(i) {
+ txwin = per_cpu(cpu_txwin, i);
+ if (txwin)
+ vas_win_close(txwin);
+
+ per_cpu(cpu_txwin, i) = NULL;
+ }
+
+ list_for_each_entry_safe(coproc, n, &nx_coprocs, list) {
+ if (coproc->vas.rxwin)
+ vas_win_close(coproc->vas.rxwin);
+
+ list_del(&coproc->list);
+ kfree(coproc);
+ }
+}
+
+static struct nx842_constraints nx842_powernv_constraints = {
+ .alignment = DDE_BUFFER_ALIGN,
+ .multiple = DDE_BUFFER_LAST_MULT,
+ .minimum = DDE_BUFFER_LAST_MULT,
+ .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE,
+};
+
+static struct nx842_driver nx842_powernv_driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .workmem_size = sizeof(struct nx842_workmem),
+ .constraints = &nx842_powernv_constraints,
+ .compress = nx842_powernv_compress,
+ .decompress = nx842_powernv_decompress,
+};
+
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+ return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+ .cra_name = "842",
+ .cra_driver_name = "842-nx",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
+ .cra_ctxsize = sizeof(struct nx842_crypto_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = nx842_powernv_crypto_init,
+ .cra_exit = nx842_crypto_exit,
+ .cra_u = { .compress = {
+ .coa_compress = nx842_crypto_compress,
+ .coa_decompress = nx842_crypto_decompress } }
+};
+
+static __init int nx_compress_powernv_init(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ /* verify workmem size/align restrictions */
+ BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
+ BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN);
+ BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN);
+ /* verify buffer size/align restrictions */
+ BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN);
+ BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
+ BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
+
+ for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
+ ret = nx_powernv_probe_vas(dn);
+ if (ret) {
+ nx_delete_coprocs();
+ of_node_put(dn);
+ return ret;
+ }
+ }
+
+ if (list_empty(&nx_coprocs)) {
+ for_each_compatible_node(dn, NULL, "ibm,power-nx")
+ nx842_powernv_probe(dn);
+
+ if (!nx842_ct)
+ return -ENODEV;
+
+ nx842_powernv_exec = nx842_exec_icswx;
+ } else {
+ /*
+		 * Register the VAS user space API for NX GZIP so
+		 * that user space can use the GZIP engine.
+		 * High FIFO priority is used for kernel requests and
+		 * normal FIFO priority is assigned to user space.
+		 * 842 compression is supported only in the kernel.
+ */
+ ret = vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
+ "nx-gzip");
+
+ /*
+ * GZIP is not supported in kernel right now.
+ * So open tx windows only for 842.
+ */
+ if (!ret)
+ ret = nx_open_percpu_txwins();
+
+ if (ret) {
+ nx_delete_coprocs();
+ return ret;
+ }
+
+ nx842_powernv_exec = nx842_exec_vas;
+ }
+
+ ret = crypto_register_alg(&nx842_powernv_alg);
+ if (ret) {
+ nx_delete_coprocs();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(nx_compress_powernv_init);
+
+static void __exit nx_compress_powernv_exit(void)
+{
+ /*
+	 * The GZIP engine is supported only on Power9 and later;
+	 * nx842_ct is used on Power8 (icswx).
+	 * The VAS API for NX GZIP was registered during init for user
+	 * space use, so unregister it here.
+ */
+ if (!nx842_ct)
+ vas_unregister_coproc_api();
+
+ crypto_unregister_alg(&nx842_powernv_alg);
+
+ nx_delete_coprocs();
+}
+module_exit(nx_compress_powernv_exit);
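
For reference, a minimal sketch of how a kernel caller could exercise the "842" algorithm registered above through the generic crypto compression API; the helper name example_842_compress and the abbreviated error handling are illustrative and not part of this patch.

#include <linux/crypto.h>
#include <linux/err.h>

/*
 * Compress slen bytes from src into dst; *dlen holds the dst capacity
 * on entry and the compressed length on success.
 */
static int example_842_compress(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret;

	tfm = crypto_alloc_comp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return ret;
}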
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 32dc00dc570b..9f937bdc53a7 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -77,7 +77,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
tag = (u8 *)rctx->auth_tag;
for (i = 0; i < dd->authsize; i++) {
if (tag[i]) {
- dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
ret = -EBADMSG;
}
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 824ddf2a66ff..b5aff20c5900 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1269,13 +1269,17 @@ static int omap_aes_remove(struct platform_device *pdev)
spin_unlock(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
- for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
+ for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
crypto_unregister_aead(aalg);
+ dd->pdata->aead_algs_info->registered--;
+
}
crypto_engine_exit(dd->engine);
diff --git a/drivers/crypto/omap-crypto.c b/drivers/crypto/omap-crypto.c
index cc88b7362bc2..94b2dba90f0d 100644
--- a/drivers/crypto/omap-crypto.c
+++ b/drivers/crypto/omap-crypto.c
@@ -178,11 +178,17 @@ static void omap_crypto_copy_data(struct scatterlist *src,
amt = min(src->length - srco, dst->length - dsto);
amt = min(len, amt);
- srcb = sg_virt(src) + srco;
- dstb = sg_virt(dst) + dsto;
+ srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
+ dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
memcpy(dstb, srcb, amt);
+ if (!PageSlab(sg_page(dst)))
+ flush_kernel_dcache_page(sg_page(dst));
+
+ kunmap_atomic(srcb);
+ kunmap_atomic(dstb);
+
srco += amt;
dsto += amt;
len -= amt;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 063ad5d03f33..82691a057d2a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
- struct omap_sham_dev *dd;
-
unsigned long flags;
/* fallback stuff */
@@ -750,8 +748,17 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
int offset = rctx->offset;
int bufcnt = rctx->bufcnt;
- if (!sg || !sg->length || !nbytes)
+ if (!sg || !sg->length || !nbytes) {
+ if (bufcnt) {
+ bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
+ sg_init_table(rctx->sgl, 1);
+ sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
+ rctx->sg = rctx->sgl;
+ rctx->sg_len = 1;
+ }
+
return 0;
+ }
new_len = nbytes;
@@ -895,7 +902,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
if (hash_later < 0)
hash_later = 0;
- if (hash_later) {
+ if (hash_later && hash_later <= rctx->buflen) {
scatterwalk_map_and_copy(rctx->buffer,
req->src,
req->nbytes - hash_later,
@@ -925,27 +932,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
return 0;
}
+struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
+{
+ struct omap_sham_dev *dd;
+
+ if (ctx->dd)
+ return ctx->dd;
+
+ spin_lock_bh(&sham.lock);
+ dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
+ list_move_tail(&dd->list, &sham.dev_list);
+ ctx->dd = dd;
+ spin_unlock_bh(&sham.lock);
+
+ return dd;
+}
+
static int omap_sham_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = NULL, *tmp;
+ struct omap_sham_dev *dd;
int bs = 0;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
+ ctx->dd = NULL;
- ctx->dd = dd;
+ dd = omap_sham_find_dev(ctx);
+ if (!dd)
+ return -ENODEV;
ctx->flags = 0;
@@ -1215,8 +1230,7 @@ err1:
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct omap_sham_dev *dd = tctx->dd;
+ struct omap_sham_dev *dd = ctx->dd;
ctx->op = op;
@@ -1226,7 +1240,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
static int omap_sham_update(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- struct omap_sham_dev *dd = ctx->dd;
+ struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
if (!req->nbytes)
return 0;
@@ -1319,21 +1333,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
struct omap_sham_hmac_ctx *bctx = tctx->base;
int bs = crypto_shash_blocksize(bctx->shash);
int ds = crypto_shash_digestsize(bctx->shash);
- struct omap_sham_dev *dd = NULL, *tmp;
int err, i;
- spin_lock_bh(&sham.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &sham.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
- spin_unlock_bh(&sham.lock);
-
err = crypto_shash_setkey(tctx->fallback, key, keylen);
if (err)
return err;
@@ -1350,7 +1351,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
memset(bctx->ipad + keylen, 0, bs - keylen);
- if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
+ if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
memcpy(bctx->opad, bctx->ipad, bs);
for (i = 0; i < bs; i++) {
@@ -1571,7 +1572,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1592,7 +1594,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1614,7 +1617,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1637,7 +1641,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1662,7 +1667,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1683,7 +1689,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
@@ -1705,7 +1712,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -1728,7 +1736,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
@@ -2154,6 +2163,7 @@ static int omap_sham_probe(struct platform_device *pdev)
}
dd->flags |= dd->pdata->flags;
+ sham.flags |= dd->pdata->flags;
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
@@ -2181,6 +2191,9 @@ static int omap_sham_probe(struct platform_device *pdev)
spin_unlock(&sham.lock);
for (i = 0; i < dd->pdata->algs_info_size; i++) {
+ if (dd->pdata->algs_info[i].registered)
+ break;
+
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
struct ahash_alg *alg;
@@ -2232,9 +2245,11 @@ static int omap_sham_remove(struct platform_device *pdev)
list_del(&dd->list);
spin_unlock(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
- for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
+ for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
+ dd->pdata->algs_info[i].registered--;
+ }
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index fd045e64972a..cb8a6ea2a4bc 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -350,13 +350,18 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
int err;
unsigned long flags;
struct scatterlist outhdr, iv_sg, status_sg, **sgs;
- int i;
u64 dst_len;
unsigned int num_out = 0, num_in = 0;
int sg_total;
uint8_t *iv;
+ struct scatterlist *sg;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (src_nents < 0) {
+ pr_err("Invalid number of src SG.\n");
+ return src_nents;
+ }
+
dst_nents = sg_nents(req->dst);
pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -402,6 +407,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
goto free;
}
+ dst_len = min_t(unsigned int, req->cryptlen, dst_len);
pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
req->cryptlen, dst_len);
@@ -442,12 +448,12 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
vc_sym_req->iv = iv;
/* Source data */
- for (i = 0; i < src_nents; i++)
- sgs[num_out++] = &req->src[i];
+ for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+ sgs[num_out++] = sg;
/* Destination data */
- for (i = 0; i < dst_nents; i++)
- sgs[num_out + num_in++] = &req->dst[i];
+ for (sg = req->dst; sg; sg = sg_next(sg))
+ sgs[num_out + num_in++] = sg;
/* Status */
sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@@ -577,10 +583,11 @@ static void virtio_crypto_skcipher_finalize_req(
scatterwalk_map_and_copy(req->iv, req->dst,
req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
- req, err);
kzfree(vc_sym_req->iv);
virtcrypto_clear_request(&vc_sym_req->base);
+
+ crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
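
The __virtio_crypto_skcipher_do_req() hunk above replaces array indexing of req->src and req->dst with sg_next() walks, since scatterlists may be chained rather than laid out as one flat array. A minimal sketch of that traversal pattern (the helper name is illustrative, not part of this patch):

#include <linux/scatterlist.h>

/* Collect every entry of a (possibly chained) scatterlist into out[]. */
static unsigned int example_collect_sgs(struct scatterlist *sgl,
					struct scatterlist **out)
{
	struct scatterlist *sg;
	unsigned int n = 0;

	for (sg = sgl; sg; sg = sg_next(sg))
		out[n++] = sg;

	return n;
}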
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 09f7f468eef8..cd11558893cd 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -46,7 +46,6 @@ struct zynqmp_aead_drv_ctx {
} alg;
struct device *dev;
struct crypto_engine *engine;
- const struct zynqmp_eemi_ops *eemi_ops;
};
struct zynqmp_aead_hw_req {
@@ -80,21 +79,15 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
struct device *dev = tfm_ctx->dev;
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct zynqmp_aead_drv_ctx *drv_ctx;
struct zynqmp_aead_hw_req *hwreq;
dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
+ int ret;
size_t dma_size;
char *kbuf;
int err;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
-
- if (!drv_ctx->eemi_ops->aes)
- return -ENOTSUPP;
-
if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
+ GCM_AES_IV_SIZE;
@@ -136,9 +129,12 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
hwreq->key = 0;
}
- drv_ctx->eemi_ops->aes(dma_addr_hw_req, &status);
+ ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
- if (status) {
+ if (ret) {
+ dev_err(dev, "ERROR: AES PM API failed\n");
+ err = ret;
+ } else if (status) {
switch (status) {
case ZYNQMP_AES_GCM_TAG_MISMATCH_ERR:
dev_err(dev, "ERROR: Gcm Tag mismatch\n");
@@ -388,12 +384,6 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
else
return -ENODEV;
- aes_drv_ctx.eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(aes_drv_ctx.eemi_ops)) {
- dev_err(dev, "Failed to get ZynqMP EEMI interface\n");
- return PTR_ERR(aes_drv_ctx.eemi_ops);
- }
-
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
if (err < 0) {
dev_err(dev, "No usable DMA configuration\n");
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index 3107ce80e809..16850d5388ab 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -44,6 +44,7 @@ struct dax_region {
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @dax_mem_res: physical address range of hotadded DAX memory
+ * @dax_mem_name: name for hotadded DAX memory via add_memory_driver_managed()
*/
struct dev_dax {
struct dax_region *region;
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index 1e678bdf5aed..275aa5f87399 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -14,6 +14,11 @@
#include "dax-private.h"
#include "bus.h"
+/* Memory resource name used for add_memory_driver_managed(). */
+static const char *kmem_name;
+/* Set if any memory will remain added when the driver is unloaded. */
+static bool any_hotremove_failed;
+
int dev_dax_kmem_probe(struct device *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
@@ -70,7 +75,12 @@ int dev_dax_kmem_probe(struct device *dev)
*/
new_res->flags = IORESOURCE_SYSTEM_RAM;
- rc = add_memory(numa_node, new_res->start, resource_size(new_res));
+ /*
+ * Ensure that future kexec'd kernels will not treat this as RAM
+ * automatically.
+ */
+ rc = add_memory_driver_managed(numa_node, new_res->start,
+ resource_size(new_res), kmem_name);
if (rc) {
release_resource(new_res);
kfree(new_res);
@@ -100,6 +110,7 @@ static int dev_dax_kmem_remove(struct device *dev)
*/
rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size);
if (rc) {
+ any_hotremove_failed = true;
dev_err(dev,
"DAX region %pR cannot be hotremoved until the next reboot\n",
res);
@@ -124,6 +135,7 @@ static int dev_dax_kmem_remove(struct device *dev)
* permanently pinned as reserved by the unreleased
* request_mem_region().
*/
+ any_hotremove_failed = true;
return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
@@ -137,12 +149,24 @@ static struct dax_device_driver device_dax_kmem_driver = {
static int __init dax_kmem_init(void)
{
- return dax_driver_register(&device_dax_kmem_driver);
+ int rc;
+
+ /* Resource name is permanently allocated if any hotremove fails. */
+ kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
+ if (!kmem_name)
+ return -ENOMEM;
+
+ rc = dax_driver_register(&device_dax_kmem_driver);
+ if (rc)
+ kfree_const(kmem_name);
+ return rc;
}
static void __exit dax_kmem_exit(void)
{
dax_driver_unregister(&device_dax_kmem_driver);
+ if (!any_hotremove_failed)
+ kfree_const(kmem_name);
}
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index eb25627b059d..21ebd0af268b 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -24,9 +24,7 @@ int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), NULL,
"requester%d", req_count++);
- if (IS_ERR(cd))
- return PTR_ERR(cd);
- return 0;
+ return PTR_ERR_OR_ZERO(cd);
}
void dca_sysfs_remove_req(struct dca_provider *dca, int slot)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 4264e64788c4..b45f8514dc82 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -34,6 +34,7 @@
#include <linux/dma-resv.h>
#include <linux/export.h>
+#include <linux/mm.h>
#include <linux/sched/mm.h>
/**
@@ -109,7 +110,7 @@ static int __init dma_resv_lockdep(void)
dma_resv_init(&obj);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ww_acquire_init(&ctx, &reservation_ww_class);
ret = dma_resv_lock(&obj, &ctx);
if (ret == -EDEADLK)
@@ -118,7 +119,7 @@ static int __init dma_resv_lockdep(void)
fs_reclaim_release(GFP_KERNEL);
ww_mutex_unlock(&obj.lock);
ww_acquire_fini(&ctx);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 023db6883d05..e9ed9165de40 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -106,7 +106,7 @@ config AXI_DMAC
select REGMAP_MMIO
help
Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
- controller is often used in Analog Device's reference designs for FPGA
+ controller is often used in Analog Devices' reference designs for FPGA
platforms.
config BCM_SBA_RAID
@@ -395,12 +395,10 @@ config MMP_TDMA
bool "MMP Two-Channel DMA support"
depends on ARCH_MMP || COMPILE_TEST
select DMA_ENGINE
- select MMP_SRAM if ARCH_MMP
select GENERIC_ALLOCATOR
help
Support the MMP Two-Channel DMA engine.
This engine used for MMP Audio DMA and pxa910 SQU.
- It needs sram driver under mach-mmp.
config MOXART_DMA
tristate "MOXART DMA support"
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 397692e937b3..80fc2fe8c77e 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -331,7 +331,7 @@ struct at_dma {
struct dma_pool *dma_desc_pool;
struct dma_pool *memset_pool;
/* AT THE END channels table */
- struct at_dma_chan chan[0];
+ struct at_dma_chan chan[];
};
#define dma_readl(atdma, name) \
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index bb0eaf38b594..fd92f048c491 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -212,7 +212,7 @@ struct at_xdmac {
struct clk *clk;
u32 save_gim;
struct dma_pool *at_xdmac_desc_pool;
- struct at_xdmac_chan chan[0];
+ struct at_xdmac_chan chan[];
};
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d31076d9ef25..2b06a7a8629d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -53,6 +53,8 @@
#include <linux/mempool.h>
#include <linux/numa.h>
+#include "dmaengine.h"
+
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
@@ -145,9 +147,9 @@ static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
/**
* dev_to_dma_chan - convert a device pointer to its sysfs container object
- * @dev - device node
+ * @dev: device node
*
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
@@ -243,22 +245,18 @@ static struct class dma_devclass = {
/* --- client and device registration --- */
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
+/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;
/**
- * dma_chan_tbl_ent - tracks channel allocations per core/operation
- * @chan - associated channel for this entry
+ * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan: associated channel for this entry
*/
struct dma_chan_tbl_ent {
struct dma_chan *chan;
};
-/**
- * channel_table - percpu lookup table for memory-to-memory offload providers
- */
+/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
@@ -295,8 +293,11 @@ static int __init dma_channel_table_init(void)
arch_initcall(dma_channel_table_init);
/**
- * dma_chan_is_local - returns true if the channel is in the same numa-node as
- * the cpu
+ * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
+ * @chan: DMA channel to test
+ * @cpu: CPU index which the channel should be close to
+ *
+ * Returns true if the channel is in the same NUMA-node as the CPU.
*/
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
@@ -306,14 +307,14 @@ static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
}
/**
- * min_chan - returns the channel with min count and in the same numa-node as
- * the cpu
- * @cap: capability to match
- * @cpu: cpu index which the channel should be close to
+ * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
+ * @cap: capability to match
+ * @cpu: CPU index which the channel should be close to
*
- * If some channels are close to the given cpu, the one with the lowest
- * reference count is returned. Otherwise, cpu is ignored and only the
+ * If some channels are close to the given CPU, the one with the lowest
+ * reference count is returned. Otherwise, CPU is ignored and only the
* reference count is taken into account.
+ *
* Must be called under dma_list_mutex.
*/
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
@@ -351,10 +352,11 @@ static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
/**
* dma_channel_rebalance - redistribute the available channels
*
- * Optimize for cpu isolation (each cpu gets a dedicated channel for an
- * operation type) in the SMP case, and operation isolation (avoid
- * multi-tasking channels) in the non-SMP case. Must be called under
- * dma_list_mutex.
+ * Optimize for CPU isolation (each CPU gets a dedicated channel for an
+ * operation type) in the SMP case, and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.
+ *
+ * Must be called under dma_list_mutex.
*/
static void dma_channel_rebalance(void)
{
@@ -404,9 +406,9 @@ static struct module *dma_chan_to_owner(struct dma_chan *chan)
/**
* balance_ref_count - catch up the channel reference count
- * @chan - channel to balance ->client_count versus dmaengine_ref_count
+ * @chan: channel to balance ->client_count versus dmaengine_ref_count
*
- * balance_ref_count must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static void balance_ref_count(struct dma_chan *chan)
{
@@ -436,10 +438,10 @@ static void dma_device_put(struct dma_device *device)
}
/**
- * dma_chan_get - try to grab a dma channel's parent driver module
- * @chan - channel to grab
+ * dma_chan_get - try to grab a DMA channel's parent driver module
+ * @chan: channel to grab
*
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static int dma_chan_get(struct dma_chan *chan)
{
@@ -483,10 +485,10 @@ module_put_out:
}
/**
- * dma_chan_put - drop a reference to a dma channel's parent driver module
- * @chan - channel to release
+ * dma_chan_put - drop a reference to a DMA channel's parent driver module
+ * @chan: channel to release
*
- * Must be called under dma_list_mutex
+ * Must be called under dma_list_mutex.
*/
static void dma_chan_put(struct dma_chan *chan)
{
@@ -537,7 +539,7 @@ EXPORT_SYMBOL(dma_sync_wait);
/**
* dma_find_channel - find a channel to carry out the operation
- * @tx_type: transaction type
+ * @tx_type: transaction type
*/
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
@@ -677,7 +679,7 @@ static struct dma_chan *find_candidate(struct dma_device *device,
/**
* dma_get_slave_channel - try to get specific channel exclusively
- * @chan: target channel
+ * @chan: target channel
*/
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
@@ -731,10 +733,10 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
* __dma_request_channel - try to allocate an exclusive channel
- * @mask: capabilities that the channel must satisfy
- * @fn: optional callback to disposition available channels
- * @fn_param: opaque parameter to pass to dma_filter_fn
- * @np: device node to look for DMA channels
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn()
+ * @np: device node to look for DMA channels
*
* Returns pointer to appropriate DMA channel on success or NULL.
*/
@@ -877,7 +879,7 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel);
/**
* dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
- * @mask: capabilities that the channel must satisfy
+ * @mask: capabilities that the channel must satisfy
*
* Returns pointer to appropriate DMA channel on success or an error pointer.
*/
@@ -968,7 +970,7 @@ void dmaengine_get(void)
EXPORT_SYMBOL(dmaengine_get);
/**
- * dmaengine_put - let dma drivers be removed when ref_count == 0
+ * dmaengine_put - let DMA drivers be removed when ref_count == 0
*/
void dmaengine_put(void)
{
@@ -1132,7 +1134,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
/**
* dma_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
*
* After calling this routine the structure should not be freed except in the
* device_release() callback which will be called after
@@ -1304,7 +1306,7 @@ EXPORT_SYMBOL(dma_async_device_register);
/**
* dma_async_device_unregister - unregister a DMA device
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
*
* This routine is called by dma driver exit routines, dmaengine holds module
* references to prevent it being called while channels are in use.
@@ -1341,7 +1343,7 @@ static void dmam_device_release(struct device *dev, void *res)
/**
* dmaenginem_async_device_register - registers DMA devices found
- * @device: &dma_device
+ * @device: pointer to &struct dma_device
*
* The operation is managed and will be undone on driver detach.
*/
@@ -1578,8 +1580,9 @@ int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
-/* dma_wait_for_async_tx - spin wait for a transaction to complete
- * @tx: in-flight transaction to wait on
+/**
+ * dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
*/
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
@@ -1602,9 +1605,12 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-/* dma_run_dependencies - helper routine for dma drivers to process
- * (start) dependent operations on their target channel
- * @tx: transaction with dependencies
+/**
+ * dma_run_dependencies - process dependent operations on the target channel
+ * @tx: transaction with dependencies
+ *
+ * Helper routine for DMA drivers to process (start) dependent operations
+ * on their target channel.
*/
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 0425984db118..b175229a4b01 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -60,9 +60,9 @@ MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
-module_param(timeout, uint, S_IRUGO | S_IWUSR);
+module_param(timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
- "Pass 0xFFFFFFFF (4294967295) for maximum timeout");
+ "Pass -1 for infinite timeout");
static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
@@ -72,10 +72,6 @@ static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
-static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
-
static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
@@ -88,6 +84,10 @@ static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
+static bool polled;
+module_param(polled, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
+
/**
* struct dmatest_params - test parameters.
* @buf_size: size of the memcpy test buffer
@@ -98,7 +98,12 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default
* @iterations: iterations before stopping test
* @xor_sources: number of xor source buffers
* @pq_sources: number of p+q source buffers
- * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
+ * @timeout: transfer timeout in msec, -1 for infinite timeout
+ * @noverify: disable data verification
+ * @norandom: disable random offset setup
+ * @alignment: custom data address alignment taken as 2^alignment
+ * @transfer_size: custom transfer size in bytes
+ * @polled: use polling for completion instead of interrupts
*/
struct dmatest_params {
unsigned int buf_size;
@@ -109,7 +114,7 @@ struct dmatest_params {
unsigned int iterations;
unsigned int xor_sources;
unsigned int pq_sources;
- unsigned int timeout;
+ int timeout;
bool noverify;
bool norandom;
int alignment;
@@ -120,7 +125,10 @@ struct dmatest_params {
/**
* struct dmatest_info - test information.
* @params: test parameters
+ * @channels: channels under test
+ * @nr_channels: number of channels under test
* @lock: access protection to the fields of this structure
+ * @did_init: module has been initialized completely
*/
static struct dmatest_info {
/* Test parameters */
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index ff392c01bad1..ed430ad9b3dd 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -13,8 +13,9 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/dma/edma.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
@@ -322,7 +323,7 @@ static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
- enum dma_transfer_direction direction = xfer->direction;
+ enum dma_transfer_direction dir = xfer->direction;
phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
@@ -331,10 +332,26 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
u32 cnt;
int i;
- if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
- (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
+ if (!chan->configured)
return NULL;
+ switch (chan->config.direction) {
+ case DMA_DEV_TO_MEM: /* local dma */
+ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
+ break;
+ return NULL;
+ case DMA_MEM_TO_DEV: /* local dma */
+ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
+ break;
+ return NULL;
+ default: /* remote dma */
+ if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
+ break;
+ if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
+ break;
+ return NULL;
+ }
+
if (xfer->cyclic) {
if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
return NULL;
@@ -343,9 +360,6 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
return NULL;
}
- if (!chan->configured)
- return NULL;
-
desc = dw_edma_alloc_desc(chan);
if (unlikely(!desc))
goto err_alloc;
@@ -386,7 +400,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
- if (direction == DMA_DEV_TO_MEM) {
+ if (chan->dir == EDMA_DIR_WRITE) {
burst->sar = src_addr;
if (xfer->cyclic) {
burst->dar = xfer->xfer.cyclic.paddr;
@@ -773,6 +787,7 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
u32 rd_mask = 1;
int i, err = 0;
u32 ch_cnt;
+ int irq;
ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
@@ -781,16 +796,16 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
if (dw->nr_irqs == 1) {
/* Common IRQ shared among all channels */
- err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
- dw_edma_interrupt_common,
+ irq = dw->ops->irq_vector(dev, 0);
+ err = request_irq(irq, dw_edma_interrupt_common,
IRQF_SHARED, dw->name, &dw->irq[0]);
if (err) {
dw->nr_irqs = 0;
return err;
}
- get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
- &dw->irq[0].msi);
+ if (irq_get_msi_desc(irq))
+ get_cached_msi_msg(irq, &dw->irq[0].msi);
} else {
/* Distribute IRQs equally among all channels */
int tmp = dw->nr_irqs;
@@ -804,7 +819,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
- err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
+ irq = dw->ops->irq_vector(dev, i);
+ err = request_irq(irq,
i < *wr_alloc ?
dw_edma_interrupt_write :
dw_edma_interrupt_read,
@@ -815,8 +831,8 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
return err;
}
- get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
- &dw->irq[i].msi);
+ if (irq_get_msi_desc(irq))
+ get_cached_msi_msg(irq, &dw->irq[i].msi);
}
dw->nr_irqs = i;
@@ -827,12 +843,23 @@ static int dw_edma_irq_request(struct dw_edma_chip *chip,
int dw_edma_probe(struct dw_edma_chip *chip)
{
- struct device *dev = chip->dev;
- struct dw_edma *dw = chip->dw;
+ struct device *dev;
+ struct dw_edma *dw;
u32 wr_alloc = 0;
u32 rd_alloc = 0;
int i, err;
+ if (!chip)
+ return -EINVAL;
+
+ dev = chip->dev;
+ if (!dev)
+ return -EINVAL;
+
+ dw = chip->dw;
+ if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
+ return -EINVAL;
+
raw_spin_lock_init(&dw->lock);
/* Find out how many write channels are supported by hardware */
@@ -884,7 +911,7 @@ int dw_edma_probe(struct dw_edma_chip *chip)
err_irq_free:
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+ free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
dw->nr_irqs = 0;
@@ -904,7 +931,7 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Free irqs */
for (i = (dw->nr_irqs - 1); i >= 0; i--)
- free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+ free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
/* Power management */
pm_runtime_disable(dev);
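The new direction check in dw_edma_device_transfer() above distinguishes local from remote eDMA use; a standalone restatement of the accepted combinations (illustrative helper, assuming dmaengine's direction enum and treating the channel direction as a write/read boolean):

static bool edma_dir_ok(enum dma_transfer_direction cfg_dir,
			enum dma_transfer_direction xfer_dir,
			bool chan_is_write)
{
	switch (cfg_dir) {
	case DMA_DEV_TO_MEM:		/* local DMA: directions must match */
		return xfer_dir == DMA_DEV_TO_MEM && !chan_is_write;
	case DMA_MEM_TO_DEV:		/* local DMA: directions must match */
		return xfer_dir == DMA_MEM_TO_DEV && chan_is_write;
	default:			/* remote DMA: directions are inverted */
		return (xfer_dir == DMA_MEM_TO_DEV && !chan_is_write) ||
		       (xfer_dir == DMA_DEV_TO_MEM && chan_is_write);
	}
}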
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 4e5f9f6e901b..31fc50d31792 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -103,6 +103,10 @@ struct dw_edma_irq {
struct dw_edma *dw;
};
+struct dw_edma_core_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+};
+
struct dw_edma {
char name[20];
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index dc85f55e1bb8..1eafc602e17e 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -54,6 +54,15 @@ static const struct dw_edma_pcie_data snps_edda_data = {
.irqs = 1,
};
+static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
+{
+ return pci_irq_vector(to_pci_dev(dev), nr);
+}
+
+static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
+ .irq_vector = dw_edma_pcie_irq_vector,
+};
+
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
@@ -151,6 +160,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
dw->version = pdata->version;
dw->mode = pdata->mode;
dw->nr_irqs = nr_irqs;
+ dw->ops = &dw_edma_pcie_core_ops;
/* Debug info */
pci_dbg(pdev, "Version:\t%u\n", dw->version);
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3999827970ab..052dae5d6ddd 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -1092,6 +1092,16 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
};
/* IDXD device attribs */
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%#x\n", idxd->hw.version);
+}
+static DEVICE_ATTR_RO(version);
+
static ssize_t max_work_queues_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1313,6 +1323,7 @@ static ssize_t cdev_major_show(struct device *dev,
static DEVICE_ATTR_RO(cdev_major);
static struct attribute *idxd_device_attributes[] = {
+ &dev_attr_version.attr,
&dev_attr_max_groups.attr,
&dev_attr_max_work_queues.attr,
&dev_attr_max_work_queues_size.attr,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4d4477df4ede..91774039ae5d 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -2063,7 +2063,7 @@ static int sdma_probe(struct platform_device *pdev)
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
- for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+ for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
saddr_arr[i] = -EINVAL;
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e57592..8ad0ad861c86 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
u8 *pos;
off_t offs;
- chunk = idx / IOAT_DESCS_PER_2M;
- idx &= (IOAT_DESCS_PER_2M - 1);
+ chunk = idx / IOAT_DESCS_PER_CHUNK;
+ idx &= (IOAT_DESCS_PER_CHUNK - 1);
offs = idx * IOAT_DESC_SZ;
pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
if (!ring)
return NULL;
- ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+ chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+ ioat_chan->desc_chunks = chunks;
for (i = 0; i < chunks; i++) {
struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
for (idx = 0; idx < i; idx++) {
descs = &ioat_chan->descs[idx];
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
- descs->virt, descs->hw);
+ dma_free_coherent(to_dev(ioat_chan),
+ IOAT_CHUNK_SIZE,
+ descs->virt, descs->hw);
descs->virt = NULL;
descs->hw = 0;
}
@@ -404,7 +406,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
dma_free_coherent(to_dev(ioat_chan),
- SZ_2M,
+ IOAT_CHUNK_SIZE,
ioat_chan->descs[idx].virt,
ioat_chan->descs[idx].hw);
ioat_chan->descs[idx].virt = NULL;
@@ -867,6 +869,23 @@ static void check_active(struct ioatdma_chan *ioat_chan)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+ ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
void ioat_timer_event(struct timer_list *t)
{
struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
@@ -889,19 +908,7 @@ void ioat_timer_event(struct timer_list *t)
if (test_bit(IOAT_RUN, &ioat_chan->state)) {
spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
-
- ioat_abort_descs(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Reset channel...\n");
- ioat_reset_hw(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Restart channel...\n");
- ioat_restart_channel(ioat_chan);
-
- spin_lock_bh(&ioat_chan->prep_lock);
- clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
+ ioat_reboot_chan(ioat_chan);
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
@@ -915,17 +922,23 @@ void ioat_timer_event(struct timer_list *t)
spin_lock_bh(&ioat_chan->prep_lock);
check_active(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
- return;
+ goto unlock_out;
+ }
+
+ /* handle the missed cleanup case */
+ if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
+ /* timer restarted in ioat_cleanup_preamble
+ * and IOAT_COMPLETION_ACK cleared
+ */
+ __cleanup(ioat_chan, phys_complete);
+ goto unlock_out;
}
/* if we haven't made progress and we have already
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
- __cleanup(ioat_chan, phys_complete);
- else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+ if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
u32 chanerr;
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -937,25 +950,23 @@ void ioat_timer_event(struct timer_list *t)
dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
ioat_ring_active(ioat_chan));
- spin_lock_bh(&ioat_chan->prep_lock);
- set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
+ ioat_reboot_chan(ioat_chan);
- ioat_abort_descs(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
- ioat_reset_hw(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
- ioat_restart_channel(ioat_chan);
+ goto unlock_out;
+ }
+ /* handle missed issue pending case */
+ if (ioat_ring_pending(ioat_chan)) {
+ dev_warn(to_dev(ioat_chan),
+ "Completion timeout with pending descriptors\n");
spin_lock_bh(&ioat_chan->prep_lock);
- clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ __ioat_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
- return;
- } else
- set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+ }
+ set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+unlock_out:
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b9693c..e6b622e1ba92 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
u32 msixpba;
};
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_512K)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
+
struct ioat_descs {
void *virt;
dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
- struct ioat_descs descs[2];
+ struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
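For orientation, the sizing implied by the macro changes above, assuming IOAT_DESC_SZ is 64 bytes as defined elsewhere in the driver (a sketch, not part of the patch):

/*
 *   IOAT_DESCS_PER_CHUNK = IOAT_CHUNK_SIZE / IOAT_DESC_SZ
 *                        = 512 KiB / 64   = 8192 descriptors per chunk
 *   descs[] entries      = IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK
 *                        = 65536 / 8192   = 8 chunks
 *
 * so a full-order ring still covers 4 MiB of descriptors, now split across
 * eight 512 KiB coherent allocations instead of two 2 MiB ones.
 */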
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afbb896c..58d13564f88b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
}
for (i = 0; i < ioat_chan->desc_chunks; i++) {
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
ioat_chan->descs[i].virt,
ioat_chan->descs[i].hw);
ioat_chan->descs[i].virt = NULL;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index d683232d7fea..dbc6a48424fa 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -235,7 +235,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
tdcr |= TDCR_BURSTSZ_128B;
break;
default:
- dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ dev_err(tdmac->dev, "unknown burst size.\n");
return -EINVAL;
}
@@ -250,7 +250,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
tdcr |= TDCR_SSZ_32_BITS;
break;
default:
- dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
+ dev_err(tdmac->dev, "unknown bus size.\n");
return -EINVAL;
}
} else if (tdmac->type == PXA910_SQU) {
@@ -276,7 +276,7 @@ static int mmp_tdma_config_chan(struct dma_chan *chan)
tdcr |= TDCR_BURSTSZ_SQU_32B;
break;
default:
- dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ dev_err(tdmac->dev, "unknown burst size.\n");
return -EINVAL;
}
}
@@ -429,8 +429,15 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
int num_periods = buf_len / period_len;
int i = 0, buf = 0;
- if (tdmac->status != DMA_COMPLETE)
+ if (!is_slave_direction(direction)) {
+ dev_err(tdmac->dev, "unsupported transfer direction\n");
return NULL;
+ }
+
+ if (tdmac->status != DMA_COMPLETE) {
+ dev_err(tdmac->dev, "controller busy");
+ return NULL;
+ }
if (period_len > TDMA_MAX_XFER_BYTES) {
dev_err(tdmac->dev,
@@ -704,6 +711,17 @@ static int mmp_tdma_probe(struct platform_device *pdev)
tdev->device.device_terminate_all = mmp_tdma_terminate_all;
tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
+ tdev->device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ if (type == MMP_AUD_TDMA) {
+ tdev->device.max_burst = SZ_128;
+ tdev->device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ tdev->device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ } else if (type == PXA910_SQU) {
+ tdev->device.max_burst = SZ_32;
+ }
+ tdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ tdev->device.descriptor_reuse = true;
+
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index e04499c1f27f..4ab493d46375 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -568,7 +568,7 @@ static int moxart_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct resource *res;
- static void __iomem *dma_base_addr;
+ void __iomem *dma_base_addr;
int ret, i;
unsigned int irq;
struct moxart_chan *ch;
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index ef73f65224b1..5a08dd0d3388 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -74,7 +74,7 @@ struct bam_async_desc {
struct list_head desc_node;
enum dma_transfer_direction dir;
size_t length;
- struct bam_desc_hw desc[0];
+ struct bam_desc_hw desc[];
};
enum bam_reg {
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 411f91fde734..0a6d3ea08c78 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -550,7 +550,7 @@ static void hidma_free_chan_resources(struct dma_chan *dmach)
kfree(mdesc);
}
- mchan->allocated = 0;
+ mchan->allocated = false;
spin_unlock_irqrestore(&mchan->lock, irqflags);
}
@@ -897,7 +897,6 @@ uninit:
if (msi)
hidma_free_msis(dmadev);
- hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
if (dmadev)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 6d0bec947636..5c118c7e02bd 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -506,11 +506,11 @@ static int sf_pdma_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pdma->membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pdma->membase))
- goto ERR_MEMBASE;
+ return PTR_ERR(pdma->membase);
ret = sf_pdma_irq_init(pdev, pdma);
if (ret)
- goto ERR_INITIRQ;
+ return ret;
sf_pdma_setup_chans(pdma);
@@ -544,24 +544,13 @@ static int sf_pdma_probe(struct platform_device *pdev)
"Failed to set DMA mask. Fall back to default.\n");
ret = dma_async_device_register(&pdma->dma_dev);
- if (ret)
- goto ERR_REG_DMADEVICE;
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register SiFive Platform DMA. (%d)\n", ret);
+ return ret;
+ }
return 0;
-
-ERR_MEMBASE:
- devm_kfree(&pdev->dev, pdma);
- return PTR_ERR(pdma->membase);
-
-ERR_INITIRQ:
- devm_kfree(&pdev->dev, pdma);
- return ret;
-
-ERR_REG_DMADEVICE:
- devm_kfree(&pdev->dev, pdma);
- dev_err(&pdev->dev,
- "Can't register SiFive Platform DMA. (%d)\n", ret);
- return ret;
}
static int sf_pdma_remove(struct platform_device *pdev)
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 0ddbaa4b4f0b..96ad1b3d24c6 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -117,6 +117,7 @@
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL 0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL 0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL 0x03
+#define STM32_DMA_FIFO_THRESHOLD_NONE 0x04
#define STM32_DMA_MAX_DATA_ITEMS 0xffff
/*
@@ -136,6 +137,9 @@
/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
+#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
+#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \
+ >> 2)
enum stm32_dma_width {
STM32_DMA_BYTE,
@@ -281,6 +285,9 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
{
u32 remaining;
+ if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+ return false;
+
if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
if (burst != 0) {
/*
@@ -302,6 +309,10 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
+ /* If FIFO direct mode, burst is not possible */
+ if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
+ return false;
+
/*
* Buffer or period length has to be aligned on FIFO depth.
* Otherwise bytes may be stuck within FIFO at buffer or period
@@ -657,6 +668,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
}
}
+ if (status & STM32_DMA_DMEI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
+ status &= ~STM32_DMA_DMEI;
+ if (sfcr & STM32_DMA_SCR_DMEIE)
+ dev_dbg(chan2dev(chan), "Direct mode overrun\n");
+ }
if (status) {
stm32_dma_irq_clear(chan, status);
dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
@@ -692,13 +709,13 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
int src_bus_width, dst_bus_width;
int src_burst_size, dst_burst_size;
u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
- u32 dma_scr, threshold;
+ u32 dma_scr, fifoth;
src_addr_width = chan->dma_sconfig.src_addr_width;
dst_addr_width = chan->dma_sconfig.dst_addr_width;
src_maxburst = chan->dma_sconfig.src_maxburst;
dst_maxburst = chan->dma_sconfig.dst_maxburst;
- threshold = chan->threshold;
+ fifoth = chan->threshold;
switch (direction) {
case DMA_MEM_TO_DEV:
@@ -710,7 +727,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set device burst size */
dst_best_burst = stm32_dma_get_best_burst(buf_len,
dst_maxburst,
- threshold,
+ fifoth,
dst_addr_width);
dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -718,7 +735,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return dst_burst_size;
/* Set memory data size */
- src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+ src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
chan->mem_width = src_addr_width;
src_bus_width = stm32_dma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
@@ -728,7 +745,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
src_maxburst = STM32_DMA_MAX_BURST;
src_best_burst = stm32_dma_get_best_burst(buf_len,
src_maxburst,
- threshold,
+ fifoth,
src_addr_width);
src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
if (src_burst_size < 0)
@@ -742,7 +759,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+ if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+ chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
@@ -758,7 +776,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set device burst size */
src_best_burst = stm32_dma_get_best_burst(buf_len,
src_maxburst,
- threshold,
+ fifoth,
src_addr_width);
chan->mem_burst = src_best_burst;
src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
@@ -766,7 +784,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return src_burst_size;
/* Set memory data size */
- dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
+ dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
chan->mem_width = dst_addr_width;
dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
@@ -776,7 +794,7 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
dst_maxburst = STM32_DMA_MAX_BURST;
dst_best_burst = stm32_dma_get_best_burst(buf_len,
dst_maxburst,
- threshold,
+ fifoth,
dst_addr_width);
chan->mem_burst = dst_best_burst;
dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
@@ -791,7 +809,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
/* Set FIFO threshold */
chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
- chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);
+ if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
+ chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);
/* Set peripheral address */
chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
@@ -1216,6 +1235,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;
chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
+ if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
+ chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
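The two feature macros added above combine as follows when a channel is configured; a minimal sketch reusing the patch's own macros (only the function name is illustrative):

static u32 example_pick_threshold(u32 features)
{
	/* Direct mode bypasses the FIFO, so no threshold applies at all. */
	if (STM32_DMA_DIRECT_MODE_GET(features))
		return STM32_DMA_FIFO_THRESHOLD_NONE;

	return STM32_DMA_THRESHOLD_FTR_GET(features);
}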
diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig
index f76e06651f80..79618fac119a 100644
--- a/drivers/dma/ti/Kconfig
+++ b/drivers/dma/ti/Kconfig
@@ -36,7 +36,7 @@ config DMA_OMAP
config TI_K3_UDMA
bool "Texas Instruments UDMA support"
- depends on ARCH_K3 || COMPILE_TEST
+ depends on ARCH_K3
depends on TI_SCI_PROTOCOL
depends on TI_SCI_INTA_IRQCHIP
select DMA_ENGINE
@@ -49,7 +49,7 @@ config TI_K3_UDMA
config TI_K3_UDMA_GLUE_LAYER
bool "Texas Instruments UDMA Glue layer for non DMAengine users"
- depends on ARCH_K3 || COMPILE_TEST
+ depends on ARCH_K3
depends on TI_K3_UDMA
help
Say y here to support the K3 NAVSS DMA glue interface
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index a90e154b0ae0..945b7c604f91 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -231,7 +231,6 @@ struct udma_chan {
struct udma_tx_drain tx_drain;
u32 bcnt; /* number of bytes completed since the start of the channel */
- u32 in_ring_cnt; /* number of descriptors in flight */
/* Channel configuration parameters */
struct udma_chan_config config;
@@ -574,7 +573,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
struct udma_desc *d = uc->desc;
struct k3_ring *ring = NULL;
dma_addr_t paddr;
- int ret;
switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
@@ -598,11 +596,7 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
udma_sync_for_device(uc, idx);
}
- ret = k3_ringacc_ring_push(ring, &paddr);
- if (!ret)
- uc->in_ring_cnt++;
-
- return ret;
+ return k3_ringacc_ring_push(ring, &paddr);
}
static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
@@ -655,9 +649,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
d->hwdesc[0].cppi5_desc_size,
DMA_FROM_DEVICE);
rmb(); /* Ensure that reads are not moved before this point */
-
- if (!ret)
- uc->in_ring_cnt--;
}
return ret;
@@ -697,8 +688,6 @@ static void udma_reset_rings(struct udma_chan *uc)
udma_desc_free(&uc->terminated_desc->vd);
uc->terminated_desc = NULL;
}
-
- uc->in_ring_cnt = 0;
}
static void udma_reset_counters(struct udma_chan *uc)
@@ -1073,9 +1062,6 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
/* Teardown completion message */
if (cppi5_desc_is_tdcm(paddr)) {
- /* Compensate our internal pop/push counter */
- uc->in_ring_cnt++;
-
complete_all(&uc->teardown_completed);
if (uc->terminated_desc) {
@@ -1291,10 +1277,8 @@ static int udma_get_tchan(struct udma_chan *uc)
}
uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
- if (IS_ERR(uc->tchan))
- return PTR_ERR(uc->tchan);
- return 0;
+ return PTR_ERR_OR_ZERO(uc->tchan);
}
static int udma_get_rchan(struct udma_chan *uc)
@@ -1308,10 +1292,8 @@ static int udma_get_rchan(struct udma_chan *uc)
}
uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
- if (IS_ERR(uc->rchan))
- return PTR_ERR(uc->rchan);
- return 0;
+ return PTR_ERR_OR_ZERO(uc->rchan);
}
static int udma_get_chan_pair(struct udma_chan *uc)
@@ -1373,10 +1355,8 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
}
uc->rflow = __udma_get_rflow(ud, flow_id);
- if (IS_ERR(uc->rflow))
- return PTR_ERR(uc->rflow);
- return 0;
+ return PTR_ERR_OR_ZERO(uc->rflow);
}
static void udma_put_rchan(struct udma_chan *uc)
@@ -1870,6 +1850,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
udma_stop(uc);
if (udma_is_chan_running(uc)) {
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
goto err_res_free;
}
}
@@ -3189,7 +3170,7 @@ static struct udma_match_data am654_main_data = {
static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
- .enable_memcpy_support = true, /* TEST: DMA domains */
+ .enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
.rchan_oes_offset = 0x2000,
.tpl_levels = 2,
@@ -3471,6 +3452,9 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
tr_req->icnt0 = rx_flush->buffer_size;
tr_req->icnt1 = 1;
+ dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
+ hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
+
/* Set up descriptor to be used for packet mode */
hwdesc = &rx_flush->hwdescs[1];
hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index ad02dc6747a4..0317b614b680 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -124,7 +124,7 @@ static int adc_jack_probe(struct platform_device *pdev)
for (i = 0; data->adc_conditions[i].id != EXTCON_NONE; i++);
data->num_conditions = i;
- data->chan = iio_channel_get(&pdev->dev, pdata->consumer_channel);
+ data->chan = devm_iio_channel_get(&pdev->dev, pdata->consumer_channel);
if (IS_ERR(data->chan))
return PTR_ERR(data->chan);
@@ -164,7 +164,6 @@ static int adc_jack_remove(struct platform_device *pdev)
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
- iio_channel_release(data->chan);
return 0;
}
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7401733db08b..aae82db542a5 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1460,7 +1460,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (!info->input) {
dev_err(arizona->dev, "Can't allocate input dev\n");
ret = -ENOMEM;
- goto err_register;
+ return ret;
}
info->input->name = "Headset";
@@ -1492,7 +1492,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
pdata->micd_pol_gpio, ret);
- goto err_register;
+ return ret;
}
info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
@@ -1515,7 +1515,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_err(arizona->dev,
"Failed to get microphone polarity GPIO: %d\n",
ret);
- goto err_register;
+ return ret;
}
}
@@ -1672,7 +1672,7 @@ static int arizona_extcon_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
ret);
- goto err_gpio;
+ goto err_pm;
}
ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
@@ -1721,14 +1721,14 @@ static int arizona_extcon_probe(struct platform_device *pdev)
dev_warn(arizona->dev, "Failed to set MICVDD to bypass: %d\n",
ret);
- pm_runtime_put(&pdev->dev);
-
ret = input_register_device(info->input);
if (ret) {
dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
goto err_hpdet;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
err_hpdet:
@@ -1743,10 +1743,11 @@ err_rise_wake:
arizona_set_irq_wake(arizona, jack_irq_rise, 0);
err_rise:
arizona_free_irq(arizona, jack_irq_rise, info);
+err_pm:
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
err_gpio:
gpiod_put(info->micd_pol_gpio);
-err_register:
- pm_runtime_disable(&pdev->dev);
return ret;
}
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 32f663436e6e..cc47d626095c 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -782,9 +782,19 @@ static const struct platform_device_id max14577_muic_id[] = {
};
MODULE_DEVICE_TABLE(platform, max14577_muic_id);
+static const struct of_device_id of_max14577_muic_dt_match[] = {
+ { .compatible = "maxim,max14577-muic",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, },
+ { .compatible = "maxim,max77836-muic",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_max14577_muic_dt_match);
+
static struct platform_driver max14577_muic_driver = {
.driver = {
.name = "max14577-muic",
+ .of_match_table = of_max14577_muic_dt_match,
},
.probe = max14577_muic_probe,
.remove = max14577_muic_remove,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 2dfbfec572f9..0a6438cbb3f3 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -900,7 +900,7 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
{
unsigned long flags;
- int ret, idx = -EINVAL;
+ int ret, idx;
if (!edev || !nb)
return -EINVAL;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 4843e94713a4..fbd785dd0513 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -178,8 +178,9 @@ config ISCSI_IBFT
Otherwise, say N.
config RASPBERRYPI_FIRMWARE
- tristate "Raspberry Pi Firmware Driver"
+ bool "Raspberry Pi Firmware Driver"
depends on BCM2835_MBOX
+ default USB_PCI
help
This option enables support for communicating with the firmware on the
Raspberry Pi.
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 6694d0d908d6..1cad32b38b29 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -2,6 +2,8 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
-scmi-transport-y = mailbox.o shmem.o
+scmi-transport-y = shmem.o
+scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_PSCI_FW) += smc.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index f804e8af6521..ce7d9203e41b 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -14,6 +14,13 @@ enum scmi_base_protocol_cmd {
BASE_DISCOVER_LIST_PROTOCOLS = 0x6,
BASE_DISCOVER_AGENT = 0x7,
BASE_NOTIFY_ERRORS = 0x8,
+ BASE_SET_DEVICE_PERMISSIONS = 0x9,
+ BASE_SET_PROTOCOL_PERMISSIONS = 0xa,
+ BASE_RESET_AGENT_CONFIGURATION = 0xb,
+};
+
+enum scmi_base_protocol_notify {
+ BASE_ERROR_EVENT = 0x0,
};
struct scmi_msg_resp_base_attributes {
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 5ac06469b01c..31fe5a22a011 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -178,6 +178,8 @@ struct scmi_chan_info {
* @send_message: Callback to send a message
* @mark_txdone: Callback to mark tx as done
* @fetch_response: Callback to fetch response
+ * @fetch_notification: Callback to fetch notification
+ * @clear_channel: Callback to clear a channel
* @poll_done: Callback to poll transfer status
*/
struct scmi_transport_ops {
@@ -190,6 +192,9 @@ struct scmi_transport_ops {
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
+ void (*fetch_notification)(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer);
+ void (*clear_channel)(struct scmi_chan_info *cinfo);
bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};
@@ -210,6 +215,9 @@ struct scmi_desc {
};
extern const struct scmi_desc scmi_mailbox_desc;
+#ifdef CONFIG_HAVE_ARM_SMCCC
+extern const struct scmi_desc scmi_smc_desc;
+#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
@@ -222,5 +230,8 @@ void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer);
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer);
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index dbec767222e9..7483cacf63f9 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -76,6 +76,7 @@ struct scmi_xfers_info {
* implementation version and (sub-)vendor identification.
* @handle: Instance of SCMI handle to send to clients
* @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of
@@ -89,6 +90,7 @@ struct scmi_info {
struct scmi_revision_info version;
struct scmi_handle handle;
struct scmi_xfers_info tx_minfo;
+ struct scmi_xfers_info rx_minfo;
struct idr tx_idr;
struct idr rx_idr;
u8 *protocols_imp;
@@ -200,37 +202,66 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}
-/**
- * scmi_rx_callback() - callback for receiving messages
- *
- * @cinfo: SCMI channel info
- * @msg_hdr: Message header
- *
- * Processes one received message to appropriate transfer information and
- * signals completion of the transfer.
- *
- * NOTE: This function will be invoked in IRQ context, hence should be
- * as optimal as possible.
- */
-void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
- struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
- struct scmi_xfers_info *minfo = &info->tx_minfo;
- u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
- u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
- struct device *dev = cinfo->dev;
struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->rx_minfo;
- if (msg_type == MSG_TYPE_NOTIFICATION)
- return; /* Notifications not yet supported */
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
+ if (IS_ERR(xfer)) {
+ dev_err(dev, "failed to get free message slot (%ld)\n",
+ PTR_ERR(xfer));
+ info->desc->ops->clear_channel(cinfo);
+ return;
+ }
+
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
+ scmi_dump_header_dbg(dev, &xfer->hdr);
+ info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
+ xfer);
+
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
+ xfer->hdr.protocol_id, xfer->hdr.seq,
+ MSG_TYPE_NOTIFICATION);
+
+ __scmi_xfer_put(minfo, xfer);
+
+ info->desc->ops->clear_channel(cinfo);
+}
+
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
+ u16 xfer_id, u8 msg_type)
+{
+ struct scmi_xfer *xfer;
+ struct device *dev = cinfo->dev;
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
+ info->desc->ops->clear_channel(cinfo);
return;
}
xfer = &minfo->xfer_block[xfer_id];
+ /*
+ * Even if a response was indeed expected on this slot at this point,
+ * a buggy platform could wrongly reply feeding us an unexpected
+ * delayed response we're not prepared to handle: bail-out safely
+ * blaming firmware.
+ */
+ if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
+ dev_err(dev,
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
+ xfer_id);
+ info->desc->ops->clear_channel(cinfo);
+ /* It was unexpected, so nobody will clear the xfer if not us */
+ __scmi_xfer_put(minfo, xfer);
+ return;
+ }
scmi_dump_header_dbg(dev, &xfer->hdr);
@@ -240,10 +271,43 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
xfer->hdr.protocol_id, xfer->hdr.seq,
msg_type);
- if (msg_type == MSG_TYPE_DELAYED_RESP)
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
+ info->desc->ops->clear_channel(cinfo);
complete(xfer->async_done);
- else
+ } else {
complete(&xfer->done);
+ }
+}
+
+/**
+ * scmi_rx_callback() - callback for receiving messages
+ *
+ * @cinfo: SCMI channel info
+ * @msg_hdr: Message header
+ *
+ * Processes one received message to appropriate transfer information and
+ * signals completion of the transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence should be
+ * as optimal as possible.
+ */
+void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
+{
+ u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
+ u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
+
+ switch (msg_type) {
+ case MSG_TYPE_NOTIFICATION:
+ scmi_handle_notification(cinfo, msg_hdr);
+ break;
+ case MSG_TYPE_COMMAND:
+ case MSG_TYPE_DELAYED_RESP:
+ scmi_handle_response(cinfo, xfer_id, msg_type);
+ break;
+ default:
+ WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+ break;
+ }
}
/**
@@ -525,13 +589,13 @@ int scmi_handle_put(const struct scmi_handle *handle)
return 0;
}
-static int scmi_xfer_info_init(struct scmi_info *sinfo)
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+ struct scmi_xfers_info *info)
{
int i;
struct scmi_xfer *xfer;
struct device *dev = sinfo->dev;
const struct scmi_desc *desc = sinfo->desc;
- struct scmi_xfers_info *info = &sinfo->tx_minfo;
/* Pre-allocated messages, no more than what hdr.seq can support */
if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
@@ -566,6 +630,16 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
return 0;
}
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+ int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
+ if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+ ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+ return ret;
+}
+
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
int prot_id, bool tx)
{
@@ -699,10 +773,6 @@ static int scmi_probe(struct platform_device *pdev)
info->desc = desc;
INIT_LIST_HEAD(&info->node);
- ret = scmi_xfer_info_init(info);
- if (ret)
- return ret;
-
platform_set_drvdata(pdev, info);
idr_init(&info->tx_idr);
idr_init(&info->rx_idr);
@@ -715,6 +785,10 @@ static int scmi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = scmi_xfer_info_init(info);
+ if (ret)
+ return ret;
+
ret = scmi_base_protocol_init(handle);
if (ret) {
dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
@@ -827,6 +901,9 @@ ATTRIBUTE_GROUPS(versions);
/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
+#ifdef CONFIG_ARM_PSCI_FW
+ { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
+#endif
{ /* Sentinel */ },
};
diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index 73077bbc4ad9..6998dc86b5ce 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -158,6 +158,21 @@ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(smbox->shmem, xfer);
}
+static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_fetch_notification(smbox->shmem, max_len, xfer);
+}
+
+static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
+{
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+ shmem_clear_channel(smbox->shmem);
+}
+
static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
@@ -173,6 +188,8 @@ static struct scmi_transport_ops scmi_mailbox_ops = {
.send_message = mailbox_send_message,
.mark_txdone = mailbox_mark_txdone,
.fetch_response = mailbox_fetch_response,
+ .fetch_notification = mailbox_fetch_notification,
+ .clear_channel = mailbox_clear_channel,
.poll_done = mailbox_poll_done,
};
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 34f3a917dd8d..eadc171e254b 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -27,6 +27,11 @@ enum scmi_performance_protocol_cmd {
PERF_DESCRIBE_FASTCHANNEL = 0xb,
};
+enum scmi_performance_protocol_notify {
+ PERFORMANCE_LIMITS_CHANGED = 0x0,
+ PERFORMANCE_LEVEL_CHANGED = 0x1,
+};
+
struct scmi_opp {
u32 perf;
u32 power;
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index 214886ce84f1..cf7f0312381b 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -12,6 +12,12 @@ enum scmi_power_protocol_cmd {
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
+ POWER_STATE_CHANGE_REQUESTED_NOTIFY = 0x7,
+};
+
+enum scmi_power_protocol_notify {
+ POWER_STATE_CHANGED = 0x0,
+ POWER_STATE_CHANGE_REQUESTED = 0x1,
};
struct scmi_msg_resp_power_attributes {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index eba61b9c1f53..db1b1ab303da 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -14,6 +14,10 @@ enum scmi_sensor_protocol_cmd {
SENSOR_READING_GET = 0x6,
};
+enum scmi_sensor_protocol_notify {
+ SENSOR_TRIP_POINT_EVENT = 0x0,
+};
+
struct scmi_msg_resp_sensor_attributes {
__le16 num_sensors;
u8 max_requests;
diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c
index e1e816e0018c..0e3eaea5d852 100644
--- a/drivers/firmware/arm_scmi/shmem.c
+++ b/drivers/firmware/arm_scmi/shmem.c
@@ -67,6 +67,21 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}
+void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
+ size_t max_len, struct scmi_xfer *xfer)
+{
+	/* Skip only the length of the header in the shmem area, i.e. 4 bytes */
+ xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+
+	/* Take a copy to the rx buffer */
+ memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+}
+
+void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem)
+{
+ iowrite32(SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE, &shmem->channel_status);
+}
+
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer)
{
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
new file mode 100644
index 000000000000..49bc4b0e8428
--- /dev/null
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System Control and Management Interface (SCMI) Message SMC/HVC
+ * Transport driver
+ *
+ * Copyright 2020 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+/**
+ * struct scmi_smc - Structure representing a SCMI smc transport
+ *
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
+ * @shmem_lock: Lock to protect access to Tx/Rx shared memory area
+ * @func_id: smc/hvc call function id
+ */
+
+struct scmi_smc {
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
+ struct mutex shmem_lock;
+ u32 func_id;
+};
+
+static bool smc_chan_available(struct device *dev, int idx)
+{
+ struct device_node *np = of_parse_phandle(dev->of_node, "shmem", 0);
+ if (!np)
+ return false;
+
+ of_node_put(np);
+ return true;
+}
+
+static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ bool tx)
+{
+ struct device *cdev = cinfo->dev;
+ struct scmi_smc *scmi_info;
+ resource_size_t size;
+ struct resource res;
+ struct device_node *np;
+ u32 func_id;
+ int ret;
+
+ if (!tx)
+ return -ENODEV;
+
+ scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
+ if (!scmi_info)
+ return -ENOMEM;
+
+ np = of_parse_phandle(cdev->of_node, "shmem", 0);
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret) {
+ dev_err(cdev, "failed to get SCMI Tx shared memory\n");
+ return ret;
+ }
+
+ size = resource_size(&res);
+ scmi_info->shmem = devm_ioremap(dev, res.start, size);
+ if (!scmi_info->shmem) {
+ dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
+ if (ret < 0)
+ return ret;
+
+ scmi_info->func_id = func_id;
+ scmi_info->cinfo = cinfo;
+ mutex_init(&scmi_info->shmem_lock);
+ cinfo->transport_info = scmi_info;
+
+ return 0;
+}
+
+static int smc_chan_free(int id, void *p, void *data)
+{
+ struct scmi_chan_info *cinfo = p;
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ cinfo->transport_info = NULL;
+ scmi_info->cinfo = NULL;
+
+ scmi_free_channel(cinfo, data, id);
+
+ return 0;
+}
+
+static int smc_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+ struct arm_smccc_res res;
+
+ mutex_lock(&scmi_info->shmem_lock);
+
+ shmem_tx_prepare(scmi_info->shmem, xfer);
+
+ arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
+ scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+
+ mutex_unlock(&scmi_info->shmem_lock);
+
+	/* SMCCC_RET_NOT_SUPPORTED is the only valid error code here */
+ if (res.a0)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void smc_fetch_response(struct scmi_chan_info *cinfo,
+ struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ shmem_fetch_response(scmi_info->shmem, xfer);
+}
+
+static bool
+smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
+{
+ struct scmi_smc *scmi_info = cinfo->transport_info;
+
+ return shmem_poll_done(scmi_info->shmem, xfer);
+}
+
+static struct scmi_transport_ops scmi_smc_ops = {
+ .chan_available = smc_chan_available,
+ .chan_setup = smc_chan_setup,
+ .chan_free = smc_chan_free,
+ .send_message = smc_send_message,
+ .fetch_response = smc_fetch_response,
+ .poll_done = smc_poll_done,
+};
+
+const struct scmi_desc scmi_smc_desc = {
+ .ops = &scmi_smc_ops,
+ .max_rx_timeout_ms = 30,
+ .max_msg = 1,
+ .max_msg_size = 128,
+};
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index ff39f64f2aae..86d71b0212b1 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -42,6 +42,8 @@ DEFINE_DMI_ATTR_WITH_SHOW(bios_vendor, 0444, DMI_BIOS_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(bios_version, 0444, DMI_BIOS_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(bios_date, 0444, DMI_BIOS_DATE);
DEFINE_DMI_ATTR_WITH_SHOW(sys_vendor, 0444, DMI_SYS_VENDOR);
+DEFINE_DMI_ATTR_WITH_SHOW(bios_release, 0444, DMI_BIOS_RELEASE);
+DEFINE_DMI_ATTR_WITH_SHOW(ec_firmware_release, 0444, DMI_EC_FIRMWARE_RELEASE);
DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
@@ -78,6 +80,8 @@ static ssize_t get_modalias(char *buffer, size_t buffer_size)
{ "bvn", DMI_BIOS_VENDOR },
{ "bvr", DMI_BIOS_VERSION },
{ "bd", DMI_BIOS_DATE },
+ { "br", DMI_BIOS_RELEASE },
+ { "efr", DMI_EC_FIRMWARE_RELEASE },
{ "svn", DMI_SYS_VENDOR },
{ "pn", DMI_PRODUCT_NAME },
{ "pvr", DMI_PRODUCT_VERSION },
@@ -187,6 +191,8 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(bios_vendor, DMI_BIOS_VENDOR);
ADD_DMI_ATTR(bios_version, DMI_BIOS_VERSION);
ADD_DMI_ATTR(bios_date, DMI_BIOS_DATE);
+ ADD_DMI_ATTR(bios_release, DMI_BIOS_RELEASE);
+ ADD_DMI_ATTR(ec_firmware_release, DMI_EC_FIRMWARE_RELEASE);
ADD_DMI_ATTR(sys_vendor, DMI_SYS_VENDOR);
ADD_DMI_ATTR(product_name, DMI_PRODUCT_NAME);
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index f59163cb7cba..5066d1f1d687 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -186,6 +186,34 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
dmi_ident[slot] = p;
}
+static void __init dmi_save_release(const struct dmi_header *dm, int slot,
+ int index)
+{
+ const u8 *minor, *major;
+ char *s;
+
+ /* If the table doesn't have the field, let's return */
+ if (dmi_ident[slot] || dm->length < index)
+ return;
+
+ minor = (u8 *) dm + index;
+ major = (u8 *) dm + index - 1;
+
+	/*
+	 * As per the spec, if the system doesn't support this field,
+	 * the value is 0xFF
+	 */
+ if (*major == 0xFF && *minor == 0xFF)
+ return;
+
+ s = dmi_alloc(8);
+ if (!s)
+ return;
+
+ sprintf(s, "%u.%u", *major, *minor);
+
+ dmi_ident[slot] = s;
+}
+
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
@@ -444,6 +472,8 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
+ dmi_save_release(dm, DMI_BIOS_RELEASE, 21);
+ dmi_save_release(dm, DMI_EC_FIRMWARE_RELEASE, 23);
break;
case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
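
A hypothetical in-kernel consumer of the two new identifiers might look like the sketch below (the DMI_BIOS_RELEASE/DMI_EC_FIRMWARE_RELEASE enum additions live in include/linux/dmi.h and are not part of this hunk); drivers can also match on the new "br"/"efr" modalias tokens added in dmi-id.c.

#include <linux/dmi.h>
#include <linux/printk.h>

static void example_print_firmware_releases(void)
{
	const char *bios_rel = dmi_get_system_info(DMI_BIOS_RELEASE);
	const char *ec_rel = dmi_get_system_info(DMI_EC_FIRMWARE_RELEASE);

	/* Either pointer may be NULL when the table reports 0xFF.0xFF */
	pr_info("BIOS release %s, EC firmware release %s\n",
		bios_rel ?: "n/a", ec_rel ?: "n/a");
}
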
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index b876373f2297..3359ae2adf24 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -18,12 +18,12 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#if defined(CONFIG_PTDUMP_DEBUGFS) && defined(CONFIG_ARM64)
#include <asm/ptdump.h>
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9357d6b6e87c..7f1657b6c30d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -54,7 +54,7 @@ struct mm_struct efi_mm = {
.mm_rb = RB_ROOT,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+ MMAP_LOCK_INITIALIZER(efi_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index cce4a7436052..75daaf20374e 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -37,7 +37,9 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
GCOV_PROFILE := n
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index f71eaa5bf52d..2ab048222fe9 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -8,7 +8,6 @@
*/
#include <linux/err.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
@@ -38,6 +37,7 @@ struct imx_sc_ipc {
struct device *dev;
struct mutex lock;
struct completion done;
+ bool fast_ipc;
/* temporarily store the SCU msg */
u32 *msg;
@@ -115,6 +115,7 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
struct imx_sc_rpc_msg *hdr;
u32 *data = msg;
+ int i;
if (!sc_ipc->msg) {
dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
@@ -122,6 +123,19 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
return;
}
+ if (sc_ipc->fast_ipc) {
+ hdr = msg;
+ sc_ipc->rx_size = hdr->size;
+ sc_ipc->msg[0] = *data++;
+
+ for (i = 1; i < sc_ipc->rx_size; i++)
+ sc_ipc->msg[i] = *data++;
+
+ complete(&sc_ipc->done);
+
+ return;
+ }
+
if (sc_chan->idx == 0) {
hdr = msg;
sc_ipc->rx_size = hdr->size;
@@ -143,20 +157,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
{
- struct imx_sc_rpc_msg *hdr = msg;
+ struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
struct imx_sc_chan *sc_chan;
u32 *data = msg;
int ret;
+ int size;
int i;
/* Check size */
- if (hdr->size > IMX_SC_RPC_MAX_MSG)
+ if (hdr.size > IMX_SC_RPC_MAX_MSG)
return -EINVAL;
- dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc,
- hdr->func, hdr->size);
+ dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
+ hdr.func, hdr.size);
- for (i = 0; i < hdr->size; i++) {
+ size = sc_ipc->fast_ipc ? 1 : hdr.size;
+ for (i = 0; i < size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
/*
@@ -168,8 +184,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
* Wait for tx_done before every send to ensure that no
* queueing happens at the mailbox channel level.
*/
- wait_for_completion(&sc_chan->tx_done);
- reinit_completion(&sc_chan->tx_done);
+ if (!sc_ipc->fast_ipc) {
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+ }
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
@@ -246,6 +264,8 @@ static int imx_scu_probe(struct platform_device *pdev)
struct imx_sc_chan *sc_chan;
struct mbox_client *cl;
char *chan_name;
+ struct of_phandle_args args;
+ int num_channel;
int ret;
int i;
@@ -253,11 +273,20 @@ static int imx_scu_probe(struct platform_device *pdev)
if (!sc_ipc)
return -ENOMEM;
- for (i = 0; i < SCU_MU_CHAN_NUM; i++) {
- if (i < 4)
+ ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
+ "#mbox-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+
+ num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
+ for (i = 0; i < num_channel; i++) {
+ if (i < num_channel / 2)
chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
else
- chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4);
+ chan_name = kasprintf(GFP_KERNEL, "rx%d",
+ i - num_channel / 2);
if (!chan_name)
return -ENOMEM;
@@ -269,19 +298,22 @@ static int imx_scu_probe(struct platform_device *pdev)
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
- /* Initial tx_done completion as "done" */
- cl->tx_done = imx_scu_tx_done;
- init_completion(&sc_chan->tx_done);
- complete(&sc_chan->tx_done);
+ if (!sc_ipc->fast_ipc) {
+ /* Initial tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+ }
sc_chan->sc_ipc = sc_ipc;
- sc_chan->idx = i % 4;
+ sc_chan->idx = i % (num_channel / 2);
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(sc_chan->ch)) {
ret = PTR_ERR(sc_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
chan_name, ret);
+ kfree(chan_name);
return ret;
}
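
Existing SCU clients are unaffected by the fast-IPC path added above; a minimal sketch of a caller is shown below, with the function id and payload used purely as placeholders.

#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>

struct imx_sc_msg_example {
	struct imx_sc_rpc_msg hdr;
	u32 data;
} __packed __aligned(4);

static int example_scu_rpc(void)
{
	struct imx_sc_msg_example msg;
	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	msg.hdr.ver = IMX_SC_RPC_VERSION;
	msg.hdr.svc = IMX_SC_RPC_SVC_MISC;
	msg.hdr.func = 1;	/* placeholder function id */
	msg.hdr.size = 2;	/* header plus one u32, in words */
	msg.data = 0;

	/* Blocks until the response has been copied back into msg */
	return imx_scu_call_rpc(ipc, &msg, true);
}
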
diff --git a/drivers/firmware/qcom_scm-legacy.c b/drivers/firmware/qcom_scm-legacy.c
index 8532e7c78ef7..eba6b60bfb61 100644
--- a/drivers/firmware/qcom_scm-legacy.c
+++ b/drivers/firmware/qcom_scm-legacy.c
@@ -56,7 +56,7 @@ struct scm_legacy_command {
__le32 buf_offset;
__le32 resp_hdr_offset;
__le32 id;
- __le32 buf[0];
+ __le32 buf[];
};
/**
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 059bb0fbae9e..0e7233a20f34 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -6,7 +6,6 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
-#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -806,8 +805,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
- phys_addr_t ptr_phys;
- dma_addr_t ptr_dma;
+ dma_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
@@ -824,10 +822,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
- ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
/* Fill source vmid detail */
src = ptr;
@@ -855,7 +852,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
+ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
@@ -943,7 +940,7 @@ bool qcom_scm_hdcp_available(void)
qcom_scm_clk_disable();
- return ret > 0 ? true : false;
+ return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index a3e85186f8e6..ef8098856a47 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -12,6 +12,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf))
@@ -19,6 +21,8 @@
#define MBOX_DATA28(msg) ((msg) & ~0xf)
#define MBOX_CHAN_PROPERTY 8
+#define VL805_PCI_CONFIG_VERSION_OFFSET 0x50
+
static struct platform_device *rpi_hwmon;
static struct platform_device *rpi_clk;
@@ -280,6 +284,63 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
}
EXPORT_SYMBOL_GPL(rpi_firmware_get);
+/*
+ * The Raspberry Pi 4 gets its USB functionality from VL805, a PCIe chip that
+ * implements xHCI. After a PCI reset, VL805's firmware may either be loaded
+ * directly from an EEPROM or, if not present, by the SoC's co-processor,
+ * VideoCore. RPi4's VideoCore OS contains both the non public firmware load
+ * logic and the VL805 firmware blob. This function triggers the aforementioned
+ * process.
+ */
+int rpi_firmware_init_vl805(struct pci_dev *pdev)
+{
+ struct device_node *fw_np;
+ struct rpi_firmware *fw;
+ u32 dev_addr, version;
+ int ret;
+
+ fw_np = of_find_compatible_node(NULL, NULL,
+ "raspberrypi,bcm2835-firmware");
+ if (!fw_np)
+ return 0;
+
+ fw = rpi_firmware_get(fw_np);
+ of_node_put(fw_np);
+ if (!fw)
+ return -ENODEV;
+
+ /*
+ * Make sure we don't trigger a firmware load unnecessarily.
+ *
+ * If something went wrong with PCI, this whole exercise would be
+ * futile, as VideoCore expects a fully configured PCI bus from us. Just take
+ * the faulty version (likely ~0) and let xHCI's registration fail
+ * further down the line.
+ */
+ pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET, &version);
+ if (version)
+ goto exit;
+
+ dev_addr = pdev->bus->number << 20 | PCI_SLOT(pdev->devfn) << 15 |
+ PCI_FUNC(pdev->devfn) << 12;
+
+ ret = rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET,
+ &dev_addr, sizeof(dev_addr));
+ if (ret)
+ return ret;
+
+	/* Wait for the VL805 to start up */
+ usleep_range(200, 1000);
+
+ pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET,
+ &version);
+exit:
+ pci_info(pdev, "VL805 firmware version %08x\n", version);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rpi_firmware_init_vl805);
+
static const struct of_device_id rpi_firmware_of_match[] = {
{ .compatible = "raspberrypi,bcm2835-firmware", },
{},
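
The actual caller of rpi_firmware_init_vl805() lives in the USB/PCI quirk code and is not part of this diff; a hypothetical early fixup is sketched below (the 0x3483 device id and the header carrying the prototype are assumptions of this sketch).

#include <linux/pci.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

static void example_vl805_fixup(struct pci_dev *pdev)
{
	int ret = rpi_firmware_init_vl805(pdev);

	if (ret)
		pci_warn(pdev, "VL805 firmware load failed: %d\n", ret);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x3483, example_vl805_fixup);
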
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index f8533338b018..4379475c99ed 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -72,7 +72,7 @@ static void rsu_status_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK)) {
+ if (data->status == BIT(SVC_STATUS_OK)) {
priv->status.version = FIELD_GET(RSU_VERSION_MASK,
res->a2);
priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
@@ -108,9 +108,9 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
{
struct stratix10_rsu_priv *priv = client->priv;
- if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support notify\n");
- else if (data->status == BIT(SVC_STATUS_RSU_ERROR))
+ else if (data->status == BIT(SVC_STATUS_ERROR))
dev_err(client->dev, "Failure, returned status is %lu\n",
BIT(data->status));
@@ -133,9 +133,9 @@ static void rsu_retry_callback(struct stratix10_svc_client *client,
struct stratix10_rsu_priv *priv = client->priv;
unsigned int *counter = (unsigned int *)data->kaddr1;
- if (data->status == BIT(SVC_STATUS_RSU_OK))
+ if (data->status == BIT(SVC_STATUS_OK))
priv->retry_counter = *counter;
- else if (data->status == BIT(SVC_STATUS_RSU_NO_SUPPORT))
+ else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
dev_warn(client->dev, "Secure FW doesn't support retry\n");
else
dev_err(client->dev, "Failed to get retry counter %lu\n",
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index d5f0769f3761..e0db8dbfc9d1 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -214,7 +214,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
complete(&ctrl->complete_status);
break;
}
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_DONE);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_DONE);
cb_data->kaddr1 = svc_pa_to_va(res.a1);
cb_data->kaddr2 = (res.a2) ?
svc_pa_to_va(res.a2) : NULL;
@@ -227,7 +227,7 @@ static void svc_thread_cmd_data_claim(struct stratix10_svc_controller *ctrl,
__func__);
}
} while (res.a0 == INTEL_SIP_SMC_STATUS_OK ||
- res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY ||
+ res.a0 == INTEL_SIP_SMC_STATUS_BUSY ||
wait_for_completion_timeout(&ctrl->complete_status, timeout));
}
@@ -250,7 +250,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
cb_data->kaddr1 = NULL;
cb_data->kaddr2 = NULL;
cb_data->kaddr3 = NULL;
- cb_data->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cb_data->status = BIT(SVC_STATUS_ERROR);
pr_debug("%s: polling config status\n", __func__);
@@ -259,7 +259,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
ctrl->invoke_fn(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
0, 0, 0, 0, 0, 0, 0, &res);
if ((res.a0 == INTEL_SIP_SMC_STATUS_OK) ||
- (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR))
+ (res.a0 == INTEL_SIP_SMC_STATUS_ERROR))
break;
/*
@@ -271,7 +271,7 @@ static void svc_thread_cmd_config_status(struct stratix10_svc_controller *ctrl,
}
if (res.a0 == INTEL_SIP_SMC_STATUS_OK && count_in_sec)
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
p_data->chan->scl->receive_cb(p_data->chan->scl, cb_data);
}
@@ -294,24 +294,18 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
switch (p_data->command) {
case COMMAND_RECONFIG:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_REQUEST_OK);
+ case COMMAND_RSU_UPDATE:
+ case COMMAND_RSU_NOTIFY:
+ cb_data->status = BIT(SVC_STATUS_OK);
break;
case COMMAND_RECONFIG_DATA_SUBMIT:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- break;
- case COMMAND_NOOP:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED);
- cb_data->kaddr1 = svc_pa_to_va(res.a1);
+ cb_data->status = BIT(SVC_STATUS_BUFFER_SUBMITTED);
break;
case COMMAND_RECONFIG_STATUS:
- cb_data->status = BIT(SVC_STATUS_RECONFIG_COMPLETED);
- break;
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_COMPLETED);
break;
case COMMAND_RSU_RETRY:
- cb_data->status = BIT(SVC_STATUS_RSU_OK);
+ cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
default:
@@ -430,9 +424,9 @@ static int svc_normal_to_secure_thread(void *data)
if (pdata->command == COMMAND_RSU_STATUS) {
if (res.a0 == INTEL_SIP_SMC_RSU_ERROR)
- cbdata->status = BIT(SVC_STATUS_RSU_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
else
- cbdata->status = BIT(SVC_STATUS_RSU_OK);
+ cbdata->status = BIT(SVC_STATUS_OK);
cbdata->kaddr1 = &res;
cbdata->kaddr2 = NULL;
@@ -445,7 +439,7 @@ static int svc_normal_to_secure_thread(void *data)
case INTEL_SIP_SMC_STATUS_OK:
svc_thread_recv_status_ok(pdata, cbdata, res);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
+ case INTEL_SIP_SMC_STATUS_BUSY:
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_SUBMIT:
svc_thread_cmd_data_claim(ctrl,
@@ -460,33 +454,13 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED:
+ case INTEL_SIP_SMC_STATUS_REJECTED:
pr_debug("%s: STATUS_REJECTED\n", __func__);
break;
- case INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
+ case INTEL_SIP_SMC_STATUS_ERROR:
case INTEL_SIP_SMC_RSU_ERROR:
pr_err("%s: STATUS_ERROR\n", __func__);
- switch (pdata->command) {
- /* for FPGA mgr */
- case COMMAND_RECONFIG_DATA_CLAIM:
- case COMMAND_RECONFIG:
- case COMMAND_RECONFIG_DATA_SUBMIT:
- case COMMAND_RECONFIG_STATUS:
- cbdata->status =
- BIT(SVC_STATUS_RECONFIG_ERROR);
- break;
-
- /* for RSU */
- case COMMAND_RSU_STATUS:
- case COMMAND_RSU_UPDATE:
- case COMMAND_RSU_NOTIFY:
- case COMMAND_RSU_RETRY:
- cbdata->status =
- BIT(SVC_STATUS_RSU_ERROR);
- break;
- }
-
- cbdata->status = BIT(SVC_STATUS_RECONFIG_ERROR);
+ cbdata->status = BIT(SVC_STATUS_ERROR);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
@@ -502,7 +476,7 @@ static int svc_normal_to_secure_thread(void *data)
if ((pdata->command == COMMAND_RSU_RETRY) ||
(pdata->command == COMMAND_RSU_NOTIFY)) {
cbdata->status =
- BIT(SVC_STATUS_RSU_NO_SUPPORT);
+ BIT(SVC_STATUS_NO_SUPPORT);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index ea308751635f..63ab21d89c2c 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -176,7 +176,7 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
if (!priv->tx.pool) {
dev_err(bpmp->dev, "TX shmem pool not found\n");
- return -ENOMEM;
+ return -EPROBE_DEFER;
}
priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
@@ -188,7 +188,7 @@ static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
if (!priv->rx.pool) {
dev_err(bpmp->dev, "RX shmem pool not found\n");
- err = -ENOMEM;
+ err = -EPROBE_DEFER;
goto free_tx;
}
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 6741fcda0c37..fe6702df24bf 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -6,6 +6,7 @@
#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -869,12 +870,8 @@ static struct platform_driver tegra_bpmp_driver = {
.name = "tegra-bpmp",
.of_match_table = tegra_bpmp_match,
.pm = &tegra_bpmp_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = tegra_bpmp_probe,
};
-
-static int __init tegra_bpmp_init(void)
-{
- return platform_driver_register(&tegra_bpmp_driver);
-}
-core_initcall(tegra_bpmp_init);
+builtin_platform_driver(tegra_bpmp_driver);
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
index fc544e19b0a1..1389fa9418a7 100644
--- a/drivers/firmware/trusted_foundations.c
+++ b/drivers/firmware/trusted_foundations.c
@@ -19,6 +19,7 @@
#define TF_CACHE_ENABLE 1
#define TF_CACHE_DISABLE 2
+#define TF_CACHE_REENABLE 4
#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
@@ -29,6 +30,7 @@
#define TF_CPU_PM_S1 0xffffffe4
#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+static unsigned long tf_idle_mode = TF_PM_MODE_NONE;
static unsigned long cpu_boot_addr;
static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
@@ -85,25 +87,40 @@ static int tf_prepare_idle(unsigned long mode)
cpu_boot_addr);
break;
+ case TF_PM_MODE_NONE:
+ break;
+
default:
return -EINVAL;
}
+ tf_idle_mode = mode;
+
return 0;
}
#ifdef CONFIG_CACHE_L2X0
static void tf_cache_write_sec(unsigned long val, unsigned int reg)
{
- u32 l2x0_way_mask = 0xff;
+ u32 enable_op, l2x0_way_mask = 0xff;
switch (reg) {
case L2X0_CTRL:
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
l2x0_way_mask = 0xffff;
+ switch (tf_idle_mode) {
+ case TF_PM_MODE_LP2:
+ enable_op = TF_CACHE_REENABLE;
+ break;
+
+ default:
+ enable_op = TF_CACHE_ENABLE;
+ break;
+ }
+
if (val == L2X0_CTRL_EN)
- tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ tf_generic_smc(TF_CACHE_MAINT, enable_op,
l2x0_saved_regs.aux_ctrl);
else
tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 43bc6cfdab45..99606b34975e 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -85,14 +85,13 @@ static int get_pm_api_id(char *pm_api_req, u32 *pm_id)
static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 pm_api_version;
int ret;
struct zynqmp_pm_query_data qdata = {0};
switch (pm_id) {
case PM_GET_API_VERSION:
- ret = eemi_ops->get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
sprintf(debugfs_buf, "PM-API Version = %d.%d\n",
pm_api_version >> 16, pm_api_version & 0xffff);
break;
@@ -102,7 +101,7 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
qdata.arg2 = pm_api_arg[2];
qdata.arg3 = pm_api_arg[3];
- ret = eemi_ops->query_data(qdata, pm_api_ret);
+ ret = zynqmp_pm_query_data(qdata, pm_api_ret);
if (ret)
break;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 41b65164a367..8d1ff2454e2e 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2014-2020 Xilinx, Inc.
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -24,8 +24,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
-static const struct zynqmp_eemi_ops *eemi_ops_tbl;
-
static bool feature_check_enabled;
static u32 zynqmp_pm_features[PM_API_MAX];
@@ -219,7 +217,7 @@ static u32 pm_tz_version;
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_get_api_version(u32 *version)
+int zynqmp_pm_get_api_version(u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -237,6 +235,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_api_version);
/**
* zynqmp_pm_get_chipid - Get silicon ID registers
@@ -246,7 +245,7 @@ static int zynqmp_pm_get_api_version(u32 *version)
* Return: Returns the status of the operation and the idcode and version
* registers in @idcode and @version.
*/
-static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -260,6 +259,7 @@ static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
@@ -324,7 +324,7 @@ static int get_set_conduit_method(struct device_node *np)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
{
int ret;
@@ -338,6 +338,7 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*/
return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_query_data);
/**
* zynqmp_pm_clock_enable() - Enable the clock for given id
@@ -348,10 +349,11 @@ static int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_enable(u32 clock_id)
+int zynqmp_pm_clock_enable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable);
/**
* zynqmp_pm_clock_disable() - Disable the clock for given id
@@ -362,10 +364,11 @@ static int zynqmp_pm_clock_enable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_disable(u32 clock_id)
+int zynqmp_pm_clock_disable(u32 clock_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable);
/**
* zynqmp_pm_clock_getstate() - Get the clock state for given id
@@ -377,7 +380,7 @@ static int zynqmp_pm_clock_disable(u32 clock_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -388,6 +391,7 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate);
/**
* zynqmp_pm_clock_setdivider() - Set the clock divider for given id
@@ -399,11 +403,12 @@ static int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider);
/**
* zynqmp_pm_clock_getdivider() - Get the clock divider for given id
@@ -415,7 +420,7 @@ static int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -426,6 +431,7 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider);
/**
* zynqmp_pm_clock_setrate() - Set the clock rate for given id
@@ -436,13 +442,14 @@ static int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id,
lower_32_bits(rate),
upper_32_bits(rate),
0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate);
/**
* zynqmp_pm_clock_getrate() - Get the clock rate for given id
@@ -454,7 +461,7 @@ static int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -465,6 +472,7 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate);
/**
* zynqmp_pm_clock_setparent() - Set the clock parent for given id
@@ -475,11 +483,12 @@ static int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id,
parent_id, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent);
/**
* zynqmp_pm_clock_getparent() - Get the clock parent for given id
@@ -491,7 +500,7 @@ static int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -502,48 +511,191 @@ static int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent);
/**
- * zynqmp_is_valid_ioctl() - Check whether IOCTL ID is valid or not
- * @ioctl_id: IOCTL ID
+ * zynqmp_pm_set_pll_frac_mode() - PM API for set PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode (PLL_MODE_FRAC/PLL_MODE_INT)
+ *
+ * This function sets the PLL mode.
*
- * Return: 1 if IOCTL is valid else 0
+ * Return: Returns status, either success or error+reason
*/
-static inline int zynqmp_is_valid_ioctl(u32 ioctl_id)
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
{
- switch (ioctl_id) {
- case IOCTL_SD_DLL_RESET:
- case IOCTL_SET_SD_TAPDELAY:
- case IOCTL_SET_PLL_FRAC_MODE:
- case IOCTL_GET_PLL_FRAC_MODE:
- case IOCTL_SET_PLL_FRAC_DATA:
- case IOCTL_GET_PLL_FRAC_DATA:
- return 1;
- default:
- return 0;
- }
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE,
+ clk_id, mode, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode);
/**
- * zynqmp_pm_ioctl() - PM IOCTL API for device control and configs
- * @node_id: Node ID of the device
- * @ioctl_id: ID of the requested IOCTL
- * @arg1: Argument 1 to requested IOCTL call
- * @arg2: Argument 2 to requested IOCTL call
- * @out: Returned output value
+ * zynqmp_pm_get_pll_frac_mode() - PM API for get PLL mode
+ *
+ * @clk_id: PLL clock ID
+ * @mode: PLL mode
*
- * This function calls IOCTL to firmware for device control and configuration.
+ * This function returns the current PLL mode.
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
- u32 *out)
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
{
- if (!zynqmp_is_valid_ioctl(ioctl_id))
- return -EINVAL;
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE,
+ clk_id, 0, mode);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode);
+
+/**
+ * zynqmp_pm_set_pll_frac_data() - PM API for setting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function sets fraction data.
+ * It is valid for fraction mode only.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA,
+ clk_id, data, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data);
+
+/**
+ * zynqmp_pm_get_pll_frac_data() - PM API for getting pll fraction data
+ *
+ * @clk_id: PLL clock ID
+ * @data: fraction data
+ *
+ * This function returns fraction data value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA,
+ clk_id, 0, data);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
+
+/**
+ * zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
+ *
+ * @node_id:	Node ID of the device
+ * @type:	Type of tap delay to set (input/output)
+ * @value:	Value to set for the tap delay
+ *
+ * This function sets input/output tap delay for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SET_SD_TAPDELAY,
+ type, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
+
+/**
+ * zynqmp_pm_sd_dll_reset() - Reset DLL logic
+ *
+ * @node_id:	Node ID of the device
+ * @type:	Reset type
+ *
+ * This function resets DLL logic for the SD device.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+	return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET,
+				   type, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
+
+/**
+ * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
+ * @index:	GGS register index
+ * @value:	Register value to be written
+ *
+ * This function writes the value to the GGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS,
+ index, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
+
+/**
+ * zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
+ * @index:	GGS register index
+ * @value:	Value read from the GGS register
+ *
+ * This function returns the GGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS,
+ index, 0, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
+
+/**
+ * zynqmp_pm_write_pggs() - PM API for writing persistent global general
+ * storage (pggs)
+ * @index:	PGGS register index
+ * @value:	Register value to be written
+ *
+ * This function writes the value to the PGGS register.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value,
+ NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
+
+/**
+ * zynqmp_pm_read_pggs() - PM API for reading persistent global general
+ *			   storage (pggs)
+ * @index:	PGGS register index
+ * @value:	Value read from the PGGS register
+ *
+ * This function returns the PGGS register value.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0,
+ value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
- return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, ioctl_id,
- arg1, arg2, out);
+/**
+ * zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
+ * @value:	Status value to be written
+ *
+ * This function sets the healthy bit value to indicate the boot health
+ * status to firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS,
+ value, 0, NULL);
}
/**
@@ -554,12 +706,13 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag)
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
{
return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert);
/**
* zynqmp_pm_reset_get_status - Get status of the reset
@@ -568,8 +721,7 @@ static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -583,6 +735,7 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_reset_get_status);
/**
* zynqmp_pm_fpga_load - Perform the fpga load
@@ -597,12 +750,12 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
- const u32 flags)
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags)
{
return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
upper_32_bits(address), size, flags, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_load);
/**
* zynqmp_pm_fpga_get_status - Read value from PCAP status register
@@ -613,7 +766,7 @@ static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_fpga_get_status(u32 *value)
+int zynqmp_pm_fpga_get_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -626,6 +779,7 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
@@ -636,10 +790,11 @@ static int zynqmp_pm_fpga_get_status(u32 *value)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_init_finalize(void)
+int zynqmp_pm_init_finalize(void)
{
return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize);
/**
* zynqmp_pm_set_suspend_mode() - Set system suspend mode
@@ -649,10 +804,11 @@ static int zynqmp_pm_init_finalize(void)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_suspend_mode(u32 mode)
+int zynqmp_pm_set_suspend_mode(u32 mode)
{
return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode);
/**
* zynqmp_pm_request_node() - Request a node with specific capabilities
@@ -666,13 +822,13 @@ static int zynqmp_pm_set_suspend_mode(u32 mode)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_request_node);
/**
* zynqmp_pm_release_node() - Release a node
@@ -684,10 +840,11 @@ static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_release_node(const u32 node)
+int zynqmp_pm_release_node(const u32 node)
{
return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_release_node);
/**
* zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
@@ -701,13 +858,14 @@ static int zynqmp_pm_release_node(const u32 node)
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack)
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
{
return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
qos, ack, NULL);
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement);
/**
* zynqmp_pm_aes - Access AES hardware to encrypt/decrypt the data using
@@ -717,7 +875,7 @@ static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
*
* Return: Returns status, either success or error code.
*/
-static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+int zynqmp_pm_aes_engine(const u64 address, u32 *out)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
int ret;
@@ -732,47 +890,304 @@ static int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
-static const struct zynqmp_eemi_ops eemi_ops = {
- .get_api_version = zynqmp_pm_get_api_version,
- .get_chipid = zynqmp_pm_get_chipid,
- .query_data = zynqmp_pm_query_data,
- .clock_enable = zynqmp_pm_clock_enable,
- .clock_disable = zynqmp_pm_clock_disable,
- .clock_getstate = zynqmp_pm_clock_getstate,
- .clock_setdivider = zynqmp_pm_clock_setdivider,
- .clock_getdivider = zynqmp_pm_clock_getdivider,
- .clock_setrate = zynqmp_pm_clock_setrate,
- .clock_getrate = zynqmp_pm_clock_getrate,
- .clock_setparent = zynqmp_pm_clock_setparent,
- .clock_getparent = zynqmp_pm_clock_getparent,
- .ioctl = zynqmp_pm_ioctl,
- .reset_assert = zynqmp_pm_reset_assert,
- .reset_get_status = zynqmp_pm_reset_get_status,
- .init_finalize = zynqmp_pm_init_finalize,
- .set_suspend_mode = zynqmp_pm_set_suspend_mode,
- .request_node = zynqmp_pm_request_node,
- .release_node = zynqmp_pm_release_node,
- .set_requirement = zynqmp_pm_set_requirement,
- .fpga_load = zynqmp_pm_fpga_load,
- .fpga_get_status = zynqmp_pm_fpga_get_status,
- .aes = zynqmp_pm_aes_engine,
+/**
+ * zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
+ * @type: Shutdown or restart? 0 for shutdown, 1 for restart
+ * @subtype: Specifies which system should be restarted or shut down
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
+ 0, 0, NULL);
+}
+
+/**
+ * struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
+ * @subtype: Shutdown subtype
+ * @name: Matching string for scope argument
+ *
+ * This struct encapsulates mapping between shutdown scope ID and string.
+ */
+struct zynqmp_pm_shutdown_scope {
+ const enum zynqmp_pm_shutdown_subtype subtype;
+ const char *name;
+};
+
+static struct zynqmp_pm_shutdown_scope shutdown_scopes[] = {
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ .name = "subsystem",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ .name = "ps_only",
+ },
+ [ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM] = {
+ .subtype = ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ .name = "system",
+ },
};
+static struct zynqmp_pm_shutdown_scope *selected_scope =
+ &shutdown_scopes[ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM];
+
/**
- * zynqmp_pm_get_eemi_ops - Get eemi ops functions
+ * zynqmp_pm_is_shutdown_scope_valid - Check if shutdown scope string is valid
+ * @scope_string: Shutdown scope string
*
- * Return: Pointer of eemi_ops structure
+ * Return: Pointer to the matching shutdown scope struct from the array
+ *	   of available options if the string is valid, NULL otherwise.
*/
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
+static struct zynqmp_pm_shutdown_scope*
+ zynqmp_pm_is_shutdown_scope_valid(const char *scope_string)
+{
+ int count;
+
+ for (count = 0; count < ARRAY_SIZE(shutdown_scopes); count++)
+ if (sysfs_streq(scope_string, shutdown_scopes[count].name))
+ return &shutdown_scopes[count];
+
+ return NULL;
+}
+
+static ssize_t shutdown_scope_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(shutdown_scopes); i++) {
+ if (&shutdown_scopes[i] == selected_scope) {
+ strcat(buf, "[");
+ strcat(buf, shutdown_scopes[i].name);
+ strcat(buf, "]");
+ } else {
+ strcat(buf, shutdown_scopes[i].name);
+ }
+ strcat(buf, " ");
+ }
+ strcat(buf, "\n");
+
+ return strlen(buf);
+}
+
+static ssize_t shutdown_scope_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ struct zynqmp_pm_shutdown_scope *scope;
+
+ scope = zynqmp_pm_is_shutdown_scope_valid(buf);
+ if (!scope)
+ return -EINVAL;
+
+ ret = zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ scope->subtype);
+ if (ret) {
+ pr_err("unable to set shutdown scope %s\n", buf);
+ return ret;
+ }
+
+ selected_scope = scope;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(shutdown_scope);
+
+static ssize_t health_status_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int value;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ ret = zynqmp_pm_set_boot_health_status(value);
+ if (ret) {
+ dev_err(device, "unable to set healthy bit value to %u\n",
+ value);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_WO(health_status);
+
+static ssize_t ggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_ggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t ggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
+{
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_ggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+err:
+ return count;
+}
+
+/* GGS register show functions */
+#define GGS0_SHOW(N) \
+ ssize_t ggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return ggs_show(device, attr, buf, N); \
+ }
+
+static GGS0_SHOW(0);
+static GGS0_SHOW(1);
+static GGS0_SHOW(2);
+static GGS0_SHOW(3);
+
+/* GGS register store function */
+#define GGS0_STORE(N) \
+ ssize_t ggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return ggs_store(device, attr, buf, count, N); \
+ }
+
+static GGS0_STORE(0);
+static GGS0_STORE(1);
+static GGS0_STORE(2);
+static GGS0_STORE(3);
+
+static ssize_t pggs_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf,
+ u32 reg)
+{
+ int ret;
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+
+ ret = zynqmp_pm_read_pggs(reg, ret_payload);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", ret_payload[1]);
+}
+
+static ssize_t pggs_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ u32 reg)
{
- if (eemi_ops_tbl)
- return eemi_ops_tbl;
- else
- return ERR_PTR(-EPROBE_DEFER);
+ long value;
+ int ret;
+
+ if (reg >= GSS_NUM_REGS)
+ return -EINVAL;
+ ret = kstrtol(buf, 16, &value);
+ if (ret) {
+ count = -EFAULT;
+ goto err;
+ }
+
+ ret = zynqmp_pm_write_pggs(reg, value);
+ if (ret)
+ count = -EFAULT;
+
+err:
+ return count;
}
-EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
+
+#define PGGS0_SHOW(N) \
+ ssize_t pggs##N##_show(struct device *device, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return pggs_show(device, attr, buf, N); \
+ }
+
+#define PGGS0_STORE(N) \
+ ssize_t pggs##N##_store(struct device *device, \
+ struct device_attribute *attr, \
+ const char *buf, \
+ size_t count) \
+ { \
+ return pggs_store(device, attr, buf, count, N); \
+ }
+
+/* PGGS register show functions */
+static PGGS0_SHOW(0);
+static PGGS0_SHOW(1);
+static PGGS0_SHOW(2);
+static PGGS0_SHOW(3);
+
+/* PGGS register store functions */
+static PGGS0_STORE(0);
+static PGGS0_STORE(1);
+static PGGS0_STORE(2);
+static PGGS0_STORE(3);
+
+/* GGS register attributes */
+static DEVICE_ATTR_RW(ggs0);
+static DEVICE_ATTR_RW(ggs1);
+static DEVICE_ATTR_RW(ggs2);
+static DEVICE_ATTR_RW(ggs3);
+
+/* PGGS register attributes */
+static DEVICE_ATTR_RW(pggs0);
+static DEVICE_ATTR_RW(pggs1);
+static DEVICE_ATTR_RW(pggs2);
+static DEVICE_ATTR_RW(pggs3);
+
+static struct attribute *zynqmp_firmware_attrs[] = {
+ &dev_attr_ggs0.attr,
+ &dev_attr_ggs1.attr,
+ &dev_attr_ggs2.attr,
+ &dev_attr_ggs3.attr,
+ &dev_attr_pggs0.attr,
+ &dev_attr_pggs1.attr,
+ &dev_attr_pggs2.attr,
+ &dev_attr_pggs3.attr,
+ &dev_attr_shutdown_scope.attr,
+ &dev_attr_health_status.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(zynqmp_firmware);
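
From userspace the new attributes appear under the firmware platform device; a short sketch is below, where the sysfs path is an assumption derived from a typical device-tree node name.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define FW_SYSFS "/sys/devices/platform/firmware:zynqmp-firmware/"

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Read back global storage register 0 */
	fd = open(FW_SYSFS "ggs0", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("ggs0 = %s", buf);
		}
		close(fd);
	}

	/* Restrict the shutdown scope to the PS only */
	fd = open(FW_SYSFS "shutdown_scope", O_WRONLY);
	if (fd >= 0) {
		if (write(fd, "ps_only", strlen("ps_only")) < 0)
			perror("shutdown_scope");
		close(fd);
	}

	return 0;
}
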
static int zynqmp_firmware_probe(struct platform_device *pdev)
{
@@ -820,11 +1235,6 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
- /* Assign eemi_ops_table */
- eemi_ops_tbl = &eemi_ops;
-
- zynqmp_pm_api_debugfs_init();
-
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
if (ret) {
@@ -832,6 +1242,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
return ret;
}
+ zynqmp_pm_api_debugfs_init();
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
@@ -854,6 +1266,7 @@ static struct platform_driver zynqmp_firmware_driver = {
.driver = {
.name = "zynqmp_firmware",
.of_match_table = zynqmp_firmware_of_match,
+ .dev_groups = zynqmp_firmware_groups,
},
.probe = zynqmp_firmware_probe,
.remove = zynqmp_firmware_remove,
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 72380e1d31c7..b2408a710662 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
config FPGA_DFL_FME
tristate "FPGA DFL FME Driver"
- depends on FPGA_DFL && HWMON
+ depends on FPGA_DFL && HWMON && PERF_EVENTS
help
The FPGA Management Engine (FME) is a feature device implemented
under Device Feature List (DFL) framework. Select this option to
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4..d8e21dfc6778 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
dfl-afu-objs += dfl-afu-error.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index d902acb36d14..02d8cbad1ae2 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -61,10 +61,10 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
region->pages);
if (pinned < 0) {
ret = pinned;
- goto put_pages;
+ goto free_pages;
} else if (pinned != npages) {
ret = -EFAULT;
- goto free_pages;
+ goto put_pages;
}
dev_dbg(dev, "%d pages pinned\n", pinned);
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 65437b6a6842..b0c31789a909 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -561,14 +561,16 @@ static int afu_open(struct inode *inode, struct file *filp)
if (WARN_ON(!pdata))
return -ENODEV;
- ret = dfl_feature_dev_use_begin(pdata);
- if (ret)
- return ret;
-
- dev_dbg(&fdev->dev, "Device File Open\n");
- filp->private_data = fdev;
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = fdev;
+ }
+ mutex_unlock(&pdata->lock);
- return 0;
+ return ret;
}
static int afu_release(struct inode *inode, struct file *filp)
@@ -581,12 +583,14 @@ static int afu_release(struct inode *inode, struct file *filp)
pdata = dev_get_platdata(&pdev->dev);
mutex_lock(&pdata->lock);
- __port_reset(pdev);
- afu_dma_region_destroy(pdata);
- mutex_unlock(&pdata->lock);
-
dfl_feature_dev_use_end(pdata);
+ if (!dfl_feature_dev_use_count(pdata)) {
+ __port_reset(pdev);
+ afu_dma_region_destroy(pdata);
+ }
+ mutex_unlock(&pdata->lock);
+
return 0;
}
@@ -746,6 +750,12 @@ static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+static const struct vm_operations_struct afu_vma_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct platform_device *pdev = filp->private_data;
@@ -775,6 +785,9 @@ static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
!(region.flags & DFL_PORT_REGION_WRITE))
return -EPERM;
+ /* Support debug access to the mapping */
+ vma->vm_ops = &afu_vma_ops;
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index 1d4690c99268..fc210d4e1863 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -580,6 +580,10 @@ static struct dfl_feature_driver fme_feature_drvs[] = {
.ops = &fme_power_mgmt_ops,
},
{
+ .id_table = fme_perf_id_table,
+ .ops = &fme_perf_ops,
+ },
+ {
.ops = NULL,
},
};
@@ -600,14 +604,16 @@ static int fme_open(struct inode *inode, struct file *filp)
if (WARN_ON(!pdata))
return -ENODEV;
- ret = dfl_feature_dev_use_begin(pdata);
- if (ret)
- return ret;
-
- dev_dbg(&fdev->dev, "Device File Open\n");
- filp->private_data = pdata;
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+ if (!ret) {
+ dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+ dfl_feature_dev_use_count(pdata));
+ filp->private_data = pdata;
+ }
+ mutex_unlock(&pdata->lock);
- return 0;
+ return ret;
}
static int fme_release(struct inode *inode, struct file *filp)
@@ -616,7 +622,10 @@ static int fme_release(struct inode *inode, struct file *filp)
struct platform_device *pdev = pdata->dev;
dev_dbg(&pdev->dev, "Device File Release\n");
+
+ mutex_lock(&pdata->lock);
dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
return 0;
}
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
new file mode 100644
index 000000000000..6ce1ed222ea4
--- /dev/null
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -0,0 +1,1020 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA Management Engine (FME) Global Performance Reporting
+ *
+ * Copyright 2019 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xu Yilun <yilun.xu@intel.com>
+ * Joseph Grecco <joe.grecco@intel.com>
+ * Enno Luebbers <enno.luebbers@intel.com>
+ * Tim Whisonant <tim.whisonant@intel.com>
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Mitchel, Henry <henry.mitchel@intel.com>
+ */
+
+#include <linux/perf_event.h>
+#include "dfl.h"
+#include "dfl-fme.h"
+
+/*
+ * Performance Counter Registers for Cache.
+ *
+ * Cache Events are listed below as CACHE_EVNT_*.
+ */
+#define CACHE_CTRL 0x8
+#define CACHE_RESET_CNTR BIT_ULL(0)
+#define CACHE_FREEZE_CNTR BIT_ULL(8)
+#define CACHE_CTRL_EVNT GENMASK_ULL(19, 16)
+#define CACHE_EVNT_RD_HIT 0x0
+#define CACHE_EVNT_WR_HIT 0x1
+#define CACHE_EVNT_RD_MISS 0x2
+#define CACHE_EVNT_WR_MISS 0x3
+#define CACHE_EVNT_RSVD 0x4
+#define CACHE_EVNT_HOLD_REQ 0x5
+#define CACHE_EVNT_DATA_WR_PORT_CONTEN 0x6
+#define CACHE_EVNT_TAG_WR_PORT_CONTEN 0x7
+#define CACHE_EVNT_TX_REQ_STALL 0x8
+#define CACHE_EVNT_RX_REQ_STALL 0x9
+#define CACHE_EVNT_EVICTIONS 0xa
+#define CACHE_EVNT_MAX CACHE_EVNT_EVICTIONS
+#define CACHE_CHANNEL_SEL BIT_ULL(20)
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+#define CACHE_CNTR0 0x10
+#define CACHE_CNTR1 0x18
+#define CACHE_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define CACHE_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Fabric.
+ *
+ * Fabric Events are listed below as FAB_EVNT_*
+ */
+#define FAB_CTRL 0x20
+#define FAB_RESET_CNTR BIT_ULL(0)
+#define FAB_FREEZE_CNTR BIT_ULL(8)
+#define FAB_CTRL_EVNT GENMASK_ULL(19, 16)
+#define FAB_EVNT_PCIE0_RD 0x0
+#define FAB_EVNT_PCIE0_WR 0x1
+#define FAB_EVNT_PCIE1_RD 0x2
+#define FAB_EVNT_PCIE1_WR 0x3
+#define FAB_EVNT_UPI_RD 0x4
+#define FAB_EVNT_UPI_WR 0x5
+#define FAB_EVNT_MMIO_RD 0x6
+#define FAB_EVNT_MMIO_WR 0x7
+#define FAB_EVNT_MAX FAB_EVNT_MMIO_WR
+#define FAB_PORT_ID GENMASK_ULL(21, 20)
+#define FAB_PORT_FILTER BIT_ULL(23)
+#define FAB_PORT_FILTER_DISABLE 0
+#define FAB_PORT_FILTER_ENABLE 1
+#define FAB_CNTR 0x28
+#define FAB_CNTR_EVNT_CNTR GENMASK_ULL(59, 0)
+#define FAB_CNTR_EVNT GENMASK_ULL(63, 60)
+
+/*
+ * Performance Counter Registers for Clock.
+ *
+ * Clock Counter can't be reset or frozen by SW.
+ */
+#define CLK_CNTR 0x30
+#define BASIC_EVNT_CLK 0x0
+#define BASIC_EVNT_MAX BASIC_EVNT_CLK
+
+/*
+ * Performance Counter Registers for IOMMU / VT-D.
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL 0x38
+#define VTD_RESET_CNTR BIT_ULL(0)
+#define VTD_FREEZE_CNTR BIT_ULL(8)
+#define VTD_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS 0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS 0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT 0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT 0x3
+#define VTD_EVNT_DEVTLB_4K_FILL 0x4
+#define VTD_EVNT_DEVTLB_2M_FILL 0x5
+#define VTD_EVNT_DEVTLB_1G_FILL 0x6
+#define VTD_EVNT_MAX VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR 0x40
+#define VTD_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL 0x48
+#define VTD_SIP_RESET_CNTR BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT 0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT 0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT 0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT 0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT 0x4
+#define VTD_SIP_EVNT_RCC_HIT 0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS 0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS 0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS 0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS 0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS 0xa
+#define VTD_SIP_EVNT_RCC_MISS 0xb
+#define VTD_SIP_EVNT_MAX VTD_SIP_EVNT_SLPWC_L4_MISS
+#define VTD_SIP_CNTR 0x50
+#define VTD_SIP_CNTR_EVNT_CNTR GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT 30
+
+#define PERF_MAX_PORT_NUM 1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+ struct device *dev;
+ void __iomem *ioaddr;
+ struct pmu pmu;
+ u64 id;
+
+ u32 fab_users;
+ u32 fab_port_id;
+ spinlock_t fab_lock;
+
+ unsigned int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */
+struct fme_perf_event_ops {
+ int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid);
+ void (*event_destroy)(struct fme_perf_priv *priv, u32 event,
+ u32 portid);
+ u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid);
+};
+
+#define to_fme_perf_priv(_pmu) container_of(_pmu, struct fme_perf_priv, pmu)
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct fme_perf_priv *priv;
+
+ priv = to_fme_perf_priv(pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fme_perf_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_cpumask_group = {
+ .attrs = fme_perf_cpumask_attrs,
+};
+
+#define FME_EVENT_MASK GENMASK_ULL(11, 0)
+#define FME_EVENT_SHIFT 0
+#define FME_EVTYPE_MASK GENMASK_ULL(15, 12)
+#define FME_EVTYPE_SHIFT 12
+#define FME_EVTYPE_BASIC 0
+#define FME_EVTYPE_CACHE 1
+#define FME_EVTYPE_FABRIC 2
+#define FME_EVTYPE_VTD 3
+#define FME_EVTYPE_VTD_SIP 4
+#define FME_EVTYPE_MAX FME_EVTYPE_VTD_SIP
+#define FME_PORTID_MASK GENMASK_ULL(23, 16)
+#define FME_PORTID_SHIFT 16
+#define FME_PORTID_ROOT (0xffU)
+
+#define get_event(_config) FIELD_GET(FME_EVENT_MASK, _config)
+#define get_evtype(_config) FIELD_GET(FME_EVTYPE_MASK, _config)
+#define get_portid(_config) FIELD_GET(FME_PORTID_MASK, _config)
+
+PMU_FORMAT_ATTR(event, "config:0-11");
+PMU_FORMAT_ATTR(evtype, "config:12-15");
+PMU_FORMAT_ATTR(portid, "config:16-23");
+
+static struct attribute *fme_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_evtype.attr,
+ &format_attr_portid.attr,
+ NULL,
+};
+
+static struct attribute_group fme_perf_format_group = {
+ .name = "format",
+ .attrs = fme_perf_format_attrs,
+};
+
+/*
+ * There are no default events, but we need to create
+ * "events" group (with empty attrs) before updating
+ * it with detected events (using pmu->attr_update).
+ */
+static struct attribute *fme_perf_events_attrs_empty[] = {
+ NULL,
+};
+
+static struct attribute_group fme_perf_events_group = {
+ .name = "events",
+ .attrs = fme_perf_events_attrs_empty,
+};
+
+static const struct attribute_group *fme_perf_groups[] = {
+ &fme_perf_format_group,
+ &fme_perf_cpumask_group,
+ &fme_perf_events_group,
+ NULL,
+};
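
The format attributes above describe how event, evtype and portid are packed into perf_event_attr.config. Illustration only, not part of the patch: the same packing expressed with the bitfield helpers and masks already defined in this file.

	/* illustration: build a raw config value matching the "format" group above */
	static inline u64 fme_perf_pack_config(u32 event, u32 evtype, u32 portid)
	{
		return FIELD_PREP(FME_EVENT_MASK, event) |
		       FIELD_PREP(FME_EVTYPE_MASK, evtype) |
		       FIELD_PREP(FME_PORTID_MASK, portid);
	}

For example, counting fabric MMIO reads across all ports corresponds to fme_perf_pack_config(FAB_EVNT_MMIO_RD, FME_EVTYPE_FABRIC, FME_PORTID_ROOT).
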
+
+static bool is_portid_root(u32 portid)
+{
+ return portid == FME_PORTID_ROOT;
+}
+
+static bool is_portid_port(u32 portid)
+{
+ return portid < PERF_MAX_PORT_NUM;
+}
+
+static bool is_portid_root_or_port(u32 portid)
+{
+ return is_portid_root(portid) || is_portid_port(portid);
+}
+
+static u64 fme_read_perf_cntr_reg(void __iomem *addr)
+{
+ u32 low;
+ u64 v;
+
+ /*
+ * For 64bit counter registers, the counter may increase and carry
+ * out of bit [31] between two 32bit reads. So add extra reads to
+ * prevent this issue. This only happens on platforms which don't
+ * support 64bit reads - readq is split into two readl calls.
+ */
+ do {
+ v = readq(addr);
+ low = readl(addr);
+ } while (((u32)v) > low);
+
+ return v;
+}
+
+static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (event <= BASIC_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 basic_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+
+ return fme_read_perf_cntr_reg(base + CLK_CNTR);
+}
+
+static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= CACHE_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 cache_read_event_counter(struct fme_perf_priv *priv,
+ u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v, count;
+ u8 channel;
+
+ if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS ||
+ event == CACHE_EVNT_DATA_WR_PORT_CONTEN ||
+ event == CACHE_EVNT_TAG_WR_PORT_CONTEN)
+ channel = CACHE_CHANNEL_WR;
+ else
+ channel = CACHE_CHANNEL_RD;
+
+ /* set channel access type and cache event code. */
+ v = readq(base + CACHE_CTRL);
+ v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT);
+ v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel);
+ v |= FIELD_PREP(CACHE_CTRL_EVNT, event);
+ writeq(v, base + CACHE_CTRL);
+
+ if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v,
+ FIELD_GET(CACHE_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR0);
+ count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+ v = fme_read_perf_cntr_reg(base + CACHE_CNTR1);
+ count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v);
+
+ return count;
+}
+
+static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid))
+ return false;
+
+ if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF &&
+ (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD ||
+ event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR))
+ return false;
+
+ return true;
+}
+
+static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ int ret = 0;
+ u64 v;
+
+ if (!is_fabric_event_supported(priv, event, portid))
+ return -EINVAL;
+
+ /*
+ * The fabric counter set can only be in either overall or port mode.
+ * In overall mode, it counts overall data for the FPGA; in port mode,
+ * it is configured to monitor one individual port.
+ *
+ * So every time a new event is initialized, the driver checks the
+ * current working mode and whether someone else is using this counter set.
+ */
+ spin_lock(&priv->fab_lock);
+ if (priv->fab_users && priv->fab_port_id != portid) {
+ dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n");
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ priv->fab_users++;
+
+ /*
+ * skip if current working mode matches, otherwise change the working
+ * mode per input port_id, to monitor overall data or another port.
+ */
+ if (priv->fab_port_id == portid)
+ goto exit;
+
+ priv->fab_port_id = portid;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~(FAB_PORT_FILTER | FAB_PORT_ID);
+
+ if (is_portid_root(portid)) {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE);
+ } else {
+ v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE);
+ v |= FIELD_PREP(FAB_PORT_ID, portid);
+ }
+ writeq(v, base + FAB_CTRL);
+
+exit:
+ spin_unlock(&priv->fab_lock);
+ return ret;
+}
+
+static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ spin_lock(&priv->fab_lock);
+ priv->fab_users--;
+ spin_unlock(&priv->fab_lock);
+}
+
+static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + FAB_CTRL);
+ v &= ~FAB_CTRL_EVNT;
+ v |= FIELD_PREP(FAB_CTRL_EVNT, event);
+ writeq(v, base + FAB_CTRL);
+
+ if (readq_poll_timeout_atomic(base + FAB_CNTR, v,
+ FIELD_GET(FAB_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + FAB_CNTR);
+ return FIELD_GET(FAB_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_EVNT_MAX && is_portid_port(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ event += (portid * (VTD_EVNT_MAX + 1));
+
+ v = readq(base + VTD_CTRL);
+ v &= ~VTD_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_CTRL_EVNT, event);
+ writeq(v, base + VTD_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_CNTR, v,
+ FIELD_GET(VTD_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_CNTR);
+ return FIELD_GET(VTD_CNTR_EVNT_CNTR, v);
+}
+
+static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid)
+{
+ if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF &&
+ event <= VTD_SIP_EVNT_MAX && is_portid_root(portid))
+ return 0;
+
+ return -EINVAL;
+}
+
+static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event,
+ u32 portid)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ v = readq(base + VTD_SIP_CTRL);
+ v &= ~VTD_SIP_CTRL_EVNT;
+ v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event);
+ writeq(v, base + VTD_SIP_CTRL);
+
+ if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v,
+ FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event,
+ 1, PERF_TIMEOUT)) {
+ dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n");
+ return 0;
+ }
+
+ v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR);
+ return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v);
+}
+
+static struct fme_perf_event_ops fme_perf_event_ops[] = {
+ [FME_EVTYPE_BASIC] = {.event_init = basic_event_init,
+ .read_counter = basic_read_event_counter,},
+ [FME_EVTYPE_CACHE] = {.event_init = cache_event_init,
+ .read_counter = cache_read_event_counter,},
+ [FME_EVTYPE_FABRIC] = {.event_init = fabric_event_init,
+ .event_destroy = fabric_event_destroy,
+ .read_counter = fabric_read_event_counter,},
+ [FME_EVTYPE_VTD] = {.event_init = vtd_event_init,
+ .read_counter = vtd_read_event_counter,},
+ [FME_EVTYPE_VTD_SIP] = {.event_init = vtd_sip_event_init,
+ .read_counter = vtd_sip_read_event_counter,},
+};
+
+static ssize_t fme_perf_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+ unsigned long config;
+ char *ptr = buf;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ config = (unsigned long)eattr->var;
+
+ ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config));
+ ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config));
+
+ if (is_portid_root(get_portid(config)))
+ ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT);
+ else
+ ptr += sprintf(ptr, ",portid=?\n");
+
+ return (ssize_t)(ptr - buf);
+}
+
+#define FME_EVENT_ATTR(_name) \
+ __ATTR(_name, 0444, fme_perf_event_show, NULL)
+
+#define FME_PORT_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK))
+
+#define FME_EVENT_CONFIG(_event, _type) \
+ (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \
+ (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) | \
+ (FME_PORTID_ROOT << FME_PORTID_SHIFT))
+
+/* FME Perf Basic Events */
+#define FME_EVENT_BASIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_##_name = { \
+ .attr = FME_EVENT_ATTR(_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \
+}
+
+FME_EVENT_BASIC(clock, BASIC_EVNT_CLK);
+
+static struct attribute *fme_perf_basic_events_attrs[] = {
+ &fme_perf_event_clock.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_basic_events_group = {
+ .name = "events",
+ .attrs = fme_perf_basic_events_attrs,
+};
+
+/* FME Perf Cache Events */
+#define FME_EVENT_CACHE(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_cache_##_name = { \
+ .attr = FME_EVENT_ATTR(cache_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \
+}
+
+FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT);
+FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS);
+FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT);
+FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS);
+FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ);
+FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL);
+FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL);
+FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS);
+FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN);
+FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN);
+
+static struct attribute *fme_perf_cache_events_attrs[] = {
+ &fme_perf_event_cache_read_hit.attr.attr,
+ &fme_perf_event_cache_read_miss.attr.attr,
+ &fme_perf_event_cache_write_hit.attr.attr,
+ &fme_perf_event_cache_write_miss.attr.attr,
+ &fme_perf_event_cache_hold_request.attr.attr,
+ &fme_perf_event_cache_tx_req_stall.attr.attr,
+ &fme_perf_event_cache_rx_req_stall.attr.attr,
+ &fme_perf_event_cache_eviction.attr.attr,
+ &fme_perf_event_cache_data_write_port_contention.attr.attr,
+ &fme_perf_event_cache_tag_write_port_contention.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+
+ return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? attr->mode : 0;
+}
+
+static const struct attribute_group fme_perf_cache_events_group = {
+ .name = "events",
+ .attrs = fme_perf_cache_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf Fabric Events */
+#define FME_EVENT_FABRIC(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+#define FME_EVENT_FABRIC_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \
+ .attr = FME_EVENT_ATTR(fab_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \
+}
+
+FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR);
+
+FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD);
+FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR);
+FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD);
+FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR);
+FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD);
+FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR);
+FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD);
+FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR);
+
+static struct attribute *fme_perf_fabric_events_attrs[] = {
+ &fme_perf_event_fab_pcie0_read.attr.attr,
+ &fme_perf_event_fab_pcie0_write.attr.attr,
+ &fme_perf_event_fab_pcie1_read.attr.attr,
+ &fme_perf_event_fab_pcie1_write.attr.attr,
+ &fme_perf_event_fab_upi_read.attr.attr,
+ &fme_perf_event_fab_upi_write.attr.attr,
+ &fme_perf_event_fab_mmio_read.attr.attr,
+ &fme_perf_event_fab_mmio_write.attr.attr,
+ &fme_perf_event_fab_port_pcie0_read.attr.attr,
+ &fme_perf_event_fab_port_pcie0_write.attr.attr,
+ &fme_perf_event_fab_port_pcie1_read.attr.attr,
+ &fme_perf_event_fab_port_pcie1_write.attr.attr,
+ &fme_perf_event_fab_port_upi_read.attr.attr,
+ &fme_perf_event_fab_port_upi_write.attr.attr,
+ &fme_perf_event_fab_port_mmio_read.attr.attr,
+ &fme_perf_event_fab_port_mmio_write.attr.attr,
+ NULL,
+};
+
+static umode_t fme_perf_fabric_events_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct fme_perf_priv *priv = to_fme_perf_priv(pmu);
+ struct dev_ext_attribute *eattr;
+ unsigned long var;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr.attr);
+ var = (unsigned long)eattr->var;
+
+ if (is_fabric_event_supported(priv, get_event(var), get_portid(var)))
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group fme_perf_fabric_events_group = {
+ .name = "events",
+ .attrs = fme_perf_fabric_events_attrs,
+ .is_visible = fme_perf_fabric_events_visible,
+};
+
+/* FME Perf VTD Events */
+#define FME_EVENT_VTD_PORT(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_port_##_name), \
+ .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \
+}
+
+FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS);
+FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS);
+FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT);
+FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT);
+FME_EVENT_VTD_PORT(devtlb_4k_fill, VTD_EVNT_DEVTLB_4K_FILL);
+FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL);
+FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL);
+
+static struct attribute *fme_perf_vtd_events_attrs[] = {
+ &fme_perf_event_vtd_port_read_transaction.attr.attr,
+ &fme_perf_event_vtd_port_write_transaction.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr,
+ &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+/* FME Perf VTD SIP Events */
+#define FME_EVENT_VTD_SIP(_name, _event) \
+static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \
+ .attr = FME_EVENT_ATTR(vtd_sip_##_name), \
+ .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \
+}
+
+FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT);
+FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT);
+FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT);
+FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT);
+FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT);
+FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT);
+FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS);
+FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS);
+FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS);
+FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS);
+FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS);
+FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS);
+
+static struct attribute *fme_perf_vtd_sip_events_attrs[] = {
+ &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_hit.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr,
+ &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr,
+ &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr,
+ &fme_perf_event_vtd_sip_rcc_miss.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group fme_perf_vtd_sip_events_group = {
+ .name = "events",
+ .attrs = fme_perf_vtd_sip_events_attrs,
+ .is_visible = fme_perf_events_visible,
+};
+
+static const struct attribute_group *fme_perf_events_groups[] = {
+ &fme_perf_basic_events_group,
+ &fme_perf_cache_events_group,
+ &fme_perf_fabric_events_group,
+ &fme_perf_vtd_events_group,
+ &fme_perf_vtd_sip_events_group,
+ NULL,
+};
+
+static struct fme_perf_event_ops *get_event_ops(u32 evtype)
+{
+ if (evtype > FME_EVTYPE_MAX)
+ return NULL;
+
+ return &fme_perf_event_ops[evtype];
+}
+
+static void fme_perf_event_destroy(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+
+ if (ops->event_destroy)
+ ops->event_destroy(priv, event->hw.idx, event->hw.config_base);
+}
+
+static int fme_perf_event_init(struct perf_event *event)
+{
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct fme_perf_event_ops *ops;
+ u32 eventid, evtype, portid;
+
+ /* check the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * FME counters are shared across all cores, so per-process
+ * (per-task) mode is not supported. Event sampling mode is
+ * not supported either.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (event->cpu != priv->cpu)
+ return -EINVAL;
+
+ eventid = get_event(event->attr.config);
+ portid = get_portid(event->attr.config);
+ evtype = get_evtype(event->attr.config);
+ if (evtype > FME_EVTYPE_MAX)
+ return -EINVAL;
+
+ hwc->event_base = evtype;
+ hwc->idx = (int)eventid;
+ hwc->config_base = portid;
+
+ event->destroy = fme_perf_event_destroy;
+
+ dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x,\n",
+ __func__, eventid, evtype, portid);
+
+ ops = get_event_ops(evtype);
+ if (ops->event_init)
+ return ops->event_init(priv, eventid, portid);
+
+ return 0;
+}
+
+static void fme_perf_event_update(struct perf_event *event)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 now, prev, delta;
+
+ now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ prev = local64_read(&hwc->prev_count);
+ delta = now - prev;
+
+ local64_add(delta, &event->count);
+}
+
+static void fme_perf_event_start(struct perf_event *event, int flags)
+{
+ struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base);
+ struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base);
+ local64_set(&hwc->prev_count, count);
+}
+
+static void fme_perf_event_stop(struct perf_event *event, int flags)
+{
+ fme_perf_event_update(event);
+}
+
+static int fme_perf_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ fme_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void fme_perf_event_del(struct perf_event *event, int flags)
+{
+ fme_perf_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void fme_perf_event_read(struct perf_event *event)
+{
+ fme_perf_event_update(event);
+}
+
+static void fme_perf_setup_hardware(struct fme_perf_priv *priv)
+{
+ void __iomem *base = priv->ioaddr;
+ u64 v;
+
+ /* read and save current working mode for fabric counters */
+ v = readq(base + FAB_CTRL);
+
+ if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE)
+ priv->fab_port_id = FME_PORTID_ROOT;
+ else
+ priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v);
+}
+
+static int fme_perf_pmu_register(struct platform_device *pdev,
+ struct fme_perf_priv *priv)
+{
+ struct pmu *pmu = &priv->pmu;
+ char *name;
+ int ret;
+
+ spin_lock_init(&priv->fab_lock);
+
+ fme_perf_setup_hardware(priv);
+
+ pmu->task_ctx_nr = perf_invalid_context;
+ pmu->attr_groups = fme_perf_groups;
+ pmu->attr_update = fme_perf_events_groups;
+ pmu->event_init = fme_perf_event_init;
+ pmu->add = fme_perf_event_add;
+ pmu->del = fme_perf_event_del;
+ pmu->start = fme_perf_event_start;
+ pmu->stop = fme_perf_event_stop;
+ pmu->read = fme_perf_event_read;
+ pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+ PERF_PMU_CAP_NO_EXCLUDE;
+
+ name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id);
+
+ ret = perf_pmu_register(pmu, name, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void fme_perf_pmu_unregister(struct fme_perf_priv *priv)
+{
+ perf_pmu_unregister(&priv->pmu);
+}
+
+static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct fme_perf_priv *priv;
+ int target;
+
+ priv = hlist_entry_safe(node, struct fme_perf_priv, node);
+
+ if (cpu != priv->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ priv->cpu = target;
+ return 0;
+}
+
+static int fme_perf_init(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->ioaddr = feature->ioaddr;
+ priv->id = feature->id;
+ priv->cpu = raw_smp_processor_id();
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fpga/dfl_fme:online",
+ NULL, fme_perf_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ priv->cpuhp_state = ret;
+
+ /* Register the pmu instance for cpu hotplug */
+ ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node);
+ if (ret)
+ goto cpuhp_instance_err;
+
+ ret = fme_perf_pmu_register(pdev, priv);
+ if (ret)
+ goto pmu_register_err;
+
+ feature->priv = priv;
+ return 0;
+
+pmu_register_err:
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+ return ret;
+}
+
+static void fme_perf_uinit(struct platform_device *pdev,
+ struct dfl_feature *feature)
+{
+ struct fme_perf_priv *priv = feature->priv;
+
+ fme_perf_pmu_unregister(priv);
+ cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node);
+ cpuhp_remove_multi_state(priv->cpuhp_state);
+}
+
+const struct dfl_feature_id fme_perf_id_table[] = {
+ {.id = FME_FEATURE_ID_GLOBAL_IPERF,},
+ {.id = FME_FEATURE_ID_GLOBAL_DPERF,},
+ {0,}
+};
+
+const struct dfl_feature_ops fme_perf_ops = {
+ .init = fme_perf_init,
+ .uinit = fme_perf_uinit,
+};
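
Illustration only, not part of the patch: a hedged userspace sketch consuming the new PMU through perf_event_open(2). The instance name "dfl_fme0" and the monitored CPU are assumptions; the CPU must match the PMU's "cpumask" file, and per-task or sampling events are rejected by fme_perf_event_init() above.

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr = { .size = sizeof(attr) };
		unsigned int type;
		uint64_t count;
		FILE *f;
		int fd;

		/* the PMU type id must be read from sysfs; "dfl_fme0" is assumed */
		f = fopen("/sys/bus/event_source/devices/dfl_fme0/type", "r");
		if (!f || fscanf(f, "%u", &type) != 1)
			return 1;
		fclose(f);

		attr.type = type;
		/* config = event | evtype << 12 | portid << 16 (see "format" above) */
		attr.config = 0x6 | (2ULL << 12) | (0xffULL << 16); /* fab_mmio_read, all ports */

		fd = syscall(__NR_perf_event_open, &attr, -1 /* all tasks */,
			     0 /* CPU advertised in cpumask */, -1, 0);
		if (fd < 0)
			return 1;

		sleep(1);
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			return 1;
		printf("fab_mmio_read: %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}
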
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
index 6685c8ef965b..4195dd68193e 100644
--- a/drivers/fpga/dfl-fme.h
+++ b/drivers/fpga/dfl-fme.h
@@ -38,5 +38,7 @@ extern const struct dfl_feature_id fme_pr_mgmt_id_table[];
extern const struct dfl_feature_ops fme_global_err_ops;
extern const struct dfl_feature_id fme_global_err_id_table[];
extern const struct attribute_group fme_global_err_group;
+extern const struct dfl_feature_ops fme_perf_ops;
+extern const struct dfl_feature_id fme_perf_id_table[];
#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 96a2b8274a33..990994874bf1 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1079,6 +1079,7 @@ static int __init dfl_fpga_init(void)
*/
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
+ struct dfl_feature_platform_data *pdata;
struct platform_device *port_pdev;
int ret = -ENODEV;
@@ -1093,7 +1094,11 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
goto put_dev_exit;
}
- ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev));
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ ret = dfl_feature_dev_use_begin(pdata, true);
+ mutex_unlock(&pdata->lock);
if (ret)
goto put_dev_exit;
@@ -1120,6 +1125,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
*/
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
+ struct dfl_feature_platform_data *pdata;
struct platform_device *port_pdev;
int ret = -ENODEV;
@@ -1138,7 +1144,12 @@ int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
if (ret)
goto put_dev_exit;
- dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev));
+ pdata = dev_get_platdata(&port_pdev->dev);
+
+ mutex_lock(&pdata->lock);
+ dfl_feature_dev_use_end(pdata);
+ mutex_unlock(&pdata->lock);
+
cdev->released_port_num--;
put_dev_exit:
put_device(&port_pdev->dev);
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 9f0e656de720..2f5d3052e36e 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -197,16 +197,16 @@ struct dfl_feature_driver {
 * feature dev (platform device)'s resources.
* @ioaddr: mapped mmio resource address.
* @ops: ops of this sub feature.
+ * @priv: priv data of this feature.
*/
struct dfl_feature {
u64 id;
int resource_index;
void __iomem *ioaddr;
const struct dfl_feature_ops *ops;
+ void *priv;
};
-#define DEV_STATUS_IN_USE 0
-
#define FEATURE_DEV_ID_UNUSED (-1)
/**
@@ -219,8 +219,9 @@ struct dfl_feature {
* @dfl_cdev: ptr to container device.
* @id: id used for this feature device.
* @disable_count: count for port disable.
+ * @excl_open: set on feature device exclusive open.
+ * @open_count: count for feature device open.
* @num: number for sub features.
- * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
* @private: ptr to feature dev private data.
* @features: sub features of this feature dev.
*/
@@ -232,26 +233,46 @@ struct dfl_feature_platform_data {
struct dfl_fpga_cdev *dfl_cdev;
int id;
unsigned int disable_count;
- unsigned long dev_status;
+ bool excl_open;
+ int open_count;
void *private;
int num;
- struct dfl_feature features[0];
+ struct dfl_feature features[];
};
static inline
-int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
+int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata,
+ bool excl)
{
- /* Test and set IN_USE flags to ensure file is exclusively used */
- if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
+ if (pdata->excl_open)
return -EBUSY;
+ if (excl) {
+ if (pdata->open_count)
+ return -EBUSY;
+
+ pdata->excl_open = true;
+ }
+ pdata->open_count++;
+
return 0;
}
static inline
void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
{
- clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
+ pdata->excl_open = false;
+
+ if (WARN_ON(pdata->open_count <= 0))
+ return;
+
+ pdata->open_count--;
+}
+
+static inline
+int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata)
+{
+ return pdata->open_count;
}
static inline
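
Illustration only, not part of the patch: a sketch of the semantics the reworked open-count helpers above are expected to provide. As in the afu/fme open paths, callers are assumed to hold pdata->lock around these calls.

	/* illustration: expected behaviour of the new open-count helpers */
	static void example_use_count_semantics(struct dfl_feature_platform_data *pdata)
	{
		mutex_lock(&pdata->lock);

		/* two non-exclusive openers may coexist */
		WARN_ON(dfl_feature_dev_use_begin(pdata, false));
		WARN_ON(dfl_feature_dev_use_begin(pdata, false));

		/* an exclusive open is rejected while other users hold the device */
		WARN_ON(dfl_feature_dev_use_begin(pdata, true) != -EBUSY);

		dfl_feature_dev_use_end(pdata);
		dfl_feature_dev_use_end(pdata);

		/* with no users left, an exclusive open succeeds and blocks others */
		WARN_ON(dfl_feature_dev_use_begin(pdata, true));
		WARN_ON(dfl_feature_dev_use_begin(pdata, false) != -EBUSY);
		dfl_feature_dev_use_end(pdata);

		mutex_unlock(&pdata->lock);
	}
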
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 56e112e14a10..8d689fea0dab 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -46,10 +46,16 @@ static int ice40_fpga_ops_write_init(struct fpga_manager *mgr,
struct spi_message message;
struct spi_transfer assert_cs_then_reset_delay = {
.cs_change = 1,
- .delay_usecs = ICE40_SPI_RESET_DELAY
+ .delay = {
+ .value = ICE40_SPI_RESET_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
};
struct spi_transfer housekeeping_delay_then_release_cs = {
- .delay_usecs = ICE40_SPI_HOUSEKEEPING_DELAY
+ .delay = {
+ .value = ICE40_SPI_HOUSEKEEPING_DELAY,
+ .unit = SPI_DELAY_UNIT_USECS
+ }
};
int ret;
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 4d8a87641587..b316369156fe 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -157,7 +157,8 @@ static int machxo2_cleanup(struct fpga_manager *mgr)
spi_message_init(&msg);
tx[1].tx_buf = &refresh;
tx[1].len = sizeof(refresh);
- tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[1], &msg);
ret = spi_sync(spi, &msg);
if (ret)
@@ -208,7 +209,8 @@ static int machxo2_write_init(struct fpga_manager *mgr,
spi_message_init(&msg);
tx[0].tx_buf = &enable;
tx[0].len = sizeof(enable);
- tx[0].delay_usecs = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.value = MACHXO2_LOW_DELAY_USEC;
+ tx[0].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[0], &msg);
tx[1].tx_buf = &erase;
@@ -269,7 +271,8 @@ static int machxo2_write(struct fpga_manager *mgr, const char *buf,
spi_message_init(&msg);
tx.tx_buf = payload;
tx.len = MACHXO2_BUF_SIZE;
- tx.delay_usecs = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.value = MACHXO2_HIGH_DELAY_USEC;
+ tx.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx, &msg);
ret = spi_sync(spi, &msg);
if (ret) {
@@ -317,7 +320,8 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
spi_message_init(&msg);
tx[1].tx_buf = &refresh;
tx[1].len = sizeof(refresh);
- tx[1].delay_usecs = MACHXO2_REFRESH_USEC;
+ tx[1].delay.value = MACHXO2_REFRESH_USEC;
+ tx[1].delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&tx[1], &msg);
ret = spi_sync(spi, &msg);
if (ret)
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 215d33789c74..44b7c569d4dc 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -154,11 +154,11 @@ static void s10_receive_callback(struct stratix10_svc_client *client,
* Here we set status bits as we receive them. Elsewhere, we always use
* test_and_clear_bit() to check status in priv->status
*/
- for (i = 0; i <= SVC_STATUS_RECONFIG_ERROR; i++)
+ for (i = 0; i <= SVC_STATUS_ERROR; i++)
if (status & (1 << i))
set_bit(i, &priv->status);
- if (status & BIT(SVC_STATUS_RECONFIG_BUFFER_DONE)) {
+ if (status & BIT(SVC_STATUS_BUFFER_DONE)) {
s10_unlock_bufs(priv, data->kaddr1);
s10_unlock_bufs(priv, data->kaddr2);
s10_unlock_bufs(priv, data->kaddr3);
@@ -209,8 +209,7 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
}
ret = 0;
- if (!test_and_clear_bit(SVC_STATUS_RECONFIG_REQUEST_OK,
- &priv->status)) {
+ if (!test_and_clear_bit(SVC_STATUS_OK, &priv->status)) {
ret = -ETIMEDOUT;
goto init_done;
}
@@ -323,17 +322,15 @@ static int s10_ops_write(struct fpga_manager *mgr, const char *buf,
&priv->status_return_completion,
S10_BUFFER_TIMEOUT);
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_DONE,
- &priv->status) ||
- test_and_clear_bit(SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
+ if (test_and_clear_bit(SVC_STATUS_BUFFER_DONE, &priv->status) ||
+ test_and_clear_bit(SVC_STATUS_BUFFER_SUBMITTED,
&priv->status)) {
ret = 0;
continue;
}
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -393,13 +390,11 @@ static int s10_ops_write_complete(struct fpga_manager *mgr,
timeout = ret;
ret = 0;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_COMPLETED,
- &priv->status))
+ if (test_and_clear_bit(SVC_STATUS_COMPLETED, &priv->status))
break;
- if (test_and_clear_bit(SVC_STATUS_RECONFIG_ERROR,
- &priv->status)) {
- dev_err(dev, "ERROR - giving up - SVC_STATUS_RECONFIG_ERROR\n");
+ if (test_and_clear_bit(SVC_STATUS_ERROR, &priv->status)) {
+ dev_err(dev, "ERROR - giving up - SVC_STATUS_ERROR\n");
ret = -EFAULT;
break;
}
@@ -482,7 +477,8 @@ static int s10_remove(struct platform_device *pdev)
}
static const struct of_device_id s10_of_match[] = {
- { .compatible = "intel,stratix10-soc-fpga-mgr", },
+ {.compatible = "intel,stratix10-soc-fpga-mgr"},
+ {.compatible = "intel,agilex-soc-fpga-mgr"},
{},
};
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index b8a88d21d038..4a1139e05280 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -40,16 +40,12 @@ static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
const char *buf, size_t size)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
struct zynqmp_fpga_priv *priv;
dma_addr_t dma_addr;
u32 eemi_flags = 0;
char *kbuf;
int ret;
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
- return -ENXIO;
-
priv = mgr->priv;
kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
@@ -63,7 +59,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
- ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+ ret = zynqmp_pm_fpga_load(dma_addr, size, eemi_flags);
dma_free_coherent(priv->dev, size, kbuf, dma_addr);
@@ -78,13 +74,9 @@ static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
{
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- u32 status;
-
- if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
- return FPGA_MGR_STATE_UNKNOWN;
+ u32 status = 0;
- eemi_ops->fpga_get_status(&status);
+ zynqmp_pm_fpga_get_status(&status);
if (status & IXR_FPGA_DONE_MASK)
return FPGA_MGR_STATE_OPERATING;
diff --git a/drivers/gnss/serial.h b/drivers/gnss/serial.h
index 980ffdc86c2a..621953f7821d 100644
--- a/drivers/gnss/serial.h
+++ b/drivers/gnss/serial.h
@@ -16,7 +16,7 @@ struct gnss_serial {
struct gnss_device *gdev;
speed_t speed;
const struct gnss_serial_ops *ops;
- unsigned long drvdata[0];
+ unsigned long drvdata[];
};
enum gnss_serial_pm_state {
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index effed3a8d398..2ecb1d3e8eeb 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -439,14 +439,18 @@ static int sirf_probe(struct serdev_device *serdev)
data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff",
GPIOD_OUT_LOW);
- if (IS_ERR(data->on_off))
+ if (IS_ERR(data->on_off)) {
+ ret = PTR_ERR(data->on_off);
goto err_put_device;
+ }
if (data->on_off) {
data->wakeup = devm_gpiod_get_optional(dev, "sirf,wakeup",
GPIOD_IN);
- if (IS_ERR(data->wakeup))
+ if (IS_ERR(data->wakeup)) {
+ ret = PTR_ERR(data->wakeup);
goto err_put_device;
+ }
ret = regulator_enable(data->vcc);
if (ret)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1b96169d84f7..bcacd9c74aa8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -73,6 +73,10 @@ config GPIO_GENERIC
depends on HAS_IOMEM # Only for IOMEM drivers
tristate
+config GPIO_REGMAP
+ depends on REGMAP
+ tristate
+
# put drivers in the right section, in alphabetical order
# This symbol is selected by both I2C and SPI expanders
@@ -422,7 +426,7 @@ config GPIO_OMAP
Say yes here to enable GPIO support for TI OMAP SoCs.
config GPIO_PL061
- bool "PrimeCell PL061 GPIO support"
+ tristate "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
@@ -439,7 +443,7 @@ config GPIO_PMIC_EIC_SPRD
config GPIO_PXA
bool "PXA GPIO support"
- depends on ARCH_PXA || ARCH_MMP
+ depends on ARCH_PXA || ARCH_MMP || COMPILE_TEST
help
Say yes here to support the PXA GPIO device
@@ -638,7 +642,7 @@ config GPIO_XGENE
config GPIO_XGENE_SB
tristate "APM X-Gene GPIO standby controller support"
- depends on ARCH_XGENE && OF_GPIO
+ depends on (ARCH_XGENE || COMPILE_TEST)
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -952,7 +956,7 @@ config GPIO_PCA953X
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
- depends on GPIO_PCA953X=y
+ depends on GPIO_PCA953X
select GPIOLIB_IRQCHIP
help
Say yes here to enable the pca953x to be used as an interrupt
@@ -1541,6 +1545,18 @@ config GPIO_VIPERBOARD
endmenu
+config GPIO_AGGREGATOR
+ tristate "GPIO Aggregator"
+ help
+ Say yes here to enable the GPIO Aggregator, which provides a way to
+ aggregate existing GPIO lines into a new virtual GPIO chip.
+ This can serve the following purposes:
+ - Assign permissions for a collection of GPIO lines to a user,
+ - Export a collection of GPIO lines to a virtual machine,
+ - Provide a generic driver for a GPIO-operated device in an
+ industrial control context, to be operated from userspace using
+ the GPIO chardev interface.
+
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
select IRQ_SIM
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b2cfc21a97f3..1e4894e0bf0f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
+obj-$(CONFIG_GPIO_REGMAP) += gpio-regmap.o
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
@@ -25,6 +26,7 @@ obj-$(CONFIG_GPIO_74XX_MMIO) += gpio-74xx-mmio.o
obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_AGGREGATOR) += gpio-aggregator.o
obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o
obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 3a44e6ae52bd..b989c9352da2 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -99,6 +99,10 @@ similar and probe a proper driver in the gpiolib subsystem.
In some cases it makes sense to create a GPIO chip from the local driver
for a few GPIOs. Those should stay where they are.
+At the same time it makes sense to get rid of code duplication in existing and
+newly added drivers. For example, gpio-ml-ioh should be incorporated into
+gpio-pch. In a similar way, gpio-intel-mid should be folded into gpio-pxa.
+
Generic MMIO GPIO
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
new file mode 100644
index 000000000000..9b0adbdddbfc
--- /dev/null
+++ b/drivers/gpio/gpio-aggregator.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// GPIO Aggregator
+//
+// Copyright (C) 2019-2020 Glider bv
+
+#define DRV_NAME "gpio-aggregator"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+
+/*
+ * GPIO Aggregator sysfs interface
+ */
+
+struct gpio_aggregator {
+ struct gpiod_lookup_table *lookups;
+ struct platform_device *pdev;
+ char args[];
+};
+
+static DEFINE_MUTEX(gpio_aggregator_lock); /* protects idr */
+static DEFINE_IDR(gpio_aggregator_idr);
+
+static char *get_arg(char **args)
+{
+ char *start = *args, *end;
+
+ start = skip_spaces(start);
+ if (!*start)
+ return NULL;
+
+ if (*start == '"') {
+ /* Quoted arg */
+ end = strchr(++start, '"');
+ if (!end)
+ return ERR_PTR(-EINVAL);
+ } else {
+ /* Unquoted arg */
+ for (end = start; *end && !isspace(*end); end++) ;
+ }
+
+ if (*end)
+ *end++ = '\0';
+
+ *args = end;
+ return start;
+}
+
+static bool isrange(const char *s)
+{
+ size_t n;
+
+ if (IS_ERR_OR_NULL(s))
+ return false;
+
+ while (1) {
+ n = strspn(s, "0123456789");
+ if (!n)
+ return false;
+
+ s += n;
+
+ switch (*s++) {
+ case '\0':
+ return true;
+
+ case '-':
+ case ',':
+ break;
+
+ default:
+ return false;
+ }
+ }
+}
+
+static int aggr_add_gpio(struct gpio_aggregator *aggr, const char *key,
+ int hwnum, unsigned int *n)
+{
+ struct gpiod_lookup_table *lookups;
+
+ lookups = krealloc(aggr->lookups, struct_size(lookups, table, *n + 2),
+ GFP_KERNEL);
+ if (!lookups)
+ return -ENOMEM;
+
+ lookups->table[*n] =
+ (struct gpiod_lookup)GPIO_LOOKUP_IDX(key, hwnum, NULL, *n, 0);
+
+ (*n)++;
+ memset(&lookups->table[*n], 0, sizeof(lookups->table[*n]));
+
+ aggr->lookups = lookups;
+ return 0;
+}
+
+static int aggr_parse(struct gpio_aggregator *aggr)
+{
+ unsigned int first_index, last_index, i, n = 0;
+ char *name, *offsets, *first, *last, *next;
+ char *args = aggr->args;
+ int error;
+
+ for (name = get_arg(&args), offsets = get_arg(&args); name;
+ offsets = get_arg(&args)) {
+ if (IS_ERR(name)) {
+ pr_err("Cannot get GPIO specifier: %pe\n", name);
+ return PTR_ERR(name);
+ }
+
+ if (!isrange(offsets)) {
+ /* Named GPIO line */
+ error = aggr_add_gpio(aggr, name, U16_MAX, &n);
+ if (error)
+ return error;
+
+ name = offsets;
+ continue;
+ }
+
+ /* GPIO chip + offset(s) */
+ for (first = offsets; *first; first = next) {
+ next = strchrnul(first, ',');
+ if (*next)
+ *next++ = '\0';
+
+ last = strchr(first, '-');
+ if (last)
+ *last++ = '\0';
+
+ if (kstrtouint(first, 10, &first_index)) {
+ pr_err("Cannot parse GPIO index %s\n", first);
+ return -EINVAL;
+ }
+
+ if (!last) {
+ last_index = first_index;
+ } else if (kstrtouint(last, 10, &last_index)) {
+ pr_err("Cannot parse GPIO index %s\n", last);
+ return -EINVAL;
+ }
+
+ for (i = first_index; i <= last_index; i++) {
+ error = aggr_add_gpio(aggr, name, i, &n);
+ if (error)
+ return error;
+ }
+ }
+
+ name = get_arg(&args);
+ }
+
+ if (!n) {
+ pr_err("No GPIOs specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t new_device_store(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct gpio_aggregator *aggr;
+ struct platform_device *pdev;
+ int res, id;
+
+ /* kernfs guarantees string termination, so count + 1 is safe */
+ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
+ if (!aggr)
+ return -ENOMEM;
+
+ memcpy(aggr->args, buf, count + 1);
+
+ aggr->lookups = kzalloc(struct_size(aggr->lookups, table, 1),
+ GFP_KERNEL);
+ if (!aggr->lookups) {
+ res = -ENOMEM;
+ goto free_ga;
+ }
+
+ mutex_lock(&gpio_aggregator_lock);
+ id = idr_alloc(&gpio_aggregator_idr, aggr, 0, 0, GFP_KERNEL);
+ mutex_unlock(&gpio_aggregator_lock);
+
+ if (id < 0) {
+ res = id;
+ goto free_table;
+ }
+
+ aggr->lookups->dev_id = kasprintf(GFP_KERNEL, "%s.%d", DRV_NAME, id);
+ if (!aggr->lookups->dev_id) {
+ res = -ENOMEM;
+ goto remove_idr;
+ }
+
+ res = aggr_parse(aggr);
+ if (res)
+ goto free_dev_id;
+
+ gpiod_add_lookup_table(aggr->lookups);
+
+ pdev = platform_device_register_simple(DRV_NAME, id, NULL, 0);
+ if (IS_ERR(pdev)) {
+ res = PTR_ERR(pdev);
+ goto remove_table;
+ }
+
+ aggr->pdev = pdev;
+ return count;
+
+remove_table:
+ gpiod_remove_lookup_table(aggr->lookups);
+free_dev_id:
+ kfree(aggr->lookups->dev_id);
+remove_idr:
+ mutex_lock(&gpio_aggregator_lock);
+ idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+free_table:
+ kfree(aggr->lookups);
+free_ga:
+ kfree(aggr);
+ return res;
+}
+
+static DRIVER_ATTR_WO(new_device);
+
+static void gpio_aggregator_free(struct gpio_aggregator *aggr)
+{
+ platform_device_unregister(aggr->pdev);
+ gpiod_remove_lookup_table(aggr->lookups);
+ kfree(aggr->lookups->dev_id);
+ kfree(aggr->lookups);
+ kfree(aggr);
+}
+
+static ssize_t delete_device_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ struct gpio_aggregator *aggr;
+ unsigned int id;
+ int error;
+
+ if (!str_has_prefix(buf, DRV_NAME "."))
+ return -EINVAL;
+
+ error = kstrtouint(buf + strlen(DRV_NAME "."), 10, &id);
+ if (error)
+ return error;
+
+ mutex_lock(&gpio_aggregator_lock);
+ aggr = idr_remove(&gpio_aggregator_idr, id);
+ mutex_unlock(&gpio_aggregator_lock);
+ if (!aggr)
+ return -ENOENT;
+
+ gpio_aggregator_free(aggr);
+ return count;
+}
+static DRIVER_ATTR_WO(delete_device);
+
+static struct attribute *gpio_aggregator_attrs[] = {
+ &driver_attr_new_device.attr,
+ &driver_attr_delete_device.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gpio_aggregator);
+
+static int __exit gpio_aggregator_idr_remove(int id, void *p, void *data)
+{
+ gpio_aggregator_free(p);
+ return 0;
+}
+
+static void __exit gpio_aggregator_remove_all(void)
+{
+ mutex_lock(&gpio_aggregator_lock);
+ idr_for_each(&gpio_aggregator_idr, gpio_aggregator_idr_remove, NULL);
+ idr_destroy(&gpio_aggregator_idr);
+ mutex_unlock(&gpio_aggregator_lock);
+}
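
Illustration only, not part of the patch: a minimal userspace sketch of the new_device string format parsed by get_arg() and aggr_parse() above, assuming the driver registers under DRV_NAME on the platform bus. Arguments are space-separated pairs of a GPIO chip label (or a line name) and an offset list; "e6052000.gpio" is a made-up chip label.

	#include <stdio.h>

	int main(void)
	{
		FILE *f;

		f = fopen("/sys/bus/platform/drivers/gpio-aggregator/new_device", "w");
		if (!f)
			return 1;

		/* "<chip label> <offsets>": single numbers, ranges and comma lists */
		fprintf(f, "e6052000.gpio 19,20-21\n");

		return fclose(f) ? 1 : 0;
	}
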
+
+
+/*
+ * GPIO Forwarder
+ */
+
+struct gpiochip_fwd {
+ struct gpio_chip chip;
+ struct gpio_desc **descs;
+ union {
+ struct mutex mlock; /* protects tmp[] if can_sleep */
+ spinlock_t slock; /* protects tmp[] if !can_sleep */
+ };
+ unsigned long tmp[]; /* values and descs for multiple ops */
+};
+
+static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_get_direction(fwd->descs[offset]);
+}
+
+static int gpio_fwd_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_direction_input(fwd->descs[offset]);
+}
+
+static int gpio_fwd_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_direction_output(fwd->descs[offset], value);
+}
+
+static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_get_value(fwd->descs[offset]);
+}
+
+static int gpio_fwd_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long *values, flags = 0;
+ struct gpio_desc **descs;
+ unsigned int i, j = 0;
+ int error;
+
+ if (chip->can_sleep)
+ mutex_lock(&fwd->mlock);
+ else
+ spin_lock_irqsave(&fwd->slock, flags);
+
+ /* Both values bitmap and desc pointers are stored in tmp[] */
+ values = &fwd->tmp[0];
+ descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
+
+ bitmap_clear(values, 0, fwd->chip.ngpio);
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ descs[j++] = fwd->descs[i];
+
+ error = gpiod_get_array_value(j, descs, NULL, values);
+ if (!error) {
+ j = 0;
+ for_each_set_bit(i, mask, fwd->chip.ngpio)
+ __assign_bit(i, bits, test_bit(j++, values));
+ }
+
+ if (chip->can_sleep)
+ mutex_unlock(&fwd->mlock);
+ else
+ spin_unlock_irqrestore(&fwd->slock, flags);
+
+ return error;
+}
+
+static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ gpiod_set_value(fwd->descs[offset], value);
+}
+
+static void gpio_fwd_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ unsigned long *values, flags = 0;
+ struct gpio_desc **descs;
+ unsigned int i, j = 0;
+
+ if (chip->can_sleep)
+ mutex_lock(&fwd->mlock);
+ else
+ spin_lock_irqsave(&fwd->slock, flags);
+
+ /* Both values bitmap and desc pointers are stored in tmp[] */
+ values = &fwd->tmp[0];
+ descs = (void *)&fwd->tmp[BITS_TO_LONGS(fwd->chip.ngpio)];
+
+ for_each_set_bit(i, mask, fwd->chip.ngpio) {
+ __assign_bit(j, values, test_bit(i, bits));
+ descs[j++] = fwd->descs[i];
+ }
+
+ gpiod_set_array_value(j, descs, NULL, values);
+
+ if (chip->can_sleep)
+ mutex_unlock(&fwd->mlock);
+ else
+ spin_unlock_irqrestore(&fwd->slock, flags);
+}
+
+static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return gpiod_set_config(fwd->descs[offset], config);
+}
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and must not be deallocated
+ * before the forwarder has been destroyed.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[])
+{
+ const char *label = dev_name(dev);
+ struct gpiochip_fwd *fwd;
+ struct gpio_chip *chip;
+ unsigned int i;
+ int error;
+
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp,
+ BITS_TO_LONGS(ngpios) + ngpios), GFP_KERNEL);
+ if (!fwd)
+ return ERR_PTR(-ENOMEM);
+
+ chip = &fwd->chip;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ * If any of the chips support .set_config(), then the forwarder will
+ * support setting configs.
+ */
+ for (i = 0; i < ngpios; i++) {
+ struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+
+ dev_dbg(dev, "%u => gpio-%d\n", i, desc_to_gpio(descs[i]));
+
+ if (gpiod_cansleep(descs[i]))
+ chip->can_sleep = true;
+ if (parent && parent->set_config)
+ chip->set_config = gpio_fwd_set_config;
+ }
+
+ chip->label = label;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->get_direction = gpio_fwd_get_direction;
+ chip->direction_input = gpio_fwd_direction_input;
+ chip->direction_output = gpio_fwd_direction_output;
+ chip->get = gpio_fwd_get;
+ chip->get_multiple = gpio_fwd_get_multiple;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple;
+ chip->base = -1;
+ chip->ngpio = ngpios;
+ fwd->descs = descs;
+
+ if (chip->can_sleep)
+ mutex_init(&fwd->mlock);
+ else
+ spin_lock_init(&fwd->slock);
+
+ error = devm_gpiochip_add_data(dev, chip, fwd);
+ if (error)
+ return ERR_PTR(error);
+
+ return fwd;
+}
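The single allocation above packs two scratch areas into the flexible tmp[] array so the multi-line callbacks need no further memory. A minimal sketch of the layout arithmetic, assuming a 64-bit unsigned long and a made-up ngpios of 40, both purely for illustration:

/*
 * struct_size(fwd, tmp, BITS_TO_LONGS(ngpios) + ngpios) expands to
 *	sizeof(struct gpiochip_fwd) +
 *	(BITS_TO_LONGS(ngpios) + ngpios) * sizeof(unsigned long)
 * With ngpios = 40 on a 64-bit build that is the header plus 1 + 40 longs:
 *	tmp[0]           the values bitmap (one long covers all 40 lines)
 *	tmp[1]..tmp[40]  reused as the struct gpio_desc *descs[] scratch array
 * which is exactly how gpio_fwd_get_multiple()/gpio_fwd_set_multiple() slice it.
 */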
+
+
+/*
+ * GPIO Aggregator platform device
+ */
+
+static int gpio_aggregator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_desc **descs;
+ struct gpiochip_fwd *fwd;
+ int i, n;
+
+ n = gpiod_count(dev, NULL);
+ if (n < 0)
+ return n;
+
+ descs = devm_kmalloc_array(dev, n, sizeof(*descs), GFP_KERNEL);
+ if (!descs)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ descs[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(descs[i]))
+ return PTR_ERR(descs[i]);
+ }
+
+ fwd = gpiochip_fwd_create(dev, n, descs);
+ if (IS_ERR(fwd))
+ return PTR_ERR(fwd);
+
+ platform_set_drvdata(pdev, fwd);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_aggregator_dt_ids[] = {
+ /*
+ * Add GPIO-operated devices controlled from userspace below,
+ * or use "driver_override" in sysfs
+ */
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_aggregator_dt_ids);
+#endif
+
+static struct platform_driver gpio_aggregator_driver = {
+ .probe = gpio_aggregator_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .groups = gpio_aggregator_groups,
+ .of_match_table = of_match_ptr(gpio_aggregator_dt_ids),
+ },
+};
+
+static int __init gpio_aggregator_init(void)
+{
+ return platform_driver_register(&gpio_aggregator_driver);
+}
+module_init(gpio_aggregator_init);
+
+static void __exit gpio_aggregator_exit(void)
+{
+ gpio_aggregator_remove_all();
+ platform_driver_unregister(&gpio_aggregator_driver);
+}
+module_exit(gpio_aggregator_exit);
+
+MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>");
+MODULE_DESCRIPTION("GPIO Aggregator");
+MODULE_LICENSE("GPL v2");
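For completeness, the new_device and delete_device driver attributes defined above are the userspace entry points for creating and destroying aggregator instances. A hypothetical userspace sketch, assuming DRV_NAME expands to "gpio-aggregator" (its definition is outside this hunk) and that a chip labelled "gpiochip0" exists; the path, label and line offsets below are illustrative, not taken from the patch:

/* Hypothetical userspace helper: aggregate lines 2-4 of "gpiochip0". */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/platform/drivers/gpio-aggregator/new_device", "w");

	if (!f)
		return 1;

	/* Format handled by aggr_parse(): <chip label or line name> <offset[-offset]> ... */
	fputs("gpiochip0 2-4", f);

	return fclose(f) ? 1 : 0;
}

Writing the resulting device name back to delete_device (e.g. "gpio-aggregator.0", built by the kasprintf() in new_device_store()) tears the instance down again via delete_device_store().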
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 92e127e74813..1d8d55bd63aa 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -49,7 +49,9 @@
#define GPIO_EXT_PORTC 0x58
#define GPIO_EXT_PORTD 0x5c
+#define DWAPB_DRIVER_NAME "gpio-dwapb"
#define DWAPB_MAX_PORTS 4
+
#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */
#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */
#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */
@@ -62,6 +64,8 @@
#define GPIO_INTSTATUS_V2 0x3c
#define GPIO_PORTA_EOI_V2 0x40
+#define DWAPB_NR_CLOCKS 2
+
struct dwapb_gpio;
#ifdef CONFIG_PM_SLEEP
@@ -97,7 +101,7 @@ struct dwapb_gpio {
struct irq_domain *domain;
unsigned int flags;
struct reset_control *rst;
- struct clk *clk;
+ struct clk_bulk_data clks[DWAPB_NR_CLOCKS];
};
static inline u32 gpio_reg_v2_convert(unsigned int offset)
@@ -189,22 +193,21 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- u32 irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
- u32 ret = irq_status;
+ unsigned long irq_status;
+ irq_hw_number_t hwirq;
- while (irq_status) {
- int hwirq = fls(irq_status) - 1;
+ irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
+ for_each_set_bit(hwirq, &irq_status, 32) {
int gpio_irq = irq_find_mapping(gpio->domain, hwirq);
+ u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
- irq_status &= ~BIT(hwirq);
- if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK)
- == IRQ_TYPE_EDGE_BOTH)
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
dwapb_toggle_trigger(gpio, hwirq);
}
- return ret;
+ return irq_status;
}
static void dwapb_irq_handler(struct irq_desc *desc)
@@ -212,10 +215,9 @@ static void dwapb_irq_handler(struct irq_desc *desc)
struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ chained_irq_enter(chip, desc);
dwapb_do_irq(gpio);
-
- if (chip->irq_eoi)
- chip->irq_eoi(irq_desc_get_irq_data(desc));
+ chained_irq_exit(chip, desc);
}
static void dwapb_irq_enable(struct irq_data *d)
@@ -228,7 +230,7 @@ static void dwapb_irq_enable(struct irq_data *d)
spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
- val |= BIT(d->hwirq);
+ val |= BIT(irqd_to_hwirq(d));
dwapb_write(gpio, GPIO_INTEN, val);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@@ -243,46 +245,20 @@ static void dwapb_irq_disable(struct irq_data *d)
spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
- val &= ~BIT(d->hwirq);
+ val &= ~BIT(irqd_to_hwirq(d));
dwapb_write(gpio, GPIO_INTEN, val);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
-static int dwapb_irq_reqres(struct irq_data *d)
-{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
- int ret;
-
- ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d));
- if (ret) {
- dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return ret;
- }
- return 0;
-}
-
-static void dwapb_irq_relres(struct irq_data *d)
-{
- struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
- struct dwapb_gpio *gpio = igc->private;
- struct gpio_chip *gc = &gpio->ports[0].gc;
-
- gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d));
-}
-
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct gpio_chip *gc = &gpio->ports[0].gc;
- int bit = d->hwirq;
+ irq_hw_number_t bit = irqd_to_hwirq(d);
unsigned long level, polarity, flags;
- if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
- IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -328,11 +304,12 @@ static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = igc->private;
struct dwapb_context *ctx = gpio->ports[0].ctx;
+ irq_hw_number_t bit = irqd_to_hwirq(d);
if (enable)
- ctx->wake_en |= BIT(d->hwirq);
+ ctx->wake_en |= BIT(bit);
else
- ctx->wake_en &= ~BIT(d->hwirq);
+ ctx->wake_en &= ~BIT(bit);
return 0;
}
@@ -350,9 +327,10 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
- dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask);
+ val_deb |= mask;
else
- dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask);
+ val_deb &= ~mask;
+ dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -373,12 +351,7 @@ static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset,
static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
{
- u32 worked;
- struct dwapb_gpio *gpio = dev_id;
-
- worked = dwapb_do_irq(gpio);
-
- return worked ? IRQ_HANDLED : IRQ_NONE;
+ return IRQ_RETVAL(dwapb_do_irq(dev_id));
}
static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
@@ -388,17 +361,23 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct gpio_chip *gc = &port->gc;
struct fwnode_handle *fwnode = pp->fwnode;
struct irq_chip_generic *irq_gc = NULL;
- unsigned int hwirq, ngpio = gc->ngpio;
+ unsigned int ngpio = gc->ngpio;
struct irq_chip_type *ct;
+ irq_hw_number_t hwirq;
int err, i;
+ if (memchr_inv(pp->irq, 0, sizeof(pp->irq)) == NULL) {
+ dev_warn(gpio->dev, "no IRQ for port%d\n", pp->idx);
+ return;
+ }
+
gpio->domain = irq_domain_create_linear(fwnode, ngpio,
&irq_generic_chip_ops, gpio);
if (!gpio->domain)
return;
err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2,
- "gpio-dwapb", handle_level_irq,
+ DWAPB_DRIVER_NAME, handle_bad_irq,
IRQ_NOREQUEST, 0,
IRQ_GC_INIT_NESTED_LOCK);
if (err) {
@@ -426,8 +405,6 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
ct->chip.irq_set_type = dwapb_irq_set_type;
ct->chip.irq_enable = dwapb_irq_enable;
ct->chip.irq_disable = dwapb_irq_disable;
- ct->chip.irq_request_resources = dwapb_irq_reqres;
- ct->chip.irq_release_resources = dwapb_irq_relres;
#ifdef CONFIG_PM_SLEEP
ct->chip.irq_set_wake = dwapb_irq_set_wake;
#endif
@@ -437,6 +414,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
}
irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
+ irq_gc->chip_types[0].handler = handle_level_irq;
irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
irq_gc->chip_types[1].handler = handle_edge_irq;
@@ -444,7 +422,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
int i;
for (i = 0; i < pp->ngpio; i++) {
- if (pp->irq[i] >= 0)
+ if (pp->irq[i])
irq_set_chained_handler_and_data(pp->irq[i],
dwapb_irq_handler, gpio);
}
@@ -455,7 +433,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
*/
err = devm_request_irq(gpio->dev, pp->irq[0],
dwapb_irq_handler_mfd,
- IRQF_SHARED, "gpio-dwapb-mfd", gpio);
+ IRQF_SHARED, DWAPB_DRIVER_NAME, gpio);
if (err) {
dev_err(gpio->dev, "error requesting IRQ\n");
irq_domain_remove(gpio->domain);
@@ -464,7 +442,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
}
}
- for (hwirq = 0 ; hwirq < ngpio ; hwirq++)
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
irq_create_mapping(gpio->domain, hwirq);
port->gc.to_irq = dwapb_gpio_to_irq;
@@ -480,7 +458,7 @@ static void dwapb_irq_teardown(struct dwapb_gpio *gpio)
if (!gpio->domain)
return;
- for (hwirq = 0 ; hwirq < ngpio ; hwirq++)
+ for (hwirq = 0; hwirq < ngpio; hwirq++)
irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq));
irq_domain_remove(gpio->domain);
@@ -505,10 +483,9 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
return -ENOMEM;
#endif
- dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE);
- set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE);
- dirout = gpio->regs + GPIO_SWPORTA_DDR +
- (pp->idx * GPIO_SWPORT_DDR_STRIDE);
+ dat = gpio->regs + GPIO_EXT_PORTA + pp->idx * GPIO_EXT_PORT_STRIDE;
+ set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
+ dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
/* This registers 32 GPIO lines per port */
err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
@@ -529,40 +506,66 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
if (pp->idx == 0)
port->gc.set_config = dwapb_gpio_set_config;
- if (pp->has_irq)
+ /* Only port A can provide interrupts in all configurations of the IP */
+ if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
err = gpiochip_add_data(&port->gc, port);
- if (err)
+ if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
- else
- port->is_registered = true;
+ return err;
+ }
/* Add GPIO-signaled ACPI event support */
- if (pp->has_irq)
- acpi_gpiochip_request_interrupts(&port->gc);
+ acpi_gpiochip_request_interrupts(&port->gc);
- return err;
+ port->is_registered = true;
+
+ return 0;
}
static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
{
unsigned int m;
- for (m = 0; m < gpio->nr_ports; ++m)
- if (gpio->ports[m].is_registered)
- gpiochip_remove(&gpio->ports[m].gc);
+ for (m = 0; m < gpio->nr_ports; ++m) {
+ struct dwapb_gpio_port *port = &gpio->ports[m];
+
+ if (!port->is_registered)
+ continue;
+
+ acpi_gpiochip_free_interrupts(&port->gc);
+ gpiochip_remove(&port->gc);
+ }
+}
+
+static void dwapb_get_irq(struct device *dev, struct fwnode_handle *fwnode,
+ struct dwapb_port_property *pp)
+{
+ struct device_node *np = NULL;
+ int irq = -ENXIO, j;
+
+ if (fwnode_property_read_bool(fwnode, "interrupt-controller"))
+ np = to_of_node(fwnode);
+
+ for (j = 0; j < pp->ngpio; j++) {
+ if (np)
+ irq = of_irq_get(np, j);
+ else if (has_acpi_companion(dev))
+ irq = platform_get_irq_optional(to_platform_device(dev), j);
+ if (irq > 0)
+ pp->irq[j] = irq;
+ }
}
-static struct dwapb_platform_data *
-dwapb_gpio_get_pdata(struct device *dev)
+static struct dwapb_platform_data *dwapb_gpio_get_pdata(struct device *dev)
{
struct fwnode_handle *fwnode;
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
- int i, j;
+ int i;
nports = device_get_child_node_count(dev);
if (nports == 0)
@@ -580,8 +583,6 @@ dwapb_gpio_get_pdata(struct device *dev)
i = 0;
device_for_each_child_node(dev, fwnode) {
- struct device_node *np = NULL;
-
pp = &pdata->properties[i++];
pp->fwnode = fwnode;
@@ -593,8 +594,7 @@ dwapb_gpio_get_pdata(struct device *dev)
return ERR_PTR(-EINVAL);
}
- if (fwnode_property_read_u32(fwnode, "snps,nr-gpios",
- &pp->ngpio)) {
+ if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", &pp->ngpio)) {
dev_info(dev,
"failed to get number of gpios for port%d\n",
i);
@@ -608,28 +608,8 @@ dwapb_gpio_get_pdata(struct device *dev)
* Only port A can provide interrupts in all configurations of
* the IP.
*/
- if (pp->idx != 0)
- continue;
-
- if (dev->of_node && fwnode_property_read_bool(fwnode,
- "interrupt-controller")) {
- np = to_of_node(fwnode);
- }
-
- for (j = 0; j < pp->ngpio; j++) {
- pp->irq[j] = -ENXIO;
-
- if (np)
- pp->irq[j] = of_irq_get(np, j);
- else if (has_acpi_companion(dev))
- pp->irq[j] = platform_get_irq(to_platform_device(dev), j);
-
- if (pp->irq[j] >= 0)
- pp->has_irq = true;
- }
-
- if (!pp->has_irq)
- dev_warn(dev, "no irq for port%d\n", pp->idx);
+ if (pp->idx == 0)
+ dwapb_get_irq(dev, fwnode, pp);
}
return pdata;
@@ -689,29 +669,24 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
- /* Optional bus clock */
- gpio->clk = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(gpio->clk)) {
- err = clk_prepare_enable(gpio->clk);
- if (err) {
- dev_info(&pdev->dev, "Cannot enable clock\n");
- return err;
- }
+ /* Optional bus and debounce clocks */
+ gpio->clks[0].id = "bus";
+ gpio->clks[1].id = "db";
+ err = devm_clk_bulk_get_optional(&pdev->dev, DWAPB_NR_CLOCKS,
+ gpio->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get APB/Debounce clocks\n");
+ return err;
}
- gpio->flags = 0;
- if (dev->of_node) {
- gpio->flags = (uintptr_t)of_device_get_match_data(dev);
- } else if (has_acpi_companion(dev)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(dwapb_acpi_match, dev);
- if (acpi_id) {
- if (acpi_id->driver_data)
- gpio->flags = acpi_id->driver_data;
- }
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable APB/Debounce clocks\n");
+ return err;
}
+ gpio->flags = (uintptr_t)device_get_match_data(dev);
+
for (i = 0; i < gpio->nr_ports; i++) {
err = dwapb_gpio_add_port(gpio, &pdata->properties[i], i);
if (err)
@@ -724,7 +699,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
out_unregister:
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return err;
}
@@ -736,7 +711,7 @@ static int dwapb_gpio_remove(struct platform_device *pdev)
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
reset_control_assert(gpio->rst);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -755,8 +730,6 @@ static int dwapb_gpio_suspend(struct device *dev)
unsigned int idx = gpio->ports[i].idx;
struct dwapb_context *ctx = gpio->ports[i].ctx;
- BUG_ON(!ctx);
-
offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
ctx->dir = dwapb_read(gpio, offset);
@@ -775,13 +748,12 @@ static int dwapb_gpio_suspend(struct device *dev)
ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
/* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK,
- 0xffffffff & ~ctx->wake_en);
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
}
}
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
- clk_disable_unprepare(gpio->clk);
+ clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
return 0;
}
@@ -791,10 +763,13 @@ static int dwapb_gpio_resume(struct device *dev)
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
struct gpio_chip *gc = &gpio->ports[0].gc;
unsigned long flags;
- int i;
+ int i, err;
- if (!IS_ERR(gpio->clk))
- clk_prepare_enable(gpio->clk);
+ err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
+ if (err) {
+ dev_err(gpio->dev, "Cannot reenable APB/Debounce clocks\n");
+ return err;
+ }
spin_lock_irqsave(&gc->bgpio_lock, flags);
for (i = 0; i < gpio->nr_ports; i++) {
@@ -802,8 +777,6 @@ static int dwapb_gpio_resume(struct device *dev)
unsigned int idx = gpio->ports[i].idx;
struct dwapb_context *ctx = gpio->ports[i].ctx;
- BUG_ON(!ctx);
-
offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
dwapb_write(gpio, offset, ctx->data);
@@ -836,10 +809,10 @@ static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
static struct platform_driver dwapb_gpio_driver = {
.driver = {
- .name = "gpio-dwapb",
+ .name = DWAPB_DRIVER_NAME,
.pm = &dwapb_gpio_pm_ops,
- .of_match_table = of_match_ptr(dwapb_of_match),
- .acpi_match_table = ACPI_PTR(dwapb_acpi_match),
+ .of_match_table = dwapb_of_match,
+ .acpi_match_table = dwapb_acpi_match,
},
.probe = dwapb_gpio_probe,
.remove = dwapb_gpio_remove,
@@ -850,3 +823,4 @@ module_platform_driver(dwapb_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver");
+MODULE_ALIAS("platform:" DWAPB_DRIVER_NAME);
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index cadd02993539..18a3147f5a42 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -36,9 +36,19 @@
#define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */
#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for f81966 */
-
-
-enum chips { f71869, f71869a, f71882fg, f71889a, f71889f, f81866, f81804 };
+#define SIO_F81865_ID 0x0704 /* F81865 chipset ID */
+
+
+enum chips {
+ f71869,
+ f71869a,
+ f71882fg,
+ f71889a,
+ f71889f,
+ f81866,
+ f81804,
+ f81865,
+};
static const char * const f7188x_names[] = {
"f71869",
@@ -48,6 +58,7 @@ static const char * const f7188x_names[] = {
"f71889f",
"f81866",
"f81804",
+ "f81865",
};
struct f7188x_sio {
@@ -233,6 +244,15 @@ static struct f7188x_gpio_bank f81804_gpio_bank[] = {
F7188X_GPIO_BANK(90, 8, 0x98),
};
+static struct f7188x_gpio_bank f81865_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 8, 0xA0),
+ F7188X_GPIO_BANK(60, 5, 0x90),
+};
static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
@@ -425,6 +445,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f81804_gpio_bank);
data->bank = f81804_gpio_bank;
break;
+ case f81865:
+ data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
+ data->bank = f81865_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -490,6 +514,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F81804_ID:
sio->type = f81804;
break;
+ case SIO_F81865_ID:
+ sio->type = f81865;
+ break;
default:
pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
goto err;
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index fbddb1662428..4031164780f7 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -193,7 +193,7 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
if (val == deb_div) {
/*
* The debounce timer happens to already be set to the
- * desireable value, what a coincidence! We can just enable
+ * desirable value, what a coincidence! We can just enable
* debounce on this GPIO line and return. This happens more
* often than you think, for example when all GPIO keys
* on a system are requesting the same debounce interval.
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 2f086d0aa1f4..9960bb8b0f5b 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -89,7 +89,7 @@ static struct {
struct device *dev;
struct gpio_chip chip;
struct resource *gpio_base; /* GPIO IO base */
- struct resource *pm_base; /* Power Mangagment IO base */
+ struct resource *pm_base; /* Power Management IO base */
struct ichx_desc *desc; /* Pointer to chipset-specific description */
u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
u8 use_gpio; /* Which GPIO groups are usable */
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 1e1935c51096..b8c1fe20f49a 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -47,7 +47,7 @@
static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct max7301 *ts = gpiochip_get_data(chip);
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
u8 offset_bits, pin_config;
int ret;
@@ -89,7 +89,7 @@ static int __max7301_set(struct max7301 *ts, unsigned offset, int value)
static int max7301_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- struct max7301 *ts = gpiochip_get_data(chip);
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
u8 *config;
u8 offset_bits;
int ret;
@@ -189,10 +189,6 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.parent = dev;
ts->chip.owner = THIS_MODULE;
- ret = gpiochip_add_data(&ts->chip, ts);
- if (ret)
- goto exit_destroy;
-
/*
* initialize pullups according to platform data and cache the
* register values for later use.
@@ -214,7 +210,9 @@ int __max730x_probe(struct max7301 *ts)
}
}
- return ret;
+ ret = gpiochip_add_data(&ts->chip, ts);
+ if (!ret)
+ return ret;
exit_destroy:
mutex_destroy(&ts->lock);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 501e89548f53..37c5363e391e 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -145,7 +145,9 @@ static int mb86s70_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
for (index = 0;; index++) {
irq = platform_get_irq(to_platform_device(gc->parent), index);
- if (irq <= 0)
+ if (irq < 0)
+ return irq;
+ if (irq == 0)
break;
if (irq_get_irq_data(irq)->hwirq == offset)
return irq;
@@ -168,15 +170,13 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
- if (!has_acpi_companion(&pdev->dev)) {
- gchip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(gchip->clk))
- return PTR_ERR(gchip->clk);
+ gchip->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(gchip->clk))
+ return PTR_ERR(gchip->clk);
- ret = clk_prepare_enable(gchip->clk);
- if (ret)
- return ret;
- }
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
spin_lock_init(&gchip->lock);
@@ -186,15 +186,13 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
gchip->gc.set = mb86s70_gpio_set;
+ gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
gchip->gc.owner = THIS_MODULE;
gchip->gc.parent = &pdev->dev;
gchip->gc.base = -1;
- if (has_acpi_companion(&pdev->dev))
- gchip->gc.to_irq = mb86s70_gpio_to_irq;
-
ret = gpiochip_add_data(&gchip->gc, gchip);
if (ret) {
dev_err(&pdev->dev, "couldn't register gpio driver\n");
@@ -202,8 +200,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
return ret;
}
- if (has_acpi_companion(&pdev->dev))
- acpi_gpiochip_request_interrupts(&gchip->gc);
+ acpi_gpiochip_request_interrupts(&gchip->gc);
return 0;
}
@@ -212,8 +209,7 @@ static int mb86s70_gpio_remove(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip = platform_get_drvdata(pdev);
- if (has_acpi_companion(&pdev->dev))
- acpi_gpiochip_free_interrupts(&gchip->gc);
+ acpi_gpiochip_free_interrupts(&gchip->gc);
gpiochip_remove(&gchip->gc);
clk_disable_unprepare(gchip->clk);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 48918a016cd8..706687fab634 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -443,8 +443,8 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
base = pcim_iomap_table(pdev)[1];
- irq_base = readl(base);
- gpio_base = readl(sizeof(u32) + base);
+ irq_base = readl(base + 0 * sizeof(u32));
+ gpio_base = readl(base + 1 * sizeof(u32));
/* Release the IO mapping, since we already get the info from BAR1 */
pcim_iounmap_regions(pdev, BIT(1));
@@ -473,6 +473,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
raw_spin_lock_init(&priv->lock);
+ retval = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (retval < 0)
+ return retval;
+
girq = &priv->chip.irq;
girq->chip = &mrfld_irqchip;
girq->init_hw = mrfld_irq_init_hw;
@@ -482,7 +486,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = pdev->irq;
+ girq->parents[0] = pci_irq_vector(pdev, 0);
girq->first = irq_base;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index da570e63589d..94d5efce1721 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -14,7 +14,6 @@
#include <linux/resource.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/version.h>
/*
* There are 3 YU GPIO blocks:
@@ -110,8 +109,8 @@ static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev)
}
yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size);
- if (IS_ERR(yu_arm_gpio_lock_param.io))
- ret = PTR_ERR(yu_arm_gpio_lock_param.io);
+ if (!yu_arm_gpio_lock_param.io)
+ ret = -ENOMEM;
exit:
mutex_unlock(yu_arm_gpio_lock_param.lock);
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index f460d71b0c92..538e31fe8903 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -36,7 +36,7 @@ struct ltq_mm {
* @chip: Pointer to our private data structure.
*
* Write the shadow value to the EBU to set the gpios. We need to set the
- * global EBU lock to make sure that PCI/MTD dont break.
+ * global EBU lock to make sure that PCI/MTD don't break.
*/
static void ltq_mm_apply(struct ltq_mm *chip)
{
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 4269ea9a817e..1fca8dd7824f 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -306,37 +306,39 @@ static const struct regmap_config pca953x_i2c_regmap = {
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
+ .disable_locking = true,
.cache_type = REGCACHE_RBTREE,
- /* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */
- .max_register = 0xff,
+ .max_register = 0x7f,
};
-static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
- bool write, bool addrinc)
+static const struct regmap_config pca953x_ai_i2c_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .read_flag_mask = REG_ADDR_AI,
+ .write_flag_mask = REG_ADDR_AI,
+
+ .readable_reg = pca953x_readable_register,
+ .writeable_reg = pca953x_writeable_register,
+ .volatile_reg = pca953x_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0x7f,
+};
+
+static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off)
{
int bank_shift = pca953x_bank_shift(chip);
int addr = (reg & PCAL_GPIO_MASK) << bank_shift;
int pinctrl = (reg & PCAL_PINCTRL_MASK) << 1;
u8 regaddr = pinctrl | addr | (off / BANK_SZ);
- /* Single byte read doesn't need AI bit set. */
- if (!addrinc)
- return regaddr;
-
- /* Chips with 24 and more GPIOs always support Auto Increment */
- if (write && NBANK(chip) > 2)
- regaddr |= REG_ADDR_AI;
-
- /* PCA9575 needs address-increment on multi-byte writes */
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
- regaddr |= REG_ADDR_AI;
-
return regaddr;
}
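Since the auto-increment handling moved into the regmap flag masks, pca953x_recalc_addr() is now a pure address calculation. A worked example, assuming BANK_SZ is 8, an input register id of 0x0 and a two-bank (16-line) expander for which pca953x_bank_shift() returns 1; none of those values appear in this hunk, so treat them as assumptions:

/* Illustrative: reg = chip->regs->input (assumed 0x0), off = line 10. */
int bank_shift = 1;					/* assumed for a 16-line chip */
int addr    = (0x0 & PCAL_GPIO_MASK) << bank_shift;	/* -> 0x0 */
int pinctrl = (0x0 & PCAL_PINCTRL_MASK) << 1;		/* -> 0x0 */
u8 regaddr  = pinctrl | addr | (10 / 8);		/* -> 0x01, i.e. input port 1 */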
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0, true, true);
+ u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -354,7 +356,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long
static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
- u8 regaddr = pca953x_recalc_addr(chip, reg, 0, false, true);
+ u8 regaddr = pca953x_recalc_addr(chip, reg, 0);
u8 value[MAX_BANK];
int i, ret;
@@ -373,8 +375,7 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, unsigned long *
static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -388,10 +389,8 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
+ u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
int ret;
@@ -411,8 +410,7 @@ exit:
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off,
- true, false);
+ u8 inreg = pca953x_recalc_addr(chip, chip->regs->input, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -436,8 +434,7 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off,
- true, false);
+ u8 outreg = pca953x_recalc_addr(chip, chip->regs->output, off);
u8 bit = BIT(off % BANK_SZ);
mutex_lock(&chip->i2c_lock);
@@ -448,8 +445,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
- u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off,
- true, false);
+ u8 dirreg = pca953x_recalc_addr(chip, chip->regs->direction, off);
u8 bit = BIT(off % BANK_SZ);
u32 reg_val;
int ret;
@@ -466,6 +462,23 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
return GPIO_LINE_DIRECTION_OUT;
}
+static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ struct pca953x_chip *chip = gpiochip_get_data(gc);
+ DECLARE_BITMAP(reg_val, MAX_LINE);
+ int ret;
+
+ mutex_lock(&chip->i2c_lock);
+ ret = pca953x_read_regs(chip, chip->regs->input, reg_val);
+ mutex_unlock(&chip->i2c_lock);
+ if (ret)
+ return ret;
+
+ bitmap_replace(bits, bits, reg_val, mask, gc->ngpio);
+ return 0;
+}
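One note on the bitmap_replace() call above, since the argument order is easy to misread: it only refreshes the lines the caller asked for and leaves the rest of the caller's bitmap untouched. Per word it behaves as sketched below:

/*
 * bitmap_replace(bits, bits, reg_val, mask, gc->ngpio) is equivalent to
 *	bits = (bits & ~mask) | (reg_val & mask);
 * so only the lines set in @mask are updated from the input registers.
 */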
+
static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
@@ -489,10 +502,8 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
unsigned int offset,
unsigned long config)
{
- u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset,
- true, false);
- u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset,
- true, false);
+ u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset);
+ u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset);
u8 bit = BIT(offset % BANK_SZ);
int ret;
@@ -551,6 +562,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->get = pca953x_gpio_get_value;
gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
+ gc->get_multiple = pca953x_gpio_get_multiple;
gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
@@ -863,6 +875,7 @@ static int pca953x_probe(struct i2c_client *client,
int ret;
u32 invert = 0;
struct regulator *reg;
+ const struct regmap_config *regmap_config;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -925,7 +938,17 @@ static int pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->regmap = devm_regmap_init_i2c(client, &pca953x_i2c_regmap);
+ pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
+
+ if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ dev_info(&client->dev, "using AI\n");
+ regmap_config = &pca953x_ai_i2c_regmap;
+ } else {
+ dev_info(&client->dev, "using no AI\n");
+ regmap_config = &pca953x_i2c_regmap;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
goto err_exit;
@@ -956,7 +979,6 @@ static int pca953x_probe(struct i2c_client *client,
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
if (PCA_CHIP_TYPE(chip->driver_data) == PCA953X_TYPE) {
chip->regs = &pca953x_regs;
@@ -1154,7 +1176,7 @@ static struct i2c_driver pca953x_driver = {
.name = "pca953x",
.pm = &pca953x_pm_ops,
.of_match_table = pca953x_dt_ids,
- .acpi_match_table = ACPI_PTR(pca953x_acpi_ids),
+ .acpi_match_table = pca953x_acpi_ids,
},
.probe = pca953x_probe,
.remove = pca953x_remove,
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 3f3d9a94b709..e96d28bf43b4 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -11,11 +12,11 @@
#include <linux/slab.h>
#define PCH_EDGE_FALLING 0
-#define PCH_EDGE_RISING BIT(0)
-#define PCH_LEVEL_L BIT(1)
-#define PCH_LEVEL_H (BIT(0) | BIT(1))
-#define PCH_EDGE_BOTH BIT(2)
-#define PCH_IM_MASK (BIT(0) | BIT(1) | BIT(2))
+#define PCH_EDGE_RISING 1
+#define PCH_LEVEL_L 2
+#define PCH_LEVEL_H 3
+#define PCH_EDGE_BOTH 4
+#define PCH_IM_MASK GENMASK(2, 0)
#define PCH_IRQ_BASE 24
@@ -103,9 +104,9 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
spin_lock_irqsave(&chip->spinlock, flags);
reg_val = ioread32(&chip->reg->po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->po);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -115,7 +116,7 @@ static int pch_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct pch_gpio *chip = gpiochip_get_data(gpio);
- return (ioread32(&chip->reg->pi) >> nr) & 1;
+ return !!(ioread32(&chip->reg->pi) & BIT(nr));
}
static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
@@ -130,13 +131,14 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val = ioread32(&chip->reg->po);
if (val)
- reg_val |= (1 << nr);
+ reg_val |= BIT(nr);
else
- reg_val &= ~(1 << nr);
+ reg_val &= ~BIT(nr);
iowrite32(reg_val, &chip->reg->po);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm |= (1 << nr);
+ pm = ioread32(&chip->reg->pm);
+ pm &= BIT(gpio_pins[chip->ioh]) - 1;
+ pm |= BIT(nr);
iowrite32(pm, &chip->reg->pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -151,8 +153,9 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
unsigned long flags;
spin_lock_irqsave(&chip->spinlock, flags);
- pm = ioread32(&chip->reg->pm) & ((1 << gpio_pins[chip->ioh]) - 1);
- pm &= ~(1 << nr);
+ pm = ioread32(&chip->reg->pm);
+ pm &= BIT(gpio_pins[chip->ioh]) - 1;
+ pm &= ~BIT(nr);
iowrite32(pm, &chip->reg->pm);
spin_unlock_irqrestore(&chip->spinlock, flags);
@@ -226,17 +229,15 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
int ch, irq = d->irq;
ch = irq - chip->irq_base;
- if (irq <= chip->irq_base + 7) {
+ if (irq < chip->irq_base + 8) {
im_reg = &chip->reg->im0;
- im_pos = ch;
+ im_pos = ch - 0;
} else {
im_reg = &chip->reg->im1;
im_pos = ch - 8;
}
dev_dbg(chip->dev, "irq=%d type=%d ch=%d pos=%d\n", irq, type, ch, im_pos);
- spin_lock_irqsave(&chip->spinlock, flags);
-
switch (type) {
case IRQ_TYPE_EDGE_RISING:
val = PCH_EDGE_RISING;
@@ -254,20 +255,21 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
val = PCH_LEVEL_L;
break;
default:
- goto unlock;
+ return 0;
}
+ spin_lock_irqsave(&chip->spinlock, flags);
+
/* Set interrupt mode */
im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
iowrite32(im | (val << (im_pos * 4)), im_reg);
/* And the handler */
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
- else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ else if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(d, handle_edge_irq);
-unlock:
spin_unlock_irqrestore(&chip->spinlock, flags);
return 0;
}
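The interrupt-mode write above packs one trigger setting per GPIO into im0/im1: each pin owns a 4-bit slot, of which the low three bits (PCH_IM_MASK) select the trigger mode. A worked example for pin 5 of the first bank programmed to PCH_LEVEL_H:

/*
 * im_pos = 5, so the field sits at bits 22:20 of im0:
 *	im = ioread32(im_reg) & ~(PCH_IM_MASK << (5 * 4));	/* clear bits 22:20 */
 *	iowrite32(im | (PCH_LEVEL_H << (5 * 4)), im_reg);	/* write mode 3 */
 */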
@@ -277,7 +279,7 @@ static void pch_irq_unmask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imaskclr);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->imaskclr);
}
static void pch_irq_mask(struct irq_data *d)
@@ -285,7 +287,7 @@ static void pch_irq_mask(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->imask);
}
static void pch_irq_ack(struct irq_data *d)
@@ -293,21 +295,22 @@ static void pch_irq_ack(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
- iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
+ iowrite32(BIT(d->irq - chip->irq_base), &chip->reg->iclr);
}
static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
{
struct pch_gpio *chip = dev_id;
unsigned long reg_val = ioread32(&chip->reg->istatus);
- int i, ret = IRQ_NONE;
+ int i;
+
+ dev_dbg(chip->dev, "irq=%d status=0x%lx\n", irq, reg_val);
- for_each_set_bit(i, &reg_val, gpio_pins[chip->ioh]) {
- dev_dbg(chip->dev, "[%d]:irq=%d status=0x%lx\n", i, irq, reg_val);
+ reg_val &= BIT(gpio_pins[chip->ioh]) - 1;
+ for_each_set_bit(i, &reg_val, gpio_pins[chip->ioh])
generic_handle_irq(chip->irq_base + i);
- ret = IRQ_HANDLED;
- }
- return ret;
+
+ return IRQ_RETVAL(reg_val);
}
static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
@@ -344,7 +347,6 @@ static int pch_gpio_probe(struct pci_dev *pdev,
s32 ret;
struct pch_gpio *chip;
int irq_base;
- u32 msk;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -357,7 +359,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
return ret;
}
- ret = pcim_iomap_regions(pdev, 1 << 1, KBUILD_MODNAME);
+ ret = pcim_iomap_regions(pdev, BIT(1), KBUILD_MODNAME);
if (ret) {
dev_err(&pdev->dev, "pci_request_regions FAILED-%d", ret);
return ret;
@@ -393,9 +395,8 @@ static int pch_gpio_probe(struct pci_dev *pdev,
chip->irq_base = irq_base;
/* Mask all interrupts, but enable them */
- msk = (1 << gpio_pins[chip->ioh]) - 1;
- iowrite32(msk, &chip->reg->imask);
- iowrite32(msk, &chip->reg->ien);
+ iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->imask);
+ iowrite32(BIT(gpio_pins[chip->ioh]) - 1, &chip->reg->ien);
ret = devm_request_irq(&pdev->dev, pdev->irq, pch_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index e241fb884c12..f1b53dd1df1a 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/device.h>
@@ -408,6 +409,7 @@ static const struct amba_id pl061_ids[] = {
},
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl061_ids);
static struct amba_driver pl061_gpio_driver = {
.drv = {
@@ -419,9 +421,6 @@ static struct amba_driver pl061_gpio_driver = {
.id_table = pl061_ids,
.probe = pl061_probe,
};
+module_amba_driver(pl061_gpio_driver);
-static int __init pl061_gpio_init(void)
-{
- return amba_driver_register(&pl061_gpio_driver);
-}
-device_initcall(pl061_gpio_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 7284473c9fe3..eac1582c70da 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -250,8 +250,10 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
int error;
error = pm_runtime_get_sync(p->dev);
- if (error < 0)
+ if (error < 0) {
+ pm_runtime_put(p->dev);
return error;
+ }
error = pinctrl_gpio_request(chip->base + offset);
if (error)
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
new file mode 100644
index 000000000000..5412cb3b0b2a
--- /dev/null
+++ b/drivers/gpio/gpio-regmap.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * regmap based generic GPIO driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+struct gpio_regmap {
+ struct device *parent;
+ struct regmap *regmap;
+ struct gpio_chip gpio_chip;
+
+ int reg_stride;
+ int ngpio_per_reg;
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+
+ void *driver_data;
+};
+
+static unsigned int gpio_regmap_addr(unsigned int addr)
+{
+ if (addr == GPIO_REGMAP_ADDR_ZERO)
+ return 0;
+
+ return addr;
+}
+
+static int gpio_regmap_simple_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ unsigned int line = offset % gpio->ngpio_per_reg;
+ unsigned int stride = offset / gpio->ngpio_per_reg;
+
+ *reg = base + stride * gpio->reg_stride;
+ *mask = BIT(line);
+
+ return 0;
+}
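gpio_regmap_simple_xlate() is the default offset-to-register translation used when a driver does not supply its own .reg_mask_xlate(). A quick worked example, assuming ngpio_per_reg = 8 and reg_stride = 1 (illustrative values):

/*
 * offset = 10, ngpio_per_reg = 8, reg_stride = 1:
 *	line   = 10 % 8 = 2
 *	stride = 10 / 8 = 1
 *	*reg   = base + 1 * 1 = base + 1
 *	*mask  = BIT(2)
 * i.e. line 10 maps to bit 2 of the second register in the block.
 */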
+
+static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int ret;
+
+ /* we might not have an output register if we are input only */
+ if (gpio->reg_dat_base)
+ base = gpio_regmap_addr(gpio->reg_dat_base);
+ else
+ base = gpio_regmap_addr(gpio->reg_set_base);
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & mask);
+}
+
+static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
+ unsigned int reg, mask;
+
+ gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (val)
+ regmap_update_bits(gpio->regmap, reg, mask, mask);
+ else
+ regmap_update_bits(gpio->regmap, reg, mask, 0);
+}
+
+static void gpio_regmap_set_with_clear(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, reg, mask;
+
+ if (val)
+ base = gpio_regmap_addr(gpio->reg_set_base);
+ else
+ base = gpio_regmap_addr(gpio->reg_clr_base);
+
+ gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ regmap_write(gpio->regmap, reg, mask);
+}
+
+static int gpio_regmap_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int invert, ret;
+
+ if (gpio->reg_dir_out_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_out_base);
+ invert = 0;
+ } else if (gpio->reg_dir_in_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_in_base);
+ invert = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(gpio->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ if (!!(val & mask) ^ invert)
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int gpio_regmap_set_direction(struct gpio_chip *chip,
+ unsigned int offset, bool output)
+{
+ struct gpio_regmap *gpio = gpiochip_get_data(chip);
+ unsigned int base, val, reg, mask;
+ int invert, ret;
+
+ if (gpio->reg_dir_out_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_out_base);
+ invert = 0;
+ } else if (gpio->reg_dir_in_base) {
+ base = gpio_regmap_addr(gpio->reg_dir_in_base);
+ invert = 1;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ if (invert)
+ val = output ? 0 : mask;
+ else
+ val = output ? mask : 0;
+
+ return regmap_update_bits(gpio->regmap, reg, mask, val);
+}
+
+static int gpio_regmap_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return gpio_regmap_set_direction(chip, offset, false);
+}
+
+static int gpio_regmap_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ gpio_regmap_set(chip, offset, value);
+
+ return gpio_regmap_set_direction(chip, offset, true);
+}
+
+void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data)
+{
+ gpio->driver_data = data;
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_set_drvdata);
+
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio)
+{
+ return gpio->driver_data;
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
+
+/**
+ * gpio_regmap_register() - Register a generic regmap GPIO controller
+ * @config: configuration for gpio_regmap
+ *
+ * Return: A pointer to the registered gpio_regmap, or an ERR_PTR()-encoded
+ * error value on failure.
+ */
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
+{
+ struct gpio_regmap *gpio;
+ struct gpio_chip *chip;
+ int ret;
+
+ if (!config->parent)
+ return ERR_PTR(-EINVAL);
+
+ if (!config->ngpio)
+ return ERR_PTR(-EINVAL);
+
+ /* we need at least one */
+ if (!config->reg_dat_base && !config->reg_set_base)
+ return ERR_PTR(-EINVAL);
+
+ /* if we have a direction register we need both input and output */
+ if ((config->reg_dir_out_base || config->reg_dir_in_base) &&
+ (!config->reg_dat_base || !config->reg_set_base))
+ return ERR_PTR(-EINVAL);
+
+ /* we don't support having both registers simultaneously for now */
+ if (config->reg_dir_out_base && config->reg_dir_in_base)
+ return ERR_PTR(-EINVAL);
+
+ gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return ERR_PTR(-ENOMEM);
+
+ gpio->parent = config->parent;
+ gpio->regmap = config->regmap;
+ gpio->ngpio_per_reg = config->ngpio_per_reg;
+ gpio->reg_stride = config->reg_stride;
+ gpio->reg_mask_xlate = config->reg_mask_xlate;
+ gpio->reg_dat_base = config->reg_dat_base;
+ gpio->reg_set_base = config->reg_set_base;
+ gpio->reg_clr_base = config->reg_clr_base;
+ gpio->reg_dir_in_base = config->reg_dir_in_base;
+ gpio->reg_dir_out_base = config->reg_dir_out_base;
+
+ /* if not set, assume there is only one register */
+ if (!gpio->ngpio_per_reg)
+ gpio->ngpio_per_reg = config->ngpio;
+
+ /* if not set, assume they are consecutive */
+ if (!gpio->reg_stride)
+ gpio->reg_stride = 1;
+
+ if (!gpio->reg_mask_xlate)
+ gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
+
+ chip = &gpio->gpio_chip;
+ chip->parent = config->parent;
+ chip->base = -1;
+ chip->ngpio = config->ngpio;
+ chip->names = config->names;
+ chip->label = config->label ?: dev_name(config->parent);
+
+ /*
+ * If our regmap is fast_io we should probably set can_sleep to false.
+ * Right now, the regmap doesn't save this property, nor is there any
+ * access function for it.
+ * The only regmap type which uses fast_io is regmap-mmio. For now,
+ * assume a safe default of true here.
+ */
+ chip->can_sleep = true;
+
+ chip->get = gpio_regmap_get;
+ if (gpio->reg_set_base && gpio->reg_clr_base)
+ chip->set = gpio_regmap_set_with_clear;
+ else if (gpio->reg_set_base)
+ chip->set = gpio_regmap_set;
+
+ if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
+ chip->get_direction = gpio_regmap_get_direction;
+ chip->direction_input = gpio_regmap_direction_input;
+ chip->direction_output = gpio_regmap_direction_output;
+ }
+
+ ret = gpiochip_add_data(chip, gpio);
+ if (ret < 0)
+ goto err_free_gpio;
+
+ if (config->irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+ if (ret)
+ goto err_remove_gpiochip;
+ }
+
+ return gpio;
+
+err_remove_gpiochip:
+ gpiochip_remove(chip);
+err_free_gpio:
+ kfree(gpio);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_register);
+
+/**
+ * gpio_regmap_unregister() - Unregister a generic regmap GPIO controller
+ * @gpio: gpio_regmap device to unregister
+ */
+void gpio_regmap_unregister(struct gpio_regmap *gpio)
+{
+ gpiochip_remove(&gpio->gpio_chip);
+ kfree(gpio);
+}
+EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
+
+static void devm_gpio_regmap_unregister(struct device *dev, void *res)
+{
+ gpio_regmap_unregister(*(struct gpio_regmap **)res);
+}
+
+/**
+ * devm_gpio_regmap_register() - resource managed gpio_regmap_register()
+ * @dev: device that is registering this GPIO device
+ * @config: configuration for gpio_regmap
+ *
+ * Managed gpio_regmap_register(). For generic regmap GPIO device registered by
+ * this function, gpio_regmap_unregister() is automatically called on driver
+ * detach. See gpio_regmap_register() for more information.
+ *
+ * Return: A pointer to the registered gpio_regmap, or an ERR_PTR()-encoded
+ * error value on failure.
+ */
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config)
+{
+ struct gpio_regmap **ptr, *gpio;
+
+ ptr = devres_alloc(devm_gpio_regmap_unregister, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ gpio = gpio_regmap_register(config);
+ if (!IS_ERR(gpio)) {
+ *ptr = gpio;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return gpio;
+}
+EXPORT_SYMBOL_GPL(devm_gpio_regmap_register);
+
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_DESCRIPTION("GPIO generic regmap driver core");
+MODULE_LICENSE("GPL");
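To show how the new helper is meant to be consumed, here is a rough sketch of a driver registering a memory-mapped GPIO block through devm_gpio_regmap_register(). Only the gpio_regmap_config fields and the calls come from the code above; the register offsets, line count and the foo_* naming are invented for illustration.

/* Hypothetical consumer of the gpio-regmap helper (illustrative values only). */
#include <linux/err.h>
#include <linux/gpio/regmap.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int foo_gpio_probe(struct platform_device *pdev)
{
	struct regmap *regmap = dev_get_regmap(pdev->dev.parent, NULL);
	struct gpio_regmap_config config = {};
	struct gpio_regmap *gpio;

	if (!regmap)
		return -ENODEV;

	config.parent = &pdev->dev;
	config.regmap = regmap;
	config.ngpio = 16;		/* invented line count */
	config.reg_dat_base = 0x10;	/* invented register offsets */
	config.reg_set_base = 0x14;
	config.reg_dir_out_base = 0x18;
	/* ngpio_per_reg, reg_stride and reg_mask_xlate fall back to the defaults */

	gpio = devm_gpio_regmap_register(&pdev->dev, &config);
	return PTR_ERR_OR_ZERO(gpio);
}

Chips with interleaved or packed registers would additionally supply their own reg_mask_xlate(), since gpio_regmap_simple_xlate() only covers the linear layout shown earlier.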
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 79b553dc39a3..178e9128ded0 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -894,6 +894,7 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, tegra186_gpio_of_match);
static struct platform_driver tegra186_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 25d86441666e..a809609ee957 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/driver.h>
#include <linux/acpi.h>
@@ -122,7 +122,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
fwspec.fwnode = gc->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
- fwspec.param[1] = IRQ_TYPE_NONE;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
return irq_create_fwspec_mapping(&fwspec);
}
@@ -290,10 +290,8 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
- if (priv->nirq > 0) {
- /* Register interrupt handlers for gpio signaled acpi events */
- acpi_gpiochip_request_interrupts(&priv->gc);
- }
+ /* Register interrupt handlers for GPIO signaled ACPI Events */
+ acpi_gpiochip_request_interrupts(&priv->gc);
return ret;
}
@@ -302,9 +300,7 @@ static int xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- if (priv->nirq > 0) {
- acpi_gpiochip_free_interrupts(&priv->gc);
- }
+ acpi_gpiochip_free_interrupts(&priv->gc);
irq_domain_remove(priv->irq_domain);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 0017367e94ee..9276051663da 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1353,7 +1353,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
}
/* Run deferred acpi_gpiochip_request_irqs() */
-static int acpi_gpio_handle_deferred_request_irqs(void)
+static int __init acpi_gpio_handle_deferred_request_irqs(void)
{
struct acpi_gpio_chip *acpi_gpio, *tmp;
@@ -1371,7 +1371,7 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
-static const struct dmi_system_id gpiolib_acpi_quirks[] = {
+static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
{
/*
* The Minix Neo Z83-4 has a micro-USB-B id-pin handler for
@@ -1455,7 +1455,7 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
{} /* Terminating entry */
};
-static int acpi_gpio_setup_params(void)
+static int __init acpi_gpio_setup_params(void)
{
const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
const struct dmi_system_id *id;
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 53781b253986..26741032fa9e 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -37,8 +37,11 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip,
if (count < 0)
return;
- if (count > gdev->ngpio)
+ if (count > gdev->ngpio) {
+ dev_warn(&gdev->dev, "gpio-line-names is length %d but should be at most length %d",
+ count, gdev->ngpio);
count = gdev->ngpio;
+ }
names = kcalloc(count, sizeof(*names), GFP_KERNEL);
if (!names)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ccc449df3792..219eb0054233 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -344,6 +344,12 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
if (transitory)
lflags |= GPIO_TRANSITORY;
+ if (flags & OF_GPIO_PULL_UP)
+ lflags |= GPIO_PULL_UP;
+
+ if (flags & OF_GPIO_PULL_DOWN)
+ lflags |= GPIO_PULL_DOWN;
+
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
@@ -460,6 +466,24 @@ static struct gpio_desc *of_find_arizona_gpio(struct device *dev,
return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
}
+static struct gpio_desc *of_find_usb_gpio(struct device *dev,
+ const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /*
+ * This quirk currently only covers the Fairchild FUSB302 Type-C controller,
+ * which uses an undocumented DT GPIO line named "fcs,int_n" without the
+ * mandatory "-gpios" suffix.
+ */
+ if (!IS_ENABLED(CONFIG_TYPEC_FUSB302))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id || strcmp(con_id, "fcs,int_n"))
+ return ERR_PTR(-ENOENT);
+
+ return of_get_named_gpiod_flags(dev->of_node, con_id, 0, of_flags);
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
@@ -504,6 +528,9 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
if (PTR_ERR(desc) == -ENOENT)
desc = of_find_arizona_gpio(dev, con_id, &of_flags);
+ if (PTR_ERR(desc) == -ENOENT)
+ desc = of_find_usb_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -585,6 +612,10 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*lflags |= GPIO_ACTIVE_LOW;
if (xlate_flags & OF_GPIO_TRANSITORY)
*lflags |= GPIO_TRANSITORY;
+ if (xlate_flags & OF_GPIO_PULL_UP)
+ *lflags |= GPIO_PULL_UP;
+ if (xlate_flags & OF_GPIO_PULL_DOWN)
+ *lflags |= GPIO_PULL_DOWN;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index c14f0784274a..4fa075d49fbc 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -296,6 +296,9 @@ static int gpiodev_add_to_list(struct gpio_device *gdev)
/*
* Convert a GPIO name to its descriptor
+ * Note that there is no guarantee that GPIO names are globally unique!
+ * Hence this function returns a reference to the first GPIO line found that
+ * matches the given name, if any exists.
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
{
@@ -329,10 +332,12 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
}
/*
- * Takes the names from gc->names and checks if they are all unique. If they
- * are, they are assigned to their gpio descriptors.
+ * Take the names from gc->names and assign them to their GPIO descriptors.
+ * Warn if a name is already used for a GPIO line on a different GPIO chip.
*
- * Warning if one of the names is already used for a different GPIO.
+ * Note that:
+ * 1. Non-unique names are still accepted;
+ * 2. Name collisions within the same GPIO chip are not reported.
*/
static int gpiochip_set_desc_names(struct gpio_chip *gc)
{
@@ -1267,8 +1272,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
- } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
- cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
struct gpioline_info lineinfo;
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
@@ -1280,23 +1284,37 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
hwgpio = gpio_chip_hwgpio(desc);
- if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL &&
- test_bit(hwgpio, priv->watched_lines))
- return -EBUSY;
-
gpio_desc_to_lineinfo(desc, &lineinfo);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
-
- if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
- set_bit(hwgpio, priv->watched_lines);
-
return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
+ } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+ struct gpioline_info lineinfo;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+
+ desc = gpiochip_get_desc(gc, lineinfo.line_offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ hwgpio = gpio_chip_hwgpio(desc);
+
+ if (test_bit(hwgpio, priv->watched_lines))
+ return -EBUSY;
+
+ gpio_desc_to_lineinfo(desc, &lineinfo);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+ return -EFAULT;
+
+ set_bit(hwgpio, priv->watched_lines);
+ return 0;
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;
@@ -1538,9 +1556,8 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
/* From this point, the .release() function cleans up gpio_device */
gdev->dev.release = gpiodevice_release;
- pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
- __func__, gdev->base, gdev->base + gdev->ngpio - 1,
- dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+ dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base,
+ gdev->base + gdev->ngpio - 1, gdev->chip->label ? : "generic");
return 0;
@@ -1556,8 +1573,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
- pr_err("%s: unable to get GPIO desc: %ld\n",
- __func__, PTR_ERR(desc));
+ chip_err(gc, "%s: unable to get GPIO desc: %ld\n", __func__,
+ PTR_ERR(desc));
return;
}
@@ -1566,8 +1583,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags);
if (rv)
- pr_err("%s: unable to hog GPIO line (%s:%u): %d\n",
- __func__, gc->label, hog->chip_hwnum, rv);
+ gpiod_err(desc, "%s: unable to hog GPIO line (%s:%u): %d\n",
+ __func__, gc->label, hog->chip_hwnum, rv);
}
static void machine_gpiochip_add(struct gpio_chip *gc)
@@ -1592,8 +1609,8 @@ static void gpiochip_setup_devs(void)
list_for_each_entry(gdev, &gpio_devices, list) {
ret = gpiochip_setup_dev(gdev);
if (ret)
- pr_err("%s: Failed to initialize gpio device (%d)\n",
- dev_name(&gdev->dev), ret);
+ dev_err(&gdev->dev,
+ "Failed to initialize gpio device (%d)\n", ret);
}
}
@@ -2461,32 +2478,37 @@ static void gpiochip_irq_relres(struct irq_data *d)
gpiochip_relres_irq(gc, d->hwirq);
}
+static void gpiochip_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ if (gc->irq.irq_mask)
+ gc->irq.irq_mask(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
+static void gpiochip_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ gpiochip_enable_irq(gc, d->hwirq);
+ if (gc->irq.irq_unmask)
+ gc->irq.irq_unmask(d);
+}
+
static void gpiochip_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
gpiochip_enable_irq(gc, d->hwirq);
- if (gc->irq.irq_enable)
- gc->irq.irq_enable(d);
- else
- gc->irq.chip->irq_unmask(d);
+ gc->irq.irq_enable(d);
}
static void gpiochip_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- /*
- * Since we override .irq_disable() we need to mimic the
- * behaviour of __irq_disable() in irq/chip.c.
- * First call .irq_disable() if it exists, else mimic the
- * behaviour of mask_irq() which calls .irq_mask() if
- * it exists.
- */
- if (gc->irq.irq_disable)
- gc->irq.irq_disable(d);
- else if (gc->irq.chip->irq_mask)
- gc->irq.chip->irq_mask(d);
+ gc->irq.irq_disable(d);
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -2511,10 +2533,22 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
"detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
- gc->irq.irq_enable = irqchip->irq_enable;
- gc->irq.irq_disable = irqchip->irq_disable;
- irqchip->irq_enable = gpiochip_irq_enable;
- irqchip->irq_disable = gpiochip_irq_disable;
+
+ if (irqchip->irq_disable) {
+ gc->irq.irq_disable = irqchip->irq_disable;
+ irqchip->irq_disable = gpiochip_irq_disable;
+ } else {
+ gc->irq.irq_mask = irqchip->irq_mask;
+ irqchip->irq_mask = gpiochip_irq_mask;
+ }
+
+ if (irqchip->irq_enable) {
+ gc->irq.irq_enable = irqchip->irq_enable;
+ irqchip->irq_enable = gpiochip_irq_enable;
+ } else {
+ gc->irq.irq_unmask = irqchip->irq_unmask;
+ irqchip->irq_unmask = gpiochip_irq_unmask;
+ }
}
/**
@@ -2702,7 +2736,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
return -EINVAL;
if (!gc->parent) {
- pr_err("missing gpiochip .dev parent pointer\n");
+ chip_err(gc, "missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
gc->irq.threaded = threaded;
@@ -2752,6 +2786,26 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
+/**
+ * gpiochip_irqchip_add_domain() - adds an irqdomain to a gpiochip
+ * @gc: the gpiochip to add the irqchip to
+ * @domain: the irqdomain to add to the gpiochip
+ *
+ * This function adds an IRQ domain to the gpiochip.
+ */
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain)
+{
+ if (!domain)
+ return -EINVAL;
+
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.domain = domain;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_domain);
+
#else /* CONFIG_GPIOLIB_IRQCHIP */
static inline int gpiochip_add_irqchip(struct gpio_chip *gc,
@@ -4653,7 +4707,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (!table)
return desc;
- for (p = &table->table[0]; p->chip_label; p++) {
+ for (p = &table->table[0]; p->key; p++) {
struct gpio_chip *gc;
/* idx must always match exactly */
@@ -4664,18 +4718,30 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
if (p->con_id && (!con_id || strcmp(p->con_id, con_id)))
continue;
- gc = find_chip_by_name(p->chip_label);
+ if (p->chip_hwnum == U16_MAX) {
+ desc = gpio_name_to_desc(p->key);
+ if (desc) {
+ *flags = p->flags;
+ return desc;
+ }
+
+ dev_warn(dev, "cannot find GPIO line %s, deferring\n",
+ p->key);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ gc = find_chip_by_name(p->key);
if (!gc) {
/*
* As the lookup table indicates a chip with
- * p->chip_label should exist, assume it may
+ * p->key should exist, assume it may
* still appear later and let the interested
* consumer be probed again or let the Deferred
* Probe infrastructure handle the error.
*/
dev_warn(dev, "cannot find GPIO chip %s, deferring\n",
- p->chip_label);
+ p->key);
return ERR_PTR(-EPROBE_DEFER);
}
@@ -4706,7 +4772,7 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
if (!table)
return -ENOENT;
- for (p = &table->table[0]; p->chip_label; p++) {
+ for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
@@ -4877,7 +4943,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
- pr_debug("no flags found for %s\n", con_id);
+ gpiod_dbg(desc, "no flags found for %s\n", con_id);
return 0;
}
@@ -5108,8 +5174,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/* Mark GPIO as hogged so it can be identified and removed later */
set_bit(FLAG_IS_HOGGED, &desc->flags);
- pr_info("GPIO line %d (%s) hogged as %s%s\n",
- desc_to_gpio(desc), name,
+ gpiod_info(desc, "hogged as %s%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
(dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
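The chip_label -> key rename above lets a machine lookup entry name either a GPIO chip (with a hardware offset) or, by setting chip_hwnum to U16_MAX, a GPIO line name searched across all chips. A hedged board-file sketch, assuming GPIO_LOOKUP() keeps its (key, chip_hwnum, con_id, flags) ordering and using made-up device and line names:

#include <linux/gpio/machine.h>
#include <linux/kernel.h>

static struct gpiod_lookup_table foo_gpios_table = {
	.dev_id = "foo-device",
	.table = {
		/* classic form: chip label plus hardware line number */
		GPIO_LOOKUP("gpiochip0", 3, "reset", GPIO_ACTIVE_LOW),
		/* new form: match by line name anywhere in the system */
		GPIO_LOOKUP("nINT_PWR", U16_MAX, "power-int", GPIO_ACTIVE_HIGH),
		{ }
	},
};

static void foo_register_gpios(void)
{
	gpiod_add_lookup_table(&foo_gpios_table);
}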
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 853ce681b4a4..9ed242316414 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -81,8 +81,7 @@ struct gpio_array {
unsigned long invert_mask[];
};
-struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
- unsigned int hwnum);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
int gpiod_get_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
@@ -163,18 +162,18 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
/* With chip prefix */
-#define chip_emerg(chip, fmt, ...) \
- dev_emerg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_crit(chip, fmt, ...) \
- dev_crit(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_err(chip, fmt, ...) \
- dev_err(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_warn(chip, fmt, ...) \
- dev_warn(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_info(chip, fmt, ...) \
- dev_info(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
-#define chip_dbg(chip, fmt, ...) \
- dev_dbg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
+#define chip_emerg(gc, fmt, ...) \
+ dev_emerg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_crit(gc, fmt, ...) \
+ dev_crit(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_err(gc, fmt, ...) \
+ dev_err(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_warn(gc, fmt, ...) \
+ dev_warn(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_info(gc, fmt, ...) \
+ dev_info(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define chip_dbg(gc, fmt, ...) \
+ dev_dbg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
#ifdef CONFIG_GPIO_SYSFS
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3f2b695cf19e..ffe149aafc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -186,7 +187,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
* disabled. The memory must be pinned and mapped to the hardware when
* this is called in hqd_load functions, so it should never fault in
* the first place. This resolves a circular lock dependency involving
- * four locks, including the DQM lock and mmap_sem.
+ * four locks, including the DQM lock and mmap_lock.
*/
#define read_user_wptr(mmptr, wptr, dst) \
({ \
@@ -195,10 +196,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
- } else if (current->mm == NULL) { \
- use_mm(mmptr); \
+ } else if (current->flags & PF_KTHREAD) { \
+ kthread_use_mm(mmptr); \
valid = !get_user((dst), (wptr)); \
- unuse_mm(mmptr); \
+ kthread_unuse_mm(mmptr); \
} \
pagefault_enable(); \
} \
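The read_user_wptr() change above switches from the removed use_mm()/unuse_mm() helpers to kthread_use_mm()/kthread_unuse_mm() and keys the decision on PF_KTHREAD rather than a NULL current->mm. A minimal standalone sketch of that pattern, illustrative only and not part of the patch:

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>

static int read_u32_from_user_mm(struct mm_struct *mm,
				 u32 __user *uptr, u32 *out)
{
	int ret;

	if (!mmget_not_zero(mm))	/* pin the mm while we use it */
		return -ESRCH;

	kthread_use_mm(mm);		/* only valid from a PF_KTHREAD task */
	ret = get_user(*out, uptr);
	kthread_unuse_mm(mm);

	mmput(mm);
	return ret;
}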
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 6529caca88fe..35d4a5ab0228 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/mmu_context.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 691c89705bcd..bf927f432506 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,7 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0b7e78748540..744366c7ee85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
@@ -237,7 +235,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ccd635b812b5..feab4cc6e836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
@@ -224,7 +222,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index df841c2ac5e7..c7fd0c47b254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,8 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 68e6e1bc8f3a..b91b5171270f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1393,9 +1393,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* concurrently and the queues are actually stopped
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
is_invalid_userptr = atomic_read(&mem->invalid);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
mutex_lock(&mem->lock);
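This and the following hunks mechanically convert open-coded mmap_sem operations to the new mmap locking API (mmap_read_lock(), mmap_write_lock_killable(), and friends). A small illustrative sketch of the read-side pattern, not taken from the patch:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	vma = find_vma(mm, addr);
	mapped = vma && addr >= vma->vm_start;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return mapped;
}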
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d7646cbce346..775e389c9a13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -163,6 +163,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
enum amd_pm_state_type pm;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -196,6 +199,9 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
enum amd_pm_state_type state;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
@@ -297,6 +303,9 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level level = 0xff;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -334,6 +343,9 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -433,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -472,6 +487,9 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -508,6 +526,9 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
@@ -525,6 +546,9 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
else if (is_support_sw_smu(adev))
@@ -580,6 +604,9 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -619,6 +646,9 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -721,6 +751,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (count > 127)
return -EINVAL;
@@ -810,6 +843,9 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -859,6 +895,9 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
uint64_t featuremask;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
return -EINVAL;
@@ -899,6 +938,9 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -955,6 +997,9 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1018,6 +1063,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1049,6 +1097,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1076,6 +1127,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t mask = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1107,6 +1161,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1134,6 +1191,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1167,6 +1227,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1194,6 +1257,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1227,6 +1293,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1254,6 +1323,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1287,6 +1359,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1314,6 +1389,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
return ret;
@@ -1347,6 +1425,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1372,6 +1453,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1410,6 +1494,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1435,6 +1522,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = kstrtol(buf, 0, &value);
if (ret)
@@ -1493,6 +1583,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -1528,6 +1621,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
@@ -1587,6 +1683,9 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
return r;
@@ -1620,6 +1719,9 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
return r;
@@ -1658,6 +1760,9 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
uint64_t count0 = 0, count1 = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (adev->flags & AMD_IS_APU)
return -ENODATA;
@@ -1694,6 +1799,9 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1888,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (channel >= PP_TEMP_MAX)
return -EINVAL;
@@ -2019,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2050,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err, ret;
int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2099,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2148,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2178,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2207,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2232,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2256,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2285,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2331,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2363,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2403,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
u32 vddgfx;
int r, size = sizeof(vddgfx);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2435,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
u32 vddnb;
int r, size = sizeof(vddnb);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
@@ -2472,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
int r, size = sizeof(u32);
unsigned uw;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2508,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2537,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2567,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -2605,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
uint32_t sclk;
int r, size = sizeof(sclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2637,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
uint32_t mclk;
int r, size = sizeof(mclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -3497,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
u32 flags = 0;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9cbecd5ba814..e59c01a83dac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -910,7 +910,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out_free_ranges;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
@@ -921,15 +921,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
r = -EPERM;
goto out_unlock;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
r = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
@@ -954,7 +954,7 @@ retry:
return 0;
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_free_pfns:
kvfree(range->hmm_pfns);
out_free_ranges:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 15476fca8fa6..a9583b95fcc1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -901,7 +901,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
@@ -924,7 +924,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NoExecute = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
pr_debug("notpresent %d, noexecute %d, readonly %d\n",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bdba0bfd6df1..7ced9f87be97 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1356,7 +1356,7 @@ static int dm_late_init(void *handle)
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = NULL;
- bool ret = false;
+ bool ret;
if (!adev->dm.fw_dmcu)
return detect_mst_link_for_all_connectors(adev->ddev);
@@ -1377,13 +1377,10 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
- /* todo will enable for navi10 */
- if (adev->asic_type <= CHIP_RAVEN) {
- ret = dmcu_load_iram(dmcu, params);
+ ret = dmcu_load_iram(dmcu, params);
- if (!ret)
- return -EINVAL;
- }
+ if (!ret)
+ return -EINVAL;
return detect_mst_link_for_all_connectors(adev->ddev);
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 45cfb7c45566..6f93a6ca4cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1016,9 +1016,17 @@ static void program_timing_sync(
}
}
- /* set first pipe with plane as master */
+ /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
if (j == 0)
break;
@@ -1039,9 +1047,17 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other pipes with plane as they have already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -2522,6 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
+
commit_planes_for_stream(
dc,
srf_updates,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index ae0361e225bb..aa76c2cea747 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1561,6 +1561,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
* events for SMCToHost interrupt.
*/
uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
@@ -1590,6 +1591,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
if (src_id == 0xfe) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
+
switch (ctxid) {
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7d39b858c9f1..3a3a511670c9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -226,6 +226,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
case 3:
case 4:
color_index = TrueCModeIndex;
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b1099e1251a2..d877ddc6dc57 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -27,6 +27,7 @@
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_sysfs.h>
#include <linux/uaccess.h>
@@ -523,6 +524,10 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registration_state = DRM_CONNECTOR_REGISTERED;
+
+ /* Let userspace know we have a new connector */
+ drm_sysfs_hotplug_event(connector->dev);
+
goto unlock;
err_debugfs:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 02fc24026872..170aa7689110 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -307,13 +307,13 @@ static void drm_fb_helper_sysrq(int dummy1)
schedule_work(&drm_fb_helper_restore_work);
}
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 939f0032aab1..f0336c804639 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -291,9 +291,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return PTR_ERR(connector->kdev);
}
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 56197ae0b2f9..4391e242356d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#if defined(__ia64__)
#include <linux/efi.h>
@@ -44,7 +45,6 @@
#endif
#include <linux/mem_encrypt.h>
-#include <asm/pgtable.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index dc9ef302f517..701f3995f621 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- might_lock_read(&current->mm->mmap_sem);
+ might_lock_read(&current->mm->mmap_lock);
if (userptr->mm != current->mm)
return -EPERM;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 40d42dcff0b7..ed9e53c373a7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5206,6 +5206,9 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type)
{
+ if (encoder->type != INTEL_OUTPUT_DDI)
+ return;
+
switch (type) {
case DP_SDP_VSC:
intel_read_dp_vsc_sdp(encoder, crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 212d4ee68205..7a19215ad844 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -10,6 +10,28 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+static void __intel_atomic_global_state_free(struct kref *kref)
+{
+ struct intel_global_state *obj_state =
+ container_of(kref, struct intel_global_state, ref);
+ struct intel_global_obj *obj = obj_state->obj;
+
+ obj->funcs->atomic_destroy_state(obj, obj_state);
+}
+
+static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
+{
+ kref_put(&obj_state->ref, __intel_atomic_global_state_free);
+}
+
+static struct intel_global_state *
+intel_atomic_global_state_get(struct intel_global_state *obj_state)
+{
+ kref_get(&obj_state->ref);
+
+ return obj_state;
+}
+
void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
struct intel_global_obj *obj,
struct intel_global_state *state,
@@ -17,6 +39,10 @@ void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
{
memset(obj, 0, sizeof(*obj));
+ state->obj = obj;
+
+ kref_init(&state->ref);
+
obj->state = state;
obj->funcs = funcs;
list_add_tail(&obj->head, &dev_priv->global_obj_list);
@@ -28,7 +54,9 @@ void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
list_del(&obj->head);
- obj->funcs->atomic_destroy_state(obj, obj->state);
+
+ drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
+ intel_atomic_global_state_put(obj->state);
}
}
@@ -97,10 +125,14 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
if (!obj_state)
return ERR_PTR(-ENOMEM);
+ obj_state->obj = obj;
obj_state->changed = false;
+ kref_init(&obj_state->ref);
+
state->global_objs[index].state = obj_state;
- state->global_objs[index].old_state = obj->state;
+ state->global_objs[index].old_state =
+ intel_atomic_global_state_get(obj->state);
state->global_objs[index].new_state = obj_state;
state->global_objs[index].ptr = obj;
obj_state->state = state;
@@ -163,7 +195,9 @@ void intel_atomic_swap_global_state(struct intel_atomic_state *state)
new_obj_state->state = NULL;
state->global_objs[i].state = old_obj_state;
- obj->state = new_obj_state;
+
+ intel_atomic_global_state_put(obj->state);
+ obj->state = intel_atomic_global_state_get(new_obj_state);
}
}
@@ -172,10 +206,9 @@ void intel_atomic_clear_global_state(struct intel_atomic_state *state)
int i;
for (i = 0; i < state->num_global_objs; i++) {
- struct intel_global_obj *obj = state->global_objs[i].ptr;
+ intel_atomic_global_state_put(state->global_objs[i].old_state);
+ intel_atomic_global_state_put(state->global_objs[i].new_state);
- obj->funcs->atomic_destroy_state(obj,
- state->global_objs[i].state);
state->global_objs[i].ptr = NULL;
state->global_objs[i].state = NULL;
state->global_objs[i].old_state = NULL;
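The intel_global_state change above moves the global object state to kref-based reference counting so old and new states can outlive the atomic commit that created them. A generic sketch of the same kref pattern with a hypothetical demo_state object (names are made up; this is not i915 code):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_state {
	struct kref ref;
	int value;
};

static void demo_state_release(struct kref *kref)
{
	struct demo_state *state = container_of(kref, struct demo_state, ref);

	kfree(state);
}

static struct demo_state *demo_state_get(struct demo_state *state)
{
	kref_get(&state->ref);
	return state;
}

static void demo_state_put(struct demo_state *state)
{
	kref_put(&state->ref, demo_state_release);
}

static struct demo_state *demo_state_create(int value)
{
	struct demo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		kref_init(&state->ref);	/* refcount starts at 1 */
		state->value = value;
	}
	return state;
}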
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index e6163a469029..1f16fa3073c9 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_GLOBAL_STATE_H__
#define __INTEL_GLOBAL_STATE_H__
+#include <linux/kref.h>
#include <linux/list.h>
struct drm_i915_private;
@@ -54,7 +55,9 @@ struct intel_global_obj {
for_each_if(obj)
struct intel_global_state {
+ struct intel_global_obj *obj;
struct intel_atomic_state *state;
+ struct kref ref;
bool changed;
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 900ea8b7fc8f..30c229fcb404 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -230,7 +230,7 @@ static void intel_context_set_gem(struct intel_context *ce,
ce->timeline = intel_timeline_get(ctx->timeline);
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
- intel_engine_has_semaphores(ce->engine))
+ intel_engine_has_timeslices(ce->engine))
__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}
@@ -1921,11 +1921,6 @@ get_engines(struct i915_gem_context *ctx,
}
user = u64_to_user_ptr(args->value);
- if (!access_ok(user, size)) {
- err = -EFAULT;
- goto err_free;
- }
-
if (put_user(0, &user->extensions)) {
err = -EFAULT;
goto err_free;
@@ -1969,7 +1964,7 @@ static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
- if (!intel_engine_has_semaphores(ce->engine))
+ if (!intel_engine_has_timeslices(ce->engine))
return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3ce185670ca4..db8eb1c6afe9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1988,6 +1988,38 @@ static const struct dma_fence_work_ops eb_parse_ops = {
.release = __eb_parse_release,
};
+static inline int
+__parser_mark_active(struct i915_vma *vma,
+ struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ struct intel_gt_buffer_pool_node *node = vma->private;
+
+ return i915_active_ref(&node->active, tl, fence);
+}
+
+static int
+parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
+{
+ int err;
+
+ mutex_lock(&tl->mutex);
+
+ err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
+ if (err)
+ goto unlock;
+
+ if (pw->trampoline) {
+ err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
+ if (err)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&tl->mutex);
+ return err;
+}
+
static int eb_parse_pipeline(struct i915_execbuffer *eb,
struct i915_vma *shadow,
struct i915_vma *trampoline)
@@ -2022,20 +2054,25 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
pw->shadow = shadow;
pw->trampoline = trampoline;
+ /* Mark active refs early for this worker, in case we get interrupted */
+ err = parser_mark_active(pw, eb->context->timeline);
+ if (err)
+ goto err_commit;
+
err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
if (err)
- goto err_trampoline;
+ goto err_commit;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
- goto err_batch_unlock;
+ goto err_commit_unlock;
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
- goto err_batch_unlock;
+ goto err_commit_unlock;
/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
@@ -2050,11 +2087,13 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
dma_fence_work_commit_imm(&pw->base);
return 0;
-err_batch_unlock:
+err_commit_unlock:
dma_resv_unlock(pw->batch->resv);
-err_trampoline:
- if (trampoline)
- i915_active_release(&trampoline->active);
+err_commit:
+ i915_sw_fence_set_error_once(&pw->base.chain, err);
+ dma_fence_work_commit_imm(&pw->base);
+ return err;
+
err_shadow:
i915_active_release(&shadow->active);
err_batch:
@@ -2100,6 +2139,7 @@ static int eb_parse(struct i915_execbuffer *eb)
goto err;
}
i915_gem_object_set_readonly(shadow->obj);
+ shadow->private = pool;
trampoline = NULL;
if (CMDPARSER_USES_GGTT(eb->i915)) {
@@ -2113,6 +2153,7 @@ static int eb_parse(struct i915_execbuffer *eb)
shadow = trampoline;
goto err_shadow;
}
+ shadow->private = pool;
eb->batch_flags |= I915_DISPATCH_SECURE;
}
@@ -2129,7 +2170,6 @@ static int eb_parse(struct i915_execbuffer *eb)
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- shadow->private = pool;
return 0;
err_trampoline:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 70f5f82da288..fe45bd4d63a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -93,7 +93,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
addr = -EINTR;
goto err;
}
@@ -103,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (IS_ERR_VALUE(addr))
goto err;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 5d5d7eef3f43..7aff3514d97a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -39,7 +39,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
- struct pagevec pvec;
gfp_t noreclaim;
int ret;
@@ -192,13 +191,17 @@ err_sg:
sg_mark_end(sg);
err_pages:
mapping_clear_unevictable(mapping);
- pagevec_init(&pvec);
- for_each_sgt_page(page, sgt_iter, st) {
- if (!pagevec_add(&pvec, page))
+ if (sg != st->sgl) {
+ struct pagevec pvec;
+
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
}
- if (pagevec_count(&pvec))
- check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 33776b3f3fa5..c31a6744daee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -200,10 +200,10 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (IS_ERR(mn))
err = PTR_ERR(mn);
- down_write(&mm->mm->mmap_sem);
+ mmap_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
- /* Protected by mmap_sem (write-lock) */
+ /* Protected by mmap_lock (write-lock) */
err = __mmu_notifier_register(&mn->mn, mm->mm);
if (!err) {
/* Protected by mm_lock */
@@ -217,7 +217,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
- up_write(&mm->mm->mmap_sem);
+ mmap_write_unlock(mm->mm);
if (mn && !IS_ERR(mn))
kfree(mn);
@@ -468,7 +468,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (mmget_not_zero(mm)) {
while (pinned < npages) {
if (!locked) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = 1;
}
ret = pin_user_pages_remote
@@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pinned += ret;
}
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
@@ -522,8 +522,8 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
/* Spawn a worker so that we can acquire the
* user pages without holding our mutex. Access
- * to the user pages requires mmap_sem, and we have
- * a strict lock ordering of mmap_sem, struct_mutex -
+ * to the user pages requires mmap_lock, and we have
+ * a strict lock ordering of mmap_lock, struct_mutex -
* we already hold struct_mutex here and so cannot
* call gup without encountering a lock inversion.
*
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 74ddb49b2941..e4aece20bc80 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -97,8 +97,6 @@ int __intel_context_do_pin(struct intel_context *ce)
{
int err;
- GEM_BUG_ON(intel_context_is_closed(ce));
-
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index eee530453aa6..ad8a9df49f29 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 1d5ff88078bd..7d361623ff67 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -124,7 +124,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
*/
low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
+ num_types = ARRAY_SIZE(vgpu_types);
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 189b573d02be..372354d33f55 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -572,6 +572,9 @@ struct drm_i915_reg_descriptor {
#define REG32(_reg, ...) \
{ .addr = (_reg), __VA_ARGS__ }
+#define REG32_IDX(_reg, idx) \
+ { .addr = _reg(idx) }
+
/*
* Convenience macro for adding 64-bit registers.
*
@@ -669,6 +672,7 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
REG32(BCS_SWCTRL),
REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
+ REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE),
REG64_IDX(BCS_GPR, 0),
REG64_IDX(BCS_GPR, 1),
REG64_IDX(BCS_GPR, 2),
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 8e45ca3d2ede..55b97c3a3dde 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -47,20 +47,16 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_i915_getparam32 req32;
- drm_i915_getparam_t __user *request;
+ struct drm_i915_getparam req;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(request, sizeof(*request)) ||
- __put_user(req32.param, &request->param) ||
- __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
+ req.param = req32.param;
+ req.value = compat_ptr(req32.value);
- return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
- (unsigned long)request);
+ return drm_ioctl_kernel(file, i915_getparam_ioctl, &req,
+ DRM_RENDER_ALLOW);
}
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index b6376b25ef63..43039dc8c607 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -25,7 +25,6 @@
#include <linux/mm.h>
#include <linux/io-mapping.h>
-#include <asm/pgtable.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index add00ec1f787..02559da61e6e 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -65,7 +65,7 @@ i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-i915_param_named_unsafe(reset, int, 0600,
+i915_param_named_unsafe(reset, uint, 0600,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
i915_param_named_unsafe(vbt_firmware, charp, 0400,
@@ -173,7 +173,7 @@ i915_param_named(enable_gvt, bool, 0400,
#endif
#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)
-i915_param_named_unsafe(fake_lmem_start, ulong, 0600,
+i915_param_named_unsafe(fake_lmem_start, ulong, 0400,
"Fake LMEM start offset (default: 0)");
#endif
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 45323732f099..4f21bfffbf0e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,7 +64,7 @@ struct drm_printer;
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
- param(unsigned int, inject_probe_failure, 0, 0600) \
+ param(unsigned int, inject_probe_failure, 0, 0) \
param(int, fastboot, -1, 0600) \
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 75c60c2afb7e..25329b7600c9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3676,7 +3676,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
* buffered data written by the GPU besides periodic OA metrics.
*
* Note we copy the properties from userspace outside of the i915 perf
- * mutex to avoid an awkward lockdep with mmap_sem.
+ * mutex to avoid an awkward lockdep with mmap_lock.
*
* Most of the implementation details are handled by
* i915_perf_open_ioctl_locked() after taking the &perf->lock
@@ -3896,9 +3896,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
if (!n_regs)
return NULL;
- if (!access_ok(regs, n_regs * sizeof(u32) * 2))
- return ERR_PTR(-EFAULT);
-
/* No is_valid function means we're not allowing any register to be programmed. */
GEM_BUG_ON(!is_valid);
if (!is_valid)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index ef25ce6e395e..e75c528ebbe0 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -25,10 +25,6 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
query_sz))
return -EFAULT;
- if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
- total_length))
- return -EFAULT;
-
return 0;
}
@@ -72,20 +68,20 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.eu_offset = slice_length + subslice_length;
topo.eu_stride = sseu->eu_stride;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
&sseu->slice_mask, slice_length))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
sizeof(topo) + slice_length),
sseu->subslice_mask, subslice_length))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
sizeof(topo) +
slice_length + subslice_length),
sseu->eu_mask, eu_length))
@@ -131,14 +127,14 @@ query_engine_info(struct drm_i915_private *i915,
info.engine.engine_instance = engine->uabi_instance;
info.capabilities = engine->uabi_capabilities;
- if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ if (copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
query.num_engines++;
info_ptr++;
}
- if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ if (copy_to_user(query_ptr, &query, sizeof(query)))
return -EFAULT;
return len;
@@ -158,10 +154,6 @@ static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
if (user_n_regs < kernel_n_regs)
return -EINVAL;
- if (!access_ok(u64_to_user_ptr(user_regs_ptr),
- 2 * sizeof(u32) * kernel_n_regs))
- return -EFAULT;
-
return 0;
}
@@ -170,6 +162,7 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
u64 user_regs_ptr,
u32 *user_n_regs)
{
+ u32 __user *p = u64_to_user_ptr(user_regs_ptr);
u32 r;
if (*user_n_regs == 0) {
@@ -179,25 +172,19 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
*user_n_regs = kernel_n_regs;
- for (r = 0; r < kernel_n_regs; r++) {
- u32 __user *user_reg_ptr =
- u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
- u32 __user *user_val_ptr =
- u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
- sizeof(u32));
- int ret;
-
- ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
- user_reg_ptr);
- if (ret)
- return -EFAULT;
+ if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
- ret = __put_user(kernel_regs[r].value, user_val_ptr);
- if (ret)
- return -EFAULT;
+ for (r = 0; r < kernel_n_regs; r++, p += 2) {
+ unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ p, Efault);
+ unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
}
-
+ user_write_access_end();
return 0;
+Efault:
+ user_write_access_end();
+ return -EFAULT;
}
static int query_perf_config_data(struct drm_i915_private *i915,
@@ -233,10 +220,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
return -EINVAL;
}
- if (!access_ok(user_query_config_ptr, total_size))
- return -EFAULT;
-
- if (__get_user(flags, &user_query_config_ptr->flags))
+ if (get_user(flags, &user_query_config_ptr->flags))
return -EFAULT;
if (flags != 0)
@@ -249,7 +233,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
memset(&uuid, 0, sizeof(uuid));
- if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ if (copy_from_user(uuid, user_query_config_ptr->uuid,
sizeof(user_query_config_ptr->uuid)))
return -EFAULT;
@@ -263,7 +247,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
}
rcu_read_unlock();
} else {
- if (__get_user(config_id, &user_query_config_ptr->config))
+ if (get_user(config_id, &user_query_config_ptr->config))
return -EFAULT;
oa_config = i915_perf_get_oa_config(perf, config_id);
@@ -271,8 +255,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
if (!oa_config)
return -ENOENT;
- if (__copy_from_user(&user_config, user_config_ptr,
- sizeof(user_config))) {
+ if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
ret = -EFAULT;
goto out;
}
@@ -318,8 +301,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
- if (__copy_to_user(user_config_ptr, &user_config,
- sizeof(user_config))) {
+ if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
ret = -EFAULT;
goto out;
}
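The access_ok()/__put_user() pairs removed above are replaced in copy_perf_config_registers_or_number() by a batched pattern: one user_write_access_begin() covering the whole destination, unsafe_put_user() inside the loop, and a single error label that closes the access window. A self-contained sketch of that shape (the helper name and values[] source are made up):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int copy_pairs_to_user(u32 __user *p, const u32 *values, unsigned int n)
{
        unsigned int i;

        if (!user_write_access_begin(p, 2 * sizeof(u32) * n))
                return -EFAULT;

        for (i = 0; i < n; i++, p += 2) {
                unsafe_put_user(values[2 * i], p, efault);         /* register offset */
                unsafe_put_user(values[2 * i + 1], p + 1, efault); /* register value */
        }

        user_write_access_end();
        return 0;

efault:
        user_write_access_end();
        return -EFAULT;
}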
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6c076a24eb82..7717581350bd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,7 +186,7 @@ typedef struct {
#define INVALID_MMIO_REG _MMIO(0)
-static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
+static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 526c1e9acbd5..def62100e666 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -121,8 +121,39 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->submit);
i915_sw_fence_fini(&rq->semaphore);
- /* Keep one request on each engine for reserved use under mempressure */
- if (!cmpxchg(&rq->engine->request_pool, NULL, rq))
+ /*
+ * Keep one request on each engine for reserved use under mempressure
+ *
+ * We do not hold a reference to the engine here and so have to be
+ * very careful in what rq->engine we poke. The virtual engine is
+ * referenced via the rq->context and we released that ref during
+ * i915_request_retire(), ergo we must not dereference a virtual
+ * engine here. Not that we would want to, as the only consumer of
+ * the reserved engine->request_pool is the power management parking,
+ * which must-not-fail, and that is only run on the physical engines.
+ *
+ * Since the request must have been executed to have completed,
+ * we know that it will have been processed by the HW and will
+ * not be unsubmitted again, so rq->engine and rq->execution_mask
+ * at this point are stable. rq->execution_mask will be a single
+ * bit if the last and _only_ engine it could execute on was a
+ * physical engine, if it's multiple bits then it started on and
+ * could still be on a virtual engine. Thus if the mask is not a
+ * power-of-two we assume that rq->engine may still be a virtual
+ * engine and so a dangling invalid pointer that we cannot dereference
+ *
+ * For example, consider the flow of a bonded request through a virtual
+ * engine. The request is created with a wide engine mask (all engines
+ * that we might execute on). On processing the bond, the request mask
+ * is reduced to one or more engines. If the request is subsequently
+ * bound to a single engine, it will then be constrained to only
+ * execute on that engine and never returned to the virtual engine
+ * after timeslicing away, see __unwind_incomplete_requests(). Thus we
+ * know that if the rq->execution_mask is a single bit, rq->engine
+ * can be a physical engine with the exact corresponding mask.
+ */
+ if (is_power_of_2(rq->execution_mask) &&
+ !cmpxchg(&rq->engine->request_pool, NULL, rq))
return;
kmem_cache_free(global.slab_requests, rq);
@@ -326,6 +357,53 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (i915_request_retire(tmp) && tmp != rq);
}
+static struct i915_request * const *
+__engine_active(struct intel_engine_cs *engine)
+{
+ return READ_ONCE(engine->execlists.active);
+}
+
+static bool __request_in_flight(const struct i915_request *signal)
+{
+ struct i915_request * const *port, *rq;
+ bool inflight = false;
+
+ if (!i915_request_is_ready(signal))
+ return false;
+
+ /*
+ * Even if we have unwound the request, it may still be on
+ * the GPU (preempt-to-busy). If that request is inside an
+ * unpreemptible critical section, it will not be removed. Some
+ * GPU functions may even be stuck waiting for the paired request
+ * (__await_execution) to be submitted and cannot be preempted
+ * until the bond is executing.
+ *
+ * As we know that there are always preemption points between
+ * requests, we know that only the currently executing request
+ * may still be active even though we have cleared the flag.
+ * However, we can't rely on our tracking of ELSP[0] to know
+ * which request is currently active and so may be stuck, as
+ * the tracking may be an event behind. Instead assume that
+ * if the context is still inflight, then it is still active
+ * even if the active flag has been cleared.
+ */
+ if (!intel_context_inflight(signal->context))
+ return false;
+
+ rcu_read_lock();
+ for (port = __engine_active(signal->engine); (rq = *port); port++) {
+ if (rq->context == signal->context) {
+ inflight = i915_seqno_passed(rq->fence.seqno,
+ signal->fence.seqno);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return inflight;
+}
+
static int
__await_execution(struct i915_request *rq,
struct i915_request *signal,
@@ -356,7 +434,7 @@ __await_execution(struct i915_request *rq,
}
spin_lock_irq(&signal->lock);
- if (i915_request_is_active(signal)) {
+ if (i915_request_is_active(signal) || __request_in_flight(signal)) {
if (hook) {
hook(rq, &signal->fence);
i915_request_put(signal);
@@ -1022,37 +1100,91 @@ await_fence:
I915_FENCE_GFP);
}
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+ struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl,
+ fence->context,
+ fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
static int
-i915_request_await_request(struct i915_request *to, struct i915_request *from)
+__i915_request_await_execution(struct i915_request *to,
+ struct i915_request *from,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal))
{
- int ret;
+ int err;
- GEM_BUG_ON(to == from);
- GEM_BUG_ON(to->timeline == from->timeline);
+ GEM_BUG_ON(intel_context_is_barrier(from->context));
- if (i915_request_completed(from)) {
- i915_sw_fence_set_error_once(&to->submit, from->fence.error);
+ /* Submit both requests at the same time */
+ err = __await_execution(to, from, hook, I915_FENCE_GFP);
+ if (err)
+ return err;
+
+ /* Squash repeated dependencies to the same timelines */
+ if (intel_timeline_sync_has_start(i915_request_timeline(to),
+ &from->fence))
return 0;
+
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (intel_engine_has_semaphores(to->engine) &&
+ !i915_request_has_initial_breadcrumb(to)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
}
+ /* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(&to->sched,
+ err = i915_sched_node_add_dependency(&to->sched,
&from->sched,
- I915_DEPENDENCY_EXTERNAL);
- if (ret < 0)
- return ret;
+ I915_DEPENDENCY_WEAK);
+ if (err < 0)
+ return err;
}
- if (to->engine == from->engine)
- ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
- &from->submit,
- I915_FENCE_GFP);
- else
- ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
- if (ret < 0)
- return ret;
-
- return 0;
+ return intel_timeline_sync_set_start(i915_request_timeline(to),
+ &from->fence);
}
static void mark_external(struct i915_request *rq)
@@ -1105,23 +1237,20 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
}
int
-i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
+i915_request_await_execution(struct i915_request *rq,
+ struct dma_fence *fence,
+ void (*hook)(struct i915_request *rq,
+ struct dma_fence *signal))
{
struct dma_fence **child = &fence;
unsigned int nchild = 1;
int ret;
- /*
- * Note that if the fence-array was created in signal-on-any mode,
- * we should *not* decompose it into its individual fences. However,
- * we don't currently store which mode the fence-array is operating
- * in. Fortunately, the only user of signal-on-any is private to
- * amdgpu and we should not see any incoming fence-array from
- * sync-file being in signal-on-any mode.
- */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
+ /* XXX Error for signal-on-any fence arrays */
+
child = array->fences;
nchild = array->num_fences;
GEM_BUG_ON(!nchild);
@@ -1134,138 +1263,95 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
continue;
}
- /*
- * Requests on the same timeline are explicitly ordered, along
- * with their dependencies, by i915_request_add() which ensures
- * that requests are submitted in-order through each ring.
- */
if (fence->context == rq->fence.context)
continue;
- /* Squash repeated waits to the same timelines */
- if (fence->context &&
- intel_timeline_sync_is_later(i915_request_timeline(rq),
- fence))
- continue;
+ /*
+ * We don't squash repeated fence dependencies here as we
+ * want to run our callback in all cases.
+ */
if (dma_fence_is_i915(fence))
- ret = i915_request_await_request(rq, to_request(fence));
+ ret = __i915_request_await_execution(rq,
+ to_request(fence),
+ hook);
else
ret = i915_request_await_external(rq, fence);
if (ret < 0)
return ret;
-
- /* Record the latest fence used against each timeline */
- if (fence->context)
- intel_timeline_sync_set(i915_request_timeline(rq),
- fence);
} while (--nchild);
return 0;
}
-static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
- struct dma_fence *fence)
-{
- return __intel_timeline_sync_is_later(tl,
- fence->context,
- fence->seqno - 1);
-}
-
-static int intel_timeline_sync_set_start(struct intel_timeline *tl,
- const struct dma_fence *fence)
+static int
+await_request_submit(struct i915_request *to, struct i915_request *from)
{
- return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+ /*
+ * If we are waiting on a virtual engine, then it may be
+ * constrained to execute on a single engine *prior* to submission.
+ * When it is submitted, it will be first submitted to the virtual
+ * engine and then passed to the physical engine. We cannot allow
+ * the waiter to be submitted immediately to the physical engine
+ * as it may then bypass the virtual request.
+ */
+ if (to->engine == READ_ONCE(from->engine))
+ return i915_sw_fence_await_sw_fence_gfp(&to->submit,
+ &from->submit,
+ I915_FENCE_GFP);
+ else
+ return __i915_request_await_execution(to, from, NULL);
}
static int
-__i915_request_await_execution(struct i915_request *to,
- struct i915_request *from,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal))
+i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
- int err;
-
- GEM_BUG_ON(intel_context_is_barrier(from->context));
+ int ret;
- /* Submit both requests at the same time */
- err = __await_execution(to, from, hook, I915_FENCE_GFP);
- if (err)
- return err;
+ GEM_BUG_ON(to == from);
+ GEM_BUG_ON(to->timeline == from->timeline);
- /* Squash repeated depenendices to the same timelines */
- if (intel_timeline_sync_has_start(i915_request_timeline(to),
- &from->fence))
+ if (i915_request_completed(from)) {
+ i915_sw_fence_set_error_once(&to->submit, from->fence.error);
return 0;
-
- /*
- * Wait until the start of this request.
- *
- * The execution cb fires when we submit the request to HW. But in
- * many cases this may be long before the request itself is ready to
- * run (consider that we submit 2 requests for the same context, where
- * the request of interest is behind an indefinite spinner). So we hook
- * up to both to reduce our queues and keep the execution lag minimised
- * in the worst case, though we hope that the await_start is elided.
- */
- err = i915_request_await_start(to, from);
- if (err < 0)
- return err;
-
- /*
- * Ensure both start together [after all semaphores in signal]
- *
- * Now that we are queued to the HW at roughly the same time (thanks
- * to the execute cb) and are ready to run at roughly the same time
- * (thanks to the await start), our signaler may still be indefinitely
- * delayed by waiting on a semaphore from a remote engine. If our
- * signaler depends on a semaphore, so indirectly do we, and we do not
- * want to start our payload until our signaler also starts theirs.
- * So we wait.
- *
- * However, there is also a second condition for which we need to wait
- * for the precise start of the signaler. Consider that the signaler
- * was submitted in a chain of requests following another context
- * (with just an ordinary intra-engine fence dependency between the
- * two). In this case the signaler is queued to HW, but not for
- * immediate execution, and so we must wait until it reaches the
- * active slot.
- */
- if (intel_engine_has_semaphores(to->engine) &&
- !i915_request_has_initial_breadcrumb(to)) {
- err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
- if (err < 0)
- return err;
}
- /* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
- err = i915_sched_node_add_dependency(&to->sched,
+ ret = i915_sched_node_add_dependency(&to->sched,
&from->sched,
- I915_DEPENDENCY_WEAK);
- if (err < 0)
- return err;
+ I915_DEPENDENCY_EXTERNAL);
+ if (ret < 0)
+ return ret;
}
- return intel_timeline_sync_set_start(i915_request_timeline(to),
- &from->fence);
+ if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+ ret = await_request_submit(to, from);
+ else
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
int
-i915_request_await_execution(struct i915_request *rq,
- struct dma_fence *fence,
- void (*hook)(struct i915_request *rq,
- struct dma_fence *signal))
+i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
struct dma_fence **child = &fence;
unsigned int nchild = 1;
int ret;
+ /*
+ * Note that if the fence-array was created in signal-on-any mode,
+ * we should *not* decompose it into its individual fences. However,
+ * we don't currently store which mode the fence-array is operating
+ * in. Fortunately, the only user of signal-on-any is private to
+ * amdgpu and we should not see any incoming fence-array from
+ * sync-file being in signal-on-any mode.
+ */
if (dma_fence_is_array(fence)) {
struct dma_fence_array *array = to_dma_fence_array(fence);
- /* XXX Error for signal-on-any fence arrays */
-
child = array->fences;
nchild = array->num_fences;
GEM_BUG_ON(!nchild);
@@ -1278,22 +1364,31 @@ i915_request_await_execution(struct i915_request *rq,
continue;
}
+ /*
+ * Requests on the same timeline are explicitly ordered, along
+ * with their dependencies, by i915_request_add() which ensures
+ * that requests are submitted in-order through each ring.
+ */
if (fence->context == rq->fence.context)
continue;
- /*
- * We don't squash repeated fence dependencies here as we
- * want to run our callback in all cases.
- */
+ /* Squash repeated waits to the same timelines */
+ if (fence->context &&
+ intel_timeline_sync_is_later(i915_request_timeline(rq),
+ fence))
+ continue;
if (dma_fence_is_i915(fence))
- ret = __i915_request_await_execution(rq,
- to_request(fence),
- hook);
+ ret = i915_request_await_request(rq, to_request(fence));
else
ret = i915_request_await_external(rq, fence);
if (ret < 0)
return ret;
+
+ /* Record the latest fence used against each timeline */
+ if (fence->context)
+ intel_timeline_sync_set(i915_request_timeline(rq),
+ fence);
} while (--nchild);
return 0;
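Both the request-pool release and await_request_submit() paths above key off is_power_of_2(rq->execution_mask): a single set bit means the request is tied to exactly one physical engine, so rq->engine can be trusted; more than one bit means a virtual engine may still own it. A one-line sketch (the helper name is illustrative only):

#include <linux/log2.h>
#include <linux/types.h>

/* true when the mask pins the request to exactly one physical engine */
static inline bool execution_mask_is_physical(unsigned long mask)
{
        return is_power_of_2(mask);
}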
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index f4ea318781f0..cbb880b10c65 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,14 +209,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
- ENGINE_TRACE(engine,
- "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
- prio,
- rq->fence.context, rq->fence.seqno,
- inflight->fence.context, inflight->fence.seqno,
- inflight->sched.attr.priority);
- engine->execlists.queue_priority_hint = prio;
-
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -224,6 +216,14 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
+ ENGINE_TRACE(engine,
+ "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+ prio,
+ rq->fence.context, rq->fence.seqno,
+ inflight->fence.context, inflight->fence.seqno,
+ inflight->sched.attr.priority);
+
+ engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index fa5ffc4fe823..c420f5a3d33b 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -11,6 +11,7 @@ config DRM_MEDIATEK
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
+ select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6fb0d6983a4a..3ae9c810845b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -119,7 +119,10 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_color_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 891d80c73e04..28651bc579bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -386,7 +386,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_ovl_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0cb848d64206..e04319fedf46 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -294,7 +294,10 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_rdma_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 945c3ac92998..d4f0fb7ad312 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -739,21 +739,27 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dpi->engine_clk)) {
ret = PTR_ERR(dpi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
return ret;
}
dpi->pixel_clk = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clk)) {
ret = PTR_ERR(dpi->pixel_clk);
- dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+
return ret;
}
dpi->tvd_clk = devm_clk_get(dev, "pll");
if (IS_ERR(dpi->tvd_clk)) {
ret = PTR_ERR(dpi->tvd_clk);
- dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+
return ret;
}
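Every mediatek hunk above applies the same rule: do not print an error for -EPROBE_DEFER, since probe deferral is retried and the message would only be noise. A hypothetical helper capturing the pattern for the clock lookups:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct clk *get_clock_or_defer(struct device *dev, const char *name)
{
        struct clk *clk = devm_clk_get(dev, name);

        /* only complain about real failures, stay quiet on deferral */
        if (IS_ERR(clk) && PTR_ERR(clk) != -EPROBE_DEFER)
                dev_err(dev, "Failed to get %s clock: %ld\n",
                        name, PTR_ERR(clk));

        return clk;
}

Later kernels wrap this policy in dev_err_probe(), but the open-coded check is what this series uses.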
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index fe85e487e477..fe46c4bac64d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -28,7 +29,7 @@
* @enabled: records whether crtc_enable succeeded
* @planes: array of 4 drm_plane structures, one for each overlay plane
* @pending_planes: whether any plane has pending changes to be applied
- * @config_regs: memory mapped mmsys configuration register space
+ * @mmsys_dev: pointer to the mmsys device for configuration registers
* @mutex: handle to one of the ten disp_mutex streams
* @ddp_comp_nr: number of components in ddp_comp
* @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
@@ -50,7 +51,7 @@ struct mtk_drm_crtc {
u32 cmdq_event;
#endif
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
@@ -300,9 +301,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -360,9 +361,9 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_crtc->ddp_comp[i]->id);
mtk_disp_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -766,7 +767,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!mtk_crtc)
return -ENOMEM;
- mtk_crtc->config_regs = priv->config_regs;
+ mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 13035c906035..014c1bbe1df2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -13,26 +13,6 @@
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
-#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
-#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
-#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
-#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
-#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
-#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
-#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
-#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
-#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
-#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
-#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
-#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
-#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
-
-#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
-#define DISP_REG_CONFIG_OUT_SEL 0x04c
-#define DISP_REG_CONFIG_DSI_SEL 0x050
-#define DISP_REG_CONFIG_DPI_SEL 0x064
-
#define MT2701_DISP_MUTEX0_MOD0 0x2c
#define MT2701_DISP_MUTEX0_SOF0 0x30
@@ -94,48 +74,6 @@
#define MUTEX_SOF_DSI2 5
#define MUTEX_SOF_DSI3 6
-#define OVL0_MOUT_EN_COLOR0 0x1
-#define OD_MOUT_EN_RDMA0 0x1
-#define OD1_MOUT_EN_RDMA1 BIT(16)
-#define UFOE_MOUT_EN_DSI0 0x1
-#define COLOR0_SEL_IN_OVL0 0x1
-#define OVL1_MOUT_EN_COLOR1 0x1
-#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA0_SOUT_DPI0 0x2
-#define RDMA0_SOUT_DPI1 0x3
-#define RDMA0_SOUT_DSI1 0x1
-#define RDMA0_SOUT_DSI2 0x4
-#define RDMA0_SOUT_DSI3 0x5
-#define RDMA1_SOUT_DPI0 0x2
-#define RDMA1_SOUT_DPI1 0x3
-#define RDMA1_SOUT_DSI1 0x1
-#define RDMA1_SOUT_DSI2 0x4
-#define RDMA1_SOUT_DSI3 0x5
-#define RDMA2_SOUT_DPI0 0x2
-#define RDMA2_SOUT_DPI1 0x3
-#define RDMA2_SOUT_DSI1 0x1
-#define RDMA2_SOUT_DSI2 0x4
-#define RDMA2_SOUT_DSI3 0x5
-#define DPI0_SEL_IN_RDMA1 0x1
-#define DPI0_SEL_IN_RDMA2 0x3
-#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
-#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
-#define DSI0_SEL_IN_RDMA1 0x1
-#define DSI0_SEL_IN_RDMA2 0x4
-#define DSI1_SEL_IN_RDMA1 0x1
-#define DSI1_SEL_IN_RDMA2 0x4
-#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
-#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
-#define COLOR1_SEL_IN_OVL1 0x1
-
-#define OVL_MOUT_EN_RDMA 0x1
-#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
-#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
-#define DSI_SEL_IN_BLS 0x0
-#define DPI_SEL_IN_BLS 0x0
-#define DSI_SEL_IN_RDMA 0x1
struct mtk_disp_mutex {
int id;
@@ -246,200 +184,6 @@ static const struct mtk_ddp_data mt8173_ddp_driver_data = {
.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
};
-static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
- value = OVL0_MOUT_EN_COLOR0;
- } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
- value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD_MOUT_EN_RDMA0;
- } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
- value = UFOE_MOUT_EN_DSI0;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
- value = OVL1_MOUT_EN_COLOR1;
- } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
- value = GAMMA_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD1_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI3;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
- value = COLOR0_SEL_IN_OVL0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI3_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI3_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
- value = COLOR1_SEL_IN_OVL1;
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSI_SEL;
- value = DSI_SEL_IN_BLS;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static void mtk_ddp_sout_sel(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
- writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- writel_relaxed(DSI_SEL_IN_RDMA,
- config_regs + DISP_REG_CONFIG_DSI_SEL);
- writel_relaxed(DPI_SEL_IN_BLS,
- config_regs + DISP_REG_CONFIG_DPI_SEL);
- }
-}
-
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- mtk_ddp_sout_sel(config_regs, cur, next);
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
{
struct mtk_ddp *ddp = dev_get_drvdata(dev);
@@ -628,7 +372,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
if (!ddp->data->no_clk) {
ddp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
+ if (PTR_ERR(ddp->clk) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clock\n");
return PTR_ERR(ddp->clk);
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 827be424a148..6b691a57be4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -12,13 +12,6 @@ struct regmap;
struct device;
struct mtk_disp_mutex;
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index ce570283b55f..6bd369434d9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
@@ -418,11 +419,22 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ }
};
+static const struct of_device_id mtk_drm_of_ids[] = {
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
+ { }
+};
+
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *phandle = dev->parent->of_node;
+ const struct of_device_id *of_id;
struct mtk_drm_private *private;
- struct resource *mem;
struct device_node *node;
struct component_match *match = NULL;
int ret;
@@ -433,18 +445,20 @@ static int mtk_drm_probe(struct platform_device *pdev)
return -ENOMEM;
private->data = of_device_get_match_data(dev);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- private->config_regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(private->config_regs)) {
- ret = PTR_ERR(private->config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
- return ret;
+ private->mmsys_dev = dev->parent;
+ if (!private->mmsys_dev) {
+ dev_err(dev, "Failed to get MMSYS device\n");
+ return -ENODEV;
}
+ of_id = of_match_node(mtk_drm_of_ids, phandle);
+ if (!of_id)
+ return -ENODEV;
+
+ private->data = of_id->data;
+
/* Iterate over sibling DISP function blocks */
- for_each_child_of_node(dev->of_node->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
const struct of_device_id *of_id;
enum mtk_ddp_comp_type comp_type;
int comp_id;
@@ -578,22 +592,11 @@ static int mtk_drm_sys_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
mtk_drm_sys_resume);
-static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt2701-mmsys",
- .data = &mt2701_mmsys_driver_data},
- { .compatible = "mediatek,mt2712-mmsys",
- .data = &mt2712_mmsys_driver_data},
- { .compatible = "mediatek,mt8173-mmsys",
- .data = &mt8173_mmsys_driver_data},
- { }
-};
-
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
- .of_match_table = mtk_drm_of_ids,
.pm = &mtk_drm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 17bc99b9f5d4..b5be63e53176 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -39,7 +39,7 @@ struct mtk_drm_private {
struct device_node *mutex_node;
struct device *mutex_dev;
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index a9a25087112f..270bf22c98fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1186,14 +1186,18 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
- dev_err(dev, "Failed to get digital clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get digital clock: %d\n", ret);
goto err_unregister_host;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 7bc086ec74f7..5feb760617cb 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1470,7 +1470,9 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
ret = mtk_hdmi_get_all_clk(hdmi, np);
if (ret) {
- dev_err(dev, "Failed to get clocks: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clocks: %d\n", ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1579cf0d828f..42f8aae28b31 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -65,6 +65,7 @@ msm-y := \
disp/dpu1/dpu_hw_lm.o \
disp/dpu1/dpu_hw_pingpong.o \
disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_dspp.o \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 1f83bc18d500..60f6472a3e58 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -401,6 +401,21 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
+static struct msm_gem_address_space *
+a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ SZ_16M + 0xfff * SZ_64K);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
/* Register offset defines for A2XX - copy of A3XX */
static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -429,6 +444,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = a2xx_create_address_space,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b67f88872726..0a5ea9f56cb8 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -441,6 +441,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 253d8d85daad..b9b26b2bf9c5 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -66,19 +66,22 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
}
}
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
- 0x00000922);
- }
+ /* No CCU for A405 */
+ if (!adreno_is_a405(adreno_gpu)) {
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
- 0x00000000);
- }
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
- 0x00000001);
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
}
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
@@ -137,7 +140,9 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a420(adreno_gpu)) {
+ if (adreno_is_a405(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -440,6 +445,52 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
+static const unsigned int a405_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* VBIF version 0x20050000 */
+ 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
+ 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
+ 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
+ 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
+ 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
+ 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
+ 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
+ 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
+ ~0 /* sentinel */
+};
+
static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -532,6 +583,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a4xx_get_timestamp,
};
@@ -563,13 +615,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
- adreno_gpu->registers = a4xx_registers;
- adreno_gpu->reg_offsets = a4xx_register_offsets;
-
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
+ adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
+ a4xx_registers;
+ adreno_gpu->reg_offsets = a4xx_register_offsets;
+
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 724024a2243a..d95970a73fb4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1404,6 +1404,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
u64 busy_cycles, busy_time;
+ /* Only read the gpu busy if the hardware is already active */
+ if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
+ return 0;
+
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
@@ -1412,6 +1416,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
gpu->devfreq.busy_cycles = busy_cycles;
+ pm_runtime_put(&gpu->pdev->dev);
+
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
@@ -1439,6 +1445,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a5xx_get_timestamp,
};
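a5xx_gpu_busy() above now guards the counter read with runtime PM: pm_runtime_get_if_in_use() only takes a reference (and returns 1) when the device is already powered, and that reference is dropped with pm_runtime_put() once the counters have been read. A sketch of the shape, with read_counter() standing in for the real register reads:

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

static u64 read_busy_cycles(struct device *dev,
                            u64 (*read_counter)(struct device *dev))
{
        u64 cycles;

        /* not already active (or runtime PM disabled): nothing to read */
        if (pm_runtime_get_if_in_use(dev) <= 0)
                return 0;

        cycles = read_counter(dev);

        pm_runtime_put(dev);
        return cycles;
}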
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index ed78fee2a262..47840b73cdda 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1047,6 +1047,8 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
+#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+
#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
@@ -1764,6 +1766,8 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+
#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
@@ -2418,6 +2422,16 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c4e71abbdd53..096be97ce9f9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,14 +2,16 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
@@ -127,8 +129,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- gmu->freq = gmu->gpu_freqs[index];
-
/*
* Eventually we will want to scale the path vote with the frequency but
* for now leave it at max so that the performance is nominal.
@@ -151,8 +151,21 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
break;
gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active
+ */
+ if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+ return;
- __a6xx_gmu_set_freq(gmu, perf_index);
+ if (gmu->legacy)
+ __a6xx_gmu_set_freq(gmu, perf_index);
+ else
+ a6xx_hfi_set_freq(gmu, perf_index);
+
+ pm_runtime_put(gmu->dev);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
@@ -196,6 +209,12 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
u32 val;
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ /* Set the log wptr index
+ * Note: the downstream driver saves this value at power off and restores it here
+ */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
@@ -232,8 +251,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
switch (state) {
case GMU_OOB_GPU_SET:
- request = GMU_OOB_GPU_SET_REQUEST;
- ack = GMU_OOB_GPU_SET_ACK;
+ if (gmu->legacy) {
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ } else {
+ request = GMU_OOB_GPU_SET_REQUEST_NEW;
+ ack = GMU_OOB_GPU_SET_ACK_NEW;
+ }
name = "GPU_SET";
break;
case GMU_OOB_BOOT_SLUMBER:
@@ -272,6 +296,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ if (!gmu->legacy) {
+ WARN_ON(state != GMU_OOB_GPU_SET);
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+ return;
+ }
+
switch (state) {
case GMU_OOB_GPU_SET:
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
@@ -294,6 +325,9 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!gmu->legacy)
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
@@ -313,6 +347,9 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
u32 val;
int ret;
+ if (!gmu->legacy)
+ return;
+
/* Make sure retention is on */
gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
@@ -356,6 +393,11 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
a6xx_sptprac_disable(gmu);
+ if (!gmu->legacy) {
+ ret = a6xx_hfi_send_prep_slumber(gmu);
+ goto out;
+ }
+
/* Tell the GMU to get ready to slumber */
gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
@@ -371,6 +413,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
}
+out:
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -392,7 +435,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
return ret;
}
- ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
if (ret) {
@@ -418,7 +461,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
- ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
@@ -441,32 +484,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ uint32_t pdc_address_offset;
if (!pdcptr || !seqptr)
goto err;
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+ pdc_address_offset = 0x30090;
+ else if (adreno_is_a650(adreno_gpu))
+ pdc_address_offset = 0x300a0;
+ else
+ pdc_address_offset = 0x30080;
+
/* Disable SDE clock gating */
- gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+ gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
/* Setup RSC PDC handshake for sleep and wakeup */
- gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
- gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
/* Load RSC sequencer uCode for sleep and wakeup */
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ if (adreno_is_a650(adreno_gpu)) {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ } else {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ }
/* Load PDC sequencer uCode for power up and power down sequence */
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
@@ -487,10 +546,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- if (adreno_is_a618(adreno_gpu))
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
- else
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
@@ -502,17 +558,12 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- if (adreno_is_a618(adreno_gpu))
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
-
-
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- if (adreno_is_a618(adreno_gpu))
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
- else
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
@@ -542,6 +593,8 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
@@ -571,14 +624,95 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
+struct block_header {
+ u32 addr;
+ u32 size;
+ u32 type;
+ u32 value;
+ u32 data[];
+};
+
+/* this should be a general kernel helper */
+static int in_range(u32 addr, u32 start, u32 size)
+{
+ return addr >= start && addr < start + size;
+}
+
+static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
+{
+ if (!in_range(blk->addr, bo->iova, bo->size))
+ return false;
+
+ memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
+ return true;
+}
+
+static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
+ const struct block_header *blk;
+ u32 reg_offset;
+
+ u32 itcm_base = 0x00000000;
+ u32 dtcm_base = 0x00040000;
+
+ if (adreno_is_a650(adreno_gpu))
+ dtcm_base = 0x10004000;
+
+ if (gmu->legacy) {
+ /* Sanity check the size of the firmware that was loaded */
+ if (fw_image->size > 0x8000) {
+ DRM_DEV_ERROR(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
+ (u32*) fw_image->data, fw_image->size);
+ return 0;
+ }
+
+
+ for (blk = (const struct block_header *) fw_image->data;
+ (const u8*) blk < fw_image->data + fw_image->size;
+ blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ if (blk->size == 0)
+ continue;
+
+ if (in_range(blk->addr, itcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - itcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - dtcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (!fw_block_mem(&gmu->icache, blk) &&
+ !fw_block_mem(&gmu->dcache, blk) &&
+ !fw_block_mem(&gmu->dummy, blk)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
+ blk->addr, blk->size, blk->data[0]);
+ }
+ }
+
+ return 0;
+}
+
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
static bool rpmh_init;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- int i, ret;
+ int ret;
u32 chipid;
- u32 *image;
+
+ if (adreno_is_a650(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
@@ -589,13 +723,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
"GMU firmware is not loaded\n"))
return -ENOENT;
- /* Sanity check the size of the firmware that was loaded */
- if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- DRM_DEV_ERROR(gmu->dev,
- "GMU firmware is bigger than the available region\n");
- return -EINVAL;
- }
-
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
@@ -609,18 +736,16 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
return ret;
}
- image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
-
- for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
- gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
- image[i]);
+ ret = a6xx_gmu_fw_load(gmu);
+ if (ret)
+ return ret;
}
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
/* Write the iova of the HFI table */
- gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -633,6 +758,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
@@ -640,9 +768,11 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (ret)
return ret;
- ret = a6xx_gmu_gfx_rail_on(gmu);
- if (ret)
- return ret;
+ if (gmu->legacy) {
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+ }
/* Enable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
@@ -683,13 +813,13 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
u32 val;
/* Make sure there are no outstanding RPMh votes */
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
}
@@ -744,6 +874,13 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
+ /*
+ * The warm boot path does not work on newer GPUs, presumably because
+ * the icache/dcache regions must be restored
+ */
+ if (!gmu->legacy)
+ status = GMU_COLD_BOOT;
+
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
goto out;
@@ -761,7 +898,10 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->hfi_irq);
/* Set the GPU to the current freq */
- __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+ if (gmu->legacy)
+ __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+ else
+ a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
@@ -919,34 +1059,75 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
return 0;
}
-static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- if (IS_ERR_OR_NULL(bo))
- return;
-
- dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
- kfree(bo);
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
+
+ gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+ msm_gem_address_space_put(gmu->aspace);
}
-static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
- size_t size)
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+ size_t size, u64 iova)
{
- struct a6xx_gmu_bo *bo;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct drm_device *dev = a6xx_gpu->base.base.dev;
+ uint32_t flags = MSM_BO_WC;
+ u64 range_start, range_end;
+ int ret;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
+ size = PAGE_ALIGN(size);
+ if (!iova) {
+ /* no fixed address - use GMU's uncached range */
+ range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+ range_end = 0x80000000;
+ } else {
+ /* range for fixed address */
+ range_start = iova;
+ range_end = iova + size;
+ /* use IOMMU_PRIV for icache/dcache */
+ flags |= MSM_BO_MAP_PRIV;
+ }
- bo->size = PAGE_ALIGN(size);
+ bo->obj = msm_gem_new(dev, size, flags);
+ if (IS_ERR(bo->obj))
+ return PTR_ERR(bo->obj);
- bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+ if (ret) {
+ drm_gem_object_put(bo->obj);
+ return ret;
+ }
+
+ bo->virt = msm_gem_get_vaddr(bo->obj);
+ bo->size = size;
+
+ return 0;
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
- if (!bo->virt) {
- kfree(bo);
- return ERR_PTR(-ENOMEM);
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return -ENODEV;
+
+ mmu = msm_iommu_new(gmu->dev, domain);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ if (IS_ERR(gmu->aspace)) {
+ iommu_domain_free(domain);
+ return PTR_ERR(gmu->aspace);
}
- return bo;
+ return 0;
}
/* Return the 'arc-level' for the given frequency */
@@ -1011,8 +1192,8 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
if (j == pri_count) {
DRM_DEV_ERROR(dev,
- "Level %u not found in in the RPMh list\n",
- level);
+ "Level %u not found in the RPMh list\n",
+ level);
DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
DRM_DEV_ERROR(dev, " %u\n", pri[j]);
@@ -1190,6 +1371,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
if (!gmu->initialized)
return;
@@ -1202,9 +1384,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
}
iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
gmu->mmio = NULL;
+ gmu->rscc = NULL;
- a6xx_gmu_memory_free(gmu, gmu->hfi);
+ a6xx_gmu_memory_free(gmu);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1217,6 +1402,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
int ret;
@@ -1226,15 +1412,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- /* Pass force_dma false to require the DT to set the dma region */
- ret = of_dma_configure(gmu->dev, node, false);
- if (ret)
- return ret;
-
- /* Set the mask after the of_dma_configure() */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
- if (ret)
- return ret;
+ of_dma_configure(gmu->dev, node, true);
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1246,20 +1424,64 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+ /* Allocate memory for the GMU dummy page */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
+ if (ret)
+ goto err_memory;
+
+ if (adreno_is_a650(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_16M - SZ_16K, 0x04000);
+ if (ret)
+ goto err_memory;
+ } else if (adreno_is_a640(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_256K - SZ_16K, 0x04000);
+ if (ret)
+ goto err_memory;
+
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+ SZ_256K - SZ_16K, 0x44000);
+ if (ret)
+ goto err_memory;
+ } else {
+ /* HFI v1, has sptprac */
+ gmu->legacy = true;
+
+ /* Allocate memory for the GMU debug region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ if (ret)
+ goto err_memory;
+ }
+
/* Allocate memory for the HFI queues */
- gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
- if (IS_ERR(gmu->hfi))
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ if (ret)
goto err_memory;
- /* Allocate memory for the GMU debug region */
- gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
- if (IS_ERR(gmu->debug))
+ /* Allocate memory for the GMU log region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ if (ret)
goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
- if (IS_ERR(gmu->mmio))
+ if (IS_ERR(gmu->mmio)) {
+ ret = PTR_ERR(gmu->mmio);
goto err_memory;
+ }
+
+ if (adreno_is_a650(adreno_gpu)) {
+ gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ if (IS_ERR(gmu->rscc))
+ goto err_mmio;
+ } else {
+ gmu->rscc = gmu->mmio + 0x23000;
+ }
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
@@ -1286,13 +1508,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
err_mmio:
iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
-err_memory:
- a6xx_gmu_memory_free(gmu, gmu->hfi);
ret = -ENODEV;
+err_memory:
+ a6xx_gmu_memory_free(gmu);
err_put_device:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
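The new a6xx_gmu_fw_load() above treats the newer GMU firmware as a sequence of self-describing blocks instead of a flat ITCM image. As a rough, standalone illustration of walking that container (struct layout copied from the hunk; the userspace types and printout are only for the example, and block sizes are assumed to be dword aligned, as the kernel loop requires):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct block_header {
	uint32_t addr;    /* load address of this block */
	uint32_t size;    /* payload size in bytes      */
	uint32_t type;
	uint32_t value;
	uint32_t data[];  /* 'size' bytes of payload    */
};

/* Walk every block in a firmware image.  blk->size is in bytes and
 * data[] is dword-typed, so the next header sits size >> 2 dwords past
 * data[], exactly as the loop in a6xx_gmu_fw_load() advances.
 */
void walk_fw_blocks(const uint8_t *image, size_t len)
{
	const struct block_header *blk = (const struct block_header *)image;

	while ((const uint8_t *)blk < image + len) {
		if (blk->size)
			printf("block addr=0x%08x size=%u\n",
			       blk->addr, blk->size);
		blk = (const struct block_header *)&blk->data[blk->size >> 2];
	}
}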
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 4af65a36d5ca..47df4745db50 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -10,9 +10,10 @@
#include "a6xx_hfi.h"
struct a6xx_gmu_bo {
+ struct drm_gem_object *obj;
void *virt;
size_t size;
- dma_addr_t iova;
+ u64 iova;
};
/*
@@ -43,7 +44,10 @@ struct a6xx_gmu_bo {
struct a6xx_gmu {
struct device *dev;
+ struct msm_gem_address_space *aspace;
+
void * __iomem mmio;
+ void * __iomem rscc;
int hfi_irq;
int gmu_irq;
@@ -52,8 +56,12 @@ struct a6xx_gmu {
int idle_level;
- struct a6xx_gmu_bo *hfi;
- struct a6xx_gmu_bo *debug;
+ struct a6xx_gmu_bo hfi;
+ struct a6xx_gmu_bo debug;
+ struct a6xx_gmu_bo icache;
+ struct a6xx_gmu_bo dcache;
+ struct a6xx_gmu_bo dummy;
+ struct a6xx_gmu_bo log;
int nr_clocks;
struct clk_bulk_data *clocks;
@@ -76,6 +84,7 @@ struct a6xx_gmu {
bool initialized;
bool hung;
+ bool legacy; /* a618 or a630 */
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -88,6 +97,13 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
return msm_writel(value, gmu->mmio + (offset << 2));
}
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+ memcpy_toio(gmu->mmio + (offset << 2), data, size);
+ wmb();
+}
+
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@@ -111,6 +127,15 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+ interval, timeout)
+
/*
* These are the available OOB (out of band requests) to the GMU where "out of
* band" means that the CPU talks to the GMU directly and not through HFI.
@@ -156,10 +181,16 @@ enum a6xx_gmu_oob_state {
#define GMU_OOB_GPU_SET_ACK 24
#define GMU_OOB_GPU_SET_CLEAR 24
+#define GMU_OOB_GPU_SET_REQUEST_NEW 30
+#define GMU_OOB_GPU_SET_ACK_NEW 31
+#define GMU_OOB_GPU_SET_CLEAR_NEW 31
+
void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 1cc1c135236b..176ae94d9fe6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -101,6 +101,10 @@ static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00
+
+#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01
+
#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
@@ -199,6 +203,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
+
#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
@@ -330,8 +340,6 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
-#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
-
#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
@@ -344,39 +352,41 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
-#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a
-#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c
-#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100
-#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
-#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
-#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
-#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee
-#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496
-#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
#endif /* A6XX_GMU_XML */
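The renumbering above only changes the base the RSCC registers are expressed against: the old values were absolute GMU dword offsets (0x8cxx), the new ones are relative to the RSCC block, and the non-a650 path compensates with gmu->rscc = gmu->mmio + 0x23000. A quick check of that arithmetic (offsets are dword indices, shifted left by 2 in the accessors; the macro names here exist only for this check):

/* gmu->rscc = gmu->mmio + 0x23000 (bytes); register offsets are dwords,
 * so the alias starts at dword 0x8c00.  The old absolute value of
 * REG_A6XX_RSCC_PDC_SEQ_START_ADDR (0x8c08) is therefore the new
 * RSCC-relative value (0x0008) plus that base.
 */
#define RSCC_BYTE_OFFSET  0x23000
#define RSCC_DWORD_BASE   (RSCC_BYTE_OFFSET >> 2)            /* 0x8c00 */

_Static_assert(RSCC_DWORD_BASE + 0x0008 == 0x8c08,
	       "RSCC-relative register offset matches the old absolute one");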
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 68af24150de5..a1589e040c57 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -414,7 +414,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
- gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else {
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ }
+
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
@@ -429,25 +439,35 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
- /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
- REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+ if (!adreno_is_a650(adreno_gpu)) {
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
- REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
- 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ }
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+ else
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
/* Setting the mem pool size */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values */
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+ if (adreno_is_a650(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
+ else if (adreno_is_a640(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
+ else
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
@@ -471,6 +491,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+ /* Set weights for bicubic filtering */
+ if (adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ 0x3fe05ff4);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ 0x3fa0ebee);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ 0x3f5193ed);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ 0x3f0243f0);
+ }
+
/* Protect registers from the CP */
gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
@@ -508,6 +541,11 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ if (adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ }
+
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@@ -566,8 +604,10 @@ out:
*/
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
- /* Take the GMU out of its special boot mode */
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ if (a6xx_gpu->gmu.legacy) {
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ }
return ret;
}
@@ -810,6 +850,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u64 busy_cycles, busy_time;
+
+ /* Only read the GPU busy counter if the hardware is already active */
+ if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
+ return 0;
+
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
@@ -819,6 +864,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
gpu->devfreq.busy_cycles = busy_cycles;
+ pm_runtime_put(a6xx_gpu->gmu.dev);
+
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
@@ -846,6 +893,7 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
#endif
},
.get_timestamp = a6xx_get_timestamp,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index e450e0b97211..9921e632f1ca 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -17,10 +17,14 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
+ HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
+ HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};
-static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
- u32 dwords)
+static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
struct a6xx_hfi_queue_header *header = queue->header;
u32 i, hdr, index = header->read_index;
@@ -48,6 +52,9 @@ static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
index = (index + 1) % header->size;
}
+ if (!gmu->legacy)
+ index = ALIGN(index, 4) % header->size;
+
header->read_index = index;
return HFI_HEADER_SIZE(hdr);
}
@@ -73,6 +80,12 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
index = (index + 1) % header->size;
}
+ /* Pad any unused dwords at the end of the message with a cookie value */
+ if (!gmu->legacy) {
+ for (; index % 4; index = (index + 1) % header->size)
+ queue->data[index] = 0xfafafafa;
+ }
+
header->write_index = index;
spin_unlock(&queue->lock);
@@ -106,7 +119,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_response resp;
/* Get the next packet */
- ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
/* If the queue is empty our response never made it */
@@ -176,8 +189,8 @@ static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
- msg.dbg_buffer_addr = (u32) gmu->debug->iova;
- msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.dbg_buffer_addr = (u32) gmu->debug.iova;
+ msg.dbg_buffer_size = (u32) gmu->debug.size;
msg.boot_state = boot_state;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
@@ -195,6 +208,28 @@ static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
version, sizeof(*version));
}
+static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_perf_table msg = { 0 };
@@ -205,6 +240,7 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
for (i = 0; i < gmu->nr_gpu_freqs; i++) {
msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].acd = 0xffffffff;
msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
}
@@ -306,7 +342,45 @@ static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
NULL, 0);
}
-int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_core_fw_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
+
+ msg.ack_type = 1; /* blocking */
+ msg.freq = index;
+ msg.bw = 0; /* TODO: bus scaling */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
+
+ /* TODO: should the freq and bw fields be non-zero? */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
int ret;
@@ -324,7 +398,7 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
* the GMU firmware
*/
- ret = a6xx_hfi_send_perf_table(gmu);
+ ret = a6xx_hfi_send_perf_table_v1(gmu);
if (ret)
return ret;
@@ -341,6 +415,37 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
return 0;
}
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ if (gmu->legacy)
+ return a6xx_hfi_start_v1(gmu, boot_state);
+
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_core_fw_start(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * The downstream driver sends this in its "a6xx_hw_init" equivalent,
+ * but there seems to be no harm in sending it here
+ */
+ ret = a6xx_hfi_send_start(gmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
int i;
@@ -385,7 +490,7 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_gmu_bo *hfi = &gmu->hfi;
struct a6xx_hfi_queue_table_header *table = hfi->virt;
struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
u64 offset;
@@ -415,5 +520,5 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu)
/* GMU response queue */
offset += SZ_4K;
a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
- hfi->iova + offset, 4);
+ hfi->iova + offset, gmu->legacy ? 4 : 1);
}
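In the non-legacy HFI path above, both ends keep the queue indices on 4-dword boundaries: the writer pads the tail of each message with the 0xfafafafa cookie and the reader rounds its index up the same way. A small standalone sketch of that alignment rule (queue size and index values are made up for the example):

#include <stdio.h>

/* Same rounding as the kernel's ALIGN() for power-of-two alignments */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int queue_size = 64;      /* dwords; example value only */
	unsigned int index = 9;            /* index just past a message  */

	/* The writer fills dwords 9..11 with 0xfafafafa; the reader then
	 * resumes at the next 4-dword boundary, wrapping around the ring.
	 */
	unsigned int next = ALIGN_UP(index, 4) % queue_size;

	printf("next index: %u\n", next);  /* prints 12 */
	return 0;
}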
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 60d1319fa44f..2bd670ca42d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -51,7 +51,8 @@ struct a6xx_hfi_queue {
/* HFI message types */
#define HFI_MSG_CMD 0
-#define HFI_MSG_ACK 2
+#define HFI_MSG_ACK 1
+#define HFI_MSG_ACK_V1 2
#define HFI_F2H_MSG_ACK 126
@@ -94,7 +95,13 @@ struct perf_level {
u32 freq;
};
-struct a6xx_hfi_msg_perf_table {
+struct perf_gx_level {
+ u32 vote;
+ u32 acd;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table_v1 {
u32 header;
u32 num_gpu_levels;
u32 num_gmu_levels;
@@ -103,6 +110,15 @@ struct a6xx_hfi_msg_perf_table {
struct perf_level cx_votes[4];
};
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_gx_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
#define HFI_H2F_MSG_BW_TABLE 3
struct a6xx_hfi_msg_bw_table {
@@ -124,4 +140,34 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_START 10
+
+struct a6xx_hfi_msg_start {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_CORE_FW_START 14
+
+struct a6xx_hfi_msg_core_fw_start {
+ u32 header;
+ u32 handle;
+};
+
+#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
+
+struct a6xx_hfi_gx_bw_perf_vote_cmd {
+ u32 header;
+ u32 ack_type;
+ u32 freq;
+ u32 bw;
+};
+
+#define HFI_H2F_MSG_PREPARE_SLUMBER 33
+
+struct a6xx_hfi_prep_slumber_cmd {
+ u32 header;
+ u32 bw;
+ u32 freq;
+};
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index cb3a6e597d76..7732f03d9e3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -93,6 +93,17 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
+ .rev = ADRENO_REV(4, 0, 5, ANY_ID),
+ .revn = 405,
+ .name = "A405",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
@@ -189,6 +200,30 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 4, 0, ANY_ID),
+ .revn = 640,
+ .name = "A640",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 5, 0, ANY_ID),
+ .revn = 650,
+ .name = "A650",
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a650_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a650_zap.mdt",
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1d5c43c22269..89673c7ed473 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -185,6 +185,23 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ 0xfffffff);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -197,7 +214,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
- *value = 0x100000;
+ *value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
@@ -459,7 +476,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
break;
/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
@@ -988,12 +1005,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.va_start = SZ_16M;
- adreno_gpu_config.va_end = 0xffffffff;
- /* maximum range of a2xx mmu */
- if (adreno_is_a2xx(adreno_gpu))
- adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
-
adreno_gpu_config.nr_rings = nr_rings;
adreno_get_pwrlevels(&pdev->dev, gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9ff4e550e7bd..2f5d2c3acc3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -202,6 +202,11 @@ static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
return (gpu->revn >= 400) && (gpu->revn < 500);
}
+static inline int adreno_is_a405(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 405;
+}
+
static inline int adreno_is_a420(struct adreno_gpu *gpu)
{
return gpu->revn == 420;
@@ -237,6 +242,16 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
return gpu->revn == 630;
}
+static inline int adreno_is_a640(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 640;
+}
+
+static inline int adreno_is_a650(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 650;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -273,6 +288,14 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
/*
+ * Common helper function to initialize the default address space for arm-smmu
+ * attached targets
+ */
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 11f2bebe3869..7c230f719ad3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -36,22 +36,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
-{
- struct drm_crtc *tmp_crtc;
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- tmp_crtc->enabled) {
- DPU_DEBUG("video interface connected crtc:%d\n",
- tmp_crtc->base.id);
- return true;
- }
- }
-
- return false;
-}
-
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
struct drm_crtc *crtc,
struct drm_crtc_state *state,
@@ -94,7 +78,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
u32 bw, threshold;
u64 bw_sum_of_intfs = 0;
enum dpu_crtc_client_type curr_client_type;
- bool is_video_mode;
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
@@ -144,11 +127,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DPU_DEBUG("calculated bandwidth=%uk\n", bw);
- is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
- threshold = (is_video_mode ||
- _dpu_core_video_mode_intf_connected(crtc)) ?
- kms->catalog->perf.max_bw_low :
- kms->catalog->perf.max_bw_high;
+ threshold = kms->catalog->perf.max_bw_high;
DPU_DEBUG("final threshold bw limit = %d\n", threshold);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 17448505a9b5..e15b42a780e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -9,6 +9,7 @@
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
+#include <linux/bits.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
@@ -20,6 +21,7 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
@@ -40,6 +42,9 @@
/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+#define CONVERT_S3_15(val) \
+ (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -88,11 +93,9 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *crtc_state;
int lm_idx, lm_horiz_position;
- dpu_crtc = to_dpu_crtc(crtc);
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
@@ -422,6 +425,74 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(adj_mode);
}
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ struct drm_color_ctm *ctm;
+
+ memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (!ctm)
+ return;
+
+ cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+ cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+ cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+ cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+ cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+ cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+ cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+ cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+ cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
+}
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state = crtc->state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_pcc_cfg cfg;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_dspp *dspp;
+ int i;
+
+
+ if (!state->color_mgmt_changed)
+ return;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ lm = mixer[i].hw_lm;
+ dspp = mixer[i].hw_dspp;
+
+ if (!dspp || !dspp->ops.setup_pcc)
+ continue;
+
+ if (!state->ctm) {
+ dspp->ops.setup_pcc(dspp, NULL);
+ } else {
+ _dpu_crtc_get_pcc_coeff(state, &cfg);
+ dspp->ops.setup_pcc(dspp, &cfg);
+ }
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
+ mixer[i].hw_dspp->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - DSPP_0,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+ }
+}
+
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -430,7 +501,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
- struct dpu_crtc_smmu_state_data *smmu_state;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@@ -448,7 +518,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
- smmu_state = &dpu_crtc->smmu_state;
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
@@ -475,6 +544,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
_dpu_crtc_blend_setup(crtc);
+ _dpu_crtc_setup_cp_blocks(crtc);
+
/*
* PP_DONE irq is only used by command mode for now.
* It is better to request pending before FLUSH and START trigger
@@ -491,7 +562,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev;
struct drm_plane *plane;
struct msm_drm_private *priv;
- struct msm_drm_thread *event_thread;
unsigned long flags;
struct dpu_crtc_state *cstate;
@@ -513,8 +583,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
return;
}
- event_thread = &priv->event_thread[crtc->index];
-
if (dpu_crtc->event) {
DPU_DEBUG("already received dpu_crtc->event\n");
} else {
@@ -567,7 +635,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
if (!crtc || !state) {
@@ -575,7 +642,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
return;
}
- dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(state);
DPU_DEBUG("crtc%d\n", crtc->base.id);
@@ -662,11 +728,9 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
/**
* dpu_crtc_duplicate_state - state duplicate hook
* @crtc: Pointer to drm crtc structure
- * @Returns: Pointer to new drm_crtc_state structure
*/
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate, *old_cstate;
if (!crtc || !crtc->state) {
@@ -674,7 +738,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return NULL;
}
- dpu_crtc = to_dpu_crtc(crtc);
old_cstate = to_dpu_crtc_state(crtc->state);
cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
if (!cstate) {
@@ -693,9 +756,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
- struct drm_display_mode *mode;
struct drm_encoder *encoder;
- struct msm_drm_private *priv;
unsigned long flags;
bool release_bandwidth = false;
@@ -705,8 +766,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
}
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
- mode = &cstate->base.adjusted_mode;
- priv = crtc->dev->dev_private;
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
@@ -768,14 +827,12 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
- struct msm_drm_private *priv;
bool request_bandwidth;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
- priv = crtc->dev->dev_private;
pm_runtime_get_sync(crtc->dev->dev);
@@ -1319,6 +1376,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
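The CONVERT_S3_15() macro added to dpu_crtc.c above squeezes each DRM CTM coefficient (sign-magnitude 31.32 fixed point) into the 18-bit value the PCC block takes, dropping the sign bit along the way; the macro name suggests the target is unsigned 3.15 fixed point. A worked example with the identity coefficient, under that interpretation (the userspace re-definitions below mirror the kernel macros only for the sake of the check):

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(n)         (1ULL << (n))
#define GENMASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

#define CONVERT_S3_15(val) \
	(((((uint64_t)(val)) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

int main(void)
{
	/* 1.0 in the DRM CTM's 31.32 fixed-point encoding... */
	uint64_t one = 1ULL << 32;

	/* ...maps to 1.0 in 3.15 fixed point, i.e. 1 << 15 = 0x8000 */
	assert(CONVERT_S3_15(one) == 0x8000);
	return 0;
}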
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 5174e86124cc..cec3474340e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -73,12 +73,14 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
+ * @lm_dspp: DSPP HW driver context
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl;
+ struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
u32 flush_mask;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index a1b79ee2bd9d..63976dcd2ac8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -20,6 +20,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -536,6 +537,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
+ * Add color processing (DSPP) blocks only to the primary interface
*/
if (intf_count == 2)
topology.num_lm = 2;
@@ -544,6 +546,9 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
+ topology.num_dspp = topology.num_lm;
+
topology.num_enc = 0;
topology.num_intf = intf_count;
@@ -959,7 +964,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_pp;
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm, num_ctl, num_pp, num_dspp;
int i, j;
if (!drm_enc) {
@@ -1008,6 +1014,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1020,6 +1029,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
}
cstate->num_mixers = num_lm;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index c567917541e8..29d4fde3172b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -41,6 +41,8 @@
#define PINGPONG_SDM845_SPLIT_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -291,29 +293,30 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = _fmask, \
.sblk = _sblk, \
.pingpong = _pp, \
- .lm_pair_mask = (1 << _lmpair) \
+ .lm_pair_mask = (1 << _lmpair), \
+ .dspp = _dspp \
}
static const struct dpu_lm_cfg sdm845_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1),
+ &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0),
+ &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_5),
+ &sdm845_lm_sblk, PINGPONG_2, LM_5, 0),
LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2),
+ &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
};
/* SC7180 */
@@ -328,11 +331,30 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
static const struct dpu_lm_cfg sc7180_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_0, LM_1),
+ &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_1, LM_0),
+ &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
+};
+
+/*************************************************************
+ * DSPP sub blocks config
+ *************************************************************/
+static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
+ .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x10000},
};
+#define DSPP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1800, \
+ .features = DSPP_SC7180_MASK, \
+ .sblk = &sc7180_dspp_sblk \
+ }
+
+static const struct dpu_dspp_cfg sc7180_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000),
+};
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
@@ -515,8 +537,8 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
};
static const struct dpu_perf_cfg sc7180_perf_data = {
- .max_bw_low = 3900000,
- .max_bw_high = 5500000,
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
@@ -587,6 +609,8 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sc7180_sspp,
.mixer_count = ARRAY_SIZE(sc7180_lm),
.mixer = sc7180_lm,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
.pingpong_count = ARRAY_SIZE(sc7180_pp),
.pingpong = sc7180_pp,
.intf_count = ARRAY_SIZE(sc7180_intf),
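
NOTE: for context, wiring a further DSPP-capable SoC into the catalog would only
need a dspp table plus the extra trailing argument of LM_BLK(). The sketch below
is illustrative only; "example_*" names, DSPP_1 and the 0x56000 offset are
placeholders, not part of this series:

	static const struct dpu_dspp_cfg example_dspp[] = {
		DSPP_BLK("dspp_0", DSPP_0, 0x54000),
		DSPP_BLK("dspp_1", DSPP_1, 0x56000),
	};

	static const struct dpu_lm_cfg example_lm[] = {
		LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
			&sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
		LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
			&sc7180_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
	};

	/* ...and in that SoC's cfg_init(): */
	.dspp_count = ARRAY_SIZE(example_dspp),
	.dspp = example_dspp,
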
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 09df7d87dd43..f7de43838c69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -146,6 +146,17 @@ enum {
};
/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ * @DPU_DSPP_GC Gamma correction block
+ */
+enum {
+ DPU_DSPP_PCC = 0x1,
+ DPU_DSPP_GC,
+ DPU_DSPP_MAX
+};
+
+/**
* PINGPONG sub-blocks
* @DPU_PINGPONG_TE Tear check block
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
@@ -377,6 +388,16 @@ struct dpu_lm_sub_blks {
struct dpu_pp_blk gc;
};
+/**
+ * struct dpu_dspp_sub_blks: Information of DSPP block
+ * @gc : gamma correction block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+ struct dpu_pp_blk gc;
+ struct dpu_pp_blk pcc;
+};
+
struct dpu_pingpong_sub_blks {
struct dpu_pp_blk te;
struct dpu_pp_blk te2;
@@ -471,10 +492,24 @@ struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
+ u32 dspp;
unsigned long lm_pair_mask;
};
/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_dspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dspp_sub_blks *sblk;
+};
+
+/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
@@ -688,6 +723,9 @@ struct dpu_mdss_cfg {
u32 ad_count;
+ u32 dspp_count;
+ const struct dpu_dspp_cfg *dspp;
+
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
@@ -716,6 +754,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
+#define BLK_DSPP(s) ((s)->dspp)
/**
* dpu_hw_catalog_init - dpu hardware catalog init API retrieves
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 831e5f7a9b7f..613ae8f0cfcd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -272,6 +272,31 @@ static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
+static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp dspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (dspp) {
+ case DSPP_0:
+ flushbits = BIT(13);
+ break;
+ case DSPP_1:
+ flushbits = BIT(14);
+ break;
+ case DSPP_2:
+ flushbits = BIT(15);
+ break;
+ case DSPP_3:
+ flushbits = BIT(21);
+ break;
+ default:
+ return 0;
+ }
+
+ return flushbits;
+}
+
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -548,6 +573,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
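
NOTE: get_bitmask_dspp() only reports the CTL flush bit; whoever programs the
DSPP still has to fold that bit into the mixer's flush mask. A minimal sketch of
a CRTC-side caller (the helper name is hypothetical, the fields come from
dpu_crtc_mixer above):

	static void example_flush_dspp(struct dpu_crtc_mixer *mixer)
	{
		struct dpu_hw_ctl *ctl = mixer->lm_ctl;

		if (!mixer->hw_dspp || !ctl->ops.get_bitmask_dspp)
			return;

		/* accumulate the DSPP flush bit next to the LM/pipe bits */
		mixer->flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
						mixer->hw_dspp->idx);
	}
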
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 09e1263c72e2..ec579b470a80 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -139,6 +139,9 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
+ uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+
/**
* Query the value of the intf flush mask
* No effect on hardware
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
new file mode 100644
index 000000000000..a7a24539921f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_kms.h"
+
+
+/* DSPP_PCC */
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_RED_R_OFF 0x10
+#define PCC_RED_G_OFF 0x1C
+#define PCC_RED_B_OFF 0x28
+#define PCC_GREEN_R_OFF 0x14
+#define PCC_GREEN_G_OFF 0x20
+#define PCC_GREEN_B_OFF 0x2C
+#define PCC_BLUE_R_OFF 0x18
+#define PCC_BLUE_G_OFF 0x24
+#define PCC_BLUE_B_OFF 0x30
+
+static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+	u32 base;
+
+	if (!ctx || !ctx->cap->sblk->pcc.base) {
+		DRM_ERROR("invalid ctx %pK or missing pcc base\n", ctx);
+		return;
+	}
+	base = ctx->cap->sblk->pcc.base;
+
+ if (!cfg) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
+ return;
+ }
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
+
+ DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
+}
+
+static void _setup_dspp_ops(struct dpu_hw_dspp *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_DSPP_PCC, &features) &&
+ IS_SC7180_TARGET(c->hw.hwversion))
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
+}
+
+static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->dspp_count; i++) {
+ if (dspp == m->dspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dspp[i].base;
+ b->length = m->dspp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_DSPP;
+ return &m->dspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops;
+
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dspp *c;
+ const struct dpu_dspp_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_dspp_ops(c, c->cap->features);
+
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_DSPP, idx, &dpu_hw_ops);
+
+ return c;
+}
+
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
+{
+ if (dspp)
+ dpu_hw_blk_destroy(&dspp->base);
+
+ kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
new file mode 100644
index 000000000000..7fa189cfcb06
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_DSPP_H
+#define _DPU_HW_DSPP_H
+
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_dspp;
+
+/**
+ * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color
+ * component.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ */
+
+struct dpu_hw_pcc_coeff {
+ __u32 r;
+ __u32 g;
+ __u32 b;
+};
+
+/**
+ * struct dpu_hw_pcc_cfg - pcc feature structure
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct dpu_hw_pcc_cfg {
+ struct dpu_hw_pcc_coeff r;
+ struct dpu_hw_pcc_coeff g;
+ struct dpu_hw_pcc_coeff b;
+};
+
+/**
+ * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dspp_ops {
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg);
+
+};
+
+/**
+ * struct dpu_hw_dspp - dspp description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to DSPP catalog configuration (dpu_dspp_cfg)
+ * @ops: Pointer to operations possible for this DSPP
+ */
+struct dpu_hw_dspp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dspp */
+ int idx;
+ const struct dpu_dspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_dspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_dspp - convert base hardware block to the DSPP container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dspp, base);
+}
+
+/**
+ * dpu_hw_dspp_init - initializes the dspp hw driver object.
+ * Should be called once before accessing each DSPP.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @Return: pointer to structure or ERR_PTR
+ */
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+
+#endif /*_DPU_HW_DSPP_H */
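
NOTE: a rough sketch of how a CRTC-side color path could drive the new op once
the mixers (and their DSPPs) have been assigned. The helper and the
identity-style coefficient values are assumptions; only the structs and ops come
from this patch:

	static void example_apply_pcc(struct dpu_crtc_state *cstate)
	{
		struct dpu_hw_pcc_cfg cfg = {
			.r = { .r = 0x8000 },	/* assumed fixed-point unity */
			.g = { .g = 0x8000 },
			.b = { .b = 0x8000 },
		};
		int i;

		for (i = 0; i < cstate->num_mixers; i++) {
			struct dpu_hw_dspp *dspp = cstate->mixers[i].hw_dspp;

			if (dspp && dspp->ops.setup_pcc)
				dspp->ops.setup_pcc(dspp, &cfg);
		}
	}
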
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 686882132bf6..402dc5832361 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -95,6 +95,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
+ DPU_HW_BLK_DSPP,
DPU_HW_BLK_MAX,
};
@@ -425,5 +426,6 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_TOP (1 << 7)
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
+#define DPU_DBG_MASK_DSPP (1 << 10)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index ce19f1d39367..b8615d4fe8a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -772,29 +772,21 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
- int ret;
+ struct msm_mmu *mmu;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return 0;
- domain->geometry.aperture_start = 0x1000;
- domain->geometry.aperture_end = 0xffffffff;
+ mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+ aspace = msm_gem_address_space_create(mmu, "dpu1",
+ 0x1000, 0xfffffff);
- aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
- domain, "dpu1");
if (IS_ERR(aspace)) {
- iommu_domain_free(domain);
+ mmu->funcs->destroy(mmu);
return PTR_ERR(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- DPU_ERROR("failed to attach iommu %d\n", ret);
- msm_gem_address_space_put(aspace);
- return ret;
- }
-
dpu_kms->base.aspace = aspace;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 9aba2910d83a..a3b122bfb676 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -158,6 +158,7 @@ struct dpu_global_state {
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+ uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 9b62451b01ee..9b2b5044e8e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -9,6 +9,7 @@
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_dspp.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
@@ -174,6 +175,23 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
+ for (i = 0; i < cat->dspp_count; i++) {
+ struct dpu_hw_dspp *hw;
+ const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
+
+ if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
+ DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
+ continue;
+ }
+ hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dspp object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
+ }
+
return 0;
fail:
@@ -222,12 +240,17 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
* @pp_idx: output parameter, index of pingpong block attached to the layer
- * mixer in rm->pongpong_blks[].
+ * mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ * mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ * datapath.
* @Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx)
+ uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ struct dpu_rm_requirements *reqs)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@@ -251,6 +274,23 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
*pp_idx = idx;
+
+ if (!reqs->topology.num_dspp)
+ return true;
+
+ idx = lm_cfg->dspp - DSPP_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
+ DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
+ lm_cfg->dspp);
+ return false;
+ }
+ *dspp_idx = idx;
+
return true;
}
@@ -262,6 +302,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
{
int lm_idx[MAX_BLOCKS];
int pp_idx[MAX_BLOCKS];
+ int dspp_idx[MAX_BLOCKS] = {0};
int i, j, lm_count = 0;
if (!reqs->topology.num_lm) {
@@ -279,7 +320,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count])) {
+ enc_id, i, &pp_idx[lm_count],
+ &dspp_idx[lm_count], reqs)) {
continue;
}
@@ -299,7 +341,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
- &pp_idx[lm_count])) {
+ &pp_idx[lm_count], &dspp_idx[lm_count],
+ reqs)) {
continue;
}
@@ -316,6 +359,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
for (i = 0; i < lm_count; i++) {
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
+ global_state->dspp_to_enc_id[dspp_idx[i]] =
+ reqs->topology.num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@@ -560,6 +605,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->intf_to_enc_id;
max_blks = ARRAY_SIZE(rm->intf_blks);
break;
+ case DPU_HW_BLK_DSPP:
+ hw_blks = rm->dspp_blks;
+ hw_to_enc_id = global_state->dspp_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dspp_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 6d2b04f306f0..08726bb1063a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -19,6 +19,7 @@ struct dpu_global_state;
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @intf_blks: array of intf hardware resources
+ * @dspp_blks: array of dspp hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
@@ -27,6 +28,7 @@ struct dpu_rm {
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
+ struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
uint32_t lm_max_width;
};
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index dda05436f716..08897184b1d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -510,18 +510,20 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- aspace = msm_gem_address_space_create(&pdev->dev,
- config->iommu, "mdp4");
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
+ config->iommu);
+
+ aspace = msm_gem_address_space_create(mmu,
+ "mdp4", 0x1000, 0xffffffff);
+
if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret)
- goto fail;
} else {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
@@ -569,10 +571,6 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
- if (config.iommu) {
- config.iommu->geometry.aperture_start = 0x1000;
- config.iommu->geometry.aperture_end = 0xffffffff;
- }
return &config;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index e3c4c250238b..25a13a2a57a9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -342,6 +342,81 @@ static const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
+static const struct mdp5_cfg_hw msm8x36_config = {
+ .name = "msm8x36",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .ad = {
+ .count = 1,
+ .base = { 0x78000 },
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 366670000,
+};
+
static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
@@ -840,6 +915,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 8, .config = { .hw = &msm8x36_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
@@ -941,10 +1017,6 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
- if (config.iommu) {
- config.iommu->geometry.aperture_start = 0x1000;
- config.iommu->geometry.aperture_end = 0xffffffff;
- }
return &config;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 998bef1190a3..b5fed67c4651 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -959,7 +959,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!ctl)
return -EINVAL;
- /* don't support LM cursors when we we have source split enabled */
+ /* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
@@ -1030,7 +1030,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return -EINVAL;
}
- /* don't support LM cursors when we we have source split enabled */
+ /* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index c902c6503675..19ec48695ffb 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -624,25 +624,25 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
+ struct msm_mmu *mmu;
+
iommu_dev = &pdev->dev;
if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
- aspace = msm_gem_address_space_create(iommu_dev,
- config->platform.iommu, "mdp5");
+ mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
+
+ aspace = msm_gem_address_space_create(mmu, "mdp5",
+ 0x1000, 0xffffffff);
+
if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
- ret);
- goto fail;
- }
} else {
DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
@@ -935,7 +935,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0;
fail:
- mdp5_destroy(pdev);
+ if (mdp5_kms)
+ mdp5_destroy(pdev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 29295dee2a2e..f6ce40bf3699 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,9 +37,10 @@
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
* - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
+ * - 1.6.0 - Syncobj support
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 5
+#define MSM_VERSION_MINOR 6
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -1002,7 +1003,8 @@ static struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
DRIVER_RENDER |
DRIVER_ATOMIC |
- DRIVER_MODESET,
+ DRIVER_MODESET |
+ DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
.lastclose = drm_fb_helper_lastclose,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 194d900a460e..e2d6a6056418 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -105,6 +105,7 @@ struct msm_display_topology {
u32 num_lm;
u32 num_enc;
u32 num_intf;
+ u32 num_dspp;
};
/**
@@ -236,7 +237,8 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages);
+ struct msm_gem_vma *vma, int npages,
+ u64 range_start, u64 range_end);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
@@ -250,12 +252,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name);
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
- const char *name, uint64_t va_start, uint64_t va_end);
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
@@ -276,6 +274,9 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5a6a79fbc9d6..6277fde13df9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -389,7 +389,8 @@ put_iova(struct drm_gem_object *obj)
}
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
@@ -404,7 +405,8 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+ range_start, range_end);
if (ret) {
del_vma(vma);
return ret;
@@ -426,6 +428,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
+ if (msm_obj->flags & MSM_BO_MAP_PRIV)
+ prot |= IOMMU_PRIV;
+
WARN_ON(!mutex_is_locked(&msm_obj->lock));
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
@@ -443,9 +448,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
-/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+/*
+ * get iova and pin it. Should have a matching put
+ * limits iova to specified range (in pages)
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
u64 local;
@@ -453,7 +462,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, &local);
+ ret = msm_gem_get_iova_locked(obj, aspace, &local,
+ range_start, range_end);
if (!ret)
ret = msm_gem_pin_iova(obj, aspace);
@@ -465,6 +475,13 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
return ret;
}
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
@@ -476,7 +493,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
int ret;
mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
mutex_unlock(&msm_obj->lock);
return ret;
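
NOTE: the _range variant constrains where the iova may land (the bounds are in
pages, per the comment above). A hedged usage sketch, assuming an existing
obj/aspace pair:

	uint64_t iova;
	int ret;

	/* keep the mapping inside the first 16MB of the address space */
	ret = msm_gem_get_and_pin_iova_range(obj, aspace, &iova,
					     0, SZ_16M >> PAGE_SHIFT);
	if (ret)
		return ret;
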
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 30584eaf8cc8..972490b14ba5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -13,6 +13,7 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
struct msm_gem_address_space {
const char *name;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 385d4965a8d0..6630aa817505 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -8,7 +8,9 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include "msm_drv.h"
#include "msm_gpu.h"
@@ -391,6 +393,186 @@ static void submit_cleanup(struct msm_gem_submit *submit)
}
}
+
+struct msm_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride,
+ struct msm_ringbuffer *ring)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ if (!dma_fence_match_context(fence, ring->fctx->context))
+ ret = dma_fence_wait(fence, true);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
+ syncobjs[i] =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
+static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
+ uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_submit_post_dep *post_deps;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+ post_deps[i].chain = NULL;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ post_deps[i].chain =
+ kmalloc(sizeof(*post_deps[i].chain),
+ GFP_KERNEL);
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ kfree(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
+static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+}
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -403,6 +585,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
+ struct msm_submit_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
@@ -411,6 +595,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;
+ if (args->pad)
+ return -EINVAL;
+
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
@@ -458,9 +645,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
return ret;
}
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
+ syncobjs_to_reset = msm_wait_deps(dev, file,
+ args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride, ring);
+ if (IS_ERR(syncobjs_to_reset))
+ return PTR_ERR(syncobjs_to_reset);
+ }
+
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
+ post_deps = msm_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_post_unlock;
+ }
+ }
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -587,6 +794,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence_fd = out_fence_fd;
}
+ msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_process_post_deps(post_deps, args->nr_out_syncobjs,
+ submit->fence);
+
+
out:
submit_cleanup(submit);
if (has_ww_ticket)
@@ -597,5 +809,23 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&dev->struct_mutex);
+
+out_post_unlock:
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
return ret;
}
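
NOTE: from userspace the new paths look roughly like the sketch below. The
struct, field and flag names are the ones consumed above; the file descriptor,
syncobj handles and the rest of the submit request are assumed to exist already
(illustrative only, not a canned recipe):

	struct drm_msm_gem_submit_syncobj in_dep = {
		.handle = wait_handle,			/* from DRM_IOCTL_SYNCOBJ_CREATE */
		.flags = MSM_SUBMIT_SYNCOBJ_RESET,	/* clear the fence after waiting */
	};
	struct drm_msm_gem_submit_syncobj out_sig = {
		.handle = signal_handle,
	};

	req.flags |= MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT;
	req.in_syncobjs = (uintptr_t)&in_dep;
	req.nr_in_syncobjs = 1;
	req.out_syncobjs = (uintptr_t)&out_sig;
	req.nr_out_syncobjs = 1;
	req.syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj);

	ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
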
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 1af5354bcd46..5f6a11211b64 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -103,7 +103,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages)
+ struct msm_gem_vma *vma, int npages,
+ u64 range_start, u64 range_end)
{
int ret;
@@ -111,7 +112,8 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return -EBUSY;
spin_lock(&aspace->lock);
- ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+ ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
+ 0, range_start, range_end, 0);
spin_unlock(&aspace->lock);
if (ret)
@@ -125,37 +127,14 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return 0;
}
-
struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name)
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size)
{
struct msm_gem_address_space *aspace;
- u64 size = domain->geometry.aperture_end -
- domain->geometry.aperture_start;
-
- aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
- if (!aspace)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&aspace->lock);
- aspace->name = name;
- aspace->mmu = msm_iommu_new(dev, domain);
-
- drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
- size >> PAGE_SHIFT);
- kref_init(&aspace->kref);
-
- return aspace;
-}
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
- const char *name, uint64_t va_start, uint64_t va_end)
-{
- struct msm_gem_address_space *aspace;
- u64 size = va_end - va_start;
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -163,10 +142,9 @@ msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
spin_lock_init(&aspace->lock);
aspace->name = name;
- aspace->mmu = msm_gpummu_new(dev, gpu);
+ aspace->mmu = mmu;
- drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
- size >> PAGE_SHIFT);
+ drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
kref_init(&aspace->kref);
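
NOTE: with the attach folded into msm_iommu_new() and the aperture passed
explicitly, callers now follow the pattern sketched here (the label and range
are illustrative). The MMU is torn down by the caller if address-space creation
fails, mirroring the kms callers above:

	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;

	if (!domain)
		return 0;	/* no IOMMU: fall back to carveout */

	mmu = msm_iommu_new(dev, domain);
	aspace = msm_gem_address_space_create(mmu, "example",
					      0x1000, 0xffffffff);
	if (IS_ERR(aspace)) {
		if (!IS_ERR(mmu))
			mmu->funcs->destroy(mmu);
		return PTR_ERR(aspace);
	}
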
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 615c5cda5389..a22d30622306 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -821,51 +821,6 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
-static struct msm_gem_address_space *
-msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
- uint64_t va_start, uint64_t va_end)
-{
- struct msm_gem_address_space *aspace;
- int ret;
-
- /*
- * Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
-
- DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
-
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
- if (IS_ERR(aspace))
- iommu_domain_free(iommu);
- } else {
- aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
- va_start, va_end);
- }
-
- if (IS_ERR(aspace)) {
- DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
- PTR_ERR(aspace));
- return ERR_CAST(aspace);
- }
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- msm_gem_address_space_put(aspace);
- return ERR_PTR(ret);
- }
-
- return aspace;
-}
-
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
@@ -938,8 +893,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
- gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
- config->va_start, config->va_end);
+
+ gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
if (gpu->aspace == NULL)
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
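
NOTE: msm_gpu_init() now defers to a per-GPU create_address_space hook. An
IOMMU-backed implementation could look roughly like this; the SZ_16M start, the
size and the helper name are assumptions, not the actual per-chip code:

	static struct msm_gem_address_space *
	example_create_address_space(struct msm_gpu *gpu,
				     struct platform_device *pdev)
	{
		struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
		struct msm_mmu *mmu;

		if (!domain)
			return NULL;	/* msm_gpu_init() falls back to VRAM carveout */

		mmu = msm_iommu_new(&pdev->dev, domain);
		if (IS_ERR(mmu)) {
			iommu_domain_free(domain);
			return ERR_CAST(mmu);
		}

		return msm_gem_address_space_create(mmu, gpu->name,
						    SZ_16M, 0xffffffff - SZ_16M);
	}
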
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6ccae4ba905c..429cb40f7931 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -21,8 +21,6 @@ struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
- uint64_t va_start;
- uint64_t va_end;
unsigned int nr_rings;
};
@@ -64,6 +62,8 @@ struct msm_gpu_funcs {
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
+ struct msm_gem_address_space *(*create_address_space)
+ (struct msm_gpu *gpu, struct platform_device *pdev);
};
struct msm_gpu {
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 34980d8eb7ad..310a31b05faa 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -21,17 +21,12 @@ struct msm_gpummu {
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-static int msm_gpummu_attach(struct msm_mmu *mmu)
-{
- return 0;
-}
-
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, size_t len, int prot)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
@@ -59,7 +54,7 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
return 0;
}
-static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
@@ -85,7 +80,6 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .attach = msm_gpummu_attach,
.detach = msm_gpummu_detach,
.map = msm_gpummu_map,
.unmap = msm_gpummu_unmap,
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index ad58cfe5998e..3a381a9674c9 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -23,13 +23,6 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu)
-{
- struct msm_iommu *iommu = to_msm_iommu(mmu);
-
- return iommu_attach_device(iommu->domain, mmu->dev);
-}
-
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -38,7 +31,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, size_t len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
@@ -49,7 +42,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -66,7 +59,6 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
@@ -76,6 +68,10 @@ static const struct msm_mmu_funcs funcs = {
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
+ int ret;
+
+ if (!domain)
+ return ERR_PTR(-ENODEV);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -85,5 +81,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, iommu);
+ ret = iommu_attach_device(iommu->domain, dev);
+ if (ret) {
+ kfree(iommu);
+ return ERR_PTR(ret);
+ }
+
return &iommu->base;
}
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 67a623f14319..3a534ee59bf6 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -10,11 +10,10 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
+ size_t len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 732f65df5c4f..fea30e7aa9e8 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -29,8 +29,6 @@
* or shader programs (if not emitted inline in cmdstream).
*/
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
@@ -47,6 +45,8 @@ bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
+#ifdef CONFIG_DEBUG_FS
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 7622490d8602..d472942102f5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -277,7 +277,7 @@ nv50_outp_release(struct nouveau_encoder *nv_encoder)
}
static int
-nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
+nv50_outp_acquire(struct nouveau_encoder *nv_encoder, bool hda)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
@@ -289,6 +289,7 @@ nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
+ .info.hda = hda,
};
int ret;
@@ -393,7 +394,7 @@ nv50_dac_enable(struct drm_encoder *encoder)
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
- nv50_outp_acquire(nv_encoder);
+ nv50_outp_acquire(nv_encoder, false);
core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
asyh->or.depth = 0;
@@ -510,7 +511,7 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
nv_crtc->index != dev_id)
continue;
- *enabled = drm_detect_monitor_audio(nv_connector->edid);
+ *enabled = nv_encoder->audio;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
@@ -600,6 +601,7 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
+ nv_encoder->audio = false;
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
@@ -636,6 +638,7 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
+ nv_encoder->audio = true;
nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
nv_crtc->index);
@@ -966,7 +969,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
if (!mstm->links++)
- nv50_outp_acquire(mstm->outp);
+ nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
if (mstm->outp->link & 1)
proto = 0x8;
@@ -1560,12 +1563,18 @@ nv50_sor_enable(struct drm_encoder *encoder)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
+ bool hda = false;
u8 proto = 0xf;
u8 depth = 0x0;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_encoder->crtc = encoder->crtc;
- nv50_outp_acquire(nv_encoder);
+
+ if ((disp->disp->object.oclass == GT214_DISP ||
+ disp->disp->object.oclass >= GF110_DISP) &&
+ drm_detect_monitor_audio(nv_connector->edid))
+ hda = true;
+ nv50_outp_acquire(nv_encoder, hda);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
@@ -1775,7 +1784,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
u8 owner = 1 << nv_crtc->index;
u8 proto;
- nv50_outp_acquire(nv_encoder);
+ nv50_outp_acquire(nv_encoder, false);
switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index e25ead56052c..99b9b681736d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -192,6 +192,8 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
wndw->func->release(wndw, asyw, asyh);
asyw->ntfy.handle = 0;
asyw->sema.handle = 0;
+ asyw->xlut.handle = 0;
+ memset(asyw->image.handle, 0x00, sizeof(asyw->image.handle));
}
static int
@@ -519,7 +521,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
return PTR_ERR(ctxdma);
}
- asyw->image.handle[0] = ctxdma->object.handle;
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
}
asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index 38bf4f38e869..53800fb46582 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -46,7 +46,8 @@ struct nv50_disp_acquire_v0 {
__u8 version;
__u8 or;
__u8 link;
- __u8 pad03[5];
+ __u8 hda;
+ __u8 pad04[4];
};
struct nv50_disp_dac_load_v0 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index de51733b0476..a72c412ac8b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -52,6 +52,7 @@ struct nouveau_encoder {
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
u32 ctrl;
+ bool audio;
struct drm_display_mode mode;
int last_dpms;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 22f054f7ee3e..ba9f9359c30e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -175,10 +175,10 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
mm = get_task_mm(current);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!cli->svm.svmm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return -EINVAL;
}
@@ -205,7 +205,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
args->result = 0;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return 0;
@@ -355,7 +355,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
if (ret)
goto out_free;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
@@ -364,12 +364,12 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
cli->svm.svmm = svmm;
cli->svm.cli = cli;
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
@@ -571,9 +571,9 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(&range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
/*
* FIXME: the input PFN_REQ flags are destroyed on
@@ -705,18 +705,18 @@ nouveau_svm_fault(struct nvif_notify *notify)
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 571687ba85b8..cf075311cdd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -39,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgf119.o
nvkm-y += nvkm/engine/disp/sorgk104.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
+nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index fd6216684f6d..8471de3f3b61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -36,7 +36,7 @@ gp100_disp = {
.super = gf119_disp_super,
.root = &gp100_disp_root_oclass,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
- .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+ .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 3468ddec1270..a3779c5046ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -63,7 +63,7 @@ gp102_disp = {
.super = gf119_disp_super,
.root = &gp102_disp_root_oclass,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
- .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+ .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
index 9b16a08eb4d9..bf6d41fb0c9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
@@ -27,10 +27,10 @@ void
gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 hoff = head * 0x800;
+ const u32 soff = nv50_ior_base(ior);
const u32 ctrl = scdc & 0x3;
- nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl);
+ nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
ior->tmds.high_speed = !!(scdc & 0x2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index c1d7a36e4d3c..1a200a9ba4e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -201,6 +201,7 @@ int gf119_sor_new(struct nvkm_disp *, int);
int gk104_sor_new(struct nvkm_disp *, int);
int gm107_sor_new(struct nvkm_disp *, int);
int gm200_sor_new(struct nvkm_disp *, int);
+int gp100_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index c62030c96fba..dcf08249374a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -111,8 +111,44 @@ nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
return 0;
}
+static inline int
+nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
+ u8 user, bool hda)
+{
+ struct nvkm_ior *ior;
+
+ /* First preference is to reuse the OR that is currently armed
+ * on HW, if any, in order to prevent unnecessary switching.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->arm.outp == outp)
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ /* Failing that, a completely unused OR is the next best thing. */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->type == type && !ior->arm.outp &&
+ (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ /* Last resort is to assign an OR that's already active on HW,
+ * but will be released during the next modeset.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->type == type &&
+ (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ return -ENOSPC;
+}
+
int
-nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
+nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
{
struct nvkm_ior *ior = outp->ior;
enum nvkm_ior_proto proto;
@@ -137,32 +173,25 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
return nvkm_outp_acquire_ior(outp, user, ior);
}
- /* First preference is to reuse the OR that is currently armed
- * on HW, if any, in order to prevent unnecessary switching.
+ /* If we don't need HDA, first try to acquire an OR that doesn't
+ * support it to leave free the ones that do.
*/
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
+ if (!hda) {
+ if (!nvkm_outp_acquire_hda(outp, type, user, false))
+ return 0;
- /* Failing that, a completely unused OR is the next best thing. */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity &&
- !ior->asy.outp && ior->type == type && !ior->arm.outp &&
- (ior->func->route.set || ior->id == __ffs(outp->info.or)))
- return nvkm_outp_acquire_ior(outp, user, ior);
+ /* Use a HDA-supporting SOR anyway. */
+ return nvkm_outp_acquire_hda(outp, type, user, true);
}
- /* Last resort is to assign an OR that's already active on HW,
- * but will be released during the next modeset.
- */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !ior->asy.outp && ior->type == type &&
- (ior->func->route.set || ior->id == __ffs(outp->info.or)))
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
+ /* We want HDA, try to acquire an OR that supports it. */
+ if (!nvkm_outp_acquire_hda(outp, type, user, true))
+ return 0;
- return -ENOSPC;
+ /* There weren't any free ORs that support HDA, grab one that
+ * doesn't and at least allow display to work still.
+ */
+ return nvkm_outp_acquire_hda(outp, type, user, false);
}
void
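The comments in nvkm_outp_acquire_hda() describe a three-tier preference: reuse the OR already armed for this output, else take a completely unused OR, else take one that is active but will be released at the next modeset; nvkm_outp_acquire() additionally tries non-HDA ORs first when audio is not needed, so HDA-capable SORs stay free. A condensed, hypothetical sketch of that ordering (simplified fields, not the driver's real structures or routing checks):

#include <linux/types.h>

struct or_cand {
	bool busy;		/* already being acquired (asy.outp)   */
	bool armed;		/* currently driving some output on HW */
	bool armed_for_us;	/* currently driving *this* output     */
	bool has_hda;
};

static int pick_or(const struct or_cand *ors, int n, bool want_hda)
{
	int pass, i;

	for (pass = 0; pass < 3; pass++) {
		for (i = 0; i < n; i++) {
			if (ors[i].has_hda != want_hda || ors[i].busy)
				continue;
			if (pass == 0 && !ors[i].armed_for_us)
				continue;	/* tier 0: reuse our armed OR  */
			if (pass == 1 && ors[i].armed)
				continue;	/* tier 1: completely idle OR  */
			return i;		/* tier 2: reclaim at modeset  */
		}
	}
	return -1;	/* no OR of this flavour available */
}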
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 721b068b87ef..ee028d30cfe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -32,7 +32,7 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *,
void nvkm_outp_del(struct nvkm_outp **);
void nvkm_outp_init(struct nvkm_outp *);
void nvkm_outp_fini(struct nvkm_outp *);
-int nvkm_outp_acquire(struct nvkm_outp *, u8 user);
+int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda);
void nvkm_outp_release(struct nvkm_outp *, u8 user);
void nvkm_outp_route(struct nvkm_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index a7672ef17d3b..fb5de44e4b8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -99,7 +99,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
} *args = data;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER);
+ ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, args->v0.hda);
if (ret == 0) {
args->v0.or = outp->ior->id;
args->v0.link = outp->ior->asy.link;
@@ -119,7 +119,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
if (args->v0.data & 0xfff00000)
return -EINVAL;
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV);
+ ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
if (ret)
return ret;
ret = outp->ior->func->sense(outp->ior, args->v0.data);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index cf2075db742a..4dd7f382968e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -89,7 +89,7 @@ gm200_sor_route_get(struct nvkm_outp *outp, int *link)
}
static const struct nvkm_ior_func
-gm200_sor = {
+gm200_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -119,8 +119,42 @@ gm200_sor = {
},
};
+static const struct nvkm_ior_func
+gm200_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+};
+
int
gm200_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x101034);
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gm200_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&gm200_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c
new file mode 100644
index 000000000000..c54f88317a07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+static const struct nvkm_ior_func
+gp100_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gp100_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+};
+
+int
+gp100_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x10ebb0) >> 8;
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gp100_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&gp100_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index d11a0dff10c6..4441187e8ec9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -78,7 +78,7 @@ gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
}
static const struct nvkm_ior_func
-gv100_sor = {
+gv100_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -107,9 +107,42 @@ gv100_sor = {
},
};
+static const struct nvkm_ior_func
+gv100_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+};
+
int
gv100_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x118fb0) >> 8;
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gv100_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&gv100_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index fa6d74251237..59865a934c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -62,7 +62,7 @@ tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
}
static const struct nvkm_ior_func
-tu102_sor = {
+tu102_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -92,8 +92,38 @@ tu102_sor = {
},
};
+static const struct nvkm_ior_func
+tu102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = tu102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+};
+
int
tu102_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda = nvkm_rd32(device, 0x08a15c);
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&tu102_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&tu102_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index ec330d791d15..e56880f3e3bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -352,7 +352,7 @@ gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
static const struct gf100_gr_fwif
gk20a_gr_fwif[] = {
- { -1, gk20a_gr_load, &gk20a_gr },
+ { 0, gk20a_gr_load, &gk20a_gr },
{}
};
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0d0ab8e0ff3b..cc31d187042e 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 068c3e5da173..3c8f570a20ee 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -342,17 +342,17 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_reserve(bo, true);
if (r) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto release_object;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (r)
goto release_object;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 7ad3f06c127e..00ca35f07ba5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -148,7 +148,7 @@
#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
+#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3)
#define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7)
#define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 2ff780114106..12430b9d4e93 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long best_rate = 0;
u8 best_m = 0, best_n = 0, _m, _n;
- for (_m = 0; _m < 8; _m++) {
+ for (_m = 0; _m < 16; _m++) {
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 52d2b71f1588..f09b096ba4fd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- return kmap_atomic(page);
- else
- return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- kunmap_atomic(addr);
- else
- __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = ttm_kmap_atomic_prot(d, prot);
+ dst = kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(dst, prot);
+ kunmap_atomic(dst);
return 0;
}
@@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = ttm_kmap_atomic_prot(s, prot);
+ src = kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(src, prot);
+ kunmap_atomic(src);
return 0;
}
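With the ttm_kmap_atomic_prot()/ttm_kunmap_atomic_prot() wrappers removed, callers use kmap_atomic_prot() from <linux/highmem.h> directly and pair it with kunmap_atomic(), as the removed kernel-doc describes, unmapping in the reverse order of mapping. A minimal sketch of the caller-side pattern, using a hypothetical copy helper:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy one page into a kernel buffer while mapping
 * it with the requested page protection.
 */
static int copy_page_with_prot(struct page *page, void *out, pgprot_t prot)
{
	void *vaddr = kmap_atomic_prot(page, prot);

	if (!vaddr)
		return -ENOMEM;
	memcpy(out, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);	/* unmap in reverse order of mapping */
	return 0;
}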
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ad30b112982..a43aa7275f12 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,7 +58,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_clear;
/*
- * If possible, avoid waiting for GPU with mmap_sem
+ * If possible, avoid waiting for GPU with mmap_lock
* held. We only do this if the fault allows retry and this
* is the first attempt.
*/
@@ -68,7 +68,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
(void) dma_fence_wait(bo->moving, true);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
@@ -131,20 +131,20 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
{
/*
* Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
+ * between mmap_lock and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
/*
* If the fault allows retry and this is the first
- * fault attempt, we try to release the mmap_sem
+ * fault attempt, we try to release the mmap_lock
* before waiting
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
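The comments above describe the usual retry dance for page faults: if the fault allows retry and this is the first attempt, take a reference on the object, drop mmap_lock, do the blocking wait, and return VM_FAULT_RETRY so the core re-runs the fault. A hedged, generic sketch of that shape (hypothetical object and wait, not TTM's real code):

#include <linux/mm.h>

/* Sketch only: drop mmap_lock before a potentially long wait inside a
 * fault handler, when the core allows the fault to be retried.
 */
static vm_fault_t wait_then_retry(struct vm_fault *vmf)
{
	if (fault_flag_allow_retry_first(vmf->flags) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		/* take a reference on the object here, before unlocking */
		mmap_read_unlock(vmf->vma->vm_mm);
		/* ... blocking wait on the object ... */
		return VM_FAULT_RETRY;
	}
	/* cannot drop the lock: wait with mmap_lock still held */
	return 0;
}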
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index bb46ca0c458f..1629427d5734 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,7 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+#include <linux/highmem.h>
/*
* Template that implements find_first_diff() for a generic
@@ -374,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
if (unmap_src) {
- ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ kunmap_atomic(d->src_addr);
d->src_addr = NULL;
}
if (unmap_dst) {
- ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ kunmap_atomic(d->dst_addr);
d->dst_addr = NULL;
}
@@ -388,8 +389,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->dst_addr =
- ttm_kmap_atomic_prot(d->dst_pages[dst_page],
- d->dst_prot);
+ kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
if (!d->dst_addr)
return -ENOMEM;
@@ -401,8 +402,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->src_addr =
- ttm_kmap_atomic_prot(d->src_pages[src_page],
- d->src_prot);
+ kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
if (!d->src_addr)
return -ENOMEM;
@@ -499,9 +500,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
}
out:
if (d.src_addr)
- ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ kunmap_atomic(d.src_addr);
if (d.dst_addr)
- ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+ kunmap_atomic(d.dst_addr);
return ret;
}
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
index b84fcaf8b105..aeea082f1418 100644
--- a/drivers/greybus/Kconfig
+++ b/drivers/greybus/Kconfig
@@ -3,7 +3,7 @@ menuconfig GREYBUS
tristate "Greybus support"
depends on SYSFS
---help---
- This option enables the Greybus driver core. Greybus is an
+ This option enables the Greybus driver core. Greybus is a
hardware protocol that was designed to provide Unipro with a
sane application layer. It was originally designed for the
ARA project, a modular phone system, but has shown up in other
@@ -12,7 +12,7 @@ menuconfig GREYBUS
Say Y here to enable support for these types of drivers.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called greybus.ko
if GREYBUS
@@ -25,7 +25,7 @@ config GREYBUS_ES2
acts as a Greybus "host controller". This device is a bridge
from a USB device to a Unipro network.
- To compile this code as a module, chose M here: the module
+ To compile this code as a module, choose M here: the module
will be called gb-es2.ko
endif # GREYBUS
diff --git a/drivers/greybus/arpc.h b/drivers/greybus/arpc.h
index c8b83c5cfa79..b9ea81b55b29 100644
--- a/drivers/greybus/arpc.h
+++ b/drivers/greybus/arpc.h
@@ -21,7 +21,7 @@ struct arpc_request_message {
__le16 id; /* RPC unique id */
__le16 size; /* Size in bytes of header + payload */
__u8 type; /* RPC type */
- __u8 data[0]; /* ARPC data */
+ __u8 data[]; /* ARPC data */
} __packed;
struct arpc_response_message {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 34f07371716d..443c5cbbde04 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -42,7 +42,7 @@ config HIDRAW
---help---
Say Y here if you want to support HID devices (from the USB
specification standpoint) that aren't strictly user interface
- devices, like monitor controls and Uninterruptable Power Supplies.
+ devices, like monitor controls and Uninterruptible Power Supplies.
This module supports these devices separately using a separate
event interface on /dev/hidraw.
@@ -149,6 +149,7 @@ config HID_APPLEIR
config HID_ASUS
tristate "Asus"
+ depends on USB_HID
depends on LEDS_CLASS
depends on ASUS_WMI || ASUS_WMI=n
select POWER_SUPPLY
@@ -538,14 +539,14 @@ config HID_LOGITECH
Support for Logitech devices that are not fully compliant with HID standard.
config HID_LOGITECH_DJ
- tristate "Logitech Unifying receivers full support"
+ tristate "Logitech receivers full support"
depends on USB_HID
depends on HIDRAW
depends on HID_LOGITECH
select HID_LOGITECH_HIDPP
---help---
- Say Y if you want support for Logitech Unifying receivers and devices.
- Unifying receivers are capable of pairing up to 6 Logitech compliant
+ Say Y if you want support for Logitech receivers and devices.
+ Logitech receivers are capable of pairing multiple Logitech compliant
devices to the same receiver. Without this driver it will be handled by
generic USB_HID driver and all incoming events will be multiplexed
into a single mouse and a single keyboard device.
@@ -1140,7 +1141,7 @@ config HID_SENSOR_CUSTOM_SENSOR
to decide how to interpret these special sensor ids and process in
the user space. Currently some manufacturers are using these ids for
sensor calibration and debugging other sensors. Manufacturers
- should't use these special custom sensor ids to export any of the
+ shouldn't use these special custom sensor ids to export any of the
standard sensors.
Select this config option for custom/generic sensor support.
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index b2ad319a74b9..6f1fe7248d81 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -387,8 +387,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
input_report_abs(hdata->input,
ABS_MT_PRESSURE, z);
} else {
- input_mt_report_slot_state(hdata->input,
- MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(hdata->input);
}
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d732d1d10caf..359bdfbe3701 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -51,6 +51,12 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
"(For people who want to keep Windows PC keyboard muscle memory. "
"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+static unsigned int swap_fn_leftctrl;
+module_param(swap_fn_leftctrl, uint, 0644);
+MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
+ "(For people who want to keep PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout, 1 = swapped, PC layout)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -162,6 +168,11 @@ static const struct apple_key_translation swapped_option_cmd_keys[] = {
{ }
};
+static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ { KEY_FN, KEY_LEFTCTRL },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -183,9 +194,11 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
bool do_translate;
u16 code = 0;
- if (usage->code == KEY_FN) {
+ u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
+
+ if (usage->code == fn_keycode) {
asc->fn_on = !!value;
- input_event(input, usage->type, usage->code, value);
+ input_event(input, usage->type, KEY_FN, value);
return 1;
}
@@ -270,6 +283,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
+ if (swap_fn_leftctrl) {
+ trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
@@ -333,6 +354,11 @@ static void apple_setup_input(struct input_dev *input)
for (trans = apple_iso_keyboard; trans->from; trans++)
set_bit(trans->to, input->keybit);
+
+ if (swap_fn_leftctrl) {
+ for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
}
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
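The swap_fn_leftctrl changes above follow hid-apple's usual table-driven remapping: a zero-terminated translation table is scanned and, on a match, the translated keycode is reported instead of the original. A small sketch of that idiom (generic names, not the driver's exact helpers):

#include <linux/input.h>

struct key_xlate { u16 from; u16 to; };

static const struct key_xlate swap_fn_leftctrl_table[] = {
	{ KEY_FN, KEY_LEFTCTRL },
	{ }	/* zero terminator */
};

/* Report 'to' in place of 'from' when the code appears in the table. */
static bool remap_key(struct input_dev *input, const struct key_xlate *t,
		      unsigned int type, u16 code, int value)
{
	for (; t->from; t++) {
		if (t->from == code) {
			input_event(input, type, t->to, value);
			return true;
		}
	}
	return false;
}

Per the MODULE_PARM_DESC above, the swap would typically be enabled at load time (hid_apple.swap_fn_leftctrl=1) or, since the parameter is 0644, flipped at runtime via /sys/module/hid_apple/parameters/swap_fn_leftctrl.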
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index e6e4c841fb06..c183caf89d49 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -40,7 +40,9 @@ MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define T100_TPAD_INTF 2
+#define MEDION_E1239T_TPAD_INTF 1
+#define E1239T_TP_TOGGLE_REPORT_ID 0x05
#define T100CHI_MOUSE_REPORT_ID 0x06
#define FEATURE_REPORT_ID 0x0d
#define INPUT_REPORT_ID 0x5d
@@ -77,6 +79,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_G752_KEYBOARD BIT(8)
#define QUIRK_T101HA_DOCK BIT(9)
#define QUIRK_T90CHI BIT(10)
+#define QUIRK_MEDION_E1239T BIT(11)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -102,12 +105,14 @@ struct asus_touchpad_info {
int res_y;
int contact_size;
int max_contacts;
+ int report_size;
};
struct asus_drvdata {
unsigned long quirks;
struct hid_device *hdev;
struct input_dev *input;
+ struct input_dev *tp_kbd_input;
struct asus_kbd_leds *kbd_backlight;
const struct asus_touchpad_info *tp;
bool enable_backlight;
@@ -126,6 +131,7 @@ static const struct asus_touchpad_info asus_i2c_tp = {
.max_y = 1758,
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ta_tp = {
@@ -135,6 +141,7 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.res_y = 27, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100ha_tp = {
@@ -144,6 +151,7 @@ static const struct asus_touchpad_info asus_t100ha_tp = {
.res_y = 29, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t200ta_tp = {
@@ -153,6 +161,7 @@ static const struct asus_touchpad_info asus_t200ta_tp = {
.res_y = 28, /* units/mm */
.contact_size = 5,
.max_contacts = 5,
+ .report_size = 28 /* 2 byte header + 5 * 5 + 1 byte footer */,
};
static const struct asus_touchpad_info asus_t100chi_tp = {
@@ -162,6 +171,17 @@ static const struct asus_touchpad_info asus_t100chi_tp = {
.res_y = 29, /* units/mm */
.contact_size = 3,
.max_contacts = 4,
+ .report_size = 15 /* 2 byte header + 3 * 4 + 1 byte footer */,
+};
+
+static const struct asus_touchpad_info medion_e1239t_tp = {
+ .max_x = 2640,
+ .max_y = 1380,
+ .res_x = 29, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+ .report_size = 32 /* 2 byte header + 5 * 5 + 5 byte footer */,
};
static void asus_report_contact_down(struct asus_drvdata *drvdat,
@@ -229,7 +249,7 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
int i, toolType = MT_TOOL_FINGER;
u8 *contactData = data + 2;
- if (size != 3 + drvdat->tp->contact_size * drvdat->tp->max_contacts)
+ if (size != drvdat->tp->report_size)
return 0;
for (i = 0; i < drvdat->tp->max_contacts; i++) {
@@ -257,6 +277,34 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
return 1;
}
+static int asus_e1239t_event(struct asus_drvdata *drvdat, u8 *data, int size)
+{
+ if (size != 3)
+ return 0;
+
+ /* Handle broken mute key which only sends press events */
+ if (!drvdat->tp &&
+ data[0] == 0x02 && data[1] == 0xe2 && data[2] == 0x00) {
+ input_report_key(drvdat->input, KEY_MUTE, 1);
+ input_sync(drvdat->input);
+ input_report_key(drvdat->input, KEY_MUTE, 0);
+ input_sync(drvdat->input);
+ return 1;
+ }
+
+ /* Handle custom touchpad toggle key which only sends press events */
+ if (drvdat->tp_kbd_input &&
+ data[0] == 0x05 && data[1] == 0x02 && data[2] == 0x28) {
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 1);
+ input_sync(drvdat->tp_kbd_input);
+ input_report_key(drvdat->tp_kbd_input, KEY_F21, 0);
+ input_sync(drvdat->tp_kbd_input);
+ return 1;
+ }
+
+ return 0;
+}
+
static int asus_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -281,6 +329,9 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->tp && data[0] == INPUT_REPORT_ID)
return asus_report_input(drvdata, data, size);
+ if (drvdata->quirks & QUIRK_MEDION_E1239T)
+ return asus_e1239t_event(drvdata, data, size);
+
return 0;
}
@@ -615,6 +666,21 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
hi->report->id != T100CHI_MOUSE_REPORT_ID)
return 0;
+ /* Handle MULTI_INPUT on E1239T mouse/touchpad USB interface */
+ if (drvdata->tp && (drvdata->quirks & QUIRK_MEDION_E1239T)) {
+ switch (hi->report->id) {
+ case E1239T_TP_TOGGLE_REPORT_ID:
+ input_set_capability(input, EV_KEY, KEY_F21);
+ input->name = "Asus Touchpad Keys";
+ drvdata->tp_kbd_input = input;
+ return 0;
+ case INPUT_REPORT_ID:
+ break; /* Touchpad report, handled below */
+ default:
+ return 0; /* Ignore other reports */
+ }
+ }
+
if (drvdata->tp) {
int ret;
@@ -677,24 +743,16 @@ static int asus_input_mapping(struct hid_device *hdev,
* This avoids a bunch of non-functional hid_input devices getting
* created because of the T100CHI using HID_QUIRK_MULTI_INPUT.
*/
- if (drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) {
- if (field->application == (HID_UP_GENDESK | 0x0080) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
- usage->hid == (HID_UP_GENDEVCTRLS | 0x0026))
- return -1;
- /*
- * We use the hid_input for the mouse report for the touchpad,
- * keep the left button, to avoid the core removing it.
- */
- if (field->application == HID_GD_MOUSE &&
- usage->hid != (HID_UP_BUTTON | 1))
- return -1;
- }
+ if ((drvdata->quirks & (QUIRK_T100CHI | QUIRK_T90CHI)) &&
+ (field->application == (HID_UP_GENDESK | 0x0080) ||
+ field->application == HID_GD_MOUSE ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0026)))
+ return -1;
/* ASUS-specific keyboard hotkeys */
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
@@ -737,11 +795,11 @@ static int asus_input_mapping(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT)
drvdata->enable_backlight = true;
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) {
- set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
case 0xff01: asus_map_key_clear(BTN_1); break;
case 0xff02: asus_map_key_clear(BTN_2); break;
@@ -764,6 +822,7 @@ static int asus_input_mapping(struct hid_device *hdev,
return 0;
}
+ set_bit(EV_REP, hi->input->evbit);
return 1;
}
@@ -782,6 +841,16 @@ static int asus_input_mapping(struct hid_device *hdev,
}
}
+ /*
+ * The mute button is broken and only sends press events, we
+ * deal with this in our raw_event handler, so do not map it.
+ */
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ usage->hid == (HID_UP_CONSUMER | 0xe2)) {
+ input_set_capability(hi->input, EV_KEY, KEY_MUTE);
+ return -1;
+ }
+
return 0;
}
@@ -849,7 +918,8 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
- if (drvdata->quirks & QUIRK_T100_KEYBOARD) {
+ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -877,6 +947,19 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
+ hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ struct usb_host_interface *alt =
+ to_usb_interface(hdev->dev.parent)->altsetting;
+
+ if (alt->desc.bInterfaceNumber == MEDION_E1239T_TPAD_INTF) {
+ /* For separate input-devs for tp and tp toggle key */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ drvdata->quirks |= QUIRK_SKIP_INPUT_MAPPING;
+ drvdata->tp = &medion_e1239t_tp;
+ }
+ }
+
if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
@@ -1056,7 +1139,8 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
-
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
+ QUIRK_MEDION_E1239T },
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
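asus_e1239t_event() above works around keys whose hardware only ever reports the press: the raw-event handler synthesizes a complete press/release pair itself, while asus_input_mapping() declares the capability and returns -1 so the broken usage is not mapped normally. A minimal sketch of that idiom, with a hypothetical keycode:

#include <linux/input.h>

/* Emit a full click for hardware that never sends the release event. */
static void report_press_only_key(struct input_dev *input, unsigned int code)
{
	input_report_key(input, code, 1);
	input_sync(input);
	input_report_key(input, code, 0);
	input_sync(input);
}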
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1c71a1aa76b2..874fc3791f3b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,12 +76,9 @@
#define USB_VENDOR_ID_ALPS_JP 0x044E
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
-#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
-#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
#define HID_DEVICE_ID_ALPS_U1 0x1215
#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E
#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
-#define HID_DEVICE_ID_ALPS_1222 0x1222
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -281,9 +278,6 @@
#define USB_VENDOR_ID_CIDC 0x1677
-#define I2C_VENDOR_ID_CIRQUE 0x0488
-#define I2C_PRODUCT_ID_CIRQUE_121F 0x121F
-
#define USB_VENDOR_ID_CJTOUCH 0x24b8
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020 0x0020
#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040 0x0040
@@ -640,6 +634,7 @@
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
#define USB_DEVICE_ID_ITE8595 0x8595
+#define USB_DEVICE_ID_ITE_MEDION_E1239T 0xce50
#define USB_VENDOR_ID_JABRA 0x0b0e
#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
@@ -730,8 +725,6 @@
#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
-#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
-#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
#define USB_VENDOR_ID_LG 0x1fd2
@@ -1157,6 +1150,9 @@
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882 0x8882
#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883 0x8883
+#define USB_VENDOR_ID_TRUST 0x145f
+#define USB_DEVICE_ID_TRUST_PANORA_TABLET 0x0212
+
#define USB_VENDOR_ID_TURBOX 0x062a
#define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201
#define USB_DEVICE_ID_ASUS_MD_5110 0x5110
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index ed9b1c1f460d..48dff5d6b605 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * HID driver for Logitech Unifying receivers
+ * HID driver for Logitech receivers
*
* Copyright (c) 2011 Logitech
*/
@@ -701,7 +701,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
type_str, dj_hiddev->product);
} else {
snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%04x",
+ "Logitech Wireless Device PID:%04x",
dj_hiddev->product);
}
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 094f4f1b6555..1e1cf8eae649 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * HIDPP protocol for Logitech Unifying receivers
+ * HIDPP protocol for Logitech receivers
*
* Copyright (c) 2011 Logitech (c)
* Copyright (c) 2012-2013 Google (c)
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index d958475f8c81..e1b93ce32e01 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -15,6 +15,7 @@
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
+#include <linux/gpio/driver.h>
#include "hid-ids.h"
/* Commands codes in a raw output report */
@@ -27,6 +28,8 @@ enum {
MCP2221_I2C_PARAM_OR_STATUS = 0x10,
MCP2221_I2C_SET_SPEED = 0x20,
MCP2221_I2C_CANCEL = 0x10,
+ MCP2221_GPIO_SET = 0x50,
+ MCP2221_GPIO_GET = 0x51,
};
/* Response codes in a raw input report */
@@ -42,6 +45,8 @@ enum {
MCP2221_I2C_WRADDRL_SEND = 0x21,
MCP2221_I2C_ADDR_NACK = 0x25,
MCP2221_I2C_READ_COMPL = 0x55,
+ MCP2221_ALT_F_NOT_GPIOV = 0xEE,
+ MCP2221_ALT_F_NOT_GPIOD = 0xEF,
};
/*
@@ -59,6 +64,9 @@ struct mcp2221 {
int rxbuf_idx;
int status;
u8 cur_i2c_clk_div;
+ struct gpio_chip *gc;
+ u8 gp_idx;
+ u8 gpio_dir;
};
/*
@@ -526,6 +534,110 @@ static const struct i2c_algorithm mcp_i2c_algo = {
.functionality = mcp_i2c_func,
};
+static int mcp_gpio_get(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mcp->txbuf[0] = MCP2221_GPIO_GET;
+
+ mcp->gp_idx = (offset + 1) * 2;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+static void mcp_gpio_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ memset(mcp->txbuf, 0, 18);
+ mcp->txbuf[0] = MCP2221_GPIO_SET;
+
+ mcp->gp_idx = ((offset + 1) * 4) - 1;
+
+ mcp->txbuf[mcp->gp_idx - 1] = 1;
+ mcp->txbuf[mcp->gp_idx] = !!value;
+
+ mutex_lock(&mcp->lock);
+ mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+ mutex_unlock(&mcp->lock);
+}
+
+static int mcp_gpio_dir_set(struct mcp2221 *mcp,
+ unsigned int offset, u8 val)
+{
+ memset(mcp->txbuf, 0, 18);
+ mcp->txbuf[0] = MCP2221_GPIO_SET;
+
+ mcp->gp_idx = (offset + 1) * 5;
+
+ mcp->txbuf[mcp->gp_idx - 1] = 1;
+ mcp->txbuf[mcp->gp_idx] = val;
+
+ return mcp_send_data_req_status(mcp, mcp->txbuf, 18);
+}
+
+static int mcp_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_gpio_dir_set(mcp, offset, 0);
+ mutex_unlock(&mcp->lock);
+
+ return ret;
+}
+
+static int mcp_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_gpio_dir_set(mcp, offset, 1);
+ mutex_unlock(&mcp->lock);
+
+ /* Can't configure as output, bailout early */
+ if (ret)
+ return ret;
+
+ mcp_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int mcp_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ int ret;
+ struct mcp2221 *mcp = gpiochip_get_data(gc);
+
+ mcp->txbuf[0] = MCP2221_GPIO_GET;
+
+ mcp->gp_idx = (offset + 1) * 2;
+
+ mutex_lock(&mcp->lock);
+ ret = mcp_send_data_req_status(mcp, mcp->txbuf, 1);
+ mutex_unlock(&mcp->lock);
+
+ if (ret)
+ return ret;
+
+ if (mcp->gpio_dir)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
/* Gives current state of i2c engine inside mcp2221 */
static int mcp_get_i2c_eng_state(struct mcp2221 *mcp,
u8 *data, u8 idx)
@@ -638,6 +750,39 @@ static int mcp2221_raw_event(struct hid_device *hdev,
complete(&mcp->wait_in_report);
break;
+ case MCP2221_GPIO_GET:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) ||
+ (data[mcp->gp_idx + 1] == MCP2221_ALT_F_NOT_GPIOD)) {
+ mcp->status = -ENOENT;
+ } else {
+ mcp->status = !!data[mcp->gp_idx];
+ mcp->gpio_dir = !!data[mcp->gp_idx + 1];
+ }
+ break;
+ default:
+ mcp->status = -EAGAIN;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
+ case MCP2221_GPIO_SET:
+ switch (data[1]) {
+ case MCP2221_SUCCESS:
+ if ((data[mcp->gp_idx] == MCP2221_ALT_F_NOT_GPIOV) ||
+ (data[mcp->gp_idx - 1] == MCP2221_ALT_F_NOT_GPIOV)) {
+ mcp->status = -ENOENT;
+ } else {
+ mcp->status = 0;
+ }
+ break;
+ default:
+ mcp->status = -EAGAIN;
+ }
+ complete(&mcp->wait_in_report);
+ break;
+
default:
mcp->status = -EIO;
complete(&mcp->wait_in_report);
@@ -702,8 +847,32 @@ static int mcp2221_probe(struct hid_device *hdev,
}
i2c_set_adapdata(&mcp->adapter, mcp);
+ /* Setup GPIO chip */
+ mcp->gc = devm_kzalloc(&hdev->dev, sizeof(*mcp->gc), GFP_KERNEL);
+ if (!mcp->gc) {
+ ret = -ENOMEM;
+ goto err_gc;
+ }
+
+ mcp->gc->label = "mcp2221_gpio";
+ mcp->gc->direction_input = mcp_gpio_direction_input;
+ mcp->gc->direction_output = mcp_gpio_direction_output;
+ mcp->gc->get_direction = mcp_gpio_get_direction;
+ mcp->gc->set = mcp_gpio_set;
+ mcp->gc->get = mcp_gpio_get;
+ mcp->gc->ngpio = 4;
+ mcp->gc->base = -1;
+ mcp->gc->can_sleep = 1;
+ mcp->gc->parent = &hdev->dev;
+
+ ret = devm_gpiochip_add_data(&hdev->dev, mcp->gc, mcp);
+ if (ret)
+ goto err_gc;
+
return 0;
+err_gc:
+ i2c_del_adapter(&mcp->adapter);
err_i2c:
hid_hw_close(mcp->hdev);
err_hstop:
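The probe hunk above exposes the MCP2221's four GP pins through gpiolib by filling in a struct gpio_chip and registering it with devm_gpiochip_add_data(), so the chip is torn down automatically with the HID device and the driver data is later retrieved in the callbacks via gpiochip_get_data(). A stripped-down sketch of that registration, with hypothetical stub callbacks:

#include <linux/device.h>
#include <linux/gpio/driver.h>

/* Hypothetical minimal accessors; a real driver issues bus transfers here. */
static int example_gpio_get(struct gpio_chip *gc, unsigned int offset)
{
	return 0;
}

static void example_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
}

static int register_simple_gpiochip(struct device *dev, struct gpio_chip *gc,
				    void *priv)
{
	gc->label = "example_gpio";
	gc->get = example_gpio_get;
	gc->set = example_gpio_set;
	gc->ngpio = 4;
	gc->base = -1;		/* let gpiolib assign the GPIO base */
	gc->can_sleep = true;	/* accessors sleep (USB/HID transfers) */
	gc->parent = dev;

	/* priv is returned by gpiochip_get_data(gc) inside the callbacks */
	return devm_gpiochip_add_data(dev, gc, priv);
}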
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 03c720b47306..3f94b4954225 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -69,6 +69,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_ASUS_CUSTOM_UP BIT(17)
#define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
+#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -188,7 +189,8 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
/* reserved 0x0011 */
#define MT_CLS_WIN_8 0x0012
#define MT_CLS_EXPORT_ALL_INPUTS 0x0013
-#define MT_CLS_WIN_8_DUAL 0x0014
+/* reserved 0x0014 */
+#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -272,12 +274,14 @@ static const struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE,
.export_all_inputs = true },
- { .name = MT_CLS_WIN_8_DUAL,
+ { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_IGNORE_DUPLICATES |
MT_QUIRK_HOVERING |
MT_QUIRK_CONTACT_CNT_ACCURATE |
- MT_QUIRK_WIN8_PTP_BUTTONS,
+ MT_QUIRK_STICKY_FINGERS |
+ MT_QUIRK_WIN8_PTP_BUTTONS |
+ MT_QUIRK_FORCE_MULTI_INPUT,
.export_all_inputs = true },
/*
@@ -754,8 +758,7 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
MT_STORE_FIELD(inrange_state);
return 1;
case HID_DG_CONFIDENCE:
- if ((cls->name == MT_CLS_WIN_8 ||
- cls->name == MT_CLS_WIN_8_DUAL) &&
+ if (cls->name == MT_CLS_WIN_8 &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN))
app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -896,7 +899,7 @@ static void mt_release_pending_palms(struct mt_device *td,
clear_bit(slotnum, app->pending_palm_slots);
input_mt_slot(input, slotnum);
- input_mt_report_slot_state(input, MT_TOOL_PALM, false);
+ input_mt_report_slot_inactive(input);
need_sync = true;
}
@@ -1640,9 +1643,7 @@ static void mt_release_contacts(struct hid_device *hid)
if (mt) {
for (i = 0; i < mt->num_slots; i++) {
input_mt_slot(input_dev, i);
- input_mt_report_slot_state(input_dev,
- MT_TOOL_FINGER,
- false);
+ input_mt_report_slot_inactive(input_dev);
}
input_mt_sync_frame(input_dev);
input_sync(input_dev);
@@ -1714,6 +1715,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) {
+ hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP;
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+ }
+
timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
@@ -1786,32 +1792,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M3266) },
- /* Alps devices */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_U1_DUAL_PTP) },
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_ALPS_JP,
- HID_DEVICE_ID_ALPS_1222) },
-
- /* Lenovo X1 TAB Gen 2 */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_LENOVO,
- USB_DEVICE_ID_LENOVO_X1_TAB) },
-
- /* Lenovo X1 TAB Gen 3 */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
- USB_VENDOR_ID_LENOVO,
- USB_DEVICE_ID_LENOVO_X1_TAB3) },
-
/* Anton devices */
{ .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
@@ -1846,12 +1826,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
- /* Cirque devices */
- { .driver_data = MT_CLS_WIN_8_DUAL,
- HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
- I2C_VENDOR_ID_CIRQUE,
- I2C_PRODUCT_ID_CIRQUE_121F) },
-
/* CJTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
@@ -1926,6 +1900,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+ /* Elan devices */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ELAN, 0x313a) },
+
/* Elitegroup panel */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
@@ -2056,6 +2035,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
+ /* Synaptics devices */
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xce08) },
+
/* TopSeed panels */
{ .driver_data = MT_CLS_TOPSEED,
MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e4cb543de0cd..ca8b5c261c7c 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -168,6 +168,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TRUST, USB_DEVICE_ID_TRUST_PANORA_TABLET), HID_QUIRK_MULTI_INPUT | HID_QUIRK_HIDINPUT_FORCE },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 4c6ed6ef31f1..2f073f536070 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
if (sc->quirks & PS3REMOTE)
return ps3remote_fixup(hdev, rdesc, rsize);
+ /*
+ * Some knock-off USB dongles incorrectly report their button count
+ * as 13 instead of 16 causing three non-functional buttons.
+ */
+ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 &&
+ /* Report Count (13) */
+ rdesc[23] == 0x95 && rdesc[24] == 0x0D &&
+ /* Usage Maximum (13) */
+ rdesc[37] == 0x29 && rdesc[38] == 0x0D &&
+ /* Report Count (3) */
+ rdesc[43] == 0x95 && rdesc[44] == 0x03) {
+ hid_info(hdev, "Fixing up USB dongle report descriptor\n");
+ rdesc[24] = 0x10;
+ rdesc[38] = 0x10;
+ rdesc[44] = 0x00;
+ }
+
return rdesc;
}
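The byte offsets patched above follow from the HID short-item encoding: 0x95 <n> is Report Count(n) and 0x29 <n> is Usage Maximum(n), so rewriting only the data bytes at offsets 24, 38 and 44 turns Report Count (13) / Usage Maximum (13) / Report Count (3) into 16 buttons with no trailing items, leaving the rest of the descriptor untouched. A hedged sketch of the same guarded in-place patch:

#include <linux/hid.h>

/* Sketch: verify the expected HID short items before editing their data
 * bytes, so unrecognised descriptors are left alone.
 */
static void fixup_button_count(struct hid_device *hdev, u8 *rdesc,
			       unsigned int rsize)
{
	if (rsize >= 45 &&
	    rdesc[23] == 0x95 && rdesc[24] == 0x0D &&	/* Report Count (13)  */
	    rdesc[37] == 0x29 && rdesc[38] == 0x0D &&	/* Usage Maximum (13) */
	    rdesc[43] == 0x95 && rdesc[44] == 0x03) {	/* Report Count (3)   */
		hid_info(hdev, "fixing up report descriptor\n");
		rdesc[24] = 0x10;	/* Report Count (16)  */
		rdesc[38] = 0x10;	/* Usage Maximum (16) */
		rdesc[44] = 0x00;	/* Report Count (0)   */
	}
}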
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index a66f08041a1a..ec142bc8c1da 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -389,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "Schneider SCL142ALM",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{ } /* Terminate list */
};
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index aa2dbed30fc3..6cf59fd26ad7 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -480,6 +480,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
sizeof(ldr_xfer_query_resp));
if (rv < 0) {
client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
return rv;
}
@@ -489,6 +490,7 @@ static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
"data size %d is not equal to size of loader_xfer_query_response %zu\n",
rv, sizeof(struct loader_xfer_query_response));
client_data->flag_retry = true;
+ *fw_info = (struct shim_fw_info){};
return -EMSGSIZE;
}
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 0e3e72f0f510..19497d1d92bf 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,7 +2,8 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o coresight-platform.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o \
+ coresight-platform.o coresight-sysfs.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
coresight-tmc-etf.o \
coresight-tmc-etr.o
diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c
index 2fdaeec80ee5..98f830c6ed50 100644
--- a/drivers/hwtracing/coresight/coresight-cti-platform.c
+++ b/drivers/hwtracing/coresight/coresight-cti-platform.c
@@ -2,11 +2,17 @@
/*
* Copyright (c) 2019, The Linaro Limited. All rights reserved.
*/
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#include <dt-bindings/arm/coresight-cti-dt.h>
-#include <linux/of.h>
#include "coresight-cti.h"
+#include "coresight-priv.h"
/* Number of CTI signals in the v8 architecturally defined connection */
#define NR_V8PE_IN_SIGS 2
@@ -429,8 +435,7 @@ static int cti_plat_create_impdef_connections(struct device *dev,
}
/* get the hardware configuration & connection data. */
-int cti_plat_get_hw_data(struct device *dev,
- struct cti_drvdata *drvdata)
+static int cti_plat_get_hw_data(struct device *dev, struct cti_drvdata *drvdata)
{
int rc = 0;
struct cti_device *cti_dev = &drvdata->ctidev;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 1f8fb7c15e80..392757f3a019 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -4,7 +4,13 @@
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
#include "coresight-cti.h"
@@ -1036,8 +1042,8 @@ static int cti_create_con_sysfs_attr(struct device *dev,
enum cti_conn_attr_type attr_type,
int attr_idx)
{
- struct dev_ext_attribute *eattr = 0;
- char *name = 0;
+ struct dev_ext_attribute *eattr;
+ char *name;
eattr = devm_kzalloc(dev, sizeof(struct dev_ext_attribute),
GFP_KERNEL);
@@ -1139,7 +1145,7 @@ static int cti_create_con_attr_set(struct device *dev, int con_idx,
}
/* create the array of group pointers for the CTI sysfs groups */
-int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
+static int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
{
int nr_groups;
@@ -1156,8 +1162,8 @@ int cti_create_cons_groups(struct device *dev, struct cti_device *ctidev)
int cti_create_cons_sysfs(struct device *dev, struct cti_drvdata *drvdata)
{
struct cti_device *ctidev = &drvdata->ctidev;
- int err = 0, con_idx = 0, i;
- struct cti_trig_con *tc = NULL;
+ int err, con_idx = 0, i;
+ struct cti_trig_con *tc;
err = cti_create_cons_groups(dev, ctidev);
if (err)
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index aa6e0249bd70..40387d58c8e7 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -4,7 +4,22 @@
* Author: Mike Leach <mike.leach@linaro.org>
*/
+#include <linux/amba/bus.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/coresight.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuhotplug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/spinlock.h>
+
+#include "coresight-priv.h"
#include "coresight-cti.h"
/**
@@ -19,7 +34,7 @@
*/
/* net of CTI devices connected via CTM */
-LIST_HEAD(ect_net);
+static LIST_HEAD(ect_net);
/* protect the list */
static DEFINE_MUTEX(ect_mutex);
@@ -27,6 +42,12 @@ static DEFINE_MUTEX(ect_mutex);
#define csdev_to_cti_drvdata(csdev) \
dev_get_drvdata(csdev->dev.parent)
+/* power management handling */
+static int nr_cti_cpu;
+
+/* quick lookup list for CPU bound CTIs when power handling */
+static struct cti_drvdata *cti_cpu_drvdata[NR_CPUS];
+
/*
* CTI naming. CTI bound to cores will have the name cti_cpu<N> where
* N is the CPU ID. System CTIs will have the name cti_sys<I> where I
@@ -116,6 +137,35 @@ cti_err_not_enabled:
return rc;
}
+/* re-enable CTI on CPU when using CPU hotplug */
+static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
+{
+ struct cti_config *config = &drvdata->config;
+ struct device *dev = &drvdata->csdev->dev;
+
+ pm_runtime_get_sync(dev->parent);
+ spin_lock(&drvdata->spinlock);
+ config->hw_powered = true;
+
+ /* no need to do anything if no enable request */
+ if (!atomic_read(&drvdata->config.enable_req_count))
+ goto cti_hp_not_enabled;
+
+ /* try to claim the device */
+ if (coresight_claim_device(drvdata->base))
+ goto cti_hp_not_enabled;
+
+ cti_write_all_hw_regs(drvdata);
+ config->hw_enabled = true;
+ spin_unlock(&drvdata->spinlock);
+ return;
+
+ /* did not re-enable due to no claim / no request */
+cti_hp_not_enabled:
+ spin_unlock(&drvdata->spinlock);
+ pm_runtime_put(dev->parent);
+}
+
/* disable hardware */
static int cti_disable_hw(struct cti_drvdata *drvdata)
{
@@ -442,6 +492,34 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
return err;
}
+static bool cti_add_sysfs_link(struct cti_drvdata *drvdata,
+ struct cti_trig_con *tc)
+{
+ struct coresight_sysfs_link link_info;
+ int link_err = 0;
+
+ link_info.orig = drvdata->csdev;
+ link_info.orig_name = tc->con_dev_name;
+ link_info.target = tc->con_dev;
+ link_info.target_name = dev_name(&drvdata->csdev->dev);
+
+ link_err = coresight_add_sysfs_link(&link_info);
+ if (link_err)
+ dev_warn(&drvdata->csdev->dev,
+ "Failed to set CTI sysfs link %s<=>%s\n",
+ link_info.orig_name, link_info.target_name);
+ return !link_err;
+}
+
+static void cti_remove_sysfs_link(struct cti_trig_con *tc)
+{
+ struct coresight_sysfs_link link_info;
+
+ link_info.orig_name = tc->con_dev_name;
+ link_info.target = tc->con_dev;
+ coresight_remove_sysfs_link(&link_info);
+}
+
/*
* Look for a matching connection device name in the list of connections.
* If found then swap in the csdev name, set trig con association pointer
@@ -452,6 +530,8 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
struct coresight_device *csdev)
{
struct cti_trig_con *tc;
+ struct cti_drvdata *drvdata = container_of(ctidev, struct cti_drvdata,
+ ctidev);
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev_name) {
@@ -459,7 +539,12 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name,
/* match: so swap in csdev name & dev */
tc->con_dev_name = dev_name(&csdev->dev);
tc->con_dev = csdev;
- return true;
+ /* try to set sysfs link */
+ if (cti_add_sysfs_link(drvdata, tc))
+ return true;
+ /* link failed - remove CTI reference */
+ tc->con_dev = NULL;
+ break;
}
}
}
@@ -522,6 +607,7 @@ void cti_remove_assoc_from_csdev(struct coresight_device *csdev)
ctidev = &ctidrv->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
if (tc->con_dev == csdev->ect_dev) {
+ cti_remove_sysfs_link(tc);
tc->con_dev = NULL;
break;
}
@@ -543,10 +629,16 @@ static void cti_update_conn_xrefs(struct cti_drvdata *drvdata)
struct cti_device *ctidev = &drvdata->ctidev;
list_for_each_entry(tc, &ctidev->trig_cons, node) {
- if (tc->con_dev)
- /* set tc->con_dev->ect_dev */
- coresight_set_assoc_ectdev_mutex(tc->con_dev,
+ if (tc->con_dev) {
+ /* if we can set the sysfs link */
+ if (cti_add_sysfs_link(drvdata, tc))
+ /* set the CTI/csdev association */
+ coresight_set_assoc_ectdev_mutex(tc->con_dev,
drvdata->csdev);
+ else
+ /* otherwise remove reference from CTI */
+ tc->con_dev = NULL;
+ }
}
}
@@ -559,7 +651,113 @@ static void cti_remove_conn_xrefs(struct cti_drvdata *drvdata)
if (tc->con_dev) {
coresight_set_assoc_ectdev_mutex(tc->con_dev,
NULL);
+ cti_remove_sysfs_link(tc);
+ tc->con_dev = NULL;
+ }
+ }
+}
+
+/** cti PM callbacks **/
+static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct cti_drvdata *drvdata;
+ unsigned int cpu = smp_processor_id();
+ int notify_res = NOTIFY_OK;
+
+ if (!cti_cpu_drvdata[cpu])
+ return NOTIFY_OK;
+
+ drvdata = cti_cpu_drvdata[cpu];
+
+ if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
+ return NOTIFY_BAD;
+
+ spin_lock(&drvdata->spinlock);
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* CTI regs all static - we have a copy & nothing to save */
+ drvdata->config.hw_powered = false;
+ if (drvdata->config.hw_enabled)
+ coresight_disclaim_device(drvdata->base);
+ break;
+
+ case CPU_PM_ENTER_FAILED:
+ drvdata->config.hw_powered = true;
+ if (drvdata->config.hw_enabled) {
+ if (coresight_claim_device(drvdata->base))
+ drvdata->config.hw_enabled = false;
+ }
+ break;
+
+ case CPU_PM_EXIT:
+ /* write hardware registers to re-enable. */
+ drvdata->config.hw_powered = true;
+ drvdata->config.hw_enabled = false;
+
+ /* check enable reference count to enable HW */
+ if (atomic_read(&drvdata->config.enable_req_count)) {
+ /* check we can claim the device as we re-power */
+ if (coresight_claim_device(drvdata->base))
+ goto cti_notify_exit;
+
+ drvdata->config.hw_enabled = true;
+ cti_write_all_hw_regs(drvdata);
+ }
+ break;
+
+ default:
+ notify_res = NOTIFY_DONE;
+ break;
+ }
+
+cti_notify_exit:
+ spin_unlock(&drvdata->spinlock);
+ return notify_res;
+}
+
+static struct notifier_block cti_cpu_pm_nb = {
+ .notifier_call = cti_cpu_pm_notify,
+};
+
+/* CPU HP handlers */
+static int cti_starting_cpu(unsigned int cpu)
+{
+ struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
+
+ if (!drvdata)
+ return 0;
+
+ cti_cpuhp_enable_hw(drvdata);
+ return 0;
+}
+
+static int cti_dying_cpu(unsigned int cpu)
+{
+ struct cti_drvdata *drvdata = cti_cpu_drvdata[cpu];
+
+ if (!drvdata)
+ return 0;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->config.hw_powered = false;
+ coresight_disclaim_device(drvdata->base);
+ spin_unlock(&drvdata->spinlock);
+ return 0;
+}
+
+/* release PM registrations */
+static void cti_pm_release(struct cti_drvdata *drvdata)
+{
+ if (drvdata->ctidev.cpu >= 0) {
+ if (--nr_cti_cpu == 0) {
+ cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+
+ cpuhp_remove_state_nocalls(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
}
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
}
}
@@ -578,12 +776,12 @@ int cti_disable(struct coresight_device *csdev)
return cti_disable_hw(drvdata);
}
-const struct coresight_ops_ect cti_ops_ect = {
+static const struct coresight_ops_ect cti_ops_ect = {
.enable = cti_enable,
.disable = cti_disable,
};
-const struct coresight_ops cti_ops = {
+static const struct coresight_ops cti_ops = {
.ect_ops = &cti_ops_ect,
};
@@ -598,6 +796,7 @@ static void cti_device_release(struct device *dev)
mutex_lock(&ect_mutex);
cti_remove_conn_xrefs(drvdata);
+ cti_pm_release(drvdata);
/* remove from the list */
list_for_each_entry_safe(ect_item, ect_tmp, &ect_net, node) {
@@ -673,6 +872,24 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
goto err_out;
}
+ /* setup CPU power management handling for CPU bound CTI devices. */
+ if (drvdata->ctidev.cpu >= 0) {
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
+ if (!nr_cti_cpu++) {
+ cpus_read_lock();
+ ret = cpuhp_setup_state_nocalls_cpuslocked(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ "arm/coresight_cti:starting",
+ cti_starting_cpu, cti_dying_cpu);
+
+ if (!ret)
+ ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
+ cpus_read_unlock();
+ if (ret)
+ goto err_out;
+ }
+ }
+
/* create dynamic attributes for connections */
ret = cti_create_cons_sysfs(dev, drvdata);
if (ret) {
@@ -711,6 +928,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_out:
+ cti_pm_release(drvdata);
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index 004df3ab9dd0..acf7b545e6b9 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -7,8 +7,14 @@
#ifndef _CORESIGHT_CORESIGHT_CTI_H
#define _CORESIGHT_CORESIGHT_CTI_H
-#include <asm/local.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
#include "coresight-priv.h"
/*
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 3810290e6d07..03e3f2590191 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -717,7 +717,7 @@ static const struct attribute_group coresight_etb_mgmt_group = {
.name = "mgmt",
};
-const struct attribute_group *coresight_etb_groups[] = {
+static const struct attribute_group *coresight_etb_groups[] = {
&coresight_etb_group,
&coresight_etb_mgmt_group,
NULL,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index e2cb6873c3f2..bf22dcfd3327 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -504,7 +504,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etm_enable_arg arg = { 0 };
+ struct etm_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ce41482431f9..b673e738bc9a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -205,7 +205,7 @@ static ssize_t reset_store(struct device *dev,
* started state. ARM recommends start-stop logic is set before
* each trace run.
*/
- config->vinst_ctrl |= BIT(0);
+ config->vinst_ctrl = BIT(0);
if (drvdata->nr_addr_cmp == true) {
config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
/* SSSTATUS, bit[9] */
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a90d757f7043..747afc875f91 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -412,7 +412,7 @@ out:
static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etm4_enable_arg arg = { 0 };
+ struct etm4_enable_arg arg = { };
int ret;
spin_lock(&drvdata->spinlock);
@@ -791,7 +791,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->ts_ctrl = 0x0;
/* TRCVICTLR::EVENT = 0x01, select the always on logic */
- config->vinst_ctrl |= BIT(0);
+ config->vinst_ctrl = BIT(0);
}
static u64 etm4_get_ns_access_type(struct etmv4_config *config)
@@ -894,17 +894,8 @@ static void etm4_set_start_stop_filter(struct etmv4_config *config,
static void etm4_set_default_filter(struct etmv4_config *config)
{
- u64 start, stop;
-
- /*
- * Configure address range comparator '0' to encompass all
- * possible addresses.
- */
- start = 0x0;
- stop = ~0x0;
-
- etm4_set_comparator_filter(config, start, stop,
- ETM_DEFAULT_ADDR_COMP);
+ /* The 'trace everything' default filter is achieved by no filtering */
+ config->viiectlr = 0x0;
/*
* TRCVICTLR::SSSTATUS == 1, the start-stop logic is
@@ -925,11 +916,9 @@ static void etm4_set_default(struct etmv4_config *config)
/*
* Make default initialisation trace everything
*
- * Select the "always true" resource selector on the
- * "Enablign Event" line and configure address range comparator
- * '0' to trace all the possible address range. From there
- * configure the "include/exclude" engine to include address
- * range comparator '0'.
+ * This is done by a minimal default config, sufficient to enable
+ * full instruction trace, with the default 'trace everything'
+ * filter achieved by having no filtering.
*/
etm4_set_default_config(config);
etm4_set_default_filter(config);
@@ -1527,6 +1516,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
+ etmdrvdata[drvdata->cpu] = NULL;
if (--etm4_count == 0) {
etm4_cpu_pm_unregister();
@@ -1552,10 +1542,13 @@ static const struct amba_id etm4_ids[] = {
CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
+ CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
- CS_AMBA_ID(0x000bb802), /* Qualcomm Kryo 385 Cortex-A55 */
- CS_AMBA_ID(0x000bb803), /* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
+ CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
+ CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
{},
};
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 43418a2126ff..e4912abda3aa 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -87,6 +87,7 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
int *nr_inport, int *nr_outport)
{
struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
int in = 0, out = 0;
do {
@@ -94,10 +95,16 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
if (!ep)
break;
- if (of_coresight_legacy_ep_is_input(ep))
- in++;
- else
- out++;
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+
+ if (of_coresight_legacy_ep_is_input(ep)) {
+ in = (endpoint.port + 1 > in) ?
+ endpoint.port + 1 : in;
+ } else {
+ out = (endpoint.port + 1) > out ?
+ endpoint.port + 1 : out;
+ }
} while (ep);
@@ -137,9 +144,16 @@ of_coresight_count_ports(struct device_node *port_parent)
{
int i = 0;
struct device_node *ep = NULL;
+ struct of_endpoint endpoint;
+
+ while ((ep = of_graph_get_next_endpoint(port_parent, ep))) {
+ /* Defer error handling to parsing */
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ continue;
+ if (endpoint.port + 1 > i)
+ i = endpoint.port + 1;
+ }
- while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
- i++;
return i;
}
@@ -191,14 +205,12 @@ static int of_coresight_get_cpu(struct device *dev)
* Parses the local port, remote device name and the remote port.
*
* Returns :
- * 1 - If the parsing is successful and a connection record
- * was created for an output connection.
* 0 - If the parsing completed without any fatal errors.
* -Errno - Fatal error, abort the scanning.
*/
static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *ep,
- struct coresight_connection *conn)
+ struct coresight_platform_data *pdata)
{
int ret = 0;
struct of_endpoint endpoint, rendpoint;
@@ -206,6 +218,7 @@ static int of_coresight_parse_endpoint(struct device *dev,
struct device_node *rep = NULL;
struct device *rdev = NULL;
struct fwnode_handle *rdev_fwnode;
+ struct coresight_connection *conn;
do {
/* Parse the local port details */
@@ -232,6 +245,13 @@ static int of_coresight_parse_endpoint(struct device *dev,
break;
}
+ conn = &pdata->conns[endpoint.port];
+ if (conn->child_fwnode) {
+ dev_warn(dev, "Duplicate output port %d\n",
+ endpoint.port);
+ ret = -EINVAL;
+ break;
+ }
conn->outport = endpoint.port;
/*
* Hold the refcount to the target device. This could be
@@ -244,7 +264,6 @@ static int of_coresight_parse_endpoint(struct device *dev,
conn->child_fwnode = fwnode_handle_get(rdev_fwnode);
conn->child_port = rendpoint.port;
/* Connection record updated */
- ret = 1;
} while (0);
of_node_put(rparent);
@@ -258,7 +277,6 @@ static int of_get_coresight_platform_data(struct device *dev,
struct coresight_platform_data *pdata)
{
int ret = 0;
- struct coresight_connection *conn;
struct device_node *ep = NULL;
const struct device_node *parent = NULL;
bool legacy_binding = false;
@@ -287,8 +305,6 @@ static int of_get_coresight_platform_data(struct device *dev,
dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
}
- conn = pdata->conns;
-
/* Iterate through each output port to discover topology */
while ((ep = of_graph_get_next_endpoint(parent, ep))) {
/*
@@ -300,15 +316,9 @@ static int of_get_coresight_platform_data(struct device *dev,
if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
continue;
- ret = of_coresight_parse_endpoint(dev, ep, conn);
- switch (ret) {
- case 1:
- conn++; /* Fall through */
- case 0:
- break;
- default:
+ ret = of_coresight_parse_endpoint(dev, ep, pdata);
+ if (ret)
return ret;
- }
}
return 0;
@@ -501,7 +511,7 @@ static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
}
/* acpi_get_dsd_graph - Find the _DSD Graph property for the given device. */
-const union acpi_object *
+static const union acpi_object *
acpi_get_dsd_graph(struct acpi_device *adev)
{
int i;
@@ -564,7 +574,7 @@ acpi_validate_coresight_graph(const union acpi_object *cs_graph)
* Returns the pointer to the CoreSight Graph Package when found. Otherwise
* returns NULL.
*/
-const union acpi_object *
+static const union acpi_object *
acpi_get_coresight_graph(struct acpi_device *adev)
{
const union acpi_object *graph_list, *graph;
@@ -647,6 +657,16 @@ static int acpi_coresight_parse_link(struct acpi_device *adev,
* coresight_remove_match().
*/
conn->child_fwnode = fwnode_handle_get(&r_adev->fwnode);
+ } else if (dir == ACPI_CORESIGHT_LINK_SLAVE) {
+ /*
+ * We are only interested in the port number
+ * for the input ports at this component.
+ * Store the port number in child_port.
+ */
+ conn->child_port = fields[0].integer.value;
+ } else {
+ /* Invalid direction */
+ return -EINVAL;
}
return dir;
@@ -692,10 +712,20 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return dir;
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
- pdata->nr_outport++;
+ if (ptr->outport > pdata->nr_outport)
+ pdata->nr_outport = ptr->outport;
ptr++;
} else {
- pdata->nr_inport++;
+ WARN_ON(pdata->nr_inport == ptr->child_port);
+ /*
+ * We do not track input port connections for a device.
+ * However, we need the highest port number described,
+ * which can be recorded now; the connection record is
+ * then reused for an output connection. Hence, do not
+ * move the ptr for input connections.
+ */
+ if (ptr->child_port > pdata->nr_inport)
+ pdata->nr_inport = ptr->child_port;
}
}
@@ -704,8 +734,13 @@ static int acpi_coresight_parse_graph(struct acpi_device *adev,
return rc;
/* Copy the connection information to the final location */
- for (i = 0; i < pdata->nr_outport; i++)
- pdata->conns[i] = conns[i];
+ for (i = 0; conns + i < ptr; i++) {
+ int port = conns[i].outport;
+
+ /* Duplicate output port */
+ WARN_ON(pdata->conns[port].child_fwnode);
+ pdata->conns[port] = conns[i];
+ }
devm_kfree(&adev->dev, conns);
return 0;
@@ -822,7 +857,7 @@ coresight_get_platform_data(struct device *dev)
error:
if (!IS_ERR_OR_NULL(pdata))
/* Cleanup the connection information */
- coresight_release_platform_data(pdata);
+ coresight_release_platform_data(NULL, pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_get_platform_data);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 890f9a5c97c6..36c943ae94d5 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -153,6 +153,15 @@ struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
+int coresight_add_sysfs_link(struct coresight_sysfs_link *info);
+void coresight_remove_sysfs_link(struct coresight_sysfs_link *info);
+int coresight_create_conns_sysfs_group(struct coresight_device *csdev);
+void coresight_remove_conns_sysfs_group(struct coresight_device *csdev);
+int coresight_make_links(struct coresight_device *orig,
+ struct coresight_connection *conn,
+ struct coresight_device *target);
+void coresight_remove_links(struct coresight_device *orig,
+ struct coresight_connection *conn);
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
@@ -206,12 +215,16 @@ cti_remove_assoc_from_csdev(struct coresight_device *csdev) {}
/* extract the data value from a UCI structure given amba_id pointer. */
static inline void *coresight_get_uci_data(const struct amba_id *id)
{
- if (id->data)
- return ((struct amba_cs_uci_id *)(id->data))->data;
- return 0;
+ struct amba_cs_uci_id *uci_id = id->data;
+
+ if (!uci_id)
+ return NULL;
+
+ return uci_id->data;
}
-void coresight_release_platform_data(struct coresight_platform_data *pdata);
+void coresight_release_platform_data(struct coresight_device *csdev,
+ struct coresight_platform_data *pdata);
struct coresight_device *
coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
new file mode 100644
index 000000000000..82afeaf2ccc4
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "coresight-priv.h"
+
+/*
+ * Connections group - links attribute.
+ * Count of created links between coresight components in the group.
+ */
+static ssize_t nr_links_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return sprintf(buf, "%d\n", csdev->nr_links);
+}
+static DEVICE_ATTR_RO(nr_links);
+
+static struct attribute *coresight_conns_attrs[] = {
+ &dev_attr_nr_links.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_conns_group = {
+ .attrs = coresight_conns_attrs,
+ .name = "connections",
+};
+
+/*
+ * Create connections group for CoreSight devices.
+ * This group will then be used to collate the sysfs links between
+ * devices.
+ */
+int coresight_create_conns_sysfs_group(struct coresight_device *csdev)
+{
+ int ret = 0;
+
+ if (!csdev)
+ return -EINVAL;
+
+ ret = sysfs_create_group(&csdev->dev.kobj, &coresight_conns_group);
+ if (ret)
+ return ret;
+
+ csdev->has_conns_grp = true;
+ return ret;
+}
+
+void coresight_remove_conns_sysfs_group(struct coresight_device *csdev)
+{
+ if (!csdev)
+ return;
+
+ if (csdev->has_conns_grp) {
+ sysfs_remove_group(&csdev->dev.kobj, &coresight_conns_group);
+ csdev->has_conns_grp = false;
+ }
+}
+
+int coresight_add_sysfs_link(struct coresight_sysfs_link *info)
+{
+ int ret = 0;
+
+ if (!info)
+ return -EINVAL;
+ if (!info->orig || !info->target ||
+ !info->orig_name || !info->target_name)
+ return -EINVAL;
+ if (!info->orig->has_conns_grp || !info->target->has_conns_grp)
+ return -EINVAL;
+
+ /* first link orig->target */
+ ret = sysfs_add_link_to_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ &info->target->dev.kobj,
+ info->orig_name);
+ if (ret)
+ return ret;
+
+ /* second link target->orig */
+ ret = sysfs_add_link_to_group(&info->target->dev.kobj,
+ coresight_conns_group.name,
+ &info->orig->dev.kobj,
+ info->target_name);
+
+ /* error in second link - remove first - otherwise inc counts */
+ if (ret) {
+ sysfs_remove_link_from_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ info->orig_name);
+ } else {
+ info->orig->nr_links++;
+ info->target->nr_links++;
+ }
+
+ return ret;
+}
+
+void coresight_remove_sysfs_link(struct coresight_sysfs_link *info)
+{
+ if (!info)
+ return;
+ if (!info->orig || !info->target ||
+ !info->orig_name || !info->target_name)
+ return;
+
+ sysfs_remove_link_from_group(&info->orig->dev.kobj,
+ coresight_conns_group.name,
+ info->orig_name);
+
+ sysfs_remove_link_from_group(&info->target->dev.kobj,
+ coresight_conns_group.name,
+ info->target_name);
+
+ info->orig->nr_links--;
+ info->target->nr_links--;
+}
+
+/*
+ * coresight_make_links: Make a link for a connection from a @orig
+ * device to @target, represented by @conn.
+ *
+ * e.g, for devOrig[output_X] -> devTarget[input_Y] is represented
+ * as two symbolic links :
+ *
+ * /sys/.../devOrig/out:X -> /sys/.../devTarget/
+ * /sys/.../devTarget/in:Y -> /sys/.../devOrig/
+ *
+ * The link names are allocated for the device where they appear, i.e., the
+ * "out" link on the master and the "in" link on the slave device.
+ * The link info is stored in the connection record to avoid
+ * reconstructing the names on removal.
+ */
+int coresight_make_links(struct coresight_device *orig,
+ struct coresight_connection *conn,
+ struct coresight_device *target)
+{
+ int ret = -ENOMEM;
+ char *outs = NULL, *ins = NULL;
+ struct coresight_sysfs_link *link = NULL;
+
+ do {
+ outs = devm_kasprintf(&orig->dev, GFP_KERNEL,
+ "out:%d", conn->outport);
+ if (!outs)
+ break;
+ ins = devm_kasprintf(&target->dev, GFP_KERNEL,
+ "in:%d", conn->child_port);
+ if (!ins)
+ break;
+ link = devm_kzalloc(&orig->dev,
+ sizeof(struct coresight_sysfs_link),
+ GFP_KERNEL);
+ if (!link)
+ break;
+
+ link->orig = orig;
+ link->target = target;
+ link->orig_name = outs;
+ link->target_name = ins;
+
+ ret = coresight_add_sysfs_link(link);
+ if (ret)
+ break;
+
+ conn->link = link;
+
+ /*
+ * Install the device connection. This also indicates that
+ * the links are operational on both ends.
+ */
+ conn->child_dev = target;
+ return 0;
+ } while (0);
+
+ return ret;
+}
+
+/*
+ * coresight_remove_links: Remove the sysfs links for a given connection @conn,
+ * from @orig device to @target device. See coresight_make_links() for more
+ * details.
+ */
+void coresight_remove_links(struct coresight_device *orig,
+ struct coresight_connection *conn)
+{
+ if (!orig || !conn->link)
+ return;
+
+ coresight_remove_sysfs_link(conn->link);
+
+ devm_kfree(&conn->child_dev->dev, conn->link->target_name);
+ devm_kfree(&orig->dev, conn->link->orig_name);
+ devm_kfree(&orig->dev, conn->link);
+ conn->link = NULL;
+ conn->child_dev = NULL;
+}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d0cc3985b72a..36cce2bfb744 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -596,13 +596,6 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
goto out;
}
- /* There is no point in reading a TMC in HW FIFO mode */
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode != TMC_MODE_CIRCULAR_BUFFER) {
- ret = -EINVAL;
- goto out;
- }
-
/* Don't interfere if operated from Perf */
if (drvdata->mode == CS_MODE_PERF) {
ret = -EINVAL;
@@ -616,8 +609,15 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ ret = -EINVAL;
+ goto out;
+ }
__tmc_etb_disable_hw(drvdata);
+ }
drvdata->reading = true;
out:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1cf82fa58289..39fba1d16e6e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -361,7 +361,7 @@ static const struct attribute_group coresight_tmc_mgmt_group = {
.name = "mgmt",
};
-const struct attribute_group *coresight_tmc_groups[] = {
+static const struct attribute_group *coresight_tmc_groups[] = {
&coresight_tmc_group,
&coresight_tmc_mgmt_group,
NULL,
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index c71553c09f8e..f3efbb3b2b4d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1031,7 +1031,7 @@ static void coresight_device_release(struct device *dev)
static int coresight_orphan_match(struct device *dev, void *data)
{
- int i;
+ int i, ret = 0;
bool still_orphan = false;
struct coresight_device *csdev, *i_csdev;
struct coresight_connection *conn;
@@ -1053,49 +1053,62 @@ static int coresight_orphan_match(struct device *dev, void *data)
for (i = 0; i < i_csdev->pdata->nr_outport; i++) {
conn = &i_csdev->pdata->conns[i];
+ /* Skip the port if FW doesn't describe it */
+ if (!conn->child_fwnode)
+ continue;
/* We have found at least one orphan connection */
if (conn->child_dev == NULL) {
/* Does it match this newly added device? */
- if (conn->child_fwnode == csdev->dev.fwnode)
- conn->child_dev = csdev;
- else
+ if (conn->child_fwnode == csdev->dev.fwnode) {
+ ret = coresight_make_links(i_csdev,
+ conn, csdev);
+ if (ret)
+ return ret;
+ } else {
/* This component still has an orphan */
still_orphan = true;
+ }
}
}
i_csdev->orphan = still_orphan;
/*
- * Returning '0' ensures that all known component on the
- * bus will be checked.
+ * Returning '0' in case we didn't encounter any error
+ * ensures that all known components on the bus will be checked.
*/
return 0;
}
-static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+static int coresight_fixup_orphan_conns(struct coresight_device *csdev)
{
- /*
- * No need to check for a return value as orphan connection(s)
- * are hooked-up with each newly added component.
- */
- bus_for_each_dev(&coresight_bustype, NULL,
+ return bus_for_each_dev(&coresight_bustype, NULL,
csdev, coresight_orphan_match);
}
-static void coresight_fixup_device_conns(struct coresight_device *csdev)
+static int coresight_fixup_device_conns(struct coresight_device *csdev)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < csdev->pdata->nr_outport; i++) {
struct coresight_connection *conn = &csdev->pdata->conns[i];
+ if (!conn->child_fwnode)
+ continue;
conn->child_dev =
coresight_find_csdev_by_fwnode(conn->child_fwnode);
- if (!conn->child_dev)
+ if (conn->child_dev) {
+ ret = coresight_make_links(csdev, conn,
+ conn->child_dev);
+ if (ret)
+ break;
+ } else {
csdev->orphan = true;
+ }
}
+
+ return 0;
}
static int coresight_remove_match(struct device *dev, void *data)
@@ -1118,12 +1131,12 @@ static int coresight_remove_match(struct device *dev, void *data)
for (i = 0; i < iterator->pdata->nr_outport; i++) {
conn = &iterator->pdata->conns[i];
- if (conn->child_dev == NULL)
+ if (conn->child_dev == NULL || conn->child_fwnode == NULL)
continue;
if (csdev->dev.fwnode == conn->child_fwnode) {
iterator->orphan = true;
- conn->child_dev = NULL;
+ coresight_remove_links(iterator, conn);
/*
* Drop the reference to the handle for the remote
* device acquired in parsing the connections from
@@ -1213,16 +1226,27 @@ postcore_initcall(coresight_init);
* coresight_release_platform_data: Release references to the devices connected
* to the output port of this device.
*/
-void coresight_release_platform_data(struct coresight_platform_data *pdata)
+void coresight_release_platform_data(struct coresight_device *csdev,
+ struct coresight_platform_data *pdata)
{
int i;
+ struct coresight_connection *conns = pdata->conns;
for (i = 0; i < pdata->nr_outport; i++) {
- if (pdata->conns[i].child_fwnode) {
- fwnode_handle_put(pdata->conns[i].child_fwnode);
+ /* If we have made the links, remove them now */
+ if (csdev && conns[i].child_dev)
+ coresight_remove_links(csdev, &conns[i]);
+ /*
+ * Drop the refcount and clear the handle as this device
+ * is going away
+ */
+ if (conns[i].child_fwnode) {
+ fwnode_handle_put(conns[i].child_fwnode);
pdata->conns[i].child_fwnode = NULL;
}
}
+ if (csdev)
+ coresight_remove_conns_sysfs_group(csdev);
}
struct coresight_device *coresight_register(struct coresight_desc *desc)
@@ -1304,11 +1328,19 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
mutex_lock(&coresight_mutex);
- coresight_fixup_device_conns(csdev);
- coresight_fixup_orphan_conns(csdev);
- cti_add_assoc_to_csdev(csdev);
+ ret = coresight_create_conns_sysfs_group(csdev);
+ if (!ret)
+ ret = coresight_fixup_device_conns(csdev);
+ if (!ret)
+ ret = coresight_fixup_orphan_conns(csdev);
+ if (!ret)
+ cti_add_assoc_to_csdev(csdev);
mutex_unlock(&coresight_mutex);
+ if (ret) {
+ coresight_unregister(csdev);
+ return ERR_PTR(ret);
+ }
return csdev;
@@ -1316,7 +1348,7 @@ err_free_csdev:
kfree(csdev);
err_out:
/* Cleanup the connection information */
- coresight_release_platform_data(desc->pdata);
+ coresight_release_platform_data(NULL, desc->pdata);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
@@ -1326,7 +1358,7 @@ void coresight_unregister(struct coresight_device *csdev)
etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
- coresight_release_platform_data(csdev->pdata);
+ coresight_release_platform_data(csdev, csdev->pdata);
device_unregister(&csdev->dev);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a9c03f5c3482..4f333889489c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1439,9 +1439,9 @@ static int i801_add_mux(struct i801_priv *priv)
return -ENOMEM;
lookup->dev_id = "i2c-mux-gpio";
for (i = 0; i < mux_config->n_gpios; i++) {
- lookup->table[i].chip_label = mux_config->gpio_chip;
- lookup->table[i].chip_hwnum = mux_config->gpios[i];
- lookup->table[i].con_id = "mux";
+ lookup->table[i] = (struct gpiod_lookup)
+ GPIO_LOOKUP(mux_config->gpio_chip,
+ mux_config->gpios[i], "mux", 0);
}
gpiod_add_lookup_table(lookup);
priv->lookup = lookup;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 5a5638e1daa1..57986984a90b 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -435,8 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* fall through to the write state, as we will need to
* send a byte as well
*/
- /* Fall through */
-
+ fallthrough;
case STATE_WRITE:
/*
* we are writing data to the device... check for the
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 5d91a6dda894..1080637ca40e 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -89,13 +89,13 @@ config ADXL372_I2C
module will be called adxl372_i2c.
config BMA180
- tristate "Bosch BMA180/BMA25x 3-Axis Accelerometer Driver"
- depends on I2C
+ tristate "Bosch BMA023/BMA1x0/BMA25x 3-Axis Accelerometer Driver"
+ depends on I2C && INPUT_BMA150=n
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say Y here if you want to build a driver for the Bosch BMA180 or
- BMA25x triaxial acceleration sensor.
+ Say Y here if you want to build a driver for the Bosch BMA023, BMA150,
+ BMA180, SMB380, or BMA25x triaxial acceleration sensor.
To compile this driver as a module, choose M here: the
module will be called bma180.
@@ -238,7 +238,7 @@ config IIO_ST_ACCEL_3AXIS
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
- LNG2DM, LIS3DE, LIS2DE12
+ LNG2DM, LIS3DE, LIS2DE12, LIS2HH12
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index fcd91d5f05fd..265722e4b13f 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -7,6 +7,7 @@
* Support for BMA250 (c) Peter Meerwald <pmeerw@pmeerw.net>
*
* SPI is not supported by driver
+ * BMA023/BMA150/SMB380: 7-bit I2C slave address 0x38
* BMA180: 7-bit I2C slave address 0x40 or 0x41
* BMA250: 7-bit I2C slave address 0x18 or 0x19
* BMA254: 7-bit I2C slave address 0x18 or 0x19
@@ -33,6 +34,8 @@
#define BMA180_IRQ_NAME "bma180_event"
enum chip_ids {
+ BMA023,
+ BMA150,
BMA180,
BMA250,
BMA254,
@@ -48,7 +51,7 @@ struct bma180_part_info {
unsigned int num_scales;
const int *bw_table;
unsigned int num_bw;
- int center_temp;
+ int temp_offset;
u8 int_reset_reg, int_reset_mask;
u8 sleep_reg, sleep_mask;
@@ -57,13 +60,25 @@ struct bma180_part_info {
u8 power_reg, power_mask, lowpower_val;
u8 int_enable_reg, int_enable_mask;
u8 int_map_reg, int_enable_dataready_int1_mask;
- u8 softreset_reg;
+ u8 softreset_reg, softreset_val;
int (*chip_config)(struct bma180_data *data);
void (*chip_disable)(struct bma180_data *data);
};
/* Register set */
+#define BMA023_CTRL_REG0 0x0a
+#define BMA023_CTRL_REG1 0x0b
+#define BMA023_CTRL_REG2 0x14
+#define BMA023_CTRL_REG3 0x15
+
+#define BMA023_RANGE_MASK GENMASK(4, 3) /* Range of accel values */
+#define BMA023_BW_MASK GENMASK(2, 0) /* Accel bandwidth */
+#define BMA023_SLEEP BIT(0)
+#define BMA023_INT_RESET_MASK BIT(6)
+#define BMA023_NEW_DATA_INT BIT(5) /* Interrupt when new accel data is ready */
+#define BMA023_RESET_VAL BIT(1)
+
#define BMA180_CHIP_ID 0x00 /* Need to distinguish BMA180 from other */
#define BMA180_ACC_X_LSB 0x02 /* First of 6 registers of accel data */
#define BMA180_TEMP 0x08
@@ -94,6 +109,7 @@ struct bma180_part_info {
/* We have to write this value in reset register to do soft reset */
#define BMA180_RESET_VAL 0xb6
+#define BMA023_ID_REG_VAL 0x02
#define BMA180_ID_REG_VAL 0x03
#define BMA250_ID_REG_VAL 0x03
#define BMA254_ID_REG_VAL 0xfa /* 250 decimal */
@@ -156,6 +172,9 @@ enum bma180_chan {
TEMP
};
+static int bma023_bw_table[] = { 25, 50, 100, 190, 375, 750, 1500 }; /* Hz */
+static int bma023_scale_table[] = { 2452, 4903, 9709, };
+
static int bma180_bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
static int bma180_scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
@@ -319,7 +338,8 @@ static int bma180_set_pmode(struct bma180_data *data, bool mode)
static int bma180_soft_reset(struct bma180_data *data)
{
int ret = i2c_smbus_write_byte_data(data->client,
- data->part_info->softreset_reg, BMA180_RESET_VAL);
+ data->part_info->softreset_reg,
+ data->part_info->softreset_val);
if (ret)
dev_err(&data->client->dev, "failed to reset the chip\n");
@@ -349,11 +369,28 @@ static int bma180_chip_init(struct bma180_data *data)
*/
msleep(20);
- ret = bma180_set_new_data_intr_state(data, false);
+ return bma180_set_new_data_intr_state(data, false);
+}
+
+static int bma023_chip_config(struct bma180_data *data)
+{
+ int ret = bma180_chip_init(data);
+
if (ret)
- return ret;
+ goto err;
+
+ ret = bma180_set_bw(data, 50); /* 50 Hz */
+ if (ret)
+ goto err;
+ ret = bma180_set_scale(data, 2452); /* 2 G */
+ if (ret)
+ goto err;
- return bma180_set_pmode(data, false);
+ return 0;
+
+err:
+ dev_err(&data->client->dev, "failed to config the chip\n");
+ return ret;
}
static int bma180_chip_config(struct bma180_data *data)
@@ -362,6 +399,9 @@ static int bma180_chip_config(struct bma180_data *data)
if (ret)
goto err;
+ ret = bma180_set_pmode(data, false);
+ if (ret)
+ goto err;
ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_DIS_WAKE_UP, 1);
if (ret)
goto err;
@@ -391,6 +431,9 @@ static int bma25x_chip_config(struct bma180_data *data)
if (ret)
goto err;
+ ret = bma180_set_pmode(data, false);
+ if (ret)
+ goto err;
ret = bma180_set_bw(data, 16); /* 16 Hz */
if (ret)
goto err;
@@ -413,6 +456,17 @@ err:
return ret;
}
+static void bma023_chip_disable(struct bma180_data *data)
+{
+ if (bma180_set_sleep_state(data, true))
+ goto err;
+
+ return;
+
+err:
+ dev_err(&data->client->dev, "failed to disable the chip\n");
+}
+
static void bma180_chip_disable(struct bma180_data *data)
{
if (bma180_set_new_data_intr_state(data, false))
@@ -512,8 +566,12 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
- *val = sign_extend32(ret >> chan->scan_type.shift,
- chan->scan_type.realbits - 1);
+ if (chan->scan_type.sign == 's') {
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ } else {
+ *val = ret;
+ }
return IIO_VAL_INT;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
*val = data->bw;
@@ -531,7 +589,7 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_OFFSET:
- *val = data->part_info->center_temp;
+ *val = data->part_info->temp_offset;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -609,6 +667,11 @@ static const struct iio_enum bma180_power_mode_enum = {
.set = bma180_set_power_mode,
};
+static const struct iio_chan_spec_ext_info bma023_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
@@ -616,6 +679,35 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ }
};
+#define BMA023_ACC_CHANNEL(_axis, _bits) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = AXIS_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ .shift = 16 - _bits, \
+ }, \
+ .ext_info = bma023_ext_info, \
+}
+
+#define BMA150_TEMP_CHANNEL { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_index = TEMP, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 8, \
+ .storagebits = 16, \
+ }, \
+}
+
#define BMA180_ACC_CHANNEL(_axis, _bits) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -645,6 +737,21 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}, \
}
+static const struct iio_chan_spec bma023_channels[] = {
+ BMA023_ACC_CHANNEL(X, 10),
+ BMA023_ACC_CHANNEL(Y, 10),
+ BMA023_ACC_CHANNEL(Z, 10),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_chan_spec bma150_channels[] = {
+ BMA023_ACC_CHANNEL(X, 10),
+ BMA023_ACC_CHANNEL(Y, 10),
+ BMA023_ACC_CHANNEL(Z, 10),
+ BMA150_TEMP_CHANNEL,
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
static const struct iio_chan_spec bma180_channels[] = {
BMA180_ACC_CHANNEL(X, 14),
BMA180_ACC_CHANNEL(Y, 14),
@@ -670,6 +777,63 @@ static const struct iio_chan_spec bma254_channels[] = {
};
static const struct bma180_part_info bma180_part_info[] = {
+ [BMA023] = {
+ .chip_id = BMA023_ID_REG_VAL,
+ .channels = bma023_channels,
+ .num_channels = ARRAY_SIZE(bma023_channels),
+ .scale_table = bma023_scale_table,
+ .num_scales = ARRAY_SIZE(bma023_scale_table),
+ .bw_table = bma023_bw_table,
+ .num_bw = ARRAY_SIZE(bma023_bw_table),
+ /* No temperature channel */
+ .temp_offset = 0,
+ .int_reset_reg = BMA023_CTRL_REG0,
+ .int_reset_mask = BMA023_INT_RESET_MASK,
+ .sleep_reg = BMA023_CTRL_REG0,
+ .sleep_mask = BMA023_SLEEP,
+ .bw_reg = BMA023_CTRL_REG2,
+ .bw_mask = BMA023_BW_MASK,
+ .scale_reg = BMA023_CTRL_REG2,
+ .scale_mask = BMA023_RANGE_MASK,
+ /* No power mode on bma023 */
+ .power_reg = 0,
+ .power_mask = 0,
+ .lowpower_val = 0,
+ .int_enable_reg = BMA023_CTRL_REG3,
+ .int_enable_mask = BMA023_NEW_DATA_INT,
+ .softreset_reg = BMA023_CTRL_REG0,
+ .softreset_val = BMA023_RESET_VAL,
+ .chip_config = bma023_chip_config,
+ .chip_disable = bma023_chip_disable,
+ },
+ [BMA150] = {
+ .chip_id = BMA023_ID_REG_VAL,
+ .channels = bma150_channels,
+ .num_channels = ARRAY_SIZE(bma150_channels),
+ .scale_table = bma023_scale_table,
+ .num_scales = ARRAY_SIZE(bma023_scale_table),
+ .bw_table = bma023_bw_table,
+ .num_bw = ARRAY_SIZE(bma023_bw_table),
+ .temp_offset = -60, /* 0 LSB @ -30 degree C */
+ .int_reset_reg = BMA023_CTRL_REG0,
+ .int_reset_mask = BMA023_INT_RESET_MASK,
+ .sleep_reg = BMA023_CTRL_REG0,
+ .sleep_mask = BMA023_SLEEP,
+ .bw_reg = BMA023_CTRL_REG2,
+ .bw_mask = BMA023_BW_MASK,
+ .scale_reg = BMA023_CTRL_REG2,
+ .scale_mask = BMA023_RANGE_MASK,
+ /* No power mode on bma150 */
+ .power_reg = 0,
+ .power_mask = 0,
+ .lowpower_val = 0,
+ .int_enable_reg = BMA023_CTRL_REG3,
+ .int_enable_mask = BMA023_NEW_DATA_INT,
+ .softreset_reg = BMA023_CTRL_REG0,
+ .softreset_val = BMA023_RESET_VAL,
+ .chip_config = bma023_chip_config,
+ .chip_disable = bma023_chip_disable,
+ },
[BMA180] = {
.chip_id = BMA180_ID_REG_VAL,
.channels = bma180_channels,
@@ -678,7 +842,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma180_scale_table),
.bw_table = bma180_bw_table,
.num_bw = ARRAY_SIZE(bma180_bw_table),
- .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA180_CTRL_REG0,
.int_reset_mask = BMA180_RESET_INT,
.sleep_reg = BMA180_CTRL_REG0,
@@ -693,6 +857,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_enable_reg = BMA180_CTRL_REG3,
.int_enable_mask = BMA180_NEW_DATA_INT,
.softreset_reg = BMA180_RESET,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma180_chip_config,
.chip_disable = bma180_chip_disable,
},
@@ -704,7 +869,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma25x_scale_table),
.bw_table = bma25x_bw_table,
.num_bw = ARRAY_SIZE(bma25x_bw_table),
- .center_temp = 48, /* 0 LSB @ 24 degree C */
+ .temp_offset = 48, /* 0 LSB @ 24 degree C */
.int_reset_reg = BMA250_INT_RESET_REG,
.int_reset_mask = BMA250_INT_RESET_MASK,
.sleep_reg = BMA250_POWER_REG,
@@ -721,6 +886,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_map_reg = BMA250_INT_MAP_REG,
.int_enable_dataready_int1_mask = BMA250_INT1_DATA_MASK,
.softreset_reg = BMA250_RESET_REG,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma25x_chip_config,
.chip_disable = bma25x_chip_disable,
},
@@ -732,7 +898,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.num_scales = ARRAY_SIZE(bma25x_scale_table),
.bw_table = bma25x_bw_table,
.num_bw = ARRAY_SIZE(bma25x_bw_table),
- .center_temp = 46, /* 0 LSB @ 23 degree C */
+ .temp_offset = 46, /* 0 LSB @ 23 degree C */
.int_reset_reg = BMA254_INT_RESET_REG,
.int_reset_mask = BMA254_INT_RESET_MASK,
.sleep_reg = BMA254_POWER_REG,
@@ -749,6 +915,7 @@ static const struct bma180_part_info bma180_part_info[] = {
.int_map_reg = BMA254_INT_MAP_REG,
.int_enable_dataready_int1_mask = BMA254_INT1_DATA_MASK,
.softreset_reg = BMA254_RESET_REG,
+ .softreset_val = BMA180_RESET_VAL,
.chip_config = bma25x_chip_config,
.chip_disable = bma25x_chip_disable,
},
@@ -990,9 +1157,12 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
#endif
static const struct i2c_device_id bma180_ids[] = {
+ { "bma023", BMA023 },
+ { "bma150", BMA150 },
{ "bma180", BMA180 },
{ "bma250", BMA250 },
{ "bma254", BMA254 },
+ { "smb380", BMA150 },
{ }
};
@@ -1000,6 +1170,14 @@ MODULE_DEVICE_TABLE(i2c, bma180_ids);
static const struct of_device_id bma180_of_match[] = {
{
+ .compatible = "bosch,bma023",
+ .data = (void *)BMA023
+ },
+ {
+ .compatible = "bosch,bma150",
+ .data = (void *)BMA150
+ },
+ {
.compatible = "bosch,bma180",
.data = (void *)BMA180
},
@@ -1011,6 +1189,10 @@ static const struct of_device_id bma180_of_match[] = {
.compatible = "bosch,bma254",
.data = (void *)BMA254
},
+ {
+ .compatible = "bosch,smb380",
+ .data = (void *)BMA150
+ },
{ }
};
MODULE_DEVICE_TABLE(of, bma180_of_match);
@@ -1030,5 +1212,5 @@ module_i2c_driver(bma180_driver);
MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
MODULE_AUTHOR("Texas Instruments, Inc.");
-MODULE_DESCRIPTION("Bosch BMA180/BMA25x triaxial acceleration sensor");
+MODULE_DESCRIPTION("Bosch BMA023/BMA1x0/BMA25x triaxial acceleration sensor");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 2bf210fa4ba6..ef89bded7390 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -226,7 +227,7 @@ static struct i2c_driver dmard06_driver = {
.id_table = dmard06_id,
.driver = {
.name = DMARD06_DRV_NAME,
- .of_match_table = of_match_ptr(dmard06_of_match),
+ .of_match_table = dmard06_of_match,
.pm = DMARD06_PM_OPS,
},
};
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 0d9e2def2b25..0ec0533448bc 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum accel_3d_channel {
@@ -391,18 +389,13 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&accel_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&accel_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -426,9 +419,7 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&accel_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -443,8 +434,7 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&accel_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &accel_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 38411e1c155b..b580d605f848 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -2,6 +2,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/delay.h>
@@ -21,8 +22,8 @@ static int kxsd9_i2c_probe(struct i2c_client *i2c,
regmap = devm_regmap_init_i2c(i2c, &config);
if (IS_ERR(regmap)) {
- dev_err(&i2c->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&i2c->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -36,15 +37,11 @@ static int kxsd9_i2c_remove(struct i2c_client *client)
return kxsd9_common_remove(&client->dev);
}
-#ifdef CONFIG_OF
static const struct of_device_id kxsd9_of_match[] = {
{ .compatible = "kionix,kxsd9", },
{ },
};
MODULE_DEVICE_TABLE(of, kxsd9_of_match);
-#else
-#define kxsd9_of_match NULL
-#endif
static const struct i2c_device_id kxsd9_i2c_id[] = {
{"kxsd9", 0},
@@ -55,7 +52,7 @@ MODULE_DEVICE_TABLE(i2c, kxsd9_i2c_id);
static struct i2c_driver kxsd9_i2c_driver = {
.driver = {
.name = "kxsd9",
- .of_match_table = of_match_ptr(kxsd9_of_match),
+ .of_match_table = kxsd9_of_match,
.pm = &kxsd9_dev_pm_ops,
},
.probe = kxsd9_i2c_probe,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 3d5bea651923..9d07642c0de1 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -135,7 +135,7 @@ static int mxc4005_read_xyz(struct mxc4005_data *data)
int ret;
ret = regmap_bulk_read(data->regmap, MXC4005_REG_XOUT_UPPER,
- (u8 *) data->buffer, sizeof(data->buffer));
+ data->buffer, sizeof(data->buffer));
if (ret < 0) {
dev_err(data->dev, "failed to read axes\n");
return ret;
@@ -150,7 +150,7 @@ static int mxc4005_read_axis(struct mxc4005_data *data,
__be16 reg;
int ret;
- ret = regmap_bulk_read(data->regmap, addr, (u8 *) &reg, sizeof(reg));
+ ret = regmap_bulk_read(data->regmap, addr, &reg, sizeof(reg));
if (ret < 0) {
dev_err(data->dev, "failed to read reg %02x\n", addr);
return ret;
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5b13e293cade..5d356288e001 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -35,6 +35,7 @@ enum st_accel_type {
LIS2DW12,
LIS3DHH,
LIS2DE12,
+ LIS2HH12,
ST_ACCEL_MAX,
};
@@ -59,6 +60,7 @@ enum st_accel_type {
#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
+#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index 9f2b40474b8e..b5c814ef1637 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -37,8 +37,7 @@ static int st_accel_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
return err;
- err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]);
if (err < 0)
goto st_accel_buffer_predisable;
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 7320275c7e56..43c50167d220 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -904,6 +904,83 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = true,
.bootime = 2,
},
+ {
+ .wai = 0x41,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2HH12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0x70,
+ .odr_avl = {
+ { .hz = 10, .value = 0x01, },
+ { .hz = 50, .value = 0x02, },
+ { .hz = 100, .value = 0x03, },
+ { .hz = 200, .value = 0x04, },
+ { .hz = 400, .value = 0x05, },
+ { .hz = 800, .value = 0x06, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0x70,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(61),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(122),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(244),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = 0x08,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x01,
+ },
+ .int2 = {
+ .addr = 0x25,
+ .mask = 0x01,
+ },
+ .addr_ihl = 0x24,
+ .mask_ihl = 0x02,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
+
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -1170,8 +1247,7 @@ EXPORT_SYMBOL(st_accel_get_settings);
int st_accel_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *adata = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)adata->dev->platform_data;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(adata->dev);
struct iio_chan_spec *channels;
size_t channels_size;
int err;
@@ -1204,8 +1280,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
"failed to apply ACPI orientation data: %d\n", err);
indio_dev->channels = channels;
- adata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &adata->sensor_settings->fs.fs_avl[0];
+ adata->current_fullscale = &adata->sensor_settings->fs.fs_avl[0];
adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
if (!pdata)
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 6b283be26ebc..360e16f2cadb 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis2de12",
.data = LIS2DE12_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2hh12",
+ .data = LIS2HH12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -138,6 +142,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS2DE12_ACCEL_DEV_NAME },
+ { LIS2HH12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 12bb8b7ca1ff..ff3569635ce0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -246,6 +246,41 @@ config AD799X
To compile this driver as a module, choose M here: the module will be
called ad799x.
+config AD9467
+ tristate "Analog Devices AD9467 High Speed ADC driver"
+ depends on SPI
+ select ADI_AXI_ADC
+ help
+ Say yes here to build support for Analog Devices:
+ * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
+
+ The driver requires the assistance of the AXI ADC IP core to operate,
+ since SPI is used for configuration only, while data has to be
+ streamed into memory via DMA.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad9467.
+
+config ADI_AXI_ADC
+ tristate "Analog Devices Generic AXI ADC IP core driver"
+ select IIO_BUFFER
+ select IIO_BUFFER_HW_CONSUMER
+ select IIO_BUFFER_DMAENGINE
+ help
+ Say yes here to build support for Analog Devices Generic
+ AXI ADC IP core. The IP core is used for interfacing with
+ analog-to-digital (ADC) converters that require either a high-speed
+ serial interface (JESD204B/C) or a source synchronous parallel
+ interface (LVDS/CMOS).
+ Typically (for such devices) SPI will be used for configuration only,
+ while this IP core handles the streaming of data into memory via DMA.
+
+ Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called adi-axi-adc.
+
config ASPEED_ADC
tristate "Aspeed ADC"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -595,6 +630,16 @@ config MAX1118
To compile this driver as a module, choose M here: the module will be
called max1118.
+config MAX1241
+ tristate "Maxim max1241 ADC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Maxim max1241 12-bit, single-channel
+ ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max1241.
+
config MAX1363
tristate "Maxim max1363 ADC driver"
depends on I2C
@@ -692,6 +737,16 @@ config MESON_SARADC
To compile this driver as a module, choose M here: the
module will be called meson_saradc.
+config MP2629_ADC
+ tristate "Monolithic MP2629 ADC driver"
+ depends on MFD_MP2629
+ help
+ Say yes here to have support for the MP2629 battery charger IC
+ ADC device, accessed over I2C.
+
+ This driver provides ADC conversion of system, input power supply
+ and battery voltage & current information.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
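
The ADI_AXI_ADC help text above describes the split this series relies on: the SPI-facing converter driver only configures the chip, while the memory-mapped AXI IP core streams samples into memory via DMA. Below is a minimal, hedged sketch of how a converter driver plugs into that core; it mirrors what ad9467_probe() does later in this diff, and the my_* names are purely illustrative, not part of any API.

#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/adc/adi-axi-adc.h>

struct my_conv_state {
	struct spi_device *spi;
};

static int my_conv_read_raw(struct adi_axi_adc_conv *conv,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	/* a real converter driver would issue SPI reads here */
	return -EINVAL;
}

static int my_conv_probe(struct spi_device *spi)
{
	struct adi_axi_adc_conv *conv;
	struct my_conv_state *st;

	/* register with the AXI ADC core; it allocates our private area */
	conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
	if (IS_ERR(conv))
		return PTR_ERR(conv);

	st = adi_axi_adc_conv_priv(conv);
	st->spi = spi;

	/* conv->chip_info (channels, scales, max_rate) is filled in here */
	conv->read_raw = my_conv_read_raw;

	return 0;
}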
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 637807861112..90f94ada7b30 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -26,6 +26,8 @@ obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD7949) += ad7949.o
obj-$(CONFIG_AD799X) += ad799x.o
+obj-$(CONFIG_AD9467) += ad9467.o
+obj-$(CONFIG_ADI_AXI_ADC) += adi-axi-adc.o
obj-$(CONFIG_ASPEED_ADC) += aspeed_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
@@ -57,6 +59,7 @@ obj-$(CONFIG_LTC2497) += ltc2497.o ltc2497-core.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX11100) += max11100.o
obj-$(CONFIG_MAX1118) += max1118.o
+obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MAX9611) += max9611.o
obj-$(CONFIG_MCP320X) += mcp320x.o
@@ -65,6 +68,7 @@ obj-$(CONFIG_MCP3911) += mcp3911.o
obj-$(CONFIG_MEDIATEK_MT6577_AUXADC) += mt6577_auxadc.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
obj-$(CONFIG_MESON_SARADC) += meson_saradc.o
+obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 76747488044b..4e816d714ad2 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -12,9 +12,11 @@
#include <linux/sysfs.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -27,6 +29,8 @@ struct ad7476_state;
struct ad7476_chip_info {
unsigned int int_vref_uv;
struct iio_chan_spec channel[2];
+ /* channels used when convst gpio is defined */
+ struct iio_chan_spec convst_channel[2];
void (*reset)(struct ad7476_state *);
};
@@ -34,6 +38,7 @@ struct ad7476_state {
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
struct regulator *reg;
+ struct gpio_desc *convst_gpio;
struct spi_transfer xfer;
struct spi_message msg;
/*
@@ -64,6 +69,17 @@ enum ad7476_supported_device_ids {
ID_ADS7868,
};
+static void ad7091_convst(struct ad7476_state *st)
+{
+ if (!st->convst_gpio)
+ return;
+
+ gpiod_set_value(st->convst_gpio, 0);
+ udelay(1); /* CONVST pulse width: 10 ns min */
+ gpiod_set_value(st->convst_gpio, 1);
+ udelay(1); /* Conversion time: 650 ns max */
+}
+
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -71,6 +87,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
struct ad7476_state *st = iio_priv(indio_dev);
int b_sent;
+ ad7091_convst(st);
+
b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
goto done;
@@ -93,6 +111,8 @@ static int ad7476_scan_direct(struct ad7476_state *st)
{
int ret;
+ ad7091_convst(st);
+
ret = spi_sync(st->spi, &st->msg);
if (ret)
return ret;
@@ -160,6 +180,8 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
+#define AD7091R_CONVST_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), \
+ BIT(IIO_CHAN_INFO_RAW))
#define ADS786X_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
@@ -167,6 +189,8 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7091R] = {
.channel[0] = AD7091R_CHAN(12),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .convst_channel[0] = AD7091R_CONVST_CHAN(12),
+ .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.reset = ad7091_reset,
},
[ID_AD7276] = {
@@ -232,6 +256,13 @@ static const struct iio_info ad7476_info = {
.read_raw = &ad7476_read_raw,
};
+static void ad7476_reg_disable(void *data)
+{
+ struct ad7476_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
@@ -254,6 +285,17 @@ static int ad7476_probe(struct spi_device *spi)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable,
+ st);
+ if (ret)
+ return ret;
+
+ st->convst_gpio = devm_gpiod_get_optional(&spi->dev,
+ "adi,conversion-start",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->convst_gpio))
+ return PTR_ERR(st->convst_gpio);
+
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
@@ -266,6 +308,9 @@ static int ad7476_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channel;
indio_dev->num_channels = 2;
indio_dev->info = &ad7476_info;
+
+ if (st->convst_gpio)
+ indio_dev->channels = st->chip_info->convst_channel;
/* Setup default message */
st->xfer.rx_buf = &st->data;
@@ -295,19 +340,8 @@ error_disable_reg:
return ret;
}
-static int ad7476_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct ad7476_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- regulator_disable(st->reg);
-
- return 0;
-}
-
static const struct spi_device_id ad7476_id[] = {
+ {"ad7091", ID_AD7091R},
{"ad7091r", ID_AD7091R},
{"ad7273", ID_AD7277},
{"ad7274", ID_AD7276},
@@ -343,7 +377,6 @@ static struct spi_driver ad7476_driver = {
.name = "ad7476",
},
.probe = ad7476_probe,
- .remove = ad7476_remove,
.id_table = ad7476_id,
};
module_spi_driver(ad7476_driver);
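
The ad7476 changes above drop the explicit .remove callback: once the regulator teardown is registered with devm_add_action_or_reset(), the devm core disables the supply automatically on unbind (or immediately, if registration fails). A generic sketch of that pattern follows; the my_* names are illustrative.

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static void my_reg_disable(void *data)
{
	regulator_disable(data);
}

static int my_enable_regulator(struct device *dev, struct regulator *reg)
{
	int ret;

	ret = regulator_enable(reg);
	if (ret)
		return ret;

	/* disables the regulator on driver unbind; no .remove needed */
	return devm_add_action_or_reset(dev, my_reg_disable, reg);
}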
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index 291c1a898129..f47606ebbbbe 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -206,10 +206,29 @@ static const struct ad_sigma_delta_info ad7780_sigma_delta_info = {
.irq_flags = IRQF_TRIGGER_LOW,
};
-#define AD7780_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL(1, 0, 0, bits, 32, (wordsize) - (bits))
-#define AD7170_CHANNEL(bits, wordsize) \
- AD_SD_CHANNEL_NO_SAMP_FREQ(1, 0, 0, bits, 32, (wordsize) - (bits))
+#define _AD7780_CHANNEL(_bits, _wordsize, _mask_all) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = 1, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 32, \
+ .shift = (_wordsize) - (_bits), \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define AD7780_CHANNEL(_bits, _wordsize) \
+ _AD7780_CHANNEL(_bits, _wordsize, BIT(IIO_CHAN_INFO_SAMP_FREQ))
+#define AD7170_CHANNEL(_bits, _wordsize) \
+ _AD7780_CHANNEL(_bits, _wordsize, 0)
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7170] = {
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index abb239392631..48432b6f6002 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -64,25 +64,73 @@
#define AD7791_MODE_SEL_MASK (0x3 << 6)
#define AD7791_MODE_SEL(x) ((x) << 6)
+#define __AD7991_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type, _mask_all) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD7991_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7991_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7991_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD7991_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
#define DECLARE_AD7787_CHANNELS(name, bits, storagebits) \
const struct iio_chan_spec name[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
- AD_SD_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
+ AD7991_CHANNEL(1, 1, AD7791_CH_AIN2, (bits), (storagebits), 0), \
+ AD7991_SHORTED_CHANNEL(2, 0, AD7791_CH_AIN1N_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
+ AD7991_SUPPLY_CHANNEL(3, 2, AD7791_CH_AVDD_MONITOR, \
(bits), (storagebits), 0), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
#define DECLARE_AD7791_CHANNELS(name, bits, storagebits) \
const struct iio_chan_spec name[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
+ AD7991_DIFF_CHANNEL(0, 0, 0, AD7791_CH_AIN1P_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
+ AD7991_SHORTED_CHANNEL(1, 0, AD7791_CH_AIN1N_AIN1N, \
(bits), (storagebits), 0), \
- AD_SD_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
+ AD7991_SUPPLY_CHANNEL(2, 1, AD7791_CH_AVDD_MONITOR, \
(bits), (storagebits), 0), \
IIO_CHAN_SOFT_TIMESTAMP(3), \
}
@@ -444,5 +492,5 @@ static struct spi_driver ad7791_driver = {
module_spi_driver(ad7791_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("Analog Device AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
+MODULE_DESCRIPTION("Analog Devices AD7787/AD7788/AD7789/AD7790/AD7791 ADC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index e5691e330323..808485f42415 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -354,29 +354,28 @@ static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
static IIO_CONST_ATTR_NAMED(sampling_frequency_available_ad7797,
sampling_frequency_available, "123 62 50 33 17 16 12 10 8 6 4");
-static ssize_t ad7793_show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int ad7793_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7793_state *st = iio_priv(indio_dev);
- int i, len = 0;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
- st->scale_avail[i][1]);
-
- len += sprintf(buf + len, "\n");
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(st->scale_avail) * 2;
- return len;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
- in_voltage-voltage_scale_available, S_IRUGO,
- ad7793_show_scale_available, NULL, 0);
-
static struct attribute *ad7793_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
NULL
};
@@ -534,6 +533,7 @@ static const struct iio_info ad7793_info = {
.read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+ .read_avail = ad7793_read_avail,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
@@ -546,47 +546,113 @@ static const struct iio_info ad7797_info = {
.validate_trigger = ad_sd_validate_trigger,
};
+#define __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type, _mask_type_av, _mask_all) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = (_mask_type_av), \
+ .info_mask_shared_by_all = _mask_all, \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD7793_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, 0, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_TEMP, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7793_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD7793_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7797_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
+#define AD7797_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD7793_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE, \
+ 0, \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ))
+
#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
- AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
- AD_SD_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
- AD_SD_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), (_s)), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), (_s)), \
+ AD7793_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), (_s)), \
+ AD7793_TEMP_CHANNEL(4, AD7793_CH_TEMP, (_b), (_sb), (_s)), \
+ AD7793_SUPPLY_CHANNEL(5, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), (_s)), \
IIO_CHAN_SOFT_TIMESTAMP(6), \
}
#define DECLARE_AD7795_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(3, 3, 3, AD7795_CH_AIN4P_AIN4M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(4, 4, 4, AD7795_CH_AIN5P_AIN5M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(5, 5, 5, AD7795_CH_AIN6P_AIN6M, (_b), (_sb), 0), \
+ AD7793_SHORTED_CHANNEL(6, 0, AD7795_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_TEMP_CHANNEL(7, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(8, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(9), \
}
#define DECLARE_AD7797_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7797_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7797_SHORTED_CHANNEL(1, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_TEMP_CHANNEL(2, AD7793_CH_TEMP, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(3, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(4), \
}
#define DECLARE_AD7799_CHANNELS(_name, _b, _sb) \
const struct iio_chan_spec _name##_channels[] = { \
- AD_SD_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
- AD_SD_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
- AD_SD_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
- AD_SD_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(0, 0, 0, AD7793_CH_AIN1P_AIN1M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(1, 1, 1, AD7793_CH_AIN2P_AIN2M, (_b), (_sb), 0), \
+ AD7793_DIFF_CHANNEL(2, 2, 2, AD7793_CH_AIN3P_AIN3M, (_b), (_sb), 0), \
+ AD7793_SHORTED_CHANNEL(3, 0, AD7793_CH_AIN1M_AIN1M, (_b), (_sb), 0), \
+ AD7793_SUPPLY_CHANNEL(4, 3, AD7793_CH_AVDD_MONITOR, (_b), (_sb), 0), \
IIO_CHAN_SOFT_TIMESTAMP(5), \
}
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
new file mode 100644
index 000000000000..1e8fd83b9bc2
--- /dev/null
+++ b/drivers/iio/adc/ad9467.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD9467 SPI ADC driver
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/clk.h>
+
+#include <linux/iio/adc/adi-axi-adc.h>
+
+/*
+ * ADI High-Speed ADC common spi interface registers
+ * See Application-Note AN-877:
+ * https://www.analog.com/media/en/technical-documentation/application-notes/AN-877.pdf
+ */
+
+#define AN877_ADC_REG_CHIP_PORT_CONF 0x00
+#define AN877_ADC_REG_CHIP_ID 0x01
+#define AN877_ADC_REG_CHIP_GRADE 0x02
+#define AN877_ADC_REG_CHAN_INDEX 0x05
+#define AN877_ADC_REG_TRANSFER 0xFF
+#define AN877_ADC_REG_MODES 0x08
+#define AN877_ADC_REG_TEST_IO 0x0D
+#define AN877_ADC_REG_ADC_INPUT 0x0F
+#define AN877_ADC_REG_OFFSET 0x10
+#define AN877_ADC_REG_OUTPUT_MODE 0x14
+#define AN877_ADC_REG_OUTPUT_ADJUST 0x15
+#define AN877_ADC_REG_OUTPUT_PHASE 0x16
+#define AN877_ADC_REG_OUTPUT_DELAY 0x17
+#define AN877_ADC_REG_VREF 0x18
+#define AN877_ADC_REG_ANALOG_INPUT 0x2C
+
+/* AN877_ADC_REG_TEST_IO */
+#define AN877_ADC_TESTMODE_OFF 0x0
+#define AN877_ADC_TESTMODE_MIDSCALE_SHORT 0x1
+#define AN877_ADC_TESTMODE_POS_FULLSCALE 0x2
+#define AN877_ADC_TESTMODE_NEG_FULLSCALE 0x3
+#define AN877_ADC_TESTMODE_ALT_CHECKERBOARD 0x4
+#define AN877_ADC_TESTMODE_PN23_SEQ 0x5
+#define AN877_ADC_TESTMODE_PN9_SEQ 0x6
+#define AN877_ADC_TESTMODE_ONE_ZERO_TOGGLE 0x7
+#define AN877_ADC_TESTMODE_USER 0x8
+#define AN877_ADC_TESTMODE_BIT_TOGGLE 0x9
+#define AN877_ADC_TESTMODE_SYNC 0xA
+#define AN877_ADC_TESTMODE_ONE_BIT_HIGH 0xB
+#define AN877_ADC_TESTMODE_MIXED_BIT_FREQUENCY 0xC
+#define AN877_ADC_TESTMODE_RAMP 0xF
+
+/* AN877_ADC_REG_TRANSFER */
+#define AN877_ADC_TRANSFER_SYNC 0x1
+
+/* AN877_ADC_REG_OUTPUT_MODE */
+#define AN877_ADC_OUTPUT_MODE_OFFSET_BINARY 0x0
+#define AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT 0x1
+#define AN877_ADC_OUTPUT_MODE_GRAY_CODE 0x2
+
+/* AN877_ADC_REG_OUTPUT_PHASE */
+#define AN877_ADC_OUTPUT_EVEN_ODD_MODE_EN 0x20
+#define AN877_ADC_INVERT_DCO_CLK 0x80
+
+/* AN877_ADC_REG_OUTPUT_DELAY */
+#define AN877_ADC_DCO_DELAY_ENABLE 0x80
+
+/*
+ * Analog Devices AD9467 16-Bit, 200/250 MSPS ADC
+ */
+
+#define CHIPID_AD9467 0x50
+#define AD9467_DEF_OUTPUT_MODE 0x08
+#define AD9467_REG_VREF_MASK 0x0F
+
+enum {
+ ID_AD9467,
+};
+
+struct ad9467_state {
+ struct spi_device *spi;
+ struct clk *clk;
+ unsigned int output_mode;
+
+ struct gpio_desc *pwrdown_gpio;
+ struct gpio_desc *reset_gpio;
+};
+
+static int ad9467_spi_read(struct spi_device *spi, unsigned int reg)
+{
+ unsigned char tbuf[2], rbuf[1];
+ int ret;
+
+ tbuf[0] = 0x80 | (reg >> 8);
+ tbuf[1] = reg & 0xFF;
+
+ ret = spi_write_then_read(spi,
+ tbuf, ARRAY_SIZE(tbuf),
+ rbuf, ARRAY_SIZE(rbuf));
+
+ if (ret < 0)
+ return ret;
+
+ return rbuf[0];
+}
+
+static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
+ unsigned int val)
+{
+ unsigned char buf[3];
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xFF;
+ buf[2] = val;
+
+ return spi_write(spi, buf, ARRAY_SIZE(buf));
+}
+
+static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ if (readval == NULL) {
+ ret = ad9467_spi_write(spi, reg, writeval);
+ ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+ return ret;
+ }
+
+ ret = ad9467_spi_read(spi, reg);
+ if (ret < 0)
+ return ret;
+ *readval = ret;
+
+ return 0;
+}
+
+static const unsigned int ad9467_scale_table[][2] = {
+ {2000, 0}, {2100, 6}, {2200, 7},
+ {2300, 8}, {2400, 9}, {2500, 10},
+};
+
+static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+ unsigned int *val, unsigned int *val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ const struct iio_chan_spec *chan = &info->channels[0];
+ unsigned int tmp;
+
+ tmp = (info->scale_table[index][0] * 1000000ULL) >>
+ chan->scan_type.realbits;
+ *val = tmp / 1000000;
+ *val2 = tmp % 1000000;
+}
+
+#define AD9467_CHAN(_chan, _si, _bits, _sign) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = _sign, \
+ .realbits = _bits, \
+ .storagebits = 16, \
+ }, \
+}
+
+static const struct iio_chan_spec ad9467_channels[] = {
+ AD9467_CHAN(0, 0, 16, 'S'),
+};
+
+static const struct adi_axi_adc_chip_info ad9467_chip_tbl[] = {
+ [ID_AD9467] = {
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
+ },
+};
+
+static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int i, vref_val, vref_mask;
+
+ vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF);
+
+ switch (info->id) {
+ case CHIPID_AD9467:
+ vref_mask = AD9467_REG_VREF_MASK;
+ break;
+ default:
+ vref_mask = 0xFFFF;
+ break;
+ }
+
+ vref_val &= vref_mask;
+
+ for (i = 0; i < info->num_scales; i++) {
+ if (vref_val == info->scale_table[i][1])
+ break;
+ }
+
+ if (i == info->num_scales)
+ return -ERANGE;
+
+ __ad9467_get_scale(conv, i, val, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ unsigned int scale_val[2];
+ unsigned int i;
+
+ if (val != 0)
+ return -EINVAL;
+
+ for (i = 0; i < info->num_scales; i++) {
+ __ad9467_get_scale(conv, i, &scale_val[0], &scale_val[1]);
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+ ad9467_spi_write(st->spi, AN877_ADC_REG_VREF,
+ info->scale_table[i][1]);
+ ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ return ad9467_get_scale(conv, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(st->clk);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ long r_clk;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return ad9467_set_scale(conv, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ r_clk = clk_round_rate(st->clk, val);
+ if (r_clk < 0 || r_clk > info->max_rate) {
+ dev_warn(&st->spi->dev,
+ "Error setting ADC sample rate %ld", r_clk);
+ return -EINVAL;
+ }
+
+ return clk_set_rate(st->clk, r_clk);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
+{
+ int ret;
+
+ ret = ad9467_spi_write(spi, AN877_ADC_REG_OUTPUT_MODE, mode);
+ if (ret < 0)
+ return ret;
+
+ return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER,
+ AN877_ADC_TRANSFER_SYNC);
+}
+
+static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+{
+ struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+
+ return ad9467_outputmode_set(st->spi, st->output_mode);
+}
+
+static int ad9467_setup(struct ad9467_state *st, unsigned int chip_id)
+{
+ switch (chip_id) {
+ case CHIPID_AD9467:
+ st->output_mode = AD9467_DEF_OUTPUT_MODE |
+ AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ad9467_clk_disable(void *data)
+{
+ struct ad9467_state *st = data;
+
+ clk_disable_unprepare(st->clk);
+}
+
+static int ad9467_probe(struct spi_device *spi)
+{
+ const struct adi_axi_adc_chip_info *info;
+ struct adi_axi_adc_conv *conv;
+ struct ad9467_state *st;
+ unsigned int id;
+ int ret;
+
+ info = of_device_get_match_data(&spi->dev);
+ if (!info)
+ return -ENODEV;
+
+ conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
+ if (IS_ERR(conv))
+ return PTR_ERR(conv);
+
+ st = adi_axi_adc_conv_priv(conv);
+ st->spi = spi;
+
+ st->clk = devm_clk_get(&spi->dev, "adc-clk");
+ if (IS_ERR(st->clk))
+ return PTR_ERR(st->clk);
+
+ ret = clk_prepare_enable(st->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, ad9467_clk_disable, st);
+ if (ret)
+ return ret;
+
+ st->pwrdown_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->pwrdown_gpio))
+ return PTR_ERR(st->pwrdown_gpio);
+
+ st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(st->reset_gpio))
+ return PTR_ERR(st->reset_gpio);
+
+ if (st->reset_gpio) {
+ udelay(1);
+ ret = gpiod_direction_output(st->reset_gpio, 1);
+ if (ret)
+ return ret;
+ mdelay(10);
+ }
+
+ spi_set_drvdata(spi, st);
+
+ conv->chip_info = info;
+
+ id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
+ if (id != conv->chip_info->id) {
+ dev_err(&spi->dev, "Unrecognized CHIP_ID 0x%X\n", id);
+ return -ENODEV;
+ }
+
+ conv->reg_access = ad9467_reg_access;
+ conv->write_raw = ad9467_write_raw;
+ conv->read_raw = ad9467_read_raw;
+ conv->preenable_setup = ad9467_preenable_setup;
+
+ return ad9467_setup(st, id);
+}
+
+static const struct of_device_id ad9467_of_match[] = {
+ { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad9467_of_match);
+
+static struct spi_driver ad9467_driver = {
+ .driver = {
+ .name = "ad9467",
+ .of_match_table = ad9467_of_match,
+ },
+ .probe = ad9467_probe,
+};
+module_spi_driver(ad9467_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
+MODULE_LICENSE("GPL v2");
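
As a worked example of the __ad9467_get_scale() arithmetic above (assuming, per the usual IIO convention, that the scale table entries are millivolt spans):

/*
 *   scale_table entry {2000, 0}, 16-bit channel:
 *
 *     tmp   = (2000 * 1000000) >> 16 = 2000000000 / 65536 = 30517
 *     *val  = 30517 / 1000000 = 0
 *     *val2 = 30517 % 1000000 = 30517
 *
 *   so the channel reports a scale of 0.030517 (mV per LSB) through
 *   IIO_VAL_INT_PLUS_MICRO, i.e. roughly 30.5 uV per code for the
 *   2.0 V input span.
 */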
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 8115b6de1d6c..dd3d54b3bc8b 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -70,9 +70,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
switch (size) {
case 3:
- data[1] = val >> 16;
- data[2] = val >> 8;
- data[3] = val;
+ put_unaligned_be24(val, &data[1]);
break;
case 2:
put_unaligned_be16(val, &data[1]);
@@ -157,9 +155,7 @@ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta,
*val = get_unaligned_be32(sigma_delta->data);
break;
case 3:
- *val = (sigma_delta->data[0] << 16) |
- (sigma_delta->data[1] << 8) |
- sigma_delta->data[2];
+ *val = get_unaligned_be24(&sigma_delta->data[0]);
break;
case 2:
*val = get_unaligned_be16(sigma_delta->data);
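
The two ad_sigma_delta hunks above are pure refactoring: the open-coded 24-bit big-endian packing is replaced with the generic helpers from <asm/unaligned.h>. A tiny standalone sketch of the equivalence:

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 be24_roundtrip(u8 buf[3], u32 val)
{
	/* same as: buf[0] = val >> 16; buf[1] = val >> 8; buf[2] = val; */
	put_unaligned_be24(val, buf);

	/* same as: (buf[0] << 16) | (buf[1] << 8) | buf[2], i.e. val & 0xffffff */
	return get_unaligned_be24(buf);
}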
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
new file mode 100644
index 000000000000..c24c8da99eb4
--- /dev/null
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices Generic AXI ADC IP core
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dmaengine.h>
+
+#include <linux/fpga/adi-axi-common.h>
+#include <linux/iio/adc/adi-axi-adc.h>
+
+/**
+ * Register definitions:
+ * https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
+ */
+
+/* ADC controls */
+
+#define ADI_AXI_REG_RSTN 0x0040
+#define ADI_AXI_REG_RSTN_CE_N BIT(2)
+#define ADI_AXI_REG_RSTN_MMCM_RSTN BIT(1)
+#define ADI_AXI_REG_RSTN_RSTN BIT(0)
+
+/* ADC Channel controls */
+
+#define ADI_AXI_REG_CHAN_CTRL(c) (0x0400 + (c) * 0x40)
+#define ADI_AXI_REG_CHAN_CTRL_LB_OWR BIT(11)
+#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
+#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
+#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
+#define ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR BIT(1)
+#define ADI_AXI_REG_CHAN_CTRL_ENABLE BIT(0)
+
+#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
+ (ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT | \
+ ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
+ ADI_AXI_REG_CHAN_CTRL_ENABLE)
+
+struct adi_axi_adc_core_info {
+ unsigned int version;
+};
+
+struct adi_axi_adc_state {
+ struct mutex lock;
+
+ struct adi_axi_adc_client *client;
+ void __iomem *regs;
+};
+
+struct adi_axi_adc_client {
+ struct list_head entry;
+ struct adi_axi_adc_conv conv;
+ struct adi_axi_adc_state *state;
+ struct device *dev;
+ const struct adi_axi_adc_core_info *info;
+};
+
+static LIST_HEAD(registered_clients);
+static DEFINE_MUTEX(registered_clients_lock);
+
+static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
+{
+ return container_of(conv, struct adi_axi_adc_client, conv);
+}
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
+{
+ struct adi_axi_adc_client *cl = conv_to_client(conv);
+
+ return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+}
+EXPORT_SYMBOL_GPL(adi_axi_adc_conv_priv);
+
+static void adi_axi_adc_write(struct adi_axi_adc_state *st,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, st->regs + reg);
+}
+
+static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st,
+ unsigned int reg)
+{
+ return ioread32(st->regs + reg);
+}
+
+static int adi_axi_adc_config_dma_buffer(struct device *dev,
+ struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer;
+ const char *dma_name;
+
+ if (!device_property_present(dev, "dmas"))
+ return 0;
+
+ if (device_property_read_string(dev, "dma-names", &dma_name))
+ dma_name = "rx";
+
+ buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
+ dma_name);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ iio_device_attach_buffer(indio_dev, buffer);
+
+ return 0;
+}
+
+static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->read_raw)
+ return -EOPNOTSUPP;
+
+ return conv->read_raw(conv, chan, val, val2, mask);
+}
+
+static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ if (!conv->write_raw)
+ return -EOPNOTSUPP;
+
+ return conv->write_raw(conv, chan, val, val2, mask);
+}
+
+static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ unsigned int i, ctrl;
+
+ for (i = 0; i < conv->chip_info->num_channels; i++) {
+ ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i));
+
+ if (test_bit(i, scan_mask))
+ ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE;
+ else
+ ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE;
+
+ adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl);
+ }
+
+ return 0;
+}
+
+static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct adi_axi_adc_client *cl;
+ size_t alloc_size;
+
+ alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN);
+ if (sizeof_priv)
+ alloc_size += ALIGN(sizeof_priv, IIO_ALIGN);
+
+ cl = kzalloc(alloc_size, GFP_KERNEL);
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&registered_clients_lock);
+
+ cl->dev = get_device(dev);
+
+ list_add_tail(&cl->entry, &registered_clients);
+
+ mutex_unlock(&registered_clients_lock);
+
+ return &cl->conv;
+}
+
+static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
+{
+ struct adi_axi_adc_client *cl = conv_to_client(conv);
+
+ mutex_lock(&registered_clients_lock);
+
+ list_del(&cl->entry);
+ put_device(cl->dev);
+
+ mutex_unlock(&registered_clients_lock);
+
+ kfree(cl);
+}
+
+static void devm_adi_axi_adc_conv_release(struct device *dev, void *res)
+{
+ adi_axi_adc_conv_unregister(*(struct adi_axi_adc_conv **)res);
+}
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct adi_axi_adc_conv **ptr, *conv;
+
+ ptr = devres_alloc(devm_adi_axi_adc_conv_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ conv = adi_axi_adc_conv_register(dev, sizeof_priv);
+ if (IS_ERR(conv)) {
+ devres_free(ptr);
+ return ERR_CAST(conv);
+ }
+
+ *ptr = conv;
+ devres_add(dev, ptr);
+
+ return conv;
+}
+EXPORT_SYMBOL_GPL(devm_adi_axi_adc_conv_register);
+
+static ssize_t in_voltage_scale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < conv->chip_info->num_scales; i++) {
+ const unsigned int *s = conv->chip_info->scale_table[i];
+
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%u.%06u ", s[0], s[1]);
+ }
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
+
+enum {
+ ADI_AXI_ATTR_SCALE_AVAIL,
+};
+
+#define ADI_AXI_ATTR(_en_, _file_) \
+ [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr
+
+static struct attribute *adi_axi_adc_attributes[] = {
+ ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available),
+ NULL
+};
+
+static umode_t axi_adc_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct adi_axi_adc_state *st = iio_priv(indio_dev);
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+
+ switch (n) {
+ case ADI_AXI_ATTR_SCALE_AVAIL:
+ if (!conv->chip_info->num_scales)
+ return 0;
+ return attr->mode;
+ default:
+ return attr->mode;
+ }
+}
+
+static const struct attribute_group adi_axi_adc_attribute_group = {
+ .attrs = adi_axi_adc_attributes,
+ .is_visible = axi_adc_attr_is_visible,
+};
+
+static const struct iio_info adi_axi_adc_info = {
+ .read_raw = &adi_axi_adc_read_raw,
+ .write_raw = &adi_axi_adc_write_raw,
+ .attrs = &adi_axi_adc_attribute_group,
+ .update_scan_mode = &adi_axi_adc_update_scan_mode,
+};
+
+static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+};
+
+static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+{
+ const struct adi_axi_adc_core_info *info;
+ struct adi_axi_adc_client *cl;
+ struct device_node *cln;
+
+ info = of_device_get_match_data(dev);
+ if (!info)
+ return ERR_PTR(-ENODEV);
+
+ cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
+ if (!cln) {
+ dev_err(dev, "No 'adi,adc-dev' node defined\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&registered_clients_lock);
+
+ list_for_each_entry(cl, &registered_clients, entry) {
+ if (!cl->dev)
+ continue;
+
+ if (cl->dev->of_node != cln)
+ continue;
+
+ if (!try_module_get(dev->driver->owner)) {
+ mutex_unlock(&registered_clients_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ get_device(dev);
+ cl->info = info;
+ mutex_unlock(&registered_clients_lock);
+ return cl;
+ }
+
+ mutex_unlock(&registered_clients_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int adi_axi_adc_setup_channels(struct device *dev,
+ struct adi_axi_adc_state *st)
+{
+ struct adi_axi_adc_conv *conv = &st->client->conv;
+ int i, ret;
+
+ if (conv->preenable_setup) {
+ ret = conv->preenable_setup(conv);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < conv->chip_info->num_channels; i++) {
+ adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i),
+ ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
+ }
+
+ return 0;
+}
+
+static void axi_adc_reset(struct adi_axi_adc_state *st)
+{
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0);
+ mdelay(10);
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN);
+ mdelay(10);
+ adi_axi_adc_write(st, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+}
+
+static void adi_axi_adc_cleanup(void *data)
+{
+ struct adi_axi_adc_client *cl = data;
+
+ put_device(cl->dev);
+ module_put(cl->dev->driver->owner);
+}
+
+static int adi_axi_adc_probe(struct platform_device *pdev)
+{
+ struct adi_axi_adc_conv *conv;
+ struct iio_dev *indio_dev;
+ struct adi_axi_adc_client *cl;
+ struct adi_axi_adc_state *st;
+ unsigned int ver;
+ int ret;
+
+ cl = adi_axi_adc_attach_client(&pdev->dev);
+ if (IS_ERR(cl))
+ return PTR_ERR(cl);
+
+ ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
+ if (ret)
+ return ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->client = cl;
+ cl->state = st;
+ mutex_init(&st->lock);
+
+ st->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(st->regs))
+ return PTR_ERR(st->regs);
+
+ conv = &st->client->conv;
+
+ axi_adc_reset(st);
+
+ ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION);
+
+ if (cl->info->version > ver) {
+ dev_err(&pdev->dev,
+ "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
+ ADI_AXI_PCORE_VER_MINOR(cl->info->version),
+ ADI_AXI_PCORE_VER_PATCH(cl->info->version),
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+ return -ENODEV;
+ }
+
+ indio_dev->info = &adi_axi_adc_info;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = "adi-axi-adc";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = conv->chip_info->num_channels;
+ indio_dev->channels = conv->chip_info->channels;
+
+ ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = adi_axi_adc_setup_channels(&pdev->dev, st);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
+ ADI_AXI_PCORE_VER_MAJOR(ver),
+ ADI_AXI_PCORE_VER_MINOR(ver),
+ ADI_AXI_PCORE_VER_PATCH(ver));
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id adi_axi_adc_of_match[] = {
+ { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
+ { /* end of list */ }
+};
+MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
+
+static struct platform_driver adi_axi_adc_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = adi_axi_adc_of_match,
+ },
+ .probe = adi_axi_adc_probe,
+};
+module_platform_driver(adi_axi_adc_driver);
+
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
+MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
+MODULE_LICENSE("GPL v2");
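
For reference, adi_axi_adc_conv_priv() above hands each converter driver a private area placed right after its client structure, much like iio_priv() does for an iio_dev. The allocation made by adi_axi_adc_conv_register(), sketched as a comment:

/*
 *   kzalloc(ALIGN(sizeof(struct adi_axi_adc_client), IIO_ALIGN) +
 *           ALIGN(sizeof_priv, IIO_ALIGN))
 *
 *   | struct adi_axi_adc_client | pad to IIO_ALIGN | sizeof_priv bytes |
 *   ^ allocation start                             ^ adi_axi_adc_conv_priv()
 */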
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 9d96f7d08b95..9abbbdcc7420 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -100,6 +101,8 @@
#define AT91_SAMA5D2_IER_YRDY BIT(21)
/* Interrupt Enable Register - TS pressure measurement ready */
#define AT91_SAMA5D2_IER_PRDY BIT(22)
+/* Interrupt Enable Register - Data ready */
+#define AT91_SAMA5D2_IER_DRDY BIT(24)
/* Interrupt Enable Register - general overrun error */
#define AT91_SAMA5D2_IER_GOVRE BIT(25)
/* Interrupt Enable Register - Pen detect */
@@ -486,6 +489,21 @@ static inline int at91_adc_of_xlate(struct iio_dev *indio_dev,
return at91_adc_chan_xlate(indio_dev, iiospec->args[0]);
}
+static unsigned int at91_adc_active_scan_mask_to_reg(struct iio_dev *indio_dev)
+{
+ u32 mask = 0;
+ u8 bit;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ mask |= BIT(chan->channel);
+ }
+
+ return mask & GENMASK(11, 0);
+}
+
static void at91_adc_config_emr(struct at91_adc_state *st)
{
/* configure the extended mode register */
@@ -710,7 +728,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
struct iio_dev *indio = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(indio);
u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR);
- u8 bit;
/* clear TRGMOD */
status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK;
@@ -721,50 +738,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
/* set/unset hw trigger */
at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
- for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
- struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
- u32 cor;
-
- if (!chan)
- continue;
- /* these channel types cannot be handled by this trigger */
- if (chan->type == IIO_POSITIONRELATIVE ||
- chan->type == IIO_PRESSURE)
- continue;
-
- if (state) {
- cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
-
- if (chan->differential)
- cor |= (BIT(chan->channel) |
- BIT(chan->channel2)) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET;
- else
- cor &= ~(BIT(chan->channel) <<
- AT91_SAMA5D2_COR_DIFF_OFFSET);
-
- at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
- }
-
- if (state) {
- at91_adc_writel(st, AT91_SAMA5D2_CHER,
- BIT(chan->channel));
- /* enable irq only if not using DMA */
- if (!st->dma_st.dma_chan) {
- at91_adc_writel(st, AT91_SAMA5D2_IER,
- BIT(chan->channel));
- }
- } else {
- /* disable irq only if not using DMA */
- if (!st->dma_st.dma_chan) {
- at91_adc_writel(st, AT91_SAMA5D2_IDR,
- BIT(chan->channel));
- }
- at91_adc_writel(st, AT91_SAMA5D2_CHDR,
- BIT(chan->channel));
- }
- }
-
return 0;
}
@@ -781,6 +754,7 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
/* Needed to ACK the DRDY interruption */
at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+
return 0;
}
@@ -888,18 +862,37 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
return 0;
}
-static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+static bool at91_adc_buffer_check_use_irq(struct iio_dev *indio,
+ struct at91_adc_state *st)
+{
+ /* if using DMA, we do not use our own IRQ (the DMA controller's IRQ is used) */
+ if (st->dma_st.dma_chan)
+ return false;
+ /* if the trigger is not ours, then it has its own IRQ */
+ if (iio_trigger_validate_own_device(indio->trig, indio))
+ return false;
+ return true;
+}
+
+static bool at91_adc_current_chan_is_touch(struct iio_dev *indio_dev)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ return !!bitmap_subset(indio_dev->active_scan_mask,
+ &st->touch_st.channels_bitmask,
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1);
+}
+
+static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
{
int ret;
+ u8 bit;
struct at91_adc_state *st = iio_priv(indio_dev);
/* check if we are enabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen enabling */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, true);
- }
+
/* if we are not in triggered mode, we cannot enable the buffer. */
if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
@@ -911,41 +904,65 @@ static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
return ret;
}
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->num_channels) {
+ struct iio_chan_spec const *chan =
+ at91_adc_chan_get(indio_dev, bit);
+ u32 cor;
+
+ if (!chan)
+ continue;
+ /* these channel types cannot be handled by this trigger */
+ if (chan->type == IIO_POSITIONRELATIVE ||
+ chan->type == IIO_PRESSURE)
+ continue;
+
+ cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+ if (chan->differential)
+ cor |= (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+ else
+ cor &= ~(BIT(chan->channel) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ }
+
+ if (at91_adc_buffer_check_use_irq(indio_dev, st))
+ at91_adc_writel(st, AT91_SAMA5D2_IER, AT91_SAMA5D2_IER_DRDY);
+
+ return 0;
+}
+
+static int at91_adc_buffer_postenable(struct iio_dev *indio_dev)
+{
+ if (at91_adc_current_chan_is_touch(indio_dev))
+ return 0;
+
return iio_triggered_buffer_postenable(indio_dev);
}
-static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- int ret;
u8 bit;
/* check if we are disabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen disable */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, false);
- }
+
/* if we are not in triggered mode, nothing to do here */
if (!(indio_dev->currentmode & INDIO_ALL_TRIGGERED_MODES))
return -EINVAL;
- /* continue with the triggered buffer */
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- dev_err(&indio_dev->dev, "buffer predisable failed\n");
-
- if (!st->dma_st.dma_chan)
- return ret;
-
- /* if we are using DMA we must clear registers and end DMA */
- dmaengine_terminate_sync(st->dma_st.dma_chan);
-
/*
- * For each enabled channel we must read the last converted value
+ * For each enabled channel we must disable it in hardware.
+ * In the case of DMA, we must read the last converted value
* to clear EOC status and not get a possible interrupt later.
- * This value is being read by DMA from LCDR anyway
+ * This value is being read by DMA from LCDR anyway, so it's not lost.
*/
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -958,16 +975,37 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
if (chan->type == IIO_POSITIONRELATIVE ||
chan->type == IIO_PRESSURE)
continue;
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
if (st->dma_st.dma_chan)
at91_adc_readl(st, chan->address);
}
+ if (at91_adc_buffer_check_use_irq(indio_dev, st))
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, AT91_SAMA5D2_IER_DRDY);
+
/* read overflow register to clear possible overflow status */
at91_adc_readl(st, AT91_SAMA5D2_OVER);
- return ret;
+
+ /* if we are using DMA we must clear registers and end DMA */
+ if (st->dma_st.dma_chan)
+ dmaengine_terminate_sync(st->dma_st.dma_chan);
+
+ return 0;
+}
+
+static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+{
+ if (at91_adc_current_chan_is_touch(indio_dev))
+ return 0;
+
+ return iio_triggered_buffer_predisable(indio_dev);
}
static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+ .preenable = &at91_adc_buffer_preenable,
+ .postdisable = &at91_adc_buffer_postdisable,
.postenable = &at91_adc_buffer_postenable,
.predisable = &at91_adc_buffer_predisable,
};
@@ -1015,6 +1053,22 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
int i = 0;
int val;
u8 bit;
+ u32 mask = at91_adc_active_scan_mask_to_reg(indio_dev);
+ unsigned int timeout = 50;
+
+ /*
+ * Check if the conversion is ready. If not, wait a little bit, and
+ * in case of timeout bail out without reporting data.
+ */
+ while ((at91_adc_readl(st, AT91_SAMA5D2_ISR) & mask) != mask &&
+ timeout) {
+ usleep_range(50, 100);
+ timeout--;
+ }
+
+ /* Cannot read data, not ready. Continue without reporting data */
+ if (!timeout)
+ return;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->num_channels) {
@@ -1102,6 +1156,13 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct at91_adc_state *st = iio_priv(indio_dev);
+ /*
+ * If it's not our trigger, start a conversion now, since in that
+ * case we are actually polling the trigger.
+ */
+ if (iio_trigger_validate_own_device(indio_dev->trig, indio_dev))
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
if (st->dma_st.dma_chan)
at91_adc_trigger_handler_dma(indio_dev);
else
@@ -1114,20 +1175,9 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
static int at91_adc_buffer_init(struct iio_dev *indio)
{
- struct at91_adc_state *st = iio_priv(indio);
-
- if (st->selected_trig->hw_trig) {
- return devm_iio_triggered_buffer_setup(&indio->dev, indio,
- &iio_pollfunc_store_time,
- &at91_adc_trigger_handler, &at91_buffer_setup_ops);
- }
- /*
- * we need to prepare the buffer ops in case we will get
- * another buffer attached (like a callback buffer for the touchscreen)
- */
- indio->setup_ops = &at91_buffer_setup_ops;
-
- return 0;
+ return devm_iio_triggered_buffer_setup(&indio->dev, indio,
+ &iio_pollfunc_store_time,
+ &at91_adc_trigger_handler, &at91_buffer_setup_ops);
}
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -1281,7 +1331,8 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
status = at91_adc_readl(st, AT91_SAMA5D2_XPOSR);
status = at91_adc_readl(st, AT91_SAMA5D2_YPOSR);
status = at91_adc_readl(st, AT91_SAMA5D2_PRESSR);
- } else if (iio_buffer_enabled(indio) && !st->dma_st.dma_chan) {
+ } else if (iio_buffer_enabled(indio) &&
+ (status & AT91_SAMA5D2_IER_DRDY)) {
/* triggered buffer without DMA */
disable_irq_nosync(irq);
iio_trigger_poll(indio->trig);
@@ -1901,14 +1952,10 @@ static __maybe_unused int at91_adc_resume(struct device *dev)
return 0;
/* check if we are enabling triggered buffer or the touchscreen */
- if (bitmap_subset(indio_dev->active_scan_mask,
- &st->touch_st.channels_bitmask,
- AT91_SAMA5D2_MAX_CHAN_IDX + 1)) {
- /* touchscreen enabling */
+ if (at91_adc_current_chan_is_touch(indio_dev))
return at91_adc_configure_touch(st, true);
- } else {
+ else
return at91_adc_configure_trigger(st->trig, true);
- }
/* not needed but more explicit */
return 0;
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index abe99856c823..0368b6dc6d60 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -1152,7 +1152,6 @@ static int at91_adc_probe(struct platform_device *pdev)
int ret;
struct iio_dev *idev;
struct at91_adc_state *st;
- struct resource *res;
u32 reg;
idev = devm_iio_device_alloc(&pdev->dev, sizeof(struct at91_adc_state));
@@ -1182,9 +1181,7 @@ static int at91_adc_probe(struct platform_device *pdev)
if (st->irq < 0)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- st->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ st->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(st->reg_base))
return PTR_ERR(st->reg_base);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 22131a677445..6bda4f4d89fe 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -449,9 +449,6 @@ static void exynos_adc_exynos7_init_hw(struct exynos_adc *info)
{
u32 con1, con2;
- if (info->data->needs_adc_phy)
- regmap_write(info->pmu_map, info->data->phy_offset, 1);
-
con1 = ADC_V2_CON1_SOFT_RESET;
writel(con1, ADC_V2_CON1(info->regs));
@@ -531,8 +528,19 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
unsigned long timeout;
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
+ if (mask == IIO_CHAN_INFO_SCALE) {
+ ret = regulator_get_voltage(info->vdd);
+ if (ret < 0)
+ return ret;
+
+ /* Regulator voltage is in uV, but we need mV */
+ *val = ret / 1000;
+ *val2 = info->data->mask;
+
+ return IIO_VAL_FRACTIONAL;
+ } else if (mask != IIO_CHAN_INFO_RAW) {
return -EINVAL;
+ }
mutex_lock(&indio_dev->mlock);
reinit_completion(&info->completion);
@@ -683,6 +691,7 @@ static const struct iio_info exynos_adc_iio_info = {
.channel = _index, \
.address = _index, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
.datasheet_name = _id, \
}
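
The SCALE readout added to exynos_read_raw() above reports the regulator voltage in millivolts as the numerator and the chip's full-scale mask as the denominator, i.e. millivolts per LSB. A minimal sketch of the equivalent conversion (not part of this patch; the helper name is invented):

static inline unsigned int exynos_adc_raw_to_mv(unsigned int raw,
						unsigned int vdd_mv,
						unsigned int mask)
{
	/* IIO_VAL_FRACTIONAL with *val = vdd_mv and *val2 = mask */
	return raw * vdd_mv / mask;
}

With a 1.8 V supply and a 12-bit mask (0xfff), one LSB works out to roughly 0.44 mV.
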
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
index fa71489195c6..b0a4dc88ba9b 100644
--- a/drivers/iio/adc/fsl-imx25-gcq.c
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -294,7 +294,6 @@ static int mx25_gcq_probe(struct platform_device *pdev)
struct mx25_gcq_priv *priv;
struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *mem;
int ret;
int i;
@@ -305,8 +304,7 @@ static int mx25_gcq_probe(struct platform_device *pdev)
priv = iio_priv(indio_dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(dev, res);
+ mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/iio/adc/intel_mrfld_adc.c b/drivers/iio/adc/intel_mrfld_adc.c
index c35a1beb817c..a6d2e1f27e76 100644
--- a/drivers/iio/adc/intel_mrfld_adc.c
+++ b/drivers/iio/adc/intel_mrfld_adc.c
@@ -75,7 +75,7 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
struct regmap *regmap = adc->regmap;
unsigned int req;
long timeout;
- u8 buf[2];
+ __be16 value;
int ret;
reinit_completion(&adc->completion);
@@ -105,11 +105,11 @@ static int mrfld_adc_single_conv(struct iio_dev *indio_dev,
goto done;
}
- ret = regmap_bulk_read(regmap, chan->address, buf, 2);
+ ret = regmap_bulk_read(regmap, chan->address, &value, sizeof(value));
if (ret)
goto done;
- *result = get_unaligned_be16(buf);
+ *result = be16_to_cpu(value);
ret = IIO_VAL_INT;
done:
diff --git a/drivers/iio/adc/max1241.c b/drivers/iio/adc/max1241.c
new file mode 100644
index 000000000000..541939c7abca
--- /dev/null
+++ b/drivers/iio/adc/max1241.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MAX1241 low-power, 12-bit serial ADC
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX1240-MAX1241.pdf
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#define MAX1241_VAL_MASK GENMASK(11, 0)
+#define MAX1241_SHUTDOWN_DELAY_USEC 4
+
+enum max1241_id {
+ max1241,
+};
+
+struct max1241 {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct regulator *vdd;
+ struct regulator *vref;
+ struct gpio_desc *shutdown;
+
+ __be16 data ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec max1241_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int max1241_read(struct max1241 *adc)
+{
+ struct spi_transfer xfers[] = {
+ /*
+ * Begin conversion by bringing /CS low for at least
+ * tconv us.
+ */
+ {
+ .len = 0,
+ .delay.value = 8,
+ .delay.unit = SPI_DELAY_UNIT_USECS,
+ },
+ /*
+ * Then read two bytes of data in our RX buffer.
+ */
+ {
+ .rx_buf = &adc->data,
+ .len = 2,
+ },
+ };
+
+ return spi_sync_transfer(adc->spi, xfers, ARRAY_SIZE(xfers));
+}
+
+static int max1241_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret, vref_uV;
+ struct max1241 *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+
+ if (adc->shutdown) {
+ gpiod_set_value(adc->shutdown, 0);
+ udelay(MAX1241_SHUTDOWN_DELAY_USEC);
+ ret = max1241_read(adc);
+ gpiod_set_value(adc->shutdown, 1);
+ } else
+ ret = max1241_read(adc);
+
+ if (ret) {
+ mutex_unlock(&adc->lock);
+ return ret;
+ }
+
+ *val = (be16_to_cpu(adc->data) >> 3) & MAX1241_VAL_MASK;
+
+ mutex_unlock(&adc->lock);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref_uV = regulator_get_voltage(adc->vref);
+
+ if (vref_uV < 0)
+ return vref_uV;
+
+ *val = vref_uV / 1000;
+ *val2 = 12;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info max1241_info = {
+ .read_raw = max1241_read_raw,
+};
+
+static void max1241_disable_vdd_action(void *data)
+{
+ struct max1241 *adc = data;
+ struct device *dev = &adc->spi->dev;
+ int err;
+
+ err = regulator_disable(adc->vdd);
+ if (err)
+ dev_err(dev, "could not disable vdd regulator.\n");
+}
+
+static void max1241_disable_vref_action(void *data)
+{
+ struct max1241 *adc = data;
+ struct device *dev = &adc->spi->dev;
+ int err;
+
+ err = regulator_disable(adc->vref);
+ if (err)
+ dev_err(dev, "could not disable vref regulator.\n");
+}
+
+static int max1241_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct max1241 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ spi_set_drvdata(spi, indio_dev);
+
+ adc->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(adc->vdd)) {
+ dev_err(dev, "failed to get vdd regulator\n");
+ return PTR_ERR(adc->vdd);
+ }
+
+ ret = regulator_enable(adc->vdd);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, max1241_disable_vdd_action, adc);
+ if (ret) {
+ dev_err(dev, "could not set up vdd regulator cleanup action\n");
+ return ret;
+ }
+
+ adc->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(adc->vref)) {
+ dev_err(dev, "failed to get vref regulator\n");
+ return PTR_ERR(adc->vref);
+ }
+
+ ret = regulator_enable(adc->vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, max1241_disable_vref_action, adc);
+ if (ret) {
+ dev_err(dev, "could not set up vref regulator cleanup action\n");
+ return ret;
+ }
+
+ adc->shutdown = devm_gpiod_get_optional(dev, "shutdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(adc->shutdown))
+ return PTR_ERR(adc->shutdown);
+
+ if (adc->shutdown)
+ dev_dbg(dev, "shutdown pin passed, low-power mode enabled");
+ else
+ dev_dbg(dev, "no shutdown pin passed, low-power mode disabled");
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &max1241_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max1241_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max1241_channels);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id max1241_id[] = {
+ { "max1241", max1241 },
+ {}
+};
+
+static const struct of_device_id max1241_dt_ids[] = {
+ { .compatible = "maxim,max1241" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max1241_dt_ids);
+
+static struct spi_driver max1241_spi_driver = {
+ .driver = {
+ .name = "max1241",
+ .of_match_table = max1241_dt_ids,
+ },
+ .probe = max1241_probe,
+ .id_table = max1241_id,
+};
+module_spi_driver(max1241_spi_driver);
+
+MODULE_AUTHOR("Alexandru Lazar <alazar@startmail.com>");
+MODULE_DESCRIPTION("MAX1241 ADC driver");
+MODULE_LICENSE("GPL v2");
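
As a rough note on the framing handled above (a sketch, not taken from the driver; the helper name is invented): the raw sample is bits 14..3 of the big-endian word returned over SPI, and the SCALE of vref_mV with IIO_VAL_FRACTIONAL_LOG2 and val2 = 12 means one LSB corresponds to vref/4096. Assuming word has already been through be16_to_cpu():

static inline unsigned int max1241_sample_to_mv(u16 word, unsigned int vref_mv)
{
	unsigned int raw = (word >> 3) & 0xfff;	/* bits 14..3 of the frame */

	/* IIO_VAL_FRACTIONAL_LOG2 with *val2 = 12: scale = vref_mv / 2^12 */
	return raw * vref_mv / 4096;
}
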
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 5c2cc61b666e..9d92017c79b2 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -150,6 +150,7 @@ struct max1363_chip_info {
* @current_mode: the scan mode of this chip
* @requestedmask: a valid requested set of channels
* @reg: supply regulator
+ * @lock: lock to ensure state is consistent
* @monitor_on: whether monitor mode is enabled
* @monitor_speed: parameter corresponding to device monitor speed setting
* @mask_high: bitmask for enabled high thresholds
@@ -169,6 +170,7 @@ struct max1363_state {
const struct max1363_mode *current_mode;
u32 requestedmask;
struct regulator *reg;
+ struct mutex lock;
/* Using monitor modes and buffer at the same time is
currently not supported */
@@ -364,7 +366,11 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
/*
* If monitor mode is enabled, the method for reading a single
* channel will have to be rather different and has not yet
@@ -372,7 +378,7 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
*
* Also, cannot read directly if buffered capture enabled.
*/
- if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
+ if (st->monitor_on) {
ret = -EBUSY;
goto error_ret;
}
@@ -404,8 +410,10 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
data = rxbuf[0];
}
*val = data;
+
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -705,9 +713,9 @@ static ssize_t max1363_monitor_store_freq(struct device *dev,
if (!found)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->monitor_speed = i;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -810,12 +818,12 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
int val;
int number = chan->channel;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (dir == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
else
val = (1 << number) & st->mask_high;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return val;
}
@@ -962,7 +970,11 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
u16 unifiedmask;
int number = chan->channel;
- mutex_lock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+ mutex_lock(&st->lock);
+
unifiedmask = st->mask_low | st->mask_high;
if (dir == IIO_EV_DIR_FALLING) {
@@ -989,7 +1001,8 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
error_ret:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -1587,6 +1600,7 @@ static int max1363_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
st->reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(st->reg)) {
ret = PTR_ERR(st->reg);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index ea24d7c58b12..d86c0b5d80a3 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/of.h>
+#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -117,11 +118,11 @@ static int mcp3422_read(struct mcp3422 *adc, int *value, u8 *config)
if (sample_rate == MCP3422_SRATE_3) {
ret = i2c_master_recv(adc->i2c, buf, 4);
- temp = buf[0] << 16 | buf[1] << 8 | buf[2];
+ temp = get_unaligned_be24(&buf[0]);
*config = buf[3];
} else {
ret = i2c_master_recv(adc->i2c, buf, 3);
- temp = buf[0] << 8 | buf[1];
+ temp = get_unaligned_be16(&buf[0]);
*config = buf[2];
}
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
new file mode 100644
index 000000000000..331a9a728217
--- /dev/null
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MP2629 ADC driver
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MP2629_REG_ADC_CTRL 0x03
+#define MP2629_REG_BATT_VOLT 0x0e
+#define MP2629_REG_SYSTEM_VOLT 0x0f
+#define MP2629_REG_INPUT_VOLT 0x11
+#define MP2629_REG_BATT_CURRENT 0x12
+#define MP2629_REG_INPUT_CURRENT 0x13
+
+#define MP2629_ADC_START BIT(7)
+#define MP2629_ADC_CONTINUOUS BIT(6)
+
+#define MP2629_MAP(_mp, _mpc) IIO_MAP(#_mp, "mp2629_charger", "mp2629-"_mpc)
+
+#define MP2629_ADC_CHAN(_ch, _type) { \
+ .type = _type, \
+ .indexed = 1, \
+ .datasheet_name = #_ch, \
+ .channel = MP2629_ ## _ch, \
+ .address = MP2629_REG_ ## _ch, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+struct mp2629_adc {
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+static struct iio_chan_spec mp2629_channels[] = {
+ MP2629_ADC_CHAN(BATT_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(SYSTEM_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(INPUT_VOLT, IIO_VOLTAGE),
+ MP2629_ADC_CHAN(BATT_CURRENT, IIO_CURRENT),
+ MP2629_ADC_CHAN(INPUT_CURRENT, IIO_CURRENT)
+};
+
+static struct iio_map mp2629_adc_maps[] = {
+ MP2629_MAP(BATT_VOLT, "batt-volt"),
+ MP2629_MAP(SYSTEM_VOLT, "system-volt"),
+ MP2629_MAP(INPUT_VOLT, "input-volt"),
+ MP2629_MAP(BATT_CURRENT, "batt-current"),
+ MP2629_MAP(INPUT_CURRENT, "input-current")
+};
+
+static int mp2629_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct mp2629_adc *info = iio_priv(indio_dev);
+ unsigned int rval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(info->regmap, chan->address, &rval);
+ if (ret)
+ return ret;
+
+ if (chan->address == MP2629_INPUT_VOLT)
+ rval &= GENMASK(6, 0);
+ *val = rval;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case MP2629_BATT_VOLT:
+ case MP2629_SYSTEM_VOLT:
+ *val = 20;
+ return IIO_VAL_INT;
+
+ case MP2629_INPUT_VOLT:
+ *val = 60;
+ return IIO_VAL_INT;
+
+ case MP2629_BATT_CURRENT:
+ *val = 175;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+
+ case MP2629_INPUT_CURRENT:
+ *val = 133;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mp2629_adc_info = {
+ .read_raw = &mp2629_read_raw,
+};
+
+static int mp2629_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mp2629_data *ddata = dev_get_drvdata(dev->parent);
+ struct mp2629_adc *info;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+ info->regmap = ddata->regmap;
+ info->dev = dev;
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS,
+ MP2629_ADC_START | MP2629_ADC_CONTINUOUS);
+ if (ret) {
+ dev_err(dev, "adc enable fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_map_array_register(indio_dev, mp2629_adc_maps);
+ if (ret) {
+ dev_err(dev, "IIO maps register fail: %d\n", ret);
+ goto fail_disable;
+ }
+
+ indio_dev->name = "mp2629-adc";
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = mp2629_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mp2629_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &mp2629_adc_info;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "IIO device register fail: %d\n", ret);
+ goto fail_map_unregister;
+ }
+
+ return 0;
+
+fail_map_unregister:
+ iio_map_array_unregister(indio_dev);
+
+fail_disable:
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS, 0);
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START, 0);
+
+ return ret;
+}
+
+static int mp2629_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct mp2629_adc *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ iio_map_array_unregister(indio_dev);
+
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_CONTINUOUS, 0);
+ regmap_update_bits(info->regmap, MP2629_REG_ADC_CTRL,
+ MP2629_ADC_START, 0);
+
+ return 0;
+}
+
+static const struct of_device_id mp2629_adc_of_match[] = {
+ { .compatible = "mps,mp2629_adc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2629_adc_of_match);
+
+static struct platform_driver mp2629_adc_driver = {
+ .driver = {
+ .name = "mp2629_adc",
+ .of_match_table = mp2629_adc_of_match,
+ },
+ .probe = mp2629_adc_probe,
+ .remove = mp2629_adc_remove,
+};
+module_platform_driver(mp2629_adc_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 ADC driver");
+MODULE_LICENSE("GPL");
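
Because the channels above are exported through iio_map for the charger cell, a consumer can look them up by the mapped names. A minimal sketch under that assumption (the helper is hypothetical and not part of this driver), using the standard IIO consumer API:

#include <linux/err.h>
#include <linux/iio/consumer.h>

/* dev is assumed to be the "mp2629_charger" consumer device named in the map */
static int mp2629_get_batt_mv(struct device *dev, int *mv)
{
	struct iio_channel *chan;
	int raw, ret;

	chan = devm_iio_channel_get(dev, "mp2629-batt-volt");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	*mv = raw * 20;	/* BATT_VOLT scale above is 20 mV per LSB */
	return 0;
}
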
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 2df88d2b880a..0e2068ec068b 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -65,12 +65,14 @@ struct stm32_adc_priv;
* @clk_sel: clock selection routine
* @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet)
* @has_syscfg: SYSCFG capability flags
+ * @num_irqs: number of interrupt lines
*/
struct stm32_adc_priv_cfg {
const struct stm32_adc_common_regs *regs;
int (*clk_sel)(struct platform_device *, struct stm32_adc_priv *);
u32 max_clk_rate_hz;
unsigned int has_syscfg;
+ unsigned int num_irqs;
};
/**
@@ -375,21 +377,15 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
unsigned int i;
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
+ /*
+ * Interrupt(s) must be provided, depending on the compatible:
+ * - stm32f4/h7 shares a common interrupt line.
+ * - stm32mp1 has one line per ADC
+ */
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
priv->irq[i] = platform_get_irq(pdev, i);
- if (priv->irq[i] < 0) {
- /*
- * At least one interrupt must be provided, make others
- * optional:
- * - stm32f4/h7 shares a common interrupt.
- * - stm32mp1, has one line per ADC (either for ADC1,
- * ADC2 or both).
- */
- if (i && priv->irq[i] == -ENXIO)
- continue;
-
+ if (priv->irq[i] < 0)
return priv->irq[i];
- }
}
priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
@@ -400,9 +396,7 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return -ENOMEM;
}
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
- if (priv->irq[i] < 0)
- continue;
+ for (i = 0; i < priv->cfg->num_irqs; i++) {
irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
irq_set_handler_data(priv->irq[i], priv);
}
@@ -420,11 +414,8 @@ static void stm32_adc_irq_remove(struct platform_device *pdev,
irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
irq_domain_remove(priv->domain);
- for (i = 0; i < STM32_ADC_MAX_ADCS; i++) {
- if (priv->irq[i] < 0)
- continue;
+ for (i = 0; i < priv->cfg->num_irqs; i++)
irq_set_chained_handler(priv->irq[i], NULL);
- }
}
static int stm32_adc_core_switches_supply_en(struct stm32_adc_priv *priv,
@@ -817,6 +808,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = {
.regs = &stm32f4_adc_common_regs,
.clk_sel = stm32f4_adc_clk_sel,
.max_clk_rate_hz = 36000000,
+ .num_irqs = 1,
};
static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
@@ -824,6 +816,7 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 36000000,
.has_syscfg = HAS_VBOOSTER,
+ .num_irqs = 1,
};
static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
@@ -831,6 +824,7 @@ static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = {
.clk_sel = stm32h7_adc_clk_sel,
.max_clk_rate_hz = 40000000,
.has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD,
+ .num_irqs = 2,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 176e1cb4abb1..0f2c1738a90d 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -496,7 +496,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
struct iio_dev *indio_dev)
{
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
- struct resource *mem;
void __iomem *base;
int ret;
@@ -508,8 +507,7 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
indio_dev->num_channels = ARRAY_SIZE(sun8i_a33_gpadc_channels);
indio_dev->channels = sun8i_a33_gpadc_channels;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 552c2be8d87a..f1ee3b1e2827 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -22,6 +22,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
/* Commands */
#define ADS124S08_CMD_NOP 0x00
#define ADS124S08_CMD_WAKEUP 0x02
@@ -188,7 +190,6 @@ static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
{
struct ads124s_private *priv = iio_priv(indio_dev);
int ret;
- u32 tmp;
struct spi_transfer t[] = {
{
.tx_buf = &priv->data[0],
@@ -208,9 +209,7 @@ static int ads124s_read(struct iio_dev *indio_dev, unsigned int chan)
if (ret < 0)
return ret;
- tmp = priv->data[2] << 16 | priv->data[3] << 8 | priv->data[4];
-
- return tmp;
+ return get_unaligned_be24(&priv->data[2]);
}
static int ads124s_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 6fd06e4eff73..d7fecab9252e 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013-2014 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Documentation for the parts can be found at:
* - XADC hardmacro: Xilinx UG480
@@ -663,7 +663,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
mutex_lock(&xadc->mutex);
if (state) {
- /* Only one of the two triggers can be active at the a time. */
+ /* Only one of the two triggers can be active at a time. */
if (xadc->trigger != NULL) {
ret = -EBUSY;
goto err_out;
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index dbfd5da290a4..2357f585720a 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#include <linux/iio/events.h>
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 4017f18b0a4f..25abed9c0285 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -3,7 +3,7 @@
* Xilinx XADC driver
*
* Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clauen <lars@metafoo.de>
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
*/
#ifndef __IIO_XILINX_XADC__
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index a74bd9c0587c..d348af8b9705 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -12,7 +12,6 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index b129693af0fd..6dedf12b69a4 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -134,7 +134,7 @@ static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(indio_dev->buffer);
- return sprintf(buf, "%u\n", dmaengine_buffer->align);
+ return sprintf(buf, "%zu\n", dmaengine_buffer->align);
}
static IIO_DEVICE_ATTR(length_align_bytes, 0444,
@@ -229,6 +229,45 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_GPL(iio_dmaengine_buffer_free);
+static void __devm_iio_dmaengine_buffer_free(struct device *dev, void *res)
+{
+ iio_dmaengine_buffer_free(*(struct iio_buffer **)res);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
+ * @dev: Parent device for the buffer
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer which internally uses the DMAengine framework
+ * to perform its transfers. The parent device will be used to request the DMA
+ * channel.
+ *
+ * The buffer will be automatically de-allocated once the device gets destroyed.
+ */
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel)
+{
+ struct iio_buffer **bufferp, *buffer;
+
+ bufferp = devres_alloc(__devm_iio_dmaengine_buffer_free,
+ sizeof(*bufferp), GFP_KERNEL);
+ if (!bufferp)
+ return ERR_PTR(-ENOMEM);
+
+ buffer = iio_dmaengine_buffer_alloc(dev, channel);
+ if (IS_ERR(buffer)) {
+ devres_free(bufferp);
+ return buffer;
+ }
+
+ *bufferp = buffer;
+ devres_add(dev, bufferp);
+
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_alloc);
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
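
A minimal sketch of how a caller could use the new devm variant (the driver name and the "rx" channel are assumptions, not taken from this series):

static int foo_adc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct iio_buffer *buffer;

	indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
	if (!indio_dev)
		return -ENOMEM;

	/* Freed automatically when the parent device is unbound */
	buffer = devm_iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
	iio_device_attach_buffer(indio_dev, buffer);

	return devm_iio_device_register(&pdev->dev, indio_dev);
}
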
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
index 95165697d8ae..f2d27788f666 100644
--- a/drivers/iio/buffer/industrialio-hw-consumer.c
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -142,17 +142,6 @@ static void devm_iio_hw_consumer_release(struct device *dev, void *res)
iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
}
-static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
-{
- struct iio_hw_consumer **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
- return *r == data;
-}
-
/**
* devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
* @dev: Pointer to consumer device.
@@ -160,9 +149,6 @@ static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
* Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
* is automatically freed on driver detach.
*
- * If an iio_hw_consumer allocated with this function needs to be freed
- * separately, devm_iio_hw_consumer_free() must be used.
- *
* returns pointer to allocated iio_hw_consumer on success, NULL on failure.
*/
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
@@ -187,23 +173,6 @@ struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
/**
- * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
- * @dev: Pointer to consumer device.
- * @hwc: iio_hw_consumer to free.
- *
- * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
- */
-void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_hw_consumer_release,
- devm_iio_hw_consumer_match, hwc);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
-
-/**
* iio_hw_consumer_enable() - Enable IIO hardware consumer
* @hwc: iio_hw_consumer to enable.
*
diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c
index cb322b2f09cd..e8046c1ecd6b 100644
--- a/drivers/iio/buffer/industrialio-triggered-buffer.c
+++ b/drivers/iio/buffer/industrialio-triggered-buffer.c
@@ -126,17 +126,6 @@ int devm_iio_triggered_buffer_setup(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_setup);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_triggered_buffer_clean,
- devm_iio_device_match, indio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_triggered_buffer_cleanup);
-
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 3150f8ab984b..1359abed3b31 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -179,16 +179,6 @@ static void devm_iio_kfifo_release(struct device *dev, void *res)
iio_kfifo_free(*(struct iio_buffer **)res);
}
-static int devm_iio_kfifo_match(struct device *dev, void *res, void *data)
-{
- struct iio_buffer **r = res;
-
- if (WARN_ON(!r || !*r))
- return 0;
-
- return *r == data;
-}
-
/**
* devm_iio_fifo_allocate - Resource-managed iio_kfifo_allocate()
* @dev: Device to allocate kfifo buffer for
@@ -216,16 +206,4 @@ struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev)
}
EXPORT_SYMBOL(devm_iio_kfifo_allocate);
-/**
- * devm_iio_fifo_free - Resource-managed iio_kfifo_free()
- * @dev: Device the buffer belongs to
- * @r: The buffer associated with the device
- */
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r)
-{
- WARN_ON(devres_release(dev, devm_iio_kfifo_release,
- devm_iio_kfifo_match, r));
-}
-EXPORT_SYMBOL(devm_iio_kfifo_free);
-
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index a7e65a59bf42..7f21afd73b1c 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -22,6 +22,17 @@ config ATLAS_PH_SENSOR
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
+config ATLAS_EZO_SENSOR
+ tristate "Atlas Scientific EZO sensors"
+ depends on I2C
+ help
+ Say Y here to build I2C interface support for the following
+ Atlas Scientific EZO sensors:
+ * CO2 EZO Sensor
+
+ To compile this driver as module, choose M here: the
+ module will be called atlas-ezo-sensor.
+
config BME680
tristate "Bosch Sensortec BME680 sensor driver"
depends on (I2C || SPI)
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 33d3a595dda9..aba4167db745 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o
+obj-$(CONFIG_ATLAS_EZO_SENSOR) += atlas-ezo-sensor.o
obj-$(CONFIG_BME680) += bme680_core.o
obj-$(CONFIG_BME680_I2C) += bme680_i2c.o
obj-$(CONFIG_BME680_SPI) += bme680_spi.o
diff --git a/drivers/iio/chemical/atlas-ezo-sensor.c b/drivers/iio/chemical/atlas-ezo-sensor.c
new file mode 100644
index 000000000000..f5a6d8ec6d4d
--- /dev/null
+++ b/drivers/iio/chemical/atlas-ezo-sensor.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * atlas-ezo-sensor.c - Support for Atlas Scientific EZO sensors
+ *
+ * Copyright (C) 2020 Konsulko Group
+ * Author: Matt Ranostay <matt.ranostay@konsulko.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/iio/iio.h>
+
+#define ATLAS_EZO_DRV_NAME "atlas-ezo-sensor"
+#define ATLAS_CO2_INT_TIME_IN_MS 950
+
+enum {
+ ATLAS_CO2_EZO,
+};
+
+struct atlas_ezo_device {
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int delay;
+};
+
+struct atlas_ezo_data {
+ struct i2c_client *client;
+ struct atlas_ezo_device *chip;
+
+ /* lock to avoid multiple concurrent read calls */
+ struct mutex lock;
+
+ u8 buffer[8];
+};
+
+static const struct iio_chan_spec atlas_co2_ezo_channels[] = {
+ {
+ .type = IIO_CONCENTRATION,
+ .modified = 1,
+ .channel2 = IIO_MOD_CO2,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+};
+
+static struct atlas_ezo_device atlas_ezo_devices[] = {
+ [ATLAS_CO2_EZO] = {
+ .channels = atlas_co2_ezo_channels,
+ .num_channels = 1,
+ .delay = ATLAS_CO2_INT_TIME_IN_MS,
+ },
+};
+
+static int atlas_ezo_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct atlas_ezo_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ int ret = 0;
+
+ if (chan->type != IIO_CONCENTRATION)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ long tmp;
+
+ mutex_lock(&data->lock);
+
+ tmp = i2c_smbus_write_byte(client, 'R');
+
+ if (tmp < 0) {
+ mutex_unlock(&data->lock);
+ return tmp;
+ }
+
+ msleep(data->chip->delay);
+
+ tmp = i2c_master_recv(client, data->buffer, sizeof(data->buffer));
+
+ if (tmp < 0 || data->buffer[0] != 1) {
+ mutex_unlock(&data->lock);
+ return -EBUSY;
+ }
+
+ ret = kstrtol(data->buffer + 1, 10, &tmp);
+
+ *val = tmp;
+
+ mutex_unlock(&data->lock);
+
+ return ret ? ret : IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 100; /* 0.0001 */
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return ret;
+}
+
+static const struct iio_info atlas_info = {
+ .read_raw = atlas_ezo_read_raw,
+};
+
+static const struct i2c_device_id atlas_ezo_id[] = {
+ { "atlas-co2-ezo", ATLAS_CO2_EZO },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_ezo_id);
+
+static const struct of_device_id atlas_ezo_dt_ids[] = {
+ { .compatible = "atlas,co2-ezo", .data = (void *)ATLAS_CO2_EZO, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, atlas_ezo_dt_ids);
+
+static int atlas_ezo_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atlas_ezo_data *data;
+ struct atlas_ezo_device *chip;
+ const struct of_device_id *of_id;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ of_id = of_match_device(atlas_ezo_dt_ids, &client->dev);
+ if (!of_id)
+ chip = &atlas_ezo_devices[id->driver_data];
+ else
+ chip = &atlas_ezo_devices[(unsigned long)of_id->data];
+
+ indio_dev->info = &atlas_info;
+ indio_dev->name = ATLAS_EZO_DRV_NAME;
+ indio_dev->channels = chip->channels;
+ indio_dev->num_channels = chip->num_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->chip = chip;
+ mutex_init(&data->lock);
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+};
+
+static struct i2c_driver atlas_ezo_driver = {
+ .driver = {
+ .name = ATLAS_EZO_DRV_NAME,
+ .of_match_table = atlas_ezo_dt_ids,
+ },
+ .probe = atlas_ezo_probe,
+ .id_table = atlas_ezo_id,
+};
+module_i2c_driver(atlas_ezo_driver);
+
+MODULE_AUTHOR("Matt Ranostay <matt.ranostay@konsulko.com>");
+MODULE_DESCRIPTION("Atlas Scientific EZO sensors");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 7b199ce16ecf..78a27e36bf32 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -53,6 +53,8 @@
#define ATLAS_REG_DO_CALIB_STATUS_PRESSURE BIT(0)
#define ATLAS_REG_DO_CALIB_STATUS_DO BIT(1)
+#define ATLAS_REG_RTD_DATA 0x0e
+
#define ATLAS_REG_PH_TEMP_DATA 0x0e
#define ATLAS_REG_PH_DATA 0x16
@@ -72,12 +74,14 @@
#define ATLAS_EC_INT_TIME_IN_MS 650
#define ATLAS_ORP_INT_TIME_IN_MS 450
#define ATLAS_DO_INT_TIME_IN_MS 450
+#define ATLAS_RTD_INT_TIME_IN_MS 450
enum {
ATLAS_PH_SM,
ATLAS_EC_SM,
ATLAS_ORP_SM,
ATLAS_DO_SM,
+ ATLAS_RTD_SM,
};
struct atlas_data {
@@ -218,6 +222,22 @@ static const struct iio_chan_spec atlas_do_channels[] = {
},
};
+static const struct iio_chan_spec atlas_rtd_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_RTD_DATA,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static int atlas_check_ph_calibration(struct atlas_data *data)
{
struct device *dev = &data->client->dev;
@@ -362,6 +382,12 @@ static struct atlas_device atlas_devices[] = {
.calibration = &atlas_check_do_calibration,
.delay = ATLAS_DO_INT_TIME_IN_MS,
},
+ [ATLAS_RTD_SM] = {
+ .channels = atlas_rtd_channels,
+ .num_channels = 2,
+ .data_reg = ATLAS_REG_RTD_DATA,
+ .delay = ATLAS_RTD_INT_TIME_IN_MS,
+ },
};
static int atlas_set_powermode(struct atlas_data *data, int on)
@@ -438,8 +464,7 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
int ret;
ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
- (u8 *) &data->buffer,
- sizeof(__be32) * channels);
+ &data->buffer, sizeof(__be32) * channels);
if (!ret)
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
@@ -475,7 +500,7 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
if (suspended)
msleep(data->chip->delay);
- ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
+ ret = regmap_bulk_read(data->regmap, reg, val, sizeof(*val));
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -490,6 +515,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
struct atlas_data *data = iio_priv(indio_dev);
switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW: {
int ret;
__be32 reg;
@@ -497,7 +523,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_TEMP:
ret = regmap_bulk_read(data->regmap, chan->address,
- (u8 *) &reg, sizeof(reg));
+ &reg, sizeof(reg));
break;
case IIO_PH:
case IIO_CONCENTRATION:
@@ -578,6 +604,7 @@ static const struct i2c_device_id atlas_id[] = {
{ "atlas-ec-sm", ATLAS_EC_SM},
{ "atlas-orp-sm", ATLAS_ORP_SM},
{ "atlas-do-sm", ATLAS_DO_SM},
+ { "atlas-rtd-sm", ATLAS_RTD_SM},
{}
};
MODULE_DEVICE_TABLE(i2c, atlas_id);
@@ -587,6 +614,7 @@ static const struct of_device_id atlas_dt_ids[] = {
{ .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
{ .compatible = "atlas,orp-sm", .data = (void *)ATLAS_ORP_SM, },
{ .compatible = "atlas,do-sm", .data = (void *)ATLAS_DO_SM, },
+ { .compatible = "atlas,rtd-sm", .data = (void *)ATLAS_RTD_SM, },
{ }
};
MODULE_DEVICE_TABLE(of, atlas_dt_ids);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index ccde4c65ff93..13773e01699b 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -114,14 +114,16 @@ static int bme680_read_calib(struct bme680_data *data,
__le16 buf;
/* Temperature related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_T1_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_T1_LSB_REG\n");
return ret;
}
calib->par_t1 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_T2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_T2_LSB_REG\n");
return ret;
@@ -136,14 +138,16 @@ static int bme680_read_calib(struct bme680_data *data,
calib->par_t3 = tmp;
/* Pressure related coefficients */
- ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P1_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P1_LSB_REG\n");
return ret;
}
calib->par_p1 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P2_LSB_REG\n");
return ret;
@@ -157,14 +161,16 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_p3 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P4_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P4_LSB_REG\n");
return ret;
}
calib->par_p4 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P5_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P5_LSB_REG\n");
return ret;
@@ -185,14 +191,16 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_p7 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P8_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P8_LSB_REG\n");
return ret;
}
calib->par_p8 = le16_to_cpu(buf);
- ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG, (u8 *) &buf, 2);
+ ret = regmap_bulk_read(data->regmap, BME680_P9_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_P9_LSB_REG\n");
return ret;
@@ -276,8 +284,8 @@ static int bme680_read_calib(struct bme680_data *data,
}
calib->par_gh1 = tmp;
- ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG, (u8 *) &buf,
- 2);
+ ret = regmap_bulk_read(data->regmap, BME680_GH2_LSB_REG,
+ &buf, sizeof(buf));
if (ret < 0) {
dev_err(dev, "failed to read BME680_GH2_LSB_REG\n");
return ret;
@@ -615,7 +623,7 @@ static int bme680_read_temp(struct bme680_data *data, int *val)
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_TEMP_MSB,
- (u8 *) &tmp, 3);
+ &tmp, 3);
if (ret < 0) {
dev_err(dev, "failed to read temperature\n");
return ret;
@@ -656,7 +664,7 @@ static int bme680_read_press(struct bme680_data *data,
return ret;
ret = regmap_bulk_read(data->regmap, BME680_REG_PRESS_MSB,
- (u8 *) &tmp, 3);
+ &tmp, 3);
if (ret < 0) {
dev_err(dev, "failed to read pressure\n");
return ret;
@@ -689,7 +697,7 @@ static int bme680_read_humid(struct bme680_data *data,
return ret;
ret = regmap_bulk_read(data->regmap, BM6880_REG_HUMIDITY_MSB,
- (u8 *) &tmp, 2);
+ &tmp, sizeof(tmp));
if (ret < 0) {
dev_err(dev, "failed to read humidity\n");
return ret;
@@ -754,7 +762,7 @@ static int bme680_read_gas(struct bme680_data *data,
}
ret = regmap_bulk_read(data->regmap, BME680_REG_GAS_MSB,
- (u8 *) &tmp, 2);
+ &tmp, sizeof(tmp));
if (ret < 0) {
dev_err(dev, "failed to read gas resistance\n");
return ret;
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 2ebdfc35bcda..3ecd633f9ed3 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -16,6 +16,7 @@
*/
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -36,6 +37,7 @@
#define CCS811_ERR 0xE0
/* Used to transition from boot to application mode */
#define CCS811_APP_START 0xF4
+#define CCS811_SW_RESET 0xFF
/* Status register flags */
#define CCS811_STATUS_ERROR BIT(0)
@@ -74,6 +76,7 @@ struct ccs811_data {
struct mutex lock; /* Protect readings */
struct ccs811_reading buffer;
struct iio_trigger *drdy_trig;
+ struct gpio_desc *wakeup_gpio;
bool drdy_trig_on;
};
@@ -166,10 +169,25 @@ static int ccs811_setup(struct i2c_client *client)
CCS811_MODE_IAQ_1SEC);
}
+static void ccs811_set_wakeup(struct ccs811_data *data, bool enable)
+{
+ if (!data->wakeup_gpio)
+ return;
+
+ gpiod_set_value(data->wakeup_gpio, enable);
+
+ if (enable)
+ usleep_range(50, 60);
+ else
+ usleep_range(20, 30);
+}
+
static int ccs811_get_measurement(struct ccs811_data *data)
{
int ret, tries = 11;
+ ccs811_set_wakeup(data, true);
+
/* Maximum waiting time: 1s, as measurements are made every second */
while (tries-- > 0) {
ret = i2c_smbus_read_byte_data(data->client, CCS811_STATUS);
@@ -183,9 +201,12 @@ static int ccs811_get_measurement(struct ccs811_data *data)
if (!(ret & CCS811_STATUS_DATA_READY))
return -EIO;
- return i2c_smbus_read_i2c_block_data(data->client,
+ ret = i2c_smbus_read_i2c_block_data(data->client,
CCS811_ALG_RESULT_DATA, 8,
(char *)&data->buffer);
+ ccs811_set_wakeup(data, false);
+
+ return ret;
}
static int ccs811_read_raw(struct iio_dev *indio_dev,
@@ -336,6 +357,45 @@ static irqreturn_t ccs811_data_rdy_trigger_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static int ccs811_reset(struct i2c_client *client)
+{
+ struct gpio_desc *reset_gpio;
+ int ret;
+
+ reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
+
+ /* Try to reset using the nRESET pin if available, else do a SW reset */
+ if (reset_gpio) {
+ gpiod_set_value(reset_gpio, 1);
+ usleep_range(20, 30);
+ gpiod_set_value(reset_gpio, 0);
+ } else {
+ /*
+ * As per the datasheet, this sequence of values needs to be
+ * written to the SW_RESET register for triggering the soft
+ * reset in the device and placing it in boot mode.
+ */
+ static const u8 reset_seq[] = {
+ 0x11, 0xE5, 0x72, 0x8A,
+ };
+
+ ret = i2c_smbus_write_i2c_block_data(client, CCS811_SW_RESET,
+ sizeof(reset_seq), reset_seq);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to reset sensor\n");
+ return ret;
+ }
+ }
+
+ /* tSTART delay required after reset */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
static int ccs811_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -348,36 +408,59 @@ static int ccs811_probe(struct i2c_client *client,
| I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -EOPNOTSUPP;
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ data->wakeup_gpio = devm_gpiod_get_optional(&client->dev, "wakeup",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(data->wakeup_gpio))
+ return PTR_ERR(data->wakeup_gpio);
+
+ ccs811_set_wakeup(data, true);
+
+ ret = ccs811_reset(client);
+ if (ret) {
+ ccs811_set_wakeup(data, false);
+ return ret;
+ }
+
/* Check hardware id (should be 0x81 for this family of devices) */
ret = i2c_smbus_read_byte_data(client, CCS811_HW_ID);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
if (ret != CCS811_HW_ID_VALUE) {
dev_err(&client->dev, "hardware id doesn't match CCS81x\n");
+ ccs811_set_wakeup(data, false);
return -ENODEV;
}
ret = i2c_smbus_read_byte_data(client, CCS811_HW_VERSION);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
if ((ret & CCS811_HW_VERSION_MASK) != CCS811_HW_VERSION_VALUE) {
dev_err(&client->dev, "no CCS811 sensor\n");
+ ccs811_set_wakeup(data, false);
return -ENODEV;
}
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev)
- return -ENOMEM;
-
ret = ccs811_setup(client);
- if (ret < 0)
+ if (ret < 0) {
+ ccs811_set_wakeup(data, false);
return ret;
+ }
- data = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- data->client = client;
+ ccs811_set_wakeup(data, false);
mutex_init(&data->lock);
@@ -466,9 +549,16 @@ static const struct i2c_device_id ccs811_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ccs811_id);
+static const struct of_device_id ccs811_dt_ids[] = {
+ { .compatible = "ams,ccs811" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ccs811_dt_ids);
+
static struct i2c_driver ccs811_driver = {
.driver = {
.name = "ccs811",
+ .of_match_table = ccs811_dt_ids,
},
.probe = ccs811_probe,
.remove = ccs811_remove,
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index 23c9ab252470..07bb90d72434 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -73,6 +73,11 @@ struct pms7003_state {
struct pms7003_frame frame;
struct completion frame_ready;
struct mutex lock; /* must be held whenever state gets touched */
+ /* Used to construct scan to push to the IIO buffer */
+ struct {
+ u16 data[3]; /* PM1, PM2P5, PM10 */
+ s64 ts;
+ } scan;
};
static int pms7003_do_cmd(struct pms7003_state *state, enum pms7003_cmd cmd)
@@ -104,7 +109,6 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct pms7003_state *state = iio_priv(indio_dev);
struct pms7003_frame *frame = &state->frame;
- u16 data[3 + 1 + 4]; /* PM1, PM2P5, PM10, padding, timestamp */
int ret;
mutex_lock(&state->lock);
@@ -114,12 +118,15 @@ static irqreturn_t pms7003_trigger_handler(int irq, void *p)
goto err;
}
- data[PM1] = pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
- data[PM2P5] = pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
- data[PM10] = pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
+ state->scan.data[PM1] =
+ pms7003_get_pm(frame->data + PMS7003_PM1_OFFSET);
+ state->scan.data[PM2P5] =
+ pms7003_get_pm(frame->data + PMS7003_PM2P5_OFFSET);
+ state->scan.data[PM10] =
+ pms7003_get_pm(frame->data + PMS7003_PM10_OFFSET);
mutex_unlock(&state->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &state->scan,
iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c
index acb9f8ecbb3d..a88c1fb875a0 100644
--- a/drivers/iio/chemical/sps30.c
+++ b/drivers/iio/chemical/sps30.c
@@ -230,15 +230,18 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sps30_state *state = iio_priv(indio_dev);
int ret;
- s32 data[4 + 2]; /* PM1, PM2P5, PM4, PM10, timestamp */
+ struct {
+ s32 data[4]; /* PM1, PM2P5, PM4, PM10 */
+ s64 ts;
+ } scan;
mutex_lock(&state->lock);
- ret = sps30_do_meas(state, data, 4);
+ ret = sps30_do_meas(state, scan.data, ARRAY_SIZE(scan.data));
mutex_unlock(&state->lock);
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 906d87780419..ff375790b7e8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -13,6 +13,8 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include "hid-sensor-trigger.h"
@@ -222,7 +224,8 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
return hid_sensor_power_state(iio_trigger_get_drvdata(trig), state);
}
-void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
+void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
+ struct hid_sensor_common *attrb)
{
if (atomic_read(&attrb->runtime_pm_enable))
pm_runtime_disable(&attrb->pdev->dev);
@@ -233,6 +236,7 @@ void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
cancel_work_sync(&attrb->work);
iio_trigger_unregister(attrb->trigger);
iio_trigger_free(attrb->trigger);
+ iio_triggered_buffer_cleanup(indio_dev);
}
EXPORT_SYMBOL(hid_sensor_remove_trigger);
@@ -246,11 +250,18 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
int ret;
struct iio_trigger *trig;
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ NULL, NULL);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Triggered Buffer Setup Failed\n");
+ return ret;
+ }
+
trig = iio_trigger_alloc("%s-dev%d", name, indio_dev->id);
if (trig == NULL) {
dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
ret = -ENOMEM;
- goto error_ret;
+ goto error_triggered_buffer_cleanup;
}
trig->dev.parent = indio_dev->dev.parent;
@@ -284,7 +295,8 @@ error_unreg_trigger:
iio_trigger_unregister(trig);
error_free_trig:
iio_trigger_free(trig);
-error_ret:
+error_triggered_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
return ret;
}
EXPORT_SYMBOL(hid_sensor_setup_trigger);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
index f47b940ff170..bb45cc89e551 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.h
@@ -13,7 +13,8 @@ extern const struct dev_pm_ops hid_sensor_pm_ops;
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
struct hid_sensor_common *attrb);
-void hid_sensor_remove_trigger(struct hid_sensor_common *attrb);
+void hid_sensor_remove_trigger(struct iio_dev *indio_dev,
+ struct hid_sensor_common *attrb);
int hid_sensor_power_state(struct hid_sensor_common *st, bool state);
#endif
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 13bdfbbf5f71..7a69c1be7393 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -20,11 +20,6 @@
#include "st_sensors_core.h"
-static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
-{
- return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
-}
-
int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
u8 reg_addr, u8 mask, u8 data)
{
@@ -150,8 +145,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
if (err < 0)
goto st_accel_set_fullscale_error;
- sdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &sdata->sensor_settings->fs.fs_avl[i];
+ sdata->current_fullscale = &sdata->sensor_settings->fs.fs_avl[i];
return err;
st_accel_set_fullscale_error:
@@ -278,8 +272,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
!sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
dev_info(&indio_dev->dev,
- "DRDY on pin INT%d specified, but sensor "
- "does not support interrupts\n",
+ "DRDY on pin INT%d specified, but sensor does not support interrupts\n",
pdata->drdy_int_pin);
return 0;
}
@@ -545,7 +538,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
else if (byte_for_channel == 2)
*data = (s16)get_unaligned_le16(outdata);
else if (byte_for_channel == 3)
- *data = (s32)st_sensors_get_unaligned_le24(outdata);
+ *data = (s32)sign_extend32(get_unaligned_le24(outdata), 23);
st_sensors_free_memory:
kfree(outdata);
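
For clarity on the conversion change above (a worked example, not part of the patch): sign_extend32(x, 23) treats bit 23 as the sign bit, which is exactly what the removed open-coded shift trick did.

#include <linux/bitops.h>
#include <asm/unaligned.h>

static inline s32 st_le24_to_s32(const u8 *p)
{
	/* e.g. p = {0x00, 0x00, 0x80} -> 0x800000 -> -8388608 */
	return sign_extend32(get_unaligned_le24(p), 23);
}
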
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index 286830fb5d35..b400560bac93 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -49,8 +49,8 @@ int st_sensors_i2c_configure(struct iio_dev *indio_dev,
sdata->regmap = devm_regmap_init_i2c(client, config);
if (IS_ERR(sdata->regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap (%d)\n",
- (int)PTR_ERR(sdata->regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap (%ld)\n",
+ PTR_ERR(sdata->regmap));
return PTR_ERR(sdata->regmap);
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_spi.c b/drivers/iio/common/st_sensors/st_sensors_spi.c
index 1275fb0eda31..ee70515bb89f 100644
--- a/drivers/iio/common/st_sensors/st_sensors_spi.c
+++ b/drivers/iio/common/st_sensors/st_sensors_spi.c
@@ -44,7 +44,7 @@ static bool st_sensors_is_spi_3_wire(struct spi_device *spi)
if (device_property_read_bool(dev, "spi-3wire"))
return true;
- pdata = (struct st_sensors_platform_data *)dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (pdata && pdata->spi_3wire)
return true;
@@ -101,8 +101,8 @@ int st_sensors_spi_configure(struct iio_dev *indio_dev,
sdata->regmap = devm_regmap_init_spi(spi, config);
if (IS_ERR(sdata->regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap (%d)\n",
- (int)PTR_ERR(sdata->regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap (%ld)\n",
+ PTR_ERR(sdata->regmap));
return PTR_ERR(sdata->regmap);
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index e817537cdfb5..0507283bd4c1 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -44,8 +44,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
- dev_err(sdata->dev,
- "error checking samples available\n");
+ dev_err(sdata->dev, "error checking samples available\n");
return ret;
}
@@ -148,9 +147,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
dev_err(&indio_dev->dev,
- "falling/low specified for IRQ "
- "but hardware supports only rising/high: "
- "will request rising/high\n");
+ "falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
if (irq_trig == IRQF_TRIGGER_FALLING)
irq_trig = IRQF_TRIGGER_RISING;
if (irq_trig == IRQF_TRIGGER_LOW)
@@ -163,8 +160,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (err < 0)
goto iio_trigger_free;
dev_info(&indio_dev->dev,
- "interrupts on the falling edge or "
- "active low level\n");
+ "interrupts on the falling edge or active low level\n");
}
break;
case IRQF_TRIGGER_RISING:
@@ -178,8 +174,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
default:
/* This is the most preferred mode, if possible */
dev_err(&indio_dev->dev,
- "unsupported IRQ trigger specified (%lx), enforce "
- "rising edge\n", irq_trig);
+ "unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 93744011b63f..3728f6325501 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -279,12 +279,12 @@ config LTC1660
module will be called ltc1660.
config LTC2632
- tristate "Linear Technology LTC2632-12/10/8 and LTC2636-12/10/8 DAC spi driver"
+ tristate "Linear Technology LTC2632-12/10/8 and similar DAC spi driver"
depends on SPI
help
Say yes here to build support for Linear Technology
- LTC2632-12, LTC2632-10, LTC2632-8, LTC2636-12, LTC2636-10 and
- LTC2636-8 converters (DAC).
+ LTC2632, LTC2634 and LTC2636 converters (DAC) with 12/10/8 bit
+ resolution and low (0-2.5V) or high (0-4.096V) reference ranges.
To compile this driver as a module, choose M here: the
module will be called ltc2632.
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 2ac428b957e3..3e0c9e84e8da 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -67,6 +67,7 @@ struct ad5360_chip_info {
* @chip_info: chip model specific constants, available modes etc
* @vref_reg: vref supply regulators
* @ctrl: control register cache
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
@@ -75,6 +76,7 @@ struct ad5360_state {
const struct ad5360_chip_info *chip_info;
struct regulator_bulk_data vref_reg[3];
unsigned int ctrl;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -205,10 +207,11 @@ static int ad5360_write(struct iio_dev *indio_dev, unsigned int cmd,
unsigned int addr, unsigned int val, unsigned int shift)
{
int ret;
+ struct ad5360_state *st = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5360_write_unlocked(indio_dev, cmd, addr, val, shift);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -229,7 +232,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32(AD5360_CMD(AD5360_CMD_SPECIAL_FUNCTION) |
AD5360_ADDR(AD5360_REG_SF_READBACK) |
@@ -240,7 +243,7 @@ static int ad5360_read(struct iio_dev *indio_dev, unsigned int type,
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -261,7 +264,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
struct ad5360_state *st = iio_priv(indio_dev);
unsigned int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->ctrl |= set;
st->ctrl &= ~clr;
@@ -269,7 +272,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
ret = ad5360_write_unlocked(indio_dev, AD5360_CMD_SPECIAL_FUNCTION,
AD5360_REG_SF_CTRL, st->ctrl, 0);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -479,6 +482,8 @@ static int ad5360_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
ret = ad5360_alloc_channels(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
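
The ad5360.c hunk is the first of many identical conversions in this series: instead of taking the IIO core's indio_dev->mlock, each driver grows a mutex in its own state structure, initializes it in probe(), and uses it to serialize access to its DMA-safe transfer buffer. A minimal kernel-style sketch of the pattern (struct and function names are illustrative, not taken from any one driver):

#include <linux/iio/iio.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>

struct foo_state {
	struct spi_device *spi;
	struct mutex lock;		/* protects @buf during SPI transfers */
	__be32 buf ____cacheline_aligned;
};

static int foo_write(struct iio_dev *indio_dev, u32 val)
{
	struct foo_state *st = iio_priv(indio_dev);
	int ret;

	mutex_lock(&st->lock);
	st->buf = cpu_to_be32(val);
	ret = spi_write(st->spi, &st->buf, sizeof(st->buf));
	mutex_unlock(&st->lock);

	return ret;
}

/* and in probe(), before iio_device_register(): mutex_init(&st->lock); */
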
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 2ebe08326048..b37e5675f716 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -51,6 +51,7 @@ struct ad5380_chip_info {
* @vref_reg: vref supply regulator
* @vref: actual reference voltage used in uA
* @pwr_down: whether the chip is currently in power down mode
+ * @lock: lock to protect the data buffer during regmap ops
*/
struct ad5380_state {
@@ -59,6 +60,7 @@ struct ad5380_state {
struct regulator *vref_reg;
int vref;
bool pwr_down;
+ struct mutex lock;
};
enum ad5380_type {
@@ -98,7 +100,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (pwr_down)
ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
@@ -107,7 +109,7 @@ static ssize_t ad5380_write_dac_powerdown(struct iio_dev *indio_dev,
st->pwr_down = pwr_down;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -390,6 +392,8 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
ret = ad5380_alloc_channels(indio_dev);
if (ret) {
dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 63063e85cd0a..fec27764cea8 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -62,12 +62,14 @@
* @current_range: current range which the device is configured for
* @data: spi transfer buffers
* @fault_mask: software masking of events
+ * @lock: lock to protect the data buffer during SPI ops
*/
struct ad5421_state {
struct spi_device *spi;
unsigned int ctrl;
enum ad5421_current_range current_range;
unsigned int fault_mask;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -142,11 +144,12 @@ static int ad5421_write_unlocked(struct iio_dev *indio_dev,
static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
unsigned int val)
{
+ struct ad5421_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5421_write_unlocked(indio_dev, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -166,7 +169,7 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
@@ -174,7 +177,7 @@ static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -185,14 +188,14 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
struct ad5421_state *st = iio_priv(indio_dev);
unsigned int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->ctrl &= ~clr;
st->ctrl |= set;
ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -400,12 +403,12 @@ static int ad5421_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if (state)
st->fault_mask |= mask;
else
st->fault_mask &= ~mask;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -491,6 +494,8 @@ static int ad5421_probe(struct spi_device *spi)
indio_dev->channels = ad5421_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+ mutex_init(&st->lock);
+
st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
AD5421_CTRL_AUTO_FAULT_READBACK;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 61c670f7fc5f..8f8afc8999bc 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -21,6 +21,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
@@ -31,6 +33,7 @@
* @chip_info: chip model specific constants, available modes etc
* @reg: supply regulator
* @vref_mv: actual reference voltage used
+ * @lock: lock to protect the data buffer during write ops
*/
struct ad5446_state {
@@ -41,6 +44,7 @@ struct ad5446_state {
unsigned cached_val;
unsigned pwr_down_mode;
unsigned pwr_down;
+ struct mutex lock;
};
/**
@@ -110,7 +114,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->pwr_down = powerdown;
if (st->pwr_down) {
@@ -121,7 +125,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
}
ret = st->chip_info->write(st, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret ? ret : len;
}
@@ -195,11 +199,11 @@ static int ad5446_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
val <<= chan->scan_type.shift;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->cached_val = val;
if (!st->pwr_down)
ret = st->chip_info->write(st, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -254,6 +258,8 @@ static int ad5446_probe(struct device *dev, const char *name,
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
+ mutex_init(&st->lock);
+
st->pwr_down_mode = MODE_PWRDWN_1k;
if (st->chip_info->int_vref_mv)
@@ -302,9 +308,7 @@ static int ad5660_write(struct ad5446_state *st, unsigned val)
struct spi_device *spi = to_spi_device(st->dev);
uint8_t data[3];
- data[0] = (val >> 16) & 0xFF;
- data[1] = (val >> 8) & 0xFF;
- data[2] = val & 0xFF;
+ put_unaligned_be24(val, &data[0]);
return spi_write(spi, data, sizeof(data));
}
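
Several writers in this series (ad5446 here, ad5624r and ltc2632 further down) swap three explicit shifts for put_unaligned_be24(), which stores the low 24 bits of the value MSB-first. A self-contained user-space sketch with a local stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's put_unaligned_be24(). */
static void put_unaligned_be24(uint32_t val, uint8_t *p)
{
	p[0] = (val >> 16) & 0xff;
	p[1] = (val >> 8) & 0xff;
	p[2] = val & 0xff;
}

int main(void)
{
	uint8_t msg[3];

	put_unaligned_be24(0x123456, msg);
	/* Prints "12 34 56": MSB first, matching the removed open-coded shifts. */
	printf("%02x %02x %02x\n", msg[0], msg[1], msg[2]);
	return 0;
}
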
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index fed3ebaccac4..d739b10e5236 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -56,11 +56,13 @@ struct ad5449_chip_info {
* @has_sdo: whether the SDO line is connected
* @dac_cache: Cache for the DAC values
* @data: spi transfer buffers
+ * @lock: lock to protect the data buffer during SPI ops
*/
struct ad5449 {
struct spi_device *spi;
const struct ad5449_chip_info *chip_info;
struct regulator_bulk_data vref_reg[AD5449_MAX_VREFS];
+ struct mutex lock;
bool has_sdo;
uint16_t dac_cache[AD5449_MAX_CHANNELS];
@@ -87,10 +89,10 @@ static int ad5449_write(struct iio_dev *indio_dev, unsigned int addr,
struct ad5449 *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0] = cpu_to_be16((addr << 12) | val);
ret = spi_write(st->spi, st->data, 2);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -112,7 +114,7 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0] = cpu_to_be16(addr << 12);
st->data[1] = cpu_to_be16(AD5449_CMD_NOOP);
@@ -123,7 +125,7 @@ static int ad5449_read(struct iio_dev *indio_dev, unsigned int addr,
*val = be16_to_cpu(st->data[1]);
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -302,6 +304,8 @@ static int ad5449_spi_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
if (st->chip_info->has_ctrl) {
unsigned int ctrl = 0x00;
if (pdata) {
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index e2110113e884..410e90e5f75f 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -156,7 +156,6 @@ static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
static int ad5592r_reset(struct ad5592r_state *st)
{
struct gpio_desc *gpio;
- struct iio_dev *iio_dev = iio_priv_to_dev(st);
gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
@@ -166,10 +165,10 @@ static int ad5592r_reset(struct ad5592r_state *st)
udelay(1);
gpiod_set_value(gpio, 1);
} else {
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
/* Writing this magic value resets the device */
st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
}
udelay(250);
@@ -197,7 +196,6 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
const struct ad5592r_rw_ops *ops = st->ops;
int ret;
unsigned i;
- struct iio_dev *iio_dev = iio_priv_to_dev(st);
u8 pulldown = 0, tristate = 0, dac = 0, adc = 0;
u16 read_back;
@@ -247,7 +245,7 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
}
}
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
/* Pull down unused pins to GND */
ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
@@ -285,7 +283,7 @@ static int ad5592r_set_channel_modes(struct ad5592r_state *st)
ret = -EIO;
err_unlock:
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -314,11 +312,11 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
if (!chan->output)
return -EINVAL;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->ops->write_dac(st, chan->channel, val);
if (!ret)
st->cached_dac[chan->channel] = val;
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_VOLTAGE) {
@@ -333,12 +331,12 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
else
return -EINVAL;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
&st->cached_gp_ctrl);
if (ret < 0) {
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -360,7 +358,7 @@ static int ad5592r_write_raw(struct iio_dev *iio_dev,
ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
st->cached_gp_ctrl);
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -382,7 +380,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (!chan->output) {
ret = st->ops->read_adc(st, chan->channel, &read_val);
@@ -419,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
} else {
int mult;
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (chan->output)
mult = !!(st->cached_gp_ctrl &
@@ -437,7 +435,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_OFFSET:
ret = ad5592r_get_vref(st);
- mutex_lock(&iio_dev->mlock);
+ mutex_lock(&st->lock);
if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
*val = (-34365 * 25) / ret;
@@ -450,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
}
unlock:
- mutex_unlock(&iio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -625,6 +623,8 @@ int ad5592r_probe(struct device *dev, const char *name,
iio_dev->info = &ad5592r_info;
iio_dev->modes = INDIO_DIRECT_MODE;
+ mutex_init(&st->lock);
+
ad5592r_init_scales(st, ad5592r_get_vref(st));
ret = ad5592r_reset(st);
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
index 4774e4cd9c11..23dac2f1ff8a 100644
--- a/drivers/iio/dac/ad5592r-base.h
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -52,6 +52,7 @@ struct ad5592r_state {
struct regulator *reg;
struct gpio_chip gpiochip;
struct mutex gpio_lock; /* Protect cached gpio_out, gpio_val, etc. */
+ struct mutex lock;
unsigned int num_channels;
const struct ad5592r_rw_ops *ops;
int scale_avail[2][2];
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 34ba059a77da..49308ad13c4b 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -98,7 +98,7 @@ static int ad5592r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
return 0;
}
-static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+static int ad5592r_gpio_read(struct ad5592r_state *st, u8 *value)
{
int ret;
@@ -121,7 +121,7 @@ static const struct ad5592r_rw_ops ad5592r_rw_ops = {
.read_adc = ad5592r_read_adc,
.reg_write = ad5592r_reg_write,
.reg_read = ad5592r_reg_read,
- .gpio_read = ad5593r_gpio_read,
+ .gpio_read = ad5592r_gpio_read,
};
static int ad5592r_spi_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
index 44ea3b8117d0..1fbe9c019c7f 100644
--- a/drivers/iio/dac/ad5593r.c
+++ b/drivers/iio/dac/ad5593r.c
@@ -134,5 +134,5 @@ static struct i2c_driver ad5593r_driver = {
module_i2c_driver(ad5593r_driver);
MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_DESCRIPTION("Analog Devices AD5593R multi-channel converters");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index e6c022e1dc1c..2015a5df840c 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -18,6 +18,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#include "ad5624r.h"
static int ad5624r_spi_write(struct spi_device *spi,
@@ -35,11 +37,9 @@ static int ad5624r_spi_write(struct spi_device *spi,
* for the AD5664R, AD5644R, and AD5624R, respectively.
*/
data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
- msg[0] = data >> 16;
- msg[1] = data >> 8;
- msg[2] = data;
+ put_unaligned_be24(data, &msg[0]);
- return spi_write(spi, msg, 3);
+ return spi_write(spi, msg, sizeof(msg));
}
static int ad5624r_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index e06b29c565b9..8dd67da0a7da 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -127,9 +127,9 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->read(st, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
if (ret < 0)
return ret;
*val = (ret >> chan->scan_type.shift) &
@@ -157,12 +157,12 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
if (val > (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = st->write(st,
AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
chan->address,
val << chan->scan_type.shift);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
break;
default:
ret = -EINVAL;
@@ -468,6 +468,8 @@ int ad5686_probe(struct device *dev,
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ mutex_init(&st->lock);
+
switch (st->chip_info->regmap_type) {
case AD5310_REGMAP:
cmd = AD5686_CMD_CONTROL_REG;
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index 70a779939ddb..52009b5eef88 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -117,6 +117,7 @@ struct ad5686_chip_info {
* @pwr_down_mask: power down mask
* @pwr_down_mode: current power down mode
* @use_internal_vref: set to true if the internal reference voltage is used
+ * @lock: lock to protect the data buffer during regmap ops
* @data: spi transfer buffers
*/
@@ -130,6 +131,7 @@ struct ad5686_state {
ad5686_write_func write;
ad5686_read_func read;
bool use_internal_vref;
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 388ddd14bfd0..7723bd313fc6 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -82,6 +82,7 @@ struct ad5755_chip_info {
* @pwr_down: bitmask which contains whether a channel is powered down or not
* @ctrl: software shadow of the channel ctrl registers
* @channels: iio channel spec for the device
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
struct ad5755_state {
@@ -90,6 +91,7 @@ struct ad5755_state {
unsigned int pwr_down;
unsigned int ctrl[AD5755_NUM_CHANNELS];
struct iio_chan_spec channels[AD5755_NUM_CHANNELS];
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -174,11 +176,12 @@ static int ad5755_write_ctrl_unlocked(struct iio_dev *indio_dev,
static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
unsigned int val)
{
+ struct ad5755_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5755_write_unlocked(indio_dev, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -186,11 +189,12 @@ static int ad5755_write(struct iio_dev *indio_dev, unsigned int reg,
static int ad5755_write_ctrl(struct iio_dev *indio_dev, unsigned int channel,
unsigned int reg, unsigned int val)
{
+ struct ad5755_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = ad5755_write_ctrl_unlocked(indio_dev, channel, reg, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -211,7 +215,7 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32(AD5755_READ_FLAG | (addr << 16));
st->data[1].d32 = cpu_to_be32(AD5755_NOOP);
@@ -220,7 +224,7 @@ static int ad5755_read(struct iio_dev *indio_dev, unsigned int addr)
if (ret >= 0)
ret = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -246,7 +250,7 @@ static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
struct ad5755_state *st = iio_priv(indio_dev);
unsigned int mask = BIT(channel);
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
if ((bool)(st->pwr_down & mask) == pwr_down)
goto out_unlock;
@@ -266,7 +270,7 @@ static int ad5755_set_channel_pwr_down(struct iio_dev *indio_dev,
}
out_unlock:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return 0;
}
@@ -746,6 +750,8 @@ static int ad5755_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->num_channels = AD5755_NUM_CHANNELS;
+ mutex_init(&st->lock);
+
if (spi->dev.of_node)
pdata = ad5755_parse_dt(&spi->dev);
else
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index 7468fbd11684..67c4fa75c6f1 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -57,11 +57,13 @@ enum ad5761_supported_device_ids {
* @use_intref: true when the internal voltage reference is used
* @vref: actual voltage reference in mVolts
* @range: output range mode used
+ * @lock: lock to protect the data buffer during SPI ops
* @data: cache aligned spi buffer
*/
struct ad5761_state {
struct spi_device *spi;
struct regulator *vref_reg;
+ struct mutex lock;
bool use_intref;
int vref;
@@ -124,9 +126,9 @@ static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val)
struct ad5761_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = _ad5761_spi_write(st, addr, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -163,9 +165,9 @@ static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val)
struct ad5761_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
ret = _ad5761_spi_read(st, addr, val);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -368,6 +370,8 @@ static int ad5761_probe(struct spi_device *spi)
if (pdata)
voltage_range = pdata->voltage_range;
+ mutex_init(&st->lock);
+
ret = ad5761_spi_set_range(st, voltage_range);
if (ret)
goto disable_regulator_err;
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index f7ab211604a1..5b0f0fe354f6 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -46,6 +46,7 @@ struct ad5764_chip_info {
* @spi: spi_device
* @chip_info: chip info
* @vref_reg: vref supply regulators
+ * @lock: lock to protect the data buffer during SPI ops
* @data: spi transfer buffers
*/
@@ -53,6 +54,7 @@ struct ad5764_state {
struct spi_device *spi;
const struct ad5764_chip_info *chip_info;
struct regulator_bulk_data vref_reg[2];
+ struct mutex lock;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -126,11 +128,11 @@ static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
struct ad5764_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((reg << 16) | val);
ret = spi_write(st->spi, &st->data[0].d8[1], 3);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -151,7 +153,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
},
};
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
@@ -159,7 +161,7 @@ static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
if (ret >= 0)
*val = be32_to_cpu(st->data[1].d32) & 0xffff;
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return ret;
}
@@ -295,6 +297,8 @@ static int ad5764_probe(struct spi_device *spi)
indio_dev->num_channels = AD5764_NUM_CHANNELS;
indio_dev->channels = st->chip_info->channels;
+ mutex_init(&st->lock);
+
if (st->chip_info->int_vref == 0) {
st->vref_reg[0].supply = "vrefAB";
st->vref_reg[1].supply = "vrefCD";
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index 7adc91056aa1..f891311f05cf 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -12,6 +12,8 @@
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
#define LTC2632_CMD_WRITE_INPUT_N 0x0
#define LTC2632_CMD_UPDATE_DAC_N 0x1
#define LTC2632_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
@@ -24,6 +26,7 @@
/**
* struct ltc2632_chip_info - chip specific information
* @channels: channel spec for the DAC
+ * @num_channels: DAC channel count of the chip
* @vref_mv: internal reference voltage
*/
struct ltc2632_chip_info {
@@ -53,6 +56,12 @@ enum ltc2632_supported_device_ids {
ID_LTC2632H12,
ID_LTC2632H10,
ID_LTC2632H8,
+ ID_LTC2634L12,
+ ID_LTC2634L10,
+ ID_LTC2634L8,
+ ID_LTC2634H12,
+ ID_LTC2634H10,
+ ID_LTC2634H8,
ID_LTC2636L12,
ID_LTC2636L10,
ID_LTC2636L8,
@@ -75,9 +84,7 @@ static int ltc2632_spi_write(struct spi_device *spi,
* 10-, 8-bit input code followed by 4, 6, or 8 don't care bits.
*/
data = (cmd << 20) | (addr << 16) | (val << shift);
- msg[0] = data >> 16;
- msg[1] = data >> 8;
- msg[2] = data;
+ put_unaligned_be24(data, &msg[0]);
return spi_write(spi, msg, sizeof(msg));
}
@@ -235,6 +242,36 @@ static const struct ltc2632_chip_info ltc2632_chip_info_tbl[] = {
.num_channels = 2,
.vref_mv = 4096,
},
+ [ID_LTC2634L12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634L10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634L8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 4,
+ .vref_mv = 2500,
+ },
+ [ID_LTC2634H12] = {
+ .channels = ltc2632x12_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2634H10] = {
+ .channels = ltc2632x10_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
+ [ID_LTC2634H8] = {
+ .channels = ltc2632x8_channels,
+ .num_channels = 4,
+ .vref_mv = 4096,
+ },
[ID_LTC2636L12] = {
.channels = ltc2632x12_channels,
.num_channels = 8,
@@ -356,6 +393,12 @@ static const struct spi_device_id ltc2632_id[] = {
{ "ltc2632-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H12] },
{ "ltc2632-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H10] },
{ "ltc2632-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2632H8] },
+ { "ltc2634-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L12] },
+ { "ltc2634-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L10] },
+ { "ltc2634-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634L8] },
+ { "ltc2634-h12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H12] },
+ { "ltc2634-h10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H10] },
+ { "ltc2634-h8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2634H8] },
{ "ltc2636-l12", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L12] },
{ "ltc2636-l10", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L10] },
{ "ltc2636-l8", (kernel_ulong_t)&ltc2632_chip_info_tbl[ID_LTC2636L8] },
@@ -386,6 +429,24 @@ static const struct of_device_id ltc2632_of_match[] = {
.compatible = "lltc,ltc2632-h8",
.data = &ltc2632_chip_info_tbl[ID_LTC2632H8]
}, {
+ .compatible = "lltc,ltc2634-l12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L12]
+ }, {
+ .compatible = "lltc,ltc2634-l10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L10]
+ }, {
+ .compatible = "lltc,ltc2634-l8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634L8]
+ }, {
+ .compatible = "lltc,ltc2634-h12",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H12]
+ }, {
+ .compatible = "lltc,ltc2634-h10",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H10]
+ }, {
+ .compatible = "lltc,ltc2634-h8",
+ .data = &ltc2632_chip_info_tbl[ID_LTC2634H8]
+ }, {
.compatible = "lltc,ltc2636-l12",
.data = &ltc2632_chip_info_tbl[ID_LTC2636L12]
}, {
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index 7f1e9317c3f3..9417a4a3e22a 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -36,6 +36,7 @@ struct vf610_dac {
struct device *dev;
enum vf610_conversion_mode_sel conv_mode;
void __iomem *regs;
+ struct mutex lock;
};
static void vf610_dac_init(struct vf610_dac *info)
@@ -64,7 +65,7 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
struct vf610_dac *info = iio_priv(indio_dev);
int val;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
info->conv_mode = mode;
val = readl(info->regs + VF610_DACx_STATCTRL);
if (mode)
@@ -72,7 +73,7 @@ static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
else
val &= ~VF610_DAC_LPEN;
writel(val, info->regs + VF610_DACx_STATCTRL);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return 0;
}
@@ -147,9 +148,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&info->lock);
writel(VF610_DAC_DAT0(val), info->regs);
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&info->lock);
return 0;
default:
@@ -205,6 +206,8 @@ static int vf610_dac_probe(struct platform_device *pdev)
indio_dev->channels = vf610_dac_iio_channels;
indio_dev->num_channels = ARRAY_SIZE(vf610_dac_iio_channels);
+ mutex_init(&info->lock);
+
ret = clk_prepare_enable(info->clk);
if (ret) {
dev_err(&pdev->dev,
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 7eaf77707b0b..6daeddf37f60 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -61,7 +61,7 @@ config BMG160
help
Say yes here to build support for BOSCH BMG160 Tri-axis Gyro Sensor
driver connected via I2C or SPI. This driver also supports BMI055
- gyroscope.
+ and BMI088 gyroscopes.
This driver can also be built as a module. If so, the module
will be called bmg160_i2c or bmg160_spi.
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 79e63c8a2ea8..2a9ec08ec561 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -12,6 +12,8 @@
#include <linux/iio/iio.h>
+#include <asm/unaligned.h>
+
#define ADIS16130_CON 0x0
#define ADIS16130_CON_RD (1 << 6)
#define ADIS16130_IOP 0x1
@@ -59,7 +61,7 @@ static int adis16130_spi_read(struct iio_dev *indio_dev, u8 reg_addr, u32 *val)
ret = spi_sync_transfer(st->us, &xfer, 1);
if (ret == 0)
- *val = (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3];
+ *val = get_unaligned_be24(&st->buf[1]);
mutex_unlock(&st->buf_lock);
return ret;
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index a4c967a5fc5c..afdc57af475d 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -148,16 +148,14 @@ DEFINE_DEBUGFS_ATTRIBUTE(adis16136_flash_count_fops,
static int adis16136_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16136 *adis16136 = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
debugfs_create_file_unsafe("serial_number", 0400,
- indio_dev->debugfs_dentry, adis16136,
- &adis16136_serial_fops);
+ d, adis16136, &adis16136_serial_fops);
debugfs_create_file_unsafe("product_id", 0400,
- indio_dev->debugfs_dentry,
- adis16136, &adis16136_product_id_fops);
+ d, adis16136, &adis16136_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
- indio_dev->debugfs_dentry,
- adis16136, &adis16136_flash_count_fops);
+ d, adis16136, &adis16136_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 4fc9c6a3321f..b3fa46bd02cb 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -21,8 +21,8 @@ static int bmg160_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &bmg160_regmap_i2c_conf);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -42,6 +42,7 @@ static int bmg160_i2c_remove(struct i2c_client *client)
static const struct acpi_device_id bmg160_acpi_match[] = {
{"BMG0160", 0},
{"BMI055B", 0},
+ {"BMI088B", 0},
{},
};
@@ -50,6 +51,7 @@ MODULE_DEVICE_TABLE(acpi, bmg160_acpi_match);
static const struct i2c_device_id bmg160_i2c_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
+ {"bmi088_gyro", 0},
{}
};
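
The bmg160 hunks here (and the mpu3050 one further down) switch the regmap error message from casting PTR_ERR() to int over to the %pe printk specifier, which takes the ERR_PTR itself and prints a symbolic errno name (e.g. -ENOMEM) when CONFIG_SYMBOLIC_ERRNAME is enabled. A minimal kernel-style sketch of the pattern (the surrounding function is illustrative):

#include <linux/device.h>
#include <linux/err.h>

static int foo_check(struct device *dev, void *ptr)
{
	if (IS_ERR(ptr)) {
		/* %pe decodes the ERR_PTR, no (int)PTR_ERR() cast needed */
		dev_err(dev, "setup failed: %pe\n", ptr);
		return PTR_ERR(ptr);
	}

	return 0;
}
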
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index 182a59c42507..745962e1e423 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -19,8 +19,8 @@ static int bmg160_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmg160_regmap_spi_conf);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
@@ -37,6 +37,7 @@ static int bmg160_spi_remove(struct spi_device *spi)
static const struct spi_device_id bmg160_spi_id[] = {
{"bmg160", 0},
{"bmi055_gyro", 0},
+ {"bmi088_gyro", 0},
{}
};
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 08cacbbf31e6..7f382aae1dfd 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum gyro_3d_channel {
@@ -326,18 +324,13 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&gyro_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&gyro_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -361,9 +354,7 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&gyro_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -378,8 +369,7 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_GYRO_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&gyro_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &gyro_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index afa8018b9238..ef5bcbc4b45b 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -51,8 +51,8 @@ static int mpu3050_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index 7465ad62391c..9c92ff7a82be 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -37,8 +37,7 @@ static int st_gyro_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
return err;
- err = st_sensors_set_axis_enable(indio_dev,
- (u8)indio_dev->active_scan_mask[0]);
+ err = st_sensors_set_axis_enable(indio_dev, indio_dev->active_scan_mask[0]);
if (err < 0)
goto st_gyro_buffer_predisable;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 26c50b24bc08..c8aa051995d3 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -460,6 +460,7 @@ EXPORT_SYMBOL(st_gyro_get_settings);
int st_gyro_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *gdata = iio_priv(indio_dev);
+ struct st_sensors_platform_data *pdata;
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -477,12 +478,12 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = gdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- gdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &gdata->sensor_settings->fs.fs_avl[0];
+ gdata->current_fullscale = &gdata->sensor_settings->fs.fs_avl[0];
gdata->odr = gdata->sensor_settings->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev,
- (struct st_sensors_platform_data *)&gyro_pdata);
+ pdata = (struct st_sensors_platform_data *)&gyro_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_gyro_power_off;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index dc22dc363a99..e9f87e42ff4f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -23,6 +23,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <asm/unaligned.h>
+
#include "afe440x.h"
#define AFE4403_DRIVER_NAME "afe4403"
@@ -220,13 +222,11 @@ static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val)
if (ret)
return ret;
- ret = spi_write_then_read(afe->spi, &reg, 1, rx, 3);
+ ret = spi_write_then_read(afe->spi, &reg, 1, rx, sizeof(rx));
if (ret)
return ret;
- *val = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ *val = get_unaligned_be24(&rx[0]);
/* Disable reading from the device */
tx[3] = AFE440X_CONTROL0_WRITE;
@@ -322,13 +322,11 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
indio_dev->masklength) {
ret = spi_write_then_read(afe->spi,
&afe4403_channel_values[bit], 1,
- rx, 3);
+ rx, sizeof(rx));
if (ret)
goto err;
- buffer[i++] = (rx[0] << 16) |
- (rx[1] << 8) |
- (rx[2]);
+ buffer[i++] = get_unaligned_be24(&rx[0]);
}
/* Disable reading from the device */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 84010501762d..546fc37ad75d 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -16,7 +16,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -267,11 +267,10 @@ static int max30100_get_current_idx(unsigned int val, int *reg)
static int max30100_led_init(struct max30100_data *data)
{
struct device *dev = &data->client->dev;
- struct device_node *np = dev->of_node;
unsigned int val[2];
int reg, ret;
- ret = of_property_read_u32_array(np, "maxim,led-current-microamp",
+ ret = device_property_read_u32_array(dev, "maxim,led-current-microamp",
(unsigned int *) &val, 2);
if (ret) {
/* Default to 24 mA RED LED, 50 mA IR LED */
@@ -502,7 +501,7 @@ MODULE_DEVICE_TABLE(of, max30100_dt_ids);
static struct i2c_driver max30100_driver = {
.driver = {
.name = MAX30100_DRV_NAME,
- .of_match_table = of_match_ptr(max30100_dt_ids),
+ .of_match_table = max30100_dt_ids,
},
.probe = max30100_probe,
.remove = max30100_remove,
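
The max30100 hunks move from the OF-only of_property_read_u32_array() to the firmware-agnostic device property API, which resolves the same property whether it comes from devicetree or ACPI; that is also why the of_match_ptr() wrapper can be dropped. A minimal kernel-style sketch (the helper function is illustrative; the property name is taken from the hunk above):

#include <linux/device.h>
#include <linux/property.h>

static int foo_read_led_currents(struct device *dev, u32 val[2])
{
	return device_property_read_u32_array(dev,
					      "maxim,led-current-microamp",
					      val, 2);
}
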
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index c99b54b0568d..d2318c4aab0f 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -7,8 +7,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -233,12 +231,8 @@ static int hid_humidity_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
- &iio_pollfunc_store_time, NULL, NULL);
- if (ret)
- return ret;
-
atomic_set(&humid_st->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&humid_st->common_attributes);
if (ret)
@@ -261,7 +255,7 @@ static int hid_humidity_probe(struct platform_device *pdev)
error_remove_callback:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
error_remove_trigger:
- hid_sensor_remove_trigger(&humid_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
return ret;
}
@@ -274,7 +268,7 @@ static int hid_humidity_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_HUMIDITY);
- hid_sensor_remove_trigger(&humid_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &humid_st->common_attributes);
return 0;
}
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 81d50a861c22..9fb3f33614d4 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -74,10 +74,9 @@ static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
int hts221_allocate_trigger(struct hts221_hw *hw)
{
+ struct st_sensors_platform_data *pdata = dev_get_platdata(hw->dev);
struct iio_dev *iio_dev = iio_priv_to_dev(hw);
bool irq_active_low = false, open_drain = false;
- struct device_node *np = hw->dev->of_node;
- struct st_sensors_platform_data *pdata;
unsigned long irq_type;
int err;
@@ -106,8 +105,7 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
if (err < 0)
return err;
- pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
- if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ if (device_property_read_bool(hw->dev, "drive-open-drain") ||
(pdata && pdata->open_drain)) {
irq_type |= IRQF_SHARED;
open_drain = true;
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 4272b7030c44..cab39c4756f8 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -32,8 +32,8 @@ static int hts221_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &hts221_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -63,7 +63,7 @@ static struct i2c_driver hts221_driver = {
.driver = {
.name = "hts221_i2c",
.pm = &hts221_pm_ops,
- .of_match_table = of_match_ptr(hts221_i2c_of_match),
+ .of_match_table = hts221_i2c_of_match,
.acpi_match_table = ACPI_PTR(hts221_acpi_match),
},
.probe = hts221_i2c_probe,
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
index 055dba8897d2..729e86e433b1 100644
--- a/drivers/iio/humidity/hts221_spi.c
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -31,8 +31,8 @@ static int hts221_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &hts221_spi_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -56,7 +56,7 @@ static struct spi_driver hts221_driver = {
.driver = {
.name = "hts221_spi",
.pm = &hts221_pm_ops,
- .of_match_table = of_match_ptr(hts221_spi_of_match),
+ .of_match_table = hts221_spi_of_match,
},
.probe = hts221_spi_probe,
.id_table = hts221_spi_id_table,
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 60bb1029e759..fc4123d518bc 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -29,6 +29,19 @@ config ADIS16460
To compile this driver as a module, choose M here: the module will be
called adis16460.
+config ADIS16475
+ tristate "Analog Devices ADIS16475 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16470, ADIS16475,
+ ADIS16477, ADIS16465, ADIS16467, ADIS16500, ADIS16505, ADIS16507 inertial
+ sensors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adis16475.
+
config ADIS16480
tristate "Analog Devices ADIS16480 and similar IMU driver"
depends on SPI
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 5237fd4bc384..88b2c4555230 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16460) += adis16460.o
+obj-$(CONFIG_ADIS16475) += adis16475.o
obj-$(CONFIG_ADIS16480) += adis16480.o
adis_lib-y += adis.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index a8afd01de4f3..c539dfa3b8d3 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -223,6 +223,31 @@ int __adis_read_reg(struct adis *adis, unsigned int reg,
return ret;
}
EXPORT_SYMBOL_GPL(__adis_read_reg);
+/**
+ * __adis_update_bits_base() - ADIS Update bits function - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size)
+{
+ int ret;
+ u32 __val;
+
+ ret = __adis_read_reg(adis, reg, &__val, size);
+ if (ret)
+ return ret;
+
+ __val = (__val & ~mask) | (val & mask);
+
+ return __adis_write_reg(adis, reg, __val, size);
+}
+EXPORT_SYMBOL_GPL(__adis_update_bits_base);
#ifdef CONFIG_DEBUG_FS
@@ -419,7 +444,7 @@ int __adis_initial_startup(struct adis *adis)
if (prod_id != adis->data->prod_id)
dev_warn(&adis->spi->dev,
- "Device ID(%u) and product ID(%u) do not match.",
+ "Device ID(%u) and product ID(%u) do not match.\n",
adis->data->prod_id, prod_id);
return 0;
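
The new __adis_update_bits_base() is an unlocked read-modify-write on a register of the given byte size: read, clear the masked bits, OR in the new value, write back. A minimal kernel-style sketch of a caller (register address and bit field are illustrative; as with the other __adis_* accessors, the caller is expected to hold the adis state lock):

#include <linux/bits.h>
#include <linux/iio/imu/adis.h>

static int foo_set_flag(struct adis *adis, bool enable)
{
	/* update only bit 0 of an illustrative 16-bit register at 0x60 */
	return __adis_update_bits_base(adis, 0x60, BIT(0),
				       enable ? BIT(0) : 0, 2);
}
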
diff --git a/drivers/iio/imu/adis16400.c b/drivers/iio/imu/adis16400.c
index 05e70c1c4835..229f2ff98469 100644
--- a/drivers/iio/imu/adis16400.c
+++ b/drivers/iio/imu/adis16400.c
@@ -258,7 +258,7 @@ static int adis16400_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16400_product_id_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16400_product_id_fops,
adis16400_show_product_id, NULL, "%lld\n");
static int adis16400_show_flash_count(void *arg, u64 *val)
@@ -275,23 +275,22 @@ static int adis16400_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16400_flash_count_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(adis16400_flash_count_fops,
adis16400_show_flash_count, NULL, "%lld\n");
static int adis16400_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16400_state *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
if (st->variant->flags & ADIS16400_HAS_SERIAL_NUMBER)
- debugfs_create_file("serial_number", 0400,
- indio_dev->debugfs_dentry, st,
- &adis16400_serial_number_fops);
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, st, &adis16400_serial_number_fops);
if (st->variant->flags & ADIS16400_HAS_PROD_ID)
- debugfs_create_file("product_id", 0400,
- indio_dev->debugfs_dentry, st,
- &adis16400_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- st, &adis16400_flash_count_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, st, &adis16400_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, st, &adis16400_flash_count_fops);
return 0;
}
@@ -1194,7 +1193,7 @@ static int adis16400_probe(struct spi_device *spi)
indio_dev->available_scan_masks = st->avail_scan_mask;
st->adis.burst = &adis16400_burst;
if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
- st->adis.burst->extra_len = sizeof(u16);
+ st->adis.burst_extra_len = sizeof(u16);
}
adis16400_data = &st->variant->adis_data;
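
The adis16136 and adis16400 debugfs hunks (and the adis16460 one that follows) apply the same pair of changes: attributes defined with DEFINE_DEBUGFS_ATTRIBUTE() are registered with debugfs_create_file_unsafe(), which skips the full-proxy file operations because the generated read/write helpers already protect the file with debugfs_file_get()/debugfs_file_put(); and the parent dentry is obtained through iio_get_debugfs_dentry() instead of poking indio_dev->debugfs_dentry directly. A minimal kernel-style sketch (names are illustrative):

#include <linux/debugfs.h>

static int foo_counter_get(void *arg, u64 *val)
{
	*val = *(u32 *)arg;	/* arg points at some driver counter */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(foo_counter_fops, foo_counter_get, NULL, "%llu\n");

/* registration, as in the hunks above:
 * debugfs_create_file_unsafe("counter", 0400, parent_dentry, &counter,
 *			      &foo_counter_fops);
 */
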
diff --git a/drivers/iio/imu/adis16460.c b/drivers/iio/imu/adis16460.c
index 0027683d0256..ad20c488a3ba 100644
--- a/drivers/iio/imu/adis16460.c
+++ b/drivers/iio/imu/adis16460.c
@@ -87,8 +87,8 @@ static int adis16460_show_serial_number(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_serial_number_fops,
- adis16460_show_serial_number, NULL, "0x%.4llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_serial_number_fops,
+ adis16460_show_serial_number, NULL, "0x%.4llx\n");
static int adis16460_show_product_id(void *arg, u64 *val)
{
@@ -105,8 +105,8 @@ static int adis16460_show_product_id(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_product_id_fops,
- adis16460_show_product_id, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_product_id_fops,
+ adis16460_show_product_id, NULL, "%llu\n");
static int adis16460_show_flash_count(void *arg, u64 *val)
{
@@ -123,19 +123,20 @@ static int adis16460_show_flash_count(void *arg, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(adis16460_flash_count_fops,
- adis16460_show_flash_count, NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(adis16460_flash_count_fops,
+ adis16460_show_flash_count, NULL, "%lld\n");
static int adis16460_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16460 *adis16460 = iio_priv(indio_dev);
-
- debugfs_create_file("serial_number", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_serial_number_fops);
- debugfs_create_file("product_id", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_product_id_fops);
- debugfs_create_file("flash_count", 0400, indio_dev->debugfs_dentry,
- adis16460, &adis16460_flash_count_fops);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, adis16460, &adis16460_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, adis16460, &adis16460_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, adis16460, &adis16460_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
new file mode 100644
index 000000000000..c6dac4fc67a1
--- /dev/null
+++ b/drivers/iio/imu/adis16475.c
@@ -0,0 +1,1338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADIS16475 IMU driver
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#define ADIS16475_REG_DIAG_STAT 0x02
+#define ADIS16475_REG_X_GYRO_L 0x04
+#define ADIS16475_REG_Y_GYRO_L 0x08
+#define ADIS16475_REG_Z_GYRO_L 0x0C
+#define ADIS16475_REG_X_ACCEL_L 0x10
+#define ADIS16475_REG_Y_ACCEL_L 0x14
+#define ADIS16475_REG_Z_ACCEL_L 0x18
+#define ADIS16475_REG_TEMP_OUT 0x1c
+#define ADIS16475_REG_X_GYRO_BIAS_L 0x40
+#define ADIS16475_REG_Y_GYRO_BIAS_L 0x44
+#define ADIS16475_REG_Z_GYRO_BIAS_L 0x48
+#define ADIS16475_REG_X_ACCEL_BIAS_L 0x4c
+#define ADIS16475_REG_Y_ACCEL_BIAS_L 0x50
+#define ADIS16475_REG_Z_ACCEL_BIAS_L 0x54
+#define ADIS16475_REG_FILT_CTRL 0x5c
+#define ADIS16475_FILT_CTRL_MASK GENMASK(2, 0)
+#define ADIS16475_FILT_CTRL(x) FIELD_PREP(ADIS16475_FILT_CTRL_MASK, x)
+#define ADIS16475_REG_MSG_CTRL 0x60
+#define ADIS16475_MSG_CTRL_DR_POL_MASK BIT(0)
+#define ADIS16475_MSG_CTRL_DR_POL(x) \
+ FIELD_PREP(ADIS16475_MSG_CTRL_DR_POL_MASK, x)
+#define ADIS16475_SYNC_MODE_MASK GENMASK(4, 2)
+#define ADIS16475_SYNC_MODE(x) FIELD_PREP(ADIS16475_SYNC_MODE_MASK, x)
+#define ADIS16475_REG_UP_SCALE 0x62
+#define ADIS16475_REG_DEC_RATE 0x64
+#define ADIS16475_REG_GLOB_CMD 0x68
+#define ADIS16475_REG_FIRM_REV 0x6c
+#define ADIS16475_REG_FIRM_DM 0x6e
+#define ADIS16475_REG_FIRM_Y 0x70
+#define ADIS16475_REG_PROD_ID 0x72
+#define ADIS16475_REG_SERIAL_NUM 0x74
+#define ADIS16475_REG_FLASH_CNT 0x7c
+#define ADIS16500_BURST32_MASK BIT(9)
+#define ADIS16500_BURST32(x) FIELD_PREP(ADIS16500_BURST32_MASK, x)
+/* number of data elements in burst mode */
+#define ADIS16475_BURST32_MAX_DATA 32
+#define ADIS16475_BURST_MAX_DATA 20
+#define ADIS16475_MAX_SCAN_DATA 20
+/* spi max speed in burst mode */
+#define ADIS16475_BURST_MAX_SPEED 1000000
+#define ADIS16475_LSB_DEC_MASK BIT(0)
+#define ADIS16475_LSB_FIR_MASK BIT(1)
+
+enum {
+ ADIS16475_SYNC_DIRECT = 1,
+ ADIS16475_SYNC_SCALED,
+ ADIS16475_SYNC_OUTPUT,
+ ADIS16475_SYNC_PULSE = 5,
+};
+
+struct adis16475_sync {
+ u16 sync_mode;
+ u16 min_rate;
+ u16 max_rate;
+};
+
+struct adis16475_chip_info {
+ const struct iio_chan_spec *channels;
+ const struct adis16475_sync *sync;
+ const struct adis_data adis_data;
+ const char *name;
+ u32 num_channels;
+ u32 gyro_max_val;
+ u32 gyro_max_scale;
+ u32 accel_max_val;
+ u32 accel_max_scale;
+ u32 temp_scale;
+ u32 int_clk;
+ u16 max_dec;
+ u8 num_sync;
+ bool has_burst32;
+};
+
+struct adis16475 {
+ const struct adis16475_chip_info *info;
+ struct adis adis;
+ u32 clk_freq;
+ bool burst32;
+ unsigned long lsb_flag;
+ /* Alignment needed for the timestamp */
+ __be16 data[ADIS16475_MAX_SCAN_DATA] __aligned(8);
+};
+
+enum {
+ ADIS16475_SCAN_GYRO_X,
+ ADIS16475_SCAN_GYRO_Y,
+ ADIS16475_SCAN_GYRO_Z,
+ ADIS16475_SCAN_ACCEL_X,
+ ADIS16475_SCAN_ACCEL_Y,
+ ADIS16475_SCAN_ACCEL_Z,
+ ADIS16475_SCAN_TEMP,
+ ADIS16475_SCAN_DIAG_S_FLAGS,
+ ADIS16475_SCAN_CRC_FAILURE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t adis16475_show_firmware_revision(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16475 *st = file->private_data;
+ char buf[7];
+ size_t len;
+ u16 rev;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_REV, &rev);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16475_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16475_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t adis16475_show_firmware_date(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16475 *st = file->private_data;
+ u16 md, year;
+ char buf[12];
+ size_t len;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_Y, &year);
+ if (ret)
+ return ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FIRM_DM, &md);
+ if (ret)
+ return ret;
+
+ len = snprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n", md >> 8, md & 0xff,
+ year);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16475_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16475_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16475_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u16 serial;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_SERIAL_NUM, &serial);
+ if (ret)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_serial_number_fops,
+ adis16475_show_serial_number, NULL, "0x%.4llx\n");
+
+static int adis16475_show_product_id(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_product_id_fops,
+ adis16475_show_product_id, NULL, "%llu\n");
+
+static int adis16475_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16475 *st = arg;
+ u32 flash_count;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16475_REG_FLASH_CNT,
+ &flash_count);
+ if (ret)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16475_flash_count_fops,
+ adis16475_show_flash_count, NULL, "%lld\n");
+
+static void adis16475_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400,
+ d, st, &adis16475_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400,
+ d, st, &adis16475_product_id_fops);
+ debugfs_create_file_unsafe("flash_count", 0400,
+ d, st, &adis16475_flash_count_fops);
+ debugfs_create_file("firmware_revision", 0400,
+ d, st, &adis16475_firmware_revision_fops);
+ debugfs_create_file("firmware_date", 0400, d,
+ st, &adis16475_firmware_date_fops);
+}
+#else
+static void adis16475_debugfs_init(struct iio_dev *indio_dev)
+{
+}
+#endif
+
+static int adis16475_get_freq(struct adis16475 *st, u32 *freq)
+{
+ int ret;
+ u16 dec;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, &dec);
+ if (ret)
+ return ret;
+
+ *freq = DIV_ROUND_CLOSEST(st->clk_freq, dec + 1);
+
+ return 0;
+}
+
+static int adis16475_set_freq(struct adis16475 *st, const u32 freq)
+{
+ u16 dec;
+ int ret;
+
+ if (!freq)
+ return -EINVAL;
+
+ dec = DIV_ROUND_CLOSEST(st->clk_freq, freq);
+
+ if (dec)
+ dec--;
+
+ if (dec > st->info->max_dec)
+ dec = st->info->max_dec;
+
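+ /*
+ * For example, with the internal 2000 Hz sample clock a 100 Hz request
+ * programs DEC_RATE = 19, i.e. the device decimates by 20.
+ */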
+ ret = adis_write_reg_16(&st->adis, ADIS16475_REG_DEC_RATE, dec);
+ if (ret)
+ return ret;
+
+ /*
+ * If decimation is used, then gyro and accel data will have meaningful
+ * bits in the LSB registers. This info is used in the trigger handler.
+ */
+ assign_bit(ADIS16475_LSB_DEC_MASK, &st->lsb_flag, dec);
+
+ return 0;
+}
+
+/* The values are approximate. */
+static const u32 adis16475_3db_freqs[] = {
+ [0] = 720, /* Filter disabled, full BW (~720Hz) */
+ [1] = 360,
+ [2] = 164,
+ [3] = 80,
+ [4] = 40,
+ [5] = 20,
+ [6] = 10,
+};
+
+static int adis16475_get_filter(struct adis16475 *st, u32 *filter)
+{
+ u16 filter_sz;
+ int ret;
+ const int mask = ADIS16475_FILT_CTRL_MASK;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16475_REG_FILT_CTRL, &filter_sz);
+ if (ret)
+ return ret;
+
+ *filter = adis16475_3db_freqs[filter_sz & mask];
+
+ return 0;
+}
+
+static int adis16475_set_filter(struct adis16475 *st, const u32 filter)
+{
+ int i = ARRAY_SIZE(adis16475_3db_freqs);
+ int ret;
+
+ while (--i) {
+ if (adis16475_3db_freqs[i] >= filter)
+ break;
+ }
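+ /*
+ * The table is sorted in descending order, so the loop above picks the
+ * lowest available 3dB frequency that still satisfies the request,
+ * e.g. a 50 Hz request ends up with the 80 Hz setting (FILT_CTRL = 3).
+ */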
+
+ ret = adis_write_reg_16(&st->adis, ADIS16475_REG_FILT_CTRL,
+ ADIS16475_FILT_CTRL(i));
+ if (ret)
+ return ret;
+
+ /*
+ * If FIR is used, then gyro and accel data will have meaningful
+ * bits in the LSB registers. This info is used in the trigger handler.
+ */
+ assign_bit(ADIS16475_LSB_FIR_MASK, &st->lsb_flag, i);
+
+ return 0;
+}
+
+static const u32 adis16475_calib_regs[] = {
+ [ADIS16475_SCAN_GYRO_X] = ADIS16475_REG_X_GYRO_BIAS_L,
+ [ADIS16475_SCAN_GYRO_Y] = ADIS16475_REG_Y_GYRO_BIAS_L,
+ [ADIS16475_SCAN_GYRO_Z] = ADIS16475_REG_Z_GYRO_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_X] = ADIS16475_REG_X_ACCEL_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_Y] = ADIS16475_REG_Y_ACCEL_BIAS_L,
+ [ADIS16475_SCAN_ACCEL_Z] = ADIS16475_REG_Z_ACCEL_BIAS_L,
+};
+
+static int adis16475_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ int ret;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->gyro_max_val;
+ *val2 = st->info->gyro_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ACCEL:
+ *val = st->info->accel_max_val;
+ *val2 = st->info->accel_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = st->info->temp_scale;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&st->adis,
+ adis16475_calib_regs[chan->scan_index],
+ val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = adis16475_get_filter(st, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adis16475_get_freq(st, &tmp);
+ if (ret)
+ return ret;
+
+ *val = tmp / 1000;
+ *val2 = (tmp % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16475_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct adis16475 *st = iio_priv(indio_dev);
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ tmp = val * 1000 + val2 / 1000;
+ return adis16475_set_freq(st, tmp);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return adis16475_set_filter(st, val);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&st->adis,
+ adis16475_calib_regs[chan->scan_index],
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16475_MOD_CHAN(_type, _mod, _address, _si, _r_bits, _s_bits) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (_r_bits), \
+ .storagebits = (_s_bits), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16475_GYRO_CHANNEL(_mod) \
+ ADIS16475_MOD_CHAN(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _GYRO_L, \
+ ADIS16475_SCAN_GYRO_ ## _mod, 32, 32)
+
+#define ADIS16475_ACCEL_CHANNEL(_mod) \
+ ADIS16475_MOD_CHAN(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16475_REG_ ## _mod ## _ACCEL_L, \
+ ADIS16475_SCAN_ACCEL_ ## _mod, 32, 32)
+
+#define ADIS16475_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .address = ADIS16475_REG_TEMP_OUT, \
+ .scan_index = ADIS16475_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+static const struct iio_chan_spec adis16475_channels[] = {
+ ADIS16475_GYRO_CHANNEL(X),
+ ADIS16475_GYRO_CHANNEL(Y),
+ ADIS16475_GYRO_CHANNEL(Z),
+ ADIS16475_ACCEL_CHANNEL(X),
+ ADIS16475_ACCEL_CHANNEL(Y),
+ ADIS16475_ACCEL_CHANNEL(Z),
+ ADIS16475_TEMP_CHANNEL(),
+ IIO_CHAN_SOFT_TIMESTAMP(7)
+};
+
+enum adis16475_variant {
+ ADIS16470,
+ ADIS16475_1,
+ ADIS16475_2,
+ ADIS16475_3,
+ ADIS16477_1,
+ ADIS16477_2,
+ ADIS16477_3,
+ ADIS16465_1,
+ ADIS16465_2,
+ ADIS16465_3,
+ ADIS16467_1,
+ ADIS16467_2,
+ ADIS16467_3,
+ ADIS16500,
+ ADIS16505_1,
+ ADIS16505_2,
+ ADIS16505_3,
+ ADIS16507_1,
+ ADIS16507_2,
+ ADIS16507_3,
+};
+
+enum {
+ ADIS16475_DIAG_STAT_DATA_PATH = 1,
+ ADIS16475_DIAG_STAT_FLASH_MEM,
+ ADIS16475_DIAG_STAT_SPI,
+ ADIS16475_DIAG_STAT_STANDBY,
+ ADIS16475_DIAG_STAT_SENSOR,
+ ADIS16475_DIAG_STAT_MEMORY,
+ ADIS16475_DIAG_STAT_CLK,
+};
+
+static const char * const adis16475_status_error_msgs[] = {
+ [ADIS16475_DIAG_STAT_DATA_PATH] = "Data Path Overrun",
+ [ADIS16475_DIAG_STAT_FLASH_MEM] = "Flash memory update failure",
+ [ADIS16475_DIAG_STAT_SPI] = "SPI communication error",
+ [ADIS16475_DIAG_STAT_STANDBY] = "Standby mode",
+ [ADIS16475_DIAG_STAT_SENSOR] = "Sensor failure",
+ [ADIS16475_DIAG_STAT_MEMORY] = "Memory failure",
+ [ADIS16475_DIAG_STAT_CLK] = "Clock error",
+};
+
+static int adis16475_enable_irq(struct adis *adis, bool enable)
+{
+ /*
+ * There is no way to gate the data-ready signal internally inside the
+ * ADIS16475. We can only control its polarity...
+ */
+ if (enable)
+ enable_irq(adis->spi->irq);
+ else
+ disable_irq(adis->spi->irq);
+
+ return 0;
+}
+
+#define ADIS16475_DATA(_prod_id, _timeouts) \
+{ \
+ .msc_ctrl_reg = ADIS16475_REG_MSG_CTRL, \
+ .glob_cmd_reg = ADIS16475_REG_GLOB_CMD, \
+ .diag_stat_reg = ADIS16475_REG_DIAG_STAT, \
+ .prod_id_reg = ADIS16475_REG_PROD_ID, \
+ .prod_id = (_prod_id), \
+ .self_test_mask = BIT(2), \
+ .self_test_reg = ADIS16475_REG_GLOB_CMD, \
+ .cs_change_delay = 16, \
+ .read_delay = 5, \
+ .write_delay = 5, \
+ .status_error_msgs = adis16475_status_error_msgs, \
+ .status_error_mask = BIT(ADIS16475_DIAG_STAT_DATA_PATH) | \
+ BIT(ADIS16475_DIAG_STAT_FLASH_MEM) | \
+ BIT(ADIS16475_DIAG_STAT_SPI) | \
+ BIT(ADIS16475_DIAG_STAT_STANDBY) | \
+ BIT(ADIS16475_DIAG_STAT_SENSOR) | \
+ BIT(ADIS16475_DIAG_STAT_MEMORY) | \
+ BIT(ADIS16475_DIAG_STAT_CLK), \
+ .enable_irq = adis16475_enable_irq, \
+ .timeouts = (_timeouts), \
+}
+
+static const struct adis16475_sync adis16475_sync_mode[] = {
+ { ADIS16475_SYNC_OUTPUT },
+ { ADIS16475_SYNC_DIRECT, 1900, 2100 },
+ { ADIS16475_SYNC_SCALED, 1, 128 },
+ { ADIS16475_SYNC_PULSE, 1000, 2100 },
+};
+
+static const struct adis_timeout adis16475_timeouts = {
+ .reset_ms = 200,
+ .sw_reset_ms = 200,
+ .self_test_ms = 20,
+};
+
+static const struct adis_timeout adis1650x_timeouts = {
+ .reset_ms = 260,
+ .sw_reset_ms = 260,
+ .self_test_ms = 30,
+};
+
+static const struct adis16475_chip_info adis16475_chip_info[] = {
+ [ADIS16470] = {
+ .name = "adis16470",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16470, &adis16475_timeouts),
+ },
+ [ADIS16475_1] = {
+ .name = "adis16475-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16475_2] = {
+ .name = "adis16475-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16475_3] = {
+ .name = "adis16475-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16475, &adis16475_timeouts),
+ },
+ [ADIS16477_1] = {
+ .name = "adis16477-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16477_2] = {
+ .name = "adis16477-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16477_3] = {
+ .name = "adis16477-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
+ },
+ [ADIS16465_1] = {
+ .name = "adis16465-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16465_2] = {
+ .name = "adis16465-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16465_3] = {
+ .name = "adis16465-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(4000 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16465, &adis16475_timeouts),
+ },
+ [ADIS16467_1] = {
+ .name = "adis16467-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16467_2] = {
+ .name = "adis16467-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16467_3] = {
+ .name = "adis16467-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(800 << 16),
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .adis_data = ADIS16475_DATA(16467, &adis16475_timeouts),
+ },
+ [ADIS16500] = {
+ .name = "adis16500",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16500, &adis1650x_timeouts),
+ },
+ [ADIS16505_1] = {
+ .name = "adis16505-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16505_2] = {
+ .name = "adis16505-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16505_3] = {
+ .name = "adis16505-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 78,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16505, &adis1650x_timeouts),
+ },
+ [ADIS16507_1] = {
+ .name = "adis16507-1",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(160 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+ [ADIS16507_2] = {
+ .name = "adis16507-2",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(40 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+ [ADIS16507_3] = {
+ .name = "adis16507-3",
+ .num_channels = ARRAY_SIZE(adis16475_channels),
+ .channels = adis16475_channels,
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(10 << 16),
+ .accel_max_val = 392,
+ .accel_max_scale = 32000 << 16,
+ .temp_scale = 100,
+ .int_clk = 2000,
+ .max_dec = 1999,
+ .sync = adis16475_sync_mode,
+ /* pulse sync not supported */
+ .num_sync = ARRAY_SIZE(adis16475_sync_mode) - 1,
+ .has_burst32 = true,
+ .adis_data = ADIS16475_DATA(16507, &adis1650x_timeouts),
+ },
+};
+
+static const struct iio_info adis16475_info = {
+ .read_raw = &adis16475_read_raw,
+ .write_raw = &adis16475_write_raw,
+ .update_scan_mode = adis_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+static struct adis_burst adis16475_burst = {
+ .en = true,
+ .reg_cmd = ADIS16475_REG_GLOB_CMD,
+ /*
+ * adis_update_scan_mode_burst() sets the burst length according to the
+ * number of channels and allocates 16 bits for each. However, adis1647x
+ * devices also need space for DIAG_STAT, DATA_CNTR or TIME_STAMP
+ * (depending on the clock mode; we don't care about these bytes...) and
+ * the CRC.
+ */
+ .extra_len = 3 * sizeof(u16),
+ .burst_max_len = ADIS16475_BURST32_MAX_DATA,
+};
+
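+/*
+ * The last 16-bit word of a burst frame is a simple checksum over all
+ * preceding bytes: subtracting every data byte from it must leave zero
+ * for a valid frame.
+ */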
+static bool adis16475_validate_crc(const u8 *buffer, u16 crc,
+ const bool burst32)
+{
+ int i;
+ /* burst32 has 6 extra elements holding the low gyro and accel words */
+ const u16 sz = burst32 ? ADIS16475_BURST32_MAX_DATA :
+ ADIS16475_BURST_MAX_DATA;
+
+ for (i = 0; i < sz - 2; i++)
+ crc -= buffer[i];
+
+ return crc == 0;
+}
+
+static void adis16475_burst32_check(struct adis16475 *st)
+{
+ int ret;
+ struct adis *adis = &st->adis;
+
+ if (!st->info->has_burst32)
+ return;
+
+ if (st->lsb_flag && !st->burst32) {
+ const u16 en = ADIS16500_BURST32(1);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST32_MASK, en);
+ if (ret)
+ return;
+
+ st->burst32 = true;
+
+ /*
+ * In 32-bit mode we need 2 extra bytes for each gyro
+ * and accel channel.
+ */
+ adis->burst_extra_len = 6 * sizeof(u16);
+ adis->xfer[1].len += 6 * sizeof(u16);
+ dev_dbg(&adis->spi->dev, "Enable burst32 mode, xfer:%d",
+ adis->xfer[1].len);
+
+ } else if (!st->lsb_flag && st->burst32) {
+ const u16 en = ADIS16500_BURST32(0);
+
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16500_BURST32_MASK, en);
+ if (ret)
+ return;
+
+ st->burst32 = false;
+
+ /* Remove the extra bytes */
+ adis->burst_extra_len = 0;
+ adis->xfer[1].len -= 6 * sizeof(u16);
+ dev_dbg(&adis->spi->dev, "Disable burst32 mode, xfer:%d\n",
+ adis->xfer[1].len);
+ }
+}
+
+static irqreturn_t adis16475_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16475 *st = iio_priv(indio_dev);
+ struct adis *adis = &st->adis;
+ int ret, bit, i = 0;
+ __be16 *buffer;
+ u16 crc;
+ bool valid;
+ /* offset until the first element after gyro and accel */
+ const u8 offset = st->burst32 ? 13 : 7;
+ const u32 cached_spi_speed_hz = adis->spi->max_speed_hz;
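+ /*
+ * A burst16 frame is 10 16-bit words: DIAG_STAT, three gyro and three
+ * accel words, TEMP, DATA_CNTR/TIME_STAMP and the checksum. burst32
+ * carries a low and a high word per gyro/accel channel, which pushes
+ * TEMP out to index 13.
+ */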
+
+ adis->spi->max_speed_hz = ADIS16475_BURST_MAX_SPEED;
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ return ret;
+
+ adis->spi->max_speed_hz = cached_spi_speed_hz;
+ buffer = adis->buffer;
+
+ crc = be16_to_cpu(buffer[offset + 2]);
+ valid = adis16475_validate_crc(adis->buffer, crc, st->burst32);
+ if (!valid) {
+ dev_err(&adis->spi->dev, "Invalid crc\n");
+ goto check_burst32;
+ }
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ /*
+ * When burst mode is used, the system flags are the first data
+ * channel in the sequence, but their scan index is 7.
+ */
+ switch (bit) {
+ case ADIS16475_SCAN_TEMP:
+ st->data[i++] = buffer[offset];
+ break;
+ case ADIS16475_SCAN_GYRO_X ... ADIS16475_SCAN_ACCEL_Z:
+ /*
+ * The first 2 bytes on the received data are the
+ * DIAG_STAT reg, hence the +1 offset here...
+ */
+ if (st->burst32) {
+ /* upper 16 */
+ st->data[i++] = buffer[bit * 2 + 2];
+ /* lower 16 */
+ st->data[i++] = buffer[bit * 2 + 1];
+ } else {
+ st->data[i++] = buffer[bit + 1];
+ /*
+ * Don't bother doing the manual read if the
+ * device supports burst32. burst32 will be
+ * enabled in the next call to
+ * adis16475_burst32_check()...
+ */
+ if (st->lsb_flag && !st->info->has_burst32) {
+ u16 val = 0;
+ const u32 reg = ADIS16475_REG_X_GYRO_L +
+ bit * 4;
+
+ adis_read_reg_16(adis, reg, &val);
+ st->data[i++] = cpu_to_be16(val);
+ } else {
+ /* lower not used */
+ st->data[i++] = 0;
+ }
+ }
+ break;
+ }
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->data, pf->timestamp);
+check_burst32:
+ /*
+ * We only check the burst mode at the end of the current capture since
+ * it takes a full data ready cycle for the device to update the burst
+ * array.
+ */
+ adis16475_burst32_check(st);
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static void adis16475_disable_clk(void *data)
+{
+ clk_disable_unprepare((struct clk *)data);
+}
+
+static int adis16475_config_sync_mode(struct adis16475 *st)
+{
+ int ret;
+ struct device *dev = &st->adis.spi->dev;
+ const struct adis16475_sync *sync;
+ u32 sync_mode;
+
+ /* default to internal clk */
+ st->clk_freq = st->info->int_clk * 1000;
+
+ ret = device_property_read_u32(dev, "adi,sync-mode", &sync_mode);
+ if (ret)
+ return 0;
+
+ if (sync_mode >= st->info->num_sync) {
+ dev_err(dev, "Invalid sync mode: %u for %s\n", sync_mode,
+ st->info->name);
+ return -EINVAL;
+ }
+
+ sync = &st->info->sync[sync_mode];
+
+ /* All the other modes require an external input signal */
+ if (sync->sync_mode != ADIS16475_SYNC_OUTPUT) {
+ struct clk *clk = devm_clk_get(dev, NULL);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adis16475_disable_clk, clk);
+ if (ret)
+ return ret;
+
+ st->clk_freq = clk_get_rate(clk);
+ if (st->clk_freq < sync->min_rate ||
+ st->clk_freq > sync->max_rate) {
+ dev_err(dev,
+ "Clk rate:%u not in a valid range:[%u %u]\n",
+ st->clk_freq, sync->min_rate, sync->max_rate);
+ return -EINVAL;
+ }
+
+ if (sync->sync_mode == ADIS16475_SYNC_SCALED) {
+ u16 up_scale;
+ u32 scaled_out_freq = 0;
+ /*
+ * If we are in scaled mode, we must have an up_scale.
+ * In scaled mode the allowable input clock range is
+ * 1 Hz to 128 Hz, and the allowable output range is
+ * 1900 to 2100 Hz. Hence, a scale must be given to
+ * get the allowable output.
+ */
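+ /*
+ * For instance, a 4 Hz external sync combined with
+ * adi,scaled-output-hz = 2000 programs UP_SCALE = 500.
+ */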
+ ret = device_property_read_u32(dev,
+ "adi,scaled-output-hz",
+ &scaled_out_freq);
+ if (ret) {
+ dev_err(dev, "adi,scaled-output-hz must be given when in scaled sync mode");
+ return -EINVAL;
+ } else if (scaled_out_freq < 1900 ||
+ scaled_out_freq > 2100) {
+ dev_err(dev, "Invalid value: %u for adi,scaled-output-hz",
+ scaled_out_freq);
+ return -EINVAL;
+ }
+
+ up_scale = DIV_ROUND_CLOSEST(scaled_out_freq,
+ st->clk_freq);
+
+ ret = __adis_write_reg_16(&st->adis,
+ ADIS16475_REG_UP_SCALE,
+ up_scale);
+ if (ret)
+ return ret;
+
+ st->clk_freq = scaled_out_freq;
+ }
+
+ st->clk_freq *= 1000;
+ }
+ /*
+ * Keep in mind that the mask for the clk modes in adis1650*
+ * chips is different (1100 instead of 11100). However, we
+ * are not configuring BIT(4) in these chips and the default
+ * value is 0, so we are fine doing the operations below. This is
+ * kept for simplicity, avoiding extra variables in chip_info.
+ */
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_SYNC_MODE_MASK, sync->sync_mode);
+ if (ret)
+ return ret;
+
+ usleep_range(250, 260);
+
+ return 0;
+}
+
+static int adis16475_config_irq_pin(struct adis16475 *st)
+{
+ int ret;
+ struct irq_data *desc;
+ u32 irq_type;
+ u16 val = 0;
+ u8 polarity;
+ struct spi_device *spi = st->adis.spi;
+
+ desc = irq_get_irq_data(spi->irq);
+ if (!desc) {
+ dev_err(&spi->dev, "Could not find IRQ %d\n", spi->irq);
+ return -EINVAL;
+ }
+ /*
+ * It is possible to configure the data ready polarity. Furthermore, we
+ * need to update the adis struct if we want data ready as active low.
+ */
+ irq_type = irqd_get_trigger_type(desc);
+ if (irq_type == IRQ_TYPE_EDGE_RISING) {
+ polarity = 1;
+ st->adis.irq_flag = IRQF_TRIGGER_RISING;
+ } else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
+ polarity = 0;
+ st->adis.irq_flag = IRQF_TRIGGER_FALLING;
+ } else {
+ dev_err(&spi->dev, "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ return -EINVAL;
+ }
+
+ val = ADIS16475_MSG_CTRL_DR_POL(polarity);
+ ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL,
+ ADIS16475_MSG_CTRL_DR_POL_MASK, val);
+ if (ret)
+ return ret;
+ /*
+ * There is a delay when writing any bits of the MSC_CTRL register. It
+ * should not exceed 200us, so 250us should be more than enough.
+ */
+ usleep_range(250, 260);
+
+ return 0;
+}
+
+static const struct of_device_id adis16475_of_match[] = {
+ { .compatible = "adi,adis16470",
+ .data = &adis16475_chip_info[ADIS16470] },
+ { .compatible = "adi,adis16475-1",
+ .data = &adis16475_chip_info[ADIS16475_1] },
+ { .compatible = "adi,adis16475-2",
+ .data = &adis16475_chip_info[ADIS16475_2] },
+ { .compatible = "adi,adis16475-3",
+ .data = &adis16475_chip_info[ADIS16475_3] },
+ { .compatible = "adi,adis16477-1",
+ .data = &adis16475_chip_info[ADIS16477_1] },
+ { .compatible = "adi,adis16477-2",
+ .data = &adis16475_chip_info[ADIS16477_2] },
+ { .compatible = "adi,adis16477-3",
+ .data = &adis16475_chip_info[ADIS16477_3] },
+ { .compatible = "adi,adis16465-1",
+ .data = &adis16475_chip_info[ADIS16465_1] },
+ { .compatible = "adi,adis16465-2",
+ .data = &adis16475_chip_info[ADIS16465_2] },
+ { .compatible = "adi,adis16465-3",
+ .data = &adis16475_chip_info[ADIS16465_3] },
+ { .compatible = "adi,adis16467-1",
+ .data = &adis16475_chip_info[ADIS16467_1] },
+ { .compatible = "adi,adis16467-2",
+ .data = &adis16475_chip_info[ADIS16467_2] },
+ { .compatible = "adi,adis16467-3",
+ .data = &adis16475_chip_info[ADIS16467_3] },
+ { .compatible = "adi,adis16500",
+ .data = &adis16475_chip_info[ADIS16500] },
+ { .compatible = "adi,adis16505-1",
+ .data = &adis16475_chip_info[ADIS16505_1] },
+ { .compatible = "adi,adis16505-2",
+ .data = &adis16475_chip_info[ADIS16505_2] },
+ { .compatible = "adi,adis16505-3",
+ .data = &adis16475_chip_info[ADIS16505_3] },
+ { .compatible = "adi,adis16507-1",
+ .data = &adis16475_chip_info[ADIS16507_1] },
+ { .compatible = "adi,adis16507-2",
+ .data = &adis16475_chip_info[ADIS16507_2] },
+ { .compatible = "adi,adis16507-3",
+ .data = &adis16475_chip_info[ADIS16507_3] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, adis16475_of_match);
+
+static int adis16475_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adis16475 *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ st->adis.burst = &adis16475_burst;
+
+ st->info = device_get_match_data(&spi->dev);
+ if (!st->info)
+ return -EINVAL;
+
+ ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
+ if (ret)
+ return ret;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->info = &adis16475_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = __adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis16475_config_irq_pin(st);
+ if (ret)
+ return ret;
+
+ ret = adis16475_config_sync_mode(st);
+ if (ret)
+ return ret;
+
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16475_trigger_handler);
+ if (ret)
+ return ret;
+
+ adis16475_enable_irq(&st->adis, false);
+
+ ret = devm_iio_device_register(&spi->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ adis16475_debugfs_init(indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver adis16475_driver = {
+ .driver = {
+ .name = "adis16475",
+ .of_match_table = adis16475_of_match,
+ },
+ .probe = adis16475_probe,
+};
+module_spi_driver(adis16475_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16475 IMU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index cfae0e4476e7..6a471eee110e 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -284,22 +284,18 @@ DEFINE_DEBUGFS_ATTRIBUTE(adis16480_flash_count_fops,
static int adis16480_debugfs_init(struct iio_dev *indio_dev)
{
struct adis16480 *adis16480 = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
debugfs_create_file_unsafe("firmware_revision", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_firmware_revision_fops);
+ d, adis16480, &adis16480_firmware_revision_fops);
debugfs_create_file_unsafe("firmware_date", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_firmware_date_fops);
+ d, adis16480, &adis16480_firmware_date_fops);
debugfs_create_file_unsafe("serial_number", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_serial_number_fops);
+ d, adis16480, &adis16480_serial_number_fops);
debugfs_create_file_unsafe("product_id", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_product_id_fops);
+ d, adis16480, &adis16480_product_id_fops);
debugfs_create_file_unsafe("flash_count", 0400,
- indio_dev->debugfs_dentry, adis16480,
- &adis16480_flash_count_fops);
+ d, adis16480, &adis16480_flash_count_fops);
return 0;
}
diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
index 04e5e2a0fd6b..5b4225ee09b9 100644
--- a/drivers/iio/imu/adis_buffer.c
+++ b/drivers/iio/imu/adis_buffer.c
@@ -23,25 +23,30 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct adis *adis = iio_device_get_drvdata(indio_dev);
- unsigned int burst_length;
+ unsigned int burst_length, burst_max_length;
u8 *tx;
/* All but the timestamp channel */
burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
- burst_length += adis->burst->extra_len;
+ burst_length += adis->burst->extra_len + adis->burst_extra_len;
+
+ if (adis->burst->burst_max_len)
+ burst_max_length = adis->burst->burst_max_len;
+ else
+ burst_max_length = burst_length;
adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
if (!adis->xfer)
return -ENOMEM;
- adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
+ adis->buffer = kzalloc(burst_max_length + sizeof(u16), GFP_KERNEL);
if (!adis->buffer) {
kfree(adis->xfer);
adis->xfer = NULL;
return -ENOMEM;
}
- tx = adis->buffer + burst_length;
+ tx = adis->buffer + burst_max_length;
tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
tx[1] = 0;
@@ -156,6 +161,14 @@ static irqreturn_t adis_trigger_handler(int irq, void *p)
return IRQ_HANDLED;
}
+static void adis_buffer_cleanup(void *arg)
+{
+ struct adis *adis = arg;
+
+ kfree(adis->buffer);
+ kfree(adis->xfer);
+}
+
/**
* adis_setup_buffer_and_trigger() - Sets up buffer and trigger for the adis device
* @adis: The adis device.
@@ -199,6 +212,43 @@ error_buffer_cleanup:
EXPORT_SYMBOL_GPL(adis_setup_buffer_and_trigger);
/**
+ * devm_adis_setup_buffer_and_trigger() - Sets up buffer and trigger for
+ * the managed adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ * @trigger_handler: Optional trigger handler, may be NULL.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * This function performs exactly the same setup as adis_setup_buffer_and_trigger()
+ */
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
+{
+ int ret;
+
+ if (!trigger_handler)
+ trigger_handler = adis_trigger_handler;
+
+ ret = devm_iio_triggered_buffer_setup(&adis->spi->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ trigger_handler, NULL);
+ if (ret)
+ return ret;
+
+ if (adis->spi->irq) {
+ ret = devm_adis_probe_trigger(adis, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ return devm_add_action_or_reset(&adis->spi->dev, adis_buffer_cleanup,
+ adis);
+}
+EXPORT_SYMBOL_GPL(devm_adis_setup_buffer_and_trigger);
+
+/**
* adis_cleanup_buffer_and_trigger() - Free buffer and trigger resources
* @adis: The adis device.
* @indio_dev: The IIO device.
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 8b9cd02c0f9f..8afe71947c00 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -27,6 +27,34 @@ static const struct iio_trigger_ops adis_trigger_ops = {
.set_trigger_state = &adis_data_rdy_trigger_set_state,
};
+static void adis_trigger_setup(struct adis *adis)
+{
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+}
+
+static int adis_validate_irq_flag(struct adis *adis)
+{
+ /*
+ * Typically these devices have data ready either on the rising edge or
+ * on the falling edge of the data ready pin. This check enforces that
+ * one of those is set in the drivers... It defaults to
+ * IRQF_TRIGGER_RISING for backward compatibility with devices that
+ * don't support changing the pin polarity.
+ */
+ if (!adis->irq_flag) {
+ adis->irq_flag = IRQF_TRIGGER_RISING;
+ return 0;
+ } else if (adis->irq_flag != IRQF_TRIGGER_RISING &&
+ adis->irq_flag != IRQF_TRIGGER_FALLING) {
+ dev_err(&adis->spi->dev, "Invalid IRQ mask: %08lx\n",
+ adis->irq_flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
/**
* adis_probe_trigger() - Sets up trigger for a adis device
* @adis: The adis device
@@ -45,13 +73,15 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;
- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
+ adis_trigger_setup(adis);
+
+ ret = adis_validate_irq_flag(adis);
+ if (ret)
+ return ret;
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
- IRQF_TRIGGER_RISING,
+ adis->irq_flag,
indio_dev->name,
adis->trig);
if (ret)
@@ -74,6 +104,40 @@ error_free_trig:
EXPORT_SYMBOL_GPL(adis_probe_trigger);
/**
+ * devm_adis_probe_trigger() - Sets up trigger for a managed adis device
+ * @adis: The adis device
+ * @indio_dev: The IIO device
+ *
+ * Returns 0 on success or a negative error code
+ */
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+{
+ int ret;
+
+ adis->trig = devm_iio_trigger_alloc(&adis->spi->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!adis->trig)
+ return -ENOMEM;
+
+ adis_trigger_setup(adis);
+
+ ret = adis_validate_irq_flag(adis);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&adis->spi->dev, adis->spi->irq,
+ &iio_trigger_generic_data_rdy_poll,
+ adis->irq_flag,
+ indio_dev->name,
+ adis->trig);
+ if (ret)
+ return ret;
+
+ return devm_iio_trigger_register(&adis->spi->dev, adis->trig);
+}
+EXPORT_SYMBOL_GPL(devm_adis_probe_trigger);
+
+/**
* adis_remove_trigger() - Remove trigger for a adis devices
* @adis: The adis device
*
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index e36f5e82d400..26398614eddf 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -24,8 +24,8 @@ static int bmi160_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
index c19e3df35559..61389b41c6d9 100644
--- a/drivers/iio/imu/bmi160/bmi160_spi.c
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -20,8 +20,8 @@ static int bmi160_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
return bmi160_core_probe(&spi->dev, regmap, id->name, true);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 2f8560ba4572..c27d06035c8b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -135,6 +135,7 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
st->mux_client = NULL;
if (ACPI_HANDLE(&client->dev)) {
struct i2c_board_info info;
+ struct i2c_client *mux_client;
struct acpi_device *adev;
int ret = -1;
@@ -172,9 +173,10 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
} else
return 0; /* no secondary addr, which is OK */
}
- st->mux_client = i2c_new_device(st->muxc->adapter[0], &info);
- if (!st->mux_client)
- return -ENODEV;
+ mux_client = i2c_new_client_device(st->muxc->adapter[0], &info);
+ if (IS_ERR(mux_client))
+ return PTR_ERR(mux_client);
+ st->mux_client = mux_client;
}
return 0;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0b8d2f7a0165..4d604fe842e5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -526,7 +526,7 @@ static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
__be16 d = cpu_to_be16(val);
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2);
+ result = regmap_bulk_write(st->map, reg + ind, &d, sizeof(d));
if (result)
return -EINVAL;
@@ -540,7 +540,7 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
__be16 d;
ind = (axis - IIO_MOD_X) * 2;
- result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2);
+ result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d));
if (result)
return -EINVAL;
*val = (short)be16_to_cpup(&d);
@@ -1248,12 +1248,31 @@ static const struct attribute_group inv_attribute_group = {
.attrs = inv_attributes
};
+static int inv_mpu6050_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&st->lock);
+ if (readval)
+ ret = regmap_read(st->map, reg, readval);
+ else
+ ret = regmap_write(st->map, reg, writeval);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static const struct iio_info mpu_info = {
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
.write_raw_get_fmt = &inv_write_raw_get_fmt,
.attrs = &inv_attribute_group,
.validate_trigger = inv_mpu6050_validate_trigger,
+ .debugfs_reg_access = &inv_mpu6050_reg_access,
};
/**
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 6993d3b87bb0..28cfae1e61cf 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -122,8 +122,8 @@ static int inv_mpu_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 673b198e6368..6f968ce687e1 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -53,8 +53,8 @@ static int inv_mpu_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 41cb20cb3809..b56df409ed0f 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -111,7 +111,7 @@ struct st_lsm6dsx_odr {
u8 val;
};
-#define ST_LSM6DSX_ODR_LIST_SIZE 6
+#define ST_LSM6DSX_ODR_LIST_SIZE 8
struct st_lsm6dsx_odr_table_entry {
struct st_lsm6dsx_reg reg;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 4426524b59f2..0b776cb91928 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -27,7 +27,8 @@
* - FIFO size: 4KB
*
* - LSM6DSO/LSM6DSOX/ASM330LHH/LSM6DSR/ISM330DHCX:
- * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416
+ * - Accelerometer/Gyroscope supported ODR [Hz]: 13, 26, 52, 104, 208, 416,
+ * 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
@@ -791,7 +792,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -804,7 +806,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
@@ -994,7 +997,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -1007,7 +1011,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
@@ -1171,7 +1176,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
[ST_LSM6DSX_ID_GYRO] = {
.reg = {
@@ -1184,7 +1190,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.odr_avl[3] = { 104000, 0x04 },
.odr_avl[4] = { 208000, 0x05 },
.odr_avl[5] = { 416000, 0x06 },
- .odr_len = 6,
+ .odr_avl[6] = { 833000, 0x07 },
+ .odr_len = 7,
},
},
.fs_table = {
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 1cf98195f84d..c1f83fe0d8da 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -88,6 +88,69 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
.len = 6,
},
},
+ /* LIS3MDL */
+ {
+ .i2c_addr = { 0x1e },
+ .wai = {
+ .addr = 0x0f,
+ .val = 0x3d,
+ },
+ .id = ST_LSM6DSX_ID_MAGN,
+ .odr_table = {
+ .reg = {
+ .addr = 0x20,
+ .mask = GENMASK(4, 2),
+ },
+ .odr_avl[0] = { 1000, 0x0 },
+ .odr_avl[1] = { 2000, 0x1 },
+ .odr_avl[2] = { 3000, 0x2 },
+ .odr_avl[3] = { 5000, 0x3 },
+ .odr_avl[4] = { 10000, 0x4 },
+ .odr_avl[5] = { 20000, 0x5 },
+ .odr_avl[6] = { 40000, 0x6 },
+ .odr_avl[7] = { 80000, 0x7 },
+ .odr_len = 8,
+ },
+ .fs_table = {
+ .reg = {
+ .addr = 0x21,
+ .mask = GENMASK(6, 5),
+ },
+ .fs_avl[0] = {
+ .gain = 146,
+ .val = 0x00,
+ }, /* 4000 uG/LSB */
+ .fs_avl[1] = {
+ .gain = 292,
+ .val = 0x01,
+ }, /* 8000 uG/LSB */
+ .fs_avl[2] = {
+ .gain = 438,
+ .val = 0x02,
+ }, /* 12000 uG/LSB */
+ .fs_avl[3] = {
+ .gain = 584,
+ .val = 0x03,
+ }, /* 16000 uG/LSB */
+ .fs_len = 4,
+ },
+ .pwr_table = {
+ .reg = {
+ .addr = 0x22,
+ .mask = GENMASK(1, 0),
+ },
+ .off_val = 0x2,
+ .on_val = 0x0,
+ },
+ .bdu = {
+ .addr = 0x24,
+ .mask = BIT(6),
+ },
+ .out = {
+ .addr = 0x28,
+ .len = 6,
+ },
+ },
};
static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
@@ -519,6 +582,36 @@ st_lsm6dsx_shub_read_raw(struct iio_dev *iio_dev,
}
static int
+st_lsm6dsx_shub_set_full_scale(struct st_lsm6dsx_sensor *sensor,
+ u32 gain)
+{
+ const struct st_lsm6dsx_fs_table_entry *fs_table;
+ int i, err;
+
+ fs_table = &sensor->ext_info.settings->fs_table;
+ if (!fs_table->reg.addr)
+ return -ENOTSUPP;
+
+ for (i = 0; i < fs_table->fs_len; i++) {
+ if (fs_table->fs_avl[i].gain == gain)
+ break;
+ }
+
+ if (i == fs_table->fs_len)
+ return -EINVAL;
+
+ err = st_lsm6dsx_shub_write_with_mask(sensor, fs_table->reg.addr,
+ fs_table->reg.mask,
+ fs_table->fs_avl[i].val);
+ if (err < 0)
+ return err;
+
+ sensor->gain = gain;
+
+ return 0;
+}
+
+static int
st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -554,6 +647,9 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
}
break;
}
+ case IIO_CHAN_INFO_SCALE:
+ err = st_lsm6dsx_shub_set_full_scale(sensor, val2);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 4ada5592aa2b..9fa238c0a7d4 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -189,10 +189,12 @@ __poll_t iio_buffer_poll(struct file *filp,
*/
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
return;
- wake_up(&indio_dev->buffer->pollq);
+ wake_up(&buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
@@ -262,10 +264,11 @@ static ssize_t iio_scan_el_show(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
/* Ensure ret is 0 or 1. */
ret = !!test_bit(to_iio_dev_attr(attr)->address,
- indio_dev->buffer->scan_mask);
+ buffer->scan_mask);
return sprintf(buf, "%d\n", ret);
}
@@ -316,8 +319,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;
- trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
- sizeof(*trialmask), GFP_KERNEL);
+ trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {
@@ -382,7 +384,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret < 0)
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
@@ -411,7 +413,9 @@ static ssize_t iio_scan_el_ts_show(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ return sprintf(buf, "%d\n", buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
@@ -421,6 +425,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
bool state;
ret = strtobool(buf, &state);
@@ -428,11 +433,11 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
return ret;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto error_ret;
}
- indio_dev->buffer->scan_timestamp = state;
+ buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&indio_dev->mlock);
@@ -440,10 +445,10 @@ error_ret:
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer,
const struct iio_chan_spec *chan)
{
int ret, attrcount = 0;
- struct iio_buffer *buffer = indio_dev->buffer;
ret = __iio_add_chan_devattr("index",
chan,
@@ -519,7 +524,7 @@ static ssize_t iio_buffer_write_length(struct device *dev,
return len;
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
} else {
buffer->access->set_length(buffer, val);
@@ -540,7 +545,9 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}
static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
@@ -687,6 +694,13 @@ static int iio_verify_update(struct iio_dev *indio_dev,
bool scan_timestamp;
unsigned int modes;
+ if (insert_buffer &&
+ bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
+ dev_dbg(&indio_dev->dev,
+ "At least one scan element must be enabled first\n");
+ return -EINVAL;
+ }
+
memset(config, 0, sizeof(*config));
config->watermark = ~0;
@@ -913,6 +927,7 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
indio_dev->active_scan_mask = config->scan_mask;
indio_dev->scan_timestamp = config->scan_timestamp;
indio_dev->scan_bytes = config->scan_bytes;
+ indio_dev->currentmode = config->mode;
iio_update_demux(indio_dev);
@@ -948,8 +963,6 @@ static int iio_enable_buffers(struct iio_dev *indio_dev,
goto err_disable_buffers;
}
- indio_dev->currentmode = config->mode;
-
if (indio_dev->setup_ops->postenable) {
ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
@@ -966,10 +979,10 @@ err_disable_buffers:
buffer_list)
iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
- indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
indio_dev->active_scan_mask = NULL;
return ret;
@@ -1004,8 +1017,6 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
ret = ret2;
}
- indio_dev->currentmode = INDIO_DIRECT_MODE;
-
if (indio_dev->setup_ops->postdisable) {
ret2 = indio_dev->setup_ops->postdisable(indio_dev);
if (ret2 && !ret)
@@ -1014,6 +1025,7 @@ static int iio_disable_buffers(struct iio_dev *indio_dev)
iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
indio_dev->active_scan_mask = NULL;
+ indio_dev->currentmode = INDIO_DIRECT_MODE;
return ret;
}
@@ -1123,6 +1135,7 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
int ret;
bool requested_state;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_buffer *buffer = indio_dev->buffer;
bool inlist;
ret = strtobool(buf, &requested_state);
@@ -1132,17 +1145,15 @@ static ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
/* Find out if it is in the list */
- inlist = iio_buffer_is_active(indio_dev->buffer);
+ inlist = iio_buffer_is_active(buffer);
/* Already in desired state */
if (inlist == requested_state)
goto done;
if (requested_state)
- ret = __iio_update_buffers(indio_dev,
- indio_dev->buffer, NULL);
+ ret = __iio_update_buffers(indio_dev, buffer, NULL);
else
- ret = __iio_update_buffers(indio_dev,
- NULL, indio_dev->buffer);
+ ret = __iio_update_buffers(indio_dev, NULL, buffer);
done:
mutex_unlock(&indio_dev->mlock);
@@ -1184,7 +1195,7 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
goto out;
}
- if (iio_buffer_is_active(indio_dev->buffer)) {
+ if (iio_buffer_is_active(buffer)) {
ret = -EBUSY;
goto out;
}
@@ -1201,11 +1212,9 @@ static ssize_t iio_dma_show_data_available(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- size_t bytes;
-
- bytes = iio_buffer_data_available(indio_dev->buffer);
+ struct iio_buffer *buffer = indio_dev->buffer;
- return sprintf(buf, "%zu\n", bytes);
+ return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
@@ -1233,7 +1242,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
struct iio_dev_attr *p;
struct attribute **attr;
struct iio_buffer *buffer = indio_dev->buffer;
- int ret, i, attrn, attrcount, attrcount_orig = 0;
+ int ret, i, attrn, attrcount;
const struct iio_chan_spec *channels;
channels = indio_dev->channels;
@@ -1277,12 +1286,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;
- if (buffer->scan_el_attrs != NULL) {
- attr = buffer->scan_el_attrs->attrs;
- while (*attr++ != NULL)
- attrcount_orig++;
- }
- attrcount = attrcount_orig;
+ attrcount = 0;
INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
channels = indio_dev->channels;
if (channels) {
@@ -1291,7 +1295,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
if (channels[i].scan_index < 0)
continue;
- ret = iio_buffer_add_channel_sysfs(indio_dev,
+ ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
&channels[i]);
if (ret < 0)
goto error_cleanup_dynamic;
@@ -1319,10 +1323,7 @@ int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_free_scan_mask;
}
- if (buffer->scan_el_attrs)
- memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
- sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
- attrn = attrcount_orig;
+ attrn = 0;
list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
@@ -1334,20 +1335,22 @@ error_free_scan_mask:
bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
- kfree(indio_dev->buffer->buffer_group.attrs);
+ kfree(buffer->buffer_group.attrs);
return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
- if (!indio_dev->buffer)
+ struct iio_buffer *buffer = indio_dev->buffer;
+
+ if (!buffer)
return;
- bitmap_free(indio_dev->buffer->scan_mask);
- kfree(indio_dev->buffer->buffer_group.attrs);
- kfree(indio_dev->buffer->scan_el_group.attrs);
- iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
+ bitmap_free(buffer->scan_mask);
+ kfree(buffer->buffer_group.attrs);
+ kfree(buffer->scan_el_group.attrs);
+ iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}
/**
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 24f7bbff4938..1527f01a44f1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -572,46 +572,46 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
switch (type) {
case IIO_VAL_INT:
- return snprintf(buf, len, "%d", vals[0]);
+ return scnprintf(buf, len, "%d", vals[0]);
case IIO_VAL_INT_PLUS_MICRO_DB:
scale_db = true;
/* fall through */
case IIO_VAL_INT_PLUS_MICRO:
if (vals[1] < 0)
- return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+ return scnprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
-vals[1], scale_db ? " dB" : "");
else
- return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+ return scnprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (vals[1] < 0)
- return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+ return scnprintf(buf, len, "-%d.%09u", abs(vals[0]),
-vals[1]);
else
- return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
+ return scnprintf(buf, len, "%d.%09u", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
tmp1 = vals[1];
tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
- return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ return scnprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
- return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+ return scnprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
case IIO_VAL_INT_MULTIPLE:
{
int i;
int l = 0;
for (i = 0; i < size; ++i) {
- l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+ l += scnprintf(&buf[l], len - l, "%d ", vals[i]);
if (l >= len)
break;
}
return l;
}
case IIO_VAL_CHAR:
- return snprintf(buf, len, "%c", (char)vals[0]);
+ return scnprintf(buf, len, "%c", (char)vals[0]);
default:
return 0;
}
@@ -682,10 +682,10 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < length - 1)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -698,10 +698,10 @@ static ssize_t iio_format_avail_list(char *buf, const int *vals,
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < length / 2 - 1)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -725,10 +725,10 @@ static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < 2)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"]\n");
if (len >= PAGE_SIZE)
return -EFBIG;
@@ -741,10 +741,10 @@ static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
if (len >= PAGE_SIZE)
return -EFBIG;
if (i < 2)
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
" ");
else
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"]\n");
if (len >= PAGE_SIZE)
return -EFBIG;
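
The hunks above switch the sysfs formatting helpers from snprintf() to scnprintf(): snprintf() returns the length the output would have had, so adding its return value to a running offset can walk past the end of the PAGE_SIZE buffer, while scnprintf() returns the number of bytes actually stored. A minimal userspace sketch of the same idea; my_scnprintf() below is only a stand-in for the kernel-only scnprintf():

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns bytes written. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);
	/* vsnprintf() reports the would-be length; clamp to what actually fit. */
	return ret >= (int)size ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[16];
	int len = 0, i;

	/* Accumulating my_scnprintf() keeps len <= sizeof(buf) - 1. */
	for (i = 0; i < 10; i++)
		len += my_scnprintf(buf + len, sizeof(buf) - len, "%d ", i);
	printf("len=%d buf=\"%s\"\n", len, buf);
	return 0;
}

With snprintf() in the loop instead, len would keep growing past the buffer size and later writes would be aimed beyond its end, which is exactly what the patch avoids.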
@@ -1507,27 +1507,27 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
alloc_size += IIO_ALIGN - 1;
dev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
- if (dev) {
- dev->dev.groups = dev->groups;
- dev->dev.type = &iio_device_type;
- dev->dev.bus = &iio_bus_type;
- device_initialize(&dev->dev);
- dev_set_drvdata(&dev->dev, (void *)dev);
- mutex_init(&dev->mlock);
- mutex_init(&dev->info_exist_lock);
- INIT_LIST_HEAD(&dev->channel_attr_list);
-
- dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
- if (dev->id < 0) {
- /* cannot use a dev_err as the name isn't available */
- pr_err("failed to get device id\n");
- kfree(dev);
- return NULL;
- }
- dev_set_name(&dev->dev, "iio:device%d", dev->id);
- INIT_LIST_HEAD(&dev->buffer_list);
+ dev->dev.groups = dev->groups;
+ dev->dev.type = &iio_device_type;
+ dev->dev.bus = &iio_bus_type;
+ device_initialize(&dev->dev);
+ dev_set_drvdata(&dev->dev, (void *)dev);
+ mutex_init(&dev->mlock);
+ mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
+
+ dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
+ if (dev->id < 0) {
+ /* cannot use a dev_err as the name isn't available */
+ pr_err("failed to get device id\n");
+ kfree(dev);
+ return NULL;
}
+ dev_set_name(&dev->dev, "iio:device%d", dev->id);
+ INIT_LIST_HEAD(&dev->buffer_list);
return dev;
}
@@ -1549,17 +1549,6 @@ static void devm_iio_device_release(struct device *dev, void *res)
iio_device_free(*(struct iio_dev **)res);
}
-int devm_iio_device_match(struct device *dev, void *res, void *data)
-{
- struct iio_dev **r = res;
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
- return *r == data;
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_match);
-
/**
* devm_iio_device_alloc - Resource-managed iio_device_alloc()
* @dev: Device to allocate iio_dev for
@@ -1568,9 +1557,6 @@ EXPORT_SYMBOL_GPL(devm_iio_device_match);
* Managed iio_device_alloc. iio_dev allocated with this function is
* automatically freed on driver detach.
*
- * If an iio_dev allocated with this function needs to be freed separately,
- * devm_iio_device_free() must be used.
- *
* RETURNS:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
@@ -1596,23 +1582,6 @@ struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
/**
- * devm_iio_device_free - Resource-managed iio_device_free()
- * @dev: Device this iio_dev belongs to
- * @iio_dev: the iio_dev associated with the device
- *
- * Free iio_dev allocated with devm_iio_device_alloc().
- */
-void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_device_release,
- devm_iio_device_match, iio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_free);
-
-/**
* iio_chrdev_open() - chrdev file open for buffer access and ioctls
* @inode: Inode structure for identifying the device in the file system
* @filp: File structure for iio device used to keep and later access
@@ -1714,6 +1683,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
int ret;
+ if (!indio_dev->info)
+ return -EINVAL;
+
indio_dev->driver_module = this_mod;
/* If the calling driver did not initialize of_node, do it here */
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
@@ -1726,9 +1698,6 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
if (ret < 0)
return ret;
- if (!indio_dev->info)
- return -EINVAL;
-
/* configure elements for the chrdev */
indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), indio_dev->id);
@@ -1834,23 +1803,6 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
- * devm_iio_device_unregister - Resource-managed iio_device_unregister()
- * @dev: Device this iio_dev belongs to
- * @indio_dev: the iio_dev associated with the device
- *
- * Unregister iio_dev registered with devm_iio_device_register().
- */
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_device_unreg,
- devm_iio_device_match, indio_dev);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
-
-/**
* iio_device_claim_direct_mode - Keep device in direct mode
* @indio_dev: the iio_dev associated with the device
*
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 3908a9a90035..53d1931f6be8 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -585,18 +585,6 @@ static void devm_iio_trigger_release(struct device *dev, void *res)
iio_trigger_free(*(struct iio_trigger **)res);
}
-static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
-{
- struct iio_trigger **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
/**
* devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
* @dev: Device to allocate iio_trigger for
@@ -608,9 +596,6 @@ static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
* Managed iio_trigger_alloc. iio_trigger allocated with this function is
* automatically freed on driver detach.
*
- * If an iio_trigger allocated with this function needs to be freed separately,
- * devm_iio_trigger_free() must be used.
- *
* RETURNS:
* Pointer to allocated iio_trigger on success, NULL on failure.
*/
@@ -640,23 +625,6 @@ struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
-/**
- * devm_iio_trigger_free - Resource-managed iio_trigger_free()
- * @dev: Device this iio_dev belongs to
- * @iio_trig: the iio_trigger associated with the device
- *
- * Free iio_trigger allocated with devm_iio_trigger_alloc().
- */
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_trigger_release,
- devm_iio_trigger_match, iio_trig);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
-
static void devm_iio_trigger_unreg(struct device *dev, void *res)
{
iio_trigger_unregister(*(struct iio_trigger **)res);
@@ -673,9 +641,6 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
* calls iio_trigger_register() internally. Refer to that function for more
* information.
*
- * If an iio_trigger registered with this function needs to be unregistered
- * separately, devm_iio_trigger_unregister() must be used.
- *
* RETURNS:
* 0 on success, negative error number on failure.
*/
@@ -701,24 +666,6 @@ int __devm_iio_trigger_register(struct device *dev,
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
-/**
- * devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
- * @dev: device this iio_trigger belongs to
- * @trig_info: the trigger associated with the device
- *
- * Unregister trigger registered with devm_iio_trigger_register().
- */
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info)
-{
- int rc;
-
- rc = devres_release(dev, devm_iio_trigger_unreg, devm_iio_trigger_match,
- trig_info);
- WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_unregister);
-
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
return indio_dev->trig->attached_own_device;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 5a8351c9a426..ede99e0d5371 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -360,18 +360,6 @@ static void devm_iio_channel_free(struct device *dev, void *res)
iio_channel_release(channel);
}
-static int devm_iio_channel_match(struct device *dev, void *res, void *data)
-{
- struct iio_channel **r = res;
-
- if (!r || !*r) {
- WARN_ON(!r || !*r);
- return 0;
- }
-
- return *r == data;
-}
-
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *channel_name)
{
@@ -394,13 +382,6 @@ struct iio_channel *devm_iio_channel_get(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
-void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
-{
- WARN_ON(devres_release(dev, devm_iio_channel_free,
- devm_iio_channel_match, channel));
-}
-EXPORT_SYMBOL_GPL(devm_iio_channel_release);
-
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
@@ -514,14 +495,6 @@ struct iio_channel *devm_iio_channel_get_all(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
-void devm_iio_channel_release_all(struct device *dev,
- struct iio_channel *channels)
-{
- WARN_ON(devres_release(dev, devm_iio_channel_free_all,
- devm_iio_channel_match, channels));
-}
-EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
-
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index b27719cefcf9..182bd18c4bb2 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -516,6 +516,8 @@ config US5182D
config VCNL4000
tristate "VCNL4000/4010/4020/4200 combined ALS and proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VCNL4000,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index a8361006dcd9..03f2d8d123c4 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -273,13 +273,11 @@ static const struct i2c_device_id bh1780_id[] = {
MODULE_DEVICE_TABLE(i2c, bh1780_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_bh1780_match[] = {
{ .compatible = "rohm,bh1780gli", },
{},
};
MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
static struct i2c_driver bh1780_driver = {
.probe = bh1780_probe,
@@ -288,7 +286,7 @@ static struct i2c_driver bh1780_driver = {
.driver = {
.name = "bh1780",
.pm = &bh1780_dev_pm_ops,
- .of_match_table = of_match_ptr(of_bh1780_match),
+ .of_match_table = of_bh1780_match,
},
};
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 5f4fb5674fa0..160eb3f99795 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -4,11 +4,13 @@
* Author: Kevin Tsai <ktsai@capellamicro.com>
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -18,17 +20,24 @@
/* Registers Address */
#define CM32181_REG_ADDR_CMD 0x00
+#define CM32181_REG_ADDR_WH 0x01
+#define CM32181_REG_ADDR_WL 0x02
+#define CM32181_REG_ADDR_TEST 0x03
#define CM32181_REG_ADDR_ALS 0x04
#define CM32181_REG_ADDR_STATUS 0x06
#define CM32181_REG_ADDR_ID 0x07
/* Number of Configurable Registers */
-#define CM32181_CONF_REG_NUM 0x01
+#define CM32181_CONF_REG_NUM 4
/* CMD register */
-#define CM32181_CMD_ALS_ENABLE 0x00
-#define CM32181_CMD_ALS_DISABLE 0x01
-#define CM32181_CMD_ALS_INT_EN 0x02
+#define CM32181_CMD_ALS_DISABLE BIT(0)
+#define CM32181_CMD_ALS_INT_EN BIT(1)
+#define CM32181_CMD_ALS_THRES_WINDOW BIT(2)
+
+#define CM32181_CMD_ALS_PERS_SHIFT 4
+#define CM32181_CMD_ALS_PERS_MASK (0x03 << CM32181_CMD_ALS_PERS_SHIFT)
+#define CM32181_CMD_ALS_PERS_DEFAULT (0x01 << CM32181_CMD_ALS_PERS_SHIFT)
#define CM32181_CMD_ALS_IT_SHIFT 6
#define CM32181_CMD_ALS_IT_MASK (0x0F << CM32181_CMD_ALS_IT_SHIFT)
@@ -38,27 +47,133 @@
#define CM32181_CMD_ALS_SM_MASK (0x03 << CM32181_CMD_ALS_SM_SHIFT)
#define CM32181_CMD_ALS_SM_DEFAULT (0x01 << CM32181_CMD_ALS_SM_SHIFT)
-#define CM32181_MLUX_PER_BIT 5 /* ALS_SM=01 IT=800ms */
-#define CM32181_MLUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
-#define CM32181_CALIBSCALE_DEFAULT 1000
-#define CM32181_CALIBSCALE_RESOLUTION 1000
-#define MLUX_PER_LUX 1000
+#define CM32181_LUX_PER_BIT 500 /* ALS_SM=01 IT=800ms */
+#define CM32181_LUX_PER_BIT_RESOLUTION 100000
+#define CM32181_LUX_PER_BIT_BASE_IT 800000 /* Based on IT=800ms */
+#define CM32181_CALIBSCALE_DEFAULT 100000
+#define CM32181_CALIBSCALE_RESOLUTION 100000
-static const u8 cm32181_reg[CM32181_CONF_REG_NUM] = {
- CM32181_REG_ADDR_CMD,
-};
+#define SMBUS_ALERT_RESPONSE_ADDRESS 0x0c
+
+/* CPM0 Index 0: device-id (3218 or 32181), 1: Unknown, 2: init_regs_bitmap */
+#define CPM0_REGS_BITMAP 2
+#define CPM0_HEADER_SIZE 3
-static const int als_it_bits[] = {12, 8, 0, 1, 2, 3};
-static const int als_it_value[] = {25000, 50000, 100000, 200000, 400000,
- 800000};
+/* CPM1 Index 0: lux_per_bit, 1: calibscale, 2: resolution (100000) */
+#define CPM1_LUX_PER_BIT 0
+#define CPM1_CALIBSCALE 1
+#define CPM1_SIZE 3
+
+/* CM3218 Family */
+static const int cm3218_als_it_bits[] = { 0, 1, 2, 3 };
+static const int cm3218_als_it_values[] = { 100000, 200000, 400000, 800000 };
+
+/* CM32181 Family */
+static const int cm32181_als_it_bits[] = { 12, 8, 0, 1, 2, 3 };
+static const int cm32181_als_it_values[] = {
+ 25000, 50000, 100000, 200000, 400000, 800000
+};
struct cm32181_chip {
struct i2c_client *client;
+ struct device *dev;
struct mutex lock;
u16 conf_regs[CM32181_CONF_REG_NUM];
+ unsigned long init_regs_bitmap;
int calibscale;
+ int lux_per_bit;
+ int lux_per_bit_base_it;
+ int num_als_it;
+ const int *als_it_bits;
+ const int *als_it_values;
};
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2);
+
+#ifdef CONFIG_ACPI
+/**
+ * cm32181_acpi_get_cpm() - Get CPM object from ACPI
+ * @client pointer of struct i2c_client.
+ * @obj_name pointer of ACPI object name.
+ * @count maximum size of return array.
+ * @vals pointer of array for return elements.
+ *
+ * Convert ACPI CPM table to array.
+ *
+ * Return: -ENODEV for fail. Otherwise is number of elements.
+ */
+static int cm32181_acpi_get_cpm(struct device *dev, char *obj_name,
+ u64 *values, int count)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *cpm, *elem;
+ acpi_handle handle;
+ acpi_status status;
+ int i;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(handle, obj_name, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "object %s not found\n", obj_name);
+ return -ENODEV;
+ }
+
+ cpm = buffer.pointer;
+ if (cpm->package.count > count)
+ dev_warn(dev, "%s table contains %u values, only using first %d values\n",
+ obj_name, cpm->package.count, count);
+
+ count = min_t(int, cpm->package.count, count);
+ for (i = 0; i < count; i++) {
+ elem = &(cpm->package.elements[i]);
+ values[i] = elem->integer.value;
+ }
+
+ kfree(buffer.pointer);
+
+ return count;
+}
+
+static void cm32181_acpi_parse_cpm_tables(struct cm32181_chip *cm32181)
+{
+ u64 vals[CPM0_HEADER_SIZE + CM32181_CONF_REG_NUM];
+ struct device *dev = cm32181->dev;
+ int i, count;
+
+ count = cm32181_acpi_get_cpm(dev, "CPM0", vals, ARRAY_SIZE(vals));
+ if (count <= CPM0_HEADER_SIZE)
+ return;
+
+ count -= CPM0_HEADER_SIZE;
+
+ cm32181->init_regs_bitmap = vals[CPM0_REGS_BITMAP];
+ cm32181->init_regs_bitmap &= GENMASK(count - 1, 0);
+ for_each_set_bit(i, &cm32181->init_regs_bitmap, count)
+ cm32181->conf_regs[i] = vals[CPM0_HEADER_SIZE + i];
+
+ count = cm32181_acpi_get_cpm(dev, "CPM1", vals, ARRAY_SIZE(vals));
+ if (count != CPM1_SIZE)
+ return;
+
+ cm32181->lux_per_bit = vals[CPM1_LUX_PER_BIT];
+
+ /* Check for uncalibrated devices */
+ if (vals[CPM1_CALIBSCALE] == CM32181_CALIBSCALE_DEFAULT)
+ return;
+
+ cm32181->calibscale = vals[CPM1_CALIBSCALE];
+ /* CPM1 lux_per_bit is for the current it value */
+ cm32181_read_als_it(cm32181, &cm32181->lux_per_bit_base_it);
+}
+#else
+static void cm32181_acpi_parse_cpm_tables(struct cm32181_chip *cm32181)
+{
+}
+#endif /* CONFIG_ACPI */
+
/**
* cm32181_reg_init() - Initialize CM32181 registers
* @cm32181: pointer of struct cm32181.
@@ -78,18 +193,37 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
return ret;
/* check device ID */
- if ((ret & 0xFF) != 0x81)
+ switch (ret & 0xFF) {
+ case 0x18: /* CM3218 */
+ cm32181->num_als_it = ARRAY_SIZE(cm3218_als_it_bits);
+ cm32181->als_it_bits = cm3218_als_it_bits;
+ cm32181->als_it_values = cm3218_als_it_values;
+ break;
+ case 0x81: /* CM32181 */
+ case 0x82: /* CM32182, fully compat. with CM32181 */
+ cm32181->num_als_it = ARRAY_SIZE(cm32181_als_it_bits);
+ cm32181->als_it_bits = cm32181_als_it_bits;
+ cm32181->als_it_values = cm32181_als_it_values;
+ break;
+ default:
return -ENODEV;
+ }
/* Default Values */
- cm32181->conf_regs[CM32181_REG_ADDR_CMD] = CM32181_CMD_ALS_ENABLE |
+ cm32181->conf_regs[CM32181_REG_ADDR_CMD] =
CM32181_CMD_ALS_IT_DEFAULT | CM32181_CMD_ALS_SM_DEFAULT;
+ cm32181->init_regs_bitmap = BIT(CM32181_REG_ADDR_CMD);
cm32181->calibscale = CM32181_CALIBSCALE_DEFAULT;
+ cm32181->lux_per_bit = CM32181_LUX_PER_BIT;
+ cm32181->lux_per_bit_base_it = CM32181_LUX_PER_BIT_BASE_IT;
+
+ if (ACPI_HANDLE(cm32181->dev))
+ cm32181_acpi_parse_cpm_tables(cm32181);
/* Initialize registers*/
- for (i = 0; i < CM32181_CONF_REG_NUM; i++) {
- ret = i2c_smbus_write_word_data(client, cm32181_reg[i],
- cm32181->conf_regs[i]);
+ for_each_set_bit(i, &cm32181->init_regs_bitmap, CM32181_CONF_REG_NUM) {
+ ret = i2c_smbus_write_word_data(client, i,
+ cm32181->conf_regs[i]);
if (ret < 0)
return ret;
}
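
The loop above writes only the configuration registers whose bit is set in init_regs_bitmap, so an ACPI CPM0 table can override an arbitrary subset of them. A small userspace sketch of the same bitmap-gated initialisation; for_each_set_bit() is kernel-only, so a plain mask test stands in for it, and write_reg() plus the register values are hypothetical placeholders:

#include <stdint.h>
#include <stdio.h>

#define CONF_REG_NUM 4

/* Hypothetical stand-in for i2c_smbus_write_word_data(). */
static int write_reg(int reg, uint16_t val)
{
	printf("write reg 0x%02x = 0x%04x\n", reg, val);
	return 0;
}

int main(void)
{
	/* Illustrative values only, not the real CM32181 defaults. */
	uint16_t conf_regs[CONF_REG_NUM] = { 0x0040, 0x0000, 0x0000, 0x0001 };
	unsigned long init_regs_bitmap = (1UL << 0) | (1UL << 3);
	int i;

	/* Only registers flagged in the bitmap are written to the chip. */
	for (i = 0; i < CONF_REG_NUM; i++)
		if (init_regs_bitmap & (1UL << i))
			write_reg(i, conf_regs[i]);
	return 0;
}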
@@ -102,7 +236,7 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
* @cm32181: pointer of struct cm32181
* @val2: pointer of int to load the als_it value.
*
- * Report the current integartion time by millisecond.
+ * Report the current integration time in milliseconds.
*
* Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
@@ -114,9 +248,9 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
als_it = cm32181->conf_regs[CM32181_REG_ADDR_CMD];
als_it &= CM32181_CMD_ALS_IT_MASK;
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
- for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
- if (als_it == als_it_bits[i]) {
- *val2 = als_it_value[i];
+ for (i = 0; i < cm32181->num_als_it; i++) {
+ if (als_it == cm32181->als_it_bits[i]) {
+ *val2 = cm32181->als_it_values[i];
return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -139,14 +273,14 @@ static int cm32181_write_als_it(struct cm32181_chip *cm32181, int val)
u16 als_it;
int ret, i, n;
- n = ARRAY_SIZE(als_it_value);
+ n = cm32181->num_als_it;
for (i = 0; i < n; i++)
- if (val <= als_it_value[i])
+ if (val <= cm32181->als_it_values[i])
break;
if (i >= n)
i = n - 1;
- als_it = als_it_bits[i];
+ als_it = cm32181->als_it_bits[i];
als_it <<= CM32181_CMD_ALS_IT_SHIFT;
mutex_lock(&cm32181->lock);
@@ -175,15 +309,15 @@ static int cm32181_get_lux(struct cm32181_chip *cm32181)
struct i2c_client *client = cm32181->client;
int ret;
int als_it;
- unsigned long lux;
+ u64 lux;
ret = cm32181_read_als_it(cm32181, &als_it);
if (ret < 0)
return -EINVAL;
- lux = CM32181_MLUX_PER_BIT;
- lux *= CM32181_MLUX_PER_BIT_BASE_IT;
- lux /= als_it;
+ lux = cm32181->lux_per_bit;
+ lux *= cm32181->lux_per_bit_base_it;
+ lux = div_u64(lux, als_it);
ret = i2c_smbus_read_word_data(client, CM32181_REG_ADDR_ALS);
if (ret < 0)
@@ -191,8 +325,8 @@ static int cm32181_get_lux(struct cm32181_chip *cm32181)
lux *= ret;
lux *= cm32181->calibscale;
- lux /= CM32181_CALIBSCALE_RESOLUTION;
- lux /= MLUX_PER_LUX;
+ lux = div_u64(lux, CM32181_CALIBSCALE_RESOLUTION);
+ lux = div_u64(lux, CM32181_LUX_PER_BIT_RESOLUTION);
if (lux > 0xFFFF)
lux = 0xFFFF;
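
cm32181_get_lux() now accumulates the lux value in a u64 and divides with div_u64() because the reworked 100000-based scale factors make the intermediate product (lux_per_bit * base integration time * raw count * calibscale) overflow 32 bits. A rough userspace check of the magnitudes, using the default constants visible in this patch; plain 64-bit division stands in for div_u64():

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t lux_per_bit = 500;    /* CM32181_LUX_PER_BIT */
	uint64_t base_it = 800000;     /* CM32181_LUX_PER_BIT_BASE_IT */
	uint64_t als_it = 800000;      /* current integration time (us) */
	uint64_t raw = 0xFFFF;         /* worst-case ALS reading */
	uint64_t calibscale = 100000;  /* CM32181_CALIBSCALE_DEFAULT */
	uint64_t lux;

	lux = lux_per_bit * base_it / als_it; /* 500 */
	lux *= raw;                           /* ~3.3e7, still fits in 32 bits */
	lux *= calibscale;                    /* ~3.3e12, needs 64 bits */
	lux /= 100000;                        /* CM32181_CALIBSCALE_RESOLUTION */
	lux /= 100000;                        /* CM32181_LUX_PER_BIT_RESOLUTION */
	if (lux > 0xFFFF)
		lux = 0xFFFF;
	printf("lux = %" PRIu64 "\n", lux);   /* 327 at full scale, 800ms IT */
	return 0;
}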
@@ -258,11 +392,12 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
static ssize_t cm32181_get_it_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct cm32181_chip *cm32181 = iio_priv(dev_to_iio_dev(dev));
int i, n, len;
- n = ARRAY_SIZE(als_it_value);
+ n = cm32181->num_als_it;
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", cm32181->als_it_values[i]);
return len + sprintf(buf + len, "\n");
}
@@ -294,70 +429,86 @@ static const struct iio_info cm32181_info = {
.attrs = &cm32181_attribute_group,
};
-static int cm32181_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int cm32181_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct cm32181_chip *cm32181;
struct iio_dev *indio_dev;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*cm32181));
- if (!indio_dev) {
- dev_err(&client->dev, "devm_iio_device_alloc failed\n");
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*cm32181));
+ if (!indio_dev)
return -ENOMEM;
+
+ /*
+ * Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
+ * SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
+ * Detect this and take the following steps to deal with it:
+ * 1. When a SMBus Alert capable sensor has an Alert asserted, it will
+ * not respond on its actual I2C address. Read a byte from the ARA
+ * to clear any pending Alerts.
+ * 2. Create a "dummy" client for the actual I2C address and
+ * use that client to communicate with the sensor.
+ */
+ if (ACPI_HANDLE(dev) && client->addr == SMBUS_ALERT_RESPONSE_ADDRESS) {
+ struct i2c_board_info board_info = { .type = "dummy" };
+
+ i2c_smbus_read_byte(client);
+
+ client = i2c_acpi_new_device(dev, 1, &board_info);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
}
cm32181 = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
cm32181->client = client;
+ cm32181->dev = dev;
mutex_init(&cm32181->lock);
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = cm32181_channels;
indio_dev->num_channels = ARRAY_SIZE(cm32181_channels);
indio_dev->info = &cm32181_info;
- indio_dev->name = id->name;
+ indio_dev->name = dev_name(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
ret = cm32181_reg_init(cm32181);
if (ret) {
- dev_err(&client->dev,
- "%s: register init failed\n",
- __func__);
+ dev_err(dev, "%s: register init failed\n", __func__);
return ret;
}
- ret = devm_iio_device_register(&client->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(&client->dev,
- "%s: regist device failed\n",
- __func__);
+ dev_err(dev, "%s: regist device failed\n", __func__);
return ret;
}
return 0;
}
-static const struct i2c_device_id cm32181_id[] = {
- { "cm32181", 0 },
- { }
-};
-
-MODULE_DEVICE_TABLE(i2c, cm32181_id);
-
static const struct of_device_id cm32181_of_match[] = {
+ { .compatible = "capella,cm3218" },
{ .compatible = "capella,cm32181" },
{ }
};
MODULE_DEVICE_TABLE(of, cm32181_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cm32181_acpi_match[] = {
+ { "CPLM3218", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cm32181_acpi_match);
+#endif
+
static struct i2c_driver cm32181_driver = {
.driver = {
.name = "cm32181",
- .of_match_table = of_match_ptr(cm32181_of_match),
+ .acpi_match_table = ACPI_PTR(cm32181_acpi_match),
+ .of_match_table = cm32181_of_match,
},
- .id_table = cm32181_id,
- .probe = cm32181_probe,
+ .probe_new = cm32181_probe,
};
module_i2c_driver(cm32181_driver);
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index cd3cfb7d02bd..867200825686 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/init.h>
@@ -418,7 +419,7 @@ MODULE_DEVICE_TABLE(of, cm3232_of_match);
static struct i2c_driver cm3232_driver = {
.driver = {
.name = "cm3232",
- .of_match_table = of_match_ptr(cm3232_of_match),
+ .of_match_table = cm3232_of_match,
#ifdef CONFIG_PM_SLEEP
.pm = &cm3232_pm_ops,
#endif
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index b7ef16b28280..7a2679bdc987 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -158,6 +158,9 @@ static irqreturn_t gp2ap002_prox_irq(int irq, void *d)
int val;
int ret;
+ if (!gp2ap002->enabled)
+ goto err_retrig;
+
ret = regmap_read(gp2ap002->map, GP2AP002_PROX, &val);
if (ret) {
dev_err(gp2ap002->dev, "error reading proximity\n");
@@ -247,6 +250,8 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
struct gp2ap002 *gp2ap002 = iio_priv(indio_dev);
int ret;
+ pm_runtime_get_sync(gp2ap002->dev);
+
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
@@ -255,13 +260,21 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
*val = ret;
- return IIO_VAL_INT;
+ ret = IIO_VAL_INT;
+ goto out;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+out:
+ pm_runtime_mark_last_busy(gp2ap002->dev);
+ pm_runtime_put_autosuspend(gp2ap002->dev);
+
+ return ret;
}
static int gp2ap002_init(struct gp2ap002 *gp2ap002)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 7fbbce0d4bc7..070d4cd0cf54 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -38,8 +38,8 @@
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -1617,18 +1617,16 @@ static const struct i2c_device_id gp2ap020a00f_id[] = {
MODULE_DEVICE_TABLE(i2c, gp2ap020a00f_id);
-#ifdef CONFIG_OF
static const struct of_device_id gp2ap020a00f_of_match[] = {
{ .compatible = "sharp,gp2ap020a00f" },
{ }
};
MODULE_DEVICE_TABLE(of, gp2ap020a00f_of_match);
-#endif
static struct i2c_driver gp2ap020a00f_driver = {
.driver = {
.name = GP2A_I2C_NAME,
- .of_match_table = of_match_ptr(gp2ap020a00f_of_match),
+ .of_match_table = gp2ap020a00f_of_match,
},
.probe = gp2ap020a00f_probe,
.remove = gp2ap020a00f_remove,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index b6cd299517d1..81fa2a422797 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
@@ -308,18 +306,13 @@ static int hid_als_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&als_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&als_state->common_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -343,9 +336,7 @@ static int hid_als_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&als_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -360,8 +351,7 @@ static int hid_als_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&als_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 7e1030af9ba3..e9c04df07344 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
#define CHANNEL_SCAN_INDEX_PRESENCE 0
@@ -286,18 +284,13 @@ static int hid_prox_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&prox_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&prox_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -321,9 +314,7 @@ static int hid_prox_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&prox_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -338,8 +329,7 @@ static int hid_prox_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&prox_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index e37894f0ae0b..95611f5eff01 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -213,13 +213,24 @@ static const struct iio_info isl29125_info = {
.attrs = &isl29125_attribute_group,
};
-static int isl29125_buffer_preenable(struct iio_dev *indio_dev)
+static int isl29125_buffer_postenable(struct iio_dev *indio_dev)
{
struct isl29125_data *data = iio_priv(indio_dev);
+ int err;
+
+ err = iio_triggered_buffer_postenable(indio_dev);
+ if (err)
+ return err;
data->conf1 |= ISL29125_MODE_RGB;
- return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+ err = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
data->conf1);
+ if (err) {
+ iio_triggered_buffer_predisable(indio_dev);
+ return err;
+ }
+
+ return 0;
}
static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
@@ -227,19 +238,18 @@ static int isl29125_buffer_predisable(struct iio_dev *indio_dev)
struct isl29125_data *data = iio_priv(indio_dev);
int ret;
- ret = iio_triggered_buffer_predisable(indio_dev);
- if (ret < 0)
- return ret;
-
data->conf1 &= ~ISL29125_MODE_MASK;
data->conf1 |= ISL29125_MODE_PD;
- return i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
+ ret = i2c_smbus_write_byte_data(data->client, ISL29125_CONF1,
data->conf1);
+
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
}
static const struct iio_buffer_setup_ops isl29125_buffer_setup_ops = {
- .preenable = isl29125_buffer_preenable,
- .postenable = &iio_triggered_buffer_postenable,
+ .postenable = isl29125_buffer_postenable,
.predisable = isl29125_buffer_predisable,
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 71f99d2a22c1..5a3fcb127cd2 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -101,12 +101,12 @@ struct ltr501_gain {
int uscale;
};
-static struct ltr501_gain ltr501_als_gain_tbl[] = {
+static const struct ltr501_gain ltr501_als_gain_tbl[] = {
{1, 0},
{0, 5000},
};
-static struct ltr501_gain ltr559_als_gain_tbl[] = {
+static const struct ltr501_gain ltr559_als_gain_tbl[] = {
{1, 0},
{0, 500000},
{0, 250000},
@@ -117,14 +117,14 @@ static struct ltr501_gain ltr559_als_gain_tbl[] = {
{0, 10000},
};
-static struct ltr501_gain ltr501_ps_gain_tbl[] = {
+static const struct ltr501_gain ltr501_ps_gain_tbl[] = {
{1, 0},
{0, 250000},
{0, 125000},
{0, 62500},
};
-static struct ltr501_gain ltr559_ps_gain_tbl[] = {
+static const struct ltr501_gain ltr559_ps_gain_tbl[] = {
{0, 62500}, /* x16 gain */
{0, 31250}, /* x32 gain */
{0, 15625}, /* bits X1 are for x64 gain */
@@ -133,9 +133,9 @@ static struct ltr501_gain ltr559_ps_gain_tbl[] = {
struct ltr501_chip_info {
u8 partid;
- struct ltr501_gain *als_gain;
+ const struct ltr501_gain *als_gain;
int als_gain_tbl_size;
- struct ltr501_gain *ps_gain;
+ const struct ltr501_gain *ps_gain;
int ps_gain_tbl_size;
u8 als_mode_active;
u8 als_gain_mask;
@@ -192,7 +192,7 @@ static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
return -EINVAL;
}
-static int ltr501_als_read_samp_freq(struct ltr501_data *data,
+static int ltr501_als_read_samp_freq(const struct ltr501_data *data,
int *val, int *val2)
{
int ret, i;
@@ -210,7 +210,7 @@ static int ltr501_als_read_samp_freq(struct ltr501_data *data,
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ltr501_ps_read_samp_freq(struct ltr501_data *data,
+static int ltr501_ps_read_samp_freq(const struct ltr501_data *data,
int *val, int *val2)
{
int ret, i;
@@ -266,7 +266,7 @@ static int ltr501_ps_write_samp_freq(struct ltr501_data *data,
return ret;
}
-static int ltr501_als_read_samp_period(struct ltr501_data *data, int *val)
+static int ltr501_als_read_samp_period(const struct ltr501_data *data, int *val)
{
int ret, i;
@@ -282,7 +282,7 @@ static int ltr501_als_read_samp_period(struct ltr501_data *data, int *val)
return IIO_VAL_INT;
}
-static int ltr501_ps_read_samp_period(struct ltr501_data *data, int *val)
+static int ltr501_ps_read_samp_period(const struct ltr501_data *data, int *val)
{
int ret, i;
@@ -321,7 +321,7 @@ static unsigned long ltr501_calculate_lux(u16 vis_data, u16 ir_data)
return lux / 1000;
}
-static int ltr501_drdy(struct ltr501_data *data, u8 drdy_mask)
+static int ltr501_drdy(const struct ltr501_data *data, u8 drdy_mask)
{
int tries = 100;
int ret, status;
@@ -373,7 +373,8 @@ static int ltr501_set_it_time(struct ltr501_data *data, int it)
}
/* read int time in micro seconds */
-static int ltr501_read_it_time(struct ltr501_data *data, int *val, int *val2)
+static int ltr501_read_it_time(const struct ltr501_data *data,
+ int *val, int *val2)
{
int ret, index;
@@ -391,7 +392,7 @@ static int ltr501_read_it_time(struct ltr501_data *data, int *val, int *val2)
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
+static int ltr501_read_als(const struct ltr501_data *data, __le16 buf[2])
{
int ret;
@@ -403,7 +404,7 @@ static int ltr501_read_als(struct ltr501_data *data, __le16 buf[2])
buf, 2 * sizeof(__le16));
}
-static int ltr501_read_ps(struct ltr501_data *data)
+static int ltr501_read_ps(const struct ltr501_data *data)
{
int ret, status;
@@ -419,7 +420,7 @@ static int ltr501_read_ps(struct ltr501_data *data)
return status;
}
-static int ltr501_read_intr_prst(struct ltr501_data *data,
+static int ltr501_read_intr_prst(const struct ltr501_data *data,
enum iio_chan_type type,
int *val2)
{
@@ -716,7 +717,7 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ltr501_get_gain_index(struct ltr501_gain *gain, int size,
+static int ltr501_get_gain_index(const struct ltr501_gain *gain, int size,
int val, int val2)
{
int i;
@@ -848,14 +849,14 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int ltr501_read_thresh(struct iio_dev *indio_dev,
+static int ltr501_read_thresh(const struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
enum iio_event_info info,
int *val, int *val2)
{
- struct ltr501_data *data = iio_priv(indio_dev);
+ const struct ltr501_data *data = iio_priv(indio_dev);
int ret, thresh_data;
switch (chan->type) {
@@ -1263,7 +1264,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
if (mask & LTR501_STATUS_ALS_RDY) {
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
- (u8 *)als_buf, sizeof(als_buf));
+ als_buf, sizeof(als_buf));
if (ret < 0)
return ret;
if (test_bit(0, indio_dev->active_scan_mask))
@@ -1359,7 +1360,7 @@ static bool ltr501_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static struct regmap_config ltr501_regmap_config = {
+static const struct regmap_config ltr501_regmap_config = {
.name = LTR501_REGMAP_NAME,
.reg_bits = 8,
.val_bits = 8,
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 92004a2563ea..82abfa57b59c 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -16,6 +16,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -844,7 +845,7 @@ static struct i2c_driver opt3001_driver = {
.driver = {
.name = "opt3001",
- .of_match_table = of_match_ptr(opt3001_of_match),
+ .of_match_table = opt3001_of_match,
},
};
diff --git a/drivers/iio/light/si1133.c b/drivers/iio/light/si1133.c
index 9174ab928880..c1adab2a50fd 100644
--- a/drivers/iio/light/si1133.c
+++ b/drivers/iio/light/si1133.c
@@ -17,6 +17,8 @@
#include <linux/util_macros.h>
+#include <asm/unaligned.h>
+
#define SI1133_REG_PART_ID 0x00
#define SI1133_REG_REV_ID 0x01
#define SI1133_REG_MFR_ID 0x02
@@ -104,8 +106,6 @@
#define SI1133_LUX_BUFFER_SIZE 9
#define SI1133_MEASURE_BUFFER_SIZE 3
-#define SI1133_SIGN_BIT_INDEX 23
-
static const int si1133_scale_available[] = {
1, 2, 4, 8, 16, 32, 64, 128};
@@ -633,8 +633,7 @@ static int si1133_measure(struct si1133_data *data,
if (err)
return err;
- *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- SI1133_SIGN_BIT_INDEX);
+ *val = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
return err;
}
@@ -723,16 +722,11 @@ static int si1133_get_lux(struct si1133_data *data, int *val)
if (err)
return err;
- high_vis =
- sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- SI1133_SIGN_BIT_INDEX);
+ high_vis = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
- low_vis =
- sign_extend32((buffer[3] << 16) | (buffer[4] << 8) | buffer[5],
- SI1133_SIGN_BIT_INDEX);
+ low_vis = sign_extend32(get_unaligned_be24(&buffer[3]), 23);
- ir = sign_extend32((buffer[6] << 16) | (buffer[7] << 8) | buffer[8],
- SI1133_SIGN_BIT_INDEX);
+ ir = sign_extend32(get_unaligned_be24(&buffer[6]), 23);
if (high_vis > SI1133_ADC_THRESHOLD || ir > SI1133_ADC_THRESHOLD)
lux = si1133_calc_polynomial(high_vis, ir,
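
The si1133 hunks replace the open-coded (b[0] << 16) | (b[1] << 8) | b[2] assembly with get_unaligned_be24() followed by sign_extend32(..., 23); both turn three big-endian bytes into a signed 24-bit sample. A portable userspace equivalent, for reference:

#include <stdint.h>
#include <stdio.h>

/* Assemble a signed 24-bit big-endian sample from three raw bytes. */
static int32_t be24_to_s32(const uint8_t *b)
{
	int32_t v = ((int32_t)b[0] << 16) | ((int32_t)b[1] << 8) | b[2];

	/* Sign-extend bit 23 into the upper byte. */
	if (v & 0x800000)
		v -= 0x1000000;
	return v;
}

int main(void)
{
	uint8_t positive[3] = { 0x01, 0x02, 0x03 }; /* 0x010203 = 66051 */
	uint8_t negative[3] = { 0xff, 0xff, 0xfe }; /* 0xfffffe =    -2 */

	printf("%d %d\n", be24_to_s32(positive), be24_to_s32(negative));
	return 0;
}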
diff --git a/drivers/iio/light/st_uvis25_i2c.c b/drivers/iio/light/st_uvis25_i2c.c
index 4889bbeb0c73..98cd49eefe45 100644
--- a/drivers/iio/light/st_uvis25_i2c.c
+++ b/drivers/iio/light/st_uvis25_i2c.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -31,8 +32,8 @@ static int st_uvis25_i2c_probe(struct i2c_client *client,
regmap = devm_regmap_init_i2c(client, &st_uvis25_i2c_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "Failed to register i2c regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&client->dev, "Failed to register i2c regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -55,7 +56,7 @@ static struct i2c_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_i2c",
.pm = &st_uvis25_pm_ops,
- .of_match_table = of_match_ptr(st_uvis25_i2c_of_match),
+ .of_match_table = st_uvis25_i2c_of_match,
},
.probe = st_uvis25_i2c_probe,
.id_table = st_uvis25_i2c_id_table,
diff --git a/drivers/iio/light/st_uvis25_spi.c b/drivers/iio/light/st_uvis25_spi.c
index a9ceae4f58b3..af9d94d12787 100644
--- a/drivers/iio/light/st_uvis25_spi.c
+++ b/drivers/iio/light/st_uvis25_spi.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -31,8 +32,8 @@ static int st_uvis25_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &st_uvis25_spi_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap %ld\n",
+ PTR_ERR(regmap));
return PTR_ERR(regmap);
}
@@ -55,7 +56,7 @@ static struct spi_driver st_uvis25_driver = {
.driver = {
.name = "st_uvis25_spi",
.pm = &st_uvis25_pm_ops,
- .of_match_table = of_match_ptr(st_uvis25_spi_of_match),
+ .of_match_table = st_uvis25_spi_of_match,
},
.probe = st_uvis25_spi_probe,
.id_table = st_uvis25_spi_id_table,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index d8c40a83097d..27a5c28aac7f 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -69,7 +69,7 @@
#define TSL2563_TIMING_GAIN16 0x10
#define TSL2563_TIMING_GAIN1 0x00
-#define TSL2563_INT_DISBLED 0x00
+#define TSL2563_INT_DISABLED 0x00
#define TSL2563_INT_LEVEL 0x10
#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index be37fcbd4654..9fbde9b71b63 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -932,7 +932,7 @@ static ssize_t in_illuminance0_target_input_show(struct device *dev,
{
struct tsl2772_chip *chip = iio_priv(dev_to_iio_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
}
static ssize_t in_illuminance0_target_input_store(struct device *dev,
@@ -986,7 +986,7 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
int offset = 0;
while (i < TSL2772_MAX_LUX_TABLE_SIZE) {
- offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,",
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%u,%u,",
chip->tsl2772_device_lux[i].ch0,
chip->tsl2772_device_lux[i].ch1);
if (chip->tsl2772_device_lux[i].ch0 == 0) {
@@ -1000,7 +1000,7 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
i++;
}
- offset += snprintf(buf + offset, PAGE_SIZE, "\n");
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n");
return offset;
}
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index ec803c1e81df..2a4b3d331055 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -5,6 +5,7 @@
*
* Copyright 2012 Peter Meerwald <pmeerw@pmeerw.net>
* Copyright 2019 Pursim SPC
+ * Copyright 2020 Mathieu Othacehe <m.othacehe@gmail.com>
*
* IIO driver for:
* VCNL4000/10/20 (7-bit I2C slave address 0x13)
@@ -13,9 +14,7 @@
*
* TODO:
* allow to adjust IR current
- * proximity threshold and event handling
- * periodic ALS/proximity measurement (VCNL4010/20)
- * interrupts (VCNL4010/20/40, VCNL4200)
+ * interrupts (VCNL4040, VCNL4200)
*/
#include <linux/module.h>
@@ -23,9 +22,15 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#define VCNL4000_DRV_NAME "vcnl4000"
#define VCNL4000_PROD_ID 0x01
@@ -35,14 +40,22 @@
#define VCNL4000_COMMAND 0x80 /* Command register */
#define VCNL4000_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL4010_PROX_RATE 0x82 /* Proximity rate */
#define VCNL4000_LED_CURRENT 0x83 /* IR LED current for proximity mode */
#define VCNL4000_AL_PARAM 0x84 /* Ambient light parameter register */
+#define VCNL4010_ALS_PARAM 0x84 /* ALS rate */
#define VCNL4000_AL_RESULT_HI 0x85 /* Ambient light result register, MSB */
#define VCNL4000_AL_RESULT_LO 0x86 /* Ambient light result register, LSB */
#define VCNL4000_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
#define VCNL4000_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
#define VCNL4000_PS_MEAS_FREQ 0x89 /* Proximity test signal frequency */
+#define VCNL4010_INT_CTRL 0x89 /* Interrupt control */
#define VCNL4000_PS_MOD_ADJ 0x8a /* Proximity modulator timing adjustment */
+#define VCNL4010_LOW_THR_HI 0x8a /* Low threshold, MSB */
+#define VCNL4010_LOW_THR_LO 0x8b /* Low threshold, LSB */
+#define VCNL4010_HIGH_THR_HI 0x8c /* High threshold, MSB */
+#define VCNL4010_HIGH_THR_LO 0x8d /* High threshold, LSB */
+#define VCNL4010_ISR 0x8e /* Interrupt status */
#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
@@ -57,6 +70,36 @@
#define VCNL4000_PS_RDY BIT(5) /* proximity data ready? */
#define VCNL4000_AL_OD BIT(4) /* start on-demand ALS measurement */
#define VCNL4000_PS_OD BIT(3) /* start on-demand proximity measurement */
+#define VCNL4000_ALS_EN BIT(2) /* start ALS measurement */
+#define VCNL4000_PROX_EN BIT(1) /* start proximity measurement */
+#define VCNL4000_SELF_TIMED_EN BIT(0) /* start self-timed measurement */
+
+/* Bit masks for interrupt registers. */
+#define VCNL4010_INT_THR_SEL BIT(0) /* Select threshold interrupt source */
+#define VCNL4010_INT_THR_EN BIT(1) /* Threshold interrupt type */
+#define VCNL4010_INT_ALS_EN BIT(2) /* Enable on ALS data ready */
+#define VCNL4010_INT_PROX_EN BIT(3) /* Enable on proximity data ready */
+
+#define VCNL4010_INT_THR_HIGH 0 /* High threshold exceeded */
+#define VCNL4010_INT_THR_LOW 1 /* Low threshold exceeded */
+#define VCNL4010_INT_ALS 2 /* ALS data ready */
+#define VCNL4010_INT_PROXIMITY 3 /* Proximity data ready */
+
+#define VCNL4010_INT_THR \
+ (BIT(VCNL4010_INT_THR_LOW) | BIT(VCNL4010_INT_THR_HIGH))
+#define VCNL4010_INT_DRDY \
+ (BIT(VCNL4010_INT_PROXIMITY) | BIT(VCNL4010_INT_ALS))
+
+static const int vcnl4010_prox_sampling_frequency[][2] = {
+ {1, 950000},
+ {3, 906250},
+ {7, 812500},
+ {16, 625000},
+ {31, 250000},
+ {62, 500000},
+ {125, 0},
+ {250, 0},
+};
#define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */
@@ -83,10 +126,15 @@ struct vcnl4000_data {
struct mutex vcnl4000_lock;
struct vcnl4200_channel vcnl4200_al;
struct vcnl4200_channel vcnl4200_ps;
+ uint32_t near_level;
};
struct vcnl4000_chip_spec {
const char *prod;
+ struct iio_chan_spec const *channels;
+ const int num_channels;
+ const struct iio_info *info;
+ bool irq_support;
int (*init)(struct vcnl4000_data *data);
int (*measure_light)(struct vcnl4000_data *data, int *val);
int (*measure_proximity)(struct vcnl4000_data *data, int *val);
@@ -215,11 +263,31 @@ static int vcnl4200_init(struct vcnl4000_data *data)
return 0;
};
+static int vcnl4000_read_data(struct vcnl4000_data *data, u8 data_reg, int *val)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_word_swapped(data->client, data_reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int vcnl4000_write_data(struct vcnl4000_data *data, u8 data_reg, int val)
+{
+ if (val > U16_MAX)
+ return -ERANGE;
+
+ return i2c_smbus_write_word_swapped(data->client, data_reg, val);
+}
+
+
static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
u8 rdy_mask, u8 data_reg, int *val)
{
int tries = 20;
- __be16 buf;
int ret;
mutex_lock(&data->vcnl4000_lock);
@@ -246,13 +314,11 @@ static int vcnl4000_measure(struct vcnl4000_data *data, u8 req_mask,
goto fail;
}
- ret = i2c_smbus_read_i2c_block_data(data->client,
- data_reg, sizeof(buf), (u8 *) &buf);
+ ret = vcnl4000_read_data(data, data_reg, val);
if (ret < 0)
goto fail;
mutex_unlock(&data->vcnl4000_lock);
- *val = be16_to_cpu(buf);
return 0;
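
vcnl4000_read_data() swaps the two-byte block read plus be16_to_cpu() for i2c_smbus_read_word_swapped(); the results match because the sensor sends the high byte first while an SMBus word read puts the first byte in the low half, so a byte swap restores the big-endian value. A tiny userspace illustration, with the I2C transfer simulated by a byte array:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Bytes as they arrive on the wire: MSB first, then LSB. */
	uint8_t wire[2] = { 0x12, 0x34 };

	/* Old path: treat the buffer as big-endian 16-bit and convert. */
	uint16_t be16 = (uint16_t)(wire[0] << 8) | wire[1];

	/* SMBus word read path: the first byte lands in the low half... */
	uint16_t smbus_word = (uint16_t)(wire[1] << 8) | wire[0];
	/* ...so swapping the bytes recovers the same value. */
	uint16_t swapped = (uint16_t)(smbus_word << 8) | (smbus_word >> 8);

	printf("0x%04x 0x%04x\n", be16, swapped); /* both print 0x1234 */
	return 0;
}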
@@ -312,47 +378,34 @@ static int vcnl4200_measure_proximity(struct vcnl4000_data *data, int *val)
return vcnl4200_measure(data, &data->vcnl4200_ps, val);
}
-static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
- [VCNL4000] = {
- .prod = "VCNL4000",
- .init = vcnl4000_init,
- .measure_light = vcnl4000_measure_light,
- .measure_proximity = vcnl4000_measure_proximity,
- .set_power_state = vcnl4000_set_power_state,
- },
- [VCNL4010] = {
- .prod = "VCNL4010/4020",
- .init = vcnl4000_init,
- .measure_light = vcnl4000_measure_light,
- .measure_proximity = vcnl4000_measure_proximity,
- .set_power_state = vcnl4000_set_power_state,
- },
- [VCNL4040] = {
- .prod = "VCNL4040",
- .init = vcnl4200_init,
- .measure_light = vcnl4200_measure_light,
- .measure_proximity = vcnl4200_measure_proximity,
- .set_power_state = vcnl4200_set_power_state,
- },
- [VCNL4200] = {
- .prod = "VCNL4200",
- .init = vcnl4200_init,
- .measure_light = vcnl4200_measure_light,
- .measure_proximity = vcnl4200_measure_proximity,
- .set_power_state = vcnl4200_set_power_state,
- },
-};
+static int vcnl4010_read_proxy_samp_freq(struct vcnl4000_data *data, int *val,
+ int *val2)
+{
+ int ret;
-static const struct iio_chan_spec vcnl4000_channels[] = {
- {
- .type = IIO_LIGHT,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- }, {
- .type = IIO_PROXIMITY,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- }
-};
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_PROX_RATE);
+ if (ret < 0)
+ return ret;
+
+ if (ret >= ARRAY_SIZE(vcnl4010_prox_sampling_frequency))
+ return -EINVAL;
+
+ *val = vcnl4010_prox_sampling_frequency[ret][0];
+ *val2 = vcnl4010_prox_sampling_frequency[ret][1];
+
+ return 0;
+}
+
+static bool vcnl4010_is_in_periodic_mode(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4000_COMMAND);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & VCNL4000_SELF_TIMED_EN);
+}
static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
{
@@ -412,10 +465,571 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
}
}
+static int vcnl4010_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_SCALE:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Protect against event capture. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ } else {
+ ret = vcnl4000_read_raw(indio_dev, chan, val, val2,
+ mask);
+ }
+
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4010_read_proxy_samp_freq(data, val, val2);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)vcnl4010_prox_sampling_frequency;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = 2 * ARRAY_SIZE(vcnl4010_prox_sampling_frequency);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_write_proxy_samp_freq(struct vcnl4000_data *data, int val,
+ int val2)
+{
+ unsigned int i;
+ int index = -1;
+
+ for (i = 0; i < ARRAY_SIZE(vcnl4010_prox_sampling_frequency); i++) {
+ if (val == vcnl4010_prox_sampling_frequency[i][0] &&
+ val2 == vcnl4010_prox_sampling_frequency[i][1]) {
+ index = i;
+ break;
+ }
+ }
+
+ if (index < 0)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(data->client, VCNL4010_PROX_RATE,
+ index);
+}
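
vcnl4010_write_proxy_samp_freq() above maps an {integer Hz, micro-Hz} pair back to the index of the matching vcnl4010_prox_sampling_frequency[] entry, the same table that read_avail exposes as an IIO_VAL_INT_PLUS_MICRO list. A standalone sketch of that reverse lookup using the table values from this patch:

#include <stdio.h>

static const int prox_sampling_frequency[][2] = {
	{1, 950000}, {3, 906250}, {7, 812500}, {16, 625000},
	{31, 250000}, {62, 500000}, {125, 0}, {250, 0},
};

/* Return the register index for a rate of val + val2/1e6 Hz, or -1. */
static int rate_to_index(int val, int val2)
{
	unsigned int i;

	for (i = 0; i < sizeof(prox_sampling_frequency) /
			sizeof(prox_sampling_frequency[0]); i++)
		if (val == prox_sampling_frequency[i][0] &&
		    val2 == prox_sampling_frequency[i][1])
			return i;
	return -1;
}

int main(void)
{
	/* 31.25 Hz is entry 4; 60 Hz is not in the table. */
	printf("%d %d\n", rate_to_index(31, 250000), rate_to_index(60, 0));
	return 0;
}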
+
+static int vcnl4010_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Protect against event capture. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4010_write_proxy_samp_freq(data, val, val2);
+ goto end;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+end:
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+}
+
+static int vcnl4010_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = vcnl4000_read_data(data, VCNL4010_HIGH_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = vcnl4000_read_data(data, VCNL4010_LOW_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = vcnl4000_write_data(data, VCNL4010_HIGH_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = vcnl4000_write_data(data, VCNL4010_LOW_THR_HI,
+ val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool vcnl4010_is_thr_enabled(struct vcnl4000_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_INT_CTRL);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & VCNL4010_INT_THR_EN);
+}
+
+static int vcnl4010_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4010_is_thr_enabled(data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4010_config_threshold(struct iio_dev *indio_dev, bool state)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int icr;
+ int command;
+
+ if (state) {
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Enable periodic measurement of proximity data. */
+ command = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+
+ /*
+ * Enable interrupts on threshold for proximity data by
+ * default.
+ */
+ icr = VCNL4010_INT_THR_EN;
+ } else {
+ if (!vcnl4010_is_thr_enabled(data))
+ return 0;
+
+ command = 0;
+ icr = 0;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND,
+ command);
+ if (ret < 0)
+ goto end;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, icr);
+
+end:
+ if (state)
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static int vcnl4010_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4010_config_threshold(indio_dev, state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t vcnl4000_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->near_level);
+}
+
+static const struct iio_chan_spec_ext_info vcnl4000_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = vcnl4000_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
+static const struct iio_event_spec vcnl4000_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ }
+};
+
+static const struct iio_chan_spec vcnl4000_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .ext_info = vcnl4000_ext_info,
+ }
+};
+
+static const struct iio_chan_spec vcnl4010_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .scan_index = -1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }, {
+ .type = IIO_PROXIMITY,
+ .scan_index = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .event_spec = vcnl4000_event_spec,
+ .num_event_specs = ARRAY_SIZE(vcnl4000_event_spec),
+ .ext_info = vcnl4000_ext_info,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
static const struct iio_info vcnl4000_info = {
.read_raw = vcnl4000_read_raw,
};
+static const struct iio_info vcnl4010_info = {
+ .read_raw = vcnl4010_read_raw,
+ .read_avail = vcnl4010_read_avail,
+ .write_raw = vcnl4010_write_raw,
+ .read_event_value = vcnl4010_read_event,
+ .write_event_value = vcnl4010_write_event,
+ .read_event_config = vcnl4010_read_event_config,
+ .write_event_config = vcnl4010_write_event_config,
+};
+
+static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
+ [VCNL4000] = {
+ .prod = "VCNL4000",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+ [VCNL4010] = {
+ .prod = "VCNL4010/4020",
+ .init = vcnl4000_init,
+ .measure_light = vcnl4000_measure_light,
+ .measure_proximity = vcnl4000_measure_proximity,
+ .set_power_state = vcnl4000_set_power_state,
+ .channels = vcnl4010_channels,
+ .num_channels = ARRAY_SIZE(vcnl4010_channels),
+ .info = &vcnl4010_info,
+ .irq_support = true,
+ },
+ [VCNL4040] = {
+ .prod = "VCNL4040",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+ [VCNL4200] = {
+ .prod = "VCNL4200",
+ .init = vcnl4200_init,
+ .measure_light = vcnl4200_measure_light,
+ .measure_proximity = vcnl4200_measure_proximity,
+ .set_power_state = vcnl4200_set_power_state,
+ .channels = vcnl4000_channels,
+ .num_channels = ARRAY_SIZE(vcnl4000_channels),
+ .info = &vcnl4000_info,
+ .irq_support = false,
+ },
+};
+
+static irqreturn_t vcnl4010_irq_thread(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ unsigned long isr;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
+ if (isr & VCNL4010_INT_THR) {
+ if (test_bit(VCNL4010_INT_THR_LOW, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (test_bit(VCNL4010_INT_THR_HIGH, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_THR);
+ }
+
+ if (isr & VCNL4010_INT_DRDY && iio_buffer_enabled(indio_dev))
+ iio_trigger_poll_chained(indio_dev->trig);
+
+end:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
+ u16 buffer[8] = {0}; /* 1x16-bit + ts */
+ bool data_read = false;
+ unsigned long isr;
+ int val = 0;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
+ if (test_bit(0, active_scan_mask)) {
+ if (test_bit(VCNL4010_INT_PROXIMITY, &isr)) {
+ ret = vcnl4000_read_data(data,
+ VCNL4000_PS_RESULT_HI,
+ &val);
+ if (ret < 0)
+ goto end;
+
+ buffer[0] = val;
+ data_read = true;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_DRDY);
+ if (ret < 0)
+ goto end;
+
+ if (!data_read)
+ goto end;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns(indio_dev));
+
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
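[Editorial note: why the trigger handler above reserves u16 buffer[8] for a single
16-bit sample. Assuming the usual IIO convention, iio_push_to_buffers_with_timestamp()
stores the s64 timestamp at the next 8-byte-aligned offset after the scan data, so the
backing storage must cover the sample bytes, alignment padding, and 8 bytes of
timestamp. A rough userspace arithmetic sketch:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t sample_bytes = sizeof(uint16_t);	/* one proximity sample */
	size_t ts_offset = (sample_bytes + 7) & ~(size_t)7; /* align to 8 */
	size_t total = ts_offset + sizeof(int64_t);

	printf("timestamp offset: %zu, total bytes: %zu\n", ts_offset, total);
	/* total == 16 == sizeof(u16[8]), matching the driver's buffer. */
	return 0;
}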
+static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int cmd;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
+ /* Do not enable the buffer if we are already capturing events. */
+ if (vcnl4010_is_in_periodic_mode(data)) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL,
+ VCNL4010_INT_PROX_EN);
+ if (ret < 0)
+ goto end;
+
+ cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd);
+ if (ret < 0)
+ goto end;
+
+ return 0;
+end:
+ iio_triggered_buffer_predisable(indio_dev);
+
+ return ret;
+}
+
+static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret, ret_disable;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0);
+ if (ret < 0)
+ goto end;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0);
+
+end:
+ ret_disable = iio_triggered_buffer_predisable(indio_dev);
+ if (ret == 0)
+ ret = ret_disable;
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = {
+ .postenable = &vcnl4010_buffer_postenable,
+ .predisable = &vcnl4010_buffer_predisable,
+};
+
+static const struct iio_trigger_ops vcnl4010_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static int vcnl4010_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ struct iio_trigger *trigger;
+
+ trigger = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!trigger)
+ return -ENOMEM;
+
+ trigger->dev.parent = &client->dev;
+ trigger->ops = &vcnl4010_trigger_ops;
+ iio_trigger_set_drvdata(trigger, indio_dev);
+
+ return devm_iio_trigger_register(&client->dev, trigger);
+}
+
static int vcnl4000_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -440,13 +1054,44 @@ static int vcnl4000_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s Ambient light/proximity sensor, Rev: %02x\n",
data->chip_spec->prod, data->rev);
+ if (device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->near_level))
+ data->near_level = 0;
+
indio_dev->dev.parent = &client->dev;
- indio_dev->info = &vcnl4000_info;
- indio_dev->channels = vcnl4000_channels;
- indio_dev->num_channels = ARRAY_SIZE(vcnl4000_channels);
+ indio_dev->info = data->chip_spec->info;
+ indio_dev->channels = data->chip_spec->channels;
+ indio_dev->num_channels = data->chip_spec->num_channels;
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
+ if (client->irq && data->chip_spec->irq_support) {
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL,
+ vcnl4010_trigger_handler,
+ &vcnl4010_buffer_ops);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "unable to setup iio triggered buffer\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, vcnl4010_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "vcnl4010_irq",
+ indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "irq request failed\n");
+ return ret;
+ }
+
+ ret = vcnl4010_probe_trigger(indio_dev);
+ if (ret < 0)
+ return ret;
+ }
+
ret = pm_runtime_set_active(&client->dev);
if (ret < 0)
goto fail_poweroff;
@@ -540,5 +1185,6 @@ static struct i2c_driver vcnl4000_driver = {
module_i2c_driver(vcnl4000_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
MODULE_DESCRIPTION("Vishay VCNL4000 proximity/ambient light sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index d9533a76b8f6..ed7b02765b97 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/err.h>
@@ -537,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, vl6180_id);
static struct i2c_driver vl6180_driver = {
.driver = {
.name = VL6180_DRV_NAME,
- .of_match_table = of_match_ptr(vl6180_of_match),
+ .of_match_table = vl6180_of_match,
},
.probe = vl6180_probe,
.id_table = vl6180_id,
diff --git a/drivers/iio/light/zopt2201.c b/drivers/iio/light/zopt2201.c
index 5f54f39e7a4c..80ae530720cd 100644
--- a/drivers/iio/light/zopt2201.c
+++ b/drivers/iio/light/zopt2201.c
@@ -19,6 +19,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
#define ZOPT2201_DRV_NAME "zopt2201"
/* Registers */
@@ -219,7 +221,7 @@ static int zopt2201_read(struct zopt2201_data *data, u8 reg)
goto fail;
mutex_unlock(&data->lock);
- return (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ return get_unaligned_le24(&buf[0]);
fail:
mutex_unlock(&data->lock);
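[Editorial note: the zopt2201 change replaces the open-coded shift-and-or assembly of
three little-endian bytes with get_unaligned_le24(). A small userspace sketch, not part
of the patch, showing the value both forms produce:]

#include <stdint.h>
#include <stdio.h>

/* Assemble a 24-bit little-endian value, as the old open-coded
 * expression did and as get_unaligned_le24() does in the kernel. */
static uint32_t le24(const uint8_t *buf)
{
	return (uint32_t)buf[2] << 16 | (uint32_t)buf[1] << 8 | buf[0];
}

int main(void)
{
	uint8_t buf[3] = { 0x34, 0x12, 0xab };	/* LSB first on the wire */

	printf("0x%06x\n", le24(buf));		/* prints 0xab1234 */
	return 0;
}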
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index d32996702110..810fdfd37c88 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -49,6 +49,7 @@
#define AK8974_WHOAMI_VALUE_AMI306 0x46
#define AK8974_WHOAMI_VALUE_AMI305 0x47
#define AK8974_WHOAMI_VALUE_AK8974 0x48
+#define AK8974_WHOAMI_VALUE_HSCDTD008A 0x49
#define AK8974_DATA_X 0x10
#define AK8974_DATA_Y 0x12
@@ -140,6 +141,12 @@
#define AK8974_INT_CTRL_PULSE BIT(1) /* 0 = latched; 1 = pulse (50 usec) */
#define AK8974_INT_CTRL_RESDEF (AK8974_INT_CTRL_XYZEN | AK8974_INT_CTRL_POL)
+/* HSCDTD008A-specific control register */
+#define HSCDTD008A_CTRL4 0x1E
+#define HSCDTD008A_CTRL4_MMD BIT(7) /* must be set to 1 */
+#define HSCDTD008A_CTRL4_RANGE BIT(4) /* 0 = 14-bit output; 1 = 15-bit output */
+#define HSCDTD008A_CTRL4_RESDEF (HSCDTD008A_CTRL4_MMD | HSCDTD008A_CTRL4_RANGE)
+
/* The AMI305 has elaborate FW version and serial number registers */
#define AMI305_VER 0xE8
#define AMI305_SN 0xEA
@@ -241,10 +248,17 @@ static int ak8974_reset(struct ak8974 *ak8974)
ret = regmap_write(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_RESDEF);
if (ret)
return ret;
- ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
- AK8974_INT_CTRL_RESDEF);
- if (ret)
- return ret;
+ if (ak8974->variant != AK8974_WHOAMI_VALUE_HSCDTD008A) {
+ ret = regmap_write(ak8974->map, AK8974_INT_CTRL,
+ AK8974_INT_CTRL_RESDEF);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write(ak8974->map, HSCDTD008A_CTRL4,
+ HSCDTD008A_CTRL4_RESDEF);
+ if (ret)
+ return ret;
+ }
/* After reset, power off is default state */
return ak8974_set_power(ak8974, AK8974_PWR_OFF);
@@ -267,6 +281,8 @@ static int ak8974_configure(struct ak8974 *ak8974)
if (ret)
return ret;
}
+ if (ak8974->variant == AK8974_WHOAMI_VALUE_HSCDTD008A)
+ return 0;
ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL);
if (ret)
return ret;
@@ -495,6 +511,10 @@ static int ak8974_detect(struct ak8974 *ak8974)
name = "ak8974";
dev_info(&ak8974->i2c->dev, "detected AK8974\n");
break;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ name = "hscdtd008a";
+ dev_info(&ak8974->i2c->dev, "detected hscdtd008a\n");
+ break;
default:
dev_err(&ak8974->i2c->dev, "unsupported device (%02x) ",
whoami);
@@ -534,47 +554,103 @@ static int ak8974_detect(struct ak8974 *ak8974)
return 0;
}
+static int ak8974_measure_channel(struct ak8974 *ak8974, unsigned long address,
+ int *val)
+{
+ __le16 hw_values[3];
+ int ret;
+
+ pm_runtime_get_sync(&ak8974->i2c->dev);
+ mutex_lock(&ak8974->lock);
+
+ /*
+ * We read all axes and discard all but one; for optimized
+ * reading, use the triggered buffer.
+ */
+ ret = ak8974_trigmeas(ak8974);
+ if (ret)
+ goto out_unlock;
+ ret = ak8974_getresult(ak8974, hw_values);
+ if (ret)
+ goto out_unlock;
+ /*
+ * This explicit cast to (s16) is necessary as the measurement
+ * is done in 2's complement with positive and negative values.
+ * The following assignment to *val will then convert the signed
+ * s16 value to a signed int value.
+ */
+ *val = (s16)le16_to_cpu(hw_values[address]);
+out_unlock:
+ mutex_unlock(&ak8974->lock);
+ pm_runtime_mark_last_busy(&ak8974->i2c->dev);
+ pm_runtime_put_autosuspend(&ak8974->i2c->dev);
+
+ return ret;
+}
+
static int ak8974_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2,
long mask)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
- __le16 hw_values[3];
- int ret = -EINVAL;
-
- pm_runtime_get_sync(&ak8974->i2c->dev);
- mutex_lock(&ak8974->lock);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (chan->address > 2) {
dev_err(&ak8974->i2c->dev, "faulty channel address\n");
- ret = -EIO;
- goto out_unlock;
+ return -EIO;
}
- ret = ak8974_trigmeas(ak8974);
- if (ret)
- goto out_unlock;
- ret = ak8974_getresult(ak8974, hw_values);
+ ret = ak8974_measure_channel(ak8974, chan->address, val);
if (ret)
- goto out_unlock;
-
- /*
- * We read all axes and discard all but one, for optimized
- * reading, use the triggered buffer.
- */
- *val = (s16)le16_to_cpu(hw_values[chan->address]);
-
- ret = IIO_VAL_INT;
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (ak8974->variant) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ case AK8974_WHOAMI_VALUE_AMI305:
+ /*
+ * The datasheet for AMI305 and AMI306, page 6
+ * specifies the range of the sensor to be
+ * +/- 12 Gauss.
+ */
+ *val = 12;
+ /*
+ * 12 bits are used, +/- 2^11
+ * [ -2048 .. 2047 ] (manual page 20)
+ * [ 0xf800 .. 0x07ff ]
+ */
+ *val2 = 11;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ /*
+ * The datasheet for HSCDTD008A, page 3 specifies the
+ * range of the sensor as +/- 2.4 mT per axis, which
+ * corresponds to +/- 2400 uT = +/- 24 Gauss.
+ */
+ *val = 24;
+ /*
+ * 15 bits are used (set up in CTRL4), +/- 2^14
+ * [ -16384 .. 16383 ] (manual page 24)
+ * [ 0xc000 .. 0x3fff ]
+ */
+ *val2 = 14;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ /* GUESSING +/- 12 Gauss */
+ *val = 12;
+ /* GUESSING 12 bits ADC +/- 2^11 */
+ *val2 = 11;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+ break;
+ default:
+ /* Unknown request */
+ break;
}
- out_unlock:
- mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
- pm_runtime_put_autosuspend(&ak8974->i2c->dev);
-
- return ret;
+ return -EINVAL;
}
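[Editorial note: a sketch of the scale arithmetic above. With IIO_VAL_FRACTIONAL_LOG2
the reported scale is val / 2^val2, so the AMI305/AMI306 case yields 12 / 2^11 Gauss
per LSB and the HSCDTD008A case 24 / 2^14. Userspace check, not part of the patch:]

#include <stdio.h>

/* IIO_VAL_FRACTIONAL_LOG2 means "scale = val / 2^val2". */
static double fractional_log2(int val, int val2)
{
	return (double)val / (double)(1 << val2);
}

int main(void)
{
	/* AMI305/AMI306: +/- 12 Gauss over a 12-bit (+/- 2^11) range. */
	printf("AMI30x scale:     %.6f Gauss/LSB\n", fractional_log2(12, 11));
	/* HSCDTD008A: +/- 24 Gauss over a 15-bit (+/- 2^14) range. */
	printf("HSCDTD008A scale: %.6f Gauss/LSB\n", fractional_log2(24, 14));
	return 0;
}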
static void ak8974_fill_buffer(struct iio_dev *indio_dev)
@@ -631,27 +707,44 @@ static const struct iio_chan_spec_ext_info ak8974_ext_info[] = {
{ },
};
-#define AK8974_AXIS_CHANNEL(axis, index) \
+#define AK8974_AXIS_CHANNEL(axis, index, bits) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
.ext_info = ak8974_ext_info, \
.address = index, \
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 16, \
+ .realbits = bits, \
.storagebits = 16, \
.endianness = IIO_LE \
}, \
}
-static const struct iio_chan_spec ak8974_channels[] = {
- AK8974_AXIS_CHANNEL(X, 0),
- AK8974_AXIS_CHANNEL(Y, 1),
- AK8974_AXIS_CHANNEL(Z, 2),
+/*
+ * We have no datasheet for the AK8974 but we guess that its
+ * ADC is 12 bits. The AMI305 and AMI306 certainly has 12bit
+ * ADC.
+ */
+static const struct iio_chan_spec ak8974_12_bits_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0, 12),
+ AK8974_AXIS_CHANNEL(Y, 1, 12),
+ AK8974_AXIS_CHANNEL(Z, 2, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/*
+ * The HSCDTD008A has 15 bits of resolution the way we set it up
+ * in CTRL4.
+ */
+static const struct iio_chan_spec ak8974_15_bits_channels[] = {
+ AK8974_AXIS_CHANNEL(X, 0, 15),
+ AK8974_AXIS_CHANNEL(Y, 1, 15),
+ AK8974_AXIS_CHANNEL(Z, 2, 15),
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -674,18 +767,18 @@ static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
case AK8974_INT_CTRL:
case AK8974_INT_THRES:
case AK8974_INT_THRES + 1:
+ return true;
case AK8974_PRESET:
case AK8974_PRESET + 1:
- return true;
+ return ak8974->variant != AK8974_WHOAMI_VALUE_HSCDTD008A;
case AK8974_OFFSET_X:
case AK8974_OFFSET_X + 1:
case AK8974_OFFSET_Y:
case AK8974_OFFSET_Y + 1:
case AK8974_OFFSET_Z:
case AK8974_OFFSET_Z + 1:
- if (ak8974->variant == AK8974_WHOAMI_VALUE_AK8974)
- return true;
- return false;
+ return ak8974->variant == AK8974_WHOAMI_VALUE_AK8974 ||
+ ak8974->variant == AK8974_WHOAMI_VALUE_HSCDTD008A;
case AMI305_OFFSET_X:
case AMI305_OFFSET_X + 1:
case AMI305_OFFSET_Y:
@@ -746,7 +839,12 @@ static int ak8974_probe(struct i2c_client *i2c,
ARRAY_SIZE(ak8974->regs),
ak8974->regs);
if (ret < 0) {
- dev_err(&i2c->dev, "cannot get regulators\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&i2c->dev, "cannot get regulators: %d\n", ret);
+ else
+ dev_dbg(&i2c->dev,
+ "regulators unavailable, deferring probe\n");
+
return ret;
}
@@ -795,8 +893,21 @@ static int ak8974_probe(struct i2c_client *i2c,
pm_runtime_put(&i2c->dev);
indio_dev->dev.parent = &i2c->dev;
- indio_dev->channels = ak8974_channels;
- indio_dev->num_channels = ARRAY_SIZE(ak8974_channels);
+ switch (ak8974->variant) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ case AK8974_WHOAMI_VALUE_AMI305:
+ indio_dev->channels = ak8974_12_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_12_bits_channels);
+ break;
+ case AK8974_WHOAMI_VALUE_HSCDTD008A:
+ indio_dev->channels = ak8974_15_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_15_bits_channels);
+ break;
+ default:
+ indio_dev->channels = ak8974_12_bits_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ak8974_12_bits_channels);
+ break;
+ }
indio_dev->info = &ak8974_info;
indio_dev->available_scan_masks = ak8974_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -926,12 +1037,14 @@ static const struct i2c_device_id ak8974_id[] = {
{"ami305", 0 },
{"ami306", 0 },
{"ak8974", 0 },
+ {"hscdtd008a", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ak8974_id);
static const struct of_device_id ak8974_of_match[] = {
{ .compatible = "asahi-kasei,ak8974", },
+ { .compatible = "alps,hscdtd008a", },
{}
};
MODULE_DEVICE_TABLE(of, ak8974_of_match);
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index ed9be0490d77..c6ed3ea8460a 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -22,8 +22,8 @@ static int bmc150_magn_spi_probe(struct spi_device *spi)
regmap = devm_regmap_init_spi(spi, &bmc150_magn_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "Failed to register spi regmap %d\n",
- (int)PTR_ERR(regmap));
+ dev_err(&spi->dev, "Failed to register spi regmap: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 25e60b233e08..0c09daf87794 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum magn_3d_channel {
@@ -519,18 +517,13 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- return ret;
- }
atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&magn_state->magn_flux_attributes);
if (ret < 0) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ return ret;
}
ret = iio_device_register(indio_dev);
@@ -554,9 +547,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
return ret;
}
@@ -569,8 +560,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &magn_state->magn_flux_attributes);
return 0;
}
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 425cdd07b4e5..1787d656d009 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -239,7 +239,7 @@ static int mmc35240_init(struct mmc35240_data *data)
return ret;
ret = regmap_bulk_read(data->regmap, MMC35240_OTP_START_ADDR,
- (u8 *)otp_data, sizeof(otp_data));
+ otp_data, sizeof(otp_data));
if (ret < 0)
return ret;
@@ -295,7 +295,7 @@ static int mmc35240_read_measurement(struct mmc35240_data *data, __le16 buf[3])
if (ret < 0)
return ret;
- return regmap_bulk_read(data->regmap, MMC35240_REG_XOUT_L, (u8 *)buf,
+ return regmap_bulk_read(data->regmap, MMC35240_REG_XOUT_L, buf,
3 * sizeof(__le16));
}
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 7c20918d8108..43a2e420c9c4 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -22,6 +22,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <asm/unaligned.h>
+
#include "rm3100.h"
/* Cycle Count Registers. */
@@ -223,8 +225,7 @@ static int rm3100_read_mag(struct rm3100_data *data, int idx, int *val)
goto unlock_return;
mutex_unlock(&data->lock);
- *val = sign_extend32((buffer[0] << 16) | (buffer[1] << 8) | buffer[2],
- 23);
+ *val = sign_extend32(get_unaligned_be24(&buffer[0]), 23);
return IIO_VAL_INT;
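[Editorial note: the rm3100 sample is a 24-bit two's-complement big-endian value, so
after assembly it is sign-extended from bit 23 to a full int, which is what
sign_extend32(..., 23) does. A userspace sketch of the same operation, not part of the
patch:]

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a value whose sign bit is at position `index`,
 * mirroring the kernel's sign_extend32(). */
static int32_t sign_extend32_demo(uint32_t value, int index)
{
	int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint8_t buf[3] = { 0xff, 0xff, 0xfe };	/* big-endian 24-bit sample */
	uint32_t raw = (uint32_t)buf[0] << 16 | buf[1] << 8 | buf[2];

	printf("%d\n", sign_extend32_demo(raw, 23));	/* prints -2 */
	return 0;
}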
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e68184a93a6d..79de721e6015 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -506,8 +506,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = mdata->sensor_settings->ch;
indio_dev->num_channels = ST_SENSORS_NUMBER_ALL_CHANNELS;
- mdata->current_fullscale = (struct st_sensor_fullscale_avl *)
- &mdata->sensor_settings->fs.fs_avl[0];
+ mdata->current_fullscale = &mdata->sensor_settings->fs.fs_avl[0];
mdata->odr = mdata->sensor_settings->odr.odr_avl[0].hz;
err = st_sensors_init_sensor(indio_dev, NULL);
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 00af68764cda..6aac8bea233a 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -15,8 +15,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum incl_3d_channel {
@@ -346,18 +344,13 @@ static int hid_incl_3d_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&incl_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&incl_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -382,9 +375,7 @@ static int hid_incl_3d_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&incl_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -399,8 +390,7 @@ static int hid_incl_3d_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_INCLINOMETER_3D);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&incl_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &incl_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 64ae7d04a200..b99f41240e3e 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
struct dev_rot_state {
@@ -288,18 +286,13 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- return ret;
- }
atomic_set(&rot_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&rot_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ return ret;
}
ret = iio_device_register(indio_dev);
@@ -323,9 +316,7 @@ static int hid_dev_rot_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&rot_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return ret;
}
@@ -338,8 +329,7 @@ static int hid_dev_rot_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&rot_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &rot_state->common_attributes);
return 0;
}
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 29c209cc1108..126a56d31b6e 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -271,6 +271,8 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+ (s32)2097152) * calib->H2 + 8192) >> 14);
var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)calib->H1) >> 4;
+ var = clamp_val(var, 0, 419430400);
+
return var >> 12;
};
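[Editorial note: the new clamp_val() bounds the intermediate humidity result so that,
after the final right shift by 12, the output can never exceed 100 %RH. Assuming the
Q22.10 output format used by this compensation routine (10 fractional bits), a quick
arithmetic check, not part of the patch:]

#include <stdio.h>

int main(void)
{
	long clamped_max = 419430400;		/* upper clamp in the driver */
	long q22_10 = clamped_max >> 12;	/* value actually returned */

	/* Q22.10: 10 fractional bits, so divide by 1024 to get %RH. */
	printf("max output: %ld (Q22.10) = %ld %%RH\n", q22_10, q22_10 / 1024);
	return 0;
}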
@@ -337,8 +339,7 @@ static int bmp280_read_temp(struct bmp280_data *data,
__be32 tmp = 0;
s32 adc_temp, comp_temp;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
- (u8 *) &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB, &tmp, 3);
if (ret < 0) {
dev_err(data->dev, "failed to read temperature\n");
return ret;
@@ -377,8 +378,7 @@ static int bmp280_read_press(struct bmp280_data *data,
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
- (u8 *) &tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB, &tmp, 3);
if (ret < 0) {
dev_err(data->dev, "failed to read pressure\n");
return ret;
@@ -400,8 +400,8 @@ static int bmp280_read_press(struct bmp280_data *data,
static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
{
+ __be16 tmp;
int ret;
- __be16 tmp = 0;
s32 adc_humidity;
u32 comp_humidity;
@@ -410,8 +410,7 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
if (ret < 0)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
- (u8 *) &tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB, &tmp, 2);
if (ret < 0) {
dev_err(data->dev, "failed to read humidity\n");
return ret;
@@ -575,57 +574,38 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
+static int bmp280_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
{
- size_t len = 0;
- int i;
-
- for (i = 0; i < n; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
-
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
-
- return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
- data->chip_info->num_oversampling_temp_avail);
-}
-
-static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+ struct bmp280_data *data = iio_priv(indio_dev);
- return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
- data->chip_info->num_oversampling_press_avail);
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *vals = data->chip_info->oversampling_press_avail;
+ *length = data->chip_info->num_oversampling_press_avail;
+ break;
+ case IIO_TEMP:
+ *vals = data->chip_info->oversampling_temp_avail;
+ *length = data->chip_info->num_oversampling_temp_avail;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
}
-static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
- S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);
-
-static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
- S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);
-
-static struct attribute *bmp280_attributes[] = {
- &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
- &iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group bmp280_attrs_group = {
- .attrs = bmp280_attributes,
-};
-
static const struct iio_info bmp280_info = {
.read_raw = &bmp280_read_raw,
+ .read_avail = &bmp280_read_avail,
.write_raw = &bmp280_write_raw,
- .attrs = &bmp280_attrs_group,
};
static int bmp280_chip_config(struct bmp280_data *data)
@@ -713,7 +693,7 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
unsigned int ctrl;
if (data->use_eoc)
- init_completion(&data->done);
+ reinit_completion(&data->done);
ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
if (ret)
@@ -752,14 +732,14 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
{
+ __be16 tmp;
int ret;
- __be16 tmp = 0;
ret = bmp180_measure(data, BMP180_MEAS_TEMP);
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 2);
if (ret)
return ret;
@@ -856,7 +836,7 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
if (ret)
return ret;
- ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 3);
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, &tmp, 3);
if (ret)
return ret;
@@ -965,10 +945,12 @@ static int bmp085_fetch_eoc_irq(struct device *dev,
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
if (irq_trig != IRQF_TRIGGER_RISING) {
- dev_err(dev, "non-rising trigger given for EOC interrupt, "
- "trying to enforce it\n");
+ dev_err(dev, "non-rising trigger given for EOC interrupt, trying to enforce it\n");
irq_trig = IRQF_TRIGGER_RISING;
}
+
+ init_completion(&data->done);
+
ret = devm_request_threaded_irq(dev,
irq,
bmp085_eoc_irq,
@@ -1082,9 +1064,9 @@ int bmp280_common_probe(struct device *dev,
usleep_range(data->start_up_time, data->start_up_time + 100);
/* Bring chip out of reset if there is an assigned GPIO line */
- gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
/* Deassert the signal */
- if (!IS_ERR(gpiod)) {
+ if (gpiod) {
dev_info(dev, "release reset\n");
gpiod_set_value(gpiod, 0);
}
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 953235052155..5e6663f757ae 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
#define CHANNEL_SCAN_INDEX_PRESSURE 0
@@ -290,18 +288,13 @@ static int hid_press_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
- goto error_free_dev_mem;
- }
atomic_set(&press_state->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&press_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "trigger setup failed\n");
- goto error_unreg_buffer_funcs;
+ goto error_free_dev_mem;
}
ret = iio_device_register(indio_dev);
@@ -325,9 +318,7 @@ static int hid_press_probe(struct platform_device *pdev)
error_iio_unreg:
iio_device_unregister(indio_dev);
error_remove_trigger:
- hid_sensor_remove_trigger(&press_state->common_attributes);
-error_unreg_buffer_funcs:
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
error_free_dev_mem:
kfree(indio_dev->channels);
return ret;
@@ -342,8 +333,7 @@ static int hid_press_remove(struct platform_device *pdev)
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PRESSURE);
iio_device_unregister(indio_dev);
- hid_sensor_remove_trigger(&press_state->common_attributes);
- iio_triggered_buffer_cleanup(indio_dev);
+ hid_sensor_remove_trigger(indio_dev, &press_state->common_attributes);
kfree(indio_dev->channels);
return 0;
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 3ac3632e7242..1f931f5b7a65 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -18,6 +18,8 @@
#include <linux/util_macros.h>
#include <linux/acpi.h>
+#include <asm/unaligned.h>
+
/* I2C commands: */
#define HP206C_CMD_SOFT_RST 0x06
@@ -93,12 +95,12 @@ static int hp206c_read_20bit(struct i2c_client *client, u8 cmd)
int ret;
u8 values[3];
- ret = i2c_smbus_read_i2c_block_data(client, cmd, 3, values);
+ ret = i2c_smbus_read_i2c_block_data(client, cmd, sizeof(values), values);
if (ret < 0)
return ret;
- if (ret != 3)
+ if (ret != sizeof(values))
return -EIO;
- return ((values[0] & 0xF) << 16) | (values[1] << 8) | (values[2]);
+ return get_unaligned_be24(&values[0]) & GENMASK(19, 0);
}
/* Spin for max 160ms until DEV_RDY is 1, or return error. */
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 8089c59adce5..072c106dd66d 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -16,6 +16,8 @@
#include <linux/module.h>
#include <linux/of_device.h>
+#include <asm/unaligned.h>
+
#include "ms5611.h"
static int ms5611_i2c_reset(struct device *dev)
@@ -50,7 +52,7 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
if (ret < 0)
return ret;
- *val = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ *val = get_unaligned_be24(&buf[0]);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index b463eaa799ab..4799aa57135e 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -11,6 +11,8 @@
#include <linux/spi/spi.h>
#include <linux/of_device.h>
+#include <asm/unaligned.h>
+
#include "ms5611.h"
static int ms5611_spi_reset(struct device *dev)
@@ -45,7 +47,7 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
if (ret < 0)
return ret;
- *val = (buf[0] << 16) | (buf[1] << 8) | buf[2];
+ *val = get_unaligned_be24(&buf[0]);
return 0;
}
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index bd972cec4830..789a2928504a 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -683,8 +683,7 @@ EXPORT_SYMBOL(st_press_get_settings);
int st_press_common_probe(struct iio_dev *indio_dev)
{
struct st_sensor_data *press_data = iio_priv(indio_dev);
- struct st_sensors_platform_data *pdata =
- (struct st_sensors_platform_data *)press_data->dev->platform_data;
+ struct st_sensors_platform_data *pdata = dev_get_platdata(press_data->dev);
int err;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -708,9 +707,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
indio_dev->channels = press_data->sensor_settings->ch;
indio_dev->num_channels = press_data->sensor_settings->num_ch;
- press_data->current_fullscale =
- (struct st_sensor_fullscale_avl *)
- &press_data->sensor_settings->fs.fs_avl[0];
+ press_data->current_fullscale = &press_data->sensor_settings->fs.fs_avl[0];
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 99dfe33ee402..37fe851f89af 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -64,6 +64,7 @@
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+#include <asm/unaligned.h>
#include "zpa2326.h"
/* 200 ms should be enough for the longest conversion time in one-shot mode. */
@@ -1005,22 +1006,20 @@ static int zpa2326_fetch_raw_sample(const struct iio_dev *indio_dev,
struct regmap *regs = ((struct zpa2326_private *)
iio_priv(indio_dev))->regmap;
int err;
+ u8 v[3];
switch (type) {
case IIO_PRESSURE:
zpa2326_dbg(indio_dev, "fetching raw pressure sample");
- err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, value,
- 3);
+ err = regmap_bulk_read(regs, ZPA2326_PRESS_OUT_XL_REG, v, sizeof(v));
if (err) {
zpa2326_warn(indio_dev, "failed to fetch pressure (%d)",
err);
return err;
}
- /* Pressure is a 24 bits wide little-endian unsigned int. */
- *value = (((u8 *)value)[2] << 16) | (((u8 *)value)[1] << 8) |
- ((u8 *)value)[0];
+ *value = get_unaligned_le24(&v[0]);
return IIO_VAL_INT;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 37606d400805..12672a0e89ed 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -101,6 +101,19 @@ config SRF04
To compile this driver as a module, choose M here: the
module will be called srf04.
+config SX9310
+ tristate "SX9310/SX9311 Semtech proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for Semtech's SX9310/SX9311 capacitive
+ proximity/button sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx9310.
+
config SX9500
tristate "SX9500 Semtech proximity sensor"
select IIO_BUFFER
@@ -127,6 +140,17 @@ config SRF08
To compile this driver as a module, choose M here: the
module will be called srf08.
+config VCNL3020
+ tristate "VCNL3020 proximity sensor"
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VCNL3020
+ proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vcnl3020.
+
config VL53L0X_I2C
tristate "STMicroelectronics VL53L0X ToF ranger sensor (I2C)"
depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index c591b019304e..9c1aca1a8b79 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_PING) += ping.o
obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
+obj-$(CONFIG_SX9310) += sx9310.o
obj-$(CONFIG_SX9500) += sx9500.o
+obj-$(CONFIG_VCNL3020) += vcnl3020.o
obj-$(CONFIG_VL53L0X_I2C) += vl53l0x-i2c.o
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index 12b893c5b0ee..2e99eeb27f2e 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -89,14 +89,14 @@ static irqreturn_t ping_handle_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ping_read(struct ping_data *data)
+static int ping_read(struct iio_dev *indio_dev)
{
+ struct ping_data *data = iio_priv(indio_dev);
int ret;
ktime_t ktime_dt;
s64 dt_ns;
u32 time_ns, distance_mm;
struct platform_device *pdev = to_platform_device(data->dev);
- struct iio_dev *indio_dev = iio_priv_to_dev(data);
/*
* just one read-echo-cycle can take place at a time
@@ -228,7 +228,6 @@ static int ping_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long info)
{
- struct ping_data *data = iio_priv(indio_dev);
int ret;
if (channel->type != IIO_DISTANCE)
@@ -236,7 +235,7 @@ static int ping_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = ping_read(data);
+ ret = ping_read(indio_dev);
if (ret < 0)
return ret;
*val = ret;
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
new file mode 100644
index 000000000000..d161f3061e35
--- /dev/null
+++ b/drivers/iio/proximity/sx9310.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC.
+ *
+ * Driver for Semtech's SX9310/SX9311 capacitive proximity/button solution.
+ * Based on SX9500 driver and Semtech driver using the input framework
+ * <https://my.syncplicity.com/share/teouwsim8niiaud/
+ * linux-driver-SX9310_NoSmartHSensing>.
+ * Reworked April 2019 by Evan Green <evgreen@chromium.org>
+ * and January 2020 by Daniel Campello <campello@chromium.org>
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+/* Register definitions. */
+#define SX9310_REG_IRQ_SRC 0x00
+#define SX9310_REG_STAT0 0x01
+#define SX9310_REG_STAT1 0x02
+#define SX9310_REG_IRQ_MSK 0x03
+#define SX9310_CONVDONE_IRQ BIT(3)
+#define SX9310_FAR_IRQ BIT(5)
+#define SX9310_CLOSE_IRQ BIT(6)
+#define SX9310_EVENT_IRQ (SX9310_FAR_IRQ | \
+ SX9310_CLOSE_IRQ)
+#define SX9310_REG_IRQ_FUNC 0x04
+
+#define SX9310_REG_PROX_CTRL0 0x10
+#define SX9310_REG_PROX_CTRL0_PROXSTAT2 0x10
+#define SX9310_REG_PROX_CTRL0_EN_MASK 0x0F
+#define SX9310_REG_PROX_CTRL1 0x11
+#define SX9310_REG_PROX_CTRL2 0x12
+#define SX9310_REG_PROX_CTRL2_COMBMODE_ALL 0x80
+#define SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC 0x04
+#define SX9310_REG_PROX_CTRL3 0x13
+#define SX9310_REG_PROX_CTRL3_GAIN0_X8 0x0c
+#define SX9310_REG_PROX_CTRL3_GAIN12_X4 0x02
+#define SX9310_REG_PROX_CTRL4 0x14
+#define SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST 0x07
+#define SX9310_REG_PROX_CTRL5 0x15
+#define SX9310_REG_PROX_CTRL5_RANGE_SMALL 0xc0
+#define SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 0x04
+#define SX9310_REG_PROX_CTRL5_RAWFILT_1P25 0x02
+#define SX9310_REG_PROX_CTRL6 0x16
+#define SX9310_REG_PROX_CTRL6_COMP_COMMON 0x20
+#define SX9310_REG_PROX_CTRL7 0x17
+#define SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 0x08
+#define SX9310_REG_PROX_CTRL7_AVGPOSFILT_512 0x05
+#define SX9310_REG_PROX_CTRL8 0x18
+#define SX9310_REG_PROX_CTRL9 0x19
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 0x40
+#define SX9310_REG_PROX_CTRL8_9_PTHRESH_96 0x88
+#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900 0x03
+#define SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500 0x05
+#define SX9310_REG_PROX_CTRL10 0x1a
+#define SX9310_REG_PROX_CTRL10_HYST_6PCT 0x10
+#define SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 0x12
+#define SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8 0x03
+#define SX9310_REG_PROX_CTRL11 0x1b
+#define SX9310_REG_PROX_CTRL12 0x1c
+#define SX9310_REG_PROX_CTRL13 0x1d
+#define SX9310_REG_PROX_CTRL14 0x1e
+#define SX9310_REG_PROX_CTRL15 0x1f
+#define SX9310_REG_PROX_CTRL16 0x20
+#define SX9310_REG_PROX_CTRL17 0x21
+#define SX9310_REG_PROX_CTRL18 0x22
+#define SX9310_REG_PROX_CTRL19 0x23
+#define SX9310_REG_SAR_CTRL0 0x2a
+#define SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES 0x40
+#define SX9310_REG_SAR_CTRL0_SARHYST_8 0x10
+#define SX9310_REG_SAR_CTRL1 0x2b
+/* Each increment of the slope register is 0.0078125. */
+#define SX9310_REG_SAR_CTRL1_SLOPE(_hnslope) (_hnslope / 78125)
+#define SX9310_REG_SAR_CTRL2 0x2c
+#define SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT 0x3c
+
+#define SX9310_REG_SENSOR_SEL 0x30
+
+#define SX9310_REG_USE_MSB 0x31
+#define SX9310_REG_USE_LSB 0x32
+
+#define SX9310_REG_AVG_MSB 0x33
+#define SX9310_REG_AVG_LSB 0x34
+
+#define SX9310_REG_DIFF_MSB 0x35
+#define SX9310_REG_DIFF_LSB 0x36
+
+#define SX9310_REG_OFFSET_MSB 0x37
+#define SX9310_REG_OFFSET_LSB 0x38
+
+#define SX9310_REG_SAR_MSB 0x39
+#define SX9310_REG_SAR_LSB 0x3a
+
+#define SX9310_REG_I2CADDR 0x40
+#define SX9310_REG_PAUSE 0x41
+#define SX9310_REG_WHOAMI 0x42
+#define SX9310_WHOAMI_VALUE 0x01
+#define SX9311_WHOAMI_VALUE 0x02
+
+#define SX9310_REG_RESET 0x7f
+#define SX9310_SOFT_RESET 0xde
+
+#define SX9310_SCAN_PERIOD_MASK GENMASK(7, 4)
+#define SX9310_SCAN_PERIOD_SHIFT 4
+
+#define SX9310_COMPSTAT_MASK GENMASK(3, 0)
+
+/* 4 hardware channels, as defined in STAT0: COMB, CS2, CS1 and CS0. */
+#define SX9310_NUM_CHANNELS 4
+#define SX9310_CHAN_ENABLED_MASK GENMASK(3, 0)
+
+struct sx9310_data {
+ /* Serialize access to registers and channel configuration */
+ struct mutex mutex;
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ /*
+ * Last reading of the proximity status for each channel.
+ * We only send an event to user space when this changes.
+ */
+ bool prox_stat[SX9310_NUM_CHANNELS];
+ bool trigger_enabled;
+ __be16 buffer[SX9310_NUM_CHANNELS +
+ 4]; /* 64-bit data + 64-bit timestamp */
+ /* Remember enabled channels and sample rate during suspend. */
+ unsigned int suspend_ctrl0;
+ struct completion completion;
+ unsigned int chan_read, chan_event;
+ int channel_users[SX9310_NUM_CHANNELS];
+ int whoami;
+};
+
+static const struct iio_event_spec sx9310_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define SX9310_NAMED_CHANNEL(idx, name) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .extend_name = name, \
+ .address = SX9310_REG_DIFF_MSB, \
+ .event_spec = sx9310_events, \
+ .num_event_specs = ARRAY_SIZE(sx9310_events), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+#define SX9310_CHANNEL(idx) SX9310_NAMED_CHANNEL(idx, NULL)
+
+static const struct iio_chan_spec sx9310_channels[] = {
+ SX9310_CHANNEL(0), /* CS0 */
+ SX9310_CHANNEL(1), /* CS1 */
+ SX9310_CHANNEL(2), /* CS2 */
+ SX9310_NAMED_CHANNEL(3, "comb"), /* COMB */
+
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/*
+ * Each entry contains the integer part (val) and the fractional part
+ * (val2), in millionths, of the sampling frequency in Hz. It conforms to
+ * the IIO output IIO_VAL_INT_PLUS_MICRO.
+ */
+static const struct {
+ int val;
+ int val2;
+} sx9310_samp_freq_table[] = {
+ { 500, 0 }, /* 0000: Min (no idle time) */
+ { 66, 666666 }, /* 0001: 15 ms */
+ { 33, 333333 }, /* 0010: 30 ms (Typ.) */
+ { 22, 222222 }, /* 0011: 45 ms */
+ { 16, 666666 }, /* 0100: 60 ms */
+ { 11, 111111 }, /* 0101: 90 ms */
+ { 8, 333333 }, /* 0110: 120 ms */
+ { 5, 0 }, /* 0111: 200 ms */
+ { 2, 500000 }, /* 1000: 400 ms */
+ { 1, 666666 }, /* 1001: 600 ms */
+ { 1, 250000 }, /* 1010: 800 ms */
+ { 1, 0 }, /* 1011: 1 s */
+ { 0, 500000 }, /* 1100: 2 s */
+ { 0, 333333 }, /* 1101: 3 s */
+ { 0, 250000 }, /* 1110: 4 s */
+ { 0, 200000 }, /* 1111: 5 s */
+};
+static const unsigned int sx9310_scan_period_table[] = {
+ 2, 15, 30, 45, 60, 90, 120, 200,
+ 400, 600, 800, 1000, 2000, 3000, 4000, 5000,
+};
+
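[Editorial note: the two tables above are two views of the same register setting:
sampling frequency is the reciprocal of the scan period, e.g. a 15 ms period gives
66.666666 Hz, which is where the { 66, 666666 } entry comes from (the 0000 entry uses
the 2 ms minimum period). A userspace sketch deriving a few entries, not part of the
patch:]

#include <stdio.h>

int main(void)
{
	/* A few scan periods in milliseconds, as in the second table. */
	const unsigned int period_ms[] = { 15, 30, 200, 1000, 5000 };
	unsigned int i;

	for (i = 0; i < sizeof(period_ms) / sizeof(period_ms[0]); i++) {
		/* frequency in micro-Hz, then split into a (val, val2) pair */
		unsigned long long uhz = 1000000000ULL / period_ms[i];

		printf("%u ms -> { %llu, %llu }\n", period_ms[i],
		       uhz / 1000000, uhz % 1000000);
	}
	return 0;
}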
+static ssize_t sx9310_show_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%d ",
+ sx9310_samp_freq_table[i].val,
+ sx9310_samp_freq_table[i].val2);
+ buf[len - 1] = '\n';
+ return len;
+}
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sx9310_show_samp_freq_avail);
+
+static const struct regmap_range sx9310_writable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_MSK, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SENSOR_SEL),
+ regmap_reg_range(SX9310_REG_OFFSET_MSB, SX9310_REG_OFFSET_LSB),
+ regmap_reg_range(SX9310_REG_PAUSE, SX9310_REG_PAUSE),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_writeable_regs = {
+ .yes_ranges = sx9310_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_writable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_readable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_I2CADDR, SX9310_REG_WHOAMI),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_readable_regs = {
+ .yes_ranges = sx9310_readable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_readable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_volatile_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_STAT1),
+ regmap_reg_range(SX9310_REG_USE_MSB, SX9310_REG_DIFF_LSB),
+ regmap_reg_range(SX9310_REG_SAR_MSB, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_volatile_regs = {
+ .yes_ranges = sx9310_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_volatile_reg_ranges),
+};
+
+static const struct regmap_config sx9310_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SX9310_REG_RESET,
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &sx9310_writeable_regs,
+ .rd_table = &sx9310_readable_regs,
+ .volatile_table = &sx9310_volatile_regs,
+};
+
+static int sx9310_update_chan_en(struct sx9310_data *data,
+ unsigned int chan_read,
+ unsigned int chan_event)
+{
+ int ret;
+
+ if ((data->chan_read | data->chan_event) != (chan_read | chan_event)) {
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_CHAN_ENABLED_MASK,
+ chan_read | chan_event);
+ if (ret)
+ return ret;
+ }
+ data->chan_read = chan_read;
+ data->chan_event = chan_event;
+ return 0;
+}
+
+static int sx9310_get_read_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read | BIT(channel),
+ data->chan_event);
+}
+
+static int sx9310_put_read_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read & ~BIT(channel),
+ data->chan_event);
+}
+
+static int sx9310_get_event_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read,
+ data->chan_event | BIT(channel));
+}
+
+static int sx9310_put_event_channel(struct sx9310_data *data, int channel)
+{
+ return sx9310_update_chan_en(data, data->chan_read,
+ data->chan_event & ~BIT(channel));
+}
+
+static int sx9310_enable_irq(struct sx9310_data *data, unsigned int irq)
+{
+ return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, irq);
+}
+
+static int sx9310_disable_irq(struct sx9310_data *data, unsigned int irq)
+{
+ return regmap_update_bits(data->regmap, SX9310_REG_IRQ_MSK, irq, 0);
+}
+
+static int sx9310_read_prox_data(struct sx9310_data *data,
+ const struct iio_chan_spec *chan, __be16 *val)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, SX9310_REG_SENSOR_SEL, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ return regmap_bulk_read(data->regmap, chan->address, val, 2);
+}
+
+/*
+ * If we have no interrupt support, we have to wait for a scan period
+ * after enabling a channel to get a result.
+ */
+static int sx9310_wait_for_sample(struct sx9310_data *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &val);
+ if (ret < 0)
+ return ret;
+
+ val = (val & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+
+ msleep(sx9310_scan_period_table[val]);
+
+ return 0;
+}
+
+static int sx9310_read_proximity(struct sx9310_data *data,
+ const struct iio_chan_spec *chan, int *val)
+{
+ int ret = 0;
+ __be16 rawval;
+
+ mutex_lock(&data->mutex);
+
+ ret = sx9310_get_read_channel(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out_put_channel;
+
+ mutex_unlock(&data->mutex);
+
+ if (data->client->irq > 0) {
+ ret = wait_for_completion_interruptible(&data->completion);
+ reinit_completion(&data->completion);
+ } else {
+ ret = sx9310_wait_for_sample(data);
+ }
+
+ mutex_lock(&data->mutex);
+
+ if (ret < 0)
+ goto out_disable_irq;
+
+ ret = sx9310_read_prox_data(data, chan, &rawval);
+ if (ret < 0)
+ goto out_disable_irq;
+
+ *val = sign_extend32(be16_to_cpu(rawval),
+ (chan->address == SX9310_REG_DIFF_MSB ? 11 : 15));
+
+ ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out_put_channel;
+
+ ret = sx9310_put_read_channel(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ mutex_unlock(&data->mutex);
+
+ return IIO_VAL_INT;
+
+out_disable_irq:
+ sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+out_put_channel:
+ sx9310_put_read_channel(data, chan->channel);
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_read_samp_freq(struct sx9310_data *data, int *val, int *val2)
+{
+ unsigned int regval;
+ int ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &regval);
+
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+ *val = sx9310_samp_freq_table[regval].val;
+ *val2 = sx9310_samp_freq_table[regval].val2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int sx9310_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2, long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = sx9310_read_proximity(data, chan, val);
+ iio_device_release_direct_mode(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9310_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sx9310_set_samp_freq(struct sx9310_data *data, int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ if (val == sx9310_samp_freq_table[i].val &&
+ val2 == sx9310_samp_freq_table[i].val2)
+ break;
+
+ if (i == ARRAY_SIZE(sx9310_samp_freq_table))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_SCAN_PERIOD_MASK,
+ i << SX9310_SCAN_PERIOD_SHIFT);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2,
+ long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ return sx9310_set_samp_freq(data, val, val2);
+}
+
+static irqreturn_t sx9310_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ /*
+ * Even if no event is enabled, we need to wake the thread to
+ * clear the interrupt state by reading SX9310_REG_IRQ_SRC. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static void sx9310_push_events(struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int val, chan;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+
+ /* Read proximity state on all channels */
+ ret = regmap_read(data->regmap, SX9310_REG_STAT0, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ return;
+ }
+
+ for (chan = 0; chan < SX9310_NUM_CHANNELS; chan++) {
+ int dir;
+ u64 ev;
+ bool new_prox = val & BIT(chan);
+
+ if (!(data->chan_event & BIT(chan)))
+ continue;
+ if (new_prox == data->prox_stat[chan])
+ /* No change on this channel. */
+ continue;
+
+ dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir);
+
+ iio_push_event(indio_dev, ev, timestamp);
+ data->prox_stat[chan] = new_prox;
+ }
+}
+
+static irqreturn_t sx9310_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
+ if (val & SX9310_EVENT_IRQ)
+ sx9310_push_events(indio_dev);
+
+ if (val & SX9310_CONVDONE_IRQ)
+ complete(&data->completion);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ return !!(data->chan_event & BIT(chan->channel));
+}
+
+static int sx9310_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ /* If the state hasn't changed, there's nothing to do. */
+ if (!!(data->chan_event & BIT(chan->channel)) == state)
+ return 0;
+
+ mutex_lock(&data->mutex);
+ if (state) {
+ ret = sx9310_get_event_channel(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ if (!(data->chan_event & ~BIT(chan->channel))) {
+ ret = sx9310_enable_irq(data, SX9310_EVENT_IRQ);
+ if (ret < 0)
+ sx9310_put_event_channel(data, chan->channel);
+ }
+ } else {
+ ret = sx9310_put_event_channel(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ if (!data->chan_event) {
+ ret = sx9310_disable_irq(data, SX9310_EVENT_IRQ);
+ if (ret < 0)
+ sx9310_get_event_channel(data, chan->channel);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static struct attribute *sx9310_attributes[] = {
+ &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sx9310_attribute_group = {
+ .attrs = sx9310_attributes,
+};
+
+static const struct iio_info sx9310_info = {
+ .attrs = &sx9310_attribute_group,
+ .read_raw = sx9310_read_raw,
+ .write_raw = sx9310_write_raw,
+ .read_event_config = sx9310_read_event_config,
+ .write_event_config = sx9310_write_event_config,
+};
+
+static int sx9310_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret = 0;
+
+ mutex_lock(&data->mutex);
+
+ if (state)
+ ret = sx9310_enable_irq(data, SX9310_CONVDONE_IRQ);
+ else if (!data->chan_read)
+ ret = sx9310_disable_irq(data, SX9310_CONVDONE_IRQ);
+ if (ret < 0)
+ goto out;
+
+ data->trigger_enabled = state;
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops sx9310_trigger_ops = {
+ .set_trigger_state = sx9310_set_trigger_state,
+};
+
+static irqreturn_t sx9310_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ __be16 val;
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = sx9310_read_prox_data(data, &indio_dev->channels[bit],
+ &val);
+ if (ret < 0)
+ goto out;
+
+ data->buffer[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ unsigned int channels = 0;
+ int bit, ret;
+
+ mutex_lock(&data->mutex);
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength)
+ channels |= BIT(indio_dev->channels[bit].channel);
+
+ ret = sx9310_update_chan_en(data, channels, data->chan_event);
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int sx9310_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = sx9310_update_chan_en(data, 0, data->chan_event);
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = {
+ .preenable = sx9310_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = sx9310_buffer_postdisable,
+};
+
+struct sx9310_reg_default {
+ u8 reg;
+ u8 def;
+};
+
+#define SX_INIT(_reg, _def) \
+ { \
+ .reg = SX9310_REG_##_reg, \
+ .def = _def, \
+ }
+
+static const struct sx9310_reg_default sx9310_default_regs[] = {
+ SX_INIT(IRQ_MSK, 0x00),
+ SX_INIT(IRQ_FUNC, 0x00),
+ /*
+ * The lower 4 bits must not be set, as they enable sensor measurements.
+ * Turning detection on before the configuration registers hold good
+ * values can cause the device to return erroneous readings.
+ */
+ SX_INIT(PROX_CTRL0, SX9310_REG_PROX_CTRL0_PROXSTAT2),
+ SX_INIT(PROX_CTRL1, 0x00),
+ SX_INIT(PROX_CTRL2, SX9310_REG_PROX_CTRL2_COMBMODE_ALL |
+ SX9310_REG_PROX_CTRL2_SHIELDEN_DYNAMIC),
+ SX_INIT(PROX_CTRL3, SX9310_REG_PROX_CTRL3_GAIN0_X8 |
+ SX9310_REG_PROX_CTRL3_GAIN12_X4),
+ SX_INIT(PROX_CTRL4, SX9310_REG_PROX_CTRL4_RESOLUTION_FINEST),
+ SX_INIT(PROX_CTRL5, SX9310_REG_PROX_CTRL5_RANGE_SMALL |
+ SX9310_REG_PROX_CTRL5_STARTUPSENS_CS1 |
+ SX9310_REG_PROX_CTRL5_RAWFILT_1P25),
+ SX_INIT(PROX_CTRL6, SX9310_REG_PROX_CTRL6_COMP_COMMON),
+ SX_INIT(PROX_CTRL7, SX9310_REG_PROX_CTRL7_AVGNEGFILT_2 |
+ SX9310_REG_PROX_CTRL7_AVGPOSFILT_512),
+ SX_INIT(PROX_CTRL8, SX9310_REG_PROX_CTRL8_9_PTHRESH_96 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_1500),
+ SX_INIT(PROX_CTRL9, SX9310_REG_PROX_CTRL8_9_PTHRESH12_28 |
+ SX9310_REG_PROX_CTRL8_9_BODYTHRESH_900),
+ SX_INIT(PROX_CTRL10, SX9310_REG_PROX_CTRL10_HYST_6PCT |
+ SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_8 |
+ SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_8),
+ SX_INIT(PROX_CTRL11, 0x00),
+ SX_INIT(PROX_CTRL12, 0x00),
+ SX_INIT(PROX_CTRL13, 0x00),
+ SX_INIT(PROX_CTRL14, 0x00),
+ SX_INIT(PROX_CTRL15, 0x00),
+ SX_INIT(PROX_CTRL16, 0x00),
+ SX_INIT(PROX_CTRL17, 0x00),
+ SX_INIT(PROX_CTRL18, 0x00),
+ SX_INIT(PROX_CTRL19, 0x00),
+ SX_INIT(SAR_CTRL0, SX9310_REG_SAR_CTRL0_SARDEB_4_SAMPLES |
+ SX9310_REG_SAR_CTRL0_SARHYST_8),
+ SX_INIT(SAR_CTRL1, SX9310_REG_SAR_CTRL1_SLOPE(10781250)),
+ SX_INIT(SAR_CTRL2, SX9310_REG_SAR_CTRL2_SAROFFSET_DEFAULT),
+};
+
+/* Activate all channels and perform an initial compensation. */
+static int sx9310_init_compensation(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int i, ret;
+ unsigned int val;
+ unsigned int ctrl0;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &ctrl0);
+ if (ret < 0)
+ return ret;
+
+ /* run the compensation phase on all channels */
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ ctrl0 | SX9310_REG_PROX_CTRL0_EN_MASK);
+ if (ret < 0)
+ return ret;
+
+ for (i = 100; i >= 0; i--) {
+ msleep(20);
+ ret = regmap_read(data->regmap, SX9310_REG_STAT1, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & SX9310_COMPSTAT_MASK))
+ break;
+ }
+
+ if (i < 0) {
+ dev_err(&data->client->dev,
+ "initial compensation timed out: 0x%02x", val);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ return ret;
+}
+
+static int sx9310_init_device(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ const struct sx9310_reg_default *initval;
+ int ret;
+ unsigned int i, val;
+
+ ret = regmap_write(data->regmap, SX9310_REG_RESET, SX9310_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000); /* power-up time is ~1ms. */
+
+ /* Clear reset interrupt state by reading SX9310_REG_IRQ_SRC. */
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Program some sane defaults. */
+ for (i = 0; i < ARRAY_SIZE(sx9310_default_regs); i++) {
+ initval = &sx9310_default_regs[i];
+ ret = regmap_write(data->regmap, initval->reg, initval->def);
+ if (ret < 0)
+ return ret;
+ }
+
+ return sx9310_init_compensation(indio_dev);
+}
+
+static int sx9310_set_indio_dev_name(struct device *dev,
+ struct iio_dev *indio_dev,
+ const struct i2c_device_id *id, int whoami)
+{
+ const struct acpi_device_id *acpi_id;
+
+ /* id will be NULL when enumerated via ACPI */
+ if (id) {
+ if (id->driver_data != whoami)
+ dev_err(dev, "WHOAMI does not match i2c_device_id: %s",
+ id->name);
+ } else if (ACPI_HANDLE(dev)) {
+ acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!acpi_id)
+ return -ENODEV;
+ if (acpi_id->driver_data != whoami)
+ dev_err(dev, "WHOAMI does not match acpi_device_id: %s",
+ acpi_id->id);
+ } else
+ return -ENODEV;
+
+ switch (whoami) {
+ case SX9310_WHOAMI_VALUE:
+ indio_dev->name = "sx9310";
+ break;
+ case SX9311_WHOAMI_VALUE:
+ indio_dev->name = "sx9311";
+ break;
+ default:
+ dev_err(dev, "unexpected WHOAMI response: %u", whoami);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int sx9310_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct sx9310_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+ init_completion(&data->completion);
+
+ data->regmap = devm_regmap_init_i2c(client, &sx9310_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ ret = regmap_read(data->regmap, SX9310_REG_WHOAMI, &data->whoami);
+ if (ret < 0) {
+ dev_err(&client->dev, "error in reading WHOAMI register: %d",
+ ret);
+ return ret;
+ }
+
+ ret = sx9310_set_indio_dev_name(&client->dev, indio_dev, id,
+ data->whoami);
+ if (ret < 0)
+ return ret;
+
+ ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(&client->dev));
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = sx9310_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sx9310_channels);
+ indio_dev->info = &sx9310_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = sx9310_init_device(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ sx9310_irq_handler,
+ sx9310_irq_thread_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "sx9310_event", indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->trig =
+ devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->dev.parent = &client->dev;
+ data->trig->ops = &sx9310_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(&client->dev, data->trig);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ iio_pollfunc_store_time,
+ sx9310_trigger_handler,
+ &sx9310_buffer_setup_ops);
+ if (ret < 0)
+ return ret;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static int __maybe_unused sx9310_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ u8 ctrl0;
+ int ret;
+
+ disable_irq_nosync(data->client->irq);
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0,
+ &data->suspend_ctrl0);
+
+ if (ret)
+ goto out;
+
+ ctrl0 = data->suspend_ctrl0 & ~SX9310_REG_PROX_CTRL0_EN_MASK;
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
+
+out:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int __maybe_unused sx9310_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ data->suspend_ctrl0);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ enable_irq(data->client->irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sx9310_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sx9310_suspend, sx9310_resume)
+};
+
+static const struct acpi_device_id sx9310_acpi_match[] = {
+ { "STH9310", SX9310_WHOAMI_VALUE },
+ { "STH9311", SX9311_WHOAMI_VALUE },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
+
+static const struct of_device_id sx9310_of_match[] = {
+ { .compatible = "semtech,sx9310" },
+ { .compatible = "semtech,sx9311" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sx9310_of_match);
+
+static const struct i2c_device_id sx9310_id[] = {
+ { "sx9310", SX9310_WHOAMI_VALUE },
+ { "sx9311", SX9311_WHOAMI_VALUE },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, sx9310_id);
+
+static struct i2c_driver sx9310_driver = {
+ .driver = {
+ .name = "sx9310",
+ .acpi_match_table = ACPI_PTR(sx9310_acpi_match),
+ .of_match_table = of_match_ptr(sx9310_of_match),
+ .pm = &sx9310_pm_ops,
+ },
+ .probe = sx9310_probe,
+ .id_table = sx9310_id,
+};
+module_i2c_driver(sx9310_driver);
+
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_AUTHOR("Daniel Campello <campello@chromium.org>");
+MODULE_DESCRIPTION("Driver for Semtech SX9310/SX9311 proximity sensor");
+MODULE_LICENSE("GPL v2");
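As an aside, here is a standalone sketch (not part of the patch) of the sign extension performed in sx9310_read_proximity() above: differential readings are 12-bit two's complement, while useful/average readings use the full 16 bits, matching the sign_extend32(..., 11 : 15) call. sign_extend_bits() below is a userspace stand-in for the kernel helper.

/*
 * Standalone userspace sketch; sign_extend_bits() stands in for the
 * kernel's sign_extend32(value, index), where index is the position
 * of the sign bit.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend_bits(uint32_t value, unsigned int sign_bit)
{
	uint32_t mask = 1U << sign_bit;

	/* Drop bits above the sign bit, then propagate the sign. */
	value &= (mask << 1) - 1;
	return (int32_t)((value ^ mask) - mask);
}

int main(void)
{
	uint16_t raw_diff = 0x0923;	/* 12-bit differential sample, sign bit set */
	uint16_t raw_use = 0x8123;	/* 16-bit useful sample, sign bit set */

	printf("diff: %d\n", sign_extend_bits(raw_diff, 11));	/* -1757 */
	printf("use:  %d\n", sign_extend_bits(raw_use, 15));	/* -32477 */
	return 0;
}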
diff --git a/drivers/iio/proximity/vcnl3020.c b/drivers/iio/proximity/vcnl3020.c
new file mode 100644
index 000000000000..9ff1a164c2e6
--- /dev/null
+++ b/drivers/iio/proximity/vcnl3020.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for Vishay VCNL3020 proximity sensor on i2c bus.
+ * Based on Vishay VCNL4000 driver code.
+ *
+ * TODO: interrupts.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VCNL3020_PROD_ID 0x21
+
+#define VCNL_COMMAND 0x80 /* Command register */
+#define VCNL_PROD_REV 0x81 /* Product ID and Revision ID */
+#define VCNL_PROXIMITY_RATE 0x82 /* Rate of Proximity Measurement */
+#define VCNL_LED_CURRENT 0x83 /* IR LED current for proximity mode */
+#define VCNL_PS_RESULT_HI 0x87 /* Proximity result register, MSB */
+#define VCNL_PS_RESULT_LO 0x88 /* Proximity result register, LSB */
+#define VCNL_PS_ICR 0x89 /* Interrupt Control Register */
+#define VCNL_PS_LO_THR_HI 0x8a /* High byte of low threshold value */
+#define VCNL_PS_LO_THR_LO 0x8b /* Low byte of low threshold value */
+#define VCNL_PS_HI_THR_HI 0x8c /* High byte of high threshold value */
+#define VCNL_PS_HI_THR_LO 0x8d /* Low byte of high threshold value */
+#define VCNL_ISR 0x8e /* Interrupt Status Register */
+#define VCNL_PS_MOD_ADJ 0x8f /* Proximity Modulator Timing Adjustment */
+
+/* Bit masks for COMMAND register */
+#define VCNL_PS_RDY BIT(5) /* proximity data ready? */
+#define VCNL_PS_OD BIT(3) /* start on-demand proximity
+ * measurement
+ */
+
+#define VCNL_ON_DEMAND_TIMEOUT_US 100000
+#define VCNL_POLL_US 20000
+
+/**
+ * struct vcnl3020_data - vcnl3020 specific data.
+ * @regmap: device register map.
+ * @dev: vcnl3020 device.
+ * @rev: revision id.
+ * @lock: lock for protecting access to device hardware registers.
+ */
+struct vcnl3020_data {
+ struct regmap *regmap;
+ struct device *dev;
+ u8 rev;
+ struct mutex lock;
+};
+
+/**
+ * struct vcnl3020_property - vcnl3020 property.
+ * @name: property name.
+ * @reg: i2c register offset.
+ * @conversion_func: conversion function.
+ */
+struct vcnl3020_property {
+ const char *name;
+ u32 reg;
+ u32 (*conversion_func)(u32 *val);
+};
+
+static u32 microamp_to_reg(u32 *val)
+{
+ /*
+ * Example conversion from uA to register value:
+ * 200000 uA == 200 mA, which maps to register value 20.
+ */
+ return *val /= 10000;
+};
+
+static struct vcnl3020_property vcnl3020_led_current_property = {
+ .name = "vishay,led-current-microamp",
+ .reg = VCNL_LED_CURRENT,
+ .conversion_func = microamp_to_reg,
+};
+
+static int vcnl3020_get_and_apply_property(struct vcnl3020_data *data,
+ struct vcnl3020_property prop)
+{
+ int rc;
+ u32 val;
+
+ rc = device_property_read_u32(data->dev, prop.name, &val);
+ if (rc)
+ return 0;
+
+ if (prop.conversion_func)
+ prop.conversion_func(&val);
+
+ rc = regmap_write(data->regmap, prop.reg, val);
+ if (rc) {
+ dev_err(data->dev, "Error (%d) setting property (%s)\n",
+ rc, prop.name);
+ }
+
+ return rc;
+}
+
+static int vcnl3020_init(struct vcnl3020_data *data)
+{
+ int rc;
+ unsigned int reg;
+
+ rc = regmap_read(data->regmap, VCNL_PROD_REV, &reg);
+ if (rc) {
+ dev_err(data->dev,
+ "Error (%d) reading product revision\n", rc);
+ return rc;
+ }
+
+ if (reg != VCNL3020_PROD_ID) {
+ dev_err(data->dev,
+ "Product id (%x) did not match vcnl3020 (%x)\n", reg,
+ VCNL3020_PROD_ID);
+ return -ENODEV;
+ }
+
+ data->rev = reg;
+ mutex_init(&data->lock);
+
+ return vcnl3020_get_and_apply_property(data,
+ vcnl3020_led_current_property);
+};
+
+static int vcnl3020_measure_proximity(struct vcnl3020_data *data, int *val)
+{
+ int rc;
+ unsigned int reg;
+ __be16 res;
+
+ mutex_lock(&data->lock);
+
+ rc = regmap_write(data->regmap, VCNL_COMMAND, VCNL_PS_OD);
+ if (rc)
+ goto err_unlock;
+
+ /* wait for data to become ready */
+ rc = regmap_read_poll_timeout(data->regmap, VCNL_COMMAND, reg,
+ reg & VCNL_PS_RDY, VCNL_POLL_US,
+ VCNL_ON_DEMAND_TIMEOUT_US);
+ if (rc) {
+ dev_err(data->dev,
+ "Error (%d) reading vcnl3020 command register\n", rc);
+ goto err_unlock;
+ }
+
+ /* high & low result bytes read */
+ rc = regmap_bulk_read(data->regmap, VCNL_PS_RESULT_HI, &res,
+ sizeof(res));
+ if (rc)
+ goto err_unlock;
+
+ *val = be16_to_cpu(res);
+
+err_unlock:
+ mutex_unlock(&data->lock);
+
+ return rc;
+}
+
+static const struct iio_chan_spec vcnl3020_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+};
+
+static int vcnl3020_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int rc;
+ struct vcnl3020_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ rc = vcnl3020_measure_proximity(data, val);
+ if (rc)
+ return rc;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info vcnl3020_info = {
+ .read_raw = vcnl3020_read_raw,
+};
+
+static const struct regmap_config vcnl3020_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VCNL_PS_MOD_ADJ,
+};
+
+static int vcnl3020_probe(struct i2c_client *client)
+{
+ struct vcnl3020_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int rc;
+
+ regmap = devm_regmap_init_i2c(client, &vcnl3020_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "regmap_init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->regmap = regmap;
+ data->dev = &client->dev;
+
+ rc = vcnl3020_init(data);
+ if (rc)
+ return rc;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &vcnl3020_info;
+ indio_dev->channels = vcnl3020_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vcnl3020_channels);
+ indio_dev->name = "vcnl3020";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct of_device_id vcnl3020_of_match[] = {
+ {
+ .compatible = "vishay,vcnl3020",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, vcnl3020_of_match);
+
+static struct i2c_driver vcnl3020_driver = {
+ .driver = {
+ .name = "vcnl3020",
+ .of_match_table = vcnl3020_of_match,
+ },
+ .probe_new = vcnl3020_probe,
+};
+module_i2c_driver(vcnl3020_driver);
+
+MODULE_AUTHOR("Ivan Mikhaylov <i.mikhaylov@yadro.com>");
+MODULE_DESCRIPTION("Vishay VCNL3020 proximity sensor driver");
+MODULE_LICENSE("GPL");
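For reference, a standalone sketch (not part of the patch) of the "vishay,led-current-microamp" conversion applied by vcnl3020_get_and_apply_property() above: the register takes the LED current in 10 mA steps. The 200 mA ceiling used for clamping here is an assumption for illustration only; the driver itself writes the converted value as-is.

#include <stdint.h>
#include <stdio.h>

static uint32_t vcnl3020_microamp_to_reg(uint32_t microamp)
{
	uint32_t reg = microamp / 10000;	/* 10 mA per register step */

	return reg > 20 ? 20 : reg;		/* assumed 200 mA ceiling */
}

int main(void)
{
	printf("%u\n", vcnl3020_microamp_to_reg(200000));	/* 20 */
	printf("%u\n", vcnl3020_microamp_to_reg(50000));	/* 5 */
	return 0;
}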
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index eda55b9c1e9b..8d1f434f109d 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -7,8 +7,6 @@
#include <linux/hid-sensor-hub.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
-#include <linux/iio/triggered_buffer.h>
-#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -230,12 +228,8 @@ static int hid_temperature_probe(struct platform_device *pdev)
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = devm_iio_triggered_buffer_setup(&pdev->dev, indio_dev,
- &iio_pollfunc_store_time, NULL, NULL);
- if (ret)
- return ret;
-
atomic_set(&temp_st->common_attributes.data_ready, 0);
+
ret = hid_sensor_setup_trigger(indio_dev, name,
&temp_st->common_attributes);
if (ret)
@@ -258,7 +252,7 @@ static int hid_temperature_probe(struct platform_device *pdev)
error_remove_callback:
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
error_remove_trigger:
- hid_sensor_remove_trigger(&temp_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
return ret;
}
@@ -270,7 +264,7 @@ static int hid_temperature_remove(struct platform_device *pdev)
struct temperature_state *temp_st = iio_priv(indio_dev);
sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_TEMPERATURE);
- hid_sensor_remove_trigger(&temp_st->common_attributes);
+ hid_sensor_remove_trigger(indio_dev, &temp_st->common_attributes);
return 0;
}
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index d39c0d6b77f1..8976e8d59826 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -390,8 +390,8 @@ static struct ltc2983_custom_sensor *__ltc2983_custom_sensor_new(
* For custom steinhart, the full u32 is taken. For all the others
* the MSB is discarded.
*/
- const u8 n_size = (is_steinhart == true) ? 4 : 3;
- const u8 e_size = (is_steinhart == true) ? sizeof(u32) : sizeof(u64);
+ const u8 n_size = is_steinhart ? 4 : 3;
+ const u8 e_size = is_steinhart ? sizeof(u32) : sizeof(u64);
n_entries = of_property_count_elems_of_size(np, propname, e_size);
/* n_entries must be an even number */
diff --git a/drivers/iio/temperature/max31856.c b/drivers/iio/temperature/max31856.c
index b4cb21ab2e85..b4c49a5d3685 100644
--- a/drivers/iio/temperature/max31856.c
+++ b/drivers/iio/temperature/max31856.c
@@ -14,6 +14,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/util_macros.h>
+#include <asm/unaligned.h>
#include <dt-bindings/iio/temperature/thermocouple.h>
/*
* The MSB of the register value determines whether the following byte will
@@ -168,7 +169,7 @@ static int max31856_thermocouple_read(struct max31856_data *data,
if (ret)
return ret;
/* Skip last 5 dead bits of LTCBL */
- *val = (reg_val[0] << 16 | reg_val[1] << 8 | reg_val[2]) >> 5;
+ *val = get_unaligned_be24(&reg_val[0]) >> 5;
/* Check 7th bit of LTCBH reg. value for sign*/
if (reg_val[0] & 0x80)
*val -= 0x80000;
@@ -185,7 +186,7 @@ static int max31856_thermocouple_read(struct max31856_data *data,
/* Get Cold Junction Temp. offset register value */
offset_cjto = reg_val[0];
/* Get CJTH and CJTL value and skip last 2 dead bits of CJTL */
- *val = (reg_val[1] << 8 | reg_val[2]) >> 2;
+ *val = get_unaligned_be16(&reg_val[1]) >> 2;
/* As per datasheet add offset into CJTH and CJTL */
*val += offset_cjto;
/* Check 7th bit of CJTH reg. value for sign */
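A standalone sketch (not part of the patch) showing that the get_unaligned_be24() form introduced above is equivalent to the previous open-coded shift/or expression for the three big-endian linearized-temperature bytes, whose lowest 5 bits are dead; the example register bytes are made up.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for get_unaligned_be24(). */
static uint32_t be24_to_cpu(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	const uint8_t reg_val[3] = { 0x19, 0x00, 0x07 };	/* made-up sample */
	uint32_t old_form = (reg_val[0] << 16 | reg_val[1] << 8 | reg_val[2]) >> 5;
	uint32_t new_form = be24_to_cpu(reg_val) >> 5;
	int32_t val = new_form;

	/* Bit 7 of the first byte is the sign bit of the 19-bit result. */
	if (reg_val[0] & 0x80)
		val -= 0x80000;

	printf("old=%u new=%u signed=%d\n", old_form, new_form, val);
	return 0;
}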
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index a5e670726717..f59bf8d58586 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -4,7 +4,7 @@
*
* Copyright (C) Intuitive Aerial AB
* Written by Marten Svanfeldt, marten@intuitiveaerial.com
- * Copyright (C) 2012, Analog Device Inc.
+ * Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2015, Intel Corporation
*/
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index ade86388434f..477418b37786 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -107,6 +107,7 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
+source "drivers/infiniband/ulp/rtrs/Kconfig"
source "drivers/infiniband/ulp/opa_vnic/Kconfig"
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d1b14887960e..24cb71a16a28 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -8,11 +8,11 @@ obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
- device.o fmr_pool.o cache.o netlink.o \
+ device.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
nldev.o restrack.o counters.o ib_core_uverbs.o \
- trace.o
+ trace.o lag.o
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -36,6 +36,9 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
uverbs_std_types_flow_action.o uverbs_std_types_dm.o \
uverbs_std_types_mr.o uverbs_std_types_counters.o \
uverbs_uapi.o uverbs_std_types_device.o \
- uverbs_std_types_async_fd.o
+ uverbs_std_types_async_fd.o \
+ uverbs_std_types_srq.o \
+ uverbs_std_types_wq.o \
+ uverbs_std_types_qp.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 1753a9801b70..3a98439bba83 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -371,6 +371,8 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
(const void *)&dst_in6->sin6_addr;
sa_family_t family = dst_in->sa_family;
+ might_sleep();
+
/* If we have a gateway in IB mode then it must be an IB network */
if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
@@ -727,6 +729,8 @@ int roce_resolve_route_from_path(struct sa_path_rec *rec,
struct rdma_dev_addr dev_addr = {};
int ret;
+ might_sleep();
+
if (rec->roce.route_resolved)
return 0;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 17f14e0eafe4..9ce787e37e22 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -66,6 +66,8 @@ static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
+ [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
+ "vendor option is not supported",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
@@ -81,8 +83,11 @@ const char *__attribute_const__ ibcm_reject_msg(int reason)
EXPORT_SYMBOL(ibcm_reject_msg);
struct cm_id_private;
-static void cm_add_one(struct ib_device *device);
+struct cm_work;
+static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
+static void cm_process_work(struct cm_id_private *cm_id_priv,
+ struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
@@ -287,6 +292,8 @@ struct cm_id_private {
struct list_head work_list;
atomic_t work_count;
+
+ struct rdma_ucm_ece ece;
};
static void cm_work_handler(struct work_struct *work);
@@ -474,24 +481,19 @@ static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
grh, &av->ah_attr);
}
-static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
- struct cm_av *av,
- struct cm_port *port)
+static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
+ struct cm_av *av, struct cm_port *port)
{
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&cm.lock, flags);
-
if (&cm_id_priv->av == av)
list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
else if (&cm_id_priv->alt_av == av)
list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
else
- ret = -EINVAL;
-
+ WARN_ON(true);
spin_unlock_irqrestore(&cm.lock, flags);
- return ret;
}
static struct cm_port *
@@ -572,12 +574,7 @@ static int cm_init_av_by_path(struct sa_path_rec *path,
return ret;
av->timeout = path->packet_life_time + 1;
-
- ret = add_cm_id_to_port_list(cm_id_priv, av, port);
- if (ret) {
- rdma_destroy_ah_attr(&new_ah_attr);
- return ret;
- }
+ add_cm_id_to_port_list(cm_id_priv, av, port);
rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
return 0;
}
@@ -587,11 +584,6 @@ static u32 cm_local_id(__be32 local_id)
return (__force u32) (local_id ^ cm.random_id_operand);
}
-static void cm_free_id(__be32 local_id)
-{
- xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
-}
-
static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
@@ -698,9 +690,10 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
if ((cm_id_priv->id.service_mask & service_id) ==
cm_id_priv->id.service_id &&
- (cm_id_priv->id.device == device))
+ (cm_id_priv->id.device == device)) {
+ refcount_inc(&cm_id_priv->refcount);
return cm_id_priv;
-
+ }
if (device < cm_id_priv->id.device)
node = node->rb_left;
else if (device > cm_id_priv->id.device)
@@ -745,12 +738,14 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
return NULL;
}
-static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
- __be32 remote_id)
+static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
+ __be32 remote_id)
{
struct rb_node *node = cm.remote_id_table.rb_node;
struct cm_timewait_info *timewait_info;
+ struct cm_id_private *res = NULL;
+ spin_lock_irq(&cm.lock);
while (node) {
timewait_info = rb_entry(node, struct cm_timewait_info,
remote_id_node);
@@ -762,10 +757,14 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
node = node->rb_left;
else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
node = node->rb_right;
- else
- return timewait_info;
+ else {
+ res = cm_acquire_id(timewait_info->work.local_id,
+ timewait_info->work.remote_id);
+ break;
+ }
}
- return NULL;
+ spin_unlock_irq(&cm.lock);
+ return res;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
@@ -917,6 +916,35 @@ static void cm_free_work(struct cm_work *work)
kfree(work);
}
+static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
+ struct cm_work *work)
+{
+ bool immediate;
+
+ /*
+ * To deliver the event to the user callback we have to drop the
+ * spinlock; however, we need to ensure that the user callback is
+ * single threaded and receives events in temporal order. If events
+ * are already being processed, queue new events onto a list and the
+ * thread currently processing will pick them up.
+ */
+ immediate = atomic_inc_and_test(&cm_id_priv->work_count);
+ if (!immediate) {
+ list_add_tail(&work->list, &cm_id_priv->work_list);
+ /*
+ * This routine always consumes incoming reference. Once queued
+ * to the work_list then a reference is held by the thread
+ * currently running cm_process_work() and this reference is not
+ * needed.
+ */
+ cm_deref_id(cm_id_priv);
+ }
+ spin_unlock_irq(&cm_id_priv->lock);
+
+ if (immediate)
+ cm_process_work(cm_id_priv, work);
+}
+
static inline int cm_convert_to_ms(int iba_time)
{
/* approximate conversion to ms from 4.096us x 2^iba_time */
@@ -942,8 +970,10 @@ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
return min(31, ack_timeout);
}
-static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
+static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
+ struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
+
if (timewait_info->inserted_remote_id) {
rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
timewait_info->inserted_remote_id = 0;
@@ -982,7 +1012,7 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
return;
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
spin_unlock_irqrestore(&cm.lock, flags);
@@ -1001,6 +1031,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
msecs_to_jiffies(wait_time));
spin_unlock_irqrestore(&cm.lock, flags);
+ /*
+ * The timewait_info is converted into a work and gets freed during
+ * cm_free_work() in cm_timewait_handler().
+ */
+ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
cm_id_priv->timewait_info = NULL;
}
@@ -1013,7 +1048,7 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
cm_id_priv->id.state = IB_CM_IDLE;
if (cm_id_priv->timewait_info) {
spin_lock_irqsave(&cm.lock, flags);
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
@@ -1076,7 +1111,9 @@ retest:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- /* Fall through */
+ cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
+ 0, NULL, 0);
+ goto retest;
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
@@ -1101,7 +1138,7 @@ retest:
case IB_CM_TIMEWAIT:
/*
* The cm_acquire_id in cm_timewait_handler will stop working
- * once we do cm_free_id() below, so just move to idle here for
+ * once we do xa_erase below, so just move to idle here for
* consistency.
*/
cm_id->state = IB_CM_IDLE;
@@ -1114,7 +1151,7 @@ retest:
spin_lock(&cm.lock);
/* Required for cleanup paths related cm_req_handler() */
if (cm_id_priv->timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
}
@@ -1131,7 +1168,7 @@ retest:
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
- cm_free_id(cm_id->local_id);
+ xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -1287,6 +1324,13 @@ static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
hdr->tid = tid;
}
+static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
+ __be64 tid, u32 attr_mod)
+{
+ cm_format_mad_hdr(hdr, attr_id, tid);
+ hdr->attr_mod = cpu_to_be32(attr_mod);
+}
+
static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
@@ -1299,8 +1343,8 @@ static void cm_format_req(struct cm_req_msg *req_msg,
pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
pri_path->opa.slid);
- cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
- cm_form_tid(cm_id_priv));
+ cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
+ cm_form_tid(cm_id_priv), param->ece.attr_mod);
IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
be32_to_cpu(cm_id_priv->id.local_id));
@@ -1423,6 +1467,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
+ IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
if (param->private_data && param->private_data_len)
IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
@@ -1779,6 +1824,9 @@ static void cm_format_req_event(struct cm_work *work,
param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
+ param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
+ param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
+
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}
@@ -1927,7 +1975,6 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
- struct ib_cm_id *cm_id;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -1948,7 +1995,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
/* Check for stale connections. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
@@ -1957,8 +2004,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
if (cur_cm_id_priv) {
- cm_id = &cur_cm_id_priv->id;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
return NULL;
@@ -1969,14 +2015,13 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
cm_id_priv->id.device,
cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
if (!listen_cm_id_priv) {
- cm_cleanup_timewait(cm_id_priv->timewait_info);
+ cm_remove_remote(cm_id_priv);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
return NULL;
}
- refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
return listen_cm_id_priv;
}
@@ -2153,9 +2198,7 @@ static int cm_req_handler(struct cm_work *work)
/* Refcount belongs to the event, pairs with cm_process_work() */
refcount_inc(&cm_id_priv->refcount);
- atomic_inc(&cm_id_priv->work_count);
- spin_unlock_irq(&cm_id_priv->lock);
- cm_process_work(cm_id_priv, work);
+ cm_queue_work_unlock(cm_id_priv, work);
/*
* Since this ID was just created and was not made visible to other MAD
* handlers until the cm_finalize_id() above we know that the
@@ -2176,7 +2219,8 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
- cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
+ cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
+ param->ece.attr_mod);
IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
be32_to_cpu(cm_id_priv->id.local_id));
IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
@@ -2203,6 +2247,10 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
}
+ IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
+ IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
+ IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
+
if (param->private_data && param->private_data_len)
IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
param->private_data_len);
@@ -2350,6 +2398,11 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
+ param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
+ param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
+ param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
+
work->cm_event.private_data =
IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
@@ -2404,7 +2457,6 @@ static int cm_rep_handler(struct cm_work *work)
struct cm_rep_msg *rep_msg;
int ret;
struct cm_id_private *cur_cm_id_priv;
- struct ib_cm_id *cm_id;
struct cm_timewait_info *timewait_info;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
@@ -2454,9 +2506,7 @@ static int cm_rep_handler(struct cm_work *work)
/* Check for a stale connection. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
- rb_erase(&cm_id_priv->timewait_info->remote_id_node,
- &cm.remote_id_table);
- cm_id_priv->timewait_info->inserted_remote_id = 0;
+ cm_remove_remote(cm_id_priv);
cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
@@ -2472,8 +2522,7 @@ static int cm_rep_handler(struct cm_work *work)
IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
if (cur_cm_id_priv) {
- cm_id = &cur_cm_id_priv->id;
- ib_send_cm_dreq(cm_id, NULL, 0);
+ ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
cm_deref_id(cur_cm_id_priv);
}
@@ -2501,15 +2550,7 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv->alt_av.timeout - 1);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
error:
@@ -2520,7 +2561,6 @@ error:
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
- int ret;
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
@@ -2534,15 +2574,7 @@ static int cm_establish_handler(struct cm_work *work)
}
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2553,7 +2585,6 @@ static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
- int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2576,15 +2607,7 @@ static int cm_rtu_handler(struct cm_work *work)
cm_id_priv->id.state = IB_CM_ESTABLISHED;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2777,7 +2800,6 @@ static int cm_dreq_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
- int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2842,15 +2864,7 @@ static int cm_dreq_handler(struct cm_work *work)
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
cm_id_priv->tid = dreq_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -2862,7 +2876,6 @@ static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
- int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(
@@ -2883,15 +2896,7 @@ static int cm_drep_handler(struct cm_work *work)
cm_enter_timewait(cm_id_priv);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -2987,24 +2992,15 @@ static void cm_format_rej_event(struct cm_work *work)
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
- struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
__be32 remote_id;
remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
- spin_lock_irq(&cm.lock);
- timewait_info = cm_find_remote_id(
+ cm_id_priv = cm_find_remote_id(
*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
remote_id);
- if (!timewait_info) {
- spin_unlock_irq(&cm.lock);
- return NULL;
- }
- cm_id_priv =
- cm_acquire_id(timewait_info->work.local_id, remote_id);
- spin_unlock_irq(&cm.lock);
} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(
@@ -3022,7 +3018,6 @@ static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
- int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_rejected_id(rej_msg);
@@ -3068,19 +3063,10 @@ static int cm_rej_handler(struct cm_work *work)
__func__, be32_to_cpu(cm_id_priv->id.local_id),
cm_id_priv->id.state);
spin_unlock_irq(&cm_id_priv->lock);
- ret = -EINVAL;
goto out;
}
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3190,7 +3176,7 @@ static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
- int timeout, ret;
+ int timeout;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_mraed_id(mra_msg);
@@ -3250,15 +3236,7 @@ static int cm_mra_handler(struct cm_work *work)
cm_id_priv->msg->context[1] = (void *) (unsigned long)
cm_id_priv->id.state;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
spin_unlock_irq(&cm_id_priv->lock);
@@ -3393,15 +3371,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
@@ -3413,7 +3383,6 @@ static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
- int ret;
/* Currently Alternate path messages are not supported for
* RoCE link layer.
@@ -3448,16 +3417,7 @@ static int cm_apr_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_id_priv->msg = NULL;
-
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3468,7 +3428,6 @@ static int cm_timewait_handler(struct cm_work *work)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
- int ret;
timewait_info = container_of(work, struct cm_timewait_info, work);
spin_lock_irq(&cm.lock);
@@ -3487,15 +3446,7 @@ static int cm_timewait_handler(struct cm_work *work)
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
- ret = atomic_inc_and_test(&cm_id_priv->work_count);
- if (!ret)
- list_add_tail(&work->list, &cm_id_priv->work_list);
- spin_unlock_irq(&cm_id_priv->lock);
-
- if (ret)
- cm_process_work(cm_id_priv, work);
- else
- cm_deref_id(cm_id_priv);
+ cm_queue_work_unlock(cm_id_priv, work);
return 0;
out:
cm_deref_id(cm_id_priv);
@@ -3642,7 +3593,6 @@ static int cm_sidr_req_handler(struct cm_work *work)
.status = IB_SIDR_UNSUPPORTED });
goto out; /* No match. */
}
- refcount_inc(&listen_cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -3674,8 +3624,8 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_rep_param *param)
{
- cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
- cm_id_priv->tid);
+ cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
+ cm_id_priv->tid, param->ece.attr_mod);
IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
be32_to_cpu(cm_id_priv->id.remote_id));
IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
@@ -3683,6 +3633,10 @@ static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
be64_to_cpu(cm_id_priv->id.service_id));
IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
+ param->ece.vendor_id & 0xFF);
+ IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
+ (param->ece.vendor_id >> 8) & 0xFF);
if (param->info && param->info_length)
IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
@@ -4384,7 +4338,7 @@ static void cm_remove_port_fs(struct cm_port *port)
}
-static void cm_add_one(struct ib_device *ib_device)
+static int cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
@@ -4403,7 +4357,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
GFP_KERNEL);
if (!cm_dev)
- return;
+ return -ENOMEM;
cm_dev->ib_device = ib_device;
cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
@@ -4415,8 +4369,10 @@ static void cm_add_one(struct ib_device *ib_device)
continue;
port = kzalloc(sizeof *port, GFP_KERNEL);
- if (!port)
+ if (!port) {
+ ret = -ENOMEM;
goto error1;
+ }
cm_dev->port[i-1] = port;
port->cm_dev = cm_dev;
@@ -4437,8 +4393,10 @@ static void cm_add_one(struct ib_device *ib_device)
cm_recv_handler,
port,
0);
- if (IS_ERR(port->mad_agent))
+ if (IS_ERR(port->mad_agent)) {
+ ret = PTR_ERR(port->mad_agent);
goto error2;
+ }
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
@@ -4447,15 +4405,17 @@ static void cm_add_one(struct ib_device *ib_device)
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
- return;
+ return 0;
error3:
ib_unregister_mad_agent(port->mad_agent);
@@ -4477,6 +4437,7 @@ error1:
}
free:
kfree(cm_dev);
+ return ret;
}
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
@@ -4491,9 +4452,6 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
unsigned long flags;
int i;
- if (!cm_dev)
- return;
-
write_lock_irqsave(&cm.device_lock, flags);
list_del(&cm_dev->list);
write_unlock_irqrestore(&cm.device_lock, flags);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26e6f7df247b..3d7cc9f0f3d4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -91,7 +91,13 @@ const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
}
EXPORT_SYMBOL(rdma_reject_msg);
-bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+/**
+ * rdma_is_consumer_reject - return true if the consumer rejected the connect
+ * request.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
if (rdma_ib_or_roce(id->device, id->port_num))
return reason == IB_CM_REJ_CONSUMER_DEFINED;
@@ -102,7 +108,6 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
WARN_ON_ONCE(1);
return false;
}
-EXPORT_SYMBOL(rdma_is_consumer_reject);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len)
@@ -148,7 +153,7 @@ struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
}
EXPORT_SYMBOL(rdma_res_to_id);
-static void cma_add_one(struct ib_device *device);
+static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
static struct ib_client cma_client = {
@@ -479,6 +484,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
rdma_restrack_kadd(&id_priv->res);
else
rdma_restrack_uadd(&id_priv->res);
+ trace_cm_id_attach(id_priv, cma_dev->device);
}
static void cma_attach_to_dev(struct rdma_id_private *id_priv,
@@ -883,7 +889,6 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
id_priv->id.route.addr.dev_addr.net = get_net(net);
id_priv->seq_num &= 0x00ffffff;
- trace_cm_id_create(id_priv);
return &id_priv->id;
}
EXPORT_SYMBOL(__rdma_create_id);
@@ -1906,6 +1911,9 @@ static void cma_set_rep_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
event->param.conn.srq = rep_data->srq;
event->param.conn.qp_num = rep_data->remote_qpn;
+
+ event->ece.vendor_id = rep_data->ece.vendor_id;
+ event->ece.attr_mod = rep_data->ece.attr_mod;
}
static int cma_cm_event_handler(struct rdma_id_private *id_priv,
@@ -2124,6 +2132,9 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
event->param.conn.srq = req_data->srq;
event->param.conn.qp_num = req_data->remote_qpn;
+
+ event->ece.vendor_id = req_data->ece.vendor_id;
+ event->ece.attr_mod = req_data->ece.attr_mod;
}
static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
@@ -2904,6 +2915,24 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
+static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
+{
+ struct sockaddr_in6 *addr6;
+ u16 dport, sport;
+ u32 hash, fl;
+
+ addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
+ fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
+ if ((cma_family(id_priv) != AF_INET6) || !fl) {
+ dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
+ sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
+ hash = (u32)sport * 31 + dport;
+ fl = hash & IB_GRH_FLOWLABEL_MASK;
+ }
+
+ return cpu_to_be32(fl);
+}
+
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2970,6 +2999,11 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
+ if (rdma_protocol_roce_udp_encap(id_priv->id.device,
+ id_priv->id.port_num))
+ route->path_rec->flow_label =
+ cma_get_roce_udp_flow_label(id_priv);
+
cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
@@ -3919,6 +3953,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
req.max_cm_retries = CMA_MAX_CM_RETRIES;
req.srq = id_priv->srq ? 1 : 0;
+ req.ece.vendor_id = id_priv->ece.vendor_id;
+ req.ece.attr_mod = id_priv->ece.attr_mod;
trace_cm_send_req(id_priv);
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
@@ -4008,6 +4044,27 @@ err:
}
EXPORT_SYMBOL(rdma_connect);
+/**
+ * rdma_connect_ece - Initiate an active connection request with ECE data.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
+ * @ece: ECE parameters to convey to the peer
+ *
+ * See the description of rdma_connect() for details.
+ */
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return rdma_connect(id, conn_param);
+}
+EXPORT_SYMBOL(rdma_connect_ece);
+
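For illustration, a minimal caller sketch (hypothetical ULP helper, not taken from this patch) showing how rdma_connect_ece() carries vendor ECE data into the CM REQ; it assumes @id has already been resolved and @conn_param has been filled in by the ULP:

#include <rdma/rdma_cm.h>

static int example_connect_with_ece(struct rdma_cm_id *id,
				    struct rdma_conn_param *conn_param,
				    u32 vendor_id, u32 attr_mod)
{
	/* vendor_id/attr_mod are device-specific ECE values */
	struct rdma_ucm_ece ece = {
		.vendor_id = vendor_id,
		.attr_mod = attr_mod,
	};

	return rdma_connect_ece(id, conn_param, &ece);
}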
static int cma_accept_ib(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
@@ -4033,6 +4090,8 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
rep.flow_control = conn_param->flow_control;
rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
rep.srq = id_priv->srq ? 1 : 0;
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
trace_cm_send_rep(id_priv);
ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
@@ -4080,7 +4139,11 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
return ret;
rep.qp_num = id_priv->qp_num;
rep.qkey = id_priv->qkey;
+
+ rep.ece.vendor_id = id_priv->ece.vendor_id;
+ rep.ece.attr_mod = id_priv->ece.attr_mod;
}
+
rep.private_data = private_data;
rep.private_data_len = private_data_len;
@@ -4133,11 +4196,24 @@ int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
return 0;
reject:
cma_modify_qp_err(id_priv);
- rdma_reject(id, NULL, 0);
+ rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
EXPORT_SYMBOL(__rdma_accept);
+int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller, struct rdma_ucm_ece *ece)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ id_priv->ece.vendor_id = ece->vendor_id;
+ id_priv->ece.attr_mod = ece->attr_mod;
+
+ return __rdma_accept(id, conn_param, caller);
+}
+EXPORT_SYMBOL(__rdma_accept_ece);
+
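Likewise, a hypothetical listener-side sketch (helper name and module string are illustrative) that echoes the ECE values delivered with the connect-request event back through __rdma_accept_ece():

#include <rdma/rdma_cm.h>

static int example_accept_with_ece(struct rdma_cm_id *id,
				   struct rdma_conn_param *conn_param,
				   struct rdma_cm_event *event)
{
	/* event->ece is populated by cma_set_req_event_data() above */
	struct rdma_ucm_ece ece = {
		.vendor_id = event->ece.vendor_id,
		.attr_mod = event->ece.attr_mod,
	};

	return __rdma_accept_ece(id, conn_param, KBUILD_MODNAME, &ece);
}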
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
struct rdma_id_private *id_priv;
@@ -4160,7 +4236,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
EXPORT_SYMBOL(rdma_notify);
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len)
+ u8 private_data_len, u8 reason)
{
struct rdma_id_private *id_priv;
int ret;
@@ -4175,9 +4251,8 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
private_data, private_data_len);
} else {
trace_cm_send_rej(id_priv);
- ret = ib_send_cm_rej(id_priv->cm_id.ib,
- IB_CM_REJ_CONSUMER_DEFINED, NULL,
- 0, private_data, private_data_len);
+ ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
+ private_data, private_data_len);
}
} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
ret = iw_cm_reject(id_priv->cm_id.iw,
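To illustrate the widened rdma_reject() signature, a hypothetical call site (not from the patch): passing IB_CM_REJ_CONSUMER_DEFINED keeps the previous behaviour, while other IBTA reject codes can now be reported directly:

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>

static void example_reject_request(struct rdma_cm_id *id)
{
	/* application-level reject, equivalent to the old rdma_reject() */
	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}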
@@ -4633,29 +4708,34 @@ static struct notifier_block cma_nb = {
.notifier_call = cma_netdev_callback
};
-static void cma_add_one(struct ib_device *device)
+static int cma_add_one(struct ib_device *device)
{
struct cma_device *cma_dev;
struct rdma_id_private *id_priv;
unsigned int i;
unsigned long supported_gids = 0;
+ int ret;
cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
if (!cma_dev)
- return;
+ return -ENOMEM;
cma_dev->device = device;
cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_gid_type),
GFP_KERNEL);
- if (!cma_dev->default_gid_type)
+ if (!cma_dev->default_gid_type) {
+ ret = -ENOMEM;
goto free_cma_dev;
+ }
cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
sizeof(*cma_dev->default_roce_tos),
GFP_KERNEL);
- if (!cma_dev->default_roce_tos)
+ if (!cma_dev->default_roce_tos) {
+ ret = -ENOMEM;
goto free_gid_type;
+ }
rdma_for_each_port (device, i) {
supported_gids = roce_gid_type_mask_support(device, i);
@@ -4681,15 +4761,14 @@ static void cma_add_one(struct ib_device *device)
mutex_unlock(&lock);
trace_cm_add_one(device);
- return;
+ return 0;
free_gid_type:
kfree(cma_dev->default_gid_type);
free_cma_dev:
kfree(cma_dev);
-
- return;
+ return ret;
}
static int cma_remove_id_dev(struct rdma_id_private *id_priv)
@@ -4751,9 +4830,6 @@ static void cma_remove_one(struct ib_device *device, void *client_data)
trace_cm_remove_one(device);
- if (!cma_dev)
- return;
-
mutex_lock(&lock);
list_del(&cma_dev->list);
mutex_unlock(&lock);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index c672a4978bfd..3c1e2ca564fe 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -322,8 +322,21 @@ fail:
return ERR_PTR(err);
}
+static void drop_cma_dev(struct config_group *cgroup, struct config_item *item)
+{
+ struct config_group *group =
+ container_of(item, struct config_group, cg_item);
+ struct cma_dev_group *cma_dev_group =
+ container_of(group, struct cma_dev_group, device_group);
+
+ configfs_remove_default_groups(&cma_dev_group->ports_group);
+ configfs_remove_default_groups(&cma_dev_group->device_group);
+ config_item_put(item);
+}
+
static struct configfs_group_operations cma_subsys_group_ops = {
.make_group = make_cma_dev,
+ .drop_item = drop_cma_dev,
};
static const struct config_item_type cma_subsys_type = {
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 5edcf44a9307..caece96ebcf5 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -95,6 +95,7 @@ struct rdma_id_private {
* Internal to RDMA/core, don't use in the drivers
*/
struct rdma_restrack_entry res;
+ struct rdma_ucm_ece ece;
};
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
diff --git a/drivers/infiniband/core/cma_trace.h b/drivers/infiniband/core/cma_trace.h
index 81e36bf13159..e6e20c36c538 100644
--- a/drivers/infiniband/core/cma_trace.h
+++ b/drivers/infiniband/core/cma_trace.h
@@ -103,23 +103,33 @@ DEFINE_CMA_FSM_EVENT(sent_drep);
DEFINE_CMA_FSM_EVENT(sent_dreq);
DEFINE_CMA_FSM_EVENT(id_destroy);
-TRACE_EVENT(cm_id_create,
+TRACE_EVENT(cm_id_attach,
TP_PROTO(
- const struct rdma_id_private *id_priv
+ const struct rdma_id_private *id_priv,
+ const struct ib_device *device
),
- TP_ARGS(id_priv),
+ TP_ARGS(id_priv, device),
TP_STRUCT__entry(
__field(u32, cm_id)
+ __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
+ __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
+ __string(devname, device->name)
),
TP_fast_assign(
__entry->cm_id = id_priv->res.id;
+ memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr,
+ sizeof(struct sockaddr_in6));
+ memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr,
+ sizeof(struct sockaddr_in6));
+ __assign_str(devname, device->name);
),
- TP_printk("cm.id=%u",
- __entry->cm_id
+ TP_printk("cm.id=%u src=%pISpc dst=%pISpc device=%s",
+ __entry->cm_id, __entry->srcaddr, __entry->dstaddr,
+ __get_str(devname)
)
);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cf42acca4a3a..a1e6a67b2c4a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -414,4 +414,7 @@ void rdma_umap_priv_init(struct rdma_umap_priv *priv,
struct vm_area_struct *vma,
struct rdma_user_mmap_entry *entry);
+void ib_cq_pool_init(struct ib_device *dev);
+void ib_cq_pool_destroy(struct ib_device *dev);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 4f25b2400694..655795bfa0ee 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -7,7 +7,11 @@
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
+#include "core_priv.h"
+
#include <trace/events/rdma_core.h>
+/* Max size for shared CQ, may require tuning */
+#define IB_MAX_SHARED_CQ_SZ 4096U
/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
@@ -218,6 +222,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
cq->cq_context = private;
cq->poll_ctx = poll_ctx;
atomic_set(&cq->usecnt, 0);
+ cq->comp_vector = comp_vector;
cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
if (!cq->wc)
@@ -309,6 +314,8 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
return;
+ if (WARN_ON_ONCE(cq->cqe_used))
+ return;
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
@@ -334,3 +341,169 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq_user);
+
+void ib_cq_pool_init(struct ib_device *dev)
+{
+ unsigned int i;
+
+ spin_lock_init(&dev->cq_pools_lock);
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
+ INIT_LIST_HEAD(&dev->cq_pools[i]);
+}
+
+void ib_cq_pool_destroy(struct ib_device *dev)
+{
+ struct ib_cq *cq, *n;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
+ list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
+ pool_entry) {
+ WARN_ON(cq->cqe_used);
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ }
+}
+
+static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
+ enum ib_poll_context poll_ctx)
+{
+ LIST_HEAD(tmp_list);
+ unsigned int nr_cqs, i;
+ struct ib_cq *cq;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate at least as many CQEs as requested, and otherwise
+ * a reasonable batch size so that we can share CQs between
+ * multiple users instead of allocating a larger number of CQs.
+ */
+ nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
+ max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
+ nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ for (i = 0; i < nr_cqs; i++) {
+ cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out_free_cqs;
+ }
+ cq->shared = true;
+ list_add_tail(&cq->pool_entry, &tmp_list);
+ }
+
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return 0;
+
+out_free_cqs:
+ list_for_each_entry(cq, &tmp_list, pool_entry) {
+ cq->shared = false;
+ ib_free_cq(cq);
+ }
+ return ret;
+}
+
+/**
+ * ib_cq_pool_get() - Find the least used completion queue that matches
+ * a given completion vector hint (or a driver-chosen vector for a wildcard
+ * hint) and has room for @nr_cqe entries.
+ * @dev: rdma device
+ * @nr_cqe: number of needed cqe entries
+ * @comp_vector_hint: completion vector hint, or -1 to let the driver assign
+ * a comp vector based on an internal round-robin counter
+ * @poll_ctx: cq polling context
+ *
+ * Finds a cq that satisfies both @comp_vector_hint and @nr_cqe and claims
+ * entries in it for the caller. If no suitable cq is available, a new cq
+ * matching the requirements is allocated and added to the device pool.
+ * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
+ * for @poll_ctx.
+ */
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx)
+{
+ static unsigned int default_comp_vector;
+ unsigned int vector, num_comp_vectors;
+ struct ib_cq *cq, *found = NULL;
+ int ret;
+
+ if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
+ WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE);
+ return ERR_PTR(-EINVAL);
+ }
+
+ num_comp_vectors =
+ min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
+ /* Project the affinity to the device completion vector range */
+ if (comp_vector_hint < 0) {
+ comp_vector_hint =
+ (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
+ WRITE_ONCE(default_comp_vector, comp_vector_hint);
+ }
+ vector = comp_vector_hint % num_comp_vectors;
+
+ /*
+ * Find the least used CQ with correct affinity and
+ * enough free CQ entries
+ */
+ while (!found) {
+ spin_lock_irq(&dev->cq_pools_lock);
+ list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
+ pool_entry) {
+ /*
+ * Check to see if we have found a CQ with the
+ * correct completion vector
+ */
+ if (vector != cq->comp_vector)
+ continue;
+ if (cq->cqe_used + nr_cqe > cq->cqe)
+ continue;
+ found = cq;
+ break;
+ }
+
+ if (found) {
+ found->cqe_used += nr_cqe;
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ return found;
+ }
+ spin_unlock_irq(&dev->cq_pools_lock);
+
+ /*
+ * Didn't find a match, or we ran out of CQs in the device
+ * pool; allocate a new batch of CQs and retry the search.
+ */
+ ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return found;
+}
+EXPORT_SYMBOL(ib_cq_pool_get);
+
+/**
+ * ib_cq_pool_put - Return a CQ taken from a shared pool.
+ * @cq: The CQ to return.
+ * @nr_cqe: The max number of cqes that the caller requested from the pool.
+ */
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
+{
+ if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
+ return;
+
+ spin_lock_irq(&cq->device->cq_pools_lock);
+ cq->cqe_used -= nr_cqe;
+ spin_unlock_irq(&cq->device->cq_pools_lock);
+}
+EXPORT_SYMBOL(ib_cq_pool_put);
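A hypothetical ULP-side sketch (helper names are illustrative) of the intended pairing of ib_cq_pool_get() and ib_cq_pool_put() around a QP's lifetime:

#include <rdma/ib_verbs.h>

/* Borrow a shared CQ with room for nr_cqe completions on any vector. */
static struct ib_cq *example_get_shared_cq(struct ib_device *dev,
					   unsigned int nr_cqe)
{
	return ib_cq_pool_get(dev, nr_cqe, -1, IB_POLL_SOFTIRQ);
}

/* Return the claimed entries once the QP that used them is destroyed. */
static void example_put_shared_cq(struct ib_cq *cq, unsigned int nr_cqe)
{
	ib_cq_pool_put(cq, nr_cqe);
}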
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index d0b3d35ad3e4..905a2beaf885 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -677,8 +677,20 @@ static int add_client_context(struct ib_device *device,
if (ret)
goto out;
downgrade_write(&device->client_data_rwsem);
- if (client->add)
- client->add(device);
+ if (client->add) {
+ if (client->add(device)) {
+ /*
+ * If a client fails to add then the error code is
+ * ignored, but we won't call any more ops on this
+ * client.
+ */
+ xa_erase(&device->client_data, client->client_id);
+ up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
+ return 0;
+ }
+ }
/* Readers shall not see a client until add has been completed */
xa_set_mark(&device->client_data, client->client_id,
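The comment above describes the new contract for ib_client: ->add() returns an errno and ->remove() is only invoked for devices where add succeeded. A hypothetical client skeleton following that convention (names are illustrative):

#include <rdma/ib_verbs.h>

static int example_add_one(struct ib_device *device)
{
	/* allocate per-device state; return -ENOMEM etc. on failure */
	return 0;
}

static void example_remove_one(struct ib_device *device, void *client_data)
{
	/* client_data can no longer be NULL here */
}

static struct ib_client example_client = {
	.name = "example",
	.add = example_add_one,
	.remove = example_remove_one,
};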
@@ -1381,6 +1393,7 @@ int ib_register_device(struct ib_device *device, const char *name)
goto dev_cleanup;
}
+ ib_cq_pool_init(device);
ret = enable_device_and_get(device);
dev_set_uevent_suppress(&device->dev, false);
/* Mark for userspace that device is ready */
@@ -1435,6 +1448,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
goto out;
disable_device(ib_dev);
+ ib_cq_pool_destroy(ib_dev);
/* Expedite removing unregistered pointers from the hash table */
free_netdevs(ib_dev);
@@ -2557,7 +2571,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, add_gid);
SET_DEVICE_OP(dev_ops, advise_mr);
SET_DEVICE_OP(dev_ops, alloc_dm);
- SET_DEVICE_OP(dev_ops, alloc_fmr);
SET_DEVICE_OP(dev_ops, alloc_hw_stats);
SET_DEVICE_OP(dev_ops, alloc_mr);
SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
@@ -2584,7 +2597,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, create_wq);
SET_DEVICE_OP(dev_ops, dealloc_dm);
SET_DEVICE_OP(dev_ops, dealloc_driver);
- SET_DEVICE_OP(dev_ops, dealloc_fmr);
SET_DEVICE_OP(dev_ops, dealloc_mw);
SET_DEVICE_OP(dev_ops, dealloc_pd);
SET_DEVICE_OP(dev_ops, dealloc_ucontext);
@@ -2628,7 +2640,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, iw_rem_ref);
SET_DEVICE_OP(dev_ops, map_mr_sg);
SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
- SET_DEVICE_OP(dev_ops, map_phys_fmr);
SET_DEVICE_OP(dev_ops, mmap);
SET_DEVICE_OP(dev_ops, mmap_free);
SET_DEVICE_OP(dev_ops, modify_ah);
@@ -2662,7 +2673,6 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, resize_cq);
SET_DEVICE_OP(dev_ops, set_vf_guid);
SET_DEVICE_OP(dev_ops, set_vf_link_state);
- SET_DEVICE_OP(dev_ops, unmap_fmr);
SET_OBJ_SIZE(dev_ops, ib_ah);
SET_OBJ_SIZE(dev_ops, ib_cq);
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
deleted file mode 100644
index e08aec427027..000000000000
--- a/drivers/infiniband/core/fmr_pool.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/jhash.h>
-#include <linux/kthread.h>
-
-#include <rdma/ib_fmr_pool.h>
-
-#include "core_priv.h"
-
-#define PFX "fmr_pool: "
-
-enum {
- IB_FMR_MAX_REMAPS = 32,
-
- IB_FMR_HASH_BITS = 8,
- IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS,
- IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1
-};
-
-/*
- * If an FMR is not in use, then the list member will point to either
- * its pool's free_list (if the FMR can be mapped again; that is,
- * remap_count < pool->max_remaps) or its pool's dirty_list (if the
- * FMR needs to be unmapped before being remapped). In either of
- * these cases it is a bug if the ref_count is not 0. In other words,
- * if ref_count is > 0, then the list member must not be linked into
- * either free_list or dirty_list.
- *
- * The cache_node member is used to link the FMR into a cache bucket
- * (if caching is enabled). This is independent of the reference
- * count of the FMR. When a valid FMR is released, its ref_count is
- * decremented, and if ref_count reaches 0, the FMR is placed in
- * either free_list or dirty_list as appropriate. However, it is not
- * removed from the cache and may be "revived" if a call to
- * ib_fmr_register_physical() occurs before the FMR is remapped. In
- * this case we just increment the ref_count and remove the FMR from
- * free_list/dirty_list.
- *
- * Before we remap an FMR from free_list, we remove it from the cache
- * (to prevent another user from obtaining a stale FMR). When an FMR
- * is released, we add it to the tail of the free list, so that our
- * cache eviction policy is "least recently used."
- *
- * All manipulation of ref_count, list and cache_node is protected by
- * pool_lock to maintain consistency.
- */
-
-struct ib_fmr_pool {
- spinlock_t pool_lock;
-
- int pool_size;
- int max_pages;
- int max_remaps;
- int dirty_watermark;
- int dirty_len;
- struct list_head free_list;
- struct list_head dirty_list;
- struct hlist_head *cache_bucket;
-
- void (*flush_function)(struct ib_fmr_pool *pool,
- void * arg);
- void *flush_arg;
-
- struct kthread_worker *worker;
- struct kthread_work work;
-
- atomic_t req_ser;
- atomic_t flush_ser;
-
- wait_queue_head_t force_wait;
-};
-
-static inline u32 ib_fmr_hash(u64 first_page)
-{
- return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
- (IB_FMR_HASH_SIZE - 1);
-}
-
-/* Caller must hold pool_lock */
-static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
- u64 *page_list,
- int page_list_len,
- u64 io_virtual_address)
-{
- struct hlist_head *bucket;
- struct ib_pool_fmr *fmr;
-
- if (!pool->cache_bucket)
- return NULL;
-
- bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
-
- hlist_for_each_entry(fmr, bucket, cache_node)
- if (io_virtual_address == fmr->io_virtual_address &&
- page_list_len == fmr->page_list_len &&
- !memcmp(page_list, fmr->page_list,
- page_list_len * sizeof *page_list))
- return fmr;
-
- return NULL;
-}
-
-static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
-{
- int ret;
- struct ib_pool_fmr *fmr;
- LIST_HEAD(unmap_list);
- LIST_HEAD(fmr_list);
-
- spin_lock_irq(&pool->pool_lock);
-
- list_for_each_entry(fmr, &pool->dirty_list, list) {
- hlist_del_init(&fmr->cache_node);
- fmr->remap_count = 0;
- list_add_tail(&fmr->fmr->list, &fmr_list);
- }
-
- list_splice_init(&pool->dirty_list, &unmap_list);
- pool->dirty_len = 0;
-
- spin_unlock_irq(&pool->pool_lock);
-
- if (list_empty(&unmap_list)) {
- return;
- }
-
- ret = ib_unmap_fmr(&fmr_list);
- if (ret)
- pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
-
- spin_lock_irq(&pool->pool_lock);
- list_splice(&unmap_list, &pool->free_list);
- spin_unlock_irq(&pool->pool_lock);
-}
-
-static void ib_fmr_cleanup_func(struct kthread_work *work)
-{
- struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);
-
- ib_fmr_batch_release(pool);
- atomic_inc(&pool->flush_ser);
- wake_up_interruptible(&pool->force_wait);
-
- if (pool->flush_function)
- pool->flush_function(pool, pool->flush_arg);
-
- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
- kthread_queue_work(pool->worker, &pool->work);
-}
-
-/**
- * ib_create_fmr_pool - Create an FMR pool
- * @pd:Protection domain for FMRs
- * @params:FMR pool parameters
- *
- * Create a pool of FMRs. Return value is pointer to new pool or
- * error code if creation failed.
- */
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params)
-{
- struct ib_device *device;
- struct ib_fmr_pool *pool;
- int i;
- int ret;
- int max_remaps;
-
- if (!params)
- return ERR_PTR(-EINVAL);
-
- device = pd->device;
- if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr ||
- !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
- dev_info(&device->dev, "Device does not support FMRs\n");
- return ERR_PTR(-ENOSYS);
- }
-
- if (!device->attrs.max_map_per_fmr)
- max_remaps = IB_FMR_MAX_REMAPS;
- else
- max_remaps = device->attrs.max_map_per_fmr;
-
- pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool)
- return ERR_PTR(-ENOMEM);
-
- pool->cache_bucket = NULL;
- pool->flush_function = params->flush_function;
- pool->flush_arg = params->flush_arg;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->dirty_list);
-
- if (params->cache) {
- pool->cache_bucket =
- kmalloc_array(IB_FMR_HASH_SIZE,
- sizeof(*pool->cache_bucket),
- GFP_KERNEL);
- if (!pool->cache_bucket) {
- ret = -ENOMEM;
- goto out_free_pool;
- }
-
- for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
- INIT_HLIST_HEAD(pool->cache_bucket + i);
- }
-
- pool->pool_size = 0;
- pool->max_pages = params->max_pages_per_fmr;
- pool->max_remaps = max_remaps;
- pool->dirty_watermark = params->dirty_watermark;
- pool->dirty_len = 0;
- spin_lock_init(&pool->pool_lock);
- atomic_set(&pool->req_ser, 0);
- atomic_set(&pool->flush_ser, 0);
- init_waitqueue_head(&pool->force_wait);
-
- pool->worker =
- kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
- if (IS_ERR(pool->worker)) {
- pr_warn(PFX "couldn't start cleanup kthread worker\n");
- ret = PTR_ERR(pool->worker);
- goto out_free_pool;
- }
- kthread_init_work(&pool->work, ib_fmr_cleanup_func);
-
- {
- struct ib_pool_fmr *fmr;
- struct ib_fmr_attr fmr_attr = {
- .max_pages = params->max_pages_per_fmr,
- .max_maps = pool->max_remaps,
- .page_shift = params->page_shift
- };
- int bytes_per_fmr = sizeof *fmr;
-
- if (pool->cache_bucket)
- bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
-
- for (i = 0; i < params->pool_size; ++i) {
- fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr)
- goto out_fail;
-
- fmr->pool = pool;
- fmr->remap_count = 0;
- fmr->ref_count = 0;
- INIT_HLIST_NODE(&fmr->cache_node);
-
- fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
- if (IS_ERR(fmr->fmr)) {
- pr_warn(PFX "fmr_create failed for FMR %d\n",
- i);
- kfree(fmr);
- goto out_fail;
- }
-
- list_add_tail(&fmr->list, &pool->free_list);
- ++pool->pool_size;
- }
- }
-
- return pool;
-
- out_free_pool:
- kfree(pool->cache_bucket);
- kfree(pool);
-
- return ERR_PTR(ret);
-
- out_fail:
- ib_destroy_fmr_pool(pool);
-
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(ib_create_fmr_pool);
-
-/**
- * ib_destroy_fmr_pool - Free FMR pool
- * @pool:FMR pool to free
- *
- * Destroy an FMR pool and free all associated resources.
- */
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
-{
- struct ib_pool_fmr *fmr;
- struct ib_pool_fmr *tmp;
- LIST_HEAD(fmr_list);
- int i;
-
- kthread_destroy_worker(pool->worker);
- ib_fmr_batch_release(pool);
-
- i = 0;
- list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
- if (fmr->remap_count) {
- INIT_LIST_HEAD(&fmr_list);
- list_add_tail(&fmr->fmr->list, &fmr_list);
- ib_unmap_fmr(&fmr_list);
- }
- ib_dealloc_fmr(fmr->fmr);
- list_del(&fmr->list);
- kfree(fmr);
- ++i;
- }
-
- if (i < pool->pool_size)
- pr_warn(PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
-
- kfree(pool->cache_bucket);
- kfree(pool);
-}
-EXPORT_SYMBOL(ib_destroy_fmr_pool);
-
-/**
- * ib_flush_fmr_pool - Invalidate all unmapped FMRs
- * @pool:FMR pool to flush
- *
- * Ensure that all unmapped FMRs are fully invalidated.
- */
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
-{
- int serial;
- struct ib_pool_fmr *fmr, *next;
-
- /*
- * The free_list holds FMRs that may have been used
- * but have not been remapped enough times to be dirty.
- * Put them on the dirty list now so that the cleanup
- * thread will reap them too.
- */
- spin_lock_irq(&pool->pool_lock);
- list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
- if (fmr->remap_count > 0)
- list_move(&fmr->list, &pool->dirty_list);
- }
- spin_unlock_irq(&pool->pool_lock);
-
- serial = atomic_inc_return(&pool->req_ser);
- kthread_queue_work(pool->worker, &pool->work);
-
- if (wait_event_interruptible(pool->force_wait,
- atomic_read(&pool->flush_ser) - serial >= 0))
- return -EINTR;
-
- return 0;
-}
-EXPORT_SYMBOL(ib_flush_fmr_pool);
-
-/**
- * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
- * @pool_handle: FMR pool to allocate FMR from
- * @page_list: List of pages to map
- * @list_len: Number of pages in @page_list
- * @io_virtual_address: I/O virtual address for new FMR
- */
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address)
-{
- struct ib_fmr_pool *pool = pool_handle;
- struct ib_pool_fmr *fmr;
- unsigned long flags;
- int result;
-
- if (list_len < 1 || list_len > pool->max_pages)
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- fmr = ib_fmr_cache_lookup(pool,
- page_list,
- list_len,
- io_virtual_address);
- if (fmr) {
- /* found in cache */
- ++fmr->ref_count;
- if (fmr->ref_count == 1) {
- list_del(&fmr->list);
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- return fmr;
- }
-
- if (list_empty(&pool->free_list)) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- return ERR_PTR(-EAGAIN);
- }
-
- fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
- list_del(&fmr->list);
- hlist_del_init(&fmr->cache_node);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
- io_virtual_address);
-
- if (result) {
- spin_lock_irqsave(&pool->pool_lock, flags);
- list_add(&fmr->list, &pool->free_list);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-
- pr_warn(PFX "fmr_map returns %d\n", result);
-
- return ERR_PTR(result);
- }
-
- ++fmr->remap_count;
- fmr->ref_count = 1;
-
- if (pool->cache_bucket) {
- fmr->io_virtual_address = io_virtual_address;
- fmr->page_list_len = list_len;
- memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
-
- spin_lock_irqsave(&pool->pool_lock, flags);
- hlist_add_head(&fmr->cache_node,
- pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_fmr_pool_map_phys);
-
-/**
- * ib_fmr_pool_unmap - Unmap FMR
- * @fmr:FMR to unmap
- *
- * Unmap an FMR. The FMR mapping may remain valid until the FMR is
- * reused (or until ib_flush_fmr_pool() is called).
- */
-void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
-{
- struct ib_fmr_pool *pool;
- unsigned long flags;
-
- pool = fmr->pool;
-
- spin_lock_irqsave(&pool->pool_lock, flags);
-
- --fmr->ref_count;
- if (!fmr->ref_count) {
- if (fmr->remap_count < pool->max_remaps) {
- list_add_tail(&fmr->list, &pool->free_list);
- } else {
- list_add_tail(&fmr->list, &pool->dirty_list);
- if (++pool->dirty_len >= pool->dirty_watermark) {
- atomic_inc(&pool->req_ser);
- kthread_queue_work(pool->worker, &pool->work);
- }
- }
- }
-
- spin_unlock_irqrestore(&pool->pool_lock, flags);
-}
-EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/drivers/infiniband/core/lag.c b/drivers/infiniband/core/lag.c
new file mode 100644
index 000000000000..7063e41eaf26
--- /dev/null
+++ b/drivers/infiniband/core/lag.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include <rdma/lag.h>
+
+static struct sk_buff *rdma_build_skb(struct ib_device *device,
+ struct net_device *netdev,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ u8 smac[ETH_ALEN];
+ bool is_ipv4;
+ int hdr_len;
+
+ is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw);
+ hdr_len = ETH_HLEN + sizeof(struct udphdr) + LL_RESERVED_SPACE(netdev);
+ hdr_len += is_ipv4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr);
+
+ skb = alloc_skb(hdr_len, flags);
+ if (!skb)
+ return NULL;
+
+ skb->dev = netdev;
+ skb_reserve(skb, hdr_len);
+ skb_push(skb, sizeof(struct udphdr));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+ uh->source =
+ htons(rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ uh->dest = htons(ROCE_V2_UDP_DPORT);
+ uh->len = htons(sizeof(struct udphdr));
+
+ if (is_ipv4) {
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+ iph->frag_off = 0;
+ iph->version = 4;
+ iph->protocol = IPPROTO_UDP;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(sizeof(struct udphdr) +
+ sizeof(struct iphdr));
+ memcpy(&iph->saddr, ah_attr->grh.sgid_attr->gid.raw + 12,
+ sizeof(struct in_addr));
+ memcpy(&iph->daddr, ah_attr->grh.dgid.raw + 12,
+ sizeof(struct in_addr));
+ } else {
+ skb_push(skb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->version = 6;
+ ip6h->nexthdr = IPPROTO_UDP;
+ memcpy(&ip6h->flow_lbl, &ah_attr->grh.flow_label,
+ sizeof(*ip6h->flow_lbl));
+ memcpy(&ip6h->saddr, ah_attr->grh.sgid_attr->gid.raw,
+ sizeof(struct in6_addr));
+ memcpy(&ip6h->daddr, ah_attr->grh.dgid.raw,
+ sizeof(struct in6_addr));
+ }
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = eth->h_proto = htons(is_ipv4 ? ETH_P_IP : ETH_P_IPV6);
+ rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, NULL, smac);
+ memcpy(eth->h_source, smac, ETH_ALEN);
+ memcpy(eth->h_dest, ah_attr->roce.dmac, ETH_ALEN);
+
+ return skb;
+}
+
+static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
+ struct net_device *master,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave;
+ struct sk_buff *skb;
+
+ skb = rdma_build_skb(device, master, ah_attr, flags);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ rcu_read_lock();
+ slave = netdev_get_xmit_slave(master, skb,
+ !!(device->lag_flags &
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
+ if (slave)
+ dev_hold(slave);
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return slave;
+}
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave)
+{
+ if (xmit_slave)
+ dev_put(xmit_slave);
+}
+
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags)
+{
+ struct net_device *slave = NULL;
+ struct net_device *master;
+
+ if (!(ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE &&
+ ah_attr->grh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
+ ah_attr->grh.flow_label))
+ return NULL;
+
+ rcu_read_lock();
+ master = rdma_read_gid_attr_ndev_rcu(ah_attr->grh.sgid_attr);
+ if (IS_ERR(master)) {
+ rcu_read_unlock();
+ return master;
+ }
+ dev_hold(master);
+ rcu_read_unlock();
+
+ if (!netif_is_bond_master(master))
+ goto put;
+
+ slave = rdma_get_xmit_slave_udp(device, master, ah_attr, flags);
+put:
+ dev_put(master);
+ return slave;
+}
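A hypothetical caller sketch (names and the steering step are illustrative; in-tree usage may differ) of how the new LAG helpers are meant to be used while building a RoCE address handle:

#include <rdma/ib_verbs.h>
#include <rdma/lag.h>

static int example_resolve_lag_slave(struct ib_device *device,
				     struct rdma_ah_attr *ah_attr)
{
	struct net_device *slave;

	/* NULL means "no bond / not applicable"; real failures are ERR_PTR */
	slave = rdma_lag_get_ah_roce_slave(device, ah_attr, GFP_KERNEL);
	if (IS_ERR(slave))
		return PTR_ERR(slave);

	/* ... program the HW to transmit this AH via slave, if non-NULL ... */

	rdma_lag_put_ah_roce_slave(slave);	/* drops the dev_hold() */
	return 0;
}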
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index c54db13fa9b0..186e0d652e8b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -85,7 +85,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
-/* Client ID 0 is used for snoop-only clients */
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;
@@ -483,141 +482,12 @@ error1:
}
EXPORT_SYMBOL(ib_register_mad_agent);
-static inline int is_snooping_sends(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (/*IB_MAD_SNOOP_POSTED_SENDS |
- IB_MAD_SNOOP_RMPP_SENDS |*/
- IB_MAD_SNOOP_SEND_COMPLETIONS /*|
- IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
-}
-
-static inline int is_snooping_recvs(int mad_snoop_flags)
-{
- return (mad_snoop_flags &
- (IB_MAD_SNOOP_RECVS /*|
- IB_MAD_SNOOP_RMPP_RECVS*/));
-}
-
-static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
- struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_snoop_private **new_snoop_table;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- /* Check for empty slot in array. */
- for (i = 0; i < qp_info->snoop_table_size; i++)
- if (!qp_info->snoop_table[i])
- break;
-
- if (i == qp_info->snoop_table_size) {
- /* Grow table. */
- new_snoop_table = krealloc(qp_info->snoop_table,
- sizeof mad_snoop_priv *
- (qp_info->snoop_table_size + 1),
- GFP_ATOMIC);
- if (!new_snoop_table) {
- i = -ENOMEM;
- goto out;
- }
-
- qp_info->snoop_table = new_snoop_table;
- qp_info->snoop_table_size++;
- }
- qp_info->snoop_table[i] = mad_snoop_priv;
- atomic_inc(&qp_info->snoop_count);
-out:
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- return i;
-}
-
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context)
-{
- struct ib_mad_port_private *port_priv;
- struct ib_mad_agent *ret;
- struct ib_mad_snoop_private *mad_snoop_priv;
- int qpn;
- int err;
-
- /* Validate parameters */
- if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
- (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- qpn = get_spl_qp_index(qp_type);
- if (qpn == -1) {
- ret = ERR_PTR(-EINVAL);
- goto error1;
- }
- port_priv = ib_get_mad_port(device, port_num);
- if (!port_priv) {
- ret = ERR_PTR(-ENODEV);
- goto error1;
- }
- /* Allocate structures */
- mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
- if (!mad_snoop_priv) {
- ret = ERR_PTR(-ENOMEM);
- goto error1;
- }
-
- /* Now, fill in the various structures */
- mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
- mad_snoop_priv->agent.device = device;
- mad_snoop_priv->agent.recv_handler = recv_handler;
- mad_snoop_priv->agent.snoop_handler = snoop_handler;
- mad_snoop_priv->agent.context = context;
- mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
- mad_snoop_priv->agent.port_num = port_num;
- mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
- init_completion(&mad_snoop_priv->comp);
-
- err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
- if (err) {
- ret = ERR_PTR(err);
- goto error2;
- }
-
- mad_snoop_priv->snoop_index = register_snoop_agent(
- &port_priv->qp_info[qpn],
- mad_snoop_priv);
- if (mad_snoop_priv->snoop_index < 0) {
- ret = ERR_PTR(mad_snoop_priv->snoop_index);
- goto error3;
- }
-
- atomic_set(&mad_snoop_priv->refcount, 1);
- return &mad_snoop_priv->agent;
-error3:
- ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-error2:
- kfree(mad_snoop_priv);
-error1:
- return ret;
-}
-EXPORT_SYMBOL(ib_register_mad_snoop);
-
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
if (atomic_dec_and_test(&mad_agent_priv->refcount))
complete(&mad_agent_priv->comp);
}
-static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- if (atomic_dec_and_test(&mad_snoop_priv->refcount))
- complete(&mad_snoop_priv->comp);
-}
-
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
struct ib_mad_port_private *port_priv;
@@ -650,25 +520,6 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
kfree_rcu(mad_agent_priv, rcu);
}
-static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
-{
- struct ib_mad_qp_info *qp_info;
- unsigned long flags;
-
- qp_info = mad_snoop_priv->qp_info;
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
- atomic_dec(&qp_info->snoop_count);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-
- deref_snoop_agent(mad_snoop_priv);
- wait_for_completion(&mad_snoop_priv->comp);
-
- ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
-
- kfree(mad_snoop_priv);
-}
-
/*
* ib_unregister_mad_agent - Unregisters a client from using MAD services
*
@@ -677,20 +528,11 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
struct ib_mad_agent_private *mad_agent_priv;
- struct ib_mad_snoop_private *mad_snoop_priv;
-
- /* If the TID is zero, the agent can only snoop. */
- if (mad_agent->hi_tid) {
- mad_agent_priv = container_of(mad_agent,
- struct ib_mad_agent_private,
- agent);
- unregister_mad_agent(mad_agent_priv);
- } else {
- mad_snoop_priv = container_of(mad_agent,
- struct ib_mad_snoop_private,
- agent);
- unregister_mad_snoop(mad_snoop_priv);
- }
+
+ mad_agent_priv = container_of(mad_agent,
+ struct ib_mad_agent_private,
+ agent);
+ unregister_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
@@ -706,57 +548,6 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
spin_unlock_irqrestore(&mad_queue->lock, flags);
}
-static void snoop_send(struct ib_mad_qp_info *qp_info,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
- send_buf, mad_send_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
-static void snoop_recv(struct ib_mad_qp_info *qp_info,
- struct ib_mad_recv_wc *mad_recv_wc,
- int mad_snoop_flags)
-{
- struct ib_mad_snoop_private *mad_snoop_priv;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- for (i = 0; i < qp_info->snoop_table_size; i++) {
- mad_snoop_priv = qp_info->snoop_table[i];
- if (!mad_snoop_priv ||
- !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
- continue;
-
- atomic_inc(&mad_snoop_priv->refcount);
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
- mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
- mad_recv_wc);
- deref_snoop_agent(mad_snoop_priv);
- spin_lock_irqsave(&qp_info->snoop_lock, flags);
- }
- spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
-}
-
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
@@ -2289,9 +2080,6 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
recv->header.recv_wc.recv_buf.grh = &recv->grh;
- if (atomic_read(&qp_info->snoop_count))
- snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
-
/* Validate MAD */
if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
goto out;
@@ -2538,9 +2326,6 @@ retry:
mad_send_wc.send_buf = &mad_send_wr->send_buf;
mad_send_wc.status = wc->status;
mad_send_wc.vendor_err = wc->vendor_err;
- if (atomic_read(&qp_info->snoop_count))
- snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
- IB_MAD_SNOOP_SEND_COMPLETIONS);
ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
if (queued_send_wr) {
@@ -2782,10 +2567,6 @@ static void local_completions(struct work_struct *work)
local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
local->mad_priv->header.recv_wc.recv_buf.mad =
(struct ib_mad *)local->mad_priv->mad;
- if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
- snoop_recv(recv_mad_agent->qp_info,
- &local->mad_priv->header.recv_wc,
- IB_MAD_SNOOP_RECVS);
recv_mad_agent->agent.recv_handler(
&recv_mad_agent->agent,
&local->mad_send_wr->send_buf,
@@ -2800,10 +2581,6 @@ local_send_completion:
mad_send_wc.status = IB_WC_SUCCESS;
mad_send_wc.vendor_err = 0;
mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
- if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
- snoop_send(mad_agent_priv->qp_info,
- &local->mad_send_wr->send_buf,
- &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
&mad_send_wc);
@@ -3119,10 +2896,6 @@ static void init_mad_qp(struct ib_mad_port_private *port_priv,
init_mad_queue(qp_info, &qp_info->send_queue);
init_mad_queue(qp_info, &qp_info->recv_queue);
INIT_LIST_HEAD(&qp_info->overflow_list);
- spin_lock_init(&qp_info->snoop_lock);
- qp_info->snoop_table = NULL;
- qp_info->snoop_table_size = 0;
- atomic_set(&qp_info->snoop_count, 0);
}
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
@@ -3166,7 +2939,6 @@ static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
return;
ib_destroy_qp(qp_info->qp);
- kfree(qp_info->snoop_table);
}
/*
@@ -3304,9 +3076,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
return 0;
}
-static void ib_mad_init_device(struct ib_device *device)
+static int ib_mad_init_device(struct ib_device *device)
{
int start, i;
+ unsigned int count = 0;
+ int ret;
start = rdma_start_port(device);
@@ -3314,17 +3088,23 @@ static void ib_mad_init_device(struct ib_device *device)
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_mad_port_open(device, i)) {
+ ret = ib_mad_port_open(device, i);
+ if (ret) {
dev_err(&device->dev, "Couldn't open port %d\n", i);
goto error;
}
- if (ib_agent_port_open(device, i)) {
+ ret = ib_agent_port_open(device, i);
+ if (ret) {
dev_err(&device->dev,
"Couldn't open port %d for agents\n", i);
goto error_agent;
}
+ count++;
}
- return;
+ if (!count)
+ return -EOPNOTSUPP;
+
+ return 0;
error_agent:
if (ib_mad_port_close(device, i))
@@ -3341,6 +3121,7 @@ error:
if (ib_mad_port_close(device, i))
dev_err(&device->dev, "Couldn't close port %d\n", i);
}
+ return ret;
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 9c2d8b7f1af9..740f03ecc05d 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -42,7 +42,7 @@
#include <rdma/ib_cache.h>
#include "sa.h"
-static void mcast_add_one(struct ib_device *device);
+static int mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);
static struct ib_client mcast_client = {
@@ -815,7 +815,7 @@ static void mcast_event_handler(struct ib_event_handler *handler,
}
}
-static void mcast_add_one(struct ib_device *device)
+static int mcast_add_one(struct ib_device *device)
{
struct mcast_device *dev;
struct mcast_port *port;
@@ -825,7 +825,7 @@ static void mcast_add_one(struct ib_device *device)
dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!dev)
- return;
+ return -ENOMEM;
dev->start_port = rdma_start_port(device);
dev->end_port = rdma_end_port(device);
@@ -845,7 +845,7 @@ static void mcast_add_one(struct ib_device *device)
if (!count) {
kfree(dev);
- return;
+ return -EOPNOTSUPP;
}
dev->device = device;
@@ -853,6 +853,7 @@ static void mcast_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
ib_register_event_handler(&dev->event_handler);
+ return 0;
}
static void mcast_remove_one(struct ib_device *device, void *client_data)
@@ -861,9 +862,6 @@ static void mcast_remove_one(struct ib_device *device, void *client_data)
struct mcast_port *port;
int i;
- if (!dev)
- return;
-
ib_unregister_event_handler(&dev->event_handler);
flush_workqueue(mcast_wq);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index e0a5e897e4b1..38de4942c682 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -130,6 +130,17 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
lockdep_assert_held(&ufile->hw_destroy_rwsem);
assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE);
+ if (reason == RDMA_REMOVE_ABORT_HWOBJ) {
+ reason = RDMA_REMOVE_ABORT;
+ ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason,
+ attrs);
+ /*
+ * Drivers are not permitted to ignore RDMA_REMOVE_ABORT, see
+ * ib_is_destroy_retryable, cleanup_retryable == false here.
+ */
+ WARN_ON(ret);
+ }
+
if (reason == RDMA_REMOVE_ABORT) {
WARN_ON(!list_empty(&uobj->list));
WARN_ON(!uobj->context);
@@ -653,11 +664,15 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
* object and anything else connected to uobj before calling this.
*/
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs)
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid)
{
struct ib_uverbs_file *ufile = uobj->ufile;
- uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs);
+ uverbs_destroy_uobject(uobj,
+ hw_obj_valid ? RDMA_REMOVE_ABORT_HWOBJ :
+ RDMA_REMOVE_ABORT,
+ attrs);
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
@@ -927,8 +942,8 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
}
void uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs)
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs)
{
/*
* refcounts should be handled at the object level and not at the
@@ -951,7 +966,7 @@ void uverbs_finalize_object(struct ib_uobject *uobj,
if (commit)
rdma_alloc_commit_uobject(uobj, attrs);
else
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, hw_obj_valid);
break;
default:
WARN_ON(true);
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 33978e0f1262..33706dad6c0f 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -64,8 +64,8 @@ uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access,
s64 id, struct uverbs_attr_bundle *attrs);
void uverbs_finalize_object(struct ib_uobject *uobj,
- enum uverbs_obj_access access, bool commit,
- struct uverbs_attr_bundle *attrs);
+ enum uverbs_obj_access access, bool hw_obj_valid,
+ bool commit, struct uverbs_attr_bundle *attrs);
int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
@@ -159,6 +159,9 @@ extern const struct uapi_definition uverbs_def_obj_dm[];
extern const struct uapi_definition uverbs_def_obj_flow_action[];
extern const struct uapi_definition uverbs_def_obj_intf[];
extern const struct uapi_definition uverbs_def_obj_mr[];
+extern const struct uapi_definition uverbs_def_obj_qp[];
+extern const struct uapi_definition uverbs_def_obj_srq[];
+extern const struct uapi_definition uverbs_def_obj_wq[];
extern const struct uapi_definition uverbs_def_write_intf[];
static inline const struct uverbs_api_write_method *
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 557efbf29197..614cff89fc71 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -129,7 +129,7 @@ static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
qp->integrity_en);
int i, j, ret = 0, count = 0;
- ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+ ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
if (!ctx->reg) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 74e0058fcf9e..a2ed09a3c714 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -174,7 +174,7 @@ static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
};
-static void ib_sa_add_one(struct ib_device *device);
+static int ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);
static struct ib_client sa_client = {
@@ -190,7 +190,7 @@ static u32 tid;
#define PATH_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct sa_path_rec, field), \
- .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
+ .struct_size_bytes = sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field path_rec_table[] = {
@@ -292,7 +292,7 @@ static const struct ib_field path_rec_table[] = {
.struct_offset_bytes = \
offsetof(struct sa_path_rec, field), \
.struct_size_bytes = \
- sizeof((struct sa_path_rec *)0)->field, \
+ sizeof_field(struct sa_path_rec, field), \
.field_name = "sa_path_rec:" #field
static const struct ib_field opa_path_rec_table[] = {
@@ -420,7 +420,7 @@ static const struct ib_field opa_path_rec_table[] = {
#define MCMEMBER_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_mcmember_rec, field), \
.field_name = "sa_mcmember_rec:" #field
static const struct ib_field mcmember_rec_table[] = {
@@ -504,7 +504,7 @@ static const struct ib_field mcmember_rec_table[] = {
#define SERVICE_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
- .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_service_rec, field), \
.field_name = "sa_service_rec:" #field
static const struct ib_field service_rec_table[] = {
@@ -552,7 +552,7 @@ static const struct ib_field service_rec_table[] = {
#define CLASSPORTINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
- .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_class_port_info, field), \
.field_name = "ib_class_port_info:" #field
static const struct ib_field ib_classport_info_rec_table[] = {
@@ -630,7 +630,7 @@ static const struct ib_field ib_classport_info_rec_table[] = {
.struct_offset_bytes =\
offsetof(struct opa_class_port_info, field), \
.struct_size_bytes = \
- sizeof((struct opa_class_port_info *)0)->field, \
+ sizeof_field(struct opa_class_port_info, field), \
.field_name = "opa_class_port_info:" #field
static const struct ib_field opa_classport_info_rec_table[] = {
@@ -710,7 +710,7 @@ static const struct ib_field opa_classport_info_rec_table[] = {
#define GUIDINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
- .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_sa_guidinfo_rec, field), \
.field_name = "sa_guidinfo_rec:" #field
static const struct ib_field guidinfo_rec_table[] = {
@@ -1412,17 +1412,13 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
EXPORT_SYMBOL(ib_sa_pack_path);
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
- struct ib_device *device,
+ struct ib_sa_device *sa_dev,
u8 port_num)
{
- struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
struct ib_sa_port *port;
unsigned long flags;
bool ret = false;
- if (!sa_dev)
- return ret;
-
port = &sa_dev->port[port_num - sa_dev->start_port];
spin_lock_irqsave(&port->classport_lock, flags);
if (!port->classport_info.valid)
@@ -1450,8 +1446,8 @@ enum opa_pr_supported {
* query is possible.
*/
static int opa_pr_query_possible(struct ib_sa_client *client,
- struct ib_device *device,
- u8 port_num,
+ struct ib_sa_device *sa_dev,
+ struct ib_device *device, u8 port_num,
struct sa_path_rec *rec)
{
struct ib_port_attr port_attr;
@@ -1459,7 +1455,7 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
if (ib_query_port(device, port_num, &port_attr))
return PR_NOT_SUPPORTED;
- if (ib_sa_opa_pathrecord_support(client, device, port_num))
+ if (ib_sa_opa_pathrecord_support(client, sa_dev, port_num))
return PR_OPA_SUPPORTED;
if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
@@ -1574,7 +1570,8 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
query->sa_query.port = port;
if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
- status = opa_pr_query_possible(client, device, port_num, rec);
+ status = opa_pr_query_possible(client, sa_dev, device, port_num,
+ rec);
if (status == PR_NOT_SUPPORTED) {
ret = -EINVAL;
goto err1;
@@ -2325,18 +2322,19 @@ static void ib_sa_event(struct ib_event_handler *handler,
}
}
-static void ib_sa_add_one(struct ib_device *device)
+static int ib_sa_add_one(struct ib_device *device)
{
struct ib_sa_device *sa_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
if (!sa_dev)
- return;
+ return -ENOMEM;
sa_dev->start_port = s;
sa_dev->end_port = e;
@@ -2356,8 +2354,10 @@ static void ib_sa_add_one(struct ib_device *device)
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
recv_handler, sa_dev, 0);
- if (IS_ERR(sa_dev->port[i].agent))
+ if (IS_ERR(sa_dev->port[i].agent)) {
+ ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
+ }
INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
@@ -2366,8 +2366,10 @@ static void ib_sa_add_one(struct ib_device *device)
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &sa_client, sa_dev);
@@ -2386,7 +2388,7 @@ static void ib_sa_add_one(struct ib_device *device)
update_sm_ah(&sa_dev->port[i].update_task);
}
- return;
+ return 0;
err:
while (--i >= 0) {
@@ -2395,7 +2397,7 @@ err:
}
free:
kfree(sa_dev);
- return;
+ return ret;
}
static void ib_sa_remove_one(struct ib_device *device, void *client_data)
@@ -2403,9 +2405,6 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
struct ib_sa_device *sa_dev = client_data;
int i;
- if (!sa_dev)
- return;
-
ib_unregister_event_handler(&sa_dev->event_handler);
flush_workqueue(ib_wq);
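
The recurring change in this file (and in ud_header.c, uverbs_cmd.c and uverbs_ioctl.c further down) is purely mechanical: the open-coded sizeof((struct t *)0)->field idiom is replaced by the shared sizeof_field() helper, which yields the same compile-time value. A minimal userspace sketch, using a local stand-in for the kernel macro and an illustrative struct rather than the real sa_path_rec:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's sizeof_field() from <linux/stddef.h>. */
    #define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

    /* Illustrative record; not the real struct sa_path_rec. */
    struct demo_rec {
            uint8_t  dgid[16];
            uint16_t dlid;
            uint8_t  sl;
    };

    int main(void)
    {
            /* Same compile-time result as the old open-coded expression. */
            printf("dgid is %zu bytes at offset %zu\n",
                   sizeof_field(struct demo_rec, dgid),
                   offsetof(struct demo_rec, dgid));
            printf("dlid is %zu bytes at offset %zu\n",
                   sizeof_field(struct demo_rec, dlid),
                   offsetof(struct demo_rec, dlid));
            return 0;
    }
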
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 087682e6969e..defe9cd4c5ee 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1058,8 +1058,7 @@ static int add_port(struct ib_core_device *coredev, int port_num)
coredev->ports_kobj,
"%d", port_num);
if (ret) {
- kfree(p);
- return ret;
+ goto err_put;
}
p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
@@ -1072,8 +1071,7 @@ static int add_port(struct ib_core_device *coredev, int port_num)
ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
&p->kobj, "gid_attrs");
if (ret) {
- kfree(p->gid_attr_group);
- goto err_put;
+ goto err_put_gid_attrs;
}
if (device->ops.process_mad && is_full_dev) {
@@ -1404,8 +1402,10 @@ int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s",
name);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
}
return 0;
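
The sysfs.c hunks all enforce the kobject rule that once kobject_init_and_add() has run, the object may only be released through kobject_put(), so the ktype's release callback does the freeing; a direct kfree() leaks the added references. A condensed sketch of that pattern, with a hypothetical child object and ktype rather than the real port code:

    #include <linux/kobject.h>
    #include <linux/slab.h>

    /* Hypothetical child object; not the real ib_port structure. */
    struct child_obj {
            struct kobject kobj;
    };

    static void child_release(struct kobject *kobj)
    {
            kfree(container_of(kobj, struct child_obj, kobj));
    }

    static struct kobj_type child_ktype = {
            .release = child_release,
    };

    static int register_child_kobject(struct kobject *parent)
    {
            struct child_obj *p;
            int ret;

            p = kzalloc(sizeof(*p), GFP_KERNEL);
            if (!p)
                    return -ENOMEM;

            ret = kobject_init_and_add(&p->kobj, &child_ktype, parent, "child");
            if (ret) {
                    /* After kobject_init(), only kobject_put() may free it. */
                    kobject_put(&p->kobj);
                    return ret;
            }
            return 0;
    }
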
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 16b6cf57fa85..5b87eee8ccc8 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -52,6 +52,7 @@
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
@@ -360,6 +361,9 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
+ uevent->resp.ece.vendor_id = event->ece.vendor_id;
+ uevent->resp.ece.attr_mod = event->ece.attr_mod;
+
if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
if (!ctx->backlog) {
ret = -ENOMEM;
@@ -404,7 +408,8 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
* Old 32 bit user space does not send the 4 byte padding in the
* reserved field. We don't care, allow it to keep working.
*/
- if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved))
+ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
+ sizeof(uevent->resp.ece))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -845,7 +850,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
return -ENOSPC;
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
@@ -869,6 +874,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
goto out;
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
+ resp.ibdev_index = ctx->cm_id->device->index;
resp.port_num = ctx->cm_id->port_num;
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
@@ -880,8 +886,8 @@ static ssize_t ucma_query_route(struct ucma_file *file,
out:
mutex_unlock(&ctx->mutex);
- if (copy_to_user(u64_to_user_ptr(cmd.response),
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
+ min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
ucma_put_ctx(ctx);
@@ -895,6 +901,7 @@ static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
return;
resp->node_guid = (__force __u64) cm_id->device->node_guid;
+ resp->ibdev_index = cm_id->device->index;
resp->port_num = cm_id->port_num;
resp->pkey = (__force __u16) cpu_to_be16(
ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
@@ -907,7 +914,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
struct sockaddr *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -922,7 +929,7 @@ static ssize_t ucma_query_addr(struct ucma_context *ctx,
ucma_query_device_addr(ctx->cm_id, &resp);
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -974,7 +981,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
struct sockaddr_ib *addr;
int ret = 0;
- if (out_len < sizeof(resp))
+ if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
return -ENOSPC;
memset(&resp, 0, sizeof resp);
@@ -1007,7 +1014,7 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
&ctx->cm_id->route.addr.dst_addr);
}
- if (copy_to_user(response, &resp, sizeof(resp)))
+ if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
ret = -EFAULT;
return ret;
@@ -1070,12 +1077,15 @@ static void ucma_copy_conn_param(struct rdma_cm_id *id,
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
int in_len, int out_len)
{
- struct rdma_ucm_connect cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
+ struct rdma_ucm_connect cmd;
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
if (!cmd.conn_param.valid)
@@ -1086,8 +1096,13 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
mutex_lock(&ctx->mutex);
- ret = rdma_connect(ctx->cm_id, &conn_param);
+ ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
@@ -1121,28 +1136,36 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
{
struct rdma_ucm_accept cmd;
struct rdma_conn_param conn_param;
+ struct rdma_ucm_ece ece = {};
struct ucma_context *ctx;
+ size_t in_size;
int ret;
- if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ in_size = min_t(size_t, in_len, sizeof(cmd));
+ if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ if (offsetofend(typeof(cmd), ece) <= in_size) {
+ ece.vendor_id = cmd.ece.vendor_id;
+ ece.attr_mod = cmd.ece.attr_mod;
+ }
+
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
mutex_lock(&ctx->mutex);
- ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+ ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece);
mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
} else {
mutex_lock(&ctx->mutex);
- ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+ ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece);
mutex_unlock(&ctx->mutex);
}
ucma_put_ctx(ctx);
@@ -1159,12 +1182,24 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
+ if (!cmd.reason)
+ cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;
+
+ switch (cmd.reason) {
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
ctx = ucma_get_ctx_dev(file, cmd.id);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
mutex_lock(&ctx->mutex);
- ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
+ cmd.reason);
mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
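
The ucma hunks share one compatibility pattern for growing fixed uAPI structs: clamp the copy_from_user() length to what the caller actually sent, consult a newly appended field only when offsetofend() shows it was included, and clamp the copy_to_user() length to out_len so old userspace keeps working. A schematic sketch with placeholder structures, not the real rdma_ucm layouts:

    #include <linux/kernel.h>
    #include <linux/stddef.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Illustrative command/response layouts; not real rdma_ucm structures. */
    struct demo_cmd {
            __u64 response;
            __u32 id;
            __u32 new_field;        /* appended later; old userspace omits it */
    };

    struct demo_resp {
            __u64 value;
            __u32 new_out;          /* appended later as well */
            __u32 reserved;
    };

    static ssize_t demo_write(const char __user *inbuf, int in_len, int out_len)
    {
            struct demo_cmd cmd = {};
            struct demo_resp resp = {};
            size_t in_size;

            /* Never read past what the caller sent or what this kernel knows. */
            in_size = min_t(size_t, in_len, sizeof(cmd));
            if (copy_from_user(&cmd, inbuf, in_size))
                    return -EFAULT;

            /* Only honour the new field if the command was long enough for it. */
            if (offsetofend(typeof(cmd), new_field) <= in_size)
                    resp.new_out = cmd.new_field;

            /* Old userspace passes a short out_len; truncate instead of failing. */
            if (out_len < offsetof(struct demo_resp, new_out))
                    return -ENOSPC;
            if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
                             min_t(size_t, out_len, sizeof(resp))))
                    return -EFAULT;
            return 0;
    }
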
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 29a45d2f8898..d65d541b9a25 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -41,7 +41,7 @@
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
- .struct_size_bytes = sizeof ((struct ib_unpacked_ ## header *) 0)->field, \
+ .struct_size_bytes = sizeof_field(struct ib_unpacked_ ## header, field), \
.field_name = #header ":" #field
static const struct ib_field lrh_table[] = {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 3b1e627d9a8d..ccd28405451c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -429,7 +429,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
PAGE_SIZE / sizeof(struct page *));
- down_read(&owning_mm->mmap_sem);
+ mmap_read_lock(owning_mm);
/*
* Note: this might result in redundant page getting. We can
* avoid this by checking dma_list to be 0 before calling
@@ -440,7 +440,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
flags, local_page_list, NULL, NULL);
- up_read(&owning_mm->mmap_sem);
+ mmap_read_unlock(owning_mm);
if (npages < 0) {
if (npages != -EAGAIN)
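
The umem_odp change is part of the tree-wide mmap_sem conversion: open-coded down_read()/up_read() on mm->mmap_sem become the mmap_read_lock()/mmap_read_unlock() wrappers. A minimal sketch of the new helpers around an unrelated, illustrative VMA lookup:

    #include <linux/mmap_lock.h>
    #include <linux/mm.h>

    /* Sketch: walk a VMA under the mmap read lock using the new helpers. */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped;

            mmap_read_lock(mm);             /* was: down_read(&mm->mmap_sem) */
            vma = find_vma(mm, addr);
            mapped = vma && addr >= vma->vm_start;
            mmap_read_unlock(mm);           /* was: up_read(&mm->mmap_sem) */

            return mapped;
    }
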
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index da229eab5903..b0d0b522cc76 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -142,7 +142,7 @@ static dev_t dynamic_issm_dev;
static DEFINE_IDA(umad_ida);
-static void ib_umad_add_one(struct ib_device *device);
+static int ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
static void ib_umad_dev_free(struct kref *kref)
@@ -1352,37 +1352,41 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
put_device(&port->dev);
}
-static void ib_umad_add_one(struct ib_device *device)
+static int ib_umad_add_one(struct ib_device *device)
{
struct ib_umad_device *umad_dev;
int s, e, i;
int count = 0;
+ int ret;
s = rdma_start_port(device);
e = rdma_end_port(device);
umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
if (!umad_dev)
- return;
+ return -ENOMEM;
kref_init(&umad_dev->kref);
for (i = s; i <= e; ++i) {
if (!rdma_cap_ib_mad(device, i))
continue;
- if (ib_umad_init_port(device, i, umad_dev,
- &umad_dev->ports[i - s]))
+ ret = ib_umad_init_port(device, i, umad_dev,
+ &umad_dev->ports[i - s]);
+ if (ret)
goto err;
count++;
}
- if (!count)
+ if (!count) {
+ ret = -EOPNOTSUPP;
goto free;
+ }
ib_set_client_data(device, &umad_client, umad_dev);
- return;
+ return 0;
err:
while (--i >= s) {
@@ -1394,6 +1398,7 @@ err:
free:
/* balances kref_init */
ib_umad_dev_put(umad_dev);
+ return ret;
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
@@ -1401,9 +1406,6 @@ static void ib_umad_remove_one(struct ib_device *device, void *client_data)
struct ib_umad_device *umad_dev = client_data;
unsigned int i;
- if (!umad_dev)
- return;
-
rdma_for_each_port (device, i) {
if (rdma_cap_ib_mad(device, i))
ib_umad_kill_port(
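
As in sa_query.c, ib_umad_add_one() now reports an errno and ib_umad_remove_one() drops its NULL check; judging from these hunks, the updated ib_client contract only invokes ->remove() for devices whose ->add() succeeded. A skeletal client written to that contract, with hypothetical names:

    #include <rdma/ib_verbs.h>
    #include <linux/slab.h>

    static struct ib_client demo_client;

    /* Hypothetical per-device state for a demo client. */
    struct demo_dev {
            struct ib_device *device;
    };

    static int demo_add_one(struct ib_device *device)
    {
            struct demo_dev *ddev;

            ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
            if (!ddev)
                    return -ENOMEM;         /* failure now reaches the core */

            ddev->device = device;
            ib_set_client_data(device, &demo_client, ddev);
            return 0;
    }

    static void demo_remove_one(struct ib_device *device, void *client_data)
    {
            /* No NULL check: remove is only invoked after a successful add. */
            kfree(client_data);
    }

    static struct ib_client demo_client = {
            .name   = "demo",
            .add    = demo_add_one,
            .remove = demo_remove_one,
    };
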
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 3d189c7ee59e..53a10479958b 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -142,7 +142,7 @@ struct ib_uverbs_file {
* ucontext_lock held
*/
struct ib_ucontext *ucontext;
- struct ib_uverbs_async_event_file *async_file;
+ struct ib_uverbs_async_event_file *default_async_file;
struct list_head list;
/*
@@ -180,6 +180,7 @@ struct ib_uverbs_mcast_entry {
struct ib_uevent_object {
struct ib_uobject uobject;
+ struct ib_uverbs_async_event_file *event_file;
/* List member for ib_uverbs_async_event_file list */
struct list_head event_list;
u32 events_reported;
@@ -296,6 +297,24 @@ static inline u32 make_port_cap_flags(const struct ib_port_attr *attr)
return res;
}
+static inline struct ib_uverbs_async_event_file *
+ib_uverbs_get_async_event(struct uverbs_attr_bundle *attrs,
+ u16 id)
+{
+ struct ib_uobject *async_ev_file_uobj;
+ struct ib_uverbs_async_event_file *async_ev_file;
+
+ async_ev_file_uobj = uverbs_attr_get_uobject(attrs, id);
+ if (IS_ERR(async_ev_file_uobj))
+ async_ev_file = READ_ONCE(attrs->ufile->default_async_file);
+ else
+ async_ev_file = container_of(async_ev_file_uobj,
+ struct ib_uverbs_async_event_file,
+ uobj);
+ if (async_ev_file)
+ uverbs_uobject_get(&async_ev_file->uobj);
+ return async_ev_file;
+}
void copy_port_attr_to_resp(struct ib_port_attr *attr,
struct ib_uverbs_query_port_resp *resp,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 060b4ebbd2ba..b48b3f6e632d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -311,7 +311,7 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
return 0;
err_uobj:
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, false);
err_ucontext:
kfree(attrs->context);
attrs->context = NULL;
@@ -356,8 +356,6 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext,
resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
resp->max_ah = attr->max_ah;
- resp->max_fmr = attr->max_fmr;
- resp->max_map_per_fmr = attr->max_map_per_fmr;
resp->max_srq = attr->max_srq;
resp->max_srq_wr = attr->max_srq_wr;
resp->max_srq_sge = attr->max_srq_sge;
@@ -1051,6 +1049,10 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
goto err_free;
obj->uevent.uobject.object = cq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
+
memset(&resp, 0, sizeof resp);
resp.base.cq_handle = obj->uevent.uobject.id;
resp.base.cqe = cq->cqe;
@@ -1067,6 +1069,8 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
return obj;
err_cb:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
cq = NULL;
err_free:
@@ -1460,6 +1464,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
}
obj->uevent.uobject.object = qp;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof resp);
resp.base.qpn = qp->qp_num;
@@ -1473,7 +1480,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
ret = uverbs_response(attrs, &resp, sizeof(resp));
if (ret)
- goto err_cb;
+ goto err_uevent;
if (xrcd) {
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
@@ -1498,6 +1505,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
rdma_alloc_commit_uobject(&obj->uevent.uobject, attrs);
return 0;
+err_uevent:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
err_cb:
ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
@@ -2954,11 +2964,11 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq_init_attr.cq = cq;
wq_init_attr.max_sge = cmd.max_sge;
wq_init_attr.max_wr = cmd.max_wr;
- wq_init_attr.wq_context = attrs->ufile;
wq_init_attr.wq_type = cmd.wq_type;
wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
wq_init_attr.create_flags = cmd.create_flags;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd.user_handle;
wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
if (IS_ERR(wq)) {
@@ -2972,12 +2982,12 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
wq->cq = cq;
wq->pd = pd;
wq->device = pd->device;
- wq->wq_context = wq_init_attr.wq_context;
atomic_set(&wq->usecnt, 0);
atomic_inc(&pd->usecnt);
atomic_inc(&cq->usecnt);
- wq->uobject = obj;
- obj->uevent.uobject.object = wq;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof(resp));
resp.wq_handle = obj->uevent.uobject.id;
@@ -2996,6 +3006,8 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return 0;
err_copy:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
rdma_lookup_put_uobject(&cq->uobject->uevent.uobject,
@@ -3441,46 +3453,25 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
}
attr.event_handler = ib_uverbs_srq_event_handler;
- attr.srq_context = attrs->ufile;
attr.srq_type = cmd->srq_type;
attr.attr.max_wr = cmd->max_wr;
attr.attr.max_sge = cmd->max_sge;
attr.attr.srq_limit = cmd->srq_limit;
INIT_LIST_HEAD(&obj->uevent.event_list);
+ obj->uevent.uobject.user_handle = cmd->user_handle;
- srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
- if (!srq) {
- ret = -ENOMEM;
- goto err_put;
- }
-
- srq->device = pd->device;
- srq->pd = pd;
- srq->srq_type = cmd->srq_type;
- srq->uobject = obj;
- srq->event_handler = attr.event_handler;
- srq->srq_context = attr.srq_context;
-
- ret = pd->device->ops.create_srq(srq, &attr, udata);
- if (ret)
- goto err_free;
-
- if (ib_srq_has_cq(cmd->srq_type)) {
- srq->ext.cq = attr.ext.cq;
- atomic_inc(&attr.ext.cq->usecnt);
- }
-
- if (cmd->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
- atomic_inc(&attr.ext.xrc.xrcd->usecnt);
+ srq = ib_create_srq_user(pd, &attr, obj, udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err_put_pd;
}
- atomic_inc(&pd->usecnt);
- atomic_set(&srq->usecnt, 0);
-
obj->uevent.uobject.object = srq;
obj->uevent.uobject.user_handle = cmd->user_handle;
+ obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+ if (obj->uevent.event_file)
+ uverbs_uobject_get(&obj->uevent.event_file->uobj);
memset(&resp, 0, sizeof resp);
resp.srq_handle = obj->uevent.uobject.id;
@@ -3505,14 +3496,11 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
return 0;
err_copy:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
- /* It was released in ib_destroy_srq_user */
- srq = NULL;
-err_free:
- kfree(srq);
-err_put:
+err_put_pd:
uobj_put_obj_read(pd);
-
err_put_cq:
if (ib_srq_has_cq(cmd->srq_type))
rdma_lookup_put_uobject(&attr.ext.cq->uobject->uevent.uobject,
@@ -3751,7 +3739,7 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs)
#define UAPI_DEF_WRITE_IO(req, resp) \
.write.has_resp = 1 + \
BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \
- BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) != \
+ BUILD_BUG_ON_ZERO(sizeof_field(req, response) != \
sizeof(u64)), \
.write.req_size = sizeof(req), .write.resp_size = sizeof(resp)
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 538affbc517e..2d882c02387c 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -58,6 +58,7 @@ struct bundle_priv {
DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(uobj_hw_obj_valid, UVERBS_API_ATTR_BKEY_LEN);
/*
* Must be last. bundle ends in a flex array which overlaps
@@ -136,7 +137,7 @@ EXPORT_SYMBOL(_uverbs_alloc);
static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
u16 len)
{
- if (uattr->len > sizeof(((struct ib_uverbs_attr *)0)->data))
+ if (uattr->len > sizeof_field(struct ib_uverbs_attr, data))
return ib_is_buffer_cleared(u64_to_user_ptr(uattr->data) + len,
uattr->len - len);
@@ -230,7 +231,8 @@ static void uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
for (i = 0; i != attr->len; i++)
uverbs_finalize_object(attr->uobjects[i],
- spec->u2.objs_arr.access, commit, attrs);
+ spec->u2.objs_arr.access, false, commit,
+ attrs);
}
static int uverbs_process_attr(struct bundle_priv *pbundle,
@@ -502,7 +504,9 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
uverbs_finalize_object(
attr->obj_attr.uobject,
- attr->obj_attr.attr_elm->spec.u.obj.access, commit,
+ attr->obj_attr.attr_elm->spec.u.obj.access,
+ test_bit(i, pbundle->uobj_hw_obj_valid),
+ commit,
&pbundle->bundle);
}
@@ -590,6 +594,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
+ memset(pbundle->uobj_hw_obj_valid, 0,
+ sizeof(pbundle->uobj_hw_obj_valid));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
bundle_destroy(pbundle, ret == 0);
@@ -784,3 +790,15 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
}
return uverbs_copy_to(bundle, idx, from, size);
}
+
+/* Once called, an abort will call through to the type's destroy_hw() */
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
+ u16 idx)
+{
+ struct bundle_priv *pbundle =
+ container_of(bundle, struct bundle_priv, bundle);
+
+ __set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
+ pbundle->uobj_hw_obj_valid);
+}
+EXPORT_SYMBOL(uverbs_finalize_uobj_create);
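
uverbs_finalize_uobj_create() records that the HW object behind a new handle now exists, so any later failure in the same method is unwound by the core calling the type's destroy_hw() instead of by hand-written error paths in each handler. A hedged sketch of the resulting handler shape; the demo_* names and attribute ids are placeholders, not real uverbs symbols:

    #include <rdma/uverbs_ioctl.h>
    #include <linux/err.h>

    /* Placeholder attribute ids and object type for the sketch. */
    enum { DEMO_ATTR_CREATE_HANDLE = 1, DEMO_ATTR_CREATE_RESP_ID };

    struct demo_obj {
            u32 id;
    };

    static struct demo_obj *demo_create_hw_object(struct uverbs_attr_bundle *attrs);

    static int demo_create_handler(struct uverbs_attr_bundle *attrs)
    {
            struct ib_uobject *uobj =
                    uverbs_attr_get_uobject(attrs, DEMO_ATTR_CREATE_HANDLE);
            struct demo_obj *obj;

            obj = demo_create_hw_object(attrs);     /* may fail; nothing to undo */
            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            uobj->object = obj;
            /* From here on, an abort calls the type's destroy_hw() for us. */
            uverbs_finalize_uobj_create(attrs, DEMO_ATTR_CREATE_HANDLE);

            /* A failed response copy just propagates; no manual teardown. */
            return uverbs_copy_to(attrs, DEMO_ATTR_CREATE_RESP_ID, &obj->id,
                                  sizeof(obj->id));
    }
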
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 1bab8de14757..69e4755cc04b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -75,7 +75,7 @@ static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
static DEFINE_IDA(uverbs_ida);
-static void ib_uverbs_add_one(struct ib_device *device);
+static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
/*
@@ -146,8 +146,7 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
{
- struct ib_uverbs_async_event_file *async_file =
- READ_ONCE(uobj->uobject.ufile->async_file);
+ struct ib_uverbs_async_event_file *async_file = uobj->event_file;
struct ib_uverbs_event *evt, *tmp;
if (!async_file)
@@ -159,6 +158,7 @@ void ib_uverbs_release_uevent(struct ib_uevent_object *uobj)
kfree(evt);
}
spin_unlock_irq(&async_file->ev_queue.lock);
+ uverbs_uobject_put(&async_file->uobj);
}
void ib_uverbs_detach_umcast(struct ib_qp *qp,
@@ -197,8 +197,8 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
- if (file->async_file)
- uverbs_uobject_put(&file->async_file->uobj);
+ if (file->default_async_file)
+ uverbs_uobject_put(&file->default_async_file->uobj);
put_device(&file->device->dev);
if (file->disassociate_page)
@@ -296,6 +296,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
spin_lock_irq(&ev_queue->lock);
if (!list_empty(&ev_queue->event_list))
pollflags = EPOLLIN | EPOLLRDNORM;
+ else if (ev_queue->is_closed)
+ pollflags = EPOLLERR;
spin_unlock_irq(&ev_queue->lock);
return pollflags;
@@ -425,7 +427,7 @@ void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
static void uverbs_uobj_event(struct ib_uevent_object *eobj,
struct ib_event *event)
{
- ib_uverbs_async_handler(READ_ONCE(eobj->uobject.ufile->async_file),
+ ib_uverbs_async_handler(eobj->event_file,
eobj->uobject.user_handle, event->event,
&eobj->event_list, &eobj->events_reported);
}
@@ -482,10 +484,10 @@ void ib_uverbs_init_async_event_file(
/* The first async_event_file becomes the default one for the file. */
mutex_lock(&uverbs_file->ucontext_lock);
- if (!uverbs_file->async_file) {
+ if (!uverbs_file->default_async_file) {
/* Pairs with the put in ib_uverbs_release_file */
uverbs_uobject_get(&async_file->uobj);
- smp_store_release(&uverbs_file->async_file, async_file);
+ smp_store_release(&uverbs_file->default_async_file, async_file);
}
mutex_unlock(&uverbs_file->ucontext_lock);
@@ -833,12 +835,12 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
return;
/*
- * The umap_lock is nested under mmap_sem since it used within
+ * The umap_lock is nested under mmap_lock since it is used within
* the vma_ops callbacks, so we have to clean the list one mm
* at a time to get the lock ordering right. Typically there
* will only be one mm, so no big deal.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!mmget_still_valid(mm))
goto skip_mm;
mutex_lock(&ufile->umap_lock);
@@ -860,7 +862,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
@@ -1092,7 +1094,7 @@ static int ib_uverbs_create_uapi(struct ib_device *device,
return 0;
}
-static void ib_uverbs_add_one(struct ib_device *device)
+static int ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
dev_t base;
@@ -1100,16 +1102,16 @@ static void ib_uverbs_add_one(struct ib_device *device)
int ret;
if (!device->ops.alloc_ucontext)
- return;
+ return -EOPNOTSUPP;
uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
if (!uverbs_dev)
- return;
+ return -ENOMEM;
ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
if (ret) {
kfree(uverbs_dev);
- return;
+ return -ENOMEM;
}
device_initialize(&uverbs_dev->dev);
@@ -1129,15 +1131,18 @@ static void ib_uverbs_add_one(struct ib_device *device)
devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
GFP_KERNEL);
- if (devnum < 0)
+ if (devnum < 0) {
+ ret = -ENOMEM;
goto err;
+ }
uverbs_dev->devnum = devnum;
if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
else
base = IB_UVERBS_BASE_DEV + devnum;
- if (ib_uverbs_create_uapi(device, uverbs_dev))
+ ret = ib_uverbs_create_uapi(device, uverbs_dev);
+ if (ret)
goto err_uapi;
uverbs_dev->dev.devt = base;
@@ -1152,7 +1157,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
goto err_uapi;
ib_set_client_data(device, &uverbs_client, uverbs_dev);
- return;
+ return 0;
err_uapi:
ida_free(&uverbs_ida, devnum);
@@ -1161,7 +1166,7 @@ err:
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
put_device(&uverbs_dev->dev);
- return;
+ return ret;
}
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
@@ -1201,9 +1206,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
struct ib_uverbs_device *uverbs_dev = client_data;
int wait_clients = 1;
- if (!uverbs_dev)
- return;
-
cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
ida_free(&uverbs_ida, uverbs_dev->devnum);
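
The rename to default_async_file highlights the publication scheme: the first async event file is stored once under ucontext_lock with smp_store_release(), consumers read it locklessly with READ_ONCE() and immediately take their own reference, and the file's own reference keeps the object alive until ib_uverbs_release_file(). A stripped-down sketch of that publish/consume pattern with hypothetical types:

    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/compiler.h>
    #include <asm/barrier.h>

    struct demo_event_file {
            struct kref ref;
    };

    struct demo_file {
            struct mutex lock;
            struct demo_event_file *default_ev;     /* set once, never cleared */
    };

    /* Publisher: only the first event file becomes the default. */
    static void demo_set_default(struct demo_file *f, struct demo_event_file *ev)
    {
            mutex_lock(&f->lock);
            if (!f->default_ev) {
                    kref_get(&ev->ref);     /* reference owned by the file */
                    smp_store_release(&f->default_ev, ev);
            }
            mutex_unlock(&f->lock);
    }

    /* Consumer: lockless read; the file's reference keeps the count non-zero. */
    static struct demo_event_file *demo_get_default(struct demo_file *f)
    {
            struct demo_event_file *ev = READ_ONCE(f->default_ev);

            if (ev)
                    kref_get(&ev->ref);     /* caller drops this when done */
            return ev;
    }
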
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 3abfc63225cb..08c39cfb1bd9 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -75,40 +75,6 @@ static int uverbs_free_mw(struct ib_uobject *uobject,
return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
}
-static int uverbs_free_qp(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_qp *qp = uobject->object;
- struct ib_uqp_object *uqp =
- container_of(uobject, struct ib_uqp_object, uevent.uobject);
- int ret;
-
- /*
- * If this is a user triggered destroy then do not allow destruction
- * until the user cleans up all the mcast bindings. Unlike in other
- * places we forcibly clean up the mcast attachments for !DESTROY
- * because the mcast attaches are not ubojects and will not be
- * destroyed by anything else during cleanup processing.
- */
- if (why == RDMA_REMOVE_DESTROY) {
- if (!list_empty(&uqp->mcast_list))
- return -EBUSY;
- } else if (qp == qp->real_qp) {
- ib_uverbs_detach_umcast(qp, uqp);
- }
-
- ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- if (uqp->uxrcd)
- atomic_dec(&uqp->uxrcd->refcnt);
-
- ib_uverbs_release_uevent(&uqp->uevent);
- return ret;
-}
-
static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -125,48 +91,6 @@ static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
return ret;
}
-static int uverbs_free_wq(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_wq *wq = uobject->object;
- struct ib_uwq_object *uwq =
- container_of(uobject, struct ib_uwq_object, uevent.uobject);
- int ret;
-
- ret = ib_destroy_wq(wq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- ib_uverbs_release_uevent(&uwq->uevent);
- return ret;
-}
-
-static int uverbs_free_srq(struct ib_uobject *uobject,
- enum rdma_remove_reason why,
- struct uverbs_attr_bundle *attrs)
-{
- struct ib_srq *srq = uobject->object;
- struct ib_uevent_object *uevent =
- container_of(uobject, struct ib_uevent_object, uobject);
- enum ib_srq_type srq_type = srq->srq_type;
- int ret;
-
- ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
- if (ib_is_destroy_retryable(ret, why, uobject))
- return ret;
-
- if (srq_type == IB_SRQT_XRC) {
- struct ib_usrq_object *us =
- container_of(uevent, struct ib_usrq_object, uevent);
-
- atomic_dec(&us->uxrcd->refcnt);
- }
-
- ib_uverbs_release_uevent(uevent);
- return ret;
-}
-
static int uverbs_free_xrcd(struct ib_uobject *uobject,
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs)
@@ -252,10 +176,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
"[infinibandevent]",
O_RDONLY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_QP,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_MW_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE,
@@ -267,11 +187,6 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW,
UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw),
&UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_SRQ,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
- uverbs_free_srq));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_AH_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE,
@@ -296,10 +211,6 @@ DECLARE_UVERBS_NAMED_OBJECT(
uverbs_free_flow),
&UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY));
-DECLARE_UVERBS_NAMED_OBJECT(
- UVERBS_OBJECT_WQ,
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq));
-
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
UVERBS_METHOD_RWQ_IND_TBL_DESTROY,
UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE,
@@ -340,18 +251,12 @@ const struct uapi_definition uverbs_def_obj_intf[] = {
UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH,
UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW,
UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW,
UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)),
- UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
- UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
UVERBS_OBJECT_RWQ_IND_TBL,
UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)),
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index da4110a0eea2..5dce2c7cc323 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -100,6 +100,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
uverbs_uobject_get(ev_file_uobj);
}
+ obj->uevent.event_file = ib_uverbs_get_async_event(
+ attrs, UVERBS_ATTR_CREATE_CQ_EVENT_FD);
+
if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) {
ret = -EINVAL;
goto err_event_file;
@@ -129,19 +132,17 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
obj->uevent.uobject.object = cq;
obj->uevent.uobject.user_handle = user_handle;
rdma_restrack_uadd(&cq->res);
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE);
ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
sizeof(cq->cqe));
- if (ret)
- goto err_cq;
+ return ret;
- return 0;
-err_cq:
- ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
- cq = NULL;
err_free:
kfree(cq);
err_event_file:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
if (ev_file)
uverbs_uobject_put(ev_file_uobj);
return ret;
@@ -171,6 +172,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_TYPE(u32),
UA_MANDATORY),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_CQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
UVERBS_ATTR_UHW());
static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)(
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index c1286a52dc84..a2722ef8496e 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -136,21 +136,15 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
uobj->object = mr;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
+
ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey,
sizeof(mr->lkey));
if (ret)
- goto err_dereg;
+ return ret;
ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
&mr->rkey, sizeof(mr->rkey));
- if (ret)
- goto err_dereg;
-
- return 0;
-
-err_dereg:
- ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
-
return ret;
}
diff --git a/drivers/infiniband/core/uverbs_std_types_qp.c b/drivers/infiniband/core/uverbs_std_types_qp.c
new file mode 100644
index 000000000000..3bf8dcdfe7eb
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_qp.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+#include "core_priv.h"
+
+static int uverbs_free_qp(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_qp *qp = uobject->object;
+ struct ib_uqp_object *uqp =
+ container_of(uobject, struct ib_uqp_object, uevent.uobject);
+ int ret;
+
+ /*
+ * If this is a user triggered destroy then do not allow destruction
+ * until the user cleans up all the mcast bindings. Unlike in other
+ * places we forcibly clean up the mcast attachments for !DESTROY
+ * because the mcast attaches are not uobjects and will not be
+ * destroyed by anything else during cleanup processing.
+ */
+ if (why == RDMA_REMOVE_DESTROY) {
+ if (!list_empty(&uqp->mcast_list))
+ return -EBUSY;
+ } else if (qp == qp->real_qp) {
+ ib_uverbs_detach_umcast(qp, uqp);
+ }
+
+ ret = ib_destroy_qp_user(qp, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ if (uqp->uxrcd)
+ atomic_dec(&uqp->uxrcd->refcnt);
+
+ ib_uverbs_release_uevent(&uqp->uevent);
+ return ret;
+}
+
+static int check_creation_flags(enum ib_qp_type qp_type,
+ u32 create_flags)
+{
+ create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+
+ if (!create_flags || qp_type == IB_QPT_DRIVER)
+ return 0;
+
+ if (qp_type != IB_QPT_RAW_PACKET && qp_type != IB_QPT_UD)
+ return -EINVAL;
+
+ if ((create_flags & IB_UVERBS_QP_CREATE_SCATTER_FCS ||
+ create_flags & IB_UVERBS_QP_CREATE_CVLAN_STRIPPING) &&
+ qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void set_caps(struct ib_qp_init_attr *attr,
+ struct ib_uverbs_qp_cap *cap, bool req)
+{
+ if (req) {
+ attr->cap.max_send_wr = cap->max_send_wr;
+ attr->cap.max_recv_wr = cap->max_recv_wr;
+ attr->cap.max_send_sge = cap->max_send_sge;
+ attr->cap.max_recv_sge = cap->max_recv_sge;
+ attr->cap.max_inline_data = cap->max_inline_data;
+ } else {
+ cap->max_send_wr = attr->cap.max_send_wr;
+ cap->max_recv_wr = attr->cap.max_recv_wr;
+ cap->max_send_sge = attr->cap.max_send_sge;
+ cap->max_recv_sge = attr->cap.max_recv_sge;
+ cap->max_inline_data = attr->cap.max_inline_data;
+ }
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uqp_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_QP_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_qp_init_attr attr = {};
+ struct ib_uverbs_qp_cap cap = {};
+ struct ib_rwq_ind_table *rwq_ind_tbl = NULL;
+ struct ib_qp *qp;
+ struct ib_pd *pd = NULL;
+ struct ib_srq *srq = NULL;
+ struct ib_cq *recv_cq = NULL;
+ struct ib_cq *send_cq = NULL;
+ struct ib_xrcd *xrcd = NULL;
+ struct ib_uobject *xrcd_uobj = NULL;
+ struct ib_device *device;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from_or_zero(&cap, attrs,
+ UVERBS_ATTR_CREATE_QP_CAP);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_QP_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.qp_type, attrs,
+ UVERBS_ATTR_CREATE_QP_TYPE);
+ if (ret)
+ return ret;
+
+ switch (attr.qp_type) {
+ case IB_QPT_XRC_TGT:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE))
+ return -EINVAL;
+
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!xrcd)
+ return -EINVAL;
+ device = xrcd->device;
+ break;
+ case IB_UVERBS_QPT_RAW_PACKET:
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+ fallthrough;
+ case IB_UVERBS_QPT_RC:
+ case IB_UVERBS_QPT_UC:
+ case IB_UVERBS_QPT_UD:
+ case IB_UVERBS_QPT_XRC_INI:
+ case IB_UVERBS_QPT_DRIVER:
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE) ||
+ (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE) &&
+ attr.qp_type == IB_QPT_XRC_INI))
+ return -EINVAL;
+
+ pd = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ rwq_ind_tbl = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE);
+ if (!IS_ERR(rwq_ind_tbl)) {
+ if (cap.max_recv_wr || cap.max_recv_sge ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE) ||
+ uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE))
+ return -EINVAL;
+
+ /* send_cq is optional */
+ if (cap.max_send_wr) {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+ }
+ attr.rwq_ind_tbl = rwq_ind_tbl;
+ } else {
+ send_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE);
+ if (IS_ERR(send_cq))
+ return PTR_ERR(send_cq);
+
+ if (attr.qp_type != IB_QPT_XRC_INI) {
+ recv_cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE);
+ if (IS_ERR(recv_cq))
+ return PTR_ERR(recv_cq);
+ }
+ }
+
+ device = pd->device;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = uverbs_get_flags32(&attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_QP_FLAGS,
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_UVERBS_QP_CREATE_SCATTER_FCS |
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING |
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING |
+ IB_UVERBS_QP_CREATE_SQ_SIG_ALL);
+ if (ret)
+ return ret;
+
+ ret = check_creation_flags(attr.qp_type, attr.create_flags);
+ if (ret)
+ return ret;
+
+ if (uverbs_attr_is_valid(attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN)) {
+ ret = uverbs_copy_from(&attr.source_qpn, attrs,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN);
+ if (ret)
+ return ret;
+ attr.create_flags |= IB_QP_CREATE_SOURCE_QPN;
+ }
+
+ srq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE);
+ if (!IS_ERR(srq)) {
+ if ((srq->srq_type == IB_SRQT_XRC &&
+ attr.qp_type != IB_QPT_XRC_TGT) ||
+ (srq->srq_type != IB_SRQT_XRC &&
+ attr.qp_type == IB_QPT_XRC_TGT))
+ return -EINVAL;
+ attr.srq = srq;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_QP_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ INIT_LIST_HEAD(&obj->mcast_list);
+ obj->uevent.uobject.user_handle = user_handle;
+ attr.event_handler = ib_uverbs_qp_event_handler;
+ attr.send_cq = send_cq;
+ attr.recv_cq = recv_cq;
+ attr.xrcd = xrcd;
+ if (attr.create_flags & IB_UVERBS_QP_CREATE_SQ_SIG_ALL) {
+ /* This create flag is uverbs-only; mask it out before
+ * calling the driver. It exists to avoid an extra user
+ * attribute just for this when using ioctl.
+ */
+ attr.create_flags &= ~IB_UVERBS_QP_CREATE_SQ_SIG_ALL;
+ attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ } else {
+ attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ }
+
+ set_caps(&attr, &cap, true);
+ mutex_init(&obj->mcast_lock);
+
+ if (attr.qp_type == IB_QPT_XRC_TGT)
+ qp = ib_create_qp(pd, &attr);
+ else
+ qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata,
+ obj);
+
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto err_put;
+ }
+
+ if (attr.qp_type != IB_QPT_XRC_TGT) {
+ atomic_inc(&pd->usecnt);
+ if (attr.send_cq)
+ atomic_inc(&attr.send_cq->usecnt);
+ if (attr.recv_cq)
+ atomic_inc(&attr.recv_cq->usecnt);
+ if (attr.srq)
+ atomic_inc(&attr.srq->usecnt);
+ if (attr.rwq_ind_tbl)
+ atomic_inc(&attr.rwq_ind_tbl->usecnt);
+ } else {
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ /* It is done in _ib_create_qp for other QP types */
+ qp->uobject = obj;
+ }
+
+ obj->uevent.uobject.object = qp;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_QP_HANDLE);
+
+ if (attr.qp_type != IB_QPT_XRC_TGT) {
+ ret = ib_create_qp_security(qp, device);
+ if (ret)
+ return ret;
+ }
+
+ set_caps(&attr, &cap, false);
+ ret = uverbs_copy_to_struct_or_zero(attrs,
+ UVERBS_ATTR_CREATE_QP_RESP_CAP, &cap,
+ sizeof(cap));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ &qp->qp_num,
+ sizeof(qp->qp_num));
+
+ return ret;
+err_put:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_QP_TYPE,
+ enum ib_uverbs_qp_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_QP_FLAGS,
+ enum ib_uverbs_qp_create_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_QP_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_CAP,
+ UVERBS_ATTR_STRUCT(struct ib_uverbs_qp_cap,
+ max_inline_data),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_QP_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_QP_HANDLE);
+ struct ib_uqp_object *obj =
+ container_of(uobj, struct ib_uqp_object, uevent.uobject);
+ struct ib_uverbs_destroy_qp_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_QP_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_QP_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_QP_HANDLE,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_QP_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_qp_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_QP,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_QP_DESTROY));
+
+const struct uapi_definition uverbs_def_obj_qp[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_srq.c b/drivers/infiniband/core/uverbs_std_types_srq.c
new file mode 100644
index 000000000000..c0ecbba26bf4
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_srq.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_srq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_srq *srq = uobject->object;
+ struct ib_uevent_object *uevent =
+ container_of(uobject, struct ib_uevent_object, uobject);
+ enum ib_srq_type srq_type = srq->srq_type;
+ int ret;
+
+ ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ if (srq_type == IB_SRQT_XRC) {
+ struct ib_usrq_object *us =
+ container_of(uobject, struct ib_usrq_object,
+ uevent.uobject);
+
+ atomic_dec(&us->uxrcd->refcnt);
+ }
+
+ ib_uverbs_release_uevent(uevent);
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_usrq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_SRQ_PD_HANDLE);
+ struct ib_srq_init_attr attr = {};
+ struct ib_uobject *xrcd_uobj;
+ struct ib_srq *srq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_copy_from(&attr.attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.attr.srq_limit, attrs,
+ UVERBS_ATTR_CREATE_SRQ_LIMIT);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_SRQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&attr.srq_type, attrs,
+ UVERBS_ATTR_CREATE_SRQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (ib_srq_has_cq(attr.srq_type)) {
+ attr.ext.cq = uverbs_attr_get_obj(attrs,
+ UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE);
+ if (IS_ERR(attr.ext.cq))
+ return PTR_ERR(attr.ext.cq);
+ }
+
+ switch (attr.srq_type) {
+ case IB_UVERBS_SRQT_XRC:
+ xrcd_uobj = uverbs_attr_get_uobject(attrs,
+ UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE);
+ if (IS_ERR(xrcd_uobj))
+ return PTR_ERR(xrcd_uobj);
+
+ attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
+ if (!attr.ext.xrc.xrcd)
+ return -EINVAL;
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
+ break;
+ case IB_UVERBS_SRQT_TM:
+ ret = uverbs_copy_from(&attr.ext.tag_matching.max_num_tags,
+ attrs,
+ UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS);
+ if (ret)
+ return ret;
+ break;
+ case IB_UVERBS_SRQT_BASIC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_SRQ_EVENT_FD);
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ attr.event_handler = ib_uverbs_srq_event_handler;
+ obj->uevent.uobject.user_handle = user_handle;
+
+ srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata);
+ if (IS_ERR(srq)) {
+ ret = PTR_ERR(srq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = srq;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_SRQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ &attr.attr.max_wr,
+ sizeof(attr.attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ &attr.attr.max_sge,
+ sizeof(attr.attr.max_sge));
+ if (ret)
+ return ret;
+
+ if (attr.srq_type == IB_SRQT_XRC) {
+ ret = uverbs_copy_to(attrs,
+ UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ &srq->ext.xrc.srq_num,
+ sizeof(srq->ext.xrc.srq_num));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ if (attr.srq_type == IB_SRQT_XRC)
+ atomic_dec(&obj->uxrcd->refcnt);
+ return ret;
+};
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_SRQ_TYPE,
+ enum ib_uverbs_srq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_LIMIT,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_SRQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_SRQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_SRQ_HANDLE);
+ struct ib_usrq_object *obj =
+ container_of(uobj, struct ib_usrq_object, uevent.uobject);
+ struct ib_uverbs_destroy_srq_resp resp = {
+ .events_reported = obj->uevent.events_reported
+ };
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_SRQ_RESP, &resp,
+ sizeof(resp));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_SRQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_SRQ_HANDLE,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_SRQ_RESP,
+ UVERBS_ATTR_TYPE(struct ib_uverbs_destroy_srq_resp),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_SRQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object),
+ uverbs_free_srq),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_SRQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_srq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_std_types_wq.c b/drivers/infiniband/core/uverbs_std_types_wq.c
new file mode 100644
index 000000000000..cad842ede077
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_std_types_wq.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_free_wq(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_wq *wq = uobject->object;
+ struct ib_uwq_object *uwq =
+ container_of(uobject, struct ib_uwq_object, uevent.uobject);
+ int ret;
+
+ ret = ib_destroy_wq(wq, &attrs->driver_udata);
+ if (ib_is_destroy_retryable(ret, why, uobject))
+ return ret;
+
+ ib_uverbs_release_uevent(&uwq->uevent);
+ return ret;
+}
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uwq_object *obj = container_of(
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE),
+ typeof(*obj), uevent.uobject);
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_PD_HANDLE);
+ struct ib_cq *cq =
+ uverbs_attr_get_obj(attrs, UVERBS_ATTR_CREATE_WQ_CQ_HANDLE);
+ struct ib_wq_init_attr wq_init_attr = {};
+ struct ib_wq *wq;
+ u64 user_handle;
+ int ret;
+
+ ret = uverbs_get_flags32(&wq_init_attr.create_flags, attrs,
+ UVERBS_ATTR_CREATE_WQ_FLAGS,
+ IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING |
+ IB_UVERBS_WQ_FLAGS_SCATTER_FCS |
+ IB_UVERBS_WQ_FLAGS_DELAY_DROP |
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_sge, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_SGE);
+ if (!ret)
+ ret = uverbs_copy_from(&wq_init_attr.max_wr, attrs,
+ UVERBS_ATTR_CREATE_WQ_MAX_WR);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs,
+ UVERBS_ATTR_CREATE_WQ_USER_HANDLE);
+ if (!ret)
+ ret = uverbs_get_const(&wq_init_attr.wq_type, attrs,
+ UVERBS_ATTR_CREATE_WQ_TYPE);
+ if (ret)
+ return ret;
+
+ if (wq_init_attr.wq_type != IB_WQT_RQ)
+ return -EINVAL;
+
+ obj->uevent.event_file = ib_uverbs_get_async_event(attrs,
+ UVERBS_ATTR_CREATE_WQ_EVENT_FD);
+ obj->uevent.uobject.user_handle = user_handle;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
+ wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+ wq_init_attr.wq_context = attrs->ufile;
+ wq_init_attr.cq = cq;
+
+ wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
+ if (IS_ERR(wq)) {
+ ret = PTR_ERR(wq);
+ goto err;
+ }
+
+ obj->uevent.uobject.object = wq;
+ wq->wq_type = wq_init_attr.wq_type;
+ wq->cq = cq;
+ wq->pd = pd;
+ wq->device = pd->device;
+ wq->wq_context = wq_init_attr.wq_context;
+ atomic_set(&wq->usecnt, 0);
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&cq->usecnt);
+ wq->uobject = obj;
+ uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_WQ_HANDLE);
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ &wq_init_attr.max_wr,
+ sizeof(wq_init_attr.max_wr));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ &wq_init_attr.max_sge,
+ sizeof(wq_init_attr.max_sge));
+ if (ret)
+ return ret;
+
+ ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ &wq->wq_num,
+ sizeof(wq->wq_num));
+ return ret;
+
+err:
+ if (obj->uevent.event_file)
+ uverbs_uobject_put(&obj->uevent.event_file->uobj);
+ return ret;
+}
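+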
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_CREATE,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(UVERBS_ATTR_CREATE_WQ_TYPE,
+ enum ib_wq_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_USER_HANDLE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CREATE_WQ_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_CREATE_WQ_FLAGS,
+ enum ib_uverbs_wq_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(UVERBS_ATTR_CREATE_WQ_CQ_HANDLE,
+ UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_FD(UVERBS_ATTR_CREATE_WQ_EVENT_FD,
+ UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_UHW());
+
+static int UVERBS_HANDLER(UVERBS_METHOD_WQ_DESTROY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj =
+ uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_WQ_HANDLE);
+ struct ib_uwq_object *obj =
+ container_of(uobj, struct ib_uwq_object, uevent.uobject);
+
+ return uverbs_copy_to(attrs, UVERBS_ATTR_DESTROY_WQ_RESP,
+ &obj->uevent.events_reported,
+ sizeof(obj->uevent.events_reported));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ UVERBS_METHOD_WQ_DESTROY,
+ UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_WQ_HANDLE,
+ UVERBS_OBJECT_WQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_DESTROY_WQ_RESP,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ UVERBS_OBJECT_WQ,
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_CREATE),
+ &UVERBS_METHOD(UVERBS_METHOD_WQ_DESTROY)
+);
+
+const struct uapi_definition uverbs_def_obj_wq[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ,
+ UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)),
+ {}
+};
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 3f121ac31e0a..5addc8fae3f3 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -634,6 +634,9 @@ static const struct uapi_definition uverbs_core_api[] = {
UAPI_DEF_CHAIN(uverbs_def_obj_flow_action),
UAPI_DEF_CHAIN(uverbs_def_obj_intf),
UAPI_DEF_CHAIN(uverbs_def_obj_mr),
+ UAPI_DEF_CHAIN(uverbs_def_obj_qp),
+ UAPI_DEF_CHAIN(uverbs_def_obj_srq),
+ UAPI_DEF_CHAIN(uverbs_def_obj_wq),
UAPI_DEF_CHAIN(uverbs_def_write_intf),
{},
};
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 56a71337112c..53d6505c0c7b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>
+#include <rdma/lag.h>
#include "core_priv.h"
#include <trace/events/rdma_core.h>
@@ -500,8 +501,10 @@ rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,
u32 flags,
- struct ib_udata *udata)
+ struct ib_udata *udata,
+ struct net_device *xmit_slave)
{
+ struct rdma_ah_init_attr init_attr = {};
struct ib_device *device = pd->device;
struct ib_ah *ah;
int ret;
@@ -521,8 +524,11 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
ah->pd = pd;
ah->type = ah_attr->type;
ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
+ init_attr.ah_attr = ah_attr;
+ init_attr.flags = flags;
+ init_attr.xmit_slave = xmit_slave;
- ret = device->ops.create_ah(ah, ah_attr, flags, udata);
+ ret = device->ops.create_ah(ah, &init_attr, udata);
if (ret) {
kfree(ah);
return ERR_PTR(ret);
@@ -547,15 +553,22 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
u32 flags)
{
const struct ib_gid_attr *old_sgid_attr;
+ struct net_device *slave;
struct ib_ah *ah;
int ret;
ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
if (ret)
return ERR_PTR(ret);
-
- ah = _rdma_create_ah(pd, ah_attr, flags, NULL);
-
+ slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
+ (flags & RDMA_CREATE_AH_SLEEPABLE) ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (IS_ERR(slave)) {
+ rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
+ return ERR_CAST(slave);
+ }
+ ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
+ rdma_lag_put_ah_roce_slave(slave);
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
return ah;
}
@@ -594,7 +607,8 @@ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
}
}
- ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);
+ ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
+ udata, NULL);
out:
rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
@@ -967,15 +981,29 @@ EXPORT_SYMBOL(rdma_destroy_ah_user);
/* Shared receive queues */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr)
+/**
+ * ib_create_srq_user - Creates an SRQ associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the
+ * SRQ. If SRQ creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created SRQ.
+ * @uobject: uobject pointer if this is not a kernel SRQ
+ * @udata: udata pointer if this is not a kernel SRQ
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return. If ib_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata)
{
struct ib_srq *srq;
int ret;
- if (!pd->device->ops.create_srq)
- return ERR_PTR(-EOPNOTSUPP);
-
srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
if (!srq)
return ERR_PTR(-ENOMEM);
@@ -985,6 +1013,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
srq->event_handler = srq_init_attr->event_handler;
srq->srq_context = srq_init_attr->srq_context;
srq->srq_type = srq_init_attr->srq_type;
+ srq->uobject = uobject;
if (ib_srq_has_cq(srq->srq_type)) {
srq->ext.cq = srq_init_attr->ext.cq;
@@ -996,7 +1025,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
}
atomic_inc(&pd->usecnt);
- ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
+ ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
if (ret) {
atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC)
@@ -1009,7 +1038,7 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
return srq;
}
-EXPORT_SYMBOL(ib_create_srq);
+EXPORT_SYMBOL(ib_create_srq_user);
int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
@@ -1633,11 +1662,35 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
const struct ib_gid_attr *old_sgid_attr_alt_av;
int ret;
+ attr->xmit_slave = NULL;
if (attr_mask & IB_QP_AV) {
ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
&old_sgid_attr_av);
if (ret)
return ret;
+
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
+ struct net_device *slave;
+
+ /*
+ * If the user provided the qp_attr then we have to
+ * resolve it. Kernel users have to provide already
+ * resolved rdma_ah_attr's.
+ */
+ if (udata) {
+ ret = ib_resolve_eth_dmac(qp->device,
+ &attr->ah_attr);
+ if (ret)
+ goto out_av;
+ }
+ slave = rdma_lag_get_ah_roce_slave(qp->device,
+ &attr->ah_attr,
+ GFP_KERNEL);
+ if (IS_ERR(slave)) {
+ ret = PTR_ERR(slave);
+ goto out_av;
+ }
+ attr->xmit_slave = slave;
+ }
}
if (attr_mask & IB_QP_ALT_PATH) {
/*
@@ -1664,18 +1717,6 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
}
}
- /*
- * If the user provided the qp_attr then we have to resolve it. Kernel
- * users have to provide already resolved rdma_ah_attr's
- */
- if (udata && (attr_mask & IB_QP_AV) &&
- attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
- is_qp_type_connected(qp)) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- goto out;
- }
-
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
dev_warn(&qp->device->dev,
@@ -1717,8 +1758,10 @@ out:
if (attr_mask & IB_QP_ALT_PATH)
rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
out_av:
- if (attr_mask & IB_QP_AV)
+ if (attr_mask & IB_QP_AV) {
+ rdma_lag_put_ah_roce_slave(attr->xmit_slave);
rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
+ }
return ret;
}
@@ -1962,6 +2005,9 @@ EXPORT_SYMBOL(__ib_create_cq);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
return cq->device->ops.modify_cq ?
cq->device->ops.modify_cq(cq, cq_count,
cq_period) : -EOPNOTSUPP;
@@ -1970,6 +2016,9 @@ EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
{
+ if (WARN_ON_ONCE(cq->shared))
+ return -EOPNOTSUPP;
+
if (atomic_read(&cq->usecnt))
return -EBUSY;
@@ -1982,6 +2031,9 @@ EXPORT_SYMBOL(ib_destroy_cq_user);
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
+ if (cq->shared)
+ return -EOPNOTSUPP;
+
return cq->device->ops.resize_cq ?
cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
}
@@ -2160,54 +2212,6 @@ out:
}
EXPORT_SYMBOL(ib_alloc_mr_integrity);
-/* "Fast" memory regions */
-
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct ib_fmr *fmr;
-
- if (!pd->device->ops.alloc_fmr)
- return ERR_PTR(-EOPNOTSUPP);
-
- fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
- if (!IS_ERR(fmr)) {
- fmr->device = pd->device;
- fmr->pd = pd;
- atomic_inc(&pd->usecnt);
- }
-
- return fmr;
-}
-EXPORT_SYMBOL(ib_alloc_fmr);
-
-int ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *fmr;
-
- if (list_empty(fmr_list))
- return 0;
-
- fmr = list_entry(fmr_list->next, struct ib_fmr, list);
- return fmr->device->ops.unmap_fmr(fmr_list);
-}
-EXPORT_SYMBOL(ib_unmap_fmr);
-
-int ib_dealloc_fmr(struct ib_fmr *fmr)
-{
- struct ib_pd *pd;
- int ret;
-
- pd = fmr->pd;
- ret = fmr->device->ops.dealloc_fmr(fmr);
- if (!ret)
- atomic_dec(&pd->usecnt);
-
- return ret;
-}
-EXPORT_SYMBOL(ib_dealloc_fmr);
-
/* Multicast groups */
static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
@@ -2574,6 +2578,7 @@ EXPORT_SYMBOL(ib_map_mr_sg_pi);
* @page_size: page vector desired page size
*
* Constraints:
+ *
* - The first sg element is allowed to have an offset.
* - Each sg element must either be aligned to page_size or virtually
* contiguous to the previous element. In case an sg element has a
@@ -2607,10 +2612,12 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* @mr: memory region
* @sgl: dma mapped scatterlist
* @sg_nents: number of entries in sg
- * @sg_offset_p: IN: start offset in bytes into sg
- * OUT: offset in bytes for element n of the sg of the first
+ * @sg_offset_p: ==== =======================================================
+ * IN start offset in bytes into sg
+ * OUT offset in bytes for element n of the sg of the first
* byte that has not been processed where n is the return
* value of this function.
+ * ==== =======================================================
* @set_page: driver page assignment function pointer
*
* Core service helper for drivers to convert the largest
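
ib_create_srq_user() above takes the uobject/udata pair only for user SRQs; kernel callers pass NULL for both. A hedged sketch of a kernel-side caller, assuming an already allocated PD and using only the field names visible in the hunk — the function name, event handler, and the max_wr/max_sge numbers are illustrative, not taken from this patch:

	#include <rdma/ib_verbs.h>

	/* Illustrative only: create a kernel-owned SRQ on an existing PD. */
	static void my_srq_event(struct ib_event *event, void *ctx)
	{
	}

	static struct ib_srq *my_alloc_srq(struct ib_pd *pd)
	{
		struct ib_srq_init_attr init_attr = {
			.event_handler	= my_srq_event,
			.srq_context	= NULL,
			.attr = {
				.max_wr		= 128,	/* requested depth */
				.max_sge	= 2,	/* requested SGEs per WR */
			},
			.srq_type	= IB_SRQT_BASIC,
		};
		struct ib_srq *srq;

		/* Kernel SRQ: no uobject and no udata. */
		srq = ib_create_srq_user(pd, &init_attr, NULL, NULL);
		if (IS_ERR(srq))
			return srq;

		/* On success max_wr/max_sge hold the values actually allocated. */
		pr_info("srq depth %u sge %u\n",
			init_attr.attr.max_wr, init_attr.attr.max_sge);
		return srq;
	}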
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 95f6d493d1b9..8b6ad5cddfce 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -177,9 +177,6 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah;
- ib_attr->max_fmr = 0;
- ib_attr->max_map_per_fmr = 0;
-
ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
ib_attr->max_srq_sge = dev_attr->max_srq_sges;
@@ -631,11 +628,12 @@ static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
return nw_type;
}
-int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct ib_pd *ib_pd = ib_ah->pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
struct bnxt_re_dev *rdev = pd->rdev;
const struct ib_gid_attr *sgid_attr;
@@ -673,7 +671,8 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
- !(flags & RDMA_CREATE_AH_SLEEPABLE));
+ !(init_attr->flags &
+ RDMA_CREATE_AH_SLEEPABLE));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
return rc;
@@ -856,7 +855,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
- bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
@@ -879,7 +878,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->qp_handle = ureq.qp_handle;
if (!qp->qplib_qp.srq) {
- bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
@@ -976,6 +975,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.sig_type = true;
/* Shadow QP SQ depth should be same as QP1 RQ depth */
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size();
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
@@ -986,6 +986,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq;
+ qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size();
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
@@ -1021,10 +1022,12 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *rq;
int entries;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
+ rq = &qplqp->rq;
dev_attr = &rdev->dev_attr;
if (init_attr->srq) {
@@ -1036,23 +1039,21 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
return -EINVAL;
}
qplqp->srq = &srq->qplib_srq;
- qplqp->rq.max_wqe = 0;
+ rq->max_wqe = 0;
} else {
+ rq->wqe_size = bnxt_re_get_rwqe_size();
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty.
*/
entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
- qplqp->rq.max_wqe = min_t(u32, entries,
- dev_attr->max_qp_wqes + 1);
-
- qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
- init_attr->cap.max_recv_wr;
- qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
- if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
- qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->q_full_delta = rq->max_wqe - init_attr->cap.max_recv_wr;
+ rq->max_sge = init_attr->cap.max_recv_sge;
+ if (rq->max_sge > dev_attr->max_qp_sges)
+ rq->max_sge = dev_attr->max_qp_sges;
}
- qplqp->rq.sg_info.pgsize = PAGE_SIZE;
- qplqp->rq.sg_info.pgshft = PAGE_SHIFT;
+ rq->sg_info.pgsize = PAGE_SIZE;
+ rq->sg_info.pgshft = PAGE_SHIFT;
return 0;
}
@@ -1080,15 +1081,18 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
int entries;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
dev_attr = &rdev->dev_attr;
- qplqp->sq.max_sge = init_attr->cap.max_send_sge;
- if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
- qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ sq->wqe_size = bnxt_re_get_swqe_size();
+ sq->max_sge = init_attr->cap.max_send_sge;
+ if (sq->max_sge > dev_attr->max_qp_sges)
+ sq->max_sge = dev_attr->max_qp_sges;
/*
* Change the SQ depth if user has requested minimum using
* configfs. Only supported for kernel consumers
@@ -1096,9 +1100,9 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
entries = init_attr->cap.max_send_wr;
/* Allocate 128 + 1 more than what's provided */
entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
- BNXT_QPLIB_RESERVED_QP_WRS + 1);
- qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+ BNXT_QPLIB_RESERVED_QP_WRS + 1);
+ sq->q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
/*
* Reserving one slot for Phantom WQE. Application can
* post one extra entry in this case. But allowing this to avoid
@@ -1511,7 +1515,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
return -EFAULT;
- bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
bytes = PAGE_ALIGN(bytes);
umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
IB_ACCESS_LOCAL_WRITE);
@@ -1534,15 +1538,20 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata)
{
- struct ib_pd *ib_pd = ib_srq->pd;
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_srq *srq =
- container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_nq *nq = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ struct bnxt_re_pd *pd;
+ struct ib_pd *ib_pd;
int rc, entries;
+ ib_pd = ib_srq->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = &rdev->dev_attr;
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
rc = -EINVAL;
@@ -1563,8 +1572,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
if (entries > dev_attr->max_srq_wqes + 1)
entries = dev_attr->max_srq_wqes + 1;
-
srq->qplib_srq.max_wqe = entries;
+
+ srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size();
srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 23d972da5652..e5fbbeba6d28 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -122,12 +122,6 @@ struct bnxt_re_frpl {
u64 *page_list;
};
-struct bnxt_re_fmr {
- struct bnxt_re_dev *rdev;
- struct ib_fmr ib_fmr;
- struct bnxt_qplib_mrw qplib_fmr;
-};
-
struct bnxt_re_mw {
struct bnxt_re_dev *rdev;
struct ib_mw ib_mw;
@@ -142,6 +136,16 @@ struct bnxt_re_ucontext {
spinlock_t sh_lock; /* protect shpg */
};
+static inline u16 bnxt_re_get_swqe_size(void)
+{
+ return sizeof(struct sq_send);
+}
+
+static inline u16 bnxt_re_get_rwqe_size(void)
+{
+ return sizeof(struct rq_wqe);
+}
+
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
@@ -160,7 +164,7 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 899a5d2c100e..c5e29577cd43 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -300,12 +300,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
{
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base *nqe, **nq_ptr;
- struct bnxt_qplib_cq *cq;
- int num_cqne_processed = 0;
int num_srqne_processed = 0;
+ int num_cqne_processed = 0;
+ struct bnxt_qplib_cq *cq;
int budget = nq->budget;
u32 sw_cons, raw_cons;
+ struct nq_base *nqe;
uintptr_t q_handle;
u16 type;
@@ -314,8 +314,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
raw_cons = hwq->cons;
while (budget--) {
sw_cons = HWQ_CMP(raw_cons, hwq);
- nq_ptr = (struct nq_base **)hwq->pbl_ptr;
- nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
+ nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
break;
@@ -392,13 +391,11 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_nq *nq = dev_instance;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
- struct nq_base **nq_ptr;
u32 sw_cons;
/* Prefetch the NQ element */
sw_cons = HWQ_CMP(hwq->cons, hwq);
- nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
- prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
/* Fan out to CPU affinitized kthreads? */
tasklet_schedule(&nq->nq_tasklet);
@@ -612,12 +609,13 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct cmdq_create_srq req;
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
+ u16 pg_sz_lvl;
int rc, idx;
hwq_attr.res = res;
hwq_attr.sginfo = &srq->sg_info;
hwq_attr.depth = srq->max_wqe;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = srq->wqe_size;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
@@ -638,22 +636,11 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
pbl = &srq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
- CMDQ_CREATE_SRQ_LVL_MASK) <<
- CMDQ_CREATE_SRQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
+ CMDQ_CREATE_SRQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT;
+ req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
req.pd_id = cpu_to_le32(srq->pd->id);
req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
@@ -740,7 +727,7 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_swqe *wqe)
{
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- struct rq_wqe *srqe, **srqe_ptr;
+ struct rq_wqe *srqe;
struct sq_sge *hw_sge;
u32 sw_prod, sw_cons, count = 0;
int i, rc = 0, next;
@@ -758,9 +745,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
spin_unlock(&srq_hwq->lock);
sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
- srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
- srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
- memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
+ memset(srqe, 0, srq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
i < wqe->num_sge; i++, hw_sge++) {
@@ -809,6 +795,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0;
u32 qp_flags = 0;
+ u8 pg_sz_lvl;
int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -822,7 +809,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.depth = sq->max_wqe;
- hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.stride = sq->wqe_size;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -835,33 +822,18 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
- << CMDQ_CREATE_QP1_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
-
- qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
-
/* RQ */
if (rq->max_wqe) {
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = rq->wqe_size;
hwq_attr.depth = qp->rq.max_wqe;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
@@ -876,32 +848,20 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &rq->hwq.pbl[PBL_LVL_0];
req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP1_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
if (qp->rcq)
req.rcq_cid = cpu_to_le32(qp->rcq->id);
}
-
/* Header buffer - allow hdr_buf pass in */
rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
if (rc) {
rc = -ENOMEM;
goto fail;
}
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
req.qp_flags = cpu_to_le32(qp_flags);
req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req.rq_size = cpu_to_le32(rq->hwq.max_elements);
@@ -948,23 +908,47 @@ exit:
return rc;
}
+static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
+{
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_q *sq;
+ u64 fpsne, psne, psn_pg;
+ u16 indx_pad = 0, indx;
+ u16 pg_num, pg_indx;
+ u64 *page;
+
+ sq = &qp->sq;
+ hwq = &sq->hwq;
+
+ fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->max_elements, &psn_pg);
+ if (!IS_ALIGNED(fpsne, PAGE_SIZE))
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
+
+ page = (u64 *)psn_pg;
+ for (indx = 0; indx < hwq->max_elements; indx++) {
+ pg_num = (indx + indx_pad) / (PAGE_SIZE / size);
+ pg_indx = (indx + indx_pad) % (PAGE_SIZE / size);
+ psne = page[pg_num] + pg_indx * size;
+ sq->swq[indx].psn_ext = (struct sq_psn_search_ext *)psne;
+ sq->swq[indx].psn_search = (struct sq_psn_search *)psne;
+ }
+}
+
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
- unsigned long int psn_search, poff = 0;
struct bnxt_qplib_sg_info sginfo = {};
- struct sq_psn_search **psn_search_ptr;
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq;
- int i, rc, req_size, psn_sz = 0;
- struct sq_send **hw_sq_send_ptr;
struct creq_create_qp_resp resp;
+ int rc, req_size, psn_sz = 0;
struct bnxt_qplib_hwq *xrrq;
u16 cmd_flags = 0, max_ssge;
- struct cmdq_create_qp req;
struct bnxt_qplib_pbl *pbl;
+ struct cmdq_create_qp req;
u32 qp_flags = 0;
+ u8 pg_sz_lvl;
u16 max_rsge;
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -983,7 +967,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_SQE_ENTRY_SIZE;
+ hwq_attr.stride = sq->wqe_size;
hwq_attr.depth = sq->max_wqe;
hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = hwq_attr.depth;
@@ -997,64 +981,25 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rc = -ENOMEM;
goto fail_sq;
}
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- if (psn_sz) {
- psn_search_ptr = (struct sq_psn_search **)
- &hw_sq_send_ptr[get_sqe_pg
- (sq->hwq.max_elements)];
- psn_search = (unsigned long int)
- &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
- [get_sqe_idx(sq->hwq.max_elements)];
- if (psn_search & ~PAGE_MASK) {
- /* If the psn_search does not start on a page boundary,
- * then calculate the offset
- */
- poff = (psn_search & ~PAGE_MASK) /
- BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
- }
- for (i = 0; i < sq->hwq.max_elements; i++) {
- sq->swq[i].psn_search =
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- /*psns_ext will be used only for P5 chips. */
- sq->swq[i].psn_ext =
- (struct sq_psn_search_ext *)
- &psn_search_ptr[get_psne_pg(i + poff)]
- [get_psne_idx(i + poff)];
- }
- }
+
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.sq_pg_size_sq_lvl =
- ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
- << CMDQ_CREATE_QP_SQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
if (qp->scq)
req.scq_cid = cpu_to_le32(qp->scq->id);
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
- if (qp->sig_type)
- qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
-
/* RQ */
if (rq->max_wqe) {
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
- hwq_attr.stride = BNXT_QPLIB_MAX_RQE_ENTRY_SIZE;
+ hwq_attr.stride = rq->wqe_size;
hwq_attr.depth = rq->max_wqe;
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
@@ -1071,22 +1016,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
pbl = &rq->hwq.pbl[PBL_LVL_0];
req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
- req.rq_pg_size_rq_lvl =
- ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
- CMDQ_CREATE_QP_RQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
} else {
/* SRQ */
if (qp->srq) {
@@ -1097,7 +1030,13 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (qp->rcq)
req.rcq_cid = cpu_to_le32(qp->rcq->id);
+
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
+ if (qp->sig_type)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
req.qp_flags = cpu_to_le32(qp_flags);
+
req.sq_size = cpu_to_le32(sq->hwq.max_elements);
req.rq_size = cpu_to_le32(rq->hwq.max_elements);
qp->sq_hdr_buf = NULL;
@@ -1483,12 +1422,11 @@ bail:
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
int i;
for (i = 0; i < cq_hwq->max_elements; i++) {
- hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
continue;
/*
@@ -1615,6 +1553,34 @@ void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
return NULL;
}
+static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_psn_search_ext *psns_ext;
+ struct sq_psn_search *psns;
+ u32 flg_npsn;
+ u32 op_spsn;
+
+ psns = swq->psn_search;
+ psns_ext = swq->psn_ext;
+
+ op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
+ SQ_PSN_SEARCH_START_PSN_MASK);
+ op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
+ SQ_PSN_SEARCH_OPCODE_MASK);
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+
+ if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
+ } else {
+ psns->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns->flags_next_psn = cpu_to_le32(flg_npsn);
+ }
+}
+
void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *sq = &qp->sq;
@@ -1625,16 +1591,16 @@ void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe)
{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ int i, rc = 0, data_len = 0, pkt_num = 0;
struct bnxt_qplib_q *sq = &qp->sq;
+ struct sq_send *hw_sq_send_hdr;
struct bnxt_qplib_swq *swq;
- struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
- struct sq_sge *hw_sge;
- struct bnxt_qplib_nq_work *nq_work = NULL;
bool sch_handler = false;
- u32 sw_prod;
+ struct sq_sge *hw_sge;
u8 wqe_size16;
- int i, rc = 0, data_len = 0, pkt_num = 0;
__le32 temp32;
+ u32 sw_prod;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
@@ -1663,11 +1629,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
swq->start_psn = sq->psn & BTH_PSN_MASK;
- hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
- hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
- [get_sqe_idx(sw_prod)];
-
- memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+ hw_sq_send_hdr = bnxt_qplib_get_qe(&sq->hwq, sw_prod, NULL);
+ memset(hw_sq_send_hdr, 0, sq->wqe_size);
if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
/* Copy the inline data */
@@ -1854,28 +1817,8 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
goto done;
}
swq->next_psn = sq->psn & BTH_PSN_MASK;
- if (swq->psn_search) {
- u32 opcd_spsn;
- u32 flg_npsn;
-
- opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
- SQ_PSN_SEARCH_START_PSN_MASK);
- opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
- SQ_PSN_SEARCH_OPCODE_MASK);
- flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
- SQ_PSN_SEARCH_NEXT_PSN_MASK);
- if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
- swq->psn_ext->opcode_start_psn =
- cpu_to_le32(opcd_spsn);
- swq->psn_ext->flags_next_psn =
- cpu_to_le32(flg_npsn);
- } else {
- swq->psn_search->opcode_start_psn =
- cpu_to_le32(opcd_spsn);
- swq->psn_search->flags_next_psn =
- cpu_to_le32(flg_npsn);
- }
- }
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC)
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
queue_err:
if (sch_handler) {
/* Store the ULP info in the software structures */
@@ -1918,13 +1861,13 @@ void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swqe *wqe)
{
- struct bnxt_qplib_q *rq = &qp->rq;
- struct rq_wqe *rqe, **rqe_ptr;
- struct sq_sge *hw_sge;
struct bnxt_qplib_nq_work *nq_work = NULL;
+ struct bnxt_qplib_q *rq = &qp->rq;
bool sch_handler = false;
- u32 sw_prod;
+ struct sq_sge *hw_sge;
+ struct rq_wqe *rqe;
int i, rc = 0;
+ u32 sw_prod;
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
@@ -1941,10 +1884,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
rq->swq[sw_prod].wr_id = wqe->wr_id;
- rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
- rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
-
- memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ rqe = bnxt_qplib_get_qe(&rq->hwq, sw_prod, NULL);
+ memset(rqe, 0, rq->wqe_size);
/* Calculate wqe_size16 and data_len */
for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
@@ -1997,9 +1938,10 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct creq_create_cq_resp resp;
- struct cmdq_create_cq req;
struct bnxt_qplib_pbl *pbl;
+ struct cmdq_create_cq req;
u16 cmd_flags = 0;
+ u32 pg_sz_lvl;
int rc;
hwq_attr.res = res;
@@ -2020,22 +1962,13 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
}
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
-
req.cq_size = cpu_to_le32(cq->hwq.max_elements);
pbl = &cq->hwq.pbl[PBL_LVL_0];
- req.pg_size_lvl = cpu_to_le32(
- ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
- CMDQ_CREATE_CQ_LVL_SFT) |
- (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
- CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
-
+ pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
+ CMDQ_CREATE_CQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
+ req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
-
req.cq_fco_cnq_id = cpu_to_le32(
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT);
@@ -2194,13 +2127,13 @@ void bnxt_qplib_mark_qp_error(void *qp_handle)
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
- struct bnxt_qplib_q *sq = &qp->sq;
- struct bnxt_qplib_swq *swq;
u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
- struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
+ struct bnxt_qplib_q *sq = &qp->sq;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
struct bnxt_qplib_q *peek_sq;
+ struct bnxt_qplib_swq *swq;
+ struct cq_base *peek_hwcqe;
int i, rc = 0;
/* Normal mode */
@@ -2230,9 +2163,8 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
i = cq->hwq.max_elements;
while (i--) {
peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
- peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
- [CQE_IDX(peek_sw_cq_cons)];
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
+ peek_sw_cq_cons, NULL);
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
@@ -2294,11 +2226,11 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe, int *budget,
u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *sq;
- struct bnxt_qplib_cqe *cqe;
u32 sw_sq_cons, cqe_sq_cons;
struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *sq;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2408,10 +2340,10 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe,
int *budget)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
u32 wr_id_idx;
int rc = 0;
@@ -2483,10 +2415,10 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe **pcqe,
int *budget)
{
- struct bnxt_qplib_qp *qp;
- struct bnxt_qplib_q *rq;
struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
u32 wr_id_idx;
int rc = 0;
@@ -2561,15 +2493,13 @@ done:
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
bool rc = true;
raw_cons = cq->hwq.cons;
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
-
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
/* Check for Valid bit. If the CQE is valid, return false */
rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
return rc;
@@ -2813,7 +2743,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
- struct cq_base *hw_cqe, **hw_cqe_ptr;
+ struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
int budget, rc = 0;
@@ -2822,8 +2752,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
while (budget) {
sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
- hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
- hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
/* Check for Valid bit */
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
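
The create_srq/create_qp/create_cq hunks above replace the long page-size ternary chains with bnxt_qplib_base_pg_size() plus a shift/OR into a single pg_size_lvl field. A standalone sketch of that packing, with made-up shift and mask values standing in for the CMDQ_* constants:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical layout: level in bits 0-2, page-size code in bits 4-7. */
	#define DEMO_LVL_MASK		0x7
	#define DEMO_PG_SIZE_SFT	4

	/* Page-size codes as in the HWRM-style enum: 0=4K, 1=8K, 2=64K, ... */
	static uint16_t pack_pg_sz_lvl(uint8_t pg_size_code, uint8_t level)
	{
		uint16_t pg_sz_lvl;

		pg_sz_lvl = (uint16_t)pg_size_code << DEMO_PG_SIZE_SFT;
		pg_sz_lvl |= level & DEMO_LVL_MASK;
		return pg_sz_lvl;
	}

	int main(void)
	{
		/* 64K pages (code 2) with a 2-level PBL -> 0x22 in this layout. */
		printf("pg_sz_lvl = 0x%02x\n", pack_pg_sz_lvl(2, 2));
		return 0;
	}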
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 7edb70b6bb16..568ca390322c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -45,6 +45,7 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_db_info dbinfo;
u64 srq_handle;
u32 id;
+ u16 wqe_size;
u32 max_wqe;
u32 max_sge;
u32 threshold;
@@ -65,38 +66,7 @@ struct bnxt_qplib_sge {
u32 size;
};
-#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE sizeof(struct sq_send)
-
-#define SQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
-#define SQE_MAX_IDX_PER_PG (SQE_CNT_PER_PG - 1)
-
-static inline u32 get_sqe_pg(u32 val)
-{
- return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
-}
-
-static inline u32 get_sqe_idx(u32 val)
-{
- return (val & SQE_MAX_IDX_PER_PG);
-}
-
-#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE sizeof(struct sq_psn_search)
-
-#define PSNE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
-#define PSNE_MAX_IDX_PER_PG (PSNE_CNT_PER_PG - 1)
-
-static inline u32 get_psne_pg(u32 val)
-{
- return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
-}
-
-static inline u32 get_psne_idx(u32 val)
-{
- return (val & PSNE_MAX_IDX_PER_PG);
-}
-
#define BNXT_QPLIB_QP_MAX_SGL 6
-
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -226,19 +196,13 @@ struct bnxt_qplib_swqe {
};
};
-#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE sizeof(struct rq_wqe)
-
-#define RQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
-#define RQE_MAX_IDX_PER_PG (RQE_CNT_PER_PG - 1)
-#define RQE_PG(x) (((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
-#define RQE_IDX(x) ((x) & RQE_MAX_IDX_PER_PG)
-
struct bnxt_qplib_q {
struct bnxt_qplib_hwq hwq;
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
+ u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
u32 psn;
@@ -256,7 +220,7 @@ struct bnxt_qplib_qp {
struct bnxt_qplib_dpi *dpi;
struct bnxt_qplib_chip_ctx *cctx;
u64 qp_handle;
-#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
u8 type;
u8 sig_type;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index f01e864bb611..4e211162acee 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -89,10 +89,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
struct creq_base *resp, void *sb, u8 is_block)
{
struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
- struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr;
struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
struct bnxt_qplib_crsqe *crsqe;
- u32 cmdq_depth = rcfw->cmdq_depth;
+ struct bnxt_qplib_cmdqe *cmdqe;
u32 sw_prod, cmdq_prod;
struct pci_dev *pdev;
unsigned long flags;
@@ -163,13 +162,11 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
BNXT_QPLIB_CMDQE_UNITS;
}
- hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr;
preq = (u8 *)req;
do {
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(hwq->prod, hwq);
- cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
- [get_cmdq_idx(sw_prod, cmdq_depth)];
+ cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
if (!cmdqe) {
dev_err(&pdev->dev,
"RCFW request failed with no cmdqe!\n");
@@ -378,7 +375,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
- struct creq_base *creqe, **hwq_ptr;
+ struct creq_base *creqe;
u32 sw_cons, raw_cons;
unsigned long flags;
@@ -387,8 +384,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
raw_cons = hwq->cons;
while (budget > 0) {
sw_cons = HWQ_CMP(raw_cons, hwq);
- hwq_ptr = (struct creq_base **)hwq->pbl_ptr;
- creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
+ creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
break;
/* The valid test of the entry must be done first before
@@ -434,7 +430,6 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
struct bnxt_qplib_rcfw *rcfw = dev_instance;
struct bnxt_qplib_creq_ctx *creq;
- struct creq_base **creq_ptr;
struct bnxt_qplib_hwq *hwq;
u32 sw_cons;
@@ -442,8 +437,7 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
hwq = &creq->hwq;
/* Prefetch the CREQ element */
sw_cons = HWQ_CMP(hwq->cons, hwq);
- creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr;
- prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
+ prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
tasklet_schedule(&creq->creq_tasklet);
@@ -468,29 +462,13 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
return 0;
}
-static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
-{
- return (pbl->pg_size == ROCE_PG_SIZE_4K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
- pbl->pg_size == ROCE_PG_SIZE_8K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
- pbl->pg_size == ROCE_PG_SIZE_64K ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
- pbl->pg_size == ROCE_PG_SIZE_2M ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
- pbl->pg_size == ROCE_PG_SIZE_8M ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
- pbl->pg_size == ROCE_PG_SIZE_1G ?
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
- CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
-}
-
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
- struct cmdq_initialize_fw req;
struct creq_initialize_fw_resp resp;
- u16 cmd_flags = 0, level;
+ struct cmdq_initialize_fw req;
+ u16 cmd_flags = 0;
+ u8 pgsz, lvl;
int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
@@ -511,32 +489,30 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
goto config_vf_res;
- level = ctx->qpc_tbl.level;
- req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
- level = ctx->mrw_tbl.level;
- req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
- level = ctx->srqc_tbl.level;
- req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
- level = ctx->cq_tbl.level;
- req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
- level = ctx->srqc_tbl.level;
- req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
- level = ctx->cq_tbl.level;
- req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
- level = ctx->tim_tbl.level;
- req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
- level = ctx->tqm_ctx.pde.level;
- req.tqm_pg_size_tqm_lvl =
- (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
- __get_pbl_pg_idx(&ctx->tqm_ctx.pde.pbl[level]);
-
+ lvl = ctx->qpc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
+ req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->mrw_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
+ req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->srqc_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
+ req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->cq_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
+ req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tim_tbl.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
+ req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
+ lvl = ctx->tqm_ctx.pde.level;
+ pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
+ req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ lvl;
req.qpc_page_dir =
cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
req.mrw_page_dir =
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 411fce3493b6..157387636d00 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -87,12 +87,6 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
}
-static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
-{
- return (bnxt_qplib_cmdqe_page_size(depth) /
- BNXT_QPLIB_CMDQE_UNITS);
-}
-
/* Set the cmd_size to a factor of CMDQE unit */
static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
{
@@ -100,30 +94,12 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
BNXT_QPLIB_CMDQE_UNITS;
}
-#define MAX_CMDQ_IDX(depth) ((depth) - 1)
-
-static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
-{
- return (bnxt_qplib_cmdqe_cnt_per_pg(depth) - 1);
-}
-
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL
-static inline u32 get_cmdq_pg(u32 val, u32 depth)
-{
- return (val & ~(bnxt_qplib_max_cmdq_idx_per_pg(depth))) /
- (bnxt_qplib_cmdqe_cnt_per_pg(depth));
-}
-
-static inline u32 get_cmdq_idx(u32 val, u32 depth)
-{
- return val & (bnxt_qplib_max_cmdq_idx_per_pg(depth));
-}
-
/* Crsq buf is 1024-Byte */
struct bnxt_qplib_crsbe {
u8 data[1024];
@@ -133,76 +109,9 @@ struct bnxt_qplib_crsbe {
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
-#define BNXT_QPLIB_CREQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)
-
-#define MAX_CREQ_IDX (BNXT_QPLIB_CREQE_MAX_CNT - 1)
-#define MAX_CREQ_IDX_PER_PG (BNXT_QPLIB_CREQE_CNT_PER_PG - 1)
-
-static inline u32 get_creq_pg(u32 val)
-{
- return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
-}
-
-static inline u32 get_creq_idx(u32 val)
-{
- return val & MAX_CREQ_IDX_PER_PG;
-}
-
-#define BNXT_QPLIB_CREQE_PER_PG (PAGE_SIZE / sizeof(struct creq_base))
-
#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit) \
(!!((hdr)->v & CREQ_BASE_V) == \
!((raw_cons) & (cp_bit)))
-
-#define CREQ_DB_KEY_CP (0x2 << CMPL_DOORBELL_KEY_SFT)
-#define CREQ_DB_IDX_VALID CMPL_DOORBELL_IDX_VALID
-#define CREQ_DB_IRQ_DIS CMPL_DOORBELL_MASK
-#define CREQ_DB_CP_FLAGS_REARM (CREQ_DB_KEY_CP | \
- CREQ_DB_IDX_VALID)
-#define CREQ_DB_CP_FLAGS (CREQ_DB_KEY_CP | \
- CREQ_DB_IDX_VALID | \
- CREQ_DB_IRQ_DIS)
-
-static inline void bnxt_qplib_ring_creq_db64(void __iomem *db, u32 index,
- u32 xid, bool arm)
-{
- u64 val = 0;
-
- val = xid & DBC_DBC_XID_MASK;
- val |= DBC_DBC_PATH_ROCE;
- val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
- val <<= 32;
- val |= index & DBC_DBC_INDEX_MASK;
-
- writeq(val, db);
-}
-
-static inline void bnxt_qplib_ring_creq_db_rearm(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_creq_db64(db, index, xid, true);
- else
- writel(CREQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK),
- db);
-}
-
-static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
- u32 max_elements, u32 xid,
- bool gen_p5)
-{
- u32 index = raw_cons & (max_elements - 1);
-
- if (gen_p5)
- bnxt_qplib_ring_creq_db64(db, index, xid, true);
- else
- writel(CREQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK),
- db);
-}
-
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index cab1adf1fed9..7efa6e5dce62 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -347,6 +347,7 @@ done:
hwq->depth = hwq_attr->depth;
hwq->max_elements = depth;
hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
/* For direct access to the elements */
lvl = hwq->level;
if (hwq_attr->sginfo->nopte && hwq->level)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 95b645dbbc2d..c29cbd3a2d7b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -80,6 +80,15 @@ enum bnxt_qplib_pbl_lvl {
#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+enum bnxt_qplib_hwrm_pg_size {
+ BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
+ BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
+ BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
+ BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
+ BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
+ BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
+};
+
struct bnxt_qplib_reg_desc {
u8 bar_id;
resource_size_t bar_base;
@@ -126,6 +135,7 @@ struct bnxt_qplib_hwq {
u32 max_elements;
u32 depth;
u16 element_size; /* Size of each entry */
+ u16 qe_ppg; /* queue entries per page */
u32 prod; /* raw */
u32 cons; /* raw */
@@ -263,6 +273,49 @@ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
}
+static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
+{
+ u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ struct bnxt_qplib_pbl *pbl;
+
+ pbl = &hwq->pbl[PBL_LVL_0];
+ switch (pbl->pg_size) {
+ case ROCE_PG_SIZE_4K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ break;
+ case ROCE_PG_SIZE_8K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
+ break;
+ case ROCE_PG_SIZE_64K:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
+ break;
+ case ROCE_PG_SIZE_2M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
+ break;
+ case ROCE_PG_SIZE_8M:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
+ break;
+ case ROCE_PG_SIZE_1G:
+ pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
+ break;
+ default:
+ break;
+ }
+
+ return pg_size;
+}
+
+static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
+ u32 indx, u64 *pg)
+{
+ u32 pg_num, pg_idx;
+
+ pg_num = (indx / hwq->qe_ppg);
+ pg_idx = (indx % hwq->qe_ppg);
+ if (pg)
+ *pg = (u64)&hwq->pbl_ptr[pg_num];
+ return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
#define to_bnxt_qplib(ptr, type, member) \
container_of(ptr, type, member)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 66954ff6a2f2..4cd475ea97a2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -132,9 +132,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
attr->max_ah = le32_to_cpu(sb->max_ah);
- attr->max_fmr = le32_to_cpu(sb->max_fmr);
- attr->max_map_per_fmr = sb->max_map_per_fmr;
-
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 13d9432d5ce2..6404f0da1051 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -64,8 +64,6 @@ struct bnxt_qplib_dev_attr {
u32 max_mw;
u32 max_raw_ethy_qp;
u32 max_ah;
- u32 max_fmr;
- u32 max_map_per_fmr;
u32 max_srq;
u32 max_srq_wqes;
u32 max_srq_sges;
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index e4b09e7c2175..6f00f07420b7 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -210,6 +210,20 @@ struct sq_send {
__le32 data[24];
};
+/* sq_send_hdr (size:256b/32B) */
+struct sq_send_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8_1;
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 q_key;
+ __le32 dst_qp;
+ __le32 avid;
+ __le64 reserved64;
+};
+
/* Send Raw Ethernet and QP1 SQ WQE (40 bytes) */
struct sq_send_raweth_qp1 {
u8 wqe_type;
@@ -265,6 +279,21 @@ struct sq_send_raweth_qp1 {
__le32 data[24];
};
+/* sq_send_raweth_qp1_hdr (size:256b/32B) */
+struct sq_send_raweth_qp1_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le16 lflags;
+ __le16 cfa_action;
+ __le32 length;
+ __le32 reserved32_1;
+ __le32 cfa_meta;
+ __le32 reserved32_2;
+ __le64 reserved64;
+};
+
/* RDMA SQ WQE (40 bytes) */
struct sq_rdma {
u8 wqe_type;
@@ -288,6 +317,20 @@ struct sq_rdma {
__le32 data[24];
};
+/* sq_rdma_hdr (size:256b/32B) */
+struct sq_rdma_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 imm_data;
+ __le32 length;
+ __le32 reserved32_1;
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 reserved32_2;
+};
+
/* Atomic SQ WQE (40 bytes) */
struct sq_atomic {
u8 wqe_type;
@@ -307,6 +350,17 @@ struct sq_atomic {
__le32 data[24];
};
+/* sq_atomic_hdr (size:256b/32B) */
+struct sq_atomic_hdr {
+ u8 wqe_type;
+ u8 flags;
+ __le16 reserved16;
+ __le32 remote_key;
+ __le64 remote_va;
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
/* Local Invalidate SQ WQE (40 bytes) */
struct sq_localinvalidate {
u8 wqe_type;
@@ -324,6 +378,16 @@ struct sq_localinvalidate {
__le32 data[24];
};
+/* sq_localinvalidate_hdr (size:256b/32B) */
+struct sq_localinvalidate_hdr {
+ u8 wqe_type;
+ u8 flags;
+ __le16 reserved16;
+ __le32 inv_l_key;
+ __le64 reserved64;
+ u8 reserved128[16];
+};
+
/* FR-PMR SQ WQE (40 bytes) */
struct sq_fr_pmr {
u8 wqe_type;
@@ -380,6 +444,21 @@ struct sq_fr_pmr {
__le32 data[24];
};
+/* sq_fr_pmr_hdr (size:256b/32B) */
+struct sq_fr_pmr_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 access_cntl;
+ u8 zero_based_page_size_log;
+ __le32 l_key;
+ u8 length[5];
+ u8 reserved8_1;
+ u8 reserved8_2;
+ u8 numlevels_pbl_page_size_log;
+ __le64 pblptr;
+ __le64 va;
+};
+
/* Bind SQ WQE (40 bytes) */
struct sq_bind {
u8 wqe_type;
@@ -417,6 +496,22 @@ struct sq_bind {
#define SQ_BIND_DATA_SFT 0
};
+/* sq_bind_hdr (size:256b/32B) */
+struct sq_bind_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 access_cntl;
+ u8 reserved8_1;
+ u8 mw_type_zero_based;
+ u8 reserved8_2;
+ __le16 reserved16;
+ __le32 parent_l_key;
+ __le32 l_key;
+ __le64 va;
+ u8 length[5];
+ u8 reserved24[3];
+};
+
/* RQ/SRQ WQE Structures */
/* RQ/SRQ WQE (40 bytes) */
struct rq_wqe {
@@ -435,6 +530,17 @@ struct rq_wqe {
__le32 data[24];
};
+/* rq_wqe_hdr (size:256b/32B) */
+struct rq_wqe_hdr {
+ u8 wqe_type;
+ u8 flags;
+ u8 wqe_size;
+ u8 reserved8;
+ __le32 reserved32;
+ __le32 wr_id[2];
+ u8 reserved128[16];
+};
+
/* CQ CQE Structures */
/* Base CQE (32 bytes) */
struct cq_base {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 599340c1f0b8..541dbcf22d0e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -953,6 +953,7 @@ void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
pr_debug("c4iw_dev %p\n", ctx->dev);
+ debugfs_remove_recursive(ctx->dev->debugfs_root);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index aa7396a1588a..1889dd172a25 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_H_
@@ -40,6 +40,7 @@ struct efa_sw_stats {
atomic64_t reg_mr_err;
atomic64_t alloc_ucontext_err;
atomic64_t create_ah_err;
+ atomic64_t mmap_err;
};
/* Don't use anything other than atomic64 */
@@ -153,8 +154,7 @@ int efa_mmap(struct ib_ucontext *ibucontext,
struct vm_area_struct *vma);
void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
int efa_create_ah(struct ib_ah *ibah,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
+ struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void efa_destroy_ah(struct ib_ah *ibah, u32 flags);
int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 96b104ab5415..bef2bd291054 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -37,7 +37,7 @@ enum efa_admin_aq_feature_id {
EFA_ADMIN_NETWORK_ATTR = 3,
EFA_ADMIN_QUEUE_ATTR = 4,
EFA_ADMIN_HW_HINTS = 5,
- EFA_ADMIN_FEATURES_OPCODE_NUM = 8,
+ EFA_ADMIN_HOST_INFO = 6,
};
/* QP transport type */
@@ -799,6 +799,54 @@ struct efa_admin_mmio_req_read_less_resp {
u32 reg_val;
};
+enum efa_admin_os_type {
+ EFA_ADMIN_OS_LINUX = 0,
+};
+
+struct efa_admin_host_info {
+ /* OS distribution string format */
+ u8 os_dist_str[128];
+
+ /* Defined in enum efa_admin_os_type */
+ u32 os_type;
+
+ /* Kernel version string format */
+ u8 kernel_ver_str[32];
+
+ /* Kernel version numeric format */
+ u32 kernel_ver;
+
+ /*
+ * 7:0 : driver_module_type
+ * 15:8 : driver_sub_minor
+ * 23:16 : driver_minor
+ * 31:24 : driver_major
+ */
+ u32 driver_ver;
+
+ /*
+ * Device's Bus, Device and Function
+ * 2:0 : function
+ * 7:3 : device
+ * 15:8 : bus
+ */
+ u16 bdf;
+
+ /*
+ * Spec version
+ * 7:0 : spec_minor
+ * 15:8 : spec_major
+ */
+ u16 spec_ver;
+
+ /*
+ * 0 : intree - Intree driver
+ * 1 : gdr - GPUDirect RDMA supported
+ * 31:2 : reserved2
+ */
+ u32 flags;
+};
+
/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
@@ -820,4 +868,17 @@ struct efa_admin_mmio_req_read_less_resp {
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
+/* host_info */
+#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MINOR_MASK GENMASK(23, 16)
+#define EFA_ADMIN_HOST_INFO_DRIVER_MAJOR_MASK GENMASK(31, 24)
+#define EFA_ADMIN_HOST_INFO_FUNCTION_MASK GENMASK(2, 0)
+#define EFA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
+#define EFA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_SPEC_MINOR_MASK GENMASK(7, 0)
+#define EFA_ADMIN_HOST_INFO_SPEC_MAJOR_MASK GENMASK(15, 8)
+#define EFA_ADMIN_HOST_INFO_INTREE_MASK BIT(0)
+#define EFA_ADMIN_HOST_INFO_GDR_MASK BIT(1)
+
#endif /* _EFA_ADMIN_CMDS_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 7fce69f5568f..336bc2c57bb1 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -631,17 +631,20 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
up(&aq->avail_cmds);
+ atomic64_inc(&aq->stats.cmd_err);
return PTR_ERR(comp_ctx);
}
err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
- if (err)
+ if (err) {
ibdev_err_ratelimited(
aq->efa_dev,
"Failed to process command %s (opcode %u) comp_status %d err %d\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
cmd->aq_common_descriptor.opcode, comp_ctx->comp_status,
err);
+ atomic64_inc(&aq->stats.cmd_err);
+ }
up(&aq->avail_cmds);
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index c67dd8109d1c..5e4c88877ddb 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -47,6 +47,7 @@ struct efa_com_admin_sq {
struct efa_com_stats_admin {
atomic64_t submitted_cmd;
atomic64_t completed_cmd;
+ atomic64_t cmd_err;
atomic64_t no_completion;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index eea5574a62e8..fabd8df2e78f 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -351,7 +351,7 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
return 0;
}
-static bool
+bool
efa_com_check_supported_feature_id(struct efa_com_dev *edev,
enum efa_admin_aq_feature_id feature_id)
{
@@ -388,7 +388,7 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
if (control_buff_size)
EFA_SET(&get_cmd.aq_common_descriptor.flags,
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&get_cmd.control_buffer.address.mem_addr_high,
@@ -517,12 +517,12 @@ int efa_com_get_hw_hints(struct efa_com_dev *edev,
return 0;
}
-static int efa_com_set_feature_ex(struct efa_com_dev *edev,
- struct efa_admin_set_feature_resp *set_resp,
- struct efa_admin_set_feature_cmd *set_cmd,
- enum efa_admin_aq_feature_id feature_id,
- dma_addr_t control_buf_dma_addr,
- u32 control_buff_size)
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size)
{
struct efa_com_admin_queue *aq;
int err;
@@ -540,7 +540,7 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
if (control_buff_size) {
set_cmd->aq_common_descriptor.flags = 0;
EFA_SET(&set_cmd->aq_common_descriptor.flags,
- EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
+ EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&set_cmd->control_buffer.address.mem_addr_high,
&set_cmd->control_buffer.address.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 31db5a0cbd5b..41ce4a476ee6 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -270,6 +270,15 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result);
int efa_com_get_hw_hints(struct efa_com_dev *edev,
struct efa_com_get_hw_hints_result *result);
+bool
+efa_com_check_supported_feature_id(struct efa_com_dev *edev,
+ enum efa_admin_aq_feature_id feature_id);
+int efa_com_set_feature_ex(struct efa_com_dev *edev,
+ struct efa_admin_set_feature_resp *set_resp,
+ struct efa_admin_set_feature_cmd *set_cmd,
+ enum efa_admin_aq_feature_id feature_id,
+ dma_addr_t control_buf_dma_addr,
+ u32 control_buff_size);
int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups);
int efa_com_alloc_pd(struct efa_com_dev *edev,
struct efa_com_alloc_pd_result *result);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index faf3ff1bca2a..82145574c928 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include <rdma/ib_user_verbs.h>
@@ -187,6 +189,52 @@ static void efa_stats_init(struct efa_dev *dev)
atomic64_set(s, 0);
}
+static void efa_set_host_info(struct efa_dev *dev)
+{
+ struct efa_admin_set_feature_resp resp = {};
+ struct efa_admin_set_feature_cmd cmd = {};
+ struct efa_admin_host_info *hinf;
+ u32 bufsz = sizeof(*hinf);
+ dma_addr_t hinf_dma;
+
+ if (!efa_com_check_supported_feature_id(&dev->edev,
+ EFA_ADMIN_HOST_INFO))
+ return;
+
+ /* Failures in host info set shall not disturb probe */
+ hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
+ GFP_KERNEL);
+ if (!hinf)
+ return;
+
+ strlcpy(hinf->os_dist_str, utsname()->release,
+ min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
+ hinf->os_type = EFA_ADMIN_OS_LINUX;
+ strlcpy(hinf->kernel_ver_str, utsname()->version,
+ min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
+ hinf->kernel_ver = LINUX_VERSION_CODE;
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
+ EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
+ PCI_SLOT(dev->pdev->devfn));
+ EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
+ PCI_FUNC(dev->pdev->devfn));
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
+ EFA_COMMON_SPEC_VERSION_MAJOR);
+ EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
+ EFA_COMMON_SPEC_VERSION_MINOR);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
+ EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);
+
+ efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
+ hinf_dma, bufsz);
+
+ dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
+}
+
static const struct ib_device_ops efa_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_EFA,
@@ -251,6 +299,8 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
goto err_release_doorbell_bar;
+ efa_set_host_info(dev);
+
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = 1;
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 5c57098a4aee..08313f7c73bc 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -37,13 +37,16 @@ struct efa_user_mmap_entry {
op(EFA_RX_DROPS, "rx_drops") \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
+ op(EFA_CMDS_ERR, "cmds_err") \
op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
op(EFA_CREATE_QP_ERR, "create_qp_err") \
+ op(EFA_CREATE_CQ_ERR, "create_cq_err") \
op(EFA_REG_MR_ERR, "reg_mr_err") \
op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
- op(EFA_CREATE_AH_ERR, "create_ah_err")
+ op(EFA_CREATE_AH_ERR, "create_ah_err") \
+ op(EFA_MMAP_ERR, "mmap_err")
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,
@@ -1568,6 +1571,7 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
ibdev_dbg(&dev->ibdev,
"pgoff[%#lx] does not have valid entry\n",
vma->vm_pgoff);
+ atomic64_inc(&dev->stats.sw_stats.mmap_err);
return -EINVAL;
}
entry = to_emmap(rdma_entry);
@@ -1603,12 +1607,14 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
err = -EINVAL;
}
- if (err)
+ if (err) {
ibdev_dbg(
&dev->ibdev,
"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
entry->address, rdma_entry->npages * PAGE_SIZE,
entry->mmap_flag, err);
+ atomic64_inc(&dev->stats.sw_stats.mmap_err);
+ }
rdma_user_mmap_entry_put(rdma_entry);
return err;
@@ -1639,10 +1645,10 @@ static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
}
int efa_create_ah(struct ib_ah *ibah,
- struct rdma_ah_attr *ah_attr,
- u32 flags,
+ struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct efa_dev *dev = to_edev(ibah->device);
struct efa_com_create_ah_params params = {};
struct efa_ibv_create_ah_resp resp = {};
@@ -1650,7 +1656,7 @@ int efa_create_ah(struct ib_ah *ibah,
struct efa_ah *ah = to_eah(ibah);
int err;
- if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
+ if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
ibdev_dbg(&dev->ibdev,
"Create address handle is not supported in atomic context\n");
err = -EOPNOTSUPP;
@@ -1747,15 +1753,18 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
as = &dev->edev.aq.stats;
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
+ stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
s = &dev->stats;
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->sw_stats.alloc_pd_err);
stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->sw_stats.create_qp_err);
+ stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->sw_stats.create_cq_err);
stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->sw_stats.reg_mr_err);
stats->value[EFA_ALLOC_UCONTEXT_ERR] = atomic64_read(&s->sw_stats.alloc_ucontext_err);
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->sw_stats.create_ah_err);
+ stats->value[EFA_MMAP_ERR] = atomic64_read(&s->sw_stats.mmap_err);
return ARRAY_SIZE(efa_stats_names);
}
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 0405d26d0833..2e89ec10efed 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -22,9 +22,13 @@ hfi1-y := \
init.o \
intr.o \
iowait.o \
+ ipoib_main.o \
+ ipoib_rx.o \
+ ipoib_tx.o \
mad.o \
mmu_rb.o \
msix.o \
+ netdev_rx.o \
opfn.o \
pcie.o \
pio.o \
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 1aeea5d65c01..2a91b8d95e12 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -64,6 +64,7 @@ struct hfi1_affinity_node_list node_affinity = {
static const char * const irq_type_names[] = {
"SDMA",
"RCVCTXT",
+ "NETDEVCTXT",
"GENERAL",
"OTHER",
};
@@ -915,6 +916,11 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
set = &entry->rcv_intr;
scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+ break;
default:
dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
return -EINVAL;
@@ -987,6 +993,10 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
if (rcd->ctxt != HFI1_CTRL_CTXT)
set = &entry->rcv_intr;
break;
+ case IRQ_NETDEVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ set = &entry->def_intr;
+ break;
default:
mutex_unlock(&node_affinity.lock);
return;
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 6a7e6ea4e426..f94ed5d7c7a3 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -52,6 +52,7 @@
enum irq_type {
IRQ_SDMA,
IRQ_RCVCTXT,
+ IRQ_NETDEVCTXT,
IRQ_GENERAL,
IRQ_OTHER
};
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e0b1238d31df..15f9c635f292 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -66,10 +66,7 @@
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"
-
-uint kdeth_qp;
-module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
-MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
+#include "netdev.h"
uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
@@ -128,13 +125,15 @@ struct flag_table {
/*
* RSM instance allocation
- * 0 - Verbs
- * 1 - User Fecn Handling
- * 2 - Vnic
+ * 0 - User Fecn Handling
+ * 1 - Vnic
+ * 2 - AIP
+ * 3 - Verbs
*/
-#define RSM_INS_VERBS 0
-#define RSM_INS_FECN 1
-#define RSM_INS_VNIC 2
+#define RSM_INS_FECN 0
+#define RSM_INS_VNIC 1
+#define RSM_INS_AIP 2
+#define RSM_INS_VERBS 3
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39
@@ -175,6 +174,25 @@ struct flag_table {
/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
+/* RSM fields for AIP */
+/* LRH.BTH above is reused for this rule */
+
+/* BTH.DESTQP: QW 1, OFFSET 16 for match */
+#define BTH_DESTQP_QW 1ull
+#define BTH_DESTQP_BIT_OFFSET 16ull
+#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
+#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
+#define BTH_DESTQP_MASK 0xFFull
+#define BTH_DESTQP_VALUE 0x81ull
+
+/* DETH.SQPN: QW 1 Offset 56 for select */
+/* We use 8 most significant Source QPN bits as entropy for AIP */
+#define DETH_AIP_SQPN_QW 3ull
+#define DETH_AIP_SQPN_BIT_OFFSET 56ull
+#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
+#define DETH_AIP_SQPN_SELECT_OFFSET \
+ DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)
+
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
@@ -8463,6 +8481,49 @@ static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
local_irq_restore(flags);
}
+/**
+ * hfi1_netdev_rx_napi - napi poll function to move eoi inline
+ * @napi: pointer to napi object
+ * @budget: netdev budget
+ */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ struct hfi1_ctxtdata *rcd = rxq->rcd;
+ int work_done = 0;
+
+ work_done = rcd->do_interrupt(rcd, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return work_done;
+}
+
+/* Receive packet napi handler for VNIC and AIP netdevs */
+irqreturn_t receive_context_interrupt_napi(int irq, void *data)
+{
+ struct hfi1_ctxtdata *rcd = data;
+
+ receive_interrupt_common(rcd);
+
+ if (likely(rcd->napi)) {
+ if (likely(napi_schedule_prep(rcd->napi)))
+ __napi_schedule_irqoff(rcd->napi);
+ else
+ __hfi1_rcd_eoi_intr(rcd);
+ } else {
+ WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
+ rcd->ctxt);
+ __hfi1_rcd_eoi_intr(rcd);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
* Receive packet IRQ handler. This routine expects to be on its own IRQ.
* This routine will try to handle packets immediately (latency), but if
@@ -13330,13 +13391,12 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* in array of contexts
* freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used
- * num_vnic_contexts - number of contexts reserved for VNIC
+ * num_netdev_contexts - number of contexts reserved for netdev
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
unsigned long num_kernel_contexts;
- u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
- int total_contexts;
+ u16 num_netdev_contexts;
int ret;
unsigned ngroups;
int rmt_count;
@@ -13373,13 +13433,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts = send_contexts - num_vls - 1;
}
- /* Accommodate VNIC contexts if possible */
- if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
- dd_dev_err(dd, "No receive contexts available for VNIC\n");
- num_vnic_contexts = 0;
- }
- total_contexts = num_kernel_contexts + num_vnic_contexts;
-
/*
* User contexts:
* - default to 1 user context per real (non-HT) CPU core if
@@ -13392,28 +13445,32 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
/*
* Adjust the counts given a global max.
*/
- if (total_contexts + n_usr_ctxts > rcv_contexts) {
+ if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
dd_dev_err(dd,
- "Reducing # user receive contexts to: %d, from %u\n",
- rcv_contexts - total_contexts,
+ "Reducing # user receive contexts to: %u, from %u\n",
+ (u32)(rcv_contexts - num_kernel_contexts),
n_usr_ctxts);
/* recalculate */
- n_usr_ctxts = rcv_contexts - total_contexts;
+ n_usr_ctxts = rcv_contexts - num_kernel_contexts;
}
+ num_netdev_contexts =
+ hfi1_num_netdev_contexts(dd, rcv_contexts -
+ (num_kernel_contexts + n_usr_ctxts),
+ &node_affinity.real_cpu_mask);
/*
* The RMT entries are currently allocated as shown below:
* 1. QOS (0 to 128 entries);
* 2. FECN (num_kernel_context - 1 + num_user_contexts +
- * num_vnic_contexts);
- * 3. VNIC (num_vnic_contexts).
- * It should be noted that FECN oversubscribe num_vnic_contexts
- * entries of RMT because both VNIC and PSM could allocate any receive
+ * num_netdev_contexts);
+ * 3. netdev (num_netdev_contexts).
+ * It should be noted that FECN oversubscribe num_netdev_contexts
+ * entries of RMT because both netdev and PSM could allocate any receive
* context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
* and PSM FECN must reserve an RMT entry for each possible PSM receive
* context.
*/
- rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
if (HFI1_CAP_IS_KSET(TID_RDMA))
rmt_count += num_kernel_contexts - 1;
if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
@@ -13426,21 +13483,20 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
n_usr_ctxts = user_rmt_reduced;
}
- total_contexts += n_usr_ctxts;
-
- /* the first N are kernel contexts, the rest are user/vnic contexts */
- dd->num_rcv_contexts = total_contexts;
+ /* the first N are kernel contexts, the rest are user/netdev contexts */
+ dd->num_rcv_contexts =
+ num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
- dd->num_vnic_contexts = num_vnic_contexts;
+ dd->num_netdev_contexts = num_netdev_contexts;
dd->num_user_contexts = n_usr_ctxts;
dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
+ "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
- dd->num_vnic_contexts,
+ dd->num_netdev_contexts,
dd->num_user_contexts);
/*
@@ -14119,21 +14175,12 @@ static void init_early_variables(struct hfi1_devdata *dd)
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
- /* user changed the KDETH_QP */
- if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
- /* out of range or illegal value */
- dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
- kdeth_qp = 0;
- }
- if (kdeth_qp == 0) /* not set, or failed range check */
- kdeth_qp = DEFAULT_KDETH_QP;
-
write_csr(dd, SEND_BTH_QP,
- (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
+ (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
SEND_BTH_QP_KDETH_QP_SHIFT);
write_csr(dd, RCV_BTH_QP,
- (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
+ (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
RCV_BTH_QP_KDETH_QP_SHIFT);
}
@@ -14249,6 +14296,12 @@ static void complete_rsm_map_table(struct hfi1_devdata *dd,
}
}
+/* Check whether a receive side mapping rule is present at this index */
+static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+{
+ return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
+}
+
/*
* Add a receive side mapping rule.
*/
@@ -14485,77 +14538,138 @@ static void init_fecn_handling(struct hfi1_devdata *dd,
rmt->used += total_cnt;
}
-/* Initialize RSM for VNIC */
-void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+static inline bool hfi1_is_rmt_full(int start, int spare)
+{
+ return (start + spare) > NUM_MAP_ENTRIES;
+}
+
+static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
{
u8 i, j;
u8 ctx_id = 0;
u64 reg;
u32 regoff;
- struct rsm_rule_data rrd;
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
- if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
- dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
- dd->vnic.rmt_start);
- return;
+ /* We already have contexts mapped in RMT */
+ if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
+ dd_dev_info(dd, "Contexts are already mapped in RMT\n");
+ return true;
+ }
+
+ if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
+ dd_dev_err(dd, "Not enough RMT entries used = %d\n",
+ rmt_start);
+ return false;
}
- dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
- dd->vnic.rmt_start,
- dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
+ dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
+ rmt_start,
+ rmt_start + NUM_NETDEV_MAP_ENTRIES);
/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
- regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
+ regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
reg = read_csr(dd, regoff);
- for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
- /* Update map register with vnic context */
- j = (dd->vnic.rmt_start + i) % 8;
+ for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
+ /* Update map register with netdev context */
+ j = (rmt_start + i) % 8;
reg &= ~(0xffllu << (j * 8));
- reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
- /* Wrap up vnic ctx index */
- ctx_id %= dd->vnic.num_ctxt;
+ reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
+ /* Wrap up netdev ctx index */
+ ctx_id %= ctxt_count;
/* Write back map register */
- if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
+ if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
dev_dbg(&(dd)->pcidev->dev,
- "Vnic rsm map reg[%d] =0x%llx\n",
+ "RMT[%d] =0x%llx\n",
regoff - RCV_RSM_MAP_TABLE, reg);
write_csr(dd, regoff, reg);
regoff += 8;
- if (i < (NUM_VNIC_MAP_ENTRIES - 1))
+ if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
reg = read_csr(dd, regoff);
}
}
- /* Add rule for vnic */
- rrd.offset = dd->vnic.rmt_start;
- rrd.pkt_type = 4;
- /* Match 16B packets */
- rrd.field1_off = L2_TYPE_MATCH_OFFSET;
- rrd.mask1 = L2_TYPE_MASK;
- rrd.value1 = L2_16B_VALUE;
- /* Match ETH L4 packets */
- rrd.field2_off = L4_TYPE_MATCH_OFFSET;
- rrd.mask2 = L4_16B_TYPE_MASK;
- rrd.value2 = L4_16B_ETH_VALUE;
- /* Calc context from veswid and entropy */
- rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
- rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
- rrd.index2_off = L2_16B_ENTROPY_OFFSET;
- rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
- add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
-
- /* Enable RSM if not already enabled */
+ return true;
+}
+
+static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
+ int rule, struct rsm_rule_data *rrd)
+{
+ if (!hfi1_netdev_update_rmt(dd)) {
+ dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
+ return;
+ }
+
+ add_rsm_rule(dd, rule, rrd);
add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
+{
+ /*
+ * go through with the initialisation only if this rule actually doesn't
+ * exist yet
+ */
+ if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ .offset = rmt_start,
+ .pkt_type = IB_PACKET_TYPE,
+ .field1_off = LRH_BTH_MATCH_OFFSET,
+ .mask1 = LRH_BTH_MASK,
+ .value1 = LRH_BTH_VALUE,
+ .field2_off = BTH_DESTQP_MATCH_OFFSET,
+ .mask2 = BTH_DESTQP_MASK,
+ .value2 = BTH_DESTQP_VALUE,
+ .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
+ ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
+ }
+}
+
+/* Initialize RSM for VNIC */
+void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
+{
+ int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
+ struct rsm_rule_data rrd = {
+ /* Add rule for vnic */
+ .offset = rmt_start,
+ .pkt_type = 4,
+ /* Match 16B packets */
+ .field1_off = L2_TYPE_MATCH_OFFSET,
+ .mask1 = L2_TYPE_MASK,
+ .value1 = L2_16B_VALUE,
+ /* Match ETH L4 packets */
+ .field2_off = L4_TYPE_MATCH_OFFSET,
+ .mask2 = L4_16B_TYPE_MASK,
+ .value2 = L4_16B_ETH_VALUE,
+ /* Calc context from veswid and entropy */
+ .index1_off = L4_16B_HDR_VESWID_OFFSET,
+ .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
+ .index2_off = L2_16B_ENTROPY_OFFSET,
+ .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
+ };
+
+ hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
+}
+
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
clear_rsm_rule(dd, RSM_INS_VNIC);
+}
- /* Disable RSM if used only by vnic */
- if (dd->vnic.rmt_start == 0)
- clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
+{
+ /* only actually clear the rule if it's the last user asking to do so */
+ if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
+ clear_rsm_rule(dd, RSM_INS_AIP);
}
static int init_rxe(struct hfi1_devdata *dd)
@@ -14574,8 +14688,8 @@ static int init_rxe(struct hfi1_devdata *dd)
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
complete_rsm_map_table(dd, rmt);
- /* record number of used rsm map entries for vnic */
- dd->vnic.rmt_start = rmt->used;
+ /* record number of used rsm map entries for netdev */
+ hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
kfree(rmt);
/*
@@ -15129,6 +15243,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
+ /* alloc netdev data */
+ if (hfi1_netdev_alloc(dd))
+ goto bail_cleanup;
+
ret = set_up_context_variables(dd);
if (ret)
goto bail_cleanup;
@@ -15229,6 +15347,7 @@ bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
msix_clean_up_interrupts(dd);
bail_cleanup:
+ hfi1_netdev_free(dd);
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 725509261016..2c6f2de74d4d 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -1,7 +1,7 @@
#ifndef _CHIP_H
#define _CHIP_H
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -1447,6 +1447,7 @@ irqreturn_t general_interrupt(int irq, void *data);
irqreturn_t sdma_interrupt(int irq, void *data);
irqreturn_t receive_context_interrupt(int irq, void *data);
irqreturn_t receive_context_thread(int irq, void *data);
+irqreturn_t receive_context_interrupt_napi(int irq, void *data);
int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
void init_qsfp_int(struct hfi1_devdata *dd);
@@ -1455,6 +1456,8 @@ void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
void reset_interrupts(struct hfi1_devdata *dd);
u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
+void hfi1_init_aip_rsm(struct hfi1_devdata *dd);
+void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd);
/*
* Interrupt source table.
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 40a1ff0c8a8e..ff423e546b80 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,13 +72,6 @@
* compilation unit
*/
-/*
- * If a packet's QP[23:16] bits match this value, then it is
- * a PSM packet and the hardware will expect a KDETH header
- * following the BTH.
- */
-#define DEFAULT_KDETH_QP 0x80
-
/* driver/hw feature set bitmask */
#define HFI1_CAP_USER_SHIFT 24
#define HFI1_CAP_MASK ((1UL << HFI1_CAP_USER_SHIFT) - 1)
@@ -149,7 +142,8 @@
HFI1_CAP_NO_INTEGRITY | \
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_TID_RDMA | \
- HFI1_CAP_OPFN) << \
+ HFI1_CAP_OPFN | \
+ HFI1_CAP_AIP) << \
HFI1_CAP_USER_SHIFT)
/*
* Set of capabilities that need to be enabled for kernel context in
@@ -166,6 +160,7 @@
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_EXTENDED_PSN | \
+ HFI1_CAP_AIP | \
((HFI1_CAP_HDRSUPP | \
HFI1_CAP_MULTI_PKT_EGR | \
HFI1_CAP_STATIC_RATE_CTRL | \
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 049d15befe58..a40701a6e1b6 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
+#include <linux/etherdevice.h>
#include "hfi.h"
#include "trace.h"
@@ -63,6 +64,9 @@
#include "vnic.h"
#include "fault.h"
+#include "ipoib.h"
+#include "netdev.h"
+
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -748,6 +752,39 @@ static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
return ret;
}
+static void process_rcv_packet_napi(struct hfi1_packet *packet)
+{
+ packet->etype = rhf_rcv_type(packet->rhf);
+
+ /* total length */
+ packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
+ /* retrieve eager buffer details */
+ packet->etail = rhf_egr_index(packet->rhf);
+ packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
+ &packet->updegr);
+ /*
+ * Prefetch the contents of the eager buffer. It is
+ * OK to send a negative length to prefetch_range().
+ * The +2 is the size of the RHF.
+ */
+ prefetch_range(packet->ebuf,
+ packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ (rhf_hdrq_offset(packet->rhf)
+ + 2)) * 4));
+
+ packet->rcd->rhf_rcv_function_map[packet->etype](packet);
+ packet->numpkt++;
+
+ /* Set up for the next packet */
+ packet->rhqoff += packet->rsize;
+ if (packet->rhqoff >= packet->maxcnt)
+ packet->rhqoff = 0;
+
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rcd->rhf_offset;
+ packet->rhf = rhf_to_cpu(packet->rhf_addr);
+}
+
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret;
@@ -827,6 +864,36 @@ static inline void finish_packet(struct hfi1_packet *packet)
}
/*
+ * handle_receive_interrupt_napi_fp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from interrupt handler for receive interrupt.
+ * This is the fast path interrupt handler used
+ * when executing in the napi softirq environment.
+ */
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (packet.numpkt < budget) {
+ process_rcv_packet_napi(&packet);
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ break;
+
+ process_rcv_update(0, &packet);
+ }
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+bail:
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
* Handle receive interrupts when using the no dma rtail option.
*/
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
@@ -1074,6 +1141,63 @@ bail:
}
/*
+ * handle_receive_interrupt_napi_sp - receive a packet
+ * @rcd: the context
+ * @budget: polling budget
+ *
+ * Called from interrupt handler for errors or receive interrupt.
+ * This is the slow path interrupt handler used
+ * when executing in the napi softirq environment.
+ */
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ int last = RCV_PKT_OK;
+ bool needset = true;
+ struct hfi1_packet packet;
+
+ init_packet(rcd, &packet);
+ if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
+ goto bail;
+
+ while (last != RCV_PKT_DONE && packet.numpkt < budget) {
+ if (hfi1_need_drop(dd)) {
+ /* On to the next packet */
+ packet.rhqoff += packet.rsize;
+ packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
+ packet.rhqoff +
+ rcd->rhf_offset;
+ packet.rhf = rhf_to_cpu(packet.rhf_addr);
+
+ } else {
+ if (set_armed_to_active(&packet))
+ goto bail;
+ process_rcv_packet_napi(&packet);
+ }
+
+ if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
+ last = RCV_PKT_DONE;
+
+ if (needset) {
+ needset = false;
+ set_all_fastpath(dd, rcd);
+ }
+
+ process_rcv_update(last, &packet);
+ }
+
+ hfi1_set_rcd_head(rcd, packet.rhqoff);
+
+bail:
+ /*
+ * Always write head at end, and setup rcv interrupt, even
+ * if no packets were processed.
+ */
+ finish_packet(&packet);
+ return packet.numpkt;
+}
+
+/*
* We may discover in the interrupt that the hardware link state has
* changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
* and we need to update the driver's notion of the link state. We cannot
@@ -1550,6 +1674,82 @@ void handle_eflags(struct hfi1_packet *packet)
show_eflags_errs(packet);
}
+static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp;
+ struct net_device *netdev;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct napi_struct *napi = rcd->napi;
+ struct sk_buff *skb;
+ struct hfi1_netdev_rxq *rxq = container_of(napi,
+ struct hfi1_netdev_rxq, napi);
+ u32 extra_bytes;
+ u32 tlen, qpnum;
+ bool do_work, do_cnp;
+ struct hfi1_ipoib_dev_priv *priv;
+
+ trace_hfi1_rcvhdr(packet);
+
+ hfi1_setup_ib_header(packet);
+
+ packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
+ packet->grh = NULL;
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return;
+ }
+
+ qpnum = ib_bth_get_qpn(packet->ohdr);
+ netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
+ if (!netdev)
+ goto drop_no_nd;
+
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+ trace_ctxt_rsm_hist(rcd->ctxt);
+
+ /* handle congestion notifications */
+ do_work = hfi1_may_ecn(packet);
+ if (unlikely(do_work)) {
+ do_cnp = (packet->opcode != IB_OPCODE_CNP);
+ (void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
+ packet, do_cnp);
+ }
+
+ /*
+ * We have a split point after the last byte of DETH,
+ * so strip the padding, CRC and ICRC.
+ * tlen is the whole packet length, so we need to
+ * subtract the header size as well.
+ */
+ tlen = packet->tlen;
+ extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
+ packet->hlen;
+ if (unlikely(tlen < extra_bytes))
+ goto drop;
+
+ tlen -= extra_bytes;
+
+ skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
+ if (unlikely(!skb))
+ goto drop;
+
+ priv = hfi1_ipoib_priv(netdev);
+ hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
+
+ skb->dev = netdev;
+ skb->pkt_type = PACKET_HOST;
+ netif_receive_skb(skb);
+
+ return;
+
+drop:
+ ++netdev->stats.rx_dropped;
+drop_no_nd:
+ ibp = rcd_to_iport(packet->rcd);
+ ++ibp->rvp.n_pkt_drops;
+}
+
/*
* The following functions are called by the interrupt handler. They are type
* specific handlers for each packet type.
@@ -1572,28 +1772,10 @@ static void process_receive_ib(struct hfi1_packet *packet)
hfi1_ib_rcv(packet);
}
-static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
-{
- /* Packet received in VNIC context via RSM */
- if (packet->rcd->is_vnic)
- return true;
-
- if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
- (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
- return true;
-
- return false;
-}
-
static void process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
- if (hfi1_is_vnic_packet(packet)) {
- hfi1_vnic_bypass_rcv(packet);
- return;
- }
-
if (hfi1_setup_bypass_packet(packet))
return;
@@ -1757,3 +1939,14 @@ const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};
+
+const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
+ [RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
+ [RHF_RCV_TYPE_EAGER] = process_receive_invalid,
+ [RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
+ [RHF_RCV_TYPE_ERROR] = process_receive_error,
+ [RHF_RCV_TYPE_BYPASS] = hfi1_vnic_bypass_rcv,
+ [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
+ [RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
+};
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index e7fdd70c6e78..8ca51e43cf53 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -1264,7 +1264,7 @@ static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
binfo.sw_version = HFI1_KERN_SWVERSION;
- binfo.bthqp = kdeth_qp;
+ binfo.bthqp = RVT_KDETH_QP_PREFIX;
binfo.jkey = uctxt->jkey;
/*
* If more than 64 contexts are enabled the allocated credit
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index b06c2594105a..b4c6bff60a4e 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
- * Copyright(c) 2015-2018 Intel Corporation.
+ * Copyright(c) 2015-2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -233,6 +233,8 @@ struct hfi1_ctxtdata {
intr_handler fast_handler;
/** slow handler */
intr_handler slow_handler;
+ /* napi pointer associated with netdev */
+ struct napi_struct *napi;
/* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
/* clear interrupt mask */
@@ -383,11 +385,11 @@ struct hfi1_packet {
u32 rhqoff;
u32 dlid;
u32 slid;
+ int numpkt;
u16 tlen;
s16 etail;
u16 pkey;
u8 hlen;
- u8 numpkt;
u8 rsize;
u8 updegr;
u8 etype;
@@ -985,7 +987,7 @@ typedef void (*hfi1_make_req)(struct rvt_qp *qp,
struct hfi1_pkt_state *ps,
struct rvt_swqe *wqe);
extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
-
+extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];
/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE 0 /* keep going */
@@ -1045,23 +1047,10 @@ struct hfi1_asic_data {
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
-/*
- * Number of VNIC contexts used. Ensure it is less than or equal to
- * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
- */
-#define HFI1_NUM_VNIC_CTXT 8
-
-/* Number of VNIC RSM entries */
-#define NUM_VNIC_MAP_ENTRIES 8
-
/* Virtual NIC information */
struct hfi1_vnic_data {
- struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
struct kmem_cache *txreq_cache;
- struct xarray vesws;
u8 num_vports;
- u8 rmt_start;
- u8 num_ctxt;
};
struct hfi1_vnic_vport_info;
@@ -1167,8 +1156,8 @@ struct hfi1_devdata {
u64 z_send_schedule;
u64 __percpu *send_schedule;
- /* number of reserved contexts for VNIC usage */
- u16 num_vnic_contexts;
+ /* number of reserved contexts for netdev usage */
+ u16 num_netdev_contexts;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */
@@ -1417,12 +1406,12 @@ struct hfi1_devdata {
struct hfi1_vnic_data vnic;
/* Lock to protect IRQ SRC register access */
spinlock_t irq_src_lock;
-};
+ int vnic_num_vports;
+ struct net_device *dummy_netdev;
-static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
-{
- return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
-}
+ /* Keeps track of IPoIB RSM rule users */
+ atomic_t ipoib_rsm_usr_num;
+};
/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
@@ -1500,6 +1489,8 @@ struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
+int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
+int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
void set_all_slowpath(struct hfi1_devdata *dd);
extern const struct pci_device_id hfi1_pci_tbl[];
@@ -2250,7 +2241,6 @@ extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
-extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 3759d9233a1c..5eed4360695f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,7 @@
#include "affinity.h"
#include "vnic.h"
#include "exp_rcv.h"
+#include "netdev.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -374,6 +375,7 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+ rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
mutex_init(&rcd->exp_mutex);
spin_lock_init(&rcd->exp_lock);
@@ -1316,6 +1318,7 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
+ atomic_set(&dd->ipoib_rsm_usr_num, 0);
return dd;
bail:
@@ -1663,9 +1666,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* do the generic initialization */
initfail = hfi1_init(dd, 0);
- /* setup vnic */
- hfi1_vnic_setup(dd);
-
ret = hfi1_register_ib_device(dd);
/*
@@ -1704,7 +1704,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hfi1_device_remove(dd);
if (!ret)
hfi1_unregister_ib_device(dd);
- hfi1_vnic_cleanup(dd);
postinit_cleanup(dd);
if (initfail)
ret = initfail;
@@ -1749,8 +1748,8 @@ static void remove_one(struct pci_dev *pdev)
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
- /* cleanup vnic */
- hfi1_vnic_cleanup(dd);
+ /* free netdev data */
+ hfi1_netdev_free(dd);
/*
* Disable the IB link, disable interrupts on the device,
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
new file mode 100644
index 000000000000..185c9b02c974
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB functionality
+ */
+
+#ifndef HFI1_IPOIB_H
+#define HFI1_IPOIB_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/if_infiniband.h>
+
+#include "hfi.h"
+#include "iowait.h"
+#include "netdev.h"
+
+#include <rdma/ib_verbs.h>
+
+#define HFI1_IPOIB_ENTROPY_SHIFT 24
+
+#define HFI1_IPOIB_TXREQ_NAME_LEN 32
+
+#define HFI1_IPOIB_PSEUDO_LEN 20
+#define HFI1_IPOIB_ENCAP_LEN 4
+
+struct hfi1_ipoib_dev_priv;
+
+union hfi1_ipoib_flow {
+ u16 as_int;
+ struct {
+ u8 tx_queue;
+ u8 sc5;
+ } __attribute__((__packed__));
+};
+
+/**
+ * struct hfi1_ipoib_circ_buf - List of items to be processed
+ * @items: ring of items
+ * @head: ring head
+ * @tail: ring tail
+ * @max_items: max items + 1 that the ring can contain
+ * @producer_lock: producer sync lock
+ * @consumer_lock: consumer sync lock
+ */
+struct hfi1_ipoib_circ_buf {
+ void **items;
+ unsigned long head;
+ unsigned long tail;
+ unsigned long max_items;
+ spinlock_t producer_lock; /* head sync lock */
+ spinlock_t consumer_lock; /* tail sync lock */
+};
+
+/**
+ * struct hfi1_ipoib_txq - IPOIB per Tx queue information
+ * @priv: private pointer
+ * @sde: sdma engine
+ * @tx_list: tx request list
+ * @sent_txreqs: count of txreqs posted to sdma
+ * @flow: tracks when list needs to be flushed for a flow change
+ * @q_idx: ipoib Tx queue index
+ * @pkts_sent: indicator packets have been sent from this queue
+ * @wait: iowait structure
+ * @complete_txreqs: count of txreqs completed by sdma
+ * @napi: pointer to tx napi interface
+ * @tx_ring: ring of ipoib txreqs to be reaped by napi callback
+ */
+struct hfi1_ipoib_txq {
+ struct hfi1_ipoib_dev_priv *priv;
+ struct sdma_engine *sde;
+ struct list_head tx_list;
+ u64 sent_txreqs;
+ union hfi1_ipoib_flow flow;
+ u8 q_idx;
+ bool pkts_sent;
+ struct iowait wait;
+
+ atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
+ struct napi_struct *napi;
+ struct hfi1_ipoib_circ_buf tx_ring;
+};
+
+struct hfi1_ipoib_dev_priv {
+ struct hfi1_devdata *dd;
+ struct net_device *netdev;
+ struct ib_device *device;
+ struct hfi1_ipoib_txq *txqs;
+ struct kmem_cache *txreq_cache;
+ struct napi_struct *tx_napis;
+ u16 pkey;
+ u16 pkey_index;
+ u32 qkey;
+ u8 port_num;
+
+ const struct net_device_ops *netdev_ops;
+ struct rvt_qp *qp;
+ struct pcpu_sw_netstats __percpu *netstats;
+};
+
+/* hfi1 ipoib rdma netdev's private data structure */
+struct hfi1_ipoib_rdma_netdev {
+ struct rdma_netdev rn; /* keep this first */
+ /* followed by device private data */
+ struct hfi1_ipoib_dev_priv dev_priv;
+};
+
+static inline struct hfi1_ipoib_dev_priv *
+hfi1_ipoib_priv(const struct net_device *dev)
+{
+ return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
+}
+
+static inline void
+hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
+ u64 packets,
+ u64 bytes)
+{
+ struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
+
+ u64_stats_update_begin(&netstats->syncp);
+ netstats->rx_packets += packets;
+ netstats->rx_bytes += bytes;
+ u64_stats_update_end(&netstats->syncp);
+}
+
+static inline void
+hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
+ u64 packets,
+ u64 bytes)
+{
+ struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
+
+ u64_stats_update_begin(&netstats->syncp);
+ netstats->tx_packets += packets;
+ netstats->tx_bytes += bytes;
+ u64_stats_update_end(&netstats->syncp);
+}
+
+int hfi1_ipoib_send_dma(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn);
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv);
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv);
+
+int hfi1_ipoib_rxq_init(struct net_device *dev);
+void hfi1_ipoib_rxq_deinit(struct net_device *dev);
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev);
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev);
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data);
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params);
+
+#endif /* HFI1_IPOIB_H */
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
new file mode 100644
index 000000000000..014351ebbefa
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for ipoib functionality
+ */
+
+#include "ipoib.h"
+#include "hfi.h"
+
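+/* The IPoIB hardware address carries the QPN in bytes 1-3; extract it */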
+static u32 qpn_from_mac(u8 *mac_arr)
+{
+ return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
+}
+
+static int hfi1_ipoib_dev_init(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ priv->netstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+
+ ret = priv->netdev_ops->ndo_init(dev);
+ if (ret)
+ return ret;
+
+ ret = hfi1_netdev_add_data(priv->dd,
+ qpn_from_mac(priv->netdev->dev_addr),
+ dev);
+ if (ret < 0) {
+ priv->netdev_ops->ndo_uninit(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hfi1_ipoib_dev_uninit(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
+
+ priv->netdev_ops->ndo_uninit(dev);
+}
+
+static int hfi1_ipoib_dev_open(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int ret;
+
+ ret = priv->netdev_ops->ndo_open(dev);
+ if (!ret) {
+ struct hfi1_ibport *ibp = to_iport(priv->device,
+ priv->port_num);
+ struct rvt_qp *qp;
+ u32 qpn = qpn_from_mac(priv->netdev->dev_addr);
+
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (!qp) {
+ rcu_read_unlock();
+ priv->netdev_ops->ndo_stop(dev);
+ return -EINVAL;
+ }
+ rvt_get_qp(qp);
+ priv->qp = qp;
+ rcu_read_unlock();
+
+ hfi1_netdev_enable_queues(priv->dd);
+ hfi1_ipoib_napi_tx_enable(dev);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_dev_stop(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ if (!priv->qp)
+ return 0;
+
+ hfi1_ipoib_napi_tx_disable(dev);
+ hfi1_netdev_disable_queues(priv->dd);
+
+ rvt_put_qp(priv->qp);
+ priv->qp = NULL;
+
+ return priv->netdev_ops->ndo_stop(dev);
+}
+
+static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u64 rx_packets = 0ull;
+ u64 rx_bytes = 0ull;
+ u64 tx_packets = 0ull;
+ u64 tx_bytes = 0ull;
+ int i;
+
+ netdev_stats_to_stats64(storage, &dev->stats);
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *stats;
+ unsigned int start;
+ u64 trx_packets;
+ u64 trx_bytes;
+ u64 ttx_packets;
+ u64 ttx_bytes;
+
+ stats = per_cpu_ptr(priv->netstats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ trx_packets = stats->rx_packets;
+ trx_bytes = stats->rx_bytes;
+ ttx_packets = stats->tx_packets;
+ ttx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ rx_packets += trx_packets;
+ rx_bytes += trx_bytes;
+ tx_packets += ttx_packets;
+ tx_bytes += ttx_bytes;
+ }
+
+ storage->rx_packets += rx_packets;
+ storage->rx_bytes += rx_bytes;
+ storage->tx_packets += tx_packets;
+ storage->tx_bytes += tx_bytes;
+}
+
+static const struct net_device_ops hfi1_ipoib_netdev_ops = {
+ .ndo_init = hfi1_ipoib_dev_init,
+ .ndo_uninit = hfi1_ipoib_dev_uninit,
+ .ndo_open = hfi1_ipoib_dev_open,
+ .ndo_stop = hfi1_ipoib_dev_stop,
+ .ndo_get_stats64 = hfi1_ipoib_dev_get_stats64,
+};
+
+static int hfi1_ipoib_send(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ return hfi1_ipoib_send_dma(dev, skb, address, dqpn);
+}
+
+static int hfi1_ipoib_mcast_attach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid,
+ int set_qkey,
+ u32 qkey)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ if (set_qkey)
+ priv->qkey = qkey;
+
+ /* attach QP to multicast group */
+ ret = ib_attach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_mcast_detach(struct net_device *dev,
+ struct ib_device *device,
+ union ib_gid *mgid,
+ u16 mlid)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
+ struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
+ struct rvt_qp *qp;
+ int ret = -EINVAL;
+
+ rcu_read_lock();
+
+ qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
+ if (qp) {
+ rvt_get_qp(qp);
+ rcu_read_unlock();
+ ret = ib_detach_mcast(&qp->ibqp, mgid, mlid);
+ rvt_put_qp(qp);
+ } else {
+ rcu_read_unlock();
+ }
+ return ret;
+}
+
+static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ hfi1_ipoib_txreq_deinit(priv);
+ hfi1_ipoib_rxq_deinit(priv->netdev);
+
+ free_percpu(priv->netstats);
+}
+
+static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
+{
+ hfi1_ipoib_netdev_dtor(dev);
+ free_netdev(dev);
+}
+
+static void hfi1_ipoib_set_id(struct net_device *dev, int id)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+
+ priv->pkey_index = (u16)id;
+ ib_query_pkey(priv->device,
+ priv->port_num,
+ priv->pkey_index,
+ &priv->pkey);
+}
+
+static int hfi1_ipoib_setup_rn(struct ib_device *device,
+ u8 port_num,
+ struct net_device *netdev,
+ void *param)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+ struct rdma_netdev *rn = netdev_priv(netdev);
+ struct hfi1_ipoib_dev_priv *priv;
+ int rc;
+
+ rn->send = hfi1_ipoib_send;
+ rn->attach_mcast = hfi1_ipoib_mcast_attach;
+ rn->detach_mcast = hfi1_ipoib_mcast_detach;
+ rn->set_id = hfi1_ipoib_set_id;
+ rn->hca = device;
+ rn->port_num = port_num;
+ rn->mtu = netdev->mtu;
+
+ priv = hfi1_ipoib_priv(netdev);
+ priv->dd = dd;
+ priv->netdev = netdev;
+ priv->device = device;
+ priv->port_num = port_num;
+ priv->netdev_ops = netdev->netdev_ops;
+
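+ /*
+ * Interpose the hfi1 ndo callbacks; the original ipoib ops are saved
+ * in priv->netdev_ops and called from the hfi1 wrappers above.
+ */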
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
+ ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
+
+ rc = hfi1_ipoib_txreq_init(priv);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
+ hfi1_ipoib_free_rdma_netdev(netdev);
+ return rc;
+ }
+
+ rc = hfi1_ipoib_rxq_init(netdev);
+ if (rc) {
+ dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
+ hfi1_ipoib_free_rdma_netdev(netdev);
+ return rc;
+ }
+
+ netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
+ netdev->needs_free_netdev = true;
+
+ return 0;
+}
+
+int hfi1_ipoib_rn_get_params(struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(device);
+
+ if (type != RDMA_NETDEV_IPOIB)
+ return -EOPNOTSUPP;
+
+ if (!HFI1_CAP_IS_KSET(AIP) || !dd->num_netdev_contexts)
+ return -EOPNOTSUPP;
+
+ if (!port_num || port_num > dd->num_pports)
+ return -EINVAL;
+
+ params->sizeof_priv = sizeof(struct hfi1_ipoib_rdma_netdev);
+ params->txqs = dd->num_sdma;
+ params->rxqs = dd->num_netdev_contexts;
+ params->param = NULL;
+ params->initialize_rdma_netdev = hfi1_ipoib_setup_rn;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
new file mode 100644
index 000000000000..3afa7545242c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#include "netdev.h"
+#include "ipoib.h"
+
+#define HFI1_IPOIB_SKB_PAD ((NET_SKB_PAD) + (NET_IP_ALIGN))
+
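+/*
+ * Copy the receive buffer into the skb.  The first 4 bytes are the IPoIB
+ * encapsulation header: the protocol is taken from it and the header is
+ * pulled off after the copy.
+ */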
+static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
+{
+ void *dst_data;
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = *((__be16 *)data);
+
+ dst_data = skb_put(skb, size);
+ memcpy(dst_data, data, size);
+ skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
+ skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
+}
+
+static struct sk_buff *prepare_frag_skb(struct napi_struct *napi, int size)
+{
+ struct sk_buff *skb;
+ int skb_size = SKB_DATA_ALIGN(size + HFI1_IPOIB_SKB_PAD);
+ void *frag;
+
+ skb_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb_size = SKB_DATA_ALIGN(skb_size);
+ frag = napi_alloc_frag(skb_size);
+
+ if (unlikely(!frag))
+ return napi_alloc_skb(napi, size);
+
+ skb = build_skb(frag, skb_size);
+
+ if (unlikely(!skb)) {
+ skb_free_frag(frag);
+ return NULL;
+ }
+
+ skb_reserve(skb, HFI1_IPOIB_SKB_PAD);
+ return skb;
+}
+
+struct sk_buff *hfi1_ipoib_prepare_skb(struct hfi1_netdev_rxq *rxq,
+ int size, void *data)
+{
+ struct napi_struct *napi = &rxq->napi;
+ int skb_size = size + HFI1_IPOIB_ENCAP_LEN;
+ struct sk_buff *skb;
+
+ /*
+ * For smaller allocations (up to a page minus skb overhead) use the
+ * napi skb cache. Otherwise fall back to the napi frag cache.
+ */
+ if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE))
+ skb = napi_alloc_skb(napi, skb_size);
+ else
+ skb = prepare_frag_skb(napi, skb_size);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ copy_ipoib_buf(skb, data, size);
+
+ return skb;
+}
+
+int hfi1_ipoib_rxq_init(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+ int ret;
+
+ ret = hfi1_netdev_rx_init(dd);
+ if (ret)
+ return ret;
+
+ hfi1_init_aip_rsm(dd);
+
+ return ret;
+}
+
+void hfi1_ipoib_rxq_deinit(struct net_device *netdev)
+{
+ struct hfi1_ipoib_dev_priv *ipoib_priv = hfi1_ipoib_priv(netdev);
+ struct hfi1_devdata *dd = ipoib_priv->dd;
+
+ hfi1_deinit_aip_rsm(dd);
+ hfi1_netdev_rx_destroy(dd);
+}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
new file mode 100644
index 000000000000..883cb9d48022
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for IPOIB SDMA functionality
+ */
+
+#include <linux/log2.h>
+#include <linux/circ_buf.h>
+
+#include "sdma.h"
+#include "verbs.h"
+#include "trace_ibhdrs.h"
+#include "ipoib.h"
+
+/* Convenience helpers for power-of-2 sized circular buffers (size - 1 acts as a mask) */
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
+#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
+
+/**
+ * struct ipoib_txreq - IPOIB transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma_hdr: 9b ib headers
+ * @sdma_status: status returned by sdma engine
+ * @priv: ipoib netdev private data
+ * @txq: txq on which skb was output
+ * @skb: skb to send
+ */
+struct ipoib_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_sdma_header sdma_hdr;
+ int sdma_status;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct hfi1_ipoib_txq *txq;
+ struct sk_buff *skb;
+};
+
+struct ipoib_txparms {
+ struct hfi1_devdata *dd;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_ibport *ibp;
+ struct hfi1_ipoib_txq *txq;
+ union hfi1_ipoib_flow flow;
+ u32 dqpn;
+ u8 hdr_dwords;
+ u8 entropy;
+};
+
+static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
+{
+ return sent - completed;
+}
+
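+/*
+ * Stop the subqueue once the number of outstanding txreqs reaches the
+ * lesser of the device tx_queue_len and the tx_ring capacity.
+ */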
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs)) >=
+ min_t(unsigned int, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1)))
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
+{
+ struct net_device *dev = txq->priv->netdev;
+
+ /* If the queue is already running just return */
+ if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
+ return;
+
+ /* If shutting down just return as queue state is irrelevant */
+ if (unlikely(dev->reg_state != NETREG_REGISTERED))
+ return;
+
+ /*
+ * When the queue has been drained to less than half full it will be
+ * restarted.
+ * The size of the txreq ring is fixed at initialization.
+ * The tx queue len can be adjusted upward while the interface is
+ * running.
+ * The tx queue len can be large enough to overflow the txreq_ring.
+ * Use the minimum of the current tx_queue_len or the rings max txreqs
+ * to protect against ring overflow.
+ */
+ if (hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs))
+ < min_t(unsigned int, dev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1)
+ netif_wake_subqueue(dev, txq->q_idx);
+}
+
+static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+
+ if (likely(!tx->sdma_status)) {
+ hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len);
+ } else {
+ ++priv->netdev->stats.tx_errors;
+ dd_dev_warn(priv->dd,
+ "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
+ __func__, tx->sdma_status,
+ le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ tx->txq->sde->this_idx);
+ }
+
+ napi_consume_skb(tx->skb, budget);
+ sdma_txclean(priv->dd, &tx->txreq);
+ kmem_cache_free(priv->txreq_cache, tx);
+}
+
+static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ unsigned long head;
+ unsigned long tail;
+ unsigned int max_tx;
+ int work_done;
+ int tx_count;
+
+ spin_lock_bh(&tx_ring->consumer_lock);
+
+ /* Read index before reading contents at that index. */
+ head = smp_load_acquire(&tx_ring->head);
+ tail = tx_ring->tail;
+ max_tx = tx_ring->max_items;
+
+ work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);
+
+ for (tx_count = work_done; tx_count; tx_count--) {
+ hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
+ tail = CIRC_NEXT(tail, max_tx);
+ }
+
+ atomic64_add(work_done, &txq->complete_txreqs);
+
+ /* Finished freeing tx items so store the tail value. */
+ smp_store_release(&tx_ring->tail, tail);
+
+ spin_unlock_bh(&tx_ring->consumer_lock);
+
+ hfi1_ipoib_check_queue_stopped(txq);
+
+ return work_done;
+}
+
+static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
+ struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
+
+ int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);
+
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
+{
+ struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
+ unsigned long head;
+ unsigned long tail;
+ size_t max_tx;
+
+ spin_lock(&tx_ring->producer_lock);
+
+ head = tx_ring->head;
+ tail = READ_ONCE(tx_ring->tail);
+ max_tx = tx_ring->max_items;
+
+ if (likely(CIRC_SPACE(head, tail, max_tx))) {
+ tx_ring->items[head] = tx;
+
+ /* Finish storing txreq before incrementing head. */
+ smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
+ napi_schedule(tx->txq->napi);
+ } else {
+ struct hfi1_ipoib_txq *txq = tx->txq;
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+
+ /* Ring was full */
+ hfi1_ipoib_free_tx(tx, 0);
+ atomic64_inc(&txq->complete_txreqs);
+ dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
+ }
+
+ spin_unlock(&tx_ring->producer_lock);
+}
+
+static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
+{
+ struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);
+
+ tx->sdma_status = status;
+
+ hfi1_ipoib_add_tx(tx);
+}
+
+static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct sk_buff *skb = tx->skb;
+ int ret = 0;
+ int i;
+
+ if (skb_headlen(skb)) {
+ ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));
+ if (unlikely(ret))
+ return ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ ret = sdma_txadd_page(dd,
+ txreq,
+ skb_frag_page(frag),
+ frag->bv_offset,
+ skb_frag_size(frag));
+ if (unlikely(ret))
+ break;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_devdata *dd = txp->dd;
+ struct sdma_txreq *txreq = &tx->txreq;
+ struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ u16 pkt_bytes =
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
+ int ret;
+
+ ret = sdma_txinit(txreq, 0, pkt_bytes, hfi1_ipoib_sdma_complete);
+ if (unlikely(ret))
+ return ret;
+
+ /* add pbc + headers */
+ ret = sdma_txadd_kvaddr(dd,
+ txreq,
+ sdma_hdr,
+ sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2));
+ if (unlikely(ret))
+ return ret;
+
+ /* add the ulp payload */
+ return hfi1_ipoib_build_ulp_payload(tx, txp);
+}
+
+static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct sk_buff *skb = tx->skb;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
+ struct rdma_ah_attr *ah_attr = txp->ah_attr;
+ struct ib_other_headers *ohdr;
+ struct ib_grh *grh;
+ u16 dwords;
+ u16 slid;
+ u16 dlid;
+ u16 lrh0;
+ u32 bth0;
+ u32 sqpn = (u32)(priv->netdev->dev_addr[1] << 16 |
+ priv->netdev->dev_addr[2] << 8 |
+ priv->netdev->dev_addr[3]);
+ u16 payload_dwords;
+ u8 pad_cnt;
+
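+ /* Pad the payload up to the next 4-byte (dword) boundary */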
+ pad_cnt = -skb->len & 3;
+
+ /* Includes ICRC */
+ payload_dwords = ((skb->len + pad_cnt) >> 2) + SIZE_OF_CRC;
+
+ /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
+ txp->hdr_dwords = 7;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ grh = &sdma_hdr->hdr.ibh.u.l.grh;
+ txp->hdr_dwords +=
+ hfi1_make_grh(txp->ibp,
+ grh,
+ rdma_ah_read_grh(ah_attr),
+ txp->hdr_dwords - LRH_9B_DWORDS,
+ payload_dwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &sdma_hdr->hdr.ibh.u.l.oth;
+ } else {
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &sdma_hdr->hdr.ibh.u.oth;
+ }
+
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
+ lrh0 |= (txp->flow.sc5 & 0xf) << 12;
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
+ if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ } else {
+ u16 lid = (u16)ppd->lid;
+
+ if (lid) {
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
+ slid = lid;
+ } else {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ }
+ }
+
+ /* Includes ICRC */
+ dwords = txp->hdr_dwords + payload_dwords;
+
+ /* Build the lrh */
+ sdma_hdr->hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ hfi1_make_ib_hdr(&sdma_hdr->hdr.ibh, lrh0, dwords, dlid, slid);
+
+ /* Build the bth */
+ bth0 = (IB_OPCODE_UD_SEND_ONLY << 24) | (pad_cnt << 20) | priv->pkey;
+
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(txp->dqpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));
+
+ /* Build the deth */
+ ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32((txp->entropy <<
+ HFI1_IPOIB_ENTROPY_SHIFT) | sqpn);
+
+ /* Construct the pbc. */
+ sdma_hdr->pbc =
+ cpu_to_le64(create_pbc(ppd,
+ ib_is_sc5(txp->flow.sc5) <<
+ PBC_DC_INFO_SHIFT,
+ 0,
+ sc_to_vlt(priv->dd, txp->flow.sc5),
+ dwords - SIZE_OF_CRC +
+ (sizeof(sdma_hdr->pbc) >> 2)));
+}
+
+static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = kmem_cache_alloc_node(priv->txreq_cache,
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (unlikely(!tx))
+ return ERR_PTR(-ENOMEM);
+
+ /* so that we can test if the sdma descriptors are there */
+ tx->txreq.num_desc = 0;
+ tx->priv = priv;
+ tx->txq = txp->txq;
+ tx->skb = skb;
+
+ hfi1_ipoib_build_ib_tx_headers(tx, txp);
+
+ ret = hfi1_ipoib_build_tx_desc(tx, txp);
+ if (likely(!ret)) {
+ if (txp->txq->flow.as_int != txp->flow.as_int) {
+ txp->txq->flow.tx_queue = txp->flow.tx_queue;
+ txp->txq->flow.sc5 = txp->flow.sc5;
+ txp->txq->sde =
+ sdma_select_engine_sc(priv->dd,
+ txp->flow.tx_queue,
+ txp->flow.sc5);
+ }
+
+ return tx;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+ kmem_cache_free(priv->txreq_cache, tx);
+
+ return ERR_PTR(ret);
+}
+
+static int hfi1_ipoib_submit_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret;
+ u16 count_out;
+
+ ret = sdma_send_txlist(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &txq->tx_list,
+ &count_out);
+ if (likely(!ret) || ret == -EBUSY || ret == -ECOMM)
+ return ret;
+
+ dd_dev_warn(txq->priv->dd, "cannot send skb tx list, err %d.\n", ret);
+
+ return ret;
+}
+
+static int hfi1_ipoib_flush_tx_list(struct net_device *dev,
+ struct hfi1_ipoib_txq *txq)
+{
+ int ret = 0;
+
+ if (!list_empty(&txq->tx_list)) {
+ /* Flush the current list */
+ ret = hfi1_ipoib_submit_tx_list(dev, txq);
+
+ if (unlikely(ret))
+ if (ret != -EBUSY)
+ ++dev->stats.tx_carrier_errors;
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_submit_tx(struct hfi1_ipoib_txq *txq,
+ struct ipoib_txreq *tx)
+{
+ int ret;
+
+ ret = sdma_send_txreq(txq->sde,
+ iowait_get_ib_work(&txq->wait),
+ &tx->txreq,
+ txq->pkts_sent);
+ if (likely(!ret)) {
+ txq->pkts_sent = true;
+ iowait_starve_clear(txq->pkts_sent, &txq->wait);
+ }
+
+ return ret;
+}
+
+static int hfi1_ipoib_send_dma_single(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+ int ret;
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ ret = hfi1_ipoib_submit_tx(txq, tx);
+ if (likely(!ret)) {
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ txq->pkts_sent = false;
+
+ if (ret == -EBUSY) {
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ if (ret == -ECOMM) {
+ hfi1_ipoib_check_queue_depth(txq);
+ return NETDEV_TX_OK;
+ }
+
+ sdma_txclean(priv->dd, &tx->txreq);
+ dev_kfree_skb_any(skb);
+ kmem_cache_free(priv->txreq_cache, tx);
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+}
+
+static int hfi1_ipoib_send_dma_list(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ipoib_txparms *txp)
+{
+ struct hfi1_ipoib_txq *txq = txp->txq;
+ struct ipoib_txreq *tx;
+
+ /* Has the flow changed? */
+ if (txq->flow.as_int != txp->flow.as_int)
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
+ if (IS_ERR(tx)) {
+ int ret = PTR_ERR(tx);
+
+ dev_kfree_skb_any(skb);
+
+ if (ret == -ENOMEM)
+ ++dev->stats.tx_errors;
+ else
+ ++dev->stats.tx_carrier_errors;
+
+ return NETDEV_TX_OK;
+ }
+
+ list_add_tail(&tx->txreq.list, &txq->tx_list);
+
+ hfi1_ipoib_check_queue_depth(txq);
+
+ trace_sdma_output_ibhdr(tx->priv->dd,
+ &tx->sdma_hdr.hdr,
+ ib_is_sc5(txp->flow.sc5));
+
+ if (!netdev_xmit_more())
+ (void)hfi1_ipoib_flush_tx_list(dev, txq);
+
+ return NETDEV_TX_OK;
+}
+
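+/*
+ * Derive a per-flow entropy value by XORing the first four bytes of the
+ * transport header (the TCP/UDP port fields) when one is present,
+ * otherwise fall back to the skb queue mapping.
+ */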
+static u8 hfi1_ipoib_calc_entropy(struct sk_buff *skb)
+{
+ if (skb_transport_header_was_set(skb)) {
+ u8 *hdr = (u8 *)skb_transport_header(skb);
+
+ return (hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3]);
+ }
+
+ return (u8)skb_get_queue_mapping(skb);
+}
+
+int hfi1_ipoib_send_dma(struct net_device *dev,
+ struct sk_buff *skb,
+ struct ib_ah *address,
+ u32 dqpn)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct ipoib_txparms txp;
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ if (unlikely(skb->len > rn->mtu + HFI1_IPOIB_ENCAP_LEN)) {
+ dd_dev_warn(priv->dd, "packet len %d (> %d) too long to send, dropping\n",
+ skb->len,
+ rn->mtu + HFI1_IPOIB_ENCAP_LEN);
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ txp.dd = priv->dd;
+ txp.ah_attr = &ibah_to_rvtah(address)->attr;
+ txp.ibp = to_iport(priv->device, priv->port_num);
+ txp.txq = &priv->txqs[skb_get_queue_mapping(skb)];
+ txp.dqpn = dqpn;
+ txp.flow.sc5 = txp.ibp->sl_to_sc[rdma_ah_get_sl(txp.ah_attr)];
+ txp.flow.tx_queue = (u8)skb_get_queue_mapping(skb);
+ txp.entropy = hfi1_ipoib_calc_entropy(skb);
+
+ if (netdev_xmit_more() || !list_empty(&txp.txq->tx_list))
+ return hfi1_ipoib_send_dma_list(dev, skb, &txp);
+
+ return hfi1_ipoib_send_dma_single(dev, skb, &txp);
+}
+
+/*
+ * hfi1_ipoib_sdma_sleep - ipoib sdma sleep function
+ *
+ * This function gets called from sdma_send_txreq() when there are not enough
+ * sdma descriptors available to send the packet. It adds the Tx queue's wait
+ * structure to the sdma engine's dmawait list so it is woken up when
+ * descriptors become available.
+ */
+static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *txreq,
+ uint seq,
+ bool pkts_sent)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait->iow, struct hfi1_ipoib_txq, wait);
+
+ write_seqlock(&sde->waitlock);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED)) {
+ if (sdma_progress(sde, seq, txreq)) {
+ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+
+ if (list_empty(&txq->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+
+ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ }
+
+ write_sequnlock(&sde->waitlock);
+ return -EINVAL;
+}
+
+/*
+ * hfi1_ipoib_sdma_wakeup - ipoib sdma wakeup function
+ *
+ * This function gets called when SDMA descriptors become available and the Tx
+ * queue's wait structure was previously added to the sdma engine's dmawait list.
+ */
+static void hfi1_ipoib_sdma_wakeup(struct iowait *wait, int reason)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+
+ if (likely(txq->priv->netdev->reg_state == NETREG_REGISTERED))
+ iowait_schedule(wait, system_highpri_wq, WORK_CPU_UNBOUND);
+}
+
+static void hfi1_ipoib_flush_txq(struct work_struct *work)
+{
+ struct iowait_work *ioww =
+ container_of(work, struct iowait_work, iowork);
+ struct iowait *wait = iowait_ioww_to_iow(ioww);
+ struct hfi1_ipoib_txq *txq =
+ container_of(wait, struct hfi1_ipoib_txq, wait);
+ struct net_device *dev = txq->priv->netdev;
+
+ if (likely(dev->reg_state == NETREG_REGISTERED) &&
+ likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
+ likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
+ netif_wake_subqueue(dev, txq->q_idx);
+}
+
+int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+{
+ struct net_device *dev = priv->netdev;
+ char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
+ unsigned long tx_ring_size;
+ int i;
+
+ /*
+ * The ring holds one entry less than tx_ring_size.
+ * Round up to the next power of 2 so it can hold at least tx_queue_len entries.
+ */
+ tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);
+
+ snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
+ priv->txreq_cache = kmem_cache_create(buf,
+ sizeof(struct ipoib_txreq),
+ 0,
+ 0,
+ NULL);
+ if (!priv->txreq_cache)
+ return -ENOMEM;
+
+ priv->tx_napis = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct napi_struct),
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (!priv->tx_napis)
+ goto free_txreq_cache;
+
+ priv->txqs = kcalloc_node(dev->num_tx_queues,
+ sizeof(struct hfi1_ipoib_txq),
+ GFP_ATOMIC,
+ priv->dd->node);
+ if (!priv->txqs)
+ goto free_tx_napis;
+
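+ /* Fold the per-cpu counters into the totals under the u64_stats seqcount */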
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ iowait_init(&txq->wait,
+ 0,
+ hfi1_ipoib_flush_txq,
+ NULL,
+ hfi1_ipoib_sdma_sleep,
+ hfi1_ipoib_sdma_wakeup,
+ NULL,
+ NULL);
+ txq->priv = priv;
+ txq->sde = NULL;
+ INIT_LIST_HEAD(&txq->tx_list);
+ atomic64_set(&txq->complete_txreqs, 0);
+ txq->q_idx = i;
+ txq->flow.tx_queue = 0xff;
+ txq->flow.sc5 = 0xff;
+ txq->pkts_sent = false;
+
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
+ priv->dd->node);
+
+ txq->tx_ring.items =
+ vzalloc_node(array_size(tx_ring_size,
+ sizeof(struct ipoib_txreq)),
+ priv->dd->node);
+ if (!txq->tx_ring.items)
+ goto free_txqs;
+
+ spin_lock_init(&txq->tx_ring.producer_lock);
+ spin_lock_init(&txq->tx_ring.consumer_lock);
+ txq->tx_ring.max_items = tx_ring_size;
+
+ txq->napi = &priv->tx_napis[i];
+ netif_tx_napi_add(dev, txq->napi,
+ hfi1_ipoib_process_tx_ring,
+ NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+
+free_txqs:
+ for (i--; i >= 0; i--) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ netif_napi_del(txq->napi);
+ vfree(txq->tx_ring.items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+
+free_tx_napis:
+ kfree(priv->tx_napis);
+ priv->tx_napis = NULL;
+
+free_txreq_cache:
+ kmem_cache_destroy(priv->txreq_cache);
+ priv->txreq_cache = NULL;
+ return -ENOMEM;
+}
+
+static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
+{
+ struct sdma_txreq *txreq;
+ struct sdma_txreq *txreq_tmp;
+ atomic64_t *complete_txreqs = &txq->complete_txreqs;
+
+ list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
+ struct ipoib_txreq *tx =
+ container_of(txreq, struct ipoib_txreq, txreq);
+
+ list_del(&txreq->list);
+ sdma_txclean(txq->priv->dd, &tx->txreq);
+ dev_kfree_skb_any(tx->skb);
+ kmem_cache_free(txq->priv->txreq_cache, tx);
+ atomic64_inc(complete_txreqs);
+ }
+
+ if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ dd_dev_warn(txq->priv->dd,
+ "txq %d not empty found %llu requests\n",
+ txq->q_idx,
+ hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(complete_txreqs)));
+}
+
+void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->netdev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ iowait_cancel_work(&txq->wait);
+ iowait_sdma_drain(&txq->wait);
+ hfi1_ipoib_drain_tx_list(txq);
+ netif_napi_del(txq->napi);
+ (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ vfree(txq->tx_ring.items);
+ }
+
+ kfree(priv->txqs);
+ priv->txqs = NULL;
+
+ kfree(priv->tx_napis);
+ priv->tx_napis = NULL;
+
+ kmem_cache_destroy(priv->txreq_cache);
+ priv->txreq_cache = NULL;
+}
+
+void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_enable(txq->napi);
+ }
+}
+
+void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
+{
+ struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+
+ napi_disable(txq->napi);
+ (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ }
+}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 14d2a90964c3..24ca17b77b72 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -333,7 +333,7 @@ static void do_remove(struct mmu_rb_handler *handler,
/*
* Work queue function to remove all nodes that have been queued up to
- * be removed. The key feature is that mm->mmap_sem is not being held
+ * be removed. The key feature is that mm->mmap_lock is not being held
* and the remove callback can sleep while taking it, if needed.
*/
static void handle_remove(struct work_struct *work)
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
index db82db497b2c..d61ee853d215 100644
--- a/drivers/infiniband/hw/hfi1/msix.c
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
+#include "netdev.h"
/**
* msix_initialize() - Calculate, request and configure MSIx IRQs
@@ -69,7 +70,7 @@ int msix_initialize(struct hfi1_devdata *dd)
* one for each VNIC context
* ...any new IRQs should be added here.
*/
- total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;
if (total >= CCE_NUM_MSIX_VECTORS)
return -EINVAL;
@@ -140,7 +141,7 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
if (ret) {
dd_dev_err(dd,
- "%s: request for IRQ %d failed, MSIx %lu, err %d\n",
+ "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
name, irq, nr, ret);
spin_lock(&dd->msix_info.msix_lock);
__clear_bit(nr, dd->msix_info.in_use_msix);
@@ -160,7 +161,7 @@ static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
/* This is a request, so a failure is not fatal */
ret = hfi1_get_irq_affinity(dd, me);
if (ret)
- dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
+ dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);
return nr;
}
@@ -171,7 +172,8 @@ static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
const char *name)
{
int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
- IRQ_RCVCTXT, name);
+ rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
+ name);
if (nr < 0)
return nr;
@@ -204,6 +206,21 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
}
/**
+ * msix_netdev_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * for netdev contexts
+ * @rcd: valid netdev context
+ */
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ char name[MAX_NAME_SIZE];
+
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
+ rcd->dd->unit, rcd->ctxt);
+ return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
+ NULL, name);
+}
+
+/**
* msix_request_smda_ira() - Helper for getting SDMA IRQ resources
* @sde: valid sdma engine
*
@@ -355,15 +372,16 @@ void msix_clean_up_interrupts(struct hfi1_devdata *dd)
}
/**
- * msix_vnic_syncrhonize_irq() - Vnic IRQ synchronize
+ * msix_netdev_synchronize_irq() - netdev IRQ synchronize
* @dd: valid devdata
*/
-void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
{
int i;
+ int ctxt_count = hfi1_netdev_ctxt_count(dd);
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+ for (i = 0; i < ctxt_count; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
struct hfi1_msix_entry *me;
me = &dd->msix_info.msix_entries[rcd->msix_intr];
diff --git a/drivers/infiniband/hw/hfi1/msix.h b/drivers/infiniband/hw/hfi1/msix.h
index 1a02ab7971c8..e63e944bf0fc 100644
--- a/drivers/infiniband/hw/hfi1/msix.h
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -59,7 +59,8 @@ int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
int msix_request_sdma_irq(struct sdma_engine *sde);
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
-/* VNIC interface */
-void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
+/* Netdev interface */
+void msix_netdev_synchronize_irq(struct hfi1_devdata *dd);
+int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd);
#endif
diff --git a/drivers/infiniband/hw/hfi1/netdev.h b/drivers/infiniband/hw/hfi1/netdev.h
new file mode 100644
index 000000000000..947543a3e0c4
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+#ifndef HFI1_NETDEV_H
+#define HFI1_NETDEV_H
+
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/xarray.h>
+
+/**
+ * struct hfi1_netdev_rxq - Receive queue for the HFI dummy netdev.
+ * Both IPoIB and VNIC netdevices will be working on
+ * top of this device.
+ * @napi: napi object
+ * @priv: ptr to netdev_priv
+ * @rcd: ptr to receive context data
+ */
+struct hfi1_netdev_rxq {
+ struct napi_struct napi;
+ struct hfi1_netdev_priv *priv;
+ struct hfi1_ctxtdata *rcd;
+};
+
+/*
+ * Number of netdev contexts used. Ensure it is less than or equal to
+ * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
+ */
+#define HFI1_MAX_NETDEV_CTXTS 8
+
+/* Number of NETDEV RSM entries */
+#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
+
+/**
+ * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
+ * @dd: hfi1_devdata
+ * @rxq: pointer to dummy netdev receive queues.
+ * @num_rx_q: number of receive queues
+ * @rmt_start: first free index in the RMT array
+ * @dev_tbl: netdev table for unique identifiers used by VNIC and IPoIB VLANs.
+ * @enabled: atomic counter of netdevs enabling receive queues.
+ * When 0 NAPI will be disabled.
+ * @netdevs: atomic counter of netdevs using dummy netdev.
+ * When 0 receive queues will be freed.
+ */
+struct hfi1_netdev_priv {
+ struct hfi1_devdata *dd;
+ struct hfi1_netdev_rxq *rxq;
+ int num_rx_q;
+ int rmt_start;
+ struct xarray dev_tbl;
+ /* count of enabled napi polls */
+ atomic_t enabled;
+ /* count of netdevs on top */
+ atomic_t netdevs;
+};
+
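+/*
+ * The private data is allocated immediately after the dummy net_device
+ * (see hfi1_netdev_alloc()), so &dev[1] points at it.
+ */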
+static inline
+struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
+{
+ return (struct hfi1_netdev_priv *)&dev[1];
+}
+
+static inline
+int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->num_rx_q;
+}
+
+static inline
+struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->rxq[ctxt].rcd;
+}
+
+static inline
+int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return priv->rmt_start;
+}
+
+static inline
+void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ priv->rmt_start = rmt_idx;
+}
+
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask);
+
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
+int hfi1_netdev_alloc(struct hfi1_devdata *dd);
+void hfi1_netdev_free(struct hfi1_devdata *dd);
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
+
+/* chip.c */
+int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
+
+#endif /* HFI1_NETDEV_H */
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
new file mode 100644
index 000000000000..63688e85e8da
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ *
+ */
+
+/*
+ * This file contains HFI1 support for netdev RX functionality
+ */
+
+#include "sdma.h"
+#include "verbs.h"
+#include "netdev.h"
+#include "hfi.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
+
+static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+ struct hfi1_ctxtdata *uctxt)
+{
+ unsigned int rcvctrl_ops;
+ struct hfi1_devdata *dd = priv->dd;
+ int ret;
+
+ uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
+ uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;
+
+ /* Now allocate the RcvHdr queue and eager buffers. */
+ ret = hfi1_create_rcvhdrq(dd, uctxt);
+ if (ret)
+ goto done;
+
+ ret = hfi1_setup_eagerbufs(uctxt);
+ if (ret)
+ goto done;
+
+ clear_rcvhdrtail(uctxt);
+
+ rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
+ rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;
+
+ if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
+ rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
+ rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
+done:
+ return ret;
+}
+
+static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata **ctxt)
+{
+ struct hfi1_ctxtdata *uctxt;
+ int ret;
+
+ if (dd->flags & HFI1_FROZEN)
+ return -EIO;
+
+ ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
+ if (ret < 0) {
+ dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
+ return -ENOMEM;
+ }
+
+ uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+ /* Netdev contexts are always NO_RDMA_RTAIL */
+ uctxt->fast_handler = handle_receive_interrupt_napi_fp;
+ uctxt->slow_handler = handle_receive_interrupt_napi_sp;
+ hfi1_set_seq_cnt(uctxt, 1);
+ uctxt->is_vnic = true;
+
+ hfi1_stats.sps_ctxts++;
+
+ dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
+ *ctxt = uctxt;
+
+ return 0;
+}
+
+static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata *uctxt)
+{
+ flush_wc();
+
+ /*
+ * Disable receive context and interrupt available, reset all
+ * RcvCtxtCtrl bits to default values.
+ */
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_TIDFLOW_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
+ HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+
+ if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
+ msix_free_irq(dd, uctxt->msix_intr);
+
+ uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
+ uctxt->event_flags = 0;
+
+ hfi1_clear_tids(uctxt);
+ hfi1_clear_ctxt_pkey(dd, uctxt);
+
+ hfi1_stats.sps_ctxts--;
+
+ hfi1_free_ctxt(uctxt);
+}
+
+static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+ struct hfi1_ctxtdata **ctxt)
+{
+ int rc;
+ struct hfi1_devdata *dd = priv->dd;
+
+ rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
+ return rc;
+ }
+
+ rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+ if (rc) {
+ dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
+ hfi1_netdev_deallocate_ctxt(dd, *ctxt);
+ *ctxt = NULL;
+ }
+
+ return rc;
+}
+
+/**
+ * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
+ * @dd: device on which to allocate netdev contexts
+ * @available_contexts: count of available receive contexts
+ * @cpu_mask: mask of possible cpus to include for contexts
+ *
+ * Return: count of physical cores on a node or the remaining available recv
+ * contexts for netdev recv context usage up to the maximum of
+ * HFI1_MAX_NETDEV_CTXTS.
+ * A value of 0 can be returned when acceleration is explicitly turned off,
+ * a memory allocation error occurs, or there are no available contexts.
+ *
+ */
+u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
+ struct cpumask *cpu_mask)
+{
+ cpumask_var_t node_cpu_mask;
+ unsigned int available_cpus;
+
+ if (!HFI1_CAP_IS_KSET(AIP))
+ return 0;
+
+ /* Always give user contexts priority over netdev contexts */
+ if (available_contexts == 0) {
+ dd_dev_info(dd, "No receive contexts available for netdevs.\n");
+ return 0;
+ }
+
+ if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
+ dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
+ return 0;
+ }
+
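+ /* Restrict the candidate CPUs to the device's NUMA node */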
+ cpumask_and(node_cpu_mask, cpu_mask,
+ cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+
+ available_cpus = cpumask_weight(node_cpu_mask);
+
+ free_cpumask_var(node_cpu_mask);
+
+ return min3(available_cpus, available_contexts,
+ (u32)HFI1_MAX_NETDEV_CTXTS);
+}
+
+static int hfi1_netdev_rxq_init(struct net_device *dev)
+{
+ int i;
+ int rc;
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
+ struct hfi1_devdata *dd = priv->dd;
+
+ priv->num_rx_q = dd->num_netdev_contexts;
+ priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
+ GFP_KERNEL, dd->node);
+
+ if (!priv->rxq) {
+ dd_dev_err(dd, "Unable to allocate netdev queue data\n");
+ return (-ENOMEM);
+ }
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+
+ hfi1_rcd_get(rxq->rcd);
+ rxq->priv = priv;
+ rxq->rcd->napi = &rxq->napi;
+ dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
+ i, rxq->rcd->ctxt);
+ /*
+ * Disable BUSY_POLL on this NAPI as this is not supported
+ * right now.
+ */
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
+ netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
+ rc = msix_netdev_request_rcd_irq(rxq->rcd);
+ if (rc)
+ goto bail_context_irq_failure;
+ }
+
+ return 0;
+
+bail_context_irq_failure:
+ dd_dev_err(dd, "Unable to allot receive context\n");
+ for (; i >= 0; i--) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ if (rxq->rcd) {
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+ }
+ kfree(priv->rxq);
+ priv->rxq = NULL;
+
+ return rc;
+}
+
+static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+{
+ int i;
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
+ struct hfi1_devdata *dd = priv->dd;
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ netif_napi_del(&rxq->napi);
+ hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
+ hfi1_rcd_put(rxq->rcd);
+ rxq->rcd = NULL;
+ }
+
+ kfree(priv->rxq);
+ priv->rxq = NULL;
+ priv->num_rx_q = 0;
+}
+
+static void enable_queues(struct hfi1_netdev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+ napi_enable(&rxq->napi);
+ hfi1_rcvctrl(priv->dd,
+ HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
+ rxq->rcd);
+ }
+}
+
+static void disable_queues(struct hfi1_netdev_priv *priv)
+{
+ int i;
+
+ msix_netdev_synchronize_irq(priv->dd);
+
+ for (i = 0; i < priv->num_rx_q; i++) {
+ struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+
+ dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+ rxq->rcd->ctxt);
+
+ /* wait for napi if it was scheduled */
+ hfi1_rcvctrl(priv->dd,
+ HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
+ rxq->rcd);
+ napi_synchronize(&rxq->napi);
+ napi_disable(&rxq->napi);
+ }
+}
+
+/**
+ * hfi1_netdev_rx_init - Increments the netdevs counter. On the first call it
+ * allocates the receive queue data and calls netif_napi_add
+ * for each queue.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ int res;
+
+ if (atomic_fetch_inc(&priv->netdevs))
+ return 0;
+
+ mutex_lock(&hfi1_mutex);
+ init_dummy_netdev(dd->dummy_netdev);
+ res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+ mutex_unlock(&hfi1_mutex);
+ return res;
+}
+
+/**
+ * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches 0,
+ * napi is deleted and the receive queue memory is freed.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ /* destroy the RX queues only if it is the last netdev going away */
+ if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+ mutex_lock(&hfi1_mutex);
+ hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+ mutex_unlock(&hfi1_mutex);
+ }
+
+ return 0;
+}
+
+/**
+ * hfi1_netdev_alloc - Allocates the dummy netdev and private data. It is
+ * required because the RMT index and MSI-X interrupts can
+ * only be set during driver initialization.
+ *
+ * @dd: hfi1 dev data
+ */
+int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+ const int netdev_size = sizeof(*dd->dummy_netdev) +
+ sizeof(struct hfi1_netdev_priv);
+
+ dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
+ dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+
+ if (!dd->dummy_netdev)
+ return -ENOMEM;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ priv->dd = dd;
+ xa_init(&priv->dev_tbl);
+ atomic_set(&priv->enabled, 0);
+ atomic_set(&priv->netdevs, 0);
+
+ return 0;
+}
+
+void hfi1_netdev_free(struct hfi1_devdata *dd)
+{
+ if (dd->dummy_netdev) {
+ dd_dev_info(dd, "hfi1 netdev freed\n");
+ free_netdev(dd->dummy_netdev);
+ dd->dummy_netdev = NULL;
+ }
+}
+
+/**
+ * hfi1_netdev_enable_queues - This is the napi enable function.
+ * It enables the napi objects associated with the receive queues.
+ * Each caller increments an atomic counter; only the first call actually
+ * enables the queues. The disable function decrements the counter and,
+ * when it reaches 0, calls napi_disable for every queue.
+ *
+ * @dd: hfi1 dev data
+ */
+void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+
+ if (!dd->dummy_netdev)
+ return;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ if (atomic_fetch_inc(&priv->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ enable_queues(priv);
+ mutex_unlock(&hfi1_mutex);
+}
+
+void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
+{
+ struct hfi1_netdev_priv *priv;
+
+ if (!dd->dummy_netdev)
+ return;
+
+ priv = hfi1_netdev_priv(dd->dummy_netdev);
+ if (atomic_dec_if_positive(&priv->enabled))
+ return;
+
+ mutex_lock(&hfi1_mutex);
+ disable_queues(priv);
+ mutex_unlock(&hfi1_mutex);
+}
+
+/**
+ * hfi1_netdev_add_data - Registers data with a unique identifier so it can
+ * be requested later. This is needed for the VNIC and IPoIB VLAN
+ * implementations.
+ * The backing xarray provides its own locking.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ * @data: data to be associated with index
+ */
+int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+}
+
+/**
+ * hfi1_netdev_remove_data - Removes data with previously given id.
+ * Returns the reference to removed entry.
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_erase(&priv->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_data - Gets data with given id
+ *
+ * @dd: hfi1 dev data
+ * @id: requested integer id up to INT_MAX
+ */
+void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+
+ return xa_load(&priv->dev_tbl, id);
+}
+
+/**
+ * hfi1_netdev_get_first_data - Gets the first entry with an id greater than
+ * or equal to *start_id.
+ *
+ * @dd: hfi1 dev data
+ * @start_id: requested integer id up to INT_MAX; updated to the id of the
+ *            entry when one is found
+ */
+void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
+{
+ struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+ unsigned long index = *start_id;
+ void *ret;
+
+ ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+ *start_id = (int)index;
+ return ret;
+}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index f8e733aa3bb8..0c2ae9f7b3e8 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2019 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -186,15 +186,6 @@ static void flush_iowait(struct rvt_qp *qp)
write_sequnlock_irqrestore(lock, flags);
}
-static inline int opa_mtu_enum_to_int(int mtu)
-{
- switch (mtu) {
- case OPA_MTU_8192: return 8192;
- case OPA_MTU_10240: return 10240;
- default: return -1;
- }
-}
-
/**
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
@@ -202,15 +193,10 @@ static inline int opa_mtu_enum_to_int(int mtu)
*/
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
- int val;
-
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
mtu = OPA_MTU_8192;
- val = opa_mtu_enum_to_int((int)mtu);
- if (val > 0)
- return val;
- return ib_mtu_enum_to_int(mtu);
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 8a2e0d9351e9..243b4ba0b6f6 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * Copyright(c) 2018 Intel Corporation.
+ * Copyright(c) 2018 - 2020 Intel Corporation.
*
*/
@@ -194,7 +194,7 @@ void tid_rdma_opfn_init(struct rvt_qp *qp, struct tid_rdma_params *p)
{
struct hfi1_qp_priv *priv = qp->priv;
- p->qp = (kdeth_qp << 16) | priv->rcd->ctxt;
+ p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
p->max_len = TID_RDMA_MAX_SEGMENT_SIZE;
p->jkey = priv->rcd->jkey;
p->max_read = TID_RDMA_MAX_READ_SEGS_PER_REQ;
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 9a3d236bcc88..b219ea90fd6f 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -47,6 +47,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "exp_rcv.h"
+#include "ipoib.h"
static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
@@ -126,6 +127,7 @@ const char *hfi1_trace_get_packet_l2_str(u8 l2)
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
+#define DETH_ENTROPY_PRN "deth qkey:0x%.8x sqpn:0x%.6x entropy:0x%.2x"
#define IETH_PRN "ieth rkey:0x%.8x"
#define ATOMICACKETH_PRN "origdata:%llx"
#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
@@ -444,6 +446,12 @@ const char *parse_everbs_hdrs(
break;
/* deth */
case OP(UD, SEND_ONLY):
+ trace_seq_printf(p, DETH_ENTROPY_PRN,
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK,
+ be32_to_cpu(eh->ud.deth[1]) >>
+ HFI1_IPOIB_ENTROPY_SHIFT);
+ break;
case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, DETH_PRN,
be32_to_cpu(eh->ud.deth[0]),
@@ -512,6 +520,38 @@ u16 hfi1_trace_get_tid_idx(u32 ent)
return EXP_TID_GET(ent, IDX);
}
+struct hfi1_ctxt_hist {
+ atomic_t count;
+ atomic_t data[255];
+};
+
+struct hfi1_ctxt_hist hist = {
+ .count = ATOMIC_INIT(0)
+};
+
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt)
+{
+ int i, len = ARRAY_SIZE(hist.data);
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned long packet_count = atomic_fetch_inc(&hist.count);
+
+ trace_seq_printf(p, "packet[%lu]", packet_count);
+ for (i = 0; i < len; ++i) {
+ unsigned long val;
+ atomic_t *count = &hist.data[i];
+
+ if (ctxt == i)
+ val = atomic_fetch_inc(count);
+ else
+ val = atomic_read(count);
+
+ if (val)
+ trace_seq_printf(p, "(%d:%lu)", i, val);
+ }
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
__hfi1_trace_fn(AFFINITY);
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
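
For readers unfamiliar with the pattern, the new hfi1_trace_print_rsm_hist() above keeps a lock-free per-receive-context packet histogram using atomic counters and prints every non-zero bucket each time it runs. Below is a minimal userspace sketch of that counting pattern only, using C11 atomics and printf in place of the kernel's atomic_t and trace_seq; the names and values are illustrative and are not part of the driver.

#include <stdatomic.h>
#include <stdio.h>

#define NUM_CTXTS 255

static atomic_ulong packet_count;
static atomic_ulong ctxt_hist[NUM_CTXTS];

/* Count one packet for @ctxt and dump every non-zero bucket. */
static void record_packet(unsigned int ctxt)
{
	unsigned long n = atomic_fetch_add(&packet_count, 1);

	if (ctxt < NUM_CTXTS)
		atomic_fetch_add(&ctxt_hist[ctxt], 1);

	printf("packet[%lu]", n);
	for (unsigned int i = 0; i < NUM_CTXTS; i++) {
		unsigned long val = atomic_load(&ctxt_hist[i]);

		if (val)
			printf("(%u:%lu)", i, val);
	}
	printf("\n");
}

int main(void)
{
	record_packet(3);
	record_packet(3);
	record_packet(7);
	return 0;
}
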
diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h
index b5fc5c6cd52f..d8c168dc3ea8 100644
--- a/drivers/infiniband/hw/hfi1/trace_ctxts.h
+++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h
@@ -1,5 +1,5 @@
/*
-* Copyright(c) 2015, 2016 Intel Corporation.
+* Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -138,6 +138,15 @@ TRACE_EVENT(hfi1_ctxt_info,
)
);
+const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
+TRACE_EVENT(ctxt_rsm_hist,
+ TP_PROTO(unsigned int ctxt),
+ TP_ARGS(ctxt),
+ TP_STRUCT__entry(__field(unsigned int, ctxt)),
+ TP_fast_assign(__entry->ctxt = ctxt;),
+ TP_printk("%s", hfi1_trace_print_rsm_hist(p, __entry->ctxt))
+);
+
#endif /* __HFI1_TRACE_CTXTS_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 2f6323ad9c59..30865635b449 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -66,6 +66,7 @@
#include "vnic.h"
#include "fault.h"
#include "affinity.h"
+#include "ipoib.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
@@ -1342,7 +1343,7 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE |
IB_DEVICE_MEM_MGT_EXTENSIONS |
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC;
+ IB_DEVICE_RDMA_NETDEV_OPA;
rdi->dparms.props.page_size_cap = PAGE_SIZE;
rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
rdi->dparms.props.vendor_part_id = dd->pcidev->device;
@@ -1360,7 +1361,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_pd = hfi1_max_pds;
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
@@ -1439,6 +1439,8 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
+ props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu :
+ ib_mtu_enum_to_int(props->max_mtu);
return 0;
}
@@ -1793,6 +1795,7 @@ static const struct ib_device_ops hfi1_dev_ops = {
.modify_device = modify_device,
/* keep process mad in the driver */
.process_mad = hfi1_process_mad,
+ .rdma_netdev_get_params = hfi1_ipoib_rn_get_params,
};
/**
@@ -1863,9 +1866,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.qpn_start = 0;
dd->verbs_dev.rdi.dparms.qpn_inc = 1;
dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
- dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
- dd->verbs_dev.rdi.dparms.qpn_res_end =
- dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = RVT_KDETH_QP_BASE;
+ dd->verbs_dev.rdi.dparms.qpn_res_end = RVT_AIP_QP_MAX;
dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index 5ae781514e32..66150a13f374 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_VNIC_H
#define _HFI1_VNIC_H
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,7 @@
#define HFI1_VNIC_SC_SHIFT 4
#define HFI1_VNIC_MAX_QUEUE 16
+#define HFI1_NUM_VNIC_CTXT 8
/**
* struct hfi1_vnic_sdma - VNIC per Tx ring SDMA information
@@ -104,7 +105,6 @@ struct hfi1_vnic_rx_queue {
struct hfi1_vnic_vport_info *vinfo;
struct net_device *netdev;
struct napi_struct napi;
- struct sk_buff_head skbq;
};
/**
@@ -146,7 +146,6 @@ struct hfi1_vnic_vport_info {
/* vnic hfi1 internal functions */
void hfi1_vnic_setup(struct hfi1_devdata *dd);
-void hfi1_vnic_cleanup(struct hfi1_devdata *dd);
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd);
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 6b14581b9965..a90824de0f57 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 - 2018 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -53,6 +53,7 @@
#include <linux/if_vlan.h>
#include "vnic.h"
+#include "netdev.h"
#define HFI_TX_TIMEOUT_MS 1000
@@ -62,114 +63,6 @@
static DEFINE_SPINLOCK(vport_cntr_lock);
-static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
-{
- unsigned int rcvctrl_ops = 0;
- int ret;
-
- uctxt->do_interrupt = &handle_receive_interrupt;
-
- /* Now allocate the RcvHdr queue and eager buffers. */
- ret = hfi1_create_rcvhdrq(dd, uctxt);
- if (ret)
- goto done;
-
- ret = hfi1_setup_eagerbufs(uctxt);
- if (ret)
- goto done;
-
- if (hfi1_rcvhdrtail_kvaddr(uctxt))
- clear_rcvhdrtail(uctxt);
-
- rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
- rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_ENB;
-
- if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
- rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
- rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
- rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
-
- hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
-done:
- return ret;
-}
-
-static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata **vnic_ctxt)
-{
- struct hfi1_ctxtdata *uctxt;
- int ret;
-
- if (dd->flags & HFI1_FROZEN)
- return -EIO;
-
- ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
- if (ret < 0) {
- dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
- return -ENOMEM;
- }
-
- uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
- HFI1_CAP_KGET(NODROP_RHQ_FULL) |
- HFI1_CAP_KGET(NODROP_EGR_FULL) |
- HFI1_CAP_KGET(DMA_RTAIL);
- uctxt->seq_cnt = 1;
- uctxt->is_vnic = true;
-
- msix_request_rcd_irq(uctxt);
-
- hfi1_stats.sps_ctxts++;
- dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
- *vnic_ctxt = uctxt;
-
- return 0;
-}
-
-static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata *uctxt)
-{
- dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
- flush_wc();
-
- /*
- * Disable receive context and interrupt available, reset all
- * RcvCtxtCtrl bits to default values.
- */
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_TIDFLOW_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
- HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
-
- /* msix_intr will always be > 0, only clean up if this is true */
- if (uctxt->msix_intr)
- msix_free_irq(dd, uctxt->msix_intr);
-
- uctxt->event_flags = 0;
-
- hfi1_clear_tids(uctxt);
- hfi1_clear_ctxt_pkey(dd, uctxt);
-
- hfi1_stats.sps_ctxts--;
-
- hfi1_free_ctxt(uctxt);
-}
-
-void hfi1_vnic_setup(struct hfi1_devdata *dd)
-{
- xa_init(&dd->vnic.vesws);
-}
-
-void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
-{
- WARN_ON(!xa_empty(&dd->vnic.vesws));
-}
-
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \
u64 *src64, *dst64; \
for (src64 = &qstats->x_grp.unicast, \
@@ -179,6 +72,9 @@ void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
} \
} while (0)
+#define VNIC_MASK (0xFF)
+#define VNIC_ID(val) ((1ull << 24) | ((val) & VNIC_MASK))
+
/* hfi1_vnic_update_stats - update statistics */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
struct opa_vnic_stats *stats)
@@ -454,71 +350,25 @@ static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
return rc;
}
-static inline struct sk_buff *hfi1_vnic_get_skb(struct hfi1_vnic_rx_queue *rxq)
+static struct hfi1_vnic_vport_info *get_vnic_port(struct hfi1_devdata *dd,
+ int vesw_id)
{
- unsigned char *pad_info;
- struct sk_buff *skb;
+ int vnic_id = VNIC_ID(vesw_id);
- skb = skb_dequeue(&rxq->skbq);
- if (unlikely(!skb))
- return NULL;
-
- /* remove tail padding and icrc */
- pad_info = skb->data + skb->len - 1;
- skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
- ((*pad_info) & 0x7)));
-
- return skb;
+ return hfi1_netdev_get_data(dd, vnic_id);
}
-/* hfi1_vnic_handle_rx - handle skb receive */
-static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
- int *work_done, int work_to_do)
+static struct hfi1_vnic_vport_info *get_first_vnic_port(struct hfi1_devdata *dd)
{
- struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
- struct sk_buff *skb;
- int rc;
-
- while (1) {
- if (*work_done >= work_to_do)
- break;
-
- skb = hfi1_vnic_get_skb(rxq);
- if (unlikely(!skb))
- break;
-
- rc = hfi1_vnic_decap_skb(rxq, skb);
- /* update rx counters */
- hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
- if (unlikely(rc)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- skb_checksum_none_assert(skb);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
-
- napi_gro_receive(&rxq->napi, skb);
- (*work_done)++;
- }
-}
-
-/* hfi1_vnic_napi - napi receive polling callback function */
-static int hfi1_vnic_napi(struct napi_struct *napi, int budget)
-{
- struct hfi1_vnic_rx_queue *rxq = container_of(napi,
- struct hfi1_vnic_rx_queue, napi);
- struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
- int work_done = 0;
+ struct hfi1_vnic_vport_info *vinfo;
+ int next_id = VNIC_ID(0);
- v_dbg("napi %d budget %d\n", rxq->idx, budget);
- hfi1_vnic_handle_rx(rxq, &work_done, budget);
+ vinfo = hfi1_netdev_get_first_data(dd, &next_id);
- v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
- if (work_done < budget)
- napi_complete(napi);
+ if (next_id > VNIC_ID(VNIC_MASK))
+ return NULL;
- return work_done;
+ return vinfo;
}
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
@@ -527,13 +377,14 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
struct hfi1_vnic_vport_info *vinfo = NULL;
struct hfi1_vnic_rx_queue *rxq;
struct sk_buff *skb;
- int l4_type, vesw_id = -1;
+ int l4_type, vesw_id = -1, rc;
u8 q_idx;
+ unsigned char *pad_info;
l4_type = hfi1_16B_get_l4(packet->ebuf);
if (likely(l4_type == OPA_16B_L4_ETHR)) {
vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
- vinfo = xa_load(&dd->vnic.vesws, vesw_id);
+ vinfo = get_vnic_port(dd, vesw_id);
/*
* In case of invalid vesw id, count the error on
@@ -541,10 +392,8 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
*/
if (unlikely(!vinfo)) {
struct hfi1_vnic_vport_info *vinfo_tmp;
- unsigned long index = 0;
- vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
- XA_PRESENT);
+ vinfo_tmp = get_first_vnic_port(dd);
if (vinfo_tmp) {
spin_lock(&vport_cntr_lock);
vinfo_tmp->stats[0].netstats.rx_nohandler++;
@@ -563,12 +412,6 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
rxq = &vinfo->rxq[q_idx];
if (unlikely(!netif_oper_up(vinfo->netdev))) {
vinfo->stats[q_idx].rx_drop_state++;
- skb_queue_purge(&rxq->skbq);
- return;
- }
-
- if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
- vinfo->stats[q_idx].netstats.rx_fifo_errors++;
return;
}
@@ -580,62 +423,65 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
memcpy(skb->data, packet->ebuf, packet->tlen);
skb_put(skb, packet->tlen);
- skb_queue_tail(&rxq->skbq, skb);
- if (napi_schedule_prep(&rxq->napi)) {
- v_dbg("napi %d scheduling\n", q_idx);
- __napi_schedule(&rxq->napi);
+ pad_info = skb->data + skb->len - 1;
+ skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
+ ((*pad_info) & 0x7)));
+
+ rc = hfi1_vnic_decap_skb(rxq, skb);
+
+ /* update rx counters */
+ hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
+ if (unlikely(rc)) {
+ dev_kfree_skb_any(skb);
+ return;
}
+
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
+
+ napi_gro_receive(&rxq->napi, skb);
}
static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
struct net_device *netdev = vinfo->netdev;
- int i, rc;
+ int rc;
/* ensure virtual eth switch id is valid */
if (!vinfo->vesw_id)
return -EINVAL;
- rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
+ rc = hfi1_netdev_add_data(dd, VNIC_ID(vinfo->vesw_id), vinfo);
if (rc < 0)
return rc;
- for (i = 0; i < vinfo->num_rx_q; i++) {
- struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
-
- skb_queue_head_init(&rxq->skbq);
- napi_enable(&rxq->napi);
- }
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc)
+ goto err_remove;
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
set_bit(HFI1_VNIC_UP, &vinfo->flags);
return 0;
+
+err_remove:
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
+ return rc;
}
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- u8 i;
clear_bit(HFI1_VNIC_UP, &vinfo->flags);
netif_carrier_off(vinfo->netdev);
netif_tx_disable(vinfo->netdev);
- xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
-
- /* ensure irqs see the change */
- msix_vnic_synchronize_irq(dd);
+ hfi1_netdev_remove_data(dd, VNIC_ID(vinfo->vesw_id));
- /* remove unread skbs */
- for (i = 0; i < vinfo->num_rx_q; i++) {
- struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];
-
- napi_disable(&rxq->napi);
- skb_queue_purge(&rxq->skbq);
- }
+ hfi1_netdev_rx_destroy(dd);
}
static int hfi1_netdev_open(struct net_device *netdev)
@@ -660,70 +506,31 @@ static int hfi1_netdev_close(struct net_device *netdev)
return 0;
}
-static int hfi1_vnic_allot_ctxt(struct hfi1_devdata *dd,
- struct hfi1_ctxtdata **vnic_ctxt)
-{
- int rc;
-
- rc = allocate_vnic_ctxt(dd, vnic_ctxt);
- if (rc) {
- dd_dev_err(dd, "vnic ctxt alloc failed %d\n", rc);
- return rc;
- }
-
- rc = setup_vnic_ctxt(dd, *vnic_ctxt);
- if (rc) {
- dd_dev_err(dd, "vnic ctxt setup failed %d\n", rc);
- deallocate_vnic_ctxt(dd, *vnic_ctxt);
- *vnic_ctxt = NULL;
- }
-
- return rc;
-}
-
static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- int i, rc = 0;
+ int rc = 0;
mutex_lock(&hfi1_mutex);
- if (!dd->vnic.num_vports) {
+ if (!dd->vnic_num_vports) {
rc = hfi1_vnic_txreq_init(dd);
if (rc)
goto txreq_fail;
}
- for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
- rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
- if (rc)
- break;
- hfi1_rcd_get(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i]->vnic_q_idx = i;
- }
-
- if (i < vinfo->num_rx_q) {
- /*
- * If required amount of contexts is not
- * allocated successfully then remaining contexts
- * are released.
- */
- while (i-- > dd->vnic.num_ctxt) {
- deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
- hfi1_rcd_put(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i] = NULL;
- }
+ rc = hfi1_netdev_rx_init(dd);
+ if (rc) {
+ dd_dev_err(dd, "Unable to initialize netdev contexts\n");
goto alloc_fail;
}
- if (dd->vnic.num_ctxt != i) {
- dd->vnic.num_ctxt = i;
- hfi1_init_vnic_rsm(dd);
- }
+ hfi1_init_vnic_rsm(dd);
- dd->vnic.num_vports++;
+ dd->vnic_num_vports++;
hfi1_vnic_sdma_init(vinfo);
+
alloc_fail:
- if (!dd->vnic.num_vports)
+ if (!dd->vnic_num_vports)
hfi1_vnic_txreq_deinit(dd);
txreq_fail:
mutex_unlock(&hfi1_mutex);
@@ -733,20 +540,14 @@ txreq_fail:
static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
{
struct hfi1_devdata *dd = vinfo->dd;
- int i;
mutex_lock(&hfi1_mutex);
- if (--dd->vnic.num_vports == 0) {
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
- hfi1_rcd_put(dd->vnic.ctxt[i]);
- dd->vnic.ctxt[i] = NULL;
- }
+ if (--dd->vnic_num_vports == 0) {
hfi1_deinit_vnic_rsm(dd);
- dd->vnic.num_ctxt = 0;
hfi1_vnic_txreq_deinit(dd);
}
mutex_unlock(&hfi1_mutex);
+ hfi1_netdev_rx_destroy(dd);
}
static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
@@ -804,7 +605,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
struct rdma_netdev *rn;
int i, size, rc;
- if (!dd->num_vnic_contexts)
+ if (!dd->num_netdev_contexts)
return ERR_PTR(-ENOMEM);
if (!port_num || (port_num > dd->num_pports))
@@ -815,15 +616,16 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->num_sdma, dd->num_vnic_contexts);
+ chip_sdma_engines(dd),
+ dd->num_netdev_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
rn = netdev_priv(netdev);
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
- vinfo->num_tx_q = dd->num_sdma;
- vinfo->num_rx_q = dd->num_vnic_contexts;
+ vinfo->num_tx_q = chip_sdma_engines(dd);
+ vinfo->num_rx_q = dd->num_netdev_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id;
@@ -841,7 +643,6 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
rxq->idx = i;
rxq->vinfo = vinfo;
rxq->netdev = netdev;
- netif_napi_add(netdev, &rxq->napi, hfi1_vnic_napi, 64);
}
rc = hfi1_vnic_init(vinfo);
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 8a522e14ef62..5b2f9314edd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,13 +39,14 @@
#define HNS_ROCE_VLAN_SL_BIT_MASK 7
#define HNS_ROCE_VLAN_SL_SHIFT 13
-int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
const struct ib_gid_attr *gid_attr;
struct device *dev = hr_dev->dev;
struct hns_roce_ah *ah = to_hr_ah(ibah);
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
u16 vlan_id = 0xffff;
bool vlan_en = false;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index da574c26e063..a522cb2d29ea 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -157,84 +157,78 @@ void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
kfree(bitmap->table);
}
-void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
- struct hns_roce_buf *buf)
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
- int i;
struct device *dev = hr_dev->dev;
+ u32 size = buf->size;
+ int i;
+
+ if (size == 0)
+ return;
- if (buf->nbufs == 1) {
+ buf->size = 0;
+
+ if (hns_roce_buf_is_direct(buf)) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
- for (i = 0; i < buf->nbufs; ++i)
+ for (i = 0; i < buf->npages; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(dev, 1 << buf->page_shift,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
+ buf->page_list = NULL;
}
}
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf, u32 page_shift)
{
- int i = 0;
- dma_addr_t t;
+ struct hns_roce_buf_list *buf_list;
struct device *dev = hr_dev->dev;
- u32 page_size = 1 << page_shift;
- u32 order;
+ u32 page_size;
+ int i;
- /* SQ/RQ buf lease than one page, SQ + RQ = 8K */
+ /* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
+ buf->page_shift = max_t(int, HNS_HW_PAGE_SHIFT, page_shift);
+
+ page_size = 1 << buf->page_shift;
+ buf->npages = DIV_ROUND_UP(size, page_size);
+
+ /* required size is not bigger than one trunk size */
if (size <= max_direct) {
- buf->nbufs = 1;
- /* Npages calculated by page_size */
- order = get_order(size);
- if (order <= page_shift - PAGE_SHIFT)
- order = 0;
- else
- order -= page_shift - PAGE_SHIFT;
- buf->npages = 1 << order;
- buf->page_shift = page_shift;
- /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
- buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+ buf->page_list = NULL;
+ buf->direct.buf = dma_alloc_coherent(dev, size,
+ &buf->direct.map,
GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
} else {
- buf->nbufs = (size + page_size - 1) / page_size;
- buf->npages = buf->nbufs;
- buf->page_shift = page_shift;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
-
- if (!buf->page_list)
+ buf_list = kcalloc(buf->npages, sizeof(*buf_list), GFP_KERNEL);
+ if (!buf_list)
return -ENOMEM;
- for (i = 0; i < buf->nbufs; ++i) {
- buf->page_list[i].buf = dma_alloc_coherent(dev,
- page_size,
- &t,
- GFP_KERNEL);
-
- if (!buf->page_list[i].buf)
- goto err_free;
+ for (i = 0; i < buf->npages; i++) {
+ buf_list[i].buf = dma_alloc_coherent(dev, page_size,
+ &buf_list[i].map,
+ GFP_KERNEL);
+ if (!buf_list[i].buf)
+ break;
+ }
- buf->page_list[i].map = t;
+ if (i != buf->npages && i > 0) {
+ while (i-- > 0)
+ dma_free_coherent(dev, page_size,
+ buf_list[i].buf,
+ buf_list[i].map);
+ kfree(buf_list);
+ return -ENOMEM;
}
+ buf->page_list = buf_list;
}
+ buf->size = size;
return 0;
-
-err_free:
- hns_roce_buf_free(hr_dev, size, buf);
- return -ENOMEM;
}
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
@@ -246,33 +240,30 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
end = start + buf_cnt;
if (end > buf->npages) {
dev_err(hr_dev->dev,
- "invalid kmem region,offset %d,buf_cnt %d,total %d!\n",
+ "Failed to check kmem bufs, end %d + %d total %d!\n",
start, buf_cnt, buf->npages);
return -EINVAL;
}
total = 0;
for (i = start; i < end; i++)
- if (buf->nbufs == 1)
- bufs[total++] = buf->direct.map +
- ((dma_addr_t)i << buf->page_shift);
- else
- bufs[total++] = buf->page_list[i].map;
+ bufs[total++] = hns_roce_buf_page(buf, i);
return total;
}
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
- int page_shift)
+ unsigned int page_shift)
{
struct ib_block_iter biter;
int total = 0;
int idx = 0;
u64 addr;
- if (page_shift < PAGE_SHIFT) {
- dev_err(hr_dev->dev, "invalid page shift %d!\n", page_shift);
+ if (page_shift < HNS_HW_PAGE_SHIFT) {
+ dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
+ page_shift);
return -EINVAL;
}
@@ -292,49 +283,6 @@ done:
return total;
}
-void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
- int offset, int buf_cnt)
-{
- if (hopnum == HNS_ROCE_HOP_NUM_0)
- region->hopnum = 0;
- else
- region->hopnum = hopnum;
-
- region->offset = offset;
- region->count = buf_cnt;
-}
-
-void hns_roce_free_buf_list(dma_addr_t **bufs, int region_cnt)
-{
- int i;
-
- for (i = 0; i < region_cnt; i++) {
- kfree(bufs[i]);
- bufs[i] = NULL;
- }
-}
-
-int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
- dma_addr_t **bufs, int region_cnt)
-{
- struct hns_roce_buf_region *r;
- int i;
-
- for (i = 0; i < region_cnt; i++) {
- r = &regions[i];
- bufs[i] = kcalloc(r->count, sizeof(dma_addr_t), GFP_KERNEL);
- if (!bufs[i])
- goto err_alloc;
- }
-
- return 0;
-
-err_alloc:
- hns_roce_free_buf_list(bufs, i);
-
- return -ENOMEM;
-}
-
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
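
The reworked hns_roce_buf_alloc() above allocates the buffer either as one direct DMA-coherent region or as a list of per-page allocations, and it unwinds any pages already obtained when a later allocation fails. The following is a userspace sketch of that allocate-then-unwind pattern in general terms, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent(); the struct and function names are made up for illustration and are not the driver's.

#include <stdlib.h>

struct buf_page {
	void *buf;
};

/* Allocate @npages chunks; on partial failure free what was obtained. */
static int alloc_page_list(struct buf_page **out, int npages, size_t page_size)
{
	struct buf_page *pages = calloc(npages, sizeof(*pages));
	int i;

	if (!pages)
		return -1;

	for (i = 0; i < npages; i++) {
		pages[i].buf = malloc(page_size);
		if (!pages[i].buf)
			break;
	}

	if (i != npages) {		/* partial failure: unwind */
		while (i-- > 0)
			free(pages[i].buf);
		free(pages);
		return -1;
	}

	*out = pages;
	return 0;
}

int main(void)
{
	struct buf_page *pages;

	if (alloc_page_list(&pages, 8, 4096) == 0) {
		for (int i = 0; i < 8; i++)
			free(pages[i].buf);
		free(pages);
	}
	return 0;
}
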
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 8e95a1aa1b4f..f5669ff8cfeb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -33,10 +33,6 @@
#ifndef _HNS_ROCE_COMMON_H
#define _HNS_ROCE_COMMON_H
-#ifndef assert
-#define assert(cond)
-#endif
-
#define roce_write(dev, reg, val) writel((val), (dev)->reg_base + (reg))
#define roce_read(dev, reg) readl((dev)->reg_base + (reg))
#define roce_raw_write(value, addr) \
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 5bfb52ffd590..e87d616f7988 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,40 @@
#include <rdma/hns-abi.h>
#include "hns_roce_common.h"
-static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
+static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_hem_table *mtt_table;
struct hns_roce_cq_table *cq_table;
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle;
- u64 *mtts;
int ret;
- cq_table = &hr_dev->cq_table;
-
- /* Get the physical address of cq buf */
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt_table = &hr_dev->mr_table.mtt_cqe_table;
- else
- mtt_table = &hr_dev->mr_table.mtt_table;
-
- mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
- &dma_handle);
-
- if (!mtts) {
- dev_err(dev, "Failed to find mtt for CQ buf.\n");
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+ &dma_handle);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find CQ mtr\n");
return -EINVAL;
}
+ cq_table = &hr_dev->cq_table;
ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
if (ret) {
- dev_err(dev, "Num of CQ out of range.\n");
+ ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
if (ret) {
- dev_err(dev,
- "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
- ret, hr_cq->cqn);
+ ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
+ hr_cq->cqn, ret);
goto err_out;
}
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
- dev_err(dev, "Failed to xa_store CQ.\n");
+ ibdev_err(ibdev, "Failed to xa_store CQ\n");
goto err_put;
}
@@ -101,9 +90,9 @@ static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) {
- dev_err(dev,
- "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
- ret, hr_cq->cqn);
+ ibdev_err(ibdev,
+ "Failed to send create cmd for CQ(0x%lx), err %d\n",
+ hr_cq->cqn, ret);
goto err_xa;
}
@@ -126,7 +115,7 @@ err_out:
return ret;
}
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = hr_dev->dev;
@@ -153,190 +142,86 @@ void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
-static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
- struct hns_roce_ib_create_cq ucmd,
- struct ib_udata *udata)
-{
- struct hns_roce_buf *buf = &hr_cq->buf;
- struct hns_roce_mtt *mtt = &hr_cq->mtt;
- struct ib_umem **umem = &hr_cq->umem;
- u32 npages;
- int ret;
-
- *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, buf->size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(*umem))
- return PTR_ERR(*umem);
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt->mtt_type = MTT_TYPE_CQE;
- else
- mtt->mtt_type = MTT_TYPE_WQE;
-
- npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
- 1 << hr_dev->caps.cqe_buf_pg_sz);
- ret = hns_roce_mtt_init(hr_dev, npages, buf->page_shift, mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
- if (ret)
- goto err_mtt;
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
- ib_umem_release(*umem);
- return ret;
-}
-
-static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr)
{
- struct hns_roce_buf *buf = &hr_cq->buf;
- struct hns_roce_mtt *mtt = &hr_cq->mtt;
- int ret;
-
- ret = hns_roce_buf_alloc(hr_dev, buf->size, (1 << buf->page_shift) * 2,
- buf, buf->page_shift);
- if (ret)
- goto out;
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- mtt->mtt_type = MTT_TYPE_CQE;
- else
- mtt->mtt_type = MTT_TYPE_WQE;
-
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
- if (ret)
- goto err_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
- if (ret)
- goto err_mtt;
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
- hns_roce_buf_free(hr_dev, buf->size, buf);
-
-out:
- return ret;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+ buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+ hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err)
+ ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+
+ return err;
}
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
- hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
+ hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}
-static int create_user_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq,
- struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp)
+static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata, unsigned long addr,
+ struct hns_roce_ib_create_cq_resp *resp)
{
- struct hns_roce_ib_create_cq ucmd;
- struct device *dev = hr_dev->dev;
- int ret;
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
-
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- dev_err(dev, "Failed to copy_from_udata.\n");
- return -EFAULT;
- }
+ bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+ struct hns_roce_ucontext *uctx;
+ int err;
- /* Get user space address, write it into mtt table */
- ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
- if (ret) {
- dev_err(dev, "Failed to get_cq_umem.\n");
- return ret;
- }
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
- udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
- ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
- &hr_cq->db);
- if (ret) {
- dev_err(dev, "cq record doorbell map failed!\n");
- goto err_mtt;
+ if (udata) {
+ if (has_db &&
+ udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ err = hns_roce_db_map_user(uctx, udata, addr,
+ &hr_cq->db);
+ if (err)
+ return err;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
}
- hr_cq->db_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
- }
-
- return 0;
-
-err_mtt:
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
-
- return ret;
-}
-
-static int create_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
-{
- struct device *dev = hr_dev->dev;
- int ret;
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
- ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
- if (ret)
- return ret;
-
- hr_cq->set_ci_db = hr_cq->db.db_record;
- *hr_cq->set_ci_db = 0;
- hr_cq->db_en = 1;
- }
-
- /* Init mtt table and write buff address to mtt table */
- ret = alloc_cq_buf(hr_dev, hr_cq);
- if (ret) {
- dev_err(dev, "Failed to alloc_cq_buf.\n");
- goto err_db;
+ } else {
+ if (has_db) {
+ err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (err)
+ return err;
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
+ }
+ hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
- hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
-
return 0;
-
-err_db:
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
- hns_roce_free_db(hr_dev, &hr_cq->db);
-
- return ret;
}
-static void destroy_user_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq,
- struct ib_udata *udata,
- struct hns_roce_ib_create_cq_resp *resp)
+static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata)
{
- struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
- udata, struct hns_roce_ucontext, ibucontext);
+ struct hns_roce_ucontext *uctx;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
- udata->outlen >= offsetofend(typeof(*resp), cap_flags))
- hns_roce_db_unmap_user(context, &hr_cq->db);
-
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
-}
-
-static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq)
-{
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- free_cq_buf(hr_dev, hr_cq);
+ if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
+ return;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext,
+ ibucontext);
+ hns_roce_db_unmap_user(uctx, &hr_cq->db);
+ } else {
hns_roce_free_db(hr_dev, &hr_cq->db);
+ }
}
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -345,20 +230,21 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct device *dev = hr_dev->dev;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_cq ucmd = {};
int vector = attr->comp_vector;
u32 cq_entries = attr->cqe;
int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
- dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
- cq_entries, hr_dev->caps.max_cqes);
+ ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
+ cq_entries, hr_dev->caps.max_cqes);
return -EINVAL;
}
if (vector >= hr_dev->caps.num_comp_vectors) {
- dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
- vector, hr_dev->caps.num_comp_vectors);
+ ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
+ vector, hr_dev->caps.num_comp_vectors);
return -EINVAL;
}
@@ -367,30 +253,35 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
hr_cq->cq_depth = cq_entries;
hr_cq->vector = vector;
- hr_cq->buf.size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
- hr_cq->buf.page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
spin_lock_init(&hr_cq->lock);
INIT_LIST_HEAD(&hr_cq->sq_list);
INIT_LIST_HEAD(&hr_cq->rq_list);
if (udata) {
- ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
- if (ret) {
- dev_err(dev, "Create cq failed in user mode!\n");
- goto err_cq;
- }
- } else {
- ret = create_kernel_cq(hr_dev, hr_cq);
+ ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (ret) {
- dev_err(dev, "Create cq failed in kernel mode!\n");
- goto err_cq;
+ ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
+ ret);
+ return ret;
}
}
- ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
+ ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
+ return ret;
+ }
+
+ ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
if (ret) {
- dev_err(dev, "Alloc CQ failed(%d).\n", ret);
- goto err_dbmap;
+ ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
+ goto err_cq_buf;
+ }
+
+ ret = alloc_cqc(hr_dev, hr_cq);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
+ goto err_cq_db;
}
/*
@@ -412,15 +303,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
return 0;
err_cqc:
- hns_roce_free_cqc(hr_dev, hr_cq);
-
-err_dbmap:
- if (udata)
- destroy_user_cq(hr_dev, hr_cq, udata, &resp);
- else
- destroy_kernel_cq(hr_dev, hr_cq);
-
-err_cq:
+ free_cqc(hr_dev, hr_cq);
+err_cq_db:
+ free_cq_db(hr_dev, hr_cq, udata);
+err_cq_buf:
+ free_cq_buf(hr_dev, hr_cq);
return ret;
}
@@ -429,28 +316,12 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- if (hr_dev->hw->destroy_cq) {
+ if (hr_dev->hw->destroy_cq)
hr_dev->hw->destroy_cq(ib_cq, udata);
- return;
- }
-
- hns_roce_free_cqc(hr_dev, hr_cq);
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
- ib_umem_release(hr_cq->umem);
- if (udata) {
- if (hr_cq->db_en == 1)
- hns_roce_db_unmap_user(rdma_udata_to_drv_context(
- udata,
- struct hns_roce_ucontext,
- ibucontext),
- &hr_cq->db);
- } else {
- /* Free the buff of stored cq */
- free_cq_buf(hr_dev, hr_cq);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
- hns_roce_free_db(hr_dev, &hr_cq->db);
- }
+ free_cq_buf(hr_dev, hr_cq);
+ free_cq_db(hr_dev, hr_cq, udata);
+ free_cqc(hr_dev, hr_cq);
}
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
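
The restructured hns_roce_create_cq() above follows the usual staged-initialization pattern: allocate the CQ buffer, then the doorbell, then the context, and on any failure release the earlier stages in reverse order via the err_* labels. A self-contained sketch of that control flow with stub allocators (all names here are hypothetical, not the driver's):

#include <stdio.h>

/* Stubs standing in for the buffer, doorbell and context allocators. */
static int alloc_buf(void)  { return 0; }
static int alloc_db(void)   { return 0; }
static int alloc_ctx(void)  { return -1; }	/* force the unwind path */
static void free_buf(void)  { puts("free buf"); }
static void free_db(void)   { puts("free db"); }

static int create_object(void)
{
	int ret;

	ret = alloc_buf();
	if (ret)
		return ret;

	ret = alloc_db();
	if (ret)
		goto err_buf;

	ret = alloc_ctx();
	if (ret)
		goto err_db;

	return 0;

err_db:
	free_db();
err_buf:
	free_buf();
	return ret;
}

int main(void)
{
	printf("create_object() = %d\n", create_object());
	return 0;
}
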
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f6b3cf6b95d6..a77fa6730b2d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -66,6 +66,8 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
+#define HNS_ROCE_RESERVED_SGE 1
+
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -131,12 +133,12 @@ enum {
};
enum {
- HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
- HNS_ROCE_SUPPORT_SQ_RECORD_DB = 1 << 1,
+ HNS_ROCE_QP_CAP_RQ_RECORD_DB = BIT(0),
+ HNS_ROCE_QP_CAP_SQ_RECORD_DB = BIT(1),
};
-enum {
- HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
+enum hns_roce_cq_flags {
+ HNS_ROCE_CQ_FLAG_RECORD_DB = BIT(0),
};
enum hns_roce_qp_state {
@@ -209,6 +211,8 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};
+#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12
+
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
@@ -222,13 +226,6 @@ enum {
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
-enum hns_roce_mtt_type {
- MTT_TYPE_WQE,
- MTT_TYPE_CQE,
- MTT_TYPE_SRQWQE,
- MTT_TYPE_IDX
-};
-
#define HNS_ROCE_DB_TYPE_COUNT 2
#define HNS_ROCE_DB_UNIT_SIZE 4
@@ -267,9 +264,12 @@ enum {
#define HNS_ROCE_PORT_DOWN 0
#define HNS_ROCE_PORT_UP 1
-#define HNS_ROCE_MTT_ENTRY_PER_SEG 8
+/* The minimum page size is 4K for hardware */
+#define HNS_HW_PAGE_SHIFT 12
+#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
-#define PAGE_ADDR_SHIFT 12
+/* The minimum page count for hardware access page directly. */
+#define HNS_HW_DIRECT_PAGE_COUNT 2
struct hns_roce_uar {
u64 pfn;
@@ -300,22 +300,6 @@ struct hns_roce_bitmap {
unsigned long *table;
};
-/* Order bitmap length -- bit num compute formula: 1 << (max_order - order) */
-/* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
-/* Every bit repesent to a partner free/used status in bitmap */
-/*
- * Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
- * Bit = 1 represent to idle and available; bit = 0: not available
- */
-struct hns_roce_buddy {
- /* Members point to every order level bitmap */
- unsigned long **bits;
- /* Represent to avail bits of the order level bitmap */
- u32 *num_free;
- int max_order;
- spinlock_t lock;
-};
-
/* For Hardware Entry Memory */
struct hns_roce_hem_table {
/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
@@ -336,13 +320,6 @@ struct hns_roce_hem_table {
dma_addr_t *bt_l0_dma_addr;
};
-struct hns_roce_mtt {
- unsigned long first_seg;
- int order;
- int page_shift;
- enum hns_roce_mtt_type mtt_type;
-};
-
struct hns_roce_buf_region {
int offset; /* page offset */
u32 count; /* page count */
@@ -357,13 +334,34 @@ struct hns_roce_hem_list {
struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
struct list_head btm_bt; /* link all bottom bt in @mid_bt */
dma_addr_t root_ba; /* pointer to the root ba table */
- int bt_pg_shift;
+};
+
+struct hns_roce_buf_attr {
+ struct {
+ size_t size; /* region size */
+ int hopnum; /* multi-hop addressing hop num */
+ } region[HNS_ROCE_MAX_BT_REGION];
+ int region_count; /* valid region count */
+ unsigned int page_shift; /* buffer page shift */
+ bool fixed_page; /* decide page shift is fixed-size or maximum size */
+ int user_access; /* umem access flag */
+ bool mtt_only; /* only alloc buffer-required MTT memory */
};
/* memory translate region */
struct hns_roce_mtr {
- struct hns_roce_hem_list hem_list;
- int buf_pg_shift;
+ struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
+ struct ib_umem *umem; /* user space buffer */
+ struct hns_roce_buf *kmem; /* kernel space buffer */
+ struct {
+ dma_addr_t root_ba; /* root BA table's address */
+ bool is_direct; /* addressing without BA table */
+ unsigned int ba_pg_shift; /* BA table page shift */
+ unsigned int buf_pg_shift; /* buffer page shift */
+ int buf_pg_count; /* buffer page count */
+ struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
+ unsigned int region_count;
+ } hem_cfg; /* config for hardware addressing */
};
struct hns_roce_mw {
@@ -381,43 +379,22 @@ struct hns_roce_mw {
struct hns_roce_mr {
struct ib_mr ibmr;
- struct ib_umem *umem;
u64 iova; /* MR's virtual orignal addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access; /* Access permission of MR */
- u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
- u64 *pbl_buf; /* MR's PBL space */
- dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
- u32 pbl_size; /* PA number in the PBL */
- u64 pbl_ba; /* page table address */
- u32 l0_chunk_last_num; /* L0 last number */
- u32 l1_chunk_last_num; /* L1 last number */
- u64 **pbl_bt_l2; /* PBL BT L2 */
- u64 **pbl_bt_l1; /* PBL BT L1 */
- u64 *pbl_bt_l0; /* PBL BT L0 */
- dma_addr_t *pbl_l2_dma_addr; /* PBL BT L2 dma addr */
- dma_addr_t *pbl_l1_dma_addr; /* PBL BT L1 dma addr */
- dma_addr_t pbl_l0_dma_addr; /* PBL BT L0 dma addr */
- u32 pbl_ba_pg_sz; /* BT chunk page size */
- u32 pbl_buf_pg_sz; /* buf chunk page size */
u32 pbl_hop_num; /* multi-hop number */
+ struct hns_roce_mtr pbl_mtr;
+ u32 npages;
+ dma_addr_t *page_list;
};
struct hns_roce_mr_table {
struct hns_roce_bitmap mtpt_bitmap;
- struct hns_roce_buddy mtt_buddy;
- struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table;
- struct hns_roce_buddy mtt_cqe_buddy;
- struct hns_roce_hem_table mtt_cqe_table;
- struct hns_roce_buddy mtt_srqwqe_buddy;
- struct hns_roce_hem_table mtt_srqwqe_table;
- struct hns_roce_buddy mtt_idx_buddy;
- struct hns_roce_hem_table mtt_idx_table;
};
struct hns_roce_wq {
@@ -433,7 +410,7 @@ struct hns_roce_wq {
};
struct hns_roce_sge {
- int sge_cnt; /* SGE num */
+ unsigned int sge_cnt; /* SGE num */
int offset;
int sge_shift; /* SGE size */
};
@@ -446,10 +423,9 @@ struct hns_roce_buf_list {
struct hns_roce_buf {
struct hns_roce_buf_list direct;
struct hns_roce_buf_list *page_list;
- int nbufs;
u32 npages;
u32 size;
- int page_shift;
+ unsigned int page_shift;
};
struct hns_roce_db_pgdir {
@@ -482,12 +458,10 @@ struct hns_roce_db {
struct hns_roce_cq {
struct ib_cq ib_cq;
- struct hns_roce_buf buf;
- struct hns_roce_mtt mtt;
+ struct hns_roce_mtr mtr;
struct hns_roce_db db;
- u8 db_en;
+ u32 flags;
spinlock_t lock;
- struct ib_umem *umem;
u32 cq_depth;
u32 cons_index;
u32 *set_ci_db;
@@ -505,11 +479,8 @@ struct hns_roce_cq {
};
struct hns_roce_idx_que {
- struct hns_roce_buf idx_buf;
- int entry_sz;
- u32 buf_size;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
+ struct hns_roce_mtr mtr;
+ int entry_shift;
unsigned long *bitmap;
};
@@ -524,10 +495,9 @@ struct hns_roce_srq {
atomic_t refcount;
struct completion free;
- struct hns_roce_buf buf;
+ struct hns_roce_mtr buf_mtr;
+
u64 *wrid;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
struct hns_roce_idx_que idx_que;
spinlock_t lock;
int head;
@@ -656,20 +626,15 @@ struct hns_roce_work {
struct hns_roce_qp {
struct ib_qp ibqp;
- struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
struct hns_roce_db rdb;
struct hns_roce_db sdb;
- u8 rdb_en;
- u8 sdb_en;
+ unsigned long en_flags;
u32 doorbell_qpn;
u32 sq_signal_bits;
struct hns_roce_wq sq;
- struct ib_umem *umem;
- struct hns_roce_mtt mtt;
struct hns_roce_mtr mtr;
- int wqe_bt_pg_shift;
u32 buff_size;
struct mutex mutex;
@@ -769,17 +734,11 @@ struct hns_roce_eq {
int over_ignore;
int coalesce;
int arm_st;
- u64 eqe_ba;
- int eqe_ba_pg_sz;
- int eqe_buf_pg_sz;
int hop_num;
struct hns_roce_mtr mtr;
- struct hns_roce_buf buf;
- int eq_max_cnt;
+ u16 eq_max_cnt;
int eq_period;
int shift;
- dma_addr_t cur_eqe_ba;
- dma_addr_t nxt_eqe_ba;
int event_type;
int sub_type;
};
@@ -1102,15 +1061,67 @@ static inline struct hns_roce_qp
return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}
+static inline bool hns_roce_buf_is_direct(struct hns_roce_buf *buf)
+{
+ if (buf->page_list)
+ return false;
+
+ return true;
+}
+
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
- u32 page_size = 1 << buf->page_shift;
+ if (hns_roce_buf_is_direct(buf))
+ return (char *)(buf->direct.buf) + (offset & (buf->size - 1));
- if (buf->nbufs == 1)
- return (char *)(buf->direct.buf) + offset;
+ return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
+ (offset & ((1 << buf->page_shift) - 1));
+}
+
+static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
+{
+ if (hns_roce_buf_is_direct(buf))
+ return buf->direct.map + ((dma_addr_t)idx << buf->page_shift);
else
- return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
- (offset & (page_size - 1));
+ return buf->page_list[idx].map;
+}
+
+#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)
+
+static inline u64 to_hr_hw_page_addr(u64 addr)
+{
+ return addr >> HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hw_page_shift(u32 page_shift)
+{
+ return page_shift - HNS_HW_PAGE_SHIFT;
+}
+
+static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
+{
+ if (count > 0)
+ return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
+
+ return 0;
+}
+
+static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift);
+}
+
+static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
+{
+ return hr_hw_page_align(count << buf_shift) >> buf_shift;
+}
+
+static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
+{
+ if (!count)
+ return 0;
+
+ return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
@@ -1125,25 +1136,18 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
-int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
- struct hns_roce_mtt *mtt);
-void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt);
-int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct hns_roce_buf *buf);
-
-void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
- int buf_pg_shift);
-int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t **bufs, struct hns_roce_buf_region *regions,
- int region_cnt);
-void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr);
-
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr);
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr);
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int page_cnt);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
@@ -1171,8 +1175,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
unsigned long obj, int cnt,
int rr);
-int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void hns_roce_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1200,25 +1204,15 @@ struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
-void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
- struct hns_roce_buf *buf);
+void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf, u32 page_shift);
-int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct ib_umem *umem);
-
-void hns_roce_init_buf_region(struct hns_roce_buf_region *region, int hopnum,
- int offset, int buf_cnt);
-int hns_roce_alloc_buf_list(struct hns_roce_buf_region *regions,
- dma_addr_t **bufs, int count);
-void hns_roce_free_buf_list(dma_addr_t **bufs, int count);
-
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
- int page_shift);
+ unsigned int page_shift);
int hns_roce_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
@@ -1254,8 +1248,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
-
int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db);
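
The new to_hr_hem_entries_size()/to_hr_hem_entries_count() helpers above round a requested entry count up to whole 4 KiB hardware pages (HNS_HW_PAGE_SHIFT). A standalone sketch of that rounding, assuming 32-byte entries and a request for 100 of them purely as an example:

#include <stdio.h>

#define HNS_HW_PAGE_SHIFT 12
#define HW_PAGE_ALIGN(x) (((x) + (1u << HNS_HW_PAGE_SHIFT) - 1) & \
			  ~((1u << HNS_HW_PAGE_SHIFT) - 1))

int main(void)
{
	unsigned int count = 100;	/* requested entries (e.g. CQEs) */
	unsigned int shift = 5;		/* 32-byte entry size */

	unsigned int bytes = HW_PAGE_ALIGN(count << shift);
	unsigned int entries = bytes >> shift;

	/* 100 * 32 = 3200 bytes -> rounded to 4096 -> 128 entries */
	printf("%u bytes, %u entries after hw-page alignment\n",
	       bytes, entries);
	return 0;
}
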
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 263338b90d7a..c8db6f8ae018 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -75,18 +75,6 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
case HEM_TYPE_CQC_TIMER:
hop_num = hr_dev->caps.cqc_timer_hop_num;
break;
- case HEM_TYPE_CQE:
- hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_MTT:
- hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- hop_num = hr_dev->caps.idx_hop_num;
- break;
default:
return false;
}
@@ -195,38 +183,6 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
mhop->hop_num = hr_dev->caps.srqc_hop_num;
break;
- case HEM_TYPE_MTT:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.mtt_hop_num;
- break;
- case HEM_TYPE_CQE:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.cqe_hop_num;
- break;
- case HEM_TYPE_SRQWQE:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
- break;
- case HEM_TYPE_IDX:
- mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
- + PAGE_SHIFT);
- mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
- + PAGE_SHIFT);
- mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
- mhop->hop_num = hr_dev->caps.idx_hop_num;
- break;
default:
dev_err(dev, "Table %d not support multi-hop addressing!\n",
type);
@@ -899,57 +855,6 @@ out:
return addr;
}
-int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end)
-{
- struct hns_roce_hem_mhop mhop;
- unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i = 0;
- int ret;
-
- if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- ret = hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
- if (ret)
- goto fail;
- inc = mhop.bt_chunk_size / table->obj_size;
- }
-
- /* Allocate MTT entry memory according to chunk(128K) */
- for (i = start; i <= end; i += inc) {
- ret = hns_roce_table_get(hr_dev, table, i);
- if (ret)
- goto fail;
- }
-
- return 0;
-
-fail:
- while (i > start) {
- i -= inc;
- hns_roce_table_put(hr_dev, table, i);
- }
- return ret;
-}
-
-void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end)
-{
- struct hns_roce_hem_mhop mhop;
- unsigned long inc = table->table_chunk_size / table->obj_size;
- unsigned long i;
-
- if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
- if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
- return;
- inc = mhop.bt_chunk_size / table->obj_size;
- }
-
- for (i = start; i <= end; i += inc)
- hns_roce_table_put(hr_dev, table, i);
-}
-
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
@@ -1112,12 +1017,6 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
- if ((hr_dev->caps.num_idx_segs))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table);
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table);
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->srq_table.table);
@@ -1137,10 +1036,6 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table);
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}
struct roce_hem_item {
@@ -1505,7 +1400,7 @@ err_exit:
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
- int region_cnt)
+ int region_cnt, unsigned int bt_pg_shift)
{
const struct hns_roce_buf_region *r;
int ofs, end;
@@ -1519,7 +1414,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- unit = (1 << hem_list->bt_pg_shift) / BA_BYTE_LEN;
+ unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
for (i = 0; i < region_cnt; i++) {
r = &regions[i];
if (!r->count)
@@ -1566,8 +1461,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
hem_list->root_ba = 0;
}
-void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
- int bt_page_order)
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
{
int i, j;
@@ -1576,8 +1470,6 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
-
- hem_list->bt_pg_shift = bt_page_order;
}
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
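
The hns_roce_table_get_range()/hns_roce_table_put_range() helpers removed above were thin loops over the per-object hns_roce_table_get()/hns_roce_table_put() calls, which this patch leaves in place. A minimal sketch of how a caller could open-code the same range semantics with the surviving helpers (the function name and the inc parameter are illustrative only, not part of the patch):

static int example_table_get_range(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long start, unsigned long end,
				   unsigned long inc)
{
	unsigned long i;
	int ret;

	/* grab one HEM chunk per step, mirroring the removed helper */
	for (i = start; i <= end; i += inc) {
		ret = hns_roce_table_get(hr_dev, table, i);
		if (ret)
			goto rollback;
	}
	return 0;

rollback:
	/* release whatever was taken before the failure */
	while (i > start) {
		i -= inc;
		hns_roce_table_put(hr_dev, table, i);
	}
	return ret;
}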
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 3bb8f78fb7b0..b34c940077bb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -115,12 +115,6 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
-int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end);
-void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
unsigned long obj_size, unsigned long nobj,
@@ -133,14 +127,13 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
-void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
- int bt_page_order);
+void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list);
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
int region_cnt, int unit);
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
- int region_cnt);
+ int region_cnt, unsigned int bt_pg_shift);
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
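
With the header change above, the base-address-table page shift is no longer latched by hns_roce_hem_list_init() but is passed with each hns_roce_hem_list_request() call. A minimal caller-side sketch of the new flow (the wrapper name is illustrative and not taken from the patch):

static int example_request_hem_bt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_hem_list *hem_list,
				  const struct hns_roce_buf_region *regions,
				  int region_cnt, unsigned int ba_pg_shift)
{
	/* previously: hns_roce_hem_list_init(hem_list, ba_pg_shift); */
	hns_roce_hem_list_init(hem_list);

	/* the BT page shift now travels with the request itself */
	return hns_roce_hem_list_request(hr_dev, hem_list, regions,
					 region_cnt, ba_pg_shift);
}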
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 5ff028d77be3..d02207cd30df 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -503,16 +503,13 @@ static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
u32 ext_sdb_alful)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
__le32 tmp;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
/* Configure extend SDB threshold */
roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
@@ -545,16 +542,13 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
u32 ext_odb_alful)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t odb_dma_addr;
__le32 tmp;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
/* Configure extend ODB threshold */
roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
@@ -583,16 +577,13 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
u32 odb_ext_mod)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
dma_addr_t sdb_dma_addr;
dma_addr_t odb_dma_addr;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
if (!db->ext_db)
return -ENOMEM;
@@ -692,14 +683,14 @@ static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct hns_roce_caps *caps = &hr_dev->caps;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct device *dev = &hr_dev->pdev->dev;
struct ib_cq_init_attr cq_init_attr;
- struct hns_roce_free_mr *free_mr;
struct ib_qp_attr attr = { 0 };
- struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
- struct ib_device *ibdev;
struct ib_cq *cq;
struct ib_pd *pd;
union ib_gid dgid;
@@ -712,14 +703,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
u8 port = 0;
u8 sl;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
/* Reserved cq for loop qp */
cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
cq_init_attr.comp_vector = 0;
- ibdev = &hr_dev->ib_dev;
cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
if (!cq)
return -ENOMEM;
@@ -868,16 +855,13 @@ alloc_cq_failed:
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
struct hns_roce_qp *hr_qp;
int ret;
int i;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
if (!hr_qp)
@@ -897,18 +881,15 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
u32 sdb_ext_mod;
u32 odb_ext_mod;
u32 sdb_evt_mod;
u32 odb_evt_mod;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
-
memset(db, 0, sizeof(*db));
/* Default DB mode */
@@ -954,15 +935,12 @@ static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
- struct device *dev = &hr_dev->pdev->dev;
+ long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
+ struct device *dev = &hr_dev->pdev->dev;
struct completion comp;
- long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
GFP_KERNEL);
@@ -1021,29 +999,21 @@ static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
- struct hns_roce_mr_free_work *mr_work;
- struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_cq *mr_free_cq;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_dev *hr_dev;
- struct hns_roce_mr *hr_mr;
- struct hns_roce_qp *hr_qp;
- struct device *dev;
unsigned long end =
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
- int i;
- int ret;
+ struct hns_roce_mr_free_work *mr_work =
+ container_of(work, struct hns_roce_mr_free_work, work);
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
+ struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
+ struct hns_roce_mr *hr_mr = mr_work->mr;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
+ struct hns_roce_qp *hr_qp;
int ne = 0;
-
- mr_work = container_of(work, struct hns_roce_mr_free_work, work);
- hr_mr = (struct hns_roce_mr *)mr_work->mr;
- hr_dev = to_hr_dev(mr_work->ib_dev);
- dev = &hr_dev->pdev->dev;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
- mr_free_cq = free_mr->mr_free_cq;
+ int ret;
+ int i;
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
@@ -1092,19 +1062,15 @@ free_work:
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, struct ib_udata *udata)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
+ long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr_free_work *mr_work;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
- struct completion comp;
- long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
unsigned long start = jiffies;
- int npages;
+ struct completion comp;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
if (mr->enabled) {
if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
key_to_hw_index(mr->key) &
@@ -1146,17 +1112,9 @@ free_mr:
dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
- dma_free_coherent(dev, npages * 8, mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mr->key), 0);
-
- ib_umem_release(mr->umem);
-
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
kfree(mr);
return ret;
@@ -1164,12 +1122,9 @@ free_mr:
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_db_table *db = &priv->db_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_db_table *db;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- db = &priv->db_table;
if (db->sdb_ext_mod) {
dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
@@ -1190,17 +1145,14 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
- int ret;
- u32 val;
- __le32 tmp;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_raq_table *raq = &priv->raq_table;
+ struct device *dev = &hr_dev->pdev->dev;
int raq_shift = 0;
dma_addr_t addr;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_raq_table *raq;
- struct device *dev = &hr_dev->pdev->dev;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- raq = &priv->raq_table;
+ __le32 tmp;
+ u32 val;
+ int ret;
raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
if (!raq->e_raq_buf)
@@ -1280,12 +1232,9 @@ err_dma_alloc_raq:
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_raq_table *raq = &priv->raq_table;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- struct hns_roce_raq_table *raq;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- raq = &priv->raq_table;
dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
raq->e_raq_buf->map);
@@ -1319,12 +1268,10 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
int ret;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-
priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
GFP_KERNEL);
@@ -1362,10 +1309,8 @@ err_failed_alloc_mtpt_buf:
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1379,12 +1324,9 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
/*
* This buffer will be used for CQ's tptr(tail pointer), also
@@ -1405,12 +1347,9 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
tptr_buf->buf, tptr_buf->map);
@@ -1418,14 +1357,11 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
-
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
if (!free_mr->free_mr_wq) {
dev_err(dev, "Create free mr workqueue failed!\n");
@@ -1444,11 +1380,8 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_free_mr *free_mr;
- struct hns_roce_v1_priv *priv;
-
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- free_mr = &priv->free_mr;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_free_mr *free_mr = &priv->free_mr;
flush_workqueue(free_mr->free_mr_wq);
destroy_workqueue(free_mr->free_mr_wq);
@@ -1826,9 +1759,12 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
- struct sg_dma_page_iter sg_iter;
- u64 *pages;
+ dma_addr_t pbl_ba;
+ int count;
int i;
/* MPT filled into mailbox buf */
@@ -1878,22 +1814,15 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- pages = (u64 *) __get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- i = 0;
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
-
- /* Directly record to MTPT table firstly 7 entry */
- if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
- break;
- i++;
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ ARRAY_SIZE(pages), &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
+ return -ENOBUFS;
}
/* Register user mr */
- for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
+ for (i = 0; i < count; i++) {
switch (i) {
case 0:
mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
@@ -1959,20 +1888,17 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
}
}
- free_page((unsigned long) pages);
-
- mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
-
+ mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S,
- ((u32)(mr->pbl_dma_addr >> 32)));
+ MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
return 0;
}
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem,
+ n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -2066,16 +1992,12 @@ static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf,
u64 *mtts, dma_addr_t dma_handle)
{
- struct hns_roce_cq_context *cq_context = NULL;
- struct hns_roce_buf_list *tptr_buf;
- struct hns_roce_v1_priv *priv;
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
+ struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
+ struct hns_roce_cq_context *cq_context = mb_buf;
dma_addr_t tptr_dma_addr;
int offset;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
- tptr_buf = &priv->tptr_table.tptr_buf;
-
- cq_context = mb_buf;
memset(cq_context, 0, sizeof(*cq_context));
/* Get the tptr for this CQ. */
@@ -2416,16 +2338,14 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
int step_idx)
{
+ struct hns_roce_v1_priv *priv = hr_dev->priv;
struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_v1_priv *priv;
- unsigned long flags = 0;
long end = HW_SYNC_TIMEOUT_MSECS;
__le32 bt_cmd_val[2] = {0};
+ unsigned long flags = 0;
void __iomem *bt_cmd;
u64 bt_ba = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-
switch (table->type) {
case HEM_TYPE_QPC:
bt_ba = priv->bt_table.qpc_buf.map >> 12;
@@ -2479,7 +2399,6 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt,
enum hns_roce_qp_state cur_state,
enum hns_roce_qp_state new_state,
struct hns_roce_qp_context *context,
@@ -2560,6 +2479,29 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int rq_pa_start;
+ int count;
+
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "Failed to find SQ ba\n");
+ return -ENOBUFS;
+ }
+ rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift;
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start, rq_ba, 1,
+ NULL);
+ if (!count) {
+ ibdev_err(ibdev, "Failed to find RQ ba\n");
+ return -ENOBUFS;
+ }
+
+ return 0;
+}
+
static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2567,25 +2509,20 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_sqp_context *context;
- struct device *dev = &hr_dev->pdev->dev;
dma_addr_t dma_handle = 0;
u32 __iomem *addr;
- int rq_pa_start;
+ u64 sq_ba = 0;
+ u64 rq_ba = 0;
__le32 tmp;
u32 reg_val;
- u64 *mtts;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search QP buf's MTTs */
- mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
- hr_qp->mtt.first_seg, &dma_handle);
- if (!mtts) {
- dev_err(dev, "qp buf pa find failed\n");
+ if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
goto out;
- }
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
roce_set_field(context->qp1c_bytes_4,
@@ -2599,11 +2536,11 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
- context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
+ context->sq_rq_bt_l = cpu_to_le32(dma_handle);
roce_set_field(context->qp1c_bytes_12,
QP1C_BYTES_12_SQ_RQ_BT_H_M,
QP1C_BYTES_12_SQ_RQ_BT_H_S,
- ((u32)(dma_handle >> 32)));
+ upper_32_bits(dma_handle));
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
@@ -2624,14 +2561,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
- rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l =
- cpu_to_le32((u32)(mtts[rq_pa_start]));
+ context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
- (mtts[rq_pa_start]) >> 32);
+ upper_32_bits(rq_ba));
roce_set_field(context->qp1c_bytes_28,
QP1C_BYTES_28_RQ_CUR_IDX_M,
QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
@@ -2645,12 +2580,12 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_32_TX_CQ_NUM_S,
to_hr_cq(ibqp->send_cq)->cqn);
- context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
+ context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_field(context->qp1c_bytes_40,
QP1C_BYTES_40_SQ_CUR_IDX_M,
QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
@@ -2704,6 +2639,28 @@ out:
return -EINVAL;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
@@ -2716,26 +2673,29 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
__le32 doorbell[2] = {0};
- int rq_pa_start = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
- u64 *mtts = NULL;
+ u64 sq_ba = 0;
+ u64 rq_ba = 0;
int port;
u8 port_num;
u8 *dmac;
u8 *smac;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(ibqp->device,
+ "not support QP(%u) status from %d to %d\n",
+ ibqp->qp_num, cur_state, new_state);
+ return -EINVAL;
+ }
+
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search qp buf's mtts */
- mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
- hr_qp->mtt.first_seg, &dma_handle);
- if (mtts == NULL) {
- dev_err(dev, "qp buf pa find failed\n");
+ if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
goto out;
- }
/* Search IRRL's mtts */
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
@@ -2890,11 +2850,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
dmac = (u8 *)attr->ah_attr.roce.dmac;
- context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
+ context->sq_rq_bt_l = cpu_to_le32(dma_handle);
roce_set_field(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
- ((u32)(dma_handle >> 32)));
+ upper_32_bits(dma_handle));
roce_set_bit(context->qpc_bytes_24,
QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
1);
@@ -2993,14 +2953,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
- rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
- context->cur_rq_wqe_ba_l =
- cpu_to_le32((u32)(mtts[rq_pa_start]));
+ context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
- mtts[rq_pa_start] >> 32);
+ upper_32_bits(rq_ba));
roce_set_field(context->qpc_bytes_76,
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
@@ -3062,8 +3020,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR &&
- new_state == IB_QPS_RTS) {
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
/* If exist optional param, return error */
if ((attr_mask & IB_QP_ALT_PATH) ||
(attr_mask & IB_QP_ACCESS_FLAGS) ||
@@ -3075,12 +3032,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
goto out;
}
- context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
+ context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qpc_bytes_120,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_field(context->qpc_bytes_124,
QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
@@ -3223,28 +3180,18 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
- context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
+ context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
- (mtts[0]) >> 32);
+ upper_32_bits(sq_ba));
roce_set_bit(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
roce_set_field(context->qpc_bytes_188,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
- dev_err(dev, "not support this status migration\n");
- goto out;
}
/* Every status migrate must change state */
@@ -3253,8 +3200,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
/* SW pass context to HW */
- ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
- to_hns_roce_state(cur_state),
+ ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
to_hns_roce_state(new_state), context,
hr_qp);
if (ret) {
@@ -3636,8 +3582,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
u32 cqe_cnt_cur;
int wait_time = 0;
- hns_roce_free_cqc(hr_dev, hr_cq);
-
/*
* Before freeing cq buffer, we need to ensure that the outstanding CQE
* have been written by checking the CQE counter.
@@ -3660,14 +3604,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
}
wait_time++;
}
-
- hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-
- ib_umem_release(hr_cq->umem);
- if (!udata) {
- /* Free the buff of stored cq */
- hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
- }
}
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
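
In the hns_roce_hw_v1.c changes above, QP buffer addresses are now resolved through the shared MTR layer rather than a private MTT table walk, and the legal QP state transitions are encoded in the check_qp_state() lookup table. A minimal sketch of how the two new static helpers combine within this file (the wrapper name is illustrative only):

static int example_prepare_qp_modify(struct hns_roce_dev *hr_dev,
				     struct hns_roce_qp *hr_qp,
				     enum ib_qp_state cur_state,
				     enum ib_qp_state new_state)
{
	dma_addr_t bt_ba = 0;
	u64 sq_ba = 0;
	u64 rq_ba = 0;

	/* reject transitions the hardware state machine cannot perform */
	if (!check_qp_state(cur_state, new_state))
		return -EINVAL;

	/* SQ/RQ base addresses come from the QP's MTR, not the MTT table */
	return find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &bt_ba);
}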
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c3316672b70e..c597d7281629 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -95,6 +95,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
{
struct hns_roce_mr *mr = to_hr_mr(wr->mr);
struct hns_roce_wqe_frmr_seg *fseg = wqe;
+ u64 pbl_ba;
/* use ib_access_flags */
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
@@ -109,26 +110,27 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
/* Data structure reuse may lead to confusion */
- rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
- rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
+ pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
+ rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
+ rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));
rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
rc_sq_wqe->rkey = cpu_to_le32(wr->key);
rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
- fseg->pbl_size = cpu_to_le32(mr->pbl_size);
+ fseg->pbl_size = cpu_to_le32(mr->npages);
roce_set_field(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
roce_set_bit(fseg->mode_buf_pg_sz,
V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}
static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
- int valid_num_sge)
+ unsigned int valid_num_sge)
{
struct hns_roce_wqe_atomic_seg *aseg;
@@ -149,56 +151,33 @@ static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
}
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
- unsigned int *sge_ind, int valid_num_sge)
+ unsigned int *sge_ind, unsigned int valid_num_sge)
{
struct hns_roce_v2_wqe_data_seg *dseg;
- struct ib_sge *sg;
- int num_in_wqe = 0;
- int extend_sge_num;
- int fi_sge_num;
- int se_sge_num;
- int shift;
- int i;
+ unsigned int cnt = valid_num_sge;
+ struct ib_sge *sge = wr->sg_list;
+ unsigned int idx = *sge_ind;
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
- num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
- extend_sge_num = valid_num_sge - num_in_wqe;
- sg = wr->sg_list + num_in_wqe;
- shift = qp->hr_buf.page_shift;
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ cnt -= HNS_ROCE_SGE_IN_WQE;
+ sge += HNS_ROCE_SGE_IN_WQE;
+ }
- /*
- * Check whether wr->num_sge sges are in the same page. If not, we
- * should calculate how many sges in the first page and the second
- * page.
- */
- dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
- fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
- (uintptr_t)dseg) /
- sizeof(struct hns_roce_v2_wqe_data_seg);
- if (extend_sge_num > fi_sge_num) {
- se_sge_num = extend_sge_num - fi_sge_num;
- for (i = 0; i < fi_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + i);
- (*sge_ind)++;
- }
- dseg = hns_roce_get_extend_sge(qp,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < se_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + fi_sge_num + i);
- (*sge_ind)++;
- }
- } else {
- for (i = 0; i < extend_sge_num; i++) {
- set_data_seg_v2(dseg++, sg + i);
- (*sge_ind)++;
- }
+ while (cnt > 0) {
+ dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1));
+ set_data_seg_v2(dseg, sge);
+ idx++;
+ sge++;
+ cnt--;
}
+
+ *sge_ind = idx;
}
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
void *wqe, unsigned int *sge_ind,
- int valid_num_sge)
+ unsigned int valid_num_sge)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_v2_wqe_data_seg *dseg = wqe;
@@ -208,15 +187,15 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int i;
if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
- if (le32_to_cpu(rc_sq_wqe->msg_len) >
- hr_dev->caps.max_sq_inline) {
+ if (unlikely(le32_to_cpu(rc_sq_wqe->msg_len) >
+ hr_dev->caps.max_sq_inline)) {
ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
rc_sq_wqe->msg_len,
hr_dev->caps.max_sq_inline);
return -EINVAL;
}
- if (wr->opcode == IB_WR_RDMA_READ) {
+ if (unlikely(wr->opcode == IB_WR_RDMA_READ)) {
ibdev_err(ibdev, "Not support inline data!\n");
return -EINVAL;
}
@@ -230,7 +209,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1);
} else {
- if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
+ if (valid_num_sge <= HNS_ROCE_SGE_IN_WQE) {
for (i = 0; i < wr->num_sge; i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
@@ -243,8 +222,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
(*sge_ind) & (qp->sge.sge_cnt - 1));
- for (i = 0; i < wr->num_sge &&
- j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
+ for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE;
+ i++) {
if (likely(wr->sg_list[i].length)) {
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
@@ -290,10 +269,11 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
return 0;
}
-static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len)
+static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
+ unsigned int *sge_len)
{
- int valid_num = 0;
- u32 len = 0;
+ unsigned int valid_num = 0;
+ unsigned int len = 0;
int i;
for (i = 0; i < wr->num_sge; i++) {
@@ -424,7 +404,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
{
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
unsigned int curr_idx = *sge_idx;
- int valid_num_sge;
+ unsigned int valid_num_sge;
u32 msg_len = 0;
int ret = 0;
@@ -521,8 +501,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
- V2_DB_PARAMETER_IDX_S,
- qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ V2_DB_PARAMETER_IDX_S, qp->sq.head);
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
@@ -548,7 +527,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
spin_lock_irqsave(&qp->sq.lock, flags);
ret = check_send_valid(hr_dev, qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
@@ -584,7 +563,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
else if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
goto out;
}
@@ -634,15 +613,15 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ret = check_recv_valid(hr_dev, hr_qp);
- if (ret) {
+ if (unlikely(ret)) {
*bad_wr = wr;
nreq = 0;
goto out;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
+ if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq))) {
ret = -ENOMEM;
*bad_wr = wr;
goto out;
@@ -650,7 +629,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL;
@@ -667,13 +646,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
dseg++;
}
- if (i < hr_qp->rq.max_gs) {
+ if (wr->num_sge < hr_qp->rq.max_gs) {
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
+ dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
}
/* rq support inline data */
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ if (hr_qp->rq_inl_buf.wqe_cnt) {
sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
(u32)wr->num_sge;
@@ -715,6 +695,129 @@ out:
return ret;
}
+static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
+{
+ return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
+}
+
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+{
+ return hns_roce_buf_offset(idx_que->mtr.kmem,
+ n << idx_que->entry_shift);
+}
+
+static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+{
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->tail++;
+
+ spin_unlock(&srq->lock);
+}
+
+static int find_empty_entry(struct hns_roce_idx_que *idx_que,
+ unsigned long size)
+{
+ int wqe_idx;
+
+ if (unlikely(bitmap_full(idx_que->bitmap, size)))
+ return -ENOSPC;
+
+ wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
+
+ bitmap_set(idx_que->bitmap, wqe_idx, 1);
+
+ return wqe_idx;
+}
+
+static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_v2_db srq_db;
+ unsigned long flags;
+ __le32 *srq_idx;
+ int ret = 0;
+ int wqe_idx;
+ void *wqe;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ ind = srq->head & (srq->wqe_cnt - 1);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(wr->num_sge >= srq->max_gs)) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely(srq->head == srq->tail)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
+ if (unlikely(wqe_idx < 0)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe = get_srq_wqe(srq, wqe_idx);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
+ dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
+ dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
+ }
+
+ if (wr->num_sge < srq->max_gs) {
+ dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+ dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].addr = 0;
+ }
+
+ srq_idx = get_idx_buf(&srq->idx_que, ind);
+ *srq_idx = cpu_to_le32(wqe_idx);
+
+ srq->wrid[wqe_idx] = wr->wr_id;
+ ind = (ind + 1) & (srq->wqe_cnt - 1);
+ }
+
+ if (likely(nreq)) {
+ srq->head += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ srq_db.byte_4 =
+ cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
+ (srq->srqn & V2_DB_BYTE_4_TAG_M));
+ srq_db.parameter =
+ cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
+
+ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return ret;
+}
+
static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
@@ -742,7 +845,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
@@ -768,7 +871,7 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
@@ -785,7 +888,7 @@ static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage; /* the current instance stage */
@@ -865,7 +968,7 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
@@ -878,7 +981,7 @@ static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
&priv->cmq.csq : &priv->cmq.crq;
dma_addr_t dma = ring->desc_dma_addr;
@@ -904,7 +1007,7 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
int ret;
/* Setup the queue entries for command queue */
@@ -948,7 +1051,7 @@ err_crq:
static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
@@ -970,15 +1073,15 @@ static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
return head == priv->cmq.csq.next_to_use;
}
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc;
u16 ntc = csq->next_to_clean;
@@ -1003,7 +1106,7 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
struct hns_roce_cmq_desc *desc_to_use;
bool complete = false;
@@ -1131,7 +1234,7 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long reset_cnt;
@@ -1151,7 +1254,7 @@ static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
int flag)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long instance_stage;
@@ -1349,34 +1452,26 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
struct hns_roce_pf_timer_res_a *req_a;
- struct hns_roce_cmq_desc desc[2];
- int ret, i;
+ struct hns_roce_cmq_desc desc;
+ int ret;
- for (i = 0; i < 2; i++) {
- hns_roce_cmq_setup_basic_desc(&desc[i],
- HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
- true);
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
+ true);
- if (i == 0)
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- }
-
- ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret)
return ret;
- req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
+ req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
hr_dev->caps.qpc_timer_bt_num =
- roce_get_field(req_a->qpc_timer_bt_idx_num,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
- PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
+ roce_get_field(req_a->qpc_timer_bt_idx_num,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
+ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
hr_dev->caps.cqc_timer_bt_num =
- roce_get_field(req_a->cqc_timer_bt_idx_num,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
- PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
+ roce_get_field(req_a->cqc_timer_bt_idx_num,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
+ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
return 0;
}
@@ -1786,6 +1881,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
+ caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
+ HNS_ROCE_CAP_FLAGS_EX_SHIFT;
+
caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
V2_QUERY_PF_CAPS_C_NUM_CQS_M,
V2_QUERY_PF_CAPS_C_NUM_CQS_S);
@@ -1978,11 +2076,6 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
hr_dev->vendor_part_id = hr_dev->pci_dev->device;
hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
- caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
- caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
- caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
-
caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
caps->pbl_buf_pg_sz = 0;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
@@ -2040,8 +2133,6 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
page_num = link_tbl->npages;
entry = link_tbl->table.buf;
- memset(req_a, 0, sizeof(*req_a));
- memset(req_b, 0, sizeof(*req_b));
for (i = 0; i < 2; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
@@ -2050,39 +2141,30 @@ static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
-
- if (i == 0) {
- req_a->base_addr_l =
- cpu_to_le32(link_tbl->table.map & 0xffffffff);
- req_a->base_addr_h =
- cpu_to_le32(link_tbl->table.map >> 32);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
- link_tbl->npages);
- roce_set_field(req_a->depth_pgsz_init_en,
- CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
- link_tbl->pg_sz);
- req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
- req_a->head_ba_h_nxtptr =
- cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
- roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
- CFG_LLM_HEAD_PTR_S, 0);
- } else {
- req_b->tail_ba_l =
- cpu_to_le32(entry[page_num - 1].blk_ba0);
- roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
- CFG_LLM_TAIL_BA_H_S,
- entry[page_num - 1].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_BA1_M);
- roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
- CFG_LLM_TAIL_PTR_S,
- (entry[page_num - 2].blk_ba1_nxt_ptr &
- HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
- HNS_ROCE_LINK_TABLE_NXT_PTR_S);
- }
}
+
+ req_a->base_addr_l = cpu_to_le32(link_tbl->table.map & 0xffffffff);
+ req_a->base_addr_h = cpu_to_le32(link_tbl->table.map >> 32);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_DEPTH_M,
+ CFG_LLM_QUE_DEPTH_S, link_tbl->npages);
+ roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_QUE_PGSZ_M,
+ CFG_LLM_QUE_PGSZ_S, link_tbl->pg_sz);
roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
CFG_LLM_INIT_EN_S, 1);
+ req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
+ req_a->head_ba_h_nxtptr = cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
+ roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M, CFG_LLM_HEAD_PTR_S,
+ 0);
+
+ req_b->tail_ba_l = cpu_to_le32(entry[page_num - 1].blk_ba0);
+ roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
+ CFG_LLM_TAIL_BA_H_S,
+ entry[page_num - 1].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_BA1_M);
+ roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M, CFG_LLM_TAIL_PTR_S,
+ (entry[page_num - 2].blk_ba1_nxt_ptr &
+ HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
+ HNS_ROCE_LINK_TABLE_NXT_PTR_S);
return hns_roce_cmq_send(hr_dev, desc, 2);
}
@@ -2438,12 +2520,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
reg_smac_l = *(u32 *)(&addr[0]);
reg_smac_h = *(u16 *)(&addr[4]);
- memset(smac_tb, 0, sizeof(*smac_tb));
- roce_set_field(smac_tb->tb_idx_rsv,
- CFG_SMAC_TB_IDX_M,
+ roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M,
CFG_SMAC_TB_IDX_S, phy_port);
- roce_set_field(smac_tb->vf_smac_h_rsv,
- CFG_SMAC_TB_VF_SMAC_H_M,
+ roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M,
CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
@@ -2453,32 +2532,30 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct sg_dma_page_iter sg_iter;
- u64 page_addr;
- u64 *pages;
- int i;
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba;
+ int i, count;
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
- roce_set_field(mpt_entry->byte_48_mode_ba,
- V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
+ count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ ARRAY_SIZE(pages), &pbl_ba);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
+ count);
+ return -ENOBUFS;
+ }
- pages = (u64 *)__get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ /* Aligned to the hardware address access unit */
+ for (i = 0; i < count; i++)
+ pages[i] >>= 6;
- i = 0;
- for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- pages[i] = page_addr >> 6;
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
+ roce_set_field(mpt_entry->byte_48_mode_ba,
+ V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(pbl_ba >> 3));
- /* Record the first 2 entry directly to MTPT table */
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
- goto found;
- i++;
- }
-found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
@@ -2489,9 +2566,7 @@ found:
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-
- free_page((unsigned long)pages);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
return 0;
}
@@ -2513,7 +2588,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
@@ -2599,11 +2674,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
+ dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
+ if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
+ ibdev_err(ibdev, "failed to find frmr mtr.\n");
+ return -ENOBUFS;
+ }
+
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
@@ -2611,7 +2694,7 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
roce_set_field(mpt_entry->byte_4_pd_hop_st,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
- mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
@@ -2624,17 +2707,17 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
+ upper_32_bits(pbl_ba >> 3));
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
return 0;
}
@@ -2680,7 +2763,8 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
- return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+ return hns_roce_buf_offset(hr_cq->mtr.kmem,
+ n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
}
static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2692,30 +2776,9 @@ static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
!!(n & hr_cq->cq_depth)) ? cqe : NULL;
}
-static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
+static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
{
- return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
-}
-
-static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
-{
- return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
-}
-
-static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
-{
- /* always called with interrupts disabled. */
- spin_lock(&srq->lock);
-
- bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
- srq->tail++;
-
- spin_unlock(&srq->lock);
-}
-
-static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
-{
- *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+ *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -2801,39 +2864,39 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
- cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
- mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
roce_set_field(cq_context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
- cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
- mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
- hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
roce_set_field(cq_context->byte_24_pgsz_addr,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
- hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
- if (hr_cq->db_en)
- roce_set_bit(cq_context->byte_44_db_record,
- V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
+ roce_set_bit(cq_context->byte_44_db_record,
+ V2_CQC_BYTE_44_DB_RECORD_EN_S,
+ (hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB) ? 1 : 0);
roce_set_field(cq_context->byte_44_db_record,
V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
@@ -2873,8 +2936,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
HNS_ROCE_V2_CQ_DB_NTR);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S,
- hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
+ V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
@@ -2911,7 +2973,7 @@ static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
wqe_buf += size;
}
- if (data_len) {
+ if (unlikely(data_len)) {
wc->status = IB_WC_LOC_LEN_ERR;
return -EAGAIN;
}
@@ -2968,6 +3030,62 @@ out:
return npolled;
}
+static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ struct hns_roce_v2_cqe *cqe, struct ib_wc *wc)
+{
+ static const struct {
+ u32 cqe_status;
+ enum ib_wc_status wc_status;
+ } map[] = {
+ { HNS_ROCE_CQE_V2_SUCCESS, IB_WC_SUCCESS },
+ { HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR, IB_WC_LOC_LEN_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_PROT_ERR, IB_WC_LOC_PROT_ERR },
+ { HNS_ROCE_CQE_V2_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
+ { HNS_ROCE_CQE_V2_MW_BIND_ERR, IB_WC_MW_BIND_ERR },
+ { HNS_ROCE_CQE_V2_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
+ { HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_OP_ERR, IB_WC_REM_OP_ERR },
+ { HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR,
+ IB_WC_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR, IB_WC_RNR_RETRY_EXC_ERR },
+ { HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR, IB_WC_REM_ABORT_ERR },
+ };
+
+ u32 cqe_status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+ V2_CQE_BYTE_4_STATUS_S);
+ int i;
+
+ wc->status = IB_WC_GENERAL_ERR;
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ if (cqe_status == map[i].cqe_status) {
+ wc->status = map[i].wc_status;
+ break;
+ }
+
+ if (likely(wc->status == IB_WC_SUCCESS ||
+ wc->status == IB_WC_WR_FLUSH_ERR))
+ return;
+
+ ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ sizeof(*cqe), false);
+
+ /*
+ * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
+ * into the errored state, so as a workaround for this hardware
+ * limitation the driver needs to assist in flushing. But the flushing
+ * operation uses a mailbox to convey the QP state to the hardware,
+ * which can sleep due to the mutex protection around the mailbox calls.
+ * Hence the flush is deferred for now: once a WC error is detected,
+ * the flush work is scheduled.
+ */
+ if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+ init_flush_work(hr_dev, qp);
+}
+
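The comment in get_cqe_status() above describes a deferred-flush workaround: the non-sleeping CQ poll path only marks the QP and queues work, and the sleeping mailbox command that actually moves the QP to the error state runs later from process context. Below is a minimal sketch of that pattern in isolation, assuming a generic kernel module; test_and_set_bit(), INIT_WORK(), schedule_work() and container_of() are the real kernel APIs, while the demo_* names and DEMO_FLUSH_FLAG are made up for the example and are not the driver's definitions.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define DEMO_FLUSH_FLAG 0	/* hypothetical flag bit, standing in for HNS_ROCE_FLUSH_FLAG */

struct demo_qp {
	unsigned long flush_flag;
	struct work_struct flush_work;
};

static void demo_flush_fn(struct work_struct *work)
{
	struct demo_qp *qp = container_of(work, struct demo_qp, flush_work);

	/*
	 * Process context: a sleeping operation, such as a mailbox command
	 * that moves the QP to the error state, would go here.
	 */
	(void)qp;
}

static void demo_qp_init(struct demo_qp *qp)
{
	qp->flush_flag = 0;
	INIT_WORK(&qp->flush_work, demo_flush_fn);
}

static void demo_handle_bad_cqe(struct demo_qp *qp)
{
	/* Non-sleeping context (CQ polling): queue the flush at most once. */
	if (!test_and_set_bit(DEMO_FLUSH_FLAG, &qp->flush_flag))
		schedule_work(&qp->flush_work);
}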
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -2979,12 +3097,11 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
int is_send;
u16 wqe_ctr;
u32 opcode;
- u32 status;
int qpn;
int ret;
/* Find cqe according to consumer index */
- cqe = next_cqe_sw_v2(hr_cq);
+ cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
if (!cqe)
return -EAGAIN;
@@ -3009,7 +3126,6 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
*cur_qp = hr_qp;
}
- hr_qp = *cur_qp;
wc->qp = &(*cur_qp)->ibqp;
wc->vendor_err = 0;
@@ -3044,77 +3160,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
++wq->tail;
}
- status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
- V2_CQE_BYTE_4_STATUS_S);
- switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
- case HNS_ROCE_CQE_V2_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_V2_MW_BIND_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
- wc->status = IB_WC_REM_ABORT_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /*
- * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
- * into errored mode. Hence, as a workaround to this hardware
- * limitation, driver needs to assist in flushing. But the flushing
- * operation uses mailbox to convey the QP state to the hardware and
- * which can sleep due to the mutex protection around the mailbox calls.
- * Hence, use the deferred flush for now. Once wc error detected, the
- * flushing operation is needed.
- */
- if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR) {
- ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
- status & HNS_ROCE_V2_CQE_STATUS_MASK);
-
- if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
- init_flush_work(hr_dev, hr_qp);
-
- return 0;
- }
-
- if (wc->status == IB_WC_WR_FLUSH_ERR)
+ get_cqe_status(hr_dev, *cur_qp, cqe, wc);
+ if (unlikely(wc->status != IB_WC_SUCCESS))
return 0;
if (is_send) {
@@ -3213,7 +3260,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
(roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
- if (ret)
+ if (unlikely(ret))
return -EAGAIN;
}
@@ -3514,29 +3561,18 @@ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sge.sge_cnt));
- else
- roce_set_field(context->byte_4_sqpn_tst,
- V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S,
- hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M, V2_QPC_BYTE_4_SGE_SHIFT_S,
+ to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift));
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ ilog2(hr_qp->sq.wqe_cnt));
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
- (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
- hr_qp->ibqp.srq) ? 0 :
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ ilog2(hr_qp->rq.wqe_cnt));
}
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
@@ -3572,7 +3608,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
roce_set_bit(context->byte_68_rq_db,
V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
@@ -3734,30 +3770,19 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
return true;
}
-static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
- struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
{
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_qp *ibqp = &hr_qp->ibqp;
u64 mtts[MTT_MIN_COUNT] = { 0 };
- dma_addr_t dma_handle_3;
- dma_addr_t dma_handle_2;
u64 wqe_sge_ba;
u32 page_size;
- u8 port_num;
- u64 *mtts_3;
- u64 *mtts_2;
int count;
- u8 *dmac;
- u8 *smac;
- int port;
/* Search qp buf's mtts */
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
hr_qp->rq.offset / page_size, mtts,
MTT_MIN_COUNT, &wqe_sge_ba);
@@ -3765,29 +3790,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
return -EINVAL;
- /* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
- hr_qp->qpn, &dma_handle_2);
- if (!mtts_2) {
- ibdev_err(ibdev, "failed to find QP irrl_table\n");
- return -EINVAL;
- }
-
- /* Search TRRL's mtts */
- mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
- hr_qp->qpn, &dma_handle_3);
- if (!mtts_3) {
- ibdev_err(ibdev, "failed to find QP trrl_table\n");
- return -EINVAL;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
- attr_mask);
- return -EINVAL;
- }
-
- dmac = (u8 *)attr->ah_attr.roce.dmac;
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@@ -3804,17 +3806,16 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S,
- hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : hr_dev->caps.wqe_sq_hop_num);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num,
+ hr_qp->sq.wqe_cnt));
roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- ((ibqp->qp_type == IB_QPT_GSI) ||
- hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- hr_dev->caps.wqe_sge_hop_num : 0);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num,
+ hr_qp->sge.sge_cnt));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -3822,8 +3823,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S,
- hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
- 0 : hr_dev->caps.wqe_rq_hop_num);
+ to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num,
+ hr_qp->rq.wqe_cnt));
+
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_RQ_HOP_NUM_M,
V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
@@ -3831,7 +3833,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
- hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
@@ -3839,50 +3841,181 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
- hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
- context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+ context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
qpc_mask->rq_cur_blk_addr = 0;
roce_set_field(context->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
- mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[0])));
roce_set_field(qpc_mask->byte_92_srq_info,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
- context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+ context->rq_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
qpc_mask->rq_nxt_blk_addr = 0;
roce_set_field(context->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
- mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+ upper_32_bits(to_hr_hw_page_addr(mtts[1])));
roce_set_field(qpc_mask->byte_104_rq_sge,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+
+ return 0;
+}
+
+static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 sge_cur_blk = 0;
+ u64 sq_cur_blk = 0;
+ u32 page_size;
+ int count;
+
+ /* search qp buf's mtts */
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
+ hr_qp->qpn);
+ return -EINVAL;
+ }
+ if (hr_qp->sge.sge_cnt > 0) {
+ page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
+ count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+ hr_qp->sge.offset / page_size,
+ &sge_cur_blk, 1, NULL);
+ if (count < 1) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
+ hr_qp->qpn);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * In the v2 engine, software passes both a context and a context mask
+ * to the hardware when modifying a QP. For every field that software
+ * needs to modify, all bits of that field in the context mask must be
+ * cleared to 0 at the same time; otherwise the mask bits stay set
+ * to 0x1.
+ */
+ context->sq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ qpc_mask->sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
+
+ context->sq_cur_sge_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(sge_cur_blk));
+ roce_set_field(context->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sge_cur_blk)));
+ qpc_mask->sq_cur_sge_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
+
+ context->rx_sq_cur_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(sq_cur_blk));
+ roce_set_field(context->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(sq_cur_blk)));
+ qpc_mask->rx_sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+
+ return 0;
+}
+
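The comment in config_qp_sq_buf() above describes the context/context-mask convention: for each field that software wants the hardware to update, the corresponding mask bits are cleared to 0, while fields that must not change keep their mask bits at 1. One plausible way to picture the resulting merge is sketched below; the apply_masked_update() helper and the sample values are purely illustrative assumptions, not taken from the hardware documentation.

#include <stdint.h>
#include <stdio.h>

/* Keep bits where the mask is 1, take bits from the new context where it is 0. */
static uint32_t apply_masked_update(uint32_t cur, uint32_t ctx, uint32_t mask)
{
	return (cur & mask) | (ctx & ~mask);
}

int main(void)
{
	uint32_t qpc = 0xaabbccdd;	/* current (hypothetical) QPC word         */
	uint32_t ctx = 0x000011dd;	/* new value for the low 16-bit field      */
	uint32_t mask = 0xffff0000;	/* low 16 bits are the field being changed */

	/* prints 0xaabb11dd: high half preserved, low half replaced */
	printf("0x%08x\n", (unsigned int)apply_masked_update(qpc, ctx, mask));
	return 0;
}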
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t trrl_ba;
+ dma_addr_t irrl_ba;
+ u8 port_num;
+ u64 *mtts;
+ u8 *dmac;
+ u8 *smac;
+ int port;
+ int ret;
+
+ ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config rq buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Search IRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &irrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp irrl_table.\n");
+ return -EINVAL;
+ }
+
+ /* Search TRRL's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
+ hr_qp->qpn, &trrl_ba);
+ if (!mtts) {
+ ibdev_err(ibdev, "failed to find qp trrl_table.\n");
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error.\n",
+ attr_mask);
+ return -EINVAL;
+ }
+
roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
- V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ V2_QPC_BYTE_132_TRRL_BA_S, trrl_ba >> 4);
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
V2_QPC_BYTE_132_TRRL_BA_S, 0);
- context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
+ context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4));
qpc_mask->trrl_ba = 0;
roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S,
- (u32)(dma_handle_3 >> (32 + 16 + 4)));
+ (u32)(trrl_ba >> (32 + 16 + 4)));
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
V2_QPC_BYTE_140_TRRL_BA_S, 0);
- context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
+ context->irrl_ba = cpu_to_le32(irrl_ba >> 6);
qpc_mask->irrl_ba = 0;
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S,
- dma_handle_2 >> (32 + 6));
+ irrl_ba >> (32 + 6));
roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
V2_QPC_BYTE_208_IRRL_BA_S, 0);
@@ -3897,6 +4030,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
smac = (u8 *)hr_dev->dev_addr[port];
+ dmac = (u8 *)attr->ah_attr.roce.dmac;
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1) {
@@ -3919,6 +4053,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
grh->sgid_index));
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
+
memcpy(&(context->dmac), dmac, sizeof(u32));
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
@@ -3928,7 +4063,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
- V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+ V2_QPC_BYTE_56_LP_PKTN_INI_S,
+ ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
@@ -3942,16 +4078,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
- roce_set_field(context->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
- V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-
- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
- V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
@@ -3987,30 +4113,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
- u64 sge_cur_blk = 0;
- u64 sq_cur_blk = 0;
- u32 page_size;
- int count;
-
- /* Search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
- hr_qp->qpn);
- return -EINVAL;
- }
-
- if (hr_qp->sge.offset) {
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
- hr_qp->sge.offset / page_size,
- &sge_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
- hr_qp->qpn);
- return -EINVAL;
- }
- }
+ int ret;
/* Not support alternate path and path migration */
if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
@@ -4018,48 +4121,11 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return -EINVAL;
}
- /*
- * In v2 engine, software pass context and context mask to hardware
- * when modifying qp. If software need modify some fields in context,
- * we should set all bits of the relevant fields in context mask to
- * 0 at the same time, else set them to 0x1.
- */
- context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
- sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
- qpc_mask->sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
-
- context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
- hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- cpu_to_le32(sge_cur_blk >>
- PAGE_ADDR_SHIFT) : 0;
- roce_set_field(context->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
- HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
- (sge_cur_blk >>
- (32 + PAGE_ADDR_SHIFT)) : 0);
- qpc_mask->sq_cur_sge_blk_addr = 0;
- roce_set_field(qpc_mask->byte_184_irrl_idx,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
- V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
-
- context->rx_sq_cur_blk_addr =
- cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
- roce_set_field(context->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
- sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
- qpc_mask->rx_sq_cur_blk_addr = 0;
- roce_set_field(qpc_mask->byte_232_irrl_sge,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
- V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+ ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+ ibdev_err(ibdev, "failed to config sq buf, ret %d\n", ret);
+ return ret;
+ }
/*
* Set some fields in context to zero, Because the default values
@@ -4108,21 +4174,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
-
- if ((cur_state != IB_QPS_RESET &&
- (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
- ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
- (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
- (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
- return true;
-
- return false;
-
-}
-
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4226,6 +4277,28 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
return 0;
}
+static bool check_qp_state(enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ static const bool sm[][IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true },
+ [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
+ [IB_QPS_INIT] = true,
+ [IB_QPS_RTR] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_RTS] = true,
+ [IB_QPS_ERR] = true },
+ [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
+ [IB_QPS_SQD] = {},
+ [IB_QPS_SQE] = {},
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ };
+
+ return sm[cur_state][new_state];
+}
+
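check_qp_state() above replaces the open-coded hns_roce_v2_check_qp_stat() helper with a transition table indexed by the current and the requested QP state. A trimmed, standalone copy of that table (enum names shortened from enum ib_qp_state, with the empty SQD/SQE rows left to zero-initialization, i.e. all transitions disallowed, exactly as in the hunk) shows how a lookup reads:

#include <stdbool.h>
#include <stdio.h>

enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_SQD, QPS_SQE, QPS_ERR };

static const bool sm[QPS_ERR + 1][QPS_ERR + 1] = {
	[QPS_RESET] = { [QPS_RESET] = true, [QPS_INIT] = true },
	[QPS_INIT]  = { [QPS_RESET] = true, [QPS_INIT] = true,
			[QPS_RTR] = true, [QPS_ERR] = true },
	[QPS_RTR]   = { [QPS_RESET] = true, [QPS_RTS] = true, [QPS_ERR] = true },
	[QPS_RTS]   = { [QPS_RESET] = true, [QPS_ERR] = true },
	[QPS_ERR]   = { [QPS_RESET] = true, [QPS_ERR] = true },
};

int main(void)
{
	printf("INIT -> RTR allowed:  %d\n", sm[QPS_INIT][QPS_RTR]);	/* 1 */
	printf("RESET -> RTS allowed: %d\n", sm[QPS_RESET][QPS_RTS]);	/* 0 */
	return 0;
}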
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4237,6 +4310,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
+ if (!check_qp_state(cur_state, new_state)) {
+ ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
+ return -EINVAL;
+ }
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
@@ -4247,23 +4325,11 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
- if (ret)
- goto out;
- } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
- /* Nothing */
- ;
- } else {
- ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
- ret = -EINVAL;
- goto out;
}
-out:
return ret;
}
@@ -4554,19 +4620,20 @@ out:
return ret;
}
-static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
+static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
{
- switch (state) {
- case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
- case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
- case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
- case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
- case HNS_ROCE_QP_ST_SQ_DRAINING:
- case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
- case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
- case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
- default: return -1;
- }
+ static const enum ib_qp_state map[] = {
+ [HNS_ROCE_QP_ST_RST] = IB_QPS_RESET,
+ [HNS_ROCE_QP_ST_INIT] = IB_QPS_INIT,
+ [HNS_ROCE_QP_ST_RTR] = IB_QPS_RTR,
+ [HNS_ROCE_QP_ST_RTS] = IB_QPS_RTS,
+ [HNS_ROCE_QP_ST_SQD] = IB_QPS_SQD,
+ [HNS_ROCE_QP_ST_SQER] = IB_QPS_SQE,
+ [HNS_ROCE_QP_ST_ERR] = IB_QPS_ERR,
+ [HNS_ROCE_QP_ST_SQ_DRAINING] = IB_QPS_SQD
+ };
+
+ return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
@@ -4639,7 +4706,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->path_mig_state = IB_MIG_ARMED;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
- qp_attr->qkey = V2_QKEY_VAL;
+ qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_EPSN_M,
@@ -4838,6 +4905,184 @@ out:
return ret;
}
+static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
+ u32 cqn, void *mb_buf, u64 *mtts_wqe,
+ u64 *mtts_idx, dma_addr_t dma_handle_wqe,
+ dma_addr_t dma_handle_idx)
+{
+ struct hns_roce_srq_context *srq_context;
+
+ srq_context = mb_buf;
+ memset(srq_context, 0, sizeof(*srq_context));
+
+ roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
+ SRQC_BYTE_4_SRQ_ST_S, 1);
+
+ roce_set_field(srq_context->byte_4_srqn_srqst,
+ SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
+ SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
+ to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+ srq->wqe_cnt));
+ roce_set_field(srq_context->byte_4_srqn_srqst,
+ SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
+ ilog2(srq->wqe_cnt));
+
+ roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
+ SRQC_BYTE_4_SRQN_S, srq->srqn);
+
+ roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+
+ roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
+ SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
+
+ srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
+
+ roce_set_field(srq_context->byte_24_wqe_bt_ba,
+ SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
+ SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
+ dma_handle_wqe >> 35);
+
+ roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
+ SRQC_BYTE_28_PD_S, pdn);
+ roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
+ SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
+ fls(srq->max_gs - 1));
+
+ srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
+ roce_set_field(srq_context->rsv_idx_bt_ba,
+ SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
+ SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
+ dma_handle_idx >> 35);
+
+ srq_context->idx_cur_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts_idx[0]));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
+ SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[0])));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
+ SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
+ srq->wqe_cnt));
+
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
+ to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.ba_pg_shift));
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
+ to_hr_hw_page_shift(srq->idx_que.mtr.hem_cfg.buf_pg_shift));
+
+ srq_context->idx_nxt_blk_addr =
+ cpu_to_le32(to_hr_hw_page_addr(mtts_idx[1]));
+ roce_set_field(srq_context->rsv_idxnxtblkaddr,
+ SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
+ SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
+ upper_32_bits(to_hr_hw_page_addr(mtts_idx[1])));
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
+ cqn);
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
+ SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
+ roce_set_field(srq_context->byte_56_xrc_cqn,
+ SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
+ SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
+ to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
+
+ roce_set_bit(srq_context->db_record_addr_record_en,
+ SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
+}
+
+static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_srq_context *srqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ if (srq_attr_mask & IB_SRQ_LIMIT) {
+ if (srq_attr->srq_limit >= srq->wqe_cnt)
+ return -EINVAL;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
+
+ memset(srqc_mask, 0xff, sizeof(*srqc_mask));
+
+ roce_set_field(srq_context->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
+ roce_set_field(srqc_mask->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
+ HNS_ROCE_CMD_MODIFY_SRQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to handle cmd of modifying SRQ, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_srq_context *srq_context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int limit_wl;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
+ HNS_ROCE_CMD_QUERY_SRQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd of querying SRQ, ret = %d.\n",
+ ret);
+ goto out;
+ }
+
+ limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_M,
+ SRQC_BYTE_8_SRQ_LIMIT_WL_S);
+
+ attr->srq_limit = limit_wl;
+ attr->max_wr = srq->wqe_cnt - 1;
+ attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -4989,24 +5234,14 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64(hr_dev, doorbell, eq->doorbell);
}
-static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
-{
- u32 buf_chk_sz;
-
- buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
- if (eq->buf.nbufs == 1)
- return eq->buf.direct.buf + offset % buf_chk_sz;
- else
- return eq->buf.page_list[offset / buf_chk_sz].buf +
- offset % buf_chk_sz;
-}
-
static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_aeqe *aeqe;
- aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE);
+ aeqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE);
+
return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
@@ -5103,8 +5338,9 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
struct hns_roce_ceqe *ceqe;
- ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE);
+ ceqe = hns_roce_buf_offset(eq->mtr.kmem,
+ (eq->cons_index & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE);
return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
@@ -5263,17 +5499,15 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
- hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
- hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
+ hns_roce_mtr_destroy(hr_dev, &eq->mtr);
}
-static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq,
- void *mb_buf)
+static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ void *mb_buf)
{
+ u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
- u64 ba[MTT_MIN_COUNT] = { 0 };
+ u64 bt_ba = 0;
int count;
eqc = mb_buf;
@@ -5281,31 +5515,18 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* init eqc */
eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
- eq->hop_num = hr_dev->caps.eqe_hop_num;
eq->cons_index = 0;
eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
- eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
- eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
eq->shift = ilog2((unsigned int)eq->entries);
- /* if not muti-hop, eqe buffer only use one trunk */
- if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
- eq->eqe_ba = eq->buf.direct.map;
- eq->cur_eqe_ba = eq->eqe_ba;
- if (eq->buf.npages > 1)
- eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
- else
- eq->nxt_eqe_ba = eq->eqe_ba;
- } else {
- count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
- MTT_MIN_COUNT, &eq->eqe_ba);
- eq->cur_eqe_ba = ba[0];
- if (count > 1)
- eq->nxt_eqe_ba = ba[1];
- else
- eq->nxt_eqe_ba = ba[0];
+ /* if not multi-hop, the eqe buffer uses only one trunk */
+ count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
+ &bt_ba);
+ if (count < 1) {
+ dev_err(hr_dev->dev, "failed to find EQE mtr\n");
+ return -ENOBUFS;
}
/* set eqc state */
@@ -5339,12 +5560,12 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set eqe_ba_pg_sz */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
HNS_ROCE_EQC_BA_PG_SZ_S,
- eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
/* set eqe_buf_pg_sz */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
HNS_ROCE_EQC_BUF_PG_SZ_S,
- eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
+ to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
/* set eq_producer_idx */
roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
@@ -5363,13 +5584,13 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
HNS_ROCE_EQC_REPORT_TIMER_S,
HNS_ROCE_EQ_INIT_REPORT_TIMER);
- /* set eqe_ba [34:3] */
+ /* set bt_ba [34:3] */
roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
- HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+ HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
- /* set eqe_ba [64:35] */
+ /* set bt_ba [64:35] */
roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
- HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+ HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
/* set eq shift */
roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
@@ -5381,15 +5602,15 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set cur_eqe_ba [27:12] */
roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
- HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
/* set cur_eqe_ba [59:28] */
roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
- HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
/* set cur_eqe_ba [63:60] */
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
- HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
/* set eq consumer idx */
roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
@@ -5397,97 +5618,38 @@ static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
/* set nex_eqe_ba[43:12] */
roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
- HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
/* set nex_eqe_ba[63:44] */
roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
- HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
-}
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
-static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
- u32 page_shift)
-{
- struct hns_roce_buf_region region = {};
- dma_addr_t *buf_list = NULL;
- int ba_num;
- int ret;
-
- ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
- 1 << page_shift);
- hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);
-
- /* alloc a tmp list for storing eq buf address */
- ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
- if (ret) {
- dev_err(hr_dev->dev, "alloc eq buf_list error\n");
- return ret;
- }
-
- ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
- region.offset, &eq->buf);
- if (ba_num != region.count) {
- dev_err(hr_dev->dev, "get eqe buf err,expect %d,ret %d.\n",
- region.count, ba_num);
- ret = -ENOBUFS;
- goto done;
- }
-
- hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
- if (ret)
- dev_err(hr_dev->dev, "mtr attach error for eqe\n");
-
- goto done;
-
- hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
-done:
- hns_roce_free_buf_list(&buf_list, 1);
-
- return ret;
+ return 0;
}
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
- struct hns_roce_buf *buf = &eq->buf;
- bool is_mhop = false;
- u32 page_shift;
- u32 mhop_num;
- u32 max_size;
- int ret;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
- page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
- mhop_num = hr_dev->caps.eqe_hop_num;
- if (!mhop_num) {
- max_size = 1 << page_shift;
- buf->size = max_size;
- } else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
- max_size = eq->entries * eq->eqe_size;
- buf->size = max_size;
- } else {
- max_size = 1 << page_shift;
- buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
- is_mhop = true;
- }
+ if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
+ eq->hop_num = 0;
+ else
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
- ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "alloc eq buf error\n");
- return ret;
- }
+ buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = eq->entries * eq->eqe_size;
+ buf_attr.region[0].hopnum = eq->hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
- if (is_mhop) {
- ret = map_eq_buf(hr_dev, eq, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "map roce buf error\n");
- goto err_alloc;
- }
- }
+ err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
+ hr_dev->caps.eqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, NULL, 0);
+ if (err)
+ dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
- return 0;
-err_alloc:
- hns_roce_buf_free(hr_dev, buf->size, buf);
- return ret;
+ return err;
}
static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
@@ -5499,15 +5661,16 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
+ if (IS_ERR_OR_NULL(mailbox))
+ return -ENOMEM;
ret = alloc_eq_buf(hr_dev, eq);
- if (ret) {
- ret = -ENOMEM;
+ if (ret)
goto free_cmd_mbox;
- }
- hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = config_eqc(hr_dev, eq, mailbox->buf);
+ if (ret)
+ goto err_cmd_mbox;
ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
@@ -5731,294 +5894,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
destroy_workqueue(hr_dev->irq_workq);
}
-static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
- u32 cqn, void *mb_buf, u64 *mtts_wqe,
- u64 *mtts_idx, dma_addr_t dma_handle_wqe,
- dma_addr_t dma_handle_idx)
-{
- struct hns_roce_srq_context *srq_context;
-
- srq_context = mb_buf;
- memset(srq_context, 0, sizeof(*srq_context));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
- SRQC_BYTE_4_SRQ_ST_S, 1);
-
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
- SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
- (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- hr_dev->caps.srqwqe_hop_num));
- roce_set_field(srq_context->byte_4_srqn_srqst,
- SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
- ilog2(srq->wqe_cnt));
-
- roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
- SRQC_BYTE_4_SRQN_S, srq->srqn);
-
- roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
- SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
-
- srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
-
- roce_set_field(srq_context->byte_24_wqe_bt_ba,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
- SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
- dma_handle_wqe >> 35);
-
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
- SRQC_BYTE_28_PD_S, pdn);
- roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
- SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
- fls(srq->max_gs - 1));
-
- srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
- roce_set_field(srq_context->rsv_idx_bt_ba,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
- SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
- dma_handle_idx >> 35);
-
- srq_context->idx_cur_blk_addr =
- cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
- SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
- mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
- SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
- hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- hr_dev->caps.idx_hop_num);
-
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
- hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
- SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
- hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
-
- srq_context->idx_nxt_blk_addr =
- cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
- roce_set_field(srq_context->rsv_idxnxtblkaddr,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
- SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
- mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
- cqn);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
- hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
- roce_set_field(srq_context->byte_56_xrc_cqn,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
- SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
- hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
-
- roce_set_bit(srq_context->db_record_addr_record_en,
- SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
-}
-
-static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
- struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_srq_context *srq_context;
- struct hns_roce_srq_context *srqc_mask;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- if (srq_attr_mask & IB_SRQ_LIMIT) {
- if (srq_attr->srq_limit >= srq->wqe_cnt)
- return -EINVAL;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- srq_context = mailbox->buf;
- srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
-
- memset(srqc_mask, 0xff, sizeof(*srqc_mask));
-
- roce_set_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
- roce_set_field(srqc_mask->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
-
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
- HNS_ROCE_CMD_MODIFY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when modifying SRQ, ret = %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_srq_context *srq_context;
- struct hns_roce_cmd_mailbox *mailbox;
- int limit_wl;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- srq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
- HNS_ROCE_CMD_QUERY_SRQC,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev,
- "failed to process cmd when querying SRQ, ret = %d\n",
- ret);
- goto out;
- }
-
- limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
- SRQC_BYTE_8_SRQ_LIMIT_WL_M,
- SRQC_BYTE_8_SRQ_LIMIT_WL_S);
-
- attr->srq_limit = limit_wl;
- attr->max_wr = srq->wqe_cnt - 1;
- attr->max_sge = srq->max_gs;
-
- memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
-
-out:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
-}
-
-static int find_empty_entry(struct hns_roce_idx_que *idx_que,
- unsigned long size)
-{
- int wqe_idx;
-
- if (unlikely(bitmap_full(idx_que->bitmap, size)))
- return -ENOSPC;
-
- wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
-
- bitmap_set(idx_que->bitmap, wqe_idx, 1);
-
- return wqe_idx;
-}
-
-static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
- int cur_idx, int wqe_idx)
-{
- unsigned int *addr;
-
- addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
- cur_idx * idx_que->entry_sz);
- *addr = wqe_idx;
-}
-
-static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
- const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
- struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- struct hns_roce_v2_wqe_data_seg *dseg;
- struct hns_roce_v2_db srq_db;
- unsigned long flags;
- int ret = 0;
- int wqe_idx;
- void *wqe;
- int nreq;
- int ind;
- int i;
-
- spin_lock_irqsave(&srq->lock, flags);
-
- ind = srq->head & (srq->wqe_cnt - 1);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (unlikely(wr->num_sge > srq->max_gs)) {
- ret = -EINVAL;
- *bad_wr = wr;
- break;
- }
-
- if (unlikely(srq->head == srq->tail)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
- wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
- if (wqe_idx < 0) {
- ret = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
- fill_idx_queue(&srq->idx_que, ind, wqe_idx);
- wqe = get_srq_wqe(srq, wqe_idx);
- dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
-
- for (i = 0; i < wr->num_sge; ++i) {
- dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
- dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
- dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
- }
-
- if (i < srq->max_gs) {
- dseg[i].len = 0;
- dseg[i].lkey = cpu_to_le32(0x100);
- dseg[i].addr = 0;
- }
-
- srq->wrid[wqe_idx] = wr->wr_id;
- ind = (ind + 1) & (srq->wqe_cnt - 1);
- }
-
- if (likely(nreq)) {
- srq->head += nreq;
-
- /*
- * Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
- srq_db.byte_4 =
- cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
- (srq->srqn & V2_DB_BYTE_4_TAG_M));
- srq_db.parameter = cpu_to_le32(srq->head);
-
- hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
-
- }
-
- spin_unlock_irqrestore(&srq->lock, flags);
-
- return ret;
-}
-
static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
.query_cqc_info = hns_roce_v2_query_cqc_info,
};
@@ -6161,7 +6036,7 @@ error_failed_kzalloc:
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+ struct hns_roce_dev *hr_dev = handle->priv;
if (!hr_dev)
return;
@@ -6241,7 +6116,7 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
- hr_dev = (struct hns_roce_dev *)handle->priv;
+ hr_dev = handle->priv;
if (!hr_dev)
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 82dd9f6f4845..e176b0aaa4ac 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -92,7 +92,9 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
-#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_INVALID_LKEY 0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
+
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
@@ -1241,10 +1243,9 @@ struct hns_roce_func_clear {
};
#define FUNC_CLEAR_RST_FUN_DONE_S 0
-/* Each physical function manages up to 248 virtual functions；
- * it takes up to 100ms for each function to execute clear；
- * if an abnormal reset occurs, it is executed twice at most;
- * so it takes up to 249 * 2 * 100ms.
+/* Each physical function manages up to 248 virtual functions, it takes up to
+ * 100ms for each function to execute clear. If an abnormal reset occurs, it is
+ * executed twice at most, so it takes up to 249 * 2 * 100ms.
*/
#define HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS (249 * 2 * 100)
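Spelling out the arithmetic behind the timeout above (the count of 249 presumably being the physical function itself plus its 248 virtual functions):

	249 functions * 2 attempts * 100 ms per attempt = 49,800 ms, i.e. just under 50 seconds.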
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
@@ -1648,7 +1649,7 @@ struct hns_roce_query_pf_caps_c {
struct hns_roce_query_pf_caps_d {
__le32 wq_hop_num_max_srqs;
__le16 srq_depth;
- __le16 rsv;
+ __le16 cap_flags_ex;
__le32 num_ceqs_ceq_depth;
__le32 arm_st_aeq_depth;
__le32 num_uars_rsv_pds;
@@ -1978,7 +1979,7 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
- struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d0031d559213..50763cf4fa3d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -233,7 +233,6 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
enum ib_mtu mtu;
u8 port;
- assert(port_num > 0);
port = port_num - 1;
/* props being zeroed by the caller, avoid zeroing it here */
@@ -579,33 +578,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
int ret;
struct device *dev = hr_dev->dev;
- ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
- HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_mtt_segs, 1);
- if (ret) {
- dev_err(dev, "Failed to init MTT context memory, aborting.\n");
- return ret;
- }
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table,
- HEM_TYPE_CQE,
- hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_cqe_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init CQE context memory, aborting.\n");
- goto err_unmap_cqe;
- }
- }
-
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
hr_dev->caps.num_mtpts, 1);
if (ret) {
dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
- goto err_unmap_mtt;
+ return ret;
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
@@ -660,32 +638,6 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
}
}
- if (hr_dev->caps.num_srqwqe_segs) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table,
- HEM_TYPE_SRQWQE,
- hr_dev->caps.mtt_entry_sz,
- hr_dev->caps.num_srqwqe_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init MTT srqwqe memory, aborting.\n");
- goto err_unmap_srq;
- }
- }
-
- if (hr_dev->caps.num_idx_segs) {
- ret = hns_roce_init_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table,
- HEM_TYPE_IDX,
- hr_dev->caps.idx_entry_sz,
- hr_dev->caps.num_idx_segs, 1);
- if (ret) {
- dev_err(dev,
- "Failed to init MTT idx memory, aborting.\n");
- goto err_unmap_srqwqe;
- }
- }
-
if (hr_dev->caps.sccc_entry_sz) {
ret = hns_roce_init_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table,
@@ -695,7 +647,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
if (ret) {
dev_err(dev,
"Failed to init SCC context memory, aborting.\n");
- goto err_unmap_idx;
+ goto err_unmap_srq;
}
}
@@ -733,17 +685,6 @@ err_unmap_ctx:
if (hr_dev->caps.sccc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev,
&hr_dev->qp_table.sccc_table);
-
-err_unmap_idx:
- if (hr_dev->caps.num_idx_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_idx_table);
-
-err_unmap_srqwqe:
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table);
-
err_unmap_srq:
if (hr_dev->caps.srqc_entry_sz)
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);
@@ -765,14 +706,6 @@ err_unmap_qp:
err_unmap_dmpt:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
-err_unmap_mtt:
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_cleanup_hem_table(hr_dev,
- &hr_dev->mr_table.mtt_cqe_table);
-
-err_unmap_cqe:
- hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
-
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 176f34692f88..4c0bbb12770d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -66,645 +66,89 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
- unsigned long *seg)
+static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ u32 pd, u64 iova, u64 size, u32 access)
{
- int o;
- u32 m;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned long obj = 0;
+ int err;
- spin_lock(&buddy->lock);
-
- for (o = order; o <= buddy->max_order; ++o) {
- if (buddy->num_free[o]) {
- m = 1 << (buddy->max_order - o);
- *seg = find_first_bit(buddy->bits[o], m);
- if (*seg < m)
- goto found;
- }
- }
- spin_unlock(&buddy->lock);
- return -EINVAL;
-
- found:
- clear_bit(*seg, buddy->bits[o]);
- --buddy->num_free[o];
-
- while (o > order) {
- --o;
- *seg <<= 1;
- set_bit(*seg ^ 1, buddy->bits[o]);
- ++buddy->num_free[o];
- }
-
- spin_unlock(&buddy->lock);
-
- *seg <<= order;
- return 0;
-}
-
-static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
- int order)
-{
- seg >>= order;
-
- spin_lock(&buddy->lock);
-
- while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
- --buddy->num_free[order];
- seg >>= 1;
- ++order;
- }
-
- set_bit(seg, buddy->bits[order]);
- ++buddy->num_free[order];
-
- spin_unlock(&buddy->lock);
-}
-
-static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
-{
- int i, s;
-
- buddy->max_order = max_order;
- spin_lock_init(&buddy->lock);
- buddy->bits = kcalloc(buddy->max_order + 1,
- sizeof(*buddy->bits),
- GFP_KERNEL);
- buddy->num_free = kcalloc(buddy->max_order + 1,
- sizeof(*buddy->num_free),
- GFP_KERNEL);
- if (!buddy->bits || !buddy->num_free)
- goto err_out;
-
- for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
- __GFP_NOWARN);
- if (!buddy->bits[i]) {
- buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
- if (!buddy->bits[i])
- goto err_out_free;
- }
- }
-
- set_bit(0, buddy->bits[buddy->max_order]);
- buddy->num_free[buddy->max_order] = 1;
-
- return 0;
-
-err_out_free:
- for (i = 0; i <= buddy->max_order; ++i)
- kvfree(buddy->bits[i]);
-
-err_out:
- kfree(buddy->bits);
- kfree(buddy->num_free);
- return -ENOMEM;
-}
-
-static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
-{
- int i;
-
- for (i = 0; i <= buddy->max_order; ++i)
- kvfree(buddy->bits[i]);
-
- kfree(buddy->bits);
- kfree(buddy->num_free);
-}
-
-static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
- unsigned long *seg, u32 mtt_type)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- struct hns_roce_hem_table *table;
- struct hns_roce_buddy *buddy;
- int ret;
-
- switch (mtt_type) {
- case MTT_TYPE_WQE:
- buddy = &mr_table->mtt_buddy;
- table = &mr_table->mtt_table;
- break;
- case MTT_TYPE_CQE:
- buddy = &mr_table->mtt_cqe_buddy;
- table = &mr_table->mtt_cqe_table;
- break;
- case MTT_TYPE_SRQWQE:
- buddy = &mr_table->mtt_srqwqe_buddy;
- table = &mr_table->mtt_srqwqe_table;
- break;
- case MTT_TYPE_IDX:
- buddy = &mr_table->mtt_idx_buddy;
- table = &mr_table->mtt_idx_table;
- break;
- default:
- dev_err(hr_dev->dev, "Unsupport MTT table type: %d\n",
- mtt_type);
- return -EINVAL;
- }
-
- ret = hns_roce_buddy_alloc(buddy, order, seg);
- if (ret)
- return ret;
-
- ret = hns_roce_table_get_range(hr_dev, table, *seg,
- *seg + (1 << order) - 1);
- if (ret) {
- hns_roce_buddy_free(buddy, *seg, order);
- return ret;
- }
-
- return 0;
-}
-
-int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
- struct hns_roce_mtt *mtt)
-{
- int ret;
- int i;
-
- /* Page num is zero, correspond to DMA memory register */
- if (!npages) {
- mtt->order = -1;
- mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
- return 0;
- }
-
- /* Note: if page_shift is zero, FAST memory register */
- mtt->page_shift = page_shift;
-
- /* Compute MTT entry necessary */
- for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
- i <<= 1)
- ++mtt->order;
-
- /* Allocate MTT entry */
- ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
- mtt->mtt_type);
- if (ret)
- return -ENOMEM;
-
- return 0;
-}
-
-void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-
- if (mtt->order < 0)
- return;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_CQE:
- hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_SRQWQE:
- hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- case MTT_TYPE_IDX:
- hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
- mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
- mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
- break;
- default:
- dev_err(hr_dev->dev,
- "Unsupport mtt type %d, clean mtt failed\n",
- mtt->mtt_type);
- break;
- }
-}
-
-static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, int err_loop_index,
- int loop_i, int loop_j)
-{
- struct device *dev = hr_dev->dev;
- u32 mhop_num;
- u32 pbl_bt_sz;
- u64 bt_idx;
- int i, j;
-
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = hr_dev->caps.pbl_hop_num;
-
- i = loop_i;
- if (mhop_num == 3 && err_loop_index == 2) {
- for (; i >= 0; i--) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- if (i == loop_i && j >= loop_j)
- break;
-
- bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- } else if (mhop_num == 3 && err_loop_index == 1) {
- for (i -= 1; i >= 0; i--) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- } else if (mhop_num == 2 && err_loop_index == 1) {
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
- } else {
- dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
- mhop_num, err_loop_index);
- return;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
- mr->pbl_bt_l0 = NULL;
- mr->pbl_l0_dma_addr = 0;
-}
-static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
-
- if (npages > pbl_bt_sz / 8) {
- dev_err(dev, "npages %d is larger than buf_pg_sz!",
- npages);
- return -EINVAL;
- }
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
+ /* Allocate a key for mr from mr_table */
+ err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
+ if (err) {
+ ibdev_err(ibdev,
+ "failed to alloc bitmap for MR key, ret = %d.\n",
+ err);
return -ENOMEM;
-
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = 1;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
- return 0;
-
-}
-
-
-static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
- int npages_allocated;
- u64 pbl_last_bt_num;
- u64 pbl_bt_cnt = 0;
- u64 size;
- int i;
-
- pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
-
- /* alloc L1 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = i * (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- return -ENOMEM;
- }
-
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
-
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num)
- break;
}
- mr->l0_chunk_last_num = i + 1;
-
- return 0;
-}
-
-static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr, u32 pbl_bt_sz)
-{
- struct device *dev = hr_dev->dev;
- int mr_alloc_done = 0;
- int npages_allocated;
- u64 pbl_last_bt_num;
- u64 pbl_bt_cnt = 0;
- u64 bt_idx;
- u64 size;
- int i;
- int j = 0;
-
- pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
-
- mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_l2_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l2_dma_addr)
- return -ENOMEM;
-
- mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
- sizeof(*mr->pbl_bt_l2),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2)
- goto err_kcalloc_bt_l2;
-
- /* alloc L1, L2 BT */
- for (i = 0; i < pbl_bt_sz / 8; i++) {
- mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l1_dma_addr[i]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1[i]) {
- hns_roce_loop_free(hr_dev, mr, 1, i, 0);
- goto err_dma_alloc_l0;
- }
-
- *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
-
- for (j = 0; j < pbl_bt_sz / 8; j++) {
- bt_idx = i * pbl_bt_sz / 8 + j;
-
- if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
- size = pbl_bt_sz;
- } else {
- npages_allocated = bt_idx *
- (pbl_bt_sz / 8);
- size = (npages - npages_allocated) * 8;
- }
- mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
- dev, size,
- &(mr->pbl_l2_dma_addr[bt_idx]),
- GFP_KERNEL);
- if (!mr->pbl_bt_l2[bt_idx]) {
- hns_roce_loop_free(hr_dev, mr, 2, i, j);
- goto err_dma_alloc_l0;
- }
-
- *(mr->pbl_bt_l1[i] + j) =
- mr->pbl_l2_dma_addr[bt_idx];
-
- pbl_bt_cnt++;
- if (pbl_bt_cnt >= pbl_last_bt_num) {
- mr_alloc_done = 1;
- break;
- }
- }
+ mr->iova = iova; /* MR va starting addr */
+ mr->size = size; /* MR addr range */
+ mr->pd = pd; /* MR num */
+ mr->access = access; /* MR access permit */
+ mr->enabled = 0; /* MR active status */
+ mr->key = hw_index_to_key(obj); /* MR key */
- if (mr_alloc_done)
- break;
+ err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ if (err) {
+ ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
+ goto err_free_bitmap;
}
- mr->l0_chunk_last_num = i + 1;
- mr->l1_chunk_last_num = j + 1;
-
-
return 0;
-
-err_dma_alloc_l0:
- kfree(mr->pbl_bt_l2);
- mr->pbl_bt_l2 = NULL;
-
-err_kcalloc_bt_l2:
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_l2_dma_addr = NULL;
-
- return -ENOMEM;
+err_free_bitmap:
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
+ return err;
}
-
-/* PBL multi hop addressing */
-static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
- struct hns_roce_mr *mr)
+static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- u32 pbl_bt_sz;
- u32 mhop_num;
-
- mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- if (mhop_num == 1)
- return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
-
- mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
- sizeof(*mr->pbl_l1_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_l1_dma_addr)
- return -ENOMEM;
-
- mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
- GFP_KERNEL);
- if (!mr->pbl_bt_l1)
- goto err_kcalloc_bt_l1;
-
- /* alloc L0 BT */
- mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
- &(mr->pbl_l0_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_bt_l0)
- goto err_kcalloc_l2_dma;
-
- if (mhop_num == 2) {
- if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
- goto err_kcalloc_l2_dma;
- }
-
- if (mhop_num == 3) {
- if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
- goto err_kcalloc_l2_dma;
- }
-
+ unsigned long obj = key_to_hw_index(mr->key);
- mr->pbl_size = npages;
- mr->pbl_ba = mr->pbl_l0_dma_addr;
- mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
- mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-
- return 0;
-
-err_kcalloc_l2_dma:
- kfree(mr->pbl_bt_l1);
- mr->pbl_bt_l1 = NULL;
-
-err_kcalloc_bt_l1:
- kfree(mr->pbl_l1_dma_addr);
- mr->pbl_l1_dma_addr = NULL;
-
- return -ENOMEM;
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
}
-static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
- u64 size, u32 access, int npages,
- struct hns_roce_mr *mr)
+static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
+ size_t length, struct ib_udata *udata, u64 start,
+ int access)
{
- struct device *dev = hr_dev->dev;
- unsigned long index = 0;
- int ret;
-
- /* Allocate a key for mr from mr_table */
- ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
- if (ret)
- return -ENOMEM;
-
- mr->iova = iova; /* MR va starting addr */
- mr->size = size; /* MR addr range */
- mr->pd = pd; /* MR num */
- mr->access = access; /* MR access permit */
- mr->enabled = 0; /* MR active status */
- mr->key = hw_index_to_key(index); /* MR key */
-
- if (size == ~0ull) {
- mr->pbl_buf = NULL;
- mr->pbl_dma_addr = 0;
- /* PBL multi-hop addressing parameters */
- mr->pbl_bt_l2 = NULL;
- mr->pbl_bt_l1 = NULL;
- mr->pbl_bt_l0 = NULL;
- mr->pbl_l2_dma_addr = NULL;
- mr->pbl_l1_dma_addr = NULL;
- mr->pbl_l0_dma_addr = 0;
- } else {
- if (!hr_dev->caps.pbl_hop_num) {
- mr->pbl_buf = dma_alloc_coherent(dev,
- npages * BA_BYTE_LEN,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
- return -ENOMEM;
- } else {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- }
- }
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ bool is_fast = mr->type == MR_TYPE_FRMR;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
+ buf_attr.page_shift = is_fast ? PAGE_SHIFT :
+ hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = length;
+ buf_attr.region[0].hopnum = mr->pbl_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+ buf_attr.user_access = access;
+	/* a fast MR's buffer is allocated before mapping, not at creation */
+ buf_attr.mtt_only = is_fast;
+
+ err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
+ hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, start);
+ if (err)
+ ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
+ else
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
- return ret;
+ return err;
}
-static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr)
+static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- int npages_allocated;
- int npages;
- int i, j;
- u32 pbl_bt_sz;
- u32 mhop_num;
- u64 bt_idx;
-
- npages = mr->pbl_size;
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
-
- if (mhop_num == HNS_ROCE_HOP_NUM_0)
- return;
-
- if (mhop_num == 1) {
- dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
- mr->pbl_buf, mr->pbl_dma_addr);
- return;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
- mr->pbl_l0_dma_addr);
-
- if (mhop_num == 2) {
- for (i = 0; i < mr->l0_chunk_last_num; i++) {
- if (i == mr->l0_chunk_last_num - 1) {
- npages_allocated =
- i * (pbl_bt_sz / BA_BYTE_LEN);
-
- dma_free_coherent(dev,
- (npages - npages_allocated) * BA_BYTE_LEN,
- mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- break;
- }
-
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
- }
- } else if (mhop_num == 3) {
- for (i = 0; i < mr->l0_chunk_last_num; i++) {
- dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
- mr->pbl_l1_dma_addr[i]);
-
- for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
- bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;
-
- if ((i == mr->l0_chunk_last_num - 1)
- && j == mr->l1_chunk_last_num - 1) {
- npages_allocated = bt_idx *
- (pbl_bt_sz / BA_BYTE_LEN);
-
- dma_free_coherent(dev,
- (npages - npages_allocated) *
- BA_BYTE_LEN,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
-
- break;
- }
-
- dma_free_coherent(dev, pbl_bt_sz,
- mr->pbl_bt_l2[bt_idx],
- mr->pbl_l2_dma_addr[bt_idx]);
- }
- }
- }
-
- kfree(mr->pbl_bt_l1);
- kfree(mr->pbl_l1_dma_addr);
- mr->pbl_bt_l1 = NULL;
- mr->pbl_l1_dma_addr = NULL;
- if (mhop_num == 3) {
- kfree(mr->pbl_bt_l2);
- kfree(mr->pbl_l2_dma_addr);
- mr->pbl_bt_l2 = NULL;
- mr->pbl_l2_dma_addr = NULL;
- }
+ hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
- struct device *dev = hr_dev->dev;
- int npages = 0;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
if (mr->enabled) {
@@ -712,27 +156,12 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
key_to_hw_index(mr->key) &
(hr_dev->caps.num_mtpts - 1));
if (ret)
- dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
+ ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
+ ret);
}
- if (mr->size != ~0ULL) {
- if (mr->type == MR_TYPE_MR)
- npages = ib_umem_page_count(mr->umem);
-
- if (!hr_dev->caps.pbl_hop_num)
- dma_free_coherent(dev,
- (unsigned int)(npages * BA_BYTE_LEN),
- mr->pbl_buf, mr->pbl_dma_addr);
- else
- hns_roce_mhop_free(hr_dev, mr);
- }
-
- if (mr->enabled)
- hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
- key_to_hw_index(mr->key));
-
- hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
- key_to_hw_index(mr->key), BITMAP_NO_RR);
+ free_mr_pbl(hr_dev, mr);
+ free_mr_key(hr_dev, mr);
}
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -742,18 +171,12 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
unsigned long mtpt_idx = key_to_hw_index(mr->key);
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-
- /* Prepare HEM entry memory */
- ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- if (ret)
- return ret;
/* Allocate mailbox memory */
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox)) {
ret = PTR_ERR(mailbox);
- goto err_table;
+ return ret;
}
if (mr->type != MR_TYPE_FRMR)
@@ -780,137 +203,6 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
err_page:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-err_table:
- hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- return ret;
-}
-
-static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, u32 start_index,
- u32 npages, u64 *page_list)
-{
- struct hns_roce_hem_table *table;
- dma_addr_t dma_handle;
- __le64 *mtts;
- u32 bt_page_size;
- u32 i;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- table = &hr_dev->mr_table.mtt_table;
- bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_CQE:
- table = &hr_dev->mr_table.mtt_cqe_table;
- bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_SRQWQE:
- table = &hr_dev->mr_table.mtt_srqwqe_table;
- bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_IDX:
- table = &hr_dev->mr_table.mtt_idx_table;
- bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
- break;
- default:
- return -EINVAL;
- }
-
- /* All MTTs must fit in the same page */
- if (start_index / (bt_page_size / sizeof(u64)) !=
- (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
- return -EINVAL;
-
- if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
- return -EINVAL;
-
- mtts = hns_roce_table_find(hr_dev, table,
- mtt->first_seg +
- start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
- &dma_handle);
- if (!mtts)
- return -ENOMEM;
-
- /* Save page addr, low 12 bits : 0 */
- for (i = 0; i < npages; ++i) {
- if (!hr_dev->caps.mtt_hop_num)
- mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
- else
- mtts[i] = cpu_to_le64(page_list[i]);
- }
-
- return 0;
-}
-
-static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, u32 start_index,
- u32 npages, u64 *page_list)
-{
- int chunk;
- int ret;
- u32 bt_page_size;
-
- if (mtt->order < 0)
- return -EINVAL;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_CQE:
- bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_SRQWQE:
- bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
- break;
- case MTT_TYPE_IDX:
- bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
- break;
- default:
- dev_err(hr_dev->dev,
- "Unsupport mtt type %d, write mtt failed\n",
- mtt->mtt_type);
- return -EINVAL;
- }
-
- while (npages > 0) {
- chunk = min_t(int, bt_page_size / sizeof(u64), npages);
-
- ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
- page_list);
- if (ret)
- return ret;
-
- npages -= chunk;
- start_index += chunk;
- page_list += chunk;
- }
-
- return 0;
-}
-
-int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
-{
- u64 *page_list;
- int ret;
- u32 i;
-
- page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
- if (!page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->npages; ++i) {
- if (buf->nbufs == 1)
- page_list[i] = buf->direct.map + (i << buf->page_shift);
- else
- page_list[i] = buf->page_list[i].map;
-
- }
- ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);
-
- kfree(page_list);
-
return ret;
}
@@ -923,50 +215,6 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_mtpts,
hr_dev->caps.num_mtpts - 1,
hr_dev->caps.reserved_mrws, 0);
- if (ret)
- return ret;
-
- ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
- ilog2(hr_dev->caps.num_mtt_segs));
- if (ret)
- goto err_buddy;
-
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
- ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
- ilog2(hr_dev->caps.num_cqe_segs));
- if (ret)
- goto err_buddy_cqe;
- }
-
- if (hr_dev->caps.num_srqwqe_segs) {
- ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
- ilog2(hr_dev->caps.num_srqwqe_segs));
- if (ret)
- goto err_buddy_srqwqe;
- }
-
- if (hr_dev->caps.num_idx_segs) {
- ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
- ilog2(hr_dev->caps.num_idx_segs));
- if (ret)
- goto err_buddy_idx;
- }
-
- return 0;
-
-err_buddy_idx:
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
-
-err_buddy_srqwqe:
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
-
-err_buddy_cqe:
- hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
-
-err_buddy:
- hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
return ret;
}
@@ -974,30 +222,24 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- if (hr_dev->caps.num_idx_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
- if (hr_dev->caps.num_srqwqe_segs)
- hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
- hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
- if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
- hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_mr *mr;
int ret;
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
/* Allocate memory region key */
- ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
- ~0ULL, acc, 0, mr);
+ hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
if (ret)
goto err_free;
@@ -1006,203 +248,52 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->umem = NULL;
return &mr->ibmr;
-
err_mr:
- hns_roce_mr_free(to_hr_dev(pd->device), mr);
+ free_mr_key(hr_dev, mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
}
-int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtt *mtt, struct ib_umem *umem)
-{
- struct device *dev = hr_dev->dev;
- struct sg_dma_page_iter sg_iter;
- unsigned int order;
- int npage = 0;
- int ret = 0;
- int i;
- u64 page_addr;
- u64 *pages;
- u32 bt_page_size;
- u32 n;
-
- switch (mtt->mtt_type) {
- case MTT_TYPE_WQE:
- order = hr_dev->caps.mtt_ba_pg_sz;
- break;
- case MTT_TYPE_CQE:
- order = hr_dev->caps.cqe_ba_pg_sz;
- break;
- case MTT_TYPE_SRQWQE:
- order = hr_dev->caps.srqwqe_ba_pg_sz;
- break;
- case MTT_TYPE_IDX:
- order = hr_dev->caps.idx_ba_pg_sz;
- break;
- default:
- dev_err(dev, "Unsupport mtt type %d, write mtt failed\n",
- mtt->mtt_type);
- return -EINVAL;
- }
-
- bt_page_size = 1 << (order + PAGE_SHIFT);
-
- pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
- if (!pages)
- return -ENOMEM;
-
- i = n = 0;
-
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
- if (page_addr & ((1 << mtt->page_shift) - 1)) {
- dev_err(dev,
- "page_addr is not page_shift %d alignment!\n",
- mtt->page_shift);
- ret = -EINVAL;
- goto out;
- }
- pages[i++] = page_addr;
- }
- npage++;
- if (i == bt_page_size / sizeof(u64)) {
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
- if (ret)
- goto out;
- n += i;
- i = 0;
- }
- }
-
- if (i)
- ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
-
-out:
- free_pages((unsigned long) pages, order);
- return ret;
-}
-
-static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr,
- struct ib_umem *umem)
-{
- struct sg_dma_page_iter sg_iter;
- int i = 0, j = 0;
- u64 page_addr;
- u32 pbl_bt_sz;
-
- if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
- return 0;
-
- pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
- page_addr = sg_page_iter_dma_address(&sg_iter);
- if (!hr_dev->caps.pbl_hop_num) {
- /* for hip06, page addr is aligned to 4K */
- mr->pbl_buf[i++] = page_addr >> 12;
- } else if (hr_dev->caps.pbl_hop_num == 1) {
- mr->pbl_buf[i++] = page_addr;
- } else {
- if (hr_dev->caps.pbl_hop_num == 2)
- mr->pbl_bt_l1[i][j] = page_addr;
- else if (hr_dev->caps.pbl_hop_num == 3)
- mr->pbl_bt_l2[i][j] = page_addr;
-
- j++;
- if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
- i++;
- j = 0;
- }
- }
- }
-
- /* Memory barrier */
- mb();
-
- return 0;
-}
-
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
- int bt_size;
int ret;
- int n;
- int i;
- mr = kmalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(pd->device, start, length, access_flags);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- goto err_free;
- }
-
- n = ib_umem_page_count(mr->umem);
-
- if (!hr_dev->caps.pbl_hop_num) {
- if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
- dev_err(dev,
- " MR len %lld err. MR is limited to 4G at most!\n",
- length);
- ret = -EINVAL;
- goto err_umem;
- }
- } else {
- u64 pbl_size = 1;
-
- bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
- BA_BYTE_LEN;
- for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
- pbl_size *= bt_size;
- if (n > pbl_size) {
- dev_err(dev,
- " MR len %lld err. MR page num is limited to %lld!\n",
- length, pbl_size);
- ret = -EINVAL;
- goto err_umem;
- }
- }
-
mr->type = MR_TYPE_MR;
-
- ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
- access_flags, n, mr);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
+ access_flags);
if (ret)
- goto err_umem;
+ goto err_alloc_mr;
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+ ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
if (ret)
- goto err_mr;
+ goto err_alloc_key;
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
- goto err_mr;
+ goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->ibmr.length = length;
return &mr->ibmr;
-err_mr:
- hns_roce_mr_free(hr_dev, mr);
-
-err_umem:
- ib_umem_release(mr->umem);
-
-err_free:
+err_alloc_pbl:
+ free_mr_pbl(hr_dev, mr);
+err_alloc_key:
+ free_mr_key(hr_dev, mr);
+err_alloc_mr:
kfree(mr);
return ERR_PTR(ret);
}
@@ -1214,84 +305,36 @@ static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
u32 pdn, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- struct device *dev = hr_dev->dev;
- int npages;
int ret;
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf, mr->pbl_dma_addr);
- }
- ib_umem_release(mr->umem);
-
- mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
- if (IS_ERR(mr->umem)) {
- ret = PTR_ERR(mr->umem);
- mr->umem = NULL;
- return -ENOMEM;
- }
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num) {
- ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
- if (ret)
- goto release_umem;
- } else {
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf) {
- ret = -ENOMEM;
- goto release_umem;
- }
+ free_mr_pbl(hr_dev, mr);
+ ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
+ return ret;
}
ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
- if (ret)
- goto release_umem;
-
-
- ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
- if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
-
- if (hr_dev->caps.pbl_hop_num)
- hns_roce_mhop_free(hr_dev, mr);
- else
- dma_free_coherent(dev, npages * 8,
- mr->pbl_buf,
- mr->pbl_dma_addr);
- }
-
- goto release_umem;
+ ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
+ free_mr_pbl(hr_dev, mr);
}
- return 0;
-
-release_umem:
- ib_umem_release(mr->umem);
return ret;
-
}
-
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ib_dev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
int ret;
@@ -1312,7 +355,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
- dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
+ ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);
mr->enabled = 0;
@@ -1336,8 +379,7 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
- dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
- ib_umem_release(mr->umem);
+ ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
goto free_cmd_mbox;
}
@@ -1365,8 +407,6 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
} else {
hns_roce_mr_free(hr_dev, mr);
-
- ib_umem_release(mr->umem);
kfree(mr);
}
@@ -1380,12 +420,8 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
u64 length;
- u32 page_size;
int ret;
- page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
- length = max_num_sg * page_size;
-
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
@@ -1402,23 +438,28 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
mr->type = MR_TYPE_FRMR;
/* Allocate memory region key */
- ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
- 0, max_num_sg, mr);
+ length = max_num_sg * (1 << PAGE_SHIFT);
+ ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
if (ret)
goto err_free;
+ ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
+ if (ret)
+ goto err_key;
+
ret = hns_roce_mr_enable(hr_dev, mr);
if (ret)
- goto err_mr;
+ goto err_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->umem = NULL;
+ mr->ibmr.length = length;
return &mr->ibmr;
-err_mr:
- hns_roce_mr_free(to_hr_dev(pd->device), mr);
-
+err_key:
+ free_mr_key(hr_dev, mr);
+err_pbl:
+ free_mr_pbl(hr_dev, mr);
err_free:
kfree(mr);
return ERR_PTR(ret);
@@ -1428,19 +469,54 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
struct hns_roce_mr *mr = to_hr_mr(ibmr);
- mr->pbl_buf[mr->npages++] = addr;
+ if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
+ mr->page_list[mr->npages++] = addr;
+ return 0;
+ }
- return 0;
+ return -ENOBUFS;
}
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset)
{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_mtr *mtr = &mr->pbl_mtr;
+ int ret = 0;
mr->npages = 0;
+ mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!mr->page_list)
+ return ret;
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ if (ret < 1) {
+ ibdev_err(ibdev, "failed to store sg pages %d %d, cnt = %d.\n",
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+ goto err_page_list;
+ }
+
+ mtr->hem_cfg.region[0].offset = 0;
+ mtr->hem_cfg.region[0].count = mr->npages;
+ mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
+ mtr->hem_cfg.region_count = 1;
+ ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
+ if (ret) {
+ ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
+ ret = 0;
+ } else {
+ mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
+ ret = mr->npages;
+ }
+
+err_page_list:
+ kvfree(mr->page_list);
+ mr->page_list = NULL;
- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ return ret;
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@@ -1564,32 +640,23 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
return 0;
}
-void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
- int buf_pg_shift)
-{
- hns_roce_hem_list_init(&mtr->hem_list, bt_pg_shift);
- mtr->buf_pg_shift = buf_pg_shift;
-}
-
-void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr)
-{
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
-}
-
-static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mtr *mtr, dma_addr_t *bufs,
- struct hns_roce_buf_region *r)
+static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, struct hns_roce_buf_region *region)
{
+ __le64 *mtts;
int offset;
int count;
int npage;
- u64 *mtts;
+ u64 addr;
int end;
int i;
- offset = r->offset;
- end = offset + r->count;
+	/* if hopnum is 0, the buffer cannot store BAs, so skip writing the MTT */
+ if (!region->hopnum)
+ return 0;
+
+ offset = region->offset;
+ end = offset + region->count;
npage = 0;
while (offset < end) {
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
@@ -1597,13 +664,13 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
if (!mtts)
return -ENOBUFS;
- /* Save page addr, low 12 bits : 0 */
for (i = 0; i < count; i++) {
if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT;
+ addr = to_hr_hw_page_addr(pages[npage]);
else
- mtts[i] = bufs[npage];
+ addr = pages[npage];
+ mtts[i] = cpu_to_le64(addr);
npage++;
}
offset += count;
@@ -1612,69 +679,416 @@ static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
return 0;
}
-int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- dma_addr_t **bufs, struct hns_roce_buf_region *regions,
- int region_cnt)
+static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
- struct hns_roce_buf_region *r;
- int ret;
int i;
- ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, regions,
- region_cnt);
- if (ret)
- return ret;
+ for (i = 0; i < attr->region_count; i++)
+ if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
+ attr->region[i].hopnum > 0)
+ return true;
- for (i = 0; i < region_cnt; i++) {
- r = &regions[i];
- ret = hns_roce_write_mtr(hr_dev, mtr, bufs[i], r);
+	/* because the mtr has only one root base address, a hopnum of 0 means
+	 * the root base address equals the first buffer address, so all the
+	 * allocated memory must lie in one continuous, directly accessed space.
+ */
+ return false;
+}
+
+static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
+{
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < attr->region_count; i++)
+ size += attr->region[i].size;
+
+ return size;
+}
+
+static inline int mtr_umem_page_count(struct ib_umem *umem,
+ unsigned int page_shift)
+{
+ int count = ib_umem_page_count(umem);
+
+ if (page_shift >= PAGE_SHIFT)
+ count >>= page_shift - PAGE_SHIFT;
+ else
+ count <<= PAGE_SHIFT - page_shift;
+
+ return count;
+}
+
+static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
+ unsigned int page_shift)
+{
+ if (is_direct)
+ return ALIGN(alloc_size, 1 << page_shift);
+ else
+ return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
+}
+
+/*
+ * Check whether the given pages lie in a continuous address space.
+ * Returns 0 on success, or the index of the first non-contiguous page.
+ */
+static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
+ unsigned int page_shift)
+{
+ size_t page_size = 1 << page_shift;
+ int i;
+
+ for (i = 1; i < page_count; i++)
+ if (pages[i] - pages[i - 1] != page_size)
+ return i;
+
+ return 0;
+}
+
+static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release user buffers */
+ if (mtr->umem) {
+ ib_umem_release(mtr->umem);
+ mtr->umem = NULL;
+ }
+
+ /* release kernel buffers */
+ if (mtr->kmem) {
+ hns_roce_buf_free(hr_dev, mtr->kmem);
+ kfree(mtr->kmem);
+ mtr->kmem = NULL;
+ }
+}
+
+static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr, bool is_direct,
+ struct ib_udata *udata, unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ unsigned int max_pg_shift = buf_attr->page_shift;
+ unsigned int best_pg_shift = 0;
+ int all_pg_count = 0;
+ size_t direct_size;
+ size_t total_size;
+ unsigned long tmp;
+ int ret = 0;
+
+ total_size = mtr_bufs_size(buf_attr);
+ if (total_size < 1) {
+ ibdev_err(ibdev, "Failed to check mtr size\n");
+ return -EINVAL;
+ }
+
+ if (udata) {
+ mtr->kmem = NULL;
+ mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
+ buf_attr->user_access);
+ if (IS_ERR_OR_NULL(mtr->umem)) {
+ ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
+ PTR_ERR(mtr->umem));
+ return -ENOMEM;
+ }
+ if (buf_attr->fixed_page) {
+ best_pg_shift = max_pg_shift;
+ } else {
+ tmp = GENMASK(max_pg_shift, 0);
+ ret = ib_umem_find_best_pgsz(mtr->umem, tmp, user_addr);
+ best_pg_shift = (ret <= PAGE_SIZE) ?
+ PAGE_SHIFT : ilog2(ret);
+ }
+ all_pg_count = mtr_umem_page_count(mtr->umem, best_pg_shift);
+ ret = 0;
+ } else {
+ mtr->umem = NULL;
+ mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
+ if (!mtr->kmem) {
+ ibdev_err(ibdev, "Failed to alloc kmem\n");
+ return -ENOMEM;
+ }
+ direct_size = mtr_kmem_direct_size(is_direct, total_size,
+ max_pg_shift);
+ ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
+ mtr->kmem, max_pg_shift);
if (ret) {
- dev_err(hr_dev->dev,
- "write mtr[%d/%d] err %d,offset=%d.\n",
- i, region_cnt, ret, r->offset);
- goto err_write;
+ ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
+ goto err_alloc_mem;
+ } else {
+ best_pg_shift = max_pg_shift;
+ all_pg_count = mtr->kmem->npages;
}
}
- return 0;
+	/* must be bigger than the minimum hardware page shift */
+ if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
+ ret = -EINVAL;
+ ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
+ best_pg_shift, all_pg_count);
+ goto err_alloc_mem;
+ }
-err_write:
- hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+ mtr->hem_cfg.buf_pg_shift = best_pg_shift;
+ mtr->hem_cfg.buf_pg_count = all_pg_count;
+ return 0;
+err_alloc_mem:
+ mtr_free_bufs(hr_dev, mtr);
return ret;
}
+static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int count, unsigned int page_shift)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int npage;
+ int err;
+
+ if (mtr->umem)
+ npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
+ mtr->umem, page_shift);
+ else
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
+ mtr->kmem);
+
+ if (mtr->hem_cfg.is_direct && npage > 1) {
+ err = mtr_check_direct_pages(pages, npage, page_shift);
+ if (err) {
+ ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
+ mtr->umem ? "user" : "kernel", err);
+ npage = err;
+ }
+ }
+
+ return npage;
+}
+
+int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ dma_addr_t *pages, int page_cnt)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_region *r;
+ int err;
+ int i;
+
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ r = &mtr->hem_cfg.region[i];
+ if (r->offset + r->count > page_cnt) {
+ err = -EINVAL;
+ ibdev_err(ibdev,
+ "Failed to check mtr%d end %d + %d, max %d\n",
+ i, r->offset, r->count, page_cnt);
+ return err;
+ }
+
+ err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
+ if (err) {
+ ibdev_err(ibdev,
+ "Failed to map mtr%d offset %d, err %d\n",
+ i, r->offset, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
- u64 *mtts = mtt_buf;
int mtt_count;
int total = 0;
- u64 *addr;
+ __le64 *mtts;
int npage;
+ u64 addr;
int left;
- if (mtts == NULL || mtt_max < 1)
+ if (!mtt_buf || mtt_max < 1)
goto done;
+ /* no mtt memory in direct mode, so just return the buffer address */
+ if (mtr->hem_cfg.is_direct) {
+ npage = offset;
+ for (total = 0; total < mtt_max; total++, npage++) {
+ addr = mtr->hem_cfg.root_ba +
+ (npage << mtr->hem_cfg.buf_pg_shift);
+
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ mtt_buf[total] = to_hr_hw_page_addr(addr);
+ else
+ mtt_buf[total] = addr;
+ }
+
+ goto done;
+ }
+
left = mtt_max;
while (left > 0) {
mtt_count = 0;
- addr = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
offset + total,
&mtt_count, NULL);
- if (!addr || !mtt_count)
+ if (!mtts || !mtt_count)
goto done;
npage = min(mtt_count, left);
- memcpy(&mtts[total], addr, BA_BYTE_LEN * npage);
left -= npage;
- total += npage;
+ for (mtt_count = 0; mtt_count < npage; mtt_count++)
+ mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
done:
if (base_addr)
- *base_addr = mtr->hem_list.root_ba;
+ *base_addr = mtr->hem_cfg.root_ba;
return total;
}
+
+/* convert buffer size to page index and page count */
+static unsigned int mtr_init_region(struct hns_roce_buf_attr *attr,
+ int page_cnt,
+ struct hns_roce_buf_region *regions,
+ int region_cnt, unsigned int page_shift)
+{
+ unsigned int page_size = 1 << page_shift;
+ int max_region = attr->region_count;
+ struct hns_roce_buf_region *r;
+ unsigned int i = 0;
+ int page_idx = 0;
+
+ for (; i < region_cnt && i < max_region && page_idx < page_cnt; i++) {
+ r = &regions[i];
+ r->hopnum = attr->region[i].hopnum == HNS_ROCE_HOP_NUM_0 ?
+ 0 : attr->region[i].hopnum;
+ r->offset = page_idx;
+ r->count = DIV_ROUND_UP(attr->region[i].size, page_size);
+ page_idx += r->count;
+ }
+
+ return i;
+}
+
+/**
+ * hns_roce_mtr_create - Create hns memory translate region.
+ *
+ * @hr_dev: RoCE device struct pointer
+ * @mtr: memory translate region
+ * @buf_attr: buffer attribute for creating mtr
+ * @page_shift: page shift for multi-hop base address table
+ * @udata: user space context, if it's NULL, means kernel space
+ * @user_addr: userspace virtual address to start at
+ */
+int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+ unsigned long user_addr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t *pages = NULL;
+ int region_cnt = 0;
+ int all_pg_cnt;
+ int get_pg_cnt;
+ bool has_mtt;
+ int err = 0;
+
+ has_mtt = mtr_has_mtt(buf_attr);
+	/* if the buffer only needs MTT, just init the hem cfg */
+ if (buf_attr->mtt_only) {
+ mtr->hem_cfg.buf_pg_shift = buf_attr->page_shift;
+ mtr->hem_cfg.buf_pg_count = mtr_bufs_size(buf_attr) >>
+ buf_attr->page_shift;
+ mtr->umem = NULL;
+ mtr->kmem = NULL;
+ } else {
+ err = mtr_alloc_bufs(hr_dev, mtr, buf_attr, !has_mtt, udata,
+ user_addr);
+ if (err) {
+ ibdev_err(ibdev, "Failed to alloc mtr bufs, err %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* alloc mtt memory */
+ all_pg_cnt = mtr->hem_cfg.buf_pg_count;
+ hns_roce_hem_list_init(&mtr->hem_list);
+ mtr->hem_cfg.is_direct = !has_mtt;
+ mtr->hem_cfg.ba_pg_shift = page_shift;
+ mtr->hem_cfg.region_count = 0;
+ region_cnt = mtr_init_region(buf_attr, all_pg_cnt,
+ mtr->hem_cfg.region,
+ ARRAY_SIZE(mtr->hem_cfg.region),
+ mtr->hem_cfg.buf_pg_shift);
+ if (region_cnt < 1) {
+ err = -ENOBUFS;
+ ibdev_err(ibdev, "failed to init mtr region %d\n", region_cnt);
+ goto err_alloc_bufs;
+ }
+
+ mtr->hem_cfg.region_count = region_cnt;
+
+ if (has_mtt) {
+ err = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+ mtr->hem_cfg.region, region_cnt,
+ page_shift);
+ if (err) {
+ ibdev_err(ibdev, "Failed to request mtr hem, err %d\n",
+ err);
+ goto err_alloc_bufs;
+ }
+ mtr->hem_cfg.root_ba = mtr->hem_list.root_ba;
+ }
+
+ /* no buffer to map */
+ if (buf_attr->mtt_only)
+ return 0;
+
+ /* alloc a tmp array to store buffer's dma address */
+ pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!pages) {
+ err = -ENOMEM;
+ ibdev_err(ibdev, "Failed to alloc mtr page list %d\n",
+ all_pg_cnt);
+ goto err_alloc_hem_list;
+ }
+
+ get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
+ mtr->hem_cfg.buf_pg_shift);
+ if (get_pg_cnt != all_pg_cnt) {
+ ibdev_err(ibdev, "Failed to get mtr page %d != %d\n",
+ get_pg_cnt, all_pg_cnt);
+ err = -ENOBUFS;
+ goto err_alloc_page_list;
+ }
+
+ if (!has_mtt) {
+ mtr->hem_cfg.root_ba = pages[0];
+ } else {
+ /* write buffer's dma address to BA table */
+ err = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
+ if (err) {
+ ibdev_err(ibdev, "Failed to map mtr pages, err %d\n",
+ err);
+ goto err_alloc_page_list;
+ }
+ }
+
+ /* drop tmp array */
+ kvfree(pages);
+ return 0;
+err_alloc_page_list:
+ kvfree(pages);
+err_alloc_hem_list:
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+err_alloc_bufs:
+ mtr_free_bufs(hr_dev, mtr);
+ return err;
+}
+
+void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
+{
+ /* release multi-hop addressing resource */
+ hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
+
+ /* free buffers */
+ mtr_free_bufs(hr_dev, mtr);
+}
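
With hns_roce_mtr_create()/hns_roce_mtr_destroy() in place, the per-type MTT buddy allocators removed above are no longer needed: a caller describes its buffer in a struct hns_roce_buf_attr and the MTR code allocates (or pins) the memory and writes its page addresses into the multi-hop BA table. A minimal caller sketch, modeled on alloc_mr_pbl() earlier in this file and not part of the diff itself; it assumes the usual driver context (a valid hr_dev, and udata/start from the verbs call):

/*
 * Caller sketch only -- mirrors alloc_mr_pbl() in this patch.
 */
static int example_alloc_buf(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mtr *mtr, size_t length,
			     struct ib_udata *udata, u64 start)
{
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = length;	/* bytes covered by region 0 */
	buf_attr.region[0].hopnum = hr_dev->caps.pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;

	/* udata == NULL allocates a kernel buffer; otherwise the user
	 * memory at 'start' is pinned and mapped instead.
	 */
	ret = hns_roce_mtr_create(hr_dev, mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, start);
	if (ret)
		return ret;

	/* ... use mtr->hem_cfg.root_ba or hns_roce_mtr_find() ... */

	hns_roce_mtr_destroy(hr_dev, mtr);	/* releases MTT and buffers */
	return 0;
}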
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6317901c4b4f..a0a47bd66975 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -355,16 +355,16 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
-static int set_rq_size(struct hns_roce_dev *hr_dev,
- struct ib_qp_cap *cap, bool is_user, int has_rq,
- struct hns_roce_qp *hr_qp)
+static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
+ struct hns_roce_qp *hr_qp, int has_rq)
{
- u32 max_cnt;
+ u32 cnt;
/* If srq exist, set zero for relative number of rq */
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
@@ -379,17 +379,15 @@ static int set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
-
- hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
- if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
+ cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
cap->max_recv_wr);
return -EINVAL;
}
- max_cnt = max(1U, cap->max_recv_sge);
- hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
+ hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+ HNS_ROCE_RESERVED_SGE);
if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -397,8 +395,57 @@ static int set_rq_size(struct hns_roce_dev *hr_dev,
hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
hr_qp->rq.max_gs);
- cap->max_recv_wr = hr_qp->rq.wqe_cnt;
- cap->max_recv_sge = hr_qp->rq.max_gs;
+ hr_qp->rq.wqe_cnt = cnt;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ hr_qp->rq_inl_buf.wqe_cnt = cnt;
+ else
+ hr_qp->rq_inl_buf.wqe_cnt = 0;
+
+ cap->max_recv_wr = cnt;
+ cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
+
+ return 0;
+}
+
+static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
+ struct hns_roce_qp *hr_qp,
+ struct ib_qp_cap *cap)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
+
+ cnt = max(1U, cap->max_send_sge);
+ if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
+ hr_qp->sge.sge_cnt = 0;
+
+ return 0;
+ }
+
+ hr_qp->sq.max_gs = cnt;
+
+	/* the SGEs of a UD SQ WQE are placed in the extended SGE space */
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
+ hr_qp->ibqp.qp_type == IB_QPT_UD) {
+ cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
+ } else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
+ cnt = roundup_pow_of_two(sq_wqe_cnt *
+ (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
+ if (cnt > hr_dev->caps.max_extend_sg) {
+ ibdev_err(ibdev,
+ "failed to check exSGE num, exSGE num = %d.\n",
+ cnt);
+ return -EINVAL;
+ }
+ }
+ } else {
+ cnt = 0;
+ }
+
+ hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
+ hr_qp->sge.sge_cnt = cnt;
return 0;
}
@@ -430,174 +477,79 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
- u32 ex_sge_num;
- u32 page_size;
- u32 max_cnt;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt = 0;
int ret;
- if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
- hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
+ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+ cnt > hr_dev->caps.max_wqes)
return -EINVAL;
ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
if (ret) {
- ibdev_err(&hr_dev->ib_dev, "Failed to check user SQ size limit\n");
+ ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
+ ret);
return ret;
}
- hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
-
- max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
- else
- hr_qp->sq.max_gs = max_cnt;
-
- if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
-
- if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
- hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- ibdev_err(&hr_dev->ib_dev,
- "Failed to check extended SGE size limit %d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
- }
-
- hr_qp->sge.sge_shift = 4;
- ex_sge_num = hr_qp->sge.sge_cnt;
+ ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
+ if (ret)
+ return ret;
- /* Get buf size, SQ and RQ are aligned to page_szie */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), PAGE_SIZE) +
- round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
-
- hr_qp->sq.offset = 0;
- hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
- } else {
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- hr_qp->sge.sge_cnt = ex_sge_num ?
- max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
- hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), page_size) +
- round_up((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift), page_size) +
- round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), page_size);
-
- hr_qp->sq.offset = 0;
- if (ex_sge_num) {
- hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
- hr_qp->rq.offset = hr_qp->sge.offset +
- round_up((hr_qp->sge.sge_cnt <<
- hr_qp->sge.sge_shift),
- page_size);
- } else {
- hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift),
- page_size);
- }
- }
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ hr_qp->sq.wqe_cnt = cnt;
return 0;
}
-static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_buf_region *regions,
- int region_max, int page_shift)
+static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_buf_attr *buf_attr)
{
- int page_size = 1 << page_shift;
- bool is_extend_sge;
- int region_cnt = 0;
int buf_size;
- int buf_cnt;
+ int idx = 0;
- if (hr_qp->buff_size < 1 || region_max < 1)
- return region_cnt;
+ hr_qp->buff_size = 0;
- if (hr_qp->sge.sge_cnt > 0)
- is_extend_sge = true;
- else
- is_extend_sge = false;
-
- /* sq region */
- if (is_extend_sge)
- buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
- else
- buf_size = hr_qp->rq.offset - hr_qp->sq.offset;
-
- if (buf_size > 0 && region_cnt < region_max) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_sq_hop_num,
- hr_qp->sq.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
-
- /* sge region */
- if (is_extend_sge) {
- buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
- if (buf_size > 0 && region_cnt < region_max) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_sge_hop_num,
- hr_qp->sge.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
- }
-
- /* rq region */
- buf_size = hr_qp->buff_size - hr_qp->rq.offset;
- if (buf_size > 0) {
- buf_cnt = DIV_ROUND_UP(buf_size, page_size);
- hns_roce_init_buf_region(&regions[region_cnt],
- hr_dev->caps.wqe_rq_hop_num,
- hr_qp->rq.offset / page_size,
- buf_cnt);
- region_cnt++;
- }
-
- return region_cnt;
-}
-
-static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
-{
- struct device *dev = hr_dev->dev;
-
- if (hr_qp->sq.max_gs > 2) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- (hr_qp->sq.max_gs - 2));
- hr_qp->sge.sge_shift = 4;
- }
-
- /* ud sqwqe's sge use extend sge */
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
- hr_qp->sq.max_gs);
- hr_qp->sge.sge_shift = 4;
- }
+ /* SQ WQE */
+ hr_qp->sq.offset = 0;
+ buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
+ hr_qp->sq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* extend SGE WQE in SQ */
+ hr_qp->sge.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
+ hr_qp->sge.sge_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ /* RQ WQE */
+ hr_qp->rq.offset = hr_qp->buff_size;
+ buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
+ hr_qp->rq.wqe_shift);
+ if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
+ buf_attr->region[idx].size = buf_size;
+ buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
+ idx++;
+ hr_qp->buff_size += buf_size;
+ }
+
+ if (hr_qp->buff_size < 1)
+ return -EINVAL;
- if (hr_qp->sq.max_gs > 2 &&
- hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
- if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
- dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
- hr_qp->sge.sge_cnt);
- return -EINVAL;
- }
- }
+ buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ buf_attr->fixed_page = true;
+ buf_attr->region_count = idx;
return 0;
}
@@ -605,62 +557,35 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
- u32 page_size;
- u32 max_cnt;
- int size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u32 cnt;
int ret;
if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- ibdev_err(&hr_dev->ib_dev,
- "SQ WR or sge or inline data error!\n");
+ ibdev_err(ibdev,
+ "failed to check SQ WR, SGE or inline num, ret = %d.\n",
+ -EINVAL);
return -EINVAL;
}
- hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
-
- max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
-
- hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
- if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- ibdev_err(&hr_dev->ib_dev,
- "while setting kernel sq size, sq.wqe_cnt too large\n");
+ cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
+ ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
+ cnt);
return -EINVAL;
}
- /* Get data_seg numbers */
- max_cnt = max(1U, cap->max_send_sge);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
- else
- hr_qp->sq.max_gs = max_cnt;
+ hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
+ hr_qp->sq.wqe_cnt = cnt;
- ret = set_extend_sge_param(hr_dev, hr_qp);
- if (ret) {
- ibdev_err(&hr_dev->ib_dev, "set extend sge parameters fail\n");
+ ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
+ if (ret)
return ret;
- }
- /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
- page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- hr_qp->sq.offset = 0;
- size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
-
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
- hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
- (u32)hr_qp->sge.sge_cnt);
- hr_qp->sge.offset = size;
- size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
- page_size);
- }
-
- hr_qp->rq.offset = size;
- size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
- hr_qp->buff_size = size;
-
- /* Get wr and sge number which send */
- cap->max_send_wr = hr_qp->sq.wqe_cnt;
+ /* sync the parameters of kernel QP to user's configuration */
+ cap->max_send_wr = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
/* We don't support inline sends for kernel QPs (yet) */
@@ -691,8 +616,8 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr)
{
u32 max_recv_sge = init_attr->cap.max_recv_sge;
+ u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
struct hns_roce_rinl_wqe *wqe_list;
- u32 wqe_cnt = hr_qp->rq.wqe_cnt;
int i;
/* allocate recv inline buf */
@@ -714,7 +639,6 @@ static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
hr_qp->rq_inl_buf.wqe_list = wqe_list;
- hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;
return 0;
@@ -727,140 +651,55 @@ err:
static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
- kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ if (hr_qp->rq_inl_buf.wqe_list)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
kfree(hr_qp->rq_inl_buf.wqe_list);
}
-static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
- u32 page_shift, bool is_user)
-{
-/* WQE buffer include 3 parts: SQ, extend SGE and RQ. */
-#define HNS_ROCE_WQE_REGION_MAX 3
- struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
- dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_buf_region *r;
- int region_count;
- int buf_count;
- int ret;
- int i;
-
- region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
- ARRAY_SIZE(regions), page_shift);
-
- /* alloc a tmp list to store WQE buffers address */
- ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
- if (ret) {
- ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
- return ret;
- }
-
- for (i = 0; i < region_count; i++) {
- r = &regions[i];
- if (is_user)
- buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
- r->count, r->offset, hr_qp->umem,
- page_shift);
- else
- buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i],
- r->count, r->offset, &hr_qp->hr_buf);
-
- if (buf_count != r->count) {
- ibdev_err(ibdev, "Failed to get %s WQE buf, expect %d = %d.\n",
- is_user ? "user" : "kernel",
- r->count, buf_count);
- ret = -ENOBUFS;
- goto done;
- }
- }
-
- hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
- hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
- page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
- region_count);
- if (ret)
- ibdev_err(ibdev, "Failed to attach WQE's mtr\n");
-
- goto done;
-
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
-done:
- hns_roce_free_buf_list(buf_list, region_count);
-
- return ret;
-}
-
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, unsigned long addr)
{
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
struct ib_device *ibdev = &hr_dev->ib_dev;
- bool is_rq_buf_inline;
+ struct hns_roce_buf_attr buf_attr = {};
int ret;
- is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hns_roce_qp_has_rq(init_attr);
- if (is_rq_buf_inline) {
+ if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
ret = alloc_rq_inline_buf(hr_qp, init_attr);
if (ret) {
- ibdev_err(ibdev, "Failed to alloc inline RQ buffer\n");
+ ibdev_err(ibdev,
+ "failed to alloc inline buf, ret = %d.\n",
+ ret);
return ret;
}
- }
-
- if (udata) {
- hr_qp->umem = ib_umem_get(ibdev, addr, hr_qp->buff_size, 0);
- if (IS_ERR(hr_qp->umem)) {
- ret = PTR_ERR(hr_qp->umem);
- goto err_inline;
- }
} else {
- ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
- (1 << page_shift) * 2,
- &hr_qp->hr_buf, page_shift);
- if (ret)
- goto err_inline;
+ hr_qp->rq_inl_buf.wqe_list = NULL;
}
- ret = map_wqe_buf(hr_dev, hr_qp, page_shift, udata);
- if (ret)
- goto err_alloc;
+ ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
+ goto err_inline;
+ }
+ ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
+ HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
+ udata, addr);
+ if (ret) {
+ ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
+ goto err_inline;
+ }
return 0;
-
err_inline:
- if (is_rq_buf_inline)
- free_rq_inline_buf(hr_qp);
-
-err_alloc:
- if (udata) {
- ib_umem_release(hr_qp->umem);
- hr_qp->umem = NULL;
- } else {
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
- }
-
- ibdev_err(ibdev, "Failed to alloc WQE buffer, ret %d.\n", ret);
+ free_rq_inline_buf(hr_qp);
return ret;
}
static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
- hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
- if (hr_qp->umem) {
- ib_umem_release(hr_qp->umem);
- hr_qp->umem = NULL;
- }
-
- if (hr_qp->hr_buf.nbufs > 0)
- hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-
- if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
- hr_qp->rq.wqe_cnt)
- free_rq_inline_buf(hr_qp);
+ hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
+ free_rq_inline_buf(hr_qp);
}
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
@@ -912,8 +751,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
"Failed to map user SQ doorbell\n");
goto err_out;
}
- hr_qp->sdb_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
@@ -924,8 +763,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
"Failed to map user RQ doorbell\n");
goto err_sdb;
}
- hr_qp->rdb_en = 1;
- resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+ resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
} else {
/* QP doorbell register address */
@@ -942,13 +781,13 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_out;
}
*hr_qp->rdb.db_record = 0;
- hr_qp->rdb_en = 1;
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
}
return 0;
err_sdb:
- if (udata && hr_qp->sdb_en)
+ if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -961,12 +800,12 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
udata, struct hns_roce_ucontext, ibucontext);
if (udata) {
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
- if (hr_qp->sdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
} else {
- if (hr_qp->rdb_en)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
}
@@ -1003,8 +842,7 @@ err_sq:
return ret;
}
-static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp)
+static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
kfree(hr_qp->rq.wrid);
kfree(hr_qp->sq.wrid);
@@ -1025,10 +863,11 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
else
hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
- ret = set_rq_size(hr_dev, &init_attr->cap, udata,
- hns_roce_qp_has_rq(init_attr), hr_qp);
+ ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
+ hns_roce_qp_has_rq(init_attr));
if (ret) {
- ibdev_err(ibdev, "Failed to set user RQ size\n");
+ ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
+ ret);
return ret;
}
@@ -1156,7 +995,7 @@ err_buf:
err_db:
free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
- free_kernel_wrid(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
return ret;
}
@@ -1170,7 +1009,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qpc(hr_dev, hr_qp);
free_qpn(hr_dev, hr_qp);
free_qp_buf(hr_dev, hr_qp);
- free_kernel_wrid(hr_dev, hr_qp);
+ free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata);
kfree(hr_qp);
@@ -1339,10 +1178,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (ibqp->uobject &&
(attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
- if (hr_qp->sdb_en == 1) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
- if (hr_qp->rdb_en == 1)
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
} else {
ibdev_warn(&hr_dev->ib_dev,
@@ -1431,10 +1270,9 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
}
-static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
-
- return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
+ return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
@@ -1449,8 +1287,7 @@ void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
- return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
- (n << hr_qp->sge.sge_shift));
+ return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 5b3dd1a337d4..f40a000e94ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -77,56 +77,56 @@ static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
-static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
- u16 xrcd, struct hns_roce_mtt *hr_mtt,
- u64 db_rec_addr, struct hns_roce_srq *srq)
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cmd_mailbox *mailbox;
- dma_addr_t dma_handle_wqe;
- dma_addr_t dma_handle_idx;
- u64 *mtts_wqe;
- u64 *mtts_idx;
+ u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
+ u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
+ dma_addr_t dma_handle_wqe = 0;
+ dma_addr_t dma_handle_idx = 0;
int ret;
/* Get the physical address of srq buf */
- mtts_wqe = hns_roce_table_find(hr_dev,
- &hr_dev->mr_table.mtt_srqwqe_table,
- srq->mtt.first_seg,
- &dma_handle_wqe);
- if (!mtts_wqe) {
- dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
+ return -ENOBUFS;
}
/* Get physical address of idx que buf */
- mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
- srq->idx_que.mtt.first_seg,
- &dma_handle_idx);
- if (!mtts_idx) {
- dev_err(hr_dev->dev,
- "Failed to find mtt for srq idx queue buf.\n");
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+ if (ret < 1) {
+ ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
+ return -ENOBUFS;
}
ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
if (ret) {
- dev_err(hr_dev->dev,
- "Failed to alloc a bit from srq bitmap.\n");
+ ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
return -ENOMEM;
}
ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
goto err_out;
+ }
ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
goto err_put;
+ }
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
+ if (IS_ERR_OR_NULL(mailbox)) {
+ ret = -ENOMEM;
+ ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
goto err_xa;
}
@@ -136,8 +136,10 @@ static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- if (ret)
+ if (ret) {
+ ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
goto err_xa;
+ }
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@@ -154,8 +156,7 @@ err_out:
return ret;
}
-static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq)
+static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
int ret;
@@ -175,187 +176,104 @@ static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
-static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
- int srq_buf_size)
+static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- struct hns_roce_ib_create_srq ucmd;
- struct hns_roce_buf *buf;
- int ret;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+ HNS_ROCE_SGE_SIZE *
+ srq->max_gs)));
+
+ buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->wqe_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
+ hr_dev->caps.srqwqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, udata, addr);
+ if (err)
+ ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);
+
+ return err;
+}
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
- return -EFAULT;
-
- srq->umem =
- ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
- if (IS_ERR(srq->umem))
- return PTR_ERR(srq->umem);
-
- buf = &srq->buf;
- buf->npages = (ib_umem_page_count(srq->umem) +
- (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
- (1 << hr_dev->caps.srqwqe_buf_pg_sz);
- buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
- &srq->mtt);
- if (ret)
- goto err_user_buf;
+static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+}
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
- if (ret)
- goto err_user_srq_mtt;
-
- /* config index queue BA */
- srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
- srq->idx_que.buf_size, 0);
- if (IS_ERR(srq->idx_que.umem)) {
- dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
- ret = PTR_ERR(srq->idx_que.umem);
- goto err_user_srq_mtt;
+static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ struct ib_udata *udata, unsigned long addr)
+{
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+ int err;
+
+ srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
+
+ buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+ srq->idx_que.entry_shift);
+ buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+ err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
+ hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
+ return err;
}
- buf = &srq->idx_que.idx_buf;
- buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
- 1 << hr_dev->caps.idx_buf_pg_sz);
- buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
- &srq->idx_que.mtt);
- if (ret) {
- dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
- goto err_user_idx_mtt;
- }
+ if (!udata) {
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
+ if (!idx_que->bitmap) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
+ err = -ENOMEM;
+ goto err_idx_mtr;
+ }
- ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
- srq->idx_que.umem);
- if (ret) {
- dev_err(hr_dev->dev,
- "hns_roce_ib_umem_write_mtt error for idx que\n");
- goto err_user_idx_buf;
}
return 0;
+err_idx_mtr:
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
-err_user_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_user_idx_mtt:
- ib_umem_release(srq->idx_que.umem);
-
-err_user_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_user_buf:
- ib_umem_release(srq->umem);
-
- return ret;
+ return err;
}
-static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
- u32 page_shift)
+static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que;
- idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
- if (!idx_que->bitmap)
- return -ENOMEM;
-
- idx_que->buf_size = srq->idx_que.buf_size;
-
- if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
- &idx_que->idx_buf, page_shift)) {
- bitmap_free(idx_que->bitmap);
- return -ENOMEM;
- }
-
- return 0;
+ bitmap_free(idx_que->bitmap);
+ idx_que->bitmap = NULL;
+ hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
-static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
+static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
- u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
- int ret;
-
- if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
- &srq->buf, page_shift))
- return -ENOMEM;
-
srq->head = 0;
srq->tail = srq->wqe_cnt - 1;
-
- ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
- &srq->mtt);
- if (ret)
- goto err_kernel_buf;
-
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
- if (ret)
- goto err_kernel_srq_mtt;
-
- page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
- ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
- if (ret) {
- dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
- goto err_kernel_srq_mtt;
- }
-
- /* Init mtt table for idx_que */
- ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
- srq->idx_que.idx_buf.page_shift,
- &srq->idx_que.mtt);
- if (ret)
- goto err_kernel_create_idx;
-
- /* Write buffer address into the mtt table */
- ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
- &srq->idx_que.idx_buf);
- if (ret)
- goto err_kernel_idx_buf;
-
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (!srq->wrid) {
- ret = -ENOMEM;
- goto err_kernel_idx_buf;
- }
+ if (!srq->wrid)
+ return -ENOMEM;
return 0;
-
-err_kernel_idx_buf:
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
-
-err_kernel_create_idx:
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
- &srq->idx_que.idx_buf);
- kfree(srq->idx_que.bitmap);
-
-err_kernel_srq_mtt:
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
-err_kernel_buf:
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
-
- return ret;
-}
-
-static void destroy_user_srq(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq)
-{
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- ib_umem_release(srq->idx_que.umem);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
- ib_umem_release(srq->umem);
}
-static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
- struct hns_roce_srq *srq, int srq_buf_size)
+static void free_srq_wrid(struct hns_roce_srq *srq)
{
- kvfree(srq->wrid);
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
- kfree(srq->idx_que.bitmap);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
- hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
+ kfree(srq->wrid);
+ srq->wrid = NULL;
}
int hns_roce_create_srq(struct ib_srq *ib_srq,
@@ -365,8 +283,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
struct hns_roce_ib_create_srq_resp resp = {};
struct hns_roce_srq *srq = to_hr_srq(ib_srq);
- int srq_desc_size;
- int srq_buf_size;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_ib_create_srq ucmd = {};
int ret = 0;
u32 cqn;
@@ -379,43 +297,47 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
spin_lock_init(&srq->lock);
srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
- srq->max_gs = init_attr->attr.max_sge;
-
- srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
- HNS_ROCE_SGE_SIZE * srq->max_gs));
-
- srq->wqe_shift = ilog2(srq_desc_size);
-
- srq_buf_size = srq->wqe_cnt * srq_desc_size;
-
- srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
- srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
- srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
- srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
+ srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
if (udata) {
- ret = create_user_srq(srq, udata, srq_buf_size);
+ ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (ret) {
- dev_err(hr_dev->dev, "Create user srq failed\n");
- goto err_srq;
+ ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
+ ret);
+ return ret;
}
- } else {
- ret = create_kernel_srq(srq, srq_buf_size);
+ }
+
+ ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
+ return ret;
+ }
+
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
+ goto err_buf_alloc;
+ }
+
+ if (!udata) {
+ ret = alloc_srq_wrid(hr_dev, srq);
if (ret) {
- dev_err(hr_dev->dev, "Create kernel srq failed\n");
- goto err_srq;
+ ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
+ ret);
+ goto err_idx_alloc;
}
}
cqn = ib_srq_has_cq(init_attr->srq_type) ?
to_hr_cq(init_attr->ext.cq)->cqn : 0;
-
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
- ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
- &srq->mtt, 0, srq);
- if (ret)
- goto err_wrid;
+ ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
+ if (ret) {
+ ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
+ goto err_wrid_alloc;
+ }
srq->event = hns_roce_ib_srq_event;
resp.srqn = srq->srqn;
@@ -431,15 +353,13 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
return 0;
err_srqc_alloc:
- hns_roce_srq_free(hr_dev, srq);
-
-err_wrid:
- if (udata)
- destroy_user_srq(hr_dev, srq);
- else
- destroy_kernel_srq(hr_dev, srq, srq_buf_size);
-
-err_srq:
+ free_srqc(hr_dev, srq);
+err_wrid_alloc:
+ free_srq_wrid(srq);
+err_idx_alloc:
+ free_srq_idx(hr_dev, srq);
+err_buf_alloc:
+ free_srq_buf(hr_dev, srq);
return ret;
}
@@ -448,18 +368,10 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
struct hns_roce_srq *srq = to_hr_srq(ibsrq);
- hns_roce_srq_free(hr_dev, srq);
- hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
-
- if (udata) {
- hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
- } else {
- kvfree(srq->wrid);
- hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
- &srq->buf);
- }
- ib_umem_release(srq->idx_que.umem);
- ib_umem_release(srq->umem);
+ free_srqc(hr_dev, srq);
+ free_srq_idx(hr_dev, srq);
+ free_srq_wrid(srq);
+ free_srq_buf(hr_dev, srq);
}
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 3c62c9327a9c..49d92638e0db 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -382,15 +382,6 @@ static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
}
/**
- * to_iwmr_from_ibfmr - get device memory region
- * @ibfmr: ib fmr
- **/
-static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct i40iw_mr, ibfmr);
-}
-
-/**
* to_iwmw - get device memory window
* @ibmw: ib memory window
**/
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1b6fb1380961..19af29a48c55 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -83,7 +83,6 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
- props->max_map_per_fmr = 1;
props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
return 0;
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 3a413752ccc3..331bc21cbcc7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -89,7 +89,6 @@ struct i40iw_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
- struct ib_fmr ibfmr;
};
struct ib_umem *region;
u16 type;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 02a169f8027b..5f8f8d5c0ce0 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -141,10 +141,11 @@ static int create_iboe_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
return 0;
}
-int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
-
+int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
return -EINVAL;
@@ -167,12 +168,14 @@ int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
{
struct rdma_ah_attr slave_attr = *ah_attr;
+ struct rdma_ah_init_attr init_attr = {};
struct mlx4_ib_ah *mah = to_mah(ah);
int ret;
slave_attr.grh.sgid_attr = NULL;
slave_attr.grh.sgid_index = slave_sgid_index;
- ret = mlx4_ib_create_ah(ah, &slave_attr, 0, NULL);
+ init_attr.ah_attr = &slave_attr;
+ ret = mlx4_ib_create_ah(ah, &init_attr, NULL);
if (ret)
return ret;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 275722cec8c6..816d28854a8e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -558,7 +558,6 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
props->max_ah = INT_MAX;
@@ -2600,13 +2599,6 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
.modify_wq = mlx4_ib_modify_wq,
};
-static const struct ib_device_ops mlx4_ib_dev_fmr_ops = {
- .alloc_fmr = mlx4_ib_fmr_alloc,
- .dealloc_fmr = mlx4_ib_fmr_dealloc,
- .map_phys_fmr = mlx4_ib_map_phys_fmr,
- .unmap_fmr = mlx4_ib_unmap_fmr,
-};
-
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
.alloc_mw = mlx4_ib_alloc_mw,
.dealloc_mw = mlx4_ib_dealloc_mw,
@@ -2724,9 +2716,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
}
- if (!mlx4_is_slave(ibdev->dev))
- ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
-
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
ibdev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d188573187fa..6f4ea1067095 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -146,11 +146,6 @@ struct mlx4_ib_mw {
struct mlx4_mw mmw;
};
-struct mlx4_ib_fmr {
- struct ib_fmr ibfmr;
- struct mlx4_fmr mfmr;
-};
-
#define MAX_REGS_PER_FLOW 2
struct mlx4_flow_reg_id {
@@ -679,11 +674,6 @@ static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}
-static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
-}
-
static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
return container_of(ibflow, struct mlx4_ib_flow, ibflow);
@@ -752,7 +742,7 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
-int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx4_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
int slave_sgid_index, u8 *s_mac, u16 vlan_tag);
@@ -794,12 +784,6 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
- u64 iova);
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
-int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index b0121c90c561..7e0b205c05eb 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -380,7 +380,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
unsigned long untagged_start = untagged_addr(start);
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* FIXME: Ideally this would iterate over all the vmas that
* cover the memory, but for now it requires a single vma to
@@ -395,7 +395,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
access_flags |= IB_ACCESS_LOCAL_WRITE;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
return ib_umem_get(device, start, length, access_flags);
@@ -698,99 +698,6 @@ err_free:
return ERR_PTR(err);
}
-struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mlx4_ib_dev *dev = to_mdev(pd->device);
- struct mlx4_ib_fmr *fmr;
- int err = -ENOMEM;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
- fmr_attr->max_pages, fmr_attr->max_maps,
- fmr_attr->page_shift, &fmr->mfmr);
- if (err)
- goto err_free;
-
- err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
- if (err)
- goto err_mr;
-
- fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;
-
- return &fmr->ibfmr;
-
-err_mr:
- (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
-
-err_free:
- kfree(fmr);
-
- return ERR_PTR(err);
-}
-
-int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int npages, u64 iova)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);
-
- return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
- &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
-}
-
-int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *ibfmr;
- int err;
- struct mlx4_dev *mdev = NULL;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- if (mdev && to_mdev(ibfmr->device)->dev != mdev)
- return -EINVAL;
- mdev = to_mdev(ibfmr->device)->dev;
- }
-
- if (!mdev)
- return 0;
-
- list_for_each_entry(ibfmr, fmr_list, list) {
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
-
- mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
- }
-
- /*
- * Make sure all MPT status updates are visible before issuing
- * SYNC_TPT firmware command.
- */
- wmb();
-
- err = mlx4_SYNC_TPT(mdev);
- if (err)
- pr_warn("SYNC_TPT error %d when "
- "unmapping FMRs\n", err);
-
- return 0;
-}
-
-int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
-{
- struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
- struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
- int err;
-
- err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
-
- if (!err)
- kfree(ifmr);
-
- return err;
-}
-
static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 228be05fbaf8..8cca61c671f8 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -16,7 +16,8 @@ mlx5_ib-y := ah.o \
qpc.o \
restrack.o \
srq.o \
- srq_cmd.o
+ srq_cmd.o \
+ wr.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 80642dd359bc..59e5ec39b447 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -32,9 +32,28 @@
#include "mlx5_ib.h"
+static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+ const struct rdma_ah_attr *ah_attr)
+{
+ enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type;
+ __be16 sport;
+
+ if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
+ sport = cpu_to_be16(
+ rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ else
+ sport = mlx5_get_roce_udp_sport_min(dev,
+ ah_attr->grh.sgid_attr);
+
+ return sport;
+}
+
static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_init_attr *init_attr)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
@@ -51,12 +70,15 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (init_attr->xmit_slave)
+ ah->xmit_port =
+ mlx5_lag_get_slave_port(dev->mdev,
+ init_attr->xmit_slave);
gid_type = ah_attr->grh.sgid_attr->gid_type;
memcpy(ah->av.rmac, ah_attr->roce.dmac,
sizeof(ah_attr->roce.dmac));
- ah->av.udp_sport =
- mlx5_get_roce_udp_sport(dev, ah_attr->grh.sgid_attr);
+ ah->av.udp_sport = mlx5_ah_get_udp_sport(dev, ah_attr);
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
#define MLX5_ECN_ENABLED BIT(1)
@@ -68,10 +90,11 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
}
}
-int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct mlx5_ib_ah *ah = to_mah(ibah);
struct mlx5_ib_dev *dev = to_mdev(ibah->device);
enum rdma_ah_attr_type ah_type = ah_attr->type;
@@ -97,7 +120,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
return err;
}
- create_ib_ah(dev, ah, ah_attr);
+ create_ib_ah(dev, ah, init_attr);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index a2fcbc49131e..cc24c711e92a 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -1,46 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
*/
#include "cmd.h"
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
if (!err)
*mkey = MLX5_GET(query_special_contexts_out, out,
dump_fill_mkey);
@@ -50,12 +23,12 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
int err;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
if (!err)
*null_mkey = MLX5_GET(query_special_contexts_out, out,
null_mkey);
@@ -63,23 +36,15 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
}
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
- void *out, int out_size)
+ void *out)
{
- u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
MLX5_SET(query_cong_params_in, in, opcode,
MLX5_CMD_OP_QUERY_CONG_PARAMS);
MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
- void *in, int in_size)
-{
- u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };
-
- return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
+ return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
}
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
@@ -133,7 +98,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
MLX5_SET64(alloc_memic_in, in, range_start_addr,
hw_start_addr + (page_idx * PAGE_SIZE));
- ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
if (ret) {
spin_lock(&dm->lock);
bitmap_clear(dm->memic_alloc_pages,
@@ -162,8 +127,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
struct mlx5_core_dev *dev = dm->dev;
u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
- u32 out[MLX5_ST_SZ_DW(dealloc_memic_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
u64 start_page_idx;
int err;
@@ -174,7 +138,7 @@ void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
MLX5_SET(dealloc_memic_in, in, memic_size, length);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
if (err)
return;
@@ -198,49 +162,46 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
MLX5_SET(destroy_tir_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
}
void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
MLX5_SET(destroy_tis_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
}
void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
MLX5_SET(destroy_rqt_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
MLX5_SET(alloc_transport_domain_in, in, uid, uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
@@ -251,32 +212,29 @@ int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
u16 uid)
{
- u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
MLX5_SET(dealloc_pd_in, in, pd, pdn);
MLX5_SET(dealloc_pd_in, in, uid, uid);
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ mlx5_cmd_exec_in(dev, dealloc_pd, in);
}
int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
- u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
void *gid;
MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
@@ -284,14 +242,13 @@ int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
MLX5_SET(attach_to_mcg_in, in, uid, uid);
gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
}
int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
u32 qpn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
- u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
void *gid;
MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
@@ -299,18 +256,18 @@ int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
MLX5_SET(detach_from_mcg_in, in, uid, uid);
gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
memcpy(gid, mgid, sizeof(*mgid));
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
}
int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
int err;
MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
MLX5_SET(alloc_xrcd_in, in, uid, uid);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
if (!err)
*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
return err;
@@ -318,13 +275,12 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
MLX5_SET(dealloc_xrcd_in, in, uid, uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
}
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
@@ -350,7 +306,7 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 43079b18d9b4..f4d8558db434 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -40,10 +40,8 @@
int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
- void *out, int out_size);
+ void *out);
int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out);
-int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
- void *in, int in_size);
int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
u64 length, u32 alignment);
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index de4da92b81a6..b9291e482428 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -290,7 +290,7 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out);
if (err)
goto free;
@@ -339,7 +339,7 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
+ err = mlx5_cmd_exec_in(dev->mdev, modify_cong_params, in);
kvfree(in);
alloc_err:
mlx5_ib_put_native_port_mdev(dev, port_num + 1);
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 35b98c2d64d5..9454a66c12cc 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -495,6 +495,10 @@ static u64 devx_get_obj_id(const void *in)
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
MLX5_GET(rst2init_qp_in, in, qpn));
break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2init_qp_in, in, qpn));
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
MLX5_GET(init2rtr_qp_in, in, qpn));
@@ -615,7 +619,7 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
enum ib_qp_type qp_type = qp->ibqp.qp_type;
if (qp_type == IB_QPT_RAW_PACKET ||
- (qp->flags & MLX5_IB_QP_UNDERLAY)) {
+ (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
@@ -820,6 +824,7 @@ static bool devx_is_obj_modify_cmd(const void *in)
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
case MLX5_CMD_OP_RST2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
@@ -2217,14 +2222,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
obj->mdev = dev->mdev;
uobj->object = obj;
devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
- err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
- if (err)
- goto err_umem_destroy;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
- return 0;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
+ sizeof(obj_id));
+ return err;
-err_umem_destroy:
- mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
err_umem_release:
ib_umem_release(obj->umem);
err_obj_free:
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 3a0601c2052c..216a1108ad34 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -67,46 +67,41 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
},
};
-#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
-static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
- struct uverbs_attr_bundle *attrs)
+static int get_dests(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
+ int *dest_type, struct ib_qp **qp, u32 *flags)
{
- struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
- struct mlx5_ib_flow_handler *flow_handler;
- struct mlx5_ib_flow_matcher *fs_matcher;
- struct ib_uobject **arr_flow_actions;
- struct ib_uflow_resources *uflow_res;
- struct mlx5_flow_act flow_act = {};
- void *devx_obj;
- int dest_id, dest_type;
- void *cmd_in;
- int inlen;
bool dest_devx, dest_qp;
- struct ib_qp *qp = NULL;
- struct ib_uobject *uobj =
- uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
- struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
- int len, ret, i;
- u32 counter_id = 0;
- u32 *offset_attr;
- u32 offset = 0;
-
- if (!capable(CAP_NET_RAW))
- return -EPERM;
+ void *devx_obj;
+ int err;
- dest_devx =
- uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_devx = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
dest_qp = uverbs_attr_is_valid(attrs,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- fs_matcher = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
- ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+ *flags = 0;
+ err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
+ if (err)
+ return err;
+
+ /* Both flags are not allowed */
+ if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
+ *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
return -EINVAL;
- /* Allow only DEVX object as dest when inserting to FDB */
- if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ if (dest_devx && (dest_qp || *flags))
+ return -EINVAL;
+ else if (dest_qp && *flags)
+ return -EINVAL;
+ }
+
+ /* Allow only DEVX object, drop as dest for FDB */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
+ (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
return -EINVAL;
/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
@@ -114,43 +109,86 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
return -EINVAL;
+ *qp = NULL;
if (dest_devx) {
- devx_obj = uverbs_attr_get_obj(
- attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
- if (IS_ERR(devx_obj))
- return PTR_ERR(devx_obj);
+ devx_obj =
+ uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
/* Verify that the given DEVX object is a flow
* steering destination.
*/
- if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
+ if (!mlx5_ib_devx_is_flow_dest(devx_obj, dest_id, dest_type))
return -EINVAL;
/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
- dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
return -EINVAL;
} else if (dest_qp) {
struct mlx5_ib_qp *mqp;
- qp = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
+ *qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(*qp))
+ return PTR_ERR(*qp);
- if (qp->qp_type != IB_QPT_RAW_PACKET)
+ if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
return -EINVAL;
- mqp = to_mqp(qp);
- if (mqp->flags & MLX5_IB_QP_RSS)
- dest_id = mqp->rss_qp.tirn;
+ mqp = to_mqp(*qp);
+ if (mqp->is_rss)
+ *dest_id = mqp->rss_qp.tirn;
else
- dest_id = mqp->raw_packet_qp.rq.tirn;
- dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- } else {
- dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ *dest_id = mqp->raw_packet_qp.rq.tirn;
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
+ if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_flow_context flow_context = {.flow_tag =
+ MLX5_FS_DEFAULT_FLOW_TAG};
+ u32 *offset_attr, offset = 0, counter_id = 0;
+ int dest_id, dest_type, inlen, len, ret, i;
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
+ struct mlx5_flow_act flow_act = {};
+ struct ib_qp *qp = NULL;
+ void *devx_obj, *cmd_in;
+ struct ib_uobject *uobj;
+ struct mlx5_ib_dev *dev;
+ u32 flags;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
+ return -EINVAL;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+
len = uverbs_attr_get_uobjs_arr(attrs,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
if (len) {
@@ -180,10 +218,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
- if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
- return -EINVAL;
-
cmd_in = uverbs_attr_get_alloced_ptr(
attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
inlen = uverbs_attr_get_len(attrs,
@@ -629,7 +663,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
UA_OPTIONAL,
- UA_ALLOC_AND_COPY));
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ enum mlx5_ib_create_flow_flags,
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 1ae6fd95acaa..40d418153891 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -119,17 +119,15 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
struct mlx5_ib_gsi_qp *gsi;
struct ib_qp_init_attr hw_init_attr = *init_attr;
const u8 port_num = init_attr->port_num;
- const int num_pkeys = pd->device->attrs.max_pkeys;
- const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+ int num_qps = 0;
int ret;
- mlx5_ib_dbg(dev, "creating GSI QP\n");
-
- if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
- mlx5_ib_warn(dev,
- "invalid port number %d during GSI QP creation\n",
- port_num);
- return ERR_PTR(-EINVAL);
+ if (mlx5_ib_deth_sqpn_cap(dev)) {
+ if (MLX5_CAP_GEN(dev->mdev,
+ port_type) == MLX5_CAP_PORT_TYPE_IB)
+ num_qps = pd->device->attrs.max_pkeys;
+ else if (dev->lag_active)
+ num_qps = MLX5_MAX_PORTS;
}
gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
@@ -270,7 +268,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
}
static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
- u16 qp_index)
+ u16 pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct ib_qp_attr attr;
@@ -279,7 +277,7 @@ static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
attr.qp_state = IB_QPS_INIT;
- attr.pkey_index = qp_index;
+ attr.pkey_index = pkey_index;
attr.qkey = IB_QP1_QKEY;
attr.port_num = gsi->port_num;
ret = ib_modify_qp(qp, &attr, mask);
@@ -313,12 +311,17 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
{
struct ib_device *device = gsi->rx_qp->device;
struct mlx5_ib_dev *dev = to_mdev(device);
+ int pkey_index = qp_index;
+ struct mlx5_ib_qp *mqp;
struct ib_qp *qp;
unsigned long flags;
u16 pkey;
int ret;
- ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ pkey_index = 0;
+
+ ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
if (ret) {
mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
gsi->port_num, qp_index);
@@ -347,7 +350,10 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
return;
}
- ret = modify_to_rts(gsi, qp, qp_index);
+ mqp = to_mqp(qp);
+ if (dev->lag_active)
+ mqp->gsi_lag_port = qp_index + 1;
+ ret = modify_to_rts(gsi, qp, pkey_index);
if (ret)
goto err_destroy_qp;
@@ -466,11 +472,15 @@ static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
{
struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_ah *ah = to_mah(wr->ah);
int qp_index = wr->pkey_index;
- if (!mlx5_ib_deth_sqpn_cap(dev))
+ if (!gsi->num_qps)
return gsi->rx_qp;
+ if (dev->lag_active && ah->xmit_port)
+ qp_index = ah->xmit_port - 1;
+
if (qp_index >= gsi->num_qps)
return NULL;
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
index 3b6750cba796..5b30d3fa8f8d 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.h
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -9,9 +9,9 @@
#include <linux/mlx5/eswitch.h>
#include "mlx5_ib.h"
-#ifdef CONFIG_MLX5_ESWITCH
extern const struct mlx5_ib_profile raw_eth_profile;
+#ifdef CONFIG_MLX5_ESWITCH
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
u16 vport_num);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 566b42f3fb18..343a8b8361e7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,6 +53,7 @@
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
+#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
@@ -60,6 +61,7 @@
#include "cmd.h"
#include "srq.h"
#include "qp.h"
+#include "wr.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
@@ -70,17 +72,10 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
-#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "5.0-0"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");
-static char mlx5_version[] =
- DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
- DRIVER_VERSION "\n";
-
struct mlx5_ib_event_work {
struct work_struct work;
union {
@@ -628,8 +623,8 @@ static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
attr->index, NULL, NULL);
}
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
- const struct ib_gid_attr *attr)
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
{
if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
return 0;
@@ -1004,7 +999,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
props->max_ah = INT_MAX;
props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
@@ -1964,6 +1958,9 @@ uar_done:
resp.response_length += sizeof(resp.dump_fill_mkey);
}
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_mdev;
@@ -1974,7 +1971,7 @@ uar_done:
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
- if (dev->lag_active) {
+ if (mlx5_ib_lag_should_assign_affinity(dev)) {
u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_set(&context->tx_port_affinity,
@@ -2561,7 +2558,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct mlx5_ib_alloc_pd_resp resp;
int err;
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
@@ -2569,8 +2566,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
uid = context ? context->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
- err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
- out, sizeof(out));
+ err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
if (err)
return err;
@@ -3944,7 +3940,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
} else {
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- if (mqp->flags & MLX5_IB_QP_RSS)
+ if (mqp->is_rss)
dst->tir_num = mqp->rss_qp.tirn;
else
dst->tir_num = mqp->raw_packet_qp.rq.tirn;
@@ -4199,18 +4195,17 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
dst[dst_num].type = dest_type;
- dst[dst_num].tir_num = dest_id;
+ dst[dst_num++].tir_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
- dst[dst_num].ft_num = dest_id;
+ dst[dst_num++].ft_num = dest_id;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- } else {
- dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
}
- dst_num++;
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -4420,7 +4415,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
uid = ibqp->pd ?
to_mpd(ibqp->pd)->uid : 0;
- if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
@@ -6194,26 +6189,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
mmap_offset = mlx5_entry_to_mmap_offset(entry);
length = entry->rdma_entry.npages * PAGE_SIZE;
uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
&entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
&length, sizeof(length));
- if (err)
- goto err;
-
- return 0;
-
-err:
- rdma_user_mmap_entry_remove(&entry->rdma_entry);
return err;
}
@@ -6327,26 +6316,20 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
mmap_offset = mlx5_entry_to_mmap_offset(entry);
length = entry->rdma_entry.npages * PAGE_SIZE;
uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
&mmap_offset, sizeof(mmap_offset));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
&entry->page_idx, sizeof(entry->page_idx));
if (err)
- goto err;
+ return err;
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
&length, sizeof(length));
- if (err)
- goto err;
-
- return 0;
-
-err:
- rdma_user_mmap_entry_remove(&entry->rdma_entry);
return err;
}
@@ -6540,6 +6523,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@@ -6629,8 +6613,8 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.modify_qp = mlx5_ib_modify_qp,
.modify_srq = mlx5_ib_modify_srq,
.poll_cq = mlx5_ib_poll_cq,
- .post_recv = mlx5_ib_post_recv,
- .post_send = mlx5_ib_post_send,
+ .post_recv = mlx5_ib_post_recv_nodrain,
+ .post_send = mlx5_ib_post_send_nodrain,
.post_srq_recv = mlx5_ib_post_srq_recv,
.process_mad = mlx5_ib_process_mad,
.query_ah = mlx5_ib_query_ah,
@@ -7131,6 +7115,8 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
int err;
int i;
+ dev->profile = profile;
+
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
err = profile->stage[i].init(dev);
@@ -7139,7 +7125,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
- dev->profile = profile;
dev->ib_active = true;
return dev;
@@ -7313,8 +7298,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
int port_type_cap;
int num_ports;
- printk_once(KERN_INFO "%s", mlx5_version);
-
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
if (!mlx5_core_mp_enabled(mdev))
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index aaabb8a98eed..5dbe3eb0d9cb 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -337,7 +337,6 @@ struct mlx5_ib_rwq {
struct ib_umem *umem;
size_t buf_size;
unsigned int page_shift;
- int create_type;
struct mlx5_db db;
u32 user_index;
u32 wqe_count;
@@ -346,17 +345,6 @@ struct mlx5_ib_rwq {
u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};
-enum {
- MLX5_QP_USER,
- MLX5_QP_KERNEL,
- MLX5_QP_EMPTY
-};
-
-enum {
- MLX5_WQ_USER,
- MLX5_WQ_KERNEL
-};
-
struct mlx5_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_rwq_ind_tbl;
u32 rqtn;
@@ -443,34 +431,37 @@ struct mlx5_ib_qp {
/* serialize qp state modifications
*/
struct mutex mutex;
+ /* cached variant of create_flags from struct ib_qp_init_attr */
u32 flags;
u8 port;
u8 state;
- int wq_sig;
- int scat_cqe;
int max_inline_data;
struct mlx5_bf bf;
- int has_rq;
+ u8 has_rq:1;
+ u8 is_rss:1;
/* only for user space QPs. For kernel
* we have it from the bf object
*/
int bfregn;
- int create_type;
-
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
struct mlx5_rate_limit rl;
u32 underlay_qpn;
u32 flags_en;
- /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
- enum ib_qp_type qp_sub_type;
+ /*
+ * IB/core doesn't store low-level QP types, so
+ * store both MLX and IBTA types in the field below.
+	 * IB_QPT_DRIVER will be broken down into DCI/DCT subtypes.
+ */
+ enum ib_qp_type type;
	/* A flag to indicate that a new counter is configured
	 * but has not yet taken effect
*/
u32 counter_pending;
+ u16 gsi_lag_port;
};
struct mlx5_ib_cq_buf {
@@ -481,24 +472,6 @@ struct mlx5_ib_cq_buf {
int nent;
};
-enum mlx5_ib_qp_flags {
- MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
- MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
- MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
- MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
- MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
- MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
- /* QP uses 1 as its source QP number */
- MLX5_IB_QP_SQPN_QP1 = 1 << 6,
- MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
- MLX5_IB_QP_RSS = 1 << 8,
- MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
- MLX5_IB_QP_UNDERLAY = 1 << 10,
- MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
- MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
- MLX5_IB_QP_PACKET_BASED_CREDIT = 1 << 13,
-};
-
struct mlx5_umr_wr {
struct ib_send_wr wr;
u64 virt_addr;
@@ -702,12 +675,6 @@ struct umr_common {
struct semaphore sem;
};
-enum {
- MLX5_FMR_INVALID,
- MLX5_FMR_VALID,
- MLX5_FMR_BUSY,
-};
-
struct mlx5_cache_ent {
struct list_head head;
	/* sync access to the cache entry
@@ -1181,7 +1148,7 @@ void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
-int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
@@ -1205,10 +1172,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr);
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
@@ -1284,8 +1247,6 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
-int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
unsigned long max_page_shift,
int *count, int *shift,
@@ -1383,8 +1344,8 @@ int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
u64 guid, int type);
-__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
- const struct ib_gid_attr *attr);
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
@@ -1581,4 +1542,11 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
int mlx5_ib_enable_driver(struct ib_device *dev);
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+ return dev->lag_active ||
+ (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+ MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 16af1105cfcf..7d2ec9ee5097 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -447,8 +447,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
{
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
pfault->wqe.wq_num : pfault->token;
- u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
- u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
int err;
MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
@@ -457,7 +456,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
MLX5_SET(page_fault_resume_in, in, error, !!error);
- err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
if (err)
mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
wq_num, err);
@@ -1136,8 +1135,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
*wqe += sizeof(struct mlx5_wqe_xrc_seg);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->qp_sub_type == MLX5_IB_QPT_DCI) {
+ if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
av = *wqe;
if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
@@ -1190,7 +1188,7 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_wq *wq = &qp->rq;
int wqe_size = 1 << wq->wqe_shift;
- if (qp->wq_sig) {
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
return -EFAULT;
}
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
index cac878a70edb..dce92554142a 100644
--- a/drivers/infiniband/hw/mlx5/qos.c
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -69,17 +69,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
if (err)
goto err;
- err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
- &pp_entry->index, sizeof(pp_entry->index));
- if (err)
- goto clean;
-
pp_entry->mdev = dev->mdev;
uobj->object = pp_entry;
- return 0;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ return err;
-clean:
- mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
err:
kfree(pp_entry);
return err;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d93eec5d3277..81bf6b975e0e 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -40,9 +40,7 @@
#include "ib_rep.h"
#include "cmd.h"
#include "qp.h"
-
-/* not supported currently */
-static int wq_signature;
+#include "wr.h"
enum {
MLX5_IB_ACK_REQ_FREQ = 8,
@@ -55,32 +53,6 @@ enum {
MLX5_IB_LINK_TYPE_ETH = 1
};
-enum {
- MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
-};
-
-static const u32 mlx5_ib_opcode[] = {
- [IB_WR_SEND] = MLX5_OPCODE_SEND,
- [IB_WR_LSO] = MLX5_OPCODE_LSO,
- [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
- [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
- [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
- [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
- [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
- [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
- [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
- [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
- [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
-};
-
-struct mlx5_wqe_eth_pad {
- u8 rsvd0[16];
-};
-
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
@@ -392,17 +364,26 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
+ int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
+
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
- if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ if ((1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) <
+ wq_sig)
return -EINVAL;
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
- wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
+ wqe_size =
+ wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
+ 0;
wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
wqe_size = roundup_pow_of_two(wqe_size);
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
@@ -416,7 +397,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
- qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
}
}
@@ -596,7 +580,7 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
}
if (attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
} else {
@@ -751,10 +735,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
- case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
- case IB_QPT_RAW_PACKET:
- case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
- case IB_QPT_MAX:
+ case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE;
default: return -EINVAL;
}
}
@@ -891,7 +872,6 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- rwq->create_type = MLX5_WQ_USER;
return 0;
err_umem:
@@ -906,15 +886,14 @@ static int adjust_bfregn(struct mlx5_ib_dev *dev,
bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
-static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct ib_udata *udata,
- struct ib_qp_init_attr *attr,
- u32 **in,
- struct mlx5_ib_create_qp_resp *resp, int *inlen,
- struct mlx5_ib_qp_base *base)
+static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ struct ib_qp_init_attr *attr, u32 **in,
+ struct mlx5_ib_create_qp_resp *resp, int *inlen,
+ struct mlx5_ib_qp_base *base,
+ struct mlx5_ib_create_qp *ucmd)
{
struct mlx5_ib_ucontext *context;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
int uar_index = 0;
@@ -928,30 +907,24 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u16 uid;
u32 uar_flags;
- err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return err;
- }
-
context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
ibucontext);
- uar_flags = ucmd.flags & (MLX5_QP_FLAG_UAR_PAGE_INDEX |
- MLX5_QP_FLAG_BFREG_INDEX);
+ uar_flags = qp->flags_en &
+ (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
switch (uar_flags) {
case MLX5_QP_FLAG_UAR_PAGE_INDEX:
- uar_index = ucmd.bfreg_index;
+ uar_index = ucmd->bfreg_index;
bfregn = MLX5_IB_INVALID_BFREG;
break;
case MLX5_QP_FLAG_BFREG_INDEX:
uar_index = bfregn_to_uar_index(dev, &context->bfregi,
- ucmd.bfreg_index, true);
+ ucmd->bfreg_index, true);
if (uar_index < 0)
return uar_index;
bfregn = MLX5_IB_INVALID_BFREG;
break;
case 0:
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
return -EINVAL;
bfregn = alloc_bfreg(dev, &context->bfregi);
if (bfregn < 0)
@@ -970,12 +943,12 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
- err = set_user_buf_size(dev, qp, &ucmd, base, attr);
+ err = set_user_buf_size(dev, qp, ucmd, base, attr);
if (err)
goto err_bfreg;
- if (ucmd.buf_addr && ubuffer->buf_size) {
- ubuffer->buf_addr = ucmd.buf_addr;
+ if (ucmd->buf_addr && ubuffer->buf_size) {
+ ubuffer->buf_addr = ucmd->buf_addr;
err = mlx5_ib_umem_get(dev, udata, ubuffer->buf_addr,
ubuffer->buf_size, &ubuffer->umem,
&npages, &page_shift, &ncont, &offset);
@@ -993,8 +966,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
- uid = (attr->qp_type != IB_QPT_XRC_TGT &&
- attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
+ uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
MLX5_SET(create_qp_in, *in, uid, uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
@@ -1012,24 +984,14 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
- err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &qp->db);
+ err = mlx5_ib_db_map_user(context, udata, ucmd->db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
}
- err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- goto err_unmap;
- }
- qp->create_type = MLX5_QP_USER;
-
return 0;
-err_unmap:
- mlx5_ib_db_unmap_user(context, &qp->db);
-
err_free:
kvfree(*in);
@@ -1042,72 +1004,50 @@ err_bfreg:
return err;
}
-static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
- struct ib_udata *udata)
+static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_qp_base *base, struct ib_udata *udata)
{
- struct mlx5_ib_ucontext *context =
- rdma_udata_to_drv_context(
- udata,
- struct mlx5_ib_ucontext,
- ibucontext);
-
- mlx5_ib_db_unmap_user(context, &qp->db);
- ib_umem_release(base->ubuffer.umem);
-
- /*
- * Free only the BFREGs which are handled by the kernel.
- * BFREGs of UARs allocated dynamically are handled by user.
- */
- if (qp->bfregn != MLX5_IB_INVALID_BFREG)
- mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
-}
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
-/* get_sq_edge - Get the next nearby edge.
- *
- * An 'edge' is defined as the first following address after the end
- * of the fragment or the SQ. Accordingly, during the WQE construction
- * which repetitively increases the pointer to write the next data, it
- * simply should check if it gets to an edge.
- *
- * @sq - SQ buffer.
- * @idx - Stride index in the SQ buffer.
- *
- * Return:
- * The new edge.
- */
-static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
-{
- void *fragment_end;
+ if (udata) {
+ /* User QP */
+ mlx5_ib_db_unmap_user(context, &qp->db);
+ ib_umem_release(base->ubuffer.umem);
- fragment_end = mlx5_frag_buf_get_wqe
- (&sq->fbc,
- mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+	 * BFREGs of UARs allocated dynamically are handled by the user.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
+ return;
+ }
- return fragment_end + MLX5_SEND_WQE_BB;
+ /* Kernel QP */
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
+ if (qp->db.db)
+ mlx5_db_free(dev->mdev, &qp->db);
+ if (qp->buf.frags)
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
}
-static int create_kernel_qp(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_qp *qp,
- u32 **in, int *inlen,
- struct mlx5_ib_qp_base *base)
+static int _create_kernel_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_qp *qp, u32 **in, int *inlen,
+ struct mlx5_ib_qp_base *base)
{
int uar_index;
void *qpc;
int err;
- if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
- IB_QP_CREATE_IPOIB_UD_LSO |
- IB_QP_CREATE_NETIF_QP |
- MLX5_IB_QP_CREATE_SQPN_QP1 |
- MLX5_IB_QP_CREATE_WC_TEST))
- return -EINVAL;
-
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
- else if (init_attr->create_flags & MLX5_IB_QP_CREATE_WC_TEST)
+ else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
qp->bf.bfreg = &dev->wc_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
@@ -1167,10 +1107,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
- if (init_attr->create_flags & MLX5_IB_QP_CREATE_SQPN_QP1) {
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
MLX5_SET(qpc, qpc, deth_sqpn, 1);
- qp->flags |= MLX5_IB_QP_SQPN_QP1;
- }
mlx5_fill_page_frag_array(&qp->buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in,
@@ -1198,7 +1136,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
err = -ENOMEM;
goto err_wrid;
}
- qp->create_type = MLX5_QP_KERNEL;
return 0;
@@ -1218,36 +1155,15 @@ err_buf:
return err;
}
-static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
-{
- kvfree(qp->sq.wqe_head);
- kvfree(qp->sq.w_list);
- kvfree(qp->sq.wrid);
- kvfree(qp->sq.wr_data);
- kvfree(qp->rq.wrid);
- mlx5_db_free(dev->mdev, &qp->db);
- mlx5_frag_buf_free(dev->mdev, &qp->buf);
-}
-
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
- if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
- (attr->qp_type == MLX5_IB_QPT_DCI) ||
- (attr->qp_type == IB_QPT_XRC_INI))
+ if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
return MLX5_ZERO_LEN_RQ;
- else
- return MLX5_NON_ZERO_RQ;
-}
-
-static int is_connected(enum ib_qp_type qp_type)
-{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
- qp_type == MLX5_IB_QPT_DCI)
- return 1;
- return 0;
+ return MLX5_NON_ZERO_RQ;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
@@ -1260,7 +1176,7 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
- if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
@@ -1409,7 +1325,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
- if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
+ if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
MLX5_SET(rqc, rqc, scatter_fcs, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -1440,13 +1356,6 @@ static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
}
-static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
-{
- return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
- MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
- MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
-}
-
static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq,
u32 qp_flags_en,
@@ -1524,6 +1433,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u16 uid = to_mpd(pd)->uid;
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+ if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
+ return -EINVAL;
if (qp->sq.wqe_cnt) {
err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
if (err)
@@ -1547,9 +1458,9 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->rq.wqe_cnt) {
rq->base.container_mibqp = qp;
- if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
+ if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
- if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
if (err)
@@ -1584,14 +1495,8 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
- err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
- if (err)
- goto err_destroy_tir;
-
return 0;
-err_destroy_tir:
- destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
@@ -1643,14 +1548,27 @@ static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *q
to_mpd(qp->ibqp.pd)->uid);
}
-static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+struct mlx5_create_qp_params {
+ struct ib_udata *udata;
+ size_t inlen;
+ size_t outlen;
+ size_t ucmd_size;
+ void *ucmd;
+ u8 is_rss_raw : 1;
+ struct ib_qp_init_attr *attr;
+ u32 uidx;
+ struct mlx5_ib_create_qp_resp resp;
+};
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_create_qp_resp resp = {};
int inlen;
int outlen;
int err;
@@ -1660,79 +1578,28 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
void *hfso;
u32 selected_fields = 0;
u32 outer_l4;
- size_t min_resp_len;
u32 tdn = mucontext->tdn;
- struct mlx5_ib_create_qp_rss ucmd = {};
- size_t required_cmd_sz;
u8 lb_flag = 0;
- if (init_attr->qp_type != IB_QPT_RAW_PACKET)
- return -EOPNOTSUPP;
-
- if (init_attr->create_flags || init_attr->send_cq)
- return -EINVAL;
-
- min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
- if (udata->outlen < min_resp_len)
- return -EINVAL;
-
- required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
- if (udata->inlen < required_cmd_sz) {
- mlx5_ib_dbg(dev, "invalid inlen\n");
- return -EINVAL;
- }
-
- if (udata->inlen > sizeof(ucmd) &&
- !ib_is_udata_cleared(udata, sizeof(ucmd),
- udata->inlen - sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "inlen is not supported\n");
- return -EOPNOTSUPP;
- }
-
- if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
- }
-
- if (ucmd.comp_mask) {
+ if (ucmd->comp_mask) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
}
- if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
- mlx5_ib_dbg(dev, "invalid flags\n");
- return -EOPNOTSUPP;
- }
-
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
- !tunnel_offload_supported(dev->mdev)) {
- mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n");
- return -EOPNOTSUPP;
- }
-
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
- !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
return -EOPNOTSUPP;
}
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
- lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (dev->is_rep)
qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- }
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
- lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
- }
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
- err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
- if (err) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EINVAL;
- }
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
outlen = MLX5_ST_SZ_BYTES(create_tir_out);
@@ -1751,29 +1618,29 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
else
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- switch (ucmd.rx_hash_function) {
+ switch (ucmd->rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
- if (len != ucmd.rx_key_len) {
+ if (len != ucmd->rx_key_len) {
err = -EINVAL;
goto err;
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- memcpy(rss_key, ucmd.rx_hash_key, len);
+ memcpy(rss_key, ucmd->rx_hash_key, len);
break;
}
default:
@@ -1781,7 +1648,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err;
}
- if (!ucmd.rx_hash_fields_mask) {
+ if (!ucmd->rx_hash_fields_mask) {
/* special case when this TIR serves as steering entry without hashing */
if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
goto create_tir;
@@ -1789,29 +1656,31 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err;
}
- if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
- ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
err = -EINVAL;
goto err;
}
/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
- else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
- outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 |
- ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 |
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
+ outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ << 0 |
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ << 1 |
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
/* Check that only one l4 protocol is set */
if (outer_l4 & (outer_l4 - 1)) {
@@ -1820,32 +1689,32 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
- else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
- if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
- (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
- if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
@@ -1867,73 +1736,43 @@ create_tir:
goto err;
if (mucontext->devx_uid) {
- resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
- resp.tirn = qp->rss_qp.tirn;
+ params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ params->resp.tirn = qp->rss_qp.tirn;
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
- resp.tir_icm_addr =
+ params->resp.tir_icm_addr =
MLX5_GET(create_tir_out, out, icm_address_31_0);
- resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
- icm_address_39_32)
- << 32;
- resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
- icm_address_63_40)
- << 40;
- resp.comp_mask |=
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ params->resp.comp_mask |=
MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
}
}
- err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
- if (err)
- goto err_copy;
-
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
- qp->flags |= MLX5_IB_QP_RSS;
+ qp->is_rss = true;
return 0;
-err_copy:
- mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
kvfree(in);
return err;
}
-static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
- void *qpc)
-{
- int rcqe_sz;
-
- if (init_attr->qp_type == MLX5_IB_QPT_DCI)
- return;
-
- rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
-
- if (init_attr->qp_type == MLX5_IB_QPT_DCT) {
- if (rcqe_sz == 128)
- MLX5_SET(dctc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
-
- return;
- }
-
- MLX5_SET(qpc, qpc, cs_res,
- rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
- MLX5_RES_SCAT_DATA32_CQE);
-}
-
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
- enum ib_qp_type qpt = init_attr->qp_type;
int scqe_sz;
bool allow_scat_cqe = false;
- if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
- return;
-
if (ucmd)
allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
@@ -1998,269 +1837,182 @@ static int get_atomic_mode(struct mlx5_ib_dev *dev,
return atomic_mode;
}
-static inline bool check_flags_mask(uint64_t input, uint64_t supported)
-{
- return (input & ~supported) == 0;
-}
-
-static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, struct mlx5_ib_qp *qp)
+static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
- struct mlx5_ib_create_qp_resp resp = {};
- struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_cq *send_cq;
- struct mlx5_ib_cq *recv_cq;
- unsigned long flags;
- u32 uidx = MLX5_IB_DEFAULT_UIDX;
- struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_qp_base *base;
- int mlx5_st;
+ unsigned long flags;
void *qpc;
u32 *in;
int err;
mutex_init(&qp->mutex);
- spin_lock_init(&qp->sq.lock);
- spin_lock_init(&qp->rq.lock);
- mlx5_st = to_mlx5_st(init_attr->qp_type);
- if (mlx5_st < 0)
- return -EINVAL;
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
- if (init_attr->rwq_ind_tbl) {
- if (!udata)
- return -ENOSYS;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
- err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
- return err;
- }
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
- mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
- return -EINVAL;
- } else {
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- }
- }
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
- if (init_attr->create_flags &
- (IB_QP_CREATE_CROSS_CHANNEL |
- IB_QP_CREATE_MANAGED_SEND |
- IB_QP_CREATE_MANAGED_RECV)) {
- if (!MLX5_CAP_GEN(mdev, cd)) {
- mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
- return -EINVAL;
- }
- if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
- qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
- if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
- qp->flags |= MLX5_IB_QP_MANAGED_SEND;
- if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
- qp->flags |= MLX5_IB_QP_MANAGED_RECV;
- }
-
- if (init_attr->qp_type == IB_QPT_UD &&
- (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
- if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
- mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
- return -EOPNOTSUPP;
- }
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
- if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
- return -EOPNOTSUPP;
- }
- if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
- !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
- mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
- }
+ MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
- qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
- if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
- if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
- (init_attr->qp_type != IB_QPT_RAW_PACKET))
- return -EOPNOTSUPP;
- qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+		/* Special case to clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (udata) {
- if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
- mlx5_ib_dbg(dev, "copy failed\n");
- return -EFAULT;
- }
+ base = &qp->trans_qp.base;
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+ kvfree(in);
+ if (err)
+ return err;
- if (!check_flags_mask(ucmd.flags,
- MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
- MLX5_QP_FLAG_BFREG_INDEX |
- MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
- MLX5_QP_FLAG_SCATTER_CQE |
- MLX5_QP_FLAG_SIGNATURE |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
- MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
- MLX5_QP_FLAG_TUNNEL_OFFLOADS |
- MLX5_QP_FLAG_UAR_PAGE_INDEX |
- MLX5_QP_FLAG_TYPE_DCI |
- MLX5_QP_FLAG_TYPE_DCT))
- return -EINVAL;
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
- err = get_qp_user_index(ucontext, &ucmd, udata->inlen, &uidx);
- if (err)
- return err;
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
- if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
- !tunnel_offload_supported(mdev)) {
- mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
- }
+ qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
+ return 0;
+}
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
- }
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
- if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
- if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
- }
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
- if (ucmd.flags & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE) {
- if (init_attr->qp_type != IB_QPT_RC ||
- !MLX5_CAP_GEN(dev->mdev, qp_packet_based)) {
- mlx5_ib_dbg(dev, "packet based credit mode isn't supported\n");
- return -EOPNOTSUPP;
- }
- qp->flags |= MLX5_IB_QP_PACKET_BASED_CREDIT;
- }
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
- if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
- if (init_attr->qp_type != IB_QPT_UD ||
- (MLX5_CAP_GEN(dev->mdev, port_type) !=
- MLX5_CAP_PORT_TYPE_IB) ||
- !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
- mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
- return -EOPNOTSUPP;
- }
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
- qp->flags |= MLX5_IB_QP_UNDERLAY;
- qp->underlay_qpn = init_attr->source_qpn;
- }
- } else {
- qp->wq_sig = !!wq_signature;
- }
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ qp->underlay_qpn = init_attr->source_qpn;
base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
qp->has_rq = qp_has_rq(init_attr);
- err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
- qp, udata ? &ucmd : NULL);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
- if (pd) {
- if (udata) {
- __u32 max_wqes =
- 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
- if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
- ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
- mlx5_ib_dbg(dev, "invalid rq params\n");
- return -EINVAL;
- }
- if (ucmd.sq_wqe_count > max_wqes) {
- mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, max_wqes);
- return -EINVAL;
- }
- if (init_attr->create_flags &
- MLX5_IB_QP_CREATE_SQPN_QP1) {
- mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
- return -EINVAL;
- }
- err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
- &resp, &inlen, base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- } else {
- err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
- base);
- if (err)
- mlx5_ib_dbg(dev, "err %d\n", err);
- }
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
- if (err)
- return err;
- } else {
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
- qp->create_type = MLX5_QP_EMPTY;
- }
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
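+	/* Pass the ECE (enhanced connection establishment) options from userspace through to firmware when supported. */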
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
- if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
- MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
- else
- MLX5_SET(qpc, qpc, latency_sensitive, 1);
-
-
- if (qp->wq_sig)
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
MLX5_SET(qpc, qpc, wq_signature, 1);
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
MLX5_SET(qpc, qpc, block_lb_mc, 1);
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
MLX5_SET(qpc, qpc, cd_master, 1);
- if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
MLX5_SET(qpc, qpc, cd_slave_send, 1);
- if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
- if (qp->flags & MLX5_IB_QP_PACKET_BASED_CREDIT)
+ if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
- if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- configure_responder_scat_cqe(init_attr, qpc);
- configure_requester_scat_cqe(dev, init_attr,
- udata ? &ucmd : NULL,
- qpc);
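+	/* Responder scatter-to-CQE: pick 64B or 32B of scattered data based on the receive CQ's CQE size. */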
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (init_attr->qp_type == IB_QPT_RC ||
+ init_attr->qp_type == IB_QPT_UC)) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
}
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
+ configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2281,12 +2033,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
/* Set default resources */
switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
- MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
- MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
- break;
case IB_QPT_XRC_INI:
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
@@ -2314,52 +2060,163 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
MLX5_SET(qpc, qpc, user_index, uidx);
- /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
- if (init_attr->qp_type == IB_QPT_UD &&
- (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
- MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
- qp->flags |= MLX5_IB_QP_LSO;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
+ init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
}
- if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
- if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
- mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
- err = -EOPNOTSUPP;
- goto err;
- } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
- MLX5_SET(qpc, qpc, end_padding_mode,
- MLX5_WQ_END_PAD_MODE_ALIGN);
- } else {
- qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
- }
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
+ raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &params->resp);
+ } else
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
+
+static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(attr);
+ err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
}
- if (inlen < 0) {
- err = -EINVAL;
- goto err;
+ err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
+ if (err)
+ return err;
+
+ if (is_sqp(attr->qp_type))
+ qp->port = attr->port_num;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+
+ if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
+ else
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
- if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
- qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
- raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
- &resp);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
+
+ if (qp->sq.wqe_cnt)
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ else
+ MLX5_SET(qpc, qpc, no_sq, 1);
+
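+	/* Use the caller's SRQ when provided; otherwise fall back to the driver's default SRQ and XRCD resources. */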
+ if (attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(attr->srq)->msrq.srqn);
} else {
- err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
}
- if (err) {
- mlx5_ib_dbg(dev, "create qp failed\n");
- goto err_create;
- }
+ if (attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
+
+ if (attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
kvfree(in);
+ if (err)
+ goto err_create;
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
- get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+ get_cqs(qp->type, attr->send_cq, attr->recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2379,13 +2236,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return 0;
err_create:
- if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, pd, qp, base, udata);
- else if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
-
-err:
- kvfree(in);
+ destroy_qp(dev, qp, base, NULL);
return err;
}
@@ -2447,11 +2298,6 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
}
}
-static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
-{
- return to_mpd(qp->ibqp.pd);
-}
-
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
@@ -2472,14 +2318,10 @@ static void get_cqs(enum ib_qp_type qp_type,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET:
*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break;
-
- case IB_QPT_MAX:
default:
*send_cq = NULL;
*recv_cq = NULL;
@@ -2505,15 +2347,15 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
- !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
- NULL, &base->mqp);
+ NULL, &base->mqp, NULL);
} else {
struct mlx5_modify_raw_qp_param raw_qp_param = {
.operation = MLX5_CMD_OP_2RST_QP
@@ -2539,7 +2381,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (recv_cq)
list_del(&qp->cq_recv_list);
- if (qp->create_type == MLX5_QP_KERNEL) {
+ if (!udata) {
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
@@ -2550,7 +2392,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
err = mlx5_core_destroy_qp(dev, &base->mqp);
@@ -2559,254 +2401,454 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
base->mqp.qpn);
}
- if (qp->create_type == MLX5_QP_KERNEL)
- destroy_qp_kernel(dev, qp);
- else if (qp->create_type == MLX5_QP_USER)
- destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
+ destroy_qp(dev, qp, base, udata);
}
-static const char *ib_qp_type_str(enum ib_qp_type type)
+static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
{
- switch (type) {
- case IB_QPT_SMI:
- return "IB_QPT_SMI";
- case IB_QPT_GSI:
- return "IB_QPT_GSI";
+ struct ib_qp_init_attr *attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 uidx = params->uidx;
+ void *dctc;
+
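+	/* Only prepare the create_dct_in mailbox here; the DCT itself is created in firmware on the INIT->RTR modify. */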
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in)
+ return -ENOMEM;
+
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
+
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ }
+
+ qp->state = IB_QPS_RESET;
+
+ return 0;
+}
+
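+/*
+ * Validate the requested QP type against device capabilities (DCT, XRC,
+ * raw ethernet profile) and return the type that will actually be used.
+ */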
+static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ enum ib_qp_type *type)
+{
+ if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
+ goto out;
+
+ switch (attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_XRC_INI:
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ goto out;
+ fallthrough;
case IB_QPT_RC:
- return "IB_QPT_RC";
case IB_QPT_UC:
- return "IB_QPT_UC";
- case IB_QPT_UD:
- return "IB_QPT_UD";
- case IB_QPT_RAW_IPV6:
- return "IB_QPT_RAW_IPV6";
- case IB_QPT_RAW_ETHERTYPE:
- return "IB_QPT_RAW_ETHERTYPE";
- case IB_QPT_XRC_INI:
- return "IB_QPT_XRC_INI";
- case IB_QPT_XRC_TGT:
- return "IB_QPT_XRC_TGT";
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_DRIVER:
+ case IB_QPT_GSI:
+ if (dev->profile == &raw_eth_profile)
+ goto out;
case IB_QPT_RAW_PACKET:
- return "IB_QPT_RAW_PACKET";
+ case IB_QPT_UD:
case MLX5_IB_QPT_REG_UMR:
- return "MLX5_IB_QPT_REG_UMR";
- case IB_QPT_DRIVER:
- return "IB_QPT_DRIVER";
- case IB_QPT_MAX:
+ break;
default:
- return "Invalid QP type";
+ goto out;
}
+
+ *type = attr->qp_type;
+ return 0;
+
+out:
+ mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
+ return -EOPNOTSUPP;
}
-static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
- struct ib_qp_init_attr *attr,
- struct mlx5_ib_create_qp *ucmd,
- struct ib_udata *udata)
+static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- struct mlx5_ib_qp *qp;
- int err = 0;
- u32 uidx = MLX5_IB_DEFAULT_UIDX;
- void *dctc;
- if (!attr->srq || !attr->recv_cq)
- return ERR_PTR(-EINVAL);
+ if (!udata) {
+ /* Kernel create_qp callers */
+ if (attr->rwq_ind_tbl)
+ return -EOPNOTSUPP;
- err = get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &uidx);
- if (err)
- return ERR_PTR(err);
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_DRIVER:
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+ }
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ /* Userspace create_qp callers */
+ if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
+ mlx5_ib_dbg(dev,
+ "Raw Packet QP is only supported for CQE version > 0\n");
+ return -EINVAL;
+ }
- qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
- if (!qp->dct.in) {
- err = -ENOMEM;
- goto err_free;
+ if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
+ mlx5_ib_dbg(dev,
+ "Wrong QP type %d for the RWQ indirect table\n",
+ attr->qp_type);
+ return -EINVAL;
}
- MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
- dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
- qp->qp_sub_type = MLX5_IB_QPT_DCT;
- MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
- MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
- MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
- MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
- MLX5_SET(dctc, dctc, user_index, uidx);
+ switch (attr->qp_type) {
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case MLX5_IB_QPT_REG_UMR:
+ case IB_QPT_GSI:
+ mlx5_ib_dbg(dev, "Kernel doesn't support QP type %d\n",
+ attr->qp_type);
+ return -EINVAL;
+ default:
+ break;
+ }
- if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
- configure_responder_scat_cqe(attr, dctc);
+ /*
+ * We shouldn't ever see this warning; it means that kernel code
+ * is missing an ib_pd. Placed here to catch developers' mistakes.
+ */
+ WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
+ "There is a missing PD pointer assignment\n");
+ return 0;
+}
- qp->state = IB_QPS_RESET;
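+/*
+ * Accept a vendor (mlx5-specific) create flag only when @cond holds: an
+ * accepted flag is moved from *flags into qp->flags_en.  Bits left in
+ * *flags make process_vendor_flags() fail, except MLX5_QP_FLAG_SCATTER_CQE,
+ * which is silently dropped when the capability is missing.
+ */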
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
- return &qp->ibqp;
-err_free:
- kfree(qp);
- return ERR_PTR(err);
+ if (cond) {
+ qp->flags_en |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ /*
+ * We don't return an error if this flag was provided but
+ * mlx5 doesn't have the right capability.
+ */
+ *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
-static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
- struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
- struct ib_udata *udata)
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ void *ucmd, struct ib_qp_init_attr *attr)
{
- enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
- int err;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ bool cond;
+ int flags;
- if (!udata)
+ if (attr->rwq_ind_tbl)
+ flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
+ else
+ flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
+
+ switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+ case MLX5_QP_FLAG_TYPE_DCI:
+ qp->type = MLX5_IB_QPT_DCI;
+ break;
+ case MLX5_QP_FLAG_TYPE_DCT:
+ qp->type = MLX5_IB_QPT_DCT;
+ break;
+ default:
+ if (qp->type != IB_QPT_DRIVER)
+ break;
+ /*
+ * It is IB_QPT_DRIVER and either no subtype or a
+ * wrong subtype was provided.
+ */
return -EINVAL;
+ }
- if (udata->inlen < sizeof(*ucmd)) {
- mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+ if (qp->type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+ cond, qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+ qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+ qp);
+ }
+
+ if (qp->type == IB_QPT_RC)
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+ MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
+
+ cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
+ if (attr->rwq_ind_tbl && cond) {
+ mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
+ cond);
return -EINVAL;
}
- err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
- if (err)
- return err;
- if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
- init_attr->qp_type = MLX5_IB_QPT_DCI;
- } else {
- if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
- init_attr->qp_type = MLX5_IB_QPT_DCT;
- } else {
- mlx5_ib_dbg(dev, "Invalid QP flags\n");
- return -EINVAL;
- }
+ if (flags)
+ mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+ return (flags) ? -EINVAL : 0;
}
- if (!MLX5_CAP_GEN(dev->mdev, dct)) {
- mlx5_ib_dbg(dev, "DC transport is not supported\n");
- return -EOPNOTSUPP;
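+/*
+ * Same idea as process_vendor_flag(), but for IB core create_flags: an
+ * accepted flag is moved into qp->flags, and MLX5_IB_QP_CREATE_WC_TEST is
+ * silently dropped when the condition isn't met.
+ */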
+static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags |= flag;
+ *flags &= ~flag;
+ return;
}
- return 0;
+ if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
+ /*
+ * Special case: if the condition isn't met, it isn't an error,
+ * just a different in-kernel flow.
+ */
+ *flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
}
-struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *verbs_init_attr,
- struct ib_udata *udata)
+static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
{
- struct mlx5_ib_dev *dev;
- struct mlx5_ib_qp *qp;
- u16 xrcdn = 0;
- int err;
- struct ib_qp_init_attr mlx_init_attr;
- struct ib_qp_init_attr *init_attr = verbs_init_attr;
- struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
- udata, struct mlx5_ib_ucontext, ibucontext);
+ enum ib_qp_type qp_type = qp->type;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int create_flags = attr->create_flags;
+ bool cond;
- if (pd) {
- dev = to_mdev(pd->device);
+ if (qp->type == IB_QPT_UD && dev->profile == &raw_eth_profile)
+ if (create_flags & ~MLX5_IB_QP_CREATE_WC_TEST)
+ return -EINVAL;
- if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
- if (!ucontext) {
- mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
- return ERR_PTR(-EINVAL);
- } else if (!ucontext->cqe_version) {
- mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
- return ERR_PTR(-EINVAL);
- }
- }
- } else {
- /* being cautious here */
- if (init_attr->qp_type != IB_QPT_XRC_TGT &&
- init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
- pr_warn("%s: no PD for transport %s\n", __func__,
- ib_qp_type_str(init_attr->qp_type));
- return ERR_PTR(-EINVAL);
- }
- dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return (create_flags) ? -EINVAL : 0;
+
+ if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
+ return (create_flags) ? -EINVAL : 0;
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_CAP_GEN(mdev, block_lb_mc), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
+ MLX5_CAP_GEN(mdev, cd), qp);
+
+ if (qp_type == IB_QPT_UD) {
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
+ qp);
+ cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
+ cond, qp);
+ }
+
+ if (qp_type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, scatter_fcs);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_SCATTER_FCS, cond, qp);
+
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, vlan_cap);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
+ }
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING,
+ MLX5_CAP_GEN(mdev, end_pad), qp);
+
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
+ qp_type != MLX5_IB_QPT_REG_UMR, qp);
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
+ true, qp);
+
+ if (create_flags)
+ mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
+ create_flags);
+
+ return (create_flags) ? -EINVAL : 0;
+}
+
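+/*
+ * Decide how many bytes of the user command and response to copy, based on
+ * whether this is an RSS RAW QP and on which struct revision (with or
+ * without ECE) the caller's rdma-core provides.
+ */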
+static int process_udata_size(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ size_t ucmd = sizeof(struct mlx5_ib_create_qp);
+ struct ib_udata *udata = params->udata;
+ size_t outlen = udata->outlen;
+ size_t inlen = udata->inlen;
+
+ params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
+ params->ucmd_size = ucmd;
+ if (!params->is_rss_raw) {
+ /* User has old rdma-core, which doesn't support ECE */
+ size_t min_inlen =
+ offsetof(struct mlx5_ib_create_qp, ece_options);
+
+ /*
+ * We will check in check_ucmd_data() that the user
+ * cleared everything after inlen.
+ */
+ params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
+ goto out;
}
- if (init_attr->qp_type == IB_QPT_DRIVER) {
- struct mlx5_ib_create_qp ucmd;
+ /* RSS RAW QP */
+ if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
+ return -EINVAL;
- init_attr = &mlx_init_attr;
- memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
- err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
- if (err)
- return ERR_PTR(err);
+ if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
+ return -EINVAL;
- if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
- if (init_attr->cap.max_recv_wr ||
- init_attr->cap.max_recv_sge) {
- mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
- return ERR_PTR(-EINVAL);
- }
- } else {
- return mlx5_ib_create_dct(pd, init_attr, &ucmd, udata);
- }
+ ucmd = sizeof(struct mlx5_ib_create_qp_rss);
+ params->ucmd_size = ucmd;
+ if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
+ return -EINVAL;
+
+ params->inlen = min(ucmd, inlen);
+out:
+ if (!params->inlen)
+ mlx5_ib_dbg(dev, "udata is too small\n");
+
+ return (params->inlen) ? 0 : -EINVAL;
+}
+
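+/* Dispatch to the flow-specific create routine: RSS RAW, DCT, XRC_TGT, user or kernel QP. */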
+static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ int err;
+
+ if (params->is_rss_raw) {
+ err = create_rss_raw_qp_tir(dev, pd, qp, params);
+ goto out;
}
- switch (init_attr->qp_type) {
- case IB_QPT_XRC_TGT:
- case IB_QPT_XRC_INI:
- if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
- mlx5_ib_dbg(dev, "XRC not supported\n");
- return ERR_PTR(-ENOSYS);
- }
- init_attr->recv_cq = NULL;
- if (init_attr->qp_type == IB_QPT_XRC_TGT) {
- xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
- init_attr->send_cq = NULL;
- }
+ if (qp->type == MLX5_IB_QPT_DCT) {
+ err = create_dct(dev, pd, qp, params);
+ goto out;
+ }
- /* fall through */
- case IB_QPT_RAW_PACKET:
- case IB_QPT_RC:
- case IB_QPT_UC:
- case IB_QPT_UD:
- case IB_QPT_SMI:
- case MLX5_IB_QPT_HW_GSI:
- case MLX5_IB_QPT_REG_UMR:
- case MLX5_IB_QPT_DCI:
- qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp)
- return ERR_PTR(-ENOMEM);
+ if (qp->type == IB_QPT_XRC_TGT) {
+ err = create_xrc_tgt_qp(dev, qp, params);
+ goto out;
+ }
- err = create_qp_common(dev, pd, init_attr, udata, qp);
- if (err) {
- mlx5_ib_dbg(dev, "create_qp_common failed\n");
- kfree(qp);
- return ERR_PTR(err);
- }
+ if (params->udata)
+ err = create_user_qp(dev, pd, qp, params);
+ else
+ err = create_kernel_qp(dev, pd, qp, params);
- if (is_qp0(init_attr->qp_type))
- qp->ibqp.qp_num = 0;
- else if (is_qp1(init_attr->qp_type))
- qp->ibqp.qp_num = 1;
- else
- qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
+out:
+ if (err) {
+ mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
+ return err;
+ }
- mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
- qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
- init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
- init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
+ if (is_qp0(qp->type))
+ qp->ibqp.qp_num = 0;
+ else if (is_qp1(qp->type))
+ qp->ibqp.qp_num = 1;
+ else
+ qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
- qp->trans_qp.xrcdn = xrcdn;
+ mlx5_ib_dbg(dev,
+ "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
+ qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+ params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
+ -1,
+ params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
+ -1,
+ params->resp.ece_options);
- break;
+ return 0;
+}
- case IB_QPT_GSI:
- return mlx5_ib_gsi_create_qp(pd, init_attr);
+static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ int ret = 0;
- case IB_QPT_RAW_IPV6:
- case IB_QPT_RAW_ETHERTYPE:
- case IB_QPT_MAX:
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
+ -EINVAL :
+ 0;
+ break;
+ case IB_QPT_RAW_PACKET:
+ ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
+ break;
default:
- mlx5_ib_dbg(dev, "unsupported qp type %d\n",
- init_attr->qp_type);
- /* Don't support raw QPs */
- return ERR_PTR(-EOPNOTSUPP);
+ break;
}
- if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
- qp->qp_sub_type = init_attr->qp_type;
+ if (ret)
+ mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
- return &qp->ibqp;
+ return ret;
+}
+
+static int get_qp_uidx(struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (params->is_rss_raw)
+ return 0;
+
+ return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
}
static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
@@ -2828,6 +2870,150 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
return 0;
}
+static int check_ucmd_data(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ struct ib_udata *udata = params->udata;
+ size_t size, last;
+ int ret;
+
+ if (params->is_rss_raw)
+ /*
+ * These QPs don't have a "reserved" field in their
+ * create_qp input struct, so their data is always valid.
+ */
+ last = sizeof(struct mlx5_ib_create_qp_rss);
+ else
+ /* IB_QPT_RAW_PACKET doesn't have ECE data */
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ last = offsetof(struct mlx5_ib_create_qp, ece_options);
+ break;
+ default:
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
+ }
+
+ if (udata->inlen <= last)
+ return 0;
+
+ /*
+ * The user provides different create_qp structures based on the
+ * flow, and we need to know whether the memory after our
+ * create_qp struct was cleared.
+ */
+ size = udata->inlen - last;
+ ret = ib_is_udata_cleared(params->udata, last, size);
+ if (!ret)
+ mlx5_ib_dbg(
+ dev,
+ "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ udata->inlen, params->ucmd_size, last, size);
+ return ret ? 0 : -EINVAL;
+}
+
+struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_create_qp_params params = {};
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ enum ib_qp_type type;
+ int err;
+
+ dev = pd ? to_mdev(pd->device) :
+ to_mdev(to_mxrcd(attr->xrcd)->ibxrcd.device);
+
+ err = check_qp_type(dev, attr, &type);
+ if (err)
+ return ERR_PTR(err);
+
+ err = check_valid_flow(dev, pd, attr, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (attr->qp_type == IB_QPT_GSI)
+ return mlx5_ib_gsi_create_qp(pd, attr);
+
+ params.udata = udata;
+ params.uidx = MLX5_IB_DEFAULT_UIDX;
+ params.attr = attr;
+ params.is_rss_raw = !!attr->rwq_ind_tbl;
+
+ if (udata) {
+ err = process_udata_size(dev, &params);
+ if (err)
+ return ERR_PTR(err);
+
+ err = check_ucmd_data(dev, &params);
+ if (err)
+ return ERR_PTR(err);
+
+ params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
+ if (!params.ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
+ if (err)
+ goto free_ucmd;
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ qp->type = type;
+ if (udata) {
+ err = process_vendor_flags(dev, qp, params.ucmd, attr);
+ if (err)
+ goto free_qp;
+
+ err = get_qp_uidx(qp, &params);
+ if (err)
+ goto free_qp;
+ }
+ err = process_create_flags(dev, qp, attr);
+ if (err)
+ goto free_qp;
+
+ err = check_qp_attr(dev, qp, attr);
+ if (err)
+ goto free_qp;
+
+ err = create_qp(dev, pd, qp, &params);
+ if (err)
+ goto free_qp;
+
+ kfree(params.ucmd);
+ params.ucmd = NULL;
+
+ if (udata)
+ /*
+ * It is safe to copy the response for all user create QP flows,
+ * including MLX5_IB_QPT_DCT, which doesn't need it.
+ * In that case, resp will be filled with zeros.
+ */
+ err = ib_copy_to_udata(udata, &params.resp, params.outlen);
+ if (err)
+ goto destroy_qp;
+
+ return &qp->ibqp;
+
+destroy_qp:
+ if (qp->type == MLX5_IB_QPT_DCT)
+ mlx5_ib_destroy_dct(qp);
+ else
+ destroy_qp_common(dev, qp, udata);
+ qp = NULL;
+free_qp:
+ kfree(qp);
+free_ucmd:
+ kfree(params.ucmd);
+ return ERR_PTR(err);
+}
+
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2836,7 +3022,7 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
- if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ if (mqp->type == MLX5_IB_QPT_DCT)
return mlx5_ib_destroy_dct(mqp);
destroy_qp_common(dev, mqp, udata);
@@ -2846,14 +3032,13 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
return 0;
}
-static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
- const struct ib_qp_attr *attr,
- int attr_mask, __be32 *hw_access_flags_be)
+static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ void *qpc)
{
- u8 dest_rd_atomic;
- u32 access_flags, hw_access_flags = 0;
-
struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ u8 dest_rd_atomic;
+ u32 access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
@@ -2868,8 +3053,8 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (!dest_rd_atomic)
access_flags &= IB_ACCESS_REMOTE_WRITE;
- if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
+ MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
+
if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
int atomic_mode;
@@ -2877,15 +3062,11 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (atomic_mode < 0)
return -EOPNOTSUPP;
- hw_access_flags |= MLX5_QP_BIT_RAE;
- hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ MLX5_SET(qpc, qpc, rae, 1);
+ MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
}
- if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
-
- *hw_access_flags_be = cpu_to_be32(hw_access_flags);
-
+ MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
return 0;
}
@@ -2965,11 +3146,22 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
return err;
}
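+/*
+ * Derive the RoCE v2 UDP source port from the flow label, computing a flow
+ * label from the local/remote QPNs when none was supplied.
+ */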
+static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
+ u32 lqpn, u32 rqpn)
+{
+ u32 fl = ah->grh.flow_label;
+
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
+}
+
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
- const struct rdma_ah_attr *ah,
- struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr,
- bool alt)
+ const struct rdma_ah_attr *ah, void *path, u8 port,
+ int attr_mask, u32 path_flags,
+ const struct ib_qp_attr *attr, bool alt)
{
const struct ib_global_route *grh = rdma_ah_read_grh(ah);
int err;
@@ -2978,8 +3170,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u8 sl = rdma_ah_get_sl(ah);
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
- attr->pkey_index);
+ MLX5_SET(ads, path, pkey_index,
+ alt ? attr->alt_pkey_index : attr->pkey_index);
if (ah_flags & IB_AH_GRH) {
if (grh->sgid_index >=
@@ -2995,45 +3187,49 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
- memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
- if (qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_XRC_INI ||
- qp->ibqp.qp_type == IB_QPT_XRC_TGT)
- path->udp_sport =
- mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr);
- path->dci_cfi_prio_sl = (sl & 0x7) << 4;
+ ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
+ ah->roce.dmac);
+ if ((qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+ (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (attr_mask & IB_QP_DEST_QPN))
+ mlx5_set_path_udp_sport(path, ah,
+ qp->ibqp.qp_num,
+ attr->dest_qp_num);
+ MLX5_SET(ads, path, eth_prio, sl & 0x7);
gid_type = ah->grh.sgid_attr->gid_type;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
- path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
+ MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
} else {
- path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->fl_free_ar |=
- (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
- path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
- path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
- if (ah_flags & IB_AH_GRH)
- path->grh_mlid |= 1 << 7;
- path->dci_cfi_prio_sl = sl & 0xf;
+ MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
+ MLX5_SET(ads, path, free_ar,
+ !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
+ MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
+ MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
+ MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
+ MLX5_SET(ads, path, sl, sl);
}
if (ah_flags & IB_AH_GRH) {
- path->mgid_index = grh->sgid_index;
- path->hop_limit = grh->hop_limit;
- path->tclass_flowlabel =
- cpu_to_be32((grh->traffic_class << 20) |
- (grh->flow_label));
- memcpy(path->rgid, grh->dgid.raw, 16);
+ MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
+ MLX5_SET(ads, path, hop_limit, grh->hop_limit);
+ MLX5_SET(ads, path, tclass, grh->traffic_class);
+ MLX5_SET(ads, path, flow_label, grh->flow_label);
+ memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
+ sizeof(grh->dgid.raw));
}
err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
- path->static_rate = err;
- path->port = port;
+ MLX5_SET(ads, path, stat_rate, err);
+ MLX5_SET(ads, path, vhca_port_num, port);
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
+ MLX5_SET(ads, path, ack_timeout,
+ alt ? attr->alt_timeout : attr->timeout);
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
@@ -3050,10 +3246,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
@@ -3061,17 +3259,20 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
- MLX5_QP_OPTPAR_PRI_PORT,
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
@@ -3080,7 +3281,8 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
- MLX5_QP_OPTPAR_PKEY_INDEX,
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
},
},
[MLX5_QP_STATE_RTR] = {
@@ -3414,43 +3616,80 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return 0;
}
-static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
- struct mlx5_ib_pd *pd,
- struct mlx5_ib_qp_base *qp_base,
- u8 port_num, struct ib_udata *udata)
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
- unsigned int tx_port_affinity;
+ u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ atomic_t *tx_port_affinity;
+
+ if (ucontext)
+ tx_port_affinity = &ucontext->tx_port_affinity;
+ else
+ tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
- if (ucontext) {
- tx_port_affinity = (unsigned int)atomic_add_return(
- 1, &ucontext->tx_port_affinity) %
- MLX5_MAX_PORTS +
- 1;
+ return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+ MLX5_MAX_PORTS + 1;
+}
+
+static bool qp_supports_affinity(struct ib_qp *qp)
+{
+ if ((qp->qp_type == IB_QPT_RC) ||
+ (qp->qp_type == IB_QPT_UD) ||
+ (qp->qp_type == IB_QPT_UC) ||
+ (qp->qp_type == IB_QPT_RAW_PACKET) ||
+ (qp->qp_type == IB_QPT_XRC_INI) ||
+ (qp->qp_type == IB_QPT_XRC_TGT))
+ return true;
+ return false;
+}
+
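+/*
+ * Choose the LAG TX port affinity: the stored GSI port for QP1 proxies,
+ * a round-robin pick on the RESET->INIT transition, or the LAG slave
+ * derived from the AH's xmit_slave; 0 means leave it unset.
+ */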
+static unsigned int get_tx_affinity(struct ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, u8 init,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *qp_base;
+ unsigned int tx_affinity;
+
+ if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
+ qp_supports_affinity(qp)))
+ return 0;
+
+ if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ tx_affinity = mqp->gsi_lag_port;
+ else if (init)
+ tx_affinity = get_tx_affinity_rr(dev, udata);
+ else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
+ tx_affinity =
+ mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
+ else
+ return 0;
+
+ qp_base = &mqp->trans_qp.base;
+ if (ucontext)
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
- tx_port_affinity, qp_base->mqp.qpn, ucontext);
- } else {
- tx_port_affinity =
- (unsigned int)atomic_add_return(
- 1, &dev->port[port_num].roce.tx_port_affinity) %
- MLX5_MAX_PORTS +
- 1;
+ tx_affinity, qp_base->mqp.qpn, ucontext);
+ else
mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
- tx_port_affinity, qp_base->mqp.qpn);
- }
-
- return tx_port_affinity;
+ tx_affinity, qp_base->mqp.qpn);
+ return tx_affinity;
}
static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
struct mlx5_ib_qp *mqp = to_mqp(qp);
- struct mlx5_qp_context context = {};
struct mlx5_ib_qp_base *base;
u32 set_id;
+ u32 *qpc;
if (counter)
set_id = counter->id;
@@ -3458,11 +3697,15 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
base = &mqp->trans_qp.base;
- context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
- context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
- return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTS2RTS_QP,
- MLX5_QP_OPTPAR_COUNTER_SET_ID, &context,
- &base->mqp);
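+	/* Issue an RTS2RTS command that only updates counter_set_id, leaving the rest of the QP context untouched. */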
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+ MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
+ MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID);
+
+ qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
}
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
@@ -3470,6 +3713,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
const struct mlx5_ib_modify_qp *ucmd,
+ struct mlx5_ib_modify_qp_resp *resp,
struct ib_udata *udata)
{
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
@@ -3513,67 +3757,60 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
- struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd;
enum mlx5_qp_state mlx5_cur, mlx5_new;
- enum mlx5_qp_optpar optpar;
+ void *qpc, *pri_path, *alt_path;
+ enum mlx5_qp_optpar optpar = 0;
u32 set_id = 0;
int mlx5_st;
int err;
u16 op;
u8 tx_affinity = 0;
- mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
- qp->qp_sub_type : ibqp->qp_type);
+ mlx5_st = to_mlx5_st(qp->type);
if (mlx5_st < 0)
return -EINVAL;
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc)
return -ENOMEM;
- pd = get_pd(qp);
- context->flags = cpu_to_be32(mlx5_st << 16);
+ pd = to_mpd(qp->ibqp.pd);
+ MLX5_SET(qpc, qpc, st, mlx5_st);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
} else {
switch (attr->path_mig_state) {
case IB_MIG_MIGRATED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
break;
case IB_MIG_REARM:
- context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
break;
case IB_MIG_ARMED:
- context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
break;
}
}
- if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
- if ((ibqp->qp_type == IB_QPT_RC) ||
- (ibqp->qp_type == IB_QPT_UD &&
- !(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
- (ibqp->qp_type == IB_QPT_UC) ||
- (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
- (ibqp->qp_type == IB_QPT_XRC_INI) ||
- (ibqp->qp_type == IB_QPT_XRC_TGT)) {
- if (dev->lag_active) {
- u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
- tx_affinity = get_tx_affinity(dev, pd, base, p,
- udata);
- context->flags |= cpu_to_be32(tx_affinity << 24);
- }
- }
- }
+ tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT, udata);
+
+ MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
+ if (tx_affinity && new_state == IB_QPS_RTR &&
+ MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
+ optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
if (is_sqp(ibqp->qp_type)) {
- context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
+ MLX5_SET(qpc, qpc, log_msg_max, 8);
} else if ((ibqp->qp_type == IB_QPT_UD &&
- !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, 12);
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
@@ -3581,40 +3818,45 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) |
- (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
+ MLX5_SET(qpc, qpc, log_msg_max,
+ MLX5_CAP_GEN(dev->mdev, log_max_msg));
}
if (attr_mask & IB_QP_DEST_QPN)
- context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
+ MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
+ MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
/* todo implement counter_index functionality */
if (is_sqp(ibqp->qp_type))
- context->pri_path.port = qp->port;
+ MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
if (attr_mask & IB_QP_PORT)
- context->pri_path.port = attr->port_num;
+ MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
if (attr_mask & IB_QP_AV) {
- err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
- attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
+ err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port,
attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_TIMEOUT)
- context->pri_path.ackto_lt |= attr->timeout << 3;
+ MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
if (attr_mask & IB_QP_ALT_PATH) {
- err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
- &context->alt_path,
+ err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
attr->alt_port_num,
- attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ attr_mask | IB_QP_PKEY_INDEX |
+ IB_QP_TIMEOUT,
0, attr, true);
if (err)
goto out;
@@ -3623,75 +3865,68 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
- context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
- context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
- context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
- context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
+ MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ if (send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
+ if (recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
if (attr_mask & IB_QP_RNR_RETRY)
- context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
if (attr_mask & IB_QP_RETRY_CNT)
- context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- if (attr->max_rd_atomic)
- context->params1 |=
- cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
+ MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
if (attr_mask & IB_QP_SQ_PSN)
- context->next_send_psn = cpu_to_be32(attr->sq_psn);
+ MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- if (attr->max_dest_rd_atomic)
- context->params2 |=
- cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
- }
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
+ MLX5_SET(qpc, qpc, log_rra_max,
+ ilog2(attr->max_dest_rd_atomic));
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
- __be32 access_flags;
-
- err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+ err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
if (err)
goto out;
-
- context->params2 |= access_flags;
}
if (attr_mask & IB_QP_MIN_RNR_TIMER)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
+ MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
if (attr_mask & IB_QP_RQ_PSN)
- context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
+ MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
if (attr_mask & IB_QP_QKEY)
- context->qkey = cpu_to_be32(attr->qkey);
+ MLX5_SET(qpc, qpc, q_key, attr->qkey);
if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->db_rec_addr = cpu_to_be64(qp->db.dma);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
/* Underlay port should be used - index 0 function per port */
- if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
port_num = 0;
if (ibqp->counter)
set_id = ibqp->counter->id;
else
set_id = mlx5_ib_get_counters_id(dev, port_num);
- context->qp_counter_set_usr_page |=
- cpu_to_be32(set_id << 24);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
}
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- context->sq_crq_size |= cpu_to_be16(1 << 4);
+ MLX5_SET(qpc, qpc, rlky, 1);
- if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- context->deth_sqpn = cpu_to_be32(1);
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
@@ -3703,11 +3938,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
op = optab[mlx5_cur][mlx5_new];
- optpar = ib_mask_to_mlx5_opt(attr_mask);
+ optpar |= ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
raw_qp_param.operation = op;
@@ -3749,7 +3984,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
} else {
- err = mlx5_core_qp_modify(dev, op, optpar, context, &base->mqp);
+ if (udata) {
+ /* For the kernel flows, the resp will stay zero */
+ resp->ece_options =
+ MLX5_CAP_GEN(dev->mdev, ece_support) ?
+ ucmd->ece_options : 0;
+ resp->response_length = sizeof(*resp);
+ }
+ err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
+ &resp->ece_options);
}
if (err)
@@ -3796,7 +4039,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
out:
- kfree(context);
+ kfree(qpc);
return err;
}
@@ -3854,7 +4097,8 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
* Other transitions and attributes are illegal
*/
static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+ int attr_mask, struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
{
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -3870,6 +4114,15 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_state = attr->qp_state;
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
+ /*
+ * DCT doesn't initialize the QP until the modify command is
+ * executed, so we need to overwrite the previously set ECE
+ * field if the user provided any value except zero, which
+ * means not set/not valid.
+ */
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u16 set_id;
@@ -3902,17 +4155,23 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
-
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
- u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
- u32 min_resp_len = offsetof(typeof(resp), dctn) +
- sizeof(resp.dctn);
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+ u32 min_resp_len = offsetofend(typeof(resp), dctn);
if (udata->outlen < min_resp_len)
return -EINVAL;
resp.response_length = min_resp_len;
+ /*
+ * If we don't have enough space for the ECE options,
+ * simply indicate it with resp.response_length.
+ */
+ resp.response_length = (udata->outlen < sizeof(resp)) ?
+ min_resp_len :
+ sizeof(resp);
+
required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
if (!is_valid_mask(attr_mask, required, 0))
return -EINVAL;
@@ -3929,6 +4188,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (err)
return err;
resp.dctn = qp->dct.mdct.mqp.qpn;
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.ece_options = MLX5_GET(create_dct_out, out, ece);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err) {
mlx5_core_destroy_dct(dev, &qp->dct.mdct);
@@ -3949,11 +4210,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_modify_qp_resp resp = {};
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_modify_qp ucmd = {};
enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
- size_t required_cmd_sz;
int err = -EINVAL;
int port;
@@ -3961,9 +4222,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -ENOSYS;
if (udata && udata->inlen) {
- required_cmd_sz = offsetof(typeof(ucmd), reserved) +
- sizeof(ucmd.reserved);
- if (udata->inlen < required_cmd_sz)
+ if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
return -EINVAL;
if (udata->inlen > sizeof(ucmd) &&
@@ -3976,23 +4235,20 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return -EFAULT;
if (ucmd.comp_mask ||
- memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) ||
memchr_inv(&ucmd.burst_info.reserved, 0,
sizeof(ucmd.burst_info.reserved)))
return -EOPNOTSUPP;
+
}
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- if (ibqp->qp_type == IB_QPT_DRIVER)
- qp_type = qp->qp_sub_type;
- else
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? IB_QPT_GSI :
+ qp->type;
if (qp_type == MLX5_IB_QPT_DCT)
- return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
mutex_lock(&qp->mutex);
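The input side in the hunk above is the mirror image: accept any inlen that covers the fields the kernel actually consumes, and refuse inputs whose unknown or reserved space is non-zero. A minimal sketch with a hypothetical command layout (struct hypo_cmd and check_cmd() are illustrative names):

    struct hypo_cmd {
        __u32 comp_mask;
        __u32 ece_options;
        __u8  reserved[8];      /* must stay zero until it gains a meaning */
    };

    static int check_cmd(struct ib_udata *udata, struct hypo_cmd *ucmd)
    {
        if (udata->inlen < offsetofend(struct hypo_cmd, ece_options))
            return -EINVAL;     /* too old to carry what we need */

        if (ib_copy_from_udata(ucmd, udata,
                               min(udata->inlen, sizeof(*ucmd))))
            return -EFAULT;

        /* unknown bits and reserved bytes must be clear */
        if (ucmd->comp_mask ||
            memchr_inv(ucmd->reserved, 0, sizeof(ucmd->reserved)))
            return -EOPNOTSUPP;

        return 0;
    }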
@@ -4003,7 +4259,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
}
- if (qp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
attr_mask);
@@ -4063,1439 +4319,19 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
- new_state, &ucmd, udata);
-
-out:
- mutex_unlock(&qp->mutex);
- return err;
-}
-
-static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
- u32 wqe_sz, void **cur_edge)
-{
- u32 idx;
-
- idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
- *cur_edge = get_sq_edge(sq, idx);
-
- *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
-}
-
-/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
- * next nearby edge and get new address translation for current WQE position.
- * @sq - SQ buffer.
- * @seg: Current WQE position (16B aligned).
- * @wqe_sz: Total current WQE size [16B].
- * @cur_edge: Updated current edge.
- */
-static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
- u32 wqe_sz, void **cur_edge)
-{
- if (likely(*seg != *cur_edge))
- return;
-
- _handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
-}
-
-/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
- * pointers. At the end @seg is aligned to 16B regardless the copied size.
- * @sq - SQ buffer.
- * @cur_edge: Updated current edge.
- * @seg: Current WQE position (16B aligned).
- * @wqe_sz: Total current WQE size [16B].
- * @src: Pointer to copy from.
- * @n: Number of bytes to copy.
- */
-static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
- void **seg, u32 *wqe_sz, const void *src,
- size_t n)
-{
- while (likely(n)) {
- size_t leftlen = *cur_edge - *seg;
- size_t copysz = min_t(size_t, leftlen, n);
- size_t stride;
-
- memcpy(*seg, src, copysz);
-
- n -= copysz;
- src += copysz;
- stride = !n ? ALIGN(copysz, 16) : copysz;
- *seg += stride;
- *wqe_sz += stride >> 4;
- handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
- }
-}
-
-static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
-{
- struct mlx5_ib_cq *cq;
- unsigned cur;
-
- cur = wq->head - wq->tail;
- if (likely(cur + nreq < wq->max_post))
- return 0;
-
- cq = to_mcq(ib_cq);
- spin_lock(&cq->lock);
- cur = wq->head - wq->tail;
- spin_unlock(&cq->lock);
-
- return cur + nreq >= wq->max_post;
-}
-
-static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
- u64 remote_addr, u32 rkey)
-{
- rseg->raddr = cpu_to_be64(remote_addr);
- rseg->rkey = cpu_to_be32(rkey);
- rseg->reserved = 0;
-}
-
-static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
- void **seg, int *size, void **cur_edge)
-{
- struct mlx5_wqe_eth_seg *eseg = *seg;
-
- memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
-
- if (wr->send_flags & IB_SEND_IP_CSUM)
- eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
-
- if (wr->opcode == IB_WR_LSO) {
- struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
- size_t left, copysz;
- void *pdata = ud_wr->header;
- size_t stride;
-
- left = ud_wr->hlen;
- eseg->mss = cpu_to_be16(ud_wr->mss);
- eseg->inline_hdr.sz = cpu_to_be16(left);
-
- /* memcpy_send_wqe should get a 16B align address. Hence, we
- * first copy up to the current edge and then, if needed,
- * fall-through to memcpy_send_wqe.
- */
- copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
- left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
- stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
- sizeof(eseg->inline_hdr.start) + copysz, 16);
- *size += stride / 16;
- *seg += stride;
-
- if (copysz < left) {
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- left -= copysz;
- pdata += copysz;
- memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
- left);
- }
-
- return;
- }
-
- *seg += sizeof(struct mlx5_wqe_eth_seg);
- *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
-}
-
-static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
- const struct ib_send_wr *wr)
-{
- memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
- dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
- dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
-}
-
-static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
-{
- dseg->byte_count = cpu_to_be32(sg->length);
- dseg->lkey = cpu_to_be32(sg->lkey);
- dseg->addr = cpu_to_be64(sg->addr);
-}
-
-static u64 get_xlt_octo(u64 bytes)
-{
- return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
- MLX5_IB_UMR_OCTOWORD;
-}
-
-static __be64 frwr_mkey_mask(bool atomic)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE;
-
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
- return cpu_to_be64(result);
-}
-
-static __be64 sig_mkey_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR |
- MLX5_MKEY_MASK_EN_SIGERR |
- MLX5_MKEY_MASK_EN_RINVAL |
- MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW |
- MLX5_MKEY_MASK_SMALL_FENCE |
- MLX5_MKEY_MASK_FREE |
- MLX5_MKEY_MASK_BSF_EN;
-
- return cpu_to_be64(result);
-}
-
-static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, u8 flags, bool atomic)
-{
- int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
-
- memset(umr, 0, sizeof(*umr));
-
- umr->flags = flags;
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->mkey_mask = frwr_mkey_mask(atomic);
-}
-
-static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
-{
- memset(umr, 0, sizeof(*umr));
- umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
- umr->flags = MLX5_UMR_INLINE;
-}
-
-static __be64 get_umr_enable_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_KEY |
- MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_disable_mr_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_FREE;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_translation_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LEN |
- MLX5_MKEY_MASK_PAGE_SIZE |
- MLX5_MKEY_MASK_START_ADDR;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_access_mask(int atomic)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_LR |
- MLX5_MKEY_MASK_LW |
- MLX5_MKEY_MASK_RR |
- MLX5_MKEY_MASK_RW;
-
- if (atomic)
- result |= MLX5_MKEY_MASK_A;
-
- return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_pd_mask(void)
-{
- u64 result;
-
- result = MLX5_MKEY_MASK_PD;
-
- return cpu_to_be64(result);
-}
-
-static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
-{
- if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
- MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
- (mask & MLX5_MKEY_MASK_A &&
- MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
- return -EPERM;
- return 0;
-}
-
-static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
- struct mlx5_wqe_umr_ctrl_seg *umr,
- const struct ib_send_wr *wr, int atomic)
-{
- const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- memset(umr, 0, sizeof(*umr));
-
- if (!umrwr->ignore_free_state) {
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- /* fail if free */
- umr->flags = MLX5_UMR_CHECK_FREE;
- else
- /* fail if not free */
- umr->flags = MLX5_UMR_CHECK_NOT_FREE;
- }
-
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
- u64 offset = get_xlt_octo(umrwr->offset);
-
- umr->xlt_offset = cpu_to_be16(offset & 0xffff);
- umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
- umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
- umr->mkey_mask |= get_umr_update_translation_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
- umr->mkey_mask |= get_umr_update_access_mask(atomic);
- umr->mkey_mask |= get_umr_update_pd_mask();
- }
- if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
- umr->mkey_mask |= get_umr_enable_mr_mask();
- if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
- umr->mkey_mask |= get_umr_disable_mr_mask();
-
- if (!wr->num_sge)
- umr->flags |= MLX5_UMR_INLINE;
-
- return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
-}
-
-static u8 get_umr_flags(int acc)
-{
- return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
- (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
- (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
- (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
- MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
-}
-
-static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
- struct mlx5_ib_mr *mr,
- u32 key, int access)
-{
- int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
-
- memset(seg, 0, sizeof(*seg));
-
- if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
- seg->log2_page_size = ilog2(mr->ibmr.page_size);
- else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
- /* KLMs take twice the size of MTTs */
- ndescs *= 2;
-
- seg->flags = get_umr_flags(access) | mr->access_mode;
- seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
- seg->start_addr = cpu_to_be64(mr->ibmr.iova);
- seg->len = cpu_to_be64(mr->ibmr.length);
- seg->xlt_oct_size = cpu_to_be32(ndescs);
-}
-
-static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
-{
- memset(seg, 0, sizeof(*seg));
- seg->status = MLX5_MKEY_STATUS_FREE;
-}
-
-static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
- const struct ib_send_wr *wr)
-{
- const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
- memset(seg, 0, sizeof(*seg));
- if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
- seg->status = MLX5_MKEY_STATUS_FREE;
-
- seg->flags = convert_access(umrwr->access_flags);
- if (umrwr->pd)
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
- if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
- !umrwr->length)
- seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
-
- seg->start_addr = cpu_to_be64(umrwr->virt_addr);
- seg->len = cpu_to_be64(umrwr->length);
- seg->log2_page_size = umrwr->page_shift;
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
- mlx5_mkey_variant(umrwr->mkey));
-}
-
-static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
- struct mlx5_ib_mr *mr,
- struct mlx5_ib_pd *pd)
-{
- int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
-
- dseg->addr = cpu_to_be64(mr->desc_map);
- dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
- dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
-}
-
-static __be32 send_ieth(const struct ib_send_wr *wr)
-{
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- return wr->ex.imm_data;
-
- case IB_WR_SEND_WITH_INV:
- return cpu_to_be32(wr->ex.invalidate_rkey);
-
- default:
- return 0;
- }
-}
-
-static u8 calc_sig(void *wqe, int size)
-{
- u8 *p = wqe;
- u8 res = 0;
- int i;
-
- for (i = 0; i < size; i++)
- res ^= p[i];
-
- return ~res;
-}
-
-static u8 wq_sig(void *wqe)
-{
- return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
-}
-
-static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
- void **wqe, int *wqe_sz, void **cur_edge)
-{
- struct mlx5_wqe_inline_seg *seg;
- size_t offset;
- int inl = 0;
- int i;
-
- seg = *wqe;
- *wqe += sizeof(*seg);
- offset = sizeof(*seg);
-
- for (i = 0; i < wr->num_sge; i++) {
- size_t len = wr->sg_list[i].length;
- void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
-
- inl += len;
-
- if (unlikely(inl > qp->max_inline_data))
- return -ENOMEM;
-
- while (likely(len)) {
- size_t leftlen;
- size_t copysz;
+ new_state, &ucmd, &resp, udata);
- handle_post_send_edge(&qp->sq, wqe,
- *wqe_sz + (offset >> 4),
- cur_edge);
-
- leftlen = *cur_edge - *wqe;
- copysz = min_t(size_t, leftlen, len);
-
- memcpy(*wqe, addr, copysz);
- len -= copysz;
- addr += copysz;
- *wqe += copysz;
- offset += copysz;
- }
- }
-
- seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
-
- *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
-
- return 0;
-}
-
-static u16 prot_field_size(enum ib_signature_type type)
-{
- switch (type) {
- case IB_SIG_TYPE_T10_DIF:
- return MLX5_DIF_SIZE;
- default:
- return 0;
- }
-}
-
-static u8 bs_selector(int block_size)
-{
- switch (block_size) {
- case 512: return 0x1;
- case 520: return 0x2;
- case 4096: return 0x3;
- case 4160: return 0x4;
- case 1073741824: return 0x5;
- default: return 0;
- }
-}
-
-static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
- struct mlx5_bsf_inl *inl)
-{
- /* Valid inline section and allow BSF refresh */
- inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
- MLX5_BSF_REFRESH_DIF);
- inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
- inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
- /* repeating block */
- inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
- inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
- MLX5_DIF_CRC : MLX5_DIF_IPCS;
-
- if (domain->sig.dif.ref_remap)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
-
- if (domain->sig.dif.app_escape) {
- if (domain->sig.dif.ref_escape)
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
- else
- inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
- }
-
- inl->dif_app_bitmask_check =
- cpu_to_be16(domain->sig.dif.apptag_check_mask);
-}
-
-static int mlx5_set_bsf(struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_bsf *bsf, u32 data_size)
-{
- struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
- struct mlx5_bsf_basic *basic = &bsf->basic;
- struct ib_sig_domain *mem = &sig_attrs->mem;
- struct ib_sig_domain *wire = &sig_attrs->wire;
-
- memset(bsf, 0, sizeof(*bsf));
-
- /* Basic + Extended + Inline */
- basic->bsf_size_sbs = 1 << 7;
- /* Input domain check byte mask */
- basic->check_byte_mask = sig_attrs->check_mask;
- basic->raw_data_size = cpu_to_be32(data_size);
-
- /* Memory domain */
- switch (sig_attrs->mem.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
- basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
- mlx5_fill_inl_bsf(mem, &bsf->m_inl);
- break;
- default:
- return -EINVAL;
- }
-
- /* Wire domain */
- switch (sig_attrs->wire.sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
- mem->sig_type == wire->sig_type) {
- /* Same block structure */
- basic->bsf_size_sbs |= 1 << 4;
- if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
- basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
- if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
- if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
- basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
- } else
- basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
-
- basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
- mlx5_fill_inl_bsf(wire, &bsf->w_inl);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int set_sig_data_segment(const struct ib_send_wr *send_wr,
- struct ib_mr *sig_mr,
- struct ib_sig_attrs *sig_attrs,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- struct mlx5_bsf *bsf;
- u32 data_len;
- u32 data_key;
- u64 data_va;
- u32 prot_len = 0;
- u32 prot_key = 0;
- u64 prot_va = 0;
- bool prot = false;
- int ret;
- int wqe_size;
- struct mlx5_ib_mr *mr = to_mmr(sig_mr);
- struct mlx5_ib_mr *pi_mr = mr->pi_mr;
-
- data_len = pi_mr->data_length;
- data_key = pi_mr->ibmr.lkey;
- data_va = pi_mr->data_iova;
- if (pi_mr->meta_ndescs) {
- prot_len = pi_mr->meta_length;
- prot_key = pi_mr->ibmr.lkey;
- prot_va = pi_mr->pi_iova;
- prot = true;
- }
-
- if (!prot || (data_key == prot_key && data_va == prot_va &&
- data_len == prot_len)) {
- /**
- * Source domain doesn't contain signature information
- * or data and protection are interleaved in memory.
- * So need construct:
- * ------------------
- * | data_klm |
- * ------------------
- * | BSF |
- * ------------------
- **/
- struct mlx5_klm *data_klm = *seg;
-
- data_klm->bcount = cpu_to_be32(data_len);
- data_klm->key = cpu_to_be32(data_key);
- data_klm->va = cpu_to_be64(data_va);
- wqe_size = ALIGN(sizeof(*data_klm), 64);
- } else {
- /**
- * Source domain contains signature information
- * So need construct a strided block format:
- * ---------------------------
- * | stride_block_ctrl |
- * ---------------------------
- * | data_klm |
- * ---------------------------
- * | prot_klm |
- * ---------------------------
- * | BSF |
- * ---------------------------
- **/
- struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
- struct mlx5_stride_block_entry *data_sentry;
- struct mlx5_stride_block_entry *prot_sentry;
- u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
- int prot_size;
-
- sblock_ctrl = *seg;
- data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
- prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
-
- prot_size = prot_field_size(sig_attrs->mem.sig_type);
- if (!prot_size) {
- pr_err("Bad block size given: %u\n", block_size);
- return -EINVAL;
- }
- sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
- prot_size);
- sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
- sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
- sblock_ctrl->num_entries = cpu_to_be16(2);
-
- data_sentry->bcount = cpu_to_be16(block_size);
- data_sentry->key = cpu_to_be32(data_key);
- data_sentry->va = cpu_to_be64(data_va);
- data_sentry->stride = cpu_to_be16(block_size);
-
- prot_sentry->bcount = cpu_to_be16(prot_size);
- prot_sentry->key = cpu_to_be32(prot_key);
- prot_sentry->va = cpu_to_be64(prot_va);
- prot_sentry->stride = cpu_to_be16(prot_size);
-
- wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
- sizeof(*prot_sentry), 64);
- }
-
- *seg += wqe_size;
- *size += wqe_size / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- bsf = *seg;
- ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
- if (ret)
- return -EINVAL;
-
- *seg += sizeof(*bsf);
- *size += sizeof(*bsf) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- return 0;
-}
-
-static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- struct ib_mr *sig_mr, int access_flags,
- u32 size, u32 length, u32 pdn)
-{
- u32 sig_key = sig_mr->rkey;
- u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
-
- memset(seg, 0, sizeof(*seg));
-
- seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
- seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
- seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
- MLX5_MKEY_BSF_EN | pdn);
- seg->len = cpu_to_be64(length);
- seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
- seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
-}
-
-static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
- u32 size)
-{
- memset(umr, 0, sizeof(*umr));
-
- umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
- umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
- umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
- umr->mkey_mask = sig_mkey_mask();
-}
-
-static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- const struct ib_reg_wr *wr = reg_wr(send_wr);
- struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
- struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
- struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
- u32 pdn = get_pd(qp)->pdn;
- u32 xlt_size;
- int region_len, ret;
-
- if (unlikely(send_wr->num_sge != 0) ||
- unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
- unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
- unlikely(!sig_mr->sig->sig_status_checked))
- return -EINVAL;
-
- /* length of the protected region, data + protection */
- region_len = pi_mr->ibmr.length;
-
- /**
- * KLM octoword size - if protection was provided
- * then we use strided block format (3 octowords),
- * else we use single KLM (1 octoword)
- **/
- if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
- xlt_size = 0x30;
- else
- xlt_size = sizeof(struct mlx5_klm);
-
- set_sig_umr_segment(*seg, xlt_size);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
- pdn);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
- cur_edge);
- if (ret)
- return ret;
-
- sig_mr->sig->sig_status_checked = false;
- return 0;
-}
-
-static int set_psv_wr(struct ib_sig_domain *domain,
- u32 psv_idx, void **seg, int *size)
-{
- struct mlx5_seg_set_psv *psv_seg = *seg;
-
- memset(psv_seg, 0, sizeof(*psv_seg));
- psv_seg->psv_num = cpu_to_be32(psv_idx);
- switch (domain->sig_type) {
- case IB_SIG_TYPE_NONE:
- break;
- case IB_SIG_TYPE_T10_DIF:
- psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
- domain->sig.dif.app_tag);
- psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
- break;
- default:
- pr_err("Bad signature type (%d) is given.\n",
- domain->sig_type);
- return -EINVAL;
- }
-
- *seg += sizeof(*psv_seg);
- *size += sizeof(*psv_seg) / 16;
-
- return 0;
-}
-
-static int set_reg_wr(struct mlx5_ib_qp *qp,
- const struct ib_reg_wr *wr,
- void **seg, int *size, void **cur_edge,
- bool check_not_free)
-{
- struct mlx5_ib_mr *mr = to_mmr(wr->mr);
- struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
- struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
- int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
- bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
- bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
- u8 flags = 0;
-
- if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Fast update of %s for MR is disabled\n",
- (MLX5_CAP_GEN(dev->mdev,
- umr_modify_entity_size_disabled)) ?
- "entity size" :
- "atomic access");
- return -EINVAL;
- }
-
- if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
- mlx5_ib_warn(to_mdev(qp->ibqp.device),
- "Invalid IB_SEND_INLINE send flag\n");
- return -EINVAL;
- }
-
- if (check_not_free)
- flags |= MLX5_UMR_CHECK_NOT_FREE;
- if (umr_inline)
- flags |= MLX5_UMR_INLINE;
-
- set_reg_umr_seg(*seg, mr, flags, atomic);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
- if (umr_inline) {
- memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
- mr_list_size);
- *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
- } else {
- set_reg_data_seg(*seg, mr, pd);
- *seg += sizeof(struct mlx5_wqe_data_seg);
- *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
- }
- return 0;
-}
-
-static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
-{
- set_linv_umr_seg(*seg);
- *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- set_linv_mkey_seg(*seg);
- *seg += sizeof(struct mlx5_mkey_seg);
- *size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-}
-
-static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
-{
- __be32 *p = NULL;
- int i, j;
-
- pr_debug("dump WQE index %u:\n", idx);
- for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
- if ((i & 0xf) == 0) {
- p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
- pr_debug("WQBB at %p:\n", (void *)p);
- j = 0;
- idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
- }
- pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
- be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
- be32_to_cpu(p[j + 3]));
- }
-}
-
-static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- const struct ib_send_wr *wr, unsigned int *idx,
- int *size, void **cur_edge, int nreq,
- bool send_signaled, bool solicited)
-{
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
- return -ENOMEM;
-
- *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
- *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
- *ctrl = *seg;
- *(uint32_t *)(*seg + 8) = 0;
- (*ctrl)->imm = send_ieth(wr);
- (*ctrl)->fm_ce_se = qp->sq_signal_bits |
- (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
- (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
-
- *seg += sizeof(**ctrl);
- *size = sizeof(**ctrl) / 16;
- *cur_edge = qp->sq.cur_edge;
-
- return 0;
-}
-
-static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
- struct mlx5_wqe_ctrl_seg **ctrl,
- const struct ib_send_wr *wr, unsigned *idx,
- int *size, void **cur_edge, int nreq)
-{
- return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
- wr->send_flags & IB_SEND_SIGNALED,
- wr->send_flags & IB_SEND_SOLICITED);
-}
-
-static void finish_wqe(struct mlx5_ib_qp *qp,
- struct mlx5_wqe_ctrl_seg *ctrl,
- void *seg, u8 size, void *cur_edge,
- unsigned int idx, u64 wr_id, int nreq, u8 fence,
- u32 mlx5_opcode)
-{
- u8 opmod = 0;
-
- ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
- mlx5_opcode | ((u32)opmod << 24));
- ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
- ctrl->fm_ce_se |= fence;
- if (unlikely(qp->wq_sig))
- ctrl->signature = wq_sig(ctrl);
-
- qp->sq.wrid[idx] = wr_id;
- qp->sq.w_list[idx].opcode = mlx5_opcode;
- qp->sq.wqe_head[idx] = qp->sq.head + nreq;
- qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
- qp->sq.w_list[idx].next = qp->sq.cur_post;
-
- /* We save the edge which was possibly updated during the WQE
- * construction, into SQ's cache.
- */
- seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
- qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
- get_sq_edge(&qp->sq, qp->sq.cur_post &
- (qp->sq.wqe_cnt - 1)) :
- cur_edge;
-}
-
-static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr, bool drain)
-{
- struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_reg_wr reg_pi_wr;
- struct mlx5_ib_qp *qp;
- struct mlx5_ib_mr *mr;
- struct mlx5_ib_mr *pi_mr;
- struct mlx5_ib_mr pa_pi_mr;
- struct ib_sig_attrs *sig_attrs;
- struct mlx5_wqe_xrc_seg *xrc;
- struct mlx5_bf *bf;
- void *cur_edge;
- int uninitialized_var(size);
- unsigned long flags;
- unsigned idx;
- int err = 0;
- int num_sge;
- void *seg;
- int nreq;
- int i;
- u8 next_fence = 0;
- u8 fence;
-
- if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
- !drain)) {
- *bad_wr = wr;
- return -EIO;
- }
-
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
-
- qp = to_mqp(ibqp);
- bf = &qp->bf;
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- num_sge = wr->num_sge;
- if (unlikely(num_sge > qp->sq.max_gs)) {
- mlx5_ib_warn(dev, "\n");
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
- nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (wr->opcode == IB_WR_REG_MR ||
- wr->opcode == IB_WR_REG_MR_INTEGRITY) {
- fence = dev->umr_fence;
- next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- } else {
- if (wr->send_flags & IB_SEND_FENCE) {
- if (qp->next_fence)
- fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
- else
- fence = MLX5_FENCE_MODE_FENCE;
- } else {
- fence = qp->next_fence;
- }
- }
-
- switch (ibqp->qp_type) {
- case IB_QPT_XRC_INI:
- xrc = seg;
- seg += sizeof(*xrc);
- size += sizeof(*xrc) / 16;
- /* fall through */
- case IB_QPT_RC:
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
- err = -ENOSYS;
- *bad_wr = wr;
- goto out;
-
- case IB_WR_LOCAL_INV:
- qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
- ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
- set_linv_wr(qp, &seg, &size, &cur_edge);
- num_sge = 0;
- break;
-
- case IB_WR_REG_MR:
- qp->sq.wr_data[idx] = IB_WR_REG_MR;
- ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
- err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
- &cur_edge, true);
- if (err) {
- *bad_wr = wr;
- goto out;
- }
- num_sge = 0;
- break;
-
- case IB_WR_REG_MR_INTEGRITY:
- qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;
-
- mr = to_mmr(reg_wr(wr)->mr);
- pi_mr = mr->pi_mr;
-
- if (pi_mr) {
- memset(&reg_pi_wr, 0,
- sizeof(struct ib_reg_wr));
-
- reg_pi_wr.mr = &pi_mr->ibmr;
- reg_pi_wr.access = reg_wr(wr)->access;
- reg_pi_wr.key = pi_mr->ibmr.rkey;
-
- ctrl->imm = cpu_to_be32(reg_pi_wr.key);
- /* UMR for data + prot registration */
- err = set_reg_wr(qp, &reg_pi_wr, &seg,
- &size, &cur_edge,
- false);
- if (err) {
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size,
- cur_edge, idx, wr->wr_id,
- nreq, fence,
- MLX5_OPCODE_UMR);
-
- err = begin_wqe(qp, &seg, &ctrl, wr,
- &idx, &size, &cur_edge,
- nreq);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- } else {
- memset(&pa_pi_mr, 0,
- sizeof(struct mlx5_ib_mr));
- /* No UMR, use local_dma_lkey */
- pa_pi_mr.ibmr.lkey =
- mr->ibmr.pd->local_dma_lkey;
-
- pa_pi_mr.ndescs = mr->ndescs;
- pa_pi_mr.data_length = mr->data_length;
- pa_pi_mr.data_iova = mr->data_iova;
- if (mr->meta_ndescs) {
- pa_pi_mr.meta_ndescs =
- mr->meta_ndescs;
- pa_pi_mr.meta_length =
- mr->meta_length;
- pa_pi_mr.pi_iova = mr->pi_iova;
- }
-
- pa_pi_mr.ibmr.length = mr->ibmr.length;
- mr->pi_mr = &pa_pi_mr;
- }
- ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
- /* UMR for sig MR */
- err = set_pi_umr_wr(wr, qp, &seg, &size,
- &cur_edge);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, fence,
- MLX5_OPCODE_UMR);
-
- /*
- * SET_PSV WQEs are not signaled and solicited
- * on error
- */
- sig_attrs = mr->ibmr.sig_attrs;
- err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
- &size, &cur_edge, nreq, false,
- true);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- err = set_psv_wr(&sig_attrs->mem,
- mr->sig->psv_memory.psv_idx,
- &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, next_fence,
- MLX5_OPCODE_SET_PSV);
-
- err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
- &size, &cur_edge, nreq, false,
- true);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
- err = set_psv_wr(&sig_attrs->wire,
- mr->sig->psv_wire.psv_idx,
- &seg, &size);
- if (err) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, next_fence,
- MLX5_OPCODE_SET_PSV);
-
- qp->next_fence =
- MLX5_FENCE_MODE_INITIATOR_SMALL;
- num_sge = 0;
- goto skip_psv;
-
- default:
- break;
- }
- break;
-
- case IB_QPT_UC:
- switch (wr->opcode) {
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
- size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
- break;
-
- default:
- break;
- }
- break;
-
- case IB_QPT_SMI:
- if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
- mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
- err = -EPERM;
- *bad_wr = wr;
- goto out;
- }
- /* fall through */
- case MLX5_IB_QPT_HW_GSI:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
-
- break;
- case IB_QPT_UD:
- set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
- size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
-
- /* handle qp that supports ud offload */
- if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
- struct mlx5_wqe_eth_pad *pad;
-
- pad = seg;
- memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
- seg += sizeof(struct mlx5_wqe_eth_pad);
- size += sizeof(struct mlx5_wqe_eth_pad) / 16;
- set_eth_seg(wr, qp, &seg, &size, &cur_edge);
- handle_post_send_edge(&qp->sq, &seg, size,
- &cur_edge);
- }
- break;
- case MLX5_IB_QPT_REG_UMR:
- if (wr->opcode != MLX5_IB_WR_UMR) {
- err = -EINVAL;
- mlx5_ib_warn(dev, "bad opcode\n");
- goto out;
- }
- qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
- ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
- err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
- if (unlikely(err))
- goto out;
- seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
- size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
- set_reg_mkey_segment(seg, wr);
- seg += sizeof(struct mlx5_mkey_seg);
- size += sizeof(struct mlx5_mkey_seg) / 16;
- handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
- break;
-
- default:
- break;
- }
-
- if (wr->send_flags & IB_SEND_INLINE && num_sge) {
- err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
- if (unlikely(err)) {
- mlx5_ib_warn(dev, "\n");
- *bad_wr = wr;
- goto out;
- }
- } else {
- for (i = 0; i < num_sge; i++) {
- handle_post_send_edge(&qp->sq, &seg, size,
- &cur_edge);
- if (likely(wr->sg_list[i].length)) {
- set_data_ptr_seg
- ((struct mlx5_wqe_data_seg *)seg,
- wr->sg_list + i);
- size += sizeof(struct mlx5_wqe_data_seg) / 16;
- seg += sizeof(struct mlx5_wqe_data_seg);
- }
- }
- }
-
- qp->next_fence = next_fence;
- finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
- fence, mlx5_ib_opcode[wr->opcode]);
-skip_psv:
- if (0)
- dump_wqe(qp, idx, size);
- }
-
-out:
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- /* Make sure that descriptors are written before
- * updating doorbell record and ringing the doorbell
- */
- wmb();
-
- qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
-
- /* Make sure doorbell record is visible to the HCA before
- * we hit doorbell */
- wmb();
-
- mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
- /* Make sure doorbells don't leak out of SQ spinlock
- * and reach the HCA out of order.
- */
- bf->offset ^= bf->buf_size;
- }
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return err;
-}
-
-int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
-}
-
-static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
-{
- sig->signature = calc_sig(sig, size);
-}
-
-static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr, bool drain)
-{
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
- struct mlx5_wqe_data_seg *scat;
- struct mlx5_rwqe_sig *sig;
- struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = dev->mdev;
- unsigned long flags;
- int err = 0;
- int nreq;
- int ind;
- int i;
-
- if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
- !drain)) {
- *bad_wr = wr;
- return -EIO;
- }
-
- if (unlikely(ibqp->qp_type == IB_QPT_GSI))
- return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
-
- spin_lock_irqsave(&qp->rq.lock, flags);
-
- ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
-
- for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
- err = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- if (unlikely(wr->num_sge > qp->rq.max_gs)) {
- err = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
- if (qp->wq_sig)
- scat++;
-
- for (i = 0; i < wr->num_sge; i++)
- set_data_ptr_seg(scat + i, wr->sg_list + i);
-
- if (i < qp->rq.max_gs) {
- scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
- scat[i].addr = 0;
- }
-
- if (qp->wq_sig) {
- sig = (struct mlx5_rwqe_sig *)scat;
- set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
- }
-
- qp->rq.wrid[ind] = wr->wr_id;
-
- ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
- }
+ /* resp.response_length is set only in ECE-supported flows */
+ if (!err && resp.response_length &&
+ udata->outlen >= resp.response_length)
+ /* Return -EFAULT to the user and expect them to destroy the QP. */
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
out:
- if (likely(nreq)) {
- qp->rq.head += nreq;
-
- /* Make sure that descriptors are written before
- * doorbell record.
- */
- wmb();
-
- *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
- }
-
- spin_unlock_irqrestore(&qp->rq.lock, flags);
-
+ mutex_unlock(&qp->mutex);
return err;
}
-int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
-}
-
static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
switch (mlx5_state) {
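The bulk of the removed code above (the post_send/post_recv machinery) moves into the new wr.c added at the end of this diff. One detail worth keeping in mind when reading it there is the doorbell ordering, condensed below from the removed _mlx5_ib_post_send() body:

    /* after building nreq WQEs in the SQ buffer */
    qp->sq.head += nreq;

    /* descriptors must be visible before the doorbell record update */
    wmb();
    qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

    /* and the doorbell record before the HCA doorbell itself */
    wmb();
    mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
    bf->offset ^= bf->buf_size;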
@@ -5521,50 +4357,35 @@ static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
}
}
-static int to_ib_qp_access_flags(int mlx5_flags)
-{
- int ib_flags = 0;
-
- if (mlx5_flags & MLX5_QP_BIT_RRE)
- ib_flags |= IB_ACCESS_REMOTE_READ;
- if (mlx5_flags & MLX5_QP_BIT_RWE)
- ib_flags |= IB_ACCESS_REMOTE_WRITE;
- if (mlx5_flags & MLX5_QP_BIT_RAE)
- ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
-
- return ib_flags;
-}
-
static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
- struct rdma_ah_attr *ah_attr,
- struct mlx5_qp_path *path)
+ struct rdma_ah_attr *ah_attr, void *path)
{
+ int port = MLX5_GET(ads, path, vhca_port_num);
+ int static_rate;
memset(ah_attr, 0, sizeof(*ah_attr));
- if (!path->port || path->port > ibdev->num_ports)
+ if (!port || port > ibdev->num_ports)
return;
- ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
- rdma_ah_set_port_num(ah_attr, path->port);
- rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf);
+ rdma_ah_set_port_num(ah_attr, port);
+ rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
- rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
- rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
- rdma_ah_set_static_rate(ah_attr,
- path->static_rate ? path->static_rate - 5 : 0);
+ rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
+ rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
- if (path->grh_mlid & (1 << 7) ||
+ static_rate = MLX5_GET(ads, path, stat_rate);
+ rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
+ if (MLX5_GET(ads, path, grh) ||
ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
- u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
-
- rdma_ah_set_grh(ah_attr, NULL,
- tc_fl & 0xfffff,
- path->mgid_index,
- path->hop_limit,
- (tc_fl >> 20) & 0xff);
- rdma_ah_set_dgid_raw(ah_attr, path->rgid);
+ rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
+ MLX5_GET(ads, path, src_addr_index),
+ MLX5_GET(ads, path, hop_limit),
+ MLX5_GET(ads, path, tclass));
+ memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
+ MLX5_FLD_SZ_BYTES(ads, rgid_rip));
}
}
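For readers not used to the mlx5_ifc accessors, the change above is purely representational: the old code byte-swapped a packed struct and masked fields by hand, the new code lets MLX5_GET() pull each field out of the generated 'ads' layout. A side-by-side sketch of one field pair taken from this hunk (the _old/_new suffixes only keep the snippet compilable; 'path' is a struct mlx5_qp_path * in the old code and a raw qpc pointer in the new one):

    /* old style: byte-swap, then shift and mask by hand */
    u32 tc_fl          = be32_to_cpu(path->tclass_flowlabel);
    u32 flow_label_old = tc_fl & 0xfffff;
    u8  tclass_old     = (tc_fl >> 20) & 0xff;

    /* new style: width and offset come from the ads layout */
    u32 flow_label_new = MLX5_GET(ads, path, flow_label);
    u8  tclass_new     = MLX5_GET(ads, path, tclass);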
@@ -5686,10 +4507,9 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_qp_attr *qp_attr)
{
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
- struct mlx5_qp_context *context;
- int mlx5_state;
+ void *qpc, *pri_path, *alt_path;
u32 *outb;
- int err = 0;
+ int err;
outb = kzalloc(outlen, GFP_KERNEL);
if (!outb)
@@ -5699,47 +4519,46 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
goto out;
- /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
- context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc);
+ qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
- mlx5_state = be32_to_cpu(context->flags) >> 28;
+ qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
+ if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
+ qp_attr->sq_draining = 1;
- qp->state = to_ib_qp_state(mlx5_state);
- qp_attr->path_mtu = context->mtu_msgmax >> 5;
- qp_attr->path_mig_state =
- to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
- qp_attr->qkey = be32_to_cpu(context->qkey);
- qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
- qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
- qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
- qp_attr->qp_access_flags =
- to_ib_qp_access_flags(be32_to_cpu(context->params2));
+ qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
+ qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
+ qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
+ qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
+ qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
+ qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
- if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
- to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
- to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index =
- be16_to_cpu(context->alt_path.pkey_index);
- qp_attr->alt_port_num =
- rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
- }
+ if (MLX5_GET(qpc, qpc, rre))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(qpc, qpc, rwe))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(qpc, qpc, rae))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
- qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
- qp_attr->port_num = context->pri_path.port;
+ qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
+ qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
+ qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
+ qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
+ qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
- /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
- qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
- qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
+ qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
+ qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
+ }
- qp_attr->max_dest_rd_atomic =
- 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
- qp_attr->min_rnr_timer =
- (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
- qp_attr->timeout = context->pri_path.ackto_lt >> 3;
- qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
- qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
- qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
+ qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
+ qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
+ qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
+ qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
out:
kfree(outb);
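One conversion in the hunk above is easy to misread: log_sra_max and log_rra_max hold the log2 of the depth, so the query path expands them with a shift. On the modify side the driver stores the log2 back, along the lines of the sketch below (the MLX5_SET line is an assumption about the modify path, which is outside this hunk):

    /* query: the QPC stores log2, the verbs attribute wants the value */
    qp_attr->max_rd_atomic      = 1 << MLX5_GET(qpc, qpc, log_sra_max);
    qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);

    /* modify (assumed shape): the value goes back in as its log2 */
    MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));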
@@ -5830,14 +4649,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
- if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ if (unlikely(qp->type == MLX5_IB_QPT_DCT))
return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
qp_attr_mask, qp_init_attr);
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
- qp->flags & MLX5_IB_QP_UNDERLAY) {
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
if (err)
goto out;
@@ -5871,18 +4690,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->cap = qp_attr->cap;
- qp_init_attr->create_flags = 0;
- if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
- qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
-
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
- qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL;
- if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
- qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
- if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
- qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
- if (qp->flags & MLX5_IB_QP_SQPN_QP1)
- qp_init_attr->create_flags |= MLX5_IB_QP_CREATE_SQPN_QP1;
+ qp_init_attr->create_flags = qp->flags;
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
@@ -6543,7 +5351,7 @@ void mlx5_ib_drain_sq(struct ib_qp *qp)
sdrain.cqe.done = mlx5_ib_drain_qp_done;
init_completion(&sdrain.done);
- ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true);
+ ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
@@ -6573,7 +5381,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
rdrain.cqe.done = mlx5_ib_drain_qp_done;
init_completion(&rdrain.done);
- ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true);
+ ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index ad9d76e3e18a..82ea2b94dfa6 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -13,10 +13,10 @@ void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
u32 *in, int inlen, u32 *out, int outlen);
-int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
- u32 *in, int inlen);
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out);
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
- void *qpc, struct mlx5_core_qp *qp);
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece);
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index ea62735042f0..c19d91d6dce8 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -236,16 +236,16 @@ err_cmd:
return err;
}
-int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
- u32 *in, int inlen)
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out)
{
- u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
int err;
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
- err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
+ MLX5_ST_SZ_BYTES(create_qp_out));
if (err)
return err;
@@ -341,9 +341,30 @@ static void mbox_free(struct mbox_info *mbox)
kfree(mbox->out);
}
+static int get_ece_from_mbox(void *out, u16 opcode)
+{
+ int ece = 0;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ ece = MLX5_GET(init2rtr_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ ece = MLX5_GET(rtr2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ ece = MLX5_GET(rts2rts_qp_out, out, ece);
+ break;
+ default:
+ break;
+ }
+
+ return ece;
+}
+
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
u32 opt_param_mask, void *qpc,
- struct mbox_info *mbox, u16 uid)
+ struct mbox_info *mbox, u16 uid, u32 ece)
{
mbox->out = NULL;
mbox->in = NULL;
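get_ece_from_mbox() works because the three *_qp_out layouts that carry ECE expose the field under the same name; the opcode only selects which generated layout to decode, and every other opcode quietly yields 0. A small usage sketch (the local variables are hypothetical):

    u32 out[MLX5_ST_SZ_DW(init2rtr_qp_out)] = {};
    u32 ece;

    /* assume mlx5_cmd_exec() has just filled 'out' for an INIT2RTR command */
    ece = get_ece_from_mbox(out, MLX5_CMD_OP_INIT2RTR_QP);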
@@ -391,18 +412,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_RTR2RTS_QP:
if (MBOX_ALLOC(mbox, rtr2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_RTS2RTS_QP:
if (MBOX_ALLOC(mbox, rts2rts_qp))
return -ENOMEM;
MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
if (MBOX_ALLOC(mbox, sqerr2rts_qp))
@@ -423,18 +447,22 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
}
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
- void *qpc, struct mlx5_core_qp *qp)
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
struct mbox_info mbox;
int err;
- err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn,
- opt_param_mask, qpc, &mbox, qp->uid);
+ err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
+ qpc, &mbox, qp->uid, (ece) ? *ece : 0);
if (err)
return err;
err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
mbox.outlen);
+
+ if (ece)
+ *ece = get_ece_from_mbox(mbox.out, opcode);
+
mbox_free(&mbox);
return err;
}
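Putting the qpc.c pieces together: a caller that cares about ECE hands mlx5_core_qp_modify() a pointer, the value it points to goes out with the command and comes back negotiated, and passing NULL opts out entirely. A usage sketch of that round trip (in qp.c the driver actually passes &resp->ece_options directly rather than a local):

    u32 ece = ucmd->ece_options;    /* what userspace asked for, 0 if unset */
    int err;

    err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_INIT2RTR_QP, optpar, qpc,
                              &base->mqp, &ece);
    if (!err)
        resp->ece_options = ece;    /* negotiated value returned to userspace */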
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index b1a8a9175040..6d1ff13d2283 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -310,12 +310,18 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
srq->msrq.event = mlx5_ib_srq_event;
srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
- if (udata)
- if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
+ if (udata) {
+ struct mlx5_ib_create_srq_resp resp = {
+ .srqn = srq->msrq.srqn,
+ };
+
+ if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
+ sizeof(resp)))) {
mlx5_ib_dbg(dev, "copy to user failed\n");
err = -EFAULT;
goto err_core;
}
+ }
init_attr->attr.max_wr = srq->msrq.max - 1;
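The srq.c change is the same compatibility idiom seen earlier in the QP paths: copy at most udata->outlen bytes of a zero-initialized response struct, so userspace built against an older struct keeps working. Before and after, condensed from the hunk above:

    struct mlx5_ib_create_srq_resp resp = { .srqn = srq->msrq.srqn };
    int err;

    /* before: fixed-size copy of just the srqn */
    err = ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32));

    /* after: copy no more than userspace provided room for */
    err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));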
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index c851570791af..6f5eadc4d183 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -132,38 +132,33 @@ static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
- MLX5_SET(destroy_srq_in, srq_in, opcode,
- MLX5_CMD_OP_DESTROY_SRQ);
- MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
+ MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- sizeof(srq_out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
- u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
- MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
- MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
- MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- sizeof(srq_out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
u32 *srq_out;
void *srqc;
int err;
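The rest of srq_cmd.c is a mechanical conversion: commands whose output carries nothing beyond status lose their on-stack out[] and go through mlx5_cmd_exec_in(), which sizes and declares the output buffer itself, while commands with real output switch to mlx5_cmd_exec_inout(). The destroy_srq case from the hunk above, written out as full before/after functions (the _old/_new suffixes are only there to keep both versions in one snippet):

    static int destroy_srq_cmd_old(struct mlx5_ib_dev *dev,
                                   struct mlx5_core_srq *srq)
    {
        u32 in[MLX5_ST_SZ_DW(destroy_srq_in)]   = {};
        u32 out[MLX5_ST_SZ_DW(destroy_srq_out)] = {};

        MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
        MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
        MLX5_SET(destroy_srq_in, in, uid, srq->uid);
        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
    }

    static int destroy_srq_cmd_new(struct mlx5_ib_dev *dev,
                                   struct mlx5_core_srq *srq)
    {
        u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};

        MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
        MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
        MLX5_SET(destroy_srq_in, in, uid, srq->uid);
        /* the macro declares a correctly sized out[] on the stack */
        return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
    }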
@@ -172,11 +167,9 @@ static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (!srq_out)
return -ENOMEM;
- MLX5_SET(query_srq_in, srq_in, opcode,
- MLX5_CMD_OP_QUERY_SRQ);
- MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
- MLX5_ST_SZ_BYTES(query_srq_out));
+ MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
if (err)
goto out;
@@ -234,39 +227,35 @@ out:
static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_DESTROY_XRC_SRQ);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}
static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
- MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, sizeof(xrcsrq_out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}
static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
u32 *xrcsrq_out;
void *xrc_srqc;
int err;
@@ -274,14 +263,11 @@ static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
if (!xrcsrq_out)
return -ENOMEM;
- memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
- MLX5_CMD_OP_QUERY_XRC_SRQ);
- MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
- xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
if (err)
goto out;
@@ -341,13 +327,12 @@ out:
static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
@@ -384,7 +369,7 @@ static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
- err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
out:
kvfree(in);
@@ -414,7 +399,7 @@ static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
if (err)
goto out;
@@ -477,36 +462,34 @@ static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
- MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}
static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
u16 lwm)
{
- u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
- MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
- MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
- MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}
static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
+ u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
u32 *xrq_out;
int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
void *xrqc;
@@ -519,7 +502,7 @@ static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
- err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
new file mode 100644
index 000000000000..2c6df1c43b55
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -0,0 +1,1504 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "wr.h"
+
+static const u32 mlx5_ib_opcode[] = {
+ [IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
+ [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
+ [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
+ [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
+ [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
+ [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
+ [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
+ [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
+};
+
+/* handle_post_send_edge - Check whether we have reached the SQ edge. If so,
+ * update to the next nearby edge and get a new address translation for the
+ * current WQE position.
+ * @sq: SQ buffer.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @cur_edge: Updated current edge.
+ */
+static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
+ u32 wqe_sz, void **cur_edge)
+{
+ u32 idx;
+
+ if (likely(*seg != *cur_edge))
+ return;
+
+ idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
+ *cur_edge = get_sq_edge(sq, idx);
+
+ *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
+}
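
handle_post_send_edge() relies on the SQ stride count being a power of two, so the next stride index can be computed with a mask instead of a modulo. A minimal user-space sketch of that index arithmetic follows; WQE_CNT and next_idx are hypothetical names for illustration only, and wqe_sz is counted in 16-byte units, four of which make one 64-byte WQE basic block.

#include <stdio.h>

#define WQE_CNT 16 /* hypothetical ring size, must be a power of two */

/* Mirrors the "(cur_post + (wqe_sz >> 2)) & (wqe_cnt - 1)" computation above. */
static unsigned int next_idx(unsigned int cur_post, unsigned int wqe_sz_16b)
{
        return (cur_post + (wqe_sz_16b >> 2)) & (WQE_CNT - 1);
}

int main(void)
{
        /* a 128-byte WQE (8 x 16B) posted at slot 15 wraps around to slot 1 */
        printf("%u\n", next_idx(15, 8));
        return 0;
}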
+
+/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
+ * pointers. At the end, @seg is aligned to 16B regardless of the copied size.
+ * @sq: SQ buffer.
+ * @cur_edge: Updated current edge.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @src: Pointer to copy from.
+ * @n: Number of bytes to copy.
+ */
+static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
+ void **seg, u32 *wqe_sz, const void *src,
+ size_t n)
+{
+ while (likely(n)) {
+ size_t leftlen = *cur_edge - *seg;
+ size_t copysz = min_t(size_t, leftlen, n);
+ size_t stride;
+
+ memcpy(*seg, src, copysz);
+
+ n -= copysz;
+ src += copysz;
+ stride = !n ? ALIGN(copysz, 16) : copysz;
+ *seg += stride;
+ *wqe_sz += stride >> 4;
+ handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
+ }
+}
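
The loop above copies in chunks bounded by the distance to the current edge and jumps to the next fragment whenever a chunk fills it. A rough user-space analogue, assuming hypothetical fixed-size fragments in place of the driver's fragmented SQ buffer:

#include <stdio.h>
#include <string.h>

#define FRAG_SZ 64 /* hypothetical fragment size */

static void copy_across_fragments(char frags[][FRAG_SZ], int nfrags,
                                  const char *src, size_t n)
{
        int frag = 0;
        size_t off = 0;

        while (n) {
                size_t room = FRAG_SZ - off;          /* space before the "edge" */
                size_t copysz = n < room ? n : room;

                memcpy(&frags[frag][off], src, copysz);
                src += copysz;
                n -= copysz;
                off += copysz;
                if (off == FRAG_SZ && frag + 1 < nfrags) {
                        frag++;                       /* move to the next fragment */
                        off = 0;
                }
        }
}

int main(void)
{
        char frags[2][FRAG_SZ] = { { 0 } };
        char payload[100];

        memset(payload, 'x', sizeof(payload));
        copy_across_fragments(frags, 2, payload, sizeof(payload));
        printf("%c %c\n", frags[0][63], frags[1][35]); /* x x */
        return 0;
}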
+
+static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq,
+ struct ib_cq *ib_cq)
+{
+ struct mlx5_ib_cq *cq;
+ unsigned int cur;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max_post))
+ return 0;
+
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max_post;
+}
+
+static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size, void **cur_edge)
+{
+ struct mlx5_wqe_eth_seg *eseg = *seg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ size_t left, copysz;
+ void *pdata = ud_wr->header;
+ size_t stride;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr.sz = cpu_to_be16(left);
+
+ /* memcpy_send_wqe expects a 16B-aligned address. Hence, we
+ * first copy up to the current edge and then, if needed,
+ * continue with memcpy_send_wqe.
+ */
+ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ left);
+ memcpy(eseg->inline_hdr.start, pdata, copysz);
+ stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ sizeof(eseg->inline_hdr.start) + copysz, 16);
+ *size += stride / 16;
+ *seg += stride;
+
+ if (copysz < left) {
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ left -= copysz;
+ pdata += copysz;
+ memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
+ left);
+ }
+
+ return;
+ }
+
+ *seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+}
+
+static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
+ const struct ib_send_wr *wr)
+{
+ memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct =
+ cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
+}
+
+static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static u64 get_xlt_octo(u64 bytes)
+{
+ return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+ MLX5_IB_UMR_OCTOWORD;
+}
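
get_xlt_octo() rounds the byte count up to the UMR translation-table alignment and expresses the result in 16-byte octowords. A quick arithmetic check, assuming the customary values of 64 for MLX5_IB_UMR_XLT_ALIGNMENT and 16 for MLX5_IB_UMR_OCTOWORD (taken as assumptions here, not quoted from this patch):

#include <stdio.h>

#define XLT_ALIGNMENT 64ULL /* assumed MLX5_IB_UMR_XLT_ALIGNMENT */
#define OCTOWORD      16ULL /* assumed MLX5_IB_UMR_OCTOWORD */
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* 100 bytes -> rounded up to 128 -> 8 octowords */
        printf("%llu\n", ALIGN(100ULL, XLT_ALIGNMENT) / OCTOWORD);
        return 0;
}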
+
+static __be64 frwr_mkey_mask(bool atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 sig_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_SIGERR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE |
+ MLX5_MKEY_MASK_BSF_EN;
+
+ return cpu_to_be64(result);
+}
+
+static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
+{
+ int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = flags;
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
+ umr->mkey_mask = frwr_mkey_mask(atomic);
+}
+
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
+{
+ memset(umr, 0, sizeof(*umr));
+ umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr->flags = MLX5_UMR_INLINE;
+}
+
+static __be64 get_umr_enable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_disable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_translation_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(int atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD;
+
+ return cpu_to_be64(result);
+}
+
+static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
+{
+ if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
+ (mask & MLX5_MKEY_MASK_A &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+ return -EPERM;
+ return 0;
+}
+
+static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
+ struct mlx5_wqe_umr_ctrl_seg *umr,
+ const struct ib_send_wr *wr, int atomic)
+{
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ memset(umr, 0, sizeof(*umr));
+
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
+
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
+ u64 offset = get_xlt_octo(umrwr->offset);
+
+ umr->xlt_offset = cpu_to_be16(offset & 0xffff);
+ umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
+ umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+ umr->mkey_mask |= get_umr_update_translation_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
+ umr->mkey_mask |= get_umr_update_access_mask(atomic);
+ umr->mkey_mask |= get_umr_update_pd_mask();
+ }
+ if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
+ umr->mkey_mask |= get_umr_enable_mr_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ umr->mkey_mask |= get_umr_disable_mr_mask();
+
+ if (!wr->num_sge)
+ umr->flags |= MLX5_UMR_INLINE;
+
+ return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
+}
+
+static u8 get_umr_flags(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+}
+
+static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
+ struct mlx5_ib_mr *mr,
+ u32 key, int access)
+{
+ int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
+ seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ seg->start_addr = cpu_to_be64(mr->ibmr.iova);
+ seg->len = cpu_to_be64(mr->ibmr.length);
+ seg->xlt_oct_size = cpu_to_be32(ndescs);
+}
+
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
+{
+ memset(seg, 0, sizeof(*seg));
+ seg->status = MLX5_MKEY_STATUS_FREE;
+}
+
+static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
+ const struct ib_send_wr *wr)
+{
+ const struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ memset(seg, 0, sizeof(*seg));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+ seg->status = MLX5_MKEY_STATUS_FREE;
+
+ seg->flags = convert_access(umrwr->access_flags);
+ if (umrwr->pd)
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
+ !umrwr->length)
+ seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+
+ seg->start_addr = cpu_to_be64(umrwr->virt_addr);
+ seg->len = cpu_to_be64(umrwr->length);
+ seg->log2_page_size = umrwr->page_shift;
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
+ mlx5_mkey_variant(umrwr->mkey));
+}
+
+static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
+ struct mlx5_ib_mr *mr,
+ struct mlx5_ib_pd *pd)
+{
+ int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+
+ dseg->addr = cpu_to_be64(mr->desc_map);
+ dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
+}
+
+static __be32 send_ieth(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return wr->ex.imm_data;
+
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_be32(wr->ex.invalidate_rkey);
+
+ default:
+ return 0;
+ }
+}
+
+static u8 calc_sig(void *wqe, int size)
+{
+ u8 *p = wqe;
+ u8 res = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ res ^= p[i];
+
+ return ~res;
+}
+
+static u8 wq_sig(void *wqe)
+{
+ return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+}
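
calc_sig() is a plain byte-wise XOR checksum, inverted at the end, and wq_sig() applies it over the WQE length encoded in the control segment. A standalone check of the same arithmetic on made-up bytes:

#include <stdio.h>

int main(void)
{
        unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
        unsigned char res = 0;
        int i;

        for (i = 0; i < 4; i++)
                res ^= buf[i];

        /* 0x12 ^ 0x34 ^ 0x56 ^ 0x78 = 0x08, inverted -> 0xf7 */
        printf("0x%02x\n", (unsigned char)~res);
        return 0;
}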
+
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **wqe, int *wqe_sz, void **cur_edge)
+{
+ struct mlx5_wqe_inline_seg *seg;
+ size_t offset;
+ int inl = 0;
+ int i;
+
+ seg = *wqe;
+ *wqe += sizeof(*seg);
+ offset = sizeof(*seg);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ size_t len = wr->sg_list[i].length;
+ void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+
+ inl += len;
+
+ if (unlikely(inl > qp->max_inline_data))
+ return -ENOMEM;
+
+ while (likely(len)) {
+ size_t leftlen;
+ size_t copysz;
+
+ handle_post_send_edge(&qp->sq, wqe,
+ *wqe_sz + (offset >> 4),
+ cur_edge);
+
+ leftlen = *cur_edge - *wqe;
+ copysz = min_t(size_t, leftlen, len);
+
+ memcpy(*wqe, addr, copysz);
+ len -= copysz;
+ addr += copysz;
+ *wqe += copysz;
+ offset += copysz;
+ }
+ }
+
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+
+ *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
+
+ return 0;
+}
+
+static u16 prot_field_size(enum ib_signature_type type)
+{
+ switch (type) {
+ case IB_SIG_TYPE_T10_DIF:
+ return MLX5_DIF_SIZE;
+ default:
+ return 0;
+ }
+}
+
+static u8 bs_selector(int block_size)
+{
+ switch (block_size) {
+ case 512: return 0x1;
+ case 520: return 0x2;
+ case 4096: return 0x3;
+ case 4160: return 0x4;
+ case 1073741824: return 0x5;
+ default: return 0;
+ }
+}
+
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+ struct mlx5_bsf_inl *inl)
+{
+ /* Valid inline section and allow BSF refresh */
+ inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+ MLX5_BSF_REFRESH_DIF);
+ inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+ inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+ /* repeating block */
+ inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+ inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+ MLX5_DIF_CRC : MLX5_DIF_IPCS;
+
+ if (domain->sig.dif.ref_remap)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+
+ if (domain->sig.dif.app_escape) {
+ if (domain->sig.dif.ref_escape)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+ else
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
+ }
+
+ inl->dif_app_bitmask_check =
+ cpu_to_be16(domain->sig.dif.apptag_check_mask);
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_bsf *bsf, u32 data_size)
+{
+ struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+ struct mlx5_bsf_basic *basic = &bsf->basic;
+ struct ib_sig_domain *mem = &sig_attrs->mem;
+ struct ib_sig_domain *wire = &sig_attrs->wire;
+
+ memset(bsf, 0, sizeof(*bsf));
+
+ /* Basic + Extended + Inline */
+ basic->bsf_size_sbs = 1 << 7;
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ /* Memory domain */
+ switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+ mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Wire domain */
+ switch (sig_attrs->wire.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+ mem->sig_type == wire->sig_type) {
+ /* Same block structure */
+ basic->bsf_size_sbs |= 1 << 4;
+ if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+ basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
+ if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
+ if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
+ } else
+ basic->wire.bs_selector =
+ bs_selector(wire->sig.dif.pi_interval);
+
+ basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+ mlx5_fill_inl_bsf(wire, &bsf->w_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int set_sig_data_segment(const struct ib_send_wr *send_wr,
+ struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ struct mlx5_bsf *bsf;
+ u32 data_len;
+ u32 data_key;
+ u64 data_va;
+ u32 prot_len = 0;
+ u32 prot_key = 0;
+ u64 prot_va = 0;
+ bool prot = false;
+ int ret;
+ int wqe_size;
+ struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+ struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+
+ data_len = pi_mr->data_length;
+ data_key = pi_mr->ibmr.lkey;
+ data_va = pi_mr->data_iova;
+ if (pi_mr->meta_ndescs) {
+ prot_len = pi_mr->meta_length;
+ prot_key = pi_mr->ibmr.lkey;
+ prot_va = pi_mr->pi_iova;
+ prot = true;
+ }
+
+ if (!prot || (data_key == prot_key && data_va == prot_va &&
+ data_len == prot_len)) {
+ /**
+ * The source domain either doesn't contain signature information,
+ * or data and protection are interleaved in memory, so we only
+ * need to construct:
+ * ------------------
+ * | data_klm |
+ * ------------------
+ * | BSF |
+ * ------------------
+ **/
+ struct mlx5_klm *data_klm = *seg;
+
+ data_klm->bcount = cpu_to_be32(data_len);
+ data_klm->key = cpu_to_be32(data_key);
+ data_klm->va = cpu_to_be64(data_va);
+ wqe_size = ALIGN(sizeof(*data_klm), 64);
+ } else {
+ /**
+ * The source domain contains signature information,
+ * so we need to construct a strided block format:
+ * ---------------------------
+ * | stride_block_ctrl |
+ * ---------------------------
+ * | data_klm |
+ * ---------------------------
+ * | prot_klm |
+ * ---------------------------
+ * | BSF |
+ * ---------------------------
+ **/
+ struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+ struct mlx5_stride_block_entry *data_sentry;
+ struct mlx5_stride_block_entry *prot_sentry;
+ u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+ int prot_size;
+
+ sblock_ctrl = *seg;
+ data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+ prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+ prot_size = prot_field_size(sig_attrs->mem.sig_type);
+ if (!prot_size) {
+ pr_err("Bad block size given: %u\n", block_size);
+ return -EINVAL;
+ }
+ sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+ prot_size);
+ sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+ sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+ sblock_ctrl->num_entries = cpu_to_be16(2);
+
+ data_sentry->bcount = cpu_to_be16(block_size);
+ data_sentry->key = cpu_to_be32(data_key);
+ data_sentry->va = cpu_to_be64(data_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+
+ prot_sentry->bcount = cpu_to_be16(prot_size);
+ prot_sentry->key = cpu_to_be32(prot_key);
+ prot_sentry->va = cpu_to_be64(prot_va);
+ prot_sentry->stride = cpu_to_be16(prot_size);
+
+ wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+ sizeof(*prot_sentry), 64);
+ }
+
+ *seg += wqe_size;
+ *size += wqe_size / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ bsf = *seg;
+ ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+ if (ret)
+ return -EINVAL;
+
+ *seg += sizeof(*bsf);
+ *size += sizeof(*bsf) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ return 0;
+}
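
The branch above reduces to one question: does the memory domain carry separate, non-interleaved protection information? A condensed sketch of that decision (field and function names are shortened for illustration; this mirrors the "!prot || same key/va/len" test, not the segment encoding itself):

#include <stdbool.h>
#include <stdio.h>

struct region {
        unsigned int key;
        unsigned long long va;
        unsigned long long len;
};

static bool needs_strided_block(bool has_prot, struct region data,
                                struct region prot)
{
        if (!has_prot)
                return false; /* no signature info in the source domain */
        /* interleaved data+protection looks like one region -> single KLM */
        return !(data.key == prot.key && data.va == prot.va &&
                 data.len == prot.len);
}

int main(void)
{
        struct region d = { 5, 0x1000, 4096 };
        struct region p = { 7, 0x9000, 512 };

        printf("%d\n", needs_strided_block(true, d, p)); /* 1: strided block */
        return 0;
}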
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+ struct ib_mr *sig_mr, int access_flags,
+ u32 size, u32 length, u32 pdn)
+{
+ u32 sig_key = sig_mr->rkey;
+ u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
+ seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
+ MLX5_MKEY_BSF_EN | pdn);
+ seg->len = cpu_to_be64(length);
+ seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
+ seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ u32 size)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
+ umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+ umr->mkey_mask = sig_mkey_mask();
+}
+
+static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ const struct ib_reg_wr *wr = reg_wr(send_wr);
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
+ struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
+ struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
+ u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
+ u32 xlt_size;
+ int region_len, ret;
+
+ if (unlikely(send_wr->num_sge != 0) ||
+ unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
+ unlikely(!sig_mr->sig->sig_status_checked))
+ return -EINVAL;
+
+ /* length of the protected region, data + protection */
+ region_len = pi_mr->ibmr.length;
+
+ /**
+ * KLM octoword size - if protection was provided
+ * then we use the strided block format (3 octowords),
+ * else we use a single KLM (1 octoword)
+ **/
+ if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
+ xlt_size = 0x30;
+ else
+ xlt_size = sizeof(struct mlx5_klm);
+
+ set_sig_umr_segment(*seg, xlt_size);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
+ pdn);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
+ cur_edge);
+ if (ret)
+ return ret;
+
+ sig_mr->sig->sig_status_checked = false;
+ return 0;
+}
+
+static int set_psv_wr(struct ib_sig_domain *domain,
+ u32 psv_idx, void **seg, int *size)
+{
+ struct mlx5_seg_set_psv *psv_seg = *seg;
+
+ memset(psv_seg, 0, sizeof(*psv_seg));
+ psv_seg->psv_num = cpu_to_be32(psv_idx);
+ switch (domain->sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+ domain->sig.dif.app_tag);
+ psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+ break;
+ default:
+ pr_err("Bad signature type (%d) is given.\n",
+ domain->sig_type);
+ return -EINVAL;
+ }
+
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+
+ return 0;
+}
+
+static int set_reg_wr(struct mlx5_ib_qp *qp,
+ const struct ib_reg_wr *wr,
+ void **seg, int *size, void **cur_edge,
+ bool check_not_free)
+{
+ struct mlx5_ib_mr *mr = to_mmr(wr->mr);
+ struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
+ u8 flags = 0;
+
+ if (!mlx5_ib_can_use_umr(dev, atomic, wr->access)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Fast update of %s for MR is disabled\n",
+ (MLX5_CAP_GEN(dev->mdev,
+ umr_modify_entity_size_disabled)) ?
+ "entity size" :
+ "atomic access");
+ return -EINVAL;
+ }
+
+ if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Invalid IB_SEND_INLINE send flag\n");
+ return -EINVAL;
+ }
+
+ if (check_not_free)
+ flags |= MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ flags |= MLX5_UMR_INLINE;
+
+ set_reg_umr_seg(*seg, mr, flags, atomic);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ if (umr_inline) {
+ memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
+ mr_list_size);
+ *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
+ return 0;
+}
+
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ set_linv_umr_seg(*seg);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_linv_mkey_seg(*seg);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
+{
+ __be32 *p = NULL;
+ int i, j;
+
+ pr_debug("dump WQE index %u:\n", idx);
+ for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
+ if ((i & 0xf) == 0) {
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+ pr_debug("WQBB at %p:\n", (void *)p);
+ j = 0;
+ idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
+ }
+ pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
+ be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
+ be32_to_cpu(p[j + 3]));
+ }
+}
+
+static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx,
+ int *size, void **cur_edge, int nreq,
+ bool send_signaled, bool solicited)
+{
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
+ *ctrl = *seg;
+ *(uint32_t *)(*seg + 8) = 0;
+ (*ctrl)->imm = send_ieth(wr);
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
+ *size = sizeof(**ctrl) / 16;
+ *cur_edge = qp->sq.cur_edge;
+
+ return 0;
+}
+
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx, int *size,
+ void **cur_edge, int nreq)
+{
+ return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
+ wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
+static void finish_wqe(struct mlx5_ib_qp *qp,
+ struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge,
+ unsigned int idx, u64 wr_id, int nreq, u8 fence,
+ u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
+ ctrl->signature = wq_sig(ctrl);
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = mlx5_opcode;
+ qp->sq.wqe_head[idx] = qp->sq.head + nreq;
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+
+ /* We save the edge, which was possibly updated during WQE
+ * construction, into the SQ's cache.
+ */
+ seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
+ qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
+ get_sq_edge(&qp->sq, qp->sq.cur_post &
+ (qp->sq.wqe_cnt - 1)) :
+ cur_edge;
+}
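
finish_wqe() advances cur_post by the number of WQE basic blocks the descriptor occupied; size is counted in 16-byte units, and the sketch below assumes the 64-byte basic block the mlx5 driver uses for MLX5_SEND_WQE_BB. A quick standalone check of that rounding:

#include <stdio.h>

#define WQE_BB 64 /* assumed value of MLX5_SEND_WQE_BB */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int size = 5; /* 5 x 16B = 80 bytes of WQE */

        printf("%d\n", DIV_ROUND_UP(size * 16, WQE_BB)); /* occupies 2 WQEBBs */
        return 0;
}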
+
+static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
+ *seg += sizeof(struct mlx5_wqe_raddr_seg);
+ *size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+}
+
+static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
+ (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ set_linv_wr(qp, seg, size, cur_edge);
+}
+
+static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_REG_MR;
+ (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
+ return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
+}
+
+static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq,
+ struct ib_sig_domain *domain, u32 psv_index,
+ u8 next_fence)
+{
+ int err;
+
+ /*
+ * SET_PSV WQEs are not signaled and are solicited on error.
+ */
+ err = __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
+ false, true);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ err = set_psv_wr(domain, psv_index, seg, size);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
+ next_fence, MLX5_OPCODE_SET_PSV);
+
+out:
+ return err;
+}
+
+static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge,
+ unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence)
+{
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr pa_pi_mr;
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_reg_wr reg_pi_wr;
+ int err;
+
+ qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
+
+ mr = to_mmr(reg_wr(wr)->mr);
+ pi_mr = mr->pi_mr;
+
+ if (pi_mr) {
+ memset(&reg_pi_wr, 0,
+ sizeof(struct ib_reg_wr));
+
+ reg_pi_wr.mr = &pi_mr->ibmr;
+ reg_pi_wr.access = reg_wr(wr)->access;
+ reg_pi_wr.key = pi_mr->ibmr.rkey;
+
+ (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key);
+ /* UMR for data + prot registration */
+ err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
+ if (unlikely(err))
+ goto out;
+
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, fence, MLX5_OPCODE_UMR);
+
+ err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ } else {
+ memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
+ /* No UMR, use local_dma_lkey */
+ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
+ pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.data_length = mr->data_length;
+ pa_pi_mr.data_iova = mr->data_iova;
+ if (mr->meta_ndescs) {
+ pa_pi_mr.meta_ndescs = mr->meta_ndescs;
+ pa_pi_mr.meta_length = mr->meta_length;
+ pa_pi_mr.pi_iova = mr->pi_iova;
+ }
+
+ pa_pi_mr.ibmr.length = mr->ibmr.length;
+ mr->pi_mr = &pa_pi_mr;
+ }
+ (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey);
+ /* UMR for sig MR */
+ err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq,
+ fence, MLX5_OPCODE_UMR);
+
+ sig_attrs = mr->ibmr.sig_attrs;
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->mem, mr->sig->psv_memory.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->wire, mr->sig->psv_wire.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+out:
+ return err;
+}
+
+static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence, int *num_sge)
+{
+ int err = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -EOPNOTSUPP;
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+ handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR:
+ err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR_INTEGRITY:
+ err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
+ cur_edge, idx, nreq, fence,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ return err;
+}
+
+static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr, void **seg,
+ int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **seg, int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ /* handle qp that supports ud offload */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = *seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ *seg += sizeof(struct mlx5_wqe_eth_pad);
+ *size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ set_eth_seg(wr, qp, seg, size, cur_edge);
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ }
+}
+
+static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ int err = 0;
+
+ if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) {
+ err = -EINVAL;
+ mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode);
+ goto out;
+ }
+
+ qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
+ (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
+ err = set_reg_umr_segment(dev, *seg, wr,
+ !!(MLX5_CAP_GEN(dev->mdev, atomic)));
+ if (unlikely(err))
+ goto out;
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_reg_mkey_segment(*seg, wr);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+out:
+ return err;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp;
+ struct mlx5_wqe_xrc_seg *xrc;
+ struct mlx5_bf *bf;
+ void *cur_edge;
+ int uninitialized_var(size);
+ unsigned long flags;
+ unsigned int idx;
+ int err = 0;
+ int num_sge;
+ void *seg;
+ int nreq;
+ int i;
+ u8 next_fence = 0;
+ u8 fence;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ qp = to_mqp(ibqp);
+ bf = &qp->bf;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
+ nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (wr->opcode == IB_WR_REG_MR ||
+ wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ fence = dev->umr_fence;
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ } else {
+ if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
+ }
+
+ switch (ibqp->qp_type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+ seg += sizeof(*xrc);
+ size += sizeof(*xrc) / 16;
+ fallthrough;
+ case IB_QPT_RC:
+ err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
+ &cur_edge, &idx, nreq, fence,
+ next_fence, &num_sge);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto out;
+ } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ goto skip_psv;
+ }
+ break;
+
+ case IB_QPT_UC:
+ handle_qpt_uc(wr, &seg, &size);
+ break;
+ case IB_QPT_SMI:
+ if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) {
+ mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
+ err = -EPERM;
+ *bad_wr = wr;
+ goto out;
+ }
+ fallthrough;
+ case MLX5_IB_QPT_HW_GSI:
+ handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case IB_QPT_UD:
+ handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case MLX5_IB_QPT_REG_UMR:
+ err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg,
+ &size, &cur_edge, idx);
+ if (unlikely(err))
+ goto out;
+ break;
+
+ default:
+ break;
+ }
+
+ if (wr->send_flags & IB_SEND_INLINE && num_sge) {
+ err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ } else {
+ for (i = 0; i < num_sge; i++) {
+ handle_post_send_edge(&qp->sq, &seg, size,
+ &cur_edge);
+ if (unlikely(!wr->sg_list[i].length))
+ continue;
+
+ set_data_ptr_seg(
+ (struct mlx5_wqe_data_seg *)seg,
+ wr->sg_list + i);
+ size += sizeof(struct mlx5_wqe_data_seg) / 16;
+ seg += sizeof(struct mlx5_wqe_data_seg);
+ }
+ }
+
+ qp->next_fence = next_fence;
+ finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
+ fence, mlx5_ib_opcode[wr->opcode]);
+skip_psv:
+ if (0)
+ dump_wqe(qp, idx, size);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell.
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ bf->offset ^= bf->buf_size;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
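
The tail of mlx5_ib_post_send() is the usual publish-then-notify doorbell pattern: the descriptors must be visible before the doorbell record, and the record before the MMIO doorbell write. A rough C11 user-space analogue of the first ordering step, purely illustrative since the kernel uses wmb() and real MMIO (the ring structure and names are hypothetical):

#include <stdatomic.h>

struct ring {
        int desc[16];
        _Atomic unsigned int head;
};

static void publish(struct ring *r, int val)
{
        unsigned int h = atomic_load_explicit(&r->head, memory_order_relaxed);

        r->desc[h & 15] = val;                     /* write the descriptor      */
        atomic_thread_fence(memory_order_release); /* analogue of wmb()         */
        atomic_store_explicit(&r->head, h + 1,     /* then publish the new head */
                              memory_order_relaxed);
}

int main(void)
{
        struct ring r = { .head = 0 };

        publish(&r, 42);
        return 0;
}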
+
+static void set_sig_seg(struct mlx5_rwqe_sig *sig, int max_gs)
+{
+ sig->signature = calc_sig(sig, (max_gs + 1) << 2);
+}
+
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ scat++;
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_ptr_seg(scat + i, wr->sg_list + i);
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
+ sig = (struct mlx5_rwqe_sig *)scat;
+ set_sig_seg(sig, qp->rq.max_gs);
+ }
+
+ qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/wr.h b/drivers/infiniband/hw/mlx5/wr.h
new file mode 100644
index 000000000000..4f0057516402
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_WR_H
+#define _MLX5_IB_WR_H
+
+#include "mlx5_ib.h"
+
+enum {
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
+};
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+
+/* get_sq_edge - Get the next nearby edge.
+ *
+ * An 'edge' is defined as the first address following the end of the
+ * fragment or of the SQ. Accordingly, WQE construction, which repeatedly
+ * advances the pointer while writing the next piece of data, only has to
+ * check whether it has reached an edge.
+ *
+ * @sq: SQ buffer.
+ * @idx: Stride index in the SQ buffer.
+ *
+ * Return:
+ * The new edge.
+ */
+static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
+{
+ void *fragment_end;
+
+ fragment_end = mlx5_frag_buf_get_wqe
+ (&sq->fbc,
+ mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+
+ return fragment_end + MLX5_SEND_WQE_BB;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain);
+
+static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
+}
+
+static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
+}
+#endif /* _MLX5_IB_WR_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 599794c5a78f..7550e9d03dec 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -478,16 +478,6 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr);
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr);
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *fmr);
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr);
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr);
-
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt);
void mthca_unmap_eq_icm(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 4250b2c18c64..ce0e0867e488 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -541,7 +541,7 @@ int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
return err;
}
-/* Free mr or fmr */
+/* Free mr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
@@ -564,266 +564,6 @@ void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
mthca_free_mtt(dev, mr->mtt);
}
-int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
- u32 access, struct mthca_fmr *mr)
-{
- struct mthca_mpt_entry *mpt_entry;
- struct mthca_mailbox *mailbox;
- u64 mtt_seg;
- u32 key, idx;
- int list_len = mr->attr.max_pages;
- int err = -ENOMEM;
- int i;
-
- if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
- return -EINVAL;
-
- /* For Arbel, all MTTs must fit in the same page. */
- if (mthca_is_memfree(dev) &&
- mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
- return -EINVAL;
-
- mr->maps = 0;
-
- key = mthca_alloc(&dev->mr_table.mpt_alloc);
- if (key == -1)
- return -ENOMEM;
- key = adjust_key(dev, key);
-
- idx = key & (dev->limits.num_mpts - 1);
- mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
-
- if (mthca_is_memfree(dev)) {
- err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
- if (err)
- goto err_out_mpt_free;
-
- mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
- BUG_ON(!mr->mem.arbel.mpt);
- } else
- mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
- sizeof *(mr->mem.tavor.mpt) * idx;
-
- mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt)) {
- err = PTR_ERR(mr->mtt);
- goto err_out_table;
- }
-
- mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
-
- if (mthca_is_memfree(dev)) {
- mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
- mr->mtt->first_seg,
- &mr->mem.arbel.dma_handle);
- BUG_ON(!mr->mem.arbel.mtts);
- } else
- mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
-
- mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox)) {
- err = PTR_ERR(mailbox);
- goto err_out_free_mtt;
- }
-
- mpt_entry = mailbox->buf;
-
- mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
- MTHCA_MPT_FLAG_MIO |
- MTHCA_MPT_FLAG_REGION |
- access);
-
- mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
- mpt_entry->key = cpu_to_be32(key);
- mpt_entry->pd = cpu_to_be32(pd);
- memset(&mpt_entry->start, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
- mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);
-
- if (0) {
- mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
- for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
- if (i % 4 == 0)
- printk("[%02x] ", i * 4);
- printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
- if ((i + 1) % 4 == 0)
- printk("\n");
- }
- }
-
- err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1));
- if (err) {
- mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
- goto err_out_mailbox_free;
- }
-
- mthca_free_mailbox(dev, mailbox);
- return 0;
-
-err_out_mailbox_free:
- mthca_free_mailbox(dev, mailbox);
-
-err_out_free_mtt:
- mthca_free_mtt(dev, mr->mtt);
-
-err_out_table:
- mthca_table_put(dev, dev->mr_table.mpt_table, key);
-
-err_out_mpt_free:
- mthca_free(&dev->mr_table.mpt_alloc, key);
- return err;
-}
-
-int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (fmr->maps)
- return -EBUSY;
-
- mthca_free_region(dev, fmr->ibmr.lkey);
- mthca_free_mtt(dev, fmr->mtt);
-
- return 0;
-}
-
-static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
- int list_len, u64 iova)
-{
- int i, page_mask;
-
- if (list_len > fmr->attr.max_pages)
- return -EINVAL;
-
- page_mask = (1 << fmr->attr.page_shift) - 1;
-
- /* We are getting page lists, so va must be page aligned. */
- if (iova & page_mask)
- return -EINVAL;
-
- /* Trust the user not to pass misaligned data in page_list */
- if (0)
- for (i = 0; i < list_len; ++i) {
- if (page_list[i] & ~page_mask)
- return -EINVAL;
- }
-
- if (fmr->maps >= fmr->attr.max_maps)
- return -EINVAL;
-
- return 0;
-}
-
-
-int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- struct mthca_mpt_entry mpt_entry;
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = tavor_key_to_hw_index(fmr->ibmr.lkey);
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-
- for (i = 0; i < list_len; ++i) {
- __be64 mtt_entry = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
- mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
- }
-
- mpt_entry.lkey = cpu_to_be32(key);
- mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- mpt_entry.start = cpu_to_be64(iova);
-
- __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
- memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
- offsetof(struct mthca_mpt_entry, window_count) -
- offsetof(struct mthca_mpt_entry, start));
-
- writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);
-
- return 0;
-}
-
-int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct mthca_fmr *fmr = to_mfmr(ibfmr);
- struct mthca_dev *dev = to_mdev(ibfmr->device);
- u32 key;
- int i, err;
-
- err = mthca_check_fmr(fmr, page_list, list_len, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = arbel_key_to_hw_index(fmr->ibmr.lkey);
- if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- key += SINAI_FMR_KEY_INC;
- else
- key += dev->limits.num_mpts;
- fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-
- wmb();
-
- dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- for (i = 0; i < list_len; ++i)
- fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
- MTHCA_MTT_FLAG_PRESENT);
-
- dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
- list_len * sizeof(u64), DMA_TO_DEVICE);
-
- fmr->mem.arbel.mpt->key = cpu_to_be32(key);
- fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
- fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
- fmr->mem.arbel.mpt->start = cpu_to_be64(iova);
-
- wmb();
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;
-
- wmb();
-
- return 0;
-}
-
-void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
-}
-
-void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
-{
- if (!fmr->maps)
- return;
-
- fmr->maps = 0;
-
- *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
-}
-
int mthca_init_mr_table(struct mthca_dev *dev)
{
phys_addr_t addr;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 69a3e4f62fb1..9fa2f9164a47 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -118,16 +118,6 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
- /*
- * If Sinai memory key optimization is being used, then only
- * the 8-bit key portion will change. For other HCAs, the
- * unused index bits will also be used for FMR remapping.
- */
- if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
- props->max_map_per_fmr = 255;
- else
- props->max_map_per_fmr =
- (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;
err = 0;
out:
@@ -388,14 +378,15 @@ static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}
-static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+static int mthca_ah_create(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct mthca_ah *ah = to_mah(ibah);
- return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), ah_attr,
- ah);
+ return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
+ init_attr->ah_attr, ah);
}
static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
@@ -957,69 +948,6 @@ static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
return 0;
}
-static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct mthca_fmr *fmr;
- int err;
-
- fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
- if (!fmr)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
- err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
- convert_access(mr_access_flags), fmr);
-
- if (err) {
- kfree(fmr);
- return ERR_PTR(err);
- }
-
- return &fmr->ibmr;
-}
-
-static int mthca_dealloc_fmr(struct ib_fmr *fmr)
-{
- struct mthca_fmr *mfmr = to_mfmr(fmr);
- int err;
-
- err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
- if (err)
- return err;
-
- kfree(mfmr);
- return 0;
-}
-
-static int mthca_unmap_fmr(struct list_head *fmr_list)
-{
- struct ib_fmr *fmr;
- int err;
- struct mthca_dev *mdev = NULL;
-
- list_for_each_entry(fmr, fmr_list, list) {
- if (mdev && to_mdev(fmr->device) != mdev)
- return -EINVAL;
- mdev = to_mdev(fmr->device);
- }
-
- if (!mdev)
- return 0;
-
- if (mthca_is_memfree(mdev)) {
- list_for_each_entry(fmr, fmr_list, list)
- mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
-
- wmb();
- } else
- list_for_each_entry(fmr, fmr_list, list)
- mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
-
- err = mthca_SYNC_TPT(mdev);
- return err;
-}
-
static ssize_t hw_rev_show(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -1203,20 +1131,6 @@ static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};
-static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
- .alloc_fmr = mthca_alloc_fmr,
- .dealloc_fmr = mthca_dealloc_fmr,
- .map_phys_fmr = mthca_arbel_map_phys_fmr,
- .unmap_fmr = mthca_unmap_fmr,
-};
-
-static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
- .alloc_fmr = mthca_alloc_fmr,
- .dealloc_fmr = mthca_dealloc_fmr,
- .map_phys_fmr = mthca_tavor_map_phys_fmr,
- .unmap_fmr = mthca_unmap_fmr,
-};
-
static const struct ib_device_ops mthca_dev_arbel_ops = {
.post_recv = mthca_arbel_post_receive,
.post_send = mthca_arbel_post_send,
@@ -1275,15 +1189,6 @@ int mthca_register_device(struct mthca_dev *dev)
&mthca_dev_tavor_srq_ops);
}
- if (dev->mthca_flags & MTHCA_FLAG_FMR) {
- if (mthca_is_memfree(dev))
- ib_set_device_ops(&dev->ib_dev,
- &mthca_dev_arbel_fmr_ops);
- else
- ib_set_device_ops(&dev->ib_dev,
- &mthca_dev_tavor_fmr_ops);
- }
-
ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
if (mthca_is_memfree(dev))
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 596acc45569b..84c64bff0d92 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -76,24 +76,6 @@ struct mthca_mr {
struct mthca_mtt *mtt;
};
-struct mthca_fmr {
- struct ib_fmr ibmr;
- struct ib_fmr_attr attr;
- struct mthca_mtt *mtt;
- int maps;
- union {
- struct {
- struct mthca_mpt_entry __iomem *mpt;
- u64 __iomem *mtts;
- } tavor;
- struct {
- struct mthca_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- } arbel;
- } mem;
-};
-
struct mthca_pd {
struct ib_pd ibpd;
u32 pd_num;
@@ -301,11 +283,6 @@ static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext
return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}
-static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
-{
- return container_of(ibmr, struct mthca_fmr, ibmr);
-}
-
static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct mthca_mr, ibmr);
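For context, the fast-registration (FRWR) path that replaces the FMR verbs removed above (alloc_fmr, map_phys_fmr, unmap_fmr, dealloc_fmr) looks roughly like the following minimal sketch. The helper name frwr_map is hypothetical; an established PD/QP and an already DMA-mapped scatterlist are assumed, and completion handling and error recovery are trimmed.

/* Minimal FRWR sketch (editorial, not part of this patch): allocate a
 * fast-reg MR, map the scatterlist into it, and post an IB_WR_REG_MR
 * work request on the QP.
 */
#include <rdma/ib_verbs.h>

static int frwr_map(struct ib_pd *pd, struct ib_qp *qp,
		    struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}

The registration is later torn down with IB_WR_LOCAL_INV (or a remote send-with-invalidate) rather than through an unmap_fmr verb.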
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 7baedc74e39d..fcfe0e82197a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -98,7 +98,6 @@ struct ocrdma_dev_attr {
u64 max_mr_size;
u32 max_num_mr_pbl;
int max_mw;
- int max_fmr;
int max_map_per_fmr;
int max_pages_per_frmr;
u16 max_ord_per_qp;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 2b7f00ac41b0..6eea02b18968 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -155,7 +155,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
return status;
}
-int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
u32 *ahid_addr;
@@ -165,6 +165,7 @@ int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
u16 vlan_tag = 0xffff;
const struct ib_gid_attr *sgid_attr;
struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 9780afcde780..8b73b3489f3a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -51,7 +51,7 @@ enum {
OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
};
-int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index d82d3ec3649e..e07bf0b2209a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1190,7 +1190,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_mr = rsp->max_mr;
attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
rsp->max_mr_size_lo;
- attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 10e343894595..d11c74390a12 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -99,8 +99,6 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
attr->max_mw = dev->attr.max_mw;
attr->max_pd = dev->attr.max_pd;
attr->atomic_cap = 0;
- attr->max_fmr = 0;
- attr->max_map_per_fmr = 0;
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index dcdc85a1ab25..ccaedfd53e49 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -632,7 +632,6 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
attr->max_mr_size = qed_attr->max_mr_size;
attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
attr->max_mw = qed_attr->max_mw;
- attr->max_fmr = qed_attr->max_fmr;
attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
attr->max_pd = qed_attr->max_pd;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 5488dbd59d3c..fdf90ecb2699 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -103,7 +103,6 @@ struct qedr_device_attr {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a5bd3adaf90a..9b9e80266367 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -145,8 +145,6 @@ int qedr_query_device(struct ib_device *ibdev,
attr->max_mw = qattr->max_mw;
attr->max_pd = qattr->max_pd;
attr->atomic_cap = dev->atomic_cap;
- attr->max_fmr = qattr->max_fmr;
- attr->max_map_per_fmr = 16;
attr->max_qp_init_rd_atom =
1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
attr->max_qp_rd_atom =
@@ -2750,12 +2748,12 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
return 0;
}
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata)
{
struct qedr_ah *ah = get_qedr_ah(ibah);
- rdma_copy_ah_attr(&ah->attr, attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
return 0;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 18027844eb87..5e02387e068d 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,7 @@ int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_recv_wr);
-int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void qedr_destroy_ah(struct ib_ah *ibah, u32 flags);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b0144229cf3b..ff87a67dd7b7 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -40,10 +40,10 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
-#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>
+#include <linux/pgtable.h>
#include <rdma/ib.h>
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 91d64dd71a8a..8bcbc884e5b6 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2375,7 +2375,6 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
struct qib_devdata *dd = ppd->dd;
u64 val, guid, ibc;
unsigned long flags;
- int ret = 0;
/*
* SerDes model not in Pd, but still need to
@@ -2510,7 +2509,7 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
val | ERR_MASK_N(IBStatusChanged));
/* Always zero until we start messing with SerDes for real */
- return ret;
+ return 0;
}
/**
@@ -6875,7 +6874,7 @@ static int init_sdma_7322_regs(struct qib_pportdata *ppd)
struct qib_devdata *dd = ppd->dd;
unsigned lastbuf, erstbuf;
u64 senddmabufmask[3] = { 0 };
- int n, ret = 0;
+ int n;
qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
qib_sdma_7322_setlengen(ppd);
@@ -6904,7 +6903,7 @@ static int init_sdma_7322_regs(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
- return ret;
+ return 0;
}
/* sdma_lock must be held */
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 342e3172ca40..4c24e83f3175 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -106,18 +106,18 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
goto bail;
}
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
for (got = 0; got < num_pages; got += ret) {
ret = pin_user_pages(start_page + got * PAGE_SIZE,
num_pages - got,
FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto bail_release;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return 0;
bail_release:
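The qib hunk above is part of the tree-wide switch from taking mmap_sem directly to the mmap locking API. A minimal sketch of the long-term pinning pattern it follows (pin_range is a hypothetical helper; error handling is reduced to the essentials):

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch of the pattern above: hold the mmap read lock around
 * pin_user_pages() and drop partial pins with unpin_user_pages().
 * FOLL_LONGTERM marks these as long-lived (RDMA-style) pins.
 */
static int pin_range(unsigned long start, unsigned long npages,
		     struct page **pages)
{
	long got;

	mmap_read_lock(current->mm);
	got = pin_user_pages(start, npages,
			     FOLL_LONGTERM | FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	if (got < 0)
		return got;
	if (got != npages) {
		unpin_user_pages(pages, got);
		return -EFAULT;
	}
	return 0;
}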
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7508abb6a0fa..7acf9ba5358a 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1460,7 +1460,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
rdi->dparms.props.max_srq = ib_qib_max_srqs;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 71f82339446c..b8a77ce11590 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -322,7 +322,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
props->max_mcast_grp = 0;
props->max_mcast_qp_attach = 0;
props->max_total_mcast_qp_attach = 0;
- props->max_map_per_fmr = 0;
/* Owned by Userspace
* max_qp_wr, max_sge, max_sge_rd, max_cqe */
mutex_unlock(&us_ibdev->usdev_lock);
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index bd9f944b68fc..760b254ba42d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -123,7 +123,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
uiomr->owning_mm = mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -187,7 +187,7 @@ out:
} else
mmgrab(uiomr->owning_mm);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
free_page((unsigned long) page_list);
return ret;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index faf7ecd7b3fa..ccbded2d26ce 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -509,9 +509,10 @@ void pvrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
*
* @return: 0 on success, otherwise errno.
*/
-int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata)
+int pvrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
struct pvrdma_dev *dev = to_vdev(ibah->device);
struct pvrdma_ah *ah = to_vah(ibah);
const struct ib_global_route *grh;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index e4a48f5c0c85..267702226f10 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -414,7 +414,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
-int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
+int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index ee02c6176007..40480add7dd3 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -98,14 +98,14 @@ EXPORT_SYMBOL(rvt_check_ah);
*
* Return: 0 on success
*/
-int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata)
+int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
struct rvt_ah *ah = ibah_to_rvtah(ibah);
struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
unsigned long flags;
- if (rvt_check_ah(ibah->device, ah_attr))
+ if (rvt_check_ah(ibah->device, init_attr->ah_attr))
return -EINVAL;
spin_lock_irqsave(&dev->n_ahs_lock, flags);
@@ -117,10 +117,11 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
dev->n_ahs_allocated++;
spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- rdma_copy_ah_attr(&ah->attr, ah_attr);
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
if (dev->driver_f.notify_new_ah)
- dev->driver_f.notify_new_ah(ibah->device, ah_attr, ah);
+ dev->driver_f.notify_new_ah(ibah->device,
+ init_attr->ah_attr, ah);
return 0;
}
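rvt_create_ah() above illustrates the new driver-facing contract: ->create_ah() receives a single struct rdma_ah_init_attr instead of separate ah_attr and u32 flags arguments. A minimal sketch of a callback under the new signature (the "foo" driver is hypothetical; it is assumed that the former create flags now travel inside init_attr as well):

#include <rdma/ib_verbs.h>

/* Hypothetical "foo" driver: everything needed to build the AH hangs
 * off init_attr, with the address vector at init_attr->ah_attr.
 */
static int foo_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;

	if (ah_attr->type != RDMA_AH_ATTR_TYPE_IB)
		return -EINVAL;

	/* ... translate ah_attr into the device's AH representation ... */
	return 0;
}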
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index bbb4d3bdec4e..40b7123fec76 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -50,8 +50,8 @@
#include <rdma/rdma_vt.h>
-int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 create_flags, struct ib_udata *udata);
+int rvt_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
void rvt_destroy_ah(struct ib_ah *ibah, u32 destroy_flags);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 37853aa3bcf7..f5d0e33cf3d7 100644
--- a/drivers/infiniband/sw/rdmavt/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -48,7 +48,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>
#include "mmap.h"
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 72f6534fbb52..60864e5ca7cb 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -97,7 +97,6 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
@@ -714,160 +713,6 @@ bail:
EXPORT_SYMBOL(rvt_invalidate_rkey);
/**
- * rvt_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Return: the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct rvt_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
- fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages,
- PERCPU_REF_INIT_ATOMIC);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = rvt_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- rvt_deinit_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * rvt_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- *
- * Return: 0 on success
- */
-
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- int m, n;
- unsigned long i;
- u32 ps;
- struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
-
- i = atomic_long_read(&fmr->mr.refcount.count);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs)
- return -EINVAL;
-
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
- if (++n == RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- return 0;
-}
-
-/**
- * rvt_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Return: 0 on success.
- */
-int rvt_unmap_fmr(struct list_head *fmr_list)
-{
- struct rvt_fmr *fmr;
- struct rvt_lkey_table *rkt;
- unsigned long flags;
- struct rvt_dev_info *rdi;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rdi = ib_to_rvt(fmr->ibfmr.device);
- rkt = &rdi->lkey_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * rvt_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Return: 0 on success.
- */
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct rvt_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
-
- rvt_free_lkey(&fmr->mr);
- rvt_put_mr(&fmr->mr); /* will set completion if last */
- ret = rvt_check_refs(&fmr->mr, __func__);
- if (ret)
- goto out;
- rvt_deinit_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-/**
* rvt_sge_adjacent - is isge compressible
* @last_sge: last outgoing SGE written
* @sge: SGE to check
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
index 2c8d0752e8e3..780fc63af98b 100644
--- a/drivers/infiniband/sw/rdmavt/mr.h
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -49,10 +49,6 @@
*/
#include <rdma/rdma_vt.h>
-struct rvt_fmr {
- struct ib_fmr ibfmr;
- struct rvt_mregion mr; /* must be last */
-};
struct rvt_mr {
struct ib_mr ibmr;
@@ -60,11 +56,6 @@ struct rvt_mr {
struct rvt_mregion mr; /* must be last */
};
-static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct rvt_fmr, ibfmr);
-}
-
static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct rvt_mr, ibmr);
@@ -83,11 +74,5 @@ struct ib_mr *rvt_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
-struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-int rvt_unmap_fmr(struct list_head *fmr_list);
-int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
#endif /* DEF_RVTMR_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 500a7ee04c44..511b72809e14 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -525,15 +525,18 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* @rdi: rvt device info structure
* @qpt: queue pair number table pointer
* @port_num: IB port number, 1 based, comes from core
+ * @exclude_prefix: prefix of special queue pair number being allocated
*
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num)
+ enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
+ u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
+ RVT_AIP_QPN_MAX : RVT_QPN_MAX;
if (rdi->driver_f.alloc_qpn)
return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
@@ -553,7 +556,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
}
qpn = qpt->last + qpt->incr;
- if (qpn >= RVT_QPN_MAX)
+ if (qpn >= max_qpn)
qpn = qpt->incr | ((qpt->last & 1) ^ 1);
/* offset carries bit 0 */
offset = qpn & RVT_BITS_PER_PAGE_MASK;
@@ -987,6 +990,9 @@ static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
struct rvt_qpn_map *map;
+ if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
+ qpn &= RVT_AIP_QP_SUFFIX;
+
map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
if (map->page)
clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
@@ -1074,13 +1080,15 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
size_t sqsize;
+ u8 exclude_prefix = 0;
if (!rdi)
return ERR_PTR(-EINVAL);
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags)
+ (init_attr->create_flags &&
+ init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
return ERR_PTR(-EINVAL);
/* Check receive queue parameters if no SRQ is specified. */
@@ -1199,14 +1207,20 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_driver_priv;
}
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ exclude_prefix = RVT_AIP_QP_PREFIX;
+
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num);
+ init_attr->port_num,
+ exclude_prefix);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
}
qp->ibqp.qp_num = err;
+ if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
+ qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
qp->port_num = init_attr->port_num;
rvt_init_qp(rdi, qp, init_attr->qp_type);
if (rdi->driver_f.qp_priv_init) {
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 72b031ab7092..f904bb34477a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -378,7 +378,6 @@ enum {
static const struct ib_device_ops rvt_dev_ops = {
.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION,
- .alloc_fmr = rvt_alloc_fmr,
.alloc_mr = rvt_alloc_mr,
.alloc_pd = rvt_alloc_pd,
.alloc_ucontext = rvt_alloc_ucontext,
@@ -387,7 +386,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.create_cq = rvt_create_cq,
.create_qp = rvt_create_qp,
.create_srq = rvt_create_srq,
- .dealloc_fmr = rvt_dealloc_fmr,
.dealloc_pd = rvt_dealloc_pd,
.dealloc_ucontext = rvt_dealloc_ucontext,
.dereg_mr = rvt_dereg_mr,
@@ -399,7 +397,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.get_dma_mr = rvt_get_dma_mr,
.get_port_immutable = rvt_get_port_immutable,
.map_mr_sg = rvt_map_mr_sg,
- .map_phys_fmr = rvt_map_phys_fmr,
.mmap = rvt_mmap,
.modify_ah = rvt_modify_ah,
.modify_device = rvt_modify_device,
@@ -420,7 +417,6 @@ static const struct ib_device_ops rvt_dev_ops = {
.reg_user_mr = rvt_reg_user_mr,
.req_notify_cq = rvt_req_notify_cq,
.resize_cq = rvt_resize_cq,
- .unmap_fmr = rvt_unmap_fmr,
INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, rvt_cq, ibcq),
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 4afdd2e20883..5642eefb4ba1 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,6 +77,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
{
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
rxe->attr.max_qp = RXE_MAX_QP;
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index 6a413d73b95d..7887f623f62c 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -35,7 +35,6 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
-#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f59616b02477..99e9d8ba9767 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -127,6 +127,9 @@ enum rxe_device_param {
/* Delay before calling arbiter timer */
RXE_NSEC_ARB_TIMER_DELAY = 200,
+
+ /* IBTA v1.4 A3.3.1 VENDOR INFORMATION section */
+ RXE_VENDOR_ID = 0XFFFFFF,
};
/* default/initial rxe port parameters */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 9dd4bd7aea92..b8a22af724e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -195,15 +195,16 @@ static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
rxe_drop_ref(pd);
}
-static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
- u32 flags, struct ib_udata *udata)
+static int rxe_create_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
{
int err;
struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah);
- err = rxe_av_chk_attr(rxe, attr);
+ err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
if (err)
return err;
@@ -211,7 +212,7 @@ static int rxe_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr,
if (err)
return err;
- rxe_init_av(attr, &ah->av);
+ rxe_init_av(init_attr->ah_attr, &ah->av);
return 0;
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index af5e9f8c0fcd..e9753831ac3f 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -30,7 +30,6 @@
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
-#define SIW_MAX_FMR SIW_MAX_MR
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD
@@ -59,7 +58,6 @@ struct siw_dev_cap {
int max_mr;
int max_pd;
int max_mw;
- int max_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
@@ -139,7 +137,7 @@ struct siw_pble {
struct siw_pbl {
unsigned int num_buf;
unsigned int max_buf;
- struct siw_pble pbe[1];
+ struct siw_pble pbe[];
};
/*
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 5cd40fb9e20c..a0b8cc643c5c 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -413,7 +413,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->attrs.max_mr = SIW_MAX_MR;
sdev->attrs.max_pd = SIW_MAX_PD;
sdev->attrs.max_mw = SIW_MAX_MW;
- sdev->attrs.max_fmr = SIW_MAX_FMR;
sdev->attrs.max_srq = SIW_MAX_SRQ;
sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
sdev->attrs.max_srq_sge = SIW_MAX_SGE;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index e2061dc0b043..34a910cf0edb 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -349,14 +349,11 @@ dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
struct siw_pbl *siw_pbl_alloc(u32 num_buf)
{
struct siw_pbl *pbl;
- int buf_size = sizeof(*pbl);
if (num_buf == 0)
return ERR_PTR(-EINVAL);
- buf_size += ((num_buf - 1) * sizeof(struct siw_pble));
-
- pbl = kzalloc(buf_size, GFP_KERNEL);
+ pbl = kzalloc(struct_size(pbl, pbe, num_buf), GFP_KERNEL);
if (!pbl)
return ERR_PTR(-ENOMEM);
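The siw_pbl_alloc() hunk above replaces the open-coded sizeof(*pbl) plus (num_buf - 1) arithmetic with struct_size() over a flexible array member (see the matching pbe[1] -> pbe[] change in siw.h). A standalone sketch of the idiom, using an example struct rather than the siw one:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Example only: a trailing flexible array member sized with
 * struct_size(), which checks the multiply-and-add for overflow
 * instead of open-coding it.
 */
struct pbl_example {
	unsigned int num_buf;
	u64 addr[];
};

static struct pbl_example *pbl_example_alloc(u32 num_buf)
{
	struct pbl_example *p;

	p = kzalloc(struct_size(p, addr, num_buf), GFP_KERNEL);
	if (!p)
		return NULL;
	p->num_buf = num_buf;
	return p;
}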
@@ -397,7 +394,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
if (!writable)
foll_flags |= FOLL_FORCE;
- down_read(&mm_s->mmap_sem);
+ mmap_read_lock(mm_s);
mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -441,7 +438,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
num_pages -= got;
}
out_sem_up:
- up_read(&mm_s->mmap_sem);
+ mmap_read_unlock(mm_s);
if (rv > 0)
return umem;
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index aeb842bc7a1e..987e2ba05dbc 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -136,7 +136,6 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->max_cq = sdev->attrs.max_cq;
attr->max_cqe = sdev->attrs.max_cqe;
attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
- attr->max_fmr = sdev->attrs.max_fmr;
attr->max_mr = sdev->attrs.max_mr;
attr->max_mw = sdev->attrs.max_mw;
attr->max_mr_size = ~0ull;
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
index 437813c7b481..4d0004b58377 100644
--- a/drivers/infiniband/ulp/Makefile
+++ b/drivers/infiniband/ulp/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
obj-$(CONFIG_INFINIBAND_ISER) += iser/
obj-$(CONFIG_INFINIBAND_ISERT) += isert/
obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic/
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs/
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ceec24d45185..3cfb682b91b0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -86,7 +86,7 @@ struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
-static void ipoib_add_one(struct ib_device *device);
+static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
@@ -479,9 +479,6 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- if (!dev_list)
- return NULL;
-
/* See if we can find a unique device matching the L2 parameters */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -529,6 +526,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
"will cause multicast packet drops\n");
netdev_update_features(dev);
dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
+ netif_set_real_num_tx_queues(dev, 1);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
@@ -540,6 +538,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
netdev_update_features(dev);
dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+ netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
@@ -1860,7 +1859,7 @@ static int ipoib_parent_init(struct net_device *ndev)
priv->port);
return result;
}
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+ priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
if (result) {
@@ -1901,6 +1900,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
int rc;
+ struct rdma_netdev *rn = netdev_priv(ndev);
if (priv->parent) {
ipoib_child_init(ndev);
@@ -1913,6 +1913,7 @@ static int ipoib_ndo_init(struct net_device *ndev)
/* MTU will be reset when mcast join happens */
ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
+ rn->mtu = priv->mcast_mtu;
ndev->max_mtu = IPOIB_CM_MTU;
ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
@@ -2074,9 +2075,17 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_do_ioctl = ipoib_ioctl,
};
+static const struct net_device_ops ipoib_netdev_default_pf = {
+ .ndo_init = ipoib_dev_init_default,
+ .ndo_uninit = ipoib_dev_uninit_default,
+ .ndo_open = ipoib_ib_dev_open_default,
+ .ndo_stop = ipoib_ib_dev_stop_default,
+};
+
void ipoib_setup_common(struct net_device *dev)
{
dev->header_ops = &ipoib_header_ops;
+ dev->netdev_ops = &ipoib_netdev_default_pf;
ipoib_set_ethtool_ops(dev);
@@ -2126,13 +2135,6 @@ static void ipoib_build_priv(struct net_device *dev)
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
-static const struct net_device_ops ipoib_netdev_default_pf = {
- .ndo_init = ipoib_dev_init_default,
- .ndo_uninit = ipoib_dev_uninit_default,
- .ndo_open = ipoib_ib_dev_open_default,
- .ndo_stop = ipoib_ib_dev_stop_default,
-};
-
static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u8 port,
const char *name)
{
@@ -2170,7 +2172,6 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
if (rc != -EOPNOTSUPP)
goto out;
- dev->netdev_ops = &ipoib_netdev_default_pf;
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
@@ -2516,7 +2517,7 @@ sysfs_failed:
return ERR_PTR(-ENOMEM);
}
-static void ipoib_add_one(struct ib_device *device)
+static int ipoib_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct net_device *dev;
@@ -2526,7 +2527,7 @@ static void ipoib_add_one(struct ib_device *device)
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
- return;
+ return -ENOMEM;
INIT_LIST_HEAD(dev_list);
@@ -2543,10 +2544,11 @@ static void ipoib_add_one(struct ib_device *device)
if (!count) {
kfree(dev_list);
- return;
+ return -EOPNOTSUPP;
}
ib_set_client_data(device, &ipoib_client, dev_list);
+ return 0;
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
@@ -2554,9 +2556,6 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
- if (!dev_list)
- return;
-
list_for_each_entry_safe(priv, tmp, dev_list, list) {
LIST_HEAD(head);
ipoib_parent_unregister_pre(priv->dev);
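The ipoib_add_one() changes above follow the ib_client interface change in this series: the ->add() callback now returns an errno instead of silently leaving the client data unset, which is why the NULL dev_list checks can be dropped. A minimal sketch of a client under the new contract (the "foo" client is hypothetical; it is assumed the core does not invoke ->remove() for a device whose ->add() failed):

#include <linux/list.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static struct ib_client foo_client;

static int foo_add_one(struct ib_device *device)
{
	struct list_head *dev_list;

	dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
	if (!dev_list)
		return -ENOMEM;
	INIT_LIST_HEAD(dev_list);

	ib_set_client_data(device, &foo_client, dev_list);
	return 0;
}

static void foo_remove_one(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* non-NULL whenever ->add() succeeded */
}

static struct ib_client foo_client = {
	.name   = "foo",
	.add    = foo_add_one,
	.remove = foo_remove_one,
};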
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b9e9562f5034..9bfa514473d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -135,12 +135,11 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
kfree(mcast);
}
-static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
- int can_sleep)
+static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
struct ipoib_mcast *mcast;
- mcast = kzalloc(sizeof(*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+ mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return NULL;
@@ -218,6 +217,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
struct rdma_ah_attr av;
int ret;
int set_qkey = 0;
+ int mtu;
mcast->mcmember = *mcmember;
@@ -240,13 +240,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
priv->broadcast->mcmember.flow_label = mcmember->flow_label;
priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
/* assume if the admin and the mcast are the same both can be changed */
+ mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
+ priv->broadcast->mcmember.mtu);
if (priv->mcast_mtu == priv->admin_mtu)
- priv->admin_mtu =
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
- else
- priv->mcast_mtu =
- IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ priv->admin_mtu = IPOIB_UD_MTU(mtu);
+ priv->mcast_mtu = IPOIB_UD_MTU(mtu);
+ rn->mtu = priv->mcast_mtu;
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
@@ -599,7 +598,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- broadcast = ipoib_mcast_alloc(dev, 0);
+ broadcast = ipoib_mcast_alloc(dev);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
/*
@@ -782,7 +781,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
mgid);
- mcast = ipoib_mcast_alloc(dev, 0);
+ mcast = ipoib_mcast_alloc(dev);
if (!mcast) {
ipoib_warn(priv, "unable to allocate memory "
"for multicast structure\n");
@@ -936,7 +935,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
mgid.raw);
- nmcast = ipoib_mcast_alloc(dev, 0);
+ nmcast = ipoib_mcast_alloc(dev);
if (!nmcast) {
ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
continue;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b69304d28f06..587252fd6f57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,6 +206,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
+ if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA)
+ init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
+
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
pr_warn("%s: failed to create QP\n", ca->name);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 8ac8e18fbe0c..30865605e098 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -97,6 +97,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
{
struct net_device *ndev = priv->dev;
int result;
+ struct rdma_netdev *rn = netdev_priv(ndev);
ASSERT_RTNL();
@@ -117,6 +118,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto out_early;
}
+ rn->mtu = priv->mcast_mtu;
+
priv->parent = ppriv->dev;
priv->pkey = pkey;
priv->child_type = type;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 029c00163442..1d77c7f42e38 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -65,7 +65,6 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
#define DRV_NAME "iser"
@@ -313,33 +312,6 @@ struct iser_comp {
};
/**
- * struct iser_reg_ops - Memory registration operations
- * per-device registration schemes
- *
- * @alloc_reg_res: Allocate registration resources
- * @free_reg_res: Free registration resources
- * @reg_mem: Register memory buffers
- * @unreg_mem: Un-register memory buffers
- * @reg_desc_get: Get a registration descriptor for pool
- * @reg_desc_put: Get a registration descriptor to pool
- */
-struct iser_reg_ops {
- int (*alloc_reg_res)(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
- void (*free_reg_res)(struct ib_conn *ib_conn);
- int (*reg_mem)(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg);
- void (*unreg_mem)(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
- struct iser_fr_desc * (*reg_desc_get)(struct ib_conn *ib_conn);
- void (*reg_desc_put)(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-};
-
-/**
* struct iser_device - iSER device handle
*
* @ib_device: RDMA device
@@ -351,8 +323,6 @@ struct iser_reg_ops {
* @comps_used: Number of completion contexts used, Min between online
* cpus and device max completion vectors
 * @comps: Dynamically allocated array of completion handlers
- * @reg_ops: Registration ops
- * @remote_inv_sup: Remote invalidate is supported on this device
*/
struct iser_device {
struct ib_device *ib_device;
@@ -362,26 +332,18 @@ struct iser_device {
int refcount;
int comps_used;
struct iser_comp *comps;
- const struct iser_reg_ops *reg_ops;
- bool remote_inv_sup;
};
/**
* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @fmr_pool: pool of fmrs
* @sig_mr: signature memory region
- * @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
- union {
- struct ib_mr *mr;
- struct ib_fmr_pool *fmr_pool;
- };
+ struct ib_mr *mr;
struct ib_mr *sig_mr;
- struct iser_page_vec *page_vec;
u8 mr_valid:1;
};
@@ -403,7 +365,7 @@ struct iser_fr_desc {
* struct iser_fr_pool - connection fast registration pool
*
* @list: list of fastreg descriptors
- * @lock: protects fmr/fastreg pool
+ * @lock: protects fastreg pool
* @size: size of the pool
*/
struct iser_fr_pool {
@@ -518,12 +480,6 @@ struct iscsi_iser_task {
struct iser_data_buf prot[ISER_DIRS_NUM];
};
-struct iser_page_vec {
- u64 *pages;
- int npages;
- struct ib_mr fake_mr;
-};
-
/**
* struct iser_global - iSER global context
*
@@ -548,8 +504,6 @@ extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
-int iser_assign_reg_ops(struct iser_device *device);
-
int iser_send_control(struct iscsi_conn *conn,
struct iscsi_task *task);
@@ -591,22 +545,17 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm);
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir);
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm);
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir);
int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *src_addr,
struct sockaddr *dst_addr,
int non_blocking);
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir);
-
int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
@@ -625,26 +574,12 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session);
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size);
-void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
unsigned int size);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
-struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn);
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc);
static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 4a7045bb0831..27a6f75a9912 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -72,7 +72,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_IN, false);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -126,8 +126,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- err = iser_reg_rdma_mem(iser_task, ISER_DIR_OUT,
- buf_out->data_len == imm_sz);
+ err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
+ buf_out->data_len == imm_sz);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -250,8 +250,8 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
- if (device->reg_ops->alloc_reg_res(ib_conn, session->scsi_cmds_max,
- iser_conn->pages_per_mr))
+ if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
+ iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
@@ -293,7 +293,7 @@ rx_desc_dma_map_failed:
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
@@ -306,8 +306,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- if (device->reg_ops->free_reg_res)
- device->reg_ops->free_reg_res(ib_conn);
+ iser_free_fastreg_pool(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
@@ -768,7 +767,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
int prot_count = scsi_prot_sg_count(iser_task->sc);
if (iser_task->dir[ISER_DIR_IN]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_IN],
DMA_FROM_DEVICE);
@@ -779,7 +778,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
}
if (iser_task->dir[ISER_DIR_OUT]) {
- iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
+ iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
iser_dma_unmap_task_data(iser_task,
&iser_task->data[ISER_DIR_OUT],
DMA_TO_DEVICE);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 999ef7cdd05e..d4e057fac219 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -38,62 +38,13 @@
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-static
-int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *mem_reg);
-
-static const struct iser_reg_ops fastreg_ops = {
- .alloc_reg_res = iser_alloc_fastreg_pool,
- .free_reg_res = iser_free_fastreg_pool,
- .reg_mem = iser_fast_reg_mr,
- .unreg_mem = iser_unreg_mem_fastreg,
- .reg_desc_get = iser_reg_desc_get_fr,
- .reg_desc_put = iser_reg_desc_put_fr,
-};
-
-static const struct iser_reg_ops fmr_ops = {
- .alloc_reg_res = iser_alloc_fmr_pool,
- .free_reg_res = iser_free_fmr_pool,
- .reg_mem = iser_fast_reg_fmr,
- .unreg_mem = iser_unreg_mem_fmr,
- .reg_desc_get = iser_reg_desc_get_fmr,
- .reg_desc_put = iser_reg_desc_put_fmr,
-};
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
iser_err_comp(wc, "memreg");
}
-int iser_assign_reg_ops(struct iser_device *device)
-{
- struct ib_device *ib_dev = device->ib_device;
-
- /* Assign function handles - based on FMR support */
- if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
- ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
- iser_info("FMR supported, using FMR for registration\n");
- device->reg_ops = &fmr_ops;
- } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
- iser_info("FastReg supported, using FastReg for registration\n");
- device->reg_ops = &fastreg_ops;
- device->remote_inv_sup = iser_always_reg;
- } else {
- iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
- return -1;
- }
-
- return 0;
-}
-
-struct iser_fr_desc *
+static struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
@@ -109,7 +60,7 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-void
+static void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc)
{
@@ -121,44 +72,6 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
-struct iser_fr_desc *
-iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
-
- return list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
-}
-
-void
-iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
-{
-}
-
-static void iser_data_buf_dump(struct iser_data_buf *data,
- struct ib_device *ibdev)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(data->sg, sg, data->dma_nents, i)
- iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
- "off:0x%x sz:0x%x dma_len:0x%x\n",
- i, (unsigned long)sg_dma_address(sg),
- sg_page(sg), sg->offset, sg->length, sg_dma_len(sg));
-}
-
-static void iser_dump_page_vec(struct iser_page_vec *page_vec)
-{
- int i;
-
- iser_err("page vec npages %d data length %lld\n",
- page_vec->npages, page_vec->fake_mr.length);
- for (i = 0; i < page_vec->npages; i++)
- iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
@@ -213,84 +126,9 @@ iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
return 0;
}
-static int iser_set_page(struct ib_mr *mr, u64 addr)
-{
- struct iser_page_vec *page_vec =
- container_of(mr, struct iser_page_vec, fake_mr);
-
- page_vec->pages[page_vec->npages++] = addr;
-
- return 0;
-}
-
-static
-int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *reg)
-{
- struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
- struct iser_page_vec *page_vec = rsc->page_vec;
- struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
- struct ib_pool_fmr *fmr;
- int ret, plen;
-
- page_vec->npages = 0;
- page_vec->fake_mr.page_size = SZ_4K;
- plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->dma_nents, NULL, iser_set_page);
- if (unlikely(plen < mem->dma_nents)) {
- iser_err("page vec too short to hold this SG\n");
- iser_data_buf_dump(mem, device->ib_device);
- iser_dump_page_vec(page_vec);
- return -EINVAL;
- }
-
- fmr = ib_fmr_pool_map_phys(fmr_pool, page_vec->pages,
- page_vec->npages, page_vec->pages[0]);
- if (IS_ERR(fmr)) {
- ret = PTR_ERR(fmr);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
- return ret;
- }
-
- reg->sge.lkey = fmr->fmr->lkey;
- reg->rkey = fmr->fmr->rkey;
- reg->sge.addr = page_vec->fake_mr.iova;
- reg->sge.length = page_vec->fake_mr.length;
- reg->mem_h = fmr;
-
- iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
- " length=0x%x\n", reg->sge.lkey, reg->rkey,
- reg->sge.addr, reg->sge.length);
-
- return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
-
- ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
-
- reg->mem_h = NULL;
-}
-
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
- struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct iser_fr_desc *desc;
struct ib_mr_status mr_status;
@@ -312,7 +150,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
&mr_status);
}
- device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn, desc);
+ iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
reg->mem_h = NULL;
}
@@ -509,15 +347,14 @@ iser_reg_data_sg(struct iscsi_iser_task *task,
if (use_dma_key)
return iser_reg_dma(device, mem, reg);
- return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
+ return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
}
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir,
- bool all_imm)
+int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
+ enum iser_data_dir dir,
+ bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_fr_desc *desc = NULL;
@@ -528,7 +365,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
if (!use_dma_key) {
- desc = device->reg_ops->reg_desc_get(ib_conn);
+ desc = iser_reg_desc_get_fr(ib_conn);
reg->mem_h = desc;
}
@@ -549,15 +386,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
err_reg:
if (desc)
- device->reg_ops->reg_desc_put(ib_conn, desc);
+ iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
-void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir dir)
-{
- struct iser_device *device = task->iser_conn->ib_conn.device;
-
- device->reg_ops->unreg_mem(task, dir);
-}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 127887c6c03f..c1f44c41f501 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -68,11 +68,12 @@ static void iser_event_handler(struct ib_event_handler *handler,
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device *ib_dev = device->ib_device;
- int ret, i, max_cqe;
+ int i, max_cqe;
- ret = iser_assign_reg_ops(device);
- if (ret)
- return ret;
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ iser_err("IB device does not support memory registrations\n");
+ return -1;
+ }
device->comps_used = min_t(int, num_online_cpus(),
ib_dev->num_comp_vectors);
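With the FMR fallback gone, iser_create_device_ib_res() above only needs to test the fast-registration capability bit. The same check, isolated into a hypothetical helper for reference:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: fast registration (FRWR) support is advertised
 * through IB_DEVICE_MEM_MGT_EXTENSIONS in the device attributes.
 */
static bool foo_supports_fastreg(struct ib_device *ib_dev)
{
	return ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS;
}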
@@ -147,96 +148,6 @@ static void iser_free_device_ib_res(struct iser_device *device)
device->pd = NULL;
}
-/**
- * iser_alloc_fmr_pool - Creates FMR pool and page_vector
- * @ib_conn: connection RDMA resources
- * @cmds_max: max number of SCSI commands for this connection
- * @size: max number of pages per map request
- *
- * Return: 0 on success, or errno code on failure
- */
-int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
- unsigned cmds_max,
- unsigned int size)
-{
- struct iser_device *device = ib_conn->device;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_page_vec *page_vec;
- struct iser_fr_desc *desc;
- struct ib_fmr_pool *fmr_pool;
- struct ib_fmr_pool_param params;
- int ret;
-
- INIT_LIST_HEAD(&fr_pool->list);
- spin_lock_init(&fr_pool->lock);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
- GFP_KERNEL);
- if (!page_vec) {
- ret = -ENOMEM;
- goto err_frpl;
- }
-
- page_vec->pages = (u64 *)(page_vec + 1);
-
- params.page_shift = ilog2(SZ_4K);
- params.max_pages_per_fmr = size;
- /* make the pool size twice the max number of SCSI commands *
- * the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = cmds_max * 2;
- params.dirty_watermark = cmds_max;
- params.cache = 0;
- params.flush_function = NULL;
- params.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- fmr_pool = ib_create_fmr_pool(device->pd, &params);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- iser_err("FMR allocation failed, err %d\n", ret);
- goto err_fmr;
- }
-
- desc->rsc.page_vec = page_vec;
- desc->rsc.fmr_pool = fmr_pool;
- list_add(&desc->list, &fr_pool->list);
-
- return 0;
-
-err_fmr:
- kfree(page_vec);
-err_frpl:
- kfree(desc);
-
- return ret;
-}
-
-/**
- * iser_free_fmr_pool - releases the FMR pool and page vec
- * @ib_conn: connection RDMA resources
- */
-void iser_free_fmr_pool(struct ib_conn *ib_conn)
-{
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
- struct iser_fr_desc *desc;
-
- desc = list_first_entry(&fr_pool->list,
- struct iser_fr_desc, list);
- list_del(&desc->list);
-
- iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, desc->rsc.fmr_pool);
-
- ib_destroy_fmr_pool(desc->rsc.fmr_pool);
- kfree(desc->rsc.page_vec);
- kfree(desc);
-}
-
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
@@ -667,13 +578,12 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
u32 max_num_sg;
/*
- * FRs without SG_GAPS or FMRs can only map up to a (device) page per
- * entry, but if the first entry is misaligned we'll end up using two
- * entries (head and tail) for a single page worth data, so one
- * additional entry is required.
+ * FRs without SG_GAPS can only map up to a (device) page per entry,
+ * but if the first entry is misaligned we'll end up using two entries
+ * (head and tail) for a single page worth data, so one additional
+ * entry is required.
*/
- if ((attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) &&
- (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
reserved_mr_pages = 0;
else
reserved_mr_pages = 1;
@@ -684,14 +594,8 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
max_num_sg = attr->max_fast_reg_page_list_len;
sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
- if (attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
- sup_sg_tablesize =
- min_t(
- uint, ISCSI_ISER_MAX_SG_TABLESIZE,
- max_num_sg - reserved_mr_pages);
- else
- sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
-
+ sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ max_num_sg - reserved_mr_pages);
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
iser_conn->pages_per_mr =
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
@@ -755,7 +659,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
struct iser_cm_hdr req_hdr;
struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct iser_device *device = ib_conn->device;
+ struct ib_device *ib_dev = ib_conn->device->ib_device;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
@@ -766,14 +670,14 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
- conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
+ conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
- if (!device->remote_inv_sup)
+ if (!iser_always_reg)
req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index a1a035270cab..b7df38ee8ae0 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
+#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -502,7 +503,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
+ return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}
spin_unlock_bh(&np->np_thread_lock);
@@ -553,7 +554,7 @@ out_rsp_dma_map:
isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
- rdma_reject(cma_id, NULL, 0);
+ rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 6e8d650c17c7..874a8eb7638c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -113,7 +113,7 @@ struct opa_vnic_vema_port {
struct mutex lock;
};
-static void opa_vnic_vema_add_one(struct ib_device *device);
+static int opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
void *client_data);
@@ -989,18 +989,18 @@ static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
*
* Allocate the vnic control port and initialize it.
*/
-static void opa_vnic_vema_add_one(struct ib_device *device)
+static int opa_vnic_vema_add_one(struct ib_device *device)
{
struct opa_vnic_ctrl_port *cport;
int rc, size = sizeof(*cport);
if (!rdma_cap_opa_vnic(device))
- return;
+ return -EOPNOTSUPP;
size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
cport = kzalloc(size, GFP_KERNEL);
if (!cport)
- return;
+ return -ENOMEM;
cport->num_ports = device->phys_port_cnt;
cport->ibdev = device;
@@ -1012,6 +1012,7 @@ static void opa_vnic_vema_add_one(struct ib_device *device)
ib_set_client_data(device, &opa_vnic_client, cport);
opa_vnic_ctrl_config_dev(cport, true);
+ return 0;
}
/**
@@ -1026,9 +1027,6 @@ static void opa_vnic_vema_rem_one(struct ib_device *device,
{
struct opa_vnic_ctrl_port *cport = client_data;
- if (!cport)
- return;
-
c_info("removing VNIC client\n");
opa_vnic_ctrl_config_dev(cport, false);
vema_unregister(cport);
diff --git a/drivers/infiniband/ulp/rtrs/Kconfig b/drivers/infiniband/ulp/rtrs/Kconfig
new file mode 100644
index 000000000000..9092b62e6dc8
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INFINIBAND_RTRS
+ tristate
+ depends on INFINIBAND_ADDR_TRANS
+
+config INFINIBAND_RTRS_CLIENT
+ tristate "RTRS client module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport client module.
+
+	  The RDMA Transport (RTRS) client implements a reliable transport layer
+	  with multipathing functionality and is intended to be the base layer
+	  for a block storage initiator over RDMA.
+
+config INFINIBAND_RTRS_SERVER
+ tristate "RTRS server module"
+ depends on INFINIBAND_ADDR_TRANS
+ select INFINIBAND_RTRS
+ help
+ RDMA transport server module.
+
+	  The RDMA Transport (RTRS) server module processes connection and IO
+	  requests received from the RTRS client module and passes the IO
+	  requests to its user, e.g. RNBD_server.
diff --git a/drivers/infiniband/ulp/rtrs/Makefile b/drivers/infiniband/ulp/rtrs/Makefile
new file mode 100644
index 000000000000..3898509be270
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+rtrs-client-y := rtrs-clt.o \
+ rtrs-clt-stats.o \
+ rtrs-clt-sysfs.o
+
+rtrs-server-y := rtrs-srv.o \
+ rtrs-srv-stats.o \
+ rtrs-srv-sysfs.o
+
+rtrs-core-y := rtrs.o
+
+obj-$(CONFIG_INFINIBAND_RTRS) += rtrs-core.o
+obj-$(CONFIG_INFINIBAND_RTRS_CLIENT) += rtrs-client.o
+obj-$(CONFIG_INFINIBAND_RTRS_SERVER) += rtrs-server.o
diff --git a/drivers/infiniband/ulp/rtrs/README b/drivers/infiniband/ulp/rtrs/README
new file mode 100644
index 000000000000..5d9ea142e5dd
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/README
@@ -0,0 +1,213 @@
+****************************
+RDMA Transport (RTRS)
+****************************
+
+RTRS (RDMA Transport) is a reliable high-speed transport library
+which allows establishing an optimal number of connections
+between client and server machines using RDMA (InfiniBand, RoCE, iWarp)
+transport. It is optimized to transfer (read/write) IO blocks.
+
+In its core interface it follows BIO semantics: it provides the
+possibility either to write data from an sg list to the remote side
+or to request ("read") a data transfer from the remote side into a
+given sg list.
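+
+A minimal usage sketch of the client side of this interface, based on the
+permit helpers added below in rtrs-clt.c (the header name, the non-blocking
+mode and the error handling are illustrative, not taken from an in-tree user):
+
+#include <linux/errno.h>
+#include "rtrs.h"	/* public RTRS client API (assumed header name) */
+
+static int example_issue_io(struct rtrs_clt *clt, void **pdu)
+{
+	struct rtrs_permit *permit;
+
+	/* non-blocking: NULL means all permits (queue slots) are in use */
+	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, 0);
+	if (!permit)
+		return -EBUSY;
+
+	*pdu = rtrs_permit_to_pdu(permit);	/* per-IO private area */
+	/* ... fill the pdu, build the sg list and submit the transfer ... */
+
+	rtrs_clt_put_permit(clt, permit);
+	return 0;
+}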
+
+RTRS provides I/O fail-over and load-balancing capabilities by using
+multipath I/O (see "add_path" and "mp_policy" configuration entries in
+Documentation/ABI/testing/sysfs-class-rtrs-client).
+
+RTRS is used by the RNBD (RDMA Network Block Device) modules.
+
+==================
+Transport protocol
+==================
+
+Overview
+--------
+An established connection between a client and a server is called an RTRS
+session. A session is associated with a set of memory chunks reserved on the
+server side for a given client for RDMA transfers. A session
+consists of multiple paths, each representing a separate physical link
+between client and server. Those are used for load balancing and failover.
+Each path consists of as many connections (QPs) as there are CPUs on
+the client.
+
+When processing an incoming write or read request, the RTRS client uses memory
+chunks reserved for it on the server side. Their number, size and addresses
+need to be exchanged between client and server during the connection
+establishment phase. Apart from the memory-related information, the client
+needs to inform the server about the session name and to identify each path
+and connection individually.
+
+On an established session the client sends write or read messages to the
+server. The server uses the immediate field to tell the client which request
+is being acknowledged and to carry the errno. The client uses the immediate
+field to tell the server which of the memory chunks has been accessed and at
+which offset the message can be found.
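+
+As an illustration only (the real decoding is done by rtrs_from_io_rsp_imm()
+in the code below; the bit split and helper names here are made up), a
+response could be packed into the 32 bit immediate field roughly like this:
+
+#include <linux/types.h>
+
+#define EXAMPLE_MSG_ID_BITS	16	/* illustrative split, not the rtrs one */
+
+static inline u32 example_to_io_rsp_imm(u32 msg_id, int errno_val)
+{
+	/* low bits carry the request id, high bits carry -errno */
+	return (msg_id & ((1U << EXAMPLE_MSG_ID_BITS) - 1)) |
+	       ((u32)(-errno_val) << EXAMPLE_MSG_ID_BITS);
+}
+
+static inline void example_from_io_rsp_imm(u32 imm, u32 *msg_id, int *errno_val)
+{
+	*msg_id = imm & ((1U << EXAMPLE_MSG_ID_BITS) - 1);
+	*errno_val = -(int)(imm >> EXAMPLE_MSG_ID_BITS);
+}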
+
+The module parameter always_invalidate addresses the security problem
+discussed at the LPC RDMA MC 2019. When always_invalidate=Y, the server side
+invalidates each RDMA buffer before handing it over to the RNBD server and
+then to the block layer. A new rkey is generated and registered for the
+buffer after it returns from the block layer and the RNBD server.
+The new rkey is sent back to the client along with the IO result.
+This procedure is the default behaviour of the driver. The invalidation and
+registration on each IO causes a performance drop of up to 20%. A user of the
+driver may choose to load the modules with this mechanism switched off
+(always_invalidate=N) if they understand and can accept the risk of a malicious
+client being able to corrupt the memory of a server it is connected to. This
+might be a reasonable option in a scenario where all the clients and all the
+servers are located within a secure datacenter.
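+
+A conceptual sketch of that server-side rkey handling (the function and
+variable names are placeholders; posting the IB_WR_REG_MR re-registration and
+sending the new rkey back to the client are left out):
+
+#include <rdma/ib_verbs.h>
+
+static void example_inv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	/* completion of the local invalidation; error handling omitted */
+}
+
+static struct ib_cqe example_inv_cqe = { .done = example_inv_done };
+
+static int example_refresh_rkey(struct ib_qp *qp, struct ib_mr *mr)
+{
+	struct ib_send_wr inv_wr = {
+		.opcode			= IB_WR_LOCAL_INV,
+		.wr_cqe			= &example_inv_cqe,
+		.send_flags		= IB_SEND_SIGNALED,
+		.ex.invalidate_rkey	= mr->rkey,
+	};
+	int err;
+
+	/* 1. make the old rkey unusable before the buffer leaves rtrs */
+	err = ib_post_send(qp, &inv_wr, NULL);
+	if (err)
+		return err;
+
+	/* ... the IO is processed by the RNBD server and the block layer ... */
+
+	/* 2. pick the next key value to be used when the MR is re-registered */
+	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+
+	return 0;
+}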
+
+
+Connection establishment
+------------------------
+
+1. The client establishes the connections belonging to a path of a session one
+by one by attaching RTRS_MSG_CON_REQ messages to the rdma_connect requests.
+These include the UUID of the session and the UUID of the path to be
+established. They are used by the server to find a persisting session/path or
+to create a new one when necessary. The message also contains the protocol
+version and magic for compatibility, the total number of connections per
+session (as many as CPUs on the client), the id of the current connection and
+the reconnect counter, which is used to resolve situations where the client is
+trying to reconnect a path while the server is still destroying the old one.
+
+2. The server accepts the connection requests one by one and attaches
+RTRS_MSG_CONN_RSP messages to the rdma_accept. Apart from magic and
+protocol version, the messages include an error code, the queue depth supported
+by the server (the number of memory chunks which are going to be allocated for
+that session) and the maximum size of one IO. The RTRS_MSG_NEW_RKEY_F flag is
+set when always_invalidate=Y.
+
+3. After all connections of a path are established, the client sends the
+RTRS_MSG_INFO_REQ message, containing the name of the session, to the server.
+This message requests the address information from the server.
+
+4. Server replies to the session info request message with RTRS_MSG_INFO_RSP,
+which contains the addresses and keys of the RDMA buffers allocated for that
+session.
+
+5. The session becomes connected after all paths to be established are
+connected (i.e. steps 1-4 have finished for all paths requested for a session).
+
+6. The server and client periodically exchange heartbeat messages (empty RDMA
+messages with an immediate field) which are used to detect a crash of the
+remote side or a network outage in the absence of IO.
+
+7. On any RDMA-related error or in the case of a heartbeat timeout, the
+corresponding path is disconnected, all inflight IOs are failed over to a
+healthy path, if any, and the reconnect mechanism is triggered.
+
+CLT SRV
+*for each connection belonging to a path and for each path:
+RTRS_MSG_CON_REQ ------------------->
+ <------------------- RTRS_MSG_CON_RSP
+...
+*after all connections are established:
+RTRS_MSG_INFO_REQ ------------------->
+ <------------------- RTRS_MSG_INFO_RSP
+*heartbeat is started from both sides:
+ -------------------> [RTRS_HB_MSG_IMM]
+[RTRS_HB_MSG_ACK] <-------------------
+[RTRS_HB_MSG_IMM] <-------------------
+ -------------------> [RTRS_HB_MSG_ACK]
+
+IO path
+-------
+
+* Write (always_invalidate=N) *
+
+1. When processing a write request, the client selects one of the memory chunks
+on the server side and RDMA-writes the user data, the user header and the
+RTRS_MSG_RDMA_WRITE message there. Apart from the type (write), the message
+only contains the size of the user header. The client tells the server which
+chunk has been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be
+found by using the IMM field.
+
+2. When confirming a write request, the server sends an "empty" RDMA message
+with an immediate field. The 32-bit field is used to identify the outstanding
+inflight IO and to carry the error code.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+* Write (always_invalidate=Y) *
+
+1. When processing a write request, the client selects one of the memory chunks
+on the server side and RDMA-writes the user data, the user header and the
+RTRS_MSG_RDMA_WRITE message there. Apart from the type (write), the message
+only contains the size of the user header. The client tells the server which
+chunk has been accessed and at what offset the RTRS_MSG_RDMA_WRITE can be
+found by using the IMM field. The server first invalidates the rkey associated
+with the memory chunk; when that finishes, it passes the IO to the RNBD server
+module.
+
+2. When confirming a write request, the server sends an "empty" RDMA message
+with an immediate field. The 32-bit field is used to identify the outstanding
+inflight IO and to carry the error code. The new rkey is sent back using a
+SEND_WITH_IMM WR. When the client receives the new rkey message, it validates
+the message, updates the rkey for the rbuffer and finishes the IO, then posts
+the recv buffer back for later use.
+
+CLT SRV
+usr_data + usr_hdr + rtrs_msg_rdma_write -----------------> [RTRS_IO_REQ_IMM]
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+[RTRS_IO_RSP_IMM] <----------------- (id + errno)
+
+
+* Read (always_invalidate=N) *
+
+1. When processing a read request, the client selects one of the memory chunks
+on the server side and RDMA-writes the user header and the
+RTRS_MSG_RDMA_READ message there. This message contains the type (read), the
+size of the user header, flags (specifying if memory invalidation is necessary)
+and the list of addresses along with keys for the data to be read into.
+
+2. When confirming a read request, the server transfers the requested data
+first, attaches an invalidation message if requested and finally an "empty"
+RDMA message with an immediate field. The 32-bit field is used to identify the
+outstanding inflight IO and to carry the error code.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+* Read (always_invalidate=Y) *
+
+1. When processing a read request, the client selects one of the memory chunks
+on the server side and RDMA-writes the user header and the
+RTRS_MSG_RDMA_READ message there. This message contains the type (read), the
+size of the user header, flags (specifying if memory invalidation is necessary)
+and the list of addresses along with keys for the data to be read into.
+The server first invalidates the rkey associated with the memory chunk; when
+that finishes, it passes the IO to the RNBD server module.
+
+2. When confirming a read request, the server transfers the requested data
+first, attaches an invalidation message if requested and finally an "empty"
+RDMA message with an immediate field. The 32-bit field is used to identify the
+outstanding inflight IO and to carry the error code. The new rkey is sent back
+using a SEND_WITH_IMM WR. When the client receives the new rkey message, it
+validates the message, updates the rkey for the rbuffer and finishes the IO,
+then posts the recv buffer back for later use.
+
+CLT SRV
+usr_hdr + rtrs_msg_rdma_read --------------> [RTRS_IO_REQ_IMM]
+[RTRS_IO_RSP_IMM] <-------------- usr_data + (id + errno)
+[RTRS_MSG_RKEY_RSP] <----------------- (RTRS_MSG_RKEY_RSP)
+or in case client requested invalidation:
+[RTRS_IO_RSP_IMM_W_INV] <-------------- usr_data + (INV) + (id + errno)
+
+=========================================
+Contributors List (in alphabetical order)
+=========================================
+Danil Kipnis <danil.kipnis@profitbricks.com>
+Fabian Holler <mail@fholler.de>
+Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
+Jack Wang <jinpu.wang@profitbricks.com>
+Kleber Souza <kleber.souza@profitbricks.com>
+Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
+Milind Dumbare <Milind.dumbare@gmail.com>
+Roman Penyaev <roman.penyaev@profitbricks.com>
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
new file mode 100644
index 000000000000..26bbe5d6dff5
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-clt.h"
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ cpu = raw_smp_processor_id();
+ s = this_cpu_ptr(stats->pcpu_stats);
+ if (unlikely(con->cpu != cpu)) {
+ s->cpu_migr.to++;
+
+ /* Careful here, override s pointer */
+ s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
+ atomic_inc(&s->cpu_migr.from);
+ }
+}
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.failover_cnt++;
+}
+
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
+ char *buf, size_t len)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ size_t used;
+ int cpu;
+
+ used = scnprintf(buf, len, " ");
+ for_each_possible_cpu(cpu)
+ used += scnprintf(buf + used, len - used, " CPU%u", cpu);
+
+ used += scnprintf(buf + used, len - used, "\nfrom:");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ atomic_read(&s->cpu_migr.from));
+ }
+
+ used += scnprintf(buf + used, len - used, "\nto :");
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ used += scnprintf(buf + used, len - used, " %d",
+ s->cpu_migr.to);
+ }
+ used += scnprintf(buf + used, len - used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len)
+{
+ return scnprintf(buf, len, "%d %d\n",
+ stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
+}
+
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_clt_stats_rdma sum;
+ struct rtrs_clt_stats_rdma *r;
+ int cpu;
+
+ memset(&sum, 0, sizeof(sum));
+
+ for_each_possible_cpu(cpu) {
+ r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
+
+ sum.dir[READ].cnt += r->dir[READ].cnt;
+ sum.dir[READ].size_total += r->dir[READ].size_total;
+ sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
+ sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
+ sum.failover_cnt += r->failover_cnt;
+ }
+
+ return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
+ sum.dir[READ].cnt, sum.dir[READ].size_total,
+ sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
+ atomic_read(&stats->inflight), sum.failover_cnt);
+}
+
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
+ char *page, size_t len)
+{
+ return scnprintf(page, len, "echo 1 to reset all statistics\n");
+}
+
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->rdma, 0, sizeof(s->rdma));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
+{
+ struct rtrs_clt_stats_pcpu *s;
+ int cpu;
+
+ if (!enable)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ s = per_cpu_ptr(stats->pcpu_stats, cpu);
+ memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
+ }
+
+ return 0;
+}
+
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
+{
+ if (!enable)
+ return -EINVAL;
+
+ memset(&stats->reconnects, 0, sizeof(stats->reconnects));
+
+ return 0;
+}
+
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
+{
+ if (enable) {
+ rtrs_clt_reset_rdma_stats(s, enable);
+ rtrs_clt_reset_cpu_migr_stats(s, enable);
+ rtrs_clt_reset_reconnects_stat(s, enable);
+ atomic_set(&s->inflight, 0);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
+ size_t size, int d)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ s = this_cpu_ptr(stats->pcpu_stats);
+ s->rdma.dir[d].cnt++;
+ s->rdma.dir[d].size_total += size;
+}
+
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_stats *stats = sess->stats;
+ unsigned int len;
+
+ len = req->usr_len + req->data_len;
+ rtrs_clt_update_rdma_stats(stats, len, dir);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_inc(&stats->inflight);
+}
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
+{
+ stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
+ if (!stats->pcpu_stats)
+ return -ENOMEM;
+
+ /*
+ * successful_cnt will be set to 0 after session
+ * is established for the first time
+ */
+ stats->reconnects.successful_cnt = -1;
+
+ return 0;
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
new file mode 100644
index 000000000000..298b747d0330
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define MIN_MAX_RECONN_ATT -1
+#define MAX_MAX_RECONN_ATT 9999
+
+static void rtrs_clt_sess_release(struct kobject *kobj)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ free_sess(sess);
+}
+
+static struct kobj_type ktype_sess = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_release
+};
+
+static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_clt_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
+
+ free_percpu(stats->pcpu_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_clt_sess_stats_release,
+};
+
+static ssize_t max_reconnect_attempts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ return sprintf(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt));
+}
+
+static ssize_t max_reconnect_attempts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int value;
+ int ret;
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (ret) {
+ rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
+ attr->attr.name, buf);
+ return ret;
+ }
+ if (value > MAX_MAX_RECONN_ATT ||
+ value < MIN_MAX_RECONN_ATT) {
+ rtrs_err(clt,
+ "%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
+ attr->attr.name, buf, MIN_MAX_RECONN_ATT,
+ MAX_MAX_RECONN_ATT);
+ return -EINVAL;
+ }
+ rtrs_clt_set_max_reconnect_attempts(clt, value);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(max_reconnect_attempts);
+
+static ssize_t mpath_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt *clt;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ switch (clt->mp_policy) {
+ case MP_POLICY_RR:
+ return sprintf(page, "round-robin (RR: %d)\n", clt->mp_policy);
+ case MP_POLICY_MIN_INFLIGHT:
+ return sprintf(page, "min-inflight (MI: %d)\n", clt->mp_policy);
+ default:
+ return sprintf(page, "Unknown (%d)\n", clt->mp_policy);
+ }
+}
+
+static ssize_t mpath_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct rtrs_clt *clt;
+ int value;
+ int ret;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ ret = kstrtoint(buf, 10, &value);
+ if (!ret && (value == MP_POLICY_RR ||
+ value == MP_POLICY_MIN_INFLIGHT)) {
+ clt->mp_policy = value;
+ return count;
+ }
+
+ if (!strncasecmp(buf, "round-robin", 11) ||
+ !strncasecmp(buf, "rr", 2))
+ clt->mp_policy = MP_POLICY_RR;
+ else if (!strncasecmp(buf, "min-inflight", 12) ||
+ !strncasecmp(buf, "mi", 2))
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(mpath_policy);
+
+static ssize_t add_path_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ return scnprintf(page, PAGE_SIZE,
+ "Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
+ attr->attr.name);
+}
+
+static ssize_t add_path_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sockaddr_storage srcaddr, dstaddr;
+ struct rtrs_addr addr = {
+ .src = &srcaddr,
+ .dst = &dstaddr
+ };
+ struct rtrs_clt *clt;
+ const char *nl;
+ size_t len;
+ int err;
+
+ clt = container_of(dev, struct rtrs_clt, dev);
+
+ nl = strchr(buf, '\n');
+ if (nl)
+ len = nl - buf;
+ else
+ len = count;
+ err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
+ if (err)
+ return -EINVAL;
+
+ err = rtrs_clt_create_path_from_sysfs(clt, &addr);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(add_path);
+
+static ssize_t rtrs_clt_state_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (sess->state == RTRS_CLT_CONNECTED)
+ return sprintf(page, "connected\n");
+
+ return sprintf(page, "disconnected\n");
+}
+
+static struct kobj_attribute rtrs_clt_state_attr =
+ __ATTR(state, 0444, rtrs_clt_state_show, NULL);
+
+static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_reconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_reconnect_attr =
+ __ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
+ rtrs_clt_reconnect_store);
+
+static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_disconnect_from_sysfs(sess);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_disconnect_attr =
+ __ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
+ rtrs_clt_disconnect_store);
+
+static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_clt_sess *sess;
+ int ret;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+ ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_clt_remove_path_attr =
+ __ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
+ rtrs_clt_remove_path_store);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration,
+ rtrs_clt_stats_migration_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reconnects,
+ rtrs_clt_stats_reconnects_to_str,
+ rtrs_clt_reset_reconnects_stat);
+
+STAT_ATTR(struct rtrs_clt_stats, rdma,
+ rtrs_clt_stats_rdma_to_str,
+ rtrs_clt_reset_rdma_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, reset_all,
+ rtrs_clt_reset_all_help,
+ rtrs_clt_reset_all_stats);
+
+static struct attribute *rtrs_clt_stats_attrs[] = {
+ &cpu_migration_attr.attr,
+ &reconnects_attr.attr,
+ &rdma_attr.attr,
+ &reset_all_attr.attr,
+ NULL
+};
+
+static struct attribute_group rtrs_clt_stats_attr_group = {
+ .attrs = rtrs_clt_stats_attrs,
+};
+
+static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%u\n", sess->hca_port);
+}
+
+static struct kobj_attribute rtrs_clt_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
+
+static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n", sess->hca_name);
+}
+
+static struct kobj_attribute rtrs_clt_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
+
+static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
+
+static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_clt_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_clt_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
+
+static struct attribute *rtrs_clt_sess_attrs[] = {
+ &rtrs_clt_hca_name_attr.attr,
+ &rtrs_clt_hca_port_attr.attr,
+ &rtrs_clt_src_addr_attr.attr,
+ &rtrs_clt_dst_addr_attr.attr,
+ &rtrs_clt_state_attr.attr,
+ &rtrs_clt_reconnect_attr.attr,
+ &rtrs_clt_disconnect_attr.attr,
+ &rtrs_clt_remove_path_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_sess_attr_group = {
+ .attrs = rtrs_clt_sess_attrs,
+};
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ "%s", str);
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ if (err) {
+ pr_err("sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ pr_err("kobject_init_and_add: %d\n", err);
+ goto remove_group;
+ }
+
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_clt_stats_attr_group);
+ if (err) {
+ pr_err("failed to create stats sysfs group, err: %d\n", err);
+ goto put_kobj_stats;
+ }
+
+ return 0;
+
+put_kobj_stats:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ return err;
+}
+
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ if (sysfs_self)
+ sysfs_remove_file_self(&sess->kobj, sysfs_self);
+ kobject_del(&sess->kobj);
+}
+
+static struct attribute *rtrs_clt_attrs[] = {
+ &dev_attr_max_reconnect_attempts.attr,
+ &dev_attr_mpath_policy.attr,
+ &dev_attr_add_path.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_clt_attr_group = {
+ .attrs = rtrs_clt_attrs,
+};
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+{
+ return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
+
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt)
+{
+ if (clt->kobj_paths) {
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ }
+}
+
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt)
+{
+ sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
new file mode 100644
index 000000000000..564388a85603
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -0,0 +1,2992 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "rtrs-clt.h"
+#include "rtrs-log.h"
+
+#define RTRS_CONNECT_TIMEOUT_MS 30000
+/*
+ * Wait a bit before trying to reconnect after a failure
+ * in order to give the server time to finish cleaning up, which
+ * otherwise leads to "false positive" failed reconnect attempts
+ */
+#define RTRS_RECONNECT_BACKOFF 1000
+
+MODULE_DESCRIPTION("RDMA Transport Client");
+MODULE_LICENSE("GPL");
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
+static struct rtrs_rdma_dev_pd dev_pd = {
+ .ops = &dev_pd_ops
+};
+
+static struct workqueue_struct *rtrs_wq;
+static struct class *rtrs_clt_dev_class;
+
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess;
+ bool connected = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ rcu_read_unlock();
+
+ return connected;
+}
+
+static struct rtrs_permit *
+__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+{
+ size_t max_depth = clt->queue_depth;
+ struct rtrs_permit *permit;
+ int bit;
+
+ /*
+ * Adapted from null_blk get_tag(). Callers from different cpus may
+ * grab the same bit, since find_first_zero_bit is not atomic.
+ * But then the test_and_set_bit_lock will fail for all the
+ * callers but one, so that they will loop again.
+ * This way an explicit spinlock is not required.
+ */
+ do {
+ bit = find_first_zero_bit(clt->permits_map, max_depth);
+ if (unlikely(bit >= max_depth))
+ return NULL;
+ } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
+
+ permit = get_permit(clt, bit);
+ WARN_ON(permit->mem_id != bit);
+ permit->cpu_id = raw_smp_processor_id();
+ permit->con_type = con_type;
+
+ return permit;
+}
+
+static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+ struct rtrs_permit *permit)
+{
+ clear_bit_unlock(permit->mem_id, clt->permits_map);
+}
+
+/**
+ * rtrs_clt_get_permit() - allocates permit for future RDMA operation
+ * @clt: Current session
+ * @con_type: Type of connection to use with the permit
+ * @can_wait: Wait type
+ *
+ * Description:
+ * Allocates permit for the following RDMA operation. Permit is used
+ * to preallocate all resources and to propagate memory pressure
+ * up earlier.
+ *
+ * Context:
+ * Can sleep if @can_wait is set
+ */
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+ enum rtrs_clt_con_type con_type,
+ int can_wait)
+{
+ struct rtrs_permit *permit;
+ DEFINE_WAIT(wait);
+
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit) || !can_wait)
+ return permit;
+
+ do {
+ prepare_to_wait(&clt->permits_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ permit = __rtrs_get_permit(clt, con_type);
+ if (likely(permit))
+ break;
+
+ io_schedule();
+ } while (1);
+
+ finish_wait(&clt->permits_wait, &wait);
+
+ return permit;
+}
+EXPORT_SYMBOL(rtrs_clt_get_permit);
+
+/**
+ * rtrs_clt_put_permit() - puts allocated permit
+ * @clt: Current session
+ * @permit: Permit to be freed
+ *
+ * Context:
+ * Does not matter
+ */
+void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+{
+ if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
+ return;
+
+ __rtrs_put_permit(clt, permit);
+
+ /*
+ * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
+ * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
+ * it must have added itself to &clt->permits_wait before
+ * __rtrs_put_permit() finished.
+ * Hence it is safe to guard wake_up() with a waitqueue_active() test.
+ */
+ if (waitqueue_active(&clt->permits_wait))
+ wake_up(&clt->permits_wait);
+}
+EXPORT_SYMBOL(rtrs_clt_put_permit);
+
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
+{
+ return permit + 1;
+}
+EXPORT_SYMBOL(rtrs_permit_to_pdu);
+
+/**
+ * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
+ * @sess: client session pointer
+ * @permit: permit for the allocation of the RDMA buffer
+ * Note:
+ * IO connections start from 1.
+ * Connection 0 is reserved for user messages.
+ */
+static
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit)
+{
+ int id = 0;
+
+ if (likely(permit->con_type == RTRS_IO_CON))
+ id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
+
+ return to_clt_con(sess->s.con[id]);
+}
+
+/**
+ * __rtrs_clt_change_state() - change the session state through session state
+ * machine.
+ *
+ * @sess: client session to change the state of.
+ * @new_state: state to change to.
+ *
+ * returns true if successful, false if the requested state can not be set.
+ *
+ * Locks:
+ * state_wq lock must be held.
+ */
+static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_wq.lock);
+
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_CLT_CONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_RECONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_RECONNECTING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTED:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTED:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CONNECTING_ERR:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSING:
+ switch (old_state) {
+ case RTRS_CLT_CONNECTING:
+ case RTRS_CLT_CONNECTING_ERR:
+ case RTRS_CLT_RECONNECTING:
+ case RTRS_CLT_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_CLOSED:
+ switch (old_state) {
+ case RTRS_CLT_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_CLT_DEAD:
+ switch (old_state) {
+ case RTRS_CLT_CLOSED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed) {
+ sess->state = new_state;
+ wake_up_locked(&sess->state_wq);
+ }
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state old_state,
+ enum rtrs_clt_state new_state)
+{
+ bool changed = false;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ if (sess->state == old_state)
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ if (rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_RECONNECTING)) {
+ struct rtrs_clt *clt = sess->clt;
+ unsigned int delay_ms;
+
+ /*
+ * Normal scenario, reconnect if we were successfully connected
+ */
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ } else {
+ /*
+ * Error can happen just on establishing new connection,
+ * so notify waiter with error state, waiter is responsible
+ * for cleaning the rest and reconnect if needed.
+ */
+ rtrs_clt_change_state_from_to(sess,
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR);
+ }
+}
+
+static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static struct ib_cqe fast_reg_cqe = {
+ .done = rtrs_clt_fast_reg_done
+};
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait);
+
+static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_io_req *req =
+ container_of(wc->wr_cqe, typeof(*req), inv_cqe);
+ struct rtrs_clt_con *con = cq->cq_context;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ req->need_inv = false;
+ if (likely(req->need_inv_comp))
+ complete(&req->inv_comp);
+ else
+ /* Complete request from INV callback */
+ complete_rdma_req(req, req->inv_errno, true, false);
+}
+
+static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &req->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = req->mr->rkey,
+ };
+ req->inv_cqe.done = rtrs_clt_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
+ bool notify, bool can_wait)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ if (WARN_ON(!req->in_use))
+ return;
+ if (WARN_ON(!req->con))
+ return;
+ sess = to_clt_sess(con->c.sess);
+
+ if (req->sg_cnt) {
+ if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
+ /*
+ * We are here to invalidate read requests
+ * ourselves. In normal scenario server should
+ * send INV for all read requests, but
+ * we are here, thus two things could happen:
+ *
+ * 1. this is failover, when errno != 0
+ * and can_wait == 1,
+ *
+ * 2. something totally bad happened and
+ * server forgot to send INV, so we
+ * should do that ourselves.
+ */
+
+ if (likely(can_wait)) {
+ req->need_inv_comp = true;
+ } else {
+ /* This should be IO path, so always notify */
+ WARN_ON(!notify);
+ /* Save errno for INV callback */
+ req->inv_errno = errno;
+ }
+
+ err = rtrs_inv_rkey(req);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ req->mr->rkey, err);
+ } else if (likely(can_wait)) {
+ wait_for_completion(&req->inv_comp);
+ } else {
+ /*
+ * Something went wrong, so request will be
+ * completed from INV callback.
+ */
+ WARN_ON_ONCE(1);
+
+ return;
+ }
+ }
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+
+ req->in_use = false;
+ req->con = NULL;
+
+ if (notify)
+ req->conf(req->priv, errno);
+}
+
+static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf, u32 off,
+ u32 imm, struct ib_send_wr *wr)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ enum ib_send_flags flags;
+ struct ib_sge sge;
+
+ if (unlikely(!req->sg_size)) {
+ rtrs_wrn(con->c.sess,
+ "Doing RDMA Write failed, no data supplied\n");
+ return -EINVAL;
+ }
+
+ /* user data and user message in the first list element */
+ sge.addr = req->iu->dma_addr;
+ sge.length = req->sg_size;
+ sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or send queue will fill up and only QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ req->sg_size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
+ rbuf->rkey, rbuf->addr + off,
+ imm, flags, wr);
+}
+
+static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+ s16 errno, bool w_inval)
+{
+ struct rtrs_clt_io_req *req;
+
+ if (WARN_ON(msg_id >= sess->queue_depth))
+ return;
+
+ req = &sess->reqs[msg_id];
+ /* Drop need_inv if server responded with send with invalidation */
+ req->need_inv &= !w_inval;
+ complete_rdma_req(req, errno, true, false);
+}
+
+static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_iu *iu;
+ int err;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu,
+ cqe);
+ err = rtrs_iu_post_recv(&con->c, iu);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_rdma_error_recovery(con);
+ }
+}
+
+static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_rkey_rsp *msg;
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ struct rtrs_iu *iu;
+ u32 buf_id;
+ int err;
+
+ WARN_ON(sess->flags != RTRS_MSG_NEW_RKEY_F);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
+ rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ buf_id = le16_to_cpu(msg->buf_id);
+ if (WARN_ON(buf_id >= sess->queue_depth))
+ goto out;
+
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ if (WARN_ON(buf_id != msg_id))
+ goto out;
+ sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(sess, msg_id, err, w_inval);
+ }
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ return rtrs_clt_recv_done(con, wc);
+out:
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_clt_rdma_done
+};
+
+/*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr_arr[2], *wr;
+ int i;
+
+ memset(wr_arr, 0, sizeof(wr_arr));
+ for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
+ wr = &wr_arr[i];
+ wr->wr_cqe = cqe;
+ if (i)
+ /* Chain backwards */
+ wr->next = &wr_arr[i - 1];
+ }
+
+ return ib_post_recv(con->qp, wr, NULL);
+}
+
+static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u32 imm_type, imm_payload;
+ bool w_inval = false;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(sess->clt, "RDMA failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_rdma_error_recovery(con);
+ }
+ return;
+ }
+ rtrs_clt_update_wc_stats(con);
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
+ return;
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_RSP_IMM ||
+ imm_type == RTRS_IO_RSP_W_INV_IMM)) {
+ u32 msg_id;
+
+ w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
+ rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
+
+ process_io_rsp(sess, msg_id, err, w_inval);
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F)
+ return rtrs_clt_recv_done(con, wc);
+ } else {
+ rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ imm_type);
+ }
+ if (w_inval)
+ /*
+ * Post x2 empty WRs: first is for this RDMA with IMM,
+ * second is for RECV with INV, which happened earlier.
+ */
+ err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
+ else
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ err);
+ rtrs_rdma_error_recovery(con);
+ break;
+ }
+ break;
+ case IB_WC_RECV:
+ /*
+ * Key invalidations from server side
+ */
+ WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
+ wc->wc_flags & IB_WC_WITH_IMM));
+ WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
+ return rtrs_clt_recv_done(con, wc);
+
+ return rtrs_clt_rkey_rsp_done(con, wc);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ break;
+
+ default:
+ rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
+{
+ int err, i;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ for (i = 0; i < q_size; i++) {
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F) {
+ struct rtrs_iu *iu = &con->rsp_ius[i];
+
+ err = rtrs_iu_post_recv(&con->c, iu);
+ } else {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ }
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_clt_sess *sess)
+{
+ size_t q_size = 0;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = sess->queue_depth;
+
+ /*
+ * x2 for RDMA read responses + FR key invalidations,
+ * RDMA writes do not require any FR registrations.
+ */
+ q_size *= 2;
+
+ err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+struct path_it {
+ int i;
+ struct list_head skip_list;
+ struct rtrs_clt *clt;
+ struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+};
+
+/**
+ * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
+ * @head: the head for the list.
+ * @ptr: the list head to take the next element from.
+ * @type: the type of the struct this is embedded in.
+ * @memb: the name of the list_head within the struct.
+ *
+ * Next element returned in round-robin fashion, i.e. head will be skipped,
+ * but if list is observed as empty, NULL will be returned.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_next_or_null_rr_rcu(head, ptr, type, memb) \
+({ \
+ list_next_or_null_rcu(head, ptr, type, memb) ?: \
+ list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
+ type, memb); \
+})
+
+/**
+ * get_next_path_rr() - Returns path in round-robin fashion.
+ * @it: the path pointer
+ *
+ * Related to @MP_POLICY_RR
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+{
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_sess *path;
+ struct rtrs_clt *clt;
+
+ clt = it->clt;
+
+ /*
+ * Here we use two RCU objects: @paths_list and @pcpu_path
+ * pointer. See rtrs_clt_remove_path_from_arr() for details
+ * how that is handled.
+ */
+
+ ppcpu_path = this_cpu_ptr(clt->pcpu_path);
+ path = rcu_dereference(*ppcpu_path);
+ if (unlikely(!path))
+ path = list_first_or_null_rcu(&clt->paths_list,
+ typeof(*path), s.entry);
+ else
+ path = list_next_or_null_rr_rcu(&clt->paths_list,
+ &path->s.entry,
+ typeof(*path),
+ s.entry);
+ rcu_assign_pointer(*ppcpu_path, path);
+
+ return path;
+}
+
+/**
+ * get_next_path_min_inflight() - Returns path with minimal inflight count.
+ * @it: the path iterator
+ *
+ * Related to @MP_POLICY_MIN_INFLIGHT
+ *
+ * Locks:
+ * rcu_read_lock() must be held.
+ */
+static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+{
+ struct rtrs_clt_sess *min_path = NULL;
+ struct rtrs_clt *clt = it->clt;
+ struct rtrs_clt_sess *sess;
+ int min_inflight = INT_MAX;
+ int inflight;
+
+ list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+ if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+ continue;
+
+ inflight = atomic_read(&sess->stats->inflight);
+
+ if (inflight < min_inflight) {
+ min_inflight = inflight;
+ min_path = sess;
+ }
+ }
+
+ /*
+ * add the path to the skip list, so that next time we can get
+ * a different one
+ */
+ if (min_path)
+ list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+
+ return min_path;
+}
+
+static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+{
+ INIT_LIST_HEAD(&it->skip_list);
+ it->clt = clt;
+ it->i = 0;
+
+ if (clt->mp_policy == MP_POLICY_RR)
+ it->next_path = get_next_path_rr;
+ else
+ it->next_path = get_next_path_min_inflight;
+}
+
+static inline void path_it_deinit(struct path_it *it)
+{
+ struct list_head *skip, *tmp;
+ /*
+ * The skip_list is used only for the MIN_INFLIGHT policy.
+ * We need to remove paths from it, so that the next IO can insert
+ * paths (->mp_skip_entry) into the skip_list again.
+ */
+ list_for_each_safe(skip, tmp, &it->skip_list)
+ list_del_init(skip);
+}
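+
+/*
+ * The iterator above is always used in the same pattern; a minimal sketch
+ * (mirroring rtrs_clt_failover_req() and rtrs_clt_request() below, not an
+ * additional code path of the driver):
+ *
+ *	rcu_read_lock();
+ *	for (path_it_init(&it, clt);
+ *	     (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ *		if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ *			continue;
+ *		... try the IO on @sess, break on success ...
+ *	}
+ *	path_it_deinit(&it);
+ *	rcu_read_unlock();
+ */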
+
+/**
+ * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
+ * about an inflight IO.
+ * The user buffer holding the user control message (not data) is copied into
+ * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
+ * also hold the control message of rtrs.
+ * @req: an io request holding information about IO.
+ * @sess: client session
+ * @conf: confirmation callback function to notify upper layer.
+ * @permit: permit for allocation of RDMA remote buffer
+ * @priv: private pointer
+ * @vec: kernel vector containing control message
+ * @usr_len: length of the user message
+ * @sg: scatter list for IO data
+ * @sg_cnt: number of scatter list entries
+ * @data_len: length of the IO data
+ * @dir: direction of the IO.
+ */
+static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
+ struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct iov_iter iter;
+ size_t len;
+
+ req->permit = permit;
+ req->in_use = true;
+ req->usr_len = usr_len;
+ req->data_len = data_len;
+ req->sglist = sg;
+ req->sg_cnt = sg_cnt;
+ req->priv = priv;
+ req->dir = dir;
+ req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->conf = conf;
+ req->need_inv = false;
+ req->need_inv_comp = false;
+ req->inv_errno = 0;
+
+ iov_iter_kvec(&iter, READ, vec, 1, usr_len);
+ len = _copy_from_iter(req->iu->buf, usr_len, &iter);
+ WARN_ON(len != usr_len);
+
+ reinit_completion(&req->inv_comp);
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+ void (*conf)(void *priv, int errno),
+ struct rtrs_permit *permit, void *priv,
+ const struct kvec *vec, size_t usr_len,
+ struct scatterlist *sg, size_t sg_cnt,
+ size_t data_len, int dir)
+{
+ struct rtrs_clt_io_req *req;
+
+ req = &sess->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ sg, sg_cnt, data_len, dir);
+ return req;
+}
+
+static struct rtrs_clt_io_req *
+rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_io_req *req;
+ struct kvec vec = {
+ .iov_base = fail_req->iu->buf,
+ .iov_len = fail_req->usr_len
+ };
+
+ req = &alive_sess->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ fail_req->priv, &vec, fail_req->usr_len,
+ fail_req->sglist, fail_req->sg_cnt,
+ fail_req->data_len, fail_req->dir);
+ return req;
+}
+
+static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
+ struct rtrs_clt_io_req *req,
+ struct rtrs_rbuf *rbuf,
+ u32 size, u32 imm)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct ib_sge *sge = req->sge;
+ enum ib_send_flags flags;
+ struct scatterlist *sg;
+ size_t num_sge;
+ int i;
+
+ for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ sge[i].addr = sg_dma_address(sg);
+ sge[i].length = sg_dma_len(sg);
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ }
+ sge[i].addr = req->iu->dma_addr;
+ sge[i].length = size;
+ sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+
+ num_sge = 1 + req->sg_cnt;
+
+ /*
+ * From time to time we have to post signalled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
+ 0 : IB_SEND_SIGNALED;
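+ /*
+ * For example, with a (hypothetical) queue depth of 128 only every
+ * 128th write is posted with IB_SEND_SIGNALED; all other posts
+ * complete silently.
+ */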
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ size, DMA_TO_DEVICE);
+
+ return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
+ rbuf->rkey, rbuf->addr, imm,
+ flags, NULL);
+}
+
+static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_write *msg;
+
+ struct rtrs_rbuf *rbuf;
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s, "Write request failed, map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put rtrs msg after sg and user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_WRITE);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ /* rtrs message on server side will be after user data and message */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+ req->sg_size = tsize;
+ rbuf = &sess->rbufs[buf_id];
+
+ /*
+ * Update stats now; after the request is successfully sent it is
+ * no longer safe to touch it.
+ */
+ rtrs_clt_update_all_stats(req, WRITE);
+
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
+ req->usr_len + sizeof(*msg),
+ imm);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Write request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
+{
+ int nr;
+
+ /* Align the MR to a 4K page size to match the block virt boundary */
+ nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
+ if (nr < 0)
+ return nr;
+ if (unlikely(nr < req->sg_cnt))
+ return -EINVAL;
+ ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
+
+ return nr;
+}
+
+static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
+{
+ struct rtrs_clt_con *con = req->con;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_msg_rdma_read *msg;
+ struct rtrs_ib_dev *dev;
+
+ struct ib_reg_wr rwr;
+ struct ib_send_wr *wr = NULL;
+
+ int ret, count = 0;
+ u32 imm, buf_id;
+
+ const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
+
+ s = &sess->s;
+ dev = sess->s.dev;
+
+ if (unlikely(tsize > sess->chunk_size)) {
+ rtrs_wrn(s,
+ "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
+ tsize, sess->chunk_size);
+ return -EMSGSIZE;
+ }
+
+ if (req->sg_cnt) {
+ count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ if (unlikely(!count)) {
+ rtrs_wrn(s,
+ "Read request failed, dma map failed\n");
+ return -EINVAL;
+ }
+ }
+ /* put our message into req->buf after user message */
+ msg = req->iu->buf + req->usr_len;
+ msg->type = cpu_to_le16(RTRS_MSG_READ);
+ msg->usr_len = cpu_to_le16(req->usr_len);
+
+ if (count) {
+ ret = rtrs_map_sg_fr(req, count);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Read request failed, failed to map fast reg. data, err: %d\n",
+ ret);
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
+ req->dir);
+ return ret;
+ }
+ rwr = (struct ib_reg_wr) {
+ .wr.opcode = IB_WR_REG_MR,
+ .wr.wr_cqe = &fast_reg_cqe,
+ .mr = req->mr,
+ .key = req->mr->rkey,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ };
+ wr = &rwr.wr;
+
+ msg->sg_cnt = cpu_to_le16(1);
+ msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
+
+ msg->desc[0].addr = cpu_to_le64(req->mr->iova);
+ msg->desc[0].key = cpu_to_le32(req->mr->rkey);
+ msg->desc[0].len = cpu_to_le32(req->mr->length);
+
+ /* Further invalidation is required */
+ req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
+
+ } else {
+ msg->sg_cnt = 0;
+ msg->flags = 0;
+ }
+ /*
+ * rtrs message will be after the space reserved for disk data and
+ * user message
+ */
+ imm = req->permit->mem_off + req->data_len + req->usr_len;
+ imm = rtrs_to_io_req_imm(imm);
+ buf_id = req->permit->mem_id;
+
+ req->sg_size = sizeof(*msg);
+ req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
+ req->sg_size += req->usr_len;
+
+ /*
+ * Update stats now; after the request is successfully sent it is
+ * no longer safe to touch it.
+ */
+ rtrs_clt_update_all_stats(req, READ);
+
+ ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ req->data_len, imm, wr);
+ if (unlikely(ret)) {
+ rtrs_err(s, "Read request failed: %d\n", ret);
+ if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
+ atomic_dec(&sess->stats->inflight);
+ req->need_inv = false;
+ if (req->sg_cnt)
+ ib_dma_unmap_sg(dev->ib_dev, req->sglist,
+ req->sg_cnt, req->dir);
+ }
+
+ return ret;
+}
+
+/**
+ * rtrs_clt_failover_req() - Try to find an active path for a failed request
+ * @clt: clt context
+ * @fail_req: a failed io request.
+ */
+static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+ struct rtrs_clt_io_req *fail_req)
+{
+ struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_io_req *req;
+ int err = -ECONNABORTED;
+ struct path_it it;
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ it.i++) {
+ if (unlikely(READ_ONCE(alive_sess->state) !=
+ RTRS_CLT_CONNECTED))
+ continue;
+ req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ if (req->dir == DMA_TO_DEVICE)
+ err = rtrs_clt_write_req(req);
+ else
+ err = rtrs_clt_read_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+
+static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_io_req *req;
+ int i, err;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (!req->in_use)
+ continue;
+
+ /*
+ * Safely (without notification) complete the failed request.
+ * After completion this request is still usable and can
+ * be failed over to another path.
+ */
+ complete_rdma_req(req, -ECONNABORTED, false, true);
+
+ err = rtrs_clt_failover_req(clt, req);
+ if (unlikely(err))
+ /* Failover failed, notify anyway */
+ req->conf(req->priv, err);
+ }
+}
+
+static void free_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ int i;
+
+ if (!sess->reqs)
+ return;
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ if (req->mr)
+ ib_dereg_mr(req->mr);
+ kfree(req->sge);
+ rtrs_iu_free(req->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ }
+ kfree(sess->reqs);
+ sess->reqs = NULL;
+}
+
+static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt *clt = sess->clt;
+ int i, err = -ENOMEM;
+
+ sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
+ GFP_KERNEL);
+ if (!sess->reqs)
+ return -ENOMEM;
+
+ for (i = 0; i < sess->queue_depth; ++i) {
+ req = &sess->reqs[i];
+ req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
+ sess->s.dev->ib_dev,
+ DMA_TO_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!req->iu)
+ goto out;
+
+ req->sge = kmalloc_array(clt->max_segments + 1,
+ sizeof(*req->sge), GFP_KERNEL);
+ if (!req->sge)
+ goto out;
+
+ req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sess->max_pages_per_mr);
+ if (IS_ERR(req->mr)) {
+ err = PTR_ERR(req->mr);
+ req->mr = NULL;
+ pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
+ sess->max_pages_per_mr);
+ goto out;
+ }
+
+ init_completion(&req->inv_comp);
+ }
+
+ return 0;
+
+out:
+ free_sess_reqs(sess);
+
+ return err;
+}
+
+static int alloc_permits(struct rtrs_clt *clt)
+{
+ unsigned int chunk_bits;
+ int err, i;
+
+ clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
+ sizeof(long), GFP_KERNEL);
+ if (!clt->permits_map) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
+ if (!clt->permits) {
+ err = -ENOMEM;
+ goto err_map;
+ }
+ chunk_bits = ilog2(clt->queue_depth - 1) + 1;
+ for (i = 0; i < clt->queue_depth; i++) {
+ struct rtrs_permit *permit;
+
+ permit = get_permit(clt, i);
+ permit->mem_id = i;
+ permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
+ }
+
+ return 0;
+
+err_map:
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+out_err:
+ return err;
+}
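+
+/*
+ * A worked example of the mem_off encoding above (illustrative; the exact
+ * widths depend on MAX_IMM_PAYL_BITS): with a queue depth of 128,
+ * chunk_bits = ilog2(127) + 1 = 7, so permit i keeps its chunk index in the
+ * top 7 bits of the immediate payload, and the low bits that remain are
+ * later filled by the IO path with the byte offset inside the chunk
+ * (mem_off + data_len + usr_len in rtrs_clt_read_req()/rtrs_clt_write_req()).
+ */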
+
+static void free_permits(struct rtrs_clt *clt)
+{
+ kfree(clt->permits_map);
+ clt->permits_map = NULL;
+ kfree(clt->permits);
+ clt->permits = NULL;
+}
+
+static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+{
+ struct ib_device *ib_dev;
+ u64 max_pages_per_mr;
+ int mr_page_shift;
+
+ ib_dev = sess->s.dev->ib_dev;
+
+ /*
+ * Use the smallest page size supported by the HCA, down to a
+ * minimum of 4096 bytes. We're unlikely to build large sglists
+ * out of smaller entries.
+ */
+ mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
+ max_pages_per_mr = ib_dev->attrs.max_mr_size;
+ do_div(max_pages_per_mr, (1ull << mr_page_shift));
+ sess->max_pages_per_mr =
+ min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ ib_dev->attrs.max_fast_reg_page_list_len);
+ sess->max_send_sge = ib_dev->attrs.max_send_sge;
+}
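+
+/*
+ * Hypothetical numbers for the computation above: an HCA advertising 4K
+ * pages in page_size_cap and a max_mr_size of 256 MB gives
+ * mr_page_shift = 12 and max_pages_per_mr = 256 MB >> 12 = 65536, which is
+ * then clamped by the session limit and attrs.max_fast_reg_page_list_len.
+ */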
+
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state,
+ enum rtrs_clt_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_wq.lock);
+ *old_state = sess->state;
+ changed = __rtrs_clt_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_wq.lock);
+
+ return changed;
+}
+
+static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_state new_state)
+{
+ enum rtrs_clt_state old_state;
+
+ return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
+{
+ struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
+
+ rtrs_rdma_error_recovery(con);
+}
+
+static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_clt_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work);
+static void rtrs_clt_close_work(struct work_struct *work);
+
+static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+ const struct rtrs_addr *path,
+ size_t con_num, u16 max_segments,
+ size_t max_segment_size)
+{
+ struct rtrs_clt_sess *sess;
+ int err = -ENOMEM;
+ int cpu;
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ /* Extra connection for user messages */
+ con_num += 1;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_sess;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_con;
+
+ mutex_init(&sess->init_mutex);
+ uuid_gen(&sess->s.uuid);
+ memcpy(&sess->s.dst_addr, path->dst,
+ rdma_addr_size((struct sockaddr *)path->dst));
+
+ /*
+ * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
+ * checks that the sa_family is non-zero. If the user passed
+ * src_addr=NULL, sess->src_addr will contain only zeros, which is fine.
+ */
+ if (path->src)
+ memcpy(&sess->s.src_addr, path->src,
+ rdma_addr_size((struct sockaddr *)path->src));
+ strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
+ sess->s.con_num = con_num;
+ sess->clt = clt;
+ sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
+ init_waitqueue_head(&sess->state_wq);
+ sess->state = RTRS_CLT_CONNECTING;
+ atomic_set(&sess->connected_cnt, 0);
+ INIT_WORK(&sess->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(sess);
+
+ sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
+ if (!sess->mp_skip_entry)
+ goto err_free_stats;
+
+ for_each_possible_cpu(cpu)
+ INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+
+ err = rtrs_clt_init_stats(sess->stats);
+ if (err)
+ goto err_free_percpu;
+
+ return sess;
+
+err_free_percpu:
+ free_percpu(sess->mp_skip_entry);
+err_free_stats:
+ kfree(sess->stats);
+err_free_con:
+ kfree(sess->s.con);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+void free_sess(struct rtrs_clt_sess *sess)
+{
+ free_percpu(sess->mp_skip_entry);
+ mutex_destroy(&sess->init_mutex);
+ kfree(sess->s.con);
+ kfree(sess->rbufs);
+ kfree(sess);
+}
+
+static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+{
+ struct rtrs_clt_con *con;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ /* Map first two connections to the first CPU */
+ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
+ con->c.cid = cid;
+ con->c.sess = &sess->s;
+ atomic_set(&con->io_cnt, 0);
+
+ sess->s.con[cid] = &con->c;
+
+ return 0;
+}
+
+static void destroy_con(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ sess->s.con[con->c.cid] = NULL;
+ kfree(con);
+}
+
+static int create_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ u16 wr_queue_size;
+ int err, cq_vector;
+ struct rtrs_msg_rkey_rsp *rsp;
+
+ /*
+ * This function can fail, but destroy_con_cq_qp() should still
+ * be called: create_con_cq_qp() runs on the cm event path, so the
+ * caller/waiter never knows whether we failed before or after
+ * create_con_cq_qp(). To solve this dilemma without adding any
+ * extra flags, simply allow destroy_con_cq_qp() to be called
+ * many times.
+ */
+
+ if (con->c.cid == 0) {
+ /*
+ * One completion for each receive and two for each send
+ * (send request + registration)
+ * + 2 for drain and heartbeat
+ * in case qp gets into error state
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ /* We must be the first here */
+ if (WARN_ON(sess->s.dev))
+ return -EINVAL;
+
+ /*
+ * The whole session uses the device from the user connection.
+ * Be careful not to close the user connection before the
+ * ib dev is gracefully put.
+ */
+ sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ &dev_pd);
+ if (!sess->s.dev) {
+ rtrs_wrn(sess->clt,
+ "rtrs_ib_dev_find_get_or_add(): no memory\n");
+ return -ENOMEM;
+ }
+ sess->s.dev_ref = 1;
+ query_fast_reg_mode(sess);
+ } else {
+ /*
+ * Here we assume that session members are correctly set.
+ * This is always true if user connection (cid == 0) is
+ * established first.
+ */
+ if (WARN_ON(!sess->s.dev))
+ return -EINVAL;
+ if (WARN_ON(!sess->queue_depth))
+ return -EINVAL;
+
+ /* Shared between connections */
+ sess->s.dev_ref++;
+ wr_queue_size =
+ min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+ /* QD * (REQ + RSP + FR REGS or INVS) + drain */
+ sess->queue_depth * 3 + 1);
+ }
+ /* alloc iu to recv new rkey reply when server reports flags set */
+ if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE,
+ rtrs_clt_rdma_done);
+ if (!con->rsp_ius)
+ return -ENOMEM;
+ con->queue_size = wr_queue_size;
+ }
+ cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
+ err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
+ cq_vector, wr_queue_size, wr_queue_size,
+ IB_POLL_SOFTIRQ);
+ /*
+ * In case of error we do not bother to clean previous allocations,
+ * since destroy_con_cq_qp() must be called.
+ */
+ return err;
+}
+
+static void destroy_con_cq_qp(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ /*
+ * Be careful here: destroy_con_cq_qp() can be called even if
+ * create_con_cq_qp() failed, see comments there.
+ */
+
+ rtrs_cq_qp_destroy(&con->c);
+ if (con->rsp_ius) {
+ rtrs_iu_free(con->rsp_ius, DMA_FROM_DEVICE,
+ sess->s.dev->ib_dev, con->queue_size);
+ con->rsp_ius = NULL;
+ con->queue_size = 0;
+ }
+ if (sess->s.dev_ref && !--sess->s.dev_ref) {
+ rtrs_ib_dev_put(sess->s.dev);
+ sess->s.dev = NULL;
+ }
+}
+
+static void stop_cm(struct rtrs_clt_con *con)
+{
+ rdma_disconnect(con->c.cm_id);
+ if (con->c.qp)
+ ib_drain_qp(con->c.qp);
+}
+
+static void destroy_cm(struct rtrs_clt_con *con)
+{
+ rdma_destroy_id(con->c.cm_id);
+ con->c.cm_id = NULL;
+}
+
+static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ int err;
+
+ err = create_con_cq_qp(con);
+ if (err) {
+ rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
+ return err;
+ }
+ err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Resolving route failed, err: %d\n", err);
+ destroy_con_cq_qp(con);
+ }
+
+ return err;
+}
+
+static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_msg_conn_req msg;
+ struct rdma_conn_param param;
+
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .retry_count = 7,
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_req) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .cid = cpu_to_le16(con->c.cid),
+ .cid_num = cpu_to_le16(sess->s.con_num),
+ .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ };
+ uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
+
+ err = rdma_connect(con->c.cm_id, &param);
+ if (err)
+ rtrs_err(clt, "rdma_connect(): %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt *clt = sess->clt;
+ const struct rtrs_msg_conn_rsp *msg;
+ u16 version, queue_depth;
+ int errno;
+ u8 len;
+
+ msg = ev->param.conn.private_data;
+ len = ev->param.conn.private_data_len;
+ if (len < sizeof(*msg)) {
+ rtrs_err(clt, "Invalid RTRS connection response\n");
+ return -ECONNRESET;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ rtrs_err(clt, "Invalid RTRS magic\n");
+ return -ECONNRESET;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ return -ECONNRESET;
+ }
+ errno = le16_to_cpu(msg->errno);
+ if (errno) {
+ rtrs_err(clt, "Invalid RTRS message: errno %d\n",
+ errno);
+ return -ECONNRESET;
+ }
+ if (con->c.cid == 0) {
+ queue_depth = le16_to_cpu(msg->queue_depth);
+
+ if (queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ rtrs_err(clt, "Invalid RTRS message: queue=%d\n",
+ queue_depth);
+ return -ECONNRESET;
+ }
+ if (!sess->rbufs || sess->queue_depth < queue_depth) {
+ kfree(sess->rbufs);
+ sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
+ GFP_KERNEL);
+ if (!sess->rbufs)
+ return -ENOMEM;
+ }
+ sess->queue_depth = queue_depth;
+ sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ sess->max_io_size = le32_to_cpu(msg->max_io_size);
+ sess->flags = le32_to_cpu(msg->flags);
+ sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+
+ /*
+ * Global queue depth and IO size are always a minimum.
+ * If during a reconnection the server sends us a slightly
+ * higher value, the client does not care and keeps using
+ * the cached minimum.
+ *
+ * Since several sessions (paths) can be re-establishing
+ * connections in parallel, take the lock.
+ */
+ mutex_lock(&clt->paths_mutex);
+ clt->queue_depth = min_not_zero(sess->queue_depth,
+ clt->queue_depth);
+ clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->max_io_size);
+ mutex_unlock(&clt->paths_mutex);
+
+ /*
+ * Cache the hca_port and hca_name for sysfs
+ */
+ sess->hca_port = con->c.cm_id->port_num;
+ scnprintf(sess->hca_name, sizeof(sess->hca_name), "%s",
+ sess->s.dev->ib_dev->name);
+ sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ }
+
+ return 0;
+}
+
+static inline void flag_success_on_conn(struct rtrs_clt_con *con)
+{
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+
+ atomic_inc(&sess->connected_cnt);
+ con->cm_err = 1;
+}
+
+static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_sess *s = con->c.sess;
+ const struct rtrs_msg_conn_rsp *msg;
+ const char *rej_msg;
+ int status, errno;
+ u8 data_len;
+
+ status = ev->status;
+ rej_msg = rdma_reject_msg(con->c.cm_id, status);
+ msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
+
+ if (msg && data_len >= sizeof(*msg)) {
+ errno = (int16_t)le16_to_cpu(msg->errno);
+ if (errno == -EBUSY)
+ rtrs_err(s,
+ "Previous session is still exists on the server, please reconnect later\n");
+ else
+ rtrs_err(s,
+ "Connect rejected: status %d (%s), rtrs errno %d\n",
+ status, rej_msg, errno);
+ } else {
+ rtrs_err(s,
+ "Connect rejected but with malformed message: status %d (%s)\n",
+ status, rej_msg);
+ }
+
+ return -ECONNRESET;
+}
+
+static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+{
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
+ queue_work(rtrs_wq, &sess->close_work);
+ if (wait)
+ flush_work(&sess->close_work);
+}
+
+static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
+{
+ if (con->cm_err == 1) {
+ struct rtrs_clt_sess *sess;
+
+ sess = to_clt_sess(con->c.sess);
+ if (atomic_dec_and_test(&sess->connected_cnt))
+ wake_up(&sess->state_wq);
+ }
+ con->cm_err = cm_err;
+}
+
+static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_clt_con *con = cm_id->context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ int cm_err = 0;
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ cm_err = rtrs_rdma_addr_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ cm_err = rtrs_rdma_route_resolved(con);
+ break;
+ case RDMA_CM_EVENT_ESTABLISHED:
+ con->cm_err = rtrs_rdma_conn_established(con, ev);
+ if (likely(!con->cm_err)) {
+ /*
+ * Report success and wake up. Here we abuse state_wq,
+ * i.e. wake up without state change, but we set cm_err.
+ */
+ flag_success_on_conn(con);
+ wake_up(&sess->state_wq);
+ return 0;
+ }
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ cm_err = rtrs_rdma_conn_rejected(con, ev);
+ break;
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_wrn(s, "CM error event %d\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ cm_err = -EHOSTUNREACH;
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ cm_err = -ECONNRESET;
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ /*
+ * Device removal is a special case. Queue close and return 0.
+ */
+ rtrs_clt_close_conns(sess, false);
+ return 0;
+ default:
+ rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+ cm_err = -ECONNRESET;
+ break;
+ }
+
+ if (cm_err) {
+ /*
+ * A cm error makes sense only while establishing a connection;
+ * in other cases we rely on the normal reconnect procedure.
+ */
+ flag_error_on_conn(con, cm_err);
+ rtrs_rdma_error_recovery(con);
+ }
+
+ return 0;
+}
+
+static int create_cm(struct rtrs_clt_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rdma_cm_id *cm_id;
+ int err;
+
+ cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
+ sess->s.dst_addr.ss_family == AF_IB ?
+ RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ err = PTR_ERR(cm_id);
+ rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
+
+ return err;
+ }
+ con->c.cm_id = cm_id;
+ con->cm_err = 0;
+ /* allow the port to be reused */
+ err = rdma_set_reuseaddr(cm_id, 1);
+ if (err != 0) {
+ rtrs_err(s, "Set address reuse failed, err: %d\n", err);
+ goto destroy_cm;
+ }
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&sess->s.dst_addr,
+ RTRS_CONNECT_TIMEOUT_MS);
+ if (err) {
+ rtrs_err(s, "Failed to resolve address, err: %d\n", err);
+ goto destroy_cm;
+ }
+ /*
+ * Combine connection status and session events. This is needed
+ * to wait for two possible cases: cm_err has something meaningful,
+ * or the session state was really changed to error by device removal.
+ */
+ err = wait_event_interruptible_timeout(
+ sess->state_wq,
+ con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+ if (err == 0 || err == -ERESTARTSYS) {
+ if (err == 0)
+ err = -ETIMEDOUT;
+ /* Timed out or interrupted */
+ goto errr;
+ }
+ if (con->cm_err < 0) {
+ err = con->cm_err;
+ goto errr;
+ }
+ if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ /* Device removal */
+ err = -ECONNABORTED;
+ goto errr;
+ }
+
+ return 0;
+
+errr:
+ stop_cm(con);
+ /* It is safe to call destroy even if cq_qp was not initialized */
+ destroy_con_cq_qp(con);
+destroy_cm:
+ destroy_cm(con);
+
+ return err;
+}
+
+static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ int up;
+
+ /*
+ * We can fire the RECONNECTED event only when all paths were
+ * connected on rtrs_clt_open(), then each was disconnected
+ * and the first one connected again. That's why we play this
+ * nasty game with the counter value.
+ */
+
+ mutex_lock(&clt->paths_ev_mutex);
+ up = ++clt->paths_up;
+ /*
+ * Here it is safe to access paths_num directly since the up counter
+ * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
+ * in progress, thus path removals are impossible.
+ */
+ if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
+ clt->paths_up = clt->paths_num;
+ else if (up == 1)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+ sess->reconnect_attempts = 0;
+ sess->stats->reconnects.successful_cnt++;
+}
+
+static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&clt->paths_ev_mutex);
+ WARN_ON(!clt->paths_up);
+ if (--clt->paths_up == 0)
+ clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
+ mutex_unlock(&clt->paths_ev_mutex);
+}
+
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *con;
+ unsigned int cid;
+
+ WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+
+ /*
+ * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
+ * exactly in between. Start destroying after it finishes.
+ */
+ mutex_lock(&sess->init_mutex);
+ mutex_unlock(&sess->init_mutex);
+
+ /*
+ * All IO paths must observe !CONNECTED state before we
+ * free everything.
+ */
+ synchronize_rcu();
+
+ rtrs_clt_stop_hb(sess);
+
+ /*
+ * The order is utterly crucial: first disconnect and complete all
+ * rdma requests with an error (thus setting in_use=false for the
+ * requests), then fail outstanding requests checking in_use for
+ * each, and eventually notify the upper layer about the session
+ * disconnection.
+ */
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ stop_cm(con);
+ }
+ fail_all_outstanding_reqs(sess);
+ free_sess_reqs(sess);
+ rtrs_clt_sess_down(sess);
+
+ /*
+ * Wait for graceful shutdown, namely when the peer side invokes
+ * rdma_disconnect(). 'connected_cnt' is decremented only on
+ * CM events, thus if the other side has crashed and the hb has
+ * detected that something is wrong, we will be stuck here for
+ * exactly the timeout in ms, since CM does not fire anything.
+ * That is fine, we are not in a hurry.
+ */
+ wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (!sess->s.con[cid])
+ break;
+ con = to_clt_con(sess->s.con[cid]);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+}
+
+static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_sess *sess,
+ struct rtrs_clt_sess *next)
+{
+ struct rtrs_clt_sess **ppcpu_path;
+
+ /* Call cmpxchg() without sparse warnings */
+ ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
+ return sess == cmpxchg(ppcpu_path, sess, next);
+}
+
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *next;
+ bool wait_for_grace = false;
+ int cpu;
+
+ mutex_lock(&clt->paths_mutex);
+ list_del_rcu(&sess->s.entry);
+
+ /* Make sure everybody observes path removal. */
+ synchronize_rcu();
+
+ /*
+ * At this point nobody sees @sess in the list, but we still have a
+ * dangling @pcpu_path pointer which _can_ point to @sess. Since
+ * nobody can observe @sess in the list, we guarantee that the IO
+ * path will not assign @sess to @pcpu_path, i.e. @pcpu_path can
+ * currently be equal to @sess, but can never become @sess again.
+ */
+
+ /*
+ * Decrement the number of paths only after the grace period,
+ * because a caller of do_each_path() must first observe the list
+ * without the path and only then the decremented number of paths.
+ *
+ * Otherwise there can be the following situation:
+ * o Two paths exist and IO is coming.
+ * o One path is removed:
+ * CPU#0 CPU#1
+ * do_each_path(): rtrs_clt_remove_path_from_arr():
+ * path = get_next_path()
+ * ^^^ list_del_rcu(path)
+ * [!CONNECTED path] clt->paths_num--
+ * ^^^^^^^^^
+ * load clt->paths_num from 2 to 1
+ * ^^^^^^^^^
+ * sees 1
+ *
+ * path is observed as !CONNECTED, but do_each_path() loop
+ * ends, because expression i < clt->paths_num is false.
+ */
+ clt->paths_num--;
+
+ /*
+ * Get @next connection from current @sess which is going to be
+ * removed. If @sess is the last element, then @next is NULL.
+ */
+ rcu_read_lock();
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ typeof(*next), s.entry);
+ rcu_read_unlock();
+
+ /*
+ * @pcpu paths can still point to the path which is going to be
+ * removed, so change the pointer manually.
+ */
+ for_each_possible_cpu(cpu) {
+ struct rtrs_clt_sess __rcu **ppcpu_path;
+
+ ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
+ if (rcu_dereference_protected(*ppcpu_path,
+ lockdep_is_held(&clt->paths_mutex)) != sess)
+ /*
+ * synchronize_rcu() was called just after deleting
+ * the entry from the list, thus the IO code path
+ * cannot change the pointer back to the one which is
+ * going to be removed; we are safe here.
+ */
+ continue;
+
+ /*
+ * We race with IO code path, which also changes pointer,
+ * thus we have to be careful not to overwrite it.
+ */
+ if (xchg_sessions(ppcpu_path, sess, next))
+ /*
+ * @ppcpu_path was successfully replaced with @next,
+ * which means that someone could also have picked up
+ * @sess and be dereferencing it right now, so waiting
+ * for a grace period is required.
+ */
+ wait_for_grace = true;
+ }
+ if (wait_for_grace)
+ synchronize_rcu();
+
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt *clt = sess->clt;
+
+ mutex_lock(&clt->paths_mutex);
+ clt->paths_num++;
+
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ mutex_unlock(&clt->paths_mutex);
+}
+
+static void rtrs_clt_close_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+
+ sess = container_of(work, struct rtrs_clt_sess, close_work);
+
+ cancel_delayed_work_sync(&sess->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
+}
+
+static int init_conns(struct rtrs_clt_sess *sess)
+{
+ unsigned int cid;
+ int err;
+
+ /*
+ * On every new set of session connections increase the reconnect
+ * counter to avoid clashes with previous sessions that are not
+ * yet closed on the server side.
+ */
+ sess->s.recon_cnt++;
+
+ /* Establish all RDMA connections */
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ err = create_con(sess, cid);
+ if (err)
+ goto destroy;
+
+ err = create_cm(to_clt_con(sess->s.con[cid]));
+ if (err) {
+ destroy_con(to_clt_con(sess->s.con[cid]));
+ goto destroy;
+ }
+ }
+ err = alloc_sess_reqs(sess);
+ if (err)
+ goto destroy;
+
+ rtrs_clt_start_hb(sess);
+
+ return 0;
+
+destroy:
+ while (cid--) {
+ struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+
+ stop_cm(con);
+ destroy_con_cq_qp(con);
+ destroy_cm(con);
+ destroy_con(con);
+ }
+ /*
+ * If we've never taken the async path and got an error, say,
+ * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
+ * manually to keep reconnecting.
+ */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+ return;
+ }
+
+ rtrs_clt_update_wc_stats(con);
+}
+
+static int process_info_rsp(struct rtrs_clt_sess *sess,
+ const struct rtrs_msg_info_rsp *msg)
+{
+ unsigned int sg_cnt, total_len;
+ int i, sgi;
+
+ sg_cnt = le16_to_cpu(msg->sg_cnt);
+ if (unlikely(!sg_cnt))
+ return -EINVAL;
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and
+ * the offset inside the memory chunk.
+ */
+ if (unlikely((ilog2(sg_cnt - 1) + 1) +
+ (ilog2(sess->chunk_size - 1) + 1) >
+ MAX_IMM_PAYL_BITS)) {
+ rtrs_err(sess->clt,
+ "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
+ MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ return -EINVAL;
+ }
+ if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+ rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
+ sg_cnt);
+ return -EINVAL;
+ }
+ total_len = 0;
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ const struct rtrs_sg_desc *desc = &msg->desc[sgi];
+ u32 len, rkey;
+ u64 addr;
+
+ addr = le64_to_cpu(desc->addr);
+ rkey = le32_to_cpu(desc->key);
+ len = le32_to_cpu(desc->len);
+
+ total_len += len;
+
+ if (unlikely(!len || (len % sess->chunk_size))) {
+ rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ len);
+ return -EINVAL;
+ }
+ for ( ; len && i < sess->queue_depth; i++) {
+ sess->rbufs[i].addr = addr;
+ sess->rbufs[i].rkey = rkey;
+
+ len -= sess->chunk_size;
+ addr += sess->chunk_size;
+ }
+ }
+ /* Sanity check */
+ if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ return -EINVAL;
+ }
+ if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
+ rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
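+
+/*
+ * Illustrative result of the mapping loop above (made-up numbers): a single
+ * descriptor with len = 4 * chunk_size starting at addr fills rbufs[0..3]
+ * with addr, addr + chunk_size, addr + 2 * chunk_size and
+ * addr + 3 * chunk_size, all sharing the descriptor's rkey.
+ */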
+
+static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_clt_con *con = cq->cq_context;
+ struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_msg_info_rsp *msg;
+ enum rtrs_clt_state state;
+ struct rtrs_iu *iu;
+ size_t rx_sz;
+ int err;
+
+ state = RTRS_CLT_CONNECTING_ERR;
+
+ WARN_ON(con->c.cid);
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto out;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto out;
+ }
+ rx_sz = sizeof(*msg);
+ rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
+ if (unlikely(wc->byte_len < rx_sz)) {
+ rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ wc->byte_len);
+ goto out;
+ }
+ err = process_info_rsp(sess, msg);
+ if (unlikely(err))
+ goto out;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err))
+ goto out;
+
+ state = RTRS_CLT_CONNECTED;
+
+out:
+ rtrs_clt_update_wc_stats(con);
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ rtrs_clt_change_state(sess, state);
+}
+
+static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+{
+ struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *tx_iu, *rx_iu;
+ size_t rx_sz;
+ int err;
+
+ rx_sz = sizeof(struct rtrs_msg_info_rsp);
+ rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
+
+ tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
+ sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ rtrs_clt_info_req_done);
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
+ if (unlikely(!tx_iu || !rx_iu)) {
+ err = -ENOMEM;
+ goto out;
+ }
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ goto out;
+ }
+ rx_iu = NULL;
+
+ msg = tx_iu->buf;
+ msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
+ memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info request */
+ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
+ if (unlikely(err)) {
+ rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ goto out;
+ }
+ tx_iu = NULL;
+
+ /* Wait for state change */
+ wait_event_interruptible_timeout(sess->state_wq,
+ sess->state != RTRS_CLT_CONNECTING,
+ msecs_to_jiffies(
+ RTRS_CONNECT_TIMEOUT_MS));
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
+ if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ err = -ECONNRESET;
+ else
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+out:
+ if (tx_iu)
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ if (rx_iu)
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ if (unlikely(err))
+ /* If we've never taken async path because of malloc problems */
+ rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
+
+ return err;
+}
+
+/**
+ * init_sess() - establishes all session connections and does handshake
+ * @sess: client session.
+ * In case of error a full close or reconnect procedure should be taken,
+ * because async reconnect or close works can already have been started.
+ */
+static int init_sess(struct rtrs_clt_sess *sess)
+{
+ int err;
+
+ mutex_lock(&sess->init_mutex);
+ err = init_conns(sess);
+ if (err) {
+ rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+ goto out;
+ }
+ err = rtrs_send_sess_info(sess);
+ if (err) {
+ rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+ goto out;
+ }
+ rtrs_clt_sess_up(sess);
+out:
+ mutex_unlock(&sess->init_mutex);
+
+ return err;
+}
+
+static void rtrs_clt_reconnect_work(struct work_struct *work)
+{
+ struct rtrs_clt_sess *sess;
+ struct rtrs_clt *clt;
+ unsigned int delay_ms;
+ int err;
+
+ sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
+ reconnect_dwork);
+ clt = sess->clt;
+
+ if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ return;
+
+ if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a session completely if max attempts is reached */
+ rtrs_clt_close_conns(sess, false);
+ return;
+ }
+ sess->reconnect_attempts++;
+
+ /* Stop everything */
+ rtrs_clt_stop_and_destroy_conns(sess);
+ msleep(RTRS_RECONNECT_BACKOFF);
+ if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
+ err = init_sess(sess);
+ if (err)
+ goto reconnect_again;
+ }
+
+ return;
+
+reconnect_again:
+ if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
+ sess->stats->reconnects.fail_cnt++;
+ delay_ms = clt->reconnect_delay_sec * 1000;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ msecs_to_jiffies(delay_ms));
+ }
+}
+
+static void rtrs_clt_dev_release(struct device *dev)
+{
+ struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+
+ kfree(clt);
+}
+
+static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+ u16 port, size_t pdu_sz, void *priv,
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev),
+ unsigned int max_segments,
+ size_t max_segment_size,
+ unsigned int reconnect_delay_sec,
+ unsigned int max_reconnect_attempts)
+{
+ struct rtrs_clt *clt;
+ int err;
+
+ if (!paths_num || paths_num > MAX_PATHS_NUM)
+ return ERR_PTR(-EINVAL);
+
+ if (strlen(sessname) >= sizeof(clt->sessname))
+ return ERR_PTR(-EINVAL);
+
+ clt = kzalloc(sizeof(*clt), GFP_KERNEL);
+ if (!clt)
+ return ERR_PTR(-ENOMEM);
+
+ clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
+ if (!clt->pcpu_path) {
+ kfree(clt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ uuid_gen(&clt->paths_uuid);
+ INIT_LIST_HEAD_RCU(&clt->paths_list);
+ clt->paths_num = paths_num;
+ clt->paths_up = MAX_PATHS_NUM;
+ clt->port = port;
+ clt->pdu_sz = pdu_sz;
+ clt->max_segments = max_segments;
+ clt->max_segment_size = max_segment_size;
+ clt->reconnect_delay_sec = reconnect_delay_sec;
+ clt->max_reconnect_attempts = max_reconnect_attempts;
+ clt->priv = priv;
+ clt->link_ev = link_ev;
+ clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
+ strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
+ init_waitqueue_head(&clt->permits_wait);
+ mutex_init(&clt->paths_ev_mutex);
+ mutex_init(&clt->paths_mutex);
+
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
+ err = dev_set_name(&clt->dev, "%s", sessname);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kfree(clt);
+ return ERR_PTR(err);
+ }
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&clt->dev, true);
+ err = device_register(&clt->dev);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ put_device(&clt->dev);
+ return ERR_PTR(err);
+ }
+
+ clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
+ if (!clt->kobj_paths) {
+ free_percpu(clt->pcpu_path);
+ device_unregister(&clt->dev);
+ return NULL;
+ }
+ err = rtrs_clt_create_sysfs_root_files(clt);
+ if (err) {
+ free_percpu(clt->pcpu_path);
+ kobject_del(clt->kobj_paths);
+ kobject_put(clt->kobj_paths);
+ device_unregister(&clt->dev);
+ return ERR_PTR(err);
+ }
+ dev_set_uevent_suppress(&clt->dev, false);
+ kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
+
+ return clt;
+}
+
+static void wait_for_inflight_permits(struct rtrs_clt *clt)
+{
+ if (clt->permits_map) {
+ size_t sz = clt->queue_depth;
+
+ wait_event(clt->permits_wait,
+ find_first_bit(clt->permits_map, sz) >= sz);
+ }
+}
+
+static void free_clt(struct rtrs_clt *clt)
+{
+ wait_for_inflight_permits(clt);
+ free_permits(clt);
+ free_percpu(clt->pcpu_path);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
+ /* release callback will free clt in last put */
+ device_unregister(&clt->dev);
+}
+
+/**
+ * rtrs_clt_open() - Open a session to an RTRS server
+ * @ops: holds the link event callback and the private pointer.
+ * @sessname: name of the session
+ * @paths: Paths to be established defined by their src and dst addresses
+ * @paths_num: Number of elements in the @paths array
+ * @port: port to be used by the RTRS session
+ * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
+ * @reconnect_delay_sec: time between reconnect tries
+ * @max_segments: Max. number of segments per IO request
+ * @max_segment_size: Max. size of one segment
+ * @max_reconnect_attempts: Number of times to reconnect on error before
+ *			    giving up, 0 for disabled, -1 for forever
+ *
+ * Starts session establishment with the rtrs_server. The function can block
+ * up to ~2000ms before it returns.
+ *
+ * Return a valid pointer on success, otherwise PTR_ERR.
+ */
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t paths_num, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt *clt;
+ int err, i;
+
+ clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ ops->link_ev,
+ max_segments, max_segment_size, reconnect_delay_sec,
+ max_reconnect_attempts);
+ if (IS_ERR(clt)) {
+ err = PTR_ERR(clt);
+ goto out;
+ }
+ for (i = 0; i < paths_num; i++) {
+ struct rtrs_clt_sess *sess;
+
+ sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ max_segments, max_segment_size);
+ if (IS_ERR(sess)) {
+ err = PTR_ERR(sess);
+ goto close_all_sess;
+ }
+ list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+
+ err = init_sess(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err) {
+ list_del_rcu(&sess->s.entry);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+ goto close_all_sess;
+ }
+ }
+ err = alloc_permits(clt);
+ if (err)
+ goto close_all_sess;
+
+ return clt;
+
+close_all_sess:
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+ free_clt(clt);
+
+out:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(rtrs_clt_open);
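+
+/*
+ * A minimal caller sketch for the function above (hypothetical ULP code,
+ * not part of this patch; my_dev, my_link_event_handler, struct my_iu and
+ * dst_sockaddr are made-up names), opening one path with a 5 second
+ * reconnect delay, 31 segments of up to 4K and 3 reconnect attempts:
+ *
+ *	struct rtrs_clt_ops ops = {
+ *		.priv    = my_dev,
+ *		.link_ev = my_link_event_handler,
+ *	};
+ *	struct rtrs_addr path = { .src = NULL, .dst = &dst_sockaddr };
+ *	struct rtrs_clt *clt;
+ *
+ *	clt = rtrs_clt_open(&ops, "my_session", &path, 1, port,
+ *			    sizeof(struct my_iu), 5, 31, SZ_4K, 3);
+ *	if (IS_ERR(clt))
+ *		return PTR_ERR(clt);
+ */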
+
+/**
+ * rtrs_clt_close() - Close a session
+ * @clt: Session handle. Session is freed upon return.
+ */
+void rtrs_clt_close(struct rtrs_clt *clt)
+{
+ struct rtrs_clt_sess *sess, *tmp;
+
+ /* Firstly forbid sysfs access */
+ rtrs_clt_destroy_sysfs_root_files(clt);
+ rtrs_clt_destroy_sysfs_root_folders(clt);
+
+ /* Now it is safe to iterate over all paths without locks */
+ list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_sess_files(sess, NULL);
+ rtrs_clt_close_conns(sess, true);
+ kobject_put(&sess->kobj);
+ }
+ free_clt(clt);
+}
+EXPORT_SYMBOL(rtrs_clt_close);
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ enum rtrs_clt_state old_state;
+ int err = -EBUSY;
+ bool changed;
+
+ changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ &old_state);
+ if (changed) {
+ sess->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ }
+ if (changed || old_state == RTRS_CLT_RECONNECTING) {
+ /*
+ * flush_delayed_work() queues pending work for immediate
+ * execution, so do the flush if we have queued something
+ * right now or work is pending.
+ */
+ flush_delayed_work(&sess->reconnect_dwork);
+ err = (READ_ONCE(sess->state) ==
+ RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
+ }
+
+ return err;
+}
+
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
+{
+ rtrs_clt_close_conns(sess, true);
+
+ return 0;
+}
+
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self)
+{
+ enum rtrs_clt_state old_state;
+ bool changed;
+
+ /*
+ * Keep stopping the path until its state is changed to DEAD or
+ * is observed as DEAD:
+ * 1. The state was changed to DEAD - we were fast and nobody
+ *    invoked rtrs_clt_reconnect(), which could have started
+ *    reconnecting again.
+ * 2. The state was observed as DEAD - someone else is removing
+ *    the path in parallel.
+ */
+ do {
+ rtrs_clt_close_conns(sess, true);
+ changed = rtrs_clt_change_state_get_old(sess,
+ RTRS_CLT_DEAD,
+ &old_state);
+ } while (!changed && old_state != RTRS_CLT_DEAD);
+
+ if (likely(changed)) {
+ rtrs_clt_destroy_sess_files(sess, sysfs_self);
+ rtrs_clt_remove_path_from_arr(sess);
+ kobject_put(&sess->kobj);
+ }
+
+ return 0;
+}
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+{
+ clt->max_reconnect_attempts = (unsigned int)value;
+}
+
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+{
+ return (int)clt->max_reconnect_attempts;
+}
+
+/**
+ * rtrs_clt_request() - Request data transfer to/from server via RDMA.
+ *
+ * @dir: READ/WRITE
+ * @ops: callback function to be called as confirmation, and its private pointer.
+ * @clt: Session
+ * @permit: Preallocated permit
+ * @vec: Message that is sent to server together with the request.
+ * Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
+ * Since the msg is copied internally it can be allocated on stack.
+ * @nr: Number of elements in @vec.
+ * @data_len: length of data sent to/from server
+ * @sg: Pages to be sent/received to/from server.
+ * @sg_cnt: Number of elements in the @sg
+ *
+ * Return:
+ * 0: Success
+ * <0: Error
+ *
+ * On dir=READ the rtrs client will request a data transfer from the server
+ * to the client. The data that the server responds with will be stored in
+ * @sg when the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL
+ * event.
+ * On dir=WRITE the rtrs client will RDMA-write the data in @sg to the
+ * server side.
+ */
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
+{
+ struct rtrs_clt_io_req *req;
+ struct rtrs_clt_sess *sess;
+
+ enum dma_data_direction dma_dir;
+ int err = -ECONNABORTED, i;
+ size_t usr_len, hdr_len;
+ struct path_it it;
+
+ /* Get kvec length */
+ for (i = 0, usr_len = 0; i < nr; i++)
+ usr_len += vec[i].iov_len;
+
+ if (dir == READ) {
+ hdr_len = sizeof(struct rtrs_msg_rdma_read) +
+ sg_cnt * sizeof(struct rtrs_sg_desc);
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ hdr_len = sizeof(struct rtrs_msg_rdma_write);
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ rcu_read_lock();
+ for (path_it_init(&it, clt);
+ (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+ continue;
+
+ if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
+ rtrs_wrn_rl(sess->clt,
+ "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
+ dir == READ ? "Read" : "Write",
+ usr_len, hdr_len, sess->max_hdr_size);
+ err = -EMSGSIZE;
+ break;
+ }
+ req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ vec, usr_len, sg, sg_cnt, data_len,
+ dma_dir);
+ if (dir == READ)
+ err = rtrs_clt_read_req(req);
+ else
+ err = rtrs_clt_write_req(req);
+ if (unlikely(err)) {
+ req->in_use = false;
+ continue;
+ }
+ /* Success path */
+ break;
+ }
+ path_it_deinit(&it);
+ rcu_read_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_clt_request);
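+
+/*
+ * A usage sketch for rtrs_clt_request() (hypothetical caller; my_request,
+ * my_io_done and my_msg are made-up names, and @permit is assumed to have
+ * been obtained beforehand from the client permit API):
+ *
+ *	struct rtrs_clt_req_ops req_ops = {
+ *		.priv    = my_request,
+ *		.conf_fn = my_io_done,
+ *	};
+ *	struct kvec vec = {
+ *		.iov_base = &my_msg,
+ *		.iov_len  = sizeof(my_msg),
+ *	};
+ *
+ *	err = rtrs_clt_request(READ, &req_ops, clt, permit, &vec, 1,
+ *			       data_len, sglist, sg_cnt);
+ */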
+
+/**
+ * rtrs_clt_query() - queries RTRS session attributes
+ * @clt: session pointer
+ * @attr: query results for session attributes.
+ * Returns:
+ * 0 on success
+ * -ECOMM no connection to the server
+ */
+int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+{
+ if (!rtrs_clt_is_connected(clt))
+ return -ECOMM;
+
+ attr->queue_depth = clt->queue_depth;
+ attr->max_io_size = clt->max_io_size;
+ attr->sess_kobj = &clt->dev.kobj;
+ strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+
+ return 0;
+}
+EXPORT_SYMBOL(rtrs_clt_query);
+
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr)
+{
+ struct rtrs_clt_sess *sess;
+ int err;
+
+ sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
+ clt->max_segment_size);
+ if (IS_ERR(sess))
+ return PTR_ERR(sess);
+
+ /*
+ * It is totally safe to add a path in the CONNECTING state: incoming
+ * IO will never grab it. Also it is very important to add the
+ * path before init, since init fires the LINK_CONNECTED event.
+ */
+ rtrs_clt_add_path_to_arr(sess, addr);
+
+ err = init_sess(sess);
+ if (err)
+ goto close_sess;
+
+ err = rtrs_clt_create_sess_files(sess);
+ if (err)
+ goto close_sess;
+
+ return 0;
+
+close_sess:
+ rtrs_clt_remove_path_from_arr(sess);
+ rtrs_clt_close_conns(sess, true);
+ free_sess(sess);
+
+ return err;
+}
+
+static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
+{
+ if (!(dev->ib_dev->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)) {
+ pr_err("Memory registrations not supported.\n");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
+ .init = rtrs_clt_ib_dev_init
+};
+
+static int __init rtrs_client_init(void)
+{
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
+ if (IS_ERR(rtrs_clt_dev_class)) {
+ pr_err("Failed to create rtrs-client dev class\n");
+ return PTR_ERR(rtrs_clt_dev_class);
+ }
+ rtrs_wq = alloc_workqueue("rtrs_client_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ class_destroy(rtrs_clt_dev_class);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit rtrs_client_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_clt_dev_class);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_client_init);
+module_exit(rtrs_client_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
new file mode 100644
index 000000000000..167acd3c90fc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_CLT_H
+#define RTRS_CLT_H
+
+#include <linux/device.h>
+#include "rtrs-pri.h"
+
+/**
+ * enum rtrs_clt_state - Client states.
+ */
+enum rtrs_clt_state {
+ RTRS_CLT_CONNECTING,
+ RTRS_CLT_CONNECTING_ERR,
+ RTRS_CLT_RECONNECTING,
+ RTRS_CLT_CONNECTED,
+ RTRS_CLT_CLOSING,
+ RTRS_CLT_CLOSED,
+ RTRS_CLT_DEAD,
+};
+
+enum rtrs_mp_policy {
+ MP_POLICY_RR,
+ MP_POLICY_MIN_INFLIGHT,
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_reconnects {
+ int successful_cnt;
+ int fail_cnt;
+};
+
+/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
+struct rtrs_clt_stats_cpu_migr {
+ atomic_t from;
+ int to;
+};
+
+/* stats for read and write operations,
+ * see Documentation/ABI/testing/sysfs-class-rtrs-client for details
+ */
+struct rtrs_clt_stats_rdma {
+ struct {
+ u64 cnt;
+ u64 size_total;
+ } dir[2];
+
+ u64 failover_cnt;
+};
+
+struct rtrs_clt_stats_pcpu {
+ struct rtrs_clt_stats_cpu_migr cpu_migr;
+ struct rtrs_clt_stats_rdma rdma;
+};
+
+struct rtrs_clt_stats {
+ struct kobject kobj_stats;
+ struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
+ struct rtrs_clt_stats_reconnects reconnects;
+ atomic_t inflight;
+};
+
+struct rtrs_clt_con {
+ struct rtrs_con c;
+ struct rtrs_iu *rsp_ius;
+ u32 queue_size;
+ unsigned int cpu;
+ atomic_t io_cnt;
+ int cm_err;
+};
+
+/**
+ * struct rtrs_permit - reserves memory allocation for a future RDMA operation.
+ * Combine with IRQ pinning to keep the IO on the same CPU.
+ */
+struct rtrs_permit {
+ enum rtrs_clt_con_type con_type;
+ unsigned int cpu_id;
+ unsigned int mem_id;
+ unsigned int mem_off;
+};
+
+/**
+ * struct rtrs_clt_io_req - describes one inflight IO request
+ */
+struct rtrs_clt_io_req {
+ struct list_head list;
+ struct rtrs_iu *iu;
+ struct scatterlist *sglist; /* list holding user data */
+ unsigned int sg_cnt;
+ unsigned int sg_size;
+ unsigned int data_len;
+ unsigned int usr_len;
+ void *priv;
+ bool in_use;
+ struct rtrs_clt_con *con;
+ struct rtrs_sg_desc *desc;
+ struct ib_sge *sge;
+ struct rtrs_permit *permit;
+ enum dma_data_direction dir;
+ void (*conf)(void *priv, int errno);
+ unsigned long start_jiffies;
+
+ struct ib_mr *mr;
+ struct ib_cqe inv_cqe;
+ struct completion inv_comp;
+ int inv_errno;
+ bool need_inv_comp;
+ bool need_inv;
+};
+
+struct rtrs_rbuf {
+ u64 addr;
+ u32 rkey;
+};
+
+struct rtrs_clt_sess {
+ struct rtrs_sess s;
+ struct rtrs_clt *clt;
+ wait_queue_head_t state_wq;
+ enum rtrs_clt_state state;
+ atomic_t connected_cnt;
+ struct mutex init_mutex;
+ struct rtrs_clt_io_req *reqs;
+ struct delayed_work reconnect_dwork;
+ struct work_struct close_work;
+ unsigned int reconnect_attempts;
+ bool established;
+ struct rtrs_rbuf *rbufs;
+ size_t max_io_size;
+ u32 max_hdr_size;
+ u32 chunk_size;
+ size_t queue_depth;
+ u32 max_pages_per_mr;
+ int max_send_sge;
+ u32 flags;
+ struct kobject kobj;
+ struct rtrs_clt_stats *stats;
+ /* cache hca_port and hca_name to display in sysfs */
+ u8 hca_port;
+ char hca_name[IB_DEVICE_NAME_MAX];
+ struct list_head __percpu
+ *mp_skip_entry;
+};
+
+struct rtrs_clt {
+ struct list_head paths_list; /* rcu protected list */
+ size_t paths_num;
+ struct rtrs_clt_sess
+ __rcu * __percpu *pcpu_path;
+ uuid_t paths_uuid;
+ int paths_up;
+ struct mutex paths_mutex;
+ struct mutex paths_ev_mutex;
+ char sessname[NAME_MAX];
+ u16 port;
+ unsigned int max_reconnect_attempts;
+ unsigned int reconnect_delay_sec;
+ unsigned int max_segments;
+ size_t max_segment_size;
+ void *permits;
+ unsigned long *permits_map;
+ size_t queue_depth;
+ size_t max_io_size;
+ wait_queue_head_t permits_wait;
+ size_t pdu_sz;
+ void *priv;
+ void (*link_ev)(void *priv,
+ enum rtrs_clt_link_ev ev);
+ struct device dev;
+ struct kobject *kobj_paths;
+ enum rtrs_mp_policy mp_policy;
+};
+
+static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_clt_con, c);
+}
+
+static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_clt_sess, s);
+}
+
+static inline int permit_size(struct rtrs_clt *clt)
+{
+ return sizeof(struct rtrs_permit) + clt->pdu_sz;
+}
+
+static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+{
+ return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
+}
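+
+/*
+ * Layout note (illustrative): clt->permits is a flat array of permit_size()
+ * byte slots, i.e. each struct rtrs_permit is immediately followed by pdu_sz
+ * bytes of user PDU space.  E.g. assuming sizeof(struct rtrs_permit) == 16
+ * and pdu_sz == 256, get_permit(clt, 3) points at byte offset 3 * 272 = 816
+ * of clt->permits.
+ */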
+
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+ struct rtrs_addr *addr);
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
+void free_sess(struct rtrs_clt_sess *sess);
+
+/* rtrs-clt-stats.c */
+
+int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);
+
+void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);
+
+void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
+void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
+
+int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
+ bool enable);
+ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable);
+int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf,
+ size_t len);
+int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
+ char *page, size_t len);
+
+/* rtrs-clt-sysfs.c */
+
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
+void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);
+
+int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
+void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+ const struct attribute *sysfs_self);
+
+#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-log.h b/drivers/infiniband/ulp/rtrs/rtrs-log.h
new file mode 100644
index 000000000000..53c785b992f2
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-log.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_LOG_H
+#define RTRS_LOG_H
+
+#define rtrs_log(fn, obj, fmt, ...) \
+ fn("<%s>: " fmt, obj->sessname, ##__VA_ARGS__)
+
+#define rtrs_err(obj, fmt, ...) \
+ rtrs_log(pr_err, obj, fmt, ##__VA_ARGS__)
+#define rtrs_err_rl(obj, fmt, ...) \
+ rtrs_log(pr_err_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn(obj, fmt, ...) \
+ rtrs_log(pr_warn, obj, fmt, ##__VA_ARGS__)
+#define rtrs_wrn_rl(obj, fmt, ...) \
+ rtrs_log(pr_warn_ratelimited, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info(obj, fmt, ...) \
+ rtrs_log(pr_info, obj, fmt, ##__VA_ARGS__)
+#define rtrs_info_rl(obj, fmt, ...) \
+ rtrs_log(pr_info_ratelimited, obj, fmt, ##__VA_ARGS__)
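+
+/*
+ * Usage sketch: any object with a ->sessname member can be passed as @obj,
+ * e.g. a struct rtrs_sess or a struct rtrs_clt:
+ *
+ *	rtrs_err(s, "Sess info request receive failed: %s\n",
+ *		 ib_wc_status_msg(wc->status));
+ *
+ * expands to pr_err("<%s>: Sess info ...", s->sessname, ...), so every line
+ * is prefixed with the session name.
+ */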
+
+#endif /* RTRS_LOG_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
new file mode 100644
index 000000000000..0a93c87ef92b
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_PRI_H
+#define RTRS_PRI_H
+
+#include <linux/uuid.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib.h>
+
+#include "rtrs.h"
+
+#define RTRS_PROTO_VER_MAJOR 2
+#define RTRS_PROTO_VER_MINOR 0
+
+#define RTRS_PROTO_VER_STRING __stringify(RTRS_PROTO_VER_MAJOR) "." \
+ __stringify(RTRS_PROTO_VER_MINOR)
+
+enum rtrs_imm_const {
+ MAX_IMM_TYPE_BITS = 4,
+ MAX_IMM_TYPE_MASK = ((1 << MAX_IMM_TYPE_BITS) - 1),
+ MAX_IMM_PAYL_BITS = 28,
+ MAX_IMM_PAYL_MASK = ((1 << MAX_IMM_PAYL_BITS) - 1),
+};
+
+enum rtrs_imm_type {
+ RTRS_IO_REQ_IMM = 0, /* client to server */
+ RTRS_IO_RSP_IMM = 1, /* server to client */
+ RTRS_IO_RSP_W_INV_IMM = 2, /* server to client */
+
+ RTRS_HB_MSG_IMM = 8, /* HB: HeartBeat */
+ RTRS_HB_ACK_IMM = 9,
+
+ RTRS_LAST_IMM,
+};
+
+enum {
+ SERVICE_CON_QUEUE_DEPTH = 512,
+
+ MAX_PATHS_NUM = 128,
+
+ /*
+ * With the size of struct rtrs_permit allocated on the client, 4K
+ * is the maximum number of rtrs_permits we can allocate. This number is
+ * also used on the client to allocate the IU for the user connection
+ * to receive the RDMA addresses from the server.
+ */
+ MAX_SESS_QUEUE_DEPTH = 4096,
+
+ RTRS_HB_INTERVAL_MS = 5000,
+ RTRS_HB_MISSED_MAX = 5,
+
+ RTRS_MAGIC = 0x1BBD,
+ RTRS_PROTO_VER = (RTRS_PROTO_VER_MAJOR << 8) | RTRS_PROTO_VER_MINOR,
+};
+
+struct rtrs_ib_dev;
+
+struct rtrs_rdma_dev_pd_ops {
+ struct rtrs_ib_dev *(*alloc)(void);
+ void (*free)(struct rtrs_ib_dev *dev);
+ int (*init)(struct rtrs_ib_dev *dev);
+ void (*deinit)(struct rtrs_ib_dev *dev);
+};
+
+struct rtrs_rdma_dev_pd {
+ struct mutex mutex;
+ struct list_head list;
+ enum ib_pd_flags pd_flags;
+ const struct rtrs_rdma_dev_pd_ops *ops;
+};
+
+struct rtrs_ib_dev {
+ struct ib_device *ib_dev;
+ struct ib_pd *ib_pd;
+ struct kref ref;
+ struct list_head entry;
+ struct rtrs_rdma_dev_pd *pool;
+};
+
+struct rtrs_con {
+ struct rtrs_sess *sess;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct rdma_cm_id *cm_id;
+ unsigned int cid;
+};
+
+struct rtrs_sess {
+ struct list_head entry;
+ struct sockaddr_storage dst_addr;
+ struct sockaddr_storage src_addr;
+ char sessname[NAME_MAX];
+ uuid_t uuid;
+ struct rtrs_con **con;
+ unsigned int con_num;
+ unsigned int recon_cnt;
+ struct rtrs_ib_dev *dev;
+ int dev_ref;
+ struct ib_cqe *hb_cqe;
+ void (*hb_err_handler)(struct rtrs_con *con);
+ struct workqueue_struct *hb_wq;
+ struct delayed_work hb_dwork;
+ unsigned int hb_interval_ms;
+ unsigned int hb_missed_cnt;
+ unsigned int hb_missed_max;
+};
+
+/* rtrs information unit */
+struct rtrs_iu {
+ struct list_head list;
+ struct ib_cqe cqe;
+ dma_addr_t dma_addr;
+ void *buf;
+ size_t size;
+ enum dma_data_direction direction;
+};
+
+/**
+ * enum rtrs_msg_types - RTRS message types, see also rtrs/README
+ * @RTRS_MSG_INFO_REQ: Client additional info request to the server
+ * @RTRS_MSG_INFO_RSP: Server additional info response to the client
+ * @RTRS_MSG_WRITE: Client writes data per RDMA to server
+ * @RTRS_MSG_READ: Client requests data transfer from server
+ * @RTRS_MSG_RKEY_RSP: Server refreshed rkey for rbuf
+ */
+enum rtrs_msg_types {
+ RTRS_MSG_INFO_REQ,
+ RTRS_MSG_INFO_RSP,
+ RTRS_MSG_WRITE,
+ RTRS_MSG_READ,
+ RTRS_MSG_RKEY_RSP,
+};
+
+/**
+ * enum rtrs_msg_flags - RTRS message flags.
+ * @RTRS_MSG_NEED_INVAL_F: Send invalidation in response.
+ * @RTRS_MSG_NEW_RKEY_F: Send refreshed rkey in response.
+ */
+enum rtrs_msg_flags {
+ RTRS_MSG_NEED_INVAL_F = 1 << 0,
+ RTRS_MSG_NEW_RKEY_F = 1 << 1,
+};
+
+/**
+ * struct rtrs_sg_desc - RDMA-Buffer entry description
+ * @addr: Address of RDMA destination buffer
+ * @key: Authorization rkey to write to the buffer
+ * @len: Size of the buffer
+ */
+struct rtrs_sg_desc {
+ __le64 addr;
+ __le32 key;
+ __le32 len;
+};
+
+/**
+ * struct rtrs_msg_conn_req - Client connection request to the server
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @cid: Current connection id
+ * @cid_num: Number of connections per session
+ * @recon_cnt: Reconnections counter
+ * @sess_uuid: UUID of a session (path)
+ * @paths_uuid: UUID of a group of sessions (paths)
+ *
+ * NOTE: max size 56 bytes, see man rdma_connect().
+ */
+struct rtrs_msg_conn_req {
+ /* Is set to 0 by cma.c in case of AF_IB, do not touch that.
+ * see https://www.spinics.net/lists/linux-rdma/msg22397.html
+ */
+ u8 __cma_version;
+ /* On the sender side this should be set to 0, otherwise
+ * cma_save_ip_info() extracts garbage and will fail.
+ */
+ u8 __ip_version;
+ __le16 magic;
+ __le16 version;
+ __le16 cid;
+ __le16 cid_num;
+ __le16 recon_cnt;
+ uuid_t sess_uuid;
+ uuid_t paths_uuid;
+ u8 reserved[12];
+};
+
+/**
+ * struct rtrs_msg_conn_rsp - Server connection response to the client
+ * @magic: RTRS magic
+ * @version: RTRS protocol version
+ * @errno: 0 if the connection was accepted (rdma_accept()), otherwise the
+ * error code passed to rdma_reject()
+ * @queue_depth: max inflight messages (queue-depth) in this session
+ * @max_io_size: max io size server supports
+ * @max_hdr_size: max msg header size server supports
+ *
+ * NOTE: size is 56 bytes, max possible is 136 bytes, see man rdma_accept().
+ */
+struct rtrs_msg_conn_rsp {
+ __le16 magic;
+ __le16 version;
+ __le16 errno;
+ __le16 queue_depth;
+ __le32 max_io_size;
+ __le32 max_hdr_size;
+ __le32 flags;
+ u8 reserved[36];
+};
+
+/**
+ * struct rtrs_msg_info_req
+ * @type: @RTRS_MSG_INFO_REQ
+ * @sessname: Session name chosen by client
+ */
+struct rtrs_msg_info_req {
+ __le16 type;
+ u8 sessname[NAME_MAX];
+ u8 reserved[15];
+};
+
+/**
+ * struct rtrs_msg_info_rsp
+ * @type: @RTRS_MSG_INFO_RSP
+ * @sg_cnt: Number of @desc entries
+ * @desc: RDMA buffers where the client can write to server
+ */
+struct rtrs_msg_info_rsp {
+ __le16 type;
+ __le16 sg_cnt;
+ u8 reserved[4];
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rkey_rsp
+ * @type: @RTRS_MSG_RKEY_RSP
+ * @buf_id: RDMA buf_id of the new rkey
+ * @rkey: new remote key for RDMA buffers id from server
+ */
+struct rtrs_msg_rkey_rsp {
+ __le16 type;
+ __le16 buf_id;
+ __le32 rkey;
+};
+
+/**
+ * struct rtrs_msg_rdma_read - RDMA data transfer request from client
+ * @type: always @RTRS_MSG_READ
+ * @usr_len: length of user payload
+ * @sg_cnt: number of @desc entries
+ * @desc: RDMA buffers where the server can write the result to
+ */
+struct rtrs_msg_rdma_read {
+ __le16 type;
+ __le16 usr_len;
+ __le16 flags;
+ __le16 sg_cnt;
+ struct rtrs_sg_desc desc[];
+};
+
+/**
+ * struct rtrs_msg_rdma_write - Message transferred to server with RDMA-Write
+ * @type: always @RTRS_MSG_WRITE
+ * @usr_len: length of user payload
+ */
+struct rtrs_msg_rdma_write {
+ __le16 type;
+ __le16 usr_len;
+};
+
+/**
+ * struct rtrs_msg_rdma_hdr - header for read or write request
+ * @type: @RTRS_MSG_WRITE | @RTRS_MSG_READ
+ */
+struct rtrs_msg_rdma_hdr {
+ __le16 type;
+};
+
+/* rtrs.c */
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t t,
+ struct ib_device *dev, enum dma_data_direction,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc));
+void rtrs_iu_free(struct rtrs_iu *iu, enum dma_data_direction dir,
+ struct ib_device *dev, u32 queue_size);
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu);
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head);
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head);
+
+int rtrs_cq_qp_create(struct rtrs_sess *rtrs_sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx);
+void rtrs_cq_qp_destroy(struct rtrs_con *con);
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq);
+void rtrs_start_hb(struct rtrs_sess *sess);
+void rtrs_stop_hb(struct rtrs_sess *sess);
+void rtrs_send_hb_ack(struct rtrs_sess *sess);
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool);
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool);
+
+struct rtrs_ib_dev *rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool);
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev);
+
+static inline u32 rtrs_to_imm(u32 type, u32 payload)
+{
+ BUILD_BUG_ON(MAX_IMM_PAYL_BITS + MAX_IMM_TYPE_BITS != 32);
+ BUILD_BUG_ON(RTRS_LAST_IMM > (1<<MAX_IMM_TYPE_BITS));
+ return ((type & MAX_IMM_TYPE_MASK) << MAX_IMM_PAYL_BITS) |
+ (payload & MAX_IMM_PAYL_MASK);
+}
+
+static inline void rtrs_from_imm(u32 imm, u32 *type, u32 *payload)
+{
+ *payload = imm & MAX_IMM_PAYL_MASK;
+ *type = imm >> MAX_IMM_PAYL_BITS;
+}
+
+static inline u32 rtrs_to_io_req_imm(u32 addr)
+{
+ return rtrs_to_imm(RTRS_IO_REQ_IMM, addr);
+}
+
+static inline u32 rtrs_to_io_rsp_imm(u32 msg_id, int errno, bool w_inval)
+{
+ enum rtrs_imm_type type;
+ u32 payload;
+
+ /* 9 bits for errno, 19 bits for msg_id */
+ payload = (abs(errno) & 0x1ff) << 19 | (msg_id & 0x7ffff);
+ type = w_inval ? RTRS_IO_RSP_W_INV_IMM : RTRS_IO_RSP_IMM;
+
+ return rtrs_to_imm(type, payload);
+}
+
+static inline void rtrs_from_io_rsp_imm(u32 payload, u32 *msg_id, int *errno)
+{
+ /* 9 bits for errno, 19 bits for msg_id */
+ *msg_id = payload & 0x7ffff;
+ *errno = -(int)((payload >> 19) & 0x1ff);
+}
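+
+/*
+ * Worked example (illustrative): rtrs_to_io_rsp_imm(7, -EIO, false) packs
+ * type RTRS_IO_RSP_IMM (1) into the top 4 bits and abs(-EIO) = 5 plus
+ * msg_id = 7 into the 28-bit payload:
+ *
+ *	payload = (5 & 0x1ff) << 19 | (7 & 0x7ffff)	= 0x00280007
+ *	imm	= (1 & 0xf) << 28 | payload		= 0x10280007
+ *
+ * rtrs_from_imm() and rtrs_from_io_rsp_imm() reverse this back to type 1,
+ * msg_id 7 and errno -5.
+ */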
+
+#define STAT_STORE_FUNC(type, set_value, reset) \
+static ssize_t set_value##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ int ret = -EINVAL; \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ if (sysfs_streq(buf, "1")) \
+ ret = reset(stats, true); \
+ else if (sysfs_streq(buf, "0")) \
+ ret = reset(stats, false); \
+ if (ret) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define STAT_SHOW_FUNC(type, get_value, print) \
+static ssize_t get_value##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ type *stats = container_of(kobj, type, kobj_stats); \
+ \
+ return print(stats, page, PAGE_SIZE); \
+}
+
+#define STAT_ATTR(type, stat, print, reset) \
+STAT_STORE_FUNC(type, stat, reset) \
+STAT_SHOW_FUNC(type, stat, print) \
+static struct kobj_attribute stat##_attr = __ATTR_RW(stat)
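+
+/*
+ * Example: rtrs-srv-sysfs.c declares its "rdma" stats attribute with
+ *
+ *	STAT_ATTR(struct rtrs_srv_stats, rdma,
+ *		  rtrs_srv_stats_rdma_to_str, rtrs_srv_reset_rdma_stats);
+ *
+ * which generates rdma_show()/rdma_store() and a read-write kobj_attribute:
+ * reading prints the stats, writing "1" resets them.
+ */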
+
+#endif /* RTRS_PRI_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
new file mode 100644
index 000000000000..e102b1368d0c
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-srv.h"
+
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
+{
+ if (enable) {
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+
+ memset(r, 0, sizeof(*r));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len)
+{
+ struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
+ struct rtrs_srv_sess *sess = stats->sess;
+
+ return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
+ (s64)atomic64_read(&r->dir[READ].cnt),
+ (s64)atomic64_read(&r->dir[READ].size_total),
+ (s64)atomic64_read(&r->dir[WRITE].cnt),
+ (s64)atomic64_read(&r->dir[WRITE].size_total),
+ atomic_read(&sess->ids_inflight));
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
new file mode 100644
index 000000000000..3d7877534bcc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include "rtrs-pri.h"
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+
+static void rtrs_srv_release(struct kobject *kobj)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ kfree(sess);
+}
+
+static struct kobj_type ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_release,
+};
+
+static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ return scnprintf(page, PAGE_SIZE, "Usage: echo 1 > %s\n",
+ attr->attr.name);
+}
+
+static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_sess *s;
+ char str[MAXHOSTNAMELEN];
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ s = &sess->s;
+ if (!sysfs_streq(buf, "1")) {
+ rtrs_err(s, "%s: invalid value: '%s'\n",
+ attr->attr.name, buf);
+ return -EINVAL;
+ }
+
+ sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+
+ rtrs_info(s, "disconnect for path %s requested\n", str);
+ close_sess(sess);
+
+ return count;
+}
+
+static struct kobj_attribute rtrs_srv_disconnect_attr =
+ __ATTR(disconnect, 0644,
+ rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
+
+static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_con *usr_con;
+
+ sess = container_of(kobj, typeof(*sess), kobj);
+ usr_con = sess->s.con[0];
+
+ return scnprintf(page, PAGE_SIZE, "%u\n",
+ usr_con->cm_id->port_num);
+}
+
+static struct kobj_attribute rtrs_srv_hca_port_attr =
+ __ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
+
+static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+
+ return scnprintf(page, PAGE_SIZE, "%s\n",
+ sess->s.dev->ib_dev->name);
+}
+
+static struct kobj_attribute rtrs_srv_hca_name_attr =
+ __ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
+
+static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_src_addr_attr =
+ __ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
+
+static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *page)
+{
+ struct rtrs_srv_sess *sess;
+ int cnt;
+
+ sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ page, PAGE_SIZE);
+ return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+}
+
+static struct kobj_attribute rtrs_srv_dst_addr_attr =
+ __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
+
+static struct attribute *rtrs_srv_sess_attrs[] = {
+ &rtrs_srv_hca_name_attr.attr,
+ &rtrs_srv_hca_port_attr.attr,
+ &rtrs_srv_src_addr_attr.attr,
+ &rtrs_srv_dst_addr_attr.attr,
+ &rtrs_srv_disconnect_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_sess_attr_group = {
+ .attrs = rtrs_srv_sess_attrs,
+};
+
+STAT_ATTR(struct rtrs_srv_stats, rdma,
+ rtrs_srv_stats_rdma_to_str,
+ rtrs_srv_reset_rdma_stats);
+
+static struct attribute *rtrs_srv_stats_attrs[] = {
+ &rdma_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rtrs_srv_stats_attr_group = {
+ .attrs = rtrs_srv_stats_attrs,
+};
+
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+ kfree(srv);
+}
+
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int err = 0;
+
+ mutex_lock(&srv->paths_mutex);
+ if (srv->dev_ref++) {
+ /*
+ * Device needs to be registered only on the first session
+ */
+ goto unlock;
+ }
+ srv->dev.class = rtrs_dev_class;
+ srv->dev.release = rtrs_srv_dev_release;
+ err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ if (err)
+ goto unlock;
+
+ /*
+ * Suppress user space notification until
+ * sysfs files are created
+ */
+ dev_set_uevent_suppress(&srv->dev, true);
+ err = device_register(&srv->dev);
+ if (err) {
+ pr_err("device_register(): %d\n", err);
+ goto put;
+ }
+ srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
+ if (!srv->kobj_paths) {
+ err = -ENOMEM;
+ pr_err("kobject_create_and_add(): %d\n", err);
+ device_unregister(&srv->dev);
+ goto unlock;
+ }
+ dev_set_uevent_suppress(&srv->dev, false);
+ kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
+ goto unlock;
+
+put:
+ put_device(&srv->dev);
+unlock:
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static void
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ mutex_lock(&srv->paths_mutex);
+ if (!--srv->dev_ref) {
+ kobject_del(srv->kobj_paths);
+ kobject_put(srv->kobj_paths);
+ mutex_unlock(&srv->paths_mutex);
+ device_unregister(&srv->dev);
+ } else {
+ mutex_unlock(&srv->paths_mutex);
+ }
+}
+
+static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+{
+ struct rtrs_srv_stats *stats;
+
+ stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
+
+ kfree(stats);
+}
+
+static struct kobj_type ktype_stats = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = rtrs_srv_sess_stats_release,
+};
+
+static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+{
+ int err;
+ struct rtrs_sess *s = &sess->s;
+
+ err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
+ &sess->kobj, "stats");
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ return err;
+ }
+ err = sysfs_create_group(&sess->stats->kobj_stats,
+ &rtrs_srv_stats_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+
+ return err;
+}
+
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ char str[NAME_MAX];
+ int err, cnt;
+
+ cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ str, sizeof(str));
+ cnt += scnprintf(str + cnt, sizeof(str) - cnt, "@");
+ sockaddr_to_str((struct sockaddr *)&sess->s.src_addr,
+ str + cnt, sizeof(str) - cnt);
+
+ err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ if (err)
+ return err;
+
+ err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ "%s", str);
+ if (err) {
+ rtrs_err(s, "kobject_init_and_add(): %d\n", err);
+ goto destroy_root;
+ }
+ err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ if (err) {
+ rtrs_err(s, "sysfs_create_group(): %d\n", err);
+ goto put_kobj;
+ }
+ err = rtrs_srv_create_stats_files(sess);
+ if (err)
+ goto remove_group;
+
+ return 0;
+
+remove_group:
+ sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+put_kobj:
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+destroy_root:
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+
+ return err;
+}
+
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs) {
+ kobject_del(&sess->stats->kobj_stats);
+ kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&sess->kobj);
+ kobject_put(&sess->kobj);
+
+ rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ }
+}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
new file mode 100644
index 000000000000..0d9241f5d9e6
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -0,0 +1,2178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+
+#include "rtrs-srv.h"
+#include "rtrs-log.h"
+#include <rdma/ib_cm.h>
+
+MODULE_DESCRIPTION("RDMA Transport Server");
+MODULE_LICENSE("GPL");
+
+/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
+#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
+#define DEFAULT_SESS_QUEUE_DEPTH 512
+#define MAX_HDR_SIZE PAGE_SIZE
+
+/* We guarantee to serve at least 10 paths */
+#define CHUNK_POOL_SZ 10
+
+static struct rtrs_rdma_dev_pd dev_pd;
+static mempool_t *chunk_pool;
+struct class *rtrs_dev_class;
+
+static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
+static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
+
+static bool always_invalidate = true;
+module_param(always_invalidate, bool, 0444);
+MODULE_PARM_DESC(always_invalidate,
+ "Invalidate memory registration for contiguous memory regions before accessing.");
+
+module_param_named(max_chunk_size, max_chunk_size, int, 0444);
+MODULE_PARM_DESC(max_chunk_size,
+ "Max size for each IO request, when change the unit is in byte (default: "
+ __stringify(DEFAULT_MAX_CHUNK_SIZE) "KB)");
+
+module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
+MODULE_PARM_DESC(sess_queue_depth,
+ "Number of buffers for pending I/O requests to allocate per session. Maximum: "
+ __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
+ __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
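+
+/*
+ * Example (illustrative): both knobs are 0444 module parameters and can only
+ * be set at load time, e.g.
+ *
+ *	modprobe rtrs-server max_chunk_size=65536 sess_queue_depth=1024
+ *
+ * assuming the module is built as rtrs-server; the values apply to sessions
+ * established afterwards.
+ */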
+
+static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
+
+static struct workqueue_struct *rtrs_wq;
+
+static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
+{
+ return container_of(c, struct rtrs_srv_con, c);
+}
+
+static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+{
+ return container_of(s, struct rtrs_srv_sess, s);
+}
+
+static bool __rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+ bool changed = false;
+
+ lockdep_assert_held(&sess->state_lock);
+ old_state = sess->state;
+ switch (new_state) {
+ case RTRS_SRV_CONNECTED:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSING:
+ switch (old_state) {
+ case RTRS_SRV_CONNECTING:
+ case RTRS_SRV_CONNECTED:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ case RTRS_SRV_CLOSED:
+ switch (old_state) {
+ case RTRS_SRV_CLOSING:
+ changed = true;
+ fallthrough;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (changed)
+ sess->state = new_state;
+
+ return changed;
+}
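+
+/*
+ * Summary of the transitions permitted above (everything else is a no-op and
+ * returns false):
+ *
+ *	RTRS_SRV_CONNECTING -> RTRS_SRV_CONNECTED
+ *	RTRS_SRV_CONNECTING -> RTRS_SRV_CLOSING
+ *	RTRS_SRV_CONNECTED  -> RTRS_SRV_CLOSING
+ *	RTRS_SRV_CLOSING    -> RTRS_SRV_CLOSED
+ */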
+
+static bool rtrs_srv_change_state_get_old(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state,
+ enum rtrs_srv_state *old_state)
+{
+ bool changed;
+
+ spin_lock_irq(&sess->state_lock);
+ *old_state = sess->state;
+ changed = __rtrs_srv_change_state(sess, new_state);
+ spin_unlock_irq(&sess->state_lock);
+
+ return changed;
+}
+
+static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+ enum rtrs_srv_state new_state)
+{
+ enum rtrs_srv_state old_state;
+
+ return rtrs_srv_change_state_get_old(sess, new_state, &old_state);
+}
+
+static void free_id(struct rtrs_srv_op *id)
+{
+ if (!id)
+ return;
+ kfree(id);
+}
+
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ int i;
+
+ WARN_ON(atomic_read(&sess->ids_inflight));
+ if (sess->ops_ids) {
+ for (i = 0; i < srv->queue_depth; i++)
+ free_id(sess->ops_ids[i]);
+ kfree(sess->ops_ids);
+ sess->ops_ids = NULL;
+ }
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
+
+static struct ib_cqe io_comp_cqe = {
+ .done = rtrs_srv_rdma_done
+};
+
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_op *id;
+ int i;
+
+ sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
+ GFP_KERNEL);
+ if (!sess->ops_ids)
+ goto err;
+
+ for (i = 0; i < srv->queue_depth; ++i) {
+ id = kzalloc(sizeof(*id), GFP_KERNEL);
+ if (!id)
+ goto err;
+
+ sess->ops_ids[i] = id;
+ }
+ init_waitqueue_head(&sess->ids_waitq);
+ atomic_set(&sess->ids_inflight, 0);
+
+ return 0;
+
+err:
+ rtrs_srv_free_ops_ids(sess);
+ return -ENOMEM;
+}
+
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+{
+ atomic_inc(&sess->ids_inflight);
+}
+
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+{
+ if (atomic_dec_and_test(&sess->ids_inflight))
+ wake_up(&sess->ids_waitq);
+}
+
+static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
+{
+ wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
+}
+
+
+static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "REG MR failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+}
+
+static struct ib_cqe local_reg_cqe = {
+ .done = rtrs_srv_reg_mr_done
+};
+
+static int rdma_write_sg(struct rtrs_srv_op *id)
+{
+ struct rtrs_sess *s = id->con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_srv_mr *srv_mr;
+ struct rtrs_srv *srv = sess->srv;
+ struct ib_send_wr inv_wr, imm_wr;
+ struct ib_rdma_wr *wr = NULL;
+ enum ib_send_flags flags;
+ size_t sg_cnt;
+ int err, offset;
+ bool need_inval;
+ u32 rkey = 0;
+ struct ib_reg_wr rwr;
+ struct ib_sge *plist;
+ struct ib_sge list;
+
+ sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
+ need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
+ if (unlikely(sg_cnt != 1))
+ return -EINVAL;
+
+ offset = 0;
+
+ wr = &id->tx_wr;
+ plist = &id->tx_sg;
+ plist->addr = dma_addr + offset;
+ plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
+
+ /* WR will fail with length error
+ * if this is 0
+ */
+ if (unlikely(plist->length == 0)) {
+ rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
+ return -EINVAL;
+ }
+
+ plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ offset += plist->length;
+
+ wr->wr.sg_list = plist;
+ wr->wr.num_sge = 1;
+ wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
+ wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
+ if (rkey == 0)
+ rkey = wr->rkey;
+ else
+ /* Only one key is actually used */
+ WARN_ON_ONCE(rkey != wr->rkey);
+
+ wr->wr.opcode = IB_WR_RDMA_WRITE;
+ wr->wr.ex.imm_data = 0;
+ wr->wr.send_flags = 0;
+
+ if (need_inval && always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr->wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr->wr.next = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr->wr.next = &imm_wr;
+ }
+ /*
+ * From time to time we have to post signaled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = (atomic_inc_return(&id->con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+
+ if (need_inval) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ inv_wr.ex.invalidate_rkey = rkey;
+ }
+
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.wr.send_flags = 0;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
+ 0, need_inval));
+
+ imm_wr.wr_cqe = &io_comp_cqe;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ offset, DMA_BIDIRECTIONAL);
+
+ err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
+ if (unlikely(err))
+ rtrs_err(s,
+ "Posting RDMA-Write-Request to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
+ * requests or on successful WRITE request.
+ * @con: the connection to send back result
+ * @id: the id associated with the IO
+ * @errno: the error number of the IO.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ int errno)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr inv_wr, imm_wr, *wr = NULL;
+ struct ib_reg_wr rwr;
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
+ u32 imm;
+ int err;
+
+ if (id->dir == READ) {
+ struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
+ size_t sg_cnt;
+
+ need_inval = le16_to_cpu(rd_msg->flags) &
+ RTRS_MSG_NEED_INVAL_F;
+ sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
+
+ if (need_inval) {
+ if (likely(sg_cnt)) {
+ inv_wr.sg_list = NULL;
+ inv_wr.num_sge = 0;
+ inv_wr.opcode = IB_WR_SEND_WITH_INV;
+ inv_wr.send_flags = 0;
+ /* Only one key is actually used */
+ inv_wr.ex.invalidate_rkey =
+ le32_to_cpu(rd_msg->desc[0].key);
+ } else {
+ WARN_ON_ONCE(1);
+ need_inval = false;
+ }
+ }
+ }
+
+ if (need_inval && always_invalidate) {
+ wr = &inv_wr;
+ inv_wr.next = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (always_invalidate) {
+ wr = &rwr.wr;
+ rwr.wr.next = &imm_wr;
+ } else if (need_inval) {
+ wr = &inv_wr;
+ inv_wr.next = &imm_wr;
+ } else {
+ wr = &imm_wr;
+ }
+ /*
+ * From time to time we have to post signaled sends,
+ * or the send queue will fill up and only a QP reset can help.
+ */
+ flags = (atomic_inc_return(&con->wr_cnt) % srv->queue_depth) ?
+ 0 : IB_SEND_SIGNALED;
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.next = NULL;
+ if (always_invalidate) {
+ struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &sess->mrs[id->msg_id];
+ rwr.wr.next = &imm_wr;
+ rwr.wr.opcode = IB_WR_REG_MR;
+ rwr.wr.num_sge = 0;
+ rwr.wr.send_flags = 0;
+ rwr.mr = srv_mr->mr;
+ rwr.key = srv_mr->mr->rkey;
+ rwr.access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ msg = srv_mr->iu->buf;
+ msg->buf_id = cpu_to_le16(id->msg_id);
+ msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
+ msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
+
+ list.addr = srv_mr->iu->dma_addr;
+ list.length = sizeof(*msg);
+ list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ imm_wr.sg_list = &list;
+ imm_wr.num_sge = 1;
+ imm_wr.opcode = IB_WR_SEND_WITH_IMM;
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ srv_mr->iu->dma_addr,
+ srv_mr->iu->size, DMA_TO_DEVICE);
+ } else {
+ imm_wr.sg_list = NULL;
+ imm_wr.num_sge = 0;
+ imm_wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
+ }
+ imm_wr.send_flags = flags;
+ imm_wr.wr_cqe = &io_comp_cqe;
+
+ imm_wr.ex.imm_data = cpu_to_be32(imm);
+
+ err = ib_post_send(id->con->c.qp, wr, NULL);
+ if (unlikely(err))
+ rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
+ err);
+
+ return err;
+}
+
+void close_sess(struct rtrs_srv_sess *sess)
+{
+ enum rtrs_srv_state old_state;
+
+ if (rtrs_srv_change_state_get_old(sess, RTRS_SRV_CLOSING,
+ &old_state))
+ queue_work(rtrs_wq, &sess->close_work);
+ WARN_ON(sess->state != RTRS_SRV_CLOSING);
+}
+
+static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
+{
+ switch (state) {
+ case RTRS_SRV_CONNECTING:
+ return "RTRS_SRV_CONNECTING";
+ case RTRS_SRV_CONNECTED:
+ return "RTRS_SRV_CONNECTED";
+ case RTRS_SRV_CLOSING:
+ return "RTRS_SRV_CLOSING";
+ case RTRS_SRV_CLOSED:
+ return "RTRS_SRV_CLOSED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/**
+ * rtrs_srv_resp_rdma() - Finish an RDMA request
+ *
+ * @id: Internal RTRS operation identifier
+ * @status: Response code sent to the other side for this operation.
+ * 0 = success, <0 error
+ * Context: any
+ *
+ * Finish an RDMA operation. A message is sent to the client and the
+ * corresponding memory areas will be released.
+ */
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ struct rtrs_sess *s;
+ int err;
+
+ if (WARN_ON(!id))
+ return true;
+
+ con = id->con;
+ s = con->c.sess;
+ sess = to_srv_sess(s);
+
+ id->status = status;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Sending I/O response failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ goto out;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+
+ ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
+ }
+ if (unlikely(atomic_sub_return(1,
+ &con->sq_wr_avail) < 0)) {
+ pr_err("IB send queue full\n");
+ atomic_add(1, &con->sq_wr_avail);
+ spin_lock(&con->rsp_wr_wait_lock);
+ list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
+ spin_unlock(&con->rsp_wr_wait_lock);
+ return false;
+ }
+
+ if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
+ err = send_io_resp_imm(con, id, status);
+ else
+ err = rdma_write_sg(id);
+
+ if (unlikely(err)) {
+ rtrs_err_rl(s, "IO response failed: %d\n", err);
+ close_sess(sess);
+ }
+out:
+ rtrs_srv_put_ops_ids(sess);
+ return true;
+}
+EXPORT_SYMBOL(rtrs_srv_resp_rdma);
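+
+/*
+ * Usage sketch (illustrative, assuming the rdma_ev() prototype declared in
+ * rtrs.h): a user module processes the I/O and answers with the result; the
+ * "example_" names are hypothetical.  For asynchronous processing the same
+ * call is simply made later, from the completion path.
+ *
+ *	static int example_rdma_ev(struct rtrs_srv *srv, void *priv,
+ *				   struct rtrs_srv_op *id, int dir,
+ *				   void *data, size_t datalen,
+ *				   const void *usr, size_t usrlen)
+ *	{
+ *		int err = example_process(dir, data, datalen, usr, usrlen);
+ *
+ *		rtrs_srv_resp_rdma(id, err);
+ *		return 0;
+ *	}
+ */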
+
+/**
+ * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
+ * @srv: Session pointer
+ * @priv: The private pointer that is associated with the session.
+ */
+void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+{
+ srv->priv = priv;
+}
+EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
+
+static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ int i;
+
+ for (i = 0; i < sess->mrs_num; i++) {
+ struct rtrs_srv_mr *srv_mr;
+
+ srv_mr = &sess->mrs[i];
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+ ib_dereg_mr(srv_mr->mr);
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
+ sg_free_table(&srv_mr->sgt);
+ }
+ kfree(sess->mrs);
+}
+
+static int map_cont_bufs(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *ss = &sess->s;
+ int i, mri, err, mrs_num;
+ unsigned int chunk_bits;
+ int chunks_per_mr = 1;
+
+ /*
+ * Here we map queue_depth chunks to MRs.  First we have to
+ * figure out how many chunks we can map per MR.
+ */
+ if (always_invalidate) {
+ /*
+ * In order to invalidate each chunk of memory we need
+ * more memory regions, one MR per chunk.
+ */
+ mrs_num = srv->queue_depth;
+ } else {
+ chunks_per_mr =
+ sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
+ chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
+ }
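+	/*
+	 * E.g. (illustrative): with queue_depth == 512 and
+	 * max_fast_reg_page_list_len == 256 this gives mrs_num == 2 and
+	 * chunks_per_mr == 256; with always_invalidate every chunk gets
+	 * its own MR instead.
+	 */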
+
+ sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
+ if (!sess->mrs)
+ return -ENOMEM;
+
+ sess->mrs_num = mrs_num;
+
+ for (mri = 0; mri < mrs_num; mri++) {
+ struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct sg_table *sgt = &srv_mr->sgt;
+ struct scatterlist *s;
+ struct ib_mr *mr;
+ int nr, chunks;
+
+ chunks = chunks_per_mr * mri;
+ if (!always_invalidate)
+ chunks_per_mr = min_t(int, chunks_per_mr,
+ srv->queue_depth - chunks);
+
+ err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
+ if (err)
+ goto err;
+
+ for_each_sg(sgt->sgl, s, chunks_per_mr, i)
+ sg_set_page(s, srv->chunks[chunks + i],
+ max_chunk_size, 0);
+
+ nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+ if (nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto free_sg;
+ }
+ mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ sgt->nents);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto unmap_sg;
+ }
+ nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ NULL, max_chunk_size);
+ if (nr < 0 || nr < sgt->nents) {
+ err = nr < 0 ? nr : -EINVAL;
+ goto dereg_mr;
+ }
+
+ if (always_invalidate) {
+ srv_mr->iu = rtrs_iu_alloc(1,
+ sizeof(struct rtrs_msg_rkey_rsp),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_rdma_done);
+ if (!srv_mr->iu) {
+ err = -ENOMEM;
+ rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
+ goto free_iu;
+ }
+ }
+ /* Eventually dma addr for each chunk can be cached */
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ sess->dma_addr[chunks + i] = sg_dma_address(s);
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+ srv_mr->mr = mr;
+
+ continue;
+err:
+ while (mri--) {
+ srv_mr = &sess->mrs[mri];
+ sgt = &srv_mr->sgt;
+ mr = srv_mr->mr;
+free_iu:
+ rtrs_iu_free(srv_mr->iu, DMA_TO_DEVICE,
+ sess->s.dev->ib_dev, 1);
+dereg_mr:
+ ib_dereg_mr(mr);
+unmap_sg:
+ ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL);
+free_sg:
+ sg_free_table(sgt);
+ }
+ kfree(sess->mrs);
+
+ return err;
+ }
+
+ chunk_bits = ilog2(srv->queue_depth - 1) + 1;
+ sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
+
+ return 0;
+}
+
+static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
+{
+ close_sess(to_srv_sess(c->sess));
+}
+
+static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_init_hb(&sess->s, &io_comp_cqe,
+ RTRS_HB_INTERVAL_MS,
+ RTRS_HB_MISSED_MAX,
+ rtrs_srv_hb_err_handler,
+ rtrs_wq);
+}
+
+static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_start_hb(&sess->s);
+}
+
+static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+{
+ rtrs_stop_hb(&sess->s);
+}
+
+static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *iu;
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ rtrs_iu_free(iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info response send failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ return;
+ }
+ WARN_ON(wc->opcode != IB_WC_SEND);
+}
+
+static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ int up;
+
+ mutex_lock(&srv->paths_ev_mutex);
+ up = ++srv->paths_up;
+ if (up == 1)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
+ mutex_unlock(&srv->paths_ev_mutex);
+
+ /* Mark session as established */
+ sess->established = true;
+}
+
+static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ if (!sess->established)
+ return;
+
+ sess->established = false;
+ mutex_lock(&srv->paths_ev_mutex);
+ WARN_ON(!srv->paths_up);
+ if (--srv->paths_up == 0)
+ ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
+ mutex_unlock(&srv->paths_ev_mutex);
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess);
+
+static int process_info_req(struct rtrs_srv_con *con,
+ struct rtrs_msg_info_req *msg)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct ib_send_wr *reg_wr = NULL;
+ struct rtrs_msg_info_rsp *rsp;
+ struct rtrs_iu *tx_iu;
+ struct ib_reg_wr *rwr;
+ int mri, err;
+ size_t tx_sz;
+
+ err = post_recv_sess(sess);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ return err;
+ }
+ rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ if (unlikely(!rwr))
+ return -ENOMEM;
+ strlcpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+
+ tx_sz = sizeof(*rsp);
+ tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
+ if (unlikely(!tx_iu)) {
+ err = -ENOMEM;
+ goto rwr_free;
+ }
+
+ rsp = tx_iu->buf;
+ rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
+ rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+
+ for (mri = 0; mri < sess->mrs_num; mri++) {
+ struct ib_mr *mr = sess->mrs[mri].mr;
+
+ rsp->desc[mri].addr = cpu_to_le64(mr->iova);
+ rsp->desc[mri].key = cpu_to_le32(mr->rkey);
+ rsp->desc[mri].len = cpu_to_le32(mr->length);
+
+ /*
+ * Fill in reg MR request and chain them *backwards*
+ */
+ rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
+ rwr[mri].wr.opcode = IB_WR_REG_MR;
+ rwr[mri].wr.wr_cqe = &local_reg_cqe;
+ rwr[mri].wr.num_sge = 0;
+ rwr[mri].wr.send_flags = mri ? 0 : IB_SEND_SIGNALED;
+ rwr[mri].mr = mr;
+ rwr[mri].key = mr->rkey;
+ rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+ reg_wr = &rwr[mri].wr;
+ }
+
+ err = rtrs_srv_create_sess_files(sess);
+ if (unlikely(err))
+ goto iu_free;
+ kobject_get(&sess->kobj);
+ get_device(&sess->srv->dev);
+ rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(sess);
+
+ /*
+ * We do not account for the number of established connections at the
+ * moment; we rely on the client, which should send the info request only
+ * when all connections have been successfully established.  Thus, simply
+ * notify the listener with the proper event if we are the first path.
+ */
+ rtrs_srv_sess_up(sess);
+
+ ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ tx_iu->size, DMA_TO_DEVICE);
+
+ /* Send info response */
+ err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
+iu_free:
+ rtrs_iu_free(tx_iu, DMA_TO_DEVICE, sess->s.dev->ib_dev, 1);
+ }
+rwr_free:
+ kfree(rwr);
+
+ return err;
+}
+
+static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_info_req *msg;
+ struct rtrs_iu *iu;
+ int err;
+
+ WARN_ON(con->c.cid);
+
+ iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Sess info request receive failed: %s\n",
+ ib_wc_status_msg(wc->status));
+ goto close;
+ }
+ WARN_ON(wc->opcode != IB_WC_RECV);
+
+ if (unlikely(wc->byte_len < sizeof(*msg))) {
+ rtrs_err(s, "Sess info request is malformed: size %d\n",
+ wc->byte_len);
+ goto close;
+ }
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ iu->size, DMA_FROM_DEVICE);
+ msg = iu->buf;
+ if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ)) {
+ rtrs_err(s, "Sess info request is malformed: type %d\n",
+ le16_to_cpu(msg->type));
+ goto close;
+ }
+ err = process_info_req(con, msg);
+ if (unlikely(err))
+ goto close;
+
+out:
+ rtrs_iu_free(iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return;
+close:
+ close_sess(sess);
+ goto out;
+}
+
+static int post_recv_info_req(struct rtrs_srv_con *con)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_iu *rx_iu;
+ int err;
+
+ rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
+ GFP_KERNEL, sess->s.dev->ib_dev,
+ DMA_FROM_DEVICE, rtrs_srv_info_req_done);
+ if (unlikely(!rx_iu))
+ return -ENOMEM;
+ /* Prepare for getting info response */
+ err = rtrs_iu_post_recv(&con->c, rx_iu);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_iu_free(rx_iu, DMA_FROM_DEVICE, sess->s.dev->ib_dev, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
+{
+ int i, err;
+
+ for (i = 0; i < q_size; i++) {
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err))
+ return err;
+ }
+
+ return 0;
+}
+
+static int post_recv_sess(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ size_t q_size;
+ int err, cid;
+
+ for (cid = 0; cid < sess->s.con_num; cid++) {
+ if (cid == 0)
+ q_size = SERVICE_CON_QUEUE_DEPTH;
+ else
+ q_size = srv->queue_depth;
+
+ err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ if (unlikely(err)) {
+ rtrs_err(s, "post_recv_io(), err: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void process_read(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_read *msg,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t usr_len, data_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ if (unlikely(msg->sg_cnt != 1 && msg->sg_cnt != 0)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, invalid message\n");
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, READ);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = READ;
+ id->msg_id = buf_id;
+ id->rd_msg = msg;
+ usr_len = le16_to_cpu(msg->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, READ, data, data_len,
+ data + data_len, usr_len);
+
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
+ buf_id, ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_write(struct rtrs_srv_con *con,
+ struct rtrs_msg_rdma_write *req,
+ u32 buf_id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+ struct rtrs_srv_op *id;
+
+ size_t data_len, usr_len;
+ void *data;
+ int ret;
+
+ if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, session is disconnected, sess state %s\n",
+ rtrs_srv_state_str(sess->state));
+ return;
+ }
+ rtrs_srv_get_ops_ids(sess);
+ rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
+ id = sess->ops_ids[buf_id];
+ id->con = con;
+ id->dir = WRITE;
+ id->msg_id = buf_id;
+
+ usr_len = le16_to_cpu(req->usr_len);
+ data_len = off - usr_len;
+ data = page_address(srv->chunks[buf_id]);
+ ret = ctx->ops.rdma_ev(srv, srv->priv, id, WRITE, data, data_len,
+ data + data_len, usr_len);
+ if (unlikely(ret)) {
+ rtrs_err_rl(s,
+ "Processing write request failed, user module callback reports err: %d\n",
+ ret);
+ goto send_err_msg;
+ }
+
+ return;
+
+send_err_msg:
+ ret = send_io_resp_imm(con, id, ret);
+ if (ret < 0) {
+ rtrs_err_rl(s,
+ "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
+ buf_id, ret);
+ close_sess(sess);
+ }
+ rtrs_srv_put_ops_ids(sess);
+}
+
+static void process_io_req(struct rtrs_srv_con *con, void *msg,
+ u32 id, u32 off)
+{
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_msg_rdma_hdr *hdr;
+ unsigned int type;
+
+ ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ max_chunk_size, DMA_BIDIRECTIONAL);
+ hdr = msg;
+ type = le16_to_cpu(hdr->type);
+
+ switch (type) {
+ case RTRS_MSG_WRITE:
+ process_write(con, msg, id, off);
+ break;
+ case RTRS_MSG_READ:
+ process_read(con, msg, id, off);
+ break;
+ default:
+ rtrs_err(s,
+ "Processing I/O request failed, unknown message type received: 0x%02x\n",
+ type);
+ goto err;
+ }
+
+ return;
+
+err:
+ close_sess(sess);
+}
+
+static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_mr *mr =
+ container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 msg_id, off;
+ void *data;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
+ ib_wc_status_msg(wc->status));
+ close_sess(sess);
+ }
+ msg_id = mr->msg_id;
+ off = mr->msg_off;
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+}
+
+static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
+ struct rtrs_srv_mr *mr)
+{
+ struct ib_send_wr wr = {
+ .opcode = IB_WR_LOCAL_INV,
+ .wr_cqe = &mr->inv_cqe,
+ .send_flags = IB_SEND_SIGNALED,
+ .ex.invalidate_rkey = mr->mr->rkey,
+ };
+ mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
+
+ return ib_post_send(con->c.qp, &wr, NULL);
+}
+
+static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
+{
+ spin_lock(&con->rsp_wr_wait_lock);
+ while (!list_empty(&con->rsp_wr_wait_list)) {
+ struct rtrs_srv_op *id;
+ int ret;
+
+ id = list_entry(con->rsp_wr_wait_list.next,
+ struct rtrs_srv_op, wait_list);
+ list_del(&id->wait_list);
+
+ spin_unlock(&con->rsp_wr_wait_lock);
+ ret = rtrs_srv_resp_rdma(id, id->status);
+ spin_lock(&con->rsp_wr_wait_lock);
+
+ if (!ret) {
+ list_add(&id->wait_list, &con->rsp_wr_wait_list);
+ break;
+ }
+ }
+ spin_unlock(&con->rsp_wr_wait_lock);
+}
+
+static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct rtrs_srv_con *con = cq->cq_context;
+ struct rtrs_sess *s = con->c.sess;
+ struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_srv *srv = sess->srv;
+ u32 imm_type, imm_payload;
+ int err;
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ rtrs_err(s,
+ "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
+ ib_wc_status_msg(wc->status), wc->wr_cqe,
+ wc->opcode, wc->vendor_err, wc->byte_len);
+ close_sess(sess);
+ }
+ return;
+ }
+
+ switch (wc->opcode) {
+ case IB_WC_RECV_RDMA_WITH_IMM:
+ /*
+ * post_recv() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
+ return;
+ err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
+ close_sess(sess);
+ break;
+ }
+ rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
+ &imm_type, &imm_payload);
+ if (likely(imm_type == RTRS_IO_REQ_IMM)) {
+ u32 msg_id, off;
+ void *data;
+
+ msg_id = imm_payload >> sess->mem_bits;
+ off = imm_payload & ((1 << sess->mem_bits) - 1);
+ if (unlikely(msg_id >= srv->queue_depth ||
+ off >= max_chunk_size)) {
+ rtrs_err(s, "Wrong msg_id %u, off %u\n",
+ msg_id, off);
+ close_sess(sess);
+ return;
+ }
+ if (always_invalidate) {
+ struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+
+ mr->msg_off = off;
+ mr->msg_id = msg_id;
+ err = rtrs_srv_inv_rkey(con, mr);
+ if (unlikely(err)) {
+ rtrs_err(s, "rtrs_post_recv(), err: %d\n",
+ err);
+ close_sess(sess);
+ break;
+ }
+ } else {
+ data = page_address(srv->chunks[msg_id]) + off;
+ process_io_req(con, data, msg_id, off);
+ }
+ } else if (imm_type == RTRS_HB_MSG_IMM) {
+ WARN_ON(con->c.cid);
+ rtrs_send_hb_ack(&sess->s);
+ } else if (imm_type == RTRS_HB_ACK_IMM) {
+ WARN_ON(con->c.cid);
+ sess->s.hb_missed_cnt = 0;
+ } else {
+ rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
+ }
+ break;
+ case IB_WC_RDMA_WRITE:
+ case IB_WC_SEND:
+ /*
+ * post_send() RDMA write completions of IO reqs (read/write)
+ * and hb
+ */
+ atomic_add(srv->queue_depth, &con->sq_wr_avail);
+
+ if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
+ rtrs_rdma_process_wr_wait_list(con);
+
+ break;
+ default:
+ rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
+ return;
+ }
+}
+
+/**
+ * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
+ * @srv: Session
+ * @sessname: Sessname buffer
+ * @len: Length of sessname buffer
+ */
+int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOTCONN;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (sess->state != RTRS_SRV_CONNECTED)
+ continue;
+ strlcpy(sessname, sess->s.sessname,
+ min_t(size_t, sizeof(sess->s.sessname), len));
+ err = 0;
+ break;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL(rtrs_srv_get_sess_name);
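+
+/*
+ * Illustrative usage sketch (not part of this driver): a ULP would
+ * typically query the peer session name after a CONNECTED link event,
+ * roughly like this; the buffer is a placeholder of the caller:
+ *
+ *	char sessname[NAME_MAX];
+ *
+ *	if (!rtrs_srv_get_sess_name(srv, sessname, sizeof(sessname)))
+ *		pr_debug("peer session: %s\n", sessname);
+ */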
+
+/**
+ * rtrs_srv_get_queue_depth() - Get rtrs_srv queue depth.
+ * @srv: Session
+ */
+int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+{
+ return srv->queue_depth;
+}
+EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
+
+static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+{
+ struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ int v;
+
+ v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
+ v = cpumask_first(&cq_affinity_mask);
+ return v;
+}
+
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+{
+ sess->cur_cq_vector = find_next_bit_ring(sess);
+
+ return sess->cur_cq_vector;
+}
+
+static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+ int i;
+
+ srv = kzalloc(sizeof(*srv), GFP_KERNEL);
+ if (!srv)
+ return NULL;
+
+ refcount_set(&srv->refcount, 1);
+ INIT_LIST_HEAD(&srv->paths_list);
+ mutex_init(&srv->paths_mutex);
+ mutex_init(&srv->paths_ev_mutex);
+ uuid_copy(&srv->paths_uuid, paths_uuid);
+ srv->queue_depth = sess_queue_depth;
+ srv->ctx = ctx;
+
+ srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
+ GFP_KERNEL);
+ if (!srv->chunks)
+ goto err_free_srv;
+
+ for (i = 0; i < srv->queue_depth; i++) {
+ srv->chunks[i] = mempool_alloc(chunk_pool, GFP_KERNEL);
+ if (!srv->chunks[i])
+ goto err_free_chunks;
+ }
+ list_add(&srv->ctx_list, &ctx->srv_list);
+
+ return srv;
+
+err_free_chunks:
+ while (i--)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+
+err_free_srv:
+ kfree(srv);
+
+ return NULL;
+}
+
+static void free_srv(struct rtrs_srv *srv)
+{
+ int i;
+
+ WARN_ON(refcount_read(&srv->refcount));
+ for (i = 0; i < srv->queue_depth; i++)
+ mempool_free(srv->chunks[i], chunk_pool);
+ kfree(srv->chunks);
+ mutex_destroy(&srv->paths_mutex);
+ mutex_destroy(&srv->paths_ev_mutex);
+ /* last put to release the srv structure */
+ put_device(&srv->dev);
+}
+
+static inline struct rtrs_srv *__find_srv_and_get(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
+ if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
+ refcount_inc_not_zero(&srv->refcount))
+ return srv;
+ }
+
+ return NULL;
+}
+
+static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+ const uuid_t *paths_uuid)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ srv = __find_srv_and_get(ctx, paths_uuid);
+ if (!srv)
+ srv = __alloc_srv(ctx, paths_uuid);
+ mutex_unlock(&ctx->srv_mutex);
+
+ return srv;
+}
+
+static void put_srv(struct rtrs_srv *srv)
+{
+ if (refcount_dec_and_test(&srv->refcount)) {
+ struct rtrs_srv_ctx *ctx = srv->ctx;
+
+ WARN_ON(srv->dev.kobj.state_in_sysfs);
+
+ mutex_lock(&ctx->srv_mutex);
+ list_del(&srv->ctx_list);
+ mutex_unlock(&ctx->srv_mutex);
+ free_srv(srv);
+ }
+}
+
+static void __add_path_to_srv(struct rtrs_srv *srv,
+ struct rtrs_srv_sess *sess)
+{
+ list_add_tail(&sess->s.entry, &srv->paths_list);
+ srv->paths_num++;
+ WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
+}
+
+static void del_path_from_srv(struct rtrs_srv_sess *sess)
+{
+ struct rtrs_srv *srv = sess->srv;
+
+ if (WARN_ON(!srv))
+ return;
+
+ mutex_lock(&srv->paths_mutex);
+ list_del(&sess->s.entry);
+ WARN_ON(!srv->paths_num);
+ srv->paths_num--;
+ mutex_unlock(&srv->paths_mutex);
+}
+
+/* returns 0 if the addresses are the same, non-zero if they differ, -ENOENT on an unknown address family */
+static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
+{
+ switch (a->sa_family) {
+ case AF_IB:
+ return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
+ &((struct sockaddr_ib *)b)->sib_addr,
+ sizeof(struct ib_addr)) &&
+ (b->sa_family == AF_IB);
+ case AF_INET:
+ return memcmp(&((struct sockaddr_in *)a)->sin_addr,
+ &((struct sockaddr_in *)b)->sin_addr,
+ sizeof(struct in_addr)) &&
+ (b->sa_family == AF_INET);
+ case AF_INET6:
+ return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
+ &((struct sockaddr_in6 *)b)->sin6_addr,
+ sizeof(struct in6_addr)) &&
+ (b->sa_family == AF_INET6);
+ default:
+ return -ENOENT;
+ }
+}
+
+static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+ struct rdma_addr *addr)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ (struct sockaddr *)&addr->dst_addr) &&
+ !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ (struct sockaddr *)&addr->src_addr))
+ return true;
+
+ return false;
+}
+
+static void free_sess(struct rtrs_srv_sess *sess)
+{
+ if (sess->kobj.state_in_sysfs)
+ kobject_put(&sess->kobj);
+ else
+ kfree(sess);
+}
+
+static void rtrs_srv_close_work(struct work_struct *work)
+{
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv_con *con;
+ int i;
+
+ sess = container_of(work, typeof(*sess), close_work);
+
+ rtrs_srv_destroy_sess_files(sess);
+ rtrs_srv_stop_hb(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rdma_disconnect(con->c.cm_id);
+ ib_drain_qp(con->c.qp);
+ }
+ /* Wait for all inflights */
+ rtrs_srv_wait_ops_ids(sess);
+
+ /* Notify upper layer if we are the last path */
+ rtrs_srv_sess_down(sess);
+
+ unmap_cont_bufs(sess);
+ rtrs_srv_free_ops_ids(sess);
+
+ for (i = 0; i < sess->s.con_num; i++) {
+ if (!sess->s.con[i])
+ continue;
+ con = to_srv_con(sess->s.con[i]);
+ rtrs_cq_qp_destroy(&con->c);
+ rdma_destroy_id(con->c.cm_id);
+ kfree(con);
+ }
+ rtrs_ib_dev_put(sess->s.dev);
+
+ del_path_from_srv(sess);
+ put_srv(sess->srv);
+ sess->srv = NULL;
+ rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+
+ kfree(sess->dma_addr);
+ kfree(sess->s.con);
+ free_sess(sess);
+}
+
+static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_msg_conn_rsp msg;
+ struct rdma_conn_param param;
+ int err;
+
+ param = (struct rdma_conn_param) {
+ .rnr_retry_count = 7,
+ .private_data = &msg,
+ .private_data_len = sizeof(msg),
+ };
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .queue_depth = cpu_to_le16(srv->queue_depth),
+ .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
+ .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
+ };
+
+ if (always_invalidate)
+ msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
+
+ err = rdma_accept(cm_id, &param);
+ if (err)
+ pr_err("rdma_accept(), err: %d\n", err);
+
+ return err;
+}
+
+static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
+{
+ struct rtrs_msg_conn_rsp msg;
+ int err;
+
+ msg = (struct rtrs_msg_conn_rsp) {
+ .magic = cpu_to_le16(RTRS_MAGIC),
+ .version = cpu_to_le16(RTRS_PROTO_VER),
+ .errno = cpu_to_le16(errno),
+ };
+
+ err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
+ if (err)
+ pr_err("rdma_reject(), err: %d\n", err);
+
+ /* Bounce errno back */
+ return errno;
+}
+
+static struct rtrs_srv_sess *
+__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+{
+ struct rtrs_srv_sess *sess;
+
+ list_for_each_entry(sess, &srv->paths_list, s.entry) {
+ if (uuid_equal(&sess->s.uuid, sess_uuid))
+ return sess;
+ }
+
+ return NULL;
+}
+
+static int create_con(struct rtrs_srv_sess *sess,
+ struct rdma_cm_id *cm_id,
+ unsigned int cid)
+{
+ struct rtrs_srv *srv = sess->srv;
+ struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_con *con;
+
+ u16 cq_size, wr_queue_size;
+ int err, cq_vector;
+
+ con = kzalloc(sizeof(*con), GFP_KERNEL);
+ if (!con) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&con->rsp_wr_wait_lock);
+ INIT_LIST_HEAD(&con->rsp_wr_wait_list);
+ con->c.cm_id = cm_id;
+ con->c.sess = &sess->s;
+ con->c.cid = cid;
+ atomic_set(&con->wr_cnt, 0);
+
+ if (con->c.cid == 0) {
+ /*
+ * All receive and all send (each requiring invalidate)
+ * + 2 for drain and heartbeat
+ */
+ wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
+ cq_size = wr_queue_size;
+ } else {
+ /*
+ * All receive requests and all write requests may be posted at
+ * once, each read request additionally needs an invalidate
+ * request, + 1 for drain in case the qp gets into the error
+ * state.
+ */
+ cq_size = srv->queue_depth * 3 + 1;
+ /*
+ * In theory we might have queue_depth * 32
+ * outstanding requests if an unsafe global key is used
+ * and we have queue_depth read requests each consisting
+ * of 32 different addresses. div 3 for mlx5.
+ */
+ wr_queue_size = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
+ }
+ atomic_set(&con->sq_wr_avail, wr_queue_size);
+ cq_vector = rtrs_srv_get_next_cq_vector(sess);
+
+ /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
+ err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_size,
+ wr_queue_size, IB_POLL_WORKQUEUE);
+ if (err) {
+ rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
+ goto free_con;
+ }
+ if (con->c.cid == 0) {
+ err = post_recv_info_req(con);
+ if (err)
+ goto free_cqqp;
+ }
+ WARN_ON(sess->s.con[cid]);
+ sess->s.con[cid] = &con->c;
+
+ /*
+ * Change context from server to current connection. The other
+ * way is to use cm_id->qp->qp_context, which does not work on OFED.
+ */
+ cm_id->context = &con->c;
+
+ return 0;
+
+free_cqqp:
+ rtrs_cq_qp_destroy(&con->c);
+free_con:
+ kfree(con);
+
+err:
+ return err;
+}
+
+static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+ struct rdma_cm_id *cm_id,
+ unsigned int con_num,
+ unsigned int recon_cnt,
+ const uuid_t *uuid)
+{
+ struct rtrs_srv_sess *sess;
+ int err = -ENOMEM;
+
+ if (srv->paths_num >= MAX_PATHS_NUM) {
+ err = -ECONNRESET;
+ goto err;
+ }
+ if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
+ err = -EEXIST;
+ pr_err("Path with same addr exists\n");
+ goto err;
+ }
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess)
+ goto err;
+
+ sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
+ if (!sess->stats)
+ goto err_free_sess;
+
+ sess->stats->sess = sess;
+
+ sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
+ GFP_KERNEL);
+ if (!sess->dma_addr)
+ goto err_free_stats;
+
+ sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
+ if (!sess->s.con)
+ goto err_free_dma_addr;
+
+ sess->state = RTRS_SRV_CONNECTING;
+ sess->srv = srv;
+ sess->cur_cq_vector = -1;
+ sess->s.dst_addr = cm_id->route.addr.dst_addr;
+ sess->s.src_addr = cm_id->route.addr.src_addr;
+ sess->s.con_num = con_num;
+ sess->s.recon_cnt = recon_cnt;
+ uuid_copy(&sess->s.uuid, uuid);
+ spin_lock_init(&sess->state_lock);
+ INIT_WORK(&sess->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(sess);
+
+ sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!sess->s.dev) {
+ err = -ENOMEM;
+ goto err_free_con;
+ }
+ err = map_cont_bufs(sess);
+ if (err)
+ goto err_put_dev;
+
+ err = rtrs_srv_alloc_ops_ids(sess);
+ if (err)
+ goto err_unmap_bufs;
+
+ __add_path_to_srv(srv, sess);
+
+ return sess;
+
+err_unmap_bufs:
+ unmap_cont_bufs(sess);
+err_put_dev:
+ rtrs_ib_dev_put(sess->s.dev);
+err_free_con:
+ kfree(sess->s.con);
+err_free_dma_addr:
+ kfree(sess->dma_addr);
+err_free_stats:
+ kfree(sess->stats);
+err_free_sess:
+ kfree(sess);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
+ const struct rtrs_msg_conn_req *msg,
+ size_t len)
+{
+ struct rtrs_srv_ctx *ctx = cm_id->context;
+ struct rtrs_srv_sess *sess;
+ struct rtrs_srv *srv;
+
+ u16 version, con_num, cid;
+ u16 recon_cnt;
+ int err;
+
+ if (len < sizeof(*msg)) {
+ pr_err("Invalid RTRS connection request\n");
+ goto reject_w_econnreset;
+ }
+ if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
+ pr_err("Invalid RTRS magic\n");
+ goto reject_w_econnreset;
+ }
+ version = le16_to_cpu(msg->version);
+ if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
+ pr_err("Unsupported major RTRS version: %d, expected %d\n",
+ version >> 8, RTRS_PROTO_VER_MAJOR);
+ goto reject_w_econnreset;
+ }
+ con_num = le16_to_cpu(msg->cid_num);
+ if (con_num > 4096) {
+ /* Sanity check */
+ pr_err("Too many connections requested: %d\n", con_num);
+ goto reject_w_econnreset;
+ }
+ cid = le16_to_cpu(msg->cid);
+ if (cid >= con_num) {
+ /* Sanity check */
+ pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
+ goto reject_w_econnreset;
+ }
+ recon_cnt = le16_to_cpu(msg->recon_cnt);
+ srv = get_or_create_srv(ctx, &msg->paths_uuid);
+ if (!srv) {
+ err = -ENOMEM;
+ goto reject_w_err;
+ }
+ mutex_lock(&srv->paths_mutex);
+ sess = __find_sess(srv, &msg->sess_uuid);
+ if (sess) {
+ struct rtrs_sess *s = &sess->s;
+
+ /* Session already holds a reference */
+ put_srv(srv);
+
+ if (sess->state != RTRS_SRV_CONNECTING) {
+ rtrs_err(s, "Session in wrong state: %s\n",
+ rtrs_srv_state_str(sess->state));
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ /*
+ * Sanity checks
+ */
+ if (con_num != s->con_num || cid >= s->con_num) {
+ rtrs_err(s, "Incorrect request: %d, %d\n",
+ cid, con_num);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ if (s->con[cid]) {
+ rtrs_err(s, "Connection already exists: %d\n",
+ cid);
+ mutex_unlock(&srv->paths_mutex);
+ goto reject_w_econnreset;
+ }
+ } else {
+ sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ &msg->sess_uuid);
+ if (IS_ERR(sess)) {
+ mutex_unlock(&srv->paths_mutex);
+ put_srv(srv);
+ err = PTR_ERR(sess);
+ goto reject_w_err;
+ }
+ }
+ err = create_con(sess, cm_id, cid);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the session has other connections we follow the normal
+ * way through the workqueue, but still return an error to tell
+ * cma.c to call rdma_destroy_id() for the current connection.
+ */
+ goto close_and_return_err;
+ }
+ err = rtrs_rdma_do_accept(sess, cm_id);
+ if (err) {
+ (void)rtrs_rdma_do_reject(cm_id, err);
+ /*
+ * Since the current connection was successfully added to the
+ * session we follow the normal way through the workqueue to close
+ * the session, thus return 0 to tell cma.c that we will call
+ * rdma_destroy_id() ourselves.
+ */
+ err = 0;
+ goto close_and_return_err;
+ }
+ mutex_unlock(&srv->paths_mutex);
+
+ return 0;
+
+reject_w_err:
+ return rtrs_rdma_do_reject(cm_id, err);
+
+reject_w_econnreset:
+ return rtrs_rdma_do_reject(cm_id, -ECONNRESET);
+
+close_and_return_err:
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+
+ return err;
+}
+
+static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *ev)
+{
+ struct rtrs_srv_sess *sess = NULL;
+ struct rtrs_sess *s = NULL;
+
+ if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
+ struct rtrs_con *c = cm_id->context;
+
+ s = c->sess;
+ sess = to_srv_sess(s);
+ }
+
+ switch (ev->event) {
+ case RDMA_CM_EVENT_CONNECT_REQUEST:
+ /*
+ * In case of error cma.c will destroy cm_id,
+ * see cma_process_remove()
+ */
+ return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
+ ev->param.conn.private_data_len);
+ case RDMA_CM_EVENT_ESTABLISHED:
+ /* Nothing here */
+ break;
+ case RDMA_CM_EVENT_REJECTED:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
+ rdma_event_msg(ev->event), ev->status);
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ close_sess(sess);
+ break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ close_sess(sess);
+ break;
+ default:
+ pr_err("Ignoring unexpected CM event %s, err %d\n",
+ rdma_event_msg(ev->event), ev->status);
+ break;
+ }
+
+ return 0;
+}
+
+static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
+ struct sockaddr *addr,
+ enum rdma_ucm_port_space ps)
+{
+ struct rdma_cm_id *cm_id;
+ int ret;
+
+ cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
+ ctx, ps, IB_QPT_RC);
+ if (IS_ERR(cm_id)) {
+ ret = PTR_ERR(cm_id);
+ pr_err("Creating id for RDMA connection failed, err: %d\n",
+ ret);
+ goto err_out;
+ }
+ ret = rdma_bind_addr(cm_id, addr);
+ if (ret) {
+ pr_err("Binding RDMA address failed, err: %d\n", ret);
+ goto err_cm;
+ }
+ ret = rdma_listen(cm_id, 64);
+ if (ret) {
+ pr_err("Listening on RDMA connection failed, err: %d\n",
+ ret);
+ goto err_cm;
+ }
+
+ return cm_id;
+
+err_cm:
+ rdma_destroy_id(cm_id);
+err_out:
+
+ return ERR_PTR(ret);
+}
+
+static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
+{
+ struct sockaddr_in6 sin = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ .sin6_port = htons(port),
+ };
+ struct sockaddr_ib sib = {
+ .sib_family = AF_IB,
+ .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
+ .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
+ .sib_pkey = cpu_to_be16(0xffff),
+ };
+ struct rdma_cm_id *cm_ip, *cm_ib;
+ int ret;
+
+ /*
+ * We accept both IPoIB and IB connections, so we need to keep
+ * two cm id's, one for each socket type and port space.
+ * If the cm initialization of one of the id's fails, we abort
+ * everything.
+ */
+ cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
+ if (IS_ERR(cm_ip))
+ return PTR_ERR(cm_ip);
+
+ cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
+ if (IS_ERR(cm_ib)) {
+ ret = PTR_ERR(cm_ib);
+ goto free_cm_ip;
+ }
+
+ ctx->cm_id_ip = cm_ip;
+ ctx->cm_id_ib = cm_ib;
+
+ return 0;
+
+free_cm_ip:
+ rdma_destroy_id(cm_ip);
+
+ return ret;
+}
+
+static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
+{
+ struct rtrs_srv_ctx *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ ctx->ops = *ops;
+ mutex_init(&ctx->srv_mutex);
+ INIT_LIST_HEAD(&ctx->srv_list);
+
+ return ctx;
+}
+
+static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
+{
+ WARN_ON(!list_empty(&ctx->srv_list));
+ mutex_destroy(&ctx->srv_mutex);
+ kfree(ctx);
+}
+
+/**
+ * rtrs_srv_open() - open RTRS server context
+ * @ops: callback functions
+ * @port: port to listen on
+ *
+ * Creates server context with specified callbacks.
+ *
+ * Return a valid pointer on success, an ERR_PTR() otherwise.
+ */
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
+{
+ struct rtrs_srv_ctx *ctx;
+ int err;
+
+ ctx = alloc_srv_ctx(ops);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ err = rtrs_srv_rdma_init(ctx, port);
+ if (err) {
+ free_srv_ctx(ctx);
+ return ERR_PTR(err);
+ }
+
+ return ctx;
+}
+EXPORT_SYMBOL(rtrs_srv_open);
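+
+/*
+ * A minimal usage sketch for a hypothetical ULP (not part of this
+ * driver): the user registers rdma_ev()/link_ev() callbacks and starts
+ * listening on a port of its choosing; my_rdma_ev(), my_link_ev() and
+ * MY_PORT are placeholders.
+ *
+ *	static struct rtrs_srv_ops my_ops = {
+ *		.rdma_ev = my_rdma_ev,
+ *		.link_ev = my_link_ev,
+ *	};
+ *	static struct rtrs_srv_ctx *my_ctx;
+ *
+ *	my_ctx = rtrs_srv_open(&my_ops, MY_PORT);
+ *	if (IS_ERR(my_ctx))
+ *		return PTR_ERR(my_ctx);
+ *	...
+ *	rtrs_srv_close(my_ctx);
+ */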
+
+static void close_sessions(struct rtrs_srv *srv)
+{
+ struct rtrs_srv_sess *sess;
+
+ mutex_lock(&srv->paths_mutex);
+ list_for_each_entry(sess, &srv->paths_list, s.entry)
+ close_sess(sess);
+ mutex_unlock(&srv->paths_mutex);
+}
+
+static void close_ctx(struct rtrs_srv_ctx *ctx)
+{
+ struct rtrs_srv *srv;
+
+ mutex_lock(&ctx->srv_mutex);
+ list_for_each_entry(srv, &ctx->srv_list, ctx_list)
+ close_sessions(srv);
+ mutex_unlock(&ctx->srv_mutex);
+ flush_workqueue(rtrs_wq);
+}
+
+/**
+ * rtrs_srv_close() - close RTRS server context
+ * @ctx: pointer to server context
+ *
+ * Closes RTRS server context with all client sessions.
+ */
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
+{
+ rdma_destroy_id(ctx->cm_id_ip);
+ rdma_destroy_id(ctx->cm_id_ib);
+ close_ctx(ctx);
+ free_srv_ctx(ctx);
+}
+EXPORT_SYMBOL(rtrs_srv_close);
+
+static int check_module_params(void)
+{
+ if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
+ pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
+ sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
+ return -EINVAL;
+ }
+ if (max_chunk_size < 4096 || !is_power_of_2(max_chunk_size)) {
+ pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
+ max_chunk_size, 4096);
+ return -EINVAL;
+ }
+
+ /*
+ * Check if IB immediate data size is enough to hold the mem_id and the
+ * offset inside the memory chunk
+ */
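+ /*
+ * For example (illustrative numbers only): with a sess_queue_depth
+ * of 512 and a max_chunk_size of 128 KB the encoding needs
+ * ilog2(511) + 1 = 9 bits for the buffer id plus
+ * ilog2(131071) + 1 = 17 bits for the offset, i.e. 26 bits of
+ * immediate payload in total.
+ */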
+ if ((ilog2(sess_queue_depth - 1) + 1) +
+ (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
+ pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
+ MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init rtrs_server_init(void)
+{
+ int err;
+
+ pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
+ KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
+ max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
+ sess_queue_depth, always_invalidate);
+
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+
+ err = check_module_params();
+ if (err) {
+ pr_err("Failed to load module, invalid module parameters, err: %d\n",
+ err);
+ return err;
+ }
+ chunk_pool = mempool_create_page_pool(sess_queue_depth * CHUNK_POOL_SZ,
+ get_order(max_chunk_size));
+ if (!chunk_pool)
+ return -ENOMEM;
+ rtrs_dev_class = class_create(THIS_MODULE, "rtrs-server");
+ if (IS_ERR(rtrs_dev_class)) {
+ err = PTR_ERR(rtrs_dev_class);
+ goto out_chunk_pool;
+ }
+ rtrs_wq = alloc_workqueue("rtrs_server_wq", WQ_MEM_RECLAIM, 0);
+ if (!rtrs_wq) {
+ err = -ENOMEM;
+ goto out_dev_class;
+ }
+
+ return 0;
+
+out_dev_class:
+ class_destroy(rtrs_dev_class);
+out_chunk_pool:
+ mempool_destroy(chunk_pool);
+
+ return err;
+}
+
+static void __exit rtrs_server_exit(void)
+{
+ destroy_workqueue(rtrs_wq);
+ class_destroy(rtrs_dev_class);
+ mempool_destroy(chunk_pool);
+ rtrs_rdma_dev_pd_deinit(&dev_pd);
+}
+
+module_init(rtrs_server_init);
+module_exit(rtrs_server_exit);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
new file mode 100644
index 000000000000..dc95b0932f0d
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+
+#ifndef RTRS_SRV_H
+#define RTRS_SRV_H
+
+#include <linux/device.h>
+#include <linux/refcount.h>
+#include "rtrs-pri.h"
+
+/*
+ * enum rtrs_srv_state - Server states.
+ */
+enum rtrs_srv_state {
+ RTRS_SRV_CONNECTING,
+ RTRS_SRV_CONNECTED,
+ RTRS_SRV_CLOSING,
+ RTRS_SRV_CLOSED,
+};
+
+/* stats for read and write operations.
+ * see Documentation/ABI/testing/sysfs-class-rtrs-server for details
+ */
+struct rtrs_srv_stats_rdma_stats {
+ struct {
+ atomic64_t cnt;
+ atomic64_t size_total;
+ } dir[2];
+};
+
+struct rtrs_srv_stats {
+ struct kobject kobj_stats;
+ struct rtrs_srv_stats_rdma_stats rdma_stats;
+ struct rtrs_srv_sess *sess;
+};
+
+struct rtrs_srv_con {
+ struct rtrs_con c;
+ atomic_t wr_cnt;
+ atomic_t sq_wr_avail;
+ struct list_head rsp_wr_wait_list;
+ spinlock_t rsp_wr_wait_lock;
+};
+
+/* IO context in rtrs_srv, each io has one */
+struct rtrs_srv_op {
+ struct rtrs_srv_con *con;
+ u32 msg_id;
+ u8 dir;
+ struct rtrs_msg_rdma_read *rd_msg;
+ struct ib_rdma_wr tx_wr;
+ struct ib_sge tx_sg;
+ struct list_head wait_list;
+ int status;
+};
+
+/*
+ * server side memory region context, when always_invalidate=Y, we need
+ * queue_depth memory regions so that each one can be invalidated.
+ */
+struct rtrs_srv_mr {
+ struct ib_mr *mr;
+ struct sg_table sgt;
+ struct ib_cqe inv_cqe; /* only for always_invalidate=true */
+ u32 msg_id; /* only for always_invalidate=true */
+ u32 msg_off; /* only for always_invalidate=true */
+ struct rtrs_iu *iu; /* send buffer for new rkey msg */
+};
+
+struct rtrs_srv_sess {
+ struct rtrs_sess s;
+ struct rtrs_srv *srv;
+ struct work_struct close_work;
+ enum rtrs_srv_state state;
+ spinlock_t state_lock;
+ int cur_cq_vector;
+ struct rtrs_srv_op **ops_ids;
+ atomic_t ids_inflight;
+ wait_queue_head_t ids_waitq;
+ struct rtrs_srv_mr *mrs;
+ unsigned int mrs_num;
+ dma_addr_t *dma_addr;
+ bool established;
+ unsigned int mem_bits;
+ struct kobject kobj;
+ struct rtrs_srv_stats *stats;
+};
+
+struct rtrs_srv {
+ struct list_head paths_list;
+ int paths_up;
+ struct mutex paths_ev_mutex;
+ size_t paths_num;
+ struct mutex paths_mutex;
+ uuid_t paths_uuid;
+ refcount_t refcount;
+ struct rtrs_srv_ctx *ctx;
+ struct list_head ctx_list;
+ void *priv;
+ size_t queue_depth;
+ struct page **chunks;
+ struct device dev;
+ unsigned int dev_ref;
+ struct kobject *kobj_paths;
+};
+
+struct rtrs_srv_ctx {
+ struct rtrs_srv_ops ops;
+ struct rdma_cm_id *cm_id_ip;
+ struct rdma_cm_id *cm_id_ib;
+ struct mutex srv_mutex;
+ struct list_head srv_list;
+};
+
+extern struct class *rtrs_dev_class;
+
+void close_sess(struct rtrs_srv_sess *sess);
+
+static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
+ size_t size, int d)
+{
+ atomic64_inc(&s->rdma_stats.dir[d].cnt);
+ atomic64_add(size, &s->rdma_stats.dir[d].size_total);
+}
+
+/* functions which are implemented in rtrs-srv-stats.c */
+int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats,
+ bool enable);
+int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf,
+ size_t len);
+int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
+ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
+ char *page, size_t len);
+
+/* functions which are implemented in rtrs-srv-sysfs.c */
+int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
+void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+
+#endif /* RTRS_SRV_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
new file mode 100644
index 000000000000..ff1093d6e4bc
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -0,0 +1,612 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
+
+#include <linux/module.h>
+#include <linux/inet.h>
+
+#include "rtrs-pri.h"
+#include "rtrs-log.h"
+
+MODULE_DESCRIPTION("RDMA Transport Core");
+MODULE_LICENSE("GPL");
+
+struct rtrs_iu *rtrs_iu_alloc(u32 queue_size, size_t size, gfp_t gfp_mask,
+ struct ib_device *dma_dev,
+ enum dma_data_direction dir,
+ void (*done)(struct ib_cq *cq, struct ib_wc *wc))
+{
+ struct rtrs_iu *ius, *iu;
+ int i;
+
+ ius = kcalloc(queue_size, sizeof(*ius), gfp_mask);
+ if (!ius)
+ return NULL;
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ iu->buf = kzalloc(size, gfp_mask);
+ if (!iu->buf)
+ goto err;
+
+ iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
+ if (ib_dma_mapping_error(dma_dev, iu->dma_addr))
+ goto err;
+
+ iu->cqe.done = done;
+ iu->size = size;
+ iu->direction = dir;
+ }
+ return ius;
+err:
+ rtrs_iu_free(ius, dir, dma_dev, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
+
+void rtrs_iu_free(struct rtrs_iu *ius, enum dma_data_direction dir,
+ struct ib_device *ibdev, u32 queue_size)
+{
+ struct rtrs_iu *iu;
+ int i;
+
+ if (!ius)
+ return;
+
+ for (i = 0; i < queue_size; i++) {
+ iu = &ius[i];
+ ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, dir);
+ kfree(iu->buf);
+ }
+ kfree(ius);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_free);
+
+int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_recv_wr wr;
+ struct ib_sge list;
+
+ list.addr = iu->dma_addr;
+ list.length = iu->size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ if (list.length == 0) {
+ rtrs_wrn(con->sess,
+ "Posting receive work request failed, sg list is empty\n");
+ return -EINVAL;
+ }
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
+
+int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
+{
+ struct ib_recv_wr wr;
+
+ wr = (struct ib_recv_wr) {
+ .wr_cqe = cqe,
+ };
+
+ return ib_post_recv(con->qp, &wr, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
+
+int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
+ struct ib_send_wr *head)
+{
+ struct rtrs_sess *sess = con->sess;
+ struct ib_send_wr wr;
+ struct ib_sge list;
+
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
+ list.addr = iu->dma_addr;
+ list.length = size;
+ list.lkey = sess->dev->ib_pd->local_dma_lkey;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = &iu->cqe,
+ .sg_list = &list,
+ .num_sge = 1,
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
+
+int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
+ struct ib_sge *sge, unsigned int num_sge,
+ u32 rkey, u64 rdma_addr, u32 imm_data,
+ enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_rdma_wr wr;
+ int i;
+
+ wr = (struct ib_rdma_wr) {
+ .wr.wr_cqe = &iu->cqe,
+ .wr.sg_list = sge,
+ .wr.num_sge = num_sge,
+ .rkey = rkey,
+ .remote_addr = rdma_addr,
+ .wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .wr.ex.imm_data = cpu_to_be32(imm_data),
+ .wr.send_flags = flags,
+ };
+
+ /*
+ * If one of the sges has 0 size, the operation will fail with a
+ * length error
+ */
+ for (i = 0; i < num_sge; i++)
+ if (WARN_ON(sge[i].length == 0))
+ return -EINVAL;
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr.wr;
+ } else {
+ head = &wr.wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
+
+int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
+ u32 imm_data, enum ib_send_flags flags,
+ struct ib_send_wr *head)
+{
+ struct ib_send_wr wr;
+
+ wr = (struct ib_send_wr) {
+ .wr_cqe = cqe,
+ .send_flags = flags,
+ .opcode = IB_WR_RDMA_WRITE_WITH_IMM,
+ .ex.imm_data = cpu_to_be32(imm_data),
+ };
+
+ if (head) {
+ struct ib_send_wr *tail = head;
+
+ while (tail->next)
+ tail = tail->next;
+ tail->next = &wr;
+ } else {
+ head = &wr;
+ }
+
+ return ib_post_send(con->qp, head, NULL);
+}
+EXPORT_SYMBOL_GPL(rtrs_post_rdma_write_imm_empty);
+
+static void qp_event_handler(struct ib_event *ev, void *ctx)
+{
+ struct rtrs_con *con = ctx;
+
+ switch (ev->event) {
+ case IB_EVENT_COMM_EST:
+ rtrs_info(con->sess, "QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
+ break;
+ default:
+ rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ ib_event_msg(ev->event), ev->event);
+ break;
+ }
+}
+
+static int create_cq(struct rtrs_con *con, int cq_vector, u16 cq_size,
+ enum ib_poll_context poll_ctx)
+{
+ struct rdma_cm_id *cm_id = con->cm_id;
+ struct ib_cq *cq;
+
+ cq = ib_alloc_cq(cm_id->device, con, cq_size,
+ cq_vector, poll_ctx);
+ if (IS_ERR(cq)) {
+ rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ PTR_ERR(cq));
+ return PTR_ERR(cq);
+ }
+ con->cq = cq;
+
+ return 0;
+}
+
+static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
+ u16 wr_queue_size, u32 max_sge)
+{
+ struct ib_qp_init_attr init_attr = {NULL};
+ struct rdma_cm_id *cm_id = con->cm_id;
+ int ret;
+
+ init_attr.cap.max_send_wr = wr_queue_size;
+ init_attr.cap.max_recv_wr = wr_queue_size;
+ init_attr.cap.max_recv_sge = 1;
+ init_attr.event_handler = qp_event_handler;
+ init_attr.qp_context = con;
+ init_attr.cap.max_send_sge = max_sge;
+
+ init_attr.qp_type = IB_QPT_RC;
+ init_attr.send_cq = con->cq;
+ init_attr.recv_cq = con->cq;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+
+ ret = rdma_create_qp(cm_id, pd, &init_attr);
+ if (ret) {
+ rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ return ret;
+ }
+ con->qp = cm_id->qp;
+
+ return ret;
+}
+
+int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+ u32 max_send_sge, int cq_vector, u16 cq_size,
+ u16 wr_queue_size, enum ib_poll_context poll_ctx)
+{
+ int err;
+
+ err = create_cq(con, cq_vector, cq_size, poll_ctx);
+ if (err)
+ return err;
+
+ err = create_qp(con, sess->dev->ib_pd, wr_queue_size, max_send_sge);
+ if (err) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ return err;
+ }
+ con->sess = sess;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
+
+void rtrs_cq_qp_destroy(struct rtrs_con *con)
+{
+ if (con->qp) {
+ rdma_destroy_qp(con->cm_id);
+ con->qp = NULL;
+ }
+ if (con->cq) {
+ ib_free_cq(con->cq);
+ con->cq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
+
+static void schedule_hb(struct rtrs_sess *sess)
+{
+ queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
+ msecs_to_jiffies(sess->hb_interval_ms));
+}
+
+void rtrs_send_hb_ack(struct rtrs_sess *sess)
+{
+ struct rtrs_con *usr_con = sess->con[0];
+ u32 imm;
+ int err;
+
+ imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
+
+static void hb_work(struct work_struct *work)
+{
+ struct rtrs_con *usr_con;
+ struct rtrs_sess *sess;
+ u32 imm;
+ int err;
+
+ sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
+ usr_con = sess->con[0];
+
+ if (sess->hb_missed_cnt > sess->hb_missed_max) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+ if (sess->hb_missed_cnt++) {
+ /* Reschedule work without sending hb */
+ schedule_hb(sess);
+ return;
+ }
+ imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
+ err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ IB_SEND_SIGNALED, NULL);
+ if (err) {
+ sess->hb_err_handler(usr_con);
+ return;
+ }
+
+ schedule_hb(sess);
+}
+
+void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+ unsigned int interval_ms, unsigned int missed_max,
+ void (*err_handler)(struct rtrs_con *con),
+ struct workqueue_struct *wq)
+{
+ sess->hb_cqe = cqe;
+ sess->hb_interval_ms = interval_ms;
+ sess->hb_err_handler = err_handler;
+ sess->hb_wq = wq;
+ sess->hb_missed_max = missed_max;
+ sess->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+}
+EXPORT_SYMBOL_GPL(rtrs_init_hb);
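+
+/*
+ * A hedged usage sketch: a transport (client or server side) wires the
+ * heartbeat up once per session and starts it after the connection is
+ * established; hb_cqe, HB_INTERVAL_MS, HB_MISSED_MAX, my_hb_err_handler
+ * and my_wq are placeholders owned by the caller:
+ *
+ *	rtrs_init_hb(sess, &hb_cqe, HB_INTERVAL_MS, HB_MISSED_MAX,
+ *		     my_hb_err_handler, my_wq);
+ *	rtrs_start_hb(sess);
+ *	...
+ *	rtrs_stop_hb(sess);
+ */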
+
+void rtrs_start_hb(struct rtrs_sess *sess)
+{
+ schedule_hb(sess);
+}
+EXPORT_SYMBOL_GPL(rtrs_start_hb);
+
+void rtrs_stop_hb(struct rtrs_sess *sess)
+{
+ cancel_delayed_work_sync(&sess->hb_dwork);
+ sess->hb_missed_cnt = 0;
+ sess->hb_missed_max = 0;
+}
+EXPORT_SYMBOL_GPL(rtrs_stop_hb);
+
+static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
+ short port, struct sockaddr_storage *dst)
+{
+ struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
+ int ret;
+
+ /*
+ * We can use some of the IPv6 functions since GID is a valid
+ * IPv6 address format
+ */
+ ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
+ if (ret == 0)
+ return -EINVAL;
+
+ dst_ib->sib_family = AF_IB;
+ /*
+ * Use the same TCP server port number as the IB service ID
+ * on the IB port space range
+ */
+ dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
+ dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
+ dst_ib->sib_pkey = cpu_to_be16(0xffff);
+
+ return 0;
+}
+
+/**
+ * rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
+ * @addr: String representation of an addr (IPv4, IPv6 or IB GID):
+ * - "ip:192.168.1.1"
+ * - "ip:fe80::200:5aee:feaa:20a2"
+ * - "gid:fe80::200:5aee:feaa:20a2"
+ * @len: String address length
+ * @port: Destination port
+ * @dst: Destination sockaddr structure
+ *
+ * Returns 0 if conversion successful. Non-zero on error.
+ */
+static int rtrs_str_to_sockaddr(const char *addr, size_t len,
+ u16 port, struct sockaddr_storage *dst)
+{
+ if (strncmp(addr, "gid:", 4) == 0) {
+ return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
+ } else if (strncmp(addr, "ip:", 3) == 0) {
+ char port_str[8];
+ char *cpy;
+ int err;
+
+ snprintf(port_str, sizeof(port_str), "%u", port);
+ cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
+ err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
+ cpy, port_str, dst) : -ENOMEM;
+ kfree(cpy);
+
+ return err;
+ }
+ return -EPROTONOSUPPORT;
+}
+
+/**
+ * sockaddr_to_str() - convert sockaddr to a string.
+ * @addr: the sockaddr structure to be converted.
+ * @buf: string containing socket addr.
+ * @len: string length.
+ *
+ * The return value is the number of characters written into buf,
+ * not including the trailing '\0'. If len is 0 the function returns 0.
+ */
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
+{
+
+ switch (addr->sa_family) {
+ case AF_IB:
+ return scnprintf(buf, len, "gid:%pI6",
+ &((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
+ case AF_INET:
+ return scnprintf(buf, len, "ip:%pI4",
+ &((struct sockaddr_in *)addr)->sin_addr);
+ case AF_INET6:
+ return scnprintf(buf, len, "ip:%pI6c",
+ &((struct sockaddr_in6 *)addr)->sin6_addr);
+ }
+ return scnprintf(buf, len, "<invalid address family>");
+}
+EXPORT_SYMBOL(sockaddr_to_str);
+
+/**
+ * rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
+ * to sockaddreses
+ * @str: string containing source and destination addr of a path
+ * separated by ',' or '@' I.e. "ip:1.1.1.1,ip:1.1.1.2" or
+ * "ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it's
+ * considered to be destination.
+ * @len: string length
+ * @port: Destination port number.
+ * @addr: will be set to the source/destination address or to NULL
+ * if str doesn't contain any source address.
+ *
+ * Returns zero if conversion successful. Non-zero otherwise.
+ */
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr)
+{
+ const char *d;
+
+ d = strchr(str, ',');
+ if (!d)
+ d = strchr(str, '@');
+ if (d) {
+ if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
+ return -EINVAL;
+ d += 1;
+ len -= d - str;
+ str = d;
+
+ } else {
+ addr->src = NULL;
+ }
+ return rtrs_str_to_sockaddr(str, len, port, addr->dst);
+}
+EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
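+
+/*
+ * A minimal sketch of how a caller might parse a path string; the
+ * buffers, port number and literal addresses are illustrative only:
+ *
+ *	struct sockaddr_storage srcaddr, dstaddr;
+ *	struct rtrs_addr addr = {
+ *		.src = &srcaddr,
+ *		.dst = &dstaddr,
+ *	};
+ *	const char *str = "ip:192.168.122.1@ip:192.168.122.2";
+ *	int err;
+ *
+ *	err = rtrs_addr_to_sockaddr(str, strlen(str), 1234, &addr);
+ */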
+
+void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free));
+ INIT_LIST_HEAD(&pool->list);
+ mutex_init(&pool->mutex);
+ pool->pd_flags = pd_flags;
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
+
+void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
+{
+ mutex_destroy(&pool->mutex);
+ WARN_ON(!list_empty(&pool->list));
+}
+EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
+
+static void dev_free(struct kref *ref)
+{
+ struct rtrs_rdma_dev_pd *pool;
+ struct rtrs_ib_dev *dev;
+
+ dev = container_of(ref, typeof(*dev), ref);
+ pool = dev->pool;
+
+ mutex_lock(&pool->mutex);
+ list_del(&dev->entry);
+ mutex_unlock(&pool->mutex);
+
+ if (pool->ops && pool->ops->deinit)
+ pool->ops->deinit(dev);
+
+ ib_dealloc_pd(dev->ib_pd);
+
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+}
+
+int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
+{
+ return kref_put(&dev->ref, dev_free);
+}
+EXPORT_SYMBOL(rtrs_ib_dev_put);
+
+static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
+{
+ return kref_get_unless_zero(&dev->ref);
+}
+
+struct rtrs_ib_dev *
+rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
+ struct rtrs_rdma_dev_pd *pool)
+{
+ struct rtrs_ib_dev *dev;
+
+ mutex_lock(&pool->mutex);
+ list_for_each_entry(dev, &pool->list, entry) {
+ if (dev->ib_dev->node_guid == ib_dev->node_guid &&
+ rtrs_ib_dev_get(dev))
+ goto out_unlock;
+ }
+ mutex_unlock(&pool->mutex);
+ if (pool->ops && pool->ops->alloc)
+ dev = pool->ops->alloc();
+ else
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(dev))
+ goto out_err;
+
+ kref_init(&dev->ref);
+ dev->pool = pool;
+ dev->ib_dev = ib_dev;
+ dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
+ if (IS_ERR(dev->ib_pd))
+ goto out_free_dev;
+
+ if (pool->ops && pool->ops->init && pool->ops->init(dev))
+ goto out_free_pd;
+
+ mutex_lock(&pool->mutex);
+ list_add(&dev->entry, &pool->list);
+out_unlock:
+ mutex_unlock(&pool->mutex);
+ return dev;
+
+out_free_pd:
+ ib_dealloc_pd(dev->ib_pd);
+out_free_dev:
+ if (pool->ops && pool->ops->free)
+ pool->ops->free(dev);
+ else
+ kfree(dev);
+out_err:
+ return NULL;
+}
+EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
new file mode 100644
index 000000000000..9af750f4d783
--- /dev/null
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * RDMA Transport Layer
+ *
+ * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
+ * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
+ * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
+ */
+#ifndef RTRS_H
+#define RTRS_H
+
+#include <linux/socket.h>
+#include <linux/scatterlist.h>
+
+struct rtrs_permit;
+struct rtrs_clt;
+struct rtrs_srv_ctx;
+struct rtrs_srv;
+struct rtrs_srv_op;
+
+/*
+ * RDMA transport (RTRS) client API
+ */
+
+/**
+ * enum rtrs_clt_link_ev - Events about connectivity state of a client
+ * @RTRS_CLT_LINK_EV_RECONNECTED: Client was reconnected.
+ * @RTRS_CLT_LINK_EV_DISCONNECTED: Client was disconnected.
+ */
+enum rtrs_clt_link_ev {
+ RTRS_CLT_LINK_EV_RECONNECTED,
+ RTRS_CLT_LINK_EV_DISCONNECTED,
+};
+
+/**
+ * struct rtrs_addr - Source and destination address of a path to be established
+ * @src: source address or NULL
+ * @dst: destination address
+ */
+struct rtrs_addr {
+ struct sockaddr_storage *src;
+ struct sockaddr_storage *dst;
+};
+
+/**
+ * struct rtrs_clt_ops - it holds the link event callback and private pointer.
+ * @priv: User supplied private data, passed back to @link_ev.
+ * @link_ev: Event notification callback function for connection state
+ * changes; called with @priv and the occurred event.
+ */
+struct rtrs_clt_ops {
+ void *priv;
+ void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
+};
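+
+/*
+ * Illustrative sketch of a client ops table; my_link_ev() and my_priv
+ * are placeholders provided by the user of this API:
+ *
+ *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
+ *	{
+ *		if (ev == RTRS_CLT_LINK_EV_DISCONNECTED)
+ *			pr_info("link went down\n");
+ *	}
+ *
+ *	static struct rtrs_clt_ops my_ops = {
+ *		.priv	 = &my_priv,
+ *		.link_ev = my_link_ev,
+ *	};
+ */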
+
+struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *sessname,
+ const struct rtrs_addr *paths,
+ size_t path_cnt, u16 port,
+ size_t pdu_sz, u8 reconnect_delay_sec,
+ u16 max_segments,
+ size_t max_segment_size,
+ s16 max_reconnect_attempts);
+
+void rtrs_clt_close(struct rtrs_clt *sess);
+
+/**
+ * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
+ * @permit: RTRS permit pointer, it associates the memory allocation for future
+ * RDMA operation.
+ */
+void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
+
+enum {
+ RTRS_PERMIT_NOWAIT = 0,
+ RTRS_PERMIT_WAIT = 1,
+};
+
+/**
+ * enum rtrs_clt_con_type - type of ib connection to use with a given
+ * rtrs_permit
+ * @RTRS_ADMIN_CON: use connection reserved for "service" messages
+ * @RTRS_IO_CON: use a connection reserved for IO
+ */
+enum rtrs_clt_con_type {
+ RTRS_ADMIN_CON,
+ RTRS_IO_CON
+};
+
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
+ enum rtrs_clt_con_type con_type,
+ int wait);
+
+void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
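+
+/*
+ * A hedged usage sketch: an IO submission path typically grabs a permit
+ * for an IO connection, issues the request and releases the permit once
+ * the request has been confirmed:
+ *
+ *	struct rtrs_permit *permit;
+ *
+ *	permit = rtrs_clt_get_permit(sess, RTRS_IO_CON, RTRS_PERMIT_WAIT);
+ *	if (!permit)
+ *		return -EBUSY;
+ *	...
+ *	rtrs_clt_put_permit(sess, permit);
+ */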
+
+/**
+ * struct rtrs_clt_req_ops - it holds the request confirmation callback
+ * and a private pointer.
+ * @priv: User supplied private data, passed back to @conf_fn.
+ * @conf_fn: confirmation callback, called with @priv and the request's
+ * error number (0 on success) once the request completes.
+ */
+struct rtrs_clt_req_ops {
+ void *priv;
+ void (*conf_fn)(void *priv, int errno);
+};
+
+int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
+ struct rtrs_clt *sess, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t len,
+ struct scatterlist *sg, unsigned int sg_cnt);
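+
+/*
+ * Illustrative only: a WRITE request that sends one user message (vec)
+ * along with the data scatterlist; my_conf(), my_io, data_len and the
+ * scatter table are placeholders of the caller:
+ *
+ *	struct rtrs_clt_req_ops req_ops = {
+ *		.priv	 = my_io,
+ *		.conf_fn = my_conf,
+ *	};
+ *	struct kvec vec = {
+ *		.iov_base = &my_io->msg,
+ *		.iov_len  = sizeof(my_io->msg),
+ *	};
+ *	int err;
+ *
+ *	err = rtrs_clt_request(WRITE, &req_ops, sess, permit, &vec, 1,
+ *			       data_len, my_io->sgt.sgl, my_io->sgt.nents);
+ */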
+
+/**
+ * rtrs_attrs - RTRS session attributes
+ */
+struct rtrs_attrs {
+ u32 queue_depth;
+ u32 max_io_size;
+ u8 sessname[NAME_MAX];
+ struct kobject *sess_kobj;
+};
+
+int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+
+/*
+ * Here goes RTRS server API
+ */
+
+/**
+ * enum rtrs_srv_link_ev - Server link events
+ * @RTRS_SRV_LINK_EV_CONNECTED: Connection from client established
+ * @RTRS_SRV_LINK_EV_DISCONNECTED: Connection was disconnected, all
+ * connection RTRS resources were freed.
+ */
+enum rtrs_srv_link_ev {
+ RTRS_SRV_LINK_EV_CONNECTED,
+ RTRS_SRV_LINK_EV_DISCONNECTED,
+};
+
+struct rtrs_srv_ops {
+ /**
+ * rdma_ev(): Event notification for RDMA operations
+ * If the callback returns a value != 0, an error
+ * message for the data transfer will be sent to
+ * the client.
+ *
+ * @sess: Session
+ * @priv: Private data set by rtrs_srv_set_sess_priv()
+ * @id: internal RTRS operation id
+ * @dir: READ/WRITE
+ * @data: Pointer to (bidirectional) rdma memory area:
+ * - in case of %RTRS_SRV_RDMA_EV_RECV contains
+ * data sent by the client
+ * - in case of %RTRS_SRV_RDMA_EV_WRITE_REQ points
+ * to the memory area where the response is to be
+ * written
+ * @datalen: Size of the memory area in @data
+ * @usr: The extra user message sent by the client (%vec)
+ * @usrlen: Size of the user message
+ */
+ int (*rdma_ev)(struct rtrs_srv *sess, void *priv,
+ struct rtrs_srv_op *id, int dir,
+ void *data, size_t datalen, const void *usr,
+ size_t usrlen);
+ /**
+ * link_ev(): Events about connectivity state changes
+ * If the callback returns a value != 0 for the
+ * %RTRS_SRV_LINK_EV_CONNECTED event, the corresponding
+ * session will be destroyed.
+ * @sess: Session
+ * @ev: event
+ * @priv: Private data from user if previously set with
+ * rtrs_srv_set_sess_priv()
+ */
+ int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ void *priv);
+};
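+
+/*
+ * A minimal, hypothetical server ops sketch. The rdma_ev() handler is
+ * expected to eventually answer the operation with rtrs_srv_resp_rdma();
+ * for simplicity this sketch responds synchronously. my_process() and
+ * my_link_ev() are placeholders of the user:
+ *
+ *	static int my_rdma_ev(struct rtrs_srv *sess, void *priv,
+ *			      struct rtrs_srv_op *id, int dir,
+ *			      void *data, size_t datalen,
+ *			      const void *usr, size_t usrlen)
+ *	{
+ *		int err = my_process(priv, dir, data, datalen, usr, usrlen);
+ *
+ *		rtrs_srv_resp_rdma(id, err);
+ *		return 0;
+ *	}
+ *
+ *	static struct rtrs_srv_ops my_srv_ops = {
+ *		.rdma_ev = my_rdma_ev,
+ *		.link_ev = my_link_ev,
+ *	};
+ */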
+
+struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port);
+
+void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
+
+bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
+
+void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+
+int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+
+int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+
+int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
+ struct rtrs_addr *addr);
+
+int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+#endif
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index cd1181c39ed2..d8fcd21ab472 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -71,7 +71,6 @@ static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
-static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;
@@ -95,10 +94,6 @@ module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
-module_param(prefer_fr, bool, 0444);
-MODULE_PARM_DESC(prefer_fr,
-"Whether to use fast registration if both FMR and fast registration are supported");
-
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
@@ -146,7 +141,7 @@ module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
"Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
-static void srp_add_one(struct ib_device *device);
+static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -388,24 +383,6 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
srp_new_ib_cm_id(ch);
}
-static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
-{
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_fmr_pool_param fmr_param;
-
- memset(&fmr_param, 0, sizeof(fmr_param));
- fmr_param.pool_size = target->mr_pool_size;
- fmr_param.dirty_watermark = fmr_param.pool_size / 4;
- fmr_param.cache = 1;
- fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
- fmr_param.page_shift = ilog2(dev->mr_page_size);
- fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
-
- return ib_create_fmr_pool(dev->pd, &fmr_param);
-}
-
/**
* srp_destroy_fr_pool() - free the resources owned by a pool
* @pool: Fast registration pool to be destroyed.
@@ -556,7 +533,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
- struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
@@ -619,14 +595,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
"FR pool allocation failed (%d)\n", ret);
goto err_qp;
}
- } else if (dev->use_fmr) {
- fmr_pool = srp_alloc_fmr_pool(target);
- if (IS_ERR(fmr_pool)) {
- ret = PTR_ERR(fmr_pool);
- shost_printk(KERN_WARNING, target->scsi_host, PFX
- "FMR pool allocation failed (%d)\n", ret);
- goto err_qp;
- }
}
if (ch->qp)
@@ -644,10 +612,6 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
ch->fr_pool = fr_pool;
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
- ch->fmr_pool = fmr_pool;
}
kfree(init_attr);
@@ -702,9 +666,6 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
- } else if (dev->use_fmr) {
- if (ch->fmr_pool)
- ib_destroy_fmr_pool(ch->fmr_pool);
}
srp_destroy_qp(ch);
@@ -1017,12 +978,8 @@ static void srp_free_req_data(struct srp_target_port *target,
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- if (dev->use_fast_reg) {
+ if (dev->use_fast_reg)
kfree(req->fr_list);
- } else {
- kfree(req->fmr_list);
- kfree(req->map_page);
- }
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
target->indirect_size,
@@ -1056,16 +1013,8 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
GFP_KERNEL);
if (!mr_list)
goto out;
- if (srp_dev->use_fast_reg) {
+ if (srp_dev->use_fast_reg)
req->fr_list = mr_list;
- } else {
- req->fmr_list = mr_list;
- req->map_page = kmalloc_array(srp_dev->max_pages_per_mr,
- sizeof(void *),
- GFP_KERNEL);
- if (!req->map_page)
- goto out;
- }
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->indirect_desc)
goto out;
@@ -1272,11 +1221,6 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
if (req->nmdesc)
srp_fr_pool_put(ch->fr_pool, req->fr_list,
req->nmdesc);
- } else if (dev->use_fmr) {
- struct ib_pool_fmr **pfmr;
-
- for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
- ib_fmr_pool_unmap(*pfmr);
}
ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -1472,50 +1416,6 @@ static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
state->ndesc++;
}
-static int srp_map_finish_fmr(struct srp_map_state *state,
- struct srp_rdma_ch *ch)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pool_fmr *fmr;
- u64 io_addr = 0;
-
- if (state->fmr.next >= state->fmr.end) {
- shost_printk(KERN_ERR, ch->target->scsi_host,
- PFX "Out of MRs (mr_per_cmd = %d)\n",
- ch->target->mr_per_cmd);
- return -ENOMEM;
- }
-
- WARN_ON_ONCE(!dev->use_fmr);
-
- if (state->npages == 0)
- return 0;
-
- if (state->npages == 1 && target->global_rkey) {
- srp_map_desc(state, state->base_dma_addr, state->dma_len,
- target->global_rkey);
- goto reset_state;
- }
-
- fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
- state->npages, io_addr);
- if (IS_ERR(fmr))
- return PTR_ERR(fmr);
-
- *state->fmr.next++ = fmr;
- state->nmdesc++;
-
- srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
- state->dma_len, fmr->fmr->rkey);
-
-reset_state:
- state->npages = 0;
- state->dma_len = 0;
-
- return 0;
-}
-
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
srp_handle_qp_err(cq, wc, "FAST REG");
@@ -1606,74 +1506,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
return n;
}
-static int srp_map_sg_entry(struct srp_map_state *state,
- struct srp_rdma_ch *ch,
- struct scatterlist *sg)
-{
- struct srp_target_port *target = ch->target;
- struct srp_device *dev = target->srp_host->srp_dev;
- dma_addr_t dma_addr = sg_dma_address(sg);
- unsigned int dma_len = sg_dma_len(sg);
- unsigned int len = 0;
- int ret;
-
- WARN_ON_ONCE(!dma_len);
-
- while (dma_len) {
- unsigned offset = dma_addr & ~dev->mr_page_mask;
-
- if (state->npages == dev->max_pages_per_mr ||
- (state->npages > 0 && offset != 0)) {
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
- }
-
- len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
-
- if (!state->npages)
- state->base_dma_addr = dma_addr;
- state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
- state->dma_len += len;
- dma_addr += len;
- dma_len -= len;
- }
-
- /*
- * If the end of the MR is not on a page boundary then we need to
- * close it out and start a new one -- we can only merge at page
- * boundaries.
- */
- ret = 0;
- if ((dma_addr & ~dev->mr_page_mask) != 0)
- ret = srp_map_finish_fmr(state, ch);
- return ret;
-}
-
-static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
- struct srp_request *req, struct scatterlist *scat,
- int count)
-{
- struct scatterlist *sg;
- int i, ret;
-
- state->pages = req->map_page;
- state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
-
- for_each_sg(scat, sg, count, i) {
- ret = srp_map_sg_entry(state, ch, sg);
- if (ret)
- return ret;
- }
-
- ret = srp_map_finish_fmr(state, ch);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
@@ -1733,7 +1565,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
struct srp_device *dev = target->srp_host->srp_dev;
struct srp_map_state state;
struct srp_direct_buf idb_desc;
- u64 idb_pages[1];
struct scatterlist idb_sg[1];
int ret;
@@ -1756,14 +1587,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
- } else if (dev->use_fmr) {
- state.pages = idb_pages;
- state.pages[0] = (req->indirect_dma_addr &
- dev->mr_page_mask);
- state.npages = 1;
- ret = srp_map_finish_fmr(&state, ch);
- if (ret < 0)
- return ret;
} else {
return -EINVAL;
}
@@ -1787,9 +1610,6 @@ static void srp_check_mapping(struct srp_map_state *state,
if (dev->use_fast_reg)
for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
mr_len += (*pfr)->mr->length;
- else if (dev->use_fmr)
- for (i = 0; i < state->nmdesc; i++)
- mr_len += be32_to_cpu(req->indirect_desc[i].len);
if (desc_len != scsi_bufflen(req->scmnd) ||
mr_len > scsi_bufflen(req->scmnd))
pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
@@ -1904,8 +1724,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
- else if (dev->use_fmr)
- ret = srp_map_sg_fmr(&state, ch, req, scat, count);
else
ret = srp_map_sg_dma(&state, ch, req, scat, count);
req->nmdesc = state.nmdesc;
@@ -3424,6 +3242,7 @@ enum {
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
+ SRP_OPT_CH_COUNT = 1 << 19,
};
static unsigned int srp_opt_mandatory[] = {
@@ -3457,6 +3276,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
+ { SRP_OPT_CH_COUNT, "ch_count=%u", },
{ SRP_OPT_ERR, NULL }
};
@@ -3758,6 +3578,14 @@ static int srp_parse_options(struct net *net, const char *buf,
target->max_it_iu_size = token;
break;
+ case SRP_OPT_CH_COUNT:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad channel count %s\n", p);
+ goto out;
+ }
+ target->ch_count = token;
+ break;
+
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
@@ -3864,13 +3692,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
}
- if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+ if (!srp_dev->has_fr && !target->allow_ext_sg &&
target->cmd_sg_cnt < target->sg_tablesize) {
pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
target->sg_tablesize = target->cmd_sg_cnt;
}
- if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+ if (srp_dev->use_fast_reg) {
bool gaps_reg = (ibdev->attrs.device_cap_flags &
IB_DEVICE_SG_GAPS_REG);
@@ -3878,12 +3706,12 @@ static ssize_t srp_create_target(struct device *dev,
(ilog2(srp_dev->mr_page_size) - 9);
if (!gaps_reg) {
/*
- * FR and FMR can only map one HCA page per entry. If
- * the start address is not aligned on a HCA page
- * boundary two entries will be used for the head and
- * the tail although these two entries combined
- * contain at most one HCA page of data. Hence the "+
- * 1" in the calculation below.
+ * FR can only map one HCA page per entry. If the start
+ * address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail
+ * although these two entries combined contain at most
+ * one HCA page of data. Hence the "+ 1" in the
+ * calculation below.
*
* The indirect data buffer descriptor is contiguous
* so the memory for that buffer will only be
@@ -3921,11 +3749,13 @@ static ssize_t srp_create_target(struct device *dev,
goto out;
ret = -ENOMEM;
- target->ch_count = max_t(unsigned, num_online_nodes(),
- min(ch_count ? :
- min(4 * num_online_nodes(),
- ibdev->num_comp_vectors),
- num_online_cpus()));
+ if (target->ch_count == 0)
+ target->ch_count =
+ max_t(unsigned int, num_online_nodes(),
+ min(ch_count ?:
+ min(4 * num_online_nodes(),
+ ibdev->num_comp_vectors),
+ num_online_cpus()));
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
@@ -4132,7 +3962,7 @@ static void srp_rename_dev(struct ib_device *device, void *client_data)
}
}
-static void srp_add_one(struct ib_device *device)
+static int srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
@@ -4144,7 +3974,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
- return;
+ return -ENOMEM;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -4162,23 +3992,15 @@ static void srp_add_one(struct ib_device *device)
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
- srp_dev->has_fmr = (device->ops.alloc_fmr &&
- device->ops.dealloc_fmr &&
- device->ops.map_phys_fmr &&
- device->ops.unmap_fmr);
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
- if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
- dev_warn(&device->dev, "neither FMR nor FR is supported\n");
- } else if (!never_register &&
- attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
- srp_dev->use_fast_reg = (srp_dev->has_fr &&
- (!srp_dev->has_fmr || prefer_fr));
- srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
- }
+ if (!never_register && !srp_dev->has_fr)
+ dev_warn(&device->dev, "FR is not supported\n");
+ else if (!never_register &&
+ attr->max_mr_size >= 2 * srp_dev->mr_page_size)
+ srp_dev->use_fast_reg = srp_dev->has_fr;
- if (never_register || !register_always ||
- (!srp_dev->has_fmr && !srp_dev->has_fr))
+ if (never_register || !register_always || !srp_dev->has_fr)
flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
if (srp_dev->use_fast_reg) {
@@ -4197,8 +4019,12 @@ static void srp_add_one(struct ib_device *device)
srp_dev->dev = device;
srp_dev->pd = ib_alloc_pd(device, flags);
- if (IS_ERR(srp_dev->pd))
- goto free_dev;
+ if (IS_ERR(srp_dev->pd)) {
+ int ret = PTR_ERR(srp_dev->pd);
+
+ kfree(srp_dev);
+ return ret;
+ }
if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
@@ -4212,10 +4038,7 @@ static void srp_add_one(struct ib_device *device)
}
ib_set_client_data(device, &srp_client, srp_dev);
- return;
-
-free_dev:
- kfree(srp_dev);
+ return 0;
}
static void srp_remove_one(struct ib_device *device, void *client_data)
@@ -4225,8 +4048,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
struct srp_target_port *target;
srp_dev = client_data;
- if (!srp_dev)
- return;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
device_unregister(&host->dev);
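Note on the ib_srp.c hunks above: the new ch_count= login option takes precedence, and only when it is absent does srp_create_target() fall back to the historical heuristic described in the module parameter text. The following is a minimal illustrative sketch of that decision order, not driver code; srp_pick_ch_count() and its parameters are invented names standing in for the live ibdev attributes and CPU topology.

/*
 * Illustrative only, not part of the patch. Mirrors the order in which the
 * channel count is chosen: an explicit "ch_count=" login option wins,
 * otherwise min(4 * nodes, completion vectors, online CPUs), but never
 * fewer than one channel per NUMA node.
 */
static unsigned int srp_pick_ch_count(unsigned int opt_ch_count,
                                      unsigned int mod_ch_count,
                                      unsigned int nodes,
                                      unsigned int comp_vectors,
                                      unsigned int cpus)
{
        unsigned int n, m;

        if (opt_ch_count)               /* "ch_count=" in the login string */
                return opt_ch_count;

        /* module parameter, or 4 * nodes capped by the HCA's vectors */
        n = mod_ch_count ? mod_ch_count :
            (4 * nodes < comp_vectors ? 4 * nodes : comp_vectors);
        m = n < cpus ? n : cpus;        /* never more channels than CPUs */
        return m > nodes ? m : nodes;   /* at least one per NUMA node */
}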
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 6fabcc2faf1f..6818cac0a3b7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -44,7 +44,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
-#include <rdma/ib_fmr_pool.h>
#include <rdma/rdma_cm.h>
enum {
@@ -95,8 +94,7 @@ enum srp_iu_type {
/*
* @mr_page_mask: HCA memory registration page mask.
* @mr_page_size: HCA memory registration page size.
- * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
- * request.
+ * @mr_max_size: Maximum size in bytes of a single FR registration request.
*/
struct srp_device {
struct list_head dev_list;
@@ -107,9 +105,7 @@ struct srp_device {
int mr_page_size;
int mr_max_size;
int max_pages_per_mr;
- bool has_fmr;
bool has_fr;
- bool use_fmr;
bool use_fast_reg;
};
@@ -127,11 +123,7 @@ struct srp_host {
struct srp_request {
struct scsi_cmnd *scmnd;
struct srp_iu *cmd;
- union {
- struct ib_pool_fmr **fmr_list;
- struct srp_fr_desc **fr_list;
- };
- u64 *map_page;
+ struct srp_fr_desc **fr_list;
struct srp_direct_buf *indirect_desc;
dma_addr_t indirect_dma_addr;
short nmdesc;
@@ -155,10 +147,7 @@ struct srp_rdma_ch {
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
struct ib_qp *qp;
- union {
- struct ib_fmr_pool *fmr_pool;
- struct srp_fr_pool *fr_pool;
- };
+ struct srp_fr_pool *fr_pool;
uint32_t max_it_iu_len;
uint32_t max_ti_iu_len;
u8 max_imm_sge;
@@ -319,20 +308,16 @@ struct srp_fr_pool {
* @pages: Array with DMA addresses of pages being considered for
* memory registration.
* @base_dma_addr: DMA address of the first page that has not yet been mapped.
- * @dma_len: Number of bytes that will be registered with the next
- * FMR or FR memory registration call.
+ * @dma_len: Number of bytes that will be registered with the next FR
+ * memory registration call.
* @total_len: Total number of bytes in the sg-list being mapped.
* @npages: Number of page addresses in the pages[] array.
- * @nmdesc: Number of FMR or FR memory descriptors used for mapping.
+ * @nmdesc: Number of FR memory descriptors used for mapping.
* @ndesc: Number of SRP buffer descriptors that have been filled in.
*/
struct srp_map_state {
union {
struct {
- struct ib_pool_fmr **next;
- struct ib_pool_fmr **end;
- } fmr;
- struct {
struct srp_fr_desc **next;
struct srp_fr_desc **end;
} fr;
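The now FR-only state above is consumed as a simple bump pointer: fr.next starts at the request's fr_list and advances toward fr.end as descriptors are taken from the pool, and running into fr.end is the "Out of MRs" case tested in srp_map_finish_fr(). A small sketch of that pattern, under the assumption that only the bounds check matters here; fr_cursor and fr_cursor_push are made-up names, srp_fr_desc is the structure declared in this header.

#include <linux/errno.h>

struct srp_fr_desc;

/* Illustrative only: take the next FR descriptor slot for a request,
 * failing once mr_per_cmd descriptors have been consumed.
 */
struct fr_cursor {
	struct srp_fr_desc **next;	/* next free slot in req->fr_list */
	struct srp_fr_desc **end;	/* req->fr_list + mr_per_cmd */
};

static int fr_cursor_push(struct fr_cursor *c, struct srp_fr_desc *desc)
{
	if (c->next >= c->end)
		return -ENOMEM;		/* out of MRs for this command */
	*c->next++ = desc;
	return 0;
}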
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 98552749d71c..ef7fcd3e8e15 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(srpt_srq_size,
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+ return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
0444);
@@ -135,14 +135,11 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
- struct srpt_device *sdev;
+ struct srpt_device *sdev =
+ container_of(handler, struct srpt_device, event_handler);
struct srpt_port *sport;
u8 port_num;
- sdev = ib_get_client_data(event->device, &srpt_client);
- if (!sdev || sdev->device != event->device)
- return;
-
pr_debug("ASYNC event= %d on device= %s\n", event->event,
dev_name(&sdev->device->dev));
@@ -217,8 +214,9 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
- event->event, ch, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
+ event->event, ch, ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
switch (event->event) {
case IB_EVENT_COMM_EST:
@@ -610,6 +608,11 @@ static int srpt_refresh_port(struct srpt_port *sport)
dev_name(&sport->sdev->device->dev), sport->port,
PTR_ERR(sport->mad_agent));
sport->mad_agent = NULL;
+ memset(&port_modify, 0, sizeof(port_modify));
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0,
+ &port_modify);
+
}
}
@@ -633,9 +636,8 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
- if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- pr_err("disabling MAD processing failed.\n");
if (sport->mad_agent) {
+ ib_modify_port(sdev->device, i, 0, &port_modify);
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
}
@@ -1814,18 +1816,13 @@ retry:
*/
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
- qp_init->cap.max_send_sge = min(attrs->max_send_sge,
- SRPT_MAX_SG_PER_WQE);
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
+ qp_init->cap.max_send_sge = attrs->max_send_sge;
+ qp_init->cap.max_recv_sge = 1;
qp_init->port_num = ch->sport->port;
- if (sdev->use_srq) {
+ if (sdev->use_srq)
qp_init->srq = sdev->srq;
- } else {
+ else
qp_init->cap.max_recv_wr = ch->rq_size;
- qp_init->cap.max_recv_sge = min(attrs->max_recv_sge,
- SRPT_MAX_SG_PER_WQE);
- }
if (ch->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
@@ -1984,8 +1981,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s because target %s_%d has been disabled\n",
- ch->sess_name,
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
dev_name(&sport->sdev->device->dev),
sport->port);
srpt_close_ch(ch);
@@ -2496,7 +2493,8 @@ reject:
SRP_BUF_FORMAT_INDIRECT);
if (rdma_cm_id)
- rdma_reject(rdma_cm_id, rej, sizeof(*rej));
+ rdma_reject(rdma_cm_id, rej, sizeof(*rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
else
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
@@ -3104,7 +3102,7 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
*/
-static void srpt_add_one(struct ib_device *device)
+static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
@@ -3115,14 +3113,16 @@ static void srpt_add_one(struct ib_device *device)
sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!sdev)
- goto err;
+ return -ENOMEM;
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(sdev->pd))
+ if (IS_ERR(sdev->pd)) {
+ ret = PTR_ERR(sdev->pd);
goto free_dev;
+ }
sdev->lkey = sdev->pd->local_dma_lkey;
@@ -3138,6 +3138,7 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->cm_id)) {
pr_info("ib_create_cm_id() failed: %ld\n",
PTR_ERR(sdev->cm_id));
+ ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
goto err_ring;
@@ -3182,7 +3183,8 @@ static void srpt_add_one(struct ib_device *device)
mutex_init(&sport->port_gid_id.mutex);
INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);
- if (srpt_refresh_port(sport)) {
+ ret = srpt_refresh_port(sport);
+ if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
goto err_event;
@@ -3193,10 +3195,9 @@ static void srpt_add_one(struct ib_device *device)
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
-out:
ib_set_client_data(device, &srpt_client, sdev);
pr_debug("added %s.\n", dev_name(&device->dev));
- return;
+ return 0;
err_event:
ib_unregister_event_handler(&sdev->event_handler);
@@ -3208,10 +3209,8 @@ err_ring:
ib_dealloc_pd(sdev->pd);
free_dev:
kfree(sdev);
-err:
- sdev = NULL;
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
- goto out;
+ return ret;
}
/**
@@ -3224,12 +3223,6 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
struct srpt_device *sdev = client_data;
int i;
- if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__,
- dev_name(&device->dev));
- return;
- }
-
srpt_unregister_mad_agent(sdev);
ib_unregister_event_handler(&sdev->event_handler);
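The srpt_event_handler() change above replaces an ib_get_client_data() lookup with container_of() on the embedded ib_event_handler, which cannot fail, so the NULL/device checks could be dropped. A stripped-down sketch of the same recovery pattern follows; the demo types are invented and only illustrate why the embedding object is always reachable from the handler pointer.

#include <stddef.h>

/* Illustrative only: recover the enclosing object from an embedded
 * callback handle, as srpt_event_handler() now does via container_of().
 */
#define container_of_demo(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event_handler_demo {
	void (*fn)(struct event_handler_demo *h);
};

struct device_demo {
	int id;
	struct event_handler_demo handler;	/* embedded, like sdev->event_handler */
};

static void demo_event(struct event_handler_demo *h)
{
	struct device_demo *dev =
		container_of_demo(h, struct device_demo, handler);

	(void)dev->id;	/* no registry lookup, no NULL case to handle */
}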
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 2e1a69840857..f31c349d07a1 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -105,11 +105,6 @@ enum {
SRP_CMD_ACA = 0x4,
SRPT_DEF_SG_TABLESIZE = 128,
- /*
- * An experimentally determined value that avoids that QP creation
- * fails due to "swiotlb buffer is full" on systems using the swiotlb.
- */
- SRPT_MAX_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 0d57e51b8ba1..e494295d1c7b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -282,7 +282,8 @@ static void evdev_pass_values(struct evdev_client *client,
spin_unlock(&client->buffer_lock);
if (wakeup)
- wake_up_interruptible(&evdev->wait);
+ wake_up_interruptible_poll(&evdev->wait,
+ EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM);
}
/*
@@ -429,7 +430,7 @@ static void evdev_hangup(struct evdev *evdev)
kill_fasync(&client->fasync, SIGIO, POLL_HUP);
spin_unlock(&evdev->client_lock);
- wake_up_interruptible(&evdev->wait);
+ wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
}
static int evdev_release(struct inode *inode, struct file *file)
@@ -945,7 +946,7 @@ static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
client->revoked = true;
evdev_ungrab(evdev, client);
input_flush_device(&evdev->handle, file);
- wake_up_interruptible(&evdev->wait);
+ wake_up_interruptible_poll(&evdev->wait, EPOLLHUP | EPOLLERR);
return 0;
}
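The evdev hunks above narrow each wakeup to the waiters whose polled events actually became true, instead of waking every sleeper on the queue. A minimal sketch of that usage, assuming a driver-private wait queue named wait; demo_dev and the two helpers are placeholders, while wake_up_interruptible_poll() and the EPOLL* masks are the calls used in the hunks.

#include <linux/wait.h>
#include <linux/poll.h>

struct demo_dev {
	wait_queue_head_t wait;
};

static void demo_data_arrived(struct demo_dev *d)
{
	/* readable data: wake readers, not writers blocked for other reasons */
	wake_up_interruptible_poll(&d->wait, EPOLLIN | EPOLLRDNORM);
}

static void demo_going_away(struct demo_dev *d)
{
	/* device disappearing: every waiter needs to see HUP/ERR */
	wake_up_interruptible_poll(&d->wait, EPOLLHUP | EPOLLERR);
}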
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 940b744639c7..6f73f02059b5 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -45,6 +45,7 @@ config JOYSTICK_A3D
config JOYSTICK_ADI
tristate "Logitech ADI digital joysticks and gamepads"
select GAMEPORT
+ depends on ADI!=m # avoid module name conflict
help
Say Y here if you have a Logitech controller using the ADI
protocol over the PC gameport.
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 28de965a08d5..793ecbbda32c 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -701,7 +701,7 @@ config KEYBOARD_SPEAR
Say Y here if you want to use the SPEAR keyboard.
To compile this driver as a module, choose M here: the
- module will be called spear-keboard.
+ module will be called spear-keyboard.
config KEYBOARD_TC3589X
tristate "TC3589X Keypad support"
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 7e3eae54c192..6ec28265771d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -24,6 +24,7 @@
#include <linux/libps2.h>
#include <linux/mutex.h>
#include <linux/dmi.h>
+#include <linux/property.h>
#define DRIVER_DESC "AT and PS/2 keyboard driver"
@@ -63,6 +64,11 @@ static bool atkbd_terminal;
module_param_named(terminal, atkbd_terminal, bool, 0);
MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
+#define MAX_FUNCTION_ROW_KEYS 24
+
+#define SCANCODE(keymap) ((keymap >> 16) & 0xFFFF)
+#define KEYCODE(keymap) (keymap & 0xFFFF)
+
/*
* Scancode to keycode tables. These are just the default setting, and
* are loadable via a userland utility.
@@ -230,6 +236,9 @@ struct atkbd {
/* Serializes reconnect(), attr->set() and event work */
struct mutex mutex;
+
+ u32 function_row_physmap[MAX_FUNCTION_ROW_KEYS];
+ int num_function_row_keys;
};
/*
@@ -283,6 +292,7 @@ static struct device_attribute atkbd_attr_##_name = \
__ATTR(_name, S_IRUGO, atkbd_do_show_##_name, NULL);
ATKBD_DEFINE_RO_ATTR(err_count);
+ATKBD_DEFINE_RO_ATTR(function_row_physmap);
static struct attribute *atkbd_attributes[] = {
&atkbd_attr_extra.attr,
@@ -292,11 +302,42 @@ static struct attribute *atkbd_attributes[] = {
&atkbd_attr_softrepeat.attr,
&atkbd_attr_softraw.attr,
&atkbd_attr_err_count.attr,
+ &atkbd_attr_function_row_physmap.attr,
NULL
};
+static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf)
+{
+ ssize_t size = 0;
+ int i;
+
+ if (!atkbd->num_function_row_keys)
+ return 0;
+
+ for (i = 0; i < atkbd->num_function_row_keys; i++)
+ size += scnprintf(buf + size, PAGE_SIZE - size, "%02X ",
+ atkbd->function_row_physmap[i]);
+ size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+ return size;
+}
+
+static umode_t atkbd_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct serio *serio = to_serio_port(dev);
+ struct atkbd *atkbd = serio_get_drvdata(serio);
+
+ if (attr == &atkbd_attr_function_row_physmap.attr &&
+ !atkbd->num_function_row_keys)
+ return 0;
+
+ return attr->mode;
+}
+
static struct attribute_group atkbd_attribute_group = {
.attrs = atkbd_attributes,
+ .is_visible = atkbd_attr_is_visible,
};
static const unsigned int xl_table[] = {
@@ -994,6 +1035,39 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
return code;
}
+static int atkbd_get_keymap_from_fwnode(struct atkbd *atkbd)
+{
+ struct device *dev = &atkbd->ps2dev.serio->dev;
+ int i, n;
+ u32 *ptr;
+ u16 scancode, keycode;
+
+ /* Parse "linux,keymap" property */
+ n = device_property_count_u32(dev, "linux,keymap");
+ if (n <= 0 || n > ATKBD_KEYMAP_SIZE)
+ return -ENXIO;
+
+ ptr = kcalloc(n, sizeof(u32), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ if (device_property_read_u32_array(dev, "linux,keymap", ptr, n)) {
+ dev_err(dev, "problem parsing FW keymap property\n");
+ kfree(ptr);
+ return -EINVAL;
+ }
+
+ memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
+ for (i = 0; i < n; i++) {
+ scancode = SCANCODE(ptr[i]);
+ keycode = KEYCODE(ptr[i]);
+ atkbd->keycode[scancode] = keycode;
+ }
+
+ kfree(ptr);
+ return 0;
+}
+
/*
* atkbd_set_keycode_table() initializes keyboard's keycode table
* according to the selected scancode set
@@ -1001,13 +1075,16 @@ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd,
static void atkbd_set_keycode_table(struct atkbd *atkbd)
{
+ struct device *dev = &atkbd->ps2dev.serio->dev;
unsigned int scancode;
int i, j;
memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
bitmap_zero(atkbd->force_release_mask, ATKBD_KEYMAP_SIZE);
- if (atkbd->translated) {
+ if (!atkbd_get_keymap_from_fwnode(atkbd)) {
+ dev_dbg(dev, "Using FW keymap\n");
+ } else if (atkbd->translated) {
for (i = 0; i < 128; i++) {
scancode = atkbd_unxlate_table[i];
atkbd->keycode[i] = atkbd_set2_keycode[scancode];
@@ -1121,6 +1198,22 @@ static void atkbd_set_device_attrs(struct atkbd *atkbd)
}
}
+static void atkbd_parse_fwnode_data(struct serio *serio)
+{
+ struct atkbd *atkbd = serio_get_drvdata(serio);
+ struct device *dev = &serio->dev;
+ int n;
+
+ /* Parse "function-row-physmap" property */
+ n = device_property_count_u32(dev, "function-row-physmap");
+ if (n > 0 && n <= MAX_FUNCTION_ROW_KEYS &&
+ !device_property_read_u32_array(dev, "function-row-physmap",
+ atkbd->function_row_physmap, n)) {
+ atkbd->num_function_row_keys = n;
+ dev_dbg(dev, "FW reported %d function-row key locations\n", n);
+ }
+}
+
/*
* atkbd_connect() is called when the serio module finds an interface
* that isn't handled yet by an appropriate device driver. We check if
@@ -1184,6 +1277,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->id = 0xab00;
}
+ atkbd_parse_fwnode_data(serio);
+
atkbd_set_keycode_table(atkbd);
atkbd_set_device_attrs(atkbd);
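The atkbd additions above read "linux,keymap" as an array of u32 cells, each packing a scancode in the upper 16 bits and a keycode in the lower 16 bits, which is exactly what the SCANCODE()/KEYCODE() macros unpack. The sketch below only illustrates that encoding; demo_keymap_cell() is a made-up helper, not part of the driver.

/* Illustrative only: layout of one "linux,keymap" cell,
 * scancode in bits 31:16, keycode in bits 15:0.
 */
static inline unsigned int demo_keymap_cell(unsigned short scancode,
					    unsigned short keycode)
{
	return ((unsigned int)scancode << 16) | keycode;
}

/* e.g. demo_keymap_cell(0x3b, KEY_F13) maps scancode 0x3b to KEY_F13 */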
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
index 9f809aeb785c..d18839f1f4f6 100644
--- a/drivers/input/keyboard/imx_sc_key.c
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -99,6 +99,15 @@ static void imx_sc_check_for_events(struct work_struct *work)
msecs_to_jiffies(REPEAT_INTERVAL));
}
+static void imx_sc_key_action(void *data)
+{
+ struct imx_key_drv_data *priv = data;
+
+ imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
+ imx_scu_irq_unregister_notifier(&priv->key_notifier);
+ cancel_delayed_work_sync(&priv->check_work);
+}
+
static int imx_sc_key_probe(struct platform_device *pdev)
{
struct imx_key_drv_data *priv;
@@ -149,27 +158,16 @@ static int imx_sc_key_probe(struct platform_device *pdev)
return error;
}
+ error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+	/* pass the devm-allocated private data itself, as the action expects */
+ if (error)
+ return error;
+
priv->key_notifier.notifier_call = imx_sc_key_notify;
error = imx_scu_irq_register_notifier(&priv->key_notifier);
- if (error) {
- imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON,
- false);
+ if (error)
dev_err(&pdev->dev, "failed to register scu notifier\n");
- return error;
- }
-
- return 0;
-}
-
-static int imx_sc_key_remove(struct platform_device *pdev)
-{
- struct imx_key_drv_data *priv = platform_get_drvdata(pdev);
-
- imx_scu_irq_group_enable(SC_IRQ_GROUP_WAKE, SC_IRQ_BUTTON, false);
- imx_scu_irq_unregister_notifier(&priv->key_notifier);
- cancel_delayed_work_sync(&priv->check_work);
- return 0;
+ return error;
}
static const struct of_device_id imx_sc_key_ids[] = {
@@ -184,7 +182,6 @@ static struct platform_driver imx_sc_key_driver = {
.of_match_table = imx_sc_key_ids,
},
.probe = imx_sc_key_probe,
- .remove = imx_sc_key_remove,
};
module_platform_driver(imx_sc_key_driver);
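The imx_sc_key rework above folds the old .remove handler into a devm action registered during probe, so cleanup runs both on a later probe failure and on device removal. The general shape of that pattern is sketched below; the demo_* names are placeholders, while devm_add_action_or_reset() is the real call used in the hunk.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv { int dummy; };

static void demo_cleanup(void *data)
{
	struct demo_priv *priv = data;

	/* undo whatever was set up before the action was registered */
	(void)priv;
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;
	int error;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* ... hardware enable steps that demo_cleanup() must undo ... */

	error = devm_add_action_or_reset(&pdev->dev, demo_cleanup, priv);
	if (error)
		return error;	/* demo_cleanup() has already run */

	return 0;
}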
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 21758767ccf0..9b0f9665dcb0 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -374,5 +374,5 @@ static void __exit tca6416_keypad_exit(void)
module_exit(tca6416_keypad_exit);
MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
-MODULE_DESCRIPTION("Keypad driver over tca6146 IO expander");
+MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7e2e658d551c..362e8a01980c 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -117,16 +117,6 @@ config INPUT_E3X0_BUTTON
To compile this driver as a module, choose M here: the
module will be called e3x0_button.
-config INPUT_MSM_VIBRATOR
- tristate "Qualcomm MSM vibrator driver"
- select INPUT_FF_MEMLESS
- help
- Support for the vibrator that is found on various Qualcomm MSM
- SOCs.
-
- To compile this driver as a module, choose M here: the module
- will be called msm_vibrator.
-
config INPUT_PCSPKR
tristate "PC Speaker support"
depends on PCSPKR_PLATFORM
@@ -265,17 +255,6 @@ config INPUT_APANEL
To compile this driver as a module, choose M here: the module will
be called apanel.
-config INPUT_GP2A
- tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
- depends on I2C
- depends on GPIOLIB || COMPILE_TEST
- help
- Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
- hooked to an I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called gp2ap002a00f.
-
config INPUT_GPIO_BEEPER
tristate "Generic GPIO Beeper support"
depends on GPIOLIB || COMPILE_TEST
@@ -739,6 +718,17 @@ config INPUT_IMS_PCU
To compile this driver as a module, choose M here: the module will be
called ims_pcu.
+config INPUT_IQS269A
+ tristate "Azoteq IQS269A capacitive touch controller"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y to enable support for the Azoteq IQS269A capacitive
+ touch controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs269a.
+
config INPUT_CMA3000
tristate "VTI CMA3000 Tri-axis accelerometer"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 8fd187f314bd..a48e5f2d859d 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -33,13 +33,13 @@ obj-$(CONFIG_INPUT_E3X0_BUTTON) += e3x0-button.o
obj-$(CONFIG_INPUT_DRV260X_HAPTICS) += drv260x.o
obj-$(CONFIG_INPUT_DRV2665_HAPTICS) += drv2665.o
obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o
-obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o
obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o
obj-$(CONFIG_INPUT_GPIO_VIBRA) += gpio-vibra.o
obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o
+obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
@@ -50,7 +50,6 @@ obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
obj-$(CONFIG_INPUT_MAX8997_HAPTIC) += max8997_haptic.o
obj-$(CONFIG_INPUT_MC13783_PWRBUTTON) += mc13783-pwrbutton.o
obj-$(CONFIG_INPUT_MMA8450) += mma8450.o
-obj-$(CONFIG_INPUT_MSM_VIBRATOR) += msm-vibrator.o
obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
deleted file mode 100644
index 90abda8eea67..000000000000
--- a/drivers/input/misc/gp2ap002a00f.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
- *
- * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
- * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
- */
-
-#include <linux/i2c.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/input/gp2ap002a00f.h>
-
-struct gp2a_data {
- struct input_dev *input;
- const struct gp2a_platform_data *pdata;
- struct i2c_client *i2c_client;
-};
-
-enum gp2a_addr {
- GP2A_ADDR_PROX = 0x0,
- GP2A_ADDR_GAIN = 0x1,
- GP2A_ADDR_HYS = 0x2,
- GP2A_ADDR_CYCLE = 0x3,
- GP2A_ADDR_OPMOD = 0x4,
- GP2A_ADDR_CON = 0x6
-};
-
-enum gp2a_controls {
- /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
- GP2A_CTRL_SSD = 0x01
-};
-
-static int gp2a_report(struct gp2a_data *dt)
-{
- int vo = gpio_get_value(dt->pdata->vout_gpio);
-
- input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
- input_sync(dt->input);
-
- return 0;
-}
-
-static irqreturn_t gp2a_irq(int irq, void *handle)
-{
- struct gp2a_data *dt = handle;
-
- gp2a_report(dt);
-
- return IRQ_HANDLED;
-}
-
-static int gp2a_enable(struct gp2a_data *dt)
-{
- return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
- GP2A_CTRL_SSD);
-}
-
-static int gp2a_disable(struct gp2a_data *dt)
-{
- return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
- 0x00);
-}
-
-static int gp2a_device_open(struct input_dev *dev)
-{
- struct gp2a_data *dt = input_get_drvdata(dev);
- int error;
-
- error = gp2a_enable(dt);
- if (error < 0) {
- dev_err(&dt->i2c_client->dev,
- "unable to activate, err %d\n", error);
- return error;
- }
-
- gp2a_report(dt);
-
- return 0;
-}
-
-static void gp2a_device_close(struct input_dev *dev)
-{
- struct gp2a_data *dt = input_get_drvdata(dev);
- int error;
-
- error = gp2a_disable(dt);
- if (error < 0)
- dev_err(&dt->i2c_client->dev,
- "unable to deactivate, err %d\n", error);
-}
-
-static int gp2a_initialize(struct gp2a_data *dt)
-{
- int error;
-
- error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
- 0x08);
- if (error < 0)
- return error;
-
- error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
- 0xc2);
- if (error < 0)
- return error;
-
- error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
- 0x04);
- if (error < 0)
- return error;
-
- error = gp2a_disable(dt);
-
- return error;
-}
-
-static int gp2a_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- const struct gp2a_platform_data *pdata = dev_get_platdata(&client->dev);
- struct gp2a_data *dt;
- int error;
-
- if (!pdata)
- return -EINVAL;
-
- if (pdata->hw_setup) {
- error = pdata->hw_setup(client);
- if (error < 0)
- return error;
- }
-
- error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
- if (error)
- goto err_hw_shutdown;
-
- dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
- if (!dt) {
- error = -ENOMEM;
- goto err_free_gpio;
- }
-
- dt->pdata = pdata;
- dt->i2c_client = client;
-
- error = gp2a_initialize(dt);
- if (error < 0)
- goto err_free_mem;
-
- dt->input = input_allocate_device();
- if (!dt->input) {
- error = -ENOMEM;
- goto err_free_mem;
- }
-
- input_set_drvdata(dt->input, dt);
-
- dt->input->open = gp2a_device_open;
- dt->input->close = gp2a_device_close;
- dt->input->name = GP2A_I2C_NAME;
- dt->input->id.bustype = BUS_I2C;
- dt->input->dev.parent = &client->dev;
-
- input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
-
- error = request_threaded_irq(client->irq, NULL, gp2a_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- GP2A_I2C_NAME, dt);
- if (error) {
- dev_err(&client->dev, "irq request failed\n");
- goto err_free_input_dev;
- }
-
- error = input_register_device(dt->input);
- if (error) {
- dev_err(&client->dev, "device registration failed\n");
- goto err_free_irq;
- }
-
- device_init_wakeup(&client->dev, pdata->wakeup);
- i2c_set_clientdata(client, dt);
-
- return 0;
-
-err_free_irq:
- free_irq(client->irq, dt);
-err_free_input_dev:
- input_free_device(dt->input);
-err_free_mem:
- kfree(dt);
-err_free_gpio:
- gpio_free(pdata->vout_gpio);
-err_hw_shutdown:
- if (pdata->hw_shutdown)
- pdata->hw_shutdown(client);
- return error;
-}
-
-static int gp2a_remove(struct i2c_client *client)
-{
- struct gp2a_data *dt = i2c_get_clientdata(client);
- const struct gp2a_platform_data *pdata = dt->pdata;
-
- free_irq(client->irq, dt);
-
- input_unregister_device(dt->input);
- kfree(dt);
-
- gpio_free(pdata->vout_gpio);
-
- if (pdata->hw_shutdown)
- pdata->hw_shutdown(client);
-
- return 0;
-}
-
-static int __maybe_unused gp2a_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct gp2a_data *dt = i2c_get_clientdata(client);
- int retval = 0;
-
- if (device_may_wakeup(&client->dev)) {
- enable_irq_wake(client->irq);
- } else {
- mutex_lock(&dt->input->mutex);
- if (dt->input->users)
- retval = gp2a_disable(dt);
- mutex_unlock(&dt->input->mutex);
- }
-
- return retval;
-}
-
-static int __maybe_unused gp2a_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct gp2a_data *dt = i2c_get_clientdata(client);
- int retval = 0;
-
- if (device_may_wakeup(&client->dev)) {
- disable_irq_wake(client->irq);
- } else {
- mutex_lock(&dt->input->mutex);
- if (dt->input->users)
- retval = gp2a_enable(dt);
- mutex_unlock(&dt->input->mutex);
- }
-
- return retval;
-}
-
-static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
-
-static const struct i2c_device_id gp2a_i2c_id[] = {
- { GP2A_I2C_NAME, 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
-
-static struct i2c_driver gp2a_i2c_driver = {
- .driver = {
- .name = GP2A_I2C_NAME,
- .pm = &gp2a_pm,
- },
- .probe = gp2a_probe,
- .remove = gp2a_remove,
- .id_table = gp2a_i2c_id,
-};
-
-module_i2c_driver(gp2a_i2c_driver);
-
-MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
-MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
new file mode 100644
index 000000000000..6699eb160a0f
--- /dev/null
+++ b/drivers/input/misc/iqs269a.c
@@ -0,0 +1,1833 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS269A Capacitive Touch Controller
+ *
+ * Copyright (C) 2020 Jeff LaBundy <jeff@labundy.com>
+ *
+ * This driver registers up to 3 input devices: one representing capacitive or
+ * inductive keys as well as Hall-effect switches, and one for each of the two
+ * axial sliders presented by the device.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define IQS269_VER_INFO 0x00
+#define IQS269_VER_INFO_PROD_NUM 0x4F
+
+#define IQS269_SYS_FLAGS 0x02
+#define IQS269_SYS_FLAGS_SHOW_RESET BIT(15)
+#define IQS269_SYS_FLAGS_PWR_MODE_MASK GENMASK(12, 11)
+#define IQS269_SYS_FLAGS_PWR_MODE_SHIFT 11
+#define IQS269_SYS_FLAGS_IN_ATI BIT(10)
+
+#define IQS269_CHx_COUNTS 0x08
+
+#define IQS269_SLIDER_X 0x30
+
+#define IQS269_CAL_DATA_A 0x35
+#define IQS269_CAL_DATA_A_HALL_BIN_L_MASK GENMASK(15, 12)
+#define IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT 12
+#define IQS269_CAL_DATA_A_HALL_BIN_R_MASK GENMASK(11, 8)
+#define IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT 8
+
+#define IQS269_SYS_SETTINGS 0x80
+#define IQS269_SYS_SETTINGS_CLK_DIV BIT(15)
+#define IQS269_SYS_SETTINGS_ULP_AUTO BIT(14)
+#define IQS269_SYS_SETTINGS_DIS_AUTO BIT(13)
+#define IQS269_SYS_SETTINGS_PWR_MODE_MASK GENMASK(12, 11)
+#define IQS269_SYS_SETTINGS_PWR_MODE_SHIFT 11
+#define IQS269_SYS_SETTINGS_PWR_MODE_MAX 3
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_MASK GENMASK(10, 8)
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT 8
+#define IQS269_SYS_SETTINGS_ULP_UPDATE_MAX 7
+#define IQS269_SYS_SETTINGS_RESEED_OFFSET BIT(6)
+#define IQS269_SYS_SETTINGS_EVENT_MODE BIT(5)
+#define IQS269_SYS_SETTINGS_EVENT_MODE_LP BIT(4)
+#define IQS269_SYS_SETTINGS_REDO_ATI BIT(2)
+#define IQS269_SYS_SETTINGS_ACK_RESET BIT(0)
+
+#define IQS269_FILT_STR_LP_LTA_MASK GENMASK(7, 6)
+#define IQS269_FILT_STR_LP_LTA_SHIFT 6
+#define IQS269_FILT_STR_LP_CNT_MASK GENMASK(5, 4)
+#define IQS269_FILT_STR_LP_CNT_SHIFT 4
+#define IQS269_FILT_STR_NP_LTA_MASK GENMASK(3, 2)
+#define IQS269_FILT_STR_NP_LTA_SHIFT 2
+#define IQS269_FILT_STR_NP_CNT_MASK GENMASK(1, 0)
+#define IQS269_FILT_STR_MAX 3
+
+#define IQS269_EVENT_MASK_SYS BIT(6)
+#define IQS269_EVENT_MASK_DEEP BIT(2)
+#define IQS269_EVENT_MASK_TOUCH BIT(1)
+#define IQS269_EVENT_MASK_PROX BIT(0)
+
+#define IQS269_RATE_NP_MS_MAX 255
+#define IQS269_RATE_LP_MS_MAX 255
+#define IQS269_RATE_ULP_MS_MAX 4080
+#define IQS269_TIMEOUT_PWR_MS_MAX 130560
+#define IQS269_TIMEOUT_LTA_MS_MAX 130560
+
+#define IQS269_MISC_A_ATI_BAND_DISABLE BIT(15)
+#define IQS269_MISC_A_ATI_LP_ONLY BIT(14)
+#define IQS269_MISC_A_ATI_BAND_TIGHTEN BIT(13)
+#define IQS269_MISC_A_FILT_DISABLE BIT(12)
+#define IQS269_MISC_A_GPIO3_SELECT_MASK GENMASK(10, 8)
+#define IQS269_MISC_A_GPIO3_SELECT_SHIFT 8
+#define IQS269_MISC_A_DUAL_DIR BIT(6)
+#define IQS269_MISC_A_TX_FREQ_MASK GENMASK(5, 4)
+#define IQS269_MISC_A_TX_FREQ_SHIFT 4
+#define IQS269_MISC_A_TX_FREQ_MAX 3
+#define IQS269_MISC_A_GLOBAL_CAP_SIZE BIT(0)
+
+#define IQS269_MISC_B_RESEED_UI_SEL_MASK GENMASK(7, 6)
+#define IQS269_MISC_B_RESEED_UI_SEL_SHIFT 6
+#define IQS269_MISC_B_RESEED_UI_SEL_MAX 3
+#define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4)
+#define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0)
+
+#define IQS269_CHx_SETTINGS 0x8C
+
+#define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15)
+#define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13)
+#define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12)
+#define IQS269_CHx_ENG_A_ATI_MODE_MASK GENMASK(9, 8)
+#define IQS269_CHx_ENG_A_ATI_MODE_SHIFT 8
+#define IQS269_CHx_ENG_A_ATI_MODE_MAX 3
+#define IQS269_CHx_ENG_A_INV_LOGIC BIT(7)
+#define IQS269_CHx_ENG_A_PROJ_BIAS_MASK GENMASK(6, 5)
+#define IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT 5
+#define IQS269_CHx_ENG_A_PROJ_BIAS_MAX 3
+#define IQS269_CHx_ENG_A_SENSE_MODE_MASK GENMASK(3, 0)
+#define IQS269_CHx_ENG_A_SENSE_MODE_MAX 15
+
+#define IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE BIT(13)
+#define IQS269_CHx_ENG_B_SENSE_FREQ_MASK GENMASK(10, 9)
+#define IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT 9
+#define IQS269_CHx_ENG_B_SENSE_FREQ_MAX 3
+#define IQS269_CHx_ENG_B_STATIC_ENABLE BIT(8)
+#define IQS269_CHx_ENG_B_ATI_BASE_MASK GENMASK(7, 6)
+#define IQS269_CHx_ENG_B_ATI_BASE_75 0x00
+#define IQS269_CHx_ENG_B_ATI_BASE_100 0x40
+#define IQS269_CHx_ENG_B_ATI_BASE_150 0x80
+#define IQS269_CHx_ENG_B_ATI_BASE_200 0xC0
+#define IQS269_CHx_ENG_B_ATI_TARGET_MASK GENMASK(5, 0)
+#define IQS269_CHx_ENG_B_ATI_TARGET_MAX 2016
+
+#define IQS269_CHx_WEIGHT_MAX 255
+#define IQS269_CHx_THRESH_MAX 255
+#define IQS269_CHx_HYST_DEEP_MASK GENMASK(7, 4)
+#define IQS269_CHx_HYST_DEEP_SHIFT 4
+#define IQS269_CHx_HYST_TOUCH_MASK GENMASK(3, 0)
+#define IQS269_CHx_HYST_MAX 15
+
+#define IQS269_CHx_HALL_INACTIVE 6
+#define IQS269_CHx_HALL_ACTIVE 7
+
+#define IQS269_HALL_PAD_R BIT(0)
+#define IQS269_HALL_PAD_L BIT(1)
+#define IQS269_HALL_PAD_INV BIT(6)
+
+#define IQS269_HALL_UI 0xF5
+#define IQS269_HALL_UI_ENABLE BIT(15)
+
+#define IQS269_MAX_REG 0xFF
+
+#define IQS269_NUM_CH 8
+#define IQS269_NUM_SL 2
+
+#define IQS269_ATI_POLL_SLEEP_US (iqs269->delay_mult * 10000)
+#define IQS269_ATI_POLL_TIMEOUT_US (iqs269->delay_mult * 500000)
+#define IQS269_ATI_STABLE_DELAY_MS (iqs269->delay_mult * 150)
+
+#define IQS269_PWR_MODE_POLL_SLEEP_US IQS269_ATI_POLL_SLEEP_US
+#define IQS269_PWR_MODE_POLL_TIMEOUT_US IQS269_ATI_POLL_TIMEOUT_US
+
+#define iqs269_irq_wait() usleep_range(100, 150)
+
+enum iqs269_local_cap_size {
+ IQS269_LOCAL_CAP_SIZE_0,
+ IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY,
+ IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5,
+};
+
+enum iqs269_st_offs {
+ IQS269_ST_OFFS_PROX,
+ IQS269_ST_OFFS_DIR,
+ IQS269_ST_OFFS_TOUCH,
+ IQS269_ST_OFFS_DEEP,
+};
+
+enum iqs269_th_offs {
+ IQS269_TH_OFFS_PROX,
+ IQS269_TH_OFFS_TOUCH,
+ IQS269_TH_OFFS_DEEP,
+};
+
+enum iqs269_event_id {
+ IQS269_EVENT_PROX_DN,
+ IQS269_EVENT_PROX_UP,
+ IQS269_EVENT_TOUCH_DN,
+ IQS269_EVENT_TOUCH_UP,
+ IQS269_EVENT_DEEP_DN,
+ IQS269_EVENT_DEEP_UP,
+};
+
+struct iqs269_switch_desc {
+ unsigned int code;
+ bool enabled;
+};
+
+struct iqs269_event_desc {
+ const char *name;
+ enum iqs269_st_offs st_offs;
+ enum iqs269_th_offs th_offs;
+ bool dir_up;
+ u8 mask;
+};
+
+static const struct iqs269_event_desc iqs269_events[] = {
+ [IQS269_EVENT_PROX_DN] = {
+ .name = "event-prox",
+ .st_offs = IQS269_ST_OFFS_PROX,
+ .th_offs = IQS269_TH_OFFS_PROX,
+ .mask = IQS269_EVENT_MASK_PROX,
+ },
+ [IQS269_EVENT_PROX_UP] = {
+ .name = "event-prox-alt",
+ .st_offs = IQS269_ST_OFFS_PROX,
+ .th_offs = IQS269_TH_OFFS_PROX,
+ .dir_up = true,
+ .mask = IQS269_EVENT_MASK_PROX,
+ },
+ [IQS269_EVENT_TOUCH_DN] = {
+ .name = "event-touch",
+ .st_offs = IQS269_ST_OFFS_TOUCH,
+ .th_offs = IQS269_TH_OFFS_TOUCH,
+ .mask = IQS269_EVENT_MASK_TOUCH,
+ },
+ [IQS269_EVENT_TOUCH_UP] = {
+ .name = "event-touch-alt",
+ .st_offs = IQS269_ST_OFFS_TOUCH,
+ .th_offs = IQS269_TH_OFFS_TOUCH,
+ .dir_up = true,
+ .mask = IQS269_EVENT_MASK_TOUCH,
+ },
+ [IQS269_EVENT_DEEP_DN] = {
+ .name = "event-deep",
+ .st_offs = IQS269_ST_OFFS_DEEP,
+ .th_offs = IQS269_TH_OFFS_DEEP,
+ .mask = IQS269_EVENT_MASK_DEEP,
+ },
+ [IQS269_EVENT_DEEP_UP] = {
+ .name = "event-deep-alt",
+ .st_offs = IQS269_ST_OFFS_DEEP,
+ .th_offs = IQS269_TH_OFFS_DEEP,
+ .dir_up = true,
+ .mask = IQS269_EVENT_MASK_DEEP,
+ },
+};
+
+struct iqs269_ver_info {
+ u8 prod_num;
+ u8 sw_num;
+ u8 hw_num;
+ u8 padding;
+} __packed;
+
+struct iqs269_sys_reg {
+ __be16 general;
+ u8 active;
+ u8 filter;
+ u8 reseed;
+ u8 event_mask;
+ u8 rate_np;
+ u8 rate_lp;
+ u8 rate_ulp;
+ u8 timeout_pwr;
+ u8 timeout_rdy;
+ u8 timeout_lta;
+ __be16 misc_a;
+ __be16 misc_b;
+ u8 blocking;
+ u8 padding;
+ u8 slider_select[IQS269_NUM_SL];
+ u8 timeout_tap;
+ u8 timeout_swipe;
+ u8 thresh_swipe;
+ u8 redo_ati;
+} __packed;
+
+struct iqs269_ch_reg {
+ u8 rx_enable;
+ u8 tx_enable;
+ __be16 engine_a;
+ __be16 engine_b;
+ __be16 ati_comp;
+ u8 thresh[3];
+ u8 hyst;
+ u8 assoc_select;
+ u8 assoc_weight;
+} __packed;
+
+struct iqs269_flags {
+ __be16 system;
+ u8 gesture;
+ u8 padding;
+ u8 states[4];
+} __packed;
+
+struct iqs269_private {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)];
+ struct iqs269_ch_reg ch_reg[IQS269_NUM_CH];
+ struct iqs269_sys_reg sys_reg;
+ struct input_dev *keypad;
+ struct input_dev *slider[IQS269_NUM_SL];
+ unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH];
+ unsigned int suspend_mode;
+ unsigned int delay_mult;
+ unsigned int ch_num;
+ bool hall_enable;
+ bool ati_current;
+};
+
+static int iqs269_ati_mode_set(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int mode)
+{
+ u16 engine_a;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ if (mode > IQS269_CHx_ENG_A_ATI_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+
+ engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a);
+
+ engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK;
+ engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
+
+ iqs269->ch_reg[ch_num].engine_a = cpu_to_be16(engine_a);
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return 0;
+}
+
+static int iqs269_ati_mode_get(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int *mode)
+{
+ u16 engine_a;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+ engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a);
+ mutex_unlock(&iqs269->lock);
+
+ engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK;
+ *mode = (engine_a >> IQS269_CHx_ENG_A_ATI_MODE_SHIFT);
+
+ return 0;
+}
+
+static int iqs269_ati_base_set(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int base)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ switch (base) {
+ case 75:
+ base = IQS269_CHx_ENG_B_ATI_BASE_75;
+ break;
+
+ case 100:
+ base = IQS269_CHx_ENG_B_ATI_BASE_100;
+ break;
+
+ case 150:
+ base = IQS269_CHx_ENG_B_ATI_BASE_150;
+ break;
+
+ case 200:
+ base = IQS269_CHx_ENG_B_ATI_BASE_200;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&iqs269->lock);
+
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+
+ engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK;
+ engine_b |= base;
+
+ iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return 0;
+}
+
+static int iqs269_ati_base_get(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int *base)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+ mutex_unlock(&iqs269->lock);
+
+ switch (engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) {
+ case IQS269_CHx_ENG_B_ATI_BASE_75:
+ *base = 75;
+ return 0;
+
+ case IQS269_CHx_ENG_B_ATI_BASE_100:
+ *base = 100;
+ return 0;
+
+ case IQS269_CHx_ENG_B_ATI_BASE_150:
+ *base = 150;
+ return 0;
+
+ case IQS269_CHx_ENG_B_ATI_BASE_200:
+ *base = 200;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iqs269_ati_target_set(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int target)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ if (target > IQS269_CHx_ENG_B_ATI_TARGET_MAX)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+
+ engine_b &= ~IQS269_CHx_ENG_B_ATI_TARGET_MASK;
+ engine_b |= target / 32;
+
+ iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b);
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return 0;
+}
+
+static int iqs269_ati_target_get(struct iqs269_private *iqs269,
+ unsigned int ch_num, unsigned int *target)
+{
+ u16 engine_b;
+
+ if (ch_num >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+ engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b);
+ mutex_unlock(&iqs269->lock);
+
+ *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32;
+
+ return 0;
+}
+
+static int iqs269_parse_mask(const struct fwnode_handle *fwnode,
+ const char *propname, u8 *mask)
+{
+ unsigned int val[IQS269_NUM_CH];
+ int count, error, i;
+
+ count = fwnode_property_count_u32(fwnode, propname);
+ if (count < 0)
+ return 0;
+
+ if (count > IQS269_NUM_CH)
+ return -EINVAL;
+
+ error = fwnode_property_read_u32_array(fwnode, propname, val, count);
+ if (error)
+ return error;
+
+ *mask = 0;
+
+ for (i = 0; i < count; i++) {
+ if (val[i] >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ *mask |= BIT(val[i]);
+ }
+
+ return 0;
+}
+
+static int iqs269_parse_chan(struct iqs269_private *iqs269,
+ const struct fwnode_handle *ch_node)
+{
+ struct i2c_client *client = iqs269->client;
+ struct fwnode_handle *ev_node;
+ struct iqs269_ch_reg *ch_reg;
+ u16 engine_a, engine_b;
+ unsigned int reg, val;
+ int error, i;
+
+ error = fwnode_property_read_u32(ch_node, "reg", &reg);
+ if (error) {
+ dev_err(&client->dev, "Failed to read channel number: %d\n",
+ error);
+ return error;
+ } else if (reg >= IQS269_NUM_CH) {
+ dev_err(&client->dev, "Invalid channel number: %u\n", reg);
+ return -EINVAL;
+ }
+
+ iqs269->sys_reg.active |= BIT(reg);
+ if (!fwnode_property_present(ch_node, "azoteq,reseed-disable"))
+ iqs269->sys_reg.reseed |= BIT(reg);
+
+ if (fwnode_property_present(ch_node, "azoteq,blocking-enable"))
+ iqs269->sys_reg.blocking |= BIT(reg);
+
+ if (fwnode_property_present(ch_node, "azoteq,slider0-select"))
+ iqs269->sys_reg.slider_select[0] |= BIT(reg);
+
+ if (fwnode_property_present(ch_node, "azoteq,slider1-select"))
+ iqs269->sys_reg.slider_select[1] |= BIT(reg);
+
+ ch_reg = &iqs269->ch_reg[reg];
+
+ error = regmap_raw_read(iqs269->regmap,
+ IQS269_CHx_SETTINGS + reg * sizeof(*ch_reg) / 2,
+ ch_reg, sizeof(*ch_reg));
+ if (error)
+ return error;
+
+ error = iqs269_parse_mask(ch_node, "azoteq,rx-enable",
+ &ch_reg->rx_enable);
+ if (error) {
+ dev_err(&client->dev, "Invalid channel %u RX enable mask: %d\n",
+ reg, error);
+ return error;
+ }
+
+ error = iqs269_parse_mask(ch_node, "azoteq,tx-enable",
+ &ch_reg->tx_enable);
+ if (error) {
+ dev_err(&client->dev, "Invalid channel %u TX enable mask: %d\n",
+ reg, error);
+ return error;
+ }
+
+ engine_a = be16_to_cpu(ch_reg->engine_a);
+ engine_b = be16_to_cpu(ch_reg->engine_b);
+
+ engine_a |= IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
+ if (fwnode_property_present(ch_node, "azoteq,meas-cap-decrease"))
+ engine_a &= ~IQS269_CHx_ENG_A_MEAS_CAP_SIZE;
+
+ engine_a |= IQS269_CHx_ENG_A_RX_GND_INACTIVE;
+ if (fwnode_property_present(ch_node, "azoteq,rx-float-inactive"))
+ engine_a &= ~IQS269_CHx_ENG_A_RX_GND_INACTIVE;
+
+ engine_a &= ~IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
+ engine_b &= ~IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
+ if (!fwnode_property_read_u32(ch_node, "azoteq,local-cap-size", &val)) {
+ switch (val) {
+ case IQS269_LOCAL_CAP_SIZE_0:
+ break;
+
+ case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5:
+ engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE;
+
+ /* fall through */
+
+ case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY:
+ engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE;
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "Invalid channel %u local cap. size: %u\n", reg,
+ val);
+ return -EINVAL;
+ }
+ }
+
+ engine_a &= ~IQS269_CHx_ENG_A_INV_LOGIC;
+ if (fwnode_property_present(ch_node, "azoteq,invert-enable"))
+ engine_a |= IQS269_CHx_ENG_A_INV_LOGIC;
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,proj-bias", &val)) {
+ if (val > IQS269_CHx_ENG_A_PROJ_BIAS_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u bias current: %u\n", reg,
+ val);
+ return -EINVAL;
+ }
+
+ engine_a &= ~IQS269_CHx_ENG_A_PROJ_BIAS_MASK;
+ engine_a |= (val << IQS269_CHx_ENG_A_PROJ_BIAS_SHIFT);
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,sense-mode", &val)) {
+ if (val > IQS269_CHx_ENG_A_SENSE_MODE_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u sensing mode: %u\n", reg,
+ val);
+ return -EINVAL;
+ }
+
+ engine_a &= ~IQS269_CHx_ENG_A_SENSE_MODE_MASK;
+ engine_a |= val;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,sense-freq", &val)) {
+ if (val > IQS269_CHx_ENG_B_SENSE_FREQ_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u sensing frequency: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ engine_b &= ~IQS269_CHx_ENG_B_SENSE_FREQ_MASK;
+ engine_b |= (val << IQS269_CHx_ENG_B_SENSE_FREQ_SHIFT);
+ }
+
+ engine_b &= ~IQS269_CHx_ENG_B_STATIC_ENABLE;
+ if (fwnode_property_present(ch_node, "azoteq,static-enable"))
+ engine_b |= IQS269_CHx_ENG_B_STATIC_ENABLE;
+
+ ch_reg->engine_a = cpu_to_be16(engine_a);
+ ch_reg->engine_b = cpu_to_be16(engine_b);
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-mode", &val)) {
+ error = iqs269_ati_mode_set(iqs269, reg, val);
+ if (error) {
+ dev_err(&client->dev,
+ "Invalid channel %u ATI mode: %u\n", reg, val);
+ return error;
+ }
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-base", &val)) {
+ error = iqs269_ati_base_set(iqs269, reg, val);
+ if (error) {
+ dev_err(&client->dev,
+ "Invalid channel %u ATI base: %u\n", reg, val);
+ return error;
+ }
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,ati-target", &val)) {
+ error = iqs269_ati_target_set(iqs269, reg, val);
+ if (error) {
+ dev_err(&client->dev,
+ "Invalid channel %u ATI target: %u\n", reg,
+ val);
+ return error;
+ }
+ }
+
+ error = iqs269_parse_mask(ch_node, "azoteq,assoc-select",
+ &ch_reg->assoc_select);
+ if (error) {
+ dev_err(&client->dev, "Invalid channel %u association: %d\n",
+ reg, error);
+ return error;
+ }
+
+ if (!fwnode_property_read_u32(ch_node, "azoteq,assoc-weight", &val)) {
+ if (val > IQS269_CHx_WEIGHT_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u associated weight: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ ch_reg->assoc_weight = val;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+ ev_node = fwnode_get_named_child_node(ch_node,
+ iqs269_events[i].name);
+ if (!ev_node)
+ continue;
+
+ if (!fwnode_property_read_u32(ev_node, "azoteq,thresh", &val)) {
+ if (val > IQS269_CHx_THRESH_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u threshold: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ ch_reg->thresh[iqs269_events[i].th_offs] = val;
+ }
+
+ if (!fwnode_property_read_u32(ev_node, "azoteq,hyst", &val)) {
+ u8 *hyst = &ch_reg->hyst;
+
+ if (val > IQS269_CHx_HYST_MAX) {
+ dev_err(&client->dev,
+ "Invalid channel %u hysteresis: %u\n",
+ reg, val);
+ return -EINVAL;
+ }
+
+ if (i == IQS269_EVENT_DEEP_DN ||
+ i == IQS269_EVENT_DEEP_UP) {
+ *hyst &= ~IQS269_CHx_HYST_DEEP_MASK;
+ *hyst |= (val << IQS269_CHx_HYST_DEEP_SHIFT);
+ } else if (i == IQS269_EVENT_TOUCH_DN ||
+ i == IQS269_EVENT_TOUCH_UP) {
+ *hyst &= ~IQS269_CHx_HYST_TOUCH_MASK;
+ *hyst |= val;
+ }
+ }
+
+ if (fwnode_property_read_u32(ev_node, "linux,code", &val))
+ continue;
+
+ switch (reg) {
+ case IQS269_CHx_HALL_ACTIVE:
+ if (iqs269->hall_enable) {
+ iqs269->switches[i].code = val;
+ iqs269->switches[i].enabled = true;
+ }
+
+ /* fall through */
+
+ case IQS269_CHx_HALL_INACTIVE:
+ if (iqs269->hall_enable)
+ break;
+
+ /* fall through */
+
+ default:
+ iqs269->keycode[i * IQS269_NUM_CH + reg] = val;
+ }
+
+ iqs269->sys_reg.event_mask &= ~iqs269_events[i].mask;
+ }
+
+ return 0;
+}
+
+static int iqs269_parse_prop(struct iqs269_private *iqs269)
+{
+ struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
+ struct i2c_client *client = iqs269->client;
+ struct fwnode_handle *ch_node;
+ u16 general, misc_a, misc_b;
+ unsigned int val;
+ int error;
+
+ iqs269->hall_enable = device_property_present(&client->dev,
+ "azoteq,hall-enable");
+
+ if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode",
+ &val)) {
+ if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) {
+ dev_err(&client->dev, "Invalid suspend mode: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ iqs269->suspend_mode = val;
+ }
+
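+ /* Read the current register contents before applying any property-derived overrides. */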
+ error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
+ sizeof(*sys_reg));
+ if (error)
+ return error;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-lta",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_LP_LTA_MASK;
+ sys_reg->filter |= (val << IQS269_FILT_STR_LP_LTA_SHIFT);
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-lp-cnt",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_LP_CNT_MASK;
+ sys_reg->filter |= (val << IQS269_FILT_STR_LP_CNT_SHIFT);
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-lta",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_NP_LTA_MASK;
+ sys_reg->filter |= (val << IQS269_FILT_STR_NP_LTA_SHIFT);
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-np-cnt",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ sys_reg->filter &= ~IQS269_FILT_STR_NP_CNT_MASK;
+ sys_reg->filter |= val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-np-ms",
+ &val)) {
+ if (val > IQS269_RATE_NP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_np = val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-lp-ms",
+ &val)) {
+ if (val > IQS269_RATE_LP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_lp = val;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,rate-ulp-ms",
+ &val)) {
+ if (val > IQS269_RATE_ULP_MS_MAX) {
+ dev_err(&client->dev, "Invalid report rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->rate_ulp = val / 16;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,timeout-pwr-ms",
+ &val)) {
+ if (val > IQS269_TIMEOUT_PWR_MS_MAX) {
+ dev_err(&client->dev, "Invalid timeout: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_pwr = val / 512;
+ }
+
+ if (!device_property_read_u32(&client->dev, "azoteq,timeout-lta-ms",
+ &val)) {
+ if (val > IQS269_TIMEOUT_LTA_MS_MAX) {
+ dev_err(&client->dev, "Invalid timeout: %u\n", val);
+ return -EINVAL;
+ }
+
+ sys_reg->timeout_lta = val / 512;
+ }
+
+ misc_a = be16_to_cpu(sys_reg->misc_a);
+ misc_b = be16_to_cpu(sys_reg->misc_b);
+
+ misc_a &= ~IQS269_MISC_A_ATI_BAND_DISABLE;
+ if (device_property_present(&client->dev, "azoteq,ati-band-disable"))
+ misc_a |= IQS269_MISC_A_ATI_BAND_DISABLE;
+
+ misc_a &= ~IQS269_MISC_A_ATI_LP_ONLY;
+ if (device_property_present(&client->dev, "azoteq,ati-lp-only"))
+ misc_a |= IQS269_MISC_A_ATI_LP_ONLY;
+
+ misc_a &= ~IQS269_MISC_A_ATI_BAND_TIGHTEN;
+ if (device_property_present(&client->dev, "azoteq,ati-band-tighten"))
+ misc_a |= IQS269_MISC_A_ATI_BAND_TIGHTEN;
+
+ misc_a &= ~IQS269_MISC_A_FILT_DISABLE;
+ if (device_property_present(&client->dev, "azoteq,filt-disable"))
+ misc_a |= IQS269_MISC_A_FILT_DISABLE;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,gpio3-select",
+ &val)) {
+ if (val >= IQS269_NUM_CH) {
+ dev_err(&client->dev, "Invalid GPIO3 selection: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ misc_a &= ~IQS269_MISC_A_GPIO3_SELECT_MASK;
+ misc_a |= (val << IQS269_MISC_A_GPIO3_SELECT_SHIFT);
+ }
+
+ misc_a &= ~IQS269_MISC_A_DUAL_DIR;
+ if (device_property_present(&client->dev, "azoteq,dual-direction"))
+ misc_a |= IQS269_MISC_A_DUAL_DIR;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,tx-freq", &val)) {
+ if (val > IQS269_MISC_A_TX_FREQ_MAX) {
+ dev_err(&client->dev,
+ "Invalid excitation frequency: %u\n", val);
+ return -EINVAL;
+ }
+
+ misc_a &= ~IQS269_MISC_A_TX_FREQ_MASK;
+ misc_a |= (val << IQS269_MISC_A_TX_FREQ_SHIFT);
+ }
+
+ misc_a &= ~IQS269_MISC_A_GLOBAL_CAP_SIZE;
+ if (device_property_present(&client->dev, "azoteq,global-cap-increase"))
+ misc_a |= IQS269_MISC_A_GLOBAL_CAP_SIZE;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,reseed-select",
+ &val)) {
+ if (val > IQS269_MISC_B_RESEED_UI_SEL_MAX) {
+ dev_err(&client->dev, "Invalid reseed selection: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ misc_b &= ~IQS269_MISC_B_RESEED_UI_SEL_MASK;
+ misc_b |= (val << IQS269_MISC_B_RESEED_UI_SEL_SHIFT);
+ }
+
+ misc_b &= ~IQS269_MISC_B_TRACKING_UI_ENABLE;
+ if (device_property_present(&client->dev, "azoteq,tracking-enable"))
+ misc_b |= IQS269_MISC_B_TRACKING_UI_ENABLE;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,filt-str-slider",
+ &val)) {
+ if (val > IQS269_FILT_STR_MAX) {
+ dev_err(&client->dev, "Invalid filter strength: %u\n",
+ val);
+ return -EINVAL;
+ }
+
+ misc_b &= ~IQS269_MISC_B_FILT_STR_SLIDER;
+ misc_b |= val;
+ }
+
+ sys_reg->misc_a = cpu_to_be16(misc_a);
+ sys_reg->misc_b = cpu_to_be16(misc_b);
+
+ sys_reg->active = 0;
+ sys_reg->reseed = 0;
+
+ sys_reg->blocking = 0;
+
+ sys_reg->slider_select[0] = 0;
+ sys_reg->slider_select[1] = 0;
+
+ sys_reg->event_mask = ~((u8)IQS269_EVENT_MASK_SYS);
+
+ device_for_each_child_node(&client->dev, ch_node) {
+ error = iqs269_parse_chan(iqs269, ch_node);
+ if (error) {
+ fwnode_handle_put(ch_node);
+ return error;
+ }
+ }
+
+ /*
+ * Volunteer all active channels to participate in ATI when REDO-ATI is
+ * manually triggered.
+ */
+ sys_reg->redo_ati = sys_reg->active;
+
+ general = be16_to_cpu(sys_reg->general);
+
+ if (device_property_present(&client->dev, "azoteq,clk-div")) {
+ general |= IQS269_SYS_SETTINGS_CLK_DIV;
+ iqs269->delay_mult = 4;
+ } else {
+ general &= ~IQS269_SYS_SETTINGS_CLK_DIV;
+ iqs269->delay_mult = 1;
+ }
+
+ /*
+ * Configure the device to automatically switch between normal and low-
+ * power modes as a function of sensing activity. Ultra-low-power mode,
+ * if enabled, is reserved for suspend.
+ */
+ general &= ~IQS269_SYS_SETTINGS_ULP_AUTO;
+ general &= ~IQS269_SYS_SETTINGS_DIS_AUTO;
+ general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK;
+
+ if (!device_property_read_u32(&client->dev, "azoteq,ulp-update",
+ &val)) {
+ if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) {
+ dev_err(&client->dev, "Invalid update rate: %u\n", val);
+ return -EINVAL;
+ }
+
+ general &= ~IQS269_SYS_SETTINGS_ULP_UPDATE_MASK;
+ general |= (val << IQS269_SYS_SETTINGS_ULP_UPDATE_SHIFT);
+ }
+
+ general &= ~IQS269_SYS_SETTINGS_RESEED_OFFSET;
+ if (device_property_present(&client->dev, "azoteq,reseed-offset"))
+ general |= IQS269_SYS_SETTINGS_RESEED_OFFSET;
+
+ general |= IQS269_SYS_SETTINGS_EVENT_MODE;
+
+ /*
+ * As per the datasheet, enable streaming during normal-power mode if
+ * either slider is in use. In that case, the device returns to event
+ * mode during low-power mode.
+ */
+ if (sys_reg->slider_select[0] || sys_reg->slider_select[1])
+ general |= IQS269_SYS_SETTINGS_EVENT_MODE_LP;
+
+ general |= IQS269_SYS_SETTINGS_REDO_ATI;
+ general |= IQS269_SYS_SETTINGS_ACK_RESET;
+
+ sys_reg->general = cpu_to_be16(general);
+
+ return 0;
+}
+
+static int iqs269_dev_init(struct iqs269_private *iqs269)
+{
+ struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg;
+ struct iqs269_ch_reg *ch_reg;
+ unsigned int val;
+ int error, i;
+
+ mutex_lock(&iqs269->lock);
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_HALL_UI,
+ IQS269_HALL_UI_ENABLE,
+ iqs269->hall_enable ? ~0 : 0);
+ if (error)
+ goto err_mutex;
+
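+ /* Push the cached settings of each active channel to the device. */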
+ for (i = 0; i < IQS269_NUM_CH; i++) {
+ if (!(sys_reg->active & BIT(i)))
+ continue;
+
+ ch_reg = &iqs269->ch_reg[i];
+
+ error = regmap_raw_write(iqs269->regmap,
+ IQS269_CHx_SETTINGS + i *
+ sizeof(*ch_reg) / 2, ch_reg,
+ sizeof(*ch_reg));
+ if (error)
+ goto err_mutex;
+ }
+
+ /*
+ * The REDO-ATI and ATI channel selection fields must be written in the
+ * same block write, so every field between registers 0x80 through 0x8B
+ * (inclusive) must be written as well.
+ */
+ error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg,
+ sizeof(*sys_reg));
+ if (error)
+ goto err_mutex;
+
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ !(val & IQS269_SYS_FLAGS_IN_ATI),
+ IQS269_ATI_POLL_SLEEP_US,
+ IQS269_ATI_POLL_TIMEOUT_US);
+ if (error)
+ goto err_mutex;
+
+ msleep(IQS269_ATI_STABLE_DELAY_MS);
+ iqs269->ati_current = true;
+
+err_mutex:
+ mutex_unlock(&iqs269->lock);
+
+ return error;
+}
+
+static int iqs269_input_init(struct iqs269_private *iqs269)
+{
+ struct i2c_client *client = iqs269->client;
+ struct iqs269_flags flags;
+ unsigned int sw_code, keycode;
+ int error, i, j;
+ u8 dir_mask, state;
+
+ iqs269->keypad = devm_input_allocate_device(&client->dev);
+ if (!iqs269->keypad)
+ return -ENOMEM;
+
+ iqs269->keypad->keycodemax = ARRAY_SIZE(iqs269->keycode);
+ iqs269->keypad->keycode = iqs269->keycode;
+ iqs269->keypad->keycodesize = sizeof(*iqs269->keycode);
+
+ iqs269->keypad->name = "iqs269a_keypad";
+ iqs269->keypad->id.bustype = BUS_I2C;
+
+ if (iqs269->hall_enable) {
+ error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS,
+ &flags, sizeof(flags));
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read initial status: %d\n", error);
+ return error;
+ }
+ }
+
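+ /* Declare key/switch capabilities and report the initial hall state. */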
+ for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+ dir_mask = flags.states[IQS269_ST_OFFS_DIR];
+ if (!iqs269_events[i].dir_up)
+ dir_mask = ~dir_mask;
+
+ state = flags.states[iqs269_events[i].st_offs] & dir_mask;
+
+ sw_code = iqs269->switches[i].code;
+
+ for (j = 0; j < IQS269_NUM_CH; j++) {
+ keycode = iqs269->keycode[i * IQS269_NUM_CH + j];
+
+ /*
+ * Hall-effect sensing repurposes a pair of dedicated
+ * channels, only one of which reports events.
+ */
+ switch (j) {
+ case IQS269_CHx_HALL_ACTIVE:
+ if (iqs269->hall_enable &&
+ iqs269->switches[i].enabled) {
+ input_set_capability(iqs269->keypad,
+ EV_SW, sw_code);
+ input_report_switch(iqs269->keypad,
+ sw_code,
+ state & BIT(j));
+ }
+
+ /* fall through */
+
+ case IQS269_CHx_HALL_INACTIVE:
+ if (iqs269->hall_enable)
+ continue;
+
+ /* fall through */
+
+ default:
+ if (keycode != KEY_RESERVED)
+ input_set_capability(iqs269->keypad,
+ EV_KEY, keycode);
+ }
+ }
+ }
+
+ input_sync(iqs269->keypad);
+
+ error = input_register_device(iqs269->keypad);
+ if (error) {
+ dev_err(&client->dev, "Failed to register keypad: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < IQS269_NUM_SL; i++) {
+ if (!iqs269->sys_reg.slider_select[i])
+ continue;
+
+ iqs269->slider[i] = devm_input_allocate_device(&client->dev);
+ if (!iqs269->slider[i])
+ return -ENOMEM;
+
+ iqs269->slider[i]->name = i ? "iqs269a_slider_1"
+ : "iqs269a_slider_0";
+ iqs269->slider[i]->id.bustype = BUS_I2C;
+
+ input_set_capability(iqs269->slider[i], EV_KEY, BTN_TOUCH);
+ input_set_abs_params(iqs269->slider[i], ABS_X, 0, 255, 0, 0);
+
+ error = input_register_device(iqs269->slider[i]);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register slider %d: %d\n", i, error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int iqs269_report(struct iqs269_private *iqs269)
+{
+ struct i2c_client *client = iqs269->client;
+ struct iqs269_flags flags;
+ unsigned int sw_code, keycode;
+ int error, i, j;
+ u8 slider_x[IQS269_NUM_SL];
+ u8 dir_mask, state;
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, &flags,
+ sizeof(flags));
+ if (error) {
+ dev_err(&client->dev, "Failed to read device status: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * The device resets itself if its own watchdog bites, which can happen
+ * in the event of an I2C communication error. In this case, the device
+ * asserts a SHOW_RESET interrupt and all registers must be restored.
+ */
+ if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_SHOW_RESET) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ error = iqs269_dev_init(iqs269);
+ if (error)
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", error);
+
+ return error;
+ }
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x,
+ sizeof(slider_x));
+ if (error) {
+ dev_err(&client->dev, "Failed to read slider position: %d\n",
+ error);
+ return error;
+ }
+
+ for (i = 0; i < IQS269_NUM_SL; i++) {
+ if (!iqs269->sys_reg.slider_select[i])
+ continue;
+
+ /*
+ * Report BTN_TOUCH if any channel that participates in the
+ * slider is in a state of touch.
+ */
+ if (flags.states[IQS269_ST_OFFS_TOUCH] &
+ iqs269->sys_reg.slider_select[i]) {
+ input_report_key(iqs269->slider[i], BTN_TOUCH, 1);
+ input_report_abs(iqs269->slider[i], ABS_X, slider_x[i]);
+ } else {
+ input_report_key(iqs269->slider[i], BTN_TOUCH, 0);
+ }
+
+ input_sync(iqs269->slider[i]);
+ }
+
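+ /* Report the state of each key or switch across all channels. */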
+ for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) {
+ dir_mask = flags.states[IQS269_ST_OFFS_DIR];
+ if (!iqs269_events[i].dir_up)
+ dir_mask = ~dir_mask;
+
+ state = flags.states[iqs269_events[i].st_offs] & dir_mask;
+
+ sw_code = iqs269->switches[i].code;
+
+ for (j = 0; j < IQS269_NUM_CH; j++) {
+ keycode = iqs269->keycode[i * IQS269_NUM_CH + j];
+
+ switch (j) {
+ case IQS269_CHx_HALL_ACTIVE:
+ if (iqs269->hall_enable &&
+ iqs269->switches[i].enabled)
+ input_report_switch(iqs269->keypad,
+ sw_code,
+ state & BIT(j));
+
+ /* fall through */
+
+ case IQS269_CHx_HALL_INACTIVE:
+ if (iqs269->hall_enable)
+ continue;
+
+ /* fall through */
+
+ default:
+ input_report_key(iqs269->keypad, keycode,
+ state & BIT(j));
+ }
+ }
+ }
+
+ input_sync(iqs269->keypad);
+
+ return 0;
+}
+
+static irqreturn_t iqs269_irq(int irq, void *context)
+{
+ struct iqs269_private *iqs269 = context;
+
+ if (iqs269_report(iqs269))
+ return IRQ_NONE;
+
+ /*
+ * The device does not deassert its interrupt (RDY) pin until shortly
+ * after receiving an I2C stop condition; the following delay ensures
+ * the interrupt handler does not return before this time.
+ */
+ iqs269_irq_wait();
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t counts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ __le16 counts;
+ int error;
+
+ if (!iqs269->ati_current || iqs269->hall_enable)
+ return -EPERM;
+
+ /*
+ * Unsolicited I2C communication prompts the device to assert its RDY
+ * pin, so disable the interrupt line until the operation is finished
+ * and RDY has been deasserted.
+ */
+ disable_irq(client->irq);
+
+ error = regmap_raw_read(iqs269->regmap,
+ IQS269_CHx_COUNTS + iqs269->ch_num * 2,
+ &counts, sizeof(counts));
+
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", le16_to_cpu(counts));
+}
+
+static ssize_t hall_bin_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ disable_irq(client->irq);
+
+ error = regmap_read(iqs269->regmap, IQS269_CAL_DATA_A, &val);
+
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ if (error)
+ return error;
+
+ switch (iqs269->ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable &
+ iqs269->ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) {
+ case IQS269_HALL_PAD_R:
+ val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK;
+ val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT;
+ break;
+
+ case IQS269_HALL_PAD_L:
+ val &= IQS269_CAL_DATA_A_HALL_BIN_L_MASK;
+ val >>= IQS269_CAL_DATA_A_HALL_BIN_L_SHIFT;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t hall_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->hall_enable);
+}
+
+static ssize_t hall_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ mutex_lock(&iqs269->lock);
+
+ iqs269->hall_enable = val;
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return count;
+}
+
+static ssize_t ch_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ch_num);
+}
+
+static ssize_t ch_number_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val >= IQS269_NUM_CH)
+ return -EINVAL;
+
+ iqs269->ch_num = val;
+
+ return count;
+}
+
+static ssize_t rx_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ iqs269->ch_reg[iqs269->ch_num].rx_enable);
+}
+
+static ssize_t rx_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (val > 0xFF)
+ return -EINVAL;
+
+ mutex_lock(&iqs269->lock);
+
+ iqs269->ch_reg[iqs269->ch_num].rx_enable = val;
+ iqs269->ati_current = false;
+
+ mutex_unlock(&iqs269->lock);
+
+ return count;
+}
+
+static ssize_t ati_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = iqs269_ati_mode_get(iqs269, iqs269->ch_num, &val);
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t ati_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ error = iqs269_ati_mode_set(iqs269, iqs269->ch_num, val);
+ if (error)
+ return error;
+
+ return count;
+}
+
+static ssize_t ati_base_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = iqs269_ati_base_get(iqs269, iqs269->ch_num, &val);
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t ati_base_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ error = iqs269_ati_base_set(iqs269, iqs269->ch_num, val);
+ if (error)
+ return error;
+
+ return count;
+}
+
+static ssize_t ati_target_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = iqs269_ati_target_get(iqs269, iqs269->ch_num, &val);
+ if (error)
+ return error;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t ati_target_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ error = iqs269_ati_target_set(iqs269, iqs269->ch_num, val);
+ if (error)
+ return error;
+
+ return count;
+}
+
+static ssize_t ati_trigger_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ati_current);
+}
+
+static ssize_t ati_trigger_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ error = kstrtouint(buf, 10, &val);
+ if (error)
+ return error;
+
+ if (!val)
+ return count;
+
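+ /* Unsolicited I2C traffic asserts RDY, so mask the interrupt while ATI is re-run. */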
+ disable_irq(client->irq);
+
+ error = iqs269_dev_init(iqs269);
+
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ if (error)
+ return error;
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(counts);
+static DEVICE_ATTR_RO(hall_bin);
+static DEVICE_ATTR_RW(hall_enable);
+static DEVICE_ATTR_RW(ch_number);
+static DEVICE_ATTR_RW(rx_enable);
+static DEVICE_ATTR_RW(ati_mode);
+static DEVICE_ATTR_RW(ati_base);
+static DEVICE_ATTR_RW(ati_target);
+static DEVICE_ATTR_RW(ati_trigger);
+
+static struct attribute *iqs269_attrs[] = {
+ &dev_attr_counts.attr,
+ &dev_attr_hall_bin.attr,
+ &dev_attr_hall_enable.attr,
+ &dev_attr_ch_number.attr,
+ &dev_attr_rx_enable.attr,
+ &dev_attr_ati_mode.attr,
+ &dev_attr_ati_base.attr,
+ &dev_attr_ati_target.attr,
+ &dev_attr_ati_trigger.attr,
+ NULL,
+};
+
+static const struct attribute_group iqs269_attr_group = {
+ .attrs = iqs269_attrs,
+};
+
+static const struct regmap_config iqs269_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = IQS269_MAX_REG,
+};
+
+static int iqs269_probe(struct i2c_client *client)
+{
+ struct iqs269_ver_info ver_info;
+ struct iqs269_private *iqs269;
+ int error;
+
+ iqs269 = devm_kzalloc(&client->dev, sizeof(*iqs269), GFP_KERNEL);
+ if (!iqs269)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs269);
+ iqs269->client = client;
+
+ iqs269->regmap = devm_regmap_init_i2c(client, &iqs269_regmap_config);
+ if (IS_ERR(iqs269->regmap)) {
+ error = PTR_ERR(iqs269->regmap);
+ dev_err(&client->dev, "Failed to initialize register map: %d\n",
+ error);
+ return error;
+ }
+
+ mutex_init(&iqs269->lock);
+
+ error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info,
+ sizeof(ver_info));
+ if (error)
+ return error;
+
+ if (ver_info.prod_num != IQS269_VER_INFO_PROD_NUM) {
+ dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
+ ver_info.prod_num);
+ return -EINVAL;
+ }
+
+ error = iqs269_parse_prop(iqs269);
+ if (error)
+ return error;
+
+ error = iqs269_dev_init(iqs269);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize device: %d\n",
+ error);
+ return error;
+ }
+
+ error = iqs269_input_init(iqs269);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs269_irq, IRQF_ONESHOT,
+ client->name, iqs269);
+ if (error) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &iqs269_attr_group);
+ if (error)
+ dev_err(&client->dev, "Failed to add attributes: %d\n", error);
+
+ return error;
+}
+
+static int __maybe_unused iqs269_suspend(struct device *dev)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ if (!iqs269->suspend_mode)
+ return 0;
+
+ disable_irq(client->irq);
+
+ /*
+ * Automatic power mode switching must be disabled before the device is
+ * forced into any particular power mode. In this case, the device will
+ * transition into normal-power mode.
+ */
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_DIS_AUTO, ~0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * The following check ensures the device has completed its transition
+ * into normal-power mode before a manual mode switch is performed.
+ */
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK),
+ IQS269_PWR_MODE_POLL_SLEEP_US,
+ IQS269_PWR_MODE_POLL_TIMEOUT_US);
+ if (error)
+ goto err_irq;
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_PWR_MODE_MASK,
+ iqs269->suspend_mode <<
+ IQS269_SYS_SETTINGS_PWR_MODE_SHIFT);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This last check ensures the device has completed its transition into
+ * the desired power mode to prevent any spurious interrupts from being
+ * triggered after iqs269_suspend has already returned.
+ */
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ (val & IQS269_SYS_FLAGS_PWR_MODE_MASK)
+ == (iqs269->suspend_mode <<
+ IQS269_SYS_FLAGS_PWR_MODE_SHIFT),
+ IQS269_PWR_MODE_POLL_SLEEP_US,
+ IQS269_PWR_MODE_POLL_TIMEOUT_US);
+
+err_irq:
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static int __maybe_unused iqs269_resume(struct device *dev)
+{
+ struct iqs269_private *iqs269 = dev_get_drvdata(dev);
+ struct i2c_client *client = iqs269->client;
+ unsigned int val;
+ int error;
+
+ if (!iqs269->suspend_mode)
+ return 0;
+
+ disable_irq(client->irq);
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_PWR_MODE_MASK, 0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This check ensures the device has returned to normal-power mode
+ * before automatic power mode switching is re-enabled.
+ */
+ error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val,
+ !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK),
+ IQS269_PWR_MODE_POLL_SLEEP_US,
+ IQS269_PWR_MODE_POLL_TIMEOUT_US);
+ if (error)
+ goto err_irq;
+
+ error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS,
+ IQS269_SYS_SETTINGS_DIS_AUTO, 0);
+ if (error)
+ goto err_irq;
+
+ /*
+ * This step reports any events that may have been "swallowed" as a
+ * result of polling PWR_MODE (which automatically acknowledges any
+ * pending interrupts).
+ */
+ error = iqs269_report(iqs269);
+
+err_irq:
+ iqs269_irq_wait();
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume);
+
+static const struct of_device_id iqs269_of_match[] = {
+ { .compatible = "azoteq,iqs269a" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs269_of_match);
+
+static struct i2c_driver iqs269_i2c_driver = {
+ .driver = {
+ .name = "iqs269a",
+ .of_match_table = iqs269_of_match,
+ .pm = &iqs269_pm,
+ },
+ .probe_new = iqs269_probe,
+};
+module_i2c_driver(iqs269_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS269A Capacitive Touch Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/msm-vibrator.c b/drivers/input/misc/msm-vibrator.c
deleted file mode 100644
index b60f1aaee705..000000000000
--- a/drivers/input/misc/msm-vibrator.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Qualcomm MSM vibrator driver
- *
- * Copyright (c) 2018 Brian Masney <masneyb@onstation.org>
- *
- * Based on qcom,pwm-vibrator.c from:
- * Copyright (c) 2018 Jonathan Marek <jonathan@marek.ca>
- *
- * Based on msm_pwm_vibrator.c from downstream Android sources:
- * Copyright (C) 2009-2014 LGE, Inc.
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/gpio/consumer.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-
-#define REG_CMD_RCGR 0x00
-#define REG_CFG_RCGR 0x04
-#define REG_M 0x08
-#define REG_N 0x0C
-#define REG_D 0x10
-#define REG_CBCR 0x24
-#define MMSS_CC_M_DEFAULT 1
-
-struct msm_vibrator {
- struct input_dev *input;
- struct mutex mutex;
- struct work_struct worker;
- void __iomem *base;
- struct regulator *vcc;
- struct clk *clk;
- struct gpio_desc *enable_gpio;
- u16 magnitude;
- bool enabled;
-};
-
-static void msm_vibrator_write(struct msm_vibrator *vibrator, int offset,
- u32 value)
-{
- writel(value, vibrator->base + offset);
-}
-
-static int msm_vibrator_start(struct msm_vibrator *vibrator)
-{
- int d_reg_val, ret = 0;
-
- mutex_lock(&vibrator->mutex);
-
- if (!vibrator->enabled) {
- ret = clk_set_rate(vibrator->clk, 24000);
- if (ret) {
- dev_err(&vibrator->input->dev,
- "Failed to set clock rate: %d\n", ret);
- goto unlock;
- }
-
- ret = clk_prepare_enable(vibrator->clk);
- if (ret) {
- dev_err(&vibrator->input->dev,
- "Failed to enable clock: %d\n", ret);
- goto unlock;
- }
-
- ret = regulator_enable(vibrator->vcc);
- if (ret) {
- dev_err(&vibrator->input->dev,
- "Failed to enable regulator: %d\n", ret);
- clk_disable(vibrator->clk);
- goto unlock;
- }
-
- gpiod_set_value_cansleep(vibrator->enable_gpio, 1);
-
- vibrator->enabled = true;
- }
-
- d_reg_val = 127 - ((126 * vibrator->magnitude) / 0xffff);
- msm_vibrator_write(vibrator, REG_CFG_RCGR,
- (2 << 12) | /* dual edge mode */
- (0 << 8) | /* cxo */
- (7 << 0));
- msm_vibrator_write(vibrator, REG_M, 1);
- msm_vibrator_write(vibrator, REG_N, 128);
- msm_vibrator_write(vibrator, REG_D, d_reg_val);
- msm_vibrator_write(vibrator, REG_CMD_RCGR, 1);
- msm_vibrator_write(vibrator, REG_CBCR, 1);
-
-unlock:
- mutex_unlock(&vibrator->mutex);
-
- return ret;
-}
-
-static void msm_vibrator_stop(struct msm_vibrator *vibrator)
-{
- mutex_lock(&vibrator->mutex);
-
- if (vibrator->enabled) {
- gpiod_set_value_cansleep(vibrator->enable_gpio, 0);
- regulator_disable(vibrator->vcc);
- clk_disable(vibrator->clk);
- vibrator->enabled = false;
- }
-
- mutex_unlock(&vibrator->mutex);
-}
-
-static void msm_vibrator_worker(struct work_struct *work)
-{
- struct msm_vibrator *vibrator = container_of(work,
- struct msm_vibrator,
- worker);
-
- if (vibrator->magnitude)
- msm_vibrator_start(vibrator);
- else
- msm_vibrator_stop(vibrator);
-}
-
-static int msm_vibrator_play_effect(struct input_dev *dev, void *data,
- struct ff_effect *effect)
-{
- struct msm_vibrator *vibrator = input_get_drvdata(dev);
-
- mutex_lock(&vibrator->mutex);
-
- if (effect->u.rumble.strong_magnitude > 0)
- vibrator->magnitude = effect->u.rumble.strong_magnitude;
- else
- vibrator->magnitude = effect->u.rumble.weak_magnitude;
-
- mutex_unlock(&vibrator->mutex);
-
- schedule_work(&vibrator->worker);
-
- return 0;
-}
-
-static void msm_vibrator_close(struct input_dev *input)
-{
- struct msm_vibrator *vibrator = input_get_drvdata(input);
-
- cancel_work_sync(&vibrator->worker);
- msm_vibrator_stop(vibrator);
-}
-
-static int msm_vibrator_probe(struct platform_device *pdev)
-{
- struct msm_vibrator *vibrator;
- struct resource *res;
- int ret;
-
- vibrator = devm_kzalloc(&pdev->dev, sizeof(*vibrator), GFP_KERNEL);
- if (!vibrator)
- return -ENOMEM;
-
- vibrator->input = devm_input_allocate_device(&pdev->dev);
- if (!vibrator->input)
- return -ENOMEM;
-
- vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
- if (IS_ERR(vibrator->vcc)) {
- if (PTR_ERR(vibrator->vcc) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get regulator: %ld\n",
- PTR_ERR(vibrator->vcc));
- return PTR_ERR(vibrator->vcc);
- }
-
- vibrator->enable_gpio = devm_gpiod_get(&pdev->dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(vibrator->enable_gpio)) {
- if (PTR_ERR(vibrator->enable_gpio) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get enable gpio: %ld\n",
- PTR_ERR(vibrator->enable_gpio));
- return PTR_ERR(vibrator->enable_gpio);
- }
-
- vibrator->clk = devm_clk_get(&pdev->dev, "pwm");
- if (IS_ERR(vibrator->clk)) {
- if (PTR_ERR(vibrator->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to lookup pwm clock: %ld\n",
- PTR_ERR(vibrator->clk));
- return PTR_ERR(vibrator->clk);
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get platform resource\n");
- return -ENODEV;
- }
-
- vibrator->base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!vibrator->base) {
- dev_err(&pdev->dev, "Failed to iomap resource.\n");
- return -ENOMEM;
- }
-
- vibrator->enabled = false;
- mutex_init(&vibrator->mutex);
- INIT_WORK(&vibrator->worker, msm_vibrator_worker);
-
- vibrator->input->name = "msm-vibrator";
- vibrator->input->id.bustype = BUS_HOST;
- vibrator->input->close = msm_vibrator_close;
-
- input_set_drvdata(vibrator->input, vibrator);
- input_set_capability(vibrator->input, EV_FF, FF_RUMBLE);
-
- ret = input_ff_create_memless(vibrator->input, NULL,
- msm_vibrator_play_effect);
- if (ret) {
- dev_err(&pdev->dev, "Failed to create ff memless: %d", ret);
- return ret;
- }
-
- ret = input_register_device(vibrator->input);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register input device: %d", ret);
- return ret;
- }
-
- platform_set_drvdata(pdev, vibrator);
-
- return 0;
-}
-
-static int __maybe_unused msm_vibrator_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
-
- cancel_work_sync(&vibrator->worker);
-
- if (vibrator->enabled)
- msm_vibrator_stop(vibrator);
-
- return 0;
-}
-
-static int __maybe_unused msm_vibrator_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct msm_vibrator *vibrator = platform_get_drvdata(pdev);
-
- if (vibrator->enabled)
- msm_vibrator_start(vibrator);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(msm_vibrator_pm_ops, msm_vibrator_suspend,
- msm_vibrator_resume);
-
-static const struct of_device_id msm_vibrator_of_match[] = {
- { .compatible = "qcom,msm8226-vibrator" },
- { .compatible = "qcom,msm8974-vibrator" },
- {},
-};
-MODULE_DEVICE_TABLE(of, msm_vibrator_of_match);
-
-static struct platform_driver msm_vibrator_driver = {
- .probe = msm_vibrator_probe,
- .driver = {
- .name = "msm-vibrator",
- .pm = &msm_vibrator_pm_ops,
- .of_match_table = of_match_ptr(msm_vibrator_of_match),
- },
-};
-module_platform_driver(msm_vibrator_driver);
-
-MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
-MODULE_DESCRIPTION("Qualcomm MSM vibrator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 24bc5c5d876f..a1bba722b234 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -146,7 +146,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info,
break;
case XENKBD_MT_EV_UP:
- input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(info->mtouch);
break;
case XENKBD_MT_EV_SYN:
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 8719da540383..3f9354baac4b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -938,7 +938,7 @@ static void elan_report_contact(struct elan_tp_data *data,
input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
} else {
input_mt_slot(input, contact_num);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input);
}
}
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
deleted file mode 100644
index 391f94d9e47d..000000000000
--- a/drivers/input/serio/i8042-ppcio.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _I8042_PPCIO_H
-#define _I8042_PPCIO_H
-
-
-#if defined(CONFIG_WALNUT)
-
-#define I8042_KBD_IRQ 25
-#define I8042_AUX_IRQ 26
-
-#define I8042_KBD_PHYS_DESC "walnutps2/serio0"
-#define I8042_AUX_PHYS_DESC "walnutps2/serio1"
-#define I8042_MUX_PHYS_DESC "walnutps2/serio%d"
-
-extern void *kb_cs;
-extern void *kb_data;
-
-#define I8042_COMMAND_REG (*(int *)kb_cs)
-#define I8042_DATA_REG (*(int *)kb_data)
-
-static inline int i8042_read_data(void)
-{
- return readb(kb_data);
-}
-
-static inline int i8042_read_status(void)
-{
- return readb(kb_cs);
-}
-
-static inline void i8042_write_data(int val)
-{
- writeb(val, kb_data);
-}
-
-static inline void i8042_write_command(int val)
-{
- writeb(val, kb_cs);
-}
-
-static inline int i8042_platform_init(void)
-{
- i8042_reset = I8042_RESET_ALWAYS;
- return 0;
-}
-
-static inline void i8042_platform_exit(void)
-{
-}
-
-#else
-
-#include "i8042-io.h"
-
-#endif
-
-#endif /* _I8042_PPCIO_H */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7e048b557462..7b08ff8ddf35 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -945,6 +945,7 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
}
i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
sizeof(i8042_kbd_firmware_id));
+ i8042_kbd_fwnode = dev_fwnode(&dev->dev);
/* Keyboard ports are always supposed to be wakeup-enabled */
device_set_wakeup_enable(&dev->dev, true);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 20ff2bed3917..0dddf273afd9 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -21,6 +21,7 @@
#include <linux/i8042.h>
#include <linux/slab.h>
#include <linux/suspend.h>
+#include <linux/property.h>
#include <asm/io.h>
@@ -124,6 +125,7 @@ MODULE_PARM_DESC(unmask_kbd_data, "Unconditional enable (may reveal sensitive da
static bool i8042_bypass_aux_irq_test;
static char i8042_kbd_firmware_id[128];
static char i8042_aux_firmware_id[128];
+static struct fwnode_handle *i8042_kbd_fwnode;
#include "i8042.h"
@@ -1335,6 +1337,7 @@ static int __init i8042_create_kbd_port(void)
strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
sizeof(serio->firmware_id));
+ set_primary_fwnode(&serio->dev, i8042_kbd_fwnode);
port->serio = serio;
port->irq = I8042_KBD_IRQ;
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index 38dc27ad3c18..eb376700dfff 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -17,8 +17,6 @@
#include "i8042-ip22io.h"
#elif defined(CONFIG_SNI_RM)
#include "i8042-snirm.h"
-#elif defined(CONFIG_PPC)
-#include "i8042-ppcio.h"
#elif defined(CONFIG_SPARC)
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index c071f7c407b6..35c867b2d9a7 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -201,6 +201,18 @@ config TOUCHSCREEN_CHIPONE_ICN8505
To compile this driver as a module, choose M here: the
module will be called chipone_icn8505.
+config TOUCHSCREEN_CY8CTMA140
+ tristate "cy8ctma140 touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Cypress CY8CTMA140 capacitive
+ touchscreen, also known simply as "TMA140".
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cy8ctma140.
+
config TOUCHSCREEN_CY8CTMG110
tristate "cy8ctmg110 touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 94c6162409b3..30d1e1b42492 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_BU21029) += bu21029_ts.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o
obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
+obj-$(CONFIG_TOUCHSCREEN_CY8CTMA140) += cy8ctma140.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_CORE) += cyttsp_core.o
obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C) += cyttsp_i2c.o cyttsp_i2c_common.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index ae60442efda0..a2189739e30f 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -822,8 +822,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
* have happened.
*/
if (status & MXT_T9_RELEASE) {
- input_mt_report_slot_state(input_dev,
- MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(input_dev);
mxt_input_sync(data);
}
@@ -839,7 +838,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
} else {
/* Touch no longer active, close out slot */
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(input_dev);
}
data->update_input = true;
@@ -947,7 +946,7 @@ static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
dev_dbg(dev, "[%u] release\n", id);
/* close out slot */
- input_mt_report_slot_state(input_dev, 0, 0);
+ input_mt_report_slot_inactive(input_dev);
}
data->update_input = true;
diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c
new file mode 100644
index 000000000000..a9be29139cbf
--- /dev/null
+++ b/drivers/input/touchscreen/cy8ctma140.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Cypress CY8CTMA140 (TMA140) touchscreen
+ * (C) 2020 Linus Walleij <linus.walleij@linaro.org>
+ * (C) 2007 Cypress
+ * (C) 2007 Google, Inc.
+ *
+ * Inspired by the tma140_skomer.c driver in the Samsung GT-S7710 code
+ * drop. The GT-S7710 is codenamed "Skomer"; the code also indicates
+ * that the same touchscreen was used in a product called "Lucas".
+ *
+ * The code drop for GT-S7710 also contains a firmware downloader and
+ * 15 (!) versions of the firmware drop from Cypress. But here we assume
+ * the firmware got downloaded to the touchscreen flash successfully and
+ * just use it to read the fingers. The shipped vendor driver does the
+ * same.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/input/touchscreen.h>
+#include <linux/input/mt.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#define CY8CTMA140_NAME "cy8ctma140"
+
+#define CY8CTMA140_MAX_FINGERS 4
+
+#define CY8CTMA140_GET_FINGERS 0x00
+#define CY8CTMA140_GET_FW_INFO 0x19
+
+/* This message also fits some bytes for touchkeys, if used */
+#define CY8CTMA140_PACKET_SIZE 31
+
+#define CY8CTMA140_INVALID_BUFFER_BIT 5
+
+struct cy8ctma140 {
+ struct input_dev *input;
+ struct touchscreen_properties props;
+ struct device *dev;
+ struct i2c_client *client;
+ struct regulator_bulk_data regulators[2];
+ u8 prev_fingers;
+ u8 prev_f1id;
+ u8 prev_f2id;
+};
+
+static void cy8ctma140_report(struct cy8ctma140 *ts, u8 *data, int n_fingers)
+{
+ static const u8 contact_offsets[] = { 0x03, 0x09, 0x10, 0x16 };
+ u8 *buf;
+ u16 x, y;
+ u8 w;
+ u8 id;
+ int slot;
+ int i;
+
+ for (i = 0; i < n_fingers; i++) {
+ buf = &data[contact_offsets[i]];
+
+ /*
+ * Odd contacts have contact ID in the lower nibble of
+ * the preceding byte, whereas even contacts have it in
+ * the upper nibble of the following byte.
+ */
+ id = i % 2 ? buf[-1] & 0x0f : buf[5] >> 4;
+ slot = input_mt_get_slot_by_key(ts->input, id);
+ if (slot < 0)
+ continue;
+
+ x = get_unaligned_be16(buf);
+ y = get_unaligned_be16(buf + 2);
+ w = buf[4];
+
+ dev_dbg(ts->dev, "finger %d: ID %02x (%d, %d) w: %d\n",
+ slot, id, x, y, w);
+
+ input_mt_slot(ts->input, slot);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(ts->input, &ts->props, x, y, true);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, w);
+ }
+
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
+}
+
+static irqreturn_t cy8ctma140_irq_thread(int irq, void *d)
+{
+ struct cy8ctma140 *ts = d;
+ u8 cmdbuf[] = { CY8CTMA140_GET_FINGERS };
+ u8 buf[CY8CTMA140_PACKET_SIZE];
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ };
+ u8 n_fingers;
+ int ret;
+
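+ /* Write the command byte, then read back the full touch packet. */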
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ if (ret < 0)
+ dev_err(ts->dev, "error reading message: %d\n", ret);
+ else
+ dev_err(ts->dev, "wrong number of messages\n");
+ goto out;
+ }
+
+ if (buf[1] & BIT(CY8CTMA140_INVALID_BUFFER_BIT)) {
+ dev_dbg(ts->dev, "invalid event\n");
+ goto out;
+ }
+
+ n_fingers = buf[2] & 0x0f;
+ if (n_fingers > CY8CTMA140_MAX_FINGERS) {
+ dev_err(ts->dev, "unexpected number of fingers: %d\n",
+ n_fingers);
+ goto out;
+ }
+
+ cy8ctma140_report(ts, buf, n_fingers);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int cy8ctma140_init(struct cy8ctma140 *ts)
+{
+ u8 addr[1];
+ u8 buf[5];
+ int ret;
+
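+ /* Request the firmware information block and log vendor, HW ID and FW version. */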
+ addr[0] = CY8CTMA140_GET_FW_INFO;
+ ret = i2c_master_send(ts->client, addr, 1);
+ if (ret < 0) {
+ dev_err(ts->dev, "error sending FW info message\n");
+ return ret;
+ }
+ ret = i2c_master_recv(ts->client, buf, 5);
+ if (ret < 0) {
+ dev_err(ts->dev, "error receiving FW info message\n");
+ return ret;
+ }
+ if (ret != 5) {
+ dev_err(ts->dev, "got only %d bytes\n", ret);
+ return -EIO;
+ }
+
+ dev_dbg(ts->dev, "vendor %c%c, HW ID %.2d, FW ver %.4d\n",
+ buf[0], buf[1], buf[3], buf[4]);
+
+ return 0;
+}
+
+static int cy8ctma140_power_up(struct cy8ctma140 *ts)
+{
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+ if (error) {
+ dev_err(ts->dev, "failed to enable regulators\n");
+ return error;
+ }
+
+ msleep(250);
+
+ return 0;
+}
+
+static void cy8ctma140_power_down(struct cy8ctma140 *ts)
+{
+ regulator_bulk_disable(ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+}
+
+/* Called from the registered devm action */
+static void cy8ctma140_power_off_action(void *d)
+{
+ struct cy8ctma140 *ts = d;
+
+ cy8ctma140_power_down(ts);
+}
+
+static int cy8ctma140_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct cy8ctma140 *ts;
+ struct input_dev *input;
+ struct device *dev = &client->dev;
+ int error;
+
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ ts->dev = dev;
+ ts->client = client;
+ ts->input = input;
+
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+ /* One byte for width 0..255 so this is the limit */
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ /*
+ * This sets up event max/min capabilities and fuzz.
+ * Some DT properties are compulsory, so we do not need
+ * to provide defaults for X/Y max or pressure max.
+ *
+ * We just initialize a very simple MT touchscreen here;
+ * some devices use the capability of this touchscreen to
+ * provide touchkeys, and in that case this needs to be
+ * extended to handle touchkey input.
+ *
+ * The firmware takes care of finger tracking and dropping
+ * invalid ranges.
+ */
+ touchscreen_parse_properties(input, true, &ts->props);
+ input_abs_set_fuzz(input, ABS_MT_POSITION_X, 0);
+ input_abs_set_fuzz(input, ABS_MT_POSITION_Y, 0);
+
+ error = input_mt_init_slots(input, CY8CTMA140_MAX_FINGERS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return error;
+
+ input->name = CY8CTMA140_NAME;
+ input->id.bustype = BUS_I2C;
+ input_set_drvdata(input, ts);
+
+ /*
+ * VCPIN is the analog voltage supply and VDD is the digital
+ * voltage supply. Since the voltage range of VDD overlaps
+ * that of VCPIN, many designs just supply both with a single
+ * voltage source of ~3.3 V.
+ */
+ ts->regulators[0].supply = "vcpin";
+ ts->regulators[1].supply = "vdd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
+ ts->regulators);
+ if (error) {
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get regulators %d\n",
+ error);
+ return error;
+ }
+
+ error = cy8ctma140_power_up(ts);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, cy8ctma140_power_off_action, ts);
+ if (error) {
+ dev_err(dev, "failed to install power off handler\n");
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cy8ctma140_irq_thread,
+ IRQF_ONESHOT, CY8CTMA140_NAME, ts);
+ if (error) {
+ dev_err(dev, "irq %d busy? error %d\n", client->irq, error);
+ return error;
+ }
+
+ error = cy8ctma140_init(ts);
+ if (error)
+ return error;
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, ts);
+
+ return 0;
+}
+
+static int __maybe_unused cy8ctma140_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cy8ctma140 *ts = i2c_get_clientdata(client);
+
+ if (!device_may_wakeup(&client->dev))
+ cy8ctma140_power_down(ts);
+
+ return 0;
+}
+
+static int __maybe_unused cy8ctma140_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cy8ctma140 *ts = i2c_get_clientdata(client);
+ int error;
+
+ if (!device_may_wakeup(&client->dev)) {
+ error = cy8ctma140_power_up(ts);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cy8ctma140_pm, cy8ctma140_suspend, cy8ctma140_resume);
+
+static const struct i2c_device_id cy8ctma140_idtable[] = {
+ { CY8CTMA140_NAME, 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, cy8ctma140_idtable);
+
+static const struct of_device_id cy8ctma140_of_match[] = {
+ { .compatible = "cypress,cy8ctma140", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cy8ctma140_of_match);
+
+static struct i2c_driver cy8ctma140_driver = {
+ .driver = {
+ .name = CY8CTMA140_NAME,
+ .pm = &cy8ctma140_pm,
+ .of_match_table = cy8ctma140_of_match,
+ },
+ .id_table = cy8ctma140_idtable,
+ .probe = cy8ctma140_probe,
+};
+module_i2c_driver(cy8ctma140_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("CY8CTMA140 TouchScreen Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 6bcffc930384..02a73d9a4def 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -744,8 +744,7 @@ static void cyttsp4_report_slot_liftoff(struct cyttsp4_mt_data *md,
for (t = 0; t < max_slots; t++) {
input_mt_slot(md->input, t);
- input_mt_report_slot_state(md->input,
- MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(md->input);
}
}
@@ -845,7 +844,7 @@ static void cyttsp4_final_sync(struct input_dev *input, int max_slots, int *ids)
if (ids[t])
continue;
input_mt_slot(input, t);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input);
}
input_sync(input);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 3f5d463dbeed..697aa2c158f7 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -340,7 +340,7 @@ static void cyttsp_report_tchdata(struct cyttsp *ts)
continue;
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input);
}
input_sync(input);
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d2587724c52a..3a4f18d3450d 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -38,6 +38,9 @@
#define WORK_REGISTER_NUM_X 0x33
#define WORK_REGISTER_NUM_Y 0x34
+#define PMOD_REGISTER_ACTIVE 0x00
+#define PMOD_REGISTER_HIBERNATE 0x03
+
#define M09_REGISTER_THRESHOLD 0x80
#define M09_REGISTER_GAIN 0x92
#define M09_REGISTER_OFFSET 0x93
@@ -53,6 +56,7 @@
#define WORK_REGISTER_OPMODE 0x3c
#define FACTORY_REGISTER_OPMODE 0x01
+#define PMOD_REGISTER_OPMODE 0xa5
#define TOUCH_EVENT_DOWN 0x00
#define TOUCH_EVENT_UP 0x01
@@ -65,6 +69,12 @@
#define EDT_RAW_DATA_RETRIES 100
#define EDT_RAW_DATA_DELAY 1000 /* usec */
+enum edt_pmode {
+ EDT_PMODE_NOT_SUPPORTED,
+ EDT_PMODE_HIBERNATE,
+ EDT_PMODE_POWEROFF,
+};
+
enum edt_ver {
EDT_M06,
EDT_M09,
@@ -103,6 +113,7 @@ struct edt_ft5x06_ts_data {
struct mutex mutex;
bool factory_mode;
+ enum edt_pmode suspend_mode;
int threshold;
int gain;
int offset;
@@ -527,6 +538,29 @@ static const struct attribute_group edt_ft5x06_attr_group = {
.attrs = edt_ft5x06_attrs,
};
+static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata)
+{
+ struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
+
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
+ tsdata->threshold);
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
+ tsdata->gain);
+ if (reg_addr->reg_offset != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
+ tsdata->offset);
+ if (reg_addr->reg_offset_x != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
+ tsdata->offset_x);
+ if (reg_addr->reg_offset_y != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
+ tsdata->offset_y);
+ if (reg_addr->reg_report_rate != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
+ tsdata->report_rate);
+}
+
#ifdef CONFIG_DEBUG_FS
static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
{
@@ -592,7 +626,6 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
{
struct i2c_client *client = tsdata->client;
int retries = EDT_SWITCH_MODE_RETRIES;
- struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
int ret;
int error;
@@ -624,24 +657,7 @@ static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
kfree(tsdata->raw_buffer);
tsdata->raw_buffer = NULL;
- /* restore parameters */
- edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold,
- tsdata->threshold);
- edt_ft5x06_register_write(tsdata, reg_addr->reg_gain,
- tsdata->gain);
- if (reg_addr->reg_offset != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset,
- tsdata->offset);
- if (reg_addr->reg_offset_x != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x,
- tsdata->offset_x);
- if (reg_addr->reg_offset_y != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y,
- tsdata->offset_y);
- if (reg_addr->reg_report_rate != NO_REGISTER)
- edt_ft5x06_register_write(tsdata, reg_addr->reg_report_rate,
- tsdata->report_rate);
-
+ edt_ft5x06_restore_reg_parameters(tsdata);
enable_irq(client->irq);
return 0;
@@ -762,9 +778,8 @@ static const struct file_operations debugfs_raw_data_fops = {
.read = edt_ft5x06_debugfs_raw_data_read,
};
-static void
-edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+ const char *debugfs_name)
{
tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL);
@@ -777,8 +792,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
}
-static void
-edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
debugfs_remove_recursive(tsdata->debug_dir);
kfree(tsdata->raw_buffer);
@@ -786,14 +800,17 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
#else
-static inline void
-edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
- const char *debugfs_name)
+static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+{
+ return -ENOSYS;
+}
+
+static void edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+ const char *debugfs_name)
{
}
-static inline void
-edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+static void edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
}
@@ -938,19 +955,25 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
error = device_property_read_u32(dev, "offset", &val);
if (!error) {
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+ if (reg_addr->reg_offset != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata,
+ reg_addr->reg_offset, val);
tsdata->offset = val;
}
error = device_property_read_u32(dev, "offset-x", &val);
if (!error) {
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_x, val);
+ if (reg_addr->reg_offset_x != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata,
+ reg_addr->reg_offset_x, val);
tsdata->offset_x = val;
}
error = device_property_read_u32(dev, "offset-y", &val);
if (!error) {
- edt_ft5x06_register_write(tsdata, reg_addr->reg_offset_y, val);
+ if (reg_addr->reg_offset_y != NO_REGISTER)
+ edt_ft5x06_register_write(tsdata,
+ reg_addr->reg_offset_y, val);
tsdata->offset_y = val;
}
}
@@ -1114,6 +1137,19 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
+ /*
+ * Check which sleep modes we can support. Power-off requires the
+ * reset-pin to ensure correct power-down/power-up behaviour. Start with
+ * the EDT_PMODE_POWEROFF test since this is the deepest possible sleep
+ * mode.
+ */
+ if (tsdata->reset_gpio)
+ tsdata->suspend_mode = EDT_PMODE_POWEROFF;
+ else if (tsdata->wake_gpio)
+ tsdata->suspend_mode = EDT_PMODE_HIBERNATE;
+ else
+ tsdata->suspend_mode = EDT_PMODE_NOT_SUPPORTED;
+
if (tsdata->wake_gpio) {
usleep_range(5000, 6000);
gpiod_set_value_cansleep(tsdata->wake_gpio, 1);
@@ -1227,6 +1263,102 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused edt_ft5x06_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+ struct gpio_desc *reset_gpio = tsdata->reset_gpio;
+ int ret;
+
+ if (device_may_wakeup(dev))
+ return 0;
+
+ if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED)
+ return 0;
+
+ /* Enter hibernate mode. */
+ ret = edt_ft5x06_register_write(tsdata, PMOD_REGISTER_OPMODE,
+ PMOD_REGISTER_HIBERNATE);
+ if (ret)
+ dev_warn(dev, "Failed to set hibernate mode\n");
+
+ if (tsdata->suspend_mode == EDT_PMODE_HIBERNATE)
+ return 0;
+
+ /*
+ * Power off according to the datasheet. Cutting the power may leave the
+ * irq line in an undefined state depending on the host pull resistor
+ * settings. Disable the irq to avoid adjusting each host until the
+ * device is back in a fully functional state.
+ */
+ disable_irq(tsdata->client->irq);
+
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(1000, 2000);
+
+ ret = regulator_disable(tsdata->vcc);
+ if (ret)
+ dev_warn(dev, "Failed to disable vcc\n");
+
+ return 0;
+}
+
+static int __maybe_unused edt_ft5x06_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ return 0;
+
+ if (tsdata->suspend_mode == EDT_PMODE_NOT_SUPPORTED)
+ return 0;
+
+ if (tsdata->suspend_mode == EDT_PMODE_POWEROFF) {
+ struct gpio_desc *reset_gpio = tsdata->reset_gpio;
+
+ /*
+ * We can't check if the regulator is a dummy or a real
+ * regulator. So we need to specify the 5ms reset time (T_rst)
+ * here instead of the 100us T_rtp time. We also need to wait
+ * 300ms in case it was a real supply and the power was cut off.
+ * Toggling the reset pin is also a way to exit the hibernate
+ * mode.
+ */
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ ret = regulator_enable(tsdata->vcc);
+ if (ret) {
+ dev_err(dev, "Failed to enable vcc\n");
+ return ret;
+ }
+
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(300);
+
+ edt_ft5x06_restore_reg_parameters(tsdata);
+ enable_irq(tsdata->client->irq);
+
+ if (tsdata->factory_mode)
+ ret = edt_ft5x06_factory_mode(tsdata);
+ } else {
+ struct gpio_desc *wake_gpio = tsdata->wake_gpio;
+
+ gpiod_set_value_cansleep(wake_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(wake_gpio, 1);
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
+ edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
+
static const struct edt_i2c_chip_data edt_ft5x06_data = {
.max_support_points = 5,
};
@@ -1265,6 +1397,8 @@ static struct i2c_driver edt_ft5x06_ts_driver = {
.driver = {
.name = "edt_ft5x06",
.of_match_table = edt_ft5x06_of_match,
+ .pm = &edt_ft5x06_ts_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = edt_ft5x06_ts_id,
.probe = edt_ft5x06_ts_probe,
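Both new PM handlers above return early when device_may_wakeup() is true, so a touchscreen armed as a wakeup source stays powered across suspend. This diff does not show where that capability gets declared; purely as an illustration of the gating (the helper name and its placement are assumptions, not taken from the driver), such a call typically looks like:

/* Illustrative sketch only: arming a device as a wakeup source so that
 * device_may_wakeup() returns true in the suspend/resume paths above.
 * example_arm_wakeup() is a made-up helper, not part of this patch. */
#include <linux/i2c.h>
#include <linux/pm_wakeup.h>

static void example_arm_wakeup(struct i2c_client *client)
{
	/* Declare the capability; userspace can still toggle it via
	 * the device's power/wakeup sysfs attribute. */
	device_init_wakeup(&client->dev, true);
}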
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 2289f9638116..233cb1085bbd 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
@@ -89,6 +90,7 @@
/* FW read command, 0x53 0x?? 0x0, 0x01 */
#define E_ELAN_INFO_FW_VER 0x00
#define E_ELAN_INFO_BC_VER 0x10
+#define E_ELAN_INFO_REK 0xE0
#define E_ELAN_INFO_TEST_VER 0xE0
#define E_ELAN_INFO_FW_ID 0xF0
#define E_INFO_OSR 0xD6
@@ -136,6 +138,7 @@ struct elants_data {
unsigned int y_res;
unsigned int x_max;
unsigned int y_max;
+ struct touchscreen_properties prop;
enum elants_state state;
enum elants_iap_mode iap_mode;
@@ -189,7 +192,8 @@ static int elants_i2c_read(struct i2c_client *client, void *data, size_t size)
static int elants_i2c_execute_command(struct i2c_client *client,
const u8 *cmd, size_t cmd_size,
- u8 *resp, size_t resp_size)
+ u8 *resp, size_t resp_size,
+ int retries, const char *cmd_name)
{
struct i2c_msg msgs[2];
int ret;
@@ -209,30 +213,55 @@ static int elants_i2c_execute_command(struct i2c_client *client,
break;
default:
- dev_err(&client->dev, "%s: invalid command %*ph\n",
- __func__, (int)cmd_size, cmd);
+ dev_err(&client->dev, "(%s): invalid command: %*ph\n",
+ cmd_name, (int)cmd_size, cmd);
return -EINVAL;
}
- msgs[0].addr = client->addr;
- msgs[0].flags = client->flags & I2C_M_TEN;
- msgs[0].len = cmd_size;
- msgs[0].buf = (u8 *)cmd;
+ for (;;) {
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags & I2C_M_TEN;
+ msgs[0].len = cmd_size;
+ msgs[0].buf = (u8 *)cmd;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags & I2C_M_TEN;
+ msgs[1].flags |= I2C_M_RD;
+ msgs[1].len = resp_size;
+ msgs[1].buf = resp;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0) {
+ if (--retries > 0) {
+ dev_dbg(&client->dev,
+ "(%s) I2C transfer failed: %pe (retrying)\n",
+ cmd_name, ERR_PTR(ret));
+ continue;
+ }
- msgs[1].addr = client->addr;
- msgs[1].flags = client->flags & I2C_M_TEN;
- msgs[1].flags |= I2C_M_RD;
- msgs[1].len = resp_size;
- msgs[1].buf = resp;
+ dev_err(&client->dev,
+ "(%s) I2C transfer failed: %pe\n",
+ cmd_name, ERR_PTR(ret));
+ return ret;
+ }
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- return ret;
+ if (ret != ARRAY_SIZE(msgs) ||
+ resp[FW_HDR_TYPE] != expected_response) {
+ if (--retries > 0) {
+ dev_dbg(&client->dev,
+ "(%s) unexpected response: %*ph (retrying)\n",
+ cmd_name, ret, resp);
+ continue;
+ }
- if (ret != ARRAY_SIZE(msgs) || resp[FW_HDR_TYPE] != expected_response)
- return -EIO;
+ dev_err(&client->dev,
+ "(%s) unexpected response: %*ph\n",
+ cmd_name, ret, resp);
+ return -EIO;
+ }
- return 0;
+ return 0;
+ }
}
static int elants_i2c_calibrate(struct elants_data *ts)
@@ -305,27 +334,21 @@ static u16 elants_i2c_parse_version(u8 *buf)
static int elants_i2c_query_hw_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int retry_cnt = MAX_RETRIES;
const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_ID, 0x00, 0x01 };
u8 resp[HEADER_SIZE];
+ int error;
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ while (retry_cnt--) {
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (!error) {
- ts->hw_version = elants_i2c_parse_version(resp);
- if (ts->hw_version != 0xffff)
- return 0;
- }
-
- dev_dbg(&client->dev, "read fw id error=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
- }
+ resp, sizeof(resp), 1,
+ "read fw id");
+ if (error)
+ return error;
- if (error) {
- dev_err(&client->dev,
- "Failed to read fw id: %d\n", error);
- return error;
+ ts->hw_version = elants_i2c_parse_version(resp);
+ if (ts->hw_version != 0xffff)
+ return 0;
}
dev_err(&client->dev, "Invalid fw id: %#04x\n", ts->hw_version);
@@ -336,26 +359,27 @@ static int elants_i2c_query_hw_version(struct elants_data *ts)
static int elants_i2c_query_fw_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int retry_cnt = MAX_RETRIES;
const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_FW_VER, 0x00, 0x01 };
u8 resp[HEADER_SIZE];
+ int error;
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+ while (retry_cnt--) {
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (!error) {
- ts->fw_version = elants_i2c_parse_version(resp);
- if (ts->fw_version != 0x0000 &&
- ts->fw_version != 0xffff)
- return 0;
- }
+ resp, sizeof(resp), 1,
+ "read fw version");
+ if (error)
+ return error;
+
+ ts->fw_version = elants_i2c_parse_version(resp);
+ if (ts->fw_version != 0x0000 && ts->fw_version != 0xffff)
+ return 0;
- dev_dbg(&client->dev, "read fw version error=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
+ dev_dbg(&client->dev, "(read fw version) resp %*phC\n",
+ (int)sizeof(resp), resp);
}
- dev_err(&client->dev,
- "Failed to read fw version or fw version is invalid\n");
+ dev_err(&client->dev, "Invalid fw ver: %#04x\n", ts->fw_version);
return -EINVAL;
}
@@ -363,30 +387,24 @@ static int elants_i2c_query_fw_version(struct elants_data *ts)
static int elants_i2c_query_test_version(struct elants_data *ts)
{
struct i2c_client *client = ts->client;
- int error, retry_cnt;
+ int error;
u16 version;
const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_TEST_VER, 0x00, 0x01 };
u8 resp[HEADER_SIZE];
- for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
- error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (!error) {
- version = elants_i2c_parse_version(resp);
- ts->test_version = version >> 8;
- ts->solution_version = version & 0xff;
-
- return 0;
- }
-
- dev_dbg(&client->dev,
- "read test version error rc=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp), MAX_RETRIES,
+ "read test version");
+ if (error) {
+ dev_err(&client->dev, "Failed to read test version\n");
+ return error;
}
- dev_err(&client->dev, "Failed to read test version\n");
+ version = elants_i2c_parse_version(resp);
+ ts->test_version = version >> 8;
+ ts->solution_version = version & 0xff;
- return -EINVAL;
+ return 0;
}
static int elants_i2c_query_bc_version(struct elants_data *ts)
@@ -398,13 +416,10 @@ static int elants_i2c_query_bc_version(struct elants_data *ts)
int error;
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev,
- "read BC version error=%d, buf=%*phC\n",
- error, (int)sizeof(resp), resp);
+ resp, sizeof(resp), 1,
+ "read BC version");
+ if (error)
return error;
- }
version = elants_i2c_parse_version(resp);
ts->bc_version = version >> 8;
@@ -436,12 +451,10 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
error = elants_i2c_execute_command(client,
get_resolution_cmd,
sizeof(get_resolution_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get resolution command failed: %d\n",
- error);
+ resp, sizeof(resp), 1,
+ "get resolution");
+ if (error)
return error;
- }
rows = resp[2] + resp[6] + resp[10];
cols = resp[3] + resp[7] + resp[11];
@@ -449,36 +462,29 @@ static int elants_i2c_query_ts_info(struct elants_data *ts)
/* Process mm_to_pixel information */
error = elants_i2c_execute_command(client,
get_osr_cmd, sizeof(get_osr_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get osr command failed: %d\n",
- error);
+ resp, sizeof(resp), 1, "get osr");
+ if (error)
return error;
- }
osr = resp[3];
error = elants_i2c_execute_command(client,
get_physical_scan_cmd,
sizeof(get_physical_scan_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get physical scan command failed: %d\n",
- error);
+ resp, sizeof(resp), 1,
+ "get physical scan");
+ if (error)
return error;
- }
phy_x = get_unaligned_be16(&resp[2]);
error = elants_i2c_execute_command(client,
get_physical_drive_cmd,
sizeof(get_physical_drive_cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "get physical drive command failed: %d\n",
- error);
+ resp, sizeof(resp), 1,
+ "get physical drive");
+ if (error)
return error;
- }
phy_y = get_unaligned_be16(&resp[2]);
@@ -633,11 +639,10 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
/* Compare TS Remark ID and FW Remark ID */
error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
- resp, sizeof(resp));
- if (error) {
- dev_err(&client->dev, "failed to query Remark ID: %d\n", error);
+ resp, sizeof(resp),
+ 1, "read Remark ID");
+ if (error)
return error;
- }
ts_remark_id = get_unaligned_be16(&resp[3]);
@@ -875,8 +880,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
input_mt_slot(input, i);
input_mt_report_slot_state(input, tool_type, true);
- input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(input, &ts->prop, x, y, true);
input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, w);
@@ -1017,7 +1021,7 @@ out:
*/
static ssize_t calibrate_store(struct device *dev,
struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct elants_data *ts = i2c_get_clientdata(client);
@@ -1063,8 +1067,28 @@ static ssize_t show_iap_mode(struct device *dev,
"Normal" : "Recovery");
}
+static ssize_t show_calibration_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ const u8 cmd[] = { CMD_HEADER_READ, E_ELAN_INFO_REK, 0x00, 0x01 };
+ u8 resp[HEADER_SIZE];
+ u16 rek_count;
+ int error;
+
+ error = elants_i2c_execute_command(client, cmd, sizeof(cmd),
+ resp, sizeof(resp), 1,
+ "read ReK status");
+ if (error)
+ return error;
+
+ rek_count = get_unaligned_be16(&resp[2]);
+ return sprintf(buf, "0x%04x\n", rek_count);
+}
+
static DEVICE_ATTR_WO(calibrate);
static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
+static DEVICE_ATTR(calibration_count, S_IRUGO, show_calibration_count, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
struct elants_version_attribute {
@@ -1120,6 +1144,7 @@ static struct attribute *elants_attributes[] = {
&dev_attr_calibrate.attr,
&dev_attr_update_fw.attr,
&dev_attr_iap_mode.attr,
+ &dev_attr_calibration_count.attr,
&elants_ver_attr_fw_version.dattr.attr,
&elants_ver_attr_hw_version.dattr.attr,
@@ -1290,25 +1315,7 @@ static int elants_i2c_probe(struct i2c_client *client,
ts->input->name = "Elan Touchscreen";
ts->input->id.bustype = BUS_I2C;
- __set_bit(BTN_TOUCH, ts->input->keybit);
- __set_bit(EV_ABS, ts->input->evbit);
- __set_bit(EV_KEY, ts->input->evbit);
-
- /* Single touch input params setup */
- input_set_abs_params(ts->input, ABS_X, 0, ts->x_max, 0, 0);
- input_set_abs_params(ts->input, ABS_Y, 0, ts->y_max, 0, 0);
- input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
- input_abs_set_res(ts->input, ABS_X, ts->x_res);
- input_abs_set_res(ts->input, ABS_Y, ts->y_res);
-
/* Multitouch input params setup */
- error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
- INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
- if (error) {
- dev_err(&client->dev,
- "failed to initialize MT slots: %d\n", error);
- return error;
- }
input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->x_max, 0, 0);
input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
@@ -1320,6 +1327,16 @@ static int elants_i2c_probe(struct i2c_client *client,
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
+ touchscreen_parse_properties(ts->input, true, &ts->prop);
+
+ error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to initialize MT slots: %d\n", error);
+ return error;
+ }
+
error = input_register_device(ts->input);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 247c3aaba2d8..f67efdd040b2 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -391,7 +391,7 @@ static void mip4_clear_input(struct mip4_ts *ts)
/* Screen */
for (i = 0; i < MIP4_MAX_FINGERS; i++) {
input_mt_slot(ts->input, i);
- input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(ts->input);
}
/* Keys */
@@ -534,7 +534,7 @@ static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
} else {
/* Release event */
input_mt_slot(ts->input, id);
- input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(ts->input);
}
input_mt_sync_frame(ts->input);
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 2ef1adaed9af..1f96657310b7 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -54,6 +54,7 @@
enum mms_type {
TYPE_MMS114 = 114,
TYPE_MMS152 = 152,
+ TYPE_MMS345L = 345,
};
struct mms114_data {
@@ -250,6 +251,15 @@ static int mms114_get_version(struct mms114_data *data)
int error;
switch (data->type) {
+ case TYPE_MMS345L:
+ error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
+ if (error)
+ return error;
+
+ dev_info(dev, "TSP FW Rev: bootloader 0x%x / core 0x%x / config 0x%x\n",
+ buf[0], buf[1], buf[2]);
+ break;
+
case TYPE_MMS152:
error = __mms114_read_reg(data, MMS152_FW_REV, 3, buf);
if (error)
@@ -287,8 +297,8 @@ static int mms114_setup_regs(struct mms114_data *data)
if (error < 0)
return error;
- /* MMS152 has no configuration or power on registers */
- if (data->type == TYPE_MMS152)
+ /* Only MMS114 has configuration and power on registers */
+ if (data->type != TYPE_MMS114)
return 0;
error = mms114_set_active(data, true);
@@ -547,7 +557,7 @@ static int __maybe_unused mms114_suspend(struct device *dev)
/* Release all touch */
for (id = 0; id < MMS114_MAX_TOUCH; id++) {
input_mt_slot(input_dev, id);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(input_dev);
}
input_mt_report_pointer_emulation(input_dev, true);
@@ -597,6 +607,9 @@ static const struct of_device_id mms114_dt_match[] = {
}, {
.compatible = "melfas,mms152",
.data = (void *)TYPE_MMS152,
+ }, {
+ .compatible = "melfas,mms345l",
+ .data = (void *)TYPE_MMS345L,
},
{ }
};
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index 0e2e08f3f433..ef6aaed217cf 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -100,7 +100,7 @@ static void rpi_ts_poll(struct input_dev *input)
released_ids = ts->known_ids & ~modified_ids;
for_each_set_bit(i, &released_ids, RPI_TS_MAX_SUPPORTED_POINTS) {
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, 0);
+ input_mt_report_slot_inactive(input);
modified_ids &= ~(BIT(i));
}
ts->known_ids = modified_ids;
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index b6f95f20f924..b54cc64e4ea6 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -198,7 +198,7 @@ static void stmfts_report_contact_release(struct stmfts_data *sdata,
u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4;
input_mt_slot(sdata->input, slot_id);
- input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false);
+ input_mt_report_slot_inactive(sdata->input);
input_sync(sdata->input);
}
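The four conversions above (melfas_mip4, mms114, raspberrypi-ts, stmfts) replace input_mt_report_slot_state(dev, MT_TOOL_FINGER, false) with the equivalent input_mt_report_slot_inactive() helper. A minimal sketch of the release-all pattern these hunks converge on; the function and parameter names here are made up for illustration:

/* Illustrative sketch only: release every contact using the new helper,
 * as the converted drivers do on suspend or when clearing input state.
 * example_release_all_slots() and max_slots are not from the patch. */
#include <linux/input/mt.h>

static void example_release_all_slots(struct input_dev *input, int max_slots)
{
	int i;

	for (i = 0; i < max_slots; i++) {
		input_mt_slot(input, i);
		input_mt_report_slot_inactive(input);
	}
	input_mt_sync_frame(input);
	input_sync(input);
}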
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index bfa4ca3ab7a9..5b7204ee2eb2 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig INTERCONNECT
- tristate "On-Chip Interconnect management support"
+ bool "On-Chip Interconnect management support"
help
Support for management of the on-chip interconnects.
@@ -11,6 +11,7 @@ menuconfig INTERCONNECT
if INTERCONNECT
+source "drivers/interconnect/imx/Kconfig"
source "drivers/interconnect/qcom/Kconfig"
endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 725029ae7a2c..4825c287ca13 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -4,4 +4,5 @@ CFLAGS_core.o := -I$(src)
icc-core-objs := core.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
+obj-$(CONFIG_INTERCONNECT_IMX) += imx/
obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 2c6515e3ecf1..e5f998744501 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -158,6 +158,7 @@ static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
path->reqs[i].node = node;
path->reqs[i].dev = dev;
+ path->reqs[i].enabled = true;
/* reference to previous node was saved during path traversal */
node = node->reverse;
}
@@ -249,9 +250,12 @@ static int aggregate_requests(struct icc_node *node)
if (p->pre_aggregate)
p->pre_aggregate(node);
- hlist_for_each_entry(r, &node->req_list, req_node)
+ hlist_for_each_entry(r, &node->req_list, req_node) {
+ if (!r->enabled)
+ continue;
p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
&node->avg_bw, &node->peak_bw);
+ }
return 0;
}
@@ -350,10 +354,35 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
return node;
}
+static void devm_icc_release(struct device *dev, void *res)
+{
+ icc_put(*(struct icc_path **)res);
+}
+
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
+{
+ struct icc_path **ptr, *path;
+
+ ptr = devres_alloc(devm_icc_release, sizeof(**ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ path = of_icc_get(dev, name);
+ if (!IS_ERR(path)) {
+ *ptr = path;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return path;
+}
+EXPORT_SYMBOL_GPL(devm_of_icc_get);
+
/**
- * of_icc_get() - get a path handle from a DT node based on name
+ * of_icc_get_by_index() - get a path handle from a DT node based on index
* @dev: device pointer for the consumer device
- * @name: interconnect path name
+ * @idx: interconnect path index
*
* This function will search for a path between two endpoints and return an
* icc_path handle on success. Use icc_put() to release constraints when they
@@ -365,13 +394,12 @@ static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
* Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
* when the API is disabled or the "interconnects" DT property is missing.
*/
-struct icc_path *of_icc_get(struct device *dev, const char *name)
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
- struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+ struct icc_path *path;
struct icc_node *src_node, *dst_node;
- struct device_node *np = NULL;
+ struct device_node *np;
struct of_phandle_args src_args, dst_args;
- int idx = 0;
int ret;
if (!dev || !dev->of_node)
@@ -391,12 +419,6 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
 * let's support only global ids and extend this in the future if needed
* without breaking DT compatibility.
*/
- if (name) {
- idx = of_property_match_string(np, "interconnect-names", name);
- if (idx < 0)
- return ERR_PTR(idx);
- }
-
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells", idx * 2,
&src_args);
@@ -439,12 +461,8 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
return path;
}
- if (name)
- path->name = kstrdup_const(name, GFP_KERNEL);
- else
- path->name = kasprintf(GFP_KERNEL, "%s-%s",
- src_node->name, dst_node->name);
-
+ path->name = kasprintf(GFP_KERNEL, "%s-%s",
+ src_node->name, dst_node->name);
if (!path->name) {
kfree(path);
return ERR_PTR(-ENOMEM);
@@ -452,6 +470,53 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
return path;
}
+EXPORT_SYMBOL_GPL(of_icc_get_by_index);
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+ struct device_node *np;
+ int idx = 0;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-ENODEV);
+
+ np = dev->of_node;
+
+ /*
+ * When the consumer DT node does not have an "interconnects" property,
+ * return a NULL path to skip setting constraints.
+ */
+ if (!of_find_property(np, "interconnects", NULL))
+ return NULL;
+
+ /*
+ * We use a combination of phandle and specifier for endpoint. For now
+ * let's support only global ids and extend this in the future if needed
+ * without breaking DT compatibility.
+ */
+ if (name) {
+ idx = of_property_match_string(np, "interconnect-names", name);
+ if (idx < 0)
+ return ERR_PTR(idx);
+ }
+
+ return of_icc_get_by_index(dev, idx);
+}
EXPORT_SYMBOL_GPL(of_icc_get);
/**
@@ -479,6 +544,24 @@ void icc_set_tag(struct icc_path *path, u32 tag)
EXPORT_SYMBOL_GPL(icc_set_tag);
/**
+ * icc_get_name() - Get name of the icc path
+ * @path: reference to the path returned by icc_get()
+ *
+ * This function is used by an interconnect consumer to get the name of the icc
+ * path.
+ *
+ * Returns a valid pointer on success, or NULL otherwise.
+ */
+const char *icc_get_name(struct icc_path *path)
+{
+ if (!path)
+ return NULL;
+
+ return path->name;
+}
+EXPORT_SYMBOL_GPL(icc_get_name);
+
+/**
* icc_set_bw() - set bandwidth constraints on an interconnect path
* @path: reference to the path returned by icc_get()
* @avg_bw: average bandwidth in kilobytes per second
@@ -546,6 +629,39 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
}
EXPORT_SYMBOL_GPL(icc_set_bw);
+static int __icc_enable(struct icc_path *path, bool enable)
+{
+ int i;
+
+ if (!path)
+ return 0;
+
+ if (WARN_ON(IS_ERR(path) || !path->num_nodes))
+ return -EINVAL;
+
+ mutex_lock(&icc_lock);
+
+ for (i = 0; i < path->num_nodes; i++)
+ path->reqs[i].enabled = enable;
+
+ mutex_unlock(&icc_lock);
+
+ return icc_set_bw(path, path->reqs[0].avg_bw,
+ path->reqs[0].peak_bw);
+}
+
+int icc_enable(struct icc_path *path)
+{
+ return __icc_enable(path, true);
+}
+EXPORT_SYMBOL_GPL(icc_enable);
+
+int icc_disable(struct icc_path *path)
+{
+ return __icc_enable(path, false);
+}
+EXPORT_SYMBOL_GPL(icc_disable);
+
/**
* icc_get() - return a handle for path between two endpoints
* @dev: the device requesting the path
@@ -908,12 +1024,7 @@ static int __init icc_init(void)
return 0;
}
-static void __exit icc_exit(void)
-{
- debugfs_remove_recursive(icc_debugfs_dir);
-}
-module_init(icc_init);
-module_exit(icc_exit);
+device_initcall(icc_init);
MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
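The core changes above add devm_of_icc_get(), of_icc_get_by_index(), icc_get_name() and the icc_enable()/icc_disable() pair, but no consumer is converted in this diff. A hedged sketch of how a consumer driver might use them follows; the "dram" path name, the bandwidth numbers and foo_probe() are illustrative assumptions, only the interconnect API calls come from the patch.

/* Hypothetical consumer sketch, not taken from this series: request a
 * named path, vote for bandwidth, then park the vote while idle. */
#include <linux/interconnect.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct icc_path *path;
	int ret;

	/* Managed variant added above; "dram" is a made-up path name. */
	path = devm_of_icc_get(&pdev->dev, "dram");
	if (IS_ERR(path))
		return PTR_ERR(path);
	if (!path)	/* API disabled or no "interconnects" DT property */
		return 0;

	dev_info(&pdev->dev, "using icc path %s\n", icc_get_name(path));

	/* Example numbers in kBps, as expected by icc_set_bw(). */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		return ret;

	/* Drop the aggregated vote without losing the request. */
	return icc_disable(path);
}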
diff --git a/drivers/interconnect/imx/Kconfig b/drivers/interconnect/imx/Kconfig
new file mode 100644
index 000000000000..be2928362bb7
--- /dev/null
+++ b/drivers/interconnect/imx/Kconfig
@@ -0,0 +1,17 @@
+config INTERCONNECT_IMX
+ tristate "i.MX interconnect drivers"
+ depends on ARCH_MXC || COMPILE_TEST
+ help
+ Generic interconnect drivers for i.MX SoCs
+
+config INTERCONNECT_IMX8MM
+ tristate "i.MX8MM interconnect driver"
+ depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MN
+ tristate "i.MX8MN interconnect driver"
+ depends on INTERCONNECT_IMX
+
+config INTERCONNECT_IMX8MQ
+ tristate "i.MX8MQ interconnect driver"
+ depends on INTERCONNECT_IMX
diff --git a/drivers/interconnect/imx/Makefile b/drivers/interconnect/imx/Makefile
new file mode 100644
index 000000000000..21fd5233754f
--- /dev/null
+++ b/drivers/interconnect/imx/Makefile
@@ -0,0 +1,9 @@
+imx-interconnect-objs := imx.o
+imx8mm-interconnect-objs := imx8mm.o
+imx8mq-interconnect-objs := imx8mq.o
+imx8mn-interconnect-objs := imx8mn.o
+
+obj-$(CONFIG_INTERCONNECT_IMX) += imx-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MM) += imx8mm-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MQ) += imx8mq-interconnect.o
+obj-$(CONFIG_INTERCONNECT_IMX8MN) += imx8mn-interconnect.o
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
new file mode 100644
index 000000000000..ac420f86008e
--- /dev/null
+++ b/drivers/interconnect/imx/imx.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+
+#include "imx.h"
+
+/* private icc_node data */
+struct imx_icc_node {
+ const struct imx_icc_node_desc *desc;
+ struct device *qos_dev;
+ struct dev_pm_qos_request qos_req;
+};
+
+static int imx_icc_node_set(struct icc_node *node)
+{
+ struct device *dev = node->provider->dev;
+ struct imx_icc_node *node_data = node->data;
+ u64 freq;
+
+ if (!node_data->qos_dev)
+ return 0;
+
+ freq = (node->avg_bw + node->peak_bw) * node_data->desc->adj->bw_mul;
+ do_div(freq, node_data->desc->adj->bw_div);
+ dev_dbg(dev, "node %s device %s avg_bw %ukBps peak_bw %ukBps min_freq %llukHz\n",
+ node->name, dev_name(node_data->qos_dev),
+ node->avg_bw, node->peak_bw, freq);
+
+ if (freq > S32_MAX) {
+ dev_err(dev, "%s can't request more than S32_MAX freq\n",
+ node->name);
+ return -ERANGE;
+ }
+
+ dev_pm_qos_update_request(&node_data->qos_req, freq);
+
+ return 0;
+}
+
+static int imx_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ return imx_icc_node_set(dst);
+}
+
+/* imx_icc_node_destroy() - Destroy an imx icc_node, including private data */
+static void imx_icc_node_destroy(struct icc_node *node)
+{
+ struct imx_icc_node *node_data = node->data;
+ int ret;
+
+ if (dev_pm_qos_request_active(&node_data->qos_req)) {
+ ret = dev_pm_qos_remove_request(&node_data->qos_req);
+ if (ret)
+ dev_warn(node->provider->dev,
+ "failed to remove qos request for %s\n",
+ dev_name(node_data->qos_dev));
+ }
+
+ put_device(node_data->qos_dev);
+ icc_node_del(node);
+ icc_node_destroy(node->id);
+}
+
+static int imx_icc_node_init_qos(struct icc_provider *provider,
+ struct icc_node *node)
+{
+ struct imx_icc_node *node_data = node->data;
+ const struct imx_icc_node_adj_desc *adj = node_data->desc->adj;
+ struct device *dev = provider->dev;
+ struct device_node *dn = NULL;
+ struct platform_device *pdev;
+
+ if (adj->main_noc) {
+ node_data->qos_dev = dev;
+ dev_dbg(dev, "icc node %s[%d] is main noc itself\n",
+ node->name, node->id);
+ } else {
+ dn = of_parse_phandle(dev->of_node, adj->phandle_name, 0);
+ if (!dn) {
+ dev_warn(dev, "Failed to parse %s\n",
+ adj->phandle_name);
+ return -ENODEV;
+ }
+ /* Allow scaling to be disabled on a per-node basis */
+ if (!dn || !of_device_is_available(dn)) {
+ dev_warn(dev, "Missing property %s, skip scaling %s\n",
+ adj->phandle_name, node->name);
+ return 0;
+ }
+
+ pdev = of_find_device_by_node(dn);
+ of_node_put(dn);
+ if (!pdev) {
+ dev_warn(dev, "node %s[%d] missing device for %pOF\n",
+ node->name, node->id, dn);
+ return -EPROBE_DEFER;
+ }
+ node_data->qos_dev = &pdev->dev;
+ dev_dbg(dev, "node %s[%d] has device node %pOF\n",
+ node->name, node->id, dn);
+ }
+
+ return dev_pm_qos_add_request(node_data->qos_dev,
+ &node_data->qos_req,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+}
+
+static struct icc_node *imx_icc_node_add(struct icc_provider *provider,
+ const struct imx_icc_node_desc *node_desc)
+{
+ struct device *dev = provider->dev;
+ struct imx_icc_node *node_data;
+ struct icc_node *node;
+ int ret;
+
+ node = icc_node_create(node_desc->id);
+ if (IS_ERR(node)) {
+ dev_err(dev, "failed to create node %d\n", node_desc->id);
+ return node;
+ }
+
+ if (node->data) {
+ dev_err(dev, "already created node %s id=%d\n",
+ node_desc->name, node_desc->id);
+ return ERR_PTR(-EEXIST);
+ }
+
+ node_data = devm_kzalloc(dev, sizeof(*node_data), GFP_KERNEL);
+ if (!node_data) {
+ icc_node_destroy(node->id);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ node->name = node_desc->name;
+ node->data = node_data;
+ node_data->desc = node_desc;
+ icc_node_add(node, provider);
+
+ if (node_desc->adj) {
+ ret = imx_icc_node_init_qos(provider, node);
+ if (ret < 0) {
+ imx_icc_node_destroy(node);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return node;
+}
+
+static void imx_icc_unregister_nodes(struct icc_provider *provider)
+{
+ struct icc_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &provider->nodes, node_list)
+ imx_icc_node_destroy(node);
+}
+
+static int imx_icc_register_nodes(struct icc_provider *provider,
+ const struct imx_icc_node_desc *descs,
+ int count)
+{
+ struct icc_onecell_data *provider_data = provider->data;
+ int ret;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct icc_node *node;
+ const struct imx_icc_node_desc *node_desc = &descs[i];
+ size_t j;
+
+ node = imx_icc_node_add(provider, node_desc);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ if (ret != -EPROBE_DEFER)
+ dev_err(provider->dev, "failed to add %s: %d\n",
+ node_desc->name, ret);
+ goto err;
+ }
+ provider_data->nodes[node->id] = node;
+
+ for (j = 0; j < node_desc->num_links; j++) {
+ ret = icc_link_create(node, node_desc->links[j]);
+ if (ret) {
+ dev_err(provider->dev, "failed to link node %d to %d: %d\n",
+ node->id, node_desc->links[j], ret);
+ goto err;
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ imx_icc_unregister_nodes(provider);
+
+ return ret;
+}
+
+static int get_max_node_id(struct imx_icc_node_desc *nodes, int nodes_count)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < nodes_count; ++i)
+ if (nodes[i].id > ret)
+ ret = nodes[i].id;
+
+ return ret;
+}
+
+int imx_icc_register(struct platform_device *pdev,
+ struct imx_icc_node_desc *nodes, int nodes_count)
+{
+ struct device *dev = &pdev->dev;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ int max_node_id;
+ int ret;
+
+ /* icc_onecell_data is indexed by node_id, unlike nodes param */
+ max_node_id = get_max_node_id(nodes, nodes_count);
+ data = devm_kzalloc(dev, struct_size(data, nodes, max_node_id),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->num_nodes = max_node_id;
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+ provider->set = imx_icc_set;
+ provider->aggregate = icc_std_aggregate;
+ provider->xlate = of_icc_xlate_onecell;
+ provider->data = data;
+ provider->dev = dev->parent;
+ platform_set_drvdata(pdev, provider);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider: %d\n", ret);
+ return ret;
+ }
+
+ ret = imx_icc_register_nodes(provider, nodes, nodes_count);
+ if (ret)
+ goto provider_del;
+
+ return 0;
+
+provider_del:
+ icc_provider_del(provider);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_icc_register);
+
+int imx_icc_unregister(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+ int ret;
+
+ imx_icc_unregister_nodes(provider);
+
+ ret = icc_provider_del(provider);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_icc_unregister);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/imx/imx.h b/drivers/interconnect/imx/imx.h
new file mode 100644
index 000000000000..75da51076c68
--- /dev/null
+++ b/drivers/interconnect/imx/imx.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+#ifndef __DRIVERS_INTERCONNECT_IMX_H
+#define __DRIVERS_INTERCONNECT_IMX_H
+
+#include <linux/kernel.h>
+
+#define IMX_ICC_MAX_LINKS 4
+
+/*
+ * struct imx_icc_node_adj_desc - Describe a dynamically adjustable node
+ */
+struct imx_icc_node_adj_desc {
+ unsigned int bw_mul, bw_div;
+ const char *phandle_name;
+ bool main_noc;
+};
+
+/*
+ * struct imx_icc_node_desc - Describe an interconnect node
+ * @name: name of the node
+ * @id: a unique id to identify the node
+ * @links: an array of slave node ids
+ * @num_links: number of ids defined in links
+ * @adj: optional bandwidth adjustment descriptor, NULL if the node is not adjustable
+ */
+struct imx_icc_node_desc {
+ const char *name;
+ u16 id;
+ u16 links[IMX_ICC_MAX_LINKS];
+ u16 num_links;
+ const struct imx_icc_node_adj_desc *adj;
+};
+
+#define DEFINE_BUS_INTERCONNECT(_name, _id, _adj, ...) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .adj = _adj, \
+ .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
+ .links = { __VA_ARGS__ }, \
+ }
+
+#define DEFINE_BUS_MASTER(_name, _id, _dest_id) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, NULL, _dest_id)
+
+#define DEFINE_BUS_SLAVE(_name, _id, _adj) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, _adj)
+
+int imx_icc_register(struct platform_device *pdev,
+ struct imx_icc_node_desc *nodes,
+ int nodes_count);
+int imx_icc_unregister(struct platform_device *pdev);
+
+#endif /* __DRIVERS_INTERCONNECT_IMX_H */
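The DEFINE_BUS_* helpers above count their links with ARRAY_SIZE() over a compound literal, which is what lets DEFINE_BUS_SLAVE() pass no links at all (num_links ends up 0). Roughly, a master entry expands as sketched below; this is a hand expansion for illustration, not compiler output:

/* Hand-expanded illustration of
 *   DEFINE_BUS_MASTER("A53", IMX8MM_ICM_A53, IMX8MM_ICN_NOC)
 * as used in imx8mm.c further down: */
{
	.id = IMX8MM_ICM_A53,
	.name = "A53",
	.adj = NULL,
	.num_links = 1,		/* ARRAY_SIZE((int[]){ IMX8MM_ICN_NOC }) */
	.links = { IMX8MM_ICN_NOC },
},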
diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c
new file mode 100644
index 000000000000..1083490bb391
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mm.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MM SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ * Author: Leonard Crestez <leonard.crestez@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mm.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mm_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mm_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 16,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram, there are several other
+ * PL301 nics which are skipped/merged into PL301_MAIN
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MM_ICN_NOC, &imx8mm_noc_adj,
+ IMX8MM_ICS_DRAM, IMX8MM_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MM_ICS_DRAM, &imx8mm_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MM_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MM_ICM_A53, IMX8MM_ICN_NOC),
+
+ /* VPUMIX */
+ DEFINE_BUS_MASTER("VPU H1", IMX8MM_ICM_VPU_H1, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G1", IMX8MM_ICM_VPU_G1, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_MASTER("VPU G2", IMX8MM_ICM_VPU_G2, IMX8MM_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MM_ICN_VIDEO, NULL, IMX8MM_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU 2D", IMX8MM_ICM_GPU2D, IMX8MM_ICN_GPU),
+ DEFINE_BUS_MASTER("GPU 3D", IMX8MM_ICM_GPU3D, IMX8MM_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MM_ICN_GPU, NULL, IMX8MM_ICN_NOC),
+
+ /* DISPLAYMIX */
+ DEFINE_BUS_MASTER("CSI", IMX8MM_ICM_CSI, IMX8MM_ICN_MIPI),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MM_ICM_LCDIF, IMX8MM_ICN_MIPI),
+ DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MM_ICN_MIPI, NULL, IMX8MM_ICN_NOC),
+
+ /* HSIO */
+ DEFINE_BUS_MASTER("USB1", IMX8MM_ICM_USB1, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_MASTER("USB2", IMX8MM_ICM_USB2, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_MASTER("PCIE", IMX8MM_ICM_PCIE, IMX8MM_ICN_HSIO),
+ DEFINE_BUS_INTERCONNECT("PL301_HSIO", IMX8MM_ICN_HSIO, NULL, IMX8MM_ICN_NOC),
+
+ /* Audio */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MM_ICM_SDMA2, IMX8MM_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3", IMX8MM_ICM_SDMA3, IMX8MM_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MM_ICN_AUDIO, NULL, IMX8MM_ICN_MAIN),
+
+ /* Ethernet */
+ DEFINE_BUS_MASTER("ENET", IMX8MM_ICM_ENET, IMX8MM_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MM_ICN_ENET, NULL, IMX8MM_ICN_MAIN),
+
+ /* Other */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MM_ICM_SDMA1, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MM_ICM_NAND, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MM_ICM_USDHC1, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MM_ICM_USDHC2, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC3", IMX8MM_ICM_USDHC3, IMX8MM_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MM_ICN_MAIN, NULL,
+ IMX8MM_ICN_NOC, IMX8MM_ICS_OCRAM),
+};
+
+static int imx8mm_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mm_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mm_icc_driver = {
+ .probe = imx8mm_icc_probe,
+ .remove = imx8mm_icc_remove,
+ .driver = {
+ .name = "imx8mm-interconnect",
+ },
+};
+
+module_platform_driver(imx8mm_icc_driver);
+MODULE_AUTHOR("Alexandre Bailon <abailon@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx8mm-interconnect");
diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c
new file mode 100644
index 000000000000..ad97e55fd4e5
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mn.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MN SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mn.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mn_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mn_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram, there are several other
+ * PL301 nics which are skipped/merged into PL301_MAIN
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MN_ICN_NOC, &imx8mn_noc_adj,
+ IMX8MN_ICS_DRAM, IMX8MN_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MN_ICS_DRAM, &imx8mn_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MN_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MN_ICM_A53, IMX8MN_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU", IMX8MN_ICM_GPU, IMX8MN_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MN_ICN_GPU, NULL, IMX8MN_ICN_NOC),
+
+ /* DISPLAYMIX */
+ DEFINE_BUS_MASTER("CSI1", IMX8MN_ICM_CSI1, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("CSI2", IMX8MN_ICM_CSI2, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("ISI", IMX8MN_ICM_ISI, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MN_ICM_LCDIF, IMX8MN_ICN_MIPI),
+ DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MN_ICN_MIPI, NULL, IMX8MN_ICN_NOC),
+
+ /* USB goes straight to NOC */
+ DEFINE_BUS_MASTER("USB", IMX8MN_ICM_USB, IMX8MN_ICN_NOC),
+
+ /* Audio */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MN_ICM_SDMA2, IMX8MN_ICN_AUDIO),
+ DEFINE_BUS_MASTER("SDMA3", IMX8MN_ICM_SDMA3, IMX8MN_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MN_ICN_AUDIO, NULL, IMX8MN_ICN_MAIN),
+
+ /* Ethernet */
+ DEFINE_BUS_MASTER("ENET", IMX8MN_ICM_ENET, IMX8MN_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MN_ICN_ENET, NULL, IMX8MN_ICN_MAIN),
+
+ /* Other */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MN_ICM_SDMA1, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MN_ICM_NAND, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MN_ICM_USDHC1, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MN_ICM_USDHC2, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC3", IMX8MN_ICM_USDHC3, IMX8MN_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MN_ICN_MAIN, NULL,
+ IMX8MN_ICN_NOC, IMX8MN_ICS_OCRAM),
+};
+
+static int imx8mn_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mn_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mn_icc_driver = {
+ .probe = imx8mn_icc_probe,
+ .remove = imx8mn_icc_remove,
+ .driver = {
+ .name = "imx8mn-interconnect",
+ },
+};
+
+module_platform_driver(imx8mn_icc_driver);
+MODULE_ALIAS("platform:imx8mn-interconnect");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
new file mode 100644
index 000000000000..ba43a15aefec
--- /dev/null
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX8MQ SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/imx8mq.h>
+
+#include "imx.h"
+
+static const struct imx_icc_node_adj_desc imx8mq_dram_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .phandle_name = "fsl,ddrc",
+};
+
+static const struct imx_icc_node_adj_desc imx8mq_noc_adj = {
+ .bw_mul = 1,
+ .bw_div = 4,
+ .main_noc = true,
+};
+
+/*
+ * Describe bus masters, slaves and connections between them
+ *
+ * This is a simplified subset of the bus diagram, there are several other
+ * PL301 nics which are skipped/merged into PL301_MAIN
+ */
+static struct imx_icc_node_desc nodes[] = {
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MQ_ICN_NOC, &imx8mq_noc_adj,
+ IMX8MQ_ICS_DRAM, IMX8MQ_ICN_MAIN),
+
+ DEFINE_BUS_SLAVE("DRAM", IMX8MQ_ICS_DRAM, &imx8mq_dram_adj),
+ DEFINE_BUS_SLAVE("OCRAM", IMX8MQ_ICS_OCRAM, NULL),
+ DEFINE_BUS_MASTER("A53", IMX8MQ_ICM_A53, IMX8MQ_ICN_NOC),
+
+ /* VPUMIX */
+ DEFINE_BUS_MASTER("VPU", IMX8MQ_ICM_VPU, IMX8MQ_ICN_VIDEO),
+ DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MQ_ICN_VIDEO, NULL, IMX8MQ_ICN_NOC),
+
+ /* GPUMIX */
+ DEFINE_BUS_MASTER("GPU", IMX8MQ_ICM_GPU, IMX8MQ_ICN_GPU),
+ DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MQ_ICN_GPU, NULL, IMX8MQ_ICN_NOC),
+
+ /* DISPMIX (only for DCSS) */
+ DEFINE_BUS_MASTER("DC", IMX8MQ_ICM_DCSS, IMX8MQ_ICN_DCSS),
+ DEFINE_BUS_INTERCONNECT("PL301_DC", IMX8MQ_ICN_DCSS, NULL, IMX8MQ_ICN_NOC),
+
+ /* USBMIX */
+ DEFINE_BUS_MASTER("USB1", IMX8MQ_ICM_USB1, IMX8MQ_ICN_USB),
+ DEFINE_BUS_MASTER("USB2", IMX8MQ_ICM_USB2, IMX8MQ_ICN_USB),
+ DEFINE_BUS_INTERCONNECT("PL301_USB", IMX8MQ_ICN_USB, NULL, IMX8MQ_ICN_NOC),
+
+ /* PL301_DISPLAY (IPs other than DCSS, inside SUPERMIX) */
+ DEFINE_BUS_MASTER("CSI1", IMX8MQ_ICM_CSI1, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_MASTER("CSI2", IMX8MQ_ICM_CSI2, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_MASTER("LCDIF", IMX8MQ_ICM_LCDIF, IMX8MQ_ICN_DISPLAY),
+ DEFINE_BUS_INTERCONNECT("PL301_DISPLAY", IMX8MQ_ICN_DISPLAY, NULL, IMX8MQ_ICN_MAIN),
+
+ /* AUDIO */
+ DEFINE_BUS_MASTER("SDMA2", IMX8MQ_ICM_SDMA2, IMX8MQ_ICN_AUDIO),
+ DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MQ_ICN_AUDIO, NULL, IMX8MQ_ICN_DISPLAY),
+
+ /* ENET */
+ DEFINE_BUS_MASTER("ENET", IMX8MQ_ICM_ENET, IMX8MQ_ICN_ENET),
+ DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MQ_ICN_ENET, NULL, IMX8MQ_ICN_MAIN),
+
+ /* OTHER */
+ DEFINE_BUS_MASTER("SDMA1", IMX8MQ_ICM_SDMA1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("NAND", IMX8MQ_ICM_NAND, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC1", IMX8MQ_ICM_USDHC1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("USDHC2", IMX8MQ_ICM_USDHC2, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("PCIE1", IMX8MQ_ICM_PCIE1, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_MASTER("PCIE2", IMX8MQ_ICM_PCIE2, IMX8MQ_ICN_MAIN),
+ DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MQ_ICN_MAIN, NULL,
+ IMX8MQ_ICN_NOC, IMX8MQ_ICS_OCRAM),
+};
+
+static int imx8mq_icc_probe(struct platform_device *pdev)
+{
+ return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes));
+}
+
+static int imx8mq_icc_remove(struct platform_device *pdev)
+{
+ return imx_icc_unregister(pdev);
+}
+
+static struct platform_driver imx8mq_icc_driver = {
+ .probe = imx8mq_icc_probe,
+ .remove = imx8mq_icc_remove,
+ .driver = {
+ .name = "imx8mq-interconnect",
+ },
+};
+
+module_platform_driver(imx8mq_icc_driver);
+MODULE_ALIAS("platform:imx8mq-interconnect");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@nxp.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
index bf18cb7239df..f5f82a5c939e 100644
--- a/drivers/interconnect/internal.h
+++ b/drivers/interconnect/internal.h
@@ -14,6 +14,7 @@
* @req_node: entry in list of requests for the particular @node
* @node: the interconnect node to which this constraint applies
* @dev: reference to the device that sets the constraints
+ * @enabled: indicates whether the path with this request is enabled
* @tag: path tag (optional)
* @avg_bw: an integer describing the average bandwidth in kBps
* @peak_bw: an integer describing the peak bandwidth in kBps
@@ -22,6 +23,7 @@ struct icc_req {
struct hlist_node req_node;
struct icc_node *node;
struct device *dev;
+ bool enabled;
u32 tag;
u32 avg_bw;
u32 peak_bw;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 2ab07ce17abb..aca76383f201 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -303,6 +303,15 @@ config ROCKCHIP_IOMMU
Say Y here if you are using a Rockchip SoC that includes an IOMMU
device.
+config SUN50I_IOMMU
+ bool "Allwinner H6 IOMMU Support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select IOMMU_DMA
+ help
+ Support for the IOMMU introduced in the Allwinner H6 SoCs.
+
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
depends on ARCH_TEGRA_2x_SOC
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 9f33fdb3bb05..57cf4ba5e27c 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
+obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2883ac389abb..311ef7105c6d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -22,7 +22,6 @@
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
-#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
@@ -43,8 +42,7 @@
#include <asm/gart.h>
#include <asm/dma.h>
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
+#include "amd_iommu.h"
#include "irq_remapping.h"
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -71,6 +69,8 @@
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
+#define DEFAULT_PGTABLE_LEVEL PAGE_MODE_3_LEVEL
+
static DEFINE_SPINLOCK(pd_bitmap_lock);
/* List of all available dev_data structures */
@@ -99,7 +99,6 @@ struct iommu_cmd {
struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
-static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
static void update_and_flush_device_table(struct protection_domain *domain,
struct domain_pgtable *pgtable);
@@ -280,12 +279,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
return dev_data;
}
-struct iommu_dev_data *get_dev_data(struct device *dev)
-{
- return dev->archdata.iommu;
-}
-EXPORT_SYMBOL(get_dev_data);
-
/*
 * Find or create an IOMMU group for an acpihid device.
*/
@@ -314,16 +307,15 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
static const int caps[] = {
- PCI_EXT_CAP_ID_ATS,
PCI_EXT_CAP_ID_PRI,
PCI_EXT_CAP_ID_PASID,
};
int i, pos;
- if (pci_ats_disabled())
+ if (!pci_ats_supported(pdev))
return false;
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < 2; ++i) {
pos = pci_find_ext_capability(pdev, caps[i]);
if (pos == 0)
return false;
@@ -336,7 +328,7 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
struct iommu_dev_data *dev_data;
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
return dev_data->errata & (1 << erratum) ? true : false;
}
@@ -349,7 +341,7 @@ static bool check_device(struct device *dev)
{
int devid;
- if (!dev || !dev->dma_mask)
+ if (!dev)
return false;
devid = get_device_id(dev);
@@ -366,32 +358,18 @@ static bool check_device(struct device *dev)
return true;
}
-static void init_iommu_group(struct device *dev)
-{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return;
-
- iommu_group_put(group);
-}
-
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
int devid;
- if (dev->archdata.iommu)
+ if (dev_iommu_priv_get(dev))
return 0;
devid = get_device_id(dev);
if (devid < 0)
return devid;
- iommu = amd_iommu_rlookup_table[devid];
-
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
@@ -412,9 +390,7 @@ static int iommu_init_device(struct device *dev)
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
- dev->archdata.iommu = dev_data;
-
- iommu_device_link(&iommu->iommu, dev);
+ dev_iommu_priv_set(dev, dev_data);
return 0;
}
@@ -433,31 +409,18 @@ static void iommu_ignore_device(struct device *dev)
setup_aliases(dev);
}
-static void iommu_uninit_device(struct device *dev)
+static void amd_iommu_uninit_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- int devid;
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
- iommu = amd_iommu_rlookup_table[devid];
-
- dev_data = search_dev_data(devid);
+ dev_data = dev_iommu_priv_get(dev);
if (!dev_data)
return;
if (dev_data->domain)
detach_device(dev);
- iommu_device_unlink(&iommu->iommu, dev);
-
- iommu_group_remove_device(dev);
-
- /* Remove dma-ops */
- dev->dma_ops = NULL;
+ dev_iommu_priv_set(dev, NULL);
/*
* We keep dev_data around for unplugged devices and reuse it when the
@@ -521,7 +484,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
devid & 0xff);
if (pdev)
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
if (dev_data && __ratelimit(&dev_data->rs)) {
pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
@@ -1418,20 +1381,19 @@ static struct page *free_sub_pt(unsigned long root, int mode,
return freelist;
}
-static void free_pagetable(struct protection_domain *domain)
+static void free_pagetable(struct domain_pgtable *pgtable)
{
- struct domain_pgtable pgtable;
struct page *freelist = NULL;
unsigned long root;
- amd_iommu_domain_get_pgtable(domain, &pgtable);
- atomic64_set(&domain->pt_root, 0);
+ if (pgtable->mode == PAGE_MODE_NONE)
+ return;
- BUG_ON(pgtable.mode < PAGE_MODE_NONE ||
- pgtable.mode > PAGE_MODE_6_LEVEL);
+ BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+ pgtable->mode > PAGE_MODE_6_LEVEL);
- root = (unsigned long)pgtable.root;
- freelist = free_sub_pt(root, pgtable.mode, freelist);
+ root = (unsigned long)pgtable->root;
+ freelist = free_sub_pt(root, pgtable->mode, freelist);
free_page_list(freelist);
}
@@ -1844,70 +1806,6 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-/*
- * Free a domain, only used if something went wrong in the
- * allocation path and we need to free an already allocated page table
- */
-static void dma_ops_domain_free(struct protection_domain *domain)
-{
- if (!domain)
- return;
-
- iommu_put_dma_cookie(&domain->domain);
-
- free_pagetable(domain);
-
- if (domain->id)
- domain_id_free(domain->id);
-
- kfree(domain);
-}
-
-/*
- * Allocates a new protection domain usable for the dma_ops functions.
- * It also initializes the page table and the address allocator data
- * structures required for the dma_ops interface
- */
-static struct protection_domain *dma_ops_domain_alloc(void)
-{
- struct protection_domain *domain;
- u64 *pt_root, root;
-
- domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
- if (protection_domain_init(domain))
- goto free_domain;
-
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root)
- goto free_domain;
-
- root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
- atomic64_set(&domain->pt_root, root);
- domain->flags = PD_DMA_OPS_MASK;
-
- if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
- goto free_domain;
-
- return domain;
-
-free_domain:
- dma_ops_domain_free(domain);
-
- return NULL;
-}
-
-/*
- * little helper function to check whether a given protection domain is a
- * dma_ops domain
- */
-static bool dma_ops_domain(struct protection_domain *domain)
-{
- return domain->flags & PD_DMA_OPS_MASK;
-}
-
static void set_dte_entry(u16 devid, struct protection_domain *domain,
struct domain_pgtable *pgtable,
bool ats, bool ppr)
@@ -2119,14 +2017,14 @@ out_err:
static int attach_device(struct device *dev,
struct protection_domain *domain)
{
- struct pci_dev *pdev;
struct iommu_dev_data *dev_data;
+ struct pci_dev *pdev;
unsigned long flags;
int ret;
spin_lock_irqsave(&domain->lock, flags);
- dev_data = get_dev_data(dev);
+ dev_data = dev_iommu_priv_get(dev);
spin_lock(&dev_data->lock);
@@ -2139,8 +2037,10 @@ static int attach_device(struct device *dev,
pdev = to_pci_dev(dev);
if (domain->flags & PD_IOMMUV2_MASK) {
+ struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
+
ret = -EINVAL;
- if (!dev_data->passthrough)
+ if (def_domain->type != IOMMU_DOMAIN_IDENTITY)
goto out;
if (dev_data->iommu_v2) {
@@ -2188,7 +2088,7 @@ static void detach_device(struct device *dev)
struct iommu_dev_data *dev_data;
unsigned long flags;
- dev_data = get_dev_data(dev);
+ dev_data = dev_iommu_priv_get(dev);
domain = dev_data->domain;
spin_lock_irqsave(&domain->lock, flags);
@@ -2222,68 +2122,60 @@ out:
spin_unlock_irqrestore(&domain->lock, flags);
}
-static int amd_iommu_add_device(struct device *dev)
+static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
- struct iommu_dev_data *dev_data;
- struct iommu_domain *domain;
+ struct iommu_device *iommu_dev;
struct amd_iommu *iommu;
int ret, devid;
- if (!check_device(dev) || get_dev_data(dev))
- return 0;
+ if (!check_device(dev))
+ return ERR_PTR(-ENODEV);
devid = get_device_id(dev);
if (devid < 0)
- return devid;
+ return ERR_PTR(devid);
iommu = amd_iommu_rlookup_table[devid];
+ if (dev_iommu_priv_get(dev))
+ return &iommu->iommu;
+
ret = iommu_init_device(dev);
if (ret) {
if (ret != -ENOTSUPP)
dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
-
+ iommu_dev = ERR_PTR(ret);
iommu_ignore_device(dev);
- dev->dma_ops = NULL;
- goto out;
+ } else {
+ iommu_dev = &iommu->iommu;
}
- init_iommu_group(dev);
- dev_data = get_dev_data(dev);
+ iommu_completion_wait(iommu);
- BUG_ON(!dev_data);
+ return iommu_dev;
+}
- if (dev_data->iommu_v2)
- iommu_request_dm_for_dev(dev);
+static void amd_iommu_probe_finalize(struct device *dev)
+{
+ struct iommu_domain *domain;
/* Domains are initialized for this device - have a look what we ended up with */
domain = iommu_get_domain_for_dev(dev);
- if (domain->type == IOMMU_DOMAIN_IDENTITY)
- dev_data->passthrough = true;
- else if (domain->type == IOMMU_DOMAIN_DMA)
+ if (domain->type == IOMMU_DOMAIN_DMA)
iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
-
-out:
- iommu_completion_wait(iommu);
-
- return 0;
}
-static void amd_iommu_remove_device(struct device *dev)
+static void amd_iommu_release_device(struct device *dev)
{
+ int devid = get_device_id(dev);
struct amd_iommu *iommu;
- int devid;
if (!check_device(dev))
return;
- devid = get_device_id(dev);
- if (devid < 0)
- return;
-
iommu = amd_iommu_rlookup_table[devid];
- iommu_uninit_device(dev);
+ amd_iommu_uninit_device(dev);
iommu_completion_wait(iommu);
}
@@ -2418,27 +2310,46 @@ static void cleanup_domain(struct protection_domain *domain)
static void protection_domain_free(struct protection_domain *domain)
{
+ struct domain_pgtable pgtable;
+
if (!domain)
return;
if (domain->id)
domain_id_free(domain->id);
+ amd_iommu_domain_get_pgtable(domain, &pgtable);
+ atomic64_set(&domain->pt_root, 0);
+ free_pagetable(&pgtable);
+
kfree(domain);
}
-static int protection_domain_init(struct protection_domain *domain)
+static int protection_domain_init(struct protection_domain *domain, int mode)
{
+ u64 *pt_root = NULL, root;
+
+ BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
+
spin_lock_init(&domain->lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
INIT_LIST_HEAD(&domain->dev_list);
+ if (mode != PAGE_MODE_NONE) {
+ pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!pt_root)
+ return -ENOMEM;
+ }
+
+ root = amd_iommu_domain_encode_pgtable(pt_root, mode);
+ atomic64_set(&domain->pt_root, root);
+
return 0;
}
-static struct protection_domain *protection_domain_alloc(void)
+static struct protection_domain *protection_domain_alloc(int mode)
{
struct protection_domain *domain;
@@ -2446,7 +2357,7 @@ static struct protection_domain *protection_domain_alloc(void)
if (!domain)
return NULL;
- if (protection_domain_init(domain))
+ if (protection_domain_init(domain, mode))
goto out_err;
return domain;
@@ -2459,54 +2370,35 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
- struct protection_domain *pdomain;
- u64 *pt_root, root;
+ struct protection_domain *domain;
+ int mode = DEFAULT_PGTABLE_LEVEL;
- switch (type) {
- case IOMMU_DOMAIN_UNMANAGED:
- pdomain = protection_domain_alloc();
- if (!pdomain)
- return NULL;
+ if (type == IOMMU_DOMAIN_IDENTITY)
+ mode = PAGE_MODE_NONE;
- pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- if (!pt_root) {
- protection_domain_free(pdomain);
- return NULL;
- }
+ domain = protection_domain_alloc(mode);
+ if (!domain)
+ return NULL;
- root = amd_iommu_domain_encode_pgtable(pt_root, PAGE_MODE_3_LEVEL);
- atomic64_set(&pdomain->pt_root, root);
+ domain->domain.geometry.aperture_start = 0;
+ domain->domain.geometry.aperture_end = ~0ULL;
+ domain->domain.geometry.force_aperture = true;
- pdomain->domain.geometry.aperture_start = 0;
- pdomain->domain.geometry.aperture_end = ~0ULL;
- pdomain->domain.geometry.force_aperture = true;
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
- break;
- case IOMMU_DOMAIN_DMA:
- pdomain = dma_ops_domain_alloc();
- if (!pdomain) {
- pr_err("Failed to allocate\n");
- return NULL;
- }
- break;
- case IOMMU_DOMAIN_IDENTITY:
- pdomain = protection_domain_alloc();
- if (!pdomain)
- return NULL;
+ return &domain->domain;
- atomic64_set(&pdomain->pt_root, PAGE_MODE_NONE);
- break;
- default:
- return NULL;
- }
+free_domain:
+ protection_domain_free(domain);
- return &pdomain->domain;
+ return NULL;
}
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- struct domain_pgtable pgtable;
domain = to_pdomain(dom);
@@ -2518,29 +2410,19 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
if (!dom)
return;
- switch (dom->type) {
- case IOMMU_DOMAIN_DMA:
- /* Now release the domain */
- dma_ops_domain_free(domain);
- break;
- default:
- amd_iommu_domain_get_pgtable(domain, &pgtable);
-
- if (pgtable.mode != PAGE_MODE_NONE)
- free_pagetable(domain);
+ if (dom->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&domain->domain);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
- protection_domain_free(domain);
- break;
- }
+ protection_domain_free(domain);
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
struct device *dev)
{
- struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct amd_iommu *iommu;
int devid;
@@ -2578,7 +2460,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
if (!check_device(dev))
return -EINVAL;
- dev_data = dev->archdata.iommu;
+ dev_data = dev_iommu_priv_get(dev);
dev_data->defer_attach = false;
iommu = amd_iommu_rlookup_table[dev_data->devid];
@@ -2734,12 +2616,14 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
-static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
+bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev)
{
- struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
return dev_data->defer_attach;
}
+EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
@@ -2758,6 +2642,20 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
amd_iommu_flush_iotlb_all(domain);
}
+static int amd_iommu_def_domain_type(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = dev_iommu_priv_get(dev);
+ if (!dev_data)
+ return 0;
+
+ if (dev_data->iommu_v2)
+ return IOMMU_DOMAIN_IDENTITY;
+
+ return 0;
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -2767,8 +2665,9 @@ const struct iommu_ops amd_iommu_ops = {
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
- .add_device = amd_iommu_add_device,
- .remove_device = amd_iommu_remove_device,
+ .probe_device = amd_iommu_probe_device,
+ .release_device = amd_iommu_release_device,
+ .probe_finalize = amd_iommu_probe_finalize,
.device_group = amd_iommu_device_group,
.domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
@@ -2777,6 +2676,7 @@ const struct iommu_ops amd_iommu_ops = {
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
+ .def_domain_type = amd_iommu_def_domain_type,
};
/*****************************************************************************
@@ -2807,7 +2707,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
struct protection_domain *domain = to_pdomain(dom);
struct domain_pgtable pgtable;
unsigned long flags;
- u64 pt_root;
spin_lock_irqsave(&domain->lock, flags);
@@ -2815,18 +2714,13 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
amd_iommu_domain_get_pgtable(domain, &pgtable);
/* Update data structure */
- pt_root = amd_iommu_domain_encode_pgtable(NULL, PAGE_MODE_NONE);
- atomic64_set(&domain->pt_root, pt_root);
+ atomic64_set(&domain->pt_root, 0);
/* Make changes visible to IOMMUs */
update_domain(domain);
- /* Restore old pgtable in domain->ptroot to free page-table */
- pt_root = amd_iommu_domain_encode_pgtable(pgtable.root, pgtable.mode);
- atomic64_set(&domain->pt_root, pt_root);
-
/* Page-table is not visible to IOMMU anymore, so free it */
- free_pagetable(domain);
+ free_pagetable(&pgtable);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -3085,7 +2979,7 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
struct amd_iommu *iommu;
struct iommu_cmd cmd;
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
iommu = amd_iommu_rlookup_table[dev_data->devid];
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
@@ -3098,23 +2992,27 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *pdomain;
- struct iommu_domain *io_domain;
+ struct iommu_dev_data *dev_data;
struct device *dev = &pdev->dev;
+ struct iommu_domain *io_domain;
if (!check_device(dev))
return NULL;
- pdomain = get_dev_data(dev)->domain;
- if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
- get_dev_data(dev)->defer_attach = false;
- io_domain = iommu_get_domain_for_dev(dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ pdomain = dev_data->domain;
+ io_domain = iommu_get_domain_for_dev(dev);
+
+ if (pdomain == NULL && dev_data->defer_attach) {
+ dev_data->defer_attach = false;
pdomain = to_pdomain(io_domain);
attach_device(dev, pdomain);
}
+
if (pdomain == NULL)
return NULL;
- if (!dma_ops_domain(pdomain))
+ if (io_domain->type != IOMMU_DOMAIN_DMA)
return NULL;
/* Only return IOMMUv2 domains */
@@ -3132,7 +3030,7 @@ void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
if (!amd_iommu_v2_supported())
return;
- dev_data = get_dev_data(&pdev->dev);
+ dev_data = dev_iommu_priv_get(&pdev->dev);
dev_data->errata |= (1 << erratum);
}
EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
@@ -3151,11 +3049,8 @@ int amd_iommu_device_info(struct pci_dev *pdev,
memset(info, 0, sizeof(*info));
- if (!pci_ats_disabled()) {
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
- if (pos)
- info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
- }
+ if (pci_ats_supported(pdev))
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (pos)
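The net effect of the amd_iommu.c changes above is that per-device data now lives behind dev_iommu_priv_set()/dev_iommu_priv_get() instead of dev->archdata.iommu, and the driver exposes probe_device()/release_device()/probe_finalize() rather than add_device()/remove_device(), leaving group setup and the sysfs link to the IOMMU core. A minimal sketch of that contract follows; the example_ names are hypothetical, and only the iommu_ops callbacks and the dev_iommu_priv_* accessors are the real interfaces being adopted here.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/slab.h>

struct example_iommu {
	struct iommu_device iommu;	/* registered via iommu_device_register() */
};

struct example_dev_data {
	u16 devid;			/* whatever per-device state is needed */
};

/* Hypothetical helper: map a struct device to its IOMMU instance */
static struct example_iommu *example_lookup_iommu(struct device *dev);

static struct iommu_device *example_probe_device(struct device *dev)
{
	struct example_iommu *iommu = example_lookup_iommu(dev);
	struct example_dev_data *dev_data;

	if (!iommu)
		return ERR_PTR(-ENODEV);	/* core skips this device */

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return ERR_PTR(-ENOMEM);

	/* Replaces dev->archdata.iommu; no iommu_device_link() or
	 * iommu_group_get_for_dev() here - the core handles both. */
	dev_iommu_priv_set(dev, dev_data);

	return &iommu->iommu;
}

static void example_release_device(struct device *dev)
{
	kfree(dev_iommu_priv_get(dev));
	dev_iommu_priv_set(dev, NULL);
}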
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
index 12d540d9b59b..f892992c8744 100644
--- a/drivers/iommu/amd_iommu.h
+++ b/drivers/iommu/amd_iommu.h
@@ -1,9 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
#ifndef AMD_IOMMU_H
#define AMD_IOMMU_H
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+#include <linux/iommu.h>
+
+#include "amd_iommu_types.h"
+
+extern int amd_iommu_get_num_iommus(void);
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
+extern int amd_iommu_init_api(void);
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
+/* Needed for interrupt remapping */
+extern int amd_iommu_prepare(void);
+extern int amd_iommu_enable(void);
+extern void amd_iommu_disable(void);
+extern int amd_iommu_reenable(int);
+extern int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#ifdef CONFIG_IRQ_REMAP
+extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+ return 0;
+}
+#endif
+
+#define PPR_SUCCESS 0x0
+#define PPR_INVALID 0x1
+#define PPR_FAILURE 0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag);
+
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return false;
+
+ return !!(iommu->features & f);
+}
+
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+ return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+ return phys_to_virt(__sme_clr(paddr));
+}
+
+extern bool translation_pre_enabled(struct amd_iommu *iommu);
+extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev);
+extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+ bool cmd_line);
#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
diff --git a/drivers/iommu/amd_iommu_debugfs.c b/drivers/iommu/amd_iommu_debugfs.c
index c6a5c737ef09..545372fcc72f 100644
--- a/drivers/iommu/amd_iommu_debugfs.c
+++ b/drivers/iommu/amd_iommu_debugfs.c
@@ -8,10 +8,9 @@
*/
#include <linux/debugfs.h>
-#include <linux/iommu.h>
#include <linux/pci.h>
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
+
+#include "amd_iommu.h"
static struct dentry *amd_iommu_debugfs;
static DEFINE_MUTEX(amd_iommu_debugfs_lock);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5b81fd16f5fa..3faff7f80fd2 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -18,7 +18,6 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
-#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
@@ -32,9 +31,8 @@
#include <asm/irq_remapping.h>
#include <linux/crash_dump.h>
+
#include "amd_iommu.h"
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
#include "irq_remapping.h"
/*
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
deleted file mode 100644
index 92c2ba6468a0..000000000000
--- a/drivers/iommu/amd_iommu_proto.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
-#define _ASM_X86_AMD_IOMMU_PROTO_H
-
-#include "amd_iommu_types.h"
-
-extern int amd_iommu_get_num_iommus(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
-
-#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
-#else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
-#endif
-
-/* Needed for interrupt remapping */
-extern int amd_iommu_prepare(void);
-extern int amd_iommu_enable(void);
-extern void amd_iommu_disable(void);
-extern int amd_iommu_reenable(int);
-extern int amd_iommu_enable_faulting(void);
-extern int amd_iommu_guest_ir;
-
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
-extern bool amd_iommu_v2_supported(void);
-extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
- u64 address);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
- unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
-extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
-
-#ifdef CONFIG_IRQ_REMAP
-extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
-#else
-static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
-{
- return 0;
-}
-#endif
-
-#define PPR_SUCCESS 0x0
-#define PPR_INVALID 0x1
-#define PPR_FAILURE 0xf
-
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
- int status, int tag);
-
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
- return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
- (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
-{
- if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
- return false;
-
- return !!(iommu->features & f);
-}
-
-static inline u64 iommu_virt_to_phys(void *vaddr)
-{
- return (u64)__sme_set(virt_to_phys(vaddr));
-}
-
-static inline void *iommu_phys_to_virt(unsigned long paddr)
-{
- return phys_to_virt(__sme_clr(paddr));
-}
-
-extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern struct iommu_dev_data *get_dev_data(struct device *dev);
-#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 7a8fdec138bd..30a5d412255a 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -395,10 +395,10 @@
#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
extern bool amd_iommu_dump;
-#define DUMP_printk(format, arg...) \
- do { \
- if (amd_iommu_dump) \
- printk(KERN_INFO "AMD-Vi: " format, ## arg); \
+#define DUMP_printk(format, arg...) \
+ do { \
+ if (amd_iommu_dump) \
+ pr_info("AMD-Vi: " format, ## arg); \
} while(0);
/* global flag if IOMMUs cache non-present entries */
@@ -645,7 +645,6 @@ struct iommu_dev_data {
struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
- bool passthrough; /* Device is identity mapped */
struct {
bool enabled;
int qdep;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d6d85debd01b..e4b025c5637c 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -13,13 +13,11 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
-#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
-#include "amd_iommu_types.h"
-#include "amd_iommu_proto.h"
+#include "amd_iommu.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
@@ -487,7 +485,7 @@ static void do_fault(struct work_struct *work)
flags |= FAULT_FLAG_WRITE;
flags |= FAULT_FLAG_REMOTE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_extend_vma(mm, address);
if (!vma || address < vma->vm_start)
/* failed to get a vma in the right range */
@@ -499,7 +497,7 @@ static void do_fault(struct work_struct *work)
ret = handle_mm_fault(vma, address, flags);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret & VM_FAULT_ERROR)
/* failed to service fault */
@@ -517,13 +515,12 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
struct amd_iommu_fault *iommu_fault;
struct pasid_state *pasid_state;
struct device_state *dev_state;
+ struct pci_dev *pdev = NULL;
unsigned long flags;
struct fault *fault;
bool finish;
u16 tag, devid;
int ret;
- struct iommu_dev_data *dev_data;
- struct pci_dev *pdev = NULL;
iommu_fault = data;
tag = iommu_fault->tag & 0x1ff;
@@ -534,12 +531,11 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
devid & 0xff);
if (!pdev)
return -ENODEV;
- dev_data = get_dev_data(&pdev->dev);
- /* In kdump kernel pci dev is not initialized yet -> send INVALID */
ret = NOTIFY_DONE;
- if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
- && dev_data->defer_attach) {
+
+ /* In kdump kernel pci dev is not initialized yet -> send INVALID */
+ if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out;
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index 74d97a886e93..c75b9d957b70 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -150,6 +150,8 @@ static const struct arm_smmu_impl arm_mmu500_impl = {
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
+ const struct device_node *np = smmu->dev->of_node;
+
/*
* We will inevitably have to combine model-specific implementation
* quirks with platform-specific integration quirks, but everything
@@ -166,11 +168,11 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
break;
}
- if (of_property_read_bool(smmu->dev->of_node,
- "calxeda,smmu-secure-config-access"))
+ if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
- if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
+ if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") ||
+ of_device_is_compatible(np, "qcom,sc7180-smmu-500"))
return qcom_smmu_impl_init(smmu);
return smmu;
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
index 24c071c1d8b0..cf01d0215a39 100644
--- a/drivers/iommu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include "arm-smmu.h"
@@ -11,12 +12,29 @@ struct qcom_smmu {
struct arm_smmu_device smmu;
};
+static const struct of_device_id qcom_smmu_client_of_match[] = {
+ { .compatible = "qcom,adreno" },
+ { .compatible = "qcom,mdp4" },
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+ { .compatible = "qcom,sc7180-mss-pil" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sdm845-mss-pil" },
+ { }
+};
+
+static int qcom_smmu_def_domain_type(struct device *dev)
+{
+ const struct of_device_id *match =
+ of_match_device(qcom_smmu_client_of_match, dev);
+
+ return match ? IOMMU_DOMAIN_IDENTITY : 0;
+}
+
static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
int ret;
- arm_mmu500_reset(smmu);
-
/*
* To address performance degradation in non-real time clients,
* such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
@@ -30,8 +48,21 @@ static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
return ret;
}
+static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ const struct device_node *np = smmu->dev->of_node;
+
+ arm_mmu500_reset(smmu);
+
+ if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
+ return qcom_sdm845_smmu500_reset(smmu);
+
+ return 0;
+}
+
static const struct arm_smmu_impl qcom_smmu_impl = {
- .reset = qcom_sdm845_smmu500_reset,
+ .def_domain_type = qcom_smmu_def_domain_type,
+ .reset = qcom_smmu500_reset,
};
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
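The def_domain_type() hook added above lets an SMMU implementation steer the core's default-domain choice per device: returning IOMMU_DOMAIN_IDENTITY (as qcom_smmu_def_domain_type() does for the listed firmware-managed clients) yields a bypass default domain, while returning 0 leaves the decision to the core. A minimal sketch of the same pattern; the compatible string and example_ identifiers are placeholders, not from the patch.

#include <linux/iommu.h>
#include <linux/of_device.h>

static const struct of_device_id example_identity_clients[] = {
	{ .compatible = "vendor,example-display" },	/* placeholder client */
	{ }
};

static int example_def_domain_type(struct device *dev)
{
	if (of_match_device(example_identity_clients, dev))
		return IOMMU_DOMAIN_IDENTITY;	/* request a bypass default domain */

	return 0;	/* no preference - core picks, typically IOMMU_DOMAIN_DMA */
}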
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 82508730feb7..f578677a5c41 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -171,6 +171,8 @@
#define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
+#define ARM_SMMU_REG_SZ 0xe00
+
/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2)
#define MSI_CFG2_SH GENMASK(5, 4)
@@ -628,6 +630,7 @@ struct arm_smmu_strtab_cfg {
struct arm_smmu_device {
struct device *dev;
void __iomem *base;
+ void __iomem *page1;
#define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
@@ -664,7 +667,6 @@ struct arm_smmu_device {
#define ARM_SMMU_MAX_ASIDS (1 << 16)
unsigned int asid_bits;
- DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);
#define ARM_SMMU_MAX_VMIDS (1 << 16)
unsigned int vmid_bits;
@@ -724,6 +726,8 @@ struct arm_smmu_option_prop {
const char *prop;
};
+static DEFINE_XARRAY_ALLOC1(asid_xa);
+
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -733,9 +737,8 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset,
struct arm_smmu_device *smmu)
{
- if ((offset > SZ_64K) &&
- (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY))
- offset -= SZ_64K;
+ if (offset > SZ_64K)
+ return smmu->page1 + offset - SZ_64K;
return smmu->base + offset;
}
@@ -1763,6 +1766,14 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
cdcfg->cdtab = NULL;
}
+static void arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
+{
+ if (!cd->asid)
+ return;
+
+ xa_erase(&asid_xa, cd->asid);
+}
+
/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
@@ -2448,10 +2459,9 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
- if (cfg->cdcfg.cdtab) {
+ if (cfg->cdcfg.cdtab)
arm_smmu_free_cd_tables(smmu_domain);
- arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
- }
+ arm_smmu_free_asid(&cfg->cd);
} else {
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
if (cfg->vmid)
@@ -2466,14 +2476,15 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
- int asid;
+ u32 asid;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
- if (asid < 0)
- return asid;
+ ret = xa_alloc(&asid_xa, &asid, &cfg->cd,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+ if (ret)
+ return ret;
cfg->s1cdmax = master->ssid_bits;
@@ -2506,7 +2517,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
out_free_cd_tables:
arm_smmu_free_cd_tables(smmu_domain);
out_free_asid:
- arm_smmu_bitmap_free(smmu->asid_map, asid);
+ arm_smmu_free_asid(&cfg->cd);
return ret;
}
@@ -2652,26 +2663,20 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
}
}
-#ifdef CONFIG_PCI_ATS
static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
{
- struct pci_dev *pdev;
+ struct device *dev = master->dev;
struct arm_smmu_device *smmu = master->smmu;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
- !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
+ if (!(smmu->features & ARM_SMMU_FEAT_ATS))
return false;
- pdev = to_pci_dev(master->dev);
- return !pdev->untrusted && pdev->ats_cap;
-}
-#else
-static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
-{
- return false;
+ if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
+ return false;
+
+ return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
-#endif
static void arm_smmu_enable_ats(struct arm_smmu_master *master)
{
@@ -2914,27 +2919,26 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
static struct iommu_ops arm_smmu_ops;
-static int arm_smmu_add_device(struct device *dev)
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
int i, ret;
struct arm_smmu_device *smmu;
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct iommu_group *group;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
if (!smmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
master->dev = dev;
master->smmu = smmu;
@@ -2975,43 +2979,24 @@ static int arm_smmu_add_device(struct device *dev)
master->ssid_bits = min_t(u8, master->ssid_bits,
CTXDESC_LINEAR_CDMAX);
- ret = iommu_device_link(&smmu->iommu, dev);
- if (ret)
- goto err_disable_pasid;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto err_unlink;
- }
+ return &smmu->iommu;
- iommu_group_put(group);
- return 0;
-
-err_unlink:
- iommu_device_unlink(&smmu->iommu, dev);
-err_disable_pasid:
- arm_smmu_disable_pasid(master);
err_free_master:
kfree(master);
dev_iommu_priv_set(dev, NULL);
- return ret;
+ return ERR_PTR(ret);
}
-static void arm_smmu_remove_device(struct device *dev)
+static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master *master;
- struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = dev_iommu_priv_get(dev);
- smmu = master->smmu;
arm_smmu_detach_dev(master);
- iommu_group_remove_device(dev);
- iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_disable_pasid(master);
kfree(master);
iommu_fwspec_free(dev);
@@ -3138,8 +3123,8 @@ static struct iommu_ops arm_smmu_ops = {
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
@@ -4021,6 +4006,18 @@ err_reset_pci_ops: __maybe_unused;
return err;
}
+static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
+ resource_size_t size)
+{
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = start,
+ .end = start + size - 1,
+ };
+
+ return devm_ioremap_resource(dev, &res);
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
@@ -4056,10 +4053,23 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
ioaddr = res->start;
- smmu->base = devm_ioremap_resource(dev, res);
+ /*
+ * Don't map the IMPLEMENTATION DEFINED regions, since they may contain
+ * the PMCG registers which are reserved by the PMU driver.
+ */
+ smmu->base = arm_smmu_ioremap(dev, ioaddr, ARM_SMMU_REG_SZ);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
+ if (arm_smmu_resource_size(smmu) > SZ_64K) {
+ smmu->page1 = arm_smmu_ioremap(dev, ioaddr + SZ_64K,
+ ARM_SMMU_REG_SZ);
+ if (IS_ERR(smmu->page1))
+ return PTR_ERR(smmu->page1);
+ } else {
+ smmu->page1 = smmu->base;
+ }
+
/* Interrupt lines */
irq = platform_get_irq_byname_optional(pdev, "combined");
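Two arm-smmu-v3.c changes above deserve a note: the MMIO space is now mapped as two slices (the base registers plus page 1) so the IMPLEMENTATION DEFINED region claimed by the SMMU PMCG driver is never remapped, and ASID allocation moves from a per-SMMU bitmap to a global xarray that also stores a pointer to the owning context descriptor. A minimal sketch of that xarray allocator pattern, using hypothetical example_ names and only the <linux/xarray.h> calls that appear in the patch:

#include <linux/xarray.h>

/* IDs start at 1, matching DEFINE_XARRAY_ALLOC1() in the driver */
static DEFINE_XARRAY_ALLOC1(example_asid_xa);

static int example_alloc_asid(void *owner_cd, unsigned int asid_bits, u32 *asid)
{
	/* Storing the owner (rather than a dummy value) lets later users,
	 * e.g. shared-virtual-addressing code, look a context up by ASID. */
	return xa_alloc(&example_asid_xa, asid, owner_cd,
			XA_LIMIT(1, (1U << asid_bits) - 1), GFP_KERNEL);
}

static void example_free_asid(u32 asid)
{
	if (asid)		/* ASID 0 is never handed out */
		xa_erase(&example_asid_xa, asid);
}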
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a6a5796e9c41..243bc4cb2705 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -220,7 +220,7 @@ static int arm_smmu_register_legacy_master(struct device *dev,
* With the legacy DT binding in play, we have no guarantees about
* probe order, but then we're also not doing default domains, so we can
* delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
+ * and that way ensure that no probe_device() calls get missed.
*/
static int arm_smmu_legacy_bus_init(void)
{
@@ -1062,7 +1062,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
- struct iommu_group *group;
int i, idx, ret;
mutex_lock(&smmu->stream_map_mutex);
@@ -1090,18 +1089,9 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
cfg->smendx[i] = (s16)idx;
}
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto out_err;
- }
- iommu_group_put(group);
-
/* It worked! Now, poke the actual hardware */
- for_each_cfg_sme(cfg, fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx)
arm_smmu_write_sme(smmu, idx);
- smmu->s2crs[idx].group = group;
- }
mutex_unlock(&smmu->stream_map_mutex);
return 0;
@@ -1172,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/*
* FIXME: The arch/arm DMA API code tries to attach devices to its own
- * domains between of_xlate() and add_device() - we have no way to cope
+ * domains between of_xlate() and probe_device() - we have no way to cope
* with that, so until ARM gets converted to rely on groups and default
* domains, just say no (but more politely than by dereferencing NULL).
* This should be at least a WARN_ON once that's sorted.
@@ -1382,7 +1372,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
return dev ? dev_get_drvdata(dev) : NULL;
}
-static int arm_smmu_add_device(struct device *dev)
+static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{
struct arm_smmu_device *smmu = NULL;
struct arm_smmu_master_cfg *cfg;
@@ -1403,7 +1393,7 @@ static int arm_smmu_add_device(struct device *dev)
} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
ret = -EINVAL;
@@ -1444,21 +1434,19 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_cfg_free;
- iommu_device_link(&smmu->iommu, dev);
-
device_link_add(dev, smmu->dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
- return 0;
+ return &smmu->iommu;
out_cfg_free:
kfree(cfg);
out_free:
iommu_fwspec_free(dev);
- return ret;
+ return ERR_PTR(ret);
}
-static void arm_smmu_remove_device(struct device *dev)
+static void arm_smmu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_master_cfg *cfg;
@@ -1475,13 +1463,11 @@ static void arm_smmu_remove_device(struct device *dev)
if (ret < 0)
return;
- iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(cfg, fwspec);
arm_smmu_rpm_put(smmu);
dev_iommu_priv_set(dev, NULL);
- iommu_group_remove_device(dev);
kfree(cfg);
iommu_fwspec_free(dev);
}
@@ -1512,6 +1498,11 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
else
group = generic_device_group(dev);
+ /* Remember group for faster lookups */
+ if (!IS_ERR(group))
+ for_each_cfg_sme(cfg, fwspec, i, idx)
+ smmu->s2crs[idx].group = group;
+
return group;
}
@@ -1618,6 +1609,17 @@ static void arm_smmu_get_resv_regions(struct device *dev,
iommu_dma_get_resv_regions(dev, head);
}
+static int arm_smmu_def_domain_type(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ const struct arm_smmu_impl *impl = cfg->smmu->impl;
+
+ if (impl && impl->def_domain_type)
+ return impl->def_domain_type(dev);
+
+ return 0;
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1628,14 +1630,15 @@ static struct iommu_ops arm_smmu_ops = {
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
+ .probe_device = arm_smmu_probe_device,
+ .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
+ .def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -2253,7 +2256,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
return -ENODEV;
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
- dev_err(&pdev->dev, "removing device with active domains!\n");
+ dev_notice(&pdev->dev, "disabling translation\n");
arm_smmu_bus_init(NULL);
iommu_device_unregister(&smmu->iommu);
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index 8d1cd54d82a6..d172c024be61 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -386,6 +386,7 @@ struct arm_smmu_impl {
int (*init_context)(struct arm_smmu_domain *smmu_domain);
void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
int status);
+ int (*def_domain_type)(struct device *dev);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ba128d1cdaee..4959f5df21bd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
/* Non-coherent atomic allocation? Easy */
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_free_from_pool(cpu_addr, alloc_size))
+ dma_free_from_pool(dev, cpu_addr, alloc_size))
return;
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
- cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
+ gfp);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index f77dae7ba7d4..60a2970c37ff 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -963,6 +963,7 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
warn_invalid_dmar(phys_addr, " returns all ones");
goto unmap;
}
+ iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -1156,12 +1157,11 @@ static inline void reclaim_free_desc(struct q_inval *qi)
}
}
-static int qi_check_fault(struct intel_iommu *iommu, int index)
+static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
int head, tail;
struct q_inval *qi = iommu->qi;
- int wait_index = (index + 1) % QI_LENGTH;
int shift = qi_shift(iommu);
if (qi->desc_status[wait_index] == QI_ABORT)
@@ -1224,17 +1224,21 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
}
/*
- * Submit the queued invalidation descriptor to the remapping
- * hardware unit and wait for its completion.
+ * Function to submit invalidation descriptors of all types to the queued
+ * invalidation interface (QI). Multiple descriptors can be submitted at a
+ * time; a wait descriptor is appended to each submission to ensure that the
+ * hardware has completed the invalidation before returning. Wait descriptors
+ * can be part of the submission, but they will not be polled for completion.
*/
-int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options)
{
- int rc;
struct q_inval *qi = iommu->qi;
- int offset, shift, length;
struct qi_desc wait_desc;
int wait_index, index;
unsigned long flags;
+ int offset, shift;
+ int rc, i;
if (!qi)
return 0;
@@ -1243,32 +1247,41 @@ restart:
rc = 0;
raw_spin_lock_irqsave(&qi->q_lock, flags);
- while (qi->free_cnt < 3) {
+ /*
+ * Check if we have enough empty slots in the queue to submit,
+ * the calculation is based on:
+ * # of desc + 1 wait desc + 1 space between head and tail
+ */
+ while (qi->free_cnt < count + 2) {
raw_spin_unlock_irqrestore(&qi->q_lock, flags);
cpu_relax();
raw_spin_lock_irqsave(&qi->q_lock, flags);
}
index = qi->free_head;
- wait_index = (index + 1) % QI_LENGTH;
+ wait_index = (index + count) % QI_LENGTH;
shift = qi_shift(iommu);
- length = 1 << shift;
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+ for (i = 0; i < count; i++) {
+ offset = ((index + i) % QI_LENGTH) << shift;
+ memcpy(qi->desc + offset, &desc[i], 1 << shift);
+ qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
+ }
+ qi->desc_status[wait_index] = QI_IN_USE;
- offset = index << shift;
- memcpy(qi->desc + offset, desc, length);
wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+ if (options & QI_OPT_WAIT_DRAIN)
+ wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
wait_desc.qw2 = 0;
wait_desc.qw3 = 0;
offset = wait_index << shift;
- memcpy(qi->desc + offset, &wait_desc, length);
+ memcpy(qi->desc + offset, &wait_desc, 1 << shift);
- qi->free_head = (qi->free_head + 2) % QI_LENGTH;
- qi->free_cnt -= 2;
+ qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
+ qi->free_cnt -= count + 1;
/*
* update the HW tail register indicating the presence of
@@ -1284,7 +1297,7 @@ restart:
* a deadlock where the interrupt context can wait indefinitely
* for free slots in the queue.
*/
- rc = qi_check_fault(iommu, index);
+ rc = qi_check_fault(iommu, index, wait_index);
if (rc)
break;
@@ -1293,7 +1306,8 @@ restart:
raw_spin_lock(&qi->q_lock);
}
- qi->desc_status[index] = QI_DONE;
+ for (i = 0; i < count; i++)
+ qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
reclaim_free_desc(qi);
raw_spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -1317,7 +1331,7 @@ void qi_global_iec(struct intel_iommu *iommu)
desc.qw3 = 0;
/* should never fail */
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
@@ -1331,7 +1345,7 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -1355,7 +1369,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
@@ -1377,7 +1391,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
/* PASID-based IOTLB invalidation */
@@ -1418,7 +1432,46 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
QI_EIOTLB_AM(mask);
}
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
+}
+
+/* PASID-based device IOTLB Invalidate */
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order, u64 granu)
+{
+ unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
+ desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(pfsid);
+ desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
+
+ /*
+	 * If the S bit is 0, we only flush a single page. If the S bit is set,
+	 * the least significant zero bit indicates the invalidation address
+ * range. VT-d spec 6.5.2.6.
+ * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
+ * size order = 0 is PAGE_SIZE 4KB
+ * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
+ * ECAP.
+ */
+ desc.qw1 |= addr & ~mask;
+ if (size_order)
+ desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+
+ qi_submit_sync(iommu, &desc, 1, 0);
+}
+
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
+ u64 granu, int pasid)
+{
+ struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
+ desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
+ QI_PC_GRAN(granu) | QI_PC_TYPE;
+ qi_submit_sync(iommu, &desc, 1, 0);
}
/*
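With the dmar.c rework above, qi_submit_sync() takes an array of descriptors, a count and an options word: the caller's descriptors are queued back to back, one wait descriptor is appended for the whole batch (QI_OPT_WAIT_DRAIN additionally sets QI_IWD_PRQ_DRAIN in it), and the queue must have count + 2 free slots (the descriptors, the wait descriptor, and the head/tail gap). A minimal sketch of a batched caller, reusing the descriptor encodings of qi_flush_context() and qi_flush_iotlb(); the example_ function name is hypothetical.

#include <linux/intel-iommu.h>

static void example_flush_domain(struct intel_iommu *iommu, u16 did)
{
	struct qi_desc desc[2] = {};

	/* Domain-selective context-cache invalidation */
	desc[0].qw0 = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(did) |
		      QI_CC_GRAN(DMA_CCMD_DOMAIN_INVL) | QI_CC_TYPE;

	/* Domain-selective IOTLB invalidation */
	desc[1].qw0 = QI_IOTLB_DID(did) |
		      QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;

	/* Both descriptors complete under a single wait descriptor */
	qi_submit_sync(iommu, desc, 2, 0);
}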
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 186ff5cc975c..60c8a56e4a3f 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1235,19 +1235,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
return phys;
}
-static int exynos_iommu_add_device(struct device *dev)
+static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
- struct iommu_group *group;
if (!has_sysmmu(dev))
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group))
- return PTR_ERR(group);
+ return ERR_PTR(-ENODEV);
list_for_each_entry(data, &owner->controllers, owner_node) {
/*
@@ -1259,12 +1253,15 @@ static int exynos_iommu_add_device(struct device *dev)
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME);
}
- iommu_group_put(group);
- return 0;
+ /* There is always at least one entry, see exynos_iommu_of_xlate() */
+ data = list_first_entry(&owner->controllers,
+ struct sysmmu_drvdata, owner_node);
+
+ return &data->iommu;
}
-static void exynos_iommu_remove_device(struct device *dev)
+static void exynos_iommu_release_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct sysmmu_drvdata *data;
@@ -1282,7 +1279,6 @@ static void exynos_iommu_remove_device(struct device *dev)
iommu_group_put(group);
}
}
- iommu_group_remove_device(dev);
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
@@ -1331,8 +1327,8 @@ static const struct iommu_ops exynos_iommu_ops = {
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
- .add_device = exynos_iommu_add_device,
- .remove_device = exynos_iommu_remove_device,
+ .probe_device = exynos_iommu_probe_device,
+ .release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
};
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 06828e2698d5..928d37771ece 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -1016,25 +1016,13 @@ static struct iommu_group *fsl_pamu_device_group(struct device *dev)
return group;
}
-static int fsl_pamu_add_device(struct device *dev)
+static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
{
- struct iommu_group *group;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
-
- iommu_device_link(&pamu_iommu, dev);
-
- return 0;
+ return &pamu_iommu;
}
-static void fsl_pamu_remove_device(struct device *dev)
+static void fsl_pamu_release_device(struct device *dev)
{
- iommu_device_unlink(&pamu_iommu, dev);
- iommu_group_remove_device(dev);
}
static const struct iommu_ops fsl_pamu_ops = {
@@ -1048,8 +1036,8 @@ static const struct iommu_ops fsl_pamu_ops = {
.iova_to_phys = fsl_pamu_iova_to_phys,
.domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr,
- .add_device = fsl_pamu_add_device,
- .remove_device = fsl_pamu_remove_device,
+ .probe_device = fsl_pamu_probe_device,
+ .release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
};
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index a386b83e0e34..3c0c67a99c7b 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -131,7 +131,7 @@ static int hyperv_irq_remapping_activate(struct irq_domain *domain,
return 0;
}
-static struct irq_domain_ops hyperv_ir_domain_ops = {
+static const struct irq_domain_ops hyperv_ir_domain_ops = {
.alloc = hyperv_irq_remapping_alloc,
.free = hyperv_irq_remapping_free,
.activate = hyperv_irq_remapping_activate,
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 3eb1fe240fb0..cf1ebb98e418 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -372,6 +372,66 @@ static int domain_translation_struct_show(struct seq_file *m, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+static void invalidation_queue_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ int index, shift = qi_shift(iommu);
+ struct qi_desc *desc;
+ int offset;
+
+ if (ecap_smts(iommu->ecap))
+ seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
+ else
+ seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");
+
+ for (index = 0; index < QI_LENGTH; index++) {
+ offset = index << shift;
+ desc = iommu->qi->desc + offset;
+ if (ecap_smts(iommu->ecap))
+ seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
+ index, desc->qw0, desc->qw1,
+ desc->qw2, desc->qw3,
+ iommu->qi->desc_status[index]);
+ else
+ seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
+ index, desc->qw0, desc->qw1,
+ iommu->qi->desc_status[index]);
+ }
+}
+
+static int invalidation_queue_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ struct q_inval *qi;
+ int shift;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ qi = iommu->qi;
+ shift = qi_shift(iommu);
+
+ if (!qi || !ecap_qis(iommu->ecap))
+ continue;
+
+ seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);
+
+ raw_spin_lock_irqsave(&qi->q_lock, flags);
+ seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
+ (u64)virt_to_phys(qi->desc),
+ dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
+ dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
+ invalidation_queue_entry_show(m, iommu);
+ raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(invalidation_queue);
+
#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
struct intel_iommu *iommu)
@@ -490,6 +550,8 @@ void __init intel_iommu_debugfs_init(void)
debugfs_create_file("domain_translation_struct", 0444,
intel_iommu_debug, NULL,
&domain_translation_struct_fops);
+ debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
+ NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
NULL, &ir_translation_struct_fops);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0182cff2c7ac..648a785e078a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -296,31 +296,6 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
-
-/*
- * This is a DMA domain allocated through the iommu domain allocation
- * interface. But one or more devices belonging to this domain have
- * been chosen to use a private domain. We should avoid to use the
- * map/unmap/iova_to_phys APIs on it.
- */
-#define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
-
-/*
- * When VT-d works in the scalable mode, it allows DMA translation to
- * happen through either first level or second level page table. This
- * bit marks that the DMA translation for the domain goes through the
- * first level page table, otherwise, it goes through the second level.
- */
-#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(2)
-
-/*
- * Domain represents a virtual machine which demands iommu nested
- * translation mode support.
- */
-#define DOMAIN_FLAG_NESTING_MODE BIT(3)
-
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
if (domain->iommu_refcnt[idx])
@@ -355,11 +330,6 @@ static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
- struct device *dev);
-static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu);
-static bool device_is_rmrr_locked(struct device *dev);
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -395,6 +365,21 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
+struct device_domain_info *get_domain_info(struct device *dev)
+{
+ struct device_domain_info *info;
+
+ if (!dev)
+ return NULL;
+
+ info = dev->archdata.iommu;
+ if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
+ info == DEFER_DEVICE_DOMAIN_INFO))
+ return NULL;
+
+ return info;
+}
+
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -446,12 +431,6 @@ static void init_translation_status(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
-/* Convert generic 'struct iommu_domain to private struct dmar_domain */
-static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct dmar_domain, domain);
-}
-
static int __init intel_iommu_setup(char *str)
{
if (!str)
@@ -480,8 +459,7 @@ static int __init intel_iommu_setup(char *str)
pr_info("Intel-IOMMU: scalable mode supported\n");
intel_iommu_sm = 1;
} else if (!strncmp(str, "tboot_noforce", 13)) {
- printk(KERN_INFO
- "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
+ pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
} else if (!strncmp(str, "nobounce", 8)) {
pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
@@ -1454,8 +1432,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
info->pri_enabled = 1;
#endif
- if (!pdev->untrusted && info->ats_supported &&
- pci_ats_page_aligned(pdev) &&
+ if (info->ats_supported && pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
@@ -1763,6 +1740,9 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (ecap_prs(iommu->ecap))
intel_svm_finish_prq(iommu);
}
+ if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+ ioasid_unregister_allocator(&iommu->pasid_allocator);
+
#endif
}
@@ -1911,11 +1891,6 @@ static int dmar_init_reserved_ranges(void)
return 0;
}
-static void domain_reserve_special_ranges(struct dmar_domain *domain)
-{
- copy_reserved_iova(&reserved_iova_list, &domain->iovad);
-}
-
static inline int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
@@ -1930,65 +1905,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
-{
- int adjust_width, agaw;
- unsigned long sagaw;
- int ret;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
- if (!intel_iommu_strict) {
- ret = init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova, iova_entry_free);
- if (ret)
- pr_info("iova flush queue initialization failed\n");
- }
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
-
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
-
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
-
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
-
- domain->nid = iommu->node;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
@@ -1996,7 +1912,8 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
/* destroy iovas */
- put_iova_domain(&domain->iovad);
+ if (domain->domain.type == IOMMU_DOMAIN_DMA)
+ put_iova_domain(&domain->iovad);
if (domain->pgd) {
struct page *freelist;
@@ -2518,11 +2435,8 @@ struct dmar_domain *find_domain(struct device *dev)
if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
return NULL;
- if (dev_is_pci(dev))
- dev = &pci_real_dma_dev(to_pci_dev(dev))->dev;
-
/* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (likely(info))
return info->domain;
@@ -2545,7 +2459,7 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
struct device_domain_info *info;
list_for_each_entry(info, &device_domain_list, global)
- if (info->iommu->segment == segment && info->bus == bus &&
+ if (info->segment == segment && info->bus == bus &&
info->devfn == devfn)
return info;
@@ -2582,6 +2496,12 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
flags);
}
+static bool dev_is_real_dma_subdevice(struct device *dev)
+{
+ return dev && dev_is_pci(dev) &&
+ pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
+}
+
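
dev_is_real_dma_subdevice() returns true only when pci_real_dma_dev() resolves to a different PCI device, i.e. the device's DMA is issued with another device's requester ID. A hedged sketch of how a caller could pick the BDF to record, mirroring the dmar_insert_one_dev_info() change below (the helper name example_requester_id is illustrative):

static u16 example_requester_id(struct device *dev, u8 bus, u8 devfn)
{
	if (dev_is_real_dma_subdevice(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		/* Record the subdevice's own bus/devfn, not the real DMA device's. */
		return PCI_DEVID(pdev->bus->number, pdev->devfn);
	}

	return PCI_DEVID(bus, devfn);
}
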
static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
int bus, int devfn,
struct device *dev,
@@ -2596,8 +2516,18 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (!info)
return NULL;
- info->bus = bus;
- info->devfn = devfn;
+ if (!dev_is_real_dma_subdevice(dev)) {
+ info->bus = bus;
+ info->devfn = devfn;
+ info->segment = iommu->segment;
+ } else {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->segment = pci_domain_nr(pdev->bus);
+ }
+
info->ats_supported = info->pasid_supported = info->pri_supported = 0;
info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
info->ats_qdep = 0;
@@ -2611,10 +2541,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
- if (!pdev->untrusted &&
- !pci_ats_disabled() &&
- ecap_dev_iotlb_support(iommu->ecap) &&
- pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
+ if (ecap_dev_iotlb_support(iommu->ecap) &&
+ pci_ats_supported(pdev) &&
dmar_find_matched_atsr_unit(pdev))
info->ats_supported = 1;
@@ -2637,7 +2565,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (!found) {
struct device_domain_info *info2;
- info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+ info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
+ info->devfn);
if (info2) {
found = info2->domain;
info2->dev = dev;
@@ -2704,108 +2633,10 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
return domain;
}
-static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
-{
- *(u16 *)opaque = alias;
- return 0;
-}
-
-static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
-{
- struct device_domain_info *info;
- struct dmar_domain *domain = NULL;
- struct intel_iommu *iommu;
- u16 dma_alias;
- unsigned long flags;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
- PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff);
- if (info) {
- iommu = info->iommu;
- domain = info->domain;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- /* DMA alias already has a domain, use it */
- if (info)
- goto out;
- }
-
- /* Allocate and initialize new domain for the device */
- domain = alloc_domain(0);
- if (!domain)
- return NULL;
- if (domain_init(domain, iommu, gaw)) {
- domain_exit(domain);
- return NULL;
- }
-
-out:
- return domain;
-}
-
-static struct dmar_domain *set_domain_for_dev(struct device *dev,
- struct dmar_domain *domain)
-{
- struct intel_iommu *iommu;
- struct dmar_domain *tmp;
- u16 req_id, dma_alias;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
-
- req_id = ((u16)bus << 8) | devfn;
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
- /* register PCI DMA alias device */
- if (req_id != dma_alias) {
- tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
-
- if (!tmp || tmp != domain)
- return tmp;
- }
- }
-
- tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (!tmp || tmp != domain)
- return tmp;
-
- return domain;
-}
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
+ unsigned long first_vpfn,
+ unsigned long last_vpfn)
{
- unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
- unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
- if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
- dma_to_mm_pfn(last_vpfn))) {
- pr_err("Reserving iova failed\n");
- return -ENOMEM;
- }
-
- pr_debug("Mapping reserved region %llx-%llx\n", start, end);
/*
* RMRR range might have overlap with physical memory range,
* clear it first
@@ -2817,45 +2648,6 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
DMA_PTE_READ|DMA_PTE_WRITE);
}
-static int domain_prepare_identity_map(struct device *dev,
- struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
-{
- /* For _hardware_ passthrough, don't bother. But for software
- passthrough, we do it anyway -- it may indicate a memory
- range which is reserved in E820, so which didn't get set
- up to start with in si_domain */
- if (domain == si_domain && hw_pass_through) {
- dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
- start, end);
- return 0;
- }
-
- dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
-
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
-
- if (end >> agaw_to_width(domain->agaw)) {
- WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- agaw_to_width(domain->agaw),
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
-
- return iommu_domain_identity_map(domain, start, end);
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
@@ -2882,7 +2674,8 @@ static int __init si_domain_init(int hw)
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
- PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ mm_to_dma_pfn(start_pfn),
+ mm_to_dma_pfn(end_pfn));
if (ret)
return ret;
}
@@ -2911,17 +2704,6 @@ static int __init si_domain_init(int hw)
return 0;
}
-static int identity_mapping(struct device *dev)
-{
- struct device_domain_info *info;
-
- info = dev->archdata.iommu;
- if (info)
- return (info->domain == si_domain);
-
- return 0;
-}
-
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
@@ -3048,31 +2830,6 @@ static int device_def_domain_type(struct device *dev)
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
return IOMMU_DOMAIN_IDENTITY;
-
- /*
- * We want to start off with all devices in the 1:1 domain, and
- * take them out later if we find they can't access all of memory.
- *
- * However, we can't do this for PCI devices behind bridges,
- * because all PCI devices behind the same bridge will end up
- * with the same source-id on their transactions.
- *
- * Practically speaking, we can't change things around for these
- * devices at run-time, because we can't be sure there'll be no
- * DMA transactions in flight for any of their siblings.
- *
- * So PCI devices (unless they're on the root bus) as well as
- * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
- * the 1:1 domain, just in _case_ one of their siblings turns out
- * not to be able to map all of memory.
- */
- if (!pci_is_pcie(pdev)) {
- if (!pci_is_root_bus(pdev->bus))
- return IOMMU_DOMAIN_DMA;
- if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return IOMMU_DOMAIN_DMA;
- } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return IOMMU_DOMAIN_DMA;
}
return 0;
@@ -3297,6 +3054,85 @@ out_unmap:
return ret;
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
+{
+ struct intel_iommu *iommu = data;
+ ioasid_t ioasid;
+
+ if (!iommu)
+ return INVALID_IOASID;
+ /*
+ * VT-d virtual command interface always uses the full 20 bit
+ * PASID range. Host can partition guest PASID range based on
+ * policies but it is out of guest's control.
+ */
+ if (min < PASID_MIN || max > intel_pasid_max_id)
+ return INVALID_IOASID;
+
+ if (vcmd_alloc_pasid(iommu, &ioasid))
+ return INVALID_IOASID;
+
+ return ioasid;
+}
+
+static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
+{
+ struct intel_iommu *iommu = data;
+
+ if (!iommu)
+ return;
+ /*
+ * Sanity checking of the ioasid owner is done at the upper layer, e.g. VFIO.
+ * We can only free the PASID when all the devices are unbound.
+ */
+ if (ioasid_find(NULL, ioasid, NULL)) {
+ pr_alert("Cannot free active IOASID %d\n", ioasid);
+ return;
+ }
+ vcmd_free_pasid(iommu, ioasid);
+}
+
+static void register_pasid_allocator(struct intel_iommu *iommu)
+{
+ /*
+ * If we are running in the host, no need for a custom allocator
+ * because PASIDs are allocated host system-wide.
+ */
+ if (!cap_caching_mode(iommu->cap))
+ return;
+
+ if (!sm_supported(iommu)) {
+ pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
+ return;
+ }
+
+ /*
+ * Register a custom PASID allocator if we are running in a guest;
+ * guest PASIDs must be obtained via the virtual command interface.
+ * There can be multiple vIOMMUs in each guest but only one allocator
+ * is active. All vIOMMU allocators will eventually be calling the same
+ * host allocator.
+ */
+ if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+ return;
+
+ pr_info("Register custom PASID allocator\n");
+ iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
+ iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
+ iommu->pasid_allocator.pdata = (void *)iommu;
+ if (ioasid_register_allocator(&iommu->pasid_allocator)) {
+ pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
+ /*
+ * Disable scalable mode on this IOMMU if there
+ * is no custom allocator. Mixing SM capable vIOMMU
+ * and non-SM vIOMMU are not supported.
+ */
+ intel_iommu_sm = 0;
+ }
+}
+#endif
+
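
Once register_pasid_allocator() has installed the virtual-command allocator, guest-side PASID allocations that go through the generic IOASID layer are serviced by intel_vcmd_ioasid_alloc()/intel_vcmd_ioasid_free(). A rough sketch of such a round trip (illustrative only; the real allocation sites are in the SVM bind paths, and the ioasid_alloc()/ioasid_free() signatures are assumed to be the ones from this kernel series):

static void example_guest_pasid_roundtrip(void)
{
	ioasid_t pasid;

	/* Routed to intel_vcmd_ioasid_alloc(), which issues VCMD_CMD_ALLOC. */
	pasid = ioasid_alloc(NULL, PASID_MIN, intel_pasid_max_id - 1, NULL);
	if (pasid == INVALID_IOASID)
		return;

	/* Routed to intel_vcmd_ioasid_free(), which issues VCMD_CMD_FREE. */
	ioasid_free(pasid);
}
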
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -3414,6 +3250,9 @@ static int __init init_dmars(void)
*/
for_each_active_iommu(iommu, drhd) {
iommu_flush_write_buffer(iommu);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ register_pasid_allocator(iommu);
+#endif
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
@@ -3531,100 +3370,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
return iova_pfn;
}
-static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
-{
- struct dmar_domain *domain, *tmp;
- struct dmar_rmrr_unit *rmrr;
- struct device *i_dev;
- int i, ret;
-
- /* Device shouldn't be attached by any domains. */
- domain = find_domain(dev);
- if (domain)
- return NULL;
-
- domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- goto out;
-
- /* We have a new domain - setup possible RMRRs for the device */
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, i_dev) {
- if (i_dev != dev)
- continue;
-
- ret = domain_prepare_identity_map(dev, domain,
- rmrr->base_address,
- rmrr->end_address);
- if (ret)
- dev_err(dev, "Mapping reserved region failed\n");
- }
- }
- rcu_read_unlock();
-
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
-
-out:
- if (!domain)
- dev_err(dev, "Allocating domain failed\n");
- else
- domain->domain.type = IOMMU_DOMAIN_DMA;
-
- return domain;
-}
-
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
- int ret;
-
- if (iommu_dummy(dev))
- return false;
-
- if (unlikely(attach_deferred(dev)))
- do_deferred_attach(dev);
-
- ret = identity_mapping(dev);
- if (ret) {
- u64 dma_mask = *dev->dma_mask;
-
- if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
-
- if (dma_mask >= dma_direct_get_required_mask(dev))
- return false;
-
- /*
- * 32 bit DMA is removed from si_domain and fall back to
- * non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- struct iommu_domain *domain;
- struct dmar_domain *dmar_domain;
-
- domain = iommu_get_domain_for_dev(dev);
- if (domain) {
- dmar_domain = to_dmar_domain(domain);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- }
- dmar_remove_one_dev_info(dev);
- get_private_domain_for_dev(dev);
- }
-
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- }
-
- return true;
-}
-
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
@@ -3638,6 +3383,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
+
domain = find_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3689,20 +3437,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- return __intel_map_single(dev, page_to_phys(page) + offset,
- size, dir, *dev->dma_mask);
- return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+ return __intel_map_single(dev, page_to_phys(page) + offset,
+ size, dir, *dev->dma_mask);
}
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- return __intel_map_single(dev, phys_addr, size, dir,
- *dev->dma_mask);
- return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+ return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,17 +3496,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- intel_unmap(dev, dev_addr, size);
- else
- dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+ intel_unmap(dev, dev_addr, size);
}
static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (iommu_need_mapping(dev))
- intel_unmap(dev, dev_addr, size);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3773,8 +3512,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
- if (!iommu_need_mapping(dev))
- return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3809,9 +3548,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
- if (!iommu_need_mapping(dev))
- return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3829,9 +3565,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
- if (!iommu_need_mapping(dev))
- return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
for_each_sg(sglist, sg, nelems, i) {
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
}
@@ -3855,8 +3588,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (!iommu_need_mapping(dev))
- return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
domain = find_domain(dev);
if (!domain)
@@ -3903,8 +3637,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
static u64 intel_get_required_mask(struct device *dev)
{
- if (!iommu_need_mapping(dev))
- return dma_direct_get_required_mask(dev);
return DMA_BIT_MASK(32);
}
@@ -4813,58 +4545,37 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
struct memory_notify *mhp = v;
- unsigned long long start, end;
- unsigned long start_vpfn, last_vpfn;
+ unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+ unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+ mhp->nr_pages - 1);
switch (val) {
case MEM_GOING_ONLINE:
- start = mhp->start_pfn << PAGE_SHIFT;
- end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
- if (iommu_domain_identity_map(si_domain, start, end)) {
- pr_warn("Failed to build identity map for [%llx-%llx]\n",
- start, end);
+ if (iommu_domain_identity_map(si_domain,
+ start_vpfn, last_vpfn)) {
+ pr_warn("Failed to build identity map for [%lx-%lx]\n",
+ start_vpfn, last_vpfn);
return NOTIFY_BAD;
}
break;
case MEM_OFFLINE:
case MEM_CANCEL_ONLINE:
- start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
- last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
- while (start_vpfn <= last_vpfn) {
- struct iova *iova;
+ {
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
struct page *freelist;
- iova = find_iova(&si_domain->iovad, start_vpfn);
- if (iova == NULL) {
- pr_debug("Failed get IOVA for PFN %lx\n",
- start_vpfn);
- break;
- }
-
- iova = split_and_remove_iova(&si_domain->iovad, iova,
- start_vpfn, last_vpfn);
- if (iova == NULL) {
- pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
- start_vpfn, last_vpfn);
- return NOTIFY_BAD;
- }
-
- freelist = domain_unmap(si_domain, iova->pfn_lo,
- iova->pfn_hi);
+ freelist = domain_unmap(si_domain,
+ start_vpfn, last_vpfn);
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
iommu_flush_iotlb_psi(iommu, si_domain,
- iova->pfn_lo, iova_size(iova),
+ start_vpfn, mhp->nr_pages,
!freelist, 0);
rcu_read_unlock();
dma_free_pagelist(freelist);
-
- start_vpfn = iova->pfn_hi + 1;
- free_iova_mem(iova);
}
break;
}
@@ -4892,8 +4603,9 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
for (did = 0; did < cap_ndoms(iommu->cap); did++) {
domain = get_iommu_domain(iommu, (u16)did);
- if (!domain)
+ if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
continue;
+
free_cpu_cached_iovas(cpu, &domain->iovad);
}
}
@@ -5186,18 +4898,6 @@ int __init intel_iommu_init(void)
}
up_write(&dmar_global_lock);
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
- /*
- * If the system has no untrusted device or the user has decided
- * to disable the bounce page mechanisms, we don't need swiotlb.
- * Mark this and the pre-allocated bounce pages will be released
- * later.
- */
- if (!has_untrusted_dev() || intel_no_bounce)
- swiotlb = 0;
-#endif
- dma_ops = &intel_dma_ops;
-
init_iommu_pm_ops();
down_read(&dmar_global_lock);
@@ -5283,10 +4983,11 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
if (info->dev) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
- PASID_RID2PASID);
+ PASID_RID2PASID, false);
iommu_disable_dev_iotlb(info);
- domain_context_clear(iommu, info->dev);
+ if (!dev_is_real_dma_subdevice(info->dev))
+ domain_context_clear(iommu, info->dev);
intel_pasid_free_table(info->dev);
}
@@ -5296,12 +4997,6 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
- /* free the private domain */
- if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
- list_empty(&domain->devices))
- domain_exit(info->domain);
-
free_devinfo_mem(info);
}
@@ -5311,9 +5006,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
- if (info && info != DEFER_DEVICE_DOMAIN_INFO
- && info != DUMMY_DEVICE_DOMAIN_INFO)
+ info = get_domain_info(dev);
+ if (info)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -5322,9 +5016,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- domain_reserve_special_ranges(domain);
-
/* calculate AGAW */
domain->gaw = guest_width;
adjust_width = guestwidth_to_adjustwidth(guest_width);
@@ -5343,11 +5034,21 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
return 0;
}
+static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
+{
+ init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+ copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
+
+ if (!intel_iommu_strict &&
+ init_iova_flush_queue(&dmar_domain->iovad,
+ iommu_flush_iova, iova_entry_free))
+ pr_info("iova flush queue initialization failed\n");
+}
+
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
- int ret;
switch (type) {
case IOMMU_DOMAIN_DMA:
@@ -5364,13 +5065,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
return NULL;
}
- if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
- ret = init_iova_flush_queue(&dmar_domain->iovad,
- iommu_flush_iova,
- iova_entry_free);
- if (ret)
- pr_info("iova flush queue initialization failed\n");
- }
+ if (type == IOMMU_DOMAIN_DMA)
+ intel_init_iova_domain(dmar_domain);
domain_update_iommu_cap(dmar_domain);
@@ -5403,7 +5099,7 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
static inline bool
is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
return info && info->auxd_enabled &&
domain->type == IOMMU_DOMAIN_UNMANAGED;
@@ -5412,7 +5108,7 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
static void auxiliary_link_device(struct dmar_domain *domain,
struct device *dev)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
@@ -5425,7 +5121,7 @@ static void auxiliary_link_device(struct dmar_domain *domain,
static void auxiliary_unlink_device(struct dmar_domain *domain,
struct device *dev)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
@@ -5513,13 +5209,13 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
return;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
iommu = info->iommu;
auxiliary_unlink_device(domain, dev);
spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+ intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
domain_detach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
@@ -5626,6 +5322,176 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
aux_domain_remove_dev(to_dmar_domain(domain), dev);
}
+/*
+ * 2D array for converting and sanitizing IOMMU generic TLB granularity to
+ * VT-d granularity. Invalidation is typically included in the unmap operation
+ * as a result of DMA or VFIO unmap. However, for assigned devices the guest
+ * owns the first level page tables. Invalidations of translation caches in the
+ * guest are trapped and passed down to the host.
+ *
+ * vIOMMU in the guest will only expose first level page tables; therefore,
+ * we do not support IOTLB granularity for requests without PASID (second level).
+ *
+ * For example, to find the VT-d granularity encoding for IOTLB
+ * type and page selective granularity within PASID:
+ * X: indexed by iommu cache type
+ * Y: indexed by enum iommu_inv_granularity
+ * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
+ */
+
+static const int
+inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
+ /*
+ * PASID based IOTLB invalidation: PASID selective (per PASID),
+ * page selective (address granularity)
+ */
+ {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
+ /* PASID based dev TLBs */
+ {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
+ /* PASID cache */
+ {-EINVAL, -EINVAL, -EINVAL}
+};
+
+static inline int to_vtd_granularity(int type, int granu)
+{
+ return inv_type_granu_table[type][granu];
+}
+
+static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
+{
+ u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
+
+ /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc.
+ * The IOMMU cache invalidate API passes granu_size in bytes, and the number
+ * of contiguous granules of that size.
+ */
+ return order_base_2(nr_pages);
+}
+
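
A quick worked check of the size encoding (illustrative, not part of the patch): 512 contiguous 4 KiB granules describe 2 MiB, nr_pages = (4096 * 512) >> 12 = 512, and order_base_2(512) = 9, matching the "9 for 2MB" note above. In code form:

static inline void example_vtd_size_check(void)
{
	/* 512 x 4KiB granules -> 2MiB -> VT-d size order 9. */
	WARN_ON(to_vtd_size(4096, 512) != 9);
}
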
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static int
+intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ int cache_type;
+ u8 bus, devfn;
+ u16 did, sid;
+ int ret = 0;
+ u64 size = 0;
+
+ if (!inv_info || !dmar_domain ||
+ inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+ return -EINVAL;
+
+ if (!dev || !dev_is_pci(dev))
+ return -ENODEV;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ spin_lock(&iommu->lock);
+ info = get_domain_info(dev);
+ if (!info) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ did = dmar_domain->iommu_did[iommu->seq_id];
+ sid = PCI_DEVID(bus, devfn);
+
+ /* Size is only valid in address selective invalidation */
+ if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
+ size = to_vtd_size(inv_info->addr_info.granule_size,
+ inv_info->addr_info.nb_granules);
+
+ for_each_set_bit(cache_type,
+ (unsigned long *)&inv_info->cache,
+ IOMMU_CACHE_INV_TYPE_NR) {
+ int granu = 0;
+ u64 pasid = 0;
+
+ granu = to_vtd_granularity(cache_type, inv_info->granularity);
+ if (granu == -EINVAL) {
+ pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
+ cache_type, inv_info->granularity);
+ break;
+ }
+
+ /*
+ * PASID is stored in different locations based on the
+ * granularity.
+ */
+ if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
+ (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
+ pasid = inv_info->pasid_info.pasid;
+ else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
+ (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
+ pasid = inv_info->addr_info.pasid;
+
+ switch (BIT(cache_type)) {
+ case IOMMU_CACHE_INV_TYPE_IOTLB:
+ if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
+ size &&
+ (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
+ pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
+ inv_info->addr_info.addr, size);
+ ret = -ERANGE;
+ goto out_unlock;
+ }
+
+ /*
+ * If granu is PASID-selective, address is ignored.
+ * We use npages = -1 to indicate that.
+ */
+ qi_flush_piotlb(iommu, did, pasid,
+ mm_to_dma_pfn(inv_info->addr_info.addr),
+ (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
+ inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+
+ /*
+ * Always flush device IOTLB if ATS is enabled. vIOMMU
+ * in the guest may assume IOTLB flush is inclusive,
+ * which is more efficient.
+ */
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(iommu, sid,
+ info->pfsid, pasid,
+ info->ats_qdep,
+ inv_info->addr_info.addr,
+ size, granu);
+ break;
+ case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
+ if (info->ats_enabled)
+ qi_flush_dev_iotlb_pasid(iommu, sid,
+ info->pfsid, pasid,
+ info->ats_qdep,
+ inv_info->addr_info.addr,
+ size, granu);
+ else
+ pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
+ break;
+ default:
+ dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
+ cache_type);
+ ret = -EINVAL;
+ }
+ }
+out_unlock:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return ret;
+}
+#endif
+
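
For reference, a hedged sketch of the descriptor a pass-down caller (for example VFIO acting for a vIOMMU) might hand to this hook for a page-selective IOTLB flush within a PASID; the field names are the ones dereferenced above, while the helper name and the guest_pasid/guest_iova parameters are placeholders:

static void example_build_iotlb_inv(struct iommu_cache_invalidate_info *inv,
				    u64 guest_pasid, u64 guest_iova)
{
	*inv = (struct iommu_cache_invalidate_info) {
		.version	= IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache		= IOMMU_CACHE_INV_TYPE_IOTLB,
		.granularity	= IOMMU_INV_GRANU_ADDR,
		.addr_info	= {
			.flags		= IOMMU_INV_ADDR_FLAGS_PASID,
			.pasid		= guest_pasid,
			.addr		= guest_iova,	/* expected 4K aligned */
			.granule_size	= 4096,
			.nb_granules	= 1,
		},
	};
}
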
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot, gfp_t gfp)
@@ -5781,78 +5647,22 @@ static bool intel_iommu_capable(enum iommu_cap cap)
return false;
}
-static int intel_iommu_add_device(struct device *dev)
+static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
struct intel_iommu *iommu;
- struct iommu_group *group;
u8 bus, devfn;
- int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
- return -ENODEV;
-
- iommu_device_link(&iommu->iommu, dev);
+ return ERR_PTR(-ENODEV);
if (translation_pre_enabled(iommu))
dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
- group = iommu_group_get_for_dev(dev);
-
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto unlink;
- }
-
- iommu_group_put(group);
-
- domain = iommu_get_domain_for_dev(dev);
- dmar_domain = to_dmar_domain(domain);
- if (domain->type == IOMMU_DOMAIN_DMA) {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
- ret = iommu_request_dm_for_dev(dev);
- if (ret) {
- dmar_remove_one_dev_info(dev);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- domain_add_dev_info(si_domain, dev);
- dev_info(dev,
- "Device uses a private identity domain.\n");
- }
- }
- } else {
- if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- dmar_remove_one_dev_info(dev);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- if (!get_private_domain_for_dev(dev)) {
- dev_warn(dev,
- "Failed to get a private domain.\n");
- ret = -ENOMEM;
- goto unlink;
- }
-
- dev_info(dev,
- "Device uses a private dma domain.\n");
- }
- }
- }
-
- if (device_needs_bounce(dev)) {
- dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
- set_dma_ops(dev, &bounce_dma_ops);
- }
-
- return 0;
-
-unlink:
- iommu_device_unlink(&iommu->iommu, dev);
- return ret;
+ return &iommu->iommu;
}
-static void intel_iommu_remove_device(struct device *dev)
+static void intel_iommu_release_device(struct device *dev)
{
struct intel_iommu *iommu;
u8 bus, devfn;
@@ -5863,11 +5673,19 @@ static void intel_iommu_remove_device(struct device *dev)
dmar_remove_one_dev_info(dev);
- iommu_group_remove_device(dev);
+ set_dma_ops(dev, NULL);
+}
- iommu_device_unlink(&iommu->iommu, dev);
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+ struct iommu_domain *domain;
+ domain = iommu_get_domain_for_dev(dev);
if (device_needs_bounce(dev))
+ set_dma_ops(dev, &bounce_dma_ops);
+ else if (domain && domain->type == IOMMU_DOMAIN_DMA)
+ set_dma_ops(dev, &intel_dma_ops);
+ else
set_dma_ops(dev, NULL);
}
@@ -5945,7 +5763,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
spin_lock(&iommu->lock);
ret = -EINVAL;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !info->pasid_supported)
goto out;
@@ -6041,7 +5859,7 @@ static int intel_iommu_enable_auxd(struct device *dev)
return -ENODEV;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
info->auxd_enabled = 1;
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -6054,7 +5872,7 @@ static int intel_iommu_disable_auxd(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!WARN_ON(!info))
info->auxd_enabled = 0;
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -6107,6 +5925,14 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
return !!siov_find_pci_dvsec(to_pci_dev(dev));
}
+ if (feat == IOMMU_DEV_FEAT_SVA) {
+ struct device_domain_info *info = get_domain_info(dev);
+
+ return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
+ info->pasid_supported && info->pri_supported &&
+ info->ats_supported;
+ }
+
return false;
}
@@ -6116,6 +5942,16 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
if (feat == IOMMU_DEV_FEAT_AUX)
return intel_iommu_enable_auxd(dev);
+ if (feat == IOMMU_DEV_FEAT_SVA) {
+ struct device_domain_info *info = get_domain_info(dev);
+
+ if (!info)
+ return -EINVAL;
+
+ if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
+ return 0;
+ }
+
return -ENODEV;
}
@@ -6131,7 +5967,7 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
static bool
intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
{
- struct device_domain_info *info = dev->archdata.iommu;
+ struct device_domain_info *info = get_domain_info(dev);
if (feat == IOMMU_DEV_FEAT_AUX)
return scalable_mode_support() && info && info->auxd_enabled;
@@ -6198,8 +6034,9 @@ const struct iommu_ops intel_iommu_ops = {
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
+ .probe_device = intel_iommu_probe_device,
+ .probe_finalize = intel_iommu_probe_finalize,
+ .release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.apply_resv_region = intel_iommu_apply_resv_region,
@@ -6209,7 +6046,16 @@ const struct iommu_ops intel_iommu_ops = {
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
+ .def_domain_type = device_def_domain_type,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+#ifdef CONFIG_INTEL_IOMMU_SVM
+ .cache_invalidate = intel_iommu_sva_invalidate,
+ .sva_bind_gpasid = intel_svm_bind_gpasid,
+ .sva_unbind_gpasid = intel_svm_unbind_gpasid,
+ .sva_bind = intel_svm_bind,
+ .sva_unbind = intel_svm_unbind,
+ .sva_get_pasid = intel_svm_get_pasid,
+#endif
};
static void quirk_iommu_igfx(struct pci_dev *dev)
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 22b30f10b396..c81f0f17c6ba 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -27,6 +27,63 @@
static DEFINE_SPINLOCK(pasid_lock);
u32 intel_pasid_max_id = PASID_MAX;
+int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
+{
+ unsigned long flags;
+ u8 status_code;
+ int ret = 0;
+ u64 res;
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
+ IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
+ !(res & VCMD_VRSP_IP), res);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ status_code = VCMD_VRSP_SC(res);
+ switch (status_code) {
+ case VCMD_VRSP_SC_SUCCESS:
+ *pasid = VCMD_VRSP_RESULT_PASID(res);
+ break;
+ case VCMD_VRSP_SC_NO_PASID_AVAIL:
+ pr_info("IOMMU: %s: No PASID available\n", iommu->name);
+ ret = -ENOSPC;
+ break;
+ default:
+ ret = -ENODEV;
+ pr_warn("IOMMU: %s: Unexpected error code %d\n",
+ iommu->name, status_code);
+ }
+
+ return ret;
+}
+
+void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid)
+{
+ unsigned long flags;
+ u8 status_code;
+ u64 res;
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ dmar_writeq(iommu->reg + DMAR_VCMD_REG,
+ VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
+ IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
+ !(res & VCMD_VRSP_IP), res);
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ status_code = VCMD_VRSP_SC(res);
+ switch (status_code) {
+ case VCMD_VRSP_SC_SUCCESS:
+ break;
+ case VCMD_VRSP_SC_INVALID_PASID:
+ pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
+ break;
+ default:
+ pr_warn("IOMMU: %s: Unexpected error code %d\n",
+ iommu->name, status_code);
+ }
+}
+
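
To make the register protocol concrete, here is an illustrative decode of a completed allocation response (the value 0x123400 is made up): bit 0 (VCMD_VRSP_IP) clear means the command has completed, the status code in bits 2:1 is VCMD_VRSP_SC_SUCCESS, and bits 27:8 carry the allocated PASID:

static inline void example_vcmd_response_decode(void)
{
	u64 res = 0x123400;	/* made-up completed response */

	WARN_ON(res & VCMD_VRSP_IP);				/* bit 0 clear: done */
	WARN_ON(VCMD_VRSP_SC(res) != VCMD_VRSP_SC_SUCCESS);	/* status code 0 */
	WARN_ON(VCMD_VRSP_RESULT_PASID(res) != 0x1234);		/* PASID 0x1234 */
}
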
/*
* Per device pasid table management:
*/
@@ -94,7 +151,7 @@ int intel_pasid_alloc_table(struct device *dev)
int size;
might_sleep();
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
return -EINVAL;
@@ -141,7 +198,7 @@ void intel_pasid_free_table(struct device *dev)
struct pasid_entry *table;
int i, max_pde;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !dev_is_pci(dev) || !info->pasid_table)
return;
@@ -167,7 +224,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
{
struct device_domain_info *info;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info)
return NULL;
@@ -178,7 +235,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !info->pasid_table)
return 0;
@@ -199,7 +256,7 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
return NULL;
dir = pasid_table->table;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
@@ -235,7 +292,20 @@ static inline void pasid_clear_entry(struct pasid_entry *pe)
WRITE_ONCE(pe->val[7], 0);
}
-static void intel_pasid_clear_entry(struct device *dev, int pasid)
+static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
+{
+ WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
+ WRITE_ONCE(pe->val[1], 0);
+ WRITE_ONCE(pe->val[2], 0);
+ WRITE_ONCE(pe->val[3], 0);
+ WRITE_ONCE(pe->val[4], 0);
+ WRITE_ONCE(pe->val[5], 0);
+ WRITE_ONCE(pe->val[6], 0);
+ WRITE_ONCE(pe->val[7], 0);
+}
+
+static void
+intel_pasid_clear_entry(struct device *dev, int pasid, bool fault_ignore)
{
struct pasid_entry *pe;
@@ -243,7 +313,10 @@ static void intel_pasid_clear_entry(struct device *dev, int pasid)
if (WARN_ON(!pe))
return;
- pasid_clear_entry(pe);
+ if (fault_ignore && pasid_pte_is_present(pe))
+ pasid_clear_entry_with_fpd(pe);
+ else
+ pasid_clear_entry(pe);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
@@ -359,18 +432,29 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
+/*
+ * Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_eafe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
+}
+
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, int pasid)
{
struct qi_desc desc;
- desc.qw0 = QI_PC_DID(did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
+ desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
+ QI_PC_PASID(pasid) | QI_PC_TYPE;
desc.qw1 = 0;
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
static void
@@ -384,7 +468,7 @@ iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, iommu);
+ qi_submit_sync(iommu, &desc, 1, 0);
}
static void
@@ -394,7 +478,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device_domain_info *info;
u16 sid, qdep, pfsid;
- info = dev->archdata.iommu;
+ info = get_domain_info(dev);
if (!info || !info->ats_enabled)
return;
@@ -405,8 +489,8 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
-void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, int pasid)
+void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
+ int pasid, bool fault_ignore)
{
struct pasid_entry *pte;
u16 did;
@@ -416,7 +500,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
return;
did = pasid_get_domain_id(pte);
- intel_pasid_clear_entry(dev, pasid);
+ intel_pasid_clear_entry(dev, pasid, fault_ignore);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
@@ -492,7 +576,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
/* Setup Present and PASID Granular Transfer Type: */
- pasid_set_translation_type(pte, 1);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
@@ -500,6 +584,25 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
}
/*
+ * Skip top levels of page tables for iommu which has less agaw
+ * than default. Unnecessary for PT mode.
+ */
+static inline int iommu_skip_agaw(struct dmar_domain *domain,
+ struct intel_iommu *iommu,
+ struct dma_pte **pgd)
+{
+ int agaw;
+
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ *pgd = phys_to_virt(dma_pte_addr(*pgd));
+ if (!dma_pte_present(*pgd))
+ return -EINVAL;
+ }
+
+ return agaw;
+}
+
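
To make the skip concrete (illustrative numbers): a domain whose tables cover 48 bits has an AGAW of 2 (a 4-level table), while an IOMMU that only supports a 39-bit AGAW of 1 wants a 3-level table; the loop then dereferences the top PGD entry once and hands back the 3-level table underneath, or returns -EINVAL if that top entry is not present.
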
+/*
* Set up the scalable mode pasid entry for second only translation type.
*/
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
@@ -522,17 +625,11 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
return -EINVAL;
}
- /*
- * Skip top levels of page tables for iommu which has less agaw
- * than default. Unnecessary for PT mode.
- */
pgd = domain->pgd;
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd)) {
- dev_err(dev, "Invalid domain page table\n");
- return -EINVAL;
- }
+ agaw = iommu_skip_agaw(domain, iommu, &pgd);
+ if (agaw < 0) {
+ dev_err(dev, "Invalid domain page table\n");
+ return -EINVAL;
}
pgd_val = virt_to_phys(pgd);
@@ -548,7 +645,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
pasid_set_domain_id(pte, did);
pasid_set_slptr(pte, pgd_val);
pasid_set_address_width(pte, agaw);
- pasid_set_translation_type(pte, 2);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
@@ -582,7 +679,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
- pasid_set_translation_type(pte, 4);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
pasid_set_fault_enable(pte);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
@@ -596,3 +693,161 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
+
+static int
+intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
+ struct iommu_gpasid_bind_data_vtd *pasid_data)
+{
+ /*
+ * Not all guest PASID table entry fields are passed down during bind,
+ * here we only set up the ones that are dependent on guest settings.
+ * Execution related bits such as NXE, SMEP are not supported.
+ * Other fields, such as snoop related, are set based on host needs
+ * regardless of guest settings.
+ */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
+ if (!ecap_srs(iommu->ecap)) {
+ pr_err_ratelimited("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ pasid_set_sre(pte);
+ }
+
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
+ if (!ecap_eafs(iommu->ecap)) {
+ pr_err_ratelimited("No extended access flag support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+ pasid_set_eafe(pte);
+ }
+
+ /*
+ * Memory type is only applicable to devices inside processor coherent
+ * domain. Will add MTS support once coherent devices are available.
+ */
+ if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
+ pr_warn_ratelimited("No memory type support %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
+ * This could be used for guest shared virtual addressing. In this case, the
+ * first level page tables are used for GVA-GPA translation in the guest, and
+ * the second level page tables are used for GPA-HPA translation.
+ *
+ * @iommu: IOMMU which the device belongs to
+ * @dev: Device to be set up for translation
+ * @gpgd: FLPTPTR: First Level Page translation pointer in GPA
+ * @pasid: PASID to be programmed in the device PASID table
+ * @pasid_data: Additional PASID info from the guest bind request
+ * @domain: Domain info for setting up second level page tables
+ * @addr_width: Address width of the first level (guest)
+ */
+int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
+ pgd_t *gpgd, int pasid,
+ struct iommu_gpasid_bind_data_vtd *pasid_data,
+ struct dmar_domain *domain, int addr_width)
+{
+ struct pasid_entry *pte;
+ struct dma_pte *pgd;
+ int ret = 0;
+ u64 pgd_val;
+ int agaw;
+ u16 did;
+
+ if (!ecap_nest(iommu->ecap)) {
+ pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
+ if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
+ pr_err_ratelimited("Domain is not in nesting mode, %x\n",
+ domain->flags);
+ return -EINVAL;
+ }
+
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (WARN_ON(!pte))
+ return -EINVAL;
+
+ /*
+ * Caller must ensure PASID entry is not in use, i.e. not bind the
+ * same PASID to the same device twice.
+ */
+ if (pasid_pte_is_present(pte))
+ return -EBUSY;
+
+ pasid_clear_entry(pte);
+
+ /* Sanity checking is performed by the caller to make sure the address
+ * widths match in two dimensions:
+ * 1. CPU vs. IOMMU
+ * 2. Guest vs. Host.
+ */
+ switch (addr_width) {
+#ifdef CONFIG_X86
+ case ADDR_WIDTH_5LEVEL:
+ if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
+ !cap_5lp_support(iommu->cap)) {
+ dev_err_ratelimited(dev,
+ "5-level paging not supported\n");
+ return -EINVAL;
+ }
+
+ pasid_set_flpm(pte, 1);
+ break;
+#endif
+ case ADDR_WIDTH_4LEVEL:
+ pasid_set_flpm(pte, 0);
+ break;
+ default:
+ dev_err_ratelimited(dev, "Invalid guest address width %d\n",
+ addr_width);
+ return -EINVAL;
+ }
+
+ /* First level PGD is in GPA, must be supported by the second level */
+ if ((uintptr_t)gpgd > domain->max_addr) {
+ dev_err_ratelimited(dev,
+ "Guest PGD %lx not supported, max %llx\n",
+ (uintptr_t)gpgd, domain->max_addr);
+ return -EINVAL;
+ }
+ pasid_set_flptr(pte, (uintptr_t)gpgd);
+
+ ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
+ if (ret)
+ return ret;
+
+ /* Setup the second level based on the given domain */
+ pgd = domain->pgd;
+
+ agaw = iommu_skip_agaw(domain, iommu, &pgd);
+ if (agaw < 0) {
+ dev_err_ratelimited(dev, "Invalid domain page table\n");
+ return -EINVAL;
+ }
+ pgd_val = virt_to_phys(pgd);
+ pasid_set_slptr(pte, pgd_val);
+ pasid_set_fault_enable(pte);
+
+ did = domain->iommu_did[iommu->seq_id];
+ pasid_set_domain_id(pte, did);
+
+ pasid_set_address_width(pte, agaw);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
+ pasid_set_present(pte);
+ pasid_flush_caches(iommu, pte, pasid, did);
+
+ return ret;
+}
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 92de6df24ccb..c5318d40e0fa 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -15,6 +15,7 @@
#define PASID_MAX 0x100000
#define PASID_PTE_MASK 0x3F
#define PASID_PTE_PRESENT 1
+#define PASID_PTE_FPD 2
#define PDE_PFN_MASK PAGE_MASK
#define PASID_PDE_SHIFT 6
#define MAX_NR_PASID_BITS 20
@@ -23,6 +24,16 @@
#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
+/* Virtual command interface for enlightened pasid management. */
+#define VCMD_CMD_ALLOC 0x1
+#define VCMD_CMD_FREE 0x2
+#define VCMD_VRSP_IP 0x1
+#define VCMD_VRSP_SC(e) (((e) >> 1) & 0x3)
+#define VCMD_VRSP_SC_SUCCESS 0
+#define VCMD_VRSP_SC_NO_PASID_AVAIL 1
+#define VCMD_VRSP_SC_INVALID_PASID 1
+#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 8) & 0xfffff)
+#define VCMD_CMD_OPERAND(e) ((e) << 8)
/*
* Domain ID reserved for pasid entries programmed for first-level
* only and pass-through transfer modes.
@@ -36,6 +47,7 @@
* to vmalloc or even module mappings.
*/
#define PASID_FLAG_SUPERVISOR_MODE BIT(0)
+#define PASID_FLAG_NESTED BIT(1)
/*
* The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
@@ -51,6 +63,11 @@ struct pasid_entry {
u64 val[8];
};
+#define PASID_ENTRY_PGTT_FL_ONLY (1)
+#define PASID_ENTRY_PGTT_SL_ONLY (2)
+#define PASID_ENTRY_PGTT_NESTED (3)
+#define PASID_ENTRY_PGTT_PT (4)
+
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
@@ -99,7 +116,13 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, int pasid);
+int intel_pasid_setup_nested(struct intel_iommu *iommu,
+ struct device *dev, pgd_t *pgd, int pasid,
+ struct iommu_gpasid_bind_data_vtd *pasid_data,
+ struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
- struct device *dev, int pasid);
-
+ struct device *dev, int pasid,
+ bool fault_ignore);
+int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid);
+void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 2998418f0a38..6c87c807a0ab 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -23,6 +23,7 @@
#include "intel-pasid.h"
static irqreturn_t prq_event_thread(int irq, void *d);
+static void intel_svm_drain_prq(struct device *dev, int pasid);
#define PRQ_ORDER 0
@@ -66,6 +67,8 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
+ init_completion(&iommu->prq_complete);
+
return 0;
}
@@ -138,7 +141,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, svm->iommu);
+ qi_submit_sync(svm->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -162,7 +165,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(&desc, svm->iommu);
+ qi_submit_sync(svm->iommu, &desc, 1, 0);
}
}
@@ -206,10 +209,9 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* *has* to handle gracefully without affecting other processes.
*/
rcu_read_lock();
- list_for_each_entry_rcu(sdev, &svm->devs, list) {
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
- }
+ list_for_each_entry_rcu(sdev, &svm->devs, list)
+ intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ svm->pasid, true);
rcu_read_unlock();
}
@@ -226,13 +228,212 @@ static LIST_HEAD(global_svm_list);
list_for_each_entry((sdev), &(svm)->devs, list) \
if ((d) != (sdev)->dev) {} else
-int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ struct iommu_gpasid_bind_data *data)
+{
+ struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct dmar_domain *dmar_domain;
+ struct intel_svm_dev *sdev;
+ struct intel_svm *svm;
+ int ret = 0;
+
+ if (WARN_ON(!iommu) || !data)
+ return -EINVAL;
+
+ if (data->version != IOMMU_GPASID_BIND_VERSION_1 ||
+ data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
+ return -EINVAL;
+
+ if (!dev_is_pci(dev))
+ return -ENOTSUPP;
+
+ /* VT-d supports devices with full 20 bit PASIDs only */
+ if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
+ return -EINVAL;
+
+ /*
+ * We only check the host PASID range; we have no way to check the
+ * guest PASID range.
+ */
+ if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
+ return -EINVAL;
+
+ dmar_domain = to_dmar_domain(domain);
+
+ mutex_lock(&pasid_mutex);
+ svm = ioasid_find(NULL, data->hpasid, NULL);
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
+
+ if (svm) {
+ /*
+ * If we found svm for the PASID, there must be at
+ * least one device bound to it, otherwise svm should be freed.
+ */
+ if (WARN_ON(list_empty(&svm->devs))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for_each_svm_dev(sdev, svm, dev) {
+ /*
+ * For devices with aux domains, we should allow
+ * multiple bind calls with the same PASID and pdev.
+ */
+ if (iommu_dev_feature_enabled(dev,
+ IOMMU_DEV_FEAT_AUX)) {
+ sdev->users++;
+ } else {
+ dev_warn_ratelimited(dev,
+ "Already bound with PASID %u\n",
+ svm->pasid);
+ ret = -EBUSY;
+ }
+ goto out;
+ }
+ } else {
+ /* We come here when the PASID has never been bound to a device. */
+ svm = kzalloc(sizeof(*svm), GFP_KERNEL);
+ if (!svm) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /* REVISIT: upper layer/VFIO can track the host process that binds
+ * the PASID. ioasid_set = mm might be sufficient for vfio to
+ * check pasid VMM ownership. We can drop the following line
+ * once VFIO and IOASID set check is in place.
+ */
+ svm->mm = get_task_mm(current);
+ svm->pasid = data->hpasid;
+ if (data->flags & IOMMU_SVA_GPASID_VAL) {
+ svm->gpasid = data->gpasid;
+ svm->flags |= SVM_FLAG_GUEST_PASID;
+ }
+ ioasid_set_data(data->hpasid, svm);
+ INIT_LIST_HEAD_RCU(&svm->devs);
+ mmput(svm->mm);
+ }
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ sdev->dev = dev;
+
+ /* Only count users if device has aux domains */
+ if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+ sdev->users = 1;
+
+ /* Set up device context entry for PASID if not enabled already */
+ ret = intel_iommu_enable_pasid(iommu, sdev->dev);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
+ kfree(sdev);
+ goto out;
+ }
+
+ /*
+ * PASID table is per device for better security. Therefore, for
+ * each bind of a new device even with an existing PASID, we need to
+ * call the nested mode setup function here.
+ */
+ spin_lock(&iommu->lock);
+ ret = intel_pasid_setup_nested(iommu, dev,
+ (pgd_t *)(uintptr_t)data->gpgd,
+ data->hpasid, &data->vtd, dmar_domain,
+ data->addr_width);
+ spin_unlock(&iommu->lock);
+ if (ret) {
+ dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
+ data->hpasid, ret);
+ /*
+ * The PASID entry should be in the cleared state if nested
+ * mode setup failed, so we only need to clear the IOASID
+ * tracking data so that the free call will succeed.
+ */
+ kfree(sdev);
+ goto out;
+ }
+
+ svm->flags |= SVM_FLAG_GUEST_MODE;
+
+ init_rcu_head(&sdev->rcu);
+ list_add_rcu(&sdev->list, &svm->devs);
+ out:
+ if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
+ ioasid_set_data(data->hpasid, NULL);
+ kfree(svm);
+ }
+
+ mutex_unlock(&pasid_mutex);
+ return ret;
+}
+
+int intel_svm_unbind_gpasid(struct device *dev, int pasid)
+{
+ struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+ struct intel_svm_dev *sdev;
+ struct intel_svm *svm;
+ int ret = -EINVAL;
+
+ if (WARN_ON(!iommu))
+ return -EINVAL;
+
+ mutex_lock(&pasid_mutex);
+ svm = ioasid_find(NULL, pasid, NULL);
+ if (!svm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ERR(svm)) {
+ ret = PTR_ERR(svm);
+ goto out;
+ }
+
+ for_each_svm_dev(sdev, svm, dev) {
+ ret = 0;
+ if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+ sdev->users--;
+ if (!sdev->users) {
+ list_del_rcu(&sdev->list);
+ intel_pasid_tear_down_entry(iommu, dev,
+ svm->pasid, false);
+ intel_svm_drain_prq(dev, svm->pasid);
+ kfree_rcu(sdev, rcu);
+
+ if (list_empty(&svm->devs)) {
+ /*
+ * We do not free the IOASID here because the
+ * IOMMU driver did not allocate it.
+ * Unlike native SVM, IOASID for guest use was
+ * allocated prior to the bind call.
+ * In any case, if the free call comes before
+ * the unbind, IOMMU driver will get notified
+ * and perform cleanup.
+ */
+ ioasid_set_data(pasid, NULL);
+ kfree(svm);
+ }
+ }
+ break;
+ }
+out:
+ mutex_unlock(&pasid_mutex);
+ return ret;
+}
+
+/* Caller must hold pasid_mutex, mm reference */
+static int
+intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
+ struct mm_struct *mm, struct intel_svm_dev **sd)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
- struct mm_struct *mm = NULL;
int pasid_max;
int ret;
@@ -249,16 +450,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
} else
pasid_max = 1 << 20;
+ /* A supervisor PASID bind should have mm == NULL */
if (flags & SVM_FLAG_SUPERVISOR_MODE) {
- if (!ecap_srs(iommu->ecap))
+ if (!ecap_srs(iommu->ecap) || mm) {
+ pr_err("Supervisor PASID with user provided mm.\n");
return -EINVAL;
- } else if (pasid) {
- mm = get_task_mm(current);
- BUG_ON(!mm);
+ }
}
- mutex_lock(&pasid_mutex);
- if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
+ if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
struct intel_svm *t;
list_for_each_entry(t, &global_svm_list, list) {
@@ -296,19 +496,12 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
sdev->dev = dev;
ret = intel_iommu_enable_pasid(iommu, dev);
- if (ret || !pasid) {
- /* If they don't actually want to assign a PASID, this is
- * just an enabling check/preparation. */
- kfree(sdev);
- goto out;
- }
-
- info = dev->archdata.iommu;
- if (!info || !info->pasid_supported) {
+ if (ret) {
kfree(sdev);
goto out;
}
+ info = get_domain_info(dev);
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
if (info->ats_enabled) {
@@ -397,26 +590,24 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
}
list_add_rcu(&sdev->list, &svm->devs);
-
- success:
- *pasid = svm->pasid;
+success:
+ sdev->pasid = svm->pasid;
+ sdev->sva.dev = dev;
+ if (sd)
+ *sd = sdev;
ret = 0;
out:
- mutex_unlock(&pasid_mutex);
- if (mm)
- mmput(mm);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
-int intel_svm_unbind_mm(struct device *dev, int pasid)
+/* Caller must hold pasid_mutex */
+static int intel_svm_unbind_mm(struct device *dev, int pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
int ret = -EINVAL;
- mutex_lock(&pasid_mutex);
iommu = intel_svm_device_to_iommu(dev);
if (!iommu)
goto out;
@@ -442,8 +633,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
* to use. We have a *shared* PASID table, because it's
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
- intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+ intel_pasid_tear_down_entry(iommu, dev,
+ svm->pasid, false);
+ intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
@@ -462,45 +654,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
break;
}
out:
- mutex_unlock(&pasid_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
-
-int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- struct intel_iommu *iommu;
- struct intel_svm *svm;
- int ret = -EINVAL;
-
- mutex_lock(&pasid_mutex);
- iommu = intel_svm_device_to_iommu(dev);
- if (!iommu)
- goto out;
-
- svm = ioasid_find(NULL, pasid, NULL);
- if (!svm)
- goto out;
-
- if (IS_ERR(svm)) {
- ret = PTR_ERR(svm);
- goto out;
- }
- /* init_mm is used in this case */
- if (!svm->mm)
- ret = 1;
- else if (atomic_read(&svm->mm->mm_users) > 0)
- ret = 1;
- else
- ret = 0;
-
- out:
- mutex_unlock(&pasid_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
/* Page request queue descriptor */
struct page_req_dsc {
@@ -557,6 +713,93 @@ static bool is_canonical_address(u64 addr)
return (((saddr << shift) >> shift) == saddr);
}
+/**
+ * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * @dev: target device
+ * @pasid: pasid for draining
+ *
+ * Drain all pending page requests and responses related to @pasid in both
+ * software and hardware. This is supposed to be called after the device
+ * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
+ * and DevTLB have been invalidated.
+ *
+ * It waits until all pending page requests for @pasid in the page fault
+ * queue are completed by the prq handling thread. Then follow the steps
+ * described in VT-d spec CH7.10 to drain all page requests and page
+ * responses pending in the hardware.
+ */
+static void intel_svm_drain_prq(struct device *dev, int pasid)
+{
+ struct device_domain_info *info;
+ struct dmar_domain *domain;
+ struct intel_iommu *iommu;
+ struct qi_desc desc[3];
+ struct pci_dev *pdev;
+ int head, tail;
+ u16 sid, did;
+ int qdep;
+
+ info = get_domain_info(dev);
+ if (WARN_ON(!info || !dev_is_pci(dev)))
+ return;
+
+ if (!info->pri_enabled)
+ return;
+
+ iommu = info->iommu;
+ domain = info->domain;
+ pdev = to_pci_dev(dev);
+ sid = PCI_DEVID(info->bus, info->devfn);
+ did = domain->iommu_did[iommu->seq_id];
+ qdep = pci_ats_queue_depth(pdev);
+
+ /*
+ * Check and wait until all pending page requests in the queue are
+ * handled by the prq handling thread.
+ */
+prq_retry:
+ reinit_completion(&iommu->prq_complete);
+ tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+ head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+ while (head != tail) {
+ struct page_req_dsc *req;
+
+ req = &iommu->prq[head / sizeof(*req)];
+ if (!req->pasid_present || req->pasid != pasid) {
+ head = (head + sizeof(*req)) & PRQ_RING_MASK;
+ continue;
+ }
+
+ wait_for_completion(&iommu->prq_complete);
+ goto prq_retry;
+ }
+
+ /*
+ * Perform steps described in VT-d spec CH7.10 to drain page
+ * requests and responses in hardware.
+ */
+ memset(desc, 0, sizeof(desc));
+ desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
+ QI_IWD_FENCE |
+ QI_IWD_TYPE;
+ desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
+ QI_EIOTLB_DID(did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
+ desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
+ QI_DEV_EIOTLB_SID(sid) |
+ QI_DEV_EIOTLB_QDEP(qdep) |
+ QI_DEIOTLB_TYPE |
+ QI_DEV_IOTLB_PFSID(info->pfsid);
+qi_retry:
+ reinit_completion(&iommu->prq_complete);
+ qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+ wait_for_completion(&iommu->prq_complete);
+ goto qi_retry;
+ }
+}
+
static irqreturn_t prq_event_thread(int irq, void *d)
{
struct intel_iommu *iommu = d;
@@ -620,7 +863,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!mmget_not_zero(svm->mm))
goto bad_req;
- down_read(&svm->mm->mmap_sem);
+ mmap_read_lock(svm->mm);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
goto invalid;
@@ -635,7 +878,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
result = QI_RESP_SUCCESS;
invalid:
- up_read(&svm->mm->mmap_sem);
+ mmap_read_unlock(svm->mm);
mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
@@ -685,12 +928,75 @@ static irqreturn_t prq_event_thread(int irq, void *d)
sizeof(req->priv_data));
resp.qw2 = 0;
resp.qw3 = 0;
- qi_submit_sync(&resp, iommu);
+ qi_submit_sync(iommu, &resp, 1, 0);
}
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
+ /*
+ * Clear the page request overflow bit and wake up all threads that
+ * are waiting for the completion of this handling.
+ */
+ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
+ writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+
+ if (!completion_done(&iommu->prq_complete))
+ complete(&iommu->prq_complete);
+
return IRQ_RETVAL(handled);
}
+
+#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
+struct iommu_sva *
+intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ struct iommu_sva *sva = ERR_PTR(-EINVAL);
+ struct intel_svm_dev *sdev = NULL;
+ int flags = 0;
+ int ret;
+
+ /*
+ * TODO: Consolidate with generic iommu-sva bind after it is merged.
+ * It will require shared SVM data structures, i.e. combine io_mm
+ * and intel_svm etc.
+ */
+ if (drvdata)
+ flags = *(int *)drvdata;
+ mutex_lock(&pasid_mutex);
+ ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
+ if (ret)
+ sva = ERR_PTR(ret);
+ else if (sdev)
+ sva = &sdev->sva;
+ else
+ WARN(!sdev, "SVM bind succeeded with no sdev!\n");
+
+ mutex_unlock(&pasid_mutex);
+
+ return sva;
+}
+
+void intel_svm_unbind(struct iommu_sva *sva)
+{
+ struct intel_svm_dev *sdev;
+
+ mutex_lock(&pasid_mutex);
+ sdev = to_intel_svm_dev(sva);
+ intel_svm_unbind_mm(sdev->dev, sdev->pasid);
+ mutex_unlock(&pasid_mutex);
+}
+
+int intel_svm_get_pasid(struct iommu_sva *sva)
+{
+ struct intel_svm_dev *sdev;
+ int pasid;
+
+ mutex_lock(&pasid_mutex);
+ sdev = to_intel_svm_dev(sva);
+ pasid = sdev->pasid;
+ mutex_unlock(&pasid_mutex);
+
+ return pasid;
+}
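
For context, a minimal sketch of how a device driver might consume the SVA interface that intel_svm_bind()/intel_svm_unbind()/intel_svm_get_pasid() now back. The helper name and error handling are hypothetical; only iommu_sva_bind_device(), iommu_sva_get_pasid() and iommu_sva_unbind_device() come from the IOMMU core, and this is not part of the patch set itself.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

/* Hypothetical driver helper, shown only to illustrate the call flow. */
static int my_drv_enable_sva(struct device *dev)
{
	struct iommu_sva *handle;
	int pasid;

	/* Bind the current process' address space to the device. */
	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* PASID to program into the device's PASID-tagged work descriptors. */
	pasid = iommu_sva_get_pasid(handle);
	dev_info(dev, "bound to PASID %d\n", pasid);

	/* ... submit PASID-tagged DMA here ... */

	iommu_sva_unbind_device(handle);
	return 0;
}
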
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 81e43c1df7ec..a042f123b091 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -151,7 +151,7 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
desc.qw2 = 0;
desc.qw3 = 0;
- return qi_submit_sync(&desc, iommu);
+ return qi_submit_sync(iommu, &desc, 1, 0);
}
static int modify_irte(struct irq_2_iommu *irq_iommu,
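
Returning to the intel_svm_bind_gpasid() path added in intel-svm.c above: a hedged sketch of how a caller (typically VFIO acting on behalf of a guest) might fill struct iommu_gpasid_bind_data. Only the fields the new code reads are set; the surrounding helper, the 48-bit address width and the assumption that the core exposes this through iommu_sva_bind_gpasid() are illustrations, not taken from this diff.

/* Hypothetical caller-side helper; values other than the field names are assumed. */
static int bind_guest_pasid(struct iommu_domain *domain, struct device *dev,
			    u64 guest_pgd, u32 host_pasid, u32 guest_pasid)
{
	struct iommu_gpasid_bind_data data = {
		.version	= IOMMU_GPASID_BIND_VERSION_1,
		.format		= IOMMU_PASID_FORMAT_INTEL_VTD,
		.flags		= IOMMU_SVA_GPASID_VAL,	/* gpasid field is valid */
		.gpgd		= guest_pgd,		/* guest first-level page table */
		.hpasid		= host_pasid,		/* host IOASID, checked against PASID_MAX */
		.gpasid		= guest_pasid,
		.addr_width	= 48,			/* assumed guest address width */
	};

	/* Assumed to dispatch to intel_svm_bind_gpasid() via ops->sva_bind_gpasid. */
	return iommu_sva_bind_gpasid(domain, dev, &data);
}
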
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 03d6a26687bc..d43120eb1dc5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -44,6 +44,7 @@ struct iommu_group {
int id;
struct iommu_domain *default_domain;
struct iommu_domain *domain;
+ struct list_head entry;
};
struct group_device {
@@ -79,6 +80,20 @@ static bool iommu_cmd_line_dma_api(void)
return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}
+static int iommu_alloc_default_domain(struct iommu_group *group,
+ struct device *dev);
+static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+ unsigned type);
+static int __iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev);
+static int __iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+static void __iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+ struct device *dev);
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -175,57 +190,118 @@ static void dev_iommu_free(struct device *dev)
dev->iommu = NULL;
}
-int iommu_probe_device(struct device *dev)
+static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_device *iommu_dev;
+ struct iommu_group *group;
int ret;
- WARN_ON(dev->iommu_group);
if (!ops)
- return -EINVAL;
+ return -ENODEV;
if (!dev_iommu_get(dev))
return -ENOMEM;
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
- goto err_free_dev_param;
+ goto err_free;
}
- ret = ops->add_device(dev);
- if (ret)
- goto err_module_put;
+ iommu_dev = ops->probe_device(dev);
+ if (IS_ERR(iommu_dev)) {
+ ret = PTR_ERR(iommu_dev);
+ goto out_module_put;
+ }
+
+ dev->iommu->iommu_dev = iommu_dev;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto out_release;
+ }
+ iommu_group_put(group);
+
+ if (group_list && !group->default_domain && list_empty(&group->entry))
+ list_add_tail(&group->entry, group_list);
+
+ iommu_device_link(iommu_dev, dev);
return 0;
-err_module_put:
+out_release:
+ ops->release_device(dev);
+
+out_module_put:
module_put(ops->owner);
-err_free_dev_param:
+
+err_free:
dev_iommu_free(dev);
+
return ret;
}
-void iommu_release_device(struct device *dev)
+int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_group *group;
+ int ret;
- if (dev->iommu_group)
- ops->remove_device(dev);
+ ret = __iommu_probe_device(dev, NULL);
+ if (ret)
+ goto err_out;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ goto err_release;
+
+ /*
+ * Try to allocate a default domain - needs support from the
+ * IOMMU driver. There are still some drivers which don't
+ * support default domains, so the return value is not yet
+ * checked.
+ */
+ iommu_alloc_default_domain(group, dev);
+
+ if (group->default_domain)
+ ret = __iommu_attach_device(group->default_domain, dev);
+
+ iommu_create_device_direct_mappings(group, dev);
+
+ iommu_group_put(group);
+
+ if (ret)
+ goto err_release;
+
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
+
+ return 0;
+
+err_release:
+ iommu_release_device(dev);
+
+err_out:
+ return ret;
- if (dev->iommu) {
- module_put(ops->owner);
- dev_iommu_free(dev);
- }
}
-static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
- unsigned type);
-static int __iommu_attach_device(struct iommu_domain *domain,
- struct device *dev);
-static int __iommu_attach_group(struct iommu_domain *domain,
- struct iommu_group *group);
-static void __iommu_detach_group(struct iommu_domain *domain,
- struct iommu_group *group);
+void iommu_release_device(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+ if (!dev->iommu)
+ return;
+
+ iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ iommu_group_remove_device(dev);
+
+ ops->release_device(dev);
+
+ module_put(ops->owner);
+ dev_iommu_free(dev);
+}
static int __init iommu_set_def_domain_type(char *str)
{
@@ -497,6 +573,7 @@ struct iommu_group *iommu_group_alloc(void)
group->kobj.kset = iommu_group_kset;
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->devices);
+ INIT_LIST_HEAD(&group->entry);
BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
@@ -638,8 +715,8 @@ int iommu_group_set_name(struct iommu_group *group, const char *name)
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
-static int iommu_group_create_direct_mappings(struct iommu_group *group,
- struct device *dev)
+static int iommu_create_device_direct_mappings(struct iommu_group *group,
+ struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
struct iommu_resv_region *entry;
@@ -752,8 +829,6 @@ rename:
dev->iommu_group = group;
- iommu_group_create_direct_mappings(group, dev);
-
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
@@ -1371,6 +1446,61 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);
+static int iommu_get_def_domain_type(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ unsigned int type = 0;
+
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+
+ return (type == 0) ? iommu_def_domain_type : type;
+}
+
+static int iommu_group_alloc_default_domain(struct bus_type *bus,
+ struct iommu_group *group,
+ unsigned int type)
+{
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(bus, type);
+ if (!dom && type != IOMMU_DOMAIN_DMA) {
+ dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
+ if (dom)
+ pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
+ type, group->name);
+ }
+
+ if (!dom)
+ return -ENOMEM;
+
+ group->default_domain = dom;
+ if (!group->domain)
+ group->domain = dom;
+
+ if (!iommu_dma_strict) {
+ int attr = 1;
+ iommu_domain_set_attr(dom,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ &attr);
+ }
+
+ return 0;
+}
+
+static int iommu_alloc_default_domain(struct iommu_group *group,
+ struct device *dev)
+{
+ unsigned int type;
+
+ if (group->default_domain)
+ return 0;
+
+ type = iommu_get_def_domain_type(dev);
+
+ return iommu_group_alloc_default_domain(dev->bus, group, type);
+}
+
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
@@ -1381,7 +1511,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
* to the returned IOMMU group, which will already include the provided
* device. The reference should be released with iommu_group_put().
*/
-struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
@@ -1401,59 +1531,37 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (IS_ERR(group))
return group;
- /*
- * Try to allocate a default domain - needs support from the
- * IOMMU driver.
- */
- if (!group->default_domain) {
- struct iommu_domain *dom;
-
- dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
- if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
- dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
- if (dom) {
- dev_warn(dev,
- "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
- iommu_def_domain_type);
- }
- }
-
- group->default_domain = dom;
- if (!group->domain)
- group->domain = dom;
-
- if (dom && !iommu_dma_strict) {
- int attr = 1;
- iommu_domain_set_attr(dom,
- DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
- &attr);
- }
- }
-
ret = iommu_group_add_device(group, dev);
- if (ret) {
- iommu_group_put(group);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto out_put_group;
return group;
+
+out_put_group:
+ iommu_group_put(group);
+
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return group->default_domain;
}
-static int add_iommu_group(struct device *dev, void *data)
+static int probe_iommu_group(struct device *dev, void *data)
{
- int ret = iommu_probe_device(dev);
+ struct list_head *group_list = data;
+ struct iommu_group *group;
+ int ret;
- /*
- * We ignore -ENODEV errors for now, as they just mean that the
- * device is not translated by an IOMMU. We still care about
- * other errors and fail to initialize when they happen.
- */
+ /* Device is probed already if in a group */
+ group = iommu_group_get(dev);
+ if (group) {
+ iommu_group_put(group);
+ return 0;
+ }
+
+ ret = __iommu_probe_device(dev, group_list);
if (ret == -ENODEV)
ret = 0;
@@ -1519,10 +1627,152 @@ static int iommu_bus_notifier(struct notifier_block *nb,
return 0;
}
+struct __group_domain_type {
+ struct device *dev;
+ unsigned int type;
+};
+
+static int probe_get_default_domain_type(struct device *dev, void *data)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct __group_domain_type *gtype = data;
+ unsigned int type = 0;
+
+ if (ops->def_domain_type)
+ type = ops->def_domain_type(dev);
+
+ if (type) {
+ if (gtype->type && gtype->type != type) {
+ dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
+ iommu_domain_type_str(type),
+ dev_name(gtype->dev),
+ iommu_domain_type_str(gtype->type));
+ gtype->type = 0;
+ }
+
+ if (!gtype->dev) {
+ gtype->dev = dev;
+ gtype->type = type;
+ }
+ }
+
+ return 0;
+}
+
+static void probe_alloc_default_domain(struct bus_type *bus,
+ struct iommu_group *group)
+{
+ struct __group_domain_type gtype;
+
+ memset(&gtype, 0, sizeof(gtype));
+
+ /* Ask for default domain requirements of all devices in the group */
+ __iommu_group_for_each_dev(group, &gtype,
+ probe_get_default_domain_type);
+
+ if (!gtype.type)
+ gtype.type = iommu_def_domain_type;
+
+ iommu_group_alloc_default_domain(bus, group, gtype.type);
+
+}
+
+static int iommu_group_do_dma_attach(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+ int ret = 0;
+
+ if (!iommu_is_attach_deferred(domain, dev))
+ ret = __iommu_attach_device(domain, dev);
+
+ return ret;
+}
+
+static int __iommu_group_dma_attach(struct iommu_group *group)
+{
+ return __iommu_group_for_each_dev(group, group->default_domain,
+ iommu_group_do_dma_attach);
+}
+
+static int iommu_group_do_probe_finalize(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+
+ if (domain->ops->probe_finalize)
+ domain->ops->probe_finalize(dev);
+
+ return 0;
+}
+
+static void __iommu_group_dma_finalize(struct iommu_group *group)
+{
+ __iommu_group_for_each_dev(group, group->default_domain,
+ iommu_group_do_probe_finalize);
+}
+
+static int iommu_do_create_direct_mappings(struct device *dev, void *data)
+{
+ struct iommu_group *group = data;
+
+ iommu_create_device_direct_mappings(group, dev);
+
+ return 0;
+}
+
+static int iommu_group_create_direct_mappings(struct iommu_group *group)
+{
+ return __iommu_group_for_each_dev(group, group,
+ iommu_do_create_direct_mappings);
+}
+
+int bus_iommu_probe(struct bus_type *bus)
+{
+ struct iommu_group *group, *next;
+ LIST_HEAD(group_list);
+ int ret;
+
+ /*
+ * This code-path does not allocate the default domain when
+ * creating the iommu group, so do it after the groups are
+ * created.
+ */
+ ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_safe(group, next, &group_list, entry) {
+ /* Remove item from the list */
+ list_del_init(&group->entry);
+
+ mutex_lock(&group->mutex);
+
+ /* Try to allocate default domain */
+ probe_alloc_default_domain(bus, group);
+
+ if (!group->default_domain) {
+ mutex_unlock(&group->mutex);
+ continue;
+ }
+
+ iommu_group_create_direct_mappings(group);
+
+ ret = __iommu_group_dma_attach(group);
+
+ mutex_unlock(&group->mutex);
+
+ if (ret)
+ break;
+
+ __iommu_group_dma_finalize(group);
+ }
+
+ return ret;
+}
+
static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
- int err;
struct notifier_block *nb;
+ int err;
nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
if (!nb)
@@ -1534,7 +1784,7 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
if (err)
goto out_free;
- err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
+ err = bus_iommu_probe(bus);
if (err)
goto out_err;
@@ -2301,71 +2551,6 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
-static int
-request_default_domain_for_dev(struct device *dev, unsigned long type)
-{
- struct iommu_domain *domain;
- struct iommu_group *group;
- int ret;
-
- /* Device must already be in a group before calling this function */
- group = iommu_group_get(dev);
- if (!group)
- return -EINVAL;
-
- mutex_lock(&group->mutex);
-
- ret = 0;
- if (group->default_domain && group->default_domain->type == type)
- goto out;
-
- /* Don't change mappings of existing devices */
- ret = -EBUSY;
- if (iommu_group_device_count(group) != 1)
- goto out;
-
- ret = -ENOMEM;
- domain = __iommu_domain_alloc(dev->bus, type);
- if (!domain)
- goto out;
-
- /* Attach the device to the domain */
- ret = __iommu_attach_group(domain, group);
- if (ret) {
- iommu_domain_free(domain);
- goto out;
- }
-
- /* Make the domain the default for this group */
- if (group->default_domain)
- iommu_domain_free(group->default_domain);
- group->default_domain = domain;
-
- iommu_group_create_direct_mappings(group, dev);
-
- dev_info(dev, "Using iommu %s mapping\n",
- type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
-
- ret = 0;
-out:
- mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
- return ret;
-}
-
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
-{
- return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
-}
-
-/* Request that a device can't be direct mapped by the IOMMU */
-int iommu_request_dma_domain_for_dev(struct device *dev)
-{
- return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
-}
-
void iommu_set_default_passthrough(bool cmd_line)
{
if (cmd_line)
@@ -2643,17 +2828,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
-int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *sva_ops)
-{
- if (handle->ops && handle->ops != sva_ops)
- return -EEXIST;
-
- handle->ops = sva_ops;
- return 0;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
-
int iommu_sva_get_pasid(struct iommu_sva *handle)
{
const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
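
The per-driver conversions that follow all repeat the same shape; here is a minimal sketch of the new contract, with a hypothetical foo_iommu driver standing in for the real ones (the struct, the priv lookup and the ops instance are illustrations, not part of this series).

/* Hypothetical per-IOMMU instance, mirroring what the drivers below keep. */
struct foo_iommu {
	struct iommu_device iommu;
};

static struct iommu_device *foo_iommu_probe_device(struct device *dev)
{
	/* Assumes the driver stashed its instance with dev_iommu_priv_set(). */
	struct foo_iommu *foo = dev_iommu_priv_get(dev);

	/* Not a client of this IOMMU: the core treats -ENODEV as "skip". */
	if (!foo)
		return ERR_PTR(-ENODEV);

	/*
	 * No iommu_group_get_for_dev()/iommu_device_link() calls here any
	 * more; __iommu_probe_device() handles group setup and sysfs links.
	 */
	return &foo->iommu;
}

static void foo_iommu_release_device(struct device *dev)
{
	/* Tear down only driver-private state; the core removes the group. */
}

static const struct iommu_ops foo_iommu_ops = {
	/* domain_alloc/attach_dev/map/unmap etc. omitted for brevity */
	.probe_device	= foo_iommu_probe_device,
	.release_device	= foo_iommu_release_device,
	.device_group	= generic_device_group,
	/* .probe_finalize and .def_domain_type are optional hooks. */
};
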
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 0e6a9536eca6..49fc01f2a28d 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -253,7 +253,7 @@ int iova_cache_get(void)
SLAB_HWCACHE_ALIGN, NULL);
if (!iova_cache) {
mutex_unlock(&iova_cache_mutex);
- printk(KERN_ERR "Couldn't create iova cache\n");
+ pr_err("Couldn't create iova cache\n");
return -ENOMEM;
}
}
@@ -718,8 +718,8 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
if (!new_iova)
- printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
- iova->pfn_lo, iova->pfn_lo);
+ pr_err("Reserve iova range %lx@%lx failed\n",
+ iova->pfn_lo, iova->pfn_lo);
}
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 310cf09feea3..4c2972f3153b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -805,24 +805,8 @@ static int ipmmu_of_xlate(struct device *dev,
static int ipmmu_init_arm_mapping(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
- struct iommu_group *group;
int ret;
- /* Create a device group and add the device to it. */
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
-
- ret = iommu_group_add_device(group, dev);
- iommu_group_put(group);
-
- if (ret < 0) {
- dev_err(dev, "Failed to add device to IPMMU group\n");
- return ret;
- }
-
/*
* Create the ARM mapping, used by the ARM DMA mapping core to allocate
* VAs. This will allocate a corresponding IOMMU domain.
@@ -856,48 +840,39 @@ static int ipmmu_init_arm_mapping(struct device *dev)
return 0;
error:
- iommu_group_remove_device(dev);
if (mmu->mapping)
arm_iommu_release_mapping(mmu->mapping);
return ret;
}
-static int ipmmu_add_device(struct device *dev)
+static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
- struct iommu_group *group;
- int ret;
/*
* Only let through devices that have been verified in xlate()
*/
if (!mmu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
- if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
- ret = ipmmu_init_arm_mapping(dev);
- if (ret)
- return ret;
- } else {
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ return &mmu->iommu;
+}
- iommu_group_put(group);
- }
+static void ipmmu_probe_finalize(struct device *dev)
+{
+ int ret = 0;
- iommu_device_link(&mmu->iommu, dev);
- return 0;
+ if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
+ ret = ipmmu_init_arm_mapping(dev);
+
+ if (ret)
+ dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void ipmmu_remove_device(struct device *dev)
+static void ipmmu_release_device(struct device *dev)
{
- struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
-
- iommu_device_unlink(&mmu->iommu, dev);
arm_iommu_detach_device(dev);
- iommu_group_remove_device(dev);
}
static struct iommu_group *ipmmu_find_group(struct device *dev)
@@ -925,9 +900,11 @@ static const struct iommu_ops ipmmu_ops = {
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
- .add_device = ipmmu_add_device,
- .remove_device = ipmmu_remove_device,
- .device_group = ipmmu_find_group,
+ .probe_device = ipmmu_probe_device,
+ .release_device = ipmmu_release_device,
+ .probe_finalize = ipmmu_probe_finalize,
+ .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
+ ? generic_device_group : ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
};
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 94a6df1bddd6..3d8a63555c25 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -34,7 +34,7 @@ __asm__ __volatile__ ( \
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-DEFINE_SPINLOCK(msm_iommu_lock);
+static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;
@@ -388,43 +388,23 @@ static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
return ret;
}
-static int msm_iommu_add_device(struct device *dev)
+static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
struct msm_iommu_dev *iommu;
- struct iommu_group *group;
unsigned long flags;
spin_lock_irqsave(&msm_iommu_lock, flags);
iommu = find_iommu_for_dev(dev);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
- if (iommu)
- iommu_device_link(&iommu->iommu, dev);
- else
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
- return 0;
+ return &iommu->iommu;
}
-static void msm_iommu_remove_device(struct device *dev)
+static void msm_iommu_release_device(struct device *dev)
{
- struct msm_iommu_dev *iommu;
- unsigned long flags;
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
- iommu = find_iommu_for_dev(dev);
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-
- if (iommu)
- iommu_device_unlink(&iommu->iommu, dev);
-
- iommu_group_remove_device(dev);
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -708,8 +688,8 @@ static struct iommu_ops msm_iommu_ops = {
*/
.iotlb_sync = NULL,
.iova_to_phys = msm_iommu_iova_to_phys,
- .add_device = msm_iommu_add_device,
- .remove_device = msm_iommu_remove_device,
+ .probe_device = msm_iommu_probe_device,
+ .release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5f4d6df59cf6..2be96f1cdbd2 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -441,38 +441,26 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
return pa;
}
-static int mtk_iommu_add_device(struct device *dev)
+static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
- struct iommu_group *group;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return -ENODEV; /* Not a iommu client device */
+ return ERR_PTR(-ENODEV); /* Not an iommu client device */
data = dev_iommu_priv_get(dev);
- iommu_device_link(&data->iommu, dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- return 0;
+ return &data->iommu;
}
-static void mtk_iommu_remove_device(struct device *dev)
+static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = dev_iommu_priv_get(dev);
- iommu_device_unlink(&data->iommu, dev);
-
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -526,8 +514,8 @@ static const struct iommu_ops mtk_iommu_ops = {
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
- .add_device = mtk_iommu_add_device,
- .remove_device = mtk_iommu_remove_device,
+ .probe_device = mtk_iommu_probe_device,
+ .release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
.of_xlate = mtk_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a31be05601c9..c9d79cff4d17 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -265,10 +265,13 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct dma_iommu_mapping *mtk_mapping;
int ret;
- if (!data)
- return -ENODEV;
+ /* Only allow the domain created internally. */
+ mtk_mapping = data->dev->archdata.iommu;
+ if (mtk_mapping->domain != domain)
+ return 0;
if (!data->m4u_dom) {
data->m4u_dom = dom;
@@ -288,9 +291,6 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- if (!data)
- return;
-
mtk_iommu_config(data, dev, false);
}
@@ -416,14 +416,17 @@ static int mtk_iommu_create_mapping(struct device *dev,
return 0;
}
-static int mtk_iommu_add_device(struct device *dev)
+static int mtk_iommu_def_domain_type(struct device *dev)
+{
+ return IOMMU_DOMAIN_UNMANAGED;
+}
+
+static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct dma_iommu_mapping *mtk_mapping;
struct of_phandle_args iommu_spec;
struct of_phandle_iterator it;
struct mtk_iommu_data *data;
- struct iommu_group *group;
int err;
of_for_each_phandle(&it, err, dev->of_node, "iommus",
@@ -442,46 +445,34 @@ static int mtk_iommu_add_device(struct device *dev)
}
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
- return -ENODEV; /* Not a iommu client device */
+ return ERR_PTR(-ENODEV); /* Not an iommu client device */
- /*
- * This is a short-term bodge because the ARM DMA code doesn't
- * understand multi-device groups, but we have to call into it
- * successfully (and not just rely on a normal IOMMU API attach
- * here) in order to set the correct DMA API ops on @dev.
- */
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return PTR_ERR(group);
+ data = dev_iommu_priv_get(dev);
- err = iommu_group_add_device(group, dev);
- iommu_group_put(group);
- if (err)
- return err;
+ return &data->iommu;
+}
- data = dev_iommu_priv_get(dev);
+static void mtk_iommu_probe_finalize(struct device *dev)
+{
+ struct dma_iommu_mapping *mtk_mapping;
+ struct mtk_iommu_data *data;
+ int err;
+
+ data = dev_iommu_priv_get(dev);
mtk_mapping = data->dev->archdata.iommu;
- err = arm_iommu_attach_device(dev, mtk_mapping);
- if (err) {
- iommu_group_remove_device(dev);
- return err;
- }
- return iommu_device_link(&data->iommu, dev);
+ err = arm_iommu_attach_device(dev, mtk_mapping);
+ if (err)
+ dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}
-static void mtk_iommu_remove_device(struct device *dev)
+static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct mtk_iommu_data *data;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = dev_iommu_priv_get(dev);
- iommu_device_unlink(&data->iommu, dev);
-
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -534,8 +525,11 @@ static const struct iommu_ops mtk_iommu_ops = {
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.iova_to_phys = mtk_iommu_iova_to_phys,
- .add_device = mtk_iommu_add_device,
- .remove_device = mtk_iommu_remove_device,
+ .probe_device = mtk_iommu_probe_device,
+ .probe_finalize = mtk_iommu_probe_finalize,
+ .release_device = mtk_iommu_release_device,
+ .def_domain_type = mtk_iommu_def_domain_type,
+ .device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};
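
The mtk_iommu_v1 hunk above is also the first user of the new ->def_domain_type() hook introduced in iommu.c: returning IOMMU_DOMAIN_UNMANAGED keeps the core from allocating a DMA default domain for its devices. A hedged sketch of the callback pattern follows; the device-property check is hypothetical, and when devices in one group disagree, probe_get_default_domain_type() warns and falls back to the global default.

/* Hypothetical callback; only the return-value convention comes from the core. */
static int foo_iommu_def_domain_type(struct device *dev)
{
	/* Assumed firmware hint marking devices that need a 1:1 mapping. */
	if (device_property_read_bool(dev, "example,identity-mapped"))
		return IOMMU_DOMAIN_IDENTITY;

	return 0;	/* no preference: iommu_def_domain_type applies */
}
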
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 887fefcb03b4..c8282cc212cb 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -35,15 +35,6 @@
static const struct iommu_ops omap_iommu_ops;
-struct orphan_dev {
- struct device *dev;
- struct list_head node;
-};
-
-static LIST_HEAD(orphan_dev_list);
-
-static DEFINE_SPINLOCK(orphan_lock);
-
#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
@@ -62,8 +53,6 @@ static DEFINE_SPINLOCK(orphan_lock);
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
-static int _omap_iommu_add_device(struct device *dev);
-
/**
* to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
* @dom: generic iommu domain handle
@@ -1177,7 +1166,6 @@ static int omap_iommu_probe(struct platform_device *pdev)
struct omap_iommu *obj;
struct resource *res;
struct device_node *of = pdev->dev.of_node;
- struct orphan_dev *orphan_dev, *tmp;
if (!of) {
pr_err("%s: only DT-based devices are supported\n", __func__);
@@ -1248,6 +1236,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
goto out_group;
iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+ iommu_device_set_fwnode(&obj->iommu, &of->fwnode);
err = iommu_device_register(&obj->iommu);
if (err)
@@ -1260,13 +1249,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s registered\n", obj->name);
- list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
- err = _omap_iommu_add_device(orphan_dev->dev);
- if (!err) {
- list_del(&orphan_dev->node);
- kfree(orphan_dev);
- }
- }
+ /* Re-probe bus to probe device attached to this IOMMU */
+ bus_iommu_probe(&platform_bus_type);
return 0;
@@ -1657,17 +1641,13 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
-static int _omap_iommu_add_device(struct device *dev)
+static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data, *tmp;
+ struct platform_device *pdev;
struct omap_iommu *oiommu;
- struct iommu_group *group;
struct device_node *np;
- struct platform_device *pdev;
int num_iommus, i;
- int ret;
- struct orphan_dev *orphan_dev;
- unsigned long flags;
/*
* Allocate the archdata iommu structure for DT-based devices.
@@ -1676,7 +1656,7 @@ static int _omap_iommu_add_device(struct device *dev)
* IOMMU users.
*/
if (!dev->of_node)
- return 0;
+ return ERR_PTR(-ENODEV);
/*
* retrieve the count of IOMMU nodes using phandle size as element size
@@ -1689,43 +1669,27 @@ static int _omap_iommu_add_device(struct device *dev)
arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
np = of_parse_phandle(dev->of_node, "iommus", i);
if (!np) {
kfree(arch_data);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
pdev = of_find_device_by_node(np);
if (!pdev) {
of_node_put(np);
kfree(arch_data);
- spin_lock_irqsave(&orphan_lock, flags);
- list_for_each_entry(orphan_dev, &orphan_dev_list,
- node) {
- if (orphan_dev->dev == dev)
- break;
- }
- spin_unlock_irqrestore(&orphan_lock, flags);
-
- if (orphan_dev && orphan_dev->dev == dev)
- return -EPROBE_DEFER;
-
- orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
- orphan_dev->dev = dev;
- spin_lock_irqsave(&orphan_lock, flags);
- list_add(&orphan_dev->node, &orphan_dev_list);
- spin_unlock_irqrestore(&orphan_lock, flags);
- return -EPROBE_DEFER;
+ return ERR_PTR(-ENODEV);
}
oiommu = platform_get_drvdata(pdev);
if (!oiommu) {
of_node_put(np);
kfree(arch_data);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
tmp->iommu_dev = oiommu;
@@ -1734,57 +1698,25 @@ static int _omap_iommu_add_device(struct device *dev)
of_node_put(np);
}
+ dev->archdata.iommu = arch_data;
+
/*
* use the first IOMMU alone for the sysfs device linking.
* TODO: Evaluate if a single iommu_group needs to be
* maintained for both IOMMUs
*/
oiommu = arch_data->iommu_dev;
- ret = iommu_device_link(&oiommu->iommu, dev);
- if (ret) {
- kfree(arch_data);
- return ret;
- }
-
- dev->archdata.iommu = arch_data;
-
- /*
- * IOMMU group initialization calls into omap_iommu_device_group, which
- * needs a valid dev->archdata.iommu pointer
- */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- iommu_device_unlink(&oiommu->iommu, dev);
- dev->archdata.iommu = NULL;
- kfree(arch_data);
- return PTR_ERR(group);
- }
- iommu_group_put(group);
- return 0;
+ return &oiommu->iommu;
}
-static int omap_iommu_add_device(struct device *dev)
-{
- int ret;
-
- ret = _omap_iommu_add_device(dev);
- if (ret == -EPROBE_DEFER)
- return 0;
-
- return ret;
-}
-
-static void omap_iommu_remove_device(struct device *dev)
+static void omap_iommu_release_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
if (!dev->of_node || !arch_data)
return;
- iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
- iommu_group_remove_device(dev);
-
dev->archdata.iommu = NULL;
kfree(arch_data);
@@ -1795,6 +1727,9 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct iommu_group *group = ERR_PTR(-EINVAL);
+ if (!arch_data)
+ return ERR_PTR(-ENODEV);
+
if (arch_data->iommu_dev)
group = iommu_group_ref_get(arch_data->iommu_dev->group);
@@ -1809,8 +1744,8 @@ static const struct iommu_ops omap_iommu_ops = {
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
- .add_device = omap_iommu_add_device,
- .remove_device = omap_iommu_remove_device,
+ .probe_device = omap_iommu_probe_device,
+ .release_device = omap_iommu_release_device,
.device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 5b3b270972f8..c3e1fbd1988c 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -524,14 +524,13 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
}
}
-static int qcom_iommu_add_device(struct device *dev)
+static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
- struct iommu_group *group;
struct device_link *link;
if (!qcom_iommu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
/*
* Establish the link between iommu and master, so that the
@@ -542,28 +541,19 @@ static int qcom_iommu_add_device(struct device *dev)
if (!link) {
dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
dev_name(qcom_iommu->dev), dev_name(dev));
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- iommu_device_link(&qcom_iommu->iommu, dev);
-
- return 0;
+ return &qcom_iommu->iommu;
}
-static void qcom_iommu_remove_device(struct device *dev)
+static void qcom_iommu_release_device(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
if (!qcom_iommu)
return;
- iommu_device_unlink(&qcom_iommu->iommu, dev);
- iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -619,8 +609,8 @@ static const struct iommu_ops qcom_iommu_ops = {
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
- .add_device = qcom_iommu_add_device,
- .remove_device = qcom_iommu_remove_device,
+ .probe_device = qcom_iommu_probe_device,
+ .release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b33cdd5aad81..d25c2486ca07 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1054,40 +1054,28 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
kfree(rk_domain);
}
-static int rk_iommu_add_device(struct device *dev)
+static struct iommu_device *rk_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group;
- struct rk_iommu *iommu;
struct rk_iommudata *data;
+ struct rk_iommu *iommu;
data = dev->archdata.iommu;
if (!data)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
iommu = rk_iommu_from_dev(dev);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
- iommu_group_put(group);
-
- iommu_device_link(&iommu->iommu, dev);
data->link = device_link_add(dev, iommu->dev,
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
- return 0;
+ return &iommu->iommu;
}
-static void rk_iommu_remove_device(struct device *dev)
+static void rk_iommu_release_device(struct device *dev)
{
- struct rk_iommu *iommu;
struct rk_iommudata *data = dev->archdata.iommu;
- iommu = rk_iommu_from_dev(dev);
-
device_link_del(data->link);
- iommu_device_unlink(&iommu->iommu, dev);
- iommu_group_remove_device(dev);
}
static struct iommu_group *rk_iommu_device_group(struct device *dev)
@@ -1126,8 +1114,8 @@ static const struct iommu_ops rk_iommu_ops = {
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
- .add_device = rk_iommu_add_device,
- .remove_device = rk_iommu_remove_device,
+ .probe_device = rk_iommu_probe_device,
+ .release_device = rk_iommu_release_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 1137f3ddcb85..8895dbb705eb 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -87,7 +87,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain_device *domain_device;
unsigned long flags;
int rc;
@@ -139,7 +139,7 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain_device *domain_device, *tmp;
unsigned long flags;
int found = 0;
@@ -166,23 +166,16 @@ static void s390_iommu_detach_device(struct iommu_domain *domain,
}
}
-static int s390_iommu_add_device(struct device *dev)
+static struct iommu_device *s390_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group = iommu_group_get_for_dev(dev);
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
- iommu_device_link(&zdev->iommu_dev, dev);
-
- return 0;
+ return &zdev->iommu_dev;
}
-static void s390_iommu_remove_device(struct device *dev)
+static void s390_iommu_release_device(struct device *dev)
{
- struct zpci_dev *zdev = to_pci_dev(dev)->sysdata;
+ struct zpci_dev *zdev = to_zpci_dev(dev);
struct iommu_domain *domain;
/*
@@ -191,7 +184,7 @@ static void s390_iommu_remove_device(struct device *dev)
* to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
* the attach_dev), removing the device via
* "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
- * only remove_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
+ * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
* notifier.
*
* So let's call detach_dev from here if it hasn't been called before.
@@ -201,9 +194,6 @@ static void s390_iommu_remove_device(struct device *dev)
if (domain)
s390_iommu_detach_device(domain, dev);
}
-
- iommu_device_unlink(&zdev->iommu_dev, dev);
- iommu_group_remove_device(dev);
}
static int s390_iommu_update_trans(struct s390_domain *s390_domain,
@@ -373,8 +363,8 @@ static const struct iommu_ops s390_iommu_ops = {
.map = s390_iommu_map,
.unmap = s390_iommu_unmap,
.iova_to_phys = s390_iommu_iova_to_phys,
- .add_device = s390_iommu_add_device,
- .remove_device = s390_iommu_remove_device,
+ .probe_device = s390_iommu_probe_device,
+ .release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = S390_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
new file mode 100644
index 000000000000..fce605e96aa2
--- /dev/null
+++ b/drivers/iommu/sun50i-iommu.c
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
+// Copyright (C) 2019-2020, Cerno
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define IOMMU_RESET_REG 0x010
+#define IOMMU_ENABLE_REG 0x020
+#define IOMMU_ENABLE_ENABLE BIT(0)
+
+#define IOMMU_BYPASS_REG 0x030
+#define IOMMU_AUTO_GATING_REG 0x040
+#define IOMMU_AUTO_GATING_ENABLE BIT(0)
+
+#define IOMMU_WBUF_CTRL_REG 0x044
+#define IOMMU_OOO_CTRL_REG 0x048
+#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x04c
+#define IOMMU_TTB_REG 0x050
+#define IOMMU_TLB_ENABLE_REG 0x060
+#define IOMMU_TLB_PREFETCH_REG 0x070
+#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m) BIT(m)
+
+#define IOMMU_TLB_FLUSH_REG 0x080
+#define IOMMU_TLB_FLUSH_PTW_CACHE BIT(17)
+#define IOMMU_TLB_FLUSH_MACRO_TLB BIT(16)
+#define IOMMU_TLB_FLUSH_MICRO_TLB(i) (BIT(i) & GENMASK(5, 0))
+
+#define IOMMU_TLB_IVLD_ADDR_REG 0x090
+#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x094
+#define IOMMU_TLB_IVLD_ENABLE_REG 0x098
+#define IOMMU_TLB_IVLD_ENABLE_ENABLE BIT(0)
+
+#define IOMMU_PC_IVLD_ADDR_REG 0x0a0
+#define IOMMU_PC_IVLD_ENABLE_REG 0x0a8
+#define IOMMU_PC_IVLD_ENABLE_ENABLE BIT(0)
+
+#define IOMMU_DM_AUT_CTRL_REG(d) (0x0b0 + ((d) / 2) * 4)
+#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2)))
+#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2) + 1))
+
+#define IOMMU_DM_AUT_OVWT_REG 0x0d0
+#define IOMMU_INT_ENABLE_REG 0x100
+#define IOMMU_INT_CLR_REG 0x104
+#define IOMMU_INT_STA_REG 0x108
+#define IOMMU_INT_ERR_ADDR_REG(i) (0x110 + (i) * 4)
+#define IOMMU_INT_ERR_ADDR_L1_REG 0x130
+#define IOMMU_INT_ERR_ADDR_L2_REG 0x134
+#define IOMMU_INT_ERR_DATA_REG(i) (0x150 + (i) * 4)
+#define IOMMU_L1PG_INT_REG 0x0180
+#define IOMMU_L2PG_INT_REG 0x0184
+
+#define IOMMU_INT_INVALID_L2PG BIT(17)
+#define IOMMU_INT_INVALID_L1PG BIT(16)
+#define IOMMU_INT_MASTER_PERMISSION(m) BIT(m)
+#define IOMMU_INT_MASTER_MASK (IOMMU_INT_MASTER_PERMISSION(0) | \
+ IOMMU_INT_MASTER_PERMISSION(1) | \
+ IOMMU_INT_MASTER_PERMISSION(2) | \
+ IOMMU_INT_MASTER_PERMISSION(3) | \
+ IOMMU_INT_MASTER_PERMISSION(4) | \
+ IOMMU_INT_MASTER_PERMISSION(5))
+#define IOMMU_INT_MASK (IOMMU_INT_INVALID_L1PG | \
+ IOMMU_INT_INVALID_L2PG | \
+ IOMMU_INT_MASTER_MASK)
+
+#define PT_ENTRY_SIZE sizeof(u32)
+
+#define NUM_DT_ENTRIES 4096
+#define DT_SIZE (NUM_DT_ENTRIES * PT_ENTRY_SIZE)
+
+#define NUM_PT_ENTRIES 256
+#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
+
+struct sun50i_iommu {
+ struct iommu_device iommu;
+
+ /* Lock to modify the IOMMU registers */
+ spinlock_t iommu_lock;
+
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *reset;
+ struct clk *clk;
+
+ struct iommu_domain *domain;
+ struct iommu_group *group;
+ struct kmem_cache *pt_pool;
+};
+
+struct sun50i_iommu_domain {
+ struct iommu_domain domain;
+
+ /* Number of devices attached to the domain */
+ refcount_t refcnt;
+
+ /* L1 Page Table */
+ u32 *dt;
+ dma_addr_t dt_dma;
+
+ struct sun50i_iommu *iommu;
+};
+
+static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
+{
+ return container_of(domain, struct sun50i_iommu_domain, domain);
+}
+
+static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
+{
+ return dev_iommu_priv_get(dev);
+}
+
+static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+/*
+ * The Allwinner H6 IOMMU uses a 2-level page table.
+ *
+ * The first level is the usual Directory Table (DT), that consists of
+ * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
+ * Table (PT).
+ *
+ * Each PT consists of 256 4-byte Page Table Entries (PTE), each
+ * pointing to a 4kB page of physical memory.
+ *
+ * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
+ * register that contains its physical address.
+ */
+
+#define SUN50I_IOVA_DTE_MASK GENMASK(31, 20)
+#define SUN50I_IOVA_PTE_MASK GENMASK(19, 12)
+#define SUN50I_IOVA_PAGE_MASK GENMASK(11, 0)
+
+static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
+{
+ return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
+{
+ return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
+{
+ return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
+}
+
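(For scale, using the masks above: an IOVA splits into a 12-bit DTE index, an 8-bit PTE index and a 12-bit page offset, so the single 4096-entry DT of 256-entry PTs covers 4096 * 256 * 4 KiB = 4 GiB of IOVA space.)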
+/*
+ * Each Directory Table Entry has a Page Table address and a valid
+ * bit:
+
+ * +---------------------+-----------+-+
+ * | PT address | Reserved |V|
+ * +---------------------+-----------+-+
+ * 31:10 - Page Table address
+ * 9:2 - Reserved
+ * 1:0 - 1 if the entry is valid
+ */
+
+#define SUN50I_DTE_PT_ADDRESS_MASK GENMASK(31, 10)
+#define SUN50I_DTE_PT_ATTRS GENMASK(1, 0)
+#define SUN50I_DTE_PT_VALID 1
+
+static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
+}
+
+static bool sun50i_dte_is_pt_valid(u32 dte)
+{
+ return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
+}
+
+static u32 sun50i_mk_dte(dma_addr_t pt_dma)
+{
+ return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, an authority index and a valid bit:
+ *
+ * +----------------+-----+-----+-----+---+-----+
+ * | Page address | Rsv | ACI | Rsv | V | Rsv |
+ * +----------------+-----+-----+-----+---+-----+
+ * 31:12 - Page address
+ * 11:8 - Reserved
+ * 7:4 - Authority Control Index
+ * 3:2 - Reserved
+ * 1 - 1 if the entry is valid
+ * 0 - Reserved
+ *
+ * The way permissions work is that the IOMMU has 16 "domains" that
+ * can be configured to give each master either read or write
+ * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain
+ * 0 seems to be the default domain, and its permissions in the
+ * IOMMU_DM_AUT_CTRL_REG are read-only, so it's not really
+ * useful for enforcing any particular permission.
+ *
+ * Each page entry will then have a reference to the domain they are
+ * affected to, so that we can actually enforce them on a per-page
+ * basis.
+ *
+ * In order to make it work with the IOMMU framework, we will be using
+ * 4 different domains, starting at 1: RD_WR, RD, WR and NONE
+ * depending on the permission we want to enforce. Each domain will
+ * have each master setup in the same way, since the IOMMU framework
+ * doesn't seem to restrict page access on a per-device basis. And
+ * then we will use the relevant domain index when generating the page
+ * table entry depending on the permissions we want to be enforced.
+ */
+
+enum sun50i_iommu_aci {
+ SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
+ SUN50I_IOMMU_ACI_NONE,
+ SUN50I_IOMMU_ACI_RD,
+ SUN50I_IOMMU_ACI_WR,
+ SUN50I_IOMMU_ACI_RD_WR,
+};
+
+#define SUN50I_PTE_PAGE_ADDRESS_MASK GENMASK(31, 12)
+#define SUN50I_PTE_ACI_MASK GENMASK(7, 4)
+#define SUN50I_PTE_PAGE_VALID BIT(1)
+
+static phys_addr_t sun50i_pte_get_page_address(u32 pte)
+{
+ return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
+}
+
+static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
+{
+ return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
+}
+
+static bool sun50i_pte_is_page_valid(u32 pte)
+{
+ return pte & SUN50I_PTE_PAGE_VALID;
+}
+
+static u32 sun50i_mk_pte(phys_addr_t page, int prot)
+{
+ enum sun50i_iommu_aci aci;
+ u32 flags = 0;
+
+ if (prot & (IOMMU_READ | IOMMU_WRITE))
+ aci = SUN50I_IOMMU_ACI_RD_WR;
+ else if (prot & IOMMU_READ)
+ aci = SUN50I_IOMMU_ACI_RD;
+ else if (prot & IOMMU_WRITE)
+ aci = SUN50I_IOMMU_ACI_WR;
+ else
+ aci = SUN50I_IOMMU_ACI_NONE;
+
+ flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
+ page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | SUN50I_PTE_PAGE_VALID;
+}
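+
+/*
+ * Illustrative example: sun50i_mk_pte(0x40001000, IOMMU_READ | IOMMU_WRITE)
+ * picks the RD_WR domain (ACI 4), so the resulting PTE is
+ * 0x40001000 | FIELD_PREP(SUN50I_PTE_ACI_MASK, 4) | SUN50I_PTE_PAGE_VALID
+ * == 0x40001042.
+ */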
+
+static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
+ void *vaddr, unsigned int count)
+{
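+ /*
+ * The page tables (the DT and the L2 tables) are mapped with
+ * dma_map_single(), and we rely on the DMA address matching the
+ * physical address (see the WARN_ON in
+ * sun50i_iommu_alloc_page_table()), so virt_to_phys() gives us the
+ * address to sync.
+ */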
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ dma_addr_t dma = virt_to_phys(vaddr);
+ size_t size = count * PT_ENTRY_SIZE;
+
+ dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
+}
+
+static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
+{
+ u32 reg;
+ int ret;
+
+ assert_spin_locked(&iommu->iommu_lock);
+
+ iommu_write(iommu,
+ IOMMU_TLB_FLUSH_REG,
+ IOMMU_TLB_FLUSH_PTW_CACHE |
+ IOMMU_TLB_FLUSH_MACRO_TLB |
+ IOMMU_TLB_FLUSH_MICRO_TLB(5) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(4) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(3) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(2) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(1) |
+ IOMMU_TLB_FLUSH_MICRO_TLB(0));
+
+ ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG,
+ reg, !reg,
+ 1, 2000);
+ if (ret)
+ dev_warn(iommu->dev, "TLB Flush timed out!\n");
+
+ return ret;
+}
+
+static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ unsigned long flags;
+
+ /*
+ * At boot, we'll have a first call into .flush_iotlb_all right after
+ * .probe_device, and since we link our (single) domain to our iommu in
+ * the .attach_device callback, we don't have that pointer set.
+ *
+ * It shouldn't really be any trouble to ignore it though, since we
+ * flush all caches as part of the device power-up.
+ */
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+ sun50i_iommu_flush_all_tlb(iommu);
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
+static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ sun50i_iommu_flush_iotlb_all(domain);
+}
+
+static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+{
+ struct sun50i_iommu_domain *sun50i_domain;
+ unsigned long flags;
+ int ret;
+
+ if (!iommu->domain)
+ return 0;
+
+ sun50i_domain = to_sun50i_domain(iommu->domain);
+
+ ret = reset_control_deassert(iommu->reset);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(iommu->clk);
+ if (ret)
+ goto err_reset_assert;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+ iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
+ iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+ IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
+ iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));
+
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
+ IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));
+
+ iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
+ IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));
+
+ ret = sun50i_iommu_flush_all_tlb(iommu);
+ if (ret) {
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+ goto err_clk_disable;
+ }
+
+ iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+ iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+ return 0;
+
+err_clk_disable:
+ clk_disable_unprepare(iommu->clk);
+
+err_reset_assert:
+ reset_control_assert(iommu->reset);
+
+ return ret;
+}
+
+static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+ iommu_write(iommu, IOMMU_ENABLE_REG, 0);
+ iommu_write(iommu, IOMMU_TTB_REG, 0);
+
+ spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+ clk_disable_unprepare(iommu->clk);
+ reset_control_assert(iommu->reset);
+}
+
+static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
+ gfp_t gfp)
+{
+ dma_addr_t pt_dma;
+ u32 *page_table;
+
+ page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
+
+ pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu->dev, pt_dma)) {
+ dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
+ kmem_cache_free(iommu->pt_pool, page_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* We rely on the physical address and DMA address being the same */
+ WARN_ON(pt_dma != virt_to_phys(page_table));
+
+ return page_table;
+}
+
+static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
+ u32 *page_table)
+{
+ phys_addr_t pt_phys = virt_to_phys(page_table);
+
+ dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
+ kmem_cache_free(iommu->pt_pool, page_table);
+}
+
+static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
+ dma_addr_t iova, gfp_t gfp)
+{
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ u32 *page_table;
+ u32 *dte_addr;
+ u32 old_dte;
+ u32 dte;
+
+ dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+ dte = *dte_addr;
+ if (sun50i_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
+ }
+
+ page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
+ if (IS_ERR(page_table))
+ return page_table;
+
+ dte = sun50i_mk_dte(virt_to_phys(page_table));
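+ /*
+ * Install the new DTE atomically; if another CPU raced us and
+ * already installed a page table here, free the one we just
+ * allocated and use theirs instead.
+ */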
+ old_dte = cmpxchg(dte_addr, 0, dte);
+ if (old_dte) {
+ phys_addr_t installed_pt_phys =
+ sun50i_dte_get_pt_address(old_dte);
+ u32 *installed_pt = phys_to_virt(installed_pt_phys);
+ u32 *drop_pt = page_table;
+
+ page_table = installed_pt;
+ dte = old_dte;
+ sun50i_iommu_free_page_table(iommu, drop_pt);
+ }
+
+ sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
+ sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+ return page_table;
+}
+
+static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = sun50i_domain->iommu;
+ u32 pte_index;
+ u32 *page_table, *pte_addr;
+ int ret = 0;
+
+ page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
+ if (IS_ERR(page_table)) {
+ ret = PTR_ERR(page_table);
+ goto out;
+ }
+
+ pte_index = sun50i_iova_get_pte_index(iova);
+ pte_addr = &page_table[pte_index];
+ if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
+ phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
+ dev_err(iommu->dev,
+ "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ *pte_addr = sun50i_mk_pte(paddr, prot);
+ sun50i_table_flush(sun50i_domain, pte_addr, 1);
+
+out:
+ return ret;
+}
+
+static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ phys_addr_t pt_phys;
+ dma_addr_t pte_dma;
+ u32 *pte_addr;
+ u32 dte;
+
+ dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+ if (!sun50i_dte_is_pt_valid(dte))
+ return 0;
+
+ pt_phys = sun50i_dte_get_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
+ pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE;
+
+ if (!sun50i_pte_is_page_valid(*pte_addr))
+ return 0;
+
+ memset(pte_addr, 0, sizeof(*pte_addr));
+ sun50i_table_flush(sun50i_domain, pte_addr, 1);
+
+ return SZ_4K;
+}
+
+static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ phys_addr_t pt_phys;
+ u32 *page_table;
+ u32 dte, pte;
+
+ dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+ if (!sun50i_dte_is_pt_valid(dte))
+ return 0;
+
+ pt_phys = sun50i_dte_get_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[sun50i_iova_get_pte_index(iova)];
+ if (!sun50i_pte_is_page_valid(pte))
+ return 0;
+
+ return sun50i_pte_get_page_address(pte) +
+ sun50i_iova_get_page_offset(iova);
+}
+
+static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+{
+ struct sun50i_iommu_domain *sun50i_domain;
+
+ if (type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY &&
+ type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
+ if (!sun50i_domain)
+ return NULL;
+
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&sun50i_domain->domain))
+ goto err_free_domain;
+
+ sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(DT_SIZE));
+ if (!sun50i_domain->dt)
+ goto err_put_cookie;
+
+ refcount_set(&sun50i_domain->refcnt, 1);
+
+ sun50i_domain->domain.geometry.aperture_start = 0;
+ sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ sun50i_domain->domain.geometry.force_aperture = true;
+
+ return &sun50i_domain->domain;
+
+err_put_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&sun50i_domain->domain);
+
+err_free_domain:
+ kfree(sun50i_domain);
+
+ return NULL;
+}
+
+static void sun50i_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+
+ free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
+ sun50i_domain->dt = NULL;
+
+ iommu_put_dma_cookie(domain);
+
+ kfree(sun50i_domain);
+}
+
+static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
+ struct sun50i_iommu_domain *sun50i_domain)
+{
+ iommu->domain = &sun50i_domain->domain;
+ sun50i_domain->iommu = iommu;
+
+ sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
+ DT_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
+ dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
+ return -ENOMEM;
+ }
+
+ return sun50i_iommu_enable(iommu);
+}
+
+static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
+ struct sun50i_iommu_domain *sun50i_domain)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ phys_addr_t pt_phys;
+ u32 *page_table;
+ u32 *dte_addr;
+ u32 dte;
+
+ dte_addr = &sun50i_domain->dt[i];
+ dte = *dte_addr;
+ if (!sun50i_dte_is_pt_valid(dte))
+ continue;
+
+ memset(dte_addr, 0, sizeof(*dte_addr));
+ sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+ pt_phys = sun50i_dte_get_pt_address(dte);
+ page_table = phys_to_virt(pt_phys);
+ sun50i_iommu_free_page_table(iommu, page_table);
+ }
+
+ sun50i_iommu_disable(iommu);
+
+ dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
+ DT_SIZE, DMA_TO_DEVICE);
+
+ iommu->domain = NULL;
+}
+
+static void sun50i_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
+
+ dev_dbg(dev, "Detaching from IOMMU domain\n");
+
+ if (iommu->domain != domain)
+ return;
+
+ if (refcount_dec_and_test(&sun50i_domain->refcnt))
+ sun50i_iommu_detach_domain(iommu, sun50i_domain);
+}
+
+static int sun50i_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+ struct sun50i_iommu *iommu;
+
+ iommu = sun50i_iommu_from_dev(dev);
+ if (!iommu)
+ return -ENODEV;
+
+ dev_dbg(dev, "Attaching to IOMMU domain\n");
+
+ refcount_inc(&sun50i_domain->refcnt);
+
+ if (iommu->domain == domain)
+ return 0;
+
+ if (iommu->domain)
+ sun50i_iommu_detach_device(iommu->domain, dev);
+
+ sun50i_iommu_attach_domain(iommu, sun50i_domain);
+
+ return 0;
+}
+
+static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
+{
+ struct sun50i_iommu *iommu;
+
+ iommu = sun50i_iommu_from_dev(dev);
+ if (!iommu)
+ return ERR_PTR(-ENODEV);
+
+ return &iommu->iommu;
+}
+
+static void sun50i_iommu_release_device(struct device *dev) {}
+
+static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
+{
+ struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
+
+ return iommu_group_ref_get(iommu->group);
+}
+
+static int sun50i_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
+ unsigned id = args->args[0];
+
+ dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
+
+ return iommu_fwspec_add_ids(dev, &id, 1);
+}
+
+static const struct iommu_ops sun50i_iommu_ops = {
+ .pgsize_bitmap = SZ_4K,
+ .attach_dev = sun50i_iommu_attach_device,
+ .detach_dev = sun50i_iommu_detach_device,
+ .device_group = sun50i_iommu_device_group,
+ .domain_alloc = sun50i_iommu_domain_alloc,
+ .domain_free = sun50i_iommu_domain_free,
+ .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+ .iotlb_sync = sun50i_iommu_iotlb_sync,
+ .iova_to_phys = sun50i_iommu_iova_to_phys,
+ .map = sun50i_iommu_map,
+ .of_xlate = sun50i_iommu_of_xlate,
+ .probe_device = sun50i_iommu_probe_device,
+ .release_device = sun50i_iommu_release_device,
+ .unmap = sun50i_iommu_unmap,
+};
+
+static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
+ unsigned master, phys_addr_t iova,
+ unsigned prot)
+{
+ dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
+ &iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");
+
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
+}
+
+static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
+ unsigned addr_reg,
+ unsigned blame_reg)
+{
+ phys_addr_t iova;
+ unsigned master;
+ u32 blame;
+
+ assert_spin_locked(&iommu->iommu_lock);
+
+ iova = iommu_read(iommu, addr_reg);
+ blame = iommu_read(iommu, blame_reg);
+ master = ilog2(blame & IOMMU_INT_MASTER_MASK);
+
+ /*
+ * If the address is not in the page table, we can't get what
+ * operation triggered the fault. Assume it's a read
+ * operation.
+ */
+ sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);
+
+ return iova;
+}
+
+static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
+{
+ enum sun50i_iommu_aci aci;
+ phys_addr_t iova;
+ unsigned master;
+ unsigned dir;
+ u32 blame;
+
+ assert_spin_locked(&iommu->iommu_lock);
+
+ blame = iommu_read(iommu, IOMMU_INT_STA_REG);
+ master = ilog2(blame & IOMMU_INT_MASTER_MASK);
+ iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
+ aci = sun50i_get_pte_aci(iommu_read(iommu,
+ IOMMU_INT_ERR_DATA_REG(master)));
+
+ switch (aci) {
+ /*
+ * If we are in the read-only domain, then it means we
+ * tried to write.
+ */
+ case SUN50I_IOMMU_ACI_RD:
+ dir = IOMMU_FAULT_WRITE;
+ break;
+
+ /*
+ * If we are in the write-only domain, then it means
+ * we tried to read.
+ */
+ case SUN50I_IOMMU_ACI_WR:
+
+ /*
+ * If we are in the domain without any permission, we
+ * can't really tell. Let's default to a read
+ * operation.
+ */
+ case SUN50I_IOMMU_ACI_NONE:
+
+ /* Faults in the read/write domain shouldn't happen; treat as a read. */
+ case SUN50I_IOMMU_ACI_RD_WR:
+ default:
+ dir = IOMMU_FAULT_READ;
+ break;
+ }
+
+ /*
+ * If the address is not in the page table, we can't get what
+ * operation triggered the fault. Assume it's a read
+ * operation.
+ */
+ sun50i_iommu_report_fault(iommu, master, iova, dir);
+
+ return iova;
+}
+
+static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+{
+ struct sun50i_iommu *iommu = dev_id;
+ phys_addr_t iova;
+ u32 status;
+
+ spin_lock(&iommu->iommu_lock);
+
+ status = iommu_read(iommu, IOMMU_INT_STA_REG);
+ if (!(status & IOMMU_INT_MASK)) {
+ spin_unlock(&iommu->iommu_lock);
+ return IRQ_NONE;
+ }
+
+ if (status & IOMMU_INT_INVALID_L2PG)
+ iova = sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L2_REG,
+ IOMMU_L2PG_INT_REG);
+ else if (status & IOMMU_INT_INVALID_L1PG)
+ iova = sun50i_iommu_handle_pt_irq(iommu,
+ IOMMU_INT_ERR_ADDR_L1_REG,
+ IOMMU_L1PG_INT_REG);
+ else
+ iova = sun50i_iommu_handle_perm_irq(iommu);
+
+ iommu_write(iommu, IOMMU_INT_CLR_REG, status);
+
+ iommu_write(iommu, IOMMU_RESET_REG, ~status);
+ iommu_write(iommu, IOMMU_RESET_REG, status);
+
+ spin_unlock(&iommu->iommu_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int sun50i_iommu_probe(struct platform_device *pdev)
+{
+ struct sun50i_iommu *iommu;
+ int ret, irq;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+ spin_lock_init(&iommu->iommu_lock);
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = &pdev->dev;
+
+ iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
+ PT_SIZE, PT_SIZE,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!iommu->pt_pool)
+ return -ENOMEM;
+
+ iommu->group = iommu_group_alloc();
+ if (IS_ERR(iommu->group)) {
+ ret = PTR_ERR(iommu->group);
+ goto err_free_cache;
+ }
+
+ iommu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iommu->base)) {
+ ret = PTR_ERR(iommu->base);
+ goto err_free_group;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_free_group;
+ }
+
+ iommu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(iommu->clk)) {
+ dev_err(&pdev->dev, "Couldn't get our clock.\n");
+ ret = PTR_ERR(iommu->clk);
+ goto err_free_group;
+ }
+
+ iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(iommu->reset)) {
+ dev_err(&pdev->dev, "Couldn't get our reset line.\n");
+ ret = PTR_ERR(iommu->reset);
+ goto err_free_group;
+ }
+
+ ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
+ NULL, dev_name(&pdev->dev));
+ if (ret)
+ goto err_free_group;
+
+ iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
+ iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&iommu->iommu);
+ if (ret)
+ goto err_remove_sysfs;
+
+ ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
+ dev_name(&pdev->dev), iommu);
+ if (ret < 0)
+ goto err_unregister;
+
+ bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
+
+ return 0;
+
+err_unregister:
+ iommu_device_unregister(&iommu->iommu);
+
+err_remove_sysfs:
+ iommu_device_sysfs_remove(&iommu->iommu);
+
+err_free_group:
+ iommu_group_put(iommu->group);
+
+err_free_cache:
+ kmem_cache_destroy(iommu->pt_pool);
+
+ return ret;
+}
+
+static const struct of_device_id sun50i_iommu_dt[] = {
+ { .compatible = "allwinner,sun50i-h6-iommu", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);
+
+static struct platform_driver sun50i_iommu_driver = {
+ .driver = {
+ .name = "sun50i-iommu",
+ .of_match_table = sun50i_iommu_dt,
+ .suppress_bind_attrs = true,
+ }
+};
+builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);
+
+MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index db6559e8336f..5fbdff6ff41a 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -243,28 +243,16 @@ static bool gart_iommu_capable(enum iommu_cap cap)
return false;
}
-static int gart_iommu_add_device(struct device *dev)
+static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
- struct iommu_group *group;
-
if (!dev_iommu_fwspec_get(dev))
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
+ return ERR_PTR(-ENODEV);
- iommu_device_link(&gart_handle->iommu, dev);
-
- return 0;
+ return &gart_handle->iommu;
}
-static void gart_iommu_remove_device(struct device *dev)
+static void gart_iommu_release_device(struct device *dev)
{
- iommu_group_remove_device(dev);
- iommu_device_unlink(&gart_handle->iommu, dev);
}
static int gart_iommu_of_xlate(struct device *dev,
@@ -290,8 +278,8 @@ static const struct iommu_ops gart_iommu_ops = {
.domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
- .add_device = gart_iommu_add_device,
- .remove_device = gart_iommu_remove_device,
+ .probe_device = gart_iommu_probe_device,
+ .release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 63a147b623e6..7426b7666e2b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -757,11 +757,10 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
return 0;
}
-static int tegra_smmu_add_device(struct device *dev)
+static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = NULL;
- struct iommu_group *group;
struct of_phandle_args args;
unsigned int index = 0;
int err;
@@ -774,7 +773,7 @@ static int tegra_smmu_add_device(struct device *dev)
of_node_put(args.np);
if (err < 0)
- return err;
+ return ERR_PTR(err);
/*
* Only a single IOMMU master interface is currently
@@ -783,8 +782,6 @@ static int tegra_smmu_add_device(struct device *dev)
*/
dev->archdata.iommu = smmu;
- iommu_device_link(&smmu->iommu, dev);
-
break;
}
@@ -793,26 +790,14 @@ static int tegra_smmu_add_device(struct device *dev)
}
if (!smmu)
- return -ENODEV;
-
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- iommu_group_put(group);
+ return ERR_PTR(-ENODEV);
- return 0;
+ return &smmu->iommu;
}
-static void tegra_smmu_remove_device(struct device *dev)
+static void tegra_smmu_release_device(struct device *dev)
{
- struct tegra_smmu *smmu = dev->archdata.iommu;
-
- if (smmu)
- iommu_device_unlink(&smmu->iommu, dev);
-
dev->archdata.iommu = NULL;
- iommu_group_remove_device(dev);
}
static const struct tegra_smmu_group_soc *
@@ -895,8 +880,8 @@ static const struct iommu_ops tegra_smmu_ops = {
.domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
- .add_device = tegra_smmu_add_device,
- .remove_device = tegra_smmu_remove_device,
+ .probe_device = tegra_smmu_probe_device,
+ .release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
@@ -1015,7 +1000,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
* value. However the IOMMU registration process will attempt to add
* all devices to the IOMMU when bus_set_iommu() is called. In order
* not to rely on global variables to track the IOMMU instance, we
- * set it here so that it can be looked up from the .add_device()
+ * set it here so that it can be looked up from the .probe_device()
* callback via the IOMMU device's .drvdata field.
*/
mc->smmu = smmu;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 4e1d11af23c8..f6f07489a9aa 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -865,24 +865,23 @@ static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
return dev ? dev_to_virtio(dev)->priv : NULL;
}
-static int viommu_add_device(struct device *dev)
+static struct iommu_device *viommu_probe_device(struct device *dev)
{
int ret;
- struct iommu_group *group;
struct viommu_endpoint *vdev;
struct viommu_dev *viommu = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!fwspec || fwspec->ops != &viommu_ops)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
if (!viommu)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
vdev->dev = dev;
vdev->viommu = viommu;
@@ -896,45 +895,25 @@ static int viommu_add_device(struct device *dev)
goto err_free_dev;
}
- ret = iommu_device_link(&viommu->iommu, dev);
- if (ret)
- goto err_free_dev;
+ return &viommu->iommu;
- /*
- * Last step creates a default domain and attaches to it. Everything
- * must be ready.
- */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group)) {
- ret = PTR_ERR(group);
- goto err_unlink_dev;
- }
-
- iommu_group_put(group);
-
- return PTR_ERR_OR_ZERO(group);
-
-err_unlink_dev:
- iommu_device_unlink(&viommu->iommu, dev);
err_free_dev:
generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
- return ret;
+ return ERR_PTR(ret);
}
-static void viommu_remove_device(struct device *dev)
+static void viommu_release_device(struct device *dev)
{
- struct viommu_endpoint *vdev;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct viommu_endpoint *vdev;
if (!fwspec || fwspec->ops != &viommu_ops)
return;
vdev = dev_iommu_priv_get(dev);
- iommu_group_remove_device(dev);
- iommu_device_unlink(&vdev->viommu->iommu, dev);
generic_iommu_put_resv_regions(dev, &vdev->resv_regions);
kfree(vdev);
}
@@ -960,8 +939,8 @@ static struct iommu_ops viommu_ops = {
.unmap = viommu_unmap,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
- .add_device = viommu_add_device,
- .remove_device = viommu_remove_device,
+ .probe_device = viommu_probe_device,
+ .release_device = viommu_release_device,
.device_group = viommu_device_group,
.get_resv_regions = viommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 66b9a68f5e9f..29fead208cad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -493,6 +493,19 @@ config TI_SCI_INTA_IRQCHIP
If you wish to use interrupt aggregator irq resources managed by the
TI System Controller, say Y here. Otherwise, say N.
+config RISCV_INTC
+ bool "RISC-V Local Interrupt Controller"
+ depends on RISCV
+ default y
+ help
+ This enables support for the per-HART local interrupt controller
+ found in standard RISC-V systems. The per-HART local interrupt
+ controller handles timer interrupts, software interrupts, and
+ hardware interrupts. Without a per-HART local interrupt controller,
+ a RISC-V system will be unable to handle any interrupts.
+
+ If you don't know what to do here, say Y.
+
config SIFIVE_PLIC
bool "SiFive Platform-Level Interrupt Controller"
depends on RISCV
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 3a4ce283189a..133f9c45744a 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
+obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
new file mode 100644
index 000000000000..a6f97fa6ff69
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#define pr_fmt(fmt) "riscv-intc: " fmt
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+
+static struct irq_domain *intc_domain;
+
+static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
+{
+ unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
+
+ if (unlikely(cause >= BITS_PER_LONG))
+ panic("unexpected interrupt cause");
+
+ switch (cause) {
+#ifdef CONFIG_SMP
+ case RV_IRQ_SOFT:
+ /*
+ * We only use software interrupts to pass IPIs, so if a
+ * non-SMP system gets one, then we don't know what to do.
+ */
+ handle_IPI(regs);
+ break;
+#endif
+ default:
+ handle_domain_irq(intc_domain, cause, regs);
+ break;
+ }
+}
+
+/*
+ * On RISC-V systems local interrupts are masked or unmasked by writing
+ * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written
+ * on the local hart, these functions can only be called on the hart that
+ * corresponds to the IRQ chip.
+ */
+
+static void riscv_intc_irq_mask(struct irq_data *d)
+{
+ csr_clear(CSR_IE, BIT(d->hwirq));
+}
+
+static void riscv_intc_irq_unmask(struct irq_data *d)
+{
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static int riscv_intc_cpu_starting(unsigned int cpu)
+{
+ csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static int riscv_intc_cpu_dying(unsigned int cpu)
+{
+ csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
+ return 0;
+}
+
+static struct irq_chip riscv_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = riscv_intc_irq_mask,
+ .irq_unmask = riscv_intc_irq_unmask,
+};
+
+static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops riscv_intc_domain_ops = {
+ .map = riscv_intc_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init riscv_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int rc, hartid;
+
+ hartid = riscv_of_parent_hartid(node);
+ if (hartid < 0) {
+ pr_warn("unable to find hart id for %pOF\n", node);
+ return 0;
+ }
+
+ /*
+ * The DT will have one INTC DT node under each CPU (or HART)
+ * DT node so riscv_intc_init() function will be called once
+ * for each INTC DT node. We only need to do INTC initialization
+ * for the INTC DT node belonging to boot CPU (or boot HART).
+ */
+ if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
+ return 0;
+
+ intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
+ &riscv_intc_domain_ops, NULL);
+ if (!intc_domain) {
+ pr_err("unable to add IRQ domain\n");
+ return -ENXIO;
+ }
+
+ rc = set_handle_irq(&riscv_intc_irq);
+ if (rc) {
+ pr_err("failed to set irq handler\n");
+ return rc;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
+ "irqchip/riscv/intc:starting",
+ riscv_intc_cpu_starting,
+ riscv_intc_cpu_dying);
+
+ pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index d9c53f85a68e..eaa3e9fe54e9 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -76,6 +77,7 @@ struct plic_handler {
void __iomem *enable_base;
struct plic_priv *priv;
};
+static int plic_parent_irq;
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
@@ -219,15 +221,17 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
* that source ID back to the same claim register. This automatically enables
* and disables the interrupt, so there's nothing else to do.
*/
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
- csr_clear(CSR_IE, IE_EIE);
+ chained_irq_enter(chip, desc);
+
while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);
@@ -237,21 +241,8 @@ static void plic_handle_irq(struct pt_regs *regs)
else
generic_handle_irq(irq);
}
- csr_set(CSR_IE, IE_EIE);
-}
-
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
-{
- for (; node; node = node->parent) {
- if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hartid(node);
- }
- return -1;
+ chained_irq_exit(chip, desc);
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
@@ -262,10 +253,8 @@ static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
static int plic_dying_cpu(unsigned int cpu)
{
- struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
- csr_clear(CSR_IE, IE_EIE);
- plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+ if (plic_parent_irq)
+ disable_percpu_irq(plic_parent_irq);
return 0;
}
@@ -274,7 +263,11 @@ static int plic_starting_cpu(unsigned int cpu)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- csr_set(CSR_IE, IE_EIE);
+ if (plic_parent_irq)
+ enable_percpu_irq(plic_parent_irq,
+ irq_get_trigger_type(plic_parent_irq));
+ else
+ pr_warn("cpu%d: parent irq not available\n", cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
@@ -330,7 +323,7 @@ static int __init plic_init(struct device_node *node,
if (parent.args[0] != RV_IRQ_EXT)
continue;
- hartid = plic_find_hart_id(parent.np);
+ hartid = riscv_of_parent_hartid(parent.np);
if (hartid < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
@@ -342,6 +335,14 @@ static int __init plic_init(struct device_node *node,
continue;
}
+ /* Find parent domain and register chained handler */
+ if (!plic_parent_irq && irq_find_host(parent.np)) {
+ plic_parent_irq = irq_of_parse_and_map(node, i);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq,
+ plic_handle_irq);
+ }
+
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
@@ -382,7 +383,6 @@ done:
pr_info("%pOFP: mapped %d interrupts with %d handlers for"
" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
- set_handle_irq(plic_handle_irq);
return 0;
out_iounmap:
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index c664d84e1667..ed943140e1fd 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -83,6 +83,17 @@ config LEDS_APU
To compile this driver as a module, choose M here: the
module will be called leds-apu.
+config LEDS_ARIEL
+ tristate "Dell Wyse 3020 status LED support"
+ depends on LEDS_CLASS
+ depends on (MACH_MMP3_DT && MFD_ENE_KB3930) || COMPILE_TEST
+ help
+ This driver adds support for controlling the front panel status
+ LEDs on the Dell Wyse 3020 (Ariel) board via the KB3930 Embedded
+ Controller.
+
+ Say Y if your machine is a Dell Wyse 3020 thin client.
+
config LEDS_AS3645A
tristate "AS3645A and LM3555 LED flash controllers support"
depends on I2C && LEDS_CLASS_FLASH
@@ -92,6 +103,16 @@ config LEDS_AS3645A
controller. V4L2 flash API is provided as well if
CONFIG_V4L2_FLASH_API is enabled.
+config LEDS_AW2013
+ tristate "LED support for Awinic AW2013"
+ depends on LEDS_CLASS && I2C && OF
+ help
+ This option enables support for the AW2013 3-channel
+ LED driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-aw2013.
+
config LEDS_BCM6328
tristate "LED Support for Broadcom BCM6328"
depends on LEDS_CLASS
@@ -857,6 +878,14 @@ config LEDS_IP30
To compile this driver as a module, choose M here: the module
will be called leds-ip30.
+config LEDS_SGM3140
+ tristate "LED support for the SGM3140"
+ depends on LEDS_CLASS_FLASH
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ help
+ This option enables support for the SGM3140 500mA Buck/Boost Charge
+ Pump LED Driver.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 45235d5fb218..d6b8a792c936 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -12,8 +12,10 @@ obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
+obj-$(CONFIG_LEDS_ARIEL) += leds-ariel.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
+obj-$(CONFIG_LEDS_AW2013) += leds-aw2013.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
@@ -77,6 +79,7 @@ obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
+obj-$(CONFIG_LEDS_SGM3140) += leds-sgm3140.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
new file mode 100644
index 000000000000..bb68ba23a7d4
--- /dev/null
+++ b/drivers/leds/leds-ariel.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0-or-later
+/*
+ * Dell Wyse 3020 a.k.a. "Ariel" Embedded Controller LED Driver
+ *
+ * Copyright (C) 2020 Lubomir Rintel
+ */
+
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/of_platform.h>
+
+enum ec_index {
+ EC_BLUE_LED = 0x01,
+ EC_AMBER_LED = 0x02,
+ EC_GREEN_LED = 0x03,
+};
+
+enum {
+ EC_LED_OFF = 0x00,
+ EC_LED_STILL = 0x01,
+ EC_LED_FADE = 0x02,
+ EC_LED_BLINK = 0x03,
+};
+
+struct ariel_led {
+ struct regmap *ec_ram;
+ enum ec_index ec_index;
+ struct led_classdev led_cdev;
+};
+
+#define led_cdev_to_ariel_led(c) container_of(c, struct ariel_led, led_cdev)
+
+static enum led_brightness ariel_led_get(struct led_classdev *led_cdev)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+ unsigned int led_status = 0;
+
+ if (regmap_read(led->ec_ram, led->ec_index, &led_status))
+ return LED_OFF;
+
+ if (led_status == EC_LED_STILL)
+ return LED_FULL;
+ else
+ return LED_OFF;
+}
+
+static void ariel_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+
+ if (brightness == LED_OFF)
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
+ else
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
+}
+
+static int ariel_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct ariel_led *led = led_cdev_to_ariel_led(led_cdev);
+
+ if (*delay_on == 0 && *delay_off == 0)
+ return -EINVAL;
+
+ if (*delay_on == 0) {
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_OFF);
+ } else if (*delay_off == 0) {
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_STILL);
+ } else {
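+ /*
+ * The EC appears to blink at a single fixed rate; report an
+ * assumed 500ms on / 500ms off cadence back to the LED core.
+ */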
+ *delay_on = 500;
+ *delay_off = 500;
+ regmap_write(led->ec_ram, led->ec_index, EC_LED_BLINK);
+ }
+
+ return 0;
+}
+
+#define NLEDS 3
+
+static int ariel_led_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ariel_led *leds;
+ struct regmap *ec_ram;
+ int ret;
+ int i;
+
+ ec_ram = dev_get_regmap(dev->parent, "ec_ram");
+ if (!ec_ram)
+ return -ENODEV;
+
+ leds = devm_kcalloc(dev, NLEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ leds[0].ec_index = EC_BLUE_LED;
+ leds[0].led_cdev.name = "blue:power";
+ leds[0].led_cdev.default_trigger = "default-on";
+
+ leds[1].ec_index = EC_AMBER_LED;
+ leds[1].led_cdev.name = "amber:status";
+
+ leds[2].ec_index = EC_GREEN_LED;
+ leds[2].led_cdev.name = "green:status";
+ leds[2].led_cdev.default_trigger = "default-on";
+
+ for (i = 0; i < NLEDS; i++) {
+ leds[i].ec_ram = ec_ram;
+ leds[i].led_cdev.brightness_get = ariel_led_get;
+ leds[i].led_cdev.brightness_set = ariel_led_set;
+ leds[i].led_cdev.blink_set = ariel_blink_set;
+
+ ret = devm_led_classdev_register(dev, &leds[i].led_cdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver ariel_led_driver = {
+ .probe = ariel_led_probe,
+ .driver = {
+ .name = "dell-wyse-ariel-led",
+ },
+};
+module_platform_driver(ariel_led_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Dell Wyse 3020 Status LEDs Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
new file mode 100644
index 000000000000..d709cc1f949e
--- /dev/null
+++ b/drivers/leds/leds-aw2013.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Driver for Awinic AW2013 3-channel LED driver
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define AW2013_MAX_LEDS 3
+
+/* Reset and ID register */
+#define AW2013_RSTR 0x00
+#define AW2013_RSTR_RESET 0x55
+#define AW2013_RSTR_CHIP_ID 0x33
+
+/* Global control register */
+#define AW2013_GCR 0x01
+#define AW2013_GCR_ENABLE BIT(0)
+
+/* LED channel enable register */
+#define AW2013_LCTR 0x30
+#define AW2013_LCTR_LE(x) BIT((x))
+
+/* LED channel control registers */
+#define AW2013_LCFG(x) (0x31 + (x))
+#define AW2013_LCFG_IMAX_MASK (BIT(0) | BIT(1)) // Should be 0-3
+#define AW2013_LCFG_MD BIT(4)
+#define AW2013_LCFG_FI BIT(5)
+#define AW2013_LCFG_FO BIT(6)
+
+/* LED channel PWM registers */
+#define AW2013_REG_PWM(x) (0x34 + (x))
+
+/* LED channel timing registers */
+#define AW2013_LEDT0(x) (0x37 + (x) * 3)
+#define AW2013_LEDT0_T1(x) ((x) << 4) // Should be 0-7
+#define AW2013_LEDT0_T2(x) (x) // Should be 0-5
+
+#define AW2013_LEDT1(x) (0x38 + (x) * 3)
+#define AW2013_LEDT1_T3(x) ((x) << 4) // Should be 0-7
+#define AW2013_LEDT1_T4(x) (x) // Should be 0-7
+
+#define AW2013_LEDT2(x) (0x39 + (x) * 3)
+#define AW2013_LEDT2_T0(x) ((x) << 4) // Should be 0-8
+#define AW2013_LEDT2_REPEAT(x) (x) // Should be 0-15
+
+#define AW2013_REG_MAX 0x77
+
+#define AW2013_TIME_STEP 130 /* ms */
+
+struct aw2013;
+
+struct aw2013_led {
+ struct aw2013 *chip;
+ struct led_classdev cdev;
+ u32 num;
+ unsigned int imax;
+};
+
+struct aw2013 {
+ struct mutex mutex; /* held when writing to registers */
+ struct regulator *vcc_regulator;
+ struct i2c_client *client;
+ struct aw2013_led leds[AW2013_MAX_LEDS];
+ struct regmap *regmap;
+ int num_leds;
+ bool enabled;
+};
+
+static int aw2013_chip_init(struct aw2013 *chip)
+{
+ int i, ret;
+
+ ret = regmap_write(chip->regmap, AW2013_GCR, AW2013_GCR_ENABLE);
+ if (ret) {
+ dev_err(&chip->client->dev, "Failed to enable the chip: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < chip->num_leds; i++) {
+ ret = regmap_update_bits(chip->regmap,
+ AW2013_LCFG(chip->leds[i].num),
+ AW2013_LCFG_IMAX_MASK,
+ chip->leds[i].imax);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to set maximum current for led %d: %d\n",
+ chip->leds[i].num, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void aw2013_chip_disable(struct aw2013 *chip)
+{
+ int ret;
+
+ if (!chip->enabled)
+ return;
+
+ regmap_write(chip->regmap, AW2013_GCR, 0);
+
+ ret = regulator_disable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ return;
+ }
+
+ chip->enabled = false;
+}
+
+static int aw2013_chip_enable(struct aw2013 *chip)
+{
+ int ret;
+
+ if (chip->enabled)
+ return 0;
+
+ ret = regulator_enable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&chip->client->dev,
+ "Failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ chip->enabled = true;
+
+ ret = aw2013_chip_init(chip);
+ if (ret)
+ aw2013_chip_disable(chip);
+
+ return ret;
+}
+
+static bool aw2013_chip_in_use(struct aw2013 *chip)
+{
+ int i;
+
+ for (i = 0; i < chip->num_leds; i++)
+ if (chip->leds[i].cdev.brightness)
+ return true;
+
+ return false;
+}
+
+static int aw2013_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct aw2013_led *led = container_of(cdev, struct aw2013_led, cdev);
+ int ret, num;
+
+ mutex_lock(&led->chip->mutex);
+
+ if (aw2013_chip_in_use(led->chip)) {
+ ret = aw2013_chip_enable(led->chip);
+ if (ret)
+ goto error;
+ }
+
+ num = led->num;
+
+ ret = regmap_write(led->chip->regmap, AW2013_REG_PWM(num), brightness);
+ if (ret)
+ goto error;
+
+ if (brightness) {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0xFF);
+ } else {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0);
+ if (ret)
+ goto error;
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0);
+ }
+ if (ret)
+ goto error;
+
+ if (!aw2013_chip_in_use(led->chip))
+ aw2013_chip_disable(led->chip);
+
+error:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int aw2013_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on, unsigned long *delay_off)
+{
+ struct aw2013_led *led = container_of(cdev, struct aw2013_led, cdev);
+ int ret, num = led->num;
+ unsigned long off = 0, on = 0;
+
+ /* If no blink specified, default to 1 Hz. */
+ if (!*delay_off && !*delay_on) {
+ *delay_off = 500;
+ *delay_on = 500;
+ }
+
+ if (!led->cdev.brightness) {
+ led->cdev.brightness = LED_FULL;
+ ret = aw2013_brightness_set(&led->cdev, led->cdev.brightness);
+ if (ret)
+ return ret;
+ }
+
+ /* Never on - just set to off */
+ if (!*delay_on) {
+ led->cdev.brightness = LED_OFF;
+ return aw2013_brightness_set(&led->cdev, LED_OFF);
+ }
+
+ mutex_lock(&led->chip->mutex);
+
+ /* Never off - brightness is already set, disable blinking */
+ if (!*delay_off) {
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0);
+ goto out;
+ }
+
+ /* Convert into values the HW will understand. */
+ off = min(5, ilog2((*delay_off - 1) / AW2013_TIME_STEP) + 1);
+ on = min(7, ilog2((*delay_on - 1) / AW2013_TIME_STEP) + 1);
+
+ *delay_off = BIT(off) * AW2013_TIME_STEP;
+ *delay_on = BIT(on) * AW2013_TIME_STEP;
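+ /*
+ * For example (illustrative only): a requested 500ms on / 1000ms off
+ * rounds to on = 2, off = 3 and is reported back as 520ms on /
+ * 1040ms off (BIT(2) * 130 and BIT(3) * 130).
+ */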
+
+ /* Set timings */
+ ret = regmap_write(led->chip->regmap,
+ AW2013_LEDT0(num), AW2013_LEDT0_T2(on));
+ if (ret)
+ goto out;
+ ret = regmap_write(led->chip->regmap,
+ AW2013_LEDT1(num), AW2013_LEDT1_T4(off));
+ if (ret)
+ goto out;
+
+ /* Finally, enable the LED */
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCFG(num),
+ AW2013_LCFG_MD, 0xFF);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(led->chip->regmap, AW2013_LCTR,
+ AW2013_LCTR_LE(num), 0xFF);
+
+out:
+ mutex_unlock(&led->chip->mutex);
+
+ return ret;
+}
+
+static int aw2013_probe_dt(struct aw2013 *chip)
+{
+ struct device_node *np = chip->client->dev.of_node, *child;
+ int count, ret = 0, i = 0;
+ struct aw2013_led *led;
+
+ count = of_get_child_count(np);
+ if (!count || count > AW2013_MAX_LEDS)
+ return -EINVAL;
+
+ regmap_write(chip->regmap, AW2013_RSTR, AW2013_RSTR_RESET);
+
+ for_each_available_child_of_node(np, child) {
+ struct led_init_data init_data = {};
+ u32 source;
+ u32 imax;
+
+ ret = of_property_read_u32(child, "reg", &source);
+ if (ret != 0 || source >= AW2013_MAX_LEDS) {
+ dev_err(&chip->client->dev,
+ "Couldn't read LED address: %d\n", ret);
+ count--;
+ continue;
+ }
+
+ led = &chip->leds[i];
+ led->num = source;
+ led->chip = chip;
+ init_data.fwnode = of_fwnode_handle(child);
+
+ if (!of_property_read_u32(child, "led-max-microamp", &imax)) {
+ led->imax = min_t(u32, imax / 5000, 3);
+ } else {
+ led->imax = 1; // 5mA
+ dev_info(&chip->client->dev,
+ "DT property led-max-microamp is missing\n");
+ }
+
+ of_property_read_string(child, "linux,default-trigger",
+ &led->cdev.default_trigger);
+
+ led->cdev.brightness_set_blocking = aw2013_brightness_set;
+ led->cdev.blink_set = aw2013_blink_set;
+
+ ret = devm_led_classdev_register_ext(&chip->client->dev,
+ &led->cdev, &init_data);
+ if (ret < 0)
+ return ret;
+
+ i++;
+ }
+
+ if (!count)
+ return -EINVAL;
+
+ chip->num_leds = i;
+
+ return 0;
+}
+
+static const struct regmap_config aw2013_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AW2013_REG_MAX,
+};
+
+static int aw2013_probe(struct i2c_client *client)
+{
+ struct aw2013 *chip;
+ int ret;
+ unsigned int chipid;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ mutex_init(&chip->mutex);
+ mutex_lock(&chip->mutex);
+
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+
+ chip->regmap = devm_regmap_init_i2c(client, &aw2013_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto error;
+ }
+
+ chip->vcc_regulator = devm_regulator_get(&client->dev, "vcc");
+ ret = PTR_ERR_OR_ZERO(chip->vcc_regulator);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to request regulator: %d\n", ret);
+ goto error;
+ }
+
+ ret = regulator_enable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to enable regulator: %d\n", ret);
+ goto error;
+ }
+
+ ret = regmap_read(chip->regmap, AW2013_RSTR, &chipid);
+ if (ret) {
+ dev_err(&client->dev, "Failed to read chip ID: %d\n",
+ ret);
+ goto error_reg;
+ }
+
+ if (chipid != AW2013_RSTR_CHIP_ID) {
+ dev_err(&client->dev, "Chip reported wrong ID: %x\n",
+ chipid);
+ ret = -ENODEV;
+ goto error_reg;
+ }
+
+ ret = aw2013_probe_dt(chip);
+ if (ret < 0)
+ goto error_reg;
+
+ ret = regulator_disable(chip->vcc_regulator);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to disable regulator: %d\n", ret);
+ goto error;
+ }
+
+ mutex_unlock(&chip->mutex);
+
+ return 0;
+
+error_reg:
+ regulator_disable(chip->vcc_regulator);
+
+error:
+ mutex_destroy(&chip->mutex);
+ return ret;
+}
+
+static int aw2013_remove(struct i2c_client *client)
+{
+ struct aw2013 *chip = i2c_get_clientdata(client);
+
+ aw2013_chip_disable(chip);
+
+ mutex_destroy(&chip->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id aw2013_match_table[] = {
+ { .compatible = "awinic,aw2013", },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, aw2013_match_table);
+
+static struct i2c_driver aw2013_driver = {
+ .driver = {
+ .name = "leds-aw2013",
+ .of_match_table = of_match_ptr(aw2013_match_table),
+ },
+ .probe_new = aw2013_probe,
+ .remove = aw2013_remove,
+};
+
+module_i2c_driver(aw2013_driver);
+
+MODULE_AUTHOR("Nikita Travkin <nikitos.tr@gmail.com>");
+MODULE_DESCRIPTION("AW2013 LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index a5abb499574b..11ce05249751 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index 4e4e542774cb..6ee9131fbf25 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -7,7 +7,7 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/kernel.h>
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index c94995f0daa2..9079850e6ea4 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -5,7 +5,6 @@
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 14ef4ccdda3a..ceceeb6a0e96 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -12,16 +12,17 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/leds.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
struct netxbig_gpio_ext {
- unsigned int *addr;
+ struct gpio_desc **addr;
int num_addr;
- unsigned int *data;
+ struct gpio_desc **data;
int num_data;
- unsigned int enable;
+ struct gpio_desc *enable;
};
enum netxbig_led_mode {
@@ -69,7 +70,7 @@ static void gpio_ext_set_addr(struct netxbig_gpio_ext *gpio_ext, int addr)
int pin;
for (pin = 0; pin < gpio_ext->num_addr; pin++)
- gpio_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
+ gpiod_set_value(gpio_ext->addr[pin], (addr >> pin) & 1);
}
static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
@@ -77,14 +78,14 @@ static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data)
int pin;
for (pin = 0; pin < gpio_ext->num_data; pin++)
- gpio_set_value(gpio_ext->data[pin], (data >> pin) & 1);
+ gpiod_set_value(gpio_ext->data[pin], (data >> pin) & 1);
}
static void gpio_ext_enable_select(struct netxbig_gpio_ext *gpio_ext)
{
/* Enable select is done on the rising edge. */
- gpio_set_value(gpio_ext->enable, 0);
- gpio_set_value(gpio_ext->enable, 1);
+ gpiod_set_value(gpio_ext->enable, 0);
+ gpiod_set_value(gpio_ext->enable, 1);
}
static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
@@ -99,41 +100,6 @@ static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext,
spin_unlock_irqrestore(&gpio_ext_lock, flags);
}
-static int gpio_ext_init(struct platform_device *pdev,
- struct netxbig_gpio_ext *gpio_ext)
-{
- int err;
- int i;
-
- if (unlikely(!gpio_ext))
- return -EINVAL;
-
- /* Configure address GPIOs. */
- for (i = 0; i < gpio_ext->num_addr; i++) {
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->addr[i],
- GPIOF_OUT_INIT_LOW,
- "GPIO extension addr");
- if (err)
- return err;
- }
- /* Configure data GPIOs. */
- for (i = 0; i < gpio_ext->num_data; i++) {
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->data[i],
- GPIOF_OUT_INIT_LOW,
- "GPIO extension data");
- if (err)
- return err;
- }
- /* Configure "enable select" GPIO. */
- err = devm_gpio_request_one(&pdev->dev, gpio_ext->enable,
- GPIOF_OUT_INIT_LOW,
- "GPIO extension enable");
- if (err)
- return err;
-
- return 0;
-}
-
/*
* Class LED driver.
*/
@@ -347,15 +313,47 @@ static int create_netxbig_led(struct platform_device *pdev,
return devm_led_classdev_register(&pdev->dev, &led_dat->cdev);
}
-static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
- struct netxbig_gpio_ext *gpio_ext)
+/**
+ * netxbig_gpio_ext_remove() - Clean up GPIO extension data
+ * @data: managed resource data to clean up
+ *
+ * Since we obtain the GPIO descriptors from a device other than the one our
+ * driver is probing, we need to register a dedicated callback to free them
+ * up as a managed resource.
+ */
+static void netxbig_gpio_ext_remove(void *data)
+{
+ struct netxbig_gpio_ext *gpio_ext = data;
+ int i;
+
+ for (i = 0; i < gpio_ext->num_addr; i++)
+ gpiod_put(gpio_ext->addr[i]);
+ for (i = 0; i < gpio_ext->num_data; i++)
+ gpiod_put(gpio_ext->data[i]);
+ gpiod_put(gpio_ext->enable);
+}
+
+/**
+ * netxbig_gpio_ext_get() - Obtain GPIO extension device data
+ * @dev: main LED device
+ * @gpio_ext_dev: the GPIO extension device
+ * @gpio_ext: the data structure holding the GPIO extension data
+ *
+ * This function walks the subdevice node in the device tree that only
+ * contains GPIO line handles and obtains the GPIO descriptors from that
+ * device.
+ */
+static int netxbig_gpio_ext_get(struct device *dev,
+ struct device *gpio_ext_dev,
+ struct netxbig_gpio_ext *gpio_ext)
{
- int *addr, *data;
+ struct gpio_desc **addr, **data;
int num_addr, num_data;
+ struct gpio_desc *gpiod;
int ret;
int i;
- ret = of_gpio_named_count(np, "addr-gpios");
+ ret = gpiod_count(gpio_ext_dev, "addr");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property addr-gpios\n");
@@ -366,16 +364,25 @@ static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
if (!addr)
return -ENOMEM;
+ /*
+ * We cannot use devm_ managed resources with these GPIO descriptors
+	 * since they are associated with the "GPIO extension device", which
+	 * is not bound to any driver. The device tree parser will however
+	 * populate a platform device for it, so we can still obtain the
+	 * GPIO descriptors from that device.
+ */
for (i = 0; i < num_addr; i++) {
- ret = of_get_named_gpio(np, "addr-gpios", i);
- if (ret < 0)
- return ret;
- addr[i] = ret;
+ gpiod = gpiod_get_index(gpio_ext_dev, "addr", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "GPIO extension addr");
+ addr[i] = gpiod;
}
gpio_ext->addr = addr;
gpio_ext->num_addr = num_addr;
- ret = of_gpio_named_count(np, "data-gpios");
+ ret = gpiod_count(gpio_ext_dev, "data");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property data-gpios\n");
@@ -387,23 +394,26 @@ static int gpio_ext_get_of_pdata(struct device *dev, struct device_node *np,
return -ENOMEM;
for (i = 0; i < num_data; i++) {
- ret = of_get_named_gpio(np, "data-gpios", i);
- if (ret < 0)
- return ret;
- data[i] = ret;
+ gpiod = gpiod_get_index(gpio_ext_dev, "data", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ gpiod_set_consumer_name(gpiod, "GPIO extension data");
+ data[i] = gpiod;
}
gpio_ext->data = data;
gpio_ext->num_data = num_data;
- ret = of_get_named_gpio(np, "enable-gpio", 0);
- if (ret < 0) {
+ gpiod = gpiod_get(gpio_ext_dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
dev_err(dev,
"Failed to get GPIO from DT property enable-gpio\n");
- return ret;
+ return PTR_ERR(gpiod);
}
- gpio_ext->enable = ret;
+ gpiod_set_consumer_name(gpiod, "GPIO extension enable");
+ gpio_ext->enable = gpiod;
- return 0;
+ return devm_add_action_or_reset(dev, netxbig_gpio_ext_remove, gpio_ext);
}
static int netxbig_leds_get_of_pdata(struct device *dev,
@@ -411,6 +421,8 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
{
struct device_node *np = dev->of_node;
struct device_node *gpio_ext_np;
+ struct platform_device *gpio_ext_pdev;
+ struct device *gpio_ext_dev;
struct device_node *child;
struct netxbig_gpio_ext *gpio_ext;
struct netxbig_led_timer *timers;
@@ -426,13 +438,19 @@ static int netxbig_leds_get_of_pdata(struct device *dev,
dev_err(dev, "Failed to get DT handle gpio-ext\n");
return -EINVAL;
}
+ gpio_ext_pdev = of_find_device_by_node(gpio_ext_np);
+ if (!gpio_ext_pdev) {
+ dev_err(dev, "Failed to find platform device for gpio-ext\n");
+ return -ENODEV;
+ }
+ gpio_ext_dev = &gpio_ext_pdev->dev;
gpio_ext = devm_kzalloc(dev, sizeof(*gpio_ext), GFP_KERNEL);
if (!gpio_ext) {
of_node_put(gpio_ext_np);
return -ENOMEM;
}
- ret = gpio_ext_get_of_pdata(dev, gpio_ext_np, gpio_ext);
+ ret = netxbig_gpio_ext_get(dev, gpio_ext_dev, gpio_ext);
of_node_put(gpio_ext_np);
if (ret)
return ret;
@@ -585,10 +603,6 @@ static int netxbig_led_probe(struct platform_device *pdev)
if (!leds_data)
return -ENOMEM;
- ret = gpio_ext_init(pdev, pdata->gpio_ext);
- if (ret < 0)
- return ret;
-
for (i = 0; i < pdata->num_leds; i++) {
ret = create_netxbig_led(pdev, pdata,
&leds_data[i], &pdata->leds[i]);
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 6c8a724aac51..ef7b91bd2064 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -91,15 +91,21 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
pwm_init_state(led_data->pwm, &led_data->pwmstate);
ret = devm_led_classdev_register(dev, &led_data->cdev);
- if (ret == 0) {
- priv->num_leds++;
- led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
- } else {
+ if (ret) {
dev_err(dev, "failed to register PWM led for %s: %d\n",
led->name, ret);
+ return ret;
}
- return ret;
+ ret = led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
+ if (ret) {
+ dev_err(dev, "failed to set led PWM value for %s: %d",
+ led->name, ret);
+ return ret;
+ }
+
+ priv->num_leds++;
+ return 0;
}
static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
diff --git a/drivers/leds/leds-sgm3140.c b/drivers/leds/leds-sgm3140.c
new file mode 100644
index 000000000000..c494b934ae09
--- /dev/null
+++ b/drivers/leds/leds-sgm3140.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Luca Weiss <luca@z3ntu.xyz>
+
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+#define FLASH_TIMEOUT_DEFAULT 250000U /* 250ms */
+#define FLASH_MAX_TIMEOUT_DEFAULT 300000U /* 300ms */
+
+struct sgm3140 {
+ struct led_classdev_flash fled_cdev;
+ struct v4l2_flash *v4l2_flash;
+
+ struct timer_list powerdown_timer;
+
+ struct gpio_desc *flash_gpio;
+ struct gpio_desc *enable_gpio;
+ struct regulator *vin_regulator;
+
+ bool enabled;
+
+ /* current timeout in us */
+ u32 timeout;
+ /* maximum timeout in us */
+ u32 max_timeout;
+};
+
+static struct sgm3140 *flcdev_to_sgm3140(struct led_classdev_flash *flcdev)
+{
+ return container_of(flcdev, struct sgm3140, fled_cdev);
+}
+
+static int sgm3140_strobe_set(struct led_classdev_flash *fled_cdev, bool state)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+ int ret;
+
+ if (priv->enabled == state)
+ return 0;
+
+ if (state) {
+ ret = regulator_enable(priv->vin_regulator);
+ if (ret) {
+ dev_err(fled_cdev->led_cdev.dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ gpiod_set_value_cansleep(priv->flash_gpio, 1);
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
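+		/*
+		 * Arm the power-down timer so the flash is switched off again
+		 * once the configured strobe timeout has expired.
+		 */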
+ mod_timer(&priv->powerdown_timer,
+ jiffies + usecs_to_jiffies(priv->timeout));
+ } else {
+ del_timer_sync(&priv->powerdown_timer);
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+ dev_err(fled_cdev->led_cdev.dev,
+ "failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->enabled = state;
+
+ return 0;
+}
+
+static int sgm3140_strobe_get(struct led_classdev_flash *fled_cdev, bool *state)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+
+ *state = timer_pending(&priv->powerdown_timer);
+
+ return 0;
+}
+
+static int sgm3140_timeout_set(struct led_classdev_flash *fled_cdev,
+ u32 timeout)
+{
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+
+ priv->timeout = timeout;
+
+ return 0;
+}
+
+static const struct led_flash_ops sgm3140_flash_ops = {
+ .strobe_set = sgm3140_strobe_set,
+ .strobe_get = sgm3140_strobe_get,
+ .timeout_set = sgm3140_timeout_set,
+};
+
+static int sgm3140_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev);
+ struct sgm3140 *priv = flcdev_to_sgm3140(fled_cdev);
+ bool enable = brightness == LED_ON;
+ int ret;
+
+ if (priv->enabled == enable)
+ return 0;
+
+ if (enable) {
+ ret = regulator_enable(priv->vin_regulator);
+ if (ret) {
+ dev_err(led_cdev->dev,
+ "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+ gpiod_set_value_cansleep(priv->enable_gpio, 1);
+ } else {
+ gpiod_set_value_cansleep(priv->enable_gpio, 0);
+ ret = regulator_disable(priv->vin_regulator);
+ if (ret) {
+ dev_err(led_cdev->dev,
+ "failed to disable regulator: %d\n", ret);
+ return ret;
+ }
+ }
+
+ priv->enabled = enable;
+
+ return 0;
+}
+
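+/*
+ * Timer callback: switch the flash off and drop the supply regulator once the
+ * strobe timeout has expired.
+ */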
+static void sgm3140_powerdown_timer(struct timer_list *t)
+{
+ struct sgm3140 *priv = from_timer(priv, t, powerdown_timer);
+
+ gpiod_set_value(priv->enable_gpio, 0);
+ gpiod_set_value(priv->flash_gpio, 0);
+ regulator_disable(priv->vin_regulator);
+
+ priv->enabled = false;
+}
+
+static void sgm3140_init_flash_timeout(struct sgm3140 *priv)
+{
+ struct led_classdev_flash *fled_cdev = &priv->fled_cdev;
+ struct led_flash_setting *s;
+
+ /* Init flash timeout setting */
+ s = &fled_cdev->timeout;
+ s->min = 1;
+ s->max = priv->max_timeout;
+ s->step = 1;
+ s->val = FLASH_TIMEOUT_DEFAULT;
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+ struct led_classdev *led_cdev = &priv->fled_cdev.led_cdev;
+ struct led_flash_setting *s;
+
+ strscpy(v4l2_sd_cfg->dev_name, led_cdev->dev->kobj.name,
+ sizeof(v4l2_sd_cfg->dev_name));
+
+ /* Init flash intensity setting */
+ s = &v4l2_sd_cfg->intensity;
+ s->min = 0;
+ s->max = 1;
+ s->step = 1;
+ s->val = 1;
+}
+
+#else
+static void sgm3140_init_v4l2_flash_config(struct sgm3140 *priv,
+ struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+#endif
+
+static int sgm3140_probe(struct platform_device *pdev)
+{
+ struct sgm3140 *priv;
+ struct led_classdev *led_cdev;
+ struct led_classdev_flash *fled_cdev;
+ struct led_init_data init_data = {};
+ struct fwnode_handle *child_node;
+ struct v4l2_flash_config v4l2_sd_cfg = {};
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->flash_gpio = devm_gpiod_get(&pdev->dev, "flash", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(priv->flash_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request flash gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->enable_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
+ ret = PTR_ERR_OR_ZERO(priv->enable_gpio);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->vin_regulator = devm_regulator_get(&pdev->dev, "vin");
+ ret = PTR_ERR_OR_ZERO(priv->vin_regulator);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
+ child_node = fwnode_get_next_available_child_node(pdev->dev.fwnode,
+ NULL);
+ if (!child_node) {
+ dev_err(&pdev->dev,
+ "No fwnode child node found for connected LED.\n");
+ return -EINVAL;
+ }
+
+ ret = fwnode_property_read_u32(child_node, "flash-max-timeout-us",
+ &priv->max_timeout);
+ if (ret) {
+ priv->max_timeout = FLASH_MAX_TIMEOUT_DEFAULT;
+ dev_warn(&pdev->dev,
+ "flash-max-timeout-us property missing\n");
+ }
+
+ /*
+	 * Set the default timeout to FLASH_TIMEOUT_DEFAULT unless max_timeout
+	 * from the DT is lower.
+ */
+ priv->timeout = min(priv->max_timeout, FLASH_TIMEOUT_DEFAULT);
+
+ timer_setup(&priv->powerdown_timer, sgm3140_powerdown_timer, 0);
+
+ fled_cdev = &priv->fled_cdev;
+ led_cdev = &fled_cdev->led_cdev;
+
+ fled_cdev->ops = &sgm3140_flash_ops;
+
+ led_cdev->brightness_set_blocking = sgm3140_brightness_set;
+ led_cdev->max_brightness = LED_ON;
+ led_cdev->flags |= LED_DEV_CAP_FLASH;
+
+ sgm3140_init_flash_timeout(priv);
+
+ init_data.fwnode = child_node;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* Register in the LED subsystem */
+ ret = devm_led_classdev_flash_register_ext(&pdev->dev,
+ fled_cdev, &init_data);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register flash device: %d\n",
+ ret);
+ goto err;
+ }
+
+ sgm3140_init_v4l2_flash_config(priv, &v4l2_sd_cfg);
+
+ /* Create V4L2 Flash subdev */
+ priv->v4l2_flash = v4l2_flash_init(&pdev->dev,
+ child_node,
+ fled_cdev, NULL,
+ &v4l2_sd_cfg);
+ if (IS_ERR(priv->v4l2_flash)) {
+ ret = PTR_ERR(priv->v4l2_flash);
+ goto err;
+ }
+
+ return ret;
+
+err:
+ fwnode_handle_put(child_node);
+ return ret;
+}
+
+static int sgm3140_remove(struct platform_device *pdev)
+{
+ struct sgm3140 *priv = platform_get_drvdata(pdev);
+
+ del_timer_sync(&priv->powerdown_timer);
+
+ v4l2_flash_release(priv->v4l2_flash);
+
+ return 0;
+}
+
+static const struct of_device_id sgm3140_dt_match[] = {
+ { .compatible = "sgmicro,sgm3140" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sgm3140_dt_match);
+
+static struct platform_driver sgm3140_driver = {
+ .probe = sgm3140_probe,
+ .remove = sgm3140_remove,
+ .driver = {
+ .name = "sgm3140",
+ .of_match_table = sgm3140_dt_match,
+ },
+};
+
+module_platform_driver(sgm3140_driver);
+
+MODULE_AUTHOR("Luca Weiss <luca@z3ntu.xyz>");
+MODULE_DESCRIPTION("SG Micro SGM3140 charge pump LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 58be20cae183..1128ac75443c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -93,7 +93,7 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/workqueue.h>
#include <linux/leds-tca6507.h>
#include <linux/of.h>
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index a8911ebd30e5..0929f1275814 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -214,8 +214,9 @@ tlc591xx_probe(struct i2c_client *client,
err = devm_led_classdev_register_ext(dev, &led->ldev,
&init_data);
if (err < 0) {
- dev_err(dev, "couldn't register LED %s\n",
- led->ldev.name);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "couldn't register LED %s\n",
+ led->ldev.name);
return err;
}
}
diff --git a/drivers/leds/trigger/ledtrig-timer.c b/drivers/leds/trigger/ledtrig-timer.c
index 34a68604c46c..b4688d1d9d2b 100644
--- a/drivers/leds/trigger/ledtrig-timer.c
+++ b/drivers/leds/trigger/ledtrig-timer.c
@@ -28,7 +28,7 @@ static ssize_t led_delay_on_store(struct device *dev,
{
struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
ret = kstrtoul(buf, 10, &state);
if (ret)
@@ -53,7 +53,7 @@ static ssize_t led_delay_off_store(struct device *dev,
{
struct led_classdev *led_cdev = led_trigger_get_led(dev);
unsigned long state;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
ret = kstrtoul(buf, 10, &state);
if (ret)
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index cbd46c1c5bf7..fcb9d7bd5bd0 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -247,7 +247,6 @@ config PMAC_RACKMETER
config SENSORS_AMS
tristate "Apple Motion Sensor driver"
depends on PPC_PMAC && !PPC64 && INPUT && ((ADB_PMU && I2C = y) || (ADB_PMU && !I2C) || I2C)
- select INPUT_POLLDEV
help
Support for the motion sensor included in PowerBooks. Includes
implementations for PMU and I2C.
diff --git a/drivers/macintosh/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c
index 06a96b3f11de..0da493d449b2 100644
--- a/drivers/macintosh/ams/ams-input.c
+++ b/drivers/macintosh/ams/ams-input.c
@@ -25,9 +25,8 @@ MODULE_PARM_DESC(invert, "Invert input data on X and Y axis");
static DEFINE_MUTEX(ams_input_mutex);
-static void ams_idev_poll(struct input_polled_dev *dev)
+static void ams_idev_poll(struct input_dev *idev)
{
- struct input_dev *idev = dev->input;
s8 x, y, z;
mutex_lock(&ams_info.lock);
@@ -59,14 +58,10 @@ static int ams_input_enable(void)
ams_info.ycalib = y;
ams_info.zcalib = z;
- ams_info.idev = input_allocate_polled_device();
- if (!ams_info.idev)
+ input = input_allocate_device();
+ if (!input)
return -ENOMEM;
- ams_info.idev->poll = ams_idev_poll;
- ams_info.idev->poll_interval = 25;
-
- input = ams_info.idev->input;
input->name = "Apple Motion Sensor";
input->id.bustype = ams_info.bustype;
input->id.vendor = 0;
@@ -75,28 +70,32 @@ static int ams_input_enable(void)
input_set_abs_params(input, ABS_X, -50, 50, 3, 0);
input_set_abs_params(input, ABS_Y, -50, 50, 3, 0);
input_set_abs_params(input, ABS_Z, -50, 50, 3, 0);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
- set_bit(EV_ABS, input->evbit);
- set_bit(EV_KEY, input->evbit);
- set_bit(BTN_TOUCH, input->keybit);
+ error = input_setup_polling(input, ams_idev_poll);
+ if (error)
+ goto err_free_input;
- error = input_register_polled_device(ams_info.idev);
- if (error) {
- input_free_polled_device(ams_info.idev);
- ams_info.idev = NULL;
- return error;
- }
+ input_set_poll_interval(input, 25);
+ error = input_register_device(input);
+ if (error)
+ goto err_free_input;
+
+ ams_info.idev = input;
joystick = true;
return 0;
+
+err_free_input:
+ input_free_device(input);
+ return error;
}
static void ams_input_disable(void)
{
if (ams_info.idev) {
- input_unregister_polled_device(ams_info.idev);
- input_free_polled_device(ams_info.idev);
+ input_unregister_device(ams_info.idev);
ams_info.idev = NULL;
}
diff --git a/drivers/macintosh/ams/ams.h b/drivers/macintosh/ams/ams.h
index fe8d596f9845..935bdd9cd9a6 100644
--- a/drivers/macintosh/ams/ams.h
+++ b/drivers/macintosh/ams/ams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/i2c.h>
-#include <linux/input-polldev.h>
+#include <linux/input.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -51,7 +51,7 @@ struct ams {
#endif
/* Joystick emulation */
- struct input_polled_dev *idev;
+ struct input_dev *idev;
__u16 bustype;
/* calibrated null values */
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index eb3adfb7f88d..d4759db002c6 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -9,10 +9,10 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <linux/adb.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/hydra.h>
#include <asm/irq.h>
#include <linux/init.h>
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 74bf2938276b..eab7e83c11c4 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -16,8 +16,8 @@
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 8450d7c008d0..73e6ae88fafd 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -50,9 +50,9 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/irq.h>
#ifdef CONFIG_PPC_PMAC
diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c
index 4150301a89a5..e8377ce0a95a 100644
--- a/drivers/macintosh/windfarm_pm112.c
+++ b/drivers/macintosh/windfarm_pm112.c
@@ -132,14 +132,6 @@ static int create_cpu_loop(int cpu)
s32 tmax;
int fmin;
- /* Get PID params from the appropriate SAT */
- hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
- if (hdr == NULL) {
- printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
- return -EINVAL;
- }
- piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
-
/* Get FVT params to get Tmax; if not found, assume default */
hdr = smu_sat_get_sdb_partition(chip, 0xC4 + core, NULL);
if (hdr) {
@@ -152,6 +144,16 @@ static int create_cpu_loop(int cpu)
if (tmax < cpu_all_tmax)
cpu_all_tmax = tmax;
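+	/*
+	 * Free the FVT partition buffer now that Tmax has been read; the same
+	 * pointer is reused for the PID partition below.
+	 */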
+ kfree(hdr);
+
+ /* Get PID params from the appropriate SAT */
+ hdr = smu_sat_get_sdb_partition(chip, 0xC8 + core, NULL);
+ if (hdr == NULL) {
+ printk(KERN_WARNING"windfarm: can't get CPU PID fan config\n");
+ return -EINVAL;
+ }
+ piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
+
/*
* Darwin has a minimum fan speed of 1000 rpm for the 4-way and
* 515 for the 2-way. That appears to be overkill, so for now,
@@ -174,6 +176,9 @@ static int create_cpu_loop(int cpu)
pid.min = fmin;
wf_cpu_pid_init(&cpu_pid[cpu], &pid);
+
+ kfree(hdr);
+
return 0;
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 5a577a6734cf..05b1009e2820 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -236,4 +236,22 @@ config SUN6I_MSGBOX
various Allwinner SoCs. This mailbox is used for communication
between the application CPUs and the power management coprocessor.
+config SPRD_MBOX
+ tristate "Spreadtrum Mailbox"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+	  Mailbox driver implementation for the Spreadtrum platform. It is used
+	  to send messages between the application processors and the MCU. Say
+	  Y here if you want to build the Spreadtrum mailbox controller driver.
+
+config QCOM_IPCC
+ bool "Qualcomm Technologies, Inc. IPCC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Qualcomm Technologies, Inc. Inter-Processor Communication Controller
+	  (IPCC) driver for MSM devices. The driver provides mailbox support
+	  for sending interrupts to the clients and also acts as an interrupt
+	  controller for receiving interrupts from clients.
+ Say Y here if you want to build this driver.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e4364ef5c47..60d224b723a1 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -50,3 +50,7 @@ obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
+
+obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
+
+obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 7906624a731c..7205b825c8b5 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -12,6 +12,7 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
@@ -66,6 +67,8 @@ struct imx_mu_priv {
struct clk *clk;
int irq;
+ u32 xcr;
+
bool side_b;
};
@@ -154,12 +157,17 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
switch (cp->type) {
case IMX_MU_TYPE_TX:
- if (msg->hdr.size > sizeof(*msg)) {
+ /*
+ * msg->hdr.size specifies the number of u32 words while
+ * sizeof yields bytes.
+ */
+
+ if (msg->hdr.size > sizeof(*msg) / 4) {
/*
* The real message size can be different to
* struct imx_sc_rpc_msg_max size
*/
- dev_err(priv->dev, "Exceed max msg size (%zu) on TX, got: %i\n", sizeof(*msg), msg->hdr.size);
+			dev_err(priv->dev, "Maximum message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
return -EINVAL;
}
@@ -198,9 +206,8 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);
- if (msg.hdr.size > sizeof(msg)) {
- dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
- sizeof(msg), msg.hdr.size);
+ if (msg.hdr.size > sizeof(msg) / 4) {
+		dev_err(priv->dev, "Maximum message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
return -EINVAL;
}
@@ -285,8 +292,10 @@ static int imx_mu_startup(struct mbox_chan *chan)
{
struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
struct imx_mu_con_priv *cp = chan->con_priv;
+ unsigned long irq_flag = IRQF_SHARED;
int ret;
+ pm_runtime_get_sync(priv->dev);
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbell don't have ACK support */
tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
@@ -294,8 +303,12 @@ static int imx_mu_startup(struct mbox_chan *chan)
return 0;
}
- ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED |
- IRQF_NO_SUSPEND, cp->irq_desc, chan);
+	/* The IPC MU interrupt should be requested with IRQF_NO_SUSPEND set */
+ if (!priv->dev->pm_domain)
+ irq_flag |= IRQF_NO_SUSPEND;
+
+ ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
+ cp->irq_desc, chan);
if (ret) {
dev_err(priv->dev,
"Unable to acquire IRQ %d\n", priv->irq);
@@ -323,6 +336,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
if (cp->type == IMX_MU_TYPE_TXDB) {
tasklet_kill(&cp->txdb_tasklet);
+ pm_runtime_put_sync(priv->dev);
return;
}
@@ -341,6 +355,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
}
free_irq(priv->irq, chan);
+ pm_runtime_put_sync(priv->dev);
}
static const struct mbox_chan_ops imx_mu_ops = {
@@ -374,7 +389,7 @@ static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
break;
default:
dev_err(mbox->dev, "Invalid chan type: %d\n", type);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (chan >= mbox->num_chans) {
@@ -508,14 +523,39 @@ static int imx_mu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return devm_mbox_controller_register(dev, &priv->mbox);
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto disable_runtime_pm;
+ }
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ goto disable_runtime_pm;
+
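+	/*
+	 * From here on the clock is controlled by the runtime PM callbacks,
+	 * so drop the clock reference held during probe.
+	 */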
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+
+disable_runtime_pm:
+ pm_runtime_disable(dev);
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int imx_mu_remove(struct platform_device *pdev)
{
struct imx_mu_priv *priv = platform_get_drvdata(pdev);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_disable(priv->dev);
return 0;
}
@@ -558,12 +598,69 @@ static const struct of_device_id imx_mu_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
+static int imx_mu_suspend_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv->clk)
+ priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int imx_mu_resume_noirq(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ /*
+	 * Only restore the MU registers when context was lost: the TIE bits
+	 * could be set during noirq resume while MU data communication is
+	 * still going on, and restoring the saved value would overwrite them
+	 * and make MU data transmission fail, which may lead to a system
+	 * freeze. This issue was observed when testing freeze-mode suspend.
+ */
+ if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
+ imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);
+
+ return 0;
+}
+
+static int imx_mu_runtime_suspend(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int imx_mu_runtime_resume(struct device *dev)
+{
+ struct imx_mu_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ dev_err(dev, "failed to enable clock\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_mu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
+ imx_mu_resume_noirq)
+ SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
+ imx_mu_runtime_resume, NULL)
+};
+
static struct platform_driver imx_mu_driver = {
.probe = imx_mu_probe,
.remove = imx_mu_remove,
.driver = {
.name = "imx_mu",
.of_match_table = imx_mu_dt_ids,
+ .pm = &imx_mu_pm_ops,
},
};
module_platform_driver(imx_mu_driver);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 34844b7a3675..8c7fac38bb1c 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -568,7 +568,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
return ret;
}
-struct platform_driver pcc_mbox_driver = {
+static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
.name = "PCCT",
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index eeebafd546e5..cec34f0af6ce 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -24,6 +24,35 @@ struct qcom_apcs_ipc {
struct platform_device *clk;
};
+struct qcom_apcs_ipc_data {
+ int offset;
+ char *clk_name;
+};
+
+static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
+ .offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
+};
+
+static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
+ .offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
+};
+
+static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
+ .offset = 16, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
+ .offset = 8, .clk_name = NULL
+};
+
+static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
+ .offset = 12, .clk_name = NULL
+};
+
static const struct regmap_config apcs_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -48,17 +77,12 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
static int qcom_apcs_ipc_probe(struct platform_device *pdev)
{
struct qcom_apcs_ipc *apcs;
+ const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
struct resource *res;
- unsigned long offset;
void __iomem *base;
unsigned long i;
int ret;
- const struct of_device_id apcs_clk_match_table[] = {
- { .compatible = "qcom,msm8916-apcs-kpss-global", },
- { .compatible = "qcom,qcs404-apcs-apps-global", },
- {}
- };
apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
if (!apcs)
@@ -73,10 +97,10 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+ apcs_data = of_device_get_match_data(&pdev->dev);
apcs->regmap = regmap;
- apcs->offset = offset;
+ apcs->offset = apcs_data->offset;
/* Initialize channel identifiers */
for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
@@ -93,9 +117,9 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
return ret;
}
- if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
+ if (apcs_data->clk_name) {
apcs->clk = platform_device_register_data(&pdev->dev,
- "qcom-apcs-msm8916-clk",
+ apcs_data->clk_name,
PLATFORM_DEVID_NONE,
NULL, 0);
if (IS_ERR(apcs->clk))
@@ -119,14 +143,15 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
- { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
- { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
- { .compatible = "qcom,qcs404-apcs-apps-global", .data = (void *)8 },
- { .compatible = "qcom,sc7180-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,sm8150-apss-shared", .data = (void *)12 },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = (void *)8 },
+ { .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
+ { .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
+ { .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
new file mode 100644
index 000000000000..2d13c72944c6
--- /dev/null
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+
+#define IPCC_MBOX_MAX_CHAN 48
+
+/* IPCC Register offsets */
+#define IPCC_REG_SEND_ID 0x0c
+#define IPCC_REG_RECV_ID 0x10
+#define IPCC_REG_RECV_SIGNAL_ENABLE 0x14
+#define IPCC_REG_RECV_SIGNAL_DISABLE 0x18
+#define IPCC_REG_RECV_SIGNAL_CLEAR 0x1c
+#define IPCC_REG_CLIENT_CLEAR 0x38
+
+#define IPCC_SIGNAL_ID_MASK GENMASK(15, 0)
+#define IPCC_CLIENT_ID_MASK GENMASK(31, 16)
+
+#define IPCC_NO_PENDING_IRQ GENMASK(31, 0)
+
+/**
+ * struct qcom_ipcc_chan_info - Per-mailbox-channel info
+ * @client_id: The client ID for which the interrupt has to be triggered
+ * @signal_id: The signal ID for which the interrupt has to be triggered
+ */
+struct qcom_ipcc_chan_info {
+ u16 client_id;
+ u16 signal_id;
+};
+
+/**
+ * struct qcom_ipcc - Holder for the mailbox driver
+ * @dev: Device associated with this instance
+ * @base: Base address of the IPCC frame associated to APSS
+ * @irq_domain: The irq_domain associated with this instance
+ * @chan: The mailbox channels array
+ * @mchan: The per-mailbox channel info array
+ * @mbox: The mailbox controller
+ * @irq: Summary irq
+ */
+struct qcom_ipcc {
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
+ struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+ struct mbox_controller mbox;
+ int irq;
+};
+
+static inline struct qcom_ipcc *to_qcom_ipcc(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct qcom_ipcc, mbox);
+}
+
+static inline u32 qcom_ipcc_get_hwirq(u16 client_id, u16 signal_id)
+{
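+	/*
+	 * Pack the client ID into the upper 16 bits and the signal ID into
+	 * the lower 16 bits, matching the layout of the SEND_ID and RECV_ID
+	 * registers.
+	 */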
+ return FIELD_PREP(IPCC_CLIENT_ID_MASK, client_id) |
+ FIELD_PREP(IPCC_SIGNAL_ID_MASK, signal_id);
+}
+
+static irqreturn_t qcom_ipcc_irq_fn(int irq, void *data)
+{
+ struct qcom_ipcc *ipcc = data;
+ u32 hwirq;
+ int virq;
+
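+	/*
+	 * Reading RECV_ID returns the next pending (client, signal) pair, or
+	 * all-ones when nothing is pending, so drain every pending signal and
+	 * dispatch it through the IRQ domain.
+	 */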
+ for (;;) {
+ hwirq = readl(ipcc->base + IPCC_REG_RECV_ID);
+ if (hwirq == IPCC_NO_PENDING_IRQ)
+ break;
+
+ virq = irq_find_mapping(ipcc->irq_domain, hwirq);
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_CLEAR);
+ generic_handle_irq(virq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_ipcc_mask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_DISABLE);
+}
+
+static void qcom_ipcc_unmask_irq(struct irq_data *irqd)
+{
+ struct qcom_ipcc *ipcc = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
+
+ writel(hwirq, ipcc->base + IPCC_REG_RECV_SIGNAL_ENABLE);
+}
+
+static struct irq_chip qcom_ipcc_irq_chip = {
+ .name = "ipcc",
+ .irq_mask = qcom_ipcc_mask_irq,
+ .irq_unmask = qcom_ipcc_unmask_irq,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int qcom_ipcc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct qcom_ipcc *ipcc = d->host_data;
+
+ irq_set_chip_and_handler(irq, &qcom_ipcc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, ipcc);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static int qcom_ipcc_domain_xlate(struct irq_domain *d,
+ struct device_node *node, const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (intsize != 3)
+ return -EINVAL;
+
+ *out_hwirq = qcom_ipcc_get_hwirq(intspec[0], intspec[1]);
+ *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops qcom_ipcc_irq_ops = {
+ .map = qcom_ipcc_domain_map,
+ .xlate = qcom_ipcc_domain_xlate,
+};
+
+static int qcom_ipcc_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(chan->mbox);
+ struct qcom_ipcc_chan_info *mchan = chan->con_priv;
+ u32 hwirq;
+
+ hwirq = qcom_ipcc_get_hwirq(mchan->client_id, mchan->signal_id);
+ writel(hwirq, ipcc->base + IPCC_REG_SEND_ID);
+
+ return 0;
+}
+
+static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *ph)
+{
+ struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
+ struct qcom_ipcc_chan_info *mchan;
+ struct mbox_chan *chan;
+ unsigned int i;
+
+ if (ph->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
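+	/* Bind the first unused channel slot to this (client, signal) pair. */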
+ for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
+ chan = &ipcc->chan[i];
+ if (!chan->con_priv) {
+ mchan = &ipcc->mchan[i];
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
+ .send_data = qcom_ipcc_mbox_send_data,
+};
+
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+{
+ struct mbox_controller *mbox;
+ struct device *dev = ipcc->dev;
+
+ mbox = &ipcc->mbox;
+ mbox->dev = dev;
+ mbox->num_chans = IPCC_MBOX_MAX_CHAN;
+ mbox->chans = ipcc->chan;
+ mbox->ops = &ipcc_mbox_chan_ops;
+ mbox->of_xlate = qcom_ipcc_mbox_xlate;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = false;
+
+ return devm_mbox_controller_register(dev, mbox);
+}
+
+static int qcom_ipcc_probe(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc;
+ int ret;
+
+ ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
+ if (!ipcc)
+ return -ENOMEM;
+
+ ipcc->dev = &pdev->dev;
+
+ ipcc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ipcc->base))
+ return PTR_ERR(ipcc->base);
+
+ ipcc->irq = platform_get_irq(pdev, 0);
+ if (ipcc->irq < 0)
+ return ipcc->irq;
+
+ ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
+ &qcom_ipcc_irq_ops, ipcc);
+ if (!ipcc->irq_domain)
+ return -ENOMEM;
+
+ ret = qcom_ipcc_setup_mbox(ipcc);
+ if (ret)
+ goto err_mbox;
+
+ ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
+ IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
+ goto err_mbox;
+ }
+
+ enable_irq_wake(ipcc->irq);
+ platform_set_drvdata(pdev, ipcc);
+
+ return 0;
+
+err_mbox:
+ irq_domain_remove(ipcc->irq_domain);
+
+ return ret;
+}
+
+static int qcom_ipcc_remove(struct platform_device *pdev)
+{
+ struct qcom_ipcc *ipcc = platform_get_drvdata(pdev);
+
+ disable_irq_wake(ipcc->irq);
+ irq_domain_remove(ipcc->irq_domain);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_ipcc_of_match[] = {
+ { .compatible = "qcom,ipcc"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ipcc_of_match);
+
+static struct platform_driver qcom_ipcc_driver = {
+ .probe = qcom_ipcc_probe,
+ .remove = qcom_ipcc_remove,
+ .driver = {
+ .name = "qcom-ipcc",
+ .of_match_table = qcom_ipcc_of_match,
+ },
+};
+
+static int __init qcom_ipcc_init(void)
+{
+ return platform_driver_register(&qcom_ipcc_driver);
+}
+arch_initcall(qcom_ipcc_init);
+
+MODULE_AUTHOR("Venkata Narendra Kumar Gutta <vnkgutta@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
new file mode 100644
index 000000000000..f6fab24ae8a9
--- /dev/null
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Spreadtrum mailbox driver
+ *
+ * Copyright (c) 2020 Spreadtrum Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define SPRD_MBOX_ID 0x0
+#define SPRD_MBOX_MSG_LOW 0x4
+#define SPRD_MBOX_MSG_HIGH 0x8
+#define SPRD_MBOX_TRIGGER 0xc
+#define SPRD_MBOX_FIFO_RST 0x10
+#define SPRD_MBOX_FIFO_STS 0x14
+#define SPRD_MBOX_IRQ_STS 0x18
+#define SPRD_MBOX_IRQ_MSK 0x1c
+#define SPRD_MBOX_LOCK 0x20
+#define SPRD_MBOX_FIFO_DEPTH 0x24
+
+/* Bit and mask definitions for the inbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_INBOX_FIFO_DELIVER_MASK GENMASK(23, 16)
+#define SPRD_INBOX_FIFO_OVERLOW_MASK GENMASK(15, 8)
+#define SPRD_INBOX_FIFO_DELIVER_SHIFT 16
+#define SPRD_INBOX_FIFO_BUSY_MASK GENMASK(7, 0)
+
+/* Bit and mask definitions for the SPRD_MBOX_IRQ_STS register */
+#define SPRD_MBOX_IRQ_CLR BIT(0)
+
+/* Bit and mask definitions for the outbox's SPRD_MBOX_FIFO_STS register */
+#define SPRD_OUTBOX_FIFO_FULL BIT(0)
+#define SPRD_OUTBOX_FIFO_WR_SHIFT 16
+#define SPRD_OUTBOX_FIFO_RD_SHIFT 24
+#define SPRD_OUTBOX_FIFO_POS_MASK GENMASK(7, 0)
+
+/* Bit and mask definitions for the inbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_INBOX_FIFO_BLOCK_IRQ BIT(0)
+#define SPRD_INBOX_FIFO_OVERFLOW_IRQ BIT(1)
+#define SPRD_INBOX_FIFO_DELIVER_IRQ BIT(2)
+#define SPRD_INBOX_FIFO_IRQ_MASK GENMASK(2, 0)
+
+/* Bit and mask definitions for the outbox's SPRD_MBOX_IRQ_MSK register */
+#define SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ BIT(0)
+#define SPRD_OUTBOX_FIFO_IRQ_MASK GENMASK(4, 0)
+
+#define SPRD_MBOX_CHAN_MAX 8
+
+struct sprd_mbox_priv {
+ struct mbox_controller mbox;
+ struct device *dev;
+ void __iomem *inbox_base;
+ void __iomem *outbox_base;
+ struct clk *clk;
+ u32 outbox_fifo_depth;
+
+ struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
+};
+
+static struct sprd_mbox_priv *to_sprd_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct sprd_mbox_priv, mbox);
+}
+
+static u32 sprd_mbox_get_fifo_len(struct sprd_mbox_priv *priv, u32 fifo_sts)
+{
+ u32 wr_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_WR_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 rd_pos = (fifo_sts >> SPRD_OUTBOX_FIFO_RD_SHIFT) &
+ SPRD_OUTBOX_FIFO_POS_MASK;
+ u32 fifo_len;
+
+ /*
+	 * If the read pointer equals the write pointer, the FIFO is either
+	 * full or empty; the FIFO-full flag distinguishes the two cases.
+ */
+ if (wr_pos == rd_pos) {
+ if (fifo_sts & SPRD_OUTBOX_FIFO_FULL)
+ fifo_len = priv->outbox_fifo_depth;
+ else
+ fifo_len = 0;
+ } else if (wr_pos > rd_pos) {
+ fifo_len = wr_pos - rd_pos;
+ } else {
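+		/*
+		 * The write pointer has wrapped around: count the slots from
+		 * the read pointer to the end of the FIFO plus those already
+		 * written at the front.
+		 */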
+ fifo_len = priv->outbox_fifo_depth - rd_pos + wr_pos;
+ }
+
+ return fifo_len;
+}
+
+static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, fifo_len, msg[2];
+ int i, id;
+
+ fifo_sts = readl(priv->outbox_base + SPRD_MBOX_FIFO_STS);
+
+ fifo_len = sprd_mbox_get_fifo_len(priv, fifo_sts);
+ if (!fifo_len) {
+ dev_warn_ratelimited(priv->dev, "spurious outbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < fifo_len; i++) {
+ msg[0] = readl(priv->outbox_base + SPRD_MBOX_MSG_LOW);
+ msg[1] = readl(priv->outbox_base + SPRD_MBOX_MSG_HIGH);
+ id = readl(priv->outbox_base + SPRD_MBOX_ID);
+
+ chan = &priv->chan[id];
+ mbox_chan_received_data(chan, (void *)msg);
+
+ /* Trigger to update outbox FIFO pointer */
+ writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
+ }
+
+	/* Clear the IRQ status after reading all messages. */
+ writel(SPRD_MBOX_IRQ_CLR, priv->outbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sprd_mbox_inbox_isr(int irq, void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+ struct mbox_chan *chan;
+ u32 fifo_sts, send_sts, busy, id;
+
+ fifo_sts = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS);
+
+ /* Get the inbox data delivery status */
+ send_sts = (fifo_sts & SPRD_INBOX_FIFO_DELIVER_MASK) >>
+ SPRD_INBOX_FIFO_DELIVER_SHIFT;
+ if (!send_sts) {
+ dev_warn_ratelimited(priv->dev, "spurious inbox interrupt\n");
+ return IRQ_NONE;
+ }
+
+ while (send_sts) {
+ id = __ffs(send_sts);
+ send_sts &= (send_sts - 1);
+
+ chan = &priv->chan[id];
+
+ /*
+		 * Check if the message was fetched by the remote target; if
+		 * so, the transmission has been completed.
+ */
+ busy = fifo_sts & SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id)))
+ mbox_chan_txdone(chan, 0);
+ }
+
+ /* Clear FIFO delivery and overflow status */
+ writel(fifo_sts &
+ (SPRD_INBOX_FIFO_DELIVER_MASK | SPRD_INBOX_FIFO_OVERLOW_MASK),
+ priv->inbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Clear irq status */
+ writel(SPRD_MBOX_IRQ_CLR, priv->inbox_base + SPRD_MBOX_IRQ_STS);
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 *data = msg;
+
+	/* Write the data into the inbox FIFO; only 8 bytes are supported per transfer */
+ writel(data[0], priv->inbox_base + SPRD_MBOX_MSG_LOW);
+ writel(data[1], priv->inbox_base + SPRD_MBOX_MSG_HIGH);
+
+ /* Set target core id */
+ writel(id, priv->inbox_base + SPRD_MBOX_ID);
+
+ /* Trigger remote request */
+ writel(0x1, priv->inbox_base + SPRD_MBOX_TRIGGER);
+
+ return 0;
+}
+
+static int sprd_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ unsigned long id = (unsigned long)chan->con_priv;
+ u32 busy;
+
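+	/*
+	 * Poll the inbox busy bit for this channel until the remote side has
+	 * fetched the message or the timeout expires.
+	 */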
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ busy = readl(priv->inbox_base + SPRD_MBOX_FIFO_STS) &
+ SPRD_INBOX_FIFO_BUSY_MASK;
+ if (!(busy & BIT(id))) {
+ mbox_chan_txdone(chan, 0);
+ return 0;
+ }
+
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+static int sprd_mbox_startup(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+ u32 val;
+
+ /* Select outbox FIFO mode and reset the outbox FIFO status */
+ writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
+
+ /* Enable inbox FIFO overflow and delivery interrupt */
+ val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
+ writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+
+ /* Enable outbox FIFO not empty interrupt */
+ val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
+ writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+
+ return 0;
+}
+
+static void sprd_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
+
+ /* Disable inbox & outbox interrupt */
+ writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
+ writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
+}
+
+static const struct mbox_chan_ops sprd_mbox_ops = {
+ .send_data = sprd_mbox_send_data,
+ .flush = sprd_mbox_flush,
+ .startup = sprd_mbox_startup,
+ .shutdown = sprd_mbox_shutdown,
+};
+
+static void sprd_mbox_disable(void *data)
+{
+ struct sprd_mbox_priv *priv = data;
+
+ clk_disable_unprepare(priv->clk);
+}
+
+static int sprd_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sprd_mbox_priv *priv;
+ int ret, inbox_irq, outbox_irq;
+ unsigned long id;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ /*
+ * The Spreadtrum mailbox uses an inbox to send messages to the target
+ * core, and uses an outbox to receive messages from other cores.
+ *
+	 * Thus the mailbox controller supplies two different register
+	 * addresses and IRQ numbers for the inbox and the outbox.
+ */
+ priv->inbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->inbox_base))
+ return PTR_ERR(priv->inbox_base);
+
+ priv->outbox_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->outbox_base))
+ return PTR_ERR(priv->outbox_base);
+
+ priv->clk = devm_clk_get(dev, "enable");
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get mailbox clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, sprd_mbox_disable, priv);
+ if (ret) {
+ dev_err(dev, "failed to add mailbox disable action\n");
+ return ret;
+ }
+
+ inbox_irq = platform_get_irq(pdev, 0);
+ if (inbox_irq < 0)
+ return inbox_irq;
+
+ ret = devm_request_irq(dev, inbox_irq, sprd_mbox_inbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request inbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ outbox_irq = platform_get_irq(pdev, 1);
+ if (outbox_irq < 0)
+ return outbox_irq;
+
+ ret = devm_request_irq(dev, outbox_irq, sprd_mbox_outbox_isr,
+ IRQF_NO_SUSPEND, dev_name(dev), priv);
+ if (ret) {
+ dev_err(dev, "failed to request outbox IRQ: %d\n", ret);
+ return ret;
+ }
+
+ /* Get the default outbox FIFO depth */
+ priv->outbox_fifo_depth =
+ readl(priv->outbox_base + SPRD_MBOX_FIFO_DEPTH) + 1;
+ priv->mbox.dev = dev;
+ priv->mbox.chans = &priv->chan[0];
+ priv->mbox.num_chans = SPRD_MBOX_CHAN_MAX;
+ priv->mbox.ops = &sprd_mbox_ops;
+ priv->mbox.txdone_irq = true;
+
+ for (id = 0; id < SPRD_MBOX_CHAN_MAX; id++)
+ priv->chan[id].con_priv = (void *)id;
+
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret) {
+ dev_err(dev, "failed to register mailbox: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_mbox_of_match[] = {
+ { .compatible = "sprd,sc9860-mailbox", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_mbox_of_match);
+
+static struct platform_driver sprd_mbox_driver = {
+ .driver = {
+ .name = "sprd-mailbox",
+ .of_match_table = sprd_mbox_of_match,
+ },
+ .probe = sprd_mbox_probe,
+};
+module_platform_driver(sprd_mbox_driver);
+
+MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 86887c9a349a..f44079d62b1a 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -504,10 +504,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
- if (IS_ERR(mchan->req_buf)) {
+ if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->req_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret);
@@ -520,10 +519,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
- if (IS_ERR(mchan->resp_buf)) {
+ if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->resp_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -543,10 +541,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->req_buf_size = resource_size(&res);
mchan->req_buf = devm_ioremap(mdev, res.start,
mchan->req_buf_size);
- if (IS_ERR(mchan->req_buf)) {
+ if (!mchan->req_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->req_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -559,10 +556,9 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
mchan->resp_buf_size = resource_size(&res);
mchan->resp_buf = devm_ioremap(mdev, res.start,
mchan->resp_buf_size);
- if (IS_ERR(mchan->resp_buf)) {
+ if (!mchan->resp_buf) {
dev_err(mdev, "Unable to map IPI buffer I/O memory\n");
- ret = PTR_ERR(mchan->resp_buf);
- return ret;
+ return -ENOMEM;
}
} else if (ret != -ENODEV) {
dev_err(mdev, "Unmatched resource %s.\n", name);
@@ -668,10 +664,9 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
/* IPI IRQ */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "unable to find IPI IRQ.\n");
+ if (ret < 0)
goto free_mbox_dev;
- }
+
pdata->irq = ret;
ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt,
IRQF_SHARED, dev_name(dev), pdata);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index d6d5ab23c088..6665b56865b7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,7 @@ config DM_UNSTRIPED
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
+ depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n)
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
@@ -336,6 +337,14 @@ config DM_WRITECACHE
The writecache target doesn't cache reads because reads are supposed
to be cached in standard RAM.
+config DM_EBS
+ tristate "Emulated block size target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ select DM_BUFIO
+ help
+	  dm-ebs emulates a smaller logical block size on backing devices
+	  with a larger one (e.g. 512-byte sectors on 4K-native disks).
+
config DM_ERA
tristate "Era target (EXPERIMENTAL)"
depends on BLK_DEV_DM
@@ -443,6 +452,17 @@ config DM_MULTIPATH_ST
If unsure, say N.
+config DM_MULTIPATH_HST
+ tristate "I/O Path Selector based on historical service time"
+ depends on DM_MULTIPATH
+ help
+ This path selector is a dynamic load balancer which selects
+ the path expected to complete the incoming I/O in the shortest
+ time by comparing estimated service time (based on historical
+ service time).
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index d91a7edcd2ab..31840f95cd40 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -17,6 +17,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-background-tracker.o
dm-cache-smq-y += dm-cache-policy-smq.o
+dm-ebs-y += dm-ebs-target.o
dm-era-y += dm-era-target.o
dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
+obj-$(CONFIG_DM_MULTIPATH_HST) += dm-historical-service-time.o
obj-$(CONFIG_DM_SWITCH) += dm-switch.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
@@ -65,6 +67,7 @@ obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
+obj-$(CONFIG_DM_EBS) += dm-ebs.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_CLONE) += dm-clone.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d1786cfd7f22..6d1565021d74 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -256,12 +256,35 @@ static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
if (b->block == block)
return b;
- n = (b->block < block) ? n->rb_left : n->rb_right;
+ n = block < b->block ? n->rb_left : n->rb_right;
}
return NULL;
}
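+/*
+ * Like __find(), but if no buffer exists at @block, return the buffer with
+ * the lowest block number greater than @block, or NULL if there is none.
+ */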
+static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
+{
+ struct rb_node *n = c->buffer_tree.rb_node;
+ struct dm_buffer *b;
+ struct dm_buffer *best = NULL;
+
+ while (n) {
+ b = container_of(n, struct dm_buffer, node);
+
+ if (b->block == block)
+ return b;
+
+ if (block <= b->block) {
+ n = n->rb_left;
+ best = b;
+ } else {
+ n = n->rb_right;
+ }
+ }
+
+ return best;
+}
+
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
@@ -276,8 +299,8 @@ static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
}
parent = *new;
- new = (found->block < b->block) ?
- &((*new)->rb_left) : &((*new)->rb_right);
+ new = b->block < found->block ?
+ &found->node.rb_left : &found->node.rb_right;
}
rb_link_node(&b->node, parent, new);
@@ -631,6 +654,19 @@ dmio:
submit_bio(bio);
}
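+/*
+ * Convert a buffer's block number into its starting sector on the underlying
+ * device: shift when sectors_per_block_bits is valid, multiply otherwise,
+ * then add the client's start offset.
+ */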
+static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
+{
+ sector_t sector;
+
+ if (likely(c->sectors_per_block_bits >= 0))
+ sector = block << c->sectors_per_block_bits;
+ else
+ sector = block * (c->block_size >> SECTOR_SHIFT);
+ sector += c->start;
+
+ return sector;
+}
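+
+/*
+ * Example (illustrative): with 4K blocks (sectors_per_block_bits == 3) and
+ * c->start == 2048, block 5 maps to sector (5 << 3) + 2048 == 2088.
+ */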
+
static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned n_sectors;
@@ -639,11 +675,7 @@ static void submit_io(struct dm_buffer *b, int rw, void (*end_io)(struct dm_buff
b->end_io = end_io;
- if (likely(b->c->sectors_per_block_bits >= 0))
- sector = b->block << b->c->sectors_per_block_bits;
- else
- sector = b->block * (b->c->block_size >> SECTOR_SHIFT);
- sector += b->c->start;
+ sector = block_to_sector(b->c, b->block);
if (rw != REQ_OP_WRITE) {
n_sectors = b->c->block_size >> SECTOR_SHIFT;
@@ -1326,6 +1358,30 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
+ * Use dm-io to send a discard request to the device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
+{
+ struct dm_io_request io_req = {
+ .bi_op = REQ_OP_DISCARD,
+ .bi_op_flags = REQ_SYNC,
+ .mem.type = DM_IO_KMEM,
+ .mem.ptr.addr = NULL,
+ .client = c->dm_io,
+ };
+ struct dm_io_region io_reg = {
+ .bdev = c->bdev,
+ .sector = block_to_sector(c, block),
+ .count = block_to_sector(c, count),
+ };
+
+ BUG_ON(dm_bufio_in_request());
+
+ return dm_io(&io_req, 1, &io_reg, NULL);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
+
+/*
* We first delete any other buffer that may be at that new location.
*
* Then, we write the buffer to the original location if it was dirty.
@@ -1401,6 +1457,14 @@ retry:
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
+static void forget_buffer_locked(struct dm_buffer *b)
+{
+ if (likely(!b->hold_count) && likely(!b->state)) {
+ __unlink_buffer(b);
+ __free_buffer_wake(b);
+ }
+}
+
/*
* Free the given buffer.
*
@@ -1414,15 +1478,36 @@ void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
dm_bufio_lock(c);
b = __find(c, block);
- if (b && likely(!b->hold_count) && likely(!b->state)) {
- __unlink_buffer(b);
- __free_buffer_wake(b);
- }
+ if (b)
+ forget_buffer_locked(b);
dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
+{
+ struct dm_buffer *b;
+ sector_t end_block = block + n_blocks;
+
+ while (block < end_block) {
+ dm_bufio_lock(c);
+
+ b = __find_next(c, block);
+ if (b) {
+ block = b->block + 1;
+ forget_buffer_locked(b);
+ }
+
+ dm_bufio_unlock(c);
+
+ if (!b)
+ break;
+ }
+
+}
+EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
+
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
c->minimum_buffers = n;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3df90daba89e..000ddfab5ba0 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -34,7 +34,9 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
+#include <linux/key-type.h>
#include <keys/user-type.h>
+#include <keys/encrypted-type.h>
#include <linux/device-mapper.h>
@@ -212,7 +214,7 @@ struct crypt_config {
struct mutex bio_alloc_lock;
u8 *authenc_key; /* space for keys in authenc() format (if used) */
- u8 key[0];
+ u8 key[];
};
#define MIN_IOS 64
@@ -2215,12 +2217,47 @@ static bool contains_whitespace(const char *str)
return false;
}
+static int set_key_user(struct crypt_config *cc, struct key *key)
+{
+ const struct user_key_payload *ukp;
+
+ ukp = user_key_payload_locked(key);
+ if (!ukp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != ukp->datalen)
+ return -EINVAL;
+
+ memcpy(cc->key, ukp->data, cc->key_size);
+
+ return 0;
+}
+
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
+static int set_key_encrypted(struct crypt_config *cc, struct key *key)
+{
+ const struct encrypted_key_payload *ekp;
+
+ ekp = key->payload.data[0];
+ if (!ekp)
+ return -EKEYREVOKED;
+
+ if (cc->key_size != ekp->decrypted_datalen)
+ return -EINVAL;
+
+ memcpy(cc->key, ekp->decrypted_data, cc->key_size);
+
+ return 0;
+}
+#endif /* CONFIG_ENCRYPTED_KEYS */
+
static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
{
char *new_key_string, *key_desc;
int ret;
+ struct key_type *type;
struct key *key;
- const struct user_key_payload *ukp;
+ int (*set_key)(struct crypt_config *cc, struct key *key);
/*
* Reject key_string with whitespace. dm core currently lacks code for
@@ -2236,16 +2273,26 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
return -EINVAL;
- if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
- strncmp(key_string, "user:", key_desc - key_string + 1))
+ if (!strncmp(key_string, "logon:", key_desc - key_string + 1)) {
+ type = &key_type_logon;
+ set_key = set_key_user;
+ } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) {
+ type = &key_type_user;
+ set_key = set_key_user;
+#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE)
+ } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) {
+ type = &key_type_encrypted;
+ set_key = set_key_encrypted;
+#endif
+ } else {
return -EINVAL;
+ }
new_key_string = kstrdup(key_string, GFP_KERNEL);
if (!new_key_string)
return -ENOMEM;
- key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
- key_desc + 1, NULL);
+ key = request_key(type, key_desc + 1, NULL);
if (IS_ERR(key)) {
kzfree(new_key_string);
return PTR_ERR(key);
@@ -2253,23 +2300,14 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
down_read(&key->sem);
- ukp = user_key_payload_locked(key);
- if (!ukp) {
- up_read(&key->sem);
- key_put(key);
- kzfree(new_key_string);
- return -EKEYREVOKED;
- }
-
- if (cc->key_size != ukp->datalen) {
+ ret = set_key(cc, key);
+ if (ret < 0) {
up_read(&key->sem);
key_put(key);
kzfree(new_key_string);
- return -EINVAL;
+ return ret;
}
- memcpy(cc->key, ukp->data, cc->key_size);
-
up_read(&key->sem);
key_put(key);
@@ -2323,7 +2361,7 @@ static int get_key_size(char **key_string)
return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
}
-#endif
+#endif /* CONFIG_KEYS */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
@@ -3274,7 +3312,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
- max_t(unsigned short, limits->logical_block_size, cc->sector_size);
+ max_t(unsigned, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
@@ -3282,7 +3320,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
new file mode 100644
index 000000000000..44451276f128
--- /dev/null
+++ b/drivers/md/dm-ebs-target.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) 2020 Red Hat GmbH
+ *
+ * This file is released under the GPL.
+ *
+ * Device-mapper target to emulate smaller logical block
+ * size on backing devices exposing (natively) larger ones.
+ *
+ * E.g. 512 byte sector emulation on 4K native disks.
+ */
+
+#include "dm.h"
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/dm-bufio.h>
+
+#define DM_MSG_PREFIX "ebs"
+
+static void ebs_dtr(struct dm_target *ti);
+
+/* Emulated block size context. */
+struct ebs_c {
+ struct dm_dev *dev; /* Underlying device to emulate block size on. */
+ struct dm_bufio_client *bufio; /* Use dm-bufio for read and read-modify-write processing. */
+ struct workqueue_struct *wq; /* Workqueue for ^ processing of bios. */
+ struct work_struct ws; /* Work item used for ^. */
+ struct bio_list bios_in; /* Worker bios input list. */
+ spinlock_t lock; /* Guard bios input list above. */
+ sector_t start; /* <start> table line argument, see ebs_ctr below. */
+ unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
+ unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
+ unsigned char block_shift; /* bitshift sectors -> blocks used in dm-bufio API. */
+ bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
+};
+
+static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)
+{
+ return sector >> ec->block_shift;
+}
+
+static inline sector_t __block_mod(sector_t sector, unsigned int bs)
+{
+ return sector & (bs - 1);
+}
+
+/* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
+static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
+
+ return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
+}
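+
+/*
+ * Example (illustrative): with ec->u_bs == 8 sectors, a bio starting at
+ * sector 3 with bio_sectors() == 10 gives end_sector = (3 & 7) + 10 = 13,
+ * i.e. (13 >> 3) + 1 = 2 underlying blocks (partial first and last blocks).
+ */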
+
+static inline bool __ebs_check_bs(unsigned int bs)
+{
+ return bs && is_power_of_2(bs);
+}
+
+/*
+ * READ/WRITE:
+ *
+ * copy data between bufio blocks and the bio vector's (partial/overlapping) pages.
+ */
+static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)
+{
+ int r = 0;
+ unsigned char *ba, *pa;
+ unsigned int cur_len;
+ unsigned int bv_len = bv->bv_len;
+ unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
+ sector_t block = __sector_to_block(ec, iter->bi_sector);
+ struct dm_buffer *b;
+
+ if (unlikely(!bv->bv_page || !bv_len))
+ return -EIO;
+
+ pa = page_address(bv->bv_page) + bv->bv_offset;
+
+ /* Handle overlapping page <-> blocks */
+ while (bv_len) {
+ cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);
+
+ /* Avoid reading for writes if the bio vector's page overwrites the block completely. */
+ if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
+ ba = dm_bufio_read(ec->bufio, block, &b);
+ else
+ ba = dm_bufio_new(ec->bufio, block, &b);
+
+ if (unlikely(IS_ERR(ba))) {
+ /*
+ * Carry on with the next buffer, if any, to issue all
+ * possible data, but return the error.
+ */
+ r = PTR_ERR(ba);
+ } else {
+ /* Copy data between the bio and the buffer if read/new was successful above. */
+ ba += buf_off;
+ if (rw == READ) {
+ memcpy(pa, ba, cur_len);
+ flush_dcache_page(bv->bv_page);
+ } else {
+ flush_dcache_page(bv->bv_page);
+ memcpy(ba, pa, cur_len);
+ dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
+ }
+
+ dm_bufio_release(b);
+ }
+
+ pa += cur_len;
+ bv_len -= cur_len;
+ buf_off = 0;
+ block++;
+ }
+
+ return r;
+}
+
+/* READ/WRITE: iterate the bio's bio_vecs, copying between (partial) pages and bufio blocks. */
+static int __ebs_rw_bio(struct ebs_c *ec, int rw, struct bio *bio)
+{
+ int r = 0, rr;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ bio_for_each_bvec(bv, bio, iter) {
+ rr = __ebs_rw_bvec(ec, rw, &bv, &iter);
+ if (rr)
+ r = rr;
+ }
+
+ return r;
+}
+
+/*
+ * Discard bio's blocks, i.e. pass discards down.
+ *
+ * Avoid discarding partial blocks at beginning and end;
+ * return 0 in case no blocks can be discarded as a result.
+ */
+static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t block, blocks, sector = bio->bi_iter.bi_sector;
+
+ block = __sector_to_block(ec, sector);
+ blocks = __nr_blocks(ec, bio);
+
+ /*
+ * Skip a partial first underlying block (which __nr_blocks()
+ * may have counted as the only block).
+ */
+ if (__block_mod(sector, ec->u_bs)) {
+ block++;
+ blocks--;
+ }
+
+ /* Partial last underlying block if any. */
+ if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
+ blocks--;
+
+ return blocks ? dm_bufio_issue_discard(ec->bufio, block, blocks) : 0;
+}
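+
+/*
+ * Example (illustrative): with ec->u_bs == 8, a discard covering sectors
+ * 3..26 spans underlying blocks 0..3, but only the fully covered blocks 1
+ * and 2 are passed down; the partial first and last blocks are preserved.
+ */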
+
+/* Release the bio's blocks from the bufio cache. */
+static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
+{
+ sector_t blocks, sector = bio->bi_iter.bi_sector;
+
+ blocks = __nr_blocks(ec, bio);
+
+ dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
+}
+
+/* Worker function to process incoming bios. */
+static void __ebs_process_bios(struct work_struct *ws)
+{
+ int r;
+ bool write = false;
+ sector_t block1, block2;
+ struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+
+ spin_lock_irq(&ec->lock);
+ bios = ec->bios_in;
+ bio_list_init(&ec->bios_in);
+ spin_unlock_irq(&ec->lock);
+
+ /* Prefetch all read and any mis-aligned write buffers */
+ bio_list_for_each(bio, &bios) {
+ block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
+ if (bio_op(bio) == REQ_OP_READ)
+ dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
+ else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
+ block2 = __sector_to_block(ec, bio_end_sector(bio));
+ if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
+ dm_bufio_prefetch(ec->bufio, block1, 1);
+ if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
+ dm_bufio_prefetch(ec->bufio, block2, 1);
+ }
+ }
+
+ bio_list_for_each(bio, &bios) {
+ r = -EIO;
+ if (bio_op(bio) == REQ_OP_READ)
+ r = __ebs_rw_bio(ec, READ, bio);
+ else if (bio_op(bio) == REQ_OP_WRITE) {
+ write = true;
+ r = __ebs_rw_bio(ec, WRITE, bio);
+ } else if (bio_op(bio) == REQ_OP_DISCARD) {
+ __ebs_forget_bio(ec, bio);
+ r = __ebs_discard_bio(ec, bio);
+ }
+
+ if (r < 0)
+ bio->bi_status = errno_to_blk_status(r);
+ }
+
+ /*
+ * We write dirty buffers after processing I/O on them,
+ * but before ending the bios, thus honoring REQ_FUA/REQ_SYNC.
+ */
+ r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;
+
+ while ((bio = bio_list_pop(&bios))) {
+ /* Error out writes if writing dirty buffers failed; end all other bios normally. */
+ if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
+ bio_io_error(bio);
+ else
+ bio_endio(bio);
+ }
+}
+
+/*
+ * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
+ *
+ * <dev_path>: path of the underlying device
+ * <offset>: offset into <dev_path> in units of 512 byte sectors
+ * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
+ * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
+ * optional; if not supplied, the logical block size of the underlying device is used
+ */
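+/*
+ * Example (illustrative): "ebs <dev_path> 0 1 8" exposes 512 byte logical
+ * blocks (1 sector) on a 4K (8 sector) backing device, starting at offset 0.
+ */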
+static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int r;
+ unsigned short tmp1;
+ unsigned long long tmp;
+ char dummy;
+ struct ebs_c *ec;
+
+ if (argc < 3 || argc > 4) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ ec = ti->private = kzalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec) {
+ ti->error = "Cannot allocate ebs context";
+ return -ENOMEM;
+ }
+
+ r = -EINVAL;
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 ||
+ tmp != (sector_t)tmp ||
+ (sector_t)tmp >= ti->len) {
+ ti->error = "Invalid device offset sector";
+ goto bad;
+ }
+ ec->start = tmp;
+
+ if (sscanf(argv[2], "%hu%c", &tmp1, &dummy) != 1 ||
+ !__ebs_check_bs(tmp1) ||
+ to_bytes(tmp1) > PAGE_SIZE) {
+ ti->error = "Invalid emulated block size";
+ goto bad;
+ }
+ ec->e_bs = tmp1;
+
+ if (argc > 3) {
+ if (sscanf(argv[3], "%hu%c", &tmp1, &dummy) != 1 || !__ebs_check_bs(tmp1)) {
+ ti->error = "Invalid underlying block size";
+ goto bad;
+ }
+ ec->u_bs = tmp1;
+ ec->u_bs_set = true;
+ } else
+ ec->u_bs_set = false;
+
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
+ if (r) {
+ ti->error = "Device lookup failed";
+ ec->dev = NULL;
+ goto bad;
+ }
+
+ r = -EINVAL;
+ if (!ec->u_bs_set) {
+ ec->u_bs = to_sector(bdev_logical_block_size(ec->dev->bdev));
+ if (!__ebs_check_bs(ec->u_bs)) {
+ ti->error = "Invalid retrieved underlying block size";
+ goto bad;
+ }
+ }
+
+ if (!ec->u_bs_set && ec->e_bs == ec->u_bs)
+ DMINFO("Emulation superfluous: emulated equal to underlying block size");
+
+ if (__block_mod(ec->start, ec->u_bs)) {
+ ti->error = "Device offset must be multiple of underlying block size";
+ goto bad;
+ }
+
+ ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1, 0, NULL, NULL);
+ if (IS_ERR(ec->bufio)) {
+ ti->error = "Cannot create dm bufio client";
+ r = PTR_ERR(ec->bufio);
+ ec->bufio = NULL;
+ goto bad;
+ }
+
+ ec->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!ec->wq) {
+ ti->error = "Cannot create dm-" DM_MSG_PREFIX " workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ ec->block_shift = __ffs(ec->u_bs);
+ INIT_WORK(&ec->ws, &__ebs_process_bios);
+ bio_list_init(&ec->bios_in);
+ spin_lock_init(&ec->lock);
+
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_secure_erase_bios = 0;
+ ti->num_write_same_bios = 0;
+ ti->num_write_zeroes_bios = 0;
+ return 0;
+bad:
+ ebs_dtr(ti);
+ return r;
+}
+
+static void ebs_dtr(struct dm_target *ti)
+{
+ struct ebs_c *ec = ti->private;
+
+ if (ec->wq)
+ destroy_workqueue(ec->wq);
+ if (ec->bufio)
+ dm_bufio_client_destroy(ec->bufio);
+ if (ec->dev)
+ dm_put_device(ti, ec->dev);
+ kfree(ec);
+}
+
+static int ebs_map(struct dm_target *ti, struct bio *bio)
+{
+ struct ebs_c *ec = ti->private;
+
+ bio_set_dev(bio, ec->dev->bdev);
+ bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+ if (unlikely(bio->bi_opf & REQ_OP_FLUSH))
+ return DM_MAPIO_REMAPPED;
+ /*
+ * Only queue for bufio processing if the bio touches partial or
+ * overlapping buffers, or if ebs == ubs (emulation kept only to
+ * allow measuring dm-bufio overhead).
+ */
+ if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
+ __block_mod(bio_end_sector(bio), ec->u_bs) ||
+ ec->e_bs == ec->u_bs)) {
+ spin_lock_irq(&ec->lock);
+ bio_list_add(&ec->bios_in, bio);
+ spin_unlock_irq(&ec->lock);
+
+ queue_work(ec->wq, &ec->ws);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /* Forget any buffer content relative to this direct backing device I/O. */
+ __ebs_forget_bio(ec, bio);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void ebs_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ struct ebs_c *ec = ti->private;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ *result = '\0';
+ break;
+ case STATUSTYPE_TABLE:
+ snprintf(result, maxlen, ec->u_bs_set ? "%s %llu %u %u" : "%s %llu %u",
+ ec->dev->name, (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
+ break;
+ }
+}
+
+static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+ struct ebs_c *ec = ti->private;
+ struct dm_dev *dev = ec->dev;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ *bdev = dev->bdev;
+ return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+}
+
+static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct ebs_c *ec = ti->private;
+
+ limits->logical_block_size = to_bytes(ec->e_bs);
+ limits->physical_block_size = to_bytes(ec->u_bs);
+ limits->alignment_offset = limits->physical_block_size;
+ blk_limits_io_min(limits, limits->logical_block_size);
+}
+
+static int ebs_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct ebs_c *ec = ti->private;
+
+ return fn(ti, ec->dev, ec->start, ti->len, data);
+}
+
+static struct target_type ebs_target = {
+ .name = "ebs",
+ .version = {1, 0, 1},
+ .features = DM_TARGET_PASSES_INTEGRITY,
+ .module = THIS_MODULE,
+ .ctr = ebs_ctr,
+ .dtr = ebs_dtr,
+ .map = ebs_map,
+ .status = ebs_status,
+ .io_hints = ebs_io_hints,
+ .prepare_ioctl = ebs_prepare_ioctl,
+ .iterate_devices = ebs_iterate_devices,
+};
+
+static int __init dm_ebs_init(void)
+{
+ int r = dm_register_target(&ebs_target);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ return r;
+}
+
+static void dm_ebs_exit(void)
+{
+ dm_unregister_target(&ebs_target);
+}
+
+module_init(dm_ebs_init);
+module_exit(dm_ebs_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_DESCRIPTION(DM_NAME " emulated block size target");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c
new file mode 100644
index 000000000000..186f91e2752c
--- /dev/null
+++ b/drivers/md/dm-historical-service-time.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Historical Service Time
+ *
+ * Keeps a time-weighted exponential moving average of the historical
+ * service time. Estimates future service time based on the historical
+ * service time and the number of outstanding requests.
+ *
+ * Marks paths stale if they have not finished within hst *
+ * num_paths. If a path is stale and unused, we will send a single
+ * request to probe in case the path has improved. This situation
+ * generally arises if the path is so much worse than others that it
+ * will never have the best estimated service time, or if the entire
+ * multipath device is unused. If a path is stale and in use, limit the
+ * number of requests it can receive with the assumption that the path
+ * has become degraded.
+ *
+ * To avoid repeatedly calculating exponents for time weighting, times
+ * are split into HST_WEIGHT_COUNT buckets each (1 << HST_BUCKET_SHIFT)
+ * ns, and the weighting is pre-calculated.
+ *
+ */
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+
+#define DM_MSG_PREFIX "multipath historical-service-time"
+#define HST_MIN_IO 1
+#define HST_VERSION "0.1.1"
+
+#define HST_FIXED_SHIFT 10 /* 10 bits of decimal precision */
+#define HST_FIXED_MAX (ULLONG_MAX >> HST_FIXED_SHIFT)
+#define HST_FIXED_1 (1 << HST_FIXED_SHIFT)
+#define HST_FIXED_95 972
+#define HST_MAX_INFLIGHT HST_FIXED_1
+#define HST_BUCKET_SHIFT 24 /* Buckets are ~ 16ms */
+#define HST_WEIGHT_COUNT 64ULL
+
+struct selector {
+ struct list_head valid_paths;
+ struct list_head failed_paths;
+ int valid_count;
+ spinlock_t lock;
+
+ unsigned int weights[HST_WEIGHT_COUNT];
+ unsigned int threshold_multiplier;
+};
+
+struct path_info {
+ struct list_head list;
+ struct dm_path *path;
+ unsigned int repeat_count;
+
+ spinlock_t lock;
+
+ u64 historical_service_time; /* Fixed point */
+
+ u64 stale_after;
+ u64 last_finish;
+
+ u64 outstanding;
+};
+
+/**
+ * fixed_power - compute: x^n, in O(log n) time
+ *
+ * @x: base of the power
+ * @frac_bits: fractional bits of @x
+ * @n: power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
+ *
+ * (see: kernel/sched/loadavg.c)
+ */
+static u64 fixed_power(u64 x, unsigned int frac_bits, unsigned int n)
+{
+ unsigned long result = 1UL << frac_bits;
+
+ if (n) {
+ for (;;) {
+ if (n & 1) {
+ result *= x;
+ result += 1UL << (frac_bits - 1);
+ result >>= frac_bits;
+ }
+ n >>= 1;
+ if (!n)
+ break;
+ x *= x;
+ x += 1UL << (frac_bits - 1);
+ x >>= frac_bits;
+ }
+ }
+
+ return result;
+}
+
+/*
+ * Calculate the next value of an exponential moving average
+ * a_1 = a_0 * e + a * (1 - e)
+ *
+ * @last: [0, ULLONG_MAX >> HST_FIXED_SHIFT]
+ * @next: [0, ULLONG_MAX >> HST_FIXED_SHIFT]
+ * @weight: [0, HST_FIXED_1]
+ *
+ * Note:
+ * To account for multiple periods in the same calculation,
+ * a_n = a_0 * e^n + a * (1 - e^n),
+ * so call fixed_ema(last, next, pow(weight, N))
+ */
+static u64 fixed_ema(u64 last, u64 next, u64 weight)
+{
+ last *= weight;
+ last += next * (HST_FIXED_1 - weight);
+ last += 1ULL << (HST_FIXED_SHIFT - 1);
+ return last >> HST_FIXED_SHIFT;
+}
+
+static struct selector *alloc_selector(void)
+{
+ struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
+
+ if (s) {
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
+ s->valid_count = 0;
+ }
+
+ return s;
+}
+
+/*
+ * Get the weight for a given time span.
+ */
+static u64 hst_weight(struct path_selector *ps, u64 delta)
+{
+ struct selector *s = ps->context;
+ int bucket = clamp(delta >> HST_BUCKET_SHIFT, 0ULL,
+ HST_WEIGHT_COUNT - 1);
+
+ return s->weights[bucket];
+}
+
+/*
+ * Set up the weights array.
+ *
+ * weights[len-1] = 0
+ * weights[n] = base ^ (n + 1)
+ */
+static void hst_set_weights(struct path_selector *ps, unsigned int base)
+{
+ struct selector *s = ps->context;
+ int i;
+
+ if (base >= HST_FIXED_1)
+ return;
+
+ for (i = 0; i < HST_WEIGHT_COUNT - 1; i++)
+ s->weights[i] = fixed_power(base, HST_FIXED_SHIFT, i + 1);
+ s->weights[HST_WEIGHT_COUNT - 1] = 0;
+}
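+
+/*
+ * Example (illustrative): with base == HST_FIXED_95 (972, ~0.95) the array
+ * decays as weights[0] == 972, weights[1] == 923, weights[2] == 876, ...,
+ * and deltas of 63 or more buckets (roughly one second) get weight 0.
+ */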
+
+static int hst_create(struct path_selector *ps, unsigned int argc, char **argv)
+{
+ struct selector *s;
+ unsigned int base_weight = HST_FIXED_95;
+ unsigned int threshold_multiplier = 0;
+ char dummy;
+
+ /*
+ * Arguments: [<base_weight> [<threshold_multiplier>]]
+ * <base_weight>: Base weight for ema [0, 1024) 10-bit fixed point. A
+ * value of 0 will completely ignore any history.
+ * If not given, default (HST_FIXED_95) is used.
+ * <threshold_multiplier>: Minimum threshold multiplier for paths to
+ * be considered different. That is, a path is
+ * considered different iff (p1 > N * p2) where p1
+ * is the path with higher service time. A threshold
+ * of 1 or 0 has no effect. Defaults to 0.
+ */
+ if (argc > 2)
+ return -EINVAL;
+
+ if (argc && (sscanf(argv[0], "%u%c", &base_weight, &dummy) != 1 ||
+ base_weight >= HST_FIXED_1)) {
+ return -EINVAL;
+ }
+
+ if (argc > 1 && (sscanf(argv[1], "%u%c",
+ &threshold_multiplier, &dummy) != 1)) {
+ return -EINVAL;
+ }
+
+ s = alloc_selector();
+ if (!s)
+ return -ENOMEM;
+
+ ps->context = s;
+
+ hst_set_weights(ps, base_weight);
+ s->threshold_multiplier = threshold_multiplier;
+ return 0;
+}
+
+static void free_paths(struct list_head *paths)
+{
+ struct path_info *pi, *next;
+
+ list_for_each_entry_safe(pi, next, paths, list) {
+ list_del(&pi->list);
+ kfree(pi);
+ }
+}
+
+static void hst_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+
+ free_paths(&s->valid_paths);
+ free_paths(&s->failed_paths);
+ kfree(s);
+ ps->context = NULL;
+}
+
+static int hst_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen)
+{
+ unsigned int sz = 0;
+ struct path_info *pi;
+
+ if (!path) {
+ struct selector *s = ps->context;
+
+ DMEMIT("2 %u %u ", s->weights[0], s->threshold_multiplier);
+ } else {
+ pi = path->pscontext;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%llu %llu %llu ", pi->historical_service_time,
+ pi->outstanding, pi->stale_after);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("0 ");
+ break;
+ }
+ }
+
+ return sz;
+}
+
+static int hst_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ unsigned int repeat_count = HST_MIN_IO;
+ char dummy;
+ unsigned long flags;
+
+ /*
+ * Arguments: [<repeat_count>]
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (HST_MIN_IO) is used.
+ */
+ if (argc > 1) {
+ *error = "historical-service-time ps: incorrect number of arguments";
+ return -EINVAL;
+ }
+
+ if (argc && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
+ *error = "historical-service-time ps: invalid repeat count";
+ return -EINVAL;
+ }
+
+ /* allocate the path */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "historical-service-time ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ pi->repeat_count = repeat_count;
+
+ pi->historical_service_time = HST_FIXED_1;
+
+ spin_lock_init(&pi->lock);
+ pi->outstanding = 0;
+
+ pi->stale_after = 0;
+ pi->last_finish = 0;
+
+ path->pscontext = pi;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_add_tail(&pi->list, &s->valid_paths);
+ s->valid_count++;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void hst_fail_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_move(&pi->list, &s->failed_paths);
+ s->valid_count--;
+ spin_unlock_irqrestore(&s->lock, flags);
+}
+
+static int hst_reinstate_path(struct path_selector *ps, struct dm_path *path)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ list_move_tail(&pi->list, &s->valid_paths);
+ s->valid_count++;
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void hst_fill_compare(struct path_info *pi, u64 *hst,
+ u64 *out, u64 *stale)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pi->lock, flags);
+ *hst = pi->historical_service_time;
+ *out = pi->outstanding;
+ *stale = pi->stale_after;
+ spin_unlock_irqrestore(&pi->lock, flags);
+}
+
+/*
+ * Compare the estimated service time of 2 paths, pi1 and pi2,
+ * for the incoming I/O.
+ *
+ * Returns:
+ * < 0 : pi1 is better
+ * 0 : no difference between pi1 and pi2
+ * > 0 : pi2 is better
+ *
+ */
+static long long hst_compare(struct path_info *pi1, struct path_info *pi2,
+ u64 time_now, struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+ u64 hst1, hst2;
+ long long out1, out2, stale1, stale2;
+ int pi2_better, over_threshold;
+
+ hst_fill_compare(pi1, &hst1, &out1, &stale1);
+ hst_fill_compare(pi2, &hst2, &out2, &stale2);
+
+ /* Check here if the estimated latencies of the two paths are too
+ * similar. If so, we skip the extra calculation and just compare
+ * outstanding requests; any unloaded path will be preferred.
+ */
+ if (hst1 > hst2)
+ over_threshold = hst1 > (s->threshold_multiplier * hst2);
+ else
+ over_threshold = hst2 > (s->threshold_multiplier * hst1);
+
+ if (!over_threshold)
+ return out1 - out2;
+
+ /*
+ * If an unloaded path is stale, choose it. If both paths are unloaded,
+ * choose path that is the most stale.
+ * (If one path is loaded, choose the other)
+ */
+ if ((!out1 && stale1 < time_now) || (!out2 && stale2 < time_now) ||
+ (!out1 && !out2))
+ return (!out2 * stale1) - (!out1 * stale2);
+
+ /* Compare estimated service time. If outstanding is the same, we
+ * don't need to multiply.
+ */
+ if (out1 == out2) {
+ pi2_better = hst1 > hst2;
+ } else {
+ /* Potential overflow with out >= 1024 */
+ if (unlikely(out1 >= HST_MAX_INFLIGHT ||
+ out2 >= HST_MAX_INFLIGHT)) {
+ /* If over 1023 in-flights, we may overflow if hst
+ * is at max. (With this shift we still overflow at
+ * 1048576 in-flights, which is high enough).
+ */
+ hst1 >>= HST_FIXED_SHIFT;
+ hst2 >>= HST_FIXED_SHIFT;
+ }
+ pi2_better = (1 + out1) * hst1 > (1 + out2) * hst2;
+ }
+
+ /* In the case that the 'winner' is stale, limit to equal usage. */
+ if (pi2_better) {
+ if (stale2 < time_now)
+ return out1 - out2;
+ return 1;
+ }
+ if (stale1 < time_now)
+ return out1 - out2;
+ return -1;
+}
+
+static struct dm_path *hst_select_path(struct path_selector *ps,
+ size_t nr_bytes)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL, *best = NULL;
+ u64 time_now = sched_clock();
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+ if (list_empty(&s->valid_paths))
+ goto out;
+
+ list_for_each_entry(pi, &s->valid_paths, list) {
+ if (!best || (hst_compare(pi, best, time_now, ps) < 0))
+ best = pi;
+ }
+
+ if (!best)
+ goto out;
+
+ /* Move last used path to end (least preferred in case of ties) */
+ list_move_tail(&best->list, &s->valid_paths);
+
+ ret = best->path;
+
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
+}
+
+static int hst_start_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes)
+{
+ struct path_info *pi = path->pscontext;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pi->lock, flags);
+ pi->outstanding++;
+ spin_unlock_irqrestore(&pi->lock, flags);
+
+ return 0;
+}
+
+static u64 path_service_time(struct path_info *pi, u64 start_time)
+{
+ u64 sched_now = ktime_get_ns();
+
+ /* if a previous disk request has finished after this IO was
+ * sent to the hardware, pretend the submission happened
+ * serially.
+ */
+ if (time_after64(pi->last_finish, start_time))
+ start_time = pi->last_finish;
+
+ pi->last_finish = sched_now;
+ if (time_before64(sched_now, start_time))
+ return 0;
+
+ return sched_now - start_time;
+}
+
+static int hst_end_io(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes, u64 start_time)
+{
+ struct path_info *pi = path->pscontext;
+ struct selector *s = ps->context;
+ unsigned long flags;
+ u64 st;
+
+ spin_lock_irqsave(&pi->lock, flags);
+
+ st = path_service_time(pi, start_time);
+ pi->outstanding--;
+ pi->historical_service_time =
+ fixed_ema(pi->historical_service_time,
+ min(st * HST_FIXED_1, HST_FIXED_MAX),
+ hst_weight(ps, st));
+
+ /*
+ * On request end, mark path as fresh. If a path hasn't
+ * finished any requests within the fresh period, the estimated
+ * service time is considered too optimistic and we limit the
+ * maximum requests on that path.
+ */
+ pi->stale_after = pi->last_finish +
+ (s->valid_count * (pi->historical_service_time >> HST_FIXED_SHIFT));
+
+ spin_unlock_irqrestore(&pi->lock, flags);
+
+ return 0;
+}
+
+static struct path_selector_type hst_ps = {
+ .name = "historical-service-time",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 3,
+ .create = hst_create,
+ .destroy = hst_destroy,
+ .status = hst_status,
+ .add_path = hst_add_path,
+ .fail_path = hst_fail_path,
+ .reinstate_path = hst_reinstate_path,
+ .select_path = hst_select_path,
+ .start_io = hst_start_io,
+ .end_io = hst_end_io,
+};
+
+static int __init dm_hst_init(void)
+{
+ int r = dm_register_path_selector(&hst_ps);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+
+ DMINFO("version " HST_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit dm_hst_exit(void)
+{
+ int r = dm_unregister_path_selector(&hst_ps);
+
+ if (r < 0)
+ DMERR("unregister failed %d", r);
+}
+
+module_init(dm_hst_init);
+module_exit(dm_hst_exit);
+
+MODULE_DESCRIPTION(DM_NAME " measured service time oriented path selector");
+MODULE_AUTHOR("Khazhismel Kumykov <khazhy@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 84cb04904fab..81dc5ff08909 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -92,7 +92,7 @@ struct journal_entry {
} s;
__u64 sector;
} u;
- commit_id_t last_bytes[0];
+ commit_id_t last_bytes[];
/* __u8 tag[0]; */
};
@@ -1553,8 +1553,6 @@ static void integrity_metadata(struct work_struct *w)
char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
unsigned sectors_to_process;
- sector_t save_metadata_block;
- unsigned save_metadata_offset;
if (unlikely(ic->mode == 'R'))
goto skip_io;
@@ -1605,8 +1603,6 @@ static void integrity_metadata(struct work_struct *w)
goto skip_io;
}
- save_metadata_block = dio->metadata_block;
- save_metadata_offset = dio->metadata_offset;
sector = dio->range.logical_sector;
sectors_to_process = dio->range.n_sectors;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8ea20b56b4d6..e3d35c6c9f71 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -127,7 +127,7 @@ struct pending_block {
char *data;
u32 datalen;
struct list_head list;
- struct bio_vec vecs[0];
+ struct bio_vec vecs[];
};
struct per_bio_data {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3e500098132f..78cff42d987e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -439,7 +439,7 @@ failed:
}
/*
- * dm_report_EIO() is a macro instead of a function to make pr_debug()
+ * dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
* report the function name and line number of the function from which
* it has been invoked.
*/
@@ -447,43 +447,25 @@ failed:
do { \
struct mapped_device *md = dm_table_get_md((m)->ti->table); \
\
- pr_debug("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d\n", \
- dm_device_name(md), \
- test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
- dm_noflush_suspending((m)->ti)); \
+ DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
+ dm_device_name(md), \
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
+ dm_noflush_suspending((m)->ti)); \
} while (0)
/*
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
- *
- * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
- * the same value then we are not between multipath_presuspend()
- * and multipath_resume() calls and we have no need to check
- * for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static bool __must_push_back(struct multipath *m, unsigned long flags)
+static bool __must_push_back(struct multipath *m)
{
- return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
- dm_noflush_suspending(m->ti));
+ return dm_noflush_suspending(m->ti);
}
-/*
- * Following functions use READ_ONCE to get atomic access to
- * all m->flags to avoid taking spinlock
- */
static bool must_push_back_rq(struct multipath *m)
{
- unsigned long flags = READ_ONCE(m->flags);
- return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
-}
-
-static bool must_push_back_bio(struct multipath *m)
-{
- unsigned long flags = READ_ONCE(m->flags);
- return __must_push_back(m, flags);
+ return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m);
}
/*
@@ -567,7 +549,8 @@ static void multipath_release_clone(struct request *clone,
if (pgpath && pgpath->pg->ps.type->end_io)
pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
&pgpath->path,
- mpio->nr_bytes);
+ mpio->nr_bytes,
+ clone->io_start_time_ns);
}
blk_put_request(clone);
@@ -619,7 +602,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio,
return DM_MAPIO_SUBMITTED;
if (!pgpath) {
- if (must_push_back_bio(m))
+ if (__must_push_back(m))
return DM_MAPIO_REQUEUE;
dm_report_EIO(m);
return DM_MAPIO_KILL;
@@ -709,15 +692,38 @@ static void process_queued_bios(struct work_struct *work)
* If we run out of usable paths, should we queue I/O or error it?
*/
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
- bool save_old_value)
+ bool save_old_value, const char *caller)
{
unsigned long flags;
+ bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
+ const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table));
+
+ DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
+ dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
spin_lock_irqsave(&m->lock, flags);
- assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
- (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
- (!save_old_value && queue_if_no_path));
+
+ queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+
+ if (save_old_value) {
+ if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
+ DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
+ dm_dev_name);
+ } else
+ assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
+ } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
+ /* A "fail_if_no_path" message must be honored, so also clear the saved state. */
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
+
+ DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
+ dm_dev_name, __func__,
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
+ dm_noflush_suspending(m->ti));
+
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -738,7 +744,7 @@ static void queue_if_no_path_timeout_work(struct timer_list *t)
struct mapped_device *md = dm_table_get_md(m->ti->table);
DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
- queue_if_no_path(m, false, false);
+ queue_if_no_path(m, false, false, __func__);
}
/*
@@ -1078,7 +1084,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
argc--;
if (!strcasecmp(arg_name, "queue_if_no_path")) {
- r = queue_if_no_path(m, true, false);
+ r = queue_if_no_path(m, true, false, __func__);
continue;
}
@@ -1279,7 +1285,9 @@ static int fail_path(struct pgpath *pgpath)
if (!pgpath->is_active)
goto out;
- DMWARN("Failing path %s.", pgpath->path.dev->name);
+ DMWARN("%s: Failing path %s.",
+ dm_device_name(dm_table_get_md(m->ti->table)),
+ pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = false;
@@ -1318,7 +1326,9 @@ static int reinstate_path(struct pgpath *pgpath)
if (pgpath->is_active)
goto out;
- DMWARN("Reinstating path %s.", pgpath->path.dev->name);
+ DMWARN("%s: Reinstating path %s.",
+ dm_device_name(dm_table_get_md(m->ti->table)),
+ pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
if (r)
@@ -1617,7 +1627,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
+ clone->io_start_time_ns);
}
return r;
@@ -1640,7 +1651,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (atomic_read(&m->nr_valid_paths) == 0 &&
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
- if (must_push_back_bio(m)) {
+ if (__must_push_back(m)) {
r = DM_ENDIO_REQUEUE;
} else {
dm_report_EIO(m);
@@ -1661,23 +1672,27 @@ done:
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
- ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
+ ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
+ dm_start_time_ns_from_clone(clone));
}
return r;
}
/*
- * Suspend can't complete until all the I/O is processed so if
- * the last path fails we must error any remaining I/O.
- * Note that if the freeze_bdev fails while suspending, the
- * queue_if_no_path state is lost - userspace should reset it.
+ * Suspend with flush can't complete until all the I/O is processed
+ * so if the last path fails we must error any remaining I/O.
+ * - Note that if the freeze_bdev fails while suspending, the
+ * queue_if_no_path state is lost - userspace should reset it.
+ * Otherwise, during noflush suspend, queue_if_no_path will not change.
*/
static void multipath_presuspend(struct dm_target *ti)
{
struct multipath *m = ti->private;
- queue_if_no_path(m, false, true);
+ /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
+ if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
+ queue_if_no_path(m, false, true, __func__);
}
static void multipath_postsuspend(struct dm_target *ti)
@@ -1698,8 +1713,16 @@ static void multipath_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
- test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
+ if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
+
+ DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
+ dm_device_name(dm_table_get_md(m->ti->table)), __func__,
+ test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
+
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1859,13 +1882,13 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
- r = queue_if_no_path(m, true, false);
+ r = queue_if_no_path(m, true, false, __func__);
spin_lock_irqsave(&m->lock, flags);
enable_nopath_timeout(m);
spin_unlock_irqrestore(&m->lock, flags);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
- r = queue_if_no_path(m, false, false);
+ r = queue_if_no_path(m, false, false, __func__);
disable_nopath_timeout(m);
goto out;
}
@@ -1918,7 +1941,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
int r;
current_pgpath = READ_ONCE(m->current_pgpath);
- if (!current_pgpath)
+ if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
current_pgpath = choose_pgpath(m, 0);
if (current_pgpath) {
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index b6eb5365b1a4..c47bc0e20275 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -74,7 +74,7 @@ struct path_selector_type {
int (*start_io) (struct path_selector *ps, struct dm_path *path,
size_t nr_bytes);
int (*end_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes);
+ size_t nr_bytes, u64 start_time);
};
/* Register a path selector */
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 969c4f1a3633..5fd018d18418 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -227,7 +227,7 @@ static int ql_start_io(struct path_selector *ps, struct dm_path *path,
}
static int ql_end_io(struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes)
+ size_t nr_bytes, u64 start_time)
{
struct path_info *pi = path->pscontext;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9a18bef0a5ff..10e8b2fe787b 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -254,7 +254,7 @@ struct raid_set {
int mode;
} journal_dev;
- struct raid_dev dev[0];
+ struct raid_dev dev[];
};
static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 089aed57e083..2f655d9f4200 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -83,7 +83,7 @@ struct mirror_set {
struct work_struct trigger_event;
unsigned nr_mirrors;
- struct mirror mirror[0];
+ struct mirror mirror[];
};
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index f006a9005593..9cfda665e9eb 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -309,7 +309,7 @@ static int st_start_io(struct path_selector *ps, struct dm_path *path,
}
static int st_end_io(struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes)
+ size_t nr_bytes, u64 start_time)
{
struct path_info *pi = path->pscontext;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 71417048256a..35d368c418d0 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -56,7 +56,7 @@ struct dm_stat {
size_t percpu_alloc_size;
size_t histogram_alloc_size;
struct dm_stat_percpu *stat_percpu[NR_CPUS];
- struct dm_stat_shared stat_shared[0];
+ struct dm_stat_shared stat_shared[];
};
#define STAT_PRECISE_TIMESTAMPS 1
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index fa813c0f993d..151d022b032d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -41,7 +41,7 @@ struct stripe_c {
/* Work struct used for triggering events*/
struct work_struct trigger_event;
- struct stripe stripe[0];
+ struct stripe stripe[];
};
/*
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 8a0f057b8122..bff4c7fa1cd2 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -53,7 +53,7 @@ struct switch_ctx {
/*
* Array of dm devices to switch between.
*/
- struct switch_path path_list[0];
+ struct switch_path path_list[];
};
static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 613c171b1b6d..74f3c506f084 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -234,10 +234,6 @@ static int persistent_memory_claim(struct dm_writecache *wc)
wc->memory_vmapped = false;
- if (!wc->ssd_dev->dax_dev) {
- r = -EOPNOTSUPP;
- goto err1;
- }
s = wc->memory_map_size;
p = s >> PAGE_SHIFT;
if (!p) {
@@ -1143,6 +1139,42 @@ static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
return r;
}
+static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
+{
+ /*
+ * clflushopt performs better with block size 1024, 2048, 4096
+ * non-temporal stores perform better with block size 512
+ *
+ * block size 512 1024 2048 4096
+ * movnti 496 MB/s 642 MB/s 725 MB/s 744 MB/s
+ * clflushopt 373 MB/s 688 MB/s 1.1 GB/s 1.2 GB/s
+ *
+ * We see that movnti performs better for 512-byte blocks, and
+ * clflushopt performs better for 1024-byte and larger blocks. So, we
+ * prefer clflushopt for sizes >= 768.
+ *
+ * NOTE: this happens to be the case now (with dm-writecache's single
+ * threaded model) but re-evaluate this once memcpy_flushcache() is
+ * enabled to use movdir64b which might invalidate this performance
+ * advantage seen with cache-allocating-writes plus flushing.
+ */
+#ifdef CONFIG_X86
+ if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
+ likely(boot_cpu_data.x86_clflush_size == 64) &&
+ likely(size >= 768)) {
+ do {
+ memcpy((void *)dest, (void *)source, 64);
+ clflushopt((void *)dest);
+ dest += 64;
+ source += 64;
+ size -= 64;
+ } while (size >= 64);
+ return;
+ }
+#endif
+ memcpy_flushcache(dest, source, size);
+}
+
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
void *buf;
@@ -1168,7 +1200,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
}
} else {
flush_dcache_page(bio_page(bio));
- memcpy_flushcache(data, buf, size);
+ memcpy_flushcache_optimized(data, buf, size);
}
bvec_kunmap_irq(buf, &flags);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index bf2245370305..130b5a6d9f12 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -16,7 +16,7 @@
/*
* Metadata version.
*/
-#define DMZ_META_VER 1
+#define DMZ_META_VER 2
/*
* On-disk super block magic.
@@ -69,8 +69,17 @@ struct dmz_super {
/* Checksum */
__le32 crc; /* 48 */
+ /* DM-Zoned label */
+ u8 dmz_label[32]; /* 80 */
+
+ /* DM-Zoned UUID */
+ u8 dmz_uuid[16]; /* 96 */
+
+ /* Device UUID */
+ u8 dev_uuid[16]; /* 112 */
+
/* Padding to full 512B sector */
- u8 reserved[464]; /* 512 */
+ u8 reserved[400]; /* 512 */
};
/*
@@ -122,8 +131,10 @@ enum {
*/
struct dmz_sb {
sector_t block;
+ struct dmz_dev *dev;
struct dmz_mblock *mblk;
struct dmz_super *sb;
+ struct dm_zone *zone;
};
/*
@@ -131,28 +142,41 @@ struct dmz_sb {
*/
struct dmz_metadata {
struct dmz_dev *dev;
+ unsigned int nr_devs;
+
+ char devname[BDEVNAME_SIZE];
+ char label[BDEVNAME_SIZE];
+ uuid_t uuid;
sector_t zone_bitmap_size;
unsigned int zone_nr_bitmap_blocks;
unsigned int zone_bits_per_mblk;
+ sector_t zone_nr_blocks;
+ sector_t zone_nr_blocks_shift;
+
+ sector_t zone_nr_sectors;
+ sector_t zone_nr_sectors_shift;
+
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
+ unsigned int nr_zones;
unsigned int nr_useable_zones;
unsigned int nr_meta_blocks;
unsigned int nr_meta_zones;
unsigned int nr_data_zones;
+ unsigned int nr_cache_zones;
unsigned int nr_rnd_zones;
unsigned int nr_reserved_seq;
unsigned int nr_chunks;
/* Zone information array */
- struct dm_zone *zones;
+ struct xarray zones;
- struct dm_zone *sb_zone;
struct dmz_sb sb[2];
unsigned int mblk_primary;
+ unsigned int sb_version;
u64 sb_gen;
unsigned int min_nr_mblks;
unsigned int max_nr_mblks;
@@ -168,15 +192,11 @@ struct dmz_metadata {
/* Zone allocation management */
struct mutex map_lock;
struct dmz_mblock **map_mblk;
- unsigned int nr_rnd;
- atomic_t unmap_nr_rnd;
- struct list_head unmap_rnd_list;
- struct list_head map_rnd_list;
- unsigned int nr_seq;
- atomic_t unmap_nr_seq;
- struct list_head unmap_seq_list;
- struct list_head map_seq_list;
+ unsigned int nr_cache;
+ atomic_t unmap_nr_cache;
+ struct list_head unmap_cache_list;
+ struct list_head map_cache_list;
atomic_t nr_reserved_seq_zones;
struct list_head reserved_seq_zones_list;
@@ -184,22 +204,65 @@ struct dmz_metadata {
wait_queue_head_t free_wq;
};
+#define dmz_zmd_info(zmd, format, args...) \
+ DMINFO("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_err(zmd, format, args...) \
+ DMERR("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_warn(zmd, format, args...) \
+ DMWARN("(%s): " format, (zmd)->label, ## args)
+
+#define dmz_zmd_debug(zmd, format, args...) \
+ DMDEBUG("(%s): " format, (zmd)->label, ## args)
/*
* Various accessors
*/
-unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
+static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return ((unsigned int)(zone - zmd->zones));
+ if (WARN_ON(!zone))
+ return 0;
+
+ return zone->id - zone->dev->zone_offset;
}
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
+ unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
+
+ return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
+ unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
+
+ return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks;
+}
+
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors;
+}
+
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors_shift;
+}
+
+unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_zones;
}
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
@@ -207,14 +270,88 @@ unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
return zmd->nr_chunks;
}
-unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
- return zmd->nr_rnd;
+ return zmd->dev[idx].nr_rnd;
}
-unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
- return atomic_read(&zmd->unmap_nr_rnd);
+ return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
+}
+
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_cache;
+}
+
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
+{
+ return atomic_read(&zmd->unmap_nr_cache);
+}
+
+unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
+{
+ return zmd->dev[idx].nr_seq;
+}
+
+unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
+{
+ return atomic_read(&zmd->dev[idx].unmap_nr_seq);
+}
+
+static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
+{
+ return xa_load(&zmd->zones, zone_id);
+}
+
+static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
+ unsigned int zone_id, struct dmz_dev *dev)
+{
+ struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
+
+ if (!zone)
+ return ERR_PTR(-ENOMEM);
+
+ if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
+ kfree(zone);
+ return ERR_PTR(-EBUSY);
+ }
+
+ INIT_LIST_HEAD(&zone->link);
+ atomic_set(&zone->refcount, 0);
+ zone->id = zone_id;
+ zone->chunk = DMZ_MAP_UNMAPPED;
+ zone->dev = dev;
+
+ return zone;
+}
+
+const char *dmz_metadata_label(struct dmz_metadata *zmd)
+{
+ return (const char *)zmd->label;
+}
+
+bool dmz_check_dev(struct dmz_metadata *zmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < zmd->nr_devs; i++) {
+ if (!dmz_check_bdev(&zmd->dev[i]))
+ return false;
+ }
+ return true;
+}
+
+bool dmz_dev_is_dying(struct dmz_metadata *zmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < zmd->nr_devs; i++) {
+ if (dmz_bdev_is_dying(&zmd->dev[i]))
+ return true;
+ }
+ return false;
}
/*
@@ -402,9 +539,10 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
{
struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+ struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
struct bio *bio;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (dmz_bdev_is_dying(dev))
return ERR_PTR(-EIO);
/* Get a new block and a BIO to read it */
@@ -440,7 +578,7 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
/* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@@ -537,6 +675,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
sector_t mblk_no)
{
struct dmz_mblock *mblk;
+ struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
/* Check rbtree */
spin_lock(&zmd->mblk_lock);
@@ -555,7 +694,7 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
dmz_release_mblock(zmd, mblk);
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
return ERR_PTR(-EIO);
}
@@ -579,10 +718,11 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
unsigned int set)
{
+ struct dmz_dev *dev = zmd->sb[set].dev;
sector_t block = zmd->sb[set].block + mblk->no;
struct bio *bio;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (dmz_bdev_is_dying(dev))
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
@@ -594,7 +734,7 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@@ -607,13 +747,16 @@ static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
/*
* Read/write a metadata block.
*/
-static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
- struct page *page)
+static int dmz_rdwr_block(struct dmz_dev *dev, int op,
+ sector_t block, struct page *page)
{
struct bio *bio;
int ret;
- if (dmz_bdev_is_dying(zmd->dev))
+ if (WARN_ON(!dev))
+ return -EIO;
+
+ if (dmz_bdev_is_dying(dev))
return -EIO;
bio = bio_alloc(GFP_NOIO, 1);
@@ -621,14 +764,14 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio_set_dev(bio, zmd->dev->bdev);
+ bio_set_dev(bio, dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
bio_put(bio);
if (ret)
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
return ret;
}
@@ -637,18 +780,32 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
*/
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
- sector_t block = zmd->sb[set].block;
struct dmz_mblock *mblk = zmd->sb[set].mblk;
struct dmz_super *sb = zmd->sb[set].sb;
+ struct dmz_dev *dev = zmd->sb[set].dev;
+ sector_t sb_block;
u64 sb_gen = zmd->sb_gen + 1;
int ret;
sb->magic = cpu_to_le32(DMZ_MAGIC);
- sb->version = cpu_to_le32(DMZ_META_VER);
+
+ sb->version = cpu_to_le32(zmd->sb_version);
+ if (zmd->sb_version > 1) {
+ BUILD_BUG_ON(UUID_SIZE != 16);
+ export_uuid(sb->dmz_uuid, &zmd->uuid);
+ memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
+ export_uuid(sb->dev_uuid, &dev->uuid);
+ }
sb->gen = cpu_to_le64(sb_gen);
- sb->sb_block = cpu_to_le64(block);
+ /*
+ * The metadata always references the absolute block address,
+ * i.e. relative to the entire block range, not the per-device
+ * block address.
+ */
+ sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
+ sb->sb_block = cpu_to_le64(sb_block);
sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
@@ -659,9 +816,10 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
sb->crc = 0;
sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
- ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
+ ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
+ mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
return ret;
}
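To make the absolute addressing above concrete, with an assumed geometry (not taken from the patch): 256 MiB zones and 4 KiB metadata blocks give zone_nr_blocks = 65536 and zone_nr_blocks_shift = 16, so the superblock stored in zone 4 records

	/* assumed geometry: 256 MiB zones, 4 KiB blocks => shift == 16 */
	sector_t sb_block = (sector_t)4 << 16;	/* absolute block 262144 */

regardless of which backing device that zone belongs to.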
@@ -674,6 +832,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
unsigned int set)
{
struct dmz_mblock *mblk;
+ struct dmz_dev *dev = zmd->sb[set].dev;
struct blk_plug plug;
int ret = 0, nr_mblks_submitted = 0;
@@ -695,7 +854,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
- dmz_check_bdev(zmd->dev);
+ dmz_check_bdev(dev);
ret = -EIO;
}
nr_mblks_submitted--;
@@ -703,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
return ret;
}
@@ -740,6 +899,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
{
struct dmz_mblock *mblk;
struct list_head write_list;
+ struct dmz_dev *dev;
int ret;
if (WARN_ON(!zmd))
@@ -753,6 +913,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
* from modifying metadata.
*/
down_write(&zmd->mblk_sem);
+ dev = zmd->sb[zmd->mblk_primary].dev;
/*
* This is called from the target flush work and reclaim work.
@@ -760,7 +921,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
dmz_lock_flush(zmd);
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_bdev_is_dying(dev)) {
ret = -EIO;
goto out;
}
@@ -772,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO);
+ ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
goto err;
}
@@ -821,7 +982,7 @@ err:
list_splice(&write_list, &zmd->mblk_dirty_list);
spin_unlock(&zmd->mblk_lock);
}
- if (!dmz_check_bdev(zmd->dev))
+ if (!dmz_check_bdev(dev))
ret = -EIO;
goto out;
}
@@ -829,12 +990,31 @@ err:
/*
* Check super block.
*/
-static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
+static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
+ bool tertiary)
{
+ struct dmz_super *sb = dsb->sb;
+ struct dmz_dev *dev = dsb->dev;
unsigned int nr_meta_zones, nr_data_zones;
- struct dmz_dev *dev = zmd->dev;
u32 crc, stored_crc;
- u64 gen;
+ u64 gen, sb_block;
+
+ if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
+ dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
+ DMZ_MAGIC, le32_to_cpu(sb->magic));
+ return -ENXIO;
+ }
+
+ zmd->sb_version = le32_to_cpu(sb->version);
+ if (zmd->sb_version > DMZ_META_VER) {
+ dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
+ DMZ_META_VER, zmd->sb_version);
+ return -EINVAL;
+ }
+ if (zmd->sb_version < 2 && tertiary) {
+ dmz_dev_err(dev, "Tertiary superblocks are not supported");
+ return -EINVAL;
+ }
gen = le64_to_cpu(sb->gen);
stored_crc = le32_to_cpu(sb->crc);
@@ -846,20 +1026,57 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
return -ENXIO;
}
- if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
- dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
- DMZ_MAGIC, le32_to_cpu(sb->magic));
- return -ENXIO;
- }
+ sb_block = le64_to_cpu(sb->sb_block);
+ if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
+ dmz_dev_err(dev, "Invalid superblock position "
+ "(is %llu expected %llu)",
+ sb_block,
+ (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
+ return -EINVAL;
+ }
+ if (zmd->sb_version > 1) {
+ uuid_t sb_uuid;
+
+ import_uuid(&sb_uuid, sb->dmz_uuid);
+ if (uuid_is_null(&sb_uuid)) {
+ dmz_dev_err(dev, "NULL DM-Zoned uuid");
+ return -ENXIO;
+ } else if (uuid_is_null(&zmd->uuid)) {
+ uuid_copy(&zmd->uuid, &sb_uuid);
+ } else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
+ dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
+ "is %pUl expected %pUl",
+ &sb_uuid, &zmd->uuid);
+ return -ENXIO;
+ }
+ if (!strlen(zmd->label))
+ memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
+ else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
+ dmz_dev_err(dev, "mismatching DM-Zoned label, "
+ "is %s expected %s",
+ sb->dmz_label, zmd->label);
+ return -ENXIO;
+ }
+ import_uuid(&dev->uuid, sb->dev_uuid);
+ if (uuid_is_null(&dev->uuid)) {
+ dmz_dev_err(dev, "NULL device uuid");
+ return -ENXIO;
+ }
- if (le32_to_cpu(sb->version) != DMZ_META_VER) {
- dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
- DMZ_META_VER, le32_to_cpu(sb->version));
- return -ENXIO;
+ if (tertiary) {
+ /*
+ * Generation number should be 0, but it doesn't
+ * really matter if it isn't.
+ */
+ if (gen != 0)
+ dmz_dev_warn(dev, "Invalid generation %llu",
+ gen);
+ return 0;
+ }
}
- nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
- >> dev->zone_nr_blocks_shift;
+ nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
+ >> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones ||
nr_meta_zones >= zmd->nr_rnd_zones) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
@@ -895,10 +1112,13 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
/*
* Read the first or second super block from disk.
*/
-static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
+static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
- return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
- zmd->sb[set].mblk->page);
+ dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
+ set, sb->dev->name, sb->block);
+
+ return dmz_rdwr_block(sb->dev, REQ_OP_READ,
+ sb->block, sb->mblk->page);
}
/*
@@ -908,8 +1128,9 @@ static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
*/
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
+ unsigned int zone_id = zmd->sb[0].zone->id;
int i;
/* Allocate a block */
@@ -922,24 +1143,29 @@ static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
/* Bad first super block: search for the second one */
zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
- for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
- if (dmz_read_sb(zmd, 1) != 0)
+ zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
+ zmd->sb[1].dev = zmd->sb[0].dev;
+ for (i = 1; i < zmd->nr_rnd_zones; i++) {
+ if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
break;
if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
return 0;
zmd->sb[1].block += zone_nr_blocks;
+ zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
}
dmz_free_mblock(zmd, mblk);
zmd->sb[1].mblk = NULL;
+ zmd->sb[1].zone = NULL;
+ zmd->sb[1].dev = NULL;
return -EIO;
}
/*
- * Read the first or second super block from disk.
+ * Read a super block from disk.
*/
-static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
+static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
struct dmz_mblock *mblk;
int ret;
@@ -949,14 +1175,14 @@ static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
if (!mblk)
return -ENOMEM;
- zmd->sb[set].mblk = mblk;
- zmd->sb[set].sb = mblk->data;
+ sb->mblk = mblk;
+ sb->sb = mblk->data;
/* Read super block */
- ret = dmz_read_sb(zmd, set);
+ ret = dmz_read_sb(zmd, sb, set);
if (ret) {
dmz_free_mblock(zmd, mblk);
- zmd->sb[set].mblk = NULL;
+ sb->mblk = NULL;
return ret;
}
@@ -972,14 +1198,13 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
struct page *page;
int i, ret;
- dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);
+ dmz_dev_warn(zmd->sb[dst_set].dev,
+ "Metadata set %u invalid: recovering", dst_set);
if (dst_set == 0)
- zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
- else {
- zmd->sb[1].block = zmd->sb[0].block +
- (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
- }
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
+ else
+ zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
page = alloc_page(GFP_NOIO);
if (!page)
@@ -987,11 +1212,11 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
/* Copy metadata blocks */
for (i = 1; i < zmd->nr_meta_blocks; i++) {
- ret = dmz_rdwr_block(zmd, REQ_OP_READ,
+ ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
zmd->sb[src_set].block + i, page);
if (ret)
goto out;
- ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
+ ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
zmd->sb[dst_set].block + i, page);
if (ret)
goto out;
@@ -1023,53 +1248,73 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
u64 sb_gen[2] = {0, 0};
int ret;
+ if (!zmd->sb[0].zone) {
+ dmz_zmd_err(zmd, "Primary super block zone not set");
+ return -ENXIO;
+ }
+
/* Read and check the primary super block */
- zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
- ret = dmz_get_sb(zmd, 0);
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
+ zmd->sb[0].dev = zmd->sb[0].zone->dev;
+ ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
if (ret) {
- dmz_dev_err(zmd->dev, "Read primary super block failed");
+ dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
return ret;
}
- ret = dmz_check_sb(zmd, zmd->sb[0].sb);
+ ret = dmz_check_sb(zmd, &zmd->sb[0], false);
/* Read and check secondary super block */
if (ret == 0) {
sb_good[0] = true;
- zmd->sb[1].block = zmd->sb[0].block +
- (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
- ret = dmz_get_sb(zmd, 1);
+ if (!zmd->sb[1].zone) {
+ unsigned int zone_id =
+ zmd->sb[0].zone->id + zmd->nr_meta_zones;
+
+ zmd->sb[1].zone = dmz_get(zmd, zone_id);
+ }
+ zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
+ zmd->sb[1].dev = zmd->sb[0].dev;
+ ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
} else
ret = dmz_lookup_secondary_sb(zmd);
if (ret) {
- dmz_dev_err(zmd->dev, "Read secondary super block failed");
+ dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
return ret;
}
- ret = dmz_check_sb(zmd, zmd->sb[1].sb);
+ ret = dmz_check_sb(zmd, &zmd->sb[1], false);
if (ret == 0)
sb_good[1] = true;
/* Use highest generation sb first */
if (!sb_good[0] && !sb_good[1]) {
- dmz_dev_err(zmd->dev, "No valid super block found");
+ dmz_zmd_err(zmd, "No valid super block found");
return -EIO;
}
if (sb_good[0])
sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
- else
+ else {
ret = dmz_recover_mblocks(zmd, 0);
+ if (ret) {
+ dmz_dev_err(zmd->sb[0].dev,
+ "Recovery of superblock 0 failed");
+ return -EIO;
+ }
+ }
if (sb_good[1])
sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
- else
+ else {
ret = dmz_recover_mblocks(zmd, 1);
- if (ret) {
- dmz_dev_err(zmd->dev, "Recovery failed");
- return -EIO;
+ if (ret) {
+ dmz_dev_err(zmd->sb[1].dev,
+ "Recovery of superblock 1 failed");
+ return -EIO;
+ }
}
if (sb_gen[0] >= sb_gen[1]) {
@@ -1080,32 +1325,70 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
zmd->mblk_primary = 1;
}
- dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
+ dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
+ "Using super block %u (gen %llu)",
zmd->mblk_primary, zmd->sb_gen);
- return 0;
+ if (zmd->sb_version > 1) {
+ int i;
+ struct dmz_sb *sb;
+
+ sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
+ if (!sb)
+ return -ENOMEM;
+ for (i = 1; i < zmd->nr_devs; i++) {
+ sb->block = 0;
+ sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
+ sb->dev = &zmd->dev[i];
+ if (!dmz_is_meta(sb->zone)) {
+ dmz_dev_err(sb->dev,
+ "Tertiary super block zone %u not marked as metadata zone",
+ sb->zone->id);
+ ret = -EINVAL;
+ goto out_kfree;
+ }
+ ret = dmz_get_sb(zmd, sb, i + 1);
+ if (ret) {
+ dmz_dev_err(sb->dev,
+ "Read tertiary super block failed");
+ dmz_free_mblock(zmd, sb->mblk);
+ goto out_kfree;
+ }
+ ret = dmz_check_sb(zmd, sb, true);
+ dmz_free_mblock(zmd, sb->mblk);
+ if (ret == -EINVAL)
+ goto out_kfree;
+ }
+ out_kfree:
+ kfree(sb);
+ }
+ return ret;
}
/*
* Initialize a zone descriptor.
*/
-static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
+static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
- struct dmz_metadata *zmd = data;
- struct dm_zone *zone = &zmd->zones[idx];
- struct dmz_dev *dev = zmd->dev;
+ struct dmz_dev *dev = data;
+ struct dmz_metadata *zmd = dev->metadata;
+ int idx = num + dev->zone_offset;
+ struct dm_zone *zone;
+
+ zone = dmz_insert(zmd, idx, dev);
+ if (IS_ERR(zone))
+ return PTR_ERR(zone);
- /* Ignore the eventual last runt (smaller) zone */
- if (blkz->len != dev->zone_nr_sectors) {
- if (blkz->start + blkz->len == dev->capacity)
+ if (blkz->len != zmd->zone_nr_sectors) {
+ if (zmd->sb_version > 1) {
+ /* Ignore a possible runt (smaller) zone */
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ return 0;
+ } else if (blkz->start + blkz->len == dev->capacity)
return 0;
return -ENXIO;
}
- INIT_LIST_HEAD(&zone->link);
- atomic_set(&zone->refcount, 0);
- zone->chunk = DMZ_MAP_UNMAPPED;
-
switch (blkz->type) {
case BLK_ZONE_TYPE_CONVENTIONAL:
set_bit(DMZ_RND, &zone->flags);
@@ -1131,13 +1414,45 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
zmd->nr_useable_zones++;
if (dmz_is_rnd(zone)) {
zmd->nr_rnd_zones++;
- if (!zmd->sb_zone) {
- /* Super block zone */
- zmd->sb_zone = zone;
+ if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
+ /* Primary super block zone */
+ zmd->sb[0].zone = zone;
}
}
+ if (zmd->nr_devs > 1 && num == 0) {
+ /*
+ * Tertiary superblock zones are always at the
+ * start of the zoned devices, so mark them
+ * as metadata zones.
+ */
+ set_bit(DMZ_META, &zone->flags);
+ }
}
+ return 0;
+}
+
+static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
+{
+ int idx;
+ sector_t zone_offset = 0;
+ for (idx = 0; idx < dev->nr_zones; idx++) {
+ struct dm_zone *zone;
+
+ zone = dmz_insert(zmd, idx, dev);
+ if (IS_ERR(zone))
+ return PTR_ERR(zone);
+ set_bit(DMZ_CACHE, &zone->flags);
+ zone->wp_block = 0;
+ zmd->nr_cache_zones++;
+ zmd->nr_useable_zones++;
+ if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
+ /* Disable runt zone */
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ break;
+ }
+ zone_offset += zmd->zone_nr_sectors;
+ }
return 0;
}
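A worked example of the runt-zone check above, with an assumed geometry: emulating a 1 TiB regular device with 256 MiB zones (zone_nr_sectors = 524288 512-byte sectors) yields 4096 full cache zones; if the device were 1 TiB plus 100 MiB, the loop would reach the final partial zone, find fewer than 524288 sectors remaining, flag it DMZ_OFFLINE and stop.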
@@ -1146,8 +1461,15 @@ static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
*/
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
- kfree(zmd->zones);
- zmd->zones = NULL;
+ int idx;
+
+ for (idx = 0; idx < zmd->nr_zones; idx++) {
+ struct dm_zone *zone = xa_load(&zmd->zones, idx);
+
+ kfree(zone);
+ xa_erase(&zmd->zones, idx);
+ }
+ xa_destroy(&zmd->zones);
}
/*
@@ -1156,32 +1478,87 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
*/
static int dmz_init_zones(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
- int ret;
+ int i, ret;
+ struct dmz_dev *zoned_dev = &zmd->dev[0];
/* Init */
- zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
+ zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
+ zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
+ zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
zmd->zone_nr_bitmap_blocks =
max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
- zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+ zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
- zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
- if (!zmd->zones)
- return -ENOMEM;
+ zmd->nr_zones = 0;
+ for (i = 0; i < zmd->nr_devs; i++) {
+ struct dmz_dev *dev = &zmd->dev[i];
+
+ dev->metadata = zmd;
+ zmd->nr_zones += dev->nr_zones;
+
+ atomic_set(&dev->unmap_nr_rnd, 0);
+ INIT_LIST_HEAD(&dev->unmap_rnd_list);
+ INIT_LIST_HEAD(&dev->map_rnd_list);
+
+ atomic_set(&dev->unmap_nr_seq, 0);
+ INIT_LIST_HEAD(&dev->unmap_seq_list);
+ INIT_LIST_HEAD(&dev->map_seq_list);
+ }
+
+ if (!zmd->nr_zones) {
+ DMERR("(%s): No zones found", zmd->devname);
+ return -ENXIO;
+ }
+ xa_init(&zmd->zones);
+
+ DMDEBUG("(%s): Using %zu B for zone information",
+ zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);
- dmz_dev_info(dev, "Using %zu B for zone information",
- sizeof(struct dm_zone) * dev->nr_zones);
+ if (zmd->nr_devs > 1) {
+ ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
+ if (ret < 0) {
+ DMDEBUG("(%s): Failed to emulate zones, error %d",
+ zmd->devname, ret);
+ dmz_drop_zones(zmd);
+ return ret;
+ }
+
+ /*
+ * Primary superblock zone is always at zone 0 when multiple
+ * drives are present.
+ */
+ zmd->sb[0].zone = dmz_get(zmd, 0);
+
+ for (i = 1; i < zmd->nr_devs; i++) {
+ zoned_dev = &zmd->dev[i];
+
+ ret = blkdev_report_zones(zoned_dev->bdev, 0,
+ BLK_ALL_ZONES,
+ dmz_init_zone, zoned_dev);
+ if (ret < 0) {
+ DMDEBUG("(%s): Failed to report zones, error %d",
+ zmd->devname, ret);
+ dmz_drop_zones(zmd);
+ return ret;
+ }
+ }
+ return 0;
+ }
/*
* Get zone information and initialize zone descriptors. At the same
* time, determine where the super block should be: first block of the
* first randomly writable zone.
*/
- ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
- zmd);
+ ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
+ dmz_init_zone, zoned_dev);
if (ret < 0) {
+ DMDEBUG("(%s): Failed to report zones, error %d",
+ zmd->devname, ret);
dmz_drop_zones(zmd);
return ret;
}
@@ -1213,9 +1590,13 @@ static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
*/
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
+ struct dmz_dev *dev = zone->dev;
unsigned int noio_flag;
int ret;
+ if (dev->flags & DMZ_BDEV_REGULAR)
+ return 0;
+
/*
* Get zone information from disk. Since blkdev_report_zones() uses
* GFP_KERNEL by default for memory allocations, set the per-task
@@ -1223,16 +1604,16 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* GFP_NOIO was specified.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1,
+ ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
dmz_update_zone_cb, zone);
memalloc_noio_restore(noio_flag);
if (ret == 0)
ret = -EIO;
if (ret < 0) {
- dmz_dev_err(zmd->dev, "Get zone %u report failed",
- dmz_id(zmd, zone));
- dmz_check_bdev(zmd->dev);
+ dmz_dev_err(dev, "Get zone %u report failed",
+ zone->id);
+ dmz_check_bdev(dev);
return ret;
}
@@ -1246,6 +1627,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
struct dm_zone *zone)
{
+ struct dmz_dev *dev = zone->dev;
unsigned int wp = 0;
int ret;
@@ -1254,8 +1636,8 @@ static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
if (ret)
return ret;
- dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
- dmz_id(zmd, zone), zone->wp_block, wp);
+ dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
+ zone->id, zone->wp_block, wp);
if (zone->wp_block < wp) {
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
@@ -1265,11 +1647,6 @@ static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
return 0;
}
-static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
-{
- return &zmd->zones[zone_id];
-}
-
/*
* Reset a zone write pointer.
*/
@@ -1287,14 +1664,14 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
return 0;
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
- struct dmz_dev *dev = zmd->dev;
+ struct dmz_dev *dev = zone->dev;
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ zmd->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
- dmz_id(zmd, zone), ret);
+ zone->id, ret);
return ret;
}
}
@@ -1313,7 +1690,6 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
*/
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
struct dm_zone *dzone, *bzone;
struct dmz_mblock *dmap_mblk = NULL;
struct dmz_map *dmap;
@@ -1345,36 +1721,48 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
if (dzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (dzone_id >= dev->nr_zones) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
+ if (dzone_id >= zmd->nr_zones) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
chunk, dzone_id);
return -EIO;
}
dzone = dmz_get(zmd, dzone_id);
+ if (!dzone) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
+ chunk, dzone_id);
+ return -EIO;
+ }
set_bit(DMZ_DATA, &dzone->flags);
dzone->chunk = chunk;
dmz_get_zone_weight(zmd, dzone);
- if (dmz_is_rnd(dzone))
- list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(dzone))
+ list_add_tail(&dzone->link, &zmd->map_cache_list);
+ else if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
else
- list_add_tail(&dzone->link, &zmd->map_seq_list);
+ list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
/* Check buffer zone */
bzone_id = le32_to_cpu(dmap[e].bzone_id);
if (bzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (bzone_id >= dev->nr_zones) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
+ if (bzone_id >= zmd->nr_zones) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
chunk, bzone_id);
return -EIO;
}
bzone = dmz_get(zmd, bzone_id);
- if (!dmz_is_rnd(bzone)) {
- dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
+ if (!bzone) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
+ chunk, bzone_id);
+ return -EIO;
+ }
+ if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
+ dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
chunk, bzone_id);
return -EIO;
}
@@ -1385,7 +1773,10 @@ static int dmz_load_mapping(struct dmz_metadata *zmd)
bzone->bzone = dzone;
dzone->bzone = bzone;
dmz_get_zone_weight(zmd, bzone);
- list_add_tail(&bzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(bzone))
+ list_add_tail(&bzone->link, &zmd->map_cache_list);
+ else
+ list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
chunk++;
e++;
@@ -1398,15 +1789,21 @@ next:
* fully initialized. All remaining zones are unmapped data
* zones. Finish initializing those here.
*/
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
dzone = dmz_get(zmd, i);
+ if (!dzone)
+ continue;
if (dmz_is_meta(dzone))
continue;
+ if (dmz_is_offline(dzone))
+ continue;
- if (dmz_is_rnd(dzone))
- zmd->nr_rnd++;
+ if (dmz_is_cache(dzone))
+ zmd->nr_cache++;
+ else if (dmz_is_rnd(dzone))
+ dzone->dev->nr_rnd++;
else
- zmd->nr_seq++;
+ dzone->dev->nr_seq++;
if (dmz_is_data(dzone)) {
/* Already initialized */
@@ -1416,16 +1813,22 @@ next:
/* Unmapped data zone */
set_bit(DMZ_DATA, &dzone->flags);
dzone->chunk = DMZ_MAP_UNMAPPED;
- if (dmz_is_rnd(dzone)) {
- list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
- atomic_inc(&zmd->unmap_nr_rnd);
+ if (dmz_is_cache(dzone)) {
+ list_add_tail(&dzone->link, &zmd->unmap_cache_list);
+ atomic_inc(&zmd->unmap_nr_cache);
+ } else if (dmz_is_rnd(dzone)) {
+ list_add_tail(&dzone->link,
+ &dzone->dev->unmap_rnd_list);
+ atomic_inc(&dzone->dev->unmap_nr_rnd);
} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
+ set_bit(DMZ_RESERVED, &dzone->flags);
atomic_inc(&zmd->nr_reserved_seq_zones);
- zmd->nr_seq--;
+ dzone->dev->nr_seq--;
} else {
- list_add_tail(&dzone->link, &zmd->unmap_seq_list);
- atomic_inc(&zmd->unmap_nr_seq);
+ list_add_tail(&dzone->link,
+ &dzone->dev->unmap_seq_list);
+ atomic_inc(&dzone->dev->unmap_nr_seq);
}
}
@@ -1459,10 +1862,13 @@ static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
list_del_init(&zone->link);
if (dmz_is_seq(zone)) {
/* LRU rotate sequential zone */
- list_add_tail(&zone->link, &zmd->map_seq_list);
+ list_add_tail(&zone->link, &zone->dev->map_seq_list);
+ } else if (dmz_is_cache(zone)) {
+ /* LRU rotate cache zone */
+ list_add_tail(&zone->link, &zmd->map_cache_list);
} else {
/* LRU rotate random zone */
- list_add_tail(&zone->link, &zmd->map_rnd_list);
+ list_add_tail(&zone->link, &zone->dev->map_rnd_list);
}
}
@@ -1529,58 +1935,76 @@ static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
dmz_unlock_map(zmd);
dmz_unlock_metadata(zmd);
+ set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
+ clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
dmz_lock_metadata(zmd);
dmz_lock_map(zmd);
}
/*
- * Select a random write zone for reclaim.
+ * Select a cache or random write zone for reclaim.
*/
-static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int idx, bool idle)
{
struct dm_zone *dzone = NULL;
- struct dm_zone *zone;
-
- if (list_empty(&zmd->map_rnd_list))
- return ERR_PTR(-EBUSY);
+ struct dm_zone *zone, *last = NULL;
+ struct list_head *zone_list;
+
+ /* If we have cache zones select from the cache zone list */
+ if (zmd->nr_cache) {
+ zone_list = &zmd->map_cache_list;
+ /* Try to relaim random zones, too, when idle */
+ if (idle && list_empty(zone_list))
+ zone_list = &zmd->dev[idx].map_rnd_list;
+ } else
+ zone_list = &zmd->dev[idx].map_rnd_list;
- list_for_each_entry(zone, &zmd->map_rnd_list, link) {
- if (dmz_is_buf(zone))
+ list_for_each_entry(zone, zone_list, link) {
+ if (dmz_is_buf(zone)) {
dzone = zone->bzone;
- else
+ if (dzone->dev->dev_idx != idx)
+ continue;
+ if (!last) {
+ last = dzone;
+ continue;
+ }
+ if (last->weight < dzone->weight)
+ continue;
+ dzone = last;
+ } else
dzone = zone;
if (dmz_lock_zone_reclaim(dzone))
return dzone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
* Select a buffered sequential zone for reclaim.
*/
-static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int idx)
{
struct dm_zone *zone;
- if (list_empty(&zmd->map_seq_list))
- return ERR_PTR(-EBUSY);
-
- list_for_each_entry(zone, &zmd->map_seq_list, link) {
+ list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
if (!zone->bzone)
continue;
if (dmz_lock_zone_reclaim(zone))
return zone;
}
- return ERR_PTR(-EBUSY);
+ return NULL;
}
/*
* Select a zone for reclaim.
*/
-struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int dev_idx, bool idle)
{
struct dm_zone *zone;
@@ -1594,9 +2018,9 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
*/
dmz_lock_map(zmd);
if (list_empty(&zmd->reserved_seq_zones_list))
- zone = dmz_get_seq_zone_for_reclaim(zmd);
+ zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
else
- zone = dmz_get_rnd_zone_for_reclaim(zmd);
+ zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
dmz_unlock_map(zmd);
return zone;
@@ -1616,6 +2040,7 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
unsigned int dzone_id;
struct dm_zone *dzone = NULL;
int ret = 0;
+ int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
dmz_lock_map(zmd);
again:
@@ -1630,9 +2055,9 @@ again:
goto out;
/* Allocate a random zone */
- dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
if (!dzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
dzone = ERR_PTR(-EIO);
goto out;
}
@@ -1645,6 +2070,10 @@ again:
} else {
/* The chunk is already mapped: get the mapping zone */
dzone = dmz_get(zmd, dzone_id);
+ if (!dzone) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
if (dzone->chunk != chunk) {
dzone = ERR_PTR(-EIO);
goto out;
@@ -1723,6 +2152,7 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
struct dm_zone *dzone)
{
struct dm_zone *bzone;
+ int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
dmz_lock_map(zmd);
again:
@@ -1731,9 +2161,9 @@ again:
goto out;
/* Allocate a random zone */
- bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
if (!bzone) {
- if (dmz_bdev_is_dying(zmd->dev)) {
+ if (dmz_dev_is_dying(zmd)) {
bzone = ERR_PTR(-EIO);
goto out;
}
@@ -1742,14 +2172,16 @@ again:
}
/* Update the chunk mapping */
- dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
- dmz_id(zmd, bzone));
+ dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
set_bit(DMZ_BUF, &bzone->flags);
bzone->chunk = dzone->chunk;
bzone->bzone = dzone;
dzone->bzone = bzone;
- list_add_tail(&bzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(bzone))
+ list_add_tail(&bzone->link, &zmd->map_cache_list);
+ else
+ list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
out:
dmz_unlock_map(zmd);
@@ -1760,46 +2192,68 @@ out:
* Get an unmapped (free) zone.
* This must be called with the mapping lock held.
*/
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
+ unsigned long flags)
{
struct list_head *list;
struct dm_zone *zone;
+ int i = 0;
- if (flags & DMZ_ALLOC_RND)
- list = &zmd->unmap_rnd_list;
- else
- list = &zmd->unmap_seq_list;
again:
+ if (flags & DMZ_ALLOC_CACHE)
+ list = &zmd->unmap_cache_list;
+ else if (flags & DMZ_ALLOC_RND)
+ list = &zmd->dev[dev_idx].unmap_rnd_list;
+ else
+ list = &zmd->dev[dev_idx].unmap_seq_list;
+
if (list_empty(list)) {
/*
- * No free zone: if this is for reclaim, allow using the
- * reserved sequential zones.
+ * No free zone: return NULL if this is for not reclaim.
*/
- if (!(flags & DMZ_ALLOC_RECLAIM) ||
- list_empty(&zmd->reserved_seq_zones_list))
+ if (!(flags & DMZ_ALLOC_RECLAIM))
return NULL;
+ /*
+ * Try to allocate from other devices
+ */
+ if (i < zmd->nr_devs) {
+ dev_idx = (dev_idx + 1) % zmd->nr_devs;
+ i++;
+ goto again;
+ }
- zone = list_first_entry(&zmd->reserved_seq_zones_list,
- struct dm_zone, link);
- list_del_init(&zone->link);
- atomic_dec(&zmd->nr_reserved_seq_zones);
+ /*
+ * Fall back to the reserved sequential zones
+ */
+ zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
+ struct dm_zone, link);
+ if (zone) {
+ list_del_init(&zone->link);
+ atomic_dec(&zmd->nr_reserved_seq_zones);
+ }
return zone;
}
zone = list_first_entry(list, struct dm_zone, link);
list_del_init(&zone->link);
- if (dmz_is_rnd(zone))
- atomic_dec(&zmd->unmap_nr_rnd);
+ if (dmz_is_cache(zone))
+ atomic_dec(&zmd->unmap_nr_cache);
+ else if (dmz_is_rnd(zone))
+ atomic_dec(&zone->dev->unmap_nr_rnd);
else
- atomic_dec(&zmd->unmap_nr_seq);
+ atomic_dec(&zone->dev->unmap_nr_seq);
if (dmz_is_offline(zone)) {
- dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
+ dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
+ zone = NULL;
+ goto again;
+ }
+ if (dmz_is_meta(zone)) {
+ dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
zone = NULL;
goto again;
}
-
return zone;
}
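A sketch of the calling convention after this change, mirroring the pattern used by dmz_get_chunk_mapping() and dmz_get_chunk_buffer() later in this patch (the wait-and-retry logic those callers use is elided here):

	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
	struct dm_zone *zone;

	/* device index 0 is used for non-reclaim allocations */
	zone = dmz_alloc_zone(zmd, 0, alloc_flags);
	if (!zone) {
		if (dmz_dev_is_dying(zmd))
			return ERR_PTR(-EIO);
		/* otherwise wait for a free zone and retry */
	}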
@@ -1814,16 +2268,18 @@ void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
dmz_reset_zone(zmd, zone);
/* Return the zone to its type unmap list */
- if (dmz_is_rnd(zone)) {
- list_add_tail(&zone->link, &zmd->unmap_rnd_list);
- atomic_inc(&zmd->unmap_nr_rnd);
- } else if (atomic_read(&zmd->nr_reserved_seq_zones) <
- zmd->nr_reserved_seq) {
+ if (dmz_is_cache(zone)) {
+ list_add_tail(&zone->link, &zmd->unmap_cache_list);
+ atomic_inc(&zmd->unmap_nr_cache);
+ } else if (dmz_is_rnd(zone)) {
+ list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
+ atomic_inc(&zone->dev->unmap_nr_rnd);
+ } else if (dmz_is_reserved(zone)) {
list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
atomic_inc(&zmd->nr_reserved_seq_zones);
} else {
- list_add_tail(&zone->link, &zmd->unmap_seq_list);
- atomic_inc(&zmd->unmap_nr_seq);
+ list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
+ atomic_inc(&zone->dev->unmap_nr_seq);
}
wake_up_all(&zmd->free_wq);
@@ -1837,13 +2293,15 @@ void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
unsigned int chunk)
{
/* Set the chunk mapping */
- dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
+ dmz_set_chunk_mapping(zmd, chunk, dzone->id,
DMZ_MAP_UNMAPPED);
dzone->chunk = chunk;
- if (dmz_is_rnd(dzone))
- list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ if (dmz_is_cache(dzone))
+ list_add_tail(&dzone->link, &zmd->map_cache_list);
+ else if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
else
- list_add_tail(&dzone->link, &zmd->map_seq_list);
+ list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
}
/*
@@ -1865,7 +2323,7 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
* Unmapping the chunk buffer zone: clear only
* the chunk buffer mapping
*/
- dzone_id = dmz_id(zmd, zone->bzone);
+ dzone_id = zone->bzone->id;
zone->bzone->bzone = NULL;
zone->bzone = NULL;
@@ -1927,7 +2385,7 @@ static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
sector_t chunk_block)
{
sector_t bitmap_block = 1 + zmd->nr_map_blocks +
- (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
+ (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
(chunk_block >> DMZ_BLOCK_SHIFT_BITS);
return dmz_get_mblock(zmd, bitmap_block);
@@ -1943,7 +2401,7 @@ int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
sector_t chunk_block = 0;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
if (IS_ERR(from_mblk))
return PTR_ERR(from_mblk);
@@ -1978,7 +2436,7 @@ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
int ret;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
if (ret <= 0)
@@ -2002,12 +2460,12 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
sector_t chunk_block, unsigned int nr_blocks)
{
unsigned int count, bit, nr_bits;
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
unsigned int n = 0;
- dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
- dmz_id(zmd, zone), (unsigned long long)chunk_block,
+ dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
+ zone->id, (unsigned long long)chunk_block,
nr_blocks);
WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
@@ -2036,8 +2494,8 @@ int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
if (likely(zone->weight + n <= zone_nr_blocks))
zone->weight += n;
else {
- dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
- dmz_id(zmd, zone), zone->weight,
+ dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
+ zone->id, zone->weight,
zone_nr_blocks - n);
zone->weight = zone_nr_blocks;
}
@@ -2086,10 +2544,10 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
struct dmz_mblock *mblk;
unsigned int n = 0;
- dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
- dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
+ dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
+ zone->id, (u64)chunk_block, nr_blocks);
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
@@ -2116,8 +2574,8 @@ int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
if (zone->weight >= n)
zone->weight -= n;
else {
- dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
- dmz_id(zmd, zone), zone->weight, n);
+ dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
+ zone->id, zone->weight, n);
zone->weight = 0;
}
@@ -2133,7 +2591,7 @@ static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
struct dmz_mblock *mblk;
int ret;
- WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block >= zmd->zone_nr_blocks);
/* Get bitmap block */
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
@@ -2163,7 +2621,7 @@ static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
unsigned long *bitmap;
int n = 0;
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
@@ -2207,7 +2665,7 @@ int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
/* The block is valid: get the number of valid blocks from block */
return dmz_to_next_set_block(zmd, zone, chunk_block,
- zmd->dev->zone_nr_blocks - chunk_block, 0);
+ zmd->zone_nr_blocks - chunk_block, 0);
}
/*
@@ -2223,7 +2681,7 @@ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
int ret;
ret = dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 1);
+ zmd->zone_nr_blocks - start_block, 1);
if (ret < 0)
return ret;
@@ -2231,7 +2689,7 @@ int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
*chunk_block = start_block;
return dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 0);
+ zmd->zone_nr_blocks - start_block, 0);
}
/*
@@ -2270,7 +2728,7 @@ static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
struct dmz_mblock *mblk;
sector_t chunk_block = 0;
unsigned int bit, nr_bits;
- unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int nr_blocks = zmd->zone_nr_blocks;
void *bitmap;
int n = 0;
@@ -2326,7 +2784,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
while (!list_empty(&zmd->mblk_dirty_list)) {
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
- dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
+ dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
(u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
@@ -2344,7 +2802,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Sanity checks: the mblock rbtree should now be empty */
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
- dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
+ dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
(u64)mblk->no, mblk->ref);
mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
@@ -2357,13 +2815,42 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mutex_destroy(&zmd->map_lock);
}
+static void dmz_print_dev(struct dmz_metadata *zmd, int num)
+{
+ struct dmz_dev *dev = &zmd->dev[num];
+
+ if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
+ dmz_dev_info(dev, "Regular block device");
+ else
+ dmz_dev_info(dev, "Host-%s zoned block device",
+ bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
+ "aware" : "managed");
+ if (zmd->sb_version > 1) {
+ sector_t sector_offset =
+ dev->zone_offset << zmd->zone_nr_sectors_shift;
+
+ dmz_dev_info(dev, " %llu 512-byte logical sectors (offset %llu)",
+ (u64)dev->capacity, (u64)sector_offset);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)",
+ dev->nr_zones, (u64)zmd->zone_nr_sectors,
+ (u64)dev->zone_offset);
+ } else {
+ dmz_dev_info(dev, " %llu 512-byte logical sectors",
+ (u64)dev->capacity);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
+ dev->nr_zones, (u64)zmd->zone_nr_sectors);
+ }
+}
+
/*
* Initialize the zoned metadata.
*/
-int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
+int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ struct dmz_metadata **metadata,
+ const char *devname)
{
struct dmz_metadata *zmd;
- unsigned int i, zid;
+ unsigned int i;
struct dm_zone *zone;
int ret;
@@ -2371,7 +2858,9 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
if (!zmd)
return -ENOMEM;
+ strcpy(zmd->devname, devname);
zmd->dev = dev;
+ zmd->nr_devs = num_dev;
zmd->mblk_rbtree = RB_ROOT;
init_rwsem(&zmd->mblk_sem);
mutex_init(&zmd->mblk_flush_lock);
@@ -2380,13 +2869,10 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
INIT_LIST_HEAD(&zmd->mblk_dirty_list);
mutex_init(&zmd->map_lock);
- atomic_set(&zmd->unmap_nr_rnd, 0);
- INIT_LIST_HEAD(&zmd->unmap_rnd_list);
- INIT_LIST_HEAD(&zmd->map_rnd_list);
- atomic_set(&zmd->unmap_nr_seq, 0);
- INIT_LIST_HEAD(&zmd->unmap_seq_list);
- INIT_LIST_HEAD(&zmd->map_seq_list);
+ atomic_set(&zmd->unmap_nr_cache, 0);
+ INIT_LIST_HEAD(&zmd->unmap_cache_list);
+ INIT_LIST_HEAD(&zmd->map_cache_list);
atomic_set(&zmd->nr_reserved_seq_zones, 0);
INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
@@ -2404,14 +2890,22 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
goto err;
/* Set metadata zones starting from sb_zone */
- zid = dmz_id(zmd, zmd->sb_zone);
for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
- zone = dmz_get(zmd, zid + i);
- if (!dmz_is_rnd(zone))
+ zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
+ if (!zone) {
+ dmz_zmd_err(zmd,
+ "metadata zone %u not present", i);
+ ret = -ENXIO;
+ goto err;
+ }
+ if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
+ dmz_zmd_err(zmd,
+ "metadata zone %d is not random", i);
+ ret = -ENXIO;
goto err;
+ }
set_bit(DMZ_META, &zone->flags);
}
-
/* Load mapping table */
ret = dmz_load_mapping(zmd);
if (ret)
@@ -2432,34 +2926,38 @@ int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
/* Metadata cache shrinker */
ret = register_shrinker(&zmd->mblk_shrinker);
if (ret) {
- dmz_dev_err(dev, "Register metadata cache shrinker failed");
+ dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
goto err;
}
- dmz_dev_info(dev, "Host-%s zoned block device",
- bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
- "aware" : "managed");
- dmz_dev_info(dev, " %llu 512-byte logical sectors",
- (u64)dev->capacity);
- dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
- dev->nr_zones, (u64)dev->zone_nr_sectors);
- dmz_dev_info(dev, " %u metadata zones",
- zmd->nr_meta_zones * 2);
- dmz_dev_info(dev, " %u data zones for %u chunks",
- zmd->nr_data_zones, zmd->nr_chunks);
- dmz_dev_info(dev, " %u random zones (%u unmapped)",
- zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
- dmz_dev_info(dev, " %u sequential zones (%u unmapped)",
- zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
- dmz_dev_info(dev, " %u reserved sequential data zones",
- zmd->nr_reserved_seq);
-
- dmz_dev_debug(dev, "Format:");
- dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
+ dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
+ for (i = 0; i < zmd->nr_devs; i++)
+ dmz_print_dev(zmd, i);
+
+ dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors",
+ zmd->nr_zones, (u64)zmd->zone_nr_sectors);
+ dmz_zmd_debug(zmd, " %u metadata zones",
+ zmd->nr_meta_zones * 2);
+ dmz_zmd_debug(zmd, " %u data zones for %u chunks",
+ zmd->nr_data_zones, zmd->nr_chunks);
+ dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)",
+ zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
+ for (i = 0; i < zmd->nr_devs; i++) {
+ dmz_zmd_debug(zmd, " %u random zones (%u unmapped)",
+ dmz_nr_rnd_zones(zmd, i),
+ dmz_nr_unmap_rnd_zones(zmd, i));
+ dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)",
+ dmz_nr_seq_zones(zmd, i),
+ dmz_nr_unmap_seq_zones(zmd, i));
+ }
+ dmz_zmd_debug(zmd, " %u reserved sequential data zones",
+ zmd->nr_reserved_seq);
+ dmz_zmd_debug(zmd, "Format:");
+ dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
zmd->nr_meta_blocks, zmd->max_nr_mblks);
- dmz_dev_debug(dev, " %u data zone mapping blocks",
+ dmz_zmd_debug(zmd, " %u data zone mapping blocks",
zmd->nr_map_blocks);
- dmz_dev_debug(dev, " %u bitmap blocks",
+ dmz_zmd_debug(zmd, " %u bitmap blocks",
zmd->nr_bitmap_blocks);
*metadata = zmd;
@@ -2488,30 +2986,28 @@ void dmz_dtr_metadata(struct dmz_metadata *zmd)
*/
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
- struct dmz_dev *dev = zmd->dev;
struct dm_zone *zone;
sector_t wp_block;
unsigned int i;
int ret;
/* Check zones */
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
zone = dmz_get(zmd, i);
if (!zone) {
- dmz_dev_err(dev, "Unable to get zone %u", i);
+ dmz_zmd_err(zmd, "Unable to get zone %u", i);
return -EIO;
}
-
wp_block = zone->wp_block;
ret = dmz_update_zone(zmd, zone);
if (ret) {
- dmz_dev_err(dev, "Broken zone %u", i);
+ dmz_zmd_err(zmd, "Broken zone %u", i);
return ret;
}
if (dmz_is_offline(zone)) {
- dmz_dev_warn(dev, "Zone %u is offline", i);
+ dmz_zmd_warn(zmd, "Zone %u is offline", i);
continue;
}
@@ -2519,11 +3015,11 @@ int dmz_resume_metadata(struct dmz_metadata *zmd)
if (!dmz_is_seq(zone))
zone->wp_block = 0;
else if (zone->wp_block != wp_block) {
- dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
+ dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
i, (u64)zone->wp_block, (u64)wp_block);
zone->wp_block = wp_block;
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
- dev->zone_nr_blocks - zone->wp_block);
+ zmd->zone_nr_blocks - zone->wp_block);
}
}
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index e7ace908a9b7..2261b4dd60b7 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -13,7 +13,6 @@
struct dmz_reclaim {
struct dmz_metadata *metadata;
- struct dmz_dev *dev;
struct delayed_work work;
struct workqueue_struct *wq;
@@ -22,6 +21,8 @@ struct dmz_reclaim {
struct dm_kcopyd_throttle kc_throttle;
int kc_err;
+ int dev_idx;
+
unsigned long flags;
/* Last target access time */
@@ -44,13 +45,13 @@ enum {
* Percentage of unmapped (free) random zones below which reclaim starts
* even if the target is busy.
*/
-#define DMZ_RECLAIM_LOW_UNMAP_RND 30
+#define DMZ_RECLAIM_LOW_UNMAP_ZONES 30
/*
* Percentage of unmapped (free) random zones above which reclaim will
* stop if the target is busy.
*/
-#define DMZ_RECLAIM_HIGH_UNMAP_RND 50
+#define DMZ_RECLAIM_HIGH_UNMAP_ZONES 50
/*
* Align a sequential zone write pointer to chunk_block.
@@ -59,6 +60,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
sector_t block)
{
struct dmz_metadata *zmd = zrc->metadata;
+ struct dmz_dev *dev = zone->dev;
sector_t wp_block = zone->wp_block;
unsigned int nr_blocks;
int ret;
@@ -74,15 +76,15 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
* pointer and the requested position.
*/
nr_blocks = block - wp_block;
- ret = blkdev_issue_zeroout(zrc->dev->bdev,
+ ret = blkdev_issue_zeroout(dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
- dmz_dev_err(zrc->dev,
+ dmz_dev_err(dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
- dmz_id(zmd, zone), (unsigned long long)wp_block,
+ zone->id, (unsigned long long)wp_block,
(unsigned long long)block, nr_blocks, ret);
- dmz_check_bdev(zrc->dev);
+ dmz_check_bdev(dev);
return ret;
}
@@ -116,7 +118,6 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
struct dmz_metadata *zmd = zrc->metadata;
- struct dmz_dev *dev = zrc->dev;
struct dm_io_region src, dst;
sector_t block = 0, end_block;
sector_t nr_blocks;
@@ -128,7 +129,7 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
if (dmz_is_seq(src_zone))
end_block = src_zone->wp_block;
else
- end_block = dev->zone_nr_blocks;
+ end_block = dmz_zone_nr_blocks(zmd);
src_zone_block = dmz_start_block(zmd, src_zone);
dst_zone_block = dmz_start_block(zmd, dst_zone);
@@ -136,9 +137,14 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
while (block < end_block) {
- if (dev->flags & DMZ_BDEV_DYING)
+ if (src_zone->dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+ if (dst_zone->dev->flags & DMZ_BDEV_DYING)
return -EIO;
+ if (dmz_reclaim_should_terminate(src_zone))
+ return -EINTR;
+
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, src_zone, &block);
if (ret <= 0)
@@ -156,11 +162,11 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
return ret;
}
- src.bdev = dev->bdev;
+ src.bdev = src_zone->dev->bdev;
src.sector = dmz_blk2sect(src_zone_block + block);
src.count = dmz_blk2sect(nr_blocks);
- dst.bdev = dev->bdev;
+ dst.bdev = dst_zone->dev->bdev;
dst.sector = dmz_blk2sect(dst_zone_block + block);
dst.count = src.count;
@@ -194,10 +200,10 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dmz_metadata *zmd = zrc->metadata;
int ret;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
- dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
- dmz_id(zmd, dzone), dmz_weight(dzone));
+ DMDEBUG("(%s/%u): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ dzone->chunk, bzone->id, dmz_weight(bzone),
+ dzone->id, dmz_weight(dzone));
/* Flush data zone into the buffer zone */
ret = dmz_reclaim_copy(zrc, bzone, dzone);
@@ -210,7 +216,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
if (ret == 0) {
/* Free the buffer zone */
- dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unlock_zone_reclaim(dzone);
@@ -233,10 +239,10 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dmz_metadata *zmd = zrc->metadata;
int ret = 0;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
- chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
- dmz_id(zmd, bzone), dmz_weight(bzone));
+ DMDEBUG("(%s/%u): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ chunk, dzone->id, dmz_weight(dzone),
+ bzone->id, dmz_weight(bzone));
/* Flush data zone into the buffer zone */
ret = dmz_reclaim_copy(zrc, dzone, bzone);
@@ -252,7 +258,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
* Free the data zone and remap the chunk to
* the buffer zone.
*/
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unmap_zone(zmd, dzone);
@@ -277,18 +283,26 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
struct dm_zone *szone = NULL;
struct dmz_metadata *zmd = zrc->metadata;
int ret;
+ int alloc_flags = DMZ_ALLOC_SEQ;
- /* Get a free sequential zone */
+ /* Get a free random or sequential zone */
dmz_lock_map(zmd);
- szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
+again:
+ szone = dmz_alloc_zone(zmd, zrc->dev_idx,
+ alloc_flags | DMZ_ALLOC_RECLAIM);
+ if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
+ alloc_flags = DMZ_ALLOC_RND;
+ goto again;
+ }
dmz_unlock_map(zmd);
if (!szone)
return -ENOSPC;
- dmz_dev_debug(zrc->dev,
- "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
- chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
- dmz_id(zmd, szone));
+ DMDEBUG("(%s/%u): Chunk %u, move %s zone %u (weight %u) to %s zone %u",
+ dmz_metadata_label(zmd), zrc->dev_idx, chunk,
+ dmz_is_cache(dzone) ? "cache" : "rnd",
+ dzone->id, dmz_weight(dzone),
+ dmz_is_rnd(szone) ? "rnd" : "seq", szone->id);
/* Flush the random data zone into the sequential zone */
ret = dmz_reclaim_copy(zrc, dzone, szone);
@@ -306,7 +320,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_map(zmd);
} else {
/* Free the data zone and remap the chunk */
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, dzone);
dmz_unlock_zone_reclaim(dzone);
@@ -337,6 +351,14 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
}
/*
+ * Test if the target device is idle.
+ */
+static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+{
+ return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+}
+
+/*
* Find a candidate zone for reclaim and process it.
*/
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
@@ -348,13 +370,16 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
int ret;
/* Get a data zone */
- dzone = dmz_get_zone_for_reclaim(zmd);
- if (IS_ERR(dzone))
- return PTR_ERR(dzone);
+ dzone = dmz_get_zone_for_reclaim(zmd, zrc->dev_idx,
+ dmz_target_idle(zrc));
+ if (!dzone) {
+ DMDEBUG("(%s/%u): No zone found to reclaim",
+ dmz_metadata_label(zmd), zrc->dev_idx);
+ return -EBUSY;
+ }
start = jiffies;
-
- if (dmz_is_rnd(dzone)) {
+ if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
if (!dmz_weight(dzone)) {
/* Empty zone */
dmz_reclaim_empty(zrc, dzone);
@@ -395,54 +420,80 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
}
out:
if (ret) {
+ if (ret == -EINTR)
+ DMDEBUG("(%s/%u): reclaim zone %u interrupted",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id);
+ else
+ DMDEBUG("(%s/%u): Failed to reclaim zone %u, err %d",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id, ret);
dmz_unlock_zone_reclaim(dzone);
return ret;
}
ret = dmz_flush_metadata(zrc->metadata);
if (ret) {
- dmz_dev_debug(zrc->dev,
- "Metadata flush for zone %u failed, err %d\n",
- dmz_id(zmd, rzone), ret);
+ DMDEBUG("(%s/%u): Metadata flush for zone %u failed, err %d",
+ dmz_metadata_label(zmd), zrc->dev_idx, rzone->id, ret);
return ret;
}
- dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
- dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+ DMDEBUG("(%s/%u): Reclaimed zone %u in %u ms",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ rzone->id, jiffies_to_msecs(jiffies - start));
return 0;
}
-/*
- * Test if the target device is idle.
- */
-static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
{
- return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+ struct dmz_metadata *zmd = zrc->metadata;
+ unsigned int nr_cache = dmz_nr_cache_zones(zmd);
+ unsigned int nr_unmap, nr_zones;
+
+ if (nr_cache) {
+ nr_zones = nr_cache;
+ nr_unmap = dmz_nr_unmap_cache_zones(zmd);
+ } else {
+ nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
+ nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
+ }
+ return nr_unmap * 100 / nr_zones;
}
/*
* Test if reclaim is necessary.
*/
-static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
+static bool dmz_should_reclaim(struct dmz_reclaim *zrc, unsigned int p_unmap)
{
- struct dmz_metadata *zmd = zrc->metadata;
- unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
- unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
- unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
+ unsigned int nr_reclaim;
+
+ nr_reclaim = dmz_nr_rnd_zones(zrc->metadata, zrc->dev_idx);
+
+ if (dmz_nr_cache_zones(zrc->metadata)) {
+ /*
+ * The first device in a multi-device
+ * setup only contains cache zones, so
+ * never start reclaim there.
+ */
+ if (zrc->dev_idx == 0)
+ return false;
+ nr_reclaim += dmz_nr_cache_zones(zrc->metadata);
+ }
/* Reclaim when idle */
- if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
+ if (dmz_target_idle(zrc) && nr_reclaim)
return true;
- /* If there are still plenty of random zones, do not reclaim */
- if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
+ /* If there are still plenty of cache zones, do not reclaim */
+ if (p_unmap >= DMZ_RECLAIM_HIGH_UNMAP_ZONES)
return false;
/*
- * If the percentage of unmapped random zones is low,
+ * If the percentage of unmapped cache zones is low,
* reclaim even if the target is busy.
*/
- return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
+ return p_unmap <= DMZ_RECLAIM_LOW_UNMAP_ZONES;
}
/*
@@ -452,14 +503,14 @@ static void dmz_reclaim_work(struct work_struct *work)
{
struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
struct dmz_metadata *zmd = zrc->metadata;
- unsigned int nr_rnd, nr_unmap_rnd;
- unsigned int p_unmap_rnd;
+ unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
int ret;
- if (dmz_bdev_is_dying(zrc->dev))
+ if (dmz_dev_is_dying(zmd))
return;
- if (!dmz_should_reclaim(zrc)) {
+ p_unmap = dmz_reclaim_percentage(zrc);
+ if (!dmz_should_reclaim(zrc, p_unmap)) {
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
return;
}
@@ -470,27 +521,29 @@ static void dmz_reclaim_work(struct work_struct *work)
 * and slower if there are still some free random zones, to avoid
 * impacting the user workload as much as possible.
*/
- nr_rnd = dmz_nr_rnd_zones(zmd);
- nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
- p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
- if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
+ if (dmz_target_idle(zrc) || p_unmap < DMZ_RECLAIM_LOW_UNMAP_ZONES / 2) {
/* Idle or very low percentage: go fast */
zrc->kc_throttle.throttle = 100;
} else {
/* Busy but we still have some random zone: throttle */
- zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
+ zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
}
- dmz_dev_debug(zrc->dev,
- "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
- zrc->kc_throttle.throttle,
- (dmz_target_idle(zrc) ? "Idle" : "Busy"),
- p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+ nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
+ nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
+
+ DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
+ dmz_metadata_label(zmd), zrc->dev_idx,
+ zrc->kc_throttle.throttle,
+ (dmz_target_idle(zrc) ? "Idle" : "Busy"),
+ p_unmap, dmz_nr_unmap_cache_zones(zmd),
+ dmz_nr_cache_zones(zmd),
+ dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx),
+ dmz_nr_rnd_zones(zmd, zrc->dev_idx));
ret = dmz_do_reclaim(zrc);
- if (ret) {
- dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
- if (!dmz_check_bdev(zrc->dev))
+ if (ret && ret != -EINTR) {
+ if (!dmz_check_dev(zmd))
return;
}
@@ -500,8 +553,8 @@ static void dmz_reclaim_work(struct work_struct *work)
/*
* Initialize reclaim.
*/
-int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
- struct dmz_reclaim **reclaim)
+int dmz_ctr_reclaim(struct dmz_metadata *zmd,
+ struct dmz_reclaim **reclaim, int idx)
{
struct dmz_reclaim *zrc;
int ret;
@@ -510,9 +563,9 @@ int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
if (!zrc)
return -ENOMEM;
- zrc->dev = dev;
zrc->metadata = zmd;
zrc->atime = jiffies;
+ zrc->dev_idx = idx;
/* Reclaim kcopyd client */
zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
@@ -524,8 +577,8 @@ int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
/* Reclaim work */
INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
- zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
- dev->name);
+ zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s_%d", WQ_MEM_RECLAIM,
+ dmz_metadata_label(zmd), idx);
if (!zrc->wq) {
ret = -ENOMEM;
goto err;
@@ -583,7 +636,8 @@ void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
*/
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
- if (dmz_should_reclaim(zrc))
+ unsigned int p_unmap = dmz_reclaim_percentage(zrc);
+
+ if (dmz_should_reclaim(zrc, p_unmap))
mod_delayed_work(zrc->wq, &zrc->work, 0);
}
-
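
Note on the reclaim rework above: dmz_reclaim_percentage() now computes the
percentage of unmapped cache (or, without cache zones, random) zones once, and
dmz_should_reclaim() compares it against high/low watermarks. A minimal
userspace sketch of that decision follows; the watermark values and helper
names are illustrative assumptions, not the driver's constants, and the idle
case is simplified.

  /* Illustrative sketch of the reclaim trigger decision. */
  #include <stdbool.h>
  #include <stdio.h>

  #define HIGH_UNMAP_ZONES 50   /* assumed: plenty of free zones, skip */
  #define LOW_UNMAP_ZONES  30   /* assumed: low on free zones, force reclaim */

  static unsigned int reclaim_percentage(unsigned int nr_unmap,
                                         unsigned int nr_zones)
  {
          /* free zones as a percentage, as dmz_reclaim_percentage() does */
          return nr_zones ? nr_unmap * 100 / nr_zones : 0;
  }

  static bool should_reclaim(bool idle, unsigned int p_unmap)
  {
          if (idle)
                  return true;                    /* reclaim when idle */
          if (p_unmap >= HIGH_UNMAP_ZONES)
                  return false;                   /* still plenty free */
          return p_unmap <= LOW_UNMAP_ZONES;      /* low: reclaim even if busy */
  }

  int main(void)
  {
          /* e.g. 12 of 64 cache zones unmapped while the target is busy */
          unsigned int p = reclaim_percentage(12, 64);

          printf("%u%% free -> %s\n", p,
                 should_reclaim(false, p) ? "reclaim" : "skip");
          return 0;
  }
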
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index f4f83d39b3dc..a907a9446c0b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -17,7 +17,7 @@
* Zone BIO context.
*/
struct dmz_bioctx {
- struct dmz_target *target;
+ struct dmz_dev *dev;
struct dm_zone *zone;
struct bio *bio;
refcount_t ref;
@@ -38,9 +38,10 @@ struct dm_chunk_work {
* Target descriptor.
*/
struct dmz_target {
- struct dm_dev *ddev;
+ struct dm_dev **ddev;
+ unsigned int nr_ddevs;
- unsigned long flags;
+ unsigned int flags;
/* Zoned block device information */
struct dmz_dev *dev;
@@ -48,9 +49,6 @@ struct dmz_target {
/* For metadata handling */
struct dmz_metadata *metadata;
- /* For reclaim */
- struct dmz_reclaim *reclaim;
-
/* For chunk work */
struct radix_tree_root chunk_rxtree;
struct workqueue_struct *chunk_wq;
@@ -76,12 +74,13 @@ struct dmz_target {
*/
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
bio->bi_status = status;
- if (bio->bi_status != BLK_STS_OK)
- bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
+ if (bioctx->dev && bio->bi_status != BLK_STS_OK)
+ bioctx->dev->flags |= DMZ_CHECK_BDEV;
if (refcount_dec_and_test(&bioctx->ref)) {
struct dm_zone *zone = bioctx->zone;
@@ -118,14 +117,20 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio, sector_t chunk_block,
unsigned int nr_blocks)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_dev *dev = zone->dev;
struct bio *clone;
+ if (dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+
clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- bio_set_dev(clone, dmz->dev->bdev);
+ bio_set_dev(clone, dev->bdev);
+ bioctx->dev = dev;
clone->bi_iter.bi_sector =
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
@@ -165,7 +170,8 @@ static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
sector_t end_block = chunk_block + nr_blocks;
struct dm_zone *rzone, *bzone;
@@ -177,19 +183,22 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
return 0;
}
- dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
- dmz_id(dmz->metadata, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
/* Check block validity to determine the read location */
bzone = zone->bzone;
while (chunk_block < end_block) {
nr_blocks = 0;
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block) {
/* Test block validity in the data zone */
- ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+ ret = dmz_block_valid(zmd, zone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -204,7 +213,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
* Check the buffer zone, if there is one.
*/
if (!nr_blocks && bzone) {
- ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+ ret = dmz_block_valid(zmd, bzone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
@@ -216,8 +225,10 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks) {
/* Valid blocks found: read them */
- nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
- ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ nr_blocks = min_t(unsigned int, nr_blocks,
+ end_block - chunk_block);
+ ret = dmz_submit_bio(dmz, rzone, bio,
+ chunk_block, nr_blocks);
if (ret)
return ret;
chunk_block += nr_blocks;
@@ -308,25 +319,30 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
if (!zone)
return -ENOSPC;
- dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (dmz_is_rnd(zone) ? "RND" : "SEQ"),
- dmz_id(dmz->metadata, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (dmz_is_rnd(zone) ? "RND" :
+ (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
- if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block == zone->wp_block) {
/*
* zone is a random zone or it is a sequential zone
* and the BIO is aligned to the zone write pointer:
* direct write the zone.
*/
- return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
+ return dmz_handle_direct_write(dmz, zone, bio,
+ chunk_block, nr_blocks);
}
/*
@@ -345,7 +361,7 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
struct dmz_metadata *zmd = dmz->metadata;
sector_t block = dmz_bio_block(bio);
unsigned int nr_blocks = dmz_bio_blocks(bio);
- sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+ sector_t chunk_block = dmz_chunk_block(zmd, block);
int ret = 0;
/* For unmapped chunks, there is nothing to do */
@@ -355,16 +371,18 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
if (dmz_is_readonly(zone))
return -EROFS;
- dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- dmz_id(zmd, zone),
- (unsigned long long)chunk_block, nr_blocks);
+ DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
+ dmz_metadata_label(dmz->metadata),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ zone->id,
+ (unsigned long long)chunk_block, nr_blocks);
/*
* Invalidate blocks in the data zone and its
* buffer zone if one is mapped.
*/
- if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+ if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+ chunk_block < zone->wp_block)
ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
if (ret == 0 && zone->bzone)
ret = dmz_invalidate_blocks(zmd, zone->bzone,
@@ -378,31 +396,28 @@ static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
struct bio *bio)
{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_bioctx *bioctx =
+ dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
struct dmz_metadata *zmd = dmz->metadata;
struct dm_zone *zone;
- int ret;
+ int i, ret;
/*
* Write may trigger a zone allocation. So make sure the
* allocation can succeed.
*/
if (bio_op(bio) == REQ_OP_WRITE)
- dmz_schedule_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_schedule_reclaim(dmz->dev[i].reclaim);
dmz_lock_metadata(zmd);
- if (dmz->dev->flags & DMZ_BDEV_DYING) {
- ret = -EIO;
- goto out;
- }
-
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
+ * the zone returned will be set to active state.
*/
- zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+ zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
bio_op(bio));
if (IS_ERR(zone)) {
ret = PTR_ERR(zone);
@@ -413,6 +428,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
if (zone) {
dmz_activate_zone(zone);
bioctx->zone = zone;
+ dmz_reclaim_bio_acc(zone->dev->reclaim);
}
switch (bio_op(bio)) {
@@ -427,8 +443,8 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
ret = dmz_handle_discard(dmz, zone, bio);
break;
default:
- dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
- bio_op(bio));
+ DMERR("(%s): Unsupported BIO operation 0x%x",
+ dmz_metadata_label(dmz->metadata), bio_op(bio));
ret = -EIO;
}
@@ -502,7 +518,8 @@ static void dmz_flush_work(struct work_struct *work)
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
if (ret)
- dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+ DMDEBUG("(%s): Metadata flush failed, rc=%d",
+ dmz_metadata_label(dmz->metadata), ret);
/* Process queued flush requests */
while (1) {
@@ -525,7 +542,7 @@ static void dmz_flush_work(struct work_struct *work)
*/
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
- unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+ unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
struct dm_chunk_work *cw;
int ret = 0;
@@ -558,7 +575,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
bio_list_add(&cw->bio_list, bio);
- dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
@@ -618,23 +634,22 @@ bool dmz_check_bdev(struct dmz_dev *dmz_dev)
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *dev = dmz->dev;
+ struct dmz_metadata *zmd = dmz->metadata;
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
int ret;
- if (dmz_bdev_is_dying(dmz->dev))
+ if (dmz_dev_is_dying(zmd))
return DM_MAPIO_KILL;
- dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
- bio_op(bio), (unsigned long long)sector, nr_sectors,
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
- (unsigned int)dmz_bio_blocks(bio));
-
- bio_set_dev(bio, dev->bdev);
+ DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+ dmz_metadata_label(zmd),
+ bio_op(bio), (unsigned long long)sector, nr_sectors,
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
+ (unsigned int)dmz_bio_blocks(bio));
if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
@@ -644,7 +659,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_KILL;
/* Initialize the BIO context */
- bioctx->target = dmz;
+ bioctx->dev = NULL;
bioctx->zone = NULL;
bioctx->bio = bio;
refcount_set(&bioctx->ref, 1);
@@ -659,17 +674,17 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
}
/* Split zone BIOs to fit entirely into a zone */
- chunk_sector = sector & (dev->zone_nr_sectors - 1);
- if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
- dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+ chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
+ if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
+ dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
/* Now ready to handle this BIO */
ret = dmz_queue_chunk_work(dmz, bio);
if (ret) {
- dmz_dev_debug(dmz->dev,
- "BIO op %d, can't process chunk %llu, err %i\n",
- bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
- ret);
+ DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
+ dmz_metadata_label(zmd),
+ bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
+ ret);
return DM_MAPIO_REQUEUE;
}
@@ -679,64 +694,65 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
/*
* Get zoned device information.
*/
-static int dmz_get_zoned_device(struct dm_target *ti, char *path)
+static int dmz_get_zoned_device(struct dm_target *ti, char *path,
+ int idx, int nr_devs)
{
struct dmz_target *dmz = ti->private;
- struct request_queue *q;
+ struct dm_dev *ddev;
struct dmz_dev *dev;
- sector_t aligned_capacity;
int ret;
+ struct block_device *bdev;
/* Get the target device */
- ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
+ ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
if (ret) {
ti->error = "Get target device failed";
- dmz->ddev = NULL;
return ret;
}
- dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err;
+ bdev = ddev->bdev;
+ if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+ if (nr_devs == 1) {
+ ti->error = "Invalid regular device";
+ goto err;
+ }
+ if (idx != 0) {
+ ti->error = "First device must be a regular device";
+ goto err;
+ }
+ if (dmz->ddev[0]) {
+ ti->error = "Too many regular devices";
+ goto err;
+ }
+ dev = &dmz->dev[idx];
+ dev->flags = DMZ_BDEV_REGULAR;
+ } else {
+ if (dmz->ddev[idx]) {
+ ti->error = "Too many zoned devices";
+ goto err;
+ }
+ if (nr_devs > 1 && idx == 0) {
+ ti->error = "First device must be a regular device";
+ goto err;
+ }
+ dev = &dmz->dev[idx];
}
-
- dev->bdev = dmz->ddev->bdev;
+ dev->bdev = bdev;
+ dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
- if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
- ti->error = "Not a zoned block device";
- ret = -EINVAL;
+ dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ if (ti->begin) {
+ ti->error = "Partial mapping is not supported";
goto err;
}
- q = bdev_get_queue(dev->bdev);
- dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity &
- ~((sector_t)blk_queue_zone_sectors(q) - 1);
- if (ti->begin ||
- ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
- ti->error = "Partial mapping not supported";
- ret = -EINVAL;
- goto err;
- }
-
- dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
-
- dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
- dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
-
- dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
-
- dmz->dev = dev;
+ dmz->ddev[idx] = ddev;
return 0;
err:
- dm_put_device(ti, dmz->ddev);
- kfree(dev);
-
- return ret;
+ dm_put_device(ti, ddev);
+ return -EINVAL;
}
/*
@@ -745,10 +761,78 @@ err:
static void dmz_put_zoned_device(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
- dm_put_device(ti, dmz->ddev);
- kfree(dmz->dev);
- dmz->dev = NULL;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ if (dmz->ddev[i]) {
+ dm_put_device(ti, dmz->ddev[i]);
+ dmz->ddev[i] = NULL;
+ }
+ }
+}
+
+static int dmz_fixup_devices(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+ struct dmz_dev *reg_dev, *zoned_dev;
+ struct request_queue *q;
+ sector_t zone_nr_sectors = 0;
+ int i;
+
+ /*
+ * When we have more than one device, the first one must be a
+ * regular block device and the others zoned block devices.
+ */
+ if (dmz->nr_ddevs > 1) {
+ reg_dev = &dmz->dev[0];
+ if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
+ ti->error = "Primary disk is not a regular device";
+ return -EINVAL;
+ }
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ zoned_dev = &dmz->dev[i];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Secondary disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ if (zone_nr_sectors &&
+ zone_nr_sectors != blk_queue_zone_sectors(q)) {
+ ti->error = "Zone nr sectors mismatch";
+ return -EINVAL;
+ }
+ zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->zone_nr_sectors = zone_nr_sectors;
+ zoned_dev->nr_zones =
+ blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ }
+ } else {
+ reg_dev = NULL;
+ zoned_dev = &dmz->dev[0];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+ }
+
+ if (reg_dev) {
+ sector_t zone_offset;
+
+ reg_dev->zone_nr_sectors = zone_nr_sectors;
+ reg_dev->nr_zones =
+ DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
+ reg_dev->zone_nr_sectors);
+ reg_dev->zone_offset = 0;
+ zone_offset = reg_dev->nr_zones;
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dmz->dev[i].zone_offset = zone_offset;
+ zone_offset += dmz->dev[i].nr_zones;
+ }
+ }
+ return 0;
}
/*
@@ -757,11 +841,10 @@ static void dmz_put_zoned_device(struct dm_target *ti)
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dmz_target *dmz;
- struct dmz_dev *dev;
- int ret;
+ int ret, i;
/* Check arguments */
- if (argc != 1) {
+ if (argc < 1) {
ti->error = "Invalid argument count";
return -EINVAL;
}
@@ -772,25 +855,42 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Unable to allocate the zoned target descriptor";
return -ENOMEM;
}
+ dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
+ if (!dmz->dev) {
+ ti->error = "Unable to allocate the zoned device descriptors";
+ kfree(dmz);
+ return -ENOMEM;
+ }
+ dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
+ if (!dmz->ddev) {
+ ti->error = "Unable to allocate the dm device descriptors";
+ ret = -ENOMEM;
+ goto err;
+ }
+ dmz->nr_ddevs = argc;
+
ti->private = dmz;
/* Get the target zoned block device */
- ret = dmz_get_zoned_device(ti, argv[0]);
- if (ret) {
- dmz->ddev = NULL;
- goto err;
+ for (i = 0; i < argc; i++) {
+ ret = dmz_get_zoned_device(ti, argv[i], i, argc);
+ if (ret)
+ goto err_dev;
}
+ ret = dmz_fixup_devices(ti);
+ if (ret)
+ goto err_dev;
/* Initialize metadata */
- dev = dmz->dev;
- ret = dmz_ctr_metadata(dev, &dmz->metadata);
+ ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
+ dm_table_device_name(ti->table));
if (ret) {
ti->error = "Metadata initialization failed";
goto err_dev;
}
/* Set target (no write same support) */
- ti->max_io_len = dev->zone_nr_sectors << 9;
+ ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
@@ -799,7 +899,8 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->discards_supported = true;
/* The exposed capacity is the number of chunks that can be mapped */
- ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+ ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
+ dmz_zone_nr_sectors_shift(dmz->metadata);
/* Zone BIO */
ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
@@ -811,8 +912,9 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
- dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
- 0, dev->name);
+ dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ dmz_metadata_label(dmz->metadata));
if (!dmz->chunk_wq) {
ti->error = "Create chunk workqueue failed";
ret = -ENOMEM;
@@ -824,7 +926,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bio_list_init(&dmz->flush_list);
INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
- dev->name);
+ dmz_metadata_label(dmz->metadata));
if (!dmz->flush_wq) {
ti->error = "Create flush workqueue failed";
ret = -ENOMEM;
@@ -833,15 +935,18 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
/* Initialize reclaim */
- ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
- if (ret) {
- ti->error = "Zone reclaim initialization failed";
- goto err_fwq;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
+ if (ret) {
+ ti->error = "Zone reclaim initialization failed";
+ goto err_fwq;
+ }
}
- dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
- (unsigned long long)ti->len,
- (unsigned long long)dmz_sect2blk(ti->len));
+ DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
+ dmz_metadata_label(dmz->metadata),
+ (unsigned long long)ti->len,
+ (unsigned long long)dmz_sect2blk(ti->len));
return 0;
err_fwq:
@@ -856,6 +961,7 @@ err_meta:
err_dev:
dmz_put_zoned_device(ti);
err:
+ kfree(dmz->dev);
kfree(dmz);
return ret;
@@ -867,11 +973,13 @@ err:
static void dmz_dtr(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
flush_workqueue(dmz->chunk_wq);
destroy_workqueue(dmz->chunk_wq);
- dmz_dtr_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_dtr_reclaim(dmz->dev[i].reclaim);
cancel_delayed_work_sync(&dmz->flush_work);
destroy_workqueue(dmz->flush_wq);
@@ -886,6 +994,7 @@ static void dmz_dtr(struct dm_target *ti)
mutex_destroy(&dmz->chunk_lock);
+ kfree(dmz->dev);
kfree(dmz);
}
@@ -895,7 +1004,7 @@ static void dmz_dtr(struct dm_target *ti)
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dmz_target *dmz = ti->private;
- unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+ unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
limits->logical_block_size = DMZ_BLOCK_SIZE;
limits->physical_block_size = DMZ_BLOCK_SIZE;
@@ -923,11 +1032,12 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = &dmz->dev[0];
- if (!dmz_check_bdev(dmz->dev))
+ if (!dmz_check_bdev(dev))
return -EIO;
- *bdev = dmz->dev->bdev;
+ *bdev = dev->bdev;
return 0;
}
@@ -938,9 +1048,11 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
static void dmz_suspend(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
flush_workqueue(dmz->chunk_wq);
- dmz_suspend_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_suspend_reclaim(dmz->dev[i].reclaim);
cancel_delayed_work_sync(&dmz->flush_work);
}
@@ -950,24 +1062,95 @@ static void dmz_suspend(struct dm_target *ti)
static void dmz_resume(struct dm_target *ti)
{
struct dmz_target *dmz = ti->private;
+ int i;
queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
- dmz_resume_reclaim(dmz->reclaim);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_resume_reclaim(dmz->dev[i].reclaim);
}
static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dmz_target *dmz = ti->private;
- struct dmz_dev *dev = dmz->dev;
- sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+ unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
+ sector_t capacity;
+ int i, r;
+
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+ r = fn(ti, dmz->ddev[i], 0, capacity, data);
+ if (r)
+ break;
+ }
+ return r;
+}
+
+static void dmz_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ struct dmz_target *dmz = ti->private;
+ ssize_t sz = 0;
+ char buf[BDEVNAME_SIZE];
+ struct dmz_dev *dev;
+ int i;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%u zones %u/%u cache",
+ dmz_nr_zones(dmz->metadata),
+ dmz_nr_unmap_cache_zones(dmz->metadata),
+ dmz_nr_cache_zones(dmz->metadata));
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ /*
+ * For a multi-device setup the first device
+ * contains only cache zones.
+ */
+ if ((i == 0) &&
+ (dmz_nr_cache_zones(dmz->metadata) > 0))
+ continue;
+ DMEMIT(" %u/%u random %u/%u sequential",
+ dmz_nr_unmap_rnd_zones(dmz->metadata, i),
+ dmz_nr_rnd_zones(dmz->metadata, i),
+ dmz_nr_unmap_seq_zones(dmz->metadata, i),
+ dmz_nr_seq_zones(dmz->metadata, i));
+ }
+ break;
+ case STATUSTYPE_TABLE:
+ dev = &dmz->dev[0];
+ format_dev_t(buf, dev->bdev->bd_dev);
+ DMEMIT("%s", buf);
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dev = &dmz->dev[i];
+ format_dev_t(buf, dev->bdev->bd_dev);
+ DMEMIT(" %s", buf);
+ }
+ break;
+ }
+ return;
+}
+
+static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
+{
+ struct dmz_target *dmz = ti->private;
+ int r = -EINVAL;
+
+ if (!strcasecmp(argv[0], "reclaim")) {
+ int i;
- return fn(ti, dmz->ddev, 0, capacity, data);
+ for (i = 0; i < dmz->nr_ddevs; i++)
+ dmz_schedule_reclaim(dmz->dev[i].reclaim);
+ r = 0;
+ } else
+ DMERR("unrecognized message %s", argv[0]);
+ return r;
}
static struct target_type dmz_type = {
.name = "zoned",
- .version = {1, 1, 0},
+ .version = {2, 0, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = dmz_ctr,
@@ -978,6 +1161,8 @@ static struct target_type dmz_type = {
.postsuspend = dmz_suspend,
.resume = dmz_resume,
.iterate_devices = dmz_iterate_devices,
+ .status = dmz_status,
+ .message = dmz_message,
};
static int __init dmz_init(void)
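
A note on the device layout established by dmz_fixup_devices() above: the
regular (cache) device occupies the start of the zone address space and each
zoned device is placed behind it via zone_offset. The standalone sketch below
reproduces only that accumulation, with made-up zone counts and a simplified
descriptor struct.

  /* Sketch of the zone_offset accumulation; counts are invented. */
  #include <stdio.h>

  struct dev_desc {
          unsigned int nr_zones;
          unsigned int zone_offset;       /* first global zone id of this dev */
  };

  int main(void)
  {
          /* dev[0]: regular/cache device, dev[1..]: zoned devices */
          struct dev_desc dev[3] = {
                  { .nr_zones = 128 }, { .nr_zones = 512 }, { .nr_zones = 512 },
          };
          unsigned int i, zone_offset = dev[0].nr_zones;

          for (i = 1; i < 3; i++) {
                  dev[i].zone_offset = zone_offset;
                  zone_offset += dev[i].nr_zones;
          }
          for (i = 0; i < 3; i++)
                  printf("dev %u: zones %u..%u\n", i, dev[i].zone_offset,
                         dev[i].zone_offset + dev[i].nr_zones - 1);
          return 0;
  }
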
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index 5b5e493d479c..22f11440b423 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -45,34 +45,50 @@
#define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
#define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
+struct dmz_metadata;
+struct dmz_reclaim;
+
/*
* Zoned block device information.
*/
struct dmz_dev {
struct block_device *bdev;
+ struct dmz_metadata *metadata;
+ struct dmz_reclaim *reclaim;
char name[BDEVNAME_SIZE];
+ uuid_t uuid;
sector_t capacity;
+ unsigned int dev_idx;
+
unsigned int nr_zones;
+ unsigned int zone_offset;
unsigned int flags;
sector_t zone_nr_sectors;
- unsigned int zone_nr_sectors_shift;
- sector_t zone_nr_blocks;
- sector_t zone_nr_blocks_shift;
+ unsigned int nr_rnd;
+ atomic_t unmap_nr_rnd;
+ struct list_head unmap_rnd_list;
+ struct list_head map_rnd_list;
+
+ unsigned int nr_seq;
+ atomic_t unmap_nr_seq;
+ struct list_head unmap_seq_list;
+ struct list_head map_seq_list;
};
-#define dmz_bio_chunk(dev, bio) ((bio)->bi_iter.bi_sector >> \
- (dev)->zone_nr_sectors_shift)
-#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+#define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \
+ dmz_zone_nr_sectors_shift(zmd))
+#define dmz_chunk_block(zmd, b) ((b) & (dmz_zone_nr_blocks(zmd) - 1))
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
#define DMZ_CHECK_BDEV (2 << 0)
+#define DMZ_BDEV_REGULAR (4 << 0)
/*
* Zone descriptor.
@@ -81,12 +97,18 @@ struct dm_zone {
/* For listing the zone depending on its state */
struct list_head link;
+ /* Device containing this zone */
+ struct dmz_dev *dev;
+
/* Zone type and state */
unsigned long flags;
/* Zone activation reference count */
atomic_t refcount;
+ /* Zone id */
+ unsigned int id;
+
/* Zone write pointer block (relative to the zone start block) */
unsigned int wp_block;
@@ -109,6 +131,7 @@ struct dm_zone {
*/
enum {
/* Zone write type */
+ DMZ_CACHE,
DMZ_RND,
DMZ_SEQ,
@@ -120,22 +143,28 @@ enum {
DMZ_META,
DMZ_DATA,
DMZ_BUF,
+ DMZ_RESERVED,
/* Zone internal state */
DMZ_RECLAIM,
DMZ_SEQ_WRITE_ERR,
+ DMZ_RECLAIM_TERMINATE,
};
/*
* Zone data accessors.
*/
+#define dmz_is_cache(z) test_bit(DMZ_CACHE, &(z)->flags)
#define dmz_is_rnd(z) test_bit(DMZ_RND, &(z)->flags)
#define dmz_is_seq(z) test_bit(DMZ_SEQ, &(z)->flags)
#define dmz_is_empty(z) ((z)->wp_block == 0)
#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
+#define dmz_is_reserved(z) test_bit(DMZ_RESERVED, &(z)->flags)
#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
+#define dmz_reclaim_should_terminate(z) \
+ test_bit(DMZ_RECLAIM_TERMINATE, &(z)->flags)
#define dmz_is_meta(z) test_bit(DMZ_META, &(z)->flags)
#define dmz_is_buf(z) test_bit(DMZ_BUF, &(z)->flags)
@@ -158,13 +187,11 @@ enum {
#define dmz_dev_debug(dev, format, args...) \
DMDEBUG("(%s): " format, (dev)->name, ## args)
-struct dmz_metadata;
-struct dmz_reclaim;
-
/*
* Functions defined in dm-zoned-metadata.c
*/
-int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **zmd);
+int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ struct dmz_metadata **zmd, const char *devname);
void dmz_dtr_metadata(struct dmz_metadata *zmd);
int dmz_resume_metadata(struct dmz_metadata *zmd);
@@ -175,23 +202,38 @@ void dmz_unlock_metadata(struct dmz_metadata *zmd);
void dmz_lock_flush(struct dmz_metadata *zmd);
void dmz_unlock_flush(struct dmz_metadata *zmd);
int dmz_flush_metadata(struct dmz_metadata *zmd);
+const char *dmz_metadata_label(struct dmz_metadata *zmd);
-unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
+bool dmz_check_dev(struct dmz_metadata *zmd);
+bool dmz_dev_is_dying(struct dmz_metadata *zmd);
+
#define DMZ_ALLOC_RND 0x01
-#define DMZ_ALLOC_RECLAIM 0x02
+#define DMZ_ALLOC_CACHE 0x02
+#define DMZ_ALLOC_SEQ 0x04
+#define DMZ_ALLOC_RECLAIM 0x10
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd,
+ unsigned int dev_idx, unsigned long flags);
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
unsigned int chunk);
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
-unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
-unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx);
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd);
/*
* Activate a zone (increment its reference count).
@@ -201,26 +243,10 @@ static inline void dmz_activate_zone(struct dm_zone *zone)
atomic_inc(&zone->refcount);
}
-/*
- * Deactivate a zone. This decrement the zone reference counter
- * indicating that all BIOs to the zone have completed when the count is 0.
- */
-static inline void dmz_deactivate_zone(struct dm_zone *zone)
-{
- atomic_dec(&zone->refcount);
-}
-
-/*
- * Test if a zone is active, that is, has a refcount > 0.
- */
-static inline bool dmz_is_active(struct dm_zone *zone)
-{
- return atomic_read(&zone->refcount);
-}
-
int dmz_lock_zone_reclaim(struct dm_zone *zone);
void dmz_unlock_zone_reclaim(struct dm_zone *zone);
-struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd);
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
+ unsigned int dev_idx, bool idle);
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
unsigned int chunk, int op);
@@ -244,8 +270,7 @@ int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
/*
* Functions defined in dm-zoned-reclaim.c
*/
-int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
- struct dmz_reclaim **zrc);
+int dmz_ctr_reclaim(struct dmz_metadata *zmd, struct dmz_reclaim **zrc, int idx);
void dmz_dtr_reclaim(struct dmz_reclaim *zrc);
void dmz_suspend_reclaim(struct dmz_reclaim *zrc);
void dmz_resume_reclaim(struct dmz_reclaim *zrc);
@@ -258,4 +283,22 @@ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
bool dmz_check_bdev(struct dmz_dev *dmz_dev);
+/*
+ * Deactivate a zone. This decrement the zone reference counter
+ * indicating that all BIOs to the zone have completed when the count is 0.
+ */
+static inline void dmz_deactivate_zone(struct dm_zone *zone)
+{
+ dmz_reclaim_bio_acc(zone->dev->reclaim);
+ atomic_dec(&zone->refcount);
+}
+
+/*
+ * Test if a zone is active, that is, has a refcount > 0.
+ */
+static inline bool dmz_is_active(struct dm_zone *zone)
+{
+ return atomic_read(&zone->refcount);
+}
+
#endif /* DM_ZONED_H */
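
Since dmz_bio_chunk() and dmz_chunk_block() above now derive everything from
the metadata geometry accessors, here is a quick worked example of the
underlying arithmetic. The zone size is an example value and the
sector-to-block shift assumes 4 KiB blocks on 512-byte sectors.

  /* Worked example of the chunk/block math behind the two macros. */
  #include <stdio.h>

  int main(void)
  {
          unsigned long long zone_nr_sectors = 524288;              /* 256 MiB zone */
          unsigned long long zone_nr_blocks = zone_nr_sectors >> 3; /* 4 KiB blocks */
          unsigned long long bi_sector = 1048576 + 2048;            /* BIO start */

          /* dmz_bio_chunk(): which chunk (zone-sized unit) the BIO targets */
          unsigned long long chunk = bi_sector / zone_nr_sectors;
          /* dmz_chunk_block(): block offset of the BIO inside that chunk */
          unsigned long long block = (bi_sector >> 3) & (zone_nr_blocks - 1);

          printf("chunk %llu, block-in-chunk %llu\n", chunk, block);
          return 0;
  }
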
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3f39fa1ac756..109e81f33edb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -676,6 +676,15 @@ static bool md_in_flight(struct mapped_device *md)
return md_in_flight_bios(md);
}
+u64 dm_start_time_ns_from_clone(struct bio *bio)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct dm_io *io = tio->io;
+
+ return jiffies_to_nsecs(io->start_time);
+}
+EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
@@ -2610,7 +2619,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
else
- pr_debug("%s: suspending with flush\n", dm_device_name(md));
+ DMDEBUG("%s: suspending with flush", dm_device_name(md));
/*
* This gets reverted if there's an error later and the targets
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index a240990a7f33..564896659dd4 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -38,7 +38,7 @@ struct node_header {
struct btree_node {
struct node_header header;
- __le64 keys[0];
+ __le64 keys[];
} __packed;
@@ -68,7 +68,7 @@ struct ro_spine {
};
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
-int exit_ro_spine(struct ro_spine *s);
+void exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index b27b8091a1ca..e03cb9e48773 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -132,15 +132,13 @@ void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
s->nodes[1] = NULL;
}
-int exit_ro_spine(struct ro_spine *s)
+void exit_ro_spine(struct ro_spine *s)
{
- int r = 0, i;
+ int i;
for (i = 0; i < s->count; i++) {
unlock_block(s->info, s->nodes[i]);
}
-
- return r;
}
int ro_step(struct ro_spine *s, dm_block_t new_child)
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 53af26ad1dfb..79ba15a9385a 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -13,9 +13,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <linux/ioport.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/types.h>
#include <linux/interrupt.h>
diff --git a/drivers/media/pci/bt8xx/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index 1139a5ad2418..51257980f539 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -17,8 +17,8 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/videodev2.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "btcx-risc.h"
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index fc8708047be8..4af72826b006 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -20,8 +20,8 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <media/v4l2-ioctl.h>
#include "bttvp.h"
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 38d3088d4d38..7ab13eb7527d 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -17,7 +17,6 @@
#include <linux/videodev2.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 6f769c527fae..10c214bd0903 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -10,7 +10,6 @@
* Sakari Ailus <sakari.ailus@iki.fi>
*/
-#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -19,6 +18,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 9e8eb45a5b03..3dc17ebe14fa 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -41,7 +41,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 2686f03b322e..5c91fc3e65b5 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -535,7 +535,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
if (b->memory == V4L2_MEMORY_MMAP)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
videobuf_queue_lock(q);
retval = -EBUSY;
@@ -622,7 +622,7 @@ done:
videobuf_queue_unlock(q);
if (b->memory == V4L2_MEMORY_MMAP)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return retval;
}
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index aeb2f497c683..52312ce2ba05 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -169,7 +169,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
mem->size = PAGE_ALIGN(vb->size + offset);
ret = -EINVAL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, untagged_baddr);
if (!vma)
@@ -201,7 +201,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
}
out_up:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
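
The videobuf changes above are part of the mmap_sem to mmap_lock conversion:
down_read(&mm->mmap_sem) becomes mmap_read_lock(mm) and up_read() becomes
mmap_read_unlock(mm), with the critical section kept as-is. As a purely
userspace analogue of why the lookup and every use of its result stay under
one read lock, here is a small pthread sketch (build with -pthread); the
table and lookup function are invented.

  /* Userspace analogue of the mmap_read_lock() critical-section shape. */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
  static int table[4] = { 10, 20, 30, 40 };       /* stands in for the VMA tree */

  static int lookup(int idx)
  {
          return (idx >= 0 && idx < 4) ? table[idx] : -1;
  }

  int main(void)
  {
          int val;

          pthread_rwlock_rdlock(&map_lock);       /* mmap_read_lock(mm)    */
          val = lookup(2);                        /* find_vma()-like step  */
          if (val >= 0)                           /* use result under lock */
                  printf("entry 2 -> %d\n", val);
          pthread_rwlock_unlock(&map_lock);       /* mmap_read_unlock(mm)  */
          return 0;
  }
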
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 13b65ed9e74c..46ff19df9f53 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -21,13 +21,13 @@
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <media/videobuf-dma-sg.h>
@@ -200,9 +200,9 @@ static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
{
int ret;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = videobuf_dma_init_user_locked(dma, direction, data, size);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
@@ -533,7 +533,7 @@ static int __videobuf_iolock(struct videobuf_queue *q,
} else {
/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
buffers can only be called from videobuf_qbuf;
- we take current->mm->mmap_sem there, to prevent
+ we take current->mm->mmap_lock there, to prevent
locking inversion, so don't take it here */
err = videobuf_dma_init_user_locked(&mem->dma,
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index f8bd5a369560..9b2443720ab0 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -15,12 +15,12 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <media/videobuf-vmalloc.h>
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 9bddca292330..04368ee2a809 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -46,6 +46,17 @@ config ATMEL_EBI
tree is used. This bus supports NANDs, external ethernet controller,
SRAMs, ATA devices, etc.
+config BT1_L2_CTL
+ bool "Baikal-T1 CM2 L2-RAM Cache Control Block"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ Baikal-T1 CPU is based on the MIPS P5600 Warrior IP-core. The CPU
+ includes a Coherency Manager v2 with an embedded 1MB L2-cache. The
+ L2 cache performance can be tuned by setting the data, tags and
+ way-select latencies of RAM access. This driver provides a DT
+ properties-based and sysfs interface for doing so.
+
config TI_AEMIF
tristate "Texas Instruments AEMIF driver"
depends on (ARCH_DAVINCI || ARCH_KEYSTONE) && OF
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 27b493435e61..6d7e3e64ba62 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
obj-$(CONFIG_ARCH_BRCMSTB) += brcmstb_dpfe.o
+obj-$(CONFIG_BT1_L2_CTL) += bt1-l2-ctl.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
diff --git a/drivers/memory/bt1-l2-ctl.c b/drivers/memory/bt1-l2-ctl.c
new file mode 100644
index 000000000000..633fea6a4edf
--- /dev/null
+++ b/drivers/memory/bt1-l2-ctl.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 CM2 L2-cache Control Block driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+
+#define L2_CTL_REG 0x028
+#define L2_CTL_DATA_STALL_FLD 0
+#define L2_CTL_DATA_STALL_MASK GENMASK(1, L2_CTL_DATA_STALL_FLD)
+#define L2_CTL_TAG_STALL_FLD 2
+#define L2_CTL_TAG_STALL_MASK GENMASK(3, L2_CTL_TAG_STALL_FLD)
+#define L2_CTL_WS_STALL_FLD 4
+#define L2_CTL_WS_STALL_MASK GENMASK(5, L2_CTL_WS_STALL_FLD)
+#define L2_CTL_SET_CLKRATIO BIT(13)
+#define L2_CTL_CLKRATIO_LOCK BIT(31)
+
+#define L2_CTL_STALL_MIN 0
+#define L2_CTL_STALL_MAX 3
+#define L2_CTL_STALL_SET_DELAY_US 1
+#define L2_CTL_STALL_SET_TOUT_US 1000
+
+/*
+ * struct l2_ctl - Baikal-T1 L2 Control block private data.
+ * @dev: Pointer to the device structure.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ */
+struct l2_ctl {
+ struct device *dev;
+
+ struct regmap *sys_regs;
+};
+
+/*
+ * enum l2_ctl_stall - Baikal-T1 L2-cache-RAM stall identifier.
+ * @L2_WS_STALL: Way-select latency.
+ * @L2_TAG_STALL: Tag latency.
+ * @L2_DATA_STALL: Data latency.
+ */
+enum l2_ctl_stall {
+ L2_WS_STALL,
+ L2_TAG_STALL,
+ L2_DATA_STALL
+};
+
+/*
+ * struct l2_ctl_device_attribute - Baikal-T1 L2-cache device attribute.
+ * @dev_attr: Actual sysfs device attribute.
+ * @id: L2-cache stall field identifier.
+ */
+struct l2_ctl_device_attribute {
+ struct device_attribute dev_attr;
+ enum l2_ctl_stall id;
+};
+#define to_l2_ctl_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct l2_ctl_device_attribute, dev_attr)
+
+#define L2_CTL_ATTR_RW(_name, _prefix, _id) \
+ struct l2_ctl_device_attribute l2_ctl_attr_##_name = \
+ { __ATTR(_name, 0644, _prefix##_show, _prefix##_store), _id }
+
+static int l2_ctl_get_latency(struct l2_ctl *l2, enum l2_ctl_stall id, u32 *val)
+{
+ u32 data = 0;
+ int ret;
+
+ ret = regmap_read(l2->sys_regs, L2_CTL_REG, &data);
+ if (ret)
+ return ret;
+
+ switch (id) {
+ case L2_WS_STALL:
+ *val = FIELD_GET(L2_CTL_WS_STALL_MASK, data);
+ break;
+ case L2_TAG_STALL:
+ *val = FIELD_GET(L2_CTL_TAG_STALL_MASK, data);
+ break;
+ case L2_DATA_STALL:
+ *val = FIELD_GET(L2_CTL_DATA_STALL_MASK, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int l2_ctl_set_latency(struct l2_ctl *l2, enum l2_ctl_stall id, u32 val)
+{
+ u32 mask = 0, data = 0;
+ int ret;
+
+ val = clamp_val(val, L2_CTL_STALL_MIN, L2_CTL_STALL_MAX);
+
+ switch (id) {
+ case L2_WS_STALL:
+ data = FIELD_PREP(L2_CTL_WS_STALL_MASK, val);
+ mask = L2_CTL_WS_STALL_MASK;
+ break;
+ case L2_TAG_STALL:
+ data = FIELD_PREP(L2_CTL_TAG_STALL_MASK, val);
+ mask = L2_CTL_TAG_STALL_MASK;
+ break;
+ case L2_DATA_STALL:
+ data = FIELD_PREP(L2_CTL_DATA_STALL_MASK, val);
+ mask = L2_CTL_DATA_STALL_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data |= L2_CTL_SET_CLKRATIO;
+ mask |= L2_CTL_SET_CLKRATIO;
+
+ ret = regmap_update_bits(l2->sys_regs, L2_CTL_REG, mask, data);
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(l2->sys_regs, L2_CTL_REG, data,
+ data & L2_CTL_CLKRATIO_LOCK,
+ L2_CTL_STALL_SET_DELAY_US,
+ L2_CTL_STALL_SET_TOUT_US);
+}
+
+static void l2_ctl_clear_data(void *data)
+{
+ struct l2_ctl *l2 = data;
+ struct platform_device *pdev = to_platform_device(l2->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct l2_ctl *l2_ctl_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct l2_ctl *l2;
+ int ret;
+
+ l2 = devm_kzalloc(dev, sizeof(*l2), GFP_KERNEL);
+ if (!l2)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, l2_ctl_clear_data, l2);
+ if (ret) {
+ dev_err(dev, "Can't add L2 CTL data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ l2->dev = dev;
+ platform_set_drvdata(pdev, l2);
+
+ return l2;
+}
+
+static int l2_ctl_find_sys_regs(struct l2_ctl *l2)
+{
+ l2->sys_regs = syscon_node_to_regmap(l2->dev->of_node->parent);
+ if (IS_ERR(l2->sys_regs)) {
+ dev_err(l2->dev, "Couldn't get L2 CTL register map\n");
+ return PTR_ERR(l2->sys_regs);
+ }
+
+ return 0;
+}
+
+static int l2_ctl_of_parse_property(struct l2_ctl *l2, enum l2_ctl_stall id,
+ const char *propname)
+{
+ int ret = 0;
+ u32 data;
+
+ if (!of_property_read_u32(l2->dev->of_node, propname, &data)) {
+ ret = l2_ctl_set_latency(l2, id, data);
+ if (ret)
+ dev_err(l2->dev, "Invalid value of '%s'\n", propname);
+ }
+
+ return ret;
+}
+
+static int l2_ctl_of_parse(struct l2_ctl *l2)
+{
+ int ret;
+
+ ret = l2_ctl_of_parse_property(l2, L2_WS_STALL, "baikal,l2-ws-latency");
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_of_parse_property(l2, L2_TAG_STALL, "baikal,l2-tag-latency");
+ if (ret)
+ return ret;
+
+ return l2_ctl_of_parse_property(l2, L2_DATA_STALL,
+ "baikal,l2-data-latency");
+}
+
+static ssize_t l2_ctl_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct l2_ctl_device_attribute *devattr = to_l2_ctl_dev_attr(attr);
+ struct l2_ctl *l2 = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = l2_ctl_get_latency(l2, devattr->id, &data);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", data);
+}
+
+static ssize_t l2_ctl_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct l2_ctl_device_attribute *devattr = to_l2_ctl_dev_attr(attr);
+ struct l2_ctl *l2 = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ if (kstrtouint(buf, 0, &data) < 0)
+ return -EINVAL;
+
+ ret = l2_ctl_set_latency(l2, devattr->id, data);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static L2_CTL_ATTR_RW(l2_ws_latency, l2_ctl_latency, L2_WS_STALL);
+static L2_CTL_ATTR_RW(l2_tag_latency, l2_ctl_latency, L2_TAG_STALL);
+static L2_CTL_ATTR_RW(l2_data_latency, l2_ctl_latency, L2_DATA_STALL);
+
+static struct attribute *l2_ctl_sysfs_attrs[] = {
+ &l2_ctl_attr_l2_ws_latency.dev_attr.attr,
+ &l2_ctl_attr_l2_tag_latency.dev_attr.attr,
+ &l2_ctl_attr_l2_data_latency.dev_attr.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(l2_ctl_sysfs);
+
+static void l2_ctl_remove_sysfs(void *data)
+{
+ struct l2_ctl *l2 = data;
+
+ device_remove_groups(l2->dev, l2_ctl_sysfs_groups);
+}
+
+static int l2_ctl_init_sysfs(struct l2_ctl *l2)
+{
+ int ret;
+
+ ret = device_add_groups(l2->dev, l2_ctl_sysfs_groups);
+ if (ret) {
+ dev_err(l2->dev, "Failed to create L2 CTL sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(l2->dev, l2_ctl_remove_sysfs, l2);
+ if (ret)
+ dev_err(l2->dev, "Can't add L2 CTL sysfs remove action\n");
+
+ return ret;
+}
+
+static int l2_ctl_probe(struct platform_device *pdev)
+{
+ struct l2_ctl *l2;
+ int ret;
+
+ l2 = l2_ctl_create_data(pdev);
+ if (IS_ERR(l2))
+ return PTR_ERR(l2);
+
+ ret = l2_ctl_find_sys_regs(l2);
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_of_parse(l2);
+ if (ret)
+ return ret;
+
+ ret = l2_ctl_init_sysfs(l2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id l2_ctl_of_match[] = {
+ { .compatible = "baikal,bt1-l2-ctl" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, l2_ctl_of_match);
+
+static struct platform_driver l2_ctl_driver = {
+ .probe = l2_ctl_probe,
+ .driver = {
+ .name = "bt1-l2-ctl",
+ .of_match_table = l2_ctl_of_match
+ }
+};
+module_platform_driver(l2_ctl_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 L2-cache driver");
+MODULE_LICENSE("GPL v2");
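
For the new bt1-l2-ctl driver above, the three latency attributes are plain
sysfs files and the driver clamps written values to the 0..3 range. A minimal
userspace sketch of reading and updating one of them follows; the sysfs path
is only a guess for illustration and will differ per board.

  /* Read and update one latency attribute; the path is hypothetical. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          const char *attr = "/sys/devices/platform/l2-ctl/l2_data_latency";
          char buf[16];
          ssize_t n;
          int fd = open(attr, O_RDWR);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          n = read(fd, buf, sizeof(buf) - 1);
          if (n > 0) {
                  buf[n] = '\0';
                  printf("current data latency: %s", buf);
          }
          lseek(fd, 0, SEEK_SET);
          if (write(fd, "2\n", 2) != 2)   /* driver clamps values to 0..3 */
                  perror("write");
          close(fd);
          return 0;
  }
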
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
index 81a1b1d01683..25196d6268e2 100644
--- a/drivers/memory/samsung/exynos5422-dmc.c
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -1091,7 +1091,7 @@ static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
/* power related timings */
val = dmc->timings->tFAW / clk_period_ps;
val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
- val = max(val, dmc->min_tck->tXP);
+ val = max(val, dmc->min_tck->tFAW);
reg = &timing_power[0];
*reg_timing_power |= TIMING_VAL2REG(reg, val);
@@ -1346,15 +1346,13 @@ static irqreturn_t dmc_irq_thread(int irq, void *priv)
struct exynos5_dmc *dmc = priv;
mutex_lock(&dmc->df->lock);
-
exynos5_dmc_perf_events_check(dmc);
-
res = update_devfreq(dmc->df);
+ mutex_unlock(&dmc->df->lock);
+
if (res)
dev_warn(dmc->dev, "devfreq failed with %d\n", res);
- mutex_unlock(&dmc->df->lock);
-
return IRQ_HANDLED;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c2dd322691d1..68aea22f2b89 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -5052,9 +5052,11 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
* @ioc: Pointer to MPT_ADAPTER structure
* @persist_opcode: see below
*
- * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
- * devices not currently present.
- * MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear al persist TargetID mappings
+ * =============================== ======================================
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT Free all persist TargetID mappings for
+ * devices not currently present.
+ * MPI_SAS_OP_CLEAR_ALL_PERSISTENT Clear all persist TargetID mappings
+ * =============================== ======================================
*
* NOTE: Don't use this function during interrupt time.
*
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 687e9c848053..4f8b73d92df3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -449,6 +449,15 @@ config MFD_MC13XXX_I2C
help
Select this if your MC13xxx is connected via an I2C bus.
+config MFD_MP2629
+ tristate "Monolithic Power Systems MP2629 ADC and Battery charger"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Select this option to enable support for the Monolithic Power Systems
+ MP2629 battery charger. This provides ADC, thermal and battery charger
+ power management functions.
+
config MFD_MXS_LRADC
tristate "Freescale i.MX23/i.MX28 LRADC"
depends on ARCH_MXS || COMPILE_TEST
@@ -899,6 +908,18 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MT6360
+ tristate "Mediatek MT6360 SubPMIC"
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ depends on I2C
+ help
+ Say Y here to enable support for the MT6360 PMU/PMIC/LDO functions.
+ The PMU part includes a charger, a flashlight and an RGB LED, the
+ PMIC part includes 2-channel BUCKs and 2-channel LDOs, and the LDO
+ part includes 4-channel LDOs.
+
config MFD_MT6397
tristate "MediaTek MT6397 PMIC Support"
select MFD_CORE
@@ -2057,10 +2078,9 @@ config MCP_UCB1200_TS
endmenu
config MFD_VEXPRESS_SYSREG
- bool "Versatile Express System Registers"
- depends on VEXPRESS_CONFIG && GPIOLIB && !ARCH_USES_GETTIMEOFFSET
+ tristate "Versatile Express System Registers"
+ depends on VEXPRESS_CONFIG && GPIOLIB
default y
- select CLKSRC_MMIO
select GPIO_GENERIC_PLATFORM
select MFD_CORE
select MFD_SYSCON
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index bea2be419822..9367a92f795a 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -171,6 +171,8 @@ obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
obj-$(CONFIG_MFD_MAX8998) += max8998.o max8998-irq.o
+obj-$(CONFIG_MFD_MP2629) += mp2629.o
+
pcf50633-objs := pcf50633-core.o pcf50633-irq.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
@@ -241,7 +243,8 @@ obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
-mt6397-objs := mt6397-core.o mt6397-irq.o
+obj-$(CONFIG_MFD_MT6360) += mt6360-core.o
+mt6397-objs := mt6397-core.o mt6397-irq.o mt6358-irq.o
obj-$(CONFIG_MFD_MT6397) += mt6397.o
obj-$(CONFIG_INTEL_SOC_PMIC_MRFLD) += intel_soc_pmic_mrfld.o
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index 8ad6768bd7a2..247f9849e54a 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -355,12 +355,12 @@ static int htcpld_register_chip_i2c(
info.platform_data = chip;
/* Add the I2C device. This calls the probe() function. */
- client = i2c_new_device(adapter, &info);
- if (!client) {
+ client = i2c_new_client_device(adapter, &info);
+ if (IS_ERR(client)) {
/* I2C device registration failed, continue with the next */
dev_warn(dev, "Unable to add I2C device for 0x%x\n",
plat_chip_data->addr);
- return -ENODEV;
+ return PTR_ERR(client);
}
i2c_set_clientdata(client, chip);
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 7fc0c5d4edff..046222684b8b 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -250,9 +250,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4daa), (kernel_ulong_t)&spt_info },
{ PCI_VDEVICE(INTEL, 0x4dab), (kernel_ulong_t)&spt_info },
- { PCI_VDEVICE(INTEL, 0x4daf), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4dc5), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dc6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4dc7), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4de8), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4de9), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4dea), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 41326b48da55..84ca7902e1df 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -216,7 +216,6 @@ static int intel_quark_gpio_setup(struct pci_dev *pdev, struct mfd_cell *cell)
pdata->properties->ngpio = INTEL_QUARK_MFD_NGPIO;
pdata->properties->gpio_base = INTEL_QUARK_MFD_GPIO_BASE;
pdata->properties->irq[0] = pdev->irq;
- pdata->properties->has_irq = true;
pdata->properties->irq_shared = true;
cell->platform_data = pdata;
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index c7ed5c353553..fec2096474ad 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -177,6 +177,7 @@ static const struct regmap_config max77620_regmap_config = {
.rd_table = &max77620_readable_table,
.wr_table = &max77620_writable_table,
.volatile_table = &max77620_volatile_table,
+ .use_single_write = true,
};
static const struct regmap_config max20024_regmap_config = {
diff --git a/drivers/mfd/mp2629.c b/drivers/mfd/mp2629.c
new file mode 100644
index 000000000000..16840ec5fd1c
--- /dev/null
+++ b/drivers/mfd/mp2629.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MP2629 parent driver for ADC and battery charger
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static const struct mfd_cell mp2629_cell[] = {
+ {
+ .name = "mp2629_adc",
+ .of_compatible = "mps,mp2629_adc",
+ },
+ {
+ .name = "mp2629_charger",
+ .of_compatible = "mps,mp2629_charger",
+ }
+};
+
+static const struct regmap_config mp2629_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x17,
+};
+
+static int mp2629_probe(struct i2c_client *client)
+{
+ struct mp2629_data *ddata;
+ int ret;
+
+ ddata = devm_kzalloc(&client->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &client->dev;
+ i2c_set_clientdata(client, ddata);
+
+ ddata->regmap = devm_regmap_init_i2c(client, &mp2629_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ dev_err(ddata->dev, "Failed to allocate regmap\n");
+ return PTR_ERR(ddata->regmap);
+ }
+
+ ret = devm_mfd_add_devices(ddata->dev, PLATFORM_DEVID_AUTO, mp2629_cell,
+ ARRAY_SIZE(mp2629_cell), NULL, 0, NULL);
+ if (ret)
+ dev_err(ddata->dev, "Failed to register sub-devices %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id mp2629_of_match[] = {
+ { .compatible = "mps,mp2629"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, mp2629_of_match);
+
+static struct i2c_driver mp2629_driver = {
+ .driver = {
+ .name = "mp2629",
+ .of_match_table = mp2629_of_match,
+ },
+ .probe_new = mp2629_probe,
+};
+module_i2c_driver(mp2629_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 Battery charger parent driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
new file mode 100644
index 000000000000..db734f2831ff
--- /dev/null
+++ b/drivers/mfd/mt6358-irq.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2020 MediaTek Inc.
+
+#include <linux/interrupt.h>
+#include <linux/mfd/mt6358/core.h>
+#include <linux/mfd/mt6358/registers.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct irq_top_t mt6358_ints[] = {
+ MT6358_TOP_GEN(BUCK),
+ MT6358_TOP_GEN(LDO),
+ MT6358_TOP_GEN(PSC),
+ MT6358_TOP_GEN(SCK),
+ MT6358_TOP_GEN(BM),
+ MT6358_TOP_GEN(HK),
+ MT6358_TOP_GEN(AUD),
+ MT6358_TOP_GEN(MISC),
+};
+
+static void pmic_irq_enable(struct irq_data *data)
+{
+ unsigned int hwirq = irqd_to_hwirq(data);
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pmic_irq_data *irqd = chip->irq_data;
+
+ irqd->enable_hwirq[hwirq] = true;
+}
+
+static void pmic_irq_disable(struct irq_data *data)
+{
+ unsigned int hwirq = irqd_to_hwirq(data);
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pmic_irq_data *irqd = chip->irq_data;
+
+ irqd->enable_hwirq[hwirq] = false;
+}
+
+static void pmic_irq_lock(struct irq_data *data)
+{
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&chip->irqlock);
+}
+
+static void pmic_irq_sync_unlock(struct irq_data *data)
+{
+ unsigned int i, top_gp, gp_offset, en_reg, int_regs, shift;
+ struct mt6397_chip *chip = irq_data_get_irq_chip_data(data);
+ struct pmic_irq_data *irqd = chip->irq_data;
+
+ for (i = 0; i < irqd->num_pmic_irqs; i++) {
+ if (irqd->enable_hwirq[i] == irqd->cache_hwirq[i])
+ continue;
+
+ /* Find out the IRQ group */
+ top_gp = 0;
+ while ((top_gp + 1) < irqd->num_top &&
+ i >= mt6358_ints[top_gp + 1].hwirq_base)
+ top_gp++;
+
+ /* Find the IRQ registers */
+ gp_offset = i - mt6358_ints[top_gp].hwirq_base;
+ int_regs = gp_offset / MT6358_REG_WIDTH;
+ shift = gp_offset % MT6358_REG_WIDTH;
+ en_reg = mt6358_ints[top_gp].en_reg +
+ (mt6358_ints[top_gp].en_reg_shift * int_regs);
+
+ regmap_update_bits(chip->regmap, en_reg, BIT(shift),
+ irqd->enable_hwirq[i] << shift);
+
+ irqd->cache_hwirq[i] = irqd->enable_hwirq[i];
+ }
+ mutex_unlock(&chip->irqlock);
+}
+
+static struct irq_chip mt6358_irq_chip = {
+ .name = "mt6358-irq",
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_enable = pmic_irq_enable,
+ .irq_disable = pmic_irq_disable,
+ .irq_bus_lock = pmic_irq_lock,
+ .irq_bus_sync_unlock = pmic_irq_sync_unlock,
+};
+
+static void mt6358_irq_sp_handler(struct mt6397_chip *chip,
+ unsigned int top_gp)
+{
+ unsigned int irq_status, sta_reg, status;
+ unsigned int hwirq, virq;
+ int i, j, ret;
+
+ for (i = 0; i < mt6358_ints[top_gp].num_int_regs; i++) {
+ sta_reg = mt6358_ints[top_gp].sta_reg +
+ mt6358_ints[top_gp].sta_reg_shift * i;
+
+ ret = regmap_read(chip->regmap, sta_reg, &irq_status);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read IRQ status, ret=%d\n", ret);
+ return;
+ }
+
+ if (!irq_status)
+ continue;
+
+ status = irq_status;
+ do {
+ j = __ffs(status);
+
+ hwirq = mt6358_ints[top_gp].hwirq_base +
+ MT6358_REG_WIDTH * i + j;
+
+ virq = irq_find_mapping(chip->irq_domain, hwirq);
+ if (virq)
+ handle_nested_irq(virq);
+
+ status &= ~BIT(j);
+ } while (status);
+
+ regmap_write(chip->regmap, sta_reg, irq_status);
+ }
+}
+
+static irqreturn_t mt6358_irq_handler(int irq, void *data)
+{
+ struct mt6397_chip *chip = data;
+ struct pmic_irq_data *mt6358_irq_data = chip->irq_data;
+ unsigned int bit, i, top_irq_status = 0;
+ int ret;
+
+ ret = regmap_read(chip->regmap,
+ mt6358_irq_data->top_int_status_reg,
+ &top_irq_status);
+ if (ret) {
+ dev_err(chip->dev,
+ "Failed to read status from the device, ret=%d\n", ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < mt6358_irq_data->num_top; i++) {
+ bit = BIT(mt6358_ints[i].top_offset);
+ if (top_irq_status & bit) {
+ mt6358_irq_sp_handler(chip, i);
+ top_irq_status &= ~bit;
+ if (!top_irq_status)
+ break;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pmic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct mt6397_chip *mt6397 = d->host_data;
+
+ irq_set_chip_data(irq, mt6397);
+ irq_set_chip_and_handler(irq, &mt6358_irq_chip, handle_level_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mt6358_irq_domain_ops = {
+ .map = pmic_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+int mt6358_irq_init(struct mt6397_chip *chip)
+{
+ int i, j, ret;
+ struct pmic_irq_data *irqd;
+
+ irqd = devm_kzalloc(chip->dev, sizeof(*irqd), GFP_KERNEL);
+ if (!irqd)
+ return -ENOMEM;
+
+ chip->irq_data = irqd;
+
+ mutex_init(&chip->irqlock);
+ irqd->top_int_status_reg = MT6358_TOP_INT_STATUS0;
+ irqd->num_pmic_irqs = MT6358_IRQ_NR;
+ irqd->num_top = ARRAY_SIZE(mt6358_ints);
+
+ irqd->enable_hwirq = devm_kcalloc(chip->dev,
+ irqd->num_pmic_irqs,
+ sizeof(*irqd->enable_hwirq),
+ GFP_KERNEL);
+ if (!irqd->enable_hwirq)
+ return -ENOMEM;
+
+ irqd->cache_hwirq = devm_kcalloc(chip->dev,
+ irqd->num_pmic_irqs,
+ sizeof(*irqd->cache_hwirq),
+ GFP_KERNEL);
+ if (!irqd->cache_hwirq)
+ return -ENOMEM;
+
+ /* Disable all interrupts for initializing */
+ for (i = 0; i < irqd->num_top; i++) {
+ for (j = 0; j < mt6358_ints[i].num_int_regs; j++)
+ regmap_write(chip->regmap,
+ mt6358_ints[i].en_reg +
+ mt6358_ints[i].en_reg_shift * j, 0);
+ }
+
+ chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
+ irqd->num_pmic_irqs,
+ &mt6358_irq_domain_ops, chip);
+ if (!chip->irq_domain) {
+ dev_err(chip->dev, "Could not create IRQ domain\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL,
+ mt6358_irq_handler, IRQF_ONESHOT,
+ mt6358_irq_chip.name, chip);
+ if (ret) {
+ dev_err(chip->dev, "Failed to register IRQ=%d, ret=%d\n",
+ chip->irq, ret);
+ return ret;
+ }
+
+ enable_irq_wake(chip->irq);
+ return ret;
+}
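Editor's note: a short worked example of the register/bit arithmetic in pmic_irq_sync_unlock() above, with assumed values (MT6358_REG_WIDTH taken as 16 and a group base of 32 purely for illustration):

	/*
	 * For hwirq i = 53 in a group whose hwirq_base is 32:
	 *   gp_offset = 53 - 32 = 21
	 *   int_regs  = 21 / 16 = 1   -> second enable register of the group
	 *   shift     = 21 % 16 = 5   -> bit 5 of that register
	 * so the enable bit is written at
	 *   en_reg = mt6358_ints[top_gp].en_reg + 1 * en_reg_shift, mask BIT(5).
	 */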
diff --git a/drivers/mfd/mt6360-core.c b/drivers/mfd/mt6360-core.c
new file mode 100644
index 000000000000..db8cdf5272c1
--- /dev/null
+++ b/drivers/mfd/mt6360-core.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ *
+ * Author: Gene Chen <gene_chen@richtek.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/version.h>
+
+#include <linux/mfd/mt6360.h>
+
+/* reg 0 -> 0 ~ 7 */
+#define MT6360_CHG_TREG_EVT (4)
+#define MT6360_CHG_AICR_EVT (5)
+#define MT6360_CHG_MIVR_EVT (6)
+#define MT6360_PWR_RDY_EVT (7)
+/* REG 1 -> 8 ~ 15 */
+#define MT6360_CHG_BATSYSUV_EVT (9)
+#define MT6360_FLED_CHG_VINOVP_EVT (11)
+#define MT6360_CHG_VSYSUV_EVT (12)
+#define MT6360_CHG_VSYSOV_EVT (13)
+#define MT6360_CHG_VBATOV_EVT (14)
+#define MT6360_CHG_VBUSOV_EVT (15)
+/* REG 2 -> 16 ~ 23 */
+/* REG 3 -> 24 ~ 31 */
+#define MT6360_WD_PMU_DET (25)
+#define MT6360_WD_PMU_DONE (26)
+#define MT6360_CHG_TMRI (27)
+#define MT6360_CHG_ADPBADI (29)
+#define MT6360_CHG_RVPI (30)
+#define MT6360_OTPI (31)
+/* REG 4 -> 32 ~ 39 */
+#define MT6360_CHG_AICCMEASL (32)
+#define MT6360_CHGDET_DONEI (34)
+#define MT6360_WDTMRI (35)
+#define MT6360_SSFINISHI (36)
+#define MT6360_CHG_RECHGI (37)
+#define MT6360_CHG_TERMI (38)
+#define MT6360_CHG_IEOCI (39)
+/* REG 5 -> 40 ~ 47 */
+#define MT6360_PUMPX_DONEI (40)
+#define MT6360_BAT_OVP_ADC_EVT (41)
+#define MT6360_TYPEC_OTP_EVT (42)
+#define MT6360_ADC_WAKEUP_EVT (43)
+#define MT6360_ADC_DONEI (44)
+#define MT6360_BST_BATUVI (45)
+#define MT6360_BST_VBUSOVI (46)
+#define MT6360_BST_OLPI (47)
+/* REG 6 -> 48 ~ 55 */
+#define MT6360_ATTACH_I (48)
+#define MT6360_DETACH_I (49)
+#define MT6360_QC30_STPDONE (51)
+#define MT6360_QC_VBUSDET_DONE (52)
+#define MT6360_HVDCP_DET (53)
+#define MT6360_CHGDETI (54)
+#define MT6360_DCDTI (55)
+/* REG 7 -> 56 ~ 63 */
+#define MT6360_FOD_DONE_EVT (56)
+#define MT6360_FOD_OV_EVT (57)
+#define MT6360_CHRDET_UVP_EVT (58)
+#define MT6360_CHRDET_OVP_EVT (59)
+#define MT6360_CHRDET_EXT_EVT (60)
+#define MT6360_FOD_LR_EVT (61)
+#define MT6360_FOD_HR_EVT (62)
+#define MT6360_FOD_DISCHG_FAIL_EVT (63)
+/* REG 8 -> 64 ~ 71 */
+#define MT6360_USBID_EVT (64)
+#define MT6360_APWDTRST_EVT (65)
+#define MT6360_EN_EVT (66)
+#define MT6360_QONB_RST_EVT (67)
+#define MT6360_MRSTB_EVT (68)
+#define MT6360_OTP_EVT (69)
+#define MT6360_VDDAOV_EVT (70)
+#define MT6360_SYSUV_EVT (71)
+/* REG 9 -> 72 ~ 79 */
+#define MT6360_FLED_STRBPIN_EVT (72)
+#define MT6360_FLED_TORPIN_EVT (73)
+#define MT6360_FLED_TX_EVT (74)
+#define MT6360_FLED_LVF_EVT (75)
+#define MT6360_FLED2_SHORT_EVT (78)
+#define MT6360_FLED1_SHORT_EVT (79)
+/* REG 10 -> 80 ~ 87 */
+#define MT6360_FLED2_STRB_EVT (80)
+#define MT6360_FLED1_STRB_EVT (81)
+#define MT6360_FLED2_STRB_TO_EVT (82)
+#define MT6360_FLED1_STRB_TO_EVT (83)
+#define MT6360_FLED2_TOR_EVT (84)
+#define MT6360_FLED1_TOR_EVT (85)
+/* REG 11 -> 88 ~ 95 */
+/* REG 12 -> 96 ~ 103 */
+#define MT6360_BUCK1_PGB_EVT (96)
+#define MT6360_BUCK1_OC_EVT (100)
+#define MT6360_BUCK1_OV_EVT (101)
+#define MT6360_BUCK1_UV_EVT (102)
+/* REG 13 -> 104 ~ 111 */
+#define MT6360_BUCK2_PGB_EVT (104)
+#define MT6360_BUCK2_OC_EVT (108)
+#define MT6360_BUCK2_OV_EVT (109)
+#define MT6360_BUCK2_UV_EVT (110)
+/* REG 14 -> 112 ~ 119 */
+#define MT6360_LDO1_OC_EVT (113)
+#define MT6360_LDO2_OC_EVT (114)
+#define MT6360_LDO3_OC_EVT (115)
+#define MT6360_LDO5_OC_EVT (117)
+#define MT6360_LDO6_OC_EVT (118)
+#define MT6360_LDO7_OC_EVT (119)
+/* REG 15 -> 120 ~ 127 */
+#define MT6360_LDO1_PGB_EVT (121)
+#define MT6360_LDO2_PGB_EVT (122)
+#define MT6360_LDO3_PGB_EVT (123)
+#define MT6360_LDO5_PGB_EVT (125)
+#define MT6360_LDO6_PGB_EVT (126)
+#define MT6360_LDO7_PGB_EVT (127)
+
+static const struct regmap_irq mt6360_pmu_irqs[] = {
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_TREG_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_AICR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_MIVR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_PWR_RDY_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_BATSYSUV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_CHG_VINOVP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VSYSUV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VSYSOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VBATOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_VBUSOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_WD_PMU_DET, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_WD_PMU_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_TMRI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_ADPBADI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_RVPI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_OTPI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_AICCMEASL, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHGDET_DONEI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_WDTMRI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_SSFINISHI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_RECHGI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_TERMI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHG_IEOCI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_PUMPX_DONEI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BAT_OVP_ADC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_TYPEC_OTP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_ADC_WAKEUP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_ADC_DONEI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BST_BATUVI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BST_VBUSOVI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BST_OLPI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_ATTACH_I, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_DETACH_I, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_QC30_STPDONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_QC_VBUSDET_DONE, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_HVDCP_DET, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHGDETI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_DCDTI, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_DONE_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_OV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHRDET_UVP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHRDET_OVP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_CHRDET_EXT_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_LR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_HR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FOD_DISCHG_FAIL_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_USBID_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_APWDTRST_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_EN_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_QONB_RST_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_MRSTB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_OTP_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_VDDAOV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_SYSUV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_STRBPIN_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_TORPIN_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_TX_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED_LVF_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_SHORT_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_SHORT_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_STRB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_STRB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_STRB_TO_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_STRB_TO_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED2_TOR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_FLED1_TOR_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_OV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK1_UV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_OV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_BUCK2_UV_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO1_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO2_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO3_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO5_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO6_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO7_OC_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO1_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO2_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO3_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO5_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO6_PGB_EVT, 8),
+ REGMAP_IRQ_REG_LINE(MT6360_LDO7_PGB_EVT, 8),
+};
+
+static int mt6360_pmu_handle_post_irq(void *irq_drv_data)
+{
+ struct mt6360_pmu_data *mpd = irq_drv_data;
+
+ return regmap_update_bits(mpd->regmap,
+ MT6360_PMU_IRQ_SET, MT6360_IRQ_RETRIG, MT6360_IRQ_RETRIG);
+}
+
+static struct regmap_irq_chip mt6360_pmu_irq_chip = {
+ .irqs = mt6360_pmu_irqs,
+ .num_irqs = ARRAY_SIZE(mt6360_pmu_irqs),
+ .num_regs = MT6360_PMU_IRQ_REGNUM,
+ .mask_base = MT6360_PMU_CHG_MASK1,
+ .status_base = MT6360_PMU_CHG_IRQ1,
+ .ack_base = MT6360_PMU_CHG_IRQ1,
+ .init_ack_masked = true,
+ .use_ack = true,
+ .handle_post_irq = mt6360_pmu_handle_post_irq,
+};
+
+static const struct regmap_config mt6360_pmu_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MT6360_PMU_MAXREG,
+};
+
+static const struct resource mt6360_adc_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_ADC_DONEI, "adc_donei"),
+};
+
+static const struct resource mt6360_chg_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_TREG_EVT, "chg_treg_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_PWR_RDY_EVT, "pwr_rdy_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_BATSYSUV_EVT, "chg_batsysuv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VSYSUV_EVT, "chg_vsysuv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VSYSOV_EVT, "chg_vsysov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VBATOV_EVT, "chg_vbatov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_VBUSOV_EVT, "chg_vbusov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_AICCMEASL, "chg_aiccmeasl"),
+ DEFINE_RES_IRQ_NAMED(MT6360_WDTMRI, "wdtmri"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_RECHGI, "chg_rechgi"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_TERMI, "chg_termi"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHG_IEOCI, "chg_ieoci"),
+ DEFINE_RES_IRQ_NAMED(MT6360_PUMPX_DONEI, "pumpx_donei"),
+ DEFINE_RES_IRQ_NAMED(MT6360_ATTACH_I, "attach_i"),
+ DEFINE_RES_IRQ_NAMED(MT6360_CHRDET_EXT_EVT, "chrdet_ext_evt"),
+};
+
+static const struct resource mt6360_led_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED_CHG_VINOVP_EVT, "fled_chg_vinovp_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED_LVF_EVT, "fled_lvf_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED2_SHORT_EVT, "fled2_short_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED1_SHORT_EVT, "fled1_short_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED2_STRB_TO_EVT, "fled2_strb_to_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_FLED1_STRB_TO_EVT, "fled1_strb_to_evt"),
+};
+
+static const struct resource mt6360_pmic_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_PGB_EVT, "buck1_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_OC_EVT, "buck1_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_OV_EVT, "buck1_ov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK1_UV_EVT, "buck1_uv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_PGB_EVT, "buck2_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_OC_EVT, "buck2_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_OV_EVT, "buck2_ov_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_BUCK2_UV_EVT, "buck2_uv_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO6_OC_EVT, "ldo6_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO7_OC_EVT, "ldo7_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO6_PGB_EVT, "ldo6_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO7_PGB_EVT, "ldo7_pgb_evt"),
+};
+
+static const struct resource mt6360_ldo_resources[] = {
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO1_OC_EVT, "ldo1_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO2_OC_EVT, "ldo2_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO3_OC_EVT, "ldo3_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO5_OC_EVT, "ldo5_oc_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO1_PGB_EVT, "ldo1_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO2_PGB_EVT, "ldo2_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO3_PGB_EVT, "ldo3_pgb_evt"),
+ DEFINE_RES_IRQ_NAMED(MT6360_LDO5_PGB_EVT, "ldo5_pgb_evt"),
+};
+
+static const struct mfd_cell mt6360_devs[] = {
+ OF_MFD_CELL("mt6360_adc", mt6360_adc_resources,
+ NULL, 0, 0, "mediatek,mt6360_adc"),
+ OF_MFD_CELL("mt6360_chg", mt6360_chg_resources,
+ NULL, 0, 0, "mediatek,mt6360_chg"),
+ OF_MFD_CELL("mt6360_led", mt6360_led_resources,
+ NULL, 0, 0, "mediatek,mt6360_led"),
+ OF_MFD_CELL("mt6360_pmic", mt6360_pmic_resources,
+ NULL, 0, 0, "mediatek,mt6360_pmic"),
+ OF_MFD_CELL("mt6360_ldo", mt6360_ldo_resources,
+ NULL, 0, 0, "mediatek,mt6360_ldo"),
+ OF_MFD_CELL("mt6360_tcpc", NULL,
+ NULL, 0, 0, "mediatek,mt6360_tcpc"),
+};
+
+static const unsigned short mt6360_slave_addr[MT6360_SLAVE_MAX] = {
+ MT6360_PMU_SLAVEID,
+ MT6360_PMIC_SLAVEID,
+ MT6360_LDO_SLAVEID,
+ MT6360_TCPC_SLAVEID,
+};
+
+static int mt6360_pmu_probe(struct i2c_client *client)
+{
+ struct mt6360_pmu_data *mpd;
+ unsigned int reg_data;
+ int i, ret;
+
+ mpd = devm_kzalloc(&client->dev, sizeof(*mpd), GFP_KERNEL);
+ if (!mpd)
+ return -ENOMEM;
+
+ mpd->dev = &client->dev;
+ i2c_set_clientdata(client, mpd);
+
+ mpd->regmap = devm_regmap_init_i2c(client, &mt6360_pmu_regmap_config);
+ if (IS_ERR(mpd->regmap)) {
+ dev_err(&client->dev, "Failed to register regmap\n");
+ return PTR_ERR(mpd->regmap);
+ }
+
+ ret = regmap_read(mpd->regmap, MT6360_PMU_DEV_INFO, &reg_data);
+ if (ret) {
+ dev_err(&client->dev, "Device not found\n");
+ return ret;
+ }
+
+ mpd->chip_rev = reg_data & CHIP_REV_MASK;
+ if (mpd->chip_rev != CHIP_VEN_MT6360) {
+ dev_err(&client->dev, "Device not supported\n");
+ return -ENODEV;
+ }
+
+ mt6360_pmu_irq_chip.irq_drv_data = mpd;
+ ret = devm_regmap_add_irq_chip(&client->dev, mpd->regmap, client->irq,
+ IRQF_TRIGGER_FALLING, 0,
+ &mt6360_pmu_irq_chip, &mpd->irq_data);
+ if (ret) {
+ dev_err(&client->dev, "Failed to add Regmap IRQ Chip\n");
+ return ret;
+ }
+
+ mpd->i2c[0] = client;
+ for (i = 1; i < MT6360_SLAVE_MAX; i++) {
+ mpd->i2c[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ mt6360_slave_addr[i]);
+ if (IS_ERR(mpd->i2c[i])) {
+ dev_err(&client->dev,
+ "Failed to get new dummy I2C device for address 0x%x",
+ mt6360_slave_addr[i]);
+ return PTR_ERR(mpd->i2c[i]);
+ }
+ i2c_set_clientdata(mpd->i2c[i], mpd);
+ }
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ mt6360_devs, ARRAY_SIZE(mt6360_devs), NULL,
+ 0, regmap_irq_get_domain(mpd->irq_data));
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to register subordinate devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mt6360_pmu_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static int __maybe_unused mt6360_pmu_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(i2c->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mt6360_pmu_pm_ops,
+ mt6360_pmu_suspend, mt6360_pmu_resume);
+
+static const struct of_device_id __maybe_unused mt6360_pmu_of_id[] = {
+ { .compatible = "mediatek,mt6360_pmu", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mt6360_pmu_of_id);
+
+static struct i2c_driver mt6360_pmu_driver = {
+ .driver = {
+ .pm = &mt6360_pmu_pm_ops,
+ .of_match_table = of_match_ptr(mt6360_pmu_of_id),
+ },
+ .probe_new = mt6360_pmu_probe,
+};
+module_i2c_driver(mt6360_pmu_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 PMU I2C Driver");
+MODULE_LICENSE("GPL v2");
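Editor's note: the cells above get their interrupts through the regmap-IRQ domain passed to devm_mfd_add_devices(), so a cell driver can look them up by the resource names defined here. A hedged sketch of that consumer side (handler and probe names are assumptions, not part of the patch):

	static irqreturn_t mt6360_example_ieoci_handler(int irq, void *data)
	{
		/* End-of-charge event; real handling lives in the charger driver. */
		return IRQ_HANDLED;
	}

	static int mt6360_chg_example_probe(struct platform_device *pdev)
	{
		int irq;

		irq = platform_get_irq_byname(pdev, "chg_ieoci");
		if (irq < 0)
			return irq;

		return devm_request_threaded_irq(&pdev->dev, irq, NULL,
						 mt6360_example_ieoci_handler,
						 IRQF_ONESHOT, "chg_ieoci", pdev);
	}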
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 0437c858d115..f6cd8a660602 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -12,13 +12,18 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
+#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6323/registers.h>
+#include <linux/mfd/mt6358/registers.h>
#include <linux/mfd/mt6397/registers.h>
#define MT6323_RTC_BASE 0x8000
#define MT6323_RTC_SIZE 0x40
+#define MT6358_RTC_BASE 0x0588
+#define MT6358_RTC_SIZE 0x3c
+
#define MT6397_RTC_BASE 0xe000
#define MT6397_RTC_SIZE 0x3e
@@ -30,6 +35,11 @@ static const struct resource mt6323_rtc_resources[] = {
DEFINE_RES_IRQ(MT6323_IRQ_STATUS_RTC),
};
+static const struct resource mt6358_rtc_resources[] = {
+ DEFINE_RES_MEM(MT6358_RTC_BASE, MT6358_RTC_SIZE),
+ DEFINE_RES_IRQ(MT6358_IRQ_RTC),
+};
+
static const struct resource mt6397_rtc_resources[] = {
DEFINE_RES_MEM(MT6397_RTC_BASE, MT6397_RTC_SIZE),
DEFINE_RES_IRQ(MT6397_IRQ_RTC),
@@ -74,6 +84,21 @@ static const struct mfd_cell mt6323_devs[] = {
},
};
+static const struct mfd_cell mt6358_devs[] = {
+ {
+ .name = "mt6358-regulator",
+ .of_compatible = "mediatek,mt6358-regulator"
+ }, {
+ .name = "mt6358-rtc",
+ .num_resources = ARRAY_SIZE(mt6358_rtc_resources),
+ .resources = mt6358_rtc_resources,
+ .of_compatible = "mediatek,mt6358-rtc",
+ }, {
+ .name = "mt6358-sound",
+ .of_compatible = "mediatek,mt6358-sound"
+ },
+};
+
static const struct mfd_cell mt6397_devs[] = {
{
.name = "mt6397-rtc",
@@ -100,54 +125,42 @@ static const struct mfd_cell mt6397_devs[] = {
}
};
-#ifdef CONFIG_PM_SLEEP
-static int mt6397_irq_suspend(struct device *dev)
-{
- struct mt6397_chip *chip = dev_get_drvdata(dev);
-
- regmap_write(chip->regmap, chip->int_con[0], chip->wake_mask[0]);
- regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]);
-
- enable_irq_wake(chip->irq);
-
- return 0;
-}
-
-static int mt6397_irq_resume(struct device *dev)
-{
- struct mt6397_chip *chip = dev_get_drvdata(dev);
-
- regmap_write(chip->regmap, chip->int_con[0], chip->irq_masks_cur[0]);
- regmap_write(chip->regmap, chip->int_con[1], chip->irq_masks_cur[1]);
-
- disable_irq_wake(chip->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
- mt6397_irq_resume);
-
struct chip_data {
u32 cid_addr;
u32 cid_shift;
+ const struct mfd_cell *cells;
+ int cell_size;
+ int (*irq_init)(struct mt6397_chip *chip);
};
static const struct chip_data mt6323_core = {
.cid_addr = MT6323_CID,
.cid_shift = 0,
+ .cells = mt6323_devs,
+ .cell_size = ARRAY_SIZE(mt6323_devs),
+ .irq_init = mt6397_irq_init,
+};
+
+static const struct chip_data mt6358_core = {
+ .cid_addr = MT6358_SWCID,
+ .cid_shift = 8,
+ .cells = mt6358_devs,
+ .cell_size = ARRAY_SIZE(mt6358_devs),
+ .irq_init = mt6358_irq_init,
};
static const struct chip_data mt6397_core = {
.cid_addr = MT6397_CID,
.cid_shift = 0,
+ .cells = mt6397_devs,
+ .cell_size = ARRAY_SIZE(mt6397_devs),
+ .irq_init = mt6397_irq_init,
};
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
- unsigned int id;
+ unsigned int id = 0;
struct mt6397_chip *pmic;
const struct chip_data *pmic_core;
@@ -183,29 +196,13 @@ static int mt6397_probe(struct platform_device *pdev)
if (pmic->irq <= 0)
return pmic->irq;
- ret = mt6397_irq_init(pmic);
+ ret = pmic_core->irq_init(pmic);
if (ret)
return ret;
- switch (pmic->chip_id) {
- case MT6323_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
- mt6323_devs, ARRAY_SIZE(mt6323_devs),
- NULL, 0, pmic->irq_domain);
- break;
-
- case MT6391_CHIP_ID:
- case MT6397_CHIP_ID:
- ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
- mt6397_devs, ARRAY_SIZE(mt6397_devs),
- NULL, 0, pmic->irq_domain);
- break;
-
- default:
- dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
- return -ENODEV;
- }
-
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
+ pmic_core->cells, pmic_core->cell_size,
+ NULL, 0, pmic->irq_domain);
if (ret) {
irq_domain_remove(pmic->irq_domain);
dev_err(&pdev->dev, "failed to add child devices: %d\n", ret);
@@ -219,6 +216,9 @@ static const struct of_device_id mt6397_of_match[] = {
.compatible = "mediatek,mt6323",
.data = &mt6323_core,
}, {
+ .compatible = "mediatek,mt6358",
+ .data = &mt6358_core,
+ }, {
.compatible = "mediatek,mt6397",
.data = &mt6397_core,
}, {
@@ -238,7 +238,6 @@ static struct platform_driver mt6397_driver = {
.driver = {
.name = "mt6397",
.of_match_table = of_match_ptr(mt6397_of_match),
- .pm = &mt6397_pm_ops,
},
.id_table = mt6397_id,
};
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index b2d3ce1f3115..2924919da991 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/suspend.h>
#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6323/registers.h>
#include <linux/mfd/mt6397/core.h>
@@ -81,7 +82,7 @@ static struct irq_chip mt6397_irq_chip = {
static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
int irqbase)
{
- unsigned int status;
+ unsigned int status = 0;
int i, irq, ret;
ret = regmap_read(mt6397->regmap, reg, &status);
@@ -128,6 +129,36 @@ static const struct irq_domain_ops mt6397_irq_domain_ops = {
.map = mt6397_irq_domain_map,
};
+static int mt6397_irq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct mt6397_chip *chip =
+ container_of(notifier, struct mt6397_chip, pm_nb);
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ regmap_write(chip->regmap,
+ chip->int_con[0], chip->wake_mask[0]);
+ regmap_write(chip->regmap,
+ chip->int_con[1], chip->wake_mask[1]);
+ enable_irq_wake(chip->irq);
+ break;
+
+ case PM_POST_SUSPEND:
+ regmap_write(chip->regmap,
+ chip->int_con[0], chip->irq_masks_cur[0]);
+ regmap_write(chip->regmap,
+ chip->int_con[1], chip->irq_masks_cur[1]);
+ disable_irq_wake(chip->irq);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
int mt6397_irq_init(struct mt6397_chip *chip)
{
int ret;
@@ -159,6 +190,7 @@ int mt6397_irq_init(struct mt6397_chip *chip)
regmap_write(chip->regmap, chip->int_con[0], 0x0);
regmap_write(chip->regmap, chip->int_con[1], 0x0);
+ chip->pm_nb.notifier_call = mt6397_irq_pm_notifier;
chip->irq_domain = irq_domain_add_linear(chip->dev->of_node,
MT6397_IRQ_NR,
&mt6397_irq_domain_ops,
@@ -177,5 +209,6 @@ int mt6397_irq_init(struct mt6397_chip *chip)
return ret;
}
+ register_pm_notifier(&chip->pm_nb);
return 0;
}
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index e49787e6bb93..ccd62b963952 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1145,22 +1145,14 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
return -ENOMEM;
lookup->dev_id = "i2c-gpio";
- if (iic->pin_sda < 32)
- lookup->table[0].chip_label = "SM501-LOW";
- else
- lookup->table[0].chip_label = "SM501-HIGH";
- lookup->table[0].chip_hwnum = iic->pin_sda % 32;
- lookup->table[0].con_id = NULL;
- lookup->table[0].idx = 0;
- lookup->table[0].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
- if (iic->pin_scl < 32)
- lookup->table[1].chip_label = "SM501-LOW";
- else
- lookup->table[1].chip_label = "SM501-HIGH";
- lookup->table[1].chip_hwnum = iic->pin_scl % 32;
- lookup->table[1].con_id = NULL;
- lookup->table[1].idx = 1;
- lookup->table[1].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
+ lookup->table[0] = (struct gpiod_lookup)
+ GPIO_LOOKUP_IDX(iic->pin_sda < 32 ? "SM501-LOW" : "SM501-HIGH",
+ iic->pin_sda % 32, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN);
+ lookup->table[1] = (struct gpiod_lookup)
+ GPIO_LOOKUP_IDX(iic->pin_scl < 32 ? "SM501-LOW" : "SM501-HIGH",
+ iic->pin_scl % 32, NULL, 1,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN);
gpiod_add_lookup_table(lookup);
icd = dev_get_platdata(&pdev->dev);
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index ebdf2f11ae28..33336cde4724 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -284,7 +284,6 @@ MODULE_DEVICE_TABLE(of, sprd_pmic_match);
static struct spi_driver sprd_pmic_driver = {
.driver = {
.name = "sc27xx-pmic",
- .bus = &spi_bus_type,
.of_match_table = sprd_pmic_match,
},
.probe = sprd_pmic_probe,
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index efcd4b980c94..add603359124 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -167,10 +167,11 @@ static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
regmap_write(ddata->regmap, TIM_ARR, 0x0);
}
-static void stm32_timers_dma_probe(struct device *dev,
+static int stm32_timers_dma_probe(struct device *dev,
struct stm32_timers *ddata)
{
int i;
+ int ret = 0;
char name[4];
init_completion(&ddata->dma.completion);
@@ -179,14 +180,23 @@ static void stm32_timers_dma_probe(struct device *dev,
/* Optional DMA support: get valid DMA channel(s) or NULL */
for (i = STM32_TIMERS_DMA_CH1; i <= STM32_TIMERS_DMA_CH4; i++) {
snprintf(name, ARRAY_SIZE(name), "ch%1d", i + 1);
- ddata->dma.chans[i] = dma_request_slave_channel(dev, name);
+ ddata->dma.chans[i] = dma_request_chan(dev, name);
}
- ddata->dma.chans[STM32_TIMERS_DMA_UP] =
- dma_request_slave_channel(dev, "up");
- ddata->dma.chans[STM32_TIMERS_DMA_TRIG] =
- dma_request_slave_channel(dev, "trig");
- ddata->dma.chans[STM32_TIMERS_DMA_COM] =
- dma_request_slave_channel(dev, "com");
+ ddata->dma.chans[STM32_TIMERS_DMA_UP] = dma_request_chan(dev, "up");
+ ddata->dma.chans[STM32_TIMERS_DMA_TRIG] = dma_request_chan(dev, "trig");
+ ddata->dma.chans[STM32_TIMERS_DMA_COM] = dma_request_chan(dev, "com");
+
+ for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++) {
+ if (IS_ERR(ddata->dma.chans[i])) {
+ /* Save the first error code to return */
+ if (PTR_ERR(ddata->dma.chans[i]) != -ENODEV && !ret)
+ ret = PTR_ERR(ddata->dma.chans[i]);
+
+ ddata->dma.chans[i] = NULL;
+ }
+ }
+
+ return ret;
}
static void stm32_timers_dma_remove(struct device *dev,
@@ -230,7 +240,11 @@ static int stm32_timers_probe(struct platform_device *pdev)
stm32_timers_get_arr_size(ddata);
- stm32_timers_dma_probe(dev, ddata);
+ ret = stm32_timers_dma_probe(dev, ddata);
+ if (ret) {
+ stm32_timers_dma_remove(dev, ddata);
+ return ret;
+ }
platform_set_drvdata(pdev, ddata);
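Editor's note: the DMA rework above keeps the channels optional. A compact sketch of the convention it relies on (helper name assumed, not part of the patch): dma_request_chan() returning -ENODEV simply means no channel is described in the firmware, while any other error, notably -EPROBE_DEFER, is worth propagating.

	static struct dma_chan *example_optional_dma_chan(struct device *dev,
							  const char *name)
	{
		struct dma_chan *chan = dma_request_chan(dev, name);

		/* No "dmas" entry for this name: run without DMA. */
		if (IS_ERR(chan) && PTR_ERR(chan) == -ENODEV)
			return NULL;

		/* Either a usable channel or a real error (e.g. -EPROBE_DEFER). */
		return chan;
	}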
diff --git a/drivers/mfd/stmfx.c b/drivers/mfd/stmfx.c
index 857991cb3cbb..711979afd90a 100644
--- a/drivers/mfd/stmfx.c
+++ b/drivers/mfd/stmfx.c
@@ -287,14 +287,21 @@ static int stmfx_irq_init(struct i2c_client *client)
ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin);
if (ret)
- return ret;
+ goto irq_exit;
ret = devm_request_threaded_irq(stmfx->dev, client->irq,
NULL, stmfx_irq_handler,
irqtrigger | IRQF_ONESHOT,
"stmfx", stmfx);
if (ret)
- stmfx_irq_exit(client);
+ goto irq_exit;
+
+ stmfx->irq = client->irq;
+
+ return 0;
+
+irq_exit:
+ stmfx_irq_exit(client);
return ret;
}
@@ -481,6 +488,8 @@ static int stmfx_suspend(struct device *dev)
if (ret)
return ret;
+ disable_irq(stmfx->irq);
+
if (stmfx->vdd)
return regulator_disable(stmfx->vdd);
@@ -501,6 +510,13 @@ static int stmfx_resume(struct device *dev)
}
}
+ /* Reset STMFX - supply has been stopped during suspend */
+ ret = stmfx_chip_reset(stmfx);
+ if (ret) {
+ dev_err(stmfx->dev, "Failed to reset chip: %d\n", ret);
+ return ret;
+ }
+
ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL,
&stmfx->bkp_sysctrl, sizeof(stmfx->bkp_sysctrl));
if (ret)
@@ -517,6 +533,8 @@ static int stmfx_resume(struct device *dev)
if (ret)
return ret;
+ enable_irq(stmfx->irq);
+
return 0;
}
#endif
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index 7dfbe8906cb8..eb3da558c3fb 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -59,7 +59,7 @@ static const struct regmap_access_table stpmic1_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(stpmic1_volatile_ranges),
};
-const struct regmap_config stpmic1_regmap_config = {
+static const struct regmap_config stpmic1_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
index b9f48e588d95..ddddf08b6a4c 100644
--- a/drivers/mfd/tqmx86.c
+++ b/drivers/mfd/tqmx86.c
@@ -274,7 +274,7 @@ static int __init tqmx86_init(void)
module_init(tqmx86_init);
-MODULE_DESCRIPTION("TQx86 PLD Core Driver");
+MODULE_DESCRIPTION("TQMx86 PLD Core Driver");
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:tqmx86");
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index c68ff56dbdb1..aaf24af287dd 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -8,13 +8,12 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/core.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stat.h>
-#include <linux/vexpress.h>
#define SYS_ID 0x000
#define SYS_SW 0x004
@@ -37,35 +36,8 @@
#define SYS_CFGCTRL 0x0a4
#define SYS_CFGSTAT 0x0a8
-#define SYS_HBI_MASK 0xfff
-#define SYS_PROCIDx_HBI_SHIFT 0
-
-#define SYS_MISC_MASTERSITE (1 << 14)
-
-void vexpress_flags_set(u32 data)
-{
- static void __iomem *base;
-
- if (!base) {
- struct device_node *node = of_find_compatible_node(NULL, NULL,
- "arm,vexpress-sysreg");
-
- base = of_iomap(node, 0);
- }
-
- if (WARN_ON(!base))
- return;
-
- writel(~0, base + SYS_FLAGSCLR);
- writel(data, base + SYS_FLAGSSET);
-}
-
/* The sysreg block is just a random collection of various functions... */
-static struct syscon_platform_data vexpress_sysreg_sys_id_pdata = {
- .label = "sys_id",
-};
-
static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = {
.label = "sys_led",
.base = -1,
@@ -84,24 +56,8 @@ static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = {
.ngpio = 1,
};
-static struct syscon_platform_data vexpress_sysreg_sys_misc_pdata = {
- .label = "sys_misc",
-};
-
-static struct syscon_platform_data vexpress_sysreg_sys_procid_pdata = {
- .label = "sys_procid",
-};
-
static struct mfd_cell vexpress_sysreg_cells[] = {
{
- .name = "syscon",
- .num_resources = 1,
- .resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_ID, 0x4),
- },
- .platform_data = &vexpress_sysreg_sys_id_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_id_pdata),
- }, {
.name = "basic-mmio-gpio",
.of_compatible = "arm,vexpress-sysreg,sys_led",
.num_resources = 1,
@@ -129,26 +85,10 @@ static struct mfd_cell vexpress_sysreg_cells[] = {
.platform_data = &vexpress_sysreg_sys_flash_pdata,
.pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata),
}, {
- .name = "syscon",
- .num_resources = 1,
- .resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_MISC, 0x4),
- },
- .platform_data = &vexpress_sysreg_sys_misc_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_misc_pdata),
- }, {
- .name = "syscon",
- .num_resources = 1,
- .resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_PROCID0, 0x8),
- },
- .platform_data = &vexpress_sysreg_sys_procid_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_procid_pdata),
- }, {
.name = "vexpress-syscfg",
.num_resources = 1,
.resources = (struct resource []) {
- DEFINE_RES_MEM(SYS_CFGDATA, 0xc),
+ DEFINE_RES_MEM(SYS_MISC, 0x4c),
},
}
};
@@ -158,8 +98,6 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
struct resource *mem;
void __iomem *base;
struct gpio_chip *mmc_gpio_chip;
- int master;
- u32 dt_hbi;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -169,21 +107,6 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- master = readl(base + SYS_MISC) & SYS_MISC_MASTERSITE ?
- VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
- vexpress_config_set_master(master);
-
- /* Confirm board type against DT property, if available */
- if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
- u32 id = readl(base + (master == VEXPRESS_SITE_DB1 ?
- SYS_PROCID0 : SYS_PROCID1));
- u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-
- if (WARN_ON(dt_hbi != hbi))
- dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n",
- dt_hbi, hbi);
- }
-
/*
* Duplicated SYS_MCI pseudo-GPIO controller for compatibility with
* older trees using sysreg node for MMC control lines.
@@ -195,9 +118,9 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
NULL, NULL, NULL, NULL, 0);
mmc_gpio_chip->ngpio = 2;
- gpiochip_add_data(mmc_gpio_chip, NULL);
+ devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
- return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
vexpress_sysreg_cells,
ARRAY_SIZE(vexpress_sysreg_cells), mem, 0, NULL);
}
@@ -206,6 +129,7 @@ static const struct of_device_id vexpress_sysreg_match[] = {
{ .compatible = "arm,vexpress-sysreg", },
{},
};
+MODULE_DEVICE_TABLE(of, vexpress_sysreg_match);
static struct platform_driver vexpress_sysreg_driver = {
.driver = {
@@ -215,14 +139,5 @@ static struct platform_driver vexpress_sysreg_driver = {
.probe = vexpress_sysreg_probe,
};
-static int __init vexpress_sysreg_init(void)
-{
- struct device_node *node;
-
- /* Need the sysreg early, before any other device... */
- for_each_matching_node(node, vexpress_sysreg_match)
- of_platform_device_create(node, NULL, NULL);
-
- return platform_driver_register(&vexpress_sysreg_driver);
-}
-core_initcall(vexpress_sysreg_init);
+module_platform_driver(vexpress_sysreg_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wcd934x.c b/drivers/mfd/wcd934x.c
index 90341f3c6810..da910302d51a 100644
--- a/drivers/mfd/wcd934x.c
+++ b/drivers/mfd/wcd934x.c
@@ -280,7 +280,6 @@ static void wcd934x_slim_remove(struct slim_device *sdev)
regulator_bulk_disable(WCD934X_MAX_SUPPLY, ddata->supplies);
mfd_remove_devices(&sdev->dev);
- kfree(ddata);
}
static const struct slim_device_id wcd934x_slim_id[] = {
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 1e9fe7d92597..3b2b93c5bbcb 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -393,7 +393,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
- dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wm8994->dev, "Failed to get supplies: %d\n",
+ ret);
goto err;
}
@@ -584,6 +586,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_irq;
}
+ pm_runtime_set_active(wm8994->dev);
pm_runtime_enable(wm8994->dev);
pm_runtime_idle(wm8994->dev);
@@ -603,7 +606,9 @@ err:
static void wm8994_device_exit(struct wm8994 *wm8994)
{
+ pm_runtime_get_sync(wm8994->dev);
pm_runtime_disable(wm8994->dev);
+ pm_runtime_put_noidle(wm8994->dev);
wm8994_irq_exit(wm8994);
regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
@@ -690,3 +695,4 @@ module_i2c_driver(wm8994_i2c_driver);
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_SOFTDEP("pre: wm8994_regulator");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 99e151475d8f..edd5dd5ebfdc 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -423,15 +423,6 @@ config SRAM
config SRAM_EXEC
bool
-config VEXPRESS_SYSCFG
- bool "Versatile Express System Configuration driver"
- depends on VEXPRESS_CONFIG
- default y
- help
- ARM Ltd. Versatile Express uses specialised platform configuration
- bus. System Configuration interface is one of the possible means
- of generating transactions on this bus.
-
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9abf2923d831..c7bd01ac6291 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,7 +49,6 @@ obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
-obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 1a81cda948c1..6c6c9e95a29f 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -347,31 +347,6 @@ static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
-static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
-{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
- if (pcr->aspm_enabled == enable)
- return;
-
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- if (enable)
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr,
- pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- if (!enable)
- val = FORCE_ASPM_CTL0;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
-
- pcr->aspm_enabled = enable;
-}
-
static const struct pcr_ops rts5249_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
@@ -384,7 +359,6 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
- .set_aspm = rts5249_set_aspm,
};
/* SD Pull Control Enable:
@@ -471,7 +445,6 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
- option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
option->ltr_l1off_snooze_sspwrgate =
@@ -612,7 +585,6 @@ static const struct pcr_ops rts524a_pcr_ops = {
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
- .set_aspm = rts5249_set_aspm,
};
void rts524a_init_params(struct rtsx_pcr *pcr)
@@ -728,7 +700,6 @@ static const struct pcr_ops rts525a_pcr_ops = {
.switch_output_voltage = rts525a_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
- .set_aspm = rts5249_set_aspm,
};
void rts525a_init_params(struct rtsx_pcr *pcr)
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 711054ebad74..7a9dbb778e84 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -570,30 +570,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
-static void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
-{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
- if (pcr->aspm_enabled == enable)
- return;
-
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- if (enable)
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- if (!enable)
- val = FORCE_ASPM_CTL0;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
-
- pcr->aspm_enabled = enable;
-}
-
static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
@@ -639,7 +615,6 @@ static const struct pcr_ops rts5260_pcr_ops = {
.switch_output_voltage = rts5260_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
.stop_cmd = rts5260_stop_cmd,
- .set_aspm = rts5260_set_aspm,
.set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
.enable_ocp = rts5260_enable_ocp,
.disable_ocp = rts5260_disable_ocp,
@@ -683,7 +658,6 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
- option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
option->ltr_l1off_snooze_sspwrgate =
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 78c3b1d424c3..195822ec858e 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -518,51 +518,22 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
if (pcr->aspm_enabled == enable)
return;
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- val = FORCE_ASPM_CTL0;
- val |= (pcr->aspm_en & 0x02);
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- val = pcr->aspm_en;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- }
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
pcr->aspm_enabled = enable;
}
static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
{
- struct rtsx_cr_option *option = &pcr->option;
- u8 val = 0;
-
if (pcr->aspm_enabled == enable)
return;
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- val = 0;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
-
- val = 0;
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- ASPM_MASK_NEG, val);
- val = FORCE_ASPM_CTL0;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
udelay(10);
pcr->aspm_enabled = enable;
@@ -639,8 +610,13 @@ int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (initial_mode) {
/* We use 250k(around) here, in initial stage */
- clk_divider = SD_CLK_DIVIDE_128;
- card_clock = 30000000;
+ if (is_version(pcr, PID_5261, IC_VER_D)) {
+ clk_divider = SD_CLK_DIVIDE_256;
+ card_clock = 60000000;
+ } else {
+ clk_divider = SD_CLK_DIVIDE_128;
+ card_clock = 30000000;
+ }
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
@@ -784,7 +760,6 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = 0x7F;
option->ltr_l1off_snooze_sspwrgate = 0x78;
- option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
option->ocp_en = 1;
hw_param->interrupt_en |= SD_OC_INT_EN;
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 55da6428ceb0..0d5928bc1b6d 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -55,16 +55,10 @@ static const struct pci_device_id rtsx_pci_ids[] = {
MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
-static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
-{
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- 0xFC, pcr->aspm_en);
-}
-
static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
- rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
- 0xFC, 0);
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0);
}
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
@@ -85,32 +79,17 @@ static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
- if (pcr->ops->set_ltr_latency)
- return pcr->ops->set_ltr_latency(pcr, latency);
- else
- return rtsx_comm_set_ltr_latency(pcr, latency);
+ return rtsx_comm_set_ltr_latency(pcr, latency);
}
static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
- struct rtsx_cr_option *option = &pcr->option;
-
if (pcr->aspm_enabled == enable)
return;
- if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
- if (enable)
- rtsx_pci_enable_aspm(pcr);
- else
- rtsx_pci_disable_aspm(pcr);
- } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
- u8 mask = FORCE_ASPM_VAL_MASK;
- u8 val = 0;
-
- if (enable)
- val = pcr->aspm_en;
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
- }
+ pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ enable ? pcr->aspm_en : 0);
pcr->aspm_enabled = enable;
}
@@ -154,10 +133,7 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
- if (pcr->ops->full_on)
- pcr->ops->full_on(pcr);
- else
- rtsx_comm_pm_full_on(pcr);
+ rtsx_comm_pm_full_on(pcr);
}
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
@@ -1111,10 +1087,7 @@ static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
- if (pcr->ops->power_saving)
- pcr->ops->power_saving(pcr);
- else
- rtsx_comm_pm_power_saving(pcr);
+ rtsx_comm_pm_power_saving(pcr);
}
static void rtsx_pci_idle_work(struct work_struct *work)
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ed391df52f4f..024cbd998b2a 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -29,7 +29,6 @@
#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF 0xAC
#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF 0xF8
#define CMD_TIMEOUT_DEF 100
-#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
#define SSC_CLOCK_STABLE_WAIT 130
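
For context on the conversion above: pcie_capability_clear_and_set_word() is the generic PCI core helper for a read-modify-write of a 16-bit PCIe capability register, which is what the removed rtsx_pci_update_cfg_byte()/ASPM_MASK_NEG code did by hand. A minimal sketch of the equivalent open-coded sequence, assuming kernel context (function name is hypothetical, illustrative only):

#include <linux/pci.h>

/* Roughly what pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 * PCI_EXP_LNKCTL_ASPMC, aspm_en) amounts to: clear the two ASPM control
 * bits in Link Control, then set the requested ones.
 */
static void demo_set_aspm_ctl(struct pci_dev *pdev, u16 aspm_en)
{
        u16 lnkctl;

        pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
        lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;                /* clear L0s/L1 enable bits */
        lnkctl |= aspm_en & PCI_EXP_LNKCTL_ASPMC;       /* set the requested state  */
        pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
}

Passing 0 as the set value, as the disable paths above do, simply clears both ASPM enable bits.
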
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 39eec9031487..51aecafdcbdf 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -7,18 +7,10 @@ config CXL_BASE
bool
select PPC_COPRO_BASE
-config CXL_AFU_DRIVER_OPS
- bool
-
-config CXL_LIB
- bool
-
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
- select CXL_AFU_DRIVER_OPS
- select CXL_LIB
default m
help
Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 258c43a95ac3..2a1783f32254 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -207,7 +207,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
struct vm_area_struct *vma = NULL;
int rc = 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
@@ -218,7 +218,7 @@ static int get_vma_info(struct mm_struct *mm, u64 addr,
*vma_start = vma->vm_start;
*vma_end = vma->vm_end;
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return rc;
}
@@ -245,9 +245,8 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
dar += page_size) {
if (dar < vma_start || dar >= vma_end) {
/*
- * We don't hold the mm->mmap_sem semaphore
- * while iterating, since the semaphore is
- * required by one of the lower-level page
+ * We don't hold mm->mmap_lock while iterating, since
+ * the lock is required by one of the lower-level page
* fault processing functions and it could
* create a deadlock.
*
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 2297e6fc1544..01153b74334a 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -321,7 +321,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
return;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
@@ -336,7 +336,7 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
last_esid = slb.esid;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
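
The cxl hunks above are part of the tree-wide mmap_sem to mmap_lock API conversion: down_read(&mm->mmap_sem)/up_read() become mmap_read_lock(mm)/mmap_read_unlock(mm) with unchanged semantics. A minimal sketch of the read-locked VMA lookup pattern these call sites use, assuming kernel context (the helper name is hypothetical):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper: return the bounds of the VMA covering addr, or -EFAULT. */
static int demo_lookup_vma_bounds(struct mm_struct *mm, unsigned long addr,
                                  unsigned long *start, unsigned long *end)
{
        struct vm_area_struct *vma;
        int rc = 0;

        mmap_read_lock(mm);             /* was: down_read(&mm->mmap_sem) */
        vma = find_vma(mm, addr);
        if (!vma || addr < vma->vm_start) {
                rc = -EFAULT;
                goto out;
        }
        *start = vma->vm_start;
        *end = vma->vm_end;
out:
        mmap_read_unlock(mm);           /* was: up_read(&mm->mmap_sem) */
        return rc;
}
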
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index e3e085e33d46..7939c55daceb 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -904,6 +904,7 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
struct fastrpc_channel_ctx *cctx;
struct fastrpc_user *fl = ctx->fl;
struct fastrpc_msg *msg = &ctx->msg;
+ int ret;
cctx = fl->cctx;
msg->pid = fl->tgid;
@@ -919,7 +920,13 @@ static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
fastrpc_context_get(ctx);
- return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
+ ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
+
+ if (ret)
+ fastrpc_context_put(ctx);
+
+ return ret;
+
}
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
@@ -1613,8 +1620,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
domains[domain_id]);
data->miscdev.fops = &fastrpc_fops;
err = misc_register(&data->miscdev);
- if (err)
+ if (err) {
+ kfree(data);
return err;
+ }
kref_init(&data->refcount);
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 2e1c4d2905e8..77c21caf2acd 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -27,7 +27,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "genwqe_driver.h"
#include "card_base.h"
@@ -515,30 +515,6 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
}
/**
- * genwqe_free_user_pages() - Give pinned pages back
- *
- * Documentation of get_user_pages is in mm/gup.c:
- *
- * If the page is written to, set_page_dirty (or set_page_dirty_lock,
- * as appropriate) must be called after the page is finished with, and
- * before put_page is called.
- */
-static int genwqe_free_user_pages(struct page **page_list,
- unsigned int nr_pages, int dirty)
-{
- unsigned int i;
-
- for (i = 0; i < nr_pages; i++) {
- if (page_list[i] != NULL) {
- if (dirty)
- set_page_dirty_lock(page_list[i]);
- put_page(page_list[i]);
- }
- }
- return 0;
-}
-
-/**
* genwqe_user_vmap() - Map user-space memory to virtual kernel memory
* @cd: pointer to genwqe device
* @m: mapping params
@@ -597,18 +573,18 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);
/* pin user pages in memory */
- rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
+ rc = pin_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
m->write ? FOLL_WRITE : 0, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
- goto fail_get_user_pages;
+ goto fail_pin_user_pages;
- /* assumption: get_user_pages can be killed by signals. */
+ /* assumption: pin_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- genwqe_free_user_pages(m->page_list, rc, m->write);
+ unpin_user_pages_dirty_lock(m->page_list, rc, m->write);
rc = -EFAULT;
- goto fail_get_user_pages;
+ goto fail_pin_user_pages;
}
rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
@@ -618,9 +594,9 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
+ unpin_user_pages_dirty_lock(m->page_list, m->nr_pages, m->write);
- fail_get_user_pages:
+ fail_pin_user_pages:
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
@@ -650,8 +626,8 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);
-
+ unpin_user_pages_dirty_lock(m->page_list, m->nr_pages,
+ m->write);
kfree(m->page_list);
m->page_list = NULL;
m->dma_list = NULL;
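
The genwqe change above switches from get_user_pages_fast()/put_page() to the pin_user_pages API, where unpin_user_pages_dirty_lock() replaces the hand-rolled set_page_dirty_lock()+put_page() loop. A hedged sketch of the pin/use/unpin lifecycle, assuming kernel context (helper name and error handling are illustrative):

#include <linux/mm.h>

static int demo_pin_use_unpin(unsigned long uaddr, int nr_pages,
                              struct page **pages, bool write)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
                                     write ? FOLL_WRITE : 0, pages);
        if (pinned < 0)
                return pinned;          /* nothing pinned, nothing to undo */

        if (pinned < nr_pages) {
                /* partial pin: release what we got, as the driver does */
                unpin_user_pages_dirty_lock(pages, pinned, write);
                return -EFAULT;
        }

        /* ... DMA-map and use the pages here ... */

        /* one call replaces the old set_page_dirty_lock()/put_page() loop */
        unpin_user_pages_dirty_lock(pages, nr_pages, write);
        return 0;
}
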
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index 482f6227dbba..421ebd903069 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -13,3 +13,6 @@ habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
include $(src)/goya/Makefile
habanalabs-y += $(HL_GOYA_FILES)
+
+include $(src)/gaudi/Makefile
+habanalabs-y += $(HL_GAUDI_FILES)
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index 53fddbd8e693..02d13f71b1df 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -105,10 +105,9 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out_err;
}
- if (cb_size > HL_MAX_CB_SIZE) {
- dev_err(hdev->dev,
- "CB size %d must be less then %d\n",
- cb_size, HL_MAX_CB_SIZE);
+ if (cb_size > SZ_2M) {
+ dev_err(hdev->dev, "CB size %d must be less than %d\n",
+ cb_size, SZ_2M);
rc = -EINVAL;
goto out_err;
}
@@ -211,7 +210,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cb_args *args = data;
struct hl_device *hdev = hpriv->hdev;
- u64 handle;
+ u64 handle = 0;
int rc;
if (hl_device_disabled_or_in_reset(hdev)) {
@@ -223,15 +222,26 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
switch (args->in.op) {
case HL_CB_OP_CREATE:
- rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
- &handle, hpriv->ctx->asid);
+ if (args->in.cb_size > HL_MAX_CB_SIZE) {
+ dev_err(hdev->dev,
+ "User requested CB size %d must be less than %d\n",
+ args->in.cb_size, HL_MAX_CB_SIZE);
+ rc = -EINVAL;
+ } else {
+ rc = hl_cb_create(hdev, &hpriv->cb_mgr,
+ args->in.cb_size, &handle,
+ hpriv->ctx->asid);
+ }
+
memset(args, 0, sizeof(*args));
args->out.cb_handle = handle;
break;
+
case HL_CB_OP_DESTROY:
rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
args->in.cb_handle);
break;
+
default:
rc = -ENOTTY;
break;
@@ -278,7 +288,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
if (!cb) {
dev_err(hdev->dev,
- "CB mmap failed, no match to handle %d\n", handle);
+ "CB mmap failed, no match to handle 0x%x\n", handle);
return -EINVAL;
}
@@ -347,7 +357,7 @@ struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
if (!cb) {
spin_unlock(&mgr->cb_lock);
dev_warn(hdev->dev,
- "CB get failed, no match to handle %d\n", handle);
+ "CB get failed, no match to handle 0x%x\n", handle);
return NULL;
}
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 409276b6374d..f82974a916c3 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -11,11 +11,33 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
+#define HL_CS_FLAGS_SIG_WAIT (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT)
+
static void job_wq_completion(struct work_struct *work);
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
struct hl_ctx *ctx, u64 timeout_us, u64 seq);
static void cs_do_release(struct kref *ref);
+static void hl_sob_reset(struct kref *ref)
+{
+ struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
+ kref);
+ struct hl_device *hdev = hw_sob->hdev;
+
+ hdev->asic_funcs->reset_sob(hdev, hw_sob);
+}
+
+void hl_sob_reset_error(struct kref *ref)
+{
+ struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
+ kref);
+ struct hl_device *hdev = hw_sob->hdev;
+
+ dev_crit(hdev->dev,
+ "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
+ hw_sob->q_idx, hw_sob->sob_id);
+}
+
static const char *hl_fence_get_driver_name(struct dma_fence *fence)
{
return "HabanaLabs";
@@ -23,10 +45,10 @@ static const char *hl_fence_get_driver_name(struct dma_fence *fence)
static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
{
- struct hl_dma_fence *hl_fence =
- container_of(fence, struct hl_dma_fence, base_fence);
+ struct hl_cs_compl *hl_cs_compl =
+ container_of(fence, struct hl_cs_compl, base_fence);
- return dev_name(hl_fence->hdev->dev);
+ return dev_name(hl_cs_compl->hdev->dev);
}
static bool hl_fence_enable_signaling(struct dma_fence *fence)
@@ -36,17 +58,47 @@ static bool hl_fence_enable_signaling(struct dma_fence *fence)
static void hl_fence_release(struct dma_fence *fence)
{
- struct hl_dma_fence *hl_fence =
- container_of(fence, struct hl_dma_fence, base_fence);
+ struct hl_cs_compl *hl_cs_cmpl =
+ container_of(fence, struct hl_cs_compl, base_fence);
+ struct hl_device *hdev = hl_cs_cmpl->hdev;
+
+ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
+ (hl_cs_cmpl->type == CS_TYPE_WAIT)) {
- kfree_rcu(hl_fence, base_fence.rcu);
+ dev_dbg(hdev->dev,
+ "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n",
+ hl_cs_cmpl->cs_seq,
+ hl_cs_cmpl->type,
+ hl_cs_cmpl->hw_sob->sob_id,
+ hl_cs_cmpl->sob_val);
+
+ /*
+ * A signal CS can get completion while the corresponding wait
+ * for signal CS is on its way to the PQ. The wait for signal CS
+ * will get stuck if the signal CS incremented the SOB to its
+ * max value and there are no pending (submitted) waits on this
+ * SOB.
+ * We do the following to avoid this situation:
+ * 1. The wait for signal CS must get a ref for the signal CS as
+ * soon as possible in cs_ioctl_signal_wait() and put it
+ * before being submitted to the PQ but after it incremented
+ * the SOB refcnt in init_signal_wait_cs().
+ * 2. Signal/Wait for signal CS will decrement the SOB refcnt
+ * here.
+ * These two measures guarantee that the wait for signal CS will
+ * reset the SOB upon completion rather than the signal CS and
+ * hence the above scenario is avoided.
+ */
+ kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
+ }
+
+ kfree_rcu(hl_cs_cmpl, base_fence.rcu);
}
static const struct dma_fence_ops hl_fence_ops = {
.get_driver_name = hl_fence_get_driver_name,
.get_timeline_name = hl_fence_get_timeline_name,
.enable_signaling = hl_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = hl_fence_release
};
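
The hl_sob_reset() callback and the kref_put() described in the comment above follow the standard kref pattern: whoever drops the last reference triggers the release callback, which here resets the sync object (SOB). A small self-contained sketch of that pattern, assuming kernel context (names are illustrative, not from the driver):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
        struct kref kref;
};

/* Runs exactly once, from whichever kref_put() drops the last reference;
 * in the driver this is where the SOB gets reset.
 */
static void demo_obj_release(struct kref *kref)
{
        struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

        kfree(obj);
}

static void demo_kref_usage(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return;

        kref_init(&obj->kref);                  /* refcount = 1 (the "signal" user) */
        kref_get(&obj->kref);                   /* refcount = 2 (the "wait" user)   */

        kref_put(&obj->kref, demo_obj_release); /* signal side done, no release yet */
        kref_put(&obj->kref, demo_obj_release); /* last ref gone, release runs      */
}
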
@@ -113,6 +165,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
+ job->contains_dma_pkt = parser.contains_dma_pkt;
spin_lock(&job->patched_cb->lock);
job->patched_cb->cs_cnt++;
@@ -259,6 +312,12 @@ static void cs_do_release(struct kref *ref)
spin_unlock(&hdev->hw_queues_mirror_lock);
}
+ } else if (cs->type == CS_TYPE_WAIT) {
+ /*
+ * In case the wait for signal CS was submitted, the put occurs
+ * in init_signal_wait_cs() right before hanging on the PQ.
+ */
+ dma_fence_put(cs->signal_fence);
}
/*
@@ -312,9 +371,9 @@ static void cs_timedout(struct work_struct *work)
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
- struct hl_cs **cs_new)
+ enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
- struct hl_dma_fence *fence;
+ struct hl_cs_compl *cs_cmpl;
struct dma_fence *other = NULL;
struct hl_cs *cs;
int rc;
@@ -326,25 +385,27 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->ctx = ctx;
cs->submitted = false;
cs->completed = false;
+ cs->type = cs_type;
INIT_LIST_HEAD(&cs->job_list);
INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
kref_init(&cs->refcount);
spin_lock_init(&cs->job_lock);
- fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
- if (!fence) {
+ cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
+ if (!cs_cmpl) {
rc = -ENOMEM;
goto free_cs;
}
- fence->hdev = hdev;
- spin_lock_init(&fence->lock);
- cs->fence = &fence->base_fence;
+ cs_cmpl->hdev = hdev;
+ cs_cmpl->type = cs->type;
+ spin_lock_init(&cs_cmpl->lock);
+ cs->fence = &cs_cmpl->base_fence;
spin_lock(&ctx->cs_lock);
- fence->cs_seq = ctx->cs_sequence;
- other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
+ cs_cmpl->cs_seq = ctx->cs_sequence;
+ other = ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)];
if ((other) && (!dma_fence_is_signaled(other))) {
spin_unlock(&ctx->cs_lock);
dev_dbg(hdev->dev,
@@ -353,16 +414,16 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
goto free_fence;
}
- dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
+ dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
ctx->asid, ctx->cs_sequence);
- cs->sequence = fence->cs_seq;
+ cs->sequence = cs_cmpl->cs_seq;
- ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
- &fence->base_fence;
+ ctx->cs_pending[cs_cmpl->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+ &cs_cmpl->base_fence;
ctx->cs_sequence++;
- dma_fence_get(&fence->base_fence);
+ dma_fence_get(&cs_cmpl->base_fence);
dma_fence_put(other);
@@ -373,7 +434,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
return 0;
free_fence:
- kfree(fence);
+ kfree(cs_cmpl);
free_cs:
kfree(cs);
return rc;
@@ -499,8 +560,8 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
return job;
}
-static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
- u32 num_chunks, u64 *cs_seq)
+static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
+ u32 num_chunks, u64 *cs_seq)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
@@ -538,7 +599,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
/* increment refcnt for context */
hl_ctx_get(hdev, hpriv->ctx);
- rc = allocate_cs(hdev, hpriv->ctx, &cs);
+ rc = allocate_cs(hdev, hpriv->ctx, CS_TYPE_DEFAULT, &cs);
if (rc) {
hl_ctx_put(hpriv->ctx);
goto free_cs_chunk_array;
@@ -652,13 +713,230 @@ out:
return rc;
}
+static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
+ void __user *chunks, u32 num_chunks,
+ u64 *cs_seq)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct hl_ctx *ctx = hpriv->ctx;
+ struct hl_cs_chunk *cs_chunk_array, *chunk;
+ struct hw_queue_properties *hw_queue_prop;
+ struct dma_fence *sig_fence = NULL;
+ struct hl_cs_job *job;
+ struct hl_cs *cs;
+ struct hl_cb *cb;
+ u64 *signal_seq_arr = NULL, signal_seq;
+ u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size;
+ int rc;
+
+ *cs_seq = ULLONG_MAX;
+
+ if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ dev_err(hdev->dev,
+ "Number of chunks can NOT be larger than %d\n",
+ HL_MAX_JOBS_PER_CS);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array),
+ GFP_ATOMIC);
+ if (!cs_chunk_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
+ if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) {
+ dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
+ rc = -EFAULT;
+ goto free_cs_chunk_array;
+ }
+
+ /* currently it is guaranteed to have only one chunk */
+ chunk = &cs_chunk_array[0];
+ q_idx = chunk->queue_index;
+ hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx];
+
+ if ((q_idx >= HL_MAX_QUEUES) ||
+ (hw_queue_prop->type != QUEUE_TYPE_EXT)) {
+ dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ if (cs_type == CS_TYPE_WAIT) {
+ struct hl_cs_compl *sig_waitcs_cmpl;
+
+ signal_seq_arr_len = chunk->num_signal_seq_arr;
+
+ /* currently only one signal seq is supported */
+ if (signal_seq_arr_len != 1) {
+ dev_err(hdev->dev,
+ "Wait for signal CS supports only one signal CS seq\n");
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
+ signal_seq_arr = kmalloc_array(signal_seq_arr_len,
+ sizeof(*signal_seq_arr),
+ GFP_ATOMIC);
+ if (!signal_seq_arr) {
+ rc = -ENOMEM;
+ goto free_cs_chunk_array;
+ }
+
+ size_to_copy = chunk->num_signal_seq_arr *
+ sizeof(*signal_seq_arr);
+ if (copy_from_user(signal_seq_arr,
+ u64_to_user_ptr(chunk->signal_seq_arr),
+ size_to_copy)) {
+ dev_err(hdev->dev,
+ "Failed to copy signal seq array from user\n");
+ rc = -EFAULT;
+ goto free_signal_seq_array;
+ }
+
+ /* currently it is guaranteed to have only one signal seq */
+ signal_seq = signal_seq_arr[0];
+ sig_fence = hl_ctx_get_fence(ctx, signal_seq);
+ if (IS_ERR(sig_fence)) {
+ dev_err(hdev->dev,
+ "Failed to get signal CS with seq 0x%llx\n",
+ signal_seq);
+ rc = PTR_ERR(sig_fence);
+ goto free_signal_seq_array;
+ }
+
+ if (!sig_fence) {
+ /* signal CS already finished */
+ rc = 0;
+ goto free_signal_seq_array;
+ }
+
+ sig_waitcs_cmpl =
+ container_of(sig_fence, struct hl_cs_compl, base_fence);
+
+ if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ dev_err(hdev->dev,
+ "CS seq 0x%llx is not of a signal CS\n",
+ signal_seq);
+ dma_fence_put(sig_fence);
+ rc = -EINVAL;
+ goto free_signal_seq_array;
+ }
+
+ if (dma_fence_is_signaled(sig_fence)) {
+ /* signal CS already finished */
+ dma_fence_put(sig_fence);
+ rc = 0;
+ goto free_signal_seq_array;
+ }
+ }
+
+ /* increment refcnt for context */
+ hl_ctx_get(hdev, ctx);
+
+ rc = allocate_cs(hdev, ctx, cs_type, &cs);
+ if (rc) {
+ if (cs_type == CS_TYPE_WAIT)
+ dma_fence_put(sig_fence);
+ hl_ctx_put(ctx);
+ goto free_signal_seq_array;
+ }
+
+ /*
+ * Save the signal CS fence for later initialization right before
+ * hanging the wait CS on the queue.
+ */
+ if (cs->type == CS_TYPE_WAIT)
+ cs->signal_fence = sig_fence;
+
+ hl_debugfs_add_cs(cs);
+
+ *cs_seq = cs->sequence;
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto put_cs;
+ }
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb) {
+ kfree(job);
+ rc = -EFAULT;
+ goto put_cs;
+ }
+
+ if (cs->type == CS_TYPE_WAIT)
+ cb_size = hdev->asic_funcs->get_wait_cb_size(hdev);
+ else
+ cb_size = hdev->asic_funcs->get_signal_cb_size(hdev);
+
+ job->id = 0;
+ job->cs = cs;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = q_idx;
+
+ /*
+ * No need for parsing; the user CB is the patched CB.
+ * We call hl_cb_destroy() for two reasons: we don't need the CB in
+ * the CB idr anymore, and we must decrement its refcount as it was
+ * incremented inside hl_cb_kernel_create().
+ */
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size;
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+ list_add_tail(&job->cs_node, &cs->job_list);
+
+ /* increment refcount as for external queues we get completion */
+ cs_get(cs);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = hl_hw_queue_schedule_cs(cs);
+ if (rc) {
+ if (rc != -EAGAIN)
+ dev_err(hdev->dev,
+ "Failed to submit CS %d.%llu to H/W queues, error %d\n",
+ ctx->asid, cs->sequence, rc);
+ goto free_cs_object;
+ }
+
+ rc = HL_CS_STATUS_SUCCESS;
+ goto put_cs;
+
+free_cs_object:
+ cs_rollback(hdev, cs);
+ *cs_seq = ULLONG_MAX;
+ /* The path below is both for good and erroneous exits */
+put_cs:
+ /* We finished with the CS in this function, so put the ref */
+ cs_put(cs);
+free_signal_seq_array:
+ if (cs_type == CS_TYPE_WAIT)
+ kfree(signal_seq_arr);
+free_cs_chunk_array:
+ kfree(cs_chunk_array);
+out:
+ return rc;
+}
+
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
struct hl_device *hdev = hpriv->hdev;
union hl_cs_args *args = data;
struct hl_ctx *ctx = hpriv->ctx;
void __user *chunks_execute, *chunks_restore;
- u32 num_chunks_execute, num_chunks_restore;
+ enum hl_cs_type cs_type;
+ u32 num_chunks_execute, num_chunks_restore, sig_wait_flags;
u64 cs_seq = ULONG_MAX;
int rc, do_ctx_switch;
bool need_soft_reset = false;
@@ -671,12 +949,44 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
goto out;
}
+ sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT;
+
+ if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) {
+ dev_err(hdev->dev,
+ "Signal and wait CS flags are mutually exclusive, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) &&
+ (!hdev->supports_sync_stream))) {
+ dev_err(hdev->dev, "Sync stream CS is not supported\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL)
+ cs_type = CS_TYPE_SIGNAL;
+ else if (args->in.cs_flags & HL_CS_FLAGS_WAIT)
+ cs_type = CS_TYPE_WAIT;
+ else
+ cs_type = CS_TYPE_DEFAULT;
+
chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute;
num_chunks_execute = args->in.num_chunks_execute;
- if (!num_chunks_execute) {
+ if (cs_type == CS_TYPE_DEFAULT) {
+ if (!num_chunks_execute) {
+ dev_err(hdev->dev,
+ "Got execute CS with 0 chunks, context %d\n",
+ ctx->asid);
+ rc = -EINVAL;
+ goto out;
+ }
+ } else if (num_chunks_execute != 1) {
dev_err(hdev->dev,
- "Got execute CS with 0 chunks, context %d\n",
+ "Sync stream CS mandates one chunk only, context %d\n",
ctx->asid);
rc = -EINVAL;
goto out;
@@ -722,7 +1032,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
"Need to run restore phase but restore CS is empty\n");
rc = 0;
} else {
- rc = _hl_cs_ioctl(hpriv, chunks_restore,
+ rc = cs_ioctl_default(hpriv, chunks_restore,
num_chunks_restore, &cs_seq);
}
@@ -764,7 +1074,12 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
}
}
- rc = _hl_cs_ioctl(hpriv, chunks_execute, num_chunks_execute, &cs_seq);
+ if (cs_type == CS_TYPE_DEFAULT)
+ rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute,
+ &cs_seq);
+ else
+ rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute,
+ num_chunks_execute, &cs_seq);
out:
if (rc != -EAGAIN) {
@@ -796,6 +1111,10 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
fence = hl_ctx_get_fence(ctx, seq);
if (IS_ERR(fence)) {
rc = PTR_ERR(fence);
+ if (rc == -EINVAL)
+ dev_notice_ratelimited(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu\n",
+ seq, ctx->cs_sequence);
} else if (fence) {
rc = dma_fence_wait_timeout(fence, true, timeout);
if (fence->error == -ETIMEDOUT)
@@ -803,8 +1122,12 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
else if (fence->error == -EIO)
rc = -EIO;
dma_fence_put(fence);
- } else
+ } else {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ seq, ctx->cs_sequence);
rc = 1;
+ }
hl_ctx_put(ctx);
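
cs_ioctl_signal_wait() above uses a common ioctl idiom: bound-check a user-supplied element count, allocate with kmalloc_array(), and copy the array in with copy_from_user(). A hedged, self-contained sketch of just that idiom, assuming kernel context (the struct and limit are illustrative, not the real hl_cs_chunk):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define DEMO_MAX_CHUNKS 64                      /* illustrative bound */

struct demo_chunk {
        __u64 cb_handle;
        __u32 queue_index;
        __u32 cb_size;
};

/* Copy a bounded, user-supplied array into kernel memory. */
static struct demo_chunk *demo_copy_chunks(void __user *uptr, u32 n)
{
        struct demo_chunk *chunks;

        if (!n || n > DEMO_MAX_CHUNKS)
                return ERR_PTR(-EINVAL);        /* reject 0 and oversized counts */

        chunks = kmalloc_array(n, sizeof(*chunks), GFP_KERNEL);
        if (!chunks)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(chunks, uptr, n * sizeof(*chunks))) {
                kfree(chunks);
                return ERR_PTR(-EFAULT);
        }

        return chunks;
}
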
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 2df6fb87e7ff..ec92b3506b1f 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -170,24 +170,16 @@ int hl_ctx_put(struct hl_ctx *ctx)
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
- struct hl_device *hdev = ctx->hdev;
struct dma_fence *fence;
spin_lock(&ctx->cs_lock);
if (seq >= ctx->cs_sequence) {
- dev_notice_ratelimited(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu\n",
- seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
return ERR_PTR(-EINVAL);
}
-
if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
- seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
return NULL;
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 756d36ed5d95..3c8dcdfba20c 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -970,6 +970,98 @@ static ssize_t hl_device_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n", hdev->clock_gating);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't change clock gating during reset\n");
+ return 0;
+ }
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value) {
+ hdev->clock_gating = 1;
+ if (hdev->asic_funcs->enable_clock_gating)
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ } else {
+ if (hdev->asic_funcs->disable_clock_gating)
+ hdev->asic_funcs->disable_clock_gating(hdev);
+ hdev->clock_gating = 0;
+ }
+
+ return count;
+}
+
+static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ if (atomic_read(&hdev->in_reset)) {
+ dev_warn_ratelimited(hdev->dev,
+ "Can't change stop on error during reset\n");
+ return 0;
+ }
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ hdev->stop_on_err = value ? 1 : 0;
+
+ hl_device_reset(hdev, false, false);
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1015,6 +1107,18 @@ static const struct file_operations hl_device_fops = {
.write = hl_device_write
};
+static const struct file_operations hl_clk_gate_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_clk_gate_read,
+ .write = hl_clk_gate_write
+};
+
+static const struct file_operations hl_stop_on_err_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_stop_on_err_read,
+ .write = hl_stop_on_err_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1152,6 +1256,18 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_device_fops);
+ debugfs_create_file("clk_gate",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_clk_gate_fops);
+
+ debugfs_create_file("stop_on_err",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_stop_on_err_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
ent = debugfs_create_file(hl_debugfs_list[i].name,
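
The clk_gate and stop_on_err files added above use a routine debugfs recipe: format the value into a small stack buffer for reads, and parse writes with kstrtouint_from_user(). A stripped-down sketch of that recipe, assuming kernel context (the knob and names are illustrative; the driver's versions add reset checks and call into asic_funcs):

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static u32 demo_knob;                           /* illustrative value */

static ssize_t demo_knob_read(struct file *f, char __user *buf,
                              size_t count, loff_t *ppos)
{
        char tmp[16];
        int len = scnprintf(tmp, sizeof(tmp), "%u\n", demo_knob);

        return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static ssize_t demo_knob_write(struct file *f, const char __user *buf,
                               size_t count, loff_t *ppos)
{
        u32 value;
        int rc = kstrtouint_from_user(buf, count, 10, &value);

        if (rc)
                return rc;

        demo_knob = value;                      /* side effects would go here */
        return count;
}

static const struct file_operations demo_knob_fops = {
        .owner = THIS_MODULE,
        .read  = demo_knob_read,
        .write = demo_knob_write,
};

A debugfs_create_file("demo_knob", 0644, parent, NULL, &demo_knob_fops) call would then expose it, mirroring the registrations above.
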
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index aef4de36b7aa..2b38a119704c 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -256,6 +256,10 @@ static int device_early_init(struct hl_device *hdev)
goya_set_asic_funcs(hdev);
strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
break;
+ case ASIC_GAUDI:
+ gaudi_set_asic_funcs(hdev);
+ sprintf(hdev->asic_name, "GAUDI");
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -603,6 +607,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
+ if (!hdev->hard_reset_pending)
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
goto out;
}
@@ -613,6 +620,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
+ hdev->asic_funcs->disable_clock_gating(hdev);
hdev->in_debug = 1;
out:
@@ -718,7 +726,7 @@ disable_device:
return rc;
}
-static void device_kill_open_processes(struct hl_device *hdev)
+static int device_kill_open_processes(struct hl_device *hdev)
{
u16 pending_total, pending_cnt;
struct hl_fpriv *hpriv;
@@ -771,9 +779,7 @@ static void device_kill_open_processes(struct hl_device *hdev)
ssleep(1);
}
- if (!list_empty(&hdev->fpriv_list))
- dev_crit(hdev->dev,
- "Going to hard reset with open user contexts\n");
+ return list_empty(&hdev->fpriv_list) ? 0 : -EBUSY;
}
static void device_hard_reset_pending(struct work_struct *work)
@@ -793,6 +799,7 @@ static void device_hard_reset_pending(struct work_struct *work)
* @hdev: pointer to habanalabs device structure
* @hard_reset: should we do hard reset to all engines or just reset the
* compute/dma engines
+ * @from_hard_reset_thread: is the caller the hard-reset thread
*
* Block future CS and wait for pending CS to be enqueued
* Call ASIC H/W fini
@@ -815,6 +822,11 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
return 0;
}
+ if ((!hard_reset) && (!hdev->supports_soft_reset)) {
+ dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
+ hard_reset = true;
+ }
+
/*
* Prevent concurrency in this function - only one reset should be
* done at any given time. Only need to perform this if we didn't
@@ -894,7 +906,12 @@ again:
* process can't really exit until all its CSs are done, which
* is what we do in cs rollback
*/
- device_kill_open_processes(hdev);
+ rc = device_kill_open_processes(hdev);
+ if (rc) {
+ dev_crit(hdev->dev,
+ "Failed to kill all open processes, stopping hard reset\n");
+ goto out_err;
+ }
/* Flush the Event queue workers to make sure no other thread is
* reading or writing to registers during the reset
@@ -1062,7 +1079,7 @@ out_err:
*/
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
- int i, rc, cq_ready_cnt;
+ int i, rc, cq_cnt, cq_ready_cnt;
char *name;
bool add_cdev_sysfs_on_err = false;
@@ -1120,14 +1137,16 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto sw_fini;
}
+ cq_cnt = hdev->asic_prop.completion_queues_count;
+
/*
* Initialize the completion queues. Must be done before hw_init,
* because there the addresses of the completion queues are being
* passed as arguments to request_irq
*/
- hdev->completion_queue =
- kcalloc(hdev->asic_prop.completion_queues_count,
- sizeof(*hdev->completion_queue), GFP_KERNEL);
+ hdev->completion_queue = kcalloc(cq_cnt,
+ sizeof(*hdev->completion_queue),
+ GFP_KERNEL);
if (!hdev->completion_queue) {
dev_err(hdev->dev, "failed to allocate completion queues\n");
@@ -1135,10 +1154,9 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto hw_queues_destroy;
}
- for (i = 0, cq_ready_cnt = 0;
- i < hdev->asic_prop.completion_queues_count;
- i++, cq_ready_cnt++) {
- rc = hl_cq_init(hdev, &hdev->completion_queue[i], i);
+ for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
+ rc = hl_cq_init(hdev, &hdev->completion_queue[i],
+ hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
if (rc) {
dev_err(hdev->dev,
"failed to initialize completion queue\n");
@@ -1325,11 +1343,12 @@ void hl_device_fini(struct hl_device *hdev)
* This function is competing with the reset function, so try to
* take the reset atomic and if we are already in middle of reset,
* wait until reset function is finished. Reset function is designed
- * to always finish (could take up to a few seconds in worst case).
+ * to always finish. However, in Gaudi, because of all the network
+ * ports, the hard reset could take between 10 and 30 seconds
*/
timeout = ktime_add_us(ktime_get(),
- HL_PENDING_RESET_PER_SEC * 1000 * 1000 * 4);
+ HL_HARD_RESET_MAX_TIMEOUT * 1000 * 1000);
rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
while (rc) {
usleep_range(50, 200);
@@ -1375,7 +1394,9 @@ void hl_device_fini(struct hl_device *hdev)
* can't really exit until all its CSs are done, which is what we
* do in cs rollback
*/
- device_kill_open_processes(hdev);
+ rc = device_kill_open_processes(hdev);
+ if (rc)
+ dev_crit(hdev->dev, "Failed to kill all open processes\n");
hl_cb_pool_fini(hdev);
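
The device.c changes above lean on the per-ASIC ops table: device_early_init() picks the Gaudi or Goya function set, and hl_device_init() now resolves each completion queue to its H/W queue through asic_funcs->get_queue_id_for_cq() instead of assuming a 1:1 mapping. For Gaudi this presumably goes through a table like gaudi_cq_assignment shown further down in this diff. A hedged sketch of the dispatch shape (placeholder values, not real queue IDs):

#include <linux/types.h>

struct demo_asic_funcs {
        u32 (*get_queue_id_for_cq)(u32 cq_idx);
};

/* Gaudi-style mapping: completion queues are backed only by the external
 * DMA queues, so the CQ index is translated through a fixed table.
 */
static const u32 demo_cq_to_queue[] = { 0, 1, 2, 3 };

static u32 demo_get_queue_id_for_cq(u32 cq_idx)
{
        return demo_cq_to_queue[cq_idx];
}

static const struct demo_asic_funcs demo_funcs = {
        .get_queue_id_for_cq = demo_get_queue_id_for_cq,
};

/* Common code only ever calls through the ops table, as in the
 * hl_cq_init() loop above.
 */
static u32 demo_resolve(const struct demo_asic_funcs *funcs, u32 cq_idx)
{
        return funcs->get_queue_id_for_cq(cq_idx);
}
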
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
index f5bd03171dac..baf790cf4b78 100644
--- a/drivers/misc/habanalabs/firmware_if.c
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -6,20 +6,22 @@
*/
#include "habanalabs.h"
+#include "include/hl_boot_if.h"
#include <linux/firmware.h>
#include <linux/genalloc.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/slab.h>
/**
- * hl_fw_push_fw_to_device() - Push FW code to device.
+ * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
* @hdev: pointer to hl_device structure.
*
* Copy fw code from firmware file to device memory.
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst)
{
const struct firmware *fw;
@@ -129,6 +131,68 @@ out:
return rc;
}
+int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
+{
+ struct armcp_packet pkt;
+ long result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(event_type);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
+
+ return rc;
+}
+
+int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
+ size_t irq_arr_size)
+{
+ struct armcp_unmask_irq_arr_packet *pkt;
+ size_t total_pkt_size;
+ long result;
+ int rc;
+
+ total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ irq_arr_size;
+
+ /* data should be aligned to 8 bytes so that ArmCP can copy it */
+ total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
+
+ /* total_pkt_size is cast to u16 later on */
+ if (total_pkt_size > USHRT_MAX) {
+ dev_err(hdev->dev, "too many elements in IRQ array\n");
+ return -EINVAL;
+ }
+
+ pkt = kzalloc(total_pkt_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
+ memcpy(&pkt->irqs, irq_arr, irq_arr_size);
+
+ pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
+ total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to unmask IRQ array\n");
+
+ kfree(pkt);
+
+ return rc;
+}
+
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
struct armcp_packet test_pkt = {};
@@ -286,3 +350,232 @@ out:
return rc;
}
+
+static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
+{
+ u32 err_val;
+
+ /* Some of the firmware status codes are deprecated in newer f/w
+ * versions. In those versions, the errors are reported
+ * in different registers. Therefore, we need to check those
+ * registers and print the exact errors. Moreover, there
+ * may be multiple errors, so we need to report on each error
+ * separately. Some of the error codes might indicate a state
+ * that is not an error per se, but it is an error in a production
+ * environment
+ */
+ err_val = RREG32(boot_err0_reg);
+ if (!(err_val & CPU_BOOT_ERR0_ENABLED))
+ return;
+
+ if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
+ dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
+ if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
+ dev_warn(hdev->dev,
+ "Device boot warning - Skipped DRAM initialization\n");
+ if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED)
+ dev_warn(hdev->dev,
+ "Device boot error - Skipped waiting for BMC\n");
+ if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
+ dev_err(hdev->dev,
+ "Device boot error - Serdes data from BMC not available\n");
+ if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
+ dev_err(hdev->dev,
+ "Device boot error - NIC F/W initialization failed\n");
+}
+
+int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
+ u32 boot_err0_reg, bool skip_bmc,
+ u32 cpu_timeout, u32 boot_fit_timeout)
+{
+ u32 status;
+ int rc;
+
+ dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
+ cpu_timeout / USEC_PER_SEC);
+
+ /* Wait for boot FIT request */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
+ 10000,
+ boot_fit_timeout);
+
+ if (rc) {
+ dev_dbg(hdev->dev,
+ "No boot fit request received, resuming boot\n");
+ } else {
+ rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
+ if (rc)
+ goto out;
+
+ /* Clear device CPU message status */
+ WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
+
+ /* Signal device CPU that boot loader is ready */
+ WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
+
+ /* Poll for CPU device ack */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_msg_status_reg,
+ status,
+ status == CPU_MSG_OK,
+ 10000,
+ boot_fit_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout waiting for boot fit load ack\n");
+ goto out;
+ }
+
+ /* Clear message */
+ WREG32(msg_to_cpu_reg, KMD_MSG_NA);
+ }
+
+ /* Make sure CPU boot-loader is running */
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_DRAM_RDY) ||
+ (status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
+ (status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ /* Read U-Boot, preboot versions now in case we will later fail */
+ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);
+
+ /* Some of the status codes below are deprecated in newer f/w
+ * versions but we keep them here for backward compatibility
+ */
+ if (rc) {
+ switch (status) {
+ case CPU_BOOT_STATUS_NA:
+ dev_err(hdev->dev,
+ "Device boot error - BTL did NOT run\n");
+ break;
+ case CPU_BOOT_STATUS_IN_WFE:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck inside WFE loop\n");
+ break;
+ case CPU_BOOT_STATUS_IN_BTL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in BTL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_PREBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in Preboot\n");
+ break;
+ case CPU_BOOT_STATUS_IN_SPL:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in SPL\n");
+ break;
+ case CPU_BOOT_STATUS_IN_UBOOT:
+ dev_err(hdev->dev,
+ "Device boot error - Stuck in u-boot\n");
+ break;
+ case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - DRAM initialization failed\n");
+ break;
+ case CPU_BOOT_STATUS_UBOOT_NOT_READY:
+ dev_err(hdev->dev,
+ "Device boot error - u-boot stopped by user\n");
+ break;
+ case CPU_BOOT_STATUS_TS_INIT_FAIL:
+ dev_err(hdev->dev,
+ "Device boot error - Thermal Sensor initialization failed\n");
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Device boot error - Invalid status code %d\n",
+ status);
+ break;
+ }
+
+ rc = -EIO;
+ goto out;
+ }
+
+ if (!hdev->fw_loading) {
+ dev_info(hdev->dev, "Skip loading FW\n");
+ goto out;
+ }
+
+ if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
+ goto out;
+
+ dev_info(hdev->dev,
+ "Loading firmware to device, may take some time...\n");
+
+ rc = hdev->asic_funcs->load_firmware_to_device(hdev);
+ if (rc)
+ goto out;
+
+ if (skip_bmc) {
+ WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
+
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
+ 10000,
+ cpu_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get ACK on skipping BMC, %d\n",
+ status);
+ WREG32(msg_to_cpu_reg, KMD_MSG_NA);
+ rc = -EIO;
+ goto out;
+ }
+ }
+
+ WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
+
+ rc = hl_poll_timeout(
+ hdev,
+ cpu_boot_status_reg,
+ status,
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ 10000,
+ cpu_timeout);
+
+ /* Clear message */
+ WREG32(msg_to_cpu_reg, KMD_MSG_NA);
+
+ if (rc) {
+ if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
+ dev_err(hdev->dev,
+ "Device reports FIT image is corrupted\n");
+ else
+ dev_err(hdev->dev,
+ "Device failed to load, %d\n", status);
+
+ rc = -EIO;
+ goto out;
+ }
+
+ dev_info(hdev->dev, "Successfully loaded firmware to device\n");
+
+out:
+ fw_read_errors(hdev, boot_err0_reg);
+
+ return rc;
+}
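
hl_fw_init_cpu() above is mostly a handshake: write a message register, then spin in hl_poll_timeout() until a status register reaches an expected state or a timeout expires, and decode the final status on failure. For readers who want the shape of that polling idiom outside kernel context, a small userspace-flavoured sketch (the "register" is just a pointer here; the real macro reads device registers and uses kernel timing primitives):

#include <stdint.h>
#include <time.h>
#include <unistd.h>

/* Poll *reg until it equals expected or timeout_us elapses.
 * Returns 0 on success, -1 on timeout; *status holds the last value read,
 * so the caller can decode the failure, as hl_fw_init_cpu() does.
 */
static int demo_poll_timeout(volatile uint32_t *reg, uint32_t expected,
                             unsigned long sleep_us, unsigned long timeout_us,
                             uint32_t *status)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                *status = *reg;
                if (*status == expected)
                        return 0;

                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000000L +
                    (now.tv_nsec - start.tv_nsec) / 1000 >= (long)timeout_us)
                        return -1;              /* timed out */

                usleep(sleep_us);
        }
}
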
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
new file mode 100644
index 000000000000..f802cdc980ca
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+subdir-ccflags-y += -I$(src)
+
+HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
+ gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
new file mode 100644
index 000000000000..61f88e9884ce
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -0,0 +1,6748 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/hw_ip/mmu/mmu_general.h"
+#include "include/hw_ip/mmu/mmu_v1_1.h"
+#include "include/gaudi/gaudi_masks.h"
+#include "include/gaudi/gaudi_fw_if.h"
+#include "include/gaudi/gaudi_reg_map.h"
+#include "include/gaudi/gaudi_async_ids_map_extended.h"
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/hwmon.h>
+#include <linux/genalloc.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+/*
+ * Gaudi security scheme:
+ *
+ * 1. Host is protected by:
+ * - Range registers
+ * - MMU
+ *
+ * 2. DDR is protected by:
+ * - Range registers (protect the first 512MB)
+ *
+ * 3. Configuration is protected by:
+ * - Range registers
+ * - Protection bits
+ *
+ * MMU is always enabled.
+ *
+ * QMAN DMA channels 0,1,5 (PCI DMA):
+ * - DMA is not secured.
+ * - PQ and CQ are secured.
+ * - CP is secured: The driver needs to parse CB but WREG should be allowed
+ * because of TDMA (tensor DMA). Hence, WREG is never
+ * secured.
+ *
+ * When the driver needs to use DMA it will check that Gaudi is idle, set DMA
+ * channel 0 to be secured, execute the DMA and change it back to not secured.
+ * Currently, the driver doesn't use the DMA while there are compute jobs
+ * running.
+ *
+ * The current use cases for the driver to use the DMA are:
+ * - Clear SRAM on context switch (happens on context switch when device is
+ * idle)
+ * - MMU page tables area clear (happens on init)
+ *
+ * QMAN DMA 2-4,6,7, TPC, MME, NIC:
+ * PQ is secured and is located on the Host (HBM CON TPC3 bug)
+ * CQ, CP and the engine are not secured
+ *
+ */
+
+#define GAUDI_BOOT_FIT_FILE "habanalabs/gaudi/gaudi-boot-fit.itb"
+#define GAUDI_LINUX_FW_FILE "habanalabs/gaudi/gaudi-fit.itb"
+#define GAUDI_TPC_FW_FILE "habanalabs/gaudi/gaudi_tpc.bin"
+
+#define GAUDI_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
+
+#define GAUDI_RESET_TIMEOUT_MSEC 1000 /* 1000ms */
+#define GAUDI_RESET_WAIT_MSEC 1 /* 1ms */
+#define GAUDI_CPU_RESET_WAIT_MSEC 200 /* 200ms */
+#define GAUDI_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
+
+#define GAUDI_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
+#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC 20000 /* 20s */
+#define GAUDI_PLDM_SRESET_TIMEOUT_MSEC 14000 /* 14s */
+#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC 1000000 /* 1s */
+#define GAUDI_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
+#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
+
+#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
+
+#define GAUDI_MAX_STRING_LEN 20
+
+#define GAUDI_CB_POOL_CB_CNT 512
+#define GAUDI_CB_POOL_CB_SIZE 0x20000 /* 128KB */
+
+#define GAUDI_ALLOC_CPU_MEM_RETRY_CNT 3
+
+#define GAUDI_NUM_OF_TPC_INTR_CAUSE 20
+
+#define GAUDI_NUM_OF_QM_ERR_CAUSE 16
+
+#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
+
+#define GAUDI_ARB_WDT_TIMEOUT 0x400000
+
+static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
+ "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
+ "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
+ "gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3",
+ "gaudi cpu eq"
+};
+
+static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
+ [GAUDI_PCI_DMA_1] = 0,
+ [GAUDI_PCI_DMA_2] = 1,
+ [GAUDI_PCI_DMA_3] = 5,
+ [GAUDI_HBM_DMA_1] = 2,
+ [GAUDI_HBM_DMA_2] = 3,
+ [GAUDI_HBM_DMA_3] = 4,
+ [GAUDI_HBM_DMA_4] = 6,
+ [GAUDI_HBM_DMA_5] = 7
+};
+
+static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
+ [0] = GAUDI_QUEUE_ID_DMA_0_0,
+ [1] = GAUDI_QUEUE_ID_DMA_0_1,
+ [2] = GAUDI_QUEUE_ID_DMA_0_2,
+ [3] = GAUDI_QUEUE_ID_DMA_0_3,
+ [4] = GAUDI_QUEUE_ID_DMA_1_0,
+ [5] = GAUDI_QUEUE_ID_DMA_1_1,
+ [6] = GAUDI_QUEUE_ID_DMA_1_2,
+ [7] = GAUDI_QUEUE_ID_DMA_1_3,
+ [8] = GAUDI_QUEUE_ID_DMA_5_0,
+ [9] = GAUDI_QUEUE_ID_DMA_5_1,
+ [10] = GAUDI_QUEUE_ID_DMA_5_2,
+ [11] = GAUDI_QUEUE_ID_DMA_5_3
+};
+
+static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
+ [PACKET_WREG_32] = sizeof(struct packet_wreg32),
+ [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
+ [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
+ [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
+ [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
+ [PACKET_REPEAT] = sizeof(struct packet_repeat),
+ [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
+ [PACKET_FENCE] = sizeof(struct packet_fence),
+ [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
+ [PACKET_NOP] = sizeof(struct packet_nop),
+ [PACKET_STOP] = sizeof(struct packet_stop),
+ [PACKET_ARB_POINT] = sizeof(struct packet_arb_point),
+ [PACKET_WAIT] = sizeof(struct packet_wait),
+ [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
+};
+
+static const char * const
+gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
+ "tpc_address_exceed_slm",
+ "tpc_div_by_0",
+ "tpc_spu_mac_overflow",
+ "tpc_spu_addsub_overflow",
+ "tpc_spu_abs_overflow",
+ "tpc_spu_fp_dst_nan_inf",
+ "tpc_spu_fp_dst_denorm",
+ "tpc_vpu_mac_overflow",
+ "tpc_vpu_addsub_overflow",
+ "tpc_vpu_abs_overflow",
+ "tpc_vpu_fp_dst_nan_inf",
+ "tpc_vpu_fp_dst_denorm",
+ "tpc_assertions",
+ "tpc_illegal_instruction",
+ "tpc_pc_wrap_around",
+ "tpc_qm_sw_err",
+ "tpc_hbw_rresp_err",
+ "tpc_hbw_bresp_err",
+ "tpc_lbw_rresp_err",
+ "tpc_lbw_bresp_err"
+};
+
+static const char * const
+gaudi_qman_error_cause[GAUDI_NUM_OF_QM_ERR_CAUSE] = {
+ "PQ AXI HBW error",
+ "CQ AXI HBW error",
+ "CP AXI HBW error",
+ "CP error due to undefined OPCODE",
+ "CP encountered STOP OPCODE",
+ "CP AXI LBW error",
+ "CP WRREG32 or WRBULK returned error",
+ "N/A",
+ "FENCE 0 inc over max value and clipped",
+ "FENCE 1 inc over max value and clipped",
+ "FENCE 2 inc over max value and clipped",
+ "FENCE 3 inc over max value and clipped",
+ "FENCE 0 dec under min value and clipped",
+ "FENCE 1 dec under min value and clipped",
+ "FENCE 2 dec under min value and clipped",
+ "FENCE 3 dec under min value and clipped"
+};
+
+static const char * const
+gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
+ "Choice push while full error",
+ "Choice Q watchdog error",
+ "MSG AXI LBW returned with error"
+};
+
+static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_2 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_3 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_0 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_1 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_2 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_3 */
+ QUEUE_TYPE_CPU, /* GAUDI_QUEUE_ID_CPU_PQ */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */
+ QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_3 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_0 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */
+ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_3 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_0 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_1 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_2 */
+ QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */
+};
+
+static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
+ u64 phys_addr);
+static int gaudi_send_job_on_qman0(struct hl_device *hdev,
+ struct hl_cs_job *job);
+static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
+ u32 size, u64 val);
+static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
+ u32 tpc_id);
+static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
+static int gaudi_armcp_info_get(struct hl_device *hdev);
+static void gaudi_disable_clock_gating(struct hl_device *hdev);
+static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
+
+static int gaudi_get_fixed_properties(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i;
+
+ if (GAUDI_QUEUE_ID_SIZE >= HL_MAX_QUEUES) {
+ dev_err(hdev->dev,
+ "Number of H/W queues must be smaller than %d\n",
+ HL_MAX_QUEUES);
+ return -EFAULT;
+ }
+
+ for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
+ prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 1;
+ } else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
+ prop->hw_queues_props[i].driver_only = 1;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
+ } else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
+ prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
+ } else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) {
+ prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
+ prop->hw_queues_props[i].driver_only = 0;
+ prop->hw_queues_props[i].requires_kernel_cb = 0;
+ }
+ }
+
+ for (; i < HL_MAX_QUEUES; i++)
+ prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
+
+ prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
+
+ prop->dram_base_address = DRAM_PHYS_BASE;
+ prop->dram_size = GAUDI_HBM_SIZE_32GB;
+ prop->dram_end_address = prop->dram_base_address +
+ prop->dram_size;
+ prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
+
+ prop->sram_base_address = SRAM_BASE_ADDR;
+ prop->sram_size = SRAM_SIZE;
+ prop->sram_end_address = prop->sram_base_address +
+ prop->sram_size;
+ prop->sram_user_base_address = prop->sram_base_address +
+ SRAM_USER_BASE_OFFSET;
+
+ prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
+ if (hdev->pldm)
+ prop->mmu_pgt_size = 0x800000; /* 8MB */
+ else
+ prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
+ prop->mmu_pte_size = HL_PTE_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE;
+ prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->dram_page_size = PAGE_SIZE_2MB;
+
+ prop->pmmu.hop0_shift = HOP0_SHIFT;
+ prop->pmmu.hop1_shift = HOP1_SHIFT;
+ prop->pmmu.hop2_shift = HOP2_SHIFT;
+ prop->pmmu.hop3_shift = HOP3_SHIFT;
+ prop->pmmu.hop4_shift = HOP4_SHIFT;
+ prop->pmmu.hop0_mask = HOP0_MASK;
+ prop->pmmu.hop1_mask = HOP1_MASK;
+ prop->pmmu.hop2_mask = HOP2_MASK;
+ prop->pmmu.hop3_mask = HOP3_MASK;
+ prop->pmmu.hop4_mask = HOP4_MASK;
+ prop->pmmu.start_addr = VA_HOST_SPACE_START;
+ prop->pmmu.end_addr =
+ (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
+ prop->pmmu.page_size = PAGE_SIZE_4KB;
+
+ /* PMMU and HPMMU are the same except of page size */
+ memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
+ prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
+
+ /* shifts and masks are the same in PMMU and DMMU */
+ memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu));
+ prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
+ prop->dmmu.end_addr = VA_HOST_SPACE_END;
+ prop->dmmu.page_size = PAGE_SIZE_2MB;
+
+ prop->cfg_size = CFG_SIZE;
+ prop->max_asid = MAX_ASID;
+ prop->num_of_events = GAUDI_EVENT_SIZE;
+ prop->tpc_enabled_mask = TPC_ENABLED_MASK;
+
+ prop->max_power_default = MAX_POWER_DEFAULT;
+
+ prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
+ prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;
+
+ prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
+ prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
+
+ strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int gaudi_pci_bars_map(struct hl_device *hdev)
+{
+ static const char * const name[] = {"SRAM", "CFG", "HBM"};
+ bool is_wc[3] = {false, false, true};
+ int rc;
+
+ rc = hl_pci_bars_map(hdev, name, is_wc);
+ if (rc)
+ return rc;
+
+ hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] +
+ (CFG_BASE - SPI_FLASH_BASE_ADDR);
+
+ return 0;
+}
+
+static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 old_addr = addr;
+ int rc;
+
+ if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr))
+ return old_addr;
+
+ /* Inbound Region 2 - Bar 4 - Point to HBM */
+ rc = hl_pci_set_dram_bar_base(hdev, 2, 4, addr);
+ if (rc)
+ return U64_MAX;
+
+ if (gaudi) {
+ old_addr = gaudi->hbm_bar_cur_addr;
+ gaudi->hbm_bar_cur_addr = addr;
+ }
+
+ return old_addr;
+}
+
+static int gaudi_init_iatu(struct hl_device *hdev)
+{
+ int rc = 0;
+
+ /* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
+ rc = hl_pci_iatu_write(hdev, 0x314,
+ lower_32_bits(SPI_FLASH_BASE_ADDR));
+ rc |= hl_pci_iatu_write(hdev, 0x318,
+ upper_32_bits(SPI_FLASH_BASE_ADDR));
+ rc |= hl_pci_iatu_write(hdev, 0x300, 0);
+ /* Enable + Bar match + match enable */
+ rc |= hl_pci_iatu_write(hdev, 0x304, 0xC0080200);
+
+ if (rc)
+ return -EIO;
+
+ return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
+ HOST_PHYS_BASE, HOST_PHYS_SIZE);
+}
+
+static int gaudi_early_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct pci_dev *pdev = hdev->pdev;
+ int rc;
+
+ rc = gaudi_get_fixed_properties(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get fixed properties\n");
+ return rc;
+ }
+
+ /* Check BAR sizes */
+ if (pci_resource_len(pdev, SRAM_BAR_ID) != SRAM_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ SRAM_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ SRAM_BAR_ID),
+ SRAM_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
+ dev_err(hdev->dev,
+ "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
+ CFG_BAR_ID,
+ (unsigned long long) pci_resource_len(pdev,
+ CFG_BAR_ID),
+ CFG_BAR_SIZE);
+ return -ENODEV;
+ }
+
+ prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
+
+ rc = hl_pci_init(hdev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int gaudi_early_fini(struct hl_device *hdev)
+{
+ hl_pci_fini(hdev);
+
+ return 0;
+}
+
+/**
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
+ *
+ * @hdev: pointer to hl_device structure
+ */
+static void gaudi_fetch_psoc_frequency(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
+ prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
+ prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
+ prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+}
+
+static int _gaudi_init_tpc_mem(struct hl_device *hdev,
+ dma_addr_t tpc_kernel_src_addr, u32 tpc_kernel_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct packet_lin_dma *init_tpc_mem_pkt;
+ struct hl_cs_job *job;
+ struct hl_cb *cb;
+ u64 dst_addr;
+ u32 cb_size, ctl;
+ u8 tpc_id;
+ int rc;
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb)
+ return -EFAULT;
+
+ init_tpc_mem_pkt = (struct packet_lin_dma *) (uintptr_t)
+ cb->kernel_address;
+ cb_size = sizeof(*init_tpc_mem_pkt);
+ memset(init_tpc_mem_pkt, 0, cb_size);
+
+ init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);
+
+ ctl = ((PACKET_LIN_DMA << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT) |
+ (1 << GAUDI_PKT_CTL_RB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT));
+
+ init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);
+
+ init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
+ dst_addr = (prop->sram_user_base_address &
+ GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
+ GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+ init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = gaudi_send_job_on_qman0(hdev, job);
+
+ if (rc)
+ goto free_job;
+
+ for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
+ rc = gaudi_run_tpc_kernel(hdev, dst_addr, tpc_id);
+ if (rc)
+ break;
+ }
+
+free_job:
+ hl_userptr_delete_list(hdev, &job->userptr_list);
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+/**
+ * gaudi_init_tpc_mem() - Initialize TPC memories.
+ * @hdev: Pointer to hl_device structure.
+ *
+ * Copy the TPC kernel firmware from the firmware file and run it to
+ * initialize the TPC memories.
+ *
+ * Return: 0 for success, negative value for error.
+ */
+static int gaudi_init_tpc_mem(struct hl_device *hdev)
+{
+ const struct firmware *fw;
+ size_t fw_size;
+ void *cpu_addr;
+ dma_addr_t dma_handle;
+ int rc;
+
+ rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc) {
+ dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ GAUDI_TPC_FW_FILE);
+ goto out;
+ }
+
+ fw_size = fw->size;
+ cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size,
+ &dma_handle, GFP_KERNEL | __GFP_ZERO);
+ if (!cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate %zu of dma memory for TPC kernel\n",
+ fw_size);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cpu_addr, fw->data, fw_size);
+
+ rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);
+
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr,
+ dma_handle);
+
+out:
+ release_firmware(fw);
+ return rc;
+}
+
+static int gaudi_late_init(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+
+ rc = gaudi->armcp_info_get(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to get armcp info\n");
+ return rc;
+ }
+
+ rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
+ return rc;
+ }
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
+
+ gaudi_fetch_psoc_frequency(hdev);
+
+ rc = gaudi_mmu_clear_pgt_range(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
+ goto disable_pci_access;
+ }
+
+ rc = gaudi_init_tpc_mem(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize TPC memories\n");
+ goto disable_pci_access;
+ }
+
+ return 0;
+
+disable_pci_access:
+ hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+
+ return rc;
+}
+
+static void gaudi_late_fini(struct hl_device *hdev)
+{
+ const struct hwmon_channel_info **channel_info_arr;
+ int i = 0;
+
+ if (!hdev->hl_chip_info->info)
+ return;
+
+ channel_info_arr = hdev->hl_chip_info->info;
+
+ while (channel_info_arr[i]) {
+ kfree(channel_info_arr[i]->config);
+ kfree(channel_info_arr[i]);
+ i++;
+ }
+
+ kfree(channel_info_arr);
+
+ hdev->hl_chip_info->info = NULL;
+}
+
+static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
+{
+ dma_addr_t dma_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
+ void *virt_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {};
+ int i, j, rc = 0;
+
+ /*
+ * The device CPU works with 40-bit addresses, and bit 39 must be set
+ * to '1' when accessing the host.
+ * Bits 49:39 of the full host address are saved for a later
+ * configuration of the HW to perform the extension to 50 bits.
+ * Because a single HW register holds the extension bits, these bits
+ * must be identical across the entire allocated range.
+ */
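+ /*
+ * Illustrative sketch of the check below, assuming
+ * GAUDI_CPU_PCI_MSB_ADDR() returns the bits above bit 39
+ * (i.e. addr >> 39): a range that does not cross a 2^39 (512GB)
+ * boundary yields the same MSB value for its first and last byte,
+ * so a single extension register can describe the whole range.
+ */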
+
+ for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
+ virt_addr_arr[i] =
+ hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ &dma_addr_arr[i],
+ GFP_KERNEL | __GFP_ZERO);
+ if (!virt_addr_arr[i]) {
+ rc = -ENOMEM;
+ goto free_dma_mem_arr;
+ }
+
+ end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
+ if (GAUDI_CPU_PCI_MSB_ADDR(dma_addr_arr[i]) ==
+ GAUDI_CPU_PCI_MSB_ADDR(end_addr))
+ break;
+ }
+
+ if (i == GAUDI_ALLOC_CPU_MEM_RETRY_CNT) {
+ dev_err(hdev->dev,
+ "MSB of CPU accessible DMA memory are not identical in all range\n");
+ rc = -EFAULT;
+ goto free_dma_mem_arr;
+ }
+
+ hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
+ hdev->cpu_accessible_dma_address = dma_addr_arr[i];
+ hdev->cpu_pci_msb_addr =
+ GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);
+
+ GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
+
+free_dma_mem_arr:
+ for (j = 0 ; j < i ; j++)
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ virt_addr_arr[j],
+ dma_addr_arr[j]);
+
+ return rc;
+}
+
+static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u32 i;
+
+ for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ q = &gaudi->internal_qmans[i];
+ if (!q->pq_kernel_addr)
+ continue;
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
+ q->pq_kernel_addr,
+ q->pq_dma_addr);
+ }
+}
+
+static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ int rc, i;
+
+ for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
+ if (gaudi_queue_type[i] != QUEUE_TYPE_INT)
+ continue;
+
+ q = &gaudi->internal_qmans[i];
+
+ switch (i) {
+ case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3:
+ case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3:
+ q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
+ break;
+ case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
+ q->pq_size = MME_QMAN_SIZE_IN_BYTES;
+ break;
+ case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3:
+ q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
+ break;
+ default:
+ dev_err(hdev->dev, "Bad internal queue index %d", i);
+ rc = -EINVAL;
+ goto free_internal_qmans_pq_mem;
+ }
+
+ q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
+ hdev, q->pq_size,
+ &q->pq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!q->pq_kernel_addr) {
+ rc = -ENOMEM;
+ goto free_internal_qmans_pq_mem;
+ }
+ }
+
+ return 0;
+
+free_internal_qmans_pq_mem:
+ gaudi_free_internal_qmans_pq_mem(hdev);
+ return rc;
+}
+
+static int gaudi_sw_init(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi;
+ u32 i, event_id = 0;
+ int rc;
+
+ /* Allocate device structure */
+ gaudi = kzalloc(sizeof(*gaudi), GFP_KERNEL);
+ if (!gaudi)
+ return -ENOMEM;
+
+ for (i = 0 ; i < ARRAY_SIZE(gaudi_irq_map_table) ; i++) {
+ if (gaudi_irq_map_table[i].valid) {
+ if (event_id == GAUDI_EVENT_SIZE) {
+ dev_err(hdev->dev,
+ "Event array exceeds the limit of %u events\n",
+ GAUDI_EVENT_SIZE);
+ rc = -EINVAL;
+ goto free_gaudi_device;
+ }
+
+ gaudi->events[event_id++] =
+ gaudi_irq_map_table[i].fc_id;
+ }
+ }
+
+ gaudi->armcp_info_get = gaudi_armcp_info_get;
+
+ gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ;
+
+ hdev->asic_specific = gaudi;
+
+ /* Create DMA pool for small allocations */
+ hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
+ &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
+ if (!hdev->dma_pool) {
+ dev_err(hdev->dev, "failed to create DMA pool\n");
+ rc = -ENOMEM;
+ goto free_gaudi_device;
+ }
+
+ rc = gaudi_alloc_cpu_accessible_dma_mem(hdev);
+ if (rc)
+ goto free_dma_pool;
+
+ hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
+ if (!hdev->cpu_accessible_dma_pool) {
+ dev_err(hdev->dev,
+ "Failed to create CPU accessible DMA pool\n");
+ rc = -ENOMEM;
+ goto free_cpu_dma_mem;
+ }
+
+ rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
+ (uintptr_t) hdev->cpu_accessible_dma_mem,
+ HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add memory to CPU accessible DMA pool\n");
+ rc = -EFAULT;
+ goto free_cpu_accessible_dma_pool;
+ }
+
+ rc = gaudi_alloc_internal_qmans_pq_mem(hdev);
+ if (rc)
+ goto free_cpu_accessible_dma_pool;
+
+ spin_lock_init(&gaudi->hw_queues_lock);
+ mutex_init(&gaudi->clk_gate_mutex);
+
+ hdev->supports_sync_stream = true;
+ hdev->supports_coresight = true;
+
+ return 0;
+
+free_cpu_accessible_dma_pool:
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+free_cpu_dma_mem:
+ GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
+ hdev->cpu_pci_msb_addr);
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+free_dma_pool:
+ dma_pool_destroy(hdev->dma_pool);
+free_gaudi_device:
+ kfree(gaudi);
+ return rc;
+}
+
+static int gaudi_sw_fini(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ gaudi_free_internal_qmans_pq_mem(hdev);
+
+ gen_pool_destroy(hdev->cpu_accessible_dma_pool);
+
+ GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
+ hdev->cpu_pci_msb_addr);
+ hdev->asic_funcs->asic_dma_free_coherent(hdev,
+ HL_CPU_ACCESSIBLE_MEM_SIZE,
+ hdev->cpu_accessible_dma_mem,
+ hdev->cpu_accessible_dma_address);
+
+ dma_pool_destroy(hdev->dma_pool);
+
+ mutex_destroy(&gaudi->clk_gate_mutex);
+
+ kfree(gaudi);
+
+ return 0;
+}
+
+static irqreturn_t gaudi_irq_handler_single(int irq, void *arg)
+{
+ struct hl_device *hdev = arg;
+ int i;
+
+ if (hdev->disabled)
+ return IRQ_HANDLED;
+
+ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+ hl_irq_handler_cq(irq, &hdev->completion_queue[i]);
+
+ hl_irq_handler_eq(irq, &hdev->event_queue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * For backward compatibility, new MSI interrupts should be set after the
+ * existing CPU and NIC interrupts.
+ */
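+/*
+ * Illustrative example with hypothetical values: if
+ * GAUDI_EVENT_QUEUE_MSI_IDX were 8 and NIC_NUMBER_OF_ENGINES were 10,
+ * then nr = 3 would map directly to MSI vector 3, the CPU EQ would use
+ * vector 8, and nr = 9 would be pushed past the NIC vectors to
+ * 9 + 10 + 1 = 20.
+ */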
+static int gaudi_pci_irq_vector(struct hl_device *hdev, unsigned int nr,
+ bool cpu_eq)
+{
+ int msi_vec;
+
+ if ((nr != GAUDI_EVENT_QUEUE_MSI_IDX) && (cpu_eq))
+ dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n",
+ GAUDI_EVENT_QUEUE_MSI_IDX);
+
+ msi_vec = ((nr < GAUDI_EVENT_QUEUE_MSI_IDX) || (cpu_eq)) ? nr :
+ (nr + NIC_NUMBER_OF_ENGINES + 1);
+
+ return pci_irq_vector(hdev->pdev, msi_vec);
+}
+
+static int gaudi_enable_msi_single(struct hl_device *hdev)
+{
+ int rc, irq;
+
+ dev_info(hdev->dev, "Working in single MSI IRQ mode\n");
+
+ irq = gaudi_pci_irq_vector(hdev, 0, false);
+ rc = request_irq(irq, gaudi_irq_handler_single, 0,
+ "gaudi single msi", hdev);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to request single MSI IRQ\n");
+
+ return rc;
+}
+
+static int gaudi_enable_msi_multi(struct hl_device *hdev)
+{
+ int cq_cnt = hdev->asic_prop.completion_queues_count;
+ int rc, i, irq_cnt_init, irq;
+
+ for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
+ irq = gaudi_pci_irq_vector(hdev, i, false);
+ rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i],
+ &hdev->completion_queue[i]);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irqs;
+ }
+ }
+
+ irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true);
+ rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt],
+ &hdev->event_queue);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to request IRQ %d", irq);
+ goto free_irqs;
+ }
+
+ return 0;
+
+free_irqs:
+ for (i = 0 ; i < irq_cnt_init ; i++)
+ free_irq(gaudi_pci_irq_vector(hdev, i, false),
+ &hdev->completion_queue[i]);
+ return rc;
+}
+
+static int gaudi_enable_msi(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MSI)
+ return 0;
+
+ rc = pci_alloc_irq_vectors(hdev->pdev, 1, GAUDI_MSI_ENTRIES,
+ PCI_IRQ_MSI);
+ if (rc < 0) {
+ dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
+ return rc;
+ }
+
+ if (rc < NUMBER_OF_INTERRUPTS) {
+ gaudi->multi_msi_mode = false;
+ rc = gaudi_enable_msi_single(hdev);
+ } else {
+ gaudi->multi_msi_mode = true;
+ rc = gaudi_enable_msi_multi(hdev);
+ }
+
+ if (rc)
+ goto free_pci_irq_vectors;
+
+ gaudi->hw_cap_initialized |= HW_CAP_MSI;
+
+ return 0;
+
+free_pci_irq_vectors:
+ pci_free_irq_vectors(hdev->pdev);
+ return rc;
+}
+
+static void gaudi_sync_irqs(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, cq_cnt = hdev->asic_prop.completion_queues_count;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
+ return;
+
+ /* Wait for all pending IRQ handlers to finish */
+ if (gaudi->multi_msi_mode) {
+ for (i = 0 ; i < cq_cnt ; i++)
+ synchronize_irq(gaudi_pci_irq_vector(hdev, i, false));
+
+ synchronize_irq(gaudi_pci_irq_vector(hdev,
+ GAUDI_EVENT_QUEUE_MSI_IDX,
+ true));
+ } else {
+ synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
+ }
+}
+
+static void gaudi_disable_msi(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
+ return;
+
+ gaudi_sync_irqs(hdev);
+
+ if (gaudi->multi_msi_mode) {
+ irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX,
+ true);
+ free_irq(irq, &hdev->event_queue);
+
+ for (i = 0 ; i < cq_cnt ; i++) {
+ irq = gaudi_pci_irq_vector(hdev, i, false);
+ free_irq(irq, &hdev->completion_queue[i]);
+ }
+ } else {
+ free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
+ }
+
+ pci_free_irq_vectors(hdev->pdev);
+
+ gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
+}
+
+static void gaudi_init_scrambler_sram(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
+ return;
+
+ if (!hdev->sram_scrambler_enable)
+ return;
+
+ WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
+
+ gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER;
+}
+
+static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
+ return;
+
+ if (!hdev->dram_scrambler_enable)
+ return;
+
+ WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_2_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_3_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_4_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_5_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_6_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_7_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_0_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_1_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_2_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_3_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_4_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_5_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_6_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_7_SCRAM_HBM_EN,
+ 1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
+
+ gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER;
+}
+
+static void gaudi_init_e2e(struct hl_device *hdev)
+{
+ WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 101);
+
+ WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 297 >> 3);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 908 >> 3);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 19);
+
+ WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 318 >> 3);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 956 >> 3);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 79);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 163);
+
+ WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);
+
+ WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);
+
+ WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 318 >> 3);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 956 >> 3);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 79);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 79);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);
+
+ if (!hdev->dram_scrambler_enable) {
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
+ }
+
+ WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_EN,
+ 1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_EN,
+ 1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN,
+ 1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
+}
+
+static void gaudi_init_hbm_cred(struct hl_device *hdev)
+{
+ u32 hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
+
+ hbm0_wr = 0x33333333;
+ hbm1_wr = 0x33333333;
+ hbm0_rd = 0x77777777;
+ hbm1_rd = 0xDDDDDDDD;
+
+ WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_E_N_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_E_N_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_E_N_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_E_S_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_E_S_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_E_S_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_E_S_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_W_N_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_W_N_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_W_N_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_W_N_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_W_S_HBM0_WR_CRED_CNT, hbm0_wr);
+ WREG32(mmDMA_IF_W_S_HBM1_WR_CRED_CNT, hbm1_wr);
+ WREG32(mmDMA_IF_W_S_HBM0_RD_CRED_CNT, hbm0_rd);
+ WREG32(mmDMA_IF_W_S_HBM1_RD_CRED_CNT, hbm1_rd);
+
+ WREG32(mmDMA_IF_E_N_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_E_S_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_N_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_S_HBM_CRED_EN_0,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+
+ WREG32(mmDMA_IF_E_N_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_E_S_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_N_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+ WREG32(mmDMA_IF_W_S_HBM_CRED_EN_1,
+ (1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
+ (1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
+}
+
+static void gaudi_init_rate_limiter(struct hl_device *hdev)
+{
+ u32 nr, nf, od, sat, rst, timeout;
+ u64 freq;
+
+ nr = RREG32(mmPSOC_HBM_PLL_NR);
+ nf = RREG32(mmPSOC_HBM_PLL_NF);
+ od = RREG32(mmPSOC_HBM_PLL_OD);
+ freq = (50 * (nf + 1)) / ((nr + 1) * (od + 1));
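+ /*
+ * Worked example with hypothetical register values: nf = 31,
+ * nr = 0 and od = 1 give freq = (50 * 32) / (1 * 2) = 800MHz,
+ * which selects the 800MHz profile below.
+ */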
+
+ dev_dbg(hdev->dev, "HBM frequency is %lluMHz\n", freq);
+
+ /* Configuration is for five DDMA channels */
+ if (freq == 800) {
+ sat = 4;
+ rst = 11;
+ timeout = 15;
+ } else if (freq == 900) {
+ sat = 4;
+ rst = 15;
+ timeout = 16;
+ } else if (freq == 950) {
+ sat = 4;
+ rst = 15;
+ timeout = 15;
+ } else {
+ dev_warn(hdev->dev,
+ "unsupported HBM frequency %lluMHz, no rate-limiters\n",
+ freq);
+ return;
+ }
+
+ WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1, 0x111);
+ WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1, 0x111);
+ WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1, 0x111);
+ WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0, 0x111);
+ WREG32(mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1, 0x111);
+
+ if (!hdev->rl_enable) {
+ dev_info(hdev->dev, "Rate limiters disabled\n");
+ return;
+ }
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT, sat);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST, rst);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT, timeout);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN, 1);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT, sat);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT, sat);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST, rst);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST, rst);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT, timeout);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT, timeout);
+
+ WREG32(mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN, 1);
+ WREG32(mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN, 1);
+}
+
+static void gaudi_init_golden_registers(struct hl_device *hdev)
+{
+ u32 tpc_offset;
+ int tpc_id, i;
+
+ gaudi_init_e2e(hdev);
+
+ gaudi_init_hbm_cred(hdev);
+
+ gaudi_init_rate_limiter(hdev);
+
+ gaudi_disable_clock_gating(hdev);
+
+ for (tpc_id = 0, tpc_offset = 0;
+ tpc_id < TPC_NUMBER_OF_ENGINES;
+ tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
+ /* Mask all arithmetic interrupts from TPC */
+ WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFF);
+ /* Set 16 cache lines */
+ WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
+ ICACHE_FETCH_LINE_NUM, 2);
+ }
+
+ /* Make sure 1st 128 bytes in SRAM are 0 for Tensor DMA */
+ for (i = 0 ; i < 128 ; i += 8)
+ writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i);
+
+ WREG32(mmMME0_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+ WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+ WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+ WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
+
+ /* WA for H3-2081 */
+ WREG32(mmPCIE_WRAP_MAX_OUTSTAND, 0x10ff);
+}
+
+static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
+ int qman_id, dma_addr_t qman_pq_addr)
+{
+ u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
+ u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
+ u32 q_off, dma_qm_offset;
+ u32 dma_qm_err_cfg;
+
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+
+ mtr_base_en_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_en_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_en_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_en_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ mtr_base_ws_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_ws_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_ws_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_ws_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = dma_qm_offset + qman_id * 4;
+
+ WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_pq_addr));
+ WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_pq_addr));
+
+ WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HL_QUEUE_LENGTH));
+ WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
+
+ /* The following configuration is needed only once per QMAN */
+ if (qman_id == 0) {
+ /* Configure RAZWI IRQ */
+ dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ dma_qm_err_cfg |=
+ PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+
+ WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
+ dma_id);
+
+ WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
+ QMAN_EXTERNAL_MAKE_TRUSTED);
+
+ WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
+ }
+}
+
+static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
+{
+ u32 dma_offset = dma_id * DMA_CORE_OFFSET;
+ u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT;
+
+ /* Set to maximum possible according to physical size */
+ WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
+ WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0);
+
+ /* The STOP_ON bit implies the operation is not completed in case of RAZWI */
+ if (hdev->stop_on_err)
+ dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;
+
+ WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg);
+ WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset,
+ lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset,
+ upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id);
+ WREG32(mmDMA0_CORE_PROT + dma_offset,
+ 1 << DMA0_CORE_PROT_ERR_VAL_SHIFT);
+ /* If the channel is secured, it should be in MMU bypass mode */
+ WREG32(mmDMA0_CORE_SECURE_PROPS + dma_offset,
+ 1 << DMA0_CORE_SECURE_PROPS_MMBP_SHIFT);
+ WREG32(mmDMA0_CORE_CFG_0 + dma_offset, 1 << DMA0_CORE_CFG_0_EN_SHIFT);
+}
+
+static void gaudi_enable_qman(struct hl_device *hdev, int dma_id,
+ u32 enable_mask)
+{
+ u32 dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+
+ WREG32(mmDMA0_QM_GLBL_CFG0 + dma_qm_offset, enable_mask);
+}
+
+static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_hw_queue *q;
+ int i, j, dma_id, cpu_skip, nic_skip, cq_id = 0, q_idx, msi_vec = 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)
+ return;
+
+ for (i = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
+ dma_id = gaudi_dma_assignment[i];
+ /*
+ * For queues that come after the CPU queue, add 1 to get the
+ * correct queue index. In addition, the CPU EQ and NIC IRQs must
+ * be accounted for in order to get the correct MSI register.
+ */
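+ /*
+ * Illustrative example (exact values depend on the DMA/queue
+ * layout): with dma_id = 2 and stream j = 0, cpu_skip = 1, so
+ * q_idx = 4 * 2 + 0 + 1 = 9, and the MSI vector is shifted by
+ * the NIC engines plus the CPU EQ.
+ */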
+ if (dma_id > 1) {
+ cpu_skip = 1;
+ nic_skip = NIC_NUMBER_OF_ENGINES;
+ } else {
+ cpu_skip = 0;
+ nic_skip = 0;
+ }
+
+ for (j = 0 ; j < QMAN_STREAMS ; j++) {
+ q_idx = 4 * dma_id + j + cpu_skip;
+ q = &hdev->kernel_queues[q_idx];
+ q->cq_id = cq_id++;
+ q->msi_vec = nic_skip + cpu_skip + msi_vec++;
+ gaudi_init_pci_dma_qman(hdev, dma_id, j,
+ q->bus_address);
+ }
+
+ gaudi_init_dma_core(hdev, dma_id);
+
+ gaudi_enable_qman(hdev, dma_id, PCI_DMA_QMAN_ENABLE);
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA;
+}
+
+static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
+ int qman_id, u64 qman_base_addr)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 q_off, dma_qm_offset;
+ u32 dma_qm_err_cfg;
+
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = dma_qm_offset + qman_id * 4;
+
+ if (qman_id < 4) {
+ WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off,
+ lower_32_bits(qman_base_addr));
+ WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off,
+ upper_32_bits(qman_base_addr));
+
+ WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HBM_DMA_QMAN_LENGTH));
+ WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ } else {
+ WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ /* Configure RAZWI IRQ */
+ dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ dma_qm_err_cfg |=
+ HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+ WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);
+
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
+ dma_id);
+
+ WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
+ WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
+ QMAN_INTERNAL_MAKE_TRUSTED);
+ }
+
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
+ WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+}
+
+static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u64 qman_base_addr;
+ int i, j, dma_id, internal_q_index;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)
+ return;
+
+ for (i = 0 ; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1 + i];
+
+ for (j = 0 ; j < QMAN_STREAMS ; j++) {
+ /*
+ * Add the CPU queue in order to get the correct queue
+ * number, as all internal queues are placed after it
+ */
+ internal_q_index = dma_id * QMAN_STREAMS + j + 1;
+
+ q = &gaudi->internal_qmans[internal_q_index];
+ qman_base_addr = (u64) q->pq_dma_addr;
+ gaudi_init_hbm_dma_qman(hdev, dma_id, j,
+ qman_base_addr);
+ }
+
+ /* Initializing lower CP for HBM DMA QMAN */
+ gaudi_init_hbm_dma_qman(hdev, dma_id, 4, 0);
+
+ gaudi_init_dma_core(hdev, dma_id);
+
+ gaudi_enable_qman(hdev, dma_id, HBM_DMA_QMAN_ENABLE);
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA;
+}
+
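+ /*
+ * Configure a single stream of an MME QMAN, same scheme as the HBM DMA
+ * QMANs above: PQ registers for the upper-CP streams (qman_id < 4) and
+ * error/arbitration/protection configuration for the lower CP.
+ */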
+static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
+ int qman_id, u64 qman_base_addr)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 q_off, mme_id;
+ u32 mme_qm_err_cfg;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = mme_offset + qman_id * 4;
+
+ if (qman_id < 4) {
+ WREG32(mmMME0_QM_PQ_BASE_LO_0 + q_off,
+ lower_32_bits(qman_base_addr));
+ WREG32(mmMME0_QM_PQ_BASE_HI_0 + q_off,
+ upper_32_bits(qman_base_addr));
+
+ WREG32(mmMME0_QM_PQ_SIZE_0 + q_off, ilog2(MME_QMAN_LENGTH));
+ WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
+ WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
+ WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ } else {
+ WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ /* Configure RAZWI IRQ */
+ mme_id = mme_offset /
+ (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0);
+
+ mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ mme_qm_err_cfg |=
+ MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+ WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg);
+ WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id +
+ mme_id);
+
+ WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
+ WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
+ QMAN_INTERNAL_MAKE_TRUSTED);
+ }
+
+ WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
+ WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
+ WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
+ WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+}
+
+static void gaudi_init_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u64 qman_base_addr;
+ u32 mme_offset;
+ int i, internal_q_index;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MME)
+ return;
+
+ /*
+ * map GAUDI_QUEUE_ID_MME_0_X to the N_W_MME (mmMME2_QM_BASE)
+ * and GAUDI_QUEUE_ID_MME_1_X to the S_W_MME (mmMME0_QM_BASE)
+ */
+
+ mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
+
+ for (i = 0 ; i < MME_NUMBER_OF_QMANS ; i++) {
+ internal_q_index = GAUDI_QUEUE_ID_MME_0_0 + i;
+ q = &gaudi->internal_qmans[internal_q_index];
+ qman_base_addr = (u64) q->pq_dma_addr;
+ gaudi_init_mme_qman(hdev, mme_offset, (i & 0x3),
+ qman_base_addr);
+ if (i == 3)
+ mme_offset = 0;
+ }
+
+ /* Initializing lower CP for MME QMANs */
+ mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
+ gaudi_init_mme_qman(hdev, mme_offset, 4, 0);
+ gaudi_init_mme_qman(hdev, 0, 4, 0);
+
+ WREG32(mmMME2_QM_GLBL_CFG0, QMAN_MME_ENABLE);
+ WREG32(mmMME0_QM_GLBL_CFG0, QMAN_MME_ENABLE);
+
+ gaudi->hw_cap_initialized |= HW_CAP_MME;
+}
+
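+ /*
+ * Configure a single stream of a TPC QMAN, same scheme as the DMA and
+ * MME QMANs above: PQ registers for the upper-CP streams (qman_id < 4)
+ * and error/arbitration/protection configuration for the lower CP.
+ */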
+static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
+ int qman_id, u64 qman_base_addr)
+{
+ u32 mtr_base_lo, mtr_base_hi;
+ u32 so_base_lo, so_base_hi;
+ u32 q_off, tpc_id;
+ u32 tpc_qm_err_cfg;
+
+ mtr_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ mtr_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
+ so_base_lo = lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ q_off = tpc_offset + qman_id * 4;
+
+ if (qman_id < 4) {
+ WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off,
+ lower_32_bits(qman_base_addr));
+ WREG32(mmTPC0_QM_PQ_BASE_HI_0 + q_off,
+ upper_32_bits(qman_base_addr));
+
+ WREG32(mmTPC0_QM_PQ_SIZE_0 + q_off, ilog2(TPC_QMAN_LENGTH));
+ WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0);
+ WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0);
+
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x81BC);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x81B4);
+ WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+ } else {
+ WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74);
+ WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14);
+ WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C);
+
+ /* Configure RAZWI IRQ */
+ tpc_id = tpc_offset /
+ (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);
+
+ tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
+ if (hdev->stop_on_err) {
+ tpc_qm_err_cfg |=
+ TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;
+ }
+
+ WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg);
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset,
+ lower_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset,
+ upper_32_bits(CFG_BASE +
+ mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR));
+ WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset,
+ gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id +
+ tpc_id);
+
+ WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
+ QM_ARB_ERR_MSG_EN_MASK);
+
+ /* Increase ARB WDT to support streams architecture */
+ WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
+ GAUDI_ARB_WDT_TIMEOUT);
+
+ WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
+ WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
+ QMAN_INTERNAL_MAKE_TRUSTED);
+ }
+
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
+ WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
+}
+
+static void gaudi_init_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+ u64 qman_base_addr;
+ u32 so_base_hi, tpc_offset = 0;
+ u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH -
+ mmTPC0_CFG_SM_BASE_ADDRESS_HIGH;
+ int i, tpc_id, internal_q_index;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)
+ return;
+
+ so_base_hi = upper_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
+
+ for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
+ for (i = 0 ; i < QMAN_STREAMS ; i++) {
+ internal_q_index = GAUDI_QUEUE_ID_TPC_0_0 +
+ tpc_id * QMAN_STREAMS + i;
+ q = &gaudi->internal_qmans[internal_q_index];
+ qman_base_addr = (u64) q->pq_dma_addr;
+ gaudi_init_tpc_qman(hdev, tpc_offset, i,
+ qman_base_addr);
+
+ if (i == 3) {
+ /* Initializing lower CP for TPC QMAN */
+ gaudi_init_tpc_qman(hdev, tpc_offset, 4, 0);
+
+ /* Enable the QMAN and TPC channel */
+ WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset,
+ QMAN_TPC_ENABLE);
+ }
+ }
+
+ WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + tpc_id * tpc_delta,
+ so_base_hi);
+
+ tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
+
+ gaudi->hw_cap_initialized |= 1 << (HW_CAP_TPC_SHIFT + tpc_id);
+ }
+}
+
+static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
+ return;
+
+ WREG32(mmDMA0_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA1_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA5_QM_GLBL_CFG0, 0);
+}
+
+static void gaudi_disable_hbm_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
+ return;
+
+ WREG32(mmDMA2_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA3_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA4_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA6_QM_GLBL_CFG0, 0);
+ WREG32(mmDMA7_QM_GLBL_CFG0, 0);
+}
+
+static void gaudi_disable_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
+ return;
+
+ WREG32(mmMME2_QM_GLBL_CFG0, 0);
+ WREG32(mmMME0_QM_GLBL_CFG0, 0);
+}
+
+static void gaudi_disable_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 tpc_offset = 0;
+ int tpc_id;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
+ WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, 0);
+ tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
+ }
+}
+
+static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
+ return;
+
+ /* Stop upper CPs of QMANs 0.0 to 1.3 and 5.0 to 5.3 */
+ WREG32(mmDMA0_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA1_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA5_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_stop_hbm_dma_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
+ return;
+
+ /* Stop CPs of HBM DMA QMANs */
+
+ WREG32(mmDMA2_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA3_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA4_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA6_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmDMA7_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_stop_mme_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
+ return;
+
+ /* Stop CPs of MME QMANs */
+ WREG32(mmMME2_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmMME0_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_stop_tpc_qmans(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ WREG32(mmTPC0_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC1_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC2_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC3_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC4_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC5_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC6_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+ WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
+}
+
+static void gaudi_pci_dma_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
+ return;
+
+ WREG32(mmDMA0_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA1_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA5_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+}
+
+static void gaudi_hbm_dma_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
+ return;
+
+ WREG32(mmDMA2_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA3_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA4_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA6_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+ WREG32(mmDMA7_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
+}
+
+static void gaudi_mme_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
+ return;
+
+ /* WA for H3-1800 bug: do ACC and SBAB writes twice */
+ WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
+ WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+ WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
+}
+
+static void gaudi_tpc_stall(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
+ return;
+
+ WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+ WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
+}
+
+static void gaudi_enable_clock_gating(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 qman_offset;
+ int i;
+
+ if (!hdev->clock_gating)
+ return;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
+ return;
+
+ /* In case we are in a debug session, don't enable clock gating
+ * as it may interfere
+ */
+ if (hdev->in_debug)
+ return;
+
+ for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
+ qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
+ QMAN_UPPER_CP_CGM_PWR_GATE_EN);
+ }
+
+ for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
+ qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ }
+
+ WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME0_QM_CGM_CFG,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+ WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmMME2_QM_CGM_CFG,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+
+ for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
+ QMAN_CGM1_PWR_GATE_EN);
+ WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
+ QMAN_COMMON_CP_CGM_PWR_GATE_EN);
+
+ qman_offset += TPC_QMAN_OFFSET;
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_CLK_GATE;
+}
+
+static void gaudi_disable_clock_gating(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 qman_offset;
+ int i;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ return;
+
+ for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
+ WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 0);
+ WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, 0);
+
+ qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG);
+ }
+
+ WREG32(mmMME0_QM_CGM_CFG, 0);
+ WREG32(mmMME0_QM_CGM_CFG1, 0);
+ WREG32(mmMME2_QM_CGM_CFG, 0);
+ WREG32(mmMME2_QM_CGM_CFG1, 0);
+
+ for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ WREG32(mmTPC0_QM_CGM_CFG + qman_offset, 0);
+ WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 0);
+
+ qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG);
+ }
+
+ gaudi->hw_cap_initialized &= ~(HW_CAP_CLK_GATE);
+}
+
+static void gaudi_enable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+
+ /* Zero the lower/upper parts of the 64-bit counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
+
+ /* Enable the counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
+}
+
+static void gaudi_disable_timestamp(struct hl_device *hdev)
+{
+ /* Disable the timestamp counter */
+ WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
+}
+
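+ /*
+ * Halt order used below: stop the QMAN command parsers, disable clock
+ * gating, stall the DMA/TPC/MME engine cores, and only then disable
+ * the QMANs themselves and the timestamp counter. On hard reset the
+ * CPU is told to go to WFE first and MSI is disabled at the end; on
+ * soft reset we only sync the IRQs.
+ */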
+static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
+{
+ u32 wait_timeout_ms, cpu_timeout_ms;
+
+ dev_info(hdev->dev,
+ "Halting compute engines and disabling interrupts\n");
+
+ if (hdev->pldm) {
+ wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
+ } else {
+ wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;
+ cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
+ }
+
+ if (hard_reset) {
+ /*
+ * We don't know what state the CPU is in, so make sure it is
+ * stopped by any means necessary
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GAUDI_EVENT_HALT_MACHINE);
+ msleep(cpu_timeout_ms);
+ }
+
+ gaudi_stop_mme_qmans(hdev);
+ gaudi_stop_tpc_qmans(hdev);
+ gaudi_stop_hbm_dma_qmans(hdev);
+ gaudi_stop_pci_dma_qmans(hdev);
+
+ gaudi_disable_clock_gating(hdev);
+
+ msleep(wait_timeout_ms);
+
+ gaudi_pci_dma_stall(hdev);
+ gaudi_hbm_dma_stall(hdev);
+ gaudi_tpc_stall(hdev);
+ gaudi_mme_stall(hdev);
+
+ msleep(wait_timeout_ms);
+
+ gaudi_disable_mme_qmans(hdev);
+ gaudi_disable_tpc_qmans(hdev);
+ gaudi_disable_hbm_dma_qmans(hdev);
+ gaudi_disable_pci_dma_qmans(hdev);
+
+ gaudi_disable_timestamp(hdev);
+
+ if (hard_reset)
+ gaudi_disable_msi(hdev);
+ else
+ gaudi_sync_irqs(hdev);
+}
+
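+ /*
+ * Set the hop-0 page table address for every possible ASID, program
+ * the MMU cache management base addresses, invalidate the MMU cache
+ * and finally enable the MMU with the requested hop configuration.
+ */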
+static int gaudi_mmu_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hop0_addr;
+ int rc, i;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MMU)
+ return 0;
+
+ hdev->dram_supports_virtual_memory = false;
+
+ for (i = 0 ; i < prop->max_asid ; i++) {
+ hop0_addr = prop->mmu_pgt_addr +
+ (i * prop->mmu_hop_table_size);
+
+ rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "failed to set hop0 addr for asid %d\n", i);
+ goto err;
+ }
+ }
+
+ /* Init MMU cache management page */
+ WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
+ WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
+
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+
+ WREG32(mmMMU_UP_MMU_ENABLE, 1);
+ WREG32(mmMMU_UP_SPI_MASK, 0xF);
+
+ WREG32(mmSTLB_HOP_CONFIGURATION,
+ hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
+
+ gaudi->hw_cap_initialized |= HW_CAP_MMU;
+
+ return 0;
+
+err:
+ return rc;
+}
+
+static int gaudi_load_firmware_to_device(struct hl_device *hdev)
+{
+ void __iomem *dst;
+
+ /* HBM scrambler must be initialized before pushing F/W to HBM */
+ gaudi_init_scrambler_hbm(hdev);
+
+ dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;
+
+ return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst);
+}
+
+static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
+{
+ void __iomem *dst;
+
+ dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
+
+ return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst);
+}
+
+static void gaudi_read_device_fw_version(struct hl_device *hdev,
+ enum hl_fw_component fwc)
+{
+ const char *name;
+ u32 ver_off;
+ char *dest;
+
+ switch (fwc) {
+ case FW_COMP_UBOOT:
+ ver_off = RREG32(mmUBOOT_VER_OFFSET);
+ dest = hdev->asic_prop.uboot_ver;
+ name = "U-Boot";
+ break;
+ case FW_COMP_PREBOOT:
+ ver_off = RREG32(mmPREBOOT_VER_OFFSET);
+ dest = hdev->asic_prop.preboot_ver;
+ name = "Preboot";
+ break;
+ default:
+ dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
+ return;
+ }
+
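+ /*
+ * The version offset register holds a full SRAM address; strip the
+ * SRAM base so it can be used as an offset into the SRAM BAR below.
+ */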
+ ver_off &= ~((u32)SRAM_BASE_ADDR);
+
+ if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
+ memcpy_fromio(dest, hdev->pcie_bar[SRAM_BAR_ID] + ver_off,
+ VERSION_MAX_LEN);
+ } else {
+ dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
+ name, ver_off);
+ strcpy(dest, "unavailable");
+ }
+}
+
+static int gaudi_init_cpu(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+
+ if (!hdev->cpu_enable)
+ return 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_CPU)
+ return 0;
+
+ /*
+ * The device CPU works with 40-bit addresses.
+ * This register sets the extension to 50 bits.
+ */
+ WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);
+
+ rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+ mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU,
+ mmCPU_CMD_STATUS_TO_HOST,
+ mmCPU_BOOT_ERR0,
+ !hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC,
+ GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
+
+ if (rc)
+ return rc;
+
+ gaudi->hw_cap_initialized |= HW_CAP_CPU;
+
+ return 0;
+}
+
+static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_eq *eq;
+ u32 status;
+ struct hl_hw_queue *cpu_pq =
+ &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
+ int err;
+
+ if (!hdev->cpu_queues_enable)
+ return 0;
+
+ if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
+ return 0;
+
+ eq = &hdev->event_queue;
+
+ WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
+ WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
+
+ WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
+ WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
+
+ WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW,
+ lower_32_bits(hdev->cpu_accessible_dma_address));
+ WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH,
+ upper_32_bits(hdev->cpu_accessible_dma_address));
+
+ WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
+ WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
+ WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
+
+ /* Used for EQ CI */
+ WREG32(mmCPU_IF_EQ_RD_OFFS, 0);
+
+ WREG32(mmCPU_IF_PF_PQ_PI, 0);
+
+ if (gaudi->multi_msi_mode)
+ WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
+ else
+ WREG32(mmCPU_IF_QUEUE_INIT,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_PI_UPDATE);
+
+ err = hl_poll_timeout(
+ hdev,
+ mmCPU_IF_QUEUE_INIT,
+ status,
+ (status == PQ_INIT_STATUS_READY_FOR_HOST),
+ 1000,
+ cpu_timeout);
+
+ if (err) {
+ dev_err(hdev->dev,
+ "Failed to communicate with ARM CPU (ArmCP timeout)\n");
+ return -EIO;
+ }
+
+ gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
+ return 0;
+}
+
+static void gaudi_pre_hw_init(struct hl_device *hdev)
+{
+ /* Perform a read from the device to make sure the device is up */
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ /*
+ * Let's mark in the H/W that we have reached this point. We check
+ * this value in the reset_before_init function to understand whether
+ * we need to reset the chip before doing H/W init. This register is
+ * cleared by the H/W upon H/W reset
+ */
+ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
+
+ /* Set the access through PCI bars (Linux driver only) as secured */
+ WREG32(mmPCIE_WRAP_LBW_PROT_OVR, (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
+ PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));
+
+ /* Perform read to flush the waiting writes to ensure configuration
+ * was set in the device
+ */
+ RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
+
+ if (hdev->axi_drain) {
+ WREG32(mmPCIE_WRAP_LBW_DRAIN_CFG,
+ 1 << PCIE_WRAP_LBW_DRAIN_CFG_EN_SHIFT);
+ WREG32(mmPCIE_WRAP_HBW_DRAIN_CFG,
+ 1 << PCIE_WRAP_HBW_DRAIN_CFG_EN_SHIFT);
+
+ /* Perform read to flush the DRAIN cfg */
+ RREG32(mmPCIE_WRAP_HBW_DRAIN_CFG);
+ } else {
+ WREG32(mmPCIE_WRAP_LBW_DRAIN_CFG, 0);
+ WREG32(mmPCIE_WRAP_HBW_DRAIN_CFG, 0);
+
+ /* Perform read to flush the DRAIN cfg */
+ RREG32(mmPCIE_WRAP_HBW_DRAIN_CFG);
+ }
+
+ /* Configure the reset registers. Must be done as early as possible
+ * in case we fail during H/W initialization
+ */
+ WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
+ (CFG_RST_H_DMA_MASK |
+ CFG_RST_H_MME_MASK |
+ CFG_RST_H_SM_MASK |
+ CFG_RST_H_TPC_MASK));
+
+ WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
+ (CFG_RST_H_HBM_MASK |
+ CFG_RST_H_TPC_MASK |
+ CFG_RST_H_NIC_MASK |
+ CFG_RST_H_SM_MASK |
+ CFG_RST_H_DMA_MASK |
+ CFG_RST_H_MME_MASK |
+ CFG_RST_H_CPU_MASK |
+ CFG_RST_H_MMU_MASK));
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
+ (CFG_RST_L_IF_MASK |
+ CFG_RST_L_PSOC_MASK |
+ CFG_RST_L_TPC_MASK));
+}
+
+static int gaudi_hw_init(struct hl_device *hdev)
+{
+ int rc;
+
+ dev_info(hdev->dev, "Starting initialization of H/W\n");
+
+ gaudi_pre_hw_init(hdev);
+
+ gaudi_init_pci_dma_qmans(hdev);
+
+ gaudi_init_hbm_dma_qmans(hdev);
+
+ /*
+ * Before pushing u-boot/linux to the device, we need to set the
+ * HBM BAR to the base address of DRAM
+ */
+ if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
+ dev_err(hdev->dev,
+ "failed to map HBM bar to DRAM base address\n");
+ return -EIO;
+ }
+
+ rc = gaudi_init_cpu(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU\n");
+ return rc;
+ }
+
+ /* SRAM scrambler must be initialized after CPU is running from HBM */
+ gaudi_init_scrambler_sram(hdev);
+
+ /* This is here just in case we are working without a CPU */
+ gaudi_init_scrambler_hbm(hdev);
+
+ gaudi_init_golden_registers(hdev);
+
+ rc = gaudi_mmu_init(hdev);
+ if (rc)
+ return rc;
+
+ gaudi_init_security(hdev);
+
+ gaudi_init_mme_qmans(hdev);
+
+ gaudi_init_tpc_qmans(hdev);
+
+ gaudi_enable_clock_gating(hdev);
+
+ gaudi_enable_timestamp(hdev);
+
+ /* MSI must be enabled before CPU queues are initialized */
+ rc = gaudi_enable_msi(hdev);
+ if (rc)
+ goto disable_queues;
+
+ /* Must be called after MSI is enabled */
+ rc = gaudi_init_cpu_queues(hdev, GAUDI_CPU_TIMEOUT_USEC);
+ if (rc) {
+ dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
+ rc);
+ goto disable_msi;
+ }
+
+ /* Perform read from the device to flush all configuration */
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return 0;
+
+disable_msi:
+ gaudi_disable_msi(hdev);
+disable_queues:
+ gaudi_disable_mme_qmans(hdev);
+ gaudi_disable_pci_dma_qmans(hdev);
+
+ return rc;
+}
+
+static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, reset_timeout_ms, boot_strap = 0;
+
+ if (hdev->pldm) {
+ if (hard_reset)
+ reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
+ else
+ reset_timeout_ms = GAUDI_PLDM_SRESET_TIMEOUT_MSEC;
+ } else {
+ reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
+ }
+
+ if (hard_reset) {
+ /* Tell ASIC not to re-initialize PCIe */
+ WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);
+
+ boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
+ /* H/W bug WA:
+ * rdata[31:0] = strap_read_val;
+ * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0]
+ */
+ boot_strap = (((boot_strap & 0x7FE00000) << 1) |
+ (boot_strap & 0x001FFFFF));
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2);
+
+ /* Restart BTL/BLR upon hard-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
+ 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ /* Don't restart BTL/BLR upon soft-reset */
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 0);
+
+ WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST,
+ 1 << PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT);
+ dev_info(hdev->dev,
+ "Issued SOFT reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ }
+
+ /*
+ * After hard reset, we can't poll the BTM_FSM register because the PSOC
+ * itself is in reset. We need to wait until the reset is deasserted
+ */
+ msleep(reset_timeout_ms);
+
+ status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
+ if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
+ dev_err(hdev->dev,
+ "Timeout while waiting for device to reset 0x%x\n",
+ status);
+
+ if (!hard_reset) {
+ gaudi->hw_cap_initialized &= ~(HW_CAP_PCI_DMA | HW_CAP_MME |
+ HW_CAP_TPC_MASK |
+ HW_CAP_HBM_DMA);
+
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GAUDI_EVENT_SOFT_RESET);
+ return;
+ }
+
+ /* We continue here only for hard-reset */
+
+ WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap);
+
+ gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
+ HW_CAP_HBM | HW_CAP_PCI_DMA |
+ HW_CAP_MME | HW_CAP_TPC_MASK |
+ HW_CAP_HBM_DMA | HW_CAP_PLL |
+ HW_CAP_MMU |
+ HW_CAP_SRAM_SCRAMBLER |
+ HW_CAP_HBM_SCRAMBLER);
+ memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
+}
+
+static int gaudi_suspend(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
+ if (rc)
+ dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
+
+ return rc;
+}
+
+static int gaudi_resume(struct hl_device *hdev)
+{
+ return gaudi_init_iatu(hdev);
+}
+
+static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size)
+{
+ int rc;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE;
+
+ rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+ return rc;
+}
+
+static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 db_reg_offset, db_value, dma_qm_offset, q_off;
+ int dma_id;
+ bool invalid_queue = false;
+
+ switch (hw_queue_id) {
+ case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3:
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3:
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_2];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_3];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3:
+ dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3:
+ dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
+ dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
+ q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
+ db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
+ break;
+
+ case GAUDI_QUEUE_ID_CPU_PQ:
+ if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
+ db_reg_offset = mmCPU_IF_PF_PQ_PI;
+ else
+ invalid_queue = true;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_0:
+ db_reg_offset = mmMME2_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_1:
+ db_reg_offset = mmMME2_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_2:
+ db_reg_offset = mmMME2_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_0_3:
+ db_reg_offset = mmMME2_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_0:
+ db_reg_offset = mmMME0_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_1:
+ db_reg_offset = mmMME0_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_2:
+ db_reg_offset = mmMME0_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_MME_1_3:
+ db_reg_offset = mmMME0_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_0:
+ db_reg_offset = mmTPC0_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_1:
+ db_reg_offset = mmTPC0_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_2:
+ db_reg_offset = mmTPC0_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_0_3:
+ db_reg_offset = mmTPC0_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_0:
+ db_reg_offset = mmTPC1_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_1:
+ db_reg_offset = mmTPC1_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_2:
+ db_reg_offset = mmTPC1_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_1_3:
+ db_reg_offset = mmTPC1_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_0:
+ db_reg_offset = mmTPC2_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_1:
+ db_reg_offset = mmTPC2_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_2:
+ db_reg_offset = mmTPC2_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_2_3:
+ db_reg_offset = mmTPC2_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_0:
+ db_reg_offset = mmTPC3_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_1:
+ db_reg_offset = mmTPC3_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_2:
+ db_reg_offset = mmTPC3_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_3_3:
+ db_reg_offset = mmTPC3_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_0:
+ db_reg_offset = mmTPC4_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_1:
+ db_reg_offset = mmTPC4_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_2:
+ db_reg_offset = mmTPC4_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_4_3:
+ db_reg_offset = mmTPC4_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_0:
+ db_reg_offset = mmTPC5_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_1:
+ db_reg_offset = mmTPC5_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_2:
+ db_reg_offset = mmTPC5_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_5_3:
+ db_reg_offset = mmTPC5_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_0:
+ db_reg_offset = mmTPC6_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_1:
+ db_reg_offset = mmTPC6_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_2:
+ db_reg_offset = mmTPC6_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_6_3:
+ db_reg_offset = mmTPC6_QM_PQ_PI_3;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_0:
+ db_reg_offset = mmTPC7_QM_PQ_PI_0;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_1:
+ db_reg_offset = mmTPC7_QM_PQ_PI_1;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_2:
+ db_reg_offset = mmTPC7_QM_PQ_PI_2;
+ break;
+
+ case GAUDI_QUEUE_ID_TPC_7_3:
+ db_reg_offset = mmTPC7_QM_PQ_PI_3;
+ break;
+
+ default:
+ invalid_queue = true;
+ }
+
+ if (invalid_queue) {
+ /* Should never get here */
+ dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
+ hw_queue_id);
+ return;
+ }
+
+ db_value = pi;
+
+ /* ring the doorbell */
+ WREG32(db_reg_offset, db_value);
+
+ if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ)
+ WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
+ GAUDI_EVENT_PI_UPDATE);
+}
+
+static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
+ struct hl_bd *bd)
+{
+ __le64 *pbd = (__le64 *) bd;
+
+ /* The QMANs are in host memory, so a simple copy suffices */
+ pqe[0] = pbd[0];
+ pqe[1] = pbd[1];
+}
+
+static void *gaudi_dma_alloc_coherent(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags)
+{
+ void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
+ dma_handle, flags);
+
+ /* Shift to the device's base physical address of host memory */
+ if (kernel_addr)
+ *dma_handle += HOST_PHYS_BASE;
+
+ return kernel_addr;
+}
+
+static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ /* Cancel the device's base physical address of host memory */
+ dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
+
+ dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
+}
+
+static void *gaudi_get_int_queue_base(struct hl_device *hdev,
+ u32 queue_id, dma_addr_t *dma_handle,
+ u16 *queue_len)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct gaudi_internal_qman_info *q;
+
+ if (queue_id >= GAUDI_QUEUE_ID_SIZE ||
+ gaudi_queue_type[queue_id] != QUEUE_TYPE_INT) {
+ dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
+ return NULL;
+ }
+
+ q = &gaudi->internal_qmans[queue_id];
+ *dma_handle = q->pq_dma_addr;
+ *queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE;
+
+ return q->pq_kernel_addr;
+}
+
+static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
+ u16 len, u32 timeout, long *result)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) {
+ if (result)
+ *result = 0;
+ return 0;
+ }
+
+ return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
+ timeout, result);
+}
+
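+ /*
+ * Test an external queue by sending a MSG_PROT packet through it that
+ * writes a known fence value to a host buffer, then polling that
+ * buffer until the value shows up or the timeout expires.
+ */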
+static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
+{
+ struct packet_msg_prot *fence_pkt;
+ dma_addr_t pkt_dma_addr;
+ u32 fence_val, tmp, timeout_usec;
+ dma_addr_t fence_dma_addr;
+ u32 *fence_ptr;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_TEST_QUEUE_WAIT_USEC;
+ else
+ timeout_usec = GAUDI_TEST_QUEUE_WAIT_USEC;
+
+ fence_val = GAUDI_QMAN0_FENCE_VAL;
+
+ fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate memory for queue testing\n");
+ return -ENOMEM;
+ }
+
+ *fence_ptr = 0;
+
+ fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
+ sizeof(struct packet_msg_prot),
+ GFP_KERNEL, &pkt_dma_addr);
+ if (!fence_pkt) {
+ dev_err(hdev->dev,
+ "Failed to allocate packet for queue testing\n");
+ rc = -ENOMEM;
+ goto free_fence_ptr;
+ }
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_EB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(fence_val);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr);
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
+ sizeof(struct packet_msg_prot),
+ pkt_dma_addr);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to send fence packet\n");
+ goto free_pkt;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
+ 1000, timeout_usec, true);
+
+ hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev,
+ "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
+ hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
+ rc = -EIO;
+ }
+
+free_pkt:
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
+ pkt_dma_addr);
+free_fence_ptr:
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+ return rc;
+}
+
+static int gaudi_test_cpu_queue(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ /*
+ * Check the capability here, as send_cpu_message() won't update the
+ * result value if the capability is not set
+ */
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_test_cpu_queue(hdev);
+}
+
+static int gaudi_test_queues(struct hl_device *hdev)
+{
+ int i, rc, ret_val = 0;
+
+ for (i = 0 ; i < HL_MAX_QUEUES ; i++) {
+ if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
+ rc = gaudi_test_queue(hdev, i);
+ if (rc)
+ ret_val = -EINVAL;
+ }
+ }
+
+ rc = gaudi_test_cpu_queue(hdev);
+ if (rc)
+ ret_val = -EINVAL;
+
+ return ret_val;
+}
+
+static void *gaudi_dma_pool_zalloc(struct hl_device *hdev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma_handle)
+{
+ void *kernel_addr;
+
+ if (size > GAUDI_DMA_POOL_BLK_SIZE)
+ return NULL;
+
+ kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
+
+ /* Shift to the device's base physical address of host memory */
+ if (kernel_addr)
+ *dma_handle += HOST_PHYS_BASE;
+
+ return kernel_addr;
+}
+
+static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr,
+ dma_addr_t dma_addr)
+{
+ /* Cancel the device's base physical address of host memory */
+ dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
+
+ dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
+}
+
+static void *gaudi_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
+ size_t size, dma_addr_t *dma_handle)
+{
+ return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+}
+
+static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
+ size_t size, void *vaddr)
+{
+ hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
+}
+
+static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
+ return -ENOMEM;
+
+ /* Shift to the device's base physical address of host memory */
+ for_each_sg(sgl, sg, nents, i)
+ sg->dma_address += HOST_PHYS_BASE;
+
+ return 0;
+}
+
+static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* Cancel the device's base physical address of host memory */
+ for_each_sg(sgl, sg, nents, i)
+ sg->dma_address -= HOST_PHYS_BASE;
+
+ dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
+}
+
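+ /*
+ * Return the size of the LIN_DMA descriptor list needed to cover the
+ * given SG table, merging DMA-contiguous entries as long as the
+ * combined length does not exceed DMA_MAX_TRANSFER_SIZE.
+ */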
+static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt;
+ u64 len, len_next;
+ dma_addr_t addr, addr_next;
+
+ dma_desc_cnt = 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+
+ len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((addr + len == addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ dma_desc_cnt++;
+ }
+
+ return dma_desc_cnt * sizeof(struct packet_lin_dma);
+}
+
+static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ u64 addr, enum dma_data_direction dir)
+{
+ struct hl_userptr *userptr;
+ int rc;
+
+ if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr))
+ goto already_pinned;
+
+ userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
+ if (!userptr)
+ return -ENOMEM;
+
+ rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
+ userptr);
+ if (rc)
+ goto free_userptr;
+
+ list_add_tail(&userptr->job_node, parser->job_userptr_list);
+
+ rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
+ userptr->sgt->nents, dir);
+ if (rc) {
+ dev_err(hdev->dev, "failed to map sgt with DMA region\n");
+ goto unpin_memory;
+ }
+
+ userptr->dma_mapped = true;
+ userptr->dir = dir;
+
+already_pinned:
+ parser->patched_cb_size +=
+ gaudi_get_dma_desc_list_size(hdev, userptr->sgt);
+
+ return 0;
+
+unpin_memory:
+ hl_unpin_host_memory(hdev, userptr);
+free_userptr:
+ kfree(userptr);
+ return rc;
+}
+
+static int gaudi_validate_dma_pkt_host(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ bool src_in_host)
+{
+ enum dma_data_direction dir;
+ bool skip_host_mem_pin = false, user_memset;
+ u64 addr;
+ int rc = 0;
+
+ user_memset = (le32_to_cpu(user_dma_pkt->ctl) &
+ GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ if (src_in_host) {
+ if (user_memset)
+ skip_host_mem_pin = true;
+
+ dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n");
+ dir = DMA_TO_DEVICE;
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ } else {
+ dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n");
+ dir = DMA_FROM_DEVICE;
+ addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
+ GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
+ GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+ }
+
+ if (skip_host_mem_pin)
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+ else
+ rc = gaudi_pin_memory_before_cs(hdev, parser, user_dma_pkt,
+ addr, dir);
+
+ return rc;
+}
+
+static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt)
+{
+ bool src_in_host = false;
+ u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
+ GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
+ GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
+
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+ dev_dbg(hdev->dev, "source == 0x%llx\n",
+ le64_to_cpu(user_dma_pkt->src_addr));
+ dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr);
+ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
+
+ /*
+ * Special handling for DMA with size 0. Bypass all validations
+ * because no transactions will be done except for WR_COMP, which
+ * is not a security issue
+ */
+ if (!le32_to_cpu(user_dma_pkt->tsize)) {
+ parser->patched_cb_size += sizeof(*user_dma_pkt);
+ return 0;
+ }
+
+ if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
+ src_in_host = true;
+
+ return gaudi_validate_dma_pkt_host(hdev, parser, user_dma_pkt,
+ src_in_host);
+}
+
+static int gaudi_validate_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser, bool is_mmu)
+{
+ u32 cb_parsed_length = 0;
+ int rc = 0;
+
+ parser->patched_cb_size = 0;
+
+ /* cb_user_size is more than 0, so the loop will always execute */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ struct gaudi_packet *user_pkt;
+
+ user_pkt = (struct gaudi_packet *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = gaudi_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_LIN_DMA:
+ parser->contains_dma_pkt = true;
+ if (is_mmu)
+ parser->patched_cb_size += pkt_size;
+ else
+ rc = gaudi_validate_dma_pkt_no_mmu(hdev, parser,
+ (struct packet_lin_dma *) user_pkt);
+ break;
+
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_REPEAT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ case PACKET_ARB_POINT:
+ case PACKET_LOAD_AND_EXE:
+ parser->patched_cb_size += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate an MSI interrupt
+ */
+ parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
+
+ return rc;
+}
+
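+ /*
+ * Expand a single user LIN_DMA packet into one packet per (merged) SG
+ * entry of the pinned host memory. The engine barrier bit is kept only
+ * on the first descriptor, and the user's WR_COMP setting is restored
+ * only on the last one.
+ */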
+static int gaudi_patch_dma_packet(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_lin_dma *user_dma_pkt,
+ struct packet_lin_dma *new_dma_pkt,
+ u32 *new_dma_pkt_size)
+{
+ struct hl_userptr *userptr;
+ struct scatterlist *sg, *sg_next_iter;
+ u32 count, dma_desc_cnt, user_wrcomp_en_mask, ctl;
+ u64 len, len_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ u64 device_memory_addr, addr;
+ enum dma_data_direction dir;
+ struct sg_table *sgt;
+ bool src_in_host = false;
+ bool skip_host_mem_pin = false;
+ bool user_memset;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+
+ if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
+ src_in_host = true;
+
+ user_memset = (ctl & GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+ GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+ if (src_in_host) {
+ addr = le64_to_cpu(user_dma_pkt->src_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ dir = DMA_TO_DEVICE;
+ if (user_memset)
+ skip_host_mem_pin = true;
+ } else {
+ addr = le64_to_cpu(user_dma_pkt->dst_addr);
+ device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
+ dir = DMA_FROM_DEVICE;
+ }
+
+ if ((!skip_host_mem_pin) &&
+ (!hl_userptr_is_pinned(hdev, addr,
+ le32_to_cpu(user_dma_pkt->tsize),
+ parser->job_userptr_list, &userptr))) {
+ dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
+ addr, user_dma_pkt->tsize);
+ return -EFAULT;
+ }
+
+ if ((user_memset) && (dir == DMA_TO_DEVICE)) {
+ memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
+ *new_dma_pkt_size = sizeof(*user_dma_pkt);
+ return 0;
+ }
+
+ user_wrcomp_en_mask = ctl & GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
+
+ sgt = userptr->sgt;
+ dma_desc_cnt = 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+
+ if (len == 0)
+ break;
+
+ while ((count + 1) < sgt->nents) {
+ sg_next_iter = sg_next(sg);
+ len_next = sg_dma_len(sg_next_iter);
+ dma_addr_next = sg_dma_address(sg_next_iter);
+
+ if (len_next == 0)
+ break;
+
+ if ((dma_addr + len == dma_addr_next) &&
+ (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
+ len += len_next;
+ count++;
+ sg = sg_next_iter;
+ } else {
+ break;
+ }
+ }
+
+ new_dma_pkt->ctl = user_dma_pkt->ctl;
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ if (likely(dma_desc_cnt))
+ ctl &= ~GAUDI_PKT_CTL_EB_MASK;
+ ctl &= ~GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
+ new_dma_pkt->ctl = cpu_to_le32(ctl);
+ new_dma_pkt->tsize = cpu_to_le32(len);
+
+ if (dir == DMA_TO_DEVICE) {
+ new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
+ } else {
+ new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
+ new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
+ }
+
+ if (!user_memset)
+ device_memory_addr += len;
+ dma_desc_cnt++;
+ new_dma_pkt++;
+ }
+
+ if (!dma_desc_cnt) {
+ dev_err(hdev->dev,
+ "Error of 0 SG entries when patching DMA packet\n");
+ return -EFAULT;
+ }
+
+ /* Fix the last DMA packet - wrcomp must be as the user set it */
+ new_dma_pkt--;
+ new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask);
+
+ *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
+
+ return 0;
+}
+
+static int gaudi_patch_cb(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u32 cb_parsed_length = 0;
+ u32 cb_patched_cur_length = 0;
+ int rc = 0;
+
+ /* cb_user_size is more than 0, so the loop will always execute */
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ u32 new_pkt_size = 0;
+ struct gaudi_packet *user_pkt, *kernel_pkt;
+
+ user_pkt = (struct gaudi_packet *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+ kernel_pkt = (struct gaudi_packet *) (uintptr_t)
+ (parser->patched_cb->kernel_address +
+ cb_patched_cur_length);
+
+ pkt_id = (enum packet_id) (
+ (le64_to_cpu(user_pkt->header) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+ pkt_size = gaudi_packet_sizes[pkt_id];
+ cb_parsed_length += pkt_size;
+ if (cb_parsed_length > parser->user_cb_size) {
+ dev_err(hdev->dev,
+ "packet 0x%x is out of CB boundary\n", pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ switch (pkt_id) {
+ case PACKET_LIN_DMA:
+ rc = gaudi_patch_dma_packet(hdev, parser,
+ (struct packet_lin_dma *) user_pkt,
+ (struct packet_lin_dma *) kernel_pkt,
+ &new_pkt_size);
+ cb_patched_cur_length += new_pkt_size;
+ break;
+
+ case PACKET_MSG_PROT:
+ dev_err(hdev->dev,
+ "User not allowed to use MSG_PROT\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_CP_DMA:
+ dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_STOP:
+ dev_err(hdev->dev, "User not allowed to use STOP\n");
+ rc = -EPERM;
+ break;
+
+ case PACKET_WREG_32:
+ case PACKET_WREG_BULK:
+ case PACKET_MSG_LONG:
+ case PACKET_MSG_SHORT:
+ case PACKET_REPEAT:
+ case PACKET_FENCE:
+ case PACKET_NOP:
+ case PACKET_ARB_POINT:
+ case PACKET_LOAD_AND_EXE:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+ break;
+
+ default:
+ dev_err(hdev->dev, "Invalid packet header 0x%x\n",
+ pkt_id);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int gaudi_parse_cb_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ u32 patched_cb_size;
+ struct hl_cb *user_cb;
+ int rc;
+
+ /*
+ * The new CB should have space at the end for two MSG_PROT packets:
+ * 1. A packet that will act as a completion packet
+ * 2. A packet that will generate an MSI interrupt
+ */
+ parser->patched_cb_size = parser->user_cb_size +
+ sizeof(struct packet_msg_prot) * 2;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n",
+ rc);
+ return rc;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * The check that parser->user_cb_size <= parser->user_cb->size was done
+ * in validate_queue_index().
+ */
+ memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
+ (void *) (uintptr_t) parser->user_cb->kernel_address,
+ parser->user_cb_size);
+
+ patched_cb_size = parser->patched_cb_size;
+
+ /* Validate patched CB instead of user CB */
+ user_cb = parser->user_cb;
+ parser->user_cb = parser->patched_cb;
+ rc = gaudi_validate_cb(hdev, parser, true);
+ parser->user_cb = user_cb;
+
+ if (rc) {
+ hl_cb_put(parser->patched_cb);
+ goto out;
+ }
+
+ if (patched_cb_size != parser->patched_cb_size) {
+ dev_err(hdev->dev, "user CB size mismatch\n");
+ hl_cb_put(parser->patched_cb);
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ /*
+ * Always call cb destroy here because we still hold a reference
+ * to it from the earlier cb_get. After the job is completed,
+ * cb_put will release it, but here we want to remove it from the
+ * idr
+ */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+ return rc;
+}
+
+static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ u64 patched_cb_handle;
+ int rc;
+
+ rc = gaudi_validate_cb(hdev, parser, false);
+
+ if (rc)
+ goto free_userptr;
+
+ rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+ parser->patched_cb_size,
+ &patched_cb_handle, HL_KERNEL_ASID_ID);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to allocate patched CB for DMA CS %d\n", rc);
+ goto free_userptr;
+ }
+
+ patched_cb_handle >>= PAGE_SHIFT;
+ parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+ (u32) patched_cb_handle);
+ /* hl_cb_get should never fail here so use kernel WARN */
+ WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+ (u32) patched_cb_handle);
+ if (!parser->patched_cb) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = gaudi_patch_cb(hdev, parser);
+
+ if (rc)
+ hl_cb_put(parser->patched_cb);
+
+out:
+ /*
+ * Always call cb destroy here because we still hold a reference
+ * to it from the earlier cb_get. After the job is completed,
+ * cb_put will release it, but here we want to remove it from the
+ * idr
+ */
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+ patched_cb_handle << PAGE_SHIFT);
+
+free_userptr:
+ if (rc)
+ hl_userptr_delete_list(hdev, parser->job_userptr_list);
+ return rc;
+}
+
+static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
+ struct hl_cs_parser *parser)
+{
+ struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+
+ /* For internal queue jobs just check if CB address is valid */
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->sram_user_base_address,
+ asic_prop->sram_end_address))
+ return 0;
+
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->dram_user_base_address,
+ asic_prop->dram_end_address))
+ return 0;
+
+ /* PMMU and HPMMU addresses are equal, check only one of them */
+ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
+ parser->user_cb_size,
+ asic_prop->pmmu.start_addr,
+ asic_prop->pmmu.end_addr))
+ return 0;
+
+ dev_err(hdev->dev,
+ "CB address 0x%px + 0x%x for internal QMAN is not valid\n",
+ parser->user_cb, parser->user_cb_size);
+
+ return -EFAULT;
+}
+
+static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (parser->queue_type == QUEUE_TYPE_INT)
+ return gaudi_parse_cb_no_ext_queue(hdev, parser);
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MMU)
+ return gaudi_parse_cb_mmu(hdev, parser);
+ else
+ return gaudi_parse_cb_no_mmu(hdev, parser);
+}
+
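+/*
+ * Fill the space reserved at the end of the CB with two MSG_PROT packets:
+ * one that writes the completion value to the CQ and one that triggers the
+ * MSI interrupt (vector 0 when multi-MSI mode is disabled).
+ */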
+static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
+ u64 kernel_address, u32 len,
+ u64 cq_addr, u32 cq_val, u32 msi_vec,
+ bool eb)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct packet_msg_prot *cq_pkt;
+ u32 tmp;
+
+ cq_pkt = (struct packet_msg_prot *) (uintptr_t)
+ (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+
+ if (eb)
+ tmp |= (1 << GAUDI_PKT_CTL_EB_SHIFT);
+
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(cq_val);
+ cq_pkt->addr = cpu_to_le64(cq_addr);
+
+ cq_pkt++;
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ cq_pkt->ctl = cpu_to_le32(tmp);
+ cq_pkt->value = cpu_to_le32(1);
+
+ if (!gaudi->multi_msi_mode)
+ msi_vec = 0;
+
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+}
+
+static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
+{
+ WREG32(mmCPU_IF_EQ_RD_OFFS, val);
+}
+
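+/*
+ * Build a single LIN_DMA memset packet in a kernel CB and submit it as a
+ * driver job on the DMA_0_0 queue.
+ */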
+static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
+ u32 size, u64 val)
+{
+ struct packet_lin_dma *lin_dma_pkt;
+ struct hl_cs_job *job;
+ u32 cb_size, ctl;
+ struct hl_cb *cb;
+ int rc;
+
+ cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+ if (!cb)
+ return -EFAULT;
+
+ lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;
+ memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
+ cb_size = sizeof(*lin_dma_pkt);
+
+ ctl = ((PACKET_LIN_DMA << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
+ (1 << GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT) |
+ (1 << GAUDI_PKT_CTL_RB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT));
+ lin_dma_pkt->ctl = cpu_to_le32(ctl);
+ lin_dma_pkt->src_addr = cpu_to_le64(val);
+ lin_dma_pkt->dst_addr |= cpu_to_le64(addr);
+ lin_dma_pkt->tsize = cpu_to_le32(size);
+
+ job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
+ if (!job) {
+ dev_err(hdev->dev, "Failed to allocate a new job\n");
+ rc = -ENOMEM;
+ goto release_cb;
+ }
+
+ job->id = 0;
+ job->user_cb = cb;
+ job->user_cb->cs_cnt++;
+ job->user_cb_size = cb_size;
+ job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
+ job->patched_cb = job->user_cb;
+ job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);
+
+ hl_debugfs_add_job(hdev, job);
+
+ rc = gaudi_send_job_on_qman0(hdev, job);
+
+ hl_debugfs_remove_job(hdev, job);
+ kfree(job);
+ cb->cs_cnt--;
+
+release_cb:
+ hl_cb_put(cb);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+
+ return rc;
+}
+
+static void gaudi_restore_sm_registers(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4) {
+ WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+ }
+
+ for (i = 0 ; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4) {
+ WREG32(mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ WREG32(mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ WREG32(mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+ }
+
+ i = GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4;
+
+ for (; i < NUM_OF_SOB_IN_BLOCK << 2 ; i += 4)
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + i, 0);
+
+ i = GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4;
+
+ for (; i < NUM_OF_MONITORS_IN_BLOCK << 2 ; i += 4)
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 + i, 0);
+}
+
+static void gaudi_restore_dma_registers(struct hl_device *hdev)
+{
+ u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 -
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
+ int i;
+
+ for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
+ u64 sob_addr = CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 +
+ (i * sob_delta);
+ u32 dma_offset = i * DMA_CORE_OFFSET;
+
+ WREG32(mmDMA0_CORE_WR_COMP_ADDR_LO + dma_offset,
+ lower_32_bits(sob_addr));
+ WREG32(mmDMA0_CORE_WR_COMP_ADDR_HI + dma_offset,
+ upper_32_bits(sob_addr));
+ WREG32(mmDMA0_CORE_WR_COMP_WDATA + dma_offset, 0x80000001);
+
+ /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be
+ * modified by the user for SRAM reduction
+ */
+ if (i > 1)
+ WREG32(mmDMA0_CORE_WR_AWUSER_31_11 + dma_offset,
+ 0x00000001);
+ }
+}
+
+static void gaudi_restore_qm_registers(struct hl_device *hdev)
+{
+ u32 qman_offset;
+ int i;
+
+ for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
+ qman_offset = i * DMA_QMAN_OFFSET;
+ WREG32(mmDMA0_QM_ARB_CFG_0 + qman_offset, 0);
+ }
+
+ for (i = 0 ; i < MME_NUMBER_OF_MASTER_ENGINES ; i++) {
+ qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE);
+ WREG32(mmMME0_QM_ARB_CFG_0 + qman_offset, 0);
+ }
+
+ for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ qman_offset = i * TPC_QMAN_OFFSET;
+ WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0);
+ }
+}
+
+static void gaudi_restore_user_registers(struct hl_device *hdev)
+{
+ gaudi_restore_sm_registers(hdev);
+ gaudi_restore_dma_registers(hdev);
+ gaudi_restore_qm_registers(hdev);
+}
+
+static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 addr = prop->sram_user_base_address;
+ u32 size = hdev->pldm ? 0x10000 :
+ (prop->sram_size - SRAM_USER_BASE_OFFSET);
+ u64 val = 0x7777777777777777ull;
+ int rc;
+
+ rc = gaudi_memset_device_memory(hdev, addr, size, val);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
+ return rc;
+ }
+
+ gaudi_mmu_prepare(hdev, asid);
+
+ gaudi_restore_user_registers(hdev);
+
+ return 0;
+}
+
+static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 addr = prop->mmu_pgt_addr;
+ u32 size = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ return 0;
+
+ return gaudi_memset_device_memory(hdev, addr, size, 0);
+}
+
+static void gaudi_restore_phase_topology(struct hl_device *hdev)
+{
+
+}
+
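+/*
+ * The debugfs access helpers below dispatch by address range: CFG registers
+ * (rejected while clock gating is enabled), SRAM through its PCI BAR, HBM
+ * through the movable HBM BAR window, and host physical memory when no
+ * IOMMU is present.
+ */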
+static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't read register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ *val = RREG32(addr - CFG_BASE);
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
+ *val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ *val = readl(hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't write register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ WREG32(addr - CFG_BASE, val);
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
+ writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ writel(val, hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't read register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ u32 val_l = RREG32(addr - CFG_BASE);
+ u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
+
+ *val = (((u64) val_h) << 32) | val_l;
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
+ *val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ *val = readq(hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 hbm_bar_addr;
+ int rc = 0;
+
+ if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
+ if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't write register - clock gating is enabled!\n");
+ rc = -EFAULT;
+ } else {
+ WREG32(addr - CFG_BASE, lower_32_bits(val));
+ WREG32(addr + sizeof(u32) - CFG_BASE,
+ upper_32_bits(val));
+ }
+ } else if ((addr >= SRAM_BASE_ADDR) &&
+ (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
+ writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
+ (addr - SRAM_BASE_ADDR));
+ } else if (addr <=
+ DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
+ u64 bar_base_addr = DRAM_PHYS_BASE +
+ (addr & ~(prop->dram_pci_bar_size - 0x1ull));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);
+ if (hbm_bar_addr != U64_MAX) {
+ writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - bar_base_addr));
+
+ hbm_bar_addr = gaudi_set_hbm_bar_base(hdev,
+ hbm_bar_addr);
+ }
+ if (hbm_bar_addr == U64_MAX)
+ rc = -EIO;
+ } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
+ *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
+ } else {
+ rc = -EFAULT;
+ }
+
+ return rc;
+}
+
+static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (hdev->hard_reset_pending)
+ return U64_MAX;
+
+ return readq(hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - gaudi->hbm_bar_cur_addr));
+}
+
+static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (hdev->hard_reset_pending)
+ return;
+
+ writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
+ (addr - gaudi->hbm_bar_cur_addr));
+}
+
+static void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
+{
+ /* mask to zero the MMBP and ASID bits */
+ WREG32_AND(reg, ~0x7FF);
+ WREG32_OR(reg, asid);
+}
+
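+/*
+ * Program the context's ASID into the non-secure properties and AxUSER
+ * registers of the DMA, TPC, MME and PSOC trace units. Clock gating is
+ * disabled around the register writes.
+ */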
+static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
+ WARN(1, "asid %u is too big\n", asid);
+ return;
+ }
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmDMA0_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA1_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA2_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA3_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA4_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA5_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA6_CORE_NON_SECURE_PROPS, asid);
+ gaudi_mmu_prepare_reg(hdev, mmDMA7_CORE_NON_SECURE_PROPS, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_ARUSER_LO, asid);
+ gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_AWUSER_LO, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_4, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_2, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_3, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_4, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER0, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER1, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME0_ACC_WBC, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME1_ACC_WBC, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
+ gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);
+
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
+ gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+}
+
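+/*
+ * Send a driver CB on the DMA_0_0 queue and wait for the fence value that
+ * the CB's trailing MSG_PROT packet writes to a host DMA buffer.
+ */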
+static int gaudi_send_job_on_qman0(struct hl_device *hdev,
+ struct hl_cs_job *job)
+{
+ struct packet_msg_prot *fence_pkt;
+ u32 *fence_ptr;
+ dma_addr_t fence_dma_addr;
+ struct hl_cb *cb;
+ u32 tmp, timeout, dma_offset;
+ int rc;
+
+ if (hdev->pldm)
+ timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC;
+ else
+ timeout = HL_DEVICE_TIMEOUT_USEC;
+
+ if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
+ dev_err_ratelimited(hdev->dev,
+ "Can't send driver job on QMAN0 because the device is not idle\n");
+ return -EBUSY;
+ }
+
+ fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
+ &fence_dma_addr);
+ if (!fence_ptr) {
+ dev_err(hdev->dev,
+ "Failed to allocate fence memory for QMAN0\n");
+ return -ENOMEM;
+ }
+
+ cb = job->patched_cb;
+
+ fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
+ job->job_cb_size - sizeof(struct packet_msg_prot));
+
+ tmp = (PACKET_MSG_PROT << GAUDI_PKT_CTL_OPCODE_SHIFT) |
+ (1 << GAUDI_PKT_CTL_EB_SHIFT) |
+ (1 << GAUDI_PKT_CTL_MB_SHIFT);
+ fence_pkt->ctl = cpu_to_le32(tmp);
+ fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL);
+ fence_pkt->addr = cpu_to_le64(fence_dma_addr);
+
+ dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;
+
+ WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+ rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
+ job->job_cb_size, cb->bus_address);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
+ goto free_fence_ptr;
+ }
+
+ rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
+ (tmp == GAUDI_QMAN0_FENCE_VAL), 1000,
+ timeout, true);
+
+ hl_hw_queue_inc_ci_kernel(hdev, GAUDI_QUEUE_ID_DMA_0_0);
+
+ if (rc == -ETIMEDOUT) {
+ dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
+ goto free_fence_ptr;
+ }
+
+free_fence_ptr:
+ WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
+ ~BIT(DMA0_CORE_PROT_VAL_SHIFT));
+
+ hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
+ fence_dma_addr);
+ return rc;
+}
+
+static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size)
+{
+ if (event_type >= GAUDI_EVENT_SIZE)
+ goto event_not_supported;
+
+ if (!gaudi_irq_map_table[event_type].valid)
+ goto event_not_supported;
+
+ snprintf(desc, size, "%s", gaudi_irq_map_table[event_type].name);
+
+ return;
+
+event_not_supported:
+ snprintf(desc, size, "N/A");
+}
+
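+/*
+ * Each DMA_IF initiator ID is shared by two DMA cores, so inspect both
+ * cores' ERR_CAUSE registers to report which of them caused the RAZWI.
+ */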
+static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
+ u32 x_y, bool is_write)
+{
+ u32 dma_id[2], dma_offset, err_cause[2], mask, i;
+
+ mask = is_write ? DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK :
+ DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK;
+
+ switch (x_y) {
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
+ dma_id[0] = 0;
+ dma_id[1] = 2;
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
+ dma_id[0] = 1;
+ dma_id[1] = 3;
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
+ dma_id[0] = 4;
+ dma_id[1] = 6;
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
+ dma_id[0] = 5;
+ dma_id[1] = 7;
+ break;
+ default:
+ goto unknown_initiator;
+ }
+
+ for (i = 0 ; i < 2 ; i++) {
+ dma_offset = dma_id[i] * DMA_CORE_OFFSET;
+ err_cause[i] = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
+ }
+
+ switch (x_y) {
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA0";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA2";
+ else
+ return "DMA0 or DMA2";
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA1";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA3";
+ else
+ return "DMA1 or DMA3";
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA4";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA6";
+ else
+ return "DMA4 or DMA6";
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ return "DMA5";
+ else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ return "DMA7";
+ else
+ return "DMA5 or DMA7";
+ }
+
+unknown_initiator:
+ return "unknown initiator";
+}
+
+static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
+ bool is_write)
+{
+ u32 val, x_y, axi_id;
+
+ val = is_write ? RREG32(mmMMU_UP_RAZWI_WRITE_ID) :
+ RREG32(mmMMU_UP_RAZWI_READ_ID);
+ x_y = val & ((RAZWI_INITIATOR_Y_MASK << RAZWI_INITIATOR_Y_SHIFT) |
+ (RAZWI_INITIATOR_X_MASK << RAZWI_INITIATOR_X_SHIFT));
+ axi_id = val & (RAZWI_INITIATOR_AXI_ID_MASK <<
+ RAZWI_INITIATOR_AXI_ID_SHIFT);
+
+ switch (x_y) {
+ case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC0";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ return "NIC0";
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_TPC1:
+ return "TPC1";
+ case RAZWI_INITIATOR_ID_X_Y_MME0_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME0_1:
+ return "MME0";
+ case RAZWI_INITIATOR_ID_X_Y_MME1_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME1_1:
+ return "MME1";
+ case RAZWI_INITIATOR_ID_X_Y_TPC2:
+ return "TPC2";
+ case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC3";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
+ return "PCI";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
+ return "CPU";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PSOC))
+ return "PSOC";
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
+ case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
+ return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write);
+ case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC4";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ return "NIC1";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ return "NIC2";
+ break;
+ case RAZWI_INITIATOR_ID_X_Y_TPC5:
+ return "TPC5";
+ case RAZWI_INITIATOR_ID_X_Y_MME2_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME2_1:
+ return "MME2";
+ case RAZWI_INITIATOR_ID_X_Y_MME3_0:
+ case RAZWI_INITIATOR_ID_X_Y_MME3_1:
+ return "MME3";
+ case RAZWI_INITIATOR_ID_X_Y_TPC6:
+ return "TPC6";
+ case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ return "TPC7";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ return "NIC4";
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ return "NIC5";
+ break;
+ default:
+ break;
+ }
+
+ dev_err(hdev->dev,
+ "Unknown RAZWI initiator ID 0x%x [Y=%d, X=%d, AXI_ID=%d]\n",
+ val,
+ (val >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK,
+ (val >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK,
+ (val >> RAZWI_INITIATOR_AXI_ID_SHIFT) &
+ RAZWI_INITIATOR_AXI_ID_MASK);
+
+ return "unknown initiator";
+}
+
+static void gaudi_print_razwi_info(struct hl_device *hdev)
+{
+ if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI event caused by illegal write of %s\n",
+ gaudi_get_razwi_initiator_name(hdev, true));
+ WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
+ }
+
+ if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
+ dev_err_ratelimited(hdev->dev,
+ "RAZWI event caused by illegal read of %s\n",
+ gaudi_get_razwi_initiator_name(hdev, false));
+ WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
+ }
+}
+
+static void gaudi_print_mmu_error_info(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 addr;
+ u32 val;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ return;
+
+ val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
+ if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
+ addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
+
+ dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
+ addr);
+
+ WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
+ }
+
+ val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
+ if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
+ addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
+ addr <<= 32;
+ addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
+
+ dev_err_ratelimited(hdev->dev,
+ "MMU access error on va 0x%llx\n", addr);
+
+ WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
+ }
+}
+
+/*
+ * +-------------------+------------------------------------------------------+
+ * | Configuration Reg | Description |
+ * | Address | |
+ * +-------------------+------------------------------------------------------+
+ * | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)|
+ * | |0xF30 memory wrappers 31:0 (MSB to LSB) |
+ * | |0xF34 memory wrappers 63:32 |
+ * | |0xF38 memory wrappers 95:64 |
+ * | |0xF3C memory wrappers 127:96 |
+ * +-------------------+------------------------------------------------------+
+ * | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)|
+ * | |0xF40 memory wrappers 31:0 (MSB to LSB) |
+ * | |0xF44 memory wrappers 63:32 |
+ * | |0xF48 memory wrappers 95:64 |
+ * | |0xF4C memory wrappers 127:96 |
+ * +-------------------+------------------------------------------------------+
+ */
+static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
+ const char *block_name,
+ u64 block_address, int num_memories,
+ bool derr, bool disable_clock_gating)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int num_mem_regs = num_memories / 32 + ((num_memories % 32) ? 1 : 0);
+
+ if (block_address >= CFG_BASE)
+ block_address -= CFG_BASE;
+
+ if (derr)
+ block_address += GAUDI_ECC_DERR0_OFFSET;
+ else
+ block_address += GAUDI_ECC_SERR0_OFFSET;
+
+ if (disable_clock_gating) {
+ mutex_lock(&gaudi->clk_gate_mutex);
+ hdev->asic_funcs->disable_clock_gating(hdev);
+ }
+
+ switch (num_mem_regs) {
+ case 1:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x\n",
+ block_name, RREG32(block_address));
+ break;
+ case 2:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x 0x%08x\n",
+ block_name,
+ RREG32(block_address), RREG32(block_address + 4));
+ break;
+ case 3:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x 0x%08x 0x%08x\n",
+ block_name,
+ RREG32(block_address), RREG32(block_address + 4),
+ RREG32(block_address + 8));
+ break;
+ case 4:
+ dev_err(hdev->dev,
+ "%s ECC indication: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ block_name,
+ RREG32(block_address), RREG32(block_address + 4),
+ RREG32(block_address + 8), RREG32(block_address + 0xc));
+ break;
+ default:
+ break;
+ }
+
+ if (disable_clock_gating) {
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+ }
+}
+
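+/*
+ * Scan the GLBL_STS1 register of every stream and of the lower CP, print
+ * and clear each asserted error cause, then report the ARB error causes.
+ */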
+static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
+ const char *qm_name,
+ u64 glbl_sts_addr,
+ u64 arb_err_addr)
+{
+ u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
+ char reg_desc[32];
+
+ /* Iterate through all stream GLBL_STS1 registers + Lower CP */
+ for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
+ glbl_sts_clr_val = 0;
+ glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);
+
+ if (!glbl_sts_val)
+ continue;
+
+ if (i == QMAN_STREAMS)
+ snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
+ else
+ snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
+
+ for (j = 0 ; j < GAUDI_NUM_OF_QM_ERR_CAUSE ; j++) {
+ if (glbl_sts_val & BIT(j)) {
+ dev_err_ratelimited(hdev->dev,
+ "%s %s. err cause: %s\n",
+ qm_name, reg_desc,
+ gaudi_qman_error_cause[j]);
+ glbl_sts_clr_val |= BIT(j);
+ }
+ }
+
+ /* Write 1 to clear errors */
+ WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
+ }
+
+ arb_err_val = RREG32(arb_err_addr);
+
+ if (!arb_err_val)
+ return;
+
+ for (j = 0 ; j < GAUDI_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
+ if (arb_err_val & BIT(j)) {
+ dev_err_ratelimited(hdev->dev,
+ "%s ARB_ERR. err cause: %s\n",
+ qm_name,
+ gaudi_qman_arb_error_cause[j]);
+ }
+ }
+}
+
+static void gaudi_print_ecc_info(struct hl_device *hdev, u16 event_type)
+{
+ u64 block_address;
+ u8 index;
+ int num_memories;
+ char desc[32];
+ bool derr;
+ bool disable_clock_gating;
+
+ switch (event_type) {
+ case GAUDI_EVENT_PCIE_CORE_SERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
+ block_address = mmPCIE_CORE_BASE;
+ num_memories = 51;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_CORE_DERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_CORE");
+ block_address = mmPCIE_CORE_BASE;
+ num_memories = 51;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_IF_SERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
+ block_address = mmPCIE_WRAP_BASE;
+ num_memories = 11;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_IF_DERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_WRAP");
+ block_address = mmPCIE_WRAP_BASE;
+ num_memories = 11;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_PHY_SERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
+ block_address = mmPCIE_PHY_BASE;
+ num_memories = 4;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PCIE_PHY_DERR:
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "PCIE_PHY");
+ block_address = mmPCIE_PHY_BASE;
+ num_memories = 4;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
+ index = event_type - GAUDI_EVENT_TPC0_SERR;
+ block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
+ num_memories = 90;
+ derr = false;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
+ index = event_type - GAUDI_EVENT_TPC0_DERR;
+ block_address =
+ mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC", index);
+ num_memories = 90;
+ derr = true;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_ACC_SERR:
+ case GAUDI_EVENT_MME1_ACC_SERR:
+ case GAUDI_EVENT_MME2_ACC_SERR:
+ case GAUDI_EVENT_MME3_ACC_SERR:
+ index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
+ block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
+ num_memories = 128;
+ derr = false;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_ACC_DERR:
+ case GAUDI_EVENT_MME1_ACC_DERR:
+ case GAUDI_EVENT_MME2_ACC_DERR:
+ case GAUDI_EVENT_MME3_ACC_DERR:
+ index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
+ block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_ACC", index);
+ num_memories = 128;
+ derr = true;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_SBAB_SERR:
+ case GAUDI_EVENT_MME1_SBAB_SERR:
+ case GAUDI_EVENT_MME2_SBAB_SERR:
+ case GAUDI_EVENT_MME3_SBAB_SERR:
+ index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
+ block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
+ num_memories = 33;
+ derr = false;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_MME0_SBAB_DERR:
+ case GAUDI_EVENT_MME1_SBAB_DERR:
+ case GAUDI_EVENT_MME2_SBAB_DERR:
+ case GAUDI_EVENT_MME3_SBAB_DERR:
+ index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
+ block_address = mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "MME%d_SBAB", index);
+ num_memories = 33;
+ derr = true;
+ disable_clock_gating = true;
+ break;
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
+ index = event_type - GAUDI_EVENT_DMA0_SERR_ECC;
+ block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
+ num_memories = 16;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
+ index = event_type - GAUDI_EVENT_DMA0_DERR_ECC;
+ block_address = mmDMA0_CORE_BASE + index * DMA_CORE_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "DMA%d_CORE", index);
+ num_memories = 16;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_CPU_IF_ECC_SERR:
+ block_address = mmCPU_IF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_CPU_IF_ECC_DERR:
+ block_address = mmCPU_IF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_MEM_SERR:
+ block_address = mmPSOC_GLOBAL_CONF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_MEM_DERR:
+ block_address = mmPSOC_GLOBAL_CONF_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 4;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
+ block_address = mmPSOC_CS_TRACE_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 2;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
+ block_address = mmPSOC_CS_TRACE_BASE;
+ snprintf(desc, ARRAY_SIZE(desc), "%s", "CPU");
+ num_memories = 2;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
+ index = event_type - GAUDI_EVENT_SRAM0_SERR;
+ block_address =
+ mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
+ num_memories = 2;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
+ index = event_type - GAUDI_EVENT_SRAM0_DERR;
+ block_address =
+ mmSRAM_Y0_X0_BANK_BASE + index * SRAM_BANK_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "SRAM%d", index);
+ num_memories = 2;
+ derr = true;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
+ index = event_type - GAUDI_EVENT_DMA_IF0_SERR;
+ block_address = mmDMA_IF_W_S_BASE +
+ index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
+ num_memories = 60;
+ derr = false;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
+ index = event_type - GAUDI_EVENT_DMA_IF0_DERR;
+ block_address = mmDMA_IF_W_S_BASE +
+ index * (mmDMA_IF_E_S_BASE - mmDMA_IF_W_S_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "DMA_IF%d", index);
+ derr = true;
+ num_memories = 60;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
+ index = event_type - GAUDI_EVENT_HBM_0_SERR;
+ /* HBM Registers are at different offsets */
+ block_address = mmHBM0_BASE + 0x8000 +
+ index * (mmHBM1_BASE - mmHBM0_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
+ derr = false;
+ num_memories = 64;
+ disable_clock_gating = false;
+ break;
+ case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
+ index = event_type - GAUDI_EVENT_HBM_0_DERR;
+ /* HBM Registers are at different offsets */
+ block_address = mmHBM0_BASE + 0x8000 +
+ index * (mmHBM1_BASE - mmHBM0_BASE);
+ snprintf(desc, ARRAY_SIZE(desc), "HBM%d", index);
+ derr = true;
+ num_memories = 64;
+ disable_clock_gating = false;
+ break;
+ default:
+ return;
+ }
+
+ gaudi_print_ecc_info_generic(hdev, desc, block_address, num_memories,
+ derr, disable_clock_gating);
+}
+
+static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
+{
+ u64 glbl_sts_addr, arb_err_addr;
+ u8 index;
+ char desc[32];
+
+ switch (event_type) {
+ case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ index = event_type - GAUDI_EVENT_TPC0_QM;
+ glbl_sts_addr =
+ mmTPC0_QM_GLBL_STS1_0 + index * TPC_QMAN_OFFSET;
+ arb_err_addr =
+ mmTPC0_QM_ARB_ERR_CAUSE + index * TPC_QMAN_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
+ break;
+ case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
+ index = event_type - GAUDI_EVENT_MME0_QM;
+ glbl_sts_addr =
+ mmMME0_QM_GLBL_STS1_0 + index * MME_QMAN_OFFSET;
+ arb_err_addr =
+ mmMME0_QM_ARB_ERR_CAUSE + index * MME_QMAN_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
+ break;
+ case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
+ index = event_type - GAUDI_EVENT_DMA0_QM;
+ glbl_sts_addr =
+ mmDMA0_QM_GLBL_STS1_0 + index * DMA_QMAN_OFFSET;
+ arb_err_addr =
+ mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET;
+ snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
+ break;
+ default:
+ return;
+ }
+
+ gaudi_handle_qman_err_generic(hdev, desc, glbl_sts_addr, arb_err_addr);
+}
+
+static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
+ bool razwi)
+{
+ char desc[64] = "";
+
+ gaudi_get_event_desc(event_type, desc, sizeof(desc));
+ dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
+ event_type, desc);
+
+ gaudi_print_ecc_info(hdev, event_type);
+
+ if (razwi) {
+ gaudi_print_razwi_info(hdev);
+ gaudi_print_mmu_error_info(hdev);
+ }
+}
+
+static int gaudi_soft_reset_late_init(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ /* Unmask all IRQs since some could have been received
+ * during the soft reset
+ */
+ return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events));
+}
+
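+/*
+ * Report and clear the per-channel interrupt and ECC indications of one HBM
+ * device, as well as its MC SRAM SERR/DERR registers.
+ */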
+static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device)
+{
+ int ch, err = 0;
+ u32 base, val, val2;
+
+ base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
+ for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
+ val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
+ val = (val & 0xFF) | ((val >> 8) & 0xFF);
+ if (val) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+ device, ch * 2, val & 0x1, (val >> 1) & 0x1,
+ (val >> 2) & 0x1, (val >> 3) & 0x1,
+ (val >> 4) & 0x1);
+
+ val2 = RREG32(base + ch * 0x1000 + 0x060);
+ dev_err(hdev->dev,
+ "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
+ device, ch * 2,
+ RREG32(base + ch * 0x1000 + 0x064),
+ (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
+ (val2 & 0xFF0000) >> 16,
+ (val2 & 0xFF000000) >> 24);
+ }
+
+ val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF);
+ val = (val & 0xFF) | ((val >> 8) & 0xFF);
+ if (val) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
+ device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1,
+ (val >> 2) & 0x1, (val >> 3) & 0x1,
+ (val >> 4) & 0x1);
+
+ val2 = RREG32(base + ch * 0x1000 + 0x070);
+ dev_err(hdev->dev,
+ "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n",
+ device, ch * 2 + 1,
+ RREG32(base + ch * 0x1000 + 0x074),
+ (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
+ (val2 & 0xFF0000) >> 16,
+ (val2 & 0xFF000000) >> 24);
+ }
+
+ /* Clear interrupts */
+ RMWREG32(base + (ch * 0x1000) + 0x060, 0x1C8, 0x1FF);
+ RMWREG32(base + (ch * 0x1000) + 0x070, 0x1C8, 0x1FF);
+ WREG32(base + (ch * 0x1000) + 0x06C, 0x1F1F);
+ WREG32(base + (ch * 0x1000) + 0x07C, 0x1F1F);
+ RMWREG32(base + (ch * 0x1000) + 0x060, 0x0, 0xF);
+ RMWREG32(base + (ch * 0x1000) + 0x070, 0x0, 0xF);
+ }
+
+ val = RREG32(base + 0x8F30);
+ val2 = RREG32(base + 0x8F34);
+ if (val | val2) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n",
+ device, val, val2);
+ }
+ val = RREG32(base + 0x8F40);
+ val2 = RREG32(base + 0x8F44);
+ if (val | val2) {
+ err = 1;
+ dev_err(hdev->dev,
+ "HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n",
+ device, val, val2);
+ }
+
+ return err;
+}
+
+static int gaudi_hbm_event_to_dev(u16 hbm_event_type)
+{
+ switch (hbm_event_type) {
+ case GAUDI_EVENT_HBM0_SPI_0:
+ case GAUDI_EVENT_HBM0_SPI_1:
+ return 0;
+ case GAUDI_EVENT_HBM1_SPI_0:
+ case GAUDI_EVENT_HBM1_SPI_1:
+ return 1;
+ case GAUDI_EVENT_HBM2_SPI_0:
+ case GAUDI_EVENT_HBM2_SPI_1:
+ return 2;
+ case GAUDI_EVENT_HBM3_SPI_0:
+ case GAUDI_EVENT_HBM3_SPI_1:
+ return 3;
+ default:
+ break;
+ }
+
+ /* Should never happen */
+ return 0;
+}
+
+static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
+ char *interrupt_name)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 tpc_offset = tpc_id * TPC_CFG_OFFSET, tpc_interrupts_cause, i;
+ bool soft_reset_required = false;
+
+ /* Accessing the TPC_INTR_CAUSE registers requires disabling clock
+ * gating, so it cannot be done in ArmCP and should instead be done
+ * by the driver.
+ */
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ tpc_interrupts_cause = RREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset) &
+ TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK;
+
+ for (i = 0 ; i < GAUDI_NUM_OF_TPC_INTR_CAUSE ; i++)
+ if (tpc_interrupts_cause & BIT(i)) {
+ dev_err_ratelimited(hdev->dev,
+ "TPC%d_%s interrupt cause: %s\n",
+ tpc_id, interrupt_name,
+ gaudi_tpc_interrupts_cause[i]);
+ /* If this is QM error, we need to soft-reset */
+ if (i == 15)
+ soft_reset_required = true;
+ }
+
+ /* Clear interrupts */
+ WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ return soft_reset_required;
+}
+
+static int tpc_dec_event_to_tpc_id(u16 tpc_dec_event_type)
+{
+ return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1;
+}
+
+static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
+{
+ return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
+}
+
+static void gaudi_print_clk_change_info(struct hl_device *hdev,
+ u16 event_type)
+{
+ switch (event_type) {
+ case GAUDI_EVENT_FIX_POWER_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to power consumption\n");
+ break;
+
+ case GAUDI_EVENT_FIX_POWER_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+ "Power envelop is safe, back to optimal clock\n");
+ break;
+
+ case GAUDI_EVENT_FIX_THERMAL_ENV_S:
+ dev_info_ratelimited(hdev->dev,
+ "Clock throttling due to overheating\n");
+ break;
+
+ case GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ dev_info_ratelimited(hdev->dev,
+ "Thermal envelop is safe, back to optimal clock\n");
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid clock change event %d\n",
+ event_type);
+ break;
+ }
+}
+
+static void gaudi_handle_eqe(struct hl_device *hdev,
+ struct hl_eq_entry *eq_entry)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
+ >> EQ_CTL_EVENT_TYPE_SHIFT);
+ u8 cause;
+ bool reset_required;
+
+ gaudi->events_stat[event_type]++;
+ gaudi->events_stat_aggregate[event_type]++;
+
+ switch (event_type) {
+ case GAUDI_EVENT_PCIE_CORE_DERR:
+ case GAUDI_EVENT_PCIE_IF_DERR:
+ case GAUDI_EVENT_PCIE_PHY_DERR:
+ case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
+ case GAUDI_EVENT_MME0_ACC_DERR:
+ case GAUDI_EVENT_MME0_SBAB_DERR:
+ case GAUDI_EVENT_MME1_ACC_DERR:
+ case GAUDI_EVENT_MME1_SBAB_DERR:
+ case GAUDI_EVENT_MME2_ACC_DERR:
+ case GAUDI_EVENT_MME2_SBAB_DERR:
+ case GAUDI_EVENT_MME3_ACC_DERR:
+ case GAUDI_EVENT_MME3_SBAB_DERR:
+ case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
+ fallthrough;
+ case GAUDI_EVENT_CPU_IF_ECC_DERR:
+ case GAUDI_EVENT_PSOC_MEM_DERR:
+ case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
+ case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
+ case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
+ fallthrough;
+ case GAUDI_EVENT_GIC500:
+ case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
+ case GAUDI_EVENT_MMU_DERR:
+ case GAUDI_EVENT_AXI_ECC:
+ case GAUDI_EVENT_L2_RAM_ECC:
+ case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
+ gaudi_print_irq_info(hdev, event_type, false);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_HBM0_SPI_0:
+ case GAUDI_EVENT_HBM1_SPI_0:
+ case GAUDI_EVENT_HBM2_SPI_0:
+ case GAUDI_EVENT_HBM3_SPI_0:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_hbm_read_interrupts(hdev,
+ gaudi_hbm_event_to_dev(event_type));
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_HBM0_SPI_1:
+ case GAUDI_EVENT_HBM1_SPI_1:
+ case GAUDI_EVENT_HBM2_SPI_1:
+ case GAUDI_EVENT_HBM3_SPI_1:
+ gaudi_print_irq_info(hdev, event_type, false);
+ gaudi_hbm_read_interrupts(hdev,
+ gaudi_hbm_event_to_dev(event_type));
+ break;
+
+ case GAUDI_EVENT_TPC0_DEC:
+ case GAUDI_EVENT_TPC1_DEC:
+ case GAUDI_EVENT_TPC2_DEC:
+ case GAUDI_EVENT_TPC3_DEC:
+ case GAUDI_EVENT_TPC4_DEC:
+ case GAUDI_EVENT_TPC5_DEC:
+ case GAUDI_EVENT_TPC6_DEC:
+ case GAUDI_EVENT_TPC7_DEC:
+ gaudi_print_irq_info(hdev, event_type, true);
+ reset_required = gaudi_tpc_read_interrupts(hdev,
+ tpc_dec_event_to_tpc_id(event_type),
+ "AXI_SLV_DEC_Error");
+ if (reset_required) {
+ dev_err(hdev->dev, "hard reset required due to %s\n",
+ gaudi_irq_map_table[event_type].name);
+
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ } else {
+ hl_fw_unmask_irq(hdev, event_type);
+ }
+ break;
+
+ case GAUDI_EVENT_TPC0_KRN_ERR:
+ case GAUDI_EVENT_TPC1_KRN_ERR:
+ case GAUDI_EVENT_TPC2_KRN_ERR:
+ case GAUDI_EVENT_TPC3_KRN_ERR:
+ case GAUDI_EVENT_TPC4_KRN_ERR:
+ case GAUDI_EVENT_TPC5_KRN_ERR:
+ case GAUDI_EVENT_TPC6_KRN_ERR:
+ case GAUDI_EVENT_TPC7_KRN_ERR:
+ gaudi_print_irq_info(hdev, event_type, true);
+ reset_required = gaudi_tpc_read_interrupts(hdev,
+ tpc_krn_event_to_tpc_id(event_type),
+ "KRN_ERR");
+ if (reset_required) {
+ dev_err(hdev->dev, "hard reset required due to %s\n",
+ gaudi_irq_map_table[event_type].name);
+
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ } else {
+ hl_fw_unmask_irq(hdev, event_type);
+ }
+ break;
+
+ case GAUDI_EVENT_PCIE_CORE_SERR:
+ case GAUDI_EVENT_PCIE_IF_SERR:
+ case GAUDI_EVENT_PCIE_PHY_SERR:
+ case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
+ case GAUDI_EVENT_MME0_ACC_SERR:
+ case GAUDI_EVENT_MME0_SBAB_SERR:
+ case GAUDI_EVENT_MME1_ACC_SERR:
+ case GAUDI_EVENT_MME1_SBAB_SERR:
+ case GAUDI_EVENT_MME2_ACC_SERR:
+ case GAUDI_EVENT_MME2_SBAB_SERR:
+ case GAUDI_EVENT_MME3_ACC_SERR:
+ case GAUDI_EVENT_MME3_SBAB_SERR:
+ case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
+ case GAUDI_EVENT_CPU_IF_ECC_SERR:
+ case GAUDI_EVENT_PSOC_MEM_SERR:
+ case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
+ case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
+ case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
+ case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
+ fallthrough;
+ case GAUDI_EVENT_MMU_SERR:
+ case GAUDI_EVENT_PCIE_DEC:
+ case GAUDI_EVENT_MME0_WBC_RSP:
+ case GAUDI_EVENT_MME0_SBAB0_RSP:
+ case GAUDI_EVENT_MME1_WBC_RSP:
+ case GAUDI_EVENT_MME1_SBAB0_RSP:
+ case GAUDI_EVENT_MME2_WBC_RSP:
+ case GAUDI_EVENT_MME2_SBAB0_RSP:
+ case GAUDI_EVENT_MME3_WBC_RSP:
+ case GAUDI_EVENT_MME3_SBAB0_RSP:
+ case GAUDI_EVENT_CPU_AXI_SPLITTER:
+ case GAUDI_EVENT_PSOC_AXI_DEC:
+ case GAUDI_EVENT_PSOC_PRSTN_FALL:
+ case GAUDI_EVENT_MMU_PAGE_FAULT:
+ case GAUDI_EVENT_MMU_WR_PERM:
+ case GAUDI_EVENT_RAZWI_OR_ADC:
+ case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
+ case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
+ case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
+ fallthrough;
+ case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
+ gaudi_print_irq_info(hdev, event_type, true);
+ gaudi_handle_qman_err(hdev, event_type);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
+ case GAUDI_EVENT_RAZWI_OR_ADC_SW:
+ gaudi_print_irq_info(hdev, event_type, true);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, true, false);
+ break;
+
+ case GAUDI_EVENT_TPC0_BMON_SPMU:
+ case GAUDI_EVENT_TPC1_BMON_SPMU:
+ case GAUDI_EVENT_TPC2_BMON_SPMU:
+ case GAUDI_EVENT_TPC3_BMON_SPMU:
+ case GAUDI_EVENT_TPC4_BMON_SPMU:
+ case GAUDI_EVENT_TPC5_BMON_SPMU:
+ case GAUDI_EVENT_TPC6_BMON_SPMU:
+ case GAUDI_EVENT_TPC7_BMON_SPMU:
+ case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
+ gaudi_print_irq_info(hdev, event_type, false);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
+ case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
+ gaudi_print_clk_change_info(hdev, event_type);
+ hl_fw_unmask_irq(hdev, event_type);
+ break;
+
+ case GAUDI_EVENT_PSOC_GPIO_U16_0:
+ cause = le64_to_cpu(eq_entry->data[0]) & 0xFF;
+ dev_err(hdev->dev,
+ "Received high temp H/W interrupt %d (cause %d)\n",
+ event_type, cause);
+ break;
+
+ default:
+ dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
+ event_type);
+ break;
+ }
+}
+
+static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
+ u32 *size)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (aggregate) {
+ *size = (u32) sizeof(gaudi->events_stat_aggregate);
+ return gaudi->events_stat_aggregate;
+ }
+
+ *size = (u32) sizeof(gaudi->events_stat);
+ return gaudi->events_stat;
+}
+
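+/*
+ * Full L0 & L1 STLB invalidation: kick mmSTLB_INV_PS and poll until the
+ * H/W clears it. A timeout escalates to a device reset.
+ */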
+static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+ u32 flags)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, timeout_usec;
+ int rc;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
+ return 0;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ /* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_PS, 2);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_PS,
+ status,
+ !status,
+ 1000,
+ timeout_usec);
+
+ WREG32(mmSTLB_INV_SET, 0);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
+}
+
+static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 status, timeout_usec;
+ u32 inv_data;
+ u32 pi;
+ int rc;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
+ hdev->hard_reset_pending)
+ return 0;
+
+ mutex_lock(&hdev->mmu_cache_lock);
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
+ /*
+ * TODO: currently invalidate entire L0 & L1 as in regular hard
+ * invalidation. Need to apply invalidation of specific cache
+ * lines with mask of ASID & VA & size.
+ * Note that L1 will be flushed entirely in any case.
+ */
+
+ /* L0 & L1 invalidation */
+ inv_data = RREG32(mmSTLB_CACHE_INV);
+ /* PI is 8 bit */
+ pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
+ WREG32(mmSTLB_CACHE_INV,
+ (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
+
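+ /* wait for the H/W consumer index to catch up with the new producer index */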
+ rc = hl_poll_timeout(
+ hdev,
+ mmSTLB_INV_CONSUMER_INDEX,
+ status,
+ status == pi,
+ 1000,
+ timeout_usec);
+
+ mutex_unlock(&hdev->mmu_cache_lock);
+
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
+}
+
+static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
+ u32 asid, u64 phys_addr)
+{
+ u32 status, timeout_usec;
+ int rc;
+
+ if (hdev->pldm)
+ timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
+ else
+ timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
+
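+ /* program the hop0 page-table address for this ASID and kick the H/W */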
+ WREG32(MMU_ASID, asid);
+ WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
+ WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
+ WREG32(MMU_BUSY, 0x80000000);
+
+ rc = hl_poll_timeout(
+ hdev,
+ MMU_BUSY,
+ status,
+ !(status & 0x80000000),
+ 1000,
+ timeout_usec);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout during MMU hop0 config of asid %d\n", asid);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gaudi_send_heartbeat(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_send_heartbeat(hdev);
+}
+
+static int gaudi_armcp_info_get(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int rc;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ rc = hl_fw_armcp_info_get(hdev);
+ if (rc)
+ return rc;
+
+ if (!strlen(prop->armcp_info.card_name))
+ strncpy(prop->armcp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
+ CARD_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
+ struct seq_file *s)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
+ const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
+ u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
+ bool is_idle = true, is_eng_idle, is_slave;
+ u64 offset;
+ int i, dma_id;
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ if (s)
+ seq_puts(s,
+ "\nDMA is_idle QM_GLBL_STS0 QM_CGM_STS DMA_CORE_STS0\n"
+ "--- ------- ------------ ---------- -------------\n");
+
+ for (i = 0 ; i < DMA_NUMBER_OF_CHNLS ; i++) {
+ dma_id = gaudi_dma_assignment[i];
+ offset = dma_id * DMA_QMAN_OFFSET;
+
+ qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + offset);
+ qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + offset);
+ dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_sts0);
+ is_idle &= is_eng_idle;
+
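+ /* set the engine's bit in the caller's mask if it is not idle */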
+ if (mask)
+ *mask |= !is_eng_idle <<
+ (GAUDI_ENGINE_ID_DMA_0 + dma_id);
+ if (s)
+ seq_printf(s, fmt, dma_id,
+ is_eng_idle ? "Y" : "N", qm_glbl_sts0,
+ qm_cgm_sts, dma_core_sts0);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nTPC is_idle QM_GLBL_STS0 QM_CGM_STS CFG_STATUS\n"
+ "--- ------- ------------ ---------- ----------\n");
+
+ for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
+ offset = i * TPC_QMAN_OFFSET;
+ qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + offset);
+ qm_cgm_sts = RREG32(mmTPC0_QM_CGM_STS + offset);
+ tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_TPC_IDLE(tpc_cfg_sts);
+ is_idle &= is_eng_idle;
+
+ if (mask)
+ *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_TPC_0 + i);
+ if (s)
+ seq_printf(s, fmt, i,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
+ }
+
+ if (s)
+ seq_puts(s,
+ "\nMME is_idle QM_GLBL_STS0 QM_CGM_STS ARCH_STATUS\n"
+ "--- ------- ------------ ---------- -----------\n");
+
+ for (i = 0 ; i < MME_NUMBER_OF_ENGINES ; i++) {
+ offset = i * MME_QMAN_OFFSET;
+ mme_arch_sts = RREG32(mmMME0_CTRL_ARCH_STATUS + offset);
+ is_eng_idle = IS_MME_IDLE(mme_arch_sts);
+
+ /* MME 1 & 3 are slaves, no need to check their QMANs */
+ is_slave = i % 2;
+ if (!is_slave) {
+ qm_glbl_sts0 = RREG32(mmMME0_QM_GLBL_STS0 + offset);
+ qm_cgm_sts = RREG32(mmMME0_QM_CGM_STS + offset);
+ is_eng_idle &= IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
+ }
+
+ is_idle &= is_eng_idle;
+
+ if (mask)
+ *mask |= !is_eng_idle << (GAUDI_ENGINE_ID_MME_0 + i);
+ if (s) {
+ if (!is_slave)
+ seq_printf(s, fmt, i,
+ is_eng_idle ? "Y" : "N",
+ qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
+ else
+ seq_printf(s, mme_slave_fmt, i,
+ is_eng_idle ? "Y" : "N", "-",
+ "-", mme_arch_sts);
+ }
+ }
+
+ if (s)
+ seq_puts(s, "\n");
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ return is_idle;
+}
+
+static void gaudi_hw_queues_lock(struct hl_device *hdev)
+ __acquires(&gaudi->hw_queues_lock)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ spin_lock(&gaudi->hw_queues_lock);
+}
+
+static void gaudi_hw_queues_unlock(struct hl_device *hdev)
+ __releases(&gaudi->hw_queues_lock)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ spin_unlock(&gaudi->hw_queues_lock);
+}
+
+static u32 gaudi_get_pci_id(struct hl_device *hdev)
+{
+ return hdev->pdev->device;
+}
+
+static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
+ size_t max_size)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
+ return hl_fw_get_eeprom_data(hdev, data, max_size);
+}
+
+/*
+ * this function should be used only during initialization and/or after reset,
+ * when there are no active users.
+ */
+static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
+ u32 tpc_id)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ u64 kernel_timeout;
+ u32 status, offset;
+ int rc;
+
+ offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS);
+
+ if (hdev->pldm)
+ kernel_timeout = GAUDI_PLDM_TPC_KERNEL_WAIT_USEC;
+ else
+ kernel_timeout = HL_DEVICE_TIMEOUT_USEC;
+
+ mutex_lock(&gaudi->clk_gate_mutex);
+
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
+ WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW + offset,
+ lower_32_bits(tpc_kernel));
+ WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH + offset,
+ upper_32_bits(tpc_kernel));
+
+ WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW + offset,
+ lower_32_bits(tpc_kernel));
+ WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH + offset,
+ upper_32_bits(tpc_kernel));
+ /* set a valid LUT pointer, content is of no significance */
+ WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO + offset,
+ lower_32_bits(tpc_kernel));
+ WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI + offset,
+ upper_32_bits(tpc_kernel));
+
+ WREG32(mmTPC0_CFG_QM_SYNC_OBJECT_ADDR + offset,
+ lower_32_bits(CFG_BASE +
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0));
+
+ WREG32(mmTPC0_CFG_TPC_CMD + offset,
+ (1 << TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT |
+ 1 << TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT));
+ /* wait a bit for the icache invalidate/prefetch command to take effect */
+ usleep_range(1000, 1500);
+
+ /* wait until the icache prefetch has finished */
+ rc = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_STATUS + offset,
+ status,
+ (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
+ TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
+ 1000,
+ kernel_timeout);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d icache prefetch\n",
+ tpc_id);
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+ return -EIO;
+ }
+
+ WREG32(mmTPC0_CFG_TPC_EXECUTE + offset,
+ 1 << TPC0_CFG_TPC_EXECUTE_V_SHIFT);
+
+ /* wait a bit for the engine to start executing */
+ usleep_range(1000, 1500);
+
+ /* wait until engine has finished executing */
+ rc = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_STATUS + offset,
+ status,
+ (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
+ TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
+ 1000,
+ kernel_timeout);
+
+ rc = hl_poll_timeout(
+ hdev,
+ mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset,
+ status,
+ (status == 0),
+ 1000,
+ kernel_timeout);
+
+ hdev->asic_funcs->enable_clock_gating(hdev);
+ mutex_unlock(&gaudi->clk_gate_mutex);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for TPC%d kernel to execute\n",
+ tpc_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
+{
+ return RREG32(mmHW_STATE);
+}
+
+static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+{
+ return gaudi_cq_assignment[cq_idx];
+}
+
+static void gaudi_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+ struct hl_hw_sob *hw_sob;
+ int sob, ext_idx = gaudi->ext_queue_idx++;
+
+ /*
+ * The external queues might not sit sequentially, hence use the
+ * real external queue index for the SOB/MON base id.
+ */
+ hw_queue->base_sob_id = ext_idx * HL_RSVD_SOBS;
+ hw_queue->base_mon_id = ext_idx * HL_RSVD_MONS;
+ hw_queue->next_sob_val = 1;
+ hw_queue->curr_sob_offset = 0;
+
+ for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) {
+ hw_sob = &hw_queue->hw_sob[sob];
+ hw_sob->hdev = hdev;
+ hw_sob->sob_id = hw_queue->base_sob_id + sob;
+ hw_sob->q_idx = q_idx;
+ kref_init(&hw_sob->kref);
+ }
+}
+
+static void gaudi_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+ struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx];
+
+ /*
+ * In case we got here due to a stuck CS, the refcnt might be bigger
+ * than 1 and therefore we reset it.
+ */
+ kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref);
+ hw_queue->curr_sob_offset = 0;
+ hw_queue->next_sob_val = 1;
+}
+
+static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
+{
+ return sizeof(struct packet_msg_short) +
+ sizeof(struct packet_msg_prot) * 2;
+}
+
+static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
+{
+ return sizeof(struct packet_msg_short) * 4 +
+ sizeof(struct packet_fence) +
+ sizeof(struct packet_msg_prot) * 2;
+}
+
+static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
+{
+ struct hl_cb *cb = (struct hl_cb *) data;
+ struct packet_msg_short *pkt;
+ u32 value, ctl;
+
+ pkt = (struct packet_msg_short *) (uintptr_t) cb->kernel_address;
+ memset(pkt, 0, sizeof(*pkt));
+
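+ /* a single MSG_SHORT packet that increments the target SOB by 1 (ADD mode) */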
+ value = 1 << GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT; /* inc by 1 */
+ value |= 1 << GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT; /* add mode */
+
+ ctl = (sob_id * 4) << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT; /* SOB id */
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
+ ctl |= 3 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S SOB base */
+ ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+}
+
+static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
+ u16 addr)
+{
+ u32 ctl, pkt_size = sizeof(*pkt);
+
+ memset(pkt, 0, pkt_size);
+
+ ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
+ ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
+ ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_MB_SHIFT; /* only last pkt needs MB */
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id,
+ u16 sob_val, u16 addr)
+{
+ u32 ctl, value, pkt_size = sizeof(*pkt);
+ u8 mask = ~(1 << (sob_id & 0x7));
+
+ memset(pkt, 0, pkt_size);
+
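+ /*
+ * The monitor works on a group of 8 SOBs; the mask selects the single
+ * SOB of interest within that group.
+ */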
+ value = (sob_id / 8) << GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT;
+ value |= sob_val << GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT;
+ value |= 0 << GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT; /* GREATER_OR_EQUAL */
+ value |= mask << GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT;
+
+ ctl = addr << GAUDI_PKT_SHORT_CTL_ADDR_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_OP_SHIFT; /* write the value */
+ ctl |= 2 << GAUDI_PKT_SHORT_CTL_BASE_SHIFT; /* W_S MON base */
+ ctl |= PACKET_MSG_SHORT << GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT;
+ ctl |= 0 << GAUDI_PKT_SHORT_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_RB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_SHORT_CTL_MB_SHIFT;
+
+ pkt->value = cpu_to_le32(value);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
+{
+ u32 ctl, cfg, pkt_size = sizeof(*pkt);
+
+ memset(pkt, 0, pkt_size);
+
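+ /* FENCE: wait for fence counter #2 to reach 1, then decrement it */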
+ cfg = 1 << GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT;
+ cfg |= 1 << GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT;
+ cfg |= 2 << GAUDI_PKT_FENCE_CFG_ID_SHIFT;
+
+ ctl = 0 << GAUDI_PKT_FENCE_CTL_PRED_SHIFT;
+ ctl |= PACKET_FENCE << GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT;
+ ctl |= 0 << GAUDI_PKT_FENCE_CTL_EB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_FENCE_CTL_RB_SHIFT;
+ ctl |= 1 << GAUDI_PKT_FENCE_CTL_MB_SHIFT;
+
+ pkt->cfg = cpu_to_le32(cfg);
+ pkt->ctl = cpu_to_le32(ctl);
+
+ return pkt_size;
+}
+
+static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
+ u16 sob_val, u16 mon_id, u32 q_idx)
+{
+ struct hl_cb *cb = (struct hl_cb *) data;
+ void *buf = (void *) (uintptr_t) cb->kernel_address;
+ u64 monitor_base, fence_addr = 0;
+ u32 size = 0;
+ u16 msg_addr_offset;
+
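+ /*
+ * The wait CB consists of four monitor-config MSG_SHORT packets
+ * (fence address low/high, payload, arm) followed by a FENCE packet
+ * that stalls the queue until the monitor writes its payload.
+ */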
+ switch (q_idx) {
+ case GAUDI_QUEUE_ID_DMA_0_0:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0;
+ break;
+ case GAUDI_QUEUE_ID_DMA_0_1:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_1;
+ break;
+ case GAUDI_QUEUE_ID_DMA_0_2:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2;
+ break;
+ case GAUDI_QUEUE_ID_DMA_0_3:
+ fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_0:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_1:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_2:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2;
+ break;
+ case GAUDI_QUEUE_ID_DMA_1_3:
+ fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_0:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_1:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_2:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2;
+ break;
+ case GAUDI_QUEUE_ID_DMA_5_3:
+ fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3;
+ break;
+ default:
+ /* should never get here - unexpected queue index */
+ dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
+ q_idx);
+ return;
+ }
+
+ fence_addr += CFG_BASE;
+
+ /*
+ * monitor_base should be the content of the base0 address registers,
+ * so it will be added to the msg short offsets
+ */
+ monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;
+
+ /* First monitor config packet: low address of the sync */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) -
+ monitor_base;
+
+ size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr,
+ msg_addr_offset);
+
+ /* Second monitor config packet: high address of the sync */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) -
+ monitor_base;
+
+ size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32),
+ msg_addr_offset);
+
+ /*
+ * Third monitor config packet: the payload, i.e. what to write when the
+ * sync triggers
+ */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) -
+ monitor_base;
+
+ size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset);
+
+ /* Fourth monitor config packet: bind the monitor to a sync object */
+ msg_addr_offset =
+ (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
+ monitor_base;
+ size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val,
+ msg_addr_offset);
+
+ /* Fence packet */
+ size += gaudi_add_fence_pkt(buf + size);
+}
+
+static void gaudi_reset_sob(struct hl_device *hdev, void *data)
+{
+ struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;
+
+ dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
+ hw_sob->sob_id);
+
+ WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4,
+ 0);
+
+ kref_init(&hw_sob->kref);
+}
+
+static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
+{
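+ /*
+ * A magic value in a scratchpad register that survives reset indicates a
+ * POWER9 host, which can use a full 64-bit DMA mask. Otherwise fall back
+ * to a 48-bit mask.
+ */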
+ if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
+ HL_POWER9_HOST_MAGIC) {
+ hdev->power9_64bit_dma_enable = 1;
+ hdev->dma_mask = 64;
+ } else {
+ hdev->power9_64bit_dma_enable = 0;
+ hdev->dma_mask = 48;
+ }
+}
+
+static u64 gaudi_get_device_time(struct hl_device *hdev)
+{
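+ /* the device timestamp is a 64-bit value split across two 32-bit registers */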
+ u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
+
+ return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
+}
+
+static const struct hl_asic_funcs gaudi_funcs = {
+ .early_init = gaudi_early_init,
+ .early_fini = gaudi_early_fini,
+ .late_init = gaudi_late_init,
+ .late_fini = gaudi_late_fini,
+ .sw_init = gaudi_sw_init,
+ .sw_fini = gaudi_sw_fini,
+ .hw_init = gaudi_hw_init,
+ .hw_fini = gaudi_hw_fini,
+ .halt_engines = gaudi_halt_engines,
+ .suspend = gaudi_suspend,
+ .resume = gaudi_resume,
+ .cb_mmap = gaudi_cb_mmap,
+ .ring_doorbell = gaudi_ring_doorbell,
+ .pqe_write = gaudi_pqe_write,
+ .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
+ .asic_dma_free_coherent = gaudi_dma_free_coherent,
+ .get_int_queue_base = gaudi_get_int_queue_base,
+ .test_queues = gaudi_test_queues,
+ .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
+ .asic_dma_pool_free = gaudi_dma_pool_free,
+ .cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
+ .cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
+ .hl_dma_unmap_sg = gaudi_dma_unmap_sg,
+ .cs_parser = gaudi_cs_parser,
+ .asic_dma_map_sg = gaudi_dma_map_sg,
+ .get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
+ .add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
+ .update_eq_ci = gaudi_update_eq_ci,
+ .context_switch = gaudi_context_switch,
+ .restore_phase_topology = gaudi_restore_phase_topology,
+ .debugfs_read32 = gaudi_debugfs_read32,
+ .debugfs_write32 = gaudi_debugfs_write32,
+ .debugfs_read64 = gaudi_debugfs_read64,
+ .debugfs_write64 = gaudi_debugfs_write64,
+ .add_device_attr = gaudi_add_device_attr,
+ .handle_eqe = gaudi_handle_eqe,
+ .set_pll_profile = gaudi_set_pll_profile,
+ .get_events_stat = gaudi_get_events_stat,
+ .read_pte = gaudi_read_pte,
+ .write_pte = gaudi_write_pte,
+ .mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
+ .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
+ .send_heartbeat = gaudi_send_heartbeat,
+ .enable_clock_gating = gaudi_enable_clock_gating,
+ .disable_clock_gating = gaudi_disable_clock_gating,
+ .debug_coresight = gaudi_debug_coresight,
+ .is_device_idle = gaudi_is_device_idle,
+ .soft_reset_late_init = gaudi_soft_reset_late_init,
+ .hw_queues_lock = gaudi_hw_queues_lock,
+ .hw_queues_unlock = gaudi_hw_queues_unlock,
+ .get_pci_id = gaudi_get_pci_id,
+ .get_eeprom_data = gaudi_get_eeprom_data,
+ .send_cpu_message = gaudi_send_cpu_message,
+ .get_hw_state = gaudi_get_hw_state,
+ .pci_bars_map = gaudi_pci_bars_map,
+ .set_dram_bar_base = gaudi_set_hbm_bar_base,
+ .init_iatu = gaudi_init_iatu,
+ .rreg = hl_rreg,
+ .wreg = hl_wreg,
+ .halt_coresight = gaudi_halt_coresight,
+ .get_clk_rate = gaudi_get_clk_rate,
+ .get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
+ .read_device_fw_version = gaudi_read_device_fw_version,
+ .load_firmware_to_device = gaudi_load_firmware_to_device,
+ .load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
+ .ext_queue_init = gaudi_ext_queue_init,
+ .ext_queue_reset = gaudi_ext_queue_reset,
+ .get_signal_cb_size = gaudi_get_signal_cb_size,
+ .get_wait_cb_size = gaudi_get_wait_cb_size,
+ .gen_signal_cb = gaudi_gen_signal_cb,
+ .gen_wait_cb = gaudi_gen_wait_cb,
+ .reset_sob = gaudi_reset_sob,
+ .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
+ .get_device_time = gaudi_get_device_time
+};
+
+/**
+ * gaudi_set_asic_funcs - set GAUDI function pointers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ */
+void gaudi_set_asic_funcs(struct hl_device *hdev)
+{
+ hdev->asic_funcs = &gaudi_funcs;
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
new file mode 100644
index 000000000000..a46530d375fa
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDIP_H_
+#define GAUDIP_H_
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+#include "include/hl_boot_if.h"
+#include "include/gaudi/gaudi_packets.h"
+#include "include/gaudi/gaudi.h"
+#include "include/gaudi/gaudi_async_events.h"
+
+#define NUMBER_OF_EXT_HW_QUEUES 12
+#define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES
+#define NUMBER_OF_CPU_HW_QUEUES 1
+#define NUMBER_OF_INT_HW_QUEUES 100
+#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \
+ NUMBER_OF_CPU_HW_QUEUES + \
+ NUMBER_OF_INT_HW_QUEUES)
+
+/*
+ * Number of MSI interrupt IDs:
+ * Each completion queue has 1 ID
+ * The event queue has 1 ID
+ */
+#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + \
+ NUMBER_OF_CPU_HW_QUEUES)
+
+#if (NUMBER_OF_INTERRUPTS > GAUDI_MSI_ENTRIES)
+#error "Number of MSI interrupts must be smaller or equal to GAUDI_MSI_ENTRIES"
+#endif
+
+#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
+
+#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
+
+#define GAUDI_MAX_CLK_FREQ 2200000000ull /* 2200 MHz */
+
+#define MAX_POWER_DEFAULT 200000 /* 200W */
+
+#define GAUDI_CPU_TIMEOUT_USEC 15000000 /* 15s */
+
+#define TPC_ENABLED_MASK 0xFF
+
+#define GAUDI_HBM_SIZE_32GB 0x800000000ull
+#define GAUDI_HBM_DEVICES 4
+#define GAUDI_HBM_CHANNELS 8
+#define GAUDI_HBM_CFG_BASE (mmHBM0_BASE - CFG_BASE)
+#define GAUDI_HBM_CFG_OFFSET (mmHBM1_BASE - mmHBM0_BASE)
+
+#define DMA_MAX_TRANSFER_SIZE U32_MAX
+
+#define GAUDI_DEFAULT_CARD_NAME "HL2000"
+
+#define PCI_DMA_NUMBER_OF_CHNLS 3
+#define HBM_DMA_NUMBER_OF_CHNLS 5
+#define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \
+ HBM_DMA_NUMBER_OF_CHNLS)
+
+#define MME_NUMBER_OF_SLAVE_ENGINES 2
+#define MME_NUMBER_OF_ENGINES (MME_NUMBER_OF_MASTER_ENGINES + \
+ MME_NUMBER_OF_SLAVE_ENGINES)
+#define MME_NUMBER_OF_QMANS (MME_NUMBER_OF_MASTER_ENGINES * \
+ QMAN_STREAMS)
+
+#define QMAN_STREAMS 4
+
+#define DMA_QMAN_OFFSET (mmDMA1_QM_BASE - mmDMA0_QM_BASE)
+#define TPC_QMAN_OFFSET (mmTPC1_QM_BASE - mmTPC0_QM_BASE)
+#define MME_QMAN_OFFSET (mmMME1_QM_BASE - mmMME0_QM_BASE)
+#define NIC_MACRO_QMAN_OFFSET (mmNIC1_QM0_BASE - mmNIC0_QM0_BASE)
+
+#define TPC_CFG_OFFSET (mmTPC1_CFG_BASE - mmTPC0_CFG_BASE)
+
+#define DMA_CORE_OFFSET (mmDMA1_CORE_BASE - mmDMA0_CORE_BASE)
+
+#define SIF_RTR_CTRL_OFFSET (mmSIF_RTR_CTRL_1_BASE - mmSIF_RTR_CTRL_0_BASE)
+
+#define NIF_RTR_CTRL_OFFSET (mmNIF_RTR_CTRL_1_BASE - mmNIF_RTR_CTRL_0_BASE)
+
+#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
+#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
+
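+/*
+ * Number of sync objects / monitors in a sync manager block, derived from
+ * the block's register address span (4 bytes per register)
+ */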
+#define NUM_OF_SOB_IN_BLOCK \
+ (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
+
+#define NUM_OF_MONITORS_IN_BLOCK \
+ (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 - \
+ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
+
+
+/* DRAM Memory Map */
+
+#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
+#define MMU_PAGE_TABLES_SIZE 0x0BF00000 /* 191MB */
+#define MMU_CACHE_MNG_SIZE 0x00100000 /* 1MB */
+#define RESERVED 0x04000000 /* 64MB */
+
+#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
+#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
+#define MMU_CACHE_MNG_ADDR (MMU_PAGE_TABLES_ADDR + MMU_PAGE_TABLES_SIZE)
+
+#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + MMU_CACHE_MNG_SIZE +\
+ RESERVED)
+
+#define DRAM_BASE_ADDR_USER 0x20000000
+
+#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER)
+#error "Driver must reserve no more than 512MB"
+#endif
+
+/* Internal QMANs PQ sizes */
+
+#define MME_QMAN_LENGTH 64
+#define MME_QMAN_SIZE_IN_BYTES (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
+#define HBM_DMA_QMAN_LENGTH 64
+#define HBM_DMA_QMAN_SIZE_IN_BYTES \
+ (HBM_DMA_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
+#define TPC_QMAN_LENGTH 64
+#define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)
+
+#define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START
+
+/* Virtual address space */
+#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */
+#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 512GB */
+#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \
+ VA_HOST_SPACE_START) /* 767TB */
+
+#define HW_CAP_PLL 0x00000001
+#define HW_CAP_HBM 0x00000002
+#define HW_CAP_MMU 0x00000004
+#define HW_CAP_MME 0x00000008
+#define HW_CAP_CPU 0x00000010
+#define HW_CAP_PCI_DMA 0x00000020
+#define HW_CAP_MSI 0x00000040
+#define HW_CAP_CPU_Q 0x00000080
+#define HW_CAP_HBM_DMA 0x00000100
+#define HW_CAP_CLK_GATE 0x00000200
+#define HW_CAP_SRAM_SCRAMBLER 0x00000400
+#define HW_CAP_HBM_SCRAMBLER 0x00000800
+
+#define HW_CAP_TPC0 0x01000000
+#define HW_CAP_TPC1 0x02000000
+#define HW_CAP_TPC2 0x04000000
+#define HW_CAP_TPC3 0x08000000
+#define HW_CAP_TPC4 0x10000000
+#define HW_CAP_TPC5 0x20000000
+#define HW_CAP_TPC6 0x40000000
+#define HW_CAP_TPC7 0x80000000
+#define HW_CAP_TPC_MASK 0xFF000000
+#define HW_CAP_TPC_SHIFT 24
+
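+/* helpers for extracting/replacing bits [49:39] (the MSB extension) of device CPU addresses */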
+#define GAUDI_CPU_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 39)) >> 39)
+#define GAUDI_PCI_TO_CPU_ADDR(addr) \
+ do { \
+ (addr) &= ~GENMASK_ULL(49, 39); \
+ (addr) |= BIT_ULL(39); \
+ } while (0)
+#define GAUDI_CPU_TO_PCI_ADDR(addr, extension) \
+ do { \
+ (addr) &= ~GENMASK_ULL(49, 39); \
+ (addr) |= (u64) (extension) << 39; \
+ } while (0)
+
+enum gaudi_dma_channels {
+ GAUDI_PCI_DMA_1,
+ GAUDI_PCI_DMA_2,
+ GAUDI_PCI_DMA_3,
+ GAUDI_HBM_DMA_1,
+ GAUDI_HBM_DMA_2,
+ GAUDI_HBM_DMA_3,
+ GAUDI_HBM_DMA_4,
+ GAUDI_HBM_DMA_5,
+ GAUDI_DMA_MAX
+};
+
+enum gaudi_tpc_mask {
+ GAUDI_TPC_MASK_TPC0 = 0x01,
+ GAUDI_TPC_MASK_TPC1 = 0x02,
+ GAUDI_TPC_MASK_TPC2 = 0x04,
+ GAUDI_TPC_MASK_TPC3 = 0x08,
+ GAUDI_TPC_MASK_TPC4 = 0x10,
+ GAUDI_TPC_MASK_TPC5 = 0x20,
+ GAUDI_TPC_MASK_TPC6 = 0x40,
+ GAUDI_TPC_MASK_TPC7 = 0x80,
+ GAUDI_TPC_MASK_ALL = 0xFF
+};
+
+/**
+ * struct gaudi_internal_qman_info - Internal QMAN information.
+ * @pq_kernel_addr: Kernel address of the PQ memory area in the host.
+ * @pq_dma_addr: DMA address of the PQ memory area in the host.
+ * @pq_size: Size of allocated host memory for PQ.
+ */
+struct gaudi_internal_qman_info {
+ void *pq_kernel_addr;
+ dma_addr_t pq_dma_addr;
+ size_t pq_size;
+};
+
+/**
+ * struct gaudi_device - ASIC specific manage structure.
+ * @armcp_info_get: get information on device from ArmCP
+ * @hw_queues_lock: protects the H/W queues from concurrent access.
+ * @clk_gate_mutex: protects code areas that require clock gating to be disabled
+ * temporarily
+ * @internal_qmans: Internal QMANs information. The array size is larger than
+ * the actual number of internal queues because they are not in
+ * consecutive order.
+ * @hbm_bar_cur_addr: current address of HBM PCI bar.
+ * @max_freq_value: current max clk frequency.
+ * @events: array that holds all event IDs
+ * @events_stat: array that holds histogram of all received events.
+ * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset
+ * @hw_cap_initialized: This field contains a bit per H/W engine. When that
+ * engine is initialized, that bit is set by the driver to
+ * signal we can use this engine in later code paths.
+ * Each bit is cleared upon reset of its corresponding H/W
+ * engine.
+ * @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
+ * Multi MSI is possible only with IOMMU enabled.
+ * @ext_queue_idx: helper index for external queues initialization.
+ */
+struct gaudi_device {
+ int (*armcp_info_get)(struct hl_device *hdev);
+
+ /* TODO: remove hw_queues_lock after moving to scheduler code */
+ spinlock_t hw_queues_lock;
+ struct mutex clk_gate_mutex;
+
+ struct gaudi_internal_qman_info internal_qmans[GAUDI_QUEUE_ID_SIZE];
+
+ u64 hbm_bar_cur_addr;
+ u64 max_freq_value;
+
+ u32 events[GAUDI_EVENT_SIZE];
+ u32 events_stat[GAUDI_EVENT_SIZE];
+ u32 events_stat_aggregate[GAUDI_EVENT_SIZE];
+ u32 hw_cap_initialized;
+ u8 multi_msi_mode;
+ u8 ext_queue_idx;
+};
+
+void gaudi_init_security(struct hl_device *hdev);
+void gaudi_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
+void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
+int gaudi_debug_coresight(struct hl_device *hdev, void *data);
+void gaudi_halt_coresight(struct hl_device *hdev);
+int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+
+#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
new file mode 100644
index 000000000000..bf0e062d7b87
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -0,0 +1,884 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/gaudi/gaudi_coresight.h"
+#include "include/gaudi/asic_reg/gaudi_regs.h"
+#include "include/gaudi/gaudi_masks.h"
+
+#include <uapi/misc/habanalabs.h>
+#include <linux/coresight.h>
+
+#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
+#define SPMU_EVENT_TYPES_OFFSET 0x400
+#define SPMU_MAX_COUNTERS 6
+
+static u64 debug_stm_regs[GAUDI_STM_LAST + 1] = {
+ [GAUDI_STM_MME0_ACC] = mmMME0_ACC_STM_BASE,
+ [GAUDI_STM_MME0_SBAB] = mmMME0_SBAB_STM_BASE,
+ [GAUDI_STM_MME0_CTRL] = mmMME0_CTRL_STM_BASE,
+ [GAUDI_STM_MME1_ACC] = mmMME1_ACC_STM_BASE,
+ [GAUDI_STM_MME1_SBAB] = mmMME1_SBAB_STM_BASE,
+ [GAUDI_STM_MME1_CTRL] = mmMME1_CTRL_STM_BASE,
+ [GAUDI_STM_MME2_ACC] = mmMME2_ACC_STM_BASE,
+ [GAUDI_STM_MME2_SBAB] = mmMME2_SBAB_STM_BASE,
+ [GAUDI_STM_MME2_CTRL] = mmMME2_CTRL_STM_BASE,
+ [GAUDI_STM_MME3_ACC] = mmMME3_ACC_STM_BASE,
+ [GAUDI_STM_MME3_SBAB] = mmMME3_SBAB_STM_BASE,
+ [GAUDI_STM_MME3_CTRL] = mmMME3_CTRL_STM_BASE,
+ [GAUDI_STM_DMA_IF_W_S] = mmDMA_IF_W_S_STM_BASE,
+ [GAUDI_STM_DMA_IF_E_S] = mmDMA_IF_E_S_STM_BASE,
+ [GAUDI_STM_DMA_IF_W_N] = mmDMA_IF_W_N_STM_BASE,
+ [GAUDI_STM_DMA_IF_E_N] = mmDMA_IF_E_N_STM_BASE,
+ [GAUDI_STM_CPU] = mmCPU_STM_BASE,
+ [GAUDI_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_5_CS] = mmDMA_CH_5_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_6_CS] = mmDMA_CH_6_CS_STM_BASE,
+ [GAUDI_STM_DMA_CH_7_CS] = mmDMA_CH_7_CS_STM_BASE,
+ [GAUDI_STM_PCIE] = mmPCIE_STM_BASE,
+ [GAUDI_STM_MMU_CS] = mmMMU_CS_STM_BASE,
+ [GAUDI_STM_PSOC] = mmPSOC_STM_BASE,
+ [GAUDI_STM_NIC0_0] = mmSTM_0_NIC0_DBG_BASE,
+ [GAUDI_STM_NIC0_1] = mmSTM_1_NIC0_DBG_BASE,
+ [GAUDI_STM_NIC1_0] = mmSTM_0_NIC1_DBG_BASE,
+ [GAUDI_STM_NIC1_1] = mmSTM_1_NIC1_DBG_BASE,
+ [GAUDI_STM_NIC2_0] = mmSTM_0_NIC2_DBG_BASE,
+ [GAUDI_STM_NIC2_1] = mmSTM_1_NIC2_DBG_BASE,
+ [GAUDI_STM_NIC3_0] = mmSTM_0_NIC3_DBG_BASE,
+ [GAUDI_STM_NIC3_1] = mmSTM_1_NIC3_DBG_BASE,
+ [GAUDI_STM_NIC4_0] = mmSTM_0_NIC4_DBG_BASE,
+ [GAUDI_STM_NIC4_1] = mmSTM_1_NIC4_DBG_BASE,
+ [GAUDI_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
+ [GAUDI_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
+ [GAUDI_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
+ [GAUDI_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
+ [GAUDI_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
+ [GAUDI_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
+ [GAUDI_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
+ [GAUDI_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
+};
+
+static u64 debug_etf_regs[GAUDI_ETF_LAST + 1] = {
+ [GAUDI_ETF_MME0_ACC] = mmMME0_ACC_ETF_BASE,
+ [GAUDI_ETF_MME0_SBAB] = mmMME0_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME0_CTRL] = mmMME0_CTRL_ETF_BASE,
+ [GAUDI_ETF_MME1_ACC] = mmMME1_ACC_ETF_BASE,
+ [GAUDI_ETF_MME1_SBAB] = mmMME1_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME1_CTRL] = mmMME1_CTRL_ETF_BASE,
+ [GAUDI_ETF_MME2_ACC] = mmMME2_MME2_ACC_ETF_BASE,
+ [GAUDI_ETF_MME2_SBAB] = mmMME2_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME2_CTRL] = mmMME2_CTRL_ETF_BASE,
+ [GAUDI_ETF_MME3_ACC] = mmMME3_ACC_ETF_BASE,
+ [GAUDI_ETF_MME3_SBAB] = mmMME3_SBAB_ETF_BASE,
+ [GAUDI_ETF_MME3_CTRL] = mmMME3_CTRL_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_W_S] = mmDMA_IF_W_S_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_E_S] = mmDMA_IF_E_S_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_W_N] = mmDMA_IF_W_N_ETF_BASE,
+ [GAUDI_ETF_DMA_IF_E_N] = mmDMA_IF_E_N_ETF_BASE,
+ [GAUDI_ETF_CPU_0] = mmCPU_ETF_0_BASE,
+ [GAUDI_ETF_CPU_1] = mmCPU_ETF_1_BASE,
+ [GAUDI_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
+ [GAUDI_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_5_CS] = mmDMA_CH_5_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_6_CS] = mmDMA_CH_6_CS_ETF_BASE,
+ [GAUDI_ETF_DMA_CH_7_CS] = mmDMA_CH_7_CS_ETF_BASE,
+ [GAUDI_ETF_PCIE] = mmPCIE_ETF_BASE,
+ [GAUDI_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
+ [GAUDI_ETF_PSOC] = mmPSOC_ETF_BASE,
+ [GAUDI_ETF_NIC0_0] = mmETF_0_NIC0_DBG_BASE,
+ [GAUDI_ETF_NIC0_1] = mmETF_1_NIC0_DBG_BASE,
+ [GAUDI_ETF_NIC1_0] = mmETF_0_NIC1_DBG_BASE,
+ [GAUDI_ETF_NIC1_1] = mmETF_1_NIC1_DBG_BASE,
+ [GAUDI_ETF_NIC2_0] = mmETF_0_NIC2_DBG_BASE,
+ [GAUDI_ETF_NIC2_1] = mmETF_1_NIC2_DBG_BASE,
+ [GAUDI_ETF_NIC3_0] = mmETF_0_NIC3_DBG_BASE,
+ [GAUDI_ETF_NIC3_1] = mmETF_1_NIC3_DBG_BASE,
+ [GAUDI_ETF_NIC4_0] = mmETF_0_NIC4_DBG_BASE,
+ [GAUDI_ETF_NIC4_1] = mmETF_1_NIC4_DBG_BASE,
+ [GAUDI_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
+ [GAUDI_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
+ [GAUDI_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
+ [GAUDI_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
+ [GAUDI_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
+ [GAUDI_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
+ [GAUDI_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
+ [GAUDI_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
+};
+
+static u64 debug_funnel_regs[GAUDI_FUNNEL_LAST + 1] = {
+ [GAUDI_FUNNEL_MME0_ACC] = mmMME0_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_MME1_ACC] = mmMME1_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_MME2_ACC] = mmMME2_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_MME3_ACC] = mmMME3_ACC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X0] = mmSRAM_Y0_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X1] = mmSRAM_Y0_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X2] = mmSRAM_Y0_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X3] = mmSRAM_Y0_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X4] = mmSRAM_Y0_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X5] = mmSRAM_Y0_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X6] = mmSRAM_Y0_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y0_X7] = mmSRAM_Y0_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X0] = mmSRAM_Y1_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X1] = mmSRAM_Y1_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X2] = mmSRAM_Y1_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X3] = mmSRAM_Y1_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X4] = mmSRAM_Y1_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X5] = mmSRAM_Y1_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X6] = mmSRAM_Y1_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y1_X7] = mmSRAM_Y1_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X0] = mmSRAM_Y2_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X1] = mmSRAM_Y2_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X2] = mmSRAM_Y2_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X3] = mmSRAM_Y2_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X4] = mmSRAM_Y2_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X5] = mmSRAM_Y2_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X6] = mmSRAM_Y2_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y2_X7] = mmSRAM_Y2_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X0] = mmSRAM_Y3_X0_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X1] = mmSRAM_Y3_X1_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X2] = mmSRAM_Y3_X2_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X4] = mmSRAM_Y3_X4_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X3] = mmSRAM_Y3_X3_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X5] = mmSRAM_Y3_X5_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X6] = mmSRAM_Y3_X6_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SRAM_Y3_X7] = mmSRAM_Y3_X7_FUNNEL_BASE,
+ [GAUDI_FUNNEL_SIF_0] = mmSIF_FUNNEL_0_BASE,
+ [GAUDI_FUNNEL_SIF_1] = mmSIF_FUNNEL_1_BASE,
+ [GAUDI_FUNNEL_SIF_2] = mmSIF_FUNNEL_2_BASE,
+ [GAUDI_FUNNEL_SIF_3] = mmSIF_FUNNEL_3_BASE,
+ [GAUDI_FUNNEL_SIF_4] = mmSIF_FUNNEL_4_BASE,
+ [GAUDI_FUNNEL_SIF_5] = mmSIF_FUNNEL_5_BASE,
+ [GAUDI_FUNNEL_SIF_6] = mmSIF_FUNNEL_6_BASE,
+ [GAUDI_FUNNEL_SIF_7] = mmSIF_FUNNEL_7_BASE,
+ [GAUDI_FUNNEL_NIF_0] = mmNIF_FUNNEL_0_BASE,
+ [GAUDI_FUNNEL_NIF_1] = mmNIF_FUNNEL_1_BASE,
+ [GAUDI_FUNNEL_NIF_2] = mmNIF_FUNNEL_2_BASE,
+ [GAUDI_FUNNEL_NIF_3] = mmNIF_FUNNEL_3_BASE,
+ [GAUDI_FUNNEL_NIF_4] = mmNIF_FUNNEL_4_BASE,
+ [GAUDI_FUNNEL_NIF_5] = mmNIF_FUNNEL_5_BASE,
+ [GAUDI_FUNNEL_NIF_6] = mmNIF_FUNNEL_6_BASE,
+ [GAUDI_FUNNEL_NIF_7] = mmNIF_FUNNEL_7_BASE,
+ [GAUDI_FUNNEL_DMA_IF_W_S] = mmDMA_IF_W_S_FUNNEL_BASE,
+ [GAUDI_FUNNEL_DMA_IF_E_S] = mmDMA_IF_E_S_FUNNEL_BASE,
+ [GAUDI_FUNNEL_DMA_IF_W_N] = mmDMA_IF_W_N_FUNNEL_BASE,
+ [GAUDI_FUNNEL_DMA_IF_E_N] = mmDMA_IF_E_N_FUNNEL_BASE,
+ [GAUDI_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_W_S] = mmNIC_TPC_FUNNEL_W_S_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_E_S] = mmNIC_TPC_FUNNEL_E_S_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_W_N] = mmNIC_TPC_FUNNEL_W_N_BASE,
+ [GAUDI_FUNNEL_NIC_TPC_E_N] = mmNIC_TPC_FUNNEL_E_N_BASE,
+ [GAUDI_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
+ [GAUDI_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
+ [GAUDI_FUNNEL_NIC0] = mmFUNNEL_NIC0_DBG_BASE,
+ [GAUDI_FUNNEL_NIC1] = mmFUNNEL_NIC1_DBG_BASE,
+ [GAUDI_FUNNEL_NIC2] = mmFUNNEL_NIC2_DBG_BASE,
+ [GAUDI_FUNNEL_NIC3] = mmFUNNEL_NIC3_DBG_BASE,
+ [GAUDI_FUNNEL_NIC4] = mmFUNNEL_NIC4_DBG_BASE,
+ [GAUDI_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
+ [GAUDI_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
+};
+
+static u64 debug_bmon_regs[GAUDI_BMON_LAST + 1] = {
+ [GAUDI_BMON_MME0_ACC_0] = mmMME0_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME0_SBAB_0] = mmMME0_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME0_SBAB_1] = mmMME0_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME0_CTRL_0] = mmMME0_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME0_CTRL_1] = mmMME0_CTRL_BMON1_BASE,
+ [GAUDI_BMON_MME1_ACC_0] = mmMME1_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME1_SBAB_0] = mmMME1_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME1_SBAB_1] = mmMME1_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME1_CTRL_0] = mmMME1_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME1_CTRL_1] = mmMME1_CTRL_BMON1_BASE,
+ [GAUDI_BMON_MME2_ACC_0] = mmMME2_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME2_SBAB_0] = mmMME2_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME2_SBAB_1] = mmMME2_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME2_CTRL_0] = mmMME2_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME2_CTRL_1] = mmMME2_CTRL_BMON1_BASE,
+ [GAUDI_BMON_MME3_ACC_0] = mmMME3_ACC_BMON0_BASE,
+ [GAUDI_BMON_MME3_SBAB_0] = mmMME3_SBAB_BMON0_BASE,
+ [GAUDI_BMON_MME3_SBAB_1] = mmMME3_SBAB_BMON1_BASE,
+ [GAUDI_BMON_MME3_CTRL_0] = mmMME3_CTRL_BMON0_BASE,
+ [GAUDI_BMON_MME3_CTRL_1] = mmMME3_CTRL_BMON1_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_SOB_WR] = mmDMA_IF_W_S_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_0_WR] = mmDMA_IF_W_S_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_0_RD] = mmDMA_IF_W_S_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_1_WR] = mmDMA_IF_W_S_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_S_1_RD] = mmDMA_IF_W_S_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_SOB_WR] = mmDMA_IF_E_S_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_0_WR] = mmDMA_IF_E_S_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_0_RD] = mmDMA_IF_E_S_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_1_WR] = mmDMA_IF_E_S_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_S_1_RD] = mmDMA_IF_E_S_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_SOB_WR] = mmDMA_IF_W_N_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM0_WR] = mmDMA_IF_W_N_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM0_RD] = mmDMA_IF_W_N_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM1_WR] = mmDMA_IF_W_N_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_W_N_HBM1_RD] = mmDMA_IF_W_N_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_SOB_WR] = mmDMA_IF_E_N_SOB_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM0_WR] = mmDMA_IF_E_N_HBM0_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM0_RD] = mmDMA_IF_E_N_HBM0_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM1_WR] = mmDMA_IF_E_N_HBM1_WR_BMON_BASE,
+ [GAUDI_BMON_DMA_IF_E_N_HBM1_RD] = mmDMA_IF_E_N_HBM1_RD_BMON_BASE,
+ [GAUDI_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
+ [GAUDI_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
+ [GAUDI_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_5_0] = mmDMA_CH_5_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_5_1] = mmDMA_CH_5_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_6_0] = mmDMA_CH_6_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_6_1] = mmDMA_CH_6_BMON_1_BASE,
+ [GAUDI_BMON_DMA_CH_7_0] = mmDMA_CH_7_BMON_0_BASE,
+ [GAUDI_BMON_DMA_CH_7_1] = mmDMA_CH_7_BMON_1_BASE,
+ [GAUDI_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
+ [GAUDI_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
+ [GAUDI_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
+ [GAUDI_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
+ [GAUDI_BMON_MMU_0] = mmMMU_BMON_0_BASE,
+ [GAUDI_BMON_MMU_1] = mmMMU_BMON_1_BASE,
+ [GAUDI_BMON_NIC0_0] = mmBMON0_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_1] = mmBMON1_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_2] = mmBMON2_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_3] = mmBMON3_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC0_4] = mmBMON4_NIC0_DBG_BASE,
+ [GAUDI_BMON_NIC1_0] = mmBMON0_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_1] = mmBMON1_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_2] = mmBMON2_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_3] = mmBMON3_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC1_4] = mmBMON4_NIC1_DBG_BASE,
+ [GAUDI_BMON_NIC2_0] = mmBMON0_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_1] = mmBMON1_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_2] = mmBMON2_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_3] = mmBMON3_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC2_4] = mmBMON4_NIC2_DBG_BASE,
+ [GAUDI_BMON_NIC3_0] = mmBMON0_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_1] = mmBMON1_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_2] = mmBMON2_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_3] = mmBMON3_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC3_4] = mmBMON4_NIC3_DBG_BASE,
+ [GAUDI_BMON_NIC4_0] = mmBMON0_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_1] = mmBMON1_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_2] = mmBMON2_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_3] = mmBMON3_NIC4_DBG_BASE,
+ [GAUDI_BMON_NIC4_4] = mmBMON4_NIC4_DBG_BASE,
+ [GAUDI_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
+ [GAUDI_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
+ [GAUDI_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
+ [GAUDI_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
+ [GAUDI_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
+};
+
+static u64 debug_spmu_regs[GAUDI_SPMU_LAST + 1] = {
+ [GAUDI_SPMU_MME0_ACC] = mmMME0_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME0_SBAB] = mmMME0_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME0_CTRL] = mmMME0_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_MME1_ACC] = mmMME1_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME1_SBAB] = mmMME1_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME1_CTRL] = mmMME1_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_MME2_MME2_ACC] = mmMME2_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME2_SBAB] = mmMME2_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME2_CTRL] = mmMME2_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_MME3_ACC] = mmMME3_ACC_SPMU_BASE,
+ [GAUDI_SPMU_MME3_SBAB] = mmMME3_SBAB_SPMU_BASE,
+ [GAUDI_SPMU_MME3_CTRL] = mmMME3_CTRL_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_5_CS] = mmDMA_CH_5_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_6_CS] = mmDMA_CH_6_CS_SPMU_BASE,
+ [GAUDI_SPMU_DMA_CH_7_CS] = mmDMA_CH_7_CS_SPMU_BASE,
+ [GAUDI_SPMU_PCIE] = mmPCIE_SPMU_BASE,
+ [GAUDI_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
+ [GAUDI_SPMU_NIC0_0] = mmSPMU_0_NIC0_DBG_BASE,
+ [GAUDI_SPMU_NIC0_1] = mmSPMU_1_NIC0_DBG_BASE,
+ [GAUDI_SPMU_NIC1_0] = mmSPMU_0_NIC1_DBG_BASE,
+ [GAUDI_SPMU_NIC1_1] = mmSPMU_1_NIC1_DBG_BASE,
+ [GAUDI_SPMU_NIC2_0] = mmSPMU_0_NIC2_DBG_BASE,
+ [GAUDI_SPMU_NIC2_1] = mmSPMU_1_NIC2_DBG_BASE,
+ [GAUDI_SPMU_NIC3_0] = mmSPMU_0_NIC3_DBG_BASE,
+ [GAUDI_SPMU_NIC3_1] = mmSPMU_1_NIC3_DBG_BASE,
+ [GAUDI_SPMU_NIC4_0] = mmSPMU_0_NIC4_DBG_BASE,
+ [GAUDI_SPMU_NIC4_1] = mmSPMU_1_NIC4_DBG_BASE,
+ [GAUDI_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
+ [GAUDI_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
+};
+
+static int gaudi_coresight_timeout(struct hl_device *hdev, u64 addr,
+ int position, bool up)
+{
+ int rc;
+ u32 val;
+
+ rc = hl_poll_timeout(
+ hdev,
+ addr,
+ val,
+ up ? val & BIT(position) : !(val & BIT(position)),
+ 1000,
+ CORESIGHT_TIMEOUT_USEC);
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
+ addr, position, up);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int gaudi_config_stm(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_stm *input;
+ u64 base_reg;
+ int rc;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_stm_regs)) {
+ dev_err(hdev->dev, "Invalid register index in STM\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0xE80, 0x80004);
+ WREG32(base_reg + 0xD64, 7);
+ WREG32(base_reg + 0xD60, 0);
+ WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
+ WREG32(base_reg + 0xD60, 1);
+ WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
+ WREG32(base_reg + 0xE70, 0x10);
+ WREG32(base_reg + 0xE60, 0);
+ WREG32(base_reg + 0xE00, lower_32_bits(input->sp_mask));
+ WREG32(base_reg + 0xEF4, input->id);
+ WREG32(base_reg + 0xDF4, 0x80);
+ WREG32(base_reg + 0xE8C, input->frequency);
+ WREG32(base_reg + 0xE90, 0x7FF);
+
+ /* SW-2176 - S/W workaround for a H/W bug */
+ if ((CFG_BASE + base_reg) >= mmDMA_CH_0_CS_STM_BASE &&
+ (CFG_BASE + base_reg) <= mmDMA_CH_7_CS_STM_BASE) {
+
+ WREG32(base_reg + 0xE68, 0xffff8005);
+ WREG32(base_reg + 0xE6C, 0x0);
+ }
+
+ WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
+ } else {
+ WREG32(base_reg + 0xE80, 4);
+ WREG32(base_reg + 0xD64, 0);
+ WREG32(base_reg + 0xD60, 1);
+ WREG32(base_reg + 0xD00, 0);
+ WREG32(base_reg + 0xD20, 0);
+ WREG32(base_reg + 0xD60, 0);
+ WREG32(base_reg + 0xE20, 0);
+ WREG32(base_reg + 0xE00, 0);
+ WREG32(base_reg + 0xDF4, 0x80);
+ WREG32(base_reg + 0xE70, 0);
+ WREG32(base_reg + 0xE60, 0);
+ WREG32(base_reg + 0xE64, 0);
+ WREG32(base_reg + 0xE8C, 0);
+
+ rc = gaudi_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to disable STM on timeout, error %d\n",
+ rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0xE80, 4);
+ }
+
+ return 0;
+}
+
+static int gaudi_config_etf(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etf *input;
+ u64 base_reg;
+ u32 val;
+ int rc;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_etf_regs)) {
+ dev_err(hdev->dev, "Invalid register index in ETF\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ val = RREG32(base_reg + 0x304);
+ val |= 0x1000;
+ WREG32(base_reg + 0x304, val);
+ val |= 0x40;
+ WREG32(base_reg + 0x304, val);
+
+ rc = gaudi_coresight_timeout(hdev, base_reg + 0x304, 6, false);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = gaudi_coresight_timeout(hdev, base_reg + 0xC, 2, true);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to %s ETF on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(base_reg + 0x20, 0);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0x34, 0x3FFC);
+ WREG32(base_reg + 0x28, input->sink_mode);
+ WREG32(base_reg + 0x304, 0x4001);
+ WREG32(base_reg + 0x308, 0xA);
+ WREG32(base_reg + 0x20, 1);
+ } else {
+ WREG32(base_reg + 0x34, 0);
+ WREG32(base_reg + 0x28, 0);
+ WREG32(base_reg + 0x304, 0);
+ }
+
+ return 0;
+}
+
+static bool gaudi_etr_validate_address(struct hl_device *hdev, u64 addr,
+ u32 size, bool *is_host)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ /* maximum address length is 50 bits */
+ if (addr >> 50) {
+ dev_err(hdev->dev,
+ "ETR buffer address shouldn't exceed 50 bits\n");
+ return false;
+ }
+
+ /* PMMU and HPMMU addresses are equal, check only one of them */
+ if ((gaudi->hw_cap_initialized & HW_CAP_MMU) &&
+ hl_mem_area_inside_range(addr, size,
+ prop->pmmu.start_addr,
+ prop->pmmu.end_addr)) {
+ *is_host = true;
+ return true;
+ }
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->dram_user_base_address,
+ prop->dram_end_address))
+ return true;
+
+ if (hl_mem_area_inside_range(addr, size,
+ prop->sram_user_base_address,
+ prop->sram_end_address))
+ return true;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
+ dev_err(hdev->dev, "ETR buffer should be in SRAM/DRAM\n");
+
+ return false;
+}
+
+static int gaudi_config_etr(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_etr *input;
+ u64 msb;
+ u32 val;
+ int rc;
+
+ WREG32(mmPSOC_ETR_LAR, CORESIGHT_UNLOCK);
+
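+ /* set stop-on-flush and issue a manual flush so the ETR drains before reconfiguration */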
+ val = RREG32(mmPSOC_ETR_FFCR);
+ val |= 0x1000;
+ WREG32(mmPSOC_ETR_FFCR, val);
+ val |= 0x40;
+ WREG32(mmPSOC_ETR_FFCR, val);
+
+ rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_FFCR, 6, false);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ rc = gaudi_coresight_timeout(hdev, mmPSOC_ETR_STS, 2, true);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
+ params->enable ? "enable" : "disable", rc);
+ return rc;
+ }
+
+ WREG32(mmPSOC_ETR_CTL, 0);
+
+ if (params->enable) {
+ bool is_host = false;
+
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->buffer_size == 0) {
+ dev_err(hdev->dev,
+ "ETR buffer size should be bigger than 0\n");
+ return -EINVAL;
+ }
+
+ if (!gaudi_etr_validate_address(hdev,
+ input->buffer_address, input->buffer_size,
+ &is_host)) {
+ dev_err(hdev->dev, "ETR buffer address is invalid\n");
+ return -EINVAL;
+ }
+
+ msb = upper_32_bits(input->buffer_address) >> 8;
+ msb &= PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
+ WREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR, msb);
+
+ WREG32(mmPSOC_ETR_BUFWM, 0x3FFC);
+ WREG32(mmPSOC_ETR_RSZ, input->buffer_size);
+ WREG32(mmPSOC_ETR_MODE, input->sink_mode);
+ /* Workaround for H3 #HW-2075 bug: use small data chunks */
+ WREG32(mmPSOC_ETR_AXICTL, (is_host ? 0 : 0x700) |
+ PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT);
+ WREG32(mmPSOC_ETR_DBALO,
+ lower_32_bits(input->buffer_address));
+ WREG32(mmPSOC_ETR_DBAHI,
+ upper_32_bits(input->buffer_address));
+ WREG32(mmPSOC_ETR_FFCR, 3);
+ WREG32(mmPSOC_ETR_PSCR, 0xA);
+ WREG32(mmPSOC_ETR_CTL, 1);
+ } else {
+ WREG32(mmPSOC_ETR_BUFWM, 0);
+ WREG32(mmPSOC_ETR_RSZ, 0x400);
+ WREG32(mmPSOC_ETR_DBALO, 0);
+ WREG32(mmPSOC_ETR_DBAHI, 0);
+ WREG32(mmPSOC_ETR_PSCR, 0);
+ WREG32(mmPSOC_ETR_MODE, 0);
+ WREG32(mmPSOC_ETR_FFCR, 0);
+
+ if (params->output_size >= sizeof(u64)) {
+ u32 rwp, rwphi;
+
+ /*
+ * The trace buffer address is 50 bits wide. The end of
+ * the buffer is set in the RWP register (lower 32
+ * bits), and in the RWPHI register (upper 8 bits).
+ * The 10 msb of the 50-bit address are stored in a
+ * global configuration register.
+ */
+ rwp = RREG32(mmPSOC_ETR_RWP);
+ rwphi = RREG32(mmPSOC_ETR_RWPHI) & 0xff;
+ msb = RREG32(mmPSOC_GLOBAL_CONF_TRACE_ADDR) &
+ PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK;
+ *(u64 *) params->output = ((u64) msb << 40) |
+ ((u64) rwphi << 32) | rwp;
+ }
+ }
+
+ return 0;
+}
+
+static int gaudi_config_funnel(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ u64 base_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_funnel_regs)) {
+ dev_err(hdev->dev, "Invalid register index in FUNNEL\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_funnel_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
+
+ WREG32(base_reg, params->enable ? 0x33F : 0);
+
+ return 0;
+}
+
+static int gaudi_config_bmon(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ struct hl_debug_params_bmon *input;
+ u64 base_reg;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_bmon_regs)) {
+ dev_err(hdev->dev, "Invalid register index in BMON\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
+
+ WREG32(base_reg + 0x104, 1);
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0));
+ WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0));
+ WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0));
+ WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0));
+ WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1));
+ WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1));
+ WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1));
+ WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1));
+ WREG32(base_reg + 0x224, 0);
+ WREG32(base_reg + 0x234, 0);
+ WREG32(base_reg + 0x30C, input->bw_win);
+ WREG32(base_reg + 0x308, input->win_capture);
+ WREG32(base_reg + 0x700, 0xA000B00 | (input->id << 12));
+ WREG32(base_reg + 0x708, 0xA000A00 | (input->id << 12));
+ WREG32(base_reg + 0x70C, 0xA000C00 | (input->id << 12));
+ WREG32(base_reg + 0x100, 0x11);
+ WREG32(base_reg + 0x304, 0x1);
+ } else {
+ WREG32(base_reg + 0x200, 0);
+ WREG32(base_reg + 0x204, 0);
+ WREG32(base_reg + 0x208, 0xFFFFFFFF);
+ WREG32(base_reg + 0x20C, 0xFFFFFFFF);
+ WREG32(base_reg + 0x240, 0);
+ WREG32(base_reg + 0x244, 0);
+ WREG32(base_reg + 0x248, 0xFFFFFFFF);
+ WREG32(base_reg + 0x24C, 0xFFFFFFFF);
+ WREG32(base_reg + 0x224, 0xFFFFFFFF);
+ WREG32(base_reg + 0x234, 0x1070F);
+ WREG32(base_reg + 0x30C, 0);
+ WREG32(base_reg + 0x308, 0xFFFF);
+ WREG32(base_reg + 0x700, 0xA000B00);
+ WREG32(base_reg + 0x708, 0xA000A00);
+ WREG32(base_reg + 0x70C, 0xA000C00);
+ WREG32(base_reg + 0x100, 1);
+ WREG32(base_reg + 0x304, 0);
+ WREG32(base_reg + 0x104, 0);
+ }
+
+ return 0;
+}
+
+static int gaudi_config_spmu(struct hl_device *hdev,
+ struct hl_debug_params *params)
+{
+ u64 base_reg;
+ struct hl_debug_params_spmu *input = params->input;
+ u64 *output;
+ u32 output_arr_len;
+ u32 events_num;
+ u32 overflow_idx;
+ u32 cycle_cnt_idx;
+ int i;
+
+ if (params->reg_idx >= ARRAY_SIZE(debug_spmu_regs)) {
+ dev_err(hdev->dev, "Invalid register index in SPMU\n");
+ return -EINVAL;
+ }
+
+ base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
+
+ if (params->enable) {
+ input = params->input;
+
+ if (!input)
+ return -EINVAL;
+
+ if (input->event_types_num < 3) {
+ dev_err(hdev->dev,
+ "not enough event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ if (input->event_types_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many event types values for SPMU enable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0xE04, 0x41013046);
+ WREG32(base_reg + 0xE04, 0x41013040);
+
+ for (i = 0 ; i < input->event_types_num ; i++)
+ WREG32(base_reg + SPMU_EVENT_TYPES_OFFSET + i * 4,
+ input->event_types[i]);
+
+ WREG32(base_reg + 0xE04, 0x41013041);
+ WREG32(base_reg + 0xC00, 0x8000003F);
+ } else {
+ output = params->output;
+ output_arr_len = params->output_size / 8;
+ events_num = output_arr_len - 2;
+ overflow_idx = output_arr_len - 2;
+ cycle_cnt_idx = output_arr_len - 1;
+
+ if (!output)
+ return -EINVAL;
+
+ if (output_arr_len < 3) {
+ dev_err(hdev->dev,
+ "not enough values for SPMU disable\n");
+ return -EINVAL;
+ }
+
+ if (events_num > SPMU_MAX_COUNTERS) {
+ dev_err(hdev->dev,
+ "too many events values for SPMU disable\n");
+ return -EINVAL;
+ }
+
+ WREG32(base_reg + 0xE04, 0x41013040);
+
+ for (i = 0 ; i < events_num ; i++)
+ output[i] = RREG32(base_reg + i * 8);
+
+ output[overflow_idx] = RREG32(base_reg + 0xCC0);
+
+ output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
+ output[cycle_cnt_idx] <<= 32;
+ output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
+
+ WREG32(base_reg + 0xCC0, 0);
+ }
+
+ return 0;
+}
+
+int gaudi_debug_coresight(struct hl_device *hdev, void *data)
+{
+ struct hl_debug_params *params = data;
+ int rc = 0;
+
+ switch (params->op) {
+ case HL_DEBUG_OP_STM:
+ rc = gaudi_config_stm(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETF:
+ rc = gaudi_config_etf(hdev, params);
+ break;
+ case HL_DEBUG_OP_ETR:
+ rc = gaudi_config_etr(hdev, params);
+ break;
+ case HL_DEBUG_OP_FUNNEL:
+ rc = gaudi_config_funnel(hdev, params);
+ break;
+ case HL_DEBUG_OP_BMON:
+ rc = gaudi_config_bmon(hdev, params);
+ break;
+ case HL_DEBUG_OP_SPMU:
+ rc = gaudi_config_spmu(hdev, params);
+ break;
+ case HL_DEBUG_OP_TIMESTAMP:
+ /* Do nothing as this opcode is deprecated */
+ break;
+
+ default:
+ dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
+ return -EINVAL;
+ }
+
+ /* Perform read from the device to flush all configuration */
+ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+
+ return rc;
+}
+
+void gaudi_halt_coresight(struct hl_device *hdev)
+{
+ struct hl_debug_params params = {};
+ int i, rc;
+
+ for (i = GAUDI_ETF_FIRST ; i <= GAUDI_ETF_LAST ; i++) {
+ params.reg_idx = i;
+ rc = gaudi_config_etf(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
+ }
+
+ rc = gaudi_config_etr(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
new file mode 100644
index 000000000000..6dd2c2a1cd70
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/gaudi/gaudi_fw_if.h"
+
+void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (freq == PLL_LAST)
+ hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+}
+
+int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+{
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ if (value < 0) {
+ dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
+ value);
+ return value;
+ }
+
+ *max_clk = (value / 1000 / 1000);
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ if (value < 0) {
+ dev_err(hdev->dev,
+ "Failed to retrieve device current clock %ld\n",
+ value);
+ return value;
+ }
+
+ *cur_clk = (value / 1000 / 1000);
+
+ return 0;
+}
+
+static ssize_t clk_max_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, false);
+
+ gaudi->max_freq_value = value;
+
+ return sprintf(buf, "%lu\n", (value / 1000 / 1000));
+}
+
+static ssize_t clk_max_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ struct gaudi_device *gaudi = hdev->asic_specific;
+ int rc;
+ u64 value;
+
+ if (hl_device_disabled_or_in_reset(hdev)) {
+ count = -ENODEV;
+ goto fail;
+ }
+
+ rc = kstrtoull(buf, 0, &value);
+ if (rc) {
+ count = -EINVAL;
+ goto fail;
+ }
+
+ gaudi->max_freq_value = value * 1000 * 1000;
+
+ hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+
+fail:
+ return count;
+}
+
+static ssize_t clk_cur_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hl_device *hdev = dev_get_drvdata(dev);
+ long value;
+
+ if (hl_device_disabled_or_in_reset(hdev))
+ return -ENODEV;
+
+ value = hl_get_frequency(hdev, MME_PLL, true);
+
+ return sprintf(buf, "%lu\n", (value / 1000 / 1000));
+}
+
+static DEVICE_ATTR_RW(clk_max_freq_mhz);
+static DEVICE_ATTR_RO(clk_cur_freq_mhz);
+
+static struct attribute *gaudi_dev_attrs[] = {
+ &dev_attr_clk_max_freq_mhz.attr,
+ &dev_attr_clk_cur_freq_mhz.attr,
+ NULL,
+};
+
+void gaudi_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp)
+{
+ dev_attr_grp->attrs = gaudi_dev_attrs;
+}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
new file mode 100644
index 000000000000..6a351e31fa6a
--- /dev/null
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -0,0 +1,9090 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "gaudiP.h"
+#include "include/gaudi/asic_reg/gaudi_regs.h"
+
+#define GAUDI_NUMBER_OF_RR_REGS 24
+#define GAUDI_NUMBER_OF_LBW_RANGES 12
+
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_HIT_WPROT,
+ mmDMA_IF_W_S_DMA1_HIT_WPROT,
+ mmDMA_IF_E_S_DMA0_HIT_WPROT,
+ mmDMA_IF_E_S_DMA1_HIT_WPROT,
+ mmDMA_IF_W_N_DMA0_HIT_WPROT,
+ mmDMA_IF_W_N_DMA1_HIT_WPROT,
+ mmDMA_IF_E_N_DMA0_HIT_WPROT,
+ mmDMA_IF_E_N_DMA1_HIT_WPROT,
+ mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW,
+ mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW,
+ mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
+};
+
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_HIT_RPROT,
+ mmDMA_IF_W_S_DMA1_HIT_RPROT,
+ mmDMA_IF_E_S_DMA0_HIT_RPROT,
+ mmDMA_IF_E_S_DMA1_HIT_RPROT,
+ mmDMA_IF_W_N_DMA0_HIT_RPROT,
+ mmDMA_IF_W_N_DMA1_HIT_RPROT,
+ mmDMA_IF_E_N_DMA0_HIT_RPROT,
+ mmDMA_IF_E_N_DMA1_HIT_RPROT,
+ mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR,
+ mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR,
+ mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
+};
+
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
+ mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
+ mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
+ mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
+ mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
+};
+
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
+ mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
+ mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
+ mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
+ mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
+};
+
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
+ mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
+ mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
+ mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
+ mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
+};
+
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
+ mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
+ mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
+ mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
+ mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
+ mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0,
+ mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0,
+ mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
+};
+
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
+};
+
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
+};
+
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
+};
+
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
+};
+
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
+};
+
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
+};
+
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
+};
+
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
+};
+
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
+};
+
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+ mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0,
+ mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0,
+ mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0
+};
+
+/**
+ * gaudi_pb_set_block - set the given block as protected
+ *
+ * @hdev: pointer to hl_device structure
+ * @block: block base address
+ *
+ */
+static void gaudi_pb_set_block(struct hl_device *hdev, u64 base)
+{
+ u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS;
+
+ while (pb_addr & 0xFFF) {
+ WREG32(pb_addr, 0);
+ pb_addr += 4;
+ }
+}
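The protection-bit updates in the function that follows all repeat one addressing pattern: a register offset is split into its 4 KB-aligned page, the protection-bits area inside that page (at PROT_BITS_OFFS), the 32-bit word within that area which covers the register, and the bit within that word which corresponds to the register itself. A standalone sketch of that decomposition is shown below; both the register offset and the PROT_BITS_OFFS value are assumptions made purely for illustration, not taken from the driver headers:

	#include <stdint.h>
	#include <stdio.h>

	#define PROT_BITS_OFFS 0xF80  /* assumed value, for illustration only */

	int main(void)
	{
		uint32_t reg = 0x60944;  /* hypothetical register offset */

		/* 4 KB-aligned page of the register, plus the protection-bits area */
		uint32_t pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;
		/* which 32-bit protection word inside that area */
		uint32_t word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
		/* which bit in that word belongs to this register */
		uint32_t mask = 1u << ((reg & 0x7F) >> 2);

		printf("pb_addr=0x%x word_offset=0x%x mask=0x%x\n",
		       pb_addr, word_offset, mask);
		return 0;
	}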
+
+static void gaudi_init_mme_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ gaudi_pb_set_block(hdev, mmMME0_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME0_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME0_PRTN_BASE);
+ gaudi_pb_set_block(hdev, mmMME1_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME1_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME1_PRTN_BASE);
+ gaudi_pb_set_block(hdev, mmMME2_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME2_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME2_PRTN_BASE);
+ gaudi_pb_set_block(hdev, mmMME3_ACC_BASE);
+ gaudi_pb_set_block(hdev, mmMME3_SBAB_BASE);
+ gaudi_pb_set_block(hdev, mmMME3_PRTN_BASE);
+
+ WREG32(mmMME0_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME1_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME2_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME3_CTRL_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ WREG32(mmMME0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmMME2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmMME0_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME1_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME1_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME1_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME1_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME1_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME1_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME1_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ /* MME 1 is slave, hence its whole QM block is protected (with RR) */
+
+ pb_addr = (mmMME2_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 &
+ PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmMME2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME3_CTRL_RESET & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME3_CTRL_RESET & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmMME3_CTRL_RESET & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_QM_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_SYNC_OBJECT_FIFO_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_EUS_ROLLUP_CNT_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_LOG_SHADOW & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_DESC0 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_TOKEN_UPDATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_TH & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_MIN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_CTRL_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_BF16 & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_EU_POWER_SAVE_DISABLE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_CS_DBG_BLOCK_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_TE_CLOSE_CGATE & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_AGU_SM_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_EZSYNC_OUT_CREDIT & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_PCU_RL_SAT_SEC & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER & 0x7F) >> 2);
+ mask |= 1 << ((mmMME3_CTRL_QM_SLV_LBW_CLK_EN & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmMME3_CTRL_SHADOW_0_STATUS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmMME3_CTRL_SHADOW_0_STATUS & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmMME3_CTRL_SHADOW_0_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ /* MME 3 is slave, hence its whole QM block is protected (with RR) */
+}
+
+static void gaudi_init_dma_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE);
+
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE);
+
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE);
+ gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE);
+
+ WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ WREG32(mmDMA0_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA1_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA2_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA3_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA4_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA5_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA6_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmDMA7_CORE_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
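+	/*
+	 * Note on the pattern used below (descriptive annotation): each 32-bit
+	 * word in the protection-bits area covers 32 consecutive 4-byte
+	 * registers (128 bytes). pb_addr is the protection-bits copy of the
+	 * register's 4KB page, word_offset is the byte offset of the word
+	 * covering the register, and every bit set in mask marks one register
+	 * in that word. Writing ~mask clears only the bits of the listed
+	 * registers, leaving them accessible (unsecured) while the rest of the
+	 * word stays protected. The same pattern repeats for each DMA QMAN
+	 * below.
+	 */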
+ pb_addr = (mmDMA0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1 << ((mmDMA0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
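+ /*
+  * DMA2 QMAN: same scheme as the blocks above - for every 128-byte
+  * register block, collect one bit per listed register (bit index is
+  * (reg & 0x7F) >> 2) and write the inverted mask to the matching word
+  * in the block's protection-bits mirror at PROT_BITS_OFFS.
+  */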
+ pb_addr = (mmDMA2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
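+ /* DMA3 QMAN - same protection-bits pattern as DMA2 */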
+ pb_addr = (mmDMA3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
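+ /* DMA4 QMAN - same protection-bits pattern as DMA2/DMA3 */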
+ pb_addr = (mmDMA4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
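+ /*
+  * The DMA5 QMAN block below repeats the pattern used for DMA4 above:
+  * pb_addr points at the protection-bits area of the register's
+  * 4KB-aligned block, word_offset selects the 32-bit protection word
+  * covering the register's 0x80-byte window, one mask bit is set per
+  * listed register within that window, and the complemented mask is
+  * then written back with WREG32.
+  */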
+ pb_addr = (mmDMA5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset =
+ ((mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA0_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA0_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA0_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA0_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA0_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA1_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA1_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA1_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA1_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA1_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA2_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA2_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA2_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA2_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA2_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA3_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA3_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA3_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA3_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA3_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA4_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA4_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA4_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA4_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA4_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA5_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA5_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA5_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA5_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA5_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA6_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA6_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA6_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA6_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA6_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_LBW_MAX_OUTSTAND & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_SECURE_PROPS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_NON_SECURE_PROPS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_RD_MAX_OUTSTAND & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_RD_MAX_OUTSTAND & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmDMA7_CORE_RD_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_ARCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_ARUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_MAX_OUTSTAND & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_MAX_AWID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_AWCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_INFLIGHTS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERRMSG_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERRMSG_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_ERRMSG_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_STS0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_STS0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_STS1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmDMA7_CORE_RD_DBGMEM_ADD & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmDMA7_CORE_RD_DBGMEM_ADD & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmDMA7_CORE_RD_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_RD_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_HBW_AXI_AR_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_HBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_LBW_AXI_AW_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_DESC_CNT & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_RD_DESC_ID & 0x7F) >> 2);
+ mask |= 1 << ((mmDMA7_CORE_DBG_WR_DESC_ID & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
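+/*
+ * A short note on the protection-bits arithmetic repeated throughout
+ * these init functions (a summary of what the statements above compute;
+ * the interpretation of the bit polarity is an assumption, see below).
+ * Every 4 KB register block reserves a protection-bits array at offset
+ * PROT_BITS_OFFS.  For a register address reg:
+ *
+ *   pb_addr     = (reg & ~0xFFF) + PROT_BITS_OFFS     start of that array
+ *   word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2  byte offset of the
+ *                                                     32-bit word covering
+ *                                                     reg's 128-byte window
+ *   bit         = (reg & 0x7F) >> 2                   reg's slot within
+ *                                                     that window
+ *
+ * WREG32(pb_addr + word_offset, ~mask) therefore clears the bits of the
+ * registers collected in mask and sets every other bit in that word; a
+ * cleared bit appears to mark the register as protected (privileged-only),
+ * following the convention documented for the Goya protection bits.
+ */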
+static void gaudi_init_tpc_protection_bits(struct hl_device *hdev)
+{
+ u32 pb_addr, mask;
+ u8 word_offset;
+
+ gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE);
+ gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE);
+
+ WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC0_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC0_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC0_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
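+	/*
+	 * Each per-TPC block below follows the same scheme: pb_addr selects
+	 * the protection-bits page (PROT_BITS_OFFS) of the 4KB region that
+	 * contains the register, word_offset selects the 32-bit word within
+	 * that page, and mask accumulates one bit per register offset
+	 * ((reg & 0x7F) >> 2). Writing ~mask presumably leaves exactly those
+	 * registers unsecured while keeping the rest protected.
+	 */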
+ WREG32(mmTPC1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC1_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC1_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC1_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC1_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
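+	/* TPC2: same protection-bits pattern as the TPC0/TPC1 blocks above */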
+ WREG32(mmTPC2_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC2_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC2_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC2_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC2_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
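
The register groups above and below all repeat the same three-step arithmetic, so it can help to read them through two small helpers. This is only an illustrative sketch of the pattern, not code from this patch: the names gaudi_prot_word_addr() and gaudi_prot_bit() are invented here, PROT_BITS_OFFS and WREG32 are assumed to be the driver's own definitions, and the reading that each 32-bit protection word covers one 128-byte window (32 registers) of its 4 KB page is inferred from the 0x7F mask and the ">> 2" scaling rather than stated anywhere in the hunk.

	/* 32-bit protection word covering @reg: the register's 4 KB page base,
	 * redirected into the page's protection-bits area, plus the word for
	 * the 128-byte window the register falls in (same arithmetic as the
	 * inline pb_addr + word_offset above).
	 */
	static inline u32 gaudi_prot_word_addr(u32 reg)
	{
		return (reg & ~0xFFF) + PROT_BITS_OFFS +
				(((reg & PROT_BITS_OFFS) >> 7) << 2);
	}

	/* bit inside that word: one bit per 4-byte register */
	static inline u32 gaudi_prot_bit(u32 reg)
	{
		return 1 << ((reg & 0x7F) >> 2);
	}

Read this way, every block in this hunk amounts to OR-ing gaudi_prot_bit() over a group of registers that share one window, then writing the complemented mask to gaudi_prot_word_addr() of the first register in the group.
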
+ WREG32(mmTPC3_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC3_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC3_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC3_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC3_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
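
Purely to show the structure, one of these unrolled groups could equivalently be written as a loop over a register table. This is a hypothetical consolidation, not something the patch does; and since the hunk does not show what a cleared versus set protection bit means for register access, the helper name below stays neutral on that point.

	/* hypothetical: handle one group of registers sharing a 128-byte
	 * window, mirroring the unrolled pb_addr/word_offset/mask sequences
	 */
	static void gaudi_write_prot_group(const u32 *regs, int num)
	{
		u32 pb_addr = (regs[0] & ~0xFFF) + PROT_BITS_OFFS;
		u32 word_offset = ((regs[0] & PROT_BITS_OFFS) >> 7) << 2;
		u32 mask = 0;
		int i;

		for (i = 0; i < num; i++)
			mask |= 1 << ((regs[i] & 0x7F) >> 2);

		WREG32(pb_addr + word_offset, ~mask);
	}

The patch itself keeps the fully unrolled form, which spells out every affected register explicitly at the call site instead of carrying register tables.
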
+ WREG32(mmTPC4_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC4_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC4_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC4_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC4_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC5_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC5_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC5_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC5_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC5_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC6_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC6_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC6_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC6_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC6_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ WREG32(mmTPC7_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+ WREG32(mmTPC7_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0);
+
+ pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_STS1_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_MSG_EN_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_PQ_BASE_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_SIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_PI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_CFG1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS0_3 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_PQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_PQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_STS1_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_0 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CQ_CTL_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CQ_CTL_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CQ_IFIFO_CNT_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_LO_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_CURRENT_INST_HI_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_2 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_BARRIER_CFG_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_BARRIER_CFG_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_CP_DBG_0_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_DBG_0_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_ARUSER_31_11_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CP_AWUSER_31_11_4 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+ PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 & PROT_BITS_OFFS)
+ >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_MST_QUIET_PER & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_STATE_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MSG_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_ERR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_ERR_MSG_EN & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_ERR_STS_DRP & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_9 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_10 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_11 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_12 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CGM_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CGM_STS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CGM_CFG1 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_LOCAL_RANGE_BASE & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_QM_LOCAL_RANGE_BASE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_AXCACHE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_CFG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_WDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_RDATA & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_IND_GW_APB_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_QM_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_QM_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_ROUND_CSR & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_ROUND_CSR & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_ROUND_CSR & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_PROT & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_PROT & PROT_BITS_OFFS) >> 7) << 2;
+ mask = 1 << ((mmTPC7_CFG_PROT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_RD_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WR_RATE_LIMIT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_CREDITS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_ARUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_ARUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_AWUSER_LO & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_AWUSER_HI & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_OPCODE_EXEC & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
+ pb_addr = (mmTPC7_CFG_TSB_CFG_MAX_SIZE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & PROT_BITS_OFFS) >> 7)
+ << 2;
+ mask = 1 << ((mmTPC7_CFG_TSB_CFG_MAX_SIZE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_ADD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_DATA_WR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_DATA_RD & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_CTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_DBGMEM_RC & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TSB_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_INFLIGHT_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_LBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_WQ_HBW_TOTAL_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_IRQ_OCCOUPY_CNTR & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+}
+
+/**
+ * gaudi_init_protection_bits - Initialize protection bits of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, which means the registers are not
+ * protected. Each bit that belongs to a protected register must be cleared
+ * (set to 0).
+ *
+ */
+static void gaudi_init_protection_bits(struct hl_device *hdev)
+{
+ /*
+ * In each 4KB block of registers, the last 128 bytes are protection
+ * bits - 1024 bits in total, one for each register in the block, in
+ * register order.
+ * To calculate the bit that corresponds to a given register, we first
+ * calculate the register's word offset inside those 128 bytes and then
+ * the exact bit inside that word (a word is 4 bytes).
+ *
+ * Register address:
+ *
+ * 31 12 11 7 6 2 1 0
+ * -----------------------------------------------------------------
+ * | Don't | word | bit location | 0 |
+ * | care | offset | inside word | |
+ * -----------------------------------------------------------------
+ *
+ * Bits 7-11 represent the word offset inside the 128 bytes.
+ * Bits 2-6 represent the bit location inside the word.
+ *
+ * When a bit is cleared, it means the register it represents can only
+ * be accessed by a secured entity. When the bit is set, any entity can
+ * access the register.
+ *
+ * The last 4 bytes in the block of PBs control the security of the PBs
+ * themselves, so they must always be configured as secured.
+ */
+
+ gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE);
+ gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE);
+
+ gaudi_init_dma_protection_bits(hdev);
+
+ gaudi_init_mme_protection_bits(hdev);
+
+ gaudi_init_tpc_protection_bits(hdev);
+}
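
As an illustration (not part of the patch itself), the pb_addr/word_offset/mask pattern repeated throughout this file follows the shape sketched below, which is what the comment above describes; the helper name is hypothetical and used only for this sketch.

	/* Illustrative sketch: secure a single register 'reg' by clearing its
	 * protection bit, mirroring the pattern used in this file.
	 */
	static void gaudi_pb_clear_one_reg(struct hl_device *hdev, u32 reg)
	{
		/* Start of the 128-byte PB area of the register's 4KB block */
		u32 pb_addr = (reg & ~0xFFF) + PROT_BITS_OFFS;
		/* Bits 11:7 of the address select the 32-bit word in the PB area */
		u32 word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
		/* Bits 6:2 select the bit inside that word */
		u32 mask = 1 << ((reg & 0x7F) >> 2);

		/* Write 0 to the register's bit (0 = secured), 1 to all others */
		WREG32(pb_addr + word_offset, ~mask);
	}

Note that writing ~mask also sets every other bit in the word to 1, which is why the code above first ORs together the bits of all registers that share a word and issues a single WREG32 per word.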
+
+static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
+{
+ u32 lbw_rng_start[GAUDI_NUMBER_OF_LBW_RANGES];
+ u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
+ int i, j;
+
+ lbw_rng_start[0] = (0xFBFE0000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[0] = (0xFBFFF000 & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[1] = (0xFC0E8000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[1] = (0xFC120000 & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[2] = (0xFC1E8000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[2] = (0xFC48FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[3] = (0xFC600000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[3] = (0xFCC48FFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[4] = (0xFCC4A000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[4] = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[5] = (0xFCCE4000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[5] = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[6] = (0xFCD24000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[6] = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[7] = (0xFCD64000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[7] = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[8] = (0xFCDA4000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[8] = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[9] = (0xFCDE4000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[9] = (0xFCE05FFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[10] = (0xFEC43000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[10] = (0xFEC43FFF & 0x3FFFFFF) + 1;
+
+ lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
+ lbw_rng_end[11] = (0xFE484FFF & 0x3FFFFFF) + 1;
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ WREG32(gaudi_rr_lbw_hit_aw_regs[i],
+ (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
+ WREG32(gaudi_rr_lbw_hit_ar_regs[i],
+ (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
+ }
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+ for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
+ WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
+ lbw_rng_start[j]);
+
+ WREG32(gaudi_rr_lbw_min_ar_regs[i] + (j << 2),
+ lbw_rng_start[j]);
+
+ WREG32(gaudi_rr_lbw_max_aw_regs[i] + (j << 2),
+ lbw_rng_end[j]);
+
+ WREG32(gaudi_rr_lbw_max_ar_regs[i] + (j << 2),
+ lbw_rng_end[j]);
+ }
+}
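
As a small illustration (not part of the patch itself), each LBW range above is derived from a full LBW address window by keeping only the low 26 bits and widening the window by one at each end; presumably the range-register HIT comparison is exclusive, which the -1/+1 would compensate for. A minimal sketch of that derivation, with a hypothetical helper name:

	/* Sketch only: fill one LBW range entry from its full address window.
	 * Assumes 26-bit LBW range registers and an exclusive HIT comparison.
	 */
	static void gaudi_set_lbw_range(u32 *rng_start, u32 *rng_end, int idx,
					u32 win_start, u32 win_end)
	{
		rng_start[idx] = (win_start & 0x3FFFFFF) - 1;
		rng_end[idx] = (win_end & 0x3FFFFFF) + 1;
	}

	/* e.g. the first range above: */
	gaudi_set_lbw_range(lbw_rng_start, lbw_rng_end, 0, 0xFBFE0000, 0xFBFF F000 & ~0xF);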
+
+static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
+{
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
+ u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
+
+ u32 sram_addr_lo = lower_32_bits(SRAM_BASE_ADDR);
+ u32 sram_addr_hi = upper_32_bits(SRAM_BASE_ADDR);
+
+ u32 scratch_addr_lo = lower_32_bits(PSOC_SCRATCHPAD_ADDR);
+ u32 scratch_addr_hi = upper_32_bits(PSOC_SCRATCHPAD_ADDR);
+
+ u32 pcie_fw_addr_lo = lower_32_bits(PCIE_FW_SRAM_ADDR);
+ u32 pcie_fw_addr_hi = upper_32_bits(PCIE_FW_SRAM_ADDR);
+
+ u32 spi_addr_lo = lower_32_bits(SPI_FLASH_BASE_ADDR);
+ u32 spi_addr_hi = upper_32_bits(SPI_FLASH_BASE_ADDR);
+
+ int i;
+
+ /* Configure HBW RR:
+ * 1st range is the DRAM (first 512MB)
* 2nd range is the first 128 bytes in SRAM (for tensor DMA). This area
* is defined as read-only for the user
+ * 3rd range is the PSOC scratch-pad
+ * 4th range is the PCIe F/W SRAM area
+ * 5th range is the SPI FLASH area
+ * 6th range is the host
+ */
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
+ WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
+ }
+
+ for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i], dram_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i], dram_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i], 0xE0000000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i], 0xE0000000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i], 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i], 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 4, sram_addr_lo);
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 4, sram_addr_hi);
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 4, 0xFFFFFF80);
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 4, 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 8, scratch_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 8, scratch_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 8, scratch_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 8, scratch_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 8, 0xFFFF0000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 8, 0xFFFF0000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 8, 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 8, 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 12, pcie_fw_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 12, pcie_fw_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 12, pcie_fw_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 12, pcie_fw_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 12, 0xFFFF8000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 12, 0xFFFF8000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 12, 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 12, 0x3FFFF);
+
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 16, spi_addr_lo);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 16, spi_addr_lo);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 16, spi_addr_hi);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 16, spi_addr_hi);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 16, 0xFE000000);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 16, 0xFE000000);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 16, 0x3FFFF);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 16, 0x3FFFF);
+
+ if (gaudi->hw_cap_initialized & HW_CAP_MMU)
+ continue;
+
+ /* Protect HOST */
+ WREG32(gaudi_rr_hbw_base_low_aw_regs[i] + 20, 0);
+ WREG32(gaudi_rr_hbw_base_low_ar_regs[i] + 20, 0);
+
+ WREG32(gaudi_rr_hbw_base_high_aw_regs[i] + 20, 0);
+ WREG32(gaudi_rr_hbw_base_high_ar_regs[i] + 20, 0);
+
+ WREG32(gaudi_rr_hbw_mask_low_aw_regs[i] + 20, 0);
+ WREG32(gaudi_rr_hbw_mask_low_ar_regs[i] + 20, 0);
+
+ WREG32(gaudi_rr_hbw_mask_high_aw_regs[i] + 20, 0xFFF80);
+ WREG32(gaudi_rr_hbw_mask_high_ar_regs[i] + 20, 0xFFF80);
+ }
+}
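
As an illustration (not part of the patch itself), the six HBW ranges above are programmed with a fixed stride: range n of a given range-register instance lives at byte offset n * 4 in each of the base/mask register arrays, and 64-bit addresses are split with lower_32_bits()/upper_32_bits(). How the H/W combines a base/mask pair into a match window is not spelled out here and is assumed to be a base-plus-mask address comparison. A sketch of the write pattern, with a hypothetical helper name:

	/* Sketch only: program one AW (write) range entry of RR instance 'rr'. */
	static void gaudi_set_hbw_range_aw(int rr, int range, u64 base,
						u32 mask_lo, u32 mask_hi)
	{
		u32 off = range << 2;	/* one 32-bit register slot per range */

		WREG32(gaudi_rr_hbw_base_low_aw_regs[rr] + off, lower_32_bits(base));
		WREG32(gaudi_rr_hbw_base_high_aw_regs[rr] + off, upper_32_bits(base));
		WREG32(gaudi_rr_hbw_mask_low_aw_regs[rr] + off, mask_lo);
		WREG32(gaudi_rr_hbw_mask_high_aw_regs[rr] + off, mask_hi);
	}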
+
+/**
+ * gaudi_init_security - Initialize security model
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the security model of the device. This includes the range
+ * registers and a protection bit per register.
+ *
+ */
+void gaudi_init_security(struct hl_device *hdev)
+{
+ /* Due to H/W errata GAUDI0500, need to override default security
+ * property configuration of MME SBAB and ACC to be non-privileged and
+ * non-secured
+ */
+ WREG32(mmMME0_SBAB_PROT, 0x2);
+ WREG32(mmMME0_ACC_PROT, 0x2);
+ WREG32(mmMME1_SBAB_PROT, 0x2);
+ WREG32(mmMME1_ACC_PROT, 0x2);
+ WREG32(mmMME2_SBAB_PROT, 0x2);
+ WREG32(mmMME2_ACC_PROT, 0x2);
+ WREG32(mmMME3_SBAB_PROT, 0x2);
+ WREG32(mmMME3_ACC_PROT, 0x2);
+
+ /* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */
+ WREG32(0xC01B28, 0x1);
+
+ gaudi_init_range_registers_lbw(hdev);
+
+ gaudi_init_range_registers_hbw(hdev);
+
+ gaudi_init_protection_bits(hdev);
+}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 68f065607544..0d2952bb58df 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -72,7 +72,7 @@
*
*/
-#define GOYA_UBOOT_FW_FILE "habanalabs/goya/goya-u-boot.bin"
+#define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb"
#define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
#define GOYA_MMU_REGS_NUM 63
@@ -87,6 +87,7 @@
#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
+#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@@ -531,7 +532,7 @@ static int goya_early_init(struct hl_device *hdev)
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
- rc = hl_pci_init(hdev, 48);
+ rc = hl_pci_init(hdev);
if (rc)
return rc;
@@ -750,6 +751,8 @@ static int goya_sw_init(struct hl_device *hdev)
}
spin_lock_init(&goya->hw_queues_lock);
+ hdev->supports_coresight = true;
+ hdev->supports_soft_reset = true;
return 0;
@@ -800,6 +803,7 @@ static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
u32 so_base_lo, so_base_hi;
u32 gic_base_lo, gic_base_hi;
u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
+ u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
@@ -836,7 +840,10 @@ static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
else
WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
- WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
+ if (hdev->stop_on_err)
+ dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
+
+ WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}
@@ -886,6 +893,7 @@ void goya_init_dma_qmans(struct hl_device *hdev)
q = &hdev->kernel_queues[0];
for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
+ q->cq_id = q->msi_vec = i;
goya_init_dma_qman(hdev, i, q->bus_address);
goya_init_dma_ch(hdev, i);
}
@@ -2205,80 +2213,37 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
}
/*
- * goya_push_uboot_to_device() - Push u-boot FW code to device.
+ * goya_load_firmware_to_device() - Load LINUX FW code to device.
* @hdev: Pointer to hl_device structure.
*
- * Copy u-boot fw code from firmware file to SRAM BAR.
+ * Copy LINUX fw code from firmware file to HBM BAR.
*
* Return: 0 on success, non-zero for failure.
*/
-static int goya_push_uboot_to_device(struct hl_device *hdev)
+static int goya_load_firmware_to_device(struct hl_device *hdev)
{
void __iomem *dst;
- dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
+ dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
- return hl_fw_push_fw_to_device(hdev, GOYA_UBOOT_FW_FILE, dst);
+ return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
}
/*
- * goya_push_linux_to_device() - Push LINUX FW code to device.
+ * goya_load_boot_fit_to_device() - Load boot fit to device.
* @hdev: Pointer to hl_device structure.
*
- * Copy LINUX fw code from firmware file to HBM BAR.
+ * Copy boot fit file to SRAM BAR.
*
* Return: 0 on success, non-zero for failure.
*/
-static int goya_push_linux_to_device(struct hl_device *hdev)
+static int goya_load_boot_fit_to_device(struct hl_device *hdev)
{
void __iomem *dst;
- dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
+ dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
- return hl_fw_push_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst);
-}
-
-static int goya_pldm_init_cpu(struct hl_device *hdev)
-{
- u32 unit_rst_val;
- int rc;
-
- /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
- goya_init_golden_registers(hdev);
-
- /* Put ARM cores into reset */
- WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
- RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
-
- /* Reset the CA53 MACRO */
- unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
- WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
- RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
- WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
- RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
-
- rc = goya_push_uboot_to_device(hdev);
- if (rc)
- return rc;
-
- rc = goya_push_linux_to_device(hdev);
- if (rc)
- return rc;
-
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
- WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
-
- WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
- lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
- WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
- upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
-
- /* Release ARM core 0 from reset */
- WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
- CPU_RESET_CORE0_DEASSERT);
- RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
-
- return 0;
+ return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst);
}
/*
@@ -2286,7 +2251,7 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
* The version string should be located by that offset.
*/
static void goya_read_device_fw_version(struct hl_device *hdev,
- enum goya_fw_component fwc)
+ enum hl_fw_component fwc)
{
const char *name;
u32 ver_off;
@@ -2320,10 +2285,9 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
}
}
-static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
+static int goya_init_cpu(struct hl_device *hdev)
{
struct goya_device *goya = hdev->asic_specific;
- u32 status;
int rc;
if (!hdev->cpu_enable)
@@ -2342,115 +2306,15 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
return -EIO;
}
- if (hdev->pldm) {
- rc = goya_pldm_init_cpu(hdev);
- if (rc)
- return rc;
-
- goto out;
- }
-
- /* Make sure CPU boot-loader is running */
- rc = hl_poll_timeout(
- hdev,
- mmPSOC_GLOBAL_CONF_WARM_REBOOT,
- status,
- (status == CPU_BOOT_STATUS_DRAM_RDY) ||
- (status == CPU_BOOT_STATUS_SRAM_AVAIL),
- 10000,
- cpu_timeout);
+ rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
+ mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
+ mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0,
+ false, GOYA_CPU_TIMEOUT_USEC,
+ GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
- /* Read U-Boot version now in case we will later fail */
- goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
- goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
-
- if (rc) {
- dev_err(hdev->dev, "Error in ARM u-boot!");
- switch (status) {
- case CPU_BOOT_STATUS_NA:
- dev_err(hdev->dev,
- "ARM status %d - BTL did NOT run\n", status);
- break;
- case CPU_BOOT_STATUS_IN_WFE:
- dev_err(hdev->dev,
- "ARM status %d - Inside WFE loop\n", status);
- break;
- case CPU_BOOT_STATUS_IN_BTL:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in BTL\n", status);
- break;
- case CPU_BOOT_STATUS_IN_PREBOOT:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in Preboot\n", status);
- break;
- case CPU_BOOT_STATUS_IN_SPL:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in SPL\n", status);
- break;
- case CPU_BOOT_STATUS_IN_UBOOT:
- dev_err(hdev->dev,
- "ARM status %d - Stuck in u-boot\n", status);
- break;
- case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
- dev_err(hdev->dev,
- "ARM status %d - DDR initialization failed\n",
- status);
- break;
- case CPU_BOOT_STATUS_UBOOT_NOT_READY:
- dev_err(hdev->dev,
- "ARM status %d - u-boot stopped by user\n",
- status);
- break;
- case CPU_BOOT_STATUS_TS_INIT_FAIL:
- dev_err(hdev->dev,
- "ARM status %d - Thermal Sensor initialization failed\n",
- status);
- break;
- default:
- dev_err(hdev->dev,
- "ARM status %d - Invalid status code\n",
- status);
- break;
- }
- return -EIO;
- }
-
- if (!hdev->fw_loading) {
- dev_info(hdev->dev, "Skip loading FW\n");
- goto out;
- }
-
- if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
- goto out;
-
- rc = goya_push_linux_to_device(hdev);
if (rc)
return rc;
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
-
- rc = hl_poll_timeout(
- hdev,
- mmPSOC_GLOBAL_CONF_WARM_REBOOT,
- status,
- (status == CPU_BOOT_STATUS_SRAM_AVAIL),
- 10000,
- cpu_timeout);
-
- if (rc) {
- if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
- dev_err(hdev->dev,
- "ARM u-boot reports FIT image is corrupted\n");
- else
- dev_err(hdev->dev,
- "ARM Linux failed to load, %d\n", status);
- WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
- return -EIO;
- }
-
- dev_info(hdev->dev, "Successfully loaded firmware to device\n");
-
-out:
goya->hw_cap_initialized |= HW_CAP_CPU;
return 0;
@@ -2565,7 +2429,7 @@ static int goya_hw_init(struct hl_device *hdev)
*/
WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
- rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
+ rc = goya_init_cpu(hdev);
if (rc) {
dev_err(hdev->dev, "failed to initialize CPU\n");
return rc;
@@ -2684,30 +2548,6 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_MMU | HW_CAP_TPC_MBIST |
HW_CAP_GOLDEN | HW_CAP_TPC);
memset(goya->events_stat, 0, sizeof(goya->events_stat));
-
- if (!hdev->pldm) {
- int rc;
- /* In case we are running inside VM and the VM is
- * shutting down, we need to make sure CPU boot-loader
- * is running before we can continue the VM shutdown.
- * That is because the VM will send an FLR signal that
- * we must answer
- */
- dev_info(hdev->dev,
- "Going to wait up to %ds for CPU boot loader\n",
- GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
-
- rc = hl_poll_timeout(
- hdev,
- mmPSOC_GLOBAL_CONF_WARM_REBOOT,
- status,
- (status == CPU_BOOT_STATUS_DRAM_RDY),
- 10000,
- GOYA_CPU_TIMEOUT_USEC);
- if (rc)
- dev_err(hdev->dev,
- "failed to wait for CPU boot loader\n");
- }
}
int goya_suspend(struct hl_device *hdev)
@@ -3555,6 +3395,7 @@ static int goya_validate_cb(struct hl_device *hdev,
*/
rc = goya_validate_wreg32(hdev,
parser, (struct packet_wreg32 *) user_pkt);
+ parser->patched_cb_size += pkt_size;
break;
case PACKET_WREG_BULK:
@@ -4016,7 +3857,8 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
}
void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
- u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec)
+ u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
+ bool eb)
{
struct packet_msg_prot *cq_pkt;
u32 tmp;
@@ -5042,7 +4884,7 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
-static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
+static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
u32 flags)
{
struct goya_device *goya = hdev->asic_specific;
@@ -5051,11 +4893,11 @@ static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
/* no need in L1 only invalidation in Goya */
if (!is_hard)
- return;
+ return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
@@ -5077,13 +4919,17 @@ static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
mutex_unlock(&hdev->mmu_cache_lock);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
-static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
- bool is_hard, u32 asid, u64 va, u64 size)
+static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
+ bool is_hard, u32 asid, u64 va, u64 size)
{
struct goya_device *goya = hdev->asic_specific;
u32 status, timeout_usec, inv_data, pi;
@@ -5091,11 +4937,11 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
hdev->hard_reset_pending)
- return;
+ return 0;
/* no need in L1 only invalidation in Goya */
if (!is_hard)
- return;
+ return 0;
if (hdev->pldm)
timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
@@ -5128,9 +4974,13 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
mutex_unlock(&hdev->mmu_cache_lock);
- if (rc)
- dev_notice_ratelimited(hdev->dev,
- "Timeout when waiting for MMU cache invalidation\n");
+ if (rc) {
+ dev_err_ratelimited(hdev->dev,
+ "MMU cache invalidation timeout\n");
+ hl_device_reset(hdev, true, false);
+ }
+
+ return rc;
}
int goya_send_heartbeat(struct hl_device *hdev)
@@ -5178,6 +5028,16 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
+static void goya_enable_clock_gating(struct hl_device *hdev)
+{
+
+}
+
+static void goya_disable_clock_gating(struct hl_device *hdev)
+{
+
+}
+
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
struct seq_file *s)
{
@@ -5293,6 +5153,68 @@ static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
return RREG32(mmHW_STATE);
}
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
+{
+ return cq_idx;
+}
+
+static void goya_ext_queue_init(struct hl_device *hdev, u32 q_idx)
+{
+
+}
+
+static void goya_ext_queue_reset(struct hl_device *hdev, u32 q_idx)
+{
+
+}
+
+static u32 goya_get_signal_cb_size(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static u32 goya_get_wait_cb_size(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id)
+{
+
+}
+
+static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id,
+ u16 sob_val, u16 mon_id, u32 q_idx)
+{
+
+}
+
+static void goya_reset_sob(struct hl_device *hdev, void *data)
+{
+
+}
+
+static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
+{
+ if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
+ HL_POWER9_HOST_MAGIC) {
+ dev_dbg(hdev->dev, "Working in 64-bit DMA mode\n");
+ hdev->power9_64bit_dma_enable = 1;
+ hdev->dma_mask = 64;
+ } else {
+ dev_dbg(hdev->dev, "Working in 48-bit DMA mode\n");
+ hdev->power9_64bit_dma_enable = 0;
+ hdev->dma_mask = 48;
+ }
+}
+
+u64 goya_get_device_time(struct hl_device *hdev)
+{
+ u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
+
+ return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5337,6 +5259,8 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
+ .enable_clock_gating = goya_enable_clock_gating,
+ .disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
.soft_reset_late_init = goya_soft_reset_late_init,
@@ -5352,7 +5276,20 @@ static const struct hl_asic_funcs goya_funcs = {
.rreg = hl_rreg,
.wreg = hl_wreg,
.halt_coresight = goya_halt_coresight,
- .get_clk_rate = goya_get_clk_rate
+ .get_clk_rate = goya_get_clk_rate,
+ .get_queue_id_for_cq = goya_get_queue_id_for_cq,
+ .read_device_fw_version = goya_read_device_fw_version,
+ .load_firmware_to_device = goya_load_firmware_to_device,
+ .load_boot_fit_to_device = goya_load_boot_fit_to_device,
+ .ext_queue_init = goya_ext_queue_init,
+ .ext_queue_reset = goya_ext_queue_reset,
+ .get_signal_cb_size = goya_get_signal_cb_size,
+ .get_wait_cb_size = goya_get_wait_cb_size,
+ .gen_signal_cb = goya_gen_signal_cb,
+ .gen_wait_cb = goya_gen_wait_cb,
+ .reset_sob = goya_reset_sob,
+ .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
+ .get_device_time = goya_get_device_time
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index c3230cb6e25c..d36f8d90c9c9 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -45,7 +45,7 @@
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
-#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
+#define GOYA_CPU_TIMEOUT_USEC 15000000 /* 15s */
#define TPC_ENABLED_MASK 0xFF
@@ -149,11 +149,6 @@
#define HW_CAP_GOLDEN 0x00000400
#define HW_CAP_TPC 0x00000800
-enum goya_fw_component {
- FW_COMP_UBOOT,
- FW_COMP_PREBOOT
-};
-
struct goya_device {
/* TODO: remove hw_queues_lock after moving to scheduler code */
spinlock_t hw_queues_lock;
@@ -221,7 +216,8 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size);
void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
- u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec);
+ u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
+ bool eb);
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
@@ -234,5 +230,7 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
+u64 goya_get_device_time(struct hl_device *hdev);
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index a1bc930d904f..1258724ea510 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -266,7 +266,7 @@ static int goya_config_stm(struct hl_device *hdev,
WREG32(base_reg + 0xDF4, 0x80);
WREG32(base_reg + 0xE8C, input->frequency);
WREG32(base_reg + 0xE90, 0x7FF);
- WREG32(base_reg + 0xE80, 0x7 | (input->id << 16));
+ WREG32(base_reg + 0xE80, 0x27 | (input->id << 16));
} else {
WREG32(base_reg + 0xE80, 4);
WREG32(base_reg + 0xD64, 0);
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index d6ec12b3e692..de8297001fea 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -683,7 +683,6 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask = 1 << ((mmTPC0_CFG_SEMAPHORE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_VFLAGS & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SFLAGS & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_LFSR_POLYNOM & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_STATUS & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -695,7 +694,6 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
- mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_STALL & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_MSS_CONFIG & 0x7F) >> 2);
mask |= 1 << ((mmTPC0_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
@@ -875,6 +873,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC1_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC1_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC1_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -882,6 +890,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC1_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1057,6 +1069,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC2_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC2_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC2_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1064,6 +1086,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC2_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1239,6 +1265,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC3_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC3_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC3_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH
& PROT_BITS_OFFS) >> 7) << 2;
@@ -1246,6 +1282,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC3_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1421,6 +1461,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC4_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC4_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC4_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1428,6 +1478,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC4_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1603,6 +1657,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC5_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC5_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC5_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1610,6 +1674,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC5_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1785,6 +1853,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC6_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC6_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC6_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1792,6 +1870,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC6_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
@@ -1967,6 +2049,16 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE);
goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE);
+ pb_addr = (mmTPC7_CFG_SEMAPHORE & ~0xFFF) + PROT_BITS_OFFS;
+ word_offset = ((mmTPC7_CFG_SEMAPHORE & PROT_BITS_OFFS) >> 7) << 2;
+
+ mask = 1 << ((mmTPC7_CFG_SEMAPHORE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_VFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_SFLAGS & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_STATUS & 0x7F) >> 2);
+
+ WREG32(pb_addr + word_offset, ~mask);
+
pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS;
word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH &
PROT_BITS_OFFS) >> 7) << 2;
@@ -1974,6 +2066,10 @@ static void goya_init_tpc_protection_bits(struct hl_device *hdev)
mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2);
mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_STALL & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_MSS_CONFIG & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_CAUSE & 0x7F) >> 2);
+ mask |= 1 << ((mmTPC7_CFG_TPC_INTR_MASK & 0x7F) >> 2);
WREG32(pb_addr + word_offset, ~mask);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 31ebcf9458fe..1ecdcf8b763a 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -23,7 +23,9 @@
#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
-#define HL_PENDING_RESET_PER_SEC 5
+#define HL_PENDING_RESET_PER_SEC 30
+
+#define HL_HARD_RESET_MAX_TIMEOUT 120
#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
@@ -51,6 +53,14 @@
/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
+#define HL_RSVD_SOBS 4
+#define HL_RSVD_MONS 2
+
+#define HL_RSVD_SOBS_IN_USE 2
+#define HL_RSVD_MONS_IN_USE 1
+
+#define HL_MAX_SOB_VAL (1 << 15)
+
/**
* struct pgt_info - MMU hop page info.
* @node: hash linked-list node for the pgts shadow hash of pgts.
@@ -76,6 +86,16 @@ struct hl_device;
struct hl_fpriv;
/**
+ * enum hl_fw_component - F/W components to read version through registers.
+ * @FW_COMP_UBOOT: u-boot.
+ * @FW_COMP_PREBOOT: preboot.
+ */
+enum hl_fw_component {
+ FW_COMP_UBOOT,
+ FW_COMP_PREBOOT
+};
+
+/**
* enum hl_queue_type - Supported QUEUE types.
* @QUEUE_TYPE_NA: queue is not available.
* @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
@@ -94,6 +114,26 @@ enum hl_queue_type {
QUEUE_TYPE_HW
};
+enum hl_cs_type {
+ CS_TYPE_DEFAULT,
+ CS_TYPE_SIGNAL,
+ CS_TYPE_WAIT
+};
+
+/*
+ * struct hl_hw_sob - H/W SOB info.
+ * @hdev: habanalabs device structure.
+ * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
+ * @sob_id: id of this SOB.
+ * @q_idx: the H/W queue that uses this SOB.
+ */
+struct hl_hw_sob {
+ struct hl_device *hdev;
+ struct kref kref;
+ u32 sob_id;
+ u32 q_idx;
+};
+
/**
* struct hw_queue_properties - queue information.
* @type: queue type.
@@ -250,17 +290,23 @@ struct asic_fixed_properties {
};
/**
- * struct hl_dma_fence - wrapper for fence object used by command submissions.
+ * struct hl_cs_compl - command submission completion object.
* @base_fence: kernel fence object.
* @lock: spinlock to protect fence.
* @hdev: habanalabs device structure.
+ * @hw_sob: the H/W SOB used in this signal/wait CS.
* @cs_seq: command submission sequence number.
+ * @type: type of the CS - signal/wait.
+ * @sob_val: the SOB value that is used in this signal/wait CS.
*/
-struct hl_dma_fence {
+struct hl_cs_compl {
struct dma_fence base_fence;
spinlock_t lock;
struct hl_device *hdev;
+ struct hl_hw_sob *hw_sob;
u64 cs_seq;
+ enum hl_cs_type type;
+ u16 sob_val;
};
/*
@@ -358,6 +404,7 @@ struct hl_cs_job;
/**
* struct hl_hw_queue - describes a H/W transport queue.
+ * @hw_sob: array of the used H/W SOBs by this H/W queue.
* @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
* @queue_type: type of queue.
* @kernel_address: holds the queue's kernel virtual address.
@@ -365,11 +412,19 @@ struct hl_cs_job;
* @pi: holds the queue's pi value.
* @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
* @hw_queue_id: the id of the H/W queue.
+ * @cq_id: the id for the corresponding CQ for this H/W queue.
+ * @msi_vec: the IRQ number of the H/W queue.
* @int_queue_len: length of internal queue (number of entries).
+ * @next_sob_val: the next value to use for the currently used SOB.
+ * @base_sob_id: the base SOB id of the SOBs used by this queue.
+ * @base_mon_id: the base MON id of the MONs used by this queue.
* @valid: is the queue valid (we have array of 32 queues, not all of them
- * exists).
+ * exist).
+ * @curr_sob_offset: the id offset to the currently used SOB from the
+ * HL_RSVD_SOBS that are being used by this queue.
*/
struct hl_hw_queue {
+ struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
struct hl_cs_job **shadow_queue;
enum hl_queue_type queue_type;
u64 kernel_address;
@@ -377,8 +432,14 @@ struct hl_hw_queue {
u32 pi;
u32 ci;
u32 hw_queue_id;
+ u32 cq_id;
+ u32 msi_vec;
u16 int_queue_len;
+ u16 next_sob_val;
+ u16 base_sob_id;
+ u16 base_mon_id;
u8 valid;
+ u8 curr_sob_offset;
};
/**
@@ -517,6 +578,8 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
+ * @enable_clock_gating: enable clock gating for reducing power consumption.
+ * @disable_clock_gating: disable clock gating for accessing registers on HBW.
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@@ -534,6 +597,21 @@ enum hl_pll_frequency {
* @wreg: Write a register. Needed for simulator support.
* @halt_coresight: stop the ETF and ETR traces.
* @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
+ * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
+ * @read_device_fw_version: read the device's firmware versions that are
+ * contained in registers
+ * @load_firmware_to_device: load the firmware to the device's memory
+ * @load_boot_fit_to_device: load boot fit to device's memory
+ * @ext_queue_init: Initialize the given external queue.
+ * @ext_queue_reset: Reset the given external queue.
+ * @get_signal_cb_size: Get signal CB size.
+ * @get_wait_cb_size: Get wait CB size.
+ * @gen_signal_cb: Generate a signal CB.
+ * @gen_wait_cb: Generate a wait CB.
+ * @reset_sob: Reset a SOB.
+ * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
+ * firmware configuration
+ * @get_device_time: Get the device time.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -578,7 +656,8 @@ struct hl_asic_funcs {
struct sg_table *sgt);
void (*add_end_of_cb_packets)(struct hl_device *hdev,
u64 kernel_address, u32 len,
- u64 cq_addr, u32 cq_val, u32 msix_num);
+ u64 cq_addr, u32 cq_val, u32 msix_num,
+ bool eb);
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
int (*context_switch)(struct hl_device *hdev, u32 asid);
void (*restore_phase_topology)(struct hl_device *hdev);
@@ -596,11 +675,13 @@ struct hl_asic_funcs {
u32 *size);
u64 (*read_pte)(struct hl_device *hdev, u64 addr);
void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
- void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
+ int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
u32 flags);
- void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
+ int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
+ void (*enable_clock_gating)(struct hl_device *hdev);
+ void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
struct seq_file *s);
@@ -620,6 +701,21 @@ struct hl_asic_funcs {
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
void (*halt_coresight)(struct hl_device *hdev);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
+ u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
+ void (*read_device_fw_version)(struct hl_device *hdev,
+ enum hl_fw_component fwc);
+ int (*load_firmware_to_device)(struct hl_device *hdev);
+ int (*load_boot_fit_to_device)(struct hl_device *hdev);
+ void (*ext_queue_init)(struct hl_device *hdev, u32 hw_queue_id);
+ void (*ext_queue_reset)(struct hl_device *hdev, u32 hw_queue_id);
+ u32 (*get_signal_cb_size)(struct hl_device *hdev);
+ u32 (*get_wait_cb_size)(struct hl_device *hdev);
+ void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id);
+ void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id,
+ u16 sob_val, u16 mon_id, u32 q_idx);
+ void (*reset_sob)(struct hl_device *hdev, void *data);
+ void (*set_dma_mask_from_fw)(struct hl_device *hdev);
+ u64 (*get_device_time)(struct hl_device *hdev);
};
@@ -659,8 +755,8 @@ struct hl_va_range {
* with huge pages.
* @dram_va_range: holds available virtual addresses for DRAM mappings.
* @mem_hash_lock: protects the mem_hash.
- * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifing the
- * MMU hash or walking the PGT requires talking this lock
+ * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
+ * MMU hash or walking the PGT requires taking this lock.
* @debugfs_list: node in debugfs list of contexts.
* @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
* to user so user could inquire about CS. It is used as
@@ -751,10 +847,14 @@ struct hl_userptr {
* @job_lock: spinlock for the CS's jobs list. Needed for free_job.
* @refcount: reference counter for usage of the CS.
* @fence: pointer to the fence object of this CS.
+ * @signal_fence: pointer to the fence object of the signal CS (used by wait
+ * CS only).
+ * @finish_work: workqueue object to run when CS is completed by H/W.
* @work_tdr: delayed work node for TDR.
* @mirror_node : node in device mirror list of command submissions.
* @debugfs_list: node in debugfs list of command submissions.
* @sequence: the sequence number of this CS.
+ * @type: CS_TYPE_*.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout : true if CS was timedout.
@@ -769,10 +869,13 @@ struct hl_cs {
spinlock_t job_lock;
struct kref refcount;
struct dma_fence *fence;
+ struct dma_fence *signal_fence;
+ struct work_struct finish_work;
struct delayed_work work_tdr;
struct list_head mirror_node;
struct list_head debugfs_list;
u64 sequence;
+ enum hl_cs_type type;
u8 submitted;
u8 completed;
u8 timedout;
@@ -799,6 +902,12 @@ struct hl_cs {
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
+ * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
+ * info is needed later, when adding the 2xMSG_PROT at the
+ * end of the JOB, to know which barriers to put in the
+ * MSG_PROT packets. Relevant only for GAUDI, as GOYA doesn't
+ * have streams, so an engine can't be kept busy by another
+ * stream.
*/
struct hl_cs_job {
struct list_head cs_node;
@@ -814,6 +923,7 @@ struct hl_cs_job {
u32 user_cb_size;
u32 job_cb_size;
u8 is_kernel_allocated_cb;
+ u8 contains_dma_pkt;
};
/**
@@ -833,6 +943,12 @@ struct hl_cs_job {
* @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
* handle to a kernel-allocated CB object, false
* otherwise (SRAM/DRAM/host address).
+ * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
+ * info is needed later, when adding the 2xMSG_PROT at the
+ * end of the JOB, to know which barriers to put in the
+ * MSG_PROT packets. Relevant only for GAUDI, as GOYA doesn't
+ * have streams, so an engine can't be kept busy by another
+ * stream.
*/
struct hl_cs_parser {
struct hl_cb *user_cb;
@@ -846,6 +962,7 @@ struct hl_cs_parser {
u32 patched_cb_size;
u8 job_id;
u8 is_kernel_allocated_cb;
+ u8 contains_dma_pkt;
};
@@ -1093,6 +1210,16 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+#define RMWREG32(reg, val, mask) \
+ do { \
+ u32 tmp_ = RREG32(reg); \
+ tmp_ &= ~(mask); \
+ tmp_ |= ((val) << __ffs(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+
+#define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
+
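For orientation, the read-modify-write helpers above can be exercised outside the driver; the following standalone sketch models a register as a plain variable (the field layout is hypothetical, and user-space ffs(), which is 1-based, stands in for the kernel's 0-based __ffs()):

/*
 * Standalone illustration of the RMWREG32()/RREG32_MASK() pattern.
 * Not part of the driver; the field layout is made up for the example.
 */
#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs(), 1-based; kernel __ffs() is 0-based */

#define FIELD_MASK	0x00000FF0u	/* hypothetical 8-bit field at bits [11:4] */

static uint32_t rmw32(uint32_t reg, uint32_t val, uint32_t mask)
{
	reg &= ~mask;			/* clear the field */
	reg |= val << (ffs(mask) - 1);	/* place the value at the field's LSB */
	return reg;
}

static uint32_t read_mask32(uint32_t reg, uint32_t mask)
{
	return (reg & mask) >> (ffs(mask) - 1);	/* extract the field */
}

int main(void)
{
	uint32_t reg = 0xDEAD000Fu;

	reg = rmw32(reg, 0x5A, FIELD_MASK);
	printf("reg = 0x%08X, field = 0x%02X\n", reg, read_mask32(reg, FIELD_MASK));
	return 0;
}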
#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
#define WREG32_FIELD(reg, offset, field, val) \
@@ -1282,6 +1409,8 @@ struct hl_device_idle_busy_ts {
* @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
* @id: device minor.
* @id_control: minor of the control device
+ * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
+ * addresses.
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
@@ -1295,11 +1424,19 @@ struct hl_device_idle_busy_ts {
* huge pages.
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
+ * @mmu_huge_page_opt: is MMU huge pages optimization enabled.
+ * @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
* that only a single user is configuring the debug infrastructure.
+ * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
+ * only to POWER9 machines.
* @cdev_sysfs_created: were char devices and sysfs nodes created.
+ * @stop_on_err: true if engines should stop on error.
+ * @supports_sync_stream: is sync stream supported.
+ * @supports_coresight: is CoreSight supported.
+ * @supports_soft_reset: is soft reset supported.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -1366,6 +1503,7 @@ struct hl_device {
u32 idle_busy_ts_idx;
u16 id;
u16 id_control;
+ u16 cpu_pci_msb_addr;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
@@ -1376,18 +1514,31 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
+ u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
+ u8 power9_64bit_dma_enable;
u8 cdev_sysfs_created;
+ u8 stop_on_err;
+ u8 supports_sync_stream;
+ u8 supports_coresight;
+ u8 supports_soft_reset;
/* Parameters for bring-up */
u8 mmu_enable;
+ u8 mmu_huge_page_opt;
u8 cpu_enable;
u8 reset_pcilink;
u8 cpu_queues_enable;
u8 fw_loading;
u8 pldm;
+ u8 axi_drain;
+ u8 sram_scrambler_enable;
+ u8 dram_scrambler_enable;
+ u8 hard_reset_on_fw_events;
+ u8 bmc_enable;
+ u8 rl_enable;
};
@@ -1554,8 +1705,10 @@ int hl_cb_pool_fini(struct hl_device *hdev);
void hl_cs_rollback_all(struct hl_device *hdev);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
+void hl_sob_reset_error(struct kref *ref);
void goya_set_asic_funcs(struct hl_device *hdev);
+void gaudi_set_asic_funcs(struct hl_device *hdev);
int hl_vm_ctx_init(struct hl_ctx *ctx);
void hl_vm_ctx_fini(struct hl_ctx *ctx);
@@ -1583,11 +1736,14 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
-int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, long *result);
+int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
+int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
+ size_t irq_arr_size);
int hl_fw_test_cpu_queue(struct hl_device *hdev);
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
dma_addr_t *dma_handle);
@@ -1596,6 +1752,10 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
int hl_fw_send_heartbeat(struct hl_device *hdev);
int hl_fw_armcp_info_get(struct hl_device *hdev);
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
+int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
+ u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
+ u32 boot_err0_reg, bool skip_bmc,
+ u32 cpu_timeout, u32 boot_fit_timeout);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
@@ -1605,9 +1765,8 @@ int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
u64 dram_base_address, u64 host_phys_base_address,
u64 host_phys_size);
-int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
+int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
-int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
@@ -1627,6 +1786,10 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
long value);
u64 hl_get_max_power(struct hl_device *hdev);
void hl_set_max_power(struct hl_device *hdev, u64 value);
+int hl_set_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
+int hl_set_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index b670859c677a..8652c7e5d7f1 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -47,6 +47,7 @@ static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, ids);
/*
* get_asic_type - translate device id to asic type
@@ -171,6 +172,7 @@ out_err:
put_pid(hpriv->taskpid);
kfree(hpriv);
+
return rc;
}
@@ -230,8 +232,15 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
+ hdev->clock_gating = 1;
hdev->reset_pcilink = 0;
+ hdev->axi_drain = 0;
+ hdev->sram_scrambler_enable = 1;
+ hdev->dram_scrambler_enable = 1;
+ hdev->rl_enable = 1;
+ hdev->bmc_enable = 1;
+ hdev->hard_reset_on_fw_events = 1;
}
/*
@@ -267,11 +276,6 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
dev_err(&pdev->dev, "Unsupported ASIC\n");
rc = -ENODEV;
goto free_hdev;
- } else if (hdev->asic_type == ASIC_GAUDI) {
- dev_err(&pdev->dev,
- "GAUDI is not supported by the current kernel\n");
- rc = -ENODEV;
- goto free_hdev;
}
} else {
hdev->asic_type = asic_type;
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 6474b868ef27..52eedd3a6c3a 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -71,6 +71,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));
hw_ip.armcp_cpld_version = le32_to_cpu(prop->armcp_info.cpld_version);
+ hw_ip.module_id = le32_to_cpu(prop->armcp_info.card_location);
+
hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
@@ -258,6 +260,22 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}
+static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
+{
+ struct hl_info_time_sync time_sync = {0};
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
+ time_sync.host_time = ktime_get_raw_ns();
+
+ return copy_to_user(out, &time_sync,
+ min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -315,6 +333,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = get_clk_rate(hdev, args);
break;
+ case HL_INFO_TIME_SYNC:
+ return time_sync_info(hdev, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -ENOTTY;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 91579dde9262..f4434b39ef1b 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -111,7 +111,7 @@ static int ext_queue_sanity_checks(struct hl_device *hdev,
bool reserve_cq_entry)
{
atomic_t *free_slots =
- &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+ &hdev->completion_queue[q->cq_id].free_slots_cnt;
int free_slots_cnt;
/* Check we have enough space in the queue */
@@ -194,7 +194,7 @@ static int hw_queue_sanity_checks(struct hl_device *hdev, struct hl_hw_queue *q,
int num_of_entries)
{
atomic_t *free_slots =
- &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
+ &hdev->completion_queue[q->cq_id].free_slots_cnt;
/*
* Check we have enough space in the completion queue.
@@ -308,13 +308,14 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
* No need to check if CQ is full because it was already
* checked in ext_queue_sanity_checks
*/
- cq = &hdev->completion_queue[q->hw_queue_id];
+ cq = &hdev->completion_queue[q->cq_id];
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
cq_addr,
le32_to_cpu(cq_pkt.data),
- q->hw_queue_id);
+ q->msi_vec,
+ job->contains_dma_pkt);
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
@@ -401,21 +402,111 @@ static void hw_queue_schedule_job(struct hl_cs_job *job)
* No need to check if CQ is full because it was already
* checked in hw_queue_sanity_checks
*/
- cq = &hdev->completion_queue[q->hw_queue_id];
+ cq = &hdev->completion_queue[q->cq_id];
+
cq->pi = hl_cq_inc_ptr(cq->pi);
ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
- * hl_hw_queue_schedule_cs - schedule a command submission
- *
- * @job : pointer to the CS
+ * init_signal_wait_cs - initialize a signal/wait CS
+ * @cs: pointer to the signal/wait CS
*
+ * H/W queues spinlock should be taken before calling this function
+ */
+static void init_signal_wait_cs(struct hl_cs *cs)
+{
+ struct hl_ctx *ctx = cs->ctx;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_hw_queue *hw_queue;
+ struct hl_cs_compl *cs_cmpl =
+ container_of(cs->fence, struct hl_cs_compl, base_fence);
+
+ struct hl_hw_sob *hw_sob;
+ struct hl_cs_job *job;
+ u32 q_idx;
+
+ /* There is only one job in a signal/wait CS */
+ job = list_first_entry(&cs->job_list, struct hl_cs_job,
+ cs_node);
+ q_idx = job->hw_queue_id;
+ hw_queue = &hdev->kernel_queues[q_idx];
+
+ if (cs->type & CS_TYPE_SIGNAL) {
+ hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset];
+
+ cs_cmpl->hw_sob = hw_sob;
+ cs_cmpl->sob_val = hw_queue->next_sob_val++;
+
+ dev_dbg(hdev->dev,
+ "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+
+ hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
+ cs_cmpl->hw_sob->sob_id);
+
+ kref_get(&hw_sob->kref);
+
+ /* check for wraparound */
+ if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) {
+ /*
+ * Decrement as we reached the max value.
+ * The release function won't be called here as we've
+ * just incremented the refcount.
+ */
+ kref_put(&hw_sob->kref, hl_sob_reset_error);
+ hw_queue->next_sob_val = 1;
+ /* only two SOBs are currently in use */
+ hw_queue->curr_sob_offset =
+ (hw_queue->curr_sob_offset + 1) %
+ HL_RSVD_SOBS_IN_USE;
+
+ dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
+ hw_queue->curr_sob_offset, q_idx);
+ }
+ } else if (cs->type & CS_TYPE_WAIT) {
+ struct hl_cs_compl *signal_cs_cmpl;
+
+ signal_cs_cmpl = container_of(cs->signal_fence,
+ struct hl_cs_compl,
+ base_fence);
+
+ /* copy the SOB id and value of the signal CS */
+ cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
+ cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
+
+ dev_dbg(hdev->dev,
+ "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n",
+ cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
+ hw_queue->base_mon_id, q_idx);
+
+ hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
+ cs_cmpl->hw_sob->sob_id,
+ cs_cmpl->sob_val,
+ hw_queue->base_mon_id,
+ q_idx);
+
+ kref_get(&cs_cmpl->hw_sob->kref);
+ /*
+ * Must put the signal fence after the SOB refcnt increment so
+ * the SOB refcnt won't turn 0 and reset the SOB before the
+ * wait CS was submitted.
+ */
+ mb();
+ dma_fence_put(cs->signal_fence);
+ cs->signal_fence = NULL;
+ }
+}
+
+/*
+ * hl_hw_queue_schedule_cs - schedule a command submission
+ * @cs: pointer to the CS
*/
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
- struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_ctx *ctx = cs->ctx;
+ struct hl_device *hdev = ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
int rc = 0, i, cq_cnt;
@@ -461,6 +552,9 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
}
}
+ if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT))
+ init_signal_wait_cs(cs);
+
spin_lock(&hdev->hw_queues_mirror_lock);
list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
@@ -569,6 +663,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
q->ci = 0;
q->pi = 0;
+ if (!is_cpu_queue)
+ hdev->asic_funcs->ext_queue_init(hdev, q->hw_queue_id);
+
return 0;
free_queue:
@@ -791,5 +888,8 @@ void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
continue;
q->pi = q->ci = 0;
+
+ if (q->queue_type == QUEUE_TYPE_EXT)
+ hdev->asic_funcs->ext_queue_reset(hdev, q->hw_queue_id);
}
}
diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c
index a21a26e07c3b..8c6cd77e6af6 100644
--- a/drivers/misc/habanalabs/hwmon.c
+++ b/drivers/misc/habanalabs/hwmon.c
@@ -200,6 +200,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_offset:
+ case hwmon_temp_reset_history:
break;
default:
return -EINVAL;
@@ -216,6 +217,24 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
}
hl_set_pwm_info(hdev, channel, attr, val);
break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_reset_history:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_voltage(hdev, channel, attr, val);
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_reset_history:
+ break;
+ default:
+ return -EINVAL;
+ }
+ hl_set_current(hdev, channel, attr, val);
+ break;
default:
return -EINVAL;
}
@@ -237,6 +256,8 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
return 0444;
case hwmon_temp_offset:
return 0644;
+ case hwmon_temp_reset_history:
+ return 0200;
}
break;
case hwmon_in:
@@ -246,6 +267,8 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_in_max:
case hwmon_in_highest:
return 0444;
+ case hwmon_in_reset_history:
+ return 0200;
}
break;
case hwmon_curr:
@@ -255,6 +278,8 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_curr_max:
case hwmon_curr_highest:
return 0444;
+ case hwmon_curr_reset_history:
+ return 0200;
}
break;
case hwmon_fan:
@@ -462,6 +487,56 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
sensor_index, rc);
}
+int hl_set_voltage(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_VOLTAGE_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set voltage of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
+int hl_set_current(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct armcp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(ARMCP_PACKET_CURRENT_SET <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ SENSORS_PKT_TIMEOUT, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set current of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index bdd0a4c3a9cf..a34fc39ad87e 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -35,7 +35,8 @@ struct hl_eq_entry {
enum pq_init_status {
PQ_INIT_STATUS_NA = 0,
PQ_INIT_STATUS_READY_FOR_CP,
- PQ_INIT_STATUS_READY_FOR_HOST
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI
};
/*
@@ -193,6 +194,16 @@ enum pq_init_status {
* Set the value of the offset property of a specified thermal sensor.
* The packet's arguments specify the desired sensor and the field to
* set.
+ *
+ * ARMCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * ARMCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
*/
enum armcp_packet_id {
@@ -220,6 +231,8 @@ enum armcp_packet_id {
ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */
ARMCP_RESERVED,
ARMCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ ARMCP_PACKET_VOLTAGE_SET, /* sysfs */
+ ARMCP_PACKET_CURRENT_SET, /* sysfs */
};
#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -288,21 +301,24 @@ enum armcp_temp_type {
armcp_temp_crit,
armcp_temp_crit_hyst,
armcp_temp_offset = 19,
- armcp_temp_highest = 22
+ armcp_temp_highest = 22,
+ armcp_temp_reset_history = 23
};
enum armcp_in_attributes {
armcp_in_input,
armcp_in_min,
armcp_in_max,
- armcp_in_highest = 7
+ armcp_in_highest = 7,
+ armcp_in_reset_history
};
enum armcp_curr_attributes {
armcp_curr_input,
armcp_curr_min,
armcp_curr_max,
- armcp_curr_highest = 7
+ armcp_curr_highest = 7,
+ armcp_curr_reset_history
};
enum armcp_fan_attributes {
@@ -336,10 +352,23 @@ struct armcp_sensor {
};
/**
+ * enum armcp_card_types - ASIC card type.
+ * @armcp_card_type_pci: PCI card.
+ * @armcp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum armcp_card_types {
+ armcp_card_type_pci,
+ armcp_card_type_pmc
+};
+
+/**
* struct armcp_info - Info from ArmCP that is necessary to the host's driver
* @sensors: available sensors description.
* @kernel_version: ArmCP linux kernel version.
* @reserved: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has a different connection topology
+ * depending on its location (relevant for the PMC card type).
* @cpld_version: CPLD programmed F/W version.
* @infineon_version: Infineon main DC-DC version.
* @fuse_version: silicon production FUSE information.
@@ -351,7 +380,9 @@ struct armcp_sensor {
struct armcp_info {
struct armcp_sensor sensors[ARMCP_MAX_SENSORS];
__u8 kernel_version[VERSION_MAX_LEN];
- __le32 reserved[3];
+ __le32 reserved;
+ __le32 card_type;
+ __le32 card_location;
__le32 cpld_version;
__le32 infineon_version;
__u8 fuse_version[VERSION_MAX_LEN];
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
new file mode 100644
index 000000000000..cf80e31317ad
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_CPU_IF_REGS_H_
+#define ASIC_REG_CPU_IF_REGS_H_
+
+/*
+ *****************************************
+ * CPU_IF (Prototype: CPU_IF)
+ *****************************************
+ */
+
+#define mmCPU_IF_ARUSER_OVR 0x442104
+
+#define mmCPU_IF_ARUSER_OVR_EN 0x442108
+
+#define mmCPU_IF_AWUSER_OVR 0x44210C
+
+#define mmCPU_IF_AWUSER_OVR_EN 0x442110
+
+#define mmCPU_IF_AXCACHE_OVR 0x442114
+
+#define mmCPU_IF_LOCK_OVR 0x442118
+
+#define mmCPU_IF_PROT_OVR 0x44211C
+
+#define mmCPU_IF_MAX_OUTSTANDING 0x442120
+
+#define mmCPU_IF_EARLY_BRESP_EN 0x442124
+
+#define mmCPU_IF_FORCE_RSP_OK 0x442128
+
+#define mmCPU_IF_CPU_MSB_ADDR 0x44212C
+
+#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
+
+#define mmCPU_IF_TOTAL_WR_CNT 0x442140
+
+#define mmCPU_IF_INFLIGHT_WR_CNT 0x442144
+
+#define mmCPU_IF_TOTAL_RD_CNT 0x442150
+
+#define mmCPU_IF_INFLIGHT_RD_CNT 0x442154
+
+#define mmCPU_IF_PF_PQ_PI 0x442200
+
+#define mmCPU_IF_PQ_BASE_ADDR_LOW 0x442204
+
+#define mmCPU_IF_PQ_BASE_ADDR_HIGH 0x442208
+
+#define mmCPU_IF_PQ_LENGTH 0x44220C
+
+#define mmCPU_IF_CQ_BASE_ADDR_LOW 0x442210
+
+#define mmCPU_IF_CQ_BASE_ADDR_HIGH 0x442214
+
+#define mmCPU_IF_CQ_LENGTH 0x442218
+
+#define mmCPU_IF_EQ_BASE_ADDR_LOW 0x442220
+
+#define mmCPU_IF_EQ_BASE_ADDR_HIGH 0x442224
+
+#define mmCPU_IF_EQ_LENGTH 0x442228
+
+#define mmCPU_IF_EQ_RD_OFFS 0x44222C
+
+#define mmCPU_IF_QUEUE_INIT 0x442230
+
+#define mmCPU_IF_TPC_SERR_INTR_STS 0x442300
+
+#define mmCPU_IF_TPC_SERR_INTR_CLR 0x442304
+
+#define mmCPU_IF_TPC_SERR_INTR_MASK 0x442308
+
+#define mmCPU_IF_TPC_DERR_INTR_STS 0x442310
+
+#define mmCPU_IF_TPC_DERR_INTR_CLR 0x442314
+
+#define mmCPU_IF_TPC_DERR_INTR_MASK 0x442318
+
+#define mmCPU_IF_DMA_SERR_INTR_STS 0x442320
+
+#define mmCPU_IF_DMA_SERR_INTR_CLR 0x442324
+
+#define mmCPU_IF_DMA_SERR_INTR_MASK 0x442328
+
+#define mmCPU_IF_DMA_DERR_INTR_STS 0x442330
+
+#define mmCPU_IF_DMA_DERR_INTR_CLR 0x442334
+
+#define mmCPU_IF_DMA_DERR_INTR_MASK 0x442338
+
+#define mmCPU_IF_SRAM_SERR_INTR_STS 0x442340
+
+#define mmCPU_IF_SRAM_SERR_INTR_CLR 0x442344
+
+#define mmCPU_IF_SRAM_SERR_INTR_MASK 0x442348
+
+#define mmCPU_IF_SRAM_DERR_INTR_STS 0x442350
+
+#define mmCPU_IF_SRAM_DERR_INTR_CLR 0x442354
+
+#define mmCPU_IF_SRAM_DERR_INTR_MASK 0x442358
+
+#define mmCPU_IF_NIC_SERR_INTR_STS 0x442360
+
+#define mmCPU_IF_NIC_SERR_INTR_CLR 0x442364
+
+#define mmCPU_IF_NIC_SERR_INTR_MASK 0x442368
+
+#define mmCPU_IF_NIC_DERR_INTR_STS 0x442370
+
+#define mmCPU_IF_NIC_DERR_INTR_CLR 0x442374
+
+#define mmCPU_IF_NIC_DERR_INTR_MASK 0x442378
+
+#define mmCPU_IF_DMA_IF_SERR_INTR_STS 0x442380
+
+#define mmCPU_IF_DMA_IF_SERR_INTR_CLR 0x442384
+
+#define mmCPU_IF_DMA_IF_SERR_INTR_MASK 0x442388
+
+#define mmCPU_IF_DMA_IF_DERR_INTR_STS 0x442390
+
+#define mmCPU_IF_DMA_IF_DERR_INTR_CLR 0x442394
+
+#define mmCPU_IF_DMA_IF_DERR_INTR_MASK 0x442398
+
+#define mmCPU_IF_HBM_SERR_INTR_STS 0x4423A0
+
+#define mmCPU_IF_HBM_SERR_INTR_CLR 0x4423A4
+
+#define mmCPU_IF_HBM_SERR_INTR_MASK 0x4423A8
+
+#define mmCPU_IF_HBM_DERR_INTR_STS 0x4423B0
+
+#define mmCPU_IF_HBM_DERR_INTR_CLR 0x4423B4
+
+#define mmCPU_IF_HBM_DERR_INTR_MASK 0x4423B8
+
+#define mmCPU_IF_PLL_SEI_INTR_STS 0x442400
+
+#define mmCPU_IF_PLL_SEI_INTR_CLR 0x442404
+
+#define mmCPU_IF_PLL_SEI_INTR_MASK 0x442408
+
+#define mmCPU_IF_NIC_SEI_INTR_STS 0x442410
+
+#define mmCPU_IF_NIC_SEI_INTR_CLR 0x442414
+
+#define mmCPU_IF_NIC_SEI_INTR_MASK 0x442418
+
+#define mmCPU_IF_DMA_SEI_INTR_STS 0x442420
+
+#define mmCPU_IF_DMA_SEI_INTR_CLR 0x442424
+
+#define mmCPU_IF_DMA_SEI_INTR_MASK 0x442428
+
+#define mmCPU_IF_DMA_IF_SEI_INTR_STS 0x442430
+
+#define mmCPU_IF_DMA_IF_SEI_INTR_CLR 0x442434
+
+#define mmCPU_IF_DMA_IF_SEI_INTR_MASK 0x442438
+
+#endif /* ASIC_REG_CPU_IF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
new file mode 100644
index 000000000000..d079a37acab8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_CORE_MASKS_H_
+#define ASIC_REG_DMA0_CORE_MASKS_H_
+
+/*
+ *****************************************
+ * DMA0_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+/* DMA0_CORE_CFG_0 */
+#define DMA0_CORE_CFG_0_EN_SHIFT 0
+#define DMA0_CORE_CFG_0_EN_MASK 0x1
+
+/* DMA0_CORE_CFG_1 */
+#define DMA0_CORE_CFG_1_HALT_SHIFT 0
+#define DMA0_CORE_CFG_1_HALT_MASK 0x1
+#define DMA0_CORE_CFG_1_FLUSH_SHIFT 1
+#define DMA0_CORE_CFG_1_FLUSH_MASK 0x2
+#define DMA0_CORE_CFG_1_SB_FORCE_MISS_SHIFT 2
+#define DMA0_CORE_CFG_1_SB_FORCE_MISS_MASK 0x4
+
+/* DMA0_CORE_LBW_MAX_OUTSTAND */
+#define DMA0_CORE_LBW_MAX_OUTSTAND_VAL_SHIFT 0
+#define DMA0_CORE_LBW_MAX_OUTSTAND_VAL_MASK 0x1F
+
+/* DMA0_CORE_SRC_BASE_LO */
+#define DMA0_CORE_SRC_BASE_LO_VAL_SHIFT 0
+#define DMA0_CORE_SRC_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_BASE_HI */
+#define DMA0_CORE_SRC_BASE_HI_VAL_SHIFT 0
+#define DMA0_CORE_SRC_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_BASE_LO */
+#define DMA0_CORE_DST_BASE_LO_VAL_SHIFT 0
+#define DMA0_CORE_DST_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_BASE_HI */
+#define DMA0_CORE_DST_BASE_HI_VAL_SHIFT 0
+#define DMA0_CORE_DST_BASE_HI_VAL_MASK 0xFFFFFF
+#define DMA0_CORE_DST_BASE_HI_CTX_ID_HI_SHIFT 24
+#define DMA0_CORE_DST_BASE_HI_CTX_ID_HI_MASK 0xFF000000
+
+/* DMA0_CORE_SRC_TSIZE_1 */
+#define DMA0_CORE_SRC_TSIZE_1_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_1 */
+#define DMA0_CORE_SRC_STRIDE_1_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_2 */
+#define DMA0_CORE_SRC_TSIZE_2_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_2 */
+#define DMA0_CORE_SRC_STRIDE_2_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_3 */
+#define DMA0_CORE_SRC_TSIZE_3_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_3 */
+#define DMA0_CORE_SRC_STRIDE_3_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_4 */
+#define DMA0_CORE_SRC_TSIZE_4_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_STRIDE_4 */
+#define DMA0_CORE_SRC_STRIDE_4_VAL_SHIFT 0
+#define DMA0_CORE_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_SRC_TSIZE_0 */
+#define DMA0_CORE_SRC_TSIZE_0_VAL_SHIFT 0
+#define DMA0_CORE_SRC_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_1 */
+#define DMA0_CORE_DST_TSIZE_1_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_1 */
+#define DMA0_CORE_DST_STRIDE_1_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_2 */
+#define DMA0_CORE_DST_TSIZE_2_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_2 */
+#define DMA0_CORE_DST_STRIDE_2_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_3 */
+#define DMA0_CORE_DST_TSIZE_3_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_3 */
+#define DMA0_CORE_DST_STRIDE_3_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_4 */
+#define DMA0_CORE_DST_TSIZE_4_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_STRIDE_4 */
+#define DMA0_CORE_DST_STRIDE_4_VAL_SHIFT 0
+#define DMA0_CORE_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DST_TSIZE_0 */
+#define DMA0_CORE_DST_TSIZE_0_VAL_SHIFT 0
+#define DMA0_CORE_DST_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_COMMIT */
+#define DMA0_CORE_COMMIT_WR_COMP_EN_SHIFT 0
+#define DMA0_CORE_COMMIT_WR_COMP_EN_MASK 0x1
+#define DMA0_CORE_COMMIT_TRANSPOSE_SHIFT 1
+#define DMA0_CORE_COMMIT_TRANSPOSE_MASK 0x2
+#define DMA0_CORE_COMMIT_DTYPE_SHIFT 2
+#define DMA0_CORE_COMMIT_DTYPE_MASK 0x4
+#define DMA0_CORE_COMMIT_LIN_SHIFT 3
+#define DMA0_CORE_COMMIT_LIN_MASK 0x8
+#define DMA0_CORE_COMMIT_MEM_SET_SHIFT 4
+#define DMA0_CORE_COMMIT_MEM_SET_MASK 0x10
+#define DMA0_CORE_COMMIT_COMPRESS_SHIFT 5
+#define DMA0_CORE_COMMIT_COMPRESS_MASK 0x20
+#define DMA0_CORE_COMMIT_DECOMPRESS_SHIFT 6
+#define DMA0_CORE_COMMIT_DECOMPRESS_MASK 0x40
+#define DMA0_CORE_COMMIT_CTX_ID_SHIFT 16
+#define DMA0_CORE_COMMIT_CTX_ID_MASK 0xFF0000
+
+/* DMA0_CORE_WR_COMP_WDATA */
+#define DMA0_CORE_WR_COMP_WDATA_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_COMP_ADDR_LO */
+#define DMA0_CORE_WR_COMP_ADDR_LO_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_COMP_ADDR_HI */
+#define DMA0_CORE_WR_COMP_ADDR_HI_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_COMP_AWUSER_31_11 */
+#define DMA0_CORE_WR_COMP_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_CORE_WR_COMP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_CORE_TE_NUMROWS */
+#define DMA0_CORE_TE_NUMROWS_VAL_SHIFT 0
+#define DMA0_CORE_TE_NUMROWS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_PROT */
+#define DMA0_CORE_PROT_VAL_SHIFT 0
+#define DMA0_CORE_PROT_VAL_MASK 0x1
+#define DMA0_CORE_PROT_ERR_VAL_SHIFT 1
+#define DMA0_CORE_PROT_ERR_VAL_MASK 0x2
+
+/* DMA0_CORE_SECURE_PROPS */
+#define DMA0_CORE_SECURE_PROPS_ASID_SHIFT 0
+#define DMA0_CORE_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA0_CORE_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA0_CORE_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA0_CORE_NON_SECURE_PROPS */
+#define DMA0_CORE_NON_SECURE_PROPS_ASID_SHIFT 0
+#define DMA0_CORE_NON_SECURE_PROPS_ASID_MASK 0x3FF
+#define DMA0_CORE_NON_SECURE_PROPS_MMBP_SHIFT 10
+#define DMA0_CORE_NON_SECURE_PROPS_MMBP_MASK 0x400
+
+/* DMA0_CORE_RD_MAX_OUTSTAND */
+#define DMA0_CORE_RD_MAX_OUTSTAND_VAL_SHIFT 0
+#define DMA0_CORE_RD_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* DMA0_CORE_RD_MAX_SIZE */
+#define DMA0_CORE_RD_MAX_SIZE_DATA_SHIFT 0
+#define DMA0_CORE_RD_MAX_SIZE_DATA_MASK 0x7FF
+#define DMA0_CORE_RD_MAX_SIZE_MD_SHIFT 16
+#define DMA0_CORE_RD_MAX_SIZE_MD_MASK 0x7FF0000
+
+/* DMA0_CORE_RD_ARCACHE */
+#define DMA0_CORE_RD_ARCACHE_VAL_SHIFT 0
+#define DMA0_CORE_RD_ARCACHE_VAL_MASK 0xF
+
+/* DMA0_CORE_RD_ARUSER_31_11 */
+#define DMA0_CORE_RD_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_CORE_RD_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_CORE_RD_INFLIGHTS */
+#define DMA0_CORE_RD_INFLIGHTS_VAL_SHIFT 0
+#define DMA0_CORE_RD_INFLIGHTS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_WR_MAX_OUTSTAND */
+#define DMA0_CORE_WR_MAX_OUTSTAND_VAL_SHIFT 0
+#define DMA0_CORE_WR_MAX_OUTSTAND_VAL_MASK 0xFFF
+
+/* DMA0_CORE_WR_MAX_AWID */
+#define DMA0_CORE_WR_MAX_AWID_VAL_SHIFT 0
+#define DMA0_CORE_WR_MAX_AWID_VAL_MASK 0xFFFF
+
+/* DMA0_CORE_WR_AWCACHE */
+#define DMA0_CORE_WR_AWCACHE_VAL_SHIFT 0
+#define DMA0_CORE_WR_AWCACHE_VAL_MASK 0xF
+
+/* DMA0_CORE_WR_AWUSER_31_11 */
+#define DMA0_CORE_WR_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_CORE_WR_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_CORE_WR_INFLIGHTS */
+#define DMA0_CORE_WR_INFLIGHTS_VAL_SHIFT 0
+#define DMA0_CORE_WR_INFLIGHTS_VAL_MASK 0xFFFF
+
+/* DMA0_CORE_RD_RATE_LIM_CFG_0 */
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_CORE_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_CORE_RD_RATE_LIM_CFG_1 */
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_CORE_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_CORE_WR_RATE_LIM_CFG_0 */
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_CORE_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_CORE_WR_RATE_LIM_CFG_1 */
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_CORE_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_CORE_ERR_CFG */
+#define DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT 0
+#define DMA0_CORE_ERR_CFG_ERR_MSG_EN_MASK 0x1
+#define DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT 1
+#define DMA0_CORE_ERR_CFG_STOP_ON_ERR_MASK 0x2
+
+/* DMA0_CORE_ERR_CAUSE */
+#define DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_SHIFT 0
+#define DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK 0x1
+#define DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_SHIFT 1
+#define DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK 0x2
+#define DMA0_CORE_ERR_CAUSE_LBW_WR_ERR_SHIFT 2
+#define DMA0_CORE_ERR_CAUSE_LBW_WR_ERR_MASK 0x4
+#define DMA0_CORE_ERR_CAUSE_DESC_OVF_SHIFT 3
+#define DMA0_CORE_ERR_CAUSE_DESC_OVF_MASK 0x8
+
+/* DMA0_CORE_ERRMSG_ADDR_LO */
+#define DMA0_CORE_ERRMSG_ADDR_LO_VAL_SHIFT 0
+#define DMA0_CORE_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_ERRMSG_ADDR_HI */
+#define DMA0_CORE_ERRMSG_ADDR_HI_VAL_SHIFT 0
+#define DMA0_CORE_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_ERRMSG_WDATA */
+#define DMA0_CORE_ERRMSG_WDATA_VAL_SHIFT 0
+#define DMA0_CORE_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_STS0 */
+#define DMA0_CORE_STS0_RD_REQ_CNT_SHIFT 0
+#define DMA0_CORE_STS0_RD_REQ_CNT_MASK 0x7FFF
+#define DMA0_CORE_STS0_WR_REQ_CNT_SHIFT 16
+#define DMA0_CORE_STS0_WR_REQ_CNT_MASK 0x7FFF0000
+#define DMA0_CORE_STS0_BUSY_SHIFT 31
+#define DMA0_CORE_STS0_BUSY_MASK 0x80000000
+
+/* DMA0_CORE_STS1 */
+#define DMA0_CORE_STS1_IS_HALT_SHIFT 0
+#define DMA0_CORE_STS1_IS_HALT_MASK 0x1
+
+/* DMA0_CORE_RD_DBGMEM_ADD */
+#define DMA0_CORE_RD_DBGMEM_ADD_VAL_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_ADD_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_RD_DBGMEM_DATA_WR */
+#define DMA0_CORE_RD_DBGMEM_DATA_WR_VAL_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_DATA_WR_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_RD_DBGMEM_DATA_RD */
+#define DMA0_CORE_RD_DBGMEM_DATA_RD_VAL_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_DATA_RD_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_RD_DBGMEM_CTRL */
+#define DMA0_CORE_RD_DBGMEM_CTRL_WR_NRD_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_CTRL_WR_NRD_MASK 0x1
+
+/* DMA0_CORE_RD_DBGMEM_RC */
+#define DMA0_CORE_RD_DBGMEM_RC_VALID_SHIFT 0
+#define DMA0_CORE_RD_DBGMEM_RC_VALID_MASK 0x1
+
+/* DMA0_CORE_DBG_HBW_AXI_AR_CNT */
+
+/* DMA0_CORE_DBG_HBW_AXI_AW_CNT */
+
+/* DMA0_CORE_DBG_LBW_AXI_AW_CNT */
+
+/* DMA0_CORE_DBG_DESC_CNT */
+#define DMA0_CORE_DBG_DESC_CNT_RD_STS_CTX_CNT_SHIFT 0
+#define DMA0_CORE_DBG_DESC_CNT_RD_STS_CTX_CNT_MASK 0xFFFFFFFF
+
+/* DMA0_CORE_DBG_STS */
+#define DMA0_CORE_DBG_STS_RD_CTX_FULL_SHIFT 0
+#define DMA0_CORE_DBG_STS_RD_CTX_FULL_MASK 0x1
+#define DMA0_CORE_DBG_STS_WR_CTX_FULL_SHIFT 1
+#define DMA0_CORE_DBG_STS_WR_CTX_FULL_MASK 0x2
+#define DMA0_CORE_DBG_STS_WR_COMP_FULL_SHIFT 2
+#define DMA0_CORE_DBG_STS_WR_COMP_FULL_MASK 0x4
+#define DMA0_CORE_DBG_STS_RD_CTX_EMPTY_SHIFT 3
+#define DMA0_CORE_DBG_STS_RD_CTX_EMPTY_MASK 0x8
+#define DMA0_CORE_DBG_STS_WR_CTX_EMPTY_SHIFT 4
+#define DMA0_CORE_DBG_STS_WR_CTX_EMPTY_MASK 0x10
+#define DMA0_CORE_DBG_STS_WR_COMP_EMPTY_SHIFT 5
+#define DMA0_CORE_DBG_STS_WR_COMP_EMPTY_MASK 0x20
+#define DMA0_CORE_DBG_STS_TE_EMPTY_SHIFT 6
+#define DMA0_CORE_DBG_STS_TE_EMPTY_MASK 0x40
+#define DMA0_CORE_DBG_STS_TE_BUSY_SHIFT 7
+#define DMA0_CORE_DBG_STS_TE_BUSY_MASK 0x80
+#define DMA0_CORE_DBG_STS_GSKT_EMPTY_SHIFT 8
+#define DMA0_CORE_DBG_STS_GSKT_EMPTY_MASK 0x100
+#define DMA0_CORE_DBG_STS_GSKT_FULL_SHIFT 9
+#define DMA0_CORE_DBG_STS_GSKT_FULL_MASK 0x200
+#define DMA0_CORE_DBG_STS_RDBUF_FULLNESS_SHIFT 20
+#define DMA0_CORE_DBG_STS_RDBUF_FULLNESS_MASK 0x7FF00000
+
+/* DMA0_CORE_DBG_RD_DESC_ID */
+
+/* DMA0_CORE_DBG_WR_DESC_ID */
+
+#endif /* ASIC_REG_DMA0_CORE_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
new file mode 100644
index 000000000000..1fdd5d5fc6d2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_CORE_REGS_H_
+#define ASIC_REG_DMA0_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA0_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA0_CORE_CFG_0 0x500000
+
+#define mmDMA0_CORE_CFG_1 0x500004
+
+#define mmDMA0_CORE_LBW_MAX_OUTSTAND 0x500008
+
+#define mmDMA0_CORE_SRC_BASE_LO 0x500014
+
+#define mmDMA0_CORE_SRC_BASE_HI 0x500018
+
+#define mmDMA0_CORE_DST_BASE_LO 0x50001C
+
+#define mmDMA0_CORE_DST_BASE_HI 0x500020
+
+#define mmDMA0_CORE_SRC_TSIZE_1 0x50002C
+
+#define mmDMA0_CORE_SRC_STRIDE_1 0x500030
+
+#define mmDMA0_CORE_SRC_TSIZE_2 0x500034
+
+#define mmDMA0_CORE_SRC_STRIDE_2 0x500038
+
+#define mmDMA0_CORE_SRC_TSIZE_3 0x50003C
+
+#define mmDMA0_CORE_SRC_STRIDE_3 0x500040
+
+#define mmDMA0_CORE_SRC_TSIZE_4 0x500044
+
+#define mmDMA0_CORE_SRC_STRIDE_4 0x500048
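A minimal user-space sketch of issuing the new HL_INFO_TIME_SYNC query follows; the field names mirror the driver code above, while HL_IOCTL_INFO, the uapi header path and the /dev/hl0 node name are assumptions and may differ on a real system:

/*
 * Hedged sketch: query device/host time through the INFO ioctl.
 * HL_IOCTL_INFO, <misc/habanalabs.h> and /dev/hl0 are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header from the kernel tree */

int main(void)
{
	struct hl_info_time_sync ts;
	struct hl_info_args args;
	int fd, rc;

	fd = open("/dev/hl0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	memset(&ts, 0, sizeof(ts));
	args.op = HL_INFO_TIME_SYNC;
	args.return_pointer = (uint64_t) (uintptr_t) &ts;
	args.return_size = sizeof(ts);

	rc = ioctl(fd, HL_IOCTL_INFO, &args);
	if (!rc)
		printf("device time: %llu ns, host time: %llu ns\n",
		       (unsigned long long) ts.device_time,
		       (unsigned long long) ts.host_time);

	close(fd);
	return rc;
}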
+
+#define mmDMA0_CORE_SRC_TSIZE_0 0x50004C
+
+#define mmDMA0_CORE_DST_TSIZE_1 0x500054
+
+#define mmDMA0_CORE_DST_STRIDE_1 0x500058
+
+#define mmDMA0_CORE_DST_TSIZE_2 0x50005C
+
+#define mmDMA0_CORE_DST_STRIDE_2 0x500060
+
+#define mmDMA0_CORE_DST_TSIZE_3 0x500064
+
+#define mmDMA0_CORE_DST_STRIDE_3 0x500068
+
+#define mmDMA0_CORE_DST_TSIZE_4 0x50006C
+
+#define mmDMA0_CORE_DST_STRIDE_4 0x500070
+
+#define mmDMA0_CORE_DST_TSIZE_0 0x500074
+
+#define mmDMA0_CORE_COMMIT 0x500078
+
+#define mmDMA0_CORE_WR_COMP_WDATA 0x50007C
+
+#define mmDMA0_CORE_WR_COMP_ADDR_LO 0x500080
+
+#define mmDMA0_CORE_WR_COMP_ADDR_HI 0x500084
+
+#define mmDMA0_CORE_WR_COMP_AWUSER_31_11 0x500088
+
+#define mmDMA0_CORE_TE_NUMROWS 0x500094
+
+#define mmDMA0_CORE_PROT 0x5000B8
+
+#define mmDMA0_CORE_SECURE_PROPS 0x5000F0
+
+#define mmDMA0_CORE_NON_SECURE_PROPS 0x5000F4
+
+#define mmDMA0_CORE_RD_MAX_OUTSTAND 0x500100
+
+#define mmDMA0_CORE_RD_MAX_SIZE 0x500104
+
+#define mmDMA0_CORE_RD_ARCACHE 0x500108
+
+#define mmDMA0_CORE_RD_ARUSER_31_11 0x500110
+
+#define mmDMA0_CORE_RD_INFLIGHTS 0x500114
+
+#define mmDMA0_CORE_WR_MAX_OUTSTAND 0x500120
+
+#define mmDMA0_CORE_WR_MAX_AWID 0x500124
+
+#define mmDMA0_CORE_WR_AWCACHE 0x500128
+
+#define mmDMA0_CORE_WR_AWUSER_31_11 0x500130
+
+#define mmDMA0_CORE_WR_INFLIGHTS 0x500134
+
+#define mmDMA0_CORE_RD_RATE_LIM_CFG_0 0x500150
+
+#define mmDMA0_CORE_RD_RATE_LIM_CFG_1 0x500154
+
+#define mmDMA0_CORE_WR_RATE_LIM_CFG_0 0x500158
+
+#define mmDMA0_CORE_WR_RATE_LIM_CFG_1 0x50015C
+
+#define mmDMA0_CORE_ERR_CFG 0x500160
+
+#define mmDMA0_CORE_ERR_CAUSE 0x500164
+
+#define mmDMA0_CORE_ERRMSG_ADDR_LO 0x500170
+
+#define mmDMA0_CORE_ERRMSG_ADDR_HI 0x500174
+
+#define mmDMA0_CORE_ERRMSG_WDATA 0x500178
+
+#define mmDMA0_CORE_STS0 0x500190
+
+#define mmDMA0_CORE_STS1 0x500194
+
+#define mmDMA0_CORE_RD_DBGMEM_ADD 0x500200
+
+#define mmDMA0_CORE_RD_DBGMEM_DATA_WR 0x500204
+
+#define mmDMA0_CORE_RD_DBGMEM_DATA_RD 0x500208
+
+#define mmDMA0_CORE_RD_DBGMEM_CTRL 0x50020C
+
+#define mmDMA0_CORE_RD_DBGMEM_RC 0x500210
+
+#define mmDMA0_CORE_DBG_HBW_AXI_AR_CNT 0x500220
+
+#define mmDMA0_CORE_DBG_HBW_AXI_AW_CNT 0x500224
+
+#define mmDMA0_CORE_DBG_LBW_AXI_AW_CNT 0x500228
+
+#define mmDMA0_CORE_DBG_DESC_CNT 0x50022C
+
+#define mmDMA0_CORE_DBG_STS 0x500230
+
+#define mmDMA0_CORE_DBG_RD_DESC_ID 0x500234
+
+#define mmDMA0_CORE_DBG_WR_DESC_ID 0x500238
+
+#endif /* ASIC_REG_DMA0_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
new file mode 100644
index 000000000000..48376aabc3ba
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_QM_MASKS_H_
+#define ASIC_REG_DMA0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * DMA0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* DMA0_QM_GLBL_CFG0 */
+#define DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define DMA0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define DMA0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define DMA0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define DMA0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* DMA0_QM_GLBL_CFG1 */
+#define DMA0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define DMA0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define DMA0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define DMA0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define DMA0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define DMA0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define DMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define DMA0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define DMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define DMA0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define DMA0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* DMA0_QM_GLBL_PROT */
+#define DMA0_QM_GLBL_PROT_PQF_SHIFT 0
+#define DMA0_QM_GLBL_PROT_PQF_MASK 0xF
+#define DMA0_QM_GLBL_PROT_CQF_SHIFT 4
+#define DMA0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define DMA0_QM_GLBL_PROT_CP_SHIFT 9
+#define DMA0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define DMA0_QM_GLBL_PROT_ERR_SHIFT 14
+#define DMA0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_PROT_ARB_SHIFT 15
+#define DMA0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* DMA0_QM_GLBL_ERR_CFG */
+#define DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define DMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* DMA0_QM_GLBL_SECURE_PROPS */
+#define DMA0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define DMA0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* DMA0_QM_GLBL_NON_SECURE_PROPS */
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define DMA0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* DMA0_QM_GLBL_STS0 */
+#define DMA0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define DMA0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define DMA0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define DMA0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define DMA0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define DMA0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define DMA0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define DMA0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define DMA0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define DMA0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define DMA0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define DMA0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define DMA0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define DMA0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* DMA0_QM_GLBL_STS1 */
+#define DMA0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define DMA0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define DMA0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_GLBL_STS1_4 */
+#define DMA0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_GLBL_MSG_EN */
+#define DMA0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define DMA0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define DMA0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_GLBL_MSG_EN_4 */
+#define DMA0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define DMA0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define DMA0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define DMA0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define DMA0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define DMA0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define DMA0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define DMA0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define DMA0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define DMA0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define DMA0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define DMA0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define DMA0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* DMA0_QM_PQ_BASE_LO */
+#define DMA0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define DMA0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_BASE_HI */
+#define DMA0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define DMA0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_SIZE */
+#define DMA0_QM_PQ_SIZE_VAL_SHIFT 0
+#define DMA0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_PI */
+#define DMA0_QM_PQ_PI_VAL_SHIFT 0
+#define DMA0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_CI */
+#define DMA0_QM_PQ_CI_VAL_SHIFT 0
+#define DMA0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_PQ_CFG0 */
+#define DMA0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define DMA0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA0_QM_PQ_CFG1 */
+#define DMA0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA0_QM_PQ_ARUSER_31_11 */
+#define DMA0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_PQ_STS0 */
+#define DMA0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define DMA0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define DMA0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA0_QM_PQ_STS1 */
+#define DMA0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define DMA0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define DMA0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define DMA0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define DMA0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* DMA0_QM_CQ_CFG0 */
+#define DMA0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define DMA0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* DMA0_QM_CQ_CFG1 */
+#define DMA0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define DMA0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define DMA0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define DMA0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_ARUSER_31_11 */
+#define DMA0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_CQ_STS0 */
+#define DMA0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define DMA0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define DMA0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define DMA0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_STS1 */
+#define DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define DMA0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define DMA0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define DMA0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define DMA0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* DMA0_QM_CQ_PTR_LO_0 */
+#define DMA0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_0 */
+#define DMA0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_0 */
+#define DMA0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_0 */
+#define DMA0_QM_CQ_CTL_0_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_0_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_0_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_1 */
+#define DMA0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_1 */
+#define DMA0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_1 */
+#define DMA0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_1 */
+#define DMA0_QM_CQ_CTL_1_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_1_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_1_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_2 */
+#define DMA0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_2 */
+#define DMA0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_2 */
+#define DMA0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_2 */
+#define DMA0_QM_CQ_CTL_2_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_2_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_2_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_3 */
+#define DMA0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_3 */
+#define DMA0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_3 */
+#define DMA0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_3 */
+#define DMA0_QM_CQ_CTL_3_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_3_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_3_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_4 */
+#define DMA0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_4 */
+#define DMA0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_4 */
+#define DMA0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_4 */
+#define DMA0_QM_CQ_CTL_4_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_4_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_4_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_PTR_LO_STS */
+#define DMA0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_PTR_HI_STS */
+#define DMA0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define DMA0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_TSIZE_STS */
+#define DMA0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define DMA0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CQ_CTL_STS */
+#define DMA0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define DMA0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define DMA0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define DMA0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* DMA0_QM_CQ_IFIFO_CNT */
+#define DMA0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define DMA0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* DMA0_QM_CP_MSG_BASE0_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE0_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE1_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE1_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE2_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE2_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE3_ADDR_LO */
+#define DMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_MSG_BASE3_ADDR_HI */
+#define DMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_LDMA_TSIZE_OFFSET */
+#define DMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define DMA0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define DMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_FENCE0_RDATA */
+#define DMA0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE1_RDATA */
+#define DMA0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE2_RDATA */
+#define DMA0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE3_RDATA */
+#define DMA0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* DMA0_QM_CP_FENCE0_CNT */
+#define DMA0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_FENCE1_CNT */
+#define DMA0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_FENCE2_CNT */
+#define DMA0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_FENCE3_CNT */
+#define DMA0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define DMA0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* DMA0_QM_CP_STS */
+#define DMA0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define DMA0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define DMA0_QM_CP_STS_ERDY_SHIFT 16
+#define DMA0_QM_CP_STS_ERDY_MASK 0x10000
+#define DMA0_QM_CP_STS_RRDY_SHIFT 17
+#define DMA0_QM_CP_STS_RRDY_MASK 0x20000
+#define DMA0_QM_CP_STS_MRDY_SHIFT 18
+#define DMA0_QM_CP_STS_MRDY_MASK 0x40000
+#define DMA0_QM_CP_STS_SW_STOP_SHIFT 19
+#define DMA0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define DMA0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define DMA0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define DMA0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define DMA0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* DMA0_QM_CP_CURRENT_INST_LO */
+#define DMA0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define DMA0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_CURRENT_INST_HI */
+#define DMA0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define DMA0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_CP_BARRIER_CFG */
+#define DMA0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define DMA0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define DMA0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define DMA0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* DMA0_QM_CP_DBG_0 */
+#define DMA0_QM_CP_DBG_0_CS_SHIFT 0
+#define DMA0_QM_CP_DBG_0_CS_MASK 0xF
+#define DMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4
+#define DMA0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10
+#define DMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5
+#define DMA0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20
+#define DMA0_QM_CP_DBG_0_MREB_STALL_SHIFT 6
+#define DMA0_QM_CP_DBG_0_MREB_STALL_MASK 0x40
+#define DMA0_QM_CP_DBG_0_STALL_SHIFT 7
+#define DMA0_QM_CP_DBG_0_STALL_MASK 0x80
+
+/* DMA0_QM_CP_ARUSER_31_11 */
+#define DMA0_QM_CP_ARUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_CP_AWUSER_31_11 */
+#define DMA0_QM_CP_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_ARB_CFG_0 */
+#define DMA0_QM_ARB_CFG_0_TYPE_SHIFT 0
+#define DMA0_QM_ARB_CFG_0_TYPE_MASK 0x1
+#define DMA0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define DMA0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define DMA0_QM_ARB_CFG_0_EN_SHIFT 8
+#define DMA0_QM_ARB_CFG_0_EN_MASK 0x100
+#define DMA0_QM_ARB_CFG_0_MASK_SHIFT 12
+#define DMA0_QM_ARB_CFG_0_MASK_MASK 0xF000
+#define DMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16
+#define DMA0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000
+
+/* DMA0_QM_ARB_CHOISE_Q_PUSH */
+#define DMA0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0
+#define DMA0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3
+
+/* DMA0_QM_ARB_WRR_WEIGHT */
+#define DMA0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define DMA0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_CFG_1 */
+#define DMA0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define DMA0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* DMA0_QM_ARB_MST_AVAIL_CRED */
+#define DMA0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* DMA0_QM_ARB_MST_CRED_INC */
+#define DMA0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_MST_CHOISE_PUSH_OFST */
+#define DMA0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_MST_SLAVE_EN */
+#define DMA0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_MST_QUIET_PER */
+#define DMA0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_SLV_CHOISE_WDT */
+#define DMA0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_SLV_ID */
+#define DMA0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_ID_VAL_MASK 0x1F
+
+/* DMA0_QM_ARB_MSG_MAX_INFLIGHT */
+#define DMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define DMA0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* DMA0_QM_ARB_MSG_AWUSER_31_11 */
+#define DMA0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0
+#define DMA0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* DMA0_QM_ARB_MSG_AWUSER_SEC_PROP */
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10
+#define DMA0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400
+
+/* DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10
+#define DMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400
+
+/* DMA0_QM_ARB_BASE_LO */
+#define DMA0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define DMA0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_BASE_HI */
+#define DMA0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define DMA0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_STATE_STS */
+#define DMA0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define DMA0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_ARB_CHOISE_FULLNESS_STS */
+#define DMA0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0
+#define DMA0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* DMA0_QM_ARB_MSG_STS */
+#define DMA0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define DMA0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define DMA0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define DMA0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* DMA0_QM_ARB_SLV_CHOISE_Q_HEAD */
+#define DMA0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0
+#define DMA0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3
+
+/* DMA0_QM_ARB_ERR_CAUSE */
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1
+#define DMA0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2
+#define DMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define DMA0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* DMA0_QM_ARB_ERR_MSG_EN */
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1
+#define DMA0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define DMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define DMA0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* DMA0_QM_ARB_ERR_STS_DRP */
+#define DMA0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define DMA0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* DMA0_QM_ARB_MST_CRED_STS */
+#define DMA0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define DMA0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+
+/* DMA0_QM_CGM_CFG */
+#define DMA0_QM_CGM_CFG_IDLE_TH_SHIFT 0
+#define DMA0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
+#define DMA0_QM_CGM_CFG_G2F_TH_SHIFT 16
+#define DMA0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000
+#define DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24
+#define DMA0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000
+#define DMA0_QM_CGM_CFG_EN_SHIFT 31
+#define DMA0_QM_CGM_CFG_EN_MASK 0x80000000
+
+/* DMA0_QM_CGM_STS */
+#define DMA0_QM_CGM_STS_ST_SHIFT 0
+#define DMA0_QM_CGM_STS_ST_MASK 0x3
+#define DMA0_QM_CGM_STS_CG_SHIFT 4
+#define DMA0_QM_CGM_STS_CG_MASK 0x10
+#define DMA0_QM_CGM_STS_AGENT_IDLE_SHIFT 8
+#define DMA0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define DMA0_QM_CGM_STS_AXI_IDLE_SHIFT 9
+#define DMA0_QM_CGM_STS_AXI_IDLE_MASK 0x200
+#define DMA0_QM_CGM_STS_CP_IDLE_SHIFT 10
+#define DMA0_QM_CGM_STS_CP_IDLE_MASK 0x400
+
+/* DMA0_QM_CGM_CFG1 */
+#define DMA0_QM_CGM_CFG1_MASK_TH_SHIFT 0
+#define DMA0_QM_CGM_CFG1_MASK_TH_MASK 0xFF
+
+/* DMA0_QM_LOCAL_RANGE_BASE */
+#define DMA0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define DMA0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* DMA0_QM_LOCAL_RANGE_SIZE */
+#define DMA0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define DMA0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* DMA0_QM_CSMR_STRICT_PRIO_CFG */
+#define DMA0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0
+#define DMA0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1
+
+/* DMA0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define DMA0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* DMA0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define DMA0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* DMA0_QM_GLBL_AXCACHE */
+#define DMA0_QM_GLBL_AXCACHE_AR_SHIFT 0
+#define DMA0_QM_GLBL_AXCACHE_AR_MASK 0xF
+#define DMA0_QM_GLBL_AXCACHE_AW_SHIFT 16
+#define DMA0_QM_GLBL_AXCACHE_AW_MASK 0xF0000
+
+/* DMA0_QM_IND_GW_APB_CFG */
+#define DMA0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define DMA0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define DMA0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define DMA0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* DMA0_QM_IND_GW_APB_WDATA */
+#define DMA0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define DMA0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_IND_GW_APB_RDATA */
+#define DMA0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define DMA0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_IND_GW_APB_STATUS */
+#define DMA0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define DMA0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define DMA0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define DMA0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* DMA0_QM_GLBL_ERR_ADDR_LO */
+#define DMA0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define DMA0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_GLBL_ERR_ADDR_HI */
+#define DMA0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define DMA0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_GLBL_ERR_WDATA */
+#define DMA0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define DMA0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* DMA0_QM_GLBL_MEM_INIT_BUSY */
+#define DMA0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0
+#define DMA0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF
+
+#endif /* ASIC_REG_DMA0_QM_MASKS_H_ */
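The _SHIFT/_MASK pairs above encode the bit position and width of every DMA0 QMAN register field. A minimal sketch of the usual extract/compose pattern for such pairs, using two fields from this header; the local macro copies and helper names exist only so the snippet stands on its own and are not part of the patch:

/*
 * Illustrative sketch only, not part of the generated header above.
 * The two macro pairs are repeated locally (values taken from the
 * header) so the snippet compiles on its own; the helper names are
 * made up for the example.
 */
#include <stdint.h>

#define DMA0_QM_CP_STS_FENCE_ID_SHIFT 20
#define DMA0_QM_CP_STS_FENCE_ID_MASK 0x300000
#define DMA0_QM_CGM_CFG_G2F_TH_SHIFT 16
#define DMA0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000

/* Extract the 2-bit FENCE_ID field from a raw DMA0_QM_CP_STS value. */
static inline uint32_t dma0_qm_cp_sts_fence_id(uint32_t sts)
{
	return (sts & DMA0_QM_CP_STS_FENCE_ID_MASK) >>
			DMA0_QM_CP_STS_FENCE_ID_SHIFT;
}

/* Return "cfg" with the 8-bit G2F_TH field replaced by "th". */
static inline uint32_t dma0_qm_cgm_cfg_set_g2f_th(uint32_t cfg, uint32_t th)
{
	return (cfg & ~DMA0_QM_CGM_CFG_G2F_TH_MASK) |
	       ((th << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) &
		DMA0_QM_CGM_CFG_G2F_TH_MASK);
}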
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
new file mode 100644
index 000000000000..8e56a93d88a1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA0_QM_REGS_H_
+#define ASIC_REG_DMA0_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA0_QM_GLBL_CFG0 0x508000
+
+#define mmDMA0_QM_GLBL_CFG1 0x508004
+
+#define mmDMA0_QM_GLBL_PROT 0x508008
+
+#define mmDMA0_QM_GLBL_ERR_CFG 0x50800C
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_0 0x508010
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_1 0x508014
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_2 0x508018
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_3 0x50801C
+
+#define mmDMA0_QM_GLBL_SECURE_PROPS_4 0x508020
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 0x508024
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 0x508028
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 0x50802C
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 0x508030
+
+#define mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 0x508034
+
+#define mmDMA0_QM_GLBL_STS0 0x508038
+
+#define mmDMA0_QM_GLBL_STS1_0 0x508040
+
+#define mmDMA0_QM_GLBL_STS1_1 0x508044
+
+#define mmDMA0_QM_GLBL_STS1_2 0x508048
+
+#define mmDMA0_QM_GLBL_STS1_3 0x50804C
+
+#define mmDMA0_QM_GLBL_STS1_4 0x508050
+
+#define mmDMA0_QM_GLBL_MSG_EN_0 0x508054
+
+#define mmDMA0_QM_GLBL_MSG_EN_1 0x508058
+
+#define mmDMA0_QM_GLBL_MSG_EN_2 0x50805C
+
+#define mmDMA0_QM_GLBL_MSG_EN_3 0x508060
+
+#define mmDMA0_QM_GLBL_MSG_EN_4 0x508068
+
+#define mmDMA0_QM_PQ_BASE_LO_0 0x508070
+
+#define mmDMA0_QM_PQ_BASE_LO_1 0x508074
+
+#define mmDMA0_QM_PQ_BASE_LO_2 0x508078
+
+#define mmDMA0_QM_PQ_BASE_LO_3 0x50807C
+
+#define mmDMA0_QM_PQ_BASE_HI_0 0x508080
+
+#define mmDMA0_QM_PQ_BASE_HI_1 0x508084
+
+#define mmDMA0_QM_PQ_BASE_HI_2 0x508088
+
+#define mmDMA0_QM_PQ_BASE_HI_3 0x50808C
+
+#define mmDMA0_QM_PQ_SIZE_0 0x508090
+
+#define mmDMA0_QM_PQ_SIZE_1 0x508094
+
+#define mmDMA0_QM_PQ_SIZE_2 0x508098
+
+#define mmDMA0_QM_PQ_SIZE_3 0x50809C
+
+#define mmDMA0_QM_PQ_PI_0 0x5080A0
+
+#define mmDMA0_QM_PQ_PI_1 0x5080A4
+
+#define mmDMA0_QM_PQ_PI_2 0x5080A8
+
+#define mmDMA0_QM_PQ_PI_3 0x5080AC
+
+#define mmDMA0_QM_PQ_CI_0 0x5080B0
+
+#define mmDMA0_QM_PQ_CI_1 0x5080B4
+
+#define mmDMA0_QM_PQ_CI_2 0x5080B8
+
+#define mmDMA0_QM_PQ_CI_3 0x5080BC
+
+#define mmDMA0_QM_PQ_CFG0_0 0x5080C0
+
+#define mmDMA0_QM_PQ_CFG0_1 0x5080C4
+
+#define mmDMA0_QM_PQ_CFG0_2 0x5080C8
+
+#define mmDMA0_QM_PQ_CFG0_3 0x5080CC
+
+#define mmDMA0_QM_PQ_CFG1_0 0x5080D0
+
+#define mmDMA0_QM_PQ_CFG1_1 0x5080D4
+
+#define mmDMA0_QM_PQ_CFG1_2 0x5080D8
+
+#define mmDMA0_QM_PQ_CFG1_3 0x5080DC
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_0 0x5080E0
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_1 0x5080E4
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_2 0x5080E8
+
+#define mmDMA0_QM_PQ_ARUSER_31_11_3 0x5080EC
+
+#define mmDMA0_QM_PQ_STS0_0 0x5080F0
+
+#define mmDMA0_QM_PQ_STS0_1 0x5080F4
+
+#define mmDMA0_QM_PQ_STS0_2 0x5080F8
+
+#define mmDMA0_QM_PQ_STS0_3 0x5080FC
+
+#define mmDMA0_QM_PQ_STS1_0 0x508100
+
+#define mmDMA0_QM_PQ_STS1_1 0x508104
+
+#define mmDMA0_QM_PQ_STS1_2 0x508108
+
+#define mmDMA0_QM_PQ_STS1_3 0x50810C
+
+#define mmDMA0_QM_CQ_CFG0_0 0x508110
+
+#define mmDMA0_QM_CQ_CFG0_1 0x508114
+
+#define mmDMA0_QM_CQ_CFG0_2 0x508118
+
+#define mmDMA0_QM_CQ_CFG0_3 0x50811C
+
+#define mmDMA0_QM_CQ_CFG0_4 0x508120
+
+#define mmDMA0_QM_CQ_CFG1_0 0x508124
+
+#define mmDMA0_QM_CQ_CFG1_1 0x508128
+
+#define mmDMA0_QM_CQ_CFG1_2 0x50812C
+
+#define mmDMA0_QM_CQ_CFG1_3 0x508130
+
+#define mmDMA0_QM_CQ_CFG1_4 0x508134
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_0 0x508138
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_1 0x50813C
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_2 0x508140
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_3 0x508144
+
+#define mmDMA0_QM_CQ_ARUSER_31_11_4 0x508148
+
+#define mmDMA0_QM_CQ_STS0_0 0x50814C
+
+#define mmDMA0_QM_CQ_STS0_1 0x508150
+
+#define mmDMA0_QM_CQ_STS0_2 0x508154
+
+#define mmDMA0_QM_CQ_STS0_3 0x508158
+
+#define mmDMA0_QM_CQ_STS0_4 0x50815C
+
+#define mmDMA0_QM_CQ_STS1_0 0x508160
+
+#define mmDMA0_QM_CQ_STS1_1 0x508164
+
+#define mmDMA0_QM_CQ_STS1_2 0x508168
+
+#define mmDMA0_QM_CQ_STS1_3 0x50816C
+
+#define mmDMA0_QM_CQ_STS1_4 0x508170
+
+#define mmDMA0_QM_CQ_PTR_LO_0 0x508174
+
+#define mmDMA0_QM_CQ_PTR_HI_0 0x508178
+
+#define mmDMA0_QM_CQ_TSIZE_0 0x50817C
+
+#define mmDMA0_QM_CQ_CTL_0 0x508180
+
+#define mmDMA0_QM_CQ_PTR_LO_1 0x508184
+
+#define mmDMA0_QM_CQ_PTR_HI_1 0x508188
+
+#define mmDMA0_QM_CQ_TSIZE_1 0x50818C
+
+#define mmDMA0_QM_CQ_CTL_1 0x508190
+
+#define mmDMA0_QM_CQ_PTR_LO_2 0x508194
+
+#define mmDMA0_QM_CQ_PTR_HI_2 0x508198
+
+#define mmDMA0_QM_CQ_TSIZE_2 0x50819C
+
+#define mmDMA0_QM_CQ_CTL_2 0x5081A0
+
+#define mmDMA0_QM_CQ_PTR_LO_3 0x5081A4
+
+#define mmDMA0_QM_CQ_PTR_HI_3 0x5081A8
+
+#define mmDMA0_QM_CQ_TSIZE_3 0x5081AC
+
+#define mmDMA0_QM_CQ_CTL_3 0x5081B0
+
+#define mmDMA0_QM_CQ_PTR_LO_4 0x5081B4
+
+#define mmDMA0_QM_CQ_PTR_HI_4 0x5081B8
+
+#define mmDMA0_QM_CQ_TSIZE_4 0x5081BC
+
+#define mmDMA0_QM_CQ_CTL_4 0x5081C0
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_0 0x5081C4
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_1 0x5081C8
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_2 0x5081CC
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_3 0x5081D0
+
+#define mmDMA0_QM_CQ_PTR_LO_STS_4 0x5081D4
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_0 0x5081D8
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_1 0x5081DC
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_2 0x5081E0
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_3 0x5081E4
+
+#define mmDMA0_QM_CQ_PTR_HI_STS_4 0x5081E8
+
+#define mmDMA0_QM_CQ_TSIZE_STS_0 0x5081EC
+
+#define mmDMA0_QM_CQ_TSIZE_STS_1 0x5081F0
+
+#define mmDMA0_QM_CQ_TSIZE_STS_2 0x5081F4
+
+#define mmDMA0_QM_CQ_TSIZE_STS_3 0x5081F8
+
+#define mmDMA0_QM_CQ_TSIZE_STS_4 0x5081FC
+
+#define mmDMA0_QM_CQ_CTL_STS_0 0x508200
+
+#define mmDMA0_QM_CQ_CTL_STS_1 0x508204
+
+#define mmDMA0_QM_CQ_CTL_STS_2 0x508208
+
+#define mmDMA0_QM_CQ_CTL_STS_3 0x50820C
+
+#define mmDMA0_QM_CQ_CTL_STS_4 0x508210
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_0 0x508214
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_1 0x508218
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_2 0x50821C
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_3 0x508220
+
+#define mmDMA0_QM_CQ_IFIFO_CNT_4 0x508224
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 0x508228
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 0x50822C
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 0x508230
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 0x508234
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 0x508238
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 0x50823C
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 0x508240
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 0x508244
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 0x508248
+
+#define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 0x50824C
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 0x508250
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 0x508254
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 0x508258
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 0x50825C
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 0x508260
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 0x508264
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 0x508268
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 0x50826C
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 0x508270
+
+#define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 0x508274
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 0x508278
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 0x50827C
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 0x508280
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 0x508284
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 0x508288
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 0x50828C
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 0x508290
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 0x508294
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 0x508298
+
+#define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 0x50829C
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 0x5082A0
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 0x5082A4
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 0x5082A8
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 0x5082AC
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 0x5082B0
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 0x5082B4
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 0x5082B8
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 0x5082BC
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 0x5082C0
+
+#define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 0x5082C4
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 0x5082C8
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 0x5082CC
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 0x5082D0
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 0x5082D4
+
+#define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 0x5082D8
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5082E0
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5082E4
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5082E8
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5082EC
+
+#define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5082F0
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5082F4
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5082F8
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5082FC
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x508300
+
+#define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x508304
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_0 0x508308
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_1 0x50830C
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_2 0x508310
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_3 0x508314
+
+#define mmDMA0_QM_CP_FENCE0_RDATA_4 0x508318
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_0 0x50831C
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_1 0x508320
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_2 0x508324
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_3 0x508328
+
+#define mmDMA0_QM_CP_FENCE1_RDATA_4 0x50832C
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_0 0x508330
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_1 0x508334
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_2 0x508338
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_3 0x50833C
+
+#define mmDMA0_QM_CP_FENCE2_RDATA_4 0x508340
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_0 0x508344
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_1 0x508348
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_2 0x50834C
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_3 0x508350
+
+#define mmDMA0_QM_CP_FENCE3_RDATA_4 0x508354
+
+#define mmDMA0_QM_CP_FENCE0_CNT_0 0x508358
+
+#define mmDMA0_QM_CP_FENCE0_CNT_1 0x50835C
+
+#define mmDMA0_QM_CP_FENCE0_CNT_2 0x508360
+
+#define mmDMA0_QM_CP_FENCE0_CNT_3 0x508364
+
+#define mmDMA0_QM_CP_FENCE0_CNT_4 0x508368
+
+#define mmDMA0_QM_CP_FENCE1_CNT_0 0x50836C
+
+#define mmDMA0_QM_CP_FENCE1_CNT_1 0x508370
+
+#define mmDMA0_QM_CP_FENCE1_CNT_2 0x508374
+
+#define mmDMA0_QM_CP_FENCE1_CNT_3 0x508378
+
+#define mmDMA0_QM_CP_FENCE1_CNT_4 0x50837C
+
+#define mmDMA0_QM_CP_FENCE2_CNT_0 0x508380
+
+#define mmDMA0_QM_CP_FENCE2_CNT_1 0x508384
+
+#define mmDMA0_QM_CP_FENCE2_CNT_2 0x508388
+
+#define mmDMA0_QM_CP_FENCE2_CNT_3 0x50838C
+
+#define mmDMA0_QM_CP_FENCE2_CNT_4 0x508390
+
+#define mmDMA0_QM_CP_FENCE3_CNT_0 0x508394
+
+#define mmDMA0_QM_CP_FENCE3_CNT_1 0x508398
+
+#define mmDMA0_QM_CP_FENCE3_CNT_2 0x50839C
+
+#define mmDMA0_QM_CP_FENCE3_CNT_3 0x5083A0
+
+#define mmDMA0_QM_CP_FENCE3_CNT_4 0x5083A4
+
+#define mmDMA0_QM_CP_STS_0 0x5083A8
+
+#define mmDMA0_QM_CP_STS_1 0x5083AC
+
+#define mmDMA0_QM_CP_STS_2 0x5083B0
+
+#define mmDMA0_QM_CP_STS_3 0x5083B4
+
+#define mmDMA0_QM_CP_STS_4 0x5083B8
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_0 0x5083BC
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_1 0x5083C0
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_2 0x5083C4
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_3 0x5083C8
+
+#define mmDMA0_QM_CP_CURRENT_INST_LO_4 0x5083CC
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_0 0x5083D0
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_1 0x5083D4
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_2 0x5083D8
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_3 0x5083DC
+
+#define mmDMA0_QM_CP_CURRENT_INST_HI_4 0x5083E0
+
+#define mmDMA0_QM_CP_BARRIER_CFG_0 0x5083F4
+
+#define mmDMA0_QM_CP_BARRIER_CFG_1 0x5083F8
+
+#define mmDMA0_QM_CP_BARRIER_CFG_2 0x5083FC
+
+#define mmDMA0_QM_CP_BARRIER_CFG_3 0x508400
+
+#define mmDMA0_QM_CP_BARRIER_CFG_4 0x508404
+
+#define mmDMA0_QM_CP_DBG_0_0 0x508408
+
+#define mmDMA0_QM_CP_DBG_0_1 0x50840C
+
+#define mmDMA0_QM_CP_DBG_0_2 0x508410
+
+#define mmDMA0_QM_CP_DBG_0_3 0x508414
+
+#define mmDMA0_QM_CP_DBG_0_4 0x508418
+
+#define mmDMA0_QM_CP_ARUSER_31_11_0 0x50841C
+
+#define mmDMA0_QM_CP_ARUSER_31_11_1 0x508420
+
+#define mmDMA0_QM_CP_ARUSER_31_11_2 0x508424
+
+#define mmDMA0_QM_CP_ARUSER_31_11_3 0x508428
+
+#define mmDMA0_QM_CP_ARUSER_31_11_4 0x50842C
+
+#define mmDMA0_QM_CP_AWUSER_31_11_0 0x508430
+
+#define mmDMA0_QM_CP_AWUSER_31_11_1 0x508434
+
+#define mmDMA0_QM_CP_AWUSER_31_11_2 0x508438
+
+#define mmDMA0_QM_CP_AWUSER_31_11_3 0x50843C
+
+#define mmDMA0_QM_CP_AWUSER_31_11_4 0x508440
+
+#define mmDMA0_QM_ARB_CFG_0 0x508A00
+
+#define mmDMA0_QM_ARB_CHOISE_Q_PUSH 0x508A04
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_0 0x508A08
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_1 0x508A0C
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_2 0x508A10
+
+#define mmDMA0_QM_ARB_WRR_WEIGHT_3 0x508A14
+
+#define mmDMA0_QM_ARB_CFG_1 0x508A18
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_0 0x508A20
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_1 0x508A24
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_2 0x508A28
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_3 0x508A2C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_4 0x508A30
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_5 0x508A34
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_6 0x508A38
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_7 0x508A3C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_8 0x508A40
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_9 0x508A44
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_10 0x508A48
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_11 0x508A4C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_12 0x508A50
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_13 0x508A54
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_14 0x508A58
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_15 0x508A5C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_16 0x508A60
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_17 0x508A64
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_18 0x508A68
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_19 0x508A6C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_20 0x508A70
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_21 0x508A74
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_22 0x508A78
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_23 0x508A7C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_24 0x508A80
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_25 0x508A84
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_26 0x508A88
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_27 0x508A8C
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_28 0x508A90
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_29 0x508A94
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_30 0x508A98
+
+#define mmDMA0_QM_ARB_MST_AVAIL_CRED_31 0x508A9C
+
+#define mmDMA0_QM_ARB_MST_CRED_INC 0x508AA0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x508AA4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x508AA8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x508AAC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x508AB0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x508AB4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x508AB8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x508ABC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x508AC0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x508AC4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x508AC8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x508ACC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x508AD0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x508AD4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x508AD8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x508ADC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x508AE0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x508AE4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x508AE8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x508AEC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x508AF0
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x508AF4
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x508AF8
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x508AFC
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x508B00
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x508B04
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x508B08
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x508B0C
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x508B10
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x508B14
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x508B18
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x508B1C
+
+#define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x508B20
+
+#define mmDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x508B28
+
+#define mmDMA0_QM_ARB_MST_SLAVE_EN 0x508B2C
+
+#define mmDMA0_QM_ARB_MST_QUIET_PER 0x508B34
+
+#define mmDMA0_QM_ARB_SLV_CHOISE_WDT 0x508B38
+
+#define mmDMA0_QM_ARB_SLV_ID 0x508B3C
+
+#define mmDMA0_QM_ARB_MSG_MAX_INFLIGHT 0x508B44
+
+#define mmDMA0_QM_ARB_MSG_AWUSER_31_11 0x508B48
+
+#define mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP 0x508B4C
+
+#define mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x508B50
+
+#define mmDMA0_QM_ARB_BASE_LO 0x508B54
+
+#define mmDMA0_QM_ARB_BASE_HI 0x508B58
+
+#define mmDMA0_QM_ARB_STATE_STS 0x508B80
+
+#define mmDMA0_QM_ARB_CHOISE_FULLNESS_STS 0x508B84
+
+#define mmDMA0_QM_ARB_MSG_STS 0x508B88
+
+#define mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD 0x508B8C
+
+#define mmDMA0_QM_ARB_ERR_CAUSE 0x508B9C
+
+#define mmDMA0_QM_ARB_ERR_MSG_EN 0x508BA0
+
+#define mmDMA0_QM_ARB_ERR_STS_DRP 0x508BA8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_0 0x508BB0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_1 0x508BB4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_2 0x508BB8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_3 0x508BBC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_4 0x508BC0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_5 0x508BC4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_6 0x508BC8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_7 0x508BCC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_8 0x508BD0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_9 0x508BD4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_10 0x508BD8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_11 0x508BDC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_12 0x508BE0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_13 0x508BE4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_14 0x508BE8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_15 0x508BEC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_16 0x508BF0
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_17 0x508BF4
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_18 0x508BF8
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_19 0x508BFC
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_20 0x508C00
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_21 0x508C04
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_22 0x508C08
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_23 0x508C0C
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_24 0x508C10
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_25 0x508C14
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_26 0x508C18
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_27 0x508C1C
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_28 0x508C20
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_29 0x508C24
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_30 0x508C28
+
+#define mmDMA0_QM_ARB_MST_CRED_STS_31 0x508C2C
+
+#define mmDMA0_QM_CGM_CFG 0x508C70
+
+#define mmDMA0_QM_CGM_STS 0x508C74
+
+#define mmDMA0_QM_CGM_CFG1 0x508C78
+
+#define mmDMA0_QM_LOCAL_RANGE_BASE 0x508C80
+
+#define mmDMA0_QM_LOCAL_RANGE_SIZE 0x508C84
+
+#define mmDMA0_QM_CSMR_STRICT_PRIO_CFG 0x508C90
+
+#define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 0x508C94
+
+#define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 0x508C98
+
+#define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 0x508C9C
+
+#define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 0x508CA0
+
+#define mmDMA0_QM_GLBL_AXCACHE 0x508CA4
+
+#define mmDMA0_QM_IND_GW_APB_CFG 0x508CB0
+
+#define mmDMA0_QM_IND_GW_APB_WDATA 0x508CB4
+
+#define mmDMA0_QM_IND_GW_APB_RDATA 0x508CB8
+
+#define mmDMA0_QM_IND_GW_APB_STATUS 0x508CBC
+
+#define mmDMA0_QM_GLBL_ERR_ADDR_LO 0x508CD0
+
+#define mmDMA0_QM_GLBL_ERR_ADDR_HI 0x508CD4
+
+#define mmDMA0_QM_GLBL_ERR_WDATA 0x508CD8
+
+#define mmDMA0_QM_GLBL_MEM_INIT_BUSY 0x508D00
+
+#endif /* ASIC_REG_DMA0_QM_REGS_H_ */
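The mmDMA0_QM_* values above are byte offsets of the DMA0 QMAN registers within the device's register space. One common way such offsets are consumed is to add them to an ioremap'ed base before a readl()/writel(); the sketch below assumes exactly that pattern, and the "cfg_base" pointer and helper name are hypothetical stand-ins rather than the habanalabs driver's real accessors:

/*
 * Illustrative sketch only, not part of the generated header above.
 * It assumes "cfg_base" points at a mapped view of the register space
 * that these offsets are relative to; the offset macro is copied from
 * the header so the snippet stands alone.
 */
#include <linux/io.h>
#include <linux/types.h>

#define mmDMA0_QM_GLBL_CFG0 0x508000

/* Read DMA0 QMAN's global config register through the mapped base. */
static inline u32 dma0_qm_read_glbl_cfg0(void __iomem *cfg_base)
{
	return readl(cfg_base + mmDMA0_QM_GLBL_CFG0);
}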
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
new file mode 100644
index 000000000000..4d8d8f26c5d4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA1_CORE_REGS_H_
+#define ASIC_REG_DMA1_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA1_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA1_CORE_CFG_0 0x520000
+
+#define mmDMA1_CORE_CFG_1 0x520004
+
+#define mmDMA1_CORE_LBW_MAX_OUTSTAND 0x520008
+
+#define mmDMA1_CORE_SRC_BASE_LO 0x520014
+
+#define mmDMA1_CORE_SRC_BASE_HI 0x520018
+
+#define mmDMA1_CORE_DST_BASE_LO 0x52001C
+
+#define mmDMA1_CORE_DST_BASE_HI 0x520020
+
+#define mmDMA1_CORE_SRC_TSIZE_1 0x52002C
+
+#define mmDMA1_CORE_SRC_STRIDE_1 0x520030
+
+#define mmDMA1_CORE_SRC_TSIZE_2 0x520034
+
+#define mmDMA1_CORE_SRC_STRIDE_2 0x520038
+
+#define mmDMA1_CORE_SRC_TSIZE_3 0x52003C
+
+#define mmDMA1_CORE_SRC_STRIDE_3 0x520040
+
+#define mmDMA1_CORE_SRC_TSIZE_4 0x520044
+
+#define mmDMA1_CORE_SRC_STRIDE_4 0x520048
+
+#define mmDMA1_CORE_SRC_TSIZE_0 0x52004C
+
+#define mmDMA1_CORE_DST_TSIZE_1 0x520054
+
+#define mmDMA1_CORE_DST_STRIDE_1 0x520058
+
+#define mmDMA1_CORE_DST_TSIZE_2 0x52005C
+
+#define mmDMA1_CORE_DST_STRIDE_2 0x520060
+
+#define mmDMA1_CORE_DST_TSIZE_3 0x520064
+
+#define mmDMA1_CORE_DST_STRIDE_3 0x520068
+
+#define mmDMA1_CORE_DST_TSIZE_4 0x52006C
+
+#define mmDMA1_CORE_DST_STRIDE_4 0x520070
+
+#define mmDMA1_CORE_DST_TSIZE_0 0x520074
+
+#define mmDMA1_CORE_COMMIT 0x520078
+
+#define mmDMA1_CORE_WR_COMP_WDATA 0x52007C
+
+#define mmDMA1_CORE_WR_COMP_ADDR_LO 0x520080
+
+#define mmDMA1_CORE_WR_COMP_ADDR_HI 0x520084
+
+#define mmDMA1_CORE_WR_COMP_AWUSER_31_11 0x520088
+
+#define mmDMA1_CORE_TE_NUMROWS 0x520094
+
+#define mmDMA1_CORE_PROT 0x5200B8
+
+#define mmDMA1_CORE_SECURE_PROPS 0x5200F0
+
+#define mmDMA1_CORE_NON_SECURE_PROPS 0x5200F4
+
+#define mmDMA1_CORE_RD_MAX_OUTSTAND 0x520100
+
+#define mmDMA1_CORE_RD_MAX_SIZE 0x520104
+
+#define mmDMA1_CORE_RD_ARCACHE 0x520108
+
+#define mmDMA1_CORE_RD_ARUSER_31_11 0x520110
+
+#define mmDMA1_CORE_RD_INFLIGHTS 0x520114
+
+#define mmDMA1_CORE_WR_MAX_OUTSTAND 0x520120
+
+#define mmDMA1_CORE_WR_MAX_AWID 0x520124
+
+#define mmDMA1_CORE_WR_AWCACHE 0x520128
+
+#define mmDMA1_CORE_WR_AWUSER_31_11 0x520130
+
+#define mmDMA1_CORE_WR_INFLIGHTS 0x520134
+
+#define mmDMA1_CORE_RD_RATE_LIM_CFG_0 0x520150
+
+#define mmDMA1_CORE_RD_RATE_LIM_CFG_1 0x520154
+
+#define mmDMA1_CORE_WR_RATE_LIM_CFG_0 0x520158
+
+#define mmDMA1_CORE_WR_RATE_LIM_CFG_1 0x52015C
+
+#define mmDMA1_CORE_ERR_CFG 0x520160
+
+#define mmDMA1_CORE_ERR_CAUSE 0x520164
+
+#define mmDMA1_CORE_ERRMSG_ADDR_LO 0x520170
+
+#define mmDMA1_CORE_ERRMSG_ADDR_HI 0x520174
+
+#define mmDMA1_CORE_ERRMSG_WDATA 0x520178
+
+#define mmDMA1_CORE_STS0 0x520190
+
+#define mmDMA1_CORE_STS1 0x520194
+
+#define mmDMA1_CORE_RD_DBGMEM_ADD 0x520200
+
+#define mmDMA1_CORE_RD_DBGMEM_DATA_WR 0x520204
+
+#define mmDMA1_CORE_RD_DBGMEM_DATA_RD 0x520208
+
+#define mmDMA1_CORE_RD_DBGMEM_CTRL 0x52020C
+
+#define mmDMA1_CORE_RD_DBGMEM_RC 0x520210
+
+#define mmDMA1_CORE_DBG_HBW_AXI_AR_CNT 0x520220
+
+#define mmDMA1_CORE_DBG_HBW_AXI_AW_CNT 0x520224
+
+#define mmDMA1_CORE_DBG_LBW_AXI_AW_CNT 0x520228
+
+#define mmDMA1_CORE_DBG_DESC_CNT 0x52022C
+
+#define mmDMA1_CORE_DBG_STS 0x520230
+
+#define mmDMA1_CORE_DBG_RD_DESC_ID 0x520234
+
+#define mmDMA1_CORE_DBG_WR_DESC_ID 0x520238
+
+#endif /* ASIC_REG_DMA1_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
new file mode 100644
index 000000000000..c3ef300849be
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA1_QM_REGS_H_
+#define ASIC_REG_DMA1_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA1_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA1_QM_GLBL_CFG0 0x528000
+
+#define mmDMA1_QM_GLBL_CFG1 0x528004
+
+#define mmDMA1_QM_GLBL_PROT 0x528008
+
+#define mmDMA1_QM_GLBL_ERR_CFG 0x52800C
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_0 0x528010
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_1 0x528014
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_2 0x528018
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_3 0x52801C
+
+#define mmDMA1_QM_GLBL_SECURE_PROPS_4 0x528020
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_0 0x528024
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_1 0x528028
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_2 0x52802C
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_3 0x528030
+
+#define mmDMA1_QM_GLBL_NON_SECURE_PROPS_4 0x528034
+
+#define mmDMA1_QM_GLBL_STS0 0x528038
+
+#define mmDMA1_QM_GLBL_STS1_0 0x528040
+
+#define mmDMA1_QM_GLBL_STS1_1 0x528044
+
+#define mmDMA1_QM_GLBL_STS1_2 0x528048
+
+#define mmDMA1_QM_GLBL_STS1_3 0x52804C
+
+#define mmDMA1_QM_GLBL_STS1_4 0x528050
+
+#define mmDMA1_QM_GLBL_MSG_EN_0 0x528054
+
+#define mmDMA1_QM_GLBL_MSG_EN_1 0x528058
+
+#define mmDMA1_QM_GLBL_MSG_EN_2 0x52805C
+
+#define mmDMA1_QM_GLBL_MSG_EN_3 0x528060
+
+#define mmDMA1_QM_GLBL_MSG_EN_4 0x528068
+
+#define mmDMA1_QM_PQ_BASE_LO_0 0x528070
+
+#define mmDMA1_QM_PQ_BASE_LO_1 0x528074
+
+#define mmDMA1_QM_PQ_BASE_LO_2 0x528078
+
+#define mmDMA1_QM_PQ_BASE_LO_3 0x52807C
+
+#define mmDMA1_QM_PQ_BASE_HI_0 0x528080
+
+#define mmDMA1_QM_PQ_BASE_HI_1 0x528084
+
+#define mmDMA1_QM_PQ_BASE_HI_2 0x528088
+
+#define mmDMA1_QM_PQ_BASE_HI_3 0x52808C
+
+#define mmDMA1_QM_PQ_SIZE_0 0x528090
+
+#define mmDMA1_QM_PQ_SIZE_1 0x528094
+
+#define mmDMA1_QM_PQ_SIZE_2 0x528098
+
+#define mmDMA1_QM_PQ_SIZE_3 0x52809C
+
+#define mmDMA1_QM_PQ_PI_0 0x5280A0
+
+#define mmDMA1_QM_PQ_PI_1 0x5280A4
+
+#define mmDMA1_QM_PQ_PI_2 0x5280A8
+
+#define mmDMA1_QM_PQ_PI_3 0x5280AC
+
+#define mmDMA1_QM_PQ_CI_0 0x5280B0
+
+#define mmDMA1_QM_PQ_CI_1 0x5280B4
+
+#define mmDMA1_QM_PQ_CI_2 0x5280B8
+
+#define mmDMA1_QM_PQ_CI_3 0x5280BC
+
+#define mmDMA1_QM_PQ_CFG0_0 0x5280C0
+
+#define mmDMA1_QM_PQ_CFG0_1 0x5280C4
+
+#define mmDMA1_QM_PQ_CFG0_2 0x5280C8
+
+#define mmDMA1_QM_PQ_CFG0_3 0x5280CC
+
+#define mmDMA1_QM_PQ_CFG1_0 0x5280D0
+
+#define mmDMA1_QM_PQ_CFG1_1 0x5280D4
+
+#define mmDMA1_QM_PQ_CFG1_2 0x5280D8
+
+#define mmDMA1_QM_PQ_CFG1_3 0x5280DC
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_0 0x5280E0
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_1 0x5280E4
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_2 0x5280E8
+
+#define mmDMA1_QM_PQ_ARUSER_31_11_3 0x5280EC
+
+#define mmDMA1_QM_PQ_STS0_0 0x5280F0
+
+#define mmDMA1_QM_PQ_STS0_1 0x5280F4
+
+#define mmDMA1_QM_PQ_STS0_2 0x5280F8
+
+#define mmDMA1_QM_PQ_STS0_3 0x5280FC
+
+#define mmDMA1_QM_PQ_STS1_0 0x528100
+
+#define mmDMA1_QM_PQ_STS1_1 0x528104
+
+#define mmDMA1_QM_PQ_STS1_2 0x528108
+
+#define mmDMA1_QM_PQ_STS1_3 0x52810C
+
+#define mmDMA1_QM_CQ_CFG0_0 0x528110
+
+#define mmDMA1_QM_CQ_CFG0_1 0x528114
+
+#define mmDMA1_QM_CQ_CFG0_2 0x528118
+
+#define mmDMA1_QM_CQ_CFG0_3 0x52811C
+
+#define mmDMA1_QM_CQ_CFG0_4 0x528120
+
+#define mmDMA1_QM_CQ_CFG1_0 0x528124
+
+#define mmDMA1_QM_CQ_CFG1_1 0x528128
+
+#define mmDMA1_QM_CQ_CFG1_2 0x52812C
+
+#define mmDMA1_QM_CQ_CFG1_3 0x528130
+
+#define mmDMA1_QM_CQ_CFG1_4 0x528134
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_0 0x528138
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_1 0x52813C
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_2 0x528140
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_3 0x528144
+
+#define mmDMA1_QM_CQ_ARUSER_31_11_4 0x528148
+
+#define mmDMA1_QM_CQ_STS0_0 0x52814C
+
+#define mmDMA1_QM_CQ_STS0_1 0x528150
+
+#define mmDMA1_QM_CQ_STS0_2 0x528154
+
+#define mmDMA1_QM_CQ_STS0_3 0x528158
+
+#define mmDMA1_QM_CQ_STS0_4 0x52815C
+
+#define mmDMA1_QM_CQ_STS1_0 0x528160
+
+#define mmDMA1_QM_CQ_STS1_1 0x528164
+
+#define mmDMA1_QM_CQ_STS1_2 0x528168
+
+#define mmDMA1_QM_CQ_STS1_3 0x52816C
+
+#define mmDMA1_QM_CQ_STS1_4 0x528170
+
+#define mmDMA1_QM_CQ_PTR_LO_0 0x528174
+
+#define mmDMA1_QM_CQ_PTR_HI_0 0x528178
+
+#define mmDMA1_QM_CQ_TSIZE_0 0x52817C
+
+#define mmDMA1_QM_CQ_CTL_0 0x528180
+
+#define mmDMA1_QM_CQ_PTR_LO_1 0x528184
+
+#define mmDMA1_QM_CQ_PTR_HI_1 0x528188
+
+#define mmDMA1_QM_CQ_TSIZE_1 0x52818C
+
+#define mmDMA1_QM_CQ_CTL_1 0x528190
+
+#define mmDMA1_QM_CQ_PTR_LO_2 0x528194
+
+#define mmDMA1_QM_CQ_PTR_HI_2 0x528198
+
+#define mmDMA1_QM_CQ_TSIZE_2 0x52819C
+
+#define mmDMA1_QM_CQ_CTL_2 0x5281A0
+
+#define mmDMA1_QM_CQ_PTR_LO_3 0x5281A4
+
+#define mmDMA1_QM_CQ_PTR_HI_3 0x5281A8
+
+#define mmDMA1_QM_CQ_TSIZE_3 0x5281AC
+
+#define mmDMA1_QM_CQ_CTL_3 0x5281B0
+
+#define mmDMA1_QM_CQ_PTR_LO_4 0x5281B4
+
+#define mmDMA1_QM_CQ_PTR_HI_4 0x5281B8
+
+#define mmDMA1_QM_CQ_TSIZE_4 0x5281BC
+
+#define mmDMA1_QM_CQ_CTL_4 0x5281C0
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_0 0x5281C4
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_1 0x5281C8
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_2 0x5281CC
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_3 0x5281D0
+
+#define mmDMA1_QM_CQ_PTR_LO_STS_4 0x5281D4
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_0 0x5281D8
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_1 0x5281DC
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_2 0x5281E0
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_3 0x5281E4
+
+#define mmDMA1_QM_CQ_PTR_HI_STS_4 0x5281E8
+
+#define mmDMA1_QM_CQ_TSIZE_STS_0 0x5281EC
+
+#define mmDMA1_QM_CQ_TSIZE_STS_1 0x5281F0
+
+#define mmDMA1_QM_CQ_TSIZE_STS_2 0x5281F4
+
+#define mmDMA1_QM_CQ_TSIZE_STS_3 0x5281F8
+
+#define mmDMA1_QM_CQ_TSIZE_STS_4 0x5281FC
+
+#define mmDMA1_QM_CQ_CTL_STS_0 0x528200
+
+#define mmDMA1_QM_CQ_CTL_STS_1 0x528204
+
+#define mmDMA1_QM_CQ_CTL_STS_2 0x528208
+
+#define mmDMA1_QM_CQ_CTL_STS_3 0x52820C
+
+#define mmDMA1_QM_CQ_CTL_STS_4 0x528210
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_0 0x528214
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_1 0x528218
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_2 0x52821C
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_3 0x528220
+
+#define mmDMA1_QM_CQ_IFIFO_CNT_4 0x528224
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_0 0x528228
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_1 0x52822C
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_2 0x528230
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_3 0x528234
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_LO_4 0x528238
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_0 0x52823C
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_1 0x528240
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_2 0x528244
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_3 0x528248
+
+#define mmDMA1_QM_CP_MSG_BASE0_ADDR_HI_4 0x52824C
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_0 0x528250
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_1 0x528254
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_2 0x528258
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_3 0x52825C
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_LO_4 0x528260
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_0 0x528264
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_1 0x528268
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_2 0x52826C
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_3 0x528270
+
+#define mmDMA1_QM_CP_MSG_BASE1_ADDR_HI_4 0x528274
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_0 0x528278
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_1 0x52827C
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_2 0x528280
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_3 0x528284
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_LO_4 0x528288
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_0 0x52828C
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_1 0x528290
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_2 0x528294
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_3 0x528298
+
+#define mmDMA1_QM_CP_MSG_BASE2_ADDR_HI_4 0x52829C
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_0 0x5282A0
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_1 0x5282A4
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_2 0x5282A8
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_3 0x5282AC
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_LO_4 0x5282B0
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_0 0x5282B4
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_1 0x5282B8
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_2 0x5282BC
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_3 0x5282C0
+
+#define mmDMA1_QM_CP_MSG_BASE3_ADDR_HI_4 0x5282C4
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_0 0x5282C8
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_1 0x5282CC
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_2 0x5282D0
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_3 0x5282D4
+
+#define mmDMA1_QM_CP_LDMA_TSIZE_OFFSET_4 0x5282D8
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5282E0
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5282E4
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5282E8
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5282EC
+
+#define mmDMA1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5282F0
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5282F4
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5282F8
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5282FC
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x528300
+
+#define mmDMA1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x528304
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_0 0x528308
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_1 0x52830C
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_2 0x528310
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_3 0x528314
+
+#define mmDMA1_QM_CP_FENCE0_RDATA_4 0x528318
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_0 0x52831C
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_1 0x528320
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_2 0x528324
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_3 0x528328
+
+#define mmDMA1_QM_CP_FENCE1_RDATA_4 0x52832C
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_0 0x528330
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_1 0x528334
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_2 0x528338
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_3 0x52833C
+
+#define mmDMA1_QM_CP_FENCE2_RDATA_4 0x528340
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_0 0x528344
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_1 0x528348
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_2 0x52834C
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_3 0x528350
+
+#define mmDMA1_QM_CP_FENCE3_RDATA_4 0x528354
+
+#define mmDMA1_QM_CP_FENCE0_CNT_0 0x528358
+
+#define mmDMA1_QM_CP_FENCE0_CNT_1 0x52835C
+
+#define mmDMA1_QM_CP_FENCE0_CNT_2 0x528360
+
+#define mmDMA1_QM_CP_FENCE0_CNT_3 0x528364
+
+#define mmDMA1_QM_CP_FENCE0_CNT_4 0x528368
+
+#define mmDMA1_QM_CP_FENCE1_CNT_0 0x52836C
+
+#define mmDMA1_QM_CP_FENCE1_CNT_1 0x528370
+
+#define mmDMA1_QM_CP_FENCE1_CNT_2 0x528374
+
+#define mmDMA1_QM_CP_FENCE1_CNT_3 0x528378
+
+#define mmDMA1_QM_CP_FENCE1_CNT_4 0x52837C
+
+#define mmDMA1_QM_CP_FENCE2_CNT_0 0x528380
+
+#define mmDMA1_QM_CP_FENCE2_CNT_1 0x528384
+
+#define mmDMA1_QM_CP_FENCE2_CNT_2 0x528388
+
+#define mmDMA1_QM_CP_FENCE2_CNT_3 0x52838C
+
+#define mmDMA1_QM_CP_FENCE2_CNT_4 0x528390
+
+#define mmDMA1_QM_CP_FENCE3_CNT_0 0x528394
+
+#define mmDMA1_QM_CP_FENCE3_CNT_1 0x528398
+
+#define mmDMA1_QM_CP_FENCE3_CNT_2 0x52839C
+
+#define mmDMA1_QM_CP_FENCE3_CNT_3 0x5283A0
+
+#define mmDMA1_QM_CP_FENCE3_CNT_4 0x5283A4
+
+#define mmDMA1_QM_CP_STS_0 0x5283A8
+
+#define mmDMA1_QM_CP_STS_1 0x5283AC
+
+#define mmDMA1_QM_CP_STS_2 0x5283B0
+
+#define mmDMA1_QM_CP_STS_3 0x5283B4
+
+#define mmDMA1_QM_CP_STS_4 0x5283B8
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_0 0x5283BC
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_1 0x5283C0
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_2 0x5283C4
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_3 0x5283C8
+
+#define mmDMA1_QM_CP_CURRENT_INST_LO_4 0x5283CC
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_0 0x5283D0
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_1 0x5283D4
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_2 0x5283D8
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_3 0x5283DC
+
+#define mmDMA1_QM_CP_CURRENT_INST_HI_4 0x5283E0
+
+#define mmDMA1_QM_CP_BARRIER_CFG_0 0x5283F4
+
+#define mmDMA1_QM_CP_BARRIER_CFG_1 0x5283F8
+
+#define mmDMA1_QM_CP_BARRIER_CFG_2 0x5283FC
+
+#define mmDMA1_QM_CP_BARRIER_CFG_3 0x528400
+
+#define mmDMA1_QM_CP_BARRIER_CFG_4 0x528404
+
+#define mmDMA1_QM_CP_DBG_0_0 0x528408
+
+#define mmDMA1_QM_CP_DBG_0_1 0x52840C
+
+#define mmDMA1_QM_CP_DBG_0_2 0x528410
+
+#define mmDMA1_QM_CP_DBG_0_3 0x528414
+
+#define mmDMA1_QM_CP_DBG_0_4 0x528418
+
+#define mmDMA1_QM_CP_ARUSER_31_11_0 0x52841C
+
+#define mmDMA1_QM_CP_ARUSER_31_11_1 0x528420
+
+#define mmDMA1_QM_CP_ARUSER_31_11_2 0x528424
+
+#define mmDMA1_QM_CP_ARUSER_31_11_3 0x528428
+
+#define mmDMA1_QM_CP_ARUSER_31_11_4 0x52842C
+
+#define mmDMA1_QM_CP_AWUSER_31_11_0 0x528430
+
+#define mmDMA1_QM_CP_AWUSER_31_11_1 0x528434
+
+#define mmDMA1_QM_CP_AWUSER_31_11_2 0x528438
+
+#define mmDMA1_QM_CP_AWUSER_31_11_3 0x52843C
+
+#define mmDMA1_QM_CP_AWUSER_31_11_4 0x528440
+
+#define mmDMA1_QM_ARB_CFG_0 0x528A00
+
+#define mmDMA1_QM_ARB_CHOISE_Q_PUSH 0x528A04
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_0 0x528A08
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_1 0x528A0C
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_2 0x528A10
+
+#define mmDMA1_QM_ARB_WRR_WEIGHT_3 0x528A14
+
+#define mmDMA1_QM_ARB_CFG_1 0x528A18
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_0 0x528A20
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_1 0x528A24
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_2 0x528A28
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_3 0x528A2C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_4 0x528A30
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_5 0x528A34
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_6 0x528A38
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_7 0x528A3C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_8 0x528A40
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_9 0x528A44
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_10 0x528A48
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_11 0x528A4C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_12 0x528A50
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_13 0x528A54
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_14 0x528A58
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_15 0x528A5C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_16 0x528A60
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_17 0x528A64
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_18 0x528A68
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_19 0x528A6C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_20 0x528A70
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_21 0x528A74
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_22 0x528A78
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_23 0x528A7C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_24 0x528A80
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_25 0x528A84
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_26 0x528A88
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_27 0x528A8C
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_28 0x528A90
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_29 0x528A94
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_30 0x528A98
+
+#define mmDMA1_QM_ARB_MST_AVAIL_CRED_31 0x528A9C
+
+#define mmDMA1_QM_ARB_MST_CRED_INC 0x528AA0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x528AA4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x528AA8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x528AAC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x528AB0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x528AB4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x528AB8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x528ABC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x528AC0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x528AC4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x528AC8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x528ACC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x528AD0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x528AD4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x528AD8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x528ADC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x528AE0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x528AE4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x528AE8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x528AEC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x528AF0
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x528AF4
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x528AF8
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x528AFC
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x528B00
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x528B04
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x528B08
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x528B0C
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x528B10
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x528B14
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x528B18
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x528B1C
+
+#define mmDMA1_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x528B20
+
+#define mmDMA1_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x528B28
+
+#define mmDMA1_QM_ARB_MST_SLAVE_EN 0x528B2C
+
+#define mmDMA1_QM_ARB_MST_QUIET_PER 0x528B34
+
+#define mmDMA1_QM_ARB_SLV_CHOISE_WDT 0x528B38
+
+#define mmDMA1_QM_ARB_SLV_ID 0x528B3C
+
+#define mmDMA1_QM_ARB_MSG_MAX_INFLIGHT 0x528B44
+
+#define mmDMA1_QM_ARB_MSG_AWUSER_31_11 0x528B48
+
+#define mmDMA1_QM_ARB_MSG_AWUSER_SEC_PROP 0x528B4C
+
+#define mmDMA1_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x528B50
+
+#define mmDMA1_QM_ARB_BASE_LO 0x528B54
+
+#define mmDMA1_QM_ARB_BASE_HI 0x528B58
+
+#define mmDMA1_QM_ARB_STATE_STS 0x528B80
+
+#define mmDMA1_QM_ARB_CHOISE_FULLNESS_STS 0x528B84
+
+#define mmDMA1_QM_ARB_MSG_STS 0x528B88
+
+#define mmDMA1_QM_ARB_SLV_CHOISE_Q_HEAD 0x528B8C
+
+#define mmDMA1_QM_ARB_ERR_CAUSE 0x528B9C
+
+#define mmDMA1_QM_ARB_ERR_MSG_EN 0x528BA0
+
+#define mmDMA1_QM_ARB_ERR_STS_DRP 0x528BA8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_0 0x528BB0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_1 0x528BB4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_2 0x528BB8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_3 0x528BBC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_4 0x528BC0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_5 0x528BC4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_6 0x528BC8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_7 0x528BCC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_8 0x528BD0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_9 0x528BD4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_10 0x528BD8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_11 0x528BDC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_12 0x528BE0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_13 0x528BE4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_14 0x528BE8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_15 0x528BEC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_16 0x528BF0
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_17 0x528BF4
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_18 0x528BF8
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_19 0x528BFC
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_20 0x528C00
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_21 0x528C04
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_22 0x528C08
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_23 0x528C0C
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_24 0x528C10
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_25 0x528C14
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_26 0x528C18
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_27 0x528C1C
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_28 0x528C20
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_29 0x528C24
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_30 0x528C28
+
+#define mmDMA1_QM_ARB_MST_CRED_STS_31 0x528C2C
+
+#define mmDMA1_QM_CGM_CFG 0x528C70
+
+#define mmDMA1_QM_CGM_STS 0x528C74
+
+#define mmDMA1_QM_CGM_CFG1 0x528C78
+
+#define mmDMA1_QM_LOCAL_RANGE_BASE 0x528C80
+
+#define mmDMA1_QM_LOCAL_RANGE_SIZE 0x528C84
+
+#define mmDMA1_QM_CSMR_STRICT_PRIO_CFG 0x528C90
+
+#define mmDMA1_QM_HBW_RD_RATE_LIM_CFG_1 0x528C94
+
+#define mmDMA1_QM_LBW_WR_RATE_LIM_CFG_0 0x528C98
+
+#define mmDMA1_QM_LBW_WR_RATE_LIM_CFG_1 0x528C9C
+
+#define mmDMA1_QM_HBW_RD_RATE_LIM_CFG_0 0x528CA0
+
+#define mmDMA1_QM_GLBL_AXCACHE 0x528CA4
+
+#define mmDMA1_QM_IND_GW_APB_CFG 0x528CB0
+
+#define mmDMA1_QM_IND_GW_APB_WDATA 0x528CB4
+
+#define mmDMA1_QM_IND_GW_APB_RDATA 0x528CB8
+
+#define mmDMA1_QM_IND_GW_APB_STATUS 0x528CBC
+
+#define mmDMA1_QM_GLBL_ERR_ADDR_LO 0x528CD0
+
+#define mmDMA1_QM_GLBL_ERR_ADDR_HI 0x528CD4
+
+#define mmDMA1_QM_GLBL_ERR_WDATA 0x528CD8
+
+#define mmDMA1_QM_GLBL_MEM_INIT_BUSY 0x528D00
+
+#endif /* ASIC_REG_DMA1_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
new file mode 100644
index 000000000000..a42862cd5ae0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA2_CORE_REGS_H_
+#define ASIC_REG_DMA2_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA2_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA2_CORE_CFG_0 0x540000
+
+#define mmDMA2_CORE_CFG_1 0x540004
+
+#define mmDMA2_CORE_LBW_MAX_OUTSTAND 0x540008
+
+#define mmDMA2_CORE_SRC_BASE_LO 0x540014
+
+#define mmDMA2_CORE_SRC_BASE_HI 0x540018
+
+#define mmDMA2_CORE_DST_BASE_LO 0x54001C
+
+#define mmDMA2_CORE_DST_BASE_HI 0x540020
+
+#define mmDMA2_CORE_SRC_TSIZE_1 0x54002C
+
+#define mmDMA2_CORE_SRC_STRIDE_1 0x540030
+
+#define mmDMA2_CORE_SRC_TSIZE_2 0x540034
+
+#define mmDMA2_CORE_SRC_STRIDE_2 0x540038
+
+#define mmDMA2_CORE_SRC_TSIZE_3 0x54003C
+
+#define mmDMA2_CORE_SRC_STRIDE_3 0x540040
+
+#define mmDMA2_CORE_SRC_TSIZE_4 0x540044
+
+#define mmDMA2_CORE_SRC_STRIDE_4 0x540048
+
+#define mmDMA2_CORE_SRC_TSIZE_0 0x54004C
+
+#define mmDMA2_CORE_DST_TSIZE_1 0x540054
+
+#define mmDMA2_CORE_DST_STRIDE_1 0x540058
+
+#define mmDMA2_CORE_DST_TSIZE_2 0x54005C
+
+#define mmDMA2_CORE_DST_STRIDE_2 0x540060
+
+#define mmDMA2_CORE_DST_TSIZE_3 0x540064
+
+#define mmDMA2_CORE_DST_STRIDE_3 0x540068
+
+#define mmDMA2_CORE_DST_TSIZE_4 0x54006C
+
+#define mmDMA2_CORE_DST_STRIDE_4 0x540070
+
+#define mmDMA2_CORE_DST_TSIZE_0 0x540074
+
+#define mmDMA2_CORE_COMMIT 0x540078
+
+#define mmDMA2_CORE_WR_COMP_WDATA 0x54007C
+
+#define mmDMA2_CORE_WR_COMP_ADDR_LO 0x540080
+
+#define mmDMA2_CORE_WR_COMP_ADDR_HI 0x540084
+
+#define mmDMA2_CORE_WR_COMP_AWUSER_31_11 0x540088
+
+#define mmDMA2_CORE_TE_NUMROWS 0x540094
+
+#define mmDMA2_CORE_PROT 0x5400B8
+
+#define mmDMA2_CORE_SECURE_PROPS 0x5400F0
+
+#define mmDMA2_CORE_NON_SECURE_PROPS 0x5400F4
+
+#define mmDMA2_CORE_RD_MAX_OUTSTAND 0x540100
+
+#define mmDMA2_CORE_RD_MAX_SIZE 0x540104
+
+#define mmDMA2_CORE_RD_ARCACHE 0x540108
+
+#define mmDMA2_CORE_RD_ARUSER_31_11 0x540110
+
+#define mmDMA2_CORE_RD_INFLIGHTS 0x540114
+
+#define mmDMA2_CORE_WR_MAX_OUTSTAND 0x540120
+
+#define mmDMA2_CORE_WR_MAX_AWID 0x540124
+
+#define mmDMA2_CORE_WR_AWCACHE 0x540128
+
+#define mmDMA2_CORE_WR_AWUSER_31_11 0x540130
+
+#define mmDMA2_CORE_WR_INFLIGHTS 0x540134
+
+#define mmDMA2_CORE_RD_RATE_LIM_CFG_0 0x540150
+
+#define mmDMA2_CORE_RD_RATE_LIM_CFG_1 0x540154
+
+#define mmDMA2_CORE_WR_RATE_LIM_CFG_0 0x540158
+
+#define mmDMA2_CORE_WR_RATE_LIM_CFG_1 0x54015C
+
+#define mmDMA2_CORE_ERR_CFG 0x540160
+
+#define mmDMA2_CORE_ERR_CAUSE 0x540164
+
+#define mmDMA2_CORE_ERRMSG_ADDR_LO 0x540170
+
+#define mmDMA2_CORE_ERRMSG_ADDR_HI 0x540174
+
+#define mmDMA2_CORE_ERRMSG_WDATA 0x540178
+
+#define mmDMA2_CORE_STS0 0x540190
+
+#define mmDMA2_CORE_STS1 0x540194
+
+#define mmDMA2_CORE_RD_DBGMEM_ADD 0x540200
+
+#define mmDMA2_CORE_RD_DBGMEM_DATA_WR 0x540204
+
+#define mmDMA2_CORE_RD_DBGMEM_DATA_RD 0x540208
+
+#define mmDMA2_CORE_RD_DBGMEM_CTRL 0x54020C
+
+#define mmDMA2_CORE_RD_DBGMEM_RC 0x540210
+
+#define mmDMA2_CORE_DBG_HBW_AXI_AR_CNT 0x540220
+
+#define mmDMA2_CORE_DBG_HBW_AXI_AW_CNT 0x540224
+
+#define mmDMA2_CORE_DBG_LBW_AXI_AW_CNT 0x540228
+
+#define mmDMA2_CORE_DBG_DESC_CNT 0x54022C
+
+#define mmDMA2_CORE_DBG_STS 0x540230
+
+#define mmDMA2_CORE_DBG_RD_DESC_ID 0x540234
+
+#define mmDMA2_CORE_DBG_WR_DESC_ID 0x540238
+
+#endif /* ASIC_REG_DMA2_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
new file mode 100644
index 000000000000..8c4d4e016852
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA2_QM_REGS_H_
+#define ASIC_REG_DMA2_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA2_QM_GLBL_CFG0 0x548000
+
+#define mmDMA2_QM_GLBL_CFG1 0x548004
+
+#define mmDMA2_QM_GLBL_PROT 0x548008
+
+#define mmDMA2_QM_GLBL_ERR_CFG 0x54800C
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_0 0x548010
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_1 0x548014
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_2 0x548018
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_3 0x54801C
+
+#define mmDMA2_QM_GLBL_SECURE_PROPS_4 0x548020
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_0 0x548024
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_1 0x548028
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_2 0x54802C
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_3 0x548030
+
+#define mmDMA2_QM_GLBL_NON_SECURE_PROPS_4 0x548034
+
+#define mmDMA2_QM_GLBL_STS0 0x548038
+
+#define mmDMA2_QM_GLBL_STS1_0 0x548040
+
+#define mmDMA2_QM_GLBL_STS1_1 0x548044
+
+#define mmDMA2_QM_GLBL_STS1_2 0x548048
+
+#define mmDMA2_QM_GLBL_STS1_3 0x54804C
+
+#define mmDMA2_QM_GLBL_STS1_4 0x548050
+
+#define mmDMA2_QM_GLBL_MSG_EN_0 0x548054
+
+#define mmDMA2_QM_GLBL_MSG_EN_1 0x548058
+
+#define mmDMA2_QM_GLBL_MSG_EN_2 0x54805C
+
+#define mmDMA2_QM_GLBL_MSG_EN_3 0x548060
+
+#define mmDMA2_QM_GLBL_MSG_EN_4 0x548068
+
+#define mmDMA2_QM_PQ_BASE_LO_0 0x548070
+
+#define mmDMA2_QM_PQ_BASE_LO_1 0x548074
+
+#define mmDMA2_QM_PQ_BASE_LO_2 0x548078
+
+#define mmDMA2_QM_PQ_BASE_LO_3 0x54807C
+
+#define mmDMA2_QM_PQ_BASE_HI_0 0x548080
+
+#define mmDMA2_QM_PQ_BASE_HI_1 0x548084
+
+#define mmDMA2_QM_PQ_BASE_HI_2 0x548088
+
+#define mmDMA2_QM_PQ_BASE_HI_3 0x54808C
+
+#define mmDMA2_QM_PQ_SIZE_0 0x548090
+
+#define mmDMA2_QM_PQ_SIZE_1 0x548094
+
+#define mmDMA2_QM_PQ_SIZE_2 0x548098
+
+#define mmDMA2_QM_PQ_SIZE_3 0x54809C
+
+#define mmDMA2_QM_PQ_PI_0 0x5480A0
+
+#define mmDMA2_QM_PQ_PI_1 0x5480A4
+
+#define mmDMA2_QM_PQ_PI_2 0x5480A8
+
+#define mmDMA2_QM_PQ_PI_3 0x5480AC
+
+#define mmDMA2_QM_PQ_CI_0 0x5480B0
+
+#define mmDMA2_QM_PQ_CI_1 0x5480B4
+
+#define mmDMA2_QM_PQ_CI_2 0x5480B8
+
+#define mmDMA2_QM_PQ_CI_3 0x5480BC
+
+#define mmDMA2_QM_PQ_CFG0_0 0x5480C0
+
+#define mmDMA2_QM_PQ_CFG0_1 0x5480C4
+
+#define mmDMA2_QM_PQ_CFG0_2 0x5480C8
+
+#define mmDMA2_QM_PQ_CFG0_3 0x5480CC
+
+#define mmDMA2_QM_PQ_CFG1_0 0x5480D0
+
+#define mmDMA2_QM_PQ_CFG1_1 0x5480D4
+
+#define mmDMA2_QM_PQ_CFG1_2 0x5480D8
+
+#define mmDMA2_QM_PQ_CFG1_3 0x5480DC
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_0 0x5480E0
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_1 0x5480E4
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_2 0x5480E8
+
+#define mmDMA2_QM_PQ_ARUSER_31_11_3 0x5480EC
+
+#define mmDMA2_QM_PQ_STS0_0 0x5480F0
+
+#define mmDMA2_QM_PQ_STS0_1 0x5480F4
+
+#define mmDMA2_QM_PQ_STS0_2 0x5480F8
+
+#define mmDMA2_QM_PQ_STS0_3 0x5480FC
+
+#define mmDMA2_QM_PQ_STS1_0 0x548100
+
+#define mmDMA2_QM_PQ_STS1_1 0x548104
+
+#define mmDMA2_QM_PQ_STS1_2 0x548108
+
+#define mmDMA2_QM_PQ_STS1_3 0x54810C
+
+#define mmDMA2_QM_CQ_CFG0_0 0x548110
+
+#define mmDMA2_QM_CQ_CFG0_1 0x548114
+
+#define mmDMA2_QM_CQ_CFG0_2 0x548118
+
+#define mmDMA2_QM_CQ_CFG0_3 0x54811C
+
+#define mmDMA2_QM_CQ_CFG0_4 0x548120
+
+#define mmDMA2_QM_CQ_CFG1_0 0x548124
+
+#define mmDMA2_QM_CQ_CFG1_1 0x548128
+
+#define mmDMA2_QM_CQ_CFG1_2 0x54812C
+
+#define mmDMA2_QM_CQ_CFG1_3 0x548130
+
+#define mmDMA2_QM_CQ_CFG1_4 0x548134
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_0 0x548138
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_1 0x54813C
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_2 0x548140
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_3 0x548144
+
+#define mmDMA2_QM_CQ_ARUSER_31_11_4 0x548148
+
+#define mmDMA2_QM_CQ_STS0_0 0x54814C
+
+#define mmDMA2_QM_CQ_STS0_1 0x548150
+
+#define mmDMA2_QM_CQ_STS0_2 0x548154
+
+#define mmDMA2_QM_CQ_STS0_3 0x548158
+
+#define mmDMA2_QM_CQ_STS0_4 0x54815C
+
+#define mmDMA2_QM_CQ_STS1_0 0x548160
+
+#define mmDMA2_QM_CQ_STS1_1 0x548164
+
+#define mmDMA2_QM_CQ_STS1_2 0x548168
+
+#define mmDMA2_QM_CQ_STS1_3 0x54816C
+
+#define mmDMA2_QM_CQ_STS1_4 0x548170
+
+#define mmDMA2_QM_CQ_PTR_LO_0 0x548174
+
+#define mmDMA2_QM_CQ_PTR_HI_0 0x548178
+
+#define mmDMA2_QM_CQ_TSIZE_0 0x54817C
+
+#define mmDMA2_QM_CQ_CTL_0 0x548180
+
+#define mmDMA2_QM_CQ_PTR_LO_1 0x548184
+
+#define mmDMA2_QM_CQ_PTR_HI_1 0x548188
+
+#define mmDMA2_QM_CQ_TSIZE_1 0x54818C
+
+#define mmDMA2_QM_CQ_CTL_1 0x548190
+
+#define mmDMA2_QM_CQ_PTR_LO_2 0x548194
+
+#define mmDMA2_QM_CQ_PTR_HI_2 0x548198
+
+#define mmDMA2_QM_CQ_TSIZE_2 0x54819C
+
+#define mmDMA2_QM_CQ_CTL_2 0x5481A0
+
+#define mmDMA2_QM_CQ_PTR_LO_3 0x5481A4
+
+#define mmDMA2_QM_CQ_PTR_HI_3 0x5481A8
+
+#define mmDMA2_QM_CQ_TSIZE_3 0x5481AC
+
+#define mmDMA2_QM_CQ_CTL_3 0x5481B0
+
+#define mmDMA2_QM_CQ_PTR_LO_4 0x5481B4
+
+#define mmDMA2_QM_CQ_PTR_HI_4 0x5481B8
+
+#define mmDMA2_QM_CQ_TSIZE_4 0x5481BC
+
+#define mmDMA2_QM_CQ_CTL_4 0x5481C0
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_0 0x5481C4
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_1 0x5481C8
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_2 0x5481CC
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_3 0x5481D0
+
+#define mmDMA2_QM_CQ_PTR_LO_STS_4 0x5481D4
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_0 0x5481D8
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_1 0x5481DC
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_2 0x5481E0
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_3 0x5481E4
+
+#define mmDMA2_QM_CQ_PTR_HI_STS_4 0x5481E8
+
+#define mmDMA2_QM_CQ_TSIZE_STS_0 0x5481EC
+
+#define mmDMA2_QM_CQ_TSIZE_STS_1 0x5481F0
+
+#define mmDMA2_QM_CQ_TSIZE_STS_2 0x5481F4
+
+#define mmDMA2_QM_CQ_TSIZE_STS_3 0x5481F8
+
+#define mmDMA2_QM_CQ_TSIZE_STS_4 0x5481FC
+
+#define mmDMA2_QM_CQ_CTL_STS_0 0x548200
+
+#define mmDMA2_QM_CQ_CTL_STS_1 0x548204
+
+#define mmDMA2_QM_CQ_CTL_STS_2 0x548208
+
+#define mmDMA2_QM_CQ_CTL_STS_3 0x54820C
+
+#define mmDMA2_QM_CQ_CTL_STS_4 0x548210
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_0 0x548214
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_1 0x548218
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_2 0x54821C
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_3 0x548220
+
+#define mmDMA2_QM_CQ_IFIFO_CNT_4 0x548224
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_0 0x548228
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_1 0x54822C
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_2 0x548230
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_3 0x548234
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_LO_4 0x548238
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_0 0x54823C
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_1 0x548240
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_2 0x548244
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_3 0x548248
+
+#define mmDMA2_QM_CP_MSG_BASE0_ADDR_HI_4 0x54824C
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_0 0x548250
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_1 0x548254
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_2 0x548258
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_3 0x54825C
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_LO_4 0x548260
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_0 0x548264
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_1 0x548268
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_2 0x54826C
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_3 0x548270
+
+#define mmDMA2_QM_CP_MSG_BASE1_ADDR_HI_4 0x548274
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_0 0x548278
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_1 0x54827C
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_2 0x548280
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_3 0x548284
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_LO_4 0x548288
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_0 0x54828C
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_1 0x548290
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_2 0x548294
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_3 0x548298
+
+#define mmDMA2_QM_CP_MSG_BASE2_ADDR_HI_4 0x54829C
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_0 0x5482A0
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_1 0x5482A4
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_2 0x5482A8
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_3 0x5482AC
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_LO_4 0x5482B0
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_0 0x5482B4
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_1 0x5482B8
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_2 0x5482BC
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_3 0x5482C0
+
+#define mmDMA2_QM_CP_MSG_BASE3_ADDR_HI_4 0x5482C4
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_0 0x5482C8
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_1 0x5482CC
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_2 0x5482D0
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_3 0x5482D4
+
+#define mmDMA2_QM_CP_LDMA_TSIZE_OFFSET_4 0x5482D8
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5482E0
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5482E4
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5482E8
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5482EC
+
+#define mmDMA2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5482F0
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5482F4
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5482F8
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5482FC
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x548300
+
+#define mmDMA2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x548304
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_0 0x548308
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_1 0x54830C
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_2 0x548310
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_3 0x548314
+
+#define mmDMA2_QM_CP_FENCE0_RDATA_4 0x548318
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_0 0x54831C
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_1 0x548320
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_2 0x548324
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_3 0x548328
+
+#define mmDMA2_QM_CP_FENCE1_RDATA_4 0x54832C
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_0 0x548330
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_1 0x548334
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_2 0x548338
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_3 0x54833C
+
+#define mmDMA2_QM_CP_FENCE2_RDATA_4 0x548340
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_0 0x548344
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_1 0x548348
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_2 0x54834C
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_3 0x548350
+
+#define mmDMA2_QM_CP_FENCE3_RDATA_4 0x548354
+
+#define mmDMA2_QM_CP_FENCE0_CNT_0 0x548358
+
+#define mmDMA2_QM_CP_FENCE0_CNT_1 0x54835C
+
+#define mmDMA2_QM_CP_FENCE0_CNT_2 0x548360
+
+#define mmDMA2_QM_CP_FENCE0_CNT_3 0x548364
+
+#define mmDMA2_QM_CP_FENCE0_CNT_4 0x548368
+
+#define mmDMA2_QM_CP_FENCE1_CNT_0 0x54836C
+
+#define mmDMA2_QM_CP_FENCE1_CNT_1 0x548370
+
+#define mmDMA2_QM_CP_FENCE1_CNT_2 0x548374
+
+#define mmDMA2_QM_CP_FENCE1_CNT_3 0x548378
+
+#define mmDMA2_QM_CP_FENCE1_CNT_4 0x54837C
+
+#define mmDMA2_QM_CP_FENCE2_CNT_0 0x548380
+
+#define mmDMA2_QM_CP_FENCE2_CNT_1 0x548384
+
+#define mmDMA2_QM_CP_FENCE2_CNT_2 0x548388
+
+#define mmDMA2_QM_CP_FENCE2_CNT_3 0x54838C
+
+#define mmDMA2_QM_CP_FENCE2_CNT_4 0x548390
+
+#define mmDMA2_QM_CP_FENCE3_CNT_0 0x548394
+
+#define mmDMA2_QM_CP_FENCE3_CNT_1 0x548398
+
+#define mmDMA2_QM_CP_FENCE3_CNT_2 0x54839C
+
+#define mmDMA2_QM_CP_FENCE3_CNT_3 0x5483A0
+
+#define mmDMA2_QM_CP_FENCE3_CNT_4 0x5483A4
+
+#define mmDMA2_QM_CP_STS_0 0x5483A8
+
+#define mmDMA2_QM_CP_STS_1 0x5483AC
+
+#define mmDMA2_QM_CP_STS_2 0x5483B0
+
+#define mmDMA2_QM_CP_STS_3 0x5483B4
+
+#define mmDMA2_QM_CP_STS_4 0x5483B8
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_0 0x5483BC
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_1 0x5483C0
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_2 0x5483C4
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_3 0x5483C8
+
+#define mmDMA2_QM_CP_CURRENT_INST_LO_4 0x5483CC
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_0 0x5483D0
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_1 0x5483D4
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_2 0x5483D8
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_3 0x5483DC
+
+#define mmDMA2_QM_CP_CURRENT_INST_HI_4 0x5483E0
+
+#define mmDMA2_QM_CP_BARRIER_CFG_0 0x5483F4
+
+#define mmDMA2_QM_CP_BARRIER_CFG_1 0x5483F8
+
+#define mmDMA2_QM_CP_BARRIER_CFG_2 0x5483FC
+
+#define mmDMA2_QM_CP_BARRIER_CFG_3 0x548400
+
+#define mmDMA2_QM_CP_BARRIER_CFG_4 0x548404
+
+#define mmDMA2_QM_CP_DBG_0_0 0x548408
+
+#define mmDMA2_QM_CP_DBG_0_1 0x54840C
+
+#define mmDMA2_QM_CP_DBG_0_2 0x548410
+
+#define mmDMA2_QM_CP_DBG_0_3 0x548414
+
+#define mmDMA2_QM_CP_DBG_0_4 0x548418
+
+#define mmDMA2_QM_CP_ARUSER_31_11_0 0x54841C
+
+#define mmDMA2_QM_CP_ARUSER_31_11_1 0x548420
+
+#define mmDMA2_QM_CP_ARUSER_31_11_2 0x548424
+
+#define mmDMA2_QM_CP_ARUSER_31_11_3 0x548428
+
+#define mmDMA2_QM_CP_ARUSER_31_11_4 0x54842C
+
+#define mmDMA2_QM_CP_AWUSER_31_11_0 0x548430
+
+#define mmDMA2_QM_CP_AWUSER_31_11_1 0x548434
+
+#define mmDMA2_QM_CP_AWUSER_31_11_2 0x548438
+
+#define mmDMA2_QM_CP_AWUSER_31_11_3 0x54843C
+
+#define mmDMA2_QM_CP_AWUSER_31_11_4 0x548440
+
+#define mmDMA2_QM_ARB_CFG_0 0x548A00
+
+#define mmDMA2_QM_ARB_CHOISE_Q_PUSH 0x548A04
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_0 0x548A08
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_1 0x548A0C
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_2 0x548A10
+
+#define mmDMA2_QM_ARB_WRR_WEIGHT_3 0x548A14
+
+#define mmDMA2_QM_ARB_CFG_1 0x548A18
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_0 0x548A20
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_1 0x548A24
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_2 0x548A28
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_3 0x548A2C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_4 0x548A30
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_5 0x548A34
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_6 0x548A38
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_7 0x548A3C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_8 0x548A40
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_9 0x548A44
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_10 0x548A48
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_11 0x548A4C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_12 0x548A50
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_13 0x548A54
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_14 0x548A58
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_15 0x548A5C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_16 0x548A60
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_17 0x548A64
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_18 0x548A68
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_19 0x548A6C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_20 0x548A70
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_21 0x548A74
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_22 0x548A78
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_23 0x548A7C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_24 0x548A80
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_25 0x548A84
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_26 0x548A88
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_27 0x548A8C
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_28 0x548A90
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_29 0x548A94
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_30 0x548A98
+
+#define mmDMA2_QM_ARB_MST_AVAIL_CRED_31 0x548A9C
+
+#define mmDMA2_QM_ARB_MST_CRED_INC 0x548AA0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x548AA4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x548AA8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x548AAC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x548AB0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x548AB4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x548AB8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x548ABC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x548AC0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x548AC4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x548AC8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x548ACC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x548AD0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x548AD4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x548AD8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x548ADC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x548AE0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x548AE4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x548AE8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x548AEC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x548AF0
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x548AF4
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x548AF8
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x548AFC
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x548B00
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x548B04
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x548B08
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x548B0C
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x548B10
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x548B14
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x548B18
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x548B1C
+
+#define mmDMA2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x548B20
+
+#define mmDMA2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x548B28
+
+#define mmDMA2_QM_ARB_MST_SLAVE_EN 0x548B2C
+
+#define mmDMA2_QM_ARB_MST_QUIET_PER 0x548B34
+
+#define mmDMA2_QM_ARB_SLV_CHOISE_WDT 0x548B38
+
+#define mmDMA2_QM_ARB_SLV_ID 0x548B3C
+
+#define mmDMA2_QM_ARB_MSG_MAX_INFLIGHT 0x548B44
+
+#define mmDMA2_QM_ARB_MSG_AWUSER_31_11 0x548B48
+
+#define mmDMA2_QM_ARB_MSG_AWUSER_SEC_PROP 0x548B4C
+
+#define mmDMA2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x548B50
+
+#define mmDMA2_QM_ARB_BASE_LO 0x548B54
+
+#define mmDMA2_QM_ARB_BASE_HI 0x548B58
+
+#define mmDMA2_QM_ARB_STATE_STS 0x548B80
+
+#define mmDMA2_QM_ARB_CHOISE_FULLNESS_STS 0x548B84
+
+#define mmDMA2_QM_ARB_MSG_STS 0x548B88
+
+#define mmDMA2_QM_ARB_SLV_CHOISE_Q_HEAD 0x548B8C
+
+#define mmDMA2_QM_ARB_ERR_CAUSE 0x548B9C
+
+#define mmDMA2_QM_ARB_ERR_MSG_EN 0x548BA0
+
+#define mmDMA2_QM_ARB_ERR_STS_DRP 0x548BA8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_0 0x548BB0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_1 0x548BB4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_2 0x548BB8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_3 0x548BBC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_4 0x548BC0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_5 0x548BC4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_6 0x548BC8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_7 0x548BCC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_8 0x548BD0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_9 0x548BD4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_10 0x548BD8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_11 0x548BDC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_12 0x548BE0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_13 0x548BE4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_14 0x548BE8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_15 0x548BEC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_16 0x548BF0
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_17 0x548BF4
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_18 0x548BF8
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_19 0x548BFC
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_20 0x548C00
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_21 0x548C04
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_22 0x548C08
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_23 0x548C0C
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_24 0x548C10
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_25 0x548C14
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_26 0x548C18
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_27 0x548C1C
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_28 0x548C20
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_29 0x548C24
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_30 0x548C28
+
+#define mmDMA2_QM_ARB_MST_CRED_STS_31 0x548C2C
+
+#define mmDMA2_QM_CGM_CFG 0x548C70
+
+#define mmDMA2_QM_CGM_STS 0x548C74
+
+#define mmDMA2_QM_CGM_CFG1 0x548C78
+
+#define mmDMA2_QM_LOCAL_RANGE_BASE 0x548C80
+
+#define mmDMA2_QM_LOCAL_RANGE_SIZE 0x548C84
+
+#define mmDMA2_QM_CSMR_STRICT_PRIO_CFG 0x548C90
+
+#define mmDMA2_QM_HBW_RD_RATE_LIM_CFG_1 0x548C94
+
+#define mmDMA2_QM_LBW_WR_RATE_LIM_CFG_0 0x548C98
+
+#define mmDMA2_QM_LBW_WR_RATE_LIM_CFG_1 0x548C9C
+
+#define mmDMA2_QM_HBW_RD_RATE_LIM_CFG_0 0x548CA0
+
+#define mmDMA2_QM_GLBL_AXCACHE 0x548CA4
+
+#define mmDMA2_QM_IND_GW_APB_CFG 0x548CB0
+
+#define mmDMA2_QM_IND_GW_APB_WDATA 0x548CB4
+
+#define mmDMA2_QM_IND_GW_APB_RDATA 0x548CB8
+
+#define mmDMA2_QM_IND_GW_APB_STATUS 0x548CBC
+
+#define mmDMA2_QM_GLBL_ERR_ADDR_LO 0x548CD0
+
+#define mmDMA2_QM_GLBL_ERR_ADDR_HI 0x548CD4
+
+#define mmDMA2_QM_GLBL_ERR_WDATA 0x548CD8
+
+#define mmDMA2_QM_GLBL_MEM_INIT_BUSY 0x548D00
+
+#endif /* ASIC_REG_DMA2_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
new file mode 100644
index 000000000000..fb145f416fe6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA3_CORE_REGS_H_
+#define ASIC_REG_DMA3_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA3_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA3_CORE_CFG_0 0x560000
+
+#define mmDMA3_CORE_CFG_1 0x560004
+
+#define mmDMA3_CORE_LBW_MAX_OUTSTAND 0x560008
+
+#define mmDMA3_CORE_SRC_BASE_LO 0x560014
+
+#define mmDMA3_CORE_SRC_BASE_HI 0x560018
+
+#define mmDMA3_CORE_DST_BASE_LO 0x56001C
+
+#define mmDMA3_CORE_DST_BASE_HI 0x560020
+
+#define mmDMA3_CORE_SRC_TSIZE_1 0x56002C
+
+#define mmDMA3_CORE_SRC_STRIDE_1 0x560030
+
+#define mmDMA3_CORE_SRC_TSIZE_2 0x560034
+
+#define mmDMA3_CORE_SRC_STRIDE_2 0x560038
+
+#define mmDMA3_CORE_SRC_TSIZE_3 0x56003C
+
+#define mmDMA3_CORE_SRC_STRIDE_3 0x560040
+
+#define mmDMA3_CORE_SRC_TSIZE_4 0x560044
+
+#define mmDMA3_CORE_SRC_STRIDE_4 0x560048
+
+#define mmDMA3_CORE_SRC_TSIZE_0 0x56004C
+
+#define mmDMA3_CORE_DST_TSIZE_1 0x560054
+
+#define mmDMA3_CORE_DST_STRIDE_1 0x560058
+
+#define mmDMA3_CORE_DST_TSIZE_2 0x56005C
+
+#define mmDMA3_CORE_DST_STRIDE_2 0x560060
+
+#define mmDMA3_CORE_DST_TSIZE_3 0x560064
+
+#define mmDMA3_CORE_DST_STRIDE_3 0x560068
+
+#define mmDMA3_CORE_DST_TSIZE_4 0x56006C
+
+#define mmDMA3_CORE_DST_STRIDE_4 0x560070
+
+#define mmDMA3_CORE_DST_TSIZE_0 0x560074
+
+#define mmDMA3_CORE_COMMIT 0x560078
+
+#define mmDMA3_CORE_WR_COMP_WDATA 0x56007C
+
+#define mmDMA3_CORE_WR_COMP_ADDR_LO 0x560080
+
+#define mmDMA3_CORE_WR_COMP_ADDR_HI 0x560084
+
+#define mmDMA3_CORE_WR_COMP_AWUSER_31_11 0x560088
+
+#define mmDMA3_CORE_TE_NUMROWS 0x560094
+
+#define mmDMA3_CORE_PROT 0x5600B8
+
+#define mmDMA3_CORE_SECURE_PROPS 0x5600F0
+
+#define mmDMA3_CORE_NON_SECURE_PROPS 0x5600F4
+
+#define mmDMA3_CORE_RD_MAX_OUTSTAND 0x560100
+
+#define mmDMA3_CORE_RD_MAX_SIZE 0x560104
+
+#define mmDMA3_CORE_RD_ARCACHE 0x560108
+
+#define mmDMA3_CORE_RD_ARUSER_31_11 0x560110
+
+#define mmDMA3_CORE_RD_INFLIGHTS 0x560114
+
+#define mmDMA3_CORE_WR_MAX_OUTSTAND 0x560120
+
+#define mmDMA3_CORE_WR_MAX_AWID 0x560124
+
+#define mmDMA3_CORE_WR_AWCACHE 0x560128
+
+#define mmDMA3_CORE_WR_AWUSER_31_11 0x560130
+
+#define mmDMA3_CORE_WR_INFLIGHTS 0x560134
+
+#define mmDMA3_CORE_RD_RATE_LIM_CFG_0 0x560150
+
+#define mmDMA3_CORE_RD_RATE_LIM_CFG_1 0x560154
+
+#define mmDMA3_CORE_WR_RATE_LIM_CFG_0 0x560158
+
+#define mmDMA3_CORE_WR_RATE_LIM_CFG_1 0x56015C
+
+#define mmDMA3_CORE_ERR_CFG 0x560160
+
+#define mmDMA3_CORE_ERR_CAUSE 0x560164
+
+#define mmDMA3_CORE_ERRMSG_ADDR_LO 0x560170
+
+#define mmDMA3_CORE_ERRMSG_ADDR_HI 0x560174
+
+#define mmDMA3_CORE_ERRMSG_WDATA 0x560178
+
+#define mmDMA3_CORE_STS0 0x560190
+
+#define mmDMA3_CORE_STS1 0x560194
+
+#define mmDMA3_CORE_RD_DBGMEM_ADD 0x560200
+
+#define mmDMA3_CORE_RD_DBGMEM_DATA_WR 0x560204
+
+#define mmDMA3_CORE_RD_DBGMEM_DATA_RD 0x560208
+
+#define mmDMA3_CORE_RD_DBGMEM_CTRL 0x56020C
+
+#define mmDMA3_CORE_RD_DBGMEM_RC 0x560210
+
+#define mmDMA3_CORE_DBG_HBW_AXI_AR_CNT 0x560220
+
+#define mmDMA3_CORE_DBG_HBW_AXI_AW_CNT 0x560224
+
+#define mmDMA3_CORE_DBG_LBW_AXI_AW_CNT 0x560228
+
+#define mmDMA3_CORE_DBG_DESC_CNT 0x56022C
+
+#define mmDMA3_CORE_DBG_STS 0x560230
+
+#define mmDMA3_CORE_DBG_RD_DESC_ID 0x560234
+
+#define mmDMA3_CORE_DBG_WR_DESC_ID 0x560238
+
+#endif /* ASIC_REG_DMA3_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
new file mode 100644
index 000000000000..a4b461ca3d94
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA3_QM_REGS_H_
+#define ASIC_REG_DMA3_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA3_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA3_QM_GLBL_CFG0 0x568000
+
+#define mmDMA3_QM_GLBL_CFG1 0x568004
+
+#define mmDMA3_QM_GLBL_PROT 0x568008
+
+#define mmDMA3_QM_GLBL_ERR_CFG 0x56800C
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_0 0x568010
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_1 0x568014
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_2 0x568018
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_3 0x56801C
+
+#define mmDMA3_QM_GLBL_SECURE_PROPS_4 0x568020
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_0 0x568024
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_1 0x568028
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_2 0x56802C
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_3 0x568030
+
+#define mmDMA3_QM_GLBL_NON_SECURE_PROPS_4 0x568034
+
+#define mmDMA3_QM_GLBL_STS0 0x568038
+
+#define mmDMA3_QM_GLBL_STS1_0 0x568040
+
+#define mmDMA3_QM_GLBL_STS1_1 0x568044
+
+#define mmDMA3_QM_GLBL_STS1_2 0x568048
+
+#define mmDMA3_QM_GLBL_STS1_3 0x56804C
+
+#define mmDMA3_QM_GLBL_STS1_4 0x568050
+
+#define mmDMA3_QM_GLBL_MSG_EN_0 0x568054
+
+#define mmDMA3_QM_GLBL_MSG_EN_1 0x568058
+
+#define mmDMA3_QM_GLBL_MSG_EN_2 0x56805C
+
+#define mmDMA3_QM_GLBL_MSG_EN_3 0x568060
+
+#define mmDMA3_QM_GLBL_MSG_EN_4 0x568068
+
+#define mmDMA3_QM_PQ_BASE_LO_0 0x568070
+
+#define mmDMA3_QM_PQ_BASE_LO_1 0x568074
+
+#define mmDMA3_QM_PQ_BASE_LO_2 0x568078
+
+#define mmDMA3_QM_PQ_BASE_LO_3 0x56807C
+
+#define mmDMA3_QM_PQ_BASE_HI_0 0x568080
+
+#define mmDMA3_QM_PQ_BASE_HI_1 0x568084
+
+#define mmDMA3_QM_PQ_BASE_HI_2 0x568088
+
+#define mmDMA3_QM_PQ_BASE_HI_3 0x56808C
+
+#define mmDMA3_QM_PQ_SIZE_0 0x568090
+
+#define mmDMA3_QM_PQ_SIZE_1 0x568094
+
+#define mmDMA3_QM_PQ_SIZE_2 0x568098
+
+#define mmDMA3_QM_PQ_SIZE_3 0x56809C
+
+#define mmDMA3_QM_PQ_PI_0 0x5680A0
+
+#define mmDMA3_QM_PQ_PI_1 0x5680A4
+
+#define mmDMA3_QM_PQ_PI_2 0x5680A8
+
+#define mmDMA3_QM_PQ_PI_3 0x5680AC
+
+#define mmDMA3_QM_PQ_CI_0 0x5680B0
+
+#define mmDMA3_QM_PQ_CI_1 0x5680B4
+
+#define mmDMA3_QM_PQ_CI_2 0x5680B8
+
+#define mmDMA3_QM_PQ_CI_3 0x5680BC
+
+#define mmDMA3_QM_PQ_CFG0_0 0x5680C0
+
+#define mmDMA3_QM_PQ_CFG0_1 0x5680C4
+
+#define mmDMA3_QM_PQ_CFG0_2 0x5680C8
+
+#define mmDMA3_QM_PQ_CFG0_3 0x5680CC
+
+#define mmDMA3_QM_PQ_CFG1_0 0x5680D0
+
+#define mmDMA3_QM_PQ_CFG1_1 0x5680D4
+
+#define mmDMA3_QM_PQ_CFG1_2 0x5680D8
+
+#define mmDMA3_QM_PQ_CFG1_3 0x5680DC
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_0 0x5680E0
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_1 0x5680E4
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_2 0x5680E8
+
+#define mmDMA3_QM_PQ_ARUSER_31_11_3 0x5680EC
+
+#define mmDMA3_QM_PQ_STS0_0 0x5680F0
+
+#define mmDMA3_QM_PQ_STS0_1 0x5680F4
+
+#define mmDMA3_QM_PQ_STS0_2 0x5680F8
+
+#define mmDMA3_QM_PQ_STS0_3 0x5680FC
+
+#define mmDMA3_QM_PQ_STS1_0 0x568100
+
+#define mmDMA3_QM_PQ_STS1_1 0x568104
+
+#define mmDMA3_QM_PQ_STS1_2 0x568108
+
+#define mmDMA3_QM_PQ_STS1_3 0x56810C
+
+#define mmDMA3_QM_CQ_CFG0_0 0x568110
+
+#define mmDMA3_QM_CQ_CFG0_1 0x568114
+
+#define mmDMA3_QM_CQ_CFG0_2 0x568118
+
+#define mmDMA3_QM_CQ_CFG0_3 0x56811C
+
+#define mmDMA3_QM_CQ_CFG0_4 0x568120
+
+#define mmDMA3_QM_CQ_CFG1_0 0x568124
+
+#define mmDMA3_QM_CQ_CFG1_1 0x568128
+
+#define mmDMA3_QM_CQ_CFG1_2 0x56812C
+
+#define mmDMA3_QM_CQ_CFG1_3 0x568130
+
+#define mmDMA3_QM_CQ_CFG1_4 0x568134
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_0 0x568138
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_1 0x56813C
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_2 0x568140
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_3 0x568144
+
+#define mmDMA3_QM_CQ_ARUSER_31_11_4 0x568148
+
+#define mmDMA3_QM_CQ_STS0_0 0x56814C
+
+#define mmDMA3_QM_CQ_STS0_1 0x568150
+
+#define mmDMA3_QM_CQ_STS0_2 0x568154
+
+#define mmDMA3_QM_CQ_STS0_3 0x568158
+
+#define mmDMA3_QM_CQ_STS0_4 0x56815C
+
+#define mmDMA3_QM_CQ_STS1_0 0x568160
+
+#define mmDMA3_QM_CQ_STS1_1 0x568164
+
+#define mmDMA3_QM_CQ_STS1_2 0x568168
+
+#define mmDMA3_QM_CQ_STS1_3 0x56816C
+
+#define mmDMA3_QM_CQ_STS1_4 0x568170
+
+#define mmDMA3_QM_CQ_PTR_LO_0 0x568174
+
+#define mmDMA3_QM_CQ_PTR_HI_0 0x568178
+
+#define mmDMA3_QM_CQ_TSIZE_0 0x56817C
+
+#define mmDMA3_QM_CQ_CTL_0 0x568180
+
+#define mmDMA3_QM_CQ_PTR_LO_1 0x568184
+
+#define mmDMA3_QM_CQ_PTR_HI_1 0x568188
+
+#define mmDMA3_QM_CQ_TSIZE_1 0x56818C
+
+#define mmDMA3_QM_CQ_CTL_1 0x568190
+
+#define mmDMA3_QM_CQ_PTR_LO_2 0x568194
+
+#define mmDMA3_QM_CQ_PTR_HI_2 0x568198
+
+#define mmDMA3_QM_CQ_TSIZE_2 0x56819C
+
+#define mmDMA3_QM_CQ_CTL_2 0x5681A0
+
+#define mmDMA3_QM_CQ_PTR_LO_3 0x5681A4
+
+#define mmDMA3_QM_CQ_PTR_HI_3 0x5681A8
+
+#define mmDMA3_QM_CQ_TSIZE_3 0x5681AC
+
+#define mmDMA3_QM_CQ_CTL_3 0x5681B0
+
+#define mmDMA3_QM_CQ_PTR_LO_4 0x5681B4
+
+#define mmDMA3_QM_CQ_PTR_HI_4 0x5681B8
+
+#define mmDMA3_QM_CQ_TSIZE_4 0x5681BC
+
+#define mmDMA3_QM_CQ_CTL_4 0x5681C0
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_0 0x5681C4
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_1 0x5681C8
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_2 0x5681CC
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_3 0x5681D0
+
+#define mmDMA3_QM_CQ_PTR_LO_STS_4 0x5681D4
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_0 0x5681D8
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_1 0x5681DC
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_2 0x5681E0
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_3 0x5681E4
+
+#define mmDMA3_QM_CQ_PTR_HI_STS_4 0x5681E8
+
+#define mmDMA3_QM_CQ_TSIZE_STS_0 0x5681EC
+
+#define mmDMA3_QM_CQ_TSIZE_STS_1 0x5681F0
+
+#define mmDMA3_QM_CQ_TSIZE_STS_2 0x5681F4
+
+#define mmDMA3_QM_CQ_TSIZE_STS_3 0x5681F8
+
+#define mmDMA3_QM_CQ_TSIZE_STS_4 0x5681FC
+
+#define mmDMA3_QM_CQ_CTL_STS_0 0x568200
+
+#define mmDMA3_QM_CQ_CTL_STS_1 0x568204
+
+#define mmDMA3_QM_CQ_CTL_STS_2 0x568208
+
+#define mmDMA3_QM_CQ_CTL_STS_3 0x56820C
+
+#define mmDMA3_QM_CQ_CTL_STS_4 0x568210
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_0 0x568214
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_1 0x568218
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_2 0x56821C
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_3 0x568220
+
+#define mmDMA3_QM_CQ_IFIFO_CNT_4 0x568224
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_0 0x568228
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_1 0x56822C
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_2 0x568230
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_3 0x568234
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_LO_4 0x568238
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_0 0x56823C
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_1 0x568240
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_2 0x568244
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_3 0x568248
+
+#define mmDMA3_QM_CP_MSG_BASE0_ADDR_HI_4 0x56824C
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_0 0x568250
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_1 0x568254
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_2 0x568258
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_3 0x56825C
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_LO_4 0x568260
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_0 0x568264
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_1 0x568268
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_2 0x56826C
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_3 0x568270
+
+#define mmDMA3_QM_CP_MSG_BASE1_ADDR_HI_4 0x568274
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_0 0x568278
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_1 0x56827C
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_2 0x568280
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_3 0x568284
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_LO_4 0x568288
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_0 0x56828C
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_1 0x568290
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_2 0x568294
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_3 0x568298
+
+#define mmDMA3_QM_CP_MSG_BASE2_ADDR_HI_4 0x56829C
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_0 0x5682A0
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_1 0x5682A4
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_2 0x5682A8
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_3 0x5682AC
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_LO_4 0x5682B0
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_0 0x5682B4
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_1 0x5682B8
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_2 0x5682BC
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_3 0x5682C0
+
+#define mmDMA3_QM_CP_MSG_BASE3_ADDR_HI_4 0x5682C4
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_0 0x5682C8
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_1 0x5682CC
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_2 0x5682D0
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_3 0x5682D4
+
+#define mmDMA3_QM_CP_LDMA_TSIZE_OFFSET_4 0x5682D8
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5682E0
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5682E4
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5682E8
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5682EC
+
+#define mmDMA3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5682F0
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5682F4
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5682F8
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5682FC
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x568300
+
+#define mmDMA3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x568304
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_0 0x568308
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_1 0x56830C
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_2 0x568310
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_3 0x568314
+
+#define mmDMA3_QM_CP_FENCE0_RDATA_4 0x568318
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_0 0x56831C
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_1 0x568320
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_2 0x568324
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_3 0x568328
+
+#define mmDMA3_QM_CP_FENCE1_RDATA_4 0x56832C
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_0 0x568330
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_1 0x568334
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_2 0x568338
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_3 0x56833C
+
+#define mmDMA3_QM_CP_FENCE2_RDATA_4 0x568340
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_0 0x568344
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_1 0x568348
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_2 0x56834C
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_3 0x568350
+
+#define mmDMA3_QM_CP_FENCE3_RDATA_4 0x568354
+
+#define mmDMA3_QM_CP_FENCE0_CNT_0 0x568358
+
+#define mmDMA3_QM_CP_FENCE0_CNT_1 0x56835C
+
+#define mmDMA3_QM_CP_FENCE0_CNT_2 0x568360
+
+#define mmDMA3_QM_CP_FENCE0_CNT_3 0x568364
+
+#define mmDMA3_QM_CP_FENCE0_CNT_4 0x568368
+
+#define mmDMA3_QM_CP_FENCE1_CNT_0 0x56836C
+
+#define mmDMA3_QM_CP_FENCE1_CNT_1 0x568370
+
+#define mmDMA3_QM_CP_FENCE1_CNT_2 0x568374
+
+#define mmDMA3_QM_CP_FENCE1_CNT_3 0x568378
+
+#define mmDMA3_QM_CP_FENCE1_CNT_4 0x56837C
+
+#define mmDMA3_QM_CP_FENCE2_CNT_0 0x568380
+
+#define mmDMA3_QM_CP_FENCE2_CNT_1 0x568384
+
+#define mmDMA3_QM_CP_FENCE2_CNT_2 0x568388
+
+#define mmDMA3_QM_CP_FENCE2_CNT_3 0x56838C
+
+#define mmDMA3_QM_CP_FENCE2_CNT_4 0x568390
+
+#define mmDMA3_QM_CP_FENCE3_CNT_0 0x568394
+
+#define mmDMA3_QM_CP_FENCE3_CNT_1 0x568398
+
+#define mmDMA3_QM_CP_FENCE3_CNT_2 0x56839C
+
+#define mmDMA3_QM_CP_FENCE3_CNT_3 0x5683A0
+
+#define mmDMA3_QM_CP_FENCE3_CNT_4 0x5683A4
+
+#define mmDMA3_QM_CP_STS_0 0x5683A8
+
+#define mmDMA3_QM_CP_STS_1 0x5683AC
+
+#define mmDMA3_QM_CP_STS_2 0x5683B0
+
+#define mmDMA3_QM_CP_STS_3 0x5683B4
+
+#define mmDMA3_QM_CP_STS_4 0x5683B8
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_0 0x5683BC
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_1 0x5683C0
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_2 0x5683C4
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_3 0x5683C8
+
+#define mmDMA3_QM_CP_CURRENT_INST_LO_4 0x5683CC
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_0 0x5683D0
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_1 0x5683D4
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_2 0x5683D8
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_3 0x5683DC
+
+#define mmDMA3_QM_CP_CURRENT_INST_HI_4 0x5683E0
+
+#define mmDMA3_QM_CP_BARRIER_CFG_0 0x5683F4
+
+#define mmDMA3_QM_CP_BARRIER_CFG_1 0x5683F8
+
+#define mmDMA3_QM_CP_BARRIER_CFG_2 0x5683FC
+
+#define mmDMA3_QM_CP_BARRIER_CFG_3 0x568400
+
+#define mmDMA3_QM_CP_BARRIER_CFG_4 0x568404
+
+#define mmDMA3_QM_CP_DBG_0_0 0x568408
+
+#define mmDMA3_QM_CP_DBG_0_1 0x56840C
+
+#define mmDMA3_QM_CP_DBG_0_2 0x568410
+
+#define mmDMA3_QM_CP_DBG_0_3 0x568414
+
+#define mmDMA3_QM_CP_DBG_0_4 0x568418
+
+#define mmDMA3_QM_CP_ARUSER_31_11_0 0x56841C
+
+#define mmDMA3_QM_CP_ARUSER_31_11_1 0x568420
+
+#define mmDMA3_QM_CP_ARUSER_31_11_2 0x568424
+
+#define mmDMA3_QM_CP_ARUSER_31_11_3 0x568428
+
+#define mmDMA3_QM_CP_ARUSER_31_11_4 0x56842C
+
+#define mmDMA3_QM_CP_AWUSER_31_11_0 0x568430
+
+#define mmDMA3_QM_CP_AWUSER_31_11_1 0x568434
+
+#define mmDMA3_QM_CP_AWUSER_31_11_2 0x568438
+
+#define mmDMA3_QM_CP_AWUSER_31_11_3 0x56843C
+
+#define mmDMA3_QM_CP_AWUSER_31_11_4 0x568440
+
+#define mmDMA3_QM_ARB_CFG_0 0x568A00
+
+#define mmDMA3_QM_ARB_CHOISE_Q_PUSH 0x568A04
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_0 0x568A08
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_1 0x568A0C
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_2 0x568A10
+
+#define mmDMA3_QM_ARB_WRR_WEIGHT_3 0x568A14
+
+#define mmDMA3_QM_ARB_CFG_1 0x568A18
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_0 0x568A20
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_1 0x568A24
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_2 0x568A28
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_3 0x568A2C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_4 0x568A30
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_5 0x568A34
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_6 0x568A38
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_7 0x568A3C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_8 0x568A40
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_9 0x568A44
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_10 0x568A48
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_11 0x568A4C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_12 0x568A50
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_13 0x568A54
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_14 0x568A58
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_15 0x568A5C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_16 0x568A60
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_17 0x568A64
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_18 0x568A68
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_19 0x568A6C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_20 0x568A70
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_21 0x568A74
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_22 0x568A78
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_23 0x568A7C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_24 0x568A80
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_25 0x568A84
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_26 0x568A88
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_27 0x568A8C
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_28 0x568A90
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_29 0x568A94
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_30 0x568A98
+
+#define mmDMA3_QM_ARB_MST_AVAIL_CRED_31 0x568A9C
+
+#define mmDMA3_QM_ARB_MST_CRED_INC 0x568AA0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x568AA4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x568AA8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x568AAC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x568AB0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x568AB4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x568AB8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x568ABC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x568AC0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x568AC4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x568AC8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x568ACC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x568AD0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x568AD4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x568AD8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x568ADC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x568AE0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x568AE4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x568AE8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x568AEC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x568AF0
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x568AF4
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x568AF8
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x568AFC
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x568B00
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x568B04
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x568B08
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x568B0C
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x568B10
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x568B14
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x568B18
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x568B1C
+
+#define mmDMA3_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x568B20
+
+#define mmDMA3_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x568B28
+
+#define mmDMA3_QM_ARB_MST_SLAVE_EN 0x568B2C
+
+#define mmDMA3_QM_ARB_MST_QUIET_PER 0x568B34
+
+#define mmDMA3_QM_ARB_SLV_CHOISE_WDT 0x568B38
+
+#define mmDMA3_QM_ARB_SLV_ID 0x568B3C
+
+#define mmDMA3_QM_ARB_MSG_MAX_INFLIGHT 0x568B44
+
+#define mmDMA3_QM_ARB_MSG_AWUSER_31_11 0x568B48
+
+#define mmDMA3_QM_ARB_MSG_AWUSER_SEC_PROP 0x568B4C
+
+#define mmDMA3_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x568B50
+
+#define mmDMA3_QM_ARB_BASE_LO 0x568B54
+
+#define mmDMA3_QM_ARB_BASE_HI 0x568B58
+
+#define mmDMA3_QM_ARB_STATE_STS 0x568B80
+
+#define mmDMA3_QM_ARB_CHOISE_FULLNESS_STS 0x568B84
+
+#define mmDMA3_QM_ARB_MSG_STS 0x568B88
+
+#define mmDMA3_QM_ARB_SLV_CHOISE_Q_HEAD 0x568B8C
+
+#define mmDMA3_QM_ARB_ERR_CAUSE 0x568B9C
+
+#define mmDMA3_QM_ARB_ERR_MSG_EN 0x568BA0
+
+#define mmDMA3_QM_ARB_ERR_STS_DRP 0x568BA8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_0 0x568BB0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_1 0x568BB4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_2 0x568BB8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_3 0x568BBC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_4 0x568BC0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_5 0x568BC4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_6 0x568BC8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_7 0x568BCC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_8 0x568BD0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_9 0x568BD4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_10 0x568BD8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_11 0x568BDC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_12 0x568BE0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_13 0x568BE4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_14 0x568BE8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_15 0x568BEC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_16 0x568BF0
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_17 0x568BF4
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_18 0x568BF8
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_19 0x568BFC
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_20 0x568C00
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_21 0x568C04
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_22 0x568C08
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_23 0x568C0C
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_24 0x568C10
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_25 0x568C14
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_26 0x568C18
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_27 0x568C1C
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_28 0x568C20
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_29 0x568C24
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_30 0x568C28
+
+#define mmDMA3_QM_ARB_MST_CRED_STS_31 0x568C2C
+
+#define mmDMA3_QM_CGM_CFG 0x568C70
+
+#define mmDMA3_QM_CGM_STS 0x568C74
+
+#define mmDMA3_QM_CGM_CFG1 0x568C78
+
+#define mmDMA3_QM_LOCAL_RANGE_BASE 0x568C80
+
+#define mmDMA3_QM_LOCAL_RANGE_SIZE 0x568C84
+
+#define mmDMA3_QM_CSMR_STRICT_PRIO_CFG 0x568C90
+
+#define mmDMA3_QM_HBW_RD_RATE_LIM_CFG_1 0x568C94
+
+#define mmDMA3_QM_LBW_WR_RATE_LIM_CFG_0 0x568C98
+
+#define mmDMA3_QM_LBW_WR_RATE_LIM_CFG_1 0x568C9C
+
+#define mmDMA3_QM_HBW_RD_RATE_LIM_CFG_0 0x568CA0
+
+#define mmDMA3_QM_GLBL_AXCACHE 0x568CA4
+
+#define mmDMA3_QM_IND_GW_APB_CFG 0x568CB0
+
+#define mmDMA3_QM_IND_GW_APB_WDATA 0x568CB4
+
+#define mmDMA3_QM_IND_GW_APB_RDATA 0x568CB8
+
+#define mmDMA3_QM_IND_GW_APB_STATUS 0x568CBC
+
+#define mmDMA3_QM_GLBL_ERR_ADDR_LO 0x568CD0
+
+#define mmDMA3_QM_GLBL_ERR_ADDR_HI 0x568CD4
+
+#define mmDMA3_QM_GLBL_ERR_WDATA 0x568CD8
+
+#define mmDMA3_QM_GLBL_MEM_INIT_BUSY 0x568D00
+
+#endif /* ASIC_REG_DMA3_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
new file mode 100644
index 000000000000..192d11404b1c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA4_CORE_REGS_H_
+#define ASIC_REG_DMA4_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA4_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA4_CORE_CFG_0 0x580000
+
+#define mmDMA4_CORE_CFG_1 0x580004
+
+#define mmDMA4_CORE_LBW_MAX_OUTSTAND 0x580008
+
+#define mmDMA4_CORE_SRC_BASE_LO 0x580014
+
+#define mmDMA4_CORE_SRC_BASE_HI 0x580018
+
+#define mmDMA4_CORE_DST_BASE_LO 0x58001C
+
+#define mmDMA4_CORE_DST_BASE_HI 0x580020
+
+#define mmDMA4_CORE_SRC_TSIZE_1 0x58002C
+
+#define mmDMA4_CORE_SRC_STRIDE_1 0x580030
+
+#define mmDMA4_CORE_SRC_TSIZE_2 0x580034
+
+#define mmDMA4_CORE_SRC_STRIDE_2 0x580038
+
+#define mmDMA4_CORE_SRC_TSIZE_3 0x58003C
+
+#define mmDMA4_CORE_SRC_STRIDE_3 0x580040
+
+#define mmDMA4_CORE_SRC_TSIZE_4 0x580044
+
+#define mmDMA4_CORE_SRC_STRIDE_4 0x580048
+
+#define mmDMA4_CORE_SRC_TSIZE_0 0x58004C
+
+#define mmDMA4_CORE_DST_TSIZE_1 0x580054
+
+#define mmDMA4_CORE_DST_STRIDE_1 0x580058
+
+#define mmDMA4_CORE_DST_TSIZE_2 0x58005C
+
+#define mmDMA4_CORE_DST_STRIDE_2 0x580060
+
+#define mmDMA4_CORE_DST_TSIZE_3 0x580064
+
+#define mmDMA4_CORE_DST_STRIDE_3 0x580068
+
+#define mmDMA4_CORE_DST_TSIZE_4 0x58006C
+
+#define mmDMA4_CORE_DST_STRIDE_4 0x580070
+
+#define mmDMA4_CORE_DST_TSIZE_0 0x580074
+
+#define mmDMA4_CORE_COMMIT 0x580078
+
+#define mmDMA4_CORE_WR_COMP_WDATA 0x58007C
+
+#define mmDMA4_CORE_WR_COMP_ADDR_LO 0x580080
+
+#define mmDMA4_CORE_WR_COMP_ADDR_HI 0x580084
+
+#define mmDMA4_CORE_WR_COMP_AWUSER_31_11 0x580088
+
+#define mmDMA4_CORE_TE_NUMROWS 0x580094
+
+#define mmDMA4_CORE_PROT 0x5800B8
+
+#define mmDMA4_CORE_SECURE_PROPS 0x5800F0
+
+#define mmDMA4_CORE_NON_SECURE_PROPS 0x5800F4
+
+#define mmDMA4_CORE_RD_MAX_OUTSTAND 0x580100
+
+#define mmDMA4_CORE_RD_MAX_SIZE 0x580104
+
+#define mmDMA4_CORE_RD_ARCACHE 0x580108
+
+#define mmDMA4_CORE_RD_ARUSER_31_11 0x580110
+
+#define mmDMA4_CORE_RD_INFLIGHTS 0x580114
+
+#define mmDMA4_CORE_WR_MAX_OUTSTAND 0x580120
+
+#define mmDMA4_CORE_WR_MAX_AWID 0x580124
+
+#define mmDMA4_CORE_WR_AWCACHE 0x580128
+
+#define mmDMA4_CORE_WR_AWUSER_31_11 0x580130
+
+#define mmDMA4_CORE_WR_INFLIGHTS 0x580134
+
+#define mmDMA4_CORE_RD_RATE_LIM_CFG_0 0x580150
+
+#define mmDMA4_CORE_RD_RATE_LIM_CFG_1 0x580154
+
+#define mmDMA4_CORE_WR_RATE_LIM_CFG_0 0x580158
+
+#define mmDMA4_CORE_WR_RATE_LIM_CFG_1 0x58015C
+
+#define mmDMA4_CORE_ERR_CFG 0x580160
+
+#define mmDMA4_CORE_ERR_CAUSE 0x580164
+
+#define mmDMA4_CORE_ERRMSG_ADDR_LO 0x580170
+
+#define mmDMA4_CORE_ERRMSG_ADDR_HI 0x580174
+
+#define mmDMA4_CORE_ERRMSG_WDATA 0x580178
+
+#define mmDMA4_CORE_STS0 0x580190
+
+#define mmDMA4_CORE_STS1 0x580194
+
+#define mmDMA4_CORE_RD_DBGMEM_ADD 0x580200
+
+#define mmDMA4_CORE_RD_DBGMEM_DATA_WR 0x580204
+
+#define mmDMA4_CORE_RD_DBGMEM_DATA_RD 0x580208
+
+#define mmDMA4_CORE_RD_DBGMEM_CTRL 0x58020C
+
+#define mmDMA4_CORE_RD_DBGMEM_RC 0x580210
+
+#define mmDMA4_CORE_DBG_HBW_AXI_AR_CNT 0x580220
+
+#define mmDMA4_CORE_DBG_HBW_AXI_AW_CNT 0x580224
+
+#define mmDMA4_CORE_DBG_LBW_AXI_AW_CNT 0x580228
+
+#define mmDMA4_CORE_DBG_DESC_CNT 0x58022C
+
+#define mmDMA4_CORE_DBG_STS 0x580230
+
+#define mmDMA4_CORE_DBG_RD_DESC_ID 0x580234
+
+#define mmDMA4_CORE_DBG_WR_DESC_ID 0x580238
+
+#endif /* ASIC_REG_DMA4_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
new file mode 100644
index 000000000000..f0cbda0d1e4d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA4_QM_REGS_H_
+#define ASIC_REG_DMA4_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA4_QM_GLBL_CFG0 0x588000
+
+#define mmDMA4_QM_GLBL_CFG1 0x588004
+
+#define mmDMA4_QM_GLBL_PROT 0x588008
+
+#define mmDMA4_QM_GLBL_ERR_CFG 0x58800C
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_0 0x588010
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_1 0x588014
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_2 0x588018
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_3 0x58801C
+
+#define mmDMA4_QM_GLBL_SECURE_PROPS_4 0x588020
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_0 0x588024
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_1 0x588028
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_2 0x58802C
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_3 0x588030
+
+#define mmDMA4_QM_GLBL_NON_SECURE_PROPS_4 0x588034
+
+#define mmDMA4_QM_GLBL_STS0 0x588038
+
+#define mmDMA4_QM_GLBL_STS1_0 0x588040
+
+#define mmDMA4_QM_GLBL_STS1_1 0x588044
+
+#define mmDMA4_QM_GLBL_STS1_2 0x588048
+
+#define mmDMA4_QM_GLBL_STS1_3 0x58804C
+
+#define mmDMA4_QM_GLBL_STS1_4 0x588050
+
+#define mmDMA4_QM_GLBL_MSG_EN_0 0x588054
+
+#define mmDMA4_QM_GLBL_MSG_EN_1 0x588058
+
+#define mmDMA4_QM_GLBL_MSG_EN_2 0x58805C
+
+#define mmDMA4_QM_GLBL_MSG_EN_3 0x588060
+
+#define mmDMA4_QM_GLBL_MSG_EN_4 0x588068
+
+#define mmDMA4_QM_PQ_BASE_LO_0 0x588070
+
+#define mmDMA4_QM_PQ_BASE_LO_1 0x588074
+
+#define mmDMA4_QM_PQ_BASE_LO_2 0x588078
+
+#define mmDMA4_QM_PQ_BASE_LO_3 0x58807C
+
+#define mmDMA4_QM_PQ_BASE_HI_0 0x588080
+
+#define mmDMA4_QM_PQ_BASE_HI_1 0x588084
+
+#define mmDMA4_QM_PQ_BASE_HI_2 0x588088
+
+#define mmDMA4_QM_PQ_BASE_HI_3 0x58808C
+
+#define mmDMA4_QM_PQ_SIZE_0 0x588090
+
+#define mmDMA4_QM_PQ_SIZE_1 0x588094
+
+#define mmDMA4_QM_PQ_SIZE_2 0x588098
+
+#define mmDMA4_QM_PQ_SIZE_3 0x58809C
+
+#define mmDMA4_QM_PQ_PI_0 0x5880A0
+
+#define mmDMA4_QM_PQ_PI_1 0x5880A4
+
+#define mmDMA4_QM_PQ_PI_2 0x5880A8
+
+#define mmDMA4_QM_PQ_PI_3 0x5880AC
+
+#define mmDMA4_QM_PQ_CI_0 0x5880B0
+
+#define mmDMA4_QM_PQ_CI_1 0x5880B4
+
+#define mmDMA4_QM_PQ_CI_2 0x5880B8
+
+#define mmDMA4_QM_PQ_CI_3 0x5880BC
+
+#define mmDMA4_QM_PQ_CFG0_0 0x5880C0
+
+#define mmDMA4_QM_PQ_CFG0_1 0x5880C4
+
+#define mmDMA4_QM_PQ_CFG0_2 0x5880C8
+
+#define mmDMA4_QM_PQ_CFG0_3 0x5880CC
+
+#define mmDMA4_QM_PQ_CFG1_0 0x5880D0
+
+#define mmDMA4_QM_PQ_CFG1_1 0x5880D4
+
+#define mmDMA4_QM_PQ_CFG1_2 0x5880D8
+
+#define mmDMA4_QM_PQ_CFG1_3 0x5880DC
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_0 0x5880E0
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_1 0x5880E4
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_2 0x5880E8
+
+#define mmDMA4_QM_PQ_ARUSER_31_11_3 0x5880EC
+
+#define mmDMA4_QM_PQ_STS0_0 0x5880F0
+
+#define mmDMA4_QM_PQ_STS0_1 0x5880F4
+
+#define mmDMA4_QM_PQ_STS0_2 0x5880F8
+
+#define mmDMA4_QM_PQ_STS0_3 0x5880FC
+
+#define mmDMA4_QM_PQ_STS1_0 0x588100
+
+#define mmDMA4_QM_PQ_STS1_1 0x588104
+
+#define mmDMA4_QM_PQ_STS1_2 0x588108
+
+#define mmDMA4_QM_PQ_STS1_3 0x58810C
+
+#define mmDMA4_QM_CQ_CFG0_0 0x588110
+
+#define mmDMA4_QM_CQ_CFG0_1 0x588114
+
+#define mmDMA4_QM_CQ_CFG0_2 0x588118
+
+#define mmDMA4_QM_CQ_CFG0_3 0x58811C
+
+#define mmDMA4_QM_CQ_CFG0_4 0x588120
+
+#define mmDMA4_QM_CQ_CFG1_0 0x588124
+
+#define mmDMA4_QM_CQ_CFG1_1 0x588128
+
+#define mmDMA4_QM_CQ_CFG1_2 0x58812C
+
+#define mmDMA4_QM_CQ_CFG1_3 0x588130
+
+#define mmDMA4_QM_CQ_CFG1_4 0x588134
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_0 0x588138
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_1 0x58813C
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_2 0x588140
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_3 0x588144
+
+#define mmDMA4_QM_CQ_ARUSER_31_11_4 0x588148
+
+#define mmDMA4_QM_CQ_STS0_0 0x58814C
+
+#define mmDMA4_QM_CQ_STS0_1 0x588150
+
+#define mmDMA4_QM_CQ_STS0_2 0x588154
+
+#define mmDMA4_QM_CQ_STS0_3 0x588158
+
+#define mmDMA4_QM_CQ_STS0_4 0x58815C
+
+#define mmDMA4_QM_CQ_STS1_0 0x588160
+
+#define mmDMA4_QM_CQ_STS1_1 0x588164
+
+#define mmDMA4_QM_CQ_STS1_2 0x588168
+
+#define mmDMA4_QM_CQ_STS1_3 0x58816C
+
+#define mmDMA4_QM_CQ_STS1_4 0x588170
+
+#define mmDMA4_QM_CQ_PTR_LO_0 0x588174
+
+#define mmDMA4_QM_CQ_PTR_HI_0 0x588178
+
+#define mmDMA4_QM_CQ_TSIZE_0 0x58817C
+
+#define mmDMA4_QM_CQ_CTL_0 0x588180
+
+#define mmDMA4_QM_CQ_PTR_LO_1 0x588184
+
+#define mmDMA4_QM_CQ_PTR_HI_1 0x588188
+
+#define mmDMA4_QM_CQ_TSIZE_1 0x58818C
+
+#define mmDMA4_QM_CQ_CTL_1 0x588190
+
+#define mmDMA4_QM_CQ_PTR_LO_2 0x588194
+
+#define mmDMA4_QM_CQ_PTR_HI_2 0x588198
+
+#define mmDMA4_QM_CQ_TSIZE_2 0x58819C
+
+#define mmDMA4_QM_CQ_CTL_2 0x5881A0
+
+#define mmDMA4_QM_CQ_PTR_LO_3 0x5881A4
+
+#define mmDMA4_QM_CQ_PTR_HI_3 0x5881A8
+
+#define mmDMA4_QM_CQ_TSIZE_3 0x5881AC
+
+#define mmDMA4_QM_CQ_CTL_3 0x5881B0
+
+#define mmDMA4_QM_CQ_PTR_LO_4 0x5881B4
+
+#define mmDMA4_QM_CQ_PTR_HI_4 0x5881B8
+
+#define mmDMA4_QM_CQ_TSIZE_4 0x5881BC
+
+#define mmDMA4_QM_CQ_CTL_4 0x5881C0
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_0 0x5881C4
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_1 0x5881C8
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_2 0x5881CC
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_3 0x5881D0
+
+#define mmDMA4_QM_CQ_PTR_LO_STS_4 0x5881D4
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_0 0x5881D8
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_1 0x5881DC
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_2 0x5881E0
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_3 0x5881E4
+
+#define mmDMA4_QM_CQ_PTR_HI_STS_4 0x5881E8
+
+#define mmDMA4_QM_CQ_TSIZE_STS_0 0x5881EC
+
+#define mmDMA4_QM_CQ_TSIZE_STS_1 0x5881F0
+
+#define mmDMA4_QM_CQ_TSIZE_STS_2 0x5881F4
+
+#define mmDMA4_QM_CQ_TSIZE_STS_3 0x5881F8
+
+#define mmDMA4_QM_CQ_TSIZE_STS_4 0x5881FC
+
+#define mmDMA4_QM_CQ_CTL_STS_0 0x588200
+
+#define mmDMA4_QM_CQ_CTL_STS_1 0x588204
+
+#define mmDMA4_QM_CQ_CTL_STS_2 0x588208
+
+#define mmDMA4_QM_CQ_CTL_STS_3 0x58820C
+
+#define mmDMA4_QM_CQ_CTL_STS_4 0x588210
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_0 0x588214
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_1 0x588218
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_2 0x58821C
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_3 0x588220
+
+#define mmDMA4_QM_CQ_IFIFO_CNT_4 0x588224
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_0 0x588228
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_1 0x58822C
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_2 0x588230
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_3 0x588234
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_LO_4 0x588238
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_0 0x58823C
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_1 0x588240
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_2 0x588244
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_3 0x588248
+
+#define mmDMA4_QM_CP_MSG_BASE0_ADDR_HI_4 0x58824C
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_0 0x588250
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_1 0x588254
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_2 0x588258
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_3 0x58825C
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_LO_4 0x588260
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_0 0x588264
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_1 0x588268
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_2 0x58826C
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_3 0x588270
+
+#define mmDMA4_QM_CP_MSG_BASE1_ADDR_HI_4 0x588274
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_0 0x588278
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_1 0x58827C
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_2 0x588280
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_3 0x588284
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_LO_4 0x588288
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_0 0x58828C
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_1 0x588290
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_2 0x588294
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_3 0x588298
+
+#define mmDMA4_QM_CP_MSG_BASE2_ADDR_HI_4 0x58829C
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_0 0x5882A0
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_1 0x5882A4
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_2 0x5882A8
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_3 0x5882AC
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_LO_4 0x5882B0
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_0 0x5882B4
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_1 0x5882B8
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_2 0x5882BC
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_3 0x5882C0
+
+#define mmDMA4_QM_CP_MSG_BASE3_ADDR_HI_4 0x5882C4
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_0 0x5882C8
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_1 0x5882CC
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_2 0x5882D0
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_3 0x5882D4
+
+#define mmDMA4_QM_CP_LDMA_TSIZE_OFFSET_4 0x5882D8
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5882E0
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5882E4
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5882E8
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5882EC
+
+#define mmDMA4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5882F0
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5882F4
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5882F8
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5882FC
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x588300
+
+#define mmDMA4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x588304
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_0 0x588308
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_1 0x58830C
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_2 0x588310
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_3 0x588314
+
+#define mmDMA4_QM_CP_FENCE0_RDATA_4 0x588318
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_0 0x58831C
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_1 0x588320
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_2 0x588324
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_3 0x588328
+
+#define mmDMA4_QM_CP_FENCE1_RDATA_4 0x58832C
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_0 0x588330
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_1 0x588334
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_2 0x588338
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_3 0x58833C
+
+#define mmDMA4_QM_CP_FENCE2_RDATA_4 0x588340
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_0 0x588344
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_1 0x588348
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_2 0x58834C
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_3 0x588350
+
+#define mmDMA4_QM_CP_FENCE3_RDATA_4 0x588354
+
+#define mmDMA4_QM_CP_FENCE0_CNT_0 0x588358
+
+#define mmDMA4_QM_CP_FENCE0_CNT_1 0x58835C
+
+#define mmDMA4_QM_CP_FENCE0_CNT_2 0x588360
+
+#define mmDMA4_QM_CP_FENCE0_CNT_3 0x588364
+
+#define mmDMA4_QM_CP_FENCE0_CNT_4 0x588368
+
+#define mmDMA4_QM_CP_FENCE1_CNT_0 0x58836C
+
+#define mmDMA4_QM_CP_FENCE1_CNT_1 0x588370
+
+#define mmDMA4_QM_CP_FENCE1_CNT_2 0x588374
+
+#define mmDMA4_QM_CP_FENCE1_CNT_3 0x588378
+
+#define mmDMA4_QM_CP_FENCE1_CNT_4 0x58837C
+
+#define mmDMA4_QM_CP_FENCE2_CNT_0 0x588380
+
+#define mmDMA4_QM_CP_FENCE2_CNT_1 0x588384
+
+#define mmDMA4_QM_CP_FENCE2_CNT_2 0x588388
+
+#define mmDMA4_QM_CP_FENCE2_CNT_3 0x58838C
+
+#define mmDMA4_QM_CP_FENCE2_CNT_4 0x588390
+
+#define mmDMA4_QM_CP_FENCE3_CNT_0 0x588394
+
+#define mmDMA4_QM_CP_FENCE3_CNT_1 0x588398
+
+#define mmDMA4_QM_CP_FENCE3_CNT_2 0x58839C
+
+#define mmDMA4_QM_CP_FENCE3_CNT_3 0x5883A0
+
+#define mmDMA4_QM_CP_FENCE3_CNT_4 0x5883A4
+
+#define mmDMA4_QM_CP_STS_0 0x5883A8
+
+#define mmDMA4_QM_CP_STS_1 0x5883AC
+
+#define mmDMA4_QM_CP_STS_2 0x5883B0
+
+#define mmDMA4_QM_CP_STS_3 0x5883B4
+
+#define mmDMA4_QM_CP_STS_4 0x5883B8
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_0 0x5883BC
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_1 0x5883C0
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_2 0x5883C4
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_3 0x5883C8
+
+#define mmDMA4_QM_CP_CURRENT_INST_LO_4 0x5883CC
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_0 0x5883D0
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_1 0x5883D4
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_2 0x5883D8
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_3 0x5883DC
+
+#define mmDMA4_QM_CP_CURRENT_INST_HI_4 0x5883E0
+
+#define mmDMA4_QM_CP_BARRIER_CFG_0 0x5883F4
+
+#define mmDMA4_QM_CP_BARRIER_CFG_1 0x5883F8
+
+#define mmDMA4_QM_CP_BARRIER_CFG_2 0x5883FC
+
+#define mmDMA4_QM_CP_BARRIER_CFG_3 0x588400
+
+#define mmDMA4_QM_CP_BARRIER_CFG_4 0x588404
+
+#define mmDMA4_QM_CP_DBG_0_0 0x588408
+
+#define mmDMA4_QM_CP_DBG_0_1 0x58840C
+
+#define mmDMA4_QM_CP_DBG_0_2 0x588410
+
+#define mmDMA4_QM_CP_DBG_0_3 0x588414
+
+#define mmDMA4_QM_CP_DBG_0_4 0x588418
+
+#define mmDMA4_QM_CP_ARUSER_31_11_0 0x58841C
+
+#define mmDMA4_QM_CP_ARUSER_31_11_1 0x588420
+
+#define mmDMA4_QM_CP_ARUSER_31_11_2 0x588424
+
+#define mmDMA4_QM_CP_ARUSER_31_11_3 0x588428
+
+#define mmDMA4_QM_CP_ARUSER_31_11_4 0x58842C
+
+#define mmDMA4_QM_CP_AWUSER_31_11_0 0x588430
+
+#define mmDMA4_QM_CP_AWUSER_31_11_1 0x588434
+
+#define mmDMA4_QM_CP_AWUSER_31_11_2 0x588438
+
+#define mmDMA4_QM_CP_AWUSER_31_11_3 0x58843C
+
+#define mmDMA4_QM_CP_AWUSER_31_11_4 0x588440
+
+#define mmDMA4_QM_ARB_CFG_0 0x588A00
+
+#define mmDMA4_QM_ARB_CHOISE_Q_PUSH 0x588A04
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_0 0x588A08
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_1 0x588A0C
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_2 0x588A10
+
+#define mmDMA4_QM_ARB_WRR_WEIGHT_3 0x588A14
+
+#define mmDMA4_QM_ARB_CFG_1 0x588A18
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_0 0x588A20
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_1 0x588A24
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_2 0x588A28
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_3 0x588A2C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_4 0x588A30
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_5 0x588A34
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_6 0x588A38
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_7 0x588A3C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_8 0x588A40
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_9 0x588A44
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_10 0x588A48
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_11 0x588A4C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_12 0x588A50
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_13 0x588A54
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_14 0x588A58
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_15 0x588A5C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_16 0x588A60
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_17 0x588A64
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_18 0x588A68
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_19 0x588A6C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_20 0x588A70
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_21 0x588A74
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_22 0x588A78
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_23 0x588A7C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_24 0x588A80
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_25 0x588A84
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_26 0x588A88
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_27 0x588A8C
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_28 0x588A90
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_29 0x588A94
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_30 0x588A98
+
+#define mmDMA4_QM_ARB_MST_AVAIL_CRED_31 0x588A9C
+
+#define mmDMA4_QM_ARB_MST_CRED_INC 0x588AA0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x588AA4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x588AA8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x588AAC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x588AB0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x588AB4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x588AB8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x588ABC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x588AC0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x588AC4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x588AC8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x588ACC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x588AD0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x588AD4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x588AD8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x588ADC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x588AE0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x588AE4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x588AE8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x588AEC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x588AF0
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x588AF4
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x588AF8
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x588AFC
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x588B00
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x588B04
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x588B08
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x588B0C
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x588B10
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x588B14
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x588B18
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x588B1C
+
+#define mmDMA4_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x588B20
+
+#define mmDMA4_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x588B28
+
+#define mmDMA4_QM_ARB_MST_SLAVE_EN 0x588B2C
+
+#define mmDMA4_QM_ARB_MST_QUIET_PER 0x588B34
+
+#define mmDMA4_QM_ARB_SLV_CHOISE_WDT 0x588B38
+
+#define mmDMA4_QM_ARB_SLV_ID 0x588B3C
+
+#define mmDMA4_QM_ARB_MSG_MAX_INFLIGHT 0x588B44
+
+#define mmDMA4_QM_ARB_MSG_AWUSER_31_11 0x588B48
+
+#define mmDMA4_QM_ARB_MSG_AWUSER_SEC_PROP 0x588B4C
+
+#define mmDMA4_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x588B50
+
+#define mmDMA4_QM_ARB_BASE_LO 0x588B54
+
+#define mmDMA4_QM_ARB_BASE_HI 0x588B58
+
+#define mmDMA4_QM_ARB_STATE_STS 0x588B80
+
+#define mmDMA4_QM_ARB_CHOISE_FULLNESS_STS 0x588B84
+
+#define mmDMA4_QM_ARB_MSG_STS 0x588B88
+
+#define mmDMA4_QM_ARB_SLV_CHOISE_Q_HEAD 0x588B8C
+
+#define mmDMA4_QM_ARB_ERR_CAUSE 0x588B9C
+
+#define mmDMA4_QM_ARB_ERR_MSG_EN 0x588BA0
+
+#define mmDMA4_QM_ARB_ERR_STS_DRP 0x588BA8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_0 0x588BB0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_1 0x588BB4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_2 0x588BB8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_3 0x588BBC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_4 0x588BC0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_5 0x588BC4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_6 0x588BC8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_7 0x588BCC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_8 0x588BD0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_9 0x588BD4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_10 0x588BD8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_11 0x588BDC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_12 0x588BE0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_13 0x588BE4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_14 0x588BE8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_15 0x588BEC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_16 0x588BF0
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_17 0x588BF4
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_18 0x588BF8
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_19 0x588BFC
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_20 0x588C00
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_21 0x588C04
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_22 0x588C08
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_23 0x588C0C
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_24 0x588C10
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_25 0x588C14
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_26 0x588C18
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_27 0x588C1C
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_28 0x588C20
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_29 0x588C24
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_30 0x588C28
+
+#define mmDMA4_QM_ARB_MST_CRED_STS_31 0x588C2C
+
+#define mmDMA4_QM_CGM_CFG 0x588C70
+
+#define mmDMA4_QM_CGM_STS 0x588C74
+
+#define mmDMA4_QM_CGM_CFG1 0x588C78
+
+#define mmDMA4_QM_LOCAL_RANGE_BASE 0x588C80
+
+#define mmDMA4_QM_LOCAL_RANGE_SIZE 0x588C84
+
+#define mmDMA4_QM_CSMR_STRICT_PRIO_CFG 0x588C90
+
+#define mmDMA4_QM_HBW_RD_RATE_LIM_CFG_1 0x588C94
+
+#define mmDMA4_QM_LBW_WR_RATE_LIM_CFG_0 0x588C98
+
+#define mmDMA4_QM_LBW_WR_RATE_LIM_CFG_1 0x588C9C
+
+#define mmDMA4_QM_HBW_RD_RATE_LIM_CFG_0 0x588CA0
+
+#define mmDMA4_QM_GLBL_AXCACHE 0x588CA4
+
+#define mmDMA4_QM_IND_GW_APB_CFG 0x588CB0
+
+#define mmDMA4_QM_IND_GW_APB_WDATA 0x588CB4
+
+#define mmDMA4_QM_IND_GW_APB_RDATA 0x588CB8
+
+#define mmDMA4_QM_IND_GW_APB_STATUS 0x588CBC
+
+#define mmDMA4_QM_GLBL_ERR_ADDR_LO 0x588CD0
+
+#define mmDMA4_QM_GLBL_ERR_ADDR_HI 0x588CD4
+
+#define mmDMA4_QM_GLBL_ERR_WDATA 0x588CD8
+
+#define mmDMA4_QM_GLBL_MEM_INIT_BUSY 0x588D00
+
+#endif /* ASIC_REG_DMA4_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
new file mode 100644
index 000000000000..6e07c6fb6fc9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA5_CORE_REGS_H_
+#define ASIC_REG_DMA5_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA5_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA5_CORE_CFG_0 0x5A0000
+
+#define mmDMA5_CORE_CFG_1 0x5A0004
+
+#define mmDMA5_CORE_LBW_MAX_OUTSTAND 0x5A0008
+
+#define mmDMA5_CORE_SRC_BASE_LO 0x5A0014
+
+#define mmDMA5_CORE_SRC_BASE_HI 0x5A0018
+
+#define mmDMA5_CORE_DST_BASE_LO 0x5A001C
+
+#define mmDMA5_CORE_DST_BASE_HI 0x5A0020
+
+#define mmDMA5_CORE_SRC_TSIZE_1 0x5A002C
+
+#define mmDMA5_CORE_SRC_STRIDE_1 0x5A0030
+
+#define mmDMA5_CORE_SRC_TSIZE_2 0x5A0034
+
+#define mmDMA5_CORE_SRC_STRIDE_2 0x5A0038
+
+#define mmDMA5_CORE_SRC_TSIZE_3 0x5A003C
+
+#define mmDMA5_CORE_SRC_STRIDE_3 0x5A0040
+
+#define mmDMA5_CORE_SRC_TSIZE_4 0x5A0044
+
+#define mmDMA5_CORE_SRC_STRIDE_4 0x5A0048
+
+#define mmDMA5_CORE_SRC_TSIZE_0 0x5A004C
+
+#define mmDMA5_CORE_DST_TSIZE_1 0x5A0054
+
+#define mmDMA5_CORE_DST_STRIDE_1 0x5A0058
+
+#define mmDMA5_CORE_DST_TSIZE_2 0x5A005C
+
+#define mmDMA5_CORE_DST_STRIDE_2 0x5A0060
+
+#define mmDMA5_CORE_DST_TSIZE_3 0x5A0064
+
+#define mmDMA5_CORE_DST_STRIDE_3 0x5A0068
+
+#define mmDMA5_CORE_DST_TSIZE_4 0x5A006C
+
+#define mmDMA5_CORE_DST_STRIDE_4 0x5A0070
+
+#define mmDMA5_CORE_DST_TSIZE_0 0x5A0074
+
+#define mmDMA5_CORE_COMMIT 0x5A0078
+
+#define mmDMA5_CORE_WR_COMP_WDATA 0x5A007C
+
+#define mmDMA5_CORE_WR_COMP_ADDR_LO 0x5A0080
+
+#define mmDMA5_CORE_WR_COMP_ADDR_HI 0x5A0084
+
+#define mmDMA5_CORE_WR_COMP_AWUSER_31_11 0x5A0088
+
+#define mmDMA5_CORE_TE_NUMROWS 0x5A0094
+
+#define mmDMA5_CORE_PROT 0x5A00B8
+
+#define mmDMA5_CORE_SECURE_PROPS 0x5A00F0
+
+#define mmDMA5_CORE_NON_SECURE_PROPS 0x5A00F4
+
+#define mmDMA5_CORE_RD_MAX_OUTSTAND 0x5A0100
+
+#define mmDMA5_CORE_RD_MAX_SIZE 0x5A0104
+
+#define mmDMA5_CORE_RD_ARCACHE 0x5A0108
+
+#define mmDMA5_CORE_RD_ARUSER_31_11 0x5A0110
+
+#define mmDMA5_CORE_RD_INFLIGHTS 0x5A0114
+
+#define mmDMA5_CORE_WR_MAX_OUTSTAND 0x5A0120
+
+#define mmDMA5_CORE_WR_MAX_AWID 0x5A0124
+
+#define mmDMA5_CORE_WR_AWCACHE 0x5A0128
+
+#define mmDMA5_CORE_WR_AWUSER_31_11 0x5A0130
+
+#define mmDMA5_CORE_WR_INFLIGHTS 0x5A0134
+
+#define mmDMA5_CORE_RD_RATE_LIM_CFG_0 0x5A0150
+
+#define mmDMA5_CORE_RD_RATE_LIM_CFG_1 0x5A0154
+
+#define mmDMA5_CORE_WR_RATE_LIM_CFG_0 0x5A0158
+
+#define mmDMA5_CORE_WR_RATE_LIM_CFG_1 0x5A015C
+
+#define mmDMA5_CORE_ERR_CFG 0x5A0160
+
+#define mmDMA5_CORE_ERR_CAUSE 0x5A0164
+
+#define mmDMA5_CORE_ERRMSG_ADDR_LO 0x5A0170
+
+#define mmDMA5_CORE_ERRMSG_ADDR_HI 0x5A0174
+
+#define mmDMA5_CORE_ERRMSG_WDATA 0x5A0178
+
+#define mmDMA5_CORE_STS0 0x5A0190
+
+#define mmDMA5_CORE_STS1 0x5A0194
+
+#define mmDMA5_CORE_RD_DBGMEM_ADD 0x5A0200
+
+#define mmDMA5_CORE_RD_DBGMEM_DATA_WR 0x5A0204
+
+#define mmDMA5_CORE_RD_DBGMEM_DATA_RD 0x5A0208
+
+#define mmDMA5_CORE_RD_DBGMEM_CTRL 0x5A020C
+
+#define mmDMA5_CORE_RD_DBGMEM_RC 0x5A0210
+
+#define mmDMA5_CORE_DBG_HBW_AXI_AR_CNT 0x5A0220
+
+#define mmDMA5_CORE_DBG_HBW_AXI_AW_CNT 0x5A0224
+
+#define mmDMA5_CORE_DBG_LBW_AXI_AW_CNT 0x5A0228
+
+#define mmDMA5_CORE_DBG_DESC_CNT 0x5A022C
+
+#define mmDMA5_CORE_DBG_STS 0x5A0230
+
+#define mmDMA5_CORE_DBG_RD_DESC_ID 0x5A0234
+
+#define mmDMA5_CORE_DBG_WR_DESC_ID 0x5A0238
+
+#endif /* ASIC_REG_DMA5_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
new file mode 100644
index 000000000000..0faea21756c5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA5_QM_REGS_H_
+#define ASIC_REG_DMA5_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA5_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA5_QM_GLBL_CFG0 0x5A8000
+
+#define mmDMA5_QM_GLBL_CFG1 0x5A8004
+
+#define mmDMA5_QM_GLBL_PROT 0x5A8008
+
+#define mmDMA5_QM_GLBL_ERR_CFG 0x5A800C
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_0 0x5A8010
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_1 0x5A8014
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_2 0x5A8018
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_3 0x5A801C
+
+#define mmDMA5_QM_GLBL_SECURE_PROPS_4 0x5A8020
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_0 0x5A8024
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_1 0x5A8028
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_2 0x5A802C
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_3 0x5A8030
+
+#define mmDMA5_QM_GLBL_NON_SECURE_PROPS_4 0x5A8034
+
+#define mmDMA5_QM_GLBL_STS0 0x5A8038
+
+#define mmDMA5_QM_GLBL_STS1_0 0x5A8040
+
+#define mmDMA5_QM_GLBL_STS1_1 0x5A8044
+
+#define mmDMA5_QM_GLBL_STS1_2 0x5A8048
+
+#define mmDMA5_QM_GLBL_STS1_3 0x5A804C
+
+#define mmDMA5_QM_GLBL_STS1_4 0x5A8050
+
+#define mmDMA5_QM_GLBL_MSG_EN_0 0x5A8054
+
+#define mmDMA5_QM_GLBL_MSG_EN_1 0x5A8058
+
+#define mmDMA5_QM_GLBL_MSG_EN_2 0x5A805C
+
+#define mmDMA5_QM_GLBL_MSG_EN_3 0x5A8060
+
+#define mmDMA5_QM_GLBL_MSG_EN_4 0x5A8068
+
+#define mmDMA5_QM_PQ_BASE_LO_0 0x5A8070
+
+#define mmDMA5_QM_PQ_BASE_LO_1 0x5A8074
+
+#define mmDMA5_QM_PQ_BASE_LO_2 0x5A8078
+
+#define mmDMA5_QM_PQ_BASE_LO_3 0x5A807C
+
+#define mmDMA5_QM_PQ_BASE_HI_0 0x5A8080
+
+#define mmDMA5_QM_PQ_BASE_HI_1 0x5A8084
+
+#define mmDMA5_QM_PQ_BASE_HI_2 0x5A8088
+
+#define mmDMA5_QM_PQ_BASE_HI_3 0x5A808C
+
+#define mmDMA5_QM_PQ_SIZE_0 0x5A8090
+
+#define mmDMA5_QM_PQ_SIZE_1 0x5A8094
+
+#define mmDMA5_QM_PQ_SIZE_2 0x5A8098
+
+#define mmDMA5_QM_PQ_SIZE_3 0x5A809C
+
+#define mmDMA5_QM_PQ_PI_0 0x5A80A0
+
+#define mmDMA5_QM_PQ_PI_1 0x5A80A4
+
+#define mmDMA5_QM_PQ_PI_2 0x5A80A8
+
+#define mmDMA5_QM_PQ_PI_3 0x5A80AC
+
+#define mmDMA5_QM_PQ_CI_0 0x5A80B0
+
+#define mmDMA5_QM_PQ_CI_1 0x5A80B4
+
+#define mmDMA5_QM_PQ_CI_2 0x5A80B8
+
+#define mmDMA5_QM_PQ_CI_3 0x5A80BC
+
+#define mmDMA5_QM_PQ_CFG0_0 0x5A80C0
+
+#define mmDMA5_QM_PQ_CFG0_1 0x5A80C4
+
+#define mmDMA5_QM_PQ_CFG0_2 0x5A80C8
+
+#define mmDMA5_QM_PQ_CFG0_3 0x5A80CC
+
+#define mmDMA5_QM_PQ_CFG1_0 0x5A80D0
+
+#define mmDMA5_QM_PQ_CFG1_1 0x5A80D4
+
+#define mmDMA5_QM_PQ_CFG1_2 0x5A80D8
+
+#define mmDMA5_QM_PQ_CFG1_3 0x5A80DC
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_0 0x5A80E0
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_1 0x5A80E4
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_2 0x5A80E8
+
+#define mmDMA5_QM_PQ_ARUSER_31_11_3 0x5A80EC
+
+#define mmDMA5_QM_PQ_STS0_0 0x5A80F0
+
+#define mmDMA5_QM_PQ_STS0_1 0x5A80F4
+
+#define mmDMA5_QM_PQ_STS0_2 0x5A80F8
+
+#define mmDMA5_QM_PQ_STS0_3 0x5A80FC
+
+#define mmDMA5_QM_PQ_STS1_0 0x5A8100
+
+#define mmDMA5_QM_PQ_STS1_1 0x5A8104
+
+#define mmDMA5_QM_PQ_STS1_2 0x5A8108
+
+#define mmDMA5_QM_PQ_STS1_3 0x5A810C
+
+#define mmDMA5_QM_CQ_CFG0_0 0x5A8110
+
+#define mmDMA5_QM_CQ_CFG0_1 0x5A8114
+
+#define mmDMA5_QM_CQ_CFG0_2 0x5A8118
+
+#define mmDMA5_QM_CQ_CFG0_3 0x5A811C
+
+#define mmDMA5_QM_CQ_CFG0_4 0x5A8120
+
+#define mmDMA5_QM_CQ_CFG1_0 0x5A8124
+
+#define mmDMA5_QM_CQ_CFG1_1 0x5A8128
+
+#define mmDMA5_QM_CQ_CFG1_2 0x5A812C
+
+#define mmDMA5_QM_CQ_CFG1_3 0x5A8130
+
+#define mmDMA5_QM_CQ_CFG1_4 0x5A8134
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_0 0x5A8138
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_1 0x5A813C
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_2 0x5A8140
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_3 0x5A8144
+
+#define mmDMA5_QM_CQ_ARUSER_31_11_4 0x5A8148
+
+#define mmDMA5_QM_CQ_STS0_0 0x5A814C
+
+#define mmDMA5_QM_CQ_STS0_1 0x5A8150
+
+#define mmDMA5_QM_CQ_STS0_2 0x5A8154
+
+#define mmDMA5_QM_CQ_STS0_3 0x5A8158
+
+#define mmDMA5_QM_CQ_STS0_4 0x5A815C
+
+#define mmDMA5_QM_CQ_STS1_0 0x5A8160
+
+#define mmDMA5_QM_CQ_STS1_1 0x5A8164
+
+#define mmDMA5_QM_CQ_STS1_2 0x5A8168
+
+#define mmDMA5_QM_CQ_STS1_3 0x5A816C
+
+#define mmDMA5_QM_CQ_STS1_4 0x5A8170
+
+#define mmDMA5_QM_CQ_PTR_LO_0 0x5A8174
+
+#define mmDMA5_QM_CQ_PTR_HI_0 0x5A8178
+
+#define mmDMA5_QM_CQ_TSIZE_0 0x5A817C
+
+#define mmDMA5_QM_CQ_CTL_0 0x5A8180
+
+#define mmDMA5_QM_CQ_PTR_LO_1 0x5A8184
+
+#define mmDMA5_QM_CQ_PTR_HI_1 0x5A8188
+
+#define mmDMA5_QM_CQ_TSIZE_1 0x5A818C
+
+#define mmDMA5_QM_CQ_CTL_1 0x5A8190
+
+#define mmDMA5_QM_CQ_PTR_LO_2 0x5A8194
+
+#define mmDMA5_QM_CQ_PTR_HI_2 0x5A8198
+
+#define mmDMA5_QM_CQ_TSIZE_2 0x5A819C
+
+#define mmDMA5_QM_CQ_CTL_2 0x5A81A0
+
+#define mmDMA5_QM_CQ_PTR_LO_3 0x5A81A4
+
+#define mmDMA5_QM_CQ_PTR_HI_3 0x5A81A8
+
+#define mmDMA5_QM_CQ_TSIZE_3 0x5A81AC
+
+#define mmDMA5_QM_CQ_CTL_3 0x5A81B0
+
+#define mmDMA5_QM_CQ_PTR_LO_4 0x5A81B4
+
+#define mmDMA5_QM_CQ_PTR_HI_4 0x5A81B8
+
+#define mmDMA5_QM_CQ_TSIZE_4 0x5A81BC
+
+#define mmDMA5_QM_CQ_CTL_4 0x5A81C0
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_0 0x5A81C4
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_1 0x5A81C8
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_2 0x5A81CC
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_3 0x5A81D0
+
+#define mmDMA5_QM_CQ_PTR_LO_STS_4 0x5A81D4
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_0 0x5A81D8
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_1 0x5A81DC
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_2 0x5A81E0
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_3 0x5A81E4
+
+#define mmDMA5_QM_CQ_PTR_HI_STS_4 0x5A81E8
+
+#define mmDMA5_QM_CQ_TSIZE_STS_0 0x5A81EC
+
+#define mmDMA5_QM_CQ_TSIZE_STS_1 0x5A81F0
+
+#define mmDMA5_QM_CQ_TSIZE_STS_2 0x5A81F4
+
+#define mmDMA5_QM_CQ_TSIZE_STS_3 0x5A81F8
+
+#define mmDMA5_QM_CQ_TSIZE_STS_4 0x5A81FC
+
+#define mmDMA5_QM_CQ_CTL_STS_0 0x5A8200
+
+#define mmDMA5_QM_CQ_CTL_STS_1 0x5A8204
+
+#define mmDMA5_QM_CQ_CTL_STS_2 0x5A8208
+
+#define mmDMA5_QM_CQ_CTL_STS_3 0x5A820C
+
+#define mmDMA5_QM_CQ_CTL_STS_4 0x5A8210
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_0 0x5A8214
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_1 0x5A8218
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_2 0x5A821C
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_3 0x5A8220
+
+#define mmDMA5_QM_CQ_IFIFO_CNT_4 0x5A8224
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_0 0x5A8228
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_1 0x5A822C
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_2 0x5A8230
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_3 0x5A8234
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_LO_4 0x5A8238
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_0 0x5A823C
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_1 0x5A8240
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_2 0x5A8244
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_3 0x5A8248
+
+#define mmDMA5_QM_CP_MSG_BASE0_ADDR_HI_4 0x5A824C
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_0 0x5A8250
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_1 0x5A8254
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_2 0x5A8258
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_3 0x5A825C
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_LO_4 0x5A8260
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_0 0x5A8264
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_1 0x5A8268
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_2 0x5A826C
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_3 0x5A8270
+
+#define mmDMA5_QM_CP_MSG_BASE1_ADDR_HI_4 0x5A8274
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_0 0x5A8278
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_1 0x5A827C
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_2 0x5A8280
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_3 0x5A8284
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_LO_4 0x5A8288
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_0 0x5A828C
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_1 0x5A8290
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_2 0x5A8294
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_3 0x5A8298
+
+#define mmDMA5_QM_CP_MSG_BASE2_ADDR_HI_4 0x5A829C
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_0 0x5A82A0
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_1 0x5A82A4
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_2 0x5A82A8
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_3 0x5A82AC
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_LO_4 0x5A82B0
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_0 0x5A82B4
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_1 0x5A82B8
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_2 0x5A82BC
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_3 0x5A82C0
+
+#define mmDMA5_QM_CP_MSG_BASE3_ADDR_HI_4 0x5A82C4
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_0 0x5A82C8
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_1 0x5A82CC
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_2 0x5A82D0
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_3 0x5A82D4
+
+#define mmDMA5_QM_CP_LDMA_TSIZE_OFFSET_4 0x5A82D8
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5A82E0
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5A82E4
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5A82E8
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5A82EC
+
+#define mmDMA5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5A82F0
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5A82F4
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5A82F8
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5A82FC
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5A8300
+
+#define mmDMA5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5A8304
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_0 0x5A8308
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_1 0x5A830C
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_2 0x5A8310
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_3 0x5A8314
+
+#define mmDMA5_QM_CP_FENCE0_RDATA_4 0x5A8318
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_0 0x5A831C
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_1 0x5A8320
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_2 0x5A8324
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_3 0x5A8328
+
+#define mmDMA5_QM_CP_FENCE1_RDATA_4 0x5A832C
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_0 0x5A8330
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_1 0x5A8334
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_2 0x5A8338
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_3 0x5A833C
+
+#define mmDMA5_QM_CP_FENCE2_RDATA_4 0x5A8340
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_0 0x5A8344
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_1 0x5A8348
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_2 0x5A834C
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_3 0x5A8350
+
+#define mmDMA5_QM_CP_FENCE3_RDATA_4 0x5A8354
+
+#define mmDMA5_QM_CP_FENCE0_CNT_0 0x5A8358
+
+#define mmDMA5_QM_CP_FENCE0_CNT_1 0x5A835C
+
+#define mmDMA5_QM_CP_FENCE0_CNT_2 0x5A8360
+
+#define mmDMA5_QM_CP_FENCE0_CNT_3 0x5A8364
+
+#define mmDMA5_QM_CP_FENCE0_CNT_4 0x5A8368
+
+#define mmDMA5_QM_CP_FENCE1_CNT_0 0x5A836C
+
+#define mmDMA5_QM_CP_FENCE1_CNT_1 0x5A8370
+
+#define mmDMA5_QM_CP_FENCE1_CNT_2 0x5A8374
+
+#define mmDMA5_QM_CP_FENCE1_CNT_3 0x5A8378
+
+#define mmDMA5_QM_CP_FENCE1_CNT_4 0x5A837C
+
+#define mmDMA5_QM_CP_FENCE2_CNT_0 0x5A8380
+
+#define mmDMA5_QM_CP_FENCE2_CNT_1 0x5A8384
+
+#define mmDMA5_QM_CP_FENCE2_CNT_2 0x5A8388
+
+#define mmDMA5_QM_CP_FENCE2_CNT_3 0x5A838C
+
+#define mmDMA5_QM_CP_FENCE2_CNT_4 0x5A8390
+
+#define mmDMA5_QM_CP_FENCE3_CNT_0 0x5A8394
+
+#define mmDMA5_QM_CP_FENCE3_CNT_1 0x5A8398
+
+#define mmDMA5_QM_CP_FENCE3_CNT_2 0x5A839C
+
+#define mmDMA5_QM_CP_FENCE3_CNT_3 0x5A83A0
+
+#define mmDMA5_QM_CP_FENCE3_CNT_4 0x5A83A4
+
+#define mmDMA5_QM_CP_STS_0 0x5A83A8
+
+#define mmDMA5_QM_CP_STS_1 0x5A83AC
+
+#define mmDMA5_QM_CP_STS_2 0x5A83B0
+
+#define mmDMA5_QM_CP_STS_3 0x5A83B4
+
+#define mmDMA5_QM_CP_STS_4 0x5A83B8
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_0 0x5A83BC
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_1 0x5A83C0
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_2 0x5A83C4
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_3 0x5A83C8
+
+#define mmDMA5_QM_CP_CURRENT_INST_LO_4 0x5A83CC
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_0 0x5A83D0
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_1 0x5A83D4
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_2 0x5A83D8
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_3 0x5A83DC
+
+#define mmDMA5_QM_CP_CURRENT_INST_HI_4 0x5A83E0
+
+#define mmDMA5_QM_CP_BARRIER_CFG_0 0x5A83F4
+
+#define mmDMA5_QM_CP_BARRIER_CFG_1 0x5A83F8
+
+#define mmDMA5_QM_CP_BARRIER_CFG_2 0x5A83FC
+
+#define mmDMA5_QM_CP_BARRIER_CFG_3 0x5A8400
+
+#define mmDMA5_QM_CP_BARRIER_CFG_4 0x5A8404
+
+#define mmDMA5_QM_CP_DBG_0_0 0x5A8408
+
+#define mmDMA5_QM_CP_DBG_0_1 0x5A840C
+
+#define mmDMA5_QM_CP_DBG_0_2 0x5A8410
+
+#define mmDMA5_QM_CP_DBG_0_3 0x5A8414
+
+#define mmDMA5_QM_CP_DBG_0_4 0x5A8418
+
+#define mmDMA5_QM_CP_ARUSER_31_11_0 0x5A841C
+
+#define mmDMA5_QM_CP_ARUSER_31_11_1 0x5A8420
+
+#define mmDMA5_QM_CP_ARUSER_31_11_2 0x5A8424
+
+#define mmDMA5_QM_CP_ARUSER_31_11_3 0x5A8428
+
+#define mmDMA5_QM_CP_ARUSER_31_11_4 0x5A842C
+
+#define mmDMA5_QM_CP_AWUSER_31_11_0 0x5A8430
+
+#define mmDMA5_QM_CP_AWUSER_31_11_1 0x5A8434
+
+#define mmDMA5_QM_CP_AWUSER_31_11_2 0x5A8438
+
+#define mmDMA5_QM_CP_AWUSER_31_11_3 0x5A843C
+
+#define mmDMA5_QM_CP_AWUSER_31_11_4 0x5A8440
+
+#define mmDMA5_QM_ARB_CFG_0 0x5A8A00
+
+#define mmDMA5_QM_ARB_CHOISE_Q_PUSH 0x5A8A04
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_0 0x5A8A08
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_1 0x5A8A0C
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_2 0x5A8A10
+
+#define mmDMA5_QM_ARB_WRR_WEIGHT_3 0x5A8A14
+
+#define mmDMA5_QM_ARB_CFG_1 0x5A8A18
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_0 0x5A8A20
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_1 0x5A8A24
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_2 0x5A8A28
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_3 0x5A8A2C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_4 0x5A8A30
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_5 0x5A8A34
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_6 0x5A8A38
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_7 0x5A8A3C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_8 0x5A8A40
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_9 0x5A8A44
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_10 0x5A8A48
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_11 0x5A8A4C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_12 0x5A8A50
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_13 0x5A8A54
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_14 0x5A8A58
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_15 0x5A8A5C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_16 0x5A8A60
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_17 0x5A8A64
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_18 0x5A8A68
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_19 0x5A8A6C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_20 0x5A8A70
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_21 0x5A8A74
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_22 0x5A8A78
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_23 0x5A8A7C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_24 0x5A8A80
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_25 0x5A8A84
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_26 0x5A8A88
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_27 0x5A8A8C
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_28 0x5A8A90
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_29 0x5A8A94
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_30 0x5A8A98
+
+#define mmDMA5_QM_ARB_MST_AVAIL_CRED_31 0x5A8A9C
+
+#define mmDMA5_QM_ARB_MST_CRED_INC 0x5A8AA0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5A8AA4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5A8AA8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5A8AAC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5A8AB0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5A8AB4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5A8AB8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5A8ABC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5A8AC0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5A8AC4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5A8AC8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5A8ACC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5A8AD0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5A8AD4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5A8AD8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5A8ADC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5A8AE0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5A8AE4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5A8AE8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5A8AEC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5A8AF0
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5A8AF4
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5A8AF8
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5A8AFC
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5A8B00
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5A8B04
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5A8B08
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5A8B0C
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5A8B10
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5A8B14
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5A8B18
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5A8B1C
+
+#define mmDMA5_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5A8B20
+
+#define mmDMA5_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5A8B28
+
+#define mmDMA5_QM_ARB_MST_SLAVE_EN 0x5A8B2C
+
+#define mmDMA5_QM_ARB_MST_QUIET_PER 0x5A8B34
+
+#define mmDMA5_QM_ARB_SLV_CHOISE_WDT 0x5A8B38
+
+#define mmDMA5_QM_ARB_SLV_ID 0x5A8B3C
+
+#define mmDMA5_QM_ARB_MSG_MAX_INFLIGHT 0x5A8B44
+
+#define mmDMA5_QM_ARB_MSG_AWUSER_31_11 0x5A8B48
+
+#define mmDMA5_QM_ARB_MSG_AWUSER_SEC_PROP 0x5A8B4C
+
+#define mmDMA5_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5A8B50
+
+#define mmDMA5_QM_ARB_BASE_LO 0x5A8B54
+
+#define mmDMA5_QM_ARB_BASE_HI 0x5A8B58
+
+#define mmDMA5_QM_ARB_STATE_STS 0x5A8B80
+
+#define mmDMA5_QM_ARB_CHOISE_FULLNESS_STS 0x5A8B84
+
+#define mmDMA5_QM_ARB_MSG_STS 0x5A8B88
+
+#define mmDMA5_QM_ARB_SLV_CHOISE_Q_HEAD 0x5A8B8C
+
+#define mmDMA5_QM_ARB_ERR_CAUSE 0x5A8B9C
+
+#define mmDMA5_QM_ARB_ERR_MSG_EN 0x5A8BA0
+
+#define mmDMA5_QM_ARB_ERR_STS_DRP 0x5A8BA8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_0 0x5A8BB0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_1 0x5A8BB4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_2 0x5A8BB8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_3 0x5A8BBC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_4 0x5A8BC0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_5 0x5A8BC4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_6 0x5A8BC8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_7 0x5A8BCC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_8 0x5A8BD0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_9 0x5A8BD4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_10 0x5A8BD8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_11 0x5A8BDC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_12 0x5A8BE0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_13 0x5A8BE4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_14 0x5A8BE8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_15 0x5A8BEC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_16 0x5A8BF0
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_17 0x5A8BF4
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_18 0x5A8BF8
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_19 0x5A8BFC
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_20 0x5A8C00
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_21 0x5A8C04
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_22 0x5A8C08
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_23 0x5A8C0C
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_24 0x5A8C10
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_25 0x5A8C14
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_26 0x5A8C18
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_27 0x5A8C1C
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_28 0x5A8C20
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_29 0x5A8C24
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_30 0x5A8C28
+
+#define mmDMA5_QM_ARB_MST_CRED_STS_31 0x5A8C2C
+
+#define mmDMA5_QM_CGM_CFG 0x5A8C70
+
+#define mmDMA5_QM_CGM_STS 0x5A8C74
+
+#define mmDMA5_QM_CGM_CFG1 0x5A8C78
+
+#define mmDMA5_QM_LOCAL_RANGE_BASE 0x5A8C80
+
+#define mmDMA5_QM_LOCAL_RANGE_SIZE 0x5A8C84
+
+#define mmDMA5_QM_CSMR_STRICT_PRIO_CFG 0x5A8C90
+
+#define mmDMA5_QM_HBW_RD_RATE_LIM_CFG_1 0x5A8C94
+
+#define mmDMA5_QM_LBW_WR_RATE_LIM_CFG_0 0x5A8C98
+
+#define mmDMA5_QM_LBW_WR_RATE_LIM_CFG_1 0x5A8C9C
+
+#define mmDMA5_QM_HBW_RD_RATE_LIM_CFG_0 0x5A8CA0
+
+#define mmDMA5_QM_GLBL_AXCACHE 0x5A8CA4
+
+#define mmDMA5_QM_IND_GW_APB_CFG 0x5A8CB0
+
+#define mmDMA5_QM_IND_GW_APB_WDATA 0x5A8CB4
+
+#define mmDMA5_QM_IND_GW_APB_RDATA 0x5A8CB8
+
+#define mmDMA5_QM_IND_GW_APB_STATUS 0x5A8CBC
+
+#define mmDMA5_QM_GLBL_ERR_ADDR_LO 0x5A8CD0
+
+#define mmDMA5_QM_GLBL_ERR_ADDR_HI 0x5A8CD4
+
+#define mmDMA5_QM_GLBL_ERR_WDATA 0x5A8CD8
+
+#define mmDMA5_QM_GLBL_MEM_INIT_BUSY 0x5A8D00
+
+#endif /* ASIC_REG_DMA5_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
new file mode 100644
index 000000000000..4962c13e2e2e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA6_CORE_REGS_H_
+#define ASIC_REG_DMA6_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA6_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA6_CORE_CFG_0 0x5C0000
+
+#define mmDMA6_CORE_CFG_1 0x5C0004
+
+#define mmDMA6_CORE_LBW_MAX_OUTSTAND 0x5C0008
+
+#define mmDMA6_CORE_SRC_BASE_LO 0x5C0014
+
+#define mmDMA6_CORE_SRC_BASE_HI 0x5C0018
+
+#define mmDMA6_CORE_DST_BASE_LO 0x5C001C
+
+#define mmDMA6_CORE_DST_BASE_HI 0x5C0020
+
+#define mmDMA6_CORE_SRC_TSIZE_1 0x5C002C
+
+#define mmDMA6_CORE_SRC_STRIDE_1 0x5C0030
+
+#define mmDMA6_CORE_SRC_TSIZE_2 0x5C0034
+
+#define mmDMA6_CORE_SRC_STRIDE_2 0x5C0038
+
+#define mmDMA6_CORE_SRC_TSIZE_3 0x5C003C
+
+#define mmDMA6_CORE_SRC_STRIDE_3 0x5C0040
+
+#define mmDMA6_CORE_SRC_TSIZE_4 0x5C0044
+
+#define mmDMA6_CORE_SRC_STRIDE_4 0x5C0048
+
+#define mmDMA6_CORE_SRC_TSIZE_0 0x5C004C
+
+#define mmDMA6_CORE_DST_TSIZE_1 0x5C0054
+
+#define mmDMA6_CORE_DST_STRIDE_1 0x5C0058
+
+#define mmDMA6_CORE_DST_TSIZE_2 0x5C005C
+
+#define mmDMA6_CORE_DST_STRIDE_2 0x5C0060
+
+#define mmDMA6_CORE_DST_TSIZE_3 0x5C0064
+
+#define mmDMA6_CORE_DST_STRIDE_3 0x5C0068
+
+#define mmDMA6_CORE_DST_TSIZE_4 0x5C006C
+
+#define mmDMA6_CORE_DST_STRIDE_4 0x5C0070
+
+#define mmDMA6_CORE_DST_TSIZE_0 0x5C0074
+
+#define mmDMA6_CORE_COMMIT 0x5C0078
+
+#define mmDMA6_CORE_WR_COMP_WDATA 0x5C007C
+
+#define mmDMA6_CORE_WR_COMP_ADDR_LO 0x5C0080
+
+#define mmDMA6_CORE_WR_COMP_ADDR_HI 0x5C0084
+
+#define mmDMA6_CORE_WR_COMP_AWUSER_31_11 0x5C0088
+
+#define mmDMA6_CORE_TE_NUMROWS 0x5C0094
+
+#define mmDMA6_CORE_PROT 0x5C00B8
+
+#define mmDMA6_CORE_SECURE_PROPS 0x5C00F0
+
+#define mmDMA6_CORE_NON_SECURE_PROPS 0x5C00F4
+
+#define mmDMA6_CORE_RD_MAX_OUTSTAND 0x5C0100
+
+#define mmDMA6_CORE_RD_MAX_SIZE 0x5C0104
+
+#define mmDMA6_CORE_RD_ARCACHE 0x5C0108
+
+#define mmDMA6_CORE_RD_ARUSER_31_11 0x5C0110
+
+#define mmDMA6_CORE_RD_INFLIGHTS 0x5C0114
+
+#define mmDMA6_CORE_WR_MAX_OUTSTAND 0x5C0120
+
+#define mmDMA6_CORE_WR_MAX_AWID 0x5C0124
+
+#define mmDMA6_CORE_WR_AWCACHE 0x5C0128
+
+#define mmDMA6_CORE_WR_AWUSER_31_11 0x5C0130
+
+#define mmDMA6_CORE_WR_INFLIGHTS 0x5C0134
+
+#define mmDMA6_CORE_RD_RATE_LIM_CFG_0 0x5C0150
+
+#define mmDMA6_CORE_RD_RATE_LIM_CFG_1 0x5C0154
+
+#define mmDMA6_CORE_WR_RATE_LIM_CFG_0 0x5C0158
+
+#define mmDMA6_CORE_WR_RATE_LIM_CFG_1 0x5C015C
+
+#define mmDMA6_CORE_ERR_CFG 0x5C0160
+
+#define mmDMA6_CORE_ERR_CAUSE 0x5C0164
+
+#define mmDMA6_CORE_ERRMSG_ADDR_LO 0x5C0170
+
+#define mmDMA6_CORE_ERRMSG_ADDR_HI 0x5C0174
+
+#define mmDMA6_CORE_ERRMSG_WDATA 0x5C0178
+
+#define mmDMA6_CORE_STS0 0x5C0190
+
+#define mmDMA6_CORE_STS1 0x5C0194
+
+#define mmDMA6_CORE_RD_DBGMEM_ADD 0x5C0200
+
+#define mmDMA6_CORE_RD_DBGMEM_DATA_WR 0x5C0204
+
+#define mmDMA6_CORE_RD_DBGMEM_DATA_RD 0x5C0208
+
+#define mmDMA6_CORE_RD_DBGMEM_CTRL 0x5C020C
+
+#define mmDMA6_CORE_RD_DBGMEM_RC 0x5C0210
+
+#define mmDMA6_CORE_DBG_HBW_AXI_AR_CNT 0x5C0220
+
+#define mmDMA6_CORE_DBG_HBW_AXI_AW_CNT 0x5C0224
+
+#define mmDMA6_CORE_DBG_LBW_AXI_AW_CNT 0x5C0228
+
+#define mmDMA6_CORE_DBG_DESC_CNT 0x5C022C
+
+#define mmDMA6_CORE_DBG_STS 0x5C0230
+
+#define mmDMA6_CORE_DBG_RD_DESC_ID 0x5C0234
+
+#define mmDMA6_CORE_DBG_WR_DESC_ID 0x5C0238
+
+#endif /* ASIC_REG_DMA6_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
new file mode 100644
index 000000000000..af87adb94c94
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA6_QM_REGS_H_
+#define ASIC_REG_DMA6_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA6_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA6_QM_GLBL_CFG0 0x5C8000
+
+#define mmDMA6_QM_GLBL_CFG1 0x5C8004
+
+#define mmDMA6_QM_GLBL_PROT 0x5C8008
+
+#define mmDMA6_QM_GLBL_ERR_CFG 0x5C800C
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_0 0x5C8010
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_1 0x5C8014
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_2 0x5C8018
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_3 0x5C801C
+
+#define mmDMA6_QM_GLBL_SECURE_PROPS_4 0x5C8020
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_0 0x5C8024
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_1 0x5C8028
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_2 0x5C802C
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_3 0x5C8030
+
+#define mmDMA6_QM_GLBL_NON_SECURE_PROPS_4 0x5C8034
+
+#define mmDMA6_QM_GLBL_STS0 0x5C8038
+
+#define mmDMA6_QM_GLBL_STS1_0 0x5C8040
+
+#define mmDMA6_QM_GLBL_STS1_1 0x5C8044
+
+#define mmDMA6_QM_GLBL_STS1_2 0x5C8048
+
+#define mmDMA6_QM_GLBL_STS1_3 0x5C804C
+
+#define mmDMA6_QM_GLBL_STS1_4 0x5C8050
+
+#define mmDMA6_QM_GLBL_MSG_EN_0 0x5C8054
+
+#define mmDMA6_QM_GLBL_MSG_EN_1 0x5C8058
+
+#define mmDMA6_QM_GLBL_MSG_EN_2 0x5C805C
+
+#define mmDMA6_QM_GLBL_MSG_EN_3 0x5C8060
+
+#define mmDMA6_QM_GLBL_MSG_EN_4 0x5C8068
+
+#define mmDMA6_QM_PQ_BASE_LO_0 0x5C8070
+
+#define mmDMA6_QM_PQ_BASE_LO_1 0x5C8074
+
+#define mmDMA6_QM_PQ_BASE_LO_2 0x5C8078
+
+#define mmDMA6_QM_PQ_BASE_LO_3 0x5C807C
+
+#define mmDMA6_QM_PQ_BASE_HI_0 0x5C8080
+
+#define mmDMA6_QM_PQ_BASE_HI_1 0x5C8084
+
+#define mmDMA6_QM_PQ_BASE_HI_2 0x5C8088
+
+#define mmDMA6_QM_PQ_BASE_HI_3 0x5C808C
+
+#define mmDMA6_QM_PQ_SIZE_0 0x5C8090
+
+#define mmDMA6_QM_PQ_SIZE_1 0x5C8094
+
+#define mmDMA6_QM_PQ_SIZE_2 0x5C8098
+
+#define mmDMA6_QM_PQ_SIZE_3 0x5C809C
+
+#define mmDMA6_QM_PQ_PI_0 0x5C80A0
+
+#define mmDMA6_QM_PQ_PI_1 0x5C80A4
+
+#define mmDMA6_QM_PQ_PI_2 0x5C80A8
+
+#define mmDMA6_QM_PQ_PI_3 0x5C80AC
+
+#define mmDMA6_QM_PQ_CI_0 0x5C80B0
+
+#define mmDMA6_QM_PQ_CI_1 0x5C80B4
+
+#define mmDMA6_QM_PQ_CI_2 0x5C80B8
+
+#define mmDMA6_QM_PQ_CI_3 0x5C80BC
+
+#define mmDMA6_QM_PQ_CFG0_0 0x5C80C0
+
+#define mmDMA6_QM_PQ_CFG0_1 0x5C80C4
+
+#define mmDMA6_QM_PQ_CFG0_2 0x5C80C8
+
+#define mmDMA6_QM_PQ_CFG0_3 0x5C80CC
+
+#define mmDMA6_QM_PQ_CFG1_0 0x5C80D0
+
+#define mmDMA6_QM_PQ_CFG1_1 0x5C80D4
+
+#define mmDMA6_QM_PQ_CFG1_2 0x5C80D8
+
+#define mmDMA6_QM_PQ_CFG1_3 0x5C80DC
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_0 0x5C80E0
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_1 0x5C80E4
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_2 0x5C80E8
+
+#define mmDMA6_QM_PQ_ARUSER_31_11_3 0x5C80EC
+
+#define mmDMA6_QM_PQ_STS0_0 0x5C80F0
+
+#define mmDMA6_QM_PQ_STS0_1 0x5C80F4
+
+#define mmDMA6_QM_PQ_STS0_2 0x5C80F8
+
+#define mmDMA6_QM_PQ_STS0_3 0x5C80FC
+
+#define mmDMA6_QM_PQ_STS1_0 0x5C8100
+
+#define mmDMA6_QM_PQ_STS1_1 0x5C8104
+
+#define mmDMA6_QM_PQ_STS1_2 0x5C8108
+
+#define mmDMA6_QM_PQ_STS1_3 0x5C810C
+
+#define mmDMA6_QM_CQ_CFG0_0 0x5C8110
+
+#define mmDMA6_QM_CQ_CFG0_1 0x5C8114
+
+#define mmDMA6_QM_CQ_CFG0_2 0x5C8118
+
+#define mmDMA6_QM_CQ_CFG0_3 0x5C811C
+
+#define mmDMA6_QM_CQ_CFG0_4 0x5C8120
+
+#define mmDMA6_QM_CQ_CFG1_0 0x5C8124
+
+#define mmDMA6_QM_CQ_CFG1_1 0x5C8128
+
+#define mmDMA6_QM_CQ_CFG1_2 0x5C812C
+
+#define mmDMA6_QM_CQ_CFG1_3 0x5C8130
+
+#define mmDMA6_QM_CQ_CFG1_4 0x5C8134
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_0 0x5C8138
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_1 0x5C813C
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_2 0x5C8140
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_3 0x5C8144
+
+#define mmDMA6_QM_CQ_ARUSER_31_11_4 0x5C8148
+
+#define mmDMA6_QM_CQ_STS0_0 0x5C814C
+
+#define mmDMA6_QM_CQ_STS0_1 0x5C8150
+
+#define mmDMA6_QM_CQ_STS0_2 0x5C8154
+
+#define mmDMA6_QM_CQ_STS0_3 0x5C8158
+
+#define mmDMA6_QM_CQ_STS0_4 0x5C815C
+
+#define mmDMA6_QM_CQ_STS1_0 0x5C8160
+
+#define mmDMA6_QM_CQ_STS1_1 0x5C8164
+
+#define mmDMA6_QM_CQ_STS1_2 0x5C8168
+
+#define mmDMA6_QM_CQ_STS1_3 0x5C816C
+
+#define mmDMA6_QM_CQ_STS1_4 0x5C8170
+
+#define mmDMA6_QM_CQ_PTR_LO_0 0x5C8174
+
+#define mmDMA6_QM_CQ_PTR_HI_0 0x5C8178
+
+#define mmDMA6_QM_CQ_TSIZE_0 0x5C817C
+
+#define mmDMA6_QM_CQ_CTL_0 0x5C8180
+
+#define mmDMA6_QM_CQ_PTR_LO_1 0x5C8184
+
+#define mmDMA6_QM_CQ_PTR_HI_1 0x5C8188
+
+#define mmDMA6_QM_CQ_TSIZE_1 0x5C818C
+
+#define mmDMA6_QM_CQ_CTL_1 0x5C8190
+
+#define mmDMA6_QM_CQ_PTR_LO_2 0x5C8194
+
+#define mmDMA6_QM_CQ_PTR_HI_2 0x5C8198
+
+#define mmDMA6_QM_CQ_TSIZE_2 0x5C819C
+
+#define mmDMA6_QM_CQ_CTL_2 0x5C81A0
+
+#define mmDMA6_QM_CQ_PTR_LO_3 0x5C81A4
+
+#define mmDMA6_QM_CQ_PTR_HI_3 0x5C81A8
+
+#define mmDMA6_QM_CQ_TSIZE_3 0x5C81AC
+
+#define mmDMA6_QM_CQ_CTL_3 0x5C81B0
+
+#define mmDMA6_QM_CQ_PTR_LO_4 0x5C81B4
+
+#define mmDMA6_QM_CQ_PTR_HI_4 0x5C81B8
+
+#define mmDMA6_QM_CQ_TSIZE_4 0x5C81BC
+
+#define mmDMA6_QM_CQ_CTL_4 0x5C81C0
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_0 0x5C81C4
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_1 0x5C81C8
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_2 0x5C81CC
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_3 0x5C81D0
+
+#define mmDMA6_QM_CQ_PTR_LO_STS_4 0x5C81D4
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_0 0x5C81D8
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_1 0x5C81DC
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_2 0x5C81E0
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_3 0x5C81E4
+
+#define mmDMA6_QM_CQ_PTR_HI_STS_4 0x5C81E8
+
+#define mmDMA6_QM_CQ_TSIZE_STS_0 0x5C81EC
+
+#define mmDMA6_QM_CQ_TSIZE_STS_1 0x5C81F0
+
+#define mmDMA6_QM_CQ_TSIZE_STS_2 0x5C81F4
+
+#define mmDMA6_QM_CQ_TSIZE_STS_3 0x5C81F8
+
+#define mmDMA6_QM_CQ_TSIZE_STS_4 0x5C81FC
+
+#define mmDMA6_QM_CQ_CTL_STS_0 0x5C8200
+
+#define mmDMA6_QM_CQ_CTL_STS_1 0x5C8204
+
+#define mmDMA6_QM_CQ_CTL_STS_2 0x5C8208
+
+#define mmDMA6_QM_CQ_CTL_STS_3 0x5C820C
+
+#define mmDMA6_QM_CQ_CTL_STS_4 0x5C8210
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_0 0x5C8214
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_1 0x5C8218
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_2 0x5C821C
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_3 0x5C8220
+
+#define mmDMA6_QM_CQ_IFIFO_CNT_4 0x5C8224
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_0 0x5C8228
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_1 0x5C822C
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_2 0x5C8230
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_3 0x5C8234
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_LO_4 0x5C8238
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_0 0x5C823C
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_1 0x5C8240
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_2 0x5C8244
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_3 0x5C8248
+
+#define mmDMA6_QM_CP_MSG_BASE0_ADDR_HI_4 0x5C824C
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_0 0x5C8250
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_1 0x5C8254
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_2 0x5C8258
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_3 0x5C825C
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_LO_4 0x5C8260
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_0 0x5C8264
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_1 0x5C8268
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_2 0x5C826C
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_3 0x5C8270
+
+#define mmDMA6_QM_CP_MSG_BASE1_ADDR_HI_4 0x5C8274
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_0 0x5C8278
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_1 0x5C827C
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_2 0x5C8280
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_3 0x5C8284
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_LO_4 0x5C8288
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_0 0x5C828C
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_1 0x5C8290
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_2 0x5C8294
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_3 0x5C8298
+
+#define mmDMA6_QM_CP_MSG_BASE2_ADDR_HI_4 0x5C829C
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_0 0x5C82A0
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_1 0x5C82A4
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_2 0x5C82A8
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_3 0x5C82AC
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_LO_4 0x5C82B0
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_0 0x5C82B4
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_1 0x5C82B8
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_2 0x5C82BC
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_3 0x5C82C0
+
+#define mmDMA6_QM_CP_MSG_BASE3_ADDR_HI_4 0x5C82C4
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_0 0x5C82C8
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_1 0x5C82CC
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_2 0x5C82D0
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_3 0x5C82D4
+
+#define mmDMA6_QM_CP_LDMA_TSIZE_OFFSET_4 0x5C82D8
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5C82E0
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5C82E4
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5C82E8
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5C82EC
+
+#define mmDMA6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5C82F0
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5C82F4
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5C82F8
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5C82FC
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5C8300
+
+#define mmDMA6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5C8304
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_0 0x5C8308
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_1 0x5C830C
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_2 0x5C8310
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_3 0x5C8314
+
+#define mmDMA6_QM_CP_FENCE0_RDATA_4 0x5C8318
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_0 0x5C831C
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_1 0x5C8320
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_2 0x5C8324
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_3 0x5C8328
+
+#define mmDMA6_QM_CP_FENCE1_RDATA_4 0x5C832C
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_0 0x5C8330
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_1 0x5C8334
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_2 0x5C8338
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_3 0x5C833C
+
+#define mmDMA6_QM_CP_FENCE2_RDATA_4 0x5C8340
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_0 0x5C8344
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_1 0x5C8348
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_2 0x5C834C
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_3 0x5C8350
+
+#define mmDMA6_QM_CP_FENCE3_RDATA_4 0x5C8354
+
+#define mmDMA6_QM_CP_FENCE0_CNT_0 0x5C8358
+
+#define mmDMA6_QM_CP_FENCE0_CNT_1 0x5C835C
+
+#define mmDMA6_QM_CP_FENCE0_CNT_2 0x5C8360
+
+#define mmDMA6_QM_CP_FENCE0_CNT_3 0x5C8364
+
+#define mmDMA6_QM_CP_FENCE0_CNT_4 0x5C8368
+
+#define mmDMA6_QM_CP_FENCE1_CNT_0 0x5C836C
+
+#define mmDMA6_QM_CP_FENCE1_CNT_1 0x5C8370
+
+#define mmDMA6_QM_CP_FENCE1_CNT_2 0x5C8374
+
+#define mmDMA6_QM_CP_FENCE1_CNT_3 0x5C8378
+
+#define mmDMA6_QM_CP_FENCE1_CNT_4 0x5C837C
+
+#define mmDMA6_QM_CP_FENCE2_CNT_0 0x5C8380
+
+#define mmDMA6_QM_CP_FENCE2_CNT_1 0x5C8384
+
+#define mmDMA6_QM_CP_FENCE2_CNT_2 0x5C8388
+
+#define mmDMA6_QM_CP_FENCE2_CNT_3 0x5C838C
+
+#define mmDMA6_QM_CP_FENCE2_CNT_4 0x5C8390
+
+#define mmDMA6_QM_CP_FENCE3_CNT_0 0x5C8394
+
+#define mmDMA6_QM_CP_FENCE3_CNT_1 0x5C8398
+
+#define mmDMA6_QM_CP_FENCE3_CNT_2 0x5C839C
+
+#define mmDMA6_QM_CP_FENCE3_CNT_3 0x5C83A0
+
+#define mmDMA6_QM_CP_FENCE3_CNT_4 0x5C83A4
+
+#define mmDMA6_QM_CP_STS_0 0x5C83A8
+
+#define mmDMA6_QM_CP_STS_1 0x5C83AC
+
+#define mmDMA6_QM_CP_STS_2 0x5C83B0
+
+#define mmDMA6_QM_CP_STS_3 0x5C83B4
+
+#define mmDMA6_QM_CP_STS_4 0x5C83B8
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_0 0x5C83BC
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_1 0x5C83C0
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_2 0x5C83C4
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_3 0x5C83C8
+
+#define mmDMA6_QM_CP_CURRENT_INST_LO_4 0x5C83CC
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_0 0x5C83D0
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_1 0x5C83D4
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_2 0x5C83D8
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_3 0x5C83DC
+
+#define mmDMA6_QM_CP_CURRENT_INST_HI_4 0x5C83E0
+
+#define mmDMA6_QM_CP_BARRIER_CFG_0 0x5C83F4
+
+#define mmDMA6_QM_CP_BARRIER_CFG_1 0x5C83F8
+
+#define mmDMA6_QM_CP_BARRIER_CFG_2 0x5C83FC
+
+#define mmDMA6_QM_CP_BARRIER_CFG_3 0x5C8400
+
+#define mmDMA6_QM_CP_BARRIER_CFG_4 0x5C8404
+
+#define mmDMA6_QM_CP_DBG_0_0 0x5C8408
+
+#define mmDMA6_QM_CP_DBG_0_1 0x5C840C
+
+#define mmDMA6_QM_CP_DBG_0_2 0x5C8410
+
+#define mmDMA6_QM_CP_DBG_0_3 0x5C8414
+
+#define mmDMA6_QM_CP_DBG_0_4 0x5C8418
+
+#define mmDMA6_QM_CP_ARUSER_31_11_0 0x5C841C
+
+#define mmDMA6_QM_CP_ARUSER_31_11_1 0x5C8420
+
+#define mmDMA6_QM_CP_ARUSER_31_11_2 0x5C8424
+
+#define mmDMA6_QM_CP_ARUSER_31_11_3 0x5C8428
+
+#define mmDMA6_QM_CP_ARUSER_31_11_4 0x5C842C
+
+#define mmDMA6_QM_CP_AWUSER_31_11_0 0x5C8430
+
+#define mmDMA6_QM_CP_AWUSER_31_11_1 0x5C8434
+
+#define mmDMA6_QM_CP_AWUSER_31_11_2 0x5C8438
+
+#define mmDMA6_QM_CP_AWUSER_31_11_3 0x5C843C
+
+#define mmDMA6_QM_CP_AWUSER_31_11_4 0x5C8440
+
+#define mmDMA6_QM_ARB_CFG_0 0x5C8A00
+
+#define mmDMA6_QM_ARB_CHOISE_Q_PUSH 0x5C8A04
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_0 0x5C8A08
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_1 0x5C8A0C
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_2 0x5C8A10
+
+#define mmDMA6_QM_ARB_WRR_WEIGHT_3 0x5C8A14
+
+#define mmDMA6_QM_ARB_CFG_1 0x5C8A18
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_0 0x5C8A20
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_1 0x5C8A24
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_2 0x5C8A28
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_3 0x5C8A2C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_4 0x5C8A30
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_5 0x5C8A34
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_6 0x5C8A38
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_7 0x5C8A3C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_8 0x5C8A40
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_9 0x5C8A44
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_10 0x5C8A48
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_11 0x5C8A4C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_12 0x5C8A50
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_13 0x5C8A54
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_14 0x5C8A58
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_15 0x5C8A5C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_16 0x5C8A60
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_17 0x5C8A64
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_18 0x5C8A68
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_19 0x5C8A6C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_20 0x5C8A70
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_21 0x5C8A74
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_22 0x5C8A78
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_23 0x5C8A7C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_24 0x5C8A80
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_25 0x5C8A84
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_26 0x5C8A88
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_27 0x5C8A8C
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_28 0x5C8A90
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_29 0x5C8A94
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_30 0x5C8A98
+
+#define mmDMA6_QM_ARB_MST_AVAIL_CRED_31 0x5C8A9C
+
+#define mmDMA6_QM_ARB_MST_CRED_INC 0x5C8AA0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5C8AA4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5C8AA8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5C8AAC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5C8AB0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5C8AB4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5C8AB8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5C8ABC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5C8AC0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5C8AC4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5C8AC8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5C8ACC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5C8AD0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5C8AD4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5C8AD8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5C8ADC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5C8AE0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5C8AE4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5C8AE8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5C8AEC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5C8AF0
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5C8AF4
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5C8AF8
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5C8AFC
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5C8B00
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5C8B04
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5C8B08
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5C8B0C
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5C8B10
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5C8B14
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5C8B18
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5C8B1C
+
+#define mmDMA6_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5C8B20
+
+#define mmDMA6_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5C8B28
+
+#define mmDMA6_QM_ARB_MST_SLAVE_EN 0x5C8B2C
+
+#define mmDMA6_QM_ARB_MST_QUIET_PER 0x5C8B34
+
+#define mmDMA6_QM_ARB_SLV_CHOISE_WDT 0x5C8B38
+
+#define mmDMA6_QM_ARB_SLV_ID 0x5C8B3C
+
+#define mmDMA6_QM_ARB_MSG_MAX_INFLIGHT 0x5C8B44
+
+#define mmDMA6_QM_ARB_MSG_AWUSER_31_11 0x5C8B48
+
+#define mmDMA6_QM_ARB_MSG_AWUSER_SEC_PROP 0x5C8B4C
+
+#define mmDMA6_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5C8B50
+
+#define mmDMA6_QM_ARB_BASE_LO 0x5C8B54
+
+#define mmDMA6_QM_ARB_BASE_HI 0x5C8B58
+
+#define mmDMA6_QM_ARB_STATE_STS 0x5C8B80
+
+#define mmDMA6_QM_ARB_CHOISE_FULLNESS_STS 0x5C8B84
+
+#define mmDMA6_QM_ARB_MSG_STS 0x5C8B88
+
+#define mmDMA6_QM_ARB_SLV_CHOISE_Q_HEAD 0x5C8B8C
+
+#define mmDMA6_QM_ARB_ERR_CAUSE 0x5C8B9C
+
+#define mmDMA6_QM_ARB_ERR_MSG_EN 0x5C8BA0
+
+#define mmDMA6_QM_ARB_ERR_STS_DRP 0x5C8BA8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_0 0x5C8BB0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_1 0x5C8BB4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_2 0x5C8BB8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_3 0x5C8BBC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_4 0x5C8BC0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_5 0x5C8BC4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_6 0x5C8BC8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_7 0x5C8BCC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_8 0x5C8BD0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_9 0x5C8BD4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_10 0x5C8BD8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_11 0x5C8BDC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_12 0x5C8BE0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_13 0x5C8BE4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_14 0x5C8BE8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_15 0x5C8BEC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_16 0x5C8BF0
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_17 0x5C8BF4
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_18 0x5C8BF8
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_19 0x5C8BFC
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_20 0x5C8C00
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_21 0x5C8C04
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_22 0x5C8C08
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_23 0x5C8C0C
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_24 0x5C8C10
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_25 0x5C8C14
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_26 0x5C8C18
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_27 0x5C8C1C
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_28 0x5C8C20
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_29 0x5C8C24
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_30 0x5C8C28
+
+#define mmDMA6_QM_ARB_MST_CRED_STS_31 0x5C8C2C
+
+#define mmDMA6_QM_CGM_CFG 0x5C8C70
+
+#define mmDMA6_QM_CGM_STS 0x5C8C74
+
+#define mmDMA6_QM_CGM_CFG1 0x5C8C78
+
+#define mmDMA6_QM_LOCAL_RANGE_BASE 0x5C8C80
+
+#define mmDMA6_QM_LOCAL_RANGE_SIZE 0x5C8C84
+
+#define mmDMA6_QM_CSMR_STRICT_PRIO_CFG 0x5C8C90
+
+#define mmDMA6_QM_HBW_RD_RATE_LIM_CFG_1 0x5C8C94
+
+#define mmDMA6_QM_LBW_WR_RATE_LIM_CFG_0 0x5C8C98
+
+#define mmDMA6_QM_LBW_WR_RATE_LIM_CFG_1 0x5C8C9C
+
+#define mmDMA6_QM_HBW_RD_RATE_LIM_CFG_0 0x5C8CA0
+
+#define mmDMA6_QM_GLBL_AXCACHE 0x5C8CA4
+
+#define mmDMA6_QM_IND_GW_APB_CFG 0x5C8CB0
+
+#define mmDMA6_QM_IND_GW_APB_WDATA 0x5C8CB4
+
+#define mmDMA6_QM_IND_GW_APB_RDATA 0x5C8CB8
+
+#define mmDMA6_QM_IND_GW_APB_STATUS 0x5C8CBC
+
+#define mmDMA6_QM_GLBL_ERR_ADDR_LO 0x5C8CD0
+
+#define mmDMA6_QM_GLBL_ERR_ADDR_HI 0x5C8CD4
+
+#define mmDMA6_QM_GLBL_ERR_WDATA 0x5C8CD8
+
+#define mmDMA6_QM_GLBL_MEM_INIT_BUSY 0x5C8D00
+
+#endif /* ASIC_REG_DMA6_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
new file mode 100644
index 000000000000..8dd705d20195
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA7_CORE_REGS_H_
+#define ASIC_REG_DMA7_CORE_REGS_H_
+
+/*
+ *****************************************
+ * DMA7_CORE (Prototype: DMA_CORE)
+ *****************************************
+ */
+
+#define mmDMA7_CORE_CFG_0 0x5E0000
+
+#define mmDMA7_CORE_CFG_1 0x5E0004
+
+#define mmDMA7_CORE_LBW_MAX_OUTSTAND 0x5E0008
+
+#define mmDMA7_CORE_SRC_BASE_LO 0x5E0014
+
+#define mmDMA7_CORE_SRC_BASE_HI 0x5E0018
+
+#define mmDMA7_CORE_DST_BASE_LO 0x5E001C
+
+#define mmDMA7_CORE_DST_BASE_HI 0x5E0020
+
+#define mmDMA7_CORE_SRC_TSIZE_1 0x5E002C
+
+#define mmDMA7_CORE_SRC_STRIDE_1 0x5E0030
+
+#define mmDMA7_CORE_SRC_TSIZE_2 0x5E0034
+
+#define mmDMA7_CORE_SRC_STRIDE_2 0x5E0038
+
+#define mmDMA7_CORE_SRC_TSIZE_3 0x5E003C
+
+#define mmDMA7_CORE_SRC_STRIDE_3 0x5E0040
+
+#define mmDMA7_CORE_SRC_TSIZE_4 0x5E0044
+
+#define mmDMA7_CORE_SRC_STRIDE_4 0x5E0048
+
+#define mmDMA7_CORE_SRC_TSIZE_0 0x5E004C
+
+#define mmDMA7_CORE_DST_TSIZE_1 0x5E0054
+
+#define mmDMA7_CORE_DST_STRIDE_1 0x5E0058
+
+#define mmDMA7_CORE_DST_TSIZE_2 0x5E005C
+
+#define mmDMA7_CORE_DST_STRIDE_2 0x5E0060
+
+#define mmDMA7_CORE_DST_TSIZE_3 0x5E0064
+
+#define mmDMA7_CORE_DST_STRIDE_3 0x5E0068
+
+#define mmDMA7_CORE_DST_TSIZE_4 0x5E006C
+
+#define mmDMA7_CORE_DST_STRIDE_4 0x5E0070
+
+#define mmDMA7_CORE_DST_TSIZE_0 0x5E0074
+
+#define mmDMA7_CORE_COMMIT 0x5E0078
+
+#define mmDMA7_CORE_WR_COMP_WDATA 0x5E007C
+
+#define mmDMA7_CORE_WR_COMP_ADDR_LO 0x5E0080
+
+#define mmDMA7_CORE_WR_COMP_ADDR_HI 0x5E0084
+
+#define mmDMA7_CORE_WR_COMP_AWUSER_31_11 0x5E0088
+
+#define mmDMA7_CORE_TE_NUMROWS 0x5E0094
+
+#define mmDMA7_CORE_PROT 0x5E00B8
+
+#define mmDMA7_CORE_SECURE_PROPS 0x5E00F0
+
+#define mmDMA7_CORE_NON_SECURE_PROPS 0x5E00F4
+
+#define mmDMA7_CORE_RD_MAX_OUTSTAND 0x5E0100
+
+#define mmDMA7_CORE_RD_MAX_SIZE 0x5E0104
+
+#define mmDMA7_CORE_RD_ARCACHE 0x5E0108
+
+#define mmDMA7_CORE_RD_ARUSER_31_11 0x5E0110
+
+#define mmDMA7_CORE_RD_INFLIGHTS 0x5E0114
+
+#define mmDMA7_CORE_WR_MAX_OUTSTAND 0x5E0120
+
+#define mmDMA7_CORE_WR_MAX_AWID 0x5E0124
+
+#define mmDMA7_CORE_WR_AWCACHE 0x5E0128
+
+#define mmDMA7_CORE_WR_AWUSER_31_11 0x5E0130
+
+#define mmDMA7_CORE_WR_INFLIGHTS 0x5E0134
+
+#define mmDMA7_CORE_RD_RATE_LIM_CFG_0 0x5E0150
+
+#define mmDMA7_CORE_RD_RATE_LIM_CFG_1 0x5E0154
+
+#define mmDMA7_CORE_WR_RATE_LIM_CFG_0 0x5E0158
+
+#define mmDMA7_CORE_WR_RATE_LIM_CFG_1 0x5E015C
+
+#define mmDMA7_CORE_ERR_CFG 0x5E0160
+
+#define mmDMA7_CORE_ERR_CAUSE 0x5E0164
+
+#define mmDMA7_CORE_ERRMSG_ADDR_LO 0x5E0170
+
+#define mmDMA7_CORE_ERRMSG_ADDR_HI 0x5E0174
+
+#define mmDMA7_CORE_ERRMSG_WDATA 0x5E0178
+
+#define mmDMA7_CORE_STS0 0x5E0190
+
+#define mmDMA7_CORE_STS1 0x5E0194
+
+#define mmDMA7_CORE_RD_DBGMEM_ADD 0x5E0200
+
+#define mmDMA7_CORE_RD_DBGMEM_DATA_WR 0x5E0204
+
+#define mmDMA7_CORE_RD_DBGMEM_DATA_RD 0x5E0208
+
+#define mmDMA7_CORE_RD_DBGMEM_CTRL 0x5E020C
+
+#define mmDMA7_CORE_RD_DBGMEM_RC 0x5E0210
+
+#define mmDMA7_CORE_DBG_HBW_AXI_AR_CNT 0x5E0220
+
+#define mmDMA7_CORE_DBG_HBW_AXI_AW_CNT 0x5E0224
+
+#define mmDMA7_CORE_DBG_LBW_AXI_AW_CNT 0x5E0228
+
+#define mmDMA7_CORE_DBG_DESC_CNT 0x5E022C
+
+#define mmDMA7_CORE_DBG_STS 0x5E0230
+
+#define mmDMA7_CORE_DBG_RD_DESC_ID 0x5E0234
+
+#define mmDMA7_CORE_DBG_WR_DESC_ID 0x5E0238
+
+#endif /* ASIC_REG_DMA7_CORE_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
new file mode 100644
index 000000000000..d6c631f63e3e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA7_QM_REGS_H_
+#define ASIC_REG_DMA7_QM_REGS_H_
+
+/*
+ *****************************************
+ * DMA7_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmDMA7_QM_GLBL_CFG0 0x5E8000
+
+#define mmDMA7_QM_GLBL_CFG1 0x5E8004
+
+#define mmDMA7_QM_GLBL_PROT 0x5E8008
+
+#define mmDMA7_QM_GLBL_ERR_CFG 0x5E800C
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_0 0x5E8010
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_1 0x5E8014
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_2 0x5E8018
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_3 0x5E801C
+
+#define mmDMA7_QM_GLBL_SECURE_PROPS_4 0x5E8020
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_0 0x5E8024
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_1 0x5E8028
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_2 0x5E802C
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_3 0x5E8030
+
+#define mmDMA7_QM_GLBL_NON_SECURE_PROPS_4 0x5E8034
+
+#define mmDMA7_QM_GLBL_STS0 0x5E8038
+
+#define mmDMA7_QM_GLBL_STS1_0 0x5E8040
+
+#define mmDMA7_QM_GLBL_STS1_1 0x5E8044
+
+#define mmDMA7_QM_GLBL_STS1_2 0x5E8048
+
+#define mmDMA7_QM_GLBL_STS1_3 0x5E804C
+
+#define mmDMA7_QM_GLBL_STS1_4 0x5E8050
+
+#define mmDMA7_QM_GLBL_MSG_EN_0 0x5E8054
+
+#define mmDMA7_QM_GLBL_MSG_EN_1 0x5E8058
+
+#define mmDMA7_QM_GLBL_MSG_EN_2 0x5E805C
+
+#define mmDMA7_QM_GLBL_MSG_EN_3 0x5E8060
+
+#define mmDMA7_QM_GLBL_MSG_EN_4 0x5E8068
+
+#define mmDMA7_QM_PQ_BASE_LO_0 0x5E8070
+
+#define mmDMA7_QM_PQ_BASE_LO_1 0x5E8074
+
+#define mmDMA7_QM_PQ_BASE_LO_2 0x5E8078
+
+#define mmDMA7_QM_PQ_BASE_LO_3 0x5E807C
+
+#define mmDMA7_QM_PQ_BASE_HI_0 0x5E8080
+
+#define mmDMA7_QM_PQ_BASE_HI_1 0x5E8084
+
+#define mmDMA7_QM_PQ_BASE_HI_2 0x5E8088
+
+#define mmDMA7_QM_PQ_BASE_HI_3 0x5E808C
+
+#define mmDMA7_QM_PQ_SIZE_0 0x5E8090
+
+#define mmDMA7_QM_PQ_SIZE_1 0x5E8094
+
+#define mmDMA7_QM_PQ_SIZE_2 0x5E8098
+
+#define mmDMA7_QM_PQ_SIZE_3 0x5E809C
+
+#define mmDMA7_QM_PQ_PI_0 0x5E80A0
+
+#define mmDMA7_QM_PQ_PI_1 0x5E80A4
+
+#define mmDMA7_QM_PQ_PI_2 0x5E80A8
+
+#define mmDMA7_QM_PQ_PI_3 0x5E80AC
+
+#define mmDMA7_QM_PQ_CI_0 0x5E80B0
+
+#define mmDMA7_QM_PQ_CI_1 0x5E80B4
+
+#define mmDMA7_QM_PQ_CI_2 0x5E80B8
+
+#define mmDMA7_QM_PQ_CI_3 0x5E80BC
+
+#define mmDMA7_QM_PQ_CFG0_0 0x5E80C0
+
+#define mmDMA7_QM_PQ_CFG0_1 0x5E80C4
+
+#define mmDMA7_QM_PQ_CFG0_2 0x5E80C8
+
+#define mmDMA7_QM_PQ_CFG0_3 0x5E80CC
+
+#define mmDMA7_QM_PQ_CFG1_0 0x5E80D0
+
+#define mmDMA7_QM_PQ_CFG1_1 0x5E80D4
+
+#define mmDMA7_QM_PQ_CFG1_2 0x5E80D8
+
+#define mmDMA7_QM_PQ_CFG1_3 0x5E80DC
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_0 0x5E80E0
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_1 0x5E80E4
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_2 0x5E80E8
+
+#define mmDMA7_QM_PQ_ARUSER_31_11_3 0x5E80EC
+
+#define mmDMA7_QM_PQ_STS0_0 0x5E80F0
+
+#define mmDMA7_QM_PQ_STS0_1 0x5E80F4
+
+#define mmDMA7_QM_PQ_STS0_2 0x5E80F8
+
+#define mmDMA7_QM_PQ_STS0_3 0x5E80FC
+
+#define mmDMA7_QM_PQ_STS1_0 0x5E8100
+
+#define mmDMA7_QM_PQ_STS1_1 0x5E8104
+
+#define mmDMA7_QM_PQ_STS1_2 0x5E8108
+
+#define mmDMA7_QM_PQ_STS1_3 0x5E810C
+
+#define mmDMA7_QM_CQ_CFG0_0 0x5E8110
+
+#define mmDMA7_QM_CQ_CFG0_1 0x5E8114
+
+#define mmDMA7_QM_CQ_CFG0_2 0x5E8118
+
+#define mmDMA7_QM_CQ_CFG0_3 0x5E811C
+
+#define mmDMA7_QM_CQ_CFG0_4 0x5E8120
+
+#define mmDMA7_QM_CQ_CFG1_0 0x5E8124
+
+#define mmDMA7_QM_CQ_CFG1_1 0x5E8128
+
+#define mmDMA7_QM_CQ_CFG1_2 0x5E812C
+
+#define mmDMA7_QM_CQ_CFG1_3 0x5E8130
+
+#define mmDMA7_QM_CQ_CFG1_4 0x5E8134
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_0 0x5E8138
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_1 0x5E813C
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_2 0x5E8140
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_3 0x5E8144
+
+#define mmDMA7_QM_CQ_ARUSER_31_11_4 0x5E8148
+
+#define mmDMA7_QM_CQ_STS0_0 0x5E814C
+
+#define mmDMA7_QM_CQ_STS0_1 0x5E8150
+
+#define mmDMA7_QM_CQ_STS0_2 0x5E8154
+
+#define mmDMA7_QM_CQ_STS0_3 0x5E8158
+
+#define mmDMA7_QM_CQ_STS0_4 0x5E815C
+
+#define mmDMA7_QM_CQ_STS1_0 0x5E8160
+
+#define mmDMA7_QM_CQ_STS1_1 0x5E8164
+
+#define mmDMA7_QM_CQ_STS1_2 0x5E8168
+
+#define mmDMA7_QM_CQ_STS1_3 0x5E816C
+
+#define mmDMA7_QM_CQ_STS1_4 0x5E8170
+
+#define mmDMA7_QM_CQ_PTR_LO_0 0x5E8174
+
+#define mmDMA7_QM_CQ_PTR_HI_0 0x5E8178
+
+#define mmDMA7_QM_CQ_TSIZE_0 0x5E817C
+
+#define mmDMA7_QM_CQ_CTL_0 0x5E8180
+
+#define mmDMA7_QM_CQ_PTR_LO_1 0x5E8184
+
+#define mmDMA7_QM_CQ_PTR_HI_1 0x5E8188
+
+#define mmDMA7_QM_CQ_TSIZE_1 0x5E818C
+
+#define mmDMA7_QM_CQ_CTL_1 0x5E8190
+
+#define mmDMA7_QM_CQ_PTR_LO_2 0x5E8194
+
+#define mmDMA7_QM_CQ_PTR_HI_2 0x5E8198
+
+#define mmDMA7_QM_CQ_TSIZE_2 0x5E819C
+
+#define mmDMA7_QM_CQ_CTL_2 0x5E81A0
+
+#define mmDMA7_QM_CQ_PTR_LO_3 0x5E81A4
+
+#define mmDMA7_QM_CQ_PTR_HI_3 0x5E81A8
+
+#define mmDMA7_QM_CQ_TSIZE_3 0x5E81AC
+
+#define mmDMA7_QM_CQ_CTL_3 0x5E81B0
+
+#define mmDMA7_QM_CQ_PTR_LO_4 0x5E81B4
+
+#define mmDMA7_QM_CQ_PTR_HI_4 0x5E81B8
+
+#define mmDMA7_QM_CQ_TSIZE_4 0x5E81BC
+
+#define mmDMA7_QM_CQ_CTL_4 0x5E81C0
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_0 0x5E81C4
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_1 0x5E81C8
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_2 0x5E81CC
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_3 0x5E81D0
+
+#define mmDMA7_QM_CQ_PTR_LO_STS_4 0x5E81D4
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_0 0x5E81D8
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_1 0x5E81DC
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_2 0x5E81E0
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_3 0x5E81E4
+
+#define mmDMA7_QM_CQ_PTR_HI_STS_4 0x5E81E8
+
+#define mmDMA7_QM_CQ_TSIZE_STS_0 0x5E81EC
+
+#define mmDMA7_QM_CQ_TSIZE_STS_1 0x5E81F0
+
+#define mmDMA7_QM_CQ_TSIZE_STS_2 0x5E81F4
+
+#define mmDMA7_QM_CQ_TSIZE_STS_3 0x5E81F8
+
+#define mmDMA7_QM_CQ_TSIZE_STS_4 0x5E81FC
+
+#define mmDMA7_QM_CQ_CTL_STS_0 0x5E8200
+
+#define mmDMA7_QM_CQ_CTL_STS_1 0x5E8204
+
+#define mmDMA7_QM_CQ_CTL_STS_2 0x5E8208
+
+#define mmDMA7_QM_CQ_CTL_STS_3 0x5E820C
+
+#define mmDMA7_QM_CQ_CTL_STS_4 0x5E8210
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_0 0x5E8214
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_1 0x5E8218
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_2 0x5E821C
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_3 0x5E8220
+
+#define mmDMA7_QM_CQ_IFIFO_CNT_4 0x5E8224
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_0 0x5E8228
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_1 0x5E822C
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_2 0x5E8230
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_3 0x5E8234
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_LO_4 0x5E8238
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_0 0x5E823C
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_1 0x5E8240
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_2 0x5E8244
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_3 0x5E8248
+
+#define mmDMA7_QM_CP_MSG_BASE0_ADDR_HI_4 0x5E824C
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_0 0x5E8250
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_1 0x5E8254
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_2 0x5E8258
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_3 0x5E825C
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_LO_4 0x5E8260
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_0 0x5E8264
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_1 0x5E8268
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_2 0x5E826C
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_3 0x5E8270
+
+#define mmDMA7_QM_CP_MSG_BASE1_ADDR_HI_4 0x5E8274
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_0 0x5E8278
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_1 0x5E827C
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_2 0x5E8280
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_3 0x5E8284
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_LO_4 0x5E8288
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_0 0x5E828C
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_1 0x5E8290
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_2 0x5E8294
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_3 0x5E8298
+
+#define mmDMA7_QM_CP_MSG_BASE2_ADDR_HI_4 0x5E829C
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_0 0x5E82A0
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_1 0x5E82A4
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_2 0x5E82A8
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_3 0x5E82AC
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_LO_4 0x5E82B0
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_0 0x5E82B4
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_1 0x5E82B8
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_2 0x5E82BC
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_3 0x5E82C0
+
+#define mmDMA7_QM_CP_MSG_BASE3_ADDR_HI_4 0x5E82C4
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_0 0x5E82C8
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_1 0x5E82CC
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_2 0x5E82D0
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_3 0x5E82D4
+
+#define mmDMA7_QM_CP_LDMA_TSIZE_OFFSET_4 0x5E82D8
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5E82E0
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5E82E4
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5E82E8
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5E82EC
+
+#define mmDMA7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5E82F0
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5E82F4
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5E82F8
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5E82FC
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x5E8300
+
+#define mmDMA7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x5E8304
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_0 0x5E8308
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_1 0x5E830C
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_2 0x5E8310
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_3 0x5E8314
+
+#define mmDMA7_QM_CP_FENCE0_RDATA_4 0x5E8318
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_0 0x5E831C
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_1 0x5E8320
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_2 0x5E8324
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_3 0x5E8328
+
+#define mmDMA7_QM_CP_FENCE1_RDATA_4 0x5E832C
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_0 0x5E8330
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_1 0x5E8334
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_2 0x5E8338
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_3 0x5E833C
+
+#define mmDMA7_QM_CP_FENCE2_RDATA_4 0x5E8340
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_0 0x5E8344
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_1 0x5E8348
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_2 0x5E834C
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_3 0x5E8350
+
+#define mmDMA7_QM_CP_FENCE3_RDATA_4 0x5E8354
+
+#define mmDMA7_QM_CP_FENCE0_CNT_0 0x5E8358
+
+#define mmDMA7_QM_CP_FENCE0_CNT_1 0x5E835C
+
+#define mmDMA7_QM_CP_FENCE0_CNT_2 0x5E8360
+
+#define mmDMA7_QM_CP_FENCE0_CNT_3 0x5E8364
+
+#define mmDMA7_QM_CP_FENCE0_CNT_4 0x5E8368
+
+#define mmDMA7_QM_CP_FENCE1_CNT_0 0x5E836C
+
+#define mmDMA7_QM_CP_FENCE1_CNT_1 0x5E8370
+
+#define mmDMA7_QM_CP_FENCE1_CNT_2 0x5E8374
+
+#define mmDMA7_QM_CP_FENCE1_CNT_3 0x5E8378
+
+#define mmDMA7_QM_CP_FENCE1_CNT_4 0x5E837C
+
+#define mmDMA7_QM_CP_FENCE2_CNT_0 0x5E8380
+
+#define mmDMA7_QM_CP_FENCE2_CNT_1 0x5E8384
+
+#define mmDMA7_QM_CP_FENCE2_CNT_2 0x5E8388
+
+#define mmDMA7_QM_CP_FENCE2_CNT_3 0x5E838C
+
+#define mmDMA7_QM_CP_FENCE2_CNT_4 0x5E8390
+
+#define mmDMA7_QM_CP_FENCE3_CNT_0 0x5E8394
+
+#define mmDMA7_QM_CP_FENCE3_CNT_1 0x5E8398
+
+#define mmDMA7_QM_CP_FENCE3_CNT_2 0x5E839C
+
+#define mmDMA7_QM_CP_FENCE3_CNT_3 0x5E83A0
+
+#define mmDMA7_QM_CP_FENCE3_CNT_4 0x5E83A4
+
+#define mmDMA7_QM_CP_STS_0 0x5E83A8
+
+#define mmDMA7_QM_CP_STS_1 0x5E83AC
+
+#define mmDMA7_QM_CP_STS_2 0x5E83B0
+
+#define mmDMA7_QM_CP_STS_3 0x5E83B4
+
+#define mmDMA7_QM_CP_STS_4 0x5E83B8
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_0 0x5E83BC
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_1 0x5E83C0
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_2 0x5E83C4
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_3 0x5E83C8
+
+#define mmDMA7_QM_CP_CURRENT_INST_LO_4 0x5E83CC
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_0 0x5E83D0
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_1 0x5E83D4
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_2 0x5E83D8
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_3 0x5E83DC
+
+#define mmDMA7_QM_CP_CURRENT_INST_HI_4 0x5E83E0
+
+#define mmDMA7_QM_CP_BARRIER_CFG_0 0x5E83F4
+
+#define mmDMA7_QM_CP_BARRIER_CFG_1 0x5E83F8
+
+#define mmDMA7_QM_CP_BARRIER_CFG_2 0x5E83FC
+
+#define mmDMA7_QM_CP_BARRIER_CFG_3 0x5E8400
+
+#define mmDMA7_QM_CP_BARRIER_CFG_4 0x5E8404
+
+#define mmDMA7_QM_CP_DBG_0_0 0x5E8408
+
+#define mmDMA7_QM_CP_DBG_0_1 0x5E840C
+
+#define mmDMA7_QM_CP_DBG_0_2 0x5E8410
+
+#define mmDMA7_QM_CP_DBG_0_3 0x5E8414
+
+#define mmDMA7_QM_CP_DBG_0_4 0x5E8418
+
+#define mmDMA7_QM_CP_ARUSER_31_11_0 0x5E841C
+
+#define mmDMA7_QM_CP_ARUSER_31_11_1 0x5E8420
+
+#define mmDMA7_QM_CP_ARUSER_31_11_2 0x5E8424
+
+#define mmDMA7_QM_CP_ARUSER_31_11_3 0x5E8428
+
+#define mmDMA7_QM_CP_ARUSER_31_11_4 0x5E842C
+
+#define mmDMA7_QM_CP_AWUSER_31_11_0 0x5E8430
+
+#define mmDMA7_QM_CP_AWUSER_31_11_1 0x5E8434
+
+#define mmDMA7_QM_CP_AWUSER_31_11_2 0x5E8438
+
+#define mmDMA7_QM_CP_AWUSER_31_11_3 0x5E843C
+
+#define mmDMA7_QM_CP_AWUSER_31_11_4 0x5E8440
+
+#define mmDMA7_QM_ARB_CFG_0 0x5E8A00
+
+#define mmDMA7_QM_ARB_CHOISE_Q_PUSH 0x5E8A04
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_0 0x5E8A08
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_1 0x5E8A0C
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_2 0x5E8A10
+
+#define mmDMA7_QM_ARB_WRR_WEIGHT_3 0x5E8A14
+
+#define mmDMA7_QM_ARB_CFG_1 0x5E8A18
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_0 0x5E8A20
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_1 0x5E8A24
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_2 0x5E8A28
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_3 0x5E8A2C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_4 0x5E8A30
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_5 0x5E8A34
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_6 0x5E8A38
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_7 0x5E8A3C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_8 0x5E8A40
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_9 0x5E8A44
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_10 0x5E8A48
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_11 0x5E8A4C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_12 0x5E8A50
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_13 0x5E8A54
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_14 0x5E8A58
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_15 0x5E8A5C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_16 0x5E8A60
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_17 0x5E8A64
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_18 0x5E8A68
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_19 0x5E8A6C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_20 0x5E8A70
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_21 0x5E8A74
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_22 0x5E8A78
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_23 0x5E8A7C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_24 0x5E8A80
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_25 0x5E8A84
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_26 0x5E8A88
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_27 0x5E8A8C
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_28 0x5E8A90
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_29 0x5E8A94
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_30 0x5E8A98
+
+#define mmDMA7_QM_ARB_MST_AVAIL_CRED_31 0x5E8A9C
+
+#define mmDMA7_QM_ARB_MST_CRED_INC 0x5E8AA0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x5E8AA4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x5E8AA8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x5E8AAC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x5E8AB0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x5E8AB4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x5E8AB8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x5E8ABC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x5E8AC0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x5E8AC4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x5E8AC8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x5E8ACC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x5E8AD0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x5E8AD4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x5E8AD8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x5E8ADC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x5E8AE0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x5E8AE4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x5E8AE8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x5E8AEC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x5E8AF0
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x5E8AF4
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x5E8AF8
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x5E8AFC
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x5E8B00
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x5E8B04
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x5E8B08
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x5E8B0C
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x5E8B10
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x5E8B14
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x5E8B18
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x5E8B1C
+
+#define mmDMA7_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x5E8B20
+
+#define mmDMA7_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x5E8B28
+
+#define mmDMA7_QM_ARB_MST_SLAVE_EN 0x5E8B2C
+
+#define mmDMA7_QM_ARB_MST_QUIET_PER 0x5E8B34
+
+#define mmDMA7_QM_ARB_SLV_CHOISE_WDT 0x5E8B38
+
+#define mmDMA7_QM_ARB_SLV_ID 0x5E8B3C
+
+#define mmDMA7_QM_ARB_MSG_MAX_INFLIGHT 0x5E8B44
+
+#define mmDMA7_QM_ARB_MSG_AWUSER_31_11 0x5E8B48
+
+#define mmDMA7_QM_ARB_MSG_AWUSER_SEC_PROP 0x5E8B4C
+
+#define mmDMA7_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x5E8B50
+
+#define mmDMA7_QM_ARB_BASE_LO 0x5E8B54
+
+#define mmDMA7_QM_ARB_BASE_HI 0x5E8B58
+
+#define mmDMA7_QM_ARB_STATE_STS 0x5E8B80
+
+#define mmDMA7_QM_ARB_CHOISE_FULLNESS_STS 0x5E8B84
+
+#define mmDMA7_QM_ARB_MSG_STS 0x5E8B88
+
+#define mmDMA7_QM_ARB_SLV_CHOISE_Q_HEAD 0x5E8B8C
+
+#define mmDMA7_QM_ARB_ERR_CAUSE 0x5E8B9C
+
+#define mmDMA7_QM_ARB_ERR_MSG_EN 0x5E8BA0
+
+#define mmDMA7_QM_ARB_ERR_STS_DRP 0x5E8BA8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_0 0x5E8BB0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_1 0x5E8BB4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_2 0x5E8BB8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_3 0x5E8BBC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_4 0x5E8BC0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_5 0x5E8BC4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_6 0x5E8BC8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_7 0x5E8BCC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_8 0x5E8BD0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_9 0x5E8BD4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_10 0x5E8BD8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_11 0x5E8BDC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_12 0x5E8BE0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_13 0x5E8BE4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_14 0x5E8BE8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_15 0x5E8BEC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_16 0x5E8BF0
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_17 0x5E8BF4
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_18 0x5E8BF8
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_19 0x5E8BFC
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_20 0x5E8C00
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_21 0x5E8C04
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_22 0x5E8C08
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_23 0x5E8C0C
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_24 0x5E8C10
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_25 0x5E8C14
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_26 0x5E8C18
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_27 0x5E8C1C
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_28 0x5E8C20
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_29 0x5E8C24
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_30 0x5E8C28
+
+#define mmDMA7_QM_ARB_MST_CRED_STS_31 0x5E8C2C
+
+#define mmDMA7_QM_CGM_CFG 0x5E8C70
+
+#define mmDMA7_QM_CGM_STS 0x5E8C74
+
+#define mmDMA7_QM_CGM_CFG1 0x5E8C78
+
+#define mmDMA7_QM_LOCAL_RANGE_BASE 0x5E8C80
+
+#define mmDMA7_QM_LOCAL_RANGE_SIZE 0x5E8C84
+
+#define mmDMA7_QM_CSMR_STRICT_PRIO_CFG 0x5E8C90
+
+#define mmDMA7_QM_HBW_RD_RATE_LIM_CFG_1 0x5E8C94
+
+#define mmDMA7_QM_LBW_WR_RATE_LIM_CFG_0 0x5E8C98
+
+#define mmDMA7_QM_LBW_WR_RATE_LIM_CFG_1 0x5E8C9C
+
+#define mmDMA7_QM_HBW_RD_RATE_LIM_CFG_0 0x5E8CA0
+
+#define mmDMA7_QM_GLBL_AXCACHE 0x5E8CA4
+
+#define mmDMA7_QM_IND_GW_APB_CFG 0x5E8CB0
+
+#define mmDMA7_QM_IND_GW_APB_WDATA 0x5E8CB4
+
+#define mmDMA7_QM_IND_GW_APB_RDATA 0x5E8CB8
+
+#define mmDMA7_QM_IND_GW_APB_STATUS 0x5E8CBC
+
+#define mmDMA7_QM_GLBL_ERR_ADDR_LO 0x5E8CD0
+
+#define mmDMA7_QM_GLBL_ERR_ADDR_HI 0x5E8CD4
+
+#define mmDMA7_QM_GLBL_ERR_WDATA 0x5E8CD8
+
+#define mmDMA7_QM_GLBL_MEM_INIT_BUSY 0x5E8D00
+
+#endif /* ASIC_REG_DMA7_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
new file mode 100644
index 000000000000..8c1c72df4469
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_DOWN_CH0_PERM_SEL 0x4E1108
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_0 0x4E1114
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_1 0x4E1118
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_2 0x4E111C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_3 0x4E1120
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_4 0x4E1124
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_5 0x4E1128
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_6 0x4E112C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_7 0x4E1130
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_8 0x4E1134
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_9 0x4E1138
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_10 0x4E113C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_11 0x4E1140
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_12 0x4E1144
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_13 0x4E1148
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_14 0x4E114C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_15 0x4E1150
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_16 0x4E1154
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_17 0x4E1158
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_18 0x4E115C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_19 0x4E1160
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_20 0x4E1164
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_21 0x4E1168
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_22 0x4E116C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_23 0x4E1170
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_24 0x4E1174
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_25 0x4E1178
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_26 0x4E117C
+
+#define mmDMA_IF_E_N_DOWN_CH0_HBM_POLY_H3_27 0x4E1180
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_0 0x4E1184
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_1 0x4E1188
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_2 0x4E118C
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_3 0x4E1190
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_4 0x4E1194
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_5 0x4E1198
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_6 0x4E119C
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_7 0x4E11A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_8 0x4E11A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_9 0x4E11A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_10 0x4E11AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_11 0x4E11B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_12 0x4E11B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_13 0x4E11B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_SRAM_POLY_H3_14 0x4E11BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN 0x4E126C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_EN 0x4E1274
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_SAT 0x4E1278
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_RST 0x4E127C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_HBM_TIMEOUT 0x4E1280
+
+#define mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN 0x4E1284
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_EN 0x4E1288
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_SAT 0x4E128C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_RST 0x4E1290
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_PCI_TIMEOUT 0x4E1294
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_EN 0x4E129C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_SAT 0x4E12A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RST 0x4E12A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_TIMEOUT 0x4E12AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RL_SRAM_RED 0x4E12B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN 0x4E12EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN 0x4E12F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE 0x4E12F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE 0x4E12F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4E1404
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4E1408
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4E140C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4E1410
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4E1414
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4E1418
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE 0x4E141C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE 0x4E1420
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4E1424
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4E1428
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4E142C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4E1430
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4E1434
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4E1438
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0 0x4E1450
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1 0x4E1454
+
+#define mmDMA_IF_E_N_DOWN_CH0_NON_LIN_EN 0x4E1480
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_0 0x4E1500
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_1 0x4E1504
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_2 0x4E1508
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_3 0x4E150C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_BANK_4 0x4E1510
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_0 0x4E1514
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_1 0x4E1520
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_2 0x4E1524
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_3 0x4E1528
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_4 0x4E152C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_5 0x4E1530
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_6 0x4E1534
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_7 0x4E1538
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_8 0x4E153C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_SRAM_OFFSET_9 0x4E1540
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_0 0x4E1550
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_1 0x4E1554
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_2 0x4E1558
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_3 0x4E155C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_4 0x4E1560
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_5 0x4E1564
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_6 0x4E1568
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_7 0x4E156C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_8 0x4E1570
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_9 0x4E1574
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_10 0x4E1578
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_11 0x4E157C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_12 0x4E1580
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_13 0x4E1584
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_14 0x4E1588
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_15 0x4E158C
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_16 0x4E1590
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_17 0x4E1594
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18 0x4E1598
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4E15E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4E15E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4E15EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4E15F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4E15F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4E15F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4E15FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4E1600
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4E1604
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4E1608
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4E160C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4E1610
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4E1614
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4E1618
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4E161C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4E1620
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4E1624
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4E1628
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4E162C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4E1630
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4E1634
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4E1638
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4E163C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4E1640
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4E1644
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4E1648
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4E164C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4E1650
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4E1654
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4E1658
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4E165C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4E1660
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4E1664
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4E1668
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4E166C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4E1670
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4E1674
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4E1678
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4E167C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4E1680
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4E1684
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4E1688
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4E168C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4E1690
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4E1694
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4E1698
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4E169C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4E16A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4E16A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4E16A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4E16AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4E16B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4E16B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4E16B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4E16BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4E16C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4E16C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4E16C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4E16CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4E16D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4E16D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4E16D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4E16DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4E16E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4E16E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4E16E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4E16EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4E16F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4E16F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4E16F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4E16FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4E1700
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4E1704
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4E1708
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4E170C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4E1710
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4E1714
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4E1718
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4E171C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4E1720
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4E1724
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4E1728
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4E172C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4E1730
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4E1734
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4E1738
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4E173C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4E1740
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4E1744
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4E1748
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4E174C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4E1750
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4E1754
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4E1758
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4E175C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4E1760
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4E1764
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4E1768
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4E176C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4E1770
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4E1774
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4E1778
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4E177C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4E1780
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4E1784
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4E1788
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4E178C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4E1790
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4E1794
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4E1798
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4E179C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4E17A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4E17A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4E17A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4E17AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4E17B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4E17B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4E17B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4E17BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4E17C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4E17C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4E17C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4E17CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4E17D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4E17D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4E17D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4E17DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4E17E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4E1824
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4E1828
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4E182C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4E1830
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4E1834
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4E1838
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4E183C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4E1840
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4E1844
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4E1848
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4E184C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4E1850
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4E1854
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4E1858
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4E185C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4E1860
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4E1864
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4E1868
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4E186C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4E1870
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4E1874
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4E1878
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4E187C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4E1880
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4E1884
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4E1888
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4E188C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4E1890
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4E1894
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4E1898
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4E189C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4E18A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4E18A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4E18A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4E18AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4E18B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4E18B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4E18B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4E18BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4E18C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4E18C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4E18C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4E18CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4E18D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4E18D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4E18D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4E18DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4E18E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4E18E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4E18E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4E18EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4E18F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4E18F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4E18F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4E18FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4E1900
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4E1904
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4E1908
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4E190C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4E1910
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4E1914
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4E1918
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4E191C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4E1920
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4E1924
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4E1928
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4E192C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4E1930
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4E1934
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4E1938
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4E193C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4E1940
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4E1944
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4E1948
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4E194C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4E1950
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4E1954
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4E1958
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4E195C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4E1960
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4E1964
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4E1968
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4E196C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4E1970
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4E1974
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4E1978
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4E197C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4E1980
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4E1984
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4E1988
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4E198C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4E1990
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4E1994
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4E1998
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4E199C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4E19A0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4E19A4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4E19A8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4E19AC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4E19B0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4E19B4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4E19B8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4E19BC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4E19C0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4E19C4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4E19C8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4E19CC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4E19D0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4E19D4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4E19D8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4E19DC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4E19E0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4E19E4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4E19E8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4E19EC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4E19F0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4E19F4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4E19F8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4E19FC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4E1A00
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4E1A04
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4E1A08
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4E1A0C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4E1A10
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4E1A14
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4E1A18
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4E1A1C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4E1A20
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AW 0x4E1A64
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_SEC_HIT_AR 0x4E1A68
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4E1A6C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4E1A70
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_CFG 0x4E1B64
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_SHIFT 0x4E1B68
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4E1B6C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4E1B70
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4E1B74
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4E1B78
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4E1B7C
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4E1B80
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4E1B84
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4E1B88
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_0 0x4E1BAC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_1 0x4E1BB0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_2 0x4E1BB4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_3 0x4E1BB8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_4 0x4E1BBC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_5 0x4E1BC0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_6 0x4E1BC4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_TOKEN_7 0x4E1BC8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_0 0x4E1BEC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_1 0x4E1BF0
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_2 0x4E1BF4
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_3 0x4E1BF8
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_4 0x4E1BFC
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_5 0x4E1C00
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_6 0x4E1C04
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_BANK_ID_7 0x4E1C08
+
+#define mmDMA_IF_E_N_DOWN_CH0_RGL_WDT 0x4E1C2C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4E1C30
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4E1C34
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4E1C38
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4E1C3C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4E1C40
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4E1C44
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4E1C48
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4E1C4C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4E1C50
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4E1C54
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4E1C58
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4E1C5C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4E1C60
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4E1C64
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4E1C68
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4E1C6C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4E1C70
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4E1C74
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4E1C78
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4E1C7C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4E1C80
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4E1C84
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4E1C88
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4E1C8C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4E1C90
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4E1C94
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4E1C98
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4E1C9C
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4E1CA0
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4E1CA4
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4E1CA8
+
+#define mmDMA_IF_E_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4E1CAC
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_0 0x4E1CB0
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_1 0x4E1CB4
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_2 0x4E1CB8
+
+#define mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3 0x4E1CBC
+
+#endif /* ASIC_REG_DMA_IF_E_N_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
new file mode 100644
index 000000000000..b2b593fcec30
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_DOWN_CH1_PERM_SEL 0x4E2108
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_0 0x4E2114
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_1 0x4E2118
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_2 0x4E211C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_3 0x4E2120
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_4 0x4E2124
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_5 0x4E2128
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_6 0x4E212C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_7 0x4E2130
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_8 0x4E2134
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_9 0x4E2138
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_10 0x4E213C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_11 0x4E2140
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_12 0x4E2144
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_13 0x4E2148
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_14 0x4E214C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_15 0x4E2150
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_16 0x4E2154
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_17 0x4E2158
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_18 0x4E215C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_19 0x4E2160
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_20 0x4E2164
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_21 0x4E2168
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_22 0x4E216C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_23 0x4E2170
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_24 0x4E2174
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_25 0x4E2178
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_26 0x4E217C
+
+#define mmDMA_IF_E_N_DOWN_CH1_HBM_POLY_H3_27 0x4E2180
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_0 0x4E2184
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_1 0x4E2188
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_2 0x4E218C
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_3 0x4E2190
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_4 0x4E2194
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_5 0x4E2198
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_6 0x4E219C
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_7 0x4E21A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_8 0x4E21A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_9 0x4E21A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_10 0x4E21AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_11 0x4E21B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_12 0x4E21B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_13 0x4E21B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_SRAM_POLY_H3_14 0x4E21BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN 0x4E226C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_EN 0x4E2274
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_SAT 0x4E2278
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_RST 0x4E227C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4E2280
+
+#define mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN 0x4E2284
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_EN 0x4E2288
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_SAT 0x4E228C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_RST 0x4E2290
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4E2294
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_EN 0x4E229C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_SAT 0x4E22A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RST 0x4E22A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4E22AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RL_SRAM_RED 0x4E22B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN 0x4E22EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN 0x4E22F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4E22F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4E22F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4E2404
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4E2408
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4E240C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4E2410
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4E2414
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4E2418
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4E241C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4E2420
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4E2424
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4E2428
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4E242C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4E2430
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4E2434
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4E2438
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0 0x4E2450
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1 0x4E2454
+
+#define mmDMA_IF_E_N_DOWN_CH1_NON_LIN_EN 0x4E2480
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_0 0x4E2500
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_1 0x4E2504
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_2 0x4E2508
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_3 0x4E250C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_BANK_4 0x4E2510
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4E2514
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4E2520
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4E2524
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4E2528
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4E252C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4E2530
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4E2534
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4E2538
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4E253C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4E2540
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4E2550
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4E2554
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4E2558
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4E255C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4E2560
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_5 0x4E2564
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4E2568
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4E256C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4E2570
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4E2574
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4E2578
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4E257C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4E2580
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4E2584
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4E2588
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4E258C
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4E2590
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4E2594
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4E2598
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4E25E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4E25E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4E25EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4E25F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4E25F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4E25F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4E25FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4E2600
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4E2604
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4E2608
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4E260C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4E2610
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4E2614
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4E2618
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4E261C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4E2620
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4E2624
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4E2628
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4E262C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4E2630
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4E2634
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4E2638
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4E263C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4E2640
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4E2644
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4E2648
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4E264C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4E2650
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4E2654
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4E2658
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4E265C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4E2660
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4E2664
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4E2668
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4E266C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4E2670
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4E2674
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4E2678
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4E267C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4E2680
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4E2684
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4E2688
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4E268C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4E2690
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4E2694
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4E2698
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4E269C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4E26A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4E26A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4E26A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4E26AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4E26B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4E26B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4E26B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4E26BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4E26C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4E26C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4E26C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4E26CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4E26D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4E26D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4E26D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4E26DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4E26E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4E26E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4E26E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4E26EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4E26F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4E26F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4E26F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4E26FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4E2700
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4E2704
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4E2708
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4E270C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4E2710
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4E2714
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4E2718
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4E271C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4E2720
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4E2724
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4E2728
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4E272C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4E2730
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4E2734
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4E2738
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4E273C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4E2740
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4E2744
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4E2748
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4E274C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4E2750
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4E2754
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4E2758
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4E275C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4E2760
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4E2764
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4E2768
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4E276C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4E2770
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4E2774
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4E2778
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4E277C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4E2780
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4E2784
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4E2788
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4E278C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4E2790
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4E2794
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4E2798
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4E279C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4E27A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4E27A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4E27A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4E27AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4E27B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4E27B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4E27B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4E27BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4E27C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4E27C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4E27C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4E27CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4E27D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4E27D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4E27D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4E27DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4E27E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4E2824
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4E2828
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4E282C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4E2830
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4E2834
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4E2838
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4E283C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4E2840
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4E2844
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4E2848
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4E284C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4E2850
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4E2854
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4E2858
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4E285C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4E2860
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4E2864
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4E2868
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4E286C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4E2870
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4E2874
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4E2878
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4E287C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4E2880
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4E2884
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4E2888
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4E288C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4E2890
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4E2894
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4E2898
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4E289C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4E28A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4E28A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4E28A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4E28AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4E28B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4E28B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4E28B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4E28BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4E28C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4E28C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4E28C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4E28CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4E28D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4E28D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4E28D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4E28DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4E28E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4E28E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4E28E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4E28EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4E28F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4E28F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4E28F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4E28FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4E2900
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4E2904
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4E2908
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4E290C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4E2910
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4E2914
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4E2918
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4E291C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4E2920
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4E2924
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4E2928
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4E292C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4E2930
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4E2934
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4E2938
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4E293C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4E2940
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4E2944
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4E2948
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4E294C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4E2950
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4E2954
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4E2958
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4E295C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4E2960
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4E2964
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4E2968
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4E296C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4E2970
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4E2974
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4E2978
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4E297C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4E2980
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4E2984
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4E2988
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4E298C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4E2990
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4E2994
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4E2998
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4E299C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4E29A0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4E29A4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4E29A8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4E29AC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4E29B0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4E29B4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4E29B8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4E29BC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4E29C0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4E29C4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4E29C8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4E29CC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4E29D0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4E29D4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4E29D8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4E29DC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4E29E0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4E29E4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4E29E8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4E29EC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4E29F0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4E29F4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4E29F8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4E29FC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4E2A00
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4E2A04
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4E2A08
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4E2A0C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4E2A10
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4E2A14
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4E2A18
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4E2A1C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4E2A20
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4E2A64
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4E2A68
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4E2A6C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4E2A70
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_CFG 0x4E2B64
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_SHIFT 0x4E2B68
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4E2B6C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4E2B70
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4E2B74
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4E2B78
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4E2B7C
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4E2B80
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4E2B84
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4E2B88
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_0 0x4E2BAC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_1 0x4E2BB0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_2 0x4E2BB4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_3 0x4E2BB8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_4 0x4E2BBC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_5 0x4E2BC0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_6 0x4E2BC4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_TOKEN_7 0x4E2BC8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_0 0x4E2BEC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_1 0x4E2BF0
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_2 0x4E2BF4
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_3 0x4E2BF8
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_4 0x4E2BFC
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_5 0x4E2C00
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_6 0x4E2C04
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_BANK_ID_7 0x4E2C08
+
+#define mmDMA_IF_E_N_DOWN_CH1_RGL_WDT 0x4E2C2C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4E2C30
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4E2C34
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4E2C38
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4E2C3C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4E2C40
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4E2C44
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4E2C48
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4E2C4C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4E2C50
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4E2C54
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4E2C58
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4E2C5C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4E2C60
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4E2C64
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4E2C68
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4E2C6C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4E2C70
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4E2C74
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4E2C78
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4E2C7C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4E2C80
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4E2C84
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4E2C88
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4E2C8C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4E2C90
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4E2C94
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4E2C98
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4E2C9C
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4E2CA0
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4E2CA4
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4E2CA8
+
+#define mmDMA_IF_E_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4E2CAC
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4E2CB0
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4E2CB4
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4E2CB8
+
+#define mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4E2CBC
+
+#endif /* ASIC_REG_DMA_IF_E_N_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
new file mode 100644
index 000000000000..8a10c6a76156
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_N_REGS_H_
+#define ASIC_REG_DMA_IF_E_N_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_N (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_N_HBM0_WR_CRED_CNT 0x4E0000
+
+#define mmDMA_IF_E_N_HBM1_WR_CRED_CNT 0x4E0004
+
+#define mmDMA_IF_E_N_HBM0_RD_CRED_CNT 0x4E0008
+
+#define mmDMA_IF_E_N_HBM1_RD_CRED_CNT 0x4E000C
+
+#define mmDMA_IF_E_N_HBM_LIMITER_0 0x4E0030
+
+#define mmDMA_IF_E_N_HBM_LIMITER_1 0x4E0034
+
+#define mmDMA_IF_E_N_HBM_LIMITER_2 0x4E0038
+
+#define mmDMA_IF_E_N_HBM_LIMITER_3 0x4E003C
+
+#define mmDMA_IF_E_N_HBM_ALMOST_EN_0 0x4E0040
+
+#define mmDMA_IF_E_N_HBM_ALMOST_EN_1 0x4E0044
+
+#define mmDMA_IF_E_N_HBM_CRED_EN_0 0x4E0050
+
+#define mmDMA_IF_E_N_HBM_CRED_EN_1 0x4E0054
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_0 0x4E0100
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_1 0x4E0104
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_2 0x4E0108
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_3 0x4E010C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_4 0x4E0110
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_5 0x4E0114
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_6 0x4E0118
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_7 0x4E011C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_8 0x4E0120
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_9 0x4E0124
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_10 0x4E0128
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_11 0x4E012C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_12 0x4E0130
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_13 0x4E0134
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_14 0x4E0138
+
+#define mmDMA_IF_E_N_SOB_MIN_RPROT_15 0x4E013C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_0 0x4E0140
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_1 0x4E0144
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_2 0x4E0148
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_3 0x4E014C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_4 0x4E0150
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_5 0x4E0154
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_6 0x4E0158
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_7 0x4E015C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_8 0x4E0160
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_9 0x4E0164
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_10 0x4E0168
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_11 0x4E016C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_12 0x4E0170
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_13 0x4E0174
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_14 0x4E0178
+
+#define mmDMA_IF_E_N_SOB_MAX_RPROT_15 0x4E017C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_0 0x4E0180
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_1 0x4E0184
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_2 0x4E0188
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_3 0x4E018C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_4 0x4E0190
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_5 0x4E0194
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_6 0x4E0198
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_7 0x4E019C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_8 0x4E01A0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_9 0x4E01A4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_10 0x4E01A8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_11 0x4E01AC
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_12 0x4E01B0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_13 0x4E01B4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_14 0x4E01B8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPROT_15 0x4E01BC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_0 0x4E01C0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_1 0x4E01C4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_2 0x4E01C8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_3 0x4E01CC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_4 0x4E01D0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_5 0x4E01D4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_6 0x4E01D8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_7 0x4E01DC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_8 0x4E01E0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_9 0x4E01E4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_10 0x4E01E8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_11 0x4E01EC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_12 0x4E01F0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_13 0x4E01F4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_14 0x4E01F8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPROT_15 0x4E01FC
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_0 0x4E0200
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_1 0x4E0204
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_2 0x4E0208
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_3 0x4E020C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_4 0x4E0210
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_5 0x4E0214
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_6 0x4E0218
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_7 0x4E021C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_8 0x4E0220
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_9 0x4E0224
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_10 0x4E0228
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_11 0x4E022C
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_12 0x4E0230
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_13 0x4E0234
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_14 0x4E0238
+
+#define mmDMA_IF_E_N_SOB_MIN_RPRIV_15 0x4E023C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_0 0x4E0240
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_1 0x4E0244
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_2 0x4E0248
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_3 0x4E024C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_4 0x4E0250
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_5 0x4E0254
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_6 0x4E0258
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_7 0x4E025C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_8 0x4E0260
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_9 0x4E0264
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_10 0x4E0268
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_11 0x4E026C
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_12 0x4E0270
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_13 0x4E0274
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_14 0x4E0278
+
+#define mmDMA_IF_E_N_SOB_MAX_RPRIV_15 0x4E027C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_0 0x4E0280
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_1 0x4E0284
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_2 0x4E0288
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_3 0x4E028C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_4 0x4E0290
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_5 0x4E0294
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_6 0x4E0298
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_7 0x4E029C
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_8 0x4E02A0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_9 0x4E02A4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_10 0x4E02A8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_11 0x4E02AC
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_12 0x4E02B0
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_13 0x4E02B4
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_14 0x4E02B8
+
+#define mmDMA_IF_E_N_SOB_MIN_WPRIV_15 0x4E02BC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_0 0x4E02C0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_1 0x4E02C4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_2 0x4E02C8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_3 0x4E02CC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_4 0x4E02D0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_5 0x4E02D4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_6 0x4E02D8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_7 0x4E02DC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_8 0x4E02E0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_9 0x4E02E4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_10 0x4E02E8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_11 0x4E02EC
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_12 0x4E02F0
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_13 0x4E02F4
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_14 0x4E02F8
+
+#define mmDMA_IF_E_N_SOB_MAX_WPRIV_15 0x4E02FC
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_0 0x4E0300
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_1 0x4E0304
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_2 0x4E0308
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_3 0x4E030C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_4 0x4E0310
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_5 0x4E0314
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_6 0x4E0318
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_7 0x4E031C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_8 0x4E0320
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_9 0x4E0324
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_10 0x4E0328
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_11 0x4E032C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_12 0x4E0330
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_13 0x4E0334
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_14 0x4E0338
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPROT_15 0x4E033C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_0 0x4E0340
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_1 0x4E0344
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_2 0x4E0348
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_3 0x4E034C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_4 0x4E0350
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_5 0x4E0354
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_6 0x4E0358
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_7 0x4E035C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_8 0x4E0360
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_9 0x4E0364
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_10 0x4E0368
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_11 0x4E036C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_12 0x4E0370
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_13 0x4E0374
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_14 0x4E0378
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPROT_15 0x4E037C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_0 0x4E0380
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_1 0x4E0384
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_2 0x4E0388
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_3 0x4E038C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_4 0x4E0390
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_5 0x4E0394
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_6 0x4E0398
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_7 0x4E039C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_8 0x4E03A0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_9 0x4E03A4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_10 0x4E03A8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_11 0x4E03AC
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_12 0x4E03B0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_13 0x4E03B4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_14 0x4E03B8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPROT_15 0x4E03BC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_0 0x4E03C0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_1 0x4E03C4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_2 0x4E03C8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_3 0x4E03CC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_4 0x4E03D0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_5 0x4E03D4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_6 0x4E03D8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_7 0x4E03DC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_8 0x4E03E0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_9 0x4E03E4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_10 0x4E03E8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_11 0x4E03EC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_12 0x4E03F0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_13 0x4E03F4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_14 0x4E03F8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPROT_15 0x4E03FC
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_0 0x4E0400
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_1 0x4E0404
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_2 0x4E0408
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_3 0x4E040C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_4 0x4E0410
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_5 0x4E0414
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_6 0x4E0418
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_7 0x4E041C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_8 0x4E0420
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_9 0x4E0424
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_10 0x4E0428
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_11 0x4E042C
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_12 0x4E0430
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_13 0x4E0434
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_14 0x4E0438
+
+#define mmDMA_IF_E_N_DMA0_MIN_RPRIV_15 0x4E043C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_0 0x4E0440
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_1 0x4E0444
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_2 0x4E0448
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_3 0x4E044C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_4 0x4E0450
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_5 0x4E0454
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_6 0x4E0458
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_7 0x4E045C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_8 0x4E0460
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_9 0x4E0464
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_10 0x4E0468
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_11 0x4E046C
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_12 0x4E0470
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_13 0x4E0474
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_14 0x4E0478
+
+#define mmDMA_IF_E_N_DMA0_MAX_RPRIV_15 0x4E047C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_0 0x4E0480
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_1 0x4E0484
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_2 0x4E0488
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_3 0x4E048C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_4 0x4E0490
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_5 0x4E0494
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_6 0x4E0498
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_7 0x4E049C
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_8 0x4E04A0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_9 0x4E04A4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_10 0x4E04A8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_11 0x4E04AC
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_12 0x4E04B0
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_13 0x4E04B4
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_14 0x4E04B8
+
+#define mmDMA_IF_E_N_DMA0_MIN_WPRIV_15 0x4E04BC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_0 0x4E04C0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_1 0x4E04C4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_2 0x4E04C8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_3 0x4E04CC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_4 0x4E04D0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_5 0x4E04D4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_6 0x4E04D8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_7 0x4E04DC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_8 0x4E04E0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_9 0x4E04E4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_10 0x4E04E8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_11 0x4E04EC
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_12 0x4E04F0
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_13 0x4E04F4
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_14 0x4E04F8
+
+#define mmDMA_IF_E_N_DMA0_MAX_WPRIV_15 0x4E04FC
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_0 0x4E0500
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_1 0x4E0504
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_2 0x4E0508
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_3 0x4E050C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_4 0x4E0510
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_5 0x4E0514
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_6 0x4E0518
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_7 0x4E051C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_8 0x4E0520
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_9 0x4E0524
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_10 0x4E0528
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_11 0x4E052C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_12 0x4E0530
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_13 0x4E0534
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_14 0x4E0538
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPROT_15 0x4E053C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_0 0x4E0540
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_1 0x4E0544
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_2 0x4E0548
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_3 0x4E054C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_4 0x4E0550
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_5 0x4E0554
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_6 0x4E0558
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_7 0x4E055C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_8 0x4E0560
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_9 0x4E0564
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_10 0x4E0568
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_11 0x4E056C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_12 0x4E0570
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_13 0x4E0574
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_14 0x4E0578
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPROT_15 0x4E057C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_0 0x4E0580
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_1 0x4E0584
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_2 0x4E0588
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_3 0x4E058C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_4 0x4E0590
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_5 0x4E0594
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_6 0x4E0598
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_7 0x4E059C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_8 0x4E05A0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_9 0x4E05A4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_10 0x4E05A8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_11 0x4E05AC
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_12 0x4E05B0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_13 0x4E05B4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_14 0x4E05B8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPROT_15 0x4E05BC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_0 0x4E05C0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_1 0x4E05C4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_2 0x4E05C8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_3 0x4E05CC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_4 0x4E05D0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_5 0x4E05D4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_6 0x4E05D8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_7 0x4E05DC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_8 0x4E05E0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_9 0x4E05E4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_10 0x4E05E8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_11 0x4E05EC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_12 0x4E05F0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_13 0x4E05F4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_14 0x4E05F8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPROT_15 0x4E05FC
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_0 0x4E0600
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_1 0x4E0604
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_2 0x4E0608
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_3 0x4E060C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_4 0x4E0610
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_5 0x4E0614
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_6 0x4E0618
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_7 0x4E061C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_8 0x4E0620
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_9 0x4E0624
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_10 0x4E0628
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_11 0x4E062C
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_12 0x4E0630
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_13 0x4E0634
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_14 0x4E0638
+
+#define mmDMA_IF_E_N_DMA1_MIN_RPRIV_15 0x4E063C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_0 0x4E0640
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_1 0x4E0644
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_2 0x4E0648
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_3 0x4E064C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_4 0x4E0650
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_5 0x4E0654
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_6 0x4E0658
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_7 0x4E065C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_8 0x4E0660
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_9 0x4E0664
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_10 0x4E0668
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_11 0x4E066C
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_12 0x4E0670
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_13 0x4E0674
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_14 0x4E0678
+
+#define mmDMA_IF_E_N_DMA1_MAX_RPRIV_15 0x4E067C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_0 0x4E0680
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_1 0x4E0684
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_2 0x4E0688
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_3 0x4E068C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_4 0x4E0690
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_5 0x4E0694
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_6 0x4E0698
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_7 0x4E069C
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_8 0x4E06A0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_9 0x4E06A4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_10 0x4E06A8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_11 0x4E06AC
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_12 0x4E06B0
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_13 0x4E06B4
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_14 0x4E06B8
+
+#define mmDMA_IF_E_N_DMA1_MIN_WPRIV_15 0x4E06BC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_0 0x4E06C0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_1 0x4E06C4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_2 0x4E06C8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_3 0x4E06CC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_4 0x4E06D0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_5 0x4E06D4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_6 0x4E06D8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_7 0x4E06DC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_8 0x4E06E0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_9 0x4E06E4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_10 0x4E06E8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_11 0x4E06EC
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_12 0x4E06F0
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_13 0x4E06F4
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_14 0x4E06F8
+
+#define mmDMA_IF_E_N_DMA1_MAX_WPRIV_15 0x4E06FC
+
+#define mmDMA_IF_E_N_SOB_HIT_RPROT 0x4E0700
+
+#define mmDMA_IF_E_N_SOB_HIT_WPROT 0x4E0704
+
+#define mmDMA_IF_E_N_SOB_HIT_RPRIV 0x4E070C
+
+#define mmDMA_IF_E_N_SOB_HIT_WPRIV 0x4E0710
+
+#define mmDMA_IF_E_N_DMA0_HIT_RPROT 0x4E071C
+
+#define mmDMA_IF_E_N_DMA0_HIT_WPROT 0x4E0720
+
+#define mmDMA_IF_E_N_DMA0_HIT_RPRIV 0x4E0724
+
+#define mmDMA_IF_E_N_DMA0_HIT_WPRIV 0x4E0728
+
+#define mmDMA_IF_E_N_DMA1_HIT_RPROT 0x4E0730
+
+#define mmDMA_IF_E_N_DMA1_HIT_WPROT 0x4E0734
+
+#define mmDMA_IF_E_N_DMA1_HIT_RPRIV 0x4E0738
+
+#define mmDMA_IF_E_N_DMA1_HIT_WPRIV 0x4E073C
+
+#define mmDMA_IF_E_N_HBM_BIN 0x4E0800
+
+#define mmDMA_IF_E_N_MME_BIN 0x4E0804
+
+#define mmDMA_IF_E_N_TPC_BIN 0x4E0808
+
+#define mmDMA_IF_E_N_DMA_BIN 0x4E080C
+
+#define mmDMA_IF_E_N_SOB_CG_EN 0x4E0810
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_0 0x4E0820
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_1 0x4E0824
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_2 0x4E0828
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_3 0x4E082C
+
+#define mmDMA_IF_E_N_HBM_I2C_ADDR_4 0x4E0830
+
+#define mmDMA_IF_E_N_HBM_MISC 0x4E0834
+
+#endif /* ASIC_REG_DMA_IF_E_N_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
new file mode 100644
index 000000000000..cd61289a1e8a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_DOWN_CH0_PERM_SEL 0x4A1108
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_0 0x4A1114
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_1 0x4A1118
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_2 0x4A111C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_3 0x4A1120
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_4 0x4A1124
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_5 0x4A1128
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_6 0x4A112C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_7 0x4A1130
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_8 0x4A1134
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_9 0x4A1138
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_10 0x4A113C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_11 0x4A1140
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_12 0x4A1144
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_13 0x4A1148
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_14 0x4A114C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_15 0x4A1150
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_16 0x4A1154
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_17 0x4A1158
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_18 0x4A115C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_19 0x4A1160
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_20 0x4A1164
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_21 0x4A1168
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_22 0x4A116C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_23 0x4A1170
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_24 0x4A1174
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_25 0x4A1178
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_26 0x4A117C
+
+#define mmDMA_IF_E_S_DOWN_CH0_HBM_POLY_H3_27 0x4A1180
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_0 0x4A1184
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_1 0x4A1188
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_2 0x4A118C
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_3 0x4A1190
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_4 0x4A1194
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_5 0x4A1198
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_6 0x4A119C
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_7 0x4A11A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_8 0x4A11A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_9 0x4A11A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_10 0x4A11AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_11 0x4A11B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_12 0x4A11B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_13 0x4A11B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_SRAM_POLY_H3_14 0x4A11BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN 0x4A126C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_EN 0x4A1274
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_SAT 0x4A1278
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_RST 0x4A127C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_HBM_TIMEOUT 0x4A1280
+
+#define mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN 0x4A1284
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_EN 0x4A1288
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_SAT 0x4A128C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_RST 0x4A1290
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_PCI_TIMEOUT 0x4A1294
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_EN 0x4A129C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_SAT 0x4A12A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RST 0x4A12A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_TIMEOUT 0x4A12AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RL_SRAM_RED 0x4A12B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN 0x4A12EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN 0x4A12F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE 0x4A12F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE 0x4A12F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4A1404
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4A1408
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4A140C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4A1410
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4A1414
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4A1418
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE 0x4A141C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE 0x4A1420
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4A1424
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4A1428
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4A142C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4A1430
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4A1434
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4A1438
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0 0x4A1450
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1 0x4A1454
+
+#define mmDMA_IF_E_S_DOWN_CH0_NON_LIN_EN 0x4A1480
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_0 0x4A1500
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_1 0x4A1504
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_2 0x4A1508
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_3 0x4A150C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_BANK_4 0x4A1510
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_0 0x4A1514
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_1 0x4A1520
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_2 0x4A1524
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_3 0x4A1528
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_4 0x4A152C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_5 0x4A1530
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_6 0x4A1534
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_7 0x4A1538
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_8 0x4A153C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_SRAM_OFFSET_9 0x4A1540
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_0 0x4A1550
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_1 0x4A1554
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_2 0x4A1558
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_3 0x4A155C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_4 0x4A1560
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_5 0x4A1564
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_6 0x4A1568
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_7 0x4A156C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_8 0x4A1570
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_9 0x4A1574
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_10 0x4A1578
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_11 0x4A157C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_12 0x4A1580
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_13 0x4A1584
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_14 0x4A1588
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_15 0x4A158C
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_16 0x4A1590
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_17 0x4A1594
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18 0x4A1598
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4A15E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4A15E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4A15EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4A15F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4A15F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4A15F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4A15FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4A1600
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4A1604
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4A1608
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4A160C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4A1610
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4A1614
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4A1618
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4A161C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4A1620
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4A1624
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4A1628
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4A162C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4A1630
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4A1634
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4A1638
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4A163C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4A1640
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4A1644
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4A1648
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4A164C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4A1650
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4A1654
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4A1658
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4A165C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4A1660
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4A1664
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4A1668
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4A166C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4A1670
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4A1674
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4A1678
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4A167C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4A1680
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4A1684
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4A1688
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4A168C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4A1690
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4A1694
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4A1698
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4A169C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4A16A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4A16A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4A16A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4A16AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4A16B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4A16B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4A16B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4A16BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4A16C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4A16C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4A16C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4A16CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4A16D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4A16D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4A16D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4A16DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4A16E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4A16E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4A16E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4A16EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4A16F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4A16F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4A16F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4A16FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4A1700
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4A1704
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4A1708
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4A170C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4A1710
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4A1714
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4A1718
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4A171C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4A1720
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4A1724
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4A1728
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4A172C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4A1730
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4A1734
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4A1738
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4A173C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4A1740
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4A1744
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4A1748
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4A174C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4A1750
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4A1754
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4A1758
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4A175C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4A1760
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4A1764
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4A1768
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4A176C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4A1770
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4A1774
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4A1778
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4A177C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4A1780
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4A1784
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4A1788
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4A178C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4A1790
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4A1794
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4A1798
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4A179C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4A17A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4A17A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4A17A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4A17AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4A17B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4A17B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4A17B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4A17BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4A17C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4A17C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4A17C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4A17CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4A17D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4A17D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4A17D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4A17DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4A17E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4A1824
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4A1828
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4A182C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4A1830
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4A1834
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4A1838
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4A183C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4A1840
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4A1844
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4A1848
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4A184C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4A1850
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4A1854
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4A1858
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4A185C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4A1860
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4A1864
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4A1868
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4A186C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4A1870
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4A1874
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4A1878
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4A187C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4A1880
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4A1884
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4A1888
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4A188C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4A1890
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4A1894
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4A1898
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4A189C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4A18A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4A18A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4A18A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4A18AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4A18B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4A18B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4A18B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4A18BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4A18C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4A18C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4A18C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4A18CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4A18D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4A18D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4A18D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4A18DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4A18E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4A18E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4A18E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4A18EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4A18F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4A18F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4A18F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4A18FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4A1900
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4A1904
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4A1908
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4A190C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4A1910
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4A1914
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4A1918
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4A191C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4A1920
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4A1924
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4A1928
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4A192C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4A1930
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4A1934
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4A1938
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4A193C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4A1940
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4A1944
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4A1948
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4A194C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4A1950
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4A1954
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4A1958
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4A195C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4A1960
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4A1964
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4A1968
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4A196C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4A1970
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4A1974
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4A1978
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4A197C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4A1980
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4A1984
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4A1988
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4A198C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4A1990
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4A1994
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4A1998
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4A199C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4A19A0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4A19A4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4A19A8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4A19AC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4A19B0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4A19B4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4A19B8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4A19BC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4A19C0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4A19C4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4A19C8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4A19CC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4A19D0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4A19D4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4A19D8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4A19DC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4A19E0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4A19E4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4A19E8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4A19EC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4A19F0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4A19F4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4A19F8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4A19FC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4A1A00
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4A1A04
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4A1A08
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4A1A0C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4A1A10
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4A1A14
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4A1A18
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4A1A1C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4A1A20
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW 0x4A1A64
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR 0x4A1A68
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4A1A6C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4A1A70
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_CFG 0x4A1B64
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_SHIFT 0x4A1B68
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4A1B6C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4A1B70
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4A1B74
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4A1B78
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4A1B7C
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4A1B80
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4A1B84
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4A1B88
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_0 0x4A1BAC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_1 0x4A1BB0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_2 0x4A1BB4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_3 0x4A1BB8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_4 0x4A1BBC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_5 0x4A1BC0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_6 0x4A1BC4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_TOKEN_7 0x4A1BC8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_0 0x4A1BEC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_1 0x4A1BF0
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_2 0x4A1BF4
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_3 0x4A1BF8
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_4 0x4A1BFC
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_5 0x4A1C00
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_6 0x4A1C04
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_BANK_ID_7 0x4A1C08
+
+#define mmDMA_IF_E_S_DOWN_CH0_RGL_WDT 0x4A1C2C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4A1C30
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4A1C34
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4A1C38
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4A1C3C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4A1C40
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4A1C44
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4A1C48
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4A1C4C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4A1C50
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4A1C54
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4A1C58
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4A1C5C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4A1C60
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4A1C64
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4A1C68
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4A1C6C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4A1C70
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4A1C74
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4A1C78
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4A1C7C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4A1C80
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4A1C84
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4A1C88
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4A1C8C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4A1C90
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4A1C94
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4A1C98
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4A1C9C
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4A1CA0
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4A1CA4
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4A1CA8
+
+#define mmDMA_IF_E_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4A1CAC
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_0 0x4A1CB0
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_1 0x4A1CB4
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_2 0x4A1CB8
+
+#define mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3 0x4A1CBC
+
+#endif /* ASIC_REG_DMA_IF_E_S_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
new file mode 100644
index 000000000000..3f32370a14c7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_DOWN_CH1_PERM_SEL 0x4A2108
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_0 0x4A2114
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_1 0x4A2118
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_2 0x4A211C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_3 0x4A2120
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_4 0x4A2124
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_5 0x4A2128
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_6 0x4A212C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_7 0x4A2130
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_8 0x4A2134
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_9 0x4A2138
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_10 0x4A213C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_11 0x4A2140
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_12 0x4A2144
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_13 0x4A2148
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_14 0x4A214C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_15 0x4A2150
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_16 0x4A2154
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_17 0x4A2158
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_18 0x4A215C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_19 0x4A2160
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_20 0x4A2164
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_21 0x4A2168
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_22 0x4A216C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_23 0x4A2170
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_24 0x4A2174
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_25 0x4A2178
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_26 0x4A217C
+
+#define mmDMA_IF_E_S_DOWN_CH1_HBM_POLY_H3_27 0x4A2180
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_0 0x4A2184
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_1 0x4A2188
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_2 0x4A218C
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_3 0x4A2190
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_4 0x4A2194
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_5 0x4A2198
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_6 0x4A219C
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_7 0x4A21A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_8 0x4A21A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_9 0x4A21A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_10 0x4A21AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_11 0x4A21B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_12 0x4A21B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_13 0x4A21B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_SRAM_POLY_H3_14 0x4A21BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN 0x4A226C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_EN 0x4A2274
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_SAT 0x4A2278
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_RST 0x4A227C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_HBM_TIMEOUT 0x4A2280
+
+#define mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN 0x4A2284
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_EN 0x4A2288
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_SAT 0x4A228C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_RST 0x4A2290
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_PCI_TIMEOUT 0x4A2294
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_EN 0x4A229C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_SAT 0x4A22A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RST 0x4A22A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_TIMEOUT 0x4A22AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RL_SRAM_RED 0x4A22B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN 0x4A22EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN 0x4A22F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE 0x4A22F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE 0x4A22F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4A2404
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4A2408
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4A240C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4A2410
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4A2414
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4A2418
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE 0x4A241C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE 0x4A2420
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4A2424
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4A2428
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4A242C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4A2430
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4A2434
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4A2438
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0 0x4A2450
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1 0x4A2454
+
+#define mmDMA_IF_E_S_DOWN_CH1_NON_LIN_EN 0x4A2480
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_0 0x4A2500
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_1 0x4A2504
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_2 0x4A2508
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_3 0x4A250C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_BANK_4 0x4A2510
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_0 0x4A2514
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_1 0x4A2520
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_2 0x4A2524
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_3 0x4A2528
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_4 0x4A252C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_5 0x4A2530
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_6 0x4A2534
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_7 0x4A2538
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_8 0x4A253C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_SRAM_OFFSET_9 0x4A2540
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_0 0x4A2550
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_1 0x4A2554
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_2 0x4A2558
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_3 0x4A255C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_4 0x4A2560
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_5 0x4A2564
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_6 0x4A2568
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_7 0x4A256C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_8 0x4A2570
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_9 0x4A2574
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_10 0x4A2578
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_11 0x4A257C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_12 0x4A2580
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_13 0x4A2584
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_14 0x4A2588
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_15 0x4A258C
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_16 0x4A2590
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_17 0x4A2594
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18 0x4A2598
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4A25E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4A25E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4A25EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4A25F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4A25F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4A25F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4A25FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4A2600
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4A2604
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4A2608
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4A260C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4A2610
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4A2614
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4A2618
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4A261C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4A2620
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4A2624
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4A2628
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4A262C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4A2630
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4A2634
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4A2638
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4A263C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4A2640
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4A2644
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4A2648
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4A264C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4A2650
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4A2654
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4A2658
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4A265C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4A2660
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4A2664
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4A2668
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4A266C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4A2670
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4A2674
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4A2678
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4A267C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4A2680
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4A2684
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4A2688
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4A268C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4A2690
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4A2694
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4A2698
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4A269C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4A26A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4A26A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4A26A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4A26AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4A26B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4A26B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4A26B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4A26BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4A26C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4A26C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4A26C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4A26CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4A26D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4A26D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4A26D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4A26DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4A26E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4A26E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4A26E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4A26EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4A26F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4A26F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4A26F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4A26FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4A2700
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4A2704
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4A2708
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4A270C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4A2710
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4A2714
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4A2718
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4A271C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4A2720
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4A2724
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4A2728
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4A272C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4A2730
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4A2734
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4A2738
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4A273C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4A2740
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4A2744
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4A2748
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4A274C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4A2750
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4A2754
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4A2758
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4A275C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4A2760
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4A2764
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4A2768
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4A276C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4A2770
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4A2774
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4A2778
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4A277C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4A2780
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4A2784
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4A2788
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4A278C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4A2790
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4A2794
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4A2798
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4A279C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4A27A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4A27A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4A27A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4A27AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4A27B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4A27B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4A27B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4A27BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4A27C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4A27C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4A27C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4A27CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4A27D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4A27D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4A27D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4A27DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4A27E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4A2824
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4A2828
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4A282C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4A2830
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4A2834
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4A2838
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4A283C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4A2840
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4A2844
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4A2848
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4A284C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4A2850
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4A2854
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4A2858
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4A285C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4A2860
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4A2864
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4A2868
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4A286C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4A2870
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4A2874
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4A2878
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4A287C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4A2880
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4A2884
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4A2888
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4A288C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4A2890
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4A2894
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4A2898
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4A289C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4A28A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4A28A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4A28A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4A28AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4A28B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4A28B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4A28B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4A28BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4A28C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4A28C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4A28C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4A28CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4A28D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4A28D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4A28D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4A28DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4A28E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4A28E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4A28E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4A28EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4A28F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4A28F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4A28F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4A28FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4A2900
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4A2904
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4A2908
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4A290C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4A2910
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4A2914
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4A2918
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4A291C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4A2920
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4A2924
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4A2928
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4A292C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4A2930
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4A2934
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4A2938
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4A293C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4A2940
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4A2944
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4A2948
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4A294C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4A2950
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4A2954
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4A2958
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4A295C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4A2960
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4A2964
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4A2968
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4A296C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4A2970
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4A2974
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4A2978
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4A297C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4A2980
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4A2984
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4A2988
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4A298C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4A2990
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4A2994
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4A2998
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4A299C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4A29A0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4A29A4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4A29A8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4A29AC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4A29B0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4A29B4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4A29B8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4A29BC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4A29C0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4A29C4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4A29C8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4A29CC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4A29D0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4A29D4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4A29D8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4A29DC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4A29E0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4A29E4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4A29E8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4A29EC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4A29F0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4A29F4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4A29F8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4A29FC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4A2A00
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4A2A04
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4A2A08
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4A2A0C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4A2A10
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4A2A14
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4A2A18
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4A2A1C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4A2A20
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AW 0x4A2A64
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_SEC_HIT_AR 0x4A2A68
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4A2A6C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4A2A70
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_CFG 0x4A2B64
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_SHIFT 0x4A2B68
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4A2B6C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4A2B70
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4A2B74
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4A2B78
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4A2B7C
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4A2B80
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4A2B84
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4A2B88
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_0 0x4A2BAC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_1 0x4A2BB0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_2 0x4A2BB4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_3 0x4A2BB8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_4 0x4A2BBC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_5 0x4A2BC0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_6 0x4A2BC4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_TOKEN_7 0x4A2BC8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_0 0x4A2BEC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_1 0x4A2BF0
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_2 0x4A2BF4
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_3 0x4A2BF8
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_4 0x4A2BFC
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_5 0x4A2C00
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_6 0x4A2C04
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_BANK_ID_7 0x4A2C08
+
+#define mmDMA_IF_E_S_DOWN_CH1_RGL_WDT 0x4A2C2C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4A2C30
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4A2C34
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4A2C38
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4A2C3C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4A2C40
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4A2C44
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4A2C48
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4A2C4C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4A2C50
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4A2C54
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4A2C58
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4A2C5C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4A2C60
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4A2C64
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4A2C68
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4A2C6C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4A2C70
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4A2C74
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4A2C78
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4A2C7C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4A2C80
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4A2C84
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4A2C88
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4A2C8C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4A2C90
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4A2C94
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4A2C98
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4A2C9C
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4A2CA0
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4A2CA4
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4A2CA8
+
+#define mmDMA_IF_E_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4A2CAC
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_0 0x4A2CB0
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_1 0x4A2CB4
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_2 0x4A2CB8
+
+#define mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3 0x4A2CBC
+
+#endif /* ASIC_REG_DMA_IF_E_S_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
new file mode 100644
index 000000000000..78c18da7154b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_E_S_REGS_H_
+#define ASIC_REG_DMA_IF_E_S_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_E_S (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_E_S_HBM0_WR_CRED_CNT 0x4A0000
+
+#define mmDMA_IF_E_S_HBM1_WR_CRED_CNT 0x4A0004
+
+#define mmDMA_IF_E_S_HBM0_RD_CRED_CNT 0x4A0008
+
+#define mmDMA_IF_E_S_HBM1_RD_CRED_CNT 0x4A000C
+
+#define mmDMA_IF_E_S_HBM_LIMITER_0 0x4A0030
+
+#define mmDMA_IF_E_S_HBM_LIMITER_1 0x4A0034
+
+#define mmDMA_IF_E_S_HBM_LIMITER_2 0x4A0038
+
+#define mmDMA_IF_E_S_HBM_LIMITER_3 0x4A003C
+
+#define mmDMA_IF_E_S_HBM_ALMOST_EN_0 0x4A0040
+
+#define mmDMA_IF_E_S_HBM_ALMOST_EN_1 0x4A0044
+
+#define mmDMA_IF_E_S_HBM_CRED_EN_0 0x4A0050
+
+#define mmDMA_IF_E_S_HBM_CRED_EN_1 0x4A0054
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_0 0x4A0100
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_1 0x4A0104
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_2 0x4A0108
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_3 0x4A010C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_4 0x4A0110
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_5 0x4A0114
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_6 0x4A0118
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_7 0x4A011C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_8 0x4A0120
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_9 0x4A0124
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_10 0x4A0128
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_11 0x4A012C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_12 0x4A0130
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_13 0x4A0134
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_14 0x4A0138
+
+#define mmDMA_IF_E_S_SOB_MIN_RPROT_15 0x4A013C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_0 0x4A0140
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_1 0x4A0144
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_2 0x4A0148
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_3 0x4A014C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_4 0x4A0150
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_5 0x4A0154
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_6 0x4A0158
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_7 0x4A015C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_8 0x4A0160
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_9 0x4A0164
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_10 0x4A0168
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_11 0x4A016C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_12 0x4A0170
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_13 0x4A0174
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_14 0x4A0178
+
+#define mmDMA_IF_E_S_SOB_MAX_RPROT_15 0x4A017C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_0 0x4A0180
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_1 0x4A0184
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_2 0x4A0188
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_3 0x4A018C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_4 0x4A0190
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_5 0x4A0194
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_6 0x4A0198
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_7 0x4A019C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_8 0x4A01A0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_9 0x4A01A4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_10 0x4A01A8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_11 0x4A01AC
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_12 0x4A01B0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_13 0x4A01B4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_14 0x4A01B8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPROT_15 0x4A01BC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_0 0x4A01C0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_1 0x4A01C4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_2 0x4A01C8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_3 0x4A01CC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_4 0x4A01D0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_5 0x4A01D4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_6 0x4A01D8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_7 0x4A01DC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_8 0x4A01E0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_9 0x4A01E4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_10 0x4A01E8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_11 0x4A01EC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_12 0x4A01F0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_13 0x4A01F4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_14 0x4A01F8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPROT_15 0x4A01FC
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_0 0x4A0200
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_1 0x4A0204
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_2 0x4A0208
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_3 0x4A020C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_4 0x4A0210
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_5 0x4A0214
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_6 0x4A0218
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_7 0x4A021C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_8 0x4A0220
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_9 0x4A0224
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_10 0x4A0228
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_11 0x4A022C
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_12 0x4A0230
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_13 0x4A0234
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_14 0x4A0238
+
+#define mmDMA_IF_E_S_SOB_MIN_RPRIV_15 0x4A023C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_0 0x4A0240
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_1 0x4A0244
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_2 0x4A0248
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_3 0x4A024C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_4 0x4A0250
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_5 0x4A0254
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_6 0x4A0258
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_7 0x4A025C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_8 0x4A0260
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_9 0x4A0264
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_10 0x4A0268
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_11 0x4A026C
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_12 0x4A0270
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_13 0x4A0274
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_14 0x4A0278
+
+#define mmDMA_IF_E_S_SOB_MAX_RPRIV_15 0x4A027C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_0 0x4A0280
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_1 0x4A0284
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_2 0x4A0288
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_3 0x4A028C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_4 0x4A0290
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_5 0x4A0294
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_6 0x4A0298
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_7 0x4A029C
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_8 0x4A02A0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_9 0x4A02A4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_10 0x4A02A8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_11 0x4A02AC
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_12 0x4A02B0
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_13 0x4A02B4
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_14 0x4A02B8
+
+#define mmDMA_IF_E_S_SOB_MIN_WPRIV_15 0x4A02BC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_0 0x4A02C0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_1 0x4A02C4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_2 0x4A02C8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_3 0x4A02CC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_4 0x4A02D0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_5 0x4A02D4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_6 0x4A02D8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_7 0x4A02DC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_8 0x4A02E0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_9 0x4A02E4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_10 0x4A02E8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_11 0x4A02EC
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_12 0x4A02F0
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_13 0x4A02F4
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_14 0x4A02F8
+
+#define mmDMA_IF_E_S_SOB_MAX_WPRIV_15 0x4A02FC
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_0 0x4A0300
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_1 0x4A0304
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_2 0x4A0308
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_3 0x4A030C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_4 0x4A0310
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_5 0x4A0314
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_6 0x4A0318
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_7 0x4A031C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_8 0x4A0320
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_9 0x4A0324
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_10 0x4A0328
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_11 0x4A032C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_12 0x4A0330
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_13 0x4A0334
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_14 0x4A0338
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPROT_15 0x4A033C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_0 0x4A0340
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_1 0x4A0344
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_2 0x4A0348
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_3 0x4A034C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_4 0x4A0350
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_5 0x4A0354
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_6 0x4A0358
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_7 0x4A035C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_8 0x4A0360
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_9 0x4A0364
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_10 0x4A0368
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_11 0x4A036C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_12 0x4A0370
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_13 0x4A0374
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_14 0x4A0378
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPROT_15 0x4A037C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_0 0x4A0380
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_1 0x4A0384
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_2 0x4A0388
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_3 0x4A038C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_4 0x4A0390
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_5 0x4A0394
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_6 0x4A0398
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_7 0x4A039C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_8 0x4A03A0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_9 0x4A03A4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_10 0x4A03A8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_11 0x4A03AC
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_12 0x4A03B0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_13 0x4A03B4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_14 0x4A03B8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPROT_15 0x4A03BC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_0 0x4A03C0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_1 0x4A03C4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_2 0x4A03C8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_3 0x4A03CC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_4 0x4A03D0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_5 0x4A03D4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_6 0x4A03D8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_7 0x4A03DC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_8 0x4A03E0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_9 0x4A03E4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_10 0x4A03E8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_11 0x4A03EC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_12 0x4A03F0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_13 0x4A03F4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_14 0x4A03F8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPROT_15 0x4A03FC
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_0 0x4A0400
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_1 0x4A0404
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_2 0x4A0408
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_3 0x4A040C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_4 0x4A0410
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_5 0x4A0414
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_6 0x4A0418
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_7 0x4A041C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_8 0x4A0420
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_9 0x4A0424
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_10 0x4A0428
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_11 0x4A042C
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_12 0x4A0430
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_13 0x4A0434
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_14 0x4A0438
+
+#define mmDMA_IF_E_S_DMA0_MIN_RPRIV_15 0x4A043C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_0 0x4A0440
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_1 0x4A0444
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_2 0x4A0448
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_3 0x4A044C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_4 0x4A0450
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_5 0x4A0454
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_6 0x4A0458
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_7 0x4A045C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_8 0x4A0460
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_9 0x4A0464
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_10 0x4A0468
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_11 0x4A046C
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_12 0x4A0470
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_13 0x4A0474
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_14 0x4A0478
+
+#define mmDMA_IF_E_S_DMA0_MAX_RPRIV_15 0x4A047C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_0 0x4A0480
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_1 0x4A0484
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_2 0x4A0488
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_3 0x4A048C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_4 0x4A0490
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_5 0x4A0494
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_6 0x4A0498
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_7 0x4A049C
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_8 0x4A04A0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_9 0x4A04A4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_10 0x4A04A8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_11 0x4A04AC
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_12 0x4A04B0
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_13 0x4A04B4
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_14 0x4A04B8
+
+#define mmDMA_IF_E_S_DMA0_MIN_WPRIV_15 0x4A04BC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_0 0x4A04C0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_1 0x4A04C4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_2 0x4A04C8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_3 0x4A04CC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_4 0x4A04D0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_5 0x4A04D4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_6 0x4A04D8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_7 0x4A04DC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_8 0x4A04E0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_9 0x4A04E4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_10 0x4A04E8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_11 0x4A04EC
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_12 0x4A04F0
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_13 0x4A04F4
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_14 0x4A04F8
+
+#define mmDMA_IF_E_S_DMA0_MAX_WPRIV_15 0x4A04FC
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_0 0x4A0500
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_1 0x4A0504
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_2 0x4A0508
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_3 0x4A050C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_4 0x4A0510
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_5 0x4A0514
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_6 0x4A0518
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_7 0x4A051C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_8 0x4A0520
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_9 0x4A0524
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_10 0x4A0528
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_11 0x4A052C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_12 0x4A0530
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_13 0x4A0534
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_14 0x4A0538
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPROT_15 0x4A053C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_0 0x4A0540
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_1 0x4A0544
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_2 0x4A0548
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_3 0x4A054C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_4 0x4A0550
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_5 0x4A0554
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_6 0x4A0558
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_7 0x4A055C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_8 0x4A0560
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_9 0x4A0564
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_10 0x4A0568
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_11 0x4A056C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_12 0x4A0570
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_13 0x4A0574
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_14 0x4A0578
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPROT_15 0x4A057C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_0 0x4A0580
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_1 0x4A0584
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_2 0x4A0588
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_3 0x4A058C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_4 0x4A0590
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_5 0x4A0594
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_6 0x4A0598
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_7 0x4A059C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_8 0x4A05A0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_9 0x4A05A4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_10 0x4A05A8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_11 0x4A05AC
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_12 0x4A05B0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_13 0x4A05B4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_14 0x4A05B8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPROT_15 0x4A05BC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_0 0x4A05C0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_1 0x4A05C4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_2 0x4A05C8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_3 0x4A05CC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_4 0x4A05D0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_5 0x4A05D4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_6 0x4A05D8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_7 0x4A05DC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_8 0x4A05E0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_9 0x4A05E4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_10 0x4A05E8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_11 0x4A05EC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_12 0x4A05F0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_13 0x4A05F4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_14 0x4A05F8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPROT_15 0x4A05FC
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_0 0x4A0600
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_1 0x4A0604
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_2 0x4A0608
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_3 0x4A060C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_4 0x4A0610
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_5 0x4A0614
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_6 0x4A0618
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_7 0x4A061C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_8 0x4A0620
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_9 0x4A0624
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_10 0x4A0628
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_11 0x4A062C
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_12 0x4A0630
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_13 0x4A0634
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_14 0x4A0638
+
+#define mmDMA_IF_E_S_DMA1_MIN_RPRIV_15 0x4A063C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_0 0x4A0640
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_1 0x4A0644
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_2 0x4A0648
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_3 0x4A064C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_4 0x4A0650
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_5 0x4A0654
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_6 0x4A0658
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_7 0x4A065C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_8 0x4A0660
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_9 0x4A0664
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_10 0x4A0668
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_11 0x4A066C
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_12 0x4A0670
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_13 0x4A0674
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_14 0x4A0678
+
+#define mmDMA_IF_E_S_DMA1_MAX_RPRIV_15 0x4A067C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_0 0x4A0680
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_1 0x4A0684
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_2 0x4A0688
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_3 0x4A068C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_4 0x4A0690
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_5 0x4A0694
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_6 0x4A0698
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_7 0x4A069C
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_8 0x4A06A0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_9 0x4A06A4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_10 0x4A06A8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_11 0x4A06AC
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_12 0x4A06B0
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_13 0x4A06B4
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_14 0x4A06B8
+
+#define mmDMA_IF_E_S_DMA1_MIN_WPRIV_15 0x4A06BC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_0 0x4A06C0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_1 0x4A06C4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_2 0x4A06C8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_3 0x4A06CC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_4 0x4A06D0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_5 0x4A06D4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_6 0x4A06D8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_7 0x4A06DC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_8 0x4A06E0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_9 0x4A06E4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_10 0x4A06E8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_11 0x4A06EC
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_12 0x4A06F0
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_13 0x4A06F4
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_14 0x4A06F8
+
+#define mmDMA_IF_E_S_DMA1_MAX_WPRIV_15 0x4A06FC
+
+#define mmDMA_IF_E_S_SOB_HIT_RPROT 0x4A0700
+
+#define mmDMA_IF_E_S_SOB_HIT_WPROT 0x4A0704
+
+#define mmDMA_IF_E_S_SOB_HIT_RPRIV 0x4A070C
+
+#define mmDMA_IF_E_S_SOB_HIT_WPRIV 0x4A0710
+
+#define mmDMA_IF_E_S_DMA0_HIT_RPROT 0x4A071C
+
+#define mmDMA_IF_E_S_DMA0_HIT_WPROT 0x4A0720
+
+#define mmDMA_IF_E_S_DMA0_HIT_RPRIV 0x4A0724
+
+#define mmDMA_IF_E_S_DMA0_HIT_WPRIV 0x4A0728
+
+#define mmDMA_IF_E_S_DMA1_HIT_RPROT 0x4A0730
+
+#define mmDMA_IF_E_S_DMA1_HIT_WPROT 0x4A0734
+
+#define mmDMA_IF_E_S_DMA1_HIT_RPRIV 0x4A0738
+
+#define mmDMA_IF_E_S_DMA1_HIT_WPRIV 0x4A073C
+
+#define mmDMA_IF_E_S_HBM_BIN 0x4A0800
+
+#define mmDMA_IF_E_S_MME_BIN 0x4A0804
+
+#define mmDMA_IF_E_S_TPC_BIN 0x4A0808
+
+#define mmDMA_IF_E_S_DMA_BIN 0x4A080C
+
+#define mmDMA_IF_E_S_SOB_CG_EN 0x4A0810
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_0 0x4A0820
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_1 0x4A0824
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_2 0x4A0828
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_3 0x4A082C
+
+#define mmDMA_IF_E_S_HBM_I2C_ADDR_4 0x4A0830
+
+#define mmDMA_IF_E_S_HBM_MISC 0x4A0834
+
+#endif /* ASIC_REG_DMA_IF_E_S_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
new file mode 100644
index 000000000000..4ccaf8712948
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_DOWN_CH0_PERM_SEL 0x4C1108
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_0 0x4C1114
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_1 0x4C1118
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_2 0x4C111C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_3 0x4C1120
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_4 0x4C1124
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_5 0x4C1128
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_6 0x4C112C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_7 0x4C1130
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_8 0x4C1134
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_9 0x4C1138
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_10 0x4C113C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_11 0x4C1140
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_12 0x4C1144
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_13 0x4C1148
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_14 0x4C114C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_15 0x4C1150
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_16 0x4C1154
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_17 0x4C1158
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_18 0x4C115C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_19 0x4C1160
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_20 0x4C1164
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_21 0x4C1168
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_22 0x4C116C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_23 0x4C1170
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_24 0x4C1174
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_25 0x4C1178
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_26 0x4C117C
+
+#define mmDMA_IF_W_N_DOWN_CH0_HBM_POLY_H3_27 0x4C1180
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_0 0x4C1184
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_1 0x4C1188
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_2 0x4C118C
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_3 0x4C1190
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_4 0x4C1194
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_5 0x4C1198
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_6 0x4C119C
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_7 0x4C11A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_8 0x4C11A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_9 0x4C11A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_10 0x4C11AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_11 0x4C11B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_12 0x4C11B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_13 0x4C11B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_SRAM_POLY_H3_14 0x4C11BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN 0x4C126C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_EN 0x4C1274
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_SAT 0x4C1278
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_RST 0x4C127C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_HBM_TIMEOUT 0x4C1280
+
+#define mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN 0x4C1284
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_EN 0x4C1288
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_SAT 0x4C128C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_RST 0x4C1290
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_PCI_TIMEOUT 0x4C1294
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_EN 0x4C129C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_SAT 0x4C12A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RST 0x4C12A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_TIMEOUT 0x4C12AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RL_SRAM_RED 0x4C12B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN 0x4C12EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN 0x4C12F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE 0x4C12F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE 0x4C12F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x4C1404
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x4C1408
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x4C140C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x4C1410
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x4C1414
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x4C1418
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE 0x4C141C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE 0x4C1420
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x4C1424
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x4C1428
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x4C142C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x4C1430
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x4C1434
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x4C1438
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0 0x4C1450
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1 0x4C1454
+
+#define mmDMA_IF_W_N_DOWN_CH0_NON_LIN_EN 0x4C1480
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_0 0x4C1500
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_1 0x4C1504
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_2 0x4C1508
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_3 0x4C150C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_BANK_4 0x4C1510
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_0 0x4C1514
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_1 0x4C1520
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_2 0x4C1524
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_3 0x4C1528
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_4 0x4C152C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_5 0x4C1530
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_6 0x4C1534
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_7 0x4C1538
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_8 0x4C153C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_SRAM_OFFSET_9 0x4C1540
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_0 0x4C1550
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_1 0x4C1554
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_2 0x4C1558
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_3 0x4C155C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_4 0x4C1560
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_5 0x4C1564
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_6 0x4C1568
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_7 0x4C156C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_8 0x4C1570
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_9 0x4C1574
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_10 0x4C1578
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_11 0x4C157C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_12 0x4C1580
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_13 0x4C1584
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_14 0x4C1588
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_15 0x4C158C
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_16 0x4C1590
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_17 0x4C1594
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18 0x4C1598
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4C15E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4C15E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4C15EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4C15F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4C15F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4C15F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4C15FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x4C1600
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x4C1604
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x4C1608
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x4C160C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x4C1610
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x4C1614
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x4C1618
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x4C161C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x4C1620
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x4C1624
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x4C1628
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x4C162C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x4C1630
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x4C1634
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x4C1638
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x4C163C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x4C1640
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x4C1644
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x4C1648
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x4C164C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x4C1650
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x4C1654
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x4C1658
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x4C165C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x4C1660
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x4C1664
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x4C1668
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x4C166C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x4C1670
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x4C1674
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x4C1678
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x4C167C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x4C1680
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x4C1684
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x4C1688
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x4C168C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x4C1690
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x4C1694
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x4C1698
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x4C169C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4C16A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4C16A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4C16A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4C16AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4C16B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4C16B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4C16B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4C16BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4C16C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4C16C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4C16C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4C16CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4C16D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4C16D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4C16D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4C16DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4C16E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4C16E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4C16E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4C16EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4C16F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4C16F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4C16F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4C16FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x4C1700
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x4C1704
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x4C1708
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x4C170C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x4C1710
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x4C1714
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x4C1718
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x4C171C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x4C1720
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x4C1724
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x4C1728
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x4C172C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x4C1730
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x4C1734
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x4C1738
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x4C173C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x4C1740
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x4C1744
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x4C1748
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x4C174C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x4C1750
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x4C1754
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x4C1758
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x4C175C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x4C1760
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x4C1764
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x4C1768
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x4C176C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x4C1770
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x4C1774
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x4C1778
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x4C177C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x4C1780
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x4C1784
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x4C1788
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x4C178C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x4C1790
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x4C1794
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x4C1798
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x4C179C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4C17A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4C17A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4C17A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4C17AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4C17B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4C17B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4C17B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4C17BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4C17C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4C17C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4C17C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4C17CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4C17D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4C17D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4C17D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4C17DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4C17E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x4C1824
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x4C1828
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x4C182C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x4C1830
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x4C1834
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x4C1838
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x4C183C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x4C1840
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x4C1844
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x4C1848
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x4C184C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x4C1850
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x4C1854
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x4C1858
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x4C185C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x4C1860
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x4C1864
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x4C1868
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x4C186C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x4C1870
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x4C1874
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x4C1878
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x4C187C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x4C1880
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x4C1884
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x4C1888
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x4C188C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x4C1890
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x4C1894
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x4C1898
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x4C189C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4C18A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4C18A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4C18A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4C18AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4C18B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4C18B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4C18B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4C18BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4C18C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4C18C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4C18C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4C18CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4C18D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4C18D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4C18D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4C18DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4C18E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4C18E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4C18E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4C18EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4C18F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4C18F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4C18F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4C18FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x4C1900
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x4C1904
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x4C1908
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x4C190C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x4C1910
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x4C1914
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x4C1918
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x4C191C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x4C1920
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x4C1924
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x4C1928
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x4C192C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x4C1930
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x4C1934
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x4C1938
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x4C193C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x4C1940
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x4C1944
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x4C1948
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x4C194C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x4C1950
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x4C1954
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x4C1958
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x4C195C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x4C1960
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x4C1964
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x4C1968
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x4C196C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x4C1970
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x4C1974
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x4C1978
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x4C197C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x4C1980
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x4C1984
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x4C1988
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x4C198C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x4C1990
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x4C1994
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x4C1998
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x4C199C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4C19A0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4C19A4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4C19A8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4C19AC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4C19B0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4C19B4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4C19B8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4C19BC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4C19C0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4C19C4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4C19C8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4C19CC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4C19D0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4C19D4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4C19D8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4C19DC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4C19E0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4C19E4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4C19E8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4C19EC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4C19F0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4C19F4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4C19F8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4C19FC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x4C1A00
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x4C1A04
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x4C1A08
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x4C1A0C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x4C1A10
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x4C1A14
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x4C1A18
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x4C1A1C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x4C1A20
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AW 0x4C1A64
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_SEC_HIT_AR 0x4C1A68
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_HIT_AW 0x4C1A6C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RANGE_PRIV_HIT_AR 0x4C1A70
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_CFG 0x4C1B64
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_SHIFT 0x4C1B68
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_0 0x4C1B6C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_1 0x4C1B70
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_2 0x4C1B74
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_3 0x4C1B78
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_4 0x4C1B7C
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_5 0x4C1B80
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_6 0x4C1B84
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_EXPECTED_LAT_7 0x4C1B88
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_0 0x4C1BAC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_1 0x4C1BB0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_2 0x4C1BB4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_3 0x4C1BB8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_4 0x4C1BBC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_5 0x4C1BC0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_6 0x4C1BC4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_TOKEN_7 0x4C1BC8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_0 0x4C1BEC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_1 0x4C1BF0
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_2 0x4C1BF4
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_3 0x4C1BF8
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_4 0x4C1BFC
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_5 0x4C1C00
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_6 0x4C1C04
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_BANK_ID_7 0x4C1C08
+
+#define mmDMA_IF_W_N_DOWN_CH0_RGL_WDT 0x4C1C2C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C1C30
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C1C34
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C1C38
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C1C3C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C1C40
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C1C44
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C1C48
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C1C4C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x4C1C50
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x4C1C54
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x4C1C58
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x4C1C5C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x4C1C60
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x4C1C64
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x4C1C68
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x4C1C6C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x4C1C70
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C1C74
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C1C78
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C1C7C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C1C80
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x4C1C84
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C1C88
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C1C8C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x4C1C90
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x4C1C94
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x4C1C98
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x4C1C9C
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x4C1CA0
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x4C1CA4
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x4C1CA8
+
+#define mmDMA_IF_W_N_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x4C1CAC
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_0 0x4C1CB0
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_1 0x4C1CB4
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_2 0x4C1CB8
+
+#define mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3 0x4C1CBC
+
+#endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
new file mode 100644
index 000000000000..9236f4183084
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_DOWN_CH1_PERM_SEL 0x4C2108
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_0 0x4C2114
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_1 0x4C2118
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_2 0x4C211C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_3 0x4C2120
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_4 0x4C2124
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_5 0x4C2128
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_6 0x4C212C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_7 0x4C2130
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_8 0x4C2134
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_9 0x4C2138
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_10 0x4C213C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_11 0x4C2140
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_12 0x4C2144
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_13 0x4C2148
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_14 0x4C214C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_15 0x4C2150
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_16 0x4C2154
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_17 0x4C2158
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_18 0x4C215C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_19 0x4C2160
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_20 0x4C2164
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_21 0x4C2168
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_22 0x4C216C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_23 0x4C2170
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_24 0x4C2174
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_25 0x4C2178
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_26 0x4C217C
+
+#define mmDMA_IF_W_N_DOWN_CH1_HBM_POLY_H3_27 0x4C2180
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_0 0x4C2184
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_1 0x4C2188
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_2 0x4C218C
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_3 0x4C2190
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_4 0x4C2194
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_5 0x4C2198
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_6 0x4C219C
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_7 0x4C21A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_8 0x4C21A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_9 0x4C21A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_10 0x4C21AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_11 0x4C21B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_12 0x4C21B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_13 0x4C21B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_SRAM_POLY_H3_14 0x4C21BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN 0x4C226C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_EN 0x4C2274
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_SAT 0x4C2278
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_RST 0x4C227C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_HBM_TIMEOUT 0x4C2280
+
+#define mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN 0x4C2284
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_EN 0x4C2288
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_SAT 0x4C228C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_RST 0x4C2290
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_PCI_TIMEOUT 0x4C2294
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_EN 0x4C229C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_SAT 0x4C22A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RST 0x4C22A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_TIMEOUT 0x4C22AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RL_SRAM_RED 0x4C22B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN 0x4C22EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN 0x4C22F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE 0x4C22F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE 0x4C22F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x4C2404
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x4C2408
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x4C240C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x4C2410
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x4C2414
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x4C2418
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE 0x4C241C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE 0x4C2420
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x4C2424
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x4C2428
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x4C242C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x4C2430
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x4C2434
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x4C2438
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0 0x4C2450
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1 0x4C2454
+
+#define mmDMA_IF_W_N_DOWN_CH1_NON_LIN_EN 0x4C2480
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_0 0x4C2500
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_1 0x4C2504
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_2 0x4C2508
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_3 0x4C250C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_BANK_4 0x4C2510
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_0 0x4C2514
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_1 0x4C2520
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_2 0x4C2524
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_3 0x4C2528
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_4 0x4C252C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_5 0x4C2530
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_6 0x4C2534
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_7 0x4C2538
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_8 0x4C253C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_SRAM_OFFSET_9 0x4C2540
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_0 0x4C2550
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_1 0x4C2554
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_2 0x4C2558
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_3 0x4C255C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_4 0x4C2560
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_5 0x4C2564
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_6 0x4C2568
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_7 0x4C256C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_8 0x4C2570
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_9 0x4C2574
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_10 0x4C2578
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_11 0x4C257C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_12 0x4C2580
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_13 0x4C2584
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_14 0x4C2588
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_15 0x4C258C
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_16 0x4C2590
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_17 0x4C2594
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18 0x4C2598
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4C25E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4C25E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4C25EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4C25F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4C25F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4C25F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4C25FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x4C2600
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x4C2604
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x4C2608
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x4C260C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x4C2610
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x4C2614
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x4C2618
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x4C261C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x4C2620
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x4C2624
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x4C2628
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x4C262C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x4C2630
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x4C2634
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x4C2638
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x4C263C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x4C2640
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x4C2644
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x4C2648
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x4C264C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x4C2650
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x4C2654
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x4C2658
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x4C265C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x4C2660
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x4C2664
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x4C2668
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x4C266C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x4C2670
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x4C2674
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x4C2678
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x4C267C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x4C2680
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x4C2684
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x4C2688
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x4C268C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x4C2690
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x4C2694
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x4C2698
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x4C269C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4C26A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4C26A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4C26A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4C26AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4C26B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4C26B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4C26B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4C26BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4C26C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4C26C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4C26C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4C26CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4C26D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4C26D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4C26D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4C26DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4C26E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4C26E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4C26E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4C26EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4C26F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4C26F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4C26F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4C26FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x4C2700
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x4C2704
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x4C2708
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x4C270C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x4C2710
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x4C2714
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x4C2718
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x4C271C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x4C2720
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x4C2724
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x4C2728
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x4C272C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x4C2730
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x4C2734
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x4C2738
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x4C273C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x4C2740
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x4C2744
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x4C2748
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x4C274C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x4C2750
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x4C2754
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x4C2758
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x4C275C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x4C2760
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x4C2764
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x4C2768
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x4C276C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x4C2770
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x4C2774
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x4C2778
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x4C277C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x4C2780
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x4C2784
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x4C2788
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x4C278C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x4C2790
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x4C2794
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x4C2798
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x4C279C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4C27A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4C27A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4C27A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4C27AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4C27B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4C27B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4C27B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4C27BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4C27C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4C27C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4C27C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4C27CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4C27D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4C27D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4C27D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4C27DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4C27E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x4C2824
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x4C2828
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x4C282C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x4C2830
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x4C2834
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x4C2838
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x4C283C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x4C2840
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x4C2844
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x4C2848
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x4C284C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x4C2850
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x4C2854
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x4C2858
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x4C285C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x4C2860
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x4C2864
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x4C2868
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x4C286C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x4C2870
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x4C2874
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x4C2878
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x4C287C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x4C2880
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x4C2884
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x4C2888
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x4C288C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x4C2890
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x4C2894
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x4C2898
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x4C289C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4C28A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4C28A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4C28A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4C28AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4C28B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4C28B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4C28B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4C28BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4C28C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4C28C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4C28C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4C28CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4C28D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4C28D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4C28D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4C28DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4C28E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4C28E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4C28E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4C28EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4C28F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4C28F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4C28F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4C28FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x4C2900
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x4C2904
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x4C2908
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x4C290C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x4C2910
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x4C2914
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x4C2918
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x4C291C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x4C2920
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x4C2924
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x4C2928
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x4C292C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x4C2930
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x4C2934
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x4C2938
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x4C293C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x4C2940
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x4C2944
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x4C2948
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x4C294C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x4C2950
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x4C2954
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x4C2958
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x4C295C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x4C2960
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x4C2964
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x4C2968
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x4C296C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x4C2970
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x4C2974
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x4C2978
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x4C297C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x4C2980
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x4C2984
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x4C2988
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x4C298C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x4C2990
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x4C2994
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x4C2998
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x4C299C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4C29A0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4C29A4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4C29A8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4C29AC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4C29B0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4C29B4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4C29B8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4C29BC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4C29C0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4C29C4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4C29C8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4C29CC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4C29D0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4C29D4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4C29D8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4C29DC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4C29E0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4C29E4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4C29E8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4C29EC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4C29F0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4C29F4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4C29F8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4C29FC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x4C2A00
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x4C2A04
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x4C2A08
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x4C2A0C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x4C2A10
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x4C2A14
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x4C2A18
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x4C2A1C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x4C2A20
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AW 0x4C2A64
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_SEC_HIT_AR 0x4C2A68
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AW 0x4C2A6C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RANGE_PRIV_HIT_AR 0x4C2A70
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_CFG 0x4C2B64
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_SHIFT 0x4C2B68
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_0 0x4C2B6C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_1 0x4C2B70
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_2 0x4C2B74
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_3 0x4C2B78
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_4 0x4C2B7C
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_5 0x4C2B80
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_6 0x4C2B84
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_EXPECTED_LAT_7 0x4C2B88
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_0 0x4C2BAC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_1 0x4C2BB0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_2 0x4C2BB4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_3 0x4C2BB8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_4 0x4C2BBC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_5 0x4C2BC0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_6 0x4C2BC4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_TOKEN_7 0x4C2BC8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_0 0x4C2BEC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_1 0x4C2BF0
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_2 0x4C2BF4
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_3 0x4C2BF8
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_4 0x4C2BFC
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_5 0x4C2C00
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_6 0x4C2C04
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_BANK_ID_7 0x4C2C08
+
+#define mmDMA_IF_W_N_DOWN_CH1_RGL_WDT 0x4C2C2C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x4C2C30
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x4C2C34
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x4C2C38
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x4C2C3C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x4C2C40
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x4C2C44
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x4C2C48
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x4C2C4C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x4C2C50
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x4C2C54
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x4C2C58
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x4C2C5C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x4C2C60
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x4C2C64
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x4C2C68
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x4C2C6C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x4C2C70
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x4C2C74
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x4C2C78
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x4C2C7C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x4C2C80
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x4C2C84
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x4C2C88
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x4C2C8C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x4C2C90
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x4C2C94
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x4C2C98
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x4C2C9C
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x4C2CA0
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x4C2CA4
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x4C2CA8
+
+#define mmDMA_IF_W_N_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x4C2CAC
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_0 0x4C2CB0
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_1 0x4C2CB4
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_2 0x4C2CB8
+
+#define mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3 0x4C2CBC
+
+#endif /* ASIC_REG_DMA_IF_W_N_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
new file mode 100644
index 000000000000..da60893a5fab
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_N_REGS_H_
+#define ASIC_REG_DMA_IF_W_N_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_N (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_N_HBM0_WR_CRED_CNT 0x4C0000
+
+#define mmDMA_IF_W_N_HBM1_WR_CRED_CNT 0x4C0004
+
+#define mmDMA_IF_W_N_HBM0_RD_CRED_CNT 0x4C0008
+
+#define mmDMA_IF_W_N_HBM1_RD_CRED_CNT 0x4C000C
+
+#define mmDMA_IF_W_N_HBM_LIMITER_0 0x4C0030
+
+#define mmDMA_IF_W_N_HBM_LIMITER_1 0x4C0034
+
+#define mmDMA_IF_W_N_HBM_LIMITER_2 0x4C0038
+
+#define mmDMA_IF_W_N_HBM_LIMITER_3 0x4C003C
+
+#define mmDMA_IF_W_N_HBM_ALMOST_EN_0 0x4C0040
+
+#define mmDMA_IF_W_N_HBM_ALMOST_EN_1 0x4C0044
+
+#define mmDMA_IF_W_N_HBM_CRED_EN_0 0x4C0050
+
+#define mmDMA_IF_W_N_HBM_CRED_EN_1 0x4C0054
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_0 0x4C0100
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_1 0x4C0104
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_2 0x4C0108
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_3 0x4C010C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_4 0x4C0110
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_5 0x4C0114
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_6 0x4C0118
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_7 0x4C011C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_8 0x4C0120
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_9 0x4C0124
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_10 0x4C0128
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_11 0x4C012C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_12 0x4C0130
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_13 0x4C0134
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_14 0x4C0138
+
+#define mmDMA_IF_W_N_SOB_MIN_RPROT_15 0x4C013C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_0 0x4C0140
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_1 0x4C0144
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_2 0x4C0148
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_3 0x4C014C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_4 0x4C0150
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_5 0x4C0154
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_6 0x4C0158
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_7 0x4C015C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_8 0x4C0160
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_9 0x4C0164
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_10 0x4C0168
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_11 0x4C016C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_12 0x4C0170
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_13 0x4C0174
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_14 0x4C0178
+
+#define mmDMA_IF_W_N_SOB_MAX_RPROT_15 0x4C017C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_0 0x4C0180
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_1 0x4C0184
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_2 0x4C0188
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_3 0x4C018C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_4 0x4C0190
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_5 0x4C0194
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_6 0x4C0198
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_7 0x4C019C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_8 0x4C01A0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_9 0x4C01A4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_10 0x4C01A8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_11 0x4C01AC
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_12 0x4C01B0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_13 0x4C01B4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_14 0x4C01B8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPROT_15 0x4C01BC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_0 0x4C01C0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_1 0x4C01C4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_2 0x4C01C8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_3 0x4C01CC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_4 0x4C01D0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_5 0x4C01D4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_6 0x4C01D8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_7 0x4C01DC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_8 0x4C01E0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_9 0x4C01E4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_10 0x4C01E8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_11 0x4C01EC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_12 0x4C01F0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_13 0x4C01F4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_14 0x4C01F8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPROT_15 0x4C01FC
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_0 0x4C0200
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_1 0x4C0204
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_2 0x4C0208
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_3 0x4C020C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_4 0x4C0210
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_5 0x4C0214
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_6 0x4C0218
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_7 0x4C021C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_8 0x4C0220
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_9 0x4C0224
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_10 0x4C0228
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_11 0x4C022C
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_12 0x4C0230
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_13 0x4C0234
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_14 0x4C0238
+
+#define mmDMA_IF_W_N_SOB_MIN_RPRIV_15 0x4C023C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_0 0x4C0240
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_1 0x4C0244
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_2 0x4C0248
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_3 0x4C024C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_4 0x4C0250
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_5 0x4C0254
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_6 0x4C0258
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_7 0x4C025C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_8 0x4C0260
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_9 0x4C0264
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_10 0x4C0268
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_11 0x4C026C
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_12 0x4C0270
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_13 0x4C0274
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_14 0x4C0278
+
+#define mmDMA_IF_W_N_SOB_MAX_RPRIV_15 0x4C027C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_0 0x4C0280
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_1 0x4C0284
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_2 0x4C0288
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_3 0x4C028C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_4 0x4C0290
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_5 0x4C0294
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_6 0x4C0298
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_7 0x4C029C
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_8 0x4C02A0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_9 0x4C02A4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_10 0x4C02A8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_11 0x4C02AC
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_12 0x4C02B0
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_13 0x4C02B4
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_14 0x4C02B8
+
+#define mmDMA_IF_W_N_SOB_MIN_WPRIV_15 0x4C02BC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_0 0x4C02C0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_1 0x4C02C4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_2 0x4C02C8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_3 0x4C02CC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_4 0x4C02D0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_5 0x4C02D4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_6 0x4C02D8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_7 0x4C02DC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_8 0x4C02E0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_9 0x4C02E4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_10 0x4C02E8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_11 0x4C02EC
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_12 0x4C02F0
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_13 0x4C02F4
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_14 0x4C02F8
+
+#define mmDMA_IF_W_N_SOB_MAX_WPRIV_15 0x4C02FC
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_0 0x4C0300
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_1 0x4C0304
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_2 0x4C0308
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_3 0x4C030C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_4 0x4C0310
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_5 0x4C0314
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_6 0x4C0318
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_7 0x4C031C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_8 0x4C0320
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_9 0x4C0324
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_10 0x4C0328
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_11 0x4C032C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_12 0x4C0330
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_13 0x4C0334
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_14 0x4C0338
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPROT_15 0x4C033C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_0 0x4C0340
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_1 0x4C0344
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_2 0x4C0348
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_3 0x4C034C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_4 0x4C0350
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_5 0x4C0354
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_6 0x4C0358
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_7 0x4C035C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_8 0x4C0360
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_9 0x4C0364
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_10 0x4C0368
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_11 0x4C036C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_12 0x4C0370
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_13 0x4C0374
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_14 0x4C0378
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPROT_15 0x4C037C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_0 0x4C0380
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_1 0x4C0384
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_2 0x4C0388
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_3 0x4C038C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_4 0x4C0390
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_5 0x4C0394
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_6 0x4C0398
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_7 0x4C039C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_8 0x4C03A0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_9 0x4C03A4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_10 0x4C03A8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_11 0x4C03AC
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_12 0x4C03B0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_13 0x4C03B4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_14 0x4C03B8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPROT_15 0x4C03BC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_0 0x4C03C0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_1 0x4C03C4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_2 0x4C03C8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_3 0x4C03CC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_4 0x4C03D0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_5 0x4C03D4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_6 0x4C03D8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_7 0x4C03DC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_8 0x4C03E0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_9 0x4C03E4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_10 0x4C03E8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_11 0x4C03EC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_12 0x4C03F0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_13 0x4C03F4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_14 0x4C03F8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPROT_15 0x4C03FC
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_0 0x4C0400
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_1 0x4C0404
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_2 0x4C0408
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_3 0x4C040C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_4 0x4C0410
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_5 0x4C0414
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_6 0x4C0418
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_7 0x4C041C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_8 0x4C0420
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_9 0x4C0424
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_10 0x4C0428
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_11 0x4C042C
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_12 0x4C0430
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_13 0x4C0434
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_14 0x4C0438
+
+#define mmDMA_IF_W_N_DMA0_MIN_RPRIV_15 0x4C043C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_0 0x4C0440
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_1 0x4C0444
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_2 0x4C0448
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_3 0x4C044C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_4 0x4C0450
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_5 0x4C0454
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_6 0x4C0458
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_7 0x4C045C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_8 0x4C0460
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_9 0x4C0464
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_10 0x4C0468
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_11 0x4C046C
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_12 0x4C0470
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_13 0x4C0474
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_14 0x4C0478
+
+#define mmDMA_IF_W_N_DMA0_MAX_RPRIV_15 0x4C047C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_0 0x4C0480
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_1 0x4C0484
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_2 0x4C0488
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_3 0x4C048C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_4 0x4C0490
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_5 0x4C0494
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_6 0x4C0498
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_7 0x4C049C
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_8 0x4C04A0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_9 0x4C04A4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_10 0x4C04A8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_11 0x4C04AC
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_12 0x4C04B0
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_13 0x4C04B4
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_14 0x4C04B8
+
+#define mmDMA_IF_W_N_DMA0_MIN_WPRIV_15 0x4C04BC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_0 0x4C04C0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_1 0x4C04C4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_2 0x4C04C8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_3 0x4C04CC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_4 0x4C04D0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_5 0x4C04D4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_6 0x4C04D8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_7 0x4C04DC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_8 0x4C04E0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_9 0x4C04E4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_10 0x4C04E8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_11 0x4C04EC
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_12 0x4C04F0
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_13 0x4C04F4
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_14 0x4C04F8
+
+#define mmDMA_IF_W_N_DMA0_MAX_WPRIV_15 0x4C04FC
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_0 0x4C0500
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_1 0x4C0504
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_2 0x4C0508
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_3 0x4C050C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_4 0x4C0510
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_5 0x4C0514
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_6 0x4C0518
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_7 0x4C051C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_8 0x4C0520
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_9 0x4C0524
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_10 0x4C0528
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_11 0x4C052C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_12 0x4C0530
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_13 0x4C0534
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_14 0x4C0538
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPROT_15 0x4C053C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_0 0x4C0540
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_1 0x4C0544
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_2 0x4C0548
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_3 0x4C054C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_4 0x4C0550
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_5 0x4C0554
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_6 0x4C0558
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_7 0x4C055C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_8 0x4C0560
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_9 0x4C0564
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_10 0x4C0568
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_11 0x4C056C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_12 0x4C0570
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_13 0x4C0574
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_14 0x4C0578
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPROT_15 0x4C057C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_0 0x4C0580
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_1 0x4C0584
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_2 0x4C0588
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_3 0x4C058C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_4 0x4C0590
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_5 0x4C0594
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_6 0x4C0598
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_7 0x4C059C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_8 0x4C05A0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_9 0x4C05A4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_10 0x4C05A8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_11 0x4C05AC
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_12 0x4C05B0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_13 0x4C05B4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_14 0x4C05B8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPROT_15 0x4C05BC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_0 0x4C05C0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_1 0x4C05C4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_2 0x4C05C8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_3 0x4C05CC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_4 0x4C05D0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_5 0x4C05D4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_6 0x4C05D8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_7 0x4C05DC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_8 0x4C05E0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_9 0x4C05E4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_10 0x4C05E8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_11 0x4C05EC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_12 0x4C05F0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_13 0x4C05F4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_14 0x4C05F8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPROT_15 0x4C05FC
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_0 0x4C0600
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_1 0x4C0604
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_2 0x4C0608
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_3 0x4C060C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_4 0x4C0610
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_5 0x4C0614
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_6 0x4C0618
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_7 0x4C061C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_8 0x4C0620
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_9 0x4C0624
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_10 0x4C0628
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_11 0x4C062C
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_12 0x4C0630
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_13 0x4C0634
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_14 0x4C0638
+
+#define mmDMA_IF_W_N_DMA1_MIN_RPRIV_15 0x4C063C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_0 0x4C0640
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_1 0x4C0644
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_2 0x4C0648
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_3 0x4C064C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_4 0x4C0650
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_5 0x4C0654
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_6 0x4C0658
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_7 0x4C065C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_8 0x4C0660
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_9 0x4C0664
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_10 0x4C0668
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_11 0x4C066C
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_12 0x4C0670
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_13 0x4C0674
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_14 0x4C0678
+
+#define mmDMA_IF_W_N_DMA1_MAX_RPRIV_15 0x4C067C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_0 0x4C0680
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_1 0x4C0684
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_2 0x4C0688
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_3 0x4C068C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_4 0x4C0690
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_5 0x4C0694
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_6 0x4C0698
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_7 0x4C069C
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_8 0x4C06A0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_9 0x4C06A4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_10 0x4C06A8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_11 0x4C06AC
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_12 0x4C06B0
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_13 0x4C06B4
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_14 0x4C06B8
+
+#define mmDMA_IF_W_N_DMA1_MIN_WPRIV_15 0x4C06BC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_0 0x4C06C0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_1 0x4C06C4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_2 0x4C06C8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_3 0x4C06CC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_4 0x4C06D0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_5 0x4C06D4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_6 0x4C06D8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_7 0x4C06DC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_8 0x4C06E0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_9 0x4C06E4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_10 0x4C06E8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_11 0x4C06EC
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_12 0x4C06F0
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_13 0x4C06F4
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_14 0x4C06F8
+
+#define mmDMA_IF_W_N_DMA1_MAX_WPRIV_15 0x4C06FC
+
+#define mmDMA_IF_W_N_SOB_HIT_RPROT 0x4C0700
+
+#define mmDMA_IF_W_N_SOB_HIT_WPROT 0x4C0704
+
+#define mmDMA_IF_W_N_SOB_HIT_RPRIV 0x4C070C
+
+#define mmDMA_IF_W_N_SOB_HIT_WPRIV 0x4C0710
+
+#define mmDMA_IF_W_N_DMA0_HIT_RPROT 0x4C071C
+
+#define mmDMA_IF_W_N_DMA0_HIT_WPROT 0x4C0720
+
+#define mmDMA_IF_W_N_DMA0_HIT_RPRIV 0x4C0724
+
+#define mmDMA_IF_W_N_DMA0_HIT_WPRIV 0x4C0728
+
+#define mmDMA_IF_W_N_DMA1_HIT_RPROT 0x4C0730
+
+#define mmDMA_IF_W_N_DMA1_HIT_WPROT 0x4C0734
+
+#define mmDMA_IF_W_N_DMA1_HIT_RPRIV 0x4C0738
+
+#define mmDMA_IF_W_N_DMA1_HIT_WPRIV 0x4C073C
+
+#define mmDMA_IF_W_N_HBM_BIN 0x4C0800
+
+#define mmDMA_IF_W_N_MME_BIN 0x4C0804
+
+#define mmDMA_IF_W_N_TPC_BIN 0x4C0808
+
+#define mmDMA_IF_W_N_DMA_BIN 0x4C080C
+
+#define mmDMA_IF_W_N_SOB_CG_EN 0x4C0810
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_0 0x4C0820
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_1 0x4C0824
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_2 0x4C0828
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_3 0x4C082C
+
+#define mmDMA_IF_W_N_HBM_I2C_ADDR_4 0x4C0830
+
+#define mmDMA_IF_W_N_HBM_MISC 0x4C0834
+
+#endif /* ASIC_REG_DMA_IF_W_N_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
new file mode 100644
index 000000000000..56ffc920d58a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_
+#define ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_S_DOWN_CH0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_S_DOWN_CH0_PERM_SEL 0x481108
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_0 0x481114
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_1 0x481118
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_2 0x48111C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_3 0x481120
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_4 0x481124
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_5 0x481128
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_6 0x48112C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_7 0x481130
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_8 0x481134
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_9 0x481138
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_10 0x48113C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_11 0x481140
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_12 0x481144
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_13 0x481148
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_14 0x48114C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_15 0x481150
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_16 0x481154
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_17 0x481158
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_18 0x48115C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_19 0x481160
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_20 0x481164
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_21 0x481168
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_22 0x48116C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_23 0x481170
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_24 0x481174
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_25 0x481178
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_26 0x48117C
+
+#define mmDMA_IF_W_S_DOWN_CH0_HBM_POLY_H3_27 0x481180
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_0 0x481184
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_1 0x481188
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_2 0x48118C
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_3 0x481190
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_4 0x481194
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_5 0x481198
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_6 0x48119C
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_7 0x4811A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_8 0x4811A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_9 0x4811A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_10 0x4811AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_11 0x4811B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_12 0x4811B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_13 0x4811B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_SRAM_POLY_H3_14 0x4811BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN 0x48126C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_EN 0x481274
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_SAT 0x481278
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_RST 0x48127C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_HBM_TIMEOUT 0x481280
+
+#define mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN 0x481284
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_EN 0x481288
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_SAT 0x48128C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_RST 0x481290
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_PCI_TIMEOUT 0x481294
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_EN 0x48129C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_SAT 0x4812A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RST 0x4812A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_TIMEOUT 0x4812AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RL_SRAM_RED 0x4812B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN 0x4812EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN 0x4812F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE 0x4812F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE 0x4812F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_SET_EN 0x481404
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_SET 0x481408
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_WRAP 0x48140C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_PCI_CTR_CNT 0x481410
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM_CTR_SET_EN 0x481414
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM_CTR_SET 0x481418
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE 0x48141C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE 0x481420
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_SET_EN 0x481424
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_SET 0x481428
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_WRAP 0x48142C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_PCI_CTR_CNT 0x481430
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM_CTR_SET_EN 0x481434
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM_CTR_SET 0x481438
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0 0x481450
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1 0x481454
+
+#define mmDMA_IF_W_S_DOWN_CH0_NON_LIN_EN 0x481480
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_0 0x481500
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_1 0x481504
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_2 0x481508
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_3 0x48150C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_BANK_4 0x481510
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_0 0x481514
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_1 0x481520
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_2 0x481524
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_3 0x481528
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_4 0x48152C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_5 0x481530
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_6 0x481534
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_7 0x481538
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_8 0x48153C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_SRAM_OFFSET_9 0x481540
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_0 0x481550
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_1 0x481554
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_2 0x481558
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_3 0x48155C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_4 0x481560
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_5 0x481564
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_6 0x481568
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_7 0x48156C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_8 0x481570
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_9 0x481574
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_10 0x481578
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_11 0x48157C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_12 0x481580
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_13 0x481584
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_14 0x481588
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_15 0x48158C
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_16 0x481590
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_17 0x481594
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18 0x481598
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0 0x4815E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_1 0x4815E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_2 0x4815EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_3 0x4815F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_4 0x4815F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_5 0x4815F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_6 0x4815FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_7 0x481600
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_8 0x481604
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_9 0x481608
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_10 0x48160C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_11 0x481610
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_12 0x481614
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_13 0x481618
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_14 0x48161C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_15 0x481620
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0 0x481624
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_1 0x481628
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_2 0x48162C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_3 0x481630
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_4 0x481634
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_5 0x481638
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_6 0x48163C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_7 0x481640
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_8 0x481644
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_9 0x481648
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_10 0x48164C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_11 0x481650
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_12 0x481654
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_13 0x481658
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_14 0x48165C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_15 0x481660
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0 0x481664
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_1 0x481668
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_2 0x48166C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_3 0x481670
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_4 0x481674
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_5 0x481678
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_6 0x48167C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_7 0x481680
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_8 0x481684
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_9 0x481688
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_10 0x48168C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_11 0x481690
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_12 0x481694
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_13 0x481698
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_14 0x48169C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_15 0x4816A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0 0x4816A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_1 0x4816A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_2 0x4816AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_3 0x4816B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_4 0x4816B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_5 0x4816B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_6 0x4816BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_7 0x4816C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_8 0x4816C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_9 0x4816C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_10 0x4816CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_11 0x4816D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_12 0x4816D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_13 0x4816D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_14 0x4816DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_15 0x4816E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_0 0x4816E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_1 0x4816E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_2 0x4816EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_3 0x4816F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_4 0x4816F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_5 0x4816F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_6 0x4816FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_7 0x481700
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_8 0x481704
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_9 0x481708
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_10 0x48170C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_11 0x481710
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_12 0x481714
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_13 0x481718
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_14 0x48171C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AW_15 0x481720
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_0 0x481724
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_1 0x481728
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_2 0x48172C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_3 0x481730
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_4 0x481734
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_5 0x481738
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_6 0x48173C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_7 0x481740
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_8 0x481744
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_9 0x481748
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_10 0x48174C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_11 0x481750
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_12 0x481754
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_13 0x481758
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_14 0x48175C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AW_15 0x481760
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_0 0x481764
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_1 0x481768
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_2 0x48176C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_3 0x481770
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_4 0x481774
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_5 0x481778
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_6 0x48177C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_7 0x481780
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_8 0x481784
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_9 0x481788
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_10 0x48178C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_11 0x481790
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_12 0x481794
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_13 0x481798
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_14 0x48179C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AW_15 0x4817A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_0 0x4817A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_1 0x4817A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_2 0x4817AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_3 0x4817B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_4 0x4817B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_5 0x4817B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_6 0x4817BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_7 0x4817C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_8 0x4817C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_9 0x4817C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_10 0x4817CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_11 0x4817D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_12 0x4817D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_13 0x4817D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_14 0x4817DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AW_15 0x4817E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0 0x481824
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_1 0x481828
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_2 0x48182C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_3 0x481830
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_4 0x481834
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_5 0x481838
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_6 0x48183C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_7 0x481840
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_8 0x481844
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_9 0x481848
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_10 0x48184C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_11 0x481850
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_12 0x481854
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_13 0x481858
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_14 0x48185C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_15 0x481860
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0 0x481864
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_1 0x481868
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_2 0x48186C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_3 0x481870
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_4 0x481874
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_5 0x481878
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_6 0x48187C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_7 0x481880
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_8 0x481884
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_9 0x481888
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_10 0x48188C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_11 0x481890
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_12 0x481894
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_13 0x481898
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_14 0x48189C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_15 0x4818A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0 0x4818A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_1 0x4818A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_2 0x4818AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_3 0x4818B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_4 0x4818B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_5 0x4818B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_6 0x4818BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_7 0x4818C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_8 0x4818C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_9 0x4818C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_10 0x4818CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_11 0x4818D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_12 0x4818D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_13 0x4818D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_14 0x4818DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_15 0x4818E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0 0x4818E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_1 0x4818E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_2 0x4818EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_3 0x4818F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_4 0x4818F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_5 0x4818F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_6 0x4818FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_7 0x481900
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_8 0x481904
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_9 0x481908
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_10 0x48190C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_11 0x481910
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_12 0x481914
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_13 0x481918
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_14 0x48191C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_15 0x481920
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_0 0x481924
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_1 0x481928
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_2 0x48192C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_3 0x481930
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_4 0x481934
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_5 0x481938
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_6 0x48193C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_7 0x481940
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_8 0x481944
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_9 0x481948
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_10 0x48194C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_11 0x481950
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_12 0x481954
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_13 0x481958
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_14 0x48195C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_LOW_AR_15 0x481960
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_0 0x481964
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_1 0x481968
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_2 0x48196C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_3 0x481970
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_4 0x481974
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_5 0x481978
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_6 0x48197C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_7 0x481980
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_8 0x481984
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_9 0x481988
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_10 0x48198C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_11 0x481990
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_12 0x481994
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_13 0x481998
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_14 0x48199C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_BASE_HIGH_AR_15 0x4819A0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_0 0x4819A4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_1 0x4819A8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_2 0x4819AC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_3 0x4819B0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_4 0x4819B4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_5 0x4819B8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_6 0x4819BC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_7 0x4819C0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_8 0x4819C4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_9 0x4819C8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_10 0x4819CC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_11 0x4819D0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_12 0x4819D4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_13 0x4819D8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_14 0x4819DC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_LOW_AR_15 0x4819E0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_0 0x4819E4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_1 0x4819E8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_2 0x4819EC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_3 0x4819F0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_4 0x4819F4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_5 0x4819F8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_6 0x4819FC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_7 0x481A00
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_8 0x481A04
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_9 0x481A08
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_10 0x481A0C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_11 0x481A10
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_12 0x481A14
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_13 0x481A18
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_14 0x481A1C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_MASK_HIGH_AR_15 0x481A20
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW 0x481A64
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR 0x481A68
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_HIT_AW 0x481A6C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RANGE_PRIV_HIT_AR 0x481A70
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_CFG 0x481B64
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_SHIFT 0x481B68
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_0 0x481B6C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_1 0x481B70
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_2 0x481B74
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_3 0x481B78
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_4 0x481B7C
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_5 0x481B80
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_6 0x481B84
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_EXPECTED_LAT_7 0x481B88
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_0 0x481BAC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_1 0x481BB0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_2 0x481BB4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_3 0x481BB8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_4 0x481BBC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_5 0x481BC0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_6 0x481BC4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_TOKEN_7 0x481BC8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_0 0x481BEC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_1 0x481BF0
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_2 0x481BF4
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_3 0x481BF8
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_4 0x481BFC
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_5 0x481C00
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_6 0x481C04
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_BANK_ID_7 0x481C08
+
+#define mmDMA_IF_W_S_DOWN_CH0_RGL_WDT 0x481C2C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_WRAP 0x481C30
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_WRAP 0x481C34
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_WRAP 0x481C38
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_WRAP 0x481C3C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_WRAP 0x481C40
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_WRAP 0x481C44
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_WRAP 0x481C48
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_WRAP 0x481C4C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH0_CTR_CNT 0x481C50
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM0_CH1_CTR_CNT 0x481C54
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH0_CTR_CNT 0x481C58
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM1_CH1_CTR_CNT 0x481C5C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH0_CTR_CNT 0x481C60
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM2_CH1_CTR_CNT 0x481C64
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH0_CTR_CNT 0x481C68
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AR_HBM3_CH1_CTR_CNT 0x481C6C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_WRAP 0x481C70
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_WRAP 0x481C74
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_WRAP 0x481C78
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_WRAP 0x481C7C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_WRAP 0x481C80
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_WRAP 0x481C84
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_WRAP 0x481C88
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_WRAP 0x481C8C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH0_CTR_CNT 0x481C90
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM0_CH1_CTR_CNT 0x481C94
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH0_CTR_CNT 0x481C98
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM1_CH1_CTR_CNT 0x481C9C
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH0_CTR_CNT 0x481CA0
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM2_CH1_CTR_CNT 0x481CA4
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH0_CTR_CNT 0x481CA8
+
+#define mmDMA_IF_W_S_DOWN_CH0_E2E_AW_HBM3_CH1_CTR_CNT 0x481CAC
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_0 0x481CB0
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_1 0x481CB4
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_2 0x481CB8
+
+#define mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3 0x481CBC
+
+#endif /* ASIC_REG_DMA_IF_W_S_DOWN_CH0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
new file mode 100644
index 000000000000..cbc642918deb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_
+#define ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_S_DOWN_CH1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_S_DOWN_CH1_PERM_SEL 0x482108
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_0 0x482114
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_1 0x482118
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_2 0x48211C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_3 0x482120
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_4 0x482124
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_5 0x482128
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_6 0x48212C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_7 0x482130
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_8 0x482134
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_9 0x482138
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_10 0x48213C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_11 0x482140
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_12 0x482144
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_13 0x482148
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_14 0x48214C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_15 0x482150
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_16 0x482154
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_17 0x482158
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_18 0x48215C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_19 0x482160
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_20 0x482164
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_21 0x482168
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_22 0x48216C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_23 0x482170
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_24 0x482174
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_25 0x482178
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_26 0x48217C
+
+#define mmDMA_IF_W_S_DOWN_CH1_HBM_POLY_H3_27 0x482180
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_0 0x482184
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_1 0x482188
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_2 0x48218C
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_3 0x482190
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_4 0x482194
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_5 0x482198
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_6 0x48219C
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_7 0x4821A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_8 0x4821A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_9 0x4821A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_10 0x4821AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_11 0x4821B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_12 0x4821B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_13 0x4821B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_SRAM_POLY_H3_14 0x4821BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN 0x48226C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_EN 0x482274
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_SAT 0x482278
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_RST 0x48227C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_HBM_TIMEOUT 0x482280
+
+#define mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN 0x482284
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_EN 0x482288
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_SAT 0x48228C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_RST 0x482290
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_PCI_TIMEOUT 0x482294
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_EN 0x48229C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_SAT 0x4822A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RST 0x4822A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_TIMEOUT 0x4822AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RL_SRAM_RED 0x4822B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN 0x4822EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN 0x4822F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE 0x4822F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE 0x4822F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_SET_EN 0x482404
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_SET 0x482408
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_WRAP 0x48240C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_PCI_CTR_CNT 0x482410
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM_CTR_SET_EN 0x482414
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM_CTR_SET 0x482418
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE 0x48241C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE 0x482420
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_SET_EN 0x482424
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_SET 0x482428
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_WRAP 0x48242C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_PCI_CTR_CNT 0x482430
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM_CTR_SET_EN 0x482434
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM_CTR_SET 0x482438
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0 0x482450
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1 0x482454
+
+#define mmDMA_IF_W_S_DOWN_CH1_NON_LIN_EN 0x482480
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_0 0x482500
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_1 0x482504
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_2 0x482508
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_3 0x48250C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_BANK_4 0x482510
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_0 0x482514
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_1 0x482520
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_2 0x482524
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_3 0x482528
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_4 0x48252C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_5 0x482530
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_6 0x482534
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_7 0x482538
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_8 0x48253C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_SRAM_OFFSET_9 0x482540
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_0 0x482550
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_1 0x482554
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_2 0x482558
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_3 0x48255C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_4 0x482560
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_5 0x482564
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_6 0x482568
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_7 0x48256C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_8 0x482570
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_9 0x482574
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_10 0x482578
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_11 0x48257C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_12 0x482580
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_13 0x482584
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_14 0x482588
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_15 0x48258C
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_16 0x482590
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_17 0x482594
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18 0x482598
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0 0x4825E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_1 0x4825E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_2 0x4825EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_3 0x4825F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_4 0x4825F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_5 0x4825F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_6 0x4825FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_7 0x482600
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_8 0x482604
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_9 0x482608
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_10 0x48260C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_11 0x482610
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_12 0x482614
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_13 0x482618
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_14 0x48261C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_15 0x482620
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0 0x482624
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_1 0x482628
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_2 0x48262C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_3 0x482630
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_4 0x482634
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_5 0x482638
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_6 0x48263C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_7 0x482640
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_8 0x482644
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_9 0x482648
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_10 0x48264C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_11 0x482650
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_12 0x482654
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_13 0x482658
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_14 0x48265C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_15 0x482660
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0 0x482664
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_1 0x482668
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_2 0x48266C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_3 0x482670
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_4 0x482674
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_5 0x482678
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_6 0x48267C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_7 0x482680
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_8 0x482684
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_9 0x482688
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_10 0x48268C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_11 0x482690
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_12 0x482694
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_13 0x482698
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_14 0x48269C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_15 0x4826A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0 0x4826A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_1 0x4826A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_2 0x4826AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_3 0x4826B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_4 0x4826B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_5 0x4826B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_6 0x4826BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_7 0x4826C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_8 0x4826C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_9 0x4826C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_10 0x4826CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_11 0x4826D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_12 0x4826D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_13 0x4826D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_14 0x4826DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_15 0x4826E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_0 0x4826E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_1 0x4826E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_2 0x4826EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_3 0x4826F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_4 0x4826F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_5 0x4826F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_6 0x4826FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_7 0x482700
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_8 0x482704
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_9 0x482708
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_10 0x48270C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_11 0x482710
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_12 0x482714
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_13 0x482718
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_14 0x48271C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AW_15 0x482720
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_0 0x482724
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_1 0x482728
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_2 0x48272C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_3 0x482730
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_4 0x482734
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_5 0x482738
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_6 0x48273C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_7 0x482740
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_8 0x482744
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_9 0x482748
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_10 0x48274C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_11 0x482750
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_12 0x482754
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_13 0x482758
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_14 0x48275C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AW_15 0x482760
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_0 0x482764
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_1 0x482768
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_2 0x48276C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_3 0x482770
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_4 0x482774
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_5 0x482778
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_6 0x48277C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_7 0x482780
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_8 0x482784
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_9 0x482788
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_10 0x48278C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_11 0x482790
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_12 0x482794
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_13 0x482798
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_14 0x48279C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AW_15 0x4827A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_0 0x4827A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_1 0x4827A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_2 0x4827AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_3 0x4827B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_4 0x4827B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_5 0x4827B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_6 0x4827BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_7 0x4827C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_8 0x4827C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_9 0x4827C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_10 0x4827CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_11 0x4827D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_12 0x4827D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_13 0x4827D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_14 0x4827DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AW_15 0x4827E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0 0x482824
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_1 0x482828
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_2 0x48282C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_3 0x482830
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_4 0x482834
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_5 0x482838
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_6 0x48283C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_7 0x482840
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_8 0x482844
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_9 0x482848
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_10 0x48284C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_11 0x482850
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_12 0x482854
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_13 0x482858
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_14 0x48285C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_15 0x482860
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0 0x482864
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_1 0x482868
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_2 0x48286C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_3 0x482870
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_4 0x482874
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_5 0x482878
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_6 0x48287C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_7 0x482880
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_8 0x482884
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_9 0x482888
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_10 0x48288C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_11 0x482890
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_12 0x482894
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_13 0x482898
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_14 0x48289C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_15 0x4828A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0 0x4828A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_1 0x4828A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_2 0x4828AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_3 0x4828B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_4 0x4828B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_5 0x4828B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_6 0x4828BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_7 0x4828C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_8 0x4828C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_9 0x4828C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_10 0x4828CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_11 0x4828D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_12 0x4828D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_13 0x4828D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_14 0x4828DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_15 0x4828E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0 0x4828E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_1 0x4828E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_2 0x4828EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_3 0x4828F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_4 0x4828F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_5 0x4828F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_6 0x4828FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_7 0x482900
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_8 0x482904
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_9 0x482908
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_10 0x48290C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_11 0x482910
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_12 0x482914
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_13 0x482918
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_14 0x48291C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_15 0x482920
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_0 0x482924
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_1 0x482928
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_2 0x48292C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_3 0x482930
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_4 0x482934
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_5 0x482938
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_6 0x48293C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_7 0x482940
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_8 0x482944
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_9 0x482948
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_10 0x48294C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_11 0x482950
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_12 0x482954
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_13 0x482958
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_14 0x48295C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_LOW_AR_15 0x482960
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_0 0x482964
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_1 0x482968
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_2 0x48296C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_3 0x482970
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_4 0x482974
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_5 0x482978
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_6 0x48297C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_7 0x482980
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_8 0x482984
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_9 0x482988
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_10 0x48298C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_11 0x482990
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_12 0x482994
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_13 0x482998
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_14 0x48299C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_BASE_HIGH_AR_15 0x4829A0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_0 0x4829A4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_1 0x4829A8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_2 0x4829AC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_3 0x4829B0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_4 0x4829B4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_5 0x4829B8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_6 0x4829BC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_7 0x4829C0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_8 0x4829C4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_9 0x4829C8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_10 0x4829CC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_11 0x4829D0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_12 0x4829D4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_13 0x4829D8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_14 0x4829DC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_LOW_AR_15 0x4829E0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_0 0x4829E4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_1 0x4829E8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_2 0x4829EC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_3 0x4829F0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_4 0x4829F4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_5 0x4829F8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_6 0x4829FC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_7 0x482A00
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_8 0x482A04
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_9 0x482A08
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_10 0x482A0C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_11 0x482A10
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_12 0x482A14
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_13 0x482A18
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_14 0x482A1C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_MASK_HIGH_AR_15 0x482A20
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW 0x482A64
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR 0x482A68
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_HIT_AW 0x482A6C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RANGE_PRIV_HIT_AR 0x482A70
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_CFG 0x482B64
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_SHIFT 0x482B68
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_0 0x482B6C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_1 0x482B70
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_2 0x482B74
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_3 0x482B78
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_4 0x482B7C
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_5 0x482B80
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_6 0x482B84
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_EXPECTED_LAT_7 0x482B88
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_0 0x482BAC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_1 0x482BB0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_2 0x482BB4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_3 0x482BB8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_4 0x482BBC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_5 0x482BC0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_6 0x482BC4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_TOKEN_7 0x482BC8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_0 0x482BEC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_1 0x482BF0
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_2 0x482BF4
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_3 0x482BF8
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_4 0x482BFC
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_5 0x482C00
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_6 0x482C04
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_BANK_ID_7 0x482C08
+
+#define mmDMA_IF_W_S_DOWN_CH1_RGL_WDT 0x482C2C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_WRAP 0x482C30
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_WRAP 0x482C34
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_WRAP 0x482C38
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_WRAP 0x482C3C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_WRAP 0x482C40
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_WRAP 0x482C44
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_WRAP 0x482C48
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_WRAP 0x482C4C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH0_CTR_CNT 0x482C50
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM0_CH1_CTR_CNT 0x482C54
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH0_CTR_CNT 0x482C58
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM1_CH1_CTR_CNT 0x482C5C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH0_CTR_CNT 0x482C60
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM2_CH1_CTR_CNT 0x482C64
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH0_CTR_CNT 0x482C68
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AR_HBM3_CH1_CTR_CNT 0x482C6C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_WRAP 0x482C70
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_WRAP 0x482C74
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_WRAP 0x482C78
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_WRAP 0x482C7C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_WRAP 0x482C80
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_WRAP 0x482C84
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_WRAP 0x482C88
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_WRAP 0x482C8C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH0_CTR_CNT 0x482C90
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM0_CH1_CTR_CNT 0x482C94
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH0_CTR_CNT 0x482C98
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM1_CH1_CTR_CNT 0x482C9C
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH0_CTR_CNT 0x482CA0
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM2_CH1_CTR_CNT 0x482CA4
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH0_CTR_CNT 0x482CA8
+
+#define mmDMA_IF_W_S_DOWN_CH1_E2E_AW_HBM3_CH1_CTR_CNT 0x482CAC
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_0 0x482CB0
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_1 0x482CB4
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_2 0x482CB8
+
+#define mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3 0x482CBC
+
+#endif /* ASIC_REG_DMA_IF_W_S_DOWN_CH1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
new file mode 100644
index 000000000000..2382bc41bea6
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DMA_IF_W_S_REGS_H_
+#define ASIC_REG_DMA_IF_W_S_REGS_H_
+
+/*
+ *****************************************
+ * DMA_IF_W_S (Prototype: DMA_IF)
+ *****************************************
+ */
+
+#define mmDMA_IF_W_S_HBM0_WR_CRED_CNT 0x480000
+
+#define mmDMA_IF_W_S_HBM1_WR_CRED_CNT 0x480004
+
+#define mmDMA_IF_W_S_HBM0_RD_CRED_CNT 0x480008
+
+#define mmDMA_IF_W_S_HBM1_RD_CRED_CNT 0x48000C
+
+#define mmDMA_IF_W_S_HBM_LIMITER_0 0x480030
+
+#define mmDMA_IF_W_S_HBM_LIMITER_1 0x480034
+
+#define mmDMA_IF_W_S_HBM_LIMITER_2 0x480038
+
+#define mmDMA_IF_W_S_HBM_LIMITER_3 0x48003C
+
+#define mmDMA_IF_W_S_HBM_ALMOST_EN_0 0x480040
+
+#define mmDMA_IF_W_S_HBM_ALMOST_EN_1 0x480044
+
+#define mmDMA_IF_W_S_HBM_CRED_EN_0 0x480050
+
+#define mmDMA_IF_W_S_HBM_CRED_EN_1 0x480054
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_0 0x480100
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_1 0x480104
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_2 0x480108
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_3 0x48010C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_4 0x480110
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_5 0x480114
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_6 0x480118
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_7 0x48011C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_8 0x480120
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_9 0x480124
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_10 0x480128
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_11 0x48012C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_12 0x480130
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_13 0x480134
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_14 0x480138
+
+#define mmDMA_IF_W_S_SOB_MIN_RPROT_15 0x48013C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_0 0x480140
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_1 0x480144
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_2 0x480148
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_3 0x48014C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_4 0x480150
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_5 0x480154
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_6 0x480158
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_7 0x48015C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_8 0x480160
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_9 0x480164
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_10 0x480168
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_11 0x48016C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_12 0x480170
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_13 0x480174
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_14 0x480178
+
+#define mmDMA_IF_W_S_SOB_MAX_RPROT_15 0x48017C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_0 0x480180
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_1 0x480184
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_2 0x480188
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_3 0x48018C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_4 0x480190
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_5 0x480194
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_6 0x480198
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_7 0x48019C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_8 0x4801A0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_9 0x4801A4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_10 0x4801A8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_11 0x4801AC
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_12 0x4801B0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_13 0x4801B4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_14 0x4801B8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPROT_15 0x4801BC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_0 0x4801C0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_1 0x4801C4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_2 0x4801C8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_3 0x4801CC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_4 0x4801D0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_5 0x4801D4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_6 0x4801D8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_7 0x4801DC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_8 0x4801E0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_9 0x4801E4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_10 0x4801E8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_11 0x4801EC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_12 0x4801F0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_13 0x4801F4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_14 0x4801F8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPROT_15 0x4801FC
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_0 0x480200
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_1 0x480204
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_2 0x480208
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_3 0x48020C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_4 0x480210
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_5 0x480214
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_6 0x480218
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_7 0x48021C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_8 0x480220
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_9 0x480224
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_10 0x480228
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_11 0x48022C
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_12 0x480230
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_13 0x480234
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_14 0x480238
+
+#define mmDMA_IF_W_S_SOB_MIN_RPRIV_15 0x48023C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_0 0x480240
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_1 0x480244
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_2 0x480248
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_3 0x48024C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_4 0x480250
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_5 0x480254
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_6 0x480258
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_7 0x48025C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_8 0x480260
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_9 0x480264
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_10 0x480268
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_11 0x48026C
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_12 0x480270
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_13 0x480274
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_14 0x480278
+
+#define mmDMA_IF_W_S_SOB_MAX_RPRIV_15 0x48027C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_0 0x480280
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_1 0x480284
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_2 0x480288
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_3 0x48028C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_4 0x480290
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_5 0x480294
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_6 0x480298
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_7 0x48029C
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_8 0x4802A0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_9 0x4802A4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_10 0x4802A8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_11 0x4802AC
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_12 0x4802B0
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_13 0x4802B4
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_14 0x4802B8
+
+#define mmDMA_IF_W_S_SOB_MIN_WPRIV_15 0x4802BC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_0 0x4802C0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_1 0x4802C4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_2 0x4802C8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_3 0x4802CC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_4 0x4802D0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_5 0x4802D4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_6 0x4802D8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_7 0x4802DC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_8 0x4802E0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_9 0x4802E4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_10 0x4802E8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_11 0x4802EC
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_12 0x4802F0
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_13 0x4802F4
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_14 0x4802F8
+
+#define mmDMA_IF_W_S_SOB_MAX_WPRIV_15 0x4802FC
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_0 0x480300
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_1 0x480304
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_2 0x480308
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_3 0x48030C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_4 0x480310
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_5 0x480314
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_6 0x480318
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_7 0x48031C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_8 0x480320
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_9 0x480324
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_10 0x480328
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_11 0x48032C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_12 0x480330
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_13 0x480334
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_14 0x480338
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPROT_15 0x48033C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_0 0x480340
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_1 0x480344
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_2 0x480348
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_3 0x48034C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_4 0x480350
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_5 0x480354
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_6 0x480358
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_7 0x48035C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_8 0x480360
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_9 0x480364
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_10 0x480368
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_11 0x48036C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_12 0x480370
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_13 0x480374
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_14 0x480378
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPROT_15 0x48037C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_0 0x480380
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_1 0x480384
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_2 0x480388
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_3 0x48038C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_4 0x480390
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_5 0x480394
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_6 0x480398
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_7 0x48039C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_8 0x4803A0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_9 0x4803A4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_10 0x4803A8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_11 0x4803AC
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_12 0x4803B0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_13 0x4803B4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_14 0x4803B8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPROT_15 0x4803BC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_0 0x4803C0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_1 0x4803C4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_2 0x4803C8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_3 0x4803CC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_4 0x4803D0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_5 0x4803D4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_6 0x4803D8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_7 0x4803DC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_8 0x4803E0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_9 0x4803E4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_10 0x4803E8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_11 0x4803EC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_12 0x4803F0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_13 0x4803F4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_14 0x4803F8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPROT_15 0x4803FC
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_0 0x480400
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_1 0x480404
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_2 0x480408
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_3 0x48040C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_4 0x480410
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_5 0x480414
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_6 0x480418
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_7 0x48041C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_8 0x480420
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_9 0x480424
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_10 0x480428
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_11 0x48042C
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_12 0x480430
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_13 0x480434
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_14 0x480438
+
+#define mmDMA_IF_W_S_DMA0_MIN_RPRIV_15 0x48043C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_0 0x480440
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_1 0x480444
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_2 0x480448
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_3 0x48044C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_4 0x480450
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_5 0x480454
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_6 0x480458
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_7 0x48045C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_8 0x480460
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_9 0x480464
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_10 0x480468
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_11 0x48046C
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_12 0x480470
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_13 0x480474
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_14 0x480478
+
+#define mmDMA_IF_W_S_DMA0_MAX_RPRIV_15 0x48047C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_0 0x480480
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_1 0x480484
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_2 0x480488
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_3 0x48048C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_4 0x480490
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_5 0x480494
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_6 0x480498
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_7 0x48049C
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_8 0x4804A0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_9 0x4804A4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_10 0x4804A8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_11 0x4804AC
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_12 0x4804B0
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_13 0x4804B4
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_14 0x4804B8
+
+#define mmDMA_IF_W_S_DMA0_MIN_WPRIV_15 0x4804BC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_0 0x4804C0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_1 0x4804C4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_2 0x4804C8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_3 0x4804CC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_4 0x4804D0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_5 0x4804D4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_6 0x4804D8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_7 0x4804DC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_8 0x4804E0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_9 0x4804E4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_10 0x4804E8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_11 0x4804EC
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_12 0x4804F0
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_13 0x4804F4
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_14 0x4804F8
+
+#define mmDMA_IF_W_S_DMA0_MAX_WPRIV_15 0x4804FC
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_0 0x480500
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_1 0x480504
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_2 0x480508
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_3 0x48050C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_4 0x480510
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_5 0x480514
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_6 0x480518
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_7 0x48051C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_8 0x480520
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_9 0x480524
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_10 0x480528
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_11 0x48052C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_12 0x480530
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_13 0x480534
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_14 0x480538
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPROT_15 0x48053C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_0 0x480540
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_1 0x480544
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_2 0x480548
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_3 0x48054C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_4 0x480550
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_5 0x480554
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_6 0x480558
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_7 0x48055C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_8 0x480560
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_9 0x480564
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_10 0x480568
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_11 0x48056C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_12 0x480570
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_13 0x480574
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_14 0x480578
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPROT_15 0x48057C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_0 0x480580
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_1 0x480584
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_2 0x480588
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_3 0x48058C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_4 0x480590
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_5 0x480594
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_6 0x480598
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_7 0x48059C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_8 0x4805A0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_9 0x4805A4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_10 0x4805A8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_11 0x4805AC
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_12 0x4805B0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_13 0x4805B4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_14 0x4805B8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPROT_15 0x4805BC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_0 0x4805C0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_1 0x4805C4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_2 0x4805C8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_3 0x4805CC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_4 0x4805D0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_5 0x4805D4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_6 0x4805D8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_7 0x4805DC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_8 0x4805E0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_9 0x4805E4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_10 0x4805E8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_11 0x4805EC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_12 0x4805F0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_13 0x4805F4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_14 0x4805F8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPROT_15 0x4805FC
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_0 0x480600
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_1 0x480604
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_2 0x480608
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_3 0x48060C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_4 0x480610
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_5 0x480614
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_6 0x480618
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_7 0x48061C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_8 0x480620
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_9 0x480624
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_10 0x480628
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_11 0x48062C
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_12 0x480630
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_13 0x480634
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_14 0x480638
+
+#define mmDMA_IF_W_S_DMA1_MIN_RPRIV_15 0x48063C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_0 0x480640
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_1 0x480644
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_2 0x480648
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_3 0x48064C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_4 0x480650
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_5 0x480654
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_6 0x480658
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_7 0x48065C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_8 0x480660
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_9 0x480664
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_10 0x480668
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_11 0x48066C
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_12 0x480670
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_13 0x480674
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_14 0x480678
+
+#define mmDMA_IF_W_S_DMA1_MAX_RPRIV_15 0x48067C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_0 0x480680
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_1 0x480684
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_2 0x480688
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_3 0x48068C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_4 0x480690
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_5 0x480694
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_6 0x480698
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_7 0x48069C
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_8 0x4806A0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_9 0x4806A4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_10 0x4806A8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_11 0x4806AC
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_12 0x4806B0
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_13 0x4806B4
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_14 0x4806B8
+
+#define mmDMA_IF_W_S_DMA1_MIN_WPRIV_15 0x4806BC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_0 0x4806C0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_1 0x4806C4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_2 0x4806C8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_3 0x4806CC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_4 0x4806D0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_5 0x4806D4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_6 0x4806D8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_7 0x4806DC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_8 0x4806E0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_9 0x4806E4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_10 0x4806E8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_11 0x4806EC
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_12 0x4806F0
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_13 0x4806F4
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_14 0x4806F8
+
+#define mmDMA_IF_W_S_DMA1_MAX_WPRIV_15 0x4806FC
+
+#define mmDMA_IF_W_S_SOB_HIT_RPROT 0x480700
+
+#define mmDMA_IF_W_S_SOB_HIT_WPROT 0x480704
+
+#define mmDMA_IF_W_S_SOB_HIT_RPRIV 0x48070C
+
+#define mmDMA_IF_W_S_SOB_HIT_WPRIV 0x480710
+
+#define mmDMA_IF_W_S_DMA0_HIT_RPROT 0x48071C
+
+#define mmDMA_IF_W_S_DMA0_HIT_WPROT 0x480720
+
+#define mmDMA_IF_W_S_DMA0_HIT_RPRIV 0x480724
+
+#define mmDMA_IF_W_S_DMA0_HIT_WPRIV 0x480728
+
+#define mmDMA_IF_W_S_DMA1_HIT_RPROT 0x480730
+
+#define mmDMA_IF_W_S_DMA1_HIT_WPROT 0x480734
+
+#define mmDMA_IF_W_S_DMA1_HIT_RPRIV 0x480738
+
+#define mmDMA_IF_W_S_DMA1_HIT_WPRIV 0x48073C
+
+#define mmDMA_IF_W_S_HBM_BIN 0x480800
+
+#define mmDMA_IF_W_S_MME_BIN 0x480804
+
+#define mmDMA_IF_W_S_TPC_BIN 0x480808
+
+#define mmDMA_IF_W_S_DMA_BIN 0x48080C
+
+#define mmDMA_IF_W_S_SOB_CG_EN 0x480810
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_0 0x480820
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_1 0x480824
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_2 0x480828
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_3 0x48082C
+
+#define mmDMA_IF_W_S_HBM_I2C_ADDR_4 0x480830
+
+#define mmDMA_IF_W_S_HBM_MISC 0x480834
+
+#endif /* ASIC_REG_DMA_IF_W_S_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
new file mode 100644
index 000000000000..c7596aac7a5c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
@@ -0,0 +1,4974 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef GAUDI_BLOCKS_H_
+#define GAUDI_BLOCKS_H_
+
+#define mmNIC0_PHY0_BASE 0x0ull
+#define NIC0_PHY0_MAX_OFFSET 0x9F13
+#define mmMME0_ACC_BASE 0x7FFC020000ull
+#define MME0_ACC_MAX_OFFSET 0x5C00
+#define MME0_ACC_SECTION 0x20000
+#define mmMME0_SBAB_BASE 0x7FFC040000ull
+#define MME0_SBAB_MAX_OFFSET 0x5800
+#define MME0_SBAB_SECTION 0x1000
+#define mmMME0_PRTN_BASE 0x7FFC041000ull
+#define MME0_PRTN_MAX_OFFSET 0x5000
+#define MME0_PRTN_SECTION 0x1F000
+#define mmMME0_CTRL_BASE 0x7FFC060000ull
+#define MME0_CTRL_MAX_OFFSET 0xDA80
+#define MME0_CTRL_SECTION 0x8000
+#define mmARCH_MME0_CTRL_BASE 0x7FFC060008ull
+#define ARCH_MME0_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME0_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME0_CTRL_BASE 0x7FFC06003Cull
+#define ARCH_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME0_CTRL_BASE 0x7FFC060088ull
+#define ARCH_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME0_CTRL_BASE 0x7FFC0600ACull
+#define ARCH_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0600F8ull
+#define ARCH_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06011Cull
+#define ARCH_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME0_CTRL_BASE 0x7FFC060140ull
+#define ARCH_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06018Cull
+#define ARCH_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC0601B0ull
+#define ARCH_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME0_CTRL_BASE 0x7FFC0601D4ull
+#define ARCH_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME0_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME0_CTRL_BASE 0x7FFC060408ull
+#define SHADOW_0_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME0_CTRL_BASE 0x7FFC06043Cull
+#define SHADOW_0_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME0_CTRL_BASE 0x7FFC060488ull
+#define SHADOW_0_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME0_CTRL_BASE 0x7FFC0604ACull
+#define SHADOW_0_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0604F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME0_CTRL_BASE 0x7FFC060540ull
+#define SHADOW_0_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC0605B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME0_CTRL_BASE 0x7FFC0605D4ull
+#define SHADOW_0_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME0_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME0_CTRL_BASE 0x7FFC060688ull
+#define SHADOW_1_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME0_CTRL_BASE 0x7FFC0606BCull
+#define SHADOW_1_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME0_CTRL_BASE 0x7FFC060708ull
+#define SHADOW_1_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME0_CTRL_BASE 0x7FFC06072Cull
+#define SHADOW_1_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC060778ull
+#define SHADOW_1_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC06079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME0_CTRL_BASE 0x7FFC0607C0ull
+#define SHADOW_1_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC06080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060830ull
+#define SHADOW_1_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME0_CTRL_BASE 0x7FFC060854ull
+#define SHADOW_1_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME0_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME0_CTRL_BASE 0x7FFC060908ull
+#define SHADOW_2_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME0_CTRL_BASE 0x7FFC06093Cull
+#define SHADOW_2_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME0_CTRL_BASE 0x7FFC060988ull
+#define SHADOW_2_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME0_CTRL_BASE 0x7FFC0609ACull
+#define SHADOW_2_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC0609F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC060A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME0_CTRL_BASE 0x7FFC060A40ull
+#define SHADOW_2_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC060A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME0_CTRL_BASE 0x7FFC060AD4ull
+#define SHADOW_2_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME0_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME0_CTRL_BASE 0x7FFC060B88ull
+#define SHADOW_3_MME0_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME0_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME0_CTRL_BASE 0x7FFC060BBCull
+#define SHADOW_3_TENSOR_S_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME0_CTRL_BASE 0x7FFC060C08ull
+#define SHADOW_3_AGU_S_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME0_CTRL_BASE 0x7FFC060C2Cull
+#define SHADOW_3_TENSOR_L_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME0_CTRL_BASE 0x7FFC060C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME0_CTRL_BASE 0x7FFC060C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME0_CTRL_BASE 0x7FFC060CC0ull
+#define SHADOW_3_TENSOR_O_MME0_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME0_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME0_CTRL_BASE 0x7FFC060D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME0_CTRL_BASE 0x7FFC060D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME0_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME0_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME0_CTRL_BASE 0x7FFC060D54ull
+#define SHADOW_3_DESC_MME0_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME0_CTRL_SECTION 0x72AC
+#define mmMME0_QM_BASE 0x7FFC068000ull
+#define MME0_QM_MAX_OFFSET 0xD040
+#define MME0_QM_SECTION 0x38000
+#define mmMME1_ACC_BASE 0x7FFC0A0000ull
+#define MME1_ACC_MAX_OFFSET 0x5C00
+#define MME1_ACC_SECTION 0x20000
+#define mmMME1_SBAB_BASE 0x7FFC0C0000ull
+#define MME1_SBAB_MAX_OFFSET 0x5800
+#define MME1_SBAB_SECTION 0x1000
+#define mmMME1_PRTN_BASE 0x7FFC0C1000ull
+#define MME1_PRTN_MAX_OFFSET 0x5000
+#define MME1_PRTN_SECTION 0x1F000
+#define mmMME1_CTRL_BASE 0x7FFC0E0000ull
+#define MME1_CTRL_MAX_OFFSET 0xDA80
+#define MME1_CTRL_SECTION 0x8000
+#define mmARCH_MME1_CTRL_BASE 0x7FFC0E0008ull
+#define ARCH_MME1_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME1_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E003Cull
+#define ARCH_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME1_CTRL_BASE 0x7FFC0E0088ull
+#define ARCH_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E00ACull
+#define ARCH_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E00F8ull
+#define ARCH_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E011Cull
+#define ARCH_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0140ull
+#define ARCH_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E018Cull
+#define ARCH_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E01B0ull
+#define ARCH_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME1_CTRL_BASE 0x7FFC0E01D4ull
+#define ARCH_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME1_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME1_CTRL_BASE 0x7FFC0E0408ull
+#define SHADOW_0_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E043Cull
+#define SHADOW_0_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME1_CTRL_BASE 0x7FFC0E0488ull
+#define SHADOW_0_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E04ACull
+#define SHADOW_0_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E04F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0540ull
+#define SHADOW_0_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E05B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME1_CTRL_BASE 0x7FFC0E05D4ull
+#define SHADOW_0_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME1_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME1_CTRL_BASE 0x7FFC0E0688ull
+#define SHADOW_1_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E06BCull
+#define SHADOW_1_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME1_CTRL_BASE 0x7FFC0E0708ull
+#define SHADOW_1_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E072Cull
+#define SHADOW_1_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E0778ull
+#define SHADOW_1_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E07C0ull
+#define SHADOW_1_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0830ull
+#define SHADOW_1_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME1_CTRL_BASE 0x7FFC0E0854ull
+#define SHADOW_1_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME1_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME1_CTRL_BASE 0x7FFC0E0908ull
+#define SHADOW_2_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E093Cull
+#define SHADOW_2_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME1_CTRL_BASE 0x7FFC0E0988ull
+#define SHADOW_2_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E09ACull
+#define SHADOW_2_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E09F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E0A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0A40ull
+#define SHADOW_2_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E0A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME1_CTRL_BASE 0x7FFC0E0AD4ull
+#define SHADOW_2_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME1_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME1_CTRL_BASE 0x7FFC0E0B88ull
+#define SHADOW_3_MME1_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME1_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME1_CTRL_BASE 0x7FFC0E0BBCull
+#define SHADOW_3_TENSOR_S_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME1_CTRL_BASE 0x7FFC0E0C08ull
+#define SHADOW_3_AGU_S_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME1_CTRL_BASE 0x7FFC0E0C2Cull
+#define SHADOW_3_TENSOR_L_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME1_CTRL_BASE 0x7FFC0E0C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME1_CTRL_BASE 0x7FFC0E0C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME1_CTRL_BASE 0x7FFC0E0CC0ull
+#define SHADOW_3_TENSOR_O_MME1_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME1_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME1_CTRL_BASE 0x7FFC0E0D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME1_CTRL_BASE 0x7FFC0E0D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME1_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME1_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME1_CTRL_BASE 0x7FFC0E0D54ull
+#define SHADOW_3_DESC_MME1_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME1_CTRL_SECTION 0x72AC
+#define mmMME1_QM_BASE 0x7FFC0E8000ull
+#define MME1_QM_MAX_OFFSET 0xD040
+#define MME1_QM_SECTION 0x38000
+#define mmMME2_ACC_BASE 0x7FFC120000ull
+#define MME2_ACC_MAX_OFFSET 0x5C00
+#define MME2_ACC_SECTION 0x20000
+#define mmMME2_SBAB_BASE 0x7FFC140000ull
+#define MME2_SBAB_MAX_OFFSET 0x5800
+#define MME2_SBAB_SECTION 0x1000
+#define mmMME2_PRTN_BASE 0x7FFC141000ull
+#define MME2_PRTN_MAX_OFFSET 0x5000
+#define MME2_PRTN_SECTION 0x1F000
+#define mmMME2_CTRL_BASE 0x7FFC160000ull
+#define MME2_CTRL_MAX_OFFSET 0xDA80
+#define MME2_CTRL_SECTION 0x8000
+#define mmARCH_MME2_CTRL_BASE 0x7FFC160008ull
+#define ARCH_MME2_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME2_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME2_CTRL_BASE 0x7FFC16003Cull
+#define ARCH_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME2_CTRL_BASE 0x7FFC160088ull
+#define ARCH_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME2_CTRL_BASE 0x7FFC1600ACull
+#define ARCH_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1600F8ull
+#define ARCH_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16011Cull
+#define ARCH_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME2_CTRL_BASE 0x7FFC160140ull
+#define ARCH_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16018Cull
+#define ARCH_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC1601B0ull
+#define ARCH_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME2_CTRL_BASE 0x7FFC1601D4ull
+#define ARCH_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME2_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME2_CTRL_BASE 0x7FFC160408ull
+#define SHADOW_0_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME2_CTRL_BASE 0x7FFC16043Cull
+#define SHADOW_0_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME2_CTRL_BASE 0x7FFC160488ull
+#define SHADOW_0_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME2_CTRL_BASE 0x7FFC1604ACull
+#define SHADOW_0_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1604F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME2_CTRL_BASE 0x7FFC160540ull
+#define SHADOW_0_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC1605B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME2_CTRL_BASE 0x7FFC1605D4ull
+#define SHADOW_0_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME2_CTRL_BASE 0x7FFC160688ull
+#define SHADOW_1_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME2_CTRL_BASE 0x7FFC1606BCull
+#define SHADOW_1_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME2_CTRL_BASE 0x7FFC160708ull
+#define SHADOW_1_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME2_CTRL_BASE 0x7FFC16072Cull
+#define SHADOW_1_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC160778ull
+#define SHADOW_1_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC16079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME2_CTRL_BASE 0x7FFC1607C0ull
+#define SHADOW_1_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC16080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160830ull
+#define SHADOW_1_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME2_CTRL_BASE 0x7FFC160854ull
+#define SHADOW_1_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME2_CTRL_BASE 0x7FFC160908ull
+#define SHADOW_2_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME2_CTRL_BASE 0x7FFC16093Cull
+#define SHADOW_2_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME2_CTRL_BASE 0x7FFC160988ull
+#define SHADOW_2_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME2_CTRL_BASE 0x7FFC1609ACull
+#define SHADOW_2_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC1609F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC160A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME2_CTRL_BASE 0x7FFC160A40ull
+#define SHADOW_2_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC160A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME2_CTRL_BASE 0x7FFC160AD4ull
+#define SHADOW_2_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME2_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME2_CTRL_BASE 0x7FFC160B88ull
+#define SHADOW_3_MME2_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME2_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME2_CTRL_BASE 0x7FFC160BBCull
+#define SHADOW_3_TENSOR_S_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME2_CTRL_BASE 0x7FFC160C08ull
+#define SHADOW_3_AGU_S_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME2_CTRL_BASE 0x7FFC160C2Cull
+#define SHADOW_3_TENSOR_L_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME2_CTRL_BASE 0x7FFC160C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME2_CTRL_BASE 0x7FFC160C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME2_CTRL_BASE 0x7FFC160CC0ull
+#define SHADOW_3_TENSOR_O_MME2_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME2_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME2_CTRL_BASE 0x7FFC160D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME2_CTRL_BASE 0x7FFC160D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME2_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME2_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME2_CTRL_BASE 0x7FFC160D54ull
+#define SHADOW_3_DESC_MME2_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME2_CTRL_SECTION 0x72AC
+#define mmMME2_QM_BASE 0x7FFC168000ull
+#define MME2_QM_MAX_OFFSET 0xD040
+#define MME2_QM_SECTION 0x38000
+#define mmMME3_ACC_BASE 0x7FFC1A0000ull
+#define MME3_ACC_MAX_OFFSET 0x5C00
+#define MME3_ACC_SECTION 0x20000
+#define mmMME3_SBAB_BASE 0x7FFC1C0000ull
+#define MME3_SBAB_MAX_OFFSET 0x5800
+#define MME3_SBAB_SECTION 0x1000
+#define mmMME3_PRTN_BASE 0x7FFC1C1000ull
+#define MME3_PRTN_MAX_OFFSET 0x5000
+#define MME3_PRTN_SECTION 0x1F000
+#define mmMME3_CTRL_BASE 0x7FFC1E0000ull
+#define MME3_CTRL_MAX_OFFSET 0xDA80
+#define MME3_CTRL_SECTION 0x8000
+#define mmARCH_MME3_CTRL_BASE 0x7FFC1E0008ull
+#define ARCH_MME3_CTRL_MAX_OFFSET 0x3400
+#define ARCH_MME3_CTRL_SECTION 0x3400
+#define mmARCH_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E003Cull
+#define ARCH_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_S_MME3_CTRL_BASE 0x7FFC1E0088ull
+#define ARCH_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E00ACull
+#define ARCH_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E00F8ull
+#define ARCH_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmARCH_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E011Cull
+#define ARCH_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmARCH_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0140ull
+#define ARCH_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define ARCH_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmARCH_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E018Cull
+#define ARCH_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmARCH_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E01B0ull
+#define ARCH_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define ARCH_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmARCH_DESC_MME3_CTRL_BASE 0x7FFC1E01D4ull
+#define ARCH_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define ARCH_DESC_MME3_CTRL_SECTION 0x2340
+#define mmSHADOW_0_MME3_CTRL_BASE 0x7FFC1E0408ull
+#define SHADOW_0_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_0_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_0_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E043Cull
+#define SHADOW_0_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_S_MME3_CTRL_BASE 0x7FFC1E0488ull
+#define SHADOW_0_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E04ACull
+#define SHADOW_0_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E04F8ull
+#define SHADOW_0_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E051Cull
+#define SHADOW_0_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0540ull
+#define SHADOW_0_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_0_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_0_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E058Cull
+#define SHADOW_0_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E05B0ull
+#define SHADOW_0_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_0_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_0_DESC_MME3_CTRL_BASE 0x7FFC1E05D4ull
+#define SHADOW_0_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_0_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_1_MME3_CTRL_BASE 0x7FFC1E0688ull
+#define SHADOW_1_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_1_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_1_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E06BCull
+#define SHADOW_1_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_S_MME3_CTRL_BASE 0x7FFC1E0708ull
+#define SHADOW_1_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E072Cull
+#define SHADOW_1_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E0778ull
+#define SHADOW_1_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E079Cull
+#define SHADOW_1_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E07C0ull
+#define SHADOW_1_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_1_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_1_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E080Cull
+#define SHADOW_1_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0830ull
+#define SHADOW_1_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_1_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_1_DESC_MME3_CTRL_BASE 0x7FFC1E0854ull
+#define SHADOW_1_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_1_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_2_MME3_CTRL_BASE 0x7FFC1E0908ull
+#define SHADOW_2_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_2_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_2_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E093Cull
+#define SHADOW_2_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_S_MME3_CTRL_BASE 0x7FFC1E0988ull
+#define SHADOW_2_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E09ACull
+#define SHADOW_2_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E09F8ull
+#define SHADOW_2_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E0A1Cull
+#define SHADOW_2_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0A40ull
+#define SHADOW_2_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_2_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_2_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E0A8Cull
+#define SHADOW_2_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0AB0ull
+#define SHADOW_2_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_2_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_2_DESC_MME3_CTRL_BASE 0x7FFC1E0AD4ull
+#define SHADOW_2_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_2_DESC_MME3_CTRL_SECTION 0xB400
+#define mmSHADOW_3_MME3_CTRL_BASE 0x7FFC1E0B88ull
+#define SHADOW_3_MME3_CTRL_MAX_OFFSET 0x3400
+#define SHADOW_3_MME3_CTRL_SECTION 0x3400
+#define mmSHADOW_3_TENSOR_S_MME3_CTRL_BASE 0x7FFC1E0BBCull
+#define SHADOW_3_TENSOR_S_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_S_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_S_MME3_CTRL_BASE 0x7FFC1E0C08ull
+#define SHADOW_3_AGU_S_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_S_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_L_MME3_CTRL_BASE 0x7FFC1E0C2Cull
+#define SHADOW_3_TENSOR_L_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_L_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_L_LOCAL_MME3_CTRL_BASE 0x7FFC1E0C78ull
+#define SHADOW_3_AGU_L_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_L_REMOTE_MME3_CTRL_BASE 0x7FFC1E0C9Cull
+#define SHADOW_3_AGU_L_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_L_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_TENSOR_O_MME3_CTRL_BASE 0x7FFC1E0CC0ull
+#define SHADOW_3_TENSOR_O_MME3_CTRL_MAX_OFFSET 0x4C00
+#define SHADOW_3_TENSOR_O_MME3_CTRL_SECTION 0x4C00
+#define mmSHADOW_3_AGU_O_LOCAL_MME3_CTRL_BASE 0x7FFC1E0D0Cull
+#define SHADOW_3_AGU_O_LOCAL_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_LOCAL_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_AGU_O_REMOTE_MME3_CTRL_BASE 0x7FFC1E0D30ull
+#define SHADOW_3_AGU_O_REMOTE_MME3_CTRL_MAX_OFFSET 0x2400
+#define SHADOW_3_AGU_O_REMOTE_MME3_CTRL_SECTION 0x2400
+#define mmSHADOW_3_DESC_MME3_CTRL_BASE 0x7FFC1E0D54ull
+#define SHADOW_3_DESC_MME3_CTRL_MAX_OFFSET 0x5400
+#define SHADOW_3_DESC_MME3_CTRL_SECTION 0x72AC
+#define mmMME3_QM_BASE 0x7FFC1E8000ull
+#define MME3_QM_MAX_OFFSET 0xD040
+#define MME3_QM_SECTION 0x18000
+#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull
+#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull
+#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC208000ull
+#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC209000ull
+#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC210000ull
+#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC211000ull
+#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC218000ull
+#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC219000ull
+#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC220000ull
+#define SRAM_Y0_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC221000ull
+#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X5_BANK_BASE 0x7FFC228000ull
+#define SRAM_Y0_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X5_RTR_BASE 0x7FFC229000ull
+#define SRAM_Y0_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X6_BANK_BASE 0x7FFC230000ull
+#define SRAM_Y0_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X6_RTR_BASE 0x7FFC231000ull
+#define SRAM_Y0_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y0_X7_BANK_BASE 0x7FFC238000ull
+#define SRAM_Y0_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y0_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y0_X7_RTR_BASE 0x7FFC239000ull
+#define SRAM_Y0_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y0_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC240000ull
+#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC241000ull
+#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC248000ull
+#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC249000ull
+#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC250000ull
+#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC251000ull
+#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC258000ull
+#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC259000ull
+#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC260000ull
+#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC261000ull
+#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X5_BANK_BASE 0x7FFC268000ull
+#define SRAM_Y1_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X5_RTR_BASE 0x7FFC269000ull
+#define SRAM_Y1_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X6_BANK_BASE 0x7FFC270000ull
+#define SRAM_Y1_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X6_RTR_BASE 0x7FFC271000ull
+#define SRAM_Y1_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y1_X7_BANK_BASE 0x7FFC278000ull
+#define SRAM_Y1_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y1_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y1_X7_RTR_BASE 0x7FFC279000ull
+#define SRAM_Y1_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y1_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC280000ull
+#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC281000ull
+#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC288000ull
+#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC289000ull
+#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC290000ull
+#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC291000ull
+#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC298000ull
+#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC299000ull
+#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC2A0000ull
+#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC2A1000ull
+#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X5_BANK_BASE 0x7FFC2A8000ull
+#define SRAM_Y2_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X5_RTR_BASE 0x7FFC2A9000ull
+#define SRAM_Y2_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X6_BANK_BASE 0x7FFC2B0000ull
+#define SRAM_Y2_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X6_RTR_BASE 0x7FFC2B1000ull
+#define SRAM_Y2_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y2_X7_BANK_BASE 0x7FFC2B8000ull
+#define SRAM_Y2_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y2_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y2_X7_RTR_BASE 0x7FFC2B9000ull
+#define SRAM_Y2_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y2_X7_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC2C0000ull
+#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X0_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC2C1000ull
+#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X0_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC2C8000ull
+#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X1_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC2C9000ull
+#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X1_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC2D0000ull
+#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X2_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC2D1000ull
+#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X2_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X3_BANK_BASE 0x7FFC2D8000ull
+#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X3_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC2D9000ull
+#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X3_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC2E0000ull
+#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X4_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC2E1000ull
+#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X4_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X5_BANK_BASE 0x7FFC2E8000ull
+#define SRAM_Y3_X5_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X5_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X5_RTR_BASE 0x7FFC2E9000ull
+#define SRAM_Y3_X5_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X5_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X6_BANK_BASE 0x7FFC2F0000ull
+#define SRAM_Y3_X6_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X6_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X6_RTR_BASE 0x7FFC2F1000ull
+#define SRAM_Y3_X6_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X6_RTR_SECTION 0x7000
+#define mmSRAM_Y3_X7_BANK_BASE 0x7FFC2F8000ull
+#define SRAM_Y3_X7_BANK_MAX_OFFSET 0x4000
+#define SRAM_Y3_X7_BANK_SECTION 0x1000
+#define mmSRAM_Y3_X7_RTR_BASE 0x7FFC2F9000ull
+#define SRAM_Y3_X7_RTR_MAX_OFFSET 0x3340
+#define SRAM_Y3_X7_RTR_SECTION 0x7000
+#define mmSIF_RTR_0_BASE 0x7FFC300000ull
+#define SIF_RTR_0_MAX_OFFSET 0x6500
+#define SIF_RTR_0_SECTION 0x6000
+#define mmSIF_RTR_CTRL_0_BASE 0x7FFC306000ull
+#define SIF_RTR_CTRL_0_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_0_SECTION 0xA000
+#define mmSIF_RTR_1_BASE 0x7FFC310000ull
+#define SIF_RTR_1_MAX_OFFSET 0x6500
+#define SIF_RTR_1_SECTION 0x6000
+#define mmSIF_RTR_CTRL_1_BASE 0x7FFC316000ull
+#define SIF_RTR_CTRL_1_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_1_SECTION 0xA000
+#define mmSIF_RTR_2_BASE 0x7FFC320000ull
+#define SIF_RTR_2_MAX_OFFSET 0x6500
+#define SIF_RTR_2_SECTION 0x6000
+#define mmSIF_RTR_CTRL_2_BASE 0x7FFC326000ull
+#define SIF_RTR_CTRL_2_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_2_SECTION 0xA000
+#define mmSIF_RTR_3_BASE 0x7FFC330000ull
+#define SIF_RTR_3_MAX_OFFSET 0x6500
+#define SIF_RTR_3_SECTION 0x6000
+#define mmSIF_RTR_CTRL_3_BASE 0x7FFC336000ull
+#define SIF_RTR_CTRL_3_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_3_SECTION 0xA000
+#define mmSIF_RTR_4_BASE 0x7FFC340000ull
+#define SIF_RTR_4_MAX_OFFSET 0x6500
+#define SIF_RTR_4_SECTION 0x6000
+#define mmSIF_RTR_CTRL_4_BASE 0x7FFC346000ull
+#define SIF_RTR_CTRL_4_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_4_SECTION 0xA000
+#define mmSIF_RTR_5_BASE 0x7FFC350000ull
+#define SIF_RTR_5_MAX_OFFSET 0x6500
+#define SIF_RTR_5_SECTION 0x6000
+#define mmSIF_RTR_CTRL_5_BASE 0x7FFC356000ull
+#define SIF_RTR_CTRL_5_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_5_SECTION 0xA000
+#define mmSIF_RTR_6_BASE 0x7FFC360000ull
+#define SIF_RTR_6_MAX_OFFSET 0x6500
+#define SIF_RTR_6_SECTION 0x6000
+#define mmSIF_RTR_CTRL_6_BASE 0x7FFC366000ull
+#define SIF_RTR_CTRL_6_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_6_SECTION 0xA000
+#define mmSIF_RTR_7_BASE 0x7FFC370000ull
+#define SIF_RTR_7_MAX_OFFSET 0x6500
+#define SIF_RTR_7_SECTION 0x6000
+#define mmSIF_RTR_CTRL_7_BASE 0x7FFC376000ull
+#define SIF_RTR_CTRL_7_MAX_OFFSET 0xCC00
+#define SIF_RTR_CTRL_7_SECTION 0xA000
+#define mmNIF_RTR_0_BASE 0x7FFC380000ull
+#define NIF_RTR_0_MAX_OFFSET 0x6500
+#define NIF_RTR_0_SECTION 0x6000
+#define mmNIF_RTR_CTRL_0_BASE 0x7FFC386000ull
+#define NIF_RTR_CTRL_0_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_0_SECTION 0xA000
+#define mmNIF_RTR_1_BASE 0x7FFC390000ull
+#define NIF_RTR_1_MAX_OFFSET 0x6500
+#define NIF_RTR_1_SECTION 0x6000
+#define mmNIF_RTR_CTRL_1_BASE 0x7FFC396000ull
+#define NIF_RTR_CTRL_1_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_1_SECTION 0xA000
+#define mmNIF_RTR_2_BASE 0x7FFC3A0000ull
+#define NIF_RTR_2_MAX_OFFSET 0x6500
+#define NIF_RTR_2_SECTION 0x6000
+#define mmNIF_RTR_CTRL_2_BASE 0x7FFC3A6000ull
+#define NIF_RTR_CTRL_2_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_2_SECTION 0xA000
+#define mmNIF_RTR_3_BASE 0x7FFC3B0000ull
+#define NIF_RTR_3_MAX_OFFSET 0x6500
+#define NIF_RTR_3_SECTION 0x6000
+#define mmNIF_RTR_CTRL_3_BASE 0x7FFC3B6000ull
+#define NIF_RTR_CTRL_3_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_3_SECTION 0xA000
+#define mmNIF_RTR_4_BASE 0x7FFC3C0000ull
+#define NIF_RTR_4_MAX_OFFSET 0x6500
+#define NIF_RTR_4_SECTION 0x6000
+#define mmNIF_RTR_CTRL_4_BASE 0x7FFC3C6000ull
+#define NIF_RTR_CTRL_4_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_4_SECTION 0xA000
+#define mmNIF_RTR_5_BASE 0x7FFC3D0000ull
+#define NIF_RTR_5_MAX_OFFSET 0x6500
+#define NIF_RTR_5_SECTION 0x6000
+#define mmNIF_RTR_CTRL_5_BASE 0x7FFC3D6000ull
+#define NIF_RTR_CTRL_5_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_5_SECTION 0xA000
+#define mmNIF_RTR_6_BASE 0x7FFC3E0000ull
+#define NIF_RTR_6_MAX_OFFSET 0x6500
+#define NIF_RTR_6_SECTION 0x6000
+#define mmNIF_RTR_CTRL_6_BASE 0x7FFC3E6000ull
+#define NIF_RTR_CTRL_6_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_6_SECTION 0xA000
+#define mmNIF_RTR_7_BASE 0x7FFC3F0000ull
+#define NIF_RTR_7_MAX_OFFSET 0x6500
+#define NIF_RTR_7_SECTION 0x6000
+#define mmNIF_RTR_CTRL_7_BASE 0x7FFC3F6000ull
+#define NIF_RTR_CTRL_7_MAX_OFFSET 0xCC00
+#define NIF_RTR_CTRL_7_SECTION 0x4B000
+#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull
+#define CPU_CA53_CFG_MAX_OFFSET 0x2180
+#define CPU_CA53_CFG_SECTION 0x1000
+#define mmCPU_IF_BASE 0x7FFC442000ull
+#define CPU_IF_MAX_OFFSET 0x43C0
+#define CPU_IF_SECTION 0x2000
+#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull
+#define CPU_TIMESTAMP_MAX_OFFSET 0x1000
+#define CPU_TIMESTAMP_SECTION 0x3C000
+#define mmDMA_IF_W_S_BASE 0x7FFC480000ull
+#define DMA_IF_W_S_MAX_OFFSET 0x8380
+#define DMA_IF_W_S_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_CH0_BASE 0x7FFC481000ull
+#define DMA_IF_W_S_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_W_S_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_CH1_BASE 0x7FFC482000ull
+#define DMA_IF_W_S_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_W_S_DOWN_CH1_SECTION 0x5000
+#define mmDMA_W_PLL_BASE 0x7FFC487000ull
+#define DMA_W_PLL_MAX_OFFSET 0x5200
+#define DMA_W_PLL_SECTION 0x1000
+#define mmIF_W_PLL_BASE 0x7FFC488000ull
+#define IF_W_PLL_MAX_OFFSET 0x5200
+#define IF_W_PLL_SECTION 0x1000
+#define mmDMA_IF_W_S_DOWN_BASE 0x7FFC489000ull
+#define DMA_IF_W_S_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_W_S_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_W_S_BASE 0x7FFC490000ull
+#define SYNC_MNGR_GLBL_W_S_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_W_S_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_W_S_BASE 0x7FFC491000ull
+#define SYNC_MNGR_OBJS_W_S_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_W_S_SECTION 0xF000
+#define mmDMA_IF_E_S_BASE 0x7FFC4A0000ull
+#define DMA_IF_E_S_MAX_OFFSET 0x8380
+#define DMA_IF_E_S_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_CH0_BASE 0x7FFC4A1000ull
+#define DMA_IF_E_S_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_E_S_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_CH1_BASE 0x7FFC4A2000ull
+#define DMA_IF_E_S_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_E_S_DOWN_CH1_SECTION 0x5000
+#define mmIF_E_PLL_BASE 0x7FFC4A7000ull
+#define IF_E_PLL_MAX_OFFSET 0x5200
+#define IF_E_PLL_SECTION 0x1000
+#define mmDMA_E_PLL_BASE 0x7FFC4A8000ull
+#define DMA_E_PLL_MAX_OFFSET 0x5200
+#define DMA_E_PLL_SECTION 0x1000
+#define mmDMA_IF_E_S_DOWN_BASE 0x7FFC4A9000ull
+#define DMA_IF_E_S_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_E_S_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_E_S_BASE 0x7FFC4B0000ull
+#define SYNC_MNGR_GLBL_E_S_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_E_S_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_E_S_BASE 0x7FFC4B1000ull
+#define SYNC_MNGR_OBJS_E_S_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_E_S_SECTION 0xF000
+#define mmDMA_IF_W_N_BASE 0x7FFC4C0000ull
+#define DMA_IF_W_N_MAX_OFFSET 0x8380
+#define DMA_IF_W_N_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_CH0_BASE 0x7FFC4C1000ull
+#define DMA_IF_W_N_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_W_N_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_CH1_BASE 0x7FFC4C2000ull
+#define DMA_IF_W_N_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_W_N_DOWN_CH1_SECTION 0x5000
+#define mmMESH_W_PLL_BASE 0x7FFC4C7000ull
+#define MESH_W_PLL_MAX_OFFSET 0x5200
+#define MESH_W_PLL_SECTION 0x1000
+#define mmSRAM_W_PLL_BASE 0x7FFC4C8000ull
+#define SRAM_W_PLL_MAX_OFFSET 0x5200
+#define SRAM_W_PLL_SECTION 0x1000
+#define mmDMA_IF_W_N_DOWN_BASE 0x7FFC4C9000ull
+#define DMA_IF_W_N_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_W_N_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_W_N_BASE 0x7FFC4D0000ull
+#define SYNC_MNGR_GLBL_W_N_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_W_N_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_W_N_BASE 0x7FFC4D1000ull
+#define SYNC_MNGR_OBJS_W_N_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_W_N_SECTION 0xF000
+#define mmDMA_IF_E_N_BASE 0x7FFC4E0000ull
+#define DMA_IF_E_N_MAX_OFFSET 0x8380
+#define DMA_IF_E_N_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_CH0_BASE 0x7FFC4E1000ull
+#define DMA_IF_E_N_DOWN_CH0_MAX_OFFSET 0xCC00
+#define DMA_IF_E_N_DOWN_CH0_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_CH1_BASE 0x7FFC4E2000ull
+#define DMA_IF_E_N_DOWN_CH1_MAX_OFFSET 0xCC00
+#define DMA_IF_E_N_DOWN_CH1_SECTION 0x5000
+#define mmMESH_E_PLL_BASE 0x7FFC4E7000ull
+#define MESH_E_PLL_MAX_OFFSET 0x5200
+#define MESH_E_PLL_SECTION 0x1000
+#define mmSRAM_E_PLL_BASE 0x7FFC4E8000ull
+#define SRAM_E_PLL_MAX_OFFSET 0x5200
+#define SRAM_E_PLL_SECTION 0x1000
+#define mmDMA_IF_E_N_DOWN_BASE 0x7FFC4E9000ull
+#define DMA_IF_E_N_DOWN_MAX_OFFSET 0x1500
+#define DMA_IF_E_N_DOWN_SECTION 0x7000
+#define mmSYNC_MNGR_GLBL_E_N_BASE 0x7FFC4F0000ull
+#define SYNC_MNGR_GLBL_E_N_MAX_OFFSET 0x6C00
+#define SYNC_MNGR_GLBL_E_N_SECTION 0x1000
+#define mmSYNC_MNGR_OBJS_E_N_BASE 0x7FFC4F1000ull
+#define SYNC_MNGR_OBJS_E_N_MAX_OFFSET 0x5C00
+#define SYNC_MNGR_OBJS_E_N_SECTION 0xF000
+#define mmDMA0_CORE_BASE 0x7FFC500000ull
+#define DMA0_CORE_MAX_OFFSET 0x23C0
+#define DMA0_CORE_SECTION 0x8000
+#define mmDMA0_QM_BASE 0x7FFC508000ull
+#define DMA0_QM_MAX_OFFSET 0xD040
+#define DMA0_QM_SECTION 0x18000
+#define mmDMA1_CORE_BASE 0x7FFC520000ull
+#define DMA1_CORE_MAX_OFFSET 0x23C0
+#define DMA1_CORE_SECTION 0x8000
+#define mmDMA1_QM_BASE 0x7FFC528000ull
+#define DMA1_QM_MAX_OFFSET 0xD040
+#define DMA1_QM_SECTION 0x18000
+#define mmDMA2_CORE_BASE 0x7FFC540000ull
+#define DMA2_CORE_MAX_OFFSET 0x23C0
+#define DMA2_CORE_SECTION 0x8000
+#define mmDMA2_QM_BASE 0x7FFC548000ull
+#define DMA2_QM_MAX_OFFSET 0xD040
+#define DMA2_QM_SECTION 0x18000
+#define mmDMA3_CORE_BASE 0x7FFC560000ull
+#define DMA3_CORE_MAX_OFFSET 0x23C0
+#define DMA3_CORE_SECTION 0x8000
+#define mmDMA3_QM_BASE 0x7FFC568000ull
+#define DMA3_QM_MAX_OFFSET 0xD040
+#define DMA3_QM_SECTION 0x18000
+#define mmDMA4_CORE_BASE 0x7FFC580000ull
+#define DMA4_CORE_MAX_OFFSET 0x23C0
+#define DMA4_CORE_SECTION 0x8000
+#define mmDMA4_QM_BASE 0x7FFC588000ull
+#define DMA4_QM_MAX_OFFSET 0xD040
+#define DMA4_QM_SECTION 0x18000
+#define mmDMA5_CORE_BASE 0x7FFC5A0000ull
+#define DMA5_CORE_MAX_OFFSET 0x23C0
+#define DMA5_CORE_SECTION 0x8000
+#define mmDMA5_QM_BASE 0x7FFC5A8000ull
+#define DMA5_QM_MAX_OFFSET 0xD040
+#define DMA5_QM_SECTION 0x18000
+#define mmDMA6_CORE_BASE 0x7FFC5C0000ull
+#define DMA6_CORE_MAX_OFFSET 0x23C0
+#define DMA6_CORE_SECTION 0x8000
+#define mmDMA6_QM_BASE 0x7FFC5C8000ull
+#define DMA6_QM_MAX_OFFSET 0xD040
+#define DMA6_QM_SECTION 0x18000
+#define mmDMA7_CORE_BASE 0x7FFC5E0000ull
+#define DMA7_CORE_MAX_OFFSET 0x23C0
+#define DMA7_CORE_SECTION 0x8000
+#define mmDMA7_QM_BASE 0x7FFC5E8000ull
+#define DMA7_QM_MAX_OFFSET 0xD040
+#define DMA7_QM_SECTION 0x18000
+#define mmHBM0_BASE 0x7FFC600000ull
+#define HBM0_MAX_OFFSET 0x8F58
+#define HBM0_SECTION 0x80000
+#define mmHBM1_BASE 0x7FFC680000ull
+#define HBM1_MAX_OFFSET 0x8F58
+#define HBM1_SECTION 0x80000
+#define mmHBM2_BASE 0x7FFC700000ull
+#define HBM2_MAX_OFFSET 0x8F58
+#define HBM2_SECTION 0x80000
+#define mmHBM3_BASE 0x7FFC780000ull
+#define HBM3_MAX_OFFSET 0x8F58
+#define HBM3_SECTION 0x80000
+#define mmGIC_BASE 0x7FFC800000ull
+#define GIC_MAX_OFFSET 0x10000
+#define GIC_SECTION 0x401000
+#define mmPCIE_WRAP_BASE 0x7FFCC01000ull
+#define PCIE_WRAP_MAX_OFFSET 0xDF00
+#define PCIE_WRAP_SECTION 0x1000
+#define mmPCIE_DBI_BASE 0x7FFCC02000ull
+#define PCIE_DBI_MAX_OFFSET 0xC040
+#define PCIE_DBI_SECTION 0x2000
+#define mmPCIE_CORE_BASE 0x7FFCC04000ull
+#define PCIE_CORE_MAX_OFFSET 0x9BC0
+#define PCIE_CORE_SECTION 0x3000
+#define mmPCIE_AUX_BASE 0x7FFCC07000ull
+#define PCIE_AUX_MAX_OFFSET 0x9C40
+#define PCIE_AUX_SECTION 0x9000
+#define mmPCIE_PHY_BASE 0x7FFCC10000ull
+#define PCIE_PHY_MAX_OFFSET 0x9640
+#define PCIE_PHY_SECTION 0x1000
+#define mmMMU_UP_BASE 0x7FFCC11000ull
+#define MMU_UP_MAX_OFFSET 0x7000
+#define MMU_UP_SECTION 0x1000
+#define mmSTLB_BASE 0x7FFCC12000ull
+#define STLB_MAX_OFFSET 0x8800
+#define STLB_SECTION 0x1000
+#define mmPCIE_MSI_BASE 0x7FFCC13000ull
+#define PCIE_MSI_MAX_OFFSET 0x8000
+#define PCIE_MSI_SECTION 0x2D000
+#define mmPSOC_I2C_M0_BASE 0x7FFCC40000ull
+#define PSOC_I2C_M0_MAX_OFFSET 0x1000
+#define PSOC_I2C_M0_SECTION 0x1000
+#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull
+#define PSOC_I2C_M1_MAX_OFFSET 0x1000
+#define PSOC_I2C_M1_SECTION 0x1000
+#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull
+#define PSOC_I2C_S_MAX_OFFSET 0x1000
+#define PSOC_I2C_S_SECTION 0x1000
+#define mmPSOC_SPI_BASE 0x7FFCC43000ull
+#define PSOC_SPI_MAX_OFFSET 0x1000
+#define PSOC_SPI_SECTION 0x2000
+#define mmPSOC_UART_0_BASE 0x7FFCC45000ull
+#define PSOC_UART_0_MAX_OFFSET 0x1000
+#define PSOC_UART_0_SECTION 0x1000
+#define mmPSOC_UART_1_BASE 0x7FFCC46000ull
+#define PSOC_UART_1_MAX_OFFSET 0x1000
+#define PSOC_UART_1_SECTION 0x1000
+#define mmPSOC_TIMER_BASE 0x7FFCC47000ull
+#define PSOC_TIMER_MAX_OFFSET 0x1000
+#define PSOC_TIMER_SECTION 0x1000
+#define mmPSOC_WDOG_BASE 0x7FFCC48000ull
+#define PSOC_WDOG_MAX_OFFSET 0x1000
+#define PSOC_WDOG_SECTION 0x1000
+#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull
+#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000
+#define PSOC_TIMESTAMP_SECTION 0x1000
+#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull
+#define PSOC_EFUSE_MAX_OFFSET 0x3040
+#define PSOC_EFUSE_SECTION 0x1000
+#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull
+#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xCD80
+#define PSOC_GLOBAL_CONF_SECTION 0x1000
+#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull
+#define PSOC_GPIO0_MAX_OFFSET 0x1000
+#define PSOC_GPIO0_SECTION 0x1000
+#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull
+#define PSOC_GPIO1_MAX_OFFSET 0x1000
+#define PSOC_GPIO1_SECTION 0x1000
+#define mmPSOC_BTL_BASE 0x7FFCC4E000ull
+#define PSOC_BTL_MAX_OFFSET 0x1480
+#define PSOC_BTL_SECTION 0x1000
+#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull
+#define PSOC_CS_TRACE_MAX_OFFSET 0x1680
+#define PSOC_CS_TRACE_SECTION 0x1000
+#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull
+#define PSOC_GPIO2_MAX_OFFSET 0x1000
+#define PSOC_GPIO2_SECTION 0x1000
+#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull
+#define PSOC_GPIO3_MAX_OFFSET 0x1000
+#define PSOC_GPIO3_SECTION 0x1000
+#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull
+#define PSOC_GPIO4_MAX_OFFSET 0x1000
+#define PSOC_GPIO4_SECTION 0x1000
+#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull
+#define PSOC_DFT_EFUSE_MAX_OFFSET 0x3040
+#define PSOC_DFT_EFUSE_SECTION 0x1000
+#define mmPSOC_RPM_0_BASE 0x7FFCC54000ull
+#define PSOC_RPM_0_MAX_OFFSET 0x8800
+#define PSOC_RPM_0_SECTION 0x1000
+#define mmPSOC_RPM_1_BASE 0x7FFCC55000ull
+#define PSOC_RPM_1_MAX_OFFSET 0x8800
+#define PSOC_RPM_1_SECTION 0x1000
+#define mmPSOC_RPM_2_BASE 0x7FFCC56000ull
+#define PSOC_RPM_2_MAX_OFFSET 0x8800
+#define PSOC_RPM_2_SECTION 0x1000
+#define mmPSOC_RPM_3_BASE 0x7FFCC57000ull
+#define PSOC_RPM_3_MAX_OFFSET 0x8800
+#define PSOC_RPM_3_SECTION 0x19000
+#define mmPSOC_CPU_PLL_BASE 0x7FFCC70000ull
+#define PSOC_CPU_PLL_MAX_OFFSET 0x5200
+#define PSOC_CPU_PLL_SECTION 0x1000
+#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull
+#define PSOC_MME_PLL_MAX_OFFSET 0x5200
+#define PSOC_MME_PLL_SECTION 0x1000
+#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull
+#define PSOC_PCI_PLL_MAX_OFFSET 0x5200
+#define PSOC_PCI_PLL_SECTION 0x1000
+#define mmPSOC_TPC_PLL_BASE 0x7FFCC73000ull
+#define PSOC_TPC_PLL_MAX_OFFSET 0x5200
+#define PSOC_TPC_PLL_SECTION 0x1000
+#define mmPSOC_HBM_PLL_BASE 0x7FFCC74000ull
+#define PSOC_HBM_PLL_MAX_OFFSET 0x5200
+#define PSOC_HBM_PLL_SECTION 0x1000
+#define mmPSOC_PM_BASE 0x7FFCC75000ull
+#define PSOC_PM_MAX_OFFSET 0x1F00
+#define PSOC_PM_SECTION 0x1000
+#define mmPSOC_TS_BASE 0x7FFCC76000ull
+#define PSOC_TS_MAX_OFFSET 0xE640
+#define PSOC_TS_SECTION 0x2000
+#define mmPSOC_PWM0_BASE 0x7FFCC78000ull
+#define PSOC_PWM0_MAX_OFFSET 0x5800
+#define PSOC_PWM0_SECTION 0x1000
+#define mmPSOC_PWM1_BASE 0x7FFCC79000ull
+#define PSOC_PWM1_MAX_OFFSET 0x5800
+#define PSOC_PWM1_SECTION 0x1000
+#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull
+#define PSOC_PWM2_MAX_OFFSET 0x5800
+#define PSOC_PWM2_SECTION 0x1000
+#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull
+#define PSOC_PWM3_MAX_OFFSET 0x5800
+#define PSOC_PWM3_SECTION 0x1000
+#define mmPSOC_GPIO5_BASE 0x7FFCC7C000ull
+#define PSOC_GPIO5_MAX_OFFSET 0x1000
+#define PSOC_GPIO5_SECTION 0x1000
+#define mmPSOC_GPIO6_BASE 0x7FFCC7D000ull
+#define PSOC_GPIO6_MAX_OFFSET 0x1000
+#define PSOC_GPIO6_SECTION 0x3000
+#define mmPCIE_PMA_0_BASE 0x7FFCC80000ull
+#define PCIE_PMA_0_MAX_OFFSET 0x10003
+#define PCIE_PMA_0_SECTION 0x10000
+#define mmPCIE_PMA_1_BASE 0x7FFCC90000ull
+#define PCIE_PMA_1_MAX_OFFSET 0x10003
+#define PCIE_PMA_1_SECTION 0x10000
+#define mmPCIE_PMA_2_BASE 0x7FFCCA0000ull
+#define PCIE_PMA_2_MAX_OFFSET 0x10003
+#define PCIE_PMA_2_SECTION 0x10000
+#define mmPCIE_PMA_3_BASE 0x7FFCCB0000ull
+#define PCIE_PMA_3_MAX_OFFSET 0x10003
+#define PCIE_PMA_3_SECTION 0x10000
+#define mmNIC0_MAC_CH0_BASE 0x7FFCCC0000ull
+#define NIC0_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH0_SECTION 0x1000
+#define mmNIC0_MAC_CH1_BASE 0x7FFCCC1000ull
+#define NIC0_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH1_SECTION 0x1000
+#define mmNIC0_MAC_CH2_BASE 0x7FFCCC2000ull
+#define NIC0_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH2_SECTION 0x1000
+#define mmNIC0_MAC_CH3_BASE 0x7FFCCC3000ull
+#define NIC0_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC0_MAC_CH3_SECTION 0x1000
+#define mmNIC0_STAT_BASE 0x7FFCCC4000ull
+#define NIC0_STAT_MAX_OFFSET 0x4D00
+#define NIC0_STAT_SECTION 0x1000
+#define mmNIC0_MAC_XPCS91_BASE 0x7FFCCC5000ull
+#define NIC0_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC0_MAC_XPCS91_SECTION 0x3000
+#define mmNIC0_MAC_CORE_BASE 0x7FFCCC8000ull
+#define NIC0_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC0_MAC_CORE_SECTION 0x1000
+#define mmNIC0_MAC_AUX_BASE 0x7FFCCC9000ull
+#define NIC0_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC0_MAC_AUX_SECTION 0xF000
+#define mmNIC0_PHY_BASE 0x7FFCCD8000ull
+#define NIC0_PHY_MAX_OFFSET 0x3400
+#define NIC0_PHY_SECTION 0x8000
+#define mmNIC0_QM0_BASE 0x7FFCCE0000ull
+#define NIC0_QM0_MAX_OFFSET 0xD040
+#define NIC0_QM0_SECTION 0x2000
+#define mmNIC0_QM1_BASE 0x7FFCCE2000ull
+#define NIC0_QM1_MAX_OFFSET 0xD040
+#define NIC0_QM1_SECTION 0x2000
+#define mmNIC0_QPC0_BASE 0x7FFCCE4000ull
+#define NIC0_QPC0_MAX_OFFSET 0x7140
+#define NIC0_QPC0_SECTION 0x1000
+#define mmNIC0_QPC1_BASE 0x7FFCCE5000ull
+#define NIC0_QPC1_MAX_OFFSET 0x7140
+#define NIC0_QPC1_SECTION 0x3000
+#define mmNIC0_RXB_BASE 0x7FFCCE8000ull
+#define NIC0_RXB_MAX_OFFSET 0x6040
+#define NIC0_RXB_SECTION 0x1000
+#define mmNIC0_RXE0_BASE 0x7FFCCE9000ull
+#define NIC0_RXE0_MAX_OFFSET 0x2FC0
+#define NIC0_RXE0_SECTION 0x1000
+#define mmNIC0_RXE1_BASE 0x7FFCCEA000ull
+#define NIC0_RXE1_MAX_OFFSET 0x2FC0
+#define NIC0_RXE1_SECTION 0x1000
+#define mmNIC0_RX_GW_BASE 0x7FFCCEB000ull
+#define NIC0_RX_GW_MAX_OFFSET 0x4540
+#define NIC0_RX_GW_SECTION 0x5000
+#define mmNIC0_TXS0_BASE 0x7FFCCF0000ull
+#define NIC0_TXS0_MAX_OFFSET 0x19C0
+#define NIC0_TXS0_SECTION 0x1000
+#define mmNIC0_TXS1_BASE 0x7FFCCF1000ull
+#define NIC0_TXS1_MAX_OFFSET 0x19C0
+#define NIC0_TXS1_SECTION 0x1000
+#define mmNIC0_TXE0_BASE 0x7FFCCF2000ull
+#define NIC0_TXE0_MAX_OFFSET 0x2040
+#define NIC0_TXE0_SECTION 0x1000
+#define mmNIC0_TXE1_BASE 0x7FFCCF3000ull
+#define NIC0_TXE1_MAX_OFFSET 0x2040
+#define NIC0_TXE1_SECTION 0x1000
+#define mmNIC0_TXB_BASE 0x7FFCCF4000ull
+#define NIC0_TXB_MAX_OFFSET 0xD400
+#define NIC0_TXB_SECTION 0x1000
+#define mmNIC0_TMR_BASE 0x7FFCCF5000ull
+#define NIC0_TMR_MAX_OFFSET 0x1600
+#define NIC0_TMR_SECTION 0x1000
+#define mmNIC0_TX_GW_BASE 0x7FFCCF6000ull
+#define NIC0_TX_GW_MAX_OFFSET 0x1400
+#define NIC0_TX_GW_SECTION 0x2000
+#define mmNIC0_TS_BASE 0x7FFCCF8000ull
+#define NIC0_TS_MAX_OFFSET 0xE640
+#define NIC0_TS_SECTION 0x1000
+#define mmNIC0_PLL_BASE 0x7FFCCF9000ull
+#define NIC0_PLL_MAX_OFFSET 0x5200
+#define NIC0_PLL_SECTION 0x1000
+#define mmNIC0_PM_BASE 0x7FFCCFA000ull
+#define NIC0_PM_MAX_OFFSET 0x1F00
+#define NIC0_PM_SECTION 0x6000
+#define mmNIC1_MAC_CH0_BASE 0x7FFCD00000ull
+#define NIC1_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH0_SECTION 0x1000
+#define mmNIC1_MAC_CH1_BASE 0x7FFCD01000ull
+#define NIC1_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH1_SECTION 0x1000
+#define mmNIC1_MAC_CH2_BASE 0x7FFCD02000ull
+#define NIC1_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH2_SECTION 0x1000
+#define mmNIC1_MAC_CH3_BASE 0x7FFCD03000ull
+#define NIC1_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC1_MAC_CH3_SECTION 0x1000
+#define mmNIC1_STAT_BASE 0x7FFCD04000ull
+#define NIC1_STAT_MAX_OFFSET 0x4D00
+#define NIC1_STAT_SECTION 0x1000
+#define mmNIC1_MAC_XPCS91_BASE 0x7FFCD05000ull
+#define NIC1_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC1_MAC_XPCS91_SECTION 0x3000
+#define mmNIC1_MAC_CORE_BASE 0x7FFCD08000ull
+#define NIC1_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC1_MAC_CORE_SECTION 0x1000
+#define mmNIC1_MAC_AUX_BASE 0x7FFCD09000ull
+#define NIC1_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC1_MAC_AUX_SECTION 0xF000
+#define mmNIC1_PHY_BASE 0x7FFCD18000ull
+#define NIC1_PHY_MAX_OFFSET 0x3400
+#define NIC1_PHY_SECTION 0x8000
+#define mmNIC1_QM0_BASE 0x7FFCD20000ull
+#define NIC1_QM0_MAX_OFFSET 0xD040
+#define NIC1_QM0_SECTION 0x2000
+#define mmNIC1_QM1_BASE 0x7FFCD22000ull
+#define NIC1_QM1_MAX_OFFSET 0xD040
+#define NIC1_QM1_SECTION 0x2000
+#define mmNIC1_QPC0_BASE 0x7FFCD24000ull
+#define NIC1_QPC0_MAX_OFFSET 0x7140
+#define NIC1_QPC0_SECTION 0x1000
+#define mmNIC1_QPC1_BASE 0x7FFCD25000ull
+#define NIC1_QPC1_MAX_OFFSET 0x7140
+#define NIC1_QPC1_SECTION 0x3000
+#define mmNIC1_RXB_BASE 0x7FFCD28000ull
+#define NIC1_RXB_MAX_OFFSET 0x6040
+#define NIC1_RXB_SECTION 0x1000
+#define mmNIC1_RXE0_BASE 0x7FFCD29000ull
+#define NIC1_RXE0_MAX_OFFSET 0x2FC0
+#define NIC1_RXE0_SECTION 0x1000
+#define mmNIC1_RXE1_BASE 0x7FFCD2A000ull
+#define NIC1_RXE1_MAX_OFFSET 0x2FC0
+#define NIC1_RXE1_SECTION 0x1000
+#define mmNIC1_RX_GW_BASE 0x7FFCD2B000ull
+#define NIC1_RX_GW_MAX_OFFSET 0x4540
+#define NIC1_RX_GW_SECTION 0x5000
+#define mmNIC1_TXS0_BASE 0x7FFCD30000ull
+#define NIC1_TXS0_MAX_OFFSET 0x19C0
+#define NIC1_TXS0_SECTION 0x1000
+#define mmNIC1_TXS1_BASE 0x7FFCD31000ull
+#define NIC1_TXS1_MAX_OFFSET 0x19C0
+#define NIC1_TXS1_SECTION 0x1000
+#define mmNIC1_TXE0_BASE 0x7FFCD32000ull
+#define NIC1_TXE0_MAX_OFFSET 0x2040
+#define NIC1_TXE0_SECTION 0x1000
+#define mmNIC1_TXE1_BASE 0x7FFCD33000ull
+#define NIC1_TXE1_MAX_OFFSET 0x2040
+#define NIC1_TXE1_SECTION 0x1000
+#define mmNIC1_TXB_BASE 0x7FFCD34000ull
+#define NIC1_TXB_MAX_OFFSET 0xD400
+#define NIC1_TXB_SECTION 0x1000
+#define mmNIC1_TMR_BASE 0x7FFCD35000ull
+#define NIC1_TMR_MAX_OFFSET 0x1600
+#define NIC1_TMR_SECTION 0x1000
+#define mmNIC1_TX_GW_BASE 0x7FFCD36000ull
+#define NIC1_TX_GW_MAX_OFFSET 0x1400
+#define NIC1_TX_GW_SECTION 0x2000
+#define mmNIC1_TS_BASE 0x7FFCD38000ull
+#define NIC1_TS_MAX_OFFSET 0xE640
+#define NIC1_TS_SECTION 0x1000
+#define mmNIC1_PLL_BASE 0x7FFCD39000ull
+#define NIC1_PLL_MAX_OFFSET 0x5200
+#define NIC1_PLL_SECTION 0x1000
+#define mmNIC1_PM_BASE 0x7FFCD3A000ull
+#define NIC1_PM_MAX_OFFSET 0x1F00
+#define NIC1_PM_SECTION 0x6000
+#define mmNIC2_MAC_CH0_BASE 0x7FFCD40000ull
+#define NIC2_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH0_SECTION 0x1000
+#define mmNIC2_MAC_CH1_BASE 0x7FFCD41000ull
+#define NIC2_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH1_SECTION 0x1000
+#define mmNIC2_MAC_CH2_BASE 0x7FFCD42000ull
+#define NIC2_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH2_SECTION 0x1000
+#define mmNIC2_MAC_CH3_BASE 0x7FFCD43000ull
+#define NIC2_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC2_MAC_CH3_SECTION 0x1000
+#define mmNIC2_STAT_BASE 0x7FFCD44000ull
+#define NIC2_STAT_MAX_OFFSET 0x4D00
+#define NIC2_STAT_SECTION 0x1000
+#define mmNIC2_MAC_XPCS91_BASE 0x7FFCD45000ull
+#define NIC2_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC2_MAC_XPCS91_SECTION 0x3000
+#define mmNIC2_MAC_CORE_BASE 0x7FFCD48000ull
+#define NIC2_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC2_MAC_CORE_SECTION 0x1000
+#define mmNIC2_MAC_AUX_BASE 0x7FFCD49000ull
+#define NIC2_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC2_MAC_AUX_SECTION 0xF000
+#define mmNIC2_PHY_BASE 0x7FFCD58000ull
+#define NIC2_PHY_MAX_OFFSET 0x3400
+#define NIC2_PHY_SECTION 0x8000
+#define mmNIC2_QM0_BASE 0x7FFCD60000ull
+#define NIC2_QM0_MAX_OFFSET 0xD040
+#define NIC2_QM0_SECTION 0x2000
+#define mmNIC2_QM1_BASE 0x7FFCD62000ull
+#define NIC2_QM1_MAX_OFFSET 0xD040
+#define NIC2_QM1_SECTION 0x2000
+#define mmNIC2_QPC0_BASE 0x7FFCD64000ull
+#define NIC2_QPC0_MAX_OFFSET 0x7140
+#define NIC2_QPC0_SECTION 0x1000
+#define mmNIC2_QPC1_BASE 0x7FFCD65000ull
+#define NIC2_QPC1_MAX_OFFSET 0x7140
+#define NIC2_QPC1_SECTION 0x3000
+#define mmNIC2_RXB_BASE 0x7FFCD68000ull
+#define NIC2_RXB_MAX_OFFSET 0x6040
+#define NIC2_RXB_SECTION 0x1000
+#define mmNIC2_RXE0_BASE 0x7FFCD69000ull
+#define NIC2_RXE0_MAX_OFFSET 0x2FC0
+#define NIC2_RXE0_SECTION 0x1000
+#define mmNIC2_RXE1_BASE 0x7FFCD6A000ull
+#define NIC2_RXE1_MAX_OFFSET 0x2FC0
+#define NIC2_RXE1_SECTION 0x1000
+#define mmNIC2_RX_GW_BASE 0x7FFCD6B000ull
+#define NIC2_RX_GW_MAX_OFFSET 0x4540
+#define NIC2_RX_GW_SECTION 0x5000
+#define mmNIC2_TXS0_BASE 0x7FFCD70000ull
+#define NIC2_TXS0_MAX_OFFSET 0x19C0
+#define NIC2_TXS0_SECTION 0x1000
+#define mmNIC2_TXS1_BASE 0x7FFCD71000ull
+#define NIC2_TXS1_MAX_OFFSET 0x19C0
+#define NIC2_TXS1_SECTION 0x1000
+#define mmNIC2_TXE0_BASE 0x7FFCD72000ull
+#define NIC2_TXE0_MAX_OFFSET 0x2040
+#define NIC2_TXE0_SECTION 0x1000
+#define mmNIC2_TXE1_BASE 0x7FFCD73000ull
+#define NIC2_TXE1_MAX_OFFSET 0x2040
+#define NIC2_TXE1_SECTION 0x1000
+#define mmNIC2_TXB_BASE 0x7FFCD74000ull
+#define NIC2_TXB_MAX_OFFSET 0xD400
+#define NIC2_TXB_SECTION 0x1000
+#define mmNIC2_TMR_BASE 0x7FFCD75000ull
+#define NIC2_TMR_MAX_OFFSET 0x1600
+#define NIC2_TMR_SECTION 0x1000
+#define mmNIC2_TX_GW_BASE 0x7FFCD76000ull
+#define NIC2_TX_GW_MAX_OFFSET 0x1400
+#define NIC2_TX_GW_SECTION 0x2000
+#define mmNIC2_HBM_PLL_BASE 0x7FFCD78000ull
+#define NIC2_HBM_PLL_MAX_OFFSET 0x5200
+#define NIC2_HBM_PLL_SECTION 0x1000
+#define mmNIC2_MME_PLL_BASE 0x7FFCD79000ull
+#define NIC2_MME_PLL_MAX_OFFSET 0x5200
+#define NIC2_MME_PLL_SECTION 0x1000
+#define mmNIC2_TPC_PLL_BASE 0x7FFCD7A000ull
+#define NIC2_TPC_PLL_MAX_OFFSET 0x5200
+#define NIC2_TPC_PLL_SECTION 0x6000
+#define mmNIC3_MAC_CH0_BASE 0x7FFCD80000ull
+#define NIC3_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH0_SECTION 0x1000
+#define mmNIC3_MAC_CH1_BASE 0x7FFCD81000ull
+#define NIC3_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH1_SECTION 0x1000
+#define mmNIC3_MAC_CH2_BASE 0x7FFCD82000ull
+#define NIC3_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH2_SECTION 0x1000
+#define mmNIC3_MAC_CH3_BASE 0x7FFCD83000ull
+#define NIC3_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC3_MAC_CH3_SECTION 0x1000
+#define mmNIC3_STAT_BASE 0x7FFCD84000ull
+#define NIC3_STAT_MAX_OFFSET 0x4D00
+#define NIC3_STAT_SECTION 0x1000
+#define mmNIC3_MAC_XPCS91_BASE 0x7FFCD85000ull
+#define NIC3_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC3_MAC_XPCS91_SECTION 0x3000
+#define mmNIC3_MAC_CORE_BASE 0x7FFCD88000ull
+#define NIC3_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC3_MAC_CORE_SECTION 0x1000
+#define mmNIC3_MAC_AUX_BASE 0x7FFCD89000ull
+#define NIC3_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC3_MAC_AUX_SECTION 0xF000
+#define mmNIC3_PHY_BASE 0x7FFCD98000ull
+#define NIC3_PHY_MAX_OFFSET 0x3400
+#define NIC3_PHY_SECTION 0x8000
+#define mmNIC3_QM0_BASE 0x7FFCDA0000ull
+#define NIC3_QM0_MAX_OFFSET 0xD040
+#define NIC3_QM0_SECTION 0x2000
+#define mmNIC3_QM1_BASE 0x7FFCDA2000ull
+#define NIC3_QM1_MAX_OFFSET 0xD040
+#define NIC3_QM1_SECTION 0x2000
+#define mmNIC3_QPC0_BASE 0x7FFCDA4000ull
+#define NIC3_QPC0_MAX_OFFSET 0x7140
+#define NIC3_QPC0_SECTION 0x1000
+#define mmNIC3_QPC1_BASE 0x7FFCDA5000ull
+#define NIC3_QPC1_MAX_OFFSET 0x7140
+#define NIC3_QPC1_SECTION 0x3000
+#define mmNIC3_RXB_BASE 0x7FFCDA8000ull
+#define NIC3_RXB_MAX_OFFSET 0x6040
+#define NIC3_RXB_SECTION 0x1000
+#define mmNIC3_RXE0_BASE 0x7FFCDA9000ull
+#define NIC3_RXE0_MAX_OFFSET 0x2FC0
+#define NIC3_RXE0_SECTION 0x1000
+#define mmNIC3_RXE1_BASE 0x7FFCDAA000ull
+#define NIC3_RXE1_MAX_OFFSET 0x2FC0
+#define NIC3_RXE1_SECTION 0x1000
+#define mmNIC3_RX_GW_BASE 0x7FFCDAB000ull
+#define NIC3_RX_GW_MAX_OFFSET 0x4540
+#define NIC3_RX_GW_SECTION 0x5000
+#define mmNIC3_TXS0_BASE 0x7FFCDB0000ull
+#define NIC3_TXS0_MAX_OFFSET 0x19C0
+#define NIC3_TXS0_SECTION 0x1000
+#define mmNIC3_TXS1_BASE 0x7FFCDB1000ull
+#define NIC3_TXS1_MAX_OFFSET 0x19C0
+#define NIC3_TXS1_SECTION 0x1000
+#define mmNIC3_TXE0_BASE 0x7FFCDB2000ull
+#define NIC3_TXE0_MAX_OFFSET 0x2040
+#define NIC3_TXE0_SECTION 0x1000
+#define mmNIC3_TXE1_BASE 0x7FFCDB3000ull
+#define NIC3_TXE1_MAX_OFFSET 0x2040
+#define NIC3_TXE1_SECTION 0x1000
+#define mmNIC3_TXB_BASE 0x7FFCDB4000ull
+#define NIC3_TXB_MAX_OFFSET 0xD400
+#define NIC3_TXB_SECTION 0x1000
+#define mmNIC3_TMR_BASE 0x7FFCDB5000ull
+#define NIC3_TMR_MAX_OFFSET 0x1600
+#define NIC3_TMR_SECTION 0x1000
+#define mmNIC3_TX_GW_BASE 0x7FFCDB6000ull
+#define NIC3_TX_GW_MAX_OFFSET 0x1400
+#define NIC3_TX_GW_SECTION 0x2000
+#define mmNIC3_TS_BASE 0x7FFCDB8000ull
+#define NIC3_TS_MAX_OFFSET 0xE640
+#define NIC3_TS_SECTION 0x2000
+#define mmNIC3_PM_BASE 0x7FFCDBA000ull
+#define NIC3_PM_MAX_OFFSET 0x1F00
+#define NIC3_PM_SECTION 0x6000
+#define mmNIC4_MAC_CH0_BASE 0x7FFCDC0000ull
+#define NIC4_MAC_CH0_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH0_SECTION 0x1000
+#define mmNIC4_MAC_CH1_BASE 0x7FFCDC1000ull
+#define NIC4_MAC_CH1_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH1_SECTION 0x1000
+#define mmNIC4_MAC_CH2_BASE 0x7FFCDC2000ull
+#define NIC4_MAC_CH2_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH2_SECTION 0x1000
+#define mmNIC4_MAC_CH3_BASE 0x7FFCDC3000ull
+#define NIC4_MAC_CH3_MAX_OFFSET 0x8400
+#define NIC4_MAC_CH3_SECTION 0x1000
+#define mmNIC4_STAT_BASE 0x7FFCDC4000ull
+#define NIC4_STAT_MAX_OFFSET 0x4D00
+#define NIC4_STAT_SECTION 0x1000
+#define mmNIC4_MAC_XPCS91_BASE 0x7FFCDC5000ull
+#define NIC4_MAC_XPCS91_MAX_OFFSET 0x2380
+#define NIC4_MAC_XPCS91_SECTION 0x3000
+#define mmNIC4_MAC_CORE_BASE 0x7FFCDC8000ull
+#define NIC4_MAC_CORE_MAX_OFFSET 0x5400
+#define NIC4_MAC_CORE_SECTION 0x1000
+#define mmNIC4_MAC_AUX_BASE 0x7FFCDC9000ull
+#define NIC4_MAC_AUX_MAX_OFFSET 0x3000
+#define NIC4_MAC_AUX_SECTION 0xF000
+#define mmNIC4_PHY_BASE 0x7FFCDD8000ull
+#define NIC4_PHY_MAX_OFFSET 0x3400
+#define NIC4_PHY_SECTION 0x8000
+#define mmNIC4_QM0_BASE 0x7FFCDE0000ull
+#define NIC4_QM0_MAX_OFFSET 0xD040
+#define NIC4_QM0_SECTION 0x2000
+#define mmNIC4_QM1_BASE 0x7FFCDE2000ull
+#define NIC4_QM1_MAX_OFFSET 0xD040
+#define NIC4_QM1_SECTION 0x2000
+#define mmNIC4_QPC0_BASE 0x7FFCDE4000ull
+#define NIC4_QPC0_MAX_OFFSET 0x7140
+#define NIC4_QPC0_SECTION 0x1000
+#define mmNIC4_QPC1_BASE 0x7FFCDE5000ull
+#define NIC4_QPC1_MAX_OFFSET 0x7140
+#define NIC4_QPC1_SECTION 0x3000
+#define mmNIC4_RXB_BASE 0x7FFCDE8000ull
+#define NIC4_RXB_MAX_OFFSET 0x6040
+#define NIC4_RXB_SECTION 0x1000
+#define mmNIC4_RXE0_BASE 0x7FFCDE9000ull
+#define NIC4_RXE0_MAX_OFFSET 0x2FC0
+#define NIC4_RXE0_SECTION 0x1000
+#define mmNIC4_RXE1_BASE 0x7FFCDEA000ull
+#define NIC4_RXE1_MAX_OFFSET 0x2FC0
+#define NIC4_RXE1_SECTION 0x1000
+#define mmNIC4_RX_GW_BASE 0x7FFCDEB000ull
+#define NIC4_RX_GW_MAX_OFFSET 0x4540
+#define NIC4_RX_GW_SECTION 0x5000
+#define mmNIC4_TXS0_BASE 0x7FFCDF0000ull
+#define NIC4_TXS0_MAX_OFFSET 0x19C0
+#define NIC4_TXS0_SECTION 0x1000
+#define mmNIC4_TXS1_BASE 0x7FFCDF1000ull
+#define NIC4_TXS1_MAX_OFFSET 0x19C0
+#define NIC4_TXS1_SECTION 0x1000
+#define mmNIC4_TXE0_BASE 0x7FFCDF2000ull
+#define NIC4_TXE0_MAX_OFFSET 0x2040
+#define NIC4_TXE0_SECTION 0x1000
+#define mmNIC4_TXE1_BASE 0x7FFCDF3000ull
+#define NIC4_TXE1_MAX_OFFSET 0x2040
+#define NIC4_TXE1_SECTION 0x1000
+#define mmNIC4_TXB_BASE 0x7FFCDF4000ull
+#define NIC4_TXB_MAX_OFFSET 0xD400
+#define NIC4_TXB_SECTION 0x1000
+#define mmNIC4_TMR_BASE 0x7FFCDF5000ull
+#define NIC4_TMR_MAX_OFFSET 0x1600
+#define NIC4_TMR_SECTION 0x1000
+#define mmNIC4_TX_GW_BASE 0x7FFCDF6000ull
+#define NIC4_TX_GW_MAX_OFFSET 0x1400
+#define NIC4_TX_GW_SECTION 0x10000
+#define mmTPC0_CFG_BASE 0x7FFCE06000ull
+#define TPC0_CFG_MAX_OFFSET 0xE400
+#define TPC0_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC0_CFG_BASE 0x7FFCE06400ull
+#define KERNEL_TENSOR_0_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC0_CFG_BASE 0x7FFCE06438ull
+#define KERNEL_TENSOR_1_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC0_CFG_BASE 0x7FFCE06470ull
+#define KERNEL_TENSOR_2_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC0_CFG_BASE 0x7FFCE064A8ull
+#define KERNEL_TENSOR_3_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC0_CFG_BASE 0x7FFCE064E0ull
+#define KERNEL_TENSOR_4_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC0_CFG_BASE 0x7FFCE06518ull
+#define KERNEL_TENSOR_5_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC0_CFG_BASE 0x7FFCE06550ull
+#define KERNEL_TENSOR_6_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC0_CFG_BASE 0x7FFCE06588ull
+#define KERNEL_TENSOR_7_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC0_CFG_BASE 0x7FFCE065C0ull
+#define KERNEL_TENSOR_8_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC0_CFG_BASE 0x7FFCE065F8ull
+#define KERNEL_TENSOR_9_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC0_CFG_BASE 0x7FFCE06630ull
+#define KERNEL_TENSOR_10_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC0_CFG_BASE 0x7FFCE06668ull
+#define KERNEL_TENSOR_11_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC0_CFG_BASE 0x7FFCE066A0ull
+#define KERNEL_TENSOR_12_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC0_CFG_BASE 0x7FFCE066D8ull
+#define KERNEL_TENSOR_13_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC0_CFG_BASE 0x7FFCE06710ull
+#define KERNEL_TENSOR_14_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC0_CFG_BASE 0x7FFCE06748ull
+#define KERNEL_TENSOR_15_TPC0_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC0_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC0_CFG_BASE 0x7FFCE06780ull
+#define KERNEL_SYNC_OBJECT_TPC0_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC0_CFG_SECTION 0x8000
+#define mmKERNEL_TPC0_CFG_BASE 0x7FFCE06788ull
+#define KERNEL_TPC0_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC0_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC0_CFG_BASE 0x7FFCE06A00ull
+#define QM_TENSOR_0_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC0_CFG_BASE 0x7FFCE06A38ull
+#define QM_TENSOR_1_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC0_CFG_BASE 0x7FFCE06A70ull
+#define QM_TENSOR_2_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC0_CFG_BASE 0x7FFCE06AA8ull
+#define QM_TENSOR_3_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC0_CFG_BASE 0x7FFCE06AE0ull
+#define QM_TENSOR_4_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC0_CFG_BASE 0x7FFCE06B18ull
+#define QM_TENSOR_5_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC0_CFG_BASE 0x7FFCE06B50ull
+#define QM_TENSOR_6_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC0_CFG_BASE 0x7FFCE06B88ull
+#define QM_TENSOR_7_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC0_CFG_BASE 0x7FFCE06BC0ull
+#define QM_TENSOR_8_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC0_CFG_BASE 0x7FFCE06BF8ull
+#define QM_TENSOR_9_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC0_CFG_BASE 0x7FFCE06C30ull
+#define QM_TENSOR_10_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC0_CFG_BASE 0x7FFCE06C68ull
+#define QM_TENSOR_11_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC0_CFG_BASE 0x7FFCE06CA0ull
+#define QM_TENSOR_12_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC0_CFG_BASE 0x7FFCE06CD8ull
+#define QM_TENSOR_13_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC0_CFG_BASE 0x7FFCE06D10ull
+#define QM_TENSOR_14_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC0_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC0_CFG_BASE 0x7FFCE06D48ull
+#define QM_TENSOR_15_TPC0_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC0_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC0_CFG_BASE 0x7FFCE06D80ull
+#define QM_SYNC_OBJECT_TPC0_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC0_CFG_SECTION 0x8000
+#define mmQM_TPC0_CFG_BASE 0x7FFCE06D88ull
+#define QM_TPC0_CFG_MAX_OFFSET 0xB800
+#define QM_TPC0_CFG_SECTION 0x2780
+#define mmTPC0_E2E_CRED_BASE 0x7FFCE07000ull
+#define TPC0_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC0_E2E_CRED_SECTION 0x1000
+#define mmTPC0_QM_BASE 0x7FFCE08000ull
+#define TPC0_QM_MAX_OFFSET 0xD040
+#define TPC0_QM_SECTION 0x3E000
+#define mmTPC1_CFG_BASE 0x7FFCE46000ull
+#define TPC1_CFG_MAX_OFFSET 0xE400
+#define TPC1_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC1_CFG_BASE 0x7FFCE46400ull
+#define KERNEL_TENSOR_0_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC1_CFG_BASE 0x7FFCE46438ull
+#define KERNEL_TENSOR_1_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC1_CFG_BASE 0x7FFCE46470ull
+#define KERNEL_TENSOR_2_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC1_CFG_BASE 0x7FFCE464A8ull
+#define KERNEL_TENSOR_3_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC1_CFG_BASE 0x7FFCE464E0ull
+#define KERNEL_TENSOR_4_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC1_CFG_BASE 0x7FFCE46518ull
+#define KERNEL_TENSOR_5_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC1_CFG_BASE 0x7FFCE46550ull
+#define KERNEL_TENSOR_6_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC1_CFG_BASE 0x7FFCE46588ull
+#define KERNEL_TENSOR_7_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC1_CFG_BASE 0x7FFCE465C0ull
+#define KERNEL_TENSOR_8_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC1_CFG_BASE 0x7FFCE465F8ull
+#define KERNEL_TENSOR_9_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC1_CFG_BASE 0x7FFCE46630ull
+#define KERNEL_TENSOR_10_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC1_CFG_BASE 0x7FFCE46668ull
+#define KERNEL_TENSOR_11_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC1_CFG_BASE 0x7FFCE466A0ull
+#define KERNEL_TENSOR_12_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC1_CFG_BASE 0x7FFCE466D8ull
+#define KERNEL_TENSOR_13_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC1_CFG_BASE 0x7FFCE46710ull
+#define KERNEL_TENSOR_14_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC1_CFG_BASE 0x7FFCE46748ull
+#define KERNEL_TENSOR_15_TPC1_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC1_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC1_CFG_BASE 0x7FFCE46780ull
+#define KERNEL_SYNC_OBJECT_TPC1_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC1_CFG_SECTION 0x8000
+#define mmKERNEL_TPC1_CFG_BASE 0x7FFCE46788ull
+#define KERNEL_TPC1_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC1_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC1_CFG_BASE 0x7FFCE46A00ull
+#define QM_TENSOR_0_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC1_CFG_BASE 0x7FFCE46A38ull
+#define QM_TENSOR_1_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC1_CFG_BASE 0x7FFCE46A70ull
+#define QM_TENSOR_2_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC1_CFG_BASE 0x7FFCE46AA8ull
+#define QM_TENSOR_3_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC1_CFG_BASE 0x7FFCE46AE0ull
+#define QM_TENSOR_4_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC1_CFG_BASE 0x7FFCE46B18ull
+#define QM_TENSOR_5_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC1_CFG_BASE 0x7FFCE46B50ull
+#define QM_TENSOR_6_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC1_CFG_BASE 0x7FFCE46B88ull
+#define QM_TENSOR_7_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC1_CFG_BASE 0x7FFCE46BC0ull
+#define QM_TENSOR_8_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC1_CFG_BASE 0x7FFCE46BF8ull
+#define QM_TENSOR_9_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC1_CFG_BASE 0x7FFCE46C30ull
+#define QM_TENSOR_10_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC1_CFG_BASE 0x7FFCE46C68ull
+#define QM_TENSOR_11_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC1_CFG_BASE 0x7FFCE46CA0ull
+#define QM_TENSOR_12_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC1_CFG_BASE 0x7FFCE46CD8ull
+#define QM_TENSOR_13_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC1_CFG_BASE 0x7FFCE46D10ull
+#define QM_TENSOR_14_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC1_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC1_CFG_BASE 0x7FFCE46D48ull
+#define QM_TENSOR_15_TPC1_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC1_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC1_CFG_BASE 0x7FFCE46D80ull
+#define QM_SYNC_OBJECT_TPC1_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC1_CFG_SECTION 0x8000
+#define mmQM_TPC1_CFG_BASE 0x7FFCE46D88ull
+#define QM_TPC1_CFG_MAX_OFFSET 0xB800
+#define QM_TPC1_CFG_SECTION 0x2780
+#define mmTPC1_E2E_CRED_BASE 0x7FFCE47000ull
+#define TPC1_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC1_E2E_CRED_SECTION 0x1000
+#define mmTPC1_QM_BASE 0x7FFCE48000ull
+#define TPC1_QM_MAX_OFFSET 0xD040
+#define TPC1_QM_SECTION 0x3E000
+#define mmTPC2_CFG_BASE 0x7FFCE86000ull
+#define TPC2_CFG_MAX_OFFSET 0xE400
+#define TPC2_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC2_CFG_BASE 0x7FFCE86400ull
+#define KERNEL_TENSOR_0_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC2_CFG_BASE 0x7FFCE86438ull
+#define KERNEL_TENSOR_1_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC2_CFG_BASE 0x7FFCE86470ull
+#define KERNEL_TENSOR_2_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC2_CFG_BASE 0x7FFCE864A8ull
+#define KERNEL_TENSOR_3_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC2_CFG_BASE 0x7FFCE864E0ull
+#define KERNEL_TENSOR_4_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC2_CFG_BASE 0x7FFCE86518ull
+#define KERNEL_TENSOR_5_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC2_CFG_BASE 0x7FFCE86550ull
+#define KERNEL_TENSOR_6_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC2_CFG_BASE 0x7FFCE86588ull
+#define KERNEL_TENSOR_7_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC2_CFG_BASE 0x7FFCE865C0ull
+#define KERNEL_TENSOR_8_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC2_CFG_BASE 0x7FFCE865F8ull
+#define KERNEL_TENSOR_9_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC2_CFG_BASE 0x7FFCE86630ull
+#define KERNEL_TENSOR_10_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC2_CFG_BASE 0x7FFCE86668ull
+#define KERNEL_TENSOR_11_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC2_CFG_BASE 0x7FFCE866A0ull
+#define KERNEL_TENSOR_12_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC2_CFG_BASE 0x7FFCE866D8ull
+#define KERNEL_TENSOR_13_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC2_CFG_BASE 0x7FFCE86710ull
+#define KERNEL_TENSOR_14_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC2_CFG_BASE 0x7FFCE86748ull
+#define KERNEL_TENSOR_15_TPC2_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC2_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC2_CFG_BASE 0x7FFCE86780ull
+#define KERNEL_SYNC_OBJECT_TPC2_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC2_CFG_SECTION 0x8000
+#define mmKERNEL_TPC2_CFG_BASE 0x7FFCE86788ull
+#define KERNEL_TPC2_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC2_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC2_CFG_BASE 0x7FFCE86A00ull
+#define QM_TENSOR_0_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC2_CFG_BASE 0x7FFCE86A38ull
+#define QM_TENSOR_1_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC2_CFG_BASE 0x7FFCE86A70ull
+#define QM_TENSOR_2_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC2_CFG_BASE 0x7FFCE86AA8ull
+#define QM_TENSOR_3_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC2_CFG_BASE 0x7FFCE86AE0ull
+#define QM_TENSOR_4_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC2_CFG_BASE 0x7FFCE86B18ull
+#define QM_TENSOR_5_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC2_CFG_BASE 0x7FFCE86B50ull
+#define QM_TENSOR_6_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC2_CFG_BASE 0x7FFCE86B88ull
+#define QM_TENSOR_7_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC2_CFG_BASE 0x7FFCE86BC0ull
+#define QM_TENSOR_8_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC2_CFG_BASE 0x7FFCE86BF8ull
+#define QM_TENSOR_9_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC2_CFG_BASE 0x7FFCE86C30ull
+#define QM_TENSOR_10_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC2_CFG_BASE 0x7FFCE86C68ull
+#define QM_TENSOR_11_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC2_CFG_BASE 0x7FFCE86CA0ull
+#define QM_TENSOR_12_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC2_CFG_BASE 0x7FFCE86CD8ull
+#define QM_TENSOR_13_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC2_CFG_BASE 0x7FFCE86D10ull
+#define QM_TENSOR_14_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC2_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC2_CFG_BASE 0x7FFCE86D48ull
+#define QM_TENSOR_15_TPC2_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC2_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC2_CFG_BASE 0x7FFCE86D80ull
+#define QM_SYNC_OBJECT_TPC2_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC2_CFG_SECTION 0x8000
+#define mmQM_TPC2_CFG_BASE 0x7FFCE86D88ull
+#define QM_TPC2_CFG_MAX_OFFSET 0xB800
+#define QM_TPC2_CFG_SECTION 0x2780
+#define mmTPC2_E2E_CRED_BASE 0x7FFCE87000ull
+#define TPC2_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC2_E2E_CRED_SECTION 0x1000
+#define mmTPC2_QM_BASE 0x7FFCE88000ull
+#define TPC2_QM_MAX_OFFSET 0xD040
+#define TPC2_QM_SECTION 0x3E000
+#define mmTPC3_CFG_BASE 0x7FFCEC6000ull
+#define TPC3_CFG_MAX_OFFSET 0xE400
+#define TPC3_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC3_CFG_BASE 0x7FFCEC6400ull
+#define KERNEL_TENSOR_0_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC3_CFG_BASE 0x7FFCEC6438ull
+#define KERNEL_TENSOR_1_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC3_CFG_BASE 0x7FFCEC6470ull
+#define KERNEL_TENSOR_2_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC3_CFG_BASE 0x7FFCEC64A8ull
+#define KERNEL_TENSOR_3_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC3_CFG_BASE 0x7FFCEC64E0ull
+#define KERNEL_TENSOR_4_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC3_CFG_BASE 0x7FFCEC6518ull
+#define KERNEL_TENSOR_5_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC3_CFG_BASE 0x7FFCEC6550ull
+#define KERNEL_TENSOR_6_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC3_CFG_BASE 0x7FFCEC6588ull
+#define KERNEL_TENSOR_7_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC3_CFG_BASE 0x7FFCEC65C0ull
+#define KERNEL_TENSOR_8_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC3_CFG_BASE 0x7FFCEC65F8ull
+#define KERNEL_TENSOR_9_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC3_CFG_BASE 0x7FFCEC6630ull
+#define KERNEL_TENSOR_10_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC3_CFG_BASE 0x7FFCEC6668ull
+#define KERNEL_TENSOR_11_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC3_CFG_BASE 0x7FFCEC66A0ull
+#define KERNEL_TENSOR_12_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC3_CFG_BASE 0x7FFCEC66D8ull
+#define KERNEL_TENSOR_13_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC3_CFG_BASE 0x7FFCEC6710ull
+#define KERNEL_TENSOR_14_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC3_CFG_BASE 0x7FFCEC6748ull
+#define KERNEL_TENSOR_15_TPC3_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC3_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC3_CFG_BASE 0x7FFCEC6780ull
+#define KERNEL_SYNC_OBJECT_TPC3_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC3_CFG_SECTION 0x8000
+#define mmKERNEL_TPC3_CFG_BASE 0x7FFCEC6788ull
+#define KERNEL_TPC3_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC3_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC3_CFG_BASE 0x7FFCEC6A00ull
+#define QM_TENSOR_0_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC3_CFG_BASE 0x7FFCEC6A38ull
+#define QM_TENSOR_1_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC3_CFG_BASE 0x7FFCEC6A70ull
+#define QM_TENSOR_2_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC3_CFG_BASE 0x7FFCEC6AA8ull
+#define QM_TENSOR_3_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC3_CFG_BASE 0x7FFCEC6AE0ull
+#define QM_TENSOR_4_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC3_CFG_BASE 0x7FFCEC6B18ull
+#define QM_TENSOR_5_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC3_CFG_BASE 0x7FFCEC6B50ull
+#define QM_TENSOR_6_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC3_CFG_BASE 0x7FFCEC6B88ull
+#define QM_TENSOR_7_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC3_CFG_BASE 0x7FFCEC6BC0ull
+#define QM_TENSOR_8_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC3_CFG_BASE 0x7FFCEC6BF8ull
+#define QM_TENSOR_9_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC3_CFG_BASE 0x7FFCEC6C30ull
+#define QM_TENSOR_10_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC3_CFG_BASE 0x7FFCEC6C68ull
+#define QM_TENSOR_11_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC3_CFG_BASE 0x7FFCEC6CA0ull
+#define QM_TENSOR_12_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC3_CFG_BASE 0x7FFCEC6CD8ull
+#define QM_TENSOR_13_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC3_CFG_BASE 0x7FFCEC6D10ull
+#define QM_TENSOR_14_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC3_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC3_CFG_BASE 0x7FFCEC6D48ull
+#define QM_TENSOR_15_TPC3_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC3_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC3_CFG_BASE 0x7FFCEC6D80ull
+#define QM_SYNC_OBJECT_TPC3_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC3_CFG_SECTION 0x8000
+#define mmQM_TPC3_CFG_BASE 0x7FFCEC6D88ull
+#define QM_TPC3_CFG_MAX_OFFSET 0xB800
+#define QM_TPC3_CFG_SECTION 0x2780
+#define mmTPC3_E2E_CRED_BASE 0x7FFCEC7000ull
+#define TPC3_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC3_E2E_CRED_SECTION 0x1000
+#define mmTPC3_QM_BASE 0x7FFCEC8000ull
+#define TPC3_QM_MAX_OFFSET 0xD040
+#define TPC3_QM_SECTION 0x3E000
+#define mmTPC4_CFG_BASE 0x7FFCF06000ull
+#define TPC4_CFG_MAX_OFFSET 0xE400
+#define TPC4_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC4_CFG_BASE 0x7FFCF06400ull
+#define KERNEL_TENSOR_0_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC4_CFG_BASE 0x7FFCF06438ull
+#define KERNEL_TENSOR_1_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC4_CFG_BASE 0x7FFCF06470ull
+#define KERNEL_TENSOR_2_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC4_CFG_BASE 0x7FFCF064A8ull
+#define KERNEL_TENSOR_3_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC4_CFG_BASE 0x7FFCF064E0ull
+#define KERNEL_TENSOR_4_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC4_CFG_BASE 0x7FFCF06518ull
+#define KERNEL_TENSOR_5_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC4_CFG_BASE 0x7FFCF06550ull
+#define KERNEL_TENSOR_6_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC4_CFG_BASE 0x7FFCF06588ull
+#define KERNEL_TENSOR_7_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC4_CFG_BASE 0x7FFCF065C0ull
+#define KERNEL_TENSOR_8_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC4_CFG_BASE 0x7FFCF065F8ull
+#define KERNEL_TENSOR_9_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC4_CFG_BASE 0x7FFCF06630ull
+#define KERNEL_TENSOR_10_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC4_CFG_BASE 0x7FFCF06668ull
+#define KERNEL_TENSOR_11_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC4_CFG_BASE 0x7FFCF066A0ull
+#define KERNEL_TENSOR_12_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC4_CFG_BASE 0x7FFCF066D8ull
+#define KERNEL_TENSOR_13_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC4_CFG_BASE 0x7FFCF06710ull
+#define KERNEL_TENSOR_14_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC4_CFG_BASE 0x7FFCF06748ull
+#define KERNEL_TENSOR_15_TPC4_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC4_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC4_CFG_BASE 0x7FFCF06780ull
+#define KERNEL_SYNC_OBJECT_TPC4_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC4_CFG_SECTION 0x8000
+#define mmKERNEL_TPC4_CFG_BASE 0x7FFCF06788ull
+#define KERNEL_TPC4_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC4_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC4_CFG_BASE 0x7FFCF06A00ull
+#define QM_TENSOR_0_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC4_CFG_BASE 0x7FFCF06A38ull
+#define QM_TENSOR_1_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC4_CFG_BASE 0x7FFCF06A70ull
+#define QM_TENSOR_2_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC4_CFG_BASE 0x7FFCF06AA8ull
+#define QM_TENSOR_3_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC4_CFG_BASE 0x7FFCF06AE0ull
+#define QM_TENSOR_4_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC4_CFG_BASE 0x7FFCF06B18ull
+#define QM_TENSOR_5_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC4_CFG_BASE 0x7FFCF06B50ull
+#define QM_TENSOR_6_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC4_CFG_BASE 0x7FFCF06B88ull
+#define QM_TENSOR_7_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC4_CFG_BASE 0x7FFCF06BC0ull
+#define QM_TENSOR_8_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC4_CFG_BASE 0x7FFCF06BF8ull
+#define QM_TENSOR_9_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC4_CFG_BASE 0x7FFCF06C30ull
+#define QM_TENSOR_10_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC4_CFG_BASE 0x7FFCF06C68ull
+#define QM_TENSOR_11_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC4_CFG_BASE 0x7FFCF06CA0ull
+#define QM_TENSOR_12_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC4_CFG_BASE 0x7FFCF06CD8ull
+#define QM_TENSOR_13_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC4_CFG_BASE 0x7FFCF06D10ull
+#define QM_TENSOR_14_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC4_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC4_CFG_BASE 0x7FFCF06D48ull
+#define QM_TENSOR_15_TPC4_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC4_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC4_CFG_BASE 0x7FFCF06D80ull
+#define QM_SYNC_OBJECT_TPC4_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC4_CFG_SECTION 0x8000
+#define mmQM_TPC4_CFG_BASE 0x7FFCF06D88ull
+#define QM_TPC4_CFG_MAX_OFFSET 0xB800
+#define QM_TPC4_CFG_SECTION 0x2780
+#define mmTPC4_E2E_CRED_BASE 0x7FFCF07000ull
+#define TPC4_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC4_E2E_CRED_SECTION 0x1000
+#define mmTPC4_QM_BASE 0x7FFCF08000ull
+#define TPC4_QM_MAX_OFFSET 0xD040
+#define TPC4_QM_SECTION 0x3E000
+#define mmTPC5_CFG_BASE 0x7FFCF46000ull
+#define TPC5_CFG_MAX_OFFSET 0xE400
+#define TPC5_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC5_CFG_BASE 0x7FFCF46400ull
+#define KERNEL_TENSOR_0_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC5_CFG_BASE 0x7FFCF46438ull
+#define KERNEL_TENSOR_1_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC5_CFG_BASE 0x7FFCF46470ull
+#define KERNEL_TENSOR_2_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC5_CFG_BASE 0x7FFCF464A8ull
+#define KERNEL_TENSOR_3_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC5_CFG_BASE 0x7FFCF464E0ull
+#define KERNEL_TENSOR_4_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC5_CFG_BASE 0x7FFCF46518ull
+#define KERNEL_TENSOR_5_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC5_CFG_BASE 0x7FFCF46550ull
+#define KERNEL_TENSOR_6_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC5_CFG_BASE 0x7FFCF46588ull
+#define KERNEL_TENSOR_7_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC5_CFG_BASE 0x7FFCF465C0ull
+#define KERNEL_TENSOR_8_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC5_CFG_BASE 0x7FFCF465F8ull
+#define KERNEL_TENSOR_9_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC5_CFG_BASE 0x7FFCF46630ull
+#define KERNEL_TENSOR_10_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC5_CFG_BASE 0x7FFCF46668ull
+#define KERNEL_TENSOR_11_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC5_CFG_BASE 0x7FFCF466A0ull
+#define KERNEL_TENSOR_12_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC5_CFG_BASE 0x7FFCF466D8ull
+#define KERNEL_TENSOR_13_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC5_CFG_BASE 0x7FFCF46710ull
+#define KERNEL_TENSOR_14_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC5_CFG_BASE 0x7FFCF46748ull
+#define KERNEL_TENSOR_15_TPC5_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC5_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC5_CFG_BASE 0x7FFCF46780ull
+#define KERNEL_SYNC_OBJECT_TPC5_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC5_CFG_SECTION 0x8000
+#define mmKERNEL_TPC5_CFG_BASE 0x7FFCF46788ull
+#define KERNEL_TPC5_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC5_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC5_CFG_BASE 0x7FFCF46A00ull
+#define QM_TENSOR_0_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC5_CFG_BASE 0x7FFCF46A38ull
+#define QM_TENSOR_1_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC5_CFG_BASE 0x7FFCF46A70ull
+#define QM_TENSOR_2_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC5_CFG_BASE 0x7FFCF46AA8ull
+#define QM_TENSOR_3_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC5_CFG_BASE 0x7FFCF46AE0ull
+#define QM_TENSOR_4_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC5_CFG_BASE 0x7FFCF46B18ull
+#define QM_TENSOR_5_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC5_CFG_BASE 0x7FFCF46B50ull
+#define QM_TENSOR_6_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC5_CFG_BASE 0x7FFCF46B88ull
+#define QM_TENSOR_7_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC5_CFG_BASE 0x7FFCF46BC0ull
+#define QM_TENSOR_8_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC5_CFG_BASE 0x7FFCF46BF8ull
+#define QM_TENSOR_9_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC5_CFG_BASE 0x7FFCF46C30ull
+#define QM_TENSOR_10_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC5_CFG_BASE 0x7FFCF46C68ull
+#define QM_TENSOR_11_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC5_CFG_BASE 0x7FFCF46CA0ull
+#define QM_TENSOR_12_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC5_CFG_BASE 0x7FFCF46CD8ull
+#define QM_TENSOR_13_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC5_CFG_BASE 0x7FFCF46D10ull
+#define QM_TENSOR_14_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC5_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC5_CFG_BASE 0x7FFCF46D48ull
+#define QM_TENSOR_15_TPC5_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC5_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC5_CFG_BASE 0x7FFCF46D80ull
+#define QM_SYNC_OBJECT_TPC5_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC5_CFG_SECTION 0x8000
+#define mmQM_TPC5_CFG_BASE 0x7FFCF46D88ull
+#define QM_TPC5_CFG_MAX_OFFSET 0xB800
+#define QM_TPC5_CFG_SECTION 0x2780
+#define mmTPC5_E2E_CRED_BASE 0x7FFCF47000ull
+#define TPC5_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC5_E2E_CRED_SECTION 0x1000
+#define mmTPC5_QM_BASE 0x7FFCF48000ull
+#define TPC5_QM_MAX_OFFSET 0xD040
+#define TPC5_QM_SECTION 0x3E000
+#define mmTPC6_CFG_BASE 0x7FFCF86000ull
+#define TPC6_CFG_MAX_OFFSET 0xE400
+#define TPC6_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC6_CFG_BASE 0x7FFCF86400ull
+#define KERNEL_TENSOR_0_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC6_CFG_BASE 0x7FFCF86438ull
+#define KERNEL_TENSOR_1_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC6_CFG_BASE 0x7FFCF86470ull
+#define KERNEL_TENSOR_2_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC6_CFG_BASE 0x7FFCF864A8ull
+#define KERNEL_TENSOR_3_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC6_CFG_BASE 0x7FFCF864E0ull
+#define KERNEL_TENSOR_4_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC6_CFG_BASE 0x7FFCF86518ull
+#define KERNEL_TENSOR_5_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC6_CFG_BASE 0x7FFCF86550ull
+#define KERNEL_TENSOR_6_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC6_CFG_BASE 0x7FFCF86588ull
+#define KERNEL_TENSOR_7_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC6_CFG_BASE 0x7FFCF865C0ull
+#define KERNEL_TENSOR_8_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC6_CFG_BASE 0x7FFCF865F8ull
+#define KERNEL_TENSOR_9_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC6_CFG_BASE 0x7FFCF86630ull
+#define KERNEL_TENSOR_10_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC6_CFG_BASE 0x7FFCF86668ull
+#define KERNEL_TENSOR_11_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC6_CFG_BASE 0x7FFCF866A0ull
+#define KERNEL_TENSOR_12_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC6_CFG_BASE 0x7FFCF866D8ull
+#define KERNEL_TENSOR_13_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC6_CFG_BASE 0x7FFCF86710ull
+#define KERNEL_TENSOR_14_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC6_CFG_BASE 0x7FFCF86748ull
+#define KERNEL_TENSOR_15_TPC6_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC6_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC6_CFG_BASE 0x7FFCF86780ull
+#define KERNEL_SYNC_OBJECT_TPC6_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC6_CFG_SECTION 0x8000
+#define mmKERNEL_TPC6_CFG_BASE 0x7FFCF86788ull
+#define KERNEL_TPC6_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC6_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC6_CFG_BASE 0x7FFCF86A00ull
+#define QM_TENSOR_0_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC6_CFG_BASE 0x7FFCF86A38ull
+#define QM_TENSOR_1_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC6_CFG_BASE 0x7FFCF86A70ull
+#define QM_TENSOR_2_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC6_CFG_BASE 0x7FFCF86AA8ull
+#define QM_TENSOR_3_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC6_CFG_BASE 0x7FFCF86AE0ull
+#define QM_TENSOR_4_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC6_CFG_BASE 0x7FFCF86B18ull
+#define QM_TENSOR_5_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC6_CFG_BASE 0x7FFCF86B50ull
+#define QM_TENSOR_6_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC6_CFG_BASE 0x7FFCF86B88ull
+#define QM_TENSOR_7_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC6_CFG_BASE 0x7FFCF86BC0ull
+#define QM_TENSOR_8_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC6_CFG_BASE 0x7FFCF86BF8ull
+#define QM_TENSOR_9_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC6_CFG_BASE 0x7FFCF86C30ull
+#define QM_TENSOR_10_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC6_CFG_BASE 0x7FFCF86C68ull
+#define QM_TENSOR_11_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC6_CFG_BASE 0x7FFCF86CA0ull
+#define QM_TENSOR_12_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC6_CFG_BASE 0x7FFCF86CD8ull
+#define QM_TENSOR_13_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC6_CFG_BASE 0x7FFCF86D10ull
+#define QM_TENSOR_14_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC6_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC6_CFG_BASE 0x7FFCF86D48ull
+#define QM_TENSOR_15_TPC6_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC6_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC6_CFG_BASE 0x7FFCF86D80ull
+#define QM_SYNC_OBJECT_TPC6_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC6_CFG_SECTION 0x8000
+#define mmQM_TPC6_CFG_BASE 0x7FFCF86D88ull
+#define QM_TPC6_CFG_MAX_OFFSET 0xB800
+#define QM_TPC6_CFG_SECTION 0x2780
+#define mmTPC6_E2E_CRED_BASE 0x7FFCF87000ull
+#define TPC6_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC6_E2E_CRED_SECTION 0x1000
+#define mmTPC6_QM_BASE 0x7FFCF88000ull
+#define TPC6_QM_MAX_OFFSET 0xD040
+#define TPC6_QM_SECTION 0x3E000
+#define mmTPC7_CFG_BASE 0x7FFCFC6000ull
+#define TPC7_CFG_MAX_OFFSET 0xE400
+#define TPC7_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC7_CFG_BASE 0x7FFCFC6400ull
+#define KERNEL_TENSOR_0_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC7_CFG_BASE 0x7FFCFC6438ull
+#define KERNEL_TENSOR_1_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC7_CFG_BASE 0x7FFCFC6470ull
+#define KERNEL_TENSOR_2_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC7_CFG_BASE 0x7FFCFC64A8ull
+#define KERNEL_TENSOR_3_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC7_CFG_BASE 0x7FFCFC64E0ull
+#define KERNEL_TENSOR_4_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC7_CFG_BASE 0x7FFCFC6518ull
+#define KERNEL_TENSOR_5_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC7_CFG_BASE 0x7FFCFC6550ull
+#define KERNEL_TENSOR_6_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC7_CFG_BASE 0x7FFCFC6588ull
+#define KERNEL_TENSOR_7_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC7_CFG_BASE 0x7FFCFC65C0ull
+#define KERNEL_TENSOR_8_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC7_CFG_BASE 0x7FFCFC65F8ull
+#define KERNEL_TENSOR_9_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC7_CFG_BASE 0x7FFCFC6630ull
+#define KERNEL_TENSOR_10_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC7_CFG_BASE 0x7FFCFC6668ull
+#define KERNEL_TENSOR_11_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC7_CFG_BASE 0x7FFCFC66A0ull
+#define KERNEL_TENSOR_12_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC7_CFG_BASE 0x7FFCFC66D8ull
+#define KERNEL_TENSOR_13_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC7_CFG_BASE 0x7FFCFC6710ull
+#define KERNEL_TENSOR_14_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC7_CFG_BASE 0x7FFCFC6748ull
+#define KERNEL_TENSOR_15_TPC7_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC7_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC7_CFG_BASE 0x7FFCFC6780ull
+#define KERNEL_SYNC_OBJECT_TPC7_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC7_CFG_SECTION 0x8000
+#define mmKERNEL_TPC7_CFG_BASE 0x7FFCFC6788ull
+#define KERNEL_TPC7_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC7_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC7_CFG_BASE 0x7FFCFC6A00ull
+#define QM_TENSOR_0_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC7_CFG_BASE 0x7FFCFC6A38ull
+#define QM_TENSOR_1_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC7_CFG_BASE 0x7FFCFC6A70ull
+#define QM_TENSOR_2_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC7_CFG_BASE 0x7FFCFC6AA8ull
+#define QM_TENSOR_3_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC7_CFG_BASE 0x7FFCFC6AE0ull
+#define QM_TENSOR_4_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC7_CFG_BASE 0x7FFCFC6B18ull
+#define QM_TENSOR_5_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC7_CFG_BASE 0x7FFCFC6B50ull
+#define QM_TENSOR_6_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC7_CFG_BASE 0x7FFCFC6B88ull
+#define QM_TENSOR_7_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC7_CFG_BASE 0x7FFCFC6BC0ull
+#define QM_TENSOR_8_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC7_CFG_BASE 0x7FFCFC6BF8ull
+#define QM_TENSOR_9_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC7_CFG_BASE 0x7FFCFC6C30ull
+#define QM_TENSOR_10_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC7_CFG_BASE 0x7FFCFC6C68ull
+#define QM_TENSOR_11_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC7_CFG_BASE 0x7FFCFC6CA0ull
+#define QM_TENSOR_12_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC7_CFG_BASE 0x7FFCFC6CD8ull
+#define QM_TENSOR_13_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC7_CFG_BASE 0x7FFCFC6D10ull
+#define QM_TENSOR_14_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC7_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC7_CFG_BASE 0x7FFCFC6D48ull
+#define QM_TENSOR_15_TPC7_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC7_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC7_CFG_BASE 0x7FFCFC6D80ull
+#define QM_SYNC_OBJECT_TPC7_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC7_CFG_SECTION 0x8000
+#define mmQM_TPC7_CFG_BASE 0x7FFCFC6D88ull
+#define QM_TPC7_CFG_MAX_OFFSET 0xB800
+#define QM_TPC7_CFG_SECTION 0x2780
+#define mmTPC7_E2E_CRED_BASE 0x7FFCFC7000ull
+#define TPC7_E2E_CRED_MAX_OFFSET 0x1680
+#define TPC7_E2E_CRED_SECTION 0x1000
+#define mmTPC7_QM_BASE 0x7FFCFC8000ull
+#define TPC7_QM_MAX_OFFSET 0xD040
+#define TPC7_QM_SECTION 0x1038000
+#define mmMME_S_ROM_TABLE_BASE 0x7FFE000000ull
+#define MME_S_ROM_TABLE_MAX_OFFSET 0x1000
+#define MME_S_ROM_TABLE_SECTION 0x21000
+#define mmMME0_ACC_STM_BASE 0x7FFE021000ull
+#define MME0_ACC_STM_MAX_OFFSET 0x1000
+#define MME0_ACC_STM_SECTION 0x1000
+#define mmMME0_ACC_CTI_BASE 0x7FFE022000ull
+#define MME0_ACC_CTI_MAX_OFFSET 0x1000
+#define MME0_ACC_CTI_SECTION 0x1000
+#define mmMME0_ACC_ETF_BASE 0x7FFE023000ull
+#define MME0_ACC_ETF_MAX_OFFSET 0x1000
+#define MME0_ACC_ETF_SECTION 0x1000
+#define mmMME0_ACC_SPMU_BASE 0x7FFE024000ull
+#define MME0_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME0_ACC_SPMU_SECTION 0x1000
+#define mmMME0_ACC_CTI0_BASE 0x7FFE025000ull
+#define MME0_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME0_ACC_CTI0_SECTION 0x1000
+#define mmMME0_ACC_CTI1_BASE 0x7FFE026000ull
+#define MME0_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME0_ACC_CTI1_SECTION 0x1000
+#define mmMME0_ACC_BMON0_BASE 0x7FFE027000ull
+#define MME0_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME0_ACC_BMON0_SECTION 0x9000
+#define mmMME0_ACC_FUNNEL_BASE 0x7FFE030000ull
+#define MME0_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME0_ACC_FUNNEL_SECTION 0x11000
+#define mmMME0_SBAB_STM_BASE 0x7FFE041000ull
+#define MME0_SBAB_STM_MAX_OFFSET 0x1000
+#define MME0_SBAB_STM_SECTION 0x1000
+#define mmMME0_SBAB_CTI_BASE 0x7FFE042000ull
+#define MME0_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME0_SBAB_CTI_SECTION 0x1000
+#define mmMME0_SBAB_ETF_BASE 0x7FFE043000ull
+#define MME0_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME0_SBAB_ETF_SECTION 0x1000
+#define mmMME0_SBAB_SPMU_BASE 0x7FFE044000ull
+#define MME0_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME0_SBAB_SPMU_SECTION 0x1000
+#define mmMME0_SBAB_CTI0_BASE 0x7FFE045000ull
+#define MME0_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME0_SBAB_CTI0_SECTION 0x1000
+#define mmMME0_SBAB_CTI1_BASE 0x7FFE046000ull
+#define MME0_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME0_SBAB_CTI1_SECTION 0x1000
+#define mmMME0_SBAB_BMON0_BASE 0x7FFE047000ull
+#define MME0_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME0_SBAB_BMON0_SECTION 0x1000
+#define mmMME0_SBAB_BMON1_BASE 0x7FFE048000ull
+#define MME0_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME0_SBAB_BMON1_SECTION 0x19000
+#define mmMME0_CTRL_STM_BASE 0x7FFE061000ull
+#define MME0_CTRL_STM_MAX_OFFSET 0x1000
+#define MME0_CTRL_STM_SECTION 0x1000
+#define mmMME0_CTRL_CTI_BASE 0x7FFE062000ull
+#define MME0_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME0_CTRL_CTI_SECTION 0x1000
+#define mmMME0_CTRL_ETF_BASE 0x7FFE063000ull
+#define MME0_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME0_CTRL_ETF_SECTION 0x1000
+#define mmMME0_CTRL_SPMU_BASE 0x7FFE064000ull
+#define MME0_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME0_CTRL_SPMU_SECTION 0x1000
+#define mmMME0_CTRL_CTI0_BASE 0x7FFE065000ull
+#define MME0_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME0_CTRL_CTI0_SECTION 0x1000
+#define mmMME0_CTRL_CTI1_BASE 0x7FFE066000ull
+#define MME0_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME0_CTRL_CTI1_SECTION 0x1000
+#define mmMME0_CTRL_BMON0_BASE 0x7FFE067000ull
+#define MME0_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME0_CTRL_BMON0_SECTION 0x1000
+#define mmMME0_CTRL_BMON1_BASE 0x7FFE068000ull
+#define MME0_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME0_CTRL_BMON1_SECTION 0x39000
+#define mmMME1_ACC_STM_BASE 0x7FFE0A1000ull
+#define MME1_ACC_STM_MAX_OFFSET 0x1000
+#define MME1_ACC_STM_SECTION 0x1000
+#define mmMME1_ACC_CTI_BASE 0x7FFE0A2000ull
+#define MME1_ACC_CTI_MAX_OFFSET 0x1000
+#define MME1_ACC_CTI_SECTION 0x1000
+#define mmMME1_ACC_ETF_BASE 0x7FFE0A3000ull
+#define MME1_ACC_ETF_MAX_OFFSET 0x1000
+#define MME1_ACC_ETF_SECTION 0x1000
+#define mmMME1_ACC_SPMU_BASE 0x7FFE0A4000ull
+#define MME1_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME1_ACC_SPMU_SECTION 0x1000
+#define mmMME1_ACC_CTI0_BASE 0x7FFE0A5000ull
+#define MME1_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME1_ACC_CTI0_SECTION 0x1000
+#define mmMME1_ACC_CTI1_BASE 0x7FFE0A6000ull
+#define MME1_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME1_ACC_CTI1_SECTION 0x1000
+#define mmMME1_ACC_BMON0_BASE 0x7FFE0A7000ull
+#define MME1_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME1_ACC_BMON0_SECTION 0x9000
+#define mmMME1_ACC_FUNNEL_BASE 0x7FFE0B0000ull
+#define MME1_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME1_ACC_FUNNEL_SECTION 0x11000
+#define mmMME1_SBAB_STM_BASE 0x7FFE0C1000ull
+#define MME1_SBAB_STM_MAX_OFFSET 0x1000
+#define MME1_SBAB_STM_SECTION 0x1000
+#define mmMME1_SBAB_CTI_BASE 0x7FFE0C2000ull
+#define MME1_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME1_SBAB_CTI_SECTION 0x1000
+#define mmMME1_SBAB_ETF_BASE 0x7FFE0C3000ull
+#define MME1_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME1_SBAB_ETF_SECTION 0x1000
+#define mmMME1_SBAB_SPMU_BASE 0x7FFE0C4000ull
+#define MME1_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME1_SBAB_SPMU_SECTION 0x1000
+#define mmMME1_SBAB_CTI0_BASE 0x7FFE0C5000ull
+#define MME1_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME1_SBAB_CTI0_SECTION 0x1000
+#define mmMME1_SBAB_CTI1_BASE 0x7FFE0C6000ull
+#define MME1_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME1_SBAB_CTI1_SECTION 0x1000
+#define mmMME1_SBAB_BMON0_BASE 0x7FFE0C7000ull
+#define MME1_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME1_SBAB_BMON0_SECTION 0x1000
+#define mmMME1_SBAB_BMON1_BASE 0x7FFE0C8000ull
+#define MME1_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME1_SBAB_BMON1_SECTION 0x19000
+#define mmMME1_CTRL_STM_BASE 0x7FFE0E1000ull
+#define MME1_CTRL_STM_MAX_OFFSET 0x1000
+#define MME1_CTRL_STM_SECTION 0x1000
+#define mmMME1_CTRL_CTI_BASE 0x7FFE0E2000ull
+#define MME1_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME1_CTRL_CTI_SECTION 0x1000
+#define mmMME1_CTRL_ETF_BASE 0x7FFE0E3000ull
+#define MME1_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME1_CTRL_ETF_SECTION 0x1000
+#define mmMME1_CTRL_SPMU_BASE 0x7FFE0E4000ull
+#define MME1_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME1_CTRL_SPMU_SECTION 0x1000
+#define mmMME1_CTRL_CTI0_BASE 0x7FFE0E5000ull
+#define MME1_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME1_CTRL_CTI0_SECTION 0x1000
+#define mmMME1_CTRL_CTI1_BASE 0x7FFE0E6000ull
+#define MME1_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME1_CTRL_CTI1_SECTION 0x1000
+#define mmMME1_CTRL_BMON0_BASE 0x7FFE0E7000ull
+#define MME1_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME1_CTRL_BMON0_SECTION 0x1000
+#define mmMME1_CTRL_BMON1_BASE 0x7FFE0E8000ull
+#define MME1_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME1_CTRL_BMON1_SECTION 0x18000
+#define mmMME_N_ROM_TABLE_BASE 0x7FFE100000ull
+#define MME_N_ROM_TABLE_MAX_OFFSET 0x1000
+#define MME_N_ROM_TABLE_SECTION 0x21000
+#define mmMME2_ACC_STM_BASE 0x7FFE121000ull
+#define MME2_ACC_STM_MAX_OFFSET 0x1000
+#define MME2_ACC_STM_SECTION 0x1000
+#define mmMME2_ACC_CTI_BASE 0x7FFE122000ull
+#define MME2_ACC_CTI_MAX_OFFSET 0x1000
+#define MME2_ACC_CTI_SECTION 0x1000
+#define mmMME2_MME2_ACC_ETF_BASE 0x7FFE123000ull
+#define MME2_MME2_ACC_ETF_MAX_OFFSET 0x1000
+#define MME2_MME2_ACC_ETF_SECTION 0x1000
+#define mmMME2_ACC_SPMU_BASE 0x7FFE124000ull
+#define MME2_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME2_ACC_SPMU_SECTION 0x1000
+#define mmMME2_ACC_CTI0_BASE 0x7FFE125000ull
+#define MME2_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME2_ACC_CTI0_SECTION 0x1000
+#define mmMME2_ACC_CTI1_BASE 0x7FFE126000ull
+#define MME2_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME2_ACC_CTI1_SECTION 0x1000
+#define mmMME2_ACC_BMON0_BASE 0x7FFE127000ull
+#define MME2_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME2_ACC_BMON0_SECTION 0x9000
+#define mmMME2_ACC_FUNNEL_BASE 0x7FFE130000ull
+#define MME2_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME2_ACC_FUNNEL_SECTION 0x11000
+#define mmMME2_SBAB_STM_BASE 0x7FFE141000ull
+#define MME2_SBAB_STM_MAX_OFFSET 0x1000
+#define MME2_SBAB_STM_SECTION 0x1000
+#define mmMME2_SBAB_CTI_BASE 0x7FFE142000ull
+#define MME2_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME2_SBAB_CTI_SECTION 0x1000
+#define mmMME2_SBAB_ETF_BASE 0x7FFE143000ull
+#define MME2_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME2_SBAB_ETF_SECTION 0x1000
+#define mmMME2_SBAB_SPMU_BASE 0x7FFE144000ull
+#define MME2_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME2_SBAB_SPMU_SECTION 0x1000
+#define mmMME2_SBAB_CTI0_BASE 0x7FFE145000ull
+#define MME2_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME2_SBAB_CTI0_SECTION 0x1000
+#define mmMME2_SBAB_CTI1_BASE 0x7FFE146000ull
+#define MME2_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME2_SBAB_CTI1_SECTION 0x1000
+#define mmMME2_SBAB_BMON0_BASE 0x7FFE147000ull
+#define MME2_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME2_SBAB_BMON0_SECTION 0x1000
+#define mmMME2_SBAB_BMON1_BASE 0x7FFE148000ull
+#define MME2_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME2_SBAB_BMON1_SECTION 0x19000
+#define mmMME2_CTRL_STM_BASE 0x7FFE161000ull
+#define MME2_CTRL_STM_MAX_OFFSET 0x1000
+#define MME2_CTRL_STM_SECTION 0x1000
+#define mmMME2_CTRL_CTI_BASE 0x7FFE162000ull
+#define MME2_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME2_CTRL_CTI_SECTION 0x1000
+#define mmMME2_CTRL_ETF_BASE 0x7FFE163000ull
+#define MME2_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME2_CTRL_ETF_SECTION 0x1000
+#define mmMME2_CTRL_SPMU_BASE 0x7FFE164000ull
+#define MME2_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME2_CTRL_SPMU_SECTION 0x1000
+#define mmMME2_CTRL_CTI0_BASE 0x7FFE165000ull
+#define MME2_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME2_CTRL_CTI0_SECTION 0x1000
+#define mmMME2_CTRL_CTI1_BASE 0x7FFE166000ull
+#define MME2_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME2_CTRL_CTI1_SECTION 0x1000
+#define mmMME2_CTRL_BMON0_BASE 0x7FFE167000ull
+#define MME2_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME2_CTRL_BMON0_SECTION 0x1000
+#define mmMME2_CTRL_BMON1_BASE 0x7FFE168000ull
+#define MME2_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME2_CTRL_BMON1_SECTION 0x39000
+#define mmMME3_ACC_STM_BASE 0x7FFE1A1000ull
+#define MME3_ACC_STM_MAX_OFFSET 0x1000
+#define MME3_ACC_STM_SECTION 0x1000
+#define mmMME3_ACC_CTI_BASE 0x7FFE1A2000ull
+#define MME3_ACC_CTI_MAX_OFFSET 0x1000
+#define MME3_ACC_CTI_SECTION 0x1000
+#define mmMME3_ACC_ETF_BASE 0x7FFE1A3000ull
+#define MME3_ACC_ETF_MAX_OFFSET 0x1000
+#define MME3_ACC_ETF_SECTION 0x1000
+#define mmMME3_ACC_SPMU_BASE 0x7FFE1A4000ull
+#define MME3_ACC_SPMU_MAX_OFFSET 0x1000
+#define MME3_ACC_SPMU_SECTION 0x1000
+#define mmMME3_ACC_CTI0_BASE 0x7FFE1A5000ull
+#define MME3_ACC_CTI0_MAX_OFFSET 0x1000
+#define MME3_ACC_CTI0_SECTION 0x1000
+#define mmMME3_ACC_CTI1_BASE 0x7FFE1A6000ull
+#define MME3_ACC_CTI1_MAX_OFFSET 0x1000
+#define MME3_ACC_CTI1_SECTION 0x1000
+#define mmMME3_ACC_BMON0_BASE 0x7FFE1A7000ull
+#define MME3_ACC_BMON0_MAX_OFFSET 0x1000
+#define MME3_ACC_BMON0_SECTION 0x9000
+#define mmMME3_ACC_FUNNEL_BASE 0x7FFE1B0000ull
+#define MME3_ACC_FUNNEL_MAX_OFFSET 0x1000
+#define MME3_ACC_FUNNEL_SECTION 0x11000
+#define mmMME3_SBAB_STM_BASE 0x7FFE1C1000ull
+#define MME3_SBAB_STM_MAX_OFFSET 0x1000
+#define MME3_SBAB_STM_SECTION 0x1000
+#define mmMME3_SBAB_CTI_BASE 0x7FFE1C2000ull
+#define MME3_SBAB_CTI_MAX_OFFSET 0x1000
+#define MME3_SBAB_CTI_SECTION 0x1000
+#define mmMME3_SBAB_ETF_BASE 0x7FFE1C3000ull
+#define MME3_SBAB_ETF_MAX_OFFSET 0x1000
+#define MME3_SBAB_ETF_SECTION 0x1000
+#define mmMME3_SBAB_SPMU_BASE 0x7FFE1C4000ull
+#define MME3_SBAB_SPMU_MAX_OFFSET 0x1000
+#define MME3_SBAB_SPMU_SECTION 0x1000
+#define mmMME3_SBAB_CTI0_BASE 0x7FFE1C5000ull
+#define MME3_SBAB_CTI0_MAX_OFFSET 0x1000
+#define MME3_SBAB_CTI0_SECTION 0x1000
+#define mmMME3_SBAB_CTI1_BASE 0x7FFE1C6000ull
+#define MME3_SBAB_CTI1_MAX_OFFSET 0x1000
+#define MME3_SBAB_CTI1_SECTION 0x1000
+#define mmMME3_SBAB_BMON0_BASE 0x7FFE1C7000ull
+#define MME3_SBAB_BMON0_MAX_OFFSET 0x1000
+#define MME3_SBAB_BMON0_SECTION 0x1000
+#define mmMME3_SBAB_BMON1_BASE 0x7FFE1C8000ull
+#define MME3_SBAB_BMON1_MAX_OFFSET 0x1000
+#define MME3_SBAB_BMON1_SECTION 0x19000
+#define mmMME3_CTRL_STM_BASE 0x7FFE1E1000ull
+#define MME3_CTRL_STM_MAX_OFFSET 0x1000
+#define MME3_CTRL_STM_SECTION 0x1000
+#define mmMME3_CTRL_CTI_BASE 0x7FFE1E2000ull
+#define MME3_CTRL_CTI_MAX_OFFSET 0x1000
+#define MME3_CTRL_CTI_SECTION 0x1000
+#define mmMME3_CTRL_ETF_BASE 0x7FFE1E3000ull
+#define MME3_CTRL_ETF_MAX_OFFSET 0x1000
+#define MME3_CTRL_ETF_SECTION 0x1000
+#define mmMME3_CTRL_SPMU_BASE 0x7FFE1E4000ull
+#define MME3_CTRL_SPMU_MAX_OFFSET 0x1000
+#define MME3_CTRL_SPMU_SECTION 0x1000
+#define mmMME3_CTRL_CTI0_BASE 0x7FFE1E5000ull
+#define MME3_CTRL_CTI0_MAX_OFFSET 0x1000
+#define MME3_CTRL_CTI0_SECTION 0x1000
+#define mmMME3_CTRL_CTI1_BASE 0x7FFE1E6000ull
+#define MME3_CTRL_CTI1_MAX_OFFSET 0x1000
+#define MME3_CTRL_CTI1_SECTION 0x1000
+#define mmMME3_CTRL_BMON0_BASE 0x7FFE1E7000ull
+#define MME3_CTRL_BMON0_MAX_OFFSET 0x1000
+#define MME3_CTRL_BMON0_SECTION 0x1000
+#define mmMME3_CTRL_BMON1_BASE 0x7FFE1E8000ull
+#define MME3_CTRL_BMON1_MAX_OFFSET 0x1000
+#define MME3_CTRL_BMON1_SECTION 0x18000
+#define mmIC_ROM_TABLE_BASE 0x7FFE200000ull
+#define IC_ROM_TABLE_MAX_OFFSET 0x1000
+#define IC_ROM_TABLE_SECTION 0x1000
+#define mmSRAM_Y0_X0_FUNNEL_BASE 0x7FFE201000ull
+#define SRAM_Y0_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X1_FUNNEL_BASE 0x7FFE209000ull
+#define SRAM_Y0_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X2_FUNNEL_BASE 0x7FFE211000ull
+#define SRAM_Y0_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X3_FUNNEL_BASE 0x7FFE219000ull
+#define SRAM_Y0_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X4_FUNNEL_BASE 0x7FFE221000ull
+#define SRAM_Y0_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X5_FUNNEL_BASE 0x7FFE229000ull
+#define SRAM_Y0_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X6_FUNNEL_BASE 0x7FFE231000ull
+#define SRAM_Y0_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y0_X7_FUNNEL_BASE 0x7FFE239000ull
+#define SRAM_Y0_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y0_X7_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X0_FUNNEL_BASE 0x7FFE241000ull
+#define SRAM_Y1_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X1_FUNNEL_BASE 0x7FFE249000ull
+#define SRAM_Y1_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X2_FUNNEL_BASE 0x7FFE251000ull
+#define SRAM_Y1_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X3_FUNNEL_BASE 0x7FFE259000ull
+#define SRAM_Y1_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X4_FUNNEL_BASE 0x7FFE261000ull
+#define SRAM_Y1_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X5_FUNNEL_BASE 0x7FFE269000ull
+#define SRAM_Y1_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X6_FUNNEL_BASE 0x7FFE271000ull
+#define SRAM_Y1_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y1_X7_FUNNEL_BASE 0x7FFE279000ull
+#define SRAM_Y1_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y1_X7_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X0_FUNNEL_BASE 0x7FFE281000ull
+#define SRAM_Y2_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X1_FUNNEL_BASE 0x7FFE289000ull
+#define SRAM_Y2_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X2_FUNNEL_BASE 0x7FFE291000ull
+#define SRAM_Y2_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X3_FUNNEL_BASE 0x7FFE299000ull
+#define SRAM_Y2_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X4_FUNNEL_BASE 0x7FFE2A1000ull
+#define SRAM_Y2_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X5_FUNNEL_BASE 0x7FFE2A9000ull
+#define SRAM_Y2_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X6_FUNNEL_BASE 0x7FFE2B1000ull
+#define SRAM_Y2_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y2_X7_FUNNEL_BASE 0x7FFE2B9000ull
+#define SRAM_Y2_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y2_X7_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X0_FUNNEL_BASE 0x7FFE2C1000ull
+#define SRAM_Y3_X0_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X0_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X1_FUNNEL_BASE 0x7FFE2C9000ull
+#define SRAM_Y3_X1_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X1_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X2_FUNNEL_BASE 0x7FFE2D1000ull
+#define SRAM_Y3_X2_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X2_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X4_FUNNEL_BASE 0x7FFE2D9000ull
+#define SRAM_Y3_X4_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X4_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X3_FUNNEL_BASE 0x7FFE2E1000ull
+#define SRAM_Y3_X3_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X3_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X5_FUNNEL_BASE 0x7FFE2E9000ull
+#define SRAM_Y3_X5_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X5_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X6_FUNNEL_BASE 0x7FFE2F1000ull
+#define SRAM_Y3_X6_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X6_FUNNEL_SECTION 0x8000
+#define mmSRAM_Y3_X7_FUNNEL_BASE 0x7FFE2F9000ull
+#define SRAM_Y3_X7_FUNNEL_MAX_OFFSET 0x1000
+#define SRAM_Y3_X7_FUNNEL_SECTION 0x7000
+#define mmIF_ROM_TABLE_BASE 0x7FFE300000ull
+#define IF_ROM_TABLE_MAX_OFFSET 0x1000
+#define IF_ROM_TABLE_SECTION 0x1000
+#define mmSIF_FUNNEL_0_BASE 0x7FFE301000ull
+#define SIF_FUNNEL_0_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_0_SECTION 0x10000
+#define mmSIF_FUNNEL_1_BASE 0x7FFE311000ull
+#define SIF_FUNNEL_1_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_1_SECTION 0x10000
+#define mmSIF_FUNNEL_2_BASE 0x7FFE321000ull
+#define SIF_FUNNEL_2_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_2_SECTION 0x10000
+#define mmSIF_FUNNEL_3_BASE 0x7FFE331000ull
+#define SIF_FUNNEL_3_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_3_SECTION 0x10000
+#define mmSIF_FUNNEL_4_BASE 0x7FFE341000ull
+#define SIF_FUNNEL_4_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_4_SECTION 0x10000
+#define mmSIF_FUNNEL_5_BASE 0x7FFE351000ull
+#define SIF_FUNNEL_5_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_5_SECTION 0x10000
+#define mmSIF_FUNNEL_6_BASE 0x7FFE361000ull
+#define SIF_FUNNEL_6_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_6_SECTION 0x10000
+#define mmSIF_FUNNEL_7_BASE 0x7FFE371000ull
+#define SIF_FUNNEL_7_MAX_OFFSET 0x1000
+#define SIF_FUNNEL_7_SECTION 0x10000
+#define mmNIF_FUNNEL_0_BASE 0x7FFE381000ull
+#define NIF_FUNNEL_0_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_0_SECTION 0x10000
+#define mmNIF_FUNNEL_1_BASE 0x7FFE391000ull
+#define NIF_FUNNEL_1_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_1_SECTION 0x10000
+#define mmNIF_FUNNEL_2_BASE 0x7FFE3A1000ull
+#define NIF_FUNNEL_2_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_2_SECTION 0x10000
+#define mmNIF_FUNNEL_3_BASE 0x7FFE3B1000ull
+#define NIF_FUNNEL_3_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_3_SECTION 0x10000
+#define mmNIF_FUNNEL_4_BASE 0x7FFE3C1000ull
+#define NIF_FUNNEL_4_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_4_SECTION 0x10000
+#define mmNIF_FUNNEL_5_BASE 0x7FFE3D1000ull
+#define NIF_FUNNEL_5_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_5_SECTION 0x10000
+#define mmNIF_FUNNEL_6_BASE 0x7FFE3E1000ull
+#define NIF_FUNNEL_6_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_6_SECTION 0x10000
+#define mmNIF_FUNNEL_7_BASE 0x7FFE3F1000ull
+#define NIF_FUNNEL_7_MAX_OFFSET 0x1000
+#define NIF_FUNNEL_7_SECTION 0xF000
+#define mmDMA_IF_ROM_TABLE_BASE 0x7FFE400000ull
+#define DMA_IF_ROM_TABLE_MAX_OFFSET 0x1000
+#define DMA_IF_ROM_TABLE_SECTION 0x1000
+#define mmDMA_IF_W_S_STM_BASE 0x7FFE401000ull
+#define DMA_IF_W_S_STM_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_STM_SECTION 0x1000
+#define mmDMA_IF_W_S_CTI_BASE 0x7FFE402000ull
+#define DMA_IF_W_S_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_CTI_SECTION 0x1000
+#define mmDMA_IF_W_S_ETF_BASE 0x7FFE403000ull
+#define DMA_IF_W_S_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_ETF_SECTION 0x2000
+#define mmDMA_IF_W_S_BMON0_CTI_BASE 0x7FFE405000ull
+#define DMA_IF_W_S_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_W_S_BMON1_CTI_BASE 0x7FFE406000ull
+#define DMA_IF_W_S_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM0_WR_BMON_BASE 0x7FFE407000ull
+#define DMA_IF_W_S_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM0_RD_BMON_BASE 0x7FFE408000ull
+#define DMA_IF_W_S_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM1_WR_BMON_BASE 0x7FFE409000ull
+#define DMA_IF_W_S_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_HBM1_RD_BMON_BASE 0x7FFE40A000ull
+#define DMA_IF_W_S_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_S_SOB_WR_BMON_BASE 0x7FFE40B000ull
+#define DMA_IF_W_S_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_W_S_FUNNEL_BASE 0x7FFE40F000ull
+#define DMA_IF_W_S_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_W_S_FUNNEL_SECTION 0x12000
+#define mmDMA_IF_E_S_STM_BASE 0x7FFE421000ull
+#define DMA_IF_E_S_STM_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_STM_SECTION 0x1000
+#define mmDMA_IF_E_S_CTI_BASE 0x7FFE422000ull
+#define DMA_IF_E_S_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_CTI_SECTION 0x1000
+#define mmDMA_IF_E_S_ETF_BASE 0x7FFE423000ull
+#define DMA_IF_E_S_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_ETF_SECTION 0x2000
+#define mmDMA_IF_E_S_BMON0_CTI_BASE 0x7FFE425000ull
+#define DMA_IF_E_S_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_E_S_BMON1_CTI_BASE 0x7FFE426000ull
+#define DMA_IF_E_S_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM0_WR_BMON_BASE 0x7FFE427000ull
+#define DMA_IF_E_S_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM0_RD_BMON_BASE 0x7FFE428000ull
+#define DMA_IF_E_S_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM1_WR_BMON_BASE 0x7FFE429000ull
+#define DMA_IF_E_S_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_HBM1_RD_BMON_BASE 0x7FFE42A000ull
+#define DMA_IF_E_S_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_S_SOB_WR_BMON_BASE 0x7FFE42B000ull
+#define DMA_IF_E_S_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_E_S_FUNNEL_BASE 0x7FFE42F000ull
+#define DMA_IF_E_S_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_E_S_FUNNEL_SECTION 0x12000
+#define mmDMA_IF_W_N_STM_BASE 0x7FFE441000ull
+#define DMA_IF_W_N_STM_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_STM_SECTION 0x1000
+#define mmDMA_IF_W_N_CTI_BASE 0x7FFE442000ull
+#define DMA_IF_W_N_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_CTI_SECTION 0x1000
+#define mmDMA_IF_W_N_ETF_BASE 0x7FFE443000ull
+#define DMA_IF_W_N_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_ETF_SECTION 0x2000
+#define mmDMA_IF_W_N_BMON0_CTI_BASE 0x7FFE445000ull
+#define DMA_IF_W_N_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_W_N_BMON1_CTI_BASE 0x7FFE446000ull
+#define DMA_IF_W_N_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM0_WR_BMON_BASE 0x7FFE447000ull
+#define DMA_IF_W_N_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM0_RD_BMON_BASE 0x7FFE448000ull
+#define DMA_IF_W_N_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM1_WR_BMON_BASE 0x7FFE449000ull
+#define DMA_IF_W_N_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_HBM1_RD_BMON_BASE 0x7FFE44A000ull
+#define DMA_IF_W_N_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_W_N_SOB_WR_BMON_BASE 0x7FFE44B000ull
+#define DMA_IF_W_N_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_W_N_FUNNEL_BASE 0x7FFE44F000ull
+#define DMA_IF_W_N_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_W_N_FUNNEL_SECTION 0x12000
+#define mmDMA_IF_E_N_STM_BASE 0x7FFE461000ull
+#define DMA_IF_E_N_STM_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_STM_SECTION 0x1000
+#define mmDMA_IF_E_N_CTI_BASE 0x7FFE462000ull
+#define DMA_IF_E_N_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_CTI_SECTION 0x1000
+#define mmDMA_IF_E_N_ETF_BASE 0x7FFE463000ull
+#define DMA_IF_E_N_ETF_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_ETF_SECTION 0x2000
+#define mmDMA_IF_E_N_BMON0_CTI_BASE 0x7FFE465000ull
+#define DMA_IF_E_N_BMON0_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_BMON0_CTI_SECTION 0x1000
+#define mmDMA_IF_E_N_BMON1_CTI_BASE 0x7FFE466000ull
+#define DMA_IF_E_N_BMON1_CTI_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_BMON1_CTI_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM0_WR_BMON_BASE 0x7FFE467000ull
+#define DMA_IF_E_N_HBM0_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM0_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM0_RD_BMON_BASE 0x7FFE468000ull
+#define DMA_IF_E_N_HBM0_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM0_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM1_WR_BMON_BASE 0x7FFE469000ull
+#define DMA_IF_E_N_HBM1_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM1_WR_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_HBM1_RD_BMON_BASE 0x7FFE46A000ull
+#define DMA_IF_E_N_HBM1_RD_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_HBM1_RD_BMON_SECTION 0x1000
+#define mmDMA_IF_E_N_SOB_WR_BMON_BASE 0x7FFE46B000ull
+#define DMA_IF_E_N_SOB_WR_BMON_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_SOB_WR_BMON_SECTION 0x4000
+#define mmDMA_IF_E_N_FUNNEL_BASE 0x7FFE46F000ull
+#define DMA_IF_E_N_FUNNEL_MAX_OFFSET 0x1000
+#define DMA_IF_E_N_FUNNEL_SECTION 0x11000
+#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull
+#define CPU_ROM_TABLE_MAX_OFFSET 0x1000
+#define CPU_ROM_TABLE_SECTION 0x1000
+#define mmCPU_ETF_0_BASE 0x7FFE481000ull
+#define CPU_ETF_0_MAX_OFFSET 0x1000
+#define CPU_ETF_0_SECTION 0x1000
+#define mmCPU_ETF_1_BASE 0x7FFE482000ull
+#define CPU_ETF_1_MAX_OFFSET 0x1000
+#define CPU_ETF_1_SECTION 0x2000
+#define mmCPU_CTI_BASE 0x7FFE484000ull
+#define CPU_CTI_MAX_OFFSET 0x1000
+#define CPU_CTI_SECTION 0x1000
+#define mmCPU_FUNNEL_BASE 0x7FFE485000ull
+#define CPU_FUNNEL_MAX_OFFSET 0x1000
+#define CPU_FUNNEL_SECTION 0x1000
+#define mmCPU_STM_BASE 0x7FFE486000ull
+#define CPU_STM_MAX_OFFSET 0x1000
+#define CPU_STM_SECTION 0x1000
+#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull
+#define CPU_CTI_TRACE_MAX_OFFSET 0x1000
+#define CPU_CTI_TRACE_SECTION 0x1000
+#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull
+#define CPU_ETF_TRACE_MAX_OFFSET 0x1000
+#define CPU_ETF_TRACE_SECTION 0x1000
+#define mmCPU_WR_BMON_BASE 0x7FFE489000ull
+#define CPU_WR_BMON_MAX_OFFSET 0x1000
+#define CPU_WR_BMON_SECTION 0x1000
+#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull
+#define CPU_RD_BMON_MAX_OFFSET 0x1000
+#define CPU_RD_BMON_SECTION 0x76000
+#define mmDMA_ROM_TABLE_BASE 0x7FFE500000ull
+#define DMA_ROM_TABLE_MAX_OFFSET 0x1000
+#define DMA_ROM_TABLE_SECTION 0x1000
+#define mmDMA_CH_0_CS_STM_BASE 0x7FFE501000ull
+#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_STM_SECTION 0x1000
+#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE502000ull
+#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE503000ull
+#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE504000ull
+#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_0_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE505000ull
+#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE506000ull
+#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_0_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_0_BMON_0_BASE 0x7FFE507000ull
+#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_0_SECTION 0x1000
+#define mmDMA_CH_0_BMON_1_BASE 0x7FFE508000ull
+#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_0_BMON_1_SECTION 0x19000
+#define mmDMA_CH_1_CS_STM_BASE 0x7FFE521000ull
+#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_STM_SECTION 0x1000
+#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE522000ull
+#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE523000ull
+#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE524000ull
+#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_1_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE525000ull
+#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE526000ull
+#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_1_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_1_BMON_0_BASE 0x7FFE527000ull
+#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_0_SECTION 0x1000
+#define mmDMA_CH_1_BMON_1_BASE 0x7FFE528000ull
+#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_1_BMON_1_SECTION 0x19000
+#define mmDMA_CH_2_CS_STM_BASE 0x7FFE541000ull
+#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_STM_SECTION 0x1000
+#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE542000ull
+#define DMA_CH_2_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE543000ull
+#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE544000ull
+#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_2_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE545000ull
+#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE546000ull
+#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_2_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_2_BMON_0_BASE 0x7FFE547000ull
+#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_0_SECTION 0x1000
+#define mmDMA_CH_2_BMON_1_BASE 0x7FFE548000ull
+#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_2_BMON_1_SECTION 0x19000
+#define mmDMA_CH_3_CS_STM_BASE 0x7FFE561000ull
+#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_STM_SECTION 0x1000
+#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE562000ull
+#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE563000ull
+#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE564000ull
+#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_3_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE565000ull
+#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE566000ull
+#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_3_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_3_BMON_0_BASE 0x7FFE567000ull
+#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_0_SECTION 0x1000
+#define mmDMA_CH_3_BMON_1_BASE 0x7FFE568000ull
+#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_3_BMON_1_SECTION 0x19000
+#define mmDMA_CH_4_CS_STM_BASE 0x7FFE581000ull
+#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_STM_SECTION 0x1000
+#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE582000ull
+#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE583000ull
+#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE584000ull
+#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_4_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE585000ull
+#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE586000ull
+#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_4_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_4_BMON_0_BASE 0x7FFE587000ull
+#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_0_SECTION 0x1000
+#define mmDMA_CH_4_BMON_1_BASE 0x7FFE588000ull
+#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_4_BMON_1_SECTION 0x19000
+#define mmDMA_CH_5_CS_STM_BASE 0x7FFE5A1000ull
+#define DMA_CH_5_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_STM_SECTION 0x1000
+#define mmDMA_CH_5_CS_CTI_BASE 0x7FFE5A2000ull
+#define DMA_CH_5_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_5_CS_ETF_BASE 0x7FFE5A3000ull
+#define DMA_CH_5_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_5_CS_SPMU_BASE 0x7FFE5A4000ull
+#define DMA_CH_5_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_5_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_5_BMON_CTI_BASE 0x7FFE5A5000ull
+#define DMA_CH_5_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_5_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_5_USER_CTI_BASE 0x7FFE5A6000ull
+#define DMA_CH_5_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_5_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_5_BMON_0_BASE 0x7FFE5A7000ull
+#define DMA_CH_5_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_5_BMON_0_SECTION 0x1000
+#define mmDMA_CH_5_BMON_1_BASE 0x7FFE5A8000ull
+#define DMA_CH_5_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_5_BMON_1_SECTION 0x19000
+#define mmDMA_CH_6_CS_STM_BASE 0x7FFE5C1000ull
+#define DMA_CH_6_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_STM_SECTION 0x1000
+#define mmDMA_CH_6_CS_CTI_BASE 0x7FFE5C2000ull
+#define DMA_CH_6_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_6_CS_ETF_BASE 0x7FFE5C3000ull
+#define DMA_CH_6_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_6_CS_SPMU_BASE 0x7FFE5C4000ull
+#define DMA_CH_6_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_6_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_6_BMON_CTI_BASE 0x7FFE5C5000ull
+#define DMA_CH_6_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_6_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_6_USER_CTI_BASE 0x7FFE5C6000ull
+#define DMA_CH_6_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_6_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_6_BMON_0_BASE 0x7FFE5C7000ull
+#define DMA_CH_6_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_6_BMON_0_SECTION 0x1000
+#define mmDMA_CH_6_BMON_1_BASE 0x7FFE5C8000ull
+#define DMA_CH_6_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_6_BMON_1_SECTION 0x19000
+#define mmDMA_CH_7_CS_STM_BASE 0x7FFE5E1000ull
+#define DMA_CH_7_CS_STM_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_STM_SECTION 0x1000
+#define mmDMA_CH_7_CS_CTI_BASE 0x7FFE5E2000ull
+#define DMA_CH_7_CS_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_CTI_SECTION 0x1000
+#define mmDMA_CH_7_CS_ETF_BASE 0x7FFE5E3000ull
+#define DMA_CH_7_CS_ETF_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_ETF_SECTION 0x1000
+#define mmDMA_CH_7_CS_SPMU_BASE 0x7FFE5E4000ull
+#define DMA_CH_7_CS_SPMU_MAX_OFFSET 0x1000
+#define DMA_CH_7_CS_SPMU_SECTION 0x1000
+#define mmDMA_CH_7_BMON_CTI_BASE 0x7FFE5E5000ull
+#define DMA_CH_7_BMON_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_7_BMON_CTI_SECTION 0x1000
+#define mmDMA_CH_7_USER_CTI_BASE 0x7FFE5E6000ull
+#define DMA_CH_7_USER_CTI_MAX_OFFSET 0x1000
+#define DMA_CH_7_USER_CTI_SECTION 0x1000
+#define mmDMA_CH_7_BMON_0_BASE 0x7FFE5E7000ull
+#define DMA_CH_7_BMON_0_MAX_OFFSET 0x1000
+#define DMA_CH_7_BMON_0_SECTION 0x1000
+#define mmDMA_CH_7_BMON_1_BASE 0x7FFE5E8000ull
+#define DMA_CH_7_BMON_1_MAX_OFFSET 0x1000
+#define DMA_CH_7_BMON_1_SECTION 0x18000
+#define mmNIC_TPC_FUNNEL_W_S_BASE 0x7FFE600000ull
+#define NIC_TPC_FUNNEL_W_S_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_W_S_SECTION 0x80000
+#define mmNIC_TPC_FUNNEL_E_S_BASE 0x7FFE680000ull
+#define NIC_TPC_FUNNEL_E_S_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_E_S_SECTION 0x80000
+#define mmNIC_TPC_FUNNEL_W_N_BASE 0x7FFE700000ull
+#define NIC_TPC_FUNNEL_W_N_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_W_N_SECTION 0x80000
+#define mmNIC_TPC_FUNNEL_E_N_BASE 0x7FFE780000ull
+#define NIC_TPC_FUNNEL_E_N_MAX_OFFSET 0x1000
+#define NIC_TPC_FUNNEL_E_N_SECTION 0x80000
+#define mmCA53_BASE 0x7FFE800000ull
+#define CA53_MAX_OFFSET 0x141000
+#define CA53_SECTION 0x400000
+#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull
+#define PCI_ROM_TABLE_MAX_OFFSET 0x1000
+#define PCI_ROM_TABLE_SECTION 0x1000
+#define mmPCIE_STM_BASE 0x7FFEC01000ull
+#define PCIE_STM_MAX_OFFSET 0x1000
+#define PCIE_STM_SECTION 0x1000
+#define mmPCIE_ETF_BASE 0x7FFEC02000ull
+#define PCIE_ETF_MAX_OFFSET 0x1000
+#define PCIE_ETF_SECTION 0x1000
+#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull
+#define PCIE_CTI_0_MAX_OFFSET 0x1000
+#define PCIE_CTI_0_SECTION 0x1000
+#define mmPCIE_SPMU_BASE 0x7FFEC04000ull
+#define PCIE_SPMU_MAX_OFFSET 0x1000
+#define PCIE_SPMU_SECTION 0x1000
+#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull
+#define PCIE_CTI_1_MAX_OFFSET 0x1000
+#define PCIE_CTI_1_SECTION 0x1000
+#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull
+#define PCIE_FUNNEL_MAX_OFFSET 0x1000
+#define PCIE_FUNNEL_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull
+#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_WR_SECTION 0x1000
+#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull
+#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_MSTR_RD_SECTION 0x1000
+#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull
+#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_WR_SECTION 0x1000
+#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull
+#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000
+#define PCIE_BMON_SLV_RD_SECTION 0x7000
+#define mmMMU_CS_STM_BASE 0x7FFEC11000ull
+#define MMU_CS_STM_MAX_OFFSET 0x1000
+#define MMU_CS_STM_SECTION 0x1000
+#define mmMMU_CS_CTI_BASE 0x7FFEC12000ull
+#define MMU_CS_CTI_MAX_OFFSET 0x1000
+#define MMU_CS_CTI_SECTION 0x1000
+#define mmMMU_CS_ETF_BASE 0x7FFEC13000ull
+#define MMU_CS_ETF_MAX_OFFSET 0x1000
+#define MMU_CS_ETF_SECTION 0x1000
+#define mmMMU_CS_SPMU_BASE 0x7FFEC14000ull
+#define MMU_CS_SPMU_MAX_OFFSET 0x1000
+#define MMU_CS_SPMU_SECTION 0x1000
+#define mmMMU_BMON_CTI_BASE 0x7FFEC15000ull
+#define MMU_BMON_CTI_MAX_OFFSET 0x1000
+#define MMU_BMON_CTI_SECTION 0x1000
+#define mmMMU_USER_CTI_BASE 0x7FFEC16000ull
+#define MMU_USER_CTI_MAX_OFFSET 0x1000
+#define MMU_USER_CTI_SECTION 0x1000
+#define mmMMU_BMON_0_BASE 0x7FFEC17000ull
+#define MMU_BMON_0_MAX_OFFSET 0x1000
+#define MMU_BMON_0_SECTION 0x1000
+#define mmMMU_BMON_1_BASE 0x7FFEC18000ull
+#define MMU_BMON_1_MAX_OFFSET 0x1000
+#define MMU_BMON_1_SECTION 0x28000
+#define mmPSOC_CTI_BASE 0x7FFEC40000ull
+#define PSOC_CTI_MAX_OFFSET 0x1000
+#define PSOC_CTI_SECTION 0x1000
+#define mmPSOC_STM_BASE 0x7FFEC41000ull
+#define PSOC_STM_MAX_OFFSET 0x1000
+#define PSOC_STM_SECTION 0x1000
+#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull
+#define PSOC_FUNNEL_MAX_OFFSET 0x1000
+#define PSOC_FUNNEL_SECTION 0x1000
+#define mmPSOC_ETR_BASE 0x7FFEC43000ull
+#define PSOC_ETR_MAX_OFFSET 0x1000
+#define PSOC_ETR_SECTION 0x1000
+#define mmPSOC_ETF_BASE 0x7FFEC44000ull
+#define PSOC_ETF_MAX_OFFSET 0x1000
+#define PSOC_ETF_SECTION 0x1000
+#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull
+#define PSOC_TS_CTI_MAX_OFFSET 0x1000
+#define PSOC_TS_CTI_SECTION 0xB000
+#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull
+#define TOP_ROM_TABLE_MAX_OFFSET 0x1000
+#define TOP_ROM_TABLE_SECTION 0x70000
+#define mmNIC0_ROM_TABLE_BASE 0x7FFECC0000ull
+#define NIC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC0_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC0_DBG_BASE 0x7FFECC1000ull
+#define STM_0_NIC0_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC0_DBG_SECTION 0x1000
+#define mmCTI_0_NIC0_DBG_BASE 0x7FFECC2000ull
+#define CTI_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC0_DBG_SECTION 0x1000
+#define mmETF_0_NIC0_DBG_BASE 0x7FFECC3000ull
+#define ETF_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC0_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC0_DBG_BASE 0x7FFECC4000ull
+#define SPMU_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC0_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC0_DBG_BASE 0x7FFECC6000ull
+#define USER_CTI_0_NIC0_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC0_DBG_SECTION 0xB000
+#define mmSTM_1_NIC0_DBG_BASE 0x7FFECD1000ull
+#define STM_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC0_DBG_SECTION 0x1000
+#define mmCTI_1_NIC0_DBG_BASE 0x7FFECD2000ull
+#define CTI_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC0_DBG_SECTION 0x1000
+#define mmETF_1_NIC0_DBG_BASE 0x7FFECD3000ull
+#define ETF_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC0_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC0_DBG_BASE 0x7FFECD4000ull
+#define SPMU_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC0_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC0_DBG_BASE 0x7FFECD5000ull
+#define BMON_CTI_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC0_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC0_DBG_BASE 0x7FFECD6000ull
+#define USER_CTI_1_NIC0_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC0_DBG_SECTION 0x1000
+#define mmBMON0_NIC0_DBG_BASE 0x7FFECD7000ull
+#define BMON0_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC0_DBG_SECTION 0x1000
+#define mmBMON1_NIC0_DBG_BASE 0x7FFECD8000ull
+#define BMON1_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC0_DBG_SECTION 0x1000
+#define mmBMON2_NIC0_DBG_BASE 0x7FFECD9000ull
+#define BMON2_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC0_DBG_SECTION 0x1000
+#define mmBMON3_NIC0_DBG_BASE 0x7FFECDA000ull
+#define BMON3_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC0_DBG_SECTION 0x1000
+#define mmBMON4_NIC0_DBG_BASE 0x7FFECDB000ull
+#define BMON4_NIC0_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC0_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC0_DBG_BASE 0x7FFECE1000ull
+#define FUNNEL_NIC0_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC0_DBG_SECTION 0x1F000
+#define mmNIC1_ROM_TABLE_BASE 0x7FFED00000ull
+#define NIC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC1_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC1_DBG_BASE 0x7FFED01000ull
+#define STM_0_NIC1_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC1_DBG_SECTION 0x1000
+#define mmCTI_0_NIC1_DBG_BASE 0x7FFED02000ull
+#define CTI_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC1_DBG_SECTION 0x1000
+#define mmETF_0_NIC1_DBG_BASE 0x7FFED03000ull
+#define ETF_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC1_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC1_DBG_BASE 0x7FFED04000ull
+#define SPMU_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC1_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC1_DBG_BASE 0x7FFED06000ull
+#define USER_CTI_0_NIC1_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC1_DBG_SECTION 0xB000
+#define mmSTM_1_NIC1_DBG_BASE 0x7FFED11000ull
+#define STM_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC1_DBG_SECTION 0x1000
+#define mmCTI_1_NIC1_DBG_BASE 0x7FFED12000ull
+#define CTI_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC1_DBG_SECTION 0x1000
+#define mmETF_1_NIC1_DBG_BASE 0x7FFED13000ull
+#define ETF_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC1_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC1_DBG_BASE 0x7FFED14000ull
+#define SPMU_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC1_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC1_DBG_BASE 0x7FFED15000ull
+#define BMON_CTI_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC1_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC1_DBG_BASE 0x7FFED16000ull
+#define USER_CTI_1_NIC1_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC1_DBG_SECTION 0x1000
+#define mmBMON0_NIC1_DBG_BASE 0x7FFED17000ull
+#define BMON0_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC1_DBG_SECTION 0x1000
+#define mmBMON1_NIC1_DBG_BASE 0x7FFED18000ull
+#define BMON1_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC1_DBG_SECTION 0x1000
+#define mmBMON2_NIC1_DBG_BASE 0x7FFED19000ull
+#define BMON2_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC1_DBG_SECTION 0x1000
+#define mmBMON3_NIC1_DBG_BASE 0x7FFED1A000ull
+#define BMON3_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC1_DBG_SECTION 0x1000
+#define mmBMON4_NIC1_DBG_BASE 0x7FFED1B000ull
+#define BMON4_NIC1_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC1_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC1_DBG_BASE 0x7FFED21000ull
+#define FUNNEL_NIC1_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC1_DBG_SECTION 0x1F000
+#define mmNIC2_ROM_TABLE_BASE 0x7FFED40000ull
+#define NIC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC2_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC2_DBG_BASE 0x7FFED41000ull
+#define STM_0_NIC2_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC2_DBG_SECTION 0x1000
+#define mmCTI_0_NIC2_DBG_BASE 0x7FFED42000ull
+#define CTI_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC2_DBG_SECTION 0x1000
+#define mmETF_0_NIC2_DBG_BASE 0x7FFED43000ull
+#define ETF_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC2_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC2_DBG_BASE 0x7FFED44000ull
+#define SPMU_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC2_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC2_DBG_BASE 0x7FFED46000ull
+#define USER_CTI_0_NIC2_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC2_DBG_SECTION 0xB000
+#define mmSTM_1_NIC2_DBG_BASE 0x7FFED51000ull
+#define STM_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC2_DBG_SECTION 0x1000
+#define mmCTI_1_NIC2_DBG_BASE 0x7FFED52000ull
+#define CTI_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC2_DBG_SECTION 0x1000
+#define mmETF_1_NIC2_DBG_BASE 0x7FFED53000ull
+#define ETF_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC2_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC2_DBG_BASE 0x7FFED54000ull
+#define SPMU_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC2_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC2_DBG_BASE 0x7FFED55000ull
+#define BMON_CTI_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC2_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC2_DBG_BASE 0x7FFED56000ull
+#define USER_CTI_1_NIC2_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC2_DBG_SECTION 0x1000
+#define mmBMON0_NIC2_DBG_BASE 0x7FFED57000ull
+#define BMON0_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC2_DBG_SECTION 0x1000
+#define mmBMON1_NIC2_DBG_BASE 0x7FFED58000ull
+#define BMON1_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC2_DBG_SECTION 0x1000
+#define mmBMON2_NIC2_DBG_BASE 0x7FFED59000ull
+#define BMON2_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC2_DBG_SECTION 0x1000
+#define mmBMON3_NIC2_DBG_BASE 0x7FFED5A000ull
+#define BMON3_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC2_DBG_SECTION 0x1000
+#define mmBMON4_NIC2_DBG_BASE 0x7FFED5B000ull
+#define BMON4_NIC2_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC2_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC2_DBG_BASE 0x7FFED61000ull
+#define FUNNEL_NIC2_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC2_DBG_SECTION 0x1F000
+#define mmNIC3_ROM_TABLE_BASE 0x7FFED80000ull
+#define NIC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC3_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC3_DBG_BASE 0x7FFED81000ull
+#define STM_0_NIC3_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC3_DBG_SECTION 0x1000
+#define mmCTI_0_NIC3_DBG_BASE 0x7FFED82000ull
+#define CTI_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC3_DBG_SECTION 0x1000
+#define mmETF_0_NIC3_DBG_BASE 0x7FFED83000ull
+#define ETF_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC3_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC3_DBG_BASE 0x7FFED84000ull
+#define SPMU_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC3_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC3_DBG_BASE 0x7FFED86000ull
+#define USER_CTI_0_NIC3_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC3_DBG_SECTION 0xB000
+#define mmSTM_1_NIC3_DBG_BASE 0x7FFED91000ull
+#define STM_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC3_DBG_SECTION 0x1000
+#define mmCTI_1_NIC3_DBG_BASE 0x7FFED92000ull
+#define CTI_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC3_DBG_SECTION 0x1000
+#define mmETF_1_NIC3_DBG_BASE 0x7FFED93000ull
+#define ETF_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC3_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC3_DBG_BASE 0x7FFED94000ull
+#define SPMU_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC3_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC3_DBG_BASE 0x7FFED95000ull
+#define BMON_CTI_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC3_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC3_DBG_BASE 0x7FFED96000ull
+#define USER_CTI_1_NIC3_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC3_DBG_SECTION 0x1000
+#define mmBMON0_NIC3_DBG_BASE 0x7FFED97000ull
+#define BMON0_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC3_DBG_SECTION 0x1000
+#define mmBMON1_NIC3_DBG_BASE 0x7FFED98000ull
+#define BMON1_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC3_DBG_SECTION 0x1000
+#define mmBMON2_NIC3_DBG_BASE 0x7FFED99000ull
+#define BMON2_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC3_DBG_SECTION 0x1000
+#define mmBMON3_NIC3_DBG_BASE 0x7FFED9A000ull
+#define BMON3_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC3_DBG_SECTION 0x1000
+#define mmBMON4_NIC3_DBG_BASE 0x7FFED9B000ull
+#define BMON4_NIC3_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC3_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC3_DBG_BASE 0x7FFEDA1000ull
+#define FUNNEL_NIC3_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC3_DBG_SECTION 0x1F000
+#define mmNIC4_ROM_TABLE_BASE 0x7FFEDC0000ull
+#define NIC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define NIC4_ROM_TABLE_SECTION 0x1000
+#define mmSTM_0_NIC4_DBG_BASE 0x7FFEDC1000ull
+#define STM_0_NIC4_DBG_MAX_OFFSET 0x21000
+#define STM_0_NIC4_DBG_SECTION 0x1000
+#define mmCTI_0_NIC4_DBG_BASE 0x7FFEDC2000ull
+#define CTI_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define CTI_0_NIC4_DBG_SECTION 0x1000
+#define mmETF_0_NIC4_DBG_BASE 0x7FFEDC3000ull
+#define ETF_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define ETF_0_NIC4_DBG_SECTION 0x1000
+#define mmSPMU_0_NIC4_DBG_BASE 0x7FFEDC4000ull
+#define SPMU_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define SPMU_0_NIC4_DBG_SECTION 0x2000
+#define mmUSER_CTI_0_NIC4_DBG_BASE 0x7FFEDC6000ull
+#define USER_CTI_0_NIC4_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_0_NIC4_DBG_SECTION 0xB000
+#define mmSTM_1_NIC4_DBG_BASE 0x7FFEDD1000ull
+#define STM_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define STM_1_NIC4_DBG_SECTION 0x1000
+#define mmCTI_1_NIC4_DBG_BASE 0x7FFEDD2000ull
+#define CTI_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define CTI_1_NIC4_DBG_SECTION 0x1000
+#define mmETF_1_NIC4_DBG_BASE 0x7FFEDD3000ull
+#define ETF_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define ETF_1_NIC4_DBG_SECTION 0x1000
+#define mmSPMU_1_NIC4_DBG_BASE 0x7FFEDD4000ull
+#define SPMU_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define SPMU_1_NIC4_DBG_SECTION 0x1000
+#define mmBMON_CTI_NIC4_DBG_BASE 0x7FFEDD5000ull
+#define BMON_CTI_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON_CTI_NIC4_DBG_SECTION 0x1000
+#define mmUSER_CTI_1_NIC4_DBG_BASE 0x7FFEDD6000ull
+#define USER_CTI_1_NIC4_DBG_MAX_OFFSET 0x1000
+#define USER_CTI_1_NIC4_DBG_SECTION 0x1000
+#define mmBMON0_NIC4_DBG_BASE 0x7FFEDD7000ull
+#define BMON0_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON0_NIC4_DBG_SECTION 0x1000
+#define mmBMON1_NIC4_DBG_BASE 0x7FFEDD8000ull
+#define BMON1_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON1_NIC4_DBG_SECTION 0x1000
+#define mmBMON2_NIC4_DBG_BASE 0x7FFEDD9000ull
+#define BMON2_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON2_NIC4_DBG_SECTION 0x1000
+#define mmBMON3_NIC4_DBG_BASE 0x7FFEDDA000ull
+#define BMON3_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON3_NIC4_DBG_SECTION 0x1000
+#define mmBMON4_NIC4_DBG_BASE 0x7FFEDDB000ull
+#define BMON4_NIC4_DBG_MAX_OFFSET 0x1000
+#define BMON4_NIC4_DBG_SECTION 0x6000
+#define mmFUNNEL_NIC4_DBG_BASE 0x7FFEDE1000ull
+#define FUNNEL_NIC4_DBG_MAX_OFFSET 0x1000
+#define FUNNEL_NIC4_DBG_SECTION 0x21F000
+#define mmTPC0_ROM_TABLE_BASE 0x7FFF000000ull
+#define TPC0_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC0_ROM_TABLE_SECTION 0x1000
+#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull
+#define TPC0_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC0_EML_SPMU_SECTION 0x1000
+#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull
+#define TPC0_EML_ETF_MAX_OFFSET 0x1000
+#define TPC0_EML_ETF_SECTION 0x1000
+#define mmTPC0_EML_STM_BASE 0x7FFF003000ull
+#define TPC0_EML_STM_MAX_OFFSET 0x1000
+#define TPC0_EML_STM_SECTION 0x2000
+#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull
+#define TPC0_EML_CTI_MAX_OFFSET 0x1000
+#define TPC0_EML_CTI_SECTION 0x1000
+#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull
+#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC0_EML_FUNNEL_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull
+#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull
+#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull
+#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull
+#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC0_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull
+#define TPC0_EML_CFG_MAX_OFFSET 0x3380
+#define TPC0_EML_CFG_SECTION 0x1000
+#define mmTPC0_EML_TPC_CFG_BASE 0x7FFF041000ull
+#define TPC0_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC0_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC0_EML_TPC_CFG_BASE 0x7FFF041400ull
+#define KERNEL_TENSOR_0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC0_EML_TPC_CFG_BASE 0x7FFF041438ull
+#define KERNEL_TENSOR_1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC0_EML_TPC_CFG_BASE 0x7FFF041470ull
+#define KERNEL_TENSOR_2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC0_EML_TPC_CFG_BASE 0x7FFF0414A8ull
+#define KERNEL_TENSOR_3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC0_EML_TPC_CFG_BASE 0x7FFF0414E0ull
+#define KERNEL_TENSOR_4_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC0_EML_TPC_CFG_BASE 0x7FFF041518ull
+#define KERNEL_TENSOR_5_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC0_EML_TPC_CFG_BASE 0x7FFF041550ull
+#define KERNEL_TENSOR_6_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC0_EML_TPC_CFG_BASE 0x7FFF041588ull
+#define KERNEL_TENSOR_7_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC0_EML_TPC_CFG_BASE 0x7FFF0415C0ull
+#define KERNEL_TENSOR_8_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC0_EML_TPC_CFG_BASE 0x7FFF0415F8ull
+#define KERNEL_TENSOR_9_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC0_EML_TPC_CFG_BASE 0x7FFF041630ull
+#define KERNEL_TENSOR_10_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC0_EML_TPC_CFG_BASE 0x7FFF041668ull
+#define KERNEL_TENSOR_11_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC0_EML_TPC_CFG_BASE 0x7FFF0416A0ull
+#define KERNEL_TENSOR_12_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC0_EML_TPC_CFG_BASE 0x7FFF0416D8ull
+#define KERNEL_TENSOR_13_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC0_EML_TPC_CFG_BASE 0x7FFF041710ull
+#define KERNEL_TENSOR_14_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC0_EML_TPC_CFG_BASE 0x7FFF041748ull
+#define KERNEL_TENSOR_15_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_BASE 0x7FFF041780ull
+#define KERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC0_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC0_EML_TPC_CFG_BASE 0x7FFF041788ull
+#define KERNEL_TPC0_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC0_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC0_EML_TPC_CFG_BASE 0x7FFF041A00ull
+#define QM_TENSOR_0_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC0_EML_TPC_CFG_BASE 0x7FFF041A38ull
+#define QM_TENSOR_1_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC0_EML_TPC_CFG_BASE 0x7FFF041A70ull
+#define QM_TENSOR_2_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC0_EML_TPC_CFG_BASE 0x7FFF041AA8ull
+#define QM_TENSOR_3_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC0_EML_TPC_CFG_BASE 0x7FFF041AE0ull
+#define QM_TENSOR_4_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC0_EML_TPC_CFG_BASE 0x7FFF041B18ull
+#define QM_TENSOR_5_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC0_EML_TPC_CFG_BASE 0x7FFF041B50ull
+#define QM_TENSOR_6_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC0_EML_TPC_CFG_BASE 0x7FFF041B88ull
+#define QM_TENSOR_7_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC0_EML_TPC_CFG_BASE 0x7FFF041BC0ull
+#define QM_TENSOR_8_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC0_EML_TPC_CFG_BASE 0x7FFF041BF8ull
+#define QM_TENSOR_9_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC0_EML_TPC_CFG_BASE 0x7FFF041C30ull
+#define QM_TENSOR_10_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC0_EML_TPC_CFG_BASE 0x7FFF041C68ull
+#define QM_TENSOR_11_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC0_EML_TPC_CFG_BASE 0x7FFF041CA0ull
+#define QM_TENSOR_12_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC0_EML_TPC_CFG_BASE 0x7FFF041CD8ull
+#define QM_TENSOR_13_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC0_EML_TPC_CFG_BASE 0x7FFF041D10ull
+#define QM_TENSOR_14_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC0_EML_TPC_CFG_BASE 0x7FFF041D48ull
+#define QM_TENSOR_15_TPC0_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC0_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC0_EML_TPC_CFG_BASE 0x7FFF041D80ull
+#define QM_SYNC_OBJECT_TPC0_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC0_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC0_EML_TPC_CFG_BASE 0x7FFF041D88ull
+#define QM_TPC0_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC0_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC0_EML_TPC_QM_BASE 0x7FFF042000ull
+#define TPC0_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC0_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull
+#define TPC0_EML_CS_MAX_OFFSET 0x1000
+#define TPC0_EML_CS_SECTION 0x1000
+#define mmTPC1_ROM_TABLE_BASE 0x7FFF200000ull
+#define TPC1_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC1_ROM_TABLE_SECTION 0x1000
+#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull
+#define TPC1_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC1_EML_SPMU_SECTION 0x1000
+#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull
+#define TPC1_EML_ETF_MAX_OFFSET 0x1000
+#define TPC1_EML_ETF_SECTION 0x1000
+#define mmTPC1_EML_STM_BASE 0x7FFF203000ull
+#define TPC1_EML_STM_MAX_OFFSET 0x1000
+#define TPC1_EML_STM_SECTION 0x2000
+#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull
+#define TPC1_EML_CTI_MAX_OFFSET 0x1000
+#define TPC1_EML_CTI_SECTION 0x1000
+#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull
+#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC1_EML_FUNNEL_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull
+#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull
+#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull
+#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull
+#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC1_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull
+#define TPC1_EML_CFG_MAX_OFFSET 0x3380
+#define TPC1_EML_CFG_SECTION 0x1000
+#define mmTPC1_EML_TPC_CFG_BASE 0x7FFF241000ull
+#define TPC1_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC1_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC1_EML_TPC_CFG_BASE 0x7FFF241400ull
+#define KERNEL_TENSOR_0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC1_EML_TPC_CFG_BASE 0x7FFF241438ull
+#define KERNEL_TENSOR_1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC1_EML_TPC_CFG_BASE 0x7FFF241470ull
+#define KERNEL_TENSOR_2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC1_EML_TPC_CFG_BASE 0x7FFF2414A8ull
+#define KERNEL_TENSOR_3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC1_EML_TPC_CFG_BASE 0x7FFF2414E0ull
+#define KERNEL_TENSOR_4_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC1_EML_TPC_CFG_BASE 0x7FFF241518ull
+#define KERNEL_TENSOR_5_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC1_EML_TPC_CFG_BASE 0x7FFF241550ull
+#define KERNEL_TENSOR_6_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC1_EML_TPC_CFG_BASE 0x7FFF241588ull
+#define KERNEL_TENSOR_7_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC1_EML_TPC_CFG_BASE 0x7FFF2415C0ull
+#define KERNEL_TENSOR_8_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC1_EML_TPC_CFG_BASE 0x7FFF2415F8ull
+#define KERNEL_TENSOR_9_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC1_EML_TPC_CFG_BASE 0x7FFF241630ull
+#define KERNEL_TENSOR_10_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC1_EML_TPC_CFG_BASE 0x7FFF241668ull
+#define KERNEL_TENSOR_11_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC1_EML_TPC_CFG_BASE 0x7FFF2416A0ull
+#define KERNEL_TENSOR_12_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC1_EML_TPC_CFG_BASE 0x7FFF2416D8ull
+#define KERNEL_TENSOR_13_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC1_EML_TPC_CFG_BASE 0x7FFF241710ull
+#define KERNEL_TENSOR_14_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC1_EML_TPC_CFG_BASE 0x7FFF241748ull
+#define KERNEL_TENSOR_15_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_BASE 0x7FFF241780ull
+#define KERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC1_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC1_EML_TPC_CFG_BASE 0x7FFF241788ull
+#define KERNEL_TPC1_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC1_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC1_EML_TPC_CFG_BASE 0x7FFF241A00ull
+#define QM_TENSOR_0_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC1_EML_TPC_CFG_BASE 0x7FFF241A38ull
+#define QM_TENSOR_1_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC1_EML_TPC_CFG_BASE 0x7FFF241A70ull
+#define QM_TENSOR_2_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC1_EML_TPC_CFG_BASE 0x7FFF241AA8ull
+#define QM_TENSOR_3_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC1_EML_TPC_CFG_BASE 0x7FFF241AE0ull
+#define QM_TENSOR_4_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC1_EML_TPC_CFG_BASE 0x7FFF241B18ull
+#define QM_TENSOR_5_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC1_EML_TPC_CFG_BASE 0x7FFF241B50ull
+#define QM_TENSOR_6_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC1_EML_TPC_CFG_BASE 0x7FFF241B88ull
+#define QM_TENSOR_7_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC1_EML_TPC_CFG_BASE 0x7FFF241BC0ull
+#define QM_TENSOR_8_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC1_EML_TPC_CFG_BASE 0x7FFF241BF8ull
+#define QM_TENSOR_9_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC1_EML_TPC_CFG_BASE 0x7FFF241C30ull
+#define QM_TENSOR_10_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC1_EML_TPC_CFG_BASE 0x7FFF241C68ull
+#define QM_TENSOR_11_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC1_EML_TPC_CFG_BASE 0x7FFF241CA0ull
+#define QM_TENSOR_12_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC1_EML_TPC_CFG_BASE 0x7FFF241CD8ull
+#define QM_TENSOR_13_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC1_EML_TPC_CFG_BASE 0x7FFF241D10ull
+#define QM_TENSOR_14_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC1_EML_TPC_CFG_BASE 0x7FFF241D48ull
+#define QM_TENSOR_15_TPC1_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC1_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC1_EML_TPC_CFG_BASE 0x7FFF241D80ull
+#define QM_SYNC_OBJECT_TPC1_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC1_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC1_EML_TPC_CFG_BASE 0x7FFF241D88ull
+#define QM_TPC1_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC1_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC1_EML_TPC_QM_BASE 0x7FFF242000ull
+#define TPC1_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC1_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull
+#define TPC1_EML_CS_MAX_OFFSET 0x1000
+#define TPC1_EML_CS_SECTION 0x1000
+#define mmTPC2_ROM_TABLE_BASE 0x7FFF400000ull
+#define TPC2_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC2_ROM_TABLE_SECTION 0x1000
+#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull
+#define TPC2_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC2_EML_SPMU_SECTION 0x1000
+#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull
+#define TPC2_EML_ETF_MAX_OFFSET 0x1000
+#define TPC2_EML_ETF_SECTION 0x1000
+#define mmTPC2_EML_STM_BASE 0x7FFF403000ull
+#define TPC2_EML_STM_MAX_OFFSET 0x1000
+#define TPC2_EML_STM_SECTION 0x2000
+#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull
+#define TPC2_EML_CTI_MAX_OFFSET 0x1000
+#define TPC2_EML_CTI_SECTION 0x1000
+#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull
+#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC2_EML_FUNNEL_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull
+#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull
+#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull
+#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull
+#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC2_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull
+#define TPC2_EML_CFG_MAX_OFFSET 0x3380
+#define TPC2_EML_CFG_SECTION 0x1000
+#define mmTPC2_EML_TPC_CFG_BASE 0x7FFF441000ull
+#define TPC2_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC2_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC2_EML_TPC_CFG_BASE 0x7FFF441400ull
+#define KERNEL_TENSOR_0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC2_EML_TPC_CFG_BASE 0x7FFF441438ull
+#define KERNEL_TENSOR_1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC2_EML_TPC_CFG_BASE 0x7FFF441470ull
+#define KERNEL_TENSOR_2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC2_EML_TPC_CFG_BASE 0x7FFF4414A8ull
+#define KERNEL_TENSOR_3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC2_EML_TPC_CFG_BASE 0x7FFF4414E0ull
+#define KERNEL_TENSOR_4_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC2_EML_TPC_CFG_BASE 0x7FFF441518ull
+#define KERNEL_TENSOR_5_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC2_EML_TPC_CFG_BASE 0x7FFF441550ull
+#define KERNEL_TENSOR_6_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC2_EML_TPC_CFG_BASE 0x7FFF441588ull
+#define KERNEL_TENSOR_7_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC2_EML_TPC_CFG_BASE 0x7FFF4415C0ull
+#define KERNEL_TENSOR_8_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC2_EML_TPC_CFG_BASE 0x7FFF4415F8ull
+#define KERNEL_TENSOR_9_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC2_EML_TPC_CFG_BASE 0x7FFF441630ull
+#define KERNEL_TENSOR_10_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC2_EML_TPC_CFG_BASE 0x7FFF441668ull
+#define KERNEL_TENSOR_11_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC2_EML_TPC_CFG_BASE 0x7FFF4416A0ull
+#define KERNEL_TENSOR_12_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC2_EML_TPC_CFG_BASE 0x7FFF4416D8ull
+#define KERNEL_TENSOR_13_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC2_EML_TPC_CFG_BASE 0x7FFF441710ull
+#define KERNEL_TENSOR_14_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC2_EML_TPC_CFG_BASE 0x7FFF441748ull
+#define KERNEL_TENSOR_15_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_BASE 0x7FFF441780ull
+#define KERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC2_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC2_EML_TPC_CFG_BASE 0x7FFF441788ull
+#define KERNEL_TPC2_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC2_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC2_EML_TPC_CFG_BASE 0x7FFF441A00ull
+#define QM_TENSOR_0_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC2_EML_TPC_CFG_BASE 0x7FFF441A38ull
+#define QM_TENSOR_1_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC2_EML_TPC_CFG_BASE 0x7FFF441A70ull
+#define QM_TENSOR_2_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC2_EML_TPC_CFG_BASE 0x7FFF441AA8ull
+#define QM_TENSOR_3_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC2_EML_TPC_CFG_BASE 0x7FFF441AE0ull
+#define QM_TENSOR_4_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC2_EML_TPC_CFG_BASE 0x7FFF441B18ull
+#define QM_TENSOR_5_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC2_EML_TPC_CFG_BASE 0x7FFF441B50ull
+#define QM_TENSOR_6_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC2_EML_TPC_CFG_BASE 0x7FFF441B88ull
+#define QM_TENSOR_7_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC2_EML_TPC_CFG_BASE 0x7FFF441BC0ull
+#define QM_TENSOR_8_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC2_EML_TPC_CFG_BASE 0x7FFF441BF8ull
+#define QM_TENSOR_9_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC2_EML_TPC_CFG_BASE 0x7FFF441C30ull
+#define QM_TENSOR_10_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC2_EML_TPC_CFG_BASE 0x7FFF441C68ull
+#define QM_TENSOR_11_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC2_EML_TPC_CFG_BASE 0x7FFF441CA0ull
+#define QM_TENSOR_12_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC2_EML_TPC_CFG_BASE 0x7FFF441CD8ull
+#define QM_TENSOR_13_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC2_EML_TPC_CFG_BASE 0x7FFF441D10ull
+#define QM_TENSOR_14_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC2_EML_TPC_CFG_BASE 0x7FFF441D48ull
+#define QM_TENSOR_15_TPC2_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC2_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC2_EML_TPC_CFG_BASE 0x7FFF441D80ull
+#define QM_SYNC_OBJECT_TPC2_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC2_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC2_EML_TPC_CFG_BASE 0x7FFF441D88ull
+#define QM_TPC2_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC2_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC2_EML_TPC_QM_BASE 0x7FFF442000ull
+#define TPC2_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC2_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull
+#define TPC2_EML_CS_MAX_OFFSET 0x1000
+#define TPC2_EML_CS_SECTION 0x1000
+#define mmTPC3_ROM_TABLE_BASE 0x7FFF600000ull
+#define TPC3_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC3_ROM_TABLE_SECTION 0x1000
+#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull
+#define TPC3_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC3_EML_SPMU_SECTION 0x1000
+#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull
+#define TPC3_EML_ETF_MAX_OFFSET 0x1000
+#define TPC3_EML_ETF_SECTION 0x1000
+#define mmTPC3_EML_STM_BASE 0x7FFF603000ull
+#define TPC3_EML_STM_MAX_OFFSET 0x1000
+#define TPC3_EML_STM_SECTION 0x2000
+#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull
+#define TPC3_EML_CTI_MAX_OFFSET 0x1000
+#define TPC3_EML_CTI_SECTION 0x1000
+#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull
+#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC3_EML_FUNNEL_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull
+#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull
+#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull
+#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull
+#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC3_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull
+#define TPC3_EML_CFG_MAX_OFFSET 0x3380
+#define TPC3_EML_CFG_SECTION 0x1000
+#define mmTPC3_EML_TPC_CFG_BASE 0x7FFF641000ull
+#define TPC3_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC3_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC3_EML_TPC_CFG_BASE 0x7FFF641400ull
+#define KERNEL_TENSOR_0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC3_EML_TPC_CFG_BASE 0x7FFF641438ull
+#define KERNEL_TENSOR_1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC3_EML_TPC_CFG_BASE 0x7FFF641470ull
+#define KERNEL_TENSOR_2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC3_EML_TPC_CFG_BASE 0x7FFF6414A8ull
+#define KERNEL_TENSOR_3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC3_EML_TPC_CFG_BASE 0x7FFF6414E0ull
+#define KERNEL_TENSOR_4_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC3_EML_TPC_CFG_BASE 0x7FFF641518ull
+#define KERNEL_TENSOR_5_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC3_EML_TPC_CFG_BASE 0x7FFF641550ull
+#define KERNEL_TENSOR_6_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC3_EML_TPC_CFG_BASE 0x7FFF641588ull
+#define KERNEL_TENSOR_7_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC3_EML_TPC_CFG_BASE 0x7FFF6415C0ull
+#define KERNEL_TENSOR_8_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC3_EML_TPC_CFG_BASE 0x7FFF6415F8ull
+#define KERNEL_TENSOR_9_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC3_EML_TPC_CFG_BASE 0x7FFF641630ull
+#define KERNEL_TENSOR_10_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC3_EML_TPC_CFG_BASE 0x7FFF641668ull
+#define KERNEL_TENSOR_11_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC3_EML_TPC_CFG_BASE 0x7FFF6416A0ull
+#define KERNEL_TENSOR_12_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC3_EML_TPC_CFG_BASE 0x7FFF6416D8ull
+#define KERNEL_TENSOR_13_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC3_EML_TPC_CFG_BASE 0x7FFF641710ull
+#define KERNEL_TENSOR_14_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC3_EML_TPC_CFG_BASE 0x7FFF641748ull
+#define KERNEL_TENSOR_15_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_BASE 0x7FFF641780ull
+#define KERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC3_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC3_EML_TPC_CFG_BASE 0x7FFF641788ull
+#define KERNEL_TPC3_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC3_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC3_EML_TPC_CFG_BASE 0x7FFF641A00ull
+#define QM_TENSOR_0_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC3_EML_TPC_CFG_BASE 0x7FFF641A38ull
+#define QM_TENSOR_1_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC3_EML_TPC_CFG_BASE 0x7FFF641A70ull
+#define QM_TENSOR_2_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC3_EML_TPC_CFG_BASE 0x7FFF641AA8ull
+#define QM_TENSOR_3_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC3_EML_TPC_CFG_BASE 0x7FFF641AE0ull
+#define QM_TENSOR_4_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC3_EML_TPC_CFG_BASE 0x7FFF641B18ull
+#define QM_TENSOR_5_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC3_EML_TPC_CFG_BASE 0x7FFF641B50ull
+#define QM_TENSOR_6_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC3_EML_TPC_CFG_BASE 0x7FFF641B88ull
+#define QM_TENSOR_7_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC3_EML_TPC_CFG_BASE 0x7FFF641BC0ull
+#define QM_TENSOR_8_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC3_EML_TPC_CFG_BASE 0x7FFF641BF8ull
+#define QM_TENSOR_9_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC3_EML_TPC_CFG_BASE 0x7FFF641C30ull
+#define QM_TENSOR_10_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC3_EML_TPC_CFG_BASE 0x7FFF641C68ull
+#define QM_TENSOR_11_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC3_EML_TPC_CFG_BASE 0x7FFF641CA0ull
+#define QM_TENSOR_12_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC3_EML_TPC_CFG_BASE 0x7FFF641CD8ull
+#define QM_TENSOR_13_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC3_EML_TPC_CFG_BASE 0x7FFF641D10ull
+#define QM_TENSOR_14_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC3_EML_TPC_CFG_BASE 0x7FFF641D48ull
+#define QM_TENSOR_15_TPC3_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC3_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC3_EML_TPC_CFG_BASE 0x7FFF641D80ull
+#define QM_SYNC_OBJECT_TPC3_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC3_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC3_EML_TPC_CFG_BASE 0x7FFF641D88ull
+#define QM_TPC3_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC3_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC3_EML_TPC_QM_BASE 0x7FFF642000ull
+#define TPC3_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC3_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull
+#define TPC3_EML_CS_MAX_OFFSET 0x1000
+#define TPC3_EML_CS_SECTION 0x1000
+#define mmTPC4_ROM_TABLE_BASE 0x7FFF800000ull
+#define TPC4_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC4_ROM_TABLE_SECTION 0x1000
+#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull
+#define TPC4_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC4_EML_SPMU_SECTION 0x1000
+#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull
+#define TPC4_EML_ETF_MAX_OFFSET 0x1000
+#define TPC4_EML_ETF_SECTION 0x1000
+#define mmTPC4_EML_STM_BASE 0x7FFF803000ull
+#define TPC4_EML_STM_MAX_OFFSET 0x1000
+#define TPC4_EML_STM_SECTION 0x2000
+#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull
+#define TPC4_EML_CTI_MAX_OFFSET 0x1000
+#define TPC4_EML_CTI_SECTION 0x1000
+#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull
+#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC4_EML_FUNNEL_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull
+#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull
+#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull
+#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull
+#define TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC4_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull
+#define TPC4_EML_CFG_MAX_OFFSET 0x3380
+#define TPC4_EML_CFG_SECTION 0x1000
+#define mmTPC4_EML_TPC_CFG_BASE 0x7FFF841000ull
+#define TPC4_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC4_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC4_EML_TPC_CFG_BASE 0x7FFF841400ull
+#define KERNEL_TENSOR_0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC4_EML_TPC_CFG_BASE 0x7FFF841438ull
+#define KERNEL_TENSOR_1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC4_EML_TPC_CFG_BASE 0x7FFF841470ull
+#define KERNEL_TENSOR_2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC4_EML_TPC_CFG_BASE 0x7FFF8414A8ull
+#define KERNEL_TENSOR_3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC4_EML_TPC_CFG_BASE 0x7FFF8414E0ull
+#define KERNEL_TENSOR_4_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC4_EML_TPC_CFG_BASE 0x7FFF841518ull
+#define KERNEL_TENSOR_5_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC4_EML_TPC_CFG_BASE 0x7FFF841550ull
+#define KERNEL_TENSOR_6_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC4_EML_TPC_CFG_BASE 0x7FFF841588ull
+#define KERNEL_TENSOR_7_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC4_EML_TPC_CFG_BASE 0x7FFF8415C0ull
+#define KERNEL_TENSOR_8_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC4_EML_TPC_CFG_BASE 0x7FFF8415F8ull
+#define KERNEL_TENSOR_9_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC4_EML_TPC_CFG_BASE 0x7FFF841630ull
+#define KERNEL_TENSOR_10_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC4_EML_TPC_CFG_BASE 0x7FFF841668ull
+#define KERNEL_TENSOR_11_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC4_EML_TPC_CFG_BASE 0x7FFF8416A0ull
+#define KERNEL_TENSOR_12_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC4_EML_TPC_CFG_BASE 0x7FFF8416D8ull
+#define KERNEL_TENSOR_13_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC4_EML_TPC_CFG_BASE 0x7FFF841710ull
+#define KERNEL_TENSOR_14_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC4_EML_TPC_CFG_BASE 0x7FFF841748ull
+#define KERNEL_TENSOR_15_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_BASE 0x7FFF841780ull
+#define KERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC4_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC4_EML_TPC_CFG_BASE 0x7FFF841788ull
+#define KERNEL_TPC4_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC4_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC4_EML_TPC_CFG_BASE 0x7FFF841A00ull
+#define QM_TENSOR_0_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC4_EML_TPC_CFG_BASE 0x7FFF841A38ull
+#define QM_TENSOR_1_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC4_EML_TPC_CFG_BASE 0x7FFF841A70ull
+#define QM_TENSOR_2_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC4_EML_TPC_CFG_BASE 0x7FFF841AA8ull
+#define QM_TENSOR_3_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC4_EML_TPC_CFG_BASE 0x7FFF841AE0ull
+#define QM_TENSOR_4_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC4_EML_TPC_CFG_BASE 0x7FFF841B18ull
+#define QM_TENSOR_5_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC4_EML_TPC_CFG_BASE 0x7FFF841B50ull
+#define QM_TENSOR_6_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC4_EML_TPC_CFG_BASE 0x7FFF841B88ull
+#define QM_TENSOR_7_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC4_EML_TPC_CFG_BASE 0x7FFF841BC0ull
+#define QM_TENSOR_8_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC4_EML_TPC_CFG_BASE 0x7FFF841BF8ull
+#define QM_TENSOR_9_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC4_EML_TPC_CFG_BASE 0x7FFF841C30ull
+#define QM_TENSOR_10_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC4_EML_TPC_CFG_BASE 0x7FFF841C68ull
+#define QM_TENSOR_11_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC4_EML_TPC_CFG_BASE 0x7FFF841CA0ull
+#define QM_TENSOR_12_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC4_EML_TPC_CFG_BASE 0x7FFF841CD8ull
+#define QM_TENSOR_13_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC4_EML_TPC_CFG_BASE 0x7FFF841D10ull
+#define QM_TENSOR_14_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC4_EML_TPC_CFG_BASE 0x7FFF841D48ull
+#define QM_TENSOR_15_TPC4_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC4_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC4_EML_TPC_CFG_BASE 0x7FFF841D80ull
+#define QM_SYNC_OBJECT_TPC4_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC4_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC4_EML_TPC_CFG_BASE 0x7FFF841D88ull
+#define QM_TPC4_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC4_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC4_EML_TPC_QM_BASE 0x7FFF842000ull
+#define TPC4_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC4_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull
+#define TPC4_EML_CS_MAX_OFFSET 0x1000
+#define TPC4_EML_CS_SECTION 0x1000
+#define mmTPC5_ROM_TABLE_BASE 0x7FFFA00000ull
+#define TPC5_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC5_ROM_TABLE_SECTION 0x1000
+#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull
+#define TPC5_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC5_EML_SPMU_SECTION 0x1000
+#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull
+#define TPC5_EML_ETF_MAX_OFFSET 0x1000
+#define TPC5_EML_ETF_SECTION 0x1000
+#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull
+#define TPC5_EML_STM_MAX_OFFSET 0x1000
+#define TPC5_EML_STM_SECTION 0x2000
+#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull
+#define TPC5_EML_CTI_MAX_OFFSET 0x1000
+#define TPC5_EML_CTI_SECTION 0x1000
+#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull
+#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC5_EML_FUNNEL_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull
+#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull
+#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull
+#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull
+#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC5_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull
+#define TPC5_EML_CFG_MAX_OFFSET 0x3380
+#define TPC5_EML_CFG_SECTION 0x1000
+#define mmTPC5_EML_TPC_CFG_BASE 0x7FFFA41000ull
+#define TPC5_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC5_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC5_EML_TPC_CFG_BASE 0x7FFFA41400ull
+#define KERNEL_TENSOR_0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC5_EML_TPC_CFG_BASE 0x7FFFA41438ull
+#define KERNEL_TENSOR_1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC5_EML_TPC_CFG_BASE 0x7FFFA41470ull
+#define KERNEL_TENSOR_2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC5_EML_TPC_CFG_BASE 0x7FFFA414A8ull
+#define KERNEL_TENSOR_3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC5_EML_TPC_CFG_BASE 0x7FFFA414E0ull
+#define KERNEL_TENSOR_4_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC5_EML_TPC_CFG_BASE 0x7FFFA41518ull
+#define KERNEL_TENSOR_5_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC5_EML_TPC_CFG_BASE 0x7FFFA41550ull
+#define KERNEL_TENSOR_6_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC5_EML_TPC_CFG_BASE 0x7FFFA41588ull
+#define KERNEL_TENSOR_7_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC5_EML_TPC_CFG_BASE 0x7FFFA415C0ull
+#define KERNEL_TENSOR_8_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC5_EML_TPC_CFG_BASE 0x7FFFA415F8ull
+#define KERNEL_TENSOR_9_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC5_EML_TPC_CFG_BASE 0x7FFFA41630ull
+#define KERNEL_TENSOR_10_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC5_EML_TPC_CFG_BASE 0x7FFFA41668ull
+#define KERNEL_TENSOR_11_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC5_EML_TPC_CFG_BASE 0x7FFFA416A0ull
+#define KERNEL_TENSOR_12_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC5_EML_TPC_CFG_BASE 0x7FFFA416D8ull
+#define KERNEL_TENSOR_13_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC5_EML_TPC_CFG_BASE 0x7FFFA41710ull
+#define KERNEL_TENSOR_14_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC5_EML_TPC_CFG_BASE 0x7FFFA41748ull
+#define KERNEL_TENSOR_15_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_BASE 0x7FFFA41780ull
+#define KERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC5_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC5_EML_TPC_CFG_BASE 0x7FFFA41788ull
+#define KERNEL_TPC5_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC5_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A00ull
+#define QM_TENSOR_0_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A38ull
+#define QM_TENSOR_1_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC5_EML_TPC_CFG_BASE 0x7FFFA41A70ull
+#define QM_TENSOR_2_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC5_EML_TPC_CFG_BASE 0x7FFFA41AA8ull
+#define QM_TENSOR_3_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC5_EML_TPC_CFG_BASE 0x7FFFA41AE0ull
+#define QM_TENSOR_4_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B18ull
+#define QM_TENSOR_5_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B50ull
+#define QM_TENSOR_6_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC5_EML_TPC_CFG_BASE 0x7FFFA41B88ull
+#define QM_TENSOR_7_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC5_EML_TPC_CFG_BASE 0x7FFFA41BC0ull
+#define QM_TENSOR_8_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC5_EML_TPC_CFG_BASE 0x7FFFA41BF8ull
+#define QM_TENSOR_9_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC5_EML_TPC_CFG_BASE 0x7FFFA41C30ull
+#define QM_TENSOR_10_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC5_EML_TPC_CFG_BASE 0x7FFFA41C68ull
+#define QM_TENSOR_11_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC5_EML_TPC_CFG_BASE 0x7FFFA41CA0ull
+#define QM_TENSOR_12_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC5_EML_TPC_CFG_BASE 0x7FFFA41CD8ull
+#define QM_TENSOR_13_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D10ull
+#define QM_TENSOR_14_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D48ull
+#define QM_TENSOR_15_TPC5_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC5_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D80ull
+#define QM_SYNC_OBJECT_TPC5_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC5_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC5_EML_TPC_CFG_BASE 0x7FFFA41D88ull
+#define QM_TPC5_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC5_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC5_EML_TPC_QM_BASE 0x7FFFA42000ull
+#define TPC5_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC5_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull
+#define TPC5_EML_CS_MAX_OFFSET 0x1000
+#define TPC5_EML_CS_SECTION 0x1000
+#define mmTPC6_ROM_TABLE_BASE 0x7FFFC00000ull
+#define TPC6_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC6_ROM_TABLE_SECTION 0x1000
+#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull
+#define TPC6_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC6_EML_SPMU_SECTION 0x1000
+#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull
+#define TPC6_EML_ETF_MAX_OFFSET 0x1000
+#define TPC6_EML_ETF_SECTION 0x1000
+#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull
+#define TPC6_EML_STM_MAX_OFFSET 0x1000
+#define TPC6_EML_STM_SECTION 0x2000
+#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull
+#define TPC6_EML_CTI_MAX_OFFSET 0x1000
+#define TPC6_EML_CTI_SECTION 0x1000
+#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull
+#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC6_EML_FUNNEL_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull
+#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull
+#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull
+#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull
+#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC6_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull
+#define TPC6_EML_CFG_MAX_OFFSET 0x3380
+#define TPC6_EML_CFG_SECTION 0x1000
+#define mmTPC6_EML_TPC_CFG_BASE 0x7FFFC41000ull
+#define TPC6_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC6_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC6_EML_TPC_CFG_BASE 0x7FFFC41400ull
+#define KERNEL_TENSOR_0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC6_EML_TPC_CFG_BASE 0x7FFFC41438ull
+#define KERNEL_TENSOR_1_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC6_EML_TPC_CFG_BASE 0x7FFFC41470ull
+#define KERNEL_TENSOR_2_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC6_EML_TPC_CFG_BASE 0x7FFFC414A8ull
+#define KERNEL_TENSOR_3_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC6_EML_TPC_CFG_BASE 0x7FFFC414E0ull
+#define KERNEL_TENSOR_4_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC6_EML_TPC_CFG_BASE 0x7FFFC41518ull
+#define KERNEL_TENSOR_5_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC6_EML_TPC_CFG_BASE 0x7FFFC41550ull
+#define KERNEL_TENSOR_6_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC6_EML_TPC_CFG_BASE 0x7FFFC41588ull
+#define KERNEL_TENSOR_7_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC6_EML_TPC_CFG_BASE 0x7FFFC415C0ull
+#define KERNEL_TENSOR_8_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC6_EML_TPC_CFG_BASE 0x7FFFC415F8ull
+#define KERNEL_TENSOR_9_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC6_EML_TPC_CFG_BASE 0x7FFFC41630ull
+#define KERNEL_TENSOR_10_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC6_EML_TPC_CFG_BASE 0x7FFFC41668ull
+#define KERNEL_TENSOR_11_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC6_EML_TPC_CFG_BASE 0x7FFFC416A0ull
+#define KERNEL_TENSOR_12_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC6_EML_TPC_CFG_BASE 0x7FFFC416D8ull
+#define KERNEL_TENSOR_13_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC6_EML_TPC_CFG_BASE 0x7FFFC41710ull
+#define KERNEL_TENSOR_14_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC6_EML_TPC_CFG_BASE 0x7FFFC41748ull
+#define KERNEL_TENSOR_15_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_BASE 0x7FFFC41780ull
+#define KERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC6_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC6_EML_TPC_CFG_BASE 0x7FFFC41788ull
+#define KERNEL_TPC6_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC6_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A00ull
+#define QM_TENSOR_0_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A38ull
+#define QM_TENSOR_1_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC6_EML_TPC_CFG_BASE 0x7FFFC41A70ull
+#define QM_TENSOR_2_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC6_EML_TPC_CFG_BASE 0x7FFFC41AA8ull
+#define QM_TENSOR_3_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC6_EML_TPC_CFG_BASE 0x7FFFC41AE0ull
+#define QM_TENSOR_4_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B18ull
+#define QM_TENSOR_5_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B50ull
+#define QM_TENSOR_6_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC6_EML_TPC_CFG_BASE 0x7FFFC41B88ull
+#define QM_TENSOR_7_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC6_EML_TPC_CFG_BASE 0x7FFFC41BC0ull
+#define QM_TENSOR_8_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC6_EML_TPC_CFG_BASE 0x7FFFC41BF8ull
+#define QM_TENSOR_9_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC6_EML_TPC_CFG_BASE 0x7FFFC41C30ull
+#define QM_TENSOR_10_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC6_EML_TPC_CFG_BASE 0x7FFFC41C68ull
+#define QM_TENSOR_11_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC6_EML_TPC_CFG_BASE 0x7FFFC41CA0ull
+#define QM_TENSOR_12_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC6_EML_TPC_CFG_BASE 0x7FFFC41CD8ull
+#define QM_TENSOR_13_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D10ull
+#define QM_TENSOR_14_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D48ull
+#define QM_TENSOR_15_TPC6_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC6_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D80ull
+#define QM_SYNC_OBJECT_TPC6_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC6_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC6_EML_TPC_CFG_BASE 0x7FFFC41D88ull
+#define QM_TPC6_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC6_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC6_EML_TPC_QM_BASE 0x7FFFC42000ull
+#define TPC6_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC6_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull
+#define TPC6_EML_CS_MAX_OFFSET 0x1000
+#define TPC6_EML_CS_SECTION 0x1000
+#define mmTPC7_ROM_TABLE_BASE 0x7FFFE00000ull
+#define TPC7_ROM_TABLE_MAX_OFFSET 0x1000
+#define TPC7_ROM_TABLE_SECTION 0x1000
+#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull
+#define TPC7_EML_SPMU_MAX_OFFSET 0x1000
+#define TPC7_EML_SPMU_SECTION 0x1000
+#define mmTPC7_EML_ETF_BASE 0x7FFFE02000ull
+#define TPC7_EML_ETF_MAX_OFFSET 0x1000
+#define TPC7_EML_ETF_SECTION 0x1000
+#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull
+#define TPC7_EML_STM_MAX_OFFSET 0x1000
+#define TPC7_EML_STM_SECTION 0x2000
+#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull
+#define TPC7_EML_CTI_MAX_OFFSET 0x1000
+#define TPC7_EML_CTI_SECTION 0x1000
+#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull
+#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000
+#define TPC7_EML_FUNNEL_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull
+#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_0_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull
+#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_1_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull
+#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_2_SECTION 0x1000
+#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull
+#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000
+#define TPC7_EML_BUSMON_3_SECTION 0x36000
+#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull
+#define TPC7_EML_CFG_MAX_OFFSET 0x3380
+#define TPC7_EML_CFG_SECTION 0x1000
+#define mmTPC7_EML_TPC_CFG_BASE 0x7FFFE41000ull
+#define TPC7_EML_TPC_CFG_MAX_OFFSET 0xE400
+#define TPC7_EML_TPC_CFG_SECTION 0x4000
+#define mmKERNEL_TENSOR_0_TPC7_EML_TPC_CFG_BASE 0x7FFFE41400ull
+#define KERNEL_TENSOR_0_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_0_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_1_TPC7_EML_TPC_CFG_BASE 0x7FFFE41438ull
+#define KERNEL_TENSOR_1_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_1_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_2_TPC7_EML_TPC_CFG_BASE 0x7FFFE41470ull
+#define KERNEL_TENSOR_2_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_2_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_3_TPC7_EML_TPC_CFG_BASE 0x7FFFE414A8ull
+#define KERNEL_TENSOR_3_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_3_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_4_TPC7_EML_TPC_CFG_BASE 0x7FFFE414E0ull
+#define KERNEL_TENSOR_4_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_4_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_5_TPC7_EML_TPC_CFG_BASE 0x7FFFE41518ull
+#define KERNEL_TENSOR_5_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_5_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_6_TPC7_EML_TPC_CFG_BASE 0x7FFFE41550ull
+#define KERNEL_TENSOR_6_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_6_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_7_TPC7_EML_TPC_CFG_BASE 0x7FFFE41588ull
+#define KERNEL_TENSOR_7_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_7_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_8_TPC7_EML_TPC_CFG_BASE 0x7FFFE415C0ull
+#define KERNEL_TENSOR_8_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_8_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_9_TPC7_EML_TPC_CFG_BASE 0x7FFFE415F8ull
+#define KERNEL_TENSOR_9_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_9_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_10_TPC7_EML_TPC_CFG_BASE 0x7FFFE41630ull
+#define KERNEL_TENSOR_10_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_10_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_11_TPC7_EML_TPC_CFG_BASE 0x7FFFE41668ull
+#define KERNEL_TENSOR_11_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_11_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_12_TPC7_EML_TPC_CFG_BASE 0x7FFFE416A0ull
+#define KERNEL_TENSOR_12_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_12_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_13_TPC7_EML_TPC_CFG_BASE 0x7FFFE416D8ull
+#define KERNEL_TENSOR_13_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_13_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_14_TPC7_EML_TPC_CFG_BASE 0x7FFFE41710ull
+#define KERNEL_TENSOR_14_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_14_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_TENSOR_15_TPC7_EML_TPC_CFG_BASE 0x7FFFE41748ull
+#define KERNEL_TENSOR_15_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define KERNEL_TENSOR_15_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmKERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_BASE 0x7FFFE41780ull
+#define KERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define KERNEL_SYNC_OBJECT_TPC7_EML_TPC_CFG_SECTION 0x8000
+#define mmKERNEL_TPC7_EML_TPC_CFG_BASE 0x7FFFE41788ull
+#define KERNEL_TPC7_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define KERNEL_TPC7_EML_TPC_CFG_SECTION 0x2780
+#define mmQM_TENSOR_0_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A00ull
+#define QM_TENSOR_0_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_0_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_1_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A38ull
+#define QM_TENSOR_1_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_1_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_2_TPC7_EML_TPC_CFG_BASE 0x7FFFE41A70ull
+#define QM_TENSOR_2_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_2_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_3_TPC7_EML_TPC_CFG_BASE 0x7FFFE41AA8ull
+#define QM_TENSOR_3_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_3_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_4_TPC7_EML_TPC_CFG_BASE 0x7FFFE41AE0ull
+#define QM_TENSOR_4_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_4_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_5_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B18ull
+#define QM_TENSOR_5_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_5_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_6_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B50ull
+#define QM_TENSOR_6_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_6_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_7_TPC7_EML_TPC_CFG_BASE 0x7FFFE41B88ull
+#define QM_TENSOR_7_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_7_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_8_TPC7_EML_TPC_CFG_BASE 0x7FFFE41BC0ull
+#define QM_TENSOR_8_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_8_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_9_TPC7_EML_TPC_CFG_BASE 0x7FFFE41BF8ull
+#define QM_TENSOR_9_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_9_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_10_TPC7_EML_TPC_CFG_BASE 0x7FFFE41C30ull
+#define QM_TENSOR_10_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_10_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_11_TPC7_EML_TPC_CFG_BASE 0x7FFFE41C68ull
+#define QM_TENSOR_11_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_11_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_12_TPC7_EML_TPC_CFG_BASE 0x7FFFE41CA0ull
+#define QM_TENSOR_12_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_12_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_13_TPC7_EML_TPC_CFG_BASE 0x7FFFE41CD8ull
+#define QM_TENSOR_13_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_13_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_14_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D10ull
+#define QM_TENSOR_14_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_14_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_TENSOR_15_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D48ull
+#define QM_TENSOR_15_TPC7_EML_TPC_CFG_MAX_OFFSET 0x3800
+#define QM_TENSOR_15_TPC7_EML_TPC_CFG_SECTION 0x3800
+#define mmQM_SYNC_OBJECT_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D80ull
+#define QM_SYNC_OBJECT_TPC7_EML_TPC_CFG_MAX_OFFSET 0x8000
+#define QM_SYNC_OBJECT_TPC7_EML_TPC_CFG_SECTION 0x8000
+#define mmQM_TPC7_EML_TPC_CFG_BASE 0x7FFFE41D88ull
+#define QM_TPC7_EML_TPC_CFG_MAX_OFFSET 0xB800
+#define QM_TPC7_EML_TPC_CFG_SECTION 0x2780
+#define mmTPC7_EML_TPC_QM_BASE 0x7FFFE42000ull
+#define TPC7_EML_TPC_QM_MAX_OFFSET 0xD040
+#define TPC7_EML_TPC_QM_SECTION 0x1BD000
+#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull
+#define TPC7_EML_CS_MAX_OFFSET 0x1000
+
+#endif /* GAUDI_BLOCKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
new file mode 100644
index 000000000000..85e3b5148595
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef ASIC_REG_GAUDI_REGS_H_
+#define ASIC_REG_GAUDI_REGS_H_
+
+#include "gaudi_blocks.h"
+#include "psoc_global_conf_regs.h"
+#include "psoc_timestamp_regs.h"
+#include "cpu_if_regs.h"
+#include "mmu_up_regs.h"
+#include "stlb_regs.h"
+#include "dma0_qm_regs.h"
+#include "dma1_qm_regs.h"
+#include "dma2_qm_regs.h"
+#include "dma3_qm_regs.h"
+#include "dma4_qm_regs.h"
+#include "dma5_qm_regs.h"
+#include "dma6_qm_regs.h"
+#include "dma7_qm_regs.h"
+#include "dma0_core_regs.h"
+#include "dma1_core_regs.h"
+#include "dma2_core_regs.h"
+#include "dma3_core_regs.h"
+#include "dma4_core_regs.h"
+#include "dma5_core_regs.h"
+#include "dma6_core_regs.h"
+#include "dma7_core_regs.h"
+#include "mme0_ctrl_regs.h"
+#include "mme1_ctrl_regs.h"
+#include "mme2_ctrl_regs.h"
+#include "mme3_ctrl_regs.h"
+#include "mme0_qm_regs.h"
+#include "mme2_qm_regs.h"
+#include "tpc0_cfg_regs.h"
+#include "tpc1_cfg_regs.h"
+#include "tpc2_cfg_regs.h"
+#include "tpc3_cfg_regs.h"
+#include "tpc4_cfg_regs.h"
+#include "tpc5_cfg_regs.h"
+#include "tpc6_cfg_regs.h"
+#include "tpc7_cfg_regs.h"
+#include "tpc0_qm_regs.h"
+#include "tpc1_qm_regs.h"
+#include "tpc2_qm_regs.h"
+#include "tpc3_qm_regs.h"
+#include "tpc4_qm_regs.h"
+#include "tpc5_qm_regs.h"
+#include "tpc6_qm_regs.h"
+#include "tpc7_qm_regs.h"
+#include "dma_if_e_n_down_ch0_regs.h"
+#include "dma_if_e_n_down_ch1_regs.h"
+#include "dma_if_e_s_down_ch0_regs.h"
+#include "dma_if_e_s_down_ch1_regs.h"
+#include "dma_if_w_n_down_ch0_regs.h"
+#include "dma_if_w_n_down_ch1_regs.h"
+#include "dma_if_w_s_down_ch0_regs.h"
+#include "dma_if_w_s_down_ch1_regs.h"
+#include "dma_if_e_n_regs.h"
+#include "dma_if_e_s_regs.h"
+#include "dma_if_w_n_regs.h"
+#include "dma_if_w_s_regs.h"
+#include "nif_rtr_ctrl_0_regs.h"
+#include "nif_rtr_ctrl_1_regs.h"
+#include "nif_rtr_ctrl_2_regs.h"
+#include "nif_rtr_ctrl_3_regs.h"
+#include "nif_rtr_ctrl_4_regs.h"
+#include "nif_rtr_ctrl_5_regs.h"
+#include "nif_rtr_ctrl_6_regs.h"
+#include "nif_rtr_ctrl_7_regs.h"
+#include "sif_rtr_ctrl_0_regs.h"
+#include "sif_rtr_ctrl_1_regs.h"
+#include "sif_rtr_ctrl_2_regs.h"
+#include "sif_rtr_ctrl_3_regs.h"
+#include "sif_rtr_ctrl_4_regs.h"
+#include "sif_rtr_ctrl_5_regs.h"
+#include "sif_rtr_ctrl_6_regs.h"
+#include "sif_rtr_ctrl_7_regs.h"
+#include "psoc_etr_regs.h"
+
+#include "dma0_qm_masks.h"
+#include "mme0_qm_masks.h"
+#include "tpc0_qm_masks.h"
+#include "dma0_core_masks.h"
+#include "tpc0_cfg_masks.h"
+#include "psoc_global_conf_masks.h"
+
+#include "psoc_pci_pll_regs.h"
+#include "psoc_hbm_pll_regs.h"
+
+#define GAUDI_ECC_MEM_SEL_OFFSET 0xF18
+#define GAUDI_ECC_ADDRESS_OFFSET 0xF1C
+#define GAUDI_ECC_SYNDROME_OFFSET 0xF20
+#define GAUDI_ECC_SERR0_OFFSET 0xF30
+#define GAUDI_ECC_SERR1_OFFSET 0xF34
+#define GAUDI_ECC_SERR2_OFFSET 0xF38
+#define GAUDI_ECC_SERR3_OFFSET 0xF3C
+#define GAUDI_ECC_DERR0_OFFSET 0xF40
+#define GAUDI_ECC_DERR1_OFFSET 0xF44
+#define GAUDI_ECC_DERR2_OFFSET 0xF48
+#define GAUDI_ECC_DERR3_OFFSET 0xF4C
+
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x492000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x494000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 0x494800
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 0x495000
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 0x495800
+#define mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 0x496000
+#define mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4B2000
+#define mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0 0x4B6000
+#define mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4D2000
+#define mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4D6000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 0x4F2000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 0x4F2004
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 0x4F3FFC
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 0x4F4000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0 0x4F6000
+#define mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_511 0x4F67FC
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW 0x300400
+#define mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AW 0x310400
+#define mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AW 0x320400
+#define mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AW 0x330400
+#define mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AW 0x340400
+#define mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AW 0x350400
+#define mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AW 0x360400
+#define mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AW 0x370400
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR 0x300490
+#define mmSIF_RTR_1_LBW_RANGE_PROT_HIT_AR 0x310490
+#define mmSIF_RTR_2_LBW_RANGE_PROT_HIT_AR 0x320490
+#define mmSIF_RTR_3_LBW_RANGE_PROT_HIT_AR 0x330490
+#define mmSIF_RTR_4_LBW_RANGE_PROT_HIT_AR 0x340490
+#define mmSIF_RTR_5_LBW_RANGE_PROT_HIT_AR 0x350490
+#define mmSIF_RTR_6_LBW_RANGE_PROT_HIT_AR 0x360490
+#define mmSIF_RTR_7_LBW_RANGE_PROT_HIT_AR 0x370490
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0 0x300410
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0 0x310410
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0 0x320410
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0 0x330410
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0 0x340410
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0 0x350410
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0 0x360410
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0 0x370410
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0 0x300450
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0 0x310450
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0 0x320450
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0 0x330450
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0 0x340450
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0 0x350450
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0 0x360450
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0 0x370450
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0 0x3004A0
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0 0x3104A0
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0 0x3204A0
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0 0x3304A0
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0 0x3404A0
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0 0x3504A0
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0 0x3604A0
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0 0x3704A0
+
+#define mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0 0x3004E0
+#define mmSIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0 0x3104E0
+#define mmSIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0 0x3204E0
+#define mmSIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0 0x3304E0
+#define mmSIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0 0x3404E0
+#define mmSIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0 0x3504E0
+#define mmSIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0 0x3604E0
+#define mmSIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0 0x3704E0
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AW 0x380400
+#define mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AW 0x390400
+#define mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AW 0x3A0400
+#define mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AW 0x3B0400
+#define mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AW 0x3C0400
+#define mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AW 0x3D0400
+#define mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AW 0x3E0400
+#define mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW 0x3F0400
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_HIT_AR 0x380490
+#define mmNIF_RTR_1_LBW_RANGE_PROT_HIT_AR 0x390490
+#define mmNIF_RTR_2_LBW_RANGE_PROT_HIT_AR 0x3A0490
+#define mmNIF_RTR_3_LBW_RANGE_PROT_HIT_AR 0x3B0490
+#define mmNIF_RTR_4_LBW_RANGE_PROT_HIT_AR 0x3C0490
+#define mmNIF_RTR_5_LBW_RANGE_PROT_HIT_AR 0x3D0490
+#define mmNIF_RTR_6_LBW_RANGE_PROT_HIT_AR 0x3E0490
+#define mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR 0x3F0490
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0 0x380410
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AW_0 0x390410
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AW_0 0x3A0410
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AW_0 0x3B0410
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AW_0 0x3C0410
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AW_0 0x3D0410
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AW_0 0x3E0410
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0 0x3F0410
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0 0x380450
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AW_0 0x390450
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AW_0 0x3A0450
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AW_0 0x3B0450
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AW_0 0x3C0450
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AW_0 0x3D0450
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AW_0 0x3E0450
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0 0x3F0450
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0 0x3804A0
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MIN_AR_0 0x3904A0
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MIN_AR_0 0x3A04A0
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MIN_AR_0 0x3B04A0
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MIN_AR_0 0x3C04A0
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MIN_AR_0 0x3D04A0
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MIN_AR_0 0x3E04A0
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0 0x3F04A0
+
+#define mmNIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0 0x3804E0
+#define mmNIF_RTR_1_LBW_RANGE_PROT_MAX_AR_0 0x3904E0
+#define mmNIF_RTR_2_LBW_RANGE_PROT_MAX_AR_0 0x3A04E0
+#define mmNIF_RTR_3_LBW_RANGE_PROT_MAX_AR_0 0x3B04E0
+#define mmNIF_RTR_4_LBW_RANGE_PROT_MAX_AR_0 0x3C04E0
+#define mmNIF_RTR_5_LBW_RANGE_PROT_MAX_AR_0 0x3D04E0
+#define mmNIF_RTR_6_LBW_RANGE_PROT_MAX_AR_0 0x3E04E0
+#define mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0 0x3F04E0
+
+#define mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_0 0x489030
+#define mmDMA_IF_W_S_DOWN_RSP_MID_WGHT_1 0x489034
+
+#define mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_0 0x4A9030
+#define mmDMA_IF_E_S_DOWN_RSP_MID_WGHT_1 0x4A9034
+
+#define mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_0 0x4C9030
+#define mmDMA_IF_W_N_DOWN_RSP_MID_WGHT_1 0x4C9034
+
+#define mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_0 0x4E9030
+#define mmDMA_IF_E_N_DOWN_RSP_MID_WGHT_1 0x4E9034
+
+#define mmMME1_QM_GLBL_CFG0 0xE8000
+#define mmMME1_QM_GLBL_STS0 0xE8038
+
+#define mmMME0_SBAB_SB_STALL 0x4002C
+#define mmMME0_SBAB_ARUSER0 0x40034
+#define mmMME0_SBAB_ARUSER1 0x40038
+#define mmMME0_SBAB_PROT 0x40050
+
+#define mmMME1_SBAB_SB_STALL 0xC002C
+#define mmMME1_SBAB_ARUSER0 0xC0034
+#define mmMME1_SBAB_ARUSER1 0xC0038
+#define mmMME1_SBAB_PROT 0xC0050
+
+#define mmMME2_SBAB_SB_STALL 0x14002C
+#define mmMME2_SBAB_ARUSER0 0x140034
+#define mmMME2_SBAB_ARUSER1 0x140038
+#define mmMME2_SBAB_PROT 0x140050
+
+#define mmMME3_SBAB_SB_STALL 0x1C002C
+#define mmMME3_SBAB_ARUSER0 0x1C0034
+#define mmMME3_SBAB_ARUSER1 0x1C0038
+#define mmMME3_SBAB_PROT 0x1C0050
+
+#define mmMME0_ACC_ACC_STALL 0x20028
+#define mmMME0_ACC_WBC 0x20038
+#define mmMME0_ACC_PROT 0x20050
+
+#define mmMME1_ACC_ACC_STALL 0xA0028
+#define mmMME1_ACC_WBC 0xA0038
+#define mmMME1_ACC_PROT 0xA0050
+
+#define mmMME2_ACC_ACC_STALL 0x120028
+#define mmMME2_ACC_WBC 0x120038
+#define mmMME2_ACC_PROT 0x120050
+
+#define mmMME3_ACC_ACC_STALL 0x1A0028
+#define mmMME3_ACC_WBC 0x1A0038
+#define mmMME3_ACC_PROT 0x1A0050
+
+#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040
+
+#define mmPSOC_EFUSE_READ 0xC4A000
+#define mmPSOC_EFUSE_DATA_0 0xC4A080
+
+#define mmPCIE_WRAP_MAX_OUTSTAND 0xC01B20
+#define mmPCIE_WRAP_LBW_PROT_OVR 0xC01B48
+#define mmPCIE_WRAP_HBW_DRAIN_CFG 0xC01D54
+#define mmPCIE_WRAP_LBW_DRAIN_CFG 0xC01D5C
+
+#define mmPCIE_MSI_INTR_0 0xC13000
+
+#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000
+
+#define mmPCIE_AUX_DBI 0xC07490
+
+#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
new file mode 100644
index 000000000000..083d073a0128
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_CTRL_REGS_H_
+#define ASIC_REG_MME0_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME0_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME0_CTRL_ARCH_STATUS 0x60000
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_S 0x60008
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_L 0x6000C
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_HIGH_O 0x60010
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_S 0x60014
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_L 0x60018
+
+#define mmMME0_CTRL_ARCH_BASE_ADDR_LOW_O 0x6001C
+
+#define mmMME0_CTRL_ARCH_HEADER_LOW 0x60020
+
+#define mmMME0_CTRL_ARCH_HEADER_HIGH 0x60024
+
+#define mmMME0_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x60028
+
+#define mmMME0_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x6002C
+
+#define mmMME0_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x60030
+
+#define mmMME0_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x60034
+
+#define mmMME0_CTRL_ARCH_OUTER_LOOP 0x60038
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x6003C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x60040
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x60044
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x60048
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x6004C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x60050
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x60054
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x60058
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x6005C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x60060
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x60064
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x60068
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x6006C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x60070
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x60074
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x60078
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x6007C
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x60080
+
+#define mmMME0_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60084
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x60088
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x6008C
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x60090
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x60094
+
+#define mmMME0_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x60098
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_0 0x6009C
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_1 0x600A0
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_2 0x600A4
+
+#define mmMME0_CTRL_ARCH_AGU_S_START_OFFSET_3 0x600A8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x600AC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x600B0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x600B4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x600B8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x600BC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x600C0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x600C4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x600C8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x600CC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x600D0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x600D4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x600D8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x600DC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x600E0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x600E4
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x600E8
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x600EC
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x600F0
+
+#define mmMME0_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x600F4
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x600F8
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x600FC
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60100
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60104
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60108
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x6010C
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x60110
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x60114
+
+#define mmMME0_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x60118
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6011C
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60120
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60124
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60128
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x6012C
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x60130
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x60134
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x60138
+
+#define mmMME0_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x6013C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x60140
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x60144
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x60148
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x6014C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x60150
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x60154
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x60158
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x6015C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x60160
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x60164
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x60168
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x6016C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x60170
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x60174
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x60178
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x6017C
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x60180
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x60184
+
+#define mmMME0_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60188
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6018C
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60190
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60194
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60198
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6019C
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x601A0
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x601A4
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x601A8
+
+#define mmMME0_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x601AC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x601B0
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x601B4
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x601B8
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x601BC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x601C0
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x601C4
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x601C8
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x601CC
+
+#define mmMME0_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x601D0
+
+#define mmMME0_CTRL_ARCH_DESC_SB_REPEAT 0x601D4
+
+#define mmMME0_CTRL_ARCH_DESC_RATE_LIMITER 0x601D8
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x601DC
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x601E0
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x601E4
+
+#define mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x601E8
+
+#define mmMME0_CTRL_ARCH_DESC_AXI_USER_DATA 0x601EC
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_S 0x601F0
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x601F4
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x601F8
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x601FC
+
+#define mmMME0_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x60200
+
+#define mmMME0_CTRL_ARCH_DESC_PADDING_VALUE_S 0x60204
+
+#define mmMME0_CTRL_ARCH_DESC_PADDING_VALUE_L 0x60208
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_S 0x6020C
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x60210
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x60214
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x60218
+
+#define mmMME0_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x6021C
+
+#define mmMME0_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x60220
+
+#define mmMME0_CTRL_ARCH_DESC_DUMMY 0x60224
+
+#define mmMME0_CTRL_CMD 0x60280
+
+#define mmMME0_CTRL_STATUS1 0x60284
+
+#define mmMME0_CTRL_RESET 0x60288
+
+#define mmMME0_CTRL_QM_STALL 0x6028C
+
+#define mmMME0_CTRL_SYNC_OBJECT_FIFO_TH 0x60290
+
+#define mmMME0_CTRL_EUS_ROLLUP_CNT_ADD 0x60294
+
+#define mmMME0_CTRL_INTR_CAUSE 0x60298
+
+#define mmMME0_CTRL_INTR_MASK 0x6029C
+
+#define mmMME0_CTRL_LOG_SHADOW 0x602A0
+
+#define mmMME0_CTRL_PCU_RL_DESC0 0x602A4
+
+#define mmMME0_CTRL_PCU_RL_TOKEN_UPDATE 0x602A8
+
+#define mmMME0_CTRL_PCU_RL_TH 0x602AC
+
+#define mmMME0_CTRL_PCU_RL_MIN 0x602B0
+
+#define mmMME0_CTRL_PCU_RL_CTRL_EN 0x602B4
+
+#define mmMME0_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x602B8
+
+#define mmMME0_CTRL_PCU_DUMMY_A_BF16 0x602BC
+
+#define mmMME0_CTRL_PCU_DUMMY_B_BF16 0x602C0
+
+#define mmMME0_CTRL_PCU_DUMMY_A_FP32_ODD 0x602C4
+
+#define mmMME0_CTRL_PCU_DUMMY_A_FP32_EVEN 0x602C8
+
+#define mmMME0_CTRL_PCU_DUMMY_B_FP32_ODD 0x602CC
+
+#define mmMME0_CTRL_PCU_DUMMY_B_FP32_EVEN 0x602D0
+
+#define mmMME0_CTRL_PROT 0x602D4
+
+#define mmMME0_CTRL_EU_POWER_SAVE_DISABLE 0x602D8
+
+#define mmMME0_CTRL_CS_DBG_BLOCK_ID 0x602DC
+
+#define mmMME0_CTRL_CS_DBG_STATUS_DROP_CNT 0x602E0
+
+#define mmMME0_CTRL_TE_CLOSE_CGATE 0x602E4
+
+#define mmMME0_CTRL_AGU_SM_INFLIGHT_CNTR 0x602E8
+
+#define mmMME0_CTRL_AGU_SM_TOTAL_CNTR 0x602EC
+
+#define mmMME0_CTRL_EZSYNC_OUT_CREDIT 0x602F0
+
+#define mmMME0_CTRL_PCU_RL_SAT_SEC 0x602F4
+
+#define mmMME0_CTRL_AGU_SYNC_MSG_AXI_USER 0x602F8
+
+#define mmMME0_CTRL_QM_SLV_LBW_CLK_EN 0x602FC
+
+#define mmMME0_CTRL_SHADOW_0_STATUS 0x60400
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x60408
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x6040C
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x60410
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x60414
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x60418
+
+#define mmMME0_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x6041C
+
+#define mmMME0_CTRL_SHADOW_0_HEADER_LOW 0x60420
+
+#define mmMME0_CTRL_SHADOW_0_HEADER_HIGH 0x60424
+
+#define mmMME0_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x60428
+
+#define mmMME0_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x6042C
+
+#define mmMME0_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x60430
+
+#define mmMME0_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x60434
+
+#define mmMME0_CTRL_SHADOW_0_OUTER_LOOP 0x60438
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x6043C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x60440
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x60444
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x60448
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x6044C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x60450
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x60454
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x60458
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x6045C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x60460
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x60464
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x60468
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x6046C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x60470
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x60474
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x60478
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x6047C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x60480
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60484
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x60488
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x6048C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x60490
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x60494
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x60498
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x6049C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x604A0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x604A4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x604A8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x604AC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x604B0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x604B4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x604B8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x604BC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x604C0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x604C4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x604C8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x604CC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x604D0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x604D4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x604D8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x604DC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x604E0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x604E4
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x604E8
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x604EC
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x604F0
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x604F4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x604F8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x604FC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60500
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60504
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60508
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x6050C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x60510
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x60514
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x60518
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6051C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60520
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60524
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60528
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x6052C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x60530
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x60534
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x60538
+
+#define mmMME0_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x6053C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x60540
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x60544
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x60548
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x6054C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x60550
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x60554
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x60558
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x6055C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x60560
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x60564
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x60568
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x6056C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x60570
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x60574
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x60578
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x6057C
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x60580
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x60584
+
+#define mmMME0_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60588
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6058C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60590
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60594
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60598
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6059C
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x605A0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x605A4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x605A8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x605AC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x605B0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x605B4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x605B8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x605BC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x605C0
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x605C4
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x605C8
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x605CC
+
+#define mmMME0_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x605D0
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SB_REPEAT 0x605D4
+
+#define mmMME0_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x605D8
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x605DC
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x605E0
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x605E4
+
+#define mmMME0_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x605E8
+
+#define mmMME0_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x605EC
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x605F0
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x605F4
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x605F8
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x605FC
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x60600
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x60604
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x60608
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x6060C
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x60610
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x60614
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x60618
+
+#define mmMME0_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x6061C
+
+#define mmMME0_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x60620
+
+#define mmMME0_CTRL_SHADOW_0_DESC_DUMMY 0x60624
+
+#define mmMME0_CTRL_SHADOW_1_STATUS 0x60680
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x60688
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x6068C
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x60690
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x60694
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x60698
+
+#define mmMME0_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x6069C
+
+#define mmMME0_CTRL_SHADOW_1_HEADER_LOW 0x606A0
+
+#define mmMME0_CTRL_SHADOW_1_HEADER_HIGH 0x606A4
+
+#define mmMME0_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x606A8
+
+#define mmMME0_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x606AC
+
+#define mmMME0_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x606B0
+
+#define mmMME0_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x606B4
+
+#define mmMME0_CTRL_SHADOW_1_OUTER_LOOP 0x606B8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x606BC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x606C0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x606C4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x606C8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x606CC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x606D0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x606D4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x606D8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x606DC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x606E0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x606E4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x606E8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x606EC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x606F0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x606F4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x606F8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x606FC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x60700
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60704
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x60708
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x6070C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x60710
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x60714
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x60718
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x6071C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x60720
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x60724
+
+#define mmMME0_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x60728
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x6072C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x60730
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x60734
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x60738
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x6073C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x60740
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x60744
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x60748
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x6074C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x60750
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x60754
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x60758
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x6075C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x60760
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x60764
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x60768
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x6076C
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x60770
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x60774
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x60778
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x6077C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60780
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60784
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60788
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x6078C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x60790
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x60794
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x60798
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x6079C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x607A0
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x607A4
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x607A8
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x607AC
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x607B0
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x607B4
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x607B8
+
+#define mmMME0_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x607BC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x607C0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x607C4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x607C8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x607CC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x607D0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x607D4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x607D8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x607DC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x607E0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x607E4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x607E8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x607EC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x607F0
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x607F4
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x607F8
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x607FC
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x60800
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x60804
+
+#define mmMME0_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60808
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x6080C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60810
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60814
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60818
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x6081C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x60820
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x60824
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x60828
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x6082C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60830
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60834
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60838
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x6083C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60840
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x60844
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x60848
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x6084C
+
+#define mmMME0_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x60850
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SB_REPEAT 0x60854
+
+#define mmMME0_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x60858
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x6085C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60860
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x60864
+
+#define mmMME0_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x60868
+
+#define mmMME0_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x6086C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x60870
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x60874
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x60878
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x6087C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x60880
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x60884
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x60888
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x6088C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x60890
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x60894
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x60898
+
+#define mmMME0_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x6089C
+
+#define mmMME0_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x608A0
+
+#define mmMME0_CTRL_SHADOW_1_DESC_DUMMY 0x608A4
+
+#define mmMME0_CTRL_SHADOW_2_STATUS 0x60900
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x60908
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x6090C
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x60910
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x60914
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x60918
+
+#define mmMME0_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x6091C
+
+#define mmMME0_CTRL_SHADOW_2_HEADER_LOW 0x60920
+
+#define mmMME0_CTRL_SHADOW_2_HEADER_HIGH 0x60924
+
+#define mmMME0_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x60928
+
+#define mmMME0_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x6092C
+
+#define mmMME0_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x60930
+
+#define mmMME0_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x60934
+
+#define mmMME0_CTRL_SHADOW_2_OUTER_LOOP 0x60938
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x6093C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x60940
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x60944
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x60948
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x6094C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x60950
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x60954
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x60958
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x6095C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x60960
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x60964
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x60968
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x6096C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x60970
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x60974
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x60978
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x6097C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x60980
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60984
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x60988
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x6098C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x60990
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x60994
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x60998
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x6099C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x609A0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x609A4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x609A8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x609AC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x609B0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x609B4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x609B8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x609BC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x609C0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x609C4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x609C8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x609CC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x609D0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x609D4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x609D8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x609DC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x609E0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x609E4
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x609E8
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x609EC
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x609F0
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x609F4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x609F8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x609FC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60A00
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60A04
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60A08
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x60A0C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x60A10
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x60A14
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x60A18
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x60A1C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60A20
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60A24
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60A28
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x60A2C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x60A30
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x60A34
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x60A38
+
+#define mmMME0_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x60A3C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x60A40
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x60A44
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x60A48
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x60A4C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x60A50
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x60A54
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x60A58
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x60A5C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x60A60
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x60A64
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x60A68
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x60A6C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x60A70
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x60A74
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x60A78
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x60A7C
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x60A80
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x60A84
+
+#define mmMME0_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60A88
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x60A8C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60A90
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60A94
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60A98
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x60A9C
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x60AA0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x60AA4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x60AA8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x60AAC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60AB0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60AB4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60AB8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x60ABC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60AC0
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x60AC4
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x60AC8
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x60ACC
+
+#define mmMME0_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x60AD0
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SB_REPEAT 0x60AD4
+
+#define mmMME0_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x60AD8
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x60ADC
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60AE0
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x60AE4
+
+#define mmMME0_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x60AE8
+
+#define mmMME0_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x60AEC
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x60AF0
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x60AF4
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x60AF8
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x60AFC
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x60B00
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x60B04
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x60B08
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x60B0C
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x60B10
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x60B14
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x60B18
+
+#define mmMME0_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x60B1C
+
+#define mmMME0_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x60B20
+
+#define mmMME0_CTRL_SHADOW_2_DESC_DUMMY 0x60B24
+
+#define mmMME0_CTRL_SHADOW_3_STATUS 0x60B80
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x60B88
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x60B8C
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x60B90
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x60B94
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x60B98
+
+#define mmMME0_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x60B9C
+
+#define mmMME0_CTRL_SHADOW_3_HEADER_LOW 0x60BA0
+
+#define mmMME0_CTRL_SHADOW_3_HEADER_HIGH 0x60BA4
+
+#define mmMME0_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x60BA8
+
+#define mmMME0_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x60BAC
+
+#define mmMME0_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x60BB0
+
+#define mmMME0_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x60BB4
+
+#define mmMME0_CTRL_SHADOW_3_OUTER_LOOP 0x60BB8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x60BBC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x60BC0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x60BC4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x60BC8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x60BCC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x60BD0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x60BD4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x60BD8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x60BDC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x60BE0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x60BE4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x60BE8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x60BEC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x60BF0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x60BF4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x60BF8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x60BFC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x60C00
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x60C04
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x60C08
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x60C0C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x60C10
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x60C14
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x60C18
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x60C1C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x60C20
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x60C24
+
+#define mmMME0_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x60C28
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x60C2C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x60C30
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x60C34
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x60C38
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x60C3C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x60C40
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x60C44
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x60C48
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x60C4C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x60C50
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x60C54
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x60C58
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x60C5C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x60C60
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x60C64
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x60C68
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x60C6C
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x60C70
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x60C74
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x60C78
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x60C7C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x60C80
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x60C84
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x60C88
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x60C8C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x60C90
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x60C94
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x60C98
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x60C9C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x60CA0
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x60CA4
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x60CA8
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x60CAC
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x60CB0
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x60CB4
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x60CB8
+
+#define mmMME0_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x60CBC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x60CC0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x60CC4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x60CC8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x60CCC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x60CD0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x60CD4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x60CD8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x60CDC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x60CE0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x60CE4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x60CE8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x60CEC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x60CF0
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x60CF4
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x60CF8
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x60CFC
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x60D00
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x60D04
+
+#define mmMME0_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x60D08
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x60D0C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x60D10
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x60D14
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x60D18
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x60D1C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x60D20
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x60D24
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x60D28
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x60D2C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x60D30
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x60D34
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x60D38
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x60D3C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x60D40
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x60D44
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x60D48
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x60D4C
+
+#define mmMME0_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x60D50
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SB_REPEAT 0x60D54
+
+#define mmMME0_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x60D58
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x60D5C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x60D60
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x60D64
+
+#define mmMME0_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x60D68
+
+#define mmMME0_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x60D6C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x60D70
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x60D74
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x60D78
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x60D7C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x60D80
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x60D84
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x60D88
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x60D8C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x60D90
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x60D94
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x60D98
+
+#define mmMME0_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x60D9C
+
+#define mmMME0_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x60DA0
+
+#define mmMME0_CTRL_SHADOW_3_DESC_DUMMY 0x60DA4
+
+#endif /* ASIC_REG_MME0_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
new file mode 100644
index 000000000000..e6dd30ce0ca7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_QM_MASKS_H_
+#define ASIC_REG_MME0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * MME0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* MME0_QM_GLBL_CFG0 */
+#define MME0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define MME0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define MME0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define MME0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define MME0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define MME0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* MME0_QM_GLBL_CFG1 */
+#define MME0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define MME0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define MME0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define MME0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define MME0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define MME0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define MME0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define MME0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define MME0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define MME0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define MME0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define MME0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* MME0_QM_GLBL_PROT */
+#define MME0_QM_GLBL_PROT_PQF_SHIFT 0
+#define MME0_QM_GLBL_PROT_PQF_MASK 0xF
+#define MME0_QM_GLBL_PROT_CQF_SHIFT 4
+#define MME0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define MME0_QM_GLBL_PROT_CP_SHIFT 9
+#define MME0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define MME0_QM_GLBL_PROT_ERR_SHIFT 14
+#define MME0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define MME0_QM_GLBL_PROT_ARB_SHIFT 15
+#define MME0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* MME0_QM_GLBL_ERR_CFG */
+#define MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define MME0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* MME0_QM_GLBL_SECURE_PROPS */
+#define MME0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define MME0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define MME0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define MME0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* MME0_QM_GLBL_NON_SECURE_PROPS */
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define MME0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* MME0_QM_GLBL_STS0 */
+#define MME0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define MME0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define MME0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define MME0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define MME0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define MME0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define MME0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define MME0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define MME0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define MME0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define MME0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define MME0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define MME0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define MME0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* MME0_QM_GLBL_STS1 */
+#define MME0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define MME0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define MME0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_GLBL_STS1_4 */
+#define MME0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_GLBL_MSG_EN */
+#define MME0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define MME0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define MME0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_GLBL_MSG_EN_4 */
+#define MME0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define MME0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define MME0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define MME0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define MME0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define MME0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define MME0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define MME0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define MME0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define MME0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define MME0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define MME0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define MME0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* MME0_QM_PQ_BASE_LO */
+#define MME0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define MME0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_BASE_HI */
+#define MME0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define MME0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_SIZE */
+#define MME0_QM_PQ_SIZE_VAL_SHIFT 0
+#define MME0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_PI */
+#define MME0_QM_PQ_PI_VAL_SHIFT 0
+#define MME0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_CI */
+#define MME0_QM_PQ_CI_VAL_SHIFT 0
+#define MME0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_PQ_CFG0 */
+#define MME0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define MME0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* MME0_QM_PQ_CFG1 */
+#define MME0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME0_QM_PQ_ARUSER_31_11 */
+#define MME0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_PQ_STS0 */
+#define MME0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define MME0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define MME0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define MME0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME0_QM_PQ_STS1 */
+#define MME0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define MME0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define MME0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define MME0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define MME0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* MME0_QM_CQ_CFG0 */
+#define MME0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define MME0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* MME0_QM_CQ_CFG1 */
+#define MME0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define MME0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define MME0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define MME0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_ARUSER_31_11 */
+#define MME0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_CQ_STS0 */
+#define MME0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define MME0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define MME0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define MME0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_STS1 */
+#define MME0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define MME0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define MME0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define MME0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define MME0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define MME0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* MME0_QM_CQ_PTR_LO_0 */
+#define MME0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_0 */
+#define MME0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_0 */
+#define MME0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_0 */
+#define MME0_QM_CQ_CTL_0_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_0_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_0_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_1 */
+#define MME0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_1 */
+#define MME0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_1 */
+#define MME0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_1 */
+#define MME0_QM_CQ_CTL_1_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_1_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_1_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_2 */
+#define MME0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_2 */
+#define MME0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_2 */
+#define MME0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_2 */
+#define MME0_QM_CQ_CTL_2_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_2_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_2_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_3 */
+#define MME0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_3 */
+#define MME0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_3 */
+#define MME0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_3 */
+#define MME0_QM_CQ_CTL_3_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_3_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_3_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_4 */
+#define MME0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_4 */
+#define MME0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_4 */
+#define MME0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_4 */
+#define MME0_QM_CQ_CTL_4_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_4_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_4_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_PTR_LO_STS */
+#define MME0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_PTR_HI_STS */
+#define MME0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define MME0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_TSIZE_STS */
+#define MME0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define MME0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CQ_CTL_STS */
+#define MME0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define MME0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define MME0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define MME0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* MME0_QM_CQ_IFIFO_CNT */
+#define MME0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define MME0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* MME0_QM_CP_MSG_BASE0_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE0_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE1_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE1_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE2_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE2_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE3_ADDR_LO */
+#define MME0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_MSG_BASE3_ADDR_HI */
+#define MME0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_LDMA_TSIZE_OFFSET */
+#define MME0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define MME0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define MME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_FENCE0_RDATA */
+#define MME0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE1_RDATA */
+#define MME0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE2_RDATA */
+#define MME0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE3_RDATA */
+#define MME0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* MME0_QM_CP_FENCE0_CNT */
+#define MME0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_FENCE1_CNT */
+#define MME0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_FENCE2_CNT */
+#define MME0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_FENCE3_CNT */
+#define MME0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define MME0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* MME0_QM_CP_STS */
+#define MME0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define MME0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define MME0_QM_CP_STS_ERDY_SHIFT 16
+#define MME0_QM_CP_STS_ERDY_MASK 0x10000
+#define MME0_QM_CP_STS_RRDY_SHIFT 17
+#define MME0_QM_CP_STS_RRDY_MASK 0x20000
+#define MME0_QM_CP_STS_MRDY_SHIFT 18
+#define MME0_QM_CP_STS_MRDY_MASK 0x40000
+#define MME0_QM_CP_STS_SW_STOP_SHIFT 19
+#define MME0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define MME0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define MME0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define MME0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define MME0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* MME0_QM_CP_CURRENT_INST_LO */
+#define MME0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define MME0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_CURRENT_INST_HI */
+#define MME0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define MME0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_CP_BARRIER_CFG */
+#define MME0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define MME0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define MME0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define MME0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* MME0_QM_CP_DBG_0 */
+#define MME0_QM_CP_DBG_0_CS_SHIFT 0
+#define MME0_QM_CP_DBG_0_CS_MASK 0xF
+#define MME0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4
+#define MME0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10
+#define MME0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5
+#define MME0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20
+#define MME0_QM_CP_DBG_0_MREB_STALL_SHIFT 6
+#define MME0_QM_CP_DBG_0_MREB_STALL_MASK 0x40
+#define MME0_QM_CP_DBG_0_STALL_SHIFT 7
+#define MME0_QM_CP_DBG_0_STALL_MASK 0x80
+
+/* MME0_QM_CP_ARUSER_31_11 */
+#define MME0_QM_CP_ARUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_CP_AWUSER_31_11 */
+#define MME0_QM_CP_AWUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_ARB_CFG_0 */
+#define MME0_QM_ARB_CFG_0_TYPE_SHIFT 0
+#define MME0_QM_ARB_CFG_0_TYPE_MASK 0x1
+#define MME0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define MME0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define MME0_QM_ARB_CFG_0_EN_SHIFT 8
+#define MME0_QM_ARB_CFG_0_EN_MASK 0x100
+#define MME0_QM_ARB_CFG_0_MASK_SHIFT 12
+#define MME0_QM_ARB_CFG_0_MASK_MASK 0xF000
+#define MME0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16
+#define MME0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000
+
+/* MME0_QM_ARB_CHOISE_Q_PUSH */
+#define MME0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0
+#define MME0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3
+
+/* MME0_QM_ARB_WRR_WEIGHT */
+#define MME0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define MME0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_CFG_1 */
+#define MME0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define MME0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* MME0_QM_ARB_MST_AVAIL_CRED */
+#define MME0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* MME0_QM_ARB_MST_CRED_INC */
+#define MME0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_MST_CHOISE_PUSH_OFST */
+#define MME0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_MST_SLAVE_EN */
+#define MME0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_MST_QUIET_PER */
+#define MME0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_SLV_CHOISE_WDT */
+#define MME0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_SLV_ID */
+#define MME0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_ID_VAL_MASK 0x1F
+
+/* MME0_QM_ARB_MSG_MAX_INFLIGHT */
+#define MME0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define MME0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* MME0_QM_ARB_MSG_AWUSER_31_11 */
+#define MME0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0
+#define MME0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* MME0_QM_ARB_MSG_AWUSER_SEC_PROP */
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10
+#define MME0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400
+
+/* MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10
+#define MME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400
+
+/* MME0_QM_ARB_BASE_LO */
+#define MME0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define MME0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_BASE_HI */
+#define MME0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define MME0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_STATE_STS */
+#define MME0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define MME0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_ARB_CHOISE_FULLNESS_STS */
+#define MME0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0
+#define MME0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* MME0_QM_ARB_MSG_STS */
+#define MME0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define MME0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define MME0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define MME0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* MME0_QM_ARB_SLV_CHOISE_Q_HEAD */
+#define MME0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0
+#define MME0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3
+
+/* MME0_QM_ARB_ERR_CAUSE */
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1
+#define MME0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2
+#define MME0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define MME0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* MME0_QM_ARB_ERR_MSG_EN */
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1
+#define MME0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define MME0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define MME0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* MME0_QM_ARB_ERR_STS_DRP */
+#define MME0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define MME0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* MME0_QM_ARB_MST_CRED_STS */
+#define MME0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define MME0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+
+/* MME0_QM_CGM_CFG */
+#define MME0_QM_CGM_CFG_IDLE_TH_SHIFT 0
+#define MME0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
+#define MME0_QM_CGM_CFG_G2F_TH_SHIFT 16
+#define MME0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000
+#define MME0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24
+#define MME0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000
+#define MME0_QM_CGM_CFG_EN_SHIFT 31
+#define MME0_QM_CGM_CFG_EN_MASK 0x80000000
+
+/* MME0_QM_CGM_STS */
+#define MME0_QM_CGM_STS_ST_SHIFT 0
+#define MME0_QM_CGM_STS_ST_MASK 0x3
+#define MME0_QM_CGM_STS_CG_SHIFT 4
+#define MME0_QM_CGM_STS_CG_MASK 0x10
+#define MME0_QM_CGM_STS_AGENT_IDLE_SHIFT 8
+#define MME0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define MME0_QM_CGM_STS_AXI_IDLE_SHIFT 9
+#define MME0_QM_CGM_STS_AXI_IDLE_MASK 0x200
+#define MME0_QM_CGM_STS_CP_IDLE_SHIFT 10
+#define MME0_QM_CGM_STS_CP_IDLE_MASK 0x400
+
+/* MME0_QM_CGM_CFG1 */
+#define MME0_QM_CGM_CFG1_MASK_TH_SHIFT 0
+#define MME0_QM_CGM_CFG1_MASK_TH_MASK 0xFF
+
+/* MME0_QM_LOCAL_RANGE_BASE */
+#define MME0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define MME0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* MME0_QM_LOCAL_RANGE_SIZE */
+#define MME0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define MME0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* MME0_QM_CSMR_STRICT_PRIO_CFG */
+#define MME0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0
+#define MME0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1
+
+/* MME0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* MME0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* MME0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define MME0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* MME0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define MME0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* MME0_QM_GLBL_AXCACHE */
+#define MME0_QM_GLBL_AXCACHE_AR_SHIFT 0
+#define MME0_QM_GLBL_AXCACHE_AR_MASK 0xF
+#define MME0_QM_GLBL_AXCACHE_AW_SHIFT 16
+#define MME0_QM_GLBL_AXCACHE_AW_MASK 0xF0000
+
+/* MME0_QM_IND_GW_APB_CFG */
+#define MME0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define MME0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define MME0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define MME0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* MME0_QM_IND_GW_APB_WDATA */
+#define MME0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define MME0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_IND_GW_APB_RDATA */
+#define MME0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define MME0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_IND_GW_APB_STATUS */
+#define MME0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define MME0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define MME0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define MME0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* MME0_QM_GLBL_ERR_ADDR_LO */
+#define MME0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define MME0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_GLBL_ERR_ADDR_HI */
+#define MME0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define MME0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_GLBL_ERR_WDATA */
+#define MME0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define MME0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* MME0_QM_GLBL_MEM_INIT_BUSY */
+#define MME0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0
+#define MME0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF
+
+#endif /* ASIC_REG_MME0_QM_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
new file mode 100644
index 000000000000..4f078b328b00
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME0_QM_REGS_H_
+#define ASIC_REG_MME0_QM_REGS_H_
+
+/*
+ *****************************************
+ * MME0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmMME0_QM_GLBL_CFG0 0x68000
+
+#define mmMME0_QM_GLBL_CFG1 0x68004
+
+#define mmMME0_QM_GLBL_PROT 0x68008
+
+#define mmMME0_QM_GLBL_ERR_CFG 0x6800C
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_0 0x68010
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_1 0x68014
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_2 0x68018
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_3 0x6801C
+
+#define mmMME0_QM_GLBL_SECURE_PROPS_4 0x68020
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_0 0x68024
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_1 0x68028
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_2 0x6802C
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_3 0x68030
+
+#define mmMME0_QM_GLBL_NON_SECURE_PROPS_4 0x68034
+
+#define mmMME0_QM_GLBL_STS0 0x68038
+
+#define mmMME0_QM_GLBL_STS1_0 0x68040
+
+#define mmMME0_QM_GLBL_STS1_1 0x68044
+
+#define mmMME0_QM_GLBL_STS1_2 0x68048
+
+#define mmMME0_QM_GLBL_STS1_3 0x6804C
+
+#define mmMME0_QM_GLBL_STS1_4 0x68050
+
+#define mmMME0_QM_GLBL_MSG_EN_0 0x68054
+
+#define mmMME0_QM_GLBL_MSG_EN_1 0x68058
+
+#define mmMME0_QM_GLBL_MSG_EN_2 0x6805C
+
+#define mmMME0_QM_GLBL_MSG_EN_3 0x68060
+
+#define mmMME0_QM_GLBL_MSG_EN_4 0x68068
+
+#define mmMME0_QM_PQ_BASE_LO_0 0x68070
+
+#define mmMME0_QM_PQ_BASE_LO_1 0x68074
+
+#define mmMME0_QM_PQ_BASE_LO_2 0x68078
+
+#define mmMME0_QM_PQ_BASE_LO_3 0x6807C
+
+#define mmMME0_QM_PQ_BASE_HI_0 0x68080
+
+#define mmMME0_QM_PQ_BASE_HI_1 0x68084
+
+#define mmMME0_QM_PQ_BASE_HI_2 0x68088
+
+#define mmMME0_QM_PQ_BASE_HI_3 0x6808C
+
+#define mmMME0_QM_PQ_SIZE_0 0x68090
+
+#define mmMME0_QM_PQ_SIZE_1 0x68094
+
+#define mmMME0_QM_PQ_SIZE_2 0x68098
+
+#define mmMME0_QM_PQ_SIZE_3 0x6809C
+
+#define mmMME0_QM_PQ_PI_0 0x680A0
+
+#define mmMME0_QM_PQ_PI_1 0x680A4
+
+#define mmMME0_QM_PQ_PI_2 0x680A8
+
+#define mmMME0_QM_PQ_PI_3 0x680AC
+
+#define mmMME0_QM_PQ_CI_0 0x680B0
+
+#define mmMME0_QM_PQ_CI_1 0x680B4
+
+#define mmMME0_QM_PQ_CI_2 0x680B8
+
+#define mmMME0_QM_PQ_CI_3 0x680BC
+
+#define mmMME0_QM_PQ_CFG0_0 0x680C0
+
+#define mmMME0_QM_PQ_CFG0_1 0x680C4
+
+#define mmMME0_QM_PQ_CFG0_2 0x680C8
+
+#define mmMME0_QM_PQ_CFG0_3 0x680CC
+
+#define mmMME0_QM_PQ_CFG1_0 0x680D0
+
+#define mmMME0_QM_PQ_CFG1_1 0x680D4
+
+#define mmMME0_QM_PQ_CFG1_2 0x680D8
+
+#define mmMME0_QM_PQ_CFG1_3 0x680DC
+
+#define mmMME0_QM_PQ_ARUSER_31_11_0 0x680E0
+
+#define mmMME0_QM_PQ_ARUSER_31_11_1 0x680E4
+
+#define mmMME0_QM_PQ_ARUSER_31_11_2 0x680E8
+
+#define mmMME0_QM_PQ_ARUSER_31_11_3 0x680EC
+
+#define mmMME0_QM_PQ_STS0_0 0x680F0
+
+#define mmMME0_QM_PQ_STS0_1 0x680F4
+
+#define mmMME0_QM_PQ_STS0_2 0x680F8
+
+#define mmMME0_QM_PQ_STS0_3 0x680FC
+
+#define mmMME0_QM_PQ_STS1_0 0x68100
+
+#define mmMME0_QM_PQ_STS1_1 0x68104
+
+#define mmMME0_QM_PQ_STS1_2 0x68108
+
+#define mmMME0_QM_PQ_STS1_3 0x6810C
+
+#define mmMME0_QM_CQ_CFG0_0 0x68110
+
+#define mmMME0_QM_CQ_CFG0_1 0x68114
+
+#define mmMME0_QM_CQ_CFG0_2 0x68118
+
+#define mmMME0_QM_CQ_CFG0_3 0x6811C
+
+#define mmMME0_QM_CQ_CFG0_4 0x68120
+
+#define mmMME0_QM_CQ_CFG1_0 0x68124
+
+#define mmMME0_QM_CQ_CFG1_1 0x68128
+
+#define mmMME0_QM_CQ_CFG1_2 0x6812C
+
+#define mmMME0_QM_CQ_CFG1_3 0x68130
+
+#define mmMME0_QM_CQ_CFG1_4 0x68134
+
+#define mmMME0_QM_CQ_ARUSER_31_11_0 0x68138
+
+#define mmMME0_QM_CQ_ARUSER_31_11_1 0x6813C
+
+#define mmMME0_QM_CQ_ARUSER_31_11_2 0x68140
+
+#define mmMME0_QM_CQ_ARUSER_31_11_3 0x68144
+
+#define mmMME0_QM_CQ_ARUSER_31_11_4 0x68148
+
+#define mmMME0_QM_CQ_STS0_0 0x6814C
+
+#define mmMME0_QM_CQ_STS0_1 0x68150
+
+#define mmMME0_QM_CQ_STS0_2 0x68154
+
+#define mmMME0_QM_CQ_STS0_3 0x68158
+
+#define mmMME0_QM_CQ_STS0_4 0x6815C
+
+#define mmMME0_QM_CQ_STS1_0 0x68160
+
+#define mmMME0_QM_CQ_STS1_1 0x68164
+
+#define mmMME0_QM_CQ_STS1_2 0x68168
+
+#define mmMME0_QM_CQ_STS1_3 0x6816C
+
+#define mmMME0_QM_CQ_STS1_4 0x68170
+
+#define mmMME0_QM_CQ_PTR_LO_0 0x68174
+
+#define mmMME0_QM_CQ_PTR_HI_0 0x68178
+
+#define mmMME0_QM_CQ_TSIZE_0 0x6817C
+
+#define mmMME0_QM_CQ_CTL_0 0x68180
+
+#define mmMME0_QM_CQ_PTR_LO_1 0x68184
+
+#define mmMME0_QM_CQ_PTR_HI_1 0x68188
+
+#define mmMME0_QM_CQ_TSIZE_1 0x6818C
+
+#define mmMME0_QM_CQ_CTL_1 0x68190
+
+#define mmMME0_QM_CQ_PTR_LO_2 0x68194
+
+#define mmMME0_QM_CQ_PTR_HI_2 0x68198
+
+#define mmMME0_QM_CQ_TSIZE_2 0x6819C
+
+#define mmMME0_QM_CQ_CTL_2 0x681A0
+
+#define mmMME0_QM_CQ_PTR_LO_3 0x681A4
+
+#define mmMME0_QM_CQ_PTR_HI_3 0x681A8
+
+#define mmMME0_QM_CQ_TSIZE_3 0x681AC
+
+#define mmMME0_QM_CQ_CTL_3 0x681B0
+
+#define mmMME0_QM_CQ_PTR_LO_4 0x681B4
+
+#define mmMME0_QM_CQ_PTR_HI_4 0x681B8
+
+#define mmMME0_QM_CQ_TSIZE_4 0x681BC
+
+#define mmMME0_QM_CQ_CTL_4 0x681C0
+
+#define mmMME0_QM_CQ_PTR_LO_STS_0 0x681C4
+
+#define mmMME0_QM_CQ_PTR_LO_STS_1 0x681C8
+
+#define mmMME0_QM_CQ_PTR_LO_STS_2 0x681CC
+
+#define mmMME0_QM_CQ_PTR_LO_STS_3 0x681D0
+
+#define mmMME0_QM_CQ_PTR_LO_STS_4 0x681D4
+
+#define mmMME0_QM_CQ_PTR_HI_STS_0 0x681D8
+
+#define mmMME0_QM_CQ_PTR_HI_STS_1 0x681DC
+
+#define mmMME0_QM_CQ_PTR_HI_STS_2 0x681E0
+
+#define mmMME0_QM_CQ_PTR_HI_STS_3 0x681E4
+
+#define mmMME0_QM_CQ_PTR_HI_STS_4 0x681E8
+
+#define mmMME0_QM_CQ_TSIZE_STS_0 0x681EC
+
+#define mmMME0_QM_CQ_TSIZE_STS_1 0x681F0
+
+#define mmMME0_QM_CQ_TSIZE_STS_2 0x681F4
+
+#define mmMME0_QM_CQ_TSIZE_STS_3 0x681F8
+
+#define mmMME0_QM_CQ_TSIZE_STS_4 0x681FC
+
+#define mmMME0_QM_CQ_CTL_STS_0 0x68200
+
+#define mmMME0_QM_CQ_CTL_STS_1 0x68204
+
+#define mmMME0_QM_CQ_CTL_STS_2 0x68208
+
+#define mmMME0_QM_CQ_CTL_STS_3 0x6820C
+
+#define mmMME0_QM_CQ_CTL_STS_4 0x68210
+
+#define mmMME0_QM_CQ_IFIFO_CNT_0 0x68214
+
+#define mmMME0_QM_CQ_IFIFO_CNT_1 0x68218
+
+#define mmMME0_QM_CQ_IFIFO_CNT_2 0x6821C
+
+#define mmMME0_QM_CQ_IFIFO_CNT_3 0x68220
+
+#define mmMME0_QM_CQ_IFIFO_CNT_4 0x68224
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 0x68228
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_1 0x6822C
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_2 0x68230
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_3 0x68234
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_LO_4 0x68238
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 0x6823C
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_1 0x68240
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_2 0x68244
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_3 0x68248
+
+#define mmMME0_QM_CP_MSG_BASE0_ADDR_HI_4 0x6824C
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 0x68250
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_1 0x68254
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_2 0x68258
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_3 0x6825C
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_LO_4 0x68260
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 0x68264
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_1 0x68268
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_2 0x6826C
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_3 0x68270
+
+#define mmMME0_QM_CP_MSG_BASE1_ADDR_HI_4 0x68274
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_0 0x68278
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_1 0x6827C
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_2 0x68280
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_3 0x68284
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_LO_4 0x68288
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_0 0x6828C
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_1 0x68290
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_2 0x68294
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_3 0x68298
+
+#define mmMME0_QM_CP_MSG_BASE2_ADDR_HI_4 0x6829C
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_0 0x682A0
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_1 0x682A4
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_2 0x682A8
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_3 0x682AC
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_LO_4 0x682B0
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_0 0x682B4
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_1 0x682B8
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_2 0x682BC
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_3 0x682C0
+
+#define mmMME0_QM_CP_MSG_BASE3_ADDR_HI_4 0x682C4
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 0x682C8
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_1 0x682CC
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_2 0x682D0
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_3 0x682D4
+
+#define mmMME0_QM_CP_LDMA_TSIZE_OFFSET_4 0x682D8
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x682E0
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x682E4
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x682E8
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x682EC
+
+#define mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x682F0
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x682F4
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x682F8
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x682FC
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x68300
+
+#define mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x68304
+
+#define mmMME0_QM_CP_FENCE0_RDATA_0 0x68308
+
+#define mmMME0_QM_CP_FENCE0_RDATA_1 0x6830C
+
+#define mmMME0_QM_CP_FENCE0_RDATA_2 0x68310
+
+#define mmMME0_QM_CP_FENCE0_RDATA_3 0x68314
+
+#define mmMME0_QM_CP_FENCE0_RDATA_4 0x68318
+
+#define mmMME0_QM_CP_FENCE1_RDATA_0 0x6831C
+
+#define mmMME0_QM_CP_FENCE1_RDATA_1 0x68320
+
+#define mmMME0_QM_CP_FENCE1_RDATA_2 0x68324
+
+#define mmMME0_QM_CP_FENCE1_RDATA_3 0x68328
+
+#define mmMME0_QM_CP_FENCE1_RDATA_4 0x6832C
+
+#define mmMME0_QM_CP_FENCE2_RDATA_0 0x68330
+
+#define mmMME0_QM_CP_FENCE2_RDATA_1 0x68334
+
+#define mmMME0_QM_CP_FENCE2_RDATA_2 0x68338
+
+#define mmMME0_QM_CP_FENCE2_RDATA_3 0x6833C
+
+#define mmMME0_QM_CP_FENCE2_RDATA_4 0x68340
+
+#define mmMME0_QM_CP_FENCE3_RDATA_0 0x68344
+
+#define mmMME0_QM_CP_FENCE3_RDATA_1 0x68348
+
+#define mmMME0_QM_CP_FENCE3_RDATA_2 0x6834C
+
+#define mmMME0_QM_CP_FENCE3_RDATA_3 0x68350
+
+#define mmMME0_QM_CP_FENCE3_RDATA_4 0x68354
+
+#define mmMME0_QM_CP_FENCE0_CNT_0 0x68358
+
+#define mmMME0_QM_CP_FENCE0_CNT_1 0x6835C
+
+#define mmMME0_QM_CP_FENCE0_CNT_2 0x68360
+
+#define mmMME0_QM_CP_FENCE0_CNT_3 0x68364
+
+#define mmMME0_QM_CP_FENCE0_CNT_4 0x68368
+
+#define mmMME0_QM_CP_FENCE1_CNT_0 0x6836C
+
+#define mmMME0_QM_CP_FENCE1_CNT_1 0x68370
+
+#define mmMME0_QM_CP_FENCE1_CNT_2 0x68374
+
+#define mmMME0_QM_CP_FENCE1_CNT_3 0x68378
+
+#define mmMME0_QM_CP_FENCE1_CNT_4 0x6837C
+
+#define mmMME0_QM_CP_FENCE2_CNT_0 0x68380
+
+#define mmMME0_QM_CP_FENCE2_CNT_1 0x68384
+
+#define mmMME0_QM_CP_FENCE2_CNT_2 0x68388
+
+#define mmMME0_QM_CP_FENCE2_CNT_3 0x6838C
+
+#define mmMME0_QM_CP_FENCE2_CNT_4 0x68390
+
+#define mmMME0_QM_CP_FENCE3_CNT_0 0x68394
+
+#define mmMME0_QM_CP_FENCE3_CNT_1 0x68398
+
+#define mmMME0_QM_CP_FENCE3_CNT_2 0x6839C
+
+#define mmMME0_QM_CP_FENCE3_CNT_3 0x683A0
+
+#define mmMME0_QM_CP_FENCE3_CNT_4 0x683A4
+
+#define mmMME0_QM_CP_STS_0 0x683A8
+
+#define mmMME0_QM_CP_STS_1 0x683AC
+
+#define mmMME0_QM_CP_STS_2 0x683B0
+
+#define mmMME0_QM_CP_STS_3 0x683B4
+
+#define mmMME0_QM_CP_STS_4 0x683B8
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_0 0x683BC
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_1 0x683C0
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_2 0x683C4
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_3 0x683C8
+
+#define mmMME0_QM_CP_CURRENT_INST_LO_4 0x683CC
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_0 0x683D0
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_1 0x683D4
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_2 0x683D8
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_3 0x683DC
+
+#define mmMME0_QM_CP_CURRENT_INST_HI_4 0x683E0
+
+#define mmMME0_QM_CP_BARRIER_CFG_0 0x683F4
+
+#define mmMME0_QM_CP_BARRIER_CFG_1 0x683F8
+
+#define mmMME0_QM_CP_BARRIER_CFG_2 0x683FC
+
+#define mmMME0_QM_CP_BARRIER_CFG_3 0x68400
+
+#define mmMME0_QM_CP_BARRIER_CFG_4 0x68404
+
+#define mmMME0_QM_CP_DBG_0_0 0x68408
+
+#define mmMME0_QM_CP_DBG_0_1 0x6840C
+
+#define mmMME0_QM_CP_DBG_0_2 0x68410
+
+#define mmMME0_QM_CP_DBG_0_3 0x68414
+
+#define mmMME0_QM_CP_DBG_0_4 0x68418
+
+#define mmMME0_QM_CP_ARUSER_31_11_0 0x6841C
+
+#define mmMME0_QM_CP_ARUSER_31_11_1 0x68420
+
+#define mmMME0_QM_CP_ARUSER_31_11_2 0x68424
+
+#define mmMME0_QM_CP_ARUSER_31_11_3 0x68428
+
+#define mmMME0_QM_CP_ARUSER_31_11_4 0x6842C
+
+#define mmMME0_QM_CP_AWUSER_31_11_0 0x68430
+
+#define mmMME0_QM_CP_AWUSER_31_11_1 0x68434
+
+#define mmMME0_QM_CP_AWUSER_31_11_2 0x68438
+
+#define mmMME0_QM_CP_AWUSER_31_11_3 0x6843C
+
+#define mmMME0_QM_CP_AWUSER_31_11_4 0x68440
+
+#define mmMME0_QM_ARB_CFG_0 0x68A00
+
+#define mmMME0_QM_ARB_CHOISE_Q_PUSH 0x68A04
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_0 0x68A08
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_1 0x68A0C
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_2 0x68A10
+
+#define mmMME0_QM_ARB_WRR_WEIGHT_3 0x68A14
+
+#define mmMME0_QM_ARB_CFG_1 0x68A18
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_0 0x68A20
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_1 0x68A24
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_2 0x68A28
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_3 0x68A2C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_4 0x68A30
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_5 0x68A34
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_6 0x68A38
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_7 0x68A3C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_8 0x68A40
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_9 0x68A44
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_10 0x68A48
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_11 0x68A4C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_12 0x68A50
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_13 0x68A54
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_14 0x68A58
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_15 0x68A5C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_16 0x68A60
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_17 0x68A64
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_18 0x68A68
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_19 0x68A6C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_20 0x68A70
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_21 0x68A74
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_22 0x68A78
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_23 0x68A7C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_24 0x68A80
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_25 0x68A84
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_26 0x68A88
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_27 0x68A8C
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_28 0x68A90
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_29 0x68A94
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_30 0x68A98
+
+#define mmMME0_QM_ARB_MST_AVAIL_CRED_31 0x68A9C
+
+#define mmMME0_QM_ARB_MST_CRED_INC 0x68AA0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x68AA4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x68AA8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x68AAC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x68AB0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x68AB4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x68AB8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x68ABC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x68AC0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x68AC4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x68AC8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x68ACC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x68AD0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x68AD4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x68AD8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x68ADC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x68AE0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x68AE4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x68AE8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x68AEC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x68AF0
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x68AF4
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x68AF8
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x68AFC
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x68B00
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x68B04
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x68B08
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x68B0C
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x68B10
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x68B14
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x68B18
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x68B1C
+
+#define mmMME0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x68B20
+
+#define mmMME0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x68B28
+
+#define mmMME0_QM_ARB_MST_SLAVE_EN 0x68B2C
+
+#define mmMME0_QM_ARB_MST_QUIET_PER 0x68B34
+
+#define mmMME0_QM_ARB_SLV_CHOISE_WDT 0x68B38
+
+#define mmMME0_QM_ARB_SLV_ID 0x68B3C
+
+#define mmMME0_QM_ARB_MSG_MAX_INFLIGHT 0x68B44
+
+#define mmMME0_QM_ARB_MSG_AWUSER_31_11 0x68B48
+
+#define mmMME0_QM_ARB_MSG_AWUSER_SEC_PROP 0x68B4C
+
+#define mmMME0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x68B50
+
+#define mmMME0_QM_ARB_BASE_LO 0x68B54
+
+#define mmMME0_QM_ARB_BASE_HI 0x68B58
+
+#define mmMME0_QM_ARB_STATE_STS 0x68B80
+
+#define mmMME0_QM_ARB_CHOISE_FULLNESS_STS 0x68B84
+
+#define mmMME0_QM_ARB_MSG_STS 0x68B88
+
+#define mmMME0_QM_ARB_SLV_CHOISE_Q_HEAD 0x68B8C
+
+#define mmMME0_QM_ARB_ERR_CAUSE 0x68B9C
+
+#define mmMME0_QM_ARB_ERR_MSG_EN 0x68BA0
+
+#define mmMME0_QM_ARB_ERR_STS_DRP 0x68BA8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_0 0x68BB0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_1 0x68BB4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_2 0x68BB8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_3 0x68BBC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_4 0x68BC0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_5 0x68BC4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_6 0x68BC8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_7 0x68BCC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_8 0x68BD0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_9 0x68BD4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_10 0x68BD8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_11 0x68BDC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_12 0x68BE0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_13 0x68BE4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_14 0x68BE8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_15 0x68BEC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_16 0x68BF0
+
+#define mmMME0_QM_ARB_MST_CRED_STS_17 0x68BF4
+
+#define mmMME0_QM_ARB_MST_CRED_STS_18 0x68BF8
+
+#define mmMME0_QM_ARB_MST_CRED_STS_19 0x68BFC
+
+#define mmMME0_QM_ARB_MST_CRED_STS_20 0x68C00
+
+#define mmMME0_QM_ARB_MST_CRED_STS_21 0x68C04
+
+#define mmMME0_QM_ARB_MST_CRED_STS_22 0x68C08
+
+#define mmMME0_QM_ARB_MST_CRED_STS_23 0x68C0C
+
+#define mmMME0_QM_ARB_MST_CRED_STS_24 0x68C10
+
+#define mmMME0_QM_ARB_MST_CRED_STS_25 0x68C14
+
+#define mmMME0_QM_ARB_MST_CRED_STS_26 0x68C18
+
+#define mmMME0_QM_ARB_MST_CRED_STS_27 0x68C1C
+
+#define mmMME0_QM_ARB_MST_CRED_STS_28 0x68C20
+
+#define mmMME0_QM_ARB_MST_CRED_STS_29 0x68C24
+
+#define mmMME0_QM_ARB_MST_CRED_STS_30 0x68C28
+
+#define mmMME0_QM_ARB_MST_CRED_STS_31 0x68C2C
+
+#define mmMME0_QM_CGM_CFG 0x68C70
+
+#define mmMME0_QM_CGM_STS 0x68C74
+
+#define mmMME0_QM_CGM_CFG1 0x68C78
+
+#define mmMME0_QM_LOCAL_RANGE_BASE 0x68C80
+
+#define mmMME0_QM_LOCAL_RANGE_SIZE 0x68C84
+
+#define mmMME0_QM_CSMR_STRICT_PRIO_CFG 0x68C90
+
+#define mmMME0_QM_HBW_RD_RATE_LIM_CFG_1 0x68C94
+
+#define mmMME0_QM_LBW_WR_RATE_LIM_CFG_0 0x68C98
+
+#define mmMME0_QM_LBW_WR_RATE_LIM_CFG_1 0x68C9C
+
+#define mmMME0_QM_HBW_RD_RATE_LIM_CFG_0 0x68CA0
+
+#define mmMME0_QM_GLBL_AXCACHE 0x68CA4
+
+#define mmMME0_QM_IND_GW_APB_CFG 0x68CB0
+
+#define mmMME0_QM_IND_GW_APB_WDATA 0x68CB4
+
+#define mmMME0_QM_IND_GW_APB_RDATA 0x68CB8
+
+#define mmMME0_QM_IND_GW_APB_STATUS 0x68CBC
+
+#define mmMME0_QM_GLBL_ERR_ADDR_LO 0x68CD0
+
+#define mmMME0_QM_GLBL_ERR_ADDR_HI 0x68CD4
+
+#define mmMME0_QM_GLBL_ERR_WDATA 0x68CD8
+
+#define mmMME0_QM_GLBL_MEM_INIT_BUSY 0x68D00
+
+#endif /* ASIC_REG_MME0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
new file mode 100644
index 000000000000..6c07f7d45490
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME1_CTRL_REGS_H_
+#define ASIC_REG_MME1_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME1_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME1_CTRL_ARCH_STATUS 0xE0000
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_S 0xE0008
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_L 0xE000C
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_HIGH_O 0xE0010
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_S 0xE0014
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_L 0xE0018
+
+#define mmMME1_CTRL_ARCH_BASE_ADDR_LOW_O 0xE001C
+
+#define mmMME1_CTRL_ARCH_HEADER_LOW 0xE0020
+
+#define mmMME1_CTRL_ARCH_HEADER_HIGH 0xE0024
+
+#define mmMME1_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0xE0028
+
+#define mmMME1_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0xE002C
+
+#define mmMME1_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0xE0030
+
+#define mmMME1_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0xE0034
+
+#define mmMME1_CTRL_ARCH_OUTER_LOOP 0xE0038
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0xE003C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0xE0040
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0xE0044
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0xE0048
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0xE004C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0xE0050
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0xE0054
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0xE0058
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0xE005C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0xE0060
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0xE0064
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0xE0068
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0xE006C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0xE0070
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0xE0074
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0xE0078
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0xE007C
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0xE0080
+
+#define mmMME1_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0084
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0xE0088
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0xE008C
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0xE0090
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0xE0094
+
+#define mmMME1_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0xE0098
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_0 0xE009C
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_1 0xE00A0
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_2 0xE00A4
+
+#define mmMME1_CTRL_ARCH_AGU_S_START_OFFSET_3 0xE00A8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0xE00AC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0xE00B0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0xE00B4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0xE00B8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0xE00BC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0xE00C0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0xE00C4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0xE00C8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0xE00CC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0xE00D0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0xE00D4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0xE00D8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0xE00DC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0xE00E0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0xE00E4
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0xE00E8
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0xE00EC
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0xE00F0
+
+#define mmMME1_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE00F4
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE00F8
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE00FC
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0100
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0104
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0108
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0xE010C
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0xE0110
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0xE0114
+
+#define mmMME1_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0xE0118
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE011C
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0120
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0124
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0128
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE012C
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0xE0130
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0xE0134
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0xE0138
+
+#define mmMME1_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0xE013C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0xE0140
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0xE0144
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0xE0148
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0xE014C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0xE0150
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0xE0154
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0xE0158
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0xE015C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0xE0160
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0xE0164
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0xE0168
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0xE016C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0xE0170
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0xE0174
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0xE0178
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0xE017C
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0xE0180
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0xE0184
+
+#define mmMME1_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0188
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE018C
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0190
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0194
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0198
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE019C
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0xE01A0
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0xE01A4
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0xE01A8
+
+#define mmMME1_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0xE01AC
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE01B0
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE01B4
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE01B8
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE01BC
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE01C0
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0xE01C4
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0xE01C8
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0xE01CC
+
+#define mmMME1_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0xE01D0
+
+#define mmMME1_CTRL_ARCH_DESC_SB_REPEAT 0xE01D4
+
+#define mmMME1_CTRL_ARCH_DESC_RATE_LIMITER 0xE01D8
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE01DC
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE01E0
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0xE01E4
+
+#define mmMME1_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0xE01E8
+
+#define mmMME1_CTRL_ARCH_DESC_AXI_USER_DATA 0xE01EC
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_S 0xE01F0
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0xE01F4
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0xE01F8
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0xE01FC
+
+#define mmMME1_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0xE0200
+
+#define mmMME1_CTRL_ARCH_DESC_PADDING_VALUE_S 0xE0204
+
+#define mmMME1_CTRL_ARCH_DESC_PADDING_VALUE_L 0xE0208
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_S 0xE020C
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0xE0210
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0xE0214
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0xE0218
+
+#define mmMME1_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0xE021C
+
+#define mmMME1_CTRL_ARCH_DESC_PCU_RL_SATURATION 0xE0220
+
+#define mmMME1_CTRL_ARCH_DESC_DUMMY 0xE0224
+
+#define mmMME1_CTRL_CMD 0xE0280
+
+#define mmMME1_CTRL_STATUS1 0xE0284
+
+#define mmMME1_CTRL_RESET 0xE0288
+
+#define mmMME1_CTRL_QM_STALL 0xE028C
+
+#define mmMME1_CTRL_SYNC_OBJECT_FIFO_TH 0xE0290
+
+#define mmMME1_CTRL_EUS_ROLLUP_CNT_ADD 0xE0294
+
+#define mmMME1_CTRL_INTR_CAUSE 0xE0298
+
+#define mmMME1_CTRL_INTR_MASK 0xE029C
+
+#define mmMME1_CTRL_LOG_SHADOW 0xE02A0
+
+#define mmMME1_CTRL_PCU_RL_DESC0 0xE02A4
+
+#define mmMME1_CTRL_PCU_RL_TOKEN_UPDATE 0xE02A8
+
+#define mmMME1_CTRL_PCU_RL_TH 0xE02AC
+
+#define mmMME1_CTRL_PCU_RL_MIN 0xE02B0
+
+#define mmMME1_CTRL_PCU_RL_CTRL_EN 0xE02B4
+
+#define mmMME1_CTRL_PCU_RL_HISTORY_LOG_SIZE 0xE02B8
+
+#define mmMME1_CTRL_PCU_DUMMY_A_BF16 0xE02BC
+
+#define mmMME1_CTRL_PCU_DUMMY_B_BF16 0xE02C0
+
+#define mmMME1_CTRL_PCU_DUMMY_A_FP32_ODD 0xE02C4
+
+#define mmMME1_CTRL_PCU_DUMMY_A_FP32_EVEN 0xE02C8
+
+#define mmMME1_CTRL_PCU_DUMMY_B_FP32_ODD 0xE02CC
+
+#define mmMME1_CTRL_PCU_DUMMY_B_FP32_EVEN 0xE02D0
+
+#define mmMME1_CTRL_PROT 0xE02D4
+
+#define mmMME1_CTRL_EU_POWER_SAVE_DISABLE 0xE02D8
+
+#define mmMME1_CTRL_CS_DBG_BLOCK_ID 0xE02DC
+
+#define mmMME1_CTRL_CS_DBG_STATUS_DROP_CNT 0xE02E0
+
+#define mmMME1_CTRL_TE_CLOSE_CGATE 0xE02E4
+
+#define mmMME1_CTRL_AGU_SM_INFLIGHT_CNTR 0xE02E8
+
+#define mmMME1_CTRL_AGU_SM_TOTAL_CNTR 0xE02EC
+
+#define mmMME1_CTRL_EZSYNC_OUT_CREDIT 0xE02F0
+
+#define mmMME1_CTRL_PCU_RL_SAT_SEC 0xE02F4
+
+#define mmMME1_CTRL_AGU_SYNC_MSG_AXI_USER 0xE02F8
+
+#define mmMME1_CTRL_QM_SLV_LBW_CLK_EN 0xE02FC
+
+#define mmMME1_CTRL_SHADOW_0_STATUS 0xE0400
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0xE0408
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0xE040C
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0xE0410
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0xE0414
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0xE0418
+
+#define mmMME1_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0xE041C
+
+#define mmMME1_CTRL_SHADOW_0_HEADER_LOW 0xE0420
+
+#define mmMME1_CTRL_SHADOW_0_HEADER_HIGH 0xE0424
+
+#define mmMME1_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0xE0428
+
+#define mmMME1_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0xE042C
+
+#define mmMME1_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0xE0430
+
+#define mmMME1_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0xE0434
+
+#define mmMME1_CTRL_SHADOW_0_OUTER_LOOP 0xE0438
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0xE043C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0xE0440
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0xE0444
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0xE0448
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0xE044C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0xE0450
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0xE0454
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0xE0458
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0xE045C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0xE0460
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0xE0464
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0xE0468
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0xE046C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0xE0470
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0xE0474
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0xE0478
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0xE047C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0xE0480
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0484
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0xE0488
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0xE048C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0xE0490
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0xE0494
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0xE0498
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0xE049C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0xE04A0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0xE04A4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0xE04A8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0xE04AC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0xE04B0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0xE04B4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0xE04B8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0xE04BC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0xE04C0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0xE04C4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0xE04C8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0xE04CC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0xE04D0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0xE04D4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0xE04D8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0xE04DC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0xE04E0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0xE04E4
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0xE04E8
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0xE04EC
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0xE04F0
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE04F4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE04F8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE04FC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0500
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0504
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0508
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0xE050C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0xE0510
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0xE0514
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0xE0518
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE051C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0520
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0524
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0528
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE052C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0xE0530
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0xE0534
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0xE0538
+
+#define mmMME1_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0xE053C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0xE0540
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0xE0544
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0xE0548
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0xE054C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0xE0550
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0xE0554
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0xE0558
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0xE055C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0xE0560
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0xE0564
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0xE0568
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0xE056C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0xE0570
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0xE0574
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0xE0578
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0xE057C
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0xE0580
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0xE0584
+
+#define mmMME1_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0588
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE058C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0590
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0594
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0598
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE059C
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0xE05A0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0xE05A4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0xE05A8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0xE05AC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE05B0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE05B4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE05B8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE05BC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE05C0
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0xE05C4
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0xE05C8
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0xE05CC
+
+#define mmMME1_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0xE05D0
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SB_REPEAT 0xE05D4
+
+#define mmMME1_CTRL_SHADOW_0_DESC_RATE_LIMITER 0xE05D8
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE05DC
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE05E0
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0xE05E4
+
+#define mmMME1_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0xE05E8
+
+#define mmMME1_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0xE05EC
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_S 0xE05F0
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0xE05F4
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0xE05F8
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0xE05FC
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0xE0600
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0xE0604
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0xE0608
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0xE060C
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0xE0610
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0xE0614
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0xE0618
+
+#define mmMME1_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0xE061C
+
+#define mmMME1_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0xE0620
+
+#define mmMME1_CTRL_SHADOW_0_DESC_DUMMY 0xE0624
+
+#define mmMME1_CTRL_SHADOW_1_STATUS 0xE0680
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0xE0688
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0xE068C
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0xE0690
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0xE0694
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0xE0698
+
+#define mmMME1_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0xE069C
+
+#define mmMME1_CTRL_SHADOW_1_HEADER_LOW 0xE06A0
+
+#define mmMME1_CTRL_SHADOW_1_HEADER_HIGH 0xE06A4
+
+#define mmMME1_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0xE06A8
+
+#define mmMME1_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0xE06AC
+
+#define mmMME1_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0xE06B0
+
+#define mmMME1_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0xE06B4
+
+#define mmMME1_CTRL_SHADOW_1_OUTER_LOOP 0xE06B8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0xE06BC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0xE06C0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0xE06C4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0xE06C8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0xE06CC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0xE06D0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0xE06D4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0xE06D8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0xE06DC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0xE06E0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0xE06E4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0xE06E8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0xE06EC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0xE06F0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0xE06F4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0xE06F8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0xE06FC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0xE0700
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0704
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0xE0708
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0xE070C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0xE0710
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0xE0714
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0xE0718
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0xE071C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0xE0720
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0xE0724
+
+#define mmMME1_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0xE0728
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0xE072C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0xE0730
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0xE0734
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0xE0738
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0xE073C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0xE0740
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0xE0744
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0xE0748
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0xE074C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0xE0750
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0xE0754
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0xE0758
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0xE075C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0xE0760
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0xE0764
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0xE0768
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0xE076C
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0xE0770
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE0774
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE0778
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE077C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0780
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0784
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0788
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0xE078C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0xE0790
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0xE0794
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0xE0798
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE079C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE07A0
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE07A4
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE07A8
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE07AC
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0xE07B0
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0xE07B4
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0xE07B8
+
+#define mmMME1_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0xE07BC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0xE07C0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0xE07C4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0xE07C8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0xE07CC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0xE07D0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0xE07D4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0xE07D8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0xE07DC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0xE07E0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0xE07E4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0xE07E8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0xE07EC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0xE07F0
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0xE07F4
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0xE07F8
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0xE07FC
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0xE0800
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0xE0804
+
+#define mmMME1_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0808
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE080C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0810
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0814
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0818
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE081C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0xE0820
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0xE0824
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0xE0828
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0xE082C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0830
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0834
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0838
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE083C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0840
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0xE0844
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0xE0848
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0xE084C
+
+#define mmMME1_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0xE0850
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SB_REPEAT 0xE0854
+
+#define mmMME1_CTRL_SHADOW_1_DESC_RATE_LIMITER 0xE0858
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE085C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0860
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0864
+
+#define mmMME1_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0xE0868
+
+#define mmMME1_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0xE086C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_S 0xE0870
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0xE0874
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0xE0878
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0xE087C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0xE0880
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0xE0884
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0xE0888
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0xE088C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0xE0890
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0xE0894
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0xE0898
+
+#define mmMME1_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0xE089C
+
+#define mmMME1_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0xE08A0
+
+#define mmMME1_CTRL_SHADOW_1_DESC_DUMMY 0xE08A4
+
+#define mmMME1_CTRL_SHADOW_2_STATUS 0xE0900
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0xE0908
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0xE090C
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0xE0910
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0xE0914
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0xE0918
+
+#define mmMME1_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0xE091C
+
+#define mmMME1_CTRL_SHADOW_2_HEADER_LOW 0xE0920
+
+#define mmMME1_CTRL_SHADOW_2_HEADER_HIGH 0xE0924
+
+#define mmMME1_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0xE0928
+
+#define mmMME1_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0xE092C
+
+#define mmMME1_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0xE0930
+
+#define mmMME1_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0xE0934
+
+#define mmMME1_CTRL_SHADOW_2_OUTER_LOOP 0xE0938
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0xE093C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0xE0940
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0xE0944
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0xE0948
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0xE094C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0xE0950
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0xE0954
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0xE0958
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0xE095C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0xE0960
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0xE0964
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0xE0968
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0xE096C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0xE0970
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0xE0974
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0xE0978
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0xE097C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0xE0980
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0984
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0xE0988
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0xE098C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0xE0990
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0xE0994
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0xE0998
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0xE099C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0xE09A0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0xE09A4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0xE09A8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0xE09AC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0xE09B0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0xE09B4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0xE09B8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0xE09BC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0xE09C0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0xE09C4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0xE09C8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0xE09CC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0xE09D0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0xE09D4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0xE09D8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0xE09DC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0xE09E0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0xE09E4
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0xE09E8
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0xE09EC
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0xE09F0
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE09F4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE09F8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE09FC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0A00
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0A04
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0A08
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0xE0A0C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0xE0A10
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0xE0A14
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0xE0A18
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE0A1C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0A20
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0A24
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0A28
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE0A2C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0xE0A30
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0xE0A34
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0xE0A38
+
+#define mmMME1_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0xE0A3C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0xE0A40
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0xE0A44
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0xE0A48
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0xE0A4C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0xE0A50
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0xE0A54
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0xE0A58
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0xE0A5C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0xE0A60
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0xE0A64
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0xE0A68
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0xE0A6C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0xE0A70
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0xE0A74
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0xE0A78
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0xE0A7C
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0xE0A80
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0xE0A84
+
+#define mmMME1_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0A88
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE0A8C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0A90
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0A94
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0A98
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE0A9C
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0xE0AA0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0xE0AA4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0xE0AA8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0xE0AAC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0AB0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0AB4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0AB8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE0ABC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0AC0
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0xE0AC4
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0xE0AC8
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0xE0ACC
+
+#define mmMME1_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0xE0AD0
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SB_REPEAT 0xE0AD4
+
+#define mmMME1_CTRL_SHADOW_2_DESC_RATE_LIMITER 0xE0AD8
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE0ADC
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0AE0
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0AE4
+
+#define mmMME1_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0xE0AE8
+
+#define mmMME1_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0xE0AEC
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_S 0xE0AF0
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0xE0AF4
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0xE0AF8
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0xE0AFC
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0xE0B00
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0xE0B04
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0xE0B08
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0xE0B0C
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0xE0B10
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0xE0B14
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0xE0B18
+
+#define mmMME1_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0xE0B1C
+
+#define mmMME1_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0xE0B20
+
+#define mmMME1_CTRL_SHADOW_2_DESC_DUMMY 0xE0B24
+
+#define mmMME1_CTRL_SHADOW_3_STATUS 0xE0B80
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0xE0B88
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0xE0B8C
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0xE0B90
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0xE0B94
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0xE0B98
+
+#define mmMME1_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0xE0B9C
+
+#define mmMME1_CTRL_SHADOW_3_HEADER_LOW 0xE0BA0
+
+#define mmMME1_CTRL_SHADOW_3_HEADER_HIGH 0xE0BA4
+
+#define mmMME1_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0xE0BA8
+
+#define mmMME1_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0xE0BAC
+
+#define mmMME1_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0xE0BB0
+
+#define mmMME1_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0xE0BB4
+
+#define mmMME1_CTRL_SHADOW_3_OUTER_LOOP 0xE0BB8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0xE0BBC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0xE0BC0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0xE0BC4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0xE0BC8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0xE0BCC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0xE0BD0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0xE0BD4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0xE0BD8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0xE0BDC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0xE0BE0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0xE0BE4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0xE0BE8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0xE0BEC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0xE0BF0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0xE0BF4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0xE0BF8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0xE0BFC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0xE0C00
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0xE0C04
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0xE0C08
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0xE0C0C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0xE0C10
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0xE0C14
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0xE0C18
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0xE0C1C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0xE0C20
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0xE0C24
+
+#define mmMME1_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0xE0C28
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0xE0C2C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0xE0C30
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0xE0C34
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0xE0C38
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0xE0C3C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0xE0C40
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0xE0C44
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0xE0C48
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0xE0C4C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0xE0C50
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0xE0C54
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0xE0C58
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0xE0C5C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0xE0C60
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0xE0C64
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0xE0C68
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0xE0C6C
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0xE0C70
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0xE0C74
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0xE0C78
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0xE0C7C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0xE0C80
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0xE0C84
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0xE0C88
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0xE0C8C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0xE0C90
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0xE0C94
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0xE0C98
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0xE0C9C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0xE0CA0
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0xE0CA4
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0xE0CA8
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0xE0CAC
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0xE0CB0
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0xE0CB4
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0xE0CB8
+
+#define mmMME1_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0xE0CBC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0xE0CC0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0xE0CC4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0xE0CC8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0xE0CCC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0xE0CD0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0xE0CD4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0xE0CD8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0xE0CDC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0xE0CE0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0xE0CE4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0xE0CE8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0xE0CEC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0xE0CF0
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0xE0CF4
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0xE0CF8
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0xE0CFC
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0xE0D00
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0xE0D04
+
+#define mmMME1_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0xE0D08
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0xE0D0C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0xE0D10
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0xE0D14
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0xE0D18
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0xE0D1C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0xE0D20
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0xE0D24
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0xE0D28
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0xE0D2C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0xE0D30
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0xE0D34
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0xE0D38
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0xE0D3C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0xE0D40
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0xE0D44
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0xE0D48
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0xE0D4C
+
+#define mmMME1_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0xE0D50
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SB_REPEAT 0xE0D54
+
+#define mmMME1_CTRL_SHADOW_3_DESC_RATE_LIMITER 0xE0D58
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0xE0D5C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0xE0D60
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0xE0D64
+
+#define mmMME1_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0xE0D68
+
+#define mmMME1_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0xE0D6C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_S 0xE0D70
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0xE0D74
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0xE0D78
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0xE0D7C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0xE0D80
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0xE0D84
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0xE0D88
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0xE0D8C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0xE0D90
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0xE0D94
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0xE0D98
+
+#define mmMME1_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0xE0D9C
+
+#define mmMME1_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0xE0DA0
+
+#define mmMME1_CTRL_SHADOW_3_DESC_DUMMY 0xE0DA4
+
+#endif /* ASIC_REG_MME1_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
new file mode 100644
index 000000000000..a1f2eb8b91bd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME2_CTRL_REGS_H_
+#define ASIC_REG_MME2_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME2_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME2_CTRL_ARCH_STATUS 0x160000
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_S 0x160008
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_L 0x16000C
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_HIGH_O 0x160010
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_S 0x160014
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_L 0x160018
+
+#define mmMME2_CTRL_ARCH_BASE_ADDR_LOW_O 0x16001C
+
+#define mmMME2_CTRL_ARCH_HEADER_LOW 0x160020
+
+#define mmMME2_CTRL_ARCH_HEADER_HIGH 0x160024
+
+#define mmMME2_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x160028
+
+#define mmMME2_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x16002C
+
+#define mmMME2_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x160030
+
+#define mmMME2_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x160034
+
+#define mmMME2_CTRL_ARCH_OUTER_LOOP 0x160038
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x16003C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x160040
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x160044
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x160048
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x16004C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x160050
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x160054
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x160058
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x16005C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x160060
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x160064
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x160068
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x16006C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x160070
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x160074
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x160078
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x16007C
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x160080
+
+#define mmMME2_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160084
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x160088
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x16008C
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x160090
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x160094
+
+#define mmMME2_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x160098
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_0 0x16009C
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_1 0x1600A0
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_2 0x1600A4
+
+#define mmMME2_CTRL_ARCH_AGU_S_START_OFFSET_3 0x1600A8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x1600AC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x1600B0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x1600B4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x1600B8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x1600BC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x1600C0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x1600C4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x1600C8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x1600CC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x1600D0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x1600D4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x1600D8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x1600DC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x1600E0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x1600E4
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x1600E8
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x1600EC
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x1600F0
+
+#define mmMME2_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1600F4
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1600F8
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1600FC
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160100
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160104
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160108
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x16010C
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x160110
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x160114
+
+#define mmMME2_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x160118
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16011C
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160120
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160124
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160128
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x16012C
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x160130
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x160134
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x160138
+
+#define mmMME2_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x16013C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x160140
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x160144
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x160148
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x16014C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x160150
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x160154
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x160158
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x16015C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x160160
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x160164
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x160168
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x16016C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x160170
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x160174
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x160178
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x16017C
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x160180
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x160184
+
+#define mmMME2_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160188
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16018C
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160190
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160194
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160198
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16019C
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x1601A0
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x1601A4
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x1601A8
+
+#define mmMME2_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x1601AC
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1601B0
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1601B4
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1601B8
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1601BC
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1601C0
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x1601C4
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x1601C8
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x1601CC
+
+#define mmMME2_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x1601D0
+
+#define mmMME2_CTRL_ARCH_DESC_SB_REPEAT 0x1601D4
+
+#define mmMME2_CTRL_ARCH_DESC_RATE_LIMITER 0x1601D8
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1601DC
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1601E0
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x1601E4
+
+#define mmMME2_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x1601E8
+
+#define mmMME2_CTRL_ARCH_DESC_AXI_USER_DATA 0x1601EC
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_S 0x1601F0
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x1601F4
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x1601F8
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x1601FC
+
+#define mmMME2_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x160200
+
+#define mmMME2_CTRL_ARCH_DESC_PADDING_VALUE_S 0x160204
+
+#define mmMME2_CTRL_ARCH_DESC_PADDING_VALUE_L 0x160208
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_S 0x16020C
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x160210
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x160214
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x160218
+
+#define mmMME2_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x16021C
+
+#define mmMME2_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x160220
+
+#define mmMME2_CTRL_ARCH_DESC_DUMMY 0x160224
+
+#define mmMME2_CTRL_CMD 0x160280
+
+#define mmMME2_CTRL_STATUS1 0x160284
+
+#define mmMME2_CTRL_RESET 0x160288
+
+#define mmMME2_CTRL_QM_STALL 0x16028C
+
+#define mmMME2_CTRL_SYNC_OBJECT_FIFO_TH 0x160290
+
+#define mmMME2_CTRL_EUS_ROLLUP_CNT_ADD 0x160294
+
+#define mmMME2_CTRL_INTR_CAUSE 0x160298
+
+#define mmMME2_CTRL_INTR_MASK 0x16029C
+
+#define mmMME2_CTRL_LOG_SHADOW 0x1602A0
+
+#define mmMME2_CTRL_PCU_RL_DESC0 0x1602A4
+
+#define mmMME2_CTRL_PCU_RL_TOKEN_UPDATE 0x1602A8
+
+#define mmMME2_CTRL_PCU_RL_TH 0x1602AC
+
+#define mmMME2_CTRL_PCU_RL_MIN 0x1602B0
+
+#define mmMME2_CTRL_PCU_RL_CTRL_EN 0x1602B4
+
+#define mmMME2_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x1602B8
+
+#define mmMME2_CTRL_PCU_DUMMY_A_BF16 0x1602BC
+
+#define mmMME2_CTRL_PCU_DUMMY_B_BF16 0x1602C0
+
+#define mmMME2_CTRL_PCU_DUMMY_A_FP32_ODD 0x1602C4
+
+#define mmMME2_CTRL_PCU_DUMMY_A_FP32_EVEN 0x1602C8
+
+#define mmMME2_CTRL_PCU_DUMMY_B_FP32_ODD 0x1602CC
+
+#define mmMME2_CTRL_PCU_DUMMY_B_FP32_EVEN 0x1602D0
+
+#define mmMME2_CTRL_PROT 0x1602D4
+
+#define mmMME2_CTRL_EU_POWER_SAVE_DISABLE 0x1602D8
+
+#define mmMME2_CTRL_CS_DBG_BLOCK_ID 0x1602DC
+
+#define mmMME2_CTRL_CS_DBG_STATUS_DROP_CNT 0x1602E0
+
+#define mmMME2_CTRL_TE_CLOSE_CGATE 0x1602E4
+
+#define mmMME2_CTRL_AGU_SM_INFLIGHT_CNTR 0x1602E8
+
+#define mmMME2_CTRL_AGU_SM_TOTAL_CNTR 0x1602EC
+
+#define mmMME2_CTRL_EZSYNC_OUT_CREDIT 0x1602F0
+
+#define mmMME2_CTRL_PCU_RL_SAT_SEC 0x1602F4
+
+#define mmMME2_CTRL_AGU_SYNC_MSG_AXI_USER 0x1602F8
+
+#define mmMME2_CTRL_QM_SLV_LBW_CLK_EN 0x1602FC
+
+#define mmMME2_CTRL_SHADOW_0_STATUS 0x160400
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x160408
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x16040C
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x160410
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x160414
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x160418
+
+#define mmMME2_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x16041C
+
+#define mmMME2_CTRL_SHADOW_0_HEADER_LOW 0x160420
+
+#define mmMME2_CTRL_SHADOW_0_HEADER_HIGH 0x160424
+
+#define mmMME2_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x160428
+
+#define mmMME2_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x16042C
+
+#define mmMME2_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x160430
+
+#define mmMME2_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x160434
+
+#define mmMME2_CTRL_SHADOW_0_OUTER_LOOP 0x160438
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x16043C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x160440
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x160444
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x160448
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x16044C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x160450
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x160454
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x160458
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x16045C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x160460
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x160464
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x160468
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x16046C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x160470
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x160474
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x160478
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x16047C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x160480
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160484
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x160488
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x16048C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x160490
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x160494
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x160498
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x16049C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x1604A0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x1604A4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x1604A8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x1604AC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x1604B0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x1604B4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x1604B8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x1604BC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x1604C0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x1604C4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x1604C8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x1604CC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x1604D0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x1604D4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x1604D8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x1604DC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x1604E0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x1604E4
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x1604E8
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x1604EC
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x1604F0
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1604F4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1604F8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1604FC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160500
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160504
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160508
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x16050C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x160510
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x160514
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x160518
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16051C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160520
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160524
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160528
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x16052C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x160530
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x160534
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x160538
+
+#define mmMME2_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x16053C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x160540
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x160544
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x160548
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x16054C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x160550
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x160554
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x160558
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x16055C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x160560
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x160564
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x160568
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x16056C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x160570
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x160574
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x160578
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x16057C
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x160580
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x160584
+
+#define mmMME2_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160588
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16058C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160590
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160594
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160598
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16059C
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x1605A0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x1605A4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x1605A8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x1605AC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1605B0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1605B4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1605B8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1605BC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1605C0
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x1605C4
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x1605C8
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x1605CC
+
+#define mmMME2_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x1605D0
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SB_REPEAT 0x1605D4
+
+#define mmMME2_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x1605D8
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1605DC
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1605E0
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x1605E4
+
+#define mmMME2_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x1605E8
+
+#define mmMME2_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x1605EC
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x1605F0
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x1605F4
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x1605F8
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x1605FC
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x160600
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x160604
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x160608
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x16060C
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x160610
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x160614
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x160618
+
+#define mmMME2_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x16061C
+
+#define mmMME2_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x160620
+
+#define mmMME2_CTRL_SHADOW_0_DESC_DUMMY 0x160624
+
+#define mmMME2_CTRL_SHADOW_1_STATUS 0x160680
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x160688
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x16068C
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x160690
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x160694
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x160698
+
+#define mmMME2_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x16069C
+
+#define mmMME2_CTRL_SHADOW_1_HEADER_LOW 0x1606A0
+
+#define mmMME2_CTRL_SHADOW_1_HEADER_HIGH 0x1606A4
+
+#define mmMME2_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x1606A8
+
+#define mmMME2_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x1606AC
+
+#define mmMME2_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x1606B0
+
+#define mmMME2_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x1606B4
+
+#define mmMME2_CTRL_SHADOW_1_OUTER_LOOP 0x1606B8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x1606BC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x1606C0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x1606C4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x1606C8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x1606CC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x1606D0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x1606D4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x1606D8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x1606DC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x1606E0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x1606E4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x1606E8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x1606EC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x1606F0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x1606F4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x1606F8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x1606FC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x160700
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160704
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x160708
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x16070C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x160710
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x160714
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x160718
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x16071C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x160720
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x160724
+
+#define mmMME2_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x160728
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x16072C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x160730
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x160734
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x160738
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x16073C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x160740
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x160744
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x160748
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x16074C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x160750
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x160754
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x160758
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x16075C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x160760
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x160764
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x160768
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x16076C
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x160770
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x160774
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x160778
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x16077C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160780
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160784
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160788
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x16078C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x160790
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x160794
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x160798
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x16079C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1607A0
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1607A4
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1607A8
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1607AC
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x1607B0
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x1607B4
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x1607B8
+
+#define mmMME2_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x1607BC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x1607C0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x1607C4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x1607C8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x1607CC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x1607D0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x1607D4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x1607D8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x1607DC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x1607E0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x1607E4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x1607E8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x1607EC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x1607F0
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x1607F4
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x1607F8
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x1607FC
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x160800
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x160804
+
+#define mmMME2_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160808
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x16080C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160810
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160814
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160818
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x16081C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x160820
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x160824
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x160828
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x16082C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160830
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160834
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160838
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x16083C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160840
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x160844
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x160848
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x16084C
+
+#define mmMME2_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x160850
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SB_REPEAT 0x160854
+
+#define mmMME2_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x160858
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x16085C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160860
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x160864
+
+#define mmMME2_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x160868
+
+#define mmMME2_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x16086C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x160870
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x160874
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x160878
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x16087C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x160880
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x160884
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x160888
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x16088C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x160890
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x160894
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x160898
+
+#define mmMME2_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x16089C
+
+#define mmMME2_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x1608A0
+
+#define mmMME2_CTRL_SHADOW_1_DESC_DUMMY 0x1608A4
+
+#define mmMME2_CTRL_SHADOW_2_STATUS 0x160900
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x160908
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x16090C
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x160910
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x160914
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x160918
+
+#define mmMME2_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x16091C
+
+#define mmMME2_CTRL_SHADOW_2_HEADER_LOW 0x160920
+
+#define mmMME2_CTRL_SHADOW_2_HEADER_HIGH 0x160924
+
+#define mmMME2_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x160928
+
+#define mmMME2_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x16092C
+
+#define mmMME2_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x160930
+
+#define mmMME2_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x160934
+
+#define mmMME2_CTRL_SHADOW_2_OUTER_LOOP 0x160938
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x16093C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x160940
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x160944
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x160948
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x16094C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x160950
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x160954
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x160958
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x16095C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x160960
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x160964
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x160968
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x16096C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x160970
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x160974
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x160978
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x16097C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x160980
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160984
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x160988
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x16098C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x160990
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x160994
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x160998
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x16099C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x1609A0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x1609A4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x1609A8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x1609AC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x1609B0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x1609B4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x1609B8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x1609BC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x1609C0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x1609C4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x1609C8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x1609CC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x1609D0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x1609D4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x1609D8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x1609DC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x1609E0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x1609E4
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x1609E8
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x1609EC
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x1609F0
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1609F4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1609F8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1609FC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160A00
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160A04
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160A08
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x160A0C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x160A10
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x160A14
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x160A18
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x160A1C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160A20
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160A24
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160A28
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x160A2C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x160A30
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x160A34
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x160A38
+
+#define mmMME2_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x160A3C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x160A40
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x160A44
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x160A48
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x160A4C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x160A50
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x160A54
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x160A58
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x160A5C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x160A60
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x160A64
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x160A68
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x160A6C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x160A70
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x160A74
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x160A78
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x160A7C
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x160A80
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x160A84
+
+#define mmMME2_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160A88
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x160A8C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160A90
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160A94
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160A98
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x160A9C
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x160AA0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x160AA4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x160AA8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x160AAC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160AB0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160AB4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160AB8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x160ABC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160AC0
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x160AC4
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x160AC8
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x160ACC
+
+#define mmMME2_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x160AD0
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SB_REPEAT 0x160AD4
+
+#define mmMME2_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x160AD8
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x160ADC
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160AE0
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x160AE4
+
+#define mmMME2_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x160AE8
+
+#define mmMME2_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x160AEC
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x160AF0
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x160AF4
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x160AF8
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x160AFC
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x160B00
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x160B04
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x160B08
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x160B0C
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x160B10
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x160B14
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x160B18
+
+#define mmMME2_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x160B1C
+
+#define mmMME2_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x160B20
+
+#define mmMME2_CTRL_SHADOW_2_DESC_DUMMY 0x160B24
+
+#define mmMME2_CTRL_SHADOW_3_STATUS 0x160B80
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x160B88
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x160B8C
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x160B90
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x160B94
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x160B98
+
+#define mmMME2_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x160B9C
+
+#define mmMME2_CTRL_SHADOW_3_HEADER_LOW 0x160BA0
+
+#define mmMME2_CTRL_SHADOW_3_HEADER_HIGH 0x160BA4
+
+#define mmMME2_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x160BA8
+
+#define mmMME2_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x160BAC
+
+#define mmMME2_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x160BB0
+
+#define mmMME2_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x160BB4
+
+#define mmMME2_CTRL_SHADOW_3_OUTER_LOOP 0x160BB8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x160BBC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x160BC0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x160BC4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x160BC8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x160BCC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x160BD0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x160BD4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x160BD8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x160BDC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x160BE0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x160BE4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x160BE8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x160BEC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x160BF0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x160BF4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x160BF8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x160BFC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x160C00
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x160C04
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x160C08
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x160C0C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x160C10
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x160C14
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x160C18
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x160C1C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x160C20
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x160C24
+
+#define mmMME2_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x160C28
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x160C2C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x160C30
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x160C34
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x160C38
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x160C3C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x160C40
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x160C44
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x160C48
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x160C4C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x160C50
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x160C54
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x160C58
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x160C5C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x160C60
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x160C64
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x160C68
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x160C6C
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x160C70
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x160C74
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x160C78
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x160C7C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x160C80
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x160C84
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x160C88
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x160C8C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x160C90
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x160C94
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x160C98
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x160C9C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x160CA0
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x160CA4
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x160CA8
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x160CAC
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x160CB0
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x160CB4
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x160CB8
+
+#define mmMME2_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x160CBC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x160CC0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x160CC4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x160CC8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x160CCC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x160CD0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x160CD4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x160CD8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x160CDC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x160CE0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x160CE4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x160CE8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x160CEC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x160CF0
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x160CF4
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x160CF8
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x160CFC
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x160D00
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x160D04
+
+#define mmMME2_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x160D08
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x160D0C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x160D10
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x160D14
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x160D18
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x160D1C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x160D20
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x160D24
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x160D28
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x160D2C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x160D30
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x160D34
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x160D38
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x160D3C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x160D40
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x160D44
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x160D48
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x160D4C
+
+#define mmMME2_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x160D50
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SB_REPEAT 0x160D54
+
+#define mmMME2_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x160D58
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x160D5C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x160D60
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x160D64
+
+#define mmMME2_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x160D68
+
+#define mmMME2_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x160D6C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x160D70
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x160D74
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x160D78
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x160D7C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x160D80
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x160D84
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x160D88
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x160D8C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x160D90
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x160D94
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x160D98
+
+#define mmMME2_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x160D9C
+
+#define mmMME2_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x160DA0
+
+#define mmMME2_CTRL_SHADOW_3_DESC_DUMMY 0x160DA4
+
+#endif /* ASIC_REG_MME2_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
new file mode 100644
index 000000000000..c1ea6a422010
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME2_QM_REGS_H_
+#define ASIC_REG_MME2_QM_REGS_H_
+
+/*
+ *****************************************
+ * MME2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmMME2_QM_GLBL_CFG0 0x168000
+
+#define mmMME2_QM_GLBL_CFG1 0x168004
+
+#define mmMME2_QM_GLBL_PROT 0x168008
+
+#define mmMME2_QM_GLBL_ERR_CFG 0x16800C
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_0 0x168010
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_1 0x168014
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_2 0x168018
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_3 0x16801C
+
+#define mmMME2_QM_GLBL_SECURE_PROPS_4 0x168020
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_0 0x168024
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_1 0x168028
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_2 0x16802C
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_3 0x168030
+
+#define mmMME2_QM_GLBL_NON_SECURE_PROPS_4 0x168034
+
+#define mmMME2_QM_GLBL_STS0 0x168038
+
+#define mmMME2_QM_GLBL_STS1_0 0x168040
+
+#define mmMME2_QM_GLBL_STS1_1 0x168044
+
+#define mmMME2_QM_GLBL_STS1_2 0x168048
+
+#define mmMME2_QM_GLBL_STS1_3 0x16804C
+
+#define mmMME2_QM_GLBL_STS1_4 0x168050
+
+#define mmMME2_QM_GLBL_MSG_EN_0 0x168054
+
+#define mmMME2_QM_GLBL_MSG_EN_1 0x168058
+
+#define mmMME2_QM_GLBL_MSG_EN_2 0x16805C
+
+#define mmMME2_QM_GLBL_MSG_EN_3 0x168060
+
+#define mmMME2_QM_GLBL_MSG_EN_4 0x168068
+
+#define mmMME2_QM_PQ_BASE_LO_0 0x168070
+
+#define mmMME2_QM_PQ_BASE_LO_1 0x168074
+
+#define mmMME2_QM_PQ_BASE_LO_2 0x168078
+
+#define mmMME2_QM_PQ_BASE_LO_3 0x16807C
+
+#define mmMME2_QM_PQ_BASE_HI_0 0x168080
+
+#define mmMME2_QM_PQ_BASE_HI_1 0x168084
+
+#define mmMME2_QM_PQ_BASE_HI_2 0x168088
+
+#define mmMME2_QM_PQ_BASE_HI_3 0x16808C
+
+#define mmMME2_QM_PQ_SIZE_0 0x168090
+
+#define mmMME2_QM_PQ_SIZE_1 0x168094
+
+#define mmMME2_QM_PQ_SIZE_2 0x168098
+
+#define mmMME2_QM_PQ_SIZE_3 0x16809C
+
+#define mmMME2_QM_PQ_PI_0 0x1680A0
+
+#define mmMME2_QM_PQ_PI_1 0x1680A4
+
+#define mmMME2_QM_PQ_PI_2 0x1680A8
+
+#define mmMME2_QM_PQ_PI_3 0x1680AC
+
+#define mmMME2_QM_PQ_CI_0 0x1680B0
+
+#define mmMME2_QM_PQ_CI_1 0x1680B4
+
+#define mmMME2_QM_PQ_CI_2 0x1680B8
+
+#define mmMME2_QM_PQ_CI_3 0x1680BC
+
+#define mmMME2_QM_PQ_CFG0_0 0x1680C0
+
+#define mmMME2_QM_PQ_CFG0_1 0x1680C4
+
+#define mmMME2_QM_PQ_CFG0_2 0x1680C8
+
+#define mmMME2_QM_PQ_CFG0_3 0x1680CC
+
+#define mmMME2_QM_PQ_CFG1_0 0x1680D0
+
+#define mmMME2_QM_PQ_CFG1_1 0x1680D4
+
+#define mmMME2_QM_PQ_CFG1_2 0x1680D8
+
+#define mmMME2_QM_PQ_CFG1_3 0x1680DC
+
+#define mmMME2_QM_PQ_ARUSER_31_11_0 0x1680E0
+
+#define mmMME2_QM_PQ_ARUSER_31_11_1 0x1680E4
+
+#define mmMME2_QM_PQ_ARUSER_31_11_2 0x1680E8
+
+#define mmMME2_QM_PQ_ARUSER_31_11_3 0x1680EC
+
+#define mmMME2_QM_PQ_STS0_0 0x1680F0
+
+#define mmMME2_QM_PQ_STS0_1 0x1680F4
+
+#define mmMME2_QM_PQ_STS0_2 0x1680F8
+
+#define mmMME2_QM_PQ_STS0_3 0x1680FC
+
+#define mmMME2_QM_PQ_STS1_0 0x168100
+
+#define mmMME2_QM_PQ_STS1_1 0x168104
+
+#define mmMME2_QM_PQ_STS1_2 0x168108
+
+#define mmMME2_QM_PQ_STS1_3 0x16810C
+
+#define mmMME2_QM_CQ_CFG0_0 0x168110
+
+#define mmMME2_QM_CQ_CFG0_1 0x168114
+
+#define mmMME2_QM_CQ_CFG0_2 0x168118
+
+#define mmMME2_QM_CQ_CFG0_3 0x16811C
+
+#define mmMME2_QM_CQ_CFG0_4 0x168120
+
+#define mmMME2_QM_CQ_CFG1_0 0x168124
+
+#define mmMME2_QM_CQ_CFG1_1 0x168128
+
+#define mmMME2_QM_CQ_CFG1_2 0x16812C
+
+#define mmMME2_QM_CQ_CFG1_3 0x168130
+
+#define mmMME2_QM_CQ_CFG1_4 0x168134
+
+#define mmMME2_QM_CQ_ARUSER_31_11_0 0x168138
+
+#define mmMME2_QM_CQ_ARUSER_31_11_1 0x16813C
+
+#define mmMME2_QM_CQ_ARUSER_31_11_2 0x168140
+
+#define mmMME2_QM_CQ_ARUSER_31_11_3 0x168144
+
+#define mmMME2_QM_CQ_ARUSER_31_11_4 0x168148
+
+#define mmMME2_QM_CQ_STS0_0 0x16814C
+
+#define mmMME2_QM_CQ_STS0_1 0x168150
+
+#define mmMME2_QM_CQ_STS0_2 0x168154
+
+#define mmMME2_QM_CQ_STS0_3 0x168158
+
+#define mmMME2_QM_CQ_STS0_4 0x16815C
+
+#define mmMME2_QM_CQ_STS1_0 0x168160
+
+#define mmMME2_QM_CQ_STS1_1 0x168164
+
+#define mmMME2_QM_CQ_STS1_2 0x168168
+
+#define mmMME2_QM_CQ_STS1_3 0x16816C
+
+#define mmMME2_QM_CQ_STS1_4 0x168170
+
+#define mmMME2_QM_CQ_PTR_LO_0 0x168174
+
+#define mmMME2_QM_CQ_PTR_HI_0 0x168178
+
+#define mmMME2_QM_CQ_TSIZE_0 0x16817C
+
+#define mmMME2_QM_CQ_CTL_0 0x168180
+
+#define mmMME2_QM_CQ_PTR_LO_1 0x168184
+
+#define mmMME2_QM_CQ_PTR_HI_1 0x168188
+
+#define mmMME2_QM_CQ_TSIZE_1 0x16818C
+
+#define mmMME2_QM_CQ_CTL_1 0x168190
+
+#define mmMME2_QM_CQ_PTR_LO_2 0x168194
+
+#define mmMME2_QM_CQ_PTR_HI_2 0x168198
+
+#define mmMME2_QM_CQ_TSIZE_2 0x16819C
+
+#define mmMME2_QM_CQ_CTL_2 0x1681A0
+
+#define mmMME2_QM_CQ_PTR_LO_3 0x1681A4
+
+#define mmMME2_QM_CQ_PTR_HI_3 0x1681A8
+
+#define mmMME2_QM_CQ_TSIZE_3 0x1681AC
+
+#define mmMME2_QM_CQ_CTL_3 0x1681B0
+
+#define mmMME2_QM_CQ_PTR_LO_4 0x1681B4
+
+#define mmMME2_QM_CQ_PTR_HI_4 0x1681B8
+
+#define mmMME2_QM_CQ_TSIZE_4 0x1681BC
+
+#define mmMME2_QM_CQ_CTL_4 0x1681C0
+
+#define mmMME2_QM_CQ_PTR_LO_STS_0 0x1681C4
+
+#define mmMME2_QM_CQ_PTR_LO_STS_1 0x1681C8
+
+#define mmMME2_QM_CQ_PTR_LO_STS_2 0x1681CC
+
+#define mmMME2_QM_CQ_PTR_LO_STS_3 0x1681D0
+
+#define mmMME2_QM_CQ_PTR_LO_STS_4 0x1681D4
+
+#define mmMME2_QM_CQ_PTR_HI_STS_0 0x1681D8
+
+#define mmMME2_QM_CQ_PTR_HI_STS_1 0x1681DC
+
+#define mmMME2_QM_CQ_PTR_HI_STS_2 0x1681E0
+
+#define mmMME2_QM_CQ_PTR_HI_STS_3 0x1681E4
+
+#define mmMME2_QM_CQ_PTR_HI_STS_4 0x1681E8
+
+#define mmMME2_QM_CQ_TSIZE_STS_0 0x1681EC
+
+#define mmMME2_QM_CQ_TSIZE_STS_1 0x1681F0
+
+#define mmMME2_QM_CQ_TSIZE_STS_2 0x1681F4
+
+#define mmMME2_QM_CQ_TSIZE_STS_3 0x1681F8
+
+#define mmMME2_QM_CQ_TSIZE_STS_4 0x1681FC
+
+#define mmMME2_QM_CQ_CTL_STS_0 0x168200
+
+#define mmMME2_QM_CQ_CTL_STS_1 0x168204
+
+#define mmMME2_QM_CQ_CTL_STS_2 0x168208
+
+#define mmMME2_QM_CQ_CTL_STS_3 0x16820C
+
+#define mmMME2_QM_CQ_CTL_STS_4 0x168210
+
+#define mmMME2_QM_CQ_IFIFO_CNT_0 0x168214
+
+#define mmMME2_QM_CQ_IFIFO_CNT_1 0x168218
+
+#define mmMME2_QM_CQ_IFIFO_CNT_2 0x16821C
+
+#define mmMME2_QM_CQ_IFIFO_CNT_3 0x168220
+
+#define mmMME2_QM_CQ_IFIFO_CNT_4 0x168224
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_0 0x168228
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_1 0x16822C
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_2 0x168230
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_3 0x168234
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_LO_4 0x168238
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_0 0x16823C
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_1 0x168240
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_2 0x168244
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_3 0x168248
+
+#define mmMME2_QM_CP_MSG_BASE0_ADDR_HI_4 0x16824C
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_0 0x168250
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_1 0x168254
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_2 0x168258
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_3 0x16825C
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_LO_4 0x168260
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_0 0x168264
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_1 0x168268
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_2 0x16826C
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_3 0x168270
+
+#define mmMME2_QM_CP_MSG_BASE1_ADDR_HI_4 0x168274
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_0 0x168278
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_1 0x16827C
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_2 0x168280
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_3 0x168284
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_LO_4 0x168288
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_0 0x16828C
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_1 0x168290
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_2 0x168294
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_3 0x168298
+
+#define mmMME2_QM_CP_MSG_BASE2_ADDR_HI_4 0x16829C
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_0 0x1682A0
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_1 0x1682A4
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_2 0x1682A8
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_3 0x1682AC
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_LO_4 0x1682B0
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_0 0x1682B4
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_1 0x1682B8
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_2 0x1682BC
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_3 0x1682C0
+
+#define mmMME2_QM_CP_MSG_BASE3_ADDR_HI_4 0x1682C4
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_0 0x1682C8
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_1 0x1682CC
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_2 0x1682D0
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_3 0x1682D4
+
+#define mmMME2_QM_CP_LDMA_TSIZE_OFFSET_4 0x1682D8
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x1682E0
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x1682E4
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x1682E8
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x1682EC
+
+#define mmMME2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x1682F0
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x1682F4
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x1682F8
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x1682FC
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x168300
+
+#define mmMME2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x168304
+
+#define mmMME2_QM_CP_FENCE0_RDATA_0 0x168308
+
+#define mmMME2_QM_CP_FENCE0_RDATA_1 0x16830C
+
+#define mmMME2_QM_CP_FENCE0_RDATA_2 0x168310
+
+#define mmMME2_QM_CP_FENCE0_RDATA_3 0x168314
+
+#define mmMME2_QM_CP_FENCE0_RDATA_4 0x168318
+
+#define mmMME2_QM_CP_FENCE1_RDATA_0 0x16831C
+
+#define mmMME2_QM_CP_FENCE1_RDATA_1 0x168320
+
+#define mmMME2_QM_CP_FENCE1_RDATA_2 0x168324
+
+#define mmMME2_QM_CP_FENCE1_RDATA_3 0x168328
+
+#define mmMME2_QM_CP_FENCE1_RDATA_4 0x16832C
+
+#define mmMME2_QM_CP_FENCE2_RDATA_0 0x168330
+
+#define mmMME2_QM_CP_FENCE2_RDATA_1 0x168334
+
+#define mmMME2_QM_CP_FENCE2_RDATA_2 0x168338
+
+#define mmMME2_QM_CP_FENCE2_RDATA_3 0x16833C
+
+#define mmMME2_QM_CP_FENCE2_RDATA_4 0x168340
+
+#define mmMME2_QM_CP_FENCE3_RDATA_0 0x168344
+
+#define mmMME2_QM_CP_FENCE3_RDATA_1 0x168348
+
+#define mmMME2_QM_CP_FENCE3_RDATA_2 0x16834C
+
+#define mmMME2_QM_CP_FENCE3_RDATA_3 0x168350
+
+#define mmMME2_QM_CP_FENCE3_RDATA_4 0x168354
+
+#define mmMME2_QM_CP_FENCE0_CNT_0 0x168358
+
+#define mmMME2_QM_CP_FENCE0_CNT_1 0x16835C
+
+#define mmMME2_QM_CP_FENCE0_CNT_2 0x168360
+
+#define mmMME2_QM_CP_FENCE0_CNT_3 0x168364
+
+#define mmMME2_QM_CP_FENCE0_CNT_4 0x168368
+
+#define mmMME2_QM_CP_FENCE1_CNT_0 0x16836C
+
+#define mmMME2_QM_CP_FENCE1_CNT_1 0x168370
+
+#define mmMME2_QM_CP_FENCE1_CNT_2 0x168374
+
+#define mmMME2_QM_CP_FENCE1_CNT_3 0x168378
+
+#define mmMME2_QM_CP_FENCE1_CNT_4 0x16837C
+
+#define mmMME2_QM_CP_FENCE2_CNT_0 0x168380
+
+#define mmMME2_QM_CP_FENCE2_CNT_1 0x168384
+
+#define mmMME2_QM_CP_FENCE2_CNT_2 0x168388
+
+#define mmMME2_QM_CP_FENCE2_CNT_3 0x16838C
+
+#define mmMME2_QM_CP_FENCE2_CNT_4 0x168390
+
+#define mmMME2_QM_CP_FENCE3_CNT_0 0x168394
+
+#define mmMME2_QM_CP_FENCE3_CNT_1 0x168398
+
+#define mmMME2_QM_CP_FENCE3_CNT_2 0x16839C
+
+#define mmMME2_QM_CP_FENCE3_CNT_3 0x1683A0
+
+#define mmMME2_QM_CP_FENCE3_CNT_4 0x1683A4
+
+#define mmMME2_QM_CP_STS_0 0x1683A8
+
+#define mmMME2_QM_CP_STS_1 0x1683AC
+
+#define mmMME2_QM_CP_STS_2 0x1683B0
+
+#define mmMME2_QM_CP_STS_3 0x1683B4
+
+#define mmMME2_QM_CP_STS_4 0x1683B8
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_0 0x1683BC
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_1 0x1683C0
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_2 0x1683C4
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_3 0x1683C8
+
+#define mmMME2_QM_CP_CURRENT_INST_LO_4 0x1683CC
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_0 0x1683D0
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_1 0x1683D4
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_2 0x1683D8
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_3 0x1683DC
+
+#define mmMME2_QM_CP_CURRENT_INST_HI_4 0x1683E0
+
+#define mmMME2_QM_CP_BARRIER_CFG_0 0x1683F4
+
+#define mmMME2_QM_CP_BARRIER_CFG_1 0x1683F8
+
+#define mmMME2_QM_CP_BARRIER_CFG_2 0x1683FC
+
+#define mmMME2_QM_CP_BARRIER_CFG_3 0x168400
+
+#define mmMME2_QM_CP_BARRIER_CFG_4 0x168404
+
+#define mmMME2_QM_CP_DBG_0_0 0x168408
+
+#define mmMME2_QM_CP_DBG_0_1 0x16840C
+
+#define mmMME2_QM_CP_DBG_0_2 0x168410
+
+#define mmMME2_QM_CP_DBG_0_3 0x168414
+
+#define mmMME2_QM_CP_DBG_0_4 0x168418
+
+#define mmMME2_QM_CP_ARUSER_31_11_0 0x16841C
+
+#define mmMME2_QM_CP_ARUSER_31_11_1 0x168420
+
+#define mmMME2_QM_CP_ARUSER_31_11_2 0x168424
+
+#define mmMME2_QM_CP_ARUSER_31_11_3 0x168428
+
+#define mmMME2_QM_CP_ARUSER_31_11_4 0x16842C
+
+#define mmMME2_QM_CP_AWUSER_31_11_0 0x168430
+
+#define mmMME2_QM_CP_AWUSER_31_11_1 0x168434
+
+#define mmMME2_QM_CP_AWUSER_31_11_2 0x168438
+
+#define mmMME2_QM_CP_AWUSER_31_11_3 0x16843C
+
+#define mmMME2_QM_CP_AWUSER_31_11_4 0x168440
+
+#define mmMME2_QM_ARB_CFG_0 0x168A00
+
+#define mmMME2_QM_ARB_CHOISE_Q_PUSH 0x168A04
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_0 0x168A08
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_1 0x168A0C
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_2 0x168A10
+
+#define mmMME2_QM_ARB_WRR_WEIGHT_3 0x168A14
+
+#define mmMME2_QM_ARB_CFG_1 0x168A18
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_0 0x168A20
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_1 0x168A24
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_2 0x168A28
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_3 0x168A2C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_4 0x168A30
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_5 0x168A34
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_6 0x168A38
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_7 0x168A3C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_8 0x168A40
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_9 0x168A44
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_10 0x168A48
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_11 0x168A4C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_12 0x168A50
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_13 0x168A54
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_14 0x168A58
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_15 0x168A5C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_16 0x168A60
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_17 0x168A64
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_18 0x168A68
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_19 0x168A6C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_20 0x168A70
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_21 0x168A74
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_22 0x168A78
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_23 0x168A7C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_24 0x168A80
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_25 0x168A84
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_26 0x168A88
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_27 0x168A8C
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_28 0x168A90
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_29 0x168A94
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_30 0x168A98
+
+#define mmMME2_QM_ARB_MST_AVAIL_CRED_31 0x168A9C
+
+#define mmMME2_QM_ARB_MST_CRED_INC 0x168AA0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x168AA4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x168AA8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x168AAC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x168AB0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x168AB4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x168AB8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x168ABC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x168AC0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x168AC4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x168AC8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x168ACC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x168AD0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x168AD4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x168AD8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x168ADC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x168AE0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x168AE4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x168AE8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x168AEC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x168AF0
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x168AF4
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x168AF8
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x168AFC
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x168B00
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x168B04
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x168B08
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x168B0C
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x168B10
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x168B14
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x168B18
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x168B1C
+
+#define mmMME2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x168B20
+
+#define mmMME2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x168B28
+
+#define mmMME2_QM_ARB_MST_SLAVE_EN 0x168B2C
+
+#define mmMME2_QM_ARB_MST_QUIET_PER 0x168B34
+
+#define mmMME2_QM_ARB_SLV_CHOISE_WDT 0x168B38
+
+#define mmMME2_QM_ARB_SLV_ID 0x168B3C
+
+#define mmMME2_QM_ARB_MSG_MAX_INFLIGHT 0x168B44
+
+#define mmMME2_QM_ARB_MSG_AWUSER_31_11 0x168B48
+
+#define mmMME2_QM_ARB_MSG_AWUSER_SEC_PROP 0x168B4C
+
+#define mmMME2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x168B50
+
+#define mmMME2_QM_ARB_BASE_LO 0x168B54
+
+#define mmMME2_QM_ARB_BASE_HI 0x168B58
+
+#define mmMME2_QM_ARB_STATE_STS 0x168B80
+
+#define mmMME2_QM_ARB_CHOISE_FULLNESS_STS 0x168B84
+
+#define mmMME2_QM_ARB_MSG_STS 0x168B88
+
+#define mmMME2_QM_ARB_SLV_CHOISE_Q_HEAD 0x168B8C
+
+#define mmMME2_QM_ARB_ERR_CAUSE 0x168B9C
+
+#define mmMME2_QM_ARB_ERR_MSG_EN 0x168BA0
+
+#define mmMME2_QM_ARB_ERR_STS_DRP 0x168BA8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_0 0x168BB0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_1 0x168BB4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_2 0x168BB8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_3 0x168BBC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_4 0x168BC0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_5 0x168BC4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_6 0x168BC8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_7 0x168BCC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_8 0x168BD0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_9 0x168BD4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_10 0x168BD8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_11 0x168BDC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_12 0x168BE0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_13 0x168BE4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_14 0x168BE8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_15 0x168BEC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_16 0x168BF0
+
+#define mmMME2_QM_ARB_MST_CRED_STS_17 0x168BF4
+
+#define mmMME2_QM_ARB_MST_CRED_STS_18 0x168BF8
+
+#define mmMME2_QM_ARB_MST_CRED_STS_19 0x168BFC
+
+#define mmMME2_QM_ARB_MST_CRED_STS_20 0x168C00
+
+#define mmMME2_QM_ARB_MST_CRED_STS_21 0x168C04
+
+#define mmMME2_QM_ARB_MST_CRED_STS_22 0x168C08
+
+#define mmMME2_QM_ARB_MST_CRED_STS_23 0x168C0C
+
+#define mmMME2_QM_ARB_MST_CRED_STS_24 0x168C10
+
+#define mmMME2_QM_ARB_MST_CRED_STS_25 0x168C14
+
+#define mmMME2_QM_ARB_MST_CRED_STS_26 0x168C18
+
+#define mmMME2_QM_ARB_MST_CRED_STS_27 0x168C1C
+
+#define mmMME2_QM_ARB_MST_CRED_STS_28 0x168C20
+
+#define mmMME2_QM_ARB_MST_CRED_STS_29 0x168C24
+
+#define mmMME2_QM_ARB_MST_CRED_STS_30 0x168C28
+
+#define mmMME2_QM_ARB_MST_CRED_STS_31 0x168C2C
+
+#define mmMME2_QM_CGM_CFG 0x168C70
+
+#define mmMME2_QM_CGM_STS 0x168C74
+
+#define mmMME2_QM_CGM_CFG1 0x168C78
+
+#define mmMME2_QM_LOCAL_RANGE_BASE 0x168C80
+
+#define mmMME2_QM_LOCAL_RANGE_SIZE 0x168C84
+
+#define mmMME2_QM_CSMR_STRICT_PRIO_CFG 0x168C90
+
+#define mmMME2_QM_HBW_RD_RATE_LIM_CFG_1 0x168C94
+
+#define mmMME2_QM_LBW_WR_RATE_LIM_CFG_0 0x168C98
+
+#define mmMME2_QM_LBW_WR_RATE_LIM_CFG_1 0x168C9C
+
+#define mmMME2_QM_HBW_RD_RATE_LIM_CFG_0 0x168CA0
+
+#define mmMME2_QM_GLBL_AXCACHE 0x168CA4
+
+#define mmMME2_QM_IND_GW_APB_CFG 0x168CB0
+
+#define mmMME2_QM_IND_GW_APB_WDATA 0x168CB4
+
+#define mmMME2_QM_IND_GW_APB_RDATA 0x168CB8
+
+#define mmMME2_QM_IND_GW_APB_STATUS 0x168CBC
+
+#define mmMME2_QM_GLBL_ERR_ADDR_LO 0x168CD0
+
+#define mmMME2_QM_GLBL_ERR_ADDR_HI 0x168CD4
+
+#define mmMME2_QM_GLBL_ERR_WDATA 0x168CD8
+
+#define mmMME2_QM_GLBL_MEM_INIT_BUSY 0x168D00
+
+#endif /* ASIC_REG_MME2_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
new file mode 100644
index 000000000000..36f6edc72e3d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
@@ -0,0 +1,1456 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MME3_CTRL_REGS_H_
+#define ASIC_REG_MME3_CTRL_REGS_H_
+
+/*
+ *****************************************
+ * MME3_CTRL (Prototype: MME)
+ *****************************************
+ */
+
+#define mmMME3_CTRL_ARCH_STATUS 0x1E0000
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_S 0x1E0008
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_L 0x1E000C
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_HIGH_O 0x1E0010
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_S 0x1E0014
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_L 0x1E0018
+
+#define mmMME3_CTRL_ARCH_BASE_ADDR_LOW_O 0x1E001C
+
+#define mmMME3_CTRL_ARCH_HEADER_LOW 0x1E0020
+
+#define mmMME3_CTRL_ARCH_HEADER_HIGH 0x1E0024
+
+#define mmMME3_CTRL_ARCH_CONV_KERNEL_SIZE_MINUS_1 0x1E0028
+
+#define mmMME3_CTRL_ARCH_CONV_ASSOCIATED_DIMS_LOW 0x1E002C
+
+#define mmMME3_CTRL_ARCH_CONV_ASSOCIATED_DIMS_HIGH 0x1E0030
+
+#define mmMME3_CTRL_ARCH_NUM_ITERATIONS_MINUS_1 0x1E0034
+
+#define mmMME3_CTRL_ARCH_OUTER_LOOP 0x1E0038
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_0 0x1E003C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_1 0x1E0040
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_2 0x1E0044
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_3 0x1E0048
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_VALID_ELEMENTS_4 0x1E004C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_0 0x1E0050
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_1 0x1E0054
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_2 0x1E0058
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_3 0x1E005C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_LOOP_STRIDE_4 0x1E0060
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_0 0x1E0064
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_1 0x1E0068
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_2 0x1E006C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_ROI_SIZE_3 0x1E0070
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_0 0x1E0074
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_1 0x1E0078
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_2 0x1E007C
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_STRIDES_3 0x1E0080
+
+#define mmMME3_CTRL_ARCH_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0084
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_0 0x1E0088
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_1 0x1E008C
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_2 0x1E0090
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_3 0x1E0094
+
+#define mmMME3_CTRL_ARCH_AGU_S_ROI_BASE_OFFSET_4 0x1E0098
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_0 0x1E009C
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_1 0x1E00A0
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_2 0x1E00A4
+
+#define mmMME3_CTRL_ARCH_AGU_S_START_OFFSET_3 0x1E00A8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_0 0x1E00AC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_1 0x1E00B0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_2 0x1E00B4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_3 0x1E00B8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_VALID_ELEMENTS_4 0x1E00BC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_0 0x1E00C0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_1 0x1E00C4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_2 0x1E00C8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_3 0x1E00CC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_LOOP_STRIDE_4 0x1E00D0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_0 0x1E00D4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_1 0x1E00D8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_2 0x1E00DC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_ROI_SIZE_3 0x1E00E0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_0 0x1E00E4
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_1 0x1E00E8
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_2 0x1E00EC
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_STRIDES_3 0x1E00F0
+
+#define mmMME3_CTRL_ARCH_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E00F4
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E00F8
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E00FC
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0100
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0104
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0108
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_0 0x1E010C
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_1 0x1E0110
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_2 0x1E0114
+
+#define mmMME3_CTRL_ARCH_AGU_L_LOCAL_START_OFFSET_3 0x1E0118
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E011C
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0120
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0124
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0128
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E012C
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_0 0x1E0130
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_1 0x1E0134
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_2 0x1E0138
+
+#define mmMME3_CTRL_ARCH_AGU_L_REMOTE_START_OFFSET_3 0x1E013C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_0 0x1E0140
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_1 0x1E0144
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_2 0x1E0148
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_3 0x1E014C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_VALID_ELEMENTS_4 0x1E0150
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_0 0x1E0154
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_1 0x1E0158
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_2 0x1E015C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_3 0x1E0160
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_LOOP_STRIDE_4 0x1E0164
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_0 0x1E0168
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_1 0x1E016C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_2 0x1E0170
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_ROI_SIZE_3 0x1E0174
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_0 0x1E0178
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_1 0x1E017C
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_2 0x1E0180
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_STRIDES_3 0x1E0184
+
+#define mmMME3_CTRL_ARCH_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0188
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E018C
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0190
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0194
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0198
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E019C
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_0 0x1E01A0
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_1 0x1E01A4
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_2 0x1E01A8
+
+#define mmMME3_CTRL_ARCH_AGU_O_LOCAL_START_OFFSET_3 0x1E01AC
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E01B0
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E01B4
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E01B8
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E01BC
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E01C0
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_0 0x1E01C4
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_1 0x1E01C8
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_2 0x1E01CC
+
+#define mmMME3_CTRL_ARCH_AGU_O_REMOTE_START_OFFSET_3 0x1E01D0
+
+#define mmMME3_CTRL_ARCH_DESC_SB_REPEAT 0x1E01D4
+
+#define mmMME3_CTRL_ARCH_DESC_RATE_LIMITER 0x1E01D8
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E01DC
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E01E0
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E01E4
+
+#define mmMME3_CTRL_ARCH_DESC_SYNC_OBJECT_DATA 0x1E01E8
+
+#define mmMME3_CTRL_ARCH_DESC_AXI_USER_DATA 0x1E01EC
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_S 0x1E01F0
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_L_LOCAL 0x1E01F4
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_L_REMOTE 0x1E01F8
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_O_LOCAL 0x1E01FC
+
+#define mmMME3_CTRL_ARCH_DESC_PERF_EVT_O_REMOTE 0x1E0200
+
+#define mmMME3_CTRL_ARCH_DESC_PADDING_VALUE_S 0x1E0204
+
+#define mmMME3_CTRL_ARCH_DESC_PADDING_VALUE_L 0x1E0208
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_S 0x1E020C
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_L_LOCAL 0x1E0210
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_L_REMOTE 0x1E0214
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_O_LOCAL 0x1E0218
+
+#define mmMME3_CTRL_ARCH_DESC_META_DATA_AGU_O_REMOTE 0x1E021C
+
+#define mmMME3_CTRL_ARCH_DESC_PCU_RL_SATURATION 0x1E0220
+
+#define mmMME3_CTRL_ARCH_DESC_DUMMY 0x1E0224
+
+#define mmMME3_CTRL_CMD 0x1E0280
+
+#define mmMME3_CTRL_STATUS1 0x1E0284
+
+#define mmMME3_CTRL_RESET 0x1E0288
+
+#define mmMME3_CTRL_QM_STALL 0x1E028C
+
+#define mmMME3_CTRL_SYNC_OBJECT_FIFO_TH 0x1E0290
+
+#define mmMME3_CTRL_EUS_ROLLUP_CNT_ADD 0x1E0294
+
+#define mmMME3_CTRL_INTR_CAUSE 0x1E0298
+
+#define mmMME3_CTRL_INTR_MASK 0x1E029C
+
+#define mmMME3_CTRL_LOG_SHADOW 0x1E02A0
+
+#define mmMME3_CTRL_PCU_RL_DESC0 0x1E02A4
+
+#define mmMME3_CTRL_PCU_RL_TOKEN_UPDATE 0x1E02A8
+
+#define mmMME3_CTRL_PCU_RL_TH 0x1E02AC
+
+#define mmMME3_CTRL_PCU_RL_MIN 0x1E02B0
+
+#define mmMME3_CTRL_PCU_RL_CTRL_EN 0x1E02B4
+
+#define mmMME3_CTRL_PCU_RL_HISTORY_LOG_SIZE 0x1E02B8
+
+#define mmMME3_CTRL_PCU_DUMMY_A_BF16 0x1E02BC
+
+#define mmMME3_CTRL_PCU_DUMMY_B_BF16 0x1E02C0
+
+#define mmMME3_CTRL_PCU_DUMMY_A_FP32_ODD 0x1E02C4
+
+#define mmMME3_CTRL_PCU_DUMMY_A_FP32_EVEN 0x1E02C8
+
+#define mmMME3_CTRL_PCU_DUMMY_B_FP32_ODD 0x1E02CC
+
+#define mmMME3_CTRL_PCU_DUMMY_B_FP32_EVEN 0x1E02D0
+
+#define mmMME3_CTRL_PROT 0x1E02D4
+
+#define mmMME3_CTRL_EU_POWER_SAVE_DISABLE 0x1E02D8
+
+#define mmMME3_CTRL_CS_DBG_BLOCK_ID 0x1E02DC
+
+#define mmMME3_CTRL_CS_DBG_STATUS_DROP_CNT 0x1E02E0
+
+#define mmMME3_CTRL_TE_CLOSE_CGATE 0x1E02E4
+
+#define mmMME3_CTRL_AGU_SM_INFLIGHT_CNTR 0x1E02E8
+
+#define mmMME3_CTRL_AGU_SM_TOTAL_CNTR 0x1E02EC
+
+#define mmMME3_CTRL_EZSYNC_OUT_CREDIT 0x1E02F0
+
+#define mmMME3_CTRL_PCU_RL_SAT_SEC 0x1E02F4
+
+#define mmMME3_CTRL_AGU_SYNC_MSG_AXI_USER 0x1E02F8
+
+#define mmMME3_CTRL_QM_SLV_LBW_CLK_EN 0x1E02FC
+
+#define mmMME3_CTRL_SHADOW_0_STATUS 0x1E0400
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_S 0x1E0408
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_L 0x1E040C
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_HIGH_O 0x1E0410
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_S 0x1E0414
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_L 0x1E0418
+
+#define mmMME3_CTRL_SHADOW_0_BASE_ADDR_LOW_O 0x1E041C
+
+#define mmMME3_CTRL_SHADOW_0_HEADER_LOW 0x1E0420
+
+#define mmMME3_CTRL_SHADOW_0_HEADER_HIGH 0x1E0424
+
+#define mmMME3_CTRL_SHADOW_0_CONV_KERNEL_SIZE_MINUS_1 0x1E0428
+
+#define mmMME3_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_LOW 0x1E042C
+
+#define mmMME3_CTRL_SHADOW_0_CONV_ASSOCIATED_DIMS_HIGH 0x1E0430
+
+#define mmMME3_CTRL_SHADOW_0_NUM_ITERATIONS_MINUS_1 0x1E0434
+
+#define mmMME3_CTRL_SHADOW_0_OUTER_LOOP 0x1E0438
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_0 0x1E043C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_1 0x1E0440
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_2 0x1E0444
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_3 0x1E0448
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_VALID_ELEMENTS_4 0x1E044C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_0 0x1E0450
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_1 0x1E0454
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_2 0x1E0458
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_3 0x1E045C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_LOOP_STRIDE_4 0x1E0460
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_0 0x1E0464
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_1 0x1E0468
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_2 0x1E046C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_ROI_SIZE_3 0x1E0470
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_0 0x1E0474
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_1 0x1E0478
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_2 0x1E047C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_STRIDES_3 0x1E0480
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0484
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_0 0x1E0488
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_1 0x1E048C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_2 0x1E0490
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_3 0x1E0494
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_ROI_BASE_OFFSET_4 0x1E0498
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_0 0x1E049C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_1 0x1E04A0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_2 0x1E04A4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_S_START_OFFSET_3 0x1E04A8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_0 0x1E04AC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_1 0x1E04B0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_2 0x1E04B4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_3 0x1E04B8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_VALID_ELEMENTS_4 0x1E04BC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_0 0x1E04C0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_1 0x1E04C4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_2 0x1E04C8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_3 0x1E04CC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_LOOP_STRIDE_4 0x1E04D0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_0 0x1E04D4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_1 0x1E04D8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_2 0x1E04DC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_ROI_SIZE_3 0x1E04E0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_0 0x1E04E4
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_1 0x1E04E8
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_2 0x1E04EC
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_STRIDES_3 0x1E04F0
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E04F4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E04F8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E04FC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0500
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0504
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0508
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_0 0x1E050C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_1 0x1E0510
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_2 0x1E0514
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_LOCAL_START_OFFSET_3 0x1E0518
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E051C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0520
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0524
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0528
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E052C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_0 0x1E0530
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_1 0x1E0534
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_2 0x1E0538
+
+#define mmMME3_CTRL_SHADOW_0_AGU_L_REMOTE_START_OFFSET_3 0x1E053C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_0 0x1E0540
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_1 0x1E0544
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_2 0x1E0548
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_3 0x1E054C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_VALID_ELEMENTS_4 0x1E0550
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_0 0x1E0554
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_1 0x1E0558
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_2 0x1E055C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_3 0x1E0560
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_LOOP_STRIDE_4 0x1E0564
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_0 0x1E0568
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_1 0x1E056C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_2 0x1E0570
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_ROI_SIZE_3 0x1E0574
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_0 0x1E0578
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_1 0x1E057C
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_2 0x1E0580
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_STRIDES_3 0x1E0584
+
+#define mmMME3_CTRL_SHADOW_0_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0588
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E058C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0590
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0594
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0598
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E059C
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_0 0x1E05A0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_1 0x1E05A4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_2 0x1E05A8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_LOCAL_START_OFFSET_3 0x1E05AC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E05B0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E05B4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E05B8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E05BC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E05C0
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_0 0x1E05C4
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_1 0x1E05C8
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_2 0x1E05CC
+
+#define mmMME3_CTRL_SHADOW_0_AGU_O_REMOTE_START_OFFSET_3 0x1E05D0
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SB_REPEAT 0x1E05D4
+
+#define mmMME3_CTRL_SHADOW_0_DESC_RATE_LIMITER 0x1E05D8
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E05DC
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E05E0
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E05E4
+
+#define mmMME3_CTRL_SHADOW_0_DESC_SYNC_OBJECT_DATA 0x1E05E8
+
+#define mmMME3_CTRL_SHADOW_0_DESC_AXI_USER_DATA 0x1E05EC
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_S 0x1E05F0
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_L_LOCAL 0x1E05F4
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_L_REMOTE 0x1E05F8
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_O_LOCAL 0x1E05FC
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PERF_EVT_O_REMOTE 0x1E0600
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PADDING_VALUE_S 0x1E0604
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PADDING_VALUE_L 0x1E0608
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_S 0x1E060C
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_LOCAL 0x1E0610
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_L_REMOTE 0x1E0614
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_LOCAL 0x1E0618
+
+#define mmMME3_CTRL_SHADOW_0_DESC_META_DATA_AGU_O_REMOTE 0x1E061C
+
+#define mmMME3_CTRL_SHADOW_0_DESC_PCU_RL_SATURATION 0x1E0620
+
+#define mmMME3_CTRL_SHADOW_0_DESC_DUMMY 0x1E0624
+
+#define mmMME3_CTRL_SHADOW_1_STATUS 0x1E0680
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_S 0x1E0688
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_L 0x1E068C
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_HIGH_O 0x1E0690
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_S 0x1E0694
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_L 0x1E0698
+
+#define mmMME3_CTRL_SHADOW_1_BASE_ADDR_LOW_O 0x1E069C
+
+#define mmMME3_CTRL_SHADOW_1_HEADER_LOW 0x1E06A0
+
+#define mmMME3_CTRL_SHADOW_1_HEADER_HIGH 0x1E06A4
+
+#define mmMME3_CTRL_SHADOW_1_CONV_KERNEL_SIZE_MINUS_1 0x1E06A8
+
+#define mmMME3_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_LOW 0x1E06AC
+
+#define mmMME3_CTRL_SHADOW_1_CONV_ASSOCIATED_DIMS_HIGH 0x1E06B0
+
+#define mmMME3_CTRL_SHADOW_1_NUM_ITERATIONS_MINUS_1 0x1E06B4
+
+#define mmMME3_CTRL_SHADOW_1_OUTER_LOOP 0x1E06B8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_0 0x1E06BC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_1 0x1E06C0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_2 0x1E06C4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_3 0x1E06C8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_VALID_ELEMENTS_4 0x1E06CC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_0 0x1E06D0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_1 0x1E06D4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_2 0x1E06D8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_3 0x1E06DC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_LOOP_STRIDE_4 0x1E06E0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_0 0x1E06E4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_1 0x1E06E8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_2 0x1E06EC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_ROI_SIZE_3 0x1E06F0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_0 0x1E06F4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_1 0x1E06F8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_2 0x1E06FC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_STRIDES_3 0x1E0700
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0704
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_0 0x1E0708
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_1 0x1E070C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_2 0x1E0710
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_3 0x1E0714
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_ROI_BASE_OFFSET_4 0x1E0718
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_0 0x1E071C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_1 0x1E0720
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_2 0x1E0724
+
+#define mmMME3_CTRL_SHADOW_1_AGU_S_START_OFFSET_3 0x1E0728
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_0 0x1E072C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_1 0x1E0730
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_2 0x1E0734
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_3 0x1E0738
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_VALID_ELEMENTS_4 0x1E073C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_0 0x1E0740
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_1 0x1E0744
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_2 0x1E0748
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_3 0x1E074C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_LOOP_STRIDE_4 0x1E0750
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_0 0x1E0754
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_1 0x1E0758
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_2 0x1E075C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_ROI_SIZE_3 0x1E0760
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_0 0x1E0764
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_1 0x1E0768
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_2 0x1E076C
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_STRIDES_3 0x1E0770
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E0774
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E0778
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E077C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0780
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0784
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0788
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_0 0x1E078C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_1 0x1E0790
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_2 0x1E0794
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_LOCAL_START_OFFSET_3 0x1E0798
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E079C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E07A0
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E07A4
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E07A8
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E07AC
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_0 0x1E07B0
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_1 0x1E07B4
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_2 0x1E07B8
+
+#define mmMME3_CTRL_SHADOW_1_AGU_L_REMOTE_START_OFFSET_3 0x1E07BC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_0 0x1E07C0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_1 0x1E07C4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_2 0x1E07C8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_3 0x1E07CC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_VALID_ELEMENTS_4 0x1E07D0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_0 0x1E07D4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_1 0x1E07D8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_2 0x1E07DC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_3 0x1E07E0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_LOOP_STRIDE_4 0x1E07E4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_0 0x1E07E8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_1 0x1E07EC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_2 0x1E07F0
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_ROI_SIZE_3 0x1E07F4
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_0 0x1E07F8
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_1 0x1E07FC
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_2 0x1E0800
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_STRIDES_3 0x1E0804
+
+#define mmMME3_CTRL_SHADOW_1_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0808
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E080C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0810
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0814
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0818
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E081C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_0 0x1E0820
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_1 0x1E0824
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_2 0x1E0828
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_LOCAL_START_OFFSET_3 0x1E082C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0830
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0834
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0838
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E083C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0840
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_0 0x1E0844
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_1 0x1E0848
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_2 0x1E084C
+
+#define mmMME3_CTRL_SHADOW_1_AGU_O_REMOTE_START_OFFSET_3 0x1E0850
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SB_REPEAT 0x1E0854
+
+#define mmMME3_CTRL_SHADOW_1_DESC_RATE_LIMITER 0x1E0858
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E085C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0860
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0864
+
+#define mmMME3_CTRL_SHADOW_1_DESC_SYNC_OBJECT_DATA 0x1E0868
+
+#define mmMME3_CTRL_SHADOW_1_DESC_AXI_USER_DATA 0x1E086C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_S 0x1E0870
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_L_LOCAL 0x1E0874
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_L_REMOTE 0x1E0878
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_O_LOCAL 0x1E087C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PERF_EVT_O_REMOTE 0x1E0880
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PADDING_VALUE_S 0x1E0884
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PADDING_VALUE_L 0x1E0888
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_S 0x1E088C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_LOCAL 0x1E0890
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_L_REMOTE 0x1E0894
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_LOCAL 0x1E0898
+
+#define mmMME3_CTRL_SHADOW_1_DESC_META_DATA_AGU_O_REMOTE 0x1E089C
+
+#define mmMME3_CTRL_SHADOW_1_DESC_PCU_RL_SATURATION 0x1E08A0
+
+#define mmMME3_CTRL_SHADOW_1_DESC_DUMMY 0x1E08A4
+
+#define mmMME3_CTRL_SHADOW_2_STATUS 0x1E0900
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_S 0x1E0908
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_L 0x1E090C
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_HIGH_O 0x1E0910
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_S 0x1E0914
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_L 0x1E0918
+
+#define mmMME3_CTRL_SHADOW_2_BASE_ADDR_LOW_O 0x1E091C
+
+#define mmMME3_CTRL_SHADOW_2_HEADER_LOW 0x1E0920
+
+#define mmMME3_CTRL_SHADOW_2_HEADER_HIGH 0x1E0924
+
+#define mmMME3_CTRL_SHADOW_2_CONV_KERNEL_SIZE_MINUS_1 0x1E0928
+
+#define mmMME3_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_LOW 0x1E092C
+
+#define mmMME3_CTRL_SHADOW_2_CONV_ASSOCIATED_DIMS_HIGH 0x1E0930
+
+#define mmMME3_CTRL_SHADOW_2_NUM_ITERATIONS_MINUS_1 0x1E0934
+
+#define mmMME3_CTRL_SHADOW_2_OUTER_LOOP 0x1E0938
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_0 0x1E093C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_1 0x1E0940
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_2 0x1E0944
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_3 0x1E0948
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_VALID_ELEMENTS_4 0x1E094C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_0 0x1E0950
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_1 0x1E0954
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_2 0x1E0958
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_3 0x1E095C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_LOOP_STRIDE_4 0x1E0960
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_0 0x1E0964
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_1 0x1E0968
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_2 0x1E096C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_ROI_SIZE_3 0x1E0970
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_0 0x1E0974
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_1 0x1E0978
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_2 0x1E097C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_STRIDES_3 0x1E0980
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0984
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_0 0x1E0988
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_1 0x1E098C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_2 0x1E0990
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_3 0x1E0994
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_ROI_BASE_OFFSET_4 0x1E0998
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_0 0x1E099C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_1 0x1E09A0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_2 0x1E09A4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_S_START_OFFSET_3 0x1E09A8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_0 0x1E09AC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_1 0x1E09B0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_2 0x1E09B4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_3 0x1E09B8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_VALID_ELEMENTS_4 0x1E09BC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_0 0x1E09C0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_1 0x1E09C4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_2 0x1E09C8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_3 0x1E09CC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_LOOP_STRIDE_4 0x1E09D0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_0 0x1E09D4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_1 0x1E09D8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_2 0x1E09DC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_ROI_SIZE_3 0x1E09E0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_0 0x1E09E4
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_1 0x1E09E8
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_2 0x1E09EC
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_STRIDES_3 0x1E09F0
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E09F4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E09F8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E09FC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0A00
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0A04
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0A08
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_0 0x1E0A0C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_1 0x1E0A10
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_2 0x1E0A14
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_LOCAL_START_OFFSET_3 0x1E0A18
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E0A1C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0A20
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0A24
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0A28
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E0A2C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_0 0x1E0A30
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_1 0x1E0A34
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_2 0x1E0A38
+
+#define mmMME3_CTRL_SHADOW_2_AGU_L_REMOTE_START_OFFSET_3 0x1E0A3C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_0 0x1E0A40
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_1 0x1E0A44
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_2 0x1E0A48
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_3 0x1E0A4C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_VALID_ELEMENTS_4 0x1E0A50
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_0 0x1E0A54
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_1 0x1E0A58
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_2 0x1E0A5C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_3 0x1E0A60
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_LOOP_STRIDE_4 0x1E0A64
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_0 0x1E0A68
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_1 0x1E0A6C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_2 0x1E0A70
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_ROI_SIZE_3 0x1E0A74
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_0 0x1E0A78
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_1 0x1E0A7C
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_2 0x1E0A80
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_STRIDES_3 0x1E0A84
+
+#define mmMME3_CTRL_SHADOW_2_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0A88
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E0A8C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0A90
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0A94
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0A98
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E0A9C
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_0 0x1E0AA0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_1 0x1E0AA4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_2 0x1E0AA8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_LOCAL_START_OFFSET_3 0x1E0AAC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0AB0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0AB4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0AB8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E0ABC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0AC0
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_0 0x1E0AC4
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_1 0x1E0AC8
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_2 0x1E0ACC
+
+#define mmMME3_CTRL_SHADOW_2_AGU_O_REMOTE_START_OFFSET_3 0x1E0AD0
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SB_REPEAT 0x1E0AD4
+
+#define mmMME3_CTRL_SHADOW_2_DESC_RATE_LIMITER 0x1E0AD8
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E0ADC
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0AE0
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0AE4
+
+#define mmMME3_CTRL_SHADOW_2_DESC_SYNC_OBJECT_DATA 0x1E0AE8
+
+#define mmMME3_CTRL_SHADOW_2_DESC_AXI_USER_DATA 0x1E0AEC
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_S 0x1E0AF0
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_L_LOCAL 0x1E0AF4
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_L_REMOTE 0x1E0AF8
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_O_LOCAL 0x1E0AFC
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PERF_EVT_O_REMOTE 0x1E0B00
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PADDING_VALUE_S 0x1E0B04
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PADDING_VALUE_L 0x1E0B08
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_S 0x1E0B0C
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_LOCAL 0x1E0B10
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_L_REMOTE 0x1E0B14
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_LOCAL 0x1E0B18
+
+#define mmMME3_CTRL_SHADOW_2_DESC_META_DATA_AGU_O_REMOTE 0x1E0B1C
+
+#define mmMME3_CTRL_SHADOW_2_DESC_PCU_RL_SATURATION 0x1E0B20
+
+#define mmMME3_CTRL_SHADOW_2_DESC_DUMMY 0x1E0B24
+
+#define mmMME3_CTRL_SHADOW_3_STATUS 0x1E0B80
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_S 0x1E0B88
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_L 0x1E0B8C
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_HIGH_O 0x1E0B90
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_S 0x1E0B94
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_L 0x1E0B98
+
+#define mmMME3_CTRL_SHADOW_3_BASE_ADDR_LOW_O 0x1E0B9C
+
+#define mmMME3_CTRL_SHADOW_3_HEADER_LOW 0x1E0BA0
+
+#define mmMME3_CTRL_SHADOW_3_HEADER_HIGH 0x1E0BA4
+
+#define mmMME3_CTRL_SHADOW_3_CONV_KERNEL_SIZE_MINUS_1 0x1E0BA8
+
+#define mmMME3_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_LOW 0x1E0BAC
+
+#define mmMME3_CTRL_SHADOW_3_CONV_ASSOCIATED_DIMS_HIGH 0x1E0BB0
+
+#define mmMME3_CTRL_SHADOW_3_NUM_ITERATIONS_MINUS_1 0x1E0BB4
+
+#define mmMME3_CTRL_SHADOW_3_OUTER_LOOP 0x1E0BB8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_0 0x1E0BBC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_1 0x1E0BC0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_2 0x1E0BC4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_3 0x1E0BC8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_VALID_ELEMENTS_4 0x1E0BCC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_0 0x1E0BD0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_1 0x1E0BD4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_2 0x1E0BD8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_3 0x1E0BDC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_LOOP_STRIDE_4 0x1E0BE0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_0 0x1E0BE4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_1 0x1E0BE8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_2 0x1E0BEC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_ROI_SIZE_3 0x1E0BF0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_0 0x1E0BF4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_1 0x1E0BF8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_2 0x1E0BFC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_STRIDES_3 0x1E0C00
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_S_SPATIAL_SIZE_MINUS_1 0x1E0C04
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_0 0x1E0C08
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_1 0x1E0C0C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_2 0x1E0C10
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_3 0x1E0C14
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_ROI_BASE_OFFSET_4 0x1E0C18
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_0 0x1E0C1C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_1 0x1E0C20
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_2 0x1E0C24
+
+#define mmMME3_CTRL_SHADOW_3_AGU_S_START_OFFSET_3 0x1E0C28
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_0 0x1E0C2C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_1 0x1E0C30
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_2 0x1E0C34
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_3 0x1E0C38
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_VALID_ELEMENTS_4 0x1E0C3C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_0 0x1E0C40
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_1 0x1E0C44
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_2 0x1E0C48
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_3 0x1E0C4C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_LOOP_STRIDE_4 0x1E0C50
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_0 0x1E0C54
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_1 0x1E0C58
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_2 0x1E0C5C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_ROI_SIZE_3 0x1E0C60
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_0 0x1E0C64
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_1 0x1E0C68
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_2 0x1E0C6C
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_STRIDES_3 0x1E0C70
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_L_SPATIAL_SIZE_MINUS_1 0x1E0C74
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_0 0x1E0C78
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_1 0x1E0C7C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_2 0x1E0C80
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_3 0x1E0C84
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_ROI_BASE_OFFSET_4 0x1E0C88
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_0 0x1E0C8C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_1 0x1E0C90
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_2 0x1E0C94
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_LOCAL_START_OFFSET_3 0x1E0C98
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_0 0x1E0C9C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_1 0x1E0CA0
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_2 0x1E0CA4
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_3 0x1E0CA8
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_ROI_BASE_OFFSET_4 0x1E0CAC
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_0 0x1E0CB0
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_1 0x1E0CB4
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_2 0x1E0CB8
+
+#define mmMME3_CTRL_SHADOW_3_AGU_L_REMOTE_START_OFFSET_3 0x1E0CBC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_0 0x1E0CC0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_1 0x1E0CC4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_2 0x1E0CC8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_3 0x1E0CCC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_VALID_ELEMENTS_4 0x1E0CD0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_0 0x1E0CD4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_1 0x1E0CD8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_2 0x1E0CDC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_3 0x1E0CE0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_LOOP_STRIDE_4 0x1E0CE4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_0 0x1E0CE8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_1 0x1E0CEC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_2 0x1E0CF0
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_ROI_SIZE_3 0x1E0CF4
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_0 0x1E0CF8
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_1 0x1E0CFC
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_2 0x1E0D00
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_STRIDES_3 0x1E0D04
+
+#define mmMME3_CTRL_SHADOW_3_TENSOR_O_SPATIAL_SIZE_MINUS_1 0x1E0D08
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_0 0x1E0D0C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_1 0x1E0D10
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_2 0x1E0D14
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_3 0x1E0D18
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_ROI_BASE_OFFSET_4 0x1E0D1C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_0 0x1E0D20
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_1 0x1E0D24
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_2 0x1E0D28
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_LOCAL_START_OFFSET_3 0x1E0D2C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_0 0x1E0D30
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_1 0x1E0D34
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_2 0x1E0D38
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_3 0x1E0D3C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_ROI_BASE_OFFSET_4 0x1E0D40
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_0 0x1E0D44
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_1 0x1E0D48
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_2 0x1E0D4C
+
+#define mmMME3_CTRL_SHADOW_3_AGU_O_REMOTE_START_OFFSET_3 0x1E0D50
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SB_REPEAT 0x1E0D54
+
+#define mmMME3_CTRL_SHADOW_3_DESC_RATE_LIMITER 0x1E0D58
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL 0x1E0D5C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_LOW_REMOTE 0x1E0D60
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_ADDR_HIGH 0x1E0D64
+
+#define mmMME3_CTRL_SHADOW_3_DESC_SYNC_OBJECT_DATA 0x1E0D68
+
+#define mmMME3_CTRL_SHADOW_3_DESC_AXI_USER_DATA 0x1E0D6C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_S 0x1E0D70
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_L_LOCAL 0x1E0D74
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_L_REMOTE 0x1E0D78
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_O_LOCAL 0x1E0D7C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PERF_EVT_O_REMOTE 0x1E0D80
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PADDING_VALUE_S 0x1E0D84
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PADDING_VALUE_L 0x1E0D88
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_S 0x1E0D8C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_LOCAL 0x1E0D90
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_L_REMOTE 0x1E0D94
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_LOCAL 0x1E0D98
+
+#define mmMME3_CTRL_SHADOW_3_DESC_META_DATA_AGU_O_REMOTE 0x1E0D9C
+
+#define mmMME3_CTRL_SHADOW_3_DESC_PCU_RL_SATURATION 0x1E0DA0
+
+#define mmMME3_CTRL_SHADOW_3_DESC_DUMMY 0x1E0DA4
+
+#endif /* ASIC_REG_MME3_CTRL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
new file mode 100644
index 000000000000..61465b599850
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_MMU_UP_REGS_H_
+#define ASIC_REG_MMU_UP_REGS_H_
+
+/*
+ *****************************************
+ * MMU_UP (Prototype: MMU)
+ *****************************************
+ */
+
+#define mmMMU_UP_MMU_ENABLE 0xC1100C
+
+#define mmMMU_UP_FORCE_ORDERING 0xC11010
+
+#define mmMMU_UP_FEATURE_ENABLE 0xC11014
+
+#define mmMMU_UP_VA_ORDERING_MASK_31_7 0xC11018
+
+#define mmMMU_UP_VA_ORDERING_MASK_49_32 0xC1101C
+
+#define mmMMU_UP_LOG2_DDR_SIZE 0xC11020
+
+#define mmMMU_UP_SCRAMBLER 0xC11024
+
+#define mmMMU_UP_MEM_INIT_BUSY 0xC11028
+
+#define mmMMU_UP_SPI_MASK 0xC1102C
+
+#define mmMMU_UP_SPI_CAUSE 0xC11030
+
+#define mmMMU_UP_PAGE_ERROR_CAPTURE 0xC11034
+
+#define mmMMU_UP_PAGE_ERROR_CAPTURE_VA 0xC11038
+
+#define mmMMU_UP_ACCESS_ERROR_CAPTURE 0xC1103C
+
+#define mmMMU_UP_ACCESS_ERROR_CAPTURE_VA 0xC11040
+
+#define mmMMU_UP_SPI_INTERRUPT_CLR 0xC11044
+
+#define mmMMU_UP_SPI_INTERRUPT_MASK 0xC11048
+
+#define mmMMU_UP_DBG_MEM_WRAP_RM 0xC1104C
+
+#define mmMMU_UP_SPI_CAUSE_CLR 0xC11050
+
+#define mmMMU_UP_SLICE_CREDIT 0xC11054
+
+#define mmMMU_UP_PIPE_CREDIT 0xC11058
+
+#define mmMMU_UP_RAZWI_WRITE_VLD 0xC1105C
+
+#define mmMMU_UP_RAZWI_WRITE_ID 0xC11060
+
+#define mmMMU_UP_RAZWI_READ_VLD 0xC11064
+
+#define mmMMU_UP_RAZWI_READ_ID 0xC11068
+
+#define mmMMU_UP_MMU_BYPASS 0xC1106C
+
+#endif /* ASIC_REG_MMU_UP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
new file mode 100644
index 000000000000..2efa2a54deb4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_0_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_0_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_0_PERM_SEL 0x386108
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_0 0x386114
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_1 0x386118
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_2 0x38611C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_3 0x386120
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_4 0x386124
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_5 0x386128
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_6 0x38612C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_7 0x386130
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_8 0x386134
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_9 0x386138
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_10 0x38613C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_11 0x386140
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_12 0x386144
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_13 0x386148
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_14 0x38614C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_15 0x386150
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_16 0x386154
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_17 0x386158
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_18 0x38615C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_19 0x386160
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_20 0x386164
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_21 0x386168
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_22 0x38616C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_23 0x386170
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_24 0x386174
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_25 0x386178
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_26 0x38617C
+
+#define mmNIF_RTR_CTRL_0_HBM_POLY_H3_27 0x386180
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_0 0x386184
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_1 0x386188
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_2 0x38618C
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_3 0x386190
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_4 0x386194
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_5 0x386198
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_6 0x38619C
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_7 0x3861A0
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_8 0x3861A4
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_9 0x3861A8
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_10 0x3861AC
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_11 0x3861B0
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_12 0x3861B4
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_13 0x3861B8
+
+#define mmNIF_RTR_CTRL_0_SRAM_POLY_H3_14 0x3861BC
+
+#define mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN 0x38626C
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_EN 0x386274
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_SAT 0x386278
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_RST 0x38627C
+
+#define mmNIF_RTR_CTRL_0_RL_HBM_TIMEOUT 0x386280
+
+#define mmNIF_RTR_CTRL_0_SCRAM_HBM_EN 0x386284
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_EN 0x386288
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_SAT 0x38628C
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_RST 0x386290
+
+#define mmNIF_RTR_CTRL_0_RL_PCI_TIMEOUT 0x386294
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_EN 0x38629C
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_SAT 0x3862A0
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_RST 0x3862A4
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_TIMEOUT 0x3862AC
+
+#define mmNIF_RTR_CTRL_0_RL_SRAM_RED 0x3862B4
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_EN 0x3862EC
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_EN 0x3862F0
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE 0x3862F4
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE 0x3862F8
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET_EN 0x386404
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET 0x386408
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_WRAP 0x38640C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_PCI_CTR_CNT 0x386410
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET_EN 0x386414
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET 0x386418
+
+#define mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE 0x38641C
+
+#define mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE 0x386420
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET_EN 0x386424
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET 0x386428
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_WRAP 0x38642C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_PCI_CTR_CNT 0x386430
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET_EN 0x386434
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET 0x386438
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_SEL_0 0x386450
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_SEL_1 0x386454
+
+#define mmNIF_RTR_CTRL_0_NON_LIN_EN 0x386480
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_0 0x386500
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_1 0x386504
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_2 0x386508
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_3 0x38650C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_BANK_4 0x386510
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_0 0x386514
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_1 0x386520
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_2 0x386524
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_3 0x386528
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_4 0x38652C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_5 0x386530
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_6 0x386534
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_7 0x386538
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_8 0x38653C
+
+#define mmNIF_RTR_CTRL_0_NL_SRAM_OFFSET_9 0x386540
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_0 0x386550
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_1 0x386554
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_2 0x386558
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_3 0x38655C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_4 0x386560
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_5 0x386564
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_6 0x386568
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_7 0x38656C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_8 0x386570
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_9 0x386574
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_10 0x386578
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_11 0x38657C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_12 0x386580
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_13 0x386584
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_14 0x386588
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_15 0x38658C
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_16 0x386590
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_17 0x386594
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18 0x386598
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 0x3865E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_1 0x3865E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_2 0x3865EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_3 0x3865F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_4 0x3865F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_5 0x3865F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_6 0x3865FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_7 0x386600
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_8 0x386604
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_9 0x386608
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_10 0x38660C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_11 0x386610
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_12 0x386614
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_13 0x386618
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_14 0x38661C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_15 0x386620
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 0x386624
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_1 0x386628
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_2 0x38662C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_3 0x386630
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_4 0x386634
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_5 0x386638
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_6 0x38663C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_7 0x386640
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_8 0x386644
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_9 0x386648
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_10 0x38664C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_11 0x386650
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_12 0x386654
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_13 0x386658
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_14 0x38665C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_15 0x386660
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 0x386664
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_1 0x386668
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_2 0x38666C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_3 0x386670
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_4 0x386674
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_5 0x386678
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_6 0x38667C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_7 0x386680
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_8 0x386684
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_9 0x386688
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_10 0x38668C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_11 0x386690
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_12 0x386694
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_13 0x386698
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_14 0x38669C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_15 0x3866A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 0x3866A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_1 0x3866A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_2 0x3866AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_3 0x3866B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_4 0x3866B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_5 0x3866B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_6 0x3866BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_7 0x3866C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_8 0x3866C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_9 0x3866C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_10 0x3866CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_11 0x3866D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_12 0x3866D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_13 0x3866D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_14 0x3866DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_15 0x3866E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_0 0x3866E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_1 0x3866E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_2 0x3866EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_3 0x3866F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_4 0x3866F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_5 0x3866F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_6 0x3866FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_7 0x386700
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_8 0x386704
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_9 0x386708
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_10 0x38670C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_11 0x386710
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_12 0x386714
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_13 0x386718
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_14 0x38671C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_15 0x386720
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_0 0x386724
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_1 0x386728
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_2 0x38672C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_3 0x386730
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_4 0x386734
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_5 0x386738
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_6 0x38673C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_7 0x386740
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_8 0x386744
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_9 0x386748
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_10 0x38674C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_11 0x386750
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_12 0x386754
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_13 0x386758
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_14 0x38675C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_15 0x386760
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_0 0x386764
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_1 0x386768
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_2 0x38676C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_3 0x386770
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_4 0x386774
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_5 0x386778
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_6 0x38677C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_7 0x386780
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_8 0x386784
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_9 0x386788
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_10 0x38678C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_11 0x386790
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_12 0x386794
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_13 0x386798
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_14 0x38679C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_15 0x3867A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_0 0x3867A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_1 0x3867A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_2 0x3867AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_3 0x3867B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_4 0x3867B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_5 0x3867B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_6 0x3867BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_7 0x3867C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_8 0x3867C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_9 0x3867C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_10 0x3867CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_11 0x3867D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_12 0x3867D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_13 0x3867D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_14 0x3867DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_15 0x3867E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0 0x386824
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_1 0x386828
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_2 0x38682C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_3 0x386830
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_4 0x386834
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_5 0x386838
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_6 0x38683C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_7 0x386840
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_8 0x386844
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_9 0x386848
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_10 0x38684C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_11 0x386850
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_12 0x386854
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_13 0x386858
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_14 0x38685C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_15 0x386860
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0 0x386864
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_1 0x386868
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_2 0x38686C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_3 0x386870
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_4 0x386874
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_5 0x386878
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_6 0x38687C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_7 0x386880
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_8 0x386884
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_9 0x386888
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_10 0x38688C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_11 0x386890
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_12 0x386894
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_13 0x386898
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_14 0x38689C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_15 0x3868A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0 0x3868A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_1 0x3868A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_2 0x3868AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_3 0x3868B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_4 0x3868B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_5 0x3868B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_6 0x3868BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_7 0x3868C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_8 0x3868C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_9 0x3868C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_10 0x3868CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_11 0x3868D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_12 0x3868D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_13 0x3868D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_14 0x3868DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_15 0x3868E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0 0x3868E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_1 0x3868E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_2 0x3868EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_3 0x3868F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_4 0x3868F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_5 0x3868F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_6 0x3868FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_7 0x386900
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_8 0x386904
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_9 0x386908
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_10 0x38690C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_11 0x386910
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_12 0x386914
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_13 0x386918
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_14 0x38691C
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_15 0x386920
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_0 0x386924
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_1 0x386928
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_2 0x38692C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_3 0x386930
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_4 0x386934
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_5 0x386938
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_6 0x38693C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_7 0x386940
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_8 0x386944
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_9 0x386948
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_10 0x38694C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_11 0x386950
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_12 0x386954
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_13 0x386958
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_14 0x38695C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_15 0x386960
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_0 0x386964
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_1 0x386968
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_2 0x38696C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_3 0x386970
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_4 0x386974
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_5 0x386978
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_6 0x38697C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_7 0x386980
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_8 0x386984
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_9 0x386988
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_10 0x38698C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_11 0x386990
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_12 0x386994
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_13 0x386998
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_14 0x38699C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_15 0x3869A0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_0 0x3869A4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_1 0x3869A8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_2 0x3869AC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_3 0x3869B0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_4 0x3869B4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_5 0x3869B8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_6 0x3869BC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_7 0x3869C0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_8 0x3869C4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_9 0x3869C8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_10 0x3869CC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_11 0x3869D0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_12 0x3869D4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_13 0x3869D8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_14 0x3869DC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_15 0x3869E0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_0 0x3869E4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_1 0x3869E8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_2 0x3869EC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_3 0x3869F0
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_4 0x3869F4
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_5 0x3869F8
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_6 0x3869FC
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_7 0x386A00
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_8 0x386A04
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_9 0x386A08
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_10 0x386A0C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_11 0x386A10
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_12 0x386A14
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_13 0x386A18
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_14 0x386A1C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_15 0x386A20
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AW 0x386A64
+
+#define mmNIF_RTR_CTRL_0_RANGE_SEC_HIT_AR 0x386A68
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_HIT_AW 0x386A6C
+
+#define mmNIF_RTR_CTRL_0_RANGE_PRIV_HIT_AR 0x386A70
+
+#define mmNIF_RTR_CTRL_0_RGL_CFG 0x386B64
+
+#define mmNIF_RTR_CTRL_0_RGL_SHIFT 0x386B68
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_0 0x386B6C
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_1 0x386B70
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_2 0x386B74
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_3 0x386B78
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_4 0x386B7C
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_5 0x386B80
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_6 0x386B84
+
+#define mmNIF_RTR_CTRL_0_RGL_EXPECTED_LAT_7 0x386B88
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_0 0x386BAC
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_1 0x386BB0
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_2 0x386BB4
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_3 0x386BB8
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_4 0x386BBC
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_5 0x386BC0
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_6 0x386BC4
+
+#define mmNIF_RTR_CTRL_0_RGL_TOKEN_7 0x386BC8
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_0 0x386BEC
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_1 0x386BF0
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_2 0x386BF4
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_3 0x386BF8
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_4 0x386BFC
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_5 0x386C00
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_6 0x386C04
+
+#define mmNIF_RTR_CTRL_0_RGL_BANK_ID_7 0x386C08
+
+#define mmNIF_RTR_CTRL_0_RGL_WDT 0x386C2C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_WRAP 0x386C30
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_WRAP 0x386C34
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_WRAP 0x386C38
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_WRAP 0x386C3C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_WRAP 0x386C40
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_WRAP 0x386C44
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_WRAP 0x386C48
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_WRAP 0x386C4C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_CNT 0x386C50
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_CNT 0x386C54
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_CNT 0x386C58
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_CNT 0x386C5C
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_CNT 0x386C60
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_CNT 0x386C64
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_CNT 0x386C68
+
+#define mmNIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_CNT 0x386C6C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_WRAP 0x386C70
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_WRAP 0x386C74
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_WRAP 0x386C78
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_WRAP 0x386C7C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_WRAP 0x386C80
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_WRAP 0x386C84
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_WRAP 0x386C88
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_WRAP 0x386C8C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_CNT 0x386C90
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_CNT 0x386C94
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_CNT 0x386C98
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_CNT 0x386C9C
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_CNT 0x386CA0
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_CNT 0x386CA4
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_CNT 0x386CA8
+
+#define mmNIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_CNT 0x386CAC
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_0 0x386CB0
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_1 0x386CB4
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_2 0x386CB8
+
+#define mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3 0x386CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
new file mode 100644
index 000000000000..a6047d4e2560
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_1_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_1_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_1_PERM_SEL 0x396108
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_0 0x396114
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_1 0x396118
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_2 0x39611C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_3 0x396120
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_4 0x396124
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_5 0x396128
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_6 0x39612C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_7 0x396130
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_8 0x396134
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_9 0x396138
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_10 0x39613C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_11 0x396140
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_12 0x396144
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_13 0x396148
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_14 0x39614C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_15 0x396150
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_16 0x396154
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_17 0x396158
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_18 0x39615C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_19 0x396160
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_20 0x396164
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_21 0x396168
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_22 0x39616C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_23 0x396170
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_24 0x396174
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_25 0x396178
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_26 0x39617C
+
+#define mmNIF_RTR_CTRL_1_HBM_POLY_H3_27 0x396180
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_0 0x396184
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_1 0x396188
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_2 0x39618C
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_3 0x396190
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_4 0x396194
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_5 0x396198
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_6 0x39619C
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_7 0x3961A0
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_8 0x3961A4
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_9 0x3961A8
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_10 0x3961AC
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_11 0x3961B0
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_12 0x3961B4
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_13 0x3961B8
+
+#define mmNIF_RTR_CTRL_1_SRAM_POLY_H3_14 0x3961BC
+
+#define mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN 0x39626C
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_EN 0x396274
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_SAT 0x396278
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_RST 0x39627C
+
+#define mmNIF_RTR_CTRL_1_RL_HBM_TIMEOUT 0x396280
+
+#define mmNIF_RTR_CTRL_1_SCRAM_HBM_EN 0x396284
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_EN 0x396288
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_SAT 0x39628C
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_RST 0x396290
+
+#define mmNIF_RTR_CTRL_1_RL_PCI_TIMEOUT 0x396294
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_EN 0x39629C
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_SAT 0x3962A0
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_RST 0x3962A4
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_TIMEOUT 0x3962AC
+
+#define mmNIF_RTR_CTRL_1_RL_SRAM_RED 0x3962B4
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_EN 0x3962EC
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_EN 0x3962F0
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE 0x3962F4
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE 0x3962F8
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET_EN 0x396404
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET 0x396408
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_WRAP 0x39640C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_PCI_CTR_CNT 0x396410
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET_EN 0x396414
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET 0x396418
+
+#define mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE 0x39641C
+
+#define mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE 0x396420
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET_EN 0x396424
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET 0x396428
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_WRAP 0x39642C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_PCI_CTR_CNT 0x396430
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET_EN 0x396434
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET 0x396438
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_SEL_0 0x396450
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_SEL_1 0x396454
+
+#define mmNIF_RTR_CTRL_1_NON_LIN_EN 0x396480
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_0 0x396500
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_1 0x396504
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_2 0x396508
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_3 0x39650C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_BANK_4 0x396510
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_0 0x396514
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_1 0x396520
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_2 0x396524
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_3 0x396528
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_4 0x39652C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_5 0x396530
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_6 0x396534
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_7 0x396538
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_8 0x39653C
+
+#define mmNIF_RTR_CTRL_1_NL_SRAM_OFFSET_9 0x396540
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_0 0x396550
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_1 0x396554
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_2 0x396558
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_3 0x39655C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_4 0x396560
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_5 0x396564
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_6 0x396568
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_7 0x39656C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_8 0x396570
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_9 0x396574
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_10 0x396578
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_11 0x39657C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_12 0x396580
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_13 0x396584
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_14 0x396588
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_15 0x39658C
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_16 0x396590
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_17 0x396594
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18 0x396598
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0 0x3965E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_1 0x3965E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_2 0x3965EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_3 0x3965F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_4 0x3965F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_5 0x3965F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_6 0x3965FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_7 0x396600
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_8 0x396604
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_9 0x396608
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_10 0x39660C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_11 0x396610
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_12 0x396614
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_13 0x396618
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_14 0x39661C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_15 0x396620
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0 0x396624
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_1 0x396628
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_2 0x39662C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_3 0x396630
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_4 0x396634
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_5 0x396638
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_6 0x39663C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_7 0x396640
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_8 0x396644
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_9 0x396648
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_10 0x39664C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_11 0x396650
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_12 0x396654
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_13 0x396658
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_14 0x39665C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_15 0x396660
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0 0x396664
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_1 0x396668
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_2 0x39666C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_3 0x396670
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_4 0x396674
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_5 0x396678
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_6 0x39667C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_7 0x396680
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_8 0x396684
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_9 0x396688
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_10 0x39668C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_11 0x396690
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_12 0x396694
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_13 0x396698
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_14 0x39669C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_15 0x3966A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0 0x3966A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_1 0x3966A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_2 0x3966AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_3 0x3966B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_4 0x3966B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_5 0x3966B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_6 0x3966BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_7 0x3966C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_8 0x3966C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_9 0x3966C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_10 0x3966CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_11 0x3966D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_12 0x3966D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_13 0x3966D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_14 0x3966DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_15 0x3966E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_0 0x3966E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_1 0x3966E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_2 0x3966EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_3 0x3966F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_4 0x3966F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_5 0x3966F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_6 0x3966FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_7 0x396700
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_8 0x396704
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_9 0x396708
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_10 0x39670C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_11 0x396710
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_12 0x396714
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_13 0x396718
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_14 0x39671C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_15 0x396720
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_0 0x396724
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_1 0x396728
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_2 0x39672C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_3 0x396730
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_4 0x396734
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_5 0x396738
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_6 0x39673C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_7 0x396740
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_8 0x396744
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_9 0x396748
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_10 0x39674C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_11 0x396750
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_12 0x396754
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_13 0x396758
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_14 0x39675C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_15 0x396760
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_0 0x396764
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_1 0x396768
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_2 0x39676C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_3 0x396770
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_4 0x396774
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_5 0x396778
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_6 0x39677C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_7 0x396780
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_8 0x396784
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_9 0x396788
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_10 0x39678C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_11 0x396790
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_12 0x396794
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_13 0x396798
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_14 0x39679C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_15 0x3967A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_0 0x3967A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_1 0x3967A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_2 0x3967AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_3 0x3967B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_4 0x3967B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_5 0x3967B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_6 0x3967BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_7 0x3967C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_8 0x3967C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_9 0x3967C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_10 0x3967CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_11 0x3967D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_12 0x3967D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_13 0x3967D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_14 0x3967DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_15 0x3967E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0 0x396824
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_1 0x396828
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_2 0x39682C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_3 0x396830
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_4 0x396834
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_5 0x396838
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_6 0x39683C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_7 0x396840
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_8 0x396844
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_9 0x396848
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_10 0x39684C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_11 0x396850
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_12 0x396854
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_13 0x396858
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_14 0x39685C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_15 0x396860
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0 0x396864
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_1 0x396868
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_2 0x39686C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_3 0x396870
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_4 0x396874
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_5 0x396878
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_6 0x39687C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_7 0x396880
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_8 0x396884
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_9 0x396888
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_10 0x39688C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_11 0x396890
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_12 0x396894
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_13 0x396898
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_14 0x39689C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_15 0x3968A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0 0x3968A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_1 0x3968A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_2 0x3968AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_3 0x3968B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_4 0x3968B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_5 0x3968B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_6 0x3968BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_7 0x3968C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_8 0x3968C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_9 0x3968C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_10 0x3968CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_11 0x3968D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_12 0x3968D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_13 0x3968D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_14 0x3968DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_15 0x3968E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0 0x3968E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_1 0x3968E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_2 0x3968EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_3 0x3968F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_4 0x3968F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_5 0x3968F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_6 0x3968FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_7 0x396900
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_8 0x396904
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_9 0x396908
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_10 0x39690C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_11 0x396910
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_12 0x396914
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_13 0x396918
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_14 0x39691C
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_15 0x396920
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 0x396924
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_1 0x396928
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_2 0x39692C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_3 0x396930
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_4 0x396934
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_5 0x396938
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_6 0x39693C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_7 0x396940
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_8 0x396944
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_9 0x396948
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_10 0x39694C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_11 0x396950
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_12 0x396954
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_13 0x396958
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_14 0x39695C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_15 0x396960
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_0 0x396964
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_1 0x396968
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_2 0x39696C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_3 0x396970
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_4 0x396974
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_5 0x396978
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_6 0x39697C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_7 0x396980
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_8 0x396984
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_9 0x396988
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_10 0x39698C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_11 0x396990
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_12 0x396994
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_13 0x396998
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_14 0x39699C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_15 0x3969A0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_0 0x3969A4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_1 0x3969A8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_2 0x3969AC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_3 0x3969B0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_4 0x3969B4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_5 0x3969B8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_6 0x3969BC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_7 0x3969C0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_8 0x3969C4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_9 0x3969C8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_10 0x3969CC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_11 0x3969D0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_12 0x3969D4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_13 0x3969D8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_14 0x3969DC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_15 0x3969E0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_0 0x3969E4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_1 0x3969E8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_2 0x3969EC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_3 0x3969F0
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_4 0x3969F4
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_5 0x3969F8
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_6 0x3969FC
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_7 0x396A00
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_8 0x396A04
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_9 0x396A08
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_10 0x396A0C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_11 0x396A10
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_12 0x396A14
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_13 0x396A18
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_14 0x396A1C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_15 0x396A20
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AW 0x396A64
+
+#define mmNIF_RTR_CTRL_1_RANGE_SEC_HIT_AR 0x396A68
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_HIT_AW 0x396A6C
+
+#define mmNIF_RTR_CTRL_1_RANGE_PRIV_HIT_AR 0x396A70
+
+#define mmNIF_RTR_CTRL_1_RGL_CFG 0x396B64
+
+#define mmNIF_RTR_CTRL_1_RGL_SHIFT 0x396B68
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_0 0x396B6C
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_1 0x396B70
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_2 0x396B74
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_3 0x396B78
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_4 0x396B7C
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_5 0x396B80
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_6 0x396B84
+
+#define mmNIF_RTR_CTRL_1_RGL_EXPECTED_LAT_7 0x396B88
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_0 0x396BAC
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_1 0x396BB0
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_2 0x396BB4
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_3 0x396BB8
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_4 0x396BBC
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_5 0x396BC0
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_6 0x396BC4
+
+#define mmNIF_RTR_CTRL_1_RGL_TOKEN_7 0x396BC8
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_0 0x396BEC
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_1 0x396BF0
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_2 0x396BF4
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_3 0x396BF8
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_4 0x396BFC
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_5 0x396C00
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_6 0x396C04
+
+#define mmNIF_RTR_CTRL_1_RGL_BANK_ID_7 0x396C08
+
+#define mmNIF_RTR_CTRL_1_RGL_WDT 0x396C2C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_WRAP 0x396C30
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_WRAP 0x396C34
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_WRAP 0x396C38
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_WRAP 0x396C3C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_WRAP 0x396C40
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_WRAP 0x396C44
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_WRAP 0x396C48
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_WRAP 0x396C4C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_CNT 0x396C50
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_CNT 0x396C54
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_CNT 0x396C58
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_CNT 0x396C5C
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_CNT 0x396C60
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_CNT 0x396C64
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_CNT 0x396C68
+
+#define mmNIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_CNT 0x396C6C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_WRAP 0x396C70
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_WRAP 0x396C74
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_WRAP 0x396C78
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_WRAP 0x396C7C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_WRAP 0x396C80
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_WRAP 0x396C84
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_WRAP 0x396C88
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_WRAP 0x396C8C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_CNT 0x396C90
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_CNT 0x396C94
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_CNT 0x396C98
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_CNT 0x396C9C
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_CNT 0x396CA0
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_CNT 0x396CA4
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_CNT 0x396CA8
+
+#define mmNIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_CNT 0x396CAC
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_0 0x396CB0
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_1 0x396CB4
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_2 0x396CB8
+
+#define mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3 0x396CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
new file mode 100644
index 000000000000..9de8442f9bc2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_2_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_2_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_2 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_2_PERM_SEL 0x3A6108
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_0 0x3A6114
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_1 0x3A6118
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_2 0x3A611C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_3 0x3A6120
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_4 0x3A6124
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_5 0x3A6128
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_6 0x3A612C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_7 0x3A6130
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_8 0x3A6134
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_9 0x3A6138
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_10 0x3A613C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_11 0x3A6140
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_12 0x3A6144
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_13 0x3A6148
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_14 0x3A614C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_15 0x3A6150
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_16 0x3A6154
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_17 0x3A6158
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_18 0x3A615C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_19 0x3A6160
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_20 0x3A6164
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_21 0x3A6168
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_22 0x3A616C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_23 0x3A6170
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_24 0x3A6174
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_25 0x3A6178
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_26 0x3A617C
+
+#define mmNIF_RTR_CTRL_2_HBM_POLY_H3_27 0x3A6180
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_0 0x3A6184
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_1 0x3A6188
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_2 0x3A618C
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_3 0x3A6190
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_4 0x3A6194
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_5 0x3A6198
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_6 0x3A619C
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_7 0x3A61A0
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_8 0x3A61A4
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_9 0x3A61A8
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_10 0x3A61AC
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_11 0x3A61B0
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_12 0x3A61B4
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_13 0x3A61B8
+
+#define mmNIF_RTR_CTRL_2_SRAM_POLY_H3_14 0x3A61BC
+
+#define mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN 0x3A626C
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_EN 0x3A6274
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_SAT 0x3A6278
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_RST 0x3A627C
+
+#define mmNIF_RTR_CTRL_2_RL_HBM_TIMEOUT 0x3A6280
+
+#define mmNIF_RTR_CTRL_2_SCRAM_HBM_EN 0x3A6284
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_EN 0x3A6288
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_SAT 0x3A628C
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_RST 0x3A6290
+
+#define mmNIF_RTR_CTRL_2_RL_PCI_TIMEOUT 0x3A6294
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_EN 0x3A629C
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_SAT 0x3A62A0
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_RST 0x3A62A4
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_TIMEOUT 0x3A62AC
+
+#define mmNIF_RTR_CTRL_2_RL_SRAM_RED 0x3A62B4
+
+#define mmNIF_RTR_CTRL_2_E2E_HBM_EN 0x3A62EC
+
+#define mmNIF_RTR_CTRL_2_E2E_PCI_EN 0x3A62F0
+
+#define mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE 0x3A62F4
+
+#define mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE 0x3A62F8
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET_EN 0x3A6404
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET 0x3A6408
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_WRAP 0x3A640C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_PCI_CTR_CNT 0x3A6410
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET_EN 0x3A6414
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET 0x3A6418
+
+#define mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE 0x3A641C
+
+#define mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE 0x3A6420
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET_EN 0x3A6424
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET 0x3A6428
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_WRAP 0x3A642C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_PCI_CTR_CNT 0x3A6430
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET_EN 0x3A6434
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET 0x3A6438
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_SEL_0 0x3A6450
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_SEL_1 0x3A6454
+
+#define mmNIF_RTR_CTRL_2_NON_LIN_EN 0x3A6480
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_0 0x3A6500
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_1 0x3A6504
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_2 0x3A6508
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_3 0x3A650C
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_BANK_4 0x3A6510
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_0 0x3A6514
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_1 0x3A6520
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_2 0x3A6524
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_3 0x3A6528
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_4 0x3A652C
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_5 0x3A6530
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_6 0x3A6534
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_7 0x3A6538
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_8 0x3A653C
+
+#define mmNIF_RTR_CTRL_2_NL_SRAM_OFFSET_9 0x3A6540
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_0 0x3A6550
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_1 0x3A6554
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_2 0x3A6558
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_3 0x3A655C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_4 0x3A6560
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_5 0x3A6564
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_6 0x3A6568
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_7 0x3A656C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_8 0x3A6570
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_9 0x3A6574
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_10 0x3A6578
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_11 0x3A657C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_12 0x3A6580
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_13 0x3A6584
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_14 0x3A6588
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_15 0x3A658C
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_16 0x3A6590
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_17 0x3A6594
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18 0x3A6598
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0 0x3A65E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_1 0x3A65E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_2 0x3A65EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_3 0x3A65F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_4 0x3A65F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_5 0x3A65F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_6 0x3A65FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_7 0x3A6600
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_8 0x3A6604
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_9 0x3A6608
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_10 0x3A660C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_11 0x3A6610
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_12 0x3A6614
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_13 0x3A6618
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_14 0x3A661C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_15 0x3A6620
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0 0x3A6624
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_1 0x3A6628
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_2 0x3A662C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_3 0x3A6630
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_4 0x3A6634
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_5 0x3A6638
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_6 0x3A663C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_7 0x3A6640
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_8 0x3A6644
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_9 0x3A6648
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_10 0x3A664C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_11 0x3A6650
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_12 0x3A6654
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_13 0x3A6658
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_14 0x3A665C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_15 0x3A6660
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x3A6664
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_1 0x3A6668
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_2 0x3A666C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_3 0x3A6670
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_4 0x3A6674
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_5 0x3A6678
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_6 0x3A667C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_7 0x3A6680
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_8 0x3A6684
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_9 0x3A6688
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_10 0x3A668C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_11 0x3A6690
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12 0x3A6694
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_13 0x3A6698
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_14 0x3A669C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_15 0x3A66A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0 0x3A66A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_1 0x3A66A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_2 0x3A66AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_3 0x3A66B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_4 0x3A66B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_5 0x3A66B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_6 0x3A66BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_7 0x3A66C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_8 0x3A66C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_9 0x3A66C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_10 0x3A66CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_11 0x3A66D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_12 0x3A66D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_13 0x3A66D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_14 0x3A66DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_15 0x3A66E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_0 0x3A66E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_1 0x3A66E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_2 0x3A66EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_3 0x3A66F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_4 0x3A66F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_5 0x3A66F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_6 0x3A66FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_7 0x3A6700
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_8 0x3A6704
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_9 0x3A6708
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_10 0x3A670C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_11 0x3A6710
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_12 0x3A6714
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_13 0x3A6718
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_14 0x3A671C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_15 0x3A6720
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_0 0x3A6724
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_1 0x3A6728
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_2 0x3A672C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_3 0x3A6730
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_4 0x3A6734
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_5 0x3A6738
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_6 0x3A673C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_7 0x3A6740
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_8 0x3A6744
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_9 0x3A6748
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_10 0x3A674C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_11 0x3A6750
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_12 0x3A6754
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_13 0x3A6758
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_14 0x3A675C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_15 0x3A6760
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_0 0x3A6764
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_1 0x3A6768
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_2 0x3A676C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_3 0x3A6770
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_4 0x3A6774
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_5 0x3A6778
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_6 0x3A677C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_7 0x3A6780
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_8 0x3A6784
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_9 0x3A6788
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_10 0x3A678C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_11 0x3A6790
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_12 0x3A6794
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_13 0x3A6798
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_14 0x3A679C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_15 0x3A67A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_0 0x3A67A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_1 0x3A67A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_2 0x3A67AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_3 0x3A67B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_4 0x3A67B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_5 0x3A67B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_6 0x3A67BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_7 0x3A67C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_8 0x3A67C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_9 0x3A67C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_10 0x3A67CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_11 0x3A67D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_12 0x3A67D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_13 0x3A67D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_14 0x3A67DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_15 0x3A67E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0 0x3A6824
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_1 0x3A6828
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_2 0x3A682C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_3 0x3A6830
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_4 0x3A6834
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_5 0x3A6838
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_6 0x3A683C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_7 0x3A6840
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_8 0x3A6844
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_9 0x3A6848
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_10 0x3A684C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_11 0x3A6850
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_12 0x3A6854
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_13 0x3A6858
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_14 0x3A685C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_15 0x3A6860
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0 0x3A6864
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_1 0x3A6868
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_2 0x3A686C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_3 0x3A6870
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_4 0x3A6874
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_5 0x3A6878
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_6 0x3A687C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_7 0x3A6880
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_8 0x3A6884
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_9 0x3A6888
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_10 0x3A688C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_11 0x3A6890
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_12 0x3A6894
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_13 0x3A6898
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_14 0x3A689C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_15 0x3A68A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0 0x3A68A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_1 0x3A68A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_2 0x3A68AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_3 0x3A68B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_4 0x3A68B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_5 0x3A68B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_6 0x3A68BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_7 0x3A68C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_8 0x3A68C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_9 0x3A68C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_10 0x3A68CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_11 0x3A68D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_12 0x3A68D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_13 0x3A68D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_14 0x3A68DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_15 0x3A68E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0 0x3A68E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_1 0x3A68E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_2 0x3A68EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_3 0x3A68F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_4 0x3A68F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_5 0x3A68F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_6 0x3A68FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_7 0x3A6900
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_8 0x3A6904
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_9 0x3A6908
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_10 0x3A690C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_11 0x3A6910
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_12 0x3A6914
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_13 0x3A6918
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_14 0x3A691C
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_15 0x3A6920
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_0 0x3A6924
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_1 0x3A6928
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_2 0x3A692C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_3 0x3A6930
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_4 0x3A6934
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_5 0x3A6938
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_6 0x3A693C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_7 0x3A6940
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_8 0x3A6944
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_9 0x3A6948
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_10 0x3A694C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_11 0x3A6950
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_12 0x3A6954
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_13 0x3A6958
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_14 0x3A695C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_15 0x3A6960
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_0 0x3A6964
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_1 0x3A6968
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_2 0x3A696C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_3 0x3A6970
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_4 0x3A6974
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_5 0x3A6978
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_6 0x3A697C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_7 0x3A6980
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_8 0x3A6984
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_9 0x3A6988
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_10 0x3A698C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_11 0x3A6990
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_12 0x3A6994
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_13 0x3A6998
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_14 0x3A699C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_15 0x3A69A0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_0 0x3A69A4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_1 0x3A69A8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_2 0x3A69AC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_3 0x3A69B0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_4 0x3A69B4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_5 0x3A69B8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_6 0x3A69BC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_7 0x3A69C0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_8 0x3A69C4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_9 0x3A69C8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_10 0x3A69CC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_11 0x3A69D0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_12 0x3A69D4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_13 0x3A69D8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_14 0x3A69DC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_15 0x3A69E0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_0 0x3A69E4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_1 0x3A69E8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_2 0x3A69EC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_3 0x3A69F0
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_4 0x3A69F4
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_5 0x3A69F8
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_6 0x3A69FC
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_7 0x3A6A00
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_8 0x3A6A04
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_9 0x3A6A08
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_10 0x3A6A0C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_11 0x3A6A10
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_12 0x3A6A14
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_13 0x3A6A18
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_14 0x3A6A1C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_15 0x3A6A20
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AW 0x3A6A64
+
+#define mmNIF_RTR_CTRL_2_RANGE_SEC_HIT_AR 0x3A6A68
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_HIT_AW 0x3A6A6C
+
+#define mmNIF_RTR_CTRL_2_RANGE_PRIV_HIT_AR 0x3A6A70
+
+#define mmNIF_RTR_CTRL_2_RGL_CFG 0x3A6B64
+
+#define mmNIF_RTR_CTRL_2_RGL_SHIFT 0x3A6B68
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_0 0x3A6B6C
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_1 0x3A6B70
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_2 0x3A6B74
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_3 0x3A6B78
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_4 0x3A6B7C
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_5 0x3A6B80
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_6 0x3A6B84
+
+#define mmNIF_RTR_CTRL_2_RGL_EXPECTED_LAT_7 0x3A6B88
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_0 0x3A6BAC
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_1 0x3A6BB0
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_2 0x3A6BB4
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_3 0x3A6BB8
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_4 0x3A6BBC
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_5 0x3A6BC0
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_6 0x3A6BC4
+
+#define mmNIF_RTR_CTRL_2_RGL_TOKEN_7 0x3A6BC8
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_0 0x3A6BEC
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_1 0x3A6BF0
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_2 0x3A6BF4
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_3 0x3A6BF8
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_4 0x3A6BFC
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_5 0x3A6C00
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_6 0x3A6C04
+
+#define mmNIF_RTR_CTRL_2_RGL_BANK_ID_7 0x3A6C08
+
+#define mmNIF_RTR_CTRL_2_RGL_WDT 0x3A6C2C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_WRAP 0x3A6C30
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_WRAP 0x3A6C34
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_WRAP 0x3A6C38
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_WRAP 0x3A6C3C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_WRAP 0x3A6C40
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_WRAP 0x3A6C44
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_WRAP 0x3A6C48
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_WRAP 0x3A6C4C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_CNT 0x3A6C50
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_CNT 0x3A6C54
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_CNT 0x3A6C58
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_CNT 0x3A6C5C
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_CNT 0x3A6C60
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_CNT 0x3A6C64
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_CNT 0x3A6C68
+
+#define mmNIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_CNT 0x3A6C6C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_WRAP 0x3A6C70
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_WRAP 0x3A6C74
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_WRAP 0x3A6C78
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_WRAP 0x3A6C7C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_WRAP 0x3A6C80
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_WRAP 0x3A6C84
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_WRAP 0x3A6C88
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_WRAP 0x3A6C8C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_CNT 0x3A6C90
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_CNT 0x3A6C94
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_CNT 0x3A6C98
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_CNT 0x3A6C9C
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_CNT 0x3A6CA0
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_CNT 0x3A6CA4
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_CNT 0x3A6CA8
+
+#define mmNIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_CNT 0x3A6CAC
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_0 0x3A6CB0
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_1 0x3A6CB4
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_2 0x3A6CB8
+
+#define mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3 0x3A6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_2_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
new file mode 100644
index 000000000000..34fd47685edd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_3_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_3_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_3 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_3_PERM_SEL 0x3B6108
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_0 0x3B6114
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_1 0x3B6118
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_2 0x3B611C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_3 0x3B6120
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_4 0x3B6124
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_5 0x3B6128
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_6 0x3B612C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_7 0x3B6130
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_8 0x3B6134
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_9 0x3B6138
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_10 0x3B613C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_11 0x3B6140
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_12 0x3B6144
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_13 0x3B6148
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_14 0x3B614C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_15 0x3B6150
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_16 0x3B6154
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_17 0x3B6158
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_18 0x3B615C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_19 0x3B6160
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_20 0x3B6164
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_21 0x3B6168
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_22 0x3B616C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_23 0x3B6170
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_24 0x3B6174
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_25 0x3B6178
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_26 0x3B617C
+
+#define mmNIF_RTR_CTRL_3_HBM_POLY_H3_27 0x3B6180
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_0 0x3B6184
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_1 0x3B6188
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_2 0x3B618C
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_3 0x3B6190
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_4 0x3B6194
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_5 0x3B6198
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_6 0x3B619C
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_7 0x3B61A0
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_8 0x3B61A4
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_9 0x3B61A8
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_10 0x3B61AC
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_11 0x3B61B0
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_12 0x3B61B4
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_13 0x3B61B8
+
+#define mmNIF_RTR_CTRL_3_SRAM_POLY_H3_14 0x3B61BC
+
+#define mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN 0x3B626C
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_EN 0x3B6274
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_SAT 0x3B6278
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_RST 0x3B627C
+
+#define mmNIF_RTR_CTRL_3_RL_HBM_TIMEOUT 0x3B6280
+
+#define mmNIF_RTR_CTRL_3_SCRAM_HBM_EN 0x3B6284
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_EN 0x3B6288
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_SAT 0x3B628C
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_RST 0x3B6290
+
+#define mmNIF_RTR_CTRL_3_RL_PCI_TIMEOUT 0x3B6294
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_EN 0x3B629C
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_SAT 0x3B62A0
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_RST 0x3B62A4
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_TIMEOUT 0x3B62AC
+
+#define mmNIF_RTR_CTRL_3_RL_SRAM_RED 0x3B62B4
+
+#define mmNIF_RTR_CTRL_3_E2E_HBM_EN 0x3B62EC
+
+#define mmNIF_RTR_CTRL_3_E2E_PCI_EN 0x3B62F0
+
+#define mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE 0x3B62F4
+
+#define mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE 0x3B62F8
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET_EN 0x3B6404
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET 0x3B6408
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_WRAP 0x3B640C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_PCI_CTR_CNT 0x3B6410
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET_EN 0x3B6414
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET 0x3B6418
+
+#define mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE 0x3B641C
+
+#define mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE 0x3B6420
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET_EN 0x3B6424
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET 0x3B6428
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_WRAP 0x3B642C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_PCI_CTR_CNT 0x3B6430
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET_EN 0x3B6434
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET 0x3B6438
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_SEL_0 0x3B6450
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_SEL_1 0x3B6454
+
+#define mmNIF_RTR_CTRL_3_NON_LIN_EN 0x3B6480
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_0 0x3B6500
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_1 0x3B6504
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_2 0x3B6508
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_3 0x3B650C
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_BANK_4 0x3B6510
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_0 0x3B6514
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_1 0x3B6520
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_2 0x3B6524
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_3 0x3B6528
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_4 0x3B652C
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_5 0x3B6530
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_6 0x3B6534
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_7 0x3B6538
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_8 0x3B653C
+
+#define mmNIF_RTR_CTRL_3_NL_SRAM_OFFSET_9 0x3B6540
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_0 0x3B6550
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_1 0x3B6554
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_2 0x3B6558
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_3 0x3B655C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_4 0x3B6560
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_5 0x3B6564
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_6 0x3B6568
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_7 0x3B656C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_8 0x3B6570
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_9 0x3B6574
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_10 0x3B6578
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_11 0x3B657C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_12 0x3B6580
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_13 0x3B6584
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_14 0x3B6588
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_15 0x3B658C
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_16 0x3B6590
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_17 0x3B6594
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18 0x3B6598
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0 0x3B65E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_1 0x3B65E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_2 0x3B65EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_3 0x3B65F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_4 0x3B65F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_5 0x3B65F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_6 0x3B65FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_7 0x3B6600
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_8 0x3B6604
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_9 0x3B6608
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_10 0x3B660C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_11 0x3B6610
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_12 0x3B6614
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_13 0x3B6618
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_14 0x3B661C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_15 0x3B6620
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0 0x3B6624
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_1 0x3B6628
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_2 0x3B662C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_3 0x3B6630
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_4 0x3B6634
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_5 0x3B6638
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_6 0x3B663C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_7 0x3B6640
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_8 0x3B6644
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_9 0x3B6648
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_10 0x3B664C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_11 0x3B6650
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_12 0x3B6654
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_13 0x3B6658
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_14 0x3B665C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_15 0x3B6660
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0 0x3B6664
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_1 0x3B6668
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_2 0x3B666C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_3 0x3B6670
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_4 0x3B6674
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_5 0x3B6678
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_6 0x3B667C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_7 0x3B6680
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_8 0x3B6684
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_9 0x3B6688
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_10 0x3B668C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_11 0x3B6690
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_12 0x3B6694
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_13 0x3B6698
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_14 0x3B669C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_15 0x3B66A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0 0x3B66A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_1 0x3B66A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_2 0x3B66AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_3 0x3B66B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_4 0x3B66B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_5 0x3B66B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_6 0x3B66BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_7 0x3B66C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_8 0x3B66C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_9 0x3B66C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_10 0x3B66CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_11 0x3B66D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_12 0x3B66D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_13 0x3B66D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_14 0x3B66DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_15 0x3B66E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_0 0x3B66E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_1 0x3B66E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_2 0x3B66EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_3 0x3B66F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_4 0x3B66F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_5 0x3B66F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_6 0x3B66FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_7 0x3B6700
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_8 0x3B6704
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_9 0x3B6708
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_10 0x3B670C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_11 0x3B6710
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_12 0x3B6714
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_13 0x3B6718
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_14 0x3B671C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_15 0x3B6720
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_0 0x3B6724
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_1 0x3B6728
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_2 0x3B672C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_3 0x3B6730
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_4 0x3B6734
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_5 0x3B6738
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_6 0x3B673C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_7 0x3B6740
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_8 0x3B6744
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_9 0x3B6748
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_10 0x3B674C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_11 0x3B6750
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_12 0x3B6754
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_13 0x3B6758
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_14 0x3B675C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_15 0x3B6760
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_0 0x3B6764
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_1 0x3B6768
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_2 0x3B676C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_3 0x3B6770
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_4 0x3B6774
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_5 0x3B6778
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_6 0x3B677C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_7 0x3B6780
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_8 0x3B6784
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_9 0x3B6788
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_10 0x3B678C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_11 0x3B6790
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_12 0x3B6794
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_13 0x3B6798
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_14 0x3B679C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_15 0x3B67A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_0 0x3B67A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_1 0x3B67A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_2 0x3B67AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_3 0x3B67B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_4 0x3B67B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_5 0x3B67B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_6 0x3B67BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_7 0x3B67C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_8 0x3B67C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_9 0x3B67C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_10 0x3B67CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_11 0x3B67D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_12 0x3B67D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_13 0x3B67D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_14 0x3B67DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_15 0x3B67E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0 0x3B6824
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_1 0x3B6828
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_2 0x3B682C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_3 0x3B6830
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_4 0x3B6834
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_5 0x3B6838
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_6 0x3B683C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_7 0x3B6840
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_8 0x3B6844
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_9 0x3B6848
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_10 0x3B684C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_11 0x3B6850
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_12 0x3B6854
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_13 0x3B6858
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_14 0x3B685C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_15 0x3B6860
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0 0x3B6864
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_1 0x3B6868
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_2 0x3B686C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_3 0x3B6870
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_4 0x3B6874
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_5 0x3B6878
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_6 0x3B687C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_7 0x3B6880
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_8 0x3B6884
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_9 0x3B6888
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_10 0x3B688C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_11 0x3B6890
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_12 0x3B6894
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_13 0x3B6898
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_14 0x3B689C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_15 0x3B68A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0 0x3B68A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_1 0x3B68A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_2 0x3B68AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_3 0x3B68B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_4 0x3B68B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_5 0x3B68B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_6 0x3B68BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_7 0x3B68C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_8 0x3B68C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_9 0x3B68C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_10 0x3B68CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_11 0x3B68D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_12 0x3B68D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_13 0x3B68D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_14 0x3B68DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_15 0x3B68E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0 0x3B68E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_1 0x3B68E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_2 0x3B68EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_3 0x3B68F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_4 0x3B68F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_5 0x3B68F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_6 0x3B68FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_7 0x3B6900
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_8 0x3B6904
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_9 0x3B6908
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_10 0x3B690C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_11 0x3B6910
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_12 0x3B6914
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_13 0x3B6918
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_14 0x3B691C
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_15 0x3B6920
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_0 0x3B6924
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_1 0x3B6928
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_2 0x3B692C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_3 0x3B6930
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_4 0x3B6934
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_5 0x3B6938
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_6 0x3B693C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_7 0x3B6940
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_8 0x3B6944
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_9 0x3B6948
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_10 0x3B694C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_11 0x3B6950
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_12 0x3B6954
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_13 0x3B6958
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_14 0x3B695C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_15 0x3B6960
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_0 0x3B6964
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_1 0x3B6968
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_2 0x3B696C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_3 0x3B6970
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_4 0x3B6974
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_5 0x3B6978
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_6 0x3B697C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_7 0x3B6980
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_8 0x3B6984
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_9 0x3B6988
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_10 0x3B698C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_11 0x3B6990
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_12 0x3B6994
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_13 0x3B6998
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_14 0x3B699C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_15 0x3B69A0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_0 0x3B69A4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_1 0x3B69A8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_2 0x3B69AC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_3 0x3B69B0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_4 0x3B69B4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_5 0x3B69B8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_6 0x3B69BC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_7 0x3B69C0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_8 0x3B69C4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_9 0x3B69C8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_10 0x3B69CC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_11 0x3B69D0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_12 0x3B69D4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_13 0x3B69D8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_14 0x3B69DC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_15 0x3B69E0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_0 0x3B69E4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_1 0x3B69E8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_2 0x3B69EC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_3 0x3B69F0
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_4 0x3B69F4
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_5 0x3B69F8
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_6 0x3B69FC
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_7 0x3B6A00
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_8 0x3B6A04
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_9 0x3B6A08
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_10 0x3B6A0C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_11 0x3B6A10
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_12 0x3B6A14
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_13 0x3B6A18
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_14 0x3B6A1C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_15 0x3B6A20
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AW 0x3B6A64
+
+#define mmNIF_RTR_CTRL_3_RANGE_SEC_HIT_AR 0x3B6A68
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_HIT_AW 0x3B6A6C
+
+#define mmNIF_RTR_CTRL_3_RANGE_PRIV_HIT_AR 0x3B6A70
+
+#define mmNIF_RTR_CTRL_3_RGL_CFG 0x3B6B64
+
+#define mmNIF_RTR_CTRL_3_RGL_SHIFT 0x3B6B68
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_0 0x3B6B6C
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_1 0x3B6B70
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_2 0x3B6B74
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_3 0x3B6B78
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_4 0x3B6B7C
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_5 0x3B6B80
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_6 0x3B6B84
+
+#define mmNIF_RTR_CTRL_3_RGL_EXPECTED_LAT_7 0x3B6B88
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_0 0x3B6BAC
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_1 0x3B6BB0
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_2 0x3B6BB4
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_3 0x3B6BB8
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_4 0x3B6BBC
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_5 0x3B6BC0
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_6 0x3B6BC4
+
+#define mmNIF_RTR_CTRL_3_RGL_TOKEN_7 0x3B6BC8
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_0 0x3B6BEC
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_1 0x3B6BF0
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_2 0x3B6BF4
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_3 0x3B6BF8
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_4 0x3B6BFC
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_5 0x3B6C00
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_6 0x3B6C04
+
+#define mmNIF_RTR_CTRL_3_RGL_BANK_ID_7 0x3B6C08
+
+#define mmNIF_RTR_CTRL_3_RGL_WDT 0x3B6C2C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_WRAP 0x3B6C30
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_WRAP 0x3B6C34
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_WRAP 0x3B6C38
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_WRAP 0x3B6C3C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_WRAP 0x3B6C40
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_WRAP 0x3B6C44
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_WRAP 0x3B6C48
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_WRAP 0x3B6C4C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_CNT 0x3B6C50
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_CNT 0x3B6C54
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_CNT 0x3B6C58
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_CNT 0x3B6C5C
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_CNT 0x3B6C60
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_CNT 0x3B6C64
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_CNT 0x3B6C68
+
+#define mmNIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_CNT 0x3B6C6C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_WRAP 0x3B6C70
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_WRAP 0x3B6C74
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_WRAP 0x3B6C78
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_WRAP 0x3B6C7C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_WRAP 0x3B6C80
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_WRAP 0x3B6C84
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_WRAP 0x3B6C88
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_WRAP 0x3B6C8C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_CNT 0x3B6C90
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_CNT 0x3B6C94
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_CNT 0x3B6C98
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_CNT 0x3B6C9C
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_CNT 0x3B6CA0
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_CNT 0x3B6CA4
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_CNT 0x3B6CA8
+
+#define mmNIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_CNT 0x3B6CAC
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_0 0x3B6CB0
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_1 0x3B6CB4
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_2 0x3B6CB8
+
+#define mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3 0x3B6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_3_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
new file mode 100644
index 000000000000..543a98f81767
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_4_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_4_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_4 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_4_PERM_SEL 0x3C6108
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_0 0x3C6114
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_1 0x3C6118
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_2 0x3C611C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_3 0x3C6120
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_4 0x3C6124
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_5 0x3C6128
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_6 0x3C612C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_7 0x3C6130
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_8 0x3C6134
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_9 0x3C6138
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_10 0x3C613C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_11 0x3C6140
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_12 0x3C6144
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_13 0x3C6148
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_14 0x3C614C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_15 0x3C6150
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_16 0x3C6154
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_17 0x3C6158
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_18 0x3C615C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_19 0x3C6160
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_20 0x3C6164
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_21 0x3C6168
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_22 0x3C616C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_23 0x3C6170
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_24 0x3C6174
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_25 0x3C6178
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_26 0x3C617C
+
+#define mmNIF_RTR_CTRL_4_HBM_POLY_H3_27 0x3C6180
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_0 0x3C6184
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_1 0x3C6188
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_2 0x3C618C
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_3 0x3C6190
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_4 0x3C6194
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_5 0x3C6198
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_6 0x3C619C
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_7 0x3C61A0
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_8 0x3C61A4
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_9 0x3C61A8
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_10 0x3C61AC
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_11 0x3C61B0
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_12 0x3C61B4
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_13 0x3C61B8
+
+#define mmNIF_RTR_CTRL_4_SRAM_POLY_H3_14 0x3C61BC
+
+#define mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN 0x3C626C
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_EN 0x3C6274
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_SAT 0x3C6278
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_RST 0x3C627C
+
+#define mmNIF_RTR_CTRL_4_RL_HBM_TIMEOUT 0x3C6280
+
+#define mmNIF_RTR_CTRL_4_SCRAM_HBM_EN 0x3C6284
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_EN 0x3C6288
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_SAT 0x3C628C
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_RST 0x3C6290
+
+#define mmNIF_RTR_CTRL_4_RL_PCI_TIMEOUT 0x3C6294
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_EN 0x3C629C
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_SAT 0x3C62A0
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_RST 0x3C62A4
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_TIMEOUT 0x3C62AC
+
+#define mmNIF_RTR_CTRL_4_RL_SRAM_RED 0x3C62B4
+
+#define mmNIF_RTR_CTRL_4_E2E_HBM_EN 0x3C62EC
+
+#define mmNIF_RTR_CTRL_4_E2E_PCI_EN 0x3C62F0
+
+#define mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE 0x3C62F4
+
+#define mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE 0x3C62F8
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET_EN 0x3C6404
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET 0x3C6408
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_WRAP 0x3C640C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_PCI_CTR_CNT 0x3C6410
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET_EN 0x3C6414
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET 0x3C6418
+
+#define mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE 0x3C641C
+
+#define mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE 0x3C6420
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET_EN 0x3C6424
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET 0x3C6428
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_WRAP 0x3C642C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_PCI_CTR_CNT 0x3C6430
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET_EN 0x3C6434
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET 0x3C6438
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_SEL_0 0x3C6450
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_SEL_1 0x3C6454
+
+#define mmNIF_RTR_CTRL_4_NON_LIN_EN 0x3C6480
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_0 0x3C6500
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_1 0x3C6504
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_2 0x3C6508
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_3 0x3C650C
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_BANK_4 0x3C6510
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_0 0x3C6514
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_1 0x3C6520
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_2 0x3C6524
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_3 0x3C6528
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_4 0x3C652C
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_5 0x3C6530
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_6 0x3C6534
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_7 0x3C6538
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_8 0x3C653C
+
+#define mmNIF_RTR_CTRL_4_NL_SRAM_OFFSET_9 0x3C6540
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_0 0x3C6550
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_1 0x3C6554
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_2 0x3C6558
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_3 0x3C655C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_4 0x3C6560
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_5 0x3C6564
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_6 0x3C6568
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_7 0x3C656C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_8 0x3C6570
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_9 0x3C6574
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_10 0x3C6578
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_11 0x3C657C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_12 0x3C6580
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_13 0x3C6584
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_14 0x3C6588
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_15 0x3C658C
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_16 0x3C6590
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_17 0x3C6594
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18 0x3C6598
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 0x3C65E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_1 0x3C65E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_2 0x3C65EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_3 0x3C65F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_4 0x3C65F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_5 0x3C65F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_6 0x3C65FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_7 0x3C6600
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_8 0x3C6604
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_9 0x3C6608
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_10 0x3C660C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_11 0x3C6610
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_12 0x3C6614
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_13 0x3C6618
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_14 0x3C661C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_15 0x3C6620
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0 0x3C6624
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_1 0x3C6628
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_2 0x3C662C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_3 0x3C6630
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_4 0x3C6634
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_5 0x3C6638
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_6 0x3C663C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_7 0x3C6640
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_8 0x3C6644
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_9 0x3C6648
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_10 0x3C664C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_11 0x3C6650
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_12 0x3C6654
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_13 0x3C6658
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_14 0x3C665C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_15 0x3C6660
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0 0x3C6664
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_1 0x3C6668
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_2 0x3C666C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_3 0x3C6670
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_4 0x3C6674
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_5 0x3C6678
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_6 0x3C667C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_7 0x3C6680
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_8 0x3C6684
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_9 0x3C6688
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_10 0x3C668C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_11 0x3C6690
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_12 0x3C6694
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_13 0x3C6698
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_14 0x3C669C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_15 0x3C66A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0 0x3C66A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_1 0x3C66A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_2 0x3C66AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_3 0x3C66B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_4 0x3C66B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_5 0x3C66B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_6 0x3C66BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_7 0x3C66C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_8 0x3C66C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_9 0x3C66C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_10 0x3C66CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_11 0x3C66D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_12 0x3C66D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_13 0x3C66D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_14 0x3C66DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_15 0x3C66E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_0 0x3C66E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_1 0x3C66E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_2 0x3C66EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_3 0x3C66F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_4 0x3C66F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_5 0x3C66F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_6 0x3C66FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_7 0x3C6700
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_8 0x3C6704
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_9 0x3C6708
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_10 0x3C670C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_11 0x3C6710
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_12 0x3C6714
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_13 0x3C6718
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_14 0x3C671C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_15 0x3C6720
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_0 0x3C6724
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_1 0x3C6728
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_2 0x3C672C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_3 0x3C6730
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_4 0x3C6734
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_5 0x3C6738
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_6 0x3C673C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_7 0x3C6740
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_8 0x3C6744
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_9 0x3C6748
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_10 0x3C674C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_11 0x3C6750
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_12 0x3C6754
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_13 0x3C6758
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_14 0x3C675C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_15 0x3C6760
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_0 0x3C6764
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_1 0x3C6768
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_2 0x3C676C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_3 0x3C6770
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_4 0x3C6774
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_5 0x3C6778
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_6 0x3C677C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_7 0x3C6780
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_8 0x3C6784
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_9 0x3C6788
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_10 0x3C678C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_11 0x3C6790
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_12 0x3C6794
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_13 0x3C6798
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_14 0x3C679C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_15 0x3C67A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_0 0x3C67A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_1 0x3C67A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_2 0x3C67AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_3 0x3C67B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_4 0x3C67B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_5 0x3C67B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_6 0x3C67BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_7 0x3C67C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_8 0x3C67C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_9 0x3C67C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_10 0x3C67CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_11 0x3C67D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_12 0x3C67D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_13 0x3C67D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_14 0x3C67DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_15 0x3C67E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0 0x3C6824
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_1 0x3C6828
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_2 0x3C682C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_3 0x3C6830
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_4 0x3C6834
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_5 0x3C6838
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_6 0x3C683C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_7 0x3C6840
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_8 0x3C6844
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_9 0x3C6848
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_10 0x3C684C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_11 0x3C6850
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_12 0x3C6854
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_13 0x3C6858
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_14 0x3C685C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_15 0x3C6860
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0 0x3C6864
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_1 0x3C6868
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_2 0x3C686C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_3 0x3C6870
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_4 0x3C6874
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_5 0x3C6878
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_6 0x3C687C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_7 0x3C6880
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_8 0x3C6884
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_9 0x3C6888
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_10 0x3C688C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_11 0x3C6890
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_12 0x3C6894
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_13 0x3C6898
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_14 0x3C689C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_15 0x3C68A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0 0x3C68A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_1 0x3C68A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_2 0x3C68AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_3 0x3C68B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_4 0x3C68B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_5 0x3C68B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_6 0x3C68BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_7 0x3C68C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_8 0x3C68C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_9 0x3C68C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_10 0x3C68CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_11 0x3C68D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_12 0x3C68D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_13 0x3C68D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_14 0x3C68DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_15 0x3C68E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0 0x3C68E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_1 0x3C68E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_2 0x3C68EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_3 0x3C68F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_4 0x3C68F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_5 0x3C68F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_6 0x3C68FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_7 0x3C6900
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_8 0x3C6904
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_9 0x3C6908
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_10 0x3C690C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_11 0x3C6910
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_12 0x3C6914
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_13 0x3C6918
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_14 0x3C691C
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_15 0x3C6920
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_0 0x3C6924
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_1 0x3C6928
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_2 0x3C692C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_3 0x3C6930
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_4 0x3C6934
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_5 0x3C6938
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_6 0x3C693C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_7 0x3C6940
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_8 0x3C6944
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_9 0x3C6948
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_10 0x3C694C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_11 0x3C6950
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_12 0x3C6954
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_13 0x3C6958
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_14 0x3C695C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_15 0x3C6960
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_0 0x3C6964
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_1 0x3C6968
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_2 0x3C696C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_3 0x3C6970
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_4 0x3C6974
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_5 0x3C6978
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_6 0x3C697C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_7 0x3C6980
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_8 0x3C6984
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_9 0x3C6988
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_10 0x3C698C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_11 0x3C6990
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_12 0x3C6994
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_13 0x3C6998
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_14 0x3C699C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_15 0x3C69A0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_0 0x3C69A4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_1 0x3C69A8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_2 0x3C69AC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_3 0x3C69B0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_4 0x3C69B4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_5 0x3C69B8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_6 0x3C69BC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_7 0x3C69C0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_8 0x3C69C4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_9 0x3C69C8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_10 0x3C69CC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_11 0x3C69D0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_12 0x3C69D4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_13 0x3C69D8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_14 0x3C69DC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_15 0x3C69E0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_0 0x3C69E4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_1 0x3C69E8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_2 0x3C69EC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_3 0x3C69F0
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_4 0x3C69F4
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_5 0x3C69F8
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_6 0x3C69FC
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_7 0x3C6A00
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_8 0x3C6A04
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_9 0x3C6A08
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_10 0x3C6A0C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_11 0x3C6A10
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_12 0x3C6A14
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_13 0x3C6A18
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_14 0x3C6A1C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_15 0x3C6A20
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AW 0x3C6A64
+
+#define mmNIF_RTR_CTRL_4_RANGE_SEC_HIT_AR 0x3C6A68
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_HIT_AW 0x3C6A6C
+
+#define mmNIF_RTR_CTRL_4_RANGE_PRIV_HIT_AR 0x3C6A70
+
+#define mmNIF_RTR_CTRL_4_RGL_CFG 0x3C6B64
+
+#define mmNIF_RTR_CTRL_4_RGL_SHIFT 0x3C6B68
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_0 0x3C6B6C
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_1 0x3C6B70
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_2 0x3C6B74
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_3 0x3C6B78
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_4 0x3C6B7C
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_5 0x3C6B80
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_6 0x3C6B84
+
+#define mmNIF_RTR_CTRL_4_RGL_EXPECTED_LAT_7 0x3C6B88
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_0 0x3C6BAC
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_1 0x3C6BB0
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_2 0x3C6BB4
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_3 0x3C6BB8
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_4 0x3C6BBC
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_5 0x3C6BC0
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_6 0x3C6BC4
+
+#define mmNIF_RTR_CTRL_4_RGL_TOKEN_7 0x3C6BC8
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_0 0x3C6BEC
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_1 0x3C6BF0
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_2 0x3C6BF4
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_3 0x3C6BF8
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_4 0x3C6BFC
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_5 0x3C6C00
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_6 0x3C6C04
+
+#define mmNIF_RTR_CTRL_4_RGL_BANK_ID_7 0x3C6C08
+
+#define mmNIF_RTR_CTRL_4_RGL_WDT 0x3C6C2C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_WRAP 0x3C6C30
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_WRAP 0x3C6C34
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_WRAP 0x3C6C38
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_WRAP 0x3C6C3C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_WRAP 0x3C6C40
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_WRAP 0x3C6C44
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_WRAP 0x3C6C48
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_WRAP 0x3C6C4C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_CNT 0x3C6C50
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_CNT 0x3C6C54
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_CNT 0x3C6C58
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_CNT 0x3C6C5C
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_CNT 0x3C6C60
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_CNT 0x3C6C64
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_CNT 0x3C6C68
+
+#define mmNIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_CNT 0x3C6C6C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_WRAP 0x3C6C70
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_WRAP 0x3C6C74
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_WRAP 0x3C6C78
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_WRAP 0x3C6C7C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_WRAP 0x3C6C80
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_WRAP 0x3C6C84
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_WRAP 0x3C6C88
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_WRAP 0x3C6C8C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_CNT 0x3C6C90
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_CNT 0x3C6C94
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_CNT 0x3C6C98
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_CNT 0x3C6C9C
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_CNT 0x3C6CA0
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_CNT 0x3C6CA4
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_CNT 0x3C6CA8
+
+#define mmNIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_CNT 0x3C6CAC
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_0 0x3C6CB0
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_1 0x3C6CB4
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_2 0x3C6CB8
+
+#define mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3 0x3C6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_4_REGS_H_ */
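
The header above exposes each router control register as a flat byte offset. As a minimal sketch (not part of the patch, assuming the mmNIF_* offsets can be applied directly to an ioremap()ed base and using a hypothetical helper name), the sixteen RANGE_SEC_BASE_LOW_AW windows defined above can be programmed by their fixed 4-byte stride instead of naming all sixteen defines:

/*
 * Illustration only: clears the 16 secure-range base-low (AW) windows
 * of NIF_RTR_CTRL_4. Assumes 'base' is an ioremap()ed region to which
 * the mmNIF_* offsets apply directly; the helper name is hypothetical.
 */
#include <linux/io.h>
#include <linux/types.h>

#include "nif_rtr_ctrl_4_regs.h"

static void nif_rtr4_clear_sec_base_low_aw(void __iomem *base)
{
	int i;

	/* _0.._15 are laid out contiguously, 4 bytes apart. */
	for (i = 0; i < 16; i++)
		writel(0, base + mmNIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 +
			  i * sizeof(u32));
}

The same stride-based walk applies to the matching NIF_RTR_CTRL_5 and NIF_RTR_CTRL_6 blocks below, which repeat the layout at bases 0x3D6000 and 0x3E6000.
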
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
new file mode 100644
index 000000000000..95486b7ddf1d
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_5_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_5_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_5 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_5_PERM_SEL 0x3D6108
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_0 0x3D6114
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_1 0x3D6118
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_2 0x3D611C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_3 0x3D6120
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_4 0x3D6124
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_5 0x3D6128
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_6 0x3D612C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_7 0x3D6130
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_8 0x3D6134
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_9 0x3D6138
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_10 0x3D613C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_11 0x3D6140
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_12 0x3D6144
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_13 0x3D6148
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_14 0x3D614C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_15 0x3D6150
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_16 0x3D6154
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_17 0x3D6158
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_18 0x3D615C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_19 0x3D6160
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_20 0x3D6164
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_21 0x3D6168
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_22 0x3D616C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_23 0x3D6170
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_24 0x3D6174
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_25 0x3D6178
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_26 0x3D617C
+
+#define mmNIF_RTR_CTRL_5_HBM_POLY_H3_27 0x3D6180
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x3D6184
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x3D6188
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_2 0x3D618C
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x3D6190
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x3D6194
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x3D6198
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x3D619C
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3D61A0
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3D61A4
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3D61A8
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3D61AC
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3D61B0
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3D61B4
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3D61B8
+
+#define mmNIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3D61BC
+
+#define mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x3D626C
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_EN 0x3D6274
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_SAT 0x3D6278
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_RST 0x3D627C
+
+#define mmNIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x3D6280
+
+#define mmNIF_RTR_CTRL_5_SCRAM_HBM_EN 0x3D6284
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_EN 0x3D6288
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_SAT 0x3D628C
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_RST 0x3D6290
+
+#define mmNIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x3D6294
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_EN 0x3D629C
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_SAT 0x3D62A0
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_RST 0x3D62A4
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3D62AC
+
+#define mmNIF_RTR_CTRL_5_RL_SRAM_RED 0x3D62B4
+
+#define mmNIF_RTR_CTRL_5_E2E_HBM_EN 0x3D62EC
+
+#define mmNIF_RTR_CTRL_5_E2E_PCI_EN 0x3D62F0
+
+#define mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3D62F4
+
+#define mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3D62F8
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x3D6404
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x3D6408
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x3D640C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x3D6410
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x3D6414
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x3D6418
+
+#define mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x3D641C
+
+#define mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x3D6420
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x3D6424
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x3D6428
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x3D642C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x3D6430
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x3D6434
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x3D6438
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_SEL_0 0x3D6450
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_SEL_1 0x3D6454
+
+#define mmNIF_RTR_CTRL_5_NON_LIN_EN 0x3D6480
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x3D6500
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x3D6504
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x3D6508
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x3D650C
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x3D6510
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x3D6514
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x3D6520
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x3D6524
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x3D6528
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x3D652C
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x3D6530
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x3D6534
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x3D6538
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x3D653C
+
+#define mmNIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x3D6540
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x3D6550
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x3D6554
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x3D6558
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x3D655C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x3D6560
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x3D6564
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x3D6568
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x3D656C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x3D6570
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x3D6574
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x3D6578
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x3D657C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x3D6580
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x3D6584
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x3D6588
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x3D658C
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x3D6590
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x3D6594
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x3D6598
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3D65E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3D65E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3D65EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3D65F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3D65F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3D65F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3D65FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x3D6600
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x3D6604
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x3D6608
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x3D660C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x3D6610
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x3D6614
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x3D6618
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x3D661C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x3D6620
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x3D6624
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x3D6628
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x3D662C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x3D6630
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x3D6634
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x3D6638
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x3D663C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x3D6640
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x3D6644
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x3D6648
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x3D664C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x3D6650
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x3D6654
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x3D6658
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x3D665C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x3D6660
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x3D6664
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x3D6668
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x3D666C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x3D6670
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x3D6674
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x3D6678
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x3D667C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x3D6680
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x3D6684
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x3D6688
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x3D668C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x3D6690
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x3D6694
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x3D6698
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x3D669C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3D66A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3D66A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3D66A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3D66AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3D66B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3D66B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3D66B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3D66BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3D66C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3D66C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3D66C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3D66CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3D66D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3D66D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3D66D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3D66DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3D66E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3D66E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3D66E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3D66EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3D66F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3D66F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3D66F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3D66FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x3D6700
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x3D6704
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x3D6708
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x3D670C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x3D6710
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x3D6714
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x3D6718
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x3D671C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x3D6720
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x3D6724
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x3D6728
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x3D672C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x3D6730
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x3D6734
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x3D6738
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x3D673C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x3D6740
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x3D6744
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x3D6748
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x3D674C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x3D6750
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x3D6754
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x3D6758
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x3D675C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x3D6760
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x3D6764
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x3D6768
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x3D676C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x3D6770
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x3D6774
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x3D6778
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x3D677C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x3D6780
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x3D6784
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x3D6788
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x3D678C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x3D6790
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x3D6794
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x3D6798
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x3D679C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3D67A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3D67A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3D67A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3D67AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3D67B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3D67B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3D67B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3D67BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3D67C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3D67C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3D67C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3D67CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3D67D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3D67D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3D67D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3D67DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3D67E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x3D6824
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x3D6828
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x3D682C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x3D6830
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x3D6834
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x3D6838
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x3D683C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x3D6840
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x3D6844
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x3D6848
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x3D684C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x3D6850
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x3D6854
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x3D6858
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x3D685C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x3D6860
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x3D6864
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x3D6868
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x3D686C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x3D6870
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x3D6874
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x3D6878
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x3D687C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x3D6880
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x3D6884
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x3D6888
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x3D688C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x3D6890
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x3D6894
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x3D6898
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x3D689C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3D68A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3D68A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3D68A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3D68AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3D68B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3D68B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3D68B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3D68BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3D68C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3D68C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3D68C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3D68CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3D68D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3D68D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3D68D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3D68DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3D68E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3D68E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3D68E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3D68EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3D68F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3D68F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3D68F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3D68FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x3D6900
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x3D6904
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x3D6908
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x3D690C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x3D6910
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x3D6914
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x3D6918
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x3D691C
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x3D6920
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x3D6924
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x3D6928
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x3D692C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x3D6930
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x3D6934
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x3D6938
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x3D693C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x3D6940
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x3D6944
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x3D6948
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x3D694C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x3D6950
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x3D6954
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x3D6958
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x3D695C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x3D6960
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x3D6964
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x3D6968
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x3D696C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x3D6970
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x3D6974
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x3D6978
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x3D697C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x3D6980
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x3D6984
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x3D6988
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x3D698C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x3D6990
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x3D6994
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x3D6998
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x3D699C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3D69A0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3D69A4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3D69A8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3D69AC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3D69B0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3D69B4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3D69B8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3D69BC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3D69C0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3D69C4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3D69C8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3D69CC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3D69D0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3D69D4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3D69D8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 0x3D69DC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3D69E0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3D69E4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3D69E8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3D69EC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3D69F0
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3D69F4
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3D69F8
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3D69FC
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x3D6A00
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x3D6A04
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x3D6A08
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x3D6A0C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x3D6A10
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x3D6A14
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x3D6A18
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x3D6A1C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x3D6A20
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x3D6A64
+
+#define mmNIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x3D6A68
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x3D6A6C
+
+#define mmNIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x3D6A70
+
+#define mmNIF_RTR_CTRL_5_RGL_CFG 0x3D6B64
+
+#define mmNIF_RTR_CTRL_5_RGL_SHIFT 0x3D6B68
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x3D6B6C
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x3D6B70
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x3D6B74
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x3D6B78
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x3D6B7C
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x3D6B80
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x3D6B84
+
+#define mmNIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x3D6B88
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_0 0x3D6BAC
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_1 0x3D6BB0
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_2 0x3D6BB4
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_3 0x3D6BB8
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_4 0x3D6BBC
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_5 0x3D6BC0
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_6 0x3D6BC4
+
+#define mmNIF_RTR_CTRL_5_RGL_TOKEN_7 0x3D6BC8
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_0 0x3D6BEC
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_1 0x3D6BF0
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_2 0x3D6BF4
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_3 0x3D6BF8
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_4 0x3D6BFC
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_5 0x3D6C00
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_6 0x3D6C04
+
+#define mmNIF_RTR_CTRL_5_RGL_BANK_ID_7 0x3D6C08
+
+#define mmNIF_RTR_CTRL_5_RGL_WDT 0x3D6C2C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x3D6C30
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x3D6C34
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x3D6C38
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x3D6C3C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x3D6C40
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x3D6C44
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x3D6C48
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x3D6C4C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x3D6C50
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x3D6C54
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x3D6C58
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x3D6C5C
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x3D6C60
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x3D6C64
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x3D6C68
+
+#define mmNIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x3D6C6C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x3D6C70
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 0x3D6C74
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x3D6C78
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x3D6C7C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x3D6C80
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x3D6C84
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x3D6C88
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x3D6C8C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x3D6C90
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x3D6C94
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x3D6C98
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x3D6C9C
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x3D6CA0
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x3D6CA4
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x3D6CA8
+
+#define mmNIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x3D6CAC
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x3D6CB0
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x3D6CB4
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x3D6CB8
+
+#define mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x3D6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_5_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
new file mode 100644
index 000000000000..b79c59887b21
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_6_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_6_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_6 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_6_PERM_SEL 0x3E6108
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_0 0x3E6114
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_1 0x3E6118
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_2 0x3E611C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_3 0x3E6120
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_4 0x3E6124
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_5 0x3E6128
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_6 0x3E612C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_7 0x3E6130
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_8 0x3E6134
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_9 0x3E6138
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_10 0x3E613C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_11 0x3E6140
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_12 0x3E6144
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_13 0x3E6148
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_14 0x3E614C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_15 0x3E6150
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_16 0x3E6154
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_17 0x3E6158
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_18 0x3E615C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_19 0x3E6160
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_20 0x3E6164
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_21 0x3E6168
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_22 0x3E616C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_23 0x3E6170
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_24 0x3E6174
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_25 0x3E6178
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_26 0x3E617C
+
+#define mmNIF_RTR_CTRL_6_HBM_POLY_H3_27 0x3E6180
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_0 0x3E6184
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_1 0x3E6188
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_2 0x3E618C
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_3 0x3E6190
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_4 0x3E6194
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_5 0x3E6198
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_6 0x3E619C
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_7 0x3E61A0
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_8 0x3E61A4
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_9 0x3E61A8
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_10 0x3E61AC
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_11 0x3E61B0
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_12 0x3E61B4
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_13 0x3E61B8
+
+#define mmNIF_RTR_CTRL_6_SRAM_POLY_H3_14 0x3E61BC
+
+#define mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN 0x3E626C
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_EN 0x3E6274
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_SAT 0x3E6278
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_RST 0x3E627C
+
+#define mmNIF_RTR_CTRL_6_RL_HBM_TIMEOUT 0x3E6280
+
+#define mmNIF_RTR_CTRL_6_SCRAM_HBM_EN 0x3E6284
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_EN 0x3E6288
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_SAT 0x3E628C
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_RST 0x3E6290
+
+#define mmNIF_RTR_CTRL_6_RL_PCI_TIMEOUT 0x3E6294
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_EN 0x3E629C
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_SAT 0x3E62A0
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_RST 0x3E62A4
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_TIMEOUT 0x3E62AC
+
+#define mmNIF_RTR_CTRL_6_RL_SRAM_RED 0x3E62B4
+
+#define mmNIF_RTR_CTRL_6_E2E_HBM_EN 0x3E62EC
+
+#define mmNIF_RTR_CTRL_6_E2E_PCI_EN 0x3E62F0
+
+#define mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE 0x3E62F4
+
+#define mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE 0x3E62F8
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET_EN 0x3E6404
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET 0x3E6408
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_WRAP 0x3E640C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_PCI_CTR_CNT 0x3E6410
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET_EN 0x3E6414
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET 0x3E6418
+
+#define mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE 0x3E641C
+
+#define mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE 0x3E6420
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET_EN 0x3E6424
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET 0x3E6428
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_WRAP 0x3E642C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_PCI_CTR_CNT 0x3E6430
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET_EN 0x3E6434
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET 0x3E6438
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_SEL_0 0x3E6450
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_SEL_1 0x3E6454
+
+#define mmNIF_RTR_CTRL_6_NON_LIN_EN 0x3E6480
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_0 0x3E6500
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_1 0x3E6504
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_2 0x3E6508
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_3 0x3E650C
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_BANK_4 0x3E6510
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_0 0x3E6514
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_1 0x3E6520
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_2 0x3E6524
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_3 0x3E6528
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_4 0x3E652C
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_5 0x3E6530
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_6 0x3E6534
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_7 0x3E6538
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_8 0x3E653C
+
+#define mmNIF_RTR_CTRL_6_NL_SRAM_OFFSET_9 0x3E6540
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_0 0x3E6550
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_1 0x3E6554
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_2 0x3E6558
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_3 0x3E655C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_4 0x3E6560
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_5 0x3E6564
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_6 0x3E6568
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_7 0x3E656C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_8 0x3E6570
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_9 0x3E6574
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_10 0x3E6578
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_11 0x3E657C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_12 0x3E6580
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_13 0x3E6584
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_14 0x3E6588
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_15 0x3E658C
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_16 0x3E6590
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_17 0x3E6594
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18 0x3E6598
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0 0x3E65E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_1 0x3E65E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_2 0x3E65EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_3 0x3E65F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_4 0x3E65F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_5 0x3E65F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_6 0x3E65FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_7 0x3E6600
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_8 0x3E6604
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_9 0x3E6608
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_10 0x3E660C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_11 0x3E6610
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_12 0x3E6614
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_13 0x3E6618
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_14 0x3E661C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_15 0x3E6620
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0 0x3E6624
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_1 0x3E6628
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_2 0x3E662C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_3 0x3E6630
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_4 0x3E6634
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_5 0x3E6638
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_6 0x3E663C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_7 0x3E6640
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_8 0x3E6644
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_9 0x3E6648
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_10 0x3E664C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_11 0x3E6650
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_12 0x3E6654
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_13 0x3E6658
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_14 0x3E665C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_15 0x3E6660
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0 0x3E6664
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_1 0x3E6668
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_2 0x3E666C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_3 0x3E6670
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_4 0x3E6674
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_5 0x3E6678
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_6 0x3E667C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_7 0x3E6680
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_8 0x3E6684
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_9 0x3E6688
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_10 0x3E668C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_11 0x3E6690
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_12 0x3E6694
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_13 0x3E6698
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_14 0x3E669C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_15 0x3E66A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0 0x3E66A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_1 0x3E66A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_2 0x3E66AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_3 0x3E66B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_4 0x3E66B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_5 0x3E66B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_6 0x3E66BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_7 0x3E66C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_8 0x3E66C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_9 0x3E66C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_10 0x3E66CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_11 0x3E66D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_12 0x3E66D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_13 0x3E66D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_14 0x3E66DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_15 0x3E66E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_0 0x3E66E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_1 0x3E66E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_2 0x3E66EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_3 0x3E66F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_4 0x3E66F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_5 0x3E66F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_6 0x3E66FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_7 0x3E6700
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_8 0x3E6704
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_9 0x3E6708
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_10 0x3E670C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_11 0x3E6710
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_12 0x3E6714
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_13 0x3E6718
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_14 0x3E671C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_15 0x3E6720
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_0 0x3E6724
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_1 0x3E6728
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_2 0x3E672C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_3 0x3E6730
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_4 0x3E6734
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_5 0x3E6738
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_6 0x3E673C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_7 0x3E6740
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_8 0x3E6744
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_9 0x3E6748
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_10 0x3E674C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_11 0x3E6750
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_12 0x3E6754
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_13 0x3E6758
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_14 0x3E675C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_15 0x3E6760
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_0 0x3E6764
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_1 0x3E6768
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_2 0x3E676C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_3 0x3E6770
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_4 0x3E6774
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_5 0x3E6778
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_6 0x3E677C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_7 0x3E6780
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_8 0x3E6784
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_9 0x3E6788
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_10 0x3E678C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_11 0x3E6790
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_12 0x3E6794
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_13 0x3E6798
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_14 0x3E679C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_15 0x3E67A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_0 0x3E67A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_1 0x3E67A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_2 0x3E67AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_3 0x3E67B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_4 0x3E67B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_5 0x3E67B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_6 0x3E67BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_7 0x3E67C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_8 0x3E67C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_9 0x3E67C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_10 0x3E67CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_11 0x3E67D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_12 0x3E67D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_13 0x3E67D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_14 0x3E67DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_15 0x3E67E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0 0x3E6824
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_1 0x3E6828
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_2 0x3E682C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_3 0x3E6830
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_4 0x3E6834
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_5 0x3E6838
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_6 0x3E683C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_7 0x3E6840
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_8 0x3E6844
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_9 0x3E6848
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_10 0x3E684C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_11 0x3E6850
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_12 0x3E6854
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_13 0x3E6858
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_14 0x3E685C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_15 0x3E6860
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0 0x3E6864
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_1 0x3E6868
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_2 0x3E686C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_3 0x3E6870
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_4 0x3E6874
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_5 0x3E6878
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_6 0x3E687C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_7 0x3E6880
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_8 0x3E6884
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_9 0x3E6888
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_10 0x3E688C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_11 0x3E6890
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_12 0x3E6894
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_13 0x3E6898
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_14 0x3E689C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_15 0x3E68A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0 0x3E68A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_1 0x3E68A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_2 0x3E68AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_3 0x3E68B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_4 0x3E68B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_5 0x3E68B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_6 0x3E68BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_7 0x3E68C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_8 0x3E68C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_9 0x3E68C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_10 0x3E68CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_11 0x3E68D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_12 0x3E68D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_13 0x3E68D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_14 0x3E68DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_15 0x3E68E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0 0x3E68E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_1 0x3E68E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_2 0x3E68EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_3 0x3E68F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_4 0x3E68F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_5 0x3E68F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_6 0x3E68FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_7 0x3E6900
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_8 0x3E6904
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_9 0x3E6908
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_10 0x3E690C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_11 0x3E6910
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_12 0x3E6914
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_13 0x3E6918
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_14 0x3E691C
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_15 0x3E6920
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_0 0x3E6924
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_1 0x3E6928
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_2 0x3E692C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_3 0x3E6930
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_4 0x3E6934
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_5 0x3E6938
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_6 0x3E693C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_7 0x3E6940
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_8 0x3E6944
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_9 0x3E6948
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_10 0x3E694C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_11 0x3E6950
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_12 0x3E6954
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_13 0x3E6958
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_14 0x3E695C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_15 0x3E6960
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_0 0x3E6964
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_1 0x3E6968
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_2 0x3E696C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_3 0x3E6970
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_4 0x3E6974
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_5 0x3E6978
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_6 0x3E697C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_7 0x3E6980
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_8 0x3E6984
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_9 0x3E6988
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_10 0x3E698C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_11 0x3E6990
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_12 0x3E6994
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_13 0x3E6998
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_14 0x3E699C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_15 0x3E69A0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_0 0x3E69A4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_1 0x3E69A8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_2 0x3E69AC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_3 0x3E69B0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_4 0x3E69B4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_5 0x3E69B8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_6 0x3E69BC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_7 0x3E69C0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_8 0x3E69C4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_9 0x3E69C8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_10 0x3E69CC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_11 0x3E69D0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_12 0x3E69D4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_13 0x3E69D8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_14 0x3E69DC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_15 0x3E69E0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_0 0x3E69E4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_1 0x3E69E8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_2 0x3E69EC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_3 0x3E69F0
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_4 0x3E69F4
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_5 0x3E69F8
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_6 0x3E69FC
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_7 0x3E6A00
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_8 0x3E6A04
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_9 0x3E6A08
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_10 0x3E6A0C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_11 0x3E6A10
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_12 0x3E6A14
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_13 0x3E6A18
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_14 0x3E6A1C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_15 0x3E6A20
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AW 0x3E6A64
+
+#define mmNIF_RTR_CTRL_6_RANGE_SEC_HIT_AR 0x3E6A68
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_HIT_AW 0x3E6A6C
+
+#define mmNIF_RTR_CTRL_6_RANGE_PRIV_HIT_AR 0x3E6A70
+
+#define mmNIF_RTR_CTRL_6_RGL_CFG 0x3E6B64
+
+#define mmNIF_RTR_CTRL_6_RGL_SHIFT 0x3E6B68
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_0 0x3E6B6C
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_1 0x3E6B70
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_2 0x3E6B74
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_3 0x3E6B78
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_4 0x3E6B7C
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_5 0x3E6B80
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_6 0x3E6B84
+
+#define mmNIF_RTR_CTRL_6_RGL_EXPECTED_LAT_7 0x3E6B88
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_0 0x3E6BAC
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_1 0x3E6BB0
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_2 0x3E6BB4
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_3 0x3E6BB8
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_4 0x3E6BBC
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_5 0x3E6BC0
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_6 0x3E6BC4
+
+#define mmNIF_RTR_CTRL_6_RGL_TOKEN_7 0x3E6BC8
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_0 0x3E6BEC
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_1 0x3E6BF0
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_2 0x3E6BF4
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_3 0x3E6BF8
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_4 0x3E6BFC
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_5 0x3E6C00
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_6 0x3E6C04
+
+#define mmNIF_RTR_CTRL_6_RGL_BANK_ID_7 0x3E6C08
+
+#define mmNIF_RTR_CTRL_6_RGL_WDT 0x3E6C2C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_WRAP 0x3E6C30
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_WRAP 0x3E6C34
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_WRAP 0x3E6C38
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_WRAP 0x3E6C3C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_WRAP 0x3E6C40
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_WRAP 0x3E6C44
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_WRAP 0x3E6C48
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_WRAP 0x3E6C4C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_CNT 0x3E6C50
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_CNT 0x3E6C54
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_CNT 0x3E6C58
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_CNT 0x3E6C5C
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_CNT 0x3E6C60
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_CNT 0x3E6C64
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_CNT 0x3E6C68
+
+#define mmNIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_CNT 0x3E6C6C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_WRAP 0x3E6C70
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_WRAP 0x3E6C74
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_WRAP 0x3E6C78
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_WRAP 0x3E6C7C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_WRAP 0x3E6C80
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_WRAP 0x3E6C84
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_WRAP 0x3E6C88
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_WRAP 0x3E6C8C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_CNT 0x3E6C90
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_CNT 0x3E6C94
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_CNT 0x3E6C98
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_CNT 0x3E6C9C
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_CNT 0x3E6CA0
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_CNT 0x3E6CA4
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_CNT 0x3E6CA8
+
+#define mmNIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT 0x3E6CAC
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_0 0x3E6CB0
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_1 0x3E6CB4
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_2 0x3E6CB8
+
+#define mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3 0x3E6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_6_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
new file mode 100644
index 000000000000..3a6a34ba2958
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_NIF_RTR_CTRL_7_REGS_H_
+#define ASIC_REG_NIF_RTR_CTRL_7_REGS_H_
+
+/*
+ *****************************************
+ * NIF_RTR_CTRL_7 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmNIF_RTR_CTRL_7_PERM_SEL 0x3F6108
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_0 0x3F6114
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_1 0x3F6118
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_2 0x3F611C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_3 0x3F6120
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_4 0x3F6124
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_5 0x3F6128
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_6 0x3F612C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_7 0x3F6130
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_8 0x3F6134
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_9 0x3F6138
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_10 0x3F613C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_11 0x3F6140
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_12 0x3F6144
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_13 0x3F6148
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_14 0x3F614C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_15 0x3F6150
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_16 0x3F6154
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_17 0x3F6158
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_18 0x3F615C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_19 0x3F6160
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_20 0x3F6164
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_21 0x3F6168
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_22 0x3F616C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_23 0x3F6170
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_24 0x3F6174
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_25 0x3F6178
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_26 0x3F617C
+
+#define mmNIF_RTR_CTRL_7_HBM_POLY_H3_27 0x3F6180
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_0 0x3F6184
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_1 0x3F6188
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_2 0x3F618C
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_3 0x3F6190
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_4 0x3F6194
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_5 0x3F6198
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_6 0x3F619C
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_7 0x3F61A0
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_8 0x3F61A4
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_9 0x3F61A8
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_10 0x3F61AC
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_11 0x3F61B0
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_12 0x3F61B4
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_13 0x3F61B8
+
+#define mmNIF_RTR_CTRL_7_SRAM_POLY_H3_14 0x3F61BC
+
+#define mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN 0x3F626C
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_EN 0x3F6274
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_SAT 0x3F6278
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_RST 0x3F627C
+
+#define mmNIF_RTR_CTRL_7_RL_HBM_TIMEOUT 0x3F6280
+
+#define mmNIF_RTR_CTRL_7_SCRAM_HBM_EN 0x3F6284
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_EN 0x3F6288
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_SAT 0x3F628C
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_RST 0x3F6290
+
+#define mmNIF_RTR_CTRL_7_RL_PCI_TIMEOUT 0x3F6294
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_EN 0x3F629C
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_SAT 0x3F62A0
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_RST 0x3F62A4
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_TIMEOUT 0x3F62AC
+
+#define mmNIF_RTR_CTRL_7_RL_SRAM_RED 0x3F62B4
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_EN 0x3F62EC
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_EN 0x3F62F0
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE 0x3F62F4
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE 0x3F62F8
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET_EN 0x3F6404
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET 0x3F6408
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_WRAP 0x3F640C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_PCI_CTR_CNT 0x3F6410
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET_EN 0x3F6414
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET 0x3F6418
+
+#define mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE 0x3F641C
+
+#define mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE 0x3F6420
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET_EN 0x3F6424
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET 0x3F6428
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_WRAP 0x3F642C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_PCI_CTR_CNT 0x3F6430
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET_EN 0x3F6434
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET 0x3F6438
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_SEL_0 0x3F6450
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_SEL_1 0x3F6454
+
+#define mmNIF_RTR_CTRL_7_NON_LIN_EN 0x3F6480
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_0 0x3F6500
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_1 0x3F6504
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_2 0x3F6508
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_3 0x3F650C
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_BANK_4 0x3F6510
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_0 0x3F6514
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_1 0x3F6520
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_2 0x3F6524
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_3 0x3F6528
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_4 0x3F652C
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_5 0x3F6530
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_6 0x3F6534
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_7 0x3F6538
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_8 0x3F653C
+
+#define mmNIF_RTR_CTRL_7_NL_SRAM_OFFSET_9 0x3F6540
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_0 0x3F6550
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_1 0x3F6554
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_2 0x3F6558
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_3 0x3F655C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_4 0x3F6560
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_5 0x3F6564
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_6 0x3F6568
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_7 0x3F656C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_8 0x3F6570
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_9 0x3F6574
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_10 0x3F6578
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_11 0x3F657C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_12 0x3F6580
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_13 0x3F6584
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_14 0x3F6588
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_15 0x3F658C
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_16 0x3F6590
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_17 0x3F6594
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18 0x3F6598
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 0x3F65E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_1 0x3F65E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_2 0x3F65EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_3 0x3F65F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_4 0x3F65F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_5 0x3F65F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_6 0x3F65FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_7 0x3F6600
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_8 0x3F6604
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_9 0x3F6608
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_10 0x3F660C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_11 0x3F6610
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_12 0x3F6614
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_13 0x3F6618
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_14 0x3F661C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_15 0x3F6620
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 0x3F6624
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_1 0x3F6628
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_2 0x3F662C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_3 0x3F6630
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_4 0x3F6634
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_5 0x3F6638
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_6 0x3F663C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_7 0x3F6640
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_8 0x3F6644
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_9 0x3F6648
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_10 0x3F664C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_11 0x3F6650
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_12 0x3F6654
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_13 0x3F6658
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_14 0x3F665C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_15 0x3F6660
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 0x3F6664
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_1 0x3F6668
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_2 0x3F666C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_3 0x3F6670
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_4 0x3F6674
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_5 0x3F6678
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_6 0x3F667C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_7 0x3F6680
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_8 0x3F6684
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_9 0x3F6688
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_10 0x3F668C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_11 0x3F6690
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_12 0x3F6694
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_13 0x3F6698
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_14 0x3F669C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_15 0x3F66A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 0x3F66A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_1 0x3F66A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_2 0x3F66AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_3 0x3F66B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_4 0x3F66B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_5 0x3F66B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_6 0x3F66BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_7 0x3F66C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_8 0x3F66C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_9 0x3F66C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_10 0x3F66CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_11 0x3F66D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_12 0x3F66D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_13 0x3F66D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_14 0x3F66DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_15 0x3F66E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_0 0x3F66E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_1 0x3F66E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_2 0x3F66EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_3 0x3F66F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_4 0x3F66F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_5 0x3F66F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_6 0x3F66FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_7 0x3F6700
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_8 0x3F6704
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_9 0x3F6708
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_10 0x3F670C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_11 0x3F6710
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_12 0x3F6714
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_13 0x3F6718
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_14 0x3F671C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_15 0x3F6720
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_0 0x3F6724
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_1 0x3F6728
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_2 0x3F672C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_3 0x3F6730
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_4 0x3F6734
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_5 0x3F6738
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_6 0x3F673C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_7 0x3F6740
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_8 0x3F6744
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_9 0x3F6748
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_10 0x3F674C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_11 0x3F6750
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_12 0x3F6754
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_13 0x3F6758
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_14 0x3F675C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_15 0x3F6760
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_0 0x3F6764
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_1 0x3F6768
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_2 0x3F676C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_3 0x3F6770
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_4 0x3F6774
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_5 0x3F6778
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_6 0x3F677C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_7 0x3F6780
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_8 0x3F6784
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_9 0x3F6788
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_10 0x3F678C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_11 0x3F6790
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_12 0x3F6794
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_13 0x3F6798
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_14 0x3F679C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_15 0x3F67A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_0 0x3F67A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_1 0x3F67A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_2 0x3F67AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_3 0x3F67B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_4 0x3F67B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_5 0x3F67B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_6 0x3F67BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_7 0x3F67C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_8 0x3F67C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_9 0x3F67C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_10 0x3F67CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_11 0x3F67D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_12 0x3F67D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_13 0x3F67D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_14 0x3F67DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_15 0x3F67E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 0x3F6824
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_1 0x3F6828
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_2 0x3F682C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_3 0x3F6830
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_4 0x3F6834
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_5 0x3F6838
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_6 0x3F683C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_7 0x3F6840
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_8 0x3F6844
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_9 0x3F6848
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_10 0x3F684C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_11 0x3F6850
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_12 0x3F6854
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_13 0x3F6858
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_14 0x3F685C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_15 0x3F6860
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 0x3F6864
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_1 0x3F6868
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_2 0x3F686C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_3 0x3F6870
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_4 0x3F6874
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_5 0x3F6878
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_6 0x3F687C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_7 0x3F6880
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_8 0x3F6884
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_9 0x3F6888
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_10 0x3F688C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_11 0x3F6890
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_12 0x3F6894
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_13 0x3F6898
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_14 0x3F689C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_15 0x3F68A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 0x3F68A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_1 0x3F68A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_2 0x3F68AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_3 0x3F68B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_4 0x3F68B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_5 0x3F68B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_6 0x3F68BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_7 0x3F68C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_8 0x3F68C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_9 0x3F68C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_10 0x3F68CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_11 0x3F68D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_12 0x3F68D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_13 0x3F68D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_14 0x3F68DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_15 0x3F68E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 0x3F68E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_1 0x3F68E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_2 0x3F68EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_3 0x3F68F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_4 0x3F68F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_5 0x3F68F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_6 0x3F68FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_7 0x3F6900
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_8 0x3F6904
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_9 0x3F6908
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_10 0x3F690C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_11 0x3F6910
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_12 0x3F6914
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_13 0x3F6918
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_14 0x3F691C
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_15 0x3F6920
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_0 0x3F6924
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_1 0x3F6928
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_2 0x3F692C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_3 0x3F6930
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_4 0x3F6934
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_5 0x3F6938
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_6 0x3F693C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_7 0x3F6940
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_8 0x3F6944
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_9 0x3F6948
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_10 0x3F694C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_11 0x3F6950
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_12 0x3F6954
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_13 0x3F6958
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_14 0x3F695C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_15 0x3F6960
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_0 0x3F6964
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_1 0x3F6968
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_2 0x3F696C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_3 0x3F6970
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_4 0x3F6974
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_5 0x3F6978
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_6 0x3F697C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_7 0x3F6980
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_8 0x3F6984
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_9 0x3F6988
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_10 0x3F698C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_11 0x3F6990
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_12 0x3F6994
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_13 0x3F6998
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_14 0x3F699C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_15 0x3F69A0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_0 0x3F69A4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_1 0x3F69A8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_2 0x3F69AC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_3 0x3F69B0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_4 0x3F69B4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_5 0x3F69B8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_6 0x3F69BC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_7 0x3F69C0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_8 0x3F69C4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_9 0x3F69C8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_10 0x3F69CC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_11 0x3F69D0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_12 0x3F69D4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_13 0x3F69D8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_14 0x3F69DC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_15 0x3F69E0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_0 0x3F69E4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_1 0x3F69E8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_2 0x3F69EC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_3 0x3F69F0
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_4 0x3F69F4
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_5 0x3F69F8
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_6 0x3F69FC
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_7 0x3F6A00
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_8 0x3F6A04
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_9 0x3F6A08
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_10 0x3F6A0C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_11 0x3F6A10
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_12 0x3F6A14
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_13 0x3F6A18
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_14 0x3F6A1C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_15 0x3F6A20
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 0x3F6A64
+
+#define mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 0x3F6A68
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_HIT_AW 0x3F6A6C
+
+#define mmNIF_RTR_CTRL_7_RANGE_PRIV_HIT_AR 0x3F6A70
+
+#define mmNIF_RTR_CTRL_7_RGL_CFG 0x3F6B64
+
+#define mmNIF_RTR_CTRL_7_RGL_SHIFT 0x3F6B68
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_0 0x3F6B6C
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_1 0x3F6B70
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_2 0x3F6B74
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_3 0x3F6B78
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_4 0x3F6B7C
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_5 0x3F6B80
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_6 0x3F6B84
+
+#define mmNIF_RTR_CTRL_7_RGL_EXPECTED_LAT_7 0x3F6B88
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_0 0x3F6BAC
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_1 0x3F6BB0
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_2 0x3F6BB4
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_3 0x3F6BB8
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_4 0x3F6BBC
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_5 0x3F6BC0
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_6 0x3F6BC4
+
+#define mmNIF_RTR_CTRL_7_RGL_TOKEN_7 0x3F6BC8
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_0 0x3F6BEC
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_1 0x3F6BF0
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_2 0x3F6BF4
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_3 0x3F6BF8
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_4 0x3F6BFC
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_5 0x3F6C00
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_6 0x3F6C04
+
+#define mmNIF_RTR_CTRL_7_RGL_BANK_ID_7 0x3F6C08
+
+#define mmNIF_RTR_CTRL_7_RGL_WDT 0x3F6C2C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_WRAP 0x3F6C30
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_WRAP 0x3F6C34
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_WRAP 0x3F6C38
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_WRAP 0x3F6C3C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_WRAP 0x3F6C40
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_WRAP 0x3F6C44
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_WRAP 0x3F6C48
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_WRAP 0x3F6C4C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_CNT 0x3F6C50
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_CNT 0x3F6C54
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_CNT 0x3F6C58
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_CNT 0x3F6C5C
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_CNT 0x3F6C60
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_CNT 0x3F6C64
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_CNT 0x3F6C68
+
+#define mmNIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_CNT 0x3F6C6C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_WRAP 0x3F6C70
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_WRAP 0x3F6C74
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_WRAP 0x3F6C78
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_WRAP 0x3F6C7C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_WRAP 0x3F6C80
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_WRAP 0x3F6C84
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_WRAP 0x3F6C88
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_WRAP 0x3F6C8C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_CNT 0x3F6C90
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_CNT 0x3F6C94
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_CNT 0x3F6C98
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_CNT 0x3F6C9C
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_CNT 0x3F6CA0
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_CNT 0x3F6CA4
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_CNT 0x3F6CA8
+
+#define mmNIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_CNT 0x3F6CAC
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_0 0x3F6CB0
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_1 0x3F6CB4
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_2 0x3F6CB8
+
+#define mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3 0x3F6CBC
+
+#endif /* ASIC_REG_NIF_RTR_CTRL_7_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
new file mode 100644
index 000000000000..b7c33e025db5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_ETR_REGS_H_
+#define ASIC_REG_PSOC_ETR_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_ETR (Prototype: ETR)
+ *****************************************
+ */
+
+#define mmPSOC_ETR_RSZ 0x2C43004
+
+#define mmPSOC_ETR_STS 0x2C4300C
+
+#define mmPSOC_ETR_RRD 0x2C43010
+
+#define mmPSOC_ETR_RRP 0x2C43014
+
+#define mmPSOC_ETR_RWP 0x2C43018
+
+#define mmPSOC_ETR_TRG 0x2C4301C
+
+#define mmPSOC_ETR_CTL 0x2C43020
+
+#define mmPSOC_ETR_RWD 0x2C43024
+
+#define mmPSOC_ETR_MODE 0x2C43028
+
+#define mmPSOC_ETR_LBUFLEVEL 0x2C4302C
+
+#define mmPSOC_ETR_CBUFLEVEL 0x2C43030
+
+#define mmPSOC_ETR_BUFWM 0x2C43034
+
+#define mmPSOC_ETR_RRPHI 0x2C43038
+
+#define mmPSOC_ETR_RWPHI 0x2C4303C
+
+#define mmPSOC_ETR_AXICTL 0x2C43110
+
+#define mmPSOC_ETR_DBALO 0x2C43118
+
+#define mmPSOC_ETR_DBAHI 0x2C4311C
+
+#define mmPSOC_ETR_FFSR 0x2C43300
+
+#define mmPSOC_ETR_FFCR 0x2C43304
+
+#define mmPSOC_ETR_PSCR 0x2C43308
+
+#define mmPSOC_ETR_ITMISCOP0 0x2C43EE0
+
+#define mmPSOC_ETR_ITTRFLIN 0x2C43EE8
+
+#define mmPSOC_ETR_ITATBDATA0 0x2C43EEC
+
+#define mmPSOC_ETR_ITATBCTR2 0x2C43EF0
+
+#define mmPSOC_ETR_ITATBCTR1 0x2C43EF4
+
+#define mmPSOC_ETR_ITATBCTR0 0x2C43EF8
+
+#define mmPSOC_ETR_ITCTRL 0x2C43F00
+
+#define mmPSOC_ETR_CLAIMSET 0x2C43FA0
+
+#define mmPSOC_ETR_CLAIMCLR 0x2C43FA4
+
+#define mmPSOC_ETR_LAR 0x2C43FB0
+
+#define mmPSOC_ETR_LSR 0x2C43FB4
+
+#define mmPSOC_ETR_AUTHSTATUS 0x2C43FB8
+
+#define mmPSOC_ETR_DEVID 0x2C43FC8
+
+#define mmPSOC_ETR_DEVTYPE 0x2C43FCC
+
+#define mmPSOC_ETR_PERIPHID4 0x2C43FD0
+
+#define mmPSOC_ETR_PERIPHID5 0x2C43FD4
+
+#define mmPSOC_ETR_PERIPHID6 0x2C43FD8
+
+#define mmPSOC_ETR_PERIPHID7 0x2C43FDC
+
+#define mmPSOC_ETR_PERIPHID0 0x2C43FE0
+
+#define mmPSOC_ETR_PERIPHID1 0x2C43FE4
+
+#define mmPSOC_ETR_PERIPHID2 0x2C43FE8
+
+#define mmPSOC_ETR_PERIPHID3 0x2C43FEC
+
+#define mmPSOC_ETR_COMPID0 0x2C43FF0
+
+#define mmPSOC_ETR_COMPID1 0x2C43FF4
+
+#define mmPSOC_ETR_COMPID2 0x2C43FF8
+
+#define mmPSOC_ETR_COMPID3 0x2C43FFC
+
+#endif /* ASIC_REG_PSOC_ETR_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
new file mode 100644
index 000000000000..6703e678ee9f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PCI_FW_FSM */
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BTM_FSM */
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BTM_FSM */
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPI_MEM_EN */
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN */
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_EN */
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR */
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PRSTN_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SPI_IMG_STS */
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3
+#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD_DONE_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PHY_STABLE */
+#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PHY_STABLE_PRSTN_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_OVR */
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_SHIFT 4
+#define PSOC_GLOBAL_CONF_PRSTN_OVR_VAL_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_ETR_FLUSH */
+#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_ETR_FLUSH_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_COLD_RST_FLOPS */
+#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_COLD_RST_FLOPS_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_DIS_RAZWI_ERR */
+#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_DIS_RAZWI_ERR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PCIE_PHY_RST_N */
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PCIE_PHY_RST_N_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RAZWI */
+#define PSOC_GLOBAL_CONF_RAZWI_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_RAZWI_INTR_MASK 0x1
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_RAZWI_MASK_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_PROT */
+#define PSOC_GLOBAL_CONF_PROT_AR_SHIFT 0
+#define PSOC_GLOBAL_CONF_PROT_AR_MASK 0x7
+#define PSOC_GLOBAL_CONF_PROT_AW_SHIFT 4
+#define PSOC_GLOBAL_CONF_PROT_AW_MASK 0x70
+
+/* PSOC_GLOBAL_CONF_ADC */
+#define PSOC_GLOBAL_CONF_ADC_INTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_INTR_MASK 0x1
+#define PSOC_GLOBAL_CONF_ADC_MASK_SHIFT 4
+#define PSOC_GLOBAL_CONF_ADC_MASK_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_BOOT_SEQ_TO */
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_SEQ_TO_MASK_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SCRATCHPAD */
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SEMAPHORE */
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0
+#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_CPU_BOOT_STATUS */
+#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_SHIFT 0
+#define PSOC_GLOBAL_CONF_CPU_BOOT_STATUS_CNTR_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU */
+#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_KMD_MSG_TO_CPU_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SPL_SOURCE */
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19
+#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000
+
+/* PSOC_GLOBAL_CONF_I2C_SLV */
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0
+#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TRACE_ADDR */
+#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_ADDR_MSB_MASK 0x3FF
+
+/* PSOC_GLOBAL_CONF_ARUSER */
+#define PSOC_GLOBAL_CONF_ARUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ARUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_AWUSER */
+#define PSOC_GLOBAL_CONF_AWUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TRACE_AWUSER */
+#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_AWUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_TRACE_ARUSER */
+#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_TRACE_ARUSER_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_STS */
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8
+#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00
+
+/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_SHIFT 8
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_5_MASK 0x100
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_SHIFT 9
+#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_6_MASK 0x200
+
+/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16
+#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000
+
+/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_TARGETID */
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1
+#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 16
+#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFF0000
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28
+#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000
+
+/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0x1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 1
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_REPAIR_CFG_SHIFT 2
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_REPAIR_CFG_MASK 0xC
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 4
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x10
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 5
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x20
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 6
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x40
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 7
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x80
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_SEL_SHIFT 8
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_SEL_MASK 0x1FFF00
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_GRAD_RST_SHIFT 22
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_GRAD_RST_MASK 0x400000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_DIS_SHIFT 23
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_DUMP_DIS_MASK 0x800000
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SHIFT 24
+#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_MASK 0x1F000000
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_DIV */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_EN_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_SHIFT 8
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_DIV_VAL_MASK 0xFF00
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_SHIFT 4
+#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_FAIL_MASK 0x10
+
+/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1
+#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2
+
+/* PSOC_GLOBAL_CONF_MASK_REQ */
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_WD_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_MNL_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_MNL_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_H_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_SW_ALL_RST */
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SW_ALL_RST_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SOFT_RST */
+#define PSOC_GLOBAL_CONF_SOFT_RST_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_SOFT_RST_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_SOFT_RST_CFG_L */
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_SOFT_RST_CFG_H */
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_SOFT_RST_CFG_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N_L */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_L_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_L_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_UNIT_RST_N_H */
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_H_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_UNIT_RST_N_H_VAL_MASK 0x3FFFFF
+
+/* PSOC_GLOBAL_CONF_BTL_IMG */
+#define PSOC_GLOBAL_CONF_BTL_IMG_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BTL_IMG_SEL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PRSTN_MASK */
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_WD_MASK */
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0
+#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_RST_SRC */
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_BOOT_STATE */
+#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BOOT_STATE_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F
+
+/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7
+
+/* PSOC_GLOBAL_CONF_BNK3V3_MS */
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_ADC_CLK_FREQ */
+#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CLK_FREQ_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START */
+#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_DELAY_FROM_START_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES */
+#define PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_DATA_SAMPLES_VAL_MASK 0x1F
+
+/* PSOC_GLOBAL_CONF_ADC_TPH_CS */
+#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TPH_CS_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_LSB_NMSB */
+#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_LSB_NMSB_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES */
+#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE */
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE_VAL_MASK 0x1
+
+/* PSOC_GLOBAL_CONF_ADC_CFG_DATA */
+#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_CFG_DATA_VAL_MASK 0xFFFFFFFF
+
+/* PSOC_GLOBAL_CONF_ADC_TDV_CSDO */
+#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TDV_CSDO_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_ADC_TSU_CSCK */
+#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_ADC_TSU_CSCK_VAL_MASK 0xFF
+
+/* PSOC_GLOBAL_CONF_PAD_DEFAULT */
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF
+
+/* PSOC_GLOBAL_CONF_PAD_SEL */
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0
+#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3
+
+/* PSOC_GLOBAL_CONF_RST_CTRL */
+#define PSOC_GLOBAL_CONF_RST_CTRL_SEL_SHIFT 0
+#define PSOC_GLOBAL_CONF_RST_CTRL_SEL_MASK 0xFF
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
new file mode 100644
index 000000000000..1b5cfcc1d85f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
@@ -0,0 +1,1062 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF)
+ *****************************************
+ */
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008
+
+#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C
+
+#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024
+
+#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028
+
+#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030
+
+#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038
+
+#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040
+
+#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044
+
+#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PRSTN_INTR 0xC4B04C
+
+#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_EXT_LD 0xC4B058
+
+#define mmPSOC_GLOBAL_CONF_PHY_STABLE 0xC4B060
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_OVR 0xC4B064
+
+#define mmPSOC_GLOBAL_CONF_ETR_FLUSH 0xC4B068
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0 0xC4B070
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1 0xC4B074
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2 0xC4B078
+
+#define mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3 0xC4B07C
+
+#define mmPSOC_GLOBAL_CONF_DIS_RAZWI_ERR 0xC4B080
+
+#define mmPSOC_GLOBAL_CONF_PCIE_PHY_RST_N 0xC4B084
+
+#define mmPSOC_GLOBAL_CONF_RAZWI 0xC4B088
+
+#define mmPSOC_GLOBAL_CONF_PROT 0xC4B090
+
+#define mmPSOC_GLOBAL_CONF_ADC 0xC4B094
+
+#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TO 0xC4B098
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178
+
+#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278
+
+#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C
+
+#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS 0xC4B300
+
+#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU 0xC4B304
+
+#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308
+
+#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310
+
+#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314
+
+#define mmPSOC_GLOBAL_CONF_TRACE_ADDR 0xC4B320
+
+#define mmPSOC_GLOBAL_CONF_ARUSER 0xC4B330
+
+#define mmPSOC_GLOBAL_CONF_AWUSER 0xC4B334
+
+#define mmPSOC_GLOBAL_CONF_TRACE_AWUSER 0xC4B338
+
+#define mmPSOC_GLOBAL_CONF_TRACE_ARUSER 0xC4B33C
+
+#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340
+
+#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350
+
+#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354
+
+#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358
+
+#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C
+
+#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360
+
+#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400
+
+#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_DIV 0xC4B44C
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450
+
+#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454
+
+#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458
+
+#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG_L 0xC4B460
+
+#define mmPSOC_GLOBAL_CONF_WD_RST_CFG_H 0xC4B464
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG_L 0xC4B470
+
+#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG_H 0xC4B474
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG_L 0xC4B480
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG_H 0xC4B484
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L 0xC4B490
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H 0xC4B494
+
+#define mmPSOC_GLOBAL_CONF_SW_ALL_RST 0xC4B498
+
+#define mmPSOC_GLOBAL_CONF_SOFT_RST 0xC4B4A0
+
+#define mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L 0xC4B4A4
+
+#define mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H 0xC4B4A8
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B4B0
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N_L 0xC4B4B4
+
+#define mmPSOC_GLOBAL_CONF_UNIT_RST_N_H 0xC4B4B8
+
+#define mmPSOC_GLOBAL_CONF_BTL_IMG 0xC4B4E0
+
+#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B4E4
+
+#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B4E8
+
+#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B4F0
+
+#define mmPSOC_GLOBAL_CONF_BOOT_STATE 0xC4B4F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_69 0xC4B614
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_70 0xC4B618
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_71 0xC4B61C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_72 0xC4B620
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_73 0xC4B624
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_74 0xC4B628
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_75 0xC4B62C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_76 0xC4B630
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_77 0xC4B634
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_78 0xC4B638
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_79 0xC4B63C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_80 0xC4B640
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_81 0xC4B644
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_82 0xC4B648
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_83 0xC4B64C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_84 0xC4B650
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_85 0xC4B654
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_86 0xC4B658
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_87 0xC4B65C
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_88 0xC4B660
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_89 0xC4B664
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B690
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B694
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B698
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B69C
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B6A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B6A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B6A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B6AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B6B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B6B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B6B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B6BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B6C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B6C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B6C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B6CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B6D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B6D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_6 0xC4B6D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_7 0xC4B6DC
+
+#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B710
+
+#define mmPSOC_GLOBAL_CONF_ADC_CLK_FREQ 0xC4B720
+
+#define mmPSOC_GLOBAL_CONF_ADC_DELAY_FROM_START 0xC4B724
+
+#define mmPSOC_GLOBAL_CONF_ADC_DATA_SAMPLES 0xC4B728
+
+#define mmPSOC_GLOBAL_CONF_ADC_TPH_CS 0xC4B72C
+
+#define mmPSOC_GLOBAL_CONF_ADC_LSB_NMSB 0xC4B730
+
+#define mmPSOC_GLOBAL_CONF_ADC_ONE_NCONTIUES 0xC4B734
+
+#define mmPSOC_GLOBAL_CONF_ADC_BLOCK_ENABLE 0xC4B738
+
+#define mmPSOC_GLOBAL_CONF_ADC_CFG_DATA 0xC4B73C
+
+#define mmPSOC_GLOBAL_CONF_ADC_TDV_CSDO 0xC4B740
+
+#define mmPSOC_GLOBAL_CONF_ADC_TSU_CSCK 0xC4B744
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B800
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B804
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B808
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B80C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B810
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B814
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B818
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B81C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B820
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B824
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B828
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B82C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B830
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B834
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B838
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B83C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B840
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B844
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B848
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B84C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B850
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B854
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B858
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B85C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B860
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B864
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B868
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B86C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B870
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B874
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B878
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B87C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B880
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B884
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B888
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B88C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B890
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B894
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B898
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B89C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B8A0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B8A4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B8A8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B8AC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B8B0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B8B4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B8B8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B8BC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B8C0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B8C4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B8C8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B8CC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B8D0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B8D4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B8D8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B8DC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B8E0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B8E4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B8E8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B8EC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B8F0
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B8F4
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B8F8
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B8FC
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B900
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B904
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B908
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B90C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B910
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B914
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B918
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B91C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B920
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B924
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B928
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B92C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B930
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B934
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B938
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B93C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B940
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B944
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_82 0xC4B948
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_83 0xC4B94C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_84 0xC4B950
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_85 0xC4B954
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_86 0xC4B958
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_87 0xC4B95C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_88 0xC4B960
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_89 0xC4B964
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_90 0xC4B968
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_91 0xC4B96C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_92 0xC4B970
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_93 0xC4B974
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_94 0xC4B978
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_95 0xC4B97C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_96 0xC4B980
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_97 0xC4B984
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_98 0xC4B988
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_99 0xC4B98C
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_100 0xC4B990
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_101 0xC4B994
+
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_102 0xC4B998
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4BA00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4BA04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4BA08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4BA0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4BA10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4BA14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4BA18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4BA1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4BA20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4BA24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4BA28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4BA2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4BA30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4BA34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4BA38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4BA3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4BA40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4BA44
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4BA48
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4BA4C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4BA50
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4BA54
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4BA58
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4BA5C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4BA60
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4BA64
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4BA68
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4BA6C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4BA70
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4BA74
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4BA78
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4BA7C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4BA80
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4BA84
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4BA88
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4BA8C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4BA90
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4BA94
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4BA98
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4BA9C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4BAA0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4BAA4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4BAA8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4BAAC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4BAB0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4BAB4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4BAB8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4BABC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4BAC0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4BAC4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 0xC4BAC8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4BACC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4BAD0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4BAD4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4BAD8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4BADC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4BAE0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4BAE4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4BAE8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4BAEC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4BAF0
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4BAF4
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4BAF8
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4BAFC
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BB00
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BB04
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BB08
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BB0C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BB10
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BB14
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BB18
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BB1C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BB20
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BB24
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BB28
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BB2C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BB30
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BB34
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BB38
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BB3C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BB40
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BB44
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_82 0xC4BB48
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_83 0xC4BB4C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_84 0xC4BB50
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_85 0xC4BB54
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_86 0xC4BB58
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_87 0xC4BB5C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_88 0xC4BB60
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_89 0xC4BB64
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_90 0xC4BB68
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_91 0xC4BB6C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_92 0xC4BB70
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_93 0xC4BB74
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_94 0xC4BB78
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_95 0xC4BB7C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_96 0xC4BB80
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_97 0xC4BB84
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_98 0xC4BB88
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_99 0xC4BB8C
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_100 0xC4BB90
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_101 0xC4BB94
+
+#define mmPSOC_GLOBAL_CONF_PAD_SEL_102 0xC4BB98
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_0 0xC4BC00
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_1 0xC4BC04
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_2 0xC4BC08
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_3 0xC4BC0C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_4 0xC4BC10
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_5 0xC4BC14
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_6 0xC4BC18
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_7 0xC4BC1C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_8 0xC4BC20
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_9 0xC4BC24
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_10 0xC4BC28
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_11 0xC4BC2C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_12 0xC4BC30
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_13 0xC4BC34
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_14 0xC4BC38
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_15 0xC4BC3C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_16 0xC4BC40
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_17 0xC4BC44
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_18 0xC4BC48
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_19 0xC4BC4C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_20 0xC4BC50
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_21 0xC4BC54
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_22 0xC4BC58
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_23 0xC4BC5C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_24 0xC4BC60
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_25 0xC4BC64
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_26 0xC4BC68
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_27 0xC4BC6C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_28 0xC4BC70
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_29 0xC4BC74
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_30 0xC4BC78
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_31 0xC4BC7C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_32 0xC4BC80
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_33 0xC4BC84
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_34 0xC4BC88
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_35 0xC4BC8C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_36 0xC4BC90
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_37 0xC4BC94
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_38 0xC4BC98
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_39 0xC4BC9C
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_40 0xC4BCA0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_41 0xC4BCA4
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_42 0xC4BCA8
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_43 0xC4BCAC
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_44 0xC4BCB0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_45 0xC4BCB4
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_46 0xC4BCB8
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_47 0xC4BCBC
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_48 0xC4BCC0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_49 0xC4BCC4
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_50 0xC4BCC8
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_51 0xC4BCCC
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_52 0xC4BCD0
+
+#define mmPSOC_GLOBAL_CONF_RST_CTRL_53 0xC4BCD4
+
+#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h
new file mode 100644
index 000000000000..687e2255cb19
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_HBM_PLL_REGS_H_
+#define ASIC_REG_PSOC_HBM_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_HBM_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_HBM_PLL_NR 0xC74100
+
+#define mmPSOC_HBM_PLL_NF 0xC74104
+
+#define mmPSOC_HBM_PLL_OD 0xC74108
+
+#define mmPSOC_HBM_PLL_NB 0xC7410C
+
+#define mmPSOC_HBM_PLL_CFG 0xC74110
+
+#define mmPSOC_HBM_PLL_LOSE_MASK 0xC74120
+
+#define mmPSOC_HBM_PLL_LOCK_INTR 0xC74128
+
+#define mmPSOC_HBM_PLL_LOCK_BYPASS 0xC7412C
+
+#define mmPSOC_HBM_PLL_DATA_CHNG 0xC74130
+
+#define mmPSOC_HBM_PLL_RST 0xC74134
+
+#define mmPSOC_HBM_PLL_SLIP_WD_CNTR 0xC74150
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_0 0xC74200
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_1 0xC74204
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_2 0xC74208
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_3 0xC7420C
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_0 0xC74220
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_1 0xC74224
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_2 0xC74228
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_3 0xC7422C
+
+#define mmPSOC_HBM_PLL_DIV_SEL_0 0xC74280
+
+#define mmPSOC_HBM_PLL_DIV_SEL_1 0xC74284
+
+#define mmPSOC_HBM_PLL_DIV_SEL_2 0xC74288
+
+#define mmPSOC_HBM_PLL_DIV_SEL_3 0xC7428C
+
+#define mmPSOC_HBM_PLL_DIV_EN_0 0xC742A0
+
+#define mmPSOC_HBM_PLL_DIV_EN_1 0xC742A4
+
+#define mmPSOC_HBM_PLL_DIV_EN_2 0xC742A8
+
+#define mmPSOC_HBM_PLL_DIV_EN_3 0xC742AC
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_0 0xC742C0
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_1 0xC742C4
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_2 0xC742C8
+
+#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_3 0xC742CC
+
+#define mmPSOC_HBM_PLL_CLK_GATER 0xC74300
+
+#define mmPSOC_HBM_PLL_CLK_RLX_0 0xC74310
+
+#define mmPSOC_HBM_PLL_CLK_RLX_1 0xC74314
+
+#define mmPSOC_HBM_PLL_CLK_RLX_2 0xC74318
+
+#define mmPSOC_HBM_PLL_CLK_RLX_3 0xC7431C
+
+#define mmPSOC_HBM_PLL_REF_CNTR_PERIOD 0xC74400
+
+#define mmPSOC_HBM_PLL_REF_LOW_THRESHOLD 0xC74410
+
+#define mmPSOC_HBM_PLL_REF_HIGH_THRESHOLD 0xC74420
+
+#define mmPSOC_HBM_PLL_PLL_NOT_STABLE 0xC74430
+
+#define mmPSOC_HBM_PLL_FREQ_CALC_EN 0xC74440
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_CFG 0xC74500
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_0 0xC74510
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_1 0xC74514
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_2 0xC74518
+
+#define mmPSOC_HBM_PLL_RLX_BITMAP_3 0xC7451C
+
+#endif /* ASIC_REG_PSOC_HBM_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h
new file mode 100644
index 000000000000..3dc9bb4542dd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_
+#define ASIC_REG_PSOC_PCI_PLL_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_PCI_PLL (Prototype: PLL)
+ *****************************************
+ */
+
+#define mmPSOC_PCI_PLL_NR 0xC72100
+
+#define mmPSOC_PCI_PLL_NF 0xC72104
+
+#define mmPSOC_PCI_PLL_OD 0xC72108
+
+#define mmPSOC_PCI_PLL_NB 0xC7210C
+
+#define mmPSOC_PCI_PLL_CFG 0xC72110
+
+#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120
+
+#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128
+
+#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C
+
+#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130
+
+#define mmPSOC_PCI_PLL_RST 0xC72134
+
+#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C
+
+#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280
+
+#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284
+
+#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288
+
+#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C
+
+#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0
+
+#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4
+
+#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8
+
+#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8
+
+#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC
+
+#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300
+
+#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310
+
+#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314
+
+#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318
+
+#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C
+
+#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400
+
+#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410
+
+#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420
+
+#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430
+
+#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_CFG 0xC72500
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_0 0xC72510
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_1 0xC72514
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_2 0xC72518
+
+#define mmPSOC_PCI_PLL_RLX_BITMAP_3 0xC7251C
+
+#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
new file mode 100644
index 000000000000..9ce24597d4b0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_TIMESTAMP (Prototype: TIMESTAMP)
+ *****************************************
+ */
+
+#define mmPSOC_TIMESTAMP_CNTCR 0xC49000
+
+#define mmPSOC_TIMESTAMP_CNTSR 0xC49004
+
+#define mmPSOC_TIMESTAMP_CNTCVL 0xC49008
+
+#define mmPSOC_TIMESTAMP_CNTCVU 0xC4900C
+
+#define mmPSOC_TIMESTAMP_CNTFID0 0xC49020
+
+#define mmPSOC_TIMESTAMP_PIDR4 0xC49FD0
+
+#define mmPSOC_TIMESTAMP_PIDR5 0xC49FD4
+
+#define mmPSOC_TIMESTAMP_PIDR6 0xC49FD8
+
+#define mmPSOC_TIMESTAMP_PIDR7 0xC49FDC
+
+#define mmPSOC_TIMESTAMP_PIDR0 0xC49FE0
+
+#define mmPSOC_TIMESTAMP_PIDR1 0xC49FE4
+
+#define mmPSOC_TIMESTAMP_PIDR2 0xC49FE8
+
+#define mmPSOC_TIMESTAMP_PIDR3 0xC49FEC
+
+#define mmPSOC_TIMESTAMP_CIDR0 0xC49FF0
+
+#define mmPSOC_TIMESTAMP_CIDR1 0xC49FF4
+
+#define mmPSOC_TIMESTAMP_CIDR2 0xC49FF8
+
+#define mmPSOC_TIMESTAMP_CIDR3 0xC49FFC
+
+#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
new file mode 100644
index 000000000000..ddf824392cf7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_0_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_0_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_0 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_0_PERM_SEL 0x306108
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_0 0x306114
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_1 0x306118
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_2 0x30611C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_3 0x306120
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_4 0x306124
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_5 0x306128
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_6 0x30612C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_7 0x306130
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_8 0x306134
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_9 0x306138
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_10 0x30613C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_11 0x306140
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_12 0x306144
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_13 0x306148
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_14 0x30614C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_15 0x306150
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_16 0x306154
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_17 0x306158
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_18 0x30615C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_19 0x306160
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_20 0x306164
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_21 0x306168
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_22 0x30616C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_23 0x306170
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_24 0x306174
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_25 0x306178
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_26 0x30617C
+
+#define mmSIF_RTR_CTRL_0_HBM_POLY_H3_27 0x306180
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_0 0x306184
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_1 0x306188
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_2 0x30618C
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_3 0x306190
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_4 0x306194
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_5 0x306198
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_6 0x30619C
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_7 0x3061A0
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_8 0x3061A4
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_9 0x3061A8
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_10 0x3061AC
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_11 0x3061B0
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_12 0x3061B4
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_13 0x3061B8
+
+#define mmSIF_RTR_CTRL_0_SRAM_POLY_H3_14 0x3061BC
+
+#define mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN 0x30626C
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_EN 0x306274
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_SAT 0x306278
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_RST 0x30627C
+
+#define mmSIF_RTR_CTRL_0_RL_HBM_TIMEOUT 0x306280
+
+#define mmSIF_RTR_CTRL_0_SCRAM_HBM_EN 0x306284
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_EN 0x306288
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_SAT 0x30628C
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_RST 0x306290
+
+#define mmSIF_RTR_CTRL_0_RL_PCI_TIMEOUT 0x306294
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_EN 0x30629C
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_SAT 0x3062A0
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_RST 0x3062A4
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_TIMEOUT 0x3062AC
+
+#define mmSIF_RTR_CTRL_0_RL_SRAM_RED 0x3062B4
+
+#define mmSIF_RTR_CTRL_0_E2E_HBM_EN 0x3062EC
+
+#define mmSIF_RTR_CTRL_0_E2E_PCI_EN 0x3062F0
+
+#define mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE 0x3062F4
+
+#define mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE 0x3062F8
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET_EN 0x306404
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_SET 0x306408
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_WRAP 0x30640C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_PCI_CTR_CNT 0x306410
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET_EN 0x306414
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM_CTR_SET 0x306418
+
+#define mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE 0x30641C
+
+#define mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE 0x306420
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET_EN 0x306424
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_SET 0x306428
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_WRAP 0x30642C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_PCI_CTR_CNT 0x306430
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET_EN 0x306434
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM_CTR_SET 0x306438
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_SEL_0 0x306450
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_SEL_1 0x306454
+
+#define mmSIF_RTR_CTRL_0_NON_LIN_EN 0x306480
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_0 0x306500
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_1 0x306504
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_2 0x306508
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_3 0x30650C
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_BANK_4 0x306510
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_0 0x306514
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_1 0x306520
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_2 0x306524
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_3 0x306528
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_4 0x30652C
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_5 0x306530
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_6 0x306534
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_7 0x306538
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_8 0x30653C
+
+#define mmSIF_RTR_CTRL_0_NL_SRAM_OFFSET_9 0x306540
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_0 0x306550
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_1 0x306554
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_2 0x306558
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_3 0x30655C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_4 0x306560
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_5 0x306564
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_6 0x306568
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_7 0x30656C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_8 0x306570
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_9 0x306574
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_10 0x306578
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_11 0x30657C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_12 0x306580
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_13 0x306584
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_14 0x306588
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_15 0x30658C
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_16 0x306590
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_17 0x306594
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18 0x306598
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_0 0x3065E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_1 0x3065E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_2 0x3065EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_3 0x3065F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_4 0x3065F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_5 0x3065F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_6 0x3065FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_7 0x306600
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_8 0x306604
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_9 0x306608
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_10 0x30660C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_11 0x306610
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_12 0x306614
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_13 0x306618
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_14 0x30661C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AW_15 0x306620
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_0 0x306624
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_1 0x306628
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_2 0x30662C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_3 0x306630
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_4 0x306634
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_5 0x306638
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_6 0x30663C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_7 0x306640
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_8 0x306644
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_9 0x306648
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_10 0x30664C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_11 0x306650
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_12 0x306654
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_13 0x306658
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_14 0x30665C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AW_15 0x306660
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_0 0x306664
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_1 0x306668
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_2 0x30666C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_3 0x306670
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_4 0x306674
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_5 0x306678
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_6 0x30667C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_7 0x306680
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_8 0x306684
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_9 0x306688
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_10 0x30668C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_11 0x306690
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_12 0x306694
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_13 0x306698
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_14 0x30669C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AW_15 0x3066A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_0 0x3066A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_1 0x3066A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_2 0x3066AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_3 0x3066B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_4 0x3066B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_5 0x3066B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_6 0x3066BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_7 0x3066C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_8 0x3066C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_9 0x3066C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_10 0x3066CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_11 0x3066D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_12 0x3066D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_13 0x3066D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_14 0x3066DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AW_15 0x3066E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_0 0x3066E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_1 0x3066E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_2 0x3066EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_3 0x3066F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_4 0x3066F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_5 0x3066F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_6 0x3066FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_7 0x306700
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_8 0x306704
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_9 0x306708
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_10 0x30670C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_11 0x306710
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_12 0x306714
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_13 0x306718
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_14 0x30671C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AW_15 0x306720
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_0 0x306724
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_1 0x306728
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_2 0x30672C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_3 0x306730
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_4 0x306734
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_5 0x306738
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_6 0x30673C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_7 0x306740
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_8 0x306744
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_9 0x306748
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_10 0x30674C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_11 0x306750
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_12 0x306754
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_13 0x306758
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_14 0x30675C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AW_15 0x306760
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_0 0x306764
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_1 0x306768
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_2 0x30676C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_3 0x306770
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_4 0x306774
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_5 0x306778
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_6 0x30677C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_7 0x306780
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_8 0x306784
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_9 0x306788
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_10 0x30678C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_11 0x306790
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_12 0x306794
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_13 0x306798
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_14 0x30679C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AW_15 0x3067A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_0 0x3067A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_1 0x3067A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_2 0x3067AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_3 0x3067B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_4 0x3067B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_5 0x3067B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_6 0x3067BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_7 0x3067C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_8 0x3067C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_9 0x3067C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_10 0x3067CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_11 0x3067D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_12 0x3067D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_13 0x3067D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_14 0x3067DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AW_15 0x3067E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_0 0x306824
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_1 0x306828
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_2 0x30682C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_3 0x306830
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_4 0x306834
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_5 0x306838
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_6 0x30683C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_7 0x306840
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_8 0x306844
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_9 0x306848
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_10 0x30684C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_11 0x306850
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_12 0x306854
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_13 0x306858
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_14 0x30685C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_LOW_AR_15 0x306860
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_0 0x306864
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_1 0x306868
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_2 0x30686C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_3 0x306870
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_4 0x306874
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_5 0x306878
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_6 0x30687C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_7 0x306880
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_8 0x306884
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_9 0x306888
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_10 0x30688C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_11 0x306890
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_12 0x306894
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_13 0x306898
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_14 0x30689C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_BASE_HIGH_AR_15 0x3068A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_0 0x3068A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_1 0x3068A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_2 0x3068AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_3 0x3068B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_4 0x3068B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_5 0x3068B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_6 0x3068BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_7 0x3068C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_8 0x3068C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_9 0x3068C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_10 0x3068CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_11 0x3068D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_12 0x3068D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_13 0x3068D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_14 0x3068DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_LOW_AR_15 0x3068E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_0 0x3068E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_1 0x3068E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_2 0x3068EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_3 0x3068F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_4 0x3068F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_5 0x3068F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_6 0x3068FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_7 0x306900
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_8 0x306904
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_9 0x306908
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_10 0x30690C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_11 0x306910
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_12 0x306914
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_13 0x306918
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_14 0x30691C
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_MASK_HIGH_AR_15 0x306920
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_0 0x306924
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_1 0x306928
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_2 0x30692C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_3 0x306930
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_4 0x306934
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_5 0x306938
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_6 0x30693C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_7 0x306940
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_8 0x306944
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_9 0x306948
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_10 0x30694C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_11 0x306950
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_12 0x306954
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_13 0x306958
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_14 0x30695C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_LOW_AR_15 0x306960
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_0 0x306964
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_1 0x306968
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_2 0x30696C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_3 0x306970
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_4 0x306974
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_5 0x306978
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_6 0x30697C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_7 0x306980
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_8 0x306984
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_9 0x306988
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_10 0x30698C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_11 0x306990
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_12 0x306994
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_13 0x306998
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_14 0x30699C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_BASE_HIGH_AR_15 0x3069A0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_0 0x3069A4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_1 0x3069A8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_2 0x3069AC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_3 0x3069B0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_4 0x3069B4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_5 0x3069B8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_6 0x3069BC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_7 0x3069C0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_8 0x3069C4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_9 0x3069C8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_10 0x3069CC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_11 0x3069D0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_12 0x3069D4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_13 0x3069D8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_14 0x3069DC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_LOW_AR_15 0x3069E0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_0 0x3069E4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_1 0x3069E8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_2 0x3069EC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_3 0x3069F0
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_4 0x3069F4
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_5 0x3069F8
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_6 0x3069FC
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_7 0x306A00
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_8 0x306A04
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_9 0x306A08
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_10 0x306A0C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_11 0x306A10
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_12 0x306A14
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_13 0x306A18
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_14 0x306A1C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_MASK_HIGH_AR_15 0x306A20
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AW 0x306A64
+
+#define mmSIF_RTR_CTRL_0_RANGE_SEC_HIT_AR 0x306A68
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_HIT_AW 0x306A6C
+
+#define mmSIF_RTR_CTRL_0_RANGE_PRIV_HIT_AR 0x306A70
+
+#define mmSIF_RTR_CTRL_0_RGL_CFG 0x306B64
+
+#define mmSIF_RTR_CTRL_0_RGL_SHIFT 0x306B68
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_0 0x306B6C
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_1 0x306B70
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_2 0x306B74
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_3 0x306B78
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_4 0x306B7C
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_5 0x306B80
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_6 0x306B84
+
+#define mmSIF_RTR_CTRL_0_RGL_EXPECTED_LAT_7 0x306B88
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_0 0x306BAC
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_1 0x306BB0
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_2 0x306BB4
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_3 0x306BB8
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_4 0x306BBC
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_5 0x306BC0
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_6 0x306BC4
+
+#define mmSIF_RTR_CTRL_0_RGL_TOKEN_7 0x306BC8
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_0 0x306BEC
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_1 0x306BF0
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_2 0x306BF4
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_3 0x306BF8
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_4 0x306BFC
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_5 0x306C00
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_6 0x306C04
+
+#define mmSIF_RTR_CTRL_0_RGL_BANK_ID_7 0x306C08
+
+#define mmSIF_RTR_CTRL_0_RGL_WDT 0x306C2C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_WRAP 0x306C30
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_WRAP 0x306C34
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_WRAP 0x306C38
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_WRAP 0x306C3C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_WRAP 0x306C40
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_WRAP 0x306C44
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_WRAP 0x306C48
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_WRAP 0x306C4C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH0_CTR_CNT 0x306C50
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM0_CH1_CTR_CNT 0x306C54
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH0_CTR_CNT 0x306C58
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM1_CH1_CTR_CNT 0x306C5C
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH0_CTR_CNT 0x306C60
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM2_CH1_CTR_CNT 0x306C64
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH0_CTR_CNT 0x306C68
+
+#define mmSIF_RTR_CTRL_0_E2E_AR_HBM3_CH1_CTR_CNT 0x306C6C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_WRAP 0x306C70
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_WRAP 0x306C74
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_WRAP 0x306C78
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_WRAP 0x306C7C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_WRAP 0x306C80
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_WRAP 0x306C84
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_WRAP 0x306C88
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_WRAP 0x306C8C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH0_CTR_CNT 0x306C90
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM0_CH1_CTR_CNT 0x306C94
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH0_CTR_CNT 0x306C98
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM1_CH1_CTR_CNT 0x306C9C
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH0_CTR_CNT 0x306CA0
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM2_CH1_CTR_CNT 0x306CA4
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH0_CTR_CNT 0x306CA8
+
+#define mmSIF_RTR_CTRL_0_E2E_AW_HBM3_CH1_CTR_CNT 0x306CAC
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_0 0x306CB0
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_1 0x306CB4
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_2 0x306CB8
+
+#define mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3 0x306CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_0_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
new file mode 100644
index 000000000000..c6d517dbbd54
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_1_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_1_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_1 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_1_PERM_SEL 0x316108
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_0 0x316114
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_1 0x316118
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_2 0x31611C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_3 0x316120
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_4 0x316124
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_5 0x316128
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_6 0x31612C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_7 0x316130
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_8 0x316134
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_9 0x316138
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_10 0x31613C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_11 0x316140
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_12 0x316144
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_13 0x316148
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_14 0x31614C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_15 0x316150
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_16 0x316154
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_17 0x316158
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_18 0x31615C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_19 0x316160
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_20 0x316164
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_21 0x316168
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_22 0x31616C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_23 0x316170
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_24 0x316174
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_25 0x316178
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_26 0x31617C
+
+#define mmSIF_RTR_CTRL_1_HBM_POLY_H3_27 0x316180
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_0 0x316184
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_1 0x316188
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_2 0x31618C
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_3 0x316190
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_4 0x316194
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_5 0x316198
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_6 0x31619C
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_7 0x3161A0
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_8 0x3161A4
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_9 0x3161A8
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_10 0x3161AC
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_11 0x3161B0
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_12 0x3161B4
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_13 0x3161B8
+
+#define mmSIF_RTR_CTRL_1_SRAM_POLY_H3_14 0x3161BC
+
+#define mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN 0x31626C
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_EN 0x316274
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_SAT 0x316278
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_RST 0x31627C
+
+#define mmSIF_RTR_CTRL_1_RL_HBM_TIMEOUT 0x316280
+
+#define mmSIF_RTR_CTRL_1_SCRAM_HBM_EN 0x316284
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_EN 0x316288
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_SAT 0x31628C
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_RST 0x316290
+
+#define mmSIF_RTR_CTRL_1_RL_PCI_TIMEOUT 0x316294
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_EN 0x31629C
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_SAT 0x3162A0
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_RST 0x3162A4
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_TIMEOUT 0x3162AC
+
+#define mmSIF_RTR_CTRL_1_RL_SRAM_RED 0x3162B4
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_EN 0x3162EC
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_EN 0x3162F0
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE 0x3162F4
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE 0x3162F8
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET_EN 0x316404
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_SET 0x316408
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_WRAP 0x31640C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_PCI_CTR_CNT 0x316410
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET_EN 0x316414
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM_CTR_SET 0x316418
+
+#define mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE 0x31641C
+
+#define mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE 0x316420
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET_EN 0x316424
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_SET 0x316428
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_WRAP 0x31642C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_PCI_CTR_CNT 0x316430
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET_EN 0x316434
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM_CTR_SET 0x316438
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_SEL_0 0x316450
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_SEL_1 0x316454
+
+#define mmSIF_RTR_CTRL_1_NON_LIN_EN 0x316480
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_0 0x316500
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_1 0x316504
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_2 0x316508
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_3 0x31650C
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_BANK_4 0x316510
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_0 0x316514
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_1 0x316520
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_2 0x316524
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_3 0x316528
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_4 0x31652C
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_5 0x316530
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_6 0x316534
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_7 0x316538
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_8 0x31653C
+
+#define mmSIF_RTR_CTRL_1_NL_SRAM_OFFSET_9 0x316540
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_0 0x316550
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_1 0x316554
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_2 0x316558
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_3 0x31655C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_4 0x316560
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_5 0x316564
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_6 0x316568
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_7 0x31656C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_8 0x316570
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_9 0x316574
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_10 0x316578
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_11 0x31657C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_12 0x316580
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_13 0x316584
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_14 0x316588
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_15 0x31658C
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_16 0x316590
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_17 0x316594
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18 0x316598
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_0 0x3165E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_1 0x3165E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_2 0x3165EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_3 0x3165F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_4 0x3165F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_5 0x3165F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_6 0x3165FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_7 0x316600
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_8 0x316604
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_9 0x316608
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_10 0x31660C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_11 0x316610
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_12 0x316614
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_13 0x316618
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_14 0x31661C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AW_15 0x316620
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_0 0x316624
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_1 0x316628
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_2 0x31662C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_3 0x316630
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_4 0x316634
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_5 0x316638
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_6 0x31663C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_7 0x316640
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_8 0x316644
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_9 0x316648
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_10 0x31664C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_11 0x316650
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_12 0x316654
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_13 0x316658
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_14 0x31665C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AW_15 0x316660
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_0 0x316664
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_1 0x316668
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_2 0x31666C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_3 0x316670
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_4 0x316674
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_5 0x316678
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_6 0x31667C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_7 0x316680
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_8 0x316684
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_9 0x316688
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_10 0x31668C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_11 0x316690
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_12 0x316694
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_13 0x316698
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_14 0x31669C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AW_15 0x3166A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_0 0x3166A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_1 0x3166A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_2 0x3166AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_3 0x3166B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_4 0x3166B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_5 0x3166B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_6 0x3166BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_7 0x3166C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_8 0x3166C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_9 0x3166C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_10 0x3166CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_11 0x3166D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_12 0x3166D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_13 0x3166D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_14 0x3166DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AW_15 0x3166E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_0 0x3166E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_1 0x3166E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_2 0x3166EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_3 0x3166F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_4 0x3166F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_5 0x3166F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_6 0x3166FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_7 0x316700
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_8 0x316704
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_9 0x316708
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_10 0x31670C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_11 0x316710
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_12 0x316714
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_13 0x316718
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_14 0x31671C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AW_15 0x316720
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_0 0x316724
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_1 0x316728
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_2 0x31672C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_3 0x316730
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_4 0x316734
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_5 0x316738
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_6 0x31673C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_7 0x316740
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_8 0x316744
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_9 0x316748
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_10 0x31674C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_11 0x316750
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_12 0x316754
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_13 0x316758
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_14 0x31675C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AW_15 0x316760
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_0 0x316764
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_1 0x316768
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_2 0x31676C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_3 0x316770
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_4 0x316774
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_5 0x316778
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_6 0x31677C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_7 0x316780
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_8 0x316784
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_9 0x316788
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_10 0x31678C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_11 0x316790
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_12 0x316794
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_13 0x316798
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_14 0x31679C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AW_15 0x3167A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_0 0x3167A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_1 0x3167A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_2 0x3167AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_3 0x3167B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_4 0x3167B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_5 0x3167B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_6 0x3167BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_7 0x3167C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_8 0x3167C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_9 0x3167C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_10 0x3167CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_11 0x3167D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_12 0x3167D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_13 0x3167D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_14 0x3167DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AW_15 0x3167E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_0 0x316824
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_1 0x316828
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_2 0x31682C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_3 0x316830
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_4 0x316834
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_5 0x316838
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_6 0x31683C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_7 0x316840
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_8 0x316844
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_9 0x316848
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_10 0x31684C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_11 0x316850
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_12 0x316854
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_13 0x316858
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_14 0x31685C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_LOW_AR_15 0x316860
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_0 0x316864
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_1 0x316868
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_2 0x31686C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_3 0x316870
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_4 0x316874
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_5 0x316878
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_6 0x31687C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_7 0x316880
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_8 0x316884
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_9 0x316888
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_10 0x31688C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_11 0x316890
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_12 0x316894
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_13 0x316898
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_14 0x31689C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_BASE_HIGH_AR_15 0x3168A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_0 0x3168A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_1 0x3168A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_2 0x3168AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_3 0x3168B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_4 0x3168B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_5 0x3168B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_6 0x3168BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_7 0x3168C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_8 0x3168C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_9 0x3168C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_10 0x3168CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_11 0x3168D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_12 0x3168D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_13 0x3168D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_14 0x3168DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_LOW_AR_15 0x3168E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_0 0x3168E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_1 0x3168E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_2 0x3168EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_3 0x3168F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_4 0x3168F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_5 0x3168F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_6 0x3168FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_7 0x316900
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_8 0x316904
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_9 0x316908
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_10 0x31690C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_11 0x316910
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_12 0x316914
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_13 0x316918
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_14 0x31691C
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_MASK_HIGH_AR_15 0x316920
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_0 0x316924
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_1 0x316928
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_2 0x31692C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_3 0x316930
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_4 0x316934
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_5 0x316938
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_6 0x31693C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_7 0x316940
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_8 0x316944
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_9 0x316948
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_10 0x31694C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_11 0x316950
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_12 0x316954
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_13 0x316958
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_14 0x31695C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_LOW_AR_15 0x316960
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_0 0x316964
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_1 0x316968
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_2 0x31696C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_3 0x316970
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_4 0x316974
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_5 0x316978
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_6 0x31697C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_7 0x316980
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_8 0x316984
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_9 0x316988
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_10 0x31698C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_11 0x316990
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_12 0x316994
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_13 0x316998
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_14 0x31699C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_BASE_HIGH_AR_15 0x3169A0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_0 0x3169A4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_1 0x3169A8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_2 0x3169AC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_3 0x3169B0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_4 0x3169B4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_5 0x3169B8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_6 0x3169BC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_7 0x3169C0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_8 0x3169C4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_9 0x3169C8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_10 0x3169CC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_11 0x3169D0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_12 0x3169D4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_13 0x3169D8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_14 0x3169DC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_LOW_AR_15 0x3169E0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_0 0x3169E4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_1 0x3169E8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_2 0x3169EC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_3 0x3169F0
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_4 0x3169F4
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_5 0x3169F8
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_6 0x3169FC
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_7 0x316A00
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_8 0x316A04
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_9 0x316A08
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_10 0x316A0C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_11 0x316A10
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_12 0x316A14
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_13 0x316A18
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_14 0x316A1C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_MASK_HIGH_AR_15 0x316A20
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AW 0x316A64
+
+#define mmSIF_RTR_CTRL_1_RANGE_SEC_HIT_AR 0x316A68
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_HIT_AW 0x316A6C
+
+#define mmSIF_RTR_CTRL_1_RANGE_PRIV_HIT_AR 0x316A70
+
+#define mmSIF_RTR_CTRL_1_RGL_CFG 0x316B64
+
+#define mmSIF_RTR_CTRL_1_RGL_SHIFT 0x316B68
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_0 0x316B6C
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_1 0x316B70
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_2 0x316B74
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_3 0x316B78
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_4 0x316B7C
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_5 0x316B80
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_6 0x316B84
+
+#define mmSIF_RTR_CTRL_1_RGL_EXPECTED_LAT_7 0x316B88
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_0 0x316BAC
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_1 0x316BB0
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_2 0x316BB4
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_3 0x316BB8
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_4 0x316BBC
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_5 0x316BC0
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_6 0x316BC4
+
+#define mmSIF_RTR_CTRL_1_RGL_TOKEN_7 0x316BC8
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_0 0x316BEC
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_1 0x316BF0
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_2 0x316BF4
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_3 0x316BF8
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_4 0x316BFC
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_5 0x316C00
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_6 0x316C04
+
+#define mmSIF_RTR_CTRL_1_RGL_BANK_ID_7 0x316C08
+
+#define mmSIF_RTR_CTRL_1_RGL_WDT 0x316C2C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_WRAP 0x316C30
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_WRAP 0x316C34
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_WRAP 0x316C38
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_WRAP 0x316C3C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_WRAP 0x316C40
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_WRAP 0x316C44
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_WRAP 0x316C48
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_WRAP 0x316C4C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH0_CTR_CNT 0x316C50
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM0_CH1_CTR_CNT 0x316C54
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH0_CTR_CNT 0x316C58
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM1_CH1_CTR_CNT 0x316C5C
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH0_CTR_CNT 0x316C60
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM2_CH1_CTR_CNT 0x316C64
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH0_CTR_CNT 0x316C68
+
+#define mmSIF_RTR_CTRL_1_E2E_AR_HBM3_CH1_CTR_CNT 0x316C6C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_WRAP 0x316C70
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_WRAP 0x316C74
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_WRAP 0x316C78
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_WRAP 0x316C7C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_WRAP 0x316C80
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_WRAP 0x316C84
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_WRAP 0x316C88
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_WRAP 0x316C8C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH0_CTR_CNT 0x316C90
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM0_CH1_CTR_CNT 0x316C94
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH0_CTR_CNT 0x316C98
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM1_CH1_CTR_CNT 0x316C9C
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH0_CTR_CNT 0x316CA0
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM2_CH1_CTR_CNT 0x316CA4
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH0_CTR_CNT 0x316CA8
+
+#define mmSIF_RTR_CTRL_1_E2E_AW_HBM3_CH1_CTR_CNT 0x316CAC
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_0 0x316CB0
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_1 0x316CB4
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_2 0x316CB8
+
+#define mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3 0x316CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_1_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
new file mode 100644
index 000000000000..330e5b42d679
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_2_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_2_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_2 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_2_PERM_SEL 0x326108
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_0 0x326114
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_1 0x326118
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_2 0x32611C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_3 0x326120
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_4 0x326124
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_5 0x326128
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_6 0x32612C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_7 0x326130
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_8 0x326134
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_9 0x326138
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_10 0x32613C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_11 0x326140
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_12 0x326144
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_13 0x326148
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_14 0x32614C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_15 0x326150
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_16 0x326154
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_17 0x326158
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_18 0x32615C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_19 0x326160
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_20 0x326164
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_21 0x326168
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_22 0x32616C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_23 0x326170
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_24 0x326174
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_25 0x326178
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_26 0x32617C
+
+#define mmSIF_RTR_CTRL_2_HBM_POLY_H3_27 0x326180
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_0 0x326184
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_1 0x326188
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_2 0x32618C
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_3 0x326190
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_4 0x326194
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_5 0x326198
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_6 0x32619C
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_7 0x3261A0
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_8 0x3261A4
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_9 0x3261A8
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_10 0x3261AC
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_11 0x3261B0
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_12 0x3261B4
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_13 0x3261B8
+
+#define mmSIF_RTR_CTRL_2_SRAM_POLY_H3_14 0x3261BC
+
+#define mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN 0x32626C
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_EN 0x326274
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_SAT 0x326278
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_RST 0x32627C
+
+#define mmSIF_RTR_CTRL_2_RL_HBM_TIMEOUT 0x326280
+
+#define mmSIF_RTR_CTRL_2_SCRAM_HBM_EN 0x326284
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_EN 0x326288
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_SAT 0x32628C
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_RST 0x326290
+
+#define mmSIF_RTR_CTRL_2_RL_PCI_TIMEOUT 0x326294
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_EN 0x32629C
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_SAT 0x3262A0
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_RST 0x3262A4
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_TIMEOUT 0x3262AC
+
+#define mmSIF_RTR_CTRL_2_RL_SRAM_RED 0x3262B4
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_EN 0x3262EC
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_EN 0x3262F0
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE 0x3262F4
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE 0x3262F8
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET_EN 0x326404
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_SET 0x326408
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_WRAP 0x32640C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_PCI_CTR_CNT 0x326410
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET_EN 0x326414
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM_CTR_SET 0x326418
+
+#define mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE 0x32641C
+
+#define mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE 0x326420
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET_EN 0x326424
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_SET 0x326428
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_WRAP 0x32642C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_PCI_CTR_CNT 0x326430
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET_EN 0x326434
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM_CTR_SET 0x326438
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_SEL_0 0x326450
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_SEL_1 0x326454
+
+#define mmSIF_RTR_CTRL_2_NON_LIN_EN 0x326480
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_0 0x326500
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_1 0x326504
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_2 0x326508
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_3 0x32650C
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_BANK_4 0x326510
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_0 0x326514
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_1 0x326520
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_2 0x326524
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_3 0x326528
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_4 0x32652C
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_5 0x326530
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_6 0x326534
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_7 0x326538
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_8 0x32653C
+
+#define mmSIF_RTR_CTRL_2_NL_SRAM_OFFSET_9 0x326540
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_0 0x326550
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_1 0x326554
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_2 0x326558
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_3 0x32655C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_4 0x326560
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_5 0x326564
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_6 0x326568
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_7 0x32656C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_8 0x326570
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_9 0x326574
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_10 0x326578
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_11 0x32657C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_12 0x326580
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_13 0x326584
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_14 0x326588
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_15 0x32658C
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_16 0x326590
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_17 0x326594
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18 0x326598
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_0 0x3265E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_1 0x3265E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_2 0x3265EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_3 0x3265F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_4 0x3265F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_5 0x3265F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_6 0x3265FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_7 0x326600
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_8 0x326604
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_9 0x326608
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_10 0x32660C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_11 0x326610
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_12 0x326614
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_13 0x326618
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_14 0x32661C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AW_15 0x326620
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_0 0x326624
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_1 0x326628
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_2 0x32662C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_3 0x326630
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_4 0x326634
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_5 0x326638
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_6 0x32663C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_7 0x326640
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_8 0x326644
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_9 0x326648
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_10 0x32664C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_11 0x326650
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_12 0x326654
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_13 0x326658
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_14 0x32665C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AW_15 0x326660
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_0 0x326664
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_1 0x326668
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_2 0x32666C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_3 0x326670
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_4 0x326674
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_5 0x326678
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_6 0x32667C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_7 0x326680
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_8 0x326684
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_9 0x326688
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_10 0x32668C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_11 0x326690
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_12 0x326694
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_13 0x326698
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_14 0x32669C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AW_15 0x3266A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_0 0x3266A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_1 0x3266A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_2 0x3266AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_3 0x3266B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_4 0x3266B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_5 0x3266B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_6 0x3266BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_7 0x3266C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_8 0x3266C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_9 0x3266C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_10 0x3266CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_11 0x3266D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_12 0x3266D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_13 0x3266D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_14 0x3266DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AW_15 0x3266E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_0 0x3266E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_1 0x3266E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_2 0x3266EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_3 0x3266F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_4 0x3266F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_5 0x3266F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_6 0x3266FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_7 0x326700
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_8 0x326704
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_9 0x326708
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_10 0x32670C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_11 0x326710
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_12 0x326714
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_13 0x326718
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_14 0x32671C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AW_15 0x326720
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_0 0x326724
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_1 0x326728
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_2 0x32672C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_3 0x326730
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_4 0x326734
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_5 0x326738
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_6 0x32673C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_7 0x326740
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_8 0x326744
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_9 0x326748
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_10 0x32674C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_11 0x326750
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_12 0x326754
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_13 0x326758
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_14 0x32675C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AW_15 0x326760
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_0 0x326764
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_1 0x326768
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_2 0x32676C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_3 0x326770
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_4 0x326774
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_5 0x326778
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_6 0x32677C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_7 0x326780
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_8 0x326784
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_9 0x326788
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_10 0x32678C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_11 0x326790
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_12 0x326794
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_13 0x326798
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_14 0x32679C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AW_15 0x3267A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_0 0x3267A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_1 0x3267A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_2 0x3267AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_3 0x3267B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_4 0x3267B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_5 0x3267B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_6 0x3267BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_7 0x3267C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_8 0x3267C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_9 0x3267C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_10 0x3267CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_11 0x3267D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_12 0x3267D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_13 0x3267D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_14 0x3267DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AW_15 0x3267E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_0 0x326824
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_1 0x326828
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_2 0x32682C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_3 0x326830
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_4 0x326834
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_5 0x326838
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_6 0x32683C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_7 0x326840
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_8 0x326844
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_9 0x326848
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_10 0x32684C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_11 0x326850
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_12 0x326854
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_13 0x326858
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_14 0x32685C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_LOW_AR_15 0x326860
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_0 0x326864
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_1 0x326868
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_2 0x32686C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_3 0x326870
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_4 0x326874
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_5 0x326878
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_6 0x32687C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_7 0x326880
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_8 0x326884
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_9 0x326888
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_10 0x32688C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_11 0x326890
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_12 0x326894
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_13 0x326898
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_14 0x32689C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_BASE_HIGH_AR_15 0x3268A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_0 0x3268A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_1 0x3268A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_2 0x3268AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_3 0x3268B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_4 0x3268B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_5 0x3268B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_6 0x3268BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_7 0x3268C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_8 0x3268C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_9 0x3268C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_10 0x3268CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_11 0x3268D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_12 0x3268D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_13 0x3268D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_14 0x3268DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_LOW_AR_15 0x3268E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_0 0x3268E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_1 0x3268E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_2 0x3268EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_3 0x3268F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_4 0x3268F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_5 0x3268F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_6 0x3268FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_7 0x326900
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_8 0x326904
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_9 0x326908
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_10 0x32690C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_11 0x326910
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_12 0x326914
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_13 0x326918
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_14 0x32691C
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_MASK_HIGH_AR_15 0x326920
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_0 0x326924
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_1 0x326928
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_2 0x32692C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_3 0x326930
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_4 0x326934
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_5 0x326938
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_6 0x32693C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_7 0x326940
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_8 0x326944
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_9 0x326948
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_10 0x32694C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_11 0x326950
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_12 0x326954
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_13 0x326958
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_14 0x32695C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_LOW_AR_15 0x326960
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_0 0x326964
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_1 0x326968
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_2 0x32696C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_3 0x326970
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_4 0x326974
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_5 0x326978
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_6 0x32697C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_7 0x326980
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_8 0x326984
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_9 0x326988
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_10 0x32698C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_11 0x326990
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_12 0x326994
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_13 0x326998
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_14 0x32699C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_BASE_HIGH_AR_15 0x3269A0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_0 0x3269A4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_1 0x3269A8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_2 0x3269AC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_3 0x3269B0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_4 0x3269B4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_5 0x3269B8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_6 0x3269BC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_7 0x3269C0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_8 0x3269C4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_9 0x3269C8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_10 0x3269CC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_11 0x3269D0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_12 0x3269D4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_13 0x3269D8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_14 0x3269DC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_LOW_AR_15 0x3269E0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_0 0x3269E4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_1 0x3269E8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_2 0x3269EC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_3 0x3269F0
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_4 0x3269F4
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_5 0x3269F8
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_6 0x3269FC
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_7 0x326A00
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_8 0x326A04
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_9 0x326A08
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_10 0x326A0C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_11 0x326A10
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_12 0x326A14
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_13 0x326A18
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_14 0x326A1C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_MASK_HIGH_AR_15 0x326A20
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AW 0x326A64
+
+#define mmSIF_RTR_CTRL_2_RANGE_SEC_HIT_AR 0x326A68
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_HIT_AW 0x326A6C
+
+#define mmSIF_RTR_CTRL_2_RANGE_PRIV_HIT_AR 0x326A70
+
+#define mmSIF_RTR_CTRL_2_RGL_CFG 0x326B64
+
+#define mmSIF_RTR_CTRL_2_RGL_SHIFT 0x326B68
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_0 0x326B6C
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_1 0x326B70
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_2 0x326B74
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_3 0x326B78
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_4 0x326B7C
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_5 0x326B80
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_6 0x326B84
+
+#define mmSIF_RTR_CTRL_2_RGL_EXPECTED_LAT_7 0x326B88
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_0 0x326BAC
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_1 0x326BB0
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_2 0x326BB4
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_3 0x326BB8
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_4 0x326BBC
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_5 0x326BC0
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_6 0x326BC4
+
+#define mmSIF_RTR_CTRL_2_RGL_TOKEN_7 0x326BC8
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_0 0x326BEC
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_1 0x326BF0
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_2 0x326BF4
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_3 0x326BF8
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_4 0x326BFC
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_5 0x326C00
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_6 0x326C04
+
+#define mmSIF_RTR_CTRL_2_RGL_BANK_ID_7 0x326C08
+
+#define mmSIF_RTR_CTRL_2_RGL_WDT 0x326C2C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_WRAP 0x326C30
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_WRAP 0x326C34
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_WRAP 0x326C38
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_WRAP 0x326C3C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_WRAP 0x326C40
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_WRAP 0x326C44
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_WRAP 0x326C48
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_WRAP 0x326C4C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH0_CTR_CNT 0x326C50
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM0_CH1_CTR_CNT 0x326C54
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH0_CTR_CNT 0x326C58
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM1_CH1_CTR_CNT 0x326C5C
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH0_CTR_CNT 0x326C60
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM2_CH1_CTR_CNT 0x326C64
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH0_CTR_CNT 0x326C68
+
+#define mmSIF_RTR_CTRL_2_E2E_AR_HBM3_CH1_CTR_CNT 0x326C6C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_WRAP 0x326C70
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_WRAP 0x326C74
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_WRAP 0x326C78
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_WRAP 0x326C7C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_WRAP 0x326C80
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_WRAP 0x326C84
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_WRAP 0x326C88
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_WRAP 0x326C8C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH0_CTR_CNT 0x326C90
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM0_CH1_CTR_CNT 0x326C94
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH0_CTR_CNT 0x326C98
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM1_CH1_CTR_CNT 0x326C9C
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH0_CTR_CNT 0x326CA0
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM2_CH1_CTR_CNT 0x326CA4
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH0_CTR_CNT 0x326CA8
+
+#define mmSIF_RTR_CTRL_2_E2E_AW_HBM3_CH1_CTR_CNT 0x326CAC
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_0 0x326CB0
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_1 0x326CB4
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_2 0x326CB8
+
+#define mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3 0x326CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_2_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
new file mode 100644
index 000000000000..d749f1968e5e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_3_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_3_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_3 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_3_PERM_SEL 0x336108
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_0 0x336114
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_1 0x336118
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_2 0x33611C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_3 0x336120
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_4 0x336124
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_5 0x336128
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_6 0x33612C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_7 0x336130
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_8 0x336134
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_9 0x336138
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_10 0x33613C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_11 0x336140
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_12 0x336144
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_13 0x336148
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_14 0x33614C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_15 0x336150
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_16 0x336154
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_17 0x336158
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_18 0x33615C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_19 0x336160
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_20 0x336164
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_21 0x336168
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_22 0x33616C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_23 0x336170
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_24 0x336174
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_25 0x336178
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_26 0x33617C
+
+#define mmSIF_RTR_CTRL_3_HBM_POLY_H3_27 0x336180
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_0 0x336184
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_1 0x336188
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_2 0x33618C
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_3 0x336190
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_4 0x336194
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_5 0x336198
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_6 0x33619C
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_7 0x3361A0
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_8 0x3361A4
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_9 0x3361A8
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_10 0x3361AC
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_11 0x3361B0
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_12 0x3361B4
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_13 0x3361B8
+
+#define mmSIF_RTR_CTRL_3_SRAM_POLY_H3_14 0x3361BC
+
+#define mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN 0x33626C
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_EN 0x336274
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_SAT 0x336278
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_RST 0x33627C
+
+#define mmSIF_RTR_CTRL_3_RL_HBM_TIMEOUT 0x336280
+
+#define mmSIF_RTR_CTRL_3_SCRAM_HBM_EN 0x336284
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_EN 0x336288
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_SAT 0x33628C
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_RST 0x336290
+
+#define mmSIF_RTR_CTRL_3_RL_PCI_TIMEOUT 0x336294
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_EN 0x33629C
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_SAT 0x3362A0
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_RST 0x3362A4
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_TIMEOUT 0x3362AC
+
+#define mmSIF_RTR_CTRL_3_RL_SRAM_RED 0x3362B4
+
+#define mmSIF_RTR_CTRL_3_E2E_HBM_EN 0x3362EC
+
+#define mmSIF_RTR_CTRL_3_E2E_PCI_EN 0x3362F0
+
+#define mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE 0x3362F4
+
+#define mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE 0x3362F8
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET_EN 0x336404
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_SET 0x336408
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_WRAP 0x33640C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_PCI_CTR_CNT 0x336410
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET_EN 0x336414
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM_CTR_SET 0x336418
+
+#define mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE 0x33641C
+
+#define mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE 0x336420
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET_EN 0x336424
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_SET 0x336428
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_WRAP 0x33642C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_PCI_CTR_CNT 0x336430
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET_EN 0x336434
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM_CTR_SET 0x336438
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_SEL_0 0x336450
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_SEL_1 0x336454
+
+#define mmSIF_RTR_CTRL_3_NON_LIN_EN 0x336480
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_0 0x336500
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_1 0x336504
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_2 0x336508
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_3 0x33650C
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_BANK_4 0x336510
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_0 0x336514
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_1 0x336520
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_2 0x336524
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_3 0x336528
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_4 0x33652C
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_5 0x336530
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_6 0x336534
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_7 0x336538
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_8 0x33653C
+
+#define mmSIF_RTR_CTRL_3_NL_SRAM_OFFSET_9 0x336540
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_0 0x336550
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_1 0x336554
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_2 0x336558
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_3 0x33655C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_4 0x336560
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_5 0x336564
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_6 0x336568
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_7 0x33656C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_8 0x336570
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_9 0x336574
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_10 0x336578
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_11 0x33657C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_12 0x336580
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_13 0x336584
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_14 0x336588
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_15 0x33658C
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_16 0x336590
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_17 0x336594
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18 0x336598
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_0 0x3365E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_1 0x3365E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_2 0x3365EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_3 0x3365F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_4 0x3365F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_5 0x3365F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_6 0x3365FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_7 0x336600
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_8 0x336604
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_9 0x336608
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_10 0x33660C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_11 0x336610
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_12 0x336614
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_13 0x336618
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_14 0x33661C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AW_15 0x336620
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_0 0x336624
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_1 0x336628
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_2 0x33662C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_3 0x336630
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_4 0x336634
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_5 0x336638
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_6 0x33663C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_7 0x336640
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_8 0x336644
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_9 0x336648
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_10 0x33664C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_11 0x336650
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_12 0x336654
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_13 0x336658
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_14 0x33665C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AW_15 0x336660
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_0 0x336664
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_1 0x336668
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_2 0x33666C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_3 0x336670
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_4 0x336674
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_5 0x336678
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_6 0x33667C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_7 0x336680
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_8 0x336684
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_9 0x336688
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_10 0x33668C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_11 0x336690
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_12 0x336694
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_13 0x336698
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_14 0x33669C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AW_15 0x3366A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_0 0x3366A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_1 0x3366A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_2 0x3366AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_3 0x3366B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_4 0x3366B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_5 0x3366B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_6 0x3366BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_7 0x3366C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_8 0x3366C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_9 0x3366C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_10 0x3366CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_11 0x3366D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_12 0x3366D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_13 0x3366D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_14 0x3366DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AW_15 0x3366E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_0 0x3366E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_1 0x3366E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_2 0x3366EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_3 0x3366F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_4 0x3366F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_5 0x3366F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_6 0x3366FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_7 0x336700
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_8 0x336704
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_9 0x336708
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_10 0x33670C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_11 0x336710
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_12 0x336714
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_13 0x336718
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_14 0x33671C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AW_15 0x336720
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_0 0x336724
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_1 0x336728
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_2 0x33672C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_3 0x336730
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_4 0x336734
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_5 0x336738
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_6 0x33673C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_7 0x336740
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_8 0x336744
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_9 0x336748
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_10 0x33674C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_11 0x336750
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_12 0x336754
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_13 0x336758
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_14 0x33675C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AW_15 0x336760
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_0 0x336764
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_1 0x336768
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_2 0x33676C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_3 0x336770
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_4 0x336774
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_5 0x336778
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_6 0x33677C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_7 0x336780
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_8 0x336784
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_9 0x336788
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_10 0x33678C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_11 0x336790
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_12 0x336794
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_13 0x336798
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_14 0x33679C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AW_15 0x3367A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_0 0x3367A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_1 0x3367A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_2 0x3367AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_3 0x3367B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_4 0x3367B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_5 0x3367B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_6 0x3367BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_7 0x3367C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_8 0x3367C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_9 0x3367C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_10 0x3367CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_11 0x3367D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_12 0x3367D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_13 0x3367D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_14 0x3367DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AW_15 0x3367E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_0 0x336824
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_1 0x336828
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_2 0x33682C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_3 0x336830
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_4 0x336834
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_5 0x336838
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_6 0x33683C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_7 0x336840
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_8 0x336844
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_9 0x336848
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_10 0x33684C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_11 0x336850
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_12 0x336854
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_13 0x336858
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_14 0x33685C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_LOW_AR_15 0x336860
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_0 0x336864
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_1 0x336868
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_2 0x33686C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_3 0x336870
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_4 0x336874
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_5 0x336878
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_6 0x33687C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_7 0x336880
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_8 0x336884
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_9 0x336888
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_10 0x33688C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_11 0x336890
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_12 0x336894
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_13 0x336898
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_14 0x33689C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_BASE_HIGH_AR_15 0x3368A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_0 0x3368A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_1 0x3368A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_2 0x3368AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_3 0x3368B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_4 0x3368B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_5 0x3368B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_6 0x3368BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_7 0x3368C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_8 0x3368C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_9 0x3368C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_10 0x3368CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_11 0x3368D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_12 0x3368D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_13 0x3368D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_14 0x3368DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_LOW_AR_15 0x3368E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_0 0x3368E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_1 0x3368E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_2 0x3368EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_3 0x3368F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_4 0x3368F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_5 0x3368F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_6 0x3368FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_7 0x336900
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_8 0x336904
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_9 0x336908
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_10 0x33690C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_11 0x336910
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_12 0x336914
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_13 0x336918
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_14 0x33691C
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_MASK_HIGH_AR_15 0x336920
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_0 0x336924
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_1 0x336928
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_2 0x33692C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_3 0x336930
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_4 0x336934
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_5 0x336938
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_6 0x33693C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_7 0x336940
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_8 0x336944
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_9 0x336948
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_10 0x33694C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_11 0x336950
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_12 0x336954
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_13 0x336958
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_14 0x33695C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_LOW_AR_15 0x336960
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_0 0x336964
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_1 0x336968
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_2 0x33696C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_3 0x336970
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_4 0x336974
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_5 0x336978
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_6 0x33697C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_7 0x336980
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_8 0x336984
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_9 0x336988
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_10 0x33698C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_11 0x336990
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_12 0x336994
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_13 0x336998
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_14 0x33699C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_BASE_HIGH_AR_15 0x3369A0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_0 0x3369A4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_1 0x3369A8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_2 0x3369AC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_3 0x3369B0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_4 0x3369B4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_5 0x3369B8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_6 0x3369BC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_7 0x3369C0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_8 0x3369C4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_9 0x3369C8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_10 0x3369CC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_11 0x3369D0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_12 0x3369D4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_13 0x3369D8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_14 0x3369DC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_LOW_AR_15 0x3369E0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_0 0x3369E4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_1 0x3369E8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_2 0x3369EC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_3 0x3369F0
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_4 0x3369F4
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_5 0x3369F8
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_6 0x3369FC
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_7 0x336A00
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_8 0x336A04
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_9 0x336A08
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_10 0x336A0C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_11 0x336A10
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_12 0x336A14
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_13 0x336A18
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_14 0x336A1C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_MASK_HIGH_AR_15 0x336A20
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AW 0x336A64
+
+#define mmSIF_RTR_CTRL_3_RANGE_SEC_HIT_AR 0x336A68
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_HIT_AW 0x336A6C
+
+#define mmSIF_RTR_CTRL_3_RANGE_PRIV_HIT_AR 0x336A70
+
+#define mmSIF_RTR_CTRL_3_RGL_CFG 0x336B64
+
+#define mmSIF_RTR_CTRL_3_RGL_SHIFT 0x336B68
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_0 0x336B6C
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_1 0x336B70
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_2 0x336B74
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_3 0x336B78
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_4 0x336B7C
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_5 0x336B80
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_6 0x336B84
+
+#define mmSIF_RTR_CTRL_3_RGL_EXPECTED_LAT_7 0x336B88
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_0 0x336BAC
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_1 0x336BB0
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_2 0x336BB4
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_3 0x336BB8
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_4 0x336BBC
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_5 0x336BC0
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_6 0x336BC4
+
+#define mmSIF_RTR_CTRL_3_RGL_TOKEN_7 0x336BC8
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_0 0x336BEC
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_1 0x336BF0
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_2 0x336BF4
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_3 0x336BF8
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_4 0x336BFC
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_5 0x336C00
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_6 0x336C04
+
+#define mmSIF_RTR_CTRL_3_RGL_BANK_ID_7 0x336C08
+
+#define mmSIF_RTR_CTRL_3_RGL_WDT 0x336C2C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_WRAP 0x336C30
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_WRAP 0x336C34
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_WRAP 0x336C38
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_WRAP 0x336C3C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_WRAP 0x336C40
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_WRAP 0x336C44
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_WRAP 0x336C48
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_WRAP 0x336C4C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH0_CTR_CNT 0x336C50
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM0_CH1_CTR_CNT 0x336C54
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH0_CTR_CNT 0x336C58
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM1_CH1_CTR_CNT 0x336C5C
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH0_CTR_CNT 0x336C60
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM2_CH1_CTR_CNT 0x336C64
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH0_CTR_CNT 0x336C68
+
+#define mmSIF_RTR_CTRL_3_E2E_AR_HBM3_CH1_CTR_CNT 0x336C6C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_WRAP 0x336C70
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_WRAP 0x336C74
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_WRAP 0x336C78
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_WRAP 0x336C7C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_WRAP 0x336C80
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_WRAP 0x336C84
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_WRAP 0x336C88
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_WRAP 0x336C8C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH0_CTR_CNT 0x336C90
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM0_CH1_CTR_CNT 0x336C94
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH0_CTR_CNT 0x336C98
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM1_CH1_CTR_CNT 0x336C9C
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH0_CTR_CNT 0x336CA0
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM2_CH1_CTR_CNT 0x336CA4
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH0_CTR_CNT 0x336CA8
+
+#define mmSIF_RTR_CTRL_3_E2E_AW_HBM3_CH1_CTR_CNT 0x336CAC
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_0 0x336CB0
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_1 0x336CB4
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_2 0x336CB8
+
+#define mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3 0x336CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_3_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
new file mode 100644
index 000000000000..ad48773c4bbd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_4_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_4_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_4 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_4_PERM_SEL 0x346108
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_0 0x346114
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_1 0x346118
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_2 0x34611C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_3 0x346120
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_4 0x346124
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_5 0x346128
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_6 0x34612C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_7 0x346130
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_8 0x346134
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_9 0x346138
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_10 0x34613C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_11 0x346140
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_12 0x346144
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_13 0x346148
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_14 0x34614C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_15 0x346150
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_16 0x346154
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_17 0x346158
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_18 0x34615C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_19 0x346160
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_20 0x346164
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_21 0x346168
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_22 0x34616C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_23 0x346170
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_24 0x346174
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_25 0x346178
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_26 0x34617C
+
+#define mmSIF_RTR_CTRL_4_HBM_POLY_H3_27 0x346180
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_0 0x346184
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_1 0x346188
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_2 0x34618C
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_3 0x346190
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_4 0x346194
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_5 0x346198
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_6 0x34619C
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_7 0x3461A0
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_8 0x3461A4
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_9 0x3461A8
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_10 0x3461AC
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_11 0x3461B0
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_12 0x3461B4
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_13 0x3461B8
+
+#define mmSIF_RTR_CTRL_4_SRAM_POLY_H3_14 0x3461BC
+
+#define mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN 0x34626C
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_EN 0x346274
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_SAT 0x346278
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_RST 0x34627C
+
+#define mmSIF_RTR_CTRL_4_RL_HBM_TIMEOUT 0x346280
+
+#define mmSIF_RTR_CTRL_4_SCRAM_HBM_EN 0x346284
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_EN 0x346288
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_SAT 0x34628C
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_RST 0x346290
+
+#define mmSIF_RTR_CTRL_4_RL_PCI_TIMEOUT 0x346294
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_EN 0x34629C
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_SAT 0x3462A0
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_RST 0x3462A4
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_TIMEOUT 0x3462AC
+
+#define mmSIF_RTR_CTRL_4_RL_SRAM_RED 0x3462B4
+
+#define mmSIF_RTR_CTRL_4_E2E_HBM_EN 0x3462EC
+
+#define mmSIF_RTR_CTRL_4_E2E_PCI_EN 0x3462F0
+
+#define mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE 0x3462F4
+
+#define mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE 0x3462F8
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET_EN 0x346404
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_SET 0x346408
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_WRAP 0x34640C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_PCI_CTR_CNT 0x346410
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET_EN 0x346414
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM_CTR_SET 0x346418
+
+#define mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE 0x34641C
+
+#define mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE 0x346420
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET_EN 0x346424
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_SET 0x346428
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_WRAP 0x34642C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_PCI_CTR_CNT 0x346430
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET_EN 0x346434
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM_CTR_SET 0x346438
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_SEL_0 0x346450
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_SEL_1 0x346454
+
+#define mmSIF_RTR_CTRL_4_NON_LIN_EN 0x346480
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_0 0x346500
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_1 0x346504
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_2 0x346508
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_3 0x34650C
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_BANK_4 0x346510
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_0 0x346514
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_1 0x346520
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_2 0x346524
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_3 0x346528
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_4 0x34652C
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_5 0x346530
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_6 0x346534
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_7 0x346538
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_8 0x34653C
+
+#define mmSIF_RTR_CTRL_4_NL_SRAM_OFFSET_9 0x346540
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_0 0x346550
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_1 0x346554
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_2 0x346558
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_3 0x34655C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_4 0x346560
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_5 0x346564
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_6 0x346568
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_7 0x34656C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_8 0x346570
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_9 0x346574
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_10 0x346578
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_11 0x34657C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_12 0x346580
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_13 0x346584
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_14 0x346588
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_15 0x34658C
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_16 0x346590
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_17 0x346594
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18 0x346598
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_0 0x3465E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_1 0x3465E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_2 0x3465EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_3 0x3465F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_4 0x3465F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_5 0x3465F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_6 0x3465FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_7 0x346600
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_8 0x346604
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_9 0x346608
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_10 0x34660C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_11 0x346610
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_12 0x346614
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_13 0x346618
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_14 0x34661C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AW_15 0x346620
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_0 0x346624
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_1 0x346628
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_2 0x34662C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_3 0x346630
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_4 0x346634
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_5 0x346638
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_6 0x34663C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_7 0x346640
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_8 0x346644
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_9 0x346648
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_10 0x34664C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_11 0x346650
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_12 0x346654
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_13 0x346658
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_14 0x34665C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AW_15 0x346660
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_0 0x346664
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_1 0x346668
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_2 0x34666C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_3 0x346670
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_4 0x346674
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_5 0x346678
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_6 0x34667C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_7 0x346680
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_8 0x346684
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_9 0x346688
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_10 0x34668C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_11 0x346690
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_12 0x346694
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_13 0x346698
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_14 0x34669C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AW_15 0x3466A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_0 0x3466A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_1 0x3466A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_2 0x3466AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_3 0x3466B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_4 0x3466B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_5 0x3466B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_6 0x3466BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_7 0x3466C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_8 0x3466C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_9 0x3466C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_10 0x3466CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_11 0x3466D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_12 0x3466D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_13 0x3466D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_14 0x3466DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AW_15 0x3466E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_0 0x3466E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_1 0x3466E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_2 0x3466EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_3 0x3466F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_4 0x3466F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_5 0x3466F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_6 0x3466FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_7 0x346700
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_8 0x346704
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_9 0x346708
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_10 0x34670C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_11 0x346710
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_12 0x346714
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_13 0x346718
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_14 0x34671C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AW_15 0x346720
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_0 0x346724
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_1 0x346728
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_2 0x34672C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_3 0x346730
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_4 0x346734
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_5 0x346738
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_6 0x34673C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_7 0x346740
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_8 0x346744
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_9 0x346748
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_10 0x34674C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_11 0x346750
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_12 0x346754
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_13 0x346758
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_14 0x34675C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AW_15 0x346760
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_0 0x346764
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_1 0x346768
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_2 0x34676C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_3 0x346770
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_4 0x346774
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_5 0x346778
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_6 0x34677C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_7 0x346780
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_8 0x346784
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_9 0x346788
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_10 0x34678C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_11 0x346790
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_12 0x346794
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_13 0x346798
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_14 0x34679C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AW_15 0x3467A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_0 0x3467A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_1 0x3467A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_2 0x3467AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_3 0x3467B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_4 0x3467B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_5 0x3467B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_6 0x3467BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_7 0x3467C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_8 0x3467C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_9 0x3467C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_10 0x3467CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_11 0x3467D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_12 0x3467D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_13 0x3467D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_14 0x3467DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AW_15 0x3467E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_0 0x346824
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_1 0x346828
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_2 0x34682C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_3 0x346830
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_4 0x346834
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_5 0x346838
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_6 0x34683C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_7 0x346840
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_8 0x346844
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_9 0x346848
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_10 0x34684C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_11 0x346850
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_12 0x346854
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_13 0x346858
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_14 0x34685C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_LOW_AR_15 0x346860
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_0 0x346864
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_1 0x346868
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_2 0x34686C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_3 0x346870
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_4 0x346874
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_5 0x346878
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_6 0x34687C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_7 0x346880
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_8 0x346884
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_9 0x346888
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_10 0x34688C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_11 0x346890
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_12 0x346894
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_13 0x346898
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_14 0x34689C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_BASE_HIGH_AR_15 0x3468A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_0 0x3468A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_1 0x3468A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_2 0x3468AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_3 0x3468B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_4 0x3468B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_5 0x3468B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_6 0x3468BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_7 0x3468C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_8 0x3468C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_9 0x3468C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_10 0x3468CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_11 0x3468D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_12 0x3468D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_13 0x3468D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_14 0x3468DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_LOW_AR_15 0x3468E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_0 0x3468E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_1 0x3468E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_2 0x3468EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_3 0x3468F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_4 0x3468F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_5 0x3468F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_6 0x3468FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_7 0x346900
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_8 0x346904
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_9 0x346908
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_10 0x34690C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_11 0x346910
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_12 0x346914
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_13 0x346918
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_14 0x34691C
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_MASK_HIGH_AR_15 0x346920
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_0 0x346924
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_1 0x346928
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_2 0x34692C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_3 0x346930
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_4 0x346934
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_5 0x346938
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_6 0x34693C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_7 0x346940
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_8 0x346944
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_9 0x346948
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_10 0x34694C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_11 0x346950
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_12 0x346954
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_13 0x346958
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_14 0x34695C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_LOW_AR_15 0x346960
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_0 0x346964
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_1 0x346968
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_2 0x34696C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_3 0x346970
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_4 0x346974
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_5 0x346978
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_6 0x34697C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_7 0x346980
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_8 0x346984
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_9 0x346988
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_10 0x34698C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_11 0x346990
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_12 0x346994
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_13 0x346998
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_14 0x34699C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_BASE_HIGH_AR_15 0x3469A0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_0 0x3469A4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_1 0x3469A8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_2 0x3469AC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_3 0x3469B0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_4 0x3469B4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_5 0x3469B8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_6 0x3469BC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_7 0x3469C0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_8 0x3469C4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_9 0x3469C8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_10 0x3469CC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_11 0x3469D0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_12 0x3469D4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_13 0x3469D8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_14 0x3469DC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_LOW_AR_15 0x3469E0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_0 0x3469E4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_1 0x3469E8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_2 0x3469EC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_3 0x3469F0
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_4 0x3469F4
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_5 0x3469F8
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_6 0x3469FC
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_7 0x346A00
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_8 0x346A04
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_9 0x346A08
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_10 0x346A0C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_11 0x346A10
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_12 0x346A14
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_13 0x346A18
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_14 0x346A1C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_MASK_HIGH_AR_15 0x346A20
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AW 0x346A64
+
+#define mmSIF_RTR_CTRL_4_RANGE_SEC_HIT_AR 0x346A68
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_HIT_AW 0x346A6C
+
+#define mmSIF_RTR_CTRL_4_RANGE_PRIV_HIT_AR 0x346A70
+
+#define mmSIF_RTR_CTRL_4_RGL_CFG 0x346B64
+
+#define mmSIF_RTR_CTRL_4_RGL_SHIFT 0x346B68
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_0 0x346B6C
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_1 0x346B70
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_2 0x346B74
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_3 0x346B78
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_4 0x346B7C
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_5 0x346B80
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_6 0x346B84
+
+#define mmSIF_RTR_CTRL_4_RGL_EXPECTED_LAT_7 0x346B88
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_0 0x346BAC
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_1 0x346BB0
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_2 0x346BB4
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_3 0x346BB8
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_4 0x346BBC
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_5 0x346BC0
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_6 0x346BC4
+
+#define mmSIF_RTR_CTRL_4_RGL_TOKEN_7 0x346BC8
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_0 0x346BEC
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_1 0x346BF0
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_2 0x346BF4
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_3 0x346BF8
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_4 0x346BFC
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_5 0x346C00
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_6 0x346C04
+
+#define mmSIF_RTR_CTRL_4_RGL_BANK_ID_7 0x346C08
+
+#define mmSIF_RTR_CTRL_4_RGL_WDT 0x346C2C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_WRAP 0x346C30
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_WRAP 0x346C34
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_WRAP 0x346C38
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_WRAP 0x346C3C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_WRAP 0x346C40
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_WRAP 0x346C44
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_WRAP 0x346C48
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_WRAP 0x346C4C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH0_CTR_CNT 0x346C50
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM0_CH1_CTR_CNT 0x346C54
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH0_CTR_CNT 0x346C58
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM1_CH1_CTR_CNT 0x346C5C
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH0_CTR_CNT 0x346C60
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM2_CH1_CTR_CNT 0x346C64
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH0_CTR_CNT 0x346C68
+
+#define mmSIF_RTR_CTRL_4_E2E_AR_HBM3_CH1_CTR_CNT 0x346C6C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_WRAP 0x346C70
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_WRAP 0x346C74
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_WRAP 0x346C78
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_WRAP 0x346C7C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_WRAP 0x346C80
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_WRAP 0x346C84
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_WRAP 0x346C88
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_WRAP 0x346C8C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH0_CTR_CNT 0x346C90
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM0_CH1_CTR_CNT 0x346C94
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH0_CTR_CNT 0x346C98
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM1_CH1_CTR_CNT 0x346C9C
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH0_CTR_CNT 0x346CA0
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM2_CH1_CTR_CNT 0x346CA4
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH0_CTR_CNT 0x346CA8
+
+#define mmSIF_RTR_CTRL_4_E2E_AW_HBM3_CH1_CTR_CNT 0x346CAC
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_0 0x346CB0
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_1 0x346CB4
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_2 0x346CB8
+
+#define mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3 0x346CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_4_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
new file mode 100644
index 000000000000..6c27850ca3f5
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_5_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_5_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_5 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_5_PERM_SEL 0x356108
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_0 0x356114
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_1 0x356118
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_2 0x35611C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_3 0x356120
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_4 0x356124
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_5 0x356128
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_6 0x35612C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_7 0x356130
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_8 0x356134
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_9 0x356138
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_10 0x35613C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_11 0x356140
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_12 0x356144
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_13 0x356148
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_14 0x35614C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_15 0x356150
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_16 0x356154
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_17 0x356158
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_18 0x35615C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_19 0x356160
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_20 0x356164
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_21 0x356168
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_22 0x35616C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_23 0x356170
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_24 0x356174
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_25 0x356178
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_26 0x35617C
+
+#define mmSIF_RTR_CTRL_5_HBM_POLY_H3_27 0x356180
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_0 0x356184
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_1 0x356188
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_2 0x35618C
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_3 0x356190
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_4 0x356194
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_5 0x356198
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_6 0x35619C
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_7 0x3561A0
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_8 0x3561A4
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_9 0x3561A8
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_10 0x3561AC
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_11 0x3561B0
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_12 0x3561B4
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_13 0x3561B8
+
+#define mmSIF_RTR_CTRL_5_SRAM_POLY_H3_14 0x3561BC
+
+#define mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN 0x35626C
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_EN 0x356274
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_SAT 0x356278
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_RST 0x35627C
+
+#define mmSIF_RTR_CTRL_5_RL_HBM_TIMEOUT 0x356280
+
+#define mmSIF_RTR_CTRL_5_SCRAM_HBM_EN 0x356284
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_EN 0x356288
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_SAT 0x35628C
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_RST 0x356290
+
+#define mmSIF_RTR_CTRL_5_RL_PCI_TIMEOUT 0x356294
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_EN 0x35629C
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_SAT 0x3562A0
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_RST 0x3562A4
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_TIMEOUT 0x3562AC
+
+#define mmSIF_RTR_CTRL_5_RL_SRAM_RED 0x3562B4
+
+#define mmSIF_RTR_CTRL_5_E2E_HBM_EN 0x3562EC
+
+#define mmSIF_RTR_CTRL_5_E2E_PCI_EN 0x3562F0
+
+#define mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE 0x3562F4
+
+#define mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE 0x3562F8
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET_EN 0x356404
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_SET 0x356408
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_WRAP 0x35640C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_PCI_CTR_CNT 0x356410
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET_EN 0x356414
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM_CTR_SET 0x356418
+
+#define mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE 0x35641C
+
+#define mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE 0x356420
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET_EN 0x356424
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_SET 0x356428
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_WRAP 0x35642C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_PCI_CTR_CNT 0x356430
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET_EN 0x356434
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM_CTR_SET 0x356438
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_SEL_0 0x356450
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_SEL_1 0x356454
+
+#define mmSIF_RTR_CTRL_5_NON_LIN_EN 0x356480
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_0 0x356500
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_1 0x356504
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_2 0x356508
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_3 0x35650C
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_BANK_4 0x356510
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_0 0x356514
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_1 0x356520
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_2 0x356524
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_3 0x356528
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_4 0x35652C
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_5 0x356530
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_6 0x356534
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_7 0x356538
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_8 0x35653C
+
+#define mmSIF_RTR_CTRL_5_NL_SRAM_OFFSET_9 0x356540
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_0 0x356550
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_1 0x356554
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_2 0x356558
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_3 0x35655C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_4 0x356560
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_5 0x356564
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_6 0x356568
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_7 0x35656C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_8 0x356570
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_9 0x356574
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_10 0x356578
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_11 0x35657C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_12 0x356580
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_13 0x356584
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_14 0x356588
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_15 0x35658C
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_16 0x356590
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_17 0x356594
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18 0x356598
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_0 0x3565E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_1 0x3565E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_2 0x3565EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_3 0x3565F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_4 0x3565F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_5 0x3565F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_6 0x3565FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_7 0x356600
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_8 0x356604
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_9 0x356608
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_10 0x35660C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_11 0x356610
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_12 0x356614
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_13 0x356618
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_14 0x35661C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AW_15 0x356620
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_0 0x356624
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_1 0x356628
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_2 0x35662C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_3 0x356630
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_4 0x356634
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_5 0x356638
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_6 0x35663C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_7 0x356640
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_8 0x356644
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_9 0x356648
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_10 0x35664C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_11 0x356650
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_12 0x356654
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_13 0x356658
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_14 0x35665C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AW_15 0x356660
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_0 0x356664
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_1 0x356668
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_2 0x35666C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_3 0x356670
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_4 0x356674
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_5 0x356678
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_6 0x35667C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_7 0x356680
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_8 0x356684
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_9 0x356688
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_10 0x35668C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_11 0x356690
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_12 0x356694
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_13 0x356698
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_14 0x35669C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AW_15 0x3566A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_0 0x3566A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_1 0x3566A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_2 0x3566AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_3 0x3566B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_4 0x3566B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_5 0x3566B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_6 0x3566BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_7 0x3566C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_8 0x3566C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_9 0x3566C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_10 0x3566CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_11 0x3566D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_12 0x3566D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_13 0x3566D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_14 0x3566DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AW_15 0x3566E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_0 0x3566E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_1 0x3566E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_2 0x3566EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_3 0x3566F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_4 0x3566F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_5 0x3566F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_6 0x3566FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_7 0x356700
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_8 0x356704
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_9 0x356708
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_10 0x35670C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_11 0x356710
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_12 0x356714
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_13 0x356718
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_14 0x35671C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AW_15 0x356720
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_0 0x356724
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_1 0x356728
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_2 0x35672C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_3 0x356730
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_4 0x356734
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_5 0x356738
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_6 0x35673C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_7 0x356740
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_8 0x356744
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_9 0x356748
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_10 0x35674C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_11 0x356750
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_12 0x356754
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_13 0x356758
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_14 0x35675C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AW_15 0x356760
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_0 0x356764
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_1 0x356768
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_2 0x35676C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_3 0x356770
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_4 0x356774
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_5 0x356778
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_6 0x35677C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_7 0x356780
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_8 0x356784
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_9 0x356788
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_10 0x35678C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_11 0x356790
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_12 0x356794
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_13 0x356798
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_14 0x35679C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AW_15 0x3567A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_0 0x3567A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_1 0x3567A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_2 0x3567AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_3 0x3567B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_4 0x3567B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_5 0x3567B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_6 0x3567BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_7 0x3567C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_8 0x3567C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_9 0x3567C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_10 0x3567CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_11 0x3567D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_12 0x3567D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_13 0x3567D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_14 0x3567DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AW_15 0x3567E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_0 0x356824
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_1 0x356828
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_2 0x35682C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_3 0x356830
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_4 0x356834
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_5 0x356838
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_6 0x35683C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_7 0x356840
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_8 0x356844
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_9 0x356848
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_10 0x35684C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_11 0x356850
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_12 0x356854
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_13 0x356858
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_14 0x35685C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_LOW_AR_15 0x356860
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_0 0x356864
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_1 0x356868
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_2 0x35686C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_3 0x356870
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_4 0x356874
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_5 0x356878
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_6 0x35687C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_7 0x356880
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_8 0x356884
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_9 0x356888
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_10 0x35688C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_11 0x356890
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_12 0x356894
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_13 0x356898
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_14 0x35689C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_BASE_HIGH_AR_15 0x3568A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_0 0x3568A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_1 0x3568A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_2 0x3568AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_3 0x3568B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_4 0x3568B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_5 0x3568B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_6 0x3568BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_7 0x3568C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_8 0x3568C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_9 0x3568C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_10 0x3568CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_11 0x3568D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_12 0x3568D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_13 0x3568D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_14 0x3568DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_LOW_AR_15 0x3568E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_0 0x3568E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_1 0x3568E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_2 0x3568EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_3 0x3568F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_4 0x3568F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_5 0x3568F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_6 0x3568FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_7 0x356900
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_8 0x356904
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_9 0x356908
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_10 0x35690C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_11 0x356910
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_12 0x356914
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_13 0x356918
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_14 0x35691C
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_MASK_HIGH_AR_15 0x356920
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_0 0x356924
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_1 0x356928
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_2 0x35692C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_3 0x356930
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_4 0x356934
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_5 0x356938
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_6 0x35693C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_7 0x356940
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_8 0x356944
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_9 0x356948
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_10 0x35694C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_11 0x356950
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_12 0x356954
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_13 0x356958
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_14 0x35695C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_LOW_AR_15 0x356960
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_0 0x356964
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_1 0x356968
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_2 0x35696C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_3 0x356970
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_4 0x356974
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_5 0x356978
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_6 0x35697C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_7 0x356980
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_8 0x356984
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_9 0x356988
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_10 0x35698C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_11 0x356990
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_12 0x356994
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_13 0x356998
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_14 0x35699C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_BASE_HIGH_AR_15 0x3569A0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_0 0x3569A4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_1 0x3569A8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_2 0x3569AC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_3 0x3569B0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_4 0x3569B4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_5 0x3569B8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_6 0x3569BC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_7 0x3569C0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_8 0x3569C4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_9 0x3569C8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_10 0x3569CC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_11 0x3569D0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_12 0x3569D4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_13 0x3569D8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_14 0x3569DC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_LOW_AR_15 0x3569E0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_0 0x3569E4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_1 0x3569E8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_2 0x3569EC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_3 0x3569F0
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_4 0x3569F4
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_5 0x3569F8
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_6 0x3569FC
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_7 0x356A00
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_8 0x356A04
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_9 0x356A08
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_10 0x356A0C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_11 0x356A10
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_12 0x356A14
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_13 0x356A18
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_14 0x356A1C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_MASK_HIGH_AR_15 0x356A20
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AW 0x356A64
+
+#define mmSIF_RTR_CTRL_5_RANGE_SEC_HIT_AR 0x356A68
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AW 0x356A6C
+
+#define mmSIF_RTR_CTRL_5_RANGE_PRIV_HIT_AR 0x356A70
+
+#define mmSIF_RTR_CTRL_5_RGL_CFG 0x356B64
+
+#define mmSIF_RTR_CTRL_5_RGL_SHIFT 0x356B68
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_0 0x356B6C
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_1 0x356B70
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_2 0x356B74
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_3 0x356B78
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_4 0x356B7C
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_5 0x356B80
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_6 0x356B84
+
+#define mmSIF_RTR_CTRL_5_RGL_EXPECTED_LAT_7 0x356B88
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_0 0x356BAC
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_1 0x356BB0
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_2 0x356BB4
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_3 0x356BB8
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_4 0x356BBC
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_5 0x356BC0
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_6 0x356BC4
+
+#define mmSIF_RTR_CTRL_5_RGL_TOKEN_7 0x356BC8
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_0 0x356BEC
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_1 0x356BF0
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_2 0x356BF4
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_3 0x356BF8
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_4 0x356BFC
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_5 0x356C00
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_6 0x356C04
+
+#define mmSIF_RTR_CTRL_5_RGL_BANK_ID_7 0x356C08
+
+#define mmSIF_RTR_CTRL_5_RGL_WDT 0x356C2C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_WRAP 0x356C30
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_WRAP 0x356C34
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_WRAP 0x356C38
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_WRAP 0x356C3C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_WRAP 0x356C40
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_WRAP 0x356C44
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_WRAP 0x356C48
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_WRAP 0x356C4C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH0_CTR_CNT 0x356C50
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM0_CH1_CTR_CNT 0x356C54
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH0_CTR_CNT 0x356C58
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM1_CH1_CTR_CNT 0x356C5C
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH0_CTR_CNT 0x356C60
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM2_CH1_CTR_CNT 0x356C64
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH0_CTR_CNT 0x356C68
+
+#define mmSIF_RTR_CTRL_5_E2E_AR_HBM3_CH1_CTR_CNT 0x356C6C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_WRAP 0x356C70
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_WRAP 0x356C74
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_WRAP 0x356C78
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_WRAP 0x356C7C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_WRAP 0x356C80
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_WRAP 0x356C84
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_WRAP 0x356C88
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_WRAP 0x356C8C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH0_CTR_CNT 0x356C90
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM0_CH1_CTR_CNT 0x356C94
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH0_CTR_CNT 0x356C98
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM1_CH1_CTR_CNT 0x356C9C
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH0_CTR_CNT 0x356CA0
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM2_CH1_CTR_CNT 0x356CA4
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH0_CTR_CNT 0x356CA8
+
+#define mmSIF_RTR_CTRL_5_E2E_AW_HBM3_CH1_CTR_CNT 0x356CAC
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_0 0x356CB0
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_1 0x356CB4
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_2 0x356CB8
+
+#define mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3 0x356CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_5_REGS_H_ */
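Editor's aside (not part of the patch): the macros in these generated headers are plain MMIO offsets into the device's register space. As a hedged illustration only, the sketch below shows how one such offset might be consumed once the register BAR has been ioremapped; the `regs_base` parameter and the `gaudi_set_rtr5_perm_sel()` helper are hypothetical names for this example, not symbols from the habanalabs driver, and only the `mmSIF_RTR_CTRL_5_PERM_SEL` value is taken from the header above.

```c
/*
 * Hypothetical sketch, not part of this patch: writing one of the
 * SIF_RTR_CTRL_5 registers through an ioremapped register BAR.
 * regs_base is assumed to already map the base that these mmSIF_*
 * offsets are relative to.
 */
#include <linux/io.h>
#include <linux/types.h>

#define mmSIF_RTR_CTRL_5_PERM_SEL 0x356108 /* value from the header above */

static void gaudi_set_rtr5_perm_sel(void __iomem *regs_base, u32 val)
{
	/* 32-bit MMIO write at the register's offset into the BAR */
	writel(val, regs_base + mmSIF_RTR_CTRL_5_PERM_SEL);
}
```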
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
new file mode 100644
index 000000000000..a9ea89aa6405
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_6_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_6_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_6 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_6_PERM_SEL 0x366108
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_0 0x366114
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_1 0x366118
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_2 0x36611C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_3 0x366120
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_4 0x366124
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_5 0x366128
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_6 0x36612C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_7 0x366130
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_8 0x366134
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_9 0x366138
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_10 0x36613C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_11 0x366140
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_12 0x366144
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_13 0x366148
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_14 0x36614C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_15 0x366150
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_16 0x366154
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_17 0x366158
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_18 0x36615C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_19 0x366160
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_20 0x366164
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_21 0x366168
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_22 0x36616C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_23 0x366170
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_24 0x366174
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_25 0x366178
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_26 0x36617C
+
+#define mmSIF_RTR_CTRL_6_HBM_POLY_H3_27 0x366180
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_0 0x366184
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_1 0x366188
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_2 0x36618C
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_3 0x366190
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_4 0x366194
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_5 0x366198
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_6 0x36619C
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_7 0x3661A0
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_8 0x3661A4
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_9 0x3661A8
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_10 0x3661AC
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_11 0x3661B0
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_12 0x3661B4
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_13 0x3661B8
+
+#define mmSIF_RTR_CTRL_6_SRAM_POLY_H3_14 0x3661BC
+
+#define mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN 0x36626C
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_EN 0x366274
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_SAT 0x366278
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_RST 0x36627C
+
+#define mmSIF_RTR_CTRL_6_RL_HBM_TIMEOUT 0x366280
+
+#define mmSIF_RTR_CTRL_6_SCRAM_HBM_EN 0x366284
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_EN 0x366288
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_SAT 0x36628C
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_RST 0x366290
+
+#define mmSIF_RTR_CTRL_6_RL_PCI_TIMEOUT 0x366294
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_EN 0x36629C
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_SAT 0x3662A0
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_RST 0x3662A4
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_TIMEOUT 0x3662AC
+
+#define mmSIF_RTR_CTRL_6_RL_SRAM_RED 0x3662B4
+
+#define mmSIF_RTR_CTRL_6_E2E_HBM_EN 0x3662EC
+
+#define mmSIF_RTR_CTRL_6_E2E_PCI_EN 0x3662F0
+
+#define mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE 0x3662F4
+
+#define mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE 0x3662F8
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET_EN 0x366404
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_SET 0x366408
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_WRAP 0x36640C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_PCI_CTR_CNT 0x366410
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET_EN 0x366414
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM_CTR_SET 0x366418
+
+#define mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE 0x36641C
+
+#define mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE 0x366420
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET_EN 0x366424
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_SET 0x366428
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_WRAP 0x36642C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_PCI_CTR_CNT 0x366430
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET_EN 0x366434
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM_CTR_SET 0x366438
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_SEL_0 0x366450
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_SEL_1 0x366454
+
+#define mmSIF_RTR_CTRL_6_NON_LIN_EN 0x366480
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_0 0x366500
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_1 0x366504
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_2 0x366508
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_3 0x36650C
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_BANK_4 0x366510
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_0 0x366514
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_1 0x366520
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_2 0x366524
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_3 0x366528
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_4 0x36652C
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_5 0x366530
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_6 0x366534
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_7 0x366538
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_8 0x36653C
+
+#define mmSIF_RTR_CTRL_6_NL_SRAM_OFFSET_9 0x366540
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_0 0x366550
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_1 0x366554
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_2 0x366558
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_3 0x36655C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_4 0x366560
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_5 0x366564
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_6 0x366568
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_7 0x36656C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_8 0x366570
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_9 0x366574
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_10 0x366578
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_11 0x36657C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_12 0x366580
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_13 0x366584
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_14 0x366588
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_15 0x36658C
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_16 0x366590
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_17 0x366594
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18 0x366598
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_0 0x3665E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_1 0x3665E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_2 0x3665EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_3 0x3665F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_4 0x3665F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_5 0x3665F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_6 0x3665FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_7 0x366600
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_8 0x366604
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_9 0x366608
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_10 0x36660C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_11 0x366610
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_12 0x366614
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_13 0x366618
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_14 0x36661C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AW_15 0x366620
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_0 0x366624
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_1 0x366628
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_2 0x36662C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_3 0x366630
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_4 0x366634
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_5 0x366638
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_6 0x36663C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_7 0x366640
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_8 0x366644
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_9 0x366648
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_10 0x36664C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_11 0x366650
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_12 0x366654
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_13 0x366658
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_14 0x36665C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AW_15 0x366660
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_0 0x366664
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_1 0x366668
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_2 0x36666C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_3 0x366670
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_4 0x366674
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_5 0x366678
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_6 0x36667C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_7 0x366680
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_8 0x366684
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_9 0x366688
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_10 0x36668C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_11 0x366690
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_12 0x366694
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_13 0x366698
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_14 0x36669C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AW_15 0x3666A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_0 0x3666A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_1 0x3666A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_2 0x3666AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_3 0x3666B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_4 0x3666B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_5 0x3666B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_6 0x3666BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_7 0x3666C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_8 0x3666C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_9 0x3666C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_10 0x3666CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_11 0x3666D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_12 0x3666D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_13 0x3666D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_14 0x3666DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AW_15 0x3666E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_0 0x3666E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_1 0x3666E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_2 0x3666EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_3 0x3666F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_4 0x3666F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_5 0x3666F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_6 0x3666FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_7 0x366700
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_8 0x366704
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_9 0x366708
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_10 0x36670C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_11 0x366710
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_12 0x366714
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_13 0x366718
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_14 0x36671C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AW_15 0x366720
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_0 0x366724
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_1 0x366728
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_2 0x36672C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_3 0x366730
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_4 0x366734
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_5 0x366738
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_6 0x36673C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_7 0x366740
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_8 0x366744
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_9 0x366748
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_10 0x36674C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_11 0x366750
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_12 0x366754
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_13 0x366758
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_14 0x36675C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AW_15 0x366760
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_0 0x366764
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_1 0x366768
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_2 0x36676C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_3 0x366770
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_4 0x366774
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_5 0x366778
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_6 0x36677C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_7 0x366780
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_8 0x366784
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_9 0x366788
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_10 0x36678C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_11 0x366790
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_12 0x366794
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_13 0x366798
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_14 0x36679C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AW_15 0x3667A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_0 0x3667A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_1 0x3667A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_2 0x3667AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_3 0x3667B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_4 0x3667B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_5 0x3667B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_6 0x3667BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_7 0x3667C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_8 0x3667C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_9 0x3667C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_10 0x3667CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_11 0x3667D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_12 0x3667D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_13 0x3667D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_14 0x3667DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AW_15 0x3667E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_0 0x366824
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_1 0x366828
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_2 0x36682C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_3 0x366830
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_4 0x366834
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_5 0x366838
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_6 0x36683C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_7 0x366840
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_8 0x366844
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_9 0x366848
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_10 0x36684C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_11 0x366850
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_12 0x366854
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_13 0x366858
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_14 0x36685C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_LOW_AR_15 0x366860
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_0 0x366864
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_1 0x366868
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_2 0x36686C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_3 0x366870
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_4 0x366874
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_5 0x366878
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_6 0x36687C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_7 0x366880
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_8 0x366884
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_9 0x366888
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_10 0x36688C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_11 0x366890
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_12 0x366894
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_13 0x366898
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_14 0x36689C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_BASE_HIGH_AR_15 0x3668A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_0 0x3668A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_1 0x3668A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_2 0x3668AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_3 0x3668B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_4 0x3668B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_5 0x3668B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_6 0x3668BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_7 0x3668C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_8 0x3668C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_9 0x3668C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_10 0x3668CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_11 0x3668D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_12 0x3668D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_13 0x3668D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_14 0x3668DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_LOW_AR_15 0x3668E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_0 0x3668E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_1 0x3668E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_2 0x3668EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_3 0x3668F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_4 0x3668F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_5 0x3668F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_6 0x3668FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_7 0x366900
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_8 0x366904
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_9 0x366908
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_10 0x36690C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_11 0x366910
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_12 0x366914
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_13 0x366918
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_14 0x36691C
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_MASK_HIGH_AR_15 0x366920
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_0 0x366924
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_1 0x366928
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_2 0x36692C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_3 0x366930
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_4 0x366934
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_5 0x366938
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_6 0x36693C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_7 0x366940
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_8 0x366944
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_9 0x366948
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_10 0x36694C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_11 0x366950
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_12 0x366954
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_13 0x366958
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_14 0x36695C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_LOW_AR_15 0x366960
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_0 0x366964
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_1 0x366968
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_2 0x36696C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_3 0x366970
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_4 0x366974
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_5 0x366978
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_6 0x36697C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_7 0x366980
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_8 0x366984
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_9 0x366988
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_10 0x36698C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_11 0x366990
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_12 0x366994
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_13 0x366998
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_14 0x36699C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_BASE_HIGH_AR_15 0x3669A0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_0 0x3669A4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_1 0x3669A8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_2 0x3669AC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_3 0x3669B0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_4 0x3669B4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_5 0x3669B8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_6 0x3669BC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_7 0x3669C0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_8 0x3669C4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_9 0x3669C8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_10 0x3669CC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_11 0x3669D0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_12 0x3669D4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_13 0x3669D8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_14 0x3669DC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_LOW_AR_15 0x3669E0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_0 0x3669E4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_1 0x3669E8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_2 0x3669EC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_3 0x3669F0
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_4 0x3669F4
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_5 0x3669F8
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_6 0x3669FC
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_7 0x366A00
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_8 0x366A04
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_9 0x366A08
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_10 0x366A0C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_11 0x366A10
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_12 0x366A14
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_13 0x366A18
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_14 0x366A1C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_MASK_HIGH_AR_15 0x366A20
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AW 0x366A64
+
+#define mmSIF_RTR_CTRL_6_RANGE_SEC_HIT_AR 0x366A68
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_HIT_AW 0x366A6C
+
+#define mmSIF_RTR_CTRL_6_RANGE_PRIV_HIT_AR 0x366A70
+
+#define mmSIF_RTR_CTRL_6_RGL_CFG 0x366B64
+
+#define mmSIF_RTR_CTRL_6_RGL_SHIFT 0x366B68
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_0 0x366B6C
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_1 0x366B70
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_2 0x366B74
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_3 0x366B78
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_4 0x366B7C
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_5 0x366B80
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_6 0x366B84
+
+#define mmSIF_RTR_CTRL_6_RGL_EXPECTED_LAT_7 0x366B88
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_0 0x366BAC
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_1 0x366BB0
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_2 0x366BB4
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_3 0x366BB8
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_4 0x366BBC
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_5 0x366BC0
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_6 0x366BC4
+
+#define mmSIF_RTR_CTRL_6_RGL_TOKEN_7 0x366BC8
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_0 0x366BEC
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_1 0x366BF0
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_2 0x366BF4
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_3 0x366BF8
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_4 0x366BFC
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_5 0x366C00
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_6 0x366C04
+
+#define mmSIF_RTR_CTRL_6_RGL_BANK_ID_7 0x366C08
+
+#define mmSIF_RTR_CTRL_6_RGL_WDT 0x366C2C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_WRAP 0x366C30
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_WRAP 0x366C34
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_WRAP 0x366C38
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_WRAP 0x366C3C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_WRAP 0x366C40
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_WRAP 0x366C44
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_WRAP 0x366C48
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_WRAP 0x366C4C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH0_CTR_CNT 0x366C50
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM0_CH1_CTR_CNT 0x366C54
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH0_CTR_CNT 0x366C58
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM1_CH1_CTR_CNT 0x366C5C
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH0_CTR_CNT 0x366C60
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM2_CH1_CTR_CNT 0x366C64
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH0_CTR_CNT 0x366C68
+
+#define mmSIF_RTR_CTRL_6_E2E_AR_HBM3_CH1_CTR_CNT 0x366C6C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_WRAP 0x366C70
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_WRAP 0x366C74
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_WRAP 0x366C78
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_WRAP 0x366C7C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_WRAP 0x366C80
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_WRAP 0x366C84
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_WRAP 0x366C88
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_WRAP 0x366C8C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH0_CTR_CNT 0x366C90
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM0_CH1_CTR_CNT 0x366C94
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH0_CTR_CNT 0x366C98
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM1_CH1_CTR_CNT 0x366C9C
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH0_CTR_CNT 0x366CA0
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM2_CH1_CTR_CNT 0x366CA4
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH0_CTR_CNT 0x366CA8
+
+#define mmSIF_RTR_CTRL_6_E2E_AW_HBM3_CH1_CTR_CNT 0x366CAC
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_0 0x366CB0
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_1 0x366CB4
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_2 0x366CB8
+
+#define mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3 0x366CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_6_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
new file mode 100644
index 000000000000..a37772c531d9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
@@ -0,0 +1,896 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_SIF_RTR_CTRL_7_REGS_H_
+#define ASIC_REG_SIF_RTR_CTRL_7_REGS_H_
+
+/*
+ *****************************************
+ * SIF_RTR_CTRL_7 (Prototype: RTR_CTRL)
+ *****************************************
+ */
+
+#define mmSIF_RTR_CTRL_7_PERM_SEL 0x376108
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_0 0x376114
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_1 0x376118
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_2 0x37611C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_3 0x376120
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_4 0x376124
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_5 0x376128
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_6 0x37612C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_7 0x376130
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_8 0x376134
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_9 0x376138
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_10 0x37613C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_11 0x376140
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_12 0x376144
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_13 0x376148
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_14 0x37614C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_15 0x376150
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_16 0x376154
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_17 0x376158
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_18 0x37615C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_19 0x376160
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_20 0x376164
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_21 0x376168
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_22 0x37616C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_23 0x376170
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_24 0x376174
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_25 0x376178
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_26 0x37617C
+
+#define mmSIF_RTR_CTRL_7_HBM_POLY_H3_27 0x376180
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_0 0x376184
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_1 0x376188
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_2 0x37618C
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_3 0x376190
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_4 0x376194
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_5 0x376198
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_6 0x37619C
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_7 0x3761A0
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_8 0x3761A4
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_9 0x3761A8
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_10 0x3761AC
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_11 0x3761B0
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_12 0x3761B4
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_13 0x3761B8
+
+#define mmSIF_RTR_CTRL_7_SRAM_POLY_H3_14 0x3761BC
+
+#define mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN 0x37626C
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_EN 0x376274
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_SAT 0x376278
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_RST 0x37627C
+
+#define mmSIF_RTR_CTRL_7_RL_HBM_TIMEOUT 0x376280
+
+#define mmSIF_RTR_CTRL_7_SCRAM_HBM_EN 0x376284
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_EN 0x376288
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_SAT 0x37628C
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_RST 0x376290
+
+#define mmSIF_RTR_CTRL_7_RL_PCI_TIMEOUT 0x376294
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_EN 0x37629C
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_SAT 0x3762A0
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_RST 0x3762A4
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_TIMEOUT 0x3762AC
+
+#define mmSIF_RTR_CTRL_7_RL_SRAM_RED 0x3762B4
+
+#define mmSIF_RTR_CTRL_7_E2E_HBM_EN 0x3762EC
+
+#define mmSIF_RTR_CTRL_7_E2E_PCI_EN 0x3762F0
+
+#define mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE 0x3762F4
+
+#define mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE 0x3762F8
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET_EN 0x376404
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_SET 0x376408
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_WRAP 0x37640C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_PCI_CTR_CNT 0x376410
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET_EN 0x376414
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM_CTR_SET 0x376418
+
+#define mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE 0x37641C
+
+#define mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE 0x376420
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET_EN 0x376424
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_SET 0x376428
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_WRAP 0x37642C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_PCI_CTR_CNT 0x376430
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET_EN 0x376434
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM_CTR_SET 0x376438
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_SEL_0 0x376450
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_SEL_1 0x376454
+
+#define mmSIF_RTR_CTRL_7_NON_LIN_EN 0x376480
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_0 0x376500
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_1 0x376504
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_2 0x376508
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_3 0x37650C
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_BANK_4 0x376510
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_0 0x376514
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_1 0x376520
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_2 0x376524
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_3 0x376528
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_4 0x37652C
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_5 0x376530
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_6 0x376534
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_7 0x376538
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_8 0x37653C
+
+#define mmSIF_RTR_CTRL_7_NL_SRAM_OFFSET_9 0x376540
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_0 0x376550
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_1 0x376554
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_2 0x376558
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_3 0x37655C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_4 0x376560
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_5 0x376564
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_6 0x376568
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_7 0x37656C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_8 0x376570
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_9 0x376574
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_10 0x376578
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_11 0x37657C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_12 0x376580
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_13 0x376584
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_14 0x376588
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_15 0x37658C
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_16 0x376590
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_17 0x376594
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18 0x376598
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 0x3765E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_1 0x3765E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_2 0x3765EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_3 0x3765F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_4 0x3765F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_5 0x3765F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_6 0x3765FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_7 0x376600
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_8 0x376604
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_9 0x376608
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_10 0x37660C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_11 0x376610
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_12 0x376614
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_13 0x376618
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_14 0x37661C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_15 0x376620
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 0x376624
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_1 0x376628
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_2 0x37662C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_3 0x376630
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_4 0x376634
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_5 0x376638
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_6 0x37663C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_7 0x376640
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_8 0x376644
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_9 0x376648
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_10 0x37664C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_11 0x376650
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_12 0x376654
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_13 0x376658
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_14 0x37665C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_15 0x376660
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 0x376664
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_1 0x376668
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_2 0x37666C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_3 0x376670
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_4 0x376674
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_5 0x376678
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_6 0x37667C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_7 0x376680
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_8 0x376684
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_9 0x376688
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_10 0x37668C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_11 0x376690
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_12 0x376694
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_13 0x376698
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_14 0x37669C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_15 0x3766A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 0x3766A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_1 0x3766A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_2 0x3766AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_3 0x3766B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_4 0x3766B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_5 0x3766B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_6 0x3766BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_7 0x3766C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_8 0x3766C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_9 0x3766C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_10 0x3766CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_11 0x3766D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_12 0x3766D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_13 0x3766D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_14 0x3766DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_15 0x3766E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_0 0x3766E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_1 0x3766E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_2 0x3766EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_3 0x3766F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_4 0x3766F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_5 0x3766F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_6 0x3766FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_7 0x376700
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_8 0x376704
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_9 0x376708
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_10 0x37670C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_11 0x376710
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_12 0x376714
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_13 0x376718
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_14 0x37671C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AW_15 0x376720
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_0 0x376724
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_1 0x376728
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_2 0x37672C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_3 0x376730
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_4 0x376734
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_5 0x376738
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_6 0x37673C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_7 0x376740
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_8 0x376744
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_9 0x376748
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_10 0x37674C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_11 0x376750
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_12 0x376754
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_13 0x376758
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_14 0x37675C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AW_15 0x376760
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_0 0x376764
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_1 0x376768
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_2 0x37676C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_3 0x376770
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_4 0x376774
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_5 0x376778
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_6 0x37677C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_7 0x376780
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_8 0x376784
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_9 0x376788
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_10 0x37678C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_11 0x376790
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_12 0x376794
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_13 0x376798
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_14 0x37679C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AW_15 0x3767A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_0 0x3767A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_1 0x3767A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_2 0x3767AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_3 0x3767B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_4 0x3767B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_5 0x3767B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_6 0x3767BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_7 0x3767C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_8 0x3767C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_9 0x3767C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_10 0x3767CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_11 0x3767D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_12 0x3767D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_13 0x3767D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_14 0x3767DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AW_15 0x3767E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 0x376824
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_1 0x376828
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_2 0x37682C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_3 0x376830
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_4 0x376834
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_5 0x376838
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_6 0x37683C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_7 0x376840
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_8 0x376844
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_9 0x376848
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_10 0x37684C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_11 0x376850
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_12 0x376854
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_13 0x376858
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_14 0x37685C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_15 0x376860
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 0x376864
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_1 0x376868
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_2 0x37686C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_3 0x376870
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_4 0x376874
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_5 0x376878
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_6 0x37687C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_7 0x376880
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_8 0x376884
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_9 0x376888
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_10 0x37688C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_11 0x376890
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_12 0x376894
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_13 0x376898
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_14 0x37689C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_15 0x3768A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 0x3768A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_1 0x3768A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_2 0x3768AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_3 0x3768B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_4 0x3768B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_5 0x3768B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_6 0x3768BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_7 0x3768C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_8 0x3768C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_9 0x3768C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_10 0x3768CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_11 0x3768D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_12 0x3768D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_13 0x3768D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_14 0x3768DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_15 0x3768E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_0 0x3768E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_1 0x3768E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_2 0x3768EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_3 0x3768F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_4 0x3768F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_5 0x3768F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_6 0x3768FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_7 0x376900
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_8 0x376904
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_9 0x376908
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_10 0x37690C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_11 0x376910
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_12 0x376914
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_13 0x376918
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_14 0x37691C
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AR_15 0x376920
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_0 0x376924
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_1 0x376928
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_2 0x37692C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_3 0x376930
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_4 0x376934
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_5 0x376938
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_6 0x37693C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_7 0x376940
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_8 0x376944
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_9 0x376948
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_10 0x37694C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_11 0x376950
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_12 0x376954
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_13 0x376958
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_14 0x37695C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_LOW_AR_15 0x376960
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_0 0x376964
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_1 0x376968
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_2 0x37696C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_3 0x376970
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_4 0x376974
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_5 0x376978
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_6 0x37697C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_7 0x376980
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_8 0x376984
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_9 0x376988
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_10 0x37698C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_11 0x376990
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_12 0x376994
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_13 0x376998
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_14 0x37699C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_BASE_HIGH_AR_15 0x3769A0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_0 0x3769A4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_1 0x3769A8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_2 0x3769AC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_3 0x3769B0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_4 0x3769B4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_5 0x3769B8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_6 0x3769BC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_7 0x3769C0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_8 0x3769C4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_9 0x3769C8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_10 0x3769CC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_11 0x3769D0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_12 0x3769D4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_13 0x3769D8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_14 0x3769DC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_LOW_AR_15 0x3769E0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_0 0x3769E4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_1 0x3769E8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_2 0x3769EC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_3 0x3769F0
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_4 0x3769F4
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_5 0x3769F8
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_6 0x3769FC
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_7 0x376A00
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_8 0x376A04
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_9 0x376A08
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_10 0x376A0C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_11 0x376A10
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_12 0x376A14
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_13 0x376A18
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_14 0x376A1C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_MASK_HIGH_AR_15 0x376A20
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 0x376A64
+
+#define mmSIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 0x376A68
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_HIT_AW 0x376A6C
+
+#define mmSIF_RTR_CTRL_7_RANGE_PRIV_HIT_AR 0x376A70
+
+#define mmSIF_RTR_CTRL_7_RGL_CFG 0x376B64
+
+#define mmSIF_RTR_CTRL_7_RGL_SHIFT 0x376B68
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_0 0x376B6C
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_1 0x376B70
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_2 0x376B74
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_3 0x376B78
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_4 0x376B7C
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_5 0x376B80
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_6 0x376B84
+
+#define mmSIF_RTR_CTRL_7_RGL_EXPECTED_LAT_7 0x376B88
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_0 0x376BAC
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_1 0x376BB0
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_2 0x376BB4
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_3 0x376BB8
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_4 0x376BBC
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_5 0x376BC0
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_6 0x376BC4
+
+#define mmSIF_RTR_CTRL_7_RGL_TOKEN_7 0x376BC8
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_0 0x376BEC
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_1 0x376BF0
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_2 0x376BF4
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_3 0x376BF8
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_4 0x376BFC
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_5 0x376C00
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_6 0x376C04
+
+#define mmSIF_RTR_CTRL_7_RGL_BANK_ID_7 0x376C08
+
+#define mmSIF_RTR_CTRL_7_RGL_WDT 0x376C2C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_WRAP 0x376C30
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_WRAP 0x376C34
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_WRAP 0x376C38
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_WRAP 0x376C3C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_WRAP 0x376C40
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_WRAP 0x376C44
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_WRAP 0x376C48
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_WRAP 0x376C4C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH0_CTR_CNT 0x376C50
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM0_CH1_CTR_CNT 0x376C54
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH0_CTR_CNT 0x376C58
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM1_CH1_CTR_CNT 0x376C5C
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH0_CTR_CNT 0x376C60
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM2_CH1_CTR_CNT 0x376C64
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH0_CTR_CNT 0x376C68
+
+#define mmSIF_RTR_CTRL_7_E2E_AR_HBM3_CH1_CTR_CNT 0x376C6C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_WRAP 0x376C70
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_WRAP 0x376C74
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_WRAP 0x376C78
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_WRAP 0x376C7C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_WRAP 0x376C80
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_WRAP 0x376C84
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_WRAP 0x376C88
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_WRAP 0x376C8C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH0_CTR_CNT 0x376C90
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM0_CH1_CTR_CNT 0x376C94
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH0_CTR_CNT 0x376C98
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM1_CH1_CTR_CNT 0x376C9C
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH0_CTR_CNT 0x376CA0
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM2_CH1_CTR_CNT 0x376CA4
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH0_CTR_CNT 0x376CA8
+
+#define mmSIF_RTR_CTRL_7_E2E_AW_HBM3_CH1_CTR_CNT 0x376CAC
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_0 0x376CB0
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_1 0x376CB4
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_2 0x376CB8
+
+#define mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3 0x376CBC
+
+#endif /* ASIC_REG_SIF_RTR_CTRL_7_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
new file mode 100644
index 000000000000..07d2a9000102
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_STLB_REGS_H_
+#define ASIC_REG_STLB_REGS_H_
+
+/*
+ *****************************************
+ * STLB (Prototype: STLB)
+ *****************************************
+ */
+
+#define mmSTLB_CACHE_INV 0xC12010
+
+#define mmSTLB_CACHE_INV_BASE_39_8 0xC12014
+
+#define mmSTLB_CACHE_INV_BASE_49_40 0xC12018
+
+#define mmSTLB_STLB_FEATURE_EN 0xC1201C
+
+#define mmSTLB_STLB_AXI_CACHE 0xC12020
+
+#define mmSTLB_HOP_CONFIGURATION 0xC12024
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0xC12028
+
+#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0xC1202C
+
+#define mmSTLB_LINK_LIST 0xC12030
+
+#define mmSTLB_INV_ALL_START 0xC12034
+
+#define mmSTLB_INV_ALL_SET 0xC12038
+
+#define mmSTLB_INV_PS 0xC1203C
+
+#define mmSTLB_INV_CONSUMER_INDEX 0xC12040
+
+#define mmSTLB_INV_HIT_COUNT 0xC12044
+
+#define mmSTLB_INV_SET 0xC12048
+
+#define mmSTLB_SRAM_INIT 0xC1204C
+
+#define mmSTLB_MEM_CACHE_INVALIDATION 0xC12050
+
+#define mmSTLB_MEM_CACHE_INV_STATUS 0xC12054
+
+#define mmSTLB_MEM_CACHE_BASE_38_7 0xC12058
+
+#define mmSTLB_MEM_CACHE_BASE_49_39 0xC1205C
+
+#define mmSTLB_MEM_CACHE_CONFIG 0xC12060
+
+#define mmSTLB_SET_THRESHOLD_HOP4 0xC12064
+
+#define mmSTLB_SET_THRESHOLD_HOP3 0xC12068
+
+#define mmSTLB_SET_THRESHOLD_HOP2 0xC1206C
+
+#define mmSTLB_SET_THRESHOLD_HOP1 0xC12070
+
+#define mmSTLB_SET_THRESHOLD_HOP0 0xC12074
+
+#define mmSTLB_MULTI_HIT_INTERRUPT_CLR 0xC12078
+
+#define mmSTLB_MULTI_HIT_INTERRUPT_MASK 0xC1207C
+
+#define mmSTLB_MEM_L0_CACHE_CFG 0xC12080
+
+#define mmSTLB_MEM_READ_ARPROT 0xC12084
+
+#endif /* ASIC_REG_STLB_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
new file mode 100644
index 000000000000..8f67c11c8de9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
@@ -0,0 +1,2578 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ **       DO NOT EDIT BELOW        **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_MASKS_H_
+#define ASIC_REG_TPC0_CFG_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW */
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH */
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE */
+#define TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG */
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE */
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_RSV_SHIFT 16
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_RSV_MASK 0x1FFF0000
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 29
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0xE0000000
+
+/* TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR */
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR_V_SHIFT 0
+#define TPC0_CFG_KERNEL_SYNC_OBJECT_ADDR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_KERNEL_KERNEL_CONFIG */
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 2
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0xFC
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_SHIFT 8
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_MASK 0xFF00
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_SHIFT 16
+#define TPC0_CFG_KERNEL_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_MASK 0xFF0000
+
+/* TPC0_CFG_KERNEL_KERNEL_ID */
+#define TPC0_CFG_KERNEL_KERNEL_ID_V_SHIFT 0
+#define TPC0_CFG_KERNEL_KERNEL_ID_V_MASK 0xFFFF
+
+/* TPC0_CFG_KERNEL_SRF */
+#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0
+#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ROUND_CSR */
+#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0
+#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7
+
+/* TPC0_CFG_PROT */
+#define TPC0_CFG_PROT_AWPROT_SHIFT 0
+#define TPC0_CFG_PROT_AWPROT_MASK 0x7
+#define TPC0_CFG_PROT_ARPROT_SHIFT 3
+#define TPC0_CFG_PROT_ARPROT_MASK 0x38
+
+/* TPC0_CFG_SEMAPHORE */
+#define TPC0_CFG_SEMAPHORE_V_SHIFT 0
+#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_VFLAGS */
+#define TPC0_CFG_VFLAGS_V_SHIFT 0
+#define TPC0_CFG_VFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_SFLAGS */
+#define TPC0_CFG_SFLAGS_V_SHIFT 0
+#define TPC0_CFG_SFLAGS_V_MASK 0xF
+
+/* TPC0_CFG_LFSR_POLYNOM */
+#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0
+#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_STATUS */
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1
+#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2
+#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4
+#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3
+#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8
+#define TPC0_CFG_STATUS_SB_EMPTY_SHIFT 5
+#define TPC0_CFG_STATUS_SB_EMPTY_MASK 0x20
+#define TPC0_CFG_STATUS_QM_IDLE_SHIFT 6
+#define TPC0_CFG_STATUS_QM_IDLE_MASK 0x40
+#define TPC0_CFG_STATUS_QM_RDY_SHIFT 7
+#define TPC0_CFG_STATUS_QM_RDY_MASK 0x80
+
+/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_CFG_SUBTRACT_VALUE */
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0
+#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TPC_CMD */
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0
+#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1
+#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2
+#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3
+#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5
+#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6
+#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40
+
+/* TPC0_CFG_TPC_EXECUTE */
+#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0
+#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1
+
+/* TPC0_CFG_TPC_STALL */
+#define TPC0_CFG_TPC_STALL_V_SHIFT 0
+#define TPC0_CFG_TPC_STALL_V_MASK 0x1
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_RD_RATE_LIMIT */
+#define TPC0_CFG_RD_RATE_LIMIT_ENABLE_SHIFT 0
+#define TPC0_CFG_RD_RATE_LIMIT_ENABLE_MASK 0x1
+#define TPC0_CFG_RD_RATE_LIMIT_SATURATION_SHIFT 1
+#define TPC0_CFG_RD_RATE_LIMIT_SATURATION_MASK 0x1FE
+#define TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_SHIFT 9
+#define TPC0_CFG_RD_RATE_LIMIT_TIMEOUT_MASK 0x1FE00
+
+/* TPC0_CFG_WR_RATE_LIMIT */
+#define TPC0_CFG_WR_RATE_LIMIT_ENABLE_SHIFT 0
+#define TPC0_CFG_WR_RATE_LIMIT_ENABLE_MASK 0x1
+#define TPC0_CFG_WR_RATE_LIMIT_SATURATION_SHIFT 1
+#define TPC0_CFG_WR_RATE_LIMIT_SATURATION_MASK 0x1FE
+#define TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_SHIFT 9
+#define TPC0_CFG_WR_RATE_LIMIT_TIMEOUT_MASK 0x1FE00
+
+/* TPC0_CFG_MSS_CONFIG */
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0
+#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4
+#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8
+#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10
+#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 0x400
+#define TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_SHIFT 11
+#define TPC0_CFG_MSS_CONFIG_DCACHE_PREFETCH_DIS_MASK 0x800
+
+/* TPC0_CFG_TPC_INTR_CAUSE */
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0
+#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFF
+
+/* TPC0_CFG_TPC_INTR_MASK */
+#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0
+#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFF
+
+/* TPC0_CFG_WQ_CREDITS */
+#define TPC0_CFG_WQ_CREDITS_ST_G_SHIFT 0
+#define TPC0_CFG_WQ_CREDITS_ST_G_MASK 0xF
+#define TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_SHIFT 4
+#define TPC0_CFG_WQ_CREDITS_KERNEL_FIFO_MASK 0x70
+
+/* TPC0_CFG_ARUSER_LO */
+#define TPC0_CFG_ARUSER_LO_V_SHIFT 0
+#define TPC0_CFG_ARUSER_LO_V_MASK 0x7FF
+
+/* TPC0_CFG_ARUSER_HI */
+#define TPC0_CFG_ARUSER_HI_V_SHIFT 11
+#define TPC0_CFG_ARUSER_HI_V_MASK 0x1800
+#define TPC0_CFG_ARUSER_HI_RSRV_SHIFT 13
+#define TPC0_CFG_ARUSER_HI_RSRV_MASK 0xFFFFE000
+
+/* TPC0_CFG_AWUSER_LO */
+#define TPC0_CFG_AWUSER_LO_V_SHIFT 0
+#define TPC0_CFG_AWUSER_LO_V_MASK 0x7FF
+
+/* TPC0_CFG_AWUSER_HI */
+#define TPC0_CFG_AWUSER_HI_V_SHIFT 11
+#define TPC0_CFG_AWUSER_HI_V_MASK 0x1800
+#define TPC0_CFG_AWUSER_HI_RSRV_SHIFT 13
+#define TPC0_CFG_AWUSER_HI_RSRV_MASK 0xFFFFE000
+
+/* TPC0_CFG_OPCODE_EXEC */
+#define TPC0_CFG_OPCODE_EXEC_SPU_OP_SHIFT 0
+#define TPC0_CFG_OPCODE_EXEC_SPU_OP_MASK 0x7F
+#define TPC0_CFG_OPCODE_EXEC_SPU_EN_SHIFT 7
+#define TPC0_CFG_OPCODE_EXEC_SPU_EN_MASK 0x80
+#define TPC0_CFG_OPCODE_EXEC_VPU_OP_SHIFT 8
+#define TPC0_CFG_OPCODE_EXEC_VPU_OP_MASK 0x7F00
+#define TPC0_CFG_OPCODE_EXEC_VPU_EN_SHIFT 15
+#define TPC0_CFG_OPCODE_EXEC_VPU_EN_MASK 0x8000
+#define TPC0_CFG_OPCODE_EXEC_LD_OP_SHIFT 16
+#define TPC0_CFG_OPCODE_EXEC_LD_OP_MASK 0x7F0000
+#define TPC0_CFG_OPCODE_EXEC_LD_EN_SHIFT 23
+#define TPC0_CFG_OPCODE_EXEC_LD_EN_MASK 0x800000
+#define TPC0_CFG_OPCODE_EXEC_ST_OP_SHIFT 24
+#define TPC0_CFG_OPCODE_EXEC_ST_OP_MASK 0x7F000000
+#define TPC0_CFG_OPCODE_EXEC_ST_EN_SHIFT 31
+#define TPC0_CFG_OPCODE_EXEC_ST_EN_MASK 0x80000000
+
+/* TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC32_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC64_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO */
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI */
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_SHIFT 0
+#define TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_TSB_CFG_MAX_SIZE */
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_SHIFT 0
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_DATA_MASK 0xFFFF
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_MD_SHIFT 16
+#define TPC0_CFG_TSB_CFG_MAX_SIZE_MD_MASK 0xFFFF0000
+
+/* TPC0_CFG_TSB_CFG */
+#define TPC0_CFG_TSB_CFG_FORCE_MISS_SHIFT 0
+#define TPC0_CFG_TSB_CFG_FORCE_MISS_MASK 0x1
+#define TPC0_CFG_TSB_CFG_MAX_OS_SHIFT 1
+#define TPC0_CFG_TSB_CFG_MAX_OS_MASK 0x1FFFE
+
+/* TPC0_CFG_DBGMEM_ADD */
+#define TPC0_CFG_DBGMEM_ADD_V_SHIFT 0
+#define TPC0_CFG_DBGMEM_ADD_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_DBGMEM_DATA_WR */
+#define TPC0_CFG_DBGMEM_DATA_WR_V_SHIFT 0
+#define TPC0_CFG_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_DBGMEM_DATA_RD */
+#define TPC0_CFG_DBGMEM_DATA_RD_V_SHIFT 0
+#define TPC0_CFG_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_DBGMEM_CTRL */
+#define TPC0_CFG_DBGMEM_CTRL_WR_NRD_SHIFT 0
+#define TPC0_CFG_DBGMEM_CTRL_WR_NRD_MASK 0x1
+
+/* TPC0_CFG_DBGMEM_RC */
+#define TPC0_CFG_DBGMEM_RC_VALID_SHIFT 0
+#define TPC0_CFG_DBGMEM_RC_VALID_MASK 0x1
+
+/* TPC0_CFG_TSB_INFLIGHT_CNTR */
+#define TPC0_CFG_TSB_INFLIGHT_CNTR_V_SHIFT 0
+#define TPC0_CFG_TSB_INFLIGHT_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_WQ_INFLIGHT_CNTR */
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_SHIFT 0
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_HBW_MASK 0xFFFF
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_SHIFT 16
+#define TPC0_CFG_WQ_INFLIGHT_CNTR_LBW_MASK 0xF0000
+
+/* TPC0_CFG_WQ_LBW_TOTAL_CNTR */
+#define TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_SHIFT 0
+#define TPC0_CFG_WQ_LBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_WQ_HBW_TOTAL_CNTR */
+#define TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_SHIFT 0
+#define TPC0_CFG_WQ_HBW_TOTAL_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_IRQ_OCCOUPY_CNTR */
+#define TPC0_CFG_IRQ_OCCOUPY_CNTR_V_SHIFT 0
+#define TPC0_CFG_IRQ_OCCOUPY_CNTR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_FUNC_MBIST_CNTRL */
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000
+
+/* TPC0_CFG_FUNC_MBIST_PAT */
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10
+#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00
+
+/* TPC0_CFG_FUNC_MBIST_MEM */
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0
+#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12
+#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28
+#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_8_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_9_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_10_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_11_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_12_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_13_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_14_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW */
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH */
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_PADDING_VALUE */
+#define TPC0_CFG_QM_TENSOR_15_PADDING_VALUE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_PADDING_VALUE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG */
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_DATA_TYPE_MASK 0x7
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_LAST_DIM_SHIFT 16
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_LAST_DIM_MASK 0x70000
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_SET_SHIFT 19
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_SET_MASK 0x80000
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_SHIFT 20
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_RESERV_MASK 0x100000
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_OP_SHIFT 21
+#define TPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG_RMW_OP_MASK 0x600000
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_SIZE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE */
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE_V_SHIFT 0
+#define TPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_RSV_SHIFT 16
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_RSV_MASK 0x1FFF0000
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 29
+#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0xE0000000
+
+/* TPC0_CFG_QM_SYNC_OBJECT_ADDR */
+#define TPC0_CFG_QM_SYNC_OBJECT_ADDR_V_SHIFT 0
+#define TPC0_CFG_QM_SYNC_OBJECT_ADDR_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_0 */
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_0 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_1 */
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_1 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_2 */
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_2 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_3 */
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_3 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_BASE_DIM_4 */
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_TID_SIZE_DIM_4 */
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0
+#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF
+
+/* TPC0_CFG_QM_KERNEL_CONFIG */
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1
+#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 2
+#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0xFC
+#define TPC0_CFG_QM_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_SHIFT 8
+#define TPC0_CFG_QM_KERNEL_CONFIG_RD_RATE_LIMIT_RST_TOKEN_MASK 0xFF00
+#define TPC0_CFG_QM_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_SHIFT 16
+#define TPC0_CFG_QM_KERNEL_CONFIG_WR_RATE_LIMIT_RST_TOKEN_MASK 0xFF0000
+
+/* TPC0_CFG_QM_KERNEL_ID */
+#define TPC0_CFG_QM_KERNEL_ID_V_SHIFT 0
+#define TPC0_CFG_QM_KERNEL_ID_V_MASK 0xFFFF
+
+/* TPC0_CFG_QM_SRF */
+#define TPC0_CFG_QM_SRF_V_SHIFT 0
+#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF
+
+#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
new file mode 100644
index 000000000000..b82a906265a8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_CFG_REGS_H_
+#define ASIC_REG_TPC0_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE06418
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE0641C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06420
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE06424
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06428
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE0642C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06430
+
+#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06434
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE06438
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE0643C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06440
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06444
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE06448
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE0644C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06450
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE06454
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06458
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE0645C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06460
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06464
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE06468
+
+#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE0646C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06470
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE06474
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE06478
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE0647C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE06480
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE06484
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE06488
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE0648C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE06490
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE06494
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE06498
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE0649C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE064C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE064C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE064C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE064CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE064D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE064D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE064D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE064DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE064E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE064E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE064E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE064EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE064F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE064F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE064F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE064FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06500
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE06504
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06508
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE0650C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06510
+
+#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06514
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE06518
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE0651C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06520
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06524
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE06528
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE0652C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06530
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE06534
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE06538
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE0653C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE06540
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE06544
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE06548
+
+#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE0654C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE06550
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE06554
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE06558
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE0655C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE06560
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE06564
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE06568
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE0656C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE06570
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE06574
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE06578
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE0657C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06580
+
+#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE06584
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06588
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE0658C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE06590
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06594
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06598
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE0659C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE065A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE065A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE065A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE065AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE065B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE065B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE065B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE065BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE065C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE065C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE065C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE065CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE065D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE065D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE065D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE065DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE065E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE065E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE065E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE065EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE065F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE065F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE065F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE065FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE06600
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE06604
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE06608
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE0660C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE06610
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE06614
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE06618
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE0661C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE06620
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE06624
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE06628
+
+#define mmTPC0_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE0662C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE06630
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE06634
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE06638
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE0663C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE06640
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE06644
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE06648
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE0664C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE06650
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE06654
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE06658
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE0665C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE06660
+
+#define mmTPC0_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE06664
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE06668
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE0666C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE06670
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE06674
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE06678
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE0667C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE06680
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE06684
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE06688
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE0668C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE06690
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE06694
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE06698
+
+#define mmTPC0_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE0669C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE066A0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE066A4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE066A8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE066AC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE066B0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE066B4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE066B8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE066BC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE066C0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE066C4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE066C8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE066CC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE066D0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE066D4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE066D8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE066DC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE066E0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE066E4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE066E8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE066EC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE066F0
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE066F4
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE066F8
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE066FC
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE06700
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE06704
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE06708
+
+#define mmTPC0_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE0670C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE06710
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE06714
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE06718
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE0671C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE06720
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE06724
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE06728
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE0672C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE06730
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE06734
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE06738
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE0673C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE06740
+
+#define mmTPC0_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE06744
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE06748
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE0674C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE06750
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE06754
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE06758
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE0675C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE06760
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE06764
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE06768
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE0676C
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE06770
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE06774
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE06778
+
+#define mmTPC0_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE0677C
+
+#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06780
+
+#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE06784
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06788
+
+#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE0678C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06790
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE06794
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06798
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE0679C
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE067A0
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE067A4
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE067A8
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE067AC
+
+#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE067B0
+
+#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE067B4
+
+#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE067B8
+
+#define mmTPC0_CFG_KERNEL_KERNEL_ID 0xE067BC
+
+#define mmTPC0_CFG_KERNEL_SRF_0 0xE067C0
+
+#define mmTPC0_CFG_KERNEL_SRF_1 0xE067C4
+
+#define mmTPC0_CFG_KERNEL_SRF_2 0xE067C8
+
+#define mmTPC0_CFG_KERNEL_SRF_3 0xE067CC
+
+#define mmTPC0_CFG_KERNEL_SRF_4 0xE067D0
+
+#define mmTPC0_CFG_KERNEL_SRF_5 0xE067D4
+
+#define mmTPC0_CFG_KERNEL_SRF_6 0xE067D8
+
+#define mmTPC0_CFG_KERNEL_SRF_7 0xE067DC
+
+#define mmTPC0_CFG_KERNEL_SRF_8 0xE067E0
+
+#define mmTPC0_CFG_KERNEL_SRF_9 0xE067E4
+
+#define mmTPC0_CFG_KERNEL_SRF_10 0xE067E8
+
+#define mmTPC0_CFG_KERNEL_SRF_11 0xE067EC
+
+#define mmTPC0_CFG_KERNEL_SRF_12 0xE067F0
+
+#define mmTPC0_CFG_KERNEL_SRF_13 0xE067F4
+
+#define mmTPC0_CFG_KERNEL_SRF_14 0xE067F8
+
+#define mmTPC0_CFG_KERNEL_SRF_15 0xE067FC
+
+#define mmTPC0_CFG_KERNEL_SRF_16 0xE06800
+
+#define mmTPC0_CFG_KERNEL_SRF_17 0xE06804
+
+#define mmTPC0_CFG_KERNEL_SRF_18 0xE06808
+
+#define mmTPC0_CFG_KERNEL_SRF_19 0xE0680C
+
+#define mmTPC0_CFG_KERNEL_SRF_20 0xE06810
+
+#define mmTPC0_CFG_KERNEL_SRF_21 0xE06814
+
+#define mmTPC0_CFG_KERNEL_SRF_22 0xE06818
+
+#define mmTPC0_CFG_KERNEL_SRF_23 0xE0681C
+
+#define mmTPC0_CFG_KERNEL_SRF_24 0xE06820
+
+#define mmTPC0_CFG_KERNEL_SRF_25 0xE06824
+
+#define mmTPC0_CFG_KERNEL_SRF_26 0xE06828
+
+#define mmTPC0_CFG_KERNEL_SRF_27 0xE0682C
+
+#define mmTPC0_CFG_KERNEL_SRF_28 0xE06830
+
+#define mmTPC0_CFG_KERNEL_SRF_29 0xE06834
+
+#define mmTPC0_CFG_KERNEL_SRF_30 0xE06838
+
+#define mmTPC0_CFG_KERNEL_SRF_31 0xE0683C
+
+#define mmTPC0_CFG_ROUND_CSR 0xE068FC
+
+#define mmTPC0_CFG_PROT 0xE06900
+
+#define mmTPC0_CFG_SEMAPHORE 0xE06908
+
+#define mmTPC0_CFG_VFLAGS 0xE0690C
+
+#define mmTPC0_CFG_SFLAGS 0xE06910
+
+#define mmTPC0_CFG_LFSR_POLYNOM 0xE06918
+
+#define mmTPC0_CFG_STATUS 0xE0691C
+
+#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06920
+
+#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06924
+
+#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0692C
+
+#define mmTPC0_CFG_TPC_CMD 0xE06930
+
+#define mmTPC0_CFG_TPC_EXECUTE 0xE06938
+
+#define mmTPC0_CFG_TPC_STALL 0xE0693C
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06940
+
+#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06944
+
+#define mmTPC0_CFG_RD_RATE_LIMIT 0xE06948
+
+#define mmTPC0_CFG_WR_RATE_LIMIT 0xE06950
+
+#define mmTPC0_CFG_MSS_CONFIG 0xE06954
+
+#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06958
+
+#define mmTPC0_CFG_TPC_INTR_MASK 0xE0695C
+
+#define mmTPC0_CFG_WQ_CREDITS 0xE06960
+
+#define mmTPC0_CFG_ARUSER_LO 0xE06964
+
+#define mmTPC0_CFG_ARUSER_HI 0xE06968
+
+#define mmTPC0_CFG_AWUSER_LO 0xE0696C
+
+#define mmTPC0_CFG_AWUSER_HI 0xE06970
+
+#define mmTPC0_CFG_OPCODE_EXEC 0xE06974
+
+#define mmTPC0_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE06978
+
+#define mmTPC0_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE0697C
+
+#define mmTPC0_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE06980
+
+#define mmTPC0_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE06984
+
+#define mmTPC0_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE06988
+
+#define mmTPC0_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE0698C
+
+#define mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE06990
+
+#define mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE06994
+
+#define mmTPC0_CFG_TSB_CFG_MAX_SIZE 0xE06998
+
+#define mmTPC0_CFG_TSB_CFG 0xE0699C
+
+#define mmTPC0_CFG_DBGMEM_ADD 0xE069A0
+
+#define mmTPC0_CFG_DBGMEM_DATA_WR 0xE069A4
+
+#define mmTPC0_CFG_DBGMEM_DATA_RD 0xE069A8
+
+#define mmTPC0_CFG_DBGMEM_CTRL 0xE069AC
+
+#define mmTPC0_CFG_DBGMEM_RC 0xE069B0
+
+#define mmTPC0_CFG_TSB_INFLIGHT_CNTR 0xE069B4
+
+#define mmTPC0_CFG_WQ_INFLIGHT_CNTR 0xE069B8
+
+#define mmTPC0_CFG_WQ_LBW_TOTAL_CNTR 0xE069BC
+
+#define mmTPC0_CFG_WQ_HBW_TOTAL_CNTR 0xE069C0
+
+#define mmTPC0_CFG_IRQ_OCCOUPY_CNTR 0xE069C4
+
+#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE069D0
+
+#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE069D4
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE069D8
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE069DC
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE069E0
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE069E4
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE069E8
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE069EC
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE069F0
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE069F4
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE069F8
+
+#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE069FC
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00
+
+#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04
+
+#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08
+
+#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A18
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A1C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A20
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A24
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A28
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A2C
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A30
+
+#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A34
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A38
+
+#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A3C
+
+#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A40
+
+#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A44
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A48
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A4C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A50
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A54
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A58
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A5C
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A60
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A64
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A68
+
+#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A6C
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A70
+
+#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A74
+
+#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06A78
+
+#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06A7C
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06A80
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06A84
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06A88
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06A8C
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06A90
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06A94
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06A98
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06A9C
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AA0
+
+#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06AA4
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AA8
+
+#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AAC
+
+#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AB0
+
+#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AB4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AB8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06ABC
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06AC0
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06AC4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06AC8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06ACC
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06AD0
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06AD4
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06AD8
+
+#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06ADC
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06AE0
+
+#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06AE4
+
+#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06AE8
+
+#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06AEC
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06AF0
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06AF4
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06AF8
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06AFC
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B00
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B04
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B08
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B0C
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B10
+
+#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B14
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B18
+
+#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B1C
+
+#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B20
+
+#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B24
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B28
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B2C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B30
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B34
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06B38
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06B3C
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06B40
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06B44
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06B48
+
+#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06B4C
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06B50
+
+#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06B54
+
+#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06B58
+
+#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06B5C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06B60
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06B64
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06B68
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06B6C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06B70
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06B74
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06B78
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06B7C
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06B80
+
+#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06B84
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06B88
+
+#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06B8C
+
+#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06B90
+
+#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06B94
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06B98
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06B9C
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06BA0
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06BA4
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06BA8
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06BAC
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06BB0
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06BB4
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06BB8
+
+#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06BBC
+
+#define mmTPC0_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE06BC0
+
+#define mmTPC0_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE06BC4
+
+#define mmTPC0_CFG_QM_TENSOR_8_PADDING_VALUE 0xE06BC8
+
+#define mmTPC0_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE06BCC
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE06BD0
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE06BD4
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE06BD8
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE06BDC
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE06BE0
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE06BE4
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE06BE8
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE06BEC
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE06BF0
+
+#define mmTPC0_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE06BF4
+
+#define mmTPC0_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE06BF8
+
+#define mmTPC0_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE06BFC
+
+#define mmTPC0_CFG_QM_TENSOR_9_PADDING_VALUE 0xE06C00
+
+#define mmTPC0_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE06C04
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE06C08
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE06C0C
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE06C10
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE06C14
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE06C18
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE06C1C
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE06C20
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE06C24
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE06C28
+
+#define mmTPC0_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE06C2C
+
+#define mmTPC0_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE06C30
+
+#define mmTPC0_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE06C34
+
+#define mmTPC0_CFG_QM_TENSOR_10_PADDING_VALUE 0xE06C38
+
+#define mmTPC0_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE06C3C
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE06C40
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE06C44
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE06C48
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE06C4C
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE06C50
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE06C54
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE06C58
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE06C5C
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE06C60
+
+#define mmTPC0_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE06C64
+
+#define mmTPC0_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE06C68
+
+#define mmTPC0_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE06C6C
+
+#define mmTPC0_CFG_QM_TENSOR_11_PADDING_VALUE 0xE06C70
+
+#define mmTPC0_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE06C74
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE06C78
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE06C7C
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE06C80
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE06C84
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE06C88
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE06C8C
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE06C90
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE06C94
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE06C98
+
+#define mmTPC0_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE06C9C
+
+#define mmTPC0_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE06CA0
+
+#define mmTPC0_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE06CA4
+
+#define mmTPC0_CFG_QM_TENSOR_12_PADDING_VALUE 0xE06CA8
+
+#define mmTPC0_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE06CAC
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE06CB0
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE06CB4
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE06CB8
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE06CBC
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE06CC0
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE06CC4
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE06CC8
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE06CCC
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE06CD0
+
+#define mmTPC0_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE06CD4
+
+#define mmTPC0_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE06CD8
+
+#define mmTPC0_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE06CDC
+
+#define mmTPC0_CFG_QM_TENSOR_13_PADDING_VALUE 0xE06CE0
+
+#define mmTPC0_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE06CE4
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE06CE8
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE06CEC
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE06CF0
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE06CF4
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE06CF8
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE06CFC
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE06D00
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE06D04
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE06D08
+
+#define mmTPC0_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE06D0C
+
+#define mmTPC0_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE06D10
+
+#define mmTPC0_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE06D14
+
+#define mmTPC0_CFG_QM_TENSOR_14_PADDING_VALUE 0xE06D18
+
+#define mmTPC0_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE06D1C
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE06D20
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE06D24
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE06D28
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE06D2C
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE06D30
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE06D34
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE06D38
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE06D3C
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE06D40
+
+#define mmTPC0_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE06D44
+
+#define mmTPC0_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE06D48
+
+#define mmTPC0_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE06D4C
+
+#define mmTPC0_CFG_QM_TENSOR_15_PADDING_VALUE 0xE06D50
+
+#define mmTPC0_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE06D54
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE06D58
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE06D5C
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE06D60
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE06D64
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE06D68
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE06D6C
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE06D70
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE06D74
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE06D78
+
+#define mmTPC0_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE06D7C
+
+#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D80
+
+#define mmTPC0_CFG_QM_SYNC_OBJECT_ADDR 0xE06D84
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06D88
+
+#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06D8C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06D90
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06D94
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06D98
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06D9C
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06DA0
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06DA4
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06DA8
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06DAC
+
+#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06DB0
+
+#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06DB4
+
+#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06DB8
+
+#define mmTPC0_CFG_QM_KERNEL_ID 0xE06DBC
+
+#define mmTPC0_CFG_QM_SRF_0 0xE06DC0
+
+#define mmTPC0_CFG_QM_SRF_1 0xE06DC4
+
+#define mmTPC0_CFG_QM_SRF_2 0xE06DC8
+
+#define mmTPC0_CFG_QM_SRF_3 0xE06DCC
+
+#define mmTPC0_CFG_QM_SRF_4 0xE06DD0
+
+#define mmTPC0_CFG_QM_SRF_5 0xE06DD4
+
+#define mmTPC0_CFG_QM_SRF_6 0xE06DD8
+
+#define mmTPC0_CFG_QM_SRF_7 0xE06DDC
+
+#define mmTPC0_CFG_QM_SRF_8 0xE06DE0
+
+#define mmTPC0_CFG_QM_SRF_9 0xE06DE4
+
+#define mmTPC0_CFG_QM_SRF_10 0xE06DE8
+
+#define mmTPC0_CFG_QM_SRF_11 0xE06DEC
+
+#define mmTPC0_CFG_QM_SRF_12 0xE06DF0
+
+#define mmTPC0_CFG_QM_SRF_13 0xE06DF4
+
+#define mmTPC0_CFG_QM_SRF_14 0xE06DF8
+
+#define mmTPC0_CFG_QM_SRF_15 0xE06DFC
+
+#define mmTPC0_CFG_QM_SRF_16 0xE06E00
+
+#define mmTPC0_CFG_QM_SRF_17 0xE06E04
+
+#define mmTPC0_CFG_QM_SRF_18 0xE06E08
+
+#define mmTPC0_CFG_QM_SRF_19 0xE06E0C
+
+#define mmTPC0_CFG_QM_SRF_20 0xE06E10
+
+#define mmTPC0_CFG_QM_SRF_21 0xE06E14
+
+#define mmTPC0_CFG_QM_SRF_22 0xE06E18
+
+#define mmTPC0_CFG_QM_SRF_23 0xE06E1C
+
+#define mmTPC0_CFG_QM_SRF_24 0xE06E20
+
+#define mmTPC0_CFG_QM_SRF_25 0xE06E24
+
+#define mmTPC0_CFG_QM_SRF_26 0xE06E28
+
+#define mmTPC0_CFG_QM_SRF_27 0xE06E2C
+
+#define mmTPC0_CFG_QM_SRF_28 0xE06E30
+
+#define mmTPC0_CFG_QM_SRF_29 0xE06E34
+
+#define mmTPC0_CFG_QM_SRF_30 0xE06E38
+
+#define mmTPC0_CFG_QM_SRF_31 0xE06E3C
+
+#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
new file mode 100644
index 000000000000..8e71532c6f36
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
@@ -0,0 +1,800 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_MASKS_H_
+#define ASIC_REG_TPC0_QM_MASKS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+/* TPC0_QM_GLBL_CFG0 */
+#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0
+#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0xF
+#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 4
+#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x1F0
+#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 9
+#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x3E00
+
+/* TPC0_QM_GLBL_CFG1 */
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0
+#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0xF
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 4
+#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x1F0
+#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 9
+#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x3E00
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 16
+#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 20
+#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 25
+#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000
+
+/* TPC0_QM_GLBL_PROT */
+#define TPC0_QM_GLBL_PROT_PQF_SHIFT 0
+#define TPC0_QM_GLBL_PROT_PQF_MASK 0xF
+#define TPC0_QM_GLBL_PROT_CQF_SHIFT 4
+#define TPC0_QM_GLBL_PROT_CQF_MASK 0x1F0
+#define TPC0_QM_GLBL_PROT_CP_SHIFT 9
+#define TPC0_QM_GLBL_PROT_CP_MASK 0x3E00
+#define TPC0_QM_GLBL_PROT_ERR_SHIFT 14
+#define TPC0_QM_GLBL_PROT_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_PROT_ARB_SHIFT 15
+#define TPC0_QM_GLBL_PROT_ARB_MASK 0x8000
+
+/* TPC0_QM_GLBL_ERR_CFG */
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0
+#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4
+#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9
+#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16
+#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20
+#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25
+#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000
+#define TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31
+#define TPC0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000
+
+/* TPC0_QM_GLBL_SECURE_PROPS */
+#define TPC0_QM_GLBL_SECURE_PROPS_0_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_1_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_2_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_3_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_4_ASID_SHIFT 0
+#define TPC0_QM_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_NON_SECURE_PROPS */
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10
+#define TPC0_QM_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400
+
+/* TPC0_QM_GLBL_STS0 */
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0
+#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0xF
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 4
+#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x1F0
+#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 9
+#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x3E00
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 16
+#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 20
+#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 25
+#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000
+#define TPC0_QM_GLBL_STS0_ARB_IS_STOP_SHIFT 31
+#define TPC0_QM_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000
+
+/* TPC0_QM_GLBL_STS1 */
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_STS1_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_GLBL_STS1_4 */
+#define TPC0_QM_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_STS1_4_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_STS1_4_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_STS1_4_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_STS1_4_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_GLBL_MSG_EN */
+#define TPC0_QM_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0
+#define TPC0_QM_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1
+#define TPC0_QM_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_GLBL_MSG_EN_4 */
+#define TPC0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1
+#define TPC0_QM_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2
+#define TPC0_QM_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2
+#define TPC0_QM_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4
+#define TPC0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3
+#define TPC0_QM_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8
+#define TPC0_QM_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4
+#define TPC0_QM_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10
+#define TPC0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5
+#define TPC0_QM_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20
+#define TPC0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6
+#define TPC0_QM_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15
+#define TPC0_QM_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000
+
+/* TPC0_QM_PQ_BASE_LO */
+#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_BASE_HI */
+#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_SIZE */
+#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0
+#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_PI */
+#define TPC0_QM_PQ_PI_VAL_SHIFT 0
+#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CI */
+#define TPC0_QM_PQ_CI_VAL_SHIFT 0
+#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_PQ_CFG0 */
+#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_PQ_CFG1 */
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_ARUSER_31_11 */
+#define TPC0_QM_PQ_ARUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_PQ_STS0 */
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_PQ_STS1 */
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31
+#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_CQ_CFG0 */
+#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0
+#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1
+
+/* TPC0_QM_CQ_CFG1 */
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0
+#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16
+#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_ARUSER_31_11 */
+#define TPC0_QM_CQ_ARUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_CQ_STS0 */
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16
+#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_STS1 */
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30
+#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000
+#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31
+#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000
+
+/* TPC0_QM_CQ_PTR_LO_0 */
+#define TPC0_QM_CQ_PTR_LO_0_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_0 */
+#define TPC0_QM_CQ_PTR_HI_0_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_0 */
+#define TPC0_QM_CQ_TSIZE_0_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_0 */
+#define TPC0_QM_CQ_CTL_0_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_0_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_0_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_0_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_1 */
+#define TPC0_QM_CQ_PTR_LO_1_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_1 */
+#define TPC0_QM_CQ_PTR_HI_1_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_1 */
+#define TPC0_QM_CQ_TSIZE_1_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_1 */
+#define TPC0_QM_CQ_CTL_1_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_1_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_1_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_1_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_2 */
+#define TPC0_QM_CQ_PTR_LO_2_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_2 */
+#define TPC0_QM_CQ_PTR_HI_2_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_2 */
+#define TPC0_QM_CQ_TSIZE_2_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_2 */
+#define TPC0_QM_CQ_CTL_2_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_2_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_2_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_2_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_3 */
+#define TPC0_QM_CQ_PTR_LO_3_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_3 */
+#define TPC0_QM_CQ_PTR_HI_3_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_3 */
+#define TPC0_QM_CQ_TSIZE_3_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_3 */
+#define TPC0_QM_CQ_CTL_3_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_3_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_3_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_3_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_4 */
+#define TPC0_QM_CQ_PTR_LO_4_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_4 */
+#define TPC0_QM_CQ_PTR_HI_4_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_4 */
+#define TPC0_QM_CQ_TSIZE_4_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_4 */
+#define TPC0_QM_CQ_CTL_4_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_4_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_4_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_4_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_PTR_LO_STS */
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_PTR_HI_STS */
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_TSIZE_STS */
+#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0
+#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CQ_CTL_STS */
+#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0
+#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF
+#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16
+#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000
+
+/* TPC0_QM_CQ_IFIFO_CNT */
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0
+#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0
+#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_FENCE0_RDATA */
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE1_RDATA */
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE2_RDATA */
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE3_RDATA */
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF
+
+/* TPC0_QM_CP_FENCE0_CNT */
+#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_FENCE1_CNT */
+#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_FENCE2_CNT */
+#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_FENCE3_CNT */
+#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0
+#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0x3FFF
+
+/* TPC0_QM_CP_STS */
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0
+#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF
+#define TPC0_QM_CP_STS_ERDY_SHIFT 16
+#define TPC0_QM_CP_STS_ERDY_MASK 0x10000
+#define TPC0_QM_CP_STS_RRDY_SHIFT 17
+#define TPC0_QM_CP_STS_RRDY_MASK 0x20000
+#define TPC0_QM_CP_STS_MRDY_SHIFT 18
+#define TPC0_QM_CP_STS_MRDY_MASK 0x40000
+#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19
+#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000
+#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20
+#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22
+#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000
+
+/* TPC0_QM_CP_CURRENT_INST_LO */
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_CURRENT_INST_HI */
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0
+#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_CP_BARRIER_CFG */
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0
+#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF
+#define TPC0_QM_CP_BARRIER_CFG_RBGUARD_SHIFT 16
+#define TPC0_QM_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000
+
+/* TPC0_QM_CP_DBG_0 */
+#define TPC0_QM_CP_DBG_0_CS_SHIFT 0
+#define TPC0_QM_CP_DBG_0_CS_MASK 0xF
+#define TPC0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4
+#define TPC0_QM_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10
+#define TPC0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5
+#define TPC0_QM_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20
+#define TPC0_QM_CP_DBG_0_MREB_STALL_SHIFT 6
+#define TPC0_QM_CP_DBG_0_MREB_STALL_MASK 0x40
+#define TPC0_QM_CP_DBG_0_STALL_SHIFT 7
+#define TPC0_QM_CP_DBG_0_STALL_MASK 0x80
+
+/* TPC0_QM_CP_ARUSER_31_11 */
+#define TPC0_QM_CP_ARUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_CP_AWUSER_31_11 */
+#define TPC0_QM_CP_AWUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_ARB_CFG_0 */
+#define TPC0_QM_ARB_CFG_0_TYPE_SHIFT 0
+#define TPC0_QM_ARB_CFG_0_TYPE_MASK 0x1
+#define TPC0_QM_ARB_CFG_0_IS_MASTER_SHIFT 4
+#define TPC0_QM_ARB_CFG_0_IS_MASTER_MASK 0x10
+#define TPC0_QM_ARB_CFG_0_EN_SHIFT 8
+#define TPC0_QM_ARB_CFG_0_EN_MASK 0x100
+#define TPC0_QM_ARB_CFG_0_MASK_SHIFT 12
+#define TPC0_QM_ARB_CFG_0_MASK_MASK 0xF000
+#define TPC0_QM_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16
+#define TPC0_QM_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000
+
+/* TPC0_QM_ARB_CHOISE_Q_PUSH */
+#define TPC0_QM_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0
+#define TPC0_QM_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3
+
+/* TPC0_QM_ARB_WRR_WEIGHT */
+#define TPC0_QM_ARB_WRR_WEIGHT_VAL_SHIFT 0
+#define TPC0_QM_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_CFG_1 */
+#define TPC0_QM_ARB_CFG_1_CLR_SHIFT 0
+#define TPC0_QM_ARB_CFG_1_CLR_MASK 0x1
+
+/* TPC0_QM_ARB_MST_AVAIL_CRED */
+#define TPC0_QM_ARB_MST_AVAIL_CRED_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F
+
+/* TPC0_QM_ARB_MST_CRED_INC */
+#define TPC0_QM_ARB_MST_CRED_INC_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_MST_CHOISE_PUSH_OFST */
+#define TPC0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST */
+#define TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_MST_SLAVE_EN */
+#define TPC0_QM_ARB_MST_SLAVE_EN_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_MST_QUIET_PER */
+#define TPC0_QM_ARB_MST_QUIET_PER_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_SLV_CHOISE_WDT */
+#define TPC0_QM_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_SLV_ID */
+#define TPC0_QM_ARB_SLV_ID_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_ID_VAL_MASK 0x1F
+
+/* TPC0_QM_ARB_MSG_MAX_INFLIGHT */
+#define TPC0_QM_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0
+#define TPC0_QM_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F
+
+/* TPC0_QM_ARB_MSG_AWUSER_31_11 */
+#define TPC0_QM_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0
+#define TPC0_QM_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF
+
+/* TPC0_QM_ARB_MSG_AWUSER_SEC_PROP */
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10
+#define TPC0_QM_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400
+
+/* TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP */
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10
+#define TPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400
+
+/* TPC0_QM_ARB_BASE_LO */
+#define TPC0_QM_ARB_BASE_LO_VAL_SHIFT 0
+#define TPC0_QM_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_BASE_HI */
+#define TPC0_QM_ARB_BASE_HI_VAL_SHIFT 0
+#define TPC0_QM_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_STATE_STS */
+#define TPC0_QM_ARB_STATE_STS_VAL_SHIFT 0
+#define TPC0_QM_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_ARB_CHOISE_FULLNESS_STS */
+#define TPC0_QM_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0
+#define TPC0_QM_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F
+
+/* TPC0_QM_ARB_MSG_STS */
+#define TPC0_QM_ARB_MSG_STS_FULL_SHIFT 0
+#define TPC0_QM_ARB_MSG_STS_FULL_MASK 0x1
+#define TPC0_QM_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1
+#define TPC0_QM_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2
+
+/* TPC0_QM_ARB_SLV_CHOISE_Q_HEAD */
+#define TPC0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0
+#define TPC0_QM_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3
+
+/* TPC0_QM_ARB_ERR_CAUSE */
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1
+#define TPC0_QM_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2
+#define TPC0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2
+#define TPC0_QM_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4
+
+/* TPC0_QM_ARB_ERR_MSG_EN */
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1
+#define TPC0_QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define TPC0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2
+#define TPC0_QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+/* TPC0_QM_ARB_ERR_STS_DRP */
+#define TPC0_QM_ARB_ERR_STS_DRP_VAL_SHIFT 0
+#define TPC0_QM_ARB_ERR_STS_DRP_VAL_MASK 0x3
+
+/* TPC0_QM_ARB_MST_CRED_STS */
+#define TPC0_QM_ARB_MST_CRED_STS_VAL_SHIFT 0
+#define TPC0_QM_ARB_MST_CRED_STS_VAL_MASK 0x7F
+
+/* TPC0_QM_CGM_CFG */
+#define TPC0_QM_CGM_CFG_IDLE_TH_SHIFT 0
+#define TPC0_QM_CGM_CFG_IDLE_TH_MASK 0xFFF
+#define TPC0_QM_CGM_CFG_G2F_TH_SHIFT 16
+#define TPC0_QM_CGM_CFG_G2F_TH_MASK 0xFF0000
+#define TPC0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT 24
+#define TPC0_QM_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000
+#define TPC0_QM_CGM_CFG_EN_SHIFT 31
+#define TPC0_QM_CGM_CFG_EN_MASK 0x80000000
+
+/* TPC0_QM_CGM_STS */
+#define TPC0_QM_CGM_STS_ST_SHIFT 0
+#define TPC0_QM_CGM_STS_ST_MASK 0x3
+#define TPC0_QM_CGM_STS_CG_SHIFT 4
+#define TPC0_QM_CGM_STS_CG_MASK 0x10
+#define TPC0_QM_CGM_STS_AGENT_IDLE_SHIFT 8
+#define TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
+#define TPC0_QM_CGM_STS_AXI_IDLE_SHIFT 9
+#define TPC0_QM_CGM_STS_AXI_IDLE_MASK 0x200
+#define TPC0_QM_CGM_STS_CP_IDLE_SHIFT 10
+#define TPC0_QM_CGM_STS_CP_IDLE_MASK 0x400
+
+/* TPC0_QM_CGM_CFG1 */
+#define TPC0_QM_CGM_CFG1_MASK_TH_SHIFT 0
+#define TPC0_QM_CGM_CFG1_MASK_TH_MASK 0xFF
+
+/* TPC0_QM_LOCAL_RANGE_BASE */
+#define TPC0_QM_LOCAL_RANGE_BASE_VAL_SHIFT 0
+#define TPC0_QM_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF
+
+/* TPC0_QM_LOCAL_RANGE_SIZE */
+#define TPC0_QM_LOCAL_RANGE_SIZE_VAL_SHIFT 0
+#define TPC0_QM_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF
+
+/* TPC0_QM_CSMR_STRICT_PRIO_CFG */
+#define TPC0_QM_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0
+#define TPC0_QM_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1
+
+/* TPC0_QM_HBW_RD_RATE_LIM_CFG_1 */
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* TPC0_QM_LBW_WR_RATE_LIM_CFG_0 */
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* TPC0_QM_LBW_WR_RATE_LIM_CFG_1 */
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31
+#define TPC0_QM_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000
+
+/* TPC0_QM_HBW_RD_RATE_LIM_CFG_0 */
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16
+#define TPC0_QM_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000
+
+/* TPC0_QM_GLBL_AXCACHE */
+#define TPC0_QM_GLBL_AXCACHE_AR_SHIFT 0
+#define TPC0_QM_GLBL_AXCACHE_AR_MASK 0xF
+#define TPC0_QM_GLBL_AXCACHE_AW_SHIFT 16
+#define TPC0_QM_GLBL_AXCACHE_AW_MASK 0xF0000
+
+/* TPC0_QM_IND_GW_APB_CFG */
+#define TPC0_QM_IND_GW_APB_CFG_ADDR_SHIFT 0
+#define TPC0_QM_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF
+#define TPC0_QM_IND_GW_APB_CFG_CMD_SHIFT 31
+#define TPC0_QM_IND_GW_APB_CFG_CMD_MASK 0x80000000
+
+/* TPC0_QM_IND_GW_APB_WDATA */
+#define TPC0_QM_IND_GW_APB_WDATA_VAL_SHIFT 0
+#define TPC0_QM_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_IND_GW_APB_RDATA */
+#define TPC0_QM_IND_GW_APB_RDATA_VAL_SHIFT 0
+#define TPC0_QM_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_IND_GW_APB_STATUS */
+#define TPC0_QM_IND_GW_APB_STATUS_RDY_SHIFT 0
+#define TPC0_QM_IND_GW_APB_STATUS_RDY_MASK 0x1
+#define TPC0_QM_IND_GW_APB_STATUS_ERR_SHIFT 1
+#define TPC0_QM_IND_GW_APB_STATUS_ERR_MASK 0x2
+
+/* TPC0_QM_GLBL_ERR_ADDR_LO */
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_ADDR_HI */
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_ERR_WDATA */
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0
+#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF
+
+/* TPC0_QM_GLBL_MEM_INIT_BUSY */
+#define TPC0_QM_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0
+#define TPC0_QM_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF
+
+#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
new file mode 100644
index 000000000000..f9e310ab6df2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC0_QM_REGS_H_
+#define ASIC_REG_TPC0_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC0_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC0_QM_GLBL_CFG0 0xE08000
+
+#define mmTPC0_QM_GLBL_CFG1 0xE08004
+
+#define mmTPC0_QM_GLBL_PROT 0xE08008
+
+#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_0 0xE08010
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_1 0xE08014
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_2 0xE08018
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_3 0xE0801C
+
+#define mmTPC0_QM_GLBL_SECURE_PROPS_4 0xE08020
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_0 0xE08024
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_1 0xE08028
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_2 0xE0802C
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_3 0xE08030
+
+#define mmTPC0_QM_GLBL_NON_SECURE_PROPS_4 0xE08034
+
+#define mmTPC0_QM_GLBL_STS0 0xE08038
+
+#define mmTPC0_QM_GLBL_STS1_0 0xE08040
+
+#define mmTPC0_QM_GLBL_STS1_1 0xE08044
+
+#define mmTPC0_QM_GLBL_STS1_2 0xE08048
+
+#define mmTPC0_QM_GLBL_STS1_3 0xE0804C
+
+#define mmTPC0_QM_GLBL_STS1_4 0xE08050
+
+#define mmTPC0_QM_GLBL_MSG_EN_0 0xE08054
+
+#define mmTPC0_QM_GLBL_MSG_EN_1 0xE08058
+
+#define mmTPC0_QM_GLBL_MSG_EN_2 0xE0805C
+
+#define mmTPC0_QM_GLBL_MSG_EN_3 0xE08060
+
+#define mmTPC0_QM_GLBL_MSG_EN_4 0xE08068
+
+#define mmTPC0_QM_PQ_BASE_LO_0 0xE08070
+
+#define mmTPC0_QM_PQ_BASE_LO_1 0xE08074
+
+#define mmTPC0_QM_PQ_BASE_LO_2 0xE08078
+
+#define mmTPC0_QM_PQ_BASE_LO_3 0xE0807C
+
+#define mmTPC0_QM_PQ_BASE_HI_0 0xE08080
+
+#define mmTPC0_QM_PQ_BASE_HI_1 0xE08084
+
+#define mmTPC0_QM_PQ_BASE_HI_2 0xE08088
+
+#define mmTPC0_QM_PQ_BASE_HI_3 0xE0808C
+
+#define mmTPC0_QM_PQ_SIZE_0 0xE08090
+
+#define mmTPC0_QM_PQ_SIZE_1 0xE08094
+
+#define mmTPC0_QM_PQ_SIZE_2 0xE08098
+
+#define mmTPC0_QM_PQ_SIZE_3 0xE0809C
+
+#define mmTPC0_QM_PQ_PI_0 0xE080A0
+
+#define mmTPC0_QM_PQ_PI_1 0xE080A4
+
+#define mmTPC0_QM_PQ_PI_2 0xE080A8
+
+#define mmTPC0_QM_PQ_PI_3 0xE080AC
+
+#define mmTPC0_QM_PQ_CI_0 0xE080B0
+
+#define mmTPC0_QM_PQ_CI_1 0xE080B4
+
+#define mmTPC0_QM_PQ_CI_2 0xE080B8
+
+#define mmTPC0_QM_PQ_CI_3 0xE080BC
+
+#define mmTPC0_QM_PQ_CFG0_0 0xE080C0
+
+#define mmTPC0_QM_PQ_CFG0_1 0xE080C4
+
+#define mmTPC0_QM_PQ_CFG0_2 0xE080C8
+
+#define mmTPC0_QM_PQ_CFG0_3 0xE080CC
+
+#define mmTPC0_QM_PQ_CFG1_0 0xE080D0
+
+#define mmTPC0_QM_PQ_CFG1_1 0xE080D4
+
+#define mmTPC0_QM_PQ_CFG1_2 0xE080D8
+
+#define mmTPC0_QM_PQ_CFG1_3 0xE080DC
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_0 0xE080E0
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_1 0xE080E4
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_2 0xE080E8
+
+#define mmTPC0_QM_PQ_ARUSER_31_11_3 0xE080EC
+
+#define mmTPC0_QM_PQ_STS0_0 0xE080F0
+
+#define mmTPC0_QM_PQ_STS0_1 0xE080F4
+
+#define mmTPC0_QM_PQ_STS0_2 0xE080F8
+
+#define mmTPC0_QM_PQ_STS0_3 0xE080FC
+
+#define mmTPC0_QM_PQ_STS1_0 0xE08100
+
+#define mmTPC0_QM_PQ_STS1_1 0xE08104
+
+#define mmTPC0_QM_PQ_STS1_2 0xE08108
+
+#define mmTPC0_QM_PQ_STS1_3 0xE0810C
+
+#define mmTPC0_QM_CQ_CFG0_0 0xE08110
+
+#define mmTPC0_QM_CQ_CFG0_1 0xE08114
+
+#define mmTPC0_QM_CQ_CFG0_2 0xE08118
+
+#define mmTPC0_QM_CQ_CFG0_3 0xE0811C
+
+#define mmTPC0_QM_CQ_CFG0_4 0xE08120
+
+#define mmTPC0_QM_CQ_CFG1_0 0xE08124
+
+#define mmTPC0_QM_CQ_CFG1_1 0xE08128
+
+#define mmTPC0_QM_CQ_CFG1_2 0xE0812C
+
+#define mmTPC0_QM_CQ_CFG1_3 0xE08130
+
+#define mmTPC0_QM_CQ_CFG1_4 0xE08134
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_0 0xE08138
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_1 0xE0813C
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_2 0xE08140
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_3 0xE08144
+
+#define mmTPC0_QM_CQ_ARUSER_31_11_4 0xE08148
+
+#define mmTPC0_QM_CQ_STS0_0 0xE0814C
+
+#define mmTPC0_QM_CQ_STS0_1 0xE08150
+
+#define mmTPC0_QM_CQ_STS0_2 0xE08154
+
+#define mmTPC0_QM_CQ_STS0_3 0xE08158
+
+#define mmTPC0_QM_CQ_STS0_4 0xE0815C
+
+#define mmTPC0_QM_CQ_STS1_0 0xE08160
+
+#define mmTPC0_QM_CQ_STS1_1 0xE08164
+
+#define mmTPC0_QM_CQ_STS1_2 0xE08168
+
+#define mmTPC0_QM_CQ_STS1_3 0xE0816C
+
+#define mmTPC0_QM_CQ_STS1_4 0xE08170
+
+#define mmTPC0_QM_CQ_PTR_LO_0 0xE08174
+
+#define mmTPC0_QM_CQ_PTR_HI_0 0xE08178
+
+#define mmTPC0_QM_CQ_TSIZE_0 0xE0817C
+
+#define mmTPC0_QM_CQ_CTL_0 0xE08180
+
+#define mmTPC0_QM_CQ_PTR_LO_1 0xE08184
+
+#define mmTPC0_QM_CQ_PTR_HI_1 0xE08188
+
+#define mmTPC0_QM_CQ_TSIZE_1 0xE0818C
+
+#define mmTPC0_QM_CQ_CTL_1 0xE08190
+
+#define mmTPC0_QM_CQ_PTR_LO_2 0xE08194
+
+#define mmTPC0_QM_CQ_PTR_HI_2 0xE08198
+
+#define mmTPC0_QM_CQ_TSIZE_2 0xE0819C
+
+#define mmTPC0_QM_CQ_CTL_2 0xE081A0
+
+#define mmTPC0_QM_CQ_PTR_LO_3 0xE081A4
+
+#define mmTPC0_QM_CQ_PTR_HI_3 0xE081A8
+
+#define mmTPC0_QM_CQ_TSIZE_3 0xE081AC
+
+#define mmTPC0_QM_CQ_CTL_3 0xE081B0
+
+#define mmTPC0_QM_CQ_PTR_LO_4 0xE081B4
+
+#define mmTPC0_QM_CQ_PTR_HI_4 0xE081B8
+
+#define mmTPC0_QM_CQ_TSIZE_4 0xE081BC
+
+#define mmTPC0_QM_CQ_CTL_4 0xE081C0
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_0 0xE081C4
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_1 0xE081C8
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_2 0xE081CC
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_3 0xE081D0
+
+#define mmTPC0_QM_CQ_PTR_LO_STS_4 0xE081D4
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_0 0xE081D8
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_1 0xE081DC
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_2 0xE081E0
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_3 0xE081E4
+
+#define mmTPC0_QM_CQ_PTR_HI_STS_4 0xE081E8
+
+#define mmTPC0_QM_CQ_TSIZE_STS_0 0xE081EC
+
+#define mmTPC0_QM_CQ_TSIZE_STS_1 0xE081F0
+
+#define mmTPC0_QM_CQ_TSIZE_STS_2 0xE081F4
+
+#define mmTPC0_QM_CQ_TSIZE_STS_3 0xE081F8
+
+#define mmTPC0_QM_CQ_TSIZE_STS_4 0xE081FC
+
+#define mmTPC0_QM_CQ_CTL_STS_0 0xE08200
+
+#define mmTPC0_QM_CQ_CTL_STS_1 0xE08204
+
+#define mmTPC0_QM_CQ_CTL_STS_2 0xE08208
+
+#define mmTPC0_QM_CQ_CTL_STS_3 0xE0820C
+
+#define mmTPC0_QM_CQ_CTL_STS_4 0xE08210
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_0 0xE08214
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_1 0xE08218
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_2 0xE0821C
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_3 0xE08220
+
+#define mmTPC0_QM_CQ_IFIFO_CNT_4 0xE08224
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 0xE08228
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_1 0xE0822C
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_2 0xE08230
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_3 0xE08234
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_4 0xE08238
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 0xE0823C
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_1 0xE08240
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_2 0xE08244
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_3 0xE08248
+
+#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_4 0xE0824C
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 0xE08250
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_1 0xE08254
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_2 0xE08258
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_3 0xE0825C
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_4 0xE08260
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 0xE08264
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_1 0xE08268
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_2 0xE0826C
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_3 0xE08270
+
+#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_4 0xE08274
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 0xE08278
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_1 0xE0827C
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_2 0xE08280
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_3 0xE08284
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_4 0xE08288
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 0xE0828C
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_1 0xE08290
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_2 0xE08294
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_3 0xE08298
+
+#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_4 0xE0829C
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 0xE082A0
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_1 0xE082A4
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_2 0xE082A8
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_3 0xE082AC
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_4 0xE082B0
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 0xE082B4
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_1 0xE082B8
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_2 0xE082BC
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_3 0xE082C0
+
+#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_4 0xE082C4
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 0xE082C8
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_1 0xE082CC
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_2 0xE082D0
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_3 0xE082D4
+
+#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_4 0xE082D8
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE082E0
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE082E4
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE082E8
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE082EC
+
+#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE082F0
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE082F4
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE082F8
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE082FC
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE08300
+
+#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE08304
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_0 0xE08308
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_1 0xE0830C
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_2 0xE08310
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_3 0xE08314
+
+#define mmTPC0_QM_CP_FENCE0_RDATA_4 0xE08318
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_0 0xE0831C
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_1 0xE08320
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_2 0xE08324
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_3 0xE08328
+
+#define mmTPC0_QM_CP_FENCE1_RDATA_4 0xE0832C
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_0 0xE08330
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_1 0xE08334
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_2 0xE08338
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_3 0xE0833C
+
+#define mmTPC0_QM_CP_FENCE2_RDATA_4 0xE08340
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_0 0xE08344
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_1 0xE08348
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_2 0xE0834C
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_3 0xE08350
+
+#define mmTPC0_QM_CP_FENCE3_RDATA_4 0xE08354
+
+#define mmTPC0_QM_CP_FENCE0_CNT_0 0xE08358
+
+#define mmTPC0_QM_CP_FENCE0_CNT_1 0xE0835C
+
+#define mmTPC0_QM_CP_FENCE0_CNT_2 0xE08360
+
+#define mmTPC0_QM_CP_FENCE0_CNT_3 0xE08364
+
+#define mmTPC0_QM_CP_FENCE0_CNT_4 0xE08368
+
+#define mmTPC0_QM_CP_FENCE1_CNT_0 0xE0836C
+
+#define mmTPC0_QM_CP_FENCE1_CNT_1 0xE08370
+
+#define mmTPC0_QM_CP_FENCE1_CNT_2 0xE08374
+
+#define mmTPC0_QM_CP_FENCE1_CNT_3 0xE08378
+
+#define mmTPC0_QM_CP_FENCE1_CNT_4 0xE0837C
+
+#define mmTPC0_QM_CP_FENCE2_CNT_0 0xE08380
+
+#define mmTPC0_QM_CP_FENCE2_CNT_1 0xE08384
+
+#define mmTPC0_QM_CP_FENCE2_CNT_2 0xE08388
+
+#define mmTPC0_QM_CP_FENCE2_CNT_3 0xE0838C
+
+#define mmTPC0_QM_CP_FENCE2_CNT_4 0xE08390
+
+#define mmTPC0_QM_CP_FENCE3_CNT_0 0xE08394
+
+#define mmTPC0_QM_CP_FENCE3_CNT_1 0xE08398
+
+#define mmTPC0_QM_CP_FENCE3_CNT_2 0xE0839C
+
+#define mmTPC0_QM_CP_FENCE3_CNT_3 0xE083A0
+
+#define mmTPC0_QM_CP_FENCE3_CNT_4 0xE083A4
+
+#define mmTPC0_QM_CP_STS_0 0xE083A8
+
+#define mmTPC0_QM_CP_STS_1 0xE083AC
+
+#define mmTPC0_QM_CP_STS_2 0xE083B0
+
+#define mmTPC0_QM_CP_STS_3 0xE083B4
+
+#define mmTPC0_QM_CP_STS_4 0xE083B8
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_0 0xE083BC
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_1 0xE083C0
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_2 0xE083C4
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_3 0xE083C8
+
+#define mmTPC0_QM_CP_CURRENT_INST_LO_4 0xE083CC
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_0 0xE083D0
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_1 0xE083D4
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_2 0xE083D8
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_3 0xE083DC
+
+#define mmTPC0_QM_CP_CURRENT_INST_HI_4 0xE083E0
+
+#define mmTPC0_QM_CP_BARRIER_CFG_0 0xE083F4
+
+#define mmTPC0_QM_CP_BARRIER_CFG_1 0xE083F8
+
+#define mmTPC0_QM_CP_BARRIER_CFG_2 0xE083FC
+
+#define mmTPC0_QM_CP_BARRIER_CFG_3 0xE08400
+
+#define mmTPC0_QM_CP_BARRIER_CFG_4 0xE08404
+
+#define mmTPC0_QM_CP_DBG_0_0 0xE08408
+
+#define mmTPC0_QM_CP_DBG_0_1 0xE0840C
+
+#define mmTPC0_QM_CP_DBG_0_2 0xE08410
+
+#define mmTPC0_QM_CP_DBG_0_3 0xE08414
+
+#define mmTPC0_QM_CP_DBG_0_4 0xE08418
+
+#define mmTPC0_QM_CP_ARUSER_31_11_0 0xE0841C
+
+#define mmTPC0_QM_CP_ARUSER_31_11_1 0xE08420
+
+#define mmTPC0_QM_CP_ARUSER_31_11_2 0xE08424
+
+#define mmTPC0_QM_CP_ARUSER_31_11_3 0xE08428
+
+#define mmTPC0_QM_CP_ARUSER_31_11_4 0xE0842C
+
+#define mmTPC0_QM_CP_AWUSER_31_11_0 0xE08430
+
+#define mmTPC0_QM_CP_AWUSER_31_11_1 0xE08434
+
+#define mmTPC0_QM_CP_AWUSER_31_11_2 0xE08438
+
+#define mmTPC0_QM_CP_AWUSER_31_11_3 0xE0843C
+
+#define mmTPC0_QM_CP_AWUSER_31_11_4 0xE08440
+
+#define mmTPC0_QM_ARB_CFG_0 0xE08A00
+
+#define mmTPC0_QM_ARB_CHOISE_Q_PUSH 0xE08A04
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_0 0xE08A08
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_1 0xE08A0C
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_2 0xE08A10
+
+#define mmTPC0_QM_ARB_WRR_WEIGHT_3 0xE08A14
+
+#define mmTPC0_QM_ARB_CFG_1 0xE08A18
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_0 0xE08A20
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_1 0xE08A24
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_2 0xE08A28
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_3 0xE08A2C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_4 0xE08A30
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_5 0xE08A34
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_6 0xE08A38
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_7 0xE08A3C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_8 0xE08A40
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_9 0xE08A44
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_10 0xE08A48
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_11 0xE08A4C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_12 0xE08A50
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_13 0xE08A54
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_14 0xE08A58
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_15 0xE08A5C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_16 0xE08A60
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_17 0xE08A64
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_18 0xE08A68
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_19 0xE08A6C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_20 0xE08A70
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_21 0xE08A74
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_22 0xE08A78
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_23 0xE08A7C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_24 0xE08A80
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_25 0xE08A84
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_26 0xE08A88
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_27 0xE08A8C
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_28 0xE08A90
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_29 0xE08A94
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_30 0xE08A98
+
+#define mmTPC0_QM_ARB_MST_AVAIL_CRED_31 0xE08A9C
+
+#define mmTPC0_QM_ARB_MST_CRED_INC 0xE08AA0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE08AA4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE08AA8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE08AAC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE08AB0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE08AB4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE08AB8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE08ABC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE08AC0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE08AC4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE08AC8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE08ACC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE08AD0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE08AD4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE08AD8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE08ADC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE08AE0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE08AE4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE08AE8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE08AEC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE08AF0
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE08AF4
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE08AF8
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE08AFC
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE08B00
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE08B04
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE08B08
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE08B0C
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE08B10
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE08B14
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE08B18
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE08B1C
+
+#define mmTPC0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE08B20
+
+#define mmTPC0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE08B28
+
+#define mmTPC0_QM_ARB_MST_SLAVE_EN 0xE08B2C
+
+#define mmTPC0_QM_ARB_MST_QUIET_PER 0xE08B34
+
+#define mmTPC0_QM_ARB_SLV_CHOISE_WDT 0xE08B38
+
+#define mmTPC0_QM_ARB_SLV_ID 0xE08B3C
+
+#define mmTPC0_QM_ARB_MSG_MAX_INFLIGHT 0xE08B44
+
+#define mmTPC0_QM_ARB_MSG_AWUSER_31_11 0xE08B48
+
+#define mmTPC0_QM_ARB_MSG_AWUSER_SEC_PROP 0xE08B4C
+
+#define mmTPC0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE08B50
+
+#define mmTPC0_QM_ARB_BASE_LO 0xE08B54
+
+#define mmTPC0_QM_ARB_BASE_HI 0xE08B58
+
+#define mmTPC0_QM_ARB_STATE_STS 0xE08B80
+
+#define mmTPC0_QM_ARB_CHOISE_FULLNESS_STS 0xE08B84
+
+#define mmTPC0_QM_ARB_MSG_STS 0xE08B88
+
+#define mmTPC0_QM_ARB_SLV_CHOISE_Q_HEAD 0xE08B8C
+
+#define mmTPC0_QM_ARB_ERR_CAUSE 0xE08B9C
+
+#define mmTPC0_QM_ARB_ERR_MSG_EN 0xE08BA0
+
+#define mmTPC0_QM_ARB_ERR_STS_DRP 0xE08BA8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_0 0xE08BB0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_1 0xE08BB4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_2 0xE08BB8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_3 0xE08BBC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_4 0xE08BC0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_5 0xE08BC4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_6 0xE08BC8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_7 0xE08BCC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_8 0xE08BD0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_9 0xE08BD4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_10 0xE08BD8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_11 0xE08BDC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_12 0xE08BE0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_13 0xE08BE4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_14 0xE08BE8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_15 0xE08BEC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_16 0xE08BF0
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_17 0xE08BF4
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_18 0xE08BF8
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_19 0xE08BFC
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_20 0xE08C00
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_21 0xE08C04
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_22 0xE08C08
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_23 0xE08C0C
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_24 0xE08C10
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_25 0xE08C14
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_26 0xE08C18
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_27 0xE08C1C
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_28 0xE08C20
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_29 0xE08C24
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_30 0xE08C28
+
+#define mmTPC0_QM_ARB_MST_CRED_STS_31 0xE08C2C
+
+#define mmTPC0_QM_CGM_CFG 0xE08C70
+
+#define mmTPC0_QM_CGM_STS 0xE08C74
+
+#define mmTPC0_QM_CGM_CFG1 0xE08C78
+
+#define mmTPC0_QM_LOCAL_RANGE_BASE 0xE08C80
+
+#define mmTPC0_QM_LOCAL_RANGE_SIZE 0xE08C84
+
+#define mmTPC0_QM_CSMR_STRICT_PRIO_CFG 0xE08C90
+
+#define mmTPC0_QM_HBW_RD_RATE_LIM_CFG_1 0xE08C94
+
+#define mmTPC0_QM_LBW_WR_RATE_LIM_CFG_0 0xE08C98
+
+#define mmTPC0_QM_LBW_WR_RATE_LIM_CFG_1 0xE08C9C
+
+#define mmTPC0_QM_HBW_RD_RATE_LIM_CFG_0 0xE08CA0
+
+#define mmTPC0_QM_GLBL_AXCACHE 0xE08CA4
+
+#define mmTPC0_QM_IND_GW_APB_CFG 0xE08CB0
+
+#define mmTPC0_QM_IND_GW_APB_WDATA 0xE08CB4
+
+#define mmTPC0_QM_IND_GW_APB_RDATA 0xE08CB8
+
+#define mmTPC0_QM_IND_GW_APB_STATUS 0xE08CBC
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08CD0
+
+#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08CD4
+
+#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08CD8
+
+#define mmTPC0_QM_GLBL_MEM_INIT_BUSY 0xE08D00
+
+#endif /* ASIC_REG_TPC0_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
new file mode 100644
index 000000000000..6736c476d979
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_CFG_REGS_H_
+#define ASIC_REG_TPC1_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE46418
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE4641C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46420
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE46424
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46428
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE4642C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46430
+
+#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46434
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE46438
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE4643C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46440
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46444
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE46448
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE4644C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46450
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE46454
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46458
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE4645C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46460
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46464
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE46468
+
+#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE4646C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46470
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE46474
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE46478
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE4647C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE46480
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE46484
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE46488
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE4648C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE46490
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE46494
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE46498
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE4649C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE464C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE464C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE464C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE464CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE464D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE464D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE464D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE464DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE464E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE464E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE464E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE464EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE464F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE464F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE464F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE464FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46500
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE46504
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46508
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE4650C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46510
+
+#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46514
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE46518
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE4651C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46520
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46524
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE46528
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE4652C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46530
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE46534
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE46538
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE4653C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE46540
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE46544
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE46548
+
+#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE4654C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE46550
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE46554
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE46558
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE4655C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE46560
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE46564
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE46568
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE4656C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE46570
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE46574
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE46578
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE4657C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46580
+
+#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE46584
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46588
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE4658C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE46590
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46594
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46598
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE4659C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE465A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE465A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE465A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE465AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE465B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE465B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE465B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE465BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE465C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE465C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE465C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE465CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE465D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE465D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE465D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE465DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE465E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE465E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE465E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE465EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE465F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE465F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE465F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE465FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE46600
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE46604
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE46608
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE4660C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE46610
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE46614
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE46618
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE4661C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE46620
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE46624
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE46628
+
+#define mmTPC1_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE4662C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE46630
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE46634
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE46638
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE4663C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE46640
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE46644
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE46648
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE4664C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE46650
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE46654
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE46658
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE4665C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE46660
+
+#define mmTPC1_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE46664
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE46668
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE4666C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE46670
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE46674
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE46678
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE4667C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE46680
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE46684
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE46688
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE4668C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE46690
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE46694
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE46698
+
+#define mmTPC1_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE4669C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE466A0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE466A4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE466A8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE466AC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE466B0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE466B4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE466B8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE466BC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE466C0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE466C4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE466C8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE466CC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE466D0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE466D4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE466D8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE466DC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE466E0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE466E4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE466E8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE466EC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE466F0
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE466F4
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE466F8
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE466FC
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE46700
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE46704
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE46708
+
+#define mmTPC1_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE4670C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE46710
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE46714
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE46718
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE4671C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE46720
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE46724
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE46728
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE4672C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE46730
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE46734
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE46738
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE4673C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE46740
+
+#define mmTPC1_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE46744
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE46748
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE4674C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE46750
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE46754
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE46758
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE4675C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE46760
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE46764
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE46768
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE4676C
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE46770
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE46774
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE46778
+
+#define mmTPC1_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE4677C
+
+#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46780
+
+#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE46784
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46788
+
+#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE4678C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46790
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE46794
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46798
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE4679C
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE467A0
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE467A4
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE467A8
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE467AC
+
+#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE467B0
+
+#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE467B4
+
+#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE467B8
+
+#define mmTPC1_CFG_KERNEL_KERNEL_ID 0xE467BC
+
+#define mmTPC1_CFG_KERNEL_SRF_0 0xE467C0
+
+#define mmTPC1_CFG_KERNEL_SRF_1 0xE467C4
+
+#define mmTPC1_CFG_KERNEL_SRF_2 0xE467C8
+
+#define mmTPC1_CFG_KERNEL_SRF_3 0xE467CC
+
+#define mmTPC1_CFG_KERNEL_SRF_4 0xE467D0
+
+#define mmTPC1_CFG_KERNEL_SRF_5 0xE467D4
+
+#define mmTPC1_CFG_KERNEL_SRF_6 0xE467D8
+
+#define mmTPC1_CFG_KERNEL_SRF_7 0xE467DC
+
+#define mmTPC1_CFG_KERNEL_SRF_8 0xE467E0
+
+#define mmTPC1_CFG_KERNEL_SRF_9 0xE467E4
+
+#define mmTPC1_CFG_KERNEL_SRF_10 0xE467E8
+
+#define mmTPC1_CFG_KERNEL_SRF_11 0xE467EC
+
+#define mmTPC1_CFG_KERNEL_SRF_12 0xE467F0
+
+#define mmTPC1_CFG_KERNEL_SRF_13 0xE467F4
+
+#define mmTPC1_CFG_KERNEL_SRF_14 0xE467F8
+
+#define mmTPC1_CFG_KERNEL_SRF_15 0xE467FC
+
+#define mmTPC1_CFG_KERNEL_SRF_16 0xE46800
+
+#define mmTPC1_CFG_KERNEL_SRF_17 0xE46804
+
+#define mmTPC1_CFG_KERNEL_SRF_18 0xE46808
+
+#define mmTPC1_CFG_KERNEL_SRF_19 0xE4680C
+
+#define mmTPC1_CFG_KERNEL_SRF_20 0xE46810
+
+#define mmTPC1_CFG_KERNEL_SRF_21 0xE46814
+
+#define mmTPC1_CFG_KERNEL_SRF_22 0xE46818
+
+#define mmTPC1_CFG_KERNEL_SRF_23 0xE4681C
+
+#define mmTPC1_CFG_KERNEL_SRF_24 0xE46820
+
+#define mmTPC1_CFG_KERNEL_SRF_25 0xE46824
+
+#define mmTPC1_CFG_KERNEL_SRF_26 0xE46828
+
+#define mmTPC1_CFG_KERNEL_SRF_27 0xE4682C
+
+#define mmTPC1_CFG_KERNEL_SRF_28 0xE46830
+
+#define mmTPC1_CFG_KERNEL_SRF_29 0xE46834
+
+#define mmTPC1_CFG_KERNEL_SRF_30 0xE46838
+
+#define mmTPC1_CFG_KERNEL_SRF_31 0xE4683C
+
+#define mmTPC1_CFG_ROUND_CSR 0xE468FC
+
+#define mmTPC1_CFG_PROT 0xE46900
+
+#define mmTPC1_CFG_SEMAPHORE 0xE46908
+
+#define mmTPC1_CFG_VFLAGS 0xE4690C
+
+#define mmTPC1_CFG_SFLAGS 0xE46910
+
+#define mmTPC1_CFG_LFSR_POLYNOM 0xE46918
+
+#define mmTPC1_CFG_STATUS 0xE4691C
+
+#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46920
+
+#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46924
+
+#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4692C
+
+#define mmTPC1_CFG_TPC_CMD 0xE46930
+
+#define mmTPC1_CFG_TPC_EXECUTE 0xE46938
+
+#define mmTPC1_CFG_TPC_STALL 0xE4693C
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46940
+
+#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46944
+
+#define mmTPC1_CFG_RD_RATE_LIMIT 0xE46948
+
+#define mmTPC1_CFG_WR_RATE_LIMIT 0xE46950
+
+#define mmTPC1_CFG_MSS_CONFIG 0xE46954
+
+#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46958
+
+#define mmTPC1_CFG_TPC_INTR_MASK 0xE4695C
+
+#define mmTPC1_CFG_WQ_CREDITS 0xE46960
+
+#define mmTPC1_CFG_ARUSER_LO 0xE46964
+
+#define mmTPC1_CFG_ARUSER_HI 0xE46968
+
+#define mmTPC1_CFG_AWUSER_LO 0xE4696C
+
+#define mmTPC1_CFG_AWUSER_HI 0xE46970
+
+#define mmTPC1_CFG_OPCODE_EXEC 0xE46974
+
+#define mmTPC1_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE46978
+
+#define mmTPC1_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE4697C
+
+#define mmTPC1_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE46980
+
+#define mmTPC1_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE46984
+
+#define mmTPC1_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE46988
+
+#define mmTPC1_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE4698C
+
+#define mmTPC1_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE46990
+
+#define mmTPC1_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE46994
+
+#define mmTPC1_CFG_TSB_CFG_MAX_SIZE 0xE46998
+
+#define mmTPC1_CFG_TSB_CFG 0xE4699C
+
+#define mmTPC1_CFG_DBGMEM_ADD 0xE469A0
+
+#define mmTPC1_CFG_DBGMEM_DATA_WR 0xE469A4
+
+#define mmTPC1_CFG_DBGMEM_DATA_RD 0xE469A8
+
+#define mmTPC1_CFG_DBGMEM_CTRL 0xE469AC
+
+#define mmTPC1_CFG_DBGMEM_RC 0xE469B0
+
+#define mmTPC1_CFG_TSB_INFLIGHT_CNTR 0xE469B4
+
+#define mmTPC1_CFG_WQ_INFLIGHT_CNTR 0xE469B8
+
+#define mmTPC1_CFG_WQ_LBW_TOTAL_CNTR 0xE469BC
+
+#define mmTPC1_CFG_WQ_HBW_TOTAL_CNTR 0xE469C0
+
+#define mmTPC1_CFG_IRQ_OCCOUPY_CNTR 0xE469C4
+
+#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE469D0
+
+#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE469D4
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE469D8
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE469DC
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE469E0
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE469E4
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE469E8
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE469EC
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE469F0
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE469F4
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE469F8
+
+#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE469FC
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00
+
+#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04
+
+#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08
+
+#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A18
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A1C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A20
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A24
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A28
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A2C
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A30
+
+#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A34
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A38
+
+#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A3C
+
+#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A40
+
+#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A44
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A48
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A4C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A50
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A54
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A58
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A5C
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A60
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A64
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A68
+
+#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A6C
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A70
+
+#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A74
+
+#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46A78
+
+#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46A7C
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46A80
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46A84
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46A88
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46A8C
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46A90
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46A94
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46A98
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46A9C
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AA0
+
+#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46AA4
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AA8
+
+#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AAC
+
+#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AB0
+
+#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AB4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AB8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46ABC
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46AC0
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46AC4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46AC8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46ACC
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46AD0
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46AD4
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46AD8
+
+#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46ADC
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46AE0
+
+#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46AE4
+
+#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46AE8
+
+#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46AEC
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46AF0
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46AF4
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46AF8
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46AFC
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B00
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B04
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B08
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B0C
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B10
+
+#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B14
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B18
+
+#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B1C
+
+#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B20
+
+#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B24
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B28
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B2C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B30
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B34
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46B38
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46B3C
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46B40
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46B44
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46B48
+
+#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46B4C
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46B50
+
+#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46B54
+
+#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46B58
+
+#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46B5C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46B60
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46B64
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46B68
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46B6C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46B70
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46B74
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46B78
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46B7C
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46B80
+
+#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46B84
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46B88
+
+#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46B8C
+
+#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46B90
+
+#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46B94
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46B98
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46B9C
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46BA0
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46BA4
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46BA8
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46BAC
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46BB0
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46BB4
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46BB8
+
+#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46BBC
+
+#define mmTPC1_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE46BC0
+
+#define mmTPC1_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE46BC4
+
+#define mmTPC1_CFG_QM_TENSOR_8_PADDING_VALUE 0xE46BC8
+
+#define mmTPC1_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE46BCC
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE46BD0
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE46BD4
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE46BD8
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE46BDC
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE46BE0
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE46BE4
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE46BE8
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE46BEC
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE46BF0
+
+#define mmTPC1_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE46BF4
+
+#define mmTPC1_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE46BF8
+
+#define mmTPC1_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE46BFC
+
+#define mmTPC1_CFG_QM_TENSOR_9_PADDING_VALUE 0xE46C00
+
+#define mmTPC1_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE46C04
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE46C08
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE46C0C
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE46C10
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE46C14
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE46C18
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE46C1C
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE46C20
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE46C24
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE46C28
+
+#define mmTPC1_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE46C2C
+
+#define mmTPC1_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE46C30
+
+#define mmTPC1_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE46C34
+
+#define mmTPC1_CFG_QM_TENSOR_10_PADDING_VALUE 0xE46C38
+
+#define mmTPC1_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE46C3C
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE46C40
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE46C44
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE46C48
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE46C4C
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE46C50
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE46C54
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE46C58
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE46C5C
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE46C60
+
+#define mmTPC1_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE46C64
+
+#define mmTPC1_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE46C68
+
+#define mmTPC1_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE46C6C
+
+#define mmTPC1_CFG_QM_TENSOR_11_PADDING_VALUE 0xE46C70
+
+#define mmTPC1_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE46C74
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE46C78
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE46C7C
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE46C80
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE46C84
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE46C88
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE46C8C
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE46C90
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE46C94
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE46C98
+
+#define mmTPC1_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE46C9C
+
+#define mmTPC1_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE46CA0
+
+#define mmTPC1_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE46CA4
+
+#define mmTPC1_CFG_QM_TENSOR_12_PADDING_VALUE 0xE46CA8
+
+#define mmTPC1_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE46CAC
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE46CB0
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE46CB4
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE46CB8
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE46CBC
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE46CC0
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE46CC4
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE46CC8
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE46CCC
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE46CD0
+
+#define mmTPC1_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE46CD4
+
+#define mmTPC1_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE46CD8
+
+#define mmTPC1_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE46CDC
+
+#define mmTPC1_CFG_QM_TENSOR_13_PADDING_VALUE 0xE46CE0
+
+#define mmTPC1_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE46CE4
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE46CE8
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE46CEC
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE46CF0
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE46CF4
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE46CF8
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE46CFC
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE46D00
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE46D04
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE46D08
+
+#define mmTPC1_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE46D0C
+
+#define mmTPC1_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE46D10
+
+#define mmTPC1_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE46D14
+
+#define mmTPC1_CFG_QM_TENSOR_14_PADDING_VALUE 0xE46D18
+
+#define mmTPC1_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE46D1C
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE46D20
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE46D24
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE46D28
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE46D2C
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE46D30
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE46D34
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE46D38
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE46D3C
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE46D40
+
+#define mmTPC1_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE46D44
+
+#define mmTPC1_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE46D48
+
+#define mmTPC1_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE46D4C
+
+#define mmTPC1_CFG_QM_TENSOR_15_PADDING_VALUE 0xE46D50
+
+#define mmTPC1_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE46D54
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE46D58
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE46D5C
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE46D60
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE46D64
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE46D68
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE46D6C
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE46D70
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE46D74
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE46D78
+
+#define mmTPC1_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE46D7C
+
+#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D80
+
+#define mmTPC1_CFG_QM_SYNC_OBJECT_ADDR 0xE46D84
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46D88
+
+#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46D8C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46D90
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46D94
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46D98
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46D9C
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46DA0
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46DA4
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46DA8
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46DAC
+
+#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46DB0
+
+#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46DB4
+
+#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46DB8
+
+#define mmTPC1_CFG_QM_KERNEL_ID 0xE46DBC
+
+#define mmTPC1_CFG_QM_SRF_0 0xE46DC0
+
+#define mmTPC1_CFG_QM_SRF_1 0xE46DC4
+
+#define mmTPC1_CFG_QM_SRF_2 0xE46DC8
+
+#define mmTPC1_CFG_QM_SRF_3 0xE46DCC
+
+#define mmTPC1_CFG_QM_SRF_4 0xE46DD0
+
+#define mmTPC1_CFG_QM_SRF_5 0xE46DD4
+
+#define mmTPC1_CFG_QM_SRF_6 0xE46DD8
+
+#define mmTPC1_CFG_QM_SRF_7 0xE46DDC
+
+#define mmTPC1_CFG_QM_SRF_8 0xE46DE0
+
+#define mmTPC1_CFG_QM_SRF_9 0xE46DE4
+
+#define mmTPC1_CFG_QM_SRF_10 0xE46DE8
+
+#define mmTPC1_CFG_QM_SRF_11 0xE46DEC
+
+#define mmTPC1_CFG_QM_SRF_12 0xE46DF0
+
+#define mmTPC1_CFG_QM_SRF_13 0xE46DF4
+
+#define mmTPC1_CFG_QM_SRF_14 0xE46DF8
+
+#define mmTPC1_CFG_QM_SRF_15 0xE46DFC
+
+#define mmTPC1_CFG_QM_SRF_16 0xE46E00
+
+#define mmTPC1_CFG_QM_SRF_17 0xE46E04
+
+#define mmTPC1_CFG_QM_SRF_18 0xE46E08
+
+#define mmTPC1_CFG_QM_SRF_19 0xE46E0C
+
+#define mmTPC1_CFG_QM_SRF_20 0xE46E10
+
+#define mmTPC1_CFG_QM_SRF_21 0xE46E14
+
+#define mmTPC1_CFG_QM_SRF_22 0xE46E18
+
+#define mmTPC1_CFG_QM_SRF_23 0xE46E1C
+
+#define mmTPC1_CFG_QM_SRF_24 0xE46E20
+
+#define mmTPC1_CFG_QM_SRF_25 0xE46E24
+
+#define mmTPC1_CFG_QM_SRF_26 0xE46E28
+
+#define mmTPC1_CFG_QM_SRF_27 0xE46E2C
+
+#define mmTPC1_CFG_QM_SRF_28 0xE46E30
+
+#define mmTPC1_CFG_QM_SRF_29 0xE46E34
+
+#define mmTPC1_CFG_QM_SRF_30 0xE46E38
+
+#define mmTPC1_CFG_QM_SRF_31 0xE46E3C
+
+#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
new file mode 100644
index 000000000000..af10ef7a87d9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC1_QM_REGS_H_
+#define ASIC_REG_TPC1_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC1_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC1_QM_GLBL_CFG0 0xE48000
+
+#define mmTPC1_QM_GLBL_CFG1 0xE48004
+
+#define mmTPC1_QM_GLBL_PROT 0xE48008
+
+#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_0 0xE48010
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_1 0xE48014
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_2 0xE48018
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_3 0xE4801C
+
+#define mmTPC1_QM_GLBL_SECURE_PROPS_4 0xE48020
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_0 0xE48024
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_1 0xE48028
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_2 0xE4802C
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_3 0xE48030
+
+#define mmTPC1_QM_GLBL_NON_SECURE_PROPS_4 0xE48034
+
+#define mmTPC1_QM_GLBL_STS0 0xE48038
+
+#define mmTPC1_QM_GLBL_STS1_0 0xE48040
+
+#define mmTPC1_QM_GLBL_STS1_1 0xE48044
+
+#define mmTPC1_QM_GLBL_STS1_2 0xE48048
+
+#define mmTPC1_QM_GLBL_STS1_3 0xE4804C
+
+#define mmTPC1_QM_GLBL_STS1_4 0xE48050
+
+#define mmTPC1_QM_GLBL_MSG_EN_0 0xE48054
+
+#define mmTPC1_QM_GLBL_MSG_EN_1 0xE48058
+
+#define mmTPC1_QM_GLBL_MSG_EN_2 0xE4805C
+
+#define mmTPC1_QM_GLBL_MSG_EN_3 0xE48060
+
+#define mmTPC1_QM_GLBL_MSG_EN_4 0xE48068
+
+#define mmTPC1_QM_PQ_BASE_LO_0 0xE48070
+
+#define mmTPC1_QM_PQ_BASE_LO_1 0xE48074
+
+#define mmTPC1_QM_PQ_BASE_LO_2 0xE48078
+
+#define mmTPC1_QM_PQ_BASE_LO_3 0xE4807C
+
+#define mmTPC1_QM_PQ_BASE_HI_0 0xE48080
+
+#define mmTPC1_QM_PQ_BASE_HI_1 0xE48084
+
+#define mmTPC1_QM_PQ_BASE_HI_2 0xE48088
+
+#define mmTPC1_QM_PQ_BASE_HI_3 0xE4808C
+
+#define mmTPC1_QM_PQ_SIZE_0 0xE48090
+
+#define mmTPC1_QM_PQ_SIZE_1 0xE48094
+
+#define mmTPC1_QM_PQ_SIZE_2 0xE48098
+
+#define mmTPC1_QM_PQ_SIZE_3 0xE4809C
+
+#define mmTPC1_QM_PQ_PI_0 0xE480A0
+
+#define mmTPC1_QM_PQ_PI_1 0xE480A4
+
+#define mmTPC1_QM_PQ_PI_2 0xE480A8
+
+#define mmTPC1_QM_PQ_PI_3 0xE480AC
+
+#define mmTPC1_QM_PQ_CI_0 0xE480B0
+
+#define mmTPC1_QM_PQ_CI_1 0xE480B4
+
+#define mmTPC1_QM_PQ_CI_2 0xE480B8
+
+#define mmTPC1_QM_PQ_CI_3 0xE480BC
+
+#define mmTPC1_QM_PQ_CFG0_0 0xE480C0
+
+#define mmTPC1_QM_PQ_CFG0_1 0xE480C4
+
+#define mmTPC1_QM_PQ_CFG0_2 0xE480C8
+
+#define mmTPC1_QM_PQ_CFG0_3 0xE480CC
+
+#define mmTPC1_QM_PQ_CFG1_0 0xE480D0
+
+#define mmTPC1_QM_PQ_CFG1_1 0xE480D4
+
+#define mmTPC1_QM_PQ_CFG1_2 0xE480D8
+
+#define mmTPC1_QM_PQ_CFG1_3 0xE480DC
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_0 0xE480E0
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_1 0xE480E4
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_2 0xE480E8
+
+#define mmTPC1_QM_PQ_ARUSER_31_11_3 0xE480EC
+
+#define mmTPC1_QM_PQ_STS0_0 0xE480F0
+
+#define mmTPC1_QM_PQ_STS0_1 0xE480F4
+
+#define mmTPC1_QM_PQ_STS0_2 0xE480F8
+
+#define mmTPC1_QM_PQ_STS0_3 0xE480FC
+
+#define mmTPC1_QM_PQ_STS1_0 0xE48100
+
+#define mmTPC1_QM_PQ_STS1_1 0xE48104
+
+#define mmTPC1_QM_PQ_STS1_2 0xE48108
+
+#define mmTPC1_QM_PQ_STS1_3 0xE4810C
+
+#define mmTPC1_QM_CQ_CFG0_0 0xE48110
+
+#define mmTPC1_QM_CQ_CFG0_1 0xE48114
+
+#define mmTPC1_QM_CQ_CFG0_2 0xE48118
+
+#define mmTPC1_QM_CQ_CFG0_3 0xE4811C
+
+#define mmTPC1_QM_CQ_CFG0_4 0xE48120
+
+#define mmTPC1_QM_CQ_CFG1_0 0xE48124
+
+#define mmTPC1_QM_CQ_CFG1_1 0xE48128
+
+#define mmTPC1_QM_CQ_CFG1_2 0xE4812C
+
+#define mmTPC1_QM_CQ_CFG1_3 0xE48130
+
+#define mmTPC1_QM_CQ_CFG1_4 0xE48134
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_0 0xE48138
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_1 0xE4813C
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_2 0xE48140
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_3 0xE48144
+
+#define mmTPC1_QM_CQ_ARUSER_31_11_4 0xE48148
+
+#define mmTPC1_QM_CQ_STS0_0 0xE4814C
+
+#define mmTPC1_QM_CQ_STS0_1 0xE48150
+
+#define mmTPC1_QM_CQ_STS0_2 0xE48154
+
+#define mmTPC1_QM_CQ_STS0_3 0xE48158
+
+#define mmTPC1_QM_CQ_STS0_4 0xE4815C
+
+#define mmTPC1_QM_CQ_STS1_0 0xE48160
+
+#define mmTPC1_QM_CQ_STS1_1 0xE48164
+
+#define mmTPC1_QM_CQ_STS1_2 0xE48168
+
+#define mmTPC1_QM_CQ_STS1_3 0xE4816C
+
+#define mmTPC1_QM_CQ_STS1_4 0xE48170
+
+#define mmTPC1_QM_CQ_PTR_LO_0 0xE48174
+
+#define mmTPC1_QM_CQ_PTR_HI_0 0xE48178
+
+#define mmTPC1_QM_CQ_TSIZE_0 0xE4817C
+
+#define mmTPC1_QM_CQ_CTL_0 0xE48180
+
+#define mmTPC1_QM_CQ_PTR_LO_1 0xE48184
+
+#define mmTPC1_QM_CQ_PTR_HI_1 0xE48188
+
+#define mmTPC1_QM_CQ_TSIZE_1 0xE4818C
+
+#define mmTPC1_QM_CQ_CTL_1 0xE48190
+
+#define mmTPC1_QM_CQ_PTR_LO_2 0xE48194
+
+#define mmTPC1_QM_CQ_PTR_HI_2 0xE48198
+
+#define mmTPC1_QM_CQ_TSIZE_2 0xE4819C
+
+#define mmTPC1_QM_CQ_CTL_2 0xE481A0
+
+#define mmTPC1_QM_CQ_PTR_LO_3 0xE481A4
+
+#define mmTPC1_QM_CQ_PTR_HI_3 0xE481A8
+
+#define mmTPC1_QM_CQ_TSIZE_3 0xE481AC
+
+#define mmTPC1_QM_CQ_CTL_3 0xE481B0
+
+#define mmTPC1_QM_CQ_PTR_LO_4 0xE481B4
+
+#define mmTPC1_QM_CQ_PTR_HI_4 0xE481B8
+
+#define mmTPC1_QM_CQ_TSIZE_4 0xE481BC
+
+#define mmTPC1_QM_CQ_CTL_4 0xE481C0
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_0 0xE481C4
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_1 0xE481C8
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_2 0xE481CC
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_3 0xE481D0
+
+#define mmTPC1_QM_CQ_PTR_LO_STS_4 0xE481D4
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_0 0xE481D8
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_1 0xE481DC
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_2 0xE481E0
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_3 0xE481E4
+
+#define mmTPC1_QM_CQ_PTR_HI_STS_4 0xE481E8
+
+#define mmTPC1_QM_CQ_TSIZE_STS_0 0xE481EC
+
+#define mmTPC1_QM_CQ_TSIZE_STS_1 0xE481F0
+
+#define mmTPC1_QM_CQ_TSIZE_STS_2 0xE481F4
+
+#define mmTPC1_QM_CQ_TSIZE_STS_3 0xE481F8
+
+#define mmTPC1_QM_CQ_TSIZE_STS_4 0xE481FC
+
+#define mmTPC1_QM_CQ_CTL_STS_0 0xE48200
+
+#define mmTPC1_QM_CQ_CTL_STS_1 0xE48204
+
+#define mmTPC1_QM_CQ_CTL_STS_2 0xE48208
+
+#define mmTPC1_QM_CQ_CTL_STS_3 0xE4820C
+
+#define mmTPC1_QM_CQ_CTL_STS_4 0xE48210
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_0 0xE48214
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_1 0xE48218
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_2 0xE4821C
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_3 0xE48220
+
+#define mmTPC1_QM_CQ_IFIFO_CNT_4 0xE48224
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_0 0xE48228
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_1 0xE4822C
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_2 0xE48230
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_3 0xE48234
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO_4 0xE48238
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_0 0xE4823C
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_1 0xE48240
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_2 0xE48244
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_3 0xE48248
+
+#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI_4 0xE4824C
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_0 0xE48250
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_1 0xE48254
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_2 0xE48258
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_3 0xE4825C
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO_4 0xE48260
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_0 0xE48264
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_1 0xE48268
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_2 0xE4826C
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_3 0xE48270
+
+#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI_4 0xE48274
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_0 0xE48278
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_1 0xE4827C
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_2 0xE48280
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_3 0xE48284
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO_4 0xE48288
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_0 0xE4828C
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_1 0xE48290
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_2 0xE48294
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_3 0xE48298
+
+#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI_4 0xE4829C
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_0 0xE482A0
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_1 0xE482A4
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_2 0xE482A8
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_3 0xE482AC
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO_4 0xE482B0
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_0 0xE482B4
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_1 0xE482B8
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_2 0xE482BC
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_3 0xE482C0
+
+#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI_4 0xE482C4
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_0 0xE482C8
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_1 0xE482CC
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_2 0xE482D0
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_3 0xE482D4
+
+#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET_4 0xE482D8
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE482E0
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE482E4
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE482E8
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE482EC
+
+#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE482F0
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE482F4
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE482F8
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE482FC
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE48300
+
+#define mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE48304
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_0 0xE48308
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_1 0xE4830C
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_2 0xE48310
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_3 0xE48314
+
+#define mmTPC1_QM_CP_FENCE0_RDATA_4 0xE48318
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_0 0xE4831C
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_1 0xE48320
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_2 0xE48324
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_3 0xE48328
+
+#define mmTPC1_QM_CP_FENCE1_RDATA_4 0xE4832C
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_0 0xE48330
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_1 0xE48334
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_2 0xE48338
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_3 0xE4833C
+
+#define mmTPC1_QM_CP_FENCE2_RDATA_4 0xE48340
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_0 0xE48344
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_1 0xE48348
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_2 0xE4834C
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_3 0xE48350
+
+#define mmTPC1_QM_CP_FENCE3_RDATA_4 0xE48354
+
+#define mmTPC1_QM_CP_FENCE0_CNT_0 0xE48358
+
+#define mmTPC1_QM_CP_FENCE0_CNT_1 0xE4835C
+
+#define mmTPC1_QM_CP_FENCE0_CNT_2 0xE48360
+
+#define mmTPC1_QM_CP_FENCE0_CNT_3 0xE48364
+
+#define mmTPC1_QM_CP_FENCE0_CNT_4 0xE48368
+
+#define mmTPC1_QM_CP_FENCE1_CNT_0 0xE4836C
+
+#define mmTPC1_QM_CP_FENCE1_CNT_1 0xE48370
+
+#define mmTPC1_QM_CP_FENCE1_CNT_2 0xE48374
+
+#define mmTPC1_QM_CP_FENCE1_CNT_3 0xE48378
+
+#define mmTPC1_QM_CP_FENCE1_CNT_4 0xE4837C
+
+#define mmTPC1_QM_CP_FENCE2_CNT_0 0xE48380
+
+#define mmTPC1_QM_CP_FENCE2_CNT_1 0xE48384
+
+#define mmTPC1_QM_CP_FENCE2_CNT_2 0xE48388
+
+#define mmTPC1_QM_CP_FENCE2_CNT_3 0xE4838C
+
+#define mmTPC1_QM_CP_FENCE2_CNT_4 0xE48390
+
+#define mmTPC1_QM_CP_FENCE3_CNT_0 0xE48394
+
+#define mmTPC1_QM_CP_FENCE3_CNT_1 0xE48398
+
+#define mmTPC1_QM_CP_FENCE3_CNT_2 0xE4839C
+
+#define mmTPC1_QM_CP_FENCE3_CNT_3 0xE483A0
+
+#define mmTPC1_QM_CP_FENCE3_CNT_4 0xE483A4
+
+#define mmTPC1_QM_CP_STS_0 0xE483A8
+
+#define mmTPC1_QM_CP_STS_1 0xE483AC
+
+#define mmTPC1_QM_CP_STS_2 0xE483B0
+
+#define mmTPC1_QM_CP_STS_3 0xE483B4
+
+#define mmTPC1_QM_CP_STS_4 0xE483B8
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_0 0xE483BC
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_1 0xE483C0
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_2 0xE483C4
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_3 0xE483C8
+
+#define mmTPC1_QM_CP_CURRENT_INST_LO_4 0xE483CC
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_0 0xE483D0
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_1 0xE483D4
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_2 0xE483D8
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_3 0xE483DC
+
+#define mmTPC1_QM_CP_CURRENT_INST_HI_4 0xE483E0
+
+#define mmTPC1_QM_CP_BARRIER_CFG_0 0xE483F4
+
+#define mmTPC1_QM_CP_BARRIER_CFG_1 0xE483F8
+
+#define mmTPC1_QM_CP_BARRIER_CFG_2 0xE483FC
+
+#define mmTPC1_QM_CP_BARRIER_CFG_3 0xE48400
+
+#define mmTPC1_QM_CP_BARRIER_CFG_4 0xE48404
+
+#define mmTPC1_QM_CP_DBG_0_0 0xE48408
+
+#define mmTPC1_QM_CP_DBG_0_1 0xE4840C
+
+#define mmTPC1_QM_CP_DBG_0_2 0xE48410
+
+#define mmTPC1_QM_CP_DBG_0_3 0xE48414
+
+#define mmTPC1_QM_CP_DBG_0_4 0xE48418
+
+#define mmTPC1_QM_CP_ARUSER_31_11_0 0xE4841C
+
+#define mmTPC1_QM_CP_ARUSER_31_11_1 0xE48420
+
+#define mmTPC1_QM_CP_ARUSER_31_11_2 0xE48424
+
+#define mmTPC1_QM_CP_ARUSER_31_11_3 0xE48428
+
+#define mmTPC1_QM_CP_ARUSER_31_11_4 0xE4842C
+
+#define mmTPC1_QM_CP_AWUSER_31_11_0 0xE48430
+
+#define mmTPC1_QM_CP_AWUSER_31_11_1 0xE48434
+
+#define mmTPC1_QM_CP_AWUSER_31_11_2 0xE48438
+
+#define mmTPC1_QM_CP_AWUSER_31_11_3 0xE4843C
+
+#define mmTPC1_QM_CP_AWUSER_31_11_4 0xE48440
+
+#define mmTPC1_QM_ARB_CFG_0 0xE48A00
+
+#define mmTPC1_QM_ARB_CHOISE_Q_PUSH 0xE48A04
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_0 0xE48A08
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_1 0xE48A0C
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_2 0xE48A10
+
+#define mmTPC1_QM_ARB_WRR_WEIGHT_3 0xE48A14
+
+#define mmTPC1_QM_ARB_CFG_1 0xE48A18
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_0 0xE48A20
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_1 0xE48A24
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_2 0xE48A28
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_3 0xE48A2C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_4 0xE48A30
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_5 0xE48A34
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_6 0xE48A38
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_7 0xE48A3C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_8 0xE48A40
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_9 0xE48A44
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_10 0xE48A48
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_11 0xE48A4C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_12 0xE48A50
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_13 0xE48A54
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_14 0xE48A58
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_15 0xE48A5C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_16 0xE48A60
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_17 0xE48A64
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_18 0xE48A68
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_19 0xE48A6C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_20 0xE48A70
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_21 0xE48A74
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_22 0xE48A78
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_23 0xE48A7C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_24 0xE48A80
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_25 0xE48A84
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_26 0xE48A88
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_27 0xE48A8C
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_28 0xE48A90
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_29 0xE48A94
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_30 0xE48A98
+
+#define mmTPC1_QM_ARB_MST_AVAIL_CRED_31 0xE48A9C
+
+#define mmTPC1_QM_ARB_MST_CRED_INC 0xE48AA0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE48AA4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE48AA8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE48AAC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE48AB0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE48AB4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE48AB8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE48ABC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE48AC0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE48AC4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE48AC8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE48ACC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE48AD0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE48AD4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE48AD8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE48ADC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE48AE0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE48AE4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE48AE8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE48AEC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE48AF0
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE48AF4
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE48AF8
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE48AFC
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE48B00
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE48B04
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE48B08
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE48B0C
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE48B10
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE48B14
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE48B18
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE48B1C
+
+#define mmTPC1_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE48B20
+
+#define mmTPC1_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE48B28
+
+#define mmTPC1_QM_ARB_MST_SLAVE_EN 0xE48B2C
+
+#define mmTPC1_QM_ARB_MST_QUIET_PER 0xE48B34
+
+#define mmTPC1_QM_ARB_SLV_CHOISE_WDT 0xE48B38
+
+#define mmTPC1_QM_ARB_SLV_ID 0xE48B3C
+
+#define mmTPC1_QM_ARB_MSG_MAX_INFLIGHT 0xE48B44
+
+#define mmTPC1_QM_ARB_MSG_AWUSER_31_11 0xE48B48
+
+#define mmTPC1_QM_ARB_MSG_AWUSER_SEC_PROP 0xE48B4C
+
+#define mmTPC1_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE48B50
+
+#define mmTPC1_QM_ARB_BASE_LO 0xE48B54
+
+#define mmTPC1_QM_ARB_BASE_HI 0xE48B58
+
+#define mmTPC1_QM_ARB_STATE_STS 0xE48B80
+
+#define mmTPC1_QM_ARB_CHOISE_FULLNESS_STS 0xE48B84
+
+#define mmTPC1_QM_ARB_MSG_STS 0xE48B88
+
+#define mmTPC1_QM_ARB_SLV_CHOISE_Q_HEAD 0xE48B8C
+
+#define mmTPC1_QM_ARB_ERR_CAUSE 0xE48B9C
+
+#define mmTPC1_QM_ARB_ERR_MSG_EN 0xE48BA0
+
+#define mmTPC1_QM_ARB_ERR_STS_DRP 0xE48BA8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_0 0xE48BB0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_1 0xE48BB4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_2 0xE48BB8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_3 0xE48BBC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_4 0xE48BC0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_5 0xE48BC4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_6 0xE48BC8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_7 0xE48BCC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_8 0xE48BD0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_9 0xE48BD4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_10 0xE48BD8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_11 0xE48BDC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_12 0xE48BE0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_13 0xE48BE4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_14 0xE48BE8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_15 0xE48BEC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_16 0xE48BF0
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_17 0xE48BF4
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_18 0xE48BF8
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_19 0xE48BFC
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_20 0xE48C00
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_21 0xE48C04
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_22 0xE48C08
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_23 0xE48C0C
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_24 0xE48C10
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_25 0xE48C14
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_26 0xE48C18
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_27 0xE48C1C
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_28 0xE48C20
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_29 0xE48C24
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_30 0xE48C28
+
+#define mmTPC1_QM_ARB_MST_CRED_STS_31 0xE48C2C
+
+#define mmTPC1_QM_CGM_CFG 0xE48C70
+
+#define mmTPC1_QM_CGM_STS 0xE48C74
+
+#define mmTPC1_QM_CGM_CFG1 0xE48C78
+
+#define mmTPC1_QM_LOCAL_RANGE_BASE 0xE48C80
+
+#define mmTPC1_QM_LOCAL_RANGE_SIZE 0xE48C84
+
+#define mmTPC1_QM_CSMR_STRICT_PRIO_CFG 0xE48C90
+
+#define mmTPC1_QM_HBW_RD_RATE_LIM_CFG_1 0xE48C94
+
+#define mmTPC1_QM_LBW_WR_RATE_LIM_CFG_0 0xE48C98
+
+#define mmTPC1_QM_LBW_WR_RATE_LIM_CFG_1 0xE48C9C
+
+#define mmTPC1_QM_HBW_RD_RATE_LIM_CFG_0 0xE48CA0
+
+#define mmTPC1_QM_GLBL_AXCACHE 0xE48CA4
+
+#define mmTPC1_QM_IND_GW_APB_CFG 0xE48CB0
+
+#define mmTPC1_QM_IND_GW_APB_WDATA 0xE48CB4
+
+#define mmTPC1_QM_IND_GW_APB_RDATA 0xE48CB8
+
+#define mmTPC1_QM_IND_GW_APB_STATUS 0xE48CBC
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48CD0
+
+#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48CD4
+
+#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48CD8
+
+#define mmTPC1_QM_GLBL_MEM_INIT_BUSY 0xE48D00
+
+#endif /* ASIC_REG_TPC1_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
new file mode 100644
index 000000000000..3e77c37952bc
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_CFG_REGS_H_
+#define ASIC_REG_TPC2_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE86418
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE8641C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86420
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE86424
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86428
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE8642C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86430
+
+#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86434
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE86438
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE8643C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86440
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86444
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE86448
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE8644C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86450
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE86454
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86458
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE8645C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86460
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86464
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE86468
+
+#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE8646C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86470
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE86474
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE86478
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE8647C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE86480
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE86484
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE86488
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE8648C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE86490
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE86494
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE86498
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE8649C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE864C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE864C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE864C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE864CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE864D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE864D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE864D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE864DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE864E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE864E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE864E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE864EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE864F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE864F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE864F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE864FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86500
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE86504
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86508
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE8650C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86510
+
+#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86514
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE86518
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE8651C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86520
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86524
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE86528
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE8652C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86530
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE86534
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE86538
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE8653C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE86540
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE86544
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE86548
+
+#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE8654C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE86550
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE86554
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE86558
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE8655C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE86560
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE86564
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE86568
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE8656C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE86570
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE86574
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE86578
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE8657C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86580
+
+#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE86584
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86588
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE8658C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE86590
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86594
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86598
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE8659C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE865A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE865A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE865A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE865AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE865B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE865B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE865B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE865BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xE865C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xE865C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xE865C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xE865CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xE865D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xE865D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xE865D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xE865DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xE865E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xE865E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xE865E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xE865EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xE865F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xE865F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xE865F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xE865FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xE86600
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xE86604
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xE86608
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xE8660C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xE86610
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xE86614
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xE86618
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xE8661C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xE86620
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xE86624
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xE86628
+
+#define mmTPC2_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xE8662C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xE86630
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xE86634
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xE86638
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xE8663C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xE86640
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xE86644
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xE86648
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xE8664C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xE86650
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xE86654
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xE86658
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xE8665C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xE86660
+
+#define mmTPC2_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xE86664
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xE86668
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xE8666C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xE86670
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xE86674
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xE86678
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xE8667C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xE86680
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xE86684
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xE86688
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xE8668C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xE86690
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xE86694
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xE86698
+
+#define mmTPC2_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xE8669C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xE866A0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xE866A4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xE866A8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xE866AC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xE866B0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xE866B4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xE866B8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xE866BC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xE866C0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xE866C4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xE866C8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xE866CC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xE866D0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xE866D4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xE866D8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xE866DC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xE866E0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xE866E4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xE866E8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xE866EC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xE866F0
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xE866F4
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xE866F8
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xE866FC
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xE86700
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xE86704
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xE86708
+
+#define mmTPC2_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xE8670C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xE86710
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xE86714
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xE86718
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xE8671C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xE86720
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xE86724
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xE86728
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xE8672C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xE86730
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xE86734
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xE86738
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xE8673C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xE86740
+
+#define mmTPC2_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xE86744
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xE86748
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xE8674C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xE86750
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xE86754
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xE86758
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xE8675C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xE86760
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xE86764
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xE86768
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xE8676C
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xE86770
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xE86774
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xE86778
+
+#define mmTPC2_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xE8677C
+
+#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86780
+
+#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_ADDR 0xE86784
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86788
+
+#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE8678C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86790
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE86794
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86798
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE8679C
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE867A0
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE867A4
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE867A8
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE867AC
+
+#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE867B0
+
+#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE867B4
+
+#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE867B8
+
+#define mmTPC2_CFG_KERNEL_KERNEL_ID 0xE867BC
+
+#define mmTPC2_CFG_KERNEL_SRF_0 0xE867C0
+
+#define mmTPC2_CFG_KERNEL_SRF_1 0xE867C4
+
+#define mmTPC2_CFG_KERNEL_SRF_2 0xE867C8
+
+#define mmTPC2_CFG_KERNEL_SRF_3 0xE867CC
+
+#define mmTPC2_CFG_KERNEL_SRF_4 0xE867D0
+
+#define mmTPC2_CFG_KERNEL_SRF_5 0xE867D4
+
+#define mmTPC2_CFG_KERNEL_SRF_6 0xE867D8
+
+#define mmTPC2_CFG_KERNEL_SRF_7 0xE867DC
+
+#define mmTPC2_CFG_KERNEL_SRF_8 0xE867E0
+
+#define mmTPC2_CFG_KERNEL_SRF_9 0xE867E4
+
+#define mmTPC2_CFG_KERNEL_SRF_10 0xE867E8
+
+#define mmTPC2_CFG_KERNEL_SRF_11 0xE867EC
+
+#define mmTPC2_CFG_KERNEL_SRF_12 0xE867F0
+
+#define mmTPC2_CFG_KERNEL_SRF_13 0xE867F4
+
+#define mmTPC2_CFG_KERNEL_SRF_14 0xE867F8
+
+#define mmTPC2_CFG_KERNEL_SRF_15 0xE867FC
+
+#define mmTPC2_CFG_KERNEL_SRF_16 0xE86800
+
+#define mmTPC2_CFG_KERNEL_SRF_17 0xE86804
+
+#define mmTPC2_CFG_KERNEL_SRF_18 0xE86808
+
+#define mmTPC2_CFG_KERNEL_SRF_19 0xE8680C
+
+#define mmTPC2_CFG_KERNEL_SRF_20 0xE86810
+
+#define mmTPC2_CFG_KERNEL_SRF_21 0xE86814
+
+#define mmTPC2_CFG_KERNEL_SRF_22 0xE86818
+
+#define mmTPC2_CFG_KERNEL_SRF_23 0xE8681C
+
+#define mmTPC2_CFG_KERNEL_SRF_24 0xE86820
+
+#define mmTPC2_CFG_KERNEL_SRF_25 0xE86824
+
+#define mmTPC2_CFG_KERNEL_SRF_26 0xE86828
+
+#define mmTPC2_CFG_KERNEL_SRF_27 0xE8682C
+
+#define mmTPC2_CFG_KERNEL_SRF_28 0xE86830
+
+#define mmTPC2_CFG_KERNEL_SRF_29 0xE86834
+
+#define mmTPC2_CFG_KERNEL_SRF_30 0xE86838
+
+#define mmTPC2_CFG_KERNEL_SRF_31 0xE8683C
+
+#define mmTPC2_CFG_ROUND_CSR 0xE868FC
+
+#define mmTPC2_CFG_PROT 0xE86900
+
+#define mmTPC2_CFG_SEMAPHORE 0xE86908
+
+#define mmTPC2_CFG_VFLAGS 0xE8690C
+
+#define mmTPC2_CFG_SFLAGS 0xE86910
+
+#define mmTPC2_CFG_LFSR_POLYNOM 0xE86918
+
+#define mmTPC2_CFG_STATUS 0xE8691C
+
+#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86920
+
+#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86924
+
+#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8692C
+
+#define mmTPC2_CFG_TPC_CMD 0xE86930
+
+#define mmTPC2_CFG_TPC_EXECUTE 0xE86938
+
+#define mmTPC2_CFG_TPC_STALL 0xE8693C
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86940
+
+#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86944
+
+#define mmTPC2_CFG_RD_RATE_LIMIT 0xE86948
+
+#define mmTPC2_CFG_WR_RATE_LIMIT 0xE86950
+
+#define mmTPC2_CFG_MSS_CONFIG 0xE86954
+
+#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86958
+
+#define mmTPC2_CFG_TPC_INTR_MASK 0xE8695C
+
+#define mmTPC2_CFG_WQ_CREDITS 0xE86960
+
+#define mmTPC2_CFG_ARUSER_LO 0xE86964
+
+#define mmTPC2_CFG_ARUSER_HI 0xE86968
+
+#define mmTPC2_CFG_AWUSER_LO 0xE8696C
+
+#define mmTPC2_CFG_AWUSER_HI 0xE86970
+
+#define mmTPC2_CFG_OPCODE_EXEC 0xE86974
+
+#define mmTPC2_CFG_LUT_FUNC32_BASE_ADDR_LO 0xE86978
+
+#define mmTPC2_CFG_LUT_FUNC32_BASE_ADDR_HI 0xE8697C
+
+#define mmTPC2_CFG_LUT_FUNC64_BASE_ADDR_LO 0xE86980
+
+#define mmTPC2_CFG_LUT_FUNC64_BASE_ADDR_HI 0xE86984
+
+#define mmTPC2_CFG_LUT_FUNC128_BASE_ADDR_LO 0xE86988
+
+#define mmTPC2_CFG_LUT_FUNC128_BASE_ADDR_HI 0xE8698C
+
+#define mmTPC2_CFG_LUT_FUNC256_BASE_ADDR_LO 0xE86990
+
+#define mmTPC2_CFG_LUT_FUNC256_BASE_ADDR_HI 0xE86994
+
+#define mmTPC2_CFG_TSB_CFG_MAX_SIZE 0xE86998
+
+#define mmTPC2_CFG_TSB_CFG 0xE8699C
+
+#define mmTPC2_CFG_DBGMEM_ADD 0xE869A0
+
+#define mmTPC2_CFG_DBGMEM_DATA_WR 0xE869A4
+
+#define mmTPC2_CFG_DBGMEM_DATA_RD 0xE869A8
+
+#define mmTPC2_CFG_DBGMEM_CTRL 0xE869AC
+
+#define mmTPC2_CFG_DBGMEM_RC 0xE869B0
+
+#define mmTPC2_CFG_TSB_INFLIGHT_CNTR 0xE869B4
+
+#define mmTPC2_CFG_WQ_INFLIGHT_CNTR 0xE869B8
+
+#define mmTPC2_CFG_WQ_LBW_TOTAL_CNTR 0xE869BC
+
+#define mmTPC2_CFG_WQ_HBW_TOTAL_CNTR 0xE869C0
+
+#define mmTPC2_CFG_IRQ_OCCOUPY_CNTR 0xE869C4
+
+#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE869D0
+
+#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE869D4
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE869D8
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE869DC
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE869E0
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE869E4
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE869E8
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE869EC
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE869F0
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE869F4
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE869F8
+
+#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE869FC
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00
+
+#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04
+
+#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08
+
+#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A18
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A1C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A20
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A24
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A28
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A2C
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A30
+
+#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A34
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A38
+
+#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A3C
+
+#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A40
+
+#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A44
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A48
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A4C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A50
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A54
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A58
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A5C
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A60
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A64
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A68
+
+#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A6C
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A70
+
+#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A74
+
+#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86A78
+
+#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86A7C
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86A80
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86A84
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86A88
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86A8C
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86A90
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86A94
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86A98
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86A9C
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AA0
+
+#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86AA4
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AA8
+
+#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AAC
+
+#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AB0
+
+#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AB4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AB8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86ABC
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86AC0
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86AC4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86AC8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86ACC
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86AD0
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86AD4
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86AD8
+
+#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86ADC
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86AE0
+
+#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86AE4
+
+#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86AE8
+
+#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86AEC
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86AF0
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86AF4
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86AF8
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86AFC
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B00
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B04
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B08
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B0C
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B10
+
+#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B14
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B18
+
+#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B1C
+
+#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B20
+
+#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B24
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B28
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B2C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B30
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B34
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86B38
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86B3C
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86B40
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86B44
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86B48
+
+#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86B4C
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86B50
+
+#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86B54
+
+#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86B58
+
+#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86B5C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86B60
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86B64
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86B68
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86B6C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86B70
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86B74
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86B78
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86B7C
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86B80
+
+#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86B84
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86B88
+
+#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86B8C
+
+#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86B90
+
+#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86B94
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86B98
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86B9C
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86BA0
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86BA4
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86BA8
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86BAC
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86BB0
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86BB4
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86BB8
+
+#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86BBC
+
+#define mmTPC2_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xE86BC0
+
+#define mmTPC2_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xE86BC4
+
+#define mmTPC2_CFG_QM_TENSOR_8_PADDING_VALUE 0xE86BC8
+
+#define mmTPC2_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xE86BCC
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_0_SIZE 0xE86BD0
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xE86BD4
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_1_SIZE 0xE86BD8
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xE86BDC
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_2_SIZE 0xE86BE0
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xE86BE4
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_3_SIZE 0xE86BE8
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xE86BEC
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_4_SIZE 0xE86BF0
+
+#define mmTPC2_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xE86BF4
+
+#define mmTPC2_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xE86BF8
+
+#define mmTPC2_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xE86BFC
+
+#define mmTPC2_CFG_QM_TENSOR_9_PADDING_VALUE 0xE86C00
+
+#define mmTPC2_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xE86C04
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_0_SIZE 0xE86C08
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xE86C0C
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_1_SIZE 0xE86C10
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xE86C14
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_2_SIZE 0xE86C18
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xE86C1C
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_3_SIZE 0xE86C20
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xE86C24
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_4_SIZE 0xE86C28
+
+#define mmTPC2_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xE86C2C
+
+#define mmTPC2_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xE86C30
+
+#define mmTPC2_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xE86C34
+
+#define mmTPC2_CFG_QM_TENSOR_10_PADDING_VALUE 0xE86C38
+
+#define mmTPC2_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xE86C3C
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_0_SIZE 0xE86C40
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xE86C44
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_1_SIZE 0xE86C48
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xE86C4C
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_2_SIZE 0xE86C50
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xE86C54
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_3_SIZE 0xE86C58
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xE86C5C
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_4_SIZE 0xE86C60
+
+#define mmTPC2_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xE86C64
+
+#define mmTPC2_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xE86C68
+
+#define mmTPC2_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xE86C6C
+
+#define mmTPC2_CFG_QM_TENSOR_11_PADDING_VALUE 0xE86C70
+
+#define mmTPC2_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xE86C74
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_0_SIZE 0xE86C78
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xE86C7C
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_1_SIZE 0xE86C80
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xE86C84
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_2_SIZE 0xE86C88
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xE86C8C
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_3_SIZE 0xE86C90
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xE86C94
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_4_SIZE 0xE86C98
+
+#define mmTPC2_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xE86C9C
+
+#define mmTPC2_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xE86CA0
+
+#define mmTPC2_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xE86CA4
+
+#define mmTPC2_CFG_QM_TENSOR_12_PADDING_VALUE 0xE86CA8
+
+#define mmTPC2_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xE86CAC
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_0_SIZE 0xE86CB0
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xE86CB4
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_1_SIZE 0xE86CB8
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xE86CBC
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_2_SIZE 0xE86CC0
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xE86CC4
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_3_SIZE 0xE86CC8
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xE86CCC
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_4_SIZE 0xE86CD0
+
+#define mmTPC2_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xE86CD4
+
+#define mmTPC2_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xE86CD8
+
+#define mmTPC2_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xE86CDC
+
+#define mmTPC2_CFG_QM_TENSOR_13_PADDING_VALUE 0xE86CE0
+
+#define mmTPC2_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xE86CE4
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_0_SIZE 0xE86CE8
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xE86CEC
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_1_SIZE 0xE86CF0
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xE86CF4
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_2_SIZE 0xE86CF8
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xE86CFC
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_3_SIZE 0xE86D00
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xE86D04
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_4_SIZE 0xE86D08
+
+#define mmTPC2_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xE86D0C
+
+#define mmTPC2_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xE86D10
+
+#define mmTPC2_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xE86D14
+
+#define mmTPC2_CFG_QM_TENSOR_14_PADDING_VALUE 0xE86D18
+
+#define mmTPC2_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xE86D1C
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_0_SIZE 0xE86D20
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xE86D24
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_1_SIZE 0xE86D28
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xE86D2C
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_2_SIZE 0xE86D30
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xE86D34
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_3_SIZE 0xE86D38
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xE86D3C
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_4_SIZE 0xE86D40
+
+#define mmTPC2_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xE86D44
+
+#define mmTPC2_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xE86D48
+
+#define mmTPC2_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xE86D4C
+
+#define mmTPC2_CFG_QM_TENSOR_15_PADDING_VALUE 0xE86D50
+
+#define mmTPC2_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xE86D54
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_0_SIZE 0xE86D58
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xE86D5C
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_1_SIZE 0xE86D60
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xE86D64
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_2_SIZE 0xE86D68
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xE86D6C
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_3_SIZE 0xE86D70
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xE86D74
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_4_SIZE 0xE86D78
+
+#define mmTPC2_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xE86D7C
+
+#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D80
+
+#define mmTPC2_CFG_QM_SYNC_OBJECT_ADDR 0xE86D84
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86D88
+
+#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86D8C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86D90
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86D94
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86D98
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86D9C
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86DA0
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86DA4
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86DA8
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86DAC
+
+#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86DB0
+
+#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86DB4
+
+#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86DB8
+
+#define mmTPC2_CFG_QM_KERNEL_ID 0xE86DBC
+
+#define mmTPC2_CFG_QM_SRF_0 0xE86DC0
+
+#define mmTPC2_CFG_QM_SRF_1 0xE86DC4
+
+#define mmTPC2_CFG_QM_SRF_2 0xE86DC8
+
+#define mmTPC2_CFG_QM_SRF_3 0xE86DCC
+
+#define mmTPC2_CFG_QM_SRF_4 0xE86DD0
+
+#define mmTPC2_CFG_QM_SRF_5 0xE86DD4
+
+#define mmTPC2_CFG_QM_SRF_6 0xE86DD8
+
+#define mmTPC2_CFG_QM_SRF_7 0xE86DDC
+
+#define mmTPC2_CFG_QM_SRF_8 0xE86DE0
+
+#define mmTPC2_CFG_QM_SRF_9 0xE86DE4
+
+#define mmTPC2_CFG_QM_SRF_10 0xE86DE8
+
+#define mmTPC2_CFG_QM_SRF_11 0xE86DEC
+
+#define mmTPC2_CFG_QM_SRF_12 0xE86DF0
+
+#define mmTPC2_CFG_QM_SRF_13 0xE86DF4
+
+#define mmTPC2_CFG_QM_SRF_14 0xE86DF8
+
+#define mmTPC2_CFG_QM_SRF_15 0xE86DFC
+
+#define mmTPC2_CFG_QM_SRF_16 0xE86E00
+
+#define mmTPC2_CFG_QM_SRF_17 0xE86E04
+
+#define mmTPC2_CFG_QM_SRF_18 0xE86E08
+
+#define mmTPC2_CFG_QM_SRF_19 0xE86E0C
+
+#define mmTPC2_CFG_QM_SRF_20 0xE86E10
+
+#define mmTPC2_CFG_QM_SRF_21 0xE86E14
+
+#define mmTPC2_CFG_QM_SRF_22 0xE86E18
+
+#define mmTPC2_CFG_QM_SRF_23 0xE86E1C
+
+#define mmTPC2_CFG_QM_SRF_24 0xE86E20
+
+#define mmTPC2_CFG_QM_SRF_25 0xE86E24
+
+#define mmTPC2_CFG_QM_SRF_26 0xE86E28
+
+#define mmTPC2_CFG_QM_SRF_27 0xE86E2C
+
+#define mmTPC2_CFG_QM_SRF_28 0xE86E30
+
+#define mmTPC2_CFG_QM_SRF_29 0xE86E34
+
+#define mmTPC2_CFG_QM_SRF_30 0xE86E38
+
+#define mmTPC2_CFG_QM_SRF_31 0xE86E3C
+
+#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
new file mode 100644
index 000000000000..2919e2fa58f8
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC2_QM_REGS_H_
+#define ASIC_REG_TPC2_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC2_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC2_QM_GLBL_CFG0 0xE88000
+
+#define mmTPC2_QM_GLBL_CFG1 0xE88004
+
+#define mmTPC2_QM_GLBL_PROT 0xE88008
+
+#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_0 0xE88010
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_1 0xE88014
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_2 0xE88018
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_3 0xE8801C
+
+#define mmTPC2_QM_GLBL_SECURE_PROPS_4 0xE88020
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_0 0xE88024
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_1 0xE88028
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_2 0xE8802C
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_3 0xE88030
+
+#define mmTPC2_QM_GLBL_NON_SECURE_PROPS_4 0xE88034
+
+#define mmTPC2_QM_GLBL_STS0 0xE88038
+
+#define mmTPC2_QM_GLBL_STS1_0 0xE88040
+
+#define mmTPC2_QM_GLBL_STS1_1 0xE88044
+
+#define mmTPC2_QM_GLBL_STS1_2 0xE88048
+
+#define mmTPC2_QM_GLBL_STS1_3 0xE8804C
+
+#define mmTPC2_QM_GLBL_STS1_4 0xE88050
+
+#define mmTPC2_QM_GLBL_MSG_EN_0 0xE88054
+
+#define mmTPC2_QM_GLBL_MSG_EN_1 0xE88058
+
+#define mmTPC2_QM_GLBL_MSG_EN_2 0xE8805C
+
+#define mmTPC2_QM_GLBL_MSG_EN_3 0xE88060
+
+#define mmTPC2_QM_GLBL_MSG_EN_4 0xE88068
+
+#define mmTPC2_QM_PQ_BASE_LO_0 0xE88070
+
+#define mmTPC2_QM_PQ_BASE_LO_1 0xE88074
+
+#define mmTPC2_QM_PQ_BASE_LO_2 0xE88078
+
+#define mmTPC2_QM_PQ_BASE_LO_3 0xE8807C
+
+#define mmTPC2_QM_PQ_BASE_HI_0 0xE88080
+
+#define mmTPC2_QM_PQ_BASE_HI_1 0xE88084
+
+#define mmTPC2_QM_PQ_BASE_HI_2 0xE88088
+
+#define mmTPC2_QM_PQ_BASE_HI_3 0xE8808C
+
+#define mmTPC2_QM_PQ_SIZE_0 0xE88090
+
+#define mmTPC2_QM_PQ_SIZE_1 0xE88094
+
+#define mmTPC2_QM_PQ_SIZE_2 0xE88098
+
+#define mmTPC2_QM_PQ_SIZE_3 0xE8809C
+
+#define mmTPC2_QM_PQ_PI_0 0xE880A0
+
+#define mmTPC2_QM_PQ_PI_1 0xE880A4
+
+#define mmTPC2_QM_PQ_PI_2 0xE880A8
+
+#define mmTPC2_QM_PQ_PI_3 0xE880AC
+
+#define mmTPC2_QM_PQ_CI_0 0xE880B0
+
+#define mmTPC2_QM_PQ_CI_1 0xE880B4
+
+#define mmTPC2_QM_PQ_CI_2 0xE880B8
+
+#define mmTPC2_QM_PQ_CI_3 0xE880BC
+
+#define mmTPC2_QM_PQ_CFG0_0 0xE880C0
+
+#define mmTPC2_QM_PQ_CFG0_1 0xE880C4
+
+#define mmTPC2_QM_PQ_CFG0_2 0xE880C8
+
+#define mmTPC2_QM_PQ_CFG0_3 0xE880CC
+
+#define mmTPC2_QM_PQ_CFG1_0 0xE880D0
+
+#define mmTPC2_QM_PQ_CFG1_1 0xE880D4
+
+#define mmTPC2_QM_PQ_CFG1_2 0xE880D8
+
+#define mmTPC2_QM_PQ_CFG1_3 0xE880DC
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_0 0xE880E0
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_1 0xE880E4
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_2 0xE880E8
+
+#define mmTPC2_QM_PQ_ARUSER_31_11_3 0xE880EC
+
+#define mmTPC2_QM_PQ_STS0_0 0xE880F0
+
+#define mmTPC2_QM_PQ_STS0_1 0xE880F4
+
+#define mmTPC2_QM_PQ_STS0_2 0xE880F8
+
+#define mmTPC2_QM_PQ_STS0_3 0xE880FC
+
+#define mmTPC2_QM_PQ_STS1_0 0xE88100
+
+#define mmTPC2_QM_PQ_STS1_1 0xE88104
+
+#define mmTPC2_QM_PQ_STS1_2 0xE88108
+
+#define mmTPC2_QM_PQ_STS1_3 0xE8810C
+
+#define mmTPC2_QM_CQ_CFG0_0 0xE88110
+
+#define mmTPC2_QM_CQ_CFG0_1 0xE88114
+
+#define mmTPC2_QM_CQ_CFG0_2 0xE88118
+
+#define mmTPC2_QM_CQ_CFG0_3 0xE8811C
+
+#define mmTPC2_QM_CQ_CFG0_4 0xE88120
+
+#define mmTPC2_QM_CQ_CFG1_0 0xE88124
+
+#define mmTPC2_QM_CQ_CFG1_1 0xE88128
+
+#define mmTPC2_QM_CQ_CFG1_2 0xE8812C
+
+#define mmTPC2_QM_CQ_CFG1_3 0xE88130
+
+#define mmTPC2_QM_CQ_CFG1_4 0xE88134
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_0 0xE88138
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_1 0xE8813C
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_2 0xE88140
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_3 0xE88144
+
+#define mmTPC2_QM_CQ_ARUSER_31_11_4 0xE88148
+
+#define mmTPC2_QM_CQ_STS0_0 0xE8814C
+
+#define mmTPC2_QM_CQ_STS0_1 0xE88150
+
+#define mmTPC2_QM_CQ_STS0_2 0xE88154
+
+#define mmTPC2_QM_CQ_STS0_3 0xE88158
+
+#define mmTPC2_QM_CQ_STS0_4 0xE8815C
+
+#define mmTPC2_QM_CQ_STS1_0 0xE88160
+
+#define mmTPC2_QM_CQ_STS1_1 0xE88164
+
+#define mmTPC2_QM_CQ_STS1_2 0xE88168
+
+#define mmTPC2_QM_CQ_STS1_3 0xE8816C
+
+#define mmTPC2_QM_CQ_STS1_4 0xE88170
+
+#define mmTPC2_QM_CQ_PTR_LO_0 0xE88174
+
+#define mmTPC2_QM_CQ_PTR_HI_0 0xE88178
+
+#define mmTPC2_QM_CQ_TSIZE_0 0xE8817C
+
+#define mmTPC2_QM_CQ_CTL_0 0xE88180
+
+#define mmTPC2_QM_CQ_PTR_LO_1 0xE88184
+
+#define mmTPC2_QM_CQ_PTR_HI_1 0xE88188
+
+#define mmTPC2_QM_CQ_TSIZE_1 0xE8818C
+
+#define mmTPC2_QM_CQ_CTL_1 0xE88190
+
+#define mmTPC2_QM_CQ_PTR_LO_2 0xE88194
+
+#define mmTPC2_QM_CQ_PTR_HI_2 0xE88198
+
+#define mmTPC2_QM_CQ_TSIZE_2 0xE8819C
+
+#define mmTPC2_QM_CQ_CTL_2 0xE881A0
+
+#define mmTPC2_QM_CQ_PTR_LO_3 0xE881A4
+
+#define mmTPC2_QM_CQ_PTR_HI_3 0xE881A8
+
+#define mmTPC2_QM_CQ_TSIZE_3 0xE881AC
+
+#define mmTPC2_QM_CQ_CTL_3 0xE881B0
+
+#define mmTPC2_QM_CQ_PTR_LO_4 0xE881B4
+
+#define mmTPC2_QM_CQ_PTR_HI_4 0xE881B8
+
+#define mmTPC2_QM_CQ_TSIZE_4 0xE881BC
+
+#define mmTPC2_QM_CQ_CTL_4 0xE881C0
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_0 0xE881C4
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_1 0xE881C8
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_2 0xE881CC
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_3 0xE881D0
+
+#define mmTPC2_QM_CQ_PTR_LO_STS_4 0xE881D4
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_0 0xE881D8
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_1 0xE881DC
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_2 0xE881E0
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_3 0xE881E4
+
+#define mmTPC2_QM_CQ_PTR_HI_STS_4 0xE881E8
+
+#define mmTPC2_QM_CQ_TSIZE_STS_0 0xE881EC
+
+#define mmTPC2_QM_CQ_TSIZE_STS_1 0xE881F0
+
+#define mmTPC2_QM_CQ_TSIZE_STS_2 0xE881F4
+
+#define mmTPC2_QM_CQ_TSIZE_STS_3 0xE881F8
+
+#define mmTPC2_QM_CQ_TSIZE_STS_4 0xE881FC
+
+#define mmTPC2_QM_CQ_CTL_STS_0 0xE88200
+
+#define mmTPC2_QM_CQ_CTL_STS_1 0xE88204
+
+#define mmTPC2_QM_CQ_CTL_STS_2 0xE88208
+
+#define mmTPC2_QM_CQ_CTL_STS_3 0xE8820C
+
+#define mmTPC2_QM_CQ_CTL_STS_4 0xE88210
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_0 0xE88214
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_1 0xE88218
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_2 0xE8821C
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_3 0xE88220
+
+#define mmTPC2_QM_CQ_IFIFO_CNT_4 0xE88224
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_0 0xE88228
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_1 0xE8822C
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_2 0xE88230
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_3 0xE88234
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO_4 0xE88238
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_0 0xE8823C
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_1 0xE88240
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_2 0xE88244
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_3 0xE88248
+
+#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI_4 0xE8824C
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_0 0xE88250
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_1 0xE88254
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_2 0xE88258
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_3 0xE8825C
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO_4 0xE88260
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_0 0xE88264
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_1 0xE88268
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_2 0xE8826C
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_3 0xE88270
+
+#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI_4 0xE88274
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_0 0xE88278
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_1 0xE8827C
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_2 0xE88280
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_3 0xE88284
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO_4 0xE88288
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_0 0xE8828C
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_1 0xE88290
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_2 0xE88294
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_3 0xE88298
+
+#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI_4 0xE8829C
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_0 0xE882A0
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_1 0xE882A4
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_2 0xE882A8
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_3 0xE882AC
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO_4 0xE882B0
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_0 0xE882B4
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_1 0xE882B8
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_2 0xE882BC
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_3 0xE882C0
+
+#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI_4 0xE882C4
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_0 0xE882C8
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_1 0xE882CC
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_2 0xE882D0
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_3 0xE882D4
+
+#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET_4 0xE882D8
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xE882E0
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xE882E4
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xE882E8
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xE882EC
+
+#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xE882F0
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xE882F4
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xE882F8
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xE882FC
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xE88300
+
+#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xE88304
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_0 0xE88308
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_1 0xE8830C
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_2 0xE88310
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_3 0xE88314
+
+#define mmTPC2_QM_CP_FENCE0_RDATA_4 0xE88318
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_0 0xE8831C
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_1 0xE88320
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_2 0xE88324
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_3 0xE88328
+
+#define mmTPC2_QM_CP_FENCE1_RDATA_4 0xE8832C
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_0 0xE88330
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_1 0xE88334
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_2 0xE88338
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_3 0xE8833C
+
+#define mmTPC2_QM_CP_FENCE2_RDATA_4 0xE88340
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_0 0xE88344
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_1 0xE88348
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_2 0xE8834C
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_3 0xE88350
+
+#define mmTPC2_QM_CP_FENCE3_RDATA_4 0xE88354
+
+#define mmTPC2_QM_CP_FENCE0_CNT_0 0xE88358
+
+#define mmTPC2_QM_CP_FENCE0_CNT_1 0xE8835C
+
+#define mmTPC2_QM_CP_FENCE0_CNT_2 0xE88360
+
+#define mmTPC2_QM_CP_FENCE0_CNT_3 0xE88364
+
+#define mmTPC2_QM_CP_FENCE0_CNT_4 0xE88368
+
+#define mmTPC2_QM_CP_FENCE1_CNT_0 0xE8836C
+
+#define mmTPC2_QM_CP_FENCE1_CNT_1 0xE88370
+
+#define mmTPC2_QM_CP_FENCE1_CNT_2 0xE88374
+
+#define mmTPC2_QM_CP_FENCE1_CNT_3 0xE88378
+
+#define mmTPC2_QM_CP_FENCE1_CNT_4 0xE8837C
+
+#define mmTPC2_QM_CP_FENCE2_CNT_0 0xE88380
+
+#define mmTPC2_QM_CP_FENCE2_CNT_1 0xE88384
+
+#define mmTPC2_QM_CP_FENCE2_CNT_2 0xE88388
+
+#define mmTPC2_QM_CP_FENCE2_CNT_3 0xE8838C
+
+#define mmTPC2_QM_CP_FENCE2_CNT_4 0xE88390
+
+#define mmTPC2_QM_CP_FENCE3_CNT_0 0xE88394
+
+#define mmTPC2_QM_CP_FENCE3_CNT_1 0xE88398
+
+#define mmTPC2_QM_CP_FENCE3_CNT_2 0xE8839C
+
+#define mmTPC2_QM_CP_FENCE3_CNT_3 0xE883A0
+
+#define mmTPC2_QM_CP_FENCE3_CNT_4 0xE883A4
+
+#define mmTPC2_QM_CP_STS_0 0xE883A8
+
+#define mmTPC2_QM_CP_STS_1 0xE883AC
+
+#define mmTPC2_QM_CP_STS_2 0xE883B0
+
+#define mmTPC2_QM_CP_STS_3 0xE883B4
+
+#define mmTPC2_QM_CP_STS_4 0xE883B8
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_0 0xE883BC
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_1 0xE883C0
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_2 0xE883C4
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_3 0xE883C8
+
+#define mmTPC2_QM_CP_CURRENT_INST_LO_4 0xE883CC
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_0 0xE883D0
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_1 0xE883D4
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_2 0xE883D8
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_3 0xE883DC
+
+#define mmTPC2_QM_CP_CURRENT_INST_HI_4 0xE883E0
+
+#define mmTPC2_QM_CP_BARRIER_CFG_0 0xE883F4
+
+#define mmTPC2_QM_CP_BARRIER_CFG_1 0xE883F8
+
+#define mmTPC2_QM_CP_BARRIER_CFG_2 0xE883FC
+
+#define mmTPC2_QM_CP_BARRIER_CFG_3 0xE88400
+
+#define mmTPC2_QM_CP_BARRIER_CFG_4 0xE88404
+
+#define mmTPC2_QM_CP_DBG_0_0 0xE88408
+
+#define mmTPC2_QM_CP_DBG_0_1 0xE8840C
+
+#define mmTPC2_QM_CP_DBG_0_2 0xE88410
+
+#define mmTPC2_QM_CP_DBG_0_3 0xE88414
+
+#define mmTPC2_QM_CP_DBG_0_4 0xE88418
+
+#define mmTPC2_QM_CP_ARUSER_31_11_0 0xE8841C
+
+#define mmTPC2_QM_CP_ARUSER_31_11_1 0xE88420
+
+#define mmTPC2_QM_CP_ARUSER_31_11_2 0xE88424
+
+#define mmTPC2_QM_CP_ARUSER_31_11_3 0xE88428
+
+#define mmTPC2_QM_CP_ARUSER_31_11_4 0xE8842C
+
+#define mmTPC2_QM_CP_AWUSER_31_11_0 0xE88430
+
+#define mmTPC2_QM_CP_AWUSER_31_11_1 0xE88434
+
+#define mmTPC2_QM_CP_AWUSER_31_11_2 0xE88438
+
+#define mmTPC2_QM_CP_AWUSER_31_11_3 0xE8843C
+
+#define mmTPC2_QM_CP_AWUSER_31_11_4 0xE88440
+
+#define mmTPC2_QM_ARB_CFG_0 0xE88A00
+
+#define mmTPC2_QM_ARB_CHOISE_Q_PUSH 0xE88A04
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_0 0xE88A08
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_1 0xE88A0C
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_2 0xE88A10
+
+#define mmTPC2_QM_ARB_WRR_WEIGHT_3 0xE88A14
+
+#define mmTPC2_QM_ARB_CFG_1 0xE88A18
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_0 0xE88A20
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_1 0xE88A24
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_2 0xE88A28
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_3 0xE88A2C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_4 0xE88A30
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_5 0xE88A34
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_6 0xE88A38
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_7 0xE88A3C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_8 0xE88A40
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_9 0xE88A44
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_10 0xE88A48
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_11 0xE88A4C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_12 0xE88A50
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_13 0xE88A54
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_14 0xE88A58
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_15 0xE88A5C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_16 0xE88A60
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_17 0xE88A64
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_18 0xE88A68
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_19 0xE88A6C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_20 0xE88A70
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_21 0xE88A74
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_22 0xE88A78
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_23 0xE88A7C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_24 0xE88A80
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_25 0xE88A84
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_26 0xE88A88
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_27 0xE88A8C
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_28 0xE88A90
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_29 0xE88A94
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_30 0xE88A98
+
+#define mmTPC2_QM_ARB_MST_AVAIL_CRED_31 0xE88A9C
+
+#define mmTPC2_QM_ARB_MST_CRED_INC 0xE88AA0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xE88AA4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xE88AA8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xE88AAC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xE88AB0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xE88AB4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xE88AB8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xE88ABC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xE88AC0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xE88AC4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xE88AC8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xE88ACC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xE88AD0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xE88AD4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xE88AD8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xE88ADC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xE88AE0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xE88AE4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xE88AE8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xE88AEC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xE88AF0
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xE88AF4
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xE88AF8
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xE88AFC
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xE88B00
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xE88B04
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xE88B08
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xE88B0C
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xE88B10
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xE88B14
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xE88B18
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xE88B1C
+
+#define mmTPC2_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xE88B20
+
+#define mmTPC2_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xE88B28
+
+#define mmTPC2_QM_ARB_MST_SLAVE_EN 0xE88B2C
+
+#define mmTPC2_QM_ARB_MST_QUIET_PER 0xE88B34
+
+#define mmTPC2_QM_ARB_SLV_CHOISE_WDT 0xE88B38
+
+#define mmTPC2_QM_ARB_SLV_ID 0xE88B3C
+
+#define mmTPC2_QM_ARB_MSG_MAX_INFLIGHT 0xE88B44
+
+#define mmTPC2_QM_ARB_MSG_AWUSER_31_11 0xE88B48
+
+#define mmTPC2_QM_ARB_MSG_AWUSER_SEC_PROP 0xE88B4C
+
+#define mmTPC2_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xE88B50
+
+#define mmTPC2_QM_ARB_BASE_LO 0xE88B54
+
+#define mmTPC2_QM_ARB_BASE_HI 0xE88B58
+
+#define mmTPC2_QM_ARB_STATE_STS 0xE88B80
+
+#define mmTPC2_QM_ARB_CHOISE_FULLNESS_STS 0xE88B84
+
+#define mmTPC2_QM_ARB_MSG_STS 0xE88B88
+
+#define mmTPC2_QM_ARB_SLV_CHOISE_Q_HEAD 0xE88B8C
+
+#define mmTPC2_QM_ARB_ERR_CAUSE 0xE88B9C
+
+#define mmTPC2_QM_ARB_ERR_MSG_EN 0xE88BA0
+
+#define mmTPC2_QM_ARB_ERR_STS_DRP 0xE88BA8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_0 0xE88BB0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_1 0xE88BB4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_2 0xE88BB8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_3 0xE88BBC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_4 0xE88BC0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_5 0xE88BC4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_6 0xE88BC8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_7 0xE88BCC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_8 0xE88BD0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_9 0xE88BD4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_10 0xE88BD8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_11 0xE88BDC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_12 0xE88BE0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_13 0xE88BE4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_14 0xE88BE8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_15 0xE88BEC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_16 0xE88BF0
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_17 0xE88BF4
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_18 0xE88BF8
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_19 0xE88BFC
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_20 0xE88C00
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_21 0xE88C04
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_22 0xE88C08
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_23 0xE88C0C
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_24 0xE88C10
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_25 0xE88C14
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_26 0xE88C18
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_27 0xE88C1C
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_28 0xE88C20
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_29 0xE88C24
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_30 0xE88C28
+
+#define mmTPC2_QM_ARB_MST_CRED_STS_31 0xE88C2C
+
+#define mmTPC2_QM_CGM_CFG 0xE88C70
+
+#define mmTPC2_QM_CGM_STS 0xE88C74
+
+#define mmTPC2_QM_CGM_CFG1 0xE88C78
+
+#define mmTPC2_QM_LOCAL_RANGE_BASE 0xE88C80
+
+#define mmTPC2_QM_LOCAL_RANGE_SIZE 0xE88C84
+
+#define mmTPC2_QM_CSMR_STRICT_PRIO_CFG 0xE88C90
+
+#define mmTPC2_QM_HBW_RD_RATE_LIM_CFG_1 0xE88C94
+
+#define mmTPC2_QM_LBW_WR_RATE_LIM_CFG_0 0xE88C98
+
+#define mmTPC2_QM_LBW_WR_RATE_LIM_CFG_1 0xE88C9C
+
+#define mmTPC2_QM_HBW_RD_RATE_LIM_CFG_0 0xE88CA0
+
+#define mmTPC2_QM_GLBL_AXCACHE 0xE88CA4
+
+#define mmTPC2_QM_IND_GW_APB_CFG 0xE88CB0
+
+#define mmTPC2_QM_IND_GW_APB_WDATA 0xE88CB4
+
+#define mmTPC2_QM_IND_GW_APB_RDATA 0xE88CB8
+
+#define mmTPC2_QM_IND_GW_APB_STATUS 0xE88CBC
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88CD0
+
+#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88CD4
+
+#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88CD8
+
+#define mmTPC2_QM_GLBL_MEM_INIT_BUSY 0xE88D00
+
+#endif /* ASIC_REG_TPC2_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
new file mode 100644
index 000000000000..6d42469659f1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_CFG_REGS_H_
+#define ASIC_REG_TPC3_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC6418
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC641C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6420
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC6424
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6428
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC642C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6430
+
+#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6434
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC6438
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC643C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6440
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6444
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC6448
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC644C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6450
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC6454
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6458
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC645C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6460
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6464
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC6468
+
+#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC646C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6470
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC6474
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC6478
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC647C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC6480
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC6484
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC6488
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC648C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC6490
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC6494
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC6498
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC649C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC64C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC64C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC64C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC64CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC64D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC64D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC64D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC64DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC64E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC64E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC64E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC64EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC64F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC64F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC64F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC64FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6500
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC6504
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6508
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC650C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6510
+
+#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6514
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC6518
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC651C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6520
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6524
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC6528
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC652C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6530
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC6534
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC6538
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC653C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC6540
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC6544
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC6548
+
+#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC654C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC6550
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC6554
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC6558
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC655C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC6560
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC6564
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC6568
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC656C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC6570
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC6574
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC6578
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC657C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6580
+
+#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC6584
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6588
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC658C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC6590
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6594
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6598
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC659C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC65A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC65A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC65A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC65AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC65B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC65B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC65B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC65BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xEC65C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xEC65C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xEC65C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xEC65CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xEC65D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xEC65D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xEC65D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xEC65DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xEC65E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xEC65E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xEC65E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xEC65EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xEC65F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xEC65F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xEC65F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xEC65FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xEC6600
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xEC6604
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xEC6608
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xEC660C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xEC6610
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xEC6614
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xEC6618
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xEC661C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xEC6620
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xEC6624
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xEC6628
+
+#define mmTPC3_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xEC662C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xEC6630
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xEC6634
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xEC6638
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xEC663C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xEC6640
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xEC6644
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xEC6648
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xEC664C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xEC6650
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xEC6654
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xEC6658
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xEC665C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xEC6660
+
+#define mmTPC3_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xEC6664
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xEC6668
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xEC666C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xEC6670
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xEC6674
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xEC6678
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xEC667C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xEC6680
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xEC6684
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xEC6688
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xEC668C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xEC6690
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xEC6694
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xEC6698
+
+#define mmTPC3_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xEC669C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xEC66A0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xEC66A4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xEC66A8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xEC66AC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xEC66B0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xEC66B4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xEC66B8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xEC66BC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xEC66C0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xEC66C4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xEC66C8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xEC66CC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xEC66D0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xEC66D4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xEC66D8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xEC66DC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xEC66E0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xEC66E4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xEC66E8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xEC66EC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xEC66F0
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xEC66F4
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xEC66F8
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xEC66FC
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xEC6700
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xEC6704
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xEC6708
+
+#define mmTPC3_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xEC670C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xEC6710
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xEC6714
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xEC6718
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xEC671C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xEC6720
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xEC6724
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xEC6728
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xEC672C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xEC6730
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xEC6734
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xEC6738
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xEC673C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xEC6740
+
+#define mmTPC3_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xEC6744
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xEC6748
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xEC674C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xEC6750
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xEC6754
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xEC6758
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xEC675C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xEC6760
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xEC6764
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xEC6768
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xEC676C
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xEC6770
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xEC6774
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xEC6778
+
+#define mmTPC3_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xEC677C
+
+#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6780
+
+#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_ADDR 0xEC6784
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6788
+
+#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC678C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6790
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC6794
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6798
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC679C
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC67A0
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC67A4
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC67A8
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC67AC
+
+#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC67B0
+
+#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC67B4
+
+#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC67B8
+
+#define mmTPC3_CFG_KERNEL_KERNEL_ID 0xEC67BC
+
+#define mmTPC3_CFG_KERNEL_SRF_0 0xEC67C0
+
+#define mmTPC3_CFG_KERNEL_SRF_1 0xEC67C4
+
+#define mmTPC3_CFG_KERNEL_SRF_2 0xEC67C8
+
+#define mmTPC3_CFG_KERNEL_SRF_3 0xEC67CC
+
+#define mmTPC3_CFG_KERNEL_SRF_4 0xEC67D0
+
+#define mmTPC3_CFG_KERNEL_SRF_5 0xEC67D4
+
+#define mmTPC3_CFG_KERNEL_SRF_6 0xEC67D8
+
+#define mmTPC3_CFG_KERNEL_SRF_7 0xEC67DC
+
+#define mmTPC3_CFG_KERNEL_SRF_8 0xEC67E0
+
+#define mmTPC3_CFG_KERNEL_SRF_9 0xEC67E4
+
+#define mmTPC3_CFG_KERNEL_SRF_10 0xEC67E8
+
+#define mmTPC3_CFG_KERNEL_SRF_11 0xEC67EC
+
+#define mmTPC3_CFG_KERNEL_SRF_12 0xEC67F0
+
+#define mmTPC3_CFG_KERNEL_SRF_13 0xEC67F4
+
+#define mmTPC3_CFG_KERNEL_SRF_14 0xEC67F8
+
+#define mmTPC3_CFG_KERNEL_SRF_15 0xEC67FC
+
+#define mmTPC3_CFG_KERNEL_SRF_16 0xEC6800
+
+#define mmTPC3_CFG_KERNEL_SRF_17 0xEC6804
+
+#define mmTPC3_CFG_KERNEL_SRF_18 0xEC6808
+
+#define mmTPC3_CFG_KERNEL_SRF_19 0xEC680C
+
+#define mmTPC3_CFG_KERNEL_SRF_20 0xEC6810
+
+#define mmTPC3_CFG_KERNEL_SRF_21 0xEC6814
+
+#define mmTPC3_CFG_KERNEL_SRF_22 0xEC6818
+
+#define mmTPC3_CFG_KERNEL_SRF_23 0xEC681C
+
+#define mmTPC3_CFG_KERNEL_SRF_24 0xEC6820
+
+#define mmTPC3_CFG_KERNEL_SRF_25 0xEC6824
+
+#define mmTPC3_CFG_KERNEL_SRF_26 0xEC6828
+
+#define mmTPC3_CFG_KERNEL_SRF_27 0xEC682C
+
+#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6830
+
+#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6834
+
+#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6838
+
+#define mmTPC3_CFG_KERNEL_SRF_31 0xEC683C
+
+#define mmTPC3_CFG_ROUND_CSR 0xEC68FC
+
+#define mmTPC3_CFG_PROT 0xEC6900
+
+#define mmTPC3_CFG_SEMAPHORE 0xEC6908
+
+#define mmTPC3_CFG_VFLAGS 0xEC690C
+
+#define mmTPC3_CFG_SFLAGS 0xEC6910
+
+#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6918
+
+#define mmTPC3_CFG_STATUS 0xEC691C
+
+#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6920
+
+#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6924
+
+#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC692C
+
+#define mmTPC3_CFG_TPC_CMD 0xEC6930
+
+#define mmTPC3_CFG_TPC_EXECUTE 0xEC6938
+
+#define mmTPC3_CFG_TPC_STALL 0xEC693C
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6940
+
+#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6944
+
+#define mmTPC3_CFG_RD_RATE_LIMIT 0xEC6948
+
+#define mmTPC3_CFG_WR_RATE_LIMIT 0xEC6950
+
+#define mmTPC3_CFG_MSS_CONFIG 0xEC6954
+
+#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6958
+
+#define mmTPC3_CFG_TPC_INTR_MASK 0xEC695C
+
+#define mmTPC3_CFG_WQ_CREDITS 0xEC6960
+
+#define mmTPC3_CFG_ARUSER_LO 0xEC6964
+
+#define mmTPC3_CFG_ARUSER_HI 0xEC6968
+
+#define mmTPC3_CFG_AWUSER_LO 0xEC696C
+
+#define mmTPC3_CFG_AWUSER_HI 0xEC6970
+
+#define mmTPC3_CFG_OPCODE_EXEC 0xEC6974
+
+#define mmTPC3_CFG_LUT_FUNC32_BASE_ADDR_LO 0xEC6978
+
+#define mmTPC3_CFG_LUT_FUNC32_BASE_ADDR_HI 0xEC697C
+
+#define mmTPC3_CFG_LUT_FUNC64_BASE_ADDR_LO 0xEC6980
+
+#define mmTPC3_CFG_LUT_FUNC64_BASE_ADDR_HI 0xEC6984
+
+#define mmTPC3_CFG_LUT_FUNC128_BASE_ADDR_LO 0xEC6988
+
+#define mmTPC3_CFG_LUT_FUNC128_BASE_ADDR_HI 0xEC698C
+
+#define mmTPC3_CFG_LUT_FUNC256_BASE_ADDR_LO 0xEC6990
+
+#define mmTPC3_CFG_LUT_FUNC256_BASE_ADDR_HI 0xEC6994
+
+#define mmTPC3_CFG_TSB_CFG_MAX_SIZE 0xEC6998
+
+#define mmTPC3_CFG_TSB_CFG 0xEC699C
+
+#define mmTPC3_CFG_DBGMEM_ADD 0xEC69A0
+
+#define mmTPC3_CFG_DBGMEM_DATA_WR 0xEC69A4
+
+#define mmTPC3_CFG_DBGMEM_DATA_RD 0xEC69A8
+
+#define mmTPC3_CFG_DBGMEM_CTRL 0xEC69AC
+
+#define mmTPC3_CFG_DBGMEM_RC 0xEC69B0
+
+#define mmTPC3_CFG_TSB_INFLIGHT_CNTR 0xEC69B4
+
+#define mmTPC3_CFG_WQ_INFLIGHT_CNTR 0xEC69B8
+
+#define mmTPC3_CFG_WQ_LBW_TOTAL_CNTR 0xEC69BC
+
+#define mmTPC3_CFG_WQ_HBW_TOTAL_CNTR 0xEC69C0
+
+#define mmTPC3_CFG_IRQ_OCCOUPY_CNTR 0xEC69C4
+
+#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC69D0
+
+#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC69D4
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC69D8
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC69DC
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC69E0
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC69E4
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC69E8
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC69EC
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC69F0
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC69F4
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC69F8
+
+#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC69FC
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00
+
+#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04
+
+#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08
+
+#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A18
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A1C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A20
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A24
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A28
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A2C
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A30
+
+#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A34
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A38
+
+#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A3C
+
+#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A40
+
+#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A44
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A48
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A4C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A50
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A54
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A58
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A5C
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A60
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A64
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A68
+
+#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A6C
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A70
+
+#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A74
+
+#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6A78
+
+#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6A7C
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6A80
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6A84
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6A88
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6A8C
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6A90
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6A94
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6A98
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6A9C
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AA0
+
+#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6AA4
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AA8
+
+#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AAC
+
+#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AB0
+
+#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AB4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AB8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6ABC
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6AC0
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6AC4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6AC8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6ACC
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6AD0
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6AD4
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6AD8
+
+#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6ADC
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6AE0
+
+#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6AE4
+
+#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6AE8
+
+#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6AEC
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6AF0
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6AF4
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6AF8
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6AFC
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B00
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B04
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B08
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B0C
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B10
+
+#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B14
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B18
+
+#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B1C
+
+#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B20
+
+#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B24
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B28
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B2C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B30
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B34
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6B38
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6B3C
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6B40
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6B44
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6B48
+
+#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6B4C
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6B50
+
+#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6B54
+
+#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6B58
+
+#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6B5C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6B60
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6B64
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6B68
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6B6C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6B70
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6B74
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6B78
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6B7C
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6B80
+
+#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6B84
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6B88
+
+#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6B8C
+
+#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6B90
+
+#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6B94
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6B98
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6B9C
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6BA0
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6BA4
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6BA8
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6BAC
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6BB0
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6BB4
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6BB8
+
+#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6BBC
+
+#define mmTPC3_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xEC6BC0
+
+#define mmTPC3_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xEC6BC4
+
+#define mmTPC3_CFG_QM_TENSOR_8_PADDING_VALUE 0xEC6BC8
+
+#define mmTPC3_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xEC6BCC
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_0_SIZE 0xEC6BD0
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xEC6BD4
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_1_SIZE 0xEC6BD8
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xEC6BDC
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_2_SIZE 0xEC6BE0
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xEC6BE4
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_3_SIZE 0xEC6BE8
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xEC6BEC
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_4_SIZE 0xEC6BF0
+
+#define mmTPC3_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xEC6BF4
+
+#define mmTPC3_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xEC6BF8
+
+#define mmTPC3_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xEC6BFC
+
+#define mmTPC3_CFG_QM_TENSOR_9_PADDING_VALUE 0xEC6C00
+
+#define mmTPC3_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xEC6C04
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_0_SIZE 0xEC6C08
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xEC6C0C
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_1_SIZE 0xEC6C10
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xEC6C14
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_2_SIZE 0xEC6C18
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xEC6C1C
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_3_SIZE 0xEC6C20
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xEC6C24
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_4_SIZE 0xEC6C28
+
+#define mmTPC3_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xEC6C2C
+
+#define mmTPC3_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xEC6C30
+
+#define mmTPC3_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xEC6C34
+
+#define mmTPC3_CFG_QM_TENSOR_10_PADDING_VALUE 0xEC6C38
+
+#define mmTPC3_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xEC6C3C
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_0_SIZE 0xEC6C40
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xEC6C44
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_1_SIZE 0xEC6C48
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xEC6C4C
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_2_SIZE 0xEC6C50
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xEC6C54
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_3_SIZE 0xEC6C58
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xEC6C5C
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_4_SIZE 0xEC6C60
+
+#define mmTPC3_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xEC6C64
+
+#define mmTPC3_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xEC6C68
+
+#define mmTPC3_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xEC6C6C
+
+#define mmTPC3_CFG_QM_TENSOR_11_PADDING_VALUE 0xEC6C70
+
+#define mmTPC3_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xEC6C74
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_0_SIZE 0xEC6C78
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xEC6C7C
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_1_SIZE 0xEC6C80
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xEC6C84
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_2_SIZE 0xEC6C88
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xEC6C8C
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_3_SIZE 0xEC6C90
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xEC6C94
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_4_SIZE 0xEC6C98
+
+#define mmTPC3_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xEC6C9C
+
+#define mmTPC3_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xEC6CA0
+
+#define mmTPC3_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xEC6CA4
+
+#define mmTPC3_CFG_QM_TENSOR_12_PADDING_VALUE 0xEC6CA8
+
+#define mmTPC3_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xEC6CAC
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_0_SIZE 0xEC6CB0
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xEC6CB4
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_1_SIZE 0xEC6CB8
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xEC6CBC
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_2_SIZE 0xEC6CC0
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xEC6CC4
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_3_SIZE 0xEC6CC8
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xEC6CCC
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_4_SIZE 0xEC6CD0
+
+#define mmTPC3_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xEC6CD4
+
+#define mmTPC3_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xEC6CD8
+
+#define mmTPC3_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xEC6CDC
+
+#define mmTPC3_CFG_QM_TENSOR_13_PADDING_VALUE 0xEC6CE0
+
+#define mmTPC3_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xEC6CE4
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_0_SIZE 0xEC6CE8
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xEC6CEC
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_1_SIZE 0xEC6CF0
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xEC6CF4
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_2_SIZE 0xEC6CF8
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xEC6CFC
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_3_SIZE 0xEC6D00
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xEC6D04
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_4_SIZE 0xEC6D08
+
+#define mmTPC3_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xEC6D0C
+
+#define mmTPC3_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xEC6D10
+
+#define mmTPC3_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xEC6D14
+
+#define mmTPC3_CFG_QM_TENSOR_14_PADDING_VALUE 0xEC6D18
+
+#define mmTPC3_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xEC6D1C
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_0_SIZE 0xEC6D20
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xEC6D24
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_1_SIZE 0xEC6D28
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xEC6D2C
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_2_SIZE 0xEC6D30
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xEC6D34
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_3_SIZE 0xEC6D38
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xEC6D3C
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_4_SIZE 0xEC6D40
+
+#define mmTPC3_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xEC6D44
+
+#define mmTPC3_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xEC6D48
+
+#define mmTPC3_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xEC6D4C
+
+#define mmTPC3_CFG_QM_TENSOR_15_PADDING_VALUE 0xEC6D50
+
+#define mmTPC3_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xEC6D54
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_0_SIZE 0xEC6D58
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xEC6D5C
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_1_SIZE 0xEC6D60
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xEC6D64
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_2_SIZE 0xEC6D68
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xEC6D6C
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_3_SIZE 0xEC6D70
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xEC6D74
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_4_SIZE 0xEC6D78
+
+#define mmTPC3_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xEC6D7C
+
+#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D80
+
+#define mmTPC3_CFG_QM_SYNC_OBJECT_ADDR 0xEC6D84
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6D88
+
+#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6D8C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6D90
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6D94
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6D98
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6D9C
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6DA0
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6DA4
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6DA8
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6DAC
+
+#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6DB0
+
+#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6DB4
+
+#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6DB8
+
+#define mmTPC3_CFG_QM_KERNEL_ID 0xEC6DBC
+
+#define mmTPC3_CFG_QM_SRF_0 0xEC6DC0
+
+#define mmTPC3_CFG_QM_SRF_1 0xEC6DC4
+
+#define mmTPC3_CFG_QM_SRF_2 0xEC6DC8
+
+#define mmTPC3_CFG_QM_SRF_3 0xEC6DCC
+
+#define mmTPC3_CFG_QM_SRF_4 0xEC6DD0
+
+#define mmTPC3_CFG_QM_SRF_5 0xEC6DD4
+
+#define mmTPC3_CFG_QM_SRF_6 0xEC6DD8
+
+#define mmTPC3_CFG_QM_SRF_7 0xEC6DDC
+
+#define mmTPC3_CFG_QM_SRF_8 0xEC6DE0
+
+#define mmTPC3_CFG_QM_SRF_9 0xEC6DE4
+
+#define mmTPC3_CFG_QM_SRF_10 0xEC6DE8
+
+#define mmTPC3_CFG_QM_SRF_11 0xEC6DEC
+
+#define mmTPC3_CFG_QM_SRF_12 0xEC6DF0
+
+#define mmTPC3_CFG_QM_SRF_13 0xEC6DF4
+
+#define mmTPC3_CFG_QM_SRF_14 0xEC6DF8
+
+#define mmTPC3_CFG_QM_SRF_15 0xEC6DFC
+
+#define mmTPC3_CFG_QM_SRF_16 0xEC6E00
+
+#define mmTPC3_CFG_QM_SRF_17 0xEC6E04
+
+#define mmTPC3_CFG_QM_SRF_18 0xEC6E08
+
+#define mmTPC3_CFG_QM_SRF_19 0xEC6E0C
+
+#define mmTPC3_CFG_QM_SRF_20 0xEC6E10
+
+#define mmTPC3_CFG_QM_SRF_21 0xEC6E14
+
+#define mmTPC3_CFG_QM_SRF_22 0xEC6E18
+
+#define mmTPC3_CFG_QM_SRF_23 0xEC6E1C
+
+#define mmTPC3_CFG_QM_SRF_24 0xEC6E20
+
+#define mmTPC3_CFG_QM_SRF_25 0xEC6E24
+
+#define mmTPC3_CFG_QM_SRF_26 0xEC6E28
+
+#define mmTPC3_CFG_QM_SRF_27 0xEC6E2C
+
+#define mmTPC3_CFG_QM_SRF_28 0xEC6E30
+
+#define mmTPC3_CFG_QM_SRF_29 0xEC6E34
+
+#define mmTPC3_CFG_QM_SRF_30 0xEC6E38
+
+#define mmTPC3_CFG_QM_SRF_31 0xEC6E3C
+
+#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
new file mode 100644
index 000000000000..5f2a0fd86c9e
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC3_QM_REGS_H_
+#define ASIC_REG_TPC3_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC3_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC3_QM_GLBL_CFG0 0xEC8000
+
+#define mmTPC3_QM_GLBL_CFG1 0xEC8004
+
+#define mmTPC3_QM_GLBL_PROT 0xEC8008
+
+#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_0 0xEC8010
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_1 0xEC8014
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_2 0xEC8018
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_3 0xEC801C
+
+#define mmTPC3_QM_GLBL_SECURE_PROPS_4 0xEC8020
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_0 0xEC8024
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_1 0xEC8028
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_2 0xEC802C
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_3 0xEC8030
+
+#define mmTPC3_QM_GLBL_NON_SECURE_PROPS_4 0xEC8034
+
+#define mmTPC3_QM_GLBL_STS0 0xEC8038
+
+#define mmTPC3_QM_GLBL_STS1_0 0xEC8040
+
+#define mmTPC3_QM_GLBL_STS1_1 0xEC8044
+
+#define mmTPC3_QM_GLBL_STS1_2 0xEC8048
+
+#define mmTPC3_QM_GLBL_STS1_3 0xEC804C
+
+#define mmTPC3_QM_GLBL_STS1_4 0xEC8050
+
+#define mmTPC3_QM_GLBL_MSG_EN_0 0xEC8054
+
+#define mmTPC3_QM_GLBL_MSG_EN_1 0xEC8058
+
+#define mmTPC3_QM_GLBL_MSG_EN_2 0xEC805C
+
+#define mmTPC3_QM_GLBL_MSG_EN_3 0xEC8060
+
+#define mmTPC3_QM_GLBL_MSG_EN_4 0xEC8068
+
+#define mmTPC3_QM_PQ_BASE_LO_0 0xEC8070
+
+#define mmTPC3_QM_PQ_BASE_LO_1 0xEC8074
+
+#define mmTPC3_QM_PQ_BASE_LO_2 0xEC8078
+
+#define mmTPC3_QM_PQ_BASE_LO_3 0xEC807C
+
+#define mmTPC3_QM_PQ_BASE_HI_0 0xEC8080
+
+#define mmTPC3_QM_PQ_BASE_HI_1 0xEC8084
+
+#define mmTPC3_QM_PQ_BASE_HI_2 0xEC8088
+
+#define mmTPC3_QM_PQ_BASE_HI_3 0xEC808C
+
+#define mmTPC3_QM_PQ_SIZE_0 0xEC8090
+
+#define mmTPC3_QM_PQ_SIZE_1 0xEC8094
+
+#define mmTPC3_QM_PQ_SIZE_2 0xEC8098
+
+#define mmTPC3_QM_PQ_SIZE_3 0xEC809C
+
+#define mmTPC3_QM_PQ_PI_0 0xEC80A0
+
+#define mmTPC3_QM_PQ_PI_1 0xEC80A4
+
+#define mmTPC3_QM_PQ_PI_2 0xEC80A8
+
+#define mmTPC3_QM_PQ_PI_3 0xEC80AC
+
+#define mmTPC3_QM_PQ_CI_0 0xEC80B0
+
+#define mmTPC3_QM_PQ_CI_1 0xEC80B4
+
+#define mmTPC3_QM_PQ_CI_2 0xEC80B8
+
+#define mmTPC3_QM_PQ_CI_3 0xEC80BC
+
+#define mmTPC3_QM_PQ_CFG0_0 0xEC80C0
+
+#define mmTPC3_QM_PQ_CFG0_1 0xEC80C4
+
+#define mmTPC3_QM_PQ_CFG0_2 0xEC80C8
+
+#define mmTPC3_QM_PQ_CFG0_3 0xEC80CC
+
+#define mmTPC3_QM_PQ_CFG1_0 0xEC80D0
+
+#define mmTPC3_QM_PQ_CFG1_1 0xEC80D4
+
+#define mmTPC3_QM_PQ_CFG1_2 0xEC80D8
+
+#define mmTPC3_QM_PQ_CFG1_3 0xEC80DC
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_0 0xEC80E0
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_1 0xEC80E4
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_2 0xEC80E8
+
+#define mmTPC3_QM_PQ_ARUSER_31_11_3 0xEC80EC
+
+#define mmTPC3_QM_PQ_STS0_0 0xEC80F0
+
+#define mmTPC3_QM_PQ_STS0_1 0xEC80F4
+
+#define mmTPC3_QM_PQ_STS0_2 0xEC80F8
+
+#define mmTPC3_QM_PQ_STS0_3 0xEC80FC
+
+#define mmTPC3_QM_PQ_STS1_0 0xEC8100
+
+#define mmTPC3_QM_PQ_STS1_1 0xEC8104
+
+#define mmTPC3_QM_PQ_STS1_2 0xEC8108
+
+#define mmTPC3_QM_PQ_STS1_3 0xEC810C
+
+#define mmTPC3_QM_CQ_CFG0_0 0xEC8110
+
+#define mmTPC3_QM_CQ_CFG0_1 0xEC8114
+
+#define mmTPC3_QM_CQ_CFG0_2 0xEC8118
+
+#define mmTPC3_QM_CQ_CFG0_3 0xEC811C
+
+#define mmTPC3_QM_CQ_CFG0_4 0xEC8120
+
+#define mmTPC3_QM_CQ_CFG1_0 0xEC8124
+
+#define mmTPC3_QM_CQ_CFG1_1 0xEC8128
+
+#define mmTPC3_QM_CQ_CFG1_2 0xEC812C
+
+#define mmTPC3_QM_CQ_CFG1_3 0xEC8130
+
+#define mmTPC3_QM_CQ_CFG1_4 0xEC8134
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_0 0xEC8138
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_1 0xEC813C
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_2 0xEC8140
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_3 0xEC8144
+
+#define mmTPC3_QM_CQ_ARUSER_31_11_4 0xEC8148
+
+#define mmTPC3_QM_CQ_STS0_0 0xEC814C
+
+#define mmTPC3_QM_CQ_STS0_1 0xEC8150
+
+#define mmTPC3_QM_CQ_STS0_2 0xEC8154
+
+#define mmTPC3_QM_CQ_STS0_3 0xEC8158
+
+#define mmTPC3_QM_CQ_STS0_4 0xEC815C
+
+#define mmTPC3_QM_CQ_STS1_0 0xEC8160
+
+#define mmTPC3_QM_CQ_STS1_1 0xEC8164
+
+#define mmTPC3_QM_CQ_STS1_2 0xEC8168
+
+#define mmTPC3_QM_CQ_STS1_3 0xEC816C
+
+#define mmTPC3_QM_CQ_STS1_4 0xEC8170
+
+#define mmTPC3_QM_CQ_PTR_LO_0 0xEC8174
+
+#define mmTPC3_QM_CQ_PTR_HI_0 0xEC8178
+
+#define mmTPC3_QM_CQ_TSIZE_0 0xEC817C
+
+#define mmTPC3_QM_CQ_CTL_0 0xEC8180
+
+#define mmTPC3_QM_CQ_PTR_LO_1 0xEC8184
+
+#define mmTPC3_QM_CQ_PTR_HI_1 0xEC8188
+
+#define mmTPC3_QM_CQ_TSIZE_1 0xEC818C
+
+#define mmTPC3_QM_CQ_CTL_1 0xEC8190
+
+#define mmTPC3_QM_CQ_PTR_LO_2 0xEC8194
+
+#define mmTPC3_QM_CQ_PTR_HI_2 0xEC8198
+
+#define mmTPC3_QM_CQ_TSIZE_2 0xEC819C
+
+#define mmTPC3_QM_CQ_CTL_2 0xEC81A0
+
+#define mmTPC3_QM_CQ_PTR_LO_3 0xEC81A4
+
+#define mmTPC3_QM_CQ_PTR_HI_3 0xEC81A8
+
+#define mmTPC3_QM_CQ_TSIZE_3 0xEC81AC
+
+#define mmTPC3_QM_CQ_CTL_3 0xEC81B0
+
+#define mmTPC3_QM_CQ_PTR_LO_4 0xEC81B4
+
+#define mmTPC3_QM_CQ_PTR_HI_4 0xEC81B8
+
+#define mmTPC3_QM_CQ_TSIZE_4 0xEC81BC
+
+#define mmTPC3_QM_CQ_CTL_4 0xEC81C0
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_0 0xEC81C4
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_1 0xEC81C8
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_2 0xEC81CC
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_3 0xEC81D0
+
+#define mmTPC3_QM_CQ_PTR_LO_STS_4 0xEC81D4
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_0 0xEC81D8
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_1 0xEC81DC
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_2 0xEC81E0
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_3 0xEC81E4
+
+#define mmTPC3_QM_CQ_PTR_HI_STS_4 0xEC81E8
+
+#define mmTPC3_QM_CQ_TSIZE_STS_0 0xEC81EC
+
+#define mmTPC3_QM_CQ_TSIZE_STS_1 0xEC81F0
+
+#define mmTPC3_QM_CQ_TSIZE_STS_2 0xEC81F4
+
+#define mmTPC3_QM_CQ_TSIZE_STS_3 0xEC81F8
+
+#define mmTPC3_QM_CQ_TSIZE_STS_4 0xEC81FC
+
+#define mmTPC3_QM_CQ_CTL_STS_0 0xEC8200
+
+#define mmTPC3_QM_CQ_CTL_STS_1 0xEC8204
+
+#define mmTPC3_QM_CQ_CTL_STS_2 0xEC8208
+
+#define mmTPC3_QM_CQ_CTL_STS_3 0xEC820C
+
+#define mmTPC3_QM_CQ_CTL_STS_4 0xEC8210
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_0 0xEC8214
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_1 0xEC8218
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_2 0xEC821C
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_3 0xEC8220
+
+#define mmTPC3_QM_CQ_IFIFO_CNT_4 0xEC8224
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_0 0xEC8228
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_1 0xEC822C
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_2 0xEC8230
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_3 0xEC8234
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO_4 0xEC8238
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_0 0xEC823C
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_1 0xEC8240
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_2 0xEC8244
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_3 0xEC8248
+
+#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI_4 0xEC824C
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_0 0xEC8250
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_1 0xEC8254
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_2 0xEC8258
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_3 0xEC825C
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO_4 0xEC8260
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_0 0xEC8264
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_1 0xEC8268
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_2 0xEC826C
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_3 0xEC8270
+
+#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI_4 0xEC8274
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_0 0xEC8278
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_1 0xEC827C
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_2 0xEC8280
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_3 0xEC8284
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO_4 0xEC8288
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_0 0xEC828C
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_1 0xEC8290
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_2 0xEC8294
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_3 0xEC8298
+
+#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI_4 0xEC829C
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_0 0xEC82A0
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_1 0xEC82A4
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_2 0xEC82A8
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_3 0xEC82AC
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO_4 0xEC82B0
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_0 0xEC82B4
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_1 0xEC82B8
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_2 0xEC82BC
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_3 0xEC82C0
+
+#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI_4 0xEC82C4
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_0 0xEC82C8
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_1 0xEC82CC
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_2 0xEC82D0
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_3 0xEC82D4
+
+#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET_4 0xEC82D8
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xEC82E0
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xEC82E4
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xEC82E8
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xEC82EC
+
+#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xEC82F0
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xEC82F4
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xEC82F8
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xEC82FC
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xEC8300
+
+#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xEC8304
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_0 0xEC8308
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_1 0xEC830C
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_2 0xEC8310
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_3 0xEC8314
+
+#define mmTPC3_QM_CP_FENCE0_RDATA_4 0xEC8318
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_0 0xEC831C
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_1 0xEC8320
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_2 0xEC8324
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_3 0xEC8328
+
+#define mmTPC3_QM_CP_FENCE1_RDATA_4 0xEC832C
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_0 0xEC8330
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_1 0xEC8334
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_2 0xEC8338
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_3 0xEC833C
+
+#define mmTPC3_QM_CP_FENCE2_RDATA_4 0xEC8340
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_0 0xEC8344
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_1 0xEC8348
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_2 0xEC834C
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_3 0xEC8350
+
+#define mmTPC3_QM_CP_FENCE3_RDATA_4 0xEC8354
+
+#define mmTPC3_QM_CP_FENCE0_CNT_0 0xEC8358
+
+#define mmTPC3_QM_CP_FENCE0_CNT_1 0xEC835C
+
+#define mmTPC3_QM_CP_FENCE0_CNT_2 0xEC8360
+
+#define mmTPC3_QM_CP_FENCE0_CNT_3 0xEC8364
+
+#define mmTPC3_QM_CP_FENCE0_CNT_4 0xEC8368
+
+#define mmTPC3_QM_CP_FENCE1_CNT_0 0xEC836C
+
+#define mmTPC3_QM_CP_FENCE1_CNT_1 0xEC8370
+
+#define mmTPC3_QM_CP_FENCE1_CNT_2 0xEC8374
+
+#define mmTPC3_QM_CP_FENCE1_CNT_3 0xEC8378
+
+#define mmTPC3_QM_CP_FENCE1_CNT_4 0xEC837C
+
+#define mmTPC3_QM_CP_FENCE2_CNT_0 0xEC8380
+
+#define mmTPC3_QM_CP_FENCE2_CNT_1 0xEC8384
+
+#define mmTPC3_QM_CP_FENCE2_CNT_2 0xEC8388
+
+#define mmTPC3_QM_CP_FENCE2_CNT_3 0xEC838C
+
+#define mmTPC3_QM_CP_FENCE2_CNT_4 0xEC8390
+
+#define mmTPC3_QM_CP_FENCE3_CNT_0 0xEC8394
+
+#define mmTPC3_QM_CP_FENCE3_CNT_1 0xEC8398
+
+#define mmTPC3_QM_CP_FENCE3_CNT_2 0xEC839C
+
+#define mmTPC3_QM_CP_FENCE3_CNT_3 0xEC83A0
+
+#define mmTPC3_QM_CP_FENCE3_CNT_4 0xEC83A4
+
+#define mmTPC3_QM_CP_STS_0 0xEC83A8
+
+#define mmTPC3_QM_CP_STS_1 0xEC83AC
+
+#define mmTPC3_QM_CP_STS_2 0xEC83B0
+
+#define mmTPC3_QM_CP_STS_3 0xEC83B4
+
+#define mmTPC3_QM_CP_STS_4 0xEC83B8
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_0 0xEC83BC
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_1 0xEC83C0
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_2 0xEC83C4
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_3 0xEC83C8
+
+#define mmTPC3_QM_CP_CURRENT_INST_LO_4 0xEC83CC
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_0 0xEC83D0
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_1 0xEC83D4
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_2 0xEC83D8
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_3 0xEC83DC
+
+#define mmTPC3_QM_CP_CURRENT_INST_HI_4 0xEC83E0
+
+#define mmTPC3_QM_CP_BARRIER_CFG_0 0xEC83F4
+
+#define mmTPC3_QM_CP_BARRIER_CFG_1 0xEC83F8
+
+#define mmTPC3_QM_CP_BARRIER_CFG_2 0xEC83FC
+
+#define mmTPC3_QM_CP_BARRIER_CFG_3 0xEC8400
+
+#define mmTPC3_QM_CP_BARRIER_CFG_4 0xEC8404
+
+#define mmTPC3_QM_CP_DBG_0_0 0xEC8408
+
+#define mmTPC3_QM_CP_DBG_0_1 0xEC840C
+
+#define mmTPC3_QM_CP_DBG_0_2 0xEC8410
+
+#define mmTPC3_QM_CP_DBG_0_3 0xEC8414
+
+#define mmTPC3_QM_CP_DBG_0_4 0xEC8418
+
+#define mmTPC3_QM_CP_ARUSER_31_11_0 0xEC841C
+
+#define mmTPC3_QM_CP_ARUSER_31_11_1 0xEC8420
+
+#define mmTPC3_QM_CP_ARUSER_31_11_2 0xEC8424
+
+#define mmTPC3_QM_CP_ARUSER_31_11_3 0xEC8428
+
+#define mmTPC3_QM_CP_ARUSER_31_11_4 0xEC842C
+
+#define mmTPC3_QM_CP_AWUSER_31_11_0 0xEC8430
+
+#define mmTPC3_QM_CP_AWUSER_31_11_1 0xEC8434
+
+#define mmTPC3_QM_CP_AWUSER_31_11_2 0xEC8438
+
+#define mmTPC3_QM_CP_AWUSER_31_11_3 0xEC843C
+
+#define mmTPC3_QM_CP_AWUSER_31_11_4 0xEC8440
+
+#define mmTPC3_QM_ARB_CFG_0 0xEC8A00
+
+#define mmTPC3_QM_ARB_CHOISE_Q_PUSH 0xEC8A04
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_0 0xEC8A08
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_1 0xEC8A0C
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_2 0xEC8A10
+
+#define mmTPC3_QM_ARB_WRR_WEIGHT_3 0xEC8A14
+
+#define mmTPC3_QM_ARB_CFG_1 0xEC8A18
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_0 0xEC8A20
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_1 0xEC8A24
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_2 0xEC8A28
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_3 0xEC8A2C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_4 0xEC8A30
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_5 0xEC8A34
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_6 0xEC8A38
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_7 0xEC8A3C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_8 0xEC8A40
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_9 0xEC8A44
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_10 0xEC8A48
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_11 0xEC8A4C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_12 0xEC8A50
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_13 0xEC8A54
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_14 0xEC8A58
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_15 0xEC8A5C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_16 0xEC8A60
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_17 0xEC8A64
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_18 0xEC8A68
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_19 0xEC8A6C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_20 0xEC8A70
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_21 0xEC8A74
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_22 0xEC8A78
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_23 0xEC8A7C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_24 0xEC8A80
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_25 0xEC8A84
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_26 0xEC8A88
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_27 0xEC8A8C
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_28 0xEC8A90
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_29 0xEC8A94
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_30 0xEC8A98
+
+#define mmTPC3_QM_ARB_MST_AVAIL_CRED_31 0xEC8A9C
+
+#define mmTPC3_QM_ARB_MST_CRED_INC 0xEC8AA0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xEC8AA4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xEC8AA8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xEC8AAC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xEC8AB0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xEC8AB4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xEC8AB8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xEC8ABC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xEC8AC0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xEC8AC4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xEC8AC8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xEC8ACC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xEC8AD0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xEC8AD4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xEC8AD8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xEC8ADC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xEC8AE0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xEC8AE4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xEC8AE8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xEC8AEC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xEC8AF0
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xEC8AF4
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xEC8AF8
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xEC8AFC
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xEC8B00
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xEC8B04
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xEC8B08
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xEC8B0C
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xEC8B10
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xEC8B14
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xEC8B18
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xEC8B1C
+
+#define mmTPC3_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xEC8B20
+
+#define mmTPC3_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xEC8B28
+
+#define mmTPC3_QM_ARB_MST_SLAVE_EN 0xEC8B2C
+
+#define mmTPC3_QM_ARB_MST_QUIET_PER 0xEC8B34
+
+#define mmTPC3_QM_ARB_SLV_CHOISE_WDT 0xEC8B38
+
+#define mmTPC3_QM_ARB_SLV_ID 0xEC8B3C
+
+#define mmTPC3_QM_ARB_MSG_MAX_INFLIGHT 0xEC8B44
+
+#define mmTPC3_QM_ARB_MSG_AWUSER_31_11 0xEC8B48
+
+#define mmTPC3_QM_ARB_MSG_AWUSER_SEC_PROP 0xEC8B4C
+
+#define mmTPC3_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xEC8B50
+
+#define mmTPC3_QM_ARB_BASE_LO 0xEC8B54
+
+#define mmTPC3_QM_ARB_BASE_HI 0xEC8B58
+
+#define mmTPC3_QM_ARB_STATE_STS 0xEC8B80
+
+#define mmTPC3_QM_ARB_CHOISE_FULLNESS_STS 0xEC8B84
+
+#define mmTPC3_QM_ARB_MSG_STS 0xEC8B88
+
+#define mmTPC3_QM_ARB_SLV_CHOISE_Q_HEAD 0xEC8B8C
+
+#define mmTPC3_QM_ARB_ERR_CAUSE 0xEC8B9C
+
+#define mmTPC3_QM_ARB_ERR_MSG_EN 0xEC8BA0
+
+#define mmTPC3_QM_ARB_ERR_STS_DRP 0xEC8BA8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_0 0xEC8BB0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_1 0xEC8BB4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_2 0xEC8BB8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_3 0xEC8BBC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_4 0xEC8BC0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_5 0xEC8BC4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_6 0xEC8BC8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_7 0xEC8BCC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_8 0xEC8BD0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_9 0xEC8BD4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_10 0xEC8BD8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_11 0xEC8BDC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_12 0xEC8BE0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_13 0xEC8BE4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_14 0xEC8BE8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_15 0xEC8BEC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_16 0xEC8BF0
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_17 0xEC8BF4
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_18 0xEC8BF8
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_19 0xEC8BFC
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_20 0xEC8C00
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_21 0xEC8C04
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_22 0xEC8C08
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_23 0xEC8C0C
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_24 0xEC8C10
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_25 0xEC8C14
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_26 0xEC8C18
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_27 0xEC8C1C
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_28 0xEC8C20
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_29 0xEC8C24
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_30 0xEC8C28
+
+#define mmTPC3_QM_ARB_MST_CRED_STS_31 0xEC8C2C
+
+#define mmTPC3_QM_CGM_CFG 0xEC8C70
+
+#define mmTPC3_QM_CGM_STS 0xEC8C74
+
+#define mmTPC3_QM_CGM_CFG1 0xEC8C78
+
+#define mmTPC3_QM_LOCAL_RANGE_BASE 0xEC8C80
+
+#define mmTPC3_QM_LOCAL_RANGE_SIZE 0xEC8C84
+
+#define mmTPC3_QM_CSMR_STRICT_PRIO_CFG 0xEC8C90
+
+#define mmTPC3_QM_HBW_RD_RATE_LIM_CFG_1 0xEC8C94
+
+#define mmTPC3_QM_LBW_WR_RATE_LIM_CFG_0 0xEC8C98
+
+#define mmTPC3_QM_LBW_WR_RATE_LIM_CFG_1 0xEC8C9C
+
+#define mmTPC3_QM_HBW_RD_RATE_LIM_CFG_0 0xEC8CA0
+
+#define mmTPC3_QM_GLBL_AXCACHE 0xEC8CA4
+
+#define mmTPC3_QM_IND_GW_APB_CFG 0xEC8CB0
+
+#define mmTPC3_QM_IND_GW_APB_WDATA 0xEC8CB4
+
+#define mmTPC3_QM_IND_GW_APB_RDATA 0xEC8CB8
+
+#define mmTPC3_QM_IND_GW_APB_STATUS 0xEC8CBC
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8CD0
+
+#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8CD4
+
+#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8CD8
+
+#define mmTPC3_QM_GLBL_MEM_INIT_BUSY 0xEC8D00
+
+#endif /* ASIC_REG_TPC3_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
new file mode 100644
index 000000000000..7a9447f39a74
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_CFG_REGS_H_
+#define ASIC_REG_TPC4_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF06418
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF0641C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06420
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF06424
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06428
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF0642C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06430
+
+#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06434
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF06438
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF0643C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06440
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06444
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF06448
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF0644C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06450
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF06454
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06458
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF0645C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06460
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06464
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF06468
+
+#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF0646C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06470
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF06474
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF06478
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF0647C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF06480
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF06484
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF06488
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF0648C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF06490
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF06494
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF06498
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF0649C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF064C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF064C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF064C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF064CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF064D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF064D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF064D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF064DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF064E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF064E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF064E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF064EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF064F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF064F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF064F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF064FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06500
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF06504
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06508
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF0650C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06510
+
+#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06514
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF06518
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF0651C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06520
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06524
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF06528
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF0652C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06530
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF06534
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF06538
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF0653C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF06540
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF06544
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF06548
+
+#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF0654C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF06550
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF06554
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF06558
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF0655C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF06560
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF06564
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF06568
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF0656C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF06570
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF06574
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF06578
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF0657C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06580
+
+#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF06584
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06588
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF0658C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF06590
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06594
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06598
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF0659C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF065A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF065A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF065A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF065AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF065B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF065B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF065B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF065BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF065C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF065C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF065C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF065CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF065D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF065D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF065D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF065DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF065E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF065E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF065E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF065EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF065F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF065F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF065F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF065FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF06600
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF06604
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF06608
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF0660C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF06610
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF06614
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF06618
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF0661C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF06620
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF06624
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF06628
+
+#define mmTPC4_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF0662C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF06630
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF06634
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF06638
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF0663C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF06640
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF06644
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF06648
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF0664C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF06650
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF06654
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF06658
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF0665C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF06660
+
+#define mmTPC4_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF06664
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF06668
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF0666C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF06670
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF06674
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF06678
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF0667C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF06680
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF06684
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF06688
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF0668C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF06690
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF06694
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF06698
+
+#define mmTPC4_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF0669C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF066A0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF066A4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF066A8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF066AC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF066B0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF066B4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF066B8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF066BC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF066C0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF066C4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF066C8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF066CC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF066D0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF066D4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF066D8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF066DC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF066E0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF066E4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF066E8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF066EC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF066F0
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF066F4
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF066F8
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF066FC
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF06700
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF06704
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF06708
+
+#define mmTPC4_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF0670C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF06710
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF06714
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF06718
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF0671C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF06720
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF06724
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF06728
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF0672C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF06730
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF06734
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF06738
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF0673C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF06740
+
+#define mmTPC4_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF06744
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF06748
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF0674C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF06750
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF06754
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF06758
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF0675C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF06760
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF06764
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF06768
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF0676C
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF06770
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF06774
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF06778
+
+#define mmTPC4_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF0677C
+
+#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06780
+
+#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF06784
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06788
+
+#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF0678C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06790
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF06794
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06798
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF0679C
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF067A0
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF067A4
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF067A8
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF067AC
+
+#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF067B0
+
+#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF067B4
+
+#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF067B8
+
+#define mmTPC4_CFG_KERNEL_KERNEL_ID 0xF067BC
+
+#define mmTPC4_CFG_KERNEL_SRF_0 0xF067C0
+
+#define mmTPC4_CFG_KERNEL_SRF_1 0xF067C4
+
+#define mmTPC4_CFG_KERNEL_SRF_2 0xF067C8
+
+#define mmTPC4_CFG_KERNEL_SRF_3 0xF067CC
+
+#define mmTPC4_CFG_KERNEL_SRF_4 0xF067D0
+
+#define mmTPC4_CFG_KERNEL_SRF_5 0xF067D4
+
+#define mmTPC4_CFG_KERNEL_SRF_6 0xF067D8
+
+#define mmTPC4_CFG_KERNEL_SRF_7 0xF067DC
+
+#define mmTPC4_CFG_KERNEL_SRF_8 0xF067E0
+
+#define mmTPC4_CFG_KERNEL_SRF_9 0xF067E4
+
+#define mmTPC4_CFG_KERNEL_SRF_10 0xF067E8
+
+#define mmTPC4_CFG_KERNEL_SRF_11 0xF067EC
+
+#define mmTPC4_CFG_KERNEL_SRF_12 0xF067F0
+
+#define mmTPC4_CFG_KERNEL_SRF_13 0xF067F4
+
+#define mmTPC4_CFG_KERNEL_SRF_14 0xF067F8
+
+#define mmTPC4_CFG_KERNEL_SRF_15 0xF067FC
+
+#define mmTPC4_CFG_KERNEL_SRF_16 0xF06800
+
+#define mmTPC4_CFG_KERNEL_SRF_17 0xF06804
+
+#define mmTPC4_CFG_KERNEL_SRF_18 0xF06808
+
+#define mmTPC4_CFG_KERNEL_SRF_19 0xF0680C
+
+#define mmTPC4_CFG_KERNEL_SRF_20 0xF06810
+
+#define mmTPC4_CFG_KERNEL_SRF_21 0xF06814
+
+#define mmTPC4_CFG_KERNEL_SRF_22 0xF06818
+
+#define mmTPC4_CFG_KERNEL_SRF_23 0xF0681C
+
+#define mmTPC4_CFG_KERNEL_SRF_24 0xF06820
+
+#define mmTPC4_CFG_KERNEL_SRF_25 0xF06824
+
+#define mmTPC4_CFG_KERNEL_SRF_26 0xF06828
+
+#define mmTPC4_CFG_KERNEL_SRF_27 0xF0682C
+
+#define mmTPC4_CFG_KERNEL_SRF_28 0xF06830
+
+#define mmTPC4_CFG_KERNEL_SRF_29 0xF06834
+
+#define mmTPC4_CFG_KERNEL_SRF_30 0xF06838
+
+#define mmTPC4_CFG_KERNEL_SRF_31 0xF0683C
+
+#define mmTPC4_CFG_ROUND_CSR 0xF068FC
+
+#define mmTPC4_CFG_PROT 0xF06900
+
+#define mmTPC4_CFG_SEMAPHORE 0xF06908
+
+#define mmTPC4_CFG_VFLAGS 0xF0690C
+
+#define mmTPC4_CFG_SFLAGS 0xF06910
+
+#define mmTPC4_CFG_LFSR_POLYNOM 0xF06918
+
+#define mmTPC4_CFG_STATUS 0xF0691C
+
+#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06920
+
+#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06924
+
+#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0692C
+
+#define mmTPC4_CFG_TPC_CMD 0xF06930
+
+#define mmTPC4_CFG_TPC_EXECUTE 0xF06938
+
+#define mmTPC4_CFG_TPC_STALL 0xF0693C
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06940
+
+#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06944
+
+#define mmTPC4_CFG_RD_RATE_LIMIT 0xF06948
+
+#define mmTPC4_CFG_WR_RATE_LIMIT 0xF06950
+
+#define mmTPC4_CFG_MSS_CONFIG 0xF06954
+
+#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06958
+
+#define mmTPC4_CFG_TPC_INTR_MASK 0xF0695C
+
+#define mmTPC4_CFG_WQ_CREDITS 0xF06960
+
+#define mmTPC4_CFG_ARUSER_LO 0xF06964
+
+#define mmTPC4_CFG_ARUSER_HI 0xF06968
+
+#define mmTPC4_CFG_AWUSER_LO 0xF0696C
+
+#define mmTPC4_CFG_AWUSER_HI 0xF06970
+
+#define mmTPC4_CFG_OPCODE_EXEC 0xF06974
+
+#define mmTPC4_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF06978
+
+#define mmTPC4_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF0697C
+
+#define mmTPC4_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF06980
+
+#define mmTPC4_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF06984
+
+#define mmTPC4_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF06988
+
+#define mmTPC4_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF0698C
+
+#define mmTPC4_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF06990
+
+#define mmTPC4_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF06994
+
+#define mmTPC4_CFG_TSB_CFG_MAX_SIZE 0xF06998
+
+#define mmTPC4_CFG_TSB_CFG 0xF0699C
+
+#define mmTPC4_CFG_DBGMEM_ADD 0xF069A0
+
+#define mmTPC4_CFG_DBGMEM_DATA_WR 0xF069A4
+
+#define mmTPC4_CFG_DBGMEM_DATA_RD 0xF069A8
+
+#define mmTPC4_CFG_DBGMEM_CTRL 0xF069AC
+
+#define mmTPC4_CFG_DBGMEM_RC 0xF069B0
+
+#define mmTPC4_CFG_TSB_INFLIGHT_CNTR 0xF069B4
+
+#define mmTPC4_CFG_WQ_INFLIGHT_CNTR 0xF069B8
+
+#define mmTPC4_CFG_WQ_LBW_TOTAL_CNTR 0xF069BC
+
+#define mmTPC4_CFG_WQ_HBW_TOTAL_CNTR 0xF069C0
+
+#define mmTPC4_CFG_IRQ_OCCOUPY_CNTR 0xF069C4
+
+#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF069D0
+
+#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF069D4
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF069D8
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF069DC
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF069E0
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF069E4
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF069E8
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF069EC
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF069F0
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF069F4
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF069F8
+
+#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF069FC
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00
+
+#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04
+
+#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08
+
+#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A18
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A1C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A20
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A24
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A28
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A2C
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A30
+
+#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A34
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A38
+
+#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A3C
+
+#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A40
+
+#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A44
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A48
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A4C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A50
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A54
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A58
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A5C
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A60
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A64
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A68
+
+#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A6C
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A70
+
+#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A74
+
+#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06A78
+
+#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06A7C
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06A80
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06A84
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06A88
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06A8C
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06A90
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06A94
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06A98
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06A9C
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AA0
+
+#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06AA4
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AA8
+
+#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AAC
+
+#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AB0
+
+#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AB4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AB8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06ABC
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06AC0
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06AC4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06AC8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06ACC
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06AD0
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06AD4
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06AD8
+
+#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06ADC
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06AE0
+
+#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06AE4
+
+#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06AE8
+
+#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06AEC
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06AF0
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06AF4
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06AF8
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06AFC
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B00
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B04
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B08
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B0C
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B10
+
+#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B14
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B18
+
+#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B1C
+
+#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B20
+
+#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B24
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B28
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B2C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B30
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B34
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06B38
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06B3C
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06B40
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06B44
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06B48
+
+#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06B4C
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06B50
+
+#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06B54
+
+#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06B58
+
+#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06B5C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06B60
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06B64
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06B68
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06B6C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06B70
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06B74
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06B78
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06B7C
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06B80
+
+#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06B84
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06B88
+
+#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06B8C
+
+#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06B90
+
+#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06B94
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06B98
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06B9C
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06BA0
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06BA4
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06BA8
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06BAC
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06BB0
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06BB4
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06BB8
+
+#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06BBC
+
+#define mmTPC4_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF06BC0
+
+#define mmTPC4_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF06BC4
+
+#define mmTPC4_CFG_QM_TENSOR_8_PADDING_VALUE 0xF06BC8
+
+#define mmTPC4_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF06BCC
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF06BD0
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF06BD4
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF06BD8
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF06BDC
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF06BE0
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF06BE4
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF06BE8
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF06BEC
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF06BF0
+
+#define mmTPC4_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF06BF4
+
+#define mmTPC4_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF06BF8
+
+#define mmTPC4_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF06BFC
+
+#define mmTPC4_CFG_QM_TENSOR_9_PADDING_VALUE 0xF06C00
+
+#define mmTPC4_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF06C04
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF06C08
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF06C0C
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF06C10
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF06C14
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF06C18
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF06C1C
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF06C20
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF06C24
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF06C28
+
+#define mmTPC4_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF06C2C
+
+#define mmTPC4_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF06C30
+
+#define mmTPC4_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF06C34
+
+#define mmTPC4_CFG_QM_TENSOR_10_PADDING_VALUE 0xF06C38
+
+#define mmTPC4_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF06C3C
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF06C40
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF06C44
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF06C48
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF06C4C
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF06C50
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF06C54
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF06C58
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF06C5C
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF06C60
+
+#define mmTPC4_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF06C64
+
+#define mmTPC4_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF06C68
+
+#define mmTPC4_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF06C6C
+
+#define mmTPC4_CFG_QM_TENSOR_11_PADDING_VALUE 0xF06C70
+
+#define mmTPC4_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF06C74
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF06C78
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF06C7C
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF06C80
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF06C84
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF06C88
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF06C8C
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF06C90
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF06C94
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF06C98
+
+#define mmTPC4_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF06C9C
+
+#define mmTPC4_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF06CA0
+
+#define mmTPC4_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF06CA4
+
+#define mmTPC4_CFG_QM_TENSOR_12_PADDING_VALUE 0xF06CA8
+
+#define mmTPC4_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF06CAC
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF06CB0
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF06CB4
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF06CB8
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF06CBC
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF06CC0
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF06CC4
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF06CC8
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF06CCC
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF06CD0
+
+#define mmTPC4_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF06CD4
+
+#define mmTPC4_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF06CD8
+
+#define mmTPC4_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF06CDC
+
+#define mmTPC4_CFG_QM_TENSOR_13_PADDING_VALUE 0xF06CE0
+
+#define mmTPC4_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF06CE4
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF06CE8
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF06CEC
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF06CF0
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF06CF4
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF06CF8
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF06CFC
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF06D00
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF06D04
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF06D08
+
+#define mmTPC4_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF06D0C
+
+#define mmTPC4_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF06D10
+
+#define mmTPC4_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF06D14
+
+#define mmTPC4_CFG_QM_TENSOR_14_PADDING_VALUE 0xF06D18
+
+#define mmTPC4_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF06D1C
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF06D20
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF06D24
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF06D28
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF06D2C
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF06D30
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF06D34
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF06D38
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF06D3C
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF06D40
+
+#define mmTPC4_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF06D44
+
+#define mmTPC4_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF06D48
+
+#define mmTPC4_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF06D4C
+
+#define mmTPC4_CFG_QM_TENSOR_15_PADDING_VALUE 0xF06D50
+
+#define mmTPC4_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF06D54
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF06D58
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF06D5C
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF06D60
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF06D64
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF06D68
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF06D6C
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF06D70
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF06D74
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF06D78
+
+#define mmTPC4_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF06D7C
+
+#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D80
+
+#define mmTPC4_CFG_QM_SYNC_OBJECT_ADDR 0xF06D84
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06D88
+
+#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06D8C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06D90
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06D94
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06D98
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06D9C
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06DA0
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06DA4
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06DA8
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06DAC
+
+#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06DB0
+
+#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06DB4
+
+#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06DB8
+
+#define mmTPC4_CFG_QM_KERNEL_ID 0xF06DBC
+
+#define mmTPC4_CFG_QM_SRF_0 0xF06DC0
+
+#define mmTPC4_CFG_QM_SRF_1 0xF06DC4
+
+#define mmTPC4_CFG_QM_SRF_2 0xF06DC8
+
+#define mmTPC4_CFG_QM_SRF_3 0xF06DCC
+
+#define mmTPC4_CFG_QM_SRF_4 0xF06DD0
+
+#define mmTPC4_CFG_QM_SRF_5 0xF06DD4
+
+#define mmTPC4_CFG_QM_SRF_6 0xF06DD8
+
+#define mmTPC4_CFG_QM_SRF_7 0xF06DDC
+
+#define mmTPC4_CFG_QM_SRF_8 0xF06DE0
+
+#define mmTPC4_CFG_QM_SRF_9 0xF06DE4
+
+#define mmTPC4_CFG_QM_SRF_10 0xF06DE8
+
+#define mmTPC4_CFG_QM_SRF_11 0xF06DEC
+
+#define mmTPC4_CFG_QM_SRF_12 0xF06DF0
+
+#define mmTPC4_CFG_QM_SRF_13 0xF06DF4
+
+#define mmTPC4_CFG_QM_SRF_14 0xF06DF8
+
+#define mmTPC4_CFG_QM_SRF_15 0xF06DFC
+
+#define mmTPC4_CFG_QM_SRF_16 0xF06E00
+
+#define mmTPC4_CFG_QM_SRF_17 0xF06E04
+
+#define mmTPC4_CFG_QM_SRF_18 0xF06E08
+
+#define mmTPC4_CFG_QM_SRF_19 0xF06E0C
+
+#define mmTPC4_CFG_QM_SRF_20 0xF06E10
+
+#define mmTPC4_CFG_QM_SRF_21 0xF06E14
+
+#define mmTPC4_CFG_QM_SRF_22 0xF06E18
+
+#define mmTPC4_CFG_QM_SRF_23 0xF06E1C
+
+#define mmTPC4_CFG_QM_SRF_24 0xF06E20
+
+#define mmTPC4_CFG_QM_SRF_25 0xF06E24
+
+#define mmTPC4_CFG_QM_SRF_26 0xF06E28
+
+#define mmTPC4_CFG_QM_SRF_27 0xF06E2C
+
+#define mmTPC4_CFG_QM_SRF_28 0xF06E30
+
+#define mmTPC4_CFG_QM_SRF_29 0xF06E34
+
+#define mmTPC4_CFG_QM_SRF_30 0xF06E38
+
+#define mmTPC4_CFG_QM_SRF_31 0xF06E3C
+
+#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
new file mode 100644
index 000000000000..80e63402f6e0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC4_QM_REGS_H_
+#define ASIC_REG_TPC4_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC4_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC4_QM_GLBL_CFG0 0xF08000
+
+#define mmTPC4_QM_GLBL_CFG1 0xF08004
+
+#define mmTPC4_QM_GLBL_PROT 0xF08008
+
+#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_0 0xF08010
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_1 0xF08014
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_2 0xF08018
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_3 0xF0801C
+
+#define mmTPC4_QM_GLBL_SECURE_PROPS_4 0xF08020
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_0 0xF08024
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_1 0xF08028
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_2 0xF0802C
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_3 0xF08030
+
+#define mmTPC4_QM_GLBL_NON_SECURE_PROPS_4 0xF08034
+
+#define mmTPC4_QM_GLBL_STS0 0xF08038
+
+#define mmTPC4_QM_GLBL_STS1_0 0xF08040
+
+#define mmTPC4_QM_GLBL_STS1_1 0xF08044
+
+#define mmTPC4_QM_GLBL_STS1_2 0xF08048
+
+#define mmTPC4_QM_GLBL_STS1_3 0xF0804C
+
+#define mmTPC4_QM_GLBL_STS1_4 0xF08050
+
+#define mmTPC4_QM_GLBL_MSG_EN_0 0xF08054
+
+#define mmTPC4_QM_GLBL_MSG_EN_1 0xF08058
+
+#define mmTPC4_QM_GLBL_MSG_EN_2 0xF0805C
+
+#define mmTPC4_QM_GLBL_MSG_EN_3 0xF08060
+
+#define mmTPC4_QM_GLBL_MSG_EN_4 0xF08068
+
+#define mmTPC4_QM_PQ_BASE_LO_0 0xF08070
+
+#define mmTPC4_QM_PQ_BASE_LO_1 0xF08074
+
+#define mmTPC4_QM_PQ_BASE_LO_2 0xF08078
+
+#define mmTPC4_QM_PQ_BASE_LO_3 0xF0807C
+
+#define mmTPC4_QM_PQ_BASE_HI_0 0xF08080
+
+#define mmTPC4_QM_PQ_BASE_HI_1 0xF08084
+
+#define mmTPC4_QM_PQ_BASE_HI_2 0xF08088
+
+#define mmTPC4_QM_PQ_BASE_HI_3 0xF0808C
+
+#define mmTPC4_QM_PQ_SIZE_0 0xF08090
+
+#define mmTPC4_QM_PQ_SIZE_1 0xF08094
+
+#define mmTPC4_QM_PQ_SIZE_2 0xF08098
+
+#define mmTPC4_QM_PQ_SIZE_3 0xF0809C
+
+#define mmTPC4_QM_PQ_PI_0 0xF080A0
+
+#define mmTPC4_QM_PQ_PI_1 0xF080A4
+
+#define mmTPC4_QM_PQ_PI_2 0xF080A8
+
+#define mmTPC4_QM_PQ_PI_3 0xF080AC
+
+#define mmTPC4_QM_PQ_CI_0 0xF080B0
+
+#define mmTPC4_QM_PQ_CI_1 0xF080B4
+
+#define mmTPC4_QM_PQ_CI_2 0xF080B8
+
+#define mmTPC4_QM_PQ_CI_3 0xF080BC
+
+#define mmTPC4_QM_PQ_CFG0_0 0xF080C0
+
+#define mmTPC4_QM_PQ_CFG0_1 0xF080C4
+
+#define mmTPC4_QM_PQ_CFG0_2 0xF080C8
+
+#define mmTPC4_QM_PQ_CFG0_3 0xF080CC
+
+#define mmTPC4_QM_PQ_CFG1_0 0xF080D0
+
+#define mmTPC4_QM_PQ_CFG1_1 0xF080D4
+
+#define mmTPC4_QM_PQ_CFG1_2 0xF080D8
+
+#define mmTPC4_QM_PQ_CFG1_3 0xF080DC
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_0 0xF080E0
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_1 0xF080E4
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_2 0xF080E8
+
+#define mmTPC4_QM_PQ_ARUSER_31_11_3 0xF080EC
+
+#define mmTPC4_QM_PQ_STS0_0 0xF080F0
+
+#define mmTPC4_QM_PQ_STS0_1 0xF080F4
+
+#define mmTPC4_QM_PQ_STS0_2 0xF080F8
+
+#define mmTPC4_QM_PQ_STS0_3 0xF080FC
+
+#define mmTPC4_QM_PQ_STS1_0 0xF08100
+
+#define mmTPC4_QM_PQ_STS1_1 0xF08104
+
+#define mmTPC4_QM_PQ_STS1_2 0xF08108
+
+#define mmTPC4_QM_PQ_STS1_3 0xF0810C
+
+#define mmTPC4_QM_CQ_CFG0_0 0xF08110
+
+#define mmTPC4_QM_CQ_CFG0_1 0xF08114
+
+#define mmTPC4_QM_CQ_CFG0_2 0xF08118
+
+#define mmTPC4_QM_CQ_CFG0_3 0xF0811C
+
+#define mmTPC4_QM_CQ_CFG0_4 0xF08120
+
+#define mmTPC4_QM_CQ_CFG1_0 0xF08124
+
+#define mmTPC4_QM_CQ_CFG1_1 0xF08128
+
+#define mmTPC4_QM_CQ_CFG1_2 0xF0812C
+
+#define mmTPC4_QM_CQ_CFG1_3 0xF08130
+
+#define mmTPC4_QM_CQ_CFG1_4 0xF08134
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_0 0xF08138
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_1 0xF0813C
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_2 0xF08140
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_3 0xF08144
+
+#define mmTPC4_QM_CQ_ARUSER_31_11_4 0xF08148
+
+#define mmTPC4_QM_CQ_STS0_0 0xF0814C
+
+#define mmTPC4_QM_CQ_STS0_1 0xF08150
+
+#define mmTPC4_QM_CQ_STS0_2 0xF08154
+
+#define mmTPC4_QM_CQ_STS0_3 0xF08158
+
+#define mmTPC4_QM_CQ_STS0_4 0xF0815C
+
+#define mmTPC4_QM_CQ_STS1_0 0xF08160
+
+#define mmTPC4_QM_CQ_STS1_1 0xF08164
+
+#define mmTPC4_QM_CQ_STS1_2 0xF08168
+
+#define mmTPC4_QM_CQ_STS1_3 0xF0816C
+
+#define mmTPC4_QM_CQ_STS1_4 0xF08170
+
+#define mmTPC4_QM_CQ_PTR_LO_0 0xF08174
+
+#define mmTPC4_QM_CQ_PTR_HI_0 0xF08178
+
+#define mmTPC4_QM_CQ_TSIZE_0 0xF0817C
+
+#define mmTPC4_QM_CQ_CTL_0 0xF08180
+
+#define mmTPC4_QM_CQ_PTR_LO_1 0xF08184
+
+#define mmTPC4_QM_CQ_PTR_HI_1 0xF08188
+
+#define mmTPC4_QM_CQ_TSIZE_1 0xF0818C
+
+#define mmTPC4_QM_CQ_CTL_1 0xF08190
+
+#define mmTPC4_QM_CQ_PTR_LO_2 0xF08194
+
+#define mmTPC4_QM_CQ_PTR_HI_2 0xF08198
+
+#define mmTPC4_QM_CQ_TSIZE_2 0xF0819C
+
+#define mmTPC4_QM_CQ_CTL_2 0xF081A0
+
+#define mmTPC4_QM_CQ_PTR_LO_3 0xF081A4
+
+#define mmTPC4_QM_CQ_PTR_HI_3 0xF081A8
+
+#define mmTPC4_QM_CQ_TSIZE_3 0xF081AC
+
+#define mmTPC4_QM_CQ_CTL_3 0xF081B0
+
+#define mmTPC4_QM_CQ_PTR_LO_4 0xF081B4
+
+#define mmTPC4_QM_CQ_PTR_HI_4 0xF081B8
+
+#define mmTPC4_QM_CQ_TSIZE_4 0xF081BC
+
+#define mmTPC4_QM_CQ_CTL_4 0xF081C0
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_0 0xF081C4
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_1 0xF081C8
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_2 0xF081CC
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_3 0xF081D0
+
+#define mmTPC4_QM_CQ_PTR_LO_STS_4 0xF081D4
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_0 0xF081D8
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_1 0xF081DC
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_2 0xF081E0
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_3 0xF081E4
+
+#define mmTPC4_QM_CQ_PTR_HI_STS_4 0xF081E8
+
+#define mmTPC4_QM_CQ_TSIZE_STS_0 0xF081EC
+
+#define mmTPC4_QM_CQ_TSIZE_STS_1 0xF081F0
+
+#define mmTPC4_QM_CQ_TSIZE_STS_2 0xF081F4
+
+#define mmTPC4_QM_CQ_TSIZE_STS_3 0xF081F8
+
+#define mmTPC4_QM_CQ_TSIZE_STS_4 0xF081FC
+
+#define mmTPC4_QM_CQ_CTL_STS_0 0xF08200
+
+#define mmTPC4_QM_CQ_CTL_STS_1 0xF08204
+
+#define mmTPC4_QM_CQ_CTL_STS_2 0xF08208
+
+#define mmTPC4_QM_CQ_CTL_STS_3 0xF0820C
+
+#define mmTPC4_QM_CQ_CTL_STS_4 0xF08210
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_0 0xF08214
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_1 0xF08218
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_2 0xF0821C
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_3 0xF08220
+
+#define mmTPC4_QM_CQ_IFIFO_CNT_4 0xF08224
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_0 0xF08228
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_1 0xF0822C
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_2 0xF08230
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_3 0xF08234
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO_4 0xF08238
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_0 0xF0823C
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_1 0xF08240
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_2 0xF08244
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_3 0xF08248
+
+#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI_4 0xF0824C
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_0 0xF08250
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_1 0xF08254
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_2 0xF08258
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_3 0xF0825C
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO_4 0xF08260
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_0 0xF08264
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_1 0xF08268
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_2 0xF0826C
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_3 0xF08270
+
+#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI_4 0xF08274
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_0 0xF08278
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_1 0xF0827C
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_2 0xF08280
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_3 0xF08284
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO_4 0xF08288
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_0 0xF0828C
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_1 0xF08290
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_2 0xF08294
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_3 0xF08298
+
+#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI_4 0xF0829C
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_0 0xF082A0
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_1 0xF082A4
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_2 0xF082A8
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_3 0xF082AC
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO_4 0xF082B0
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_0 0xF082B4
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_1 0xF082B8
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_2 0xF082BC
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_3 0xF082C0
+
+#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI_4 0xF082C4
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_0 0xF082C8
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_1 0xF082CC
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_2 0xF082D0
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_3 0xF082D4
+
+#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET_4 0xF082D8
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF082E0
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF082E4
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF082E8
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF082EC
+
+#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF082F0
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF082F4
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF082F8
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF082FC
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF08300
+
+#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF08304
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_0 0xF08308
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_1 0xF0830C
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_2 0xF08310
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_3 0xF08314
+
+#define mmTPC4_QM_CP_FENCE0_RDATA_4 0xF08318
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_0 0xF0831C
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_1 0xF08320
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_2 0xF08324
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_3 0xF08328
+
+#define mmTPC4_QM_CP_FENCE1_RDATA_4 0xF0832C
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_0 0xF08330
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_1 0xF08334
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_2 0xF08338
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_3 0xF0833C
+
+#define mmTPC4_QM_CP_FENCE2_RDATA_4 0xF08340
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_0 0xF08344
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_1 0xF08348
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_2 0xF0834C
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_3 0xF08350
+
+#define mmTPC4_QM_CP_FENCE3_RDATA_4 0xF08354
+
+#define mmTPC4_QM_CP_FENCE0_CNT_0 0xF08358
+
+#define mmTPC4_QM_CP_FENCE0_CNT_1 0xF0835C
+
+#define mmTPC4_QM_CP_FENCE0_CNT_2 0xF08360
+
+#define mmTPC4_QM_CP_FENCE0_CNT_3 0xF08364
+
+#define mmTPC4_QM_CP_FENCE0_CNT_4 0xF08368
+
+#define mmTPC4_QM_CP_FENCE1_CNT_0 0xF0836C
+
+#define mmTPC4_QM_CP_FENCE1_CNT_1 0xF08370
+
+#define mmTPC4_QM_CP_FENCE1_CNT_2 0xF08374
+
+#define mmTPC4_QM_CP_FENCE1_CNT_3 0xF08378
+
+#define mmTPC4_QM_CP_FENCE1_CNT_4 0xF0837C
+
+#define mmTPC4_QM_CP_FENCE2_CNT_0 0xF08380
+
+#define mmTPC4_QM_CP_FENCE2_CNT_1 0xF08384
+
+#define mmTPC4_QM_CP_FENCE2_CNT_2 0xF08388
+
+#define mmTPC4_QM_CP_FENCE2_CNT_3 0xF0838C
+
+#define mmTPC4_QM_CP_FENCE2_CNT_4 0xF08390
+
+#define mmTPC4_QM_CP_FENCE3_CNT_0 0xF08394
+
+#define mmTPC4_QM_CP_FENCE3_CNT_1 0xF08398
+
+#define mmTPC4_QM_CP_FENCE3_CNT_2 0xF0839C
+
+#define mmTPC4_QM_CP_FENCE3_CNT_3 0xF083A0
+
+#define mmTPC4_QM_CP_FENCE3_CNT_4 0xF083A4
+
+#define mmTPC4_QM_CP_STS_0 0xF083A8
+
+#define mmTPC4_QM_CP_STS_1 0xF083AC
+
+#define mmTPC4_QM_CP_STS_2 0xF083B0
+
+#define mmTPC4_QM_CP_STS_3 0xF083B4
+
+#define mmTPC4_QM_CP_STS_4 0xF083B8
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_0 0xF083BC
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_1 0xF083C0
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_2 0xF083C4
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_3 0xF083C8
+
+#define mmTPC4_QM_CP_CURRENT_INST_LO_4 0xF083CC
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_0 0xF083D0
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_1 0xF083D4
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_2 0xF083D8
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_3 0xF083DC
+
+#define mmTPC4_QM_CP_CURRENT_INST_HI_4 0xF083E0
+
+#define mmTPC4_QM_CP_BARRIER_CFG_0 0xF083F4
+
+#define mmTPC4_QM_CP_BARRIER_CFG_1 0xF083F8
+
+#define mmTPC4_QM_CP_BARRIER_CFG_2 0xF083FC
+
+#define mmTPC4_QM_CP_BARRIER_CFG_3 0xF08400
+
+#define mmTPC4_QM_CP_BARRIER_CFG_4 0xF08404
+
+#define mmTPC4_QM_CP_DBG_0_0 0xF08408
+
+#define mmTPC4_QM_CP_DBG_0_1 0xF0840C
+
+#define mmTPC4_QM_CP_DBG_0_2 0xF08410
+
+#define mmTPC4_QM_CP_DBG_0_3 0xF08414
+
+#define mmTPC4_QM_CP_DBG_0_4 0xF08418
+
+#define mmTPC4_QM_CP_ARUSER_31_11_0 0xF0841C
+
+#define mmTPC4_QM_CP_ARUSER_31_11_1 0xF08420
+
+#define mmTPC4_QM_CP_ARUSER_31_11_2 0xF08424
+
+#define mmTPC4_QM_CP_ARUSER_31_11_3 0xF08428
+
+#define mmTPC4_QM_CP_ARUSER_31_11_4 0xF0842C
+
+#define mmTPC4_QM_CP_AWUSER_31_11_0 0xF08430
+
+#define mmTPC4_QM_CP_AWUSER_31_11_1 0xF08434
+
+#define mmTPC4_QM_CP_AWUSER_31_11_2 0xF08438
+
+#define mmTPC4_QM_CP_AWUSER_31_11_3 0xF0843C
+
+#define mmTPC4_QM_CP_AWUSER_31_11_4 0xF08440
+
+#define mmTPC4_QM_ARB_CFG_0 0xF08A00
+
+#define mmTPC4_QM_ARB_CHOISE_Q_PUSH 0xF08A04
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_0 0xF08A08
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_1 0xF08A0C
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_2 0xF08A10
+
+#define mmTPC4_QM_ARB_WRR_WEIGHT_3 0xF08A14
+
+#define mmTPC4_QM_ARB_CFG_1 0xF08A18
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_0 0xF08A20
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_1 0xF08A24
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_2 0xF08A28
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_3 0xF08A2C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_4 0xF08A30
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_5 0xF08A34
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_6 0xF08A38
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_7 0xF08A3C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_8 0xF08A40
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_9 0xF08A44
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_10 0xF08A48
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_11 0xF08A4C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_12 0xF08A50
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_13 0xF08A54
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_14 0xF08A58
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_15 0xF08A5C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_16 0xF08A60
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_17 0xF08A64
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_18 0xF08A68
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_19 0xF08A6C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_20 0xF08A70
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_21 0xF08A74
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_22 0xF08A78
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_23 0xF08A7C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_24 0xF08A80
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_25 0xF08A84
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_26 0xF08A88
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_27 0xF08A8C
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_28 0xF08A90
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_29 0xF08A94
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_30 0xF08A98
+
+#define mmTPC4_QM_ARB_MST_AVAIL_CRED_31 0xF08A9C
+
+#define mmTPC4_QM_ARB_MST_CRED_INC 0xF08AA0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF08AA4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF08AA8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF08AAC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF08AB0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF08AB4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF08AB8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF08ABC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF08AC0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF08AC4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF08AC8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF08ACC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF08AD0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF08AD4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF08AD8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF08ADC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF08AE0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF08AE4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF08AE8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF08AEC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF08AF0
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF08AF4
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF08AF8
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF08AFC
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF08B00
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF08B04
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF08B08
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF08B0C
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF08B10
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF08B14
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF08B18
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF08B1C
+
+#define mmTPC4_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF08B20
+
+#define mmTPC4_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF08B28
+
+#define mmTPC4_QM_ARB_MST_SLAVE_EN 0xF08B2C
+
+#define mmTPC4_QM_ARB_MST_QUIET_PER 0xF08B34
+
+#define mmTPC4_QM_ARB_SLV_CHOISE_WDT 0xF08B38
+
+#define mmTPC4_QM_ARB_SLV_ID 0xF08B3C
+
+#define mmTPC4_QM_ARB_MSG_MAX_INFLIGHT 0xF08B44
+
+#define mmTPC4_QM_ARB_MSG_AWUSER_31_11 0xF08B48
+
+#define mmTPC4_QM_ARB_MSG_AWUSER_SEC_PROP 0xF08B4C
+
+#define mmTPC4_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF08B50
+
+#define mmTPC4_QM_ARB_BASE_LO 0xF08B54
+
+#define mmTPC4_QM_ARB_BASE_HI 0xF08B58
+
+#define mmTPC4_QM_ARB_STATE_STS 0xF08B80
+
+#define mmTPC4_QM_ARB_CHOISE_FULLNESS_STS 0xF08B84
+
+#define mmTPC4_QM_ARB_MSG_STS 0xF08B88
+
+#define mmTPC4_QM_ARB_SLV_CHOISE_Q_HEAD 0xF08B8C
+
+#define mmTPC4_QM_ARB_ERR_CAUSE 0xF08B9C
+
+#define mmTPC4_QM_ARB_ERR_MSG_EN 0xF08BA0
+
+#define mmTPC4_QM_ARB_ERR_STS_DRP 0xF08BA8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_0 0xF08BB0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_1 0xF08BB4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_2 0xF08BB8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_3 0xF08BBC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_4 0xF08BC0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_5 0xF08BC4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_6 0xF08BC8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_7 0xF08BCC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_8 0xF08BD0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_9 0xF08BD4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_10 0xF08BD8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_11 0xF08BDC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_12 0xF08BE0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_13 0xF08BE4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_14 0xF08BE8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_15 0xF08BEC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_16 0xF08BF0
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_17 0xF08BF4
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_18 0xF08BF8
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_19 0xF08BFC
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_20 0xF08C00
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_21 0xF08C04
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_22 0xF08C08
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_23 0xF08C0C
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_24 0xF08C10
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_25 0xF08C14
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_26 0xF08C18
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_27 0xF08C1C
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_28 0xF08C20
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_29 0xF08C24
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_30 0xF08C28
+
+#define mmTPC4_QM_ARB_MST_CRED_STS_31 0xF08C2C
+
+#define mmTPC4_QM_CGM_CFG 0xF08C70
+
+#define mmTPC4_QM_CGM_STS 0xF08C74
+
+#define mmTPC4_QM_CGM_CFG1 0xF08C78
+
+#define mmTPC4_QM_LOCAL_RANGE_BASE 0xF08C80
+
+#define mmTPC4_QM_LOCAL_RANGE_SIZE 0xF08C84
+
+#define mmTPC4_QM_CSMR_STRICT_PRIO_CFG 0xF08C90
+
+#define mmTPC4_QM_HBW_RD_RATE_LIM_CFG_1 0xF08C94
+
+#define mmTPC4_QM_LBW_WR_RATE_LIM_CFG_0 0xF08C98
+
+#define mmTPC4_QM_LBW_WR_RATE_LIM_CFG_1 0xF08C9C
+
+#define mmTPC4_QM_HBW_RD_RATE_LIM_CFG_0 0xF08CA0
+
+#define mmTPC4_QM_GLBL_AXCACHE 0xF08CA4
+
+#define mmTPC4_QM_IND_GW_APB_CFG 0xF08CB0
+
+#define mmTPC4_QM_IND_GW_APB_WDATA 0xF08CB4
+
+#define mmTPC4_QM_IND_GW_APB_RDATA 0xF08CB8
+
+#define mmTPC4_QM_IND_GW_APB_STATUS 0xF08CBC
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08CD0
+
+#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08CD4
+
+#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08CD8
+
+#define mmTPC4_QM_GLBL_MEM_INIT_BUSY 0xF08D00
+
+#endif /* ASIC_REG_TPC4_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
new file mode 100644
index 000000000000..f428f891935a
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_CFG_REGS_H_
+#define ASIC_REG_TPC5_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF46418
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF4641C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46420
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF46424
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46428
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF4642C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46430
+
+#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46434
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF46438
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF4643C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46440
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46444
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF46448
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF4644C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46450
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF46454
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46458
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF4645C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46460
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46464
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF46468
+
+#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF4646C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46470
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF46474
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF46478
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF4647C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF46480
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF46484
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF46488
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF4648C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF46490
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF46494
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF46498
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF4649C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF464C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF464C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF464C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF464CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF464D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF464D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF464D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF464DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF464E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF464E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF464E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF464EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF464F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF464F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF464F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF464FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46500
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF46504
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46508
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF4650C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46510
+
+#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46514
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF46518
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF4651C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46520
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46524
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF46528
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF4652C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46530
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF46534
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF46538
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF4653C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF46540
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF46544
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF46548
+
+#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF4654C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF46550
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF46554
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF46558
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF4655C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF46560
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF46564
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF46568
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF4656C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF46570
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF46574
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF46578
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF4657C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46580
+
+#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF46584
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46588
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF4658C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF46590
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46594
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46598
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF4659C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF465A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF465A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF465A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF465AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF465B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF465B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF465B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF465BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF465C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF465C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF465C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF465CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF465D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF465D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF465D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF465DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF465E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF465E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF465E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF465EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF465F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF465F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF465F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF465FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF46600
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF46604
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF46608
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF4660C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF46610
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF46614
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF46618
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF4661C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF46620
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF46624
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF46628
+
+#define mmTPC5_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF4662C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF46630
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF46634
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF46638
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF4663C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF46640
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF46644
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF46648
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF4664C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF46650
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF46654
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF46658
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF4665C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF46660
+
+#define mmTPC5_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF46664
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF46668
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF4666C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF46670
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF46674
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF46678
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF4667C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF46680
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF46684
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF46688
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF4668C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF46690
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF46694
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF46698
+
+#define mmTPC5_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF4669C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF466A0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF466A4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF466A8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF466AC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF466B0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF466B4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF466B8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF466BC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF466C0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF466C4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF466C8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF466CC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF466D0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF466D4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF466D8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF466DC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF466E0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF466E4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF466E8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF466EC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF466F0
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF466F4
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF466F8
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF466FC
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF46700
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF46704
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF46708
+
+#define mmTPC5_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF4670C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF46710
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF46714
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF46718
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF4671C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF46720
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF46724
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF46728
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF4672C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF46730
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF46734
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF46738
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF4673C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF46740
+
+#define mmTPC5_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF46744
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF46748
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF4674C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF46750
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF46754
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF46758
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF4675C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF46760
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF46764
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF46768
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF4676C
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF46770
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF46774
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF46778
+
+#define mmTPC5_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF4677C
+
+#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46780
+
+#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF46784
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46788
+
+#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF4678C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46790
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF46794
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46798
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF4679C
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF467A0
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF467A4
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF467A8
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF467AC
+
+#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF467B0
+
+#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF467B4
+
+#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF467B8
+
+#define mmTPC5_CFG_KERNEL_KERNEL_ID 0xF467BC
+
+#define mmTPC5_CFG_KERNEL_SRF_0 0xF467C0
+
+#define mmTPC5_CFG_KERNEL_SRF_1 0xF467C4
+
+#define mmTPC5_CFG_KERNEL_SRF_2 0xF467C8
+
+#define mmTPC5_CFG_KERNEL_SRF_3 0xF467CC
+
+#define mmTPC5_CFG_KERNEL_SRF_4 0xF467D0
+
+#define mmTPC5_CFG_KERNEL_SRF_5 0xF467D4
+
+#define mmTPC5_CFG_KERNEL_SRF_6 0xF467D8
+
+#define mmTPC5_CFG_KERNEL_SRF_7 0xF467DC
+
+#define mmTPC5_CFG_KERNEL_SRF_8 0xF467E0
+
+#define mmTPC5_CFG_KERNEL_SRF_9 0xF467E4
+
+#define mmTPC5_CFG_KERNEL_SRF_10 0xF467E8
+
+#define mmTPC5_CFG_KERNEL_SRF_11 0xF467EC
+
+#define mmTPC5_CFG_KERNEL_SRF_12 0xF467F0
+
+#define mmTPC5_CFG_KERNEL_SRF_13 0xF467F4
+
+#define mmTPC5_CFG_KERNEL_SRF_14 0xF467F8
+
+#define mmTPC5_CFG_KERNEL_SRF_15 0xF467FC
+
+#define mmTPC5_CFG_KERNEL_SRF_16 0xF46800
+
+#define mmTPC5_CFG_KERNEL_SRF_17 0xF46804
+
+#define mmTPC5_CFG_KERNEL_SRF_18 0xF46808
+
+#define mmTPC5_CFG_KERNEL_SRF_19 0xF4680C
+
+#define mmTPC5_CFG_KERNEL_SRF_20 0xF46810
+
+#define mmTPC5_CFG_KERNEL_SRF_21 0xF46814
+
+#define mmTPC5_CFG_KERNEL_SRF_22 0xF46818
+
+#define mmTPC5_CFG_KERNEL_SRF_23 0xF4681C
+
+#define mmTPC5_CFG_KERNEL_SRF_24 0xF46820
+
+#define mmTPC5_CFG_KERNEL_SRF_25 0xF46824
+
+#define mmTPC5_CFG_KERNEL_SRF_26 0xF46828
+
+#define mmTPC5_CFG_KERNEL_SRF_27 0xF4682C
+
+#define mmTPC5_CFG_KERNEL_SRF_28 0xF46830
+
+#define mmTPC5_CFG_KERNEL_SRF_29 0xF46834
+
+#define mmTPC5_CFG_KERNEL_SRF_30 0xF46838
+
+#define mmTPC5_CFG_KERNEL_SRF_31 0xF4683C
+
+#define mmTPC5_CFG_ROUND_CSR 0xF468FC
+
+#define mmTPC5_CFG_PROT 0xF46900
+
+#define mmTPC5_CFG_SEMAPHORE 0xF46908
+
+#define mmTPC5_CFG_VFLAGS 0xF4690C
+
+#define mmTPC5_CFG_SFLAGS 0xF46910
+
+#define mmTPC5_CFG_LFSR_POLYNOM 0xF46918
+
+#define mmTPC5_CFG_STATUS 0xF4691C
+
+#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46920
+
+#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46924
+
+#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4692C
+
+#define mmTPC5_CFG_TPC_CMD 0xF46930
+
+#define mmTPC5_CFG_TPC_EXECUTE 0xF46938
+
+#define mmTPC5_CFG_TPC_STALL 0xF4693C
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46940
+
+#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46944
+
+#define mmTPC5_CFG_RD_RATE_LIMIT 0xF46948
+
+#define mmTPC5_CFG_WR_RATE_LIMIT 0xF46950
+
+#define mmTPC5_CFG_MSS_CONFIG 0xF46954
+
+#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46958
+
+#define mmTPC5_CFG_TPC_INTR_MASK 0xF4695C
+
+#define mmTPC5_CFG_WQ_CREDITS 0xF46960
+
+#define mmTPC5_CFG_ARUSER_LO 0xF46964
+
+#define mmTPC5_CFG_ARUSER_HI 0xF46968
+
+#define mmTPC5_CFG_AWUSER_LO 0xF4696C
+
+#define mmTPC5_CFG_AWUSER_HI 0xF46970
+
+#define mmTPC5_CFG_OPCODE_EXEC 0xF46974
+
+#define mmTPC5_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF46978
+
+#define mmTPC5_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF4697C
+
+#define mmTPC5_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF46980
+
+#define mmTPC5_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF46984
+
+#define mmTPC5_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF46988
+
+#define mmTPC5_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF4698C
+
+#define mmTPC5_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF46990
+
+#define mmTPC5_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF46994
+
+#define mmTPC5_CFG_TSB_CFG_MAX_SIZE 0xF46998
+
+#define mmTPC5_CFG_TSB_CFG 0xF4699C
+
+#define mmTPC5_CFG_DBGMEM_ADD 0xF469A0
+
+#define mmTPC5_CFG_DBGMEM_DATA_WR 0xF469A4
+
+#define mmTPC5_CFG_DBGMEM_DATA_RD 0xF469A8
+
+#define mmTPC5_CFG_DBGMEM_CTRL 0xF469AC
+
+#define mmTPC5_CFG_DBGMEM_RC 0xF469B0
+
+#define mmTPC5_CFG_TSB_INFLIGHT_CNTR 0xF469B4
+
+#define mmTPC5_CFG_WQ_INFLIGHT_CNTR 0xF469B8
+
+#define mmTPC5_CFG_WQ_LBW_TOTAL_CNTR 0xF469BC
+
+#define mmTPC5_CFG_WQ_HBW_TOTAL_CNTR 0xF469C0
+
+#define mmTPC5_CFG_IRQ_OCCOUPY_CNTR 0xF469C4
+
+#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF469D0
+
+#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF469D4
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF469D8
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF469DC
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF469E0
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF469E4
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF469E8
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF469EC
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF469F0
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF469F4
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF469F8
+
+#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF469FC
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00
+
+#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04
+
+#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08
+
+#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A18
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A1C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A20
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A24
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A28
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A2C
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A30
+
+#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A34
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A38
+
+#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A3C
+
+#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A40
+
+#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A44
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A48
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A4C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A50
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A54
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A58
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A5C
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A60
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A64
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A68
+
+#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A6C
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A70
+
+#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A74
+
+#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46A78
+
+#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46A7C
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46A80
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46A84
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46A88
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46A8C
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46A90
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46A94
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46A98
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46A9C
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AA0
+
+#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46AA4
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AA8
+
+#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AAC
+
+#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AB0
+
+#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AB4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AB8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46ABC
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46AC0
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46AC4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46AC8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46ACC
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46AD0
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46AD4
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46AD8
+
+#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46ADC
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46AE0
+
+#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46AE4
+
+#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46AE8
+
+#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46AEC
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46AF0
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46AF4
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46AF8
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46AFC
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B00
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B04
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B08
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B0C
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B10
+
+#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B14
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B18
+
+#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B1C
+
+#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B20
+
+#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B24
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B28
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B2C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B30
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B34
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46B38
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46B3C
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46B40
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46B44
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46B48
+
+#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46B4C
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46B50
+
+#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46B54
+
+#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46B58
+
+#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46B5C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46B60
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46B64
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46B68
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46B6C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46B70
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46B74
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46B78
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46B7C
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46B80
+
+#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46B84
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46B88
+
+#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46B8C
+
+#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46B90
+
+#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46B94
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46B98
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46B9C
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46BA0
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46BA4
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46BA8
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46BAC
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46BB0
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46BB4
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46BB8
+
+#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46BBC
+
+#define mmTPC5_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF46BC0
+
+#define mmTPC5_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF46BC4
+
+#define mmTPC5_CFG_QM_TENSOR_8_PADDING_VALUE 0xF46BC8
+
+#define mmTPC5_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF46BCC
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF46BD0
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF46BD4
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF46BD8
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF46BDC
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF46BE0
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF46BE4
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF46BE8
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF46BEC
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF46BF0
+
+#define mmTPC5_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF46BF4
+
+#define mmTPC5_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF46BF8
+
+#define mmTPC5_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF46BFC
+
+#define mmTPC5_CFG_QM_TENSOR_9_PADDING_VALUE 0xF46C00
+
+#define mmTPC5_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF46C04
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF46C08
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF46C0C
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF46C10
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF46C14
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF46C18
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF46C1C
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF46C20
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF46C24
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF46C28
+
+#define mmTPC5_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF46C2C
+
+#define mmTPC5_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF46C30
+
+#define mmTPC5_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF46C34
+
+#define mmTPC5_CFG_QM_TENSOR_10_PADDING_VALUE 0xF46C38
+
+#define mmTPC5_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF46C3C
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF46C40
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF46C44
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF46C48
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF46C4C
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF46C50
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF46C54
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF46C58
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF46C5C
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF46C60
+
+#define mmTPC5_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF46C64
+
+#define mmTPC5_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF46C68
+
+#define mmTPC5_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF46C6C
+
+#define mmTPC5_CFG_QM_TENSOR_11_PADDING_VALUE 0xF46C70
+
+#define mmTPC5_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF46C74
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF46C78
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF46C7C
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF46C80
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF46C84
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF46C88
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF46C8C
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF46C90
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF46C94
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF46C98
+
+#define mmTPC5_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF46C9C
+
+#define mmTPC5_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF46CA0
+
+#define mmTPC5_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF46CA4
+
+#define mmTPC5_CFG_QM_TENSOR_12_PADDING_VALUE 0xF46CA8
+
+#define mmTPC5_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF46CAC
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF46CB0
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF46CB4
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF46CB8
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF46CBC
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF46CC0
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF46CC4
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF46CC8
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF46CCC
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF46CD0
+
+#define mmTPC5_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF46CD4
+
+#define mmTPC5_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF46CD8
+
+#define mmTPC5_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF46CDC
+
+#define mmTPC5_CFG_QM_TENSOR_13_PADDING_VALUE 0xF46CE0
+
+#define mmTPC5_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF46CE4
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF46CE8
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF46CEC
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF46CF0
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF46CF4
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF46CF8
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF46CFC
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF46D00
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF46D04
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF46D08
+
+#define mmTPC5_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF46D0C
+
+#define mmTPC5_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF46D10
+
+#define mmTPC5_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF46D14
+
+#define mmTPC5_CFG_QM_TENSOR_14_PADDING_VALUE 0xF46D18
+
+#define mmTPC5_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF46D1C
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF46D20
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF46D24
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF46D28
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF46D2C
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF46D30
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF46D34
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF46D38
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF46D3C
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF46D40
+
+#define mmTPC5_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF46D44
+
+#define mmTPC5_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF46D48
+
+#define mmTPC5_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF46D4C
+
+#define mmTPC5_CFG_QM_TENSOR_15_PADDING_VALUE 0xF46D50
+
+#define mmTPC5_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF46D54
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF46D58
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF46D5C
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF46D60
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF46D64
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF46D68
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF46D6C
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF46D70
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF46D74
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF46D78
+
+#define mmTPC5_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF46D7C
+
+#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D80
+
+#define mmTPC5_CFG_QM_SYNC_OBJECT_ADDR 0xF46D84
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46D88
+
+#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46D8C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46D90
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46D94
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46D98
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46D9C
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46DA0
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46DA4
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46DA8
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46DAC
+
+#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46DB0
+
+#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46DB4
+
+#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46DB8
+
+#define mmTPC5_CFG_QM_KERNEL_ID 0xF46DBC
+
+#define mmTPC5_CFG_QM_SRF_0 0xF46DC0
+
+#define mmTPC5_CFG_QM_SRF_1 0xF46DC4
+
+#define mmTPC5_CFG_QM_SRF_2 0xF46DC8
+
+#define mmTPC5_CFG_QM_SRF_3 0xF46DCC
+
+#define mmTPC5_CFG_QM_SRF_4 0xF46DD0
+
+#define mmTPC5_CFG_QM_SRF_5 0xF46DD4
+
+#define mmTPC5_CFG_QM_SRF_6 0xF46DD8
+
+#define mmTPC5_CFG_QM_SRF_7 0xF46DDC
+
+#define mmTPC5_CFG_QM_SRF_8 0xF46DE0
+
+#define mmTPC5_CFG_QM_SRF_9 0xF46DE4
+
+#define mmTPC5_CFG_QM_SRF_10 0xF46DE8
+
+#define mmTPC5_CFG_QM_SRF_11 0xF46DEC
+
+#define mmTPC5_CFG_QM_SRF_12 0xF46DF0
+
+#define mmTPC5_CFG_QM_SRF_13 0xF46DF4
+
+#define mmTPC5_CFG_QM_SRF_14 0xF46DF8
+
+#define mmTPC5_CFG_QM_SRF_15 0xF46DFC
+
+#define mmTPC5_CFG_QM_SRF_16 0xF46E00
+
+#define mmTPC5_CFG_QM_SRF_17 0xF46E04
+
+#define mmTPC5_CFG_QM_SRF_18 0xF46E08
+
+#define mmTPC5_CFG_QM_SRF_19 0xF46E0C
+
+#define mmTPC5_CFG_QM_SRF_20 0xF46E10
+
+#define mmTPC5_CFG_QM_SRF_21 0xF46E14
+
+#define mmTPC5_CFG_QM_SRF_22 0xF46E18
+
+#define mmTPC5_CFG_QM_SRF_23 0xF46E1C
+
+#define mmTPC5_CFG_QM_SRF_24 0xF46E20
+
+#define mmTPC5_CFG_QM_SRF_25 0xF46E24
+
+#define mmTPC5_CFG_QM_SRF_26 0xF46E28
+
+#define mmTPC5_CFG_QM_SRF_27 0xF46E2C
+
+#define mmTPC5_CFG_QM_SRF_28 0xF46E30
+
+#define mmTPC5_CFG_QM_SRF_29 0xF46E34
+
+#define mmTPC5_CFG_QM_SRF_30 0xF46E38
+
+#define mmTPC5_CFG_QM_SRF_31 0xF46E3C
+
+#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
new file mode 100644
index 000000000000..cd3a810ff4c4
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC5_QM_REGS_H_
+#define ASIC_REG_TPC5_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC5_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC5_QM_GLBL_CFG0 0xF48000
+
+#define mmTPC5_QM_GLBL_CFG1 0xF48004
+
+#define mmTPC5_QM_GLBL_PROT 0xF48008
+
+#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_0 0xF48010
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_1 0xF48014
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_2 0xF48018
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_3 0xF4801C
+
+#define mmTPC5_QM_GLBL_SECURE_PROPS_4 0xF48020
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_0 0xF48024
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_1 0xF48028
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_2 0xF4802C
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_3 0xF48030
+
+#define mmTPC5_QM_GLBL_NON_SECURE_PROPS_4 0xF48034
+
+#define mmTPC5_QM_GLBL_STS0 0xF48038
+
+#define mmTPC5_QM_GLBL_STS1_0 0xF48040
+
+#define mmTPC5_QM_GLBL_STS1_1 0xF48044
+
+#define mmTPC5_QM_GLBL_STS1_2 0xF48048
+
+#define mmTPC5_QM_GLBL_STS1_3 0xF4804C
+
+#define mmTPC5_QM_GLBL_STS1_4 0xF48050
+
+#define mmTPC5_QM_GLBL_MSG_EN_0 0xF48054
+
+#define mmTPC5_QM_GLBL_MSG_EN_1 0xF48058
+
+#define mmTPC5_QM_GLBL_MSG_EN_2 0xF4805C
+
+#define mmTPC5_QM_GLBL_MSG_EN_3 0xF48060
+
+#define mmTPC5_QM_GLBL_MSG_EN_4 0xF48068
+
+#define mmTPC5_QM_PQ_BASE_LO_0 0xF48070
+
+#define mmTPC5_QM_PQ_BASE_LO_1 0xF48074
+
+#define mmTPC5_QM_PQ_BASE_LO_2 0xF48078
+
+#define mmTPC5_QM_PQ_BASE_LO_3 0xF4807C
+
+#define mmTPC5_QM_PQ_BASE_HI_0 0xF48080
+
+#define mmTPC5_QM_PQ_BASE_HI_1 0xF48084
+
+#define mmTPC5_QM_PQ_BASE_HI_2 0xF48088
+
+#define mmTPC5_QM_PQ_BASE_HI_3 0xF4808C
+
+#define mmTPC5_QM_PQ_SIZE_0 0xF48090
+
+#define mmTPC5_QM_PQ_SIZE_1 0xF48094
+
+#define mmTPC5_QM_PQ_SIZE_2 0xF48098
+
+#define mmTPC5_QM_PQ_SIZE_3 0xF4809C
+
+#define mmTPC5_QM_PQ_PI_0 0xF480A0
+
+#define mmTPC5_QM_PQ_PI_1 0xF480A4
+
+#define mmTPC5_QM_PQ_PI_2 0xF480A8
+
+#define mmTPC5_QM_PQ_PI_3 0xF480AC
+
+#define mmTPC5_QM_PQ_CI_0 0xF480B0
+
+#define mmTPC5_QM_PQ_CI_1 0xF480B4
+
+#define mmTPC5_QM_PQ_CI_2 0xF480B8
+
+#define mmTPC5_QM_PQ_CI_3 0xF480BC
+
+#define mmTPC5_QM_PQ_CFG0_0 0xF480C0
+
+#define mmTPC5_QM_PQ_CFG0_1 0xF480C4
+
+#define mmTPC5_QM_PQ_CFG0_2 0xF480C8
+
+#define mmTPC5_QM_PQ_CFG0_3 0xF480CC
+
+#define mmTPC5_QM_PQ_CFG1_0 0xF480D0
+
+#define mmTPC5_QM_PQ_CFG1_1 0xF480D4
+
+#define mmTPC5_QM_PQ_CFG1_2 0xF480D8
+
+#define mmTPC5_QM_PQ_CFG1_3 0xF480DC
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_0 0xF480E0
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_1 0xF480E4
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_2 0xF480E8
+
+#define mmTPC5_QM_PQ_ARUSER_31_11_3 0xF480EC
+
+#define mmTPC5_QM_PQ_STS0_0 0xF480F0
+
+#define mmTPC5_QM_PQ_STS0_1 0xF480F4
+
+#define mmTPC5_QM_PQ_STS0_2 0xF480F8
+
+#define mmTPC5_QM_PQ_STS0_3 0xF480FC
+
+#define mmTPC5_QM_PQ_STS1_0 0xF48100
+
+#define mmTPC5_QM_PQ_STS1_1 0xF48104
+
+#define mmTPC5_QM_PQ_STS1_2 0xF48108
+
+#define mmTPC5_QM_PQ_STS1_3 0xF4810C
+
+#define mmTPC5_QM_CQ_CFG0_0 0xF48110
+
+#define mmTPC5_QM_CQ_CFG0_1 0xF48114
+
+#define mmTPC5_QM_CQ_CFG0_2 0xF48118
+
+#define mmTPC5_QM_CQ_CFG0_3 0xF4811C
+
+#define mmTPC5_QM_CQ_CFG0_4 0xF48120
+
+#define mmTPC5_QM_CQ_CFG1_0 0xF48124
+
+#define mmTPC5_QM_CQ_CFG1_1 0xF48128
+
+#define mmTPC5_QM_CQ_CFG1_2 0xF4812C
+
+#define mmTPC5_QM_CQ_CFG1_3 0xF48130
+
+#define mmTPC5_QM_CQ_CFG1_4 0xF48134
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_0 0xF48138
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_1 0xF4813C
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_2 0xF48140
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_3 0xF48144
+
+#define mmTPC5_QM_CQ_ARUSER_31_11_4 0xF48148
+
+#define mmTPC5_QM_CQ_STS0_0 0xF4814C
+
+#define mmTPC5_QM_CQ_STS0_1 0xF48150
+
+#define mmTPC5_QM_CQ_STS0_2 0xF48154
+
+#define mmTPC5_QM_CQ_STS0_3 0xF48158
+
+#define mmTPC5_QM_CQ_STS0_4 0xF4815C
+
+#define mmTPC5_QM_CQ_STS1_0 0xF48160
+
+#define mmTPC5_QM_CQ_STS1_1 0xF48164
+
+#define mmTPC5_QM_CQ_STS1_2 0xF48168
+
+#define mmTPC5_QM_CQ_STS1_3 0xF4816C
+
+#define mmTPC5_QM_CQ_STS1_4 0xF48170
+
+#define mmTPC5_QM_CQ_PTR_LO_0 0xF48174
+
+#define mmTPC5_QM_CQ_PTR_HI_0 0xF48178
+
+#define mmTPC5_QM_CQ_TSIZE_0 0xF4817C
+
+#define mmTPC5_QM_CQ_CTL_0 0xF48180
+
+#define mmTPC5_QM_CQ_PTR_LO_1 0xF48184
+
+#define mmTPC5_QM_CQ_PTR_HI_1 0xF48188
+
+#define mmTPC5_QM_CQ_TSIZE_1 0xF4818C
+
+#define mmTPC5_QM_CQ_CTL_1 0xF48190
+
+#define mmTPC5_QM_CQ_PTR_LO_2 0xF48194
+
+#define mmTPC5_QM_CQ_PTR_HI_2 0xF48198
+
+#define mmTPC5_QM_CQ_TSIZE_2 0xF4819C
+
+#define mmTPC5_QM_CQ_CTL_2 0xF481A0
+
+#define mmTPC5_QM_CQ_PTR_LO_3 0xF481A4
+
+#define mmTPC5_QM_CQ_PTR_HI_3 0xF481A8
+
+#define mmTPC5_QM_CQ_TSIZE_3 0xF481AC
+
+#define mmTPC5_QM_CQ_CTL_3 0xF481B0
+
+#define mmTPC5_QM_CQ_PTR_LO_4 0xF481B4
+
+#define mmTPC5_QM_CQ_PTR_HI_4 0xF481B8
+
+#define mmTPC5_QM_CQ_TSIZE_4 0xF481BC
+
+#define mmTPC5_QM_CQ_CTL_4 0xF481C0
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_0 0xF481C4
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_1 0xF481C8
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_2 0xF481CC
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_3 0xF481D0
+
+#define mmTPC5_QM_CQ_PTR_LO_STS_4 0xF481D4
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_0 0xF481D8
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_1 0xF481DC
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_2 0xF481E0
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_3 0xF481E4
+
+#define mmTPC5_QM_CQ_PTR_HI_STS_4 0xF481E8
+
+#define mmTPC5_QM_CQ_TSIZE_STS_0 0xF481EC
+
+#define mmTPC5_QM_CQ_TSIZE_STS_1 0xF481F0
+
+#define mmTPC5_QM_CQ_TSIZE_STS_2 0xF481F4
+
+#define mmTPC5_QM_CQ_TSIZE_STS_3 0xF481F8
+
+#define mmTPC5_QM_CQ_TSIZE_STS_4 0xF481FC
+
+#define mmTPC5_QM_CQ_CTL_STS_0 0xF48200
+
+#define mmTPC5_QM_CQ_CTL_STS_1 0xF48204
+
+#define mmTPC5_QM_CQ_CTL_STS_2 0xF48208
+
+#define mmTPC5_QM_CQ_CTL_STS_3 0xF4820C
+
+#define mmTPC5_QM_CQ_CTL_STS_4 0xF48210
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_0 0xF48214
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_1 0xF48218
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_2 0xF4821C
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_3 0xF48220
+
+#define mmTPC5_QM_CQ_IFIFO_CNT_4 0xF48224
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_0 0xF48228
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_1 0xF4822C
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_2 0xF48230
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_3 0xF48234
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO_4 0xF48238
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_0 0xF4823C
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_1 0xF48240
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_2 0xF48244
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_3 0xF48248
+
+#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI_4 0xF4824C
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_0 0xF48250
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_1 0xF48254
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_2 0xF48258
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_3 0xF4825C
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO_4 0xF48260
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_0 0xF48264
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_1 0xF48268
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_2 0xF4826C
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_3 0xF48270
+
+#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI_4 0xF48274
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_0 0xF48278
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_1 0xF4827C
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_2 0xF48280
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_3 0xF48284
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO_4 0xF48288
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_0 0xF4828C
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_1 0xF48290
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_2 0xF48294
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_3 0xF48298
+
+#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI_4 0xF4829C
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_0 0xF482A0
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_1 0xF482A4
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_2 0xF482A8
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_3 0xF482AC
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO_4 0xF482B0
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_0 0xF482B4
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_1 0xF482B8
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_2 0xF482BC
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_3 0xF482C0
+
+#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI_4 0xF482C4
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_0 0xF482C8
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_1 0xF482CC
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_2 0xF482D0
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_3 0xF482D4
+
+#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET_4 0xF482D8
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF482E0
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF482E4
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF482E8
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF482EC
+
+#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF482F0
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF482F4
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF482F8
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF482FC
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF48300
+
+#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF48304
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_0 0xF48308
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_1 0xF4830C
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_2 0xF48310
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_3 0xF48314
+
+#define mmTPC5_QM_CP_FENCE0_RDATA_4 0xF48318
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_0 0xF4831C
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_1 0xF48320
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_2 0xF48324
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_3 0xF48328
+
+#define mmTPC5_QM_CP_FENCE1_RDATA_4 0xF4832C
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_0 0xF48330
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_1 0xF48334
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_2 0xF48338
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_3 0xF4833C
+
+#define mmTPC5_QM_CP_FENCE2_RDATA_4 0xF48340
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_0 0xF48344
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_1 0xF48348
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_2 0xF4834C
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_3 0xF48350
+
+#define mmTPC5_QM_CP_FENCE3_RDATA_4 0xF48354
+
+#define mmTPC5_QM_CP_FENCE0_CNT_0 0xF48358
+
+#define mmTPC5_QM_CP_FENCE0_CNT_1 0xF4835C
+
+#define mmTPC5_QM_CP_FENCE0_CNT_2 0xF48360
+
+#define mmTPC5_QM_CP_FENCE0_CNT_3 0xF48364
+
+#define mmTPC5_QM_CP_FENCE0_CNT_4 0xF48368
+
+#define mmTPC5_QM_CP_FENCE1_CNT_0 0xF4836C
+
+#define mmTPC5_QM_CP_FENCE1_CNT_1 0xF48370
+
+#define mmTPC5_QM_CP_FENCE1_CNT_2 0xF48374
+
+#define mmTPC5_QM_CP_FENCE1_CNT_3 0xF48378
+
+#define mmTPC5_QM_CP_FENCE1_CNT_4 0xF4837C
+
+#define mmTPC5_QM_CP_FENCE2_CNT_0 0xF48380
+
+#define mmTPC5_QM_CP_FENCE2_CNT_1 0xF48384
+
+#define mmTPC5_QM_CP_FENCE2_CNT_2 0xF48388
+
+#define mmTPC5_QM_CP_FENCE2_CNT_3 0xF4838C
+
+#define mmTPC5_QM_CP_FENCE2_CNT_4 0xF48390
+
+#define mmTPC5_QM_CP_FENCE3_CNT_0 0xF48394
+
+#define mmTPC5_QM_CP_FENCE3_CNT_1 0xF48398
+
+#define mmTPC5_QM_CP_FENCE3_CNT_2 0xF4839C
+
+#define mmTPC5_QM_CP_FENCE3_CNT_3 0xF483A0
+
+#define mmTPC5_QM_CP_FENCE3_CNT_4 0xF483A4
+
+#define mmTPC5_QM_CP_STS_0 0xF483A8
+
+#define mmTPC5_QM_CP_STS_1 0xF483AC
+
+#define mmTPC5_QM_CP_STS_2 0xF483B0
+
+#define mmTPC5_QM_CP_STS_3 0xF483B4
+
+#define mmTPC5_QM_CP_STS_4 0xF483B8
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_0 0xF483BC
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_1 0xF483C0
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_2 0xF483C4
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_3 0xF483C8
+
+#define mmTPC5_QM_CP_CURRENT_INST_LO_4 0xF483CC
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_0 0xF483D0
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_1 0xF483D4
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_2 0xF483D8
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_3 0xF483DC
+
+#define mmTPC5_QM_CP_CURRENT_INST_HI_4 0xF483E0
+
+#define mmTPC5_QM_CP_BARRIER_CFG_0 0xF483F4
+
+#define mmTPC5_QM_CP_BARRIER_CFG_1 0xF483F8
+
+#define mmTPC5_QM_CP_BARRIER_CFG_2 0xF483FC
+
+#define mmTPC5_QM_CP_BARRIER_CFG_3 0xF48400
+
+#define mmTPC5_QM_CP_BARRIER_CFG_4 0xF48404
+
+#define mmTPC5_QM_CP_DBG_0_0 0xF48408
+
+#define mmTPC5_QM_CP_DBG_0_1 0xF4840C
+
+#define mmTPC5_QM_CP_DBG_0_2 0xF48410
+
+#define mmTPC5_QM_CP_DBG_0_3 0xF48414
+
+#define mmTPC5_QM_CP_DBG_0_4 0xF48418
+
+#define mmTPC5_QM_CP_ARUSER_31_11_0 0xF4841C
+
+#define mmTPC5_QM_CP_ARUSER_31_11_1 0xF48420
+
+#define mmTPC5_QM_CP_ARUSER_31_11_2 0xF48424
+
+#define mmTPC5_QM_CP_ARUSER_31_11_3 0xF48428
+
+#define mmTPC5_QM_CP_ARUSER_31_11_4 0xF4842C
+
+#define mmTPC5_QM_CP_AWUSER_31_11_0 0xF48430
+
+#define mmTPC5_QM_CP_AWUSER_31_11_1 0xF48434
+
+#define mmTPC5_QM_CP_AWUSER_31_11_2 0xF48438
+
+#define mmTPC5_QM_CP_AWUSER_31_11_3 0xF4843C
+
+#define mmTPC5_QM_CP_AWUSER_31_11_4 0xF48440
+
+#define mmTPC5_QM_ARB_CFG_0 0xF48A00
+
+#define mmTPC5_QM_ARB_CHOISE_Q_PUSH 0xF48A04
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_0 0xF48A08
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_1 0xF48A0C
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_2 0xF48A10
+
+#define mmTPC5_QM_ARB_WRR_WEIGHT_3 0xF48A14
+
+#define mmTPC5_QM_ARB_CFG_1 0xF48A18
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_0 0xF48A20
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_1 0xF48A24
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_2 0xF48A28
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_3 0xF48A2C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_4 0xF48A30
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_5 0xF48A34
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_6 0xF48A38
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_7 0xF48A3C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_8 0xF48A40
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_9 0xF48A44
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_10 0xF48A48
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_11 0xF48A4C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_12 0xF48A50
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_13 0xF48A54
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_14 0xF48A58
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_15 0xF48A5C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_16 0xF48A60
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_17 0xF48A64
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_18 0xF48A68
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_19 0xF48A6C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_20 0xF48A70
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_21 0xF48A74
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_22 0xF48A78
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_23 0xF48A7C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_24 0xF48A80
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_25 0xF48A84
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_26 0xF48A88
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_27 0xF48A8C
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_28 0xF48A90
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_29 0xF48A94
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_30 0xF48A98
+
+#define mmTPC5_QM_ARB_MST_AVAIL_CRED_31 0xF48A9C
+
+#define mmTPC5_QM_ARB_MST_CRED_INC 0xF48AA0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF48AA4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF48AA8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF48AAC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF48AB0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF48AB4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF48AB8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF48ABC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF48AC0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF48AC4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF48AC8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF48ACC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF48AD0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF48AD4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF48AD8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF48ADC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF48AE0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF48AE4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF48AE8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF48AEC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF48AF0
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF48AF4
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF48AF8
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF48AFC
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF48B00
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF48B04
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF48B08
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF48B0C
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF48B10
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF48B14
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF48B18
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF48B1C
+
+#define mmTPC5_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF48B20
+
+#define mmTPC5_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF48B28
+
+#define mmTPC5_QM_ARB_MST_SLAVE_EN 0xF48B2C
+
+#define mmTPC5_QM_ARB_MST_QUIET_PER 0xF48B34
+
+#define mmTPC5_QM_ARB_SLV_CHOISE_WDT 0xF48B38
+
+#define mmTPC5_QM_ARB_SLV_ID 0xF48B3C
+
+#define mmTPC5_QM_ARB_MSG_MAX_INFLIGHT 0xF48B44
+
+#define mmTPC5_QM_ARB_MSG_AWUSER_31_11 0xF48B48
+
+#define mmTPC5_QM_ARB_MSG_AWUSER_SEC_PROP 0xF48B4C
+
+#define mmTPC5_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF48B50
+
+#define mmTPC5_QM_ARB_BASE_LO 0xF48B54
+
+#define mmTPC5_QM_ARB_BASE_HI 0xF48B58
+
+#define mmTPC5_QM_ARB_STATE_STS 0xF48B80
+
+#define mmTPC5_QM_ARB_CHOISE_FULLNESS_STS 0xF48B84
+
+#define mmTPC5_QM_ARB_MSG_STS 0xF48B88
+
+#define mmTPC5_QM_ARB_SLV_CHOISE_Q_HEAD 0xF48B8C
+
+#define mmTPC5_QM_ARB_ERR_CAUSE 0xF48B9C
+
+#define mmTPC5_QM_ARB_ERR_MSG_EN 0xF48BA0
+
+#define mmTPC5_QM_ARB_ERR_STS_DRP 0xF48BA8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_0 0xF48BB0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_1 0xF48BB4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_2 0xF48BB8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_3 0xF48BBC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_4 0xF48BC0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_5 0xF48BC4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_6 0xF48BC8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_7 0xF48BCC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_8 0xF48BD0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_9 0xF48BD4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_10 0xF48BD8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_11 0xF48BDC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_12 0xF48BE0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_13 0xF48BE4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_14 0xF48BE8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_15 0xF48BEC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_16 0xF48BF0
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_17 0xF48BF4
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_18 0xF48BF8
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_19 0xF48BFC
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_20 0xF48C00
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_21 0xF48C04
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_22 0xF48C08
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_23 0xF48C0C
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_24 0xF48C10
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_25 0xF48C14
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_26 0xF48C18
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_27 0xF48C1C
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_28 0xF48C20
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_29 0xF48C24
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_30 0xF48C28
+
+#define mmTPC5_QM_ARB_MST_CRED_STS_31 0xF48C2C
+
+#define mmTPC5_QM_CGM_CFG 0xF48C70
+
+#define mmTPC5_QM_CGM_STS 0xF48C74
+
+#define mmTPC5_QM_CGM_CFG1 0xF48C78
+
+#define mmTPC5_QM_LOCAL_RANGE_BASE 0xF48C80
+
+#define mmTPC5_QM_LOCAL_RANGE_SIZE 0xF48C84
+
+#define mmTPC5_QM_CSMR_STRICT_PRIO_CFG 0xF48C90
+
+#define mmTPC5_QM_HBW_RD_RATE_LIM_CFG_1 0xF48C94
+
+#define mmTPC5_QM_LBW_WR_RATE_LIM_CFG_0 0xF48C98
+
+#define mmTPC5_QM_LBW_WR_RATE_LIM_CFG_1 0xF48C9C
+
+#define mmTPC5_QM_HBW_RD_RATE_LIM_CFG_0 0xF48CA0
+
+#define mmTPC5_QM_GLBL_AXCACHE 0xF48CA4
+
+#define mmTPC5_QM_IND_GW_APB_CFG 0xF48CB0
+
+#define mmTPC5_QM_IND_GW_APB_WDATA 0xF48CB4
+
+#define mmTPC5_QM_IND_GW_APB_RDATA 0xF48CB8
+
+#define mmTPC5_QM_IND_GW_APB_STATUS 0xF48CBC
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48CD0
+
+#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48CD4
+
+#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48CD8
+
+#define mmTPC5_QM_GLBL_MEM_INIT_BUSY 0xF48D00
+
+#endif /* ASIC_REG_TPC5_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
new file mode 100644
index 000000000000..eb251e72813f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_CFG_REGS_H_
+#define ASIC_REG_TPC6_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF86418
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF8641C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86420
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF86424
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86428
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF8642C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86430
+
+#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86434
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF86438
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF8643C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86440
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86444
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF86448
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF8644C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86450
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF86454
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86458
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF8645C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86460
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86464
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF86468
+
+#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF8646C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86470
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF86474
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF86478
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF8647C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF86480
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF86484
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF86488
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF8648C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF86490
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF86494
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF86498
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF8649C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF864C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF864C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF864C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF864CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF864D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF864D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF864D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF864DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF864E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF864E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF864E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF864EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF864F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF864F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF864F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF864FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86500
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF86504
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86508
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF8650C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86510
+
+#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86514
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF86518
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF8651C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86520
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86524
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF86528
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF8652C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86530
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF86534
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF86538
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF8653C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF86540
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF86544
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF86548
+
+#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF8654C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF86550
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF86554
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF86558
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF8655C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF86560
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF86564
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF86568
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF8656C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF86570
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF86574
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF86578
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF8657C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86580
+
+#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF86584
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86588
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF8658C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF86590
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86594
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86598
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF8659C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF865A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF865A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF865A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF865AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF865B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF865B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF865B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF865BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xF865C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xF865C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xF865C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xF865CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xF865D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xF865D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xF865D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xF865DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xF865E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xF865E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xF865E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xF865EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xF865F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xF865F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xF865F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xF865FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xF86600
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xF86604
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xF86608
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xF8660C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xF86610
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xF86614
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xF86618
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xF8661C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xF86620
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xF86624
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xF86628
+
+#define mmTPC6_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xF8662C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xF86630
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xF86634
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xF86638
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xF8663C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xF86640
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xF86644
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xF86648
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xF8664C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xF86650
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xF86654
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xF86658
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xF8665C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xF86660
+
+#define mmTPC6_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xF86664
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xF86668
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xF8666C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xF86670
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xF86674
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xF86678
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xF8667C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xF86680
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xF86684
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xF86688
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xF8668C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xF86690
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xF86694
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xF86698
+
+#define mmTPC6_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xF8669C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xF866A0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xF866A4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xF866A8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xF866AC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xF866B0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xF866B4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xF866B8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xF866BC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xF866C0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xF866C4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xF866C8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xF866CC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xF866D0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xF866D4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xF866D8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xF866DC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xF866E0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xF866E4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xF866E8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xF866EC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xF866F0
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xF866F4
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xF866F8
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xF866FC
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xF86700
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xF86704
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xF86708
+
+#define mmTPC6_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xF8670C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xF86710
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xF86714
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xF86718
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xF8671C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xF86720
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xF86724
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xF86728
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xF8672C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xF86730
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xF86734
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xF86738
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xF8673C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xF86740
+
+#define mmTPC6_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xF86744
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xF86748
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xF8674C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xF86750
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xF86754
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xF86758
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xF8675C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xF86760
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xF86764
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xF86768
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xF8676C
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xF86770
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xF86774
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xF86778
+
+#define mmTPC6_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xF8677C
+
+#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86780
+
+#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_ADDR 0xF86784
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86788
+
+#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF8678C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86790
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF86794
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86798
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF8679C
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF867A0
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF867A4
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF867A8
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF867AC
+
+#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF867B0
+
+#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF867B4
+
+#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF867B8
+
+#define mmTPC6_CFG_KERNEL_KERNEL_ID 0xF867BC
+
+#define mmTPC6_CFG_KERNEL_SRF_0 0xF867C0
+
+#define mmTPC6_CFG_KERNEL_SRF_1 0xF867C4
+
+#define mmTPC6_CFG_KERNEL_SRF_2 0xF867C8
+
+#define mmTPC6_CFG_KERNEL_SRF_3 0xF867CC
+
+#define mmTPC6_CFG_KERNEL_SRF_4 0xF867D0
+
+#define mmTPC6_CFG_KERNEL_SRF_5 0xF867D4
+
+#define mmTPC6_CFG_KERNEL_SRF_6 0xF867D8
+
+#define mmTPC6_CFG_KERNEL_SRF_7 0xF867DC
+
+#define mmTPC6_CFG_KERNEL_SRF_8 0xF867E0
+
+#define mmTPC6_CFG_KERNEL_SRF_9 0xF867E4
+
+#define mmTPC6_CFG_KERNEL_SRF_10 0xF867E8
+
+#define mmTPC6_CFG_KERNEL_SRF_11 0xF867EC
+
+#define mmTPC6_CFG_KERNEL_SRF_12 0xF867F0
+
+#define mmTPC6_CFG_KERNEL_SRF_13 0xF867F4
+
+#define mmTPC6_CFG_KERNEL_SRF_14 0xF867F8
+
+#define mmTPC6_CFG_KERNEL_SRF_15 0xF867FC
+
+#define mmTPC6_CFG_KERNEL_SRF_16 0xF86800
+
+#define mmTPC6_CFG_KERNEL_SRF_17 0xF86804
+
+#define mmTPC6_CFG_KERNEL_SRF_18 0xF86808
+
+#define mmTPC6_CFG_KERNEL_SRF_19 0xF8680C
+
+#define mmTPC6_CFG_KERNEL_SRF_20 0xF86810
+
+#define mmTPC6_CFG_KERNEL_SRF_21 0xF86814
+
+#define mmTPC6_CFG_KERNEL_SRF_22 0xF86818
+
+#define mmTPC6_CFG_KERNEL_SRF_23 0xF8681C
+
+#define mmTPC6_CFG_KERNEL_SRF_24 0xF86820
+
+#define mmTPC6_CFG_KERNEL_SRF_25 0xF86824
+
+#define mmTPC6_CFG_KERNEL_SRF_26 0xF86828
+
+#define mmTPC6_CFG_KERNEL_SRF_27 0xF8682C
+
+#define mmTPC6_CFG_KERNEL_SRF_28 0xF86830
+
+#define mmTPC6_CFG_KERNEL_SRF_29 0xF86834
+
+#define mmTPC6_CFG_KERNEL_SRF_30 0xF86838
+
+#define mmTPC6_CFG_KERNEL_SRF_31 0xF8683C
+
+#define mmTPC6_CFG_ROUND_CSR 0xF868FC
+
+#define mmTPC6_CFG_PROT 0xF86900
+
+#define mmTPC6_CFG_SEMAPHORE 0xF86908
+
+#define mmTPC6_CFG_VFLAGS 0xF8690C
+
+#define mmTPC6_CFG_SFLAGS 0xF86910
+
+#define mmTPC6_CFG_LFSR_POLYNOM 0xF86918
+
+#define mmTPC6_CFG_STATUS 0xF8691C
+
+#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86920
+
+#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86924
+
+#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8692C
+
+#define mmTPC6_CFG_TPC_CMD 0xF86930
+
+#define mmTPC6_CFG_TPC_EXECUTE 0xF86938
+
+#define mmTPC6_CFG_TPC_STALL 0xF8693C
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86940
+
+#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86944
+
+#define mmTPC6_CFG_RD_RATE_LIMIT 0xF86948
+
+#define mmTPC6_CFG_WR_RATE_LIMIT 0xF86950
+
+#define mmTPC6_CFG_MSS_CONFIG 0xF86954
+
+#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86958
+
+#define mmTPC6_CFG_TPC_INTR_MASK 0xF8695C
+
+#define mmTPC6_CFG_WQ_CREDITS 0xF86960
+
+#define mmTPC6_CFG_ARUSER_LO 0xF86964
+
+#define mmTPC6_CFG_ARUSER_HI 0xF86968
+
+#define mmTPC6_CFG_AWUSER_LO 0xF8696C
+
+#define mmTPC6_CFG_AWUSER_HI 0xF86970
+
+#define mmTPC6_CFG_OPCODE_EXEC 0xF86974
+
+#define mmTPC6_CFG_LUT_FUNC32_BASE_ADDR_LO 0xF86978
+
+#define mmTPC6_CFG_LUT_FUNC32_BASE_ADDR_HI 0xF8697C
+
+#define mmTPC6_CFG_LUT_FUNC64_BASE_ADDR_LO 0xF86980
+
+#define mmTPC6_CFG_LUT_FUNC64_BASE_ADDR_HI 0xF86984
+
+#define mmTPC6_CFG_LUT_FUNC128_BASE_ADDR_LO 0xF86988
+
+#define mmTPC6_CFG_LUT_FUNC128_BASE_ADDR_HI 0xF8698C
+
+#define mmTPC6_CFG_LUT_FUNC256_BASE_ADDR_LO 0xF86990
+
+#define mmTPC6_CFG_LUT_FUNC256_BASE_ADDR_HI 0xF86994
+
+#define mmTPC6_CFG_TSB_CFG_MAX_SIZE 0xF86998
+
+#define mmTPC6_CFG_TSB_CFG 0xF8699C
+
+#define mmTPC6_CFG_DBGMEM_ADD 0xF869A0
+
+#define mmTPC6_CFG_DBGMEM_DATA_WR 0xF869A4
+
+#define mmTPC6_CFG_DBGMEM_DATA_RD 0xF869A8
+
+#define mmTPC6_CFG_DBGMEM_CTRL 0xF869AC
+
+#define mmTPC6_CFG_DBGMEM_RC 0xF869B0
+
+#define mmTPC6_CFG_TSB_INFLIGHT_CNTR 0xF869B4
+
+#define mmTPC6_CFG_WQ_INFLIGHT_CNTR 0xF869B8
+
+#define mmTPC6_CFG_WQ_LBW_TOTAL_CNTR 0xF869BC
+
+#define mmTPC6_CFG_WQ_HBW_TOTAL_CNTR 0xF869C0
+
+#define mmTPC6_CFG_IRQ_OCCOUPY_CNTR 0xF869C4
+
+#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF869D0
+
+#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF869D4
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF869D8
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF869DC
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF869E0
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF869E4
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF869E8
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF869EC
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF869F0
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF869F4
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF869F8
+
+#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF869FC
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00
+
+#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04
+
+#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08
+
+#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A18
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A1C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A20
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A24
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A28
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A2C
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A30
+
+#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A34
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A38
+
+#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A3C
+
+#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A40
+
+#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A44
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A48
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A4C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A50
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A54
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A58
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A5C
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A60
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A64
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A68
+
+#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A6C
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A70
+
+#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A74
+
+#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86A78
+
+#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86A7C
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86A80
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86A84
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86A88
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86A8C
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86A90
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86A94
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86A98
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86A9C
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AA0
+
+#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86AA4
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AA8
+
+#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AAC
+
+#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AB0
+
+#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AB4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AB8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86ABC
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86AC0
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86AC4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86AC8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86ACC
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86AD0
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86AD4
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86AD8
+
+#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86ADC
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86AE0
+
+#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86AE4
+
+#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86AE8
+
+#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86AEC
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86AF0
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86AF4
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86AF8
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86AFC
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B00
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B04
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B08
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B0C
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B10
+
+#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B14
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B18
+
+#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B1C
+
+#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B20
+
+#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B24
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B28
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B2C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B30
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B34
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86B38
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86B3C
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86B40
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86B44
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86B48
+
+#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86B4C
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86B50
+
+#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86B54
+
+#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86B58
+
+#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86B5C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86B60
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86B64
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86B68
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86B6C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86B70
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86B74
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86B78
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86B7C
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86B80
+
+#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86B84
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86B88
+
+#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86B8C
+
+#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86B90
+
+#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86B94
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86B98
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86B9C
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86BA0
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86BA4
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86BA8
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86BAC
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86BB0
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86BB4
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86BB8
+
+#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86BBC
+
+#define mmTPC6_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xF86BC0
+
+#define mmTPC6_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xF86BC4
+
+#define mmTPC6_CFG_QM_TENSOR_8_PADDING_VALUE 0xF86BC8
+
+#define mmTPC6_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xF86BCC
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_0_SIZE 0xF86BD0
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xF86BD4
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_1_SIZE 0xF86BD8
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xF86BDC
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_2_SIZE 0xF86BE0
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xF86BE4
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_3_SIZE 0xF86BE8
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xF86BEC
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_4_SIZE 0xF86BF0
+
+#define mmTPC6_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xF86BF4
+
+#define mmTPC6_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xF86BF8
+
+#define mmTPC6_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xF86BFC
+
+#define mmTPC6_CFG_QM_TENSOR_9_PADDING_VALUE 0xF86C00
+
+#define mmTPC6_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xF86C04
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_0_SIZE 0xF86C08
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xF86C0C
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_1_SIZE 0xF86C10
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xF86C14
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_2_SIZE 0xF86C18
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xF86C1C
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_3_SIZE 0xF86C20
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xF86C24
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_4_SIZE 0xF86C28
+
+#define mmTPC6_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xF86C2C
+
+#define mmTPC6_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xF86C30
+
+#define mmTPC6_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xF86C34
+
+#define mmTPC6_CFG_QM_TENSOR_10_PADDING_VALUE 0xF86C38
+
+#define mmTPC6_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xF86C3C
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_0_SIZE 0xF86C40
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xF86C44
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_1_SIZE 0xF86C48
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xF86C4C
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_2_SIZE 0xF86C50
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xF86C54
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_3_SIZE 0xF86C58
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xF86C5C
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_4_SIZE 0xF86C60
+
+#define mmTPC6_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xF86C64
+
+#define mmTPC6_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xF86C68
+
+#define mmTPC6_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xF86C6C
+
+#define mmTPC6_CFG_QM_TENSOR_11_PADDING_VALUE 0xF86C70
+
+#define mmTPC6_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xF86C74
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_0_SIZE 0xF86C78
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xF86C7C
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_1_SIZE 0xF86C80
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xF86C84
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_2_SIZE 0xF86C88
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xF86C8C
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_3_SIZE 0xF86C90
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xF86C94
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_4_SIZE 0xF86C98
+
+#define mmTPC6_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xF86C9C
+
+#define mmTPC6_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xF86CA0
+
+#define mmTPC6_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xF86CA4
+
+#define mmTPC6_CFG_QM_TENSOR_12_PADDING_VALUE 0xF86CA8
+
+#define mmTPC6_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xF86CAC
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_0_SIZE 0xF86CB0
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xF86CB4
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_1_SIZE 0xF86CB8
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xF86CBC
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_2_SIZE 0xF86CC0
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xF86CC4
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_3_SIZE 0xF86CC8
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xF86CCC
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_4_SIZE 0xF86CD0
+
+#define mmTPC6_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xF86CD4
+
+#define mmTPC6_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xF86CD8
+
+#define mmTPC6_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xF86CDC
+
+#define mmTPC6_CFG_QM_TENSOR_13_PADDING_VALUE 0xF86CE0
+
+#define mmTPC6_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xF86CE4
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_0_SIZE 0xF86CE8
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xF86CEC
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_1_SIZE 0xF86CF0
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xF86CF4
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_2_SIZE 0xF86CF8
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xF86CFC
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_3_SIZE 0xF86D00
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xF86D04
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_4_SIZE 0xF86D08
+
+#define mmTPC6_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xF86D0C
+
+#define mmTPC6_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xF86D10
+
+#define mmTPC6_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xF86D14
+
+#define mmTPC6_CFG_QM_TENSOR_14_PADDING_VALUE 0xF86D18
+
+#define mmTPC6_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xF86D1C
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_0_SIZE 0xF86D20
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xF86D24
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_1_SIZE 0xF86D28
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xF86D2C
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_2_SIZE 0xF86D30
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xF86D34
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_3_SIZE 0xF86D38
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xF86D3C
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_4_SIZE 0xF86D40
+
+#define mmTPC6_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xF86D44
+
+#define mmTPC6_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xF86D48
+
+#define mmTPC6_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xF86D4C
+
+#define mmTPC6_CFG_QM_TENSOR_15_PADDING_VALUE 0xF86D50
+
+#define mmTPC6_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xF86D54
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_0_SIZE 0xF86D58
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xF86D5C
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_1_SIZE 0xF86D60
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xF86D64
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_2_SIZE 0xF86D68
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xF86D6C
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_3_SIZE 0xF86D70
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xF86D74
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_4_SIZE 0xF86D78
+
+#define mmTPC6_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xF86D7C
+
+#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D80
+
+#define mmTPC6_CFG_QM_SYNC_OBJECT_ADDR 0xF86D84
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86D88
+
+#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86D8C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86D90
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86D94
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86D98
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86D9C
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86DA0
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86DA4
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86DA8
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86DAC
+
+#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86DB0
+
+#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86DB4
+
+#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86DB8
+
+#define mmTPC6_CFG_QM_KERNEL_ID 0xF86DBC
+
+#define mmTPC6_CFG_QM_SRF_0 0xF86DC0
+
+#define mmTPC6_CFG_QM_SRF_1 0xF86DC4
+
+#define mmTPC6_CFG_QM_SRF_2 0xF86DC8
+
+#define mmTPC6_CFG_QM_SRF_3 0xF86DCC
+
+#define mmTPC6_CFG_QM_SRF_4 0xF86DD0
+
+#define mmTPC6_CFG_QM_SRF_5 0xF86DD4
+
+#define mmTPC6_CFG_QM_SRF_6 0xF86DD8
+
+#define mmTPC6_CFG_QM_SRF_7 0xF86DDC
+
+#define mmTPC6_CFG_QM_SRF_8 0xF86DE0
+
+#define mmTPC6_CFG_QM_SRF_9 0xF86DE4
+
+#define mmTPC6_CFG_QM_SRF_10 0xF86DE8
+
+#define mmTPC6_CFG_QM_SRF_11 0xF86DEC
+
+#define mmTPC6_CFG_QM_SRF_12 0xF86DF0
+
+#define mmTPC6_CFG_QM_SRF_13 0xF86DF4
+
+#define mmTPC6_CFG_QM_SRF_14 0xF86DF8
+
+#define mmTPC6_CFG_QM_SRF_15 0xF86DFC
+
+#define mmTPC6_CFG_QM_SRF_16 0xF86E00
+
+#define mmTPC6_CFG_QM_SRF_17 0xF86E04
+
+#define mmTPC6_CFG_QM_SRF_18 0xF86E08
+
+#define mmTPC6_CFG_QM_SRF_19 0xF86E0C
+
+#define mmTPC6_CFG_QM_SRF_20 0xF86E10
+
+#define mmTPC6_CFG_QM_SRF_21 0xF86E14
+
+#define mmTPC6_CFG_QM_SRF_22 0xF86E18
+
+#define mmTPC6_CFG_QM_SRF_23 0xF86E1C
+
+#define mmTPC6_CFG_QM_SRF_24 0xF86E20
+
+#define mmTPC6_CFG_QM_SRF_25 0xF86E24
+
+#define mmTPC6_CFG_QM_SRF_26 0xF86E28
+
+#define mmTPC6_CFG_QM_SRF_27 0xF86E2C
+
+#define mmTPC6_CFG_QM_SRF_28 0xF86E30
+
+#define mmTPC6_CFG_QM_SRF_29 0xF86E34
+
+#define mmTPC6_CFG_QM_SRF_30 0xF86E38
+
+#define mmTPC6_CFG_QM_SRF_31 0xF86E3C
+
+#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
new file mode 100644
index 000000000000..e35ef7fd8b1c
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC6_QM_REGS_H_
+#define ASIC_REG_TPC6_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC6_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC6_QM_GLBL_CFG0 0xF88000
+
+#define mmTPC6_QM_GLBL_CFG1 0xF88004
+
+#define mmTPC6_QM_GLBL_PROT 0xF88008
+
+#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_0 0xF88010
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_1 0xF88014
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_2 0xF88018
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_3 0xF8801C
+
+#define mmTPC6_QM_GLBL_SECURE_PROPS_4 0xF88020
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_0 0xF88024
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_1 0xF88028
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_2 0xF8802C
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_3 0xF88030
+
+#define mmTPC6_QM_GLBL_NON_SECURE_PROPS_4 0xF88034
+
+#define mmTPC6_QM_GLBL_STS0 0xF88038
+
+#define mmTPC6_QM_GLBL_STS1_0 0xF88040
+
+#define mmTPC6_QM_GLBL_STS1_1 0xF88044
+
+#define mmTPC6_QM_GLBL_STS1_2 0xF88048
+
+#define mmTPC6_QM_GLBL_STS1_3 0xF8804C
+
+#define mmTPC6_QM_GLBL_STS1_4 0xF88050
+
+#define mmTPC6_QM_GLBL_MSG_EN_0 0xF88054
+
+#define mmTPC6_QM_GLBL_MSG_EN_1 0xF88058
+
+#define mmTPC6_QM_GLBL_MSG_EN_2 0xF8805C
+
+#define mmTPC6_QM_GLBL_MSG_EN_3 0xF88060
+
+#define mmTPC6_QM_GLBL_MSG_EN_4 0xF88068
+
+#define mmTPC6_QM_PQ_BASE_LO_0 0xF88070
+
+#define mmTPC6_QM_PQ_BASE_LO_1 0xF88074
+
+#define mmTPC6_QM_PQ_BASE_LO_2 0xF88078
+
+#define mmTPC6_QM_PQ_BASE_LO_3 0xF8807C
+
+#define mmTPC6_QM_PQ_BASE_HI_0 0xF88080
+
+#define mmTPC6_QM_PQ_BASE_HI_1 0xF88084
+
+#define mmTPC6_QM_PQ_BASE_HI_2 0xF88088
+
+#define mmTPC6_QM_PQ_BASE_HI_3 0xF8808C
+
+#define mmTPC6_QM_PQ_SIZE_0 0xF88090
+
+#define mmTPC6_QM_PQ_SIZE_1 0xF88094
+
+#define mmTPC6_QM_PQ_SIZE_2 0xF88098
+
+#define mmTPC6_QM_PQ_SIZE_3 0xF8809C
+
+#define mmTPC6_QM_PQ_PI_0 0xF880A0
+
+#define mmTPC6_QM_PQ_PI_1 0xF880A4
+
+#define mmTPC6_QM_PQ_PI_2 0xF880A8
+
+#define mmTPC6_QM_PQ_PI_3 0xF880AC
+
+#define mmTPC6_QM_PQ_CI_0 0xF880B0
+
+#define mmTPC6_QM_PQ_CI_1 0xF880B4
+
+#define mmTPC6_QM_PQ_CI_2 0xF880B8
+
+#define mmTPC6_QM_PQ_CI_3 0xF880BC
+
+#define mmTPC6_QM_PQ_CFG0_0 0xF880C0
+
+#define mmTPC6_QM_PQ_CFG0_1 0xF880C4
+
+#define mmTPC6_QM_PQ_CFG0_2 0xF880C8
+
+#define mmTPC6_QM_PQ_CFG0_3 0xF880CC
+
+#define mmTPC6_QM_PQ_CFG1_0 0xF880D0
+
+#define mmTPC6_QM_PQ_CFG1_1 0xF880D4
+
+#define mmTPC6_QM_PQ_CFG1_2 0xF880D8
+
+#define mmTPC6_QM_PQ_CFG1_3 0xF880DC
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_0 0xF880E0
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_1 0xF880E4
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_2 0xF880E8
+
+#define mmTPC6_QM_PQ_ARUSER_31_11_3 0xF880EC
+
+#define mmTPC6_QM_PQ_STS0_0 0xF880F0
+
+#define mmTPC6_QM_PQ_STS0_1 0xF880F4
+
+#define mmTPC6_QM_PQ_STS0_2 0xF880F8
+
+#define mmTPC6_QM_PQ_STS0_3 0xF880FC
+
+#define mmTPC6_QM_PQ_STS1_0 0xF88100
+
+#define mmTPC6_QM_PQ_STS1_1 0xF88104
+
+#define mmTPC6_QM_PQ_STS1_2 0xF88108
+
+#define mmTPC6_QM_PQ_STS1_3 0xF8810C
+
+#define mmTPC6_QM_CQ_CFG0_0 0xF88110
+
+#define mmTPC6_QM_CQ_CFG0_1 0xF88114
+
+#define mmTPC6_QM_CQ_CFG0_2 0xF88118
+
+#define mmTPC6_QM_CQ_CFG0_3 0xF8811C
+
+#define mmTPC6_QM_CQ_CFG0_4 0xF88120
+
+#define mmTPC6_QM_CQ_CFG1_0 0xF88124
+
+#define mmTPC6_QM_CQ_CFG1_1 0xF88128
+
+#define mmTPC6_QM_CQ_CFG1_2 0xF8812C
+
+#define mmTPC6_QM_CQ_CFG1_3 0xF88130
+
+#define mmTPC6_QM_CQ_CFG1_4 0xF88134
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_0 0xF88138
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_1 0xF8813C
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_2 0xF88140
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_3 0xF88144
+
+#define mmTPC6_QM_CQ_ARUSER_31_11_4 0xF88148
+
+#define mmTPC6_QM_CQ_STS0_0 0xF8814C
+
+#define mmTPC6_QM_CQ_STS0_1 0xF88150
+
+#define mmTPC6_QM_CQ_STS0_2 0xF88154
+
+#define mmTPC6_QM_CQ_STS0_3 0xF88158
+
+#define mmTPC6_QM_CQ_STS0_4 0xF8815C
+
+#define mmTPC6_QM_CQ_STS1_0 0xF88160
+
+#define mmTPC6_QM_CQ_STS1_1 0xF88164
+
+#define mmTPC6_QM_CQ_STS1_2 0xF88168
+
+#define mmTPC6_QM_CQ_STS1_3 0xF8816C
+
+#define mmTPC6_QM_CQ_STS1_4 0xF88170
+
+#define mmTPC6_QM_CQ_PTR_LO_0 0xF88174
+
+#define mmTPC6_QM_CQ_PTR_HI_0 0xF88178
+
+#define mmTPC6_QM_CQ_TSIZE_0 0xF8817C
+
+#define mmTPC6_QM_CQ_CTL_0 0xF88180
+
+#define mmTPC6_QM_CQ_PTR_LO_1 0xF88184
+
+#define mmTPC6_QM_CQ_PTR_HI_1 0xF88188
+
+#define mmTPC6_QM_CQ_TSIZE_1 0xF8818C
+
+#define mmTPC6_QM_CQ_CTL_1 0xF88190
+
+#define mmTPC6_QM_CQ_PTR_LO_2 0xF88194
+
+#define mmTPC6_QM_CQ_PTR_HI_2 0xF88198
+
+#define mmTPC6_QM_CQ_TSIZE_2 0xF8819C
+
+#define mmTPC6_QM_CQ_CTL_2 0xF881A0
+
+#define mmTPC6_QM_CQ_PTR_LO_3 0xF881A4
+
+#define mmTPC6_QM_CQ_PTR_HI_3 0xF881A8
+
+#define mmTPC6_QM_CQ_TSIZE_3 0xF881AC
+
+#define mmTPC6_QM_CQ_CTL_3 0xF881B0
+
+#define mmTPC6_QM_CQ_PTR_LO_4 0xF881B4
+
+#define mmTPC6_QM_CQ_PTR_HI_4 0xF881B8
+
+#define mmTPC6_QM_CQ_TSIZE_4 0xF881BC
+
+#define mmTPC6_QM_CQ_CTL_4 0xF881C0
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_0 0xF881C4
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_1 0xF881C8
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_2 0xF881CC
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_3 0xF881D0
+
+#define mmTPC6_QM_CQ_PTR_LO_STS_4 0xF881D4
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_0 0xF881D8
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_1 0xF881DC
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_2 0xF881E0
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_3 0xF881E4
+
+#define mmTPC6_QM_CQ_PTR_HI_STS_4 0xF881E8
+
+#define mmTPC6_QM_CQ_TSIZE_STS_0 0xF881EC
+
+#define mmTPC6_QM_CQ_TSIZE_STS_1 0xF881F0
+
+#define mmTPC6_QM_CQ_TSIZE_STS_2 0xF881F4
+
+#define mmTPC6_QM_CQ_TSIZE_STS_3 0xF881F8
+
+#define mmTPC6_QM_CQ_TSIZE_STS_4 0xF881FC
+
+#define mmTPC6_QM_CQ_CTL_STS_0 0xF88200
+
+#define mmTPC6_QM_CQ_CTL_STS_1 0xF88204
+
+#define mmTPC6_QM_CQ_CTL_STS_2 0xF88208
+
+#define mmTPC6_QM_CQ_CTL_STS_3 0xF8820C
+
+#define mmTPC6_QM_CQ_CTL_STS_4 0xF88210
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_0 0xF88214
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_1 0xF88218
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_2 0xF8821C
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_3 0xF88220
+
+#define mmTPC6_QM_CQ_IFIFO_CNT_4 0xF88224
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_0 0xF88228
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_1 0xF8822C
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_2 0xF88230
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_3 0xF88234
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO_4 0xF88238
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_0 0xF8823C
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_1 0xF88240
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_2 0xF88244
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_3 0xF88248
+
+#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI_4 0xF8824C
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_0 0xF88250
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_1 0xF88254
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_2 0xF88258
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_3 0xF8825C
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO_4 0xF88260
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_0 0xF88264
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_1 0xF88268
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_2 0xF8826C
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_3 0xF88270
+
+#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI_4 0xF88274
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_0 0xF88278
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_1 0xF8827C
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_2 0xF88280
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_3 0xF88284
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO_4 0xF88288
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_0 0xF8828C
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_1 0xF88290
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_2 0xF88294
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_3 0xF88298
+
+#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI_4 0xF8829C
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_0 0xF882A0
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_1 0xF882A4
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_2 0xF882A8
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_3 0xF882AC
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO_4 0xF882B0
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_0 0xF882B4
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_1 0xF882B8
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_2 0xF882BC
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_3 0xF882C0
+
+#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI_4 0xF882C4
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_0 0xF882C8
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_1 0xF882CC
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_2 0xF882D0
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_3 0xF882D4
+
+#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET_4 0xF882D8
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xF882E0
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xF882E4
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xF882E8
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xF882EC
+
+#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xF882F0
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xF882F4
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xF882F8
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xF882FC
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xF88300
+
+#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xF88304
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_0 0xF88308
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_1 0xF8830C
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_2 0xF88310
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_3 0xF88314
+
+#define mmTPC6_QM_CP_FENCE0_RDATA_4 0xF88318
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_0 0xF8831C
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_1 0xF88320
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_2 0xF88324
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_3 0xF88328
+
+#define mmTPC6_QM_CP_FENCE1_RDATA_4 0xF8832C
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_0 0xF88330
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_1 0xF88334
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_2 0xF88338
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_3 0xF8833C
+
+#define mmTPC6_QM_CP_FENCE2_RDATA_4 0xF88340
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_0 0xF88344
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_1 0xF88348
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_2 0xF8834C
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_3 0xF88350
+
+#define mmTPC6_QM_CP_FENCE3_RDATA_4 0xF88354
+
+#define mmTPC6_QM_CP_FENCE0_CNT_0 0xF88358
+
+#define mmTPC6_QM_CP_FENCE0_CNT_1 0xF8835C
+
+#define mmTPC6_QM_CP_FENCE0_CNT_2 0xF88360
+
+#define mmTPC6_QM_CP_FENCE0_CNT_3 0xF88364
+
+#define mmTPC6_QM_CP_FENCE0_CNT_4 0xF88368
+
+#define mmTPC6_QM_CP_FENCE1_CNT_0 0xF8836C
+
+#define mmTPC6_QM_CP_FENCE1_CNT_1 0xF88370
+
+#define mmTPC6_QM_CP_FENCE1_CNT_2 0xF88374
+
+#define mmTPC6_QM_CP_FENCE1_CNT_3 0xF88378
+
+#define mmTPC6_QM_CP_FENCE1_CNT_4 0xF8837C
+
+#define mmTPC6_QM_CP_FENCE2_CNT_0 0xF88380
+
+#define mmTPC6_QM_CP_FENCE2_CNT_1 0xF88384
+
+#define mmTPC6_QM_CP_FENCE2_CNT_2 0xF88388
+
+#define mmTPC6_QM_CP_FENCE2_CNT_3 0xF8838C
+
+#define mmTPC6_QM_CP_FENCE2_CNT_4 0xF88390
+
+#define mmTPC6_QM_CP_FENCE3_CNT_0 0xF88394
+
+#define mmTPC6_QM_CP_FENCE3_CNT_1 0xF88398
+
+#define mmTPC6_QM_CP_FENCE3_CNT_2 0xF8839C
+
+#define mmTPC6_QM_CP_FENCE3_CNT_3 0xF883A0
+
+#define mmTPC6_QM_CP_FENCE3_CNT_4 0xF883A4
+
+#define mmTPC6_QM_CP_STS_0 0xF883A8
+
+#define mmTPC6_QM_CP_STS_1 0xF883AC
+
+#define mmTPC6_QM_CP_STS_2 0xF883B0
+
+#define mmTPC6_QM_CP_STS_3 0xF883B4
+
+#define mmTPC6_QM_CP_STS_4 0xF883B8
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_0 0xF883BC
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_1 0xF883C0
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_2 0xF883C4
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_3 0xF883C8
+
+#define mmTPC6_QM_CP_CURRENT_INST_LO_4 0xF883CC
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_0 0xF883D0
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_1 0xF883D4
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_2 0xF883D8
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_3 0xF883DC
+
+#define mmTPC6_QM_CP_CURRENT_INST_HI_4 0xF883E0
+
+#define mmTPC6_QM_CP_BARRIER_CFG_0 0xF883F4
+
+#define mmTPC6_QM_CP_BARRIER_CFG_1 0xF883F8
+
+#define mmTPC6_QM_CP_BARRIER_CFG_2 0xF883FC
+
+#define mmTPC6_QM_CP_BARRIER_CFG_3 0xF88400
+
+#define mmTPC6_QM_CP_BARRIER_CFG_4 0xF88404
+
+#define mmTPC6_QM_CP_DBG_0_0 0xF88408
+
+#define mmTPC6_QM_CP_DBG_0_1 0xF8840C
+
+#define mmTPC6_QM_CP_DBG_0_2 0xF88410
+
+#define mmTPC6_QM_CP_DBG_0_3 0xF88414
+
+#define mmTPC6_QM_CP_DBG_0_4 0xF88418
+
+#define mmTPC6_QM_CP_ARUSER_31_11_0 0xF8841C
+
+#define mmTPC6_QM_CP_ARUSER_31_11_1 0xF88420
+
+#define mmTPC6_QM_CP_ARUSER_31_11_2 0xF88424
+
+#define mmTPC6_QM_CP_ARUSER_31_11_3 0xF88428
+
+#define mmTPC6_QM_CP_ARUSER_31_11_4 0xF8842C
+
+#define mmTPC6_QM_CP_AWUSER_31_11_0 0xF88430
+
+#define mmTPC6_QM_CP_AWUSER_31_11_1 0xF88434
+
+#define mmTPC6_QM_CP_AWUSER_31_11_2 0xF88438
+
+#define mmTPC6_QM_CP_AWUSER_31_11_3 0xF8843C
+
+#define mmTPC6_QM_CP_AWUSER_31_11_4 0xF88440
+
+#define mmTPC6_QM_ARB_CFG_0 0xF88A00
+
+#define mmTPC6_QM_ARB_CHOISE_Q_PUSH 0xF88A04
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_0 0xF88A08
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_1 0xF88A0C
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_2 0xF88A10
+
+#define mmTPC6_QM_ARB_WRR_WEIGHT_3 0xF88A14
+
+#define mmTPC6_QM_ARB_CFG_1 0xF88A18
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_0 0xF88A20
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_1 0xF88A24
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_2 0xF88A28
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_3 0xF88A2C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_4 0xF88A30
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_5 0xF88A34
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_6 0xF88A38
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_7 0xF88A3C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_8 0xF88A40
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_9 0xF88A44
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_10 0xF88A48
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_11 0xF88A4C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_12 0xF88A50
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_13 0xF88A54
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_14 0xF88A58
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_15 0xF88A5C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_16 0xF88A60
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_17 0xF88A64
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_18 0xF88A68
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_19 0xF88A6C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_20 0xF88A70
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_21 0xF88A74
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_22 0xF88A78
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_23 0xF88A7C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_24 0xF88A80
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_25 0xF88A84
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_26 0xF88A88
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_27 0xF88A8C
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_28 0xF88A90
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_29 0xF88A94
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_30 0xF88A98
+
+#define mmTPC6_QM_ARB_MST_AVAIL_CRED_31 0xF88A9C
+
+#define mmTPC6_QM_ARB_MST_CRED_INC 0xF88AA0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xF88AA4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xF88AA8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xF88AAC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xF88AB0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xF88AB4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xF88AB8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xF88ABC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xF88AC0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xF88AC4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xF88AC8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xF88ACC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xF88AD0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xF88AD4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xF88AD8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xF88ADC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xF88AE0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xF88AE4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xF88AE8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xF88AEC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xF88AF0
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xF88AF4
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xF88AF8
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xF88AFC
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xF88B00
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xF88B04
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xF88B08
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xF88B0C
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xF88B10
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xF88B14
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xF88B18
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xF88B1C
+
+#define mmTPC6_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xF88B20
+
+#define mmTPC6_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xF88B28
+
+#define mmTPC6_QM_ARB_MST_SLAVE_EN 0xF88B2C
+
+#define mmTPC6_QM_ARB_MST_QUIET_PER 0xF88B34
+
+#define mmTPC6_QM_ARB_SLV_CHOISE_WDT 0xF88B38
+
+#define mmTPC6_QM_ARB_SLV_ID 0xF88B3C
+
+#define mmTPC6_QM_ARB_MSG_MAX_INFLIGHT 0xF88B44
+
+#define mmTPC6_QM_ARB_MSG_AWUSER_31_11 0xF88B48
+
+#define mmTPC6_QM_ARB_MSG_AWUSER_SEC_PROP 0xF88B4C
+
+#define mmTPC6_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xF88B50
+
+#define mmTPC6_QM_ARB_BASE_LO 0xF88B54
+
+#define mmTPC6_QM_ARB_BASE_HI 0xF88B58
+
+#define mmTPC6_QM_ARB_STATE_STS 0xF88B80
+
+#define mmTPC6_QM_ARB_CHOISE_FULLNESS_STS 0xF88B84
+
+#define mmTPC6_QM_ARB_MSG_STS 0xF88B88
+
+#define mmTPC6_QM_ARB_SLV_CHOISE_Q_HEAD 0xF88B8C
+
+#define mmTPC6_QM_ARB_ERR_CAUSE 0xF88B9C
+
+#define mmTPC6_QM_ARB_ERR_MSG_EN 0xF88BA0
+
+#define mmTPC6_QM_ARB_ERR_STS_DRP 0xF88BA8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_0 0xF88BB0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_1 0xF88BB4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_2 0xF88BB8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_3 0xF88BBC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_4 0xF88BC0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_5 0xF88BC4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_6 0xF88BC8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_7 0xF88BCC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_8 0xF88BD0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_9 0xF88BD4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_10 0xF88BD8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_11 0xF88BDC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_12 0xF88BE0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_13 0xF88BE4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_14 0xF88BE8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_15 0xF88BEC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_16 0xF88BF0
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_17 0xF88BF4
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_18 0xF88BF8
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_19 0xF88BFC
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_20 0xF88C00
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_21 0xF88C04
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_22 0xF88C08
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_23 0xF88C0C
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_24 0xF88C10
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_25 0xF88C14
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_26 0xF88C18
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_27 0xF88C1C
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_28 0xF88C20
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_29 0xF88C24
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_30 0xF88C28
+
+#define mmTPC6_QM_ARB_MST_CRED_STS_31 0xF88C2C
+
+#define mmTPC6_QM_CGM_CFG 0xF88C70
+
+#define mmTPC6_QM_CGM_STS 0xF88C74
+
+#define mmTPC6_QM_CGM_CFG1 0xF88C78
+
+#define mmTPC6_QM_LOCAL_RANGE_BASE 0xF88C80
+
+#define mmTPC6_QM_LOCAL_RANGE_SIZE 0xF88C84
+
+#define mmTPC6_QM_CSMR_STRICT_PRIO_CFG 0xF88C90
+
+#define mmTPC6_QM_HBW_RD_RATE_LIM_CFG_1 0xF88C94
+
+#define mmTPC6_QM_LBW_WR_RATE_LIM_CFG_0 0xF88C98
+
+#define mmTPC6_QM_LBW_WR_RATE_LIM_CFG_1 0xF88C9C
+
+#define mmTPC6_QM_HBW_RD_RATE_LIM_CFG_0 0xF88CA0
+
+#define mmTPC6_QM_GLBL_AXCACHE 0xF88CA4
+
+#define mmTPC6_QM_IND_GW_APB_CFG 0xF88CB0
+
+#define mmTPC6_QM_IND_GW_APB_WDATA 0xF88CB4
+
+#define mmTPC6_QM_IND_GW_APB_RDATA 0xF88CB8
+
+#define mmTPC6_QM_IND_GW_APB_STATUS 0xF88CBC
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88CD0
+
+#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88CD4
+
+#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88CD8
+
+#define mmTPC6_QM_GLBL_MEM_INIT_BUSY 0xF88D00
+
+#endif /* ASIC_REG_TPC6_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
new file mode 100644
index 000000000000..1887b10e58e2
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
@@ -0,0 +1,1226 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_CFG_REGS_H_
+#define ASIC_REG_TPC7_CFG_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_CFG (Prototype: TPC)
+ *****************************************
+ */
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC6418
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC641C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6420
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC6424
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6428
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC642C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6430
+
+#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6434
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC6438
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC643C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6440
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6444
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC6448
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC644C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6450
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC6454
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6458
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC645C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6460
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6464
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC6468
+
+#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC646C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6470
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC6474
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC6478
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC647C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC6480
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC6484
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC6488
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC648C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC6490
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC6494
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC6498
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC649C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC64C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC64C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC64C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC64CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC64D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC64D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC64D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC64DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC64E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC64E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC64E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC64EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC64F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC64F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC64F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC64FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6500
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC6504
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6508
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC650C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6510
+
+#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6514
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC6518
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC651C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6520
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6524
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC6528
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC652C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6530
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC6534
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC6538
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC653C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC6540
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC6544
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC6548
+
+#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC654C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC6550
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC6554
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC6558
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC655C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC6560
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC6564
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC6568
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC656C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC6570
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC6574
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC6578
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC657C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6580
+
+#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC6584
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6588
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC658C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC6590
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6594
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6598
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC659C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC65A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC65A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC65A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC65AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC65B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC65B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC65B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC65BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_BASE_ADDR_LOW 0xFC65C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_BASE_ADDR_HIGH 0xFC65C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_PADDING_VALUE 0xFC65C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_TENSOR_CONFIG 0xFC65CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_0_SIZE 0xFC65D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_0_STRIDE 0xFC65D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_1_SIZE 0xFC65D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_1_STRIDE 0xFC65DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_2_SIZE 0xFC65E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_2_STRIDE 0xFC65E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_3_SIZE 0xFC65E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_3_STRIDE 0xFC65EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_4_SIZE 0xFC65F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_8_DIM_4_STRIDE 0xFC65F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_BASE_ADDR_LOW 0xFC65F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_BASE_ADDR_HIGH 0xFC65FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_PADDING_VALUE 0xFC6600
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_TENSOR_CONFIG 0xFC6604
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_0_SIZE 0xFC6608
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_0_STRIDE 0xFC660C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_1_SIZE 0xFC6610
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_1_STRIDE 0xFC6614
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_2_SIZE 0xFC6618
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_2_STRIDE 0xFC661C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_3_SIZE 0xFC6620
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_3_STRIDE 0xFC6624
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_4_SIZE 0xFC6628
+
+#define mmTPC7_CFG_KERNEL_TENSOR_9_DIM_4_STRIDE 0xFC662C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_BASE_ADDR_LOW 0xFC6630
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_BASE_ADDR_HIGH 0xFC6634
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_PADDING_VALUE 0xFC6638
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_TENSOR_CONFIG 0xFC663C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_0_SIZE 0xFC6640
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_0_STRIDE 0xFC6644
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_1_SIZE 0xFC6648
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_1_STRIDE 0xFC664C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_2_SIZE 0xFC6650
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_2_STRIDE 0xFC6654
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_3_SIZE 0xFC6658
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_3_STRIDE 0xFC665C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_4_SIZE 0xFC6660
+
+#define mmTPC7_CFG_KERNEL_TENSOR_10_DIM_4_STRIDE 0xFC6664
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_BASE_ADDR_LOW 0xFC6668
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_BASE_ADDR_HIGH 0xFC666C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_PADDING_VALUE 0xFC6670
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_TENSOR_CONFIG 0xFC6674
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_0_SIZE 0xFC6678
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_0_STRIDE 0xFC667C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_1_SIZE 0xFC6680
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_1_STRIDE 0xFC6684
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_2_SIZE 0xFC6688
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_2_STRIDE 0xFC668C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_3_SIZE 0xFC6690
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_3_STRIDE 0xFC6694
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_4_SIZE 0xFC6698
+
+#define mmTPC7_CFG_KERNEL_TENSOR_11_DIM_4_STRIDE 0xFC669C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_BASE_ADDR_LOW 0xFC66A0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_BASE_ADDR_HIGH 0xFC66A4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_PADDING_VALUE 0xFC66A8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_TENSOR_CONFIG 0xFC66AC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_0_SIZE 0xFC66B0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_0_STRIDE 0xFC66B4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_1_SIZE 0xFC66B8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_1_STRIDE 0xFC66BC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_2_SIZE 0xFC66C0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_2_STRIDE 0xFC66C4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_3_SIZE 0xFC66C8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_3_STRIDE 0xFC66CC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_4_SIZE 0xFC66D0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_12_DIM_4_STRIDE 0xFC66D4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_BASE_ADDR_LOW 0xFC66D8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_BASE_ADDR_HIGH 0xFC66DC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_PADDING_VALUE 0xFC66E0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_TENSOR_CONFIG 0xFC66E4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_0_SIZE 0xFC66E8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_0_STRIDE 0xFC66EC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_1_SIZE 0xFC66F0
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_1_STRIDE 0xFC66F4
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_2_SIZE 0xFC66F8
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_2_STRIDE 0xFC66FC
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_3_SIZE 0xFC6700
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_3_STRIDE 0xFC6704
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_4_SIZE 0xFC6708
+
+#define mmTPC7_CFG_KERNEL_TENSOR_13_DIM_4_STRIDE 0xFC670C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_BASE_ADDR_LOW 0xFC6710
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_BASE_ADDR_HIGH 0xFC6714
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_PADDING_VALUE 0xFC6718
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_TENSOR_CONFIG 0xFC671C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_0_SIZE 0xFC6720
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_0_STRIDE 0xFC6724
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_1_SIZE 0xFC6728
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_1_STRIDE 0xFC672C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_2_SIZE 0xFC6730
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_2_STRIDE 0xFC6734
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_3_SIZE 0xFC6738
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_3_STRIDE 0xFC673C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_4_SIZE 0xFC6740
+
+#define mmTPC7_CFG_KERNEL_TENSOR_14_DIM_4_STRIDE 0xFC6744
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_BASE_ADDR_LOW 0xFC6748
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_BASE_ADDR_HIGH 0xFC674C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_PADDING_VALUE 0xFC6750
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_TENSOR_CONFIG 0xFC6754
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_0_SIZE 0xFC6758
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_0_STRIDE 0xFC675C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_1_SIZE 0xFC6760
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_1_STRIDE 0xFC6764
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_2_SIZE 0xFC6768
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_2_STRIDE 0xFC676C
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_3_SIZE 0xFC6770
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_3_STRIDE 0xFC6774
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_4_SIZE 0xFC6778
+
+#define mmTPC7_CFG_KERNEL_TENSOR_15_DIM_4_STRIDE 0xFC677C
+
+#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6780
+
+#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_ADDR 0xFC6784
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6788
+
+#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC678C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6790
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC6794
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6798
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC679C
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC67A0
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC67A4
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC67A8
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC67AC
+
+#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC67B0
+
+#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC67B4
+
+#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC67B8
+
+#define mmTPC7_CFG_KERNEL_KERNEL_ID 0xFC67BC
+
+#define mmTPC7_CFG_KERNEL_SRF_0 0xFC67C0
+
+#define mmTPC7_CFG_KERNEL_SRF_1 0xFC67C4
+
+#define mmTPC7_CFG_KERNEL_SRF_2 0xFC67C8
+
+#define mmTPC7_CFG_KERNEL_SRF_3 0xFC67CC
+
+#define mmTPC7_CFG_KERNEL_SRF_4 0xFC67D0
+
+#define mmTPC7_CFG_KERNEL_SRF_5 0xFC67D4
+
+#define mmTPC7_CFG_KERNEL_SRF_6 0xFC67D8
+
+#define mmTPC7_CFG_KERNEL_SRF_7 0xFC67DC
+
+#define mmTPC7_CFG_KERNEL_SRF_8 0xFC67E0
+
+#define mmTPC7_CFG_KERNEL_SRF_9 0xFC67E4
+
+#define mmTPC7_CFG_KERNEL_SRF_10 0xFC67E8
+
+#define mmTPC7_CFG_KERNEL_SRF_11 0xFC67EC
+
+#define mmTPC7_CFG_KERNEL_SRF_12 0xFC67F0
+
+#define mmTPC7_CFG_KERNEL_SRF_13 0xFC67F4
+
+#define mmTPC7_CFG_KERNEL_SRF_14 0xFC67F8
+
+#define mmTPC7_CFG_KERNEL_SRF_15 0xFC67FC
+
+#define mmTPC7_CFG_KERNEL_SRF_16 0xFC6800
+
+#define mmTPC7_CFG_KERNEL_SRF_17 0xFC6804
+
+#define mmTPC7_CFG_KERNEL_SRF_18 0xFC6808
+
+#define mmTPC7_CFG_KERNEL_SRF_19 0xFC680C
+
+#define mmTPC7_CFG_KERNEL_SRF_20 0xFC6810
+
+#define mmTPC7_CFG_KERNEL_SRF_21 0xFC6814
+
+#define mmTPC7_CFG_KERNEL_SRF_22 0xFC6818
+
+#define mmTPC7_CFG_KERNEL_SRF_23 0xFC681C
+
+#define mmTPC7_CFG_KERNEL_SRF_24 0xFC6820
+
+#define mmTPC7_CFG_KERNEL_SRF_25 0xFC6824
+
+#define mmTPC7_CFG_KERNEL_SRF_26 0xFC6828
+
+#define mmTPC7_CFG_KERNEL_SRF_27 0xFC682C
+
+#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6830
+
+#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6834
+
+#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6838
+
+#define mmTPC7_CFG_KERNEL_SRF_31 0xFC683C
+
+#define mmTPC7_CFG_ROUND_CSR 0xFC68FC
+
+#define mmTPC7_CFG_PROT 0xFC6900
+
+#define mmTPC7_CFG_SEMAPHORE 0xFC6908
+
+#define mmTPC7_CFG_VFLAGS 0xFC690C
+
+#define mmTPC7_CFG_SFLAGS 0xFC6910
+
+#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6918
+
+#define mmTPC7_CFG_STATUS 0xFC691C
+
+#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6920
+
+#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6924
+
+#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC692C
+
+#define mmTPC7_CFG_TPC_CMD 0xFC6930
+
+#define mmTPC7_CFG_TPC_EXECUTE 0xFC6938
+
+#define mmTPC7_CFG_TPC_STALL 0xFC693C
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6940
+
+#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6944
+
+#define mmTPC7_CFG_RD_RATE_LIMIT 0xFC6948
+
+#define mmTPC7_CFG_WR_RATE_LIMIT 0xFC6950
+
+#define mmTPC7_CFG_MSS_CONFIG 0xFC6954
+
+#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6958
+
+#define mmTPC7_CFG_TPC_INTR_MASK 0xFC695C
+
+#define mmTPC7_CFG_WQ_CREDITS 0xFC6960
+
+#define mmTPC7_CFG_ARUSER_LO 0xFC6964
+
+#define mmTPC7_CFG_ARUSER_HI 0xFC6968
+
+#define mmTPC7_CFG_AWUSER_LO 0xFC696C
+
+#define mmTPC7_CFG_AWUSER_HI 0xFC6970
+
+#define mmTPC7_CFG_OPCODE_EXEC 0xFC6974
+
+#define mmTPC7_CFG_LUT_FUNC32_BASE_ADDR_LO 0xFC6978
+
+#define mmTPC7_CFG_LUT_FUNC32_BASE_ADDR_HI 0xFC697C
+
+#define mmTPC7_CFG_LUT_FUNC64_BASE_ADDR_LO 0xFC6980
+
+#define mmTPC7_CFG_LUT_FUNC64_BASE_ADDR_HI 0xFC6984
+
+#define mmTPC7_CFG_LUT_FUNC128_BASE_ADDR_LO 0xFC6988
+
+#define mmTPC7_CFG_LUT_FUNC128_BASE_ADDR_HI 0xFC698C
+
+#define mmTPC7_CFG_LUT_FUNC256_BASE_ADDR_LO 0xFC6990
+
+#define mmTPC7_CFG_LUT_FUNC256_BASE_ADDR_HI 0xFC6994
+
+#define mmTPC7_CFG_TSB_CFG_MAX_SIZE 0xFC6998
+
+#define mmTPC7_CFG_TSB_CFG 0xFC699C
+
+#define mmTPC7_CFG_DBGMEM_ADD 0xFC69A0
+
+#define mmTPC7_CFG_DBGMEM_DATA_WR 0xFC69A4
+
+#define mmTPC7_CFG_DBGMEM_DATA_RD 0xFC69A8
+
+#define mmTPC7_CFG_DBGMEM_CTRL 0xFC69AC
+
+#define mmTPC7_CFG_DBGMEM_RC 0xFC69B0
+
+#define mmTPC7_CFG_TSB_INFLIGHT_CNTR 0xFC69B4
+
+#define mmTPC7_CFG_WQ_INFLIGHT_CNTR 0xFC69B8
+
+#define mmTPC7_CFG_WQ_LBW_TOTAL_CNTR 0xFC69BC
+
+#define mmTPC7_CFG_WQ_HBW_TOTAL_CNTR 0xFC69C0
+
+#define mmTPC7_CFG_IRQ_OCCOUPY_CNTR 0xFC69C4
+
+#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC69D0
+
+#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC69D4
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC69D8
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC69DC
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC69E0
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC69E4
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC69E8
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC69EC
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC69F0
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC69F4
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC69F8
+
+#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC69FC
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00
+
+#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04
+
+#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08
+
+#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A18
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A1C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A20
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A24
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A28
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A2C
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A30
+
+#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A34
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A38
+
+#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A3C
+
+#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A40
+
+#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A44
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A48
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A4C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A50
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A54
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A58
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A5C
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A60
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A64
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A68
+
+#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A6C
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A70
+
+#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A74
+
+#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6A78
+
+#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6A7C
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6A80
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6A84
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6A88
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6A8C
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6A90
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6A94
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6A98
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6A9C
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AA0
+
+#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6AA4
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AA8
+
+#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AAC
+
+#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AB0
+
+#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AB4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AB8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6ABC
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6AC0
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6AC4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6AC8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6ACC
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6AD0
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6AD4
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6AD8
+
+#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6ADC
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6AE0
+
+#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6AE4
+
+#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6AE8
+
+#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6AEC
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6AF0
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6AF4
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6AF8
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6AFC
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B00
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B04
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B08
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B0C
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B10
+
+#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B14
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B18
+
+#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B1C
+
+#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B20
+
+#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B24
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B28
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B2C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B30
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B34
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6B38
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6B3C
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6B40
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6B44
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6B48
+
+#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6B4C
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6B50
+
+#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6B54
+
+#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6B58
+
+#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6B5C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6B60
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6B64
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6B68
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6B6C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6B70
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6B74
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6B78
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6B7C
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6B80
+
+#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6B84
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6B88
+
+#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6B8C
+
+#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6B90
+
+#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6B94
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6B98
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6B9C
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6BA0
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6BA4
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6BA8
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6BAC
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6BB0
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6BB4
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6BB8
+
+#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6BBC
+
+#define mmTPC7_CFG_QM_TENSOR_8_BASE_ADDR_LOW 0xFC6BC0
+
+#define mmTPC7_CFG_QM_TENSOR_8_BASE_ADDR_HIGH 0xFC6BC4
+
+#define mmTPC7_CFG_QM_TENSOR_8_PADDING_VALUE 0xFC6BC8
+
+#define mmTPC7_CFG_QM_TENSOR_8_TENSOR_CONFIG 0xFC6BCC
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_0_SIZE 0xFC6BD0
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_0_STRIDE 0xFC6BD4
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_1_SIZE 0xFC6BD8
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_1_STRIDE 0xFC6BDC
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_2_SIZE 0xFC6BE0
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_2_STRIDE 0xFC6BE4
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_3_SIZE 0xFC6BE8
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_3_STRIDE 0xFC6BEC
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_4_SIZE 0xFC6BF0
+
+#define mmTPC7_CFG_QM_TENSOR_8_DIM_4_STRIDE 0xFC6BF4
+
+#define mmTPC7_CFG_QM_TENSOR_9_BASE_ADDR_LOW 0xFC6BF8
+
+#define mmTPC7_CFG_QM_TENSOR_9_BASE_ADDR_HIGH 0xFC6BFC
+
+#define mmTPC7_CFG_QM_TENSOR_9_PADDING_VALUE 0xFC6C00
+
+#define mmTPC7_CFG_QM_TENSOR_9_TENSOR_CONFIG 0xFC6C04
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_0_SIZE 0xFC6C08
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_0_STRIDE 0xFC6C0C
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_1_SIZE 0xFC6C10
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_1_STRIDE 0xFC6C14
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_2_SIZE 0xFC6C18
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_2_STRIDE 0xFC6C1C
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_3_SIZE 0xFC6C20
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_3_STRIDE 0xFC6C24
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_4_SIZE 0xFC6C28
+
+#define mmTPC7_CFG_QM_TENSOR_9_DIM_4_STRIDE 0xFC6C2C
+
+#define mmTPC7_CFG_QM_TENSOR_10_BASE_ADDR_LOW 0xFC6C30
+
+#define mmTPC7_CFG_QM_TENSOR_10_BASE_ADDR_HIGH 0xFC6C34
+
+#define mmTPC7_CFG_QM_TENSOR_10_PADDING_VALUE 0xFC6C38
+
+#define mmTPC7_CFG_QM_TENSOR_10_TENSOR_CONFIG 0xFC6C3C
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_0_SIZE 0xFC6C40
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_0_STRIDE 0xFC6C44
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_1_SIZE 0xFC6C48
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_1_STRIDE 0xFC6C4C
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_2_SIZE 0xFC6C50
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_2_STRIDE 0xFC6C54
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_3_SIZE 0xFC6C58
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_3_STRIDE 0xFC6C5C
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_4_SIZE 0xFC6C60
+
+#define mmTPC7_CFG_QM_TENSOR_10_DIM_4_STRIDE 0xFC6C64
+
+#define mmTPC7_CFG_QM_TENSOR_11_BASE_ADDR_LOW 0xFC6C68
+
+#define mmTPC7_CFG_QM_TENSOR_11_BASE_ADDR_HIGH 0xFC6C6C
+
+#define mmTPC7_CFG_QM_TENSOR_11_PADDING_VALUE 0xFC6C70
+
+#define mmTPC7_CFG_QM_TENSOR_11_TENSOR_CONFIG 0xFC6C74
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_0_SIZE 0xFC6C78
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_0_STRIDE 0xFC6C7C
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_1_SIZE 0xFC6C80
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_1_STRIDE 0xFC6C84
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_2_SIZE 0xFC6C88
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_2_STRIDE 0xFC6C8C
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_3_SIZE 0xFC6C90
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_3_STRIDE 0xFC6C94
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_4_SIZE 0xFC6C98
+
+#define mmTPC7_CFG_QM_TENSOR_11_DIM_4_STRIDE 0xFC6C9C
+
+#define mmTPC7_CFG_QM_TENSOR_12_BASE_ADDR_LOW 0xFC6CA0
+
+#define mmTPC7_CFG_QM_TENSOR_12_BASE_ADDR_HIGH 0xFC6CA4
+
+#define mmTPC7_CFG_QM_TENSOR_12_PADDING_VALUE 0xFC6CA8
+
+#define mmTPC7_CFG_QM_TENSOR_12_TENSOR_CONFIG 0xFC6CAC
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_0_SIZE 0xFC6CB0
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_0_STRIDE 0xFC6CB4
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_1_SIZE 0xFC6CB8
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_1_STRIDE 0xFC6CBC
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_2_SIZE 0xFC6CC0
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_2_STRIDE 0xFC6CC4
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_3_SIZE 0xFC6CC8
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_3_STRIDE 0xFC6CCC
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_4_SIZE 0xFC6CD0
+
+#define mmTPC7_CFG_QM_TENSOR_12_DIM_4_STRIDE 0xFC6CD4
+
+#define mmTPC7_CFG_QM_TENSOR_13_BASE_ADDR_LOW 0xFC6CD8
+
+#define mmTPC7_CFG_QM_TENSOR_13_BASE_ADDR_HIGH 0xFC6CDC
+
+#define mmTPC7_CFG_QM_TENSOR_13_PADDING_VALUE 0xFC6CE0
+
+#define mmTPC7_CFG_QM_TENSOR_13_TENSOR_CONFIG 0xFC6CE4
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_0_SIZE 0xFC6CE8
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_0_STRIDE 0xFC6CEC
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_1_SIZE 0xFC6CF0
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_1_STRIDE 0xFC6CF4
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_2_SIZE 0xFC6CF8
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_2_STRIDE 0xFC6CFC
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_3_SIZE 0xFC6D00
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_3_STRIDE 0xFC6D04
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_4_SIZE 0xFC6D08
+
+#define mmTPC7_CFG_QM_TENSOR_13_DIM_4_STRIDE 0xFC6D0C
+
+#define mmTPC7_CFG_QM_TENSOR_14_BASE_ADDR_LOW 0xFC6D10
+
+#define mmTPC7_CFG_QM_TENSOR_14_BASE_ADDR_HIGH 0xFC6D14
+
+#define mmTPC7_CFG_QM_TENSOR_14_PADDING_VALUE 0xFC6D18
+
+#define mmTPC7_CFG_QM_TENSOR_14_TENSOR_CONFIG 0xFC6D1C
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_0_SIZE 0xFC6D20
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_0_STRIDE 0xFC6D24
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_1_SIZE 0xFC6D28
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_1_STRIDE 0xFC6D2C
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_2_SIZE 0xFC6D30
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_2_STRIDE 0xFC6D34
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_3_SIZE 0xFC6D38
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_3_STRIDE 0xFC6D3C
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_4_SIZE 0xFC6D40
+
+#define mmTPC7_CFG_QM_TENSOR_14_DIM_4_STRIDE 0xFC6D44
+
+#define mmTPC7_CFG_QM_TENSOR_15_BASE_ADDR_LOW 0xFC6D48
+
+#define mmTPC7_CFG_QM_TENSOR_15_BASE_ADDR_HIGH 0xFC6D4C
+
+#define mmTPC7_CFG_QM_TENSOR_15_PADDING_VALUE 0xFC6D50
+
+#define mmTPC7_CFG_QM_TENSOR_15_TENSOR_CONFIG 0xFC6D54
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_0_SIZE 0xFC6D58
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_0_STRIDE 0xFC6D5C
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_1_SIZE 0xFC6D60
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_1_STRIDE 0xFC6D64
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_2_SIZE 0xFC6D68
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_2_STRIDE 0xFC6D6C
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_3_SIZE 0xFC6D70
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_3_STRIDE 0xFC6D74
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_4_SIZE 0xFC6D78
+
+#define mmTPC7_CFG_QM_TENSOR_15_DIM_4_STRIDE 0xFC6D7C
+
+#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D80
+
+#define mmTPC7_CFG_QM_SYNC_OBJECT_ADDR 0xFC6D84
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6D88
+
+#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6D8C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6D90
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6D94
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6D98
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6D9C
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6DA0
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6DA4
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6DA8
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6DAC
+
+#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6DB0
+
+#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6DB4
+
+#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6DB8
+
+#define mmTPC7_CFG_QM_KERNEL_ID 0xFC6DBC
+
+#define mmTPC7_CFG_QM_SRF_0 0xFC6DC0
+
+#define mmTPC7_CFG_QM_SRF_1 0xFC6DC4
+
+#define mmTPC7_CFG_QM_SRF_2 0xFC6DC8
+
+#define mmTPC7_CFG_QM_SRF_3 0xFC6DCC
+
+#define mmTPC7_CFG_QM_SRF_4 0xFC6DD0
+
+#define mmTPC7_CFG_QM_SRF_5 0xFC6DD4
+
+#define mmTPC7_CFG_QM_SRF_6 0xFC6DD8
+
+#define mmTPC7_CFG_QM_SRF_7 0xFC6DDC
+
+#define mmTPC7_CFG_QM_SRF_8 0xFC6DE0
+
+#define mmTPC7_CFG_QM_SRF_9 0xFC6DE4
+
+#define mmTPC7_CFG_QM_SRF_10 0xFC6DE8
+
+#define mmTPC7_CFG_QM_SRF_11 0xFC6DEC
+
+#define mmTPC7_CFG_QM_SRF_12 0xFC6DF0
+
+#define mmTPC7_CFG_QM_SRF_13 0xFC6DF4
+
+#define mmTPC7_CFG_QM_SRF_14 0xFC6DF8
+
+#define mmTPC7_CFG_QM_SRF_15 0xFC6DFC
+
+#define mmTPC7_CFG_QM_SRF_16 0xFC6E00
+
+#define mmTPC7_CFG_QM_SRF_17 0xFC6E04
+
+#define mmTPC7_CFG_QM_SRF_18 0xFC6E08
+
+#define mmTPC7_CFG_QM_SRF_19 0xFC6E0C
+
+#define mmTPC7_CFG_QM_SRF_20 0xFC6E10
+
+#define mmTPC7_CFG_QM_SRF_21 0xFC6E14
+
+#define mmTPC7_CFG_QM_SRF_22 0xFC6E18
+
+#define mmTPC7_CFG_QM_SRF_23 0xFC6E1C
+
+#define mmTPC7_CFG_QM_SRF_24 0xFC6E20
+
+#define mmTPC7_CFG_QM_SRF_25 0xFC6E24
+
+#define mmTPC7_CFG_QM_SRF_26 0xFC6E28
+
+#define mmTPC7_CFG_QM_SRF_27 0xFC6E2C
+
+#define mmTPC7_CFG_QM_SRF_28 0xFC6E30
+
+#define mmTPC7_CFG_QM_SRF_29 0xFC6E34
+
+#define mmTPC7_CFG_QM_SRF_30 0xFC6E38
+
+#define mmTPC7_CFG_QM_SRF_31 0xFC6E3C
+
+#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
new file mode 100644
index 000000000000..5c36c972c027
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
@@ -0,0 +1,834 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_TPC7_QM_REGS_H_
+#define ASIC_REG_TPC7_QM_REGS_H_
+
+/*
+ *****************************************
+ * TPC7_QM (Prototype: QMAN)
+ *****************************************
+ */
+
+#define mmTPC7_QM_GLBL_CFG0 0xFC8000
+
+#define mmTPC7_QM_GLBL_CFG1 0xFC8004
+
+#define mmTPC7_QM_GLBL_PROT 0xFC8008
+
+#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_0 0xFC8010
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_1 0xFC8014
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_2 0xFC8018
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_3 0xFC801C
+
+#define mmTPC7_QM_GLBL_SECURE_PROPS_4 0xFC8020
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_0 0xFC8024
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_1 0xFC8028
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_2 0xFC802C
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_3 0xFC8030
+
+#define mmTPC7_QM_GLBL_NON_SECURE_PROPS_4 0xFC8034
+
+#define mmTPC7_QM_GLBL_STS0 0xFC8038
+
+#define mmTPC7_QM_GLBL_STS1_0 0xFC8040
+
+#define mmTPC7_QM_GLBL_STS1_1 0xFC8044
+
+#define mmTPC7_QM_GLBL_STS1_2 0xFC8048
+
+#define mmTPC7_QM_GLBL_STS1_3 0xFC804C
+
+#define mmTPC7_QM_GLBL_STS1_4 0xFC8050
+
+#define mmTPC7_QM_GLBL_MSG_EN_0 0xFC8054
+
+#define mmTPC7_QM_GLBL_MSG_EN_1 0xFC8058
+
+#define mmTPC7_QM_GLBL_MSG_EN_2 0xFC805C
+
+#define mmTPC7_QM_GLBL_MSG_EN_3 0xFC8060
+
+#define mmTPC7_QM_GLBL_MSG_EN_4 0xFC8068
+
+#define mmTPC7_QM_PQ_BASE_LO_0 0xFC8070
+
+#define mmTPC7_QM_PQ_BASE_LO_1 0xFC8074
+
+#define mmTPC7_QM_PQ_BASE_LO_2 0xFC8078
+
+#define mmTPC7_QM_PQ_BASE_LO_3 0xFC807C
+
+#define mmTPC7_QM_PQ_BASE_HI_0 0xFC8080
+
+#define mmTPC7_QM_PQ_BASE_HI_1 0xFC8084
+
+#define mmTPC7_QM_PQ_BASE_HI_2 0xFC8088
+
+#define mmTPC7_QM_PQ_BASE_HI_3 0xFC808C
+
+#define mmTPC7_QM_PQ_SIZE_0 0xFC8090
+
+#define mmTPC7_QM_PQ_SIZE_1 0xFC8094
+
+#define mmTPC7_QM_PQ_SIZE_2 0xFC8098
+
+#define mmTPC7_QM_PQ_SIZE_3 0xFC809C
+
+#define mmTPC7_QM_PQ_PI_0 0xFC80A0
+
+#define mmTPC7_QM_PQ_PI_1 0xFC80A4
+
+#define mmTPC7_QM_PQ_PI_2 0xFC80A8
+
+#define mmTPC7_QM_PQ_PI_3 0xFC80AC
+
+#define mmTPC7_QM_PQ_CI_0 0xFC80B0
+
+#define mmTPC7_QM_PQ_CI_1 0xFC80B4
+
+#define mmTPC7_QM_PQ_CI_2 0xFC80B8
+
+#define mmTPC7_QM_PQ_CI_3 0xFC80BC
+
+#define mmTPC7_QM_PQ_CFG0_0 0xFC80C0
+
+#define mmTPC7_QM_PQ_CFG0_1 0xFC80C4
+
+#define mmTPC7_QM_PQ_CFG0_2 0xFC80C8
+
+#define mmTPC7_QM_PQ_CFG0_3 0xFC80CC
+
+#define mmTPC7_QM_PQ_CFG1_0 0xFC80D0
+
+#define mmTPC7_QM_PQ_CFG1_1 0xFC80D4
+
+#define mmTPC7_QM_PQ_CFG1_2 0xFC80D8
+
+#define mmTPC7_QM_PQ_CFG1_3 0xFC80DC
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_0 0xFC80E0
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_1 0xFC80E4
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_2 0xFC80E8
+
+#define mmTPC7_QM_PQ_ARUSER_31_11_3 0xFC80EC
+
+#define mmTPC7_QM_PQ_STS0_0 0xFC80F0
+
+#define mmTPC7_QM_PQ_STS0_1 0xFC80F4
+
+#define mmTPC7_QM_PQ_STS0_2 0xFC80F8
+
+#define mmTPC7_QM_PQ_STS0_3 0xFC80FC
+
+#define mmTPC7_QM_PQ_STS1_0 0xFC8100
+
+#define mmTPC7_QM_PQ_STS1_1 0xFC8104
+
+#define mmTPC7_QM_PQ_STS1_2 0xFC8108
+
+#define mmTPC7_QM_PQ_STS1_3 0xFC810C
+
+#define mmTPC7_QM_CQ_CFG0_0 0xFC8110
+
+#define mmTPC7_QM_CQ_CFG0_1 0xFC8114
+
+#define mmTPC7_QM_CQ_CFG0_2 0xFC8118
+
+#define mmTPC7_QM_CQ_CFG0_3 0xFC811C
+
+#define mmTPC7_QM_CQ_CFG0_4 0xFC8120
+
+#define mmTPC7_QM_CQ_CFG1_0 0xFC8124
+
+#define mmTPC7_QM_CQ_CFG1_1 0xFC8128
+
+#define mmTPC7_QM_CQ_CFG1_2 0xFC812C
+
+#define mmTPC7_QM_CQ_CFG1_3 0xFC8130
+
+#define mmTPC7_QM_CQ_CFG1_4 0xFC8134
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_0 0xFC8138
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_1 0xFC813C
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_2 0xFC8140
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_3 0xFC8144
+
+#define mmTPC7_QM_CQ_ARUSER_31_11_4 0xFC8148
+
+#define mmTPC7_QM_CQ_STS0_0 0xFC814C
+
+#define mmTPC7_QM_CQ_STS0_1 0xFC8150
+
+#define mmTPC7_QM_CQ_STS0_2 0xFC8154
+
+#define mmTPC7_QM_CQ_STS0_3 0xFC8158
+
+#define mmTPC7_QM_CQ_STS0_4 0xFC815C
+
+#define mmTPC7_QM_CQ_STS1_0 0xFC8160
+
+#define mmTPC7_QM_CQ_STS1_1 0xFC8164
+
+#define mmTPC7_QM_CQ_STS1_2 0xFC8168
+
+#define mmTPC7_QM_CQ_STS1_3 0xFC816C
+
+#define mmTPC7_QM_CQ_STS1_4 0xFC8170
+
+#define mmTPC7_QM_CQ_PTR_LO_0 0xFC8174
+
+#define mmTPC7_QM_CQ_PTR_HI_0 0xFC8178
+
+#define mmTPC7_QM_CQ_TSIZE_0 0xFC817C
+
+#define mmTPC7_QM_CQ_CTL_0 0xFC8180
+
+#define mmTPC7_QM_CQ_PTR_LO_1 0xFC8184
+
+#define mmTPC7_QM_CQ_PTR_HI_1 0xFC8188
+
+#define mmTPC7_QM_CQ_TSIZE_1 0xFC818C
+
+#define mmTPC7_QM_CQ_CTL_1 0xFC8190
+
+#define mmTPC7_QM_CQ_PTR_LO_2 0xFC8194
+
+#define mmTPC7_QM_CQ_PTR_HI_2 0xFC8198
+
+#define mmTPC7_QM_CQ_TSIZE_2 0xFC819C
+
+#define mmTPC7_QM_CQ_CTL_2 0xFC81A0
+
+#define mmTPC7_QM_CQ_PTR_LO_3 0xFC81A4
+
+#define mmTPC7_QM_CQ_PTR_HI_3 0xFC81A8
+
+#define mmTPC7_QM_CQ_TSIZE_3 0xFC81AC
+
+#define mmTPC7_QM_CQ_CTL_3 0xFC81B0
+
+#define mmTPC7_QM_CQ_PTR_LO_4 0xFC81B4
+
+#define mmTPC7_QM_CQ_PTR_HI_4 0xFC81B8
+
+#define mmTPC7_QM_CQ_TSIZE_4 0xFC81BC
+
+#define mmTPC7_QM_CQ_CTL_4 0xFC81C0
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_0 0xFC81C4
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_1 0xFC81C8
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_2 0xFC81CC
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_3 0xFC81D0
+
+#define mmTPC7_QM_CQ_PTR_LO_STS_4 0xFC81D4
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_0 0xFC81D8
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_1 0xFC81DC
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_2 0xFC81E0
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_3 0xFC81E4
+
+#define mmTPC7_QM_CQ_PTR_HI_STS_4 0xFC81E8
+
+#define mmTPC7_QM_CQ_TSIZE_STS_0 0xFC81EC
+
+#define mmTPC7_QM_CQ_TSIZE_STS_1 0xFC81F0
+
+#define mmTPC7_QM_CQ_TSIZE_STS_2 0xFC81F4
+
+#define mmTPC7_QM_CQ_TSIZE_STS_3 0xFC81F8
+
+#define mmTPC7_QM_CQ_TSIZE_STS_4 0xFC81FC
+
+#define mmTPC7_QM_CQ_CTL_STS_0 0xFC8200
+
+#define mmTPC7_QM_CQ_CTL_STS_1 0xFC8204
+
+#define mmTPC7_QM_CQ_CTL_STS_2 0xFC8208
+
+#define mmTPC7_QM_CQ_CTL_STS_3 0xFC820C
+
+#define mmTPC7_QM_CQ_CTL_STS_4 0xFC8210
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_0 0xFC8214
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_1 0xFC8218
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_2 0xFC821C
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_3 0xFC8220
+
+#define mmTPC7_QM_CQ_IFIFO_CNT_4 0xFC8224
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_0 0xFC8228
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_1 0xFC822C
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_2 0xFC8230
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_3 0xFC8234
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO_4 0xFC8238
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_0 0xFC823C
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_1 0xFC8240
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_2 0xFC8244
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_3 0xFC8248
+
+#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI_4 0xFC824C
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_0 0xFC8250
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_1 0xFC8254
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_2 0xFC8258
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_3 0xFC825C
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO_4 0xFC8260
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_0 0xFC8264
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_1 0xFC8268
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_2 0xFC826C
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_3 0xFC8270
+
+#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI_4 0xFC8274
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_0 0xFC8278
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_1 0xFC827C
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_2 0xFC8280
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_3 0xFC8284
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO_4 0xFC8288
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_0 0xFC828C
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_1 0xFC8290
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_2 0xFC8294
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_3 0xFC8298
+
+#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI_4 0xFC829C
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_0 0xFC82A0
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_1 0xFC82A4
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_2 0xFC82A8
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_3 0xFC82AC
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO_4 0xFC82B0
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_0 0xFC82B4
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_1 0xFC82B8
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_2 0xFC82BC
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_3 0xFC82C0
+
+#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI_4 0xFC82C4
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_0 0xFC82C8
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_1 0xFC82CC
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_2 0xFC82D0
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_3 0xFC82D4
+
+#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET_4 0xFC82D8
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xFC82E0
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xFC82E4
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xFC82E8
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xFC82EC
+
+#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xFC82F0
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0xFC82F4
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0xFC82F8
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0xFC82FC
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0xFC8300
+
+#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0xFC8304
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_0 0xFC8308
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_1 0xFC830C
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_2 0xFC8310
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_3 0xFC8314
+
+#define mmTPC7_QM_CP_FENCE0_RDATA_4 0xFC8318
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_0 0xFC831C
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_1 0xFC8320
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_2 0xFC8324
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_3 0xFC8328
+
+#define mmTPC7_QM_CP_FENCE1_RDATA_4 0xFC832C
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_0 0xFC8330
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_1 0xFC8334
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_2 0xFC8338
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_3 0xFC833C
+
+#define mmTPC7_QM_CP_FENCE2_RDATA_4 0xFC8340
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_0 0xFC8344
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_1 0xFC8348
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_2 0xFC834C
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_3 0xFC8350
+
+#define mmTPC7_QM_CP_FENCE3_RDATA_4 0xFC8354
+
+#define mmTPC7_QM_CP_FENCE0_CNT_0 0xFC8358
+
+#define mmTPC7_QM_CP_FENCE0_CNT_1 0xFC835C
+
+#define mmTPC7_QM_CP_FENCE0_CNT_2 0xFC8360
+
+#define mmTPC7_QM_CP_FENCE0_CNT_3 0xFC8364
+
+#define mmTPC7_QM_CP_FENCE0_CNT_4 0xFC8368
+
+#define mmTPC7_QM_CP_FENCE1_CNT_0 0xFC836C
+
+#define mmTPC7_QM_CP_FENCE1_CNT_1 0xFC8370
+
+#define mmTPC7_QM_CP_FENCE1_CNT_2 0xFC8374
+
+#define mmTPC7_QM_CP_FENCE1_CNT_3 0xFC8378
+
+#define mmTPC7_QM_CP_FENCE1_CNT_4 0xFC837C
+
+#define mmTPC7_QM_CP_FENCE2_CNT_0 0xFC8380
+
+#define mmTPC7_QM_CP_FENCE2_CNT_1 0xFC8384
+
+#define mmTPC7_QM_CP_FENCE2_CNT_2 0xFC8388
+
+#define mmTPC7_QM_CP_FENCE2_CNT_3 0xFC838C
+
+#define mmTPC7_QM_CP_FENCE2_CNT_4 0xFC8390
+
+#define mmTPC7_QM_CP_FENCE3_CNT_0 0xFC8394
+
+#define mmTPC7_QM_CP_FENCE3_CNT_1 0xFC8398
+
+#define mmTPC7_QM_CP_FENCE3_CNT_2 0xFC839C
+
+#define mmTPC7_QM_CP_FENCE3_CNT_3 0xFC83A0
+
+#define mmTPC7_QM_CP_FENCE3_CNT_4 0xFC83A4
+
+#define mmTPC7_QM_CP_STS_0 0xFC83A8
+
+#define mmTPC7_QM_CP_STS_1 0xFC83AC
+
+#define mmTPC7_QM_CP_STS_2 0xFC83B0
+
+#define mmTPC7_QM_CP_STS_3 0xFC83B4
+
+#define mmTPC7_QM_CP_STS_4 0xFC83B8
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_0 0xFC83BC
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_1 0xFC83C0
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_2 0xFC83C4
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_3 0xFC83C8
+
+#define mmTPC7_QM_CP_CURRENT_INST_LO_4 0xFC83CC
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_0 0xFC83D0
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_1 0xFC83D4
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_2 0xFC83D8
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_3 0xFC83DC
+
+#define mmTPC7_QM_CP_CURRENT_INST_HI_4 0xFC83E0
+
+#define mmTPC7_QM_CP_BARRIER_CFG_0 0xFC83F4
+
+#define mmTPC7_QM_CP_BARRIER_CFG_1 0xFC83F8
+
+#define mmTPC7_QM_CP_BARRIER_CFG_2 0xFC83FC
+
+#define mmTPC7_QM_CP_BARRIER_CFG_3 0xFC8400
+
+#define mmTPC7_QM_CP_BARRIER_CFG_4 0xFC8404
+
+#define mmTPC7_QM_CP_DBG_0_0 0xFC8408
+
+#define mmTPC7_QM_CP_DBG_0_1 0xFC840C
+
+#define mmTPC7_QM_CP_DBG_0_2 0xFC8410
+
+#define mmTPC7_QM_CP_DBG_0_3 0xFC8414
+
+#define mmTPC7_QM_CP_DBG_0_4 0xFC8418
+
+#define mmTPC7_QM_CP_ARUSER_31_11_0 0xFC841C
+
+#define mmTPC7_QM_CP_ARUSER_31_11_1 0xFC8420
+
+#define mmTPC7_QM_CP_ARUSER_31_11_2 0xFC8424
+
+#define mmTPC7_QM_CP_ARUSER_31_11_3 0xFC8428
+
+#define mmTPC7_QM_CP_ARUSER_31_11_4 0xFC842C
+
+#define mmTPC7_QM_CP_AWUSER_31_11_0 0xFC8430
+
+#define mmTPC7_QM_CP_AWUSER_31_11_1 0xFC8434
+
+#define mmTPC7_QM_CP_AWUSER_31_11_2 0xFC8438
+
+#define mmTPC7_QM_CP_AWUSER_31_11_3 0xFC843C
+
+#define mmTPC7_QM_CP_AWUSER_31_11_4 0xFC8440
+
+#define mmTPC7_QM_ARB_CFG_0 0xFC8A00
+
+#define mmTPC7_QM_ARB_CHOISE_Q_PUSH 0xFC8A04
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_0 0xFC8A08
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_1 0xFC8A0C
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_2 0xFC8A10
+
+#define mmTPC7_QM_ARB_WRR_WEIGHT_3 0xFC8A14
+
+#define mmTPC7_QM_ARB_CFG_1 0xFC8A18
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_0 0xFC8A20
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_1 0xFC8A24
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_2 0xFC8A28
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_3 0xFC8A2C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_4 0xFC8A30
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_5 0xFC8A34
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_6 0xFC8A38
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_7 0xFC8A3C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_8 0xFC8A40
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_9 0xFC8A44
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_10 0xFC8A48
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_11 0xFC8A4C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_12 0xFC8A50
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_13 0xFC8A54
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_14 0xFC8A58
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_15 0xFC8A5C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_16 0xFC8A60
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_17 0xFC8A64
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_18 0xFC8A68
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_19 0xFC8A6C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_20 0xFC8A70
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_21 0xFC8A74
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_22 0xFC8A78
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_23 0xFC8A7C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_24 0xFC8A80
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_25 0xFC8A84
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_26 0xFC8A88
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_27 0xFC8A8C
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_28 0xFC8A90
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_29 0xFC8A94
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_30 0xFC8A98
+
+#define mmTPC7_QM_ARB_MST_AVAIL_CRED_31 0xFC8A9C
+
+#define mmTPC7_QM_ARB_MST_CRED_INC 0xFC8AA0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_0 0xFC8AA4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_1 0xFC8AA8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_2 0xFC8AAC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_3 0xFC8AB0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_4 0xFC8AB4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_5 0xFC8AB8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_6 0xFC8ABC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_7 0xFC8AC0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_8 0xFC8AC4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_9 0xFC8AC8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_10 0xFC8ACC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_11 0xFC8AD0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_12 0xFC8AD4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_13 0xFC8AD8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_14 0xFC8ADC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_15 0xFC8AE0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_16 0xFC8AE4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_17 0xFC8AE8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_18 0xFC8AEC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_19 0xFC8AF0
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_20 0xFC8AF4
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_21 0xFC8AF8
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_22 0xFC8AFC
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_23 0xFC8B00
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_24 0xFC8B04
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_25 0xFC8B08
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_26 0xFC8B0C
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_27 0xFC8B10
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_28 0xFC8B14
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_29 0xFC8B18
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_30 0xFC8B1C
+
+#define mmTPC7_QM_ARB_MST_CHOISE_PUSH_OFST_31 0xFC8B20
+
+#define mmTPC7_QM_ARB_SLV_MASTER_INC_CRED_OFST 0xFC8B28
+
+#define mmTPC7_QM_ARB_MST_SLAVE_EN 0xFC8B2C
+
+#define mmTPC7_QM_ARB_MST_QUIET_PER 0xFC8B34
+
+#define mmTPC7_QM_ARB_SLV_CHOISE_WDT 0xFC8B38
+
+#define mmTPC7_QM_ARB_SLV_ID 0xFC8B3C
+
+#define mmTPC7_QM_ARB_MSG_MAX_INFLIGHT 0xFC8B44
+
+#define mmTPC7_QM_ARB_MSG_AWUSER_31_11 0xFC8B48
+
+#define mmTPC7_QM_ARB_MSG_AWUSER_SEC_PROP 0xFC8B4C
+
+#define mmTPC7_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0xFC8B50
+
+#define mmTPC7_QM_ARB_BASE_LO 0xFC8B54
+
+#define mmTPC7_QM_ARB_BASE_HI 0xFC8B58
+
+#define mmTPC7_QM_ARB_STATE_STS 0xFC8B80
+
+#define mmTPC7_QM_ARB_CHOISE_FULLNESS_STS 0xFC8B84
+
+#define mmTPC7_QM_ARB_MSG_STS 0xFC8B88
+
+#define mmTPC7_QM_ARB_SLV_CHOISE_Q_HEAD 0xFC8B8C
+
+#define mmTPC7_QM_ARB_ERR_CAUSE 0xFC8B9C
+
+#define mmTPC7_QM_ARB_ERR_MSG_EN 0xFC8BA0
+
+#define mmTPC7_QM_ARB_ERR_STS_DRP 0xFC8BA8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_0 0xFC8BB0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_1 0xFC8BB4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_2 0xFC8BB8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_3 0xFC8BBC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_4 0xFC8BC0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_5 0xFC8BC4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_6 0xFC8BC8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_7 0xFC8BCC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_8 0xFC8BD0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_9 0xFC8BD4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_10 0xFC8BD8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_11 0xFC8BDC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_12 0xFC8BE0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_13 0xFC8BE4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_14 0xFC8BE8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_15 0xFC8BEC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_16 0xFC8BF0
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_17 0xFC8BF4
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_18 0xFC8BF8
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_19 0xFC8BFC
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_20 0xFC8C00
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_21 0xFC8C04
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_22 0xFC8C08
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_23 0xFC8C0C
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_24 0xFC8C10
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_25 0xFC8C14
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_26 0xFC8C18
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_27 0xFC8C1C
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_28 0xFC8C20
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_29 0xFC8C24
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_30 0xFC8C28
+
+#define mmTPC7_QM_ARB_MST_CRED_STS_31 0xFC8C2C
+
+#define mmTPC7_QM_CGM_CFG 0xFC8C70
+
+#define mmTPC7_QM_CGM_STS 0xFC8C74
+
+#define mmTPC7_QM_CGM_CFG1 0xFC8C78
+
+#define mmTPC7_QM_LOCAL_RANGE_BASE 0xFC8C80
+
+#define mmTPC7_QM_LOCAL_RANGE_SIZE 0xFC8C84
+
+#define mmTPC7_QM_CSMR_STRICT_PRIO_CFG 0xFC8C90
+
+#define mmTPC7_QM_HBW_RD_RATE_LIM_CFG_1 0xFC8C94
+
+#define mmTPC7_QM_LBW_WR_RATE_LIM_CFG_0 0xFC8C98
+
+#define mmTPC7_QM_LBW_WR_RATE_LIM_CFG_1 0xFC8C9C
+
+#define mmTPC7_QM_HBW_RD_RATE_LIM_CFG_0 0xFC8CA0
+
+#define mmTPC7_QM_GLBL_AXCACHE 0xFC8CA4
+
+#define mmTPC7_QM_IND_GW_APB_CFG 0xFC8CB0
+
+#define mmTPC7_QM_IND_GW_APB_WDATA 0xFC8CB4
+
+#define mmTPC7_QM_IND_GW_APB_RDATA 0xFC8CB8
+
+#define mmTPC7_QM_IND_GW_APB_STATUS 0xFC8CBC
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8CD0
+
+#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8CD4
+
+#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8CD8
+
+#define mmTPC7_QM_GLBL_MEM_INIT_BUSY 0xFC8D00
+
+#endif /* ASIC_REG_TPC7_QM_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/misc/habanalabs/include/gaudi/gaudi.h
new file mode 100644
index 000000000000..8829891d3eef
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_H
+#define GAUDI_H
+
+#define SRAM_BAR_ID 0
+#define CFG_BAR_ID 2
+#define HBM_BAR_ID 4
+
+#define SRAM_BAR_SIZE 0x4000000ull /* 64MB */
+#define CFG_BAR_SIZE 0x8000000ull /* 128MB */
+
+#define CFG_BASE 0x7FFC000000ull
+#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/
+
+#define SRAM_BASE_ADDR 0x7FF0000000ull
+#define SRAM_SIZE 0x1400000 /* 20MB */
+
+#define SPI_FLASH_BASE_ADDR 0x7FF8000000ull
+
+#define PSOC_SCRATCHPAD_ADDR 0x7FFBFE0000ull
+#define PSOC_SCRATCHPAD_SIZE 0x10000 /* 64KB */
+
+#define PCIE_FW_SRAM_ADDR 0x7FFBFF0000ull
+#define PCIE_FW_SRAM_SIZE 0x8000 /* 32KB */
+
+#define DRAM_PHYS_BASE 0x0ull
+
+#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */
+#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */
+
+#define GAUDI_MSI_ENTRIES 32
+
+#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */
+
+#define MAX_ASID 1024
+
+#define PROT_BITS_OFFS 0xF80
+
+#define MME_NUMBER_OF_MASTER_ENGINES 2
+
+#define TPC_NUMBER_OF_ENGINES 8
+
+#define DMA_NUMBER_OF_CHANNELS 8
+
+#define NIC_NUMBER_OF_MACROS 5
+
+#define NIC_NUMBER_OF_ENGINES (NIC_NUMBER_OF_MACROS * 2)
+
+#define NUMBER_OF_IF 8
+
+#define DEVICE_CACHE_LINE_SIZE 128
+
+#endif /* GAUDI_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
new file mode 100644
index 000000000000..9ccba8437ec9
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI_ASYNC_EVENTS_H_
+#define __GAUDI_ASYNC_EVENTS_H_
+
+enum gaudi_async_event_id {
+ GAUDI_EVENT_PCIE_CORE_SERR = 32,
+ GAUDI_EVENT_PCIE_CORE_DERR = 33,
+ GAUDI_EVENT_PCIE_IF_SERR = 34,
+ GAUDI_EVENT_PCIE_IF_DERR = 35,
+ GAUDI_EVENT_PCIE_PHY_SERR = 36,
+ GAUDI_EVENT_PCIE_PHY_DERR = 37,
+ GAUDI_EVENT_TPC0_SERR = 38,
+ GAUDI_EVENT_TPC1_SERR = 39,
+ GAUDI_EVENT_TPC2_SERR = 40,
+ GAUDI_EVENT_TPC3_SERR = 41,
+ GAUDI_EVENT_TPC4_SERR = 42,
+ GAUDI_EVENT_TPC5_SERR = 43,
+ GAUDI_EVENT_TPC6_SERR = 44,
+ GAUDI_EVENT_TPC7_SERR = 45,
+ GAUDI_EVENT_TPC0_DERR = 46,
+ GAUDI_EVENT_TPC1_DERR = 47,
+ GAUDI_EVENT_TPC2_DERR = 48,
+ GAUDI_EVENT_TPC3_DERR = 49,
+ GAUDI_EVENT_TPC4_DERR = 50,
+ GAUDI_EVENT_TPC5_DERR = 51,
+ GAUDI_EVENT_TPC6_DERR = 52,
+ GAUDI_EVENT_TPC7_DERR = 53,
+ GAUDI_EVENT_MME0_ACC_SERR = 54,
+ GAUDI_EVENT_MME0_ACC_DERR = 55,
+ GAUDI_EVENT_MME0_SBAB_SERR = 56,
+ GAUDI_EVENT_MME0_SBAB_DERR = 57,
+ GAUDI_EVENT_MME1_ACC_SERR = 58,
+ GAUDI_EVENT_MME1_ACC_DERR = 59,
+ GAUDI_EVENT_MME1_SBAB_SERR = 60,
+ GAUDI_EVENT_MME1_SBAB_DERR = 61,
+ GAUDI_EVENT_MME2_ACC_SERR = 62,
+ GAUDI_EVENT_MME2_ACC_DERR = 63,
+ GAUDI_EVENT_MME2_SBAB_SERR = 64,
+ GAUDI_EVENT_MME2_SBAB_DERR = 65,
+ GAUDI_EVENT_MME3_ACC_SERR = 66,
+ GAUDI_EVENT_MME3_ACC_DERR = 67,
+ GAUDI_EVENT_MME3_SBAB_SERR = 68,
+ GAUDI_EVENT_MME3_SBAB_DERR = 69,
+ GAUDI_EVENT_DMA0_SERR_ECC = 70,
+ GAUDI_EVENT_DMA1_SERR_ECC = 71,
+ GAUDI_EVENT_DMA2_SERR_ECC = 72,
+ GAUDI_EVENT_DMA3_SERR_ECC = 73,
+ GAUDI_EVENT_DMA4_SERR_ECC = 74,
+ GAUDI_EVENT_DMA5_SERR_ECC = 75,
+ GAUDI_EVENT_DMA6_SERR_ECC = 76,
+ GAUDI_EVENT_DMA7_SERR_ECC = 77,
+ GAUDI_EVENT_DMA0_DERR_ECC = 78,
+ GAUDI_EVENT_DMA1_DERR_ECC = 79,
+ GAUDI_EVENT_DMA2_DERR_ECC = 80,
+ GAUDI_EVENT_DMA3_DERR_ECC = 81,
+ GAUDI_EVENT_DMA4_DERR_ECC = 82,
+ GAUDI_EVENT_DMA5_DERR_ECC = 83,
+ GAUDI_EVENT_DMA6_DERR_ECC = 84,
+ GAUDI_EVENT_DMA7_DERR_ECC = 85,
+ GAUDI_EVENT_CPU_IF_ECC_SERR = 86,
+ GAUDI_EVENT_CPU_IF_ECC_DERR = 87,
+ GAUDI_EVENT_PSOC_MEM_SERR = 88,
+ GAUDI_EVENT_PSOC_CORESIGHT_SERR = 89,
+ GAUDI_EVENT_PSOC_MEM_DERR = 90,
+ GAUDI_EVENT_PSOC_CORESIGHT_DERR = 91,
+ GAUDI_EVENT_SRAM0_SERR = 92,
+ GAUDI_EVENT_SRAM1_SERR = 93,
+ GAUDI_EVENT_SRAM2_SERR = 94,
+ GAUDI_EVENT_SRAM3_SERR = 95,
+ GAUDI_EVENT_SRAM7_SERR = 96,
+ GAUDI_EVENT_SRAM6_SERR = 97,
+ GAUDI_EVENT_SRAM5_SERR = 98,
+ GAUDI_EVENT_SRAM4_SERR = 99,
+ GAUDI_EVENT_SRAM8_SERR = 100,
+ GAUDI_EVENT_SRAM9_SERR = 101,
+ GAUDI_EVENT_SRAM10_SERR = 102,
+ GAUDI_EVENT_SRAM11_SERR = 103,
+ GAUDI_EVENT_SRAM15_SERR = 104,
+ GAUDI_EVENT_SRAM14_SERR = 105,
+ GAUDI_EVENT_SRAM13_SERR = 106,
+ GAUDI_EVENT_SRAM12_SERR = 107,
+ GAUDI_EVENT_SRAM16_SERR = 108,
+ GAUDI_EVENT_SRAM17_SERR = 109,
+ GAUDI_EVENT_SRAM18_SERR = 110,
+ GAUDI_EVENT_SRAM19_SERR = 111,
+ GAUDI_EVENT_SRAM23_SERR = 112,
+ GAUDI_EVENT_SRAM22_SERR = 113,
+ GAUDI_EVENT_SRAM21_SERR = 114,
+ GAUDI_EVENT_SRAM20_SERR = 115,
+ GAUDI_EVENT_SRAM24_SERR = 116,
+ GAUDI_EVENT_SRAM25_SERR = 117,
+ GAUDI_EVENT_SRAM26_SERR = 118,
+ GAUDI_EVENT_SRAM27_SERR = 119,
+ GAUDI_EVENT_SRAM31_SERR = 120,
+ GAUDI_EVENT_SRAM30_SERR = 121,
+ GAUDI_EVENT_SRAM29_SERR = 122,
+ GAUDI_EVENT_SRAM28_SERR = 123,
+ GAUDI_EVENT_SRAM0_DERR = 124,
+ GAUDI_EVENT_SRAM1_DERR = 125,
+ GAUDI_EVENT_SRAM2_DERR = 126,
+ GAUDI_EVENT_SRAM3_DERR = 127,
+ GAUDI_EVENT_SRAM7_DERR = 128,
+ GAUDI_EVENT_SRAM6_DERR = 129,
+ GAUDI_EVENT_SRAM5_DERR = 130,
+ GAUDI_EVENT_SRAM4_DERR = 131,
+ GAUDI_EVENT_SRAM8_DERR = 132,
+ GAUDI_EVENT_SRAM9_DERR = 133,
+ GAUDI_EVENT_SRAM10_DERR = 134,
+ GAUDI_EVENT_SRAM11_DERR = 135,
+ GAUDI_EVENT_SRAM15_DERR = 136,
+ GAUDI_EVENT_SRAM14_DERR = 137,
+ GAUDI_EVENT_SRAM13_DERR = 138,
+ GAUDI_EVENT_SRAM12_DERR = 139,
+ GAUDI_EVENT_SRAM16_DERR = 140,
+ GAUDI_EVENT_SRAM17_DERR = 141,
+ GAUDI_EVENT_SRAM18_DERR = 142,
+ GAUDI_EVENT_SRAM19_DERR = 143,
+ GAUDI_EVENT_SRAM23_DERR = 144,
+ GAUDI_EVENT_SRAM22_DERR = 145,
+ GAUDI_EVENT_SRAM21_DERR = 146,
+ GAUDI_EVENT_SRAM20_DERR = 147,
+ GAUDI_EVENT_SRAM24_DERR = 148,
+ GAUDI_EVENT_SRAM25_DERR = 149,
+ GAUDI_EVENT_SRAM26_DERR = 150,
+ GAUDI_EVENT_SRAM27_DERR = 151,
+ GAUDI_EVENT_SRAM31_DERR = 152,
+ GAUDI_EVENT_SRAM30_DERR = 153,
+ GAUDI_EVENT_SRAM29_DERR = 154,
+ GAUDI_EVENT_SRAM28_DERR = 155,
+ GAUDI_EVENT_NIC0_SERR = 156,
+ GAUDI_EVENT_NIC1_SERR = 157,
+ GAUDI_EVENT_NIC2_SERR = 158,
+ GAUDI_EVENT_NIC3_SERR = 159,
+ GAUDI_EVENT_NIC4_SERR = 160,
+ GAUDI_EVENT_NIC0_DERR = 166,
+ GAUDI_EVENT_NIC1_DERR = 167,
+ GAUDI_EVENT_NIC2_DERR = 168,
+ GAUDI_EVENT_NIC3_DERR = 169,
+ GAUDI_EVENT_NIC4_DERR = 170,
+ GAUDI_EVENT_DMA_IF0_SERR = 176,
+ GAUDI_EVENT_DMA_IF1_SERR = 177,
+ GAUDI_EVENT_DMA_IF2_SERR = 178,
+ GAUDI_EVENT_DMA_IF3_SERR = 179,
+ GAUDI_EVENT_DMA_IF0_DERR = 180,
+ GAUDI_EVENT_DMA_IF1_DERR = 181,
+ GAUDI_EVENT_DMA_IF2_DERR = 182,
+ GAUDI_EVENT_DMA_IF3_DERR = 183,
+ GAUDI_EVENT_GIC500 = 184,
+ GAUDI_EVENT_HBM_0_SERR = 185,
+ GAUDI_EVENT_HBM_1_SERR = 186,
+ GAUDI_EVENT_HBM_2_SERR = 187,
+ GAUDI_EVENT_HBM_3_SERR = 188,
+ GAUDI_EVENT_HBM_0_DERR = 189,
+ GAUDI_EVENT_HBM_1_DERR = 190,
+ GAUDI_EVENT_HBM_2_DERR = 191,
+ GAUDI_EVENT_HBM_3_DERR = 192,
+ GAUDI_EVENT_MMU_SERR = 193,
+ GAUDI_EVENT_MMU_DERR = 194,
+ GAUDI_EVENT_PCIE_DEC = 200,
+ GAUDI_EVENT_TPC0_DEC = 201,
+ GAUDI_EVENT_TPC1_DEC = 203,
+ GAUDI_EVENT_TPC2_DEC = 205,
+ GAUDI_EVENT_TPC3_DEC = 207,
+ GAUDI_EVENT_TPC4_DEC = 209,
+ GAUDI_EVENT_TPC5_DEC = 211,
+ GAUDI_EVENT_TPC6_DEC = 213,
+ GAUDI_EVENT_TPC7_DEC = 215,
+ GAUDI_EVENT_AXI_ECC = 217,
+ GAUDI_EVENT_L2_RAM_ECC = 218,
+ GAUDI_EVENT_MME0_WBC_RSP = 219,
+ GAUDI_EVENT_MME0_SBAB0_RSP = 220,
+ GAUDI_EVENT_MME1_WBC_RSP = 224,
+ GAUDI_EVENT_MME1_SBAB0_RSP = 225,
+ GAUDI_EVENT_MME2_WBC_RSP = 229,
+ GAUDI_EVENT_MME2_SBAB0_RSP = 230,
+ GAUDI_EVENT_MME3_WBC_RSP = 234,
+ GAUDI_EVENT_MME3_SBAB0_RSP = 235,
+ GAUDI_EVENT_PLL0 = 239,
+ GAUDI_EVENT_PLL1 = 240,
+ GAUDI_EVENT_PLL2 = 241,
+ GAUDI_EVENT_PLL3 = 242,
+ GAUDI_EVENT_PLL4 = 243,
+ GAUDI_EVENT_PLL5 = 244,
+ GAUDI_EVENT_PLL6 = 245,
+ GAUDI_EVENT_PLL7 = 246,
+ GAUDI_EVENT_PLL8 = 247,
+ GAUDI_EVENT_PLL9 = 248,
+ GAUDI_EVENT_PLL10 = 249,
+ GAUDI_EVENT_PLL11 = 250,
+ GAUDI_EVENT_PLL12 = 251,
+ GAUDI_EVENT_PLL13 = 252,
+ GAUDI_EVENT_PLL14 = 253,
+ GAUDI_EVENT_PLL15 = 254,
+ GAUDI_EVENT_PLL16 = 255,
+ GAUDI_EVENT_PLL17 = 256,
+ GAUDI_EVENT_CPU_AXI_SPLITTER = 257,
+ GAUDI_EVENT_PSOC_AXI_DEC = 262,
+ GAUDI_EVENT_PSOC_PRSTN_FALL = 263,
+ GAUDI_EVENT_NIC_SEI_0 = 264,
+ GAUDI_EVENT_NIC_SEI_1 = 265,
+ GAUDI_EVENT_NIC_SEI_2 = 266,
+ GAUDI_EVENT_NIC_SEI_3 = 267,
+ GAUDI_EVENT_NIC_SEI_4 = 268,
+ GAUDI_EVENT_PCIE_FLR = 290,
+ GAUDI_EVENT_TPC0_BMON_SPMU = 300,
+ GAUDI_EVENT_TPC0_KRN_ERR = 301,
+ GAUDI_EVENT_TPC1_BMON_SPMU = 306,
+ GAUDI_EVENT_TPC1_KRN_ERR = 307,
+ GAUDI_EVENT_TPC2_BMON_SPMU = 312,
+ GAUDI_EVENT_TPC2_KRN_ERR = 313,
+ GAUDI_EVENT_TPC3_BMON_SPMU = 318,
+ GAUDI_EVENT_TPC3_KRN_ERR = 319,
+ GAUDI_EVENT_TPC4_BMON_SPMU = 324,
+ GAUDI_EVENT_TPC4_KRN_ERR = 325,
+ GAUDI_EVENT_TPC5_BMON_SPMU = 330,
+ GAUDI_EVENT_TPC5_KRN_ERR = 331,
+ GAUDI_EVENT_TPC6_BMON_SPMU = 336,
+ GAUDI_EVENT_TPC6_KRN_ERR = 337,
+ GAUDI_EVENT_TPC7_BMON_SPMU = 342,
+ GAUDI_EVENT_TPC7_KRN_ERR = 343,
+ GAUDI_EVENT_MMU_PAGE_FAULT = 380,
+ GAUDI_EVENT_MMU_WR_PERM = 381,
+ GAUDI_EVENT_DMA_BM_CH0 = 383,
+ GAUDI_EVENT_DMA_BM_CH1 = 384,
+ GAUDI_EVENT_DMA_BM_CH2 = 385,
+ GAUDI_EVENT_DMA_BM_CH3 = 386,
+ GAUDI_EVENT_DMA_BM_CH4 = 387,
+ GAUDI_EVENT_DMA_BM_CH5 = 388,
+ GAUDI_EVENT_DMA_BM_CH6 = 389,
+ GAUDI_EVENT_DMA_BM_CH7 = 390,
+ GAUDI_EVENT_HBM0_SPI_0 = 395,
+ GAUDI_EVENT_HBM0_SPI_1 = 396,
+ GAUDI_EVENT_HBM1_SPI_0 = 399,
+ GAUDI_EVENT_HBM1_SPI_1 = 400,
+ GAUDI_EVENT_HBM2_SPI_0 = 403,
+ GAUDI_EVENT_HBM2_SPI_1 = 404,
+ GAUDI_EVENT_HBM3_SPI_0 = 407,
+ GAUDI_EVENT_HBM3_SPI_1 = 408,
+ GAUDI_EVENT_PSOC_GPIO_U16_0 = 421,
+ GAUDI_EVENT_PI_UPDATE = 484,
+ GAUDI_EVENT_HALT_MACHINE = 485,
+ GAUDI_EVENT_INTS_REGISTER = 486,
+ GAUDI_EVENT_SOFT_RESET = 487,
+ GAUDI_EVENT_RAZWI_OR_ADC = 548,
+ GAUDI_EVENT_TPC0_QM = 572,
+ GAUDI_EVENT_TPC1_QM = 573,
+ GAUDI_EVENT_TPC2_QM = 574,
+ GAUDI_EVENT_TPC3_QM = 575,
+ GAUDI_EVENT_TPC4_QM = 576,
+ GAUDI_EVENT_TPC5_QM = 577,
+ GAUDI_EVENT_TPC6_QM = 578,
+ GAUDI_EVENT_TPC7_QM = 579,
+ GAUDI_EVENT_MME0_QM = 581,
+ GAUDI_EVENT_MME2_QM = 582,
+ GAUDI_EVENT_DMA0_QM = 583,
+ GAUDI_EVENT_DMA1_QM = 584,
+ GAUDI_EVENT_DMA2_QM = 585,
+ GAUDI_EVENT_DMA3_QM = 586,
+ GAUDI_EVENT_DMA4_QM = 587,
+ GAUDI_EVENT_DMA5_QM = 588,
+ GAUDI_EVENT_DMA6_QM = 589,
+ GAUDI_EVENT_DMA7_QM = 590,
+ GAUDI_EVENT_NIC0_QM0 = 594,
+ GAUDI_EVENT_NIC0_QM1 = 595,
+ GAUDI_EVENT_NIC1_QM0 = 596,
+ GAUDI_EVENT_NIC1_QM1 = 597,
+ GAUDI_EVENT_NIC2_QM0 = 598,
+ GAUDI_EVENT_NIC2_QM1 = 599,
+ GAUDI_EVENT_NIC3_QM0 = 600,
+ GAUDI_EVENT_NIC3_QM1 = 601,
+ GAUDI_EVENT_NIC4_QM0 = 602,
+ GAUDI_EVENT_NIC4_QM1 = 603,
+ GAUDI_EVENT_DMA0_CORE = 604,
+ GAUDI_EVENT_DMA1_CORE = 605,
+ GAUDI_EVENT_DMA2_CORE = 606,
+ GAUDI_EVENT_DMA3_CORE = 607,
+ GAUDI_EVENT_DMA4_CORE = 608,
+ GAUDI_EVENT_DMA5_CORE = 609,
+ GAUDI_EVENT_DMA6_CORE = 610,
+ GAUDI_EVENT_DMA7_CORE = 611,
+ GAUDI_EVENT_NIC0_QP0 = 612,
+ GAUDI_EVENT_NIC0_QP1 = 613,
+ GAUDI_EVENT_NIC1_QP0 = 614,
+ GAUDI_EVENT_NIC1_QP1 = 615,
+ GAUDI_EVENT_NIC2_QP0 = 616,
+ GAUDI_EVENT_NIC2_QP1 = 617,
+ GAUDI_EVENT_NIC3_QP0 = 618,
+ GAUDI_EVENT_NIC3_QP1 = 619,
+ GAUDI_EVENT_NIC4_QP0 = 620,
+ GAUDI_EVENT_NIC4_QP1 = 621,
+ GAUDI_EVENT_FIX_POWER_ENV_S = 658,
+ GAUDI_EVENT_FIX_POWER_ENV_E = 659,
+ GAUDI_EVENT_FIX_THERMAL_ENV_S = 660,
+ GAUDI_EVENT_FIX_THERMAL_ENV_E = 661,
+ GAUDI_EVENT_RAZWI_OR_ADC_SW = 662,
+ GAUDI_EVENT_SIZE,
+};
+
+#endif /* __GAUDI_ASYNC_EVENTS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
new file mode 100644
index 000000000000..737176ba06fb
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_
+#define __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_
+
+struct gaudi_async_events_ids_map {
+ int fc_id;
+ int cpu_id;
+ int valid;
+ char name[64];
+};
+
+static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = {
+ { .fc_id = 0, .cpu_id = 0, .valid = 0, .name = "" },
+ { .fc_id = 1, .cpu_id = 1, .valid = 0, .name = "" },
+ { .fc_id = 2, .cpu_id = 2, .valid = 0, .name = "" },
+ { .fc_id = 3, .cpu_id = 3, .valid = 0, .name = "" },
+ { .fc_id = 4, .cpu_id = 4, .valid = 0, .name = "" },
+ { .fc_id = 5, .cpu_id = 5, .valid = 0, .name = "" },
+ { .fc_id = 6, .cpu_id = 6, .valid = 0, .name = "" },
+ { .fc_id = 7, .cpu_id = 7, .valid = 0, .name = "" },
+ { .fc_id = 8, .cpu_id = 8, .valid = 0, .name = "" },
+ { .fc_id = 9, .cpu_id = 9, .valid = 0, .name = "" },
+ { .fc_id = 10, .cpu_id = 10, .valid = 0, .name = "" },
+ { .fc_id = 11, .cpu_id = 11, .valid = 0, .name = "" },
+ { .fc_id = 12, .cpu_id = 12, .valid = 0, .name = "" },
+ { .fc_id = 13, .cpu_id = 13, .valid = 0, .name = "" },
+ { .fc_id = 14, .cpu_id = 14, .valid = 0, .name = "" },
+ { .fc_id = 15, .cpu_id = 15, .valid = 0, .name = "" },
+ { .fc_id = 16, .cpu_id = 16, .valid = 0, .name = "" },
+ { .fc_id = 17, .cpu_id = 17, .valid = 0, .name = "" },
+ { .fc_id = 18, .cpu_id = 18, .valid = 0, .name = "" },
+ { .fc_id = 19, .cpu_id = 19, .valid = 0, .name = "" },
+ { .fc_id = 20, .cpu_id = 20, .valid = 0, .name = "" },
+ { .fc_id = 21, .cpu_id = 21, .valid = 0, .name = "" },
+ { .fc_id = 22, .cpu_id = 22, .valid = 0, .name = "" },
+ { .fc_id = 23, .cpu_id = 23, .valid = 0, .name = "" },
+ { .fc_id = 24, .cpu_id = 24, .valid = 0, .name = "" },
+ { .fc_id = 25, .cpu_id = 25, .valid = 0, .name = "" },
+ { .fc_id = 26, .cpu_id = 26, .valid = 0, .name = "" },
+ { .fc_id = 27, .cpu_id = 27, .valid = 0, .name = "" },
+ { .fc_id = 28, .cpu_id = 28, .valid = 0, .name = "" },
+ { .fc_id = 29, .cpu_id = 29, .valid = 0, .name = "" },
+ { .fc_id = 30, .cpu_id = 30, .valid = 0, .name = "" },
+ { .fc_id = 31, .cpu_id = 31, .valid = 0, .name = "" },
+ { .fc_id = 32, .cpu_id = 32, .valid = 1, .name = "PCIE_CORE_SERR" },
+ { .fc_id = 33, .cpu_id = 33, .valid = 1, .name = "PCIE_CORE_DERR" },
+ { .fc_id = 34, .cpu_id = 34, .valid = 1, .name = "PCIE_IF_SERR" },
+ { .fc_id = 35, .cpu_id = 35, .valid = 1, .name = "PCIE_IF_DERR" },
+ { .fc_id = 36, .cpu_id = 36, .valid = 1, .name = "PCIE_PHY_SERR" },
+ { .fc_id = 37, .cpu_id = 37, .valid = 1, .name = "PCIE_PHY_DERR" },
+ { .fc_id = 38, .cpu_id = 38, .valid = 1, .name = "TPC0_SERR" },
+ { .fc_id = 39, .cpu_id = 38, .valid = 1, .name = "TPC1_SERR" },
+ { .fc_id = 40, .cpu_id = 38, .valid = 1, .name = "TPC2_SERR" },
+ { .fc_id = 41, .cpu_id = 38, .valid = 1, .name = "TPC3_SERR" },
+ { .fc_id = 42, .cpu_id = 38, .valid = 1, .name = "TPC4_SERR" },
+ { .fc_id = 43, .cpu_id = 38, .valid = 1, .name = "TPC5_SERR" },
+ { .fc_id = 44, .cpu_id = 38, .valid = 1, .name = "TPC6_SERR" },
+ { .fc_id = 45, .cpu_id = 38, .valid = 1, .name = "TPC7_SERR" },
+ { .fc_id = 46, .cpu_id = 39, .valid = 1, .name = "TPC0_DERR" },
+ { .fc_id = 47, .cpu_id = 39, .valid = 1, .name = "TPC1_DERR" },
+ { .fc_id = 48, .cpu_id = 39, .valid = 1, .name = "TPC2_DERR" },
+ { .fc_id = 49, .cpu_id = 39, .valid = 1, .name = "TPC3_DERR" },
+ { .fc_id = 50, .cpu_id = 39, .valid = 1, .name = "TPC4_DERR" },
+ { .fc_id = 51, .cpu_id = 39, .valid = 1, .name = "TPC5_DERR" },
+ { .fc_id = 52, .cpu_id = 39, .valid = 1, .name = "TPC6_DERR" },
+ { .fc_id = 53, .cpu_id = 39, .valid = 1, .name = "TPC7_DERR" },
+ { .fc_id = 54, .cpu_id = 40, .valid = 1, .name = "MME0_ACC_SERR" },
+ { .fc_id = 55, .cpu_id = 41, .valid = 1, .name = "MME0_ACC_DERR" },
+ { .fc_id = 56, .cpu_id = 42, .valid = 1, .name = "MME0_SBAB_SERR" },
+ { .fc_id = 57, .cpu_id = 43, .valid = 1, .name = "MME0_SBAB_DERR" },
+ { .fc_id = 58, .cpu_id = 44, .valid = 1, .name = "MME1_ACC_SERR" },
+ { .fc_id = 59, .cpu_id = 45, .valid = 1, .name = "MME1_ACC_DERR" },
+ { .fc_id = 60, .cpu_id = 46, .valid = 1, .name = "MME1_SBAB_SERR" },
+ { .fc_id = 61, .cpu_id = 47, .valid = 1, .name = "MME1_SBAB_DERR" },
+ { .fc_id = 62, .cpu_id = 48, .valid = 1, .name = "MME2_ACC_SERR" },
+ { .fc_id = 63, .cpu_id = 49, .valid = 1, .name = "MME2_ACC_DERR" },
+ { .fc_id = 64, .cpu_id = 50, .valid = 1, .name = "MME2_SBAB_SERR" },
+ { .fc_id = 65, .cpu_id = 51, .valid = 1, .name = "MME2_SBAB_DERR" },
+ { .fc_id = 66, .cpu_id = 52, .valid = 1, .name = "MME3_ACC_SERR" },
+ { .fc_id = 67, .cpu_id = 53, .valid = 1, .name = "MME3_ACC_DERR" },
+ { .fc_id = 68, .cpu_id = 54, .valid = 1, .name = "MME3_SBAB_SERR" },
+ { .fc_id = 69, .cpu_id = 55, .valid = 1, .name = "MME3_SBAB_DERR" },
+ { .fc_id = 70, .cpu_id = 56, .valid = 1, .name = "DMA0_SERR_ECC" },
+ { .fc_id = 71, .cpu_id = 56, .valid = 1, .name = "DMA1_SERR_ECC" },
+ { .fc_id = 72, .cpu_id = 56, .valid = 1, .name = "DMA2_SERR_ECC" },
+ { .fc_id = 73, .cpu_id = 56, .valid = 1, .name = "DMA3_SERR_ECC" },
+ { .fc_id = 74, .cpu_id = 56, .valid = 1, .name = "DMA4_SERR_ECC" },
+ { .fc_id = 75, .cpu_id = 56, .valid = 1, .name = "DMA5_SERR_ECC" },
+ { .fc_id = 76, .cpu_id = 56, .valid = 1, .name = "DMA6_SERR_ECC" },
+ { .fc_id = 77, .cpu_id = 56, .valid = 1, .name = "DMA7_SERR_ECC" },
+ { .fc_id = 78, .cpu_id = 57, .valid = 1, .name = "DMA0_DERR_ECC" },
+ { .fc_id = 79, .cpu_id = 57, .valid = 1, .name = "DMA1_DERR_ECC" },
+ { .fc_id = 80, .cpu_id = 57, .valid = 1, .name = "DMA2_DERR_ECC" },
+ { .fc_id = 81, .cpu_id = 57, .valid = 1, .name = "DMA3_DERR_ECC" },
+ { .fc_id = 82, .cpu_id = 57, .valid = 1, .name = "DMA4_DERR_ECC" },
+ { .fc_id = 83, .cpu_id = 57, .valid = 1, .name = "DMA5_DERR_ECC" },
+ { .fc_id = 84, .cpu_id = 57, .valid = 1, .name = "DMA6_DERR_ECC" },
+ { .fc_id = 85, .cpu_id = 57, .valid = 1, .name = "DMA7_DERR_ECC" },
+ { .fc_id = 86, .cpu_id = 58, .valid = 1, .name = "CPU_IF_ECC_SERR" },
+ { .fc_id = 87, .cpu_id = 59, .valid = 1, .name = "CPU_IF_ECC_DERR" },
+ { .fc_id = 88, .cpu_id = 60, .valid = 1, .name = "PSOC_MEM_SERR" },
+ { .fc_id = 89, .cpu_id = 61, .valid = 1,
+ .name = "PSOC_CORESIGHT_SERR" },
+ { .fc_id = 90, .cpu_id = 62, .valid = 1, .name = "PSOC_MEM_DERR" },
+ { .fc_id = 91, .cpu_id = 63, .valid = 1,
+ .name = "PSOC_CORESIGHT_DERR" },
+ { .fc_id = 92, .cpu_id = 64, .valid = 1, .name = "SRAM0_SERR" },
+ { .fc_id = 93, .cpu_id = 64, .valid = 1, .name = "SRAM1_SERR" },
+ { .fc_id = 94, .cpu_id = 64, .valid = 1, .name = "SRAM2_SERR" },
+ { .fc_id = 95, .cpu_id = 64, .valid = 1, .name = "SRAM3_SERR" },
+ { .fc_id = 96, .cpu_id = 64, .valid = 1, .name = "SRAM7_SERR" },
+ { .fc_id = 97, .cpu_id = 64, .valid = 1, .name = "SRAM6_SERR" },
+ { .fc_id = 98, .cpu_id = 64, .valid = 1, .name = "SRAM5_SERR" },
+ { .fc_id = 99, .cpu_id = 64, .valid = 1, .name = "SRAM4_SERR" },
+ { .fc_id = 100, .cpu_id = 64, .valid = 1, .name = "SRAM8_SERR" },
+ { .fc_id = 101, .cpu_id = 64, .valid = 1, .name = "SRAM9_SERR" },
+ { .fc_id = 102, .cpu_id = 64, .valid = 1, .name = "SRAM10_SERR" },
+ { .fc_id = 103, .cpu_id = 64, .valid = 1, .name = "SRAM11_SERR" },
+ { .fc_id = 104, .cpu_id = 64, .valid = 1, .name = "SRAM15_SERR" },
+ { .fc_id = 105, .cpu_id = 64, .valid = 1, .name = "SRAM14_SERR" },
+ { .fc_id = 106, .cpu_id = 64, .valid = 1, .name = "SRAM13_SERR" },
+ { .fc_id = 107, .cpu_id = 64, .valid = 1, .name = "SRAM12_SERR" },
+ { .fc_id = 108, .cpu_id = 64, .valid = 1, .name = "SRAM16_SERR" },
+ { .fc_id = 109, .cpu_id = 64, .valid = 1, .name = "SRAM17_SERR" },
+ { .fc_id = 110, .cpu_id = 64, .valid = 1, .name = "SRAM18_SERR" },
+ { .fc_id = 111, .cpu_id = 64, .valid = 1, .name = "SRAM19_SERR" },
+ { .fc_id = 112, .cpu_id = 64, .valid = 1, .name = "SRAM23_SERR" },
+ { .fc_id = 113, .cpu_id = 64, .valid = 1, .name = "SRAM22_SERR" },
+ { .fc_id = 114, .cpu_id = 64, .valid = 1, .name = "SRAM21_SERR" },
+ { .fc_id = 115, .cpu_id = 64, .valid = 1, .name = "SRAM20_SERR" },
+ { .fc_id = 116, .cpu_id = 64, .valid = 1, .name = "SRAM24_SERR" },
+ { .fc_id = 117, .cpu_id = 64, .valid = 1, .name = "SRAM25_SERR" },
+ { .fc_id = 118, .cpu_id = 64, .valid = 1, .name = "SRAM26_SERR" },
+ { .fc_id = 119, .cpu_id = 64, .valid = 1, .name = "SRAM27_SERR" },
+ { .fc_id = 120, .cpu_id = 64, .valid = 1, .name = "SRAM31_SERR" },
+ { .fc_id = 121, .cpu_id = 64, .valid = 1, .name = "SRAM30_SERR" },
+ { .fc_id = 122, .cpu_id = 64, .valid = 1, .name = "SRAM29_SERR" },
+ { .fc_id = 123, .cpu_id = 64, .valid = 1, .name = "SRAM28_SERR" },
+ { .fc_id = 124, .cpu_id = 65, .valid = 1, .name = "SRAM0_DERR" },
+ { .fc_id = 125, .cpu_id = 65, .valid = 1, .name = "SRAM1_DERR" },
+ { .fc_id = 126, .cpu_id = 65, .valid = 1, .name = "SRAM2_DERR" },
+ { .fc_id = 127, .cpu_id = 65, .valid = 1, .name = "SRAM3_DERR" },
+ { .fc_id = 128, .cpu_id = 65, .valid = 1, .name = "SRAM7_DERR" },
+ { .fc_id = 129, .cpu_id = 65, .valid = 1, .name = "SRAM6_DERR" },
+ { .fc_id = 130, .cpu_id = 65, .valid = 1, .name = "SRAM5_DERR" },
+ { .fc_id = 131, .cpu_id = 65, .valid = 1, .name = "SRAM4_DERR" },
+ { .fc_id = 132, .cpu_id = 65, .valid = 1, .name = "SRAM8_DERR" },
+ { .fc_id = 133, .cpu_id = 65, .valid = 1, .name = "SRAM9_DERR" },
+ { .fc_id = 134, .cpu_id = 65, .valid = 1, .name = "SRAM10_DERR" },
+ { .fc_id = 135, .cpu_id = 65, .valid = 1, .name = "SRAM11_DERR" },
+ { .fc_id = 136, .cpu_id = 65, .valid = 1, .name = "SRAM15_DERR" },
+ { .fc_id = 137, .cpu_id = 65, .valid = 1, .name = "SRAM14_DERR" },
+ { .fc_id = 138, .cpu_id = 65, .valid = 1, .name = "SRAM13_DERR" },
+ { .fc_id = 139, .cpu_id = 65, .valid = 1, .name = "SRAM12_DERR" },
+ { .fc_id = 140, .cpu_id = 65, .valid = 1, .name = "SRAM16_DERR" },
+ { .fc_id = 141, .cpu_id = 65, .valid = 1, .name = "SRAM17_DERR" },
+ { .fc_id = 142, .cpu_id = 65, .valid = 1, .name = "SRAM18_DERR" },
+ { .fc_id = 143, .cpu_id = 65, .valid = 1, .name = "SRAM19_DERR" },
+ { .fc_id = 144, .cpu_id = 65, .valid = 1, .name = "SRAM23_DERR" },
+ { .fc_id = 145, .cpu_id = 65, .valid = 1, .name = "SRAM22_DERR" },
+ { .fc_id = 146, .cpu_id = 65, .valid = 1, .name = "SRAM21_DERR" },
+ { .fc_id = 147, .cpu_id = 65, .valid = 1, .name = "SRAM20_DERR" },
+ { .fc_id = 148, .cpu_id = 65, .valid = 1, .name = "SRAM24_DERR" },
+ { .fc_id = 149, .cpu_id = 65, .valid = 1, .name = "SRAM25_DERR" },
+ { .fc_id = 150, .cpu_id = 65, .valid = 1, .name = "SRAM26_DERR" },
+ { .fc_id = 151, .cpu_id = 65, .valid = 1, .name = "SRAM27_DERR" },
+ { .fc_id = 152, .cpu_id = 65, .valid = 1, .name = "SRAM31_DERR" },
+ { .fc_id = 153, .cpu_id = 65, .valid = 1, .name = "SRAM30_DERR" },
+ { .fc_id = 154, .cpu_id = 65, .valid = 1, .name = "SRAM29_DERR" },
+ { .fc_id = 155, .cpu_id = 65, .valid = 1, .name = "SRAM28_DERR" },
+ { .fc_id = 156, .cpu_id = 66, .valid = 1, .name = "NIC0_SERR" },
+ { .fc_id = 157, .cpu_id = 66, .valid = 1, .name = "NIC1_SERR" },
+ { .fc_id = 158, .cpu_id = 66, .valid = 1, .name = "NIC2_SERR" },
+ { .fc_id = 159, .cpu_id = 66, .valid = 1, .name = "NIC3_SERR" },
+ { .fc_id = 160, .cpu_id = 66, .valid = 1, .name = "NIC4_SERR" },
+ { .fc_id = 161, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 162, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 163, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 164, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 165, .cpu_id = 66, .valid = 0, .name = "" },
+ { .fc_id = 166, .cpu_id = 67, .valid = 1, .name = "NIC0_DERR" },
+ { .fc_id = 167, .cpu_id = 67, .valid = 1, .name = "NIC1_DERR" },
+ { .fc_id = 168, .cpu_id = 67, .valid = 1, .name = "NIC2_DERR" },
+ { .fc_id = 169, .cpu_id = 67, .valid = 1, .name = "NIC3_DERR" },
+ { .fc_id = 170, .cpu_id = 67, .valid = 1, .name = "NIC4_DERR" },
+ { .fc_id = 171, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 172, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 173, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 174, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 175, .cpu_id = 67, .valid = 0, .name = "" },
+ { .fc_id = 176, .cpu_id = 68, .valid = 1, .name = "DMA_IF0_SERR" },
+ { .fc_id = 177, .cpu_id = 68, .valid = 1, .name = "DMA_IF1_SERR" },
+ { .fc_id = 178, .cpu_id = 68, .valid = 1, .name = "DMA_IF2_SERR" },
+ { .fc_id = 179, .cpu_id = 68, .valid = 1, .name = "DMA_IF3_SERR" },
+ { .fc_id = 180, .cpu_id = 69, .valid = 1, .name = "DMA_IF0_DERR" },
+ { .fc_id = 181, .cpu_id = 69, .valid = 1, .name = "DMA_IF1_DERR" },
+ { .fc_id = 182, .cpu_id = 69, .valid = 1, .name = "DMA_IF2_DERR" },
+ { .fc_id = 183, .cpu_id = 69, .valid = 1, .name = "DMA_IF3_DERR" },
+ { .fc_id = 184, .cpu_id = 70, .valid = 1, .name = "GIC500" },
+ { .fc_id = 185, .cpu_id = 71, .valid = 1, .name = "HBM_0_SERR" },
+ { .fc_id = 186, .cpu_id = 71, .valid = 1, .name = "HBM_1_SERR" },
+ { .fc_id = 187, .cpu_id = 71, .valid = 1, .name = "HBM_2_SERR" },
+ { .fc_id = 188, .cpu_id = 71, .valid = 1, .name = "HBM_3_SERR" },
+ { .fc_id = 189, .cpu_id = 72, .valid = 1, .name = "HBM_0_DERR" },
+ { .fc_id = 190, .cpu_id = 72, .valid = 1, .name = "HBM_1_DERR" },
+ { .fc_id = 191, .cpu_id = 72, .valid = 1, .name = "HBM_2_DERR" },
+ { .fc_id = 192, .cpu_id = 72, .valid = 1, .name = "HBM_3_DERR" },
+ { .fc_id = 193, .cpu_id = 73, .valid = 1, .name = "MMU_SERR" },
+ { .fc_id = 194, .cpu_id = 74, .valid = 1, .name = "MMU_DERR" },
+ { .fc_id = 195, .cpu_id = 75, .valid = 0, .name = "" },
+ { .fc_id = 196, .cpu_id = 76, .valid = 0, .name = "" },
+ { .fc_id = 197, .cpu_id = 77, .valid = 0, .name = "" },
+ { .fc_id = 198, .cpu_id = 78, .valid = 0, .name = "" },
+ { .fc_id = 199, .cpu_id = 79, .valid = 0, .name = "" },
+ { .fc_id = 200, .cpu_id = 80, .valid = 1, .name = "PCIE_DEC" },
+ { .fc_id = 201, .cpu_id = 81, .valid = 1, .name = "TPC0_DEC" },
+ { .fc_id = 202, .cpu_id = 82, .valid = 0, .name = "" },
+ { .fc_id = 203, .cpu_id = 83, .valid = 1, .name = "TPC1_DEC" },
+ { .fc_id = 204, .cpu_id = 84, .valid = 0, .name = "" },
+ { .fc_id = 205, .cpu_id = 85, .valid = 1, .name = "TPC2_DEC" },
+ { .fc_id = 206, .cpu_id = 86, .valid = 0, .name = "" },
+ { .fc_id = 207, .cpu_id = 87, .valid = 1, .name = "TPC3_DEC" },
+ { .fc_id = 208, .cpu_id = 88, .valid = 0, .name = "" },
+ { .fc_id = 209, .cpu_id = 89, .valid = 1, .name = "TPC4_DEC" },
+ { .fc_id = 210, .cpu_id = 90, .valid = 0, .name = "" },
+ { .fc_id = 211, .cpu_id = 91, .valid = 1, .name = "TPC5_DEC" },
+ { .fc_id = 212, .cpu_id = 92, .valid = 0, .name = "" },
+ { .fc_id = 213, .cpu_id = 93, .valid = 1, .name = "TPC6_DEC" },
+ { .fc_id = 214, .cpu_id = 94, .valid = 0, .name = "" },
+ { .fc_id = 215, .cpu_id = 95, .valid = 1, .name = "TPC7_DEC" },
+ { .fc_id = 216, .cpu_id = 96, .valid = 0, .name = "" },
+ { .fc_id = 217, .cpu_id = 97, .valid = 1, .name = "AXI_ECC" },
+ { .fc_id = 218, .cpu_id = 98, .valid = 1, .name = "L2_RAM_ECC" },
+ { .fc_id = 219, .cpu_id = 99, .valid = 1, .name = "MME0_WBC_RSP" },
+ { .fc_id = 220, .cpu_id = 100, .valid = 1, .name = "MME0_SBAB0_RSP" },
+ { .fc_id = 221, .cpu_id = 101, .valid = 0, .name = "" },
+ { .fc_id = 222, .cpu_id = 102, .valid = 0, .name = "" },
+ { .fc_id = 223, .cpu_id = 103, .valid = 0, .name = "" },
+ { .fc_id = 224, .cpu_id = 104, .valid = 1, .name = "MME1_WBC_RSP" },
+ { .fc_id = 225, .cpu_id = 105, .valid = 1, .name = "MME1_SBAB0_RSP" },
+ { .fc_id = 226, .cpu_id = 106, .valid = 0, .name = "" },
+ { .fc_id = 227, .cpu_id = 107, .valid = 0, .name = "" },
+ { .fc_id = 228, .cpu_id = 108, .valid = 0, .name = "" },
+ { .fc_id = 229, .cpu_id = 109, .valid = 1, .name = "MME2_WBC_RSP" },
+ { .fc_id = 230, .cpu_id = 110, .valid = 1, .name = "MME2_SBAB0_RSP" },
+ { .fc_id = 231, .cpu_id = 111, .valid = 0, .name = "" },
+ { .fc_id = 232, .cpu_id = 112, .valid = 0, .name = "" },
+ { .fc_id = 233, .cpu_id = 113, .valid = 0, .name = "" },
+ { .fc_id = 234, .cpu_id = 114, .valid = 1, .name = "MME3_WBC_RSP" },
+ { .fc_id = 235, .cpu_id = 115, .valid = 1, .name = "MME3_SBAB0_RSP" },
+ { .fc_id = 236, .cpu_id = 116, .valid = 0, .name = "" },
+ { .fc_id = 237, .cpu_id = 117, .valid = 0, .name = "" },
+ { .fc_id = 238, .cpu_id = 118, .valid = 0, .name = "" },
+ { .fc_id = 239, .cpu_id = 119, .valid = 1, .name = "PLL0" },
+ { .fc_id = 240, .cpu_id = 119, .valid = 1, .name = "PLL1" },
+ { .fc_id = 241, .cpu_id = 119, .valid = 1, .name = "PLL2" },
+ { .fc_id = 242, .cpu_id = 119, .valid = 1, .name = "PLL3" },
+ { .fc_id = 243, .cpu_id = 119, .valid = 1, .name = "PLL4" },
+ { .fc_id = 244, .cpu_id = 119, .valid = 1, .name = "PLL5" },
+ { .fc_id = 245, .cpu_id = 119, .valid = 1, .name = "PLL6" },
+ { .fc_id = 246, .cpu_id = 119, .valid = 1, .name = "PLL7" },
+ { .fc_id = 247, .cpu_id = 119, .valid = 1, .name = "PLL8" },
+ { .fc_id = 248, .cpu_id = 119, .valid = 1, .name = "PLL9" },
+ { .fc_id = 249, .cpu_id = 119, .valid = 1, .name = "PLL10" },
+ { .fc_id = 250, .cpu_id = 119, .valid = 1, .name = "PLL11" },
+ { .fc_id = 251, .cpu_id = 119, .valid = 1, .name = "PLL12" },
+ { .fc_id = 252, .cpu_id = 119, .valid = 1, .name = "PLL13" },
+ { .fc_id = 253, .cpu_id = 119, .valid = 1, .name = "PLL14" },
+ { .fc_id = 254, .cpu_id = 119, .valid = 1, .name = "PLL15" },
+ { .fc_id = 255, .cpu_id = 119, .valid = 1, .name = "PLL16" },
+ { .fc_id = 256, .cpu_id = 119, .valid = 1, .name = "PLL17" },
+ { .fc_id = 257, .cpu_id = 120, .valid = 1,
+ .name = "CPU_AXI_SPLITTER" },
+ { .fc_id = 258, .cpu_id = 121, .valid = 0, .name = "" },
+ { .fc_id = 259, .cpu_id = 122, .valid = 0, .name = "" },
+ { .fc_id = 260, .cpu_id = 123, .valid = 0, .name = "" },
+ { .fc_id = 261, .cpu_id = 124, .valid = 0, .name = "" },
+ { .fc_id = 262, .cpu_id = 125, .valid = 1, .name = "PSOC_AXI_DEC" },
+ { .fc_id = 263, .cpu_id = 126, .valid = 1, .name = "PSOC_PRSTN_FALL" },
+ { .fc_id = 264, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_0" },
+ { .fc_id = 265, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_1" },
+ { .fc_id = 266, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_2" },
+ { .fc_id = 267, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_3" },
+ { .fc_id = 268, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_4" },
+ { .fc_id = 269, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 270, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 271, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 272, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 273, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 274, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 275, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 276, .cpu_id = 128, .valid = 0, .name = "" },
+ { .fc_id = 277, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 278, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 279, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 280, .cpu_id = 129, .valid = 0, .name = "" },
+ { .fc_id = 281, .cpu_id = 130, .valid = 0, .name = "" },
+ { .fc_id = 282, .cpu_id = 131, .valid = 0, .name = "" },
+ { .fc_id = 283, .cpu_id = 132, .valid = 0, .name = "" },
+ { .fc_id = 284, .cpu_id = 133, .valid = 0, .name = "" },
+ { .fc_id = 285, .cpu_id = 134, .valid = 0, .name = "" },
+ { .fc_id = 286, .cpu_id = 135, .valid = 0, .name = "" },
+ { .fc_id = 287, .cpu_id = 136, .valid = 0, .name = "" },
+ { .fc_id = 288, .cpu_id = 137, .valid = 0, .name = "" },
+ { .fc_id = 289, .cpu_id = 138, .valid = 0, .name = "" },
+ { .fc_id = 290, .cpu_id = 139, .valid = 1, .name = "PCIE_FLR" },
+ { .fc_id = 291, .cpu_id = 140, .valid = 0, .name = "" },
+ { .fc_id = 292, .cpu_id = 141, .valid = 0, .name = "" },
+ { .fc_id = 293, .cpu_id = 142, .valid = 0, .name = "" },
+ { .fc_id = 294, .cpu_id = 143, .valid = 0, .name = "" },
+ { .fc_id = 295, .cpu_id = 144, .valid = 0, .name = "" },
+ { .fc_id = 296, .cpu_id = 145, .valid = 0, .name = "" },
+ { .fc_id = 297, .cpu_id = 146, .valid = 0, .name = "" },
+ { .fc_id = 298, .cpu_id = 147, .valid = 0, .name = "" },
+ { .fc_id = 299, .cpu_id = 148, .valid = 0, .name = "" },
+ { .fc_id = 300, .cpu_id = 149, .valid = 1, .name = "TPC0_BMON_SPMU" },
+ { .fc_id = 301, .cpu_id = 150, .valid = 1, .name = "TPC0_KRN_ERR" },
+ { .fc_id = 302, .cpu_id = 151, .valid = 0, .name = "" },
+ { .fc_id = 303, .cpu_id = 152, .valid = 0, .name = "" },
+ { .fc_id = 304, .cpu_id = 153, .valid = 0, .name = "" },
+ { .fc_id = 305, .cpu_id = 154, .valid = 0, .name = "" },
+ { .fc_id = 306, .cpu_id = 155, .valid = 1, .name = "TPC1_BMON_SPMU" },
+ { .fc_id = 307, .cpu_id = 156, .valid = 1, .name = "TPC1_KRN_ERR" },
+ { .fc_id = 308, .cpu_id = 157, .valid = 0, .name = "" },
+ { .fc_id = 309, .cpu_id = 158, .valid = 0, .name = "" },
+ { .fc_id = 310, .cpu_id = 159, .valid = 0, .name = "" },
+ { .fc_id = 311, .cpu_id = 160, .valid = 0, .name = "" },
+ { .fc_id = 312, .cpu_id = 161, .valid = 1, .name = "TPC2_BMON_SPMU" },
+ { .fc_id = 313, .cpu_id = 162, .valid = 1, .name = "TPC2_KRN_ERR" },
+ { .fc_id = 314, .cpu_id = 163, .valid = 0, .name = "" },
+ { .fc_id = 315, .cpu_id = 164, .valid = 0, .name = "" },
+ { .fc_id = 316, .cpu_id = 165, .valid = 0, .name = "" },
+ { .fc_id = 317, .cpu_id = 166, .valid = 0, .name = "" },
+ { .fc_id = 318, .cpu_id = 167, .valid = 1, .name = "TPC3_BMON_SPMU" },
+ { .fc_id = 319, .cpu_id = 168, .valid = 1, .name = "TPC3_KRN_ERR" },
+ { .fc_id = 320, .cpu_id = 169, .valid = 0, .name = "" },
+ { .fc_id = 321, .cpu_id = 170, .valid = 0, .name = "" },
+ { .fc_id = 322, .cpu_id = 171, .valid = 0, .name = "" },
+ { .fc_id = 323, .cpu_id = 172, .valid = 0, .name = "" },
+ { .fc_id = 324, .cpu_id = 173, .valid = 1, .name = "TPC4_BMON_SPMU" },
+ { .fc_id = 325, .cpu_id = 174, .valid = 1, .name = "TPC4_KRN_ERR" },
+ { .fc_id = 326, .cpu_id = 175, .valid = 0, .name = "" },
+ { .fc_id = 327, .cpu_id = 176, .valid = 0, .name = "" },
+ { .fc_id = 328, .cpu_id = 177, .valid = 0, .name = "" },
+ { .fc_id = 329, .cpu_id = 178, .valid = 0, .name = "" },
+ { .fc_id = 330, .cpu_id = 179, .valid = 1, .name = "TPC5_BMON_SPMU" },
+ { .fc_id = 331, .cpu_id = 180, .valid = 1, .name = "TPC5_KRN_ERR" },
+ { .fc_id = 332, .cpu_id = 181, .valid = 0, .name = "" },
+ { .fc_id = 333, .cpu_id = 182, .valid = 0, .name = "" },
+ { .fc_id = 334, .cpu_id = 183, .valid = 0, .name = "" },
+ { .fc_id = 335, .cpu_id = 184, .valid = 0, .name = "" },
+ { .fc_id = 336, .cpu_id = 185, .valid = 1, .name = "TPC6_BMON_SPMU" },
+ { .fc_id = 337, .cpu_id = 186, .valid = 1, .name = "TPC6_KRN_ERR" },
+ { .fc_id = 338, .cpu_id = 187, .valid = 0, .name = "" },
+ { .fc_id = 339, .cpu_id = 188, .valid = 0, .name = "" },
+ { .fc_id = 340, .cpu_id = 189, .valid = 0, .name = "" },
+ { .fc_id = 341, .cpu_id = 190, .valid = 0, .name = "" },
+ { .fc_id = 342, .cpu_id = 191, .valid = 1, .name = "TPC7_BMON_SPMU" },
+ { .fc_id = 343, .cpu_id = 192, .valid = 1, .name = "TPC7_KRN_ERR" },
+ { .fc_id = 344, .cpu_id = 193, .valid = 0, .name = "" },
+ { .fc_id = 345, .cpu_id = 194, .valid = 0, .name = "" },
+ { .fc_id = 346, .cpu_id = 195, .valid = 0, .name = "" },
+ { .fc_id = 347, .cpu_id = 196, .valid = 0, .name = "" },
+ { .fc_id = 348, .cpu_id = 197, .valid = 0, .name = "" },
+ { .fc_id = 349, .cpu_id = 198, .valid = 0, .name = "" },
+ { .fc_id = 350, .cpu_id = 199, .valid = 0, .name = "" },
+ { .fc_id = 351, .cpu_id = 200, .valid = 0, .name = "" },
+ { .fc_id = 352, .cpu_id = 201, .valid = 0, .name = "" },
+ { .fc_id = 353, .cpu_id = 202, .valid = 0, .name = "" },
+ { .fc_id = 354, .cpu_id = 203, .valid = 0, .name = "" },
+ { .fc_id = 355, .cpu_id = 204, .valid = 0, .name = "" },
+ { .fc_id = 356, .cpu_id = 205, .valid = 0, .name = "" },
+ { .fc_id = 357, .cpu_id = 206, .valid = 0, .name = "" },
+ { .fc_id = 358, .cpu_id = 207, .valid = 0, .name = "" },
+ { .fc_id = 359, .cpu_id = 208, .valid = 0, .name = "" },
+ { .fc_id = 360, .cpu_id = 209, .valid = 0, .name = "" },
+ { .fc_id = 361, .cpu_id = 210, .valid = 0, .name = "" },
+ { .fc_id = 362, .cpu_id = 211, .valid = 0, .name = "" },
+ { .fc_id = 363, .cpu_id = 212, .valid = 0, .name = "" },
+ { .fc_id = 364, .cpu_id = 213, .valid = 0, .name = "" },
+ { .fc_id = 365, .cpu_id = 214, .valid = 0, .name = "" },
+ { .fc_id = 366, .cpu_id = 215, .valid = 0, .name = "" },
+ { .fc_id = 367, .cpu_id = 216, .valid = 0, .name = "" },
+ { .fc_id = 368, .cpu_id = 217, .valid = 0, .name = "" },
+ { .fc_id = 369, .cpu_id = 218, .valid = 0, .name = "" },
+ { .fc_id = 370, .cpu_id = 219, .valid = 0, .name = "" },
+ { .fc_id = 371, .cpu_id = 220, .valid = 0, .name = "" },
+ { .fc_id = 372, .cpu_id = 221, .valid = 0, .name = "" },
+ { .fc_id = 373, .cpu_id = 222, .valid = 0, .name = "" },
+ { .fc_id = 374, .cpu_id = 223, .valid = 0, .name = "" },
+ { .fc_id = 375, .cpu_id = 224, .valid = 0, .name = "" },
+ { .fc_id = 376, .cpu_id = 225, .valid = 0, .name = "" },
+ { .fc_id = 377, .cpu_id = 226, .valid = 0, .name = "" },
+ { .fc_id = 378, .cpu_id = 227, .valid = 0, .name = "" },
+ { .fc_id = 379, .cpu_id = 228, .valid = 0, .name = "" },
+ { .fc_id = 380, .cpu_id = 229, .valid = 1, .name = "MMU_PAGE_FAULT" },
+ { .fc_id = 381, .cpu_id = 230, .valid = 1, .name = "MMU_WR_PERM" },
+ { .fc_id = 382, .cpu_id = 231, .valid = 0, .name = "" },
+ { .fc_id = 383, .cpu_id = 232, .valid = 1, .name = "DMA_BM_CH0" },
+ { .fc_id = 384, .cpu_id = 233, .valid = 1, .name = "DMA_BM_CH1" },
+ { .fc_id = 385, .cpu_id = 234, .valid = 1, .name = "DMA_BM_CH2" },
+ { .fc_id = 386, .cpu_id = 235, .valid = 1, .name = "DMA_BM_CH3" },
+ { .fc_id = 387, .cpu_id = 236, .valid = 1, .name = "DMA_BM_CH4" },
+ { .fc_id = 388, .cpu_id = 237, .valid = 1, .name = "DMA_BM_CH5" },
+ { .fc_id = 389, .cpu_id = 238, .valid = 1, .name = "DMA_BM_CH6" },
+ { .fc_id = 390, .cpu_id = 239, .valid = 1, .name = "DMA_BM_CH7" },
+ { .fc_id = 391, .cpu_id = 240, .valid = 0, .name = "" },
+ { .fc_id = 392, .cpu_id = 241, .valid = 0, .name = "" },
+ { .fc_id = 393, .cpu_id = 242, .valid = 0, .name = "" },
+ { .fc_id = 394, .cpu_id = 243, .valid = 0, .name = "" },
+ { .fc_id = 395, .cpu_id = 244, .valid = 1, .name = "HBM0_SPI_0" },
+ { .fc_id = 396, .cpu_id = 245, .valid = 1, .name = "HBM0_SPI_1" },
+ { .fc_id = 397, .cpu_id = 246, .valid = 0, .name = "" },
+ { .fc_id = 398, .cpu_id = 247, .valid = 0, .name = "" },
+ { .fc_id = 399, .cpu_id = 248, .valid = 1, .name = "HBM1_SPI_0" },
+ { .fc_id = 400, .cpu_id = 249, .valid = 1, .name = "HBM1_SPI_1" },
+ { .fc_id = 401, .cpu_id = 250, .valid = 0, .name = "" },
+ { .fc_id = 402, .cpu_id = 251, .valid = 0, .name = "" },
+ { .fc_id = 403, .cpu_id = 252, .valid = 1, .name = "HBM2_SPI_0" },
+ { .fc_id = 404, .cpu_id = 253, .valid = 1, .name = "HBM2_SPI_1" },
+ { .fc_id = 405, .cpu_id = 254, .valid = 0, .name = "" },
+ { .fc_id = 406, .cpu_id = 255, .valid = 0, .name = "" },
+ { .fc_id = 407, .cpu_id = 256, .valid = 1, .name = "HBM3_SPI_0" },
+ { .fc_id = 408, .cpu_id = 257, .valid = 1, .name = "HBM3_SPI_1" },
+ { .fc_id = 409, .cpu_id = 258, .valid = 0, .name = "" },
+ { .fc_id = 410, .cpu_id = 259, .valid = 0, .name = "" },
+ { .fc_id = 411, .cpu_id = 260, .valid = 0, .name = "" },
+ { .fc_id = 412, .cpu_id = 261, .valid = 0, .name = "" },
+ { .fc_id = 413, .cpu_id = 262, .valid = 0, .name = "" },
+ { .fc_id = 414, .cpu_id = 263, .valid = 0, .name = "" },
+ { .fc_id = 415, .cpu_id = 264, .valid = 0, .name = "" },
+ { .fc_id = 416, .cpu_id = 265, .valid = 0, .name = "" },
+ { .fc_id = 417, .cpu_id = 266, .valid = 0, .name = "" },
+ { .fc_id = 418, .cpu_id = 267, .valid = 0, .name = "" },
+ { .fc_id = 419, .cpu_id = 268, .valid = 0, .name = "" },
+ { .fc_id = 420, .cpu_id = 269, .valid = 0, .name = "" },
+ { .fc_id = 421, .cpu_id = 270, .valid = 1, .name = "PSOC_GPIO_U16_0" },
+ { .fc_id = 422, .cpu_id = 271, .valid = 0, .name = "" },
+ { .fc_id = 423, .cpu_id = 272, .valid = 0, .name = "" },
+ { .fc_id = 424, .cpu_id = 273, .valid = 0, .name = "" },
+ { .fc_id = 425, .cpu_id = 274, .valid = 0, .name = "" },
+ { .fc_id = 426, .cpu_id = 275, .valid = 0, .name = "" },
+ { .fc_id = 427, .cpu_id = 276, .valid = 0, .name = "" },
+ { .fc_id = 428, .cpu_id = 277, .valid = 0, .name = "" },
+ { .fc_id = 429, .cpu_id = 278, .valid = 0, .name = "" },
+ { .fc_id = 430, .cpu_id = 279, .valid = 0, .name = "" },
+ { .fc_id = 431, .cpu_id = 280, .valid = 0, .name = "" },
+ { .fc_id = 432, .cpu_id = 281, .valid = 0, .name = "" },
+ { .fc_id = 433, .cpu_id = 282, .valid = 0, .name = "" },
+ { .fc_id = 434, .cpu_id = 283, .valid = 0, .name = "" },
+ { .fc_id = 435, .cpu_id = 284, .valid = 0, .name = "" },
+ { .fc_id = 436, .cpu_id = 285, .valid = 0, .name = "" },
+ { .fc_id = 437, .cpu_id = 286, .valid = 0, .name = "" },
+ { .fc_id = 438, .cpu_id = 287, .valid = 0, .name = "" },
+ { .fc_id = 439, .cpu_id = 288, .valid = 0, .name = "" },
+ { .fc_id = 440, .cpu_id = 289, .valid = 0, .name = "" },
+ { .fc_id = 441, .cpu_id = 290, .valid = 0, .name = "" },
+ { .fc_id = 442, .cpu_id = 291, .valid = 0, .name = "" },
+ { .fc_id = 443, .cpu_id = 292, .valid = 0, .name = "" },
+ { .fc_id = 444, .cpu_id = 293, .valid = 0, .name = "" },
+ { .fc_id = 445, .cpu_id = 294, .valid = 0, .name = "" },
+ { .fc_id = 446, .cpu_id = 295, .valid = 0, .name = "" },
+ { .fc_id = 447, .cpu_id = 296, .valid = 0, .name = "" },
+ { .fc_id = 448, .cpu_id = 297, .valid = 0, .name = "" },
+ { .fc_id = 449, .cpu_id = 298, .valid = 0, .name = "" },
+ { .fc_id = 450, .cpu_id = 299, .valid = 0, .name = "" },
+ { .fc_id = 451, .cpu_id = 300, .valid = 0, .name = "" },
+ { .fc_id = 452, .cpu_id = 301, .valid = 0, .name = "" },
+ { .fc_id = 453, .cpu_id = 302, .valid = 0, .name = "" },
+ { .fc_id = 454, .cpu_id = 303, .valid = 0, .name = "" },
+ { .fc_id = 455, .cpu_id = 304, .valid = 0, .name = "" },
+ { .fc_id = 456, .cpu_id = 305, .valid = 0, .name = "" },
+ { .fc_id = 457, .cpu_id = 306, .valid = 0, .name = "" },
+ { .fc_id = 458, .cpu_id = 307, .valid = 0, .name = "" },
+ { .fc_id = 459, .cpu_id = 308, .valid = 0, .name = "" },
+ { .fc_id = 460, .cpu_id = 309, .valid = 0, .name = "" },
+ { .fc_id = 461, .cpu_id = 310, .valid = 0, .name = "" },
+ { .fc_id = 462, .cpu_id = 311, .valid = 0, .name = "" },
+ { .fc_id = 463, .cpu_id = 312, .valid = 0, .name = "" },
+ { .fc_id = 464, .cpu_id = 313, .valid = 0, .name = "" },
+ { .fc_id = 465, .cpu_id = 314, .valid = 0, .name = "" },
+ { .fc_id = 466, .cpu_id = 315, .valid = 0, .name = "" },
+ { .fc_id = 467, .cpu_id = 316, .valid = 0, .name = "" },
+ { .fc_id = 468, .cpu_id = 317, .valid = 0, .name = "" },
+ { .fc_id = 469, .cpu_id = 318, .valid = 0, .name = "" },
+ { .fc_id = 470, .cpu_id = 319, .valid = 0, .name = "" },
+ { .fc_id = 471, .cpu_id = 320, .valid = 0, .name = "" },
+ { .fc_id = 472, .cpu_id = 321, .valid = 0, .name = "" },
+ { .fc_id = 473, .cpu_id = 322, .valid = 0, .name = "" },
+ { .fc_id = 474, .cpu_id = 323, .valid = 0, .name = "" },
+ { .fc_id = 475, .cpu_id = 324, .valid = 0, .name = "" },
+ { .fc_id = 476, .cpu_id = 325, .valid = 0, .name = "" },
+ { .fc_id = 477, .cpu_id = 326, .valid = 0, .name = "" },
+ { .fc_id = 478, .cpu_id = 327, .valid = 0, .name = "" },
+ { .fc_id = 479, .cpu_id = 328, .valid = 0, .name = "" },
+ { .fc_id = 480, .cpu_id = 329, .valid = 0, .name = "" },
+ { .fc_id = 481, .cpu_id = 330, .valid = 0, .name = "" },
+ { .fc_id = 482, .cpu_id = 331, .valid = 0, .name = "" },
+ { .fc_id = 483, .cpu_id = 332, .valid = 0, .name = "" },
+ { .fc_id = 484, .cpu_id = 333, .valid = 1, .name = "PI_UPDATE" },
+ { .fc_id = 485, .cpu_id = 334, .valid = 1, .name = "HALT_MACHINE" },
+ { .fc_id = 486, .cpu_id = 335, .valid = 1, .name = "INTS_REGISTER" },
+ { .fc_id = 487, .cpu_id = 336, .valid = 1, .name = "SOFT_RESET" },
+ { .fc_id = 488, .cpu_id = 337, .valid = 0, .name = "" },
+ { .fc_id = 489, .cpu_id = 338, .valid = 0, .name = "" },
+ { .fc_id = 490, .cpu_id = 339, .valid = 0, .name = "" },
+ { .fc_id = 491, .cpu_id = 340, .valid = 0, .name = "" },
+ { .fc_id = 492, .cpu_id = 341, .valid = 0, .name = "" },
+ { .fc_id = 493, .cpu_id = 342, .valid = 0, .name = "" },
+ { .fc_id = 494, .cpu_id = 343, .valid = 0, .name = "" },
+ { .fc_id = 495, .cpu_id = 344, .valid = 0, .name = "" },
+ { .fc_id = 496, .cpu_id = 345, .valid = 0, .name = "" },
+ { .fc_id = 497, .cpu_id = 346, .valid = 0, .name = "" },
+ { .fc_id = 498, .cpu_id = 347, .valid = 0, .name = "" },
+ { .fc_id = 499, .cpu_id = 348, .valid = 0, .name = "" },
+ { .fc_id = 500, .cpu_id = 349, .valid = 0, .name = "" },
+ { .fc_id = 501, .cpu_id = 350, .valid = 0, .name = "" },
+ { .fc_id = 502, .cpu_id = 351, .valid = 0, .name = "" },
+ { .fc_id = 503, .cpu_id = 352, .valid = 0, .name = "" },
+ { .fc_id = 504, .cpu_id = 353, .valid = 0, .name = "" },
+ { .fc_id = 505, .cpu_id = 354, .valid = 0, .name = "" },
+ { .fc_id = 506, .cpu_id = 355, .valid = 0, .name = "" },
+ { .fc_id = 507, .cpu_id = 356, .valid = 0, .name = "" },
+ { .fc_id = 508, .cpu_id = 357, .valid = 0, .name = "" },
+ { .fc_id = 509, .cpu_id = 358, .valid = 0, .name = "" },
+ { .fc_id = 510, .cpu_id = 359, .valid = 0, .name = "" },
+ { .fc_id = 511, .cpu_id = 360, .valid = 0, .name = "" },
+ { .fc_id = 512, .cpu_id = 361, .valid = 0, .name = "" },
+ { .fc_id = 513, .cpu_id = 362, .valid = 0, .name = "" },
+ { .fc_id = 514, .cpu_id = 363, .valid = 0, .name = "" },
+ { .fc_id = 515, .cpu_id = 364, .valid = 0, .name = "" },
+ { .fc_id = 516, .cpu_id = 365, .valid = 0, .name = "" },
+ { .fc_id = 517, .cpu_id = 366, .valid = 0, .name = "" },
+ { .fc_id = 518, .cpu_id = 367, .valid = 0, .name = "" },
+ { .fc_id = 519, .cpu_id = 368, .valid = 0, .name = "" },
+ { .fc_id = 520, .cpu_id = 369, .valid = 0, .name = "" },
+ { .fc_id = 521, .cpu_id = 370, .valid = 0, .name = "" },
+ { .fc_id = 522, .cpu_id = 371, .valid = 0, .name = "" },
+ { .fc_id = 523, .cpu_id = 372, .valid = 0, .name = "" },
+ { .fc_id = 524, .cpu_id = 373, .valid = 0, .name = "" },
+ { .fc_id = 525, .cpu_id = 374, .valid = 0, .name = "" },
+ { .fc_id = 526, .cpu_id = 375, .valid = 0, .name = "" },
+ { .fc_id = 527, .cpu_id = 376, .valid = 0, .name = "" },
+ { .fc_id = 528, .cpu_id = 377, .valid = 0, .name = "" },
+ { .fc_id = 529, .cpu_id = 378, .valid = 0, .name = "" },
+ { .fc_id = 530, .cpu_id = 379, .valid = 0, .name = "" },
+ { .fc_id = 531, .cpu_id = 380, .valid = 0, .name = "" },
+ { .fc_id = 532, .cpu_id = 381, .valid = 0, .name = "" },
+ { .fc_id = 533, .cpu_id = 382, .valid = 0, .name = "" },
+ { .fc_id = 534, .cpu_id = 383, .valid = 0, .name = "" },
+ { .fc_id = 535, .cpu_id = 384, .valid = 0, .name = "" },
+ { .fc_id = 536, .cpu_id = 385, .valid = 0, .name = "" },
+ { .fc_id = 537, .cpu_id = 386, .valid = 0, .name = "" },
+ { .fc_id = 538, .cpu_id = 387, .valid = 0, .name = "" },
+ { .fc_id = 539, .cpu_id = 388, .valid = 0, .name = "" },
+ { .fc_id = 540, .cpu_id = 389, .valid = 0, .name = "" },
+ { .fc_id = 541, .cpu_id = 390, .valid = 0, .name = "" },
+ { .fc_id = 542, .cpu_id = 391, .valid = 0, .name = "" },
+ { .fc_id = 543, .cpu_id = 392, .valid = 0, .name = "" },
+ { .fc_id = 544, .cpu_id = 393, .valid = 0, .name = "" },
+ { .fc_id = 545, .cpu_id = 394, .valid = 0, .name = "" },
+ { .fc_id = 546, .cpu_id = 395, .valid = 0, .name = "" },
+ { .fc_id = 547, .cpu_id = 396, .valid = 0, .name = "" },
+ { .fc_id = 548, .cpu_id = 397, .valid = 1, .name = "RAZWI_OR_ADC" },
+ { .fc_id = 549, .cpu_id = 398, .valid = 0, .name = "" },
+ { .fc_id = 550, .cpu_id = 399, .valid = 0, .name = "" },
+ { .fc_id = 551, .cpu_id = 400, .valid = 0, .name = "" },
+ { .fc_id = 552, .cpu_id = 401, .valid = 0, .name = "" },
+ { .fc_id = 553, .cpu_id = 402, .valid = 0, .name = "" },
+ { .fc_id = 554, .cpu_id = 403, .valid = 0, .name = "" },
+ { .fc_id = 555, .cpu_id = 404, .valid = 0, .name = "" },
+ { .fc_id = 556, .cpu_id = 405, .valid = 0, .name = "" },
+ { .fc_id = 557, .cpu_id = 406, .valid = 0, .name = "" },
+ { .fc_id = 558, .cpu_id = 407, .valid = 0, .name = "" },
+ { .fc_id = 559, .cpu_id = 408, .valid = 0, .name = "" },
+ { .fc_id = 560, .cpu_id = 409, .valid = 0, .name = "" },
+ { .fc_id = 561, .cpu_id = 410, .valid = 0, .name = "" },
+ { .fc_id = 562, .cpu_id = 411, .valid = 0, .name = "" },
+ { .fc_id = 563, .cpu_id = 412, .valid = 0, .name = "" },
+ { .fc_id = 564, .cpu_id = 413, .valid = 0, .name = "" },
+ { .fc_id = 565, .cpu_id = 414, .valid = 0, .name = "" },
+ { .fc_id = 566, .cpu_id = 415, .valid = 0, .name = "" },
+ { .fc_id = 567, .cpu_id = 416, .valid = 0, .name = "" },
+ { .fc_id = 568, .cpu_id = 417, .valid = 0, .name = "" },
+ { .fc_id = 569, .cpu_id = 418, .valid = 0, .name = "" },
+ { .fc_id = 570, .cpu_id = 419, .valid = 0, .name = "" },
+ { .fc_id = 571, .cpu_id = 420, .valid = 0, .name = "" },
+ { .fc_id = 572, .cpu_id = 421, .valid = 1, .name = "TPC0_QM" },
+ { .fc_id = 573, .cpu_id = 422, .valid = 1, .name = "TPC1_QM" },
+ { .fc_id = 574, .cpu_id = 423, .valid = 1, .name = "TPC2_QM" },
+ { .fc_id = 575, .cpu_id = 424, .valid = 1, .name = "TPC3_QM" },
+ { .fc_id = 576, .cpu_id = 425, .valid = 1, .name = "TPC4_QM" },
+ { .fc_id = 577, .cpu_id = 426, .valid = 1, .name = "TPC5_QM" },
+ { .fc_id = 578, .cpu_id = 427, .valid = 1, .name = "TPC6_QM" },
+ { .fc_id = 579, .cpu_id = 428, .valid = 1, .name = "TPC7_QM" },
+ { .fc_id = 580, .cpu_id = 429, .valid = 0, .name = "" },
+ { .fc_id = 581, .cpu_id = 430, .valid = 1, .name = "MME0_QM" },
+ { .fc_id = 582, .cpu_id = 431, .valid = 1, .name = "MME2_QM" },
+ { .fc_id = 583, .cpu_id = 432, .valid = 1, .name = "DMA0_QM" },
+ { .fc_id = 584, .cpu_id = 433, .valid = 1, .name = "DMA1_QM" },
+ { .fc_id = 585, .cpu_id = 434, .valid = 1, .name = "DMA2_QM" },
+ { .fc_id = 586, .cpu_id = 435, .valid = 1, .name = "DMA3_QM" },
+ { .fc_id = 587, .cpu_id = 436, .valid = 1, .name = "DMA4_QM" },
+ { .fc_id = 588, .cpu_id = 437, .valid = 1, .name = "DMA5_QM" },
+ { .fc_id = 589, .cpu_id = 438, .valid = 1, .name = "DMA6_QM" },
+ { .fc_id = 590, .cpu_id = 439, .valid = 1, .name = "DMA7_QM" },
+ { .fc_id = 591, .cpu_id = 440, .valid = 0, .name = "" },
+ { .fc_id = 592, .cpu_id = 441, .valid = 0, .name = "" },
+ { .fc_id = 593, .cpu_id = 442, .valid = 0, .name = "" },
+ { .fc_id = 594, .cpu_id = 443, .valid = 1, .name = "NIC0_QM0" },
+ { .fc_id = 595, .cpu_id = 444, .valid = 1, .name = "NIC0_QM1" },
+ { .fc_id = 596, .cpu_id = 445, .valid = 1, .name = "NIC1_QM0" },
+ { .fc_id = 597, .cpu_id = 446, .valid = 1, .name = "NIC1_QM1" },
+ { .fc_id = 598, .cpu_id = 447, .valid = 1, .name = "NIC2_QM0" },
+ { .fc_id = 599, .cpu_id = 448, .valid = 1, .name = "NIC2_QM1" },
+ { .fc_id = 600, .cpu_id = 449, .valid = 1, .name = "NIC3_QM0" },
+ { .fc_id = 601, .cpu_id = 450, .valid = 1, .name = "NIC3_QM1" },
+ { .fc_id = 602, .cpu_id = 451, .valid = 1, .name = "NIC4_QM0" },
+ { .fc_id = 603, .cpu_id = 452, .valid = 1, .name = "NIC4_QM1" },
+ { .fc_id = 604, .cpu_id = 453, .valid = 1, .name = "DMA0_CORE" },
+ { .fc_id = 605, .cpu_id = 454, .valid = 1, .name = "DMA1_CORE" },
+ { .fc_id = 606, .cpu_id = 455, .valid = 1, .name = "DMA2_CORE" },
+ { .fc_id = 607, .cpu_id = 456, .valid = 1, .name = "DMA3_CORE" },
+ { .fc_id = 608, .cpu_id = 457, .valid = 1, .name = "DMA4_CORE" },
+ { .fc_id = 609, .cpu_id = 458, .valid = 1, .name = "DMA5_CORE" },
+ { .fc_id = 610, .cpu_id = 459, .valid = 1, .name = "DMA6_CORE" },
+ { .fc_id = 611, .cpu_id = 460, .valid = 1, .name = "DMA7_CORE" },
+ { .fc_id = 612, .cpu_id = 461, .valid = 1, .name = "NIC0_QP0" },
+ { .fc_id = 613, .cpu_id = 462, .valid = 1, .name = "NIC0_QP1" },
+ { .fc_id = 614, .cpu_id = 463, .valid = 1, .name = "NIC1_QP0" },
+ { .fc_id = 615, .cpu_id = 464, .valid = 1, .name = "NIC1_QP1" },
+ { .fc_id = 616, .cpu_id = 465, .valid = 1, .name = "NIC2_QP0" },
+ { .fc_id = 617, .cpu_id = 466, .valid = 1, .name = "NIC2_QP1" },
+ { .fc_id = 618, .cpu_id = 467, .valid = 1, .name = "NIC3_QP0" },
+ { .fc_id = 619, .cpu_id = 468, .valid = 1, .name = "NIC3_QP1" },
+ { .fc_id = 620, .cpu_id = 469, .valid = 1, .name = "NIC4_QP0" },
+ { .fc_id = 621, .cpu_id = 470, .valid = 1, .name = "NIC4_QP1" },
+ { .fc_id = 622, .cpu_id = 471, .valid = 0, .name = "" },
+ { .fc_id = 623, .cpu_id = 472, .valid = 0, .name = "" },
+ { .fc_id = 624, .cpu_id = 473, .valid = 0, .name = "" },
+ { .fc_id = 625, .cpu_id = 474, .valid = 0, .name = "" },
+ { .fc_id = 626, .cpu_id = 475, .valid = 0, .name = "" },
+ { .fc_id = 627, .cpu_id = 476, .valid = 0, .name = "" },
+ { .fc_id = 628, .cpu_id = 477, .valid = 0, .name = "" },
+ { .fc_id = 629, .cpu_id = 478, .valid = 0, .name = "" },
+ { .fc_id = 630, .cpu_id = 479, .valid = 0, .name = "" },
+ { .fc_id = 631, .cpu_id = 480, .valid = 0, .name = "" },
+ { .fc_id = 632, .cpu_id = 481, .valid = 0, .name = "" },
+ { .fc_id = 633, .cpu_id = 482, .valid = 0, .name = "" },
+ { .fc_id = 634, .cpu_id = 483, .valid = 0, .name = "" },
+ { .fc_id = 635, .cpu_id = 484, .valid = 0, .name = "" },
+ { .fc_id = 636, .cpu_id = 485, .valid = 0, .name = "" },
+ { .fc_id = 637, .cpu_id = 486, .valid = 0, .name = "" },
+ { .fc_id = 638, .cpu_id = 487, .valid = 0, .name = "" },
+ { .fc_id = 639, .cpu_id = 488, .valid = 0, .name = "" },
+ { .fc_id = 640, .cpu_id = 489, .valid = 0, .name = "" },
+ { .fc_id = 641, .cpu_id = 490, .valid = 0, .name = "" },
+ { .fc_id = 642, .cpu_id = 491, .valid = 0, .name = "" },
+ { .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" },
+ { .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" },
+ { .fc_id = 645, .cpu_id = 494, .valid = 0, .name = "" },
+ { .fc_id = 646, .cpu_id = 495, .valid = 0, .name = "" },
+ { .fc_id = 647, .cpu_id = 496, .valid = 0, .name = "" },
+ { .fc_id = 648, .cpu_id = 497, .valid = 0, .name = "" },
+ { .fc_id = 649, .cpu_id = 498, .valid = 0, .name = "" },
+ { .fc_id = 650, .cpu_id = 499, .valid = 0, .name = "" },
+ { .fc_id = 651, .cpu_id = 500, .valid = 0, .name = "" },
+ { .fc_id = 652, .cpu_id = 501, .valid = 0, .name = "" },
+ { .fc_id = 653, .cpu_id = 502, .valid = 0, .name = "" },
+ { .fc_id = 654, .cpu_id = 503, .valid = 0, .name = "" },
+ { .fc_id = 655, .cpu_id = 504, .valid = 0, .name = "" },
+ { .fc_id = 656, .cpu_id = 505, .valid = 0, .name = "" },
+ { .fc_id = 657, .cpu_id = 506, .valid = 0, .name = "" },
+ { .fc_id = 658, .cpu_id = 507, .valid = 1, .name = "FIX_POWER_ENV_S" },
+ { .fc_id = 659, .cpu_id = 508, .valid = 1, .name = "FIX_POWER_ENV_E" },
+ { .fc_id = 660, .cpu_id = 509, .valid = 1,
+ .name = "FIX_THERMAL_ENV_S" },
+ { .fc_id = 661, .cpu_id = 510, .valid = 1,
+ .name = "FIX_THERMAL_ENV_E" },
+ { .fc_id = 662, .cpu_id = 511, .valid = 1, .name = "RAZWI_OR_ADC_SW" },
+};
+
+#endif /* __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h b/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
new file mode 100644
index 000000000000..c45cc7f4d4d7
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_CORESIGHT_H
+#define GAUDI_CORESIGHT_H
+
+enum gaudi_debug_stm_regs_index {
+ GAUDI_STM_FIRST = 0,
+ GAUDI_STM_MME0_ACC = GAUDI_STM_FIRST,
+ GAUDI_STM_MME0_SBAB,
+ GAUDI_STM_MME0_CTRL,
+ GAUDI_STM_MME1_ACC,
+ GAUDI_STM_MME1_SBAB,
+ GAUDI_STM_MME1_CTRL,
+ GAUDI_STM_MME2_ACC,
+ GAUDI_STM_MME2_SBAB,
+ GAUDI_STM_MME2_CTRL,
+ GAUDI_STM_MME3_ACC,
+ GAUDI_STM_MME3_SBAB,
+ GAUDI_STM_MME3_CTRL,
+ GAUDI_STM_DMA_IF_W_S,
+ GAUDI_STM_DMA_IF_E_S,
+ GAUDI_STM_DMA_IF_W_N,
+ GAUDI_STM_DMA_IF_E_N,
+ GAUDI_STM_CPU,
+ GAUDI_STM_DMA_CH_0_CS,
+ GAUDI_STM_DMA_CH_1_CS,
+ GAUDI_STM_DMA_CH_2_CS,
+ GAUDI_STM_DMA_CH_3_CS,
+ GAUDI_STM_DMA_CH_4_CS,
+ GAUDI_STM_DMA_CH_5_CS,
+ GAUDI_STM_DMA_CH_6_CS,
+ GAUDI_STM_DMA_CH_7_CS,
+ GAUDI_STM_PCIE,
+ GAUDI_STM_MMU_CS,
+ GAUDI_STM_PSOC,
+ GAUDI_STM_NIC0_0,
+ GAUDI_STM_NIC0_1,
+ GAUDI_STM_NIC1_0,
+ GAUDI_STM_NIC1_1,
+ GAUDI_STM_NIC2_0,
+ GAUDI_STM_NIC2_1,
+ GAUDI_STM_NIC3_0,
+ GAUDI_STM_NIC3_1,
+ GAUDI_STM_NIC4_0,
+ GAUDI_STM_NIC4_1,
+ GAUDI_STM_TPC0_EML,
+ GAUDI_STM_TPC1_EML,
+ GAUDI_STM_TPC2_EML,
+ GAUDI_STM_TPC3_EML,
+ GAUDI_STM_TPC4_EML,
+ GAUDI_STM_TPC5_EML,
+ GAUDI_STM_TPC6_EML,
+ GAUDI_STM_TPC7_EML,
+ GAUDI_STM_LAST = GAUDI_STM_TPC7_EML
+};
+
+enum gaudi_debug_etf_regs_index {
+ GAUDI_ETF_FIRST = 0,
+ GAUDI_ETF_MME0_ACC = GAUDI_ETF_FIRST,
+ GAUDI_ETF_MME0_SBAB,
+ GAUDI_ETF_MME0_CTRL,
+ GAUDI_ETF_MME1_ACC,
+ GAUDI_ETF_MME1_SBAB,
+ GAUDI_ETF_MME1_CTRL,
+ GAUDI_ETF_MME2_ACC,
+ GAUDI_ETF_MME2_SBAB,
+ GAUDI_ETF_MME2_CTRL,
+ GAUDI_ETF_MME3_ACC,
+ GAUDI_ETF_MME3_SBAB,
+ GAUDI_ETF_MME3_CTRL,
+ GAUDI_ETF_DMA_IF_W_S,
+ GAUDI_ETF_DMA_IF_E_S,
+ GAUDI_ETF_DMA_IF_W_N,
+ GAUDI_ETF_DMA_IF_E_N,
+ GAUDI_ETF_CPU_0,
+ GAUDI_ETF_CPU_1,
+ GAUDI_ETF_CPU_TRACE,
+ GAUDI_ETF_DMA_CH_0_CS,
+ GAUDI_ETF_DMA_CH_1_CS,
+ GAUDI_ETF_DMA_CH_2_CS,
+ GAUDI_ETF_DMA_CH_3_CS,
+ GAUDI_ETF_DMA_CH_4_CS,
+ GAUDI_ETF_DMA_CH_5_CS,
+ GAUDI_ETF_DMA_CH_6_CS,
+ GAUDI_ETF_DMA_CH_7_CS,
+ GAUDI_ETF_PCIE,
+ GAUDI_ETF_MMU_CS,
+ GAUDI_ETF_PSOC,
+ GAUDI_ETF_NIC0_0,
+ GAUDI_ETF_NIC0_1,
+ GAUDI_ETF_NIC1_0,
+ GAUDI_ETF_NIC1_1,
+ GAUDI_ETF_NIC2_0,
+ GAUDI_ETF_NIC2_1,
+ GAUDI_ETF_NIC3_0,
+ GAUDI_ETF_NIC3_1,
+ GAUDI_ETF_NIC4_0,
+ GAUDI_ETF_NIC4_1,
+ GAUDI_ETF_TPC0_EML,
+ GAUDI_ETF_TPC1_EML,
+ GAUDI_ETF_TPC2_EML,
+ GAUDI_ETF_TPC3_EML,
+ GAUDI_ETF_TPC4_EML,
+ GAUDI_ETF_TPC5_EML,
+ GAUDI_ETF_TPC6_EML,
+ GAUDI_ETF_TPC7_EML,
+ GAUDI_ETF_LAST = GAUDI_ETF_TPC7_EML
+};
+
+enum gaudi_debug_funnel_regs_index {
+ GAUDI_FUNNEL_FIRST = 0,
+ GAUDI_FUNNEL_MME0_ACC = GAUDI_FUNNEL_FIRST,
+ GAUDI_FUNNEL_MME1_ACC,
+ GAUDI_FUNNEL_MME2_ACC,
+ GAUDI_FUNNEL_MME3_ACC,
+ GAUDI_FUNNEL_SRAM_Y0_X0,
+ GAUDI_FUNNEL_SRAM_Y0_X1,
+ GAUDI_FUNNEL_SRAM_Y0_X2,
+ GAUDI_FUNNEL_SRAM_Y0_X3,
+ GAUDI_FUNNEL_SRAM_Y0_X4,
+ GAUDI_FUNNEL_SRAM_Y0_X5,
+ GAUDI_FUNNEL_SRAM_Y0_X6,
+ GAUDI_FUNNEL_SRAM_Y0_X7,
+ GAUDI_FUNNEL_SRAM_Y1_X0,
+ GAUDI_FUNNEL_SRAM_Y1_X1,
+ GAUDI_FUNNEL_SRAM_Y1_X2,
+ GAUDI_FUNNEL_SRAM_Y1_X3,
+ GAUDI_FUNNEL_SRAM_Y1_X4,
+ GAUDI_FUNNEL_SRAM_Y1_X5,
+ GAUDI_FUNNEL_SRAM_Y1_X6,
+ GAUDI_FUNNEL_SRAM_Y1_X7,
+ GAUDI_FUNNEL_SRAM_Y2_X0,
+ GAUDI_FUNNEL_SRAM_Y2_X1,
+ GAUDI_FUNNEL_SRAM_Y2_X2,
+ GAUDI_FUNNEL_SRAM_Y2_X3,
+ GAUDI_FUNNEL_SRAM_Y2_X4,
+ GAUDI_FUNNEL_SRAM_Y2_X5,
+ GAUDI_FUNNEL_SRAM_Y2_X6,
+ GAUDI_FUNNEL_SRAM_Y2_X7,
+ GAUDI_FUNNEL_SRAM_Y3_X0,
+ GAUDI_FUNNEL_SRAM_Y3_X1,
+ GAUDI_FUNNEL_SRAM_Y3_X2,
+ GAUDI_FUNNEL_SRAM_Y3_X4,
+ GAUDI_FUNNEL_SRAM_Y3_X3,
+ GAUDI_FUNNEL_SRAM_Y3_X5,
+ GAUDI_FUNNEL_SRAM_Y3_X6,
+ GAUDI_FUNNEL_SRAM_Y3_X7,
+ GAUDI_FUNNEL_SIF_0,
+ GAUDI_FUNNEL_SIF_1,
+ GAUDI_FUNNEL_SIF_2,
+ GAUDI_FUNNEL_SIF_3,
+ GAUDI_FUNNEL_SIF_4,
+ GAUDI_FUNNEL_SIF_5,
+ GAUDI_FUNNEL_SIF_6,
+ GAUDI_FUNNEL_SIF_7,
+ GAUDI_FUNNEL_NIF_0,
+ GAUDI_FUNNEL_NIF_1,
+ GAUDI_FUNNEL_NIF_2,
+ GAUDI_FUNNEL_NIF_3,
+ GAUDI_FUNNEL_NIF_4,
+ GAUDI_FUNNEL_NIF_5,
+ GAUDI_FUNNEL_NIF_6,
+ GAUDI_FUNNEL_NIF_7,
+ GAUDI_FUNNEL_DMA_IF_W_S,
+ GAUDI_FUNNEL_DMA_IF_E_S,
+ GAUDI_FUNNEL_DMA_IF_W_N,
+ GAUDI_FUNNEL_DMA_IF_E_N,
+ GAUDI_FUNNEL_CPU,
+ GAUDI_FUNNEL_NIC_TPC_W_S,
+ GAUDI_FUNNEL_NIC_TPC_E_S,
+ GAUDI_FUNNEL_NIC_TPC_W_N,
+ GAUDI_FUNNEL_NIC_TPC_E_N,
+ GAUDI_FUNNEL_PCIE,
+ GAUDI_FUNNEL_PSOC,
+ GAUDI_FUNNEL_NIC0,
+ GAUDI_FUNNEL_NIC1,
+ GAUDI_FUNNEL_NIC2,
+ GAUDI_FUNNEL_NIC3,
+ GAUDI_FUNNEL_NIC4,
+ GAUDI_FUNNEL_TPC0_EML,
+ GAUDI_FUNNEL_TPC1_EML,
+ GAUDI_FUNNEL_TPC2_EML,
+ GAUDI_FUNNEL_TPC3_EML,
+ GAUDI_FUNNEL_TPC4_EML,
+ GAUDI_FUNNEL_TPC5_EML,
+ GAUDI_FUNNEL_TPC6_EML,
+ GAUDI_FUNNEL_TPC7_EML,
+ GAUDI_FUNNEL_LAST = GAUDI_FUNNEL_TPC7_EML
+};
+
+enum gaudi_debug_bmon_regs_index {
+ GAUDI_BMON_FIRST = 0,
+ GAUDI_BMON_MME0_ACC_0 = GAUDI_BMON_FIRST,
+ GAUDI_BMON_MME0_SBAB_0,
+ GAUDI_BMON_MME0_SBAB_1,
+ GAUDI_BMON_MME0_CTRL_0,
+ GAUDI_BMON_MME0_CTRL_1,
+ GAUDI_BMON_MME1_ACC_0,
+ GAUDI_BMON_MME1_SBAB_0,
+ GAUDI_BMON_MME1_SBAB_1,
+ GAUDI_BMON_MME1_CTRL_0,
+ GAUDI_BMON_MME1_CTRL_1,
+ GAUDI_BMON_MME2_ACC_0,
+ GAUDI_BMON_MME2_SBAB_0,
+ GAUDI_BMON_MME2_SBAB_1,
+ GAUDI_BMON_MME2_CTRL_0,
+ GAUDI_BMON_MME2_CTRL_1,
+ GAUDI_BMON_MME3_ACC_0,
+ GAUDI_BMON_MME3_SBAB_0,
+ GAUDI_BMON_MME3_SBAB_1,
+ GAUDI_BMON_MME3_CTRL_0,
+ GAUDI_BMON_MME3_CTRL_1,
+ GAUDI_BMON_DMA_IF_W_S_SOB_WR,
+ GAUDI_BMON_DMA_IF_W_S_0_WR,
+ GAUDI_BMON_DMA_IF_W_S_0_RD,
+ GAUDI_BMON_DMA_IF_W_S_1_WR,
+ GAUDI_BMON_DMA_IF_W_S_1_RD,
+ GAUDI_BMON_DMA_IF_E_S_SOB_WR,
+ GAUDI_BMON_DMA_IF_E_S_0_WR,
+ GAUDI_BMON_DMA_IF_E_S_0_RD,
+ GAUDI_BMON_DMA_IF_E_S_1_WR,
+ GAUDI_BMON_DMA_IF_E_S_1_RD,
+ GAUDI_BMON_DMA_IF_W_N_SOB_WR,
+ GAUDI_BMON_DMA_IF_W_N_HBM0_WR,
+ GAUDI_BMON_DMA_IF_W_N_HBM0_RD,
+ GAUDI_BMON_DMA_IF_W_N_HBM1_WR,
+ GAUDI_BMON_DMA_IF_W_N_HBM1_RD,
+ GAUDI_BMON_DMA_IF_E_N_SOB_WR,
+ GAUDI_BMON_DMA_IF_E_N_HBM0_WR,
+ GAUDI_BMON_DMA_IF_E_N_HBM0_RD,
+ GAUDI_BMON_DMA_IF_E_N_HBM1_WR,
+ GAUDI_BMON_DMA_IF_E_N_HBM1_RD,
+ GAUDI_BMON_CPU_WR,
+ GAUDI_BMON_CPU_RD,
+ GAUDI_BMON_DMA_CH_0_0,
+ GAUDI_BMON_DMA_CH_0_1,
+ GAUDI_BMON_DMA_CH_1_0,
+ GAUDI_BMON_DMA_CH_1_1,
+ GAUDI_BMON_DMA_CH_2_0,
+ GAUDI_BMON_DMA_CH_2_1,
+ GAUDI_BMON_DMA_CH_3_0,
+ GAUDI_BMON_DMA_CH_3_1,
+ GAUDI_BMON_DMA_CH_4_0,
+ GAUDI_BMON_DMA_CH_4_1,
+ GAUDI_BMON_DMA_CH_5_0,
+ GAUDI_BMON_DMA_CH_5_1,
+ GAUDI_BMON_DMA_CH_6_0,
+ GAUDI_BMON_DMA_CH_6_1,
+ GAUDI_BMON_DMA_CH_7_0,
+ GAUDI_BMON_DMA_CH_7_1,
+ GAUDI_BMON_PCIE_MSTR_WR,
+ GAUDI_BMON_PCIE_MSTR_RD,
+ GAUDI_BMON_PCIE_SLV_WR,
+ GAUDI_BMON_PCIE_SLV_RD,
+ GAUDI_BMON_MMU_0,
+ GAUDI_BMON_MMU_1,
+ GAUDI_BMON_NIC0_0,
+ GAUDI_BMON_NIC0_1,
+ GAUDI_BMON_NIC0_2,
+ GAUDI_BMON_NIC0_3,
+ GAUDI_BMON_NIC0_4,
+ GAUDI_BMON_NIC1_0,
+ GAUDI_BMON_NIC1_1,
+ GAUDI_BMON_NIC1_2,
+ GAUDI_BMON_NIC1_3,
+ GAUDI_BMON_NIC1_4,
+ GAUDI_BMON_NIC2_0,
+ GAUDI_BMON_NIC2_1,
+ GAUDI_BMON_NIC2_2,
+ GAUDI_BMON_NIC2_3,
+ GAUDI_BMON_NIC2_4,
+ GAUDI_BMON_NIC3_0,
+ GAUDI_BMON_NIC3_1,
+ GAUDI_BMON_NIC3_2,
+ GAUDI_BMON_NIC3_3,
+ GAUDI_BMON_NIC3_4,
+ GAUDI_BMON_NIC4_0,
+ GAUDI_BMON_NIC4_1,
+ GAUDI_BMON_NIC4_2,
+ GAUDI_BMON_NIC4_3,
+ GAUDI_BMON_NIC4_4,
+ GAUDI_BMON_TPC0_EML_0,
+ GAUDI_BMON_TPC0_EML_1,
+ GAUDI_BMON_TPC0_EML_2,
+ GAUDI_BMON_TPC0_EML_3,
+ GAUDI_BMON_TPC1_EML_0,
+ GAUDI_BMON_TPC1_EML_1,
+ GAUDI_BMON_TPC1_EML_2,
+ GAUDI_BMON_TPC1_EML_3,
+ GAUDI_BMON_TPC2_EML_0,
+ GAUDI_BMON_TPC2_EML_1,
+ GAUDI_BMON_TPC2_EML_2,
+ GAUDI_BMON_TPC2_EML_3,
+ GAUDI_BMON_TPC3_EML_0,
+ GAUDI_BMON_TPC3_EML_1,
+ GAUDI_BMON_TPC3_EML_2,
+ GAUDI_BMON_TPC3_EML_3,
+ GAUDI_BMON_TPC4_EML_0,
+ GAUDI_BMON_TPC4_EML_1,
+ GAUDI_BMON_TPC4_EML_2,
+ GAUDI_BMON_TPC4_EML_3,
+ GAUDI_BMON_TPC5_EML_0,
+ GAUDI_BMON_TPC5_EML_1,
+ GAUDI_BMON_TPC5_EML_2,
+ GAUDI_BMON_TPC5_EML_3,
+ GAUDI_BMON_TPC6_EML_0,
+ GAUDI_BMON_TPC6_EML_1,
+ GAUDI_BMON_TPC6_EML_2,
+ GAUDI_BMON_TPC6_EML_3,
+ GAUDI_BMON_TPC7_EML_0,
+ GAUDI_BMON_TPC7_EML_1,
+ GAUDI_BMON_TPC7_EML_2,
+ GAUDI_BMON_TPC7_EML_3,
+ GAUDI_BMON_LAST = GAUDI_BMON_TPC7_EML_3
+};
+
+enum gaudi_debug_spmu_regs_index {
+ GAUDI_SPMU_FIRST = 0,
+ GAUDI_SPMU_MME0_ACC = GAUDI_SPMU_FIRST,
+ GAUDI_SPMU_MME0_SBAB,
+ GAUDI_SPMU_MME0_CTRL,
+ GAUDI_SPMU_MME1_ACC,
+ GAUDI_SPMU_MME1_SBAB,
+ GAUDI_SPMU_MME1_CTRL,
+ GAUDI_SPMU_MME2_MME2_ACC,
+ GAUDI_SPMU_MME2_SBAB,
+ GAUDI_SPMU_MME2_CTRL,
+ GAUDI_SPMU_MME3_ACC,
+ GAUDI_SPMU_MME3_SBAB,
+ GAUDI_SPMU_MME3_CTRL,
+ GAUDI_SPMU_DMA_CH_0_CS,
+ GAUDI_SPMU_DMA_CH_1_CS,
+ GAUDI_SPMU_DMA_CH_2_CS,
+ GAUDI_SPMU_DMA_CH_3_CS,
+ GAUDI_SPMU_DMA_CH_4_CS,
+ GAUDI_SPMU_DMA_CH_5_CS,
+ GAUDI_SPMU_DMA_CH_6_CS,
+ GAUDI_SPMU_DMA_CH_7_CS,
+ GAUDI_SPMU_PCIE,
+ GAUDI_SPMU_MMU_CS,
+ GAUDI_SPMU_NIC0_0,
+ GAUDI_SPMU_NIC0_1,
+ GAUDI_SPMU_NIC1_0,
+ GAUDI_SPMU_NIC1_1,
+ GAUDI_SPMU_NIC2_0,
+ GAUDI_SPMU_NIC2_1,
+ GAUDI_SPMU_NIC3_0,
+ GAUDI_SPMU_NIC3_1,
+ GAUDI_SPMU_NIC4_0,
+ GAUDI_SPMU_NIC4_1,
+ GAUDI_SPMU_TPC0_EML,
+ GAUDI_SPMU_TPC1_EML,
+ GAUDI_SPMU_TPC2_EML,
+ GAUDI_SPMU_TPC3_EML,
+ GAUDI_SPMU_TPC4_EML,
+ GAUDI_SPMU_TPC5_EML,
+ GAUDI_SPMU_TPC6_EML,
+ GAUDI_SPMU_TPC7_EML,
+ GAUDI_SPMU_LAST = GAUDI_SPMU_TPC7_EML
+};
+
+#endif /* GAUDI_CORESIGHT_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
new file mode 100644
index 000000000000..8aadc6357da1
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_FW_IF_H
+#define GAUDI_FW_IF_H
+
+#define GAUDI_EVENT_QUEUE_MSI_IDX 8
+#define GAUDI_NIC_PORT1_MSI_IDX 10
+#define GAUDI_NIC_PORT3_MSI_IDX 12
+#define GAUDI_NIC_PORT5_MSI_IDX 14
+#define GAUDI_NIC_PORT7_MSI_IDX 16
+#define GAUDI_NIC_PORT9_MSI_IDX 18
+
+#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */
+#define LINUX_FW_OFFSET 0x800000 /* 8MB in HBM */
+
+enum gaudi_pll_index {
+ CPU_PLL = 0,
+ PCI_PLL,
+ SRAM_PLL,
+ HBM_PLL,
+ NIC_PLL,
+ DMA_PLL,
+ MESH_PLL,
+ MME_PLL,
+ TPC_PLL,
+ IF_PLL
+};
+
+#define GAUDI_PLL_FREQ_LOW 200000000 /* 200 MHz */
+
+#endif /* GAUDI_FW_IF_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
new file mode 100644
index 000000000000..96f08050ef0f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_MASKS_H_
+#define GAUDI_MASKS_H_
+
+#include "asic_reg/gaudi_regs.h"
+
+/* Useful masks for bits in various registers */
+#define PCI_DMA_QMAN_ENABLE (\
+ (0xF << DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_EXTERNAL_MAKE_TRUSTED (\
+ (0xF << DMA0_QM_GLBL_PROT_PQF_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_PROT_CQF_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_PROT_CP_SHIFT) | \
+ (0x1 << DMA0_QM_GLBL_PROT_ERR_SHIFT))
+
+#define QMAN_INTERNAL_MAKE_TRUSTED (\
+ (0xF << DMA0_QM_GLBL_PROT_PQF_SHIFT) | \
+ (0x1 << DMA0_QM_GLBL_PROT_ERR_SHIFT))
+
+#define HBM_DMA_QMAN_ENABLE (\
+ (0xF << DMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_MME_ENABLE (\
+ (0xF << MME0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_TPC_ENABLE (\
+ (0xF << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT))
+
+#define QMAN_UPPER_CP_CGM_PWR_GATE_EN (\
+ (0x20 << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) | \
+ (0xA << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) | \
+ (0x10 << DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT) | \
+ (1 << DMA0_QM_CGM_CFG_EN_SHIFT))
+
+#define QMAN_COMMON_CP_CGM_PWR_GATE_EN (\
+ (0x20 << DMA0_QM_CGM_CFG_IDLE_TH_SHIFT) | \
+ (0xA << DMA0_QM_CGM_CFG_G2F_TH_SHIFT) | \
+ (0xF << DMA0_QM_CGM_CFG_CP_IDLE_MASK_SHIFT) | \
+ (1 << DMA0_QM_CGM_CFG_EN_SHIFT))
+
+#define PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0xF << DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << DMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << DMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\
+ (0xF << MME0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
+
+#define MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\
+ (0xF << MME0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
+ (0x1F << MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
+
+#define QMAN_CGM1_PWR_GATE_EN (0xA << DMA0_QM_CGM_CFG1_MASK_TH_SHIFT)
+
+/* RESET registers configuration */
+#define CFG_RST_L_PSOC_SHIFT 0
+#define CFG_RST_L_PCIE_SHIFT 1
+#define CFG_RST_L_PCIE_IF_SHIFT 2
+#define CFG_RST_L_HBM_S_PLL_SHIFT 3
+#define CFG_RST_L_TPC_S_PLL_SHIFT 4
+#define CFG_RST_L_MME_S_PLL_SHIFT 5
+#define CFG_RST_L_CPU_PLL_SHIFT 6
+#define CFG_RST_L_PCIE_PLL_SHIFT 7
+#define CFG_RST_L_NIC_S_PLL_SHIFT 8
+#define CFG_RST_L_HBM_N_PLL_SHIFT 9
+#define CFG_RST_L_TPC_N_PLL_SHIFT 10
+#define CFG_RST_L_MME_N_PLL_SHIFT 11
+#define CFG_RST_L_NIC_N_PLL_SHIFT 12
+#define CFG_RST_L_DMA_W_PLL_SHIFT 13
+#define CFG_RST_L_SIF_W_PLL_SHIFT 14
+#define CFG_RST_L_MESH_W_PLL_SHIFT 15
+#define CFG_RST_L_SRAM_W_PLL_SHIFT 16
+#define CFG_RST_L_DMA_E_PLL_SHIFT 17
+#define CFG_RST_L_SIF_E_PLL_SHIFT 18
+#define CFG_RST_L_MESH_E_PLL_SHIFT 19
+#define CFG_RST_L_SRAM_E_PLL_SHIFT 20
+#define CFG_RST_L_IF_1_SHIFT 21
+#define CFG_RST_L_IF_0_SHIFT 22
+#define CFG_RST_L_IF_2_SHIFT 23
+#define CFG_RST_L_IF_3_SHIFT 24
+#define CFG_RST_L_TPC_0_SHIFT 25
+#define CFG_RST_L_TPC_1_SHIFT 26
+#define CFG_RST_L_TPC_2_SHIFT 27
+#define CFG_RST_L_TPC_3_SHIFT 28
+#define CFG_RST_L_TPC_4_SHIFT 29
+#define CFG_RST_L_TPC_5_SHIFT 30
+#define CFG_RST_L_TPC_6_SHIFT 31
+#define CFG_RST_H_TPC_7_SHIFT 0
+#define CFG_RST_H_MME_0_SHIFT 1
+#define CFG_RST_H_MME_1_SHIFT 2
+#define CFG_RST_H_MME_2_SHIFT 3
+#define CFG_RST_H_MME_3_SHIFT 4
+#define CFG_RST_H_HBM_0_SHIFT 5
+#define CFG_RST_H_HBM_1_SHIFT 6
+#define CFG_RST_H_HBM_2_SHIFT 7
+#define CFG_RST_H_HBM_3_SHIFT 8
+#define CFG_RST_H_NIC_0_SHIFT 9
+#define CFG_RST_H_NIC_1_SHIFT 10
+#define CFG_RST_H_NIC_2_SHIFT 11
+#define CFG_RST_H_NIC_3_SHIFT 12
+#define CFG_RST_H_NIC_4_SHIFT 13
+#define CFG_RST_H_SM_0_SHIFT 14
+#define CFG_RST_H_SM_1_SHIFT 15
+#define CFG_RST_H_SM_2_SHIFT 16
+#define CFG_RST_H_SM_3_SHIFT 17
+#define CFG_RST_H_DMA_0_SHIFT 18
+#define CFG_RST_H_DMA_1_SHIFT 19
+#define CFG_RST_H_CPU_SHIFT 20
+#define CFG_RST_H_MMU_SHIFT 21
+
+
+#define CFG_RST_H_DMA_MASK ((1 << CFG_RST_H_DMA_0_SHIFT) | \
+ (1 << CFG_RST_H_DMA_1_SHIFT))
+
+#define CFG_RST_H_CPU_MASK (1 << CFG_RST_H_CPU_SHIFT)
+#define CFG_RST_H_MMU_MASK (1 << CFG_RST_H_MMU_SHIFT)
+
+#define CFG_RST_H_HBM_MASK ((1 << CFG_RST_H_HBM_0_SHIFT) | \
+ (1 << CFG_RST_H_HBM_1_SHIFT) | \
+ (1 << CFG_RST_H_HBM_2_SHIFT) | \
+ (1 << CFG_RST_H_HBM_3_SHIFT))
+
+#define CFG_RST_H_NIC_MASK ((1 << CFG_RST_H_NIC_0_SHIFT) | \
+ (1 << CFG_RST_H_NIC_1_SHIFT) | \
+ (1 << CFG_RST_H_NIC_2_SHIFT) | \
+ (1 << CFG_RST_H_NIC_3_SHIFT) | \
+ (1 << CFG_RST_H_NIC_4_SHIFT))
+
+#define CFG_RST_H_SM_MASK ((1 << CFG_RST_H_SM_0_SHIFT) | \
+ (1 << CFG_RST_H_SM_1_SHIFT) | \
+ (1 << CFG_RST_H_SM_2_SHIFT) | \
+ (1 << CFG_RST_H_SM_3_SHIFT))
+
+#define CFG_RST_H_MME_MASK ((1 << CFG_RST_H_MME_0_SHIFT) | \
+ (1 << CFG_RST_H_MME_1_SHIFT) | \
+ (1 << CFG_RST_H_MME_2_SHIFT) | \
+ (1 << CFG_RST_H_MME_3_SHIFT))
+
+#define CFG_RST_L_PSOC_MASK (1 << CFG_RST_L_PSOC_SHIFT)
+
+#define CFG_RST_L_IF_MASK ((1 << CFG_RST_L_IF_0_SHIFT) | \
+ (1 << CFG_RST_L_IF_1_SHIFT) | \
+ (1 << CFG_RST_L_IF_2_SHIFT) | \
+ (1 << CFG_RST_L_IF_3_SHIFT))
+
+#define CFG_RST_L_TPC_MASK ((1 << CFG_RST_L_TPC_0_SHIFT) | \
+ (1 << CFG_RST_L_TPC_1_SHIFT) | \
+ (1 << CFG_RST_L_TPC_2_SHIFT) | \
+ (1 << CFG_RST_L_TPC_3_SHIFT) | \
+ (1 << CFG_RST_L_TPC_4_SHIFT) | \
+ (1 << CFG_RST_L_TPC_5_SHIFT) | \
+ (1 << CFG_RST_L_TPC_6_SHIFT))
+
+#define CFG_RST_H_TPC_MASK (1 << CFG_RST_H_TPC_7_SHIFT)
+
+#define CA53_RESET (1 << CFG_RST_H_CPU_SHIFT)
+
+#define UNIT_RST_L_PSOC_SHIFT 0
+#define UNIT_RST_L_PCIE_SHIFT 1
+#define UNIT_RST_L_PCIE_IF_SHIFT 2
+#define UNIT_RST_L_HBM_S_PLL_SHIFT 3
+#define UNIT_RST_L_TPC_S_PLL_SHIFT 4
+#define UNIT_RST_L_MME_S_PLL_SHIFT 5
+#define UNIT_RST_L_CPU_PLL_SHIFT 6
+#define UNIT_RST_L_PCIE_PLL_SHIFT 7
+#define UNIT_RST_L_NIC_S_PLL_SHIFT 8
+#define UNIT_RST_L_HBM_N_PLL_SHIFT 9
+#define UNIT_RST_L_TPC_N_PLL_SHIFT 10
+#define UNIT_RST_L_MME_N_PLL_SHIFT 11
+#define UNIT_RST_L_NIC_N_PLL_SHIFT 12
+#define UNIT_RST_L_DMA_W_PLL_SHIFT 13
+#define UNIT_RST_L_SIF_W_PLL_SHIFT 14
+#define UNIT_RST_L_MESH_W_PLL_SHIFT 15
+#define UNIT_RST_L_SRAM_W_PLL_SHIFT 16
+#define UNIT_RST_L_DMA_E_PLL_SHIFT 17
+#define UNIT_RST_L_SIF_E_PLL_SHIFT 18
+#define UNIT_RST_L_MESH_E_PLL_SHIFT 19
+#define UNIT_RST_L_SRAM_E_PLL_SHIFT 20
+#define UNIT_RST_L_TPC_0_SHIFT 21
+#define UNIT_RST_L_TPC_1_SHIFT 22
+#define UNIT_RST_L_TPC_2_SHIFT 23
+#define UNIT_RST_L_TPC_3_SHIFT 24
+#define UNIT_RST_L_TPC_4_SHIFT 25
+#define UNIT_RST_L_TPC_5_SHIFT 26
+#define UNIT_RST_L_TPC_6_SHIFT 27
+#define UNIT_RST_L_TPC_7_SHIFT 28
+#define UNIT_RST_L_MME_0_SHIFT 29
+#define UNIT_RST_L_MME_1_SHIFT 30
+#define UNIT_RST_L_MME_2_SHIFT 31
+
+#define UNIT_RST_H_MME_3_SHIFT 0
+#define UNIT_RST_H_HBM_0_SHIFT 1
+#define UNIT_RST_H_HBM_1_SHIFT 2
+#define UNIT_RST_H_HBM_2_SHIFT 3
+#define UNIT_RST_H_HBM_3_SHIFT 4
+#define UNIT_RST_H_NIC_0_SHIFT 5
+#define UNIT_RST_H_NIC_1_SHIFT 6
+#define UNIT_RST_H_NIC_2_SHIFT 7
+#define UNIT_RST_H_NIC_3_SHIFT 8
+#define UNIT_RST_H_NIC_4_SHIFT 9
+#define UNIT_RST_H_SM_0_SHIFT 10
+#define UNIT_RST_H_SM_1_SHIFT 11
+#define UNIT_RST_H_SM_2_SHIFT 12
+#define UNIT_RST_H_SM_3_SHIFT 13
+#define UNIT_RST_H_IF_0_SHIFT 14
+#define UNIT_RST_H_IF_1_SHIFT 15
+#define UNIT_RST_H_IF_2_SHIFT 16
+#define UNIT_RST_H_IF_3_SHIFT 17
+#define UNIT_RST_H_DMA_0_SHIFT 18
+#define UNIT_RST_H_DMA_1_SHIFT 19
+#define UNIT_RST_H_CPU_SHIFT 20
+#define UNIT_RST_H_MMU_SHIFT 21
+
+#define UNIT_RST_H_HBM_MASK ((1 << UNIT_RST_H_HBM_0_SHIFT) | \
+ (1 << UNIT_RST_H_HBM_1_SHIFT) | \
+ (1 << UNIT_RST_H_HBM_2_SHIFT) | \
+ (1 << UNIT_RST_H_HBM_3_SHIFT))
+
+#define UNIT_RST_H_NIC_MASK ((1 << UNIT_RST_H_NIC_0_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_1_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_2_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_3_SHIFT) | \
+ (1 << UNIT_RST_H_NIC_4_SHIFT))
+
+#define UNIT_RST_H_SM_MASK ((1 << UNIT_RST_H_SM_0_SHIFT) | \
+ (1 << UNIT_RST_H_SM_1_SHIFT) | \
+ (1 << UNIT_RST_H_SM_2_SHIFT) | \
+ (1 << UNIT_RST_H_SM_3_SHIFT))
+
+#define UNIT_RST_H_MME_MASK ((1 << UNIT_RST_H_MME_0_SHIFT) | \
+ (1 << UNIT_RST_H_MME_1_SHIFT) | \
+ (1 << UNIT_RST_H_MME_2_SHIFT))
+
+#define UNIT_RST_L_MME_MASK (1 << UNIT_RST_L_MME_3_SHIFT)
+
+#define UNIT_RST_L_IF_MASK ((1 << UNIT_RST_L_IF_0_SHIFT) | \
+ (1 << UNIT_RST_L_IF_1_SHIFT) | \
+ (1 << UNIT_RST_L_IF_2_SHIFT) | \
+ (1 << UNIT_RST_L_IF_3_SHIFT))
+
+#define UNIT_RST_L_TPC_MASK ((1 << UNIT_RST_L_TPC_0_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_1_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_2_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_3_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_4_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_5_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_6_SHIFT) | \
+ (1 << UNIT_RST_L_TPC_7_SHIFT))
+
+/* CPU_CA53_CFG_ARM_RST_CONTROL */
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16
+#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20
+#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000
+
+#define CPU_RESET_ASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+#define CPU_RESET_CORE0_DEASSERT (\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
+ 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
+
+/* QM_IDLE_MASK is valid for the QM idle check of all engines */
+#define QM_IDLE_MASK (DMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \
+ DMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \
+ DMA0_QM_GLBL_STS0_CP_IDLE_MASK)
+
+/* CGM_IDLE_MASK is valid for the CGM idle check of all engines */
+#define CGM_IDLE_MASK DMA0_QM_CGM_STS_AGENT_IDLE_MASK
+
+#define TPC_IDLE_MASK ((1 << TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_IQ_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_SB_EMPTY_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_QM_IDLE_SHIFT) | \
+ (1 << TPC0_CFG_STATUS_QM_RDY_SHIFT))
+
+#define MME0_CTRL_ARCH_STATUS_SB_A_EMPTY_MASK 0x80
+#define MME0_CTRL_ARCH_STATUS_SB_B_EMPTY_MASK 0x100
+#define MME0_CTRL_ARCH_STATUS_WBC_AXI_IDLE_MASK 0x1000
+
+#define MME_ARCH_IDLE_MASK (MME0_CTRL_ARCH_STATUS_SB_A_EMPTY_MASK | \
+ MME0_CTRL_ARCH_STATUS_SB_B_EMPTY_MASK | \
+ MME0_CTRL_ARCH_STATUS_WBC_AXI_IDLE_MASK)
+
+#define IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) \
+ ((((qm_glbl_sts0) & QM_IDLE_MASK) == QM_IDLE_MASK) && \
+ (((qm_cgm_sts) & CGM_IDLE_MASK) == CGM_IDLE_MASK))
+
+#define IS_DMA_IDLE(dma_core_sts0) \
+ !(dma_core_sts0 & DMA0_CORE_STS0_BUSY_MASK)
+
+#define IS_TPC_IDLE(tpc_cfg_sts) \
+ (((tpc_cfg_sts) & TPC_IDLE_MASK) == TPC_IDLE_MASK)
+
+#define IS_MME_IDLE(mme_arch_sts) \
+ (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
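As a minimal usage sketch (not part of the patch, and assuming the status
values have already been read from the engine's GLBL_STS0, CGM_STS and
ARCH_STATUS registers), the idle helpers above compose as follows:

/* Hypothetical sketch: all three register values are assumed pre-read */
static bool engines_look_idle(u32 qm_glbl_sts0, u32 qm_cgm_sts,
			      u32 mme_arch_sts)
{
	/* The QM is idle only when PQF, CQF and CP are idle and the
	 * clock-gating agent reports idle as well.
	 */
	bool qm_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);

	/* The MME is idle when both source buffers are empty and the
	 * write-back AXI path is idle.
	 */
	bool mme_idle = IS_MME_IDLE(mme_arch_sts);

	return qm_idle && mme_idle;
}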
+
+enum axi_id {
+ AXI_ID_MME,
+ AXI_ID_TPC,
+ AXI_ID_DMA,
+ AXI_ID_NIC, /* Local NIC */
+ AXI_ID_PCI,
+ AXI_ID_CPU,
+ AXI_ID_PSOC,
+ AXI_ID_MMU,
+ AXI_ID_NIC_FT /* Feed-Through NIC */
+};
+
+/* RAZWI initiator ID is built from the location in the chip and the AXI ID */
+
+#define RAZWI_INITIATOR_AXI_ID_SHIFT 20
+#define RAZWI_INITIATOR_AXI_ID_MASK 0xF
+#define RAZWI_INITIATOR_X_SHIFT 24
+#define RAZWI_INITIATOR_X_MASK 0xF
+#define RAZWI_INITIATOR_Y_SHIFT 28
+#define RAZWI_INITIATOR_Y_MASK 0x7
+
+#define RAZWI_INITIATOR_ID_AXI_ID(axi_id) \
+ (((axi_id) & RAZWI_INITIATOR_AXI_ID_MASK) << \
+ RAZWI_INITIATOR_AXI_ID_SHIFT)
+
+#define RAZWI_INITIATOR_ID_X_Y(x, y) \
+ ((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
+ (((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
+
+#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 0)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \
+ RAZWI_INITIATOR_ID_X_Y(8, 0)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0 RAZWI_INITIATOR_ID_X_Y(0, 1)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0 RAZWI_INITIATOR_ID_X_Y(9, 1)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1 RAZWI_INITIATOR_ID_X_Y(0, 2)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1 RAZWI_INITIATOR_ID_X_Y(9, 2)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0 RAZWI_INITIATOR_ID_X_Y(0, 3)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0 RAZWI_INITIATOR_ID_X_Y(9, 3)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1 RAZWI_INITIATOR_ID_X_Y(0, 4)
+#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1 RAZWI_INITIATOR_ID_X_Y(9, 4)
+#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 5)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 5)
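The coordinates above identify the routing point in the chip grid. A hedged
sketch (not part of the patch) of how the fields of a reported initiator ID
can be unpacked with the masks defined earlier, and how one is composed:

/* Editorial sketch: decode a RAZWI initiator ID. Composing one works the
 * other way around, e.g.
 * RAZWI_INITIATOR_ID_X_Y_TPC1 | RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)
 */
static void razwi_initiator_decode(u32 initiator, u32 *x, u32 *y, u32 *axi_id)
{
	*x = (initiator >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK;
	*y = (initiator >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK;
	*axi_id = (initiator >> RAZWI_INITIATOR_AXI_ID_SHIFT) &
			RAZWI_INITIATOR_AXI_ID_MASK;
}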
+
+#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
+
+/* STLB_CACHE_INV */
+#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0
+#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF
+#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8
+#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00
+
+#define MME_ACC_ACC_STALL_R_SHIFT 0
+#define MME_SBAB_SB_STALL_R_SHIFT 0
+
+#define PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK 0x700
+#define PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK 0x7000
+
+#define PCIE_WRAP_LBW_DRAIN_CFG_EN_SHIFT 0
+#define PCIE_WRAP_HBW_DRAIN_CFG_EN_SHIFT 0
+
+/* DMA_IF_HBM_CRED_EN */
+#define DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT 0
+#define DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_MASK 0x1
+#define DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT 1
+#define DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_MASK 0x2
+
+#define DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT 0
+#define DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT 0
+#define DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT 0
+#define DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT 0
+
+#define IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT 0
+#define IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT 0
+
+#define IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT 0
+#define IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT 0
+
+/* MMU_UP_PAGE_ERROR_CAPTURE */
+#define MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+/* MMU_UP_ACCESS_ERROR_CAPTURE */
+#define MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF
+#define MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000
+
+#define QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
+#define QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
+#define QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
+
+#define QM_ARB_ERR_MSG_EN_MASK (\
+ QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
+ QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
+ QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
+
+#endif /* GAUDI_MASKS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
new file mode 100644
index 000000000000..9a5800b0086b
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2017-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_PACKETS_H
+#define GAUDI_PACKETS_H
+
+#include <linux/types.h>
+
+#define PACKET_HEADER_PACKET_ID_SHIFT 56
+#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull
+
+enum packet_id {
+ PACKET_WREG_32 = 0x1,
+ PACKET_WREG_BULK = 0x2,
+ PACKET_MSG_LONG = 0x3,
+ PACKET_MSG_SHORT = 0x4,
+ PACKET_CP_DMA = 0x5,
+ PACKET_REPEAT = 0x6,
+ PACKET_MSG_PROT = 0x7,
+ PACKET_FENCE = 0x8,
+ PACKET_LIN_DMA = 0x9,
+ PACKET_NOP = 0xA,
+ PACKET_STOP = 0xB,
+ PACKET_ARB_POINT = 0xC,
+ PACKET_WAIT = 0xD,
+ PACKET_LOAD_AND_EXE = 0xF,
+ MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >>
+ PACKET_HEADER_PACKET_ID_SHIFT) + 1
+};
+
+#define GAUDI_PKT_CTL_OPCODE_SHIFT 24
+#define GAUDI_PKT_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI_PKT_CTL_EB_SHIFT 29
+#define GAUDI_PKT_CTL_EB_MASK 0x20000000
+
+#define GAUDI_PKT_CTL_RB_SHIFT 30
+#define GAUDI_PKT_CTL_RB_MASK 0x40000000
+
+#define GAUDI_PKT_CTL_MB_SHIFT 31
+#define GAUDI_PKT_CTL_MB_MASK 0x80000000
+
+/* All packets have, at least, an 8-byte header, which contains
+ * the packet type. The kernel driver uses the packet header for packet
+ * validation and to perform any required preparation before
+ * sending them off to the hardware.
+ */
+struct gaudi_packet {
+ __le64 header;
+ /* The rest of the packet data follows. Use the corresponding
+ * packet_XXX struct to dereference the data, based on packet type
+ */
+ u8 contents[0];
+};
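For orientation only (a sketch that assumes pkt points at a received packet;
not part of the patch), the packet type is recovered from the 64-bit header
with the mask and shift defined at the top of this file:

/* Sketch: extract the packet type from a gaudi_packet header */
static enum packet_id gaudi_packet_type(const struct gaudi_packet *pkt)
{
	u64 header = le64_to_cpu(pkt->header);

	return (enum packet_id)((header & PACKET_HEADER_PACKET_ID_MASK) >>
				PACKET_HEADER_PACKET_ID_SHIFT);
}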
+
+struct packet_nop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_stop {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct packet_wreg32 {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_wreg_bulk {
+ __le32 size64;
+ __le32 ctl;
+ __le64 values[0]; /* data starts here */
+};
+
+struct packet_msg_long {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_SHIFT 0
+#define GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK 0x0000EFFF
+
+#define GAUDI_PKT_SHORT_VAL_SOB_MOD_SHIFT 31
+#define GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK 0x80000000
+
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_SHIFT 0
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK 0x000000FF
+
+#define GAUDI_PKT_SHORT_VAL_MON_MASK_SHIFT 8
+#define GAUDI_PKT_SHORT_VAL_MON_MASK_MASK 0x0000FF00
+
+#define GAUDI_PKT_SHORT_VAL_MON_MODE_SHIFT 16
+#define GAUDI_PKT_SHORT_VAL_MON_MODE_MASK 0x00010000
+
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_SHIFT 17
+#define GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK 0xFFFE0000
+
+#define GAUDI_PKT_SHORT_CTL_ADDR_SHIFT 0
+#define GAUDI_PKT_SHORT_CTL_ADDR_MASK 0x0000FFFF
+
+#define GAUDI_PKT_SHORT_CTL_OP_SHIFT 20
+#define GAUDI_PKT_SHORT_CTL_OP_MASK 0x00300000
+
+#define GAUDI_PKT_SHORT_CTL_BASE_SHIFT 22
+#define GAUDI_PKT_SHORT_CTL_BASE_MASK 0x00C00000
+
+#define GAUDI_PKT_SHORT_CTL_OPCODE_SHIFT 24
+#define GAUDI_PKT_SHORT_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI_PKT_SHORT_CTL_EB_SHIFT 29
+#define GAUDI_PKT_SHORT_CTL_EB_MASK 0x20000000
+
+#define GAUDI_PKT_SHORT_CTL_RB_SHIFT 30
+#define GAUDI_PKT_SHORT_CTL_RB_MASK 0x40000000
+
+#define GAUDI_PKT_SHORT_CTL_MB_SHIFT 31
+#define GAUDI_PKT_SHORT_CTL_MB_MASK 0x80000000
+
+struct packet_msg_short {
+ __le32 value;
+ __le32 ctl;
+};
+
+struct packet_msg_prot {
+ __le32 value;
+ __le32 ctl;
+ __le64 addr;
+};
+
+#define GAUDI_PKT_FENCE_CFG_DEC_VAL_SHIFT 0
+#define GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK 0x0000000F
+
+#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_SHIFT 16
+#define GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK 0x00FF0000
+
+#define GAUDI_PKT_FENCE_CFG_ID_SHIFT 30
+#define GAUDI_PKT_FENCE_CFG_ID_MASK 0xC0000000
+
+#define GAUDI_PKT_FENCE_CTL_PRED_SHIFT 0
+#define GAUDI_PKT_FENCE_CTL_PRED_MASK 0x0000001F
+
+#define GAUDI_PKT_FENCE_CTL_OPCODE_SHIFT 24
+#define GAUDI_PKT_FENCE_CTL_OPCODE_MASK 0x1F000000
+
+#define GAUDI_PKT_FENCE_CTL_EB_SHIFT 29
+#define GAUDI_PKT_FENCE_CTL_EB_MASK 0x20000000
+
+#define GAUDI_PKT_FENCE_CTL_RB_SHIFT 30
+#define GAUDI_PKT_FENCE_CTL_RB_MASK 0x40000000
+
+#define GAUDI_PKT_FENCE_CTL_MB_SHIFT 31
+#define GAUDI_PKT_FENCE_CTL_MB_MASK 0x80000000
+
+struct packet_fence {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+#define GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_SHIFT 0
+#define GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK 0x00000001
+
+#define GAUDI_PKT_LIN_DMA_CTL_LIN_SHIFT 3
+#define GAUDI_PKT_LIN_DMA_CTL_LIN_MASK 0x00000008
+
+#define GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT 4
+#define GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000010
+
+#define GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT 0
+#define GAUDI_PKT_LIN_DMA_DST_ADDR_MASK 0x00FFFFFFFFFFFFFFull
+
+struct packet_lin_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+ __le64 dst_addr;
+};
+
+struct packet_arb_point {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_repeat {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_wait {
+ __le32 cfg;
+ __le32 ctl;
+};
+
+struct packet_load_and_exe {
+ __le32 cfg;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+struct packet_cp_dma {
+ __le32 tsize;
+ __le32 ctl;
+ __le64 src_addr;
+};
+
+#endif /* GAUDI_PACKETS_H */
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
new file mode 100644
index 000000000000..f25c60a2c243
--- /dev/null
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2019-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GAUDI_REG_MAP_H_
+#define GAUDI_REG_MAP_H_
+
+/*
+ * PSOC scratch-pad registers
+ */
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
+#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
+
+#endif /* GAUDI_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index 3c44ef3a23ed..067489bd048e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -55,8 +55,7 @@
(1 << DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \
(1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
(1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
- (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
- (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT))
+ (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT))
#define QMAN_MME_ENABLE (\
(1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index fce490e6a231..ce65c9da5c60 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -18,6 +18,7 @@
#include "psoc_mme_pll_regs.h"
#include "psoc_pci_pll_regs.h"
#include "psoc_emmc_pll_regs.h"
+#include "psoc_timestamp_regs.h"
#include "cpu_if_regs.h"
#include "cpu_ca53_cfg_regs.h"
#include "cpu_pll_regs.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
new file mode 100644
index 000000000000..9ce24597d4b0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+#define ASIC_REG_PSOC_TIMESTAMP_REGS_H_
+
+/*
+ *****************************************
+ * PSOC_TIMESTAMP (Prototype: TIMESTAMP)
+ *****************************************
+ */
+
+#define mmPSOC_TIMESTAMP_CNTCR 0xC49000
+
+#define mmPSOC_TIMESTAMP_CNTSR 0xC49004
+
+#define mmPSOC_TIMESTAMP_CNTCVL 0xC49008
+
+#define mmPSOC_TIMESTAMP_CNTCVU 0xC4900C
+
+#define mmPSOC_TIMESTAMP_CNTFID0 0xC49020
+
+#define mmPSOC_TIMESTAMP_PIDR4 0xC49FD0
+
+#define mmPSOC_TIMESTAMP_PIDR5 0xC49FD4
+
+#define mmPSOC_TIMESTAMP_PIDR6 0xC49FD8
+
+#define mmPSOC_TIMESTAMP_PIDR7 0xC49FDC
+
+#define mmPSOC_TIMESTAMP_PIDR0 0xC49FE0
+
+#define mmPSOC_TIMESTAMP_PIDR1 0xC49FE4
+
+#define mmPSOC_TIMESTAMP_PIDR2 0xC49FE8
+
+#define mmPSOC_TIMESTAMP_PIDR3 0xC49FEC
+
+#define mmPSOC_TIMESTAMP_CIDR0 0xC49FF0
+
+#define mmPSOC_TIMESTAMP_CIDR1 0xC49FF4
+
+#define mmPSOC_TIMESTAMP_CIDR2 0xC49FF8
+
+#define mmPSOC_TIMESTAMP_CIDR3 0xC49FFC
+
+#endif /* ASIC_REG_PSOC_TIMESTAMP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
index 08061282cd9c..0195f62d7254 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h
@@ -11,27 +11,30 @@
/*
* PSOC scratch-pad registers
*/
-#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
-#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
-#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
-#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
-#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
-#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
-#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
-#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
-#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
-#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
-#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
-#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
-#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
-#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
-#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
-#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
-#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
+#define mmCPU_PQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_0
+#define mmCPU_PQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_1
+#define mmCPU_EQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_2
+#define mmCPU_EQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_3
+#define mmCPU_EQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_4
+#define mmCPU_PQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_5
+#define mmCPU_EQ_CI mmPSOC_GLOBAL_CONF_SCRATCHPAD_6
+#define mmCPU_PQ_INIT_STATUS mmPSOC_GLOBAL_CONF_SCRATCHPAD_7
+#define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8
+#define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9
+#define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
+#define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23
+#define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24
+#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
+#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
+#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
+#define mmPREBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
+#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
+#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
-#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
+#define mmHW_STATE mmPSOC_GLOBAL_CONF_APP_STATUS
#define mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS mmPSOC_GLOBAL_CONF_WARM_REBOOT
+#define mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU mmPSOC_GLOBAL_CONF_UBOOT_MAGIC
+#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3
#endif /* GOYA_REG_MAP_H_ */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index f7992a69fd3a..c22d134e73af 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2018 HabanaLabs, Ltd.
+ * Copyright 2018-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -9,8 +9,47 @@
#define HL_BOOT_IF_H
#define LKD_HARD_RESET_MAGIC 0xED7BD694
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
-/* CPU error bits in BOOT_ERROR registers */
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0)
#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1)
#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2)
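To make the semantics above concrete, here is a hedged sketch (not part of
the patch) of how a host driver might interpret the register. err0 is
assumed to have been read from the BOOT_ERR0 scratch-pad register, and
CPU_BOOT_ERR0_ENABLED is assumed to be defined alongside the bits shown, as
documented in the comment block:

/* Sketch only: err0 holds the CPU_BOOT_ERR0 register value */
static bool boot_errors_reported(u32 err0)
{
	/* The error bits carry meaning only once the FW has set ENABLED */
	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return false;

	return (err0 & (CPU_BOOT_ERR0_DRAM_INIT_FAIL |
			CPU_BOOT_ERR0_FIT_CORRUPTED)) != 0;
}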
@@ -27,22 +66,33 @@ enum cpu_boot_status {
CPU_BOOT_STATUS_SRAM_AVAIL = 3,
CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
CPU_BOOT_STATUS_IN_PREBOOT = 5,
- CPU_BOOT_STATUS_IN_SPL = 6,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
CPU_BOOT_STATUS_IN_UBOOT = 7,
CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
CPU_BOOT_STATUS_NIC_FW_RDY = 11,
CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
};
enum kmd_msg {
KMD_MSG_NA = 0,
KMD_MSG_GOTO_WFE,
- KMD_MSG_FIT_RDY
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
};
#endif /* HL_BOOT_IF_H */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index a6851a9d3f03..468bb045fbd1 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
new file mode 100644
index 000000000000..b2a9570583ac
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_MMU_V1_1_H_
+#define INCLUDE_MMU_V1_1_H_
+
+#define MMU_ASID 0xC12004
+#define MMU_HOP0_PA43_12 0xC12008
+#define MMU_HOP0_PA49_44 0xC1200C
+#define MMU_BUSY 0xC12000
+
+#endif /* INCLUDE_MMU_V1_1_H_ */
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index a72f766ca470..47da84a17719 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -886,6 +886,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
vm_type = (enum vm_type_t *) userptr;
hint_addr = args->map_host.hint_addr;
+ handle = phys_pg_pack->handle;
} else {
handle = lower_32_bits(args->map_device.handle);
@@ -954,10 +955,17 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
mutex_unlock(&ctx->mmu_lock);
+ if (rc) {
+ dev_err(hdev->dev,
+ "mapping handle %u failed due to MMU cache invalidation\n",
+ handle);
+ goto map_err;
+ }
+
ret_vaddr += phys_pg_pack->offset;
hnode->ptr = vm_type;
@@ -1015,7 +1023,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
struct hl_va_range *va_range;
enum vm_type_t *vm_type;
bool is_userptr;
- int rc;
+ int rc = 0;
/* protect from double entrance */
mutex_lock(&ctx->mem_hash_lock);
@@ -1083,21 +1091,34 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
* at the loop end rather than for each iteration
*/
if (!ctx_free)
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, *vm_type);
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
+ *vm_type);
mutex_unlock(&ctx->mmu_lock);
/*
- * No point in maintaining the free VA block list if the context is
- * closing as the list will be freed anyway
+ * If the context is closing we don't need to check the MMU cache
+ * invalidation return code or update the VA free list, because in this
+ * flow we invalidate the MMU cache outside of this unmap function and
+ * the VA free list will be freed anyway.
*/
if (!ctx_free) {
- rc = add_va_block(hdev, va_range, vaddr,
- vaddr + phys_pg_pack->total_size - 1);
+ int tmp_rc;
+
if (rc)
+ dev_err(hdev->dev,
+ "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
+ vaddr);
+
+ tmp_rc = add_va_block(hdev, va_range, vaddr,
+ vaddr + phys_pg_pack->total_size - 1);
+ if (tmp_rc) {
dev_warn(hdev->dev,
"add va block failed for vaddr: 0x%llx\n",
vaddr);
+ if (!rc)
+ rc = tmp_rc;
+ }
}
atomic_dec(&phys_pg_pack->mapping_cnt);
@@ -1108,7 +1129,7 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
dma_unmap_host_va(hdev, userptr);
}
- return 0;
+ return rc;
mapping_cnt_err:
if (is_userptr)
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
index c98d88c7a5c6..9f634ef6f5b3 100644
--- a/drivers/misc/habanalabs/pci.c
+++ b/drivers/misc/habanalabs/pci.c
@@ -267,6 +267,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
/* Enable + Bar match + match enable */
rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
+ /* Return the DBI window to the default location */
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
+ rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+
+ hdev->asic_funcs->set_dma_mask_from_fw(hdev);
+
/* Point to DRAM */
if (!hdev->asic_funcs->set_dram_bar_base)
return -EINVAL;
@@ -274,7 +280,6 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
U64_MAX)
return -EIO;
-
/* Outbound Region 0 - Point to Host */
host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
rc |= hl_pci_iatu_write(hdev, 0x008,
@@ -283,7 +288,12 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
upper_32_bits(host_phys_base_address));
rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
rc |= hl_pci_iatu_write(hdev, 0x014, 0);
- rc |= hl_pci_iatu_write(hdev, 0x018, 0);
+
+ if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
+ rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000);
+ else
+ rc |= hl_pci_iatu_write(hdev, 0x018, 0);
+
rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
/* Increase region size */
rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
@@ -310,41 +320,25 @@ int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
+static int hl_pci_set_dma_mask(struct hl_device *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int rc;
/* set DMA mask */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
if (rc) {
- dev_warn(hdev->dev,
+ dev_err(hdev->dev,
"Failed to set pci dma mask to %d bits, error %d\n",
- dma_mask, rc);
-
- dma_mask = hdev->dma_mask;
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
- if (rc) {
- dev_err(hdev->dev,
- "Failed to set pci dma mask to %d bits, error %d\n",
- dma_mask, rc);
- return rc;
- }
+ hdev->dma_mask, rc);
+ return rc;
}
- /*
- * We managed to set the dma mask, so update the dma mask field. If
- * the set to the coherent mask will fail with that mask, we will
- * fail the entire function
- */
- hdev->dma_mask = dma_mask;
-
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
if (rc) {
dev_err(hdev->dev,
"Failed to set pci consistent dma mask to %d bits, error %d\n",
- dma_mask, rc);
+ hdev->dma_mask, rc);
return rc;
}
@@ -354,21 +348,16 @@ int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
/**
* hl_pci_init() - PCI initialization code.
* @hdev: Pointer to hl_device structure.
- * @dma_mask: number of bits for the requested dma mask.
*
* Set DMA masks, initialize the PCI controller and map the PCI BARs.
*
* Return: 0 on success, non-zero for failure.
*/
-int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
+int hl_pci_init(struct hl_device *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int rc;
- rc = hl_pci_set_dma_mask(hdev, dma_mask);
- if (rc)
- return rc;
-
if (hdev->reset_pcilink)
hl_pci_reset_link_through_bridge(hdev);
@@ -380,18 +369,22 @@ int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
pci_set_master(pdev);
- rc = hdev->asic_funcs->init_iatu(hdev);
+ rc = hdev->asic_funcs->pci_bars_map(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to initialize iATU\n");
+ dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
goto disable_device;
}
- rc = hdev->asic_funcs->pci_bars_map(hdev);
+ rc = hdev->asic_funcs->init_iatu(hdev);
if (rc) {
- dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
+ dev_err(hdev->dev, "Failed to initialize iATU\n");
goto disable_device;
}
+ rc = hl_pci_set_dma_mask(hdev);
+ if (rc)
+ goto disable_device;
+
return 0;
disable_device:
diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c
index 4cd622b017b9..5d78d5e1c782 100644
--- a/drivers/misc/habanalabs/sysfs.c
+++ b/drivers/misc/habanalabs/sysfs.c
@@ -183,6 +183,13 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
+ if (!hdev->supports_soft_reset) {
+ dev_err(hdev->dev, "Device does not support soft-reset\n");
+ goto out;
+ }
+
+ dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n");
+
hl_device_reset(hdev, false, false);
out:
@@ -204,6 +211,8 @@ static ssize_t hard_reset_store(struct device *dev,
goto out;
}
+ dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
+
hl_device_reset(hdev, true, false);
out:
@@ -220,6 +229,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GOYA:
str = "GOYA";
break;
+ case ASIC_GAUDI:
+ str = "GAUDI";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -406,7 +418,10 @@ int hl_sysfs_init(struct hl_device *hdev)
{
int rc;
- hdev->pm_mng_profile = PM_AUTO;
+ if (hdev->asic_type == ASIC_GOYA)
+ hdev->pm_mng_profile = PM_AUTO;
+ else
+ hdev->pm_mng_profile = PM_MANUAL;
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 886459e0ddd9..736675f0a246 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -208,7 +208,7 @@ void lkdtm_OVERFLOW_UNSIGNED(void)
ignored = value;
}
-/* Intentially using old-style flex array definition of 1 byte. */
+/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
int one;
int two;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 3bfe72c59864..8f201d019f5a 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -116,7 +116,7 @@ config MIC_COSM
config VOP
tristate "VOP Driver"
- depends on VOP_BUS && VHOST_DPN
+ depends on VOP_BUS
select VHOST_RING
select VIRTIO
help
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index fcd999f50d14..ea084626fe11 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -660,7 +660,7 @@ int scif_nodeqp_send(struct scif_dev *scifdev, struct scifmsg *msg)
struct device *spdev = NULL;
if (msg->uop > SCIF_EXIT_ACK) {
- /* Dont send messages once the exit flow has begun */
+ /* Don't send messages once the exit flow has begun */
if (OP_IDLE != scifdev->exit)
return -ENODEV;
spdev = scif_get_peer_dev(scifdev);
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 01e27682ea30..406cd5abfa72 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -113,14 +113,17 @@ static int scif_destroy_pinned_pages(struct scif_pinned_pages *pin)
int writeable = pin->prot & SCIF_PROT_WRITE;
int kernel = SCIF_MAP_KERNEL & pin->map_flags;
- for (j = 0; j < pin->nr_pages; j++) {
- if (pin->pages[j] && !kernel) {
- if (writeable)
- SetPageDirty(pin->pages[j]);
- put_page(pin->pages[j]);
+ if (kernel) {
+ for (j = 0; j < pin->nr_pages; j++) {
+ if (pin->pages[j] && !kernel) {
+ if (writeable)
+ set_page_dirty_lock(pin->pages[j]);
+ put_page(pin->pages[j]);
+ }
}
- }
-
+ } else
+ unpin_user_pages_dirty_lock(pin->pages, pin->nr_pages,
+ writeable);
scif_free(pin->pages,
pin->nr_pages * sizeof(*pin->pages));
scif_free(pin, sizeof(*pin));
@@ -1375,7 +1378,7 @@ retry:
}
}
- pinned_pages->nr_pages = get_user_pages_fast(
+ pinned_pages->nr_pages = pin_user_pages_fast(
(u64)addr,
nr_pages,
(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
@@ -1385,11 +1388,8 @@ retry:
if (ulimit)
__scif_dec_pinned_vm_lock(mm, nr_pages);
/* Roll back any pinned pages */
- for (i = 0; i < pinned_pages->nr_pages; i++) {
- if (pinned_pages->pages[i])
- put_page(
- pinned_pages->pages[i]);
- }
+ unpin_user_pages(pinned_pages->pages,
+ pinned_pages->nr_pages);
prot &= ~SCIF_PROT_WRITE;
try_upgrade = false;
goto retry;
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index de8a66b9d76b..c21f65a5c762 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -287,7 +287,7 @@ void ocxl_context_free(struct ocxl_context *ctx)
ocxl_afu_irq_free_all(ctx);
idr_destroy(&ctx->irq_idr);
- /* reference to the AFU taken in ocxl_context_init */
+ /* reference to the AFU taken in ocxl_context_alloc() */
ocxl_afu_put(ctx->afu);
kfree(ctx);
}
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index ef5a1af6bab7..41c40971979e 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -73,6 +73,8 @@
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
+#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
+
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
@@ -942,6 +944,8 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
.driver_data = (kernel_ulong_t)&am654_data
},
+ { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
+ },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 4b713a80b572..b1521112dbbd 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -21,7 +21,6 @@
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/prefetch.h>
-#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
@@ -43,7 +42,7 @@ static inline int is_gru_paddr(unsigned long paddr)
}
/*
- * Find the vma of a GRU segment. Caller must hold mmap_sem.
+ * Find the vma of a GRU segment. Caller must hold mmap_lock.
*/
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
@@ -59,7 +58,7 @@ struct vm_area_struct *gru_find_vma(unsigned long vaddr)
* Find and lock the gts that contains the specified user vaddr.
*
* Returns:
- * - *gts with the mmap_sem locked for read and the GTS locked.
+ * - *gts with the mmap_lock locked for read and the GTS locked.
* - NULL if vaddr invalid OR is not a valid GSEG vaddr.
*/
@@ -69,14 +68,14 @@ static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
struct vm_area_struct *vma;
struct gru_thread_state *gts = NULL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = gru_find_vma(vaddr);
if (vma)
gts = gru_find_thread_state(vma, TSID(vaddr, vma));
if (gts)
mutex_lock(&gts->ts_ctxlock);
else
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return gts;
}
@@ -86,7 +85,7 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
struct vm_area_struct *vma;
struct gru_thread_state *gts = ERR_PTR(-EINVAL);
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = gru_find_vma(vaddr);
if (!vma)
goto err;
@@ -95,11 +94,11 @@ static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
if (IS_ERR(gts))
goto err;
mutex_lock(&gts->ts_ctxlock);
- downgrade_write(&mm->mmap_sem);
+ mmap_write_downgrade(mm);
return gts;
err:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return gts;
}
@@ -109,7 +108,7 @@ err:
static void gru_unlock_gts(struct gru_thread_state *gts)
{
mutex_unlock(&gts->ts_ctxlock);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
/*
@@ -199,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
* Only supports Intel large pages (2MB only) on x86_64.
* ZZZ - hugepage support is incomplete
*
- * NOTE: mmap_sem is already held on entry to this function. This
+ * NOTE: mmap_lock is already held on entry to this function. This
* guarantees existence of the page tables.
*/
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
@@ -570,14 +569,14 @@ static irqreturn_t gru_intr(int chiplet, int blade)
}
/*
- * This is running in interrupt context. Trylock the mmap_sem.
+ * This is running in interrupt context. Trylock the mmap_lock.
* If it fails, retry the fault in user context.
*/
gts->ustats.fmm_tlbmiss++;
if (!gts->ts_force_cch_reload &&
- down_read_trylock(&gts->ts_mm->mmap_sem)) {
+ mmap_read_trylock(gts->ts_mm)) {
gru_try_dropin(gru, gts, tfh, NULL);
- up_read(&gts->ts_mm->mmap_sem);
+ mmap_read_unlock(gts->ts_mm);
} else {
tfh_user_polling_mode(tfh);
STAT(intr_mm_lock_failed);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 9d042310214f..93bb49ddda1f 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -135,7 +135,7 @@ static int gru_create_new_context(unsigned long arg)
if (!(req.options & GRU_OPT_MISS_MASK))
req.options |= GRU_OPT_MISS_FMM_INTR;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
vma = gru_find_vma(req.gseg);
if (vma) {
vdata = vma->vm_private_data;
@@ -146,7 +146,7 @@ static int gru_create_new_context(unsigned long arg)
vdata->vd_tlb_preload_count = req.tlb_preload_count;
ret = 0;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 79a963105983..d5e097cd556d 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -59,16 +59,16 @@
/* define two XPC debug device structures to be used with dev_dbg() et al */
-struct device_driver xpc_dbg_name = {
+static struct device_driver xpc_dbg_name = {
.name = "xpc"
};
-struct device xpc_part_dbg_subname = {
+static struct device xpc_part_dbg_subname = {
.init_name = "", /* set to "part" at xpc_init() time */
.driver = &xpc_dbg_name
};
-struct device xpc_chan_dbg_subname = {
+static struct device xpc_chan_dbg_subname = {
.init_name = "", /* set to "chan" at xpc_init() time */
.driver = &xpc_dbg_name
};
@@ -1217,7 +1217,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
return NOTIFY_DONE;
}
-int __init
+static int __init
xpc_init(void)
{
int ret;
@@ -1319,7 +1319,7 @@ out_1:
module_init(xpc_init);
-void __exit
+static void __exit
xpc_exit(void)
{
xpc_do_exit(xpUnloading);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ada94e6a3c91..837d6c3fe69c 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -96,7 +96,7 @@ struct xpnet_pending_msg {
atomic_t use_count;
};
-struct net_device *xpnet_device;
+static struct net_device *xpnet_device;
/*
* When we are notified of other partitions activating, we add them to
@@ -131,16 +131,16 @@ static DEFINE_SPINLOCK(xpnet_broadcast_lock);
/* Define the XPNET debug device structures to be used with dev_dbg() et al */
-struct device_driver xpnet_dbg_name = {
+static struct device_driver xpnet_dbg_name = {
.name = "xpnet"
};
-struct device xpnet_dbg_subname = {
+static struct device xpnet_dbg_subname = {
.init_name = "", /* set to "" */
.driver = &xpnet_dbg_name
};
-struct device *xpnet = &xpnet_dbg_subname;
+static struct device *xpnet = &xpnet_dbg_subname;
/*
* Packet was received by XPC and forwarded to us.
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index d39307f060bd..107028e77ca3 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
}
#endif
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
- void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
- struct uacce_mm *uacce_mm = data;
- struct uacce_queue *q;
-
- /*
- * No new queue can be added concurrently because no caller can have a
- * reference to this mm. But there may be concurrent calls to
- * uacce_mm_put(), so we need the lock.
- */
- mutex_lock(&uacce_mm->lock);
- list_for_each_entry(q, &uacce_mm->queues, list)
- uacce_put_queue(q);
- uacce_mm->mm = NULL;
- mutex_unlock(&uacce_mm->lock);
+ int pasid;
+ struct iommu_sva *handle;
- return 0;
-}
-
-static struct iommu_sva_ops uacce_sva_ops = {
- .mm_exit = uacce_sva_exit,
-};
-
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
- struct uacce_queue *q,
- struct mm_struct *mm)
-{
- struct uacce_mm *uacce_mm = NULL;
- struct iommu_sva *handle = NULL;
- int ret;
-
- lockdep_assert_held(&uacce->mm_lock);
-
- list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
- if (uacce_mm->mm == mm) {
- mutex_lock(&uacce_mm->lock);
- list_add(&q->list, &uacce_mm->queues);
- mutex_unlock(&uacce_mm->lock);
- return uacce_mm;
- }
- }
-
- uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
- if (!uacce_mm)
- return NULL;
+ if (!(uacce->flags & UACCE_DEV_SVA))
+ return 0;
- if (uacce->flags & UACCE_DEV_SVA) {
- /*
- * Safe to pass an incomplete uacce_mm, since mm_exit cannot
- * fire while we hold a reference to the mm.
- */
- handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
- if (IS_ERR(handle))
- goto err_free;
+ handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
- if (ret)
- goto err_unbind;
-
- uacce_mm->pasid = iommu_sva_get_pasid(handle);
- if (uacce_mm->pasid == IOMMU_PASID_INVALID)
- goto err_unbind;
+ pasid = iommu_sva_get_pasid(handle);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(handle);
+ return -ENODEV;
}
- uacce_mm->mm = mm;
- uacce_mm->handle = handle;
- INIT_LIST_HEAD(&uacce_mm->queues);
- mutex_init(&uacce_mm->lock);
- list_add(&q->list, &uacce_mm->queues);
- list_add(&uacce_mm->list, &uacce->mm_list);
-
- return uacce_mm;
-
-err_unbind:
- if (handle)
- iommu_sva_unbind_device(handle);
-err_free:
- kfree(uacce_mm);
- return NULL;
+ q->handle = handle;
+ q->pasid = pasid;
+ return 0;
}
-static void uacce_mm_put(struct uacce_queue *q)
+static void uacce_unbind_queue(struct uacce_queue *q)
{
- struct uacce_mm *uacce_mm = q->uacce_mm;
-
- lockdep_assert_held(&q->uacce->mm_lock);
-
- mutex_lock(&uacce_mm->lock);
- list_del(&q->list);
- mutex_unlock(&uacce_mm->lock);
-
- if (list_empty(&uacce_mm->queues)) {
- if (uacce_mm->handle)
- iommu_sva_unbind_device(uacce_mm->handle);
- list_del(&uacce_mm->list);
- kfree(uacce_mm);
- }
+ if (!q->handle)
+ return;
+ iommu_sva_unbind_device(q->handle);
+ q->handle = NULL;
}
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
- struct uacce_mm *uacce_mm = NULL;
struct uacce_device *uacce;
struct uacce_queue *q;
int ret = 0;
@@ -205,21 +135,16 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
if (!q)
return -ENOMEM;
- mutex_lock(&uacce->mm_lock);
- uacce_mm = uacce_mm_get(uacce, q, current->mm);
- mutex_unlock(&uacce->mm_lock);
- if (!uacce_mm) {
- ret = -ENOMEM;
+ ret = uacce_bind_queue(uacce, q);
+ if (ret)
goto out_with_mem;
- }
q->uacce = uacce;
- q->uacce_mm = uacce_mm;
if (uacce->ops->get_queue) {
- ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+ ret = uacce->ops->get_queue(uacce, q->pasid, q);
if (ret < 0)
- goto out_with_mm;
+ goto out_with_bond;
}
init_waitqueue_head(&q->wait);
@@ -227,12 +152,14 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
uacce->inode = inode;
q->state = UACCE_Q_INIT;
+ mutex_lock(&uacce->queues_lock);
+ list_add(&q->list, &uacce->queues);
+ mutex_unlock(&uacce->queues_lock);
+
return 0;
-out_with_mm:
- mutex_lock(&uacce->mm_lock);
- uacce_mm_put(q);
- mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+ uacce_unbind_queue(q);
out_with_mem:
kfree(q);
return ret;
@@ -241,14 +168,12 @@ out_with_mem:
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
- struct uacce_device *uacce = q->uacce;
+ mutex_lock(&q->uacce->queues_lock);
+ list_del(&q->list);
+ mutex_unlock(&q->uacce->queues_lock);
uacce_put_queue(q);
-
- mutex_lock(&uacce->mm_lock);
- uacce_mm_put(q);
- mutex_unlock(&uacce->mm_lock);
-
+ uacce_unbind_queue(q);
kfree(q);
return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (ret < 0)
goto err_with_uacce;
- INIT_LIST_HEAD(&uacce->mm_list);
- mutex_init(&uacce->mm_lock);
+ INIT_LIST_HEAD(&uacce->queues);
+ mutex_init(&uacce->queues_lock);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
*/
void uacce_remove(struct uacce_device *uacce)
{
- struct uacce_mm *uacce_mm;
- struct uacce_queue *q;
+ struct uacce_queue *q, *next_q;
if (!uacce)
return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
/* ensure no open queue remains */
- mutex_lock(&uacce->mm_lock);
- list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
- /*
- * We don't take the uacce_mm->lock here. Since we hold the
- * device's mm_lock, no queue can be added to or removed from
- * this uacce_mm. We may run concurrently with mm_exit, but
- * uacce_put_queue() is serialized and iommu_sva_unbind_device()
- * waits for the lock that mm_exit is holding.
- */
- list_for_each_entry(q, &uacce_mm->queues, list)
- uacce_put_queue(q);
-
- if (uacce->flags & UACCE_DEV_SVA) {
- iommu_sva_unbind_device(uacce_mm->handle);
- uacce_mm->handle = NULL;
- }
+ mutex_lock(&uacce->queues_lock);
+ list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+ uacce_put_queue(q);
+ uacce_unbind_queue(q);
}
- mutex_unlock(&uacce->mm_lock);
+ mutex_unlock(&uacce->queues_lock);
/* disable sva now since no opened queues */
if (uacce->flags & UACCE_DEV_SVA)
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
deleted file mode 100644
index a431787c0898..000000000000
--- a/drivers/misc/vexpress-syscfg.c
+++ /dev/null
@@ -1,280 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (C) 2014 ARM Limited
- */
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/sched/signal.h>
-#include <linux/slab.h>
-#include <linux/syscore_ops.h>
-#include <linux/vexpress.h>
-
-
-#define SYS_CFGDATA 0x0
-
-#define SYS_CFGCTRL 0x4
-#define SYS_CFGCTRL_START (1 << 31)
-#define SYS_CFGCTRL_WRITE (1 << 30)
-#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
-#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
-#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
-#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
-#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
-
-#define SYS_CFGSTAT 0x8
-#define SYS_CFGSTAT_ERR (1 << 1)
-#define SYS_CFGSTAT_COMPLETE (1 << 0)
-
-
-struct vexpress_syscfg {
- struct device *dev;
- void __iomem *base;
- struct list_head funcs;
-};
-
-struct vexpress_syscfg_func {
- struct list_head list;
- struct vexpress_syscfg *syscfg;
- struct regmap *regmap;
- int num_templates;
- u32 template[]; /* Keep it last! */
-};
-
-
-static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
- int index, bool write, u32 *data)
-{
- struct vexpress_syscfg *syscfg = func->syscfg;
- u32 command, status;
- int tries;
- long timeout;
-
- if (WARN_ON(index >= func->num_templates))
- return -EINVAL;
-
- command = readl(syscfg->base + SYS_CFGCTRL);
- if (WARN_ON(command & SYS_CFGCTRL_START))
- return -EBUSY;
-
- command = func->template[index];
- command |= SYS_CFGCTRL_START;
- command |= write ? SYS_CFGCTRL_WRITE : 0;
-
- /* Use a canary for reads */
- if (!write)
- *data = 0xdeadbeef;
-
- dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
- func, command, *data);
- writel(*data, syscfg->base + SYS_CFGDATA);
- writel(0, syscfg->base + SYS_CFGSTAT);
- writel(command, syscfg->base + SYS_CFGCTRL);
- mb();
-
- /* The operation can take ages... Go to sleep, 100us initially */
- tries = 100;
- timeout = 100;
- do {
- if (!irqs_disabled()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(timeout));
- if (signal_pending(current))
- return -EINTR;
- } else {
- udelay(timeout);
- }
-
- status = readl(syscfg->base + SYS_CFGSTAT);
- if (status & SYS_CFGSTAT_ERR)
- return -EFAULT;
-
- if (timeout > 20)
- timeout -= 20;
- } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
- if (WARN_ON_ONCE(!tries))
- return -ETIMEDOUT;
-
- if (!write) {
- *data = readl(syscfg->base + SYS_CFGDATA);
- dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
- }
-
- return 0;
-}
-
-static int vexpress_syscfg_read(void *context, unsigned int index,
- unsigned int *val)
-{
- struct vexpress_syscfg_func *func = context;
-
- return vexpress_syscfg_exec(func, index, false, val);
-}
-
-static int vexpress_syscfg_write(void *context, unsigned int index,
- unsigned int val)
-{
- struct vexpress_syscfg_func *func = context;
-
- return vexpress_syscfg_exec(func, index, true, &val);
-}
-
-static struct regmap_config vexpress_syscfg_regmap_config = {
- .lock = vexpress_config_lock,
- .unlock = vexpress_config_unlock,
- .reg_bits = 32,
- .val_bits = 32,
- .reg_read = vexpress_syscfg_read,
- .reg_write = vexpress_syscfg_write,
- .reg_format_endian = REGMAP_ENDIAN_LITTLE,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
-};
-
-
-static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
- void *context)
-{
- int err;
- struct vexpress_syscfg *syscfg = context;
- struct vexpress_syscfg_func *func;
- struct property *prop;
- const __be32 *val = NULL;
- __be32 energy_quirk[4];
- int num;
- u32 site, position, dcc;
- int i;
-
- err = vexpress_config_get_topo(dev->of_node, &site,
- &position, &dcc);
- if (err)
- return ERR_PTR(err);
-
- prop = of_find_property(dev->of_node,
- "arm,vexpress-sysreg,func", NULL);
- if (!prop)
- return ERR_PTR(-EINVAL);
-
- num = prop->length / sizeof(u32) / 2;
- val = prop->value;
-
- /*
- * "arm,vexpress-energy" function used to be described
- * by its first device only, now it requires both
- */
- if (num == 1 && of_device_is_compatible(dev->of_node,
- "arm,vexpress-energy")) {
- num = 2;
- energy_quirk[0] = *val;
- energy_quirk[2] = *val++;
- energy_quirk[1] = *val;
- energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
- val = energy_quirk;
- }
-
- func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
- if (!func)
- return ERR_PTR(-ENOMEM);
-
- func->syscfg = syscfg;
- func->num_templates = num;
-
- for (i = 0; i < num; i++) {
- u32 function, device;
-
- function = be32_to_cpup(val++);
- device = be32_to_cpup(val++);
-
- dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
- func, site, position, dcc,
- function, device);
-
- func->template[i] = SYS_CFGCTRL_DCC(dcc);
- func->template[i] |= SYS_CFGCTRL_SITE(site);
- func->template[i] |= SYS_CFGCTRL_POSITION(position);
- func->template[i] |= SYS_CFGCTRL_FUNC(function);
- func->template[i] |= SYS_CFGCTRL_DEVICE(device);
- }
-
- vexpress_syscfg_regmap_config.max_register = num - 1;
-
- func->regmap = regmap_init(dev, NULL, func,
- &vexpress_syscfg_regmap_config);
-
- if (IS_ERR(func->regmap)) {
- void *err = func->regmap;
-
- kfree(func);
- return err;
- }
-
- list_add(&func->list, &syscfg->funcs);
-
- return func->regmap;
-}
-
-static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
-{
- struct vexpress_syscfg *syscfg = context;
- struct vexpress_syscfg_func *func, *tmp;
-
- regmap_exit(regmap);
-
- list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
- if (func->regmap == regmap) {
- list_del(&syscfg->funcs);
- kfree(func);
- break;
- }
- }
-}
-
-static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
- .regmap_init = vexpress_syscfg_regmap_init,
- .regmap_exit = vexpress_syscfg_regmap_exit,
-};
-
-
-static int vexpress_syscfg_probe(struct platform_device *pdev)
-{
- struct vexpress_syscfg *syscfg;
- struct resource *res;
- struct device *bridge;
-
- syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
- if (!syscfg)
- return -ENOMEM;
- syscfg->dev = &pdev->dev;
- INIT_LIST_HEAD(&syscfg->funcs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- syscfg->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(syscfg->base))
- return PTR_ERR(syscfg->base);
-
- /* Must use dev.parent (MFD), as that's where DT phandle points at... */
- bridge = vexpress_config_bridge_register(pdev->dev.parent,
- &vexpress_syscfg_bridge_ops, syscfg);
-
- return PTR_ERR_OR_ZERO(bridge);
-}
-
-static const struct platform_device_id vexpress_syscfg_id_table[] = {
- { "vexpress-syscfg", },
- {},
-};
-
-static struct platform_driver vexpress_syscfg_driver = {
- .driver.name = "vexpress-syscfg",
- .id_table = vexpress_syscfg_id_table,
- .probe = vexpress_syscfg_probe,
-};
-
-static int __init vexpress_syscfg_init(void)
-{
- return platform_driver_register(&vexpress_syscfg_driver);
-}
-core_initcall(vexpress_syscfg_init);
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 71bbaa56bdb5..92291292756a 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -602,10 +602,10 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
const u32 depth)
{
u32 reg = 0;
- u32 res;
- u32 n, i;
+ int res, i, nr_pages;
+ u32 n;
u32 *addr = NULL;
- struct page *page[MAX_NUM_PAGES];
+ struct page *pages[MAX_NUM_PAGES];
/*
* Writes that go beyond the length of
@@ -622,15 +622,21 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
n += 1;
- res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page);
- if (res < n) {
- for (i = 0; i < res; i++)
- put_page(page[i]);
+ if (WARN_ON_ONCE(n > INT_MAX))
+ return -EINVAL;
+
+ nr_pages = n;
+
+ res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
+ if (res < nr_pages) {
+ if (res > 0)
+ unpin_user_pages(pages, res);
+
return -EINVAL;
}
- for (i = 0; i < n; i++) {
- addr = kmap(page[i]);
+ for (i = 0; i < nr_pages; i++) {
+ addr = kmap(pages[i]);
do {
xsdfec_regwrite(xsdfec,
base_addr + ((offset + reg) *
@@ -639,9 +645,9 @@ static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
reg++;
} while ((reg < len) &&
((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
- put_page(page[i]);
+ unpin_user_page(pages[i]);
}
- return reg;
+ return 0;
}
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
@@ -649,14 +655,9 @@ static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
struct xsdfec_ldpc_params *ldpc;
int ret, n;
- ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL);
- if (!ldpc)
- return -ENOMEM;
-
- if (copy_from_user(ldpc, arg, sizeof(*ldpc))) {
- ret = -EFAULT;
- goto err_out;
- }
+ ldpc = memdup_user(arg, sizeof(*ldpc));
+ if (IS_ERR(ldpc))
+ return PTR_ERR(ldpc);
if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
ret = -EIO;
@@ -720,8 +721,6 @@ static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
XSDFEC_QC_TABLE_DEPTH);
- if (ret > 0)
- ret = 0;
err_out:
kfree(ldpc);
return ret;
@@ -1484,25 +1483,7 @@ static struct platform_driver xsdfec_driver = {
.remove = xsdfec_remove,
};
-static int __init xsdfec_init(void)
-{
- int err;
-
- err = platform_driver_register(&xsdfec_driver);
- if (err < 0) {
- pr_err("%s Unabled to register SDFEC driver", __func__);
- return err;
- }
- return 0;
-}
-
-static void __exit xsdfec_exit(void)
-{
- platform_driver_unregister(&xsdfec_driver);
-}
-
-module_init(xsdfec_init);
-module_exit(xsdfec_exit);
+module_platform_driver(xsdfec_driver);
MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
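
The xilinx_sdfec hunks above are one instance of the tree-wide conversion from get_user_pages_fast()/put_page() to the FOLL_PIN API (pin_user_pages_fast()/unpin_user_pages()) documented in Documentation/core-api/pin_user_pages.rst. A minimal sketch of the resulting pattern, with a hypothetical walk_user_table() helper standing in for the driver-specific register writes:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/printk.h>

static int walk_user_table(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	u32 csum = 0;
	int pinned, i;

	/* FOLL_PIN counterpart of get_user_pages_fast() */
	pinned = pin_user_pages_fast(uaddr, nr_pages, 0, pages);
	if (pinned < nr_pages) {
		/* Partial pin: release whatever was pinned and give up */
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		return -EINVAL;
	}

	for (i = 0; i < nr_pages; i++) {
		u32 *vaddr = kmap(pages[i]);

		/* Toy payload: accumulate the first word of each page */
		csum += vaddr[0];

		kunmap(pages[i]);
		unpin_user_page(pages[i]);
	}

	pr_debug("user table checksum: %u\n", csum);
	return 0;
}
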
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 0ce332ad986b..eb85237bf2d6 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -436,7 +436,7 @@ config MMC_MESON_MX_SDIO
tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
depends on ARCH_MESON || COMPILE_TEST
depends on COMMON_CLK
- depends on OF
+ depends on OF_ADDRESS
help
This selects support for the SD/MMC Host Controller on
Amlogic Meson6, Meson8 and Meson8b SoCs.
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 2a4c8a2f3e64..db9b544465cd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -123,10 +123,6 @@ struct sdhci_arasan_clk_data {
void *clk_of_data;
};
-struct sdhci_arasan_zynqmp_clk_data {
- const struct zynqmp_eemi_ops *eemi_ops;
-};
-
/**
* struct sdhci_arasan_data - Arasan Controller Data
*
@@ -599,9 +595,6 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
struct sdhci_arasan_data *sdhci_arasan =
container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- clk_data->clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
const char *clk_name = clk_hw_get_name(hw);
u32 node_id = !strcmp(clk_name, "clk_out_sd0") ? NODE_SD_0 : NODE_SD_1;
u8 tap_delay, tap_max = 0;
@@ -641,8 +634,7 @@ static int sdhci_zynqmp_sdcardclk_set_phase(struct clk_hw *hw, int degrees)
tap_delay = (degrees * tap_max) / 360;
/* Set the Clock Phase */
- ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
- PM_TAPDELAY_OUTPUT, tap_delay, NULL);
+ ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_OUTPUT, tap_delay);
if (ret)
pr_err("Error setting Output Tap Delay\n");
@@ -671,9 +663,6 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
struct sdhci_arasan_data *sdhci_arasan =
container_of(clk_data, struct sdhci_arasan_data, clk_data);
struct sdhci_host *host = sdhci_arasan->host;
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- clk_data->clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
const char *clk_name = clk_hw_get_name(hw);
u32 node_id = !strcmp(clk_name, "clk_in_sd0") ? NODE_SD_0 : NODE_SD_1;
u8 tap_delay, tap_max = 0;
@@ -713,8 +702,7 @@ static int sdhci_zynqmp_sampleclk_set_phase(struct clk_hw *hw, int degrees)
tap_delay = (degrees * tap_max) / 360;
/* Set the Clock Phase */
- ret = eemi_ops->ioctl(node_id, IOCTL_SET_SD_TAPDELAY,
- PM_TAPDELAY_INPUT, tap_delay, NULL);
+ ret = zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_INPUT, tap_delay);
if (ret)
pr_err("Error setting Input Tap Delay\n");
@@ -874,11 +862,6 @@ static const struct clk_ops versal_sampleclk_ops = {
static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data =
- sdhci_arasan->clk_data.clk_of_data;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_clk_data->eemi_ops;
u16 clk;
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -886,8 +869,7 @@ static void arasan_zynqmp_dll_reset(struct sdhci_host *host, u32 deviceid)
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Issue DLL Reset */
- eemi_ops->ioctl(deviceid, IOCTL_SD_DLL_RESET,
- PM_DLL_RESET_PULSE, 0, NULL);
+ zynqmp_pm_sd_dll_reset(deviceid, PM_DLL_RESET_PULSE);
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
@@ -1617,20 +1599,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_disable_all;
if (of_device_is_compatible(np, "xlnx,zynqmp-8.9a")) {
- struct sdhci_arasan_zynqmp_clk_data *zynqmp_clk_data;
- const struct zynqmp_eemi_ops *eemi_ops;
-
- zynqmp_clk_data = devm_kzalloc(&pdev->dev,
- sizeof(*zynqmp_clk_data),
- GFP_KERNEL);
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops)) {
- ret = PTR_ERR(eemi_ops);
- goto unreg_clk;
- }
-
- zynqmp_clk_data->eemi_ops = eemi_ops;
- sdhci_arasan->clk_data.clk_of_data = zynqmp_clk_data;
host->mmc_host_ops.execute_tuning =
arasan_zynqmp_execute_tuning;
}
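
The sdhci-of-arasan hunks drop the eemi_ops indirection in favour of direct ZynqMP firmware calls such as zynqmp_pm_set_sd_tapdelay(). As a rough sketch of the phase-to-tap conversion used above (tap_delay = degrees * tap_max / 360), assuming an illustrative tap_max of 30 taps (the driver derives the real value from the selected timing mode):

#include <linux/firmware/xlnx-zynqmp.h>

static int example_set_output_phase(u32 node_id, int degrees)
{
	u8 tap_max = 30;				/* assumption for illustration */
	u8 tap_delay = (degrees * tap_max) / 360;	/* e.g. 90 degrees -> 7 taps */

	/* Program the output tap delay through the PM firmware interface */
	return zynqmp_pm_set_sd_tapdelay(node_id, PM_TAPDELAY_OUTPUT, tap_delay);
}
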
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 142c0f9485fe..42001c49833b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -420,8 +420,9 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extra_size = 0;
/* Protection Register info */
- extra_size += (extp->NumProtectionFields - 1) *
- sizeof(struct cfi_intelext_otpinfo);
+ if (extp->NumProtectionFields)
+ extra_size += (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
}
if (extp->MinorVersion >= '1') {
@@ -695,14 +696,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
*/
if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
&& extp->FeatureSupport & (1 << 9)) {
+ int offs = 0;
struct cfi_private *newcfi;
struct flchip *chip;
struct flchip_shared *shared;
- int offs, numregions, numparts, partshift, numvirtchips, i, j;
+ int numregions, numparts, partshift, numvirtchips, i, j;
/* Protection Register info */
- offs = (extp->NumProtectionFields - 1) *
- sizeof(struct cfi_intelext_otpinfo);
+ if (extp->NumProtectionFields)
+ offs = (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
/* Burst Read info */
offs += extp->extra[offs+1]+2;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index eb0f4600efd1..a030792115bc 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -647,7 +647,7 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
ecc[i] = bitrev8(hwecc[i]);
- numerrs = decode_bch(docg3->cascade->bch, NULL,
+ numerrs = bch_decode(docg3->cascade->bch, NULL,
DOC_ECC_BCH_COVERED_BYTES,
NULL, ecc, NULL, errorpos);
BUG_ON(numerrs == -EINVAL);
@@ -1984,8 +1984,8 @@ static int __init docg3_probe(struct platform_device *pdev)
return ret;
cascade->base = base;
mutex_init(&cascade->lock);
- cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
- DOC_ECC_BCH_PRIMPOLY);
+ cascade->bch = bch_init(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY, false);
if (!cascade->bch)
return ret;
@@ -2021,7 +2021,7 @@ notfound:
ret = -ENODEV;
dev_info(dev, "No supported DiskOnChip found\n");
err_probe:
- free_bch(cascade->bch);
+ bch_free(cascade->bch);
for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
@@ -2045,7 +2045,7 @@ static int docg3_release(struct platform_device *pdev)
if (cascade->floors[floor])
doc_release_device(cascade->floors[floor]);
- free_bch(docg3->cascade->bch);
+ bch_free(docg3->cascade->bch);
return 0;
}
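
The docg3 changes follow the lib/bch rename from init_bch()/decode_bch()/free_bch() to bch_init()/bch_decode()/bch_free(), with bch_init() gaining a swap_bits argument. A minimal sketch of the new lifecycle, reusing the Galois-field order (13), strength (4) and primitive polynomial (0x201b) that appear elsewhere in this diff:

#include <linux/bch.h>
#include <linux/errno.h>

static int example_bch_decode(u8 *data, unsigned int len, u8 *read_ecc,
			      unsigned int *errloc)
{
	struct bch_control *bch;
	int nerrors;

	/* swap_bits = false keeps the library's native bit ordering */
	bch = bch_init(13, 4, 0x201b, false);
	if (!bch)
		return -EINVAL;

	/* Decode against the ECC bytes read back from the device */
	nerrors = bch_decode(bch, data, len, read_ecc, NULL, NULL, errloc);

	bch_free(bch);
	return nerrors;		/* >= 0: bitflips found, < 0: uncorrectable/error */
}
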
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index a289c8b5cabf..d4a46e159d38 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -46,11 +46,6 @@
#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
-static const struct of_device_id syscon_match[] = {
- { .compatible = "cortina,gemini-syscon" },
- { },
-};
-
struct gemini_flash {
struct device *dev;
struct pinctrl *p;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 078e0f67377d..32e52d83b961 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -89,8 +89,6 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
ret = erase_write (mtd, mtdblk->cache_offset,
mtdblk->cache_size, mtdblk->cache_data);
- if (ret)
- return ret;
/*
* Here we could arguably set the cache state to STATE_CLEAN.
@@ -98,9 +96,14 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
* be notified if this content is altered on the flash by other
* means. Let's declare it empty and leave buffering tasks to
* the buffer cache instead.
+ *
+ * If this cache_offset points to a bad block, data cannot be
+ * written to the device. Clear cache_state to avoid writing to
+ * bad blocks repeatedly.
*/
- mtdblk->cache_state = STATE_EMPTY;
- return 0;
+ if (ret == 0 || ret == -EIO)
+ mtdblk->cache_state = STATE_EMPTY;
+ return ret;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b47691e1b81c..76d832a88e0c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -617,6 +617,19 @@ int add_mtd_device(struct mtd_info *mtd)
!(mtd->flags & MTD_NO_ERASE)))
return -EINVAL;
+ /*
+ * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
+ * master is an MLC NAND and has a proper pairing scheme defined.
+ * We also reject masters that implement ->_writev() for now, because
+ * NAND controller drivers don't implement this hook, and adding the
+ * SLC -> MLC address/length conversion to this path is useless if we
+ * don't have a user.
+ */
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
+ (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
+ !master->pairing || master->_writev))
+ return -EINVAL;
+
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -632,6 +645,14 @@ int add_mtd_device(struct mtd_info *mtd)
if (mtd->bitflip_threshold == 0)
mtd->bitflip_threshold = mtd->ecc_strength;
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ int ngroups = mtd_pairing_groups(master);
+
+ mtd->erasesize /= ngroups;
+ mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
+ mtd->erasesize;
+ }
+
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
@@ -1074,9 +1095,11 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_info *master = mtd_get_master(mtd);
u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
+ struct erase_info adjinstr;
int ret;
instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ adjinstr = *instr;
if (!mtd->erasesize || !master->_erase)
return -ENOTSUPP;
@@ -1091,12 +1114,27 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
ledtrig_mtd_activity();
- instr->addr += mst_ofs;
- ret = master->_erase(master, instr);
- if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
- instr->fail_addr -= mst_ofs;
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
+ master->erasesize;
+ adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
+ master->erasesize) -
+ adjinstr.addr;
+ }
+
+ adjinstr.addr += mst_ofs;
+
+ ret = master->_erase(master, &adjinstr);
+
+ if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
+ instr->fail_addr = adjinstr.fail_addr - mst_ofs;
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
+ master);
+ instr->fail_addr *= mtd->erasesize;
+ }
+ }
- instr->addr -= mst_ofs;
return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
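
Under MTD_SLC_ON_MLC_EMULATION the partition advertises an erasesize of master->erasesize divided by the number of pairing groups, so mtd_erase() above has to scale the request back to master eraseblocks. A small sketch of that conversion, mirroring the adjinstr computation (the helper name is illustrative):

#include <linux/mtd/mtd.h>

/* Map an erase request expressed in the emulated-SLC view onto master
 * eraseblocks, exactly as mtd_erase() does for adjinstr above. */
static void slc_to_master_erase(struct mtd_info *mtd, struct mtd_info *master,
				const struct erase_info *instr,
				struct erase_info *adj)
{
	*adj = *instr;
	adj->addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) * master->erasesize;
	adj->len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
		    master->erasesize) - adj->addr;
}
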
@@ -1276,6 +1314,101 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
return 0;
}
+static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ from = mtd_get_master_ofs(mtd, from);
+ if (master->_read_oob)
+ ret = master->_read_oob(master, from, ops);
+ else
+ ret = master->_read(master, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ return ret;
+}
+
+static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ret;
+
+ to = mtd_get_master_ofs(mtd, to);
+ if (master->_write_oob)
+ ret = master->_write_oob(master, to, ops);
+ else
+ ret = master->_write(master, to, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ return ret;
+}
+
+static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *master = mtd_get_master(mtd);
+ int ngroups = mtd_pairing_groups(master);
+ int npairs = mtd_wunit_per_eb(master) / ngroups;
+ struct mtd_oob_ops adjops = *ops;
+ unsigned int wunit, oobavail;
+ struct mtd_pairing_info info;
+ int max_bitflips = 0;
+ u32 ebofs, pageofs;
+ loff_t base, pos;
+
+ ebofs = mtd_mod_by_eb(start, mtd);
+ base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
+ info.group = 0;
+ info.pair = mtd_div_by_ws(ebofs, mtd);
+ pageofs = mtd_mod_by_ws(ebofs, mtd);
+ oobavail = mtd_oobavail(mtd, ops);
+
+ while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
+ int ret;
+
+ if (info.pair >= npairs) {
+ info.pair = 0;
+ base += master->erasesize;
+ }
+
+ wunit = mtd_pairing_info_to_wunit(master, &info);
+ pos = mtd_wunit_to_offset(mtd, base, wunit);
+
+ adjops.len = ops->len - ops->retlen;
+ if (adjops.len > mtd->writesize - pageofs)
+ adjops.len = mtd->writesize - pageofs;
+
+ adjops.ooblen = ops->ooblen - ops->oobretlen;
+ if (adjops.ooblen > oobavail - adjops.ooboffs)
+ adjops.ooblen = oobavail - adjops.ooboffs;
+
+ if (read) {
+ ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
+ if (ret > 0)
+ max_bitflips = max(max_bitflips, ret);
+ } else {
+ ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ max_bitflips = max(max_bitflips, ret);
+ ops->retlen += adjops.retlen;
+ ops->oobretlen += adjops.oobretlen;
+ adjops.datbuf += adjops.retlen;
+ adjops.oobbuf += adjops.oobretlen;
+ adjops.ooboffs = 0;
+ pageofs = 0;
+ info.pair++;
+ }
+
+ return max_bitflips;
+}
+
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
struct mtd_info *master = mtd_get_master(mtd);
@@ -1294,12 +1427,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!master->_read_oob && (!master->_read || ops->oobbuf))
return -EOPNOTSUPP;
- from = mtd_get_master_ofs(mtd, from);
- if (master->_read_oob)
- ret_code = master->_read_oob(master, from, ops);
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
else
- ret_code = master->_read(master, from, ops->len, &ops->retlen,
- ops->datbuf);
+ ret_code = mtd_read_oob_std(mtd, from, ops);
mtd_update_ecc_stats(mtd, master, &old_stats);
@@ -1338,13 +1469,10 @@ int mtd_write_oob(struct mtd_info *mtd, loff_t to,
if (!master->_write_oob && (!master->_write || ops->oobbuf))
return -EOPNOTSUPP;
- to = mtd_get_master_ofs(mtd, to);
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ return mtd_io_emulated_slc(mtd, to, false, ops);
- if (master->_write_oob)
- return master->_write_oob(master, to, ops);
- else
- return master->_write(master, to, ops->len, &ops->retlen,
- ops->datbuf);
+ return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);
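
The refactored mtd_read_oob()/mtd_write_oob() entry points are still driven by a struct mtd_oob_ops descriptor, whether the I/O goes through mtd_io_emulated_slc() or the new *_std() helpers. A minimal sketch of a caller reading one page of data plus its available OOB bytes (buffer management is left to the caller):

#include <linux/mtd/mtd.h>

static int example_read_page_and_oob(struct mtd_info *mtd, loff_t from,
				     u8 *databuf, u8 *oobbuf)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,	/* only the free OOB bytes */
		.len	= mtd->writesize,
		.ooblen	= mtd->oobavail,
		.datbuf	= databuf,
		.oobbuf	= oobbuf,
	};

	/* 0 on success, -EUCLEAN if bitflips reached the threshold,
	 * or another negative error code. */
	return mtd_read_oob(mtd, from, &ops);
}
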
@@ -1672,7 +1800,7 @@ EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
* @start: first ECC byte to set
* @nbytes: number of ECC bytes to set
*
- * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
*
* Returns zero on success, a negative error code otherwise.
*/
@@ -1817,6 +1945,12 @@ int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return -EINVAL;
if (!len)
return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+ len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+ }
+
return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);
@@ -1831,6 +1965,12 @@ int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return -EINVAL;
if (!len)
return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+ len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+ }
+
return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);
@@ -1845,6 +1985,12 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return -EINVAL;
if (!len)
return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+ len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
+ }
+
return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
@@ -1857,6 +2003,10 @@ int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
return -EINVAL;
if (!master->_block_isreserved)
return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);
@@ -1869,6 +2019,10 @@ int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
return -EINVAL;
if (!master->_block_isbad)
return 0;
+
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
@@ -1885,6 +2039,9 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
+ if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
+ ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
+
ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
if (ret)
return ret;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 3f6025684f58..c3575b686f79 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -35,9 +35,12 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part,
int partno, uint64_t cur_offset)
{
- int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
- parent->erasesize;
- struct mtd_info *child, *master = mtd_get_master(parent);
+ struct mtd_info *master = mtd_get_master(parent);
+ int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
+ master->writesize : master->erasesize;
+ u64 parent_size = mtd_is_partition(parent) ?
+ parent->part.size : parent->size;
+ struct mtd_info *child;
u32 remainder;
char *name;
u64 tmp;
@@ -56,8 +59,9 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
/* set up the MTD object for this partition */
child->type = parent->type;
child->part.flags = parent->flags & ~part->mask_flags;
+ child->part.flags |= part->add_flags;
child->flags = child->part.flags;
- child->size = part->size;
+ child->part.size = part->size;
child->writesize = parent->writesize;
child->writebufsize = parent->writebufsize;
child->oobsize = parent->oobsize;
@@ -98,29 +102,29 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
}
if (child->part.offset == MTDPART_OFS_RETAIN) {
child->part.offset = cur_offset;
- if (parent->size - child->part.offset >= child->size) {
- child->size = parent->size - child->part.offset -
- child->size;
+ if (parent_size - child->part.offset >= child->part.size) {
+ child->part.size = parent_size - child->part.offset -
+ child->part.size;
} else {
printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
- part->name, parent->size - child->part.offset,
- child->size);
+ part->name, parent_size - child->part.offset,
+ child->part.size);
/* register to preserve ordering */
goto out_register;
}
}
- if (child->size == MTDPART_SIZ_FULL)
- child->size = parent->size - child->part.offset;
+ if (child->part.size == MTDPART_SIZ_FULL)
+ child->part.size = parent_size - child->part.offset;
printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
- child->part.offset, child->part.offset + child->size,
+ child->part.offset, child->part.offset + child->part.size,
child->name);
/* let's do some sanity checks */
- if (child->part.offset >= parent->size) {
+ if (child->part.offset >= parent_size) {
/* let's register it anyway to preserve ordering */
child->part.offset = 0;
- child->size = 0;
+ child->part.size = 0;
/* Initialize ->erasesize to make add_mtd_device() happy. */
child->erasesize = parent->erasesize;
@@ -128,15 +132,16 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
part->name);
goto out_register;
}
- if (child->part.offset + child->size > parent->size) {
- child->size = parent->size - child->part.offset;
+ if (child->part.offset + child->part.size > parent->size) {
+ child->part.size = parent_size - child->part.offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
- part->name, parent->name, child->size);
+ part->name, parent->name, child->part.size);
}
+
if (parent->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = parent->numeraseregions;
- u64 end = child->part.offset + child->size;
+ u64 end = child->part.offset + child->part.size;
struct mtd_erase_region_info *regions = parent->eraseregions;
/* Find the first erase regions which is part of this
@@ -156,7 +161,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
BUG_ON(child->erasesize == 0);
} else {
/* Single erase size */
- child->erasesize = parent->erasesize;
+ child->erasesize = master->erasesize;
}
/*
@@ -178,7 +183,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
part->name);
}
- tmp = mtd_get_master_ofs(child, 0) + child->size;
+ tmp = mtd_get_master_ofs(child, 0) + child->part.size;
remainder = do_div(tmp, wr_alignment);
if ((child->flags & MTD_WRITEABLE) && remainder) {
child->flags &= ~MTD_WRITEABLE;
@@ -186,6 +191,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
part->name);
}
+ child->size = child->part.size;
child->ecc_step_size = parent->ecc_step_size;
child->ecc_strength = parent->ecc_strength;
child->bitflip_threshold = parent->bitflip_threshold;
@@ -193,7 +199,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent,
if (master->_block_isbad) {
uint64_t offs = 0;
- while (offs < child->size) {
+ while (offs < child->part.size) {
if (mtd_block_isreserved(child, offs))
child->ecc_stats.bbtblocks++;
else if (mtd_block_isbad(child, offs))
@@ -234,6 +240,8 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
long long offset, long long length)
{
struct mtd_info *master = mtd_get_master(parent);
+ u64 parent_size = mtd_is_partition(parent) ?
+ parent->part.size : parent->size;
struct mtd_partition part;
struct mtd_info *child;
int ret = 0;
@@ -244,7 +252,7 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
return -EINVAL;
if (length == MTDPART_SIZ_FULL)
- length = parent->size - offset;
+ length = parent_size - offset;
if (length <= 0)
return -EINVAL;
@@ -419,7 +427,7 @@ int add_mtd_partitions(struct mtd_info *parent,
/* Look for subpartitions */
parse_mtd_partitions(child, parts[i].types, NULL);
- cur_offset = child->part.offset + child->size;
+ cur_offset = child->part.offset + child->part.size;
}
return 0;
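
Note that mtd_add_partition() now sizes runtime partitions against the parent partition (parent->part.size) rather than the whole device, but callers still pass an offset and length relative to that parent. A minimal usage sketch (partition name and geometry are illustrative only):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/sizes.h>

/* Carve a 1MiB "config" area 4MiB into 'parent'. */
static int example_add_config_partition(struct mtd_info *parent)
{
	return mtd_add_partition(parent, "config", 4 * SZ_1M, SZ_1M);
}
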
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index a80a46bb5b8b..113f61052269 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -213,10 +213,6 @@ config MTD_NAND_MLC_LPC32XX
Please check the actual NAND chip connected and its support
by the MLC NAND controller.
-config MTD_NAND_CM_X270
- tristate "CM-X270 modules NAND controller"
- depends on MACH_ARMCORE
-
config MTD_NAND_PASEMI
tristate "PA Semi PWRficient NAND controller"
depends on PPC_PASEMI
@@ -457,6 +453,14 @@ config MTD_NAND_CADENCE
Enable the driver for NAND flash on platforms using a Cadence NAND
controller.
+config MTD_NAND_ARASAN
+ tristate "Support for Arasan NAND flash controller"
+ depends on HAS_IOMEM && HAS_DMA
+ select BCH
+ help
+ Enables the driver for the Arasan NAND flash controller on
+ Zynq UltraScale+ MPSoC.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 2d136b158fb7..2930f5b9015d 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
omap2_nand-objs := omap2.o
obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
-obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
@@ -58,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o
obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o
obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o
obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o
+obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index d66dab25df20..3711e7a0436c 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -387,12 +387,15 @@ static int gpio_nand_remove(struct platform_device *pdev)
{
struct gpio_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
+ int ret;
/* Apply write protection */
gpiod_set_value(priv->gpiod_nwp, 1);
/* Unregister device */
- nand_release(mtd_to_nand(mtd));
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(mtd_to_nand(mtd));
return 0;
}
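
The ams-delta hunk is one instance of a wider move away from nand_release() in remove paths: mtd_device_unregister() is now checked explicitly, then the chip is torn down with nand_cleanup(). The resulting generic shape, as a sketch:

#include <linux/bug.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>

static void example_nand_teardown(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* Unregister the MTD device first; warn if something still holds it */
	ret = mtd_device_unregister(mtd);
	WARN_ON(ret);

	/* Then release the NAND chip resources */
	nand_cleanup(chip);
}
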
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
new file mode 100644
index 000000000000..7141dcccba3c
--- /dev/null
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -0,0 +1,1297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2020 Xilinx, Inc.
+ * Author:
+ * Miquel Raynal <miquel.raynal@bootlin.com>
+ * Original work (fully rewritten):
+ * Punnaiah Choudary Kalluri <punnaia@xilinx.com>
+ * Naga Sureshkumar Relli <nagasure@xilinx.com>
+ */
+
+#include <linux/bch.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PKT_REG 0x00
+#define PKT_SIZE(x) FIELD_PREP(GENMASK(10, 0), (x))
+#define PKT_STEPS(x) FIELD_PREP(GENMASK(23, 12), (x))
+
+#define MEM_ADDR1_REG 0x04
+
+#define MEM_ADDR2_REG 0x08
+#define ADDR2_STRENGTH(x) FIELD_PREP(GENMASK(27, 25), (x))
+#define ADDR2_CS(x) FIELD_PREP(GENMASK(31, 30), (x))
+
+#define CMD_REG 0x0C
+#define CMD_1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define CMD_2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define CMD_PAGE_SIZE(x) FIELD_PREP(GENMASK(25, 23), (x))
+#define CMD_DMA_ENABLE BIT(27)
+#define CMD_NADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+#define CMD_ECC_ENABLE BIT(31)
+
+#define PROG_REG 0x10
+#define PROG_PGRD BIT(0)
+#define PROG_ERASE BIT(2)
+#define PROG_STATUS BIT(3)
+#define PROG_PGPROG BIT(4)
+#define PROG_RDID BIT(6)
+#define PROG_RDPARAM BIT(7)
+#define PROG_RST BIT(8)
+#define PROG_GET_FEATURE BIT(9)
+#define PROG_SET_FEATURE BIT(10)
+
+#define INTR_STS_EN_REG 0x14
+#define INTR_SIG_EN_REG 0x18
+#define INTR_STS_REG 0x1C
+#define WRITE_READY BIT(0)
+#define READ_READY BIT(1)
+#define XFER_COMPLETE BIT(2)
+#define DMA_BOUNDARY BIT(6)
+#define EVENT_MASK GENMASK(7, 0)
+
+#define READY_STS_REG 0x20
+
+#define DMA_ADDR0_REG 0x50
+#define DMA_ADDR1_REG 0x24
+
+#define FLASH_STS_REG 0x28
+
+#define DATA_PORT_REG 0x30
+
+#define ECC_CONF_REG 0x34
+#define ECC_CONF_COL(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define ECC_CONF_LEN(x) FIELD_PREP(GENMASK(26, 16), (x))
+#define ECC_CONF_BCH_EN BIT(27)
+
+#define ECC_ERR_CNT_REG 0x38
+#define GET_PKT_ERR_CNT(x) FIELD_GET(GENMASK(7, 0), (x))
+#define GET_PAGE_ERR_CNT(x) FIELD_GET(GENMASK(16, 8), (x))
+
+#define ECC_SP_REG 0x3C
+#define ECC_SP_CMD1(x) FIELD_PREP(GENMASK(7, 0), (x))
+#define ECC_SP_CMD2(x) FIELD_PREP(GENMASK(15, 8), (x))
+#define ECC_SP_ADDRS(x) FIELD_PREP(GENMASK(30, 28), (x))
+
+#define ECC_1ERR_CNT_REG 0x40
+#define ECC_2ERR_CNT_REG 0x44
+
+#define DATA_INTERFACE_REG 0x6C
+#define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x))
+#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x))
+#define DIFACE_SDR 0
+#define DIFACE_NVDDR BIT(9)
+
+#define ANFC_MAX_CS 2
+#define ANFC_DFLT_TIMEOUT_US 1000000
+#define ANFC_MAX_CHUNK_SIZE SZ_1M
+#define ANFC_MAX_PARAM_SIZE SZ_4K
+#define ANFC_MAX_STEPS SZ_2K
+#define ANFC_MAX_PKT_SIZE (SZ_2K - 1)
+#define ANFC_MAX_ADDR_CYC 5U
+#define ANFC_RSVD_ECC_BYTES 21
+
+#define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000
+#define ANFC_XLNX_SDR_HS_CORE_CLK 80000000
+
+/**
+ * struct anfc_op - Defines how to execute an operation
+ * @pkt_reg: Packet register
+ * @addr1_reg: Memory address 1 register
+ * @addr2_reg: Memory address 2 register
+ * @cmd_reg: Command register
+ * @prog_reg: Program register
+ * @steps: Number of "packets" to read/write
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @len: Data transfer length
+ * @read: Data transfer direction from the controller's point of view
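+ * @buf: Data buffer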
+ */
+struct anfc_op {
+ u32 pkt_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u32 prog_reg;
+ int steps;
+ unsigned int rdy_timeout_ms;
+ unsigned int len;
+ bool read;
+ u8 *buf;
+};
+
+/**
+ * struct anand - Defines the NAND chip related information
+ * @node: Used to store NAND chips into a list
+ * @chip: NAND chip information structure
+ * @cs: Chip select line
+ * @rb: Ready-busy line
+ * @page_sz: Register value of the page_sz field to use
+ * @clk: Expected clock frequency to use
+ * @timings: Data interface timing mode to use
+ * @ecc_conf: Hardware ECC configuration value
+ * @strength: Register value of the ECC strength
+ * @raddr_cycles: Row address cycle information
+ * @caddr_cycles: Column address cycle information
+ * @ecc_bits: Exact number of ECC bits per syndrome
+ * @ecc_total: Total number of ECC bytes
+ * @errloc: Array of errors located with soft BCH
+ * @hw_ecc: Buffer to store syndromes computed by hardware
+ * @bch: BCH structure
+ */
+struct anand {
+ struct list_head node;
+ struct nand_chip chip;
+ unsigned int cs;
+ unsigned int rb;
+ unsigned int page_sz;
+ unsigned long clk;
+ u32 timings;
+ u32 ecc_conf;
+ u32 strength;
+ u16 raddr_cycles;
+ u16 caddr_cycles;
+ unsigned int ecc_bits;
+ unsigned int ecc_total;
+ unsigned int *errloc;
+ u8 *hw_ecc;
+ struct bch_control *bch;
+};
+
+/**
+ * struct arasan_nfc - Defines the Arasan NAND flash controller driver instance
+ * @dev: Pointer to the device structure
+ * @base: Remapped register area
+ * @controller_clk: Pointer to the system clock
+ * @bus_clk: Pointer to the flash clock
+ * @controller: Base controller structure
+ * @chips: List of all NAND chips attached to the controller
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @cur_clk: Current clock rate
+ */
+struct arasan_nfc {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *controller_clk;
+ struct clk *bus_clk;
+ struct nand_controller controller;
+ struct list_head chips;
+ unsigned long assigned_cs;
+ unsigned int cur_clk;
+};
+
+static struct anand *to_anand(struct nand_chip *nand)
+{
+ return container_of(nand, struct anand, chip);
+}
+
+static struct arasan_nfc *to_anfc(struct nand_controller *ctrl)
+{
+ return container_of(ctrl, struct arasan_nfc, controller);
+}
+
+static int anfc_wait_for_event(struct arasan_nfc *nfc, unsigned int event)
+{
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->base + INTR_STS_REG, val,
+ val & event, 0,
+ ANFC_DFLT_TIMEOUT_US);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for event 0x%x\n", event);
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(event, nfc->base + INTR_STS_REG);
+
+ return 0;
+}
+
+static int anfc_wait_for_rb(struct arasan_nfc *nfc, struct nand_chip *chip,
+ unsigned int timeout_ms)
+{
+ struct anand *anand = to_anand(chip);
+ u32 val;
+ int ret;
+
+ /* There is no R/B interrupt, we must poll a register */
+ ret = readl_relaxed_poll_timeout(nfc->base + READY_STS_REG, val,
+ val & BIT(anand->rb),
+ 1, timeout_ms * 1000);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for R/B 0x%x\n",
+ readl_relaxed(nfc->base + READY_STS_REG));
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void anfc_trigger_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+ writel_relaxed(nfc_op->pkt_reg, nfc->base + PKT_REG);
+ writel_relaxed(nfc_op->addr1_reg, nfc->base + MEM_ADDR1_REG);
+ writel_relaxed(nfc_op->addr2_reg, nfc->base + MEM_ADDR2_REG);
+ writel_relaxed(nfc_op->cmd_reg, nfc->base + CMD_REG);
+ writel_relaxed(nfc_op->prog_reg, nfc->base + PROG_REG);
+}
+
+static int anfc_pkt_len_config(unsigned int len, unsigned int *steps,
+ unsigned int *pktsize)
+{
+ unsigned int nb, sz;
+
+ for (nb = 1; nb < ANFC_MAX_STEPS; nb *= 2) {
+ sz = len / nb;
+ if (sz <= ANFC_MAX_PKT_SIZE)
+ break;
+ }
+
+ if (sz * nb != len)
+ return -ENOTSUPP;
+
+ if (steps)
+ *steps = nb;
+
+ if (pktsize)
+ *pktsize = sz;
+
+ return 0;
+}
+
+/*
+ * When using the embedded hardware ECC engine, the controller is in charge of
+ * feeding the engine with, first, the ECC residue present in the data array.
+ * A typical read operation is:
+ * 1/ Assert the read operation by sending the relevant command/address cycles
+ * but targeting the column of the first ECC bytes in the OOB area instead of
+ * the main data directly.
+ * 2/ After having read the relevant number of ECC bytes, the controller uses
+ * the RNDOUT/RNDSTART commands which are set into the "ECC Spare Command
+ * Register" to move the pointer back to the beginning of the main data.
+ * 3/ It will read the content of the main area for a given size (pktsize) and
+ * will feed the ECC engine with this buffer again.
+ * 4/ The ECC engine derives the ECC bytes for the given data, compares them
+ * with the ones already received, eventually triggers status flags and
+ * then sets the "Buffer Read Ready" flag.
+ * 5/ The corrected data is then available for reading from the data port
+ * register.
+ *
+ * The hardware BCH ECC engine is known to be inconsistent in BCH mode and never
+ * reports uncorrectable errors. Because of this bug, we have to use the
+ * software BCH implementation in the read path.
+ */
+static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct anand *anand = to_anand(chip);
+ unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ unsigned int max_bitflips = 0;
+ dma_addr_t dma_addr;
+ int step, ret;
+ struct anfc_op nfc_op = {
+ .pkt_reg =
+ PKT_SIZE(chip->ecc.size) |
+ PKT_STEPS(chip->ecc.steps),
+ .addr1_reg =
+ (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+ (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+ .addr2_reg =
+ ((page >> 16) & 0xFF) |
+ ADDR2_STRENGTH(anand->strength) |
+ ADDR2_CS(anand->cs),
+ .cmd_reg =
+ CMD_1(NAND_CMD_READ0) |
+ CMD_2(NAND_CMD_READSTART) |
+ CMD_PAGE_SIZE(anand->page_sz) |
+ CMD_DMA_ENABLE |
+ CMD_NADDRS(anand->caddr_cycles +
+ anand->raddr_cycles),
+ .prog_reg = PROG_PGRD,
+ };
+
+ dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_addr)) {
+ dev_err(nfc->dev, "Buffer mapping error");
+ return -EIO;
+ }
+
+ writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+ writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+ anfc_trigger_op(nfc, &nfc_op);
+
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, dma_addr, len, DMA_FROM_DEVICE);
+ if (ret) {
+ dev_err(nfc->dev, "Error reading page %d\n", page);
+ return ret;
+ }
+
+ /* Store the raw OOB bytes as well */
+ ret = nand_change_read_column_op(chip, mtd->writesize, chip->oob_poi,
+ mtd->oobsize, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * For each step, compute the BCH syndrome in software over the raw data
+ * and compare the number of errors it reports with the feedback from the
+ * hardware engine.
+ */
+ for (step = 0; step < chip->ecc.steps; step++) {
+ u8 *raw_buf = &buf[step * chip->ecc.size];
+ unsigned int bit, byte;
+ int bf, i;
+
+ /* Extract the syndrome, it is not necessarily aligned */
+ memset(anand->hw_ecc, 0, chip->ecc.bytes);
+ nand_extract_bits(anand->hw_ecc, 0,
+ &chip->oob_poi[mtd->oobsize - anand->ecc_total],
+ anand->ecc_bits * step, anand->ecc_bits);
+
+ bf = bch_decode(anand->bch, raw_buf, chip->ecc.size,
+ anand->hw_ecc, NULL, NULL, anand->errloc);
+ if (!bf) {
+ continue;
+ } else if (bf > 0) {
+ for (i = 0; i < bf; i++) {
+ /* Only correct the data, not the syndrome */
+ if (anand->errloc[i] < (chip->ecc.size * 8)) {
+ bit = BIT(anand->errloc[i] & 7);
+ byte = anand->errloc[i] >> 3;
+ raw_buf[byte] ^= bit;
+ }
+ }
+
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+
+ continue;
+ }
+
+ bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
+ NULL, 0, NULL, 0,
+ chip->ecc.strength);
+ if (bf > 0) {
+ mtd->ecc_stats.corrected += bf;
+ max_bitflips = max_t(unsigned int, max_bitflips, bf);
+ memset(raw_buf, 0xFF, chip->ecc.size);
+ } else if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ }
+ }
+
+ return 0;
+}
+
+static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
+ dma_addr_t dma_addr;
+ int ret;
+ struct anfc_op nfc_op = {
+ .pkt_reg =
+ PKT_SIZE(chip->ecc.size) |
+ PKT_STEPS(chip->ecc.steps),
+ .addr1_reg =
+ (page & 0xFF) << (8 * (anand->caddr_cycles)) |
+ (((page >> 8) & 0xFF) << (8 * (1 + anand->caddr_cycles))),
+ .addr2_reg =
+ ((page >> 16) & 0xFF) |
+ ADDR2_STRENGTH(anand->strength) |
+ ADDR2_CS(anand->cs),
+ .cmd_reg =
+ CMD_1(NAND_CMD_SEQIN) |
+ CMD_2(NAND_CMD_PAGEPROG) |
+ CMD_PAGE_SIZE(anand->page_sz) |
+ CMD_DMA_ENABLE |
+ CMD_NADDRS(anand->caddr_cycles +
+ anand->raddr_cycles) |
+ CMD_ECC_ENABLE,
+ .prog_reg = PROG_PGPROG,
+ };
+
+ writel_relaxed(anand->ecc_conf, nfc->base + ECC_CONF_REG);
+ writel_relaxed(ECC_SP_CMD1(NAND_CMD_RNDIN) |
+ ECC_SP_ADDRS(anand->caddr_cycles),
+ nfc->base + ECC_SP_REG);
+
+ dma_addr = dma_map_single(nfc->dev, (void *)buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(nfc->dev, dma_addr)) {
+ dev_err(nfc->dev, "Buffer mapping error");
+ return -EIO;
+ }
+
+ writel_relaxed(lower_32_bits(dma_addr), nfc->base + DMA_ADDR0_REG);
+ writel_relaxed(upper_32_bits(dma_addr), nfc->base + DMA_ADDR1_REG);
+
+ anfc_trigger_op(nfc, &nfc_op);
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ dma_unmap_single(nfc->dev, dma_addr, len, DMA_TO_DEVICE);
+ if (ret) {
+ dev_err(nfc->dev, "Error writing page %d\n", page);
+ return ret;
+ }
+
+ /* Spare data is not protected */
+ if (oob_required)
+ ret = nand_write_oob_std(chip, page);
+
+ return ret;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int anfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct anfc_op *nfc_op)
+{
+ struct anand *anand = to_anand(chip);
+ const struct nand_op_instr *instr = NULL;
+ bool first_cmd = true;
+ unsigned int op_id;
+ int ret, i;
+
+ memset(nfc_op, 0, sizeof(*nfc_op));
+ nfc_op->addr2_reg = ADDR2_CS(anand->cs);
+ nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz);
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs, pktsize;
+ const u8 *addrs;
+ u8 *buf;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->cmd_reg |= CMD_1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->cmd_reg |= CMD_2(instr->ctx.cmd.opcode);
+
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ nfc_op->cmd_reg |= CMD_NADDRS(naddrs);
+
+ for (i = 0; i < min(ANFC_MAX_ADDR_CYC, naddrs); i++) {
+ if (i < 4)
+ nfc_op->addr1_reg |= (u32)addrs[i] << i * 8;
+ else
+ nfc_op->addr2_reg |= addrs[i];
+ }
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->read = true;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ buf = instr->ctx.data.buf.in;
+ nfc_op->buf = &buf[offset];
+ nfc_op->len = nand_subop_get_data_len(subop, op_id);
+ ret = anfc_pkt_len_config(nfc_op->len, &nfc_op->steps,
+ &pktsize);
+ if (ret)
+ return ret;
+
+ /*
+ * The number of DATA cycles must be aligned on 4; this
+ * means the controller might read/write more than
+ * requested. This is harmless most of the time as the extra
+ * DATA cycles are discarded in the write path and the read
+ * pointer is adjusted in the read path.
+ *
+ * FIXME: The core should mark operations where
+ * reading/writing more is allowed so the exec_op()
+ * implementation can take the right decision when the
+ * alignment constraint is not met: adjust the number of
+ * DATA cycles when it's allowed, reject the operation
+ * otherwise.
+ */
+ nfc_op->pkt_reg |= PKT_SIZE(round_up(pktsize, 4)) |
+ PKT_STEPS(nfc_op->steps);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int anfc_rw_pio_op(struct arasan_nfc *nfc, struct anfc_op *nfc_op)
+{
+ unsigned int dwords = (nfc_op->len / 4) / nfc_op->steps;
+ unsigned int last_len = nfc_op->len % 4;
+ unsigned int offset, dir;
+ u8 *buf = nfc_op->buf;
+ int ret, i;
+
+ for (i = 0; i < nfc_op->steps; i++) {
+ dir = nfc_op->read ? READ_READY : WRITE_READY;
+ ret = anfc_wait_for_event(nfc, dir);
+ if (ret) {
+ dev_err(nfc->dev, "PIO %s ready signal not received\n",
+ nfc_op->read ? "Read" : "Write");
+ return ret;
+ }
+
+ offset = i * (dwords * 4);
+ if (nfc_op->read)
+ ioread32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+ dwords);
+ else
+ iowrite32_rep(nfc->base + DATA_PORT_REG, &buf[offset],
+ dwords);
+ }
+
+ if (last_len) {
+ u32 remainder;
+
+ offset = nfc_op->len - last_len;
+
+ if (nfc_op->read) {
+ remainder = readl_relaxed(nfc->base + DATA_PORT_REG);
+ memcpy(&buf[offset], &remainder, last_len);
+ } else {
+ memcpy(&remainder, &buf[offset], last_len);
+ writel_relaxed(remainder, nfc->base + DATA_PORT_REG);
+ }
+ }
+
+ return anfc_wait_for_event(nfc, XFER_COMPLETE);
+}
+
+static int anfc_misc_data_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ u32 prog_reg)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ nfc_op.prog_reg = prog_reg;
+ anfc_trigger_op(nfc, &nfc_op);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ return anfc_rw_pio_op(nfc, &nfc_op);
+}
+
+static int anfc_param_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_RDPARAM);
+}
+
+static int anfc_data_read_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_PGRD);
+}
+
+static int anfc_param_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_SET_FEATURE);
+}
+
+static int anfc_data_write_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_data_type_exec(chip, subop, PROG_PGPROG);
+}
+
+static int anfc_misc_zerolen_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ u32 prog_reg)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ nfc_op.prog_reg = prog_reg;
+ anfc_trigger_op(nfc, &nfc_op);
+
+ ret = anfc_wait_for_event(nfc, XFER_COMPLETE);
+ if (ret)
+ return ret;
+
+ if (nfc_op.rdy_timeout_ms)
+ ret = anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+
+ return ret;
+}
+
+static int anfc_status_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ u32 tmp;
+ int ret;
+
+ /* See anfc_check_op() for details about this constraint */
+ if (subop->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS)
+ return -ENOTSUPP;
+
+ ret = anfc_misc_zerolen_type_exec(chip, subop, PROG_STATUS);
+ if (ret)
+ return ret;
+
+ tmp = readl_relaxed(nfc->base + FLASH_STS_REG);
+ memcpy(subop->instrs[1].ctx.data.buf.in, &tmp, 1);
+
+ return 0;
+}
+
+static int anfc_reset_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_zerolen_type_exec(chip, subop, PROG_RST);
+}
+
+static int anfc_erase_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ return anfc_misc_zerolen_type_exec(chip, subop, PROG_ERASE);
+}
+
+static int anfc_wait_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct anfc_op nfc_op = {};
+ int ret;
+
+ ret = anfc_parse_instructions(chip, subop, &nfc_op);
+ if (ret)
+ return ret;
+
+ return anfc_wait_for_rb(nfc, chip, nfc_op.rdy_timeout_ms);
+}
+
+static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ anfc_param_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_param_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_PARAM_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_data_read_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_data_write_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, ANFC_MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_reset_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_erase_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, ANFC_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, ANFC_MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ anfc_wait_type_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int anfc_select_target(struct nand_chip *chip, int target)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ int ret;
+
+ /* Update the controller timings and the potential ECC configuration */
+ writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG);
+
+ /* Update clock frequency */
+ if (nfc->cur_clk != anand->clk) {
+ clk_disable_unprepare(nfc->controller_clk);
+ ret = clk_set_rate(nfc->controller_clk, anand->clk);
+ if (ret) {
+ dev_err(nfc->dev, "Failed to change clock rate\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->controller_clk);
+ if (ret) {
+ dev_err(nfc->dev,
+ "Failed to re-enable the controller clock\n");
+ return ret;
+ }
+
+ nfc->cur_clk = anand->clk;
+ }
+
+ return 0;
+}
+
+static int anfc_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ /*
+ * The controller abstracts all the NAND operations and does not support
+ * data-only operations.
+ *
+ * TODO: The nand_op_parser framework should be extended to
+ * support custom checks on DATA instructions.
+ */
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (instr->ctx.addr.naddrs > ANFC_MAX_ADDR_CYC)
+ return -ENOTSUPP;
+
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.len > ANFC_MAX_CHUNK_SIZE)
+ return -ENOTSUPP;
+
+ if (anfc_pkt_len_config(instr->ctx.data.len, 0, 0))
+ return -ENOTSUPP;
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * The controller does not allow us to proceed with a CMD+DATA_IN cycle
+ * manually on the bus by reading data from the data register. Instead,
+ * the controller abstracts the status read with its own status
+ * register, filled once a read status operation is ordered. Hence, we
+ * cannot support any CMD+DATA_IN operation other than a READ STATUS.
+ *
+ * TODO: The nand_op_parser() framework should be extended to describe
+ * fixed patterns instead of open-coding this check here.
+ */
+ if (op->ninstrs == 2 &&
+ op->instrs[0].type == NAND_OP_CMD_INSTR &&
+ op->instrs[0].ctx.cmd.opcode != NAND_CMD_STATUS &&
+ op->instrs[1].type == NAND_OP_DATA_IN_INSTR)
+ return -ENOTSUPP;
+
+ return nand_op_parser_exec_op(chip, &anfc_op_parser, op, true);
+}
+
+static int anfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret;
+
+ if (check_only)
+ return anfc_check_op(chip, op);
+
+ ret = anfc_select_target(chip, op->cs);
+ if (ret)
+ return ret;
+
+ return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only);
+}
+
+static int anfc_setup_data_interface(struct nand_chip *chip, int target,
+ const struct nand_data_interface *conf)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct device_node *np = nfc->dev->of_node;
+
+ if (target < 0)
+ return 0;
+
+ anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode);
+ anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK;
+
+ /*
+ * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work
+ * with f > 90MHz (default clock is 100MHz) but signals are unstable
+ * with higher modes. Hence we slightly decrease the clock rate, down to
+ * 80MHz, when using modes 2-5 with this SoC.
+ */
+ if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") &&
+ conf->timings.mode >= 2)
+ anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK;
+
+ return 0;
+}
+
+static int anfc_calc_hw_ecc_bytes(int step_size, int strength)
+{
+ unsigned int bch_gf_mag, ecc_bits;
+
+ switch (step_size) {
+ case SZ_512:
+ bch_gf_mag = 13;
+ break;
+ case SZ_1K:
+ bch_gf_mag = 14;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ecc_bits = bch_gf_mag * strength;
+
+ return DIV_ROUND_UP(ecc_bits, 8);
+}
+
+static const int anfc_hw_ecc_512_strengths[] = {4, 8, 12};
+
+static const int anfc_hw_ecc_1024_strengths[] = {24};
+
+static const struct nand_ecc_step_info anfc_hw_ecc_step_infos[] = {
+ {
+ .stepsize = SZ_512,
+ .strengths = anfc_hw_ecc_512_strengths,
+ .nstrengths = ARRAY_SIZE(anfc_hw_ecc_512_strengths),
+ },
+ {
+ .stepsize = SZ_1K,
+ .strengths = anfc_hw_ecc_1024_strengths,
+ .nstrengths = ARRAY_SIZE(anfc_hw_ecc_1024_strengths),
+ },
+};
+
+static const struct nand_ecc_caps anfc_hw_ecc_caps = {
+ .stepinfos = anfc_hw_ecc_step_infos,
+ .nstepinfos = ARRAY_SIZE(anfc_hw_ecc_step_infos),
+ .calc_ecc_bytes = anfc_calc_hw_ecc_bytes,
+};
+
+static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc,
+ struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int bch_prim_poly = 0, bch_gf_mag = 0, ecc_offset;
+ int ret;
+
+ switch (mtd->writesize) {
+ case SZ_512:
+ case SZ_2K:
+ case SZ_4K:
+ case SZ_8K:
+ case SZ_16K:
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported page size %d\n", mtd->writesize);
+ return -EINVAL;
+ }
+
+ ret = nand_ecc_choose_conf(chip, &anfc_hw_ecc_caps, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ switch (ecc->strength) {
+ case 12:
+ anand->strength = 0x1;
+ break;
+ case 8:
+ anand->strength = 0x2;
+ break;
+ case 4:
+ anand->strength = 0x3;
+ break;
+ case 24:
+ anand->strength = 0x4;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported strength %d\n", ecc->strength);
+ return -EINVAL;
+ }
+
+ switch (ecc->size) {
+ case SZ_512:
+ bch_gf_mag = 13;
+ bch_prim_poly = 0x201b;
+ break;
+ case SZ_1K:
+ bch_gf_mag = 14;
+ bch_prim_poly = 0x4443;
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported step size %d\n", ecc->strength);
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+
+ ecc->steps = mtd->writesize / ecc->size;
+ ecc->algo = NAND_ECC_BCH;
+ anand->ecc_bits = bch_gf_mag * ecc->strength;
+ ecc->bytes = DIV_ROUND_UP(anand->ecc_bits, 8);
+ anand->ecc_total = DIV_ROUND_UP(anand->ecc_bits * ecc->steps, 8);
+ ecc_offset = mtd->writesize + mtd->oobsize - anand->ecc_total;
+ anand->ecc_conf = ECC_CONF_COL(ecc_offset) |
+ ECC_CONF_LEN(anand->ecc_total) |
+ ECC_CONF_BCH_EN;
+
+ anand->errloc = devm_kmalloc_array(nfc->dev, ecc->strength,
+ sizeof(*anand->errloc), GFP_KERNEL);
+ if (!anand->errloc)
+ return -ENOMEM;
+
+ anand->hw_ecc = devm_kmalloc(nfc->dev, ecc->bytes, GFP_KERNEL);
+ if (!anand->hw_ecc)
+ return -ENOMEM;
+
+ /* Enforce bit swapping to fit the hardware */
+ anand->bch = bch_init(bch_gf_mag, ecc->strength, bch_prim_poly, true);
+ if (!anand->bch)
+ return -EINVAL;
+
+ ecc->read_page = anfc_read_page_hw_ecc;
+ ecc->write_page = anfc_write_page_hw_ecc;
+
+ return 0;
+}
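+
+/*
+ * Editor's illustration (hypothetical geometry, not part of the patch):
+ * for a 2KiB page with 64 bytes of OOB, 512-byte steps and strength 8,
+ * the code above yields ecc->steps = 4, ecc_bits = 13 * 8 = 104,
+ * ecc->bytes = 13, ecc_total = DIV_ROUND_UP(104 * 4, 8) = 52 and
+ * ecc_offset = 2048 + 64 - 52 = 2060, i.e. the ECC bytes are packed at
+ * the very end of the OOB area.
+ */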
+
+static int anfc_attach_chip(struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+ struct arasan_nfc *nfc = to_anfc(chip->controller);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+
+ if (mtd->writesize <= SZ_512)
+ anand->caddr_cycles = 1;
+ else
+ anand->caddr_cycles = 2;
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ anand->raddr_cycles = 3;
+ else
+ anand->raddr_cycles = 2;
+
+ switch (mtd->writesize) {
+ case 512:
+ anand->page_sz = 0;
+ break;
+ case 1024:
+ anand->page_sz = 5;
+ break;
+ case 2048:
+ anand->page_sz = 1;
+ break;
+ case 4096:
+ anand->page_sz = 2;
+ break;
+ case 8192:
+ anand->page_sz = 3;
+ break;
+ case 16384:
+ anand->page_sz = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* These hooks are valid for all ECC providers */
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ case NAND_ECC_ON_DIE:
+ break;
+ case NAND_ECC_HW:
+ ret = anfc_init_hw_ecc_controller(nfc, chip);
+ break;
+ default:
+ dev_err(nfc->dev, "Unsupported ECC mode: %d\n",
+ chip->ecc.mode);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void anfc_detach_chip(struct nand_chip *chip)
+{
+ struct anand *anand = to_anand(chip);
+
+ if (anand->bch)
+ bch_free(anand->bch);
+}
+
+static const struct nand_controller_ops anfc_ops = {
+ .exec_op = anfc_exec_op,
+ .setup_data_interface = anfc_setup_data_interface,
+ .attach_chip = anfc_attach_chip,
+ .detach_chip = anfc_detach_chip,
+};
+
+static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np)
+{
+ struct anand *anand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int cs, rb, ret;
+
+ anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL);
+ if (!anand)
+ return -ENOMEM;
+
+ /* We do not support multiple CS per chip yet */
+ if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) {
+ dev_err(nfc->dev, "Invalid reg property\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "reg", &cs);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(np, "nand-rb", &rb);
+ if (ret)
+ return ret;
+
+ if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) {
+ dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(nfc->dev, "Already assigned CS %d\n", cs);
+ return -EINVAL;
+ }
+
+ anand->cs = cs;
+ anand->rb = rb;
+
+ chip = &anand->chip;
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = nfc->dev;
+ chip->controller = &nfc->controller;
+ chip->options = NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USES_DMA;
+
+ nand_set_flash_node(chip, np);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "NAND label property is mandatory\n");
+ return -EINVAL;
+ }
+
+ ret = nand_scan(chip, 1);
+ if (ret) {
+ dev_err(nfc->dev, "Scan operation failed\n");
+ return ret;
+ }
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&anand->node, &nfc->chips);
+
+ return 0;
+}
+
+static void anfc_chips_cleanup(struct arasan_nfc *nfc)
+{
+ struct anand *anand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(anand, tmp, &nfc->chips, node) {
+ chip = &anand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&anand->node);
+ }
+}
+
+static int anfc_chips_init(struct arasan_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node, *nand_np;
+ int nchips = of_get_child_count(np);
+ int ret;
+
+ if (!nchips || nchips > ANFC_MAX_CS) {
+ dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n",
+ nchips);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = anfc_chip_init(nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ anfc_chips_cleanup(nfc);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void anfc_reset(struct arasan_nfc *nfc)
+{
+ /* Disable interrupt signals */
+ writel_relaxed(0, nfc->base + INTR_SIG_EN_REG);
+
+ /* Enable interrupt status */
+ writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG);
+}
+
+static int anfc_probe(struct platform_device *pdev)
+{
+ struct arasan_nfc *nfc;
+ int ret;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = &pdev->dev;
+ nand_controller_init(&nfc->controller);
+ nfc->controller.ops = &anfc_ops;
+ INIT_LIST_HEAD(&nfc->chips);
+
+ nfc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nfc->base))
+ return PTR_ERR(nfc->base);
+
+ anfc_reset(nfc);
+
+ nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+ if (IS_ERR(nfc->controller_clk))
+ return PTR_ERR(nfc->controller_clk);
+
+ nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(nfc->bus_clk))
+ return PTR_ERR(nfc->bus_clk);
+
+ ret = clk_prepare_enable(nfc->controller_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(nfc->bus_clk);
+ if (ret)
+ goto disable_controller_clk;
+
+ ret = anfc_chips_init(nfc);
+ if (ret)
+ goto disable_bus_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ return 0;
+
+disable_bus_clk:
+ clk_disable_unprepare(nfc->bus_clk);
+
+disable_controller_clk:
+ clk_disable_unprepare(nfc->controller_clk);
+
+ return ret;
+}
+
+static int anfc_remove(struct platform_device *pdev)
+{
+ struct arasan_nfc *nfc = platform_get_drvdata(pdev);
+
+ anfc_chips_cleanup(nfc);
+
+ clk_disable_unprepare(nfc->bus_clk);
+ clk_disable_unprepare(nfc->controller_clk);
+
+ return 0;
+}
+
+static const struct of_device_id anfc_ids[] = {
+ {
+ .compatible = "xlnx,zynqmp-nand-controller",
+ },
+ {
+ .compatible = "arasan,nfc-v3p10",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, anfc_ids);
+
+static struct platform_driver anfc_driver = {
+ .driver = {
+ .name = "arasan-nand-controller",
+ .of_match_table = anfc_ids,
+ },
+ .probe = anfc_probe,
+ .remove = anfc_remove,
+};
+module_platform_driver(anfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Punnaiah Choudary Kalluri <punnaia@xilinx.com>");
+MODULE_AUTHOR("Naga Sureshkumar Relli <nagasure@xilinx.com>");
+MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
+MODULE_DESCRIPTION("Arasan NAND Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 3ba17a98df4d..46a3724a788e 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1494,7 +1494,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
* suitable for DMA.
*/
if (nc->dmac)
- chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->options |= NAND_USES_DMA;
/* Default to HW ECC if pmecc is available. */
if (nc->pmecc)
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 75eb3e97fae3..d865200ccd08 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -16,63 +16,16 @@
struct au1550nd_ctx {
+ struct nand_controller controller;
struct nand_chip chip;
int cs;
void __iomem *base;
- void (*write_byte)(struct nand_chip *, u_char);
};
-/**
- * au_read_byte - read one byte from the chip
- * @this: NAND chip object
- *
- * read function for 8bit buswidth
- */
-static u_char au_read_byte(struct nand_chip *this)
-{
- u_char ret = readb(this->legacy.IO_ADDR_R);
- wmb(); /* drain writebuffer */
- return ret;
-}
-
-/**
- * au_write_byte - write one byte to the chip
- * @this: NAND chip object
- * @byte: pointer to data byte to write
- *
- * write function for 8it buswidth
- */
-static void au_write_byte(struct nand_chip *this, u_char byte)
-{
- writeb(byte, this->legacy.IO_ADDR_W);
- wmb(); /* drain writebuffer */
-}
-
-/**
- * au_read_byte16 - read one byte endianness aware from the chip
- * @this: NAND chip object
- *
- * read function for 16bit buswidth with endianness conversion
- */
-static u_char au_read_byte16(struct nand_chip *this)
-{
- u_char ret = (u_char) cpu_to_le16(readw(this->legacy.IO_ADDR_R));
- wmb(); /* drain writebuffer */
- return ret;
-}
-
-/**
- * au_write_byte16 - write one byte endianness aware to the chip
- * @this: NAND chip object
- * @byte: pointer to data byte to write
- *
- * write function for 16bit buswidth with endianness conversion
- */
-static void au_write_byte16(struct nand_chip *this, u_char byte)
+static struct au1550nd_ctx *chip_to_au_ctx(struct nand_chip *this)
{
- writew(le16_to_cpu((u16) byte), this->legacy.IO_ADDR_W);
- wmb(); /* drain writebuffer */
+ return container_of(this, struct au1550nd_ctx, chip);
}
/**
@@ -83,12 +36,15 @@ static void au_write_byte16(struct nand_chip *this, u_char byte)
*
* write function for 8bit buswidth
*/
-static void au_write_buf(struct nand_chip *this, const u_char *buf, int len)
+static void au_write_buf(struct nand_chip *this, const void *buf,
+ unsigned int len)
{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ const u8 *p = buf;
int i;
for (i = 0; i < len; i++) {
- writeb(buf[i], this->legacy.IO_ADDR_W);
+ writeb(p[i], ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
@@ -101,12 +57,15 @@ static void au_write_buf(struct nand_chip *this, const u_char *buf, int len)
*
* read function for 8bit buswidth
*/
-static void au_read_buf(struct nand_chip *this, u_char *buf, int len)
+static void au_read_buf(struct nand_chip *this, void *buf,
+ unsigned int len)
{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ u8 *p = buf;
int i;
for (i = 0; i < len; i++) {
- buf[i] = readb(this->legacy.IO_ADDR_R);
+ p[i] = readb(ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
@@ -119,17 +78,18 @@ static void au_read_buf(struct nand_chip *this, u_char *buf, int len)
*
* write function for 16bit buswidth
*/
-static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
+static void au_write_buf16(struct nand_chip *this, const void *buf,
+ unsigned int len)
{
- int i;
- u16 *p = (u16 *) buf;
- len >>= 1;
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ const u16 *p = buf;
+ unsigned int i;
+ len >>= 1;
for (i = 0; i < len; i++) {
- writew(p[i], this->legacy.IO_ADDR_W);
+ writew(p[i], ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
-
}
/**
@@ -140,239 +100,146 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
*
* read function for 16bit buswidth
*/
-static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, void *buf, unsigned int len)
{
- int i;
- u16 *p = (u16 *) buf;
- len >>= 1;
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ u16 *p = buf;
+ len >>= 1;
for (i = 0; i < len; i++) {
- p[i] = readw(this->legacy.IO_ADDR_R);
+ p[i] = readw(ctx->base + MEM_STNAND_DATA);
wmb(); /* drain writebuffer */
}
}
-/* Select the chip by setting nCE to low */
-#define NAND_CTL_SETNCE 1
-/* Deselect the chip by setting nCE to high */
-#define NAND_CTL_CLRNCE 2
-/* Select the command latch by setting CLE to high */
-#define NAND_CTL_SETCLE 3
-/* Deselect the command latch by setting CLE to low */
-#define NAND_CTL_CLRCLE 4
-/* Select the address latch by setting ALE to high */
-#define NAND_CTL_SETALE 5
-/* Deselect the address latch by setting ALE to low */
-#define NAND_CTL_CLRALE 6
-
-static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
+static int find_nand_cs(unsigned long nand_base)
{
- struct nand_chip *this = mtd_to_nand(mtd);
- struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
- chip);
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
- switch (cmd) {
+ return -ENODEV;
+}
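+
+/*
+ * Editor's illustration (hypothetical values, not part of the patch): if a
+ * CS decodes to start = 0x20000000 and mask = 0x3ffc0000, then
+ * start | (start - 1) = 0x3fffffff, start ^ mask = 0x1ffc0000, and
+ * end = 0x3fffffff & 0xe003ffff = 0x2003ffff, i.e. a 256KiB window; a
+ * nand_base inside that window selects this CS.
+ */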
- case NAND_CTL_SETCLE:
- this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
- break;
+static int au1550nd_waitrdy(struct nand_chip *this, unsigned int timeout_ms)
+{
+ unsigned long timeout_jiffies = jiffies;
+
+ timeout_jiffies += msecs_to_jiffies(timeout_ms) + 1;
+ do {
+ if (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1)
+ return 0;
- case NAND_CTL_CLRCLE:
- this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+ usleep_range(10, 100);
+ } while (time_before(jiffies, timeout_jiffies));
+
+ return -ETIMEDOUT;
+}
+
+static int au1550nd_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
+{
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ int ret = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ ctx->base + MEM_STNAND_CMD);
+ /* Drain the writebuffer */
+ wmb();
break;
- case NAND_CTL_SETALE:
- this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ writeb(instr->ctx.addr.addrs[i],
+ ctx->base + MEM_STNAND_ADDR);
+ /* Drain the writebuffer */
+ wmb();
+ }
break;
- case NAND_CTL_CLRALE:
- this->legacy.IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
- /* FIXME: Nobody knows why this is necessary,
- * but it works only that way */
- udelay(1);
+ case NAND_OP_DATA_IN_INSTR:
+ if ((this->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ au_read_buf16(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ au_read_buf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
break;
- case NAND_CTL_SETNCE:
- /* assert (force assert) chip enable */
- alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+ case NAND_OP_DATA_OUT_INSTR:
+ if ((this->options & NAND_BUSWIDTH_16) &&
+ !instr->ctx.data.force_8bit)
+ au_write_buf16(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ au_write_buf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
break;
- case NAND_CTL_CLRNCE:
- /* deassert chip enable */
- alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+ case NAND_OP_WAITRDY_INSTR:
+ ret = au1550nd_waitrdy(this, instr->ctx.waitrdy.timeout_ms);
break;
+ default:
+ return -EINVAL;
}
- this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W;
-
- wmb(); /* Drain the writebuffer */
-}
-
-int au1550_device_ready(struct nand_chip *this)
-{
- return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
-}
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
-/**
- * au1550_select_chip - control -CE line
- * Forbid driving -CE manually permitting the NAND controller to do this.
- * Keeping -CE asserted during the whole sector reads interferes with the
- * NOR flash and PCMCIA drivers as it causes contention on the static bus.
- * We only have to hold -CE low for the NAND read commands since the flash
- * chip needs it to be asserted during chip not ready time but the NAND
- * controller keeps it released.
- *
- * @this: NAND chip object
- * @chip: chipnumber to select, -1 for deselect
- */
-static void au1550_select_chip(struct nand_chip *this, int chip)
-{
+ return ret;
}
-/**
- * au1550_command - Send command to NAND device
- * @this: NAND chip object
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- */
-static void au1550_command(struct nand_chip *this, unsigned command,
- int column, int page_addr)
+static int au1550nd_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
{
- struct mtd_info *mtd = nand_to_mtd(this);
- struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
- chip);
- int ce_override = 0, i;
- unsigned long flags = 0;
-
- /* Begin command latch cycle */
- au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->writesize) {
- /* OOB area */
- column -= mtd->writesize;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- ctx->write_byte(this, readcmd);
- }
- ctx->write_byte(this, command);
+ struct au1550nd_ctx *ctx = chip_to_au_ctx(this);
+ unsigned int i;
+ int ret;
- /* Set ALE and clear CLE to start address cycle */
- au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
+ if (check_only)
+ return 0;
- if (column != -1 || page_addr != -1) {
- au1550_hwcontrol(mtd, NAND_CTL_SETALE);
+ /* assert (force assert) chip enable */
+ alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+ /* Drain the writebuffer */
+ wmb();
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16 &&
- !nand_opcode_8bits(command))
- column >>= 1;
- ctx->write_byte(this, column);
- }
- if (page_addr != -1) {
- ctx->write_byte(this, (u8)(page_addr & 0xff));
-
- if (command == NAND_CMD_READ0 ||
- command == NAND_CMD_READ1 ||
- command == NAND_CMD_READOOB) {
- /*
- * NAND controller will release -CE after
- * the last address byte is written, so we'll
- * have to forcibly assert it. No interrupts
- * are allowed while we do this as we don't
- * want the NOR flash or PCMCIA drivers to
- * steal our precious bytes of data...
- */
- ce_override = 1;
- local_irq_save(flags);
- au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
- }
-
- ctx->write_byte(this, (u8)(page_addr >> 8));
-
- if (this->options & NAND_ROW_ADDR_3)
- ctx->write_byte(this,
- ((page_addr >> 16) & 0x0f));
- }
- /* Latch in address */
- au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
- }
-
- /*
- * Program and erase have their own busy handlers.
- * Status and sequential in need no delay.
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
-
- case NAND_CMD_RESET:
- break;
-
- case NAND_CMD_READ0:
- case NAND_CMD_READ1:
- case NAND_CMD_READOOB:
- /* Check if we're really driving -CE low (just in case) */
- if (unlikely(!ce_override))
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = au1550nd_exec_instr(this, &op->instrs[i]);
+ if (ret)
break;
-
- /* Apply a short delay always to ensure that we do wait tWB. */
- ndelay(100);
- /* Wait for a chip to become ready... */
- for (i = this->legacy.chip_delay;
- !this->legacy.dev_ready(this) && i > 0; --i)
- udelay(1);
-
- /* Release -CE and re-enable interrupts. */
- au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
- local_irq_restore(flags);
- return;
}
- /* Apply this short delay always to ensure that we do wait tWB. */
- ndelay(100);
-
- while(!this->legacy.dev_ready(this));
-}
-static int find_nand_cs(unsigned long nand_base)
-{
- void __iomem *base =
- (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
- unsigned long addr, staddr, start, mask, end;
- int i;
+ /* deassert chip enable */
+ alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+ /* Drain the writebuffer */
+ wmb();
- for (i = 0; i < 4; i++) {
- addr = 0x1000 + (i * 0x10); /* CSx */
- staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
- /* figure out the decoded range of this CS */
- start = (staddr << 4) & 0xfffc0000;
- mask = (staddr << 18) & 0xfffc0000;
- end = (start | (start - 1)) & ~(start ^ mask);
- if ((nand_base >= start) && (nand_base < end))
- return i;
- }
-
- return -ENODEV;
+ return ret;
}
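+
+/*
+ * Editor's sketch (not part of the patch): with ->exec_op(), a large-page
+ * read reaches the driver as a single nand_operation, roughly
+ * CMD(0x00) -> ADDR(column, row) -> CMD(0x30) -> WAITRDY -> DATA_IN, and
+ * au1550nd_exec_op() walks that instruction array with the chip enable
+ * held asserted around the whole sequence instead of per legacy call.
+ */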
+static const struct nand_controller_ops au1550nd_ops = {
+ .exec_op = au1550nd_exec_op,
+};
+
static int au1550nd_probe(struct platform_device *pdev)
{
struct au1550nd_platdata *pd;
@@ -424,23 +291,15 @@ static int au1550nd_probe(struct platform_device *pdev)
}
ctx->cs = cs;
- this->legacy.dev_ready = au1550_device_ready;
- this->legacy.select_chip = au1550_select_chip;
- this->legacy.cmdfunc = au1550_command;
-
- /* 30 us command delay time */
- this->legacy.chip_delay = 30;
+ nand_controller_init(&ctx->controller);
+ ctx->controller.ops = &au1550nd_ops;
+ this->controller = &ctx->controller;
this->ecc.mode = NAND_ECC_SOFT;
this->ecc.algo = NAND_ECC_HAMMING;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
- this->legacy.read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
- ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
- this->legacy.write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
- this->legacy.read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
-
ret = nand_scan(this, 1);
if (ret) {
dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
@@ -466,8 +325,12 @@ static int au1550nd_remove(struct platform_device *pdev)
{
struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct nand_chip *chip = &ctx->chip;
+ int ret;
- nand_release(&ctx->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
iounmap(ctx->base);
release_mem_region(r->start, 0x1000);
kfree(ctx);
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/main.c b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
index 8dae97c1dbe7..dcc70d9dc6e5 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/main.c
@@ -60,8 +60,12 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
static int bcm47xxnflash_remove(struct platform_device *pdev)
{
struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nflash->nand_chip;
+ int ret;
- nand_release(&nflash->nand_chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 8f9ffb46a09f..44068e9eea03 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -4,7 +4,6 @@
*/
#include <linux/clk.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -264,6 +263,7 @@ struct brcmnand_controller {
const unsigned int *block_sizes;
unsigned int max_page_size;
const unsigned int *page_sizes;
+ unsigned int page_size_shift;
unsigned int max_oob;
u32 features;
@@ -338,8 +338,38 @@ enum brcmnand_reg {
BRCMNAND_FC_BASE,
};
-/* BRCMNAND v4.0 */
-static const u16 brcmnand_regs_v40[] = {
+/* BRCMNAND v2.1-v2.2 */
+static const u16 brcmnand_regs_v21[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x5c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x60,
+ [BRCMNAND_CORR_ADDR] = 0x64,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x68,
+ [BRCMNAND_UNCORR_ADDR] = 0x6c,
+ [BRCMNAND_SEMAPHORE] = 0x50,
+ [BRCMNAND_ID] = 0x54,
+ [BRCMNAND_ID_EXT] = 0,
+ [BRCMNAND_LL_RDATA] = 0,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v3.3-v4.0 */
+static const u16 brcmnand_regs_v33[] = {
[BRCMNAND_CMD_START] = 0x04,
[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
[BRCMNAND_CMD_ADDRESS] = 0x0c,
@@ -536,6 +566,9 @@ enum {
CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
CFG_DEVICE_SIZE_SHIFT = 24,
+ /* Only for v2.1 */
+ CFG_PAGE_SIZE_SHIFT_v2_1 = 30,
+
/* Only for pre-v7.1 (with no CFG_EXT register) */
CFG_PAGE_SIZE_SHIFT = 20,
CFG_BLK_SIZE_SHIFT = 28,
@@ -571,12 +604,16 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
{
static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
- static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
+ static const unsigned int block_sizes_v2_2[] = { 16, 128, 8, 512, 256, 0 };
+ static const unsigned int block_sizes_v2_1[] = { 16, 128, 8, 512, 0 };
+ static const unsigned int page_sizes_v3_4[] = { 512, 2048, 4096, 8192, 0 };
+ static const unsigned int page_sizes_v2_2[] = { 512, 2048, 4096, 0 };
+ static const unsigned int page_sizes_v2_1[] = { 512, 2048, 0 };
ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
- /* Only support v4.0+? */
- if (ctrl->nand_version < 0x0400) {
+ /* Only support v2.1+ */
+ if (ctrl->nand_version < 0x0201) {
dev_err(ctrl->dev, "version %#x not supported\n",
ctrl->nand_version);
return -ENODEV;
@@ -591,8 +628,10 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
ctrl->reg_offsets = brcmnand_regs_v60;
else if (ctrl->nand_version >= 0x0500)
ctrl->reg_offsets = brcmnand_regs_v50;
- else if (ctrl->nand_version >= 0x0400)
- ctrl->reg_offsets = brcmnand_regs_v40;
+ else if (ctrl->nand_version >= 0x0303)
+ ctrl->reg_offsets = brcmnand_regs_v33;
+ else if (ctrl->nand_version >= 0x0201)
+ ctrl->reg_offsets = brcmnand_regs_v21;
/* Chip-select stride */
if (ctrl->nand_version >= 0x0701)
@@ -606,8 +645,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
} else {
ctrl->cs_offsets = brcmnand_cs_offsets;
- /* v5.0 and earlier has a different CS0 offset layout */
- if (ctrl->nand_version <= 0x0500)
+ /* v3.3-5.0 have a different CS0 offset layout */
+ if (ctrl->nand_version >= 0x0303 &&
+ ctrl->nand_version <= 0x0500)
ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
}
@@ -617,14 +657,32 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
ctrl->max_page_size = 16 * 1024;
ctrl->max_block_size = 2 * 1024 * 1024;
} else {
- ctrl->page_sizes = page_sizes;
+ if (ctrl->nand_version >= 0x0304)
+ ctrl->page_sizes = page_sizes_v3_4;
+ else if (ctrl->nand_version >= 0x0202)
+ ctrl->page_sizes = page_sizes_v2_2;
+ else
+ ctrl->page_sizes = page_sizes_v2_1;
+
+ if (ctrl->nand_version >= 0x0202)
+ ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT;
+ else
+ ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1;
+
if (ctrl->nand_version >= 0x0600)
ctrl->block_sizes = block_sizes_v6;
- else
+ else if (ctrl->nand_version >= 0x0400)
ctrl->block_sizes = block_sizes_v4;
+ else if (ctrl->nand_version >= 0x0202)
+ ctrl->block_sizes = block_sizes_v2_2;
+ else
+ ctrl->block_sizes = block_sizes_v2_1;
if (ctrl->nand_version < 0x0400) {
- ctrl->max_page_size = 4096;
+ if (ctrl->nand_version < 0x0202)
+ ctrl->max_page_size = 2048;
+ else
+ ctrl->max_page_size = 4096;
ctrl->max_block_size = 512 * 1024;
}
}
@@ -810,6 +868,9 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
int cs = host->cs;
+ if (!ctrl->reg_offsets[reg])
+ return;
+
if (ctrl->nand_version == 0x0702)
bits = 7;
else if (ctrl->nand_version >= 0x0600)
@@ -868,8 +929,10 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
return GENMASK(7, 0);
else if (ctrl->nand_version >= 0x0600)
return GENMASK(6, 0);
- else
+ else if (ctrl->nand_version >= 0x0303)
return GENMASK(5, 0);
+ else
+ return GENMASK(4, 0);
}
#define NAND_ACC_CONTROL_ECC_SHIFT 16
@@ -1100,30 +1163,30 @@ static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
struct brcmnand_cfg *cfg = &host->hwcfg;
int sas = cfg->spare_area_size << cfg->sector_size_1k;
int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+ u32 next;
- if (section >= sectors * 2)
+ if (section > sectors)
return -ERANGE;
- oobregion->offset = (section / 2) * sas;
+ next = (section * sas);
+ if (section < sectors)
+ next += 6;
- if (section & 1) {
- oobregion->offset += 9;
- oobregion->length = 7;
+ if (section) {
+ oobregion->offset = ((section - 1) * sas) + 9;
} else {
- oobregion->length = 6;
-
- /* First sector of each page may have BBI */
- if (!section) {
- /*
- * Small-page NAND use byte 6 for BBI while large-page
- * NAND use byte 0.
- */
- if (cfg->page_size > 512)
- oobregion->offset++;
- oobregion->length--;
+ if (cfg->page_size > 512) {
+ /* Large page NAND uses first 2 bytes for BBI */
+ oobregion->offset = 2;
+ } else {
+ /* Small page NAND uses last byte before ECC for BBI */
+ oobregion->offset = 0;
+ next--;
}
}
+ oobregion->length = next - oobregion->offset;
+
return 0;
}
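+
+/*
+ * Editor's illustration (not part of the patch): for a 2KiB page with 16
+ * spare bytes per 512-byte sector, section 0 now reports bytes 2-5 (bytes
+ * 0-1 hold the BBI, 6-8 the ECC) and section 1 reports bytes 9-21, i.e.
+ * the tail of sector 0's spare area merged with the pre-ECC bytes of
+ * sector 1.
+ */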
@@ -2018,28 +2081,31 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
struct nand_chip *chip, void *buf, u64 addr)
{
- int i, sas;
- void *oob = chip->oob_poi;
+ struct mtd_oob_region ecc;
+ int i;
int bitflips = 0;
int page = addr >> chip->page_shift;
int ret;
+ void *ecc_bytes;
void *ecc_chunk;
if (!buf)
buf = nand_get_data_buf(chip);
- sas = mtd->oobsize / chip->ecc.steps;
-
/* read without ecc for verification */
ret = chip->ecc.read_page_raw(chip, buf, true, page);
if (ret)
return ret;
- for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+ for (i = 0; i < chip->ecc.steps; i++) {
ecc_chunk = buf + chip->ecc.size * i;
- ret = nand_check_erased_ecc_chunk(ecc_chunk,
- chip->ecc.size,
- oob, sas, NULL, 0,
+
+ mtd_ooblayout_ecc(mtd, i, &ecc);
+ ecc_bytes = chip->oob_poi + ecc.offset;
+
+ ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size,
+ ecc_bytes, ecc.length,
+ NULL, 0,
chip->ecc.strength);
if (ret < 0)
return ret;
@@ -2377,7 +2443,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
(device_size << CFG_DEVICE_SIZE_SHIFT);
if (cfg_offs == cfg_ext_offs) {
- tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
+ tmp |= (page_size << ctrl->page_size_shift) |
(block_size << CFG_BLK_SIZE_SHIFT);
nand_writereg(ctrl, cfg_offs, tmp);
} else {
@@ -2389,9 +2455,11 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
tmp = nand_readreg(ctrl, acc_control_offs);
tmp &= ~brcmnand_ecc_level_mask(ctrl);
- tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
tmp &= ~brcmnand_spare_area_mask(ctrl);
- tmp |= cfg->spare_area_size;
+ if (ctrl->nand_version >= 0x0302) {
+ tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp |= cfg->spare_area_size;
+ }
nand_writereg(ctrl, acc_control_offs, tmp);
brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
@@ -2577,7 +2645,7 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
* to/from, and have nand_base pass us a bounce buffer instead, as
* needed.
*/
- chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->options |= NAND_USES_DMA;
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
@@ -2764,6 +2832,8 @@ const struct dev_pm_ops brcmnand_pm_ops = {
EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
static const struct of_device_id brcmnand_of_match[] = {
+ { .compatible = "brcm,brcmnand-v2.1" },
+ { .compatible = "brcm,brcmnand-v2.2" },
{ .compatible = "brcm,brcmnand-v4.0" },
{ .compatible = "brcm,brcmnand-v5.0" },
{ .compatible = "brcm,brcmnand-v6.0" },
@@ -3045,9 +3115,15 @@ int brcmnand_remove(struct platform_device *pdev)
{
struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
struct brcmnand_host *host;
+ struct nand_chip *chip;
+ int ret;
- list_for_each_entry(host, &ctrl->host_list, node)
- nand_release(&host->chip);
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ chip = &host->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
clk_disable_unprepare(ctrl->clk);
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index efddc5c68afb..c405722adfe1 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2223,10 +2223,12 @@ static int cadence_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
- int status = cadence_nand_select_target(chip);
+ if (!check_only) {
+ int status = cadence_nand_select_target(chip);
- if (status)
- return status;
+ if (status)
+ return status;
+ }
return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
check_only);
@@ -2592,7 +2594,7 @@ cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr,
return 0;
}
-int cadence_nand_attach_chip(struct nand_chip *chip)
+static int cadence_nand_attach_chip(struct nand_chip *chip)
{
struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
@@ -2778,9 +2780,14 @@ static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
{
struct cdns_nand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
- nand_release(&entry->chip);
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
list_del(&entry->node);
}
}
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index 2d1c22dc88c1..92173790f20b 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -546,11 +546,6 @@ static int cafe_nand_write_page_lowlevel(struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int cafe_nand_block_bad(struct nand_chip *chip, loff_t ofs)
-{
- return 0;
-}
-
/* F_2[X]/(X**6+X+1) */
static unsigned short gf64_mul(u8 a, u8 b)
{
@@ -718,10 +713,8 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- if (skipbbt) {
- cafe->nand.options |= NAND_SKIP_BBTSCAN;
- cafe->nand.legacy.block_bad = cafe_nand_block_bad;
- }
+ if (skipbbt)
+ cafe->nand.options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
if (numtimings && numtimings != 3) {
dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
@@ -814,11 +807,14 @@ static void cafe_nand_remove(struct pci_dev *pdev)
struct mtd_info *mtd = pci_get_drvdata(pdev);
struct nand_chip *chip = mtd_to_nand(mtd);
struct cafe_priv *cafe = nand_get_controller_data(chip);
+ int ret;
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
free_irq(pdev->irq, mtd);
- nand_release(chip);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
diff --git a/drivers/mtd/nand/raw/cmx270_nand.c b/drivers/mtd/nand/raw/cmx270_nand.c
deleted file mode 100644
index 045b6175ae79..000000000000
--- a/drivers/mtd/nand/raw/cmx270_nand.c
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2006 Compulab, Ltd.
- * Mike Rapoport <mike@compulab.co.il>
- *
- * Derived from drivers/mtd/nand/h1910.c (removed in v3.10)
- * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * CM-X270 board.
- */
-
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include <mach/pxa2xx-regs.h>
-
-#define GPIO_NAND_CS (11)
-#define GPIO_NAND_RB (89)
-
-/* MTD structure for CM-X270 board */
-static struct mtd_info *cmx270_nand_mtd;
-
-/* remaped IO address of the device */
-static void __iomem *cmx270_nand_io;
-
-/*
- * Define static partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
- [0] = {
- .name = "cmx270-0",
- .offset = 0,
- .size = MTDPART_SIZ_FULL
- }
-};
-#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-
-static u_char cmx270_read_byte(struct nand_chip *this)
-{
- return (readl(this->legacy.IO_ADDR_R) >> 16);
-}
-
-static void cmx270_write_buf(struct nand_chip *this, const u_char *buf,
- int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- writel((*buf++ << 16), this->legacy.IO_ADDR_W);
-}
-
-static void cmx270_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
- int i;
-
- for (i=0; i<len; i++)
- *buf++ = readl(this->legacy.IO_ADDR_R) >> 16;
-}
-
-static inline void nand_cs_on(void)
-{
- gpio_set_value(GPIO_NAND_CS, 0);
-}
-
-static void nand_cs_off(void)
-{
- dsb();
-
- gpio_set_value(GPIO_NAND_CS, 1);
-}
-
-/*
- * hardware specific access to control-lines
- */
-static void cmx270_hwcontrol(struct nand_chip *this, int dat,
- unsigned int ctrl)
-{
- unsigned int nandaddr = (unsigned int)this->legacy.IO_ADDR_W;
-
- dsb();
-
- if (ctrl & NAND_CTRL_CHANGE) {
- if ( ctrl & NAND_ALE )
- nandaddr |= (1 << 3);
- else
- nandaddr &= ~(1 << 3);
- if ( ctrl & NAND_CLE )
- nandaddr |= (1 << 2);
- else
- nandaddr &= ~(1 << 2);
- if ( ctrl & NAND_NCE )
- nand_cs_on();
- else
- nand_cs_off();
- }
-
- dsb();
- this->legacy.IO_ADDR_W = (void __iomem*)nandaddr;
- if (dat != NAND_CMD_NONE)
- writel((dat << 16), this->legacy.IO_ADDR_W);
-
- dsb();
-}
-
-/*
- * read device ready pin
- */
-static int cmx270_device_ready(struct nand_chip *this)
-{
- dsb();
-
- return (gpio_get_value(GPIO_NAND_RB));
-}
-
-/*
- * Main initialization routine
- */
-static int __init cmx270_init(void)
-{
- struct nand_chip *this;
- int ret;
-
- if (!(machine_is_armcore() && cpu_is_pxa27x()))
- return -ENODEV;
-
- ret = gpio_request(GPIO_NAND_CS, "NAND CS");
- if (ret) {
- pr_warn("CM-X270: failed to request NAND CS gpio\n");
- return ret;
- }
-
- gpio_direction_output(GPIO_NAND_CS, 1);
-
- ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
- if (ret) {
- pr_warn("CM-X270: failed to request NAND R/B gpio\n");
- goto err_gpio_request;
- }
-
- gpio_direction_input(GPIO_NAND_RB);
-
- /* Allocate memory for MTD device structure and private data */
- this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
- if (!this) {
- ret = -ENOMEM;
- goto err_kzalloc;
- }
-
- cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
- if (!cmx270_nand_io) {
- pr_debug("Unable to ioremap NAND device\n");
- ret = -EINVAL;
- goto err_ioremap;
- }
-
- cmx270_nand_mtd = nand_to_mtd(this);
-
- /* Link the private data with the MTD structure */
- cmx270_nand_mtd->owner = THIS_MODULE;
-
- /* insert callbacks */
- this->legacy.IO_ADDR_R = cmx270_nand_io;
- this->legacy.IO_ADDR_W = cmx270_nand_io;
- this->legacy.cmd_ctrl = cmx270_hwcontrol;
- this->legacy.dev_ready = cmx270_device_ready;
-
- /* 15 us command delay time */
- this->legacy.chip_delay = 20;
- this->ecc.mode = NAND_ECC_SOFT;
- this->ecc.algo = NAND_ECC_HAMMING;
-
- /* read/write functions */
- this->legacy.read_byte = cmx270_read_byte;
- this->legacy.read_buf = cmx270_read_buf;
- this->legacy.write_buf = cmx270_write_buf;
-
- /* Scan to find existence of the device */
- ret = nand_scan(this, 1);
- if (ret) {
- pr_notice("No NAND device\n");
- goto err_scan;
- }
-
- /* Register the partitions */
- ret = mtd_device_register(cmx270_nand_mtd, partition_info,
- NUM_PARTITIONS);
- if (ret)
- goto err_scan;
-
- /* Return happy */
- return 0;
-
-err_scan:
- iounmap(cmx270_nand_io);
-err_ioremap:
- kfree(this);
-err_kzalloc:
- gpio_free(GPIO_NAND_RB);
-err_gpio_request:
- gpio_free(GPIO_NAND_CS);
-
- return ret;
-
-}
-module_init(cmx270_init);
-
-/*
- * Clean up routine
- */
-static void __exit cmx270_cleanup(void)
-{
- /* Release resources, unregister device */
- nand_release(mtd_to_nand(cmx270_nand_mtd));
-
- gpio_free(GPIO_NAND_RB);
- gpio_free(GPIO_NAND_CS);
-
- iounmap(cmx270_nand_io);
-
- kfree(mtd_to_nand(cmx270_nand_mtd));
-}
-module_exit(cmx270_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
-MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index e2322cee3229..9472bf798ed5 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -21,9 +21,9 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
+#include <linux/iopoll.h>
#include <asm/msr.h>
-#include <asm/io.h>
#define NR_CS553X_CONTROLLERS 4
@@ -89,76 +89,151 @@
#define CS_NAND_ECC_CLRECC (1<<1)
#define CS_NAND_ECC_ENECC (1<<0)
-static void cs553x_read_buf(struct nand_chip *this, u_char *buf, int len)
+struct cs553x_nand_controller {
+ struct nand_controller base;
+ struct nand_chip chip;
+ void __iomem *mmio;
+};
+
+static struct cs553x_nand_controller *
+to_cs553x(struct nand_controller *controller)
+{
+ return container_of(controller, struct cs553x_nand_controller, base);
+}
+
+static int cs553x_write_ctrl_byte(struct cs553x_nand_controller *cs553x,
+ u32 ctl, u8 data)
{
+ u8 status;
+ int ret;
+
+ writeb(ctl, cs553x->mmio + MM_NAND_CTL);
+ writeb(data, cs553x->mmio + MM_NAND_IO);
+ ret = readb_poll_timeout_atomic(cs553x->mmio + MM_NAND_STS, status,
+ !(status & CS_NAND_CTLR_BUSY), 1,
+ 100000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void cs553x_data_in(struct cs553x_nand_controller *cs553x, void *buf,
+ unsigned int len)
+{
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
while (unlikely(len > 0x800)) {
- memcpy_fromio(buf, this->legacy.IO_ADDR_R, 0x800);
+ memcpy_fromio(buf, cs553x->mmio, 0x800);
buf += 0x800;
len -= 0x800;
}
- memcpy_fromio(buf, this->legacy.IO_ADDR_R, len);
+ memcpy_fromio(buf, cs553x->mmio, len);
}
-static void cs553x_write_buf(struct nand_chip *this, const u_char *buf, int len)
+static void cs553x_data_out(struct cs553x_nand_controller *cs553x,
+ const void *buf, unsigned int len)
{
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
while (unlikely(len > 0x800)) {
- memcpy_toio(this->legacy.IO_ADDR_R, buf, 0x800);
+ memcpy_toio(cs553x->mmio, buf, 0x800);
buf += 0x800;
len -= 0x800;
}
- memcpy_toio(this->legacy.IO_ADDR_R, buf, len);
+ memcpy_toio(cs553x->mmio, buf, len);
}
-static unsigned char cs553x_read_byte(struct nand_chip *this)
+static int cs553x_wait_ready(struct cs553x_nand_controller *cs553x,
+ unsigned int timeout_ms)
{
- return readb(this->legacy.IO_ADDR_R);
+ u8 mask = CS_NAND_CTLR_BUSY | CS_NAND_STS_FLASH_RDY;
+ u8 status;
+
+ return readb_poll_timeout(cs553x->mmio + MM_NAND_STS, status,
+ (status & mask) == CS_NAND_STS_FLASH_RDY, 100,
+ timeout_ms * 1000);
}
-static void cs553x_write_byte(struct nand_chip *this, u_char byte)
+static int cs553x_exec_instr(struct cs553x_nand_controller *cs553x,
+ const struct nand_op_instr *instr)
{
- int i = 100000;
+ unsigned int i;
+ int ret = 0;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_CLE,
+ instr->ctx.cmd.opcode);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ ret = cs553x_write_ctrl_byte(cs553x, CS_NAND_CTL_ALE,
+ instr->ctx.addr.addrs[i]);
+ if (ret)
+ break;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ cs553x_data_in(cs553x, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ cs553x_data_out(cs553x, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
- while (i && readb(this->legacy.IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
- udelay(1);
- i--;
+ case NAND_OP_WAITRDY_INSTR:
+ ret = cs553x_wait_ready(cs553x, instr->ctx.waitrdy.timeout_ms);
+ break;
}
- writeb(byte, this->legacy.IO_ADDR_W + 0x801);
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return ret;
}
-static void cs553x_hwcontrol(struct nand_chip *this, int cmd,
- unsigned int ctrl)
+static int cs553x_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
{
- void __iomem *mmio_base = this->legacy.IO_ADDR_R;
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
- writeb(ctl, mmio_base + MM_NAND_CTL);
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
+ unsigned int i;
+ int ret;
+
+ if (check_only)
+ return 0;
+
+ /* De-assert the CE pin */
+ writeb(0, cs553x->mmio + MM_NAND_CTL);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = cs553x_exec_instr(cs553x, &op->instrs[i]);
+ if (ret)
+ break;
}
- if (cmd != NAND_CMD_NONE)
- cs553x_write_byte(this, cmd);
-}
-static int cs553x_device_ready(struct nand_chip *this)
-{
- void __iomem *mmio_base = this->legacy.IO_ADDR_R;
- unsigned char foo = readb(mmio_base + MM_NAND_STS);
+ /* Re-assert the CE pin. */
+ writeb(CS_NAND_CTL_CE, cs553x->mmio + MM_NAND_CTL);
- return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
+ return ret;
}
static void cs_enable_hwecc(struct nand_chip *this, int mode)
{
- void __iomem *mmio_base = this->legacy.IO_ADDR_R;
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
- writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
+ writeb(0x07, cs553x->mmio + MM_NAND_ECC_CTL);
}
static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
u_char *ecc_code)
{
+ struct cs553x_nand_controller *cs553x = to_cs553x(this->controller);
uint32_t ecc;
- void __iomem *mmio_base = this->legacy.IO_ADDR_R;
- ecc = readl(mmio_base + MM_NAND_STS);
+ ecc = readl(cs553x->mmio + MM_NAND_STS);
ecc_code[1] = ecc >> 8;
ecc_code[0] = ecc >> 16;
@@ -166,10 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
return 0;
}
-static struct mtd_info *cs553x_mtd[4];
+static struct cs553x_nand_controller *controllers[4];
+
+static const struct nand_controller_ops cs553x_nand_controller_ops = {
+ .exec_op = cs553x_exec_op,
+};
static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
{
+ struct cs553x_nand_controller *controller;
int err = 0;
struct nand_chip *this;
struct mtd_info *new_mtd;
@@ -183,33 +263,29 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
}
/* Allocate memory for MTD device structure and private data */
- this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
- if (!this) {
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller) {
err = -ENOMEM;
goto out;
}
+ this = &controller->chip;
+ nand_controller_init(&controller->base);
+ controller->base.ops = &cs553x_nand_controller_ops;
+ this->controller = &controller->base;
new_mtd = nand_to_mtd(this);
/* Link the private data with the MTD structure */
new_mtd->owner = THIS_MODULE;
/* map physical address */
- this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = ioremap(adr, 4096);
- if (!this->legacy.IO_ADDR_R) {
+ controller->mmio = ioremap(adr, 4096);
+ if (!controller->mmio) {
pr_warn("ioremap cs553x NAND @0x%08lx failed\n", adr);
err = -EIO;
goto out_mtd;
}
- this->legacy.cmd_ctrl = cs553x_hwcontrol;
- this->legacy.dev_ready = cs553x_device_ready;
- this->legacy.read_byte = cs553x_read_byte;
- this->legacy.read_buf = cs553x_read_buf;
- this->legacy.write_buf = cs553x_write_buf;
-
- this->legacy.chip_delay = 0;
-
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 256;
this->ecc.bytes = 3;
@@ -232,15 +308,15 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
if (err)
goto out_free;
- cs553x_mtd[cs] = new_mtd;
+ controllers[cs] = controller;
goto out;
out_free:
kfree(new_mtd->name);
out_ior:
- iounmap(this->legacy.IO_ADDR_R);
+ iounmap(controller->mmio);
out_mtd:
- kfree(this);
+ kfree(controller);
out:
return err;
}
@@ -295,9 +371,10 @@ static int __init cs553x_init(void)
/* Register all devices together here. This means we can easily hack it to
do mtdconcat etc. if we want to. */
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
- if (cs553x_mtd[i]) {
+ if (controllers[i]) {
/* If any devices registered, return success. Else the last error. */
- mtd_device_register(cs553x_mtd[i], NULL, 0);
+ mtd_device_register(nand_to_mtd(&controllers[i]->chip),
+ NULL, 0);
err = 0;
}
}
@@ -312,26 +389,26 @@ static void __exit cs553x_cleanup(void)
int i;
for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
- struct mtd_info *mtd = cs553x_mtd[i];
- struct nand_chip *this;
- void __iomem *mmio_base;
+ struct cs553x_nand_controller *controller = controllers[i];
+ struct nand_chip *this = &controller->chip;
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int ret;
if (!mtd)
continue;
- this = mtd_to_nand(mtd);
- mmio_base = this->legacy.IO_ADDR_R;
-
/* Release resources, unregister device */
- nand_release(this);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(this);
kfree(mtd->name);
- cs553x_mtd[i] = NULL;
+ controllers[i] = NULL;
/* unmap physical address */
- iounmap(mmio_base);
+ iounmap(controller->mmio);
/* Free the MTD device structure */
- kfree(this);
+ kfree(controller);
}
}
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 25c185bea50c..d975a62caaa5 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -14,7 +14,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
@@ -38,6 +38,7 @@
* outputs in a "wire-AND" configuration, with no per-chip signals.
*/
struct davinci_nand_info {
+ struct nand_controller controller;
struct nand_chip chip;
struct platform_device *pdev;
@@ -81,46 +82,6 @@ static inline void davinci_nand_writel(struct davinci_nand_info *info,
/*----------------------------------------------------------------------*/
/*
- * Access to hardware control lines: ALE, CLE, secondary chipselect.
- */
-
-static void nand_davinci_hwcontrol(struct nand_chip *nand, int cmd,
- unsigned int ctrl)
-{
- struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
- void __iomem *addr = info->current_cs;
-
- /* Did the control lines change? */
- if (ctrl & NAND_CTRL_CHANGE) {
- if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
- addr += info->mask_cle;
- else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
- addr += info->mask_ale;
-
- nand->legacy.IO_ADDR_W = addr;
- }
-
- if (cmd != NAND_CMD_NONE)
- iowrite8(cmd, nand->legacy.IO_ADDR_W);
-}
-
-static void nand_davinci_select_chip(struct nand_chip *nand, int chip)
-{
- struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(nand));
-
- info->current_cs = info->vaddr;
-
- /* maybe kick in a second chipselect */
- if (chip > 0)
- info->current_cs += info->mask_chipsel;
-
- info->chip.legacy.IO_ADDR_W = info->current_cs;
- info->chip.legacy.IO_ADDR_R = info->chip.legacy.IO_ADDR_W;
-}
-
-/*----------------------------------------------------------------------*/
-
-/*
* 1-bit hardware ECC ... context maintained for each core chipselect
*/
@@ -410,48 +371,75 @@ correct:
return corrected;
}
-/*----------------------------------------------------------------------*/
-
-/*
- * NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
- * how these chips are normally wired. This translates to both 8 and 16
- * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
+/**
+ * nand_read_page_hwecc_oob_first - hw ecc, read oob first
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
- * For now we assume that configuration, or any other one which ignores
- * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
- * and have that transparently morphed into multiple NAND operations.
+ * Hardware ECC for large page chips, which requires the OOB to be read first.
+ * For this ECC mode, the write_page method is reused from ECC_HW. These
+ * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support
+ * with multiple ECC steps, which follows the "infix ECC" scheme and
+ * reads/writes ECC from the data area, overwriting the NAND manufacturer's
+ * bad block markings.
*/
-static void nand_davinci_read_buf(struct nand_chip *chip, uint8_t *buf,
- int len)
+static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip,
+ uint8_t *buf,
+ int oob_required, int page)
{
- if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
- ioread32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
- else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
- ioread16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
- else
- ioread8_rep(chip->legacy.IO_ADDR_R, buf, len);
-}
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ unsigned int max_bitflips = 0;
+
+ /* Read the OOB area first */
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
-static void nand_davinci_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
- iowrite32_rep(chip->legacy.IO_ADDR_R, buf, len >> 2);
- else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
- iowrite16_rep(chip->legacy.IO_ADDR_R, buf, len >> 1);
- else
- iowrite8_rep(chip->legacy.IO_ADDR_R, buf, len);
-}
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
-/*
- * Check hardware register for wait status. Returns 1 if device is ready,
- * 0 if it is still busy.
- */
-static int nand_davinci_dev_ready(struct nand_chip *chip)
-{
- struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
- return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i],
+ eccbytes, NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
}
/*----------------------------------------------------------------------*/
@@ -613,6 +601,13 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
break;
case NAND_ECC_HW:
if (pdata->ecc_bits == 4) {
+ int chunks = mtd->writesize / 512;
+
+ if (!chunks || mtd->oobsize < 16) {
+ dev_dbg(&info->pdev->dev, "too small\n");
+ return -EINVAL;
+ }
+
/*
* No sanity checks: CPUs must support this,
* and the chips may not use NAND_BUSWIDTH_16.
@@ -635,6 +630,26 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
info->chip.ecc.bytes = 10;
info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
info->chip.ecc.algo = NAND_ECC_BCH;
+
+ /*
+ * Update ECC layout if needed ... for 1-bit HW ECC, the
+ * default is OK, but it allocates 6 bytes when only 3
+ * are needed (for each 512 bytes). For 4-bit HW ECC,
+ * the default is not usable: 10 bytes are needed, not 6.
+ *
+ * For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * table marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ mtd_set_ooblayout(mtd,
+ &hwecc4_small_ooblayout_ops);
+ } else if (chunks == 4 || chunks == 8) {
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first;
+ } else {
+ return -EIO;
+ }
} else {
/* 1bit ecc hamming */
info->chip.ecc.calculate = nand_davinci_calculate_1bit;
@@ -650,39 +665,111 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
return -EINVAL;
}
- /*
- * Update ECC layout if needed ... for 1-bit HW ECC, the default
- * is OK, but it allocates 6 bytes when only 3 are needed (for
- * each 512 bytes). For the 4-bit HW ECC, that default is not
- * usable: 10 bytes are needed, not 6.
- */
- if (pdata->ecc_bits == 4) {
- int chunks = mtd->writesize / 512;
+ return ret;
+}
- if (!chunks || mtd->oobsize < 16) {
- dev_dbg(&info->pdev->dev, "too small\n");
- return -EINVAL;
- }
+static void nand_davinci_data_in(struct davinci_nand_info *info, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ u32 alignment = ((uintptr_t)buf | len) & 3;
- /* For small page chips, preserve the manufacturer's
- * badblock marking data ... and make sure a flash BBT
- * table marker fits in the free bytes.
- */
- if (chunks == 1) {
- mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
- } else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
- info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
- } else {
- return -EIO;
+ if (force_8bit || (alignment & 1))
+ ioread8_rep(info->current_cs, buf, len);
+ else if (alignment & 3)
+ ioread16_rep(info->current_cs, buf, len >> 1);
+ else
+ ioread32_rep(info->current_cs, buf, len >> 2);
+}
+
+static void nand_davinci_data_out(struct davinci_nand_info *info,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ u32 alignment = ((uintptr_t)buf | len) & 3;
+
+ if (force_8bit || (alignment & 1))
+ iowrite8_rep(info->current_cs, buf, len);
+ else if (alignment & 3)
+ iowrite16_rep(info->current_cs, buf, len >> 1);
+ else
+ iowrite32_rep(info->current_cs, buf, len >> 2);
+}
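A short illustration (assumed helper name, not part of the patch) of how the ((uintptr_t)buf | len) & 3 test above picks an access width: OR-ing the address and the length exposes the lowest set bit of either, so bit 0 forces byte accesses and bit 1 restricts the transfer to 16-bit accesses.

/* Sketch only: the access width selected by the alignment word above. */
static const char *davinci_access_width_example(const void *buf,
						unsigned int len,
						bool force_8bit)
{
	u32 alignment = ((uintptr_t)buf | len) & 3;

	if (force_8bit || (alignment & 1))
		return "8-bit";		/* odd address or odd length */
	else if (alignment & 3)
		return "16-bit";	/* 2-byte but not 4-byte aligned */
	return "32-bit";		/* address and length 4-byte aligned */
}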
+
+static int davinci_nand_exec_instr(struct davinci_nand_info *info,
+ const struct nand_op_instr *instr)
+{
+ unsigned int i, timeout_us;
+ u32 status;
+ int ret;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ iowrite8(instr->ctx.cmd.opcode,
+ info->current_cs + info->mask_cle);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ iowrite8(instr->ctx.addr.addrs[i],
+ info->current_cs + info->mask_ale);
}
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nand_davinci_data_in(info, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nand_davinci_data_out(info, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
+ ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
+ status, status & BIT(0), 100,
+ timeout_us);
+ if (ret)
+ return ret;
+
+ break;
}
- return ret;
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
+
+ return 0;
+}
+
+static int davinci_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ unsigned int i;
+
+ if (check_only)
+ return 0;
+
+ info->current_cs = info->vaddr + (op->cs * info->mask_chipsel);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ int ret;
+
+ ret = davinci_nand_exec_instr(info, &op->instrs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static const struct nand_controller_ops davinci_nand_controller_ops = {
.attach_chip = davinci_nand_attach_chip,
+ .exec_op = davinci_nand_exec_op,
};
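For context, the ->exec_op() hook registered above receives a flat list of instructions built by the raw NAND core. The hedged sketch below shows roughly what such an operation looks like from the caller side, reusing the instruction macros that appear elsewhere in this series (NAND_OP_CMD, NAND_OP_8BIT_DATA_IN, NAND_OPERATION); the helper name is illustrative and the direct call through the controller ops is for demonstration only.

/* Sketch only: a STATUS read expressed as a nand_operation. */
static int davinci_exec_op_usage_example(struct nand_chip *chip, u8 *status)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_STATUS, 0),
		NAND_OP_8BIT_DATA_IN(1, status, 0),
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);

	/* check_only = false: actually run the instructions */
	return chip->controller->ops->exec_op(chip, &op, false);
}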
static int nand_davinci_probe(struct platform_device *pdev)
@@ -746,11 +833,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
nand_set_flash_node(&info->chip, pdev->dev.of_node);
- info->chip.legacy.IO_ADDR_R = vaddr;
- info->chip.legacy.IO_ADDR_W = vaddr;
- info->chip.legacy.chip_delay = 0;
- info->chip.legacy.select_chip = nand_davinci_select_chip;
-
/* options such as NAND_BBT_USE_FLASH */
info->chip.bbt_options = pdata->bbt_options;
/* options such as 16-bit widths */
@@ -767,14 +849,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->mask_ale = pdata->mask_ale ? : MASK_ALE;
info->mask_cle = pdata->mask_cle ? : MASK_CLE;
- /* Set address of hardware control function */
- info->chip.legacy.cmd_ctrl = nand_davinci_hwcontrol;
- info->chip.legacy.dev_ready = nand_davinci_dev_ready;
-
- /* Speed up buffer I/O */
- info->chip.legacy.read_buf = nand_davinci_read_buf;
- info->chip.legacy.write_buf = nand_davinci_write_buf;
-
/* Use board-specific ECC config */
info->chip.ecc.mode = pdata->ecc_mode;
@@ -788,7 +862,9 @@ static int nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops;
+ nand_controller_init(&info->controller);
+ info->controller.ops = &davinci_nand_controller_ops;
+ info->chip.controller = &info->controller;
ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
@@ -817,13 +893,17 @@ err_cleanup_nand:
static int nand_davinci_remove(struct platform_device *pdev)
{
struct davinci_nand_info *info = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &info->chip;
+ int ret;
spin_lock_irq(&davinci_nand_lock);
if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- nand_release(&info->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 6a6c919b2569..4e6e1578aa2d 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -764,6 +764,7 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf,
static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
const struct nand_data_interface *conf)
{
+ static const unsigned int data_setup_on_host = 10000;
struct denali_controller *denali = to_denali_controller(chip);
struct denali_chip_sel *sel;
const struct nand_sdr_timings *timings;
@@ -796,15 +797,6 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
sel = &to_denali_chip(chip)->sels[chipnr];
- /* tREA -> ACC_CLKS */
- acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
- acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
-
- tmp = ioread32(denali->reg + ACC_CLKS);
- tmp &= ~ACC_CLKS__VALUE;
- tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
- sel->acc_clks = tmp;
-
/* tRWH -> RE_2_WE */
re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
@@ -862,14 +854,45 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
sel->rdwr_en_hi_cnt = tmp;
- /* tRP, tWP -> RDWR_EN_LO_CNT */
+ /*
+ * tREA -> ACC_CLKS
+ * tRP, tWP, tRHOH, tRC, tWC -> RDWR_EN_LO_CNT
+ */
+
+ /*
+ * Determine the minimum acc_clks needed to meet the setup timing when
+ * capturing the incoming data.
+ *
+ * The delay on the chip side is well-defined as tREA, but we need to
+ * take additional delay into account. This includes a certain degree
+ * of uncertainty, such as signal propagation delays on the PCB and
+ * in the SoC, the load capacitance of the I/O pins, etc.
+ */
+ acc_clks = DIV_ROUND_UP(timings->tREA_max + data_setup_on_host, t_x);
+
+ /* Determine the minimum rdwr_en_lo_cnt from the RE#/WE# pulse width */
rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
+
+ /* Extend rdwr_en_lo to meet the data hold timing */
+ rdwr_en_lo = max_t(int, rdwr_en_lo,
+ acc_clks - timings->tRHOH_min / t_x);
+
+ /* Extend rdwr_en_lo to meet the requirement for RE#/WE# cycle time */
rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
t_x);
- rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
+ /* Center the data latch timing for extra safety */
+ acc_clks = (acc_clks + rdwr_en_lo +
+ DIV_ROUND_UP(timings->tRHOH_min, t_x)) / 2;
+ acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+ tmp = ioread32(denali->reg + ACC_CLKS);
+ tmp &= ~ACC_CLKS__VALUE;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
+ sel->acc_clks = tmp;
+
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
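To make the timing derivation above concrete, a worked example with made-up SDR timings (picoseconds, not taken from any datasheet). Assume one controller clock t_x = 10000 ps, tREA_max = 20000, tRHOH_min = 15000, tRP_min = tWP_min = 12000, tRC_min = tWC_min = 25000, and an already-computed rdwr_en_hi of 2 clocks; the function name is illustrative.

/* Sketch only: the ACC_CLKS / RDWR_EN_LO_CNT arithmetic with example timings. */
static int denali_timing_example(void)
{
	const unsigned int t_x = 10000, data_setup_on_host = 10000;
	const unsigned int tREA_max = 20000, tRHOH_min = 15000;
	const unsigned int tRP_min = 12000, tWP_min = 12000;
	const unsigned int tRC_min = 25000, tWC_min = 25000;
	const int rdwr_en_hi = 2;		/* assumed, computed earlier */
	int acc_clks, rdwr_en_lo, rdwr_en_lo_hi;

	/* setup: (20000 + 10000) / 10000 = 3 clocks */
	acc_clks = DIV_ROUND_UP(tREA_max + data_setup_on_host, t_x);
	/* RE#/WE# pulse width: ceil(12000 / 10000) = 2 clocks */
	rdwr_en_lo = DIV_ROUND_UP(max(tRP_min, tWP_min), t_x);
	/* data hold: 3 - 15000/10000 = 2, so no extension needed here */
	rdwr_en_lo = max_t(int, rdwr_en_lo, acc_clks - tRHOH_min / t_x);
	/* cycle time: ceil(25000 / 10000) = 3, so lo >= 3 - 2 = 1 */
	rdwr_en_lo_hi = DIV_ROUND_UP(max(tRC_min, tWC_min), t_x);
	rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
	/* centre the latch point: (3 + 2 + 2) / 2 = 3 clocks */
	acc_clks = (acc_clks + rdwr_en_lo +
		    DIV_ROUND_UP(tRHOH_min, t_x)) / 2;

	return acc_clks;
}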
@@ -1203,7 +1226,7 @@ int denali_chip_init(struct denali_controller *denali,
mtd->name = "denali-nand";
if (denali->dma_avail) {
- chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->options |= NAND_USES_DMA;
chip->buf_align = 16;
}
@@ -1336,10 +1359,17 @@ EXPORT_SYMBOL(denali_init);
void denali_remove(struct denali_controller *denali)
{
- struct denali_chip *dchip;
+ struct denali_chip *dchip, *tmp;
+ struct nand_chip *chip;
+ int ret;
- list_for_each_entry(dchip, &denali->chips, node)
- nand_release(&dchip->chip);
+ list_for_each_entry_safe(dchip, tmp, &denali->chips, node) {
+ chip = &dchip->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&dchip->node);
+ }
denali_disable_irq(denali);
}
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index c2a391ad2c35..43721863a0d8 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -58,6 +58,7 @@ static unsigned long doc_locations[] __initdata = {
static struct mtd_info *doclist = NULL;
struct doc_priv {
+ struct nand_controller base;
void __iomem *virtadr;
unsigned long physadr;
u_char ChipID;
@@ -69,6 +70,7 @@ struct doc_priv {
int mh1_page;
struct rs_control *rs_decoder;
struct mtd_info *nextdoc;
+ bool supports_32b_reads;
/* Handle the last stage of initialization (BBT scan, partitioning) */
int (*late_init)(struct mtd_info *mtd);
@@ -84,10 +86,6 @@ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
-static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
- unsigned int bitmask);
-static void doc200x_select_chip(struct nand_chip *this, int chip);
-
static int debug = 0;
module_param(debug, int, 0);
@@ -302,20 +300,6 @@ static void doc2000_write_byte(struct nand_chip *this, u_char datum)
WriteDOC(datum, docptr, 2k_CDSN_IO);
}
-static u_char doc2000_read_byte(struct nand_chip *this)
-{
- struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
- u_char ret;
-
- ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- ret = ReadDOC(docptr, 2k_CDSN_IO);
- if (debug)
- printk("read_byte returns %02x\n", ret);
- return ret;
-}
-
static void doc2000_writebuf(struct nand_chip *this, const u_char *buf,
int len)
{
@@ -337,33 +321,42 @@ static void doc2000_readbuf(struct nand_chip *this, u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
void __iomem *docptr = doc->virtadr;
+ u32 *buf32 = (u32 *)buf;
int i;
if (debug)
printk("readbuf of %d bytes: ", len);
- for (i = 0; i < len; i++)
- buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ if (!doc->supports_32b_reads ||
+ ((((unsigned long)buf) | len) & 3)) {
+ for (i = 0; i < len; i++)
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ } else {
+ for (i = 0; i < len / 4; i++)
+ buf32[i] = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
}
-static void doc2000_readbuf_dword(struct nand_chip *this, u_char *buf, int len)
+/*
+ * We need our own readid() here because it's called before the NAND chip
+ * has been initialized, and calling nand_op_readid() would lead to a NULL
+ * pointer dereference when accessing the NAND timings.
+ */
+static void doc200x_readid(struct nand_chip *this, unsigned int cs, u8 *id)
{
- struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
- int i;
+ u8 addr = 0;
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, 50),
+ NAND_OP_8BIT_DATA_IN(2, id, 0),
+ };
- if (debug)
- printk("readbuf_dword of %d bytes: ", len);
+ struct nand_operation op = NAND_OPERATION(cs, instrs);
- if (unlikely((((unsigned long)buf) | len) & 3)) {
- for (i = 0; i < len; i++) {
- *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
- }
- } else {
- for (i = 0; i < len; i += 4) {
- *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
- }
- }
+ if (!id)
+ op.ninstrs--;
+
+ this->controller->ops->exec_op(this, &op, false);
}
static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
@@ -371,20 +364,11 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
uint16_t ret;
+ u8 id[2];
- doc200x_select_chip(this, nr);
- doc200x_hwcontrol(this, NAND_CMD_READID,
- NAND_CTRL_CLE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ doc200x_readid(this, nr, id);
- /* We can't use dev_ready here, but at least we wait for the
- * command to complete
- */
- udelay(50);
-
- ret = this->legacy.read_byte(this) << 8;
- ret |= this->legacy.read_byte(this);
+ ret = ((u16)id[0] << 8) | id[1];
if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
/* First chip probe. See if we get same results by 32-bit access */
@@ -394,18 +378,12 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
} ident;
void __iomem *docptr = doc->virtadr;
- doc200x_hwcontrol(this, NAND_CMD_READID,
- NAND_CTRL_CLE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(this, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
- doc200x_hwcontrol(this, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
-
- udelay(50);
+ doc200x_readid(this, nr, NULL);
ident.dword = readl(docptr + DoC_2k_CDSN_IO);
if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
pr_info("DiskOnChip 2000 responds to DWORD access\n");
- this->legacy.read_buf = &doc2000_readbuf_dword;
+ doc->supports_32b_reads = true;
}
}
@@ -434,20 +412,6 @@ static void __init doc2000_count_chips(struct mtd_info *mtd)
pr_debug("Detected %d chips per floor.\n", i);
}
-static int doc200x_wait(struct nand_chip *this)
-{
- struct doc_priv *doc = nand_get_controller_data(this);
-
- int status;
-
- DoC_WaitReady(doc);
- nand_status_op(this, NULL);
- DoC_WaitReady(doc);
- status = (int)this->legacy.read_byte(this);
-
- return status;
-}
-
static void doc2001_write_byte(struct nand_chip *this, u_char datum)
{
struct doc_priv *doc = nand_get_controller_data(this);
@@ -458,19 +422,6 @@ static void doc2001_write_byte(struct nand_chip *this, u_char datum)
WriteDOC(datum, docptr, WritePipeTerm);
}
-static u_char doc2001_read_byte(struct nand_chip *this)
-{
- struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
-
- //ReadDOC(docptr, CDSNSlowIO);
- /* 11.4.5 -- delay twice to allow extended length cycle */
- DoC_Delay(doc, 2);
- ReadDOC(docptr, ReadPipeInit);
- //return ReadDOC(docptr, Mil_CDSN_IO);
- return ReadDOC(docptr, LastDataRead);
-}
-
static void doc2001_writebuf(struct nand_chip *this, const u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
@@ -499,20 +450,6 @@ static void doc2001_readbuf(struct nand_chip *this, u_char *buf, int len)
buf[i] = ReadDOC(docptr, LastDataRead);
}
-static u_char doc2001plus_read_byte(struct nand_chip *this)
-{
- struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
- u_char ret;
-
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ReadDOC(docptr, Mplus_ReadPipeInit);
- ret = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug)
- printk("read_byte returns %02x\n", ret);
- return ret;
-}
-
static void doc2001plus_writebuf(struct nand_chip *this, const u_char *buf, int len)
{
struct doc_priv *doc = nand_get_controller_data(this);
@@ -550,9 +487,12 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
}
/* Terminate read pipeline */
- buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
- if (debug && i < 16)
- printk("%02x ", buf[len - 2]);
+ if (len >= 2) {
+ buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len - 2]);
+ }
+
buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
if (debug && i < 16)
printk("%02x ", buf[len - 1]);
@@ -560,226 +500,163 @@ static void doc2001plus_readbuf(struct nand_chip *this, u_char *buf, int len)
printk("\n");
}
-static void doc2001plus_select_chip(struct nand_chip *this, int chip)
+static void doc200x_write_control(struct doc_priv *doc, u8 value)
+{
+ WriteDOC(value, doc->virtadr, CDSNControl);
+ /* 11.4.3 -- 4 NOPs after CSDNControl write */
+ DoC_Delay(doc, 4);
+}
+
+static void doc200x_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
{
struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
- int floor = 0;
+ unsigned int i;
- if (debug)
- printk("select chip (%d)\n", chip);
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_CLE);
+ doc2000_write_byte(this, instr->ctx.cmd.opcode);
+ break;
- if (chip == -1) {
- /* Disable flash internally */
- WriteDOC(0, docptr, Mplus_FlashSelect);
- return;
- }
+ case NAND_OP_ADDR_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE | CDSN_CTRL_ALE);
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ u8 addr = instr->ctx.addr.addrs[i];
- floor = chip / doc->chips_per_floor;
- chip -= (floor * doc->chips_per_floor);
+ if (DoC_is_2000(doc))
+ doc2000_write_byte(this, addr);
+ else
+ doc2001_write_byte(this, addr);
+ }
+ break;
- /* Assert ChipEnable and deassert WriteProtect */
- WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- nand_reset_op(this);
+ case NAND_OP_DATA_IN_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+ if (DoC_is_2000(doc))
+ doc2000_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ doc2001_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
- doc->curchip = chip;
- doc->curfloor = floor;
+ case NAND_OP_DATA_OUT_INSTR:
+ doc200x_write_control(doc, CDSN_CTRL_CE);
+ if (DoC_is_2000(doc))
+ doc2000_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ doc2001_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ DoC_WaitReady(doc);
+ break;
+ }
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
}
-static void doc200x_select_chip(struct nand_chip *this, int chip)
+static int doc200x_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
{
struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
- int floor = 0;
+ unsigned int i;
- if (debug)
- printk("select chip (%d)\n", chip);
+ if (check_only)
+ return true;
- if (chip == -1)
- return;
+ doc->curchip = op->cs % doc->chips_per_floor;
+ doc->curfloor = op->cs / doc->chips_per_floor;
- floor = chip / doc->chips_per_floor;
- chip -= (floor * doc->chips_per_floor);
+ WriteDOC(doc->curfloor, doc->virtadr, FloorSelect);
+ WriteDOC(doc->curchip, doc->virtadr, CDSNDeviceSelect);
- /* 11.4.4 -- deassert CE before changing chip */
- doc200x_hwcontrol(this, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ /* Assert CE pin */
+ doc200x_write_control(doc, CDSN_CTRL_CE);
- WriteDOC(floor, docptr, FloorSelect);
- WriteDOC(chip, docptr, CDSNDeviceSelect);
+ for (i = 0; i < op->ninstrs; i++)
+ doc200x_exec_instr(this, &op->instrs[i]);
- doc200x_hwcontrol(this, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ /* De-assert CE pin */
+ doc200x_write_control(doc, 0);
- doc->curchip = chip;
- doc->curfloor = floor;
+ return 0;
}
-#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
-
-static void doc200x_hwcontrol(struct nand_chip *this, int cmd,
- unsigned int ctrl)
+static void doc2001plus_write_pipe_term(struct doc_priv *doc)
{
- struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- doc->CDSNControl &= ~CDSN_CTRL_MSK;
- doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
- if (debug)
- printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
- WriteDOC(doc->CDSNControl, docptr, CDSNControl);
- /* 11.4.3 -- 4 NOPs after CSDNControl write */
- DoC_Delay(doc, 4);
- }
- if (cmd != NAND_CMD_NONE) {
- if (DoC_is_2000(doc))
- doc2000_write_byte(this, cmd);
- else
- doc2001_write_byte(this, cmd);
- }
+ WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, doc->virtadr, Mplus_WritePipeTerm);
}
-static void doc2001plus_command(struct nand_chip *this, unsigned command,
- int column, int page_addr)
+static void doc2001plus_exec_instr(struct nand_chip *this,
+ const struct nand_op_instr *instr)
{
- struct mtd_info *mtd = nand_to_mtd(this);
struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
+ unsigned int i;
- /*
- * Must terminate write pipeline before sending any commands
- * to the device.
- */
- if (command == NAND_CMD_PAGEPROG) {
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
- }
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ WriteDOC(instr->ctx.cmd.opcode, doc->virtadr, Mplus_FlashCmd);
+ doc2001plus_write_pipe_term(doc);
+ break;
- /*
- * Write out the command to the device.
- */
- if (command == NAND_CMD_SEQIN) {
- int readcmd;
-
- if (column >= mtd->writesize) {
- /* OOB area */
- column -= mtd->writesize;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- WriteDOC(readcmd, docptr, Mplus_FlashCmd);
- }
- WriteDOC(command, docptr, Mplus_FlashCmd);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
-
- if (column != -1 || page_addr != -1) {
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (this->options & NAND_BUSWIDTH_16 &&
- !nand_opcode_8bits(command))
- column >>= 1;
- WriteDOC(column, docptr, Mplus_FlashAddress);
- }
- if (page_addr != -1) {
- WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
- WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- if (this->options & NAND_ROW_ADDR_3) {
- WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
- printk("high density\n");
- }
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ u8 addr = instr->ctx.addr.addrs[i];
+
+ WriteDOC(addr, doc->virtadr, Mplus_FlashAddress);
}
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ doc2001plus_write_pipe_term(doc);
/* deassert ALE */
- if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
- command == NAND_CMD_READOOB || command == NAND_CMD_READID)
- WriteDOC(0, docptr, Mplus_FlashControl);
- }
-
- /*
- * program and erase have their own busy handlers
- * status and sequential in needs no delay
- */
- switch (command) {
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
+ WriteDOC(0, doc->virtadr, Mplus_FlashControl);
+ break;
- case NAND_CMD_RESET:
- if (this->legacy.dev_ready)
- break;
- udelay(this->legacy.chip_delay);
- WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- WriteDOC(0, docptr, Mplus_WritePipeTerm);
- while (!(this->legacy.read_byte(this) & 0x40)) ;
- return;
-
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay
- */
- if (!this->legacy.dev_ready) {
- udelay(this->legacy.chip_delay);
- return;
- }
+ case NAND_OP_DATA_IN_INSTR:
+ doc2001plus_readbuf(this, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ doc2001plus_writebuf(this, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ doc2001plus_write_pipe_term(doc);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ DoC_WaitReady(doc);
+ break;
}
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay(100);
- /* wait until command is processed */
- while (!this->legacy.dev_ready(this)) ;
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
}
-static int doc200x_dev_ready(struct nand_chip *this)
+static int doc2001plus_exec_op(struct nand_chip *this,
+ const struct nand_operation *op,
+ bool check_only)
{
struct doc_priv *doc = nand_get_controller_data(this);
- void __iomem *docptr = doc->virtadr;
+ unsigned int i;
- if (DoC_is_MillenniumPlus(doc)) {
- /* 11.4.2 -- must NOP four times before checking FR/B# */
- DoC_Delay(doc, 4);
- if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
- if (debug)
- printk("not ready\n");
- return 0;
- }
- if (debug)
- printk("was ready\n");
- return 1;
- } else {
- /* 11.4.2 -- must NOP four times before checking FR/B# */
- DoC_Delay(doc, 4);
- if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
- if (debug)
- printk("not ready\n");
- return 0;
- }
- /* 11.4.2 -- Must NOP twice if it's ready */
- DoC_Delay(doc, 2);
- if (debug)
- printk("was ready\n");
- return 1;
- }
-}
+ if (check_only)
+ return true;
+
+ doc->curchip = op->cs % doc->chips_per_floor;
+ doc->curfloor = op->cs / doc->chips_per_floor;
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC(DOC_FLASH_CE, doc->virtadr, Mplus_FlashSelect);
+
+ for (i = 0; i < op->ninstrs; i++)
+ doc2001plus_exec_instr(this, &op->instrs[i]);
+
+ /* De-assert ChipEnable */
+ WriteDOC(0, doc->virtadr, Mplus_FlashSelect);
-static int doc200x_block_bad(struct nand_chip *this, loff_t ofs)
-{
- /* This is our last resort if we couldn't find or create a BBT. Just
- pretend all blocks are good. */
return 0;
}
@@ -1344,9 +1221,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
- this->legacy.read_byte = doc2000_read_byte;
- this->legacy.write_buf = doc2000_writebuf;
- this->legacy.read_buf = doc2000_readbuf;
doc->late_init = nftl_scan_bbt;
doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
@@ -1360,10 +1234,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
- this->legacy.read_byte = doc2001_read_byte;
- this->legacy.write_buf = doc2001_writebuf;
- this->legacy.read_buf = doc2001_readbuf;
-
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
ReadDOC(doc->virtadr, ChipID);
@@ -1390,13 +1260,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
struct nand_chip *this = mtd_to_nand(mtd);
struct doc_priv *doc = nand_get_controller_data(this);
- this->legacy.read_byte = doc2001plus_read_byte;
- this->legacy.write_buf = doc2001plus_writebuf;
- this->legacy.read_buf = doc2001plus_readbuf;
doc->late_init = inftl_scan_bbt;
- this->legacy.cmd_ctrl = NULL;
- this->legacy.select_chip = doc2001plus_select_chip;
- this->legacy.cmdfunc = doc2001plus_command;
this->ecc.hwctl = doc2001plus_enable_hwecc;
doc->chips_per_floor = 1;
@@ -1405,6 +1269,14 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
return 1;
}
+static const struct nand_controller_ops doc200x_ops = {
+ .exec_op = doc200x_exec_op,
+};
+
+static const struct nand_controller_ops doc2001plus_ops = {
+ .exec_op = doc2001plus_exec_op,
+};
+
static int __init doc_probe(unsigned long physadr)
{
struct nand_chip *nand = NULL;
@@ -1548,7 +1420,6 @@ static int __init doc_probe(unsigned long physadr)
goto fail;
}
-
/*
* Allocate a RS codec instance
*
@@ -1566,6 +1437,12 @@ static int __init doc_probe(unsigned long physadr)
goto fail;
}
+ nand_controller_init(&doc->base);
+ if (ChipID == DOC_ChipID_DocMilPlus16)
+ doc->base.ops = &doc2001plus_ops;
+ else
+ doc->base.ops = &doc200x_ops;
+
mtd = nand_to_mtd(nand);
nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
nand->bbt_md = nand->bbt_td + 1;
@@ -1573,12 +1450,8 @@ static int __init doc_probe(unsigned long physadr)
mtd->owner = THIS_MODULE;
mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
+ nand->controller = &doc->base;
nand_set_controller_data(nand, doc);
- nand->legacy.select_chip = doc200x_select_chip;
- nand->legacy.cmd_ctrl = doc200x_hwcontrol;
- nand->legacy.dev_ready = doc200x_dev_ready;
- nand->legacy.waitfunc = doc200x_wait;
- nand->legacy.block_bad = doc200x_block_bad;
nand->ecc.hwctl = doc200x_enable_hwecc;
nand->ecc.calculate = doc200x_calculate_ecc;
nand->ecc.correct = doc200x_correct_data;
@@ -1590,7 +1463,7 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
nand->bbt_options = NAND_BBT_USE_FLASH;
/* Skip the automatic BBT scan so we can run it manually */
- nand->options |= NAND_SKIP_BBTSCAN;
+ nand->options |= NAND_SKIP_BBTSCAN | NAND_NO_BBM_QUIRK;
doc->physadr = physadr;
doc->virtadr = virtadr;
@@ -1609,13 +1482,10 @@ static int __init doc_probe(unsigned long physadr)
numchips = doc2001_init(mtd);
if ((ret = nand_scan(nand, numchips)) || (ret = doc->late_init(mtd))) {
- /* DBB note: i believe nand_release is necessary here, as
+ /* DBB note: I believe nand_cleanup is necessary here, as
buffers may have been allocated in nand_base. Check with
Thomas. FIX ME! */
- /* nand_release will call mtd_device_unregister, but we
- haven't yet added it. This is handled without incident by
- mtd_device_unregister, as far as I can tell. */
- nand_release(nand);
+ nand_cleanup(nand);
goto fail;
}
@@ -1644,13 +1514,16 @@ static void release_nanddoc(void)
struct mtd_info *mtd, *nextmtd;
struct nand_chip *nand;
struct doc_priv *doc;
+ int ret;
for (mtd = doclist; mtd; mtd = nextmtd) {
nand = mtd_to_nand(mtd);
doc = nand_get_controller_data(nand);
nextmtd = doc->nextdoc;
- nand_release(nand);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(nand);
iounmap(doc->virtadr);
release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
free_rs(doc->rs_decoder);
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index e1dc675b12bb..088692b2e27a 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -956,8 +956,13 @@ static int fsl_elbc_nand_remove(struct platform_device *pdev)
{
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+ struct nand_chip *chip = &priv->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
- nand_release(&priv->chip);
fsl_elbc_chip_remove(priv);
mutex_lock(&fsl_elbc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 2af09edf405b..00ae7a910b03 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -1093,8 +1093,13 @@ err:
static int fsl_ifc_nand_remove(struct platform_device *dev)
{
struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+ struct nand_chip *chip = &priv->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
- nand_release(&priv->chip);
fsl_ifc_chip_remove(priv);
mutex_lock(&fsl_ifc_nand_mutex);
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index f31fae3a4c68..627deb26db51 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -317,10 +317,13 @@ err1:
static int fun_remove(struct platform_device *ofdev)
{
struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
- struct mtd_info *mtd = nand_to_mtd(&fun->chip);
- int i;
+ struct nand_chip *chip = &fun->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i;
- nand_release(&fun->chip);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
kfree(mtd->name);
for (i = 0; i < fun->mchip_count; i++) {
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index a6964feeec77..3909752b14c5 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -608,6 +608,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
unsigned int op_id;
int i;
+ if (check_only)
+ return 0;
+
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -691,7 +694,7 @@ static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(chip, NAND_ECC_READ);
- ret = nand_read_data_op(chip, p, eccsize, false);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
@@ -809,11 +812,12 @@ static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
i = 0;
while (num_err--) {
- change_bit(0, (unsigned long *)&err_idx[i]);
- change_bit(1, (unsigned long *)&err_idx[i]);
+ err_idx[i] ^= 3;
if (err_idx[i] < chip->ecc.size * 8) {
- change_bit(err_idx[i], (unsigned long *)dat);
+ int err = err_idx[i];
+
+ dat[err >> 3] ^= BIT(err & 7);
i++;
}
}
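The replacement above is a plain-arithmetic equivalent of the old change_bit() pair: XOR-ing the reported index with 3 toggles its two least-significant bits in one step, and dat[err >> 3] ^= BIT(err & 7) flips bit (err % 8) of byte (err / 8). A self-contained illustration with an arbitrary error index (helper name and value are examples only):

/* Sketch only: the index/bit arithmetic used by fsmc_bch8_correct_data(). */
static void fsmc_bitflip_example(u8 *dat)
{
	unsigned int err_idx = 13;	/* arbitrary index reported by the BCH */

	/* XOR with 3 == change_bit(0) + change_bit(1): 13 -> 14 */
	err_idx ^= 3;

	/* toggle bit 14 % 8 = 6 of byte 14 / 8 = 1 */
	dat[err_idx >> 3] ^= BIT(err_idx & 7);
}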
@@ -1132,7 +1136,12 @@ static int fsmc_nand_remove(struct platform_device *pdev)
struct fsmc_nand_data *host = platform_get_drvdata(pdev);
if (host) {
- nand_release(&host->nand);
+ struct nand_chip *chip = &host->nand;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
fsmc_nand_disable(host);
if (host->mode == USE_DMA_ACCESS) {
diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c
index f6b12354024f..938077e5c6a9 100644
--- a/drivers/mtd/nand/raw/gpio.c
+++ b/drivers/mtd/nand/raw/gpio.c
@@ -190,8 +190,12 @@ gpio_nand_get_io_sync(struct platform_device *pdev)
static int gpio_nand_remove(struct platform_device *pdev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &gpiomtd->nand_chip;
+ int ret;
- nand_release(&gpiomtd->nand_chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
/* Enable write protection and disable the chip */
if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 53b00c841aec..061a8ddda275 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -540,8 +540,10 @@ static int bch_set_geometry(struct gpmi_nand_data *this)
return ret;
ret = pm_runtime_get_sync(this->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(this->dev);
return ret;
+ }
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
@@ -834,158 +836,6 @@ map_fail:
return false;
}
-/**
- * gpmi_copy_bits - copy bits from one memory region to another
- * @dst: destination buffer
- * @dst_bit_off: bit offset we're starting to write at
- * @src: source buffer
- * @src_bit_off: bit offset we're starting to read from
- * @nbits: number of bits to copy
- *
- * This functions copies bits from one memory region to another, and is used by
- * the GPMI driver to copy ECC sections which are not guaranteed to be byte
- * aligned.
- *
- * src and dst should not overlap.
- *
- */
-static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
- size_t src_bit_off, size_t nbits)
-{
- size_t i;
- size_t nbytes;
- u32 src_buffer = 0;
- size_t bits_in_src_buffer = 0;
-
- if (!nbits)
- return;
-
- /*
- * Move src and dst pointers to the closest byte pointer and store bit
- * offsets within a byte.
- */
- src += src_bit_off / 8;
- src_bit_off %= 8;
-
- dst += dst_bit_off / 8;
- dst_bit_off %= 8;
-
- /*
- * Initialize the src_buffer value with bits available in the first
- * byte of data so that we end up with a byte aligned src pointer.
- */
- if (src_bit_off) {
- src_buffer = src[0] >> src_bit_off;
- if (nbits >= (8 - src_bit_off)) {
- bits_in_src_buffer += 8 - src_bit_off;
- } else {
- src_buffer &= GENMASK(nbits - 1, 0);
- bits_in_src_buffer += nbits;
- }
- nbits -= bits_in_src_buffer;
- src++;
- }
-
- /* Calculate the number of bytes that can be copied from src to dst. */
- nbytes = nbits / 8;
-
- /* Try to align dst to a byte boundary. */
- if (dst_bit_off) {
- if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
- src_buffer |= src[0] << bits_in_src_buffer;
- bits_in_src_buffer += 8;
- src++;
- nbytes--;
- }
-
- if (bits_in_src_buffer >= (8 - dst_bit_off)) {
- dst[0] &= GENMASK(dst_bit_off - 1, 0);
- dst[0] |= src_buffer << dst_bit_off;
- src_buffer >>= (8 - dst_bit_off);
- bits_in_src_buffer -= (8 - dst_bit_off);
- dst_bit_off = 0;
- dst++;
- if (bits_in_src_buffer > 7) {
- bits_in_src_buffer -= 8;
- dst[0] = src_buffer;
- dst++;
- src_buffer >>= 8;
- }
- }
- }
-
- if (!bits_in_src_buffer && !dst_bit_off) {
- /*
- * Both src and dst pointers are byte aligned, thus we can
- * just use the optimized memcpy function.
- */
- if (nbytes)
- memcpy(dst, src, nbytes);
- } else {
- /*
- * src buffer is not byte aligned, hence we have to copy each
- * src byte to the src_buffer variable before extracting a byte
- * to store in dst.
- */
- for (i = 0; i < nbytes; i++) {
- src_buffer |= src[i] << bits_in_src_buffer;
- dst[i] = src_buffer;
- src_buffer >>= 8;
- }
- }
- /* Update dst and src pointers */
- dst += nbytes;
- src += nbytes;
-
- /*
- * nbits is the number of remaining bits. It should not exceed 8 as
- * we've already copied as much bytes as possible.
- */
- nbits %= 8;
-
- /*
- * If there's no more bits to copy to the destination and src buffer
- * was already byte aligned, then we're done.
- */
- if (!nbits && !bits_in_src_buffer)
- return;
-
- /* Copy the remaining bits to src_buffer */
- if (nbits)
- src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
- bits_in_src_buffer;
- bits_in_src_buffer += nbits;
-
- /*
- * In case there were not enough bits to get a byte aligned dst buffer
- * prepare the src_buffer variable to match the dst organization (shift
- * src_buffer by dst_bit_off and retrieve the least significant bits
- * from dst).
- */
- if (dst_bit_off)
- src_buffer = (src_buffer << dst_bit_off) |
- (*dst & GENMASK(dst_bit_off - 1, 0));
- bits_in_src_buffer += dst_bit_off;
-
- /*
- * Keep most significant bits from dst if we end up with an unaligned
- * number of bits.
- */
- nbytes = bits_in_src_buffer / 8;
- if (bits_in_src_buffer % 8) {
- src_buffer |= (dst[nbytes] &
- GENMASK(7, bits_in_src_buffer % 8)) <<
- (nbytes * 8);
- nbytes++;
- }
-
- /* Copy the remaining bytes to dst */
- for (i = 0; i < nbytes; i++) {
- dst[i] = src_buffer;
- src_buffer >>= 8;
- }
-}
-
/* add our own bbt descriptor */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
@@ -1713,7 +1563,7 @@ static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
* inline (interleaved with payload DATA), and do not align data chunk on
* byte boundaries.
* We thus need to take care moving the payload data and ECC bits stored in the
- * page into the provided buffers, which is why we're using gpmi_copy_bits.
+ * page into the provided buffers, which is why we're using nand_extract_bits().
*
* See set_geometry_by_ecc_info inline comments to have a full description
* of the layout used by the GPMI controller.
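Since the layout described above is not byte aligned, the driver has to move payload and ECC data at bit granularity. The naive loop below (an illustration only, not driver code) shows what such a copy does; the nand_extract_bits() helper referenced in the comment performs the same job much more efficiently.

/* Sketch only: bit-granular copy, the operation nand_extract_bits() optimizes. */
static void copy_bits_naive(u8 *dst, size_t dst_bit_off,
			    const u8 *src, size_t src_bit_off, size_t nbits)
{
	size_t i;

	for (i = 0; i < nbits; i++) {
		size_t s = src_bit_off + i, d = dst_bit_off + i;
		u8 bit = (src[s / 8] >> (s % 8)) & 1;

		dst[d / 8] &= ~(1 << (d % 8));
		dst[d / 8] |= bit << (d % 8);
	}
}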
@@ -1762,9 +1612,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
/* Extract interleaved payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- gpmi_copy_bits(buf, step * eccsize * 8,
- tmp_buf, src_bit_off,
- eccsize * 8);
+ nand_extract_bits(buf, step * eccsize, tmp_buf,
+ src_bit_off, eccsize * 8);
src_bit_off += eccsize * 8;
/* Align last ECC block to align a byte boundary */
@@ -1773,9 +1622,8 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
eccbits += 8 - ((oob_bit_off + eccbits) % 8);
if (oob_required)
- gpmi_copy_bits(oob, oob_bit_off,
- tmp_buf, src_bit_off,
- eccbits);
+ nand_extract_bits(oob, oob_bit_off, tmp_buf,
+ src_bit_off, eccbits);
src_bit_off += eccbits;
oob_bit_off += eccbits;
@@ -1800,7 +1648,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
* inline (interleaved with payload DATA), and do not align data chunk on
* byte boundaries.
* We thus need to take care moving the OOB area at the right place in the
- * final page, which is why we're using gpmi_copy_bits.
+ * final page, which is why we're using nand_extract_bits().
*
* See set_geometry_by_ecc_info inline comments to have a full description
* of the layout used by the GPMI controller.
@@ -1839,8 +1687,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
/* Interleave payload data and ECC bits */
for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
if (buf)
- gpmi_copy_bits(tmp_buf, dst_bit_off,
- buf, step * eccsize * 8, eccsize * 8);
+ nand_extract_bits(tmp_buf, dst_bit_off, buf,
+ step * eccsize * 8, eccsize * 8);
dst_bit_off += eccsize * 8;
/* Align last ECC block to align a byte boundary */
@@ -1849,8 +1697,8 @@ static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
eccbits += 8 - ((oob_bit_off + eccbits) % 8);
if (oob_required)
- gpmi_copy_bits(tmp_buf, dst_bit_off,
- oob, oob_bit_off, eccbits);
+ nand_extract_bits(tmp_buf, dst_bit_off, oob,
+ oob_bit_off, eccbits);
dst_bit_off += eccbits;
oob_bit_off += eccbits;
@@ -2408,6 +2256,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
struct completion *completion;
unsigned long to;
+ if (check_only)
+ return 0;
+
this->ntransfers = 0;
for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
this->transfers[i].direction = DMA_NONE;
@@ -2658,7 +2509,7 @@ static int gpmi_nand_probe(struct platform_device *pdev)
ret = __gpmi_enable_clk(this, true);
if (ret)
- goto exit_nfc_init;
+ goto exit_acquire_resources;
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -2693,11 +2544,15 @@ exit_acquire_resources:
static int gpmi_nand_remove(struct platform_device *pdev)
{
struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &this->nand;
+ int ret;
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- nand_release(&this->nand);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
gpmi_free_dma_buffer(this);
release_resources(this);
return 0;
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index 0b48be54ba6f..b84238e2268a 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -806,8 +806,12 @@ static int hisi_nfc_probe(struct platform_device *pdev)
static int hisi_nfc_remove(struct platform_device *pdev)
{
struct hinfc_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
- nand_release(&host->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index 935c4902ada7..69423bb29adb 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -27,9 +27,6 @@
#define DRV_NAME "ingenic-nand"
-/* Command delay when there is no R/B pin. */
-#define RB_DELAY_US 100
-
struct jz_soc_info {
unsigned long data_offset;
unsigned long addr_offset;
@@ -49,7 +46,6 @@ struct ingenic_nfc {
struct nand_controller controller;
unsigned int num_banks;
struct list_head chips;
- int selected;
struct ingenic_nand_cs cs[];
};
@@ -102,7 +98,7 @@ static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
.ecc = qi_lb60_ooblayout_ecc,
.free = qi_lb60_ooblayout_free,
};
@@ -142,51 +138,6 @@ static const struct mtd_ooblayout_ops jz4725b_ooblayout_ops = {
.free = jz4725b_ooblayout_free,
};
-static void ingenic_nand_select_chip(struct nand_chip *chip, int chipnr)
-{
- struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
- struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
- struct ingenic_nand_cs *cs;
-
- /* Ensure the currently selected chip is deasserted. */
- if (chipnr == -1 && nfc->selected >= 0) {
- cs = &nfc->cs[nfc->selected];
- jz4780_nemc_assert(nfc->dev, cs->bank, false);
- }
-
- nfc->selected = chipnr;
-}
-
-static void ingenic_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
- struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
- struct ingenic_nand_cs *cs;
-
- if (WARN_ON(nfc->selected < 0))
- return;
-
- cs = &nfc->cs[nfc->selected];
-
- jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_ALE)
- writeb(cmd, cs->base + nfc->soc_info->addr_offset);
- else if (ctrl & NAND_CLE)
- writeb(cmd, cs->base + nfc->soc_info->cmd_offset);
-}
-
-static int ingenic_nand_dev_ready(struct nand_chip *chip)
-{
- struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
-
- return !gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
static void ingenic_nand_ecc_hwctl(struct nand_chip *chip, int mode)
{
struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
@@ -298,8 +249,91 @@ static int ingenic_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static int ingenic_nand_exec_instr(struct nand_chip *chip,
+ struct ingenic_nand_cs *cs,
+ const struct nand_op_instr *instr)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(chip->controller);
+ unsigned int i;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writeb(instr->ctx.cmd.opcode,
+ cs->base + nfc->soc_info->cmd_offset);
+ return 0;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++)
+ writeb(instr->ctx.addr.addrs[i],
+ cs->base + nfc->soc_info->addr_offset);
+ return 0;
+ case NAND_OP_DATA_IN_INSTR:
+ if (instr->ctx.data.force_8bit ||
+ !(chip->options & NAND_BUSWIDTH_16))
+ ioread8_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ else
+ ioread16_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_DATA_OUT_INSTR:
+ if (instr->ctx.data.force_8bit ||
+ !(chip->options & NAND_BUSWIDTH_16))
+ iowrite8_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ else
+ iowrite16_rep(cs->base + nfc->soc_info->data_offset,
+ instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ return 0;
+ case NAND_OP_WAITRDY_INSTR:
+ if (!nand->busy_gpio)
+ return nand_soft_waitrdy(chip,
+ instr->ctx.waitrdy.timeout_ms);
+
+ return nand_gpio_waitrdy(chip, nand->busy_gpio,
+ instr->ctx.waitrdy.timeout_ms);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ingenic_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct ingenic_nand *nand = to_ingenic_nand(nand_to_mtd(chip));
+ struct ingenic_nfc *nfc = to_ingenic_nfc(nand->chip.controller);
+ struct ingenic_nand_cs *cs;
+ unsigned int i;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ cs = &nfc->cs[op->cs];
+ jz4780_nemc_assert(nfc->dev, cs->bank, true);
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ingenic_nand_exec_instr(chip, cs, &op->instrs[i]);
+ if (ret)
+ break;
+
+ if (op->instrs[i].delay_ns)
+ ndelay(op->instrs[i].delay_ns);
+ }
+ jz4780_nemc_assert(nfc->dev, cs->bank, false);
+
+ return ret;
+}
+
static const struct nand_controller_ops ingenic_nand_controller_ops = {
.attach_chip = ingenic_nand_attach_chip,
+ .exec_op = ingenic_nand_exec_op,
};
static int ingenic_nand_init_chip(struct platform_device *pdev,
@@ -339,10 +373,20 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
ret = PTR_ERR(nand->busy_gpio);
dev_err(dev, "failed to request busy GPIO: %d\n", ret);
return ret;
- } else if (nand->busy_gpio) {
- nand->chip.legacy.dev_ready = ingenic_nand_dev_ready;
}
+ /*
+ * The rb-gpios semantics was undocumented and qi,lb60 (along with
+ * the ingenic driver) got it wrong. The active state encodes the
+ * NAND ready state, which is high level. Since there's no signal
+ * inverter on this board, it should be active-high. Let's fix that
+ * here for older DTs so we can re-use the generic nand_gpio_waitrdy()
+ * helper, and be consistent with what other drivers do.
+ */
+ if (of_machine_is_compatible("qi,lb60") &&
+ gpiod_is_active_low(nand->busy_gpio))
+ gpiod_toggle_active_low(nand->busy_gpio);
+
nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
if (IS_ERR(nand->wp_gpio)) {
@@ -359,12 +403,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
return -ENOMEM;
mtd->dev.parent = dev;
- chip->legacy.IO_ADDR_R = cs->base + nfc->soc_info->data_offset;
- chip->legacy.IO_ADDR_W = cs->base + nfc->soc_info->data_offset;
- chip->legacy.chip_delay = RB_DELAY_US;
chip->options = NAND_NO_SUBPAGE_WRITE;
- chip->legacy.select_chip = ingenic_nand_select_chip;
- chip->legacy.cmd_ctrl = ingenic_nand_cmd_ctrl;
chip->ecc.mode = NAND_ECC_HW;
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
@@ -376,7 +415,7 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
- nand_release(chip);
+ nand_cleanup(chip);
return ret;
}
@@ -387,13 +426,18 @@ static int ingenic_nand_init_chip(struct platform_device *pdev,
static void ingenic_nand_cleanup_chips(struct ingenic_nfc *nfc)
{
- struct ingenic_nand *chip;
+ struct ingenic_nand *ingenic_chip;
+ struct nand_chip *chip;
+ int ret;
while (!list_empty(&nfc->chips)) {
- chip = list_first_entry(&nfc->chips,
- struct ingenic_nand, chip_list);
- nand_release(&chip->chip);
- list_del(&chip->chip_list);
+ ingenic_chip = list_first_entry(&nfc->chips,
+ struct ingenic_nand, chip_list);
+ chip = &ingenic_chip->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&ingenic_chip->chip_list);
}
}
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index 9d0caadf940e..03866b0aadea 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -75,6 +75,9 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+/* MLC pairing schemes */
+extern const struct mtd_pairing_scheme dist3_pairing_scheme;
+
/* Core functions */
const struct nand_manufacturer *nand_get_manufacturer(u8 id);
int nand_bbm_get_next_page(struct nand_chip *chip, int page);
@@ -106,6 +109,15 @@ static inline bool nand_has_exec_op(struct nand_chip *chip)
return true;
}
+static inline int nand_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return 0;
+
+ return chip->controller->ops->exec_op(chip, op, true);
+}
+
static inline int nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op)
{
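The new nand_check_op() helper lets callers ask a controller whether it can run an operation without executing it, which is exactly the check_only path the fsmc, gpmi, meson and marvell hunks in this series now short-circuit. A hedged usage sketch (helper name and timeout are examples; the macros are the standard instruction builders):

/* Sketch only: validate an operation before actually executing it. */
static int reset_if_supported_example(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(250, 0),	/* example timeout in ms */
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);
	int ret;

	ret = nand_check_op(chip, &op);	/* exec_op(..., check_only = true) */
	if (ret)
		return ret;

	return nand_exec_op(chip, &op);	/* exec_op(..., check_only = false) */
}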
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 241b58b83240..7521038af2ef 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -826,8 +826,13 @@ free_gpio:
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
- nand_release(&host->nand_chip);
free_irq(host->irq, host);
if (use_dma)
dma_release_channel(host->dma_chan);
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 163f976353f8..b151fd000815 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -947,8 +947,12 @@ static int lpc32xx_nand_remove(struct platform_device *pdev)
{
uint32_t tmp;
struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
- nand_release(&host->nand_chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
dma_release_channel(host->dma_chan);
/* Force CE high */
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 179f0ca585f8..260a0430313e 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -707,7 +707,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
* In case the interrupt was not served in the required time frame,
* check if the ISR was not served or if something actually went wrong.
*/
- if (ret && !pending) {
+ if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
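Context for the one-character fix above: wait_for_completion_timeout() returns 0 on timeout and the number of remaining jiffies on completion, so the timeout branch has to trigger on !ret. A minimal restatement of that convention (names are illustrative):

/* Sketch only: the return-value convention the fix above relies on. */
static bool rb_wait_timed_out_example(struct completion *done,
				      unsigned long timeout_jiffies)
{
	unsigned long ret = wait_for_completion_timeout(done, timeout_jiffies);

	/* ret == 0 -> timed out; ret > 0 -> completed with jiffies to spare */
	return ret == 0;
}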
@@ -932,14 +932,14 @@ static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
}
/*
- * Check a chunk is correct or not according to hardware ECC engine.
+ * Check if a chunk is correct or not according to the hardware ECC engine.
* mtd->ecc_stats.corrected is updated, as well as max_bitflips, however
* mtd->ecc_stats.failure is not, the function will instead return a non-zero
* value indicating that a check on the emptiness of the subpage must be
- * performed before declaring the subpage corrupted.
+ * performed before actually declaring the subpage as "corrupted".
*/
-static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
- unsigned int *max_bitflips)
+static int marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip,
+ unsigned int *max_bitflips)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -1053,7 +1053,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
marvell_nfc_enable_hw_ecc(chip);
marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
page);
- ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
marvell_nfc_disable_hw_ecc(chip);
if (!ret)
@@ -1224,12 +1224,12 @@ static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
/* Read spare bytes */
nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
- spare_len, false);
+ spare_len, false, false);
/* Read ECC bytes */
nand_read_data_op(chip, oob + ecc_offset +
(ALIGN(lt->ecc_bytes, 32) * chunk),
- ecc_len, false);
+ ecc_len, false, false);
}
return 0;
@@ -1336,7 +1336,7 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
/* Read the chunk and detect number of bitflips */
marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
spare, spare_len, page);
- ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips);
if (ret)
failure_mask |= BIT(chunk);
@@ -1358,10 +1358,9 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
*/
/*
- * In case there is any subpage read error reported by ->correct(), we
- * usually re-read only ECC bytes in raw mode and check if the whole
- * page is empty. In this case, it is normal that the ECC check failed
- * and we just ignore the error.
+ * In case there is any subpage read error, we usually re-read only ECC
+ * bytes in raw mode and check if the whole page is empty. In this case,
+ * it is normal that the ECC check failed and we just ignore the error.
*
* However, it has been empirically observed that for some layouts (e.g
* 2k page, 8b strength per 512B chunk), the controller tries to correct
@@ -2107,7 +2106,8 @@ static int marvell_nfc_exec_op(struct nand_chip *chip,
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
- marvell_nfc_select_target(chip, op->cs);
+ if (!check_only)
+ marvell_nfc_select_target(chip, op->cs);
if (nfc->caps->is_nfcv2)
return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
@@ -2166,8 +2166,8 @@ static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
.free = marvell_nand_ooblayout_free,
};
-static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
- struct nand_ecc_ctrl *ecc)
+static int marvell_nand_hw_ecc_controller_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -2261,7 +2261,7 @@ static int marvell_nand_ecc_init(struct mtd_info *mtd,
switch (ecc->mode) {
case NAND_ECC_HW:
- ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+ ret = marvell_nand_hw_ecc_controller_init(mtd, ecc);
if (ret)
return ret;
break;
@@ -2664,7 +2664,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
- nand_release(chip);
+ nand_cleanup(chip);
return ret;
}
@@ -2673,6 +2673,21 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
return 0;
}
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ chip = &entry->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&entry->node);
+ }
+}
+
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
struct device_node *np = dev->of_node;
@@ -2707,21 +2722,16 @@ static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
ret = marvell_nand_chip_init(dev, nfc, nand_np);
if (ret) {
of_node_put(nand_np);
- return ret;
+ goto cleanup_chips;
}
}
return 0;
-}
-static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
-{
- struct marvell_nand_chip *entry, *temp;
+cleanup_chips:
+ marvell_nand_chips_cleanup(nfc);
- list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
- nand_release(&entry->chip);
- list_del(&entry->node);
- }
+ return ret;
}
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
@@ -2854,7 +2864,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
static int marvell_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *r;
struct marvell_nfc *nfc;
int ret;
int irq;
@@ -2869,8 +2878,7 @@ static int marvell_nfc_probe(struct platform_device *pdev)
nfc->controller.ops = &marvell_nand_controller_ops;
INIT_LIST_HEAD(&nfc->chips);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->regs = devm_ioremap_resource(dev, r);
+ nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index f6fb5c0e6255..3f376471f3f7 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -899,6 +899,9 @@ static int meson_nfc_exec_op(struct nand_chip *nand,
u32 op_id, delay_idle, cmd;
int i;
+ if (check_only)
+ return 0;
+
meson_nfc_select_chip(nand, op->cs);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
@@ -1266,7 +1269,7 @@ meson_nfc_nand_chip_init(struct device *dev,
nand_set_flash_node(nand, np);
nand_set_controller_data(nand, nfc);
- nand->options |= NAND_USE_BOUNCE_BUFFER;
+ nand->options |= NAND_USES_DMA;
mtd = nand_to_mtd(nand);
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index a2fcb739e5f8..18ecb096a32d 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -805,8 +805,11 @@ static int mpc5121_nfc_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct mtd_info *mtd = dev_get_drvdata(dev);
+ int ret;
- nand_release(mtd_to_nand(mtd));
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(mtd_to_nand(mtd));
mpc5121_nfc_free(dev, mtd);
return 0;
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index ef149e8b26d0..c1a6e31aabb8 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1380,7 +1380,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
nand_set_flash_node(nand, np);
nand_set_controller_data(nand, nfc);
- nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+ nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
nand->legacy.dev_ready = mtk_nfc_dev_ready;
nand->legacy.select_chip = mtk_nfc_select_chip;
nand->legacy.write_byte = mtk_nfc_write_byte;
@@ -1419,7 +1419,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "mtd parse partition error\n");
- nand_release(nand);
+ nand_cleanup(nand);
return ret;
}
@@ -1578,13 +1578,18 @@ release_ecc:
static int mtk_nfc_remove(struct platform_device *pdev)
{
struct mtk_nfc *nfc = platform_get_drvdata(pdev);
- struct mtk_nfc_nand_chip *chip;
+ struct mtk_nfc_nand_chip *mtk_chip;
+ struct nand_chip *chip;
+ int ret;
while (!list_empty(&nfc->chips)) {
- chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
- node);
- nand_release(&chip->nand);
- list_del(&chip->node);
+ mtk_chip = list_first_entry(&nfc->chips,
+ struct mtk_nfc_nand_chip, node);
+ chip = &mtk_chip->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&mtk_chip->node);
}
mtk_ecc_release(nfc->ecc);
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 59554c187e01..09dacb83cb5a 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1919,8 +1919,12 @@ escan:
static int mxcnd_remove(struct platform_device *pdev)
{
struct mxc_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->nand;
+ int ret;
- nand_release(&host->nand);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
if (host->clk_act)
clk_disable_unprepare(host->clk);
diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c
index ed7a4e021bf5..57f36721f4c6 100644
--- a/drivers/mtd/nand/raw/mxic_nand.c
+++ b/drivers/mtd/nand/raw/mxic_nand.c
@@ -393,6 +393,9 @@ static int mxic_nfc_exec_op(struct nand_chip *chip,
int ret = 0;
unsigned int op_id;
+ if (check_only)
+ return 0;
+
mxic_nfc_cs_enable(nfc);
init_completion(&nfc->complete);
for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -553,8 +556,13 @@ fail:
static int mxic_nfc_remove(struct platform_device *pdev)
{
struct mxic_nand_ctlr *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nfc->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
- nand_release(&nfc->chip);
mxic_nfc_clk_disable(nfc);
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index c24e5e2ba130..45124dbb1835 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -205,6 +205,56 @@ static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
.free = nand_ooblayout_free_lp_hamming,
};
+static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
+ struct mtd_pairing_info *info)
+{
+ int lastpage = (mtd->erasesize / mtd->writesize) - 1;
+ int dist = 3;
+
+ if (page == lastpage)
+ dist = 2;
+
+ if (!page || (page & 1)) {
+ info->group = 0;
+ info->pair = (page + 1) / 2;
+ } else {
+ info->group = 1;
+ info->pair = (page + 1 - dist) / 2;
+ }
+
+ return 0;
+}
+
+static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
+ const struct mtd_pairing_info *info)
+{
+ int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
+ int page = info->pair * 2;
+ int dist = 3;
+
+ if (!info->group && !info->pair)
+ return 0;
+
+ if (info->pair == lastpair && info->group)
+ dist = 2;
+
+ if (!info->group)
+ page--;
+ else if (info->pair)
+ page += dist - 1;
+
+ if (page >= mtd->erasesize / mtd->writesize)
+ return -EINVAL;
+
+ return page;
+}
+
+const struct mtd_pairing_scheme dist3_pairing_scheme = {
+ .ngroups = 2,
+ .get_info = nand_pairing_dist3_get_info,
+ .get_wunit = nand_pairing_dist3_get_wunit,
+};
+
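The pairing scheme above is easiest to grasp with a few concrete values. A quick sketch worked out by hand from nand_pairing_dist3_get_info(); the 64-page block is only an example geometry, not something this patch assumes:

	struct mtd_pairing_info info;

	/* For a 64-page block, the first pages map as follows:
	 *   page 0 -> pair 0, group 0     page 1 -> pair 1, group 0
	 *   page 2 -> pair 0, group 1     page 3 -> pair 2, group 0
	 *   page 4 -> pair 1, group 1     page 5 -> pair 3, group 0
	 * so the two pages of a pair sit mostly three pages apart
	 * (two at the block boundaries), hence the "dist3" name.
	 */
	nand_pairing_dist3_get_info(mtd, 4, &info);
	/* info.pair == 1, info.group == 1 */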
static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
int ret = 0;
@@ -225,6 +275,50 @@ static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
}
/**
+ * nand_extract_bits - Copy unaligned bits from one buffer to another one
+ * @dst: destination buffer
+ * @dst_off: bit offset at which the writing starts
+ * @src: source buffer
+ * @src_off: bit offset at which the reading starts
+ * @nbits: number of bits to copy from @src to @dst
+ *
+ * Copy bits from one memory region to another (overlap authorized).
+ */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits)
+{
+ unsigned int tmp, n;
+
+ dst += dst_off / 8;
+ dst_off %= 8;
+ src += src_off / 8;
+ src_off %= 8;
+
+ while (nbits) {
+ n = min3(8 - dst_off, 8 - src_off, nbits);
+
+ tmp = (*src >> src_off) & GENMASK(n - 1, 0);
+ *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
+ *dst |= tmp << dst_off;
+
+ dst_off += n;
+ if (dst_off >= 8) {
+ dst++;
+ dst_off -= 8;
+ }
+
+ src_off += n;
+ if (src_off >= 8) {
+ src++;
+ src_off -= 8;
+ }
+
+ nbits -= n;
+ }
+}
+EXPORT_SYMBOL_GPL(nand_extract_bits);
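nand_extract_bits() being bit-granular is what lets callers copy fields that are not byte-aligned. A minimal usage sketch; buffer contents and offsets are made up for the illustration:

	u8 src[2] = { 0xab, 0xcd };
	u8 dst[2] = { };

	/* Copy 10 bits starting at bit 6 of src into dst at bit 0;
	 * per the comment above, overlapping regions are allowed. */
	nand_extract_bits(dst, 0, src, 6, 10);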
+
+/**
* nand_select_target() - Select a NAND target (A.K.A. die)
* @chip: NAND chip object
* @cs: the CS line to select. Note that this CS id is always from the chip
@@ -345,6 +439,9 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
+ if (chip->options & NAND_NO_BBM_QUIRK)
+ return 0;
+
if (chip->legacy.block_bad)
return chip->legacy.block_bad(chip, ofs);
@@ -690,7 +787,8 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
*/
timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
- ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
if (ret)
break;
@@ -736,8 +834,14 @@ EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
unsigned long timeout_ms)
{
- /* Wait until R/B pin indicates chip is ready or timeout occurs */
- timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /*
+ * Wait until R/B pin indicates chip is ready or timeout occurs.
+ * The +1 below is necessary because if we are now in the last fraction
+ * of a jiffy and msecs_to_jiffies() returns 1, then we would wait only
+ * that small fraction of a jiffy, possibly leading to a false timeout.
+ */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
if (gpiod_get_value_cansleep(gpiod))
return 0;
@@ -770,7 +874,7 @@ void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
u8 status;
ret = nand_read_data_op(chip, &status, sizeof(status),
- true);
+ true, false);
if (ret)
return;
@@ -1868,6 +1972,8 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
* @buf: buffer used to store the data
* @len: length of the buffer
* @force_8bit: force 8-bit bus access
+ * @check_only: do not actually run the command, only check if the
+ * controller driver supports it
*
* This function does a raw data read on the bus. Usually used after launching
* another NAND operation like nand_read_page_op().
@@ -1876,7 +1982,7 @@ EXPORT_SYMBOL_GPL(nand_reset_op);
* Returns 0 on success, a negative error code otherwise.
*/
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
- bool force_8bit)
+ bool force_8bit, bool check_only)
{
if (!len || !buf)
return -EINVAL;
@@ -1889,9 +1995,15 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
instrs[0].ctx.data.force_8bit = force_8bit;
+ if (check_only)
+ return nand_check_op(chip, &op);
+
return nand_exec_op(chip, &op);
}
+ if (check_only)
+ return 0;
+
if (force_8bit) {
u8 *p = buf;
unsigned int i;
@@ -2112,7 +2224,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
char *prefix = " ";
unsigned int i;
- pr_debug("executing subop:\n");
+ pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
for (i = 0; i < ctx->ninstrs; i++) {
instr = &ctx->instrs[i];
@@ -2176,6 +2288,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_operation *op, bool check_only)
{
struct nand_op_parser_ctx ctx = {
+ .subop.cs = op->cs,
.subop.instrs = op->instrs,
.instrs = op->instrs,
.ninstrs = op->ninstrs,
@@ -2620,7 +2733,7 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
if (oob_required) {
ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
- false);
+ false, false);
if (ret)
return ret;
}
@@ -2630,6 +2743,47 @@ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
EXPORT_SYMBOL(nand_read_page_raw);
/**
+ * nand_monolithic_read_page_raw - Monolithic page read in raw mode
+ * @chip: NAND chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This is a raw page read, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be loaded in the NAND cache and sent over the
+ * bus (from the NAND chip to the NAND controller) in a single
+ * operation. This is an alternative to nand_read_page_raw(), which
+ * first reads the main data, and if the OOB data is requested too,
+ * then reads more data on the bus.
+ */
+int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *read_buf = buf;
+ int ret;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf)
+ read_buf = nand_get_data_buf(chip);
+ }
+
+ ret = nand_read_page_op(chip, page, 0, read_buf, size);
+ if (ret)
+ return ret;
+
+ if (buf != chip->data_buf)
+ memcpy(buf, read_buf, mtd->writesize);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_monolithic_read_page_raw);
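nand_monolithic_read_page_raw() is exported, so the expected use is for a controller driver to plug it into its raw accessor when its ->exec_op() can transfer writesize + oobsize bytes in one operation. A hedged sketch of such a hook-up; the driver context is hypothetical, not part of this patch:

	/* In some controller driver's ECC init path: */
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;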
+
+/**
* nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
* @chip: nand chip info structure
* @buf: buffer to store read data
@@ -2652,7 +2806,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- ret = nand_read_data_op(chip, buf, eccsize, false);
+ ret = nand_read_data_op(chip, buf, eccsize, false, false);
if (ret)
return ret;
@@ -2660,14 +2814,14 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
if (chip->ecc.prepad) {
ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
- false);
+ false, false);
if (ret)
return ret;
oob += chip->ecc.prepad;
}
- ret = nand_read_data_op(chip, oob, eccbytes, false);
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
if (ret)
return ret;
@@ -2675,7 +2829,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
if (chip->ecc.postpad) {
ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
- false);
+ false, false);
if (ret)
return ret;
@@ -2685,7 +2839,7 @@ static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
size = mtd->oobsize - (oob - chip->oob_poi);
if (size) {
- ret = nand_read_data_op(chip, oob, size, false);
+ ret = nand_read_data_op(chip, oob, size, false, false);
if (ret)
return ret;
}
@@ -2878,14 +3032,15 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(chip, NAND_ECC_READ);
- ret = nand_read_data_op(chip, p, eccsize, false);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
chip->ecc.calculate(chip, p, &ecc_calc[i]);
}
- ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ false);
if (ret)
return ret;
@@ -2921,76 +3076,6 @@ static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
}
/**
- * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Hardware ECC for large page chips, require OOB to be read first. For this
- * ECC mode, the write_page method is re-used from ECC_HW. These methods
- * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
- * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
- * the data area, by overwriting the NAND manufacturer bad block markings.
- */
-static int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
- int oob_required, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size, ret;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_code = chip->ecc.code_buf;
- uint8_t *ecc_calc = chip->ecc.calc_buf;
- unsigned int max_bitflips = 0;
-
- /* Read the OOB area first */
- ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
- if (ret)
- return ret;
-
- ret = nand_read_page_op(chip, page, 0, NULL, 0);
- if (ret)
- return ret;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
-
- chip->ecc.hwctl(chip, NAND_ECC_READ);
-
- ret = nand_read_data_op(chip, p, eccsize, false);
- if (ret)
- return ret;
-
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
-
- stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
- if (stat == -EBADMSG &&
- (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
- /* check for empty pages with bitflips */
- stat = nand_check_erased_ecc_chunk(p, eccsize,
- &ecc_code[i], eccbytes,
- NULL, 0,
- chip->ecc.strength);
- }
-
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
- }
- return max_bitflips;
-}
-
-/**
* nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
* @chip: nand chip info structure
* @buf: buffer to store read data
@@ -3021,13 +3106,13 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_READ);
- ret = nand_read_data_op(chip, p, eccsize, false);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
if (ret)
return ret;
if (chip->ecc.prepad) {
ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
- false);
+ false, false);
if (ret)
return ret;
@@ -3036,7 +3121,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
chip->ecc.hwctl(chip, NAND_ECC_READSYN);
- ret = nand_read_data_op(chip, oob, eccbytes, false);
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
if (ret)
return ret;
@@ -3046,7 +3131,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
if (chip->ecc.postpad) {
ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
- false);
+ false, false);
if (ret)
return ret;
@@ -3074,7 +3159,7 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
if (i) {
- ret = nand_read_data_op(chip, oob, i, false);
+ ret = nand_read_data_op(chip, oob, i, false, false);
if (ret)
return ret;
}
@@ -3166,7 +3251,7 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
uint32_t max_oobsize = mtd_oobavail(mtd, ops);
uint8_t *bufpoi, *oob, *buf;
- int use_bufpoi;
+ int use_bounce_buf;
unsigned int max_bitflips = 0;
int retry_mode = 0;
bool ecc_fail = false;
@@ -3184,25 +3269,25 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
oob_required = oob ? 1 : 0;
while (1) {
- unsigned int ecc_failures = mtd->ecc_stats.failed;
+ struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
bytes = min(mtd->writesize - col, readlen);
aligned = (bytes == mtd->writesize);
if (!aligned)
- use_bufpoi = 1;
- else if (chip->options & NAND_USE_BOUNCE_BUFFER)
- use_bufpoi = !virt_addr_valid(buf) ||
- !IS_ALIGNED((unsigned long)buf,
- chip->buf_align);
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !virt_addr_valid(buf) ||
+ !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
else
- use_bufpoi = 0;
+ use_bounce_buf = 0;
/* Is the current page in the buffer? */
if (realpage != chip->pagecache.page || oob) {
- bufpoi = use_bufpoi ? chip->data_buf : buf;
+ bufpoi = use_bounce_buf ? chip->data_buf : buf;
- if (use_bufpoi && aligned)
+ if (use_bounce_buf && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
@@ -3223,16 +3308,19 @@ read_retry:
ret = chip->ecc.read_page(chip, bufpoi,
oob_required, page);
if (ret < 0) {
- if (use_bufpoi)
+ if (use_bounce_buf)
/* Invalidate page cache */
chip->pagecache.page = -1;
break;
}
- /* Transfer not aligned data */
- if (use_bufpoi) {
+ /*
+ * Copy the data back into the initial buffer when reading
+ * partial pages or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
- !(mtd->ecc_stats.failed - ecc_failures) &&
+ !(mtd->ecc_stats.failed - ecc_stats.failed) &&
(ops->mode != MTD_OPS_RAW)) {
chip->pagecache.page = realpage;
chip->pagecache.bitflips = ret;
@@ -3240,7 +3328,7 @@ read_retry:
/* Invalidate page cache */
chip->pagecache.page = -1;
}
- memcpy(buf, chip->data_buf + col, bytes);
+ memcpy(buf, bufpoi + col, bytes);
}
if (unlikely(oob)) {
@@ -3255,7 +3343,7 @@ read_retry:
nand_wait_readrdy(chip);
- if (mtd->ecc_stats.failed - ecc_failures) {
+ if (mtd->ecc_stats.failed - ecc_stats.failed) {
if (retry_mode + 1 < chip->read_retries) {
retry_mode++;
ret = nand_setup_read_retry(chip,
@@ -3263,8 +3351,8 @@ read_retry:
if (ret < 0)
break;
- /* Reset failures; retry */
- mtd->ecc_stats.failed = ecc_failures;
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = ecc_stats;
goto read_retry;
} else {
/* No more retry modes; real failure */
@@ -3373,7 +3461,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
sndrnd = 1;
toread = min_t(int, length, chunk);
- ret = nand_read_data_op(chip, bufpoi, toread, false);
+ ret = nand_read_data_op(chip, bufpoi, toread, false, false);
if (ret)
return ret;
@@ -3381,7 +3469,7 @@ static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
length -= toread;
}
if (length > 0) {
- ret = nand_read_data_op(chip, bufpoi, length, false);
+ ret = nand_read_data_op(chip, bufpoi, length, false, false);
if (ret)
return ret;
}
@@ -3634,6 +3722,42 @@ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
EXPORT_SYMBOL(nand_write_page_raw);
/**
+ * nand_monolithic_write_page_raw - Monolithic page write in raw mode
+ * @chip: NAND chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * This is a raw page write, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus
+ * possibly OOB) to be sent over the bus and effectively programmed
+ * into the NAND chip arrays in a single operation. This is an
+ * alternative to nand_write_page_raw(), which first sends the main
+ * data, then possibly sends the OOB data by latching more data
+ * cycles on the NAND bus, and finally sends the program command to
+ * synchronize the NAND chip cache.
+ */
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *write_buf = (u8 *)buf;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf) {
+ write_buf = nand_get_data_buf(chip);
+ memcpy(write_buf, buf, mtd->writesize);
+ }
+ }
+
+ return nand_prog_page_op(chip, page, 0, write_buf, size);
+}
+EXPORT_SYMBOL(nand_monolithic_write_page_raw);
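The write-side counterpart would typically be installed next to the read helper above; again a hypothetical driver hook, not taken from this patch. When OOB is required, the helper copies @buf into the chip's internal buffer, where the chip->oob_poi bytes already follow the main area, so both can be programmed at once:

	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;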
+
+/**
* nand_write_page_raw_syndrome - [INTERN] raw page write function
* @chip: nand chip info structure
* @buf: data buffer
@@ -4012,20 +4136,23 @@ static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
while (1) {
int bytes = mtd->writesize;
uint8_t *wbuf = buf;
- int use_bufpoi;
+ int use_bounce_buf;
int part_pagewr = (column || writelen < mtd->writesize);
if (part_pagewr)
- use_bufpoi = 1;
- else if (chip->options & NAND_USE_BOUNCE_BUFFER)
- use_bufpoi = !virt_addr_valid(buf) ||
- !IS_ALIGNED((unsigned long)buf,
- chip->buf_align);
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !virt_addr_valid(buf) ||
+ !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
else
- use_bufpoi = 0;
+ use_bounce_buf = 0;
- /* Partial page write?, or need to use bounce buffer */
- if (use_bufpoi) {
+ /*
+ * Copy the data from the initial buffer when doing partial page
+ * writes or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
pr_debug("%s: using write bounce buffer for buf@%p\n",
__func__, buf);
if (part_pagewr)
@@ -4883,7 +5010,6 @@ static const char * const nand_ecc_modes[] = {
[NAND_ECC_SOFT] = "soft",
[NAND_ECC_HW] = "hw",
[NAND_ECC_HW_SYNDROME] = "hw_syndrome",
- [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
[NAND_ECC_ON_DIE] = "on-die",
};
@@ -4896,14 +5022,14 @@ static int of_get_nand_ecc_mode(struct device_node *np)
if (err < 0)
return err;
- for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+ for (i = NAND_ECC_NONE; i < ARRAY_SIZE(nand_ecc_modes); i++)
if (!strcasecmp(pm, nand_ecc_modes[i]))
return i;
/*
* For backward compatibility we support few obsoleted values that don't
- * have their mappings into nand_ecc_modes_t anymore (they were merged
- * with other enums).
+ * have their mappings into the nand_ecc_mode enum anymore (they were
+ * merged with other enums).
*/
if (!strcasecmp(pm, "soft_bch"))
return NAND_ECC_SOFT;
@@ -4917,17 +5043,20 @@ static const char * const nand_ecc_algos[] = {
[NAND_ECC_RS] = "rs",
};
-static int of_get_nand_ecc_algo(struct device_node *np)
+static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
+ enum nand_ecc_algo ecc_algo;
const char *pm;
- int err, i;
+ int err;
err = of_property_read_string(np, "nand-ecc-algo", &pm);
if (!err) {
- for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
- if (!strcasecmp(pm, nand_ecc_algos[i]))
- return i;
- return -ENODEV;
+ for (ecc_algo = NAND_ECC_HAMMING;
+ ecc_algo < ARRAY_SIZE(nand_ecc_algos);
+ ecc_algo++) {
+ if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
+ return ecc_algo;
+ }
}
/*
@@ -4935,15 +5064,14 @@ static int of_get_nand_ecc_algo(struct device_node *np)
* for some obsoleted values that were specifying ECC algorithm.
*/
err = of_property_read_string(np, "nand-ecc-mode", &pm);
- if (err < 0)
- return err;
-
- if (!strcasecmp(pm, "soft"))
- return NAND_ECC_HAMMING;
- else if (!strcasecmp(pm, "soft_bch"))
- return NAND_ECC_BCH;
+ if (!err) {
+ if (!strcasecmp(pm, "soft"))
+ return NAND_ECC_HAMMING;
+ else if (!strcasecmp(pm, "soft_bch"))
+ return NAND_ECC_BCH;
+ }
- return -ENODEV;
+ return NAND_ECC_UNKNOWN;
}
static int of_get_nand_ecc_step_size(struct device_node *np)
@@ -4988,7 +5116,8 @@ static bool of_get_nand_on_flash_bbt(struct device_node *np)
static int nand_dt_init(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
- int ecc_mode, ecc_algo, ecc_strength, ecc_step;
+ enum nand_ecc_algo ecc_algo;
+ int ecc_mode, ecc_strength, ecc_step;
if (!dn)
return 0;
@@ -5010,7 +5139,7 @@ static int nand_dt_init(struct nand_chip *chip)
if (ecc_mode >= 0)
chip->ecc.mode = ecc_mode;
- if (ecc_algo >= 0)
+ if (ecc_algo != NAND_ECC_UNKNOWN)
chip->ecc.algo = ecc_algo;
if (ecc_strength >= 0)
@@ -5140,8 +5269,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
ecc->read_page = nand_read_page_swecc;
ecc->read_subpage = nand_read_subpage;
ecc->write_page = nand_write_page_swecc;
- ecc->read_page_raw = nand_read_page_raw;
- ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
ecc->write_oob = nand_write_oob_std;
if (!ecc->size)
@@ -5163,8 +5294,10 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip)
ecc->read_page = nand_read_page_swecc;
ecc->read_subpage = nand_read_subpage;
ecc->write_page = nand_write_page_swecc;
- ecc->read_page_raw = nand_read_page_raw;
- ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
ecc->read_oob = nand_read_oob_std;
ecc->write_oob = nand_write_oob_std;
@@ -5628,16 +5761,6 @@ static int nand_scan_tail(struct nand_chip *chip)
*/
switch (ecc->mode) {
- case NAND_ECC_HW_OOB_FIRST:
- /* Similar to NAND_ECC_HW, but a separate read_page handle */
- if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
- WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
- ret = -EINVAL;
- goto err_nand_manuf_cleanup;
- }
- if (!ecc->read_page)
- ecc->read_page = nand_read_page_hwecc_oob_first;
- fallthrough;
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
if (!ecc->read_page)
@@ -5781,8 +5904,10 @@ static int nand_scan_tail(struct nand_chip *chip)
/* ECC sanity check: warn if it's too weak */
if (!nand_ecc_strength_good(chip))
- pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
- mtd->name);
+ pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
+ mtd->name, chip->ecc.strength, chip->ecc.size,
+ chip->base.eccreq.strength,
+ chip->base.eccreq.step_size);
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
@@ -5975,18 +6100,6 @@ void nand_cleanup(struct nand_chip *chip)
EXPORT_SYMBOL_GPL(nand_cleanup);
-/**
- * nand_release - [NAND Interface] Unregister the MTD device and free resources
- * held by the NAND device
- * @chip: NAND chip object
- */
-void nand_release(struct nand_chip *chip)
-{
- mtd_device_unregister(nand_to_mtd(chip));
- nand_cleanup(chip);
-}
-EXPORT_SYMBOL_GPL(nand_release);
-
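As the removed helper shows, nand_release() was only a thin wrapper around these two calls. The drivers converted in this patch now open-code the pair so the mtd_device_unregister() return value can be checked:

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);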
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
index 17527310c3a1..d5af8c5fd02f 100644
--- a/drivers/mtd/nand/raw/nand_bch.c
+++ b/drivers/mtd/nand/raw/nand_bch.c
@@ -41,7 +41,7 @@ int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
unsigned int i;
memset(code, 0, chip->ecc.bytes);
- encode_bch(nbc->bch, buf, chip->ecc.size, code);
+ bch_encode(nbc->bch, buf, chip->ecc.size, code);
/* apply mask so that an erased page is a valid codeword */
for (i = 0; i < chip->ecc.bytes; i++)
@@ -67,7 +67,7 @@ int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned int *errloc = nbc->errloc;
int i, count;
- count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
NULL, errloc);
if (count > 0) {
for (i = 0; i < count; i++) {
@@ -130,7 +130,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
if (!nbc)
goto fail;
- nbc->bch = init_bch(m, t, 0);
+ nbc->bch = bch_init(m, t, 0, false);
if (!nbc->bch)
goto fail;
@@ -182,7 +182,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
goto fail;
memset(erased_page, 0xff, eccsize);
- encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask);
kfree(erased_page);
for (i = 0; i < eccbytes; i++)
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(nand_bch_init);
void nand_bch_free(struct nand_bch_control *nbc)
{
if (nbc) {
- free_bch(nbc->bch);
+ bch_free(nbc->bch);
kfree(nbc->errloc);
kfree(nbc->eccmask);
kfree(nbc);
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 9b540e76f84f..b15c42f48755 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -16,6 +16,8 @@
#include "internals.h"
+#define JEDEC_PARAM_PAGES 3
+
/*
* Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
*/
@@ -25,9 +27,11 @@ int nand_jedec_detect(struct nand_chip *chip)
struct nand_memory_organization *memorg;
struct nand_jedec_params *p;
struct jedec_ecc_info *ecc;
+ bool use_datain = false;
int jedec_version = 0;
char id[5];
int i, val, ret;
+ u16 crc;
memorg = nanddev_get_memorg(&chip->base);
@@ -41,25 +45,31 @@ int nand_jedec_detect(struct nand_chip *chip)
if (!p)
return -ENOMEM;
- ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
- if (ret) {
- ret = 0;
- goto free_jedec_param_page;
- }
-
- for (i = 0; i < 3; i++) {
- ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, p, sizeof(*p), true, true))
+ use_datain = true;
+
+ for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0x40, p,
+ sizeof(*p));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, p, sizeof(*p), true,
+ false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*p) * i,
+ p, sizeof(*p), true);
if (ret) {
ret = 0;
goto free_jedec_param_page;
}
- if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
- le16_to_cpu(p->crc))
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
+ if (crc == le16_to_cpu(p->crc))
break;
}
- if (i == 3) {
+ if (i == JEDEC_PARAM_PAGES) {
pr_err("Could not find valid JEDEC parameter page; aborting\n");
goto free_jedec_param_page;
}
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index f91e92e1b972..d64791c06a97 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -225,7 +225,8 @@ static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
do {
u8 status;
- ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
if (ret)
return;
@@ -552,7 +553,8 @@ static int nand_wait(struct nand_chip *chip)
break;
} else {
ret = nand_read_data_op(chip, &status,
- sizeof(status), true);
+ sizeof(status), true,
+ false);
if (ret)
return ret;
@@ -563,7 +565,7 @@ static int nand_wait(struct nand_chip *chip)
} while (time_before(jiffies, timeo));
}
- ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 56654030ec7f..3589b4fce0d4 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -192,6 +192,7 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
struct micron_nand *micron = nand_get_manufacturer_data(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int step, max_bitflips = 0;
+ bool use_datain = false;
int ret;
if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
@@ -211,8 +212,27 @@ static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
* in non-raw mode, even if the user did not request those bytes.
*/
if (!oob_required) {
- ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
- false);
+ /*
+ * We first check which operation is supported by the controller
+ * before running it. This trick makes it possible to support
+ * all controllers, even the most constrained ones, with almost
+ * no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check
+ * over and over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ true))
+ use_datain = true;
+
+ if (use_datain)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ else
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
if (ret)
return ret;
}
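Stripped of the Micron specifics, the probe-then-run trick described in the comment above reduces to a single extra nand_read_data_op() call with check_only set. A generic sketch, where buf, len and column are placeholders:

	bool use_datain = false;

	if (!nand_has_exec_op(chip) ||
	    !nand_read_data_op(chip, buf, len, false, true))
		use_datain = true;

	if (use_datain)
		ret = nand_read_data_op(chip, buf, len, false, false);
	else
		ret = nand_change_read_column_op(chip, column, buf, len, false);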
@@ -285,6 +305,7 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ bool use_datain = false;
u8 status;
int ret, max_bitflips = 0;
@@ -300,14 +321,36 @@ micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
if (ret)
goto out;
- ret = nand_exit_status_op(chip);
- if (ret)
- goto out;
+ /*
+ * We first check which operation is supported by the controller before
+ * running it. This trick makes it possible to support all controllers,
+ * even the most constrained ones, with almost no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check over and
+ * over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, buf, mtd->writesize, false, true))
+ use_datain = true;
- ret = nand_read_data_op(chip, buf, mtd->writesize, false);
- if (!ret && oob_required)
- ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ if (use_datain) {
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false,
false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ } else {
+ ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
+ false);
+ if (!ret && oob_required)
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ }
if (chip->ecc.strength == 4)
max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
@@ -508,8 +551,10 @@ static int micron_nand_init(struct nand_chip *chip)
chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
} else {
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
}
}
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index 0b879bd0a68c..be3456627288 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -16,6 +16,8 @@
#include "internals.h"
+#define ONFI_PARAM_PAGES 3
+
u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
{
int i;
@@ -45,12 +47,10 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
if (!ep)
return -ENOMEM;
- /* Send our own NAND_CMD_PARAM. */
- ret = nand_read_param_page_op(chip, 0, NULL, 0);
- if (ret)
- goto ext_out;
-
- /* Use the Change Read Column command to skip the ONFI param pages. */
+ /*
+ * Use the Change Read Column command to skip the ONFI param pages and
+ * ensure we read at the right location.
+ */
ret = nand_change_read_column_op(chip,
sizeof(*p) * p->num_of_param_pages,
ep, len, true);
@@ -141,11 +141,13 @@ int nand_onfi_detect(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_memory_organization *memorg;
- struct nand_onfi_params *p;
+ struct nand_onfi_params *p = NULL, *pbuf;
struct onfi_params *onfi;
+ bool use_datain = false;
int onfi_version = 0;
char id[4];
int i, ret, val;
+ u16 crc;
memorg = nanddev_get_memorg(&chip->base);
@@ -155,43 +157,54 @@ int nand_onfi_detect(struct nand_chip *chip)
return 0;
/* ONFI chip: allocate a buffer to hold its parameter page */
- p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
- if (!p)
+ pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
+ if (!pbuf)
return -ENOMEM;
- ret = nand_read_param_page_op(chip, 0, NULL, 0);
- if (ret) {
- ret = 0;
- goto free_onfi_param_page;
- }
-
- for (i = 0; i < 3; i++) {
- ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true))
+ use_datain = true;
+
+ for (i = 0; i < ONFI_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0, &pbuf[i],
+ sizeof(*pbuf));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
+ true, false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
+ &pbuf[i], sizeof(*pbuf),
+ true);
if (ret) {
ret = 0;
goto free_onfi_param_page;
}
- if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
- le16_to_cpu(p->crc)) {
- if (i)
- memcpy(p, &p[i], sizeof(*p));
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
+ if (crc == le16_to_cpu(pbuf[i].crc)) {
+ p = &pbuf[i];
break;
}
}
- if (i == 3) {
- const void *srcbufs[3] = {p, p + 1, p + 2};
+ if (i == ONFI_PARAM_PAGES) {
+ const void *srcbufs[ONFI_PARAM_PAGES];
+ unsigned int j;
+
+ for (j = 0; j < ONFI_PARAM_PAGES; j++)
+ srcbufs[j] = pbuf + j;
pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
- nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
- sizeof(*p));
+ nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
+ sizeof(*pbuf));
- if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
- le16_to_cpu(p->crc)) {
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
+ if (crc != le16_to_cpu(pbuf->crc)) {
pr_err("ONFI parameter recovery failed, aborting\n");
goto free_onfi_param_page;
}
+ p = pbuf;
}
if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
@@ -299,14 +312,14 @@ int nand_onfi_detect(struct nand_chip *chip)
chip->parameters.onfi = onfi;
/* Identification done, free the full ONFI parameter page and exit */
- kfree(p);
+ kfree(pbuf);
return 1;
free_model:
kfree(chip->parameters.model);
free_onfi_param_page:
- kfree(p);
+ kfree(pbuf);
return ret;
}
diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c
index f64b06a71dfa..36d21be3dfe5 100644
--- a/drivers/mtd/nand/raw/nand_timings.c
+++ b/drivers/mtd/nand/raw/nand_timings.c
@@ -16,6 +16,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 0 */
{
.type = NAND_SDR_IFACE,
+ .timings.mode = 0,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
@@ -58,6 +59,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 1 */
{
.type = NAND_SDR_IFACE,
+ .timings.mode = 1,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
@@ -100,6 +102,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 2 */
{
.type = NAND_SDR_IFACE,
+ .timings.mode = 2,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
@@ -142,6 +145,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 3 */
{
.type = NAND_SDR_IFACE,
+ .timings.mode = 3,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
@@ -184,6 +188,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 4 */
{
.type = NAND_SDR_IFACE,
+ .timings.mode = 4,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
@@ -226,6 +231,7 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
/* Mode 5 */
{
.type = NAND_SDR_IFACE,
+ .timings.mode = 5,
.timings.sdr = {
.tCCS_min = 500000,
.tR_max = 200000000,
@@ -314,10 +320,9 @@ int onfi_fill_data_interface(struct nand_chip *chip,
/* microseconds -> picoseconds */
timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX;
- timings->tR_max = 1000000ULL * 200000000ULL;
- /* nanoseconds -> picoseconds */
- timings->tCCS_min = 1000UL * 500000;
+ timings->tR_max = 200000000;
+ timings->tCCS_min = 500000;
}
return 0;
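The dropped conversions were scaling numbers that are already expressed in picoseconds; the SDR timing table earlier in this file stores them in that unit, so the overrides can simply reuse the raw values:

	/* Units check (values match onfi_sdr_timings[] mode 0 above):
	 *   tR_max   = 200000000 ps = 200 us
	 *   tCCS_min =    500000 ps = 500 ns
	 */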
diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c
index f3dcd695b5db..ae069905d7e4 100644
--- a/drivers/mtd/nand/raw/nand_toshiba.c
+++ b/drivers/mtd/nand/raw/nand_toshiba.c
@@ -194,6 +194,17 @@ static void toshiba_nand_decode_id(struct nand_chip *chip)
}
}
+static int tc58teg5dclta00_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->onfi_timing_mode_default = 5;
+ chip->options |= NAND_NEED_SCRAMBLING;
+ mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);
+
+ return 0;
+}
+
static int toshiba_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
@@ -204,6 +215,9 @@ static int toshiba_nand_init(struct nand_chip *chip)
chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
toshiba_nand_benand_init(chip);
+ if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
+ tc58teg5dclta00_init(chip);
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 1de03bb34e84..0a5cb77966cc 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -353,6 +353,9 @@ struct nandsim {
void *file_buf;
struct page *held_pages[NS_MAX_HELD_PAGES];
int held_cnt;
+
+ /* debugfs entry */
+ struct dentry *dent;
};
/*
@@ -432,7 +435,7 @@ static unsigned long total_wear = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
-static int nandsim_show(struct seq_file *m, void *private)
+static int ns_show(struct seq_file *m, void *private)
{
unsigned long wmin = -1, wmax = 0, avg;
unsigned long deciles[10], decile_max[10], tot = 0;
@@ -483,19 +486,18 @@ static int nandsim_show(struct seq_file *m, void *private)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(nandsim);
+DEFINE_SHOW_ATTRIBUTE(ns);
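DEFINE_SHOW_ATTRIBUTE(ns) is what produces the ns_fops used by the debugfs_create_file() call further down; conceptually, the generic seq_file macro expands to roughly the following (simplified sketch, not code from this patch):

	static int ns_open(struct inode *inode, struct file *file)
	{
		return single_open(file, ns_show, inode->i_private);
	}

	static const struct file_operations ns_fops = {
		.owner   = THIS_MODULE,
		.open    = ns_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};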
/**
- * nandsim_debugfs_create - initialize debugfs
- * @dev: nandsim device description object
+ * ns_debugfs_create - initialize debugfs
+ * @ns: nandsim device description object
*
* This function creates the debugfs files for the nandsim device @ns. Returns zero in
* case of success and a negative error code in case of failure.
*/
-static int nandsim_debugfs_create(struct nandsim *dev)
+static int ns_debugfs_create(struct nandsim *ns)
{
struct dentry *root = nsmtd->dbg.dfs_dir;
- struct dentry *dent;
/*
* Just skip debugfs initialization when the debugfs directory is
@@ -508,9 +510,9 @@ static int nandsim_debugfs_create(struct nandsim *dev)
return 0;
}
- dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
- root, dev, &nandsim_fops);
- if (IS_ERR_OR_NULL(dent)) {
+ ns->dent = debugfs_create_file("nandsim_wear_report", 0400, root, ns,
+ &ns_fops);
+ if (IS_ERR_OR_NULL(ns->dent)) {
NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
return -1;
}
@@ -518,13 +520,18 @@ static int nandsim_debugfs_create(struct nandsim *dev)
return 0;
}
+static void ns_debugfs_remove(struct nandsim *ns)
+{
+ debugfs_remove_recursive(ns->dent);
+}
+
/*
* Allocate array of page pointers, create slab allocation for an array
* and initialize the array by NULL pointers.
*
* RETURNS: 0 if success, -ENOMEM if memory alloc fails.
*/
-static int __init alloc_device(struct nandsim *ns)
+static int __init ns_alloc_device(struct nandsim *ns)
{
struct file *cfile;
int i, err;
@@ -536,12 +543,12 @@ static int __init alloc_device(struct nandsim *ns)
if (!(cfile->f_mode & FMODE_CAN_READ)) {
NS_ERR("alloc_device: cache file not readable\n");
err = -EINVAL;
- goto err_close;
+ goto err_close_filp;
}
if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
NS_ERR("alloc_device: cache file not writeable\n");
err = -EINVAL;
- goto err_close;
+ goto err_close_filp;
}
ns->pages_written =
vzalloc(array_size(sizeof(unsigned long),
@@ -549,16 +556,24 @@ static int __init alloc_device(struct nandsim *ns)
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
- goto err_close;
+ goto err_close_filp;
}
ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->file_buf) {
NS_ERR("alloc_device: unable to allocate file buf\n");
err = -ENOMEM;
- goto err_free;
+ goto err_free_pw;
}
ns->cfile = cfile;
+
return 0;
+
+err_free_pw:
+ vfree(ns->pages_written);
+err_close_filp:
+ filp_close(cfile, NULL);
+
+ return err;
}
ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
@@ -573,22 +588,22 @@ static int __init alloc_device(struct nandsim *ns)
ns->geom.pgszoob, 0, 0, NULL);
if (!ns->nand_pages_slab) {
NS_ERR("cache_create: unable to create kmem_cache\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_free_pg;
}
return 0;
-err_free:
- vfree(ns->pages_written);
-err_close:
- filp_close(cfile, NULL);
+err_free_pg:
+ vfree(ns->pages);
+
return err;
}
/*
* Free any allocated pages, and free the array of page pointers.
*/
-static void free_device(struct nandsim *ns)
+static void ns_free_device(struct nandsim *ns)
{
int i;
@@ -610,7 +625,7 @@ static void free_device(struct nandsim *ns)
}
}
-static char __init *get_partition_name(int i)
+static char __init *ns_get_partition_name(int i)
{
return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
}
@@ -620,7 +635,7 @@ static char __init *get_partition_name(int i)
*
* RETURNS: 0 if success, -ERRNO if failure.
*/
-static int __init init_nandsim(struct mtd_info *mtd)
+static int __init ns_init(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
@@ -693,7 +708,7 @@ static int __init init_nandsim(struct mtd_info *mtd)
NS_ERR("bad partition size.\n");
return -EINVAL;
}
- ns->partitions[i].name = get_partition_name(i);
+ ns->partitions[i].name = ns_get_partition_name(i);
if (!ns->partitions[i].name) {
NS_ERR("unable to allocate memory.\n");
return -ENOMEM;
@@ -707,12 +722,14 @@ static int __init init_nandsim(struct mtd_info *mtd)
if (remains) {
if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
NS_ERR("too many partitions.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_partition_names;
}
- ns->partitions[i].name = get_partition_name(i);
+ ns->partitions[i].name = ns_get_partition_name(i);
if (!ns->partitions[i].name) {
NS_ERR("unable to allocate memory.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_partition_names;
}
ns->partitions[i].offset = next_offset;
ns->partitions[i].size = remains;
@@ -739,33 +756,48 @@ static int __init init_nandsim(struct mtd_info *mtd)
printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
printk("options: %#x\n", ns->options);
- if ((ret = alloc_device(ns)) != 0)
- return ret;
+ ret = ns_alloc_device(ns);
+ if (ret)
+ goto free_partition_names;
/* Allocate / initialize the internal buffer */
ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
if (!ns->buf.byte) {
NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
ns->geom.pgszoob);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_device;
}
memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
return 0;
+
+free_device:
+ ns_free_device(ns);
+free_partition_names:
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+
+ return ret;
}
/*
* Free the nandsim structure.
*/
-static void free_nandsim(struct nandsim *ns)
+static void ns_free(struct nandsim *ns)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+ kfree(ns->partitions[i].name);
+
kfree(ns->buf.byte);
- free_device(ns);
+ ns_free_device(ns);
return;
}
-static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+static int ns_parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
{
char *w;
int zero_ok;
@@ -793,7 +825,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
return 0;
}
-static int parse_weakblocks(void)
+static int ns_parse_weakblocks(void)
{
char *w;
int zero_ok;
@@ -830,7 +862,7 @@ static int parse_weakblocks(void)
return 0;
}
-static int erase_error(unsigned int erase_block_no)
+static int ns_erase_error(unsigned int erase_block_no)
{
struct weak_block *wb;
@@ -844,7 +876,7 @@ static int erase_error(unsigned int erase_block_no)
return 0;
}
-static int parse_weakpages(void)
+static int ns_parse_weakpages(void)
{
char *w;
int zero_ok;
@@ -881,7 +913,7 @@ static int parse_weakpages(void)
return 0;
}
-static int write_error(unsigned int page_no)
+static int ns_write_error(unsigned int page_no)
{
struct weak_page *wp;
@@ -895,7 +927,7 @@ static int write_error(unsigned int page_no)
return 0;
}
-static int parse_gravepages(void)
+static int ns_parse_gravepages(void)
{
char *g;
int zero_ok;
@@ -932,7 +964,7 @@ static int parse_gravepages(void)
return 0;
}
-static int read_error(unsigned int page_no)
+static int ns_read_error(unsigned int page_no)
{
struct grave_page *gp;
@@ -946,25 +978,7 @@ static int read_error(unsigned int page_no)
return 0;
}
-static void free_lists(void)
-{
- struct list_head *pos, *n;
- list_for_each_safe(pos, n, &weak_blocks) {
- list_del(pos);
- kfree(list_entry(pos, struct weak_block, list));
- }
- list_for_each_safe(pos, n, &weak_pages) {
- list_del(pos);
- kfree(list_entry(pos, struct weak_page, list));
- }
- list_for_each_safe(pos, n, &grave_pages) {
- list_del(pos);
- kfree(list_entry(pos, struct grave_page, list));
- }
- kfree(erase_block_wear);
-}
-
-static int setup_wear_reporting(struct mtd_info *mtd)
+static int ns_setup_wear_reporting(struct mtd_info *mtd)
{
size_t mem;
@@ -982,7 +996,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
return 0;
}
-static void update_wear(unsigned int erase_block_no)
+static void ns_update_wear(unsigned int erase_block_no)
{
if (!erase_block_wear)
return;
@@ -1001,7 +1015,7 @@ static void update_wear(unsigned int erase_block_no)
/*
* Returns the string representation of 'state' state.
*/
-static char *get_state_name(uint32_t state)
+static char *ns_get_state_name(uint32_t state)
{
switch (NS_STATE(state)) {
case STATE_CMD_READ0:
@@ -1061,7 +1075,7 @@ static char *get_state_name(uint32_t state)
*
* RETURNS: 1 if wrong command, 0 if right.
*/
-static int check_command(int cmd)
+static int ns_check_command(int cmd)
{
switch (cmd) {
@@ -1088,7 +1102,7 @@ static int check_command(int cmd)
/*
* Returns state after command is accepted by command number.
*/
-static uint32_t get_state_by_command(unsigned command)
+static uint32_t ns_get_state_by_command(unsigned command)
{
switch (command) {
case NAND_CMD_READ0:
@@ -1126,7 +1140,7 @@ static uint32_t get_state_by_command(unsigned command)
/*
* Move an address byte to the correspondent internal register.
*/
-static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
+static inline void ns_accept_addr_byte(struct nandsim *ns, u_char bt)
{
uint byte = (uint)bt;
@@ -1144,9 +1158,10 @@ static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
/*
* Switch to STATE_READY state.
*/
-static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
+static inline void ns_switch_to_ready_state(struct nandsim *ns, u_char status)
{
- NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+ NS_DBG("switch_to_ready_state: switch to %s state\n",
+ ns_get_state_name(STATE_READY));
ns->state = STATE_READY;
ns->nxstate = STATE_UNKNOWN;
@@ -1203,7 +1218,7 @@ static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
* -1 - several matches.
* 0 - operation is found.
*/
-static int find_operation(struct nandsim *ns, uint32_t flag)
+static int ns_find_operation(struct nandsim *ns, uint32_t flag)
{
int opsfound = 0;
int i, j, idx = 0;
@@ -1256,7 +1271,8 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
ns->state = ns->op[ns->stateidx];
ns->nxstate = ns->op[ns->stateidx + 1];
NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
- idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+ idx, ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
return 0;
}
@@ -1264,13 +1280,13 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
/* Nothing was found. Try to ignore previous commands (if any) and search again */
if (ns->npstates != 0) {
NS_DBG("find_operation: no operation found, try again with state %s\n",
- get_state_name(ns->state));
+ ns_get_state_name(ns->state));
ns->npstates = 0;
- return find_operation(ns, 0);
+ return ns_find_operation(ns, 0);
}
NS_DBG("find_operation: no operations found\n");
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return -2;
}
@@ -1287,7 +1303,7 @@ static int find_operation(struct nandsim *ns, uint32_t flag)
return -1;
}
-static void put_pages(struct nandsim *ns)
+static void ns_put_pages(struct nandsim *ns)
{
int i;
@@ -1296,7 +1312,8 @@ static void put_pages(struct nandsim *ns)
}
/* Get page cache pages in advance to provide NOFS memory allocation */
-static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+static int ns_get_pages(struct nandsim *ns, struct file *file, size_t count,
+ loff_t pos)
{
pgoff_t index, start_index, end_index;
struct page *page;
@@ -1316,7 +1333,7 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
page = find_or_create_page(mapping, index, GFP_NOFS);
}
if (page == NULL) {
- put_pages(ns);
+ ns_put_pages(ns);
return -ENOMEM;
}
unlock_page(page);
@@ -1326,35 +1343,37 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
return 0;
}
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+static ssize_t ns_read_file(struct nandsim *ns, struct file *file, void *buf,
+ size_t count, loff_t pos)
{
ssize_t tx;
int err;
unsigned int noreclaim_flag;
- err = get_pages(ns, file, count, pos);
+ err = ns_get_pages(ns, file, count, pos);
if (err)
return err;
noreclaim_flag = memalloc_noreclaim_save();
tx = kernel_read(file, buf, count, &pos);
memalloc_noreclaim_restore(noreclaim_flag);
- put_pages(ns);
+ ns_put_pages(ns);
return tx;
}
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+static ssize_t ns_write_file(struct nandsim *ns, struct file *file, void *buf,
+ size_t count, loff_t pos)
{
ssize_t tx;
int err;
unsigned int noreclaim_flag;
- err = get_pages(ns, file, count, pos);
+ err = ns_get_pages(ns, file, count, pos);
if (err)
return err;
noreclaim_flag = memalloc_noreclaim_save();
tx = kernel_write(file, buf, count, &pos);
memalloc_noreclaim_restore(noreclaim_flag);
- put_pages(ns);
+ ns_put_pages(ns);
return tx;
}
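/*
 * Editorial sketch, not part of the patch: the NOFS I/O bracketing used by
 * ns_read_file()/ns_write_file() above.  The pages touched by the transfer
 * are pre-faulted and pinned with find_or_create_page(..., GFP_NOFS) first
 * (see ns_get_pages()), then the read itself runs with reclaim disabled so
 * it cannot recurse back into a filesystem.  "nofs_backed_read" is an
 * illustrative name, not a helper added by this patch.
 */
static ssize_t nofs_backed_read(struct file *file, void *buf, size_t count,
				loff_t pos)
{
	unsigned int noreclaim_flag;
	ssize_t tx;

	/* Forbid memory reclaim while the page-cache read is in flight. */
	noreclaim_flag = memalloc_noreclaim_save();
	tx = kernel_read(file, buf, count, &pos);
	memalloc_noreclaim_restore(noreclaim_flag);

	return tx;
}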
@@ -1374,11 +1393,11 @@ static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
}
-static int do_read_error(struct nandsim *ns, int num)
+static int ns_do_read_error(struct nandsim *ns, int num)
{
unsigned int page_no = ns->regs.row;
- if (read_error(page_no)) {
+ if (ns_read_error(page_no)) {
prandom_bytes(ns->buf.byte, num);
NS_WARN("simulating read error in page %u\n", page_no);
return 1;
@@ -1386,7 +1405,7 @@ static int do_read_error(struct nandsim *ns, int num)
return 0;
}
-static void do_bit_flips(struct nandsim *ns, int num)
+static void ns_do_bit_flips(struct nandsim *ns, int num)
{
if (bitflips && prandom_u32() < (1 << 22)) {
int flips = 1;
@@ -1406,7 +1425,7 @@ static void do_bit_flips(struct nandsim *ns, int num)
/*
* Fill the NAND buffer with data read from the specified page.
*/
-static void read_page(struct nandsim *ns, int num)
+static void ns_read_page(struct nandsim *ns, int num)
{
union ns_mem *mypage;
@@ -1420,15 +1439,16 @@ static void read_page(struct nandsim *ns, int num)
NS_DBG("read_page: page %d written, reading from %d\n",
ns->regs.row, ns->regs.column + ns->regs.off);
- if (do_read_error(ns, num))
+ if (ns_do_read_error(ns, num))
return;
pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
- tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
+ tx = ns_read_file(ns, ns->cfile, ns->buf.byte, num,
+ pos);
if (tx != num) {
NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return;
}
- do_bit_flips(ns, num);
+ ns_do_bit_flips(ns, num);
}
return;
}
@@ -1440,17 +1460,17 @@ static void read_page(struct nandsim *ns, int num)
} else {
NS_DBG("read_page: page %d allocated, reading from %d\n",
ns->regs.row, ns->regs.column + ns->regs.off);
- if (do_read_error(ns, num))
+ if (ns_do_read_error(ns, num))
return;
memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
- do_bit_flips(ns, num);
+ ns_do_bit_flips(ns, num);
}
}
/*
* Erase all pages in the specified sector.
*/
-static void erase_sector(struct nandsim *ns)
+static void ns_erase_sector(struct nandsim *ns)
{
union ns_mem *mypage;
int i;
@@ -1478,7 +1498,7 @@ static void erase_sector(struct nandsim *ns)
/*
* Program the specified page with the contents from the NAND buffer.
*/
-static int prog_page(struct nandsim *ns, int num)
+static int ns_prog_page(struct nandsim *ns, int num)
{
int i;
union ns_mem *mypage;
@@ -1497,7 +1517,7 @@ static int prog_page(struct nandsim *ns, int num)
memset(ns->file_buf, 0xff, ns->geom.pgszoob);
} else {
all = 0;
- tx = read_file(ns, ns->cfile, pg_off, num, off);
+ tx = ns_read_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
@@ -1507,14 +1527,15 @@ static int prog_page(struct nandsim *ns, int num)
pg_off[i] &= ns->buf.byte[i];
if (all) {
loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
- tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
+ tx = ns_write_file(ns, ns->cfile, ns->file_buf,
+ ns->geom.pgszoob, pos);
if (tx != ns->geom.pgszoob) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
}
__set_bit(ns->regs.row, ns->pages_written);
} else {
- tx = write_file(ns, ns->cfile, pg_off, num, off);
+ tx = ns_write_file(ns, ns->cfile, pg_off, num, off);
if (tx != num) {
NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
return -1;
@@ -1552,7 +1573,7 @@ static int prog_page(struct nandsim *ns, int num)
*
* RETURNS: 0 if success, -1 if error.
*/
-static int do_state_action(struct nandsim *ns, uint32_t action)
+static int ns_do_state_action(struct nandsim *ns, uint32_t action)
{
int num;
int busdiv = ns->busw == 8 ? 1 : 2;
@@ -1579,7 +1600,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
break;
}
num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
- read_page(ns, num);
+ ns_read_page(ns, num);
NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
num, NS_RAW_OFFSET(ns) + ns->regs.off);
@@ -1622,14 +1643,14 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
ns->regs.row, NS_RAW_OFFSET(ns));
NS_LOG("erase sector %u\n", erase_block_no);
- erase_sector(ns);
+ ns_erase_sector(ns);
NS_MDELAY(erase_delay);
if (erase_block_wear)
- update_wear(erase_block_no);
+ ns_update_wear(erase_block_no);
- if (erase_error(erase_block_no)) {
+ if (ns_erase_error(erase_block_no)) {
NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
return -1;
}
@@ -1653,7 +1674,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
return -1;
}
- if (prog_page(ns, num) == -1)
+ if (ns_prog_page(ns, num) == -1)
return -1;
page_no = ns->regs.row;
@@ -1665,7 +1686,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
NS_UDELAY(programm_delay);
NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
- if (write_error(page_no)) {
+ if (ns_write_error(page_no)) {
NS_WARN("simulating write failure in page %u\n", page_no);
return -1;
}
@@ -1702,7 +1723,7 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
/*
* Switch simulator's state.
*/
-static void switch_state(struct nandsim *ns)
+static void ns_switch_state(struct nandsim *ns)
{
if (ns->op) {
/*
@@ -1716,11 +1737,13 @@ static void switch_state(struct nandsim *ns)
NS_DBG("switch_state: operation is known, switch to the next state, "
"state: %s, nxstate: %s\n",
- get_state_name(ns->state), get_state_name(ns->nxstate));
+ ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
/* See, whether we need to do some action */
- if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
@@ -1734,15 +1757,16 @@ static void switch_state(struct nandsim *ns)
* The only event causing the switch_state function to
* be called with yet unknown operation is new command.
*/
- ns->state = get_state_by_command(ns->regs.command);
+ ns->state = ns_get_state_by_command(ns->regs.command);
NS_DBG("switch_state: operation is unknown, try to find it\n");
- if (find_operation(ns, 0) != 0)
+ if (ns_find_operation(ns, 0))
return;
- if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
}
@@ -1770,7 +1794,7 @@ static void switch_state(struct nandsim *ns)
NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
- switch_to_ready_state(ns, status);
+ ns_switch_to_ready_state(ns, status);
return;
} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
@@ -1784,7 +1808,8 @@ static void switch_state(struct nandsim *ns)
NS_DBG("switch_state: the next state is data I/O, switch, "
"state: %s, nxstate: %s\n",
- get_state_name(ns->state), get_state_name(ns->nxstate));
+ ns_get_state_name(ns->state),
+ ns_get_state_name(ns->nxstate));
/*
* Set the internal register to the count of bytes which
@@ -1862,8 +1887,8 @@ static u_char ns_nand_read_byte(struct nand_chip *chip)
return outb;
}
if (!(ns->state & STATE_DATAOUT_MASK)) {
- NS_WARN("read_byte: unexpected data output cycle, state is %s "
- "return %#x\n", get_state_name(ns->state), (uint)outb);
+ NS_WARN("read_byte: unexpected data output cycle, state is %s return %#x\n",
+ ns_get_state_name(ns->state), (uint)outb);
return outb;
}
@@ -1902,7 +1927,7 @@ static u_char ns_nand_read_byte(struct nand_chip *chip)
NS_DBG("read_byte: all bytes were read\n");
if (NS_STATE(ns->nxstate) == STATE_READY)
- switch_state(ns);
+ ns_switch_state(ns);
}
return outb;
@@ -1929,12 +1954,12 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
if (byte == NAND_CMD_RESET) {
NS_LOG("reset chip\n");
- switch_to_ready_state(ns, NS_STATUS_OK(ns));
+ ns_switch_to_ready_state(ns, NS_STATUS_OK(ns));
return;
}
/* Check that the command byte is correct */
- if (check_command(byte)) {
+ if (ns_check_command(byte)) {
NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
return;
}
@@ -1943,7 +1968,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
|| NS_STATE(ns->state) == STATE_DATAOUT) {
int row = ns->regs.row;
- switch_state(ns);
+ ns_switch_state(ns);
if (byte == NAND_CMD_RNDOUT)
ns->regs.row = row;
}
@@ -1958,16 +1983,17 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
* was expected but command was input. In this case ignore
* previous command(s)/state(s) and accept the last one.
*/
- NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
- "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, ignore previous states\n",
+ (uint)byte,
+ ns_get_state_name(ns->nxstate));
}
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
}
NS_DBG("command byte corresponding to %s state accepted\n",
- get_state_name(get_state_by_command(byte)));
+ ns_get_state_name(ns_get_state_by_command(byte)));
ns->regs.command = byte;
- switch_state(ns);
+ ns_switch_state(ns);
} else if (ns->lines.ale == 1) {
/*
@@ -1978,11 +2004,13 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
NS_DBG("write_byte: operation isn't known yet, identify it\n");
- if (find_operation(ns, 1) < 0)
+ if (ns_find_operation(ns, 1) < 0)
return;
- if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ if ((ns->state & ACTION_MASK) &&
+ ns_do_state_action(ns, ns->state) < 0) {
+ ns_switch_to_ready_state(ns,
+ NS_STATUS_FAILED(ns));
return;
}
@@ -2004,20 +2032,20 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
/* Check that chip is expecting address */
if (!(ns->nxstate & STATE_ADDR_MASK)) {
- NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
- "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, switch to STATE_READY\n",
+ (uint)byte, ns_get_state_name(ns->nxstate));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if this is expected byte */
if (ns->regs.count == ns->regs.num) {
NS_ERR("write_byte: no more address bytes expected\n");
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
- accept_addr_byte(ns, byte);
+ ns_accept_addr_byte(ns, byte);
ns->regs.count += 1;
@@ -2026,7 +2054,7 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
if (ns->regs.count == ns->regs.num) {
NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
- switch_state(ns);
+ ns_switch_state(ns);
}
} else {
@@ -2036,10 +2064,10 @@ static void ns_nand_write_byte(struct nand_chip *chip, u_char byte)
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
- NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
- "switch to %s\n", (uint)byte,
- get_state_name(ns->state), get_state_name(STATE_READY));
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, switch to %s\n",
+ (uint)byte, ns_get_state_name(ns->state),
+ ns_get_state_name(STATE_READY));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
@@ -2069,16 +2097,16 @@ static void ns_nand_write_buf(struct nand_chip *chip, const u_char *buf,
/* Check that chip is expecting data input */
if (!(ns->state & STATE_DATAIN_MASK)) {
- NS_ERR("write_buf: data input isn't expected, state is %s, "
- "switch to STATE_READY\n", get_state_name(ns->state));
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ NS_ERR("write_buf: data input isn't expected, state is %s, switch to STATE_READY\n",
+ ns_get_state_name(ns->state));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
/* Check if these are expected bytes */
if (ns->regs.count + len > ns->regs.num) {
NS_ERR("write_buf: too many input bytes\n");
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
@@ -2105,7 +2133,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
}
if (!(ns->state & STATE_DATAOUT_MASK)) {
NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
- get_state_name(ns->state));
+ ns_get_state_name(ns->state));
return;
}
@@ -2121,7 +2149,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
/* Check if these are expected bytes */
if (ns->regs.count + len > ns->regs.num) {
NS_ERR("read_buf: too many bytes to read\n");
- switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ ns_switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
return;
}
@@ -2130,7 +2158,7 @@ static void ns_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
if (ns->regs.count == ns->regs.num) {
if (NS_STATE(ns->nxstate) == STATE_READY)
- switch_state(ns);
+ ns_switch_state(ns);
}
return;
@@ -2144,6 +2172,9 @@ static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op,
const struct nand_op_instr *instr = NULL;
struct nandsim *ns = nand_get_controller_data(chip);
+ if (check_only)
+ return 0;
+
ns->lines.ce = 1;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -2224,9 +2255,10 @@ static const struct nand_controller_ops ns_controller_ops = {
*/
static int __init ns_init_module(void)
{
+ struct list_head *pos, *n;
struct nand_chip *chip;
struct nandsim *ns;
- int retval = -ENOMEM, i;
+ int ret;
if (bus_width != 8 && bus_width != 16) {
NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
@@ -2259,8 +2291,8 @@ static int __init ns_init_module(void)
break;
default:
NS_ERR("bbt has to be 0..2\n");
- retval = -EINVAL;
- goto error;
+ ret = -EINVAL;
+ goto free_ns_struct;
}
/*
* Perform minimum nandsim structure initialization to handle
@@ -2285,23 +2317,26 @@ static int __init ns_init_module(void)
nsmtd->owner = THIS_MODULE;
- if ((retval = parse_weakblocks()) != 0)
- goto error;
+ ret = ns_parse_weakblocks();
+ if (ret)
+ goto free_ns_struct;
- if ((retval = parse_weakpages()) != 0)
- goto error;
+ ret = ns_parse_weakpages();
+ if (ret)
+ goto free_wb_list;
- if ((retval = parse_gravepages()) != 0)
- goto error;
+ ret = ns_parse_gravepages();
+ if (ret)
+ goto free_wp_list;
nand_controller_init(&ns->base);
ns->base.ops = &ns_controller_ops;
chip->controller = &ns->base;
- retval = nand_scan(chip, 1);
- if (retval) {
+ ret = nand_scan(chip, 1);
+ if (ret) {
NS_ERR("Could not scan NAND Simulator device\n");
- goto error;
+ goto free_gp_list;
}
if (overridesize) {
@@ -2313,8 +2348,8 @@ static int __init ns_init_module(void)
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
- retval = -EINVAL;
- goto err_exit;
+ ret = -EINVAL;
+ goto cleanup_nand;
}
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
@@ -2325,39 +2360,60 @@ static int __init ns_init_module(void)
chip->pagemask = (targetsize >> chip->page_shift) - 1;
}
- if ((retval = setup_wear_reporting(nsmtd)) != 0)
- goto err_exit;
+ ret = ns_setup_wear_reporting(nsmtd);
+ if (ret)
+ goto cleanup_nand;
- if ((retval = init_nandsim(nsmtd)) != 0)
- goto err_exit;
+ ret = ns_init(nsmtd);
+ if (ret)
+ goto free_ebw;
- if ((retval = nand_create_bbt(chip)) != 0)
- goto err_exit;
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto free_ns_object;
- if ((retval = parse_badblocks(ns, nsmtd)) != 0)
- goto err_exit;
+ ret = ns_parse_badblocks(ns, nsmtd);
+ if (ret)
+ goto free_ns_object;
/* Register NAND partitions */
- retval = mtd_device_register(nsmtd, &ns->partitions[0],
- ns->nbparts);
- if (retval != 0)
- goto err_exit;
+ ret = mtd_device_register(nsmtd, &ns->partitions[0], ns->nbparts);
+ if (ret)
+ goto free_ns_object;
- if ((retval = nandsim_debugfs_create(ns)) != 0)
- goto err_exit;
+ ret = ns_debugfs_create(ns);
+ if (ret)
+ goto unregister_mtd;
return 0;
-err_exit:
- free_nandsim(ns);
- nand_release(chip);
- for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
- kfree(ns->partitions[i].name);
-error:
+unregister_mtd:
+ WARN_ON(mtd_device_unregister(nsmtd));
+free_ns_object:
+ ns_free(ns);
+free_ebw:
+ kfree(erase_block_wear);
+cleanup_nand:
+ nand_cleanup(chip);
+free_gp_list:
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+free_wp_list:
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+free_wb_list:
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+free_ns_struct:
kfree(ns);
- free_lists();
- return retval;
+ return ret;
}
module_init(ns_init_module);
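/*
 * Editorial sketch, not part of the patch: the error-unwind style the
 * reworked ns_init_module() above now follows.  Each label undoes exactly
 * one successful step, and the labels are reached in reverse order of the
 * setup steps.  step_a()/step_b()/step_c() and their undo helpers are
 * hypothetical.
 */
static int __init example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto undo_a;

	ret = step_c();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	return ret;
}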
@@ -2369,14 +2425,30 @@ static void __exit ns_cleanup_module(void)
{
struct nand_chip *chip = mtd_to_nand(nsmtd);
struct nandsim *ns = nand_get_controller_data(chip);
- int i;
+ struct list_head *pos, *n;
- free_nandsim(ns); /* Free nandsim private resources */
- nand_release(chip); /* Unregister driver */
- for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
- kfree(ns->partitions[i].name);
- kfree(ns); /* Free other structures */
- free_lists();
+ ns_debugfs_remove(ns);
+ WARN_ON(mtd_device_unregister(nsmtd));
+ ns_free(ns);
+ kfree(erase_block_wear);
+ nand_cleanup(chip);
+
+ list_for_each_safe(pos, n, &grave_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct grave_page, list));
+ }
+
+ list_for_each_safe(pos, n, &weak_pages) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_page, list));
+ }
+
+ list_for_each_safe(pos, n, &weak_blocks) {
+ list_del(pos);
+ kfree(list_entry(pos, struct weak_block, list));
+ }
+
+ kfree(ns);
}
module_exit(ns_cleanup_module);
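/*
 * Editorial sketch, not part of the patch: the conversion pattern applied
 * in ns_cleanup_module() above and repeated in the driver remove() paths
 * that follow.  nand_release() used to unregister the MTD device and free
 * the NAND structures in one call; it is replaced by an explicit
 * mtd_device_unregister() (whose failure is only warned about, since
 * remove() cannot meaningfully abort) followed by nand_cleanup().  The
 * my_remove()/my_chip names are illustrative only.
 */
static int my_remove(struct platform_device *pdev)
{
	struct nand_chip *my_chip = platform_get_drvdata(pdev);
	int ret;

	/* Unregister the MTD device first... */
	ret = mtd_device_unregister(nand_to_mtd(my_chip));
	WARN_ON(ret);

	/* ...then release the NAND chip/controller resources. */
	nand_cleanup(my_chip);

	return 0;
}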
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index d324396ab7ff..ed38338c1383 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -244,9 +244,13 @@ static int ndfc_probe(struct platform_device *ofdev)
static int ndfc_remove(struct platform_device *ofdev)
{
struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
- struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
+ struct nand_chip *chip = &ndfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
- nand_release(&ndfc->chip);
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
kfree(mtd->name);
return 0;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index ad77c112a78a..eb7fcfd9276b 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -2283,14 +2283,18 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct omap_nand_info *info = mtd_to_omap(mtd);
+ int ret;
+
if (nand_chip->ecc.priv) {
nand_bch_free(nand_chip->ecc.priv);
nand_chip->ecc.priv = NULL;
}
if (info->dma)
dma_release_channel(info->dma);
- nand_release(nand_chip);
- return 0;
+ ret = mtd_device_unregister(mtd);
+ WARN_ON(ret);
+ nand_cleanup(nand_chip);
+ return ret;
}
static const struct of_device_id omap_nand_ids[] = {
diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
index 3fa0e2cbbe53..078b1022ac2a 100644
--- a/drivers/mtd/nand/raw/omap_elm.c
+++ b/drivers/mtd/nand/raw/omap_elm.c
@@ -411,6 +411,7 @@ static int elm_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
if (pm_runtime_get_sync(&pdev->dev) < 0) {
ret = -EINVAL;
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
dev_err(&pdev->dev, "can't enable clock\n");
return ret;
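/*
 * Editorial sketch, not part of the patch: the omap_elm fix above relies on
 * the fact that pm_runtime_get_sync() increments the device usage counter
 * even when it fails, so the error path must drop that reference before
 * disabling runtime PM.  my_probe_power() is a hypothetical helper.
 */
static int my_probe_power(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Balance the reference taken by the failed get. */
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}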
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index d27b39a7223c..880b54ca1b41 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -180,7 +180,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
if (ret) {
- nand_release(nc);
+ nand_cleanup(nc);
goto no_dev;
}
@@ -195,8 +195,12 @@ static int orion_nand_remove(struct platform_device *pdev)
{
struct orion_nand_info *info = platform_get_drvdata(pdev);
struct nand_chip *chip = &info->chip;
+ int ret;
- nand_release(chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+
+ nand_cleanup(chip);
clk_disable_unprepare(info->clk);
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
index c43cb4d92d3d..8d0d76ad319d 100644
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ b/drivers/mtd/nand/raw/oxnas_nand.c
@@ -32,6 +32,7 @@ struct oxnas_nand_ctrl {
void __iomem *io_base;
struct clk *clk;
struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+ unsigned int nchips;
};
static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
@@ -79,9 +80,9 @@ static int oxnas_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
- int nchips = 0;
int count = 0;
int err = 0;
+ int i;
/* Allocate memory for the device structure (and zero it) */
oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
@@ -140,17 +141,15 @@ static int oxnas_nand_probe(struct platform_device *pdev)
goto err_release_child;
err = mtd_device_register(mtd, NULL, 0);
- if (err) {
- nand_release(chip);
- goto err_release_child;
- }
+ if (err)
+ goto err_cleanup_nand;
- oxnas->chips[nchips] = chip;
- ++nchips;
+ oxnas->chips[oxnas->nchips] = chip;
+ ++oxnas->nchips;
}
/* Exit if no chips found */
- if (!nchips) {
+ if (!oxnas->nchips) {
err = -ENODEV;
goto err_clk_unprepare;
}
@@ -159,8 +158,17 @@ static int oxnas_nand_probe(struct platform_device *pdev)
return 0;
+err_cleanup_nand:
+ nand_cleanup(chip);
err_release_child:
of_node_put(nand_np);
+
+ for (i = 0; i < oxnas->nchips; i++) {
+ chip = oxnas->chips[i];
+ WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+ nand_cleanup(chip);
+ }
+
err_clk_unprepare:
clk_disable_unprepare(oxnas->clk);
return err;
@@ -169,9 +177,14 @@ err_clk_unprepare:
static int oxnas_nand_remove(struct platform_device *pdev)
{
struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+ struct nand_chip *chip;
+ int i;
- if (oxnas->chips[0])
- nand_release(oxnas->chips[0]);
+ for (i = 0; i < oxnas->nchips; i++) {
+ chip = oxnas->chips[i];
+ WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
+ nand_cleanup(chip);
+ }
clk_disable_unprepare(oxnas->clk);
diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c
index 9cfe7395172a..d8eca8c3fdcd 100644
--- a/drivers/mtd/nand/raw/pasemi_nand.c
+++ b/drivers/mtd/nand/raw/pasemi_nand.c
@@ -146,7 +146,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
dev_err(dev, "Unable to register MTD device\n");
err = -ENODEV;
- goto out_lpc;
+ goto out_cleanup_nand;
}
dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
@@ -154,6 +154,8 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
return 0;
+ out_cleanup_nand:
+ nand_cleanup(chip);
out_lpc:
release_region(lpcctl, 4);
out_ior:
@@ -167,6 +169,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
static int pasemi_nand_remove(struct platform_device *ofdev)
{
struct nand_chip *chip;
+ int ret;
if (!pasemi_nand_mtd)
return 0;
@@ -174,7 +177,9 @@ static int pasemi_nand_remove(struct platform_device *ofdev)
chip = mtd_to_nand(pasemi_nand_mtd);
/* Release resources, unregister device */
- nand_release(chip);
+ ret = mtd_device_unregister(pasemi_nand_mtd);
+ WARN_ON(ret);
+ nand_cleanup(chip);
release_region(lpcctl, 4);
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index dc0f3074ddbf..556182f26057 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -92,7 +92,7 @@ static int plat_nand_probe(struct platform_device *pdev)
if (!err)
return err;
- nand_release(&data->chip);
+ nand_cleanup(&data->chip);
out:
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
@@ -106,8 +106,12 @@ static int plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
- nand_release(&data->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
if (pdata->ctrl.remove)
pdata->ctrl.remove(pdev);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 5b11c7061497..f1daf330951b 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2836,7 +2836,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
chip->legacy.block_markbad = qcom_nandc_block_markbad;
chip->controller = &nandc->controller;
- chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
NAND_SKIP_BBTSCAN;
/* set up initial status value */
@@ -3005,10 +3005,15 @@ static int qcom_nandc_remove(struct platform_device *pdev)
struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct qcom_nand_host *host;
+ struct nand_chip *chip;
+ int ret;
- list_for_each_entry(host, &nandc->host_list, node)
- nand_release(&host->chip);
-
+ list_for_each_entry(host, &nandc->host_list, node) {
+ chip = &host->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
qcom_nandc_unalloc(nandc);
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 77774250fb11..f865e3a47b01 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -651,7 +651,8 @@ static int r852_register_nand_device(struct r852_device *dev)
dev->card_registered = 1;
return 0;
error3:
- nand_release(dev->chip);
+ WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip)));
+ nand_cleanup(dev->chip);
error1:
/* Force card redetect */
dev->card_detected = 0;
@@ -670,7 +671,8 @@ static void r852_unregister_nand_device(struct r852_device *dev)
return;
device_remove_file(&mtd->dev, &dev_attr_media_type);
- nand_release(dev->chip);
+ WARN_ON(mtd_device_unregister(mtd));
+ nand_cleanup(dev->chip);
r852_engine_disable(dev);
dev->card_registered = 0;
}
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index 0009c1820e21..f86dff311464 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -779,7 +779,8 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
- nand_release(&ptr->chip);
+ WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
+ nand_cleanup(&ptr->chip);
}
}
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 058e99d0cbcf..a661b8bb2dd5 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1204,9 +1204,13 @@ err_chip:
static int flctl_remove(struct platform_device *pdev)
{
struct sh_flctl *flctl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &flctl->chip;
+ int ret;
flctl_release_dma(flctl);
- nand_release(&flctl->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index b47a9eaff89b..51286f7acf54 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -183,7 +183,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
return 0;
err_add:
- nand_release(this);
+ nand_cleanup(this);
err_scan:
iounmap(sharpsl->io);
@@ -199,13 +199,19 @@ err_get_res:
static int sharpsl_nand_remove(struct platform_device *pdev)
{
struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &sharpsl->chip;
+ int ret;
- /* Release resources, unregister device */
- nand_release(&sharpsl->chip);
+ /* Unregister device */
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+
+ /* Release resources */
+ nand_cleanup(chip);
iounmap(sharpsl->io);
- /* Free the MTD device structure */
+ /* Free the driver's structure */
kfree(sharpsl);
return 0;
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index 20f40c0e812c..243b34cfbc1b 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -169,7 +169,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
if (!res)
return res;
- nand_release(nand_chip);
+ nand_cleanup(nand_chip);
out:
iounmap(host->io_base);
@@ -182,8 +182,12 @@ out:
static int socrates_nand_remove(struct platform_device *ofdev)
{
struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+ struct nand_chip *chip = &host->nand_chip;
+ int ret;
- nand_release(&host->nand_chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
iounmap(host->io_base);
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index b6d45cd911ae..65c9d17b25a3 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -4,6 +4,7 @@
* Author: Christophe Kerello <christophe.kerello@st.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -37,8 +38,7 @@
/* Max ECC buffer length */
#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
-#define FMC2_TIMEOUT_US 1000
-#define FMC2_TIMEOUT_MS 1000
+#define FMC2_TIMEOUT_MS 5000
/* Timings */
#define FMC2_THIZ 1
@@ -85,20 +85,16 @@
/* Register: FMC2_PCR */
#define FMC2_PCR_PWAITEN BIT(1)
#define FMC2_PCR_PBKEN BIT(2)
-#define FMC2_PCR_PWID_MASK GENMASK(5, 4)
-#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4)
+#define FMC2_PCR_PWID GENMASK(5, 4)
#define FMC2_PCR_PWID_BUSWIDTH_8 0
#define FMC2_PCR_PWID_BUSWIDTH_16 1
#define FMC2_PCR_ECCEN BIT(6)
#define FMC2_PCR_ECCALG BIT(8)
-#define FMC2_PCR_TCLR_MASK GENMASK(12, 9)
-#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9)
+#define FMC2_PCR_TCLR GENMASK(12, 9)
#define FMC2_PCR_TCLR_DEFAULT 0xf
-#define FMC2_PCR_TAR_MASK GENMASK(16, 13)
-#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13)
+#define FMC2_PCR_TAR GENMASK(16, 13)
#define FMC2_PCR_TAR_DEFAULT 0xf
-#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17)
-#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17)
+#define FMC2_PCR_ECCSS GENMASK(19, 17)
#define FMC2_PCR_ECCSS_512 1
#define FMC2_PCR_ECCSS_2048 3
#define FMC2_PCR_BCHECC BIT(24)
@@ -108,17 +104,17 @@
#define FMC2_SR_NWRF BIT(6)
/* Register: FMC2_PMEM */
-#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0)
-#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8)
-#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16)
-#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24)
+#define FMC2_PMEM_MEMSET GENMASK(7, 0)
+#define FMC2_PMEM_MEMWAIT GENMASK(15, 8)
+#define FMC2_PMEM_MEMHOLD GENMASK(23, 16)
+#define FMC2_PMEM_MEMHIZ GENMASK(31, 24)
#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
/* Register: FMC2_PATT */
-#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0)
-#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8)
-#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16)
-#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
+#define FMC2_PATT_ATTSET GENMASK(7, 0)
+#define FMC2_PATT_ATTWAIT GENMASK(15, 8)
+#define FMC2_PATT_ATTHOLD GENMASK(23, 16)
+#define FMC2_PATT_ATTHIZ GENMASK(31, 24)
#define FMC2_PATT_DEFAULT 0x0a0a0a0a
/* Register: FMC2_ISR */
@@ -133,9 +129,9 @@
/* Register: FMC2_CSQCFGR1 */
#define FMC2_CSQCFGR1_CMD2EN BIT(1)
#define FMC2_CSQCFGR1_DMADEN BIT(2)
-#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4)
-#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8)
-#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR1_ACYNBR GENMASK(6, 4)
+#define FMC2_CSQCFGR1_CMD1 GENMASK(15, 8)
+#define FMC2_CSQCFGR1_CMD2 GENMASK(23, 16)
#define FMC2_CSQCFGR1_CMD1T BIT(24)
#define FMC2_CSQCFGR1_CMD2T BIT(25)
@@ -143,13 +139,13 @@
#define FMC2_CSQCFGR2_SQSDTEN BIT(0)
#define FMC2_CSQCFGR2_RCMD2EN BIT(1)
#define FMC2_CSQCFGR2_DMASEN BIT(2)
-#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8)
-#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16)
+#define FMC2_CSQCFGR2_RCMD1 GENMASK(15, 8)
+#define FMC2_CSQCFGR2_RCMD2 GENMASK(23, 16)
#define FMC2_CSQCFGR2_RCMD1T BIT(24)
#define FMC2_CSQCFGR2_RCMD2T BIT(25)
/* Register: FMC2_CSQCFGR3 */
-#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8)
+#define FMC2_CSQCFGR3_SNBR GENMASK(13, 8)
#define FMC2_CSQCFGR3_AC1T BIT(16)
#define FMC2_CSQCFGR3_AC2T BIT(17)
#define FMC2_CSQCFGR3_AC3T BIT(18)
@@ -160,15 +156,15 @@
#define FMC2_CSQCFGR3_RAC2T BIT(23)
/* Register: FMC2_CSQCAR1 */
-#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0)
-#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8)
-#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16)
-#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24)
+#define FMC2_CSQCAR1_ADDC1 GENMASK(7, 0)
+#define FMC2_CSQCAR1_ADDC2 GENMASK(15, 8)
+#define FMC2_CSQCAR1_ADDC3 GENMASK(23, 16)
+#define FMC2_CSQCAR1_ADDC4 GENMASK(31, 24)
/* Register: FMC2_CSQCAR2 */
-#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0)
-#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10)
-#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16)
+#define FMC2_CSQCAR2_ADDC5 GENMASK(7, 0)
+#define FMC2_CSQCAR2_NANDCEN GENMASK(11, 10)
+#define FMC2_CSQCAR2_SAO GENMASK(31, 16)
/* Register: FMC2_CSQIER */
#define FMC2_CSQIER_TCIE BIT(0)
@@ -189,28 +185,23 @@
/* Register: FMC2_BCHDSR0 */
#define FMC2_BCHDSR0_DUE BIT(0)
#define FMC2_BCHDSR0_DEF BIT(1)
-#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4)
-#define FMC2_BCHDSR0_DEN_SHIFT 4
+#define FMC2_BCHDSR0_DEN GENMASK(7, 4)
/* Register: FMC2_BCHDSR1 */
-#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0)
-#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16)
-#define FMC2_BCHDSR1_EBP2_SHIFT 16
+#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16)
/* Register: FMC2_BCHDSR2 */
-#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0)
-#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16)
-#define FMC2_BCHDSR2_EBP4_SHIFT 16
+#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16)
/* Register: FMC2_BCHDSR3 */
-#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0)
-#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16)
-#define FMC2_BCHDSR3_EBP6_SHIFT 16
+#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16)
/* Register: FMC2_BCHDSR4 */
-#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0)
-#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16)
-#define FMC2_BCHDSR4_EBP8_SHIFT 16
+#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16)
enum stm32_fmc2_ecc {
FMC2_ECC_HAM = 1,
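/*
 * Editorial sketch, not part of the patch: the macro rework above replaces
 * hand-rolled shift/mask pairs with plain GENMASK() field definitions so
 * that FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> can derive the
 * shift from the mask.  EXAMPLE_REG_FIELD and the helpers are illustrative.
 */
#include <linux/bitfield.h>

#define EXAMPLE_REG_FIELD	GENMASK(7, 4)

static u32 example_pack(u32 reg, u32 val)
{
	/* Old style: reg = (reg & ~MASK) | ((val & 0xf) << 4); */
	reg &= ~EXAMPLE_REG_FIELD;
	reg |= FIELD_PREP(EXAMPLE_REG_FIELD, val);
	return reg;
}

static u32 example_unpack(u32 reg)
{
	/* Old style: (reg & MASK) >> SHIFT; */
	return FIELD_GET(EXAMPLE_REG_FIELD, reg);
}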
@@ -281,43 +272,41 @@ static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base)
return container_of(base, struct stm32_fmc2_nfc, base);
}
-/* Timings configuration */
-static void stm32_fmc2_timings_init(struct nand_chip *chip)
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
struct stm32_fmc2_timings *timings = &nand->timings;
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
u32 pmem, patt;
/* Set tclr/tar timings */
- pcr &= ~FMC2_PCR_TCLR_MASK;
- pcr |= FMC2_PCR_TCLR(timings->tclr);
- pcr &= ~FMC2_PCR_TAR_MASK;
- pcr |= FMC2_PCR_TAR(timings->tar);
+ pcr &= ~FMC2_PCR_TCLR;
+ pcr |= FIELD_PREP(FMC2_PCR_TCLR, timings->tclr);
+ pcr &= ~FMC2_PCR_TAR;
+ pcr |= FIELD_PREP(FMC2_PCR_TAR, timings->tar);
/* Set tset/twait/thold/thiz timings in common bank */
- pmem = FMC2_PMEM_MEMSET(timings->tset_mem);
- pmem |= FMC2_PMEM_MEMWAIT(timings->twait);
- pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem);
- pmem |= FMC2_PMEM_MEMHIZ(timings->thiz);
+ pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
/* Set tset/twait/thold/thiz timings in attribut bank */
- patt = FMC2_PATT_ATTSET(timings->tset_att);
- patt |= FMC2_PATT_ATTWAIT(timings->twait);
- patt |= FMC2_PATT_ATTHOLD(timings->thold_att);
- patt |= FMC2_PATT_ATTHIZ(timings->thiz);
-
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
- writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM);
- writel_relaxed(patt, fmc2->io_base + FMC2_PATT);
+ patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
+
+ writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+ writel_relaxed(pmem, nfc->io_base + FMC2_PMEM);
+ writel_relaxed(patt, nfc->io_base + FMC2_PATT);
}
-/* Controller configuration */
-static void stm32_fmc2_setup(struct nand_chip *chip)
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
/* Configure ECC algorithm (default configuration is Hamming) */
pcr &= ~FMC2_PCR_ECCALG;
@@ -330,195 +319,182 @@ static void stm32_fmc2_setup(struct nand_chip *chip)
}
/* Set buswidth */
- pcr &= ~FMC2_PCR_PWID_MASK;
+ pcr &= ~FMC2_PCR_PWID;
if (chip->options & NAND_BUSWIDTH_16)
- pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
+ pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
/* Set ECC sector size */
- pcr &= ~FMC2_PCR_ECCSS_MASK;
- pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512);
+ pcr &= ~FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
}
-/* Select target */
-static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr)
+static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
struct dma_slave_config dma_cfg;
int ret;
- if (nand->cs_used[chipnr] == fmc2->cs_sel)
+ if (nand->cs_used[chipnr] == nfc->cs_sel)
return 0;
- fmc2->cs_sel = nand->cs_used[chipnr];
+ nfc->cs_sel = nand->cs_used[chipnr];
+ stm32_fmc2_nfc_setup(chip);
+ stm32_fmc2_nfc_timings_init(chip);
- /* FMC2 setup routine */
- stm32_fmc2_setup(chip);
-
- /* Apply timings */
- stm32_fmc2_timings_init(chip);
-
- if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) {
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch) {
memset(&dma_cfg, 0, sizeof(dma_cfg));
- dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel];
- dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel];
+ dma_cfg.src_addr = nfc->data_phys_addr[nfc->cs_sel];
+ dma_cfg.dst_addr = nfc->data_phys_addr[nfc->cs_sel];
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_cfg.src_maxburst = 32;
dma_cfg.dst_maxburst = 32;
- ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg);
+ ret = dmaengine_slave_config(nfc->dma_tx_ch, &dma_cfg);
if (ret) {
- dev_err(fmc2->dev, "tx DMA engine slave config failed\n");
+ dev_err(nfc->dev, "tx DMA engine slave config failed\n");
return ret;
}
- ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg);
+ ret = dmaengine_slave_config(nfc->dma_rx_ch, &dma_cfg);
if (ret) {
- dev_err(fmc2->dev, "rx DMA engine slave config failed\n");
+ dev_err(nfc->dev, "rx DMA engine slave config failed\n");
return ret;
}
}
- if (fmc2->dma_ecc_ch) {
+ if (nfc->dma_ecc_ch) {
/*
* Hamming: we read HECCR register
* BCH4/BCH8: we read BCHDSRSx registers
*/
memset(&dma_cfg, 0, sizeof(dma_cfg));
- dma_cfg.src_addr = fmc2->io_phys_addr;
+ dma_cfg.src_addr = nfc->io_phys_addr;
dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ?
FMC2_HECCR : FMC2_BCHDSR0;
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg);
+ ret = dmaengine_slave_config(nfc->dma_ecc_ch, &dma_cfg);
if (ret) {
- dev_err(fmc2->dev, "ECC DMA engine slave config failed\n");
+ dev_err(nfc->dev, "ECC DMA engine slave config failed\n");
return ret;
}
/* Calculate ECC length needed for one sector */
- fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
- FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
+ nfc->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ?
+ FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN;
}
return 0;
}
-/* Set bus width to 16-bit or 8-bit */
-static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set)
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set)
{
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
- pcr &= ~FMC2_PCR_PWID_MASK;
+ pcr &= ~FMC2_PCR_PWID;
if (set)
- pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16);
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
+ writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
}
-/* Enable/disable ECC */
-static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable)
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
{
- u32 pcr = readl(fmc2->io_base + FMC2_PCR);
+ u32 pcr = readl(nfc->io_base + FMC2_PCR);
pcr &= ~FMC2_PCR_ECCEN;
if (enable)
pcr |= FMC2_PCR_ECCEN;
- writel(pcr, fmc2->io_base + FMC2_PCR);
+ writel(pcr, nfc->io_base + FMC2_PCR);
}
-/* Enable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc)
{
- u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+ u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER);
csqier |= FMC2_CSQIER_TCIE;
- fmc2->irq_state = FMC2_IRQ_SEQ;
+ nfc->irq_state = FMC2_IRQ_SEQ;
- writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+ writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER);
}
-/* Disable irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc)
{
- u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER);
+ u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER);
csqier &= ~FMC2_CSQIER_TCIE;
- writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER);
+ writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER);
- fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
}
-/* Clear irq sources in case of the sequencer is used */
-static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc)
{
- writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR);
+ writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, nfc->io_base + FMC2_CSQICR);
}
-/* Enable irq sources in case of bch is used */
-static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2,
- int mode)
+static inline void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc,
+ int mode)
{
- u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+ u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER);
if (mode == NAND_ECC_WRITE)
bchier |= FMC2_BCHIER_EPBRIE;
else
bchier |= FMC2_BCHIER_DERIE;
- fmc2->irq_state = FMC2_IRQ_BCH;
+ nfc->irq_state = FMC2_IRQ_BCH;
- writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+ writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER);
}
-/* Disable irq sources in case of bch is used */
-static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc)
{
- u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER);
+ u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER);
bchier &= ~FMC2_BCHIER_DERIE;
bchier &= ~FMC2_BCHIER_EPBRIE;
- writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER);
+ writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER);
- fmc2->irq_state = FMC2_IRQ_UNKNOWN;
+ nfc->irq_state = FMC2_IRQ_UNKNOWN;
}
-/* Clear irq sources in case of bch is used */
-static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2)
+static inline void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
{
- writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR);
+ writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR);
}
/*
* Enable ECC logic and reset syndrome/parity bits previously calculated
* Syndrome/parity bits is cleared by setting the ECCEN bit to 0
*/
-static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
+static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
- stm32_fmc2_set_ecc(fmc2, false);
+ stm32_fmc2_nfc_set_ecc(nfc, false);
if (chip->ecc.strength != FMC2_ECC_HAM) {
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
if (mode == NAND_ECC_WRITE)
pcr |= FMC2_PCR_WEN;
else
pcr &= ~FMC2_PCR_WEN;
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
- reinit_completion(&fmc2->complete);
- stm32_fmc2_clear_bch_irq(fmc2);
- stm32_fmc2_enable_bch_irq(fmc2, mode);
+ reinit_completion(&nfc->complete);
+ stm32_fmc2_nfc_clear_bch_irq(nfc);
+ stm32_fmc2_nfc_enable_bch_irq(nfc, mode);
}
- stm32_fmc2_set_ecc(fmc2, true);
+ stm32_fmc2_nfc_set_ecc(nfc, true);
}
/*
@@ -526,40 +502,37 @@ static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode)
* ECC is 3 bytes for 512 bytes of data (supports error correction up to
* max of 1-bit)
*/
-static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
+static inline void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc)
{
ecc[0] = ecc_sta;
ecc[1] = ecc_sta >> 8;
ecc[2] = ecc_sta >> 16;
}
-static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
- u8 *ecc)
+static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 sr, heccr;
int ret;
- ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
- sr, sr & FMC2_SR_NWRF, 10,
- FMC2_TIMEOUT_MS);
+ ret = readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR,
+ sr, sr & FMC2_SR_NWRF, 1,
+ 1000 * FMC2_TIMEOUT_MS);
if (ret) {
- dev_err(fmc2->dev, "ham timeout\n");
+ dev_err(nfc->dev, "ham timeout\n");
return ret;
}
- heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR);
-
- stm32_fmc2_ham_set_ecc(heccr, ecc);
-
- /* Disable ECC */
- stm32_fmc2_set_ecc(fmc2, false);
+ heccr = readl_relaxed(nfc->io_base + FMC2_HECCR);
+ stm32_fmc2_nfc_ham_set_ecc(heccr, ecc);
+ stm32_fmc2_nfc_set_ecc(nfc, false);
return 0;
}
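/*
 * Editorial sketch, not part of the patch: the timeout rework above keeps a
 * single FMC2_TIMEOUT_MS and converts it at the call site, because
 * readl_relaxed_poll_timeout() from <linux/iopoll.h> takes its sleep
 * interval and timeout in microseconds.  The register and bit names are
 * the driver's own; "my_wait_ready" is an illustrative helper.
 */
static int my_wait_ready(struct stm32_fmc2_nfc *nfc)
{
	u32 sr;

	/* Poll every 1 us, give up after FMC2_TIMEOUT_MS milliseconds. */
	return readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR, sr,
					  sr & FMC2_SR_NWRF, 1,
					  1000 * FMC2_TIMEOUT_MS);
}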
-static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_ham_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
u8 bit_position = 0, b0, b1, b2;
u32 byte_addr = 0, b;
@@ -615,28 +588,28 @@ static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat,
* ECC is 7/13 bytes for 512 bytes of data (supports error correction up to
* max of 4-bit/8-bit)
*/
-static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
- u8 *ecc)
+static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 bchpbr;
/* Wait until the BCH code is ready */
- if (!wait_for_completion_timeout(&fmc2->complete,
+ if (!wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "bch timeout\n");
- stm32_fmc2_disable_bch_irq(fmc2);
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
return -ETIMEDOUT;
}
/* Read parity bits */
- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1);
+ bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR1);
ecc[0] = bchpbr;
ecc[1] = bchpbr >> 8;
ecc[2] = bchpbr >> 16;
ecc[3] = bchpbr >> 24;
- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2);
+ bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR2);
ecc[4] = bchpbr;
ecc[5] = bchpbr >> 8;
ecc[6] = bchpbr >> 16;
@@ -644,24 +617,22 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
if (chip->ecc.strength == FMC2_ECC_BCH8) {
ecc[7] = bchpbr >> 24;
- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3);
+ bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR3);
ecc[8] = bchpbr;
ecc[9] = bchpbr >> 8;
ecc[10] = bchpbr >> 16;
ecc[11] = bchpbr >> 24;
- bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR4);
+ bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR4);
ecc[12] = bchpbr;
}
- /* Disable ECC */
- stm32_fmc2_set_ecc(fmc2, false);
+ stm32_fmc2_nfc_set_ecc(nfc, false);
return 0;
}
-/* BCH algorithm correction */
-static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
+static int stm32_fmc2_nfc_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
{
u32 bchdsr0 = ecc_sta[0];
u32 bchdsr1 = ecc_sta[1];
@@ -680,16 +651,16 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
return -EBADMSG;
- pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK;
- pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT;
- pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK;
- pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT;
- pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK;
- pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT;
- pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK;
- pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT;
+ pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+ pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+ pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+ pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+ pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+ pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+ pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+ pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
- den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT;
+ den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
for (i = 0; i < den; i++) {
if (pos[i] < eccsize * 8) {
change_bit(pos[i], (unsigned long *)dat);
@@ -700,34 +671,33 @@ static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta)
return nb_errs;
}
-static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u32 ecc_sta[5];
/* Wait until the decoding error is ready */
- if (!wait_for_completion_timeout(&fmc2->complete,
+ if (!wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "bch timeout\n");
- stm32_fmc2_disable_bch_irq(fmc2);
+ dev_err(nfc->dev, "bch timeout\n");
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
return -ETIMEDOUT;
}
- ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0);
- ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1);
- ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2);
- ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3);
- ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4);
+ ecc_sta[0] = readl_relaxed(nfc->io_base + FMC2_BCHDSR0);
+ ecc_sta[1] = readl_relaxed(nfc->io_base + FMC2_BCHDSR1);
+ ecc_sta[2] = readl_relaxed(nfc->io_base + FMC2_BCHDSR2);
+ ecc_sta[3] = readl_relaxed(nfc->io_base + FMC2_BCHDSR3);
+ ecc_sta[4] = readl_relaxed(nfc->io_base + FMC2_BCHDSR4);
- /* Disable ECC */
- stm32_fmc2_set_ecc(fmc2, false);
+ stm32_fmc2_nfc_set_ecc(nfc, false);
- return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta);
+ return stm32_fmc2_nfc_bch_decode(chip->ecc.size, dat, ecc_sta);
}
-static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
+static int stm32_fmc2_nfc_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret, i, s, stat, eccsize = chip->ecc.size;
@@ -789,21 +759,21 @@ static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf,
}
/* Sequencer read/write configuration */
-static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
- int raw, bool write_data)
+static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page,
+ int raw, bool write_data)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
u32 csqcfgr1, csqcfgr2, csqcfgr3;
u32 csqar1, csqar2;
u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN;
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
+ u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
if (write_data)
pcr |= FMC2_PCR_WEN;
else
pcr &= ~FMC2_PCR_WEN;
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
+ writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
/*
* - Set Program Page/Page Read command
@@ -812,11 +782,11 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
*/
csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T;
if (write_data)
- csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN);
+ csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN);
else
- csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) |
+ csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) |
FMC2_CSQCFGR1_CMD2EN |
- FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) |
+ FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) |
FMC2_CSQCFGR1_CMD2T;
/*
@@ -826,11 +796,12 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
* - Set timings
*/
if (write_data)
- csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN);
+ csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN);
else
- csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) |
+ csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) |
FMC2_CSQCFGR2_RCMD2EN |
- FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) |
+ FIELD_PREP(FMC2_CSQCFGR2_RCMD2,
+ NAND_CMD_RNDOUTSTART) |
FMC2_CSQCFGR2_RCMD1T |
FMC2_CSQCFGR2_RCMD2T;
if (!raw) {
@@ -842,7 +813,7 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
* - Set the number of sectors to be written
* - Set timings
*/
- csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1);
+ csqcfgr3 = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1);
if (write_data) {
csqcfgr3 |= FMC2_CSQCFGR3_RAC2T;
if (chip->options & NAND_ROW_ADDR_3)
@@ -856,8 +827,8 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
* Byte 1 and byte 2 => column, we start at 0x0
* Byte 3 and byte 4 => page
*/
- csqar1 = FMC2_CSQCAR1_ADDC3(page);
- csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8);
+ csqar1 = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page);
+ csqar1 |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8);
/*
* - Set chip enable number
@@ -865,43 +836,44 @@ static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page,
* - Calculate the number of address cycles to be issued
* - Set byte 5 of address cycle if needed
*/
- csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel);
+ csqar2 = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel);
if (chip->options & NAND_BUSWIDTH_16)
- csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1);
+ csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1);
else
- csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset);
+ csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset);
if (chip->options & NAND_ROW_ADDR_3) {
- csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5);
- csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16);
+ csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5);
+ csqar2 |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16);
} else {
- csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4);
+ csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4);
}
- writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1);
- writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2);
- writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3);
- writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1);
- writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2);
+ writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1);
+ writel_relaxed(csqcfgr2, nfc->io_base + FMC2_CSQCFGR2);
+ writel_relaxed(csqcfgr3, nfc->io_base + FMC2_CSQCFGR3);
+ writel_relaxed(csqar1, nfc->io_base + FMC2_CSQAR1);
+ writel_relaxed(csqar2, nfc->io_base + FMC2_CSQAR2);
}
-static void stm32_fmc2_dma_callback(void *arg)
+static void stm32_fmc2_nfc_dma_callback(void *arg)
{
complete((struct completion *)arg);
}
/* Read/write data from/to a page */
-static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
- int raw, bool write_data)
+static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
+ int raw, bool write_data)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct dma_async_tx_descriptor *desc_data, *desc_ecc;
struct scatterlist *sg;
- struct dma_chan *dma_ch = fmc2->dma_rx_ch;
+ struct dma_chan *dma_ch = nfc->dma_rx_ch;
enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE;
enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM;
- u32 csqcr = readl_relaxed(fmc2->io_base + FMC2_CSQCR);
+ u32 csqcr = readl_relaxed(nfc->io_base + FMC2_CSQCR);
int eccsteps = chip->ecc.steps;
int eccsize = chip->ecc.size;
+ unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS);
const u8 *p = buf;
int s, ret;
@@ -909,20 +881,20 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
if (write_data) {
dma_data_dir = DMA_TO_DEVICE;
dma_transfer_dir = DMA_MEM_TO_DEV;
- dma_ch = fmc2->dma_tx_ch;
+ dma_ch = nfc->dma_tx_ch;
}
- for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) {
+ for_each_sg(nfc->dma_data_sg.sgl, sg, eccsteps, s) {
sg_set_buf(sg, p, eccsize);
p += eccsize;
}
- ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl,
+ ret = dma_map_sg(nfc->dev, nfc->dma_data_sg.sgl,
eccsteps, dma_data_dir);
if (ret < 0)
return ret;
- desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl,
+ desc_data = dmaengine_prep_slave_sg(dma_ch, nfc->dma_data_sg.sgl,
eccsteps, dma_transfer_dir,
DMA_PREP_INTERRUPT);
if (!desc_data) {
@@ -930,10 +902,10 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
goto err_unmap_data;
}
- reinit_completion(&fmc2->dma_data_complete);
- reinit_completion(&fmc2->complete);
- desc_data->callback = stm32_fmc2_dma_callback;
- desc_data->callback_param = &fmc2->dma_data_complete;
+ reinit_completion(&nfc->dma_data_complete);
+ reinit_completion(&nfc->complete);
+ desc_data->callback = stm32_fmc2_nfc_dma_callback;
+ desc_data->callback_param = &nfc->dma_data_complete;
ret = dma_submit_error(dmaengine_submit(desc_data));
if (ret)
goto err_unmap_data;
@@ -942,19 +914,19 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw) {
/* Configure DMA ECC status */
- p = fmc2->ecc_buf;
- for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) {
- sg_set_buf(sg, p, fmc2->dma_ecc_len);
- p += fmc2->dma_ecc_len;
+ p = nfc->ecc_buf;
+ for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
+ sg_set_buf(sg, p, nfc->dma_ecc_len);
+ p += nfc->dma_ecc_len;
}
- ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
if (ret < 0)
goto err_unmap_data;
- desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch,
- fmc2->dma_ecc_sg.sgl,
+ desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
+ nfc->dma_ecc_sg.sgl,
eccsteps, dma_transfer_dir,
DMA_PREP_INTERRUPT);
if (!desc_ecc) {
@@ -962,76 +934,73 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
goto err_unmap_ecc;
}
- reinit_completion(&fmc2->dma_ecc_complete);
- desc_ecc->callback = stm32_fmc2_dma_callback;
- desc_ecc->callback_param = &fmc2->dma_ecc_complete;
+ reinit_completion(&nfc->dma_ecc_complete);
+ desc_ecc->callback = stm32_fmc2_nfc_dma_callback;
+ desc_ecc->callback_param = &nfc->dma_ecc_complete;
ret = dma_submit_error(dmaengine_submit(desc_ecc));
if (ret)
goto err_unmap_ecc;
- dma_async_issue_pending(fmc2->dma_ecc_ch);
+ dma_async_issue_pending(nfc->dma_ecc_ch);
}
- stm32_fmc2_clear_seq_irq(fmc2);
- stm32_fmc2_enable_seq_irq(fmc2);
+ stm32_fmc2_nfc_clear_seq_irq(nfc);
+ stm32_fmc2_nfc_enable_seq_irq(nfc);
/* Start the transfer */
csqcr |= FMC2_CSQCR_CSQSTART;
- writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR);
+ writel_relaxed(csqcr, nfc->io_base + FMC2_CSQCR);
/* Wait end of sequencer transfer */
- if (!wait_for_completion_timeout(&fmc2->complete,
- msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "seq timeout\n");
- stm32_fmc2_disable_seq_irq(fmc2);
+ if (!wait_for_completion_timeout(&nfc->complete, timeout)) {
+ dev_err(nfc->dev, "seq timeout\n");
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
dmaengine_terminate_all(dma_ch);
if (!write_data && !raw)
- dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
goto err_unmap_ecc;
}
/* Wait DMA data transfer completion */
- if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
- msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "data DMA timeout\n");
+ if (!wait_for_completion_timeout(&nfc->dma_data_complete, timeout)) {
+ dev_err(nfc->dev, "data DMA timeout\n");
dmaengine_terminate_all(dma_ch);
ret = -ETIMEDOUT;
}
/* Wait DMA ECC transfer completion */
if (!write_data && !raw) {
- if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
- msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
- dev_err(fmc2->dev, "ECC DMA timeout\n");
- dmaengine_terminate_all(fmc2->dma_ecc_ch);
+ if (!wait_for_completion_timeout(&nfc->dma_ecc_complete,
+ timeout)) {
+ dev_err(nfc->dev, "ECC DMA timeout\n");
+ dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
}
}
err_unmap_ecc:
if (!write_data && !raw)
- dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl,
+ dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
eccsteps, dma_data_dir);
err_unmap_data:
- dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir);
+ dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
return ret;
}
-static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
- const u8 *buf, int oob_required,
- int page, int raw)
+static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page, int raw)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
/* Configure the sequencer */
- stm32_fmc2_rw_page_init(chip, page, raw, true);
+ stm32_fmc2_nfc_rw_page_init(chip, page, raw, true);
/* Write the page */
- ret = stm32_fmc2_xfer(chip, buf, raw, true);
+ ret = stm32_fmc2_nfc_xfer(chip, buf, raw, true);
if (ret)
return ret;
@@ -1047,55 +1016,50 @@ static int stm32_fmc2_sequencer_write(struct nand_chip *chip,
return nand_prog_page_end_op(chip);
}
-static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip,
- const u8 *buf,
- int oob_required,
- int page)
+static int stm32_fmc2_nfc_seq_write_page(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
{
int ret;
- /* Select the target */
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
- return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false);
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, false);
}
-static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip,
- const u8 *buf,
- int oob_required,
- int page)
+static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
{
int ret;
- /* Select the target */
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
- return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true);
+ return stm32_fmc2_nfc_seq_write(chip, buf, oob_required, page, true);
}
/* Get a status indicating which sectors have errors */
-static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2)
+static inline u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc)
{
- u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR);
+ u32 csqemsr = readl_relaxed(nfc->io_base + FMC2_CSQEMSR);
return csqemsr & FMC2_CSQEMSR_SEM;
}
-static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
- u8 *read_ecc, u8 *calc_ecc)
+static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccstrength = chip->ecc.strength;
int i, s, eccsize = chip->ecc.size;
- u32 *ecc_sta = (u32 *)fmc2->ecc_buf;
- u16 sta_map = stm32_fmc2_get_mapping_status(fmc2);
+ u32 *ecc_sta = (u32 *)nfc->ecc_buf;
+ u16 sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) {
@@ -1104,10 +1068,11 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
if (eccstrength == FMC2_ECC_HAM) {
/* Ecc_sta = FMC2_HECCR */
if (sta_map & BIT(s)) {
- stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]);
- stat = stm32_fmc2_ham_correct(chip, dat,
- &read_ecc[i],
- &calc_ecc[i]);
+ stm32_fmc2_nfc_ham_set_ecc(*ecc_sta,
+ &calc_ecc[i]);
+ stat = stm32_fmc2_nfc_ham_correct(chip, dat,
+ &read_ecc[i],
+ &calc_ecc[i]);
}
ecc_sta++;
} else {
@@ -1119,8 +1084,8 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
* Ecc_sta[4] = FMC2_BCHDSR4
*/
if (sta_map & BIT(s))
- stat = stm32_fmc2_bch_decode(eccsize, dat,
- ecc_sta);
+ stat = stm32_fmc2_nfc_bch_decode(eccsize, dat,
+ ecc_sta);
ecc_sta += 5;
}
@@ -1143,30 +1108,29 @@ static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat,
return max_bitflips;
}
-static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
+static int stm32_fmc2_nfc_seq_read_page(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
u8 *ecc_calc = chip->ecc.calc_buf;
u8 *ecc_code = chip->ecc.code_buf;
u16 sta_map;
int ret;
- /* Select the target */
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
/* Configure the sequencer */
- stm32_fmc2_rw_page_init(chip, page, 0, false);
+ stm32_fmc2_nfc_rw_page_init(chip, page, 0, false);
/* Read the page */
- ret = stm32_fmc2_xfer(chip, buf, 0, false);
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 0, false);
if (ret)
return ret;
- sta_map = stm32_fmc2_get_mapping_status(fmc2);
+ sta_map = stm32_fmc2_nfc_get_mapping_status(nfc);
/* Check if errors happen */
if (likely(!sta_map)) {
@@ -1193,22 +1157,21 @@ static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf,
return chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
}
-static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
+static int stm32_fmc2_nfc_seq_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- /* Select the target */
- ret = stm32_fmc2_select_chip(chip, chip->cur_cs);
+ ret = stm32_fmc2_nfc_select_chip(chip, chip->cur_cs);
if (ret)
return ret;
/* Configure the sequencer */
- stm32_fmc2_rw_page_init(chip, page, 1, false);
+ stm32_fmc2_nfc_rw_page_init(chip, page, 1, false);
/* Read the page */
- ret = stm32_fmc2_xfer(chip, buf, 1, false);
+ ret = stm32_fmc2_nfc_xfer(chip, buf, 1, false);
if (ret)
return ret;
@@ -1221,31 +1184,31 @@ static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf,
return 0;
}
-static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id)
+static irqreturn_t stm32_fmc2_nfc_irq(int irq, void *dev_id)
{
- struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id;
+ struct stm32_fmc2_nfc *nfc = (struct stm32_fmc2_nfc *)dev_id;
- if (fmc2->irq_state == FMC2_IRQ_SEQ)
+ if (nfc->irq_state == FMC2_IRQ_SEQ)
/* Sequencer is used */
- stm32_fmc2_disable_seq_irq(fmc2);
- else if (fmc2->irq_state == FMC2_IRQ_BCH)
+ stm32_fmc2_nfc_disable_seq_irq(nfc);
+ else if (nfc->irq_state == FMC2_IRQ_BCH)
/* BCH is used */
- stm32_fmc2_disable_bch_irq(fmc2);
+ stm32_fmc2_nfc_disable_bch_irq(nfc);
- complete(&fmc2->complete);
+ complete(&nfc->complete);
return IRQ_HANDLED;
}
-static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
- unsigned int len, bool force_8bit)
+static void stm32_fmc2_nfc_read_data(struct nand_chip *chip, void *buf,
+ unsigned int len, bool force_8bit)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
- void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel];
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_r = nfc->data_base[nfc->cs_sel];
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 8-bit */
- stm32_fmc2_set_buswidth_16(fmc2, false);
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
@@ -1281,18 +1244,18 @@ static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf,
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 16-bit */
- stm32_fmc2_set_buswidth_16(fmc2, true);
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
}
-static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
- unsigned int len, bool force_8bit)
+static void stm32_fmc2_nfc_write_data(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
- void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel];
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ void __iomem *io_addr_w = nfc->data_base[nfc->cs_sel];
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 8-bit */
- stm32_fmc2_set_buswidth_16(fmc2, false);
+ stm32_fmc2_nfc_set_buswidth_16(nfc, false);
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) {
if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) {
@@ -1328,48 +1291,49 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
if (force_8bit && chip->options & NAND_BUSWIDTH_16)
/* Reconfigure bus width to 16-bit */
- stm32_fmc2_set_buswidth_16(fmc2, true);
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
}
-static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip,
+ unsigned long timeout_ms)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
const struct nand_sdr_timings *timings;
u32 isr, sr;
/* Check if there is no pending requests to the NAND flash */
- if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
+ if (readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_SR, sr,
sr & FMC2_SR_NWRF, 1,
- FMC2_TIMEOUT_US))
- dev_warn(fmc2->dev, "Waitrdy timeout\n");
+ 1000 * FMC2_TIMEOUT_MS))
+ dev_warn(nfc->dev, "Waitrdy timeout\n");
/* Wait tWB before R/B# signal is low */
timings = nand_get_sdr_timings(&chip->data_interface);
ndelay(PSEC_TO_NSEC(timings->tWB_max));
/* R/B# signal is low, clear high level flag */
- writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
+ writel_relaxed(FMC2_ICR_CIHLF, nfc->io_base + FMC2_ICR);
/* Wait R/B# signal is high */
- return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
+ return readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_ISR,
isr, isr & FMC2_ISR_IHLF,
5, 1000 * timeout_ms);
}
-static int stm32_fmc2_exec_op(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only)
+static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
const struct nand_op_instr *instr = NULL;
- unsigned int op_id, i;
+ unsigned int op_id, i, timeout;
int ret;
- ret = stm32_fmc2_select_chip(chip, op->cs);
- if (ret)
- return ret;
-
if (check_only)
+ return 0;
+
+ ret = stm32_fmc2_nfc_select_chip(chip, op->cs);
+ if (ret)
return ret;
for (op_id = 0; op_id < op->ninstrs; op_id++) {
@@ -1378,30 +1342,30 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
switch (instr->type) {
case NAND_OP_CMD_INSTR:
writeb_relaxed(instr->ctx.cmd.opcode,
- fmc2->cmd_base[fmc2->cs_sel]);
+ nfc->cmd_base[nfc->cs_sel]);
break;
case NAND_OP_ADDR_INSTR:
for (i = 0; i < instr->ctx.addr.naddrs; i++)
writeb_relaxed(instr->ctx.addr.addrs[i],
- fmc2->addr_base[fmc2->cs_sel]);
+ nfc->addr_base[nfc->cs_sel]);
break;
case NAND_OP_DATA_IN_INSTR:
- stm32_fmc2_read_data(chip, instr->ctx.data.buf.in,
- instr->ctx.data.len,
- instr->ctx.data.force_8bit);
+ stm32_fmc2_nfc_read_data(chip, instr->ctx.data.buf.in,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
break;
case NAND_OP_DATA_OUT_INSTR:
- stm32_fmc2_write_data(chip, instr->ctx.data.buf.out,
- instr->ctx.data.len,
- instr->ctx.data.force_8bit);
+ stm32_fmc2_nfc_write_data(chip, instr->ctx.data.buf.out,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit);
break;
case NAND_OP_WAITRDY_INSTR:
- ret = stm32_fmc2_waitrdy(chip,
- instr->ctx.waitrdy.timeout_ms);
+ timeout = instr->ctx.waitrdy.timeout_ms;
+ ret = stm32_fmc2_nfc_waitrdy(chip, timeout);
break;
}
}
@@ -1409,21 +1373,20 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
return ret;
}
-/* Controller initialization */
-static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc)
{
- u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR);
- u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1);
+ u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR);
+ u32 bcr1 = readl_relaxed(nfc->io_base + FMC2_BCR1);
/* Set CS used to undefined */
- fmc2->cs_sel = -1;
+ nfc->cs_sel = -1;
/* Enable wait feature and nand flash memory bank */
pcr |= FMC2_PCR_PWAITEN;
pcr |= FMC2_PCR_PBKEN;
/* Set buswidth to 8 bits mode for identification */
- pcr &= ~FMC2_PCR_PWID_MASK;
+ pcr &= ~FMC2_PCR_PWID;
/* ECC logic is disabled */
pcr &= ~FMC2_PCR_ECCEN;
@@ -1434,32 +1397,31 @@ static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2)
pcr &= ~FMC2_PCR_WEN;
/* Set default ECC sector size */
- pcr &= ~FMC2_PCR_ECCSS_MASK;
- pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048);
+ pcr &= ~FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
/* Set default tclr/tar timings */
- pcr &= ~FMC2_PCR_TCLR_MASK;
- pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT);
- pcr &= ~FMC2_PCR_TAR_MASK;
- pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT);
+ pcr &= ~FMC2_PCR_TCLR;
+ pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
+ pcr &= ~FMC2_PCR_TAR;
+ pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
/* Enable FMC2 controller */
bcr1 |= FMC2_BCR1_FMC2EN;
- writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1);
- writel_relaxed(pcr, fmc2->io_base + FMC2_PCR);
- writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM);
- writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT);
+ writel_relaxed(bcr1, nfc->io_base + FMC2_BCR1);
+ writel_relaxed(pcr, nfc->io_base + FMC2_PCR);
+ writel_relaxed(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM);
+ writel_relaxed(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT);
}
-/* Controller timings */
-static void stm32_fmc2_calc_timings(struct nand_chip *chip,
- const struct nand_sdr_timings *sdrt)
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdrt)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
struct stm32_fmc2_timings *tims = &nand->timings;
- unsigned long hclk = clk_get_rate(fmc2->clk);
+ unsigned long hclk = clk_get_rate(nfc->clk);
unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
unsigned long timing, tar, tclr, thiz, twait;
unsigned long tset_mem, tset_att, thold_mem, thold_att;
@@ -1583,8 +1545,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
}
-static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
- const struct nand_data_interface *conf)
+static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
{
const struct nand_sdr_timings *sdrt;
@@ -1595,71 +1557,67 @@ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
return 0;
- stm32_fmc2_calc_timings(chip, sdrt);
-
- /* Apply timings */
- stm32_fmc2_timings_init(chip);
+ stm32_fmc2_nfc_calc_timings(chip, sdrt);
+ stm32_fmc2_nfc_timings_init(chip);
return 0;
}
-/* DMA configuration */
-static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2)
+static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
{
int ret = 0;
- fmc2->dma_tx_ch = dma_request_chan(fmc2->dev, "tx");
- if (IS_ERR(fmc2->dma_tx_ch)) {
- ret = PTR_ERR(fmc2->dma_tx_ch);
+ nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx");
+ if (IS_ERR(nfc->dma_tx_ch)) {
+ ret = PTR_ERR(nfc->dma_tx_ch);
if (ret != -ENODEV)
- dev_err(fmc2->dev,
+ dev_err(nfc->dev,
"failed to request tx DMA channel: %d\n", ret);
- fmc2->dma_tx_ch = NULL;
+ nfc->dma_tx_ch = NULL;
goto err_dma;
}
- fmc2->dma_rx_ch = dma_request_chan(fmc2->dev, "rx");
- if (IS_ERR(fmc2->dma_rx_ch)) {
- ret = PTR_ERR(fmc2->dma_rx_ch);
+ nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx");
+ if (IS_ERR(nfc->dma_rx_ch)) {
+ ret = PTR_ERR(nfc->dma_rx_ch);
if (ret != -ENODEV)
- dev_err(fmc2->dev,
+ dev_err(nfc->dev,
"failed to request rx DMA channel: %d\n", ret);
- fmc2->dma_rx_ch = NULL;
+ nfc->dma_rx_ch = NULL;
goto err_dma;
}
- fmc2->dma_ecc_ch = dma_request_chan(fmc2->dev, "ecc");
- if (IS_ERR(fmc2->dma_ecc_ch)) {
- ret = PTR_ERR(fmc2->dma_ecc_ch);
+ nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc");
+ if (IS_ERR(nfc->dma_ecc_ch)) {
+ ret = PTR_ERR(nfc->dma_ecc_ch);
if (ret != -ENODEV)
- dev_err(fmc2->dev,
+ dev_err(nfc->dev,
"failed to request ecc DMA channel: %d\n", ret);
- fmc2->dma_ecc_ch = NULL;
+ nfc->dma_ecc_ch = NULL;
goto err_dma;
}
- ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
+ ret = sg_alloc_table(&nfc->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL);
if (ret)
return ret;
/* Allocate a buffer to store ECC status registers */
- fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN,
- GFP_KERNEL);
- if (!fmc2->ecc_buf)
+ nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+ if (!nfc->ecc_buf)
return -ENOMEM;
- ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
+ ret = sg_alloc_table(&nfc->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL);
if (ret)
return ret;
- init_completion(&fmc2->dma_data_complete);
- init_completion(&fmc2->dma_ecc_complete);
+ init_completion(&nfc->dma_data_complete);
+ init_completion(&nfc->dma_ecc_complete);
return 0;
err_dma:
if (ret == -ENODEV) {
- dev_warn(fmc2->dev,
+ dev_warn(nfc->dev,
"DMAs not defined in the DT, polling mode is used\n");
ret = 0;
}
@@ -1667,35 +1625,34 @@ err_dma:
return ret;
}
-/* NAND callbacks setup */
-static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
/*
* Specific callbacks to read/write a page depending on
* the mode (polling/sequencer) and the algo used (Hamming, BCH).
*/
- if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) {
+ if (nfc->dma_tx_ch && nfc->dma_rx_ch && nfc->dma_ecc_ch) {
/* DMA => use sequencer mode callbacks */
- chip->ecc.correct = stm32_fmc2_sequencer_correct;
- chip->ecc.write_page = stm32_fmc2_sequencer_write_page;
- chip->ecc.read_page = stm32_fmc2_sequencer_read_page;
- chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw;
- chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw;
+ chip->ecc.correct = stm32_fmc2_nfc_seq_correct;
+ chip->ecc.write_page = stm32_fmc2_nfc_seq_write_page;
+ chip->ecc.read_page = stm32_fmc2_nfc_seq_read_page;
+ chip->ecc.write_page_raw = stm32_fmc2_nfc_seq_write_page_raw;
+ chip->ecc.read_page_raw = stm32_fmc2_nfc_seq_read_page_raw;
} else {
/* No DMA => use polling mode callbacks */
- chip->ecc.hwctl = stm32_fmc2_hwctl;
+ chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
if (chip->ecc.strength == FMC2_ECC_HAM) {
/* Hamming is used */
- chip->ecc.calculate = stm32_fmc2_ham_calculate;
- chip->ecc.correct = stm32_fmc2_ham_correct;
+ chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
} else {
/* BCH is used */
- chip->ecc.calculate = stm32_fmc2_bch_calculate;
- chip->ecc.correct = stm32_fmc2_bch_correct;
- chip->ecc.read_page = stm32_fmc2_read_page;
+ chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
+ chip->ecc.read_page = stm32_fmc2_nfc_read_page;
}
}
@@ -1708,9 +1665,8 @@ static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip)
chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
}
-/* FMC2 layout */
-static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+static int stm32_fmc2_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -1724,8 +1680,8 @@ static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
return 0;
}
-static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+static int stm32_fmc2_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
@@ -1739,13 +1695,12 @@ static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
-static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = {
- .ecc = stm32_fmc2_nand_ooblayout_ecc,
- .free = stm32_fmc2_nand_ooblayout_free,
+static const struct mtd_ooblayout_ops stm32_fmc2_nfc_ooblayout_ops = {
+ .ecc = stm32_fmc2_nfc_ooblayout_ecc,
+ .free = stm32_fmc2_nfc_ooblayout_free,
};
-/* FMC2 caps */
-static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
{
/* Hamming */
if (strength == FMC2_ECC_HAM)
@@ -1759,14 +1714,13 @@ static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength)
return 8;
}
-NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes,
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
FMC2_ECC_STEP_SIZE,
FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
-/* FMC2 controller ops */
-static int stm32_fmc2_attach_chip(struct nand_chip *chip)
+static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip)
{
- struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
@@ -1778,49 +1732,45 @@ static int stm32_fmc2_attach_chip(struct nand_chip *chip)
* ECC sector size = 512
*/
if (chip->ecc.mode != NAND_ECC_HW) {
- dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n");
+ dev_err(nfc->dev, "nand_ecc_mode is not well defined in the DT\n");
return -EINVAL;
}
- ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps,
+ ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps,
mtd->oobsize - FMC2_BBM_LEN);
if (ret) {
- dev_err(fmc2->dev, "no valid ECC settings set\n");
+ dev_err(nfc->dev, "no valid ECC settings set\n");
return ret;
}
if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) {
- dev_err(fmc2->dev, "nand page size is not supported\n");
+ dev_err(nfc->dev, "nand page size is not supported\n");
return -EINVAL;
}
if (chip->bbt_options & NAND_BBT_USE_FLASH)
chip->bbt_options |= NAND_BBT_NO_OOB;
- /* NAND callbacks setup */
- stm32_fmc2_nand_callbacks_setup(chip);
+ stm32_fmc2_nfc_nand_callbacks_setup(chip);
- /* Define ECC layout */
- mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops);
+ mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops);
- /* Configure bus width to 16-bit */
if (chip->options & NAND_BUSWIDTH_16)
- stm32_fmc2_set_buswidth_16(fmc2, true);
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
return 0;
}
-static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = {
- .attach_chip = stm32_fmc2_attach_chip,
- .exec_op = stm32_fmc2_exec_op,
- .setup_data_interface = stm32_fmc2_setup_interface,
+static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = {
+ .attach_chip = stm32_fmc2_nfc_attach_chip,
+ .exec_op = stm32_fmc2_nfc_exec_op,
+ .setup_data_interface = stm32_fmc2_nfc_setup_interface,
};
-/* FMC2 probe */
-static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
- struct device_node *dn)
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc,
+ struct device_node *dn)
{
- struct stm32_fmc2_nand *nand = &fmc2->nand;
+ struct stm32_fmc2_nand *nand = &nfc->nand;
u32 cs;
int ret, i;
@@ -1829,29 +1779,29 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
nand->ncs /= sizeof(u32);
if (!nand->ncs) {
- dev_err(fmc2->dev, "invalid reg property size\n");
+ dev_err(nfc->dev, "invalid reg property size\n");
return -EINVAL;
}
for (i = 0; i < nand->ncs; i++) {
ret = of_property_read_u32_index(dn, "reg", i, &cs);
if (ret) {
- dev_err(fmc2->dev, "could not retrieve reg property: %d\n",
+ dev_err(nfc->dev, "could not retrieve reg property: %d\n",
ret);
return ret;
}
if (cs > FMC2_MAX_CE) {
- dev_err(fmc2->dev, "invalid reg value: %d\n", cs);
+ dev_err(nfc->dev, "invalid reg value: %d\n", cs);
return -EINVAL;
}
- if (fmc2->cs_assigned & BIT(cs)) {
- dev_err(fmc2->dev, "cs already assigned: %d\n", cs);
+ if (nfc->cs_assigned & BIT(cs)) {
+ dev_err(nfc->dev, "cs already assigned: %d\n", cs);
return -EINVAL;
}
- fmc2->cs_assigned |= BIT(cs);
+ nfc->cs_assigned |= BIT(cs);
nand->cs_used[i] = cs;
}
@@ -1860,25 +1810,25 @@ static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2,
return 0;
}
-static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
+static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc)
{
- struct device_node *dn = fmc2->dev->of_node;
+ struct device_node *dn = nfc->dev->of_node;
struct device_node *child;
int nchips = of_get_child_count(dn);
int ret = 0;
if (!nchips) {
- dev_err(fmc2->dev, "NAND chip not defined\n");
+ dev_err(nfc->dev, "NAND chip not defined\n");
return -EINVAL;
}
if (nchips > 1) {
- dev_err(fmc2->dev, "too many NAND chips defined\n");
+ dev_err(nfc->dev, "too many NAND chips defined\n");
return -EINVAL;
}
for_each_child_of_node(dn, child) {
- ret = stm32_fmc2_parse_child(fmc2, child);
+ ret = stm32_fmc2_nfc_parse_child(nfc, child);
if (ret < 0) {
of_node_put(child);
return ret;
@@ -1888,106 +1838,108 @@ static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2)
return ret;
}
-static int stm32_fmc2_probe(struct platform_device *pdev)
+static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct reset_control *rstc;
- struct stm32_fmc2_nfc *fmc2;
+ struct stm32_fmc2_nfc *nfc;
struct stm32_fmc2_nand *nand;
struct resource *res;
struct mtd_info *mtd;
struct nand_chip *chip;
int chip_cs, mem_region, ret, irq;
- fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL);
- if (!fmc2)
+ nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
return -ENOMEM;
- fmc2->dev = dev;
- nand_controller_init(&fmc2->base);
- fmc2->base.ops = &stm32_fmc2_nand_controller_ops;
+ nfc->dev = dev;
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &stm32_fmc2_nfc_controller_ops;
- ret = stm32_fmc2_parse_dt(fmc2);
+ ret = stm32_fmc2_nfc_parse_dt(nfc);
if (ret)
return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fmc2->io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(fmc2->io_base))
- return PTR_ERR(fmc2->io_base);
+ nfc->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->io_base))
+ return PTR_ERR(nfc->io_base);
- fmc2->io_phys_addr = res->start;
+ nfc->io_phys_addr = res->start;
for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE;
chip_cs++, mem_region += 3) {
- if (!(fmc2->cs_assigned & BIT(chip_cs)))
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
- fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res);
- if (IS_ERR(fmc2->data_base[chip_cs]))
- return PTR_ERR(fmc2->data_base[chip_cs]);
+ nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->data_base[chip_cs]))
+ return PTR_ERR(nfc->data_base[chip_cs]);
- fmc2->data_phys_addr[chip_cs] = res->start;
+ nfc->data_phys_addr[chip_cs] = res->start;
res = platform_get_resource(pdev, IORESOURCE_MEM,
mem_region + 1);
- fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
- if (IS_ERR(fmc2->cmd_base[chip_cs]))
- return PTR_ERR(fmc2->cmd_base[chip_cs]);
+ nfc->cmd_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->cmd_base[chip_cs]))
+ return PTR_ERR(nfc->cmd_base[chip_cs]);
res = platform_get_resource(pdev, IORESOURCE_MEM,
mem_region + 2);
- fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
- if (IS_ERR(fmc2->addr_base[chip_cs]))
- return PTR_ERR(fmc2->addr_base[chip_cs]);
+ nfc->addr_base[chip_cs] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nfc->addr_base[chip_cs]))
+ return PTR_ERR(nfc->addr_base[chip_cs]);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
- dev_name(dev), fmc2);
+ ret = devm_request_irq(dev, irq, stm32_fmc2_nfc_irq, 0,
+ dev_name(dev), nfc);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
- init_completion(&fmc2->complete);
+ init_completion(&nfc->complete);
- fmc2->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(fmc2->clk))
- return PTR_ERR(fmc2->clk);
+ nfc->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nfc->clk))
+ return PTR_ERR(nfc->clk);
- ret = clk_prepare_enable(fmc2->clk);
+ ret = clk_prepare_enable(nfc->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
rstc = devm_reset_control_get(dev, NULL);
- if (!IS_ERR(rstc)) {
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ goto err_clk_disable;
+ } else {
reset_control_assert(rstc);
reset_control_deassert(rstc);
}
- /* DMA setup */
- ret = stm32_fmc2_dma_setup(fmc2);
+ ret = stm32_fmc2_nfc_dma_setup(nfc);
if (ret)
- return ret;
+ goto err_release_dma;
- /* FMC2 init routine */
- stm32_fmc2_init(fmc2);
+ stm32_fmc2_nfc_init(nfc);
- nand = &fmc2->nand;
+ nand = &nfc->nand;
chip = &nand->chip;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
- chip->controller = &fmc2->base;
+ chip->controller = &nfc->base;
chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
- NAND_USE_BOUNCE_BUFFER;
+ NAND_USES_DMA;
/* Default ECC settings */
chip->ecc.mode = NAND_ECC_HW;
@@ -1997,86 +1949,91 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
/* Scan to find existence of the device */
ret = nand_scan(chip, nand->ncs);
if (ret)
- goto err_scan;
+ goto err_release_dma;
ret = mtd_device_register(mtd, NULL, 0);
if (ret)
- goto err_device_register;
+ goto err_nand_cleanup;
- platform_set_drvdata(pdev, fmc2);
+ platform_set_drvdata(pdev, nfc);
return 0;
-err_device_register:
+err_nand_cleanup:
nand_cleanup(chip);
-err_scan:
- if (fmc2->dma_ecc_ch)
- dma_release_channel(fmc2->dma_ecc_ch);
- if (fmc2->dma_tx_ch)
- dma_release_channel(fmc2->dma_tx_ch);
- if (fmc2->dma_rx_ch)
- dma_release_channel(fmc2->dma_rx_ch);
+err_release_dma:
+ if (nfc->dma_ecc_ch)
+ dma_release_channel(nfc->dma_ecc_ch);
+ if (nfc->dma_tx_ch)
+ dma_release_channel(nfc->dma_tx_ch);
+ if (nfc->dma_rx_ch)
+ dma_release_channel(nfc->dma_rx_ch);
- sg_free_table(&fmc2->dma_data_sg);
- sg_free_table(&fmc2->dma_ecc_sg);
+ sg_free_table(&nfc->dma_data_sg);
+ sg_free_table(&nfc->dma_ecc_sg);
- clk_disable_unprepare(fmc2->clk);
+err_clk_disable:
+ clk_disable_unprepare(nfc->clk);
return ret;
}
-static int stm32_fmc2_remove(struct platform_device *pdev)
+static int stm32_fmc2_nfc_remove(struct platform_device *pdev)
{
- struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev);
- struct stm32_fmc2_nand *nand = &fmc2->nand;
+ struct stm32_fmc2_nfc *nfc = platform_get_drvdata(pdev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ struct nand_chip *chip = &nand->chip;
+ int ret;
- nand_release(&nand->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
- if (fmc2->dma_ecc_ch)
- dma_release_channel(fmc2->dma_ecc_ch);
- if (fmc2->dma_tx_ch)
- dma_release_channel(fmc2->dma_tx_ch);
- if (fmc2->dma_rx_ch)
- dma_release_channel(fmc2->dma_rx_ch);
+ if (nfc->dma_ecc_ch)
+ dma_release_channel(nfc->dma_ecc_ch);
+ if (nfc->dma_tx_ch)
+ dma_release_channel(nfc->dma_tx_ch);
+ if (nfc->dma_rx_ch)
+ dma_release_channel(nfc->dma_rx_ch);
- sg_free_table(&fmc2->dma_data_sg);
- sg_free_table(&fmc2->dma_ecc_sg);
+ sg_free_table(&nfc->dma_data_sg);
+ sg_free_table(&nfc->dma_ecc_sg);
- clk_disable_unprepare(fmc2->clk);
+ clk_disable_unprepare(nfc->clk);
return 0;
}
-static int __maybe_unused stm32_fmc2_suspend(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_suspend(struct device *dev)
{
- struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
+ struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
- clk_disable_unprepare(fmc2->clk);
+ clk_disable_unprepare(nfc->clk);
pinctrl_pm_select_sleep_state(dev);
return 0;
}
-static int __maybe_unused stm32_fmc2_resume(struct device *dev)
+static int __maybe_unused stm32_fmc2_nfc_resume(struct device *dev)
{
- struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev);
- struct stm32_fmc2_nand *nand = &fmc2->nand;
+ struct stm32_fmc2_nfc *nfc = dev_get_drvdata(dev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
int chip_cs, ret;
pinctrl_pm_select_default_state(dev);
- ret = clk_prepare_enable(fmc2->clk);
+ ret = clk_prepare_enable(nfc->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
- stm32_fmc2_init(fmc2);
+ stm32_fmc2_nfc_init(nfc);
for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) {
- if (!(fmc2->cs_assigned & BIT(chip_cs)))
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
nand_reset(&nand->chip, chip_cs);
@@ -2085,27 +2042,27 @@ static int __maybe_unused stm32_fmc2_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend,
- stm32_fmc2_resume);
+static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend,
+ stm32_fmc2_nfc_resume);
-static const struct of_device_id stm32_fmc2_match[] = {
+static const struct of_device_id stm32_fmc2_nfc_match[] = {
{.compatible = "st,stm32mp15-fmc2"},
{}
};
-MODULE_DEVICE_TABLE(of, stm32_fmc2_match);
+MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match);
-static struct platform_driver stm32_fmc2_driver = {
- .probe = stm32_fmc2_probe,
- .remove = stm32_fmc2_remove,
+static struct platform_driver stm32_fmc2_nfc_driver = {
+ .probe = stm32_fmc2_nfc_probe,
+ .remove = stm32_fmc2_nfc_remove,
.driver = {
- .name = "stm32_fmc2_nand",
- .of_match_table = stm32_fmc2_match,
- .pm = &stm32_fmc2_pm_ops,
+ .name = "stm32_fmc2_nfc",
+ .of_match_table = stm32_fmc2_nfc_match,
+ .pm = &stm32_fmc2_nfc_pm_ops,
},
};
-module_platform_driver(stm32_fmc2_driver);
+module_platform_driver(stm32_fmc2_nfc_driver);
-MODULE_ALIAS("platform:stm32_fmc2_nand");
+MODULE_ALIAS("platform:stm32_fmc2_nfc");
MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 5f3e40b79fb1..ffbc1651fadc 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1698,7 +1698,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand,
ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
- nand->options |= NAND_USE_BOUNCE_BUFFER;
+ nand->options |= NAND_USES_DMA;
} else {
ecc->read_page = sunxi_nfc_hw_ecc_read_page;
ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
@@ -1907,7 +1907,8 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand,
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
const struct nand_op_parser *parser;
- sunxi_nfc_select_chip(nand, op->cs);
+ if (!check_only)
+ sunxi_nfc_select_chip(nand, op->cs);
if (sunxi_nand->sels[op->cs].rb >= 0)
parser = &sunxi_nfc_op_parser;
@@ -2003,7 +2004,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
dev_err(dev, "failed to register mtd device: %d\n", ret);
- nand_release(nand);
+ nand_cleanup(nand);
return ret;
}
@@ -2038,13 +2039,18 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
{
struct sunxi_nand_chip *sunxi_nand;
+ struct nand_chip *chip;
+ int ret;
while (!list_empty(&nfc->chips)) {
sunxi_nand = list_first_entry(&nfc->chips,
struct sunxi_nand_chip,
node);
- nand_release(&sunxi_nand->nand);
- sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc);
+ chip = &sunxi_nand->nand;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ sunxi_nand_ecc_cleanup(&chip->ecc);
list_del(&sunxi_nand->node);
}
}
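Editor's note: the sunxi cleanup above introduces the split that the remaining raw NAND removals in this section repeat: nand_release() is open-coded as mtd_device_unregister() followed by nand_cleanup(), with the unregister return code now checked via WARN_ON(). A minimal sketch of that teardown sequence (the helper name is hypothetical):

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/rawnand.h>

    /* Unregister the MTD device first and warn if that fails, then release
     * the NAND core resources attached to the chip. */
    static void example_nand_teardown(struct nand_chip *chip)
    {
            int ret;

            ret = mtd_device_unregister(nand_to_mtd(chip));
            WARN_ON(ret);
            nand_cleanup(chip);
    }

The tango, tmio, txx9, vf610 and xway hunks below apply the same sequence in their remove() callbacks.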
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index 9acf2de37ee0..246871e01027 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -568,7 +568,7 @@ static int chip_init(struct device *dev, struct device_node *np)
chip->legacy.select_chip = tango_select_chip;
chip->legacy.cmd_ctrl = tango_cmd_ctrl;
chip->legacy.dev_ready = tango_dev_ready;
- chip->options = NAND_USE_BOUNCE_BUFFER |
+ chip->options = NAND_USES_DMA |
NAND_NO_SUBPAGE_WRITE |
NAND_WAIT_TCCS;
chip->controller = &nfc->hw;
@@ -600,14 +600,19 @@ static int chip_init(struct device *dev, struct device_node *np)
static int tango_nand_remove(struct platform_device *pdev)
{
- int cs;
struct tango_nfc *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip;
+ int cs, ret;
dma_release_channel(nfc->chan);
for (cs = 0; cs < MAX_CS; ++cs) {
- if (nfc->chips[cs])
- nand_release(&nfc->chips[cs]->nand_chip);
+ if (nfc->chips[cs]) {
+ chip = &nfc->chips[cs]->nand_chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ }
}
return 0;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 3cc9a4c41443..f9d046b2cd3b 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -467,7 +467,9 @@ static int tegra_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
- tegra_nand_select_target(chip, op->cs);
+ if (!check_only)
+ tegra_nand_select_target(chip, op->cs);
+
return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
check_only);
}
@@ -1113,7 +1115,7 @@ static int tegra_nand_chips_init(struct device *dev,
if (!mtd->name)
mtd->name = "tegra_nand";
- chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA;
ret = nand_scan(chip, 1);
if (ret)
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index db030f1701ee..843a8683b737 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -448,7 +448,7 @@ static int tmio_probe(struct platform_device *dev)
if (!retval)
return retval;
- nand_release(nand_chip);
+ nand_cleanup(nand_chip);
err_irq:
tmio_hw_stop(dev, tmio);
@@ -458,8 +458,12 @@ err_irq:
static int tmio_remove(struct platform_device *dev)
{
struct tmio_nand *tmio = platform_get_drvdata(dev);
+ struct nand_chip *chip = &tmio->chip;
+ int ret;
- nand_release(&tmio->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
tmio_hw_stop(dev, tmio);
return 0;
}
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 2642d5bb3241..47d966871445 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -371,7 +371,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
static int __exit txx9ndfmc_remove(struct platform_device *dev)
{
struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
- int i;
+ int ret, i;
if (!drvdata)
return 0;
@@ -385,7 +385,9 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
chip = mtd_to_nand(mtd);
txx9_priv = nand_get_controller_data(chip);
- nand_release(chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 6b399a75f9ae..7248c5901183 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -502,7 +502,9 @@ static int vf610_nfc_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
- vf610_nfc_select_target(chip, op->cs);
+ if (!check_only)
+ vf610_nfc_select_target(chip, op->cs);
+
return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
check_only);
}
@@ -915,8 +917,12 @@ err_disable_clk:
static int vf610_nfc_remove(struct platform_device *pdev)
{
struct vf610_nfc *nfc = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &nfc->chip;
+ int ret;
- nand_release(&nfc->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
clk_disable_unprepare(nfc->clk);
return 0;
}
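Editor's note: the vf610 hunk above is the same exec_op() fix applied to sunxi, tegra and the stm32 controller earlier in this section. When check_only is set, the NAND core is only probing whether an operation is supported, so the controller must not touch the hardware, chip select included. A minimal sketch of the resulting shape (the example_ names are hypothetical placeholders):

    /* Only drive the bus when the core actually wants the operation run;
     * for a check_only call, hand the request straight to the op parser. */
    static int example_nfc_exec_op(struct nand_chip *chip,
                                   const struct nand_operation *op,
                                   bool check_only)
    {
            if (!check_only)
                    example_nfc_select_target(chip, op->cs);

            return nand_op_parser_exec_op(chip, &example_nfc_op_parser,
                                          op, check_only);
    }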
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 834f794816a9..94bfba994326 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -210,7 +210,7 @@ static int xway_nand_probe(struct platform_device *pdev)
err = mtd_device_register(mtd, NULL, 0);
if (err)
- nand_release(&data->chip);
+ nand_cleanup(&data->chip);
return err;
}
@@ -221,8 +221,12 @@ static int xway_nand_probe(struct platform_device *pdev)
static int xway_nand_remove(struct platform_device *pdev)
{
struct xway_nand_data *data = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &data->chip;
+ int ret;
- nand_release(&data->chip);
+ ret = mtd_device_unregister(nand_to_mtd(chip));

+ WARN_ON(ret);
+ nand_cleanup(chip);
return 0;
}
diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
index c86f2db8c882..a79e4d866b08 100644
--- a/drivers/mtd/parsers/cmdlinepart.c
+++ b/drivers/mtd/parsers/cmdlinepart.c
@@ -9,7 +9,7 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@<offset>][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk][slc]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* size is automatically truncated at end of device
@@ -92,7 +92,7 @@ static struct mtd_partition * newpart(char *s,
int name_len;
unsigned char *extra_mem;
char delim;
- unsigned int mask_flags;
+ unsigned int mask_flags, add_flags;
/* fetch the partition size */
if (*s == '-') {
@@ -109,6 +109,7 @@ static struct mtd_partition * newpart(char *s,
/* fetch partition name and flags */
mask_flags = 0; /* this is going to be a regular partition */
+ add_flags = 0;
delim = 0;
/* check for offset */
@@ -152,6 +153,12 @@ static struct mtd_partition * newpart(char *s,
s += 2;
}
+ /* if slc is found use emulated SLC mode on this partition*/
+ if (!strncmp(s, "slc", 3)) {
+ add_flags |= MTD_SLC_ON_MLC_EMULATION;
+ s += 3;
+ }
+
/* test if more partitions are following */
if (*s == ',') {
if (size == SIZE_REMAINING) {
@@ -184,6 +191,7 @@ static struct mtd_partition * newpart(char *s,
parts[this_part].size = size;
parts[this_part].offset = offset;
parts[this_part].mask_flags = mask_flags;
+ parts[this_part].add_flags = add_flags;
if (name)
strlcpy(extra_mem, name, name_len + 1);
else
@@ -218,12 +226,29 @@ static int mtdpart_setup_real(char *s)
struct cmdline_mtd_partition *this_mtd;
struct mtd_partition *parts;
int mtd_id_len, num_parts;
- char *p, *mtd_id;
+ char *p, *mtd_id, *semicol;
+
+ /*
+ * Replace the first ';' by a NULL char so strrchr can work
+ * properly.
+ */
+ semicol = strchr(s, ';');
+ if (semicol)
+ *semicol = '\0';
mtd_id = s;
- /* fetch <mtd-id> */
- p = strchr(s, ':');
+ /*
+ * fetch <mtd-id>. We use strrchr to ignore all ':' that could
+ * be present in the MTD name, only the last one is interpreted
+ * as an <mtd-id>/<part-definition> separator.
+ */
+ p = strrchr(s, ':');
+
+ /* Restore the ';' now. */
+ if (semicol)
+ *semicol = ';';
+
if (!p) {
pr_err("no mtd-id\n");
return -EINVAL;
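Editor's note: two user-visible changes land in cmdlinepart.c above: a new slc partition flag that sets MTD_SLC_ON_MLC_EMULATION (for example mtdparts=nand0:4m(kernel),-(rootfs)slc), and an mtd-id parser that tolerates colons inside the MTD name by masking the first ';' and splitting on the last ':'. A stand-alone user-space sketch of that split logic (illustrative only, not kernel code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char s[] = "spi0.0:flash:1m(boot)ro,-(data)slc;nand0:-(all)";
            char *semicol = strchr(s, ';');
            char *p;

            /* Mask the first ';' so only the current <mtddef> is scanned. */
            if (semicol)
                    *semicol = '\0';
            /* The last ':' separates <mtd-id> from the partition list, so
             * colons inside the MTD name survive. */
            p = strrchr(s, ':');
            if (semicol)
                    *semicol = ';';

            if (p)
                    printf("mtd-id: %.*s\n", (int)(p - s), s); /* spi0.0:flash */
            return 0;
    }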
diff --git a/drivers/mtd/parsers/ofpart.c b/drivers/mtd/parsers/ofpart.c
index 3caeabf27987..daf507c123e6 100644
--- a/drivers/mtd/parsers/ofpart.c
+++ b/drivers/mtd/parsers/ofpart.c
@@ -117,6 +117,9 @@ static int parse_fixed_partitions(struct mtd_info *master,
if (of_get_property(pp, "lock", &len))
parts[i].mask_flags |= MTD_POWERUP_LOCK;
+ if (of_property_read_bool(pp, "slc-mode"))
+ parts[i].add_flags |= MTD_SLC_ON_MLC_EMULATION;
+
i++;
}
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 6e816eafb312..ffc4b380f2b1 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig MTD_SPI_NOR
- tristate "SPI-NOR device support"
+ tristate "SPI NOR device support"
depends on MTD
depends on MTD && SPI_MASTER
select SPI_MEM
help
This is the framework for the SPI NOR which can be used by the SPI
- device drivers and the SPI-NOR device driver.
+ device drivers and the SPI NOR device driver.
if MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
index 10b86660b821..d89a5ea9446a 100644
--- a/drivers/mtd/spi-nor/controllers/Kconfig
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -21,11 +21,11 @@ config SPI_CADENCE_QUADSPI
Flash as an MTD device.
config SPI_HISI_SFC
- tristate "Hisilicon FMC SPI-NOR Flash Controller(SFC)"
+ tristate "Hisilicon FMC SPI NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST
depends on HAS_IOMEM
help
- This enables support for HiSilicon FMC SPI-NOR flash controller.
+ This enables support for HiSilicon FMC SPI NOR flash controller.
config SPI_NXP_SPIFI
tristate "NXP SPI Flash Interface (SPIFI)"
diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
index ae85e4c0e114..7225870e8b18 100644
--- a/drivers/mtd/spi-nor/controllers/aspeed-smc.c
+++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c
@@ -727,7 +727,7 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip)
/*
* TODO: Adjust clocks if fast read is supported and interpret
- * SPI-NOR flags to adjust controller settings.
+ * SPI NOR flags to adjust controller settings.
*/
if (chip->nor.read_proto == SNOR_PROTO_1_1_1) {
if (chip->nor.read_dummy == 0)
diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
index 6c7a4118752e..95c502173cbd 100644
--- a/drivers/mtd/spi-nor/controllers/hisi-sfc.c
+++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * HiSilicon FMC SPI-NOR flash controller driver
+ * HiSilicon FMC SPI NOR flash controller driver
*
* Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
*/
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 9a5b1a7c636a..5703e8313980 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ * SPI NOR driver for NXP SPI Flash Interface (SPIFI)
*
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
*
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index cc68ea84318e..0369d98b2d12 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -499,7 +499,7 @@ int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
* the flash is ready for new commands.
* @nor: pointer to 'struct spi_nor'.
*
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int spi_nor_xsr_ready(struct spi_nor *nor)
{
@@ -542,7 +542,7 @@ static void spi_nor_clear_sr(struct spi_nor *nor)
* for new commands.
* @nor: pointer to 'struct spi_nor'.
*
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int spi_nor_sr_ready(struct spi_nor *nor)
{
@@ -606,7 +606,7 @@ static void spi_nor_clear_fsr(struct spi_nor *nor)
* ready for new commands.
* @nor: pointer to 'struct spi_nor'.
*
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
@@ -640,14 +640,14 @@ static int spi_nor_fsr_ready(struct spi_nor *nor)
return -EIO;
}
- return nor->bouncebuf[0] & FSR_READY;
+ return !!(nor->bouncebuf[0] & FSR_READY);
}
/**
* spi_nor_ready() - Query the flash to see if it is ready for new commands.
* @nor: pointer to 'struct spi_nor'.
*
- * Return: 0 on success, -errno otherwise.
+ * Return: 1 if ready, 0 if not ready, -errno on errors.
*/
static int spi_nor_ready(struct spi_nor *nor)
{
@@ -2469,7 +2469,7 @@ static int spi_nor_select_read(struct spi_nor *nor,
nor->read_proto = read->proto;
/*
- * In the spi-nor framework, we don't need to make the difference
+ * In the SPI NOR framework, we don't need to make the difference
* between mode clock cycles and wait state clock cycles.
* Indeed, the value of the mode clock cycles is used by a QSPI
* flash memory to know whether it should enter or leave its 0-4-4
@@ -2675,7 +2675,7 @@ static int spi_nor_setup(struct spi_nor *nor,
/**
* spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
* settings based on MFR register and ->default_init() hook.
- * @nor: pointer to a 'struct spi-nor'.
+ * @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
@@ -2690,7 +2690,7 @@ static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
/**
* spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
* based on JESD216 SFDP standard.
- * @nor: pointer to a 'struct spi-nor'.
+ * @nor: pointer to a 'struct spi_nor'.
*
* The method has a roll-back mechanism: in case the SFDP parsing fails, the
* legacy flash parameters and settings will be restored.
@@ -2712,7 +2712,7 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor)
/**
* spi_nor_info_init_params() - Initialize the flash's parameters and settings
* based on nor->info data.
- * @nor: pointer to a 'struct spi-nor'.
+ * @nor: pointer to a 'struct spi_nor'.
*/
static void spi_nor_info_init_params(struct spi_nor *nor)
{
@@ -2841,7 +2841,7 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
/**
* spi_nor_init_params() - Initialize the flash's parameters and settings.
- * @nor: pointer to a 'struct spi-nor'.
+ * @nor: pointer to a 'struct spi_nor'.
*
* The flash parameters and settings are initialized based on a sequence of
* calls that are ordered by priority:
@@ -3126,7 +3126,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
/*
* Make sure the XSR_RDY flag is set before calling
* spi_nor_wait_till_ready(). Xilinx S3AN share MFR
- * with Atmel spi-nor
+ * with Atmel SPI NOR.
*/
if (info->flags & SPI_NOR_XSR_RDY)
nor->flags |= SNOR_F_READY_XSR_RDY;
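The kernel-doc hunks above change the *_ready() helpers to a tri-state return: 1 if the flash is ready, 0 if it is still busy, and a negative errno on communication errors, with the FSR bit normalized via !!. A minimal standalone C sketch (not kernel code) of how a caller consumes that convention, assuming a hypothetical read_fsr() helper and the usual FSR_READY bit-7 layout:

	#include <errno.h>
	#include <stdio.h>

	#define FSR_READY	0x80	/* bit 7: 0 = busy, 1 = ready */

	/* Hypothetical register read; a real driver would issue a Read Flag
	 * Status Register command over SPI.
	 */
	static int read_fsr(unsigned char *fsr)
	{
		*fsr = 0x80;		/* pretend the device reports "ready" */
		return 0;
	}

	/* Mirrors the tri-state convention: 1 ready, 0 busy, -errno on error. */
	static int flash_fsr_ready(void)
	{
		unsigned char fsr;
		int err = read_fsr(&fsr);

		if (err)
			return -EIO;

		return !!(fsr & FSR_READY);	/* normalize the bit to 0 or 1 */
	}

	int main(void)
	{
		int ret = flash_fsr_ready();

		if (ret < 0)
			printf("I/O error: %d\n", ret);
		else
			printf("%s\n", ret ? "ready" : "busy");
		return 0;
	}
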
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index ab0f963d630c..96735d83c77c 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -63,10 +63,16 @@ static const struct flash_info macronix_parts[] = {
.fixups = &mx25l25635_fixups },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ |
SPI_NOR_QUAD_READ) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+ { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 6c034b9718e2..3dca5b9af3b6 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -29,7 +29,9 @@ static const struct flash_info st_parts[] = {
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256,
- SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "mt25ql256a", INFO6(0x20ba19, 0x104400, 64 * 1024, 512,
@@ -59,6 +61,8 @@ static const struct flash_info st_parts[] = {
SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6) },
{ "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
+ SPI_NOR_4BIT_BP | SPI_NOR_BP3_SR_BIT6 |
NO_CHIP_ERASE) },
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048,
SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
index f6038d3a3684..55c0c508464b 100644
--- a/drivers/mtd/spi-nor/sfdp.c
+++ b/drivers/mtd/spi-nor/sfdp.c
@@ -21,10 +21,6 @@
#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
#define SFDP_SIGNATURE 0x50444653U
-#define SFDP_JESD216_MAJOR 1
-#define SFDP_JESD216_MINOR 0
-#define SFDP_JESD216A_MINOR 5
-#define SFDP_JESD216B_MINOR 6
struct sfdp_header {
u32 signature; /* Ox50444653U <=> "SFDP" */
@@ -437,7 +433,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
struct sfdp_bfpt bfpt;
size_t len;
int i, cmd, err;
- u32 addr;
+ u32 addr, val;
u16 half;
u8 erase_mask;
@@ -460,6 +456,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
/* Number of address bytes. */
switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
nor->addr_width = 3;
break;
@@ -472,21 +469,21 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
}
/* Flash Memory Density (in bits). */
- params->size = bfpt.dwords[BFPT_DWORD(2)];
- if (params->size & BIT(31)) {
- params->size &= ~BIT(31);
+ val = bfpt.dwords[BFPT_DWORD(2)];
+ if (val & BIT(31)) {
+ val &= ~BIT(31);
/*
* Prevent overflows on params->size. Anyway, a NOR of 2^64
* bits is unlikely to exist so this error probably means
* the BFPT we are reading is corrupted/wrong.
*/
- if (params->size > 63)
+ if (val > 63)
return -EINVAL;
- params->size = 1ULL << params->size;
+ params->size = 1ULL << val;
} else {
- params->size++;
+ params->size = val + 1;
}
params->size >>= 3; /* Convert to bytes. */
@@ -548,15 +545,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
SNOR_ERASE_TYPE_MASK;
/* Stop here if not JESD216 rev A or later. */
- if (bfpt_header->length < BFPT_DWORD_MAX)
+ if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
params);
/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
- params->page_size = bfpt.dwords[BFPT_DWORD(11)];
- params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
- params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
- params->page_size = 1U << params->page_size;
+ val = bfpt.dwords[BFPT_DWORD(11)];
+ val &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << val;
/* Quad Enable Requirements. */
switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
@@ -604,6 +601,11 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
return -EINVAL;
}
+ /* Stop here if not JESD216 rev C or later. */
+ if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+ params);
+
return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}
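The density hunk above decodes BFPT DWORD 2 into params->size without clobbering the raw value: when bit 31 is set the field holds N for a 2^N-bit density, otherwise it holds the density in bits minus one, and the result is converted to bytes at the end. A standalone sketch of that decode under the same JESD216 encoding (it returns 0 where the kernel would return -EINVAL):

	#include <stdint.h>
	#include <stdio.h>

	/* Decode the JESD216 BFPT "Flash Memory Density" DWORD into bytes.
	 * Returns 0 for a corrupted field (shift larger than 63).
	 */
	static uint64_t bfpt_density_to_bytes(uint32_t dword2)
	{
		uint64_t bits;

		if (dword2 & (1u << 31)) {
			uint32_t shift = dword2 & ~(1u << 31);

			if (shift > 63)	/* would overflow a 64-bit size */
				return 0;
			bits = 1ull << shift;
		} else {
			bits = (uint64_t)dword2 + 1;
		}

		return bits >> 3;	/* bits -> bytes */
	}

	int main(void)
	{
		/* 128 Mbit part encoded directly: density field = 2^27 - 1 */
		printf("%llu bytes\n",
		       (unsigned long long)bfpt_density_to_bytes(0x07ffffff));
		/* 2 Gbit part encoded as an exponent: bit 31 set, N = 31 */
		printf("%llu bytes\n",
		       (unsigned long long)bfpt_density_to_bytes((1u << 31) | 31));
		return 0;
	}
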
diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h
index e0a8ded04890..7f9846b3a1ad 100644
--- a/drivers/mtd/spi-nor/sfdp.h
+++ b/drivers/mtd/spi-nor/sfdp.h
@@ -7,14 +7,20 @@
#ifndef __LINUX_MTD_SFDP_H
#define __LINUX_MTD_SFDP_H
+/* SFDP revisions */
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
/* Basic Flash Parameter Table */
/*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs.
* They are indexed from 1 but C arrays are indexed from 0.
*/
#define BFPT_DWORD(i) ((i) - 1)
-#define BFPT_DWORD_MAX 16
+#define BFPT_DWORD_MAX 20
struct sfdp_bfpt {
u32 dwords[BFPT_DWORD_MAX];
@@ -22,6 +28,7 @@ struct sfdp_bfpt {
/* The first version of JESD216 defined only 9 DWORDs. */
#define BFPT_DWORD_MAX_JESD216 9
+#define BFPT_DWORD_MAX_JESD216B 16
/* 1st DWORD. */
#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
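The constants above let the BFPT parser stop early based on table length, since each JESD216 revision only grew the table. A rough standalone sketch of that length-to-revision mapping, using the same DWORD counts (9, 16, 20) as illustrative thresholds:

	#include <stdio.h>

	#define BFPT_DWORD_MAX_JESD216	9	/* original JESD216 */
	#define BFPT_DWORD_MAX_JESD216B	16	/* JESD216 rev A/B */
	#define BFPT_DWORD_MAX		20	/* JESD216 rev C/D */

	/* Map the BFPT length (in DWORDs) to the newest layout the parser can
	 * rely on; a sketch of the "stop here if not rev X or later" checks.
	 */
	static const char *bfpt_generation(int ndwords)
	{
		if (ndwords <= BFPT_DWORD_MAX_JESD216)
			return "JESD216 (9 DWORDs)";
		if (ndwords <= BFPT_DWORD_MAX_JESD216B)
			return "JESD216A/B (16 DWORDs)";
		return "JESD216C/D (20 DWORDs)";
	}

	int main(void)
	{
		printf("%s\n", bfpt_generation(16));
		return 0;
	}
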
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 6756202ace4b..e550cd5c9d3a 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -8,6 +8,27 @@
#include "core.h"
+static int
+s25fs_s_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * The S25FS-S chip family reports 512-byte pages in BFPT but
+ * in reality the write buffer still wraps at the safe default
+ * of 256 bytes. Overwrite the page size advertised by BFPT
+ * to get the writes working.
+ */
+ params->page_size = 256;
+
+ return 0;
+}
+
+static struct spi_nor_fixups s25fs_s_fixups = {
+ .post_bfpt = s25fs_s_post_bfpt_fixups,
+};
+
static const struct flash_info spansion_parts[] = {
/* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -22,16 +43,27 @@ static const struct flash_info spansion_parts[] = {
{ "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
- { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
- { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512,
- SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- USE_CLSR) },
+ { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
{ "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | USE_CLSR) },
- { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+ { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
+ .fixups = &s25fs_s_fixups, },
+ { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ USE_CLSR) },
+ { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512,
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
USE_CLSR) },
+ { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR)
+ .fixups = &s25fs_s_fixups, },
{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
@@ -70,6 +102,8 @@ static const struct flash_info spansion_parts[] = {
{ "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_4B_OPCODES) },
+ { "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1,
+ SPI_NOR_NO_ERASE) },
};
static void spansion_post_sfdp_fixups(struct spi_nor *nor)
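The s25fs_s fixup above hooks into the core after BFPT parsing and overrides the advertised page size with the safe 256-byte default. A minimal standalone sketch of that post-parse fixup pattern, with hypothetical flash_params/flash_fixups types standing in for the kernel structures:

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel's flash parameter structures. */
	struct flash_params {
		unsigned int page_size;
	};

	struct flash_fixups {
		int (*post_bfpt)(struct flash_params *params);
	};

	/* Chip-specific quirk: the table advertises 512-byte pages, but writes
	 * only behave with the conservative 256-byte default.
	 */
	static int s25fs_s_post_bfpt(struct flash_params *params)
	{
		params->page_size = 256;
		return 0;
	}

	static const struct flash_fixups s25fs_s_fixups = {
		.post_bfpt = s25fs_s_post_bfpt,
	};

	static int parse_bfpt(struct flash_params *params,
			      const struct flash_fixups *fixups)
	{
		params->page_size = 512;	/* value read from the (misleading) table */

		if (fixups && fixups->post_bfpt)
			return fixups->post_bfpt(params);	/* let the quirk win */
		return 0;
	}

	int main(void)
	{
		struct flash_params p;

		parse_bfpt(&p, &s25fs_s_fixups);
		printf("page size: %u\n", p.page_size);	/* prints 256 */
		return 0;
	}
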
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 17deabad57e1..5062af10f138 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -8,6 +8,31 @@
#include "core.h"
+static int
+w25q256_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * W25Q256JV supports 4B opcodes but W25Q256FV does not.
+ * Unfortunately, Winbond has re-used the same JEDEC ID for both
+ * variants which prevents us from defining a new entry in the parts
+ * table.
+ * To differentiate between W25Q256JV and W25Q256FV check SFDP header
+ * version: only JV has JESD216A compliant structure (version 5).
+ */
+ if (bfpt_header->major == SFDP_JESD216_MAJOR &&
+ bfpt_header->minor == SFDP_JESD216A_MINOR)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static struct spi_nor_fixups w25q256_fixups = {
+ .post_bfpt = w25q256_post_bfpt_fixups,
+};
+
static const struct flash_info winbond_parts[] = {
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
{ "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
@@ -53,8 +78,8 @@ static const struct flash_info winbond_parts[] = {
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
- SPI_NOR_4B_OPCODES) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &w25q256_fixups },
{ "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 12c02342149c..e85b04e9716b 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -867,8 +867,11 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
* Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
* MLC NAND is different and needs special care, otherwise UBI or UBIFS
* will die soon and you will lose all your data.
+ * Relax this rule if the partition we're attaching to operates in SLC
+ * mode.
*/
- if (mtd->type == MTD_MLCNANDFLASH) {
+ if (mtd->type == MTD_MLCNANDFLASH &&
+ !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) {
pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
mtd->index);
return -EINVAL;
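The UBI hunk above keeps refusing plain MLC NAND but now accepts a partition that is operated in SLC mode. A standalone sketch of that relaxed check, with illustrative values for the type code and flag (the real ones live in the MTD ABI headers):

	#include <stdbool.h>
	#include <stdio.h>

	#define MTD_MLCNANDFLASH		8	/* illustrative type code */
	#define MTD_SLC_ON_MLC_EMULATION	0x0400	/* illustrative flag value */

	struct mtd { int type; unsigned int flags; };

	/* Refuse MLC NAND unless the partition is emulated as SLC. */
	static bool attach_allowed(const struct mtd *mtd)
	{
		return mtd->type != MTD_MLCNANDFLASH ||
		       (mtd->flags & MTD_SLC_ON_MLC_EMULATION);
	}

	int main(void)
	{
		struct mtd plain_mlc = { MTD_MLCNANDFLASH, 0 };
		struct mtd slc_mode  = { MTD_MLCNANDFLASH, MTD_SLC_ON_MLC_EMULATION };

		printf("plain MLC: %d, SLC-emulated: %d\n",
		       attach_allowed(&plain_mlc), attach_allowed(&slc_mode));
		return 0;
	}
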
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index b486250923c5..83afc00e365a 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -116,6 +116,21 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_pool->size = 0;
pool->size = 0;
+ if (ubi->fm_anchor) {
+ wl_tree_add(ubi->fm_anchor, &ubi->free);
+ ubi->free_count++;
+ }
+ if (ubi->fm_next_anchor) {
+ wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+ ubi->free_count++;
+ }
+
+ /* All available PEBs are in ubi->free, now is the time to get
+ * the best anchor PEBs.
+ */
+ ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+ ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
@@ -271,26 +286,20 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
- struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
- /* Do we already have an anchor? */
- if (ubi->fm_anchor) {
- spin_unlock(&ubi->wl_lock);
- return 0;
- }
-
- /* See if we can find an anchor PEB on the list of free PEBs */
- anchor = ubi_wl_get_fm_peb(ubi, 1);
- if (anchor) {
- ubi->fm_anchor = anchor;
- spin_unlock(&ubi->wl_lock);
- return 0;
+ /* Do we have a next anchor? */
+ if (!ubi->fm_next_anchor) {
+ ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+ if (!ubi->fm_next_anchor)
+ /* Tell wear leveling to produce a new anchor PEB */
+ ubi->fm_do_produce_anchor = 1;
}
- /* No luck, trigger wear leveling to produce a new anchor PEB */
- ubi->fm_do_produce_anchor = 1;
+ /* Do wear leveling to get a new anchor PEB or check the
+ * existing next anchor candidate.
+ */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 53f448e7433a..022af59906aa 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1220,6 +1220,17 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
+ if (ubi->fm_next_anchor) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
+ set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 73c67e5c08f8..c2da77163f94 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -26,7 +26,7 @@
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/ubi.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ubi-media.h"
@@ -491,7 +491,8 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_anchor: The new anchor PEB used during fastmap update
+ * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
@@ -602,6 +603,7 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
+ struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 5146cce5fe32..27636063ed1b 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -687,20 +687,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
#ifdef CONFIG_MTD_UBI_FASTMAP
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (e1 && ubi->fm_next_anchor &&
+ (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ ubi->fm_do_produce_anchor = 1;
+ /* fm_next_anchor is no longer considered a good anchor
+ * candidate.
+ * NULL assignment also prevents multiple wear level checks
+ * of this PEB.
+ */
+ wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+ ubi->fm_next_anchor = NULL;
+ ubi->free_count++;
+ }
+
if (ubi->fm_do_produce_anchor) {
- e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
- /*
- * Anchor move within the anchor area is useless.
- */
- if (e2->pnum < UBI_FM_MAX_START)
- goto out_cancel;
-
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
@@ -1079,8 +1086,11 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);
- if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
- ubi->fm_anchor = e;
+ if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+ /* Abort anchor production, if needed it will be
+ * enabled again in the wear leveling started below.
+ */
+ ubi->fm_next_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 1538ad194cf4..1f2f2e8209c3 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -50,7 +50,7 @@ config CAIF_HSI
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
- depends on CAIF && HAS_DMA && VHOST_DPN
+ depends on CAIF && HAS_DMA
select VHOST_RING
select VIRTIO
select GENERIC_ALLOCATOR
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index cf6fa8fede33..521ebc072903 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1452,7 +1452,8 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
unsupported:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- dev_err(ds->dev, "Unsupported interface: %d\n", state->interface);
+ dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(state->interface), port);
return;
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 9f4205b4439b..d2b5ab403e06 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1079,8 +1079,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds),
- QCA8K_NUM_PORTS);
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a0af74c93971..dda4b8fc9525 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -355,7 +355,7 @@ error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL;
error_drop_packet:
-
+ __free_page(tx_info->xdp_rx_page);
return NETDEV_TX_OK;
}
@@ -1646,11 +1646,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
- if (xdp_verdict == XDP_TX) {
+ if (xdp_verdict == XDP_TX)
ena_free_rx_page(rx_ring,
&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
- res_budget--;
- }
for (i = 0; i < ena_rx_ctx.descs; i++) {
rx_ring->free_ids[next_to_clean] =
rx_ring->ena_bufs[i].req_id;
@@ -1658,8 +1656,10 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
}
- if (xdp_verdict == XDP_TX || xdp_verdict == XDP_DROP)
+ if (xdp_verdict != XDP_PASS) {
+ res_budget--;
continue;
+ }
break;
}
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 50fb66369415..ef512cf89abf 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -28,6 +28,7 @@
#include <linux/route.h>
#include <linux/string.h>
#include <linux/skbuff.h>
+#include <linux/pgtable.h>
#include <asm/irq.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
@@ -35,7 +36,6 @@
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 1381a474063f..e10aceb2b767 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/pgtable.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
#include <linux/route.h>
@@ -24,7 +25,6 @@
#include <linux/skbuff.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include "hplance.h"
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 72abd3f82249..3f2e4cdd0b83 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
/* Used for the temporal inet entries and routing */
#include <linux/socket.h>
#include <linux/route.h>
@@ -24,7 +25,6 @@
#include <linux/skbuff.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/mvme147hw.h>
/* We have 32K of RAM for the init block and buffers. This places
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index da7e3d4f4166..e1fde585fd0d 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -37,12 +37,12 @@ static const char version[] =
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/dvma.h>
#include <asm/idprom.h>
#include <asm/machines.h>
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index a21b2e60157e..ddece276ae23 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -94,10 +94,10 @@ static char lancestr[] = "LANCE";
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/pgtable.h>
#include <asm/byteorder.h> /* Used by the checksum routines */
#include <asm/idprom.h>
#include <asm/prom.h>
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 3e3711b60d01..1e4e402f07d7 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -24,11 +24,11 @@
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index b8ba2abf5b3a..9e5006e59215 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -19,10 +19,10 @@
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/macio.h>
#include "mace.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 41315712deb8..828499256004 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3357,7 +3357,7 @@ static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
adap->sge.egr_sz, adap->sge.blocked_fl);
len += sprintf(buf + len, "\n");
size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
- kvfree(buf);
+ kfree(buf);
return size;
}
@@ -3374,12 +3374,12 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kvfree(t);
+ kfree(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kvfree(t);
+ kfree(t);
return count;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0307e9c69a47..08439e215efe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -663,6 +663,7 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
static bool cxgb4_uld_in_use(struct adapter *adap)
{
const struct tid_info *t = &adap->tids;
@@ -670,7 +671,6 @@ static bool cxgb4_uld_in_use(struct adapter *adap)
return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
* @adap: adapter info
* @enable: 1 to enable / 0 to disable ktls settings.
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index ce85feaac357..b0d4b1984a70 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -40,9 +40,9 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/of_net.h>
+#include <linux/pgtable.h>
#include <linux/vmalloc.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 6e64989f8478..b47490be872c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -35,12 +35,12 @@
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/cpm2.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 1582d82483ec..8b51ee142fa3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -32,8 +32,8 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/mpc5xxx.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 552e7554a9f8..db791f60b884 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -42,7 +42,6 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
#include <asm/machdep.h>
-#include <net/sch_generic.h>
#include "ucc_geth.h"
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index bef676d93339..fc8c7cd67471 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -53,10 +53,10 @@
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
static char version[] __initdata =
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index f1d84921e42b..03e034918d14 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -53,10 +53,10 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
+#include <linux/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/bitops.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 011cd26953d9..4cc9abd61c43 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -325,7 +325,7 @@
cache_line_size())
/* Driver assumes that the last 3 bits are 0 */
-#define MVNETA_SKB_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) & ~0x7)
+#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
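The mvneta hunk replaces a round-down mask with a round-up ALIGN, so the reserved headroom can never fall below the larger of the two minimums. A standalone sketch of the difference, with illustrative (not authoritative) values chosen so that the maximum is not 8-byte aligned:

	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define MAX(a, b)	((a) > (b) ? (a) : (b))

	int main(void)
	{
		/* Illustrative values only; the real constants are config-dependent. */
		unsigned int xdp_headroom = 256;
		unsigned int skb_pad = 260;	/* deliberately not a multiple of 8 */

		unsigned int old_hr = MAX(xdp_headroom, skb_pad) & ~0x7u;	/* rounds down */
		unsigned int new_hr = ALIGN_UP(MAX(skb_pad, xdp_headroom), 8);	/* rounds up */

		/* old_hr (256) violates the 260-byte minimum; new_hr (264) does not. */
		printf("round-down: %u, round-up: %u\n", old_hr, new_hr);
		return 0;
	}
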
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 17243bb5ba91..eb8cf60ecf12 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -31,8 +31,8 @@
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#define DRIVER_NAME "pxa168-eth"
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index c72c4e1ea383..3d9aa7da95e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2345,8 +2345,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto out_free;
}
- dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
-
if (enable_4k_uar || !dev->persist->num_vfs) {
init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 1a11bc0e1612..d2986f1f2db0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -966,189 +966,6 @@ void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
-static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova)
-{
- int i, page_mask;
-
- if (npages > fmr->max_pages)
- return -EINVAL;
-
- page_mask = (1 << fmr->page_shift) - 1;
-
- /* We are getting page lists, so va must be page aligned. */
- if (iova & page_mask)
- return -EINVAL;
-
- /* Trust the user not to pass misaligned data in page_list */
- if (0)
- for (i = 0; i < npages; ++i) {
- if (page_list[i] & ~page_mask)
- return -EINVAL;
- }
-
- if (fmr->maps >= fmr->max_maps)
- return -EINVAL;
-
- return 0;
-}
-
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey)
-{
- u32 key;
- int i, err;
-
- err = mlx4_check_fmr(fmr, page_list, npages, iova);
- if (err)
- return err;
-
- ++fmr->maps;
-
- key = key_to_hw_index(fmr->mr.key);
- key += dev->caps.num_mpts;
- *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
-
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
-
- /* Make sure MPT status is visible before writing MTT entries */
- wmb();
-
- dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
-
- for (i = 0; i < npages; ++i)
- fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
-
- dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
-
- fmr->mpt->key = cpu_to_be32(key);
- fmr->mpt->lkey = cpu_to_be32(key);
- fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
- fmr->mpt->start = cpu_to_be64(iova);
-
- /* Make MTT entries are visible before setting MPT status */
- wmb();
-
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
-
- /* Make sure MPT status is visible before consumer can use FMR */
- wmb();
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
-
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int err = -ENOMEM;
-
- if (max_maps > dev->caps.max_fmr_maps)
- return -EINVAL;
-
- if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
- return -EINVAL;
-
- /* All MTTs must fit in the same page */
- if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE)
- return -EINVAL;
-
- fmr->page_shift = page_shift;
- fmr->max_pages = max_pages;
- fmr->max_maps = max_maps;
- fmr->maps = 0;
-
- err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
- page_shift, &fmr->mr);
- if (err)
- return err;
-
- fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.offset,
- &fmr->dma_handle);
-
- if (!fmr->mtts) {
- err = -ENOMEM;
- goto err_free;
- }
-
- return 0;
-
-err_free:
- (void) mlx4_mr_free(dev, &fmr->mr);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
-
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int err;
-
- err = mlx4_mr_enable(dev, &fmr->mr);
- if (err)
- return err;
-
- fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
- key_to_hw_index(fmr->mr.key), NULL);
- if (!fmr->mpt)
- return -ENOMEM;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
-
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey)
-{
- if (!fmr->maps)
- return;
-
- /* To unmap: it is sufficient to take back ownership from HW */
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
-
- /* Make sure MPT status is visible */
- wmb();
-
- fmr->maps = 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
-
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
- int ret;
-
- if (fmr->maps)
- return -EBUSY;
- if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
- /* In case of FMR was enabled and unmapped
- * make sure to give ownership of MPT back to HW
- * so HW2SW_MPT command will success.
- */
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
- /* Make sure MPT status is visible before changing MPT fields */
- wmb();
- fmr->mpt->length = 0;
- fmr->mpt->start = 0;
- /* Make sure MPT data is visible after changing MPT status */
- wmb();
- *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
- /* make sure MPT status is visible */
- wmb();
- }
-
- ret = mlx4_mr_free(dev, &fmr->mr);
- if (ret)
- return ret;
- fmr->mr.enabled = MLX4_MPT_DISABLED;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_free);
-
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index ce0a6837daa3..05f8d5a92862 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -391,8 +391,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev,
int trip, enum thermal_trend *trend)
{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
- struct mlxsw_thermal *thermal = tz->parent;
+ struct mlxsw_thermal *thermal = tzdev->devdata;
if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
return -EINVAL;
@@ -593,6 +592,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
return 0;
}
+static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev,
+ int trip, enum thermal_trend *trend)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+
+ if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+ return -EINVAL;
+
+ if (tzdev == thermal->tz_highest_dev)
+ return 1;
+
+ *trend = THERMAL_TREND_STABLE;
+ return 0;
+}
+
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
@@ -604,7 +619,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_trend_get,
+ .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@@ -643,7 +658,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.set_trip_temp = mlxsw_thermal_module_trip_temp_set,
.get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
.set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
- .get_trend = mlxsw_thermal_trend_get,
+ .get_trend = mlxsw_thermal_module_trend_get,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 36624e3c633b..c5c5c688b7e2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -985,7 +985,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
break;
case SPEED_1000:
data |= MAC_CR_CFG_H_;
- data |= MAC_CR_CFG_L_;
+ data &= ~MAC_CR_CFG_L_;
break;
}
lan743x_csr_write(adapter, MAC_CR, data);
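The lan743x hunk fixes a read-modify-write of the two speed configuration bits: selecting gigabit must set the high bit and clear the low bit, whereas the old code set both. A standalone sketch of that pattern with illustrative bit positions (the other speed cases are not shown in this hunk and are not assumed here):

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative bit positions; the real layout is in the driver headers. */
	#define MAC_CR_CFG_H	(1u << 14)
	#define MAC_CR_CFG_L	(1u << 13)

	/* Read-modify-write of a two-bit speed field: selecting one encoding
	 * must also clear the sibling bit, otherwise a previous setting leaks
	 * through.
	 */
	static uint32_t select_1000mbit(uint32_t mac_cr)
	{
		mac_cr |= MAC_CR_CFG_H;
		mac_cr &= ~MAC_CR_CFG_L;	/* the fix: clear, don't set, the low bit */
		return mac_cr;
	}

	int main(void)
	{
		uint32_t reg = MAC_CR_CFG_L;	/* pretend a slower speed was set before */

		printf("0x%08x\n", select_1000mbit(reg));
		return 0;
	}
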
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 8b018ed37b1b..ce3eca5d152b 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -36,9 +36,9 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/bootinfo.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/jazz.h>
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 1b5559aacb38..776b7d264dc3 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -51,8 +51,8 @@
#include <linux/dma-mapping.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/hwtest.h>
#include <asm/dma.h>
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index dda9ec7d9cee..afa166ff7aef 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -35,9 +35,9 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/dma.h>
static char xtsonic_string[] = "xtsonic";
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 66ed39d6f357..a49743d56b9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -1014,6 +1014,7 @@ int qed_device_num_ports(struct qed_dev *cdev);
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 83e798d4eebb..11367a248d55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1949,6 +1949,15 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
op->link_update(cookie, &if_link);
}
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+
+ if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
+ op->bw_update(cookie);
+}
+
static int qed_drain(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 98455f698f53..19c0c8864da1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -526,7 +526,6 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
dev->max_mw = 0;
- dev->max_fmr = QED_RDMA_MAX_FMR;
dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
dev->max_pkey = QED_RDMA_MAX_P_KEY;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 3898cae61e7a..1e69d5bb0a70 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -45,7 +45,6 @@
#include "qed_iwarp.h"
#include "qed_roce.h"
-#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
#define QED_RDMA_MAX_P_KEY (1)
#define QED_RDMA_MAX_WQE (0x7FFF)
#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 66876af814c4..20679fd4204b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
@@ -607,6 +608,9 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
int pos;
int rc;
+ if (is_kdump_kernel())
+ return 0;
+
if (IS_VF(p_hwfn->cdev))
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index aabeaf03135e..368e88565783 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -32,7 +32,6 @@
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
-#include <linux/crash_dump.h>
#include <linux/types.h>
#include "qed_vf.h"
@@ -41,12 +40,9 @@
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
-#define IS_VF(cdev) (is_kdump_kernel() ? \
- (0) : ((cdev)->b_is_vf))
-#define IS_PF(cdev) (is_kdump_kernel() ? \
- (1) : !((cdev)->b_is_vf))
-#define IS_PF_SRIOV(p_hwfn) (is_kdump_kernel() ? \
- (0) : !!((p_hwfn)->cdev->p_iov_info))
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b2d154258b07..756c05eb96f3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1265,7 +1265,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
- is_vf = is_kdump_kernel() ? false : true;
+ is_vf = true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 4d2ec9742cee..dad84ecf5a77 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3928,7 +3928,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
netdev_reset_queue(tp->dev);
}
-static void rtl8169_hw_reset(struct rtl8169_private *tp)
+static void rtl8169_hw_reset(struct rtl8169_private *tp, bool going_down)
{
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_rcu();
@@ -3938,6 +3938,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl_rx_close(tp);
+ if (going_down && tp->dev->wol_enabled)
+ goto no_reset;
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
@@ -3959,7 +3962,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
}
rtl_hw_reset(tp);
-
+no_reset:
rtl8169_tx_clear(tp);
rtl8169_init_ring_indexes(tp);
}
@@ -3972,7 +3975,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_disable(&tp->napi);
netif_stop_queue(dev);
- rtl8169_hw_reset(tp);
+ rtl8169_hw_reset(tp, false);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i);
@@ -4637,7 +4640,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
phy_stop(tp->phydev);
napi_disable(&tp->napi);
- rtl8169_hw_reset(tp);
+ rtl8169_hw_reset(tp, true);
rtl_pll_power_down(tp);
@@ -4942,8 +4945,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
- rtl8169_hw_reset(tp);
-
if (system_state == SYSTEM_POWER_OFF) {
if (tp->saved_wolopts) {
rtl_wol_suspend_quirk(tp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 5010af7dab4a..3c5df5eeed6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -225,7 +225,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
- return PTR_ERR(dwmac);
+ return -ENOMEM;
plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
if (IS_ERR(plat_dat))
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c5add0b45eed..34fdbc6d6031 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -5,6 +5,7 @@
*/
#include <linux/module.h>
+#include <linux/pgtable.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -34,7 +35,6 @@
#include <asm/io.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
-#include <asm/pgtable.h>
#include "sunbmac.h"
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index f0fe7bb2a750..54b53dbdb33c 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -52,7 +52,6 @@
#endif
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#ifdef CONFIG_PCI
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 2102b95ec347..577cd9753d8e 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -28,6 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
@@ -36,7 +37,6 @@
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include "sunqe.h"
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 310e6839c6e5..d9a5722f561b 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -382,8 +382,6 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
- dev_info(ctodev(card),
- "%s:allocate skb failed !!\n", __func__);
return -ENOMEM;
}
descr->buf_size = cpu_to_be32(bufsize);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6b461be1820b..75266580b586 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -987,9 +987,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (geneve->collect_md) {
info = skb_tunnel_info(skb);
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
- err = -EINVAL;
netdev_dbg(dev, "no tunnel metadata\n");
- goto tx_error;
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
} else {
info = &geneve->info;
@@ -1006,7 +1007,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!err))
return NETDEV_TX_OK;
-tx_error:
+
dev_kfree_skb(skb);
if (err == -ELOOP)
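The geneve hunk turns the missing-metadata case into an accounted drop: the skb is freed, tx_dropped is bumped, and NETDEV_TX_OK is returned so the core treats the packet as consumed rather than retrying it. A standalone sketch of that drop-accounting pattern, using hypothetical stand-in types instead of the real net_device and sk_buff:

	#include <stdbool.h>
	#include <stdio.h>

	#define NETDEV_TX_OK 0

	struct stats { unsigned long tx_dropped; };
	struct packet { int len; };

	static void free_packet(struct packet *p) { (void)p; /* would free the buffer */ }

	/* Drop path: consume the packet, account it, and report success so the
	 * caller does not requeue it.
	 */
	static int xmit(struct packet *p, struct stats *st, bool have_metadata)
	{
		if (!have_metadata) {
			free_packet(p);
			st->tx_dropped++;
			return NETDEV_TX_OK;
		}
		/* ... normal transmit path ... */
		return NETDEV_TX_OK;
	}

	int main(void)
	{
		struct stats st = { 0 };
		struct packet p = { 64 };

		xmit(&p, &st, false);
		printf("tx_dropped=%lu\n", st.tx_dropped);
		return 0;
	}
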
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 71cdef9fb56b..5ab53e9942f3 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1133,6 +1133,7 @@ static int __init yam_init_driver(void)
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING "yam: cannot register net device %s\n", dev->name);
+ free_netdev(dev);
goto error;
}
yam_devs[i] = dev;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 4017ae1692d8..f3c04981b8da 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -488,7 +488,7 @@ static int dp83867_verify_rgmii_cfg(struct phy_device *phydev)
return 0;
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83867_of_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867 = phydev->priv;
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index cfb22a21a2e6..53ed3abc26c9 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -176,7 +176,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
return 0;
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static int dp83869_of_init(struct phy_device *phydev)
{
struct dp83869_private *dp83869 = phydev->priv;
@@ -218,10 +218,13 @@ static int dp83869_of_init(struct phy_device *phydev)
ret = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_STRAP_STS1);
if (ret < 0)
return ret;
+
if (ret & DP83869_STRAP_MIRROR_ENABLED)
dp83869->port_mirroring = DP83869_PORT_MIRRORING_EN;
else
dp83869->port_mirroring = DP83869_PORT_MIRRORING_DIS;
+
+ ret = 0;
}
if (of_property_read_u32(of_node, "rx-fifo-depth",
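The dp83869 hunk resets ret to 0 after the strap register value has been consumed; the intent appears to be that only genuine errors propagate, rather than the positive register contents left over from the read. A standalone sketch of that pattern, with a hypothetical read_reg() standing in for phy_read_mmd() and an illustrative strap bit:

	#include <stdio.h>

	#define STRAP_MIRROR_ENABLED	0x0008	/* illustrative bit, not the real layout */

	/* Hypothetical register read: returns the non-negative register value,
	 * or a negative errno on failure.
	 */
	static int read_reg(void)
	{
		return 0x0048;	/* pretend the strap register reads back this value */
	}

	static int of_init(int *port_mirroring)
	{
		int ret = read_reg();

		if (ret < 0)
			return ret;	/* propagate real errors */

		*port_mirroring = !!(ret & STRAP_MIRROR_ENABLED);

		/* Without this, the positive register value would escape as if
		 * it were a status code.
		 */
		ret = 0;

		return ret;
	}

	int main(void)
	{
		int mirror;

		printf("ret=%d mirror=%d\n", of_init(&mirror), mirror);
		return 0;
	}
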
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 4a3d34f40cb9..c4641b1704d6 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
-#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/linkmode.h>
@@ -34,7 +33,6 @@ struct fixed_mdio_bus {
struct fixed_phy {
int addr;
struct phy_device *phydev;
- seqcount_t seqcount;
struct fixed_phy_status status;
bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
@@ -80,19 +78,17 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
struct fixed_phy_status state;
- int s;
-
- do {
- s = read_seqcount_begin(&fp->seqcount);
- fp->status.link = !fp->no_carrier;
- /* Issue callback if user registered it. */
- if (fp->link_update)
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
- /* Check the GPIO for change in status */
- fixed_phy_update(fp);
- state = fp->status;
- } while (read_seqcount_retry(&fp->seqcount, s));
+
+ fp->status.link = !fp->no_carrier;
+
+ /* Issue callback if user registered it. */
+ if (fp->link_update)
+ fp->link_update(fp->phydev->attached_dev,
+ &fp->status);
+
+ /* Check the GPIO for change in status */
+ fixed_phy_update(fp);
+ state = fp->status;
return swphy_read_reg(reg_num, &state);
}
@@ -150,8 +146,6 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
if (!fp)
return -ENOMEM;
- seqcount_init(&fp->seqcount);
-
if (irq != PHY_POLL)
fmb->mii_bus->irq[phy_addr] = irq;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4ea226566cec..c9ecf3c8c3fd 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -429,7 +429,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev)
return marvell_config_aneg(phydev);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
/* Set and/or override some configuration registers based on the
* marvell,reg-init property stored in the of_node for the phydev.
*
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 255fdfcc13a6..6ceee82b2839 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -764,6 +764,7 @@ EXPORT_SYMBOL(mdiobus_scan);
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
+ preempt_disable();
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->transfers);
@@ -778,6 +779,7 @@ static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
u64_stats_inc(&stats->writes);
out:
u64_stats_update_end(&stats->syncp);
+ preempt_enable();
}
/**
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index f828c917b9f7..fbcee5fce7b2 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -374,7 +374,7 @@ struct vsc8531_private {
#endif
};
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
struct vsc8531_edge_rate_table {
u32 vddmac;
u32 slowdown[8];
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 7ed0285206d0..5ddc44f87eaf 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -98,7 +98,7 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
},
};
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static const struct vsc8531_edge_rate_table edge_table[] = {
{MSCC_VDDMAC_3300, { 0, 2, 4, 7, 10, 17, 29, 53} },
{MSCC_VDDMAC_2500, { 0, 3, 6, 10, 14, 23, 37, 63} },
@@ -382,7 +382,7 @@ out_unlock:
mutex_unlock(&phydev->lock);
}
-#ifdef CONFIG_OF_MDIO
+#if IS_ENABLED(CONFIG_OF_MDIO)
static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
{
u32 vdd, sd;
@@ -1396,7 +1396,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 100Base-FX */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+ PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_100BASE_FX);
@@ -1405,7 +1405,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
/* Disable SerDes for 1000Base-X */
ret = vsc8584_cmd(phydev, PROC_CMD_FIBER_MEDIA_CONF |
- PROC_CMD_FIBER_PORT(vsc8531->base_addr) |
+ PROC_CMD_FIBER_PORT(vsc8531->addr) |
PROC_CMD_FIBER_DISABLE |
PROC_CMD_READ_MOD_WRITE_PORT |
PROC_CMD_RST_CONF_PORT | PROC_CMD_FIBER_1000BASE_X);
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index 2581ab724c34..f8f75a504a58 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -214,10 +214,8 @@ int ntb_default_port_number(struct ntb_dev *ntb)
case NTB_TOPO_B2B_DSD:
return NTB_PORT_SEC_DSD;
default:
- break;
+ return 0;
}
-
- return -EINVAL;
}
EXPORT_SYMBOL(ntb_default_port_number);
@@ -240,10 +238,8 @@ int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx)
case NTB_TOPO_B2B_DSD:
return NTB_PORT_PRI_USD;
default:
- break;
+ return 0;
}
-
- return -EINVAL;
}
EXPORT_SYMBOL(ntb_default_peer_port_number);
@@ -315,4 +311,3 @@ static void __exit ntb_driver_exit(void)
bus_unregister(&ntb_bus);
}
module_exit(ntb_driver_exit);
-
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 9e310e1ad4d0..88e1db65be02 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1191,10 +1191,6 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
- rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
- dma_get_mask(&pdev->dev));
- if (rc)
- goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index edae52384b8a..d54261f50851 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2660,12 +2660,6 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
dev_warn(&pdev->dev,
"Cannot set consistent DMA highmem bit mask\n");
}
- ret = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
- dma_get_mask(&pdev->dev));
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to set NTB device DMA bit mask\n");
- return ret;
- }
/*
* Enable the device advanced error reporting. It's not critical to
diff --git a/drivers/ntb/hw/intel/Makefile b/drivers/ntb/hw/intel/Makefile
index 60ec8a773eea..f80da0ba15b2 100644
--- a/drivers/ntb/hw/intel/Makefile
+++ b/drivers/ntb/hw/intel/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NTB_INTEL) += ntb_hw_intel.o
-ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o
+ntb_hw_intel-y := ntb_hw_gen1.o ntb_hw_gen3.o ntb_hw_gen4.o
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index bb57ec239029..423f9b8fbbcf 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -60,6 +60,7 @@
#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"
+#include "ntb_hw_gen4.h"
#define NTB_NAME "ntb_hw_intel"
#define NTB_DESC "Intel(R) PCI-E Non-Transparent Bridge Driver"
@@ -762,6 +763,8 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_gen3(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
+ else if (pdev_is_gen4(ndev->ntb.pdev))
+ return ndev_ntb4_debugfs_read(filp, ubuf, count, offp);
return -ENXIO;
}
@@ -1783,10 +1786,6 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
- rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
- dma_get_mask(&pdev->dev));
- if (rc)
- goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
@@ -1858,16 +1857,15 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
int rc, node;
node = dev_to_node(&pdev->dev);
+ ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
+ if (!ndev) {
+ rc = -ENOMEM;
+ goto err_ndev;
+ }
- if (pdev_is_gen1(pdev)) {
- ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
- if (!ndev) {
- rc = -ENOMEM;
- goto err_ndev;
- }
-
- ndev_init_struct(ndev, pdev);
+ ndev_init_struct(ndev, pdev);
+ if (pdev_is_gen1(pdev)) {
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
@@ -1875,17 +1873,8 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = xeon_init_dev(ndev);
if (rc)
goto err_init_dev;
-
} else if (pdev_is_gen3(pdev)) {
- ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
- if (!ndev) {
- rc = -ENOMEM;
- goto err_ndev;
- }
-
- ndev_init_struct(ndev, pdev);
ndev->ntb.ops = &intel_ntb3_ops;
-
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
@@ -1893,7 +1882,15 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
rc = gen3_init_dev(ndev);
if (rc)
goto err_init_dev;
+ } else if (pdev_is_gen4(pdev)) {
+ ndev->ntb.ops = &intel_ntb4_ops;
+ rc = intel_ntb_init_pci(ndev, pdev);
+ if (rc)
+ goto err_init_pci;
+ rc = gen4_init_dev(ndev);
+ if (rc)
+ goto err_init_dev;
} else {
rc = -EINVAL;
goto err_ndev;
@@ -1915,7 +1912,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@@ -1931,7 +1928,7 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
- if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
+ if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev) || pdev_is_gen4(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
@@ -2036,6 +2033,7 @@ static const struct file_operations intel_ntb_debugfs_info = {
};
static const struct pci_device_id intel_ntb_pci_tbl[] = {
+ /* GEN1 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
@@ -2051,7 +2049,12 @@ static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
+
+ /* GEN3 */
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
+
+ /* GEN4 */
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)},
{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index 544cf5c06f4d..1b759942d8af 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -140,6 +140,7 @@
#define NTB_HWERR_SB01BASE_LOCKUP BIT_ULL(1)
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
+#define NTB_HWERR_BAR_ALIGN BIT_ULL(4)
extern struct intel_b2b_addr xeon_b2b_usd_addr;
extern struct intel_b2b_addr xeon_b2b_dsd_addr;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
index c3397160db7f..ffcfc3e02c35 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -415,9 +415,8 @@ ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
return ret;
}
-static int intel_ntb3_link_enable(struct ntb_dev *ntb,
- enum ntb_speed max_speed,
- enum ntb_width max_width)
+int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
+ enum ntb_width max_width)
{
struct intel_ntb_dev *ndev;
u32 ntb_ctl;
@@ -532,7 +531,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return 0;
}
-static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
resource_size_t *db_size,
u64 *db_data, int db_bit)
{
@@ -563,7 +562,7 @@ static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
return 0;
}
-static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
int bit;
@@ -581,7 +580,7 @@ static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
return 0;
}
-static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
+u64 intel_ntb3_db_read(struct ntb_dev *ntb)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -590,7 +589,7 @@ static u64 intel_ntb3_db_read(struct ntb_dev *ntb)
ndev->self_reg->db_clear);
}
-static int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
+int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h
index 75fb86ca27bb..2bc5d8356045 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h
@@ -104,6 +104,14 @@ static inline void gen3_db_iowrite(u64 bits, void __iomem *mmio)
ssize_t ndev_ntb3_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp);
int gen3_init_dev(struct intel_ntb_dev *ndev);
+int intel_ntb3_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed,
+ enum ntb_width max_width);
+u64 intel_ntb3_db_read(struct ntb_dev *ntb);
+int intel_ntb3_db_clear(struct ntb_dev *ntb, u64 db_bits);
+int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits);
+int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit);
extern const struct ntb_dev_ops intel_ntb3_ops;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
new file mode 100644
index 000000000000..bc4541cbf8c6
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/ntb.h>
+#include <linux/log2.h>
+
+#include "ntb_hw_intel.h"
+#include "ntb_hw_gen1.h"
+#include "ntb_hw_gen3.h"
+#include "ntb_hw_gen4.h"
+
+static int gen4_poll_link(struct intel_ntb_dev *ndev);
+static int gen4_link_is_up(struct intel_ntb_dev *ndev);
+
+static const struct intel_ntb_reg gen4_reg = {
+ .poll_link = gen4_poll_link,
+ .link_is_up = gen4_link_is_up,
+ .db_ioread = gen3_db_ioread,
+ .db_iowrite = gen3_db_iowrite,
+ .db_size = sizeof(u32),
+ .ntb_ctl = GEN4_NTBCNTL_OFFSET,
+ .mw_bar = {2, 4},
+};
+
+static const struct intel_ntb_alt_reg gen4_pri_reg = {
+ .db_clear = GEN4_IM_INT_STATUS_OFFSET,
+ .db_mask = GEN4_IM_INT_DISABLE_OFFSET,
+ .spad = GEN4_IM_SPAD_OFFSET,
+};
+
+static const struct intel_ntb_xlat_reg gen4_sec_xlat = {
+ .bar2_limit = GEN4_IM23XLMT_OFFSET,
+ .bar2_xlat = GEN4_IM23XBASE_OFFSET,
+ .bar2_idx = GEN4_IM23XBASEIDX_OFFSET,
+};
+
+static const struct intel_ntb_alt_reg gen4_b2b_reg = {
+ .db_bell = GEN4_IM_DOORBELL_OFFSET,
+ .spad = GEN4_EM_SPAD_OFFSET,
+};
+
+static int gen4_poll_link(struct intel_ntb_dev *ndev)
+{
+ u16 reg_val;
+
+ /*
+ * We need to write to the DLLSCS bit in SLOTSTS before we
+ * can clear the hardware link interrupt on ICX NTB.
+ */
+ iowrite16(GEN4_SLOTSTS_DLLSCS, ndev->self_mmio + GEN4_SLOTSTS);
+ ndev->reg->db_iowrite(ndev->db_link_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_clear);
+
+ reg_val = ioread16(ndev->self_mmio + GEN4_LINK_STATUS_OFFSET);
+ if (reg_val == ndev->lnk_sta)
+ return 0;
+
+ ndev->lnk_sta = reg_val;
+
+ return 1;
+}
+
+static int gen4_link_is_up(struct intel_ntb_dev *ndev)
+{
+ return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
+}
+
+static int gen4_init_isr(struct intel_ntb_dev *ndev)
+{
+ int i;
+
+ /*
+ * The MSIX vectors and the interrupt status bits are not lined up
+ * on Gen3 (Skylake) and Gen4. By default the link status bit is bit
+ * 32, but it is mapped to MSIX vector 0. We need to fix them up so
+ * they line up. The vectors at reset are 1-32,0; we reprogram them
+ * to 0-32.
+ */
+ for (i = 0; i < GEN4_DB_MSIX_VECTOR_COUNT; i++)
+ iowrite8(i, ndev->self_mmio + GEN4_INTVEC_OFFSET + i);
+
+ return ndev_init_isr(ndev, GEN4_DB_MSIX_VECTOR_COUNT,
+ GEN4_DB_MSIX_VECTOR_COUNT,
+ GEN4_DB_MSIX_VECTOR_SHIFT,
+ GEN4_DB_TOTAL_SHIFT);
+}
+
+static int gen4_setup_b2b_mw(struct intel_ntb_dev *ndev,
+ const struct intel_b2b_addr *addr,
+ const struct intel_b2b_addr *peer_addr)
+{
+ struct pci_dev *pdev;
+ void __iomem *mmio;
+ phys_addr_t bar_addr;
+
+ pdev = ndev->ntb.pdev;
+ mmio = ndev->self_mmio;
+
+ /* setup incoming bar limits == base addrs (zero length windows) */
+ bar_addr = addr->bar2_addr64;
+ iowrite64(bar_addr, mmio + GEN4_IM23XLMT_OFFSET);
+ bar_addr = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
+ dev_dbg(&pdev->dev, "IM23XLMT %#018llx\n", bar_addr);
+
+ bar_addr = addr->bar4_addr64;
+ iowrite64(bar_addr, mmio + GEN4_IM45XLMT_OFFSET);
+ bar_addr = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
+ dev_dbg(&pdev->dev, "IM45XLMT %#018llx\n", bar_addr);
+
+ /* zero incoming translation addrs */
+ iowrite64(0, mmio + GEN4_IM23XBASE_OFFSET);
+ iowrite64(0, mmio + GEN4_IM45XBASE_OFFSET);
+
+ ndev->peer_mmio = ndev->self_mmio;
+
+ return 0;
+}
+
+static int gen4_init_ntb(struct intel_ntb_dev *ndev)
+{
+ int rc;
+
+ ndev->mw_count = XEON_MW_COUNT;
+ ndev->spad_count = GEN4_SPAD_COUNT;
+ ndev->db_count = GEN4_DB_COUNT;
+ ndev->db_link_mask = GEN4_DB_LINK_BIT;
+
+ ndev->self_reg = &gen4_pri_reg;
+ ndev->xlat_reg = &gen4_sec_xlat;
+ ndev->peer_reg = &gen4_b2b_reg;
+
+ if (ndev->ntb.topo == NTB_TOPO_B2B_USD)
+ rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_dsd_addr,
+ &xeon_b2b_usd_addr);
+ else
+ rc = gen4_setup_b2b_mw(ndev, &xeon_b2b_usd_addr,
+ &xeon_b2b_dsd_addr);
+ if (rc)
+ return rc;
+
+ ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
+
+ ndev->reg->db_iowrite(ndev->db_valid_mask,
+ ndev->self_mmio +
+ ndev->self_reg->db_mask);
+
+ return 0;
+}
+
+static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+ switch (ppd & GEN4_PPD_TOPO_MASK) {
+ case GEN4_PPD_TOPO_B2B_USD:
+ return NTB_TOPO_B2B_USD;
+ case GEN4_PPD_TOPO_B2B_DSD:
+ return NTB_TOPO_B2B_DSD;
+ }
+
+ return NTB_TOPO_NONE;
+}
+
+int gen4_init_dev(struct intel_ntb_dev *ndev)
+{
+ struct pci_dev *pdev = ndev->ntb.pdev;
+ u32 ppd1;
+ u16 lnkctl;
+ int rc;
+
+ ndev->reg = &gen4_reg;
+
+ if (pdev_is_ICX(pdev))
+ ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
+
+ ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
+ ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
+ ntb_topo_string(ndev->ntb.topo));
+ if (ndev->ntb.topo == NTB_TOPO_NONE)
+ return -EINVAL;
+
+ rc = gen4_init_ntb(ndev);
+ if (rc)
+ return rc;
+
+ /* init link setup */
+ lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
+ iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+
+ return gen4_init_isr(ndev);
+}
+
+ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct intel_ntb_dev *ndev;
+ void __iomem *mmio;
+ char *buf;
+ size_t buf_size;
+ ssize_t ret, off;
+ union { u64 v64; u32 v32; u16 v16; } u;
+
+ ndev = filp->private_data;
+ mmio = ndev->self_mmio;
+
+ buf_size = min(count, 0x800ul);
+
+ buf = kmalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ off = 0;
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB Device Information:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Connection Topology -\t%s\n",
+ ntb_topo_string(ndev->ntb.topo));
+
+ off += scnprintf(buf + off, buf_size - off,
+ "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNK STA (cached) -\t\t%#06x\n", ndev->lnk_sta);
+
+ if (!ndev->reg->link_is_up(ndev))
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tDown\n");
+ else {
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Status -\t\tUp\n");
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Speed -\t\tPCI-E Gen %u\n",
+ NTB_LNK_STA_SPEED(ndev->lnk_sta));
+ off += scnprintf(buf + off, buf_size - off,
+ "Link Width -\t\tx%u\n",
+ NTB_LNK_STA_WIDTH(ndev->lnk_sta));
+ }
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Memory Window Count -\t%u\n", ndev->mw_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Scratchpad Count -\t%u\n", ndev->spad_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Count -\t%u\n", ndev->db_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);
+
+ u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
+ off += scnprintf(buf + off, buf_size - off,
+ "Doorbell Mask -\t\t%#llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Incoming XLAT:\n");
+
+ u.v64 = ioread64(mmio + GEN4_IM23XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM23XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + GEN4_IM45XBASE_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM45XBASE -\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + GEN4_IM23XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM23XLMT -\t\t\t%#018llx\n", u.v64);
+
+ u.v64 = ioread64(mmio + GEN4_IM45XLMT_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "IM45XLMT -\t\t\t%#018llx\n", u.v64);
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Statistics:\n");
+
+ off += scnprintf(buf + off, buf_size - off,
+ "\nNTB Hardware Errors:\n");
+
+ if (!pci_read_config_word(ndev->ntb.pdev,
+ GEN4_DEVSTS_OFFSET, &u.v16))
+ off += scnprintf(buf + off, buf_size - off,
+ "DEVSTS -\t\t%#06x\n", u.v16);
+
+ u.v16 = ioread16(mmio + GEN4_LINK_STATUS_OFFSET);
+ off += scnprintf(buf + off, buf_size - off,
+ "LNKSTS -\t\t%#06x\n", u.v16);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ GEN4_UNCERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "UNCERRSTS -\t\t%#06x\n", u.v32);
+
+ if (!pci_read_config_dword(ndev->ntb.pdev,
+ GEN4_CORERRSTS_OFFSET, &u.v32))
+ off += scnprintf(buf + off, buf_size - off,
+ "CORERRSTS -\t\t%#06x\n", u.v32);
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
+ kfree(buf);
+ return ret;
+}
+
+static int intel_ntb4_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ unsigned long xlat_reg, limit_reg, idx_reg;
+ unsigned short base_idx, reg_val16;
+ resource_size_t bar_size, mw_size;
+ void __iomem *mmio;
+ u64 base, limit, reg_val;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
+ /* hardware requires that addr is aligned to bar size */
+ if (addr & (bar_size - 1))
+ return -EINVAL;
+ } else {
+ if (addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+ }
+
+ /* make sure the range fits in the usable mw size */
+ if (size > mw_size)
+ return -EINVAL;
+
+ mmio = ndev->self_mmio;
+ xlat_reg = ndev->xlat_reg->bar2_xlat + (idx * 0x10);
+ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10);
+ base = pci_resource_start(ndev->ntb.pdev, bar);
+
+ /* Set the limit if supported and size is not mw_size */
+ if (limit_reg && size != mw_size) {
+ limit = base + size;
+ base_idx = __ilog2_u64(size);
+ } else {
+ limit = base + mw_size;
+ base_idx = __ilog2_u64(mw_size);
+ }
+
+ /* set and verify setting the translation address */
+ iowrite64(addr, mmio + xlat_reg);
+ reg_val = ioread64(mmio + xlat_reg);
+ if (reg_val != addr) {
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(&ntb->pdev->dev, "BAR %d IMXBASE: %#Lx\n", bar, reg_val);
+
+ /* set and verify setting the limit */
+ iowrite64(limit, mmio + limit_reg);
+ reg_val = ioread64(mmio + limit_reg);
+ if (reg_val != limit) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ return -EIO;
+ }
+
+ dev_dbg(&ntb->pdev->dev, "BAR %d IMXLMT: %#Lx\n", bar, reg_val);
+
+ if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN) {
+ idx_reg = ndev->xlat_reg->bar2_idx + (idx * 0x2);
+ iowrite16(base_idx, mmio + idx_reg);
+ reg_val16 = ioread16(mmio + idx_reg);
+ if (reg_val16 != base_idx) {
+ iowrite64(base, mmio + limit_reg);
+ iowrite64(0, mmio + xlat_reg);
+ iowrite16(0, mmio + idx_reg);
+ return -EIO;
+ }
+ dev_dbg(&ntb->pdev->dev, "BAR %d IMBASEIDX: %#x\n", bar, reg_val16);
+ }
+
+ return 0;
+}
+
+static int intel_ntb4_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed, enum ntb_width max_width)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_ctl, ppd0;
+ u16 lnkctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ dev_dbg(&ntb->pdev->dev,
+ "Enabling link with max_speed %d max_width %d\n",
+ max_speed, max_width);
+
+ if (max_speed != NTB_SPEED_AUTO)
+ dev_dbg(&ntb->pdev->dev,
+ "ignoring max_speed %d\n", max_speed);
+ if (max_width != NTB_WIDTH_AUTO)
+ dev_dbg(&ntb->pdev->dev,
+ "ignoring max_width %d\n", max_width);
+
+ ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
+ ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
+ iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ lnkctl &= ~GEN4_LINK_CTRL_LINK_DISABLE;
+ iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+
+ /* start link training in PPD0 */
+ ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
+ ppd0 |= GEN4_PPD_LINKTRN;
+ iowrite32(ppd0, ndev->self_mmio + GEN4_PPD0_OFFSET);
+
+ /* make sure link training has started */
+ ppd0 = ioread32(ndev->self_mmio + GEN4_PPD0_OFFSET);
+ if (!(ppd0 & GEN4_PPD_LINKTRN)) {
+ dev_warn(&ntb->pdev->dev, "Link is not training\n");
+ return -ENXIO;
+ }
+
+ ndev->dev_up = 1;
+
+ return 0;
+}
+
+static int intel_ntb4_link_disable(struct ntb_dev *ntb)
+{
+ struct intel_ntb_dev *ndev;
+ u32 ntb_cntl;
+ u16 lnkctl;
+
+ ndev = container_of(ntb, struct intel_ntb_dev, ntb);
+
+ dev_dbg(&ntb->pdev->dev, "Disabling link\n");
+
+ /* clear the snoop bits */
+ ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
+ ntb_cntl &= ~(NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP);
+ ntb_cntl &= ~(NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP);
+ iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);
+
+ lnkctl = ioread16(ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
+ iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+
+ ndev->dev_up = 0;
+
+ return 0;
+}
+
+static int intel_ntb4_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
+ resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+ resource_size_t bar_size, mw_size;
+ int bar;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (idx >= ndev->b2b_idx && !ndev->b2b_off)
+ idx += 1;
+
+ bar = ndev_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
+
+ bar_size = pci_resource_len(ndev->ntb.pdev, bar);
+
+ if (idx == ndev->b2b_idx)
+ mw_size = bar_size - ndev->b2b_off;
+ else
+ mw_size = bar_size;
+
+ if (addr_align) {
+ if (ndev->hwerr_flags & NTB_HWERR_BAR_ALIGN)
+ *addr_align = pci_resource_len(ndev->ntb.pdev, bar);
+ else
+ *addr_align = PAGE_SIZE;
+ }
+
+ if (size_align)
+ *size_align = 1;
+
+ if (size_max)
+ *size_max = mw_size;
+
+ return 0;
+}
+
+const struct ntb_dev_ops intel_ntb4_ops = {
+ .mw_count = intel_ntb_mw_count,
+ .mw_get_align = intel_ntb4_mw_get_align,
+ .mw_set_trans = intel_ntb4_mw_set_trans,
+ .peer_mw_count = intel_ntb_peer_mw_count,
+ .peer_mw_get_addr = intel_ntb_peer_mw_get_addr,
+ .link_is_up = intel_ntb_link_is_up,
+ .link_enable = intel_ntb4_link_enable,
+ .link_disable = intel_ntb4_link_disable,
+ .db_valid_mask = intel_ntb_db_valid_mask,
+ .db_vector_count = intel_ntb_db_vector_count,
+ .db_vector_mask = intel_ntb_db_vector_mask,
+ .db_read = intel_ntb3_db_read,
+ .db_clear = intel_ntb3_db_clear,
+ .db_set_mask = intel_ntb_db_set_mask,
+ .db_clear_mask = intel_ntb_db_clear_mask,
+ .peer_db_addr = intel_ntb3_peer_db_addr,
+ .peer_db_set = intel_ntb3_peer_db_set,
+ .spad_is_unsafe = intel_ntb_spad_is_unsafe,
+ .spad_count = intel_ntb_spad_count,
+ .spad_read = intel_ntb_spad_read,
+ .spad_write = intel_ntb_spad_write,
+ .peer_spad_addr = intel_ntb_peer_spad_addr,
+ .peer_spad_read = intel_ntb_peer_spad_read,
+ .peer_spad_write = intel_ntb_peer_spad_write,
+};
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
new file mode 100644
index 000000000000..a868c788de02
--- /dev/null
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#ifndef _NTB_INTEL_GEN4_H_
+#define _NTB_INTEL_GEN4_H_
+
+#include "ntb_hw_intel.h"
+
+/* Supported PCI device revision range for ICX */
+#define PCI_DEVICE_REVISION_ICX_MIN 0x2
+#define PCI_DEVICE_REVISION_ICX_MAX 0xF
+
+/* Intel Gen4 NTB hardware */
+/* PCIe config space */
+#define GEN4_IMBAR23SZ_OFFSET 0x00c4
+#define GEN4_IMBAR45SZ_OFFSET 0x00c5
+#define GEN4_EMBAR23SZ_OFFSET 0x00c6
+#define GEN4_EMBAR45SZ_OFFSET 0x00c7
+#define GEN4_DEVCTRL_OFFSET 0x0048
+#define GEN4_DEVSTS_OFFSET 0x004a
+#define GEN4_UNCERRSTS_OFFSET 0x0104
+#define GEN4_CORERRSTS_OFFSET 0x0110
+
+/* BAR0 MMIO */
+#define GEN4_NTBCNTL_OFFSET 0x0000
+#define GEN4_IM23XBASE_OFFSET 0x0010 /* IMBAR1XBASE */
+#define GEN4_IM23XLMT_OFFSET 0x0018 /* IMBAR1XLMT */
+#define GEN4_IM45XBASE_OFFSET 0x0020 /* IMBAR2XBASE */
+#define GEN4_IM45XLMT_OFFSET 0x0028 /* IMBAR2XLMT */
+#define GEN4_IM_INT_STATUS_OFFSET 0x0040
+#define GEN4_IM_INT_DISABLE_OFFSET 0x0048
+#define GEN4_INTVEC_OFFSET 0x0050 /* 0-32 vecs */
+#define GEN4_IM23XBASEIDX_OFFSET 0x0074
+#define GEN4_IM45XBASEIDX_OFFSET 0x0076
+#define GEN4_IM_SPAD_OFFSET 0x0080 /* 0-15 SPADs */
+#define GEN4_IM_SPAD_SEM_OFFSET 0x00c0 /* SPAD hw semaphore */
+#define GEN4_IM_SPAD_STICKY_OFFSET 0x00c4 /* sticky SPAD */
+#define GEN4_IM_DOORBELL_OFFSET 0x0100 /* 0-31 doorbells */
+#define GEN4_EM_SPAD_OFFSET 0x8080
+/* note, link status is now in MMIO and not config space for NTB */
+#define GEN4_LINK_CTRL_OFFSET 0xb050
+#define GEN4_LINK_STATUS_OFFSET 0xb052
+#define GEN4_PPD0_OFFSET 0xb0d4
+#define GEN4_PPD1_OFFSET 0xb4c0
+#define GEN4_LTSSMSTATEJMP 0xf040
+
+#define GEN4_PPD_CLEAR_TRN 0x0001
+#define GEN4_PPD_LINKTRN 0x0008
+#define GEN4_PPD_CONN_MASK 0x0300
+#define GEN4_PPD_CONN_B2B 0x0200
+#define GEN4_PPD_DEV_MASK 0x1000
+#define GEN4_PPD_DEV_DSD 0x1000
+#define GEN4_PPD_DEV_USD 0x0000
+#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010
+
+#define GEN4_SLOTSTS 0xb05a
+#define GEN4_SLOTSTS_DLLSCS 0x100
+
+#define GEN4_PPD_TOPO_MASK (GEN4_PPD_CONN_MASK | GEN4_PPD_DEV_MASK)
+#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD)
+#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD)
+
+#define GEN4_DB_COUNT 32
+#define GEN4_DB_LINK 32
+#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK)
+#define GEN4_DB_MSIX_VECTOR_COUNT 33
+#define GEN4_DB_MSIX_VECTOR_SHIFT 1
+#define GEN4_DB_TOTAL_SHIFT 33
+#define GEN4_SPAD_COUNT 16
+
+#define NTB_CTL_E2I_BAR23_SNOOP 0x000004
+#define NTB_CTL_E2I_BAR23_NOSNOOP 0x000008
+#define NTB_CTL_I2E_BAR23_SNOOP 0x000010
+#define NTB_CTL_I2E_BAR23_NOSNOOP 0x000020
+#define NTB_CTL_E2I_BAR45_SNOOP 0x000040
+#define NTB_CTL_E2I_BAR45_NOSNOOP 0x000080
+#define NTB_CTL_I2E_BAR45_SNOOP 0x000100
+#define NTB_CTL_I2E_BAR45_NOSNOOP 0x000200
+#define NTB_CTL_BUSNO_DIS_INC 0x000400
+#define NTB_CTL_LINK_DOWN 0x010000
+
+#define NTB_SJC_FORCEDETECT 0x000004
+
+ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp);
+int gen4_init_dev(struct intel_ntb_dev *ndev);
+
+extern const struct ntb_dev_ops intel_ntb4_ops;
+
+static inline int pdev_is_ICX(struct pci_dev *pdev)
+{
+ if (pdev_is_gen4(pdev) &&
+ pdev->revision >= PCI_DEVICE_REVISION_ICX_MIN &&
+ pdev->revision <= PCI_DEVICE_REVISION_ICX_MAX)
+ return 1;
+ return 0;
+}
+
+#endif
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h
index e071e28bca3f..d61fcd91714b 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.h
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.h
@@ -72,6 +72,7 @@
#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_SKX 0x201C
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_ICX 0x347e
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)
@@ -120,6 +121,7 @@ struct intel_ntb_xlat_reg {
unsigned long bar0_base;
unsigned long bar2_xlat;
unsigned long bar2_limit;
+ unsigned short bar2_idx;
};
struct intel_b2b_addr {
@@ -182,6 +184,9 @@ struct intel_ntb_dev {
struct dentry *debugfs_dir;
struct dentry *debugfs_info;
+
+ /* gen4 entries */
+ int dev_up;
};
#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb)
@@ -219,4 +224,11 @@ static inline int pdev_is_gen3(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_gen4(struct pci_dev *pdev)
+{
+ if (pdev->device == PCI_DEVICE_ID_INTEL_NTB_B2B_ICX)
+ return 1;
+
+ return 0;
+}
#endif
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 972f6d984f6d..89df1350fefd 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -101,8 +101,8 @@ MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
#define DMA_MDELAY 10
#define MSG_TRIES 1000
-#define MSG_UDELAY_LOW 1000
-#define MSG_UDELAY_HIGH 2000
+#define MSG_UDELAY_LOW 1000000
+#define MSG_UDELAY_HIGH 2000000
#define PERF_BUF_LEN 1024
@@ -159,6 +159,8 @@ struct perf_peer {
/* NTB connection setup service */
struct work_struct service;
unsigned long sts;
+
+ struct completion init_comp;
};
#define to_peer_service(__work) \
container_of(__work, struct perf_peer, service)
@@ -547,6 +549,7 @@ static int perf_setup_outbuf(struct perf_peer *peer)
/* Initialization is finally done */
set_bit(PERF_STS_DONE, &peer->sts);
+ complete_all(&peer->init_comp);
return 0;
}
@@ -557,7 +560,7 @@ static void perf_free_inbuf(struct perf_peer *peer)
return;
(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
- dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
+ dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size,
peer->inbuf, peer->inbuf_xlat);
peer->inbuf = NULL;
}
@@ -586,8 +589,9 @@ static int perf_setup_inbuf(struct perf_peer *peer)
perf_free_inbuf(peer);
- peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
- &peer->inbuf_xlat, GFP_KERNEL);
+ peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
+ peer->inbuf_size, &peer->inbuf_xlat,
+ GFP_KERNEL);
if (!peer->inbuf) {
dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
&peer->inbuf_size);
@@ -637,6 +641,7 @@ static void perf_service_work(struct work_struct *work)
perf_setup_outbuf(peer);
if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
+ init_completion(&peer->init_comp);
clear_bit(PERF_STS_DONE, &peer->sts);
if (test_bit(0, &peer->perf->busy_flag) &&
peer == peer->perf->test_peer) {
@@ -653,7 +658,7 @@ static int perf_init_service(struct perf_ctx *perf)
{
u64 mask;
- if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
+ if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) {
dev_err(&perf->ntb->dev, "Not enough memory windows\n");
return -EINVAL;
}
@@ -803,7 +808,7 @@ static int perf_copy_chunk(struct perf_thread *pthr,
dst_vaddr = dst;
dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);
- unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
+ unmap = dmaengine_get_unmap_data(dma_dev, 1, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
@@ -816,15 +821,8 @@ static int perf_copy_chunk(struct perf_thread *pthr,
}
unmap->to_cnt = 1;
- unmap->addr[1] = dst_dma_addr;
- if (dma_mapping_error(dma_dev, unmap->addr[1])) {
- ret = -EIO;
- goto err_free_resource;
- }
- unmap->from_cnt = 1;
-
do {
- tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, unmap->addr[1],
+ tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx)
msleep(DMA_MDELAY);
@@ -1010,8 +1008,8 @@ static void perf_clear_test(struct perf_thread *pthr)
pthr->perf->test_peer->dma_dst_addr,
pthr->perf->test_peer->outbuf_size,
DMA_FROM_DEVICE, 0);
- if (pthr->dma_chan)
- dma_release_channel(pthr->dma_chan);
+
+ dma_release_channel(pthr->dma_chan);
no_dma_notify:
atomic_dec(&perf->tsync);
@@ -1083,8 +1081,9 @@ static int perf_submit_test(struct perf_peer *peer)
struct perf_thread *pthr;
int tidx, ret;
- if (!test_bit(PERF_STS_DONE, &peer->sts))
- return -ENOLINK;
+ ret = wait_for_completion_interruptible(&peer->init_comp);
+ if (ret < 0)
+ return ret;
if (test_and_set_bit_lock(0, &perf->busy_flag))
return -EBUSY;
@@ -1455,10 +1454,21 @@ static int perf_init_peers(struct perf_ctx *perf)
peer->gidx = pidx;
}
INIT_WORK(&peer->service, perf_service_work);
+ init_completion(&peer->init_comp);
}
if (perf->gidx == -1)
perf->gidx = pidx;
+ /*
+ * Hardware with only two ports may not have unique port
+ * numbers. In this case, the gidxs should all be zero.
+ */
+ if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 &&
+ ntb_peer_port_number(perf->ntb, 0) == 0) {
+ perf->gidx = 0;
+ perf->peers[0].gidx = 0;
+ }
+
for (pidx = 0; pidx < perf->pcnt; pidx++) {
ret = perf_setup_peer_mw(&perf->peers[pidx]);
if (ret)
@@ -1554,4 +1564,3 @@ static void __exit perf_exit(void)
destroy_workqueue(perf_wq);
}
module_exit(perf_exit);
-
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 04dd46647db3..2164e8492772 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -121,15 +121,14 @@ static int pp_find_next_peer(struct pp_ctx *pp)
link = ntb_link_is_up(pp->ntb, NULL, NULL);
/* Find next available peer */
- if (link & pp->nmask) {
+ if (link & pp->nmask)
pidx = __ffs64(link & pp->nmask);
- out_db = BIT_ULL(pidx + 1);
- } else if (link & pp->pmask) {
+ else if (link & pp->pmask)
pidx = __ffs64(link & pp->pmask);
- out_db = BIT_ULL(pidx);
- } else {
+ else
return -ENODEV;
- }
+
+ out_db = BIT_ULL(ntb_peer_port_number(pp->ntb, pidx));
spin_lock(&pp->lock);
pp->out_pidx = pidx;
@@ -303,7 +302,7 @@ static void pp_init_flds(struct pp_ctx *pp)
break;
}
- pp->in_db = BIT_ULL(pidx);
+ pp->in_db = BIT_ULL(lport);
pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
pp->nmask = GENMASK_ULL(pcnt - 1, pidx);
@@ -432,4 +431,3 @@ static void __exit pp_exit(void)
debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);
-
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index 69da758fe64c..b7bf3f863d79 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -504,7 +504,7 @@ static ssize_t tool_peer_link_read(struct file *filep, char __user *ubuf,
buf[1] = '\n';
buf[2] = '\0';
- return simple_read_from_buffer(ubuf, size, offp, buf, 3);
+ return simple_read_from_buffer(ubuf, size, offp, buf, 2);
}
static TOOL_FOPS_RDWR(tool_peer_link_fops,
@@ -590,7 +590,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
inmw->size = min_t(resource_size_t, req_size, size);
inmw->size = round_up(inmw->size, addr_align);
inmw->size = round_up(inmw->size, size_align);
- inmw->mm_base = dma_alloc_coherent(&tc->ntb->dev, inmw->size,
+ inmw->mm_base = dma_alloc_coherent(&tc->ntb->pdev->dev, inmw->size,
&inmw->dma_base, GFP_KERNEL);
if (!inmw->mm_base)
return -ENOMEM;
@@ -612,7 +612,7 @@ static int tool_setup_mw(struct tool_ctx *tc, int pidx, int widx,
return 0;
err_free_dma:
- dma_free_coherent(&tc->ntb->dev, inmw->size, inmw->mm_base,
+ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size, inmw->mm_base,
inmw->dma_base);
inmw->mm_base = NULL;
inmw->dma_base = 0;
@@ -629,7 +629,7 @@ static void tool_free_mw(struct tool_ctx *tc, int pidx, int widx)
if (inmw->mm_base != NULL) {
ntb_mw_clear_trans(tc->ntb, pidx, widx);
- dma_free_coherent(&tc->ntb->dev, inmw->size,
+ dma_free_coherent(&tc->ntb->pdev->dev, inmw->size,
inmw->mm_base, inmw->dma_base);
}
@@ -1690,4 +1690,3 @@ static void __exit tool_exit(void)
debugfs_remove_recursive(tool_dbgfs_topdir);
}
module_exit(tool_exit);
-
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 97f948f8f4e6..d1ecd6da11a2 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -7,7 +7,6 @@
* Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
*/
-#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
@@ -25,6 +24,8 @@
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0585efa47d8f..c2c5bc4fb702 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3669,7 +3669,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->disk = disk;
if (__nvme_revalidate_disk(disk, id))
- goto out_free_disk;
+ goto out_put_disk;
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
ret = nvme_nvm_register(ns, disk_name, node);
@@ -3696,8 +3696,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
/* prevent double queue cleanup */
ns->disk->queue = NULL;
put_disk(ns->disk);
- out_free_disk:
- del_gendisk(ns->disk);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index cb0007592c12..e999a8c4b7e8 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2634,10 +2634,11 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
- if (!(op->flags & FCOP_FLAGS_AEN))
+ if (!(op->flags & FCOP_FLAGS_AEN)) {
nvme_fc_unmap_data(ctrl, op->rq, op);
+ nvme_cleanup_cmd(op->rq);
+ }
- nvme_cleanup_cmd(op->rq);
nvme_fc_ctrl_put(ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fa5c75501049..c0f4226d3299 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -599,8 +599,7 @@ static inline void nvme_trace_bio_complete(struct request *req,
struct nvme_ns *ns = req->q->queuedata;
if (req->cmd_flags & REQ_NVME_MPATH)
- trace_block_bio_complete(ns->head->disk->queue,
- req->bio, status);
+ trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
extern struct device_attribute dev_attr_ana_grpid;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d690d5593a80..e2bacd369a88 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2950,9 +2950,15 @@ static int nvme_suspend(struct device *dev)
* the PCI bus layer to put it into D3 in order to take the PCIe link
* down, so as to allow the platform to achieve its minimum low-power
* state (which may not be possible if the link is up).
+ *
+ * If a host memory buffer is enabled, shut down the device as the NVMe
+ * specification allows the device to access the host memory buffer in
+ * host DRAM from all power states, but hosts will fail access to DRAM
+ * during S3.
*/
if (pm_suspend_via_firmware() || !ctrl->npss ||
!pcie_aspm_enabled(pdev) ||
+ ndev->nr_host_mem_descs ||
(ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
return nvme_disable_prepare_reset(ndev, true);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1843110ec34f..3345ec7efaff 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -131,8 +131,8 @@ struct nvme_tcp_ctrl {
static LIST_HEAD(nvme_tcp_ctrl_list);
static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
static struct workqueue_struct *nvme_tcp_wq;
-static struct blk_mq_ops nvme_tcp_mq_ops;
-static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+static const struct blk_mq_ops nvme_tcp_mq_ops;
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
@@ -2301,7 +2301,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
return queue->nr_cqe;
}
-static struct blk_mq_ops nvme_tcp_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
@@ -2312,7 +2312,7 @@ static struct blk_mq_ops nvme_tcp_mq_ops = {
.poll = nvme_tcp_poll,
};
-static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
.queue_rq = nvme_tcp_queue_rq,
.complete = nvme_complete_rq,
.init_request = nvme_tcp_init_request,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6392bcd30bd7..6e2f623e472e 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -129,7 +129,22 @@ static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
-static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
+{
+ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ struct nvmet_req *req;
+
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
+ req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
+ mutex_unlock(&ctrl->lock);
+ nvmet_req_complete(req, status);
+ mutex_lock(&ctrl->lock);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
struct nvmet_async_event *aen;
struct nvmet_req *req;
@@ -139,15 +154,14 @@ static void nvmet_async_events_process(struct nvmet_ctrl *ctrl, u16 status)
aen = list_first_entry(&ctrl->async_events,
struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
- if (status == 0)
- nvmet_set_result(req, nvmet_async_event_result(aen));
+ nvmet_set_result(req, nvmet_async_event_result(aen));
list_del(&aen->entry);
kfree(aen);
mutex_unlock(&ctrl->lock);
trace_nvmet_async_event(ctrl, req->cqe->result.u32);
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, 0);
mutex_lock(&ctrl->lock);
}
mutex_unlock(&ctrl->lock);
@@ -170,7 +184,7 @@ static void nvmet_async_event_work(struct work_struct *work)
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, async_event_work);
- nvmet_async_events_process(ctrl, 0);
+ nvmet_async_events_process(ctrl);
}
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
@@ -779,7 +793,6 @@ static void nvmet_confirm_sq(struct percpu_ref *ref)
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
- u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_ctrl *ctrl = sq->ctrl;
/*
@@ -787,7 +800,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
* queue doesn't have outstanding requests on it.
*/
if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
- nvmet_async_events_process(ctrl, status);
+ nvmet_async_events_failall(ctrl);
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index d5141780592e..76ea23a2c2be 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -20,6 +20,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
+#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvmet.h"
@@ -1403,7 +1404,8 @@ static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
rej.sts = cpu_to_le16(status);
- return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
+ return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
+ IB_CM_REJ_CONSUMER_DEFINED);
}
static struct nvmet_rdma_queue *
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 1669177cd26c..de9217cfd22d 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -153,7 +153,7 @@ static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
-static struct nvmet_fabrics_ops nvmet_tcp_ops;
+static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
@@ -1713,7 +1713,7 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
}
}
-static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
.msdbd = 1,
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 05c6ae4b0b97..927eb5f6003f 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -66,6 +66,30 @@ static LIST_HEAD(nvmem_lookup_list);
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_read)
+ return nvmem->reg_read(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+
+ if (nvmem->reg_write) {
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
+ ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
+ gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
[NVMEM_TYPE_UNKNOWN] = "Unknown",
@@ -122,7 +146,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
if (!nvmem->reg_read)
return -EPERM;
- rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
+ rc = nvmem_reg_read(nvmem, pos, buf, count);
if (rc)
return rc;
@@ -159,7 +183,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
if (!nvmem->reg_write)
return -EPERM;
- rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
+ rc = nvmem_reg_write(nvmem, pos, buf, count);
if (rc)
return rc;
@@ -167,11 +191,8 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
return count;
}
-static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *attr, int i)
+static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvmem_device *nvmem = to_nvmem_device(dev);
umode_t mode = 0400;
if (!nvmem->root_only)
@@ -189,6 +210,15 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
return mode;
}
+static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+ return nvmem_bin_attr_get_umode(nvmem);
+}
+
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
@@ -215,34 +245,14 @@ static const struct attribute_group *nvmem_dev_groups[] = {
NULL,
};
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
+static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
- .name = "nvmem",
- .mode = 0444,
- },
- .read = bin_attr_nvmem_read,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0600,
+ .name = "eeprom",
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
};
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
- .attr = {
- .name = "nvmem",
- .mode = 0400,
- },
- .read = bin_attr_nvmem_read,
-};
-
/*
* nvmem_setup_compat() - Create an additional binary entry in
* drivers sys directory, to be backwards compatible with the older
@@ -259,18 +269,8 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
if (!config->base_dev)
return -EINVAL;
- if (nvmem->read_only) {
- if (config->root_only)
- nvmem->eeprom = bin_attr_ro_root_nvmem;
- else
- nvmem->eeprom = bin_attr_ro_nvmem;
- } else {
- if (config->root_only)
- nvmem->eeprom = bin_attr_rw_root_nvmem;
- else
- nvmem->eeprom = bin_attr_rw_nvmem;
- }
- nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
+ nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
nvmem->eeprom.attr.key = &eeprom_lock_key;
@@ -311,30 +311,6 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
#endif /* CONFIG_NVMEM_SYSFS */
-static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
- void *val, size_t bytes)
-{
- if (nvmem->reg_read)
- return nvmem->reg_read(nvmem->priv, offset, val, bytes);
-
- return -EINVAL;
-}
-
-static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
- void *val, size_t bytes)
-{
- int ret;
-
- if (nvmem->reg_write) {
- gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
- ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
- gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
- return ret;
- }
-
- return -EINVAL;
-}
-
static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 50bea2aadc1b..7a1ebd6fd08b 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -196,7 +196,6 @@ static int imx_ocotp_read(void *context, unsigned int offset,
if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL)
imx_ocotp_clr_err_if_set(priv);
}
- ret = 0;
read_end:
clk_disable_unprepare(priv->clk);
@@ -435,17 +434,13 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
priv->base + IMX_OCOTP_ADDR_CTRL_SET);
ret = imx_ocotp_wait_for_busy(priv,
priv->params->ctrl.bm_rel_shadows);
- if (ret < 0) {
+ if (ret < 0)
dev_err(priv->dev, "timeout during shadow register reload\n");
- goto write_end;
- }
write_end:
clk_disable_unprepare(priv->clk);
mutex_unlock(&ocotp_mutex);
- if (ret < 0)
- return ret;
- return bytes;
+ return ret < 0 ? ret : bytes;
}
static struct nvmem_config imx_ocotp_nvmem_config = {
diff --git a/drivers/nvmem/jz4780-efuse.c b/drivers/nvmem/jz4780-efuse.c
index 512e1872ba36..0b01b840edd9 100644
--- a/drivers/nvmem/jz4780-efuse.c
+++ b/drivers/nvmem/jz4780-efuse.c
@@ -211,10 +211,8 @@ static int jz4780_efuse_probe(struct platform_device *pdev)
cfg.priv = efuse;
nvmem = devm_nvmem_register(dev, &cfg);
- if (IS_ERR(nvmem))
- return PTR_ERR(nvmem);
- return 0;
+ return PTR_ERR_OR_ZERO(nvmem);
}
static const struct of_device_id jz4780_efuse_match[] = {
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index d057f1bfb2e9..8a91717600be 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -27,25 +27,11 @@ static int qfprom_reg_read(void *context,
return 0;
}
-static int qfprom_reg_write(void *context,
- unsigned int reg, void *_val, size_t bytes)
-{
- struct qfprom_priv *priv = context;
- u8 *val = _val;
- int i = 0, words = bytes;
-
- while (words--)
- writeb(*val++, priv->base + reg + i++);
-
- return 0;
-}
-
static struct nvmem_config econfig = {
.name = "qfprom",
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
- .reg_write = qfprom_reg_write,
};
static int qfprom_probe(struct platform_device *pdev)
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 5893543918c8..e28d7b133e11 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -16,8 +16,6 @@ struct zynqmp_nvmem_data {
struct nvmem_device *nvmem;
};
-static const struct zynqmp_eemi_ops *eemi_ops;
-
static int zynqmp_nvmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -25,10 +23,7 @@ static int zynqmp_nvmem_read(void *context, unsigned int offset,
int idcode, version;
struct zynqmp_nvmem_data *priv = context;
- if (!eemi_ops->get_chipid)
- return -ENXIO;
-
- ret = eemi_ops->get_chipid(&idcode, &version);
+ ret = zynqmp_pm_get_chipid(&idcode, &version);
if (ret < 0)
return ret;
@@ -61,10 +56,6 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
priv->dev = dev;
econfig.dev = dev;
econfig.reg_read = zynqmp_nvmem_read;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 08fd823edac9..fe64430b438a 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -286,7 +286,6 @@ int of_detach_node(struct device_node *np)
{
struct of_reconfig_data rd;
unsigned long flags;
- int rc = 0;
memset(&rd, 0, sizeof(rd));
rd.dn = np;
@@ -301,7 +300,7 @@ int of_detach_node(struct device_node *np)
of_reconfig_notify(OF_RECONFIG_DETACH_NODE, &rd);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(of_detach_node);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2cdf64d2456f..4602e467ca8b 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -471,7 +471,7 @@ void *initial_boot_params __ro_after_init;
static u32 of_fdt_crc32;
/**
- * res_mem_reserve_reg() - reserve all memory described in 'reg' property
+ * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
*/
static int __init __reserved_mem_reserve_reg(unsigned long node,
const char *uname)
@@ -643,8 +643,6 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
- if (*pathp == '/')
- pathp = kbasename(pathp);
rc = it(offset, pathp, depth, data);
}
return rc;
@@ -671,8 +669,6 @@ int __init of_scan_flat_dt_subnodes(unsigned long parent,
int rc;
pathp = fdt_get_name(blob, node, NULL);
- if (*pathp == '/')
- pathp = kbasename(pathp);
rc = it(node, pathp, data);
if (rc)
return rc;
@@ -1078,7 +1074,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
#endif
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", (char*)data);
+ pr_debug("Command line is: %s\n", (char *)data);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index c72eef988041..a32e60b024b8 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -134,8 +134,6 @@ int __of_attach_node_sysfs(struct device_node *np)
if (!name)
return -ENOMEM;
- of_node_get(np);
-
rc = kobject_add(&np->kobj, parent, "%s", name);
kfree(name);
if (rc)
@@ -144,6 +142,7 @@ int __of_attach_node_sysfs(struct device_node *np)
for_each_property_of_node(np, pp)
__of_add_property_sysfs(np, pp);
+ of_node_get(np);
return 0;
}
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1a84bc0d5fa8..6877080c8af9 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -46,7 +46,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
}
/**
- * res_mem_save_node() - save fdt node for second pass initialization
+ * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
*/
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
phys_addr_t base, phys_addr_t size)
@@ -68,8 +68,8 @@ void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
}
/**
- * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
- * and 'alloc-ranges' properties
+ * __reserved_mem_alloc_size() - allocate reserved memory described by
+ * 'size', 'align' and 'alloc-ranges' properties.
*/
static int __init __reserved_mem_alloc_size(unsigned long node,
const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
@@ -165,7 +165,7 @@ static const struct of_device_id __rmem_of_table_sentinel
__used __section(__reservedmem_of_table_end);
/**
- * res_mem_init_node() - call region specific reserved memory init code
+ * __reserved_mem_init_node() - call region specific reserved memory init code
*/
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
@@ -232,7 +232,7 @@ static void __init __rmem_check_for_overlap(void)
}
/**
- * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
+ * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
*/
void __init fdt_init_reserved_mem(void)
{
@@ -358,6 +358,25 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
/**
+ * of_reserved_mem_device_init_by_name() - assign named reserved memory region
+ * to given device
+ * @dev: pointer to the device to configure
+ * @np: pointer to the device node with 'memory-region' property
+ * @name: name of the selected memory region
+ *
+ * Returns: 0 on success or a negative error-code on failure.
+ */
+int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name)
+{
+ int idx = of_property_match_string(np, "memory-region-names", name);
+
+ return of_reserved_mem_device_init_by_idx(dev, np, idx);
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
+
+/**
* of_reserved_mem_device_release() - release reserved memory device structures
* @dev: Pointer to the device to deconfigure
*
@@ -366,24 +385,22 @@ EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
*/
void of_reserved_mem_device_release(struct device *dev)
{
- struct rmem_assigned_device *rd;
- struct reserved_mem *rmem = NULL;
+ struct rmem_assigned_device *rd, *tmp;
+ LIST_HEAD(release_list);
mutex_lock(&of_rmem_assigned_device_mutex);
- list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
- if (rd->dev == dev) {
- rmem = rd->rmem;
- list_del(&rd->list);
- kfree(rd);
- break;
- }
+ list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
+ if (rd->dev == dev)
+ list_move_tail(&rd->list, &release_list);
}
mutex_unlock(&of_rmem_assigned_device_mutex);
- if (!rmem || !rmem->ops || !rmem->ops->device_release)
- return;
+ list_for_each_entry_safe(rd, tmp, &release_list, list) {
+ if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
+ rd->rmem->ops->device_release(rd->rmem, dev);
- rmem->ops->device_release(rmem, dev);
+ kfree(rd);
+ }
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 3371e4a06248..071f04da32c8 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -291,7 +291,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
#endif /* CONFIG_ARM_AMBA */
/**
- * of_devname_lookup() - Given a device node, lookup the preferred Linux name
+ * of_dev_lookup() - Given a device node, lookup the preferred Linux name
*/
static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
struct device_node *np)
@@ -538,7 +538,9 @@ static int __init of_platform_default_populate_init(void)
}
/* Populate everything else. */
+ fw_devlink_pause();
of_platform_default_populate(NULL, NULL, NULL);
+ fw_devlink_resume();
return 0;
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index b4916dcc9e72..1f2086f4e7ce 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1045,8 +1045,20 @@ static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
* Find the device node that contains the supplier phandle. It may be
* @sup_np or it may be an ancestor of @sup_np.
*/
- while (sup_np && !of_find_property(sup_np, "compatible", NULL))
+ while (sup_np) {
+
+ /* Don't allow linking to a disabled supplier */
+ if (!of_device_is_available(sup_np)) {
+ of_node_put(sup_np);
+ sup_np = NULL;
+ }
+
+ if (of_find_property(sup_np, "compatible", NULL))
+ break;
+
sup_np = of_get_next_parent(sup_np);
+ }
+
if (!sup_np) {
dev_dbg(dev, "Not linking to %pOFP - No device\n", tmp_np);
return -ENODEV;
@@ -1074,7 +1086,7 @@ static int of_link_to_phandle(struct device *dev, struct device_node *sup_np,
return -EAGAIN;
}
if (!device_link_add(dev, sup_dev, dl_flags))
- ret = -EAGAIN;
+ ret = -EINVAL;
put_device(sup_dev);
return ret;
}
@@ -1206,6 +1218,7 @@ DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
+DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
@@ -1230,6 +1243,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_dmas, },
{ .parse_prop = parse_power_domains, },
{ .parse_prop = parse_hwlocks, },
+ { .parse_prop = parse_extcon, },
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
@@ -1296,7 +1310,7 @@ static int of_link_to_suppliers(struct device *dev,
if (of_link_property(dev, con_np, p->name))
ret = -ENODEV;
- for_each_child_of_node(con_np, child)
+ for_each_available_child_of_node(con_np, child)
if (of_link_to_suppliers(dev, child) && !ret)
ret = -EAGAIN;
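Two smaller changes in this file follow the same theme: "extcon" phandles are now parsed as consumer/supplier dependencies like dmas or power-domains (a plain phandle list, hence the NULL cells argument), and of_link_to_suppliers() walks only available child nodes, so for_each_available_child_of_node() skips any child whose status property is present and not "okay"/"ok" and no links are created on behalf of disabled children.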
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e4f01e7771a2..dfbd3d10410c 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -664,7 +664,7 @@ static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
return ret;
}
-static int _generic_set_opp_regulator(const struct opp_table *opp_table,
+static int _generic_set_opp_regulator(struct opp_table *opp_table,
struct device *dev,
unsigned long old_freq,
unsigned long freq,
@@ -699,6 +699,18 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
goto restore_freq;
}
+ /*
+ * Enable the regulator after setting its voltages, otherwise it breaks
+ * some boot-enabled regulators.
+ */
+ if (unlikely(!opp_table->regulator_enabled)) {
+ ret = regulator_enable(reg);
+ if (ret < 0)
+ dev_warn(dev, "Failed to enable regulator: %d", ret);
+ else
+ opp_table->regulator_enabled = true;
+ }
+
return 0;
restore_freq:
@@ -713,6 +725,34 @@ restore_voltage:
return ret;
}
+static int _set_opp_bw(const struct opp_table *opp_table,
+ struct dev_pm_opp *opp, struct device *dev, bool remove)
+{
+ u32 avg, peak;
+ int i, ret;
+
+ if (!opp_table->paths)
+ return 0;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ if (remove) {
+ avg = 0;
+ peak = 0;
+ } else {
+ avg = opp->bandwidth[i].avg;
+ peak = opp->bandwidth[i].peak;
+ }
+ ret = icc_set_bw(opp_table->paths[i], avg, peak);
+ if (ret) {
+ dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",
+ remove ? "remove" : "set", i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int _set_opp_custom(const struct opp_table *opp_table,
struct device *dev, unsigned long old_freq,
unsigned long freq,
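The new _set_opp_bw() helper above votes the OPP's per-path bandwidth on every interconnect path attached to the table, and clears the votes (0/0) when the caller drops its OPP vote via dev_pm_opp_set_rate(dev, 0). Each iteration reduces to one icc_set_bw() call; a hedged sketch of a single vote, with made-up values:

        #include <linux/interconnect.h>

        /* Sketch: one bandwidth vote on one path (values are illustrative). */
        static int vote_path(struct icc_path *path, bool remove)
        {
                u32 avg  = remove ? 0 : kBps_to_icc(100000);    /* ~100 MB/s average */
                u32 peak = remove ? 0 : kBps_to_icc(200000);    /* ~200 MB/s peak    */

                return icc_set_bw(path, avg, peak);             /* 0 on success */
        }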
@@ -817,15 +857,31 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
}
if (unlikely(!target_freq)) {
- if (opp_table->required_opp_tables) {
- ret = _set_required_opps(dev, opp_table, NULL);
- } else if (!_get_opp_count(opp_table)) {
+ /*
+ * Some drivers need to support cases where some platforms may
+ * have OPP table for the device, while others don't and
+ * opp_set_rate() just needs to behave like clk_set_rate().
+ */
+ if (!_get_opp_count(opp_table))
return 0;
- } else {
+
+ if (!opp_table->required_opp_tables && !opp_table->regulators &&
+ !opp_table->paths) {
dev_err(dev, "target frequency can't be 0\n");
ret = -EINVAL;
+ goto put_opp_table;
+ }
+
+ ret = _set_opp_bw(opp_table, NULL, dev, true);
+ if (ret)
+ return ret;
+
+ if (opp_table->regulator_enabled) {
+ regulator_disable(opp_table->regulators[0]);
+ opp_table->regulator_enabled = false;
}
+ ret = _set_required_opps(dev, opp_table, NULL);
goto put_opp_table;
}
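With the rework above, dev_pm_opp_set_rate(dev, 0) becomes the way to drop a device's OPP vote entirely: if the device has no OPPs it simply returns 0 (so the call behaves like clk_set_rate() on such platforms); if the table carries regulators, interconnect paths or required-OPPs, it zeroes the bandwidth votes, disables the regulator enabled on the first real transition, and drops the required-OPP (genpd performance-state) votes; a plain frequency-only table still rejects a 0 rate with -EINVAL. A hedged sketch of the intended pairing in a hypothetical driver:

        #include <linux/pm_opp.h>

        static unsigned long foo_saved_freq = 200000000;        /* made-up 200 MHz OPP */

        static int foo_runtime_suspend(struct device *dev)
        {
                return dev_pm_opp_set_rate(dev, 0);     /* clears freq, bw and regulator votes */
        }

        static int foo_runtime_resume(struct device *dev)
        {
                return dev_pm_opp_set_rate(dev, foo_saved_freq);
        }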
@@ -909,6 +965,9 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
dev_err(dev, "Failed to set required opps: %d\n", ret);
}
+ if (!ret)
+ ret = _set_opp_bw(opp_table, opp, dev, false);
+
put_opp:
dev_pm_opp_put(opp);
put_old_opp:
@@ -999,6 +1058,12 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
ret);
}
+ /* Find interconnect path(s) for the device */
+ ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
+ if (ret)
+ dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
+ __func__, ret);
+
BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
INIT_LIST_HEAD(&opp_table->opp_list);
kref_init(&opp_table->kref);
@@ -1057,6 +1122,7 @@ static void _opp_table_kref_release(struct kref *kref)
{
struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
struct opp_device *opp_dev, *temp;
+ int i;
_of_clear_opp_table(opp_table);
@@ -1064,6 +1130,12 @@ static void _opp_table_kref_release(struct kref *kref)
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
+ if (opp_table->paths) {
+ for (i = 0; i < opp_table->path_count; i++)
+ icc_put(opp_table->paths[i]);
+ kfree(opp_table->paths);
+ }
+
WARN_ON(!list_empty(&opp_table->opp_list));
list_for_each_entry_safe(opp_dev, temp, &opp_table->dev_list, node) {
@@ -1243,19 +1315,23 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
struct dev_pm_opp *_opp_allocate(struct opp_table *table)
{
struct dev_pm_opp *opp;
- int count, supply_size;
+ int supply_count, supply_size, icc_size;
/* Allocate space for at least one supply */
- count = table->regulator_count > 0 ? table->regulator_count : 1;
- supply_size = sizeof(*opp->supplies) * count;
+ supply_count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_size = sizeof(*opp->supplies) * supply_count;
+ icc_size = sizeof(*opp->bandwidth) * table->path_count;
/* allocate new OPP node and supplies structures */
- opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+ opp = kzalloc(sizeof(*opp) + supply_size + icc_size, GFP_KERNEL);
+
if (!opp)
return NULL;
/* Put the supplies at the end of the OPP structure as an empty array */
opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+ if (icc_size)
+ opp->bandwidth = (struct dev_pm_opp_icc_bw *)(opp->supplies + supply_count);
INIT_LIST_HEAD(&opp->node);
return opp;
@@ -1286,11 +1362,24 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
+int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2)
+{
+ if (opp1->rate != opp2->rate)
+ return opp1->rate < opp2->rate ? -1 : 1;
+ if (opp1->bandwidth && opp2->bandwidth &&
+ opp1->bandwidth[0].peak != opp2->bandwidth[0].peak)
+ return opp1->bandwidth[0].peak < opp2->bandwidth[0].peak ? -1 : 1;
+ if (opp1->level != opp2->level)
+ return opp1->level < opp2->level ? -1 : 1;
+ return 0;
+}
+
static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
struct opp_table *opp_table,
struct list_head **head)
{
struct dev_pm_opp *opp;
+ int opp_cmp;
/*
* Insert new OPP in order of increasing frequency and discard if
@@ -1301,12 +1390,13 @@ static int _opp_is_duplicate(struct device *dev, struct dev_pm_opp *new_opp,
* loop.
*/
list_for_each_entry(opp, &opp_table->opp_list, node) {
- if (new_opp->rate > opp->rate) {
+ opp_cmp = _opp_compare_key(new_opp, opp);
+ if (opp_cmp > 0) {
*head = &opp->node;
continue;
}
- if (new_opp->rate < opp->rate)
+ if (opp_cmp < 0)
return 0;
/* Duplicate OPPs */
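_opp_compare_key() gives OPP entries a total order over (rate, first-path peak bandwidth, level), compared in that priority, and _opp_is_duplicate() now sorts and deduplicates on that composite key instead of frequency alone. This is what lets purely bandwidth-keyed or level-keyed OPPs (rate == 0) coexist in one table: for example, two OPPs with rate 0 but peak bandwidths of 1 GB/s and 2 GB/s sort as distinct entries, while two OPPs that match on all three fields are still rejected as duplicates.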
@@ -1670,6 +1760,13 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
/* Make sure there are no concurrent readers while updating opp_table */
WARN_ON(!list_empty(&opp_table->opp_list));
+ if (opp_table->regulator_enabled) {
+ for (i = opp_table->regulator_count - 1; i >= 0; i--)
+ regulator_disable(opp_table->regulators[i]);
+
+ opp_table->regulator_enabled = false;
+ }
+
for (i = opp_table->regulator_count - 1; i >= 0; i--)
regulator_put(opp_table->regulators[i]);
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 609665e339b6..596c185b5dda 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -32,6 +32,47 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
+static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct icc_path *path = fp->private_data;
+ char buf[64];
+ int i;
+
+ i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, i);
+}
+
+static const struct file_operations bw_name_fops = {
+ .open = simple_open,
+ .read = bw_name_read,
+ .llseek = default_llseek,
+};
+
+static void opp_debug_create_bw(struct dev_pm_opp *opp,
+ struct opp_table *opp_table,
+ struct dentry *pdentry)
+{
+ struct dentry *d;
+ char name[11];
+ int i;
+
+ for (i = 0; i < opp_table->path_count; i++) {
+ snprintf(name, sizeof(name), "icc-path-%.1d", i);
+
+ /* Create per-path directory */
+ d = debugfs_create_dir(name, pdentry);
+
+ debugfs_create_file("name", S_IRUGO, d, opp_table->paths[i],
+ &bw_name_fops);
+ debugfs_create_u32("peak_bw", S_IRUGO, d,
+ &opp->bandwidth[i].peak);
+ debugfs_create_u32("avg_bw", S_IRUGO, d,
+ &opp->bandwidth[i].avg);
+ }
+}
+
static void opp_debug_create_supplies(struct dev_pm_opp *opp,
struct opp_table *opp_table,
struct dentry *pdentry)
@@ -94,6 +135,7 @@ void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
&opp->clock_latency_ns);
opp_debug_create_supplies(opp, opp_table, d);
+ opp_debug_create_bw(opp, opp_table, d);
opp->dentry = d;
}
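For each OPP, the per-path bandwidth now appears under the existing OPP debugfs node as an icc-path-<n> directory containing name (the interconnect path name), peak_bw and avg_bw, alongside the existing supply entries created by opp_debug_create_supplies().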
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 9cd8f0adacae..9a5873591a40 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -332,6 +332,105 @@ free_required_opps:
return ret;
}
+static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
+{
+ struct device_node *np, *opp_np;
+ struct property *prop;
+
+ if (!opp_table) {
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return -ENODEV;
+
+ opp_np = _opp_of_get_opp_desc_node(np, 0);
+ of_node_put(np);
+ } else {
+ opp_np = of_node_get(opp_table->np);
+ }
+
+ /* Lets not fail in case we are parsing opp-v1 bindings */
+ if (!opp_np)
+ return 0;
+
+ /* Checking only first OPP is sufficient */
+ np = of_get_next_available_child(opp_np, NULL);
+ if (!np) {
+ dev_err(dev, "OPP table empty\n");
+ return -EINVAL;
+ }
+ of_node_put(opp_np);
+
+ prop = of_find_property(np, "opp-peak-kBps", NULL);
+ of_node_put(np);
+
+ if (!prop || !prop->length)
+ return 0;
+
+ return 1;
+}
+
+int dev_pm_opp_of_find_icc_paths(struct device *dev,
+ struct opp_table *opp_table)
+{
+ struct device_node *np;
+ int ret, i, count, num_paths;
+ struct icc_path **paths;
+
+ ret = _bandwidth_supported(dev, opp_table);
+ if (ret <= 0)
+ return ret;
+
+ ret = 0;
+
+ np = of_node_get(dev->of_node);
+ if (!np)
+ return 0;
+
+ count = of_count_phandle_with_args(np, "interconnects",
+ "#interconnect-cells");
+ of_node_put(np);
+ if (count < 0)
+ return 0;
+
+ /* two phandles when #interconnect-cells = <1> */
+ if (count % 2) {
+ dev_err(dev, "%s: Invalid interconnects values\n", __func__);
+ return -EINVAL;
+ }
+
+ num_paths = count / 2;
+ paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
+ if (!paths)
+ return -ENOMEM;
+
+ for (i = 0; i < num_paths; i++) {
+ paths[i] = of_icc_get_by_index(dev, i);
+ if (IS_ERR(paths[i])) {
+ ret = PTR_ERR(paths[i]);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "%s: Unable to get path%d: %d\n",
+ __func__, i, ret);
+ }
+ goto err;
+ }
+ }
+
+ if (opp_table) {
+ opp_table->paths = paths;
+ opp_table->path_count = num_paths;
+ return 0;
+ }
+
+err:
+ while (i--)
+ icc_put(paths[i]);
+
+ kfree(paths);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
+
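dev_pm_opp_of_find_icc_paths() above keys everything off the consumer's interconnects property: each path is one <phandle specifier> pair for the source and another for the destination, hence the count % 2 check and num_paths = count / 2. When it is passed a NULL opp_table it acquires and immediately releases the paths, which lets a driver probe-defer early if the interconnect providers are not ready. A hedged sketch of both uses, with hypothetical names (the binding values are illustrative only):

        /*
         * Consumer node (illustrative, assuming #interconnect-cells = <1>):
         *
         *      interconnects = <&noc MASTER_FOO &noc SLAVE_DDR>,       // path 0
         *                      <&noc MASTER_FOO &noc SLAVE_OCMEM>;     // path 1
         *
         * of_count_phandle_with_args(np, "interconnects", "#interconnect-cells")
         * returns 4 here, i.e. two paths.
         */

        /* Early in a hypothetical driver's probe(), before the OPP table exists: */
        static int foo_check_icc(struct platform_device *pdev)
        {
                int ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);

                if (ret)        /* typically -EPROBE_DEFER until the providers register */
                        return ret;
                return 0;
        }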
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
struct device_node *np)
{
@@ -521,6 +620,90 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+ struct device_node *np, bool peak)
+{
+ const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
+ struct property *prop;
+ int i, count, ret;
+ u32 *bw;
+
+ prop = of_find_property(np, name, NULL);
+ if (!prop)
+ return -ENODEV;
+
+ count = prop->length / sizeof(u32);
+ if (table->path_count != count) {
+ pr_err("%s: Mismatch between %s and paths (%d %d)\n",
+ __func__, name, count, table->path_count);
+ return -EINVAL;
+ }
+
+ bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
+ if (!bw)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, name, bw, count);
+ if (ret) {
+ pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (peak)
+ new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
+ else
+ new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
+ }
+
+out:
+ kfree(bw);
+ return ret;
+}
+
+static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
+ struct device_node *np, bool *rate_not_available)
+{
+ bool found = false;
+ u64 rate;
+ int ret;
+
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (!ret) {
+ /*
+ * Rate is defined as an unsigned long in clk API, and so
+ * casting explicitly to its type. Must be fixed once rate is 64
+ * bit guaranteed in clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ found = true;
+ }
+ *rate_not_available = !!ret;
+
+ /*
+ * Bandwidth consists of peak and average (optional) values:
+ * opp-peak-kBps = <path1_value path2_value>;
+ * opp-avg-kBps = <path1_value path2_value>;
+ */
+ ret = _read_bw(new_opp, table, np, true);
+ if (!ret) {
+ found = true;
+ ret = _read_bw(new_opp, table, np, false);
+ }
+
+ /* The properties were found but we failed to parse them */
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (!of_property_read_u32(np, "opp-level", &new_opp->level))
+ found = true;
+
+ if (found)
+ return 0;
+
+ return ret;
+}
+
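_read_opp_key() makes opp-hz just one of three acceptable keys: an OPP node is valid if it provides a frequency, a bandwidth (opp-peak-kBps, with opp-avg-kBps optional), or an opp-level, and only a present-but-unparsable bandwidth property is treated as a hard error. An interconnect-only table entry can therefore look like this (illustrative values, shown as a comment):

        /*
         *      opp-low {
         *              opp-peak-kBps = <800000 400000>;        // one value per path
         *              opp-avg-kBps  = <400000 200000>;        // optional
         *      };
         */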
/**
* _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
* @opp_table: OPP table
@@ -558,26 +741,12 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
if (!new_opp)
return ERR_PTR(-ENOMEM);
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (ret < 0) {
- /* "opp-hz" is optional for devices like power domains. */
- if (!opp_table->is_genpd) {
- dev_err(dev, "%s: opp-hz not found\n", __func__);
- goto free_opp;
- }
-
- rate_not_available = true;
- } else {
- /*
- * Rate is defined as an unsigned long in clk API, and so
- * casting explicitly to its type. Must be fixed once rate is 64
- * bit guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
+ ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+ if (ret < 0 && !opp_table->is_genpd) {
+ dev_err(dev, "%s: opp key field not found\n", __func__);
+ goto free_opp;
}
- of_property_read_u32(np, "opp-level", &new_opp->level);
-
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index d14e27102730..e51646ff279e 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -12,6 +12,7 @@
#define __DRIVER_OPP_H__
#include <linux/device.h>
+#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -59,6 +60,7 @@ extern struct list_head opp_tables;
* @rate: Frequency in hertz
* @level: Performance level
* @supplies: Power supplies voltage/current values
+ * @bandwidth: Interconnect bandwidth values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
* @required_opps: List of OPPs that are required by this OPP.
@@ -81,6 +83,7 @@ struct dev_pm_opp {
unsigned int level;
struct dev_pm_opp_supply *supplies;
+ struct dev_pm_opp_icc_bw *bandwidth;
unsigned long clock_latency_ns;
@@ -144,8 +147,11 @@ enum opp_table_access {
* @clk: Device's clock handle
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
+ * @regulator_enabled: Set to true if regulators were previously enabled.
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
* property).
+ * @paths: Interconnect path handles
+ * @path_count: Number of interconnect paths
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
@@ -189,6 +195,9 @@ struct opp_table {
struct clk *clk;
struct regulator **regulators;
int regulator_count;
+ bool regulator_enabled;
+ struct icc_path **paths;
+ unsigned int path_count;
bool genpd_performance_state;
bool is_genpd;
@@ -211,6 +220,7 @@ struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_
void _dev_pm_opp_find_and_remove_table(struct device *dev);
struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table);
void _opp_free(struct dev_pm_opp *opp);
+int _opp_compare_key(struct dev_pm_opp *opp1, struct dev_pm_opp *opp2);
int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table, bool rate_not_available);
int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic);
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ac27f3d3fbb4..4d7695289eda 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -91,11 +91,11 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
struct mm_struct *mm = current->mm;
struct vm_area_struct *mpnt;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
mpnt = find_vma(mm, addr);
if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* To avoid latency problems, we only process the current CPU,
* hoping that most samples for the task are on this CPU
*/
@@ -103,7 +103,7 @@ munmap_notify(struct notifier_block *self, unsigned long val, void *data)
return 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return 0;
}
@@ -256,7 +256,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -276,7 +276,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
if (!vma)
cookie = INVALID_COOKIE;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return cookie;
}
@@ -486,7 +486,7 @@ typedef enum {
/* Sync one of the CPU's buffers into the global event buffer.
* Here we need to go through each batch of samples punctuated
- * by context switch notes, taking the task's mmap_sem and doing
+ * by context switch notes, taking the task's mmap_lock and doing
* lookup in task->mm->mmap to convert EIP into dcookie/offset
* value.
*/
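The mmap_sem changes in buffer_sync.c are part of the tree-wide switch to the mmap locking API: callers stop operating on the rwsem directly and go through wrappers, which keeps the lock's name and implementation private to the mm code. Roughly, and simplified from the 5.8 helpers:

        static inline void mmap_read_lock(struct mm_struct *mm)
        {
                down_read(&mm->mmap_lock);
        }

        static inline void mmap_read_unlock(struct mm_struct *mm)
        {
                up_read(&mm->mmap_lock);
        }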
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 3b00e2c8e2e9..6d78ec3a762f 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -30,12 +30,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk(stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
static struct daisydev {
struct daisydev *next;
struct parport *port;
@@ -145,8 +139,7 @@ again:
((num_ports = num_mux_ports(port)) == 2 || num_ports == 4)) {
/* Leave original as port zero. */
port->muxport = 0;
- printk(KERN_INFO
- "%s: 1st (default) port of %d-way multiplexor\n",
+ pr_info("%s: 1st (default) port of %d-way multiplexor\n",
port->name, num_ports);
for (i = 1; i < num_ports; i++) {
/* Clone the port. */
@@ -159,8 +152,7 @@ again:
continue;
}
- printk(KERN_INFO
- "%s: %d%s port of %d-way multiplexor on %s\n",
+ pr_info("%s: %d%s port of %d-way multiplexor on %s\n",
extra->name, i + 1, th[i + 1], num_ports,
port->name);
@@ -323,8 +315,7 @@ static int cpp_daisy(struct parport *port, int cmd)
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff(%02x)\n",
- port->name, s);
+ pr_debug("%s: cpp_daisy: aa5500ff(%02x)\n", port->name, s);
return -ENXIO;
}
@@ -334,8 +325,7 @@ static int cpp_daisy(struct parport *port, int cmd)
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: cpp_daisy: aa5500ff87(%02x)\n",
- port->name, s);
+ pr_debug("%s: cpp_daisy: aa5500ff87(%02x)\n", port->name, s);
return -ENXIO;
}
@@ -370,7 +360,7 @@ static int cpp_mux(struct parport *port, int cmd)
s = parport_read_status(port);
if (!(s & PARPORT_STATUS_ACK)) {
- DPRINTK(KERN_DEBUG "%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
+ pr_debug("%s: cpp_mux: aa55f00f52ad%02x(%02x)\n",
port->name, cmd, s);
return -EIO;
}
@@ -456,8 +446,7 @@ static int assign_addrs(struct parport *port)
| PARPORT_STATUS_PAPEROUT
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff(%02x)\n",
- port->name, s);
+ pr_debug("%s: assign_addrs: aa5500ff(%02x)\n", port->name, s);
return 0;
}
@@ -467,8 +456,7 @@ static int assign_addrs(struct parport *port)
| PARPORT_STATUS_SELECT
| PARPORT_STATUS_ERROR);
if (s != (PARPORT_STATUS_SELECT | PARPORT_STATUS_ERROR)) {
- DPRINTK(KERN_DEBUG "%s: assign_addrs: aa5500ff87(%02x)\n",
- port->name, s);
+ pr_debug("%s: assign_addrs: aa5500ff87(%02x)\n", port->name, s);
return 0;
}
@@ -505,8 +493,7 @@ static int assign_addrs(struct parport *port)
parport_write_data(port, 0xff); udelay(2);
detected = numdevs - thisdev;
- DPRINTK(KERN_DEBUG "%s: Found %d daisy-chained devices\n", port->name,
- detected);
+ pr_debug("%s: Found %d daisy-chained devices\n", port->name, detected);
/* Ask the new devices to introduce themselves. */
deviceid = kmalloc(1024, GFP_KERNEL);
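Dropping the per-file DPRINTK wrappers in favour of pr_debug() trades the hand-rolled #ifdef DEBUG switch for the standard mechanism: pr_debug() is a no-op unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled, and with dynamic debug the call sites can be switched on at run time, for example:

        echo 'file daisy.c +p' > /sys/kernel/debug/dynamic_debug/control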
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 90fb73575495..f28d6a3c5a68 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -31,12 +31,6 @@
#undef DEBUG /* Don't want a garbled console */
#endif
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk (stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
/* Make parport_wait_peripheral wake up.
* It will be useful to call this from an interrupt handler. */
static void parport_ieee1284_wakeup (struct parport *port)
@@ -258,12 +252,11 @@ static void parport_ieee1284_terminate (struct parport *port)
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 49\n",
+ pr_debug("%s: Timeout at event 49\n",
port->name);
parport_data_forward (port);
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
}
@@ -281,8 +274,7 @@ static void parport_ieee1284_terminate (struct parport *port)
/* Event 24: nAck goes low */
r = parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 24\n",
- port->name);
+ pr_debug("%s: Timeout at event 24\n", port->name);
/* Event 25: Set nAutoFd low */
parport_frob_control (port,
@@ -294,8 +286,7 @@ static void parport_ieee1284_terminate (struct parport *port)
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK);
if (r)
- DPRINTK (KERN_INFO "%s: Timeout at event 27\n",
- port->name);
+ pr_debug("%s: Timeout at event 27\n", port->name);
/* Event 29: Set nAutoFd high */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
@@ -304,8 +295,7 @@ static void parport_ieee1284_terminate (struct parport *port)
port->ieee1284.mode = IEEE1284_MODE_COMPAT;
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- DPRINTK (KERN_DEBUG "%s: In compatibility (forward idle) mode\n",
- port->name);
+ pr_debug("%s: In compatibility (forward idle) mode\n", port->name);
}
#endif /* IEEE1284 support */
@@ -329,7 +319,7 @@ int parport_negotiate (struct parport *port, int mode)
#ifndef CONFIG_PARPORT_1284
if (mode == IEEE1284_MODE_COMPAT)
return 0;
- printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+ pr_err("parport: IEEE1284 not supported in this kernel\n");
return -1;
#else
int m = mode & ~IEEE1284_ADDR;
@@ -406,8 +396,7 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_CONTROL_SELECT
| PARPORT_CONTROL_AUTOFD,
PARPORT_CONTROL_SELECT);
- DPRINTK (KERN_DEBUG
- "%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
+ pr_debug("%s: Peripheral not IEEE1284 compliant (0x%02X)\n",
port->name, parport_read_status (port));
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
return -1; /* Not IEEE1284 compliant */
@@ -430,8 +419,7 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant device. */
- DPRINTK (KERN_DEBUG
- "%s: Mode 0x%02x not supported? (0x%02x)\n",
+ pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode, port->ops->read_status (port));
parport_ieee1284_terminate (port);
return 1;
@@ -442,7 +430,7 @@ int parport_negotiate (struct parport *port, int mode)
/* xflag should be high for all modes other than nibble (0). */
if (mode && !xflag) {
/* Mode not supported. */
- DPRINTK (KERN_DEBUG "%s: Mode 0x%02x rejected by peripheral\n",
+ pr_debug("%s: Mode 0x%02x rejected by peripheral\n",
port->name, mode);
parport_ieee1284_terminate (port);
return 1;
@@ -463,9 +451,7 @@ int parport_negotiate (struct parport *port, int mode)
/* Event 52: nAck goes low */
if (parport_wait_peripheral (port, PARPORT_STATUS_ACK, 0)) {
/* This peripheral is _very_ slow. */
- DPRINTK (KERN_DEBUG
- "%s: Event 52 didn't happen\n",
- port->name);
+ pr_debug("%s: Event 52 didn't happen\n", port->name);
parport_ieee1284_terminate (port);
return 1;
}
@@ -481,10 +467,9 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_ACK)) {
/* This shouldn't really happen with a compliant
* device. */
- DPRINTK (KERN_DEBUG
- "%s: Mode 0x%02x not supported? (0x%02x)\n",
+ pr_debug("%s: Mode 0x%02x not supported? (0x%02x)\n",
port->name, mode,
- port->ops->read_status (port));
+ port->ops->read_status(port));
parport_ieee1284_terminate (port);
return 1;
}
@@ -495,8 +480,8 @@ int parport_negotiate (struct parport *port, int mode)
/* xflag should be high. */
if (!xflag) {
/* Extended mode not supported. */
- DPRINTK (KERN_DEBUG "%s: Extended mode 0x%02x not "
- "supported\n", port->name, mode);
+ pr_debug("%s: Extended mode 0x%02x not supported\n",
+ port->name, mode);
parport_ieee1284_terminate (port);
return 1;
}
@@ -505,7 +490,7 @@ int parport_negotiate (struct parport *port, int mode)
}
/* Mode is supported */
- DPRINTK (KERN_DEBUG "%s: In mode 0x%02x\n", port->name, mode);
+ pr_debug("%s: In mode 0x%02x\n", port->name, mode);
port->ieee1284.mode = mode;
/* But ECP is special */
@@ -522,13 +507,11 @@ int parport_negotiate (struct parport *port, int mode)
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- DPRINTK (KERN_INFO "%s: Timeout at event 31\n",
- port->name);
+ pr_debug("%s: Timeout at event 31\n", port->name);
}
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
} else switch (mode) {
case IEEE1284_MODE_NIBBLE:
case IEEE1284_MODE_BYTE:
@@ -573,7 +556,7 @@ void parport_ieee1284_interrupt (void *handle)
if (port->ieee1284.phase == IEEE1284_PH_REV_IDLE) {
/* An interrupt in this phase means that data
* is now available. */
- DPRINTK (KERN_DEBUG "%s: Data available\n", port->name);
+ pr_debug("%s: Data available\n", port->name);
parport_ieee1284_ack_data_avail (port);
}
#endif /* IEEE1284 support */
@@ -617,13 +600,12 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
parport_negotiate (port, IEEE1284_MODE_COMPAT);
/* fall through */
case IEEE1284_MODE_COMPAT:
- DPRINTK (KERN_DEBUG "%s: Using compatibility mode\n",
- port->name);
+ pr_debug("%s: Using compatibility mode\n", port->name);
fn = port->ops->compat_write_data;
break;
case IEEE1284_MODE_EPP:
- DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
+ pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_write_addr;
} else {
@@ -631,8 +613,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
}
break;
case IEEE1284_MODE_EPPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_write_addr;
} else {
@@ -641,7 +622,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
- DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
+ pr_debug("%s: Using ECP mode\n", port->name);
if (addr) {
fn = port->ops->ecp_write_addr;
} else {
@@ -650,8 +631,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
case IEEE1284_MODE_ECPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated ECP mode\n", port->name);
/* The caller has specified that it must be emulated,
* even if we have ECP hardware! */
if (addr) {
@@ -662,13 +642,13 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
break;
default:
- DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
- port->ieee1284.mode);
+ pr_debug("%s: Unknown mode 0x%02x\n",
+ port->name, port->ieee1284.mode);
return -ENOSYS;
}
retval = (*fn) (port, buffer, len, 0);
- DPRINTK (KERN_DEBUG "%s: wrote %d/%d bytes\n", port->name, retval, len);
+ pr_debug("%s: wrote %zd/%zu bytes\n", port->name, retval, len);
return retval;
#endif /* IEEE1284 support */
}
@@ -694,7 +674,7 @@ ssize_t parport_write (struct parport *port, const void *buffer, size_t len)
ssize_t parport_read (struct parport *port, void *buffer, size_t len)
{
#ifndef CONFIG_PARPORT_1284
- printk (KERN_ERR "parport: IEEE1284 not supported in this kernel\n");
+ pr_err("parport: IEEE1284 not supported in this kernel\n");
return -ENODEV;
#else
int mode = port->physport->ieee1284.mode;
@@ -715,7 +695,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
if ((port->physport->modes & PARPORT_MODE_TRISTATE) &&
!parport_negotiate (port, IEEE1284_MODE_BYTE)) {
/* got into BYTE mode OK */
- DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
+ pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
}
@@ -724,17 +704,17 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
}
/* fall through - to NIBBLE */
case IEEE1284_MODE_NIBBLE:
- DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
+ pr_debug("%s: Using nibble mode\n", port->name);
fn = port->ops->nibble_read_data;
break;
case IEEE1284_MODE_BYTE:
- DPRINTK (KERN_DEBUG "%s: Using byte mode\n", port->name);
+ pr_debug("%s: Using byte mode\n", port->name);
fn = port->ops->byte_read_data;
break;
case IEEE1284_MODE_EPP:
- DPRINTK (KERN_DEBUG "%s: Using EPP mode\n", port->name);
+ pr_debug("%s: Using EPP mode\n", port->name);
if (addr) {
fn = port->ops->epp_read_addr;
} else {
@@ -742,8 +722,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
}
break;
case IEEE1284_MODE_EPPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated EPP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated EPP mode\n", port->name);
if (addr) {
fn = parport_ieee1284_epp_read_addr;
} else {
@@ -752,19 +731,18 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
break;
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
- DPRINTK (KERN_DEBUG "%s: Using ECP mode\n", port->name);
+ pr_debug("%s: Using ECP mode\n", port->name);
fn = port->ops->ecp_read_data;
break;
case IEEE1284_MODE_ECPSWE:
- DPRINTK (KERN_DEBUG "%s: Using software-emulated ECP mode\n",
- port->name);
+ pr_debug("%s: Using software-emulated ECP mode\n", port->name);
fn = parport_ieee1284_ecp_read_data;
break;
default:
- DPRINTK (KERN_DEBUG "%s: Unknown mode 0x%02x\n", port->name,
- port->physport->ieee1284.mode);
+ pr_debug("%s: Unknown mode 0x%02x\n",
+ port->name, port->physport->ieee1284.mode);
return -ENOSYS;
}
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index 5d41dda6da4e..2c11bd3fe1fd 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -27,12 +27,6 @@
#undef DEBUG /* Don't want a garbled console */
#endif
-#ifdef DEBUG
-#define DPRINTK(stuff...) printk (stuff)
-#else
-#define DPRINTK(stuff...)
-#endif
-
/*** *
* One-way data transfer functions. *
* ***/
@@ -115,7 +109,7 @@ size_t parport_ieee1284_write_compat (struct parport *port,
if (signal_pending (current))
break;
- DPRINTK (KERN_DEBUG "%s: Timed out\n", port->name);
+ pr_debug("%s: Timed out\n", port->name);
break;
ready:
@@ -178,9 +172,8 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
if (parport_wait_peripheral (port,
PARPORT_STATUS_ACK, 0)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG
- "%s: Nibble timeout at event 9 (%d bytes)\n",
- port->name, i/2);
+ pr_debug("%s: Nibble timeout at event 9 (%d bytes)\n",
+ port->name, i / 2);
parport_frob_control (port, PARPORT_CONTROL_AUTOFD, 0);
break;
}
@@ -201,8 +194,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG
- "%s: Nibble timeout at event 11\n",
+ pr_debug("%s: Nibble timeout at event 11\n",
port->name);
break;
}
@@ -219,9 +211,8 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
/* Read the last nibble without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
- DPRINTK (KERN_DEBUG
- "%s: No more nibble data (%d bytes)\n",
- port->name, i/2);
+ pr_debug("%s: No more nibble data (%d bytes)\n",
+ port->name, i / 2);
/* Go to reverse idle phase. */
parport_frob_control (port,
@@ -272,8 +263,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Timeout -- no more data? */
parport_frob_control (port, PARPORT_CONTROL_AUTOFD,
0);
- DPRINTK (KERN_DEBUG "%s: Byte timeout at event 9\n",
- port->name);
+ pr_debug("%s: Byte timeout at event 9\n", port->name);
break;
}
@@ -288,8 +278,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
PARPORT_STATUS_ACK,
PARPORT_STATUS_ACK)) {
/* Timeout -- no more data? */
- DPRINTK (KERN_DEBUG "%s: Byte timeout at event 11\n",
- port->name);
+ pr_debug("%s: Byte timeout at event 11\n", port->name);
break;
}
@@ -307,8 +296,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Read the last byte without checking data avail. */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
end_of_data:
- DPRINTK (KERN_DEBUG
- "%s: No more byte data (%zd bytes)\n",
+ pr_debug("%s: No more byte data (%zd bytes)\n",
port->name, count);
/* Go to reverse idle phase. */
@@ -353,12 +341,10 @@ int ecp_forward_to_reverse (struct parport *port)
PARPORT_STATUS_PAPEROUT, 0);
if (!retval) {
- DPRINTK (KERN_DEBUG "%s: ECP direction: reverse\n",
- port->name);
+ pr_debug("%s: ECP direction: reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_REV_IDLE;
} else {
- DPRINTK (KERN_DEBUG "%s: ECP direction: failed to reverse\n",
- port->name);
+ pr_debug("%s: ECP direction: failed to reverse\n", port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
@@ -384,12 +370,10 @@ int ecp_reverse_to_forward (struct parport *port)
if (!retval) {
parport_data_forward (port);
- DPRINTK (KERN_DEBUG "%s: ECP direction: forward\n",
- port->name);
+ pr_debug("%s: ECP direction: forward\n", port->name);
port->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
} else {
- DPRINTK (KERN_DEBUG
- "%s: ECP direction: failed to switch forward\n",
+ pr_debug("%s: ECP direction: failed to switch forward\n",
port->name);
port->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
}
@@ -450,7 +434,7 @@ size_t parport_ieee1284_ecp_write_data (struct parport *port,
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
- DPRINTK (KERN_DEBUG "%s: ECP transfer stalled!\n", port->name);
+ pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
@@ -466,8 +450,7 @@ size_t parport_ieee1284_ecp_write_data (struct parport *port,
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
- DPRINTK (KERN_DEBUG "%s: Host transfer recovered\n",
- port->name);
+ pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
@@ -565,23 +548,20 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
command or a normal data byte, don't accept it. */
if (command) {
if (byte & 0x80) {
- DPRINTK (KERN_DEBUG "%s: stopping short at "
- "channel command (%02x)\n",
+ pr_debug("%s: stopping short at channel command (%02x)\n",
port->name, byte);
goto out;
}
else if (port->ieee1284.mode != IEEE1284_MODE_ECPRLE)
- DPRINTK (KERN_DEBUG "%s: device illegally "
- "using RLE; accepting anyway\n",
+ pr_debug("%s: device illegally using RLE; accepting anyway\n",
port->name);
rle_count = byte + 1;
/* Are we allowed to read that many bytes? */
if (rle_count > (len - count)) {
- DPRINTK (KERN_DEBUG "%s: leaving %d RLE bytes "
- "for next time\n", port->name,
- rle_count);
+ pr_debug("%s: leaving %d RLE bytes for next time\n",
+ port->name, rle_count);
break;
}
@@ -596,11 +576,10 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
PARPORT_STATUS_ACK)) {
/* It's gone wrong. Return what data we have
to the caller. */
- DPRINTK (KERN_DEBUG "ECP read timed out at 45\n");
+ pr_debug("ECP read timed out at 45\n");
if (command)
- printk (KERN_WARNING
- "%s: command ignored (%02x)\n",
+ pr_warn("%s: command ignored (%02x)\n",
port->name, byte);
break;
@@ -620,7 +599,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
memset (buf, byte, rle_count);
buf += rle_count;
count += rle_count;
- DPRINTK (KERN_DEBUG "%s: decompressed to %d bytes\n",
+ pr_debug("%s: decompressed to %d bytes\n",
port->name, rle_count);
} else {
/* Normal data byte. */
@@ -686,7 +665,7 @@ size_t parport_ieee1284_ecp_write_addr (struct parport *port,
}
/* Time for Host Transfer Recovery (page 41 of IEEE1284) */
- DPRINTK (KERN_DEBUG "%s: ECP transfer stalled!\n", port->name);
+ pr_debug("%s: ECP transfer stalled!\n", port->name);
parport_frob_control (port, PARPORT_CONTROL_INIT,
PARPORT_CONTROL_INIT);
@@ -702,8 +681,7 @@ size_t parport_ieee1284_ecp_write_addr (struct parport *port,
if (!(parport_read_status (port) & PARPORT_STATUS_PAPEROUT))
break;
- DPRINTK (KERN_DEBUG "%s: Host transfer recovered\n",
- port->name);
+ pr_debug("%s: Host transfer recovered\n", port->name);
if (time_after_eq (jiffies, expire)) break;
goto try_again;
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 3301861f69fa..1e88bcfe0d7b 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -28,16 +28,10 @@
#include <asm/amigaints.h>
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(x...) do { } while (0)
-#endif
-
static void amiga_write_data(struct parport *p, unsigned char data)
{
- DPRINTK(KERN_DEBUG "write_data %c\n",data);
+ pr_debug("write_data %c\n", data);
/* Triggers also /STROBE. This behavior cannot be changed */
ciaa.prb = data;
mb();
@@ -59,13 +53,13 @@ static unsigned char control_amiga_to_pc(unsigned char control)
static void amiga_write_control(struct parport *p, unsigned char control)
{
- DPRINTK(KERN_DEBUG "write_control %02x\n",control);
+ pr_debug("write_control %02x\n", control);
/* No implementation possible */
}
static unsigned char amiga_read_control( struct parport *p)
{
- DPRINTK(KERN_DEBUG "read_control \n");
+ pr_debug("read_control\n");
return control_amiga_to_pc(0);
}
@@ -73,7 +67,7 @@ static unsigned char amiga_frob_control( struct parport *p, unsigned char mask,
{
unsigned char old;
- DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = amiga_read_control(p);
amiga_write_control(p, (old & ~mask) ^ val);
return old;
@@ -99,7 +93,7 @@ static unsigned char amiga_read_status(struct parport *p)
unsigned char status;
status = status_amiga_to_pc(ciab.pra & 7);
- DPRINTK(KERN_DEBUG "read_status %02x\n", status);
+ pr_debug("read_status %02x\n", status);
return status;
}
@@ -115,14 +109,14 @@ static void amiga_disable_irq(struct parport *p)
static void amiga_data_forward(struct parport *p)
{
- DPRINTK(KERN_DEBUG "forward\n");
+ pr_debug("forward\n");
ciaa.ddrb = 0xff; /* all pins output */
mb();
}
static void amiga_data_reverse(struct parport *p)
{
- DPRINTK(KERN_DEBUG "reverse\n");
+ pr_debug("reverse\n");
ciaa.ddrb = 0; /* all pins input */
mb();
}
@@ -212,7 +206,7 @@ static int __init amiga_parallel_probe(struct platform_device *pdev)
if (err)
goto out_irq;
- printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
+ pr_info("%s: Amiga built-in port using irq\n", p->name);
/* XXX: set operating mode */
parport_announce_port(p);
diff --git a/drivers/parport/parport_atari.c b/drivers/parport/parport_atari.c
index f8dd368bfdbb..2ff0fe053e6e 100644
--- a/drivers/parport/parport_atari.c
+++ b/drivers/parport/parport_atari.c
@@ -200,7 +200,7 @@ static int __init parport_atari_init(void)
}
this_port = p;
- printk(KERN_INFO "%s: Atari built-in port using irq\n", p->name);
+ pr_info("%s: Atari built-in port using irq\n", p->name);
parport_announce_port (p);
return 0;
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index e77044c2bf62..8e7e3ac4bb87 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -142,10 +142,8 @@ static int parport_config(struct pcmcia_device *link)
link->irq, PARPORT_DMA_NONE,
&link->dev, IRQF_SHARED);
if (p == NULL) {
- printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
- "0x%3x, irq %u failed\n",
- (unsigned int) link->resource[0]->start,
- link->irq);
+ pr_notice("parport_cs: parport_pc_probe_port() at 0x%3x, irq %u failed\n",
+ (unsigned int)link->resource[0]->start, link->irq);
goto failed;
}
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 922535a118ba..9228e8f90309 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -238,14 +238,14 @@ struct parport *parport_gsc_probe_port(unsigned long base,
priv = kzalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
if (!priv) {
- printk (KERN_DEBUG "parport (0x%lx): no memory!\n", base);
+ printk(KERN_DEBUG "parport (0x%lx): no memory!\n", base);
return NULL;
}
ops = kmemdup(&parport_gsc_ops, sizeof(struct parport_operations),
GFP_KERNEL);
if (!ops) {
- printk (KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
- base);
+ printk(KERN_DEBUG "parport (0x%lx): no memory for ops!\n",
+ base);
kfree (priv);
return NULL;
}
@@ -282,7 +282,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->size = (p->modes & PARPORT_MODE_EPP)?8:3;
p->private_data = priv;
- printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
p->irq = irq;
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
@@ -299,12 +299,16 @@ struct parport *parport_gsc_probe_port(unsigned long base,
p->dma = PARPORT_DMA_NONE;
pr_cont(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
- printmode(COMPAT)
+ printmode(COMPAT);
printmode(EPP);
// printmode(ECP);
// printmode(DMA);
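The printmode() rewrite above is the standard do { ... } while (0) idiom: it turns a multi-statement macro into something that behaves as exactly one statement, which is why printmode(COMPAT) now needs its trailing semicolon and why if/else nesting around such macros cannot go wrong. A standalone illustration (plain userspace C, names made up):

        #include <stdio.h>

        /* Statement-like macro: safe wherever a single statement is expected. */
        #define REPORT(cond, name)                      \
        do {                                            \
                if (cond)                               \
                        printf("%s", name);             \
        } while (0)

        int main(void)
        {
                int verbose = 1;

                if (verbose)
                        REPORT(1, "EPP\n");     /* macro plus ';' forms one statement */
                else
                        printf("quiet\n");      /* so this else still binds to the if */
                return 0;
        }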
@@ -315,8 +319,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
0, p->name, p)) {
- printk (KERN_WARNING "%s: irq %d in use, "
- "resorting to polled operation\n",
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
@@ -347,7 +350,7 @@ static int __init parport_init_chip(struct parisc_device *dev)
unsigned long port;
if (!dev->irq) {
- printk(KERN_WARNING "IRQ not found for parallel device at 0x%llx\n",
+ pr_warn("IRQ not found for parallel device at 0x%llx\n",
(unsigned long long)dev->hpa.start);
return -ENODEV;
}
@@ -360,11 +363,11 @@ static int __init parport_init_chip(struct parisc_device *dev)
if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
/* Initialize bidirectional-mode (0x10) & data-tranfer-mode #1 (0x20) */
- printk("%s: initialize bidirectional-mode.\n", __func__);
+ pr_info("%s: initialize bidirectional-mode\n", __func__);
parport_writeb ( (0x10 + 0x20), port + 4);
} else {
- printk("%s: enhanced parport-modes not supported.\n", __func__);
+ pr_info("%s: enhanced parport-modes not supported\n", __func__);
}
p = parport_gsc_probe_port(port, 0, dev->irq,
diff --git a/drivers/parport/parport_gsc.h b/drivers/parport/parport_gsc.h
index 4c4d3c6cd77e..9301217edf12 100644
--- a/drivers/parport/parport_gsc.h
+++ b/drivers/parport/parport_gsc.h
@@ -71,7 +71,7 @@ struct parport_gsc_private {
static inline void parport_gsc_write_data(struct parport *p, unsigned char d)
{
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG "parport_gsc_write_data(%p,0x%02x)\n", p, d);
+ printk(KERN_DEBUG "%s(%p,0x%02x)\n", __func__, p, d);
#endif
parport_writeb(d, DATA(p));
}
@@ -80,8 +80,7 @@ static inline unsigned char parport_gsc_read_data(struct parport *p)
{
unsigned char val = parport_readb (DATA (p));
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG "parport_gsc_read_data(%p) = 0x%02x\n",
- p, val);
+ printk(KERN_DEBUG "%s(%p) = 0x%02x\n", __func__, p, val);
#endif
return val;
}
@@ -95,9 +94,9 @@ static inline unsigned char __parport_gsc_frob_control(struct parport *p,
struct parport_gsc_private *priv = p->physport->private_data;
unsigned char ctr = priv->ctr;
#ifdef DEBUG_PARPORT
- printk (KERN_DEBUG
- "__parport_gsc_frob_control(%02x,%02x): %02x -> %02x\n",
- mask, val, ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
+ printk(KERN_DEBUG "%s(%02x,%02x): %02x -> %02x\n",
+ __func__, mask, val,
+ ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
#endif
ctr = (ctr & ~mask) ^ val;
ctr &= priv->ctr_writable; /* only write writable bits. */
@@ -126,8 +125,8 @@ static inline void parport_gsc_write_control(struct parport *p,
/* Take this out when drivers have adapted to newer interface. */
if (d & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n",
- p->name, p->cad->name);
+ printk(KERN_DEBUG "%s (%s): use data_reverse for this!\n",
+ p->name, p->cad->name);
parport_gsc_data_reverse (p);
}
@@ -155,9 +154,9 @@ static inline unsigned char parport_gsc_frob_control(struct parport *p,
/* Take this out when drivers have adapted to newer interface. */
if (mask & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_%s for this!\n",
- p->name, p->cad->name,
- (val & 0x20) ? "reverse" : "forward");
+ printk(KERN_DEBUG "%s (%s): use data_%s for this!\n",
+ p->name, p->cad->name,
+ (val & 0x20) ? "reverse" : "forward");
if (val & 0x20)
parport_gsc_data_reverse (p);
else
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index ab215b650f41..48b084e86dc6 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -328,19 +328,19 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
"TST", "CFG"};
unsigned int ecr = readb(priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
- printk(" %s",
- ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
+ pr_cont(" %s",
+ ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
if (ecr & ECR_nERRINTR)
- printk(",nErrIntrEn");
+ pr_cont(",nErrIntrEn");
if (ecr & ECR_DMAEN)
- printk(",dmaEn");
+ pr_cont(",dmaEn");
if (ecr & ECR_SERVINTR)
- printk(",serviceIntr");
+ pr_cont(",serviceIntr");
if (ecr & ECR_F_FULL)
- printk(",f_full");
+ pr_cont(",f_full");
if (ecr & ECR_F_EMPTY)
- printk(",f_empty");
- printk("\n");
+ pr_cont(",f_empty");
+ pr_cont("\n");
}
if (show_ecp_config) {
unsigned int oecr, cnfgA, cnfgB;
@@ -352,52 +352,53 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
writeb(ECR_MODE_PS2, priv->regs.ecr);
writeb(oecr, priv->regs.ecr);
printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
- printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
+ pr_cont(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
switch (cnfgA & CNFGA_ID_MASK) {
case CNFGA_ID_8:
- printk(",8 bits");
+ pr_cont(",8 bits");
break;
case CNFGA_ID_16:
- printk(",16 bits");
+ pr_cont(",16 bits");
break;
case CNFGA_ID_32:
- printk(",32 bits");
+ pr_cont(",32 bits");
break;
default:
- printk(",unknown ID");
+ pr_cont(",unknown ID");
break;
}
if (!(cnfgA & CNFGA_nBYTEINTRANS))
- printk(",ByteInTrans");
+ pr_cont(",ByteInTrans");
if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
- printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
- ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
- printk("\n");
+ pr_cont(",%d byte%s left",
+ cnfgA & CNFGA_PWORDLEFT,
+ ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
+ pr_cont("\n");
printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
- printk(" irq=%u,dma=%u",
- (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
- (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
- printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
+ pr_cont(" irq=%u,dma=%u",
+ (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
+ (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
+ pr_cont(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
if (cnfgB & CNFGB_COMPRESS)
- printk(",compress");
- printk("\n");
+ pr_cont(",compress");
+ pr_cont("\n");
}
for (i = 0; i < 2; i++) {
unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
i ? "soft" : "hard", dcr);
- printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
+ pr_cont(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
if (dcr & DCR_IRQ)
- printk(",ackIntEn");
+ pr_cont(",ackIntEn");
if (!(dcr & DCR_SELECT))
- printk(",nSelectIn");
+ pr_cont(",nSelectIn");
if (dcr & DCR_nINIT)
- printk(",nInit");
+ pr_cont(",nInit");
if (!(dcr & DCR_AUTOFD))
- printk(",nAutoFD");
+ pr_cont(",nAutoFD");
if (!(dcr & DCR_STROBE))
- printk(",nStrobe");
- printk("\n");
+ pr_cont(",nStrobe");
+ pr_cont("\n");
}
#define sep (f++ ? ',' : ' ')
{
@@ -405,20 +406,20 @@ static void parport_ip32_dump_state(struct parport *p, char *str,
unsigned int dsr = readb(priv->regs.dsr);
printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
if (!(dsr & DSR_nBUSY))
- printk("%cBusy", sep);
+ pr_cont("%cBusy", sep);
if (dsr & DSR_nACK)
- printk("%cnAck", sep);
+ pr_cont("%cnAck", sep);
if (dsr & DSR_PERROR)
- printk("%cPError", sep);
+ pr_cont("%cPError", sep);
if (dsr & DSR_SELECT)
- printk("%cSelect", sep);
+ pr_cont("%cSelect", sep);
if (dsr & DSR_nFAULT)
- printk("%cnFault", sep);
+ pr_cont("%cnFault", sep);
if (!(dsr & DSR_nPRINT))
- printk("%c(Print)", sep);
+ pr_cont("%c(Print)", sep);
if (dsr & DSR_TIMEOUT)
- printk("%cTimeout", sep);
- printk("\n");
+ pr_cont("%cTimeout", sep);
+ pr_cont("\n");
}
#undef sep
}
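The printk() to pr_cont() conversions in parport_ip32_dump_state() matter because a bare printk() with no KERN_<LEVEL> prefix no longer reliably continues the previous line; each fragment would be emitted as its own message at the default loglevel. pr_cont() expands to printk(KERN_CONT ...), the explicit way to append to the record opened by an earlier printk(KERN_DEBUG ...). A minimal sketch of the pattern:

        #include <linux/printk.h>
        #include <linux/types.h>

        static void dump_status(u8 dsr)
        {
                printk(KERN_DEBUG "mydev: dsr=0x%02x", dsr);    /* opens the record    */
                if (!(dsr & 0x80))
                        pr_cont(",Busy");                       /* appends, no newline */
                pr_cont("\n");                                  /* terminates the line */
        }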
@@ -1337,9 +1338,8 @@ static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
ecr = parport_ip32_read_econtrol(p);
if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
&& !lost_interrupt) {
- printk(KERN_WARNING PPIP32
- "%s: lost interrupt in %s\n",
- p->name, __func__);
+ pr_warn(PPIP32 "%s: lost interrupt in %s\n",
+ p->name, __func__);
lost_interrupt = 1;
}
}
@@ -1643,8 +1643,8 @@ static size_t parport_ip32_compat_write_data(struct parport *p,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid to flood the logs */
if (ready_before)
- printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
- p->name, __func__);
+ pr_info(PPIP32 "%s: not ready in %s\n",
+ p->name, __func__);
ready_before = 0;
goto stop;
}
@@ -1704,7 +1704,7 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
/* Event 49: PError goes high. */
if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
- printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s",
+ printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
p->name, __func__);
physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
return 0;
@@ -1724,8 +1724,8 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
DSR_nBUSY | DSR_nFAULT)) {
/* Avoid to flood the logs */
if (ready_before)
- printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
- p->name, __func__);
+ pr_info(PPIP32 "%s: not ready in %s\n",
+ p->name, __func__);
ready_before = 0;
goto stop;
}
@@ -2064,8 +2064,7 @@ static __init struct parport *parport_ip32_probe_port(void)
p->modes |= PARPORT_MODE_TRISTATE;
if (!parport_ip32_fifo_supported(p)) {
- printk(KERN_WARNING PPIP32
- "%s: error: FIFO disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: FIFO disabled\n", p->name);
/* Disable hardware modes depending on a working FIFO. */
features &= ~PARPORT_IP32_ENABLE_SPP;
features &= ~PARPORT_IP32_ENABLE_ECP;
@@ -2077,8 +2076,7 @@ static __init struct parport *parport_ip32_probe_port(void)
if (features & PARPORT_IP32_ENABLE_IRQ) {
int irq = MACEISA_PARALLEL_IRQ;
if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
- printk(KERN_WARNING PPIP32
- "%s: error: IRQ disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: IRQ disabled\n", p->name);
/* DMA cannot work without interrupts. */
features &= ~PARPORT_IP32_ENABLE_DMA;
} else {
@@ -2091,8 +2089,7 @@ static __init struct parport *parport_ip32_probe_port(void)
/* Allocate DMA resources */
if (features & PARPORT_IP32_ENABLE_DMA) {
if (parport_ip32_dma_register())
- printk(KERN_WARNING PPIP32
- "%s: error: DMA disabled\n", p->name);
+ pr_warn(PPIP32 "%s: error: DMA disabled\n", p->name);
else {
pr_probe(p, "DMA support enabled\n");
p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
@@ -2134,13 +2131,15 @@ static __init struct parport *parport_ip32_probe_port(void)
parport_ip32_dump_state(p, "end init", 0);
/* Print out what we found */
- printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
- p->name, p->base, p->base_hi);
+ pr_info("%s: SGI IP32 at 0x%lx (0x%lx)", p->name, p->base, p->base_hi);
if (p->irq != PARPORT_IRQ_NONE)
- printk(", irq %d", p->irq);
- printk(" [");
-#define printmode(x) if (p->modes & PARPORT_MODE_##x) \
- printk("%s%s", f++ ? "," : "", #x)
+ pr_cont(", irq %d", p->irq);
+ pr_cont(" [");
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
unsigned int f = 0;
printmode(PCSPP);
@@ -2151,7 +2150,7 @@ static __init struct parport *parport_ip32_probe_port(void)
printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
parport_announce_port(p);
return p;
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 9f87faf939e3..d6bbe8446301 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -70,11 +70,6 @@
#define MAX_MFC 5
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-static inline int DPRINTK(void *nothing, ...) {return 0;}
-#endif
static struct parport *this_port[MAX_MFC] = {NULL, };
static volatile int dummy; /* for trigger readds */
@@ -84,7 +79,7 @@ static struct parport_operations pp_mfc3_ops;
static void mfc3_write_data(struct parport *p, unsigned char data)
{
-DPRINTK(KERN_DEBUG "write_data %c\n",data);
+ pr_debug("write_data %c\n", data);
dummy = pia(p)->pprb; /* clears irq bit */
/* Triggers also /STROBE.*/
@@ -128,13 +123,13 @@ static unsigned char control_mfc3_to_pc(unsigned char control)
static void mfc3_write_control(struct parport *p, unsigned char control)
{
-DPRINTK(KERN_DEBUG "write_control %02x\n",control);
+ pr_debug("write_control %02x\n", control);
pia(p)->ppra = (pia(p)->ppra & 0x1f) | control_pc_to_mfc3(control);
}
static unsigned char mfc3_read_control( struct parport *p)
{
-DPRINTK(KERN_DEBUG "read_control \n");
+ pr_debug("read_control\n");
return control_mfc3_to_pc(pia(p)->ppra & 0xe0);
}
@@ -142,7 +137,7 @@ static unsigned char mfc3_frob_control( struct parport *p, unsigned char mask, u
{
unsigned char old;
-DPRINTK(KERN_DEBUG "frob_control mask %02x, value %02x\n",mask,val);
+ pr_debug("frob_control mask %02x, value %02x\n", mask, val);
old = mfc3_read_control(p);
mfc3_write_control(p, (old & ~mask) ^ val);
return old;
@@ -171,7 +166,7 @@ static unsigned char mfc3_read_status(struct parport *p)
unsigned char status;
status = status_mfc3_to_pc(pia(p)->ppra & 0x1f);
-DPRINTK(KERN_DEBUG "read_status %02x\n", status);
+ pr_debug("read_status %02x\n", status);
return status;
}
@@ -202,7 +197,7 @@ static void mfc3_disable_irq(struct parport *p)
static void mfc3_data_forward(struct parport *p)
{
- DPRINTK(KERN_DEBUG "forward\n");
+ pr_debug("forward\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 255; /* all pins output */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
@@ -210,7 +205,7 @@ static void mfc3_data_forward(struct parport *p)
static void mfc3_data_reverse(struct parport *p)
{
- DPRINTK(KERN_DEBUG "reverse\n");
+ pr_debug("reverse\n");
pia(p)->crb &= ~PIA_DDR; /* make data direction register visible */
pia(p)->pddrb = 0; /* all pins input */
pia(p)->crb |= PIA_DDR; /* make data register visible - default */
@@ -325,7 +320,7 @@ static int __init parport_mfc3_init(void)
p->dev = &z->dev;
this_port[pias++] = p;
- printk(KERN_INFO "%s: Multiface III port using irq\n", p->name);
+ pr_info("%s: Multiface III port using irq\n", p->name);
/* XXX: set operating mode */
p->private_data = (void *)piabase;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 1f17a39eabe8..77e37e3cb3a0 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -87,13 +87,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(stuff...)
-#endif
-
-
#define NR_SUPERIOS 3
static struct superio_struct { /* For Super-IO chips autodetection */
int io;
@@ -118,8 +111,8 @@ static void frob_econtrol(struct parport *pb, unsigned char m,
if (m != 0xff)
ectr = inb(ECONTROL(pb));
- DPRINTK(KERN_DEBUG "frob_econtrol(%02x,%02x): %02x -> %02x\n",
- m, v, ectr, (ectr & ~m) ^ v);
+ pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n",
+ m, v, ectr, (ectr & ~m) ^ v);
outb((ectr & ~m) ^ v, ECONTROL(pb));
}
@@ -142,7 +135,7 @@ static int change_mode(struct parport *p, int m)
unsigned char oecr;
int mode;
- DPRINTK(KERN_INFO "parport change_mode ECP-ISA to mode 0x%02x\n", m);
+ pr_debug("parport change_mode ECP-ISA to mode 0x%02x\n", m);
if (!priv->ecr) {
printk(KERN_DEBUG "change_mode: but there's no ECR!\n");
@@ -298,8 +291,8 @@ static size_t parport_pc_epp_read_data(struct parport *port, void *buf,
status = inb(STATUS(port));
if (status & 0x01) {
/* EPP timeout should never occur... */
- printk(KERN_DEBUG
-"%s: EPP timeout occurred while talking to w91284pic (should not have done)\n", port->name);
+ printk(KERN_DEBUG "%s: EPP timeout occurred while talking to w91284pic (should not have done)\n",
+ port->name);
clear_epp_timeout(port);
}
}
@@ -727,7 +720,7 @@ static size_t parport_pc_compat_write_block_pio(struct parport *port,
r = change_mode(port, ECR_PPF); /* Parallel port FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_PPF failed\n",
- port->name);
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
@@ -770,9 +763,8 @@ static size_t parport_pc_compat_write_block_pio(struct parport *port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk(KERN_DEBUG
- "%s: BUSY timeout (%d) in compat_write_block_pio\n",
- port->name, r);
+ printk(KERN_DEBUG "%s: BUSY timeout (%d) in compat_write_block_pio\n",
+ port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -810,8 +802,8 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r) {
- printk(KERN_DEBUG "%s: PError timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PError timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
}
}
@@ -824,7 +816,7 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
r = change_mode(port, ECR_ECP); /* ECP FIFO */
if (r)
printk(KERN_DEBUG "%s: Warning change_mode ECR_ECP failed\n",
- port->name);
+ port->name);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;
/* Write the data to the FIFO. */
@@ -867,8 +859,8 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
parport_frob_control(port, PARPORT_CONTROL_INIT, 0);
r = parport_wait_peripheral(port, PARPORT_STATUS_PAPEROUT, 0);
if (r)
- printk(KERN_DEBUG "%s: PE,1 timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PE,1 timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
parport_frob_control(port,
PARPORT_CONTROL_INIT,
@@ -877,17 +869,16 @@ static size_t parport_pc_ecp_write_block_pio(struct parport *port,
PARPORT_STATUS_PAPEROUT,
PARPORT_STATUS_PAPEROUT);
if (r)
- printk(KERN_DEBUG "%s: PE,2 timeout (%d) "
- "in ecp_write_block_pio\n", port->name, r);
+ printk(KERN_DEBUG "%s: PE,2 timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
}
r = parport_wait_peripheral(port,
PARPORT_STATUS_BUSY,
PARPORT_STATUS_BUSY);
if (r)
- printk(KERN_DEBUG
- "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
- port->name, r);
+ printk(KERN_DEBUG "%s: BUSY timeout (%d) in ecp_write_block_pio\n",
+ port->name, r);
port->physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
@@ -982,28 +973,24 @@ static void show_parconfig_smsc37c669(int io, int key)
outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO
- "SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, "
- "A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
+ pr_info("SMSC 37c669 LPT Config: cr_1=0x%02x, 4=0x%02x, A=0x%2x, 23=0x%02x, 26=0x%02x, 27=0x%02x\n",
cr1, cr4, cra, cr23, cr26, cr27);
/* The documentation calls DMA and IRQ-Lines by letters, so
the board maker can/will wire them
appropriately/randomly... G=reserved H=IDE-irq, */
- printk(KERN_INFO
- "SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
- cr23 * 4,
- (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
- (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
- cra & 0x0f);
- printk(KERN_INFO "SMSC LPT Config: enabled=%s power=%s\n",
- (cr23 * 4 >= 0x100) ? "yes" : "no",
- (cr1 & 4) ? "yes" : "no");
- printk(KERN_INFO
- "SMSC LPT Config: Port mode=%s, EPP version =%s\n",
- (cr1 & 0x08) ? "Standard mode only (SPP)"
- : modes[cr4 & 0x03],
- (cr4 & 0x40) ? "1.7" : "1.9");
+ pr_info("SMSC LPT Config: io=0x%04x, irq=%c, dma=%c, fifo threshold=%d\n",
+ cr23 * 4,
+ (cr27 & 0x0f) ? 'A' - 1 + (cr27 & 0x0f) : '-',
+ (cr26 & 0x0f) ? 'A' - 1 + (cr26 & 0x0f) : '-',
+ cra & 0x0f);
+ pr_info("SMSC LPT Config: enabled=%s power=%s\n",
+ (cr23 * 4 >= 0x100) ? "yes" : "no",
+ (cr1 & 4) ? "yes" : "no");
+ pr_info("SMSC LPT Config: Port mode=%s, EPP version =%s\n",
+ (cr1 & 0x08) ? "Standard mode only (SPP)"
+ : modes[cr4 & 0x03],
+ (cr4 & 0x40) ? "1.7" : "1.9");
}
/* Heuristics ! BIOS setup for this mainboard device limits
@@ -1013,7 +1000,7 @@ static void show_parconfig_smsc37c669(int io, int key)
if (cr23 * 4 >= 0x100) { /* if active */
s = find_free_superio();
if (s == NULL)
- printk(KERN_INFO "Super-IO: too many chips!\n");
+ pr_info("Super-IO: too many chips!\n");
else {
int d;
switch (cr23 * 4) {
@@ -1078,26 +1065,24 @@ static void show_parconfig_winbond(int io, int key)
outb(0xaa, io);
if (verbose_probing) {
- printk(KERN_INFO
- "Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
- cr30, cr60, cr61, cr70, cr74, crf0);
- printk(KERN_INFO "Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
- (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
+ pr_info("Winbond LPT Config: cr_30=%02x 60,61=%02x%02x 70=%02x 74=%02x, f0=%02x\n",
+ cr30, cr60, cr61, cr70, cr74, crf0);
+ pr_info("Winbond LPT Config: active=%s, io=0x%02x%02x irq=%d, ",
+ (cr30 & 0x01) ? "yes" : "no", cr60, cr61, cr70 & 0x0f);
if ((cr74 & 0x07) > 3)
pr_cont("dma=none\n");
else
pr_cont("dma=%d\n", cr74 & 0x07);
- printk(KERN_INFO
- "Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
- irqtypes[crf0>>7], (crf0>>3)&0x0f);
- printk(KERN_INFO "Winbond LPT Config: Port mode=%s\n",
- modes[crf0 & 0x07]);
+ pr_info("Winbond LPT Config: irqtype=%s, ECP fifo threshold=%d\n",
+ irqtypes[crf0 >> 7], (crf0 >> 3) & 0x0f);
+ pr_info("Winbond LPT Config: Port mode=%s\n",
+ modes[crf0 & 0x07]);
}
if (cr30 & 0x01) { /* the settings can be interrogated later ... */
s = find_free_superio();
if (s == NULL)
- printk(KERN_INFO "Super-IO: too many chips!\n");
+ pr_info("Super-IO: too many chips!\n");
else {
s->io = (cr60 << 8) | cr61;
s->irq = cr70 & 0x0f;
@@ -1151,9 +1136,8 @@ static void decode_winbond(int efer, int key, int devid, int devrev, int oldid)
progif = 0;
if (verbose_probing)
- printk(KERN_INFO "Winbond chip at EFER=0x%x key=0x%02x "
- "devid=%02x devrev=%02x oldid=%02x type=%s\n",
- efer, key, devid, devrev, oldid, type);
+ pr_info("Winbond chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x oldid=%02x type=%s\n",
+ efer, key, devid, devrev, oldid, type);
if (progif == 2)
show_parconfig_winbond(efer, key);
@@ -1184,9 +1168,8 @@ static void decode_smsc(int efer, int key, int devid, int devrev)
type = "37c666GT";
if (verbose_probing)
- printk(KERN_INFO "SMSC chip at EFER=0x%x "
- "key=0x%02x devid=%02x devrev=%02x type=%s\n",
- efer, key, devid, devrev, type);
+ pr_info("SMSC chip at EFER=0x%x key=0x%02x devid=%02x devrev=%02x type=%s\n",
+ efer, key, devid, devrev, type);
if (func)
func(efer, key);
@@ -1358,7 +1341,7 @@ static void detect_and_report_it87(void)
dev |= inb(0x2f);
if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
- printk(KERN_INFO "IT%04X SuperIO detected.\n", dev);
+ pr_info("IT%04X SuperIO detected\n", dev);
outb(0x07, 0x2E); /* Parallel Port */
outb(0x03, 0x2F);
outb(0xF0, 0x2E); /* BOOT 0x80 off */
@@ -1445,8 +1428,8 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified)
/* That didn't work, but the user thinks there's a
* port here. */
- printk(KERN_INFO "parport 0x%lx (WARNING): CTR: "
- "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
+ pr_info("parport 0x%lx (WARNING): CTR: wrote 0x%02x, read 0x%02x\n",
+ pb->base, w, r);
/* Try the data register. The data lines aren't tri-stated at
* this stage, so we expect back what we wrote. */
@@ -1464,10 +1447,9 @@ static int parport_SPP_supported(struct parport *pb)
if (user_specified) {
/* Didn't work, but the user is convinced this is the
* place. */
- printk(KERN_INFO "parport 0x%lx (WARNING): DATA: "
- "wrote 0x%02x, read 0x%02x\n", pb->base, w, r);
- printk(KERN_INFO "parport 0x%lx: You gave this address, "
- "but there is probably no parallel port there!\n",
+ pr_info("parport 0x%lx (WARNING): DATA: wrote 0x%02x, read 0x%02x\n",
+ pb->base, w, r);
+ pr_info("parport 0x%lx: You gave this address, but there is probably no parallel port there!\n",
pb->base);
}
@@ -1620,7 +1602,7 @@ static int parport_ECP_supported(struct parport *pb)
if (i <= priv->fifo_depth) {
if (verbose_probing)
printk(KERN_DEBUG "0x%lx: writeIntrThreshold is %d\n",
- pb->base, i);
+ pb->base, i);
} else
/* Number of bytes we know we can write if we get an
interrupt. */
@@ -1642,7 +1624,7 @@ static int parport_ECP_supported(struct parport *pb)
if (i <= priv->fifo_depth) {
if (verbose_probing)
- printk(KERN_INFO "0x%lx: readIntrThreshold is %d\n",
+ pr_info("0x%lx: readIntrThreshold is %d\n",
pb->base, i);
} else
/* Number of bytes we can read if we get an interrupt. */
@@ -1657,17 +1639,14 @@ static int parport_ECP_supported(struct parport *pb)
switch (pword) {
case 0:
pword = 2;
- printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
- pb->base);
+ pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
case 2:
pword = 4;
- printk(KERN_WARNING "0x%lx: Unsupported pword size!\n",
- pb->base);
+ pr_warn("0x%lx: Unsupported pword size!\n", pb->base);
break;
default:
- printk(KERN_WARNING "0x%lx: Unknown implementation ID\n",
- pb->base);
+ pr_warn("0x%lx: Unknown implementation ID\n", pb->base);
/* Fall through - Assume 1 */
case 1:
pword = 1;
@@ -1676,14 +1655,14 @@ static int parport_ECP_supported(struct parport *pb)
if (verbose_probing) {
printk(KERN_DEBUG "0x%lx: PWord is %d bits\n",
- pb->base, 8 * pword);
+ pb->base, 8 * pword);
- printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n", pb->base,
- config & 0x80 ? "Level" : "Pulses");
+ printk(KERN_DEBUG "0x%lx: Interrupts are ISA-%s\n",
+ pb->base, config & 0x80 ? "Level" : "Pulses");
configb = inb(CONFIGB(pb));
printk(KERN_DEBUG "0x%lx: ECP port cfgA=0x%02x cfgB=0x%02x\n",
- pb->base, config, configb);
+ pb->base, config, configb);
printk(KERN_DEBUG "0x%lx: ECP settings irq=", pb->base);
if ((configb >> 3) & 0x07)
pr_cont("%d", intrline[(configb >> 3) & 0x07]);
@@ -2107,9 +2086,9 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->size = (p->modes & PARPORT_MODE_EPP) ? 8 : 3;
- printk(KERN_INFO "%s: PC-style at 0x%lx", p->name, p->base);
+ pr_info("%s: PC-style at 0x%lx", p->name, p->base);
if (p->base_hi && priv->ecr)
- printk(KERN_CONT " (0x%lx)", p->base_hi);
+ pr_cont(" (0x%lx)", p->base_hi);
if (p->irq == PARPORT_IRQ_AUTO) {
p->irq = PARPORT_IRQ_NONE;
parport_irq_probe(p);
@@ -2120,7 +2099,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(KERN_CONT ", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
priv->ctr_writable |= 0x10;
if (p->dma == PARPORT_DMA_AUTO) {
@@ -2144,41 +2123,39 @@ struct parport *parport_pc_probe_port(unsigned long int base,
/* p->ops->ecp_read_data = parport_pc_ecp_read_block_pio; */
#endif /* IEEE 1284 support */
if (p->dma != PARPORT_DMA_NONE) {
- printk(KERN_CONT ", dma %d", p->dma);
+ pr_cont(", dma %d", p->dma);
p->modes |= PARPORT_MODE_DMA;
} else
- printk(KERN_CONT ", using FIFO");
+ pr_cont(", using FIFO");
} else
/* We can't use the DMA channel after all. */
p->dma = PARPORT_DMA_NONE;
#endif /* Allowed to use FIFO/DMA */
- printk(KERN_CONT " [");
+ pr_cont(" [");
-#define printmode(x) \
- {\
- if (p->modes & PARPORT_MODE_##x) {\
- printk(KERN_CONT "%s%s", f ? "," : "", #x);\
- f++;\
- } \
- }
+#define printmode(x) \
+do { \
+ if (p->modes & PARPORT_MODE_##x) \
+ pr_cont("%s%s", f++ ? "," : "", #x); \
+} while (0)
{
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
- printmode(COMPAT)
+ printmode(COMPAT);
printmode(EPP);
printmode(ECP);
printmode(DMA);
}
#undef printmode
#ifndef CONFIG_PARPORT_1284
- printk(KERN_CONT "(,...)");
+ pr_cont("(,...)");
#endif /* CONFIG_PARPORT_1284 */
- printk(KERN_CONT "]\n");
+ pr_cont("]\n");
if (probedirq != PARPORT_IRQ_NONE)
- printk(KERN_INFO "%s: irq %d detected\n", p->name, probedirq);
+ pr_info("%s: irq %d detected\n", p->name, probedirq);
/* If No ECP release the ports grabbed above. */
if (ECR_res && (p->modes & PARPORT_MODE_ECP) == 0) {
@@ -2193,8 +2170,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq(p->irq, parport_irq_handler,
irqflags, p->name, p)) {
- printk(KERN_WARNING "%s: irq %d in use, "
- "resorting to polled operation\n",
+ pr_warn("%s: irq %d in use, resorting to polled operation\n",
p->name, p->irq);
p->irq = PARPORT_IRQ_NONE;
p->dma = PARPORT_DMA_NONE;
@@ -2204,8 +2180,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
#ifdef HAS_DMA
if (p->dma != PARPORT_DMA_NONE) {
if (request_dma(p->dma, p->name)) {
- printk(KERN_WARNING "%s: dma %d in use, "
- "resorting to PIO operation\n",
+ pr_warn("%s: dma %d in use, resorting to PIO operation\n",
p->name, p->dma);
p->dma = PARPORT_DMA_NONE;
} else {
@@ -2215,9 +2190,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
&priv->dma_handle,
GFP_KERNEL);
if (!priv->dma_buf) {
- printk(KERN_WARNING "%s: "
- "cannot get buffer for DMA, "
- "resorting to PIO operation\n",
+ pr_warn("%s: cannot get buffer for DMA, resorting to PIO operation\n",
p->name);
free_dma(p->dma);
p->dma = PARPORT_DMA_NONE;
@@ -2313,7 +2286,7 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
int irq;
int i;
- DPRINTK(KERN_DEBUG "sio_ite_8872_probe()\n");
+ pr_debug("sio_ite_8872_probe()\n");
/* make sure which one chip */
for (i = 0; i < 5; i++) {
@@ -2330,7 +2303,7 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
}
}
if (i >= 5) {
- printk(KERN_INFO "parport_pc: cannot find ITE8872 INTA\n");
+ pr_info("parport_pc: cannot find ITE8872 INTA\n");
return 0;
}
@@ -2339,29 +2312,28 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
switch (type) {
case 0x2:
- printk(KERN_INFO "parport_pc: ITE8871 found (1P)\n");
+ pr_info("parport_pc: ITE8871 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xa:
- printk(KERN_INFO "parport_pc: ITE8875 found (1P)\n");
+ pr_info("parport_pc: ITE8875 found (1P)\n");
ite8872set = 0x64200000;
break;
case 0xe:
- printk(KERN_INFO "parport_pc: ITE8872 found (2S1P)\n");
+ pr_info("parport_pc: ITE8872 found (2S1P)\n");
ite8872set = 0x64e00000;
break;
case 0x6:
- printk(KERN_INFO "parport_pc: ITE8873 found (1S)\n");
+ pr_info("parport_pc: ITE8873 found (1S)\n");
release_region(inta_addr[i], 32);
return 0;
case 0x8:
- printk(KERN_INFO "parport_pc: ITE8874 found (2S)\n");
+ pr_info("parport_pc: ITE8874 found (2S)\n");
release_region(inta_addr[i], 32);
return 0;
default:
- printk(KERN_INFO "parport_pc: unknown ITE887x\n");
- printk(KERN_INFO "parport_pc: please mail 'lspci -nvv' "
- "output to Rich.Liu@ite.com.tw\n");
+ pr_info("parport_pc: unknown ITE887x\n");
+ pr_info("parport_pc: please mail 'lspci -nvv' output to Rich.Liu@ite.com.tw\n");
release_region(inta_addr[i], 32);
return 0;
}
@@ -2379,11 +2351,9 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
pci_write_config_dword(pdev, 0x9c,
ite8872set | (ite8872_irq * 0x11111));
- DPRINTK(KERN_DEBUG "ITE887x: The IRQ is %d.\n", ite8872_irq);
- DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O port is 0x%x.\n",
- ite8872_lpt);
- DPRINTK(KERN_DEBUG "ITE887x: The PARALLEL I/O porthi is 0x%x.\n",
- ite8872_lpthi);
+ pr_debug("ITE887x: The IRQ is %d\n", ite8872_irq);
+ pr_debug("ITE887x: The PARALLEL I/O port is 0x%x\n", ite8872_lpt);
+ pr_debug("ITE887x: The PARALLEL I/O porthi is 0x%x\n", ite8872_lpthi);
/* Let the user (or defaults) steer us away from interrupts */
irq = ite8872_irq;
@@ -2396,9 +2366,8 @@ static int sio_ite_8872_probe(struct pci_dev *pdev, int autoirq, int autodma,
release_region(inta_addr[i], 32);
if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
- printk(KERN_INFO
- "parport_pc: ITE 8872 parallel port: io=0x%X",
- ite8872_lpt);
+ pr_info("parport_pc: ITE 8872 parallel port: io=0x%X",
+ ite8872_lpt);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
pr_cont("\n");
@@ -2471,8 +2440,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
have_epp = 1;
break;
default:
- printk(KERN_DEBUG
- "parport_pc: probing current configuration\n");
+ printk(KERN_DEBUG "parport_pc: probing current configuration\n");
siofunc = VIA_FUNCTION_PROBE;
break;
}
@@ -2508,12 +2476,11 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
port1 = inb(VIA_CONFIG_DATA) << 2;
printk(KERN_DEBUG "parport_pc: Current parallel port base: 0x%X\n",
- port1);
+ port1);
if (port1 == 0x3BC && have_epp) {
outb(via->viacfg_parport_base, VIA_CONFIG_INDEX);
outb((0x378 >> 2), VIA_CONFIG_DATA);
- printk(KERN_DEBUG
- "parport_pc: Parallel port base changed to 0x378\n");
+ printk(KERN_DEBUG "parport_pc: Parallel port base changed to 0x378\n");
port1 = 0x378;
}
@@ -2525,7 +2492,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
pci_write_config_byte(pdev, via->via_pci_superio_config_reg, tmp);
if (siofunc == VIA_FUNCTION_PARPORT_DISABLE) {
- printk(KERN_INFO "parport_pc: VIA parallel port disabled in BIOS\n");
+ pr_info("parport_pc: VIA parallel port disabled in BIOS\n");
return 0;
}
@@ -2558,9 +2525,8 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
case 0x278:
port2 = 0x678; break;
default:
- printk(KERN_INFO
- "parport_pc: Weird VIA parport base 0x%X, ignoring\n",
- port1);
+ pr_info("parport_pc: Weird VIA parport base 0x%X, ignoring\n",
+ port1);
return 0;
}
@@ -2579,8 +2545,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
/* finally, do the probe with values obtained */
if (parport_pc_probe_port(port1, port2, irq, dma, &pdev->dev, 0)) {
- printk(KERN_INFO
- "parport_pc: VIA parallel port: io=0x%X", port1);
+ pr_info("parport_pc: VIA parallel port: io=0x%X", port1);
if (irq != PARPORT_IRQ_NONE)
pr_cont(", irq=%d", irq);
if (dma != PARPORT_DMA_NONE)
@@ -2589,7 +2554,7 @@ static int sio_via_probe(struct pci_dev *pdev, int autoirq, int autodma,
return 1;
}
- printk(KERN_WARNING "parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
+ pr_warn("parport_pc: Strange, can't probe VIA parallel port: io=0x%X, irq=%d, dma=%d\n",
port1, irq, dma);
return 0;
}
@@ -2854,14 +2819,12 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
/* TODO: test if sharing interrupts works */
irq = dev->irq;
if (irq == IRQ_NONE) {
- printk(KERN_DEBUG
- "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
- id->vendor, id->device, io_lo, io_hi);
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx)\n",
+ id->vendor, id->device, io_lo, io_hi);
irq = PARPORT_IRQ_NONE;
} else {
- printk(KERN_DEBUG
- "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
- id->vendor, id->device, io_lo, io_hi, irq);
+ printk(KERN_DEBUG "PCI parallel port detected: %04x:%04x, I/O at %#lx(%#lx), IRQ %d\n",
+ id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
parport_pc_probe_port(io_lo, io_hi, irq,
@@ -3111,7 +3074,7 @@ static int __init parport_parse_param(const char *s, int *val,
if (ep != s)
*val = r;
else {
- printk(KERN_ERR "parport: bad specifier `%s'\n", s);
+ pr_err("parport: bad specifier `%s'\n", s);
return -1;
}
}
@@ -3133,8 +3096,8 @@ static int __init parport_parse_dma(const char *dmastr, int *val)
#ifdef CONFIG_PCI
static int __init parport_init_mode_setup(char *str)
{
- printk(KERN_DEBUG
- "parport_pc.c: Specified parameter parport_init_mode=%s\n", str);
+ printk(KERN_DEBUG "parport_pc.c: Specified parameter parport_init_mode=%s\n",
+ str);
if (!strcmp(str, "spp"))
parport_init_mode = 1;
@@ -3201,10 +3164,7 @@ static int __init parse_parport_params(void)
irqval[0] = val;
break;
default:
- printk(KERN_WARNING
- "parport_pc: irq specified "
- "without base address. Use 'io=' "
- "to specify one\n");
+ pr_warn("parport_pc: irq specified without base address. Use 'io=' to specify one\n");
}
if (dma[0] && !parport_parse_dma(dma[0], &val))
@@ -3214,10 +3174,7 @@ static int __init parse_parport_params(void)
dmaval[0] = val;
break;
default:
- printk(KERN_WARNING
- "parport_pc: dma specified "
- "without base address. Use 'io=' "
- "to specify one\n");
+ pr_warn("parport_pc: dma specified without base address. Use 'io=' to specify one\n");
}
}
return 0;
@@ -3256,12 +3213,12 @@ static int __init parport_setup(char *str)
val = simple_strtoul(str, &endptr, 0);
if (endptr == str) {
- printk(KERN_WARNING "parport=%s not understood\n", str);
+ pr_warn("parport=%s not understood\n", str);
return 1;
}
if (parport_setup_ptr == PARPORT_PC_MAX_PORTS) {
- printk(KERN_ERR "parport=%s ignored, too many ports\n", str);
+ pr_err("parport=%s ignored, too many ports\n", str);
return 1;
}
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index d5a669b60c27..e840c1b5ab90 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -314,7 +314,7 @@ static int bpp_probe(struct platform_device *op)
value_tcr &= ~P_TCR_DIR;
sbus_writeb(value_tcr, &regs->p_tcr);
- printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base);
+ pr_info("%s: sunbpp at 0x%lx\n", p->name, p->base);
dev_set_drvdata(&op->dev, p);
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..7e6d713fa5ac 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -38,16 +38,16 @@ static void pretty_print(struct parport *port, int device)
{
struct parport_device_info *info = &port->probe_info[device + 1];
- printk(KERN_INFO "%s", port->name);
+ pr_info("%s", port->name);
if (device >= 0)
- printk (" (addr %d)", device);
+ pr_cont(" (addr %d)", device);
- printk (": %s", classes[info->class].descr);
+ pr_cont(": %s", classes[info->class].descr);
if (info->class)
- printk(", %s %s", info->mfr, info->model);
+ pr_cont(", %s %s", info->mfr, info->model);
- printk("\n");
+ pr_cont("\n");
}
static void parse_data(struct parport *port, int device, char *str)
@@ -58,7 +58,7 @@ static void parse_data(struct parport *port, int device, char *str)
struct parport_device_info *info = &port->probe_info[device + 1];
if (!txt) {
- printk(KERN_WARNING "%s probe: memory squeeze\n", port->name);
+ pr_warn("%s probe: memory squeeze\n", port->name);
return;
}
strcpy(txt, str);
@@ -98,7 +98,8 @@ static void parse_data(struct parport *port, int device, char *str)
goto rock_on;
}
}
- printk(KERN_WARNING "%s probe: warning, class '%s' not understood.\n", port->name, sep);
+ pr_warn("%s probe: warning, class '%s' not understood\n",
+ port->name, sep);
info->class = PARPORT_CLASS_OTHER;
} else if (!strcmp(p, "CMD") ||
!strcmp(p, "COMMAND SET")) {
@@ -177,9 +178,8 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
* just return constant nibble forever. This catches
* also those cases. */
if (idlens[0] == 0 || idlens[0] > 0xFFF) {
- printk (KERN_DEBUG "%s: reported broken Device ID"
- " length of %#zX bytes\n",
- port->name, idlens[0]);
+ printk(KERN_DEBUG "%s: reported broken Device ID length of %#zX bytes\n",
+ port->name, idlens[0]);
return -EIO;
}
numidlens = 2;
@@ -201,10 +201,8 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
if (belen != len) {
- printk (KERN_DEBUG "%s: Device ID was %zd bytes"
- " while device told it would be %d"
- " bytes\n",
- port->name, len, belen);
+ printk(KERN_DEBUG "%s: Device ID was %zd bytes while device told it would be %d bytes\n",
+ port->name, len, belen);
}
goto done;
}
@@ -214,11 +212,9 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
* the first 256 bytes or so that we must have read so
* far. */
if (buffer[len-1] == ';') {
- printk (KERN_DEBUG "%s: Device ID reading stopped"
- " before device told data not available. "
- "Current idlen %u of %u, len bytes %02X %02X\n",
- port->name, current_idlen, numidlens,
- length[0], length[1]);
+ printk(KERN_DEBUG "%s: Device ID reading stopped before device told data not available. Current idlen %u of %u, len bytes %02X %02X\n",
+ port->name, current_idlen, numidlens,
+ length[0], length[1]);
goto done;
}
}
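The probe.c hunks above reflow messages around IEEE 1284 Device ID handling: the ID is a 2-byte big-endian length (which includes those two bytes) followed by "KEY:value;" pairs such as MFG, MDL, CLS and CMD, which parse_data() splits apart. A standalone user-space sketch of that layout, illustrative only and not the kernel parser:

/* Sketch of the IEEE 1284 Device ID layout: 2-byte BE length, then
 * semicolon-terminated "KEY:value" pairs. */
#include <stdio.h>
#include <string.h>

static void parse_device_id(const unsigned char *id)
{
        size_t total = ((size_t)id[0] << 8) | id[1];  /* includes length bytes */
        char buf[1024];
        char *p, *save;
        size_t len;

        if (total < 2)
                return;
        len = total - 2;
        if (len >= sizeof(buf))
                len = sizeof(buf) - 1;      /* clamp for this sketch */

        memcpy(buf, id + 2, len);
        buf[len] = '\0';

        for (p = strtok_r(buf, ";", &save); p; p = strtok_r(NULL, ";", &save)) {
                char *sep = strchr(p, ':');

                if (sep) {
                        *sep = '\0';
                        printf("key '%s' -> value '%s'\n", p, sep + 1);
                }
        }
}

Feeding it the bytes 0x00 0x20 followed by "MFG:ACME;MDL:LP-1;CLS:PRINTER;" prints three key/value pairs.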
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index ee7b5daabfd4..d740eba3c099 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -210,7 +210,11 @@ static int do_hardware_modes(struct ctl_table *table, int write,
return -EACCES;
{
-#define printmode(x) {if(port->modes&PARPORT_MODE_##x){len+=sprintf(buffer+len,"%s%s",f?",":"",#x);f++;}}
+#define printmode(x) \
+do { \
+ if (port->modes & PARPORT_MODE_##x) \
+ len += sprintf(buffer + len, "%s%s", f++ ? "," : "", #x); \
+} while (0)
int f = 0;
printmode(PCSPP);
printmode(TRISTATE);
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index d6920ebeabcd..7fec4fefe151 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -278,46 +278,32 @@ static int port_detect(struct device *dev, void *dev_drv)
int __parport_register_driver(struct parport_driver *drv, struct module *owner,
const char *mod_name)
{
- if (drv->devmodel) {
- /* using device model */
- int ret;
-
- /* initialize common driver fields */
- drv->driver.name = drv->name;
- drv->driver.bus = &parport_bus_type;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
- ret = driver_register(&drv->driver);
- if (ret)
- return ret;
+ /* using device model */
+ int ret;
- /*
- * check if bus has any parallel port registered, if
- * none is found then load the lowlevel driver.
- */
- ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
- port_detect);
- if (!ret)
- get_lowlevel_driver();
-
- mutex_lock(&registration_lock);
- if (drv->match_port)
- bus_for_each_dev(&parport_bus_type, NULL, drv,
- port_check);
- mutex_unlock(&registration_lock);
- } else {
- struct parport *port;
-
- drv->devmodel = false;
-
- if (list_empty(&portlist))
- get_lowlevel_driver();
- mutex_lock(&registration_lock);
- list_for_each_entry(port, &portlist, list)
- drv->attach(port);
- list_add(&drv->list, &drivers);
- mutex_unlock(&registration_lock);
- }
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &parport_bus_type;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+ ret = driver_register(&drv->driver);
+ if (ret)
+ return ret;
+
+ /*
+ * check if bus has any parallel port registered, if
+ * none is found then load the lowlevel driver.
+ */
+ ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
+ port_detect);
+ if (!ret)
+ get_lowlevel_driver();
+
+ mutex_lock(&registration_lock);
+ if (drv->match_port)
+ bus_for_each_dev(&parport_bus_type, NULL, drv,
+ port_check);
+ mutex_unlock(&registration_lock);
return 0;
}
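With the legacy branch removed above, every parport_driver is registered on the parport bus through driver_register(), and the devmodel flag no longer selects a code path. A hedged sketch of what a client driver looks like after this change (hypothetical "foo" driver; field names follow struct parport_driver in <linux/parport.h>):

/* Hedged sketch of a device-model parport client driver. */
#include <linux/module.h>
#include <linux/parport.h>

static void foo_attach(struct parport *port)
{
        pr_info("foo: %s appeared\n", port->name);
}

static void foo_detach(struct parport *port)
{
        pr_info("foo: %s went away\n", port->name);
}

static struct parport_driver foo_driver = {
        .name           = "foo",
        .match_port     = foo_attach,
        .detach         = foo_detach,
};

static int __init foo_init(void)
{
        /* expands to __parport_register_driver(&foo_driver, THIS_MODULE, ...) */
        return parport_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
        parport_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");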
@@ -352,17 +338,9 @@ static int port_detach(struct device *dev, void *_drv)
void parport_unregister_driver(struct parport_driver *drv)
{
- struct parport *port;
-
mutex_lock(&registration_lock);
- if (drv->devmodel) {
- bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
- driver_unregister(&drv->driver);
- } else {
- list_del_init(&drv->list);
- list_for_each_entry(port, &portlist, list)
- drv->detach(port);
- }
+ bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
+ driver_unregister(&drv->driver);
mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);
@@ -554,8 +532,8 @@ void parport_announce_port(struct parport *port)
#endif
if (!port->dev)
- printk(KERN_WARNING "%s: fix this legacy no-device port driver!\n",
- port->name);
+ pr_warn("%s: fix this legacy no-device port driver!\n",
+ port->name);
parport_proc_register(port);
mutex_lock(&registration_lock);
@@ -641,47 +619,48 @@ void parport_remove_port(struct parport *port)
}
EXPORT_SYMBOL(parport_remove_port);
+static void free_pardevice(struct device *dev)
+{
+ struct pardevice *par_dev = to_pardevice(dev);
+
+ kfree(par_dev->name);
+ kfree(par_dev);
+}
+
/**
- * parport_register_device - register a device on a parallel port
+ * parport_register_dev_model - register a device on a parallel port
* @port: port to which the device is attached
* @name: a name to refer to the device
- * @pf: preemption callback
- * @kf: kick callback (wake-up)
- * @irq_func: interrupt handler
- * @flags: registration flags
- * @handle: data for callback functions
+ * @par_dev_cb: struct containing callbacks
+ * @id: device number to be given to the device
*
* This function, called by parallel port device drivers,
* declares that a device is connected to a port, and tells the
* system all it needs to know.
*
- * The @name is allocated by the caller and must not be
- * deallocated until the caller calls @parport_unregister_device
- * for that device.
- *
- * The preemption callback function, @pf, is called when this
- * device driver has claimed access to the port but another
- * device driver wants to use it. It is given @handle as its
- * parameter, and should return zero if it is willing for the
- * system to release the port to another driver on its behalf.
- * If it wants to keep control of the port it should return
- * non-zero, and no action will be taken. It is good manners for
- * the driver to try to release the port at the earliest
- * opportunity after its preemption callback rejects a preemption
- * attempt. Note that if a preemption callback is happy for
- * preemption to go ahead, there is no need to release the port;
- * it is done automatically. This function may not block, as it
- * may be called from interrupt context. If the device driver
- * does not support preemption, @pf can be %NULL.
+ * The struct pardev_cb contains pointers to the callbacks. The preemption
+ * callback function, @preempt, is called when this device driver
+ * has claimed access to the port but another device driver wants
+ * to use it. It is given @private as its parameter, and should
+ * return zero if it is willing for the system to release the port
+ * to another driver on its behalf. If it wants to keep control of
+ * the port it should return non-zero, and no action will be taken.
+ * It is good manners for the driver to try to release the port at
+ * the earliest opportunity after its preemption callback rejects a
+ * preemption attempt. Note that if a preemption callback is happy
+ * for preemption to go ahead, there is no need to release the
+ * port; it is done automatically. This function may not block, as
+ * it may be called from interrupt context. If the device driver
+ * does not support preemption, @preempt can be %NULL.
*
- * The wake-up ("kick") callback function, @kf, is called when
+ * The wake-up ("kick") callback function, @wakeup, is called when
* the port is available to be claimed for exclusive access; that
* is, parport_claim() is guaranteed to succeed when called from
* inside the wake-up callback function. If the driver wants to
* claim the port it should do so; otherwise, it need not take
* any action. This function may not block, as it may be called
* from interrupt context. If the device driver does not want to
- * be explicitly invited to claim the port in this way, @kf can
+ * be explicitly invited to claim the port in this way, @wakeup can
* be %NULL.
*
* The interrupt handler, @irq_func, is called when an interrupt
@@ -711,138 +690,6 @@ EXPORT_SYMBOL(parport_remove_port);
**/
struct pardevice *
-parport_register_device(struct parport *port, const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle)
-{
- struct pardevice *tmp;
-
- if (port->physport->flags & PARPORT_FLAG_EXCL) {
- /* An exclusive device is registered. */
- printk(KERN_DEBUG "%s: no more devices allowed\n",
- port->name);
- return NULL;
- }
-
- if (flags & PARPORT_DEV_LURK) {
- if (!pf || !kf) {
- printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
- return NULL;
- }
- }
-
- if (flags & PARPORT_DEV_EXCL) {
- if (port->physport->devices) {
- /*
- * If a device is already registered and this new
- * device wants exclusive access, then no need to
- * continue as we can not grant exclusive access to
- * this device.
- */
- pr_err("%s: cannot grant exclusive access for device %s\n",
- port->name, name);
- return NULL;
- }
- }
-
- /*
- * We up our own module reference count, and that of the port
- * on which a device is to be registered, to ensure that
- * neither of us gets unloaded while we sleep in (e.g.)
- * kmalloc.
- */
- if (!try_module_get(port->ops->owner))
- return NULL;
-
- parport_get_port(port);
-
- tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
- if (!tmp)
- goto out;
-
- tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
- if (!tmp->state)
- goto out_free_pardevice;
-
- tmp->name = name;
- tmp->port = port;
- tmp->daisy = -1;
- tmp->preempt = pf;
- tmp->wakeup = kf;
- tmp->private = handle;
- tmp->flags = flags;
- tmp->irq_func = irq_func;
- tmp->waiting = 0;
- tmp->timeout = 5 * HZ;
- tmp->devmodel = false;
-
- /* Chain this onto the list */
- tmp->prev = NULL;
- /*
- * This function must not run from an irq handler so we don' t need
- * to clear irq on the local CPU. -arca
- */
- spin_lock(&port->physport->pardevice_lock);
-
- if (flags & PARPORT_DEV_EXCL) {
- if (port->physport->devices) {
- spin_unlock(&port->physport->pardevice_lock);
- printk(KERN_DEBUG
- "%s: cannot grant exclusive access for device %s\n",
- port->name, name);
- goto out_free_all;
- }
- port->flags |= PARPORT_FLAG_EXCL;
- }
-
- tmp->next = port->physport->devices;
- wmb(); /*
- * Make sure that tmp->next is written before it's
- * added to the list; see comments marked 'no locking
- * required'
- */
- if (port->physport->devices)
- port->physport->devices->prev = tmp;
- port->physport->devices = tmp;
- spin_unlock(&port->physport->pardevice_lock);
-
- init_waitqueue_head(&tmp->wait_q);
- tmp->timeslice = parport_default_timeslice;
- tmp->waitnext = tmp->waitprev = NULL;
-
- /*
- * This has to be run as last thing since init_state may need other
- * pardevice fields. -arca
- */
- port->ops->init_state(tmp, tmp->state);
- if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
- port->proc_device = tmp;
- parport_device_proc_register(tmp);
- }
- return tmp;
-
- out_free_all:
- kfree(tmp->state);
- out_free_pardevice:
- kfree(tmp);
- out:
- parport_put_port(port);
- module_put(port->ops->owner);
-
- return NULL;
-}
-EXPORT_SYMBOL(parport_register_device);
-
-static void free_pardevice(struct device *dev)
-{
- struct pardevice *par_dev = to_pardevice(dev);
-
- kfree(par_dev->name);
- kfree(par_dev);
-}
-
-struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int id)
{
@@ -996,7 +843,7 @@ void parport_unregister_device(struct pardevice *dev)
#ifdef PARPORT_PARANOID
if (!dev) {
- printk(KERN_ERR "parport_unregister_device: passed NULL\n");
+ pr_err("%s: passed NULL\n", __func__);
return;
}
#endif
@@ -1046,10 +893,7 @@ void parport_unregister_device(struct pardevice *dev)
spin_unlock_irq(&port->waitlist_lock);
kfree(dev->state);
- if (dev->devmodel)
- device_unregister(&dev->dev);
- else
- kfree(dev);
+ device_unregister(&dev->dev);
module_put(port->ops->owner);
parport_put_port(port);
@@ -1137,8 +981,7 @@ int parport_claim(struct pardevice *dev)
unsigned long flags;
if (port->cad == dev) {
- printk(KERN_INFO "%s: %s already owner\n",
- dev->port->name,dev->name);
+ pr_info("%s: %s already owner\n", dev->port->name, dev->name);
return 0;
}
@@ -1158,9 +1001,8 @@ int parport_claim(struct pardevice *dev)
* I think we'll actually deadlock rather than
* get here, but just in case..
*/
- printk(KERN_WARNING
- "%s: %s released port when preempted!\n",
- port->name, oldcad->name);
+ pr_warn("%s: %s released port when preempted!\n",
+ port->name, oldcad->name);
if (port->cad)
goto blocked;
}
@@ -1260,7 +1102,8 @@ int parport_claim_or_block(struct pardevice *dev)
r = parport_claim(dev);
if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
- printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
+ printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
+ dev->name);
#endif
/*
* FIXME!!! Use the proper locking for dev->waiting,
@@ -1293,7 +1136,7 @@ int parport_claim_or_block(struct pardevice *dev)
if (dev->port->physport->cad != dev)
printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
dev->name, dev->port->physport->cad ?
- dev->port->physport->cad->name:"nobody");
+ dev->port->physport->cad->name : "nobody");
#endif
}
dev->waiting = 0;
@@ -1320,8 +1163,8 @@ void parport_release(struct pardevice *dev)
write_lock_irqsave(&port->cad_lock, flags);
if (port->cad != dev) {
write_unlock_irqrestore(&port->cad_lock, flags);
- printk(KERN_WARNING "%s: %s tried to release parport when not owner\n",
- port->name, dev->name);
+ pr_warn("%s: %s tried to release parport when not owner\n",
+ port->name, dev->name);
return;
}
@@ -1361,7 +1204,8 @@ void parport_release(struct pardevice *dev)
if (dev->port->cad) /* racy but no matter */
return;
} else {
- printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
+ pr_err("%s: don't know how to wake %s\n",
+ port->name, pd->name);
}
}
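The kernel-doc rewritten above describes the callbacks carried in struct pardev_cb rather than passed as separate arguments. A hedged sketch of a caller using parport_register_dev_model() with those callbacks (hypothetical "foo" device and state structure):

/* Hedged sketch of device registration via struct pardev_cb. */
#include <linux/parport.h>

struct foo_dev {
        struct pardevice *pdev;
        /* driver state ... */
};

static int foo_preempt(void *private)
{
        /* return 0 if we are willing to give up the port right now */
        return 0;
}

static void foo_wakeup(void *private)
{
        /* port is free: parport_claim() is guaranteed to succeed here */
}

static struct pardevice *foo_register(struct parport *port, struct foo_dev *foo)
{
        struct pardev_cb cb = {
                .preempt = foo_preempt,
                .wakeup  = foo_wakeup,
                .private = foo,
        };

        foo->pdev = parport_register_dev_model(port, "foo", &cb, 0);
        return foo->pdev;       /* NULL on failure */
}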
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 390e92f2d8d1..b761c1f72f67 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -31,6 +31,22 @@ void pci_ats_init(struct pci_dev *dev)
}
/**
+ * pci_ats_supported - check if the device can use ATS
+ * @dev: the PCI device
+ *
+ * Returns true if the device supports ATS and is allowed to use it, false
+ * otherwise.
+ */
+bool pci_ats_supported(struct pci_dev *dev)
+{
+ if (!dev->ats_cap)
+ return false;
+
+ return (dev->untrusted == 0);
+}
+EXPORT_SYMBOL_GPL(pci_ats_supported);
+
+/**
* pci_enable_ats - enable the ATS capability
* @dev: the PCI device
* @ps: the IOMMU page shift
@@ -42,7 +58,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
u16 ctrl;
struct pci_dev *pdev;
- if (!dev->ats_cap)
+ if (!pci_ats_supported(dev))
return -EINVAL;
if (WARN_ON(dev->ats_enabled))
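pci_enable_ats() now rejects untrusted devices through the new pci_ats_supported() helper, and IOMMU drivers are expected to use the same check before turning ATS on. A hedged sketch of a caller; using PAGE_SHIFT as the ATS page shift is an assumption of the sketch, not something this patch mandates.

/* Hedged sketch of an ATS-capable caller. */
#include <linux/pci.h>
#include <linux/pci-ats.h>

static void example_enable_ats(struct pci_dev *pdev)
{
        if (!pci_ats_supported(pdev))
                return;         /* no ATS capability, or device is untrusted */

        if (pci_enable_ats(pdev, PAGE_SHIFT))
                pci_info(pdev, "ATS enable failed\n");
}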
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index ae36edb1d7db..b08efea39496 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -58,15 +58,33 @@ config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI_IRQ_DOMAIN
+ select PCIE_RCAR_HOST
help
Say Y here if you want PCIe controller support on R-Car SoCs.
+ This option will be removed after arm64 defconfig is updated.
+
+config PCIE_RCAR_HOST
+ bool "Renesas R-Car PCIe host controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI_IRQ_DOMAIN
+ help
+ Say Y here if you want PCIe controller support on R-Car SoCs in host
+ mode.
+
+config PCIE_RCAR_EP
+ bool "Renesas R-Car PCIe endpoint controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ help
+ Say Y here if you want PCIe controller support on R-Car SoCs in
+ endpoint mode.
config PCI_HOST_COMMON
- bool
+ tristate
select PCI_ECAM
config PCI_HOST_GENERIC
- bool "Generic PCI host controller"
+ tristate "Generic PCI host controller"
depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index fbac4b0190a0..efd9733ead26 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -7,7 +7,8 @@ obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
-obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
+obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
+obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 1c173dad67d1..1c15c8352125 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -450,7 +450,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
epc->max_functions = 1;
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
- resource_size(pcie->mem_res));
+ resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
dev_err(dev, "failed to initialize the memory space\n");
goto err_init;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 9b1c3966414b..8c2543f28ba0 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -140,9 +140,6 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
for_each_of_pci_range(&parser, &range) {
bool is_io;
- if (r >= rc->max_regions)
- break;
-
if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
is_io = false;
else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
@@ -219,17 +216,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
pcie = &rc->pcie;
pcie->is_rc = true;
- rc->max_regions = 32;
- of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);
-
rc->no_bar_nbits = 32;
of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);
rc->vendor_id = 0xffff;
- of_property_read_u16(np, "vendor-id", &rc->vendor_id);
+ of_property_read_u32(np, "vendor-id", &rc->vendor_id);
rc->device_id = 0xffff;
- of_property_read_u16(np, "device-id", &rc->device_id);
+ of_property_read_u32(np, "device-id", &rc->device_id);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
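The switch from of_property_read_u16() to of_property_read_u32() above goes together with widening rc->vendor_id/device_id (see the pcie-cadence.h hunk below): the "vendor-id" and "device-id" properties are ordinary 32-bit DT cells. A hedged sketch of that read-with-default pattern, with a hypothetical helper name:

/* Hedged sketch: read a 32-bit DT cell, keeping a default when absent. */
#include <linux/of.h>
#include <linux/types.h>

static u32 read_id(struct device_node *np, const char *prop, u32 def)
{
        u32 val = def;

        /* of_property_read_u32() leaves 'val' untouched on error */
        of_property_read_u32(np, prop, &val);
        return val;
}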
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index a2b28b912ca4..df14ad002fe9 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -251,7 +251,6 @@ struct cdns_pcie {
* @bus_range: first/last buses behind the PCIe host controller
* @cfg_base: IO mapped window to access the PCI configuration space of a
* single function at a time
- * @max_regions: maximum number of regions supported by the hardware
* @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
* translation (nbits sets into the "no BAR match" register)
* @vendor_id: PCI vendor ID
@@ -262,10 +261,9 @@ struct cdns_pcie_rc {
struct resource *cfg_res;
struct resource *bus_range;
void __iomem *cfg_base;
- u32 max_regions;
u32 no_bar_nbits;
- u16 vendor_id;
- u16 device_id;
+ u32 vendor_id;
+ u32 device_id;
};
/**
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 03dcaf65d159..044a3761c44f 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -26,7 +26,7 @@ config PCI_DRA7XX_HOST
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_HOST
select PCI_DRA7XX
- default y
+ default y if SOC_DRA7XX
help
Enables support for the PCIe controller in the DRA7xx SoC to work in
host mode. There are two instances of PCIe controller in DRA7xx.
@@ -111,7 +111,6 @@ config PCI_KEYSTONE_HOST
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_KEYSTONE
- default y
help
Enables support for the PCIe controller in the Keystone SoC to
work in host mode. The PCI controller on Keystone is based on
@@ -281,15 +280,25 @@ config PCIE_TEGRA194_EP
selected. This uses the DesignWare core.
config PCIE_UNIPHIER
- bool "Socionext UniPhier PCIe controllers"
+ bool "Socionext UniPhier PCIe host controllers"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && HAS_IOMEM
depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on UniPhier SoCs.
+ Say Y here if you want PCIe host controller support on UniPhier SoCs.
This driver supports LD20 and PXs3 SoCs.
+config PCIE_UNIPHIER_EP
+ bool "Socionext UniPhier PCIe endpoint controllers"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want PCIe endpoint controller support on
+ UniPhier SoCs. This driver supports Pro5 SoC.
+
config PCIE_AL
bool "Amazon Annapurna Labs PCIe controller"
depends on OF && (ARM64 || COMPILE_TEST)
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 8a637cfcf6e9..a751553fa0db 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 3b0e58f2de58..6184ebc9392d 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -840,7 +840,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct phy **phy;
struct device_link **link;
void __iomem *base;
- struct resource *res;
struct dw_pcie *pci;
struct dra7xx_pcie *dra7xx;
struct device *dev = &pdev->dev;
@@ -877,10 +876,9 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
return irq;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
+ base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 0) {
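The dra7xx probe change above folds platform_get_resource_byname() plus devm_ioremap() into devm_platform_ioremap_resource_byname(), which also switches the failure convention from NULL to ERR_PTR(). A hedged sketch with a hypothetical "ctrl" window name:

/* Hedged sketch of the helper-based mapping pattern. */
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_map(struct platform_device *pdev, void __iomem **base)
{
        *base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
        if (IS_ERR(*base))
                return PTR_ERR(*base);  /* ERR_PTR, not NULL, on failure */
        return 0;
}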
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index acfbd34032a8..8f08ae53f53e 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -868,9 +868,9 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq <= 0) {
+ if (pp->msi_irq < 0) {
dev_err(dev, "failed to get MSI irq\n");
- return -ENODEV;
+ return pp->msi_irq;
}
}
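The imx6 fix above (and the matching tegra194 hunk later in this diff) treats platform_get_irq_byname() as returning a negative errno on failure and propagates that value instead of inventing -ENODEV. A hedged sketch of the resulting pattern:

/* Hedged sketch: propagate the errno from platform_get_irq_byname(). */
#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev, int *irq)
{
        *irq = platform_get_irq_byname(pdev, "msi");
        if (*irq < 0)
                return *irq;    /* pass the errno up, don't replace it */
        return 0;
}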
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 3715dceca1bf..ca59ba9e0ecd 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -289,11 +289,11 @@ static void meson_pcie_init_dw(struct meson_pcie *mp)
meson_cfg_writel(mp, val, PCIE_CFG0);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~LINK_CAPABLE_MASK;
+ val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE);
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
+ val |= LINK_CAPABLE_X1;
meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 1eeda2f6371f..270868f3859a 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -80,7 +80,7 @@ static int al_pcie_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops al_pcie_ops = {
+const struct pci_ecam_ops al_pcie_ops = {
.bus_shift = 20,
.init = al_pcie_init,
.pci_ops = {
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1cdcbd102ce8..5e5b8821bed8 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -412,11 +412,11 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->page_size - 1);
+ aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
msg_addr = ((u64)msg_addr_upper) << 32 |
(msg_addr_lower & ~aligned_offset);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (ret)
return ret;
@@ -433,7 +433,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epf_msix_tbl *msix_tbl;
struct pci_epc *epc = ep->epc;
- struct pci_epf_bar *epf_bar;
u32 reg, msg_data, vec_ctrl;
unsigned int aligned_offset;
u32 tbl_offset;
@@ -446,10 +445,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- epf_bar = ep->epf_bar[bir];
- msix_tbl = epf_bar->addr;
- msix_tbl = (struct pci_epf_msix_tbl *)((char *)msix_tbl + tbl_offset);
-
+ msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
@@ -459,9 +455,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return -EPERM;
}
- aligned_offset = msg_addr & (epc->mem->page_size - 1);
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (ret)
return ret;
@@ -477,7 +473,7 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
struct pci_epc *epc = ep->epc;
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->page_size);
+ epc->mem->window.page_size);
pci_epc_mem_exit(epc);
}
@@ -610,15 +606,15 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
- ep->page_size);
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (!ep->msi_mem) {
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
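Both endpoint drivers above now pass an explicit page size to pci_epc_mem_init(), and the window parameters live under epc->mem->window. A hedged sketch of the setup sequence; the signature is taken from the calls in this diff and the parameter names are hypothetical.

/* Hedged sketch of endpoint outbound-window setup. */
#include <linux/device.h>
#include <linux/pci-epc.h>

static int example_epc_mem_setup(struct pci_epc *epc, struct device *dev,
                                 phys_addr_t phys_base, size_t size,
                                 size_t page_size, void __iomem **msi_mem,
                                 phys_addr_t *msi_mem_phys)
{
        int ret;

        ret = pci_epc_mem_init(epc, phys_base, size, page_size);
        if (ret < 0) {
                dev_err(dev, "failed to initialize address space\n");
                return ret;
        }

        /* one window page reserved for raising MSI/MSI-X doorbells */
        *msi_mem = pci_epc_mem_alloc_addr(epc, msi_mem_phys,
                                          epc->mem->window.page_size);
        if (!*msi_mem)
                return -ENOMEM;

        return 0;
}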
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 395feb8ca051..0a4a5aa6fe46 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -236,7 +236,7 @@ static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct pcie_port *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
@@ -264,6 +264,8 @@ int dw_pcie_allocate_domains(struct pcie_port *pp)
return -ENOMEM;
}
+ irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
+
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 681548c88282..c92496e36fd5 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -244,13 +244,16 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
u64 pci_addr, u32 size)
{
u32 retries, val;
+ u64 limit_addr = cpu_addr + size - 1;
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
lower_32_bits(cpu_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
+ lower_32_bits(limit_addr));
+ dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
lower_32_bits(pci_addr));
dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
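With the iATU limit now split across PCIE_ATU_UNR_LOWER_LIMIT and the new PCIE_ATU_UNR_UPPER_LIMIT register (see the pcie-designware.h hunk below), the outbound window's inclusive end address is computed once and split with lower_32_bits()/upper_32_bits(). A hedged sketch of just that computation:

/* Hedged sketch: split a 64-bit inclusive window limit into two registers. */
#include <linux/kernel.h>

static void example_split_limit(u64 cpu_addr, u32 size, u32 *lo, u32 *hi)
{
        u64 limit = cpu_addr + size - 1;        /* inclusive end of the window */

        *lo = lower_32_bits(limit);
        *hi = upper_32_bits(limit);
}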
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index d6e1f397e6b0..656e00f8fbeb 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -112,9 +112,10 @@
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
-#define PCIE_ATU_UNR_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
/*
* The default address offset between dbi_base and atu_base. Root controller
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 6d9e1b2b8f7b..0ad4e07dd4c2 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -104,7 +104,7 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops hisi_pcie_ops = {
+const struct pci_ecam_ops hisi_pcie_ops = {
.bus_shift = 20,
.init = hisi_pcie_init,
.pci_ops = {
@@ -332,15 +332,6 @@ static struct platform_driver hisi_pcie_driver = {
};
builtin_platform_driver(hisi_pcie_driver);
-static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct pci_ecam_ops *ops;
-
- ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
- return pci_host_common_probe(pdev, ops);
-}
-
static int hisi_pcie_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
@@ -362,7 +353,7 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return 0;
}
-struct pci_ecam_ops hisi_pcie_platform_ops = {
+static const struct pci_ecam_ops hisi_pcie_platform_ops = {
.bus_shift = 20,
.init = hisi_pcie_platform_init,
.pci_ops = {
@@ -375,17 +366,17 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
{
.compatible = "hisilicon,hip06-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{
.compatible = "hisilicon,hip07-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{},
};
static struct platform_driver hisi_pcie_almost_ecam_driver = {
- .probe = hisi_pcie_almost_ecam_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "hisi-pcie-almost-ecam",
.of_match_table = hisi_pcie_almost_ecam_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index fc2a12212dec..2d8dbb318087 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -453,7 +453,7 @@ static int intel_pcie_msi_init(struct pcie_port *pp)
return 0;
}
-u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
+static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
{
return cpu_addr + BUS_IATU_OFFSET;
}
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index ae30a2fd3716..92b77f7d8354 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1623,7 +1623,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
ret = pinctrl_pm_select_default_state(dev);
if (ret < 0) {
dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
- goto fail_pinctrl;
+ goto fail_pm_get_sync;
}
tegra_pcie_init_controller(pcie);
@@ -1650,9 +1650,8 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
fail_host_init:
tegra_pcie_deinit_controller(pcie);
-fail_pinctrl:
- pm_runtime_put_sync(dev);
fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
return ret;
}
@@ -2190,9 +2189,9 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
}
pp->irq = platform_get_irq_byname(pdev, "intr");
- if (!pp->irq) {
+ if (pp->irq < 0) {
dev_err(dev, "Failed to get \"intr\" interrupt\n");
- return -ENODEV;
+ return pp->irq;
}
pcie->bpmp = tegra_bpmp_get(dev);
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
new file mode 100644
index 000000000000..148355960061
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* Link Glue registers */
+#define PCL_RSTCTRL0 0x0010
+#define PCL_RSTCTRL_AXI_REG BIT(3)
+#define PCL_RSTCTRL_AXI_SLAVE BIT(2)
+#define PCL_RSTCTRL_AXI_MASTER BIT(1)
+#define PCL_RSTCTRL_PIPE3 BIT(0)
+
+#define PCL_RSTCTRL1 0x0020
+#define PCL_RSTCTRL_PERST BIT(0)
+
+#define PCL_RSTCTRL2 0x0024
+#define PCL_RSTCTRL_PHY_RESET BIT(0)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_CLK_CTRL 0x8004
+#define PCL_APP_CLK_REQ BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_MSI0 0x8040
+#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
+#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
+
+#define PCL_APP_MSI1 0x8044
+#define PCL_APP_MSI_REQ BIT(0)
+
+#define PCL_APP_INTX 0x8074
+#define PCL_APP_INTX_SYS_INT BIT(0)
+
+/* assertion time of INTx in usec */
+#define PCL_INTX_WIDTH_USEC 30
+
+struct uniphier_pcie_ep_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
+ struct phy *phy;
+ const struct pci_epc_features *features;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
+ bool assert)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_RSTCTRL2);
+ if (assert)
+ val |= PCL_RSTCTRL_PHY_RESET;
+ else
+ val &= ~PCL_RSTCTRL_PHY_RESET;
+ writel(val, priv->base + PCL_RSTCTRL2);
+}
+
+static void uniphier_pcie_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* clock request */
+ val = readl(priv->base + PCL_APP_CLK_CTRL);
+ val &= ~PCL_APP_CLK_REQ;
+ writel(val, priv->base + PCL_APP_CLK_CTRL);
+
+ /* deassert PIPE3 and AXI reset */
+ val = readl(priv->base + PCL_RSTCTRL0);
+ val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
+ | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
+ writel(val, priv->base + PCL_RSTCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ msleep(100);
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ /*
+	 * This generates a pulse signal to send INTx to the RC, so it should
+	 * be cleared as soon as possible. This sequence is serialized by a
+	 * mutex in pci_epc_raise_irq().
+ */
+ /* assert INTx */
+ val = readl(priv->base + PCL_APP_INTX);
+ val |= PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ udelay(PCL_INTX_WIDTH_USEC);
+
+ /* deassert INTx */
+ val &= ~PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
+ u8 func_no, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
+ | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
+ writel(val, priv->base + PCL_APP_MSI0);
+
+ val = readl(priv->base + PCL_APP_MSI1);
+ val |= PCL_APP_MSI_REQ;
+ writel(val, priv->base + PCL_APP_MSI1);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return uniphier_pcie_ep_raise_legacy_irq(ep);
+ case PCI_EPC_IRQ_MSI:
+ return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type (%d)\n", type);
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+uniphier_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ return priv->features;
+}
+
+static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
+ .ep_init = uniphier_pcie_ep_init,
+ .raise_irq = uniphier_pcie_ep_raise_irq,
+ .get_features = uniphier_pcie_get_features,
+};
+
+static int uniphier_add_pcie_ep(struct uniphier_pcie_ep_priv *priv,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &priv->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ ep->ops = &uniphier_pcie_ep_ops;
+
+ pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2");
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret)
+ dev_err(dev, "Failed to initialize endpoint (%d)\n", ret);
+
+ return ret;
+}
+
+static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ uniphier_pcie_init_ep(priv);
+
+ uniphier_pcie_phy_reset(priv, true);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_gio_assert;
+
+ uniphier_pcie_phy_reset(priv, false);
+
+ return 0;
+
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_gio);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+};
+
+static int uniphier_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_ep_priv *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->features = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->features))
+ return -EINVAL;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(priv->pci.dbi_base))
+ return PTR_ERR(priv->pci.dbi_base);
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ dev_err(dev, "Failed to get phy (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_ep_enable(priv);
+ if (ret)
+ return ret;
+
+ return uniphier_add_pcie_ep(priv, pdev);
+}
+
+static const struct pci_epc_features uniphier_pro5_data = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .reserved_bar = BIT(BAR_4),
+};
+
+static const struct of_device_id uniphier_pcie_ep_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro5-pcie-ep",
+ .data = &uniphier_pro5_data,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_ep_driver = {
+ .probe = uniphier_pcie_ep_probe,
+ .driver = {
+ .name = "uniphier-pcie-ep",
+ .of_match_table = uniphier_pcie_ep_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(uniphier_pcie_ep_driver);
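The MSI path in uniphier_pcie_ep_raise_msi_irq() above packs the function number and the zero-based vector index into PCL_APP_MSI0 with FIELD_PREP() from <linux/bitfield.h>. A minimal illustration of that helper follows; it is not part of the patch, and the function name and values are hypothetical:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Shift each value into the bit positions described by its mask. */
static u32 pack_msi_example(u8 fn, u16 vector)
{
	return FIELD_PREP(GENMASK(10, 8), fn) |		/* bits 10:8, like PCL_APP_VEN_MSI_TC_MASK */
	       FIELD_PREP(GENMASK(4, 0), vector - 1);	/* bits 4:0, like PCL_APP_VEN_MSI_VECTOR_MASK */
}

/* pack_msi_example(2, 6) == 0x205: 2 in bits 10:8, vector index 5 in bits 4:0. */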
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a94be264240f..5907baa9b1f2 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -522,9 +522,9 @@ static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie)
mobiveil_pcie_enable_msi(pcie);
rp->irq = platform_get_irq(pdev, 0);
- if (rp->irq <= 0) {
+ if (rp->irq < 0) {
dev_err(dev, "failed to map IRQ: %d\n", rp->irq);
- return -ENODEV;
+ return rp->irq;
}
/* initialize the IRQ domains */
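Several hunks in this series (mobiveil above, pcie-tegra194, pci-v3-semi, pcie-mediatek, pci-aardvark) converge on the same interrupt-lookup fix: platform_get_irq() returns a negative errno on failure, and that value should be propagated rather than overwritten with -ENODEV. A minimal sketch of the resulting pattern, using a hypothetical foo_pcie driver:

#include <linux/platform_device.h>

struct foo_pcie { int irq; };	/* hypothetical driver state */

static int foo_pcie_get_irq(struct foo_pcie *foo, struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* e.g. -EPROBE_DEFER or -ENXIO, not a made-up -ENODEV */

	foo->irq = irq;
	return 0;
}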
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 2a20b649f40c..90ff291c24f0 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -9,15 +9,18 @@
*/
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
+#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
@@ -31,16 +34,6 @@
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
#define PCIE_CORE_DEV_REV_REG 0x8
#define PCIE_CORE_PCIEXP_CAP 0xc0
-#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
-#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
-#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
-#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
-#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
-#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
-#define PCIE_CORE_LINK_TRAINING BIT(5)
-#define PCIE_CORE_LINK_WIDTH_SHIFT 20
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -101,6 +94,8 @@
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
+#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK BIT(7)
@@ -201,7 +196,10 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
+ int link_gen;
struct pci_bridge_emul bridge;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -214,6 +212,11 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
return readl(pcie->base + reg);
}
+static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
+{
+ return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
+}
+
static int advk_pcie_link_up(struct advk_pcie *pcie)
{
u32 val, ltssm_state;
@@ -225,20 +228,16 @@ static int advk_pcie_link_up(struct advk_pcie *pcie)
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
- struct device *dev = &pcie->pdev->dev;
int retries;
/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (advk_pcie_link_up(pcie)) {
- dev_info(dev, "link up\n");
+ if (advk_pcie_link_up(pcie))
return 0;
- }
usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}
- dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}
@@ -253,10 +252,115 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
}
}
+static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
+{
+ int ret, neg_gen;
+ u32 reg;
+
+ /* Setup link speed */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~PCIE_GEN_SEL_MSK;
+ if (gen == 3)
+ reg |= SPEED_GEN_3;
+ else if (gen == 2)
+ reg |= SPEED_GEN_2;
+ else
+ reg |= SPEED_GEN_1;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+	 * Enable link training. Doing this once would suffice, rather than on
+	 * every call to this function, but repeating it does not break anything.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg |= LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /*
+ * Start link training immediately after enabling it.
+ * This solves problems for some buggy cards.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
+ reg |= PCI_EXP_LNKCTL_RL;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
+
+ ret = advk_pcie_wait_for_link(pcie);
+ if (ret)
+ return ret;
+
+ reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
+ neg_gen = reg & PCI_EXP_LNKSTA_CLS;
+
+ return neg_gen;
+}
+
+static void advk_pcie_train_link(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ int neg_gen = -1, gen;
+
+ /*
+	 * Try link training at the link gen specified by the device tree
+	 * property 'max-link-speed'. If this fails, retry at progressively
+	 * lower gens.
+ */
+ for (gen = pcie->link_gen; gen > 0; --gen) {
+ neg_gen = advk_pcie_train_at_gen(pcie, gen);
+ if (neg_gen > 0)
+ break;
+ }
+
+ if (neg_gen < 0)
+ goto err;
+
+ /*
+	 * If the gen negotiated after successful training is lower than the
+	 * requested gen, train again at the negotiated gen. This works around
+	 * stability issues with some buggy gen1 cards.
+ */
+ if (neg_gen < gen) {
+ gen = neg_gen;
+ neg_gen = advk_pcie_train_at_gen(pcie, gen);
+ }
+
+ if (neg_gen == gen) {
+ dev_info(dev, "link up at gen %i\n", gen);
+ return;
+ }
+
+err:
+ dev_err(dev, "link never came up\n");
+}
+
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+ u32 reg;
+
+ if (!pcie->reset_gpio)
+ return;
+
+ /* PERST does not work for some cards when link training is enabled */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* 10ms delay is needed for some cards */
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
+ advk_pcie_issue_perst(pcie);
+
+ /* Enable TX */
+ reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
+ reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
+ advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);
+
/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
@@ -275,36 +379,26 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
- /* Set PCIe Device Control and Status 1 PF0 register */
- reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
- (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
- PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
- (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
- PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+ /* Set PCIe Device Control register */
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
+ reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ reg &= ~PCI_EXP_DEVCTL_READRQ;
+ reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */
+ reg |= PCI_EXP_DEVCTL_READRQ_512B;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
/* Program PCIe Control 2 to disable strict ordering */
reg = PCIE_CORE_CTRL2_RESERVED |
PCIE_CORE_CTRL2_TD_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
- /* Set GEN2 */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg &= ~PCIE_GEN_SEL_MSK;
- reg |= SPEED_GEN_2;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* Set lane X1 */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~LANE_CNT_MSK;
reg |= LANE_COUNT_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
- /* Enable link training */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg |= LINK_TRAINING_EN;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* Enable MSI */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -340,23 +434,22 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
/*
* PERST# signal could have been asserted by pinctrl subsystem before
- * probe() callback has been called, making the endpoint going into
+ * probe() callback has been called or issued explicitly by reset gpio
+	 * function advk_pcie_issue_perst(), putting the endpoint into
* fundamental reset. As required by PCI Express spec a delay for at
* least 100ms after such a reset before link training is needed.
*/
msleep(PCI_PM_D3COLD_WAIT);
- /* Start link training */
- reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
- reg |= PCIE_CORE_LINK_TRAINING;
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
-
- advk_pcie_wait_for_link(pcie);
-
- reg = PCIE_CORE_LINK_L0S_ENTRY |
- (1 << PCIE_CORE_LINK_WIDTH_SHIFT);
- advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
+ advk_pcie_train_link(pcie);
+ /*
+ * FIXME: The following register update is suspicious. This register is
+ * applicable only when the PCI controller is configured for Endpoint
+ * mode, not as a Root Complex. But apparently when this code is
+ * removed, some cards stop working. This should be investigated and
+ * a comment explaining this should be put here.
+ */
reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
PCIE_CORE_CMD_IO_ACCESS_EN |
@@ -952,6 +1045,62 @@ static irqreturn_t advk_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void __maybe_unused advk_pcie_disable_phy(struct advk_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static int advk_pcie_enable_phy(struct advk_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ phy_exit(pcie->phy);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ phy_exit(pcie->phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int advk_pcie_setup_phy(struct advk_pcie *pcie)
+{
+ struct device *dev = &pcie->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+
+ pcie->phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(pcie->phy) && (PTR_ERR(pcie->phy) == -EPROBE_DEFER))
+ return PTR_ERR(pcie->phy);
+
+ /* Old bindings miss the PHY handle */
+ if (IS_ERR(pcie->phy)) {
+ dev_warn(dev, "PHY unavailable (%ld)\n", PTR_ERR(pcie->phy));
+ pcie->phy = NULL;
+ return 0;
+ }
+
+ ret = advk_pcie_enable_phy(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY (%d)\n", ret);
+
+ return ret;
+}
+
static int advk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -973,6 +1122,9 @@ static int advk_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->base);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_irq(dev, irq, advk_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie",
pcie);
@@ -989,6 +1141,32 @@ static int advk_pcie_probe(struct platform_device *pdev)
}
pcie->root_bus_nr = bus->start;
+ pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
+ "reset-gpios", 0,
+ GPIOD_OUT_LOW,
+ "pcie1-reset");
+ ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
+ if (ret) {
+ if (ret == -ENOENT) {
+ pcie->reset_gpio = NULL;
+ } else {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset-gpio: %i\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = of_pci_get_max_link_speed(dev->of_node);
+ if (ret <= 0 || ret > 3)
+ pcie->link_gen = 3;
+ else
+ pcie->link_gen = ret;
+
+ ret = advk_pcie_setup_phy(pcie);
+ if (ret)
+ return ret;
+
advk_pcie_setup_hw(pcie);
advk_sw_pci_bridge_init(pcie);
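A brief note on the advk_read16() helper introduced above (not part of the patch): it extracts a 16-bit PCI Express capability field from the 32-bit register file by aligning the offset down and shifting. For the Link Status read in advk_pcie_train_at_gen(), with PCIE_CORE_PCIEXP_CAP at 0xc0 and PCI_EXP_LNKSTA at 0x12:

/*
 * reg = 0xc0 + 0x12 = 0xd2
 * advk_readl(pcie, 0xd2 & ~0x3)   ->  32-bit read at offset 0xd0
 *  ... >> ((0xd2 & 0x3) * 8)      ->  shift right by 16 bits
 * so the returned u16 holds the Link Status register.
 */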
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 250a3fc80ec6..953de57f6c57 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -8,7 +8,9 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
@@ -19,7 +21,7 @@ static void gen_pci_unmap_cfg(void *ptr)
}
static struct pci_config_window *gen_pci_init(struct device *dev,
- struct list_head *resources, struct pci_ecam_ops *ops)
+ struct list_head *resources, const struct pci_ecam_ops *ops)
{
int err;
struct resource cfgres;
@@ -54,15 +56,19 @@ err_out:
return ERR_PTR(err);
}
-int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops)
+int pci_host_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
+ const struct pci_ecam_ops *ops;
int ret;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
return -ENOMEM;
@@ -82,7 +88,7 @@ int pci_host_common_probe(struct platform_device *pdev,
bridge->dev.parent = dev;
bridge->sysdata = cfg;
bridge->busnr = cfg->busr.start;
- bridge->ops = &ops->pci_ops;
+ bridge->ops = (struct pci_ops *)&ops->pci_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
@@ -95,6 +101,7 @@ int pci_host_common_probe(struct platform_device *pdev,
platform_set_drvdata(pdev, bridge->bus);
return 0;
}
+EXPORT_SYMBOL_GPL(pci_host_common_probe);
int pci_host_common_remove(struct platform_device *pdev)
{
@@ -107,3 +114,6 @@ int pci_host_common_remove(struct platform_device *pdev)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_host_common_remove);
+
+MODULE_LICENSE("GPL v2");
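With pci_host_common_probe() now looking up its const struct pci_ecam_ops via of_device_get_match_data(), the ECAM-based host drivers converted later in this series no longer need a probe wrapper. A minimal sketch of the resulting driver shape; the ops, compatible string, and names are hypothetical and not taken from the patch:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

static const struct pci_ecam_ops my_ecam_ops = {
	.bus_shift	= 20,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};

static const struct of_device_id my_pcie_of_match[] = {
	{ .compatible = "vendor,my-pcie-ecam", .data = &my_ecam_ops },
	{ },
};
MODULE_DEVICE_TABLE(of, my_pcie_of_match);

static struct platform_driver my_pcie_driver = {
	.driver	= {
		.name		= "my-pcie-ecam",
		.of_match_table	= my_pcie_of_match,
	},
	.probe	= pci_host_common_probe,	/* ops come from the match data */
	.remove	= pci_host_common_remove,
};
module_platform_driver(my_pcie_driver);

MODULE_LICENSE("GPL v2");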
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 75a2fb930d4b..b51977abfdf1 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -10,12 +10,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
+#include <linux/module.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
-static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
+static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -49,7 +48,7 @@ static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
return pci_ecam_map_bus(bus, devfn, where);
}
-static struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+static const struct pci_ecam_ops pci_dw_ecam_bus_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_dw_ecam_map_bus,
@@ -76,25 +75,16 @@ static const struct of_device_id gen_pci_of_match[] = {
{ },
};
-
-static int gen_pci_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id;
- struct pci_ecam_ops *ops;
-
- of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node);
- ops = (struct pci_ecam_ops *)of_id->data;
-
- return pci_host_common_probe(pdev, ops);
-}
+MODULE_DEVICE_TABLE(of, gen_pci_of_match);
static struct platform_driver gen_pci_driver = {
.driver = {
.name = "pci-host-generic",
.of_match_table = gen_pci_of_match,
- .suppress_bind_attrs = true,
},
- .probe = gen_pci_probe,
+ .probe = pci_host_common_probe,
.remove = pci_host_common_remove,
};
-builtin_platform_driver(gen_pci_driver);
+module_platform_driver(gen_pci_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 222ff5639ebe..bf40ff09c99d 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -480,6 +480,9 @@ struct hv_pcibus_device {
struct workqueue_struct *wq;
+ /* Highest slot of child device with resources allocated */
+ int wslot_res_allocated;
+
/* hypercall arg, must not cross page boundary */
struct hv_retarget_device_interrupt retarget_msi_interrupt_params;
@@ -2210,10 +2213,8 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
struct hv_dr_state *dr;
int i;
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct hv_pcidev_description) *
- (relations->device_count)), GFP_NOWAIT);
-
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
if (!dr)
return;
@@ -2247,10 +2248,8 @@ static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
struct hv_dr_state *dr;
int i;
- dr = kzalloc(offsetof(struct hv_dr_state, func) +
- (sizeof(struct hv_pcidev_description) *
- (relations->device_count)), GFP_NOWAIT);
-
+ dr = kzalloc(struct_size(dr, func, relations->device_count),
+ GFP_NOWAIT);
if (!dr)
return;
@@ -2444,9 +2443,8 @@ static void hv_pci_onchannelcallback(void *context)
bus_rel = (struct pci_bus_relations *)buffer;
if (bytes_recvd <
- offsetof(struct pci_bus_relations, func) +
- (sizeof(struct pci_function_description) *
- (bus_rel->device_count))) {
+ struct_size(bus_rel, func,
+ bus_rel->device_count)) {
dev_err(&hbus->hdev->device,
"bus relations too small\n");
break;
@@ -2459,9 +2457,8 @@ static void hv_pci_onchannelcallback(void *context)
bus_rel2 = (struct pci_bus_relations2 *)buffer;
if (bytes_recvd <
- offsetof(struct pci_bus_relations2, func) +
- (sizeof(struct pci_function_description2) *
- (bus_rel2->device_count))) {
+ struct_size(bus_rel2, func,
+ bus_rel2->device_count)) {
dev_err(&hbus->hdev->device,
"bus relations v2 too small\n");
break;
@@ -2748,6 +2745,8 @@ static void hv_free_config_window(struct hv_pcibus_device *hbus)
vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
+
/**
* hv_pci_enter_d0() - Bring the "bus" into the D0 power state
* @hdev: VMBus's tracking struct for this root PCI bus
@@ -2760,8 +2759,10 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
struct pci_bus_d0_entry *d0_entry;
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
+ bool retry = true;
int ret;
+enter_d0_retry:
/*
* Tell the host that the bus is ready to use, and moved into the
* powered-on state. This includes telling the host which region
@@ -2788,6 +2789,38 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
if (ret)
goto exit;
+ /*
+	 * In certain cases (e.g. kdump) the PCI device of interest was
+	 * not cleanly shut down and its resources are still held on the
+	 * host side, so the host could return an invalid device status.
+	 * We need to explicitly request the host to release the resources
+	 * and then try to enter D0 again.
+ */
+ if (comp_pkt.completion_status < 0 && retry) {
+ retry = false;
+
+ dev_err(&hdev->device, "Retrying D0 Entry\n");
+
+ /*
+		 * hv_pci_bus_exit() calls hv_send_resources_released()
+		 * to free up the resources of its child devices.
+		 * In the kdump kernel we need to set
+		 * wslot_res_allocated to 255 so that it scans all child
+		 * devices and releases the resources allocated in the
+		 * normal kernel before the panic happened.
+ */
+ hbus->wslot_res_allocated = 255;
+
+ ret = hv_pci_bus_exit(hdev, true);
+
+ if (ret == 0) {
+ kfree(pkt);
+ goto enter_d0_retry;
+ }
+ dev_err(&hdev->device,
+ "Retrying D0 failed with ret %d\n", ret);
+ }
+
if (comp_pkt.completion_status < 0) {
dev_err(&hdev->device,
"PCI Pass-through VSP failed D0 Entry with status %x\n",
@@ -2859,7 +2892,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
struct hv_pci_dev *hpdev;
struct pci_packet *pkt;
size_t size_res;
- u32 wslot;
+ int wslot;
int ret;
size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
@@ -2912,6 +2945,8 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
comp_pkt.completion_status);
break;
}
+
+ hbus->wslot_res_allocated = wslot;
}
kfree(pkt);
@@ -2930,10 +2965,10 @@ static int hv_send_resources_released(struct hv_device *hdev)
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_child_message pkt;
struct hv_pci_dev *hpdev;
- u32 wslot;
+ int wslot;
int ret;
- for (wslot = 0; wslot < 256; wslot++) {
+ for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
hpdev = get_pcichild_wslot(hbus, wslot);
if (!hpdev)
continue;
@@ -2948,8 +2983,12 @@ static int hv_send_resources_released(struct hv_device *hdev)
VM_PKT_DATA_INBAND, 0);
if (ret)
return ret;
+
+ hbus->wslot_res_allocated = wslot - 1;
}
+ hbus->wslot_res_allocated = -1;
+
return 0;
}
@@ -3049,6 +3088,7 @@ static int hv_pci_probe(struct hv_device *hdev,
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
+ hbus->wslot_res_allocated = -1;
/*
* The PCI bus "domain" is what is called "segment" in ACPI and other
@@ -3148,7 +3188,7 @@ static int hv_pci_probe(struct hv_device *hdev,
ret = hv_pci_allocate_bridge_windows(hbus);
if (ret)
- goto free_irq_domain;
+ goto exit_d0;
ret = hv_send_resources_allocated(hdev);
if (ret)
@@ -3166,6 +3206,8 @@ static int hv_pci_probe(struct hv_device *hdev,
free_windows:
hv_pci_free_bridge_windows(hbus);
+exit_d0:
+ (void) hv_pci_bus_exit(hdev, true);
free_irq_domain:
irq_domain_remove(hbus->irq_domain);
free_fwnode:
@@ -3185,7 +3227,7 @@ free_bus:
return ret;
}
-static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct {
@@ -3203,7 +3245,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
if (hdev->channel->rescind)
return 0;
- if (!hibernating) {
+ if (!keep_devs) {
/* Delete any children which might still exist. */
dr = kzalloc(sizeof(*dr), GFP_KERNEL);
if (dr && hv_pci_start_relations_work(hbus, dr))
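The pci-hyperv.c hunks above replace the open-coded flexible-array sizing with struct_size() from <linux/overflow.h>. A rough illustration of what that helper computes, using a hypothetical stand-in struct rather than the real hv_dr_state:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_state {		/* hypothetical stand-in */
	u32 device_count;
	u32 func[];		/* flexible array member */
};

static struct example_state *example_alloc(u32 count)
{
	struct example_state *st;

	/*
	 * struct_size(st, func, count) is roughly
	 * sizeof(*st) + count * sizeof(st->func[0]),
	 * but it saturates instead of wrapping on overflow.
	 */
	st = kzalloc(struct_size(st, func, count), GFP_KERNEL);
	return st;
}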
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 3e64ba6a36a8..235b456698fc 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -2219,8 +2219,8 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
rp->reset_gpio = NULL;
} else {
- dev_err(dev, "failed to get reset GPIO: %d\n",
- err);
+ dev_err(dev, "failed to get reset GPIO: %ld\n",
+ PTR_ERR(rp->reset_gpio));
return PTR_ERR(rp->reset_gpio);
}
}
@@ -2712,7 +2712,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
dev_err(dev, "fail to enable pcie controller: %d\n", err);
- goto teardown_msi;
+ goto pm_runtime_put;
}
host->busnr = bus->start;
@@ -2746,7 +2746,6 @@ static int tegra_pcie_probe(struct platform_device *pdev)
pm_runtime_put:
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
-teardown_msi:
tegra_pcie_msi_teardown(pcie);
put_resources:
tegra_pcie_put_resources(pcie);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index 32d1d7b81ef4..7e8835fee5f7 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -345,7 +345,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-struct pci_ecam_ops pci_thunder_ecam_ops = {
+const struct pci_ecam_ops pci_thunder_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -357,22 +357,20 @@ struct pci_ecam_ops pci_thunder_ecam_ops = {
#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
static const struct of_device_id thunder_ecam_of_match[] = {
- { .compatible = "cavium,pci-host-thunder-ecam" },
+ {
+ .compatible = "cavium,pci-host-thunder-ecam",
+ .data = &pci_thunder_ecam_ops,
+ },
{ },
};
-static int thunder_ecam_probe(struct platform_device *pdev)
-{
- return pci_host_common_probe(pdev, &pci_thunder_ecam_ops);
-}
-
static struct platform_driver thunder_ecam_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_ecam_of_match,
.suppress_bind_attrs = true,
},
- .probe = thunder_ecam_probe,
+ .probe = pci_host_common_probe,
};
builtin_platform_driver(thunder_ecam_driver);
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 9491e266b1ea..3f847969143e 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -403,7 +403,7 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
return thunder_pem_init(dev, cfg, res_pem);
}
-struct pci_ecam_ops thunder_pem_ecam_ops = {
+const struct pci_ecam_ops thunder_pem_ecam_ops = {
.bus_shift = 24,
.init = thunder_pem_acpi_init,
.pci_ops = {
@@ -440,7 +440,7 @@ static int thunder_pem_platform_init(struct pci_config_window *cfg)
return thunder_pem_init(dev, cfg, res_pem);
}
-static struct pci_ecam_ops pci_thunder_pem_ops = {
+static const struct pci_ecam_ops pci_thunder_pem_ops = {
.bus_shift = 24,
.init = thunder_pem_platform_init,
.pci_ops = {
@@ -451,22 +451,20 @@ static struct pci_ecam_ops pci_thunder_pem_ops = {
};
static const struct of_device_id thunder_pem_of_match[] = {
- { .compatible = "cavium,pci-host-thunder-pem" },
+ {
+ .compatible = "cavium,pci-host-thunder-pem",
+ .data = &pci_thunder_pem_ops,
+ },
{ },
};
-static int thunder_pem_probe(struct platform_device *pdev)
-{
- return pci_host_common_probe(pdev, &pci_thunder_pem_ops);
-}
-
static struct platform_driver thunder_pem_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = thunder_pem_of_match,
.suppress_bind_attrs = true,
},
- .probe = thunder_pem_probe,
+ .probe = pci_host_common_probe,
};
builtin_platform_driver(thunder_pem_driver);
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index bd05221f5a22..3681e5af3878 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -720,7 +720,7 @@ static int v3_pci_probe(struct platform_device *pdev)
int irq;
int ret;
- host = pci_alloc_host_bridge(sizeof(*v3));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*v3));
if (!host)
return -ENOMEM;
@@ -777,9 +777,9 @@ static int v3_pci_probe(struct platform_device *pdev)
/* Get and request error IRQ resource */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
+ if (irq < 0) {
dev_err(dev, "unable to obtain PCIv3 error IRQ\n");
- return -ENODEV;
+ return irq;
}
ret = devm_request_irq(dev, irq, v3_irq, 0,
"PCIv3 error", v3);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index de195fd430dc..d1efa8ffbae1 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -256,7 +256,7 @@ static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
}
-struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+const struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
.bus_shift = 16,
.init = xgene_v1_pcie_ecam_init,
.pci_ops = {
@@ -271,7 +271,7 @@ static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
}
-struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
.bus_shift = 16,
.init = xgene_v2_pcie_ecam_init,
.pci_ops = {
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index b447c3e4abad..24cb1c331058 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -193,7 +193,7 @@ static bool altera_pcie_valid_device(struct altera_pcie *pcie,
if (bus->number == pcie->root_bus_nr && dev > 0)
return false;
- return true;
+ return true;
}
static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 6d79d14527a6..7730ea845ff2 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -28,6 +28,8 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
#include "../pci.h"
/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
@@ -41,6 +43,9 @@
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+
#define PCIE_RC_DL_MDIO_ADDR 0x1100
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
@@ -54,11 +59,11 @@
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
#define PCIE_MEM_WIN0_LO(win) \
- PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 4)
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI 0x4010
#define PCIE_MEM_WIN0_HI(win) \
- PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 4)
+ PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
@@ -693,10 +698,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
int num_out_wins = 0;
u16 nlw, cls, lnksta;
int i, ret;
- u32 tmp;
+ u32 tmp, aspm_support;
/* Reset the bridge */
brcm_pcie_bridge_sw_init_set(pcie, 1);
+ brcm_pcie_perst_set(pcie, 1);
usleep_range(100, 200);
@@ -803,6 +809,15 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
/*
* For config space accesses on the RC, show the right class for
* a PCIe-PCIe bridge (the default setting is to be EP mode).
@@ -899,7 +914,6 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
clk_disable_unprepare(pcie->clk);
- clk_put(pcie->clk);
}
static int brcm_pcie_remove(struct platform_device *pdev)
@@ -917,11 +931,26 @@ static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
struct pci_host_bridge *bridge;
+ struct device_node *fw_np;
struct brcm_pcie *pcie;
struct pci_bus *child;
struct resource *res;
int ret;
+ /*
+	 * We have to wait for Raspberry Pi's firmware interface to be up, as a
+	 * PCI fixup, rpi_firmware_init_vl805(), depends on it. This driver's
+	 * probe can race with the firmware interface's probe (see
+	 * drivers/firmware/raspberrypi.c) and potentially break the PCI fixup.
+ */
+ fw_np = of_find_compatible_node(NULL, NULL,
+ "raspberrypi,bcm2835-firmware");
+ if (fw_np && !rpi_firmware_get(fw_np)) {
+ of_node_put(fw_np);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(fw_np);
+
bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
if (!bridge)
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index cb982891b22b..ebfa7d5a4e2d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -651,6 +651,9 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
}
port->irq = platform_get_irq(pdev, port->slot);
+ if (port->irq < 0)
+ return port->irq;
+
irq_set_chained_handler_and_data(port->irq,
mtk_pcie_intr_handler, port);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
new file mode 100644
index 000000000000..b4a288e24aaf
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint driver for Renesas R-Car SoCs
+ * Copyright (c) 2020 Renesas Electronics Europe GmbH
+ *
+ * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-epc.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include "pcie-rcar.h"
+
+#define RCAR_EPC_MAX_FUNCTIONS 1
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie_endpoint {
+ struct rcar_pcie pcie;
+ phys_addr_t *ob_mapped_addr;
+ struct pci_epc_mem_window *ob_window;
+ u8 max_functions;
+ unsigned int bar_to_atu[MAX_NR_INBOUND_MAPS];
+ unsigned long *ib_window_map;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+};
+
+static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
+{
+ u32 val;
+
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set endpoint mode */
+ rcar_pci_write_reg(pcie, 0, PCIEMSR);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_NORMAL);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ val = rcar_pci_read_reg(pcie, EXPCAP(1));
+	/* the device supports a fixed 128-byte MPSS */
+ val &= ~GENMASK(2, 0);
+ rcar_pci_write_reg(pcie, val, EXPCAP(1));
+
+ val = rcar_pci_read_reg(pcie, EXPCAP(2));
+	/* read request size of 128 bytes */
+ val &= ~GENMASK(14, 12);
+ /* payload size 128 bytes */
+ val &= ~GENMASK(7, 5);
+ rcar_pci_write_reg(pcie, val, EXPCAP(2));
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* flush modifications */
+ wmb();
+}
+
+static int rcar_pcie_ep_get_window(struct rcar_pcie_endpoint *ep,
+ phys_addr_t addr)
+{
+ int i;
+
+ for (i = 0; i < ep->num_ob_windows; i++)
+ if (ep->ob_window[i].phys_base == addr)
+ return i;
+
+ return -EINVAL;
+}
+
+static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
+ struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ char outbound_name[10];
+ struct resource *res;
+ unsigned int i = 0;
+
+ ep->num_ob_windows = 0;
+ for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
+ sprintf(outbound_name, "memory%u", i);
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ outbound_name);
+ if (!res) {
+ dev_err(pcie->dev, "missing outbound window %u\n", i);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res),
+ outbound_name)) {
+ dev_err(pcie->dev, "Cannot request memory region %s.\n",
+ outbound_name);
+ return -EIO;
+ }
+
+ ep->ob_window[i].phys_base = res->start;
+ ep->ob_window[i].size = resource_size(res);
+		/* The controller doesn't support multiple allocations
+		 * from the same window, so set page_size to the window size
+ */
+ ep->ob_window[i].page_size = resource_size(res);
+ }
+ ep->num_ob_windows = i;
+
+ return 0;
+}
+
+static int rcar_pcie_ep_get_pdata(struct rcar_pcie_endpoint *ep,
+ struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ struct pci_epc_mem_window *window;
+ struct device *dev = pcie->dev;
+ struct resource res;
+ int err;
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ ep->ob_window = devm_kcalloc(dev, RCAR_PCI_MAX_RESOURCES,
+ sizeof(*window), GFP_KERNEL);
+ if (!ep->ob_window)
+ return -ENOMEM;
+
+ rcar_pcie_parse_outbound_ranges(ep, pdev);
+
+ err = of_property_read_u8(dev->of_node, "max-functions",
+ &ep->max_functions);
+ if (err < 0 || ep->max_functions > RCAR_EPC_MAX_FUNCTIONS)
+ ep->max_functions = RCAR_EPC_MAX_FUNCTIONS;
+
+ return 0;
+}
+
+static int rcar_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+ struct pci_epf_header *hdr)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 val;
+
+ if (!fn)
+ val = hdr->vendorid;
+ else
+ val = rcar_pci_read_reg(pcie, IDSETR0);
+ val |= hdr->deviceid << 16;
+ rcar_pci_write_reg(pcie, val, IDSETR0);
+
+ val = hdr->revid;
+ val |= hdr->progif_code << 8;
+ val |= hdr->subclass_code << 16;
+ val |= hdr->baseclass_code << 24;
+ rcar_pci_write_reg(pcie, val, IDSETR1);
+
+ if (!fn)
+ val = hdr->subsys_vendor_id;
+ else
+ val = rcar_pci_read_reg(pcie, SUBIDSETR);
+ val |= hdr->subsys_id << 16;
+ rcar_pci_write_reg(pcie, val, SUBIDSETR);
+
+ if (hdr->interrupt_pin > PCI_INTERRUPT_INTA)
+ return -EINVAL;
+ val = rcar_pci_read_reg(pcie, PCICONF(15));
+ val |= (hdr->interrupt_pin << 8);
+ rcar_pci_write_reg(pcie, val, PCICONF(15));
+
+ return 0;
+}
+
+static int rcar_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ int flags = epf_bar->flags | LAR_ENABLE | LAM_64BIT;
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ u64 size = 1ULL << fls64(epf_bar->size - 1);
+ dma_addr_t cpu_addr = epf_bar->phys_addr;
+ enum pci_barno bar = epf_bar->barno;
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 mask;
+ int idx;
+ int err;
+
+ idx = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
+ if (idx >= ep->num_ib_windows) {
+ dev_err(pcie->dev, "no free inbound window\n");
+ return -EINVAL;
+ }
+
+ if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
+ flags |= IO_SPACE;
+
+ ep->bar_to_atu[bar] = idx;
+ /* use 64-bit BARs */
+ set_bit(idx, ep->ib_window_map);
+ set_bit(idx + 1, ep->ib_window_map);
+
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(size, alignment);
+ }
+
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ rcar_pcie_set_inbound(pcie, cpu_addr,
+ 0x0, mask | flags, idx, false);
+
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err) {
+ dev_err(pcie->dev, "phy not ready\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+ struct pci_epf_bar *epf_bar)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ enum pci_barno bar = epf_bar->barno;
+ u32 atu_index = ep->bar_to_atu[bar];
+
+ rcar_pcie_set_inbound(&ep->pcie, 0x0, 0x0, 0x0, bar, false);
+
+ clear_bit(atu_index, ep->ib_window_map);
+ clear_bit(atu_index + 1, ep->ib_window_map);
+}
+
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 interrupts)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 flags;
+
+ flags = rcar_pci_read_reg(pcie, MSICAP(fn));
+ flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ rcar_pci_write_reg(pcie, flags, MSICAP(fn));
+
+ return 0;
+}
+
+static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 flags;
+
+ flags = rcar_pci_read_reg(pcie, MSICAP(fn));
+ if (!(flags & MSICAP0_MSIE))
+ return -EINVAL;
+
+ return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+}
+
+static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct rcar_pcie *pcie = &ep->pcie;
+ struct resource_entry win;
+ struct resource res;
+ int window;
+ int err;
+
+ /* check if we have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err) {
+ dev_err(pcie->dev, "link not up\n");
+ return err;
+ }
+
+ window = rcar_pcie_ep_get_window(ep, addr);
+ if (window < 0) {
+ dev_err(pcie->dev, "failed to get corresponding window\n");
+ return -EINVAL;
+ }
+
+ memset(&win, 0x0, sizeof(win));
+ memset(&res, 0x0, sizeof(res));
+ res.start = pci_addr;
+ res.end = pci_addr + size - 1;
+ res.flags = IORESOURCE_MEM;
+ win.res = &res;
+
+ rcar_pcie_set_outbound(pcie, window, &win);
+
+ ep->ob_mapped_addr[window] = addr;
+
+ return 0;
+}
+
+static void rcar_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+ phys_addr_t addr)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+ struct resource_entry win;
+ struct resource res;
+ int idx;
+
+ for (idx = 0; idx < ep->num_ob_windows; idx++)
+ if (ep->ob_mapped_addr[idx] == addr)
+ break;
+
+ if (idx >= ep->num_ob_windows)
+ return;
+
+ memset(&win, 0x0, sizeof(win));
+ memset(&res, 0x0, sizeof(res));
+ win.res = &res;
+ rcar_pcie_set_outbound(&ep->pcie, idx, &win);
+
+ ep->ob_mapped_addr[idx] = 0;
+}
+
+static int rcar_pcie_ep_assert_intx(struct rcar_pcie_endpoint *ep,
+ u8 fn, u8 intx)
+{
+ struct rcar_pcie *pcie = &ep->pcie;
+ u32 val;
+
+ val = rcar_pci_read_reg(pcie, PCIEMSITXR);
+ if ((val & PCI_MSI_FLAGS_ENABLE)) {
+ dev_err(pcie->dev, "MSI is enabled, cannot assert INTx\n");
+ return -EINVAL;
+ }
+
+ val = rcar_pci_read_reg(pcie, PCICONF(1));
+ if ((val & INTDIS)) {
+ dev_err(pcie->dev, "INTx message transmission is disabled\n");
+ return -EINVAL;
+ }
+
+ val = rcar_pci_read_reg(pcie, PCIEINTXR);
+ if ((val & ASTINTX)) {
+ dev_err(pcie->dev, "INTx is already asserted\n");
+ return -EINVAL;
+ }
+
+ val |= ASTINTX;
+ rcar_pci_write_reg(pcie, val, PCIEINTXR);
+ usleep_range(1000, 1001);
+ val = rcar_pci_read_reg(pcie, PCIEINTXR);
+ val &= ~ASTINTX;
+ rcar_pci_write_reg(pcie, val, PCIEINTXR);
+
+ return 0;
+}
+
+static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
+ u8 fn, u8 interrupt_num)
+{
+ u16 msi_count;
+ u32 val;
+
+ /* Check MSI enable bit */
+ val = rcar_pci_read_reg(pcie, MSICAP(fn));
+ if (!(val & MSICAP0_MSIE))
+ return -EINVAL;
+
+ /* Get MSI numbers from MME */
+ msi_count = ((val & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ msi_count = 1 << msi_count;
+
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ val = rcar_pci_read_reg(pcie, PCIEMSITXR);
+ rcar_pci_write_reg(pcie, val | (interrupt_num - 1), PCIEMSITXR);
+
+ return 0;
+}
+
+static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return rcar_pcie_ep_assert_intx(ep, fn, 0);
+
+ case PCI_EPC_IRQ_MSI:
+ return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rcar_pcie_ep_start(struct pci_epc *epc)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ rcar_pci_write_reg(&ep->pcie, MACCTLR_INIT_VAL, MACCTLR);
+ rcar_pci_write_reg(&ep->pcie, CFINIT, PCIETCTLR);
+
+ return 0;
+}
+
+static void rcar_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
+
+ rcar_pci_write_reg(&ep->pcie, 0, PCIETCTLR);
+}
+
+static const struct pci_epc_features rcar_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ /* use 64-bit BARs so mark BAR[1,3,5] as reserved */
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
+ .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
+ .bar_fixed_size[0] = 128,
+ .bar_fixed_size[2] = 256,
+ .bar_fixed_size[4] = 256,
+};
+
+static const struct pci_epc_features*
+rcar_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+{
+ return &rcar_pcie_epc_features;
+}
+
+static const struct pci_epc_ops rcar_pcie_epc_ops = {
+ .write_header = rcar_pcie_ep_write_header,
+ .set_bar = rcar_pcie_ep_set_bar,
+ .clear_bar = rcar_pcie_ep_clear_bar,
+ .set_msi = rcar_pcie_ep_set_msi,
+ .get_msi = rcar_pcie_ep_get_msi,
+ .map_addr = rcar_pcie_ep_map_addr,
+ .unmap_addr = rcar_pcie_ep_unmap_addr,
+ .raise_irq = rcar_pcie_ep_raise_irq,
+ .start = rcar_pcie_ep_start,
+ .stop = rcar_pcie_ep_stop,
+ .get_features = rcar_pcie_ep_get_features,
+};
+
+static const struct of_device_id rcar_pcie_ep_of_match[] = {
+ { .compatible = "renesas,r8a774c0-pcie-ep", },
+ { .compatible = "renesas,rcar-gen3-pcie-ep" },
+ { },
+};
+
+static int rcar_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_pcie_endpoint *ep;
+ struct rcar_pcie *pcie;
+ struct pci_epc *epc;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ pcie = &ep->pcie;
+ pcie->dev = dev;
+
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
+ err = rcar_pcie_ep_get_pdata(ep, pdev);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ goto err_pm_put;
+ }
+
+ ep->num_ib_windows = MAX_NR_INBOUND_MAPS;
+ ep->ib_window_map =
+ devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows),
+ sizeof(long), GFP_KERNEL);
+ if (!ep->ib_window_map) {
+ err = -ENOMEM;
+ dev_err(dev, "failed to allocate memory for inbound map\n");
+ goto err_pm_put;
+ }
+
+ ep->ob_mapped_addr = devm_kcalloc(dev, ep->num_ob_windows,
+ sizeof(*ep->ob_mapped_addr),
+ GFP_KERNEL);
+ if (!ep->ob_mapped_addr) {
+ err = -ENOMEM;
+ dev_err(dev, "failed to allocate memory for outbound memory pointers\n");
+ goto err_pm_put;
+ }
+
+ epc = devm_pci_epc_create(dev, &rcar_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create epc device\n");
+ err = PTR_ERR(epc);
+ goto err_pm_put;
+ }
+
+ epc->max_functions = ep->max_functions;
+ epc_set_drvdata(epc, ep);
+
+ rcar_pcie_ep_hw_init(pcie);
+
+ err = pci_epc_multi_mem_init(epc, ep->ob_window, ep->num_ob_windows);
+ if (err < 0) {
+ dev_err(dev, "failed to initialize the epc memory space\n");
+ goto err_pm_put;
+ }
+
+ return 0;
+
+err_pm_put:
+ pm_runtime_put(dev);
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+
+ return err;
+}
+
+static struct platform_driver rcar_pcie_ep_driver = {
+ .driver = {
+ .name = "rcar-pcie-ep",
+ .of_match_table = rcar_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_ep_probe,
+};
+builtin_platform_driver(rcar_pcie_ep_driver);
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
new file mode 100644
index 000000000000..d210a36561be
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ * arch/sh/drivers/pci/pcie-sh7786.c
+ * arch/sh/drivers/pci/ops-sh7786.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "pcie-rcar.h"
+
+struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct msi_controller chip;
+ unsigned long pages;
+ struct mutex lock;
+ int irq1;
+ int irq2;
+};
+
+static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
+{
+ return container_of(chip, struct rcar_msi, chip);
+}
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie_host {
+ struct rcar_pcie pcie;
+ struct device *dev;
+ struct phy *phy;
+ void __iomem *base;
+ struct list_head resources;
+ int root_bus_nr;
+ struct clk *bus_clk;
+ struct rcar_msi msi;
+ int (*phy_init_fn)(struct rcar_pcie_host *host);
+};
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+ unsigned int shift = BITS_PER_BYTE * (where & 3);
+ u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+ return val >> shift;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie_host *host,
+ unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, int where, u32 *data)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int dev, func, reg, index;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~3;
+ index = reg / 4;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+	 * controller-initiated target transfer to its own config space
+	 * results in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular ECAR/ECDR path is sidelined and the mangled
+ * config access itself is initiated as an internal bus transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ) {
+ *data = rcar_pci_read_reg(pcie, PCICONF(index));
+ } else {
+ /* Keep an eye out for changes to the root bus number */
+ if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
+ host->root_bus_nr = *data & 0xff;
+
+ rcar_pci_write_reg(pcie, *data, PCICONF(index));
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (host->root_bus_nr < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear errors */
+ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+ /* Set the PIO address */
+ rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+ PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+ /* Enable the configuration access */
+ if (bus->parent->number == host->root_bus_nr)
+ rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ else
+ rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+
+ /* Check for errors */
+ if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+ (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ)
+ *data = rcar_pci_read_reg(pcie, PCIECDR);
+ else
+ rcar_pci_write_reg(pcie, *data, PCIECDR);
+
+ /* Disable the configuration access */
+ rcar_pci_write_reg(pcie, 0, PCIECCTLR);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rcar_pcie_host *host = bus->sysdata;
+ int ret;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL) {
+ *val = 0xffffffff;
+ return ret;
+ }
+
+ if (size == 1)
+ *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
+
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, *val);
+
+ return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rcar_pcie_host *host = bus->sysdata;
+ unsigned int shift;
+ u32 data;
+ int ret;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
+ bus->number, devfn, where, size, val);
+
+ if (size == 1) {
+ shift = BITS_PER_BYTE * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = BITS_PER_BYTE * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else
+ data = val;
+
+ ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
+ bus, devfn, where, &data);
+
+ return ret;
+}
+
+static struct pci_ops rcar_pcie_ops = {
+ .read = rcar_pcie_read_conf,
+ .write = rcar_pcie_write_conf,
+};
+
+static int rcar_pcie_setup(struct list_head *resource,
+ struct rcar_pcie_host *host)
+{
+ struct resource_entry *win;
+ int i = 0;
+
+ /* Setup PCI resources */
+ resource_list_for_each_entry(win, &host->resources) {
+ struct resource *res = win->res;
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rcar_pcie_set_outbound(&host->pcie, i, win);
+ i++;
+ break;
+ case IORESOURCE_BUS:
+ host->root_bus_nr = res->start;
+ break;
+ default:
+ continue;
+ }
+
+ pci_add_resource(resource, res);
+ }
+
+ return 1;
+}
+
+static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 1000;
+ u32 macsr;
+
+ if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
+ return;
+
+ if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
+ dev_err(dev, "Speed change already in progress\n");
+ return;
+ }
+
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
+ goto done;
+
+ /* Set target link speed to 5.0 GT/s */
+ rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
+ PCI_EXP_LNKSTA_CLS_5_0GB);
+
+ /* Set speed change reason as intentional factor */
+ rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
+
+ /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
+ if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ /* Start link speed change */
+ rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
+
+ while (timeout--) {
+ macsr = rcar_pci_read_reg(pcie, MACSR);
+ if (macsr & SPCHGFIN) {
+ /* Clear the interrupt bits */
+ rcar_pci_write_reg(pcie, macsr, MACSR);
+
+ if (macsr & SPCHGFAIL)
+ dev_err(dev, "Speed change failed\n");
+
+ goto done;
+ }
+
+ msleep(1);
+ }
+
+ dev_err(dev, "Speed change timed out\n");
+
+done:
+ dev_info(dev, "Current link speed is %s GT/s\n",
+ (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
+}
+
+static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct resource_entry *win;
+ LIST_HEAD(res);
+ int i = 0;
+
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
+ /* Setup PCI resources */
+ resource_list_for_each_entry(win, &host->resources) {
+ struct resource *res = win->res;
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rcar_pcie_set_outbound(pcie, i, win);
+ i++;
+ break;
+ }
+ }
+}
+
+static int rcar_pcie_enable(struct rcar_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct pci_bus *bus, *child;
+ int ret;
+
+ /* Try setting 5 GT/s link speed */
+ rcar_pcie_force_speedup(pcie);
+
+ rcar_pcie_setup(&bridge->windows, host);
+
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = host;
+ bridge->busnr = host->root_bus_nr;
+ bridge->ops = &rcar_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ bridge->msi = &host->msi.chip;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0)
+ return ret;
+
+ bus = bridge->bus;
+
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(bus);
+
+ return 0;
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ unsigned int timeout = 100;
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+ return 0;
+
+ udelay(100);
+ }
+
+ dev_err(dev, "Access to PCIe phy timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+ unsigned int rate, u32 addr,
+ unsigned int lane, u32 data)
+{
+ u32 phyaddr;
+
+ phyaddr = WRITE_CMD |
+ ((rate & 1) << RATE_POS) |
+ ((lane & 0xf) << LANE_POS) |
+ ((addr & 0xff) << ADR_POS);
+
+ /* Set write data */
+ rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+
+ /* Clear command */
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+}
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+ int err;
+
+ /* Begin initialization */
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set mode */
+ rcar_pci_write_reg(pcie, 1, PCIEMSR);
+
+ err = rcar_pcie_wait_for_phyrdy(pcie);
+ if (err)
+ return err;
+
+ /*
+ * Initial header for port config space is type 1, set the device
+ * class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+
+ /*
+	 * Set up the Secondary Bus Number & Subordinate Bus Number, even
+	 * though they aren't used, to avoid the bridge being detected as broken.
+ */
+ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+ rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_BRIDGE);
+
+ /* Enable data link layer active state reporting */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+ PCI_EXP_LNKCAP_DLLLARC);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
+ /* Finish initialization - establish a PCI Express link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+	/* This will time out if we don't have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err)
+ return err;
+
+ /* Enable INTx interrupts */
+ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+ wmb();
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /* Initialize the phy */
+ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+ phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+ phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+ phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+ phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+ phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+ phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+ phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+ phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+
+ /*
+ * These settings come from the R-Car Series, 2nd Generation User's
+ * Manual, section 50.3.1 (2) Initialization of the physical layer.
+ */
+ rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
+ rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
+ rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+ rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+ rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
+ /* The following value is for DC connection, no termination resistor */
+ rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
+ rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
+ rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
+
+ return 0;
+}
+
+static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
+{
+ int err;
+
+ err = phy_init(host->phy);
+ if (err)
+ return err;
+
+ err = phy_power_on(host->phy);
+ if (err)
+ phy_exit(host->phy);
+
+ return err;
+}
+
+static int rcar_msi_alloc(struct rcar_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+ msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
+ order_base_2(no_irqs));
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
+{
+ mutex_lock(&chip->lock);
+ clear_bit(irq, chip->used);
+ mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+ struct rcar_pcie_host *host = data;
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+ struct device *dev = pcie->dev;
+ unsigned long reg;
+
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+
+ /* MSI & INTx share an interrupt - we only handle MSI here */
+ if (!reg)
+ return IRQ_NONE;
+
+ while (reg) {
+ unsigned int index = find_first_bit(&reg, 32);
+ unsigned int msi_irq;
+
+ /* clear the interrupt */
+ rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+
+ msi_irq = irq_find_mapping(msi->domain, index);
+ if (msi_irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(msi_irq);
+ else
+ dev_info(dev, "unhandled MSI\n");
+ } else {
+ /* Unknown MSI, just clear it */
+ dev_dbg(dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
+ msi.chip);
+ struct rcar_pcie *pcie = &host->pcie;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = rcar_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_find_mapping(msi->domain, hwirq);
+ if (!irq) {
+ rcar_msi_free(msi, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static int rcar_msi_setup_irqs(struct msi_controller *chip,
+ struct pci_dev *pdev, int nvec, int type)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie_host *host = container_of(chip, struct rcar_pcie_host,
+ msi.chip);
+ struct rcar_pcie *pcie = &host->pcie;
+ struct msi_desc *desc;
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+ int i;
+
+ /* MSI-X interrupts are not supported */
+ if (type == PCI_CAP_ID_MSIX)
+ return -EINVAL;
+
+ WARN_ON(!list_is_singular(&pdev->dev.msi_list));
+ desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
+
+ hwirq = rcar_msi_alloc_region(msi, nvec);
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ irq = irq_find_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -ENOSPC;
+
+ for (i = 0; i < nvec; i++) {
+ /*
+ * irq_create_mapping() called from rcar_pcie_probe() pre-
+ * allocates descs, so there is no need to allocate descs here.
+ * We can therefore assume that if irq_find_mapping() above
+ * returns non-zero, then the descs are also successfully
+ * allocated.
+ */
+ if (irq_set_msi_desc_off(irq, i, desc)) {
+ /* TODO: clear */
+ return -EINVAL;
+ }
+ }
+
+ desc->nvec_used = nvec;
+ desc->msi_attrib.multiple = order_base_2(nvec);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ rcar_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip rcar_msi_irq_chip = {
+ .name = "R-Car PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = rcar_msi_map,
+};
+
+static void rcar_pcie_unmap_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_msi *msi = &host->msi;
+ int i, irq;
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+}
+
+static void rcar_pcie_hw_enable_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+ unsigned long base;
+
+ /* setup MSI data target */
+ base = virt_to_phys((void *)msi->pages);
+
+ rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
+
+ /* enable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+}
+
+static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct rcar_msi *msi = &host->msi;
+ int err, i;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = dev;
+ msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.setup_irqs = rcar_msi_setup_irqs;
+ msi->chip.teardown_irq = rcar_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++)
+ irq_create_mapping(msi->domain, i);
+
+ /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, host);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, host);
+ if (err < 0) {
+ dev_err(dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* setup MSI data target */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ rcar_pcie_hw_enable_msi(host);
+
+ return 0;
+
+err:
+ rcar_pcie_unmap_msi(host);
+ return err;
+}
+
+static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct rcar_msi *msi = &host->msi;
+
+ /* Disable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
+
+ /* Disable address decoding of the MSI interrupt, MSIFE */
+ rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
+
+ free_pages(msi->pages, 0);
+
+ rcar_pcie_unmap_msi(host);
+}
+
+static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
+{
+ struct rcar_pcie *pcie = &host->pcie;
+ struct device *dev = pcie->dev;
+ struct resource res;
+ int err, i;
+
+ host->phy = devm_phy_optional_get(dev, "pcie");
+ if (IS_ERR(host->phy))
+ return PTR_ERR(host->phy);
+
+ err = of_address_to_resource(dev->of_node, 0, &res);
+ if (err)
+ return err;
+
+ pcie->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ host->bus_clk = devm_clk_get(dev, "pcie_bus");
+ if (IS_ERR(host->bus_clk)) {
+ dev_err(dev, "cannot get pcie bus clock\n");
+ return PTR_ERR(host->bus_clk);
+ }
+
+ i = irq_of_parse_and_map(dev->of_node, 0);
+ if (!i) {
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_irq1;
+ }
+ host->msi.irq1 = i;
+
+ i = irq_of_parse_and_map(dev->of_node, 1);
+ if (!i) {
+ dev_err(dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_irq2;
+ }
+ host->msi.irq2 = i;
+
+ return 0;
+
+err_irq2:
+ irq_dispose_mapping(host->msi.irq1);
+err_irq1:
+ return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+ struct resource_entry *entry,
+ int *index)
+{
+ u64 restype = entry->res->flags;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 pci_addr = entry->res->start - entry->offset;
+ u32 flags = LAM_64BIT | LAR_ENABLE;
+ u64 mask;
+ u64 size = resource_size(entry->res);
+ int idx = *index;
+
+ if (restype & IORESOURCE_PREFETCH)
+ flags |= LAM_PREFETCH;
+
+ while (cpu_addr < cpu_end) {
+ if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ /*
+ * If the size of the range is larger than the alignment of
+ * the start address, we have to use multiple entries to
+ * perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(size, alignment);
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
+ lower_32_bits(mask) | flags, idx, true);
+
+ pci_addr += size;
+ cpu_addr += size;
+ idx += 2;
+ }
+ *index = idx;
+
+ return 0;
+}
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *entry;
+ int index = 0, err = 0;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+ { .compatible = "renesas,pcie-r8a7779",
+ .data = rcar_pcie_phy_init_h1 },
+ { .compatible = "renesas,pcie-r8a7790",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7791",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-rcar-gen2",
+ .data = rcar_pcie_phy_init_gen2 },
+ { .compatible = "renesas,pcie-r8a7795",
+ .data = rcar_pcie_phy_init_gen3 },
+ { .compatible = "renesas,pcie-rcar-gen3",
+ .data = rcar_pcie_phy_init_gen3 },
+ {},
+};
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_pcie_host *host;
+ struct rcar_pcie *pcie;
+ u32 data;
+ int err;
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_alloc_host_bridge(sizeof(*host));
+ if (!bridge)
+ return -ENOMEM;
+
+ host = pci_host_bridge_priv(bridge);
+ pcie = &host->pcie;
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, host);
+
+ err = pci_parse_request_of_pci_ranges(dev, &host->resources,
+ &bridge->dma_ranges, NULL);
+ if (err)
+ goto err_free_bridge;
+
+ pm_runtime_enable(pcie->dev);
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
+ goto err_pm_disable;
+ }
+
+ err = rcar_pcie_get_resources(host);
+ if (err < 0) {
+ dev_err(dev, "failed to request resources: %d\n", err);
+ goto err_pm_put;
+ }
+
+ err = clk_prepare_enable(host->bus_clk);
+ if (err) {
+ dev_err(dev, "failed to enable bus clock: %d\n", err);
+ goto err_unmap_msi_irqs;
+ }
+
+ err = rcar_pcie_parse_map_dma_ranges(host);
+ if (err)
+ goto err_clk_disable;
+
+ host->phy_init_fn = of_device_get_match_data(dev);
+ err = host->phy_init_fn(host);
+ if (err) {
+ dev_err(dev, "failed to init PCIe PHY\n");
+ goto err_clk_disable;
+ }
+
+ /* Failure to get a link might just be that no cards are inserted */
+ if (rcar_pcie_hw_init(pcie)) {
+ dev_info(dev, "PCIe link down\n");
+ err = -ENODEV;
+ goto err_phy_shutdown;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = rcar_pcie_enable_msi(host);
+ if (err < 0) {
+ dev_err(dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ goto err_phy_shutdown;
+ }
+ }
+
+ err = rcar_pcie_enable(host);
+ if (err)
+ goto err_msi_teardown;
+
+ return 0;
+
+err_msi_teardown:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_teardown_msi(host);
+
+err_phy_shutdown:
+ if (host->phy) {
+ phy_power_off(host->phy);
+ phy_exit(host->phy);
+ }
+
+err_clk_disable:
+ clk_disable_unprepare(host->bus_clk);
+
+err_unmap_msi_irqs:
+ irq_dispose_mapping(host->msi.irq2);
+ irq_dispose_mapping(host->msi.irq1);
+
+err_pm_put:
+ pm_runtime_put(dev);
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ pci_free_resource_list(&host->resources);
+
+err_free_bridge:
+ pci_free_host_bridge(bridge);
+
+ return err;
+}
+
+static int __maybe_unused rcar_pcie_resume(struct device *dev)
+{
+ struct rcar_pcie_host *host = dev_get_drvdata(dev);
+ struct rcar_pcie *pcie = &host->pcie;
+ unsigned int data;
+ int err;
+
+ err = rcar_pcie_parse_map_dma_ranges(host);
+ if (err)
+ return 0;
+
+ /* Failure to get a link might just be that no cards are inserted */
+ err = host->phy_init_fn(host);
+ if (err) {
+ dev_info(dev, "PCIe link down\n");
+ return 0;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pcie_hw_enable_msi(host);
+
+ rcar_pcie_hw_enable(host);
+
+ return 0;
+}
+
+static int rcar_pcie_resume_noirq(struct device *dev)
+{
+ struct rcar_pcie_host *host = dev_get_drvdata(dev);
+ struct rcar_pcie *pcie = &host->pcie;
+
+ if (rcar_pci_read_reg(pcie, PMSR) &&
+ !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
+ return 0;
+
+ /* Re-establish the PCIe link */
+ rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+ return rcar_pcie_wait_for_dl(pcie);
+}
+
+static const struct dev_pm_ops rcar_pcie_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
+ .resume_noirq = rcar_pcie_resume_noirq,
+};
+
+static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = "rcar-pcie",
+ .of_match_table = rcar_pcie_of_match,
+ .pm = &rcar_pcie_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+};
+builtin_platform_driver(rcar_pcie_driver);
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 759c6542c5c8..7583699ef7b6 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -1,177 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/*
* PCIe driver for Renesas R-Car SoCs
- * Copyright (C) 2014 Renesas Electronics Europe Ltd
- *
- * Based on:
- * arch/sh/drivers/pci/pcie-sh7786.c
- * arch/sh/drivers/pci/ops-sh7786.c
- * Copyright (C) 2009 - 2011 Paul Mundt
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
*
* Author: Phil Edworthy <phil.edworthy@renesas.com>
*/
-#include <linux/bitops.h>
-#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_pci.h>
-#include <linux/of_platform.h>
#include <linux/pci.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-
-#define PCIECAR 0x000010
-#define PCIECCTLR 0x000018
-#define CONFIG_SEND_ENABLE BIT(31)
-#define TYPE0 (0 << 8)
-#define TYPE1 BIT(8)
-#define PCIECDR 0x000020
-#define PCIEMSR 0x000028
-#define PCIEINTXR 0x000400
-#define PCIEPHYSR 0x0007f0
-#define PHYRDY BIT(0)
-#define PCIEMSITXR 0x000840
-
-/* Transfer control */
-#define PCIETCTLR 0x02000
-#define DL_DOWN BIT(3)
-#define CFINIT BIT(0)
-#define PCIETSTR 0x02004
-#define DATA_LINK_ACTIVE BIT(0)
-#define PCIEERRFR 0x02020
-#define UNSUPPORTED_REQUEST BIT(4)
-#define PCIEMSIFR 0x02044
-#define PCIEMSIALR 0x02048
-#define MSIFE BIT(0)
-#define PCIEMSIAUR 0x0204c
-#define PCIEMSIIER 0x02050
-
-/* root port address */
-#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
-
-/* local address reg & mask */
-#define PCIELAR(x) (0x02200 + ((x) * 0x20))
-#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
-#define LAM_PREFETCH BIT(3)
-#define LAM_64BIT BIT(2)
-#define LAR_ENABLE BIT(1)
-
-/* PCIe address reg & mask */
-#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
-#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
-#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
-#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
-#define PAR_ENABLE BIT(31)
-#define IO_SPACE BIT(8)
-
-/* Configuration */
-#define PCICONF(x) (0x010000 + ((x) * 0x4))
-#define PMCAP(x) (0x010040 + ((x) * 0x4))
-#define EXPCAP(x) (0x010070 + ((x) * 0x4))
-#define VCCAP(x) (0x010100 + ((x) * 0x4))
-
-/* link layer */
-#define IDSETR1 0x011004
-#define TLCTLR 0x011048
-#define MACSR 0x011054
-#define SPCHGFIN BIT(4)
-#define SPCHGFAIL BIT(6)
-#define SPCHGSUC BIT(7)
-#define LINK_SPEED (0xf << 16)
-#define LINK_SPEED_2_5GTS (1 << 16)
-#define LINK_SPEED_5_0GTS (2 << 16)
-#define MACCTLR 0x011058
-#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
-#define SPEED_CHANGE BIT(24)
-#define SCRAMBLE_DISABLE BIT(27)
-#define LTSMDIS BIT(31)
-#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
-#define PMSR 0x01105c
-#define MACS2R 0x011078
-#define MACCGSPSETR 0x011084
-#define SPCNGRSN BIT(31)
-
-/* R-Car H1 PHY */
-#define H1_PCIEPHYADRR 0x04000c
-#define WRITE_CMD BIT(16)
-#define PHY_ACK BIT(24)
-#define RATE_POS 12
-#define LANE_POS 8
-#define ADR_POS 0
-#define H1_PCIEPHYDOUTR 0x040014
-
-/* R-Car Gen2 PHY */
-#define GEN2_PCIEPHYADDR 0x780
-#define GEN2_PCIEPHYDATA 0x784
-#define GEN2_PCIEPHYCTRL 0x78c
-
-#define INT_PCI_MSI_NR 32
-
-#define RCONF(x) (PCICONF(0) + (x))
-#define RPMCAP(x) (PMCAP(0) + (x))
-#define REXPCAP(x) (EXPCAP(0) + (x))
-#define RVCCAP(x) (VCCAP(0) + (x))
-#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
-#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
-#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+#include "pcie-rcar.h"
-#define RCAR_PCI_MAX_RESOURCES 4
-#define MAX_NR_INBOUND_MAPS 6
-
-struct rcar_msi {
- DECLARE_BITMAP(used, INT_PCI_MSI_NR);
- struct irq_domain *domain;
- struct msi_controller chip;
- unsigned long pages;
- struct mutex lock;
- int irq1;
- int irq2;
-};
-
-static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
-{
- return container_of(chip, struct rcar_msi, chip);
-}
-
-/* Structure representing the PCIe interface */
-struct rcar_pcie {
- struct device *dev;
- struct phy *phy;
- void __iomem *base;
- struct list_head resources;
- int root_bus_nr;
- struct clk *bus_clk;
- struct rcar_msi msi;
-};
-
-static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val,
- unsigned int reg)
+void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg)
{
writel(val, pcie->base + reg);
}
-static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
+u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg)
{
return readl(pcie->base + reg);
}
-enum {
- RCAR_PCI_ACCESS_READ,
- RCAR_PCI_ACCESS_WRITE,
-};
-
-static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
unsigned int shift = BITS_PER_BYTE * (where & 3);
u32 val = rcar_pci_read_reg(pcie, where & ~3);
@@ -181,163 +31,42 @@ static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
rcar_pci_write_reg(pcie, val, where & ~3);
}
-static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
{
- unsigned int shift = BITS_PER_BYTE * (where & 3);
- u32 val = rcar_pci_read_reg(pcie, where & ~3);
-
- return val >> shift;
-}
-
-/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
-static int rcar_pcie_config_access(struct rcar_pcie *pcie,
- unsigned char access_type, struct pci_bus *bus,
- unsigned int devfn, int where, u32 *data)
-{
- unsigned int dev, func, reg, index;
-
- dev = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
- reg = where & ~3;
- index = reg / 4;
-
- /*
- * While each channel has its own memory-mapped extended config
- * space, it's generally only accessible when in endpoint mode.
- * When in root complex mode, the controller is unable to target
- * itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
- *
- * Each channel effectively only supports a single device, but as
- * the same channel <-> device access works for any PCI_SLOT()
- * value, we cheat a bit here and bind the controller's config
- * space to devfn 0 in order to enable self-enumeration. In this
- * case the regular ECAR/ECDR path is sidelined and the mangled
- * config access itself is initiated as an internal bus transaction.
- */
- if (pci_is_root_bus(bus)) {
- if (dev != 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (access_type == RCAR_PCI_ACCESS_READ) {
- *data = rcar_pci_read_reg(pcie, PCICONF(index));
- } else {
- /* Keep an eye out for changes to the root bus number */
- if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
- pcie->root_bus_nr = *data & 0xff;
-
- rcar_pci_write_reg(pcie, *data, PCICONF(index));
- }
-
- return PCIBIOS_SUCCESSFUL;
- }
-
- if (pcie->root_bus_nr < 0)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Clear errors */
- rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
-
- /* Set the PIO address */
- rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
- PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
-
- /* Enable the configuration access */
- if (bus->parent->number == pcie->root_bus_nr)
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
- else
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
-
- /* Check for errors */
- if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Check for master and target aborts */
- if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
- (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (access_type == RCAR_PCI_ACCESS_READ)
- *data = rcar_pci_read_reg(pcie, PCIECDR);
- else
- rcar_pci_write_reg(pcie, *data, PCIECDR);
-
- /* Disable the configuration access */
- rcar_pci_write_reg(pcie, 0, PCIECCTLR);
-
- return PCIBIOS_SUCCESSFUL;
-}
+ unsigned int timeout = 10;
-static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *val)
-{
- struct rcar_pcie *pcie = bus->sysdata;
- int ret;
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
+ return 0;
- ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
- bus, devfn, where, val);
- if (ret != PCIBIOS_SUCCESSFUL) {
- *val = 0xffffffff;
- return ret;
+ msleep(5);
}
- if (size == 1)
- *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
- else if (size == 2)
- *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
-
- dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
- bus->number, devfn, where, size, *val);
-
- return ret;
+ return -ETIMEDOUT;
}
-/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
-static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 val)
+int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
- struct rcar_pcie *pcie = bus->sysdata;
- unsigned int shift;
- u32 data;
- int ret;
-
- ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
- bus, devfn, where, &data);
- if (ret != PCIBIOS_SUCCESSFUL)
- return ret;
-
- dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
- bus->number, devfn, where, size, val);
+ unsigned int timeout = 10000;
- if (size == 1) {
- shift = BITS_PER_BYTE * (where & 3);
- data &= ~(0xff << shift);
- data |= ((val & 0xff) << shift);
- } else if (size == 2) {
- shift = BITS_PER_BYTE * (where & 2);
- data &= ~(0xffff << shift);
- data |= ((val & 0xffff) << shift);
- } else
- data = val;
+ while (timeout--) {
+ if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
- ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
- bus, devfn, where, &data);
+ udelay(5);
+ cpu_relax();
+ }
- return ret;
+ return -ETIMEDOUT;
}
-static struct pci_ops rcar_pcie_ops = {
- .read = rcar_pcie_read_conf,
- .write = rcar_pcie_write_conf,
-};
-
-static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
- struct resource *res)
+void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win,
+ struct resource_entry *window)
{
/* Setup PCIe address space mappings for each resource */
- resource_size_t size;
+ struct resource *res = window->res;
resource_size_t res_start;
+ resource_size_t size;
u32 mask;
rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
@@ -347,13 +76,16 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
* keeps things pretty simple.
*/
size = resource_size(res);
- mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ if (size > 128)
+ mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ else
+ mask = 0x0;
rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
if (res->flags & IORESOURCE_IO)
- res_start = pci_pio_to_address(res->start);
+ res_start = pci_pio_to_address(res->start) - window->offset;
else
- res_start = res->start;
+ res_start = res->start - window->offset;
rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
@@ -367,883 +99,22 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}
-static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
-{
- struct resource_entry *win;
- int i = 0;
-
- /* Setup PCI resources */
- resource_list_for_each_entry(win, &pci->resources) {
- struct resource *res = win->res;
-
- if (!res->flags)
- continue;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- case IORESOURCE_MEM:
- rcar_pcie_setup_window(i, pci, res);
- i++;
- break;
- case IORESOURCE_BUS:
- pci->root_bus_nr = res->start;
- break;
- default:
- continue;
- }
-
- pci_add_resource(resource, res);
- }
-
- return 1;
-}
-
-static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- unsigned int timeout = 1000;
- u32 macsr;
-
- if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
- return;
-
- if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
- dev_err(dev, "Speed change already in progress\n");
- return;
- }
-
- macsr = rcar_pci_read_reg(pcie, MACSR);
- if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
- goto done;
-
- /* Set target link speed to 5.0 GT/s */
- rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
- PCI_EXP_LNKSTA_CLS_5_0GB);
-
- /* Set speed change reason as intentional factor */
- rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);
-
- /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */
- if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
- rcar_pci_write_reg(pcie, macsr, MACSR);
-
- /* Start link speed change */
- rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);
-
- while (timeout--) {
- macsr = rcar_pci_read_reg(pcie, MACSR);
- if (macsr & SPCHGFIN) {
- /* Clear the interrupt bits */
- rcar_pci_write_reg(pcie, macsr, MACSR);
-
- if (macsr & SPCHGFAIL)
- dev_err(dev, "Speed change failed\n");
-
- goto done;
- }
-
- msleep(1);
- }
-
- dev_err(dev, "Speed change timed out\n");
-
-done:
- dev_info(dev, "Current link speed is %s GT/s\n",
- (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
-}
-
-static int rcar_pcie_enable(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct pci_bus *bus, *child;
- int ret;
-
- /* Try setting 5 GT/s link speed */
- rcar_pcie_force_speedup(pcie);
-
- rcar_pcie_setup(&bridge->windows, pcie);
-
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
- bridge->dev.parent = dev;
- bridge->sysdata = pcie;
- bridge->busnr = pcie->root_bus_nr;
- bridge->ops = &rcar_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- bridge->msi = &pcie->msi.chip;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret < 0)
- return ret;
-
- bus = bridge->bus;
-
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(bus);
-
- return 0;
-}
-
-static int phy_wait_for_ack(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- unsigned int timeout = 100;
-
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
- return 0;
-
- udelay(100);
- }
-
- dev_err(dev, "Access to PCIe phy timed out\n");
-
- return -ETIMEDOUT;
-}
-
-static void phy_write_reg(struct rcar_pcie *pcie,
- unsigned int rate, u32 addr,
- unsigned int lane, u32 data)
-{
- u32 phyaddr;
-
- phyaddr = WRITE_CMD |
- ((rate & 1) << RATE_POS) |
- ((lane & 0xf) << LANE_POS) |
- ((addr & 0xff) << ADR_POS);
-
- /* Set write data */
- rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
- rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
-
- /* Ignore errors as they will be dealt with if the data link is down */
- phy_wait_for_ack(pcie);
-
- /* Clear command */
- rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
- rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
-
- /* Ignore errors as they will be dealt with if the data link is down */
- phy_wait_for_ack(pcie);
-}
-
-static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie)
-{
- unsigned int timeout = 10;
-
- while (timeout--) {
- if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY)
- return 0;
-
- msleep(5);
- }
-
- return -ETIMEDOUT;
-}
-
-static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
-{
- unsigned int timeout = 10000;
-
- while (timeout--) {
- if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
- return 0;
-
- udelay(5);
- cpu_relax();
- }
-
- return -ETIMEDOUT;
-}
-
-static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
-{
- int err;
-
- /* Begin initialization */
- rcar_pci_write_reg(pcie, 0, PCIETCTLR);
-
- /* Set mode */
- rcar_pci_write_reg(pcie, 1, PCIEMSR);
-
- err = rcar_pcie_wait_for_phyrdy(pcie);
- if (err)
- return err;
-
- /*
- * Initial header for port config space is type 1, set the device
- * class to match. Hardware takes care of propagating the IDSETR
- * settings, so there is no need to bother with a quirk.
- */
- rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
-
- /*
- * Setup Secondary Bus Number & Subordinate Bus Number, even though
- * they aren't used, to avoid bridge being detected as broken.
- */
- rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
- rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
-
- /* Initialize default capabilities. */
- rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
- rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
- PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
- PCI_HEADER_TYPE_BRIDGE);
-
- /* Enable data link layer active state reporting */
- rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
- PCI_EXP_LNKCAP_DLLLARC);
-
- /* Write out the physical slot number = 0 */
- rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
-
- /* Set the completion timer timeout to the maximum 50ms. */
- rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
-
- /* Terminate list of capabilities (Next Capability Offset=0) */
- rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
-
- /* Enable MSI */
- if (IS_ENABLED(CONFIG_PCI_MSI))
- rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
-
- rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
-
- /* Finish initialization - establish a PCI Express link */
- rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
-
- /* This will timeout if we don't have a link. */
- err = rcar_pcie_wait_for_dl(pcie);
- if (err)
- return err;
-
- /* Enable INTx interrupts */
- rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
-
- wmb();
-
- return 0;
-}
-
-static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie)
-{
- /* Initialize the phy */
- phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
- phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
- phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
- phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
- phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
- phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
- phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
- phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
- phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
- phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
- phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
- phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
-
- phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
- phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
- phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
-
- return 0;
-}
-
-static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie)
+void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr,
+ u64 pci_addr, u64 flags, int idx, bool host)
{
/*
- * These settings come from the R-Car Series, 2nd Generation User's
- * Manual, section 50.3.1 (2) Initialization of the physical layer.
+ * Set up 64-bit inbound regions as the range parser doesn't
+ * distinguish between 32 and 64-bit types.
*/
- rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
- rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
- rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
- rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
-
- rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
- /* The following value is for DC connection, no termination resistor */
- rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
- rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
- rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);
-
- return 0;
-}
-
-static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
-{
- int err;
-
- err = phy_init(pcie->phy);
- if (err)
- return err;
-
- err = phy_power_on(pcie->phy);
- if (err)
- phy_exit(pcie->phy);
-
- return err;
-}
-
-static int rcar_msi_alloc(struct rcar_msi *chip)
-{
- int msi;
-
- mutex_lock(&chip->lock);
-
- msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
- if (msi < INT_PCI_MSI_NR)
- set_bit(msi, chip->used);
- else
- msi = -ENOSPC;
-
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs)
-{
- int msi;
-
- mutex_lock(&chip->lock);
- msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR,
- order_base_2(no_irqs));
- mutex_unlock(&chip->lock);
-
- return msi;
-}
-
-static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
-{
- mutex_lock(&chip->lock);
- clear_bit(irq, chip->used);
- mutex_unlock(&chip->lock);
-}
-
-static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
-{
- struct rcar_pcie *pcie = data;
- struct rcar_msi *msi = &pcie->msi;
- struct device *dev = pcie->dev;
- unsigned long reg;
-
- reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
-
- /* MSI & INTx share an interrupt - we only handle MSI here */
- if (!reg)
- return IRQ_NONE;
-
- while (reg) {
- unsigned int index = find_first_bit(&reg, 32);
- unsigned int msi_irq;
-
- /* clear the interrupt */
- rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
-
- msi_irq = irq_find_mapping(msi->domain, index);
- if (msi_irq) {
- if (test_bit(index, msi->used))
- generic_handle_irq(msi_irq);
- else
- dev_info(dev, "unhandled MSI\n");
- } else {
- /* Unknown MSI, just clear it */
- dev_dbg(dev, "unexpected MSI\n");
- }
-
- /* see if there's any more pending in this vector */
- reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
- }
-
- return IRQ_HANDLED;
-}
-
-static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
- struct msi_desc *desc)
-{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
-
- hwirq = rcar_msi_alloc(msi);
- if (hwirq < 0)
- return hwirq;
-
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq) {
- rcar_msi_free(msi, hwirq);
- return -EINVAL;
- }
-
- irq_set_msi_desc(irq, desc);
-
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
-
- pci_write_msi_msg(irq, &msg);
-
- return 0;
-}
-
-static int rcar_msi_setup_irqs(struct msi_controller *chip,
- struct pci_dev *pdev, int nvec, int type)
-{
- struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct msi_desc *desc;
- struct msi_msg msg;
- unsigned int irq;
- int hwirq;
- int i;
-
- /* MSI-X interrupts are not supported */
- if (type == PCI_CAP_ID_MSIX)
- return -EINVAL;
-
- WARN_ON(!list_is_singular(&pdev->dev.msi_list));
- desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
-
- hwirq = rcar_msi_alloc_region(msi, nvec);
- if (hwirq < 0)
- return -ENOSPC;
-
- irq = irq_find_mapping(msi->domain, hwirq);
- if (!irq)
- return -ENOSPC;
-
- for (i = 0; i < nvec; i++) {
- /*
- * irq_create_mapping() called from rcar_pcie_probe() pre-
- * allocates descs, so there is no need to allocate descs here.
- * We can therefore assume that if irq_find_mapping() above
- * returns non-zero, then the descs are also successfully
- * allocated.
- */
- if (irq_set_msi_desc_off(irq, i, desc)) {
- /* TODO: clear */
- return -EINVAL;
- }
- }
-
- desc->nvec_used = nvec;
- desc->msi_attrib.multiple = order_base_2(nvec);
-
- msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
- msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
- msg.data = hwirq;
-
- pci_write_msi_msg(irq, &msg);
-
- return 0;
-}
-
-static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
-{
- struct rcar_msi *msi = to_rcar_msi(chip);
- struct irq_data *d = irq_get_irq_data(irq);
-
- rcar_msi_free(msi, d->hwirq);
-}
-
-static struct irq_chip rcar_msi_irq_chip = {
- .name = "R-Car PCIe MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .map = rcar_msi_map,
-};
-
-static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie)
-{
- struct rcar_msi *msi = &pcie->msi;
- int i, irq;
-
- for (i = 0; i < INT_PCI_MSI_NR; i++) {
- irq = irq_find_mapping(msi->domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
-
- irq_domain_remove(msi->domain);
-}
-
-static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- struct rcar_msi *msi = &pcie->msi;
- phys_addr_t base;
- int err, i;
-
- mutex_init(&msi->lock);
-
- msi->chip.dev = dev;
- msi->chip.setup_irq = rcar_msi_setup_irq;
- msi->chip.setup_irqs = rcar_msi_setup_irqs;
- msi->chip.teardown_irq = rcar_msi_teardown_irq;
-
- msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
- &msi_domain_ops, &msi->chip);
- if (!msi->domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < INT_PCI_MSI_NR; i++)
- irq_create_mapping(msi->domain, i);
-
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
- err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
- IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, pcie);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ: %d\n", err);
- goto err;
- }
-
- err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
- IRQF_SHARED | IRQF_NO_THREAD,
- rcar_msi_irq_chip.name, pcie);
- if (err < 0) {
- dev_err(dev, "failed to request IRQ: %d\n", err);
- goto err;
- }
-
- /* setup MSI data target */
- msi->pages = __get_free_pages(GFP_KERNEL, 0);
- if (!msi->pages) {
- err = -ENOMEM;
- goto err;
- }
- base = virt_to_phys((void *)msi->pages);
-
- rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR);
- rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR);
-
- /* enable all MSI interrupts */
- rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
-
- return 0;
-
-err:
- rcar_pcie_unmap_msi(pcie);
- return err;
-}
-
-static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie)
-{
- struct rcar_msi *msi = &pcie->msi;
-
- /* Disable all MSI interrupts */
- rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
-
- /* Disable address decoding of the MSI interrupt, MSIFE */
- rcar_pci_write_reg(pcie, 0, PCIEMSIALR);
-
- free_pages(msi->pages, 0);
-
- rcar_pcie_unmap_msi(pcie);
-}
-
-static int rcar_pcie_get_resources(struct rcar_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- struct resource res;
- int err, i;
-
- pcie->phy = devm_phy_optional_get(dev, "pcie");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
-
- err = of_address_to_resource(dev->of_node, 0, &res);
- if (err)
- return err;
-
- pcie->base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(pcie->base))
- return PTR_ERR(pcie->base);
-
- pcie->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(pcie->bus_clk)) {
- dev_err(dev, "cannot get pcie bus clock\n");
- return PTR_ERR(pcie->bus_clk);
- }
-
- i = irq_of_parse_and_map(dev->of_node, 0);
- if (!i) {
- dev_err(dev, "cannot get platform resources for msi interrupt\n");
- err = -ENOENT;
- goto err_irq1;
- }
- pcie->msi.irq1 = i;
-
- i = irq_of_parse_and_map(dev->of_node, 1);
- if (!i) {
- dev_err(dev, "cannot get platform resources for msi interrupt\n");
- err = -ENOENT;
- goto err_irq2;
- }
- pcie->msi.irq2 = i;
-
- return 0;
-
-err_irq2:
- irq_dispose_mapping(pcie->msi.irq1);
-err_irq1:
- return err;
-}
-
-static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
- struct resource_entry *entry,
- int *index)
-{
- u64 restype = entry->res->flags;
- u64 cpu_addr = entry->res->start;
- u64 cpu_end = entry->res->end;
- u64 pci_addr = entry->res->start - entry->offset;
- u32 flags = LAM_64BIT | LAR_ENABLE;
- u64 mask;
- u64 size = resource_size(entry->res);
- int idx = *index;
-
- if (restype & IORESOURCE_PREFETCH)
- flags |= LAM_PREFETCH;
-
- while (cpu_addr < cpu_end) {
- if (idx >= MAX_NR_INBOUND_MAPS - 1) {
- dev_err(pcie->dev, "Failed to map inbound regions!\n");
- return -EINVAL;
- }
- /*
- * If the size of the range is larger than the alignment of
- * the start address, we have to use multiple entries to
- * perform the mapping.
- */
- if (cpu_addr > 0) {
- unsigned long nr_zeros = __ffs64(cpu_addr);
- u64 alignment = 1ULL << nr_zeros;
-
- size = min(size, alignment);
- }
- /* Hardware supports max 4GiB inbound region */
- size = min(size, 1ULL << 32);
-
- mask = roundup_pow_of_two(size) - 1;
- mask &= ~0xf;
-
- /*
- * Set up 64-bit inbound regions as the range parser doesn't
- * distinguish between 32 and 64-bit types.
- */
+ if (host)
rcar_pci_write_reg(pcie, lower_32_bits(pci_addr),
PCIEPRAR(idx));
- rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
- rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags,
- PCIELAMR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+ rcar_pci_write_reg(pcie, flags, PCIELAMR(idx));
+ if (host)
rcar_pci_write_reg(pcie, upper_32_bits(pci_addr),
PCIEPRAR(idx + 1));
- rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr),
- PCIELAR(idx + 1));
- rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
-
- pci_addr += size;
- cpu_addr += size;
- idx += 2;
- }
- *index = idx;
-
- return 0;
-}
-
-static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie)
-{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct resource_entry *entry;
- int index = 0, err = 0;
-
- resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- err = rcar_pcie_inbound_ranges(pcie, entry, &index);
- if (err)
- break;
- }
-
- return err;
-}
-
-static const struct of_device_id rcar_pcie_of_match[] = {
- { .compatible = "renesas,pcie-r8a7779",
- .data = rcar_pcie_phy_init_h1 },
- { .compatible = "renesas,pcie-r8a7790",
- .data = rcar_pcie_phy_init_gen2 },
- { .compatible = "renesas,pcie-r8a7791",
- .data = rcar_pcie_phy_init_gen2 },
- { .compatible = "renesas,pcie-rcar-gen2",
- .data = rcar_pcie_phy_init_gen2 },
- { .compatible = "renesas,pcie-r8a7795",
- .data = rcar_pcie_phy_init_gen3 },
- { .compatible = "renesas,pcie-rcar-gen3",
- .data = rcar_pcie_phy_init_gen3 },
- {},
-};
-
-static int rcar_pcie_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct rcar_pcie *pcie;
- u32 data;
- int err;
- int (*phy_init_fn)(struct rcar_pcie *);
- struct pci_host_bridge *bridge;
-
- bridge = pci_alloc_host_bridge(sizeof(*pcie));
- if (!bridge)
- return -ENOMEM;
-
- pcie = pci_host_bridge_priv(bridge);
-
- pcie->dev = dev;
- platform_set_drvdata(pdev, pcie);
-
- err = pci_parse_request_of_pci_ranges(dev, &pcie->resources,
- &bridge->dma_ranges, NULL);
- if (err)
- goto err_free_bridge;
-
- pm_runtime_enable(pcie->dev);
- err = pm_runtime_get_sync(pcie->dev);
- if (err < 0) {
- dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
- goto err_pm_disable;
- }
-
- err = rcar_pcie_get_resources(pcie);
- if (err < 0) {
- dev_err(dev, "failed to request resources: %d\n", err);
- goto err_pm_put;
- }
-
- err = clk_prepare_enable(pcie->bus_clk);
- if (err) {
- dev_err(dev, "failed to enable bus clock: %d\n", err);
- goto err_unmap_msi_irqs;
- }
-
- err = rcar_pcie_parse_map_dma_ranges(pcie);
- if (err)
- goto err_clk_disable;
-
- phy_init_fn = of_device_get_match_data(dev);
- err = phy_init_fn(pcie);
- if (err) {
- dev_err(dev, "failed to init PCIe PHY\n");
- goto err_clk_disable;
- }
-
- /* Failure to get a link might just be that no cards are inserted */
- if (rcar_pcie_hw_init(pcie)) {
- dev_info(dev, "PCIe link down\n");
- err = -ENODEV;
- goto err_phy_shutdown;
- }
-
- data = rcar_pci_read_reg(pcie, MACSR);
- dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- err = rcar_pcie_enable_msi(pcie);
- if (err < 0) {
- dev_err(dev,
- "failed to enable MSI support: %d\n",
- err);
- goto err_phy_shutdown;
- }
- }
-
- err = rcar_pcie_enable(pcie);
- if (err)
- goto err_msi_teardown;
-
- return 0;
-
-err_msi_teardown:
- if (IS_ENABLED(CONFIG_PCI_MSI))
- rcar_pcie_teardown_msi(pcie);
-
-err_phy_shutdown:
- if (pcie->phy) {
- phy_power_off(pcie->phy);
- phy_exit(pcie->phy);
- }
-
-err_clk_disable:
- clk_disable_unprepare(pcie->bus_clk);
-
-err_unmap_msi_irqs:
- irq_dispose_mapping(pcie->msi.irq2);
- irq_dispose_mapping(pcie->msi.irq1);
-
-err_pm_put:
- pm_runtime_put(dev);
-
-err_pm_disable:
- pm_runtime_disable(dev);
- pci_free_resource_list(&pcie->resources);
-
-err_free_bridge:
- pci_free_host_bridge(bridge);
-
- return err;
+ rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx + 1));
+ rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
}
-
-static int rcar_pcie_resume_noirq(struct device *dev)
-{
- struct rcar_pcie *pcie = dev_get_drvdata(dev);
-
- if (rcar_pci_read_reg(pcie, PMSR) &&
- !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
- return 0;
-
- /* Re-establish the PCIe link */
- rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
- rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
- return rcar_pcie_wait_for_dl(pcie);
-}
-
-static const struct dev_pm_ops rcar_pcie_pm_ops = {
- .resume_noirq = rcar_pcie_resume_noirq,
-};
-
-static struct platform_driver rcar_pcie_driver = {
- .driver = {
- .name = "rcar-pcie",
- .of_match_table = rcar_pcie_of_match,
- .pm = &rcar_pcie_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = rcar_pcie_probe,
-};
-builtin_platform_driver(rcar_pcie_driver);
diff --git a/drivers/pci/controller/pcie-rcar.h b/drivers/pci/controller/pcie-rcar.h
new file mode 100644
index 000000000000..d4c698b5f821
--- /dev/null
+++ b/drivers/pci/controller/pcie-rcar.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014-2020 Renesas Electronics Europe Ltd
+ *
+ * Author: Phil Edworthy <phil.edworthy@renesas.com>
+ */
+
+#ifndef _PCIE_RCAR_H
+#define _PCIE_RCAR_H
+
+#define PCIECAR 0x000010
+#define PCIECCTLR 0x000018
+#define CONFIG_SEND_ENABLE BIT(31)
+#define TYPE0 (0 << 8)
+#define TYPE1 BIT(8)
+#define PCIECDR 0x000020
+#define PCIEMSR 0x000028
+#define PCIEINTXR 0x000400
+#define ASTINTX BIT(16)
+#define PCIEPHYSR 0x0007f0
+#define PHYRDY BIT(0)
+#define PCIEMSITXR 0x000840
+
+/* Transfer control */
+#define PCIETCTLR 0x02000
+#define DL_DOWN BIT(3)
+#define CFINIT BIT(0)
+#define PCIETSTR 0x02004
+#define DATA_LINK_ACTIVE BIT(0)
+#define PCIEERRFR 0x02020
+#define UNSUPPORTED_REQUEST BIT(4)
+#define PCIEMSIFR 0x02044
+#define PCIEMSIALR 0x02048
+#define MSIFE BIT(0)
+#define PCIEMSIAUR 0x0204c
+#define PCIEMSIIER 0x02050
+
+/* root port address */
+#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x) (0x02200 + ((x) * 0x20))
+#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
+#define LAM_PREFETCH BIT(3)
+#define LAM_64BIT BIT(2)
+#define LAR_ENABLE BIT(1)
+
+/* PCIe address reg & mask */
+#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
+#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
+#define PAR_ENABLE BIT(31)
+#define IO_SPACE BIT(8)
+
+/* Configuration */
+#define PCICONF(x) (0x010000 + ((x) * 0x4))
+#define INTDIS BIT(10)
+#define PMCAP(x) (0x010040 + ((x) * 0x4))
+#define MSICAP(x) (0x010050 + ((x) * 0x4))
+#define MSICAP0_MSIE BIT(16)
+#define MSICAP0_MMESCAP_OFFSET 17
+#define MSICAP0_MMESE_OFFSET 20
+#define MSICAP0_MMESE_MASK GENMASK(22, 20)
+#define EXPCAP(x) (0x010070 + ((x) * 0x4))
+#define VCCAP(x) (0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR0 0x011000
+#define IDSETR1 0x011004
+#define SUBIDSETR 0x011024
+#define TLCTLR 0x011048
+#define MACSR 0x011054
+#define SPCHGFIN BIT(4)
+#define SPCHGFAIL BIT(6)
+#define SPCHGSUC BIT(7)
+#define LINK_SPEED (0xf << 16)
+#define LINK_SPEED_2_5GTS (1 << 16)
+#define LINK_SPEED_5_0GTS (2 << 16)
+#define MACCTLR 0x011058
+#define MACCTLR_NFTS_MASK GENMASK(23, 16) /* The name is from SH7786 */
+#define SPEED_CHANGE BIT(24)
+#define SCRAMBLE_DISABLE BIT(27)
+#define LTSMDIS BIT(31)
+#define MACCTLR_INIT_VAL (LTSMDIS | MACCTLR_NFTS_MASK)
+#define PMSR 0x01105c
+#define MACS2R 0x011078
+#define MACCGSPSETR 0x011084
+#define SPCNGRSN BIT(31)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR 0x04000c
+#define WRITE_CMD BIT(16)
+#define PHY_ACK BIT(24)
+#define RATE_POS 12
+#define LANE_POS 8
+#define ADR_POS 0
+#define H1_PCIEPHYDOUTR 0x040014
+
+/* R-Car Gen2 PHY */
+#define GEN2_PCIEPHYADDR 0x780
+#define GEN2_PCIEPHYDATA 0x784
+#define GEN2_PCIEPHYCTRL 0x78c
+
+#define INT_PCI_MSI_NR 32
+
+#define RCONF(x) (PCICONF(0) + (x))
+#define RPMCAP(x) (PMCAP(0) + (x))
+#define REXPCAP(x) (EXPCAP(0) + (x))
+#define RVCCAP(x) (VCCAP(0) + (x))
+
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
+
+struct rcar_pcie {
+ struct device *dev;
+ void __iomem *base;
+};
+
+enum {
+ RCAR_PCI_ACCESS_READ,
+ RCAR_PCI_ACCESS_WRITE,
+};
+
+void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, unsigned int reg);
+u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg);
+void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data);
+int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie);
+int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie);
+void rcar_pcie_set_outbound(struct rcar_pcie *pcie, int win,
+ struct resource_entry *window);
+void rcar_pcie_set_inbound(struct rcar_pcie *pcie, u64 cpu_addr,
+ u64 pci_addr, u64 flags, int idx, bool host);
+
+#endif
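[Editor's note] The header above only declares the accessors shared by the R-Car host and endpoint drivers. As an illustration that is not part of this patch, rcar_rmw32() can be built from the declared read/write pair as a byte-aligned read-modify-write; the body below is a plausible sketch, not necessarily the exact implementation:

	/* Illustrative sketch only -- one plausible rcar_rmw32() built on the
	 * rcar_pci_read_reg()/rcar_pci_write_reg() accessors declared above. */
	void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
	{
		unsigned int shift = 8 * (where & 3);	/* byte offset inside the 32-bit word */
		u32 val = rcar_pci_read_reg(pcie, where & ~3);

		val &= ~(mask << shift);	/* clear the masked field */
		val |= data << shift;		/* insert the new value */
		rcar_pci_write_reg(pcie, val, where & ~3);
	}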
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index d743b0a48988..5eaf36629a75 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -615,7 +615,7 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
err = pci_epc_mem_init(epc, rockchip->mem_res->start,
- resource_size(rockchip->mem_res));
+ resource_size(rockchip->mem_res), PAGE_SIZE);
if (err < 0) {
dev_err(dev, "failed to initialize the memory space\n");
goto err_uninit_port;
diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c
index 21a208da3f59..8f640c70f936 100644
--- a/drivers/pci/controller/pcie-tango.c
+++ b/drivers/pci/controller/pcie-tango.c
@@ -207,7 +207,7 @@ static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
return ret;
}
-static struct pci_ecam_ops smp8759_ecam_ops = {
+static const struct pci_ecam_ops smp8759_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -273,9 +273,9 @@ static int tango_pcie_probe(struct platform_device *pdev)
writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
virq = platform_get_irq(pdev, 1);
- if (virq <= 0) {
+ if (virq < 0) {
dev_err(dev, "Failed to map IRQ\n");
- return -ENXIO;
+ return virq;
}
irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
@@ -295,11 +295,14 @@ static int tango_pcie_probe(struct platform_device *pdev)
spin_lock_init(&pcie->used_msi_lock);
irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
- return pci_host_common_probe(pdev, &smp8759_ecam_ops);
+ return pci_host_common_probe(pdev);
}
static const struct of_device_id tango_pcie_ids[] = {
- { .compatible = "sigma,smp8759-pcie" },
+ {
+ .compatible = "sigma,smp8759-pcie",
+ .data = &smp8759_ecam_ops,
+ },
{ },
};
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index dac91d60701d..e386d4eac407 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -445,9 +445,11 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
if (!membar2)
return -ENOMEM;
offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
- readq(membar2 + MB2_SHADOW_OFFSET);
+ (readq(membar2 + MB2_SHADOW_OFFSET) &
+ PCI_BASE_ADDRESS_MEM_MASK);
offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
- readq(membar2 + MB2_SHADOW_OFFSET + 8);
+ (readq(membar2 + MB2_SHADOW_OFFSET + 8) &
+ PCI_BASE_ADDRESS_MEM_MASK);
pci_iounmap(vmd->dev, membar2);
}
}
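[Editor's note] The vmd.c hunk above masks the shadow register values because the low bits of a memory BAR encode type/prefetch flags rather than address bits. A minimal sketch of the idea (illustrative only; the helper name is hypothetical):

	/* Strip the BAR flag bits (3:0) before treating the value as a base address. */
	static u64 vmd_shadow_base(void __iomem *membar2, int reg)
	{
		return readq(membar2 + reg) & PCI_BASE_ADDRESS_MEM_MASK;
	}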
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 1a81af0ba961..8f065a42fc1a 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -26,7 +26,7 @@ static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT);
*/
struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
- struct pci_ecam_ops *ops)
+ const struct pci_ecam_ops *ops)
{
struct pci_config_window *cfg;
unsigned int bus_range, bus_range_max, bsz;
@@ -101,6 +101,7 @@ err_exit:
pci_ecam_free(cfg);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(pci_ecam_create);
void pci_ecam_free(struct pci_config_window *cfg)
{
@@ -121,6 +122,7 @@ void pci_ecam_free(struct pci_config_window *cfg)
release_resource(&cfg->res);
kfree(cfg);
}
+EXPORT_SYMBOL_GPL(pci_ecam_free);
/*
* Function to implement the pci_ops ->map_bus method
@@ -143,9 +145,10 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
base = cfg->win + (busn << cfg->ops->bus_shift);
return base + (devfn << devfn_shift) + where;
}
+EXPORT_SYMBOL_GPL(pci_ecam_map_bus);
/* ECAM ops */
-struct pci_ecam_ops pci_generic_ecam_ops = {
+const struct pci_ecam_ops pci_generic_ecam_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
@@ -153,10 +156,11 @@ struct pci_ecam_ops pci_generic_ecam_ops = {
.write = pci_generic_config_write,
}
};
+EXPORT_SYMBOL_GPL(pci_generic_ecam_ops);
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
/* ECAM ops for 32-bit access only (non-compliant) */
-struct pci_ecam_ops pci_32b_ops = {
+const struct pci_ecam_ops pci_32b_ops = {
.bus_shift = 20,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 60330f3e3751..c89a9561439f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -187,6 +187,9 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
*/
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
+ if (!epf_test->dma_supported)
+ return;
+
dma_release_channel(epf_test->dma_chan);
epf_test->dma_chan = NULL;
}
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index abfac1109a13..80c46f3a4590 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -23,7 +23,7 @@
static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
{
int order;
- unsigned int page_shift = ilog2(mem->page_size);
+ unsigned int page_shift = ilog2(mem->window.page_size);
size--;
size >>= page_shift;
@@ -36,62 +36,97 @@ static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
}
/**
- * __pci_epc_mem_init() - initialize the pci_epc_mem structure
+ * pci_epc_multi_mem_init() - initialize the pci_epc_mem structure
* @epc: the EPC device that invoked pci_epc_mem_init
- * @phys_base: the physical address of the base
- * @size: the size of the address space
- * @page_size: size of each page
+ * @windows: pointer to windows supported by the device
+ * @num_windows: number of windows device supports
*
* Invoke to initialize the pci_epc_mem structure used by the
* endpoint functions to allocate mapped PCI address.
*/
-int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_base, size_t size,
- size_t page_size)
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *windows,
+ unsigned int num_windows)
{
- int ret;
- struct pci_epc_mem *mem;
- unsigned long *bitmap;
+ struct pci_epc_mem *mem = NULL;
+ unsigned long *bitmap = NULL;
unsigned int page_shift;
- int pages;
+ size_t page_size;
int bitmap_size;
+ int pages;
+ int ret;
+ int i;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
+ epc->num_windows = 0;
- page_shift = ilog2(page_size);
- pages = size >> page_shift;
- bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+ if (!windows || !num_windows)
+ return -EINVAL;
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem) {
- ret = -ENOMEM;
- goto err;
- }
+ epc->windows = kcalloc(num_windows, sizeof(*epc->windows), GFP_KERNEL);
+ if (!epc->windows)
+ return -ENOMEM;
- bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!bitmap) {
- ret = -ENOMEM;
- goto err_mem;
- }
+ for (i = 0; i < num_windows; i++) {
+ page_size = windows[i].page_size;
+ if (page_size < PAGE_SIZE)
+ page_size = PAGE_SIZE;
+ page_shift = ilog2(page_size);
+ pages = windows[i].size >> page_shift;
+ bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
- mem->bitmap = bitmap;
- mem->phys_base = phys_base;
- mem->page_size = page_size;
- mem->pages = pages;
- mem->size = size;
- mutex_init(&mem->lock);
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ ret = -ENOMEM;
+ i--;
+ goto err_mem;
+ }
+
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap) {
+ ret = -ENOMEM;
+ kfree(mem);
+ i--;
+ goto err_mem;
+ }
+
+ mem->window.phys_base = windows[i].phys_base;
+ mem->window.size = windows[i].size;
+ mem->window.page_size = page_size;
+ mem->bitmap = bitmap;
+ mem->pages = pages;
+ mutex_init(&mem->lock);
+ epc->windows[i] = mem;
+ }
- epc->mem = mem;
+ epc->mem = epc->windows[0];
+ epc->num_windows = num_windows;
return 0;
err_mem:
- kfree(mem);
+ for (; i >= 0; i--) {
+ mem = epc->windows[i];
+ kfree(mem->bitmap);
+ kfree(mem);
+ }
+ kfree(epc->windows);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
+
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size)
+{
+ struct pci_epc_mem_window mem_window;
+
+ mem_window.phys_base = base;
+ mem_window.size = size;
+ mem_window.page_size = page_size;
-err:
-return ret;
+ return pci_epc_multi_mem_init(epc, &mem_window, 1);
}
-EXPORT_SYMBOL_GPL(__pci_epc_mem_init);
+EXPORT_SYMBOL_GPL(pci_epc_mem_init);
/**
* pci_epc_mem_exit() - cleanup the pci_epc_mem structure
@@ -102,11 +137,22 @@ EXPORT_SYMBOL_GPL(__pci_epc_mem_init);
*/
void pci_epc_mem_exit(struct pci_epc *epc)
{
- struct pci_epc_mem *mem = epc->mem;
+ struct pci_epc_mem *mem;
+ int i;
+
+ if (!epc->num_windows)
+ return;
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+ kfree(mem->bitmap);
+ kfree(mem);
+ }
+ kfree(epc->windows);
+
+ epc->windows = NULL;
epc->mem = NULL;
- kfree(mem->bitmap);
- kfree(mem);
+ epc->num_windows = 0;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
@@ -122,31 +168,60 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
- int pageno;
void __iomem *virt_addr = NULL;
- struct pci_epc_mem *mem = epc->mem;
- unsigned int page_shift = ilog2(mem->page_size);
+ struct pci_epc_mem *mem;
+ unsigned int page_shift;
+ size_t align_size;
+ int pageno;
int order;
+ int i;
- size = ALIGN(size, mem->page_size);
- order = pci_epc_mem_get_order(mem, size);
-
- mutex_lock(&mem->lock);
- pageno = bitmap_find_free_region(mem->bitmap, mem->pages, order);
- if (pageno < 0)
- goto ret;
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+ mutex_lock(&mem->lock);
+ align_size = ALIGN(size, mem->window.page_size);
+ order = pci_epc_mem_get_order(mem, align_size);
- *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
- virt_addr = ioremap(*phys_addr, size);
- if (!virt_addr)
- bitmap_release_region(mem->bitmap, pageno, order);
+ pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
+ order);
+ if (pageno >= 0) {
+ page_shift = ilog2(mem->window.page_size);
+ *phys_addr = mem->window.phys_base +
+ ((phys_addr_t)pageno << page_shift);
+ virt_addr = ioremap(*phys_addr, align_size);
+ if (!virt_addr) {
+ bitmap_release_region(mem->bitmap,
+ pageno, order);
+ mutex_unlock(&mem->lock);
+ continue;
+ }
+ mutex_unlock(&mem->lock);
+ return virt_addr;
+ }
+ mutex_unlock(&mem->lock);
+ }
-ret:
- mutex_unlock(&mem->lock);
return virt_addr;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
+static struct pci_epc_mem *pci_epc_get_matching_window(struct pci_epc *epc,
+ phys_addr_t phys_addr)
+{
+ struct pci_epc_mem *mem;
+ int i;
+
+ for (i = 0; i < epc->num_windows; i++) {
+ mem = epc->windows[i];
+
+ if (phys_addr >= mem->window.phys_base &&
+ phys_addr < (mem->window.phys_base + mem->window.size))
+ return mem;
+ }
+
+ return NULL;
+}
+
/**
* pci_epc_mem_free_addr() - free the allocated memory address
* @epc: the EPC device on which memory was allocated
@@ -159,14 +234,23 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
void __iomem *virt_addr, size_t size)
{
+ struct pci_epc_mem *mem;
+ unsigned int page_shift;
+ size_t page_size;
int pageno;
- struct pci_epc_mem *mem = epc->mem;
- unsigned int page_shift = ilog2(mem->page_size);
int order;
+ mem = pci_epc_get_matching_window(epc, phys_addr);
+ if (!mem) {
+ pr_err("failed to get matching window\n");
+ return;
+ }
+
+ page_size = mem->window.page_size;
+ page_shift = ilog2(page_size);
iounmap(virt_addr);
- pageno = (phys_addr - mem->phys_base) >> page_shift;
- size = ALIGN(size, mem->page_size);
+ pageno = (phys_addr - mem->window.phys_base) >> page_shift;
+ size = ALIGN(size, page_size);
order = pci_epc_mem_get_order(mem, size);
mutex_lock(&mem->lock);
bitmap_release_region(mem->bitmap, pageno, order);
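[Editor's note] With the rework above, an endpoint controller driver can describe several outbound address windows, each with its own page size and bitmap allocator; epc->mem keeps pointing at the first window for existing users. A hedged usage sketch (driver name, addresses and sizes are made up):

	/* Hypothetical EPC driver registering two outbound windows. */
	static int foo_epc_init_mem(struct pci_epc *epc)
	{
		struct pci_epc_mem_window windows[] = {
			{ .phys_base = 0x80000000, .size = SZ_1M,  .page_size = SZ_4K  },
			{ .phys_base = 0x90000000, .size = SZ_16M, .page_size = SZ_64K },
		};

		return pci_epc_multi_mem_init(epc, windows, ARRAY_SIZE(windows));
	}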
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index ae44f46d1bf3..4fd200d8b0a9 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -148,8 +148,6 @@ struct controller {
#define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP)
#define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP)
#define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP)
-#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
-#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 6504869efabc..9887c9de08c3 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -435,7 +435,7 @@ static int rpaphp_drc_add_slot(struct device_node *dn)
*/
int rpaphp_add_slot(struct device_node *dn)
{
- if (!dn->name || strcmp(dn->name, "pci"))
+ if (!of_node_name_eq(dn, "pci"))
return 0;
if (of_find_property(dn, "ibm,drc-info", NULL))
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 39295d88f670..b59f84918fe0 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -52,6 +52,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
+ struct zpci_bus *zbus = zdev->zbus;
int rc;
if (zdev->state != ZPCI_FN_STATE_STANDBY)
@@ -65,9 +66,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
if (rc)
goto out_deconfigure;
- pci_scan_slot(zdev->bus, ZPCI_DEVFN);
+ pci_scan_slot(zbus->bus, zdev->devfn);
pci_lock_rescan_remove();
- pci_bus_add_devices(zdev->bus);
+ pci_bus_add_devices(zbus->bus);
pci_unlock_rescan_remove();
return rc;
@@ -82,13 +83,17 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
struct pci_dev *pdev;
+ struct zpci_bus *zbus = zdev->zbus;
int rc;
if (!zpci_fn_configured(zdev->state))
return -EIO;
- pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
if (pdev) {
+ if (pci_num_vf(pdev))
+ return -EBUSY;
+
pci_stop_and_remove_bus_device_locked(pdev);
pci_dev_put(pdev);
}
@@ -133,12 +138,13 @@ static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
int zpci_init_slot(struct zpci_dev *zdev)
{
char name[SLOT_NAME_SIZE];
+ struct zpci_bus *zbus = zdev->zbus;
zdev->hotplug_slot.ops = &s390_hotplug_slot_ops;
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
- return pci_hp_register(&zdev->hotplug_slot, zdev->bus,
- ZPCI_DEVFN, name);
+ return pci_hp_register(&zdev->hotplug_slot, zbus->bus,
+ zdev->devfn, name);
}
void zpci_exit_slot(struct zpci_dev *zdev)
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index f7f13ee5d06e..6e85885b554c 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -164,7 +164,7 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
int shpchp_configure_device(struct slot *p_slot);
-int shpchp_unconfigure_device(struct slot *p_slot);
+void shpchp_unconfigure_device(struct slot *p_slot);
void cleanup_slots(struct controller *ctrl);
void shpchp_queue_pushbutton_work(struct work_struct *work);
int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 078003dcde5b..afdc52d1cae7 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -341,8 +341,7 @@ static int remove_board(struct slot *p_slot)
u8 hp_slot;
int rc;
- if (shpchp_unconfigure_device(p_slot))
- return(1);
+ shpchp_unconfigure_device(p_slot);
hp_slot = p_slot->device - ctrl->slot_device_offset;
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 115701301487..36db0c3c4ea6 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -61,9 +61,8 @@ int shpchp_configure_device(struct slot *p_slot)
return ret;
}
-int shpchp_unconfigure_device(struct slot *p_slot)
+void shpchp_unconfigure_device(struct slot *p_slot)
{
- int rc = 0;
struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
struct pci_dev *dev, *temp;
struct controller *ctrl = p_slot->ctrl;
@@ -83,6 +82,4 @@ int shpchp_unconfigure_device(struct slot *p_slot)
}
pci_unlock_rescan_remove();
- return rc;
}
-
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 4d1f392b05f9..b37e08c4f9d1 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -133,12 +133,35 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn)
&physfn->sriov->subsystem_device);
}
+int pci_iov_sysfs_link(struct pci_dev *dev,
+ struct pci_dev *virtfn, int id)
+{
+ char buf[VIRTFN_ID_LEN];
+ int rc;
+
+ sprintf(buf, "virtfn%u", id);
+ rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ if (rc)
+ goto failed;
+ rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
+ if (rc)
+ goto failed1;
+
+ kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+
+failed1:
+ sysfs_remove_link(&dev->dev.kobj, buf);
+failed:
+ return rc;
+}
+
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
int rc = -ENOMEM;
u64 size;
- char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
struct resource *res;
struct pci_sriov *iov = dev->sriov;
@@ -182,23 +205,14 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
}
pci_device_add(virtfn, virtfn->bus);
-
- sprintf(buf, "virtfn%u", id);
- rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ rc = pci_iov_sysfs_link(dev, virtfn, id);
if (rc)
goto failed1;
- rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
- if (rc)
- goto failed2;
-
- kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
pci_bus_add_device(virtfn);
return 0;
-failed2:
- sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
pci_stop_and_remove_bus_device(virtfn);
pci_dev_put(dev);
@@ -557,9 +571,6 @@ static void sriov_del_vfs(struct pci_dev *dev)
struct pci_sriov *iov = dev->sriov;
int i;
- if (dev->no_vf_scan)
- return;
-
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
}
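[Editor's note] pci_iov_sysfs_link() is split out so a PF driver that enumerates its VFs itself can still create the virtfn<N>/physfn sysfs links that pci_iov_add_virtfn() would normally set up. A hedged sketch of such a caller (the function and driver are hypothetical):

	static int foo_add_self_scanned_vf(struct pci_dev *pf, struct pci_dev *vf, int vf_id)
	{
		int rc;

		pci_device_add(vf, vf->bus);

		rc = pci_iov_sysfs_link(pf, vf, vf_id);	/* virtfn<N> and physfn links */
		if (rc)
			return rc;

		pci_bus_add_device(vf);
		return 0;
	}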
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 81ceeaa6f1d5..27839cd2459f 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -592,7 +592,7 @@ int of_pci_get_max_link_speed(struct device_node *node)
u32 max_link_speed;
if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
- max_link_speed > 4)
+ max_link_speed == 0 || max_link_speed > 4)
return -EINVAL;
return max_link_speed;
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index b73b10bce0df..e8e444eeb1cd 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -282,6 +282,8 @@ static const struct pci_p2pdma_whitelist_entry {
} pci_p2pdma_whitelist[] = {
/* AMD ZEN */
{PCI_VENDOR_ID_AMD, 0x1450, 0},
+ {PCI_VENDOR_ID_AMD, 0x15d0, 0},
+ {PCI_VENDOR_ID_AMD, 0x1630, 0},
/* Intel Xeon E5/Core i7 */
{PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE},
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d21969fba6ab..7224b1e5f2a8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -948,7 +948,7 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
* Look for a special _DSD property for the root port and if it
* is set we know the hierarchy behind it supports D3 just fine.
*/
- root = pci_find_pcie_root_port(dev);
+ root = pcie_find_root_port(dev);
if (!root)
return false;
@@ -1128,7 +1128,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
return;
obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
- RESET_DELAY_DSM, NULL);
+ DSM_PCI_POWER_ON_RESET_DELAY, NULL);
if (!obj)
return;
@@ -1193,7 +1193,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
pdev->d3cold_delay = 0;
obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
- FUNCTION_DELAY_DSM, NULL);
+ DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
if (!obj)
return;
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 4f4f54bc732e..ccf26d12ec61 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -24,6 +24,17 @@
#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+/**
+ * struct pci_bridge_reg_behavior - register bits behaviors
+ * @ro: Read-Only bits
+ * @rw: Read-Write bits
+ * @w1c: Write-1-to-Clear bits
+ *
+ * Reads and Writes will be filtered by specified behavior. All other bits not
+ * declared are assumed 'Reserved' and will return 0 on reads, per PCIe 5.0:
+ * "Reserved register fields must be read only and must return 0 (all 0's for
+ * multi-bit fields) when read".
+ */
struct pci_bridge_reg_behavior {
/* Read-only bits */
u32 ro;
@@ -33,9 +44,6 @@ struct pci_bridge_reg_behavior {
/* Write-1-to-clear bits */
u32 w1c;
-
- /* Reserved bits (hardwired to 0) */
- u32 rsvd;
};
static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
@@ -49,7 +57,6 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
PCI_COMMAND_FAST_BACK) |
(PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
- .rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
.w1c = PCI_STATUS_ERROR_BITS << 16,
},
[PCI_CLASS_REVISION / 4] = { .ro = ~0 },
@@ -96,8 +103,6 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
GENMASK(11, 8) | GENMASK(3, 0)),
.w1c = PCI_STATUS_ERROR_BITS << 16,
-
- .rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
},
[PCI_MEMORY_BASE / 4] = {
@@ -130,12 +135,10 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
[PCI_CAPABILITY_LIST / 4] = {
.ro = GENMASK(7, 0),
- .rsvd = GENMASK(31, 8),
},
[PCI_ROM_ADDRESS1 / 4] = {
.rw = GENMASK(31, 11) | BIT(0),
- .rsvd = GENMASK(10, 1),
},
/*
@@ -158,8 +161,6 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
.ro = (GENMASK(15, 8) | ((PCI_BRIDGE_CTL_FAST_BACK) << 16)),
.w1c = BIT(10) << 16,
-
- .rsvd = (GENMASK(15, 12) | BIT(4)) << 16,
},
};
@@ -181,31 +182,29 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
.rw = GENMASK(15, 0),
/*
- * Device status register has 4 bits W1C, then 2 bits
- * RO, the rest is reserved
+ * Device status register has bits 6 and [3:0] W1C, [5:4] RO,
+ * the rest is reserved
*/
- .w1c = GENMASK(19, 16),
- .ro = GENMASK(20, 19),
- .rsvd = GENMASK(31, 21),
+ .w1c = (BIT(6) | GENMASK(3, 0)) << 16,
+ .ro = GENMASK(5, 4) << 16,
},
[PCI_EXP_LNKCAP / 4] = {
/* All bits are RO, except bit 23 which is reserved */
.ro = lower_32_bits(~BIT(23)),
- .rsvd = BIT(23),
},
[PCI_EXP_LNKCTL / 4] = {
/*
- * Link control has bits [1:0] and [11:3] RW, the
- * other bits are reserved.
- * Link status has bits [13:0] RO, and bits [14:15]
+ * Link control has bits [15:14], [11:3] and [1:0] RW, the
+ * rest is reserved.
+ *
+ * Link status has bits [13:0] RO, and bits [15:14]
* W1C.
*/
- .rw = GENMASK(11, 3) | GENMASK(1, 0),
+ .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0),
.ro = GENMASK(13, 0) << 16,
.w1c = GENMASK(15, 14) << 16,
- .rsvd = GENMASK(15, 12) | BIT(2),
},
[PCI_EXP_SLTCAP / 4] = {
@@ -214,19 +213,18 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
[PCI_EXP_SLTCTL / 4] = {
/*
- * Slot control has bits [12:0] RW, the rest is
+ * Slot control has bits [14:0] RW, the rest is
* reserved.
*
- * Slot status has a mix of W1C and RO bits, as well
- * as reserved bits.
+ * Slot status has bits 8 and [4:0] W1C, bits [7:5] RO, the
+ * rest is reserved.
*/
- .rw = GENMASK(12, 0),
+ .rw = GENMASK(14, 0),
.w1c = (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16,
.ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS |
PCI_EXP_SLTSTA_EIS) << 16,
- .rsvd = GENMASK(15, 12) | (GENMASK(15, 9) << 16),
},
[PCI_EXP_RTCTL / 4] = {
@@ -234,19 +232,21 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
* Root control has bits [4:0] RW, the rest is
* reserved.
*
- * Root status has bit 0 RO, the rest is reserved.
+ * Root capabilities has bit 0 RO, the rest is reserved.
*/
.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
PCI_EXP_RTCTL_CRSSVE),
.ro = PCI_EXP_RTCAP_CRSVIS << 16,
- .rsvd = GENMASK(15, 5) | (GENMASK(15, 1) << 16),
},
[PCI_EXP_RTSTA / 4] = {
+ /*
+ * Root status has bits 17 and [15:0] RO, bit 16 W1C, the rest
+ * is reserved.
+ */
.ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
.w1c = PCI_EXP_RTSTA_PME,
- .rsvd = GENMASK(31, 18),
},
};
@@ -354,7 +354,8 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
* Make sure we never return any reserved bit with a value
* different from 0.
*/
- *value &= ~behavior[reg / 4].rsvd;
+ *value &= behavior[reg / 4].ro | behavior[reg / 4].rw |
+ behavior[reg / 4].w1c;
if (size == 1)
*value = (*value >> (8 * (where & 3))) & 0xff;
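[Editor's note] After this change the reserved mask is no longer maintained by hand: any bit not declared RO, RW or W1C is treated as reserved and reads back as 0, which is exactly what the masking in pci_bridge_emul_conf_read() above implements. Equivalent sketch (illustrative helper, not in the patch):

	/* Reserved bits are simply the complement of the declared behaviors. */
	static u32 pci_bridge_emul_reserved(const struct pci_bridge_reg_behavior *b)
	{
		return ~(b->ro | b->rw | b->w1c);
	}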
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index a5910f942857..707dd9808676 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -178,7 +178,7 @@ static int dsm_get_label(struct device *dev, char *buf,
return -1;
obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2,
- DEVICE_LABEL_DSM, NULL);
+ DSM_PCI_DEVICE_NAME, NULL);
if (!obj)
return -1;
@@ -218,7 +218,7 @@ static bool device_has_dsm(struct device *dev)
return false;
return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
- 1 << DEVICE_LABEL_DSM);
+ 1 << DSM_PCI_DEVICE_NAME);
}
static umode_t acpi_index_string_exist(struct kobject *kobj,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 595fcf59843f..ce096272f52b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -752,30 +752,6 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
EXPORT_SYMBOL(pci_find_resource);
/**
- * pci_find_pcie_root_port - return PCIe Root Port
- * @dev: PCI device to query
- *
- * Traverse up the parent chain and return the PCIe Root Port PCI Device
- * for a given PCI Device.
- */
-struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
-{
- struct pci_dev *bridge, *highest_pcie_bridge = dev;
-
- bridge = pci_upstream_bridge(dev);
- while (bridge && pci_is_pcie(bridge)) {
- highest_pcie_bridge = bridge;
- bridge = pci_upstream_bridge(bridge);
- }
-
- if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
- return NULL;
-
- return highest_pcie_bridge;
-}
-EXPORT_SYMBOL(pci_find_pcie_root_port);
-
-/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -868,7 +844,9 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev)
static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
- return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
+ if (pci_platform_pm && pci_platform_pm->bridge_d3)
+ return pci_platform_pm->bridge_d3(dev);
+ return false;
}
/**
@@ -1578,7 +1556,7 @@ EXPORT_SYMBOL(pci_restore_state);
struct pci_saved_state {
u32 config_space[16];
- struct pci_cap_saved_data cap[0];
+ struct pci_cap_saved_data cap[];
};
/**
@@ -4660,7 +4638,8 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
- * @delay: Delay to wait after link has become active (in ms)
+ * @delay: Delay to wait after link has become active (in ms). Specify %0
+ * for no delay.
*
* Use this to wait till link becomes active or inactive.
*/
@@ -4673,10 +4652,10 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* Some controllers might not implement link active reporting. In this
- * case, we wait for 1000 + 100 ms.
+ * case, we wait for 1000 ms + any delay requested by the caller.
*/
if (!pdev->link_active_reporting) {
- msleep(1100);
+ msleep(timeout + delay);
return true;
}
@@ -4701,7 +4680,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
msleep(10);
timeout -= 10;
}
- if (active && ret)
+ if (active && ret && delay)
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
@@ -4822,17 +4801,28 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_downstream_port(dev))
return;
- if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
- pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
- msleep(delay);
- } else {
- pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
- delay);
- if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ /*
+ * Per PCIe r5.0, sec 6.6.1, for downstream ports that support
+ * speeds > 5 GT/s, we must wait for link training to complete
+ * before the mandatory delay.
+ *
+ * We can only tell when link training completes via DLL Link
+ * Active, which is required for downstream ports that support
+ * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common
+ * devices do not implement Link Active reporting even when it's
+ * required, so we'll check for that directly instead of checking
+ * the supported link speed. We assume devices without Link Active
+ * reporting can train in 100 ms regardless of speed.
+ */
+ if (dev->link_active_reporting) {
+ pci_dbg(dev, "waiting for link to train\n");
+ if (!pcie_wait_for_link_delay(dev, true, 0)) {
/* Did not train, no need to wait any further */
return;
}
}
+ pci_dbg(child, "waiting %d ms to become accessible\n", delay);
+ msleep(delay);
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 66386811cfde..9cd31331aee9 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -25,7 +25,6 @@ config PCIEAER
bool "PCI Express Advanced Error Reporting support"
depends on PCIEPORTBUS
select RAS
- default y
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index f4274d301235..3acf56683915 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -136,22 +136,18 @@ static const char * const ecrc_policy_str[] = {
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 reg32;
- if (!pci_is_pcie(dev))
+ if (!aer)
return -ENODEV;
- pos = dev->aer_cap;
- if (!pos)
- return -ENODEV;
-
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
if (reg32 & PCI_ERR_CAP_ECRC_GENC)
reg32 |= PCI_ERR_CAP_ECRC_GENE;
if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
reg32 |= PCI_ERR_CAP_ECRC_CHKE;
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);
return 0;
}
@@ -164,19 +160,15 @@ static int enable_ecrc_checking(struct pci_dev *dev)
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 reg32;
- if (!pci_is_pcie(dev))
+ if (!aer)
return -ENODEV;
- pos = dev->aer_cap;
- if (!pos)
- return -ENODEV;
-
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &reg32);
reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_CAP, reg32);
return 0;
}
@@ -217,142 +209,22 @@ void pcie_ecrc_get_policy(char *str)
}
#endif /* CONFIG_PCIE_ECRC */
-#ifdef CONFIG_ACPI_APEI
-static inline int hest_match_pci(struct acpi_hest_aer_common *p,
- struct pci_dev *pci)
-{
- return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
- ACPI_HEST_BUS(p->bus) == pci->bus->number &&
- p->device == PCI_SLOT(pci->devfn) &&
- p->function == PCI_FUNC(pci->devfn);
-}
-
-static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
- struct pci_dev *dev)
-{
- u16 hest_type = hest_hdr->type;
- u8 pcie_type = pci_pcie_type(dev);
-
- if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
- pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
- (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
- pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
- (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
- (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
- return true;
- return false;
-}
-
-struct aer_hest_parse_info {
- struct pci_dev *pci_dev;
- int firmware_first;
-};
-
-static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
-{
- if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
- hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
- hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
- return 1;
- return 0;
-}
-
-static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
-{
- struct aer_hest_parse_info *info = data;
- struct acpi_hest_aer_common *p;
- int ff;
-
- if (!hest_source_is_pcie_aer(hest_hdr))
- return 0;
-
- p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
- ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
-
- /*
- * If no specific device is supplied, determine whether
- * FIRMWARE_FIRST is set for *any* PCIe device.
- */
- if (!info->pci_dev) {
- info->firmware_first |= ff;
- return 0;
- }
-
- /* Otherwise, check the specific device */
- if (p->flags & ACPI_HEST_GLOBAL) {
- if (hest_match_type(hest_hdr, info->pci_dev))
- info->firmware_first = ff;
- } else
- if (hest_match_pci(p, info->pci_dev))
- info->firmware_first = ff;
-
- return 0;
-}
-
-static void aer_set_firmware_first(struct pci_dev *pci_dev)
-{
- int rc;
- struct aer_hest_parse_info info = {
- .pci_dev = pci_dev,
- .firmware_first = 0,
- };
-
- rc = apei_hest_parse(aer_hest_parse, &info);
-
- if (rc)
- pci_dev->__aer_firmware_first = 0;
- else
- pci_dev->__aer_firmware_first = info.firmware_first;
- pci_dev->__aer_firmware_first_valid = 1;
-}
+#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
-int pcie_aer_get_firmware_first(struct pci_dev *dev)
+int pcie_aer_is_native(struct pci_dev *dev)
{
- if (!pci_is_pcie(dev))
- return 0;
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- if (pcie_ports_native)
+ if (!dev->aer_cap)
return 0;
- if (!dev->__aer_firmware_first_valid)
- aer_set_firmware_first(dev);
- return dev->__aer_firmware_first;
-}
-
-static bool aer_firmware_first;
-
-/**
- * aer_acpi_firmware_first - Check if APEI should control AER.
- */
-bool aer_acpi_firmware_first(void)
-{
- static bool parsed = false;
- struct aer_hest_parse_info info = {
- .pci_dev = NULL, /* Check all PCIe devices */
- .firmware_first = 0,
- };
-
- if (pcie_ports_native)
- return false;
-
- if (!parsed) {
- apei_hest_parse(aer_hest_parse, &info);
- aer_firmware_first = info.firmware_first;
- parsed = true;
- }
- return aer_firmware_first;
+ return pcie_ports_native || host->native_aer;
}
-#endif
-
-#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
- PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
- if (pcie_aer_get_firmware_first(dev))
- return -EIO;
-
- if (!dev->aer_cap)
+ if (!pcie_aer_is_native(dev))
return -EIO;
return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
@@ -361,7 +233,7 @@ EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return -EIO;
return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
@@ -379,22 +251,18 @@ void pci_aer_clear_device_status(struct pci_dev *dev)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status, sev;
- pos = dev->aer_cap;
- if (!pos)
- return -EIO;
-
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return -EIO;
/* Clear status bits for ERR_NONFATAL errors only */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
status &= ~sev;
if (status)
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
@@ -402,22 +270,18 @@ EXPORT_SYMBOL_GPL(pci_aer_clear_nonfatal_status);
void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status, sev;
- pos = dev->aer_cap;
- if (!pos)
- return;
-
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return;
/* Clear status bits for ERR_FATAL errors only */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
status &= sev;
if (status)
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
}
/**
@@ -431,35 +295,31 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status;
int port_type;
- if (!pci_is_pcie(dev))
- return -ENODEV;
-
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return -EIO;
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status);
}
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
return 0;
}
int pci_aer_clear_status(struct pci_dev *dev)
{
- if (pcie_aer_get_firmware_first(dev))
+ if (!pcie_aer_is_native(dev))
return -EIO;
return pci_aer_raw_clear_status(dev);
@@ -467,12 +327,11 @@ int pci_aer_clear_status(struct pci_dev *dev)
void pci_save_aer_state(struct pci_dev *dev)
{
+ int aer = dev->aer_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
- int pos;
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
@@ -480,22 +339,21 @@ void pci_save_aer_state(struct pci_dev *dev)
return;
cap = &save_state->cap.data[0];
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++);
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++);
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, cap++);
if (pcie_cap_has_rtctl(dev))
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, cap++);
}
void pci_restore_aer_state(struct pci_dev *dev)
{
+ int aer = dev->aer_cap;
struct pci_cap_saved_state *save_state;
u32 *cap;
- int pos;
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
@@ -503,12 +361,12 @@ void pci_restore_aer_state(struct pci_dev *dev)
return;
cap = &save_state->cap.data[0];
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++);
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++);
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++);
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_CAP, *cap++);
if (pcie_cap_has_rtctl(dev))
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, *cap++);
}
void pci_aer_init(struct pci_dev *dev)
@@ -939,7 +797,7 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
*/
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
- int pos;
+ int aer = dev->aer_cap;
u32 status, mask;
u16 reg16;
@@ -974,17 +832,16 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (!(reg16 & PCI_EXP_AER_FLAGS))
return false;
- pos = dev->aer_cap;
- if (!pos)
+ if (!aer)
return false;
/* Check if error is recorded */
if (e_info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
} else {
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
}
if (status & ~mask)
return true;
@@ -1055,16 +912,15 @@ static bool find_source_device(struct pci_dev *parent,
*/
static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
- int pos;
+ int aer = dev->aer_cap;
if (info->severity == AER_CORRECTABLE) {
/*
* Correctable error does not need software intervention.
* No need to go through error recovery process.
*/
- pos = dev->aer_cap;
- if (pos)
- pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
+ if (aer)
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
@@ -1155,22 +1011,21 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
*/
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
- int pos, temp;
+ int aer = dev->aer_cap;
+ int temp;
/* Must reset in this function */
info->status = 0;
info->tlp_header_valid = 0;
- pos = dev->aer_cap;
-
/* The device might not support AER */
- if (!pos)
+ if (!aer)
return 0;
if (info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS,
&info->status);
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK,
&info->mask);
if (!(info->status & ~info->mask))
return 0;
@@ -1179,27 +1034,27 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
&info->status);
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
&info->mask);
if (!(info->status & ~info->mask))
return 0;
/* Get First Error Pointer */
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
info->first_error = PCI_ERR_CAP_FEP(temp);
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
+ aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
+ aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
+ aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
pci_read_config_dword(dev,
- pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
}
}
@@ -1305,15 +1160,15 @@ static irqreturn_t aer_irq(int irq, void *context)
struct pcie_device *pdev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(pdev);
struct pci_dev *rp = rpc->rpd;
+ int aer = rp->aer_cap;
struct aer_err_source e_src = {};
- int pos = rp->aer_cap;
- pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
+ pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &e_src.status);
if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
return IRQ_NONE;
- pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
- pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);
+ pci_read_config_dword(rp, aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
+ pci_write_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, e_src.status);
if (!kfifo_put(&rpc->aer_fifo, e_src))
return IRQ_HANDLED;
@@ -1365,7 +1220,7 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
static void aer_enable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd;
- int aer_pos;
+ int aer = pdev->aer_cap;
u16 reg16;
u32 reg32;
@@ -1377,14 +1232,13 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
SYSTEM_ERROR_INTR_ON_MESG_MASK);
- aer_pos = pdev->aer_cap;
/* Clear error status */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);
/*
* Enable error reporting for the root port device and downstream port
@@ -1393,9 +1247,9 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
set_downstream_devices_error_reporting(pdev, true);
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
}
/**
@@ -1407,8 +1261,8 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
static void aer_disable_rootport(struct aer_rpc *rpc)
{
struct pci_dev *pdev = rpc->rpd;
+ int aer = pdev->aer_cap;
u32 reg32;
- int pos;
/*
* Disable error reporting for the root port device and downstream port
@@ -1416,15 +1270,14 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
*/
set_downstream_devices_error_reporting(pdev, false);
- pos = pdev->aer_cap;
/* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
/* Clear Root's error status reg */
- pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, reg32);
}
/**
@@ -1481,28 +1334,27 @@ static int aer_probe(struct pcie_device *dev)
*/
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
+ int aer = dev->aer_cap;
u32 reg32;
- int pos;
int rc;
- pos = dev->aer_cap;
/* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
rc = pci_bus_error_reset(dev);
pci_info(dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, reg32);
/* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+ pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32);
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
@@ -1523,7 +1375,7 @@ static struct pcie_port_service_driver aerdriver = {
*/
int __init pcie_aer_init(void)
{
- if (!pci_aer_available() || aer_acpi_firmware_first())
+ if (!pci_aer_available())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
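For reference, a minimal sketch of the pattern the AER hunks above converge on: every path now uses the extended-capability offset cached in pci_dev::aer_cap instead of re-deriving a local "pos". The helper below is illustrative only (not part of the patch) and assumes the Root Port's AER capability was discovered at enumeration time.

#include <linux/pci.h>

/* Read and clear the Root Error Status register (its bits are RW1C). */
static u32 example_read_clear_root_status(struct pci_dev *rp)
{
	int aer = rp->aer_cap;	/* cached AER ext-cap offset, 0 if absent */
	u32 status = 0;

	if (!aer)
		return 0;

	pci_read_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, &status);
	pci_write_config_dword(rp, aer + PCI_ERR_ROOT_STATUS, status);
	return status;
}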
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2378ed692534..b17e5ffd31b1 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -628,16 +628,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM for now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
- link->aspm_disable = ASPM_STATE_ALL;
- break;
- }
- }
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 762170423fdd..daa9a4153776 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -284,7 +284,7 @@ static int dpc_probe(struct pcie_device *dev)
int status;
u16 ctl, cap;
- if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
+ if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
@@ -301,6 +301,7 @@ static int dpc_probe(struct pcie_device *dev)
ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+ pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 594622a6cb16..a6b9b479b97a 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -148,11 +148,11 @@ static void edr_handle_event(acpi_handle handle, u32 event, void *data)
pci_ers_result_t estate = PCI_ERS_RESULT_DISCONNECT;
u16 status;
- pci_info(pdev, "ACPI event %#x received\n", event);
-
if (event != ACPI_NOTIFY_DISCONNECT_RECOVER)
return;
+ pci_info(pdev, "EDR event received\n");
+
/* Locate the port which issued EDR event */
edev = acpi_dpc_port_get(pdev);
if (!edev) {
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index f38e6c19dd50..6a32970bb731 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -408,7 +408,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
/**
* pcie_pme_resume - Resume PCIe PME service device.
- * @srv - PCIe service device to resume.
+ * @srv: PCIe service device to resume.
*/
static int pcie_pme_resume(struct pcie_device *srv)
{
@@ -431,7 +431,7 @@ static int pcie_pme_resume(struct pcie_device *srv)
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
- * @srv - PCIe service device to remove.
+ * @srv: PCIe service device to remove.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 64b5e081cdb2..af7cf237432a 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -29,8 +29,10 @@ extern bool pcie_ports_dpc_native;
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pcie_aer_init(void) { return 0; }
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_HOTPLUG_PCI_PCIE
@@ -147,16 +149,5 @@ static inline bool pcie_pme_no_msi(void) { return false; }
static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
-#ifdef CONFIG_ACPI_APEI
-int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
-#else
-static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
-{
- if (pci_dev->__aer_firmware_first_valid)
- return pci_dev->__aer_firmware_first;
- return 0;
-}
-#endif
-
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
#endif /* _PORTDRV_H_ */
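The portdrv header now exposes pcie_aer_is_native() to sibling port services. A sketch of the intended usage, assumed here and modeled on the dpc_probe() change earlier, gates a service on OS ownership of AER:

/* Hypothetical service probe; example_service_probe is not in the patch. */
static int example_service_probe(struct pcie_device *dev)
{
	struct pci_dev *port = dev->port;

	/* If firmware owns AER (and DPC isn't forced native), back off. */
	if (!pcie_aer_is_native(port) && !pcie_ports_dpc_native)
		return -ENOTSUPP;

	/* ... normal service-specific setup would follow here ... */
	return 0;
}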
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index 9361f3aa26ab..357a454cafa0 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -39,10 +39,6 @@ void pci_ptm_init(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return;
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
- if (!pos)
- return;
-
/*
* Enable PTM only on interior devices (root ports, switch ports,
* etc.) on the assumption that it causes no link traffic until an
@@ -52,6 +48,23 @@ void pci_ptm_init(struct pci_dev *dev)
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
return;
+ /*
+ * Switch Downstream Ports are not permitted to have a PTM
+ * capability; their PTM behavior is controlled by the Upstream
+ * Port (PCIe r5.0, sec 7.9.16).
+ */
+ ups = pci_upstream_bridge(dev);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ ups && ups->ptm_enabled) {
+ dev->ptm_granularity = ups->ptm_granularity;
+ dev->ptm_enabled = 1;
+ return;
+ }
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
+ if (!pos)
+ return;
+
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
@@ -61,7 +74,6 @@ void pci_ptm_init(struct pci_dev *dev)
* the spec recommendation (PCIe r3.1, sec 7.32.3), select the
* furthest upstream Time Source as the PTM Root.
*/
- ups = pci_upstream_bridge(dev);
if (ups && ups->ptm_enabled) {
ctrl = PCI_PTM_CTRL_ENABLE;
if (ups->ptm_granularity == 0)
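Condensed, the new PTM rule above can be read as a single predicate: a Switch Downstream Port never carries a PTM capability of its own, so it inherits the enable state and granularity from its Upstream Port. Illustrative helper only, not taken from the patch:

static bool example_ptm_inherits_from_upstream(struct pci_dev *dev)
{
	struct pci_dev *ups = pci_upstream_bridge(dev);

	return pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
	       ups && ups->ptm_enabled;
}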
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index d9c2c3301a8a..2f66988cea25 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -565,7 +565,7 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
return b;
}
-static void devm_pci_release_host_bridge_dev(struct device *dev)
+static void pci_release_host_bridge_dev(struct device *dev)
{
struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
@@ -574,12 +574,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
pci_free_resource_list(&bridge->windows);
pci_free_resource_list(&bridge->dma_ranges);
-}
-
-static void pci_release_host_bridge_dev(struct device *dev)
-{
- devm_pci_release_host_bridge_dev(dev);
- kfree(to_pci_host_bridge(dev));
+ kfree(bridge);
}
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
@@ -599,6 +594,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_pme = 1;
bridge->native_ltr = 1;
bridge->native_dpc = 1;
+
+ device_initialize(&bridge->dev);
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -616,17 +613,25 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
}
EXPORT_SYMBOL(pci_alloc_host_bridge);
+static void devm_pci_alloc_host_bridge_release(void *data)
+{
+ pci_free_host_bridge(data);
+}
+
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
size_t priv)
{
+ int ret;
struct pci_host_bridge *bridge;
- bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
+ bridge = pci_alloc_host_bridge(priv);
if (!bridge)
return NULL;
- pci_init_host_bridge(bridge);
- bridge->dev.release = devm_pci_release_host_bridge_dev;
+ ret = devm_add_action_or_reset(dev, devm_pci_alloc_host_bridge_release,
+ bridge);
+ if (ret)
+ return NULL;
return bridge;
}
@@ -634,10 +639,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
- pci_free_resource_list(&bridge->windows);
- pci_free_resource_list(&bridge->dma_ranges);
-
- kfree(bridge);
+ put_device(&bridge->dev);
}
EXPORT_SYMBOL(pci_free_host_bridge);
@@ -908,10 +910,11 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (err)
goto free;
- err = device_register(&bridge->dev);
- if (err)
+ err = device_add(&bridge->dev);
+ if (err) {
put_device(&bridge->dev);
-
+ goto free;
+ }
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -977,7 +980,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
- device_unregister(&bridge->dev);
+ device_del(&bridge->dev);
free:
kfree(bus);
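Taken together, the probe.c hunks move the host bridge onto the standard two-step struct device lifecycle. A compressed sketch of that generic device-core pattern (illustrative, not a drop-in replacement for the code above):

static int example_register(struct pci_host_bridge *bridge)
{
	int err;

	/* bridge->dev was device_initialize()d when the bridge was allocated */
	err = device_add(&bridge->dev);
	if (err) {
		put_device(&bridge->dev);	/* drop the initial reference */
		return err;
	}
	return 0;
}

static void example_unregister(struct pci_host_bridge *bridge)
{
	device_del(&bridge->dev);	/* tear down sysfs/bus presence */
	put_device(&bridge->dev);	/* last reference: ->release() frees it */
}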
@@ -1934,13 +1937,33 @@ static void pci_configure_mps(struct pci_dev *dev)
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, mpss, p_mps, rc;
- if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ if (!pci_is_pcie(dev))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
+ /*
+ * For Root Complex Integrated Endpoints, program the maximum
+ * supported value unless limited by the PCIE_BUS_PEER2PEER case.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ mps = 128;
+ else
+ mps = 128 << dev->pcie_mpss;
+ rc = pcie_set_mps(dev, mps);
+ if (rc) {
+ pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps);
+ }
+ return;
+ }
+
+ if (!bridge || !pci_is_pcie(bridge))
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@@ -2056,7 +2079,7 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
* For now, we only deal with Relaxed Ordering issues with Root
* Ports. Peer-to-Peer DMA is another can of worms.
*/
- root = pci_find_pcie_root_port(dev);
+ root = pcie_find_root_port(dev);
if (!root)
return;
@@ -2952,7 +2975,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
return bridge->bus;
err_out:
- kfree(bridge);
+ put_device(&bridge->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
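For a Root Complex Integrated Endpoint the Max Payload Size is now taken from the device's own MPSS field, since there is no upstream bridge to match against; pcie_mpss encodes a power-of-two multiple of 128 bytes, so mpss = 2 gives 128 << 2 = 512. A hypothetical helper showing just the arithmetic:

static int example_rciep_mps(struct pci_dev *dev)
{
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		return 128;		/* conservative value for p2p setups */

	return 128 << dev->pcie_mpss;	/* device's own maximum */
}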
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ca9ed5774eb1..812bfc32ecb8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4319,7 +4319,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED,
*/
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
- struct pci_dev *root_port = pci_find_pcie_root_port(pdev);
+ struct pci_dev *root_port = pcie_find_root_port(pdev);
if (!root_port) {
pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
@@ -4682,6 +4682,20 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
+static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Intel RCiEPs are required to allow p2p only on translated
+ * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16,
+ * "Root-Complex Peer to Peer Considerations".
+ */
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
+ return -ENOTTY;
+
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
@@ -4764,6 +4778,7 @@ static const struct pci_dev_acs_enabled {
/* I219 */
{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
@@ -5129,13 +5144,25 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
-/* FLR may cause some 82579 devices to hang */
-static void quirk_intel_no_flr(struct pci_dev *dev)
+/*
+ * FLR may cause the following devices to hang:
+ *
+ * AMD Starship/Matisse HD Audio Controller 0x1487
+ * AMD Starship USB 3.0 Host Controller 0x148c
+ * AMD Matisse USB 3.0 Host Controller 0x149c
+ * Intel 82579LM Gigabit Ethernet Controller 0x1502
+ * Intel 82579V Gigabit Ethernet Controller 0x1503
+ *
+ */
+static void quirk_no_flr(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
@@ -5568,6 +5595,19 @@ static void pci_fixup_no_d0_pme(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
+/*
+ * Devices [12d8:0x400e] and [12d8:0x400f]
+ * These devices advertise PME# support in all power states but don't
+ * reliably assert it.
+ */
+static void pci_fixup_no_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# is unreliable, disabling it\n");
+ dev->pme_support = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_pme);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_pme);
+
static void apex_pci_fixup_class(struct pci_dev *pdev)
{
pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
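Both new quirks above follow the same minimal shape: a fixup function that flips a flag on struct pci_dev, hooked in by a DECLARE_PCI_FIXUP_* statement keyed on vendor and device ID. Generic sketch with placeholder IDs (not a real device):

static void example_fixup_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
/* 0x1234/0x5678 are placeholders, not an actual vendor/device pair */
DECLARE_PCI_FIXUP_EARLY(0x1234, 0x5678, example_fixup_no_flr);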
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index e9c6b120cf45..95dec03d9f2a 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -160,6 +160,6 @@ void pci_remove_root_bus(struct pci_bus *bus)
host_bridge->bus = NULL;
/* remove the host bridge */
- device_unregister(&host_bridge->dev);
+ device_del(&host_bridge->dev);
}
EXPORT_SYMBOL_GPL(pci_remove_root_bus);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index bbcef1a053ab..9b94b1f16d80 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -26,6 +26,7 @@
#include "pci.h"
unsigned int pci_flags;
+EXPORT_SYMBOL_GPL(pci_flags);
struct pci_dev_resource {
struct list_head list;
@@ -583,7 +584,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus */
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -613,7 +614,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus */
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
@@ -640,7 +641,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
/* Set up PREF base/limit */
bu = lu = 0;
- res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -707,14 +708,14 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
if (!pci_bus_clip_resource(bridge, i))
return -EINVAL; /* Clipping didn't change anything */
- switch (i - PCI_BRIDGE_RESOURCES) {
- case 0:
+ switch (i) {
+ case PCI_BRIDGE_IO_WINDOW:
pci_setup_bridge_io(bridge);
break;
- case 1:
+ case PCI_BRIDGE_MEM_WINDOW:
pci_setup_bridge_mmio(bridge);
break;
- case 2:
+ case PCI_BRIDGE_PREF_MEM_WINDOW:
pci_setup_bridge_mmio_pref(bridge);
break;
default:
@@ -735,18 +736,22 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
struct pci_dev *bridge = bus->self;
- struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ struct resource *b_res;
- b_res[1].flags |= IORESOURCE_MEM;
+ b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ b_res->flags |= IORESOURCE_MEM;
- if (bridge->io_window)
- b_res[0].flags |= IORESOURCE_IO;
+ if (bridge->io_window) {
+ b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ b_res->flags |= IORESOURCE_IO;
+ }
if (bridge->pref_window) {
- b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
if (bridge->pref_64_window) {
- b_res[2].flags |= IORESOURCE_MEM_64;
- b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
+ b_res->flags |= IORESOURCE_MEM_64 |
+ PCI_PREF_RANGE_TYPE_64;
}
}
}
@@ -1105,35 +1110,37 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
struct list_head *realloc_head)
{
struct pci_dev *bridge = bus->self;
- struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ struct resource *b_res;
resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
u16 ctrl;
- if (b_res[0].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW];
+ if (b_res->parent)
goto handle_b_res_1;
/*
* Reserve some resources for CardBus. We reserve a fixed amount
* of bus space for CardBus bridges.
*/
- b_res[0].start = pci_cardbus_io_size;
- b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
- b_res[0].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_io_size;
+ b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[0].end -= pci_cardbus_io_size;
+ b_res->end -= pci_cardbus_io_size;
add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
- pci_cardbus_io_size);
+ pci_cardbus_io_size);
}
handle_b_res_1:
- if (b_res[1].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
+ if (b_res->parent)
goto handle_b_res_2;
- b_res[1].start = pci_cardbus_io_size;
- b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1;
- b_res[1].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_io_size;
+ b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[1].end -= pci_cardbus_io_size;
- add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size,
- pci_cardbus_io_size);
+ b_res->end -= pci_cardbus_io_size;
+ add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
+ pci_cardbus_io_size);
}
handle_b_res_2:
@@ -1153,21 +1160,22 @@ handle_b_res_2:
pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
}
- if (b_res[2].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW];
+ if (b_res->parent)
goto handle_b_res_3;
/*
* If we have prefetchable memory support, allocate two regions.
* Otherwise, allocate one region of twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
- b_res[2].start = pci_cardbus_mem_size;
- b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1;
- b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
- IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_mem_size;
+ b_res->end = b_res->start + pci_cardbus_mem_size - 1;
+ b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[2].end -= pci_cardbus_mem_size;
- add_to_list(realloc_head, bridge, b_res+2,
- pci_cardbus_mem_size, pci_cardbus_mem_size);
+ b_res->end -= pci_cardbus_mem_size;
+ add_to_list(realloc_head, bridge, b_res,
+ pci_cardbus_mem_size, pci_cardbus_mem_size);
}
/* Reduce that to half */
@@ -1175,15 +1183,16 @@ handle_b_res_2:
}
handle_b_res_3:
- if (b_res[3].parent)
+ b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
+ if (b_res->parent)
goto handle_done;
- b_res[3].start = pci_cardbus_mem_size;
- b_res[3].end = b_res[3].start + b_res_3_size - 1;
- b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
+ b_res->start = pci_cardbus_mem_size;
+ b_res->end = b_res->start + b_res_3_size - 1;
+ b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
if (realloc_head) {
- b_res[3].end -= b_res_3_size;
- add_to_list(realloc_head, bridge, b_res+3, b_res_3_size,
- pci_cardbus_mem_size);
+ b_res->end -= b_res_3_size;
+ add_to_list(realloc_head, bridge, b_res, b_res_3_size,
+ pci_cardbus_mem_size);
}
handle_done:
@@ -1227,7 +1236,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
break;
hdr_type = -1; /* Intentionally invalid - not a PCI device. */
} else {
- pref = &bus->self->resource[PCI_BRIDGE_RESOURCES + 2];
+ pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
hdr_type = bus->self->hdr_type;
}
@@ -1885,9 +1894,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
struct pci_dev *dev, *bridge = bus->self;
resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
- io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
- mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
- mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
/*
* The alignment of this bridge is yet to be considered, hence it must
@@ -1960,21 +1969,21 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* Reduce the available resource space by what the
* bridge and devices below it occupy.
*/
- res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
+ res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(io.start, align) - io.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
io.start = min(io.start + used_size, io.end + 1);
- res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
+ res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
used_size = align + resource_size(res);
if (!res->parent)
mmio.start = min(mmio.start + used_size, mmio.end + 1);
- res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
+ res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
align = align ? ALIGN(mmio_pref.start, align) -
mmio_pref.start : 0;
@@ -2027,9 +2036,9 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
return;
/* Take the initial extra resources from the hotplug port */
- available_io = bridge->resource[PCI_BRIDGE_RESOURCES + 0];
- available_mmio = bridge->resource[PCI_BRIDGE_RESOURCES + 1];
- available_mmio_pref = bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
pci_bus_distribute_available_resources(bridge->subordinate,
add_list, available_io,
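The setup-bus.c changes are mechanical: raw "PCI_BRIDGE_RESOURCES + n" offsets become named window indices, which the diff treats as equivalent. Shown as a hypothetical helper:

static struct resource *example_bridge_pref_window(struct pci_dev *bridge)
{
	/*
	 * PCI_BRIDGE_IO_WINDOW       == PCI_BRIDGE_RESOURCES + 0
	 * PCI_BRIDGE_MEM_WINDOW      == PCI_BRIDGE_RESOURCES + 1
	 * PCI_BRIDGE_PREF_MEM_WINDOW == PCI_BRIDGE_RESOURCES + 2
	 */
	return &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
}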
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d8ca40a97693..d21fa04fa44d 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -439,10 +439,11 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
/* Check if the new config works by trying to assign everything. */
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
- if (ret)
- goto error_resize;
-
+ if (dev->bus->self) {
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+ }
return 0;
error_resize:
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index e69cac84b605..850cfeb74608 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -25,7 +25,7 @@ static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
-static bool use_dma_mrpc = 1;
+static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
"Enable the use of the DMA MRPC feature");
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d1b16cf3403f..fab267e359e7 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -77,9 +77,6 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
static DEFINE_SPINLOCK(pcifront_dev_lock);
static struct pcifront_device *pcifront_dev;
-static int verbose_request;
-module_param(verbose_request, int, 0644);
-
static int errno_to_pcibios_err(int errno)
{
switch (errno) {
@@ -190,18 +187,16 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
struct pcifront_sd *sd = bus->sysdata;
struct pcifront_device *pdev = pcifront_get_pdev(sd);
- if (verbose_request)
- dev_info(&pdev->xdev->dev,
- "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
- pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
- PCI_FUNC(devfn), where, size);
+ dev_dbg(&pdev->xdev->dev,
+ "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where, size);
err = do_pci_op(pdev, &op);
if (likely(!err)) {
- if (verbose_request)
- dev_info(&pdev->xdev->dev, "read got back value %x\n",
- op.value);
+ dev_dbg(&pdev->xdev->dev, "read got back value %x\n",
+ op.value);
*val = op.value;
} else if (err == -ENODEV) {
@@ -229,12 +224,10 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
struct pcifront_sd *sd = bus->sysdata;
struct pcifront_device *pdev = pcifront_get_pdev(sd);
- if (verbose_request)
- dev_info(&pdev->xdev->dev,
- "write dev=%04x:%02x:%02x.%d - "
- "offset %x size %d val %x\n",
- pci_domain_nr(bus), bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+ dev_dbg(&pdev->xdev->dev,
+ "write dev=%04x:%02x:%02x.%d - offset %x size %d val %x\n",
+ pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
return errno_to_pcibios_err(do_pci_op(pdev, &op));
}
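With the conversion to dev_dbg() the xen-pcifront messages are still reachable at runtime through dynamic debug instead of a driver-private module parameter. A small illustrative wrapper (not in the patch) showing the call and how it would be enabled:

static void example_log_read(struct pcifront_device *pdev, u32 value)
{
	/*
	 * Compiled out unless DEBUG is set or dynamic debug enables it, e.g.:
	 *   echo 'file xen-pcifront.c +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	dev_dbg(&pdev->xdev->dev, "read got back value %x\n", value);
}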
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index fb9b17fa0fb5..580369f3c0b0 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -164,12 +164,6 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
int verify_cis_cache(struct pcmcia_socket *s);
-int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, cisparse_t *parse, void *priv_data,
- int (*loop_tuple) (tuple_t *tuple,
- cisparse_t *parse,
- void *priv_data));
-
int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
tuple_t *tuple);
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index b553f7ab532f..e4c4daf92038 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -78,9 +78,9 @@ done:
* calls the @loop_tuple function for each entry. If the call to @loop_tuple
* returns 0, the loop exits. Returns 0 on success or errorcode otherwise.
*/
-int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
- cisdata_t code, cisparse_t *parse, void *priv_data,
- int (*loop_tuple) (tuple_t *tuple,
+static int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, cisparse_t *parse, void *priv_data,
+ int (*loop_tuple) (tuple_t *tuple,
cisparse_t *parse,
void *priv_data))
{
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index bf6529b0b5b0..84bfc0e85d6b 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -694,7 +694,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
struct pci_bus_region region;
unsigned mask;
- res = dev->resource + PCI_BRIDGE_RESOURCES + nr;
+ res = &dev->resource[nr];
/* Already allocated? */
if (res->parent)
return 0;
@@ -711,7 +711,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
pcibios_bus_to_resource(dev->bus, res, &region);
- if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
+ if (pci_claim_resource(dev, nr) == 0)
return 0;
dev_info(&dev->dev,
"Preassigned resource %d busy or not available, reconfiguring...\n",
@@ -745,19 +745,35 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
return 0;
}
+static void yenta_free_res(struct yenta_socket *socket, int nr)
+{
+ struct pci_dev *dev = socket->dev;
+ struct resource *res;
+
+ res = &dev->resource[nr];
+ if (res->start != 0 && res->end != 0)
+ release_resource(res);
+
+ res->start = res->end = res->flags = 0;
+}
+
/*
* Allocate the bridge mappings for the device..
*/
static void yenta_allocate_resources(struct yenta_socket *socket)
{
int program = 0;
- program += yenta_allocate_res(socket, 0, IORESOURCE_IO,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_IO_0_WINDOW,
+ IORESOURCE_IO,
PCI_CB_IO_BASE_0, PCI_CB_IO_LIMIT_0);
- program += yenta_allocate_res(socket, 1, IORESOURCE_IO,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_IO_1_WINDOW,
+ IORESOURCE_IO,
PCI_CB_IO_BASE_1, PCI_CB_IO_LIMIT_1);
- program += yenta_allocate_res(socket, 2, IORESOURCE_MEM|IORESOURCE_PREFETCH,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_MEM_0_WINDOW,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH,
PCI_CB_MEMORY_BASE_0, PCI_CB_MEMORY_LIMIT_0);
- program += yenta_allocate_res(socket, 3, IORESOURCE_MEM,
+ program += yenta_allocate_res(socket, PCI_CB_BRIDGE_MEM_1_WINDOW,
+ IORESOURCE_MEM,
PCI_CB_MEMORY_BASE_1, PCI_CB_MEMORY_LIMIT_1);
if (program)
pci_setup_cardbus(socket->dev->subordinate);
@@ -769,14 +785,10 @@ static void yenta_allocate_resources(struct yenta_socket *socket)
*/
static void yenta_free_resources(struct yenta_socket *socket)
{
- int i;
- for (i = 0; i < 4; i++) {
- struct resource *res;
- res = socket->dev->resource + PCI_BRIDGE_RESOURCES + i;
- if (res->start != 0 && res->end != 0)
- release_resource(res);
- res->start = res->end = res->flags = 0;
- }
+ yenta_free_res(socket, PCI_CB_BRIDGE_IO_0_WINDOW);
+ yenta_free_res(socket, PCI_CB_BRIDGE_IO_1_WINDOW);
+ yenta_free_res(socket, PCI_CB_BRIDGE_MEM_0_WINDOW);
+ yenta_free_res(socket, PCI_CB_BRIDGE_MEM_1_WINDOW);
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 8dd1278bec04..7719ae4e2c56 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -35,7 +35,7 @@
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
-#define L3C_PERF_CTRL_EN 0x20000
+#define L3C_PERF_CTRL_EN 0x10000
#define L3C_EVTYPE_NONE 0xff
/*
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index 71801e30d601..617cf073e9aa 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -3,12 +3,13 @@
# Phy drivers for Amlogic platforms
#
config PHY_MESON8B_USB2
- tristate "Meson8, Meson8b and GXBB USB2 PHY driver"
+ tristate "Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver"
default ARCH_MESON
depends on OF && (ARCH_MESON || COMPILE_TEST)
depends on USB_SUPPORT
select USB_COMMON
select GENERIC_PHY
+ select REGMAP_MMIO
help
Enable this to support the Meson USB2 PHYs found in Meson8,
Meson8b and GXBB SoCs.
@@ -26,18 +27,6 @@ config PHY_MESON_GXL_USB2
GXL and GXM SoCs.
If unsure, say N.
-config PHY_MESON_GXL_USB3
- tristate "Meson GXL and GXM USB3 PHY drivers"
- default ARCH_MESON
- depends on OF && (ARCH_MESON || COMPILE_TEST)
- depends on USB_SUPPORT
- select GENERIC_PHY
- select REGMAP_MMIO
- help
- Enable this to support the Meson USB3 PHY and OTG detection
- IP block found in Meson GXL and GXM SoCs.
- If unsure, say N.
-
config PHY_MESON_G12A_USB2
tristate "Meson G12A USB2 PHY driver"
default ARCH_MESON
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index e2baa133f7af..99702a45e9be 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -2,7 +2,6 @@
obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
-obj-$(CONFIG_PHY_MESON_GXL_USB3) += phy-meson-gxl-usb3.o
obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o
obj-$(CONFIG_PHY_MESON_AXG_PCIE) += phy-meson-axg-pcie.o
obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG) += phy-meson-axg-mipi-pcie-analog.o
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb3.c b/drivers/phy/amlogic/phy-meson-gxl-usb3.c
deleted file mode 100644
index c0e9e4c16149..000000000000
--- a/drivers/phy/amlogic/phy-meson-gxl-usb3.c
+++ /dev/null
@@ -1,283 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Meson GXL USB3 PHY and OTG mode detection driver
- *
- * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
- */
-
-#include <linux/bitfield.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/platform_device.h>
-
-#define USB_R0 0x00
- #define USB_R0_P30_FSEL_MASK GENMASK(5, 0)
- #define USB_R0_P30_PHY_RESET BIT(6)
- #define USB_R0_P30_TEST_POWERDOWN_HSP BIT(7)
- #define USB_R0_P30_TEST_POWERDOWN_SSP BIT(8)
- #define USB_R0_P30_ACJT_LEVEL_MASK GENMASK(13, 9)
- #define USB_R0_P30_TX_BOOST_LEVEL_MASK GENMASK(16, 14)
- #define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
- #define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
- #define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
- #define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
- #define USB_R0_U2D_ACT BIT(31)
-
-#define USB_R1 0x04
- #define USB_R1_U3H_BIGENDIAN_GS BIT(0)
- #define USB_R1_U3H_PME_ENABLE BIT(1)
- #define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(6, 2)
- #define USB_R1_U3H_HUB_PORT_PERM_ATTACH_MASK GENMASK(11, 7)
- #define USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK GENMASK(15, 12)
- #define USB_R1_U3H_HOST_U3_PORT_DISABLE BIT(16)
- #define USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT BIT(17)
- #define USB_R1_U3H_HOST_MSI_ENABLE BIT(18)
- #define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
- #define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
-
-#define USB_R2 0x08
- #define USB_R2_P30_CR_DATA_IN_MASK GENMASK(15, 0)
- #define USB_R2_P30_CR_READ BIT(16)
- #define USB_R2_P30_CR_WRITE BIT(17)
- #define USB_R2_P30_CR_CAP_ADDR BIT(18)
- #define USB_R2_P30_CR_CAP_DATA BIT(19)
- #define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
- #define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
-
-#define USB_R3 0x0c
- #define USB_R3_P30_SSC_ENABLE BIT(0)
- #define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
- #define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
- #define USB_R3_P30_REF_SSP_EN BIT(13)
- #define USB_R3_P30_LOS_BIAS_MASK GENMASK(18, 16)
- #define USB_R3_P30_LOS_LEVEL_MASK GENMASK(23, 19)
- #define USB_R3_P30_MPLL_MULTIPLIER_MASK GENMASK(30, 24)
-
-#define USB_R4 0x10
- #define USB_R4_P21_PORT_RESET_0 BIT(0)
- #define USB_R4_P21_SLEEP_M0 BIT(1)
- #define USB_R4_MEM_PD_MASK GENMASK(3, 2)
- #define USB_R4_P21_ONLY BIT(4)
-
-#define USB_R5 0x14
- #define USB_R5_ID_DIG_SYNC BIT(0)
- #define USB_R5_ID_DIG_REG BIT(1)
- #define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
- #define USB_R5_ID_DIG_EN_0 BIT(4)
- #define USB_R5_ID_DIG_EN_1 BIT(5)
- #define USB_R5_ID_DIG_CURR BIT(6)
- #define USB_R5_ID_DIG_IRQ BIT(7)
- #define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
- #define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
-
-/* read-only register */
-#define USB_R6 0x18
- #define USB_R6_P30_CR_DATA_OUT_MASK GENMASK(15, 0)
- #define USB_R6_P30_CR_ACK BIT(16)
-
-struct phy_meson_gxl_usb3_priv {
- struct regmap *regmap;
- enum phy_mode mode;
- struct clk *clk_phy;
- struct clk *clk_peripheral;
- struct reset_control *reset;
-};
-
-static const struct regmap_config phy_meson_gxl_usb3_regmap_conf = {
- .reg_bits = 8,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = USB_R6,
-};
-
-static int phy_meson_gxl_usb3_power_on(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_0,
- USB_R5_ID_DIG_EN_0);
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_1,
- USB_R5_ID_DIG_EN_1);
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_TH_MASK,
- FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
-
- return 0;
-}
-
-static int phy_meson_gxl_usb3_power_off(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_0, 0);
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_EN_1, 0);
-
- return 0;
-}
-
-static int phy_meson_gxl_usb3_set_mode(struct phy *phy,
- enum phy_mode mode, int submode)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- switch (mode) {
- case PHY_MODE_USB_HOST:
- regmap_update_bits(priv->regmap, USB_R0, USB_R0_U2D_ACT, 0);
- regmap_update_bits(priv->regmap, USB_R4, USB_R4_P21_SLEEP_M0,
- 0);
- break;
-
- case PHY_MODE_USB_DEVICE:
- regmap_update_bits(priv->regmap, USB_R0, USB_R0_U2D_ACT,
- USB_R0_U2D_ACT);
- regmap_update_bits(priv->regmap, USB_R4, USB_R4_P21_SLEEP_M0,
- USB_R4_P21_SLEEP_M0);
- break;
-
- default:
- dev_err(&phy->dev, "unsupported PHY mode %d\n", mode);
- return -EINVAL;
- }
-
- priv->mode = mode;
-
- return 0;
-}
-
-static int phy_meson_gxl_usb3_init(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
- int ret;
-
- ret = reset_control_reset(priv->reset);
- if (ret)
- goto err;
-
- ret = clk_prepare_enable(priv->clk_phy);
- if (ret)
- goto err;
-
- ret = clk_prepare_enable(priv->clk_peripheral);
- if (ret)
- goto err_disable_clk_phy;
-
- ret = phy_meson_gxl_usb3_set_mode(phy, priv->mode, 0);
- if (ret)
- goto err_disable_clk_peripheral;
-
- regmap_update_bits(priv->regmap, USB_R1,
- USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
- FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
-
- return 0;
-
-err_disable_clk_peripheral:
- clk_disable_unprepare(priv->clk_peripheral);
-err_disable_clk_phy:
- clk_disable_unprepare(priv->clk_phy);
-err:
- return ret;
-}
-
-static int phy_meson_gxl_usb3_exit(struct phy *phy)
-{
- struct phy_meson_gxl_usb3_priv *priv = phy_get_drvdata(phy);
-
- clk_disable_unprepare(priv->clk_peripheral);
- clk_disable_unprepare(priv->clk_phy);
-
- return 0;
-}
-
-static const struct phy_ops phy_meson_gxl_usb3_ops = {
- .power_on = phy_meson_gxl_usb3_power_on,
- .power_off = phy_meson_gxl_usb3_power_off,
- .set_mode = phy_meson_gxl_usb3_set_mode,
- .init = phy_meson_gxl_usb3_init,
- .exit = phy_meson_gxl_usb3_exit,
- .owner = THIS_MODULE,
-};
-
-static int phy_meson_gxl_usb3_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct phy_meson_gxl_usb3_priv *priv;
- struct resource *res;
- struct phy *phy;
- struct phy_provider *phy_provider;
- void __iomem *base;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- priv->regmap = devm_regmap_init_mmio(dev, base,
- &phy_meson_gxl_usb3_regmap_conf);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
-
- priv->clk_phy = devm_clk_get(dev, "phy");
- if (IS_ERR(priv->clk_phy))
- return PTR_ERR(priv->clk_phy);
-
- priv->clk_peripheral = devm_clk_get(dev, "peripheral");
- if (IS_ERR(priv->clk_peripheral))
- return PTR_ERR(priv->clk_peripheral);
-
- priv->reset = devm_reset_control_array_get_shared(dev);
- if (IS_ERR(priv->reset))
- return PTR_ERR(priv->reset);
-
- /*
- * default to host mode as hardware defaults and/or boot-loader
- * behavior can result in this PHY starting up in device mode. this
- * default and the initialization in phy_meson_gxl_usb3_init ensure
- * that we reproducibly start in a known mode on all devices.
- */
- priv->mode = PHY_MODE_USB_HOST;
-
- phy = devm_phy_create(dev, np, &phy_meson_gxl_usb3_ops);
- if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
- }
-
- phy_set_drvdata(phy, priv);
-
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-
- return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id phy_meson_gxl_usb3_of_match[] = {
- { .compatible = "amlogic,meson-gxl-usb3-phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, phy_meson_gxl_usb3_of_match);
-
-static struct platform_driver phy_meson_gxl_usb3_driver = {
- .probe = phy_meson_gxl_usb3_probe,
- .driver = {
- .name = "phy-meson-gxl-usb3",
- .of_match_table = phy_meson_gxl_usb3_of_match,
- },
-};
-module_platform_driver(phy_meson_gxl_usb3_driver);
-
-MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
-MODULE_DESCRIPTION("Meson GXL USB3 PHY and OTG detection driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index bd66bd723e4a..03c061dd5f0d 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -10,6 +10,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -76,6 +78,17 @@
#define REG_ADP_BC_ACA_PIN_FLOAT BIT(26)
#define REG_DBG_UART 0x10
+ #define REG_DBG_UART_BYPASS_SEL BIT(0)
+ #define REG_DBG_UART_BYPASS_DM_EN BIT(1)
+ #define REG_DBG_UART_BYPASS_DP_EN BIT(2)
+ #define REG_DBG_UART_BYPASS_DM_DATA BIT(3)
+ #define REG_DBG_UART_BYPASS_DP_DATA BIT(4)
+ #define REG_DBG_UART_FSV_MINUS BIT(5)
+ #define REG_DBG_UART_FSV_PLUS BIT(6)
+ #define REG_DBG_UART_FSV_BURN_IN_TEST BIT(7)
+ #define REG_DBG_UART_LOOPBACK_EN_B BIT(8)
+ #define REG_DBG_UART_SET_IDDQ BIT(9)
+ #define REG_DBG_UART_ATE_RESET BIT(10)
#define REG_TEST 0x14
#define REG_TEST_DATA_IN_MASK GENMASK(3, 0)
@@ -104,35 +117,30 @@
#define RESET_COMPLETE_TIME 500
#define ACA_ENABLE_COMPLETE_TIME 50
-struct phy_meson8b_usb2_priv {
- void __iomem *regs;
- enum usb_dr_mode dr_mode;
- struct clk *clk_usb_general;
- struct clk *clk_usb;
- struct reset_control *reset;
+struct phy_meson8b_usb2_match_data {
+ bool host_enable_aca;
};
-static u32 phy_meson8b_usb2_read(struct phy_meson8b_usb2_priv *phy_priv,
- u32 reg)
-{
- return readl(phy_priv->regs + reg);
-}
-
-static void phy_meson8b_usb2_mask_bits(struct phy_meson8b_usb2_priv *phy_priv,
- u32 reg, u32 mask, u32 value)
-{
- u32 data;
-
- data = phy_meson8b_usb2_read(phy_priv, reg);
- data &= ~mask;
- data |= (value & mask);
+struct phy_meson8b_usb2_priv {
+ struct regmap *regmap;
+ enum usb_dr_mode dr_mode;
+ struct clk *clk_usb_general;
+ struct clk *clk_usb;
+ struct reset_control *reset;
+ const struct phy_meson8b_usb2_match_data *match;
+};
- writel(data, phy_priv->regs + reg);
-}
+static const struct regmap_config phy_meson8b_usb2_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_TUNE,
+};
static int phy_meson8b_usb2_power_on(struct phy *phy)
{
struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+ u32 reg;
int ret;
if (!IS_ERR_OR_NULL(priv->reset)) {
@@ -156,38 +164,43 @@ static int phy_meson8b_usb2_power_on(struct phy *phy)
return ret;
}
- phy_meson8b_usb2_mask_bits(priv, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
- REG_CONFIG_CLK_32k_ALTSEL);
+ regmap_update_bits(priv->regmap, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
+ REG_CONFIG_CLK_32k_ALTSEL);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
- 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
+ 0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_FSEL_MASK,
- 0x5 << REG_CTRL_FSEL_SHIFT);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_FSEL_MASK,
+ 0x5 << REG_CTRL_FSEL_SHIFT);
/* reset the PHY */
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET,
- REG_CTRL_POWER_ON_RESET);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET,
+ REG_CTRL_POWER_ON_RESET);
udelay(RESET_COMPLETE_TIME);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
udelay(RESET_COMPLETE_TIME);
- phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
- REG_CTRL_SOF_TOGGLE_OUT);
+ regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
+ REG_CTRL_SOF_TOGGLE_OUT);
if (priv->dr_mode == USB_DR_MODE_HOST) {
- phy_meson8b_usb2_mask_bits(priv, REG_ADP_BC,
+ regmap_update_bits(priv->regmap, REG_DBG_UART,
+ REG_DBG_UART_SET_IDDQ, 0);
+
+ if (priv->match->host_enable_aca) {
+ regmap_update_bits(priv->regmap, REG_ADP_BC,
REG_ADP_BC_ACA_ENABLE,
REG_ADP_BC_ACA_ENABLE);
- udelay(ACA_ENABLE_COMPLETE_TIME);
+ udelay(ACA_ENABLE_COMPLETE_TIME);
- if (phy_meson8b_usb2_read(priv, REG_ADP_BC) &
- REG_ADP_BC_ACA_PIN_FLOAT) {
- dev_warn(&phy->dev, "USB ID detect failed!\n");
- clk_disable_unprepare(priv->clk_usb);
- clk_disable_unprepare(priv->clk_usb_general);
- return -EINVAL;
+ regmap_read(priv->regmap, REG_ADP_BC, &reg);
+ if (reg & REG_ADP_BC_ACA_PIN_FLOAT) {
+ dev_warn(&phy->dev, "USB ID detect failed!\n");
+ clk_disable_unprepare(priv->clk_usb);
+ clk_disable_unprepare(priv->clk_usb_general);
+ return -EINVAL;
+ }
}
}
@@ -198,6 +211,11 @@ static int phy_meson8b_usb2_power_off(struct phy *phy)
{
struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+ if (priv->dr_mode == USB_DR_MODE_HOST)
+ regmap_update_bits(priv->regmap, REG_DBG_UART,
+ REG_DBG_UART_SET_IDDQ,
+ REG_DBG_UART_SET_IDDQ);
+
clk_disable_unprepare(priv->clk_usb);
clk_disable_unprepare(priv->clk_usb_general);
@@ -213,18 +231,26 @@ static const struct phy_ops phy_meson8b_usb2_ops = {
static int phy_meson8b_usb2_probe(struct platform_device *pdev)
{
struct phy_meson8b_usb2_priv *priv;
- struct resource *res;
struct phy *phy;
struct phy_provider *phy_provider;
+ void __iomem *base;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->match = device_get_match_data(&pdev->dev);
+ if (!priv->match)
+ return -ENODEV;
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+ &phy_meson8b_usb2_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
priv->clk_usb_general = devm_clk_get(&pdev->dev, "usb_general");
if (IS_ERR(priv->clk_usb_general))
@@ -259,11 +285,32 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static const struct phy_meson8b_usb2_match_data phy_meson8_usb2_match_data = {
+ .host_enable_aca = false,
+};
+
+static const struct phy_meson8b_usb2_match_data phy_meson8b_usb2_match_data = {
+ .host_enable_aca = true,
+};
+
static const struct of_device_id phy_meson8b_usb2_of_match[] = {
- { .compatible = "amlogic,meson8-usb2-phy", },
- { .compatible = "amlogic,meson8b-usb2-phy", },
- { .compatible = "amlogic,meson-gxbb-usb2-phy", },
- { },
+ {
+ .compatible = "amlogic,meson8-usb2-phy",
+ .data = &phy_meson8_usb2_match_data
+ },
+ {
+ .compatible = "amlogic,meson8b-usb2-phy",
+ .data = &phy_meson8b_usb2_match_data
+ },
+ {
+ .compatible = "amlogic,meson8m2-usb2-phy",
+ .data = &phy_meson8b_usb2_match_data
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-usb2-phy",
+ .data = &phy_meson8b_usb2_match_data
+ },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, phy_meson8b_usb2_of_match);
@@ -277,5 +324,5 @@ static struct platform_driver phy_meson8b_usb2_driver = {
module_platform_driver(phy_meson8b_usb2_driver);
MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
-MODULE_DESCRIPTION("Meson8, Meson8b and GXBB USB2 PHY driver");
+MODULE_DESCRIPTION("Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver");
MODULE_LICENSE("GPL");
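The meson8b PHY conversion replaces an open-coded read/modify/write helper with regmap_update_bits(), which performs the same masked update through the MMIO regmap. A minimal sketch using the driver's own register names (the helper itself is illustrative):

static void example_set_fsel(struct phy_meson8b_usb2_priv *priv)
{
	/* read REG_CTRL, clear the FSEL field, OR in 0x5, write it back */
	regmap_update_bits(priv->regmap, REG_CTRL, REG_CTRL_FSEL_MASK,
			   0x5 << REG_CTRL_FSEL_SHIFT);
}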
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index 7ceea5ae2704..527625912b78 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -279,7 +279,7 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
.init = ns2_drd_phy_init,
.power_on = ns2_drd_phy_poweron,
.power_off = ns2_drd_phy_poweroff,
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index fe6c58910e4c..77c025a0720c 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -16,8 +16,6 @@ enum bcm_usb_phy_version {
};
enum bcm_usb_phy_reg {
- PLL_NDIV_FRAC,
- PLL_NDIV_INT,
PLL_CTRL,
PHY_CTRL,
PHY_PLL_CTRL,
@@ -31,18 +29,11 @@ static const u8 bcm_usb_combo_phy_ss[] = {
};
static const u8 bcm_usb_combo_phy_hs[] = {
- [PLL_NDIV_FRAC] = 0x04,
- [PLL_NDIV_INT] = 0x08,
[PLL_CTRL] = 0x0c,
[PHY_CTRL] = 0x10,
};
-#define HSPLL_NDIV_INT_VAL 0x13
-#define HSPLL_NDIV_FRAC_VAL 0x1005
-
static const u8 bcm_usb_hs_phy[] = {
- [PLL_NDIV_FRAC] = 0x0,
- [PLL_NDIV_INT] = 0x4,
[PLL_CTRL] = 0x8,
[PHY_CTRL] = 0xc,
};
@@ -52,7 +43,6 @@ enum pll_ctrl_bits {
SSPLL_SUSPEND_EN,
PLL_SEQ_START,
PLL_LOCK,
- PLL_PDIV,
};
static const u8 u3pll_ctrl[] = {
@@ -66,29 +56,17 @@ static const u8 u3pll_ctrl[] = {
#define HSPLL_PDIV_VAL 0x1
static const u8 u2pll_ctrl[] = {
- [PLL_PDIV] = 1,
[PLL_RESETB] = 5,
[PLL_LOCK] = 6,
};
enum bcm_usb_phy_ctrl_bits {
CORERDY,
- AFE_LDO_PWRDWNB,
- AFE_PLL_PWRDWNB,
- AFE_BG_PWRDWNB,
- PHY_ISO,
PHY_RESETB,
PHY_PCTL,
};
#define PHY_PCTL_MASK 0xffff
-/*
- * 0x0806 of PCTL_VAL has below bits set
- * BIT-8 : refclk divider 1
- * BIT-3:2: device mode; mode is not effect
- * BIT-1: soft reset active low
- */
-#define HSPHY_PCTL_VAL 0x0806
#define SSPHY_PCTL_VAL 0x0006
static const u8 u3phy_ctrl[] = {
@@ -98,10 +76,6 @@ static const u8 u3phy_ctrl[] = {
static const u8 u2phy_ctrl[] = {
[CORERDY] = 0,
- [AFE_LDO_PWRDWNB] = 1,
- [AFE_PLL_PWRDWNB] = 2,
- [AFE_BG_PWRDWNB] = 3,
- [PHY_ISO] = 4,
[PHY_RESETB] = 5,
[PHY_PCTL] = 6,
};
@@ -186,38 +160,13 @@ static int bcm_usb_hs_phy_init(struct bcm_usb_phy_cfg *phy_cfg)
int ret = 0;
void __iomem *regs = phy_cfg->regs;
const u8 *offset;
- u32 rd_data;
offset = phy_cfg->offset;
- writel(HSPLL_NDIV_INT_VAL, regs + offset[PLL_NDIV_INT]);
- writel(HSPLL_NDIV_FRAC_VAL, regs + offset[PLL_NDIV_FRAC]);
-
- rd_data = readl(regs + offset[PLL_CTRL]);
- rd_data &= ~(HSPLL_PDIV_MASK << u2pll_ctrl[PLL_PDIV]);
- rd_data |= (HSPLL_PDIV_VAL << u2pll_ctrl[PLL_PDIV]);
- writel(rd_data, regs + offset[PLL_CTRL]);
-
- /* Set Core Ready high */
- bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
- BIT(u2phy_ctrl[CORERDY]));
-
- /* Maximum timeout for Core Ready done */
- msleep(30);
-
+ bcm_usb_reg32_clrbits(regs + offset[PLL_CTRL],
+ BIT(u2pll_ctrl[PLL_RESETB]));
bcm_usb_reg32_setbits(regs + offset[PLL_CTRL],
BIT(u2pll_ctrl[PLL_RESETB]));
- bcm_usb_reg32_setbits(regs + offset[PHY_CTRL],
- BIT(u2phy_ctrl[PHY_RESETB]));
-
-
- rd_data = readl(regs + offset[PHY_CTRL]);
- rd_data &= ~(PHY_PCTL_MASK << u2phy_ctrl[PHY_PCTL]);
- rd_data |= (HSPHY_PCTL_VAL << u2phy_ctrl[PHY_PCTL]);
- writel(rd_data, regs + offset[PHY_CTRL]);
-
- /* Maximum timeout for PLL reset done */
- msleep(30);
ret = bcm_usb_pll_lock_check(regs + offset[PLL_CTRL],
BIT(u2pll_ctrl[PLL_LOCK]));
@@ -256,7 +205,7 @@ static int bcm_usb_phy_init(struct phy *phy)
return ret;
}
-static struct phy_ops sr_phy_ops = {
+static const struct phy_ops sr_phy_ops = {
.init = bcm_usb_phy_init,
.reset = bcm_usb_phy_reset,
.owner = THIS_MODULE,
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 491bbd46c5b3..99fbc7e4138b 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -39,14 +39,14 @@ struct match_chip_info {
u8 optional_reg;
};
-static struct value_to_name_map brcm_dr_mode_to_name[] = {
+static const struct value_to_name_map brcm_dr_mode_to_name[] = {
{ USB_CTLR_MODE_HOST, "host" },
{ USB_CTLR_MODE_DEVICE, "peripheral" },
{ USB_CTLR_MODE_DRD, "drd" },
{ USB_CTLR_MODE_TYPEC_PD, "typec-pd" }
};
-static struct value_to_name_map brcm_dual_mode_to_name[] = {
+static const struct value_to_name_map brcm_dual_mode_to_name[] = {
{ 0, "host" },
{ 1, "device" },
{ 2, "auto" },
@@ -138,7 +138,7 @@ static int brcm_usb_phy_exit(struct phy *gphy)
return 0;
}
-static struct phy_ops brcm_usb_phy_ops = {
+static const struct phy_ops brcm_usb_phy_ops = {
.init = brcm_usb_phy_init,
.exit = brcm_usb_phy_exit,
.owner = THIS_MODULE,
@@ -170,7 +170,7 @@ static struct phy *brcm_usb_phy_xlate(struct device *dev,
return ERR_PTR(-ENODEV);
}
-static int name_to_value(struct value_to_name_map *table, int count,
+static int name_to_value(const struct value_to_name_map *table, int count,
const char *name, int *value)
{
int x;
@@ -185,7 +185,7 @@ static int name_to_value(struct value_to_name_map *table, int count,
return -EINVAL;
}
-static const char *value_to_name(struct value_to_name_map *table, int count,
+static const char *value_to_name(const struct value_to_name_map *table, int count,
int value)
{
if (value >= count)
@@ -252,7 +252,7 @@ static const struct attribute_group brcm_usb_phy_group = {
.attrs = brcm_usb_phy_attrs,
};
-static struct match_chip_info chip_info_7216 = {
+static const struct match_chip_info chip_info_7216 = {
.init_func = &brcm_usb_dvr_init_7216,
.required_regs = {
BRCM_REGS_CTRL,
@@ -262,7 +262,7 @@ static struct match_chip_info chip_info_7216 = {
},
};
-static struct match_chip_info chip_info_7211b0 = {
+static const struct match_chip_info chip_info_7211b0 = {
.init_func = &brcm_usb_dvr_init_7211b0,
.required_regs = {
BRCM_REGS_CTRL,
@@ -275,7 +275,7 @@ static struct match_chip_info chip_info_7211b0 = {
.optional_reg = BRCM_REGS_BDC_EC,
};
-static struct match_chip_info chip_info_7445 = {
+static const struct match_chip_info chip_info_7445 = {
.init_func = &brcm_usb_dvr_init_7445,
.required_regs = {
BRCM_REGS_CTRL,
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
index 459545871608..432832bdbd16 100644
--- a/drivers/phy/cadence/Kconfig
+++ b/drivers/phy/cadence/Kconfig
@@ -27,3 +27,12 @@ config PHY_CADENCE_SIERRA
select GENERIC_PHY
help
Enable this to support the Cadence Sierra PHY driver
+
+config PHY_CADENCE_SALVO
+ tristate "Cadence Salvo PHY Driver"
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the Cadence SALVO PHY driver.
+ This is a legacy PHY, used only for USB3 and USB2.
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
index 6a7ffc6ea599..26e16bd34efe 100644
--- a/drivers/phy/cadence/Makefile
+++ b/drivers/phy/cadence/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_PHY_CADENCE_TORRENT) += phy-cadence-torrent.o
obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o
obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o
+obj-$(CONFIG_PHY_CADENCE_SALVO) += phy-cadence-salvo.o
diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c
new file mode 100644
index 000000000000..1ecbb964cd21
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-salvo.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * The Salvo PHY is a 28nm legacy PHY, used only for USB3
+ * and USB2.
+ *
+ * Copyright (c) 2019-2020 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+/* PHY register definition */
+#define PHY_PMA_CMN_CTRL1 0xC800
+#define TB_ADDR_CMN_DIAG_HSCLK_SEL 0x01e0
+#define TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR 0x0084
+#define TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR 0x0085
+#define TB_ADDR_CMN_PLL0_INTDIV 0x0094
+#define TB_ADDR_CMN_PLL0_FRACDIV 0x0095
+#define TB_ADDR_CMN_PLL0_HIGH_THR 0x0096
+#define TB_ADDR_CMN_PLL0_SS_CTRL1 0x0098
+#define TB_ADDR_CMN_PLL0_SS_CTRL2 0x0099
+#define TB_ADDR_CMN_PLL0_DSM_DIAG 0x0097
+#define TB_ADDR_CMN_DIAG_PLL0_OVRD 0x01c2
+#define TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD 0x01c0
+#define TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD 0x01c1
+#define TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE 0x01C5
+#define TB_ADDR_CMN_DIAG_PLL0_CP_TUNE 0x01C6
+#define TB_ADDR_CMN_DIAG_PLL0_LF_PROG 0x01C7
+#define TB_ADDR_CMN_DIAG_PLL0_TEST_MODE 0x01c4
+#define TB_ADDR_CMN_PSM_CLK_CTRL 0x0061
+#define TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR 0x40ea
+#define TB_ADDR_XCVR_PSM_RCTRL 0x4001
+#define TB_ADDR_TX_PSC_A0 0x4100
+#define TB_ADDR_TX_PSC_A1 0x4101
+#define TB_ADDR_TX_PSC_A2 0x4102
+#define TB_ADDR_TX_PSC_A3 0x4103
+#define TB_ADDR_TX_DIAG_ECTRL_OVRD 0x41f5
+#define TB_ADDR_TX_PSC_CAL 0x4106
+#define TB_ADDR_TX_PSC_RDY 0x4107
+#define TB_ADDR_RX_PSC_A0 0x8000
+#define TB_ADDR_RX_PSC_A1 0x8001
+#define TB_ADDR_RX_PSC_A2 0x8002
+#define TB_ADDR_RX_PSC_A3 0x8003
+#define TB_ADDR_RX_PSC_CAL 0x8006
+#define TB_ADDR_RX_PSC_RDY 0x8007
+#define TB_ADDR_TX_TXCC_MGNLS_MULT_000 0x4058
+#define TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY 0x41e7
+#define TB_ADDR_RX_SLC_CU_ITER_TMR 0x80e3
+#define TB_ADDR_RX_SIGDET_HL_FILT_TMR 0x8090
+#define TB_ADDR_RX_SAMP_DAC_CTRL 0x8058
+#define TB_ADDR_RX_DIAG_SIGDET_TUNE 0x81dc
+#define TB_ADDR_RX_DIAG_LFPSDET_TUNE2 0x81df
+#define TB_ADDR_RX_DIAG_BS_TM 0x81f5
+#define TB_ADDR_RX_DIAG_DFE_CTRL1 0x81d3
+#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM4 0x81c7
+#define TB_ADDR_RX_DIAG_ILL_E_TRIM0 0x81c2
+#define TB_ADDR_RX_DIAG_ILL_IQ_TRIM0 0x81c1
+#define TB_ADDR_RX_DIAG_ILL_IQE_TRIM6 0x81c9
+#define TB_ADDR_RX_DIAG_RXFE_TM3 0x81f8
+#define TB_ADDR_RX_DIAG_RXFE_TM4 0x81f9
+#define TB_ADDR_RX_DIAG_LFPSDET_TUNE 0x81dd
+#define TB_ADDR_RX_DIAG_DFE_CTRL3 0x81d5
+#define TB_ADDR_RX_DIAG_SC2C_DELAY 0x81e1
+#define TB_ADDR_RX_REE_VGA_GAIN_NODFE 0x81bf
+#define TB_ADDR_XCVR_PSM_CAL_TMR 0x4002
+#define TB_ADDR_XCVR_PSM_A0BYP_TMR 0x4004
+#define TB_ADDR_XCVR_PSM_A0IN_TMR 0x4003
+#define TB_ADDR_XCVR_PSM_A1IN_TMR 0x4005
+#define TB_ADDR_XCVR_PSM_A2IN_TMR 0x4006
+#define TB_ADDR_XCVR_PSM_A3IN_TMR 0x4007
+#define TB_ADDR_XCVR_PSM_A4IN_TMR 0x4008
+#define TB_ADDR_XCVR_PSM_A5IN_TMR 0x4009
+#define TB_ADDR_XCVR_PSM_A0OUT_TMR 0x400a
+#define TB_ADDR_XCVR_PSM_A1OUT_TMR 0x400b
+#define TB_ADDR_XCVR_PSM_A2OUT_TMR 0x400c
+#define TB_ADDR_XCVR_PSM_A3OUT_TMR 0x400d
+#define TB_ADDR_XCVR_PSM_A4OUT_TMR 0x400e
+#define TB_ADDR_XCVR_PSM_A5OUT_TMR 0x400f
+#define TB_ADDR_TX_RCVDET_EN_TMR 0x4122
+#define TB_ADDR_TX_RCVDET_ST_TMR 0x4123
+#define TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR 0x40f2
+#define TB_ADDR_TX_RCVDETSC_CTRL 0x4124
+
+/* TB_ADDR_TX_RCVDETSC_CTRL */
+#define RXDET_IN_P3_32KHZ BIT(1)
+
+struct cdns_reg_pairs {
+ u16 val;
+ u32 off;
+};
+
+struct cdns_salvo_data {
+ u8 reg_offset_shift;
+ struct cdns_reg_pairs *init_sequence_val;
+ u8 init_sequence_length;
+};
+
+struct cdns_salvo_phy {
+ struct phy *phy;
+ struct clk *clk;
+ void __iomem *base;
+ struct cdns_salvo_data *data;
+};
+
+static const struct of_device_id cdns_salvo_phy_of_match[];
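+
+/*
+ * Register accessors: the TB_ADDR_* values above are register indices,
+ * not byte offsets.  An index is scaled by (1 << reg_offset_shift) when
+ * the MMIO window is accessed; with the NXP data below (shift of 2),
+ * index 0x4100 (TB_ADDR_TX_PSC_A0) lands at byte offset 0x10400.
+ */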
+static u16 cdns_salvo_read(struct cdns_salvo_phy *salvo_phy, u32 reg)
+{
+ return (u16)readl(salvo_phy->base +
+ reg * (1 << salvo_phy->data->reg_offset_shift));
+}
+
+static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy,
+ u32 reg, u16 val)
+{
+ writel(val, salvo_phy->base +
+ reg * (1 << salvo_phy->data->reg_offset_shift));
+}
+
+/*
+ * The bringup sequence pairs below are from the Cadence PHY User Guide
+ * and NXP platform tuning results.
+ */
+static struct cdns_reg_pairs cdns_nxp_sequence_pair[] = {
+ {0x0830, PHY_PMA_CMN_CTRL1},
+ {0x0010, TB_ADDR_CMN_DIAG_HSCLK_SEL},
+ {0x00f0, TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x0018, TB_ADDR_CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x00d0, TB_ADDR_CMN_PLL0_INTDIV},
+ {0x4aaa, TB_ADDR_CMN_PLL0_FRACDIV},
+ {0x0034, TB_ADDR_CMN_PLL0_HIGH_THR},
+ {0x01ee, TB_ADDR_CMN_PLL0_SS_CTRL1},
+ {0x7f03, TB_ADDR_CMN_PLL0_SS_CTRL2},
+ {0x0020, TB_ADDR_CMN_PLL0_DSM_DIAG},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_OVRD},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_FBH_OVRD},
+ {0x0000, TB_ADDR_CMN_DIAG_PLL0_FBL_OVRD},
+ {0x0007, TB_ADDR_CMN_DIAG_PLL0_V2I_TUNE},
+ {0x0027, TB_ADDR_CMN_DIAG_PLL0_CP_TUNE},
+ {0x0008, TB_ADDR_CMN_DIAG_PLL0_LF_PROG},
+ {0x0022, TB_ADDR_CMN_DIAG_PLL0_TEST_MODE},
+ {0x000a, TB_ADDR_CMN_PSM_CLK_CTRL},
+ {0x0139, TB_ADDR_XCVR_DIAG_RX_LANE_CAL_RST_TMR},
+ {0xbefc, TB_ADDR_XCVR_PSM_RCTRL},
+
+ {0x7799, TB_ADDR_TX_PSC_A0},
+ {0x7798, TB_ADDR_TX_PSC_A1},
+ {0x509b, TB_ADDR_TX_PSC_A2},
+ {0x0003, TB_ADDR_TX_DIAG_ECTRL_OVRD},
+ {0x509b, TB_ADDR_TX_PSC_A3},
+ {0x2090, TB_ADDR_TX_PSC_CAL},
+ {0x2090, TB_ADDR_TX_PSC_RDY},
+
+ {0xA6FD, TB_ADDR_RX_PSC_A0},
+ {0xA6FD, TB_ADDR_RX_PSC_A1},
+ {0xA410, TB_ADDR_RX_PSC_A2},
+ {0x2410, TB_ADDR_RX_PSC_A3},
+
+ {0x23FF, TB_ADDR_RX_PSC_CAL},
+ {0x2010, TB_ADDR_RX_PSC_RDY},
+
+ {0x0020, TB_ADDR_TX_TXCC_MGNLS_MULT_000},
+ {0x00ff, TB_ADDR_TX_DIAG_BGREF_PREDRV_DELAY},
+ {0x0002, TB_ADDR_RX_SLC_CU_ITER_TMR},
+ {0x0013, TB_ADDR_RX_SIGDET_HL_FILT_TMR},
+ {0x0000, TB_ADDR_RX_SAMP_DAC_CTRL},
+ {0x1004, TB_ADDR_RX_DIAG_SIGDET_TUNE},
+ {0x4041, TB_ADDR_RX_DIAG_LFPSDET_TUNE2},
+ {0x0480, TB_ADDR_RX_DIAG_BS_TM},
+ {0x8006, TB_ADDR_RX_DIAG_DFE_CTRL1},
+ {0x003f, TB_ADDR_RX_DIAG_ILL_IQE_TRIM4},
+ {0x543f, TB_ADDR_RX_DIAG_ILL_E_TRIM0},
+ {0x543f, TB_ADDR_RX_DIAG_ILL_IQ_TRIM0},
+ {0x0000, TB_ADDR_RX_DIAG_ILL_IQE_TRIM6},
+ {0x8000, TB_ADDR_RX_DIAG_RXFE_TM3},
+ {0x0003, TB_ADDR_RX_DIAG_RXFE_TM4},
+ {0x2408, TB_ADDR_RX_DIAG_LFPSDET_TUNE},
+ {0x05ca, TB_ADDR_RX_DIAG_DFE_CTRL3},
+ {0x0258, TB_ADDR_RX_DIAG_SC2C_DELAY},
+ {0x1fff, TB_ADDR_RX_REE_VGA_GAIN_NODFE},
+
+ {0x02c6, TB_ADDR_XCVR_PSM_CAL_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A0BYP_TMR},
+ {0x02c6, TB_ADDR_XCVR_PSM_A0IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A1IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A2IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A3IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A4IN_TMR},
+ {0x0010, TB_ADDR_XCVR_PSM_A5IN_TMR},
+
+ {0x0002, TB_ADDR_XCVR_PSM_A0OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A1OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A2OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A3OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A4OUT_TMR},
+ {0x0002, TB_ADDR_XCVR_PSM_A5OUT_TMR},
+ /* Change rx detect parameter */
+ {0x0960, TB_ADDR_TX_RCVDET_EN_TMR},
+ {0x01e0, TB_ADDR_TX_RCVDET_ST_TMR},
+ {0x0090, TB_ADDR_XCVR_DIAG_LANE_FCM_EN_MGN_TMR},
+};
+
+static int cdns_salvo_phy_init(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+ struct cdns_salvo_data *data = salvo_phy->data;
+ int ret, i;
+ u16 value;
+
+ ret = clk_prepare_enable(salvo_phy->clk);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < data->init_sequence_length; i++) {
+ struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i;
+
+ cdns_salvo_write(salvo_phy, reg_pair->off, reg_pair->val);
+ }
+
+ /* RXDET_IN_P3_32KHZ, Receiver detect slow clock enable */
+ value = cdns_salvo_read(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL);
+ value |= RXDET_IN_P3_32KHZ;
+ cdns_salvo_write(salvo_phy, TB_ADDR_TX_RCVDETSC_CTRL, value);
+
+ udelay(10);
+
+ clk_disable_unprepare(salvo_phy->clk);
+
+ return ret;
+}
+
+static int cdns_salvo_phy_power_on(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(salvo_phy->clk);
+}
+
+static int cdns_salvo_phy_power_off(struct phy *phy)
+{
+ struct cdns_salvo_phy *salvo_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(salvo_phy->clk);
+
+ return 0;
+}
+
+static struct phy_ops cdns_salvo_phy_ops = {
+ .init = cdns_salvo_phy_init,
+ .power_on = cdns_salvo_phy_power_on,
+ .power_off = cdns_salvo_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_salvo_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct cdns_salvo_phy *salvo_phy;
+ struct resource *res;
+ const struct of_device_id *match;
+ struct cdns_salvo_data *data;
+
+ match = of_match_device(cdns_salvo_phy_of_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct cdns_salvo_data *)match->data;
+ salvo_phy = devm_kzalloc(dev, sizeof(*salvo_phy), GFP_KERNEL);
+ if (!salvo_phy)
+ return -ENOMEM;
+
+ salvo_phy->data = data;
+ salvo_phy->clk = devm_clk_get_optional(dev, "salvo_phy_clk");
+ if (IS_ERR(salvo_phy->clk))
+ return PTR_ERR(salvo_phy->clk);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ salvo_phy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(salvo_phy->base))
+ return PTR_ERR(salvo_phy->base);
+
+ salvo_phy->phy = devm_phy_create(dev, NULL, &cdns_salvo_phy_ops);
+ if (IS_ERR(salvo_phy->phy))
+ return PTR_ERR(salvo_phy->phy);
+
+ phy_set_drvdata(salvo_phy->phy, salvo_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct cdns_salvo_data cdns_nxp_salvo_data = {
+ .reg_offset_shift = 2,
+ .init_sequence_val = cdns_nxp_sequence_pair,
+ .init_sequence_length = ARRAY_SIZE(cdns_nxp_sequence_pair),
+};
+
+static const struct of_device_id cdns_salvo_phy_of_match[] = {
+ {
+ .compatible = "nxp,salvo-phy",
+ .data = &cdns_nxp_salvo_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_salvo_phy_of_match);
+
+static struct platform_driver cdns_salvo_phy_driver = {
+ .probe = cdns_salvo_phy_probe,
+ .driver = {
+ .name = "cdns-salvo-phy",
+ .of_match_table = cdns_salvo_phy_of_match,
+ }
+};
+module_platform_driver(cdns_salvo_phy_driver);
+
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence SALVO PHY Driver");
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index a5c08e5bd2bf..faed652b73f7 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -685,10 +685,10 @@ static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
{0x000F, SIERRA_DET_STANDEC_B_PREG},
- {0x00A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x55A5, SIERRA_DET_STANDEC_C_PREG},
{0x69ad, SIERRA_DET_STANDEC_D_PREG},
{0x0241, SIERRA_DET_STANDEC_E_PREG},
- {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0110, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
{0x0014, SIERRA_PSM_A0IN_TMR_PREG},
{0xCF00, SIERRA_PSM_DIAG_PREG},
{0x001F, SIERRA_PSC_TX_A0_PREG},
@@ -696,7 +696,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x0003, SIERRA_PSC_TX_A2_PREG},
{0x0003, SIERRA_PSC_TX_A3_PREG},
{0x0FFF, SIERRA_PSC_RX_A0_PREG},
- {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A1_PREG},
{0x0003, SIERRA_PSC_RX_A2_PREG},
{0x0001, SIERRA_PSC_RX_A3_PREG},
{0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
@@ -705,19 +705,19 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
{0x2512, SIERRA_DFE_BIASTRIM_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
- {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
- {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
- {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
- {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x023C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
- {0x8000, SIERRA_CREQ_SPARE_PREG},
+ {0x0000, SIERRA_CREQ_SPARE_PREG},
{0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
- {0x8453, SIERRA_CTLELUT_CTRL_PREG},
- {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
- {0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
- {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x8452, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0003, SIERRA_DEQ_PHALIGN_CTRL},
{0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
{0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
@@ -725,7 +725,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
- {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x9999, SIERRA_DEQ_VGATUNE_CTRL_PREG},
{0x0014, SIERRA_DEQ_GLUT0},
{0x0014, SIERRA_DEQ_GLUT1},
{0x0014, SIERRA_DEQ_GLUT2},
@@ -772,6 +772,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x000F, SIERRA_LFPSFILT_NS_PREG},
{0x0009, SIERRA_LFPSFILT_RD_PREG},
{0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x6013, SIERRA_SIGDET_SUPPORT_PREG},
{0x8013, SIERRA_SDFILT_H2L_A_PREG},
{0x8009, SIERRA_SDFILT_L2H_PREG},
{0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
index 4ea6a8897cd7..7b47682a4e0e 100644
--- a/drivers/phy/intel/Kconfig
+++ b/drivers/phy/intel/Kconfig
@@ -2,8 +2,23 @@
#
# Phy drivers for Intel Lightning Mountain(LGM) platform
#
+config PHY_INTEL_COMBO
+ bool "Intel ComboPHY driver"
+ depends on X86 || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select MFD_SYSCON
+ select GENERIC_PHY
+ select REGMAP
+ help
+ Enable this to support the Intel ComboPhy.
+
+ This driver configures the ComboPhy subsystem on Intel gateway
+ chipsets, which provides PHYs for various controllers such as
+ EMAC, SATA and PCIe.
+
config PHY_INTEL_EMMC
tristate "Intel EMMC PHY driver"
+ depends on X86 || COMPILE_TEST
select GENERIC_PHY
help
Enable this to support the Intel EMMC PHY
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
index 6b876a75599d..233d530dadde 100644
--- a/drivers/phy/intel/Makefile
+++ b/drivers/phy/intel/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_INTEL_COMBO) += phy-intel-combo.o
obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o
diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-combo.c
new file mode 100644
index 000000000000..c2a35be4cdfb
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-combo.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Combo-PHY driver
+ *
+ * Copyright (C) 2019-2020 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#define PCIE_PHY_GEN_CTRL 0x00
+#define PCIE_PHY_CLK_PAD BIT(17)
+
+#define PAD_DIS_CFG 0x174
+
+#define PCS_XF_ATE_OVRD_IN_2 0x3008
+#define ADAPT_REQ_MSK GENMASK(5, 4)
+
+#define PCS_XF_RX_ADAPT_ACK 0x3010
+#define RX_ADAPT_ACK_BIT BIT(0)
+
+#define CR_ADDR(addr, lane) (((addr) + (lane) * 0x100) << 2)
+#define REG_COMBO_MODE(x) ((x) * 0x200)
+#define REG_CLK_DISABLE(x) ((x) * 0x200 + 0x124)
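+
+/*
+ * CR_ADDR() turns a per-lane register index into a byte offset: each lane
+ * occupies a window of 0x100 registers and the index is shifted left by
+ * two for 32-bit registers.  REG_COMBO_MODE() and REG_CLK_DISABLE() pick
+ * the mode and clock-disable registers of a ComboPhy block (bid) inside
+ * the HSIO register set.
+ */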
+
+#define COMBO_PHY_ID(x) ((x)->parent->id)
+#define PHY_ID(x) ((x)->id)
+
+#define CLK_100MHZ 100000000
+#define CLK_156_25MHZ 156250000
+
+static const unsigned long intel_iphy_clk_rates[] = {
+ CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
+};
+
+enum {
+ PHY_0,
+ PHY_1,
+ PHY_MAX_NUM
+};
+
+/*
+ * Clock Register bit fields to enable clocks
+ * for ComboPhy according to the mode.
+ */
+enum intel_phy_mode {
+ PHY_PCIE_MODE = 0,
+ PHY_XPCS_MODE,
+ PHY_SATA_MODE,
+};
+
+/* ComboPhy mode Register values */
+enum intel_combo_mode {
+ PCIE0_PCIE1_MODE = 0,
+ PCIE_DL_MODE,
+ RXAUI_MODE,
+ XPCS0_XPCS1_MODE,
+ SATA0_SATA1_MODE,
+};
+
+enum aggregated_mode {
+ PHY_SL_MODE,
+ PHY_DL_MODE,
+};
+
+struct intel_combo_phy;
+
+struct intel_cbphy_iphy {
+ struct phy *phy;
+ struct intel_combo_phy *parent;
+ struct reset_control *app_rst;
+ u32 id;
+};
+
+struct intel_combo_phy {
+ struct device *dev;
+ struct clk *core_clk;
+ unsigned long clk_rate;
+ void __iomem *app_base;
+ void __iomem *cr_base;
+ struct regmap *syscfg;
+ struct regmap *hsiocfg;
+ u32 id;
+ u32 bid;
+ struct reset_control *phy_rst;
+ struct reset_control *core_rst;
+ struct intel_cbphy_iphy iphy[PHY_MAX_NUM];
+ enum intel_phy_mode phy_mode;
+ enum aggregated_mode aggr_mode;
+ u32 init_cnt;
+ struct mutex lock;
+};
+
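+/*
+ * Each ComboPhy block has one clock-disable register (REG_CLK_DISABLE()).
+ * The bit used for an internal PHY is phy_mode * 2 + iphy->id, e.g. XPCS
+ * mode (1) on internal PHY 1 uses bit 3.  A cleared bit enables the
+ * clock, a set bit disables it.
+ */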
+static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
+ u32 val;
+
+ /* Register: 0 is enable, 1 is disable */
+ val = set ? 0 : mask;
+
+ return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
+ mask, val);
+}
+
+static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ u32 mask = BIT(cbphy->id * 2 + iphy->id);
+ u32 val;
+
+ /* Register: 0 is enable, 1 is disable */
+ val = set ? 0 : mask;
+
+ return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
+}
+
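+/*
+ * Read-modify-write helper: clear @mask in @reg and program @val into the
+ * field selected by @mask using FIELD_PREP().
+ */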
+static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
+ u32 mask, u32 val)
+{
+ u32 reg_val;
+
+ reg_val = readl(base + reg);
+ reg_val &= ~mask;
+ reg_val |= FIELD_PREP(mask, val);
+ writel(reg_val, base + reg);
+}
+
+static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
+ int (*phy_cfg)(struct intel_cbphy_iphy *))
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = phy_cfg(iphy);
+ if (ret)
+ return ret;
+
+ if (cbphy->aggr_mode != PHY_DL_MODE)
+ return 0;
+
+ return phy_cfg(&cbphy->iphy[PHY_1]);
+}
+
+static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
+ PCIE_PHY_CLK_PAD, 0);
+
+ /* Delay for stable clock PLL */
+ usleep_range(50, 100);
+
+ return 0;
+}
+
+static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
+ PCIE_PHY_CLK_PAD, 1);
+
+ return 0;
+}
+
+static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
+{
+ enum intel_combo_mode cb_mode = PHY_PCIE_MODE;
+ enum aggregated_mode aggr = cbphy->aggr_mode;
+ struct device *dev = cbphy->dev;
+ enum intel_phy_mode mode;
+ int ret;
+
+ mode = cbphy->phy_mode;
+
+ switch (mode) {
+ case PHY_PCIE_MODE:
+ cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
+ break;
+
+ case PHY_XPCS_MODE:
+ cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
+ break;
+
+ case PHY_SATA_MODE:
+ if (aggr == PHY_DL_MODE) {
dev_err(dev, "Mode:%u does not support dual lane!\n", mode);
+ return -EINVAL;
+ }
+
+ cb_mode = SATA0_SATA1_MODE;
+ break;
+ }
+
+ ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
+ if (ret)
+ dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);
+
+ return ret;
+}
+
+static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
+{
+ reset_control_assert(cbphy->core_rst);
+ reset_control_assert(cbphy->phy_rst);
+}
+
+static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
+{
+ reset_control_deassert(cbphy->core_rst);
+ reset_control_deassert(cbphy->phy_rst);
+ /* Delay to ensure reset process is done */
+ usleep_range(10, 20);
+}
+
+static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ if (!cbphy->init_cnt) {
+ ret = clk_prepare_enable(cbphy->core_clk);
+ if (ret) {
+ dev_err(cbphy->dev, "Clock enable failed!\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
+ if (ret) {
+ dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
+ cbphy->clk_rate);
+ goto clk_err;
+ }
+
+ intel_cbphy_rst_assert(cbphy);
+ intel_cbphy_rst_deassert(cbphy);
+ ret = intel_cbphy_set_mode(cbphy);
+ if (ret)
+ goto clk_err;
+ }
+
+ ret = intel_cbphy_iphy_enable(iphy, true);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed enabling PHY core\n");
+ goto clk_err;
+ }
+
+ ret = reset_control_deassert(iphy->app_rst);
+ if (ret) {
+ dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+ goto clk_err;
+ }
+
+ /* Delay to ensure reset process is done */
+ udelay(1);
+
+ return 0;
+
+clk_err:
+ clk_disable_unprepare(cbphy->core_clk);
+
+ return ret;
+}
+
+static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
+{
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ ret = reset_control_assert(iphy->app_rst);
+ if (ret) {
+ dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+ return ret;
+ }
+
+ ret = intel_cbphy_iphy_enable(iphy, false);
+ if (ret) {
+ dev_err(cbphy->dev, "Failed disabling PHY core\n");
+ return ret;
+ }
+
+ if (cbphy->init_cnt)
+ return 0;
+
+ clk_disable_unprepare(cbphy->core_clk);
+ intel_cbphy_rst_assert(cbphy);
+
+ return 0;
+}
+
+static int intel_cbphy_init(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ mutex_lock(&cbphy->lock);
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
+ if (ret)
+ goto err;
+
+ if (cbphy->phy_mode == PHY_PCIE_MODE) {
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
+ if (ret)
+ goto err;
+ }
+
+ cbphy->init_cnt++;
+
+err:
+ mutex_unlock(&cbphy->lock);
+
+ return ret;
+}
+
+static int intel_cbphy_exit(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ int ret;
+
+ mutex_lock(&cbphy->lock);
+ cbphy->init_cnt--;
+ if (cbphy->phy_mode == PHY_PCIE_MODE) {
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
+ if (ret)
+ goto err;
+ }
+
+ ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);
+
+err:
+ mutex_unlock(&cbphy->lock);
+
+ return ret;
+}
+
+static int intel_cbphy_calibrate(struct phy *phy)
+{
+ struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
+ struct intel_combo_phy *cbphy = iphy->parent;
+ void __iomem *cr_base = cbphy->cr_base;
+ int val, ret, id;
+
+ if (cbphy->phy_mode != PHY_XPCS_MODE)
+ return 0;
+
+ id = PHY_ID(iphy);
+
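+ /*
+ * ADAPT_REQ_MSK covers bits 5:4 of PCS_XF_ATE_OVRD_IN_2: writing 3
+ * through combo_phy_w32_off_mask() sets both bits to request
+ * adaptation, and writing 0 below withdraws the request.
+ */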
+ /* trigger auto RX adaptation */
+ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
+ ADAPT_REQ_MSK, 3);
+ /* Wait RX adaptation to finish */
+ ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
+ val, val & RX_ADAPT_ACK_BIT, 10, 5000);
+ if (ret)
+ dev_err(cbphy->dev, "RX Adaptation failed!\n");
+ else
+ dev_dbg(cbphy->dev, "RX Adaptation success!\n");
+
+ /* Stop RX adaptation */
+ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
+ ADAPT_REQ_MSK, 0);
+
+ return ret;
+}
+
+static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
+{
+ struct device *dev = cbphy->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_reference_args ref;
+ int ret;
+ u32 val;
+
+ cbphy->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cbphy->core_clk)) {
+ ret = PTR_ERR(cbphy->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get clk failed:%d!\n", ret);
+ return ret;
+ }
+
+ cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
+ if (IS_ERR(cbphy->core_rst)) {
+ ret = PTR_ERR(cbphy->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get core reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
+ if (IS_ERR(cbphy->phy_rst)) {
+ ret = PTR_ERR(cbphy->phy_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get PHY reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
+ if (IS_ERR(cbphy->iphy[0].app_rst)) {
+ ret = PTR_ERR(cbphy->iphy[0].app_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
+ if (IS_ERR(cbphy->iphy[1].app_rst)) {
+ ret = PTR_ERR(cbphy->iphy[1].app_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
+ return ret;
+ }
+
+ cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(cbphy->app_base))
+ return PTR_ERR(cbphy->app_base);
+
+ cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(cbphy->cr_base))
+ return PTR_ERR(cbphy->cr_base);
+
+ /*
+ * The syscfg and hsiocfg fields hold regmap handles for the register
+ * sets that contain the ComboPhy subsystem specific registers; the
+ * regmap framework is used to access them.
+ */
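+ /*
+ * An illustrative firmware description (phandle names are made up;
+ * only the property names and the single cell argument follow the
+ * code below):
+ *
+ * intel,syscfg = <&sysconf COMBO_PHY_ID>;
+ * intel,hsio = <&hsio HSIO_BLOCK_ID>;
+ *
+ * The argument provides cbphy->id and cbphy->bid respectively.
+ */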
+ ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
+ 1, 0, &ref);
+ if (ret < 0)
+ return ret;
+
+ cbphy->id = ref.args[0];
+ cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
+ fwnode_handle_put(ref.fwnode);
+
+ ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
+ 0, &ref);
+ if (ret < 0)
+ return ret;
+
+ cbphy->bid = ref.args[0];
+ cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
+ fwnode_handle_put(ref.fwnode);
+
+ ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case PHY_TYPE_PCIE:
+ cbphy->phy_mode = PHY_PCIE_MODE;
+ break;
+
+ case PHY_TYPE_SATA:
+ cbphy->phy_mode = PHY_SATA_MODE;
+ break;
+
+ case PHY_TYPE_XPCS:
+ cbphy->phy_mode = PHY_XPCS_MODE;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PHY mode: %u\n", val);
+ return -EINVAL;
+ }
+
+ cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];
+
+ if (fwnode_property_present(fwnode, "intel,aggregation"))
+ cbphy->aggr_mode = PHY_DL_MODE;
+ else
+ cbphy->aggr_mode = PHY_SL_MODE;
+
+ return 0;
+}
+
+static const struct phy_ops intel_cbphy_ops = {
+ .init = intel_cbphy_init,
+ .exit = intel_cbphy_exit,
+ .calibrate = intel_cbphy_calibrate,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *intel_cbphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
+ u32 iphy_id;
+
+ if (args->args_count < 1) {
+ dev_err(dev, "Invalid number of arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iphy_id = args->args[0];
+ if (iphy_id >= PHY_MAX_NUM) {
+ dev_err(dev, "Invalid phy instance %d\n", iphy_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
dev_err(dev, "Invalid PHY instance %d: ComboPhy is in dual lane mode\n", iphy_id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cbphy->iphy[iphy_id].phy;
+}
+
+static int intel_cbphy_create(struct intel_combo_phy *cbphy)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = cbphy->dev;
+ struct intel_cbphy_iphy *iphy;
+ int i;
+
+ for (i = 0; i < PHY_MAX_NUM; i++) {
+ iphy = &cbphy->iphy[i];
+ iphy->parent = cbphy;
+ iphy->id = i;
+
+ /* In dual lane mode skip phy creation for the second phy */
+ if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
+ continue;
+
+ iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
+ if (IS_ERR(iphy->phy)) {
+ dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
+ COMBO_PHY_ID(iphy), PHY_ID(iphy));
+
+ return PTR_ERR(iphy->phy);
+ }
+
+ phy_set_drvdata(iphy->phy, iphy);
+ }
+
+ dev_set_drvdata(dev, cbphy);
+ phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
+ if (IS_ERR(phy_provider))
+ dev_err(dev, "Register PHY provider failed!\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int intel_cbphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_combo_phy *cbphy;
+ int ret;
+
+ cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
+ if (!cbphy)
+ return -ENOMEM;
+
+ cbphy->dev = dev;
+ cbphy->init_cnt = 0;
+ mutex_init(&cbphy->lock);
+ ret = intel_cbphy_fwnode_parse(cbphy);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, cbphy);
+
+ return intel_cbphy_create(cbphy);
+}
+
+static int intel_cbphy_remove(struct platform_device *pdev)
+{
+ struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);
+
+ intel_cbphy_rst_assert(cbphy);
+ clk_disable_unprepare(cbphy->core_clk);
+ return 0;
+}
+
+static const struct of_device_id of_intel_cbphy_match[] = {
+ { .compatible = "intel,combo-phy" },
+ { .compatible = "intel,combophy-lgm" },
+ {}
+};
+
+static struct platform_driver intel_cbphy_driver = {
+ .probe = intel_cbphy_probe,
+ .remove = intel_cbphy_remove,
+ .driver = {
+ .name = "intel-combo-phy",
+ .of_match_table = of_intel_cbphy_match,
+ }
+};
+
+module_platform_driver(intel_cbphy_driver);
+
+MODULE_DESCRIPTION("Intel Combo-phy driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 12e71a315a2c..089db0dea703 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -122,7 +122,6 @@ enum cpcap_gpio_mode {
struct cpcap_phy_ddata {
struct regmap *reg;
struct device *dev;
- struct clk *refclk;
struct usb_phy phy;
struct delayed_work detect_work;
struct pinctrl *pins;
@@ -707,7 +706,6 @@ static int cpcap_usb_phy_remove(struct platform_device *pdev)
usb_remove_phy(&ddata->phy);
cancel_delayed_work_sync(&ddata->detect_work);
- clk_unprepare(ddata->refclk);
regulator_disable(ddata->vusb);
return 0;
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 98674ed094d9..ca9ce7e84a5c 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -18,6 +18,13 @@ config PHY_QCOM_APQ8064_SATA
depends on OF
select GENERIC_PHY
+config PHY_QCOM_IPQ4019_USB
+ tristate "Qualcomm IPQ4019 USB PHY driver"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Support for the USB PHYs on Qualcomm IPQ40xx SoCs.
+
config PHY_QCOM_IPQ806X_SATA
tristate "Qualcomm IPQ806x SATA SerDes/PHY driver"
depends on ARCH_QCOM
@@ -85,6 +92,16 @@ config PHY_QCOM_USB_HS
Support for the USB high-speed ULPI compliant phy on Qualcomm
chipsets.
+config PHY_QCOM_USB_SNPS_FEMTO_V2
+ tristate "Qualcomm SNPS FEMTO USB HS PHY V2 module"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable support for the USB high-speed SNPS Femto phy on Qualcomm
+ chipsets. This PHY has differences in the register map compared
+ to the V1 variants. The PHY is paired with a Synopsys DWC3 USB
+ controller on Qualcomm SoCs.
+
config PHY_QCOM_USB_HSIC
tristate "Qualcomm USB HSIC ULPI PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 1f14aeacbd70..86fb32efab79 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ATH79_USB) += phy-ath79-usb.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
+obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
@@ -12,3 +13,4 @@ obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
obj-$(CONFIG_PHY_QCOM_USB_SS) += phy-qcom-usb-ss.o
+obj-$(CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2) += phy-qcom-snps-femto-v2.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
new file mode 100644
index 000000000000..b8ef331e1545
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2018 John Crispin <john@phrozen.org>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+struct ipq4019_usb_phy {
+ struct device *dev;
+ struct phy *phy;
+ void __iomem *base;
+ struct reset_control *por_rst;
+ struct reset_control *srif_rst;
+};
+
+static int ipq4019_ss_phy_power_off(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->por_rst);
+ msleep(10);
+
+ return 0;
+}
+
+static int ipq4019_ss_phy_power_on(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ ipq4019_ss_phy_power_off(_phy);
+
+ reset_control_deassert(phy->por_rst);
+
+ return 0;
+}
+
+static struct phy_ops ipq4019_usb_ss_phy_ops = {
+ .power_on = ipq4019_ss_phy_power_on,
+ .power_off = ipq4019_ss_phy_power_off,
+};
+
+static int ipq4019_hs_phy_power_off(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ reset_control_assert(phy->por_rst);
+ msleep(10);
+
+ reset_control_assert(phy->srif_rst);
+ msleep(10);
+
+ return 0;
+}
+
+static int ipq4019_hs_phy_power_on(struct phy *_phy)
+{
+ struct ipq4019_usb_phy *phy = phy_get_drvdata(_phy);
+
+ ipq4019_hs_phy_power_off(_phy);
+
+ reset_control_deassert(phy->srif_rst);
+ msleep(10);
+
+ reset_control_deassert(phy->por_rst);
+
+ return 0;
+}
+
+static struct phy_ops ipq4019_usb_hs_phy_ops = {
+ .power_on = ipq4019_hs_phy_power_on,
+ .power_off = ipq4019_hs_phy_power_off,
+};
+
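+/*
+ * The .data member of each match entry points at the phy_ops to use; it
+ * is fetched in probe via of_device_get_match_data() and handed straight
+ * to devm_phy_create().
+ */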
+static const struct of_device_id ipq4019_usb_phy_of_match[] = {
+ { .compatible = "qcom,usb-hs-ipq4019-phy", .data = &ipq4019_usb_hs_phy_ops},
+ { .compatible = "qcom,usb-ss-ipq4019-phy", .data = &ipq4019_usb_ss_phy_ops},
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipq4019_usb_phy_of_match);
+
+static int ipq4019_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+ struct ipq4019_usb_phy *phy;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->base)) {
+ dev_err(dev, "failed to remap register memory\n");
+ return PTR_ERR(phy->base);
+ }
+
+ phy->por_rst = devm_reset_control_get(phy->dev, "por_rst");
+ if (IS_ERR(phy->por_rst)) {
+ if (PTR_ERR(phy->por_rst) != -EPROBE_DEFER)
+ dev_err(dev, "POR reset is missing\n");
+ return PTR_ERR(phy->por_rst);
+ }
+
+ phy->srif_rst = devm_reset_control_get_optional(phy->dev, "srif_rst");
+ if (IS_ERR(phy->srif_rst))
+ return PTR_ERR(phy->srif_rst);
+
+ phy->phy = devm_phy_create(dev, NULL, of_device_get_match_data(dev));
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(phy->phy);
+ }
+ phy_set_drvdata(phy->phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver ipq4019_usb_phy_driver = {
+ .probe = ipq4019_usb_phy_probe,
+ .driver = {
+ .of_match_table = ipq4019_usb_phy_of_match,
+ .name = "ipq4019-usb-phy",
+ }
+};
+module_platform_driver(ipq4019_usb_phy_driver);
+
+MODULE_DESCRIPTION("QCOM/IPQ4019 USB phy driver");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index c190406246ab..e91040af3394 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -119,14 +119,17 @@ enum qphy_reg_layout {
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
};
-static const unsigned int msm8996_ufsphy_regs_layout[] = {
+static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x00,
[QPHY_PCS_READY_STATUS] = 0x168,
};
-static const unsigned int pciephy_regs_layout[] = {
+static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_SW_RESET] = 0x400,
[QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
[QPHY_COM_START_CONTROL] = 0x408,
@@ -142,7 +145,7 @@ static const unsigned int pciephy_regs_layout[] = {
[QPHY_PCS_STATUS] = 0x174,
};
-static const unsigned int usb3phy_regs_layout[] = {
+static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_FLL_CNTRL1] = 0xc0,
[QPHY_FLL_CNTRL2] = 0xc4,
[QPHY_FLL_CNT_VAL_L] = 0xc8,
@@ -156,7 +159,7 @@ static const unsigned int usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
};
-static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
[QPHY_PCS_STATUS] = 0x174,
@@ -165,27 +168,34 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
};
-static const unsigned int sdm845_qmp_pciephy_regs_layout[] = {
+static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
[QPHY_PCS_STATUS] = 0x174,
};
-static const unsigned int sdm845_qhp_pciephy_regs_layout[] = {
+static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = 0x00,
[QPHY_START_CTRL] = 0x08,
[QPHY_PCS_STATUS] = 0x2ac,
};
-static const unsigned int sdm845_ufsphy_regs_layout[] = {
+static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x44,
+ [QPHY_PCS_STATUS] = 0x14,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+};
+
+static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = 0x00,
[QPHY_PCS_READY_STATUS] = 0x160,
};
-static const unsigned int sm8150_ufsphy_regs_layout[] = {
- [QPHY_START_CTRL] = QPHY_V4_PHY_START,
- [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_READY_STATUS,
- [QPHY_SW_RESET] = QPHY_V4_SW_RESET,
+static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
};
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
@@ -1272,13 +1282,121 @@ static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_RX_SIGDET_CTRL2, 0x6d),
- QMP_PHY_INIT_CFG(QPHY_V4_TX_LARGE_AMP_DRV_LVL, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V4_TX_MID_TERM_CTRL1, 0x43),
- QMP_PHY_INIT_CFG(QPHY_V4_DEBUG_BUS_CLKSEL, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V4_RX_MIN_HIBERN8_TIME, 0xff),
- QMP_PHY_INIT_CFG(QPHY_V4_MULTI_LANE_CTRL1, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -1445,6 +1563,10 @@ static const char * const sdm845_pciephy_clk_l[] = {
"aux", "cfg_ahb", "ref", "refgen",
};
+static const char * const qmp_v4_phy_clk_l[] = {
+ "aux", "ref_clk_src", "ref", "com_aux",
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -1458,6 +1580,10 @@ static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
};
+static const char * const sc7180_usb3phy_reset_l[] = {
+ "phy",
+};
+
static const char * const sdm845_pciephy_reset_l[] = {
"phy",
};
@@ -1671,6 +1797,37 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -1798,6 +1955,37 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.is_dual_lane_phy = true,
};
+static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .type = PHY_TYPE_USB3,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8150_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
+ .rx_tbl = sm8150_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
+ .pcs_tbl = sm8150_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+ .pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
+
+ .has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -1880,11 +2068,18 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
}
- if (cfg->has_phy_com_ctrl)
+ if (cfg->has_phy_com_ctrl) {
qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
SW_PWRDN);
- else
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+ } else {
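+ /*
+ * Layouts that define QPHY_PCS_POWER_DOWN_CONTROL use that
+ * offset; a zero (unset) entry falls back to the legacy
+ * fixed QPHY_POWER_DOWN_CONTROL offset.
+ */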
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL])
+ qphy_setbits(pcs,
+ cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ else
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
/* Serdes configuration */
qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
@@ -2110,7 +2305,13 @@ static int qcom_qmp_phy_disable(struct phy *phy)
qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
/* Put PHY into POWER DOWN state: active low */
- qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+ if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) {
+ qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ cfg->pwrdn_ctrl);
+ } else {
+ qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL,
+ cfg->pwrdn_ctrl);
+ }
if (cfg->has_lane_rst)
reset_control_assert(qphy->lane_rst);
@@ -2516,6 +2717,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,ipq8074-qmp-pcie-phy",
.data = &ipq8074_pciephy_cfg,
}, {
+ .compatible = "qcom,sc7180-qmp-usb3-phy",
+ .data = &sc7180_usb3phy_cfg,
+ }, {
.compatible = "qcom,sdm845-qhp-pcie-phy",
.data = &sdm845_qhp_pciephy_cfg,
}, {
@@ -2536,6 +2740,12 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sm8150-qmp-ufs-phy",
.data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-ufs-phy",
+ .data = &sm8150_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index dece0e67704b..6d017a0c0c8d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -125,7 +125,7 @@
#define QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1DC
#define QPHY_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1E0
-/* Only for QMP V3 PHY - DP COM registers */
+/* Only for QMP V3 & V4 PHY - DP COM registers */
#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
#define QPHY_V3_DP_COM_SW_RESET 0x04
#define QPHY_V3_DP_COM_POWER_DOWN_CTRL 0x08
@@ -314,6 +314,14 @@
#define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60
/* Only for QMP V4 PHY - QSERDES COM registers */
+#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V4_COM_SSC_PER1 0x01c
+#define QSERDES_V4_COM_SSC_PER2 0x020
+#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030
+#define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050
#define QSERDES_V4_COM_PLL_IVCO 0x058
#define QSERDES_V4_COM_CMN_IPTRIM 0x060
#define QSERDES_V4_COM_CP_CTRL_MODE0 0x074
@@ -330,10 +338,22 @@
#define QSERDES_V4_COM_DEC_START_MODE0 0x0bc
#define QSERDES_V4_COM_LOCK_CMP2_MODE1 0x0b8
#define QSERDES_V4_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V4_COM_DIV_FRAC_START1_MODE0 0x0cc
+#define QSERDES_V4_COM_DIV_FRAC_START2_MODE0 0x0d0
+#define QSERDES_V4_COM_DIV_FRAC_START3_MODE0 0x0d4
+#define QSERDES_V4_COM_DIV_FRAC_START1_MODE1 0x0d8
+#define QSERDES_V4_COM_DIV_FRAC_START2_MODE1 0x0dc
+#define QSERDES_V4_COM_DIV_FRAC_START3_MODE1 0x0e0
#define QSERDES_V4_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V4_COM_VCO_TUNE1_MODE0 0x110
+#define QSERDES_V4_COM_VCO_TUNE2_MODE0 0x114
+#define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118
+#define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c
#define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124
#define QSERDES_V4_COM_HSCLK_SEL 0x158
#define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V4_COM_SVS_MODE_CLK_SEL 0x184
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
@@ -341,12 +361,16 @@
#define QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1b8
/* Only for QMP V4 PHY - TX registers */
+#define QSERDES_V4_TX_RES_CODE_LANE_TX 0x34
+#define QSERDES_V4_TX_RES_CODE_LANE_RX 0x38
#define QSERDES_V4_TX_LANE_MODE_1 0x84
+#define QSERDES_V4_TX_RCV_DETECT_LVL_2 0x9c
#define QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1 0xd8
#define QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1 0xdC
#define QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0xe0
#define QSERDES_V4_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0xe4
#define QSERDES_V4_TX_TRAN_DRVR_EMP_EN 0xb8
+#define QSERDES_V4_TX_PI_QEC_CTRL 0x104
/* Only for QMP V4 PHY - RX registers */
#define QSERDES_V4_RX_UCDR_FO_GAIN 0x008
@@ -354,17 +378,27 @@
#define QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN 0x030
#define QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040
#define QSERDES_V4_RX_UCDR_PI_CONTROLS 0x044
#define QSERDES_V4_RX_UCDR_PI_CTRL2 0x048
+#define QSERDES_V4_RX_UCDR_SB2_THRESH1 0x04c
+#define QSERDES_V4_RX_UCDR_SB2_THRESH2 0x050
+#define QSERDES_V4_RX_UCDR_SB2_GAIN1 0x054
+#define QSERDES_V4_RX_UCDR_SB2_GAIN2 0x058
+#define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE 0x060
#define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068
#define QSERDES_V4_RX_AC_JTAG_MODE 0x078
#define QSERDES_V4_RX_RX_TERM_BW 0x080
+#define QSERDES_V4_RX_VGA_CAL_CNTRL1 0x0d4
+#define QSERDES_V4_RX_VGA_CAL_CNTRL2 0x0d8
+#define QSERDES_V4_RX_GM_CAL 0x0dc
#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec
#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0
#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4
#define QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW 0x0f8
#define QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH 0x0fc
#define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100
+#define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
#define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114
#define QSERDES_V4_RX_SIGDET_CNTRL 0x11c
#define QSERDES_V4_RX_SIGDET_LVL 0x120
@@ -385,29 +419,32 @@
#define QSERDES_V4_RX_RX_MODE_10_HIGH2 0x1a0
#define QSERDES_V4_RX_RX_MODE_10_HIGH3 0x1a4
#define QSERDES_V4_RX_RX_MODE_10_HIGH4 0x1a8
+#define QSERDES_V4_RX_DFE_EN_TIMER 0x1b4
+#define QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET 0x1b8
#define QSERDES_V4_RX_DCC_CTRL1 0x1bc
+#define QSERDES_V4_RX_VTH_CODE 0x1c4
-/* Only for QMP V4 PHY - PCS registers */
-#define QPHY_V4_PHY_START 0x000
-#define QPHY_V4_POWER_DOWN_CONTROL 0x004
-#define QPHY_V4_SW_RESET 0x008
-#define QPHY_V4_TIMER_20US_CORECLK_STEPS_MSB 0x00c
-#define QPHY_V4_TIMER_20US_CORECLK_STEPS_LSB 0x010
-#define QPHY_V4_PLL_CNTL 0x02c
-#define QPHY_V4_TX_LARGE_AMP_DRV_LVL 0x030
-#define QPHY_V4_TX_SMALL_AMP_DRV_LVL 0x038
-#define QPHY_V4_BIST_FIXED_PAT_CTRL 0x060
-#define QPHY_V4_TX_HSGEAR_CAPABILITY 0x074
-#define QPHY_V4_RX_HSGEAR_CAPABILITY 0x0b4
-#define QPHY_V4_DEBUG_BUS_CLKSEL 0x124
-#define QPHY_V4_LINECFG_DISABLE 0x148
-#define QPHY_V4_RX_MIN_HIBERN8_TIME 0x150
-#define QPHY_V4_RX_SIGDET_CTRL2 0x158
-#define QPHY_V4_TX_PWM_GEAR_BAND 0x160
-#define QPHY_V4_TX_HS_GEAR_BAND 0x168
-#define QPHY_V4_PCS_READY_STATUS 0x180
-#define QPHY_V4_TX_MID_TERM_CTRL1 0x1d8
-#define QPHY_V4_MULTI_LANE_CTRL1 0x1e0
+/* Only for QMP V4 PHY - UFS PCS registers */
+#define QPHY_V4_PCS_UFS_PHY_START 0x000
+#define QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V4_PCS_UFS_SW_RESET 0x008
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V4_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V4_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V4_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V4_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V4_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
+#define QPHY_V4_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
+#define QPHY_V4_PCS_UFS_LINECFG_DISABLE 0x148
+#define QPHY_V4_PCS_UFS_RX_MIN_HIBERN8_TIME 0x150
+#define QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2 0x158
+#define QPHY_V4_PCS_UFS_TX_PWM_GEAR_BAND 0x160
+#define QPHY_V4_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V4_PCS_UFS_READY_STATUS 0x180
+#define QPHY_V4_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
+#define QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
/* PCIE GEN3 COM registers */
#define PCIE_GEN3_QHP_COM_SSC_EN_CENTER 0x14
@@ -523,4 +560,161 @@
#define PCIE_GEN3_QHP_PHY_POWER_STATE_CONFIG5 0x16c
#define PCIE_GEN3_QHP_PHY_PCS_TX_RX_CONFIG 0x174
+/* Only for QMP V4 PHY - USB/PCIe PCS registers */
+#define QPHY_V4_PCS_SW_RESET 0x000
+#define QPHY_V4_PCS_REVISION_ID0 0x004
+#define QPHY_V4_PCS_REVISION_ID1 0x008
+#define QPHY_V4_PCS_REVISION_ID2 0x00c
+#define QPHY_V4_PCS_REVISION_ID3 0x010
+#define QPHY_V4_PCS_PCS_STATUS1 0x014
+#define QPHY_V4_PCS_PCS_STATUS2 0x018
+#define QPHY_V4_PCS_PCS_STATUS3 0x01c
+#define QPHY_V4_PCS_PCS_STATUS4 0x020
+#define QPHY_V4_PCS_PCS_STATUS5 0x024
+#define QPHY_V4_PCS_PCS_STATUS6 0x028
+#define QPHY_V4_PCS_PCS_STATUS7 0x02c
+#define QPHY_V4_PCS_DEBUG_BUS_0_STATUS 0x030
+#define QPHY_V4_PCS_DEBUG_BUS_1_STATUS 0x034
+#define QPHY_V4_PCS_DEBUG_BUS_2_STATUS 0x038
+#define QPHY_V4_PCS_DEBUG_BUS_3_STATUS 0x03c
+#define QPHY_V4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V4_PCS_START_CONTROL 0x044
+#define QPHY_V4_PCS_INSIG_SW_CTRL1 0x048
+#define QPHY_V4_PCS_INSIG_SW_CTRL2 0x04c
+#define QPHY_V4_PCS_INSIG_SW_CTRL3 0x050
+#define QPHY_V4_PCS_INSIG_SW_CTRL4 0x054
+#define QPHY_V4_PCS_INSIG_SW_CTRL5 0x058
+#define QPHY_V4_PCS_INSIG_SW_CTRL6 0x05c
+#define QPHY_V4_PCS_INSIG_SW_CTRL7 0x060
+#define QPHY_V4_PCS_INSIG_SW_CTRL8 0x064
+#define QPHY_V4_PCS_INSIG_MX_CTRL1 0x068
+#define QPHY_V4_PCS_INSIG_MX_CTRL2 0x06c
+#define QPHY_V4_PCS_INSIG_MX_CTRL3 0x070
+#define QPHY_V4_PCS_INSIG_MX_CTRL4 0x074
+#define QPHY_V4_PCS_INSIG_MX_CTRL5 0x078
+#define QPHY_V4_PCS_INSIG_MX_CTRL7 0x07c
+#define QPHY_V4_PCS_INSIG_MX_CTRL8 0x080
+#define QPHY_V4_PCS_OUTSIG_SW_CTRL1 0x084
+#define QPHY_V4_PCS_OUTSIG_MX_CTRL1 0x088
+#define QPHY_V4_PCS_CLAMP_ENABLE 0x08c
+#define QPHY_V4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V4_PCS_POWER_STATE_CONFIG2 0x094
+#define QPHY_V4_PCS_FLL_CNTRL1 0x098
+#define QPHY_V4_PCS_FLL_CNTRL2 0x09c
+#define QPHY_V4_PCS_FLL_CNT_VAL_L 0x0a0
+#define QPHY_V4_PCS_FLL_CNT_VAL_H_TOL 0x0a4
+#define QPHY_V4_PCS_FLL_MAN_CODE 0x0a8
+#define QPHY_V4_PCS_TEST_CONTROL1 0x0ac
+#define QPHY_V4_PCS_TEST_CONTROL2 0x0b0
+#define QPHY_V4_PCS_TEST_CONTROL3 0x0b4
+#define QPHY_V4_PCS_TEST_CONTROL4 0x0b8
+#define QPHY_V4_PCS_TEST_CONTROL5 0x0bc
+#define QPHY_V4_PCS_TEST_CONTROL6 0x0c0
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG4 0x0d0
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG5 0x0d4
+#define QPHY_V4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG2 0x0e0
+#define QPHY_V4_PCS_REFGEN_REQ_CONFIG3 0x0e4
+#define QPHY_V4_PCS_BIST_CTRL 0x0e8
+#define QPHY_V4_PCS_PRBS_POLY0 0x0ec
+#define QPHY_V4_PCS_PRBS_POLY1 0x0f0
+#define QPHY_V4_PCS_FIXED_PAT0 0x0f4
+#define QPHY_V4_PCS_FIXED_PAT1 0x0f8
+#define QPHY_V4_PCS_FIXED_PAT2 0x0fc
+#define QPHY_V4_PCS_FIXED_PAT3 0x100
+#define QPHY_V4_PCS_FIXED_PAT4 0x104
+#define QPHY_V4_PCS_FIXED_PAT5 0x108
+#define QPHY_V4_PCS_FIXED_PAT6 0x10c
+#define QPHY_V4_PCS_FIXED_PAT7 0x110
+#define QPHY_V4_PCS_FIXED_PAT8 0x114
+#define QPHY_V4_PCS_FIXED_PAT9 0x118
+#define QPHY_V4_PCS_FIXED_PAT10 0x11c
+#define QPHY_V4_PCS_FIXED_PAT11 0x120
+#define QPHY_V4_PCS_FIXED_PAT12 0x124
+#define QPHY_V4_PCS_FIXED_PAT13 0x128
+#define QPHY_V4_PCS_FIXED_PAT14 0x12c
+#define QPHY_V4_PCS_FIXED_PAT15 0x130
+#define QPHY_V4_PCS_TXMGN_CONFIG 0x134
+#define QPHY_V4_PCS_G12S1_TXMGN_V0 0x138
+#define QPHY_V4_PCS_G12S1_TXMGN_V1 0x13c
+#define QPHY_V4_PCS_G12S1_TXMGN_V2 0x140
+#define QPHY_V4_PCS_G12S1_TXMGN_V3 0x144
+#define QPHY_V4_PCS_G12S1_TXMGN_V4 0x148
+#define QPHY_V4_PCS_G12S1_TXMGN_V0_RS 0x14c
+#define QPHY_V4_PCS_G12S1_TXMGN_V1_RS 0x150
+#define QPHY_V4_PCS_G12S1_TXMGN_V2_RS 0x154
+#define QPHY_V4_PCS_G12S1_TXMGN_V3_RS 0x158
+#define QPHY_V4_PCS_G12S1_TXMGN_V4_RS 0x15c
+#define QPHY_V4_PCS_G3S2_TXMGN_MAIN 0x160
+#define QPHY_V4_PCS_G3S2_TXMGN_MAIN_RS 0x164
+#define QPHY_V4_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB 0x16c
+#define QPHY_V4_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V4_PCS_G3S2_POST_GAIN 0x174
+#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET 0x178
+#define QPHY_V4_PCS_G3S2_PRE_GAIN_RS 0x17c
+#define QPHY_V4_PCS_G3S2_POST_GAIN_RS 0x180
+#define QPHY_V4_PCS_G3S2_PRE_POST_OFFSET_RS 0x184
+#define QPHY_V4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V4_PCS_RX_SIGDET_DTCT_CNTRL 0x18c
+#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V4_PCS_RATE_SLEW_CNTRL2 0x19c
+#define QPHY_V4_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1a0
+#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1a4
+#define QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1a8
+#define QPHY_V4_PCS_TSYNC_RSYNC_TIME 0x1ac
+#define QPHY_V4_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_V4_PCS_TSYNC_DLY_TIME 0x1b4
+#define QPHY_V4_PCS_ELECIDLE_DLY_SEL 0x1b8
+#define QPHY_V4_PCS_CMN_ACK_OUT_SEL 0x1bc
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG3 0x1c8
+#define QPHY_V4_PCS_ALIGN_DETECT_CONFIG4 0x1cc
+#define QPHY_V4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V4_PCS_RX_IDLE_DTCT_CNTRL 0x1d4
+#define QPHY_V4_PCS_RX_DCC_CAL_CONFIG 0x1d8
+#define QPHY_V4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V4_PCS_EQ_CONFIG3 0x1e4
+#define QPHY_V4_PCS_EQ_CONFIG4 0x1e8
+#define QPHY_V4_PCS_EQ_CONFIG5 0x1ec
+#define QPHY_V4_PCS_USB3_POWER_STATE_CONFIG1 0x300
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x304
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x308
+#define QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x30c
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x310
+#define QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x314
+#define QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x318
+#define QPHY_V4_PCS_USB3_LFPS_TX_ECSTART 0x31c
+#define QPHY_V4_PCS_USB3_LFPS_PER_TIMER_VAL 0x320
+#define QPHY_V4_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x324
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x328
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x32c
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x330
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x334
+#define QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x338
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x33c
+#define QPHY_V4_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x340
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x344
+#define QPHY_V4_PCS_USB3_ARCVR_DTCT_CM_DLY 0x348
+#define QPHY_V4_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x34c
+#define QPHY_V4_PCS_USB3_ALFPS_DEGLITCH_VAL 0x350
+#define QPHY_V4_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x354
+#define QPHY_V4_PCS_USB3_TEST_CONTROL 0x358
+
+/* Only for QMP V4 PHY - PCS_MISC registers */
+#define QPHY_V4_PCS_MISC_TYPEC_CTRL 0x00
+#define QPHY_V4_PCS_MISC_TYPEC_PWRDN_CTRL 0x04
+#define QPHY_V4_PCS_MISC_PCS_MISC_CONFIG1 0x08
+#define QPHY_V4_PCS_MISC_CLAMP_ENABLE 0x0c
+#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
+#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
new file mode 100644
index 000000000000..4d74045271eb
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL0 (0x3c)
+#define SLEEPM BIT(0)
+#define OPMODE_MASK GENMASK(4, 3)
+#define OPMODE_NORMAL (0x00)
+#define OPMODE_NONDRIVING BIT(3)
+#define TERMSEL BIT(5)
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL1 (0x40)
+#define XCVRSEL BIT(0)
+
+#define USB2_PHY_USB_PHY_UTMI_CTRL5 (0x50)
+#define POR BIT(1)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0 (0x54)
+#define RETENABLEN BIT(3)
+#define FSEL_MASK GENMASK(7, 5)
+#define FSEL_DEFAULT (0x3 << 4)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1 (0x58)
+#define VBUSVLDEXTSEL0 BIT(4)
+#define PLLBTUNE BIT(5)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2 (0x5c)
+#define VREGBYPASS BIT(0)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL1 (0x60)
+#define VBUSVLDEXT0 BIT(0)
+
+#define USB2_PHY_USB_PHY_HS_PHY_CTRL2 (0x64)
+#define USB2_AUTO_RESUME BIT(0)
+#define USB2_SUSPEND_N BIT(2)
+#define USB2_SUSPEND_N_SEL BIT(3)
+
+#define USB2_PHY_USB_PHY_CFG0 (0x94)
+#define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN BIT(0)
+#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1)
+
+#define USB2_PHY_USB_PHY_REFCLK_CTRL (0xa0)
+#define REFCLK_SEL_MASK GENMASK(1, 0)
+#define REFCLK_SEL_DEFAULT (0x2 << 0)
+
+static const char * const qcom_snps_hsphy_vreg_names[] = {
+ "vdda-pll", "vdda33", "vdda18",
+};
+
+#define SNPS_HS_NUM_VREGS ARRAY_SIZE(qcom_snps_hsphy_vreg_names)
+
+/**
+ * struct qcom_snps_hsphy - snps hs phy attributes
+ *
+ * @phy: generic phy
+ * @base: iomapped memory space for snps hs phy
+ *
+ * @cfg_ahb_clk: AHB2PHY interface clock
+ * @ref_clk: phy reference clock
+ * @phy_reset: phy reset control
+ * @vregs: regulator supplies bulk data
+ * @phy_initialized: if PHY has been initialized correctly
+ */
+struct qcom_snps_hsphy {
+ struct phy *phy;
+ void __iomem *base;
+
+ struct clk *cfg_ahb_clk;
+ struct clk *ref_clk;
+ struct reset_control *phy_reset;
+ struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS];
+
+ bool phy_initialized;
+};
+
+static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset,
+ u32 mask, u32 val)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~mask;
+ reg |= val & mask;
+ writel_relaxed(reg, base + offset);
+
+ /* Ensure above write is completed */
+ readl_relaxed(base + offset);
+}
+
+static int qcom_snps_hsphy_init(struct phy *phy)
+{
+ struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+ int ret;
+
+ dev_vdbg(&phy->dev, "%s(): Initializing SNPS HS phy\n", __func__);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(hsphy->cfg_ahb_clk);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ goto poweroff_phy;
+ }
+
+ ret = reset_control_assert(hsphy->phy_reset);
+ if (ret) {
+ dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret);
+ goto disable_ahb_clk;
+ }
+
+ usleep_range(100, 150);
+
+ ret = reset_control_deassert(hsphy->phy_reset);
+ if (ret) {
+ dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret);
+ goto disable_ahb_clk;
+ }
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
+ UTMI_PHY_CMN_CTRL_OVERRIDE_EN,
+ UTMI_PHY_CMN_CTRL_OVERRIDE_EN);
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
+ POR, POR);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0,
+ FSEL_MASK, 0);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1,
+ PLLBTUNE, PLLBTUNE);
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_REFCLK_CTRL,
+				   REFCLK_SEL_MASK, REFCLK_SEL_DEFAULT);
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1,
+ VBUSVLDEXTSEL0, VBUSVLDEXTSEL0);
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL1,
+ VBUSVLDEXT0, VBUSVLDEXT0);
+
+ qcom_snps_hsphy_write_mask(hsphy->base,
+ USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
+ VREGBYPASS, VREGBYPASS);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
+ SLEEPM, SLEEPM);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_UTMI_CTRL5,
+ POR, 0);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL, 0);
+
+ qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0,
+ UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0);
+
+ hsphy->phy_initialized = true;
+
+ return 0;
+
+disable_ahb_clk:
+ clk_disable_unprepare(hsphy->cfg_ahb_clk);
+poweroff_phy:
+ regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+
+ return ret;
+}
+
+static int qcom_snps_hsphy_exit(struct phy *phy)
+{
+ struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy);
+
+ reset_control_assert(hsphy->phy_reset);
+ clk_disable_unprepare(hsphy->cfg_ahb_clk);
+ regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs);
+ hsphy->phy_initialized = false;
+
+ return 0;
+}
+
+static const struct phy_ops qcom_snps_hsphy_gen_ops = {
+ .init = qcom_snps_hsphy_init,
+ .exit = qcom_snps_hsphy_exit,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id qcom_snps_hsphy_of_match_table[] = {
+ { .compatible = "qcom,sm8150-usb-hs-phy", },
+ { .compatible = "qcom,usb-snps-hs-7nm-phy", },
+ { .compatible = "qcom,usb-snps-femto-v2-phy", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_of_match_table);
+
+static int qcom_snps_hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_snps_hsphy *hsphy;
+ struct phy_provider *phy_provider;
+ struct phy *generic_phy;
+ int ret, i;
+ int num;
+
+ hsphy = devm_kzalloc(dev, sizeof(*hsphy), GFP_KERNEL);
+ if (!hsphy)
+ return -ENOMEM;
+
+ hsphy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hsphy->base))
+ return PTR_ERR(hsphy->base);
+
+ hsphy->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(hsphy->ref_clk)) {
+ ret = PTR_ERR(hsphy->ref_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get ref clk, %d\n", ret);
+ return ret;
+ }
+
+ hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(hsphy->phy_reset)) {
+ dev_err(dev, "failed to get phy core reset\n");
+ return PTR_ERR(hsphy->phy_reset);
+ }
+
+ num = ARRAY_SIZE(hsphy->vregs);
+ for (i = 0; i < num; i++)
+ hsphy->vregs[i].supply = qcom_snps_hsphy_vreg_names[i];
+
+ ret = devm_regulator_bulk_get(dev, num, hsphy->vregs);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ generic_phy = devm_phy_create(dev, NULL, &qcom_snps_hsphy_gen_ops);
+ if (IS_ERR(generic_phy)) {
+ ret = PTR_ERR(generic_phy);
+ dev_err(dev, "failed to create phy, %d\n", ret);
+ return ret;
+ }
+ hsphy->phy = generic_phy;
+
+ dev_set_drvdata(dev, hsphy);
+ phy_set_drvdata(generic_phy, hsphy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_dbg(dev, "Registered Qcom-SNPS HS phy\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver qcom_snps_hsphy_driver = {
+ .probe = qcom_snps_hsphy_probe,
+ .driver = {
+ .name = "qcom-snps-hs-femto-v2-phy",
+ .of_match_table = qcom_snps_hsphy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_snps_hsphy_driver);
+
+MODULE_DESCRIPTION("Qualcomm SNPS FEMTO USB HS PHY V2 driver");
+MODULE_LICENSE("GPL v2");
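The qcom_snps_hsphy_write_mask() helper above follows the usual read-modify-write pattern: clear the field selected by the mask, then OR in the new value restricted to that mask. A minimal user-space sketch of that arithmetic (the helper and bit names below are illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Clear the bits selected by mask, then set them from val (val is
 * restricted to the mask), mirroring qcom_snps_hsphy_write_mask(). */
static uint32_t write_mask(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;
	reg |= val & mask;
	return reg;
}

int main(void)
{
	/* Hypothetical register value; bits 2 and 3 stand in for
	 * USB2_SUSPEND_N and USB2_SUSPEND_N_SEL. */
	uint32_t ctrl2 = 0;
	uint32_t both = (1u << 3) | (1u << 2);

	ctrl2 = write_mask(ctrl2, both, both);	/* assert both bits */
	printf("0x%08x\n", ctrl2);		/* prints 0x0000000c */
	ctrl2 = write_mask(ctrl2, 1u << 3, 0);	/* clear only bit 3 */
	printf("0x%08x\n", ctrl2);		/* prints 0x00000004 */
	return 0;
}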
diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
index 56a5083fe6f9..32be62e49804 100644
--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
+++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
@@ -139,6 +139,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
udelay(10);
rst &= ~rstbits;
writel(rst, drv->reg_phy + S5PV210_UPHYRST);
+ /* The following delay is necessary for the reset sequence to be
+ * completed
+ */
+ udelay(80);
} else {
pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
pwr |= phypwr;
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 88a047b9fa6f..0a166d5a6414 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -77,6 +77,7 @@ static struct regmap_config serdes_am654_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.fast_io = true,
+ .max_register = 0x1ffc,
};
static const struct reg_field cmu_master_cdn_o = REG_FIELD(CMU_R07C, 24, 24);
@@ -200,9 +201,91 @@ static int serdes_am654_power_off(struct phy *x)
return 0;
}
-static int serdes_am654_init(struct phy *x)
+#define SERDES_AM654_CFG(offset, a, b, val) \
+ regmap_update_bits(phy->regmap, (offset),\
+ GENMASK((a), (b)), (val) << (b))
+
+static int serdes_am654_usb3_init(struct serdes_am654 *phy)
+{
+ SERDES_AM654_CFG(0x0000, 31, 24, 0x17);
+ SERDES_AM654_CFG(0x0004, 15, 8, 0x02);
+ SERDES_AM654_CFG(0x0004, 7, 0, 0x0e);
+ SERDES_AM654_CFG(0x0008, 23, 16, 0x2e);
+ SERDES_AM654_CFG(0x0008, 31, 24, 0x2e);
+ SERDES_AM654_CFG(0x0060, 7, 0, 0x4b);
+ SERDES_AM654_CFG(0x0060, 15, 8, 0x98);
+ SERDES_AM654_CFG(0x0060, 23, 16, 0x60);
+ SERDES_AM654_CFG(0x00d0, 31, 24, 0x45);
+ SERDES_AM654_CFG(0x00e8, 15, 8, 0x0e);
+ SERDES_AM654_CFG(0x0220, 7, 0, 0x34);
+ SERDES_AM654_CFG(0x0220, 15, 8, 0x34);
+ SERDES_AM654_CFG(0x0220, 31, 24, 0x37);
+ SERDES_AM654_CFG(0x0224, 7, 0, 0x37);
+ SERDES_AM654_CFG(0x0224, 15, 8, 0x37);
+ SERDES_AM654_CFG(0x0228, 23, 16, 0x37);
+ SERDES_AM654_CFG(0x0228, 31, 24, 0x37);
+ SERDES_AM654_CFG(0x022c, 7, 0, 0x37);
+ SERDES_AM654_CFG(0x022c, 15, 8, 0x37);
+ SERDES_AM654_CFG(0x0230, 15, 8, 0x2a);
+ SERDES_AM654_CFG(0x0230, 23, 16, 0x2a);
+ SERDES_AM654_CFG(0x0240, 23, 16, 0x10);
+ SERDES_AM654_CFG(0x0240, 31, 24, 0x34);
+ SERDES_AM654_CFG(0x0244, 7, 0, 0x40);
+ SERDES_AM654_CFG(0x0244, 23, 16, 0x34);
+ SERDES_AM654_CFG(0x0248, 15, 8, 0x0d);
+ SERDES_AM654_CFG(0x0258, 15, 8, 0x16);
+ SERDES_AM654_CFG(0x0258, 23, 16, 0x84);
+ SERDES_AM654_CFG(0x0258, 31, 24, 0xf2);
+ SERDES_AM654_CFG(0x025c, 7, 0, 0x21);
+ SERDES_AM654_CFG(0x0260, 7, 0, 0x27);
+ SERDES_AM654_CFG(0x0260, 15, 8, 0x04);
+ SERDES_AM654_CFG(0x0268, 15, 8, 0x04);
+ SERDES_AM654_CFG(0x0288, 15, 8, 0x2c);
+ SERDES_AM654_CFG(0x0330, 31, 24, 0xa0);
+ SERDES_AM654_CFG(0x0338, 23, 16, 0x03);
+ SERDES_AM654_CFG(0x0338, 31, 24, 0x00);
+ SERDES_AM654_CFG(0x033c, 7, 0, 0x00);
+ SERDES_AM654_CFG(0x0344, 31, 24, 0x18);
+ SERDES_AM654_CFG(0x034c, 7, 0, 0x18);
+ SERDES_AM654_CFG(0x039c, 23, 16, 0x3b);
+ SERDES_AM654_CFG(0x0a04, 7, 0, 0x03);
+ SERDES_AM654_CFG(0x0a14, 31, 24, 0x3c);
+ SERDES_AM654_CFG(0x0a18, 15, 8, 0x3c);
+ SERDES_AM654_CFG(0x0a38, 7, 0, 0x3e);
+ SERDES_AM654_CFG(0x0a38, 15, 8, 0x3e);
+ SERDES_AM654_CFG(0x0ae0, 7, 0, 0x07);
+ SERDES_AM654_CFG(0x0b6c, 23, 16, 0xcd);
+ SERDES_AM654_CFG(0x0b6c, 31, 24, 0x04);
+ SERDES_AM654_CFG(0x0b98, 23, 16, 0x03);
+ SERDES_AM654_CFG(0x1400, 7, 0, 0x3f);
+ SERDES_AM654_CFG(0x1404, 23, 16, 0x6f);
+ SERDES_AM654_CFG(0x1404, 31, 24, 0x6f);
+ SERDES_AM654_CFG(0x140c, 7, 0, 0x6f);
+ SERDES_AM654_CFG(0x140c, 15, 8, 0x6f);
+ SERDES_AM654_CFG(0x1410, 15, 8, 0x27);
+ SERDES_AM654_CFG(0x1414, 7, 0, 0x0c);
+ SERDES_AM654_CFG(0x1414, 23, 16, 0x07);
+ SERDES_AM654_CFG(0x1418, 23, 16, 0x40);
+ SERDES_AM654_CFG(0x141c, 7, 0, 0x00);
+ SERDES_AM654_CFG(0x141c, 15, 8, 0x1f);
+ SERDES_AM654_CFG(0x1428, 31, 24, 0x08);
+ SERDES_AM654_CFG(0x1434, 31, 24, 0x00);
+ SERDES_AM654_CFG(0x1444, 7, 0, 0x94);
+ SERDES_AM654_CFG(0x1460, 31, 24, 0x7f);
+ SERDES_AM654_CFG(0x1464, 7, 0, 0x43);
+ SERDES_AM654_CFG(0x1464, 23, 16, 0x6f);
+ SERDES_AM654_CFG(0x1464, 31, 24, 0x43);
+ SERDES_AM654_CFG(0x1484, 23, 16, 0x8f);
+ SERDES_AM654_CFG(0x1498, 7, 0, 0x4f);
+ SERDES_AM654_CFG(0x1498, 23, 16, 0x4f);
+ SERDES_AM654_CFG(0x007c, 31, 24, 0x0d);
+ SERDES_AM654_CFG(0x0b90, 15, 8, 0x0f);
+
+ return 0;
+}
+
+static int serdes_am654_pcie_init(struct serdes_am654 *phy)
{
- struct serdes_am654 *phy = phy_get_drvdata(x);
int ret;
ret = regmap_field_write(phy->config_version, VERSION);
@@ -220,11 +303,28 @@ static int serdes_am654_init(struct phy *x)
return 0;
}
+static int serdes_am654_init(struct phy *x)
+{
+ struct serdes_am654 *phy = phy_get_drvdata(x);
+
+ switch (phy->type) {
+ case PHY_TYPE_PCIE:
+ return serdes_am654_pcie_init(phy);
+ case PHY_TYPE_USB3:
+ return serdes_am654_usb3_init(phy);
+ default:
+ return -EINVAL;
+ }
+}
+
static int serdes_am654_reset(struct phy *x)
{
struct serdes_am654 *phy = phy_get_drvdata(x);
int ret;
+ serdes_am654_disable_pll(phy);
+ serdes_am654_disable_txrx(phy);
+
ret = regmap_field_write(phy->por_en, 0x1);
if (ret)
return ret;
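The SERDES_AM654_CFG(offset, a, b, val) macro in the hunk above writes val into bit field [a:b] of a register by building GENMASK(a, b) and shifting val up to bit b before calling regmap_update_bits(). A small stand-alone C sketch of that computation (the helpers below are illustrative, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

/* Bits a..b set (a >= b), like the kernel's GENMASK() for 32-bit values. */
static uint32_t genmask32(unsigned int a, unsigned int b)
{
	return (uint32_t)((((uint64_t)1 << (a + 1)) - 1) & ~(((uint64_t)1 << b) - 1));
}

/* Same effect as regmap_update_bits(), applied to a plain variable. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* Equivalent of SERDES_AM654_CFG(0x0004, 15, 8, 0x02): field [15:8] = 0x02 */
	reg = update_bits(reg, genmask32(15, 8), 0x02u << 8);
	printf("0x%08x\n", reg);	/* prints 0xffff02ff */
	return 0;
}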
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 7b51045df783..30ea5b207285 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
+#include <dt-bindings/phy/phy.h>
#define WIZ_SERDES_CTRL 0x404
#define WIZ_SERDES_TOP_CTRL 0x408
@@ -78,6 +79,8 @@ static const struct reg_field p_enable[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(3), 30, 31),
};
+enum p_enable { P_ENABLE = 2, P_ENABLE_FORCE = 1, P_ENABLE_DISABLE = 0 };
+
static const struct reg_field p_align[WIZ_MAX_LANES] = {
REG_FIELD(WIZ_LANECTL(0), 29, 29),
REG_FIELD(WIZ_LANECTL(1), 29, 29),
@@ -220,6 +223,7 @@ struct wiz {
struct reset_controller_dev wiz_phy_reset_dev;
struct gpio_desc *gpio_typec_dir;
int typec_dir_delay;
+ u32 lane_phy_type[WIZ_MAX_LANES];
};
static int wiz_reset(struct wiz *wiz)
@@ -242,12 +246,17 @@ static int wiz_reset(struct wiz *wiz)
static int wiz_mode_select(struct wiz *wiz)
{
u32 num_lanes = wiz->num_lanes;
+ enum wiz_lane_standard_mode mode;
int ret;
int i;
for (i = 0; i < num_lanes; i++) {
- ret = regmap_field_write(wiz->p_standard_mode[i],
- LANE_MODE_GEN4);
+ if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
+ mode = LANE_MODE_GEN1;
+ else
+ mode = LANE_MODE_GEN4;
+
+ ret = regmap_field_write(wiz->p_standard_mode[i], mode);
if (ret)
return ret;
}
@@ -707,7 +716,7 @@ static int wiz_phy_reset_assert(struct reset_controller_dev *rcdev,
return ret;
}
- ret = regmap_field_write(wiz->p_enable[id - 1], false);
+ ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_DISABLE);
return ret;
}
@@ -734,7 +743,11 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
return ret;
}
- ret = regmap_field_write(wiz->p_enable[id - 1], true);
+ if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
+ ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
+ else
+ ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_FORCE);
+
return ret;
}
@@ -761,6 +774,40 @@ static const struct of_device_id wiz_id_table[] = {
};
MODULE_DEVICE_TABLE(of, wiz_id_table);
+static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
+{
+ struct device_node *serdes, *subnode;
+
+ serdes = of_get_child_by_name(dev->of_node, "serdes");
+ if (!serdes) {
+ dev_err(dev, "%s: Getting \"serdes\"-node failed\n", __func__);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(serdes, subnode) {
+ u32 reg, num_lanes = 1, phy_type = PHY_NONE;
+ int ret, i;
+
+ ret = of_property_read_u32(subnode, "reg", &reg);
+ if (ret) {
+ dev_err(dev,
+ "%s: Reading \"reg\" from \"%s\" failed: %d\n",
+ __func__, subnode->name, ret);
+ return ret;
+ }
+ of_property_read_u32(subnode, "cdns,num-lanes", &num_lanes);
+ of_property_read_u32(subnode, "cdns,phy-type", &phy_type);
+
+ dev_dbg(dev, "%s: Lanes %u-%u have phy-type %u\n", __func__,
+ reg, reg + num_lanes - 1, phy_type);
+
+ for (i = reg; i < reg + num_lanes; i++)
+ wiz->lane_phy_type[i] = phy_type;
+ }
+
+ return 0;
+}
+
static int wiz_probe(struct platform_device *pdev)
{
struct reset_controller_dev *phy_reset_dev;
@@ -794,8 +841,10 @@ static int wiz_probe(struct platform_device *pdev)
}
base = devm_ioremap(dev, res.start, resource_size(&res));
- if (!base)
+ if (!base) {
+ ret = -ENOMEM;
goto err_addr_to_resource;
+ }
regmap = devm_regmap_init_mmio(dev, base, &wiz_regmap_config);
if (IS_ERR(regmap)) {
@@ -812,6 +861,7 @@ static int wiz_probe(struct platform_device *pdev)
if (num_lanes > WIZ_MAX_LANES) {
dev_err(dev, "Cannot support %d lanes\n", num_lanes);
+ ret = -ENODEV;
goto err_addr_to_resource;
}
@@ -844,6 +894,10 @@ static int wiz_probe(struct platform_device *pdev)
}
}
+ ret = wiz_get_lane_phy_types(dev, wiz);
+ if (ret)
+ return ret;
+
wiz->dev = dev;
wiz->regmap = regmap;
wiz->num_lanes = num_lanes;
@@ -897,6 +951,7 @@ static int wiz_probe(struct platform_device *pdev)
serdes_pdev = of_platform_device_create(child_node, NULL, dev);
if (!serdes_pdev) {
dev_WARN(dev, "Unable to create SERDES platform device\n");
+ ret = -ENOMEM;
goto err_pdev_create;
}
wiz->serdes_pdev = serdes_pdev;
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 3d74629d7423..cb2dd3230fa7 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * omap-usb2.c - USB PHY, talking to musb controller in OMAP.
+ * omap-usb2.c - USB PHY, talking to USB controller on TI SoCs.
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
@@ -23,13 +23,65 @@
#include <linux/regmap.h>
#include <linux/of_platform.h>
-#define USB2PHY_DISCON_BYP_LATCH (1 << 31)
-#define USB2PHY_ANA_CONFIG1 0x4c
+#define USB2PHY_ANA_CONFIG1 0x4c
+#define USB2PHY_DISCON_BYP_LATCH BIT(31)
+/* SoC Specific USB2_OTG register definitions */
#define AM654_USB2_OTG_PD BIT(8)
#define AM654_USB2_VBUS_DET_EN BIT(5)
#define AM654_USB2_VBUSVALID_DET_EN BIT(4)
+#define OMAP_DEV_PHY_PD BIT(0)
+#define OMAP_USB2_PHY_PD BIT(28)
+
+#define AM437X_USB2_PHY_PD BIT(0)
+#define AM437X_USB2_OTG_PD BIT(1)
+#define AM437X_USB2_OTGVDET_EN BIT(19)
+#define AM437X_USB2_OTGSESSEND_EN BIT(20)
+
+/* Driver Flags */
+#define OMAP_USB2_HAS_START_SRP BIT(0)
+#define OMAP_USB2_HAS_SET_VBUS BIT(1)
+#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT BIT(2)
+
+struct omap_usb {
+ struct usb_phy phy;
+ struct phy_companion *comparator;
+ void __iomem *pll_ctrl_base;
+ void __iomem *phy_base;
+ struct device *dev;
+ struct device *control_dev;
+ struct clk *wkupclk;
+ struct clk *optclk;
+ u8 flags;
+	struct regmap *syscon_phy_power; /* ctrl. reg. access */
+ unsigned int power_reg; /* power reg. index within syscon */
+ u32 mask;
+ u32 power_on;
+ u32 power_off;
+};
+
+#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
+
+struct usb_phy_data {
+ const char *label;
+ u8 flags;
+ u32 mask;
+ u32 power_on;
+ u32 power_off;
+};
+
+static inline u32 omap_usb_readl(void __iomem *addr, unsigned int offset)
+{
+ return __raw_readl(addr + offset);
+}
+
+static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
+ u32 data)
+{
+ __raw_writel(data, addr + offset);
+}
+
/**
 * omap_usb2_set_comparator - links the comparator present in the system with
* this phy
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 834c59950d1c..8828613c4e0e 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,7 +82,7 @@ config PINCTRL_AT91
config PINCTRL_AT91PIO4
bool "AT91 PIO4 pinctrl driver"
depends on OF
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
@@ -95,6 +95,7 @@ config PINCTRL_AT91PIO4
config PINCTRL_AMD
tristate "AMD GPIO pin control"
depends on HAS_IOMEM
+ depends on ACPI || COMPILE_TEST
select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
@@ -172,15 +173,22 @@ config PINCTRL_GEMINI
select GENERIC_PINCONF
select MFD_SYSCON
+config PINCTRL_MCP23S08_I2C
+ tristate
+ select REGMAP_I2C
+
+config PINCTRL_MCP23S08_SPI
+ tristate
+ select REGMAP_SPI
+
config PINCTRL_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
depends on SPI_MASTER || I2C
- depends on I2C || I2C=n
select GPIOLIB
select GPIOLIB_IRQCHIP
- select REGMAP_I2C if I2C
- select REGMAP_SPI if SPI_MASTER
select GENERIC_PINCONF
+ select PINCTRL_MCP23S08_I2C if I2C
+ select PINCTRL_MCP23S08_SPI if SPI_MASTER
help
SPI/I2C driver for Microchip MCP23S08 / MCP23S17 / MCP23S18 /
MCP23008 / MCP23017 / MCP23018 I/O expanders.
@@ -435,6 +443,7 @@ config PINCTRL_TB10X
config PINCTRL_EQUILIBRIUM
tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
depends on OF && HAS_IOMEM
+ depends on X86 || COMPILE_TEST
select PINMUX
select PINCONF
select GPIOLIB
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 0b36a1cfca8a..1731b2154df9 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
+obj-$(CONFIG_PINCTRL_MCP23S08_I2C) += pinctrl-mcp23s08_i2c.o
+obj-$(CONFIG_PINCTRL_MCP23S08_SPI) += pinctrl-mcp23s08_spi.o
obj-$(CONFIG_PINCTRL_MCP23S08) += pinctrl-mcp23s08.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index f690fc5cd688..71e666178300 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1406,7 +1406,7 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
pdata->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->reg_base)) {
dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
- return -ENODEV;
+ return PTR_ERR(pdata->reg_base);
}
/* Initialize the dynamic part of pinctrl_desc */
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 06bd2b70af3c..1d21129f7751 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -76,6 +77,7 @@
struct bcm2835_pinctrl {
struct device *dev;
void __iomem *base;
+ int *wake_irq;
/* note: locking assumes each bank will have its own unsigned long */
unsigned long enabled_irq_map[BCM2835_NUM_BANKS];
@@ -435,6 +437,11 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(host_chip, desc);
}
+static irqreturn_t bcm2835_gpio_wake_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
static inline void __bcm2835_gpio_irq_config(struct bcm2835_pinctrl *pc,
unsigned reg, unsigned offset, bool enable)
{
@@ -634,6 +641,34 @@ static void bcm2835_gpio_irq_ack(struct irq_data *data)
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
}
+static int bcm2835_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+ unsigned gpio = irqd_to_hwirq(data);
+ unsigned int irqgroup;
+ int ret = -EINVAL;
+
+ if (!pc->wake_irq)
+ return ret;
+
+ if (gpio <= 27)
+ irqgroup = 0;
+ else if (gpio >= 28 && gpio <= 45)
+ irqgroup = 1;
+ else if (gpio >= 46 && gpio <= 57)
+ irqgroup = 2;
+ else
+ return ret;
+
+ if (on)
+ ret = enable_irq_wake(pc->wake_irq[irqgroup]);
+ else
+ ret = disable_irq_wake(pc->wake_irq[irqgroup]);
+
+ return ret;
+}
+
static struct irq_chip bcm2835_gpio_irq_chip = {
.name = MODULE_NAME,
.irq_enable = bcm2835_gpio_irq_enable,
@@ -642,6 +677,8 @@ static struct irq_chip bcm2835_gpio_irq_chip = {
.irq_ack = bcm2835_gpio_irq_ack,
.irq_mask = bcm2835_gpio_irq_disable,
.irq_unmask = bcm2835_gpio_irq_enable,
+ .irq_set_wake = bcm2835_gpio_irq_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1137,6 +1174,10 @@ static const struct of_device_id bcm2835_pinctrl_match[] = {
.compatible = "brcm,bcm2711-gpio",
.data = &bcm2711_plat_data,
},
+ {
+ .compatible = "brcm,bcm7211-gpio",
+ .data = &bcm2711_plat_data,
+ },
{}
};
@@ -1150,6 +1191,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
struct resource iomem;
int err, i;
const struct of_device_id *match;
+ int is_7211 = 0;
BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_pins) != BCM2711_NUM_GPIOS);
BUILD_BUG_ON(ARRAY_SIZE(bcm2835_gpio_groups) != BCM2711_NUM_GPIOS);
@@ -1176,6 +1218,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
pdata = match->data;
+ is_7211 = of_device_is_compatible(np, "brcm,bcm7211-gpio");
pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;
@@ -1210,6 +1253,15 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
+
+ if (is_7211) {
+ pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
+ sizeof(*pc->wake_irq),
+ GFP_KERNEL);
+ if (!pc->wake_irq)
+ return -ENOMEM;
+ }
+
/*
* Use the same handler for all groups: this is necessary
* since we use one gpiochip to cover all lines - the
@@ -1217,8 +1269,34 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
* bank that was firing the IRQ and look up the per-group
* and bank data.
*/
- for (i = 0; i < BCM2835_NUM_IRQS; i++)
+ for (i = 0; i < BCM2835_NUM_IRQS; i++) {
+ int len;
+ char *name;
+
girq->parents[i] = irq_of_parse_and_map(np, i);
+ if (!is_7211)
+ continue;
+
+		/* Skip over the "all banks" interrupt */
+ pc->wake_irq[i] = irq_of_parse_and_map(np, i +
+ BCM2835_NUM_IRQS + 1);
+
+ len = strlen(dev_name(pc->dev)) + 16;
+ name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
+
+ /* These are optional interrupts */
+ err = devm_request_irq(dev, pc->wake_irq[i],
+ bcm2835_gpio_wake_irq_handler,
+ IRQF_SHARED, name, pc);
+ if (err)
+ dev_warn(dev, "unable to request wake IRQ %d\n",
+ pc->wake_irq[i]);
+ }
+
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index c784663b00ad..4ca44dd69e53 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -165,6 +165,13 @@ config PINCTRL_IMX8QXP
help
Say Y here to enable the imx8qxp pinctrl driver
+config PINCTRL_IMX8DXL
+ bool "IMX8DXL pinctrl driver"
+ depends on IMX_SCU && ARCH_MXC && ARM64
+ select PINCTRL_IMX_SCU
+ help
+ Say Y here to enable the imx8dxl pinctrl driver
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 0ebd3af21e4d..c61722565289 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_PINCTRL_IMX8MP) += pinctrl-imx8mp.o
obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o
obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o
obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o
+obj-$(CONFIG_PINCTRL_IMX8DXL) += pinctrl-imx8dxl.o
obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 9f42036c5fbb..cb7e0f08d2cf 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -774,16 +774,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
return 0;
}
-/*
- * imx_free_resources() - free memory used by this driver
- * @info: info driver instance
- */
-static void imx_free_resources(struct imx_pinctrl *ipctl)
-{
- if (ipctl->pctl)
- pinctrl_unregister(ipctl->pctl);
-}
-
int imx_pinctrl_probe(struct platform_device *pdev,
const struct imx_pinctrl_soc_info *info)
{
@@ -834,12 +824,13 @@ int imx_pinctrl_probe(struct platform_device *pdev,
return -EINVAL;
}
- ipctl->input_sel_base = of_iomap(np, 0);
+ ipctl->input_sel_base = devm_of_iomap(&pdev->dev, np,
+ 0, NULL);
of_node_put(np);
- if (!ipctl->input_sel_base) {
+ if (IS_ERR(ipctl->input_sel_base)) {
dev_err(&pdev->dev,
"iomuxc input select base address not found\n");
- return -ENOMEM;
+ return PTR_ERR(ipctl->input_sel_base);
}
}
}
@@ -874,23 +865,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
&ipctl->pctl);
if (ret) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
- goto free;
+ return ret;
}
ret = imx_pinctrl_probe_dt(pdev, ipctl);
if (ret) {
dev_err(&pdev->dev, "fail to probe dt properties\n");
- goto free;
+ return ret;
}
dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
return pinctrl_enable(ipctl->pctl);
-
-free:
- imx_free_resources(ipctl);
-
- return ret;
}
static int __maybe_unused imx_pinctrl_suspend(struct device *dev)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index c00d0022d311..08d110078c43 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -60,7 +60,7 @@ struct imx1_pinctrl {
/*
* IMX1 IOMUXC manages the pins based on ports. Each port has 32 pins. IOMUX
- * control register are seperated into function, output configuration, input
+ * control registers are separated into function, output configuration, input
* configuration A, input configuration B, GPIO in use and data direction.
*
* Those controls that are represented by 1 bit have a direct mapping between
@@ -638,7 +638,6 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (ret) {
- pinctrl_unregister(ipctl->pctl);
dev_err(&pdev->dev, "Failed to populate subdevices\n");
return ret;
}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
new file mode 100644
index 000000000000..7f32e57b7f6a
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019~2020 NXP
+ */
+
+#include <dt-bindings/pinctrl/pads-imx8dxl.h>
+#include <linux/err.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
+static const struct pinctrl_pin_desc imx8dxl_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMX8DXL_PCIE_CTRL0_PERST_B),
+ IMX_PINCTRL_PIN(IMX8DXL_PCIE_CTRL0_CLKREQ_B),
+ IMX_PINCTRL_PIN(IMX8DXL_PCIE_CTRL0_WAKE_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC0),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC1),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC2),
+ IMX_PINCTRL_PIN(IMX8DXL_USB_SS3_TC3),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_CLK),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_CMD),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA0),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA1),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA2),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA3),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA4),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA5),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA6),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_DATA7),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_STROBE),
+ IMX_PINCTRL_PIN(IMX8DXL_EMMC0_RESET_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_RESET_B),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_VSELECT),
+ IMX_PINCTRL_PIN(IMX8DXL_CTL_NAND_RE_P_N),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_WP),
+ IMX_PINCTRL_PIN(IMX8DXL_USDHC1_CD_B),
+ IMX_PINCTRL_PIN(IMX8DXL_CTL_NAND_DQS_P_N),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_TXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_RGMII_RXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_REFCLK_125M_25M),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_MDIO),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET0_MDC),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXC),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD3),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD2),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_TXD1),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RXD0),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_RGMII_RX_CTL),
+ IMX_PINCTRL_PIN(IMX8DXL_ENET1_REFCLK_125M_25M),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_SCK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_SDO),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_SDI),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_CS0),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI3_CS1),
+ IMX_PINCTRL_PIN(IMX8DXL_MCLK_IN1),
+ IMX_PINCTRL_PIN(IMX8DXL_MCLK_IN0),
+ IMX_PINCTRL_PIN(IMX8DXL_MCLK_OUT0),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_RTS_B),
+ IMX_PINCTRL_PIN(IMX8DXL_UART1_CTS_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_SCK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_SDI),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_SDO),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_CS1),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI0_CS0),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN1),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN0),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN3),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN2),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN5),
+ IMX_PINCTRL_PIN(IMX8DXL_ADC_IN4),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN0_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN0_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN1_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN1_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN2_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_FLEXCAN2_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART0_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART0_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART2_TX),
+ IMX_PINCTRL_PIN(IMX8DXL_UART2_RX),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH),
+ IMX_PINCTRL_PIN(IMX8DXL_JTAG_TRST_B),
+ IMX_PINCTRL_PIN(IMX8DXL_PMIC_I2C_SCL),
+ IMX_PINCTRL_PIN(IMX8DXL_PMIC_I2C_SDA),
+ IMX_PINCTRL_PIN(IMX8DXL_PMIC_INT_B),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_GPIO0_00),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_GPIO0_01),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_PMIC_STANDBY),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_BOOT_MODE1),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_BOOT_MODE0),
+ IMX_PINCTRL_PIN(IMX8DXL_SCU_BOOT_MODE2),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT1),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT2),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT3),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_OUT4),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN0),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN1),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN2),
+ IMX_PINCTRL_PIN(IMX8DXL_SNVS_TAMPER_IN3),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_SCK),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_SDO),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_SDI),
+ IMX_PINCTRL_PIN(IMX8DXL_SPI1_CS0),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA1),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA0),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA3),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DATA2),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_SS0_B),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_DQS),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0A_SCLK),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_SCLK),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DQS),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA1),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA0),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA3),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_DATA2),
+ IMX_PINCTRL_PIN(IMX8DXL_QSPI0B_SS0_B),
+ IMX_PINCTRL_PIN(IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B)
+};
+
+static struct imx_pinctrl_soc_info imx8dxl_pinctrl_info = {
+ .pins = imx8dxl_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx8dxl_pinctrl_pads),
+ .flags = IMX_USE_SCU,
+};
+
+static const struct of_device_id imx8dxl_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx8dxl-iomuxc", },
+ { /* sentinel */ }
+};
+
+static int imx8dxl_pinctrl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = imx_pinctrl_sc_ipc_init(pdev);
+ if (ret)
+ return ret;
+
+ return imx_pinctrl_probe(pdev, &imx8dxl_pinctrl_info);
+}
+
+static struct platform_driver imx8dxl_pinctrl_driver = {
+ .driver = {
+ .name = "fsl,imx8dxl-iomuxc",
+ .of_match_table = of_match_ptr(imx8dxl_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8dxl_pinctrl_probe,
+};
+
+static int __init imx8dxl_pinctrl_init(void)
+{
+ return platform_driver_register(&imx8dxl_pinctrl_driver);
+}
+arch_initcall(imx8dxl_pinctrl_init);
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index ee440ec4c94c..787833e343a4 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -111,6 +111,14 @@ config PINCTRL_ICELAKE
This pinctrl driver provides an interface that allows configuring
of Intel Ice Lake PCH pins and using them as GPIOs.
+config PINCTRL_JASPERLAKE
+ tristate "Intel Jasper Lake PCH pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Jasper Lake PCH pins and using them as GPIOs.
+
config PINCTRL_LEWISBURG
tristate "Intel Lewisburg pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index f60f99cfa7aa..f6f63eb8100f 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o
+obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
obj-$(CONFIG_PINCTRL_TIGERLAKE) += pinctrl-tigerlake.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 9b821c9cbd16..0ff7c55173da 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1506,8 +1506,7 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
{
struct platform_device *pdev = to_platform_device(vg->dev);
struct gpio_chip *gc;
- struct resource *irq_rc;
- int ret;
+ int irq, ret;
/* Set up gpio chip */
vg->chip = byt_gpio_chip;
@@ -1527,8 +1526,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
#endif
/* set up interrupts */
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq_rc && irq_rc->start) {
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0) {
struct gpio_irq_chip *girq;
vg->irqchip.name = "BYT-GPIO",
@@ -1548,7 +1547,7 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
}
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index f51b27bbf9f1..515f57a0d180 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -30,8 +30,6 @@
.gpio_base = (g), \
}
-#define CNL_NO_GPIO -1
-
#define CNL_COMMUNITY(b, s, e, o, g) \
{ \
.barno = (b), \
@@ -377,27 +375,27 @@ static const struct intel_padgroup cnlh_community0_gpps[] = {
};
static const struct intel_padgroup cnlh_community1_gpps[] = {
- CNL_GPP(0, 51, 74, 64), /* GPP_C */
- CNL_GPP(1, 75, 98, 96), /* GPP_D */
- CNL_GPP(2, 99, 106, 128), /* GPP_G */
- CNL_GPP(3, 107, 114, CNL_NO_GPIO), /* AZA */
- CNL_GPP(4, 115, 146, 160), /* vGPIO_0 */
- CNL_GPP(5, 147, 154, CNL_NO_GPIO), /* vGPIO_1 */
+ CNL_GPP(0, 51, 74, 64), /* GPP_C */
+ CNL_GPP(1, 75, 98, 96), /* GPP_D */
+ CNL_GPP(2, 99, 106, 128), /* GPP_G */
+ CNL_GPP(3, 107, 114, INTEL_GPIO_BASE_NOMAP), /* AZA */
+ CNL_GPP(4, 115, 146, 160), /* vGPIO_0 */
+ CNL_GPP(5, 147, 154, INTEL_GPIO_BASE_NOMAP), /* vGPIO_1 */
};
static const struct intel_padgroup cnlh_community3_gpps[] = {
- CNL_GPP(0, 155, 178, 192), /* GPP_K */
- CNL_GPP(1, 179, 202, 224), /* GPP_H */
- CNL_GPP(2, 203, 215, 256), /* GPP_E */
- CNL_GPP(3, 216, 239, 288), /* GPP_F */
- CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
+ CNL_GPP(0, 155, 178, 192), /* GPP_K */
+ CNL_GPP(1, 179, 202, 224), /* GPP_H */
+ CNL_GPP(2, 203, 215, 256), /* GPP_E */
+ CNL_GPP(3, 216, 239, 288), /* GPP_F */
+ CNL_GPP(4, 240, 248, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_padgroup cnlh_community4_gpps[] = {
- CNL_GPP(0, 249, 259, CNL_NO_GPIO), /* CPU */
- CNL_GPP(1, 260, 268, CNL_NO_GPIO), /* JTAG */
- CNL_GPP(2, 269, 286, 320), /* GPP_I */
- CNL_GPP(3, 287, 298, 352), /* GPP_J */
+ CNL_GPP(0, 249, 259, INTEL_GPIO_BASE_NOMAP), /* CPU */
+ CNL_GPP(1, 260, 268, INTEL_GPIO_BASE_NOMAP), /* JTAG */
+ CNL_GPP(2, 269, 286, 320), /* GPP_I */
+ CNL_GPP(3, 287, 298, 352), /* GPP_J */
};
static const unsigned int cnlh_spi0_pins[] = { 40, 41, 42, 43 };
@@ -790,25 +788,25 @@ static const struct intel_function cnllp_functions[] = {
};
static const struct intel_padgroup cnllp_community0_gpps[] = {
- CNL_GPP(0, 0, 24, 0), /* GPP_A */
- CNL_GPP(1, 25, 50, 32), /* GPP_B */
- CNL_GPP(2, 51, 58, 64), /* GPP_G */
- CNL_GPP(3, 59, 67, CNL_NO_GPIO), /* SPI */
+ CNL_GPP(0, 0, 24, 0), /* GPP_A */
+ CNL_GPP(1, 25, 50, 32), /* GPP_B */
+ CNL_GPP(2, 51, 58, 64), /* GPP_G */
+ CNL_GPP(3, 59, 67, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_padgroup cnllp_community1_gpps[] = {
- CNL_GPP(0, 68, 92, 96), /* GPP_D */
- CNL_GPP(1, 93, 116, 128), /* GPP_F */
- CNL_GPP(2, 117, 140, 160), /* GPP_H */
- CNL_GPP(3, 141, 172, 192), /* vGPIO */
- CNL_GPP(4, 173, 180, 224), /* vGPIO */
+ CNL_GPP(0, 68, 92, 96), /* GPP_D */
+ CNL_GPP(1, 93, 116, 128), /* GPP_F */
+ CNL_GPP(2, 117, 140, 160), /* GPP_H */
+ CNL_GPP(3, 141, 172, 192), /* vGPIO */
+ CNL_GPP(4, 173, 180, 224), /* vGPIO */
};
static const struct intel_padgroup cnllp_community4_gpps[] = {
- CNL_GPP(0, 181, 204, 256), /* GPP_C */
- CNL_GPP(1, 205, 228, 288), /* GPP_E */
- CNL_GPP(2, 229, 237, CNL_NO_GPIO), /* JTAG */
- CNL_GPP(3, 238, 243, CNL_NO_GPIO), /* HVCMOS */
+ CNL_GPP(0, 181, 204, 256), /* GPP_C */
+ CNL_GPP(1, 205, 228, 288), /* GPP_E */
+ CNL_GPP(2, 229, 237, INTEL_GPIO_BASE_NOMAP), /* JTAG */
+ CNL_GPP(3, 238, 243, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
};
static const struct intel_community cnllp_communities[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 1093a6105d40..8e3953a223d0 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -35,18 +35,18 @@
#define CHV_PADCTRL0 0x000
#define CHV_PADCTRL0_INTSEL_SHIFT 28
-#define CHV_PADCTRL0_INTSEL_MASK (0xf << CHV_PADCTRL0_INTSEL_SHIFT)
+#define CHV_PADCTRL0_INTSEL_MASK GENMASK(31, 28)
#define CHV_PADCTRL0_TERM_UP BIT(23)
#define CHV_PADCTRL0_TERM_SHIFT 20
-#define CHV_PADCTRL0_TERM_MASK (7 << CHV_PADCTRL0_TERM_SHIFT)
+#define CHV_PADCTRL0_TERM_MASK GENMASK(22, 20)
#define CHV_PADCTRL0_TERM_20K 1
#define CHV_PADCTRL0_TERM_5K 2
#define CHV_PADCTRL0_TERM_1K 4
#define CHV_PADCTRL0_PMODE_SHIFT 16
-#define CHV_PADCTRL0_PMODE_MASK (0xf << CHV_PADCTRL0_PMODE_SHIFT)
+#define CHV_PADCTRL0_PMODE_MASK GENMASK(19, 16)
#define CHV_PADCTRL0_GPIOEN BIT(15)
#define CHV_PADCTRL0_GPIOCFG_SHIFT 8
-#define CHV_PADCTRL0_GPIOCFG_MASK (7 << CHV_PADCTRL0_GPIOCFG_SHIFT)
+#define CHV_PADCTRL0_GPIOCFG_MASK GENMASK(10, 8)
#define CHV_PADCTRL0_GPIOCFG_GPIO 0
#define CHV_PADCTRL0_GPIOCFG_GPO 1
#define CHV_PADCTRL0_GPIOCFG_GPI 2
@@ -57,58 +57,17 @@
#define CHV_PADCTRL1 0x004
#define CHV_PADCTRL1_CFGLOCK BIT(31)
#define CHV_PADCTRL1_INVRXTX_SHIFT 4
-#define CHV_PADCTRL1_INVRXTX_MASK (0xf << CHV_PADCTRL1_INVRXTX_SHIFT)
-#define CHV_PADCTRL1_INVRXTX_TXENABLE (2 << CHV_PADCTRL1_INVRXTX_SHIFT)
+#define CHV_PADCTRL1_INVRXTX_MASK GENMASK(7, 4)
+#define CHV_PADCTRL1_INVRXTX_RXDATA BIT(6)
+#define CHV_PADCTRL1_INVRXTX_TXENABLE BIT(5)
#define CHV_PADCTRL1_ODEN BIT(3)
-#define CHV_PADCTRL1_INVRXTX_RXDATA (4 << CHV_PADCTRL1_INVRXTX_SHIFT)
-#define CHV_PADCTRL1_INTWAKECFG_MASK 7
+#define CHV_PADCTRL1_INTWAKECFG_MASK GENMASK(2, 0)
#define CHV_PADCTRL1_INTWAKECFG_FALLING 1
#define CHV_PADCTRL1_INTWAKECFG_RISING 2
#define CHV_PADCTRL1_INTWAKECFG_BOTH 3
#define CHV_PADCTRL1_INTWAKECFG_LEVEL 4
/**
- * struct chv_alternate_function - A per group or per pin alternate function
- * @pin: Pin number (only used in per pin configs)
- * @mode: Mode the pin should be set in
- * @invert_oe: Invert OE for this pin
- */
-struct chv_alternate_function {
- unsigned int pin;
- u8 mode;
- bool invert_oe;
-};
-
-/**
- * struct chv_pincgroup - describes a CHV pin group
- * @name: Name of the group
- * @pins: An array of pins in this group
- * @npins: Number of pins in this group
- * @altfunc: Alternate function applied to all pins in this group
- * @overrides: Alternate function override per pin or %NULL if not used
- * @noverrides: Number of per pin alternate function overrides if
- * @overrides != NULL.
- */
-struct chv_pingroup {
- const char *name;
- const unsigned int *pins;
- size_t npins;
- struct chv_alternate_function altfunc;
- const struct chv_alternate_function *overrides;
- size_t noverrides;
-};
-
-/**
- * struct chv_gpio_pinrange - A range of pins that can be used as GPIOs
- * @base: Start pin number
- * @npins: Number of pins in this range
- */
-struct chv_gpio_pinrange {
- unsigned int base;
- unsigned int npins;
-};
-
-/**
* struct chv_community - A community specific configuration
* @uid: ACPI _UID used to match the community
* @pins: All pins in this community
@@ -117,8 +76,8 @@ struct chv_gpio_pinrange {
* @ngroups: Number of groups
* @functions: All functions in this community
* @nfunctions: Number of functions
- * @gpio_ranges: An array of GPIO ranges in this community
- * @ngpio_ranges: Number of GPIO ranges
+ * @gpps: Pad groups
+ * @ngpps: Number of pad groups in this community
* @nirqs: Total number of IRQs this community can generate
* @acpi_space_id: An address space ID for ACPI OpRegion handler
*/
@@ -126,12 +85,12 @@ struct chv_community {
const char *uid;
const struct pinctrl_pin_desc *pins;
size_t npins;
- const struct chv_pingroup *groups;
+ const struct intel_pingroup *groups;
size_t ngroups;
const struct intel_function *functions;
size_t nfunctions;
- const struct chv_gpio_pinrange *gpio_ranges;
- size_t ngpio_ranges;
+ const struct intel_padgroup *gpps;
+ size_t ngpps;
size_t nirqs;
acpi_adr_space_type acpi_space_id;
};
@@ -173,37 +132,14 @@ struct chv_pinctrl {
struct chv_pin_context *saved_pin_context;
};
-#define ALTERNATE_FUNCTION(p, m, i) \
- { \
- .pin = (p), \
- .mode = (m), \
- .invert_oe = (i), \
- }
+#define PINMODE_INVERT_OE BIT(15)
-#define PIN_GROUP_WITH_ALT(n, p, m, i) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .altfunc.mode = (m), \
- .altfunc.invert_oe = (i), \
- }
+#define PINMODE(m, i) ((m) | ((i) * PINMODE_INVERT_OE))
-#define PIN_GROUP_WITH_OVERRIDE(n, p, m, i, o) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .altfunc.mode = (m), \
- .altfunc.invert_oe = (i), \
- .overrides = (o), \
- .noverrides = ARRAY_SIZE((o)), \
- }
-
-#define GPIO_PINRANGE(start, end) \
+#define CHV_GPP(start, end) \
{ \
.base = (start), \
- .npins = (end) - (start) + 1, \
+ .size = (end) - (start) + 1, \
}
static const struct pinctrl_pin_desc southwest_pins[] = {
@@ -288,40 +224,37 @@ static const unsigned southwest_i2c6_pins[] = { 47, 51 };
static const unsigned southwest_i2c_nfc_pins[] = { 49, 52 };
static const unsigned southwest_spi3_pins[] = { 76, 79, 80, 81, 82 };
-/* LPE I2S TXD pins need to have invert_oe set */
-static const struct chv_alternate_function southwest_lpe_altfuncs[] = {
- ALTERNATE_FUNCTION(30, 1, true),
- ALTERNATE_FUNCTION(34, 1, true),
- ALTERNATE_FUNCTION(97, 1, true),
+/* Some of LPE I2S TXD pins need to have OE inversion set */
+static const unsigned int southwest_lpe_altfuncs[] = {
+ PINMODE(1, 1), PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 0), /* 30, 31, 32, 33 */
+ PINMODE(1, 1), PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 0), /* 34, 35, 36, 37 */
+ PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 0), PINMODE(1, 1), /* 92, 94, 96, 97 */
};
/*
 * Two spi3 chipselects are available in a different mode than the main spi3
- * functionality, which is using mode 1.
+ * functionality, which is using mode 2.
*/
-static const struct chv_alternate_function southwest_spi3_altfuncs[] = {
- ALTERNATE_FUNCTION(76, 3, false),
- ALTERNATE_FUNCTION(80, 3, false),
+static const unsigned int southwest_spi3_altfuncs[] = {
+ PINMODE(3, 0), PINMODE(2, 0), PINMODE(3, 0), PINMODE(2, 0), /* 76, 79, 80, 81 */
+ PINMODE(2, 0), /* 82 */
};
-static const struct chv_pingroup southwest_groups[] = {
- PIN_GROUP_WITH_ALT("uart0_grp", southwest_uart0_pins, 2, false),
- PIN_GROUP_WITH_ALT("uart1_grp", southwest_uart1_pins, 1, false),
- PIN_GROUP_WITH_ALT("uart2_grp", southwest_uart2_pins, 1, false),
- PIN_GROUP_WITH_ALT("hda_grp", southwest_hda_pins, 2, false),
- PIN_GROUP_WITH_ALT("i2c0_grp", southwest_i2c0_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c1_grp", southwest_i2c1_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c2_grp", southwest_i2c2_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c3_grp", southwest_i2c3_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c4_grp", southwest_i2c4_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c5_grp", southwest_i2c5_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c6_grp", southwest_i2c6_pins, 1, true),
- PIN_GROUP_WITH_ALT("i2c_nfc_grp", southwest_i2c_nfc_pins, 2, true),
-
- PIN_GROUP_WITH_OVERRIDE("lpe_grp", southwest_lpe_pins, 1, false,
- southwest_lpe_altfuncs),
- PIN_GROUP_WITH_OVERRIDE("spi3_grp", southwest_spi3_pins, 2, false,
- southwest_spi3_altfuncs),
+static const struct intel_pingroup southwest_groups[] = {
+ PIN_GROUP("uart0_grp", southwest_uart0_pins, PINMODE(2, 0)),
+ PIN_GROUP("uart1_grp", southwest_uart1_pins, PINMODE(1, 0)),
+ PIN_GROUP("uart2_grp", southwest_uart2_pins, PINMODE(1, 0)),
+ PIN_GROUP("hda_grp", southwest_hda_pins, PINMODE(2, 0)),
+ PIN_GROUP("i2c0_grp", southwest_i2c0_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c1_grp", southwest_i2c1_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c2_grp", southwest_i2c2_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c3_grp", southwest_i2c3_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c4_grp", southwest_i2c4_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c5_grp", southwest_i2c5_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c6_grp", southwest_i2c6_pins, PINMODE(1, 1)),
+ PIN_GROUP("i2c_nfc_grp", southwest_i2c_nfc_pins, PINMODE(2, 1)),
+ PIN_GROUP("lpe_grp", southwest_lpe_pins, southwest_lpe_altfuncs),
+ PIN_GROUP("spi3_grp", southwest_spi3_pins, southwest_spi3_altfuncs),
};
static const char * const southwest_uart0_groups[] = { "uart0_grp" };
@@ -360,14 +293,14 @@ static const struct intel_function southwest_functions[] = {
FUNCTION("spi3", southwest_spi3_groups),
};
-static const struct chv_gpio_pinrange southwest_gpio_ranges[] = {
- GPIO_PINRANGE(0, 7),
- GPIO_PINRANGE(15, 22),
- GPIO_PINRANGE(30, 37),
- GPIO_PINRANGE(45, 52),
- GPIO_PINRANGE(60, 67),
- GPIO_PINRANGE(75, 82),
- GPIO_PINRANGE(90, 97),
+static const struct intel_padgroup southwest_gpps[] = {
+ CHV_GPP(0, 7),
+ CHV_GPP(15, 22),
+ CHV_GPP(30, 37),
+ CHV_GPP(45, 52),
+ CHV_GPP(60, 67),
+ CHV_GPP(75, 82),
+ CHV_GPP(90, 97),
};
static const struct chv_community southwest_community = {
@@ -378,8 +311,8 @@ static const struct chv_community southwest_community = {
.ngroups = ARRAY_SIZE(southwest_groups),
.functions = southwest_functions,
.nfunctions = ARRAY_SIZE(southwest_functions),
- .gpio_ranges = southwest_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(southwest_gpio_ranges),
+ .gpps = southwest_gpps,
+ .ngpps = ARRAY_SIZE(southwest_gpps),
/*
* Southwest community can generate GPIO interrupts only for the
* first 8 interrupts. The upper half (8-15) can only be used to
@@ -455,20 +388,20 @@ static const struct pinctrl_pin_desc north_pins[] = {
PINCTRL_PIN(72, "PANEL0_VDDEN"),
};
-static const struct chv_gpio_pinrange north_gpio_ranges[] = {
- GPIO_PINRANGE(0, 8),
- GPIO_PINRANGE(15, 27),
- GPIO_PINRANGE(30, 41),
- GPIO_PINRANGE(45, 56),
- GPIO_PINRANGE(60, 72),
+static const struct intel_padgroup north_gpps[] = {
+ CHV_GPP(0, 8),
+ CHV_GPP(15, 27),
+ CHV_GPP(30, 41),
+ CHV_GPP(45, 56),
+ CHV_GPP(60, 72),
};
static const struct chv_community north_community = {
.uid = "2",
.pins = north_pins,
.npins = ARRAY_SIZE(north_pins),
- .gpio_ranges = north_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
+ .gpps = north_gpps,
+ .ngpps = ARRAY_SIZE(north_gpps),
/*
* North community can generate GPIO interrupts only for the first
* 8 interrupts. The upper half (8-15) can only be used to trigger
@@ -506,17 +439,17 @@ static const struct pinctrl_pin_desc east_pins[] = {
PINCTRL_PIN(26, "MF_ISH_I2C1_SDA"),
};
-static const struct chv_gpio_pinrange east_gpio_ranges[] = {
- GPIO_PINRANGE(0, 11),
- GPIO_PINRANGE(15, 26),
+static const struct intel_padgroup east_gpps[] = {
+ CHV_GPP(0, 11),
+ CHV_GPP(15, 26),
};
static const struct chv_community east_community = {
.uid = "3",
.pins = east_pins,
.npins = ARRAY_SIZE(east_pins),
- .gpio_ranges = east_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(east_gpio_ranges),
+ .gpps = east_gpps,
+ .ngpps = ARRAY_SIZE(east_gpps),
.nirqs = 16,
.acpi_space_id = 0x93,
};
@@ -596,14 +529,14 @@ static const unsigned southeast_sdmmc3_pins[] = {
static const unsigned southeast_spi1_pins[] = { 60, 61, 62, 64, 66 };
static const unsigned southeast_spi2_pins[] = { 2, 3, 4, 6, 7 };
-static const struct chv_pingroup southeast_groups[] = {
- PIN_GROUP_WITH_ALT("pwm0_grp", southeast_pwm0_pins, 1, false),
- PIN_GROUP_WITH_ALT("pwm1_grp", southeast_pwm1_pins, 1, false),
- PIN_GROUP_WITH_ALT("sdmmc1_grp", southeast_sdmmc1_pins, 1, false),
- PIN_GROUP_WITH_ALT("sdmmc2_grp", southeast_sdmmc2_pins, 1, false),
- PIN_GROUP_WITH_ALT("sdmmc3_grp", southeast_sdmmc3_pins, 1, false),
- PIN_GROUP_WITH_ALT("spi1_grp", southeast_spi1_pins, 1, false),
- PIN_GROUP_WITH_ALT("spi2_grp", southeast_spi2_pins, 4, false),
+static const struct intel_pingroup southeast_groups[] = {
+ PIN_GROUP("pwm0_grp", southeast_pwm0_pins, PINMODE(1, 0)),
+ PIN_GROUP("pwm1_grp", southeast_pwm1_pins, PINMODE(1, 0)),
+ PIN_GROUP("sdmmc1_grp", southeast_sdmmc1_pins, PINMODE(1, 0)),
+ PIN_GROUP("sdmmc2_grp", southeast_sdmmc2_pins, PINMODE(1, 0)),
+ PIN_GROUP("sdmmc3_grp", southeast_sdmmc3_pins, PINMODE(1, 0)),
+ PIN_GROUP("spi1_grp", southeast_spi1_pins, PINMODE(1, 0)),
+ PIN_GROUP("spi2_grp", southeast_spi2_pins, PINMODE(4, 0)),
};
static const char * const southeast_pwm0_groups[] = { "pwm0_grp" };
@@ -624,13 +557,13 @@ static const struct intel_function southeast_functions[] = {
FUNCTION("spi2", southeast_spi2_groups),
};
-static const struct chv_gpio_pinrange southeast_gpio_ranges[] = {
- GPIO_PINRANGE(0, 7),
- GPIO_PINRANGE(15, 26),
- GPIO_PINRANGE(30, 35),
- GPIO_PINRANGE(45, 52),
- GPIO_PINRANGE(60, 69),
- GPIO_PINRANGE(75, 85),
+static const struct intel_padgroup southeast_gpps[] = {
+ CHV_GPP(0, 7),
+ CHV_GPP(15, 26),
+ CHV_GPP(30, 35),
+ CHV_GPP(45, 52),
+ CHV_GPP(60, 69),
+ CHV_GPP(75, 85),
};
static const struct chv_community southeast_community = {
@@ -641,8 +574,8 @@ static const struct chv_community southeast_community = {
.ngroups = ARRAY_SIZE(southeast_groups),
.functions = southeast_functions,
.nfunctions = ARRAY_SIZE(southeast_functions),
- .gpio_ranges = southeast_gpio_ranges,
- .ngpio_ranges = ARRAY_SIZE(southeast_gpio_ranges),
+ .gpps = southeast_gpps,
+ .ngpps = ARRAY_SIZE(southeast_gpps),
.nirqs = 16,
.acpi_space_id = 0x94,
};
@@ -789,7 +722,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function, unsigned int group)
{
struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct chv_pingroup *grp;
+ const struct intel_pingroup *grp;
unsigned long flags;
int i;
@@ -808,22 +741,21 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
}
for (i = 0; i < grp->npins; i++) {
- const struct chv_alternate_function *altfunc = &grp->altfunc;
int pin = grp->pins[i];
void __iomem *reg;
+ unsigned int mode;
+ bool invert_oe;
u32 value;
/* Check if there is pin-specific config */
- if (grp->overrides) {
- int j;
-
- for (j = 0; j < grp->noverrides; j++) {
- if (grp->overrides[j].pin == pin) {
- altfunc = &grp->overrides[j];
- break;
- }
- }
- }
+ if (grp->modes)
+ mode = grp->modes[i];
+ else
+ mode = grp->mode;
+
+ /* Extract OE inversion */
+ invert_oe = mode & PINMODE_INVERT_OE;
+ mode &= ~PINMODE_INVERT_OE;
reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
value = readl(reg);
@@ -831,18 +763,18 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
value &= ~CHV_PADCTRL0_GPIOEN;
/* Set to desired mode */
value &= ~CHV_PADCTRL0_PMODE_MASK;
- value |= altfunc->mode << CHV_PADCTRL0_PMODE_SHIFT;
+ value |= mode << CHV_PADCTRL0_PMODE_SHIFT;
chv_writel(value, reg);
/* Update for invert_oe */
reg = chv_padreg(pctrl, pin, CHV_PADCTRL1);
value = readl(reg) & ~CHV_PADCTRL1_INVRXTX_MASK;
- if (altfunc->invert_oe)
+ if (invert_oe)
value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
chv_writel(value, reg);
dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
- pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ pin, mode, invert_oe ? "" : "not ");
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -1594,14 +1526,14 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
{
struct chv_pinctrl *pctrl = gpiochip_get_data(chip);
const struct chv_community *community = pctrl->community;
- const struct chv_gpio_pinrange *range;
+ const struct intel_padgroup *gpp;
int ret, i;
- for (i = 0; i < community->ngpio_ranges; i++) {
- range = &community->gpio_ranges[i];
+ for (i = 0; i < community->ngpps; i++) {
+ gpp = &community->gpps[i];
ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
- range->base, range->base,
- range->npins);
+ gpp->base, gpp->base,
+ gpp->size);
if (ret) {
dev_err(pctrl->dev, "failed to add GPIO pin range\n");
return ret;
@@ -1613,7 +1545,7 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
{
- const struct chv_gpio_pinrange *range;
+ const struct intel_padgroup *gpp;
struct gpio_chip *chip = &pctrl->chip;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
const struct chv_community *community = pctrl->community;
@@ -1661,12 +1593,12 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
}
if (!need_valid_mask) {
- for (i = 0; i < community->ngpio_ranges; i++) {
- range = &community->gpio_ranges[i];
+ for (i = 0; i < community->ngpps; i++) {
+ gpp = &community->gpps[i];
irq_domain_associate_many(chip->irq.domain, irq_base,
- range->base, range->npins);
- irq_base += range->npins;
+ gpp->base, gpp->size);
+ irq_base += gpp->size;
}
}
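
Aside (not part of the patch): the Cherryview hunks above fold the old per-pin invert_oe flag into a single mode word — bit 15 (PINMODE_INVERT_OE) carries the OE-inversion flag and the low bits carry the mux mode, which is exactly what chv_pinmux_set_mux() decodes. A minimal user-space sketch of that encoding, with BIT()/PINMODE() re-declared locally so it compiles on its own:

#include <stdbool.h>
#include <stdio.h>

/* Local re-declarations so this builds outside the kernel tree */
#define BIT(n)			(1U << (n))
#define PINMODE_INVERT_OE	BIT(15)
#define PINMODE(m, i)		((m) | ((i) * PINMODE_INVERT_OE))

int main(void)
{
	/* Same value as the lpe_grp override table entries: mode 1, OE inverted */
	unsigned int encoded = PINMODE(1, 1);

	/* Decode the way chv_pinmux_set_mux() does */
	bool invert_oe = encoded & PINMODE_INVERT_OE;
	unsigned int mode = encoded & ~PINMODE_INVERT_OE;

	printf("encoded=0x%04x mode=%u OE %sinverted\n",
	       encoded, mode, invert_oe ? "" : "not ");
	return 0;
}

Running this prints "encoded=0x8001 mode=1 OE inverted", matching the dev_dbg() output format kept by the patch.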
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
index 6489e9bbb61f..429b5a83acf0 100644
--- a/drivers/pinctrl/intel/pinctrl-icelake.c
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -29,8 +29,6 @@
.gpio_base = (g), \
}
-#define ICL_NO_GPIO -1
-
#define ICL_COMMUNITY(b, s, e, g) \
{ \
.barno = (b), \
@@ -305,29 +303,29 @@ static const struct pinctrl_pin_desc icllp_pins[] = {
};
static const struct intel_padgroup icllp_community0_gpps[] = {
- ICL_GPP(0, 0, 7, 0), /* GPP_G */
- ICL_GPP(1, 8, 33, 32), /* GPP_B */
- ICL_GPP(2, 34, 58, 64), /* GPP_A */
+ ICL_GPP(0, 0, 7, 0), /* GPP_G */
+ ICL_GPP(1, 8, 33, 32), /* GPP_B */
+ ICL_GPP(2, 34, 58, 64), /* GPP_A */
};
static const struct intel_padgroup icllp_community1_gpps[] = {
- ICL_GPP(0, 59, 82, 96), /* GPP_H */
- ICL_GPP(1, 83, 103, 128), /* GPP_D */
- ICL_GPP(2, 104, 123, 160), /* GPP_F */
- ICL_GPP(3, 124, 152, 192), /* vGPIO */
+ ICL_GPP(0, 59, 82, 96), /* GPP_H */
+ ICL_GPP(1, 83, 103, 128), /* GPP_D */
+ ICL_GPP(2, 104, 123, 160), /* GPP_F */
+ ICL_GPP(3, 124, 152, 192), /* vGPIO */
};
static const struct intel_padgroup icllp_community4_gpps[] = {
- ICL_GPP(0, 153, 176, 224), /* GPP_C */
- ICL_GPP(1, 177, 182, ICL_NO_GPIO), /* HVCMOS */
- ICL_GPP(2, 183, 206, 256), /* GPP_E */
- ICL_GPP(3, 207, 215, ICL_NO_GPIO), /* JTAG */
+ ICL_GPP(0, 153, 176, 224), /* GPP_C */
+ ICL_GPP(1, 177, 182, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ ICL_GPP(2, 183, 206, 256), /* GPP_E */
+ ICL_GPP(3, 207, 215, INTEL_GPIO_BASE_NOMAP), /* JTAG */
};
static const struct intel_padgroup icllp_community5_gpps[] = {
- ICL_GPP(0, 216, 223, 288), /* GPP_R */
- ICL_GPP(1, 224, 231, 320), /* GPP_S */
- ICL_GPP(2, 232, 240, ICL_NO_GPIO), /* SPI */
+ ICL_GPP(0, 216, 223, 288), /* GPP_R */
+ ICL_GPP(1, 224, 231, 320), /* GPP_S */
+ ICL_GPP(2, 232, 240, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_community icllp_communities[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 74fdfd2b9ff5..6a274e20d926 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -798,7 +798,7 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset,
for (j = 0; j < comm->ngpps; j++) {
const struct intel_padgroup *pgrp = &comm->gpps[j];
- if (pgrp->gpio_base < 0)
+ if (pgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
continue;
if (offset >= pgrp->gpio_base &&
@@ -1138,7 +1138,7 @@ static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
for (i = 0; i < community->ngpps; i++) {
const struct intel_padgroup *gpp = &community->gpps[i];
- if (gpp->gpio_base < 0)
+ if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
continue;
ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
@@ -1180,7 +1180,7 @@ static unsigned int intel_gpio_ngpio(const struct intel_pinctrl *pctrl)
for (j = 0; j < community->ngpps; j++) {
const struct intel_padgroup *gpp = &community->gpps[j];
- if (gpp->gpio_base < 0)
+ if (gpp->gpio_base == INTEL_GPIO_BASE_NOMAP)
continue;
if (gpp->gpio_base + gpp->size > ngpio)
@@ -1276,8 +1276,18 @@ static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
if (gpps[i].size > 32)
return -EINVAL;
- if (!gpps[i].gpio_base)
- gpps[i].gpio_base = gpps[i].base;
+ /* Special treatment for GPIO base */
+ switch (gpps[i].gpio_base) {
+ case INTEL_GPIO_BASE_MATCH:
+ gpps[i].gpio_base = gpps[i].base;
+ break;
+ case INTEL_GPIO_BASE_ZERO:
+ gpps[i].gpio_base = 0;
+ break;
+ case INTEL_GPIO_BASE_NOMAP:
+ default:
+ break;
+ }
gpps[i].padown_num = padown_num;
@@ -1596,7 +1606,7 @@ static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c,
struct device *dev = pctrl->dev;
u32 requested;
- if (padgrp->gpio_base < 0)
+ if (padgrp->gpio_base == INTEL_GPIO_BASE_NOMAP)
return;
requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index c6f066f6d3fb..cc78c483518f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -53,8 +53,7 @@ struct intel_function {
* @reg_num: GPI_IS register number
* @base: Starting pin of this group
* @size: Size of this group (maximum is 32).
- * @gpio_base: Starting GPIO base of this group (%0 if matches with @base,
- * and %-1 if no GPIO mapping should be created)
+ * @gpio_base: Starting GPIO base of this group
* @padown_num: PAD_OWN register number (assigned by the core driver)
*
* If pad groups of a community are not the same size, use this structure
@@ -69,6 +68,19 @@ struct intel_padgroup {
};
/**
+ * enum - Special treatment for GPIO base in pad group
+ *
+ * @INTEL_GPIO_BASE_ZERO: force GPIO base to be 0
+ * @INTEL_GPIO_BASE_NOMAP: no GPIO mapping should be created
+ * @INTEL_GPIO_BASE_MATCH: matches with starting pin number
+ */
+enum {
+ INTEL_GPIO_BASE_ZERO = -2,
+ INTEL_GPIO_BASE_NOMAP = -1,
+ INTEL_GPIO_BASE_MATCH = 0,
+};
+
+/**
* struct intel_community - Intel pin community description
* @barno: MMIO BAR number where registers for this community reside
* @padown_offset: Register offset of PAD_OWN register from @regs. If %0
@@ -82,20 +94,20 @@ struct intel_padgroup {
* @ie_offset: Register offset of GPI_IE from @regs.
* @features: Additional features supported by the hardware
* @pin_base: Starting pin of pins in this community
+ * @npins: Number of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
- * HOSTSW_OWN, GPI_IS, GPI_IE, etc. Used when @gpps is %NULL.
+ * HOSTSW_OWN, GPI_IS, GPI_IE. Used when @gpps is %NULL.
* @gpp_num_padown_regs: Number of pad registers each pad group consumes at
* minimum. Use %0 if the number of registers can be
* determined by the size of the group.
- * @npins: Number of pins in this community
* @gpps: Pad groups if the controller has variable size pad groups
* @ngpps: Number of pad groups in this community
* @pad_map: Optional non-linear mapping of the pads
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
*
- * Most Intel GPIO host controllers this driver supports each pad group is
- * of equal size (except the last one). In that case the driver can just
+ * In some of Intel GPIO host controllers this driver supports each pad group
+ * is of equal size (except the last one). In that case the driver can just
* fill in @gpp_size field and let the core driver to handle the rest. If
* the controller has pad groups of variable size the client driver can
* pass custom @gpps and @ngpps instead.
@@ -109,12 +121,13 @@ struct intel_community {
unsigned int ie_offset;
unsigned int features;
unsigned int pin_base;
+ size_t npins;
unsigned int gpp_size;
unsigned int gpp_num_padown_regs;
- size_t npins;
const struct intel_padgroup *gpps;
size_t ngpps;
const unsigned int *pad_map;
+
/* Reserved for the core driver */
void __iomem *regs;
void __iomem *pad_regs;
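
Aside (not part of the patch): the header change above replaces the old magic values (0 and -1) with named sentinels, and intel_pinctrl_add_padgroups() resolves them in the switch shown earlier in this diff. A self-contained sketch of that resolution, with the pad-group struct trimmed to the two fields that matter (struct example_padgroup is a stand-in, not the real intel_padgroup):

#include <stdio.h>

/* Values as introduced in pinctrl-intel.h by this patch */
enum {
	INTEL_GPIO_BASE_ZERO	= -2,	/* force GPIO base to 0 */
	INTEL_GPIO_BASE_NOMAP	= -1,	/* create no GPIO mapping */
	INTEL_GPIO_BASE_MATCH	= 0,	/* GPIO base matches pin base */
};

struct example_padgroup {
	unsigned int base;	/* first pin of the group */
	int gpio_base;		/* requested GPIO base or a sentinel */
};

static void resolve_gpio_base(struct example_padgroup *gpp)
{
	switch (gpp->gpio_base) {
	case INTEL_GPIO_BASE_MATCH:
		gpp->gpio_base = gpp->base;
		break;
	case INTEL_GPIO_BASE_ZERO:
		gpp->gpio_base = 0;
		break;
	case INTEL_GPIO_BASE_NOMAP:
	default:
		break;	/* keep as-is: no mapping, or an explicit base */
	}
}

int main(void)
{
	struct example_padgroup gpps[] = {
		{ .base = 216, .gpio_base = INTEL_GPIO_BASE_ZERO },
		{ .base = 186, .gpio_base = INTEL_GPIO_BASE_NOMAP },
		{ .base = 20,  .gpio_base = INTEL_GPIO_BASE_MATCH },
	};

	for (unsigned int i = 0; i < sizeof(gpps) / sizeof(gpps[0]); i++) {
		resolve_gpio_base(&gpps[i]);
		printf("pin base %3u -> gpio_base %d\n",
		       gpps[i].base, gpps[i].gpio_base);
	}
	return 0;
}

Callers such as intel_gpio_to_pin() then only need the single comparison against INTEL_GPIO_BASE_NOMAP, which is what the pinctrl-intel.c hunks above switch to.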
diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
new file mode 100644
index 000000000000..9bd0e8e6310c
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Jasper Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define JSL_PAD_OWN 0x020
+#define JSL_PADCFGLOCK 0x080
+#define JSL_HOSTSW_OWN 0x0b0
+#define JSL_GPI_IS 0x100
+#define JSL_GPI_IE 0x120
+
+#define JSL_GPP(r, s, e, g) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ .gpio_base = (g), \
+ }
+
+#define JSL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = JSL_PAD_OWN, \
+ .padcfglock_offset = JSL_PADCFGLOCK, \
+ .hostown_offset = JSL_HOSTSW_OWN, \
+ .is_offset = JSL_GPI_IS, \
+ .ie_offset = JSL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Jasper Lake */
+static const struct pinctrl_pin_desc jsl_pins[] = {
+ /* GPP_F */
+ PINCTRL_PIN(0, "CNV_BRI_DT_UART0_RTSB"),
+ PINCTRL_PIN(1, "CNV_BRI_RSP_UART0_RXD"),
+ PINCTRL_PIN(2, "EMMC_HIP_MON"),
+ PINCTRL_PIN(3, "CNV_RGI_RSP_UART0_CTSB"),
+ PINCTRL_PIN(4, "CNV_RF_RESET_B"),
+ PINCTRL_PIN(5, "MODEM_CLKREQ"),
+ PINCTRL_PIN(6, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(7, "EMMC_CMD"),
+ PINCTRL_PIN(8, "EMMC_DATA0"),
+ PINCTRL_PIN(9, "EMMC_DATA1"),
+ PINCTRL_PIN(10, "EMMC_DATA2"),
+ PINCTRL_PIN(11, "EMMC_DATA3"),
+ PINCTRL_PIN(12, "EMMC_DATA4"),
+ PINCTRL_PIN(13, "EMMC_DATA5"),
+ PINCTRL_PIN(14, "EMMC_DATA6"),
+ PINCTRL_PIN(15, "EMMC_DATA7"),
+ PINCTRL_PIN(16, "EMMC_RCLK"),
+ PINCTRL_PIN(17, "EMMC_CLK"),
+ PINCTRL_PIN(18, "EMMC_RESETB"),
+ PINCTRL_PIN(19, "A4WP_PRESENT"),
+ /* GPP_B */
+ PINCTRL_PIN(20, "CORE_VID_0"),
+ PINCTRL_PIN(21, "CORE_VID_1"),
+ PINCTRL_PIN(22, "VRALERTB"),
+ PINCTRL_PIN(23, "CPU_GP_2"),
+ PINCTRL_PIN(24, "CPU_GP_3"),
+ PINCTRL_PIN(25, "SRCCLKREQB_0"),
+ PINCTRL_PIN(26, "SRCCLKREQB_1"),
+ PINCTRL_PIN(27, "SRCCLKREQB_2"),
+ PINCTRL_PIN(28, "SRCCLKREQB_3"),
+ PINCTRL_PIN(29, "SRCCLKREQB_4"),
+ PINCTRL_PIN(30, "SRCCLKREQB_5"),
+ PINCTRL_PIN(31, "PMCALERTB"),
+ PINCTRL_PIN(32, "SLP_S0B"),
+ PINCTRL_PIN(33, "PLTRSTB"),
+ PINCTRL_PIN(34, "SPKR"),
+ PINCTRL_PIN(35, "GSPI0_CS0B"),
+ PINCTRL_PIN(36, "GSPI0_CLK"),
+ PINCTRL_PIN(37, "GSPI0_MISO"),
+ PINCTRL_PIN(38, "GSPI0_MOSI"),
+ PINCTRL_PIN(39, "GSPI1_CS0B"),
+ PINCTRL_PIN(40, "GSPI1_CLK"),
+ PINCTRL_PIN(41, "GSPI1_MISO"),
+ PINCTRL_PIN(42, "GSPI1_MOSI"),
+ PINCTRL_PIN(43, "DDSP_HPD_A"),
+ PINCTRL_PIN(44, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(45, "GSPI1_CLK_LOOPBK"),
+ /* GPP_A */
+ PINCTRL_PIN(46, "ESPI_IO_0"),
+ PINCTRL_PIN(47, "ESPI_IO_1"),
+ PINCTRL_PIN(48, "ESPI_IO_2"),
+ PINCTRL_PIN(49, "ESPI_IO_3"),
+ PINCTRL_PIN(50, "ESPI_CSB"),
+ PINCTRL_PIN(51, "ESPI_CLK"),
+ PINCTRL_PIN(52, "ESPI_RESETB"),
+ PINCTRL_PIN(53, "SMBCLK"),
+ PINCTRL_PIN(54, "SMBDATA"),
+ PINCTRL_PIN(55, "SMBALERTB"),
+ PINCTRL_PIN(56, "CPU_GP_0"),
+ PINCTRL_PIN(57, "CPU_GP_1"),
+ PINCTRL_PIN(58, "USB2_OCB_1"),
+ PINCTRL_PIN(59, "USB2_OCB_2"),
+ PINCTRL_PIN(60, "USB2_OCB_3"),
+ PINCTRL_PIN(61, "DDSP_HPD_A_TIME_SYNC_0"),
+ PINCTRL_PIN(62, "DDSP_HPD_B"),
+ PINCTRL_PIN(63, "DDSP_HPD_C"),
+ PINCTRL_PIN(64, "USB2_OCB_0"),
+ PINCTRL_PIN(65, "PCHHOTB"),
+ PINCTRL_PIN(66, "ESPI_CLK_LOOPBK"),
+ /* GPP_S */
+ PINCTRL_PIN(67, "SNDW1_CLK"),
+ PINCTRL_PIN(68, "SNDW1_DATA"),
+ PINCTRL_PIN(69, "SNDW2_CLK"),
+ PINCTRL_PIN(70, "SNDW2_DATA"),
+ PINCTRL_PIN(71, "SNDW1_CLK"),
+ PINCTRL_PIN(72, "SNDW1_DATA"),
+ PINCTRL_PIN(73, "SNDW4_CLK_DMIC_CLK_0"),
+ PINCTRL_PIN(74, "SNDW4_DATA_DMIC_DATA_0"),
+ /* GPP_R */
+ PINCTRL_PIN(75, "HDA_BCLK"),
+ PINCTRL_PIN(76, "HDA_SYNC"),
+ PINCTRL_PIN(77, "HDA_SDO"),
+ PINCTRL_PIN(78, "HDA_SDI_0"),
+ PINCTRL_PIN(79, "HDA_RSTB"),
+ PINCTRL_PIN(80, "HDA_SDI_1"),
+ PINCTRL_PIN(81, "I2S1_SFRM"),
+ PINCTRL_PIN(82, "I2S1_TXD"),
+ /* GPP_H */
+ PINCTRL_PIN(83, "GPPC_H_0"),
+ PINCTRL_PIN(84, "SD_PWR_EN_B"),
+ PINCTRL_PIN(85, "MODEM_CLKREQ"),
+ PINCTRL_PIN(86, "SX_EXIT_HOLDOFFB"),
+ PINCTRL_PIN(87, "I2C2_SDA"),
+ PINCTRL_PIN(88, "I2C2_SCL"),
+ PINCTRL_PIN(89, "I2C3_SDA"),
+ PINCTRL_PIN(90, "I2C3_SCL"),
+ PINCTRL_PIN(91, "I2C4_SDA"),
+ PINCTRL_PIN(92, "I2C4_SCL"),
+ PINCTRL_PIN(93, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(94, "I2S2_SCLK"),
+ PINCTRL_PIN(95, "I2S2_SFRM"),
+ PINCTRL_PIN(96, "I2S2_TXD"),
+ PINCTRL_PIN(97, "I2S2_RXD"),
+ PINCTRL_PIN(98, "I2S1_SCLK"),
+ PINCTRL_PIN(99, "GPPC_H_16"),
+ PINCTRL_PIN(100, "GPPC_H_17"),
+ PINCTRL_PIN(101, "GPPC_H_18"),
+ PINCTRL_PIN(102, "GPPC_H_19"),
+ PINCTRL_PIN(103, "GPPC_H_20"),
+ PINCTRL_PIN(104, "GPPC_H_21"),
+ PINCTRL_PIN(105, "GPPC_H_22"),
+ PINCTRL_PIN(106, "GPPC_H_23"),
+ /* GPP_D */
+ PINCTRL_PIN(107, "SPI1_CSB"),
+ PINCTRL_PIN(108, "SPI1_CLK"),
+ PINCTRL_PIN(109, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(110, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(111, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(112, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(113, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(114, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(115, "ISH_SPI_CSB"),
+ PINCTRL_PIN(116, "ISH_SPI_CLK"),
+ PINCTRL_PIN(117, "ISH_SPI_MISO"),
+ PINCTRL_PIN(118, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(119, "ISH_UART0_RXD"),
+ PINCTRL_PIN(120, "ISH_UART0_TXD"),
+ PINCTRL_PIN(121, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(122, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(123, "SPI1_IO_2"),
+ PINCTRL_PIN(124, "SPI1_IO_3"),
+ PINCTRL_PIN(125, "I2S_MCLK"),
+ PINCTRL_PIN(126, "CNV_MFUART2_RXD"),
+ PINCTRL_PIN(127, "CNV_MFUART2_TXD"),
+ PINCTRL_PIN(128, "CNV_PA_BLANKING"),
+ PINCTRL_PIN(129, "I2C5_SDA"),
+ PINCTRL_PIN(130, "I2C5_SCL"),
+ PINCTRL_PIN(131, "GSPI2_CLK_LOOPBK"),
+ PINCTRL_PIN(132, "SPI1_CLK_LOOPBK"),
+ /* vGPIO */
+ PINCTRL_PIN(133, "CNV_BTEN"),
+ PINCTRL_PIN(134, "CNV_WCEN"),
+ PINCTRL_PIN(135, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(136, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(137, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(138, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(139, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(140, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(141, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(142, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(143, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(144, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(145, "vUART0_TXD"),
+ PINCTRL_PIN(146, "vUART0_RXD"),
+ PINCTRL_PIN(147, "vUART0_CTS_B"),
+ PINCTRL_PIN(148, "vUART0_RTS_B"),
+ PINCTRL_PIN(149, "vISH_UART0_TXD"),
+ PINCTRL_PIN(150, "vISH_UART0_RXD"),
+ PINCTRL_PIN(151, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(152, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(153, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(154, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(155, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(156, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(157, "vI2S2_SCLK"),
+ PINCTRL_PIN(158, "vI2S2_SFRM"),
+ PINCTRL_PIN(159, "vI2S2_TXD"),
+ PINCTRL_PIN(160, "vI2S2_RXD"),
+ PINCTRL_PIN(161, "vSD3_CD_B"),
+ /* GPP_C */
+ PINCTRL_PIN(162, "GPPC_C_0"),
+ PINCTRL_PIN(163, "GPPC_C_1"),
+ PINCTRL_PIN(164, "GPPC_C_2"),
+ PINCTRL_PIN(165, "GPPC_C_3"),
+ PINCTRL_PIN(166, "GPPC_C_4"),
+ PINCTRL_PIN(167, "GPPC_C_5"),
+ PINCTRL_PIN(168, "SUSWARNB_SUSPWRDNACK"),
+ PINCTRL_PIN(169, "SUSACKB"),
+ PINCTRL_PIN(170, "UART0_RXD"),
+ PINCTRL_PIN(171, "UART0_TXD"),
+ PINCTRL_PIN(172, "UART0_RTSB"),
+ PINCTRL_PIN(173, "UART0_CTSB"),
+ PINCTRL_PIN(174, "UART1_RXD"),
+ PINCTRL_PIN(175, "UART1_TXD"),
+ PINCTRL_PIN(176, "UART1_RTSB"),
+ PINCTRL_PIN(177, "UART1_CTSB"),
+ PINCTRL_PIN(178, "I2C0_SDA"),
+ PINCTRL_PIN(179, "I2C0_SCL"),
+ PINCTRL_PIN(180, "I2C1_SDA"),
+ PINCTRL_PIN(181, "I2C1_SCL"),
+ PINCTRL_PIN(182, "UART2_RXD"),
+ PINCTRL_PIN(183, "UART2_TXD"),
+ PINCTRL_PIN(184, "UART2_RTSB"),
+ PINCTRL_PIN(185, "UART2_CTSB"),
+ /* HVCMOS */
+ PINCTRL_PIN(186, "L_BKLTEN"),
+ PINCTRL_PIN(187, "L_BKLTCTL"),
+ PINCTRL_PIN(188, "L_VDDEN"),
+ PINCTRL_PIN(189, "SYS_PWROK"),
+ PINCTRL_PIN(190, "SYS_RESETB"),
+ PINCTRL_PIN(191, "MLK_RSTB"),
+ /* GPP_E */
+ PINCTRL_PIN(192, "ISH_GP_0"),
+ PINCTRL_PIN(193, "ISH_GP_1"),
+ PINCTRL_PIN(194, "IMGCLKOUT_1"),
+ PINCTRL_PIN(195, "ISH_GP_2"),
+ PINCTRL_PIN(196, "IMGCLKOUT_2"),
+ PINCTRL_PIN(197, "SATA_LEDB"),
+ PINCTRL_PIN(198, "IMGCLKOUT_3"),
+ PINCTRL_PIN(199, "ISH_GP_3"),
+ PINCTRL_PIN(200, "ISH_GP_4"),
+ PINCTRL_PIN(201, "ISH_GP_5"),
+ PINCTRL_PIN(202, "ISH_GP_6"),
+ PINCTRL_PIN(203, "ISH_GP_7"),
+ PINCTRL_PIN(204, "IMGCLKOUT_4"),
+ PINCTRL_PIN(205, "DDPA_CTRLCLK"),
+ PINCTRL_PIN(206, "DDPA_CTRLDATA"),
+ PINCTRL_PIN(207, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(208, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(209, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(210, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(211, "IMGCLKOUT_5"),
+ PINCTRL_PIN(212, "CNV_BRI_DT"),
+ PINCTRL_PIN(213, "CNV_BRI_RSP"),
+ PINCTRL_PIN(214, "CNV_RGI_DT"),
+ PINCTRL_PIN(215, "CNV_RGI_RSP"),
+ /* GPP_G */
+ PINCTRL_PIN(216, "SD3_CMD"),
+ PINCTRL_PIN(217, "SD3_D0"),
+ PINCTRL_PIN(218, "SD3_D1"),
+ PINCTRL_PIN(219, "SD3_D2"),
+ PINCTRL_PIN(220, "SD3_D3"),
+ PINCTRL_PIN(221, "SD3_CDB"),
+ PINCTRL_PIN(222, "SD3_CLK"),
+ PINCTRL_PIN(223, "SD3_WP"),
+};
+
+static const struct intel_padgroup jsl_community0_gpps[] = {
+ JSL_GPP(0, 0, 19, 320), /* GPP_F */
+ JSL_GPP(1, 20, 45, 32), /* GPP_B */
+ JSL_GPP(2, 46, 66, 64), /* GPP_A */
+ JSL_GPP(3, 67, 74, 96), /* GPP_S */
+ JSL_GPP(4, 75, 82, 128), /* GPP_R */
+};
+
+static const struct intel_padgroup jsl_community1_gpps[] = {
+ JSL_GPP(0, 83, 106, 160), /* GPP_H */
+ JSL_GPP(1, 107, 132, 192), /* GPP_D */
+ JSL_GPP(2, 133, 161, 224), /* vGPIO */
+ JSL_GPP(3, 162, 185, 256), /* GPP_C */
+};
+
+static const struct intel_padgroup jsl_community4_gpps[] = {
+ JSL_GPP(0, 186, 191, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ JSL_GPP(1, 192, 215, 288), /* GPP_E */
+};
+
+static const struct intel_padgroup jsl_community5_gpps[] = {
+ JSL_GPP(0, 216, 223, INTEL_GPIO_BASE_ZERO), /* GPP_G */
+};
+
+static const struct intel_community jsl_communities[] = {
+ JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps),
+ JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps),
+ JSL_COMMUNITY(2, 186, 215, jsl_community4_gpps),
+ JSL_COMMUNITY(3, 216, 223, jsl_community5_gpps),
+};
+
+static const struct intel_pinctrl_soc_data jsl_soc_data = {
+ .pins = jsl_pins,
+ .npins = ARRAY_SIZE(jsl_pins),
+ .communities = jsl_communities,
+ .ncommunities = ARRAY_SIZE(jsl_communities),
+};
+
+static const struct acpi_device_id jsl_pinctrl_acpi_match[] = {
+ { "INT34C8", (kernel_ulong_t)&jsl_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, jsl_pinctrl_acpi_match);
+
+static INTEL_PINCTRL_PM_OPS(jsl_pinctrl_pm_ops);
+
+static struct platform_driver jsl_pinctrl_driver = {
+ .probe = intel_pinctrl_probe_by_hid,
+ .driver = {
+ .name = "jasperlake-pinctrl",
+ .acpi_match_table = jsl_pinctrl_acpi_match,
+ .pm = &jsl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(jsl_pinctrl_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Jasper Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
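
Aside (not part of the patch): the new Jasper Lake tables describe pad groups and communities by their first and last pin, so both JSL_GPP() and JSL_COMMUNITY() derive the element count as an inclusive range. A small user-space check of that arithmetic against the values used above:

#include <stdio.h>

/* Mirrors the size math in JSL_GPP() and JSL_COMMUNITY() */
static unsigned int range_size(unsigned int start, unsigned int end)
{
	return end - start + 1;
}

int main(void)
{
	/* GPP_F from jsl_community0_gpps: JSL_GPP(0, 0, 19, 320) */
	printf("GPP_F pads:       %u\n", range_size(0, 19));	/* 20 */
	/* Community 0: JSL_COMMUNITY(0, 0, 82, jsl_community0_gpps) */
	printf("community 0 pins: %u\n", range_size(0, 82));	/* 83 */
	/* Community 1: JSL_COMMUNITY(1, 83, 185, jsl_community1_gpps) */
	printf("community 1 pins: %u\n", range_size(83, 185));	/* 103 */
	return 0;
}

The community pin counts add up to the pin_base boundaries used in jsl_communities[], so no pins are left uncovered between communities.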
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index e928742c7181..a45b8f2182fd 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -794,11 +794,11 @@ static int lp_gpio_probe(struct platform_device *pdev)
const struct intel_pinctrl_soc_data *soc;
struct intel_pinctrl *lg;
struct gpio_chip *gc;
- struct resource *io_rc, *irq_rc;
struct device *dev = &pdev->dev;
+ struct resource *io_rc;
void __iomem *regs;
unsigned int i;
- int ret;
+ int irq, ret;
soc = (const struct intel_pinctrl_soc_data *)device_get_match_data(dev);
if (!soc)
@@ -870,8 +870,8 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->parent = dev;
/* set up interrupts */
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq_rc && irq_rc->start) {
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq > 0) {
struct gpio_irq_chip *girq;
girq = &gc->irq;
@@ -884,7 +884,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
- girq->parents[0] = (unsigned int)irq_rc->start;
+ girq->parents[0] = irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
}
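
Aside (not part of the patch): the Lynxpoint hunk switches from reading a legacy IORESOURCE_IRQ resource to platform_get_irq_optional(), which returns a Linux IRQ number directly. A hypothetical probe fragment illustrating the idiom (example_setup_irq is not part of this patch; it only sketches the pattern under the same "irq > 0 means usable" assumption the driver makes):

#include <linux/platform_device.h>

/* Hypothetical helper: ask for the first interrupt, but keep probing
 * without IRQ support when none is described for the device. */
static int example_setup_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_optional(pdev, 0);

	if (irq <= 0)
		return 0;	/* no interrupt line: run without IRQs */

	/* irq is a valid Linux IRQ number, e.g. for girq->parents[0] */
	return irq;
}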
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 08a86f6fdea6..bcfd7548e282 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -21,8 +21,6 @@
#define TGL_GPI_IS 0x100
#define TGL_GPI_IE 0x120
-#define TGL_NO_GPIO -1
-
#define TGL_GPP(r, s, e, g) \
{ \
.reg_num = (r), \
@@ -342,30 +340,30 @@ static const struct pinctrl_pin_desc tgllp_pins[] = {
};
static const struct intel_padgroup tgllp_community0_gpps[] = {
- TGL_GPP(0, 0, 25, 0), /* GPP_B */
- TGL_GPP(1, 26, 41, 32), /* GPP_T */
- TGL_GPP(2, 42, 66, 64), /* GPP_A */
+ TGL_GPP(0, 0, 25, 0), /* GPP_B */
+ TGL_GPP(1, 26, 41, 32), /* GPP_T */
+ TGL_GPP(2, 42, 66, 64), /* GPP_A */
};
static const struct intel_padgroup tgllp_community1_gpps[] = {
- TGL_GPP(0, 67, 74, 96), /* GPP_S */
- TGL_GPP(1, 75, 98, 128), /* GPP_H */
- TGL_GPP(2, 99, 119, 160), /* GPP_D */
- TGL_GPP(3, 120, 143, 192), /* GPP_U */
- TGL_GPP(4, 144, 170, 224), /* vGPIO */
+ TGL_GPP(0, 67, 74, 96), /* GPP_S */
+ TGL_GPP(1, 75, 98, 128), /* GPP_H */
+ TGL_GPP(2, 99, 119, 160), /* GPP_D */
+ TGL_GPP(3, 120, 143, 192), /* GPP_U */
+ TGL_GPP(4, 144, 170, 224), /* vGPIO */
};
static const struct intel_padgroup tgllp_community4_gpps[] = {
- TGL_GPP(0, 171, 194, 256), /* GPP_C */
- TGL_GPP(1, 195, 219, 288), /* GPP_F */
- TGL_GPP(2, 220, 225, TGL_NO_GPIO), /* HVCMOS */
- TGL_GPP(3, 226, 250, 320), /* GPP_E */
- TGL_GPP(4, 251, 259, TGL_NO_GPIO), /* JTAG */
+ TGL_GPP(0, 171, 194, 256), /* GPP_C */
+ TGL_GPP(1, 195, 219, 288), /* GPP_F */
+ TGL_GPP(2, 220, 225, INTEL_GPIO_BASE_NOMAP), /* HVCMOS */
+ TGL_GPP(3, 226, 250, 320), /* GPP_E */
+ TGL_GPP(4, 251, 259, INTEL_GPIO_BASE_NOMAP), /* JTAG */
};
static const struct intel_padgroup tgllp_community5_gpps[] = {
- TGL_GPP(0, 260, 267, 352), /* GPP_R */
- TGL_GPP(1, 268, 276, TGL_NO_GPIO), /* SPI */
+ TGL_GPP(0, 260, 267, 352), /* GPP_R */
+ TGL_GPP(1, 268, 276, INTEL_GPIO_BASE_NOMAP), /* SPI */
};
static const struct intel_community tgllp_communities[] = {
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 701f9af63f5e..f32d3644c509 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -3,10 +3,12 @@ menu "MediaTek pinctrl drivers"
depends on ARCH_MEDIATEK || COMPILE_TEST
config EINT_MTK
- bool "MediaTek External Interrupt Support"
+ tristate "MediaTek External Interrupt Support"
depends on PINCTRL_MTK || PINCTRL_MTK_MOORE || PINCTRL_MTK_PARIS || COMPILE_TEST
select GPIOLIB
select IRQ_DOMAIN
+ default y if PINCTRL_MTK || PINCTRL_MTK_MOORE
+ default PINCTRL_MTK_PARIS
config PINCTRL_MTK
bool
@@ -17,6 +19,9 @@ config PINCTRL_MTK
select EINT_MTK
select OF_GPIO
+config PINCTRL_MTK_V2
+ tristate
+
config PINCTRL_MTK_MOORE
bool
depends on OF
@@ -25,15 +30,17 @@ config PINCTRL_MTK_MOORE
select GENERIC_PINMUX_FUNCTIONS
select GPIOLIB
select OF_GPIO
+ select PINCTRL_MTK_V2
config PINCTRL_MTK_PARIS
- bool
+ tristate
depends on OF
select PINMUX
select GENERIC_PINCONF
select GPIOLIB
select EINT_MTK
select OF_GPIO
+ select PINCTRL_MTK_V2
# For ARMv7 SoCs
config PINCTRL_MT2701
@@ -80,7 +87,7 @@ config PINCTRL_MT2712
select PINCTRL_MTK
config PINCTRL_MT6765
- bool "Mediatek MT6765 pin control"
+ tristate "Mediatek MT6765 pin control"
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index a74325abd877..4b7132876e71 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -2,8 +2,9 @@
# Core
obj-$(CONFIG_EINT_MTK) += mtk-eint.o
obj-$(CONFIG_PINCTRL_MTK) += pinctrl-mtk-common.o
-obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o pinctrl-mtk-common-v2.o
-obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o pinctrl-mtk-common-v2.o
+obj-$(CONFIG_PINCTRL_MTK_V2) += pinctrl-mtk-common-v2.o
+obj-$(CONFIG_PINCTRL_MTK_MOORE) += pinctrl-moore.o
+obj-$(CONFIG_PINCTRL_MTK_PARIS) += pinctrl-paris.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_MT2701) += pinctrl-mt2701.o
diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c
index 7e526bcf5e0b..22736f60c16c 100644
--- a/drivers/pinctrl/mediatek/mtk-eint.c
+++ b/drivers/pinctrl/mediatek/mtk-eint.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
@@ -379,6 +380,7 @@ int mtk_eint_do_suspend(struct mtk_eint *eint)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);
int mtk_eint_do_resume(struct mtk_eint *eint)
{
@@ -386,6 +388,7 @@ int mtk_eint_do_resume(struct mtk_eint *eint)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_do_resume);
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
unsigned int debounce)
@@ -440,6 +443,7 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);
int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
@@ -451,6 +455,7 @@ int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
return irq;
}
+EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
int mtk_eint_do_init(struct mtk_eint *eint)
{
@@ -495,3 +500,7 @@ int mtk_eint_do_init(struct mtk_eint *eint)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_eint_do_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek EINT Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
index 905dae8c3fd8..2c59d3936256 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c
@@ -6,6 +6,7 @@
*
*/
+#include <linux/module.h>
#include "pinctrl-mtk-mt6765.h"
#include "pinctrl-paris.h"
@@ -1103,3 +1104,6 @@ static int __init mt6765_pinctrl_init(void)
return platform_driver_register(&mt6765_pinctrl_driver);
}
arch_initcall(mt6765_pinctrl_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek MT6765 Pinctrl Driver");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index d3169a87e1b3..b77b18fe5adc 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -12,6 +12,7 @@
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_irq.h>
#include "mtk-eint.h"
@@ -204,6 +205,7 @@ int mtk_hw_set_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_hw_set_value);
int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
int field, int *value)
@@ -223,6 +225,7 @@ int mtk_hw_get_value(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_hw_get_value);
static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n)
{
@@ -361,6 +364,7 @@ int mtk_build_eint(struct mtk_pinctrl *hw, struct platform_device *pdev)
return mtk_eint_do_init(hw->eint);
}
+EXPORT_SYMBOL_GPL(mtk_build_eint);
/* Revision 0 */
int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
@@ -380,6 +384,7 @@ int mtk_pinconf_bias_disable_set(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set);
int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *res)
@@ -402,6 +407,7 @@ int mtk_pinconf_bias_disable_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_get);
int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup)
@@ -421,6 +427,7 @@ int mtk_pinconf_bias_set(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set);
int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup, int *res)
@@ -440,6 +447,7 @@ int mtk_pinconf_bias_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get);
/* Revision 1 */
int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
@@ -454,6 +462,7 @@ int mtk_pinconf_bias_disable_set_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_set_rev1);
int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *res)
@@ -471,6 +480,7 @@ int mtk_pinconf_bias_disable_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_disable_get_rev1);
int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup)
@@ -490,6 +500,7 @@ int mtk_pinconf_bias_set_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set_rev1);
int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
@@ -515,6 +526,7 @@ int mtk_pinconf_bias_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_rev1);
/* Combo for the following pull register type:
* 1. PU + PD
@@ -715,6 +727,7 @@ int mtk_pinconf_bias_set_combo(struct mtk_pinctrl *hw,
out:
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_set_combo);
int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc,
@@ -735,6 +748,7 @@ int mtk_pinconf_bias_get_combo(struct mtk_pinctrl *hw,
out:
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_bias_get_combo);
/* Revision 0 */
int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
@@ -764,6 +778,7 @@ int mtk_pinconf_drive_set(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set);
int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val)
@@ -788,6 +803,7 @@ int mtk_pinconf_drive_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get);
/* Revision 1 */
int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
@@ -809,6 +825,7 @@ int mtk_pinconf_drive_set_rev1(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set_rev1);
int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val)
@@ -826,18 +843,21 @@ int mtk_pinconf_drive_get_rev1(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get_rev1);
int mtk_pinconf_drive_set_raw(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg)
{
return mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DRV, arg);
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_set_raw);
int mtk_pinconf_drive_get_raw(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, int *val)
{
return mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DRV, val);
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_drive_get_raw);
int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
@@ -878,6 +898,7 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_pull_set);
int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, bool pullup,
@@ -920,6 +941,7 @@ int mtk_pinconf_adv_pull_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_pull_get);
int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 arg)
@@ -946,6 +968,7 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw,
return err;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_set);
int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
const struct mtk_pin_desc *desc, u32 *val)
@@ -969,3 +992,8 @@ int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_pinconf_adv_drive_get);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Pin configuration library module for mediatek SoCs");
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index ee305f140400..90a432bf9fed 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -10,6 +10,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/module.h>
#include <dt-bindings/pinctrl/mt65xx.h>
#include "pinctrl-paris.h"
@@ -631,6 +632,7 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
return len;
}
+EXPORT_SYMBOL_GPL(mtk_pctrl_show_one_pin);
#define PIN_DBG_BUF_SZ 96
static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
@@ -1019,6 +1021,7 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_paris_pinctrl_probe);
static int mtk_paris_pinctrl_suspend(struct device *device)
{
@@ -1038,3 +1041,6 @@ const struct dev_pm_ops mtk_paris_pinctrl_pm_ops = {
.suspend_noirq = mtk_paris_pinctrl_suspend,
.resume_noirq = mtk_paris_pinctrl_resume,
};
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek Pinctrl Common Driver V2 Paris");
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index bbc919bef2bf..079f8ee8d353 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -549,6 +549,18 @@ static const struct pinconf_ops meson_pinconf_ops = {
.is_generic = true,
};
+static int meson_gpio_get_direction(struct gpio_chip *chip, unsigned gpio)
+{
+ struct meson_pinctrl *pc = gpiochip_get_data(chip);
+ int ret;
+
+ ret = meson_pinconf_get_output(pc, gpio);
+ if (ret < 0)
+ return ret;
+
+ return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return meson_pinconf_set_output(gpiochip_get_data(chip), gpio, false);
@@ -591,6 +603,8 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.parent = pc->dev;
pc->chip.request = gpiochip_generic_request;
pc->chip.free = gpiochip_generic_free;
+ pc->chip.set_config = gpiochip_generic_config;
+ pc->chip.get_direction = meson_gpio_get_direction;
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
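
Aside (not part of the patch): the Meson hunk adds a get_direction() callback that translates the pad's output-enable state into the generic GPIO direction codes. A runnable user-space sketch of that mapping; the GPIO_LINE_DIRECTION_* values are restated here on the assumption that they mirror <linux/gpio/driver.h>, and fake_get_output() is only a stand-in for meson_pinconf_get_output():

#include <stdio.h>

/* Assumed to match include/linux/gpio/driver.h */
#define GPIO_LINE_DIRECTION_IN		1
#define GPIO_LINE_DIRECTION_OUT		0

/* Stand-in for meson_pinconf_get_output(): >0 when the output enable
 * bit is set, 0 for an input pad, negative errno on failure. */
static int fake_get_output(unsigned int gpio)
{
	return gpio & 1;	/* odd pins "output", even pins "input" */
}

static int example_get_direction(unsigned int gpio)
{
	int ret = fake_get_output(gpio);

	if (ret < 0)
		return ret;

	return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}

int main(void)
{
	for (unsigned int gpio = 0; gpio < 4; gpio++)
		printf("gpio %u -> %s\n", gpio,
		       example_get_direction(gpio) == GPIO_LINE_DIRECTION_OUT ?
		       "out" : "in");
	return 0;
}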
diff --git a/drivers/pinctrl/nomadik/pinctrl-ab8505.c b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
index 5e6e7d28390a..b93af1fb37f0 100644
--- a/drivers/pinctrl/nomadik/pinctrl-ab8505.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
@@ -178,6 +178,7 @@ static const struct abx500_pingroup ab8505_groups[] = {
AB8505_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(uartrxdata_a_1, ABX500_ALT_A),
+ AB8505_PIN_GROUP(gpio50_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
AB8505_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index b9246e0b4fe2..acad3887cc74 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -691,18 +691,21 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(lcd_d8_d11_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(lcd_d12_d23_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ipgpio0_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(ipgpio1_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(modem_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(kp_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp2sck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp2_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc4_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc1_a_2, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc1dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
@@ -760,7 +763,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(u0_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(ipgpio4_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(ipgpio5_c_1, NMK_GPIO_ALT_C),
- DB8500_PIN_GROUP(ipgpio6_c_1, NMK_GPIO_ALT_C),
+ DB8500_PIN_GROUP(ipgpio6_c_2, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(ipgpio7_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(smcleale_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(stmape_c_1, NMK_GPIO_ALT_C),
@@ -955,6 +958,7 @@ static const struct nmk_function nmk_db8500_functions[] = {
FUNCTION(spi0),
FUNCTION(spi2),
FUNCTION(remap),
+ FUNCTION(sbag),
FUNCTION(ptm),
FUNCTION(rf),
FUNCTION(hx),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index ca7bbe4164c0..ba25c4654391 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1343,8 +1343,6 @@ static const struct nmk_cfg_param nmk_cfg_params[] = {
static int nmk_dt_pin_config(int index, int val, unsigned long *config)
{
- int ret = 0;
-
if (nmk_cfg_params[index].choice == NULL)
*config = nmk_cfg_params[index].config;
else {
@@ -1354,7 +1352,7 @@ static int nmk_dt_pin_config(int index, int val, unsigned long *config)
nmk_cfg_params[index].choice[val];
}
}
- return ret;
+ return 0;
}
static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pin_name)
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 694912409fd9..54222ccddfb1 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1019,7 +1019,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
atmel_pioctrl->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pioctrl->reg_base))
- return -EINVAL;
+ return PTR_ERR(atmel_pioctrl->reg_base);
atmel_pioctrl->clk = devm_clk_get(dev, NULL);
if (IS_ERR(atmel_pioctrl->clk)) {
diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c
index f7dff4f14101..d1a7d9836787 100644
--- a/drivers/pinctrl/pinctrl-bm1880.c
+++ b/drivers/pinctrl/pinctrl-bm1880.c
@@ -408,6 +408,7 @@ static const struct bm1880_pctrl_group bm1880_pctrl_groups[] = {
BM1880_PINCTRL_GRP(pwm34),
BM1880_PINCTRL_GRP(pwm35),
BM1880_PINCTRL_GRP(pwm36),
+ BM1880_PINCTRL_GRP(pwm37),
BM1880_PINCTRL_GRP(i2c0),
BM1880_PINCTRL_GRP(i2c1),
BM1880_PINCTRL_GRP(i2c2),
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index e5dcf77fe43d..6a8d44504f94 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -1977,6 +1977,25 @@ static const struct pinctrl_ops ingenic_pctlops = {
.dt_free_map = pinconf_generic_dt_free_map,
};
+static int ingenic_gpio_irq_request(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = ingenic_gpio_direction_input(gpio_chip, data->hwirq);
+ if (ret)
+ return ret;
+
+ return gpiochip_reqres_irq(gpio_chip, data->hwirq);
+}
+
+static void ingenic_gpio_irq_release(struct irq_data *data)
+{
+ struct gpio_chip *gpio_chip = irq_data_get_irq_chip_data(data);
+
+ return gpiochip_relres_irq(gpio_chip, data->hwirq);
+}
+
static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
int pin, int func)
{
@@ -2338,6 +2357,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
+ jzgc->irq_chip.irq_request_resources = ingenic_gpio_irq_request;
+ jzgc->irq_chip.irq_release_resources = ingenic_gpio_irq_release;
jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
girq = &jzgc->gc.irq;
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index aa92f141b865..626e02d7a1ba 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -221,7 +221,7 @@ static int match_mux(const struct ltq_mfp_pin *mfp, unsigned mux)
return i;
}
-/* dont assume .mfp is linearly mapped. find the mfp with the correct .pin */
+/* don't assume .mfp is linearly mapped. find the mfp with the correct .pin */
static int match_mfp(const struct ltq_pinmux_info *info, int pin)
{
int i;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 3a235487e38d..151931b593f6 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -1,34 +1,23 @@
// SPDX-License-Identifier: GPL-2.0-only
/* MCP23S08 SPI/I2C GPIO driver */
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/export.h>
#include <linux/gpio/driver.h>
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
-/*
- * MCP types supported by driver
- */
-#define MCP_TYPE_S08 0
-#define MCP_TYPE_S17 1
-#define MCP_TYPE_008 2
-#define MCP_TYPE_017 3
-#define MCP_TYPE_S18 4
-#define MCP_TYPE_018 5
-
-#define MCP_MAX_DEV_PER_CS 8
+#include "pinctrl-mcp23s08.h"
/* Registers are all 8 bits wide.
*
@@ -53,31 +42,6 @@
#define MCP_GPIO 0x09
#define MCP_OLAT 0x0a
-struct mcp23s08;
-
-struct mcp23s08 {
- u8 addr;
- bool irq_active_high;
- bool reg_shift;
-
- u16 irq_rise;
- u16 irq_fall;
- int irq;
- bool irq_controller;
- int cached_gpio;
- /* lock protects regmap access with bypass/cache flags */
- struct mutex lock;
-
- struct gpio_chip chip;
- struct irq_chip irq_chip;
-
- struct regmap *regmap;
- struct device *dev;
-
- struct pinctrl_dev *pctldev;
- struct pinctrl_desc pinctrl_desc;
-};
-
static const struct reg_default mcp23x08_defaults[] = {
{.reg = MCP_IODIR, .def = 0xff},
{.reg = MCP_IPOL, .def = 0x00},
@@ -109,7 +73,7 @@ static const struct regmap_access_table mcp23x08_precious_table = {
.n_yes_ranges = 1,
};
-static const struct regmap_config mcp23x08_regmap = {
+const struct regmap_config mcp23x08_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -121,6 +85,7 @@ static const struct regmap_config mcp23x08_regmap = {
.cache_type = REGCACHE_FLAT,
.max_register = MCP_OLAT,
};
+EXPORT_SYMBOL_GPL(mcp23x08_regmap);
static const struct reg_default mcp23x16_defaults[] = {
{.reg = MCP_IODIR << 1, .def = 0xffff},
@@ -153,7 +118,7 @@ static const struct regmap_access_table mcp23x16_precious_table = {
.n_yes_ranges = 1,
};
-static const struct regmap_config mcp23x17_regmap = {
+const struct regmap_config mcp23x17_regmap = {
.reg_bits = 8,
.val_bits = 16,
@@ -166,6 +131,7 @@ static const struct regmap_config mcp23x17_regmap = {
.cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+EXPORT_SYMBOL_GPL(mcp23x17_regmap);
static int mcp_read(struct mcp23s08 *mcp, unsigned int reg, unsigned int *val)
{
@@ -309,80 +275,6 @@ static const struct pinconf_ops mcp_pinconf_ops = {
/*----------------------------------------------------------------------*/
-#ifdef CONFIG_SPI_MASTER
-
-static int mcp23sxx_spi_write(void *context, const void *data, size_t count)
-{
- struct mcp23s08 *mcp = context;
- struct spi_device *spi = to_spi_device(mcp->dev);
- struct spi_message m;
- struct spi_transfer t[2] = { { .tx_buf = &mcp->addr, .len = 1, },
- { .tx_buf = data, .len = count, }, };
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int mcp23sxx_spi_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
-{
- struct mcp23s08 *mcp = context;
- struct spi_device *spi = to_spi_device(mcp->dev);
- struct spi_message m;
- struct spi_transfer t[3] = { { .tx_buf = &mcp->addr, .len = 1, },
- { .tx_buf = reg, .len = reg_size, },
- { .tx_buf = val, .len = val_size, }, };
-
- spi_message_init(&m);
- spi_message_add_tail(&t[0], &m);
- spi_message_add_tail(&t[1], &m);
- spi_message_add_tail(&t[2], &m);
-
- return spi_sync(spi, &m);
-}
-
-static int mcp23sxx_spi_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct mcp23s08 *mcp = context;
- struct spi_device *spi = to_spi_device(mcp->dev);
- u8 tx[2];
-
- if (reg_size != 1)
- return -EINVAL;
-
- tx[0] = mcp->addr | 0x01;
- tx[1] = *((u8 *) reg);
-
- return spi_write_then_read(spi, tx, sizeof(tx), val, val_size);
-}
-
-static const struct regmap_bus mcp23sxx_spi_regmap = {
- .write = mcp23sxx_spi_write,
- .gather_write = mcp23sxx_spi_gather_write,
- .read = mcp23sxx_spi_read,
-};
-
-#endif /* CONFIG_SPI_MASTER */
-
-/*----------------------------------------------------------------------*/
-
-/* A given spi_device can represent up to eight mcp23sxx chips
- * sharing the same chipselect but using different addresses
- * (e.g. chips #0 and #3 might be populated, but not #1 or $2).
- * Driver data holds all the per-chip data.
- */
-struct mcp23s08_driver_data {
- unsigned ngpio;
- struct mcp23s08 *mcp[8];
- struct mcp23s08 chip[];
-};
-
-
static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct mcp23s08 *mcp = gpiochip_get_data(chip);
@@ -562,7 +454,6 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
- int status = 0;
if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
mcp_set_bit(mcp, MCP_INTCON, pos, false);
@@ -585,7 +476,7 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
} else
return -EINVAL;
- return status;
+ return 0;
}
static void mcp23s08_irq_bus_lock(struct irq_data *data)
@@ -656,21 +547,25 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
/*----------------------------------------------------------------------*/
-static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
- void *data, unsigned addr, unsigned type,
- unsigned int base, int cs)
+int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ unsigned int addr, unsigned int type, unsigned int base)
{
int status, ret;
bool mirror = false;
bool open_drain = false;
- struct regmap_config *one_regmap_config = NULL;
- int raw_chip_address = (addr & ~0x40) >> 1;
mutex_init(&mcp->lock);
mcp->dev = dev;
mcp->addr = addr;
+
mcp->irq_active_high = false;
+ mcp->irq_chip.name = dev_name(dev);
+ mcp->irq_chip.irq_mask = mcp23s08_irq_mask;
+ mcp->irq_chip.irq_unmask = mcp23s08_irq_unmask;
+ mcp->irq_chip.irq_set_type = mcp23s08_irq_set_type;
+ mcp->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
+ mcp->irq_chip.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock;
mcp->chip.direction_input = mcp23s08_direction_input;
mcp->chip.get = mcp23s08_get;
@@ -681,83 +576,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.of_node = dev->of_node;
#endif
- switch (type) {
-#ifdef CONFIG_SPI_MASTER
- case MCP_TYPE_S08:
- case MCP_TYPE_S17:
- switch (type) {
- case MCP_TYPE_S08:
- one_regmap_config =
- devm_kmemdup(dev, &mcp23x08_regmap,
- sizeof(struct regmap_config), GFP_KERNEL);
- mcp->reg_shift = 0;
- mcp->chip.ngpio = 8;
- mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL,
- "mcp23s08.%d", raw_chip_address);
- break;
- case MCP_TYPE_S17:
- one_regmap_config =
- devm_kmemdup(dev, &mcp23x17_regmap,
- sizeof(struct regmap_config), GFP_KERNEL);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL,
- "mcp23s17.%d", raw_chip_address);
- break;
- }
- if (!one_regmap_config)
- return -ENOMEM;
-
- one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", raw_chip_address);
- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
- one_regmap_config);
- break;
-
- case MCP_TYPE_S18:
- one_regmap_config =
- devm_kmemdup(dev, &mcp23x17_regmap,
- sizeof(struct regmap_config), GFP_KERNEL);
- if (!one_regmap_config)
- return -ENOMEM;
- mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
- one_regmap_config);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23s18";
- break;
-#endif /* CONFIG_SPI_MASTER */
-
-#if IS_ENABLED(CONFIG_I2C)
- case MCP_TYPE_008:
- mcp->regmap = devm_regmap_init_i2c(data, &mcp23x08_regmap);
- mcp->reg_shift = 0;
- mcp->chip.ngpio = 8;
- mcp->chip.label = "mcp23008";
- break;
-
- case MCP_TYPE_017:
- mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23017";
- break;
-
- case MCP_TYPE_018:
- mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
- mcp->reg_shift = 1;
- mcp->chip.ngpio = 16;
- mcp->chip.label = "mcp23018";
- break;
-#endif /* CONFIG_I2C */
-
- default:
- dev_err(dev, "invalid device type (%d)\n", type);
- return -EINVAL;
- }
-
- if (IS_ERR(mcp->regmap))
- return PTR_ERR(mcp->regmap);
-
mcp->chip.base = base;
mcp->chip.can_sleep = true;
mcp->chip.parent = dev;
@@ -816,14 +634,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
- if (one_regmap_config) {
- mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
- "mcp23xxx-pinctrl.%d", raw_chip_address);
- if (!mcp->pinctrl_desc.name)
- return -ENOMEM;
- } else {
- mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
- }
mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
mcp->pinctrl_desc.npins = mcp->chip.ngpio;
@@ -847,291 +657,5 @@ fail:
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
return ret;
}
-
-/*----------------------------------------------------------------------*/
-
-#ifdef CONFIG_OF
-#ifdef CONFIG_SPI_MASTER
-static const struct of_device_id mcp23s08_spi_of_match[] = {
- {
- .compatible = "microchip,mcp23s08",
- .data = (void *) MCP_TYPE_S08,
- },
- {
- .compatible = "microchip,mcp23s17",
- .data = (void *) MCP_TYPE_S17,
- },
- {
- .compatible = "microchip,mcp23s18",
- .data = (void *) MCP_TYPE_S18,
- },
-/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
- {
- .compatible = "mcp,mcp23s08",
- .data = (void *) MCP_TYPE_S08,
- },
- {
- .compatible = "mcp,mcp23s17",
- .data = (void *) MCP_TYPE_S17,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
-#endif
-
-#if IS_ENABLED(CONFIG_I2C)
-static const struct of_device_id mcp23s08_i2c_of_match[] = {
- {
- .compatible = "microchip,mcp23008",
- .data = (void *) MCP_TYPE_008,
- },
- {
- .compatible = "microchip,mcp23017",
- .data = (void *) MCP_TYPE_017,
- },
- {
- .compatible = "microchip,mcp23018",
- .data = (void *) MCP_TYPE_018,
- },
-/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
- {
- .compatible = "mcp,mcp23008",
- .data = (void *) MCP_TYPE_008,
- },
- {
- .compatible = "mcp,mcp23017",
- .data = (void *) MCP_TYPE_017,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, mcp23s08_i2c_of_match);
-#endif
-#endif /* CONFIG_OF */
-
-
-#if IS_ENABLED(CONFIG_I2C)
-
-static int mcp230xx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct mcp23s08_platform_data *pdata, local_pdata;
- struct mcp23s08 *mcp;
- int status;
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- pdata = &local_pdata;
- pdata->base = -1;
- }
-
- mcp = devm_kzalloc(&client->dev, sizeof(*mcp), GFP_KERNEL);
- if (!mcp)
- return -ENOMEM;
-
- mcp->irq = client->irq;
- mcp->irq_chip.name = dev_name(&client->dev);
- mcp->irq_chip.irq_mask = mcp23s08_irq_mask;
- mcp->irq_chip.irq_unmask = mcp23s08_irq_unmask;
- mcp->irq_chip.irq_set_type = mcp23s08_irq_set_type;
- mcp->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
- mcp->irq_chip.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock;
-
- status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
- id->driver_data, pdata->base, 0);
- if (status)
- return status;
-
- i2c_set_clientdata(client, mcp);
-
- return 0;
-}
-
-static const struct i2c_device_id mcp230xx_id[] = {
- { "mcp23008", MCP_TYPE_008 },
- { "mcp23017", MCP_TYPE_017 },
- { "mcp23018", MCP_TYPE_018 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
-
-static struct i2c_driver mcp230xx_driver = {
- .driver = {
- .name = "mcp230xx",
- .of_match_table = of_match_ptr(mcp23s08_i2c_of_match),
- },
- .probe = mcp230xx_probe,
- .id_table = mcp230xx_id,
-};
-
-static int __init mcp23s08_i2c_init(void)
-{
- return i2c_add_driver(&mcp230xx_driver);
-}
-
-static void mcp23s08_i2c_exit(void)
-{
- i2c_del_driver(&mcp230xx_driver);
-}
-
-#else
-
-static int __init mcp23s08_i2c_init(void) { return 0; }
-static void mcp23s08_i2c_exit(void) { }
-
-#endif /* CONFIG_I2C */
-
-/*----------------------------------------------------------------------*/
-
-#ifdef CONFIG_SPI_MASTER
-
-static int mcp23s08_probe(struct spi_device *spi)
-{
- struct mcp23s08_platform_data *pdata, local_pdata;
- unsigned addr;
- int chips = 0;
- struct mcp23s08_driver_data *data;
- int status, type;
- unsigned ngpio = 0;
- const struct of_device_id *match;
-
- match = of_match_device(of_match_ptr(mcp23s08_spi_of_match), &spi->dev);
- if (match)
- type = (int)(uintptr_t)match->data;
- else
- type = spi_get_device_id(spi)->driver_data;
-
- pdata = dev_get_platdata(&spi->dev);
- if (!pdata) {
- pdata = &local_pdata;
- pdata->base = -1;
-
- status = device_property_read_u32(&spi->dev,
- "microchip,spi-present-mask", &pdata->spi_present_mask);
- if (status) {
- status = device_property_read_u32(&spi->dev,
- "mcp,spi-present-mask",
- &pdata->spi_present_mask);
-
- if (status) {
- dev_err(&spi->dev, "missing spi-present-mask");
- return -ENODEV;
- }
- }
- }
-
- if (!pdata->spi_present_mask || pdata->spi_present_mask > 0xff) {
- dev_err(&spi->dev, "invalid spi-present-mask");
- return -ENODEV;
- }
-
- for (addr = 0; addr < MCP_MAX_DEV_PER_CS; addr++) {
- if (pdata->spi_present_mask & BIT(addr))
- chips++;
- }
-
- if (!chips)
- return -ENODEV;
-
- data = devm_kzalloc(&spi->dev,
- struct_size(data, chip, chips), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- spi_set_drvdata(spi, data);
-
- for (addr = 0; addr < MCP_MAX_DEV_PER_CS; addr++) {
- if (!(pdata->spi_present_mask & BIT(addr)))
- continue;
- chips--;
- data->mcp[addr] = &data->chip[chips];
- data->mcp[addr]->irq = spi->irq;
- data->mcp[addr]->irq_chip.name = dev_name(&spi->dev);
- data->mcp[addr]->irq_chip.irq_mask = mcp23s08_irq_mask;
- data->mcp[addr]->irq_chip.irq_unmask = mcp23s08_irq_unmask;
- data->mcp[addr]->irq_chip.irq_set_type = mcp23s08_irq_set_type;
- data->mcp[addr]->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock;
- data->mcp[addr]->irq_chip.irq_bus_sync_unlock =
- mcp23s08_irq_bus_unlock;
- status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
- 0x40 | (addr << 1), type,
- pdata->base, addr);
- if (status < 0)
- return status;
-
- if (pdata->base != -1)
- pdata->base += data->mcp[addr]->chip.ngpio;
- ngpio += data->mcp[addr]->chip.ngpio;
- }
- data->ngpio = ngpio;
-
- return 0;
-}
-
-static const struct spi_device_id mcp23s08_ids[] = {
- { "mcp23s08", MCP_TYPE_S08 },
- { "mcp23s17", MCP_TYPE_S17 },
- { "mcp23s18", MCP_TYPE_S18 },
- { },
-};
-MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
-
-static struct spi_driver mcp23s08_driver = {
- .probe = mcp23s08_probe,
- .id_table = mcp23s08_ids,
- .driver = {
- .name = "mcp23s08",
- .of_match_table = of_match_ptr(mcp23s08_spi_of_match),
- },
-};
-
-static int __init mcp23s08_spi_init(void)
-{
- return spi_register_driver(&mcp23s08_driver);
-}
-
-static void mcp23s08_spi_exit(void)
-{
- spi_unregister_driver(&mcp23s08_driver);
-}
-
-#else
-
-static int __init mcp23s08_spi_init(void) { return 0; }
-static void mcp23s08_spi_exit(void) { }
-
-#endif /* CONFIG_SPI_MASTER */
-
-/*----------------------------------------------------------------------*/
-
-static int __init mcp23s08_init(void)
-{
- int ret;
-
- ret = mcp23s08_spi_init();
- if (ret)
- goto spi_fail;
-
- ret = mcp23s08_i2c_init();
- if (ret)
- goto i2c_fail;
-
- return 0;
-
- i2c_fail:
- mcp23s08_spi_exit();
- spi_fail:
- return ret;
-}
-/* register after spi/i2c postcore initcall and before
- * subsys initcalls that may rely on these GPIOs
- */
-subsys_initcall(mcp23s08_init);
-
-static void __exit mcp23s08_exit(void)
-{
- mcp23s08_spi_exit();
- mcp23s08_i2c_exit();
-}
-module_exit(mcp23s08_exit);
-
+EXPORT_SYMBOL_GPL(mcp23s08_probe_one);
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.h b/drivers/pinctrl/pinctrl-mcp23s08.h
new file mode 100644
index 000000000000..90dc27081a3c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mcp23s08.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* MCP23S08 SPI/I2C GPIO driver */
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/types.h>
+
+/*
+ * MCP types supported by driver
+ */
+#define MCP_TYPE_S08 1
+#define MCP_TYPE_S17 2
+#define MCP_TYPE_008 3
+#define MCP_TYPE_017 4
+#define MCP_TYPE_S18 5
+#define MCP_TYPE_018 6
+
+struct device;
+struct regmap;
+
+struct pinctrl_dev;
+
+struct mcp23s08 {
+ u8 addr;
+ bool irq_active_high;
+ bool reg_shift;
+
+ u16 irq_rise;
+ u16 irq_fall;
+ int irq;
+ bool irq_controller;
+ int cached_gpio;
+ /* lock protects regmap access with bypass/cache flags */
+ struct mutex lock;
+
+ struct gpio_chip chip;
+ struct irq_chip irq_chip;
+
+ struct regmap *regmap;
+ struct device *dev;
+
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+};
+
+extern const struct regmap_config mcp23x08_regmap;
+extern const struct regmap_config mcp23x17_regmap;
+
+int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ unsigned int addr, unsigned int type, unsigned int base);
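
For orientation, the header above is the entire contract between the shared core and the bus front ends: a front end allocates a struct mcp23s08, fills in the regmap, IRQ, chip geometry and pinctrl_desc name, then hands the rest to mcp23s08_probe_one(), which registers the gpiochip, irqchip and pinctrl parts. A minimal sketch of such a front end follows; example_probe(), the 0x20 address and the chosen chip type are hypothetical illustrations, not part of the patch:

static int example_probe(struct device *dev, struct regmap *map, int irq)
{
	struct mcp23s08 *mcp;

	mcp = devm_kzalloc(dev, sizeof(*mcp), GFP_KERNEL);
	if (!mcp)
		return -ENOMEM;

	/* bus-specific pieces supplied by the caller */
	mcp->regmap = map;
	mcp->irq = irq;

	/* chip geometry, chosen per device type as the front ends below do */
	mcp->reg_shift = 1;
	mcp->chip.ngpio = 16;
	mcp->chip.label = "mcp23017";
	mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";

	/* shared core: gpiochip + irqchip + pinctrl registration */
	return mcp23s08_probe_one(mcp, dev, 0x20, MCP_TYPE_017, -1);
}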
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
new file mode 100644
index 000000000000..e0b001c8c08c
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* MCP23S08 I2C GPIO driver */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mcp23s08.h"
+
+static int mcp230xx_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ unsigned int type = id->driver_data;
+ struct mcp23s08 *mcp;
+ int ret;
+
+ mcp = devm_kzalloc(dev, sizeof(*mcp), GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ switch (type) {
+ case MCP_TYPE_008:
+ mcp->regmap = devm_regmap_init_i2c(client, &mcp23x08_regmap);
+ mcp->reg_shift = 0;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = "mcp23008";
+ break;
+
+ case MCP_TYPE_017:
+ mcp->regmap = devm_regmap_init_i2c(client, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23017";
+ break;
+
+ case MCP_TYPE_018:
+ mcp->regmap = devm_regmap_init_i2c(client, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23018";
+ break;
+
+ default:
+ dev_err(dev, "invalid device type (%d)\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(mcp->regmap))
+ return PTR_ERR(mcp->regmap);
+
+ mcp->irq = client->irq;
+ mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
+
+ ret = mcp23s08_probe_one(mcp, dev, client->addr, type, -1);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, mcp);
+
+ return 0;
+}
+
+static const struct i2c_device_id mcp230xx_id[] = {
+ { "mcp23008", MCP_TYPE_008 },
+ { "mcp23017", MCP_TYPE_017 },
+ { "mcp23018", MCP_TYPE_018 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
+
+static const struct of_device_id mcp23s08_i2c_of_match[] = {
+ {
+ .compatible = "microchip,mcp23008",
+ .data = (void *) MCP_TYPE_008,
+ },
+ {
+ .compatible = "microchip,mcp23017",
+ .data = (void *) MCP_TYPE_017,
+ },
+ {
+ .compatible = "microchip,mcp23018",
+ .data = (void *) MCP_TYPE_018,
+ },
+/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
+ {
+ .compatible = "mcp,mcp23008",
+ .data = (void *) MCP_TYPE_008,
+ },
+ {
+ .compatible = "mcp,mcp23017",
+ .data = (void *) MCP_TYPE_017,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp23s08_i2c_of_match);
+
+static struct i2c_driver mcp230xx_driver = {
+ .driver = {
+ .name = "mcp230xx",
+ .of_match_table = mcp23s08_i2c_of_match,
+ },
+ .probe = mcp230xx_probe,
+ .id_table = mcp230xx_id,
+};
+
+static int __init mcp23s08_i2c_init(void)
+{
+ return i2c_add_driver(&mcp230xx_driver);
+}
+
+/*
+ * Register after I²C postcore initcall and before
+ * subsys initcalls that may rely on these GPIOs.
+ */
+subsys_initcall(mcp23s08_i2c_init);
+
+static void mcp23s08_i2c_exit(void)
+{
+ i2c_del_driver(&mcp230xx_driver);
+}
+module_exit(mcp23s08_i2c_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
new file mode 100644
index 000000000000..e06fb885fd2b
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* MCP23S08 SPI GPIO driver */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "pinctrl-mcp23s08.h"
+
+#define MCP_MAX_DEV_PER_CS 8
+
+/*
+ * A given spi_device can represent up to eight mcp23sxx chips
+ * sharing the same chipselect but using different addresses
+ * (e.g. chips #0 and #3 might be populated, but not #1 or #2).
+ * Driver data holds all the per-chip data.
+ */
+struct mcp23s08_driver_data {
+ unsigned ngpio;
+ struct mcp23s08 *mcp[8];
+ struct mcp23s08 chip[];
+};
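
The per-chip addressing that makes this sharing work lives in mcp->addr: the probe below passes 0x40 | (addr << 1) for every populated slot, and mcp23sxx_spi_read() ORs in bit 0 for read transfers. A purely illustrative helper (not part of the patch) that spells out how that opcode byte is assembled:

/* Illustrative only: per-chip SPI opcode byte as used by this driver. */
static inline u8 mcp23sxx_spi_opcode(unsigned int addr, bool read)
{
	return 0x40 | (addr << 1) | (read ? 0x01 : 0x00);
}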
+
+static int mcp23sxx_spi_write(void *context, const void *data, size_t count)
+{
+ struct mcp23s08 *mcp = context;
+ struct spi_device *spi = to_spi_device(mcp->dev);
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = &mcp->addr, .len = 1, },
+ { .tx_buf = data, .len = count, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int mcp23sxx_spi_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct mcp23s08 *mcp = context;
+ struct spi_device *spi = to_spi_device(mcp->dev);
+ struct spi_message m;
+ struct spi_transfer t[3] = { { .tx_buf = &mcp->addr, .len = 1, },
+ { .tx_buf = reg, .len = reg_size, },
+ { .tx_buf = val, .len = val_size, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+ spi_message_add_tail(&t[2], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int mcp23sxx_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct mcp23s08 *mcp = context;
+ struct spi_device *spi = to_spi_device(mcp->dev);
+ u8 tx[2];
+
+ if (reg_size != 1)
+ return -EINVAL;
+
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = *((u8 *) reg);
+
+ return spi_write_then_read(spi, tx, sizeof(tx), val, val_size);
+}
+
+static const struct regmap_bus mcp23sxx_spi_regmap = {
+ .write = mcp23sxx_spi_write,
+ .gather_write = mcp23sxx_spi_gather_write,
+ .read = mcp23sxx_spi_read,
+};
+
+static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
+ unsigned int addr, unsigned int type)
+{
+ const struct regmap_config *config;
+ struct regmap_config *copy;
+ const char *name;
+
+ switch (type) {
+ case MCP_TYPE_S08:
+ mcp->reg_shift = 0;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s08.%d", addr);
+
+ config = &mcp23x08_regmap;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
+ break;
+
+ case MCP_TYPE_S17:
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = devm_kasprintf(dev, GFP_KERNEL, "mcp23s17.%d", addr);
+
+ config = &mcp23x17_regmap;
+ name = devm_kasprintf(dev, GFP_KERNEL, "%d", addr);
+ break;
+
+ case MCP_TYPE_S18:
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23s18";
+
+ config = &mcp23x17_regmap;
+ name = config->name;
+ break;
+
+ default:
+ dev_err(dev, "invalid device type (%d)\n", type);
+ return -EINVAL;
+ }
+
+ copy = devm_kmemdup(dev, config, sizeof(*config), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+
+ copy->name = name;
+
+ mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
+ if (IS_ERR(mcp->regmap))
+ return PTR_ERR(mcp->regmap);
+
+ return 0;
+}
+
+static int mcp23s08_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mcp23s08_driver_data *data;
+ unsigned long spi_present_mask;
+ const void *match;
+ unsigned int addr;
+ unsigned int ngpio = 0;
+ int chips;
+ int type;
+ int ret;
+ u32 v;
+
+ match = device_get_match_data(dev);
+ if (match)
+ type = (int)(uintptr_t)match;
+ else
+ type = spi_get_device_id(spi)->driver_data;
+
+ ret = device_property_read_u32(dev, "microchip,spi-present-mask", &v);
+ if (ret) {
+ ret = device_property_read_u32(dev, "mcp,spi-present-mask", &v);
+ if (ret) {
+ dev_err(dev, "missing spi-present-mask");
+ return ret;
+ }
+ }
+ spi_present_mask = v;
+
+ if (!spi_present_mask || spi_present_mask >= BIT(MCP_MAX_DEV_PER_CS)) {
+ dev_err(dev, "invalid spi-present-mask");
+ return -ENODEV;
+ }
+
+ chips = hweight_long(spi_present_mask);
+
+ data = devm_kzalloc(dev, struct_size(data, chip, chips), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, data);
+
+ for_each_set_bit(addr, &spi_present_mask, MCP_MAX_DEV_PER_CS) {
+ data->mcp[addr] = &data->chip[--chips];
+ data->mcp[addr]->irq = spi->irq;
+
+ ret = mcp23s08_spi_regmap_init(data->mcp[addr], dev, addr, type);
+ if (ret)
+ return ret;
+
+ data->mcp[addr]->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
+ "mcp23xxx-pinctrl.%d",
+ addr);
+ if (!data->mcp[addr]->pinctrl_desc.name)
+ return -ENOMEM;
+
+ ret = mcp23s08_probe_one(data->mcp[addr], dev, 0x40 | (addr << 1), type, -1);
+ if (ret < 0)
+ return ret;
+
+ ngpio += data->mcp[addr]->chip.ngpio;
+ }
+ data->ngpio = ngpio;
+
+ return 0;
+}
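
To see how the mask handling above plays out, here is a worked example with hypothetical values (not taken from the patch):

/*
 * spi_present_mask = 0x09   ->  chips #0 and #3 populated
 * hweight_long(0x09) == 2   ->  two struct mcp23s08 slots in data->chip[]
 * for_each_set_bit() visits addr = 0 and addr = 3, so mcp23s08_probe_one()
 * is called with SPI opcodes 0x40 | (0 << 1) = 0x40 and 0x40 | (3 << 1) = 0x46.
 */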
+
+static const struct spi_device_id mcp23s08_ids[] = {
+ { "mcp23s08", MCP_TYPE_S08 },
+ { "mcp23s17", MCP_TYPE_S17 },
+ { "mcp23s18", MCP_TYPE_S18 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
+
+static const struct of_device_id mcp23s08_spi_of_match[] = {
+ {
+ .compatible = "microchip,mcp23s08",
+ .data = (void *) MCP_TYPE_S08,
+ },
+ {
+ .compatible = "microchip,mcp23s17",
+ .data = (void *) MCP_TYPE_S17,
+ },
+ {
+ .compatible = "microchip,mcp23s18",
+ .data = (void *) MCP_TYPE_S18,
+ },
+/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
+ {
+ .compatible = "mcp,mcp23s08",
+ .data = (void *) MCP_TYPE_S08,
+ },
+ {
+ .compatible = "mcp,mcp23s17",
+ .data = (void *) MCP_TYPE_S17,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match);
+
+static struct spi_driver mcp23s08_driver = {
+ .probe = mcp23s08_probe,
+ .id_table = mcp23s08_ids,
+ .driver = {
+ .name = "mcp23s08",
+ .of_match_table = mcp23s08_spi_of_match,
+ },
+};
+
+static int __init mcp23s08_spi_init(void)
+{
+ return spi_register_driver(&mcp23s08_driver);
+}
+
+/*
+ * Register after SPI postcore initcall and before
+ * subsys initcalls that may rely on these GPIOs.
+ */
+subsys_initcall(mcp23s08_spi_init);
+
+static void mcp23s08_spi_exit(void)
+{
+ spi_unregister_driver(&mcp23s08_driver);
+}
+module_exit(mcp23s08_spi_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index ed8eac6c1494..95c225bc7572 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -46,32 +46,15 @@ enum {
FUNC_IRQ0_OUT,
FUNC_IRQ1_IN,
FUNC_IRQ1_OUT,
- FUNC_MIIM1,
- FUNC_MIIM2,
+ FUNC_MIIM,
FUNC_PCI_WAKE,
FUNC_PTP0,
FUNC_PTP1,
FUNC_PTP2,
FUNC_PTP3,
FUNC_PWM,
- FUNC_RECO_CLK0,
- FUNC_RECO_CLK1,
- FUNC_SFP0,
- FUNC_SFP1,
- FUNC_SFP2,
- FUNC_SFP3,
- FUNC_SFP4,
- FUNC_SFP5,
- FUNC_SFP6,
- FUNC_SFP7,
- FUNC_SFP8,
- FUNC_SFP9,
- FUNC_SFP10,
- FUNC_SFP11,
- FUNC_SFP12,
- FUNC_SFP13,
- FUNC_SFP14,
- FUNC_SFP15,
+ FUNC_RECO_CLK,
+ FUNC_SFP,
FUNC_SG0,
FUNC_SG1,
FUNC_SG2,
@@ -92,32 +75,15 @@ static const char *const ocelot_function_names[] = {
[FUNC_IRQ0_OUT] = "irq0_out",
[FUNC_IRQ1_IN] = "irq1_in",
[FUNC_IRQ1_OUT] = "irq1_out",
- [FUNC_MIIM1] = "miim1",
- [FUNC_MIIM2] = "miim2",
+ [FUNC_MIIM] = "miim",
[FUNC_PCI_WAKE] = "pci_wake",
[FUNC_PTP0] = "ptp0",
[FUNC_PTP1] = "ptp1",
[FUNC_PTP2] = "ptp2",
[FUNC_PTP3] = "ptp3",
[FUNC_PWM] = "pwm",
- [FUNC_RECO_CLK0] = "reco_clk0",
- [FUNC_RECO_CLK1] = "reco_clk1",
- [FUNC_SFP0] = "sfp0",
- [FUNC_SFP1] = "sfp1",
- [FUNC_SFP2] = "sfp2",
- [FUNC_SFP3] = "sfp3",
- [FUNC_SFP4] = "sfp4",
- [FUNC_SFP5] = "sfp5",
- [FUNC_SFP6] = "sfp6",
- [FUNC_SFP7] = "sfp7",
- [FUNC_SFP8] = "sfp8",
- [FUNC_SFP9] = "sfp9",
- [FUNC_SFP10] = "sfp10",
- [FUNC_SFP11] = "sfp11",
- [FUNC_SFP12] = "sfp12",
- [FUNC_SFP13] = "sfp13",
- [FUNC_SFP14] = "sfp14",
- [FUNC_SFP15] = "sfp15",
+ [FUNC_RECO_CLK] = "reco_clk",
+ [FUNC_SFP] = "sfp",
[FUNC_SG0] = "sg0",
[FUNC_SG1] = "sg1",
[FUNC_SG2] = "sg2",
@@ -168,18 +134,18 @@ OCELOT_P(6, UART, TWI_SCL_M, NONE);
OCELOT_P(7, UART, TWI_SCL_M, NONE);
OCELOT_P(8, SI, TWI_SCL_M, IRQ0_OUT);
OCELOT_P(9, SI, TWI_SCL_M, IRQ1_OUT);
-OCELOT_P(10, PTP2, TWI_SCL_M, SFP0);
-OCELOT_P(11, PTP3, TWI_SCL_M, SFP1);
-OCELOT_P(12, UART2, TWI_SCL_M, SFP2);
-OCELOT_P(13, UART2, TWI_SCL_M, SFP3);
-OCELOT_P(14, MIIM1, TWI_SCL_M, SFP4);
-OCELOT_P(15, MIIM1, TWI_SCL_M, SFP5);
+OCELOT_P(10, PTP2, TWI_SCL_M, SFP);
+OCELOT_P(11, PTP3, TWI_SCL_M, SFP);
+OCELOT_P(12, UART2, TWI_SCL_M, SFP);
+OCELOT_P(13, UART2, TWI_SCL_M, SFP);
+OCELOT_P(14, MIIM, TWI_SCL_M, SFP);
+OCELOT_P(15, MIIM, TWI_SCL_M, SFP);
OCELOT_P(16, TWI, NONE, SI);
OCELOT_P(17, TWI, TWI_SCL_M, SI);
OCELOT_P(18, PTP0, TWI_SCL_M, NONE);
OCELOT_P(19, PTP1, TWI_SCL_M, NONE);
-OCELOT_P(20, RECO_CLK0, TACHO, NONE);
-OCELOT_P(21, RECO_CLK1, PWM, NONE);
+OCELOT_P(20, RECO_CLK, TACHO, TWI_SCL_M);
+OCELOT_P(21, RECO_CLK, PWM, TWI_SCL_M);
#define OCELOT_PIN(n) { \
.number = n, \
@@ -264,22 +230,22 @@ JAGUAR2_P(40, NONE, TWI_SCL_M);
JAGUAR2_P(41, NONE, TWI_SCL_M);
JAGUAR2_P(42, NONE, TWI_SCL_M);
JAGUAR2_P(43, NONE, TWI_SCL_M);
-JAGUAR2_P(44, NONE, SFP8);
-JAGUAR2_P(45, NONE, SFP9);
-JAGUAR2_P(46, NONE, SFP10);
-JAGUAR2_P(47, NONE, SFP11);
-JAGUAR2_P(48, SFP0, NONE);
-JAGUAR2_P(49, SFP1, SI);
-JAGUAR2_P(50, SFP2, SI);
-JAGUAR2_P(51, SFP3, SI);
-JAGUAR2_P(52, SFP4, NONE);
-JAGUAR2_P(53, SFP5, NONE);
-JAGUAR2_P(54, SFP6, NONE);
-JAGUAR2_P(55, SFP7, NONE);
-JAGUAR2_P(56, MIIM1, SFP12);
-JAGUAR2_P(57, MIIM1, SFP13);
-JAGUAR2_P(58, MIIM2, SFP14);
-JAGUAR2_P(59, MIIM2, SFP15);
+JAGUAR2_P(44, NONE, SFP);
+JAGUAR2_P(45, NONE, SFP);
+JAGUAR2_P(46, NONE, SFP);
+JAGUAR2_P(47, NONE, SFP);
+JAGUAR2_P(48, SFP, NONE);
+JAGUAR2_P(49, SFP, SI);
+JAGUAR2_P(50, SFP, SI);
+JAGUAR2_P(51, SFP, SI);
+JAGUAR2_P(52, SFP, NONE);
+JAGUAR2_P(53, SFP, NONE);
+JAGUAR2_P(54, SFP, NONE);
+JAGUAR2_P(55, SFP, NONE);
+JAGUAR2_P(56, MIIM, SFP);
+JAGUAR2_P(57, MIIM, SFP);
+JAGUAR2_P(58, MIIM, SFP);
+JAGUAR2_P(59, MIIM, SFP);
JAGUAR2_P(60, NONE, NONE);
JAGUAR2_P(61, NONE, NONE);
JAGUAR2_P(62, NONE, NONE);
@@ -714,11 +680,12 @@ static void ocelot_irq_handler(struct irq_desc *desc)
struct irq_chip *parent_chip = irq_desc_get_chip(desc);
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ unsigned int id_reg = OCELOT_GPIO_INTR_IDENT * info->stride;
unsigned int reg = 0, irq, i;
unsigned long irqs;
for (i = 0; i < info->stride; i++) {
- regmap_read(info->map, OCELOT_GPIO_INTR_IDENT + 4 * i, &reg);
+ regmap_read(info->map, id_reg + 4 * i, &reg);
if (!reg)
continue;
@@ -751,21 +718,21 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc->of_node = info->dev->of_node;
gc->label = "ocelot-gpio";
- irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (irq <= 0)
- return irq;
-
- girq = &gc->irq;
- girq->chip = &ocelot_irqchip;
- girq->parent_handler = ocelot_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->parents[0] = irq;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
+ irq = irq_of_parse_and_map(gc->of_node, 0);
+ if (irq) {
+ girq = &gc->irq;
+ girq->chip = &ocelot_irqchip;
+ girq->parent_handler = ocelot_irq_handler;
+ girq->num_parents = 1;
+ girq->parents = devm_kcalloc(&pdev->dev, 1,
+ sizeof(*girq->parents),
+ GFP_KERNEL);
+ if (!girq->parents)
+ return -ENOMEM;
+ girq->parents[0] = irq;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_edge_irq;
+ }
ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index cccbe072274e..c6f4229eb106 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -73,7 +73,7 @@ struct rk805_pctrl_info {
int num_pin_groups;
const struct pinctrl_pin_desc *pins;
unsigned int num_pins;
- struct rk805_pin_config *pin_cfg;
+ const struct rk805_pin_config *pin_cfg;
};
enum rk805_pinmux_option {
@@ -121,7 +121,7 @@ static const struct rk805_pin_group rk805_pin_groups[] = {
#define RK805_GPIO0_VAL_MSK BIT(0)
#define RK805_GPIO1_VAL_MSK BIT(1)
-static struct rk805_pin_config rk805_gpio_cfgs[] = {
+static const struct rk805_pin_config rk805_gpio_cfgs[] = {
{
.reg = RK805_OUT_REG,
.val_msk = RK805_GPIO0_VAL_MSK,
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 098951346339..c07324d1f265 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -508,8 +508,8 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
}
map_num += grp->npins;
- new_map = devm_kcalloc(pctldev->dev, map_num, sizeof(*new_map),
- GFP_KERNEL);
+
+ new_map = kcalloc(map_num, sizeof(*new_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
@@ -519,7 +519,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
if (!parent) {
- devm_kfree(pctldev->dev, new_map);
+ kfree(new_map);
return -EINVAL;
}
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
@@ -546,6 +546,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
static void rockchip_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
+ kfree(map);
}
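
The rationale for the two rockchip hunks above is lifetime: the pinctrl core releases a map through .dt_free_map as soon as it is done with it, so the buffer has to come from plain kcalloc() and be freed there rather than being tied to the device with devm_*. A minimal sketch of the pairing, mirroring the rockchip_pctrl_ops definition that follows (other callbacks elided, names as in the patch):

static const struct pinctrl_ops example_pctrl_ops = {
	/* get_groups_count / get_group_name / get_group_pins elided */
	.dt_node_to_map	= rockchip_dt_node_to_map,	/* kcalloc()s new_map */
	.dt_free_map	= rockchip_dt_free_map,		/* kfree()s it again */
};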
static const struct pinctrl_ops rockchip_pctrl_ops = {
@@ -2940,14 +2941,14 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
sizeof(struct rockchip_pmx_func),
GFP_KERNEL);
if (!info->functions)
- return -EINVAL;
+ return -ENOMEM;
info->groups = devm_kcalloc(dev,
info->ngroups,
sizeof(struct rockchip_pin_group),
GFP_KERNEL);
if (!info->groups)
- return -EINVAL;
+ return -ENOMEM;
i = 0;
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index da2d8365c690..38a14bbced5f 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -93,7 +93,7 @@ struct rza1_bidir_entry {
};
/**
- * rza1_swio_pin - describe a single pin that needs bidir flag applied.
+ * rza1_swio_pin - describe a single pin that needs swio flag applied.
*/
struct rza1_swio_pin {
u16 pin: 4;
@@ -418,7 +418,7 @@ static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = {
};
static const struct rza1_swio_entry rza1l_swio_entries[] = {
- [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins },
+ [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins },
};
/* RZ/A1L (r7s72102x) pinmux flags table */
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 60100b45f5e5..1aae803c12cd 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -288,7 +288,7 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
struct pinctrl_gpio_range *range;
enum pin_config_param param;
u32 arg;
- int dir, i, ret;
+ int i, ret;
range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
if (!range) {
@@ -296,10 +296,6 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -EINVAL;
}
- dir = stmfx_gpio_get_direction(&pctl->gpio_chip, pin);
- if (dir < 0)
- return dir;
-
for (i = 0; i < num_configs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 6e74bd87d959..708bc91862fe 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -988,7 +988,7 @@ static unsigned int sx150x_maybe_swizzle(struct sx150x_pinctrl *pctl,
/*
* In order to mask the differences between 16 and 8 bit expander
* devices we set up a sligthly ficticious regmap that pretends to be
- * a set of 32-bit (to accomodate RegSenseLow/RegSenseHigh
+ * a set of 32-bit (to accommodate RegSenseLow/RegSenseHigh
* pair/quartet) registers and transparently reconstructs those
* registers via multiple I2C/SMBus reads
*
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index bddf2c5dd3bf..eab029a21643 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -425,15 +425,6 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_init);
-int pxa2xx_pinctrl_exit(struct platform_device *pdev)
-{
- struct pxa_pinctrl *pctl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pctl->pctl_dev);
- return 0;
-}
-EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit);
-
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_DESCRIPTION("Marvell PXA2xx pinctrl driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index c5d4428f1f94..ff1ee159dca2 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -216,4 +216,13 @@ config PINCTRL_SM8150
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8150 platform.
+config PINCTRL_SM8250
+ tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8250 platform.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index d9e09045a776..061ec9fb659b 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
+obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 85858c1d56d0..83b7d64bc4c1 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -23,7 +23,6 @@
#include <linux/pm.h>
#include <linux/log2.h>
#include <linux/qcom_scm.h>
-#include <linux/io.h>
#include <linux/soc/qcom/irq.h>
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
new file mode 100644
index 000000000000..a660f1274b66
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -0,0 +1,1361 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const char * const sm8250_tiles[] = {
+ "west",
+ "south",
+ "north",
+};
+
+enum {
+ WEST,
+ SOUTH,
+ NORTH,
+};
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = REG_SIZE * id + 0x4, \
+ .intr_cfg_reg = REG_SIZE * id + 0x8, \
+ .intr_status_reg = REG_SIZE * id + 0xc, \
+ .intr_target_reg = REG_SIZE * id + 0x8, \
+ .tile = _tile, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
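
Every GPIO's registers are derived purely from its index: REG_SIZE (0x1000) times the pin id plus a small fixed offset, interpreted within the tile named in the macro. A worked example for a hypothetical PINGROUP(5, NORTH, ...) entry:

/*
 * PINGROUP(5, NORTH, ...) yields, as offsets inside the "north" tile:
 *   ctl_reg         = 5 * 0x1000        = 0x5000
 *   io_reg          = 5 * 0x1000 + 0x4  = 0x5004
 *   intr_cfg_reg    = 5 * 0x1000 + 0x8  = 0x5008
 *   intr_status_reg = 5 * 0x1000 + 0xc  = 0x500c
 */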
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = NORTH, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .tile = SOUTH, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm8250_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "SDC2_CLK"),
+ PINCTRL_PIN(181, "SDC2_CMD"),
+ PINCTRL_PIN(182, "SDC2_DATA"),
+ PINCTRL_PIN(183, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+
+static const unsigned int sdc2_clk_pins[] = { 180 };
+static const unsigned int sdc2_cmd_pins[] = { 181 };
+static const unsigned int sdc2_data_pins[] = { 182 };
+static const unsigned int ufs_reset_pins[] = { 183 };
+
+enum sm8250_functions {
+ msm_mux_aoss_cti,
+ msm_mux_atest,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_dp_lcd,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gpio,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_lpass_slimbus,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s0_data0,
+ msm_mux_mi2s0_data1,
+ msm_mux_mi2s0_sck,
+ msm_mux_mi2s0_ws,
+ msm_mux_mi2s1_data0,
+ msm_mux_mi2s1_data1,
+ msm_mux_mi2s1_sck,
+ msm_mux_mi2s1_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_pci_e0,
+ msm_mux_pci_e1,
+ msm_mux_pci_e2,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_bypassnl,
+ msm_mux_pll_clk,
+ msm_mux_pll_reset,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup18,
+ msm_mux_qup19,
+ msm_mux_qup2,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_qup6,
+ msm_mux_qup7,
+ msm_mux_qup8,
+ msm_mux_qup9,
+ msm_mux_qup_l4,
+ msm_mux_qup_l5,
+ msm_mux_qup_l6,
+ msm_mux_sd_write,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_sp_cmu,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsif0_clk,
+ msm_mux_tsif0_data,
+ msm_mux_tsif0_en,
+ msm_mux_tsif0_error,
+ msm_mux_tsif0_sync,
+ msm_mux_tsif1_clk,
+ msm_mux_tsif1_data,
+ msm_mux_tsif1_en,
+ msm_mux_tsif1_error,
+ msm_mux_tsif1_sync,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const tsif1_data_groups[] = {
+ "gpio75",
+};
+static const char * const sdc41_groups[] = {
+ "gpio75",
+};
+static const char * const tsif1_sync_groups[] = {
+ "gpio76",
+};
+static const char * const sdc40_groups[] = {
+ "gpio76",
+};
+static const char * const aoss_cti_groups[] = {
+ "gpio77",
+};
+static const char * const phase_flag_groups[] = {
+ "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50", "gpio51",
+ "gpio69", "gpio70", "gpio71", "gpio72", "gpio73", "gpio74", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio103", "gpio104", "gpio115", "gpio116", "gpio117", "gpio118",
+ "gpio119", "gpio120", "gpio122", "gpio124", "gpio125",
+};
+static const char * const sd_write_groups[] = {
+ "gpio78",
+};
+static const char * const pci_e0_groups[] = {
+ "gpio79", "gpio80",
+};
+static const char * const pci_e1_groups[] = {
+ "gpio82", "gpio83",
+};
+static const char * const pci_e2_groups[] = {
+ "gpio85", "gpio86",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio85",
+};
+static const char * const atest_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio32", "gpio33", "gpio34",
+ "gpio35", "gpio36", "gpio37", "gpio85", "gpio86", "gpio87", "gpio88",
+ "gpio89",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio86",
+};
+static const char * const tsif1_error_groups[] = {
+ "gpio90",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio90",
+};
+static const char * const tsif0_error_groups[] = {
+ "gpio91",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio91",
+};
+static const char * const cam_mclk_groups[] = {
+ "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99", "gpio100",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio94", "gpio95", "gpio143", "gpio144",
+};
+static const char * const pll_bypassnl_groups[] = {
+ "gpio96",
+};
+static const char * const pll_reset_groups[] = {
+ "gpio97",
+};
+static const char * const cci_i2c_groups[] = {
+ "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106",
+ "gpio107", "gpio108",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99", "gpio100",
+ "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106",
+ "gpio107", "gpio108", "gpio109", "gpio110", "gpio111", "gpio160",
+ "gpio161", "gpio162", "gpio163", "gpio164", "gpio165", "gpio166",
+ "gpio167", "gpio168", "gpio169", "gpio170", "gpio171", "gpio172",
+ "gpio173", "gpio174", "gpio175", "gpio176", "gpio177",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio106", "gpio136",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio107", "gpio137",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio108", "gpio138",
+};
+static const char * const cci_timer0_groups[] = {
+ "gpio109",
+};
+static const char * const cci_timer1_groups[] = {
+ "gpio110",
+};
+static const char * const cci_timer2_groups[] = {
+ "gpio111",
+};
+static const char * const cci_timer3_groups[] = {
+ "gpio112",
+};
+static const char * const cci_async_groups[] = {
+ "gpio112", "gpio113", "gpio114",
+};
+static const char * const cci_timer4_groups[] = {
+ "gpio113",
+};
+static const char * const qup2_groups[] = {
+ "gpio115", "gpio116", "gpio117", "gpio118",
+};
+static const char * const qup3_groups[] = {
+ "gpio119", "gpio120", "gpio121", "gpio122",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio123",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio123",
+};
+static const char * const qup9_groups[] = {
+ "gpio125", "gpio126", "gpio127", "gpio128",
+};
+static const char * const qup10_groups[] = {
+ "gpio129", "gpio130", "gpio131", "gpio132",
+};
+static const char * const mi2s2_sck_groups[] = {
+ "gpio133",
+};
+static const char * const mi2s2_data0_groups[] = {
+ "gpio134",
+};
+static const char * const mi2s2_ws_groups[] = {
+ "gpio135",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio136",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio137",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio137",
+};
+static const char * const mi2s2_data1_groups[] = {
+ "gpio137",
+};
+static const char * const mi2s0_sck_groups[] = {
+ "gpio138",
+};
+static const char * const mi2s0_data0_groups[] = {
+ "gpio139",
+};
+static const char * const mi2s0_data1_groups[] = {
+ "gpio140",
+};
+static const char * const mi2s0_ws_groups[] = {
+ "gpio141",
+};
+static const char * const lpass_slimbus_groups[] = {
+ "gpio142", "gpio143", "gpio144", "gpio145",
+};
+static const char * const mi2s1_sck_groups[] = {
+ "gpio142",
+};
+static const char * const mi2s1_data0_groups[] = {
+ "gpio143",
+};
+static const char * const mi2s1_data1_groups[] = {
+ "gpio144",
+};
+static const char * const mi2s1_ws_groups[] = {
+ "gpio145",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio159",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio160",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio161",
+};
+static const char * const sp_cmu_groups[] = {
+ "gpio162",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio163",
+};
+static const char * const qup19_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio0", "gpio2", "gpio2", "gpio44", "gpio45", "gpio46", "gpio92",
+ "gpio93",
+};
+static const char * const qup1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const ibi_i3c_groups[] = {
+ "gpio4", "gpio5", "gpio24", "gpio25", "gpio28", "gpio29", "gpio40",
+ "gpio41",
+};
+static const char * const qup_l4_groups[] = {
+ "gpio6", "gpio14", "gpio46", "gpio123",
+};
+static const char * const qup_l5_groups[] = {
+ "gpio7", "gpio15", "gpio47", "gpio124",
+};
+static const char * const qup4_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const qup5_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const qup6_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const qup7_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+static const char * const qup8_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const qup0_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const qup12_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+static const char * const qup13_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const qup14_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio40", "gpio43",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio41", "gpio42",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio42",
+};
+static const char * const qup15_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio44",
+};
+static const char * const qup16_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+static const char * const qup17_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio52", "gpio53",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio54",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio55",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio55", "gpio56",
+};
+static const char * const qup18_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+static const char * const qup11_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio64", "gpio90",
+};
+static const char * const qup_l6_groups[] = {
+ "gpio64", "gpio77", "gpio92", "gpio93",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio65",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio65",
+};
+static const char * const mdp_vsync_groups[] = {
+ "gpio66", "gpio67", "gpio68", "gpio122", "gpio124",
+};
+static const char * const dp_lcd_groups[] = {
+ "gpio67",
+};
+static const char * const dp_hot_groups[] = {
+ "gpio68",
+};
+static const char * const qspi_cs_groups[] = {
+ "gpio69", "gpio75",
+};
+static const char * const tsif0_clk_groups[] = {
+ "gpio69",
+};
+static const char * const qspi0_groups[] = {
+ "gpio70",
+};
+static const char * const tsif0_en_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync0_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync1_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync2_groups[] = {
+ "gpio70",
+};
+static const char * const mdp_vsync3_groups[] = {
+ "gpio70",
+};
+static const char * const qspi1_groups[] = {
+ "gpio71",
+};
+static const char * const tsif0_data_groups[] = {
+ "gpio71",
+};
+static const char * const sdc4_cmd_groups[] = {
+ "gpio71",
+};
+static const char * const qspi2_groups[] = {
+ "gpio72",
+};
+static const char * const tsif0_sync_groups[] = {
+ "gpio72",
+};
+static const char * const sdc43_groups[] = {
+ "gpio72",
+};
+static const char * const qspi_clk_groups[] = {
+ "gpio73",
+};
+static const char * const tsif1_clk_groups[] = {
+ "gpio73",
+};
+static const char * const sdc4_clk_groups[] = {
+ "gpio73",
+};
+static const char * const qspi3_groups[] = {
+ "gpio74",
+};
+static const char * const tsif1_en_groups[] = {
+ "gpio74",
+};
+static const char * const sdc42_groups[] = {
+ "gpio74",
+};
+
+static const struct msm_function sm8250_functions[] = {
+ FUNCTION(aoss_cti),
+ FUNCTION(atest),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(dp_lcd),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gpio),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(lpass_slimbus),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s0_data0),
+ FUNCTION(mi2s0_data1),
+ FUNCTION(mi2s0_sck),
+ FUNCTION(mi2s0_ws),
+ FUNCTION(mi2s1_data0),
+ FUNCTION(mi2s1_data1),
+ FUNCTION(mi2s1_sck),
+ FUNCTION(mi2s1_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(pci_e0),
+ FUNCTION(pci_e1),
+ FUNCTION(pci_e2),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_bypassnl),
+ FUNCTION(pll_clk),
+ FUNCTION(pll_reset),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup18),
+ FUNCTION(qup19),
+ FUNCTION(qup2),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(qup6),
+ FUNCTION(qup7),
+ FUNCTION(qup8),
+ FUNCTION(qup9),
+ FUNCTION(qup_l4),
+ FUNCTION(qup_l5),
+ FUNCTION(qup_l6),
+ FUNCTION(sd_write),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sec_mi2s),
+ FUNCTION(sp_cmu),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(tsif0_clk),
+ FUNCTION(tsif0_data),
+ FUNCTION(tsif0_en),
+ FUNCTION(tsif0_error),
+ FUNCTION(tsif0_sync),
+ FUNCTION(tsif1_clk),
+ FUNCTION(tsif1_data),
+ FUNCTION(tsif1_en),
+ FUNCTION(tsif1_error),
+ FUNCTION(tsif1_sync),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vsense_trigger),
+};
+
+/* Every pin is maintained as a single group, and a missing or non-existing
+ * pin would be maintained as a dummy group to synchronize the pin group
+ * index with the pin descriptor registered with the pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8250_groups[] = {
+ [0] = PINGROUP(0, SOUTH, qup19, qdss_cti, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, SOUTH, qup19, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, SOUTH, qup19, qdss_cti, qdss_cti, _, _, _, _, _, _),
+ [3] = PINGROUP(3, SOUTH, qup19, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, NORTH, qup1, ibi_i3c, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, NORTH, qup1, ibi_i3c, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, NORTH, qup1, qup_l4, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, NORTH, qup1, qup_l5, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, NORTH, qup4, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, NORTH, qup5, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, NORTH, qup5, _, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, NORTH, qup5, qup_l4, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, NORTH, qup5, qup_l5, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, NORTH, qup6, _, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, NORTH, qup7, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, SOUTH, qup8, ibi_i3c, atest, _, _, _, _, _, _),
+ [25] = PINGROUP(25, SOUTH, qup8, ibi_i3c, atest, _, _, _, _, _, _),
+ [26] = PINGROUP(26, SOUTH, qup8, atest, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, SOUTH, qup8, atest, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, NORTH, qup0, ibi_i3c, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, NORTH, qup0, ibi_i3c, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, NORTH, qup0, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, NORTH, qup0, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, SOUTH, qup12, _, atest, _, _, _, _, _, _),
+ [33] = PINGROUP(33, SOUTH, qup12, atest, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, SOUTH, qup12, atest, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, SOUTH, qup12, atest, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, SOUTH, qup13, atest, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, SOUTH, qup13, atest, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, SOUTH, qup13, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, SOUTH, qup13, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, SOUTH, qup14, ibi_i3c, _, ddr_pxi3, _, _, _, _, _),
+ [41] = PINGROUP(41, SOUTH, qup14, ibi_i3c, _, ddr_pxi1, _, _, _, _, _),
+ [42] = PINGROUP(42, SOUTH, qup14, vsense_trigger, ddr_pxi1, _, _, _, _, _, _),
+ [43] = PINGROUP(43, SOUTH, qup14, ddr_pxi3, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, SOUTH, qup15, qdss_cti, dbg_out, _, _, _, _, _, _),
+ [45] = PINGROUP(45, SOUTH, qup15, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [46] = PINGROUP(46, SOUTH, qup15, qup_l4, qdss_cti, phase_flag, _, _, _, _, _),
+ [47] = PINGROUP(47, SOUTH, qup15, qup_l5, phase_flag, _, _, _, _, _, _),
+ [48] = PINGROUP(48, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, SOUTH, qup16, phase_flag, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, SOUTH, qup17, ddr_pxi0, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, SOUTH, qup17, ddr_pxi0, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, SOUTH, qup17, jitter_bist, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, SOUTH, qup17, pll_bist, ddr_pxi2, _, _, _, _, _, _),
+ [56] = PINGROUP(56, SOUTH, qup18, ddr_pxi2, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, SOUTH, qup18, _, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, SOUTH, qup18, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, SOUTH, qup18, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, SOUTH, qup11, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, SOUTH, usb2phy_ac, qup_l6, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, SOUTH, usb_phy, pll_clk, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, NORTH, mdp_vsync, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, NORTH, mdp_vsync, dp_lcd, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, NORTH, mdp_vsync, dp_hot, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, SOUTH, qspi_cs, tsif0_clk, phase_flag, _, _, _, _, _, _),
+ [70] = PINGROUP(70, SOUTH, qspi0, tsif0_en, mdp_vsync0, mdp_vsync1, mdp_vsync2, mdp_vsync3, phase_flag, _, _),
+ [71] = PINGROUP(71, SOUTH, qspi1, tsif0_data, sdc4_cmd, phase_flag, _, _, _, _, _),
+ [72] = PINGROUP(72, SOUTH, qspi2, tsif0_sync, sdc43, phase_flag, _, _, _, _, _),
+ [73] = PINGROUP(73, SOUTH, qspi_clk, tsif1_clk, sdc4_clk, phase_flag, _, _, _, _, _),
+ [74] = PINGROUP(74, SOUTH, qspi3, tsif1_en, sdc42, phase_flag, _, _, _, _, _),
+ [75] = PINGROUP(75, SOUTH, qspi_cs, tsif1_data, sdc41, _, _, _, _, _, _),
+ [76] = PINGROUP(76, SOUTH, tsif1_sync, sdc40, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, NORTH, qup_l6, aoss_cti, phase_flag, _, _, _, _, _, _),
+ [78] = PINGROUP(78, NORTH, sd_write, phase_flag, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, NORTH, pci_e0, phase_flag, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, NORTH, pci_e0, phase_flag, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, NORTH, phase_flag, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, NORTH, pci_e1, phase_flag, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, NORTH, pci_e1, phase_flag, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, NORTH, phase_flag, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, SOUTH, pci_e2, tgu_ch0, atest, _, _, _, _, _, _),
+ [86] = PINGROUP(86, SOUTH, pci_e2, tgu_ch3, atest, _, _, _, _, _, _),
+ [87] = PINGROUP(87, SOUTH, atest, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, SOUTH, _, atest, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, SOUTH, _, atest, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, SOUTH, tsif1_error, usb2phy_ac, tgu_ch1, _, _, _, _, _, _),
+ [91] = PINGROUP(91, SOUTH, tsif0_error, tgu_ch2, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, NORTH, qup_l6, qdss_cti, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, NORTH, qup_l6, qdss_cti, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, NORTH, cam_mclk, ddr_bist, qdss_gpio, _, _, _, _, _, _),
+ [95] = PINGROUP(95, NORTH, cam_mclk, ddr_bist, qdss_gpio, _, _, _, _, _, _),
+ [96] = PINGROUP(96, NORTH, cam_mclk, pll_bypassnl, qdss_gpio, _, _, _, _, _, _),
+ [97] = PINGROUP(97, NORTH, cam_mclk, pll_reset, qdss_gpio, _, _, _, _, _, _),
+ [98] = PINGROUP(98, NORTH, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, NORTH, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, NORTH, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, NORTH, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, NORTH, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, NORTH, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [104] = PINGROUP(104, NORTH, cci_i2c, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [105] = PINGROUP(105, NORTH, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, NORTH, cci_i2c, gcc_gp1, qdss_gpio, _, _, _, _, _, _),
+ [107] = PINGROUP(107, NORTH, cci_i2c, gcc_gp2, qdss_gpio, _, _, _, _, _, _),
+ [108] = PINGROUP(108, NORTH, cci_i2c, gcc_gp3, qdss_gpio, _, _, _, _, _, _),
+ [109] = PINGROUP(109, NORTH, cci_timer0, qdss_gpio, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, NORTH, cci_timer1, qdss_gpio, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, NORTH, cci_timer2, qdss_gpio, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, NORTH, cci_timer3, cci_async, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, NORTH, cci_timer4, cci_async, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, NORTH, cci_async, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, NORTH, qup2, phase_flag, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, NORTH, qup3, phase_flag, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, NORTH, qup3, phase_flag, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, NORTH, qup3, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, NORTH, qup3, mdp_vsync, phase_flag, _, _, _, _, _, _),
+ [123] = PINGROUP(123, NORTH, qup_l4, tsense_pwm1, tsense_pwm2, _, _, _, _, _, _),
+ [124] = PINGROUP(124, NORTH, qup_l5, mdp_vsync, phase_flag, _, _, _, _, _, _),
+ [125] = PINGROUP(125, SOUTH, qup9, phase_flag, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, SOUTH, qup9, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, SOUTH, qup9, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, SOUTH, qup9, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, SOUTH, qup10, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, WEST, mi2s2_sck, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, WEST, mi2s2_data0, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, WEST, mi2s2_ws, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, WEST, pri_mi2s, gcc_gp1, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, WEST, sec_mi2s, audio_ref, mi2s2_data1, gcc_gp2, _, _, _, _, _),
+ [138] = PINGROUP(138, WEST, mi2s0_sck, gcc_gp3, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, WEST, mi2s0_data0, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, WEST, mi2s0_data1, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, WEST, mi2s0_ws, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, WEST, lpass_slimbus, mi2s1_sck, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, WEST, lpass_slimbus, mi2s1_data0, ddr_bist, _, _, _, _, _, _),
+ [144] = PINGROUP(144, WEST, lpass_slimbus, mi2s1_data1, ddr_bist, _, _, _, _, _, _),
+ [145] = PINGROUP(145, WEST, lpass_slimbus, mi2s1_ws, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, WEST, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, WEST, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, WEST, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, WEST, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, WEST, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, WEST, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, WEST, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, WEST, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, WEST, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, WEST, _, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, WEST, _, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, WEST, _, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, WEST, _, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, WEST, cri_trng0, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, WEST, cri_trng1, qdss_gpio, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, WEST, cri_trng, qdss_gpio, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, WEST, sp_cmu, qdss_gpio, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, WEST, prng_rosc, qdss_gpio, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, _),
+ [180] = UFS_RESET(ufs_reset, 0xb8000),
+ [181] = SDC_PINGROUP(sdc2_clk, 0x7000, 14, 6),
+ [182] = SDC_PINGROUP(sdc2_cmd, 0xb7000, 11, 3),
+ [183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
+ .pins = sm8250_pins,
+ .npins = ARRAY_SIZE(sm8250_pins),
+ .functions = sm8250_functions,
+ .nfunctions = ARRAY_SIZE(sm8250_functions),
+ .groups = sm8250_groups,
+ .ngroups = ARRAY_SIZE(sm8250_groups),
+ .ngpios = 181,
+ .tiles = sm8250_tiles,
+ .ntiles = ARRAY_SIZE(sm8250_tiles),
+};
+
+static int sm8250_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8250_pinctrl);
+}
+
+static const struct of_device_id sm8250_pinctrl_of_match[] = {
+ { .compatible = "qcom,sm8250-pinctrl", },
+ { },
+};
+
+static struct platform_driver sm8250_pinctrl_driver = {
+ .driver = {
+ .name = "sm8250-pinctrl",
+ .of_match_table = sm8250_pinctrl_of_match,
+ },
+ .probe = sm8250_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm8250_pinctrl_init(void)
+{
+ return platform_driver_register(&sm8250_pinctrl_driver);
+}
+arch_initcall(sm8250_pinctrl_init);
+
+static void __exit sm8250_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sm8250_pinctrl_driver);
+}
+module_exit(sm8250_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sm8250 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sm8250_pinctrl_of_match);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 0599f5127b01..84501c785473 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -40,6 +40,8 @@ struct exynos_irq_chip {
u32 eint_pend;
u32 eint_wake_mask_value;
u32 eint_wake_mask_reg;
+ void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip);
};
static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
@@ -265,6 +267,7 @@ struct exynos_eint_gpio_save {
u32 eint_con;
u32 eint_fltcon0;
u32 eint_fltcon1;
+ u32 eint_mask;
};
/*
@@ -342,6 +345,47 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
return 0;
}
+static void
+exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ struct regmap *pmu_regs;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ pmu_regs = drvdata->retention_ctrl->priv;
+ dev_info(drvdata->dev,
+ "Setting external wakeup interrupt mask: 0x%x\n",
+ irq_chip->eint_wake_mask_value);
+
+ regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
+ irq_chip->eint_wake_mask_value);
+}
+
+static void
+s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
+ struct exynos_irq_chip *irq_chip)
+{
+ void __iomem *clk_base;
+
+ if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
+ dev_warn(drvdata->dev,
+ "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
+ return;
+ }
+
+ clk_base = (void __iomem *)drvdata->retention_ctrl->priv;
+
+ __raw_writel(irq_chip->eint_wake_mask_value,
+ clk_base + irq_chip->eint_wake_mask_reg);
+}
+
/*
* irq_chip for wakeup interrupts
*/
@@ -360,8 +404,9 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = {
.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
- /* Only difference with exynos4210_wkup_irq_chip: */
+ /* Only differences with exynos4210_wkup_irq_chip: */
.eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask,
};
static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
@@ -380,6 +425,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
.eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
@@ -398,6 +444,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.eint_pend = EXYNOS7_WKUP_EPEND_OFFSET,
.eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED,
.eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK,
+ .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask,
};
/* list of external wakeup controllers supported */
@@ -574,27 +621,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
return 0;
}
-static void
-exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata,
- struct exynos_irq_chip *irq_chip)
-{
- struct regmap *pmu_regs;
-
- if (!drvdata->retention_ctrl || !drvdata->retention_ctrl->priv) {
- dev_warn(drvdata->dev,
- "No retention data configured bank with external wakeup interrupt. Wake-up mask will not be set.\n");
- return;
- }
-
- pmu_regs = drvdata->retention_ctrl->priv;
- dev_info(drvdata->dev,
- "Setting external wakeup interrupt mask: 0x%x\n",
- irq_chip->eint_wake_mask_value);
-
- regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg,
- irq_chip->eint_wake_mask_value);
-}
-
static void exynos_pinctrl_suspend_bank(
struct samsung_pinctrl_drv_data *drvdata,
struct samsung_pin_bank *bank)
@@ -608,10 +634,13 @@ static void exynos_pinctrl_suspend_bank(
+ 2 * bank->eint_offset);
save->eint_fltcon1 = readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4);
+ save->eint_mask = readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
pr_debug("%s: save con %#010x\n", bank->name, save->eint_con);
pr_debug("%s: save fltcon0 %#010x\n", bank->name, save->eint_fltcon0);
pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
+ pr_debug("%s: save mask %#010x\n", bank->name, save->eint_mask);
}
void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
@@ -626,8 +655,8 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
else if (bank->eint_type == EINT_TYPE_WKUP) {
if (!irq_chip) {
irq_chip = bank->irq_chip;
- exynos_pinctrl_set_eint_wakeup_mask(drvdata,
- irq_chip);
+ irq_chip->set_eint_wakeup_mask(drvdata,
+ irq_chip);
} else if (bank->irq_chip != irq_chip) {
dev_warn(drvdata->dev,
"More than one external wakeup interrupt chip configured (bank: %s). This is not supported by hardware nor by driver.\n",
@@ -653,6 +682,9 @@ static void exynos_pinctrl_resume_bank(
pr_debug("%s: fltcon1 %#010x => %#010x\n", bank->name,
readl(regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4), save->eint_fltcon1);
+ pr_debug("%s: mask %#010x => %#010x\n", bank->name,
+ readl(regs + bank->irq_chip->eint_mask
+ + bank->eint_offset), save->eint_mask);
writel(save->eint_con, regs + EXYNOS_GPIO_ECON_OFFSET
+ bank->eint_offset);
@@ -660,6 +692,8 @@ static void exynos_pinctrl_resume_bank(
+ 2 * bank->eint_offset);
writel(save->eint_fltcon1, regs + EXYNOS_GPIO_EFLTCON_OFFSET
+ 2 * bank->eint_offset + 4);
+ writel(save->eint_mask, regs + bank->irq_chip->eint_mask
+ + bank->eint_offset);
}
void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 9552851b96f1..c461a2f1927a 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -12,6 +12,7 @@ config PINCTRL_SH_PFC
select PINCTRL_PFC_EMEV2 if ARCH_EMEV2
select PINCTRL_PFC_R8A73A4 if ARCH_R8A73A4
select PINCTRL_PFC_R8A7740 if ARCH_R8A7740
+ select PINCTRL_PFC_R8A7742 if ARCH_R8A7742
select PINCTRL_PFC_R8A7743 if ARCH_R8A7743
select PINCTRL_PFC_R8A7744 if ARCH_R8A7744
select PINCTRL_PFC_R8A7745 if ARCH_R8A7745
@@ -74,6 +75,9 @@ config PINCTRL_PFC_R8A7740
bool "R-Mobile A1 pin control support" if COMPILE_TEST
select PINCTRL_SH_PFC_GPIO
+config PINCTRL_PFC_R8A7742
+ bool "RZ/G1H pin control support" if COMPILE_TEST
+
config PINCTRL_PFC_R8A7743
bool "RZ/G1M pin control support" if COMPILE_TEST
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 9ebe321d24c4..3855d82069c9 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PINCTRL_SH_PFC_GPIO) += gpio.o
obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7742) += pfc-r8a7790.o
obj-$(CONFIG_PINCTRL_PFC_R8A7743) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7744) += pfc-r8a7791.o
obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index a2e19efa26e3..f368383cba61 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -485,6 +485,12 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7740_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7742
+ {
+ .compatible = "renesas,pfc-r8a7742",
+ .data = &r8a7742_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7743
{
.compatible = "renesas,pfc-r8a7743",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 3366ed561cce..f524401fec5f 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -3938,297 +3938,304 @@ static const unsigned int vin3_clk_mux[] = {
VI3_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(audio_clk_a),
- SH_PFC_PIN_GROUP(audio_clk_b),
- SH_PFC_PIN_GROUP(audio_clk_c),
- SH_PFC_PIN_GROUP(audio_clkout),
- SH_PFC_PIN_GROUP(audio_clkout_b),
- SH_PFC_PIN_GROUP(audio_clkout_c),
- SH_PFC_PIN_GROUP(audio_clkout_d),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_gmii),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync_0),
- SH_PFC_PIN_GROUP(du_sync_1),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du0_clk_in),
- SH_PFC_PIN_GROUP(du1_clk_in),
- SH_PFC_PIN_GROUP(du2_clk_in),
- SH_PFC_PIN_GROUP(eth_link),
- SH_PFC_PIN_GROUP(eth_magic),
- SH_PFC_PIN_GROUP(eth_mdio),
- SH_PFC_PIN_GROUP(eth_rmii),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif0_data_b),
- SH_PFC_PIN_GROUP(hscif0_ctrl_b),
- SH_PFC_PIN_GROUP(hscif0_data_c),
- SH_PFC_PIN_GROUP(hscif0_ctrl_c),
- SH_PFC_PIN_GROUP(hscif0_data_d),
- SH_PFC_PIN_GROUP(hscif0_ctrl_d),
- SH_PFC_PIN_GROUP(hscif0_data_e),
- SH_PFC_PIN_GROUP(hscif0_ctrl_e),
- SH_PFC_PIN_GROUP(hscif0_data_f),
- SH_PFC_PIN_GROUP(hscif0_ctrl_f),
- SH_PFC_PIN_GROUP(hscif1_data),
- SH_PFC_PIN_GROUP(hscif1_clk),
- SH_PFC_PIN_GROUP(hscif1_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_clk_b),
- SH_PFC_PIN_GROUP(hscif1_ctrl_b),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c1),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c1_c),
- SH_PFC_PIN_GROUP(i2c2),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c2_c),
- SH_PFC_PIN_GROUP(i2c2_d),
- SH_PFC_PIN_GROUP(i2c2_e),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(iic0),
- SH_PFC_PIN_GROUP(iic1),
- SH_PFC_PIN_GROUP(iic1_b),
- SH_PFC_PIN_GROUP(iic1_c),
- SH_PFC_PIN_GROUP(iic2),
- SH_PFC_PIN_GROUP(iic2_b),
- SH_PFC_PIN_GROUP(iic2_c),
- SH_PFC_PIN_GROUP(iic2_d),
- SH_PFC_PIN_GROUP(iic2_e),
- SH_PFC_PIN_GROUP(iic3),
- SH_PFC_PIN_GROUP(intc_irq0),
- SH_PFC_PIN_GROUP(intc_irq1),
- SH_PFC_PIN_GROUP(intc_irq2),
- SH_PFC_PIN_GROUP(intc_irq3),
- SH_PFC_PIN_GROUP(mlb_3pin),
- SH_PFC_PIN_GROUP(mmc0_data1),
- SH_PFC_PIN_GROUP(mmc0_data4),
- SH_PFC_PIN_GROUP(mmc0_data8),
- SH_PFC_PIN_GROUP(mmc0_ctrl),
- SH_PFC_PIN_GROUP(mmc1_data1),
- SH_PFC_PIN_GROUP(mmc1_data4),
- SH_PFC_PIN_GROUP(mmc1_data8),
- SH_PFC_PIN_GROUP(mmc1_ctrl),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_rx),
- SH_PFC_PIN_GROUP(msiof0_tx),
- SH_PFC_PIN_GROUP(msiof0_clk_b),
- SH_PFC_PIN_GROUP(msiof0_ss1_b),
- SH_PFC_PIN_GROUP(msiof0_ss2_b),
- SH_PFC_PIN_GROUP(msiof0_rx_b),
- SH_PFC_PIN_GROUP(msiof0_tx_b),
- SH_PFC_PIN_GROUP(msiof1_clk),
- SH_PFC_PIN_GROUP(msiof1_sync),
- SH_PFC_PIN_GROUP(msiof1_ss1),
- SH_PFC_PIN_GROUP(msiof1_ss2),
- SH_PFC_PIN_GROUP(msiof1_rx),
- SH_PFC_PIN_GROUP(msiof1_tx),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_rx_b),
- SH_PFC_PIN_GROUP(msiof1_tx_b),
- SH_PFC_PIN_GROUP(msiof2_clk),
- SH_PFC_PIN_GROUP(msiof2_sync),
- SH_PFC_PIN_GROUP(msiof2_ss1),
- SH_PFC_PIN_GROUP(msiof2_ss2),
- SH_PFC_PIN_GROUP(msiof2_rx),
- SH_PFC_PIN_GROUP(msiof2_tx),
- SH_PFC_PIN_GROUP(msiof3_clk),
- SH_PFC_PIN_GROUP(msiof3_sync),
- SH_PFC_PIN_GROUP(msiof3_ss1),
- SH_PFC_PIN_GROUP(msiof3_ss2),
- SH_PFC_PIN_GROUP(msiof3_rx),
- SH_PFC_PIN_GROUP(msiof3_tx),
- SH_PFC_PIN_GROUP(msiof3_clk_b),
- SH_PFC_PIN_GROUP(msiof3_sync_b),
- SH_PFC_PIN_GROUP(msiof3_rx_b),
- SH_PFC_PIN_GROUP(msiof3_tx_b),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm0_b),
- SH_PFC_PIN_GROUP(pwm1),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2),
- SH_PFC_PIN_GROUP(pwm3),
- SH_PFC_PIN_GROUP(pwm4),
- SH_PFC_PIN_GROUP(pwm5),
- SH_PFC_PIN_GROUP(pwm6),
- SH_PFC_PIN_GROUP(qspi_ctrl),
- SH_PFC_PIN_GROUP(qspi_data2),
- SH_PFC_PIN_GROUP(qspi_data4),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_clk),
- SH_PFC_PIN_GROUP(scif0_ctrl),
- SH_PFC_PIN_GROUP(scif0_data_b),
- SH_PFC_PIN_GROUP(scif1_data),
- SH_PFC_PIN_GROUP(scif1_clk),
- SH_PFC_PIN_GROUP(scif1_ctrl),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif1_data_c),
- SH_PFC_PIN_GROUP(scif1_data_d),
- SH_PFC_PIN_GROUP(scif1_clk_d),
- SH_PFC_PIN_GROUP(scif1_data_e),
- SH_PFC_PIN_GROUP(scif1_clk_e),
- SH_PFC_PIN_GROUP(scif2_data),
- SH_PFC_PIN_GROUP(scif2_clk),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scifa0_data),
- SH_PFC_PIN_GROUP(scifa0_clk),
- SH_PFC_PIN_GROUP(scifa0_ctrl),
- SH_PFC_PIN_GROUP(scifa0_data_b),
- SH_PFC_PIN_GROUP(scifa0_clk_b),
- SH_PFC_PIN_GROUP(scifa0_ctrl_b),
- SH_PFC_PIN_GROUP(scifa1_data),
- SH_PFC_PIN_GROUP(scifa1_clk),
- SH_PFC_PIN_GROUP(scifa1_ctrl),
- SH_PFC_PIN_GROUP(scifa1_data_b),
- SH_PFC_PIN_GROUP(scifa1_clk_b),
- SH_PFC_PIN_GROUP(scifa1_ctrl_b),
- SH_PFC_PIN_GROUP(scifa1_data_c),
- SH_PFC_PIN_GROUP(scifa1_clk_c),
- SH_PFC_PIN_GROUP(scifa1_ctrl_c),
- SH_PFC_PIN_GROUP(scifa1_data_d),
- SH_PFC_PIN_GROUP(scifa1_clk_d),
- SH_PFC_PIN_GROUP(scifa1_ctrl_d),
- SH_PFC_PIN_GROUP(scifa2_data),
- SH_PFC_PIN_GROUP(scifa2_clk),
- SH_PFC_PIN_GROUP(scifa2_ctrl),
- SH_PFC_PIN_GROUP(scifa2_data_b),
- SH_PFC_PIN_GROUP(scifa2_data_c),
- SH_PFC_PIN_GROUP(scifa2_clk_c),
- SH_PFC_PIN_GROUP(scifb0_data),
- SH_PFC_PIN_GROUP(scifb0_clk),
- SH_PFC_PIN_GROUP(scifb0_ctrl),
- SH_PFC_PIN_GROUP(scifb0_data_b),
- SH_PFC_PIN_GROUP(scifb0_clk_b),
- SH_PFC_PIN_GROUP(scifb0_ctrl_b),
- SH_PFC_PIN_GROUP(scifb0_data_c),
- SH_PFC_PIN_GROUP(scifb1_data),
- SH_PFC_PIN_GROUP(scifb1_clk),
- SH_PFC_PIN_GROUP(scifb1_ctrl),
- SH_PFC_PIN_GROUP(scifb1_data_b),
- SH_PFC_PIN_GROUP(scifb1_clk_b),
- SH_PFC_PIN_GROUP(scifb1_ctrl_b),
- SH_PFC_PIN_GROUP(scifb1_data_c),
- SH_PFC_PIN_GROUP(scifb1_data_d),
- SH_PFC_PIN_GROUP(scifb1_data_e),
- SH_PFC_PIN_GROUP(scifb1_clk_e),
- SH_PFC_PIN_GROUP(scifb1_data_f),
- SH_PFC_PIN_GROUP(scifb1_data_g),
- SH_PFC_PIN_GROUP(scifb1_clk_g),
- SH_PFC_PIN_GROUP(scifb2_data),
- SH_PFC_PIN_GROUP(scifb2_clk),
- SH_PFC_PIN_GROUP(scifb2_ctrl),
- SH_PFC_PIN_GROUP(scifb2_data_b),
- SH_PFC_PIN_GROUP(scifb2_clk_b),
- SH_PFC_PIN_GROUP(scifb2_ctrl_b),
- SH_PFC_PIN_GROUP(scifb2_data_c),
- SH_PFC_PIN_GROUP(scif_clk),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd),
- SH_PFC_PIN_GROUP(sdhi2_wp),
- SH_PFC_PIN_GROUP(sdhi3_data1),
- SH_PFC_PIN_GROUP(sdhi3_data4),
- SH_PFC_PIN_GROUP(sdhi3_ctrl),
- SH_PFC_PIN_GROUP(sdhi3_cd),
- SH_PFC_PIN_GROUP(sdhi3_wp),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi0129_ctrl),
- SH_PFC_PIN_GROUP(ssi1_data),
- SH_PFC_PIN_GROUP(ssi1_ctrl),
- SH_PFC_PIN_GROUP(ssi2_data),
- SH_PFC_PIN_GROUP(ssi2_ctrl),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi34_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5),
- SH_PFC_PIN_GROUP(ssi5_b),
- SH_PFC_PIN_GROUP(ssi5_c),
- SH_PFC_PIN_GROUP(ssi6),
- SH_PFC_PIN_GROUP(ssi6_b),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi7_b_data),
- SH_PFC_PIN_GROUP(ssi7_c_data),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi78_b_ctrl),
- SH_PFC_PIN_GROUP(ssi78_c_ctrl),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi8_b_data),
- SH_PFC_PIN_GROUP(ssi8_c_data),
- SH_PFC_PIN_GROUP(ssi9_data),
- SH_PFC_PIN_GROUP(ssi9_ctrl),
- SH_PFC_PIN_GROUP(tpu0_to0),
- SH_PFC_PIN_GROUP(tpu0_to1),
- SH_PFC_PIN_GROUP(tpu0_to2),
- SH_PFC_PIN_GROUP(tpu0_to3),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb0_ovc_vbus),
- SH_PFC_PIN_GROUP(usb1),
- SH_PFC_PIN_GROUP(usb2),
- VIN_DATA_PIN_GROUP(vin0_data, 24),
- VIN_DATA_PIN_GROUP(vin0_data, 20),
- SH_PFC_PIN_GROUP(vin0_data18),
- VIN_DATA_PIN_GROUP(vin0_data, 16),
- VIN_DATA_PIN_GROUP(vin0_data, 12),
- VIN_DATA_PIN_GROUP(vin0_data, 10),
- VIN_DATA_PIN_GROUP(vin0_data, 8),
- VIN_DATA_PIN_GROUP(vin0_data, 4),
- SH_PFC_PIN_GROUP(vin0_sync),
- SH_PFC_PIN_GROUP(vin0_field),
- SH_PFC_PIN_GROUP(vin0_clkenb),
- SH_PFC_PIN_GROUP(vin0_clk),
- VIN_DATA_PIN_GROUP(vin1_data, 24),
- VIN_DATA_PIN_GROUP(vin1_data, 20),
- SH_PFC_PIN_GROUP(vin1_data18),
- VIN_DATA_PIN_GROUP(vin1_data, 16),
- VIN_DATA_PIN_GROUP(vin1_data, 12),
- VIN_DATA_PIN_GROUP(vin1_data, 10),
- VIN_DATA_PIN_GROUP(vin1_data, 8),
- VIN_DATA_PIN_GROUP(vin1_data, 4),
- SH_PFC_PIN_GROUP(vin1_sync),
- SH_PFC_PIN_GROUP(vin1_field),
- SH_PFC_PIN_GROUP(vin1_clkenb),
- SH_PFC_PIN_GROUP(vin1_clk),
- VIN_DATA_PIN_GROUP(vin2_data, 24),
- SH_PFC_PIN_GROUP(vin2_data18),
- VIN_DATA_PIN_GROUP(vin2_data, 16),
- VIN_DATA_PIN_GROUP(vin2_data, 8),
- VIN_DATA_PIN_GROUP(vin2_data, 4),
- SH_PFC_PIN_GROUP(vin2_sync),
- SH_PFC_PIN_GROUP(vin2_field),
- SH_PFC_PIN_GROUP(vin2_clkenb),
- SH_PFC_PIN_GROUP(vin2_clk),
- SH_PFC_PIN_GROUP(vin3_data8),
- SH_PFC_PIN_GROUP(vin3_sync),
- SH_PFC_PIN_GROUP(vin3_field),
- SH_PFC_PIN_GROUP(vin3_clkenb),
- SH_PFC_PIN_GROUP(vin3_clk),
+static const struct {
+ struct sh_pfc_pin_group common[289];
+ struct sh_pfc_pin_group automotive[1];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clk_c),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_gmii),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync_0),
+ SH_PFC_PIN_GROUP(du_sync_1),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du2_clk_in),
+ SH_PFC_PIN_GROUP(eth_link),
+ SH_PFC_PIN_GROUP(eth_magic),
+ SH_PFC_PIN_GROUP(eth_mdio),
+ SH_PFC_PIN_GROUP(eth_rmii),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif0_data_b),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif0_data_c),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif0_data_d),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_d),
+ SH_PFC_PIN_GROUP(hscif0_data_e),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_e),
+ SH_PFC_PIN_GROUP(hscif0_data_f),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_f),
+ SH_PFC_PIN_GROUP(hscif1_data),
+ SH_PFC_PIN_GROUP(hscif1_clk),
+ SH_PFC_PIN_GROUP(hscif1_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c2_e),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(iic0),
+ SH_PFC_PIN_GROUP(iic1),
+ SH_PFC_PIN_GROUP(iic1_b),
+ SH_PFC_PIN_GROUP(iic1_c),
+ SH_PFC_PIN_GROUP(iic2),
+ SH_PFC_PIN_GROUP(iic2_b),
+ SH_PFC_PIN_GROUP(iic2_c),
+ SH_PFC_PIN_GROUP(iic2_d),
+ SH_PFC_PIN_GROUP(iic2_e),
+ SH_PFC_PIN_GROUP(iic3),
+ SH_PFC_PIN_GROUP(intc_irq0),
+ SH_PFC_PIN_GROUP(intc_irq1),
+ SH_PFC_PIN_GROUP(intc_irq2),
+ SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(mmc0_data1),
+ SH_PFC_PIN_GROUP(mmc0_data4),
+ SH_PFC_PIN_GROUP(mmc0_data8),
+ SH_PFC_PIN_GROUP(mmc0_ctrl),
+ SH_PFC_PIN_GROUP(mmc1_data1),
+ SH_PFC_PIN_GROUP(mmc1_data4),
+ SH_PFC_PIN_GROUP(mmc1_data8),
+ SH_PFC_PIN_GROUP(mmc1_ctrl),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_rx),
+ SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_rx),
+ SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_rx),
+ SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_rx),
+ SH_PFC_PIN_GROUP(msiof3_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk_b),
+ SH_PFC_PIN_GROUP(msiof3_sync_b),
+ SH_PFC_PIN_GROUP(msiof3_rx_b),
+ SH_PFC_PIN_GROUP(msiof3_tx_b),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm1),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2),
+ SH_PFC_PIN_GROUP(pwm3),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_clk),
+ SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_data_c),
+ SH_PFC_PIN_GROUP(scif1_data_d),
+ SH_PFC_PIN_GROUP(scif1_clk_d),
+ SH_PFC_PIN_GROUP(scif1_data_e),
+ SH_PFC_PIN_GROUP(scif1_clk_e),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scifa0_data),
+ SH_PFC_PIN_GROUP(scifa0_clk),
+ SH_PFC_PIN_GROUP(scifa0_ctrl),
+ SH_PFC_PIN_GROUP(scifa0_data_b),
+ SH_PFC_PIN_GROUP(scifa0_clk_b),
+ SH_PFC_PIN_GROUP(scifa0_ctrl_b),
+ SH_PFC_PIN_GROUP(scifa1_data),
+ SH_PFC_PIN_GROUP(scifa1_clk),
+ SH_PFC_PIN_GROUP(scifa1_ctrl),
+ SH_PFC_PIN_GROUP(scifa1_data_b),
+ SH_PFC_PIN_GROUP(scifa1_clk_b),
+ SH_PFC_PIN_GROUP(scifa1_ctrl_b),
+ SH_PFC_PIN_GROUP(scifa1_data_c),
+ SH_PFC_PIN_GROUP(scifa1_clk_c),
+ SH_PFC_PIN_GROUP(scifa1_ctrl_c),
+ SH_PFC_PIN_GROUP(scifa1_data_d),
+ SH_PFC_PIN_GROUP(scifa1_clk_d),
+ SH_PFC_PIN_GROUP(scifa1_ctrl_d),
+ SH_PFC_PIN_GROUP(scifa2_data),
+ SH_PFC_PIN_GROUP(scifa2_clk),
+ SH_PFC_PIN_GROUP(scifa2_ctrl),
+ SH_PFC_PIN_GROUP(scifa2_data_b),
+ SH_PFC_PIN_GROUP(scifa2_data_c),
+ SH_PFC_PIN_GROUP(scifa2_clk_c),
+ SH_PFC_PIN_GROUP(scifb0_data),
+ SH_PFC_PIN_GROUP(scifb0_clk),
+ SH_PFC_PIN_GROUP(scifb0_ctrl),
+ SH_PFC_PIN_GROUP(scifb0_data_b),
+ SH_PFC_PIN_GROUP(scifb0_clk_b),
+ SH_PFC_PIN_GROUP(scifb0_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb0_data_c),
+ SH_PFC_PIN_GROUP(scifb1_data),
+ SH_PFC_PIN_GROUP(scifb1_clk),
+ SH_PFC_PIN_GROUP(scifb1_ctrl),
+ SH_PFC_PIN_GROUP(scifb1_data_b),
+ SH_PFC_PIN_GROUP(scifb1_clk_b),
+ SH_PFC_PIN_GROUP(scifb1_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb1_data_c),
+ SH_PFC_PIN_GROUP(scifb1_data_d),
+ SH_PFC_PIN_GROUP(scifb1_data_e),
+ SH_PFC_PIN_GROUP(scifb1_clk_e),
+ SH_PFC_PIN_GROUP(scifb1_data_f),
+ SH_PFC_PIN_GROUP(scifb1_data_g),
+ SH_PFC_PIN_GROUP(scifb1_clk_g),
+ SH_PFC_PIN_GROUP(scifb2_data),
+ SH_PFC_PIN_GROUP(scifb2_clk),
+ SH_PFC_PIN_GROUP(scifb2_ctrl),
+ SH_PFC_PIN_GROUP(scifb2_data_b),
+ SH_PFC_PIN_GROUP(scifb2_clk_b),
+ SH_PFC_PIN_GROUP(scifb2_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb2_data_c),
+ SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd),
+ SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi0129_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data),
+ SH_PFC_PIN_GROUP(ssi1_ctrl),
+ SH_PFC_PIN_GROUP(ssi2_data),
+ SH_PFC_PIN_GROUP(ssi2_ctrl),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5),
+ SH_PFC_PIN_GROUP(ssi5_b),
+ SH_PFC_PIN_GROUP(ssi5_c),
+ SH_PFC_PIN_GROUP(ssi6),
+ SH_PFC_PIN_GROUP(ssi6_b),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi7_b_data),
+ SH_PFC_PIN_GROUP(ssi7_c_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_b_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_c_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi8_b_data),
+ SH_PFC_PIN_GROUP(ssi8_c_data),
+ SH_PFC_PIN_GROUP(ssi9_data),
+ SH_PFC_PIN_GROUP(ssi9_ctrl),
+ SH_PFC_PIN_GROUP(tpu0_to0),
+ SH_PFC_PIN_GROUP(tpu0_to1),
+ SH_PFC_PIN_GROUP(tpu0_to2),
+ SH_PFC_PIN_GROUP(tpu0_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc_vbus),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb2),
+ VIN_DATA_PIN_GROUP(vin0_data, 24),
+ VIN_DATA_PIN_GROUP(vin0_data, 20),
+ SH_PFC_PIN_GROUP(vin0_data18),
+ VIN_DATA_PIN_GROUP(vin0_data, 16),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ VIN_DATA_PIN_GROUP(vin0_data, 4),
+ SH_PFC_PIN_GROUP(vin0_sync),
+ SH_PFC_PIN_GROUP(vin0_field),
+ SH_PFC_PIN_GROUP(vin0_clkenb),
+ SH_PFC_PIN_GROUP(vin0_clk),
+ VIN_DATA_PIN_GROUP(vin1_data, 24),
+ VIN_DATA_PIN_GROUP(vin1_data, 20),
+ SH_PFC_PIN_GROUP(vin1_data18),
+ VIN_DATA_PIN_GROUP(vin1_data, 16),
+ VIN_DATA_PIN_GROUP(vin1_data, 12),
+ VIN_DATA_PIN_GROUP(vin1_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_data, 8),
+ VIN_DATA_PIN_GROUP(vin1_data, 4),
+ SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_clkenb),
+ SH_PFC_PIN_GROUP(vin1_clk),
+ VIN_DATA_PIN_GROUP(vin2_data, 24),
+ SH_PFC_PIN_GROUP(vin2_data18),
+ VIN_DATA_PIN_GROUP(vin2_data, 16),
+ VIN_DATA_PIN_GROUP(vin2_data, 8),
+ VIN_DATA_PIN_GROUP(vin2_data, 4),
+ SH_PFC_PIN_GROUP(vin2_sync),
+ SH_PFC_PIN_GROUP(vin2_field),
+ SH_PFC_PIN_GROUP(vin2_clkenb),
+ SH_PFC_PIN_GROUP(vin2_clk),
+ SH_PFC_PIN_GROUP(vin3_data8),
+ SH_PFC_PIN_GROUP(vin3_sync),
+ SH_PFC_PIN_GROUP(vin3_field),
+ SH_PFC_PIN_GROUP(vin3_clkenb),
+ SH_PFC_PIN_GROUP(vin3_clk),
+ },
+ .automotive = {
+ SH_PFC_PIN_GROUP(mlb_3pin),
+ }
};
static const char * const audio_clk_groups[] = {
@@ -4689,63 +4696,70 @@ static const char * const vin3_groups[] = {
"vin3_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(du0),
- SH_PFC_FUNCTION(du1),
- SH_PFC_FUNCTION(du2),
- SH_PFC_FUNCTION(eth),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(iic0),
- SH_PFC_FUNCTION(iic1),
- SH_PFC_FUNCTION(iic2),
- SH_PFC_FUNCTION(iic3),
- SH_PFC_FUNCTION(intc),
- SH_PFC_FUNCTION(mlb),
- SH_PFC_FUNCTION(mmc0),
- SH_PFC_FUNCTION(mmc1),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(msiof3),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(qspi),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scifa0),
- SH_PFC_FUNCTION(scifa1),
- SH_PFC_FUNCTION(scifa2),
- SH_PFC_FUNCTION(scifb0),
- SH_PFC_FUNCTION(scifb1),
- SH_PFC_FUNCTION(scifb2),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(sdhi3),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(tpu0),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(usb2),
- SH_PFC_FUNCTION(vin0),
- SH_PFC_FUNCTION(vin1),
- SH_PFC_FUNCTION(vin2),
- SH_PFC_FUNCTION(vin3),
+static const struct {
+ struct sh_pfc_function common[55];
+ struct sh_pfc_function automotive[1];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(du2),
+ SH_PFC_FUNCTION(eth),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(iic0),
+ SH_PFC_FUNCTION(iic1),
+ SH_PFC_FUNCTION(iic2),
+ SH_PFC_FUNCTION(iic3),
+ SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(mmc0),
+ SH_PFC_FUNCTION(mmc1),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scifa0),
+ SH_PFC_FUNCTION(scifa1),
+ SH_PFC_FUNCTION(scifa2),
+ SH_PFC_FUNCTION(scifb0),
+ SH_PFC_FUNCTION(scifb1),
+ SH_PFC_FUNCTION(scifb2),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(tpu0),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(vin0),
+ SH_PFC_FUNCTION(vin1),
+ SH_PFC_FUNCTION(vin2),
+ SH_PFC_FUNCTION(vin3),
+ },
+ .automotive = {
+ SH_PFC_FUNCTION(mlb),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5736,6 +5750,29 @@ static const struct sh_pfc_soc_operations r8a7790_pinmux_ops = {
.pin_to_pocctrl = r8a7790_pin_to_pocctrl,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A7742
+const struct sh_pfc_soc_info r8a7742_pinmux_info = {
+ .name = "r8a77420_pfc",
+ .ops = &r8a7790_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A7790
const struct sh_pfc_soc_info r8a7790_pinmux_info = {
.name = "r8a77900_pfc",
.ops = &r8a7790_pinmux_ops,
@@ -5745,13 +5782,16 @@ const struct sh_pfc_soc_info r8a7790_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.automotive),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.automotive),
.cfg_regs = pinmux_config_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index d20974a55d93..e2916aaa8304 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -1963,8 +1963,9 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* "name" addr register_size Field_Width */
- /* where Field_Width is 1 for single mode registers or 4 for upto 16
- mode registers and modes are described in assending order [0..16] */
+ /* where Field_Width is 1 for single mode registers or 4 for up to 16
+ * mode registers and modes are described in ascending order [0..15]
+ */
{ PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP(
0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index d57e633e99c0..0f013827baf9 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -304,6 +304,7 @@ struct sh_pfc_soc_info {
extern const struct sh_pfc_soc_info emev2_pinmux_info;
extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7742_pinmux_info;
extern const struct sh_pfc_soc_info r8a7743_pinmux_info;
extern const struct sh_pfc_soc_info r8a7744_pinmux_info;
extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 1ebcb957c654..63a287d5795f 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -794,13 +794,17 @@ static int sirfsoc_gpio_probe(struct device_node *np)
return -ENODEV;
sgpio = devm_kzalloc(&pdev->dev, sizeof(*sgpio), GFP_KERNEL);
- if (!sgpio)
- return -ENOMEM;
+ if (!sgpio) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
spin_lock_init(&sgpio->lock);
regs = of_iomap(np, 0);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
sgpio->chip.gc.request = sirfsoc_gpio_request;
sgpio->chip.gc.free = sirfsoc_gpio_free;
@@ -824,8 +828,10 @@ static int sirfsoc_gpio_probe(struct device_node *np)
girq->parents = devm_kcalloc(&pdev->dev, SIRFSOC_GPIO_NO_OF_BANKS,
sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
+ if (!girq->parents) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
bank = &sgpio->sgpio_bank[i];
spin_lock_init(&bank->lock);
@@ -868,6 +874,8 @@ out_no_range:
gpiochip_remove(&sgpio->chip.gc);
out:
iounmap(regs);
+out_put_device:
+ put_device(&pdev->dev);
return err;
}
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index 48cbf2a2837f..08dc1931b358 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -68,8 +68,8 @@
#define SLEEP_PULL_UP_MASK 0x1
#define SLEEP_PULL_UP_SHIFT 3
-#define PULL_UP_20K (BIT(12) | BIT(7))
-#define PULL_UP_4_7K BIT(12)
+#define PULL_UP_4_7K (BIT(12) | BIT(7))
+#define PULL_UP_20K BIT(7)
#define PULL_UP_MASK 0x21
#define PULL_UP_SHIFT 7
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 8a08c4afc6a8..9e5b61449999 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -103,8 +103,11 @@ static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc)) {
- dev_err(&pdev->dev, "Reset controller missing\n");
- return PTR_ERR(rstc);
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(&pdev->dev, "Reset controller missing err=%d\n", ret);
+ return ret;
}
ret = reset_control_deassert(rstc);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 6f7b3767f453..43922ab81666 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -123,7 +123,7 @@ static int tegra_xusb_padctl_get_group_pins(struct pinctrl_dev *pinctrl,
unsigned *num_pins)
{
/*
- * For the tegra-xusb pad controller groups are synonomous
+ * For the tegra-xusb pad controller groups are synonymous
* with lanes/pins and there is always one lane/pin per group.
*/
*pins = &pinctrl->desc->pins[group].number;
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 786bf89487d6..80d00ab8c110 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -94,7 +94,7 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
if (data->aon_pin) {
/*
* It's an AON pin, whose mux register offset and bit position
- * can be caluculated from pin number. Each register covers 16
+ * can be calculated from pin number. Each register covers 16
* pins, and each pin occupies 2 bits.
*/
u16 aoffset = pindesc->number / 16 * 4;
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 03ea5129ed0c..a484ab2c91ff 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -217,6 +217,7 @@ config CROS_EC_SYSFS
config CROS_EC_TYPEC
tristate "ChromeOS EC Type-C Connector Control"
depends on MFD_CROS_EC_DEV && TYPEC
+ depends on CROS_USBPD_NOTIFY
default MFD_CROS_EC_DEV
help
If you say Y here, you get support for accessing Type C connector
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index fa51153688b4..f37c0ef4af1f 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -57,6 +57,7 @@ static struct ramoops_platform_data chromeos_ramoops_data = {
.record_size = 0x40000,
.console_size = 0x20000,
.ftrace_size = 0x20000,
+ .pmsg_size = 0x20000,
.max_reason = KMSG_DUMP_OOPS,
};
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 6119eccd8a18..30c8938c27d5 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -16,7 +16,7 @@
#include "cros_ec.h"
-/**
+/*
* Request format for protocol v3
* byte 0 0xda (EC_COMMAND_PROTOCOL_3)
* byte 1-8 struct ec_host_request
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 93a71e93a2f1..ed794a7ddba9 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -48,7 +48,8 @@ static const guid_t cros_ish_guid =
struct header {
u8 channel;
u8 status;
- u8 reserved[2];
+ u8 token;
+ u8 reserved;
} __packed;
struct cros_ish_out_msg {
@@ -90,6 +91,7 @@ static DECLARE_RWSEM(init_lock);
* data exceeds this value, we log an error.
* @size: Actual size of data received from firmware.
* @error: 0 for success, negative error code for a failure in process_recv().
+ * @token: Expected token for response that we are waiting on.
* @received: Set to true on receiving a valid firmware response to host command
* @wait_queue: Wait queue for host to wait for firmware response.
*/
@@ -98,6 +100,7 @@ struct response_info {
size_t max_size;
size_t size;
int error;
+ u8 token;
bool received;
wait_queue_head_t wait_queue;
};
@@ -162,6 +165,7 @@ static int ish_send(struct ishtp_cl_data *client_data,
u8 *out_msg, size_t out_size,
u8 *in_msg, size_t in_size)
{
+ static u8 next_token;
int rv;
struct header *out_hdr = (struct header *)out_msg;
struct ishtp_cl *cros_ish_cl = client_data->cros_ish_cl;
@@ -174,8 +178,11 @@ static int ish_send(struct ishtp_cl_data *client_data,
client_data->response.data = in_msg;
client_data->response.max_size = in_size;
client_data->response.error = 0;
+ client_data->response.token = next_token++;
client_data->response.received = false;
+ out_hdr->token = client_data->response.token;
+
rv = ishtp_cl_send(cros_ish_cl, out_msg, out_size);
if (rv) {
dev_err(cl_data_to_dev(client_data),
@@ -249,17 +256,23 @@ static void process_recv(struct ishtp_cl *cros_ish_cl,
switch (in_msg->hdr.channel) {
case CROS_EC_COMMAND:
- /* Sanity check */
- if (!client_data->response.data) {
+ if (client_data->response.received) {
dev_err(dev,
- "Receiving buffer is null. Should be allocated by calling function\n");
- client_data->response.error = -EINVAL;
- goto error_wake_up;
+ "Previous firmware message not yet processed\n");
+ goto end_error;
}
- if (client_data->response.received) {
+ if (client_data->response.token != in_msg->hdr.token) {
+ dev_err_ratelimited(dev,
+ "Dropping old response token %d\n",
+ in_msg->hdr.token);
+ goto end_error;
+ }
+
+ /* Sanity check */
+ if (!client_data->response.data) {
dev_err(dev,
- "Previous firmware message not yet processed\n");
+ "Receiving buffer is null. Should be allocated by calling function\n");
client_data->response.error = -EINVAL;
goto error_wake_up;
}
@@ -289,21 +302,28 @@ static void process_recv(struct ishtp_cl *cros_ish_cl,
memcpy(client_data->response.data,
rb_in_proc->buffer.data, data_len);
+error_wake_up:
+ /* Free the buffer since we copied data or didn't need it */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
/* Set flag before waking up the caller */
client_data->response.received = true;
-error_wake_up:
+
/* Wake the calling thread */
wake_up_interruptible(&client_data->response.wait_queue);
break;
case CROS_MKBP_EVENT:
+ /* Free the buffer. This is just an event without data */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
/*
* Set timestamp from beginning of function since we actually
* got an incoming MKBP event
*/
client_data->ec_dev->last_event_time = timestamp;
- /* The event system doesn't send any data in buffer */
schedule_work(&client_data->work_ec_evt);
break;
@@ -313,8 +333,9 @@ error_wake_up:
}
end_error:
- /* Free the buffer */
- ishtp_cl_io_rb_recycle(rb_in_proc);
+	/* Free the buffer if we haven't already */
+ if (rb_in_proc)
+ ishtp_cl_io_rb_recycle(rb_in_proc);
up_read(&init_lock);
}
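
The token added to the message header lets the receive path tell a late reply to an already timed-out request apart from the response it is actually waiting for, instead of accepting whatever arrives next. A minimal sketch of that request/response matching, with illustrative stand-ins for the ISHTP structures:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct pending {
	uint8_t token;
	bool received;
};

static uint8_t next_token;
static struct pending pending;

static uint8_t send_request(void)
{
	pending.token = next_token++;   /* wraps naturally at 255 */
	pending.received = false;
	return pending.token;
}

static void receive_response(uint8_t token)
{
	if (pending.received) {
		puts("previous response not yet consumed, dropping");
		return;
	}
	if (token != pending.token) {
		printf("dropping stale response token %u\n", token);
		return;
	}
	pending.received = true;
	printf("accepted response token %u\n", token);
}

int main(void)
{
	uint8_t old = send_request();   /* request that will "time out" */
	uint8_t cur = send_request();   /* the request we still care about */

	receive_response(old);          /* late reply: dropped */
	receive_response(cur);          /* current reply: accepted */
	return 0;
}
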
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 874269c07073..66b8d21092af 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -11,11 +11,22 @@
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/typec.h>
#define DRV_NAME "cros-ec-typec"
+/* Per port data. */
+struct cros_typec_port {
+ struct typec_port *port;
+ /* Initial capabilities for the port. */
+ struct typec_capability caps;
+ struct typec_partner *partner;
+ /* Port partner PD identity info. */
+ struct usb_pd_identity p_identity;
+};
+
/* Platform-specific data for the Chrome OS EC Type C controller. */
struct cros_typec_data {
struct device *dev;
@@ -23,9 +34,8 @@ struct cros_typec_data {
int num_ports;
unsigned int cmd_ver;
/* Array of ports, indexed by port number. */
- struct typec_port *ports[EC_USB_PD_MAX_PORTS];
- /* Initial capabilities for each port. */
- struct typec_capability *caps[EC_USB_PD_MAX_PORTS];
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+ struct notifier_block nb;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -74,14 +84,25 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
return 0;
}
+static void cros_unregister_ports(struct cros_typec_data *typec)
+{
+ int i;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ if (!typec->ports[i])
+ continue;
+ typec_unregister_port(typec->ports[i]->port);
+ }
+}
+
static int cros_typec_init_ports(struct cros_typec_data *typec)
{
struct device *dev = typec->dev;
struct typec_capability *cap;
struct fwnode_handle *fwnode;
+ struct cros_typec_port *cros_port;
const char *port_prop;
int ret;
- int i;
int nports;
u32 port_num = 0;
@@ -113,22 +134,23 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
dev_dbg(dev, "Registering port %d\n", port_num);
- cap = devm_kzalloc(dev, sizeof(*cap), GFP_KERNEL);
- if (!cap) {
+ cros_port = devm_kzalloc(dev, sizeof(*cros_port), GFP_KERNEL);
+ if (!cros_port) {
ret = -ENOMEM;
goto unregister_ports;
}
- typec->caps[port_num] = cap;
+ typec->ports[port_num] = cros_port;
+ cap = &cros_port->caps;
ret = cros_typec_parse_port_props(cap, fwnode, dev);
if (ret < 0)
goto unregister_ports;
- typec->ports[port_num] = typec_register_port(dev, cap);
- if (IS_ERR(typec->ports[port_num])) {
+ cros_port->port = typec_register_port(dev, cap);
+ if (IS_ERR(cros_port->port)) {
dev_err(dev, "Failed to register port %d\n", port_num);
- ret = PTR_ERR(typec->ports[port_num]);
+ ret = PTR_ERR(cros_port->port);
goto unregister_ports;
}
}
@@ -136,8 +158,7 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
return 0;
unregister_ports:
- for (i = 0; i < typec->num_ports; i++)
- typec_unregister_port(typec->ports[i]);
+ cros_unregister_ports(typec);
return ret;
}
@@ -172,10 +193,34 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
return ret;
}
+static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
+ bool pd_en)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct typec_partner_desc p_desc = {
+ .usb_pd = pd_en,
+ };
+ int ret = 0;
+
+ /*
+ * Fill an initial PD identity, which will then be updated with info
+ * from the EC.
+ */
+ p_desc.identity = &port->p_identity;
+
+ port->partner = typec_register_partner(port->port, &p_desc);
+ if (IS_ERR(port->partner)) {
+ ret = PTR_ERR(port->partner);
+ port->partner = NULL;
+ }
+
+ return ret;
+}
+
static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control *resp)
{
- struct typec_port *port = typec->ports[port_num];
+ struct typec_port *port = typec->ports[port_num]->port;
enum typec_orientation polarity;
if (!resp->enabled)
@@ -192,8 +237,10 @@ static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
int port_num, struct ec_response_usb_pd_control_v1 *resp)
{
- struct typec_port *port = typec->ports[port_num];
+ struct typec_port *port = typec->ports[port_num]->port;
enum typec_orientation polarity;
+ bool pd_en;
+ int ret;
if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
polarity = TYPEC_ORIENTATION_NONE;
@@ -208,6 +255,25 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
TYPEC_SOURCE : TYPEC_SINK);
typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
TYPEC_SOURCE : TYPEC_SINK);
+
+ /* Register/remove partners when a connect/disconnect occurs. */
+ if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
+ if (typec->ports[port_num]->partner)
+ return;
+
+ pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
+ ret = cros_typec_add_partner(typec, port_num, pd_en);
+ if (ret)
+ dev_warn(typec->dev,
+ "Failed to register partner on port: %d\n",
+ port_num);
+ } else {
+ if (!typec->ports[port_num]->partner)
+ return;
+
+ typec_unregister_partner(typec->ports[port_num]->partner);
+ typec->ports[port_num]->partner = NULL;
+ }
}
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
@@ -272,6 +338,22 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
+static int cros_ec_typec_event(struct notifier_block *nb,
+ unsigned long host_event, void *_notify)
+{
+ struct cros_typec_data *typec = container_of(nb, struct cros_typec_data,
+ nb);
+ int ret, i;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ ret = cros_typec_port_update(typec, i);
+ if (ret < 0)
+ dev_warn(typec->dev, "Update failed for port: %d\n", i);
+ }
+
+ return NOTIFY_OK;
+}
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id cros_typec_acpi_id[] = {
{ "GOOG0014", 0 },
@@ -332,12 +414,15 @@ static int cros_typec_probe(struct platform_device *pdev)
goto unregister_ports;
}
+ typec->nb.notifier_call = cros_ec_typec_event;
+ ret = cros_usbpd_register_notify(&typec->nb);
+ if (ret < 0)
+ goto unregister_ports;
+
return 0;
unregister_ports:
- for (i = 0; i < typec->num_ports; i++)
- if (typec->ports[i])
- typec_unregister_port(typec->ports[i]);
+ cros_unregister_ports(typec);
return ret;
}
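
Keeping the registered partner pointer in the new per-port struct makes the connect/disconnect handling idempotent: repeated status polls register a partner only on the first connected poll and unregister it only once on disconnect. A small sketch of that bookkeeping, using malloc/free as stand-ins for typec_register_partner()/typec_unregister_partner():

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct port {
	void *partner;                  /* NULL means no partner registered */
};

static void update_port(struct port *p, bool connected)
{
	if (connected) {
		if (p->partner)         /* already registered: nothing to do */
			return;
		p->partner = malloc(1); /* stands in for typec_register_partner() */
		puts("partner registered");
	} else {
		if (!p->partner)        /* already gone: nothing to do */
			return;
		free(p->partner);       /* stands in for typec_unregister_partner() */
		p->partner = NULL;
		puts("partner unregistered");
	}
}

int main(void)
{
	struct port p = { 0 };

	update_port(&p, true);          /* connect     -> registered   */
	update_port(&p, true);          /* poll again  -> no-op        */
	update_port(&p, false);         /* disconnect  -> unregistered */
	update_port(&p, false);         /* poll again  -> no-op        */
	return 0;
}
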
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index 7de3ea75ef46..d16931203d82 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -46,6 +46,7 @@ static const char * const fault_names[] = {
"---", "OCP", "fast OCP", "OVP", "Discharge"
};
+__printf(3, 4)
static int append_str(char *buf, int pos, const char *fmt, ...)
{
va_list args;
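
The __printf(3, 4) annotation is the kernel's wrapper around the compiler's printf format attribute: argument 3 is the format string and the variadic arguments start at 4, so mismatched conversion specifiers are caught at compile time. A standalone sketch of roughly what it expands to:

#include <stdarg.h>
#include <stdio.h>

#define my_printf(fmt_idx, arg_idx) \
	__attribute__((format(printf, fmt_idx, arg_idx)))

my_printf(3, 4)
static int append_str(char *buf, int pos, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vsprintf(buf + pos, fmt, args);
	va_end(args);

	return r + pos;
}

int main(void)
{
	char buf[64];
	int pos = 0;

	pos = append_str(buf, pos, "%s=", "fault");
	pos = append_str(buf, pos, "%d", 3);
	/* append_str(buf, pos, "%d", "oops"); would now trigger -Wformat */
	puts(buf);
	return 0;
}
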
diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c
index df5a5f6c3ec6..a812788a0bdc 100644
--- a/drivers/platform/chrome/wilco_ec/debugfs.c
+++ b/drivers/platform/chrome/wilco_ec/debugfs.c
@@ -208,7 +208,12 @@ static int send_ec_cmd(struct wilco_ec_device *ec, u8 sub_cmd, u8 *out_val)
*/
static int h1_gpio_get(void *arg, u64 *val)
{
- return send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+ int ret;
+
+ ret = send_ec_cmd(arg, SUB_CMD_H1_GPIO, (u8 *)val);
+ if (ret == 0)
+ *val &= 0xFF;
+ return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_h1_gpio, h1_gpio_get, NULL, "0x%02llx\n");
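
send_ec_cmd() stores only a single byte through the u8 * cast, so the remaining bytes of the u64 debugfs value would otherwise keep whatever happened to be in them; masking with 0xFF after a successful call keeps only the byte the EC produced. A sketch of the effect (like the driver's cast, it assumes a little-endian layout):

#include <stdio.h>
#include <stdint.h>

static int read_one_byte(uint8_t *out)
{
	*out = 0x5a;            /* the EC returns exactly one byte */
	return 0;
}

int main(void)
{
	uint64_t val = 0xdeadbeefcafef00dULL;   /* pretend this is stale data */
	int ret = read_one_byte((uint8_t *)&val);

	if (ret == 0)
		val &= 0xFF;    /* keep only the byte that was actually written */

	printf("0x%02llx\n", (unsigned long long)val);  /* 0x5a, not garbage */
	return 0;
}
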
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 83ed1fbf73cf..5e1d14e35f20 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -410,7 +410,7 @@ static void olpc_xo175_ec_complete(void *arg)
dev_dbg(dev, "got event %.2x\n", byte);
switch (byte) {
case EVENT_AC_CHANGE:
- psy = power_supply_get_by_name("olpc-ac");
+ psy = power_supply_get_by_name("olpc_ac");
if (psy) {
power_supply_changed(psy);
power_supply_put(psy);
@@ -420,7 +420,7 @@ static void olpc_xo175_ec_complete(void *arg)
case EVENT_BATTERY_CRITICAL:
case EVENT_BATTERY_SOC_CHANGE:
case EVENT_BATTERY_ERROR:
- psy = power_supply_get_by_name("olpc-battery");
+ psy = power_supply_get_by_name("olpc_battery");
if (psy) {
power_supply_changed(psy);
power_supply_put(psy);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 890380302080..f07b982c8dff 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -123,6 +123,13 @@ config POWER_RESET_OCELOT_RESET
help
This driver supports restart for Microsemi Ocelot SoC.
+config POWER_RESET_OXNAS
+ bool "OXNAS SoC restart driver"
+ depends on ARCH_OXNAS
+ default MACH_OX820
+ help
+ Restart support for OXNAS/PLXTECH OX820 SoC.
+
config POWER_RESET_PIIX4_POWEROFF
tristate "Intel PIIX4 power-off driver"
depends on PCI
@@ -184,7 +191,7 @@ config POWER_RESET_VERSATILE
config POWER_RESET_VEXPRESS
bool "ARM Versatile Express power-off and reset driver"
depends on ARM || ARM64
- depends on VEXPRESS_CONFIG
+ depends on VEXPRESS_CONFIG=y
help
Power off and reset support for the ARM Ltd. Versatile
Express boards.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index da37f8b851dc..5710ca469517 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
+obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o
obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 6a4bbb506551..c5067eb75370 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -54,7 +54,7 @@ static int gpio_poweroff_probe(struct platform_device *pdev)
/* If a pm_power_off function has already been added, leave it alone */
if (pm_power_off != NULL) {
dev_err(&pdev->dev,
- "%s: pm_power_off function already registered",
+ "%s: pm_power_off function already registered\n",
__func__);
return -EBUSY;
}
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index e4a0cc45b3d1..318927938b05 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -94,7 +94,6 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer)
{
ktime_t now;
int state;
- unsigned long overruns;
struct ltc2952_poweroff *data = to_ltc2952(timer, timer_wde);
if (data->kernel_panic)
@@ -104,7 +103,7 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer)
gpiod_set_value(data->gpio_watchdog, !state);
now = hrtimer_cb_get_time(timer);
- overruns = hrtimer_forward(timer, now, data->wde_interval);
+ hrtimer_forward(timer, now, data->wde_interval);
return HRTIMER_RESTART;
}
diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c
index 1caf43d9e46d..0532803e6cbc 100644
--- a/drivers/power/reset/mt6323-poweroff.c
+++ b/drivers/power/reset/mt6323-poweroff.c
@@ -30,7 +30,7 @@ static void mt6323_do_pwroff(void)
int ret;
regmap_write(pwrc->regmap, pwrc->base + RTC_BBPU, RTC_BBPU_KEY);
- regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR, 1);
+ regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR_MT6323, 1);
ret = regmap_read_poll_timeout(pwrc->regmap,
pwrc->base + RTC_BBPU, val,
diff --git a/drivers/power/reset/oxnas-restart.c b/drivers/power/reset/oxnas-restart.c
new file mode 100644
index 000000000000..13090bec058a
--- /dev/null
+++ b/drivers/power/reset/oxnas-restart.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * oxnas SoC reset driver
+ * based on:
+ * Microsemi MIPS SoC reset driver
+ * and ox820_assert_system_reset() written by Ma Hajun <mahaijuns@gmail.com>
+ *
+ * Copyright (c) 2013 Ma Hajun <mahaijuns@gmail.com>
+ * Copyright (c) 2017 Microsemi Corporation
+ * Copyright (c) 2020 Daniel Golle <daniel@makrotopia.org>
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/notifier.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+
+/* bit numbers of reset control register */
+#define OX820_SYS_CTRL_RST_SCU 0
+#define OX820_SYS_CTRL_RST_COPRO 1
+#define OX820_SYS_CTRL_RST_ARM0 2
+#define OX820_SYS_CTRL_RST_ARM1 3
+#define OX820_SYS_CTRL_RST_USBHS 4
+#define OX820_SYS_CTRL_RST_USBHSPHYA 5
+#define OX820_SYS_CTRL_RST_MACA 6
+#define OX820_SYS_CTRL_RST_MAC OX820_SYS_CTRL_RST_MACA
+#define OX820_SYS_CTRL_RST_PCIEA 7
+#define OX820_SYS_CTRL_RST_SGDMA 8
+#define OX820_SYS_CTRL_RST_CIPHER 9
+#define OX820_SYS_CTRL_RST_DDR 10
+#define OX820_SYS_CTRL_RST_SATA 11
+#define OX820_SYS_CTRL_RST_SATA_LINK 12
+#define OX820_SYS_CTRL_RST_SATA_PHY 13
+#define OX820_SYS_CTRL_RST_PCIEPHY 14
+#define OX820_SYS_CTRL_RST_STATIC 15
+#define OX820_SYS_CTRL_RST_GPIO 16
+#define OX820_SYS_CTRL_RST_UART1 17
+#define OX820_SYS_CTRL_RST_UART2 18
+#define OX820_SYS_CTRL_RST_MISC 19
+#define OX820_SYS_CTRL_RST_I2S 20
+#define OX820_SYS_CTRL_RST_SD 21
+#define OX820_SYS_CTRL_RST_MACB 22
+#define OX820_SYS_CTRL_RST_PCIEB 23
+#define OX820_SYS_CTRL_RST_VIDEO 24
+#define OX820_SYS_CTRL_RST_DDR_PHY 25
+#define OX820_SYS_CTRL_RST_USBHSPHYB 26
+#define OX820_SYS_CTRL_RST_USBDEV 27
+#define OX820_SYS_CTRL_RST_ARMDBG 29
+#define OX820_SYS_CTRL_RST_PLLA 30
+#define OX820_SYS_CTRL_RST_PLLB 31
+
+/* bit numbers of clock control register */
+#define OX820_SYS_CTRL_CLK_COPRO 0
+#define OX820_SYS_CTRL_CLK_DMA 1
+#define OX820_SYS_CTRL_CLK_CIPHER 2
+#define OX820_SYS_CTRL_CLK_SD 3
+#define OX820_SYS_CTRL_CLK_SATA 4
+#define OX820_SYS_CTRL_CLK_I2S 5
+#define OX820_SYS_CTRL_CLK_USBHS 6
+#define OX820_SYS_CTRL_CLK_MACA 7
+#define OX820_SYS_CTRL_CLK_MAC OX820_SYS_CTRL_CLK_MACA
+#define OX820_SYS_CTRL_CLK_PCIEA 8
+#define OX820_SYS_CTRL_CLK_STATIC 9
+#define OX820_SYS_CTRL_CLK_MACB 10
+#define OX820_SYS_CTRL_CLK_PCIEB 11
+#define OX820_SYS_CTRL_CLK_REF600 12
+#define OX820_SYS_CTRL_CLK_USBDEV 13
+#define OX820_SYS_CTRL_CLK_DDR 14
+#define OX820_SYS_CTRL_CLK_DDRPHY 15
+#define OX820_SYS_CTRL_CLK_DDRCK 16
+
+/* Regmap offsets */
+#define OX820_CLK_SET_REGOFFSET 0x2c
+#define OX820_CLK_CLR_REGOFFSET 0x30
+#define OX820_RST_SET_REGOFFSET 0x34
+#define OX820_RST_CLR_REGOFFSET 0x38
+#define OX820_SECONDARY_SEL_REGOFFSET 0x14
+#define OX820_TERTIARY_SEL_REGOFFSET 0x8c
+#define OX820_QUATERNARY_SEL_REGOFFSET 0x94
+#define OX820_DEBUG_SEL_REGOFFSET 0x9c
+#define OX820_ALTERNATIVE_SEL_REGOFFSET 0xa4
+#define OX820_PULLUP_SEL_REGOFFSET 0xac
+#define OX820_SEC_SECONDARY_SEL_REGOFFSET 0x100014
+#define OX820_SEC_TERTIARY_SEL_REGOFFSET 0x10008c
+#define OX820_SEC_QUATERNARY_SEL_REGOFFSET 0x100094
+#define OX820_SEC_DEBUG_SEL_REGOFFSET 0x10009c
+#define OX820_SEC_ALTERNATIVE_SEL_REGOFFSET 0x1000a4
+#define OX820_SEC_PULLUP_SEL_REGOFFSET 0x1000ac
+
+struct oxnas_restart_context {
+ struct regmap *sys_ctrl;
+ struct notifier_block restart_handler;
+};
+
+static int ox820_restart_handle(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ struct oxnas_restart_context *ctx = container_of(this, struct
+ oxnas_restart_context,
+ restart_handler);
+ u32 value;
+
+ /*
+	 * Assert reset to cores as per power-on defaults. Don't touch the
+	 * DDR interface, as things would come to an impromptu stop.
+	 * NB: possibly we should also be asserting reset for PLLB, but there
+	 * are timing concerns here according to the docs.
+ */
+ value = BIT(OX820_SYS_CTRL_RST_COPRO) |
+ BIT(OX820_SYS_CTRL_RST_USBHS) |
+ BIT(OX820_SYS_CTRL_RST_USBHSPHYA) |
+ BIT(OX820_SYS_CTRL_RST_MACA) |
+ BIT(OX820_SYS_CTRL_RST_PCIEA) |
+ BIT(OX820_SYS_CTRL_RST_SGDMA) |
+ BIT(OX820_SYS_CTRL_RST_CIPHER) |
+ BIT(OX820_SYS_CTRL_RST_SATA) |
+ BIT(OX820_SYS_CTRL_RST_SATA_LINK) |
+ BIT(OX820_SYS_CTRL_RST_SATA_PHY) |
+ BIT(OX820_SYS_CTRL_RST_PCIEPHY) |
+ BIT(OX820_SYS_CTRL_RST_STATIC) |
+ BIT(OX820_SYS_CTRL_RST_UART1) |
+ BIT(OX820_SYS_CTRL_RST_UART2) |
+ BIT(OX820_SYS_CTRL_RST_MISC) |
+ BIT(OX820_SYS_CTRL_RST_I2S) |
+ BIT(OX820_SYS_CTRL_RST_SD) |
+ BIT(OX820_SYS_CTRL_RST_MACB) |
+ BIT(OX820_SYS_CTRL_RST_PCIEB) |
+ BIT(OX820_SYS_CTRL_RST_VIDEO) |
+ BIT(OX820_SYS_CTRL_RST_USBHSPHYB) |
+ BIT(OX820_SYS_CTRL_RST_USBDEV);
+
+ regmap_write(ctx->sys_ctrl, OX820_RST_SET_REGOFFSET, value);
+
+ /* Release reset to cores as per power on defaults */
+ regmap_write(ctx->sys_ctrl, OX820_RST_CLR_REGOFFSET,
+ BIT(OX820_SYS_CTRL_RST_GPIO));
+
+ /*
+ * Disable clocks to cores as per power-on defaults - must leave DDR
+ * related clocks enabled otherwise we'll stop rather abruptly.
+ */
+ value = BIT(OX820_SYS_CTRL_CLK_COPRO) |
+ BIT(OX820_SYS_CTRL_CLK_DMA) |
+ BIT(OX820_SYS_CTRL_CLK_CIPHER) |
+ BIT(OX820_SYS_CTRL_CLK_SD) |
+ BIT(OX820_SYS_CTRL_CLK_SATA) |
+ BIT(OX820_SYS_CTRL_CLK_I2S) |
+ BIT(OX820_SYS_CTRL_CLK_USBHS) |
+ BIT(OX820_SYS_CTRL_CLK_MAC) |
+ BIT(OX820_SYS_CTRL_CLK_PCIEA) |
+ BIT(OX820_SYS_CTRL_CLK_STATIC) |
+ BIT(OX820_SYS_CTRL_CLK_MACB) |
+ BIT(OX820_SYS_CTRL_CLK_PCIEB) |
+ BIT(OX820_SYS_CTRL_CLK_REF600) |
+ BIT(OX820_SYS_CTRL_CLK_USBDEV);
+
+ regmap_write(ctx->sys_ctrl, OX820_CLK_CLR_REGOFFSET, value);
+
+ /* Enable clocks to cores as per power-on defaults */
+
+ /* Set sys-control pin mux'ing as per power-on defaults */
+ regmap_write(ctx->sys_ctrl, OX820_SECONDARY_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_TERTIARY_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_QUATERNARY_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_DEBUG_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_ALTERNATIVE_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_PULLUP_SEL_REGOFFSET, 0);
+
+ regmap_write(ctx->sys_ctrl, OX820_SEC_SECONDARY_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_SEC_TERTIARY_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_SEC_QUATERNARY_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_SEC_DEBUG_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_SEC_ALTERNATIVE_SEL_REGOFFSET, 0);
+ regmap_write(ctx->sys_ctrl, OX820_SEC_PULLUP_SEL_REGOFFSET, 0);
+
+ /*
+ * No need to save any state, as the ROM loader can determine whether
+	 * reset is due to power cycling or programmatic action; just hit the
+	 * (self-clearing) CPU reset bit of the block reset register.
+ */
+ value =
+ BIT(OX820_SYS_CTRL_RST_SCU) |
+ BIT(OX820_SYS_CTRL_RST_ARM0) |
+ BIT(OX820_SYS_CTRL_RST_ARM1);
+
+ regmap_write(ctx->sys_ctrl, OX820_RST_SET_REGOFFSET, value);
+
+ pr_emerg("Unable to restart system\n");
+ return NOTIFY_DONE;
+}
+
+static int ox820_restart_probe(struct platform_device *pdev)
+{
+ struct oxnas_restart_context *ctx;
+ struct regmap *sys_ctrl;
+ struct device *dev = &pdev->dev;
+ int err = 0;
+
+ sys_ctrl = syscon_node_to_regmap(pdev->dev.of_node);
+ if (IS_ERR(sys_ctrl))
+ return PTR_ERR(sys_ctrl);
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->sys_ctrl = sys_ctrl;
+ ctx->restart_handler.notifier_call = ox820_restart_handle;
+ ctx->restart_handler.priority = 192;
+ err = register_restart_handler(&ctx->restart_handler);
+ if (err)
+ dev_err(dev, "can't register restart notifier (err=%d)\n", err);
+
+ return err;
+}
+
+static const struct of_device_id ox820_restart_of_match[] = {
+ { .compatible = "oxsemi,ox820-sys-ctrl" },
+ {}
+};
+
+static struct platform_driver ox820_restart_driver = {
+ .probe = ox820_restart_probe,
+ .driver = {
+ .name = "ox820-chip-reset",
+ .of_match_table = ox820_restart_of_match,
+ },
+};
+builtin_platform_driver(ox820_restart_driver);
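
The new restart handler drives the OX820 reset and clock blocks through paired SET/CLR register offsets: writing a mask to the SET offset asserts those bits and writing the mask to the CLR offset releases them, so no read-modify-write cycle is needed. A minimal model of that idiom, with a plain variable standing in for the hardware register:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1u << (n))

static uint32_t rst_ctrl;       /* stands in for the reset control register */

static void reg_write_set(uint32_t mask) { rst_ctrl |= mask;  }
static void reg_write_clr(uint32_t mask) { rst_ctrl &= ~mask; }

int main(void)
{
	/* assert reset to a couple of blocks (bit numbers as in the list above) */
	reg_write_set(BIT(1) | BIT(4));         /* COPRO, USBHS */
	printf("after SET: 0x%08x\n", rst_ctrl);

	/* release one of them again */
	reg_write_clr(BIT(4));
	printf("after CLR: 0x%08x\n", rst_ctrl);
	return 0;
}
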
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 22a743a0bf28..4a688741a88a 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -34,7 +34,8 @@ static int pm8916_reboot_mode_write(struct reboot_mode_driver *reboot,
ret = regmap_update_bits(pon->regmap,
pon->baseaddr + PON_SOFT_RB_SPARE,
- 0xfc, magic << pon->reason_shift);
+ GENMASK(7, pon->reason_shift),
+ magic << pon->reason_shift);
if (ret < 0)
dev_err(pon->dev, "update reboot mode bits failed\n");
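
GENMASK(7, pon->reason_shift) builds the update mask from the shift instead of hard-coding 0xfc, so PON blocks whose reason field starts at a different bit still get a consistent mask. A small demo using a simplified 32-bit re-definition of the macro (the kernel's version is more general):

#include <stdio.h>
#include <stdint.h>

#define GENMASK32(h, l) \
	((uint32_t)(((~0u) << (l)) & (~0u >> (31 - (h)))))

int main(void)
{
	unsigned int shift;

	for (shift = 1; shift <= 3; shift++)
		printf("reason_shift=%u -> mask 0x%02x\n",
		       shift, GENMASK32(7, shift));
	/* shift=2 reproduces the old hard-coded 0xfc */
	return 0;
}
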
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 62fbba0df971..510e363381ca 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -51,8 +51,11 @@ static int syscon_reboot_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap");
- if (IS_ERR(ctx->map))
- return PTR_ERR(ctx->map);
+ if (IS_ERR(ctx->map)) {
+ ctx->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(ctx->map))
+ return PTR_ERR(ctx->map);
+ }
if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset))
return -EINVAL;
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 90cbaa8341e3..1fdbcbd95fc2 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -143,11 +143,7 @@ static struct platform_driver vexpress_reset_driver = {
.driver = {
.name = "vexpress-reset",
.of_match_table = vexpress_reset_of_match,
+ .suppress_bind_attrs = true,
},
};
-
-static int __init vexpress_reset_init(void)
-{
- return platform_driver_register(&vexpress_reset_driver);
-}
-device_initcall(vexpress_reset_init);
+builtin_platform_driver(vexpress_reset_driver);
diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c
index 5ca047b3f58f..1308f3a185f3 100644
--- a/drivers/power/supply/88pm860x_battery.c
+++ b/drivers/power/supply/88pm860x_battery.c
@@ -919,16 +919,12 @@ static int pm860x_battery_probe(struct platform_device *pdev)
return -ENOMEM;
info->irq_cc = platform_get_irq(pdev, 0);
- if (info->irq_cc <= 0) {
- dev_err(&pdev->dev, "No IRQ resource!\n");
+ if (info->irq_cc <= 0)
return -EINVAL;
- }
info->irq_batt = platform_get_irq(pdev, 1);
- if (info->irq_batt <= 0) {
- dev_err(&pdev->dev, "No IRQ resource!\n");
+ if (info->irq_batt <= 0)
return -EINVAL;
- }
info->chip = chip;
info->i2c =
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f3424fdce341..44d3c8512fb8 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -116,6 +116,17 @@ config BATTERY_CPCAP
Say Y here to enable support for battery on Motorola
phones and tablets such as droid 4.
+config BATTERY_CW2015
+ tristate "CW2015 Battery driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  Say Y here to enable support for the CellWise CW2015
+	  battery fuel gauge (used in the Pinebook Pro and others).
+
+ This driver can also be built as a module. If so, the module will be
+ called cw2015_battery.
+
config BATTERY_DS2760
tristate "DS2760 battery driver (HP iPAQ & others)"
depends on W1
@@ -415,7 +426,7 @@ config CHARGER_PCF50633
tristate "NXP PCF50633 MBC"
depends on MFD_PCF50633
help
- Say Y to include support for NXP PCF50633 Main Battery Charger.
+ Say Y to include support for NXP PCF50633 Main Battery Charger.
config BATTERY_RX51
tristate "Nokia RX-51 (N900) battery driver"
@@ -541,6 +552,16 @@ config CHARGER_MAX8998
Say Y to enable support for the battery charger control sysfs and
platform data of MAX8998/LP3974 PMICs.
+config CHARGER_MP2629
+ tristate "Monolithic power system MP2629 Battery charger"
+ depends on MFD_MP2629
+ depends on MP2629_ADC
+ depends on IIO
+ help
+	  Select this option to enable support for the Monolithic Power Systems
+	  MP2629 battery charger. This driver provides battery-charger power
+	  management functions on such systems.
+
config CHARGER_QCOM_SMBB
tristate "Qualcomm Switch-Mode Battery Charger and Boost"
depends on MFD_SPMI_PMIC || COMPILE_TEST
@@ -577,7 +598,7 @@ config CHARGER_BQ24257
tristate "TI BQ24250/24251/24257 battery charger driver"
depends on I2C
depends on GPIOLIB || COMPILE_TEST
- depends on REGMAP_I2C
+ select REGMAP_I2C
help
Say Y to enable support for the TI BQ24250, BQ24251, and BQ24257 battery
chargers.
@@ -609,15 +630,15 @@ config CHARGER_TPS65090
tristate "TPS65090 battery charger driver"
depends on MFD_TPS65090
help
- Say Y here to enable support for battery charging with TPS65090
- PMIC chips.
+ Say Y here to enable support for battery charging with TPS65090
+ PMIC chips.
config CHARGER_TPS65217
tristate "TPS65217 battery charger driver"
depends on MFD_TPS65217
help
- Say Y here to enable support for battery charging with TPS65217
- PMIC chips.
+ Say Y here to enable support for battery charging with TPS65217
+ PMIC chips.
config BATTERY_GAUGE_LTC2941
tristate "LTC2941/LTC2943 Battery Gauge Driver"
@@ -660,7 +681,6 @@ config CHARGER_RT9455
config CHARGER_CROS_USBPD
tristate "ChromeOS EC based USBPD charger"
depends on CROS_USBPD_NOTIFY
- default n
help
Say Y here to enable ChromeOS EC based USBPD charger
driver. This driver gets various bits of information about
@@ -671,16 +691,16 @@ config CHARGER_SC2731
tristate "Spreadtrum SC2731 charger driver"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
help
- Say Y here to enable support for battery charging with SC2731
- PMIC chips.
+ Say Y here to enable support for battery charging with SC2731
+ PMIC chips.
config FUEL_GAUGE_SC27XX
tristate "Spreadtrum SC27XX fuel gauge driver"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
depends on IIO
help
- Say Y here to enable support for fuel gauge with SC27XX
- PMIC chips.
+ Say Y here to enable support for fuel gauge with SC27XX
+ PMIC chips.
config CHARGER_UCS1002
tristate "Microchip UCS1002 USB Port Power Controller"
@@ -695,11 +715,20 @@ config CHARGER_UCS1002
config CHARGER_BD70528
tristate "ROHM bd70528 charger driver"
depends on MFD_ROHM_BD70528
- default n
+ select LINEAR_RANGES
+ help
+ Say Y here to enable support for getting battery status
+ information and altering charger configurations from charger
+ block of the ROHM BD70528 Power Management IC.
+
+config CHARGER_BD99954
+ tristate "ROHM bd99954 charger driver"
+ depends on I2C
+ select LINEAR_RANGES
help
- Say Y here to enable support for getting battery status
- information and altering charger configurations from charger
- block of the ROHM BD70528 Power Management IC.
+ Say Y here to enable support for getting battery and charger
+ information and altering charger configurations from the ROHM
+ BD99954 charger IC.
config CHARGER_WILCO
tristate "Wilco EC based charger for ChromeOS"
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 6c7da920ea83..b9644663e435 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
obj-$(CONFIG_BATTERY_AXP20X) += axp20x_battery.o
obj-$(CONFIG_CHARGER_AXP20X) += axp20x_ac_power.o
obj-$(CONFIG_BATTERY_CPCAP) += cpcap-battery.o
+obj-$(CONFIG_BATTERY_CW2015) += cw2015_battery.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
@@ -75,6 +76,7 @@ obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
obj-$(CONFIG_CHARGER_MAX77693) += max77693_charger.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_CHARGER_MP2629) += mp2629_charger.o
obj-$(CONFIG_CHARGER_QCOM_SMBB) += qcom_smbb.o
obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
@@ -91,4 +93,5 @@ obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
obj-$(CONFIG_CHARGER_BD70528) += bd70528-charger.o
+obj-$(CONFIG_CHARGER_BD99954) += bd99954-charger.o
obj-$(CONFIG_CHARGER_WILCO) += wilco-charger.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index b96f90a82ecf..751c4f6c7487 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2399,7 +2399,7 @@ static void ab8500_fg_reinit_work(struct work_struct *work)
struct ab8500_fg *di = container_of(work, struct ab8500_fg,
fg_reinit_work.work);
- if (di->flags.calibrate == false) {
+ if (!di->flags.calibrate) {
dev_dbg(di->dev, "Resetting FG state machine to init.\n");
ab8500_fg_clear_cap_samples(di);
ab8500_fg_calc_cap_discharge_voltage(di, true);
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index cf4c67b2d235..9d981b76c1e7 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -880,10 +880,9 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
pirq = platform_get_irq(info->pdev, i);
- if (pirq < 0) {
- dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+ if (pirq < 0)
return pirq;
- }
+
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index f40fa0e63b6e..148eb8105803 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -718,6 +718,12 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
},
},
{
+ /* Meegopad T02 */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"),
+ },
+ },
+ {
/* Meegopad T08 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/power/supply/bd70528-charger.c b/drivers/power/supply/bd70528-charger.c
index 3b820110ecfa..7c1f0b99c71b 100644
--- a/drivers/power/supply/bd70528-charger.c
+++ b/drivers/power/supply/bd70528-charger.c
@@ -72,6 +72,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
+#include <linux/linear_range.h>
#define CHG_STAT_SUSPEND 0x0
#define CHG_STAT_TRICKLE 0x1
@@ -335,38 +336,37 @@ static int bd70528_get_present(struct bd70528_psy *bdpsy, int *val)
return 0;
}
-struct bd70528_linear_range {
- int min;
- int step;
- int vals;
- int low_sel;
-};
-
-static const struct bd70528_linear_range current_limit_ranges[] = {
+static const struct linear_range current_limit_ranges[] = {
{
.min = 5,
.step = 1,
- .vals = 36,
- .low_sel = 0,
+ .min_sel = 0,
+ .max_sel = 0x22,
},
{
.min = 40,
.step = 5,
- .vals = 5,
- .low_sel = 0x23,
+ .min_sel = 0x23,
+ .max_sel = 0x26,
},
{
.min = 60,
.step = 20,
- .vals = 8,
- .low_sel = 0x27,
+ .min_sel = 0x27,
+ .max_sel = 0x2d,
},
{
.min = 200,
.step = 50,
- .vals = 7,
- .low_sel = 0x2e,
- }
+ .min_sel = 0x2e,
+ .max_sel = 0x34,
+ },
+ {
+ .min = 500,
+ .step = 0,
+ .min_sel = 0x35,
+ .max_sel = 0x3f,
+ },
};
/*
@@ -374,18 +374,18 @@ static const struct bd70528_linear_range current_limit_ranges[] = {
* voltage for low temperatures. The driver currently only reads
* the charge current at room temperature. We do set both though.
*/
-static const struct bd70528_linear_range warm_charge_curr[] = {
+static const struct linear_range warm_charge_curr[] = {
{
.min = 10,
.step = 10,
- .vals = 20,
- .low_sel = 0,
+ .min_sel = 0,
+ .max_sel = 0x12,
},
{
.min = 200,
.step = 25,
- .vals = 13,
- .low_sel = 0x13,
+ .min_sel = 0x13,
+ .max_sel = 0x1f,
},
};
@@ -398,56 +398,6 @@ static const struct bd70528_linear_range warm_charge_curr[] = {
#define MAX_WARM_CHG_CURR_SEL 0x1f
#define MIN_CHG_CURR_SEL 0x0
-static int find_value_for_selector_low(const struct bd70528_linear_range *r,
- int selectors, unsigned int sel,
- unsigned int *val)
-{
- int i;
-
- for (i = 0; i < selectors; i++) {
- if (r[i].low_sel <= sel && r[i].low_sel + r[i].vals >= sel) {
- *val = r[i].min + (sel - r[i].low_sel) * r[i].step;
- return 0;
- }
- }
- return -EINVAL;
-}
-
-/*
- * For BD70528 voltage/current limits we happily accept any value which
- * belongs the range. We could check if value matching the selector is
- * desired by computing the range min + (sel - sel_low) * range step - but
- * I guess it is enough if we use voltage/current which is closest (below)
- * the requested?
- */
-static int find_selector_for_value_low(const struct bd70528_linear_range *r,
- int selectors, unsigned int val,
- unsigned int *sel, bool *found)
-{
- int i;
- int ret = -EINVAL;
-
- *found = false;
- for (i = 0; i < selectors; i++) {
- if (r[i].min <= val) {
- if (r[i].min + r[i].step * r[i].vals >= val) {
- *found = true;
- *sel = r[i].low_sel + (val - r[i].min) /
- r[i].step;
- ret = 0;
- break;
- }
- /*
- * If the range max is smaller than requested
- * we can set the max supported value from range
- */
- *sel = r[i].low_sel + r[i].vals;
- ret = 0;
- }
- }
- return ret;
-}
-
static int get_charge_current(struct bd70528_psy *bdpsy, int *ma)
{
unsigned int sel;
@@ -463,9 +413,9 @@ static int get_charge_current(struct bd70528_psy *bdpsy, int *ma)
sel &= BD70528_MASK_CHG_CHG_CURR;
- ret = find_value_for_selector_low(&warm_charge_curr[0],
- ARRAY_SIZE(warm_charge_curr), sel,
- ma);
+ ret = linear_range_get_value_array(&warm_charge_curr[0],
+ ARRAY_SIZE(warm_charge_curr),
+ sel, ma);
if (ret) {
dev_err(bdpsy->dev,
"Unknown charge current value 0x%x\n",
@@ -491,10 +441,9 @@ static int get_current_limit(struct bd70528_psy *bdpsy, int *ma)
sel &= BD70528_MASK_CHG_DCIN_ILIM;
- ret = find_value_for_selector_low(&current_limit_ranges[0],
- ARRAY_SIZE(current_limit_ranges), sel,
- ma);
-
+ ret = linear_range_get_value_array(&current_limit_ranges[0],
+ ARRAY_SIZE(current_limit_ranges),
+ sel, ma);
if (ret) {
/* Unspecified values mean 500 mA */
*ma = 500;
@@ -588,15 +537,28 @@ static int set_charge_current(struct bd70528_psy *bdpsy, int ma)
goto set;
}
- ret = find_selector_for_value_low(&warm_charge_curr[0],
- ARRAY_SIZE(warm_charge_curr), ma,
- &reg, &found);
+/*
+ * For BD70528 voltage/current limits we happily accept any value which
+ * belongs to the range. We could check whether a value matching the selector
+ * is desired by computing range min + (sel - sel_low) * range step - but
+ * it is enough to use the voltage/current which is closest to (but below)
+ * the requested one.
+ */
+
+ ret = linear_range_get_selector_low_array(warm_charge_curr,
+ ARRAY_SIZE(warm_charge_curr),
+ ma, &reg, &found);
if (ret) {
+ dev_err(bdpsy->dev,
+ "Unsupported charge current %u mA\n", ma);
reg = MIN_CHG_CURR_SEL;
goto set;
}
if (!found) {
- /* There was a gap in supported values and we hit it */
+ /*
+ * There was a gap in supported values and we hit it.
+ * Yet a smaller value was found so we use it.
+ */
dev_warn(bdpsy->dev,
"Unsupported charge current %u mA\n", ma);
}
@@ -648,17 +610,21 @@ static int set_current_limit(struct bd70528_psy *bdpsy, int ma)
goto set;
}
- ret = find_selector_for_value_low(&current_limit_ranges[0],
- ARRAY_SIZE(current_limit_ranges), ma,
- &reg, &found);
+ ret = linear_range_get_selector_low_array(current_limit_ranges,
+ ARRAY_SIZE(current_limit_ranges),
+ ma, &reg, &found);
if (ret) {
+ dev_err(bdpsy->dev, "Unsupported current limit %umA\n", ma);
reg = MIN_CURR_LIMIT_SEL;
goto set;
}
if (!found) {
- /* There was a gap in supported values and we hit it ?*/
- dev_warn(bdpsy->dev, "Unsupported current limit %umA\n",
- ma);
+ /*
+ * There was a gap in supported values and we hit it.
+ * We found a smaller value from ranges and use it.
+ * Warn user though.
+ */
+ dev_warn(bdpsy->dev, "Unsupported current limit %umA\n", ma);
}
set:
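
The driver now describes its selector fields with the generic linear_range tables instead of a private struct and hand-rolled lookup helpers. The mapping is piecewise linear: value = min + (sel - min_sel) * step within each range. Below is a simplified re-implementation for illustration, using the current_limit_ranges values from above (milliamps), to show a few selector-to-value conversions; the real helpers are linear_range_get_value_array() and linear_range_get_selector_low_array() from linux/linear_range.h.

#include <stdio.h>

struct lin_range {
	unsigned int min, step, min_sel, max_sel;
};

static const struct lin_range current_limit_ranges[] = {
	{ .min =   5, .step =  1, .min_sel = 0x00, .max_sel = 0x22 },
	{ .min =  40, .step =  5, .min_sel = 0x23, .max_sel = 0x26 },
	{ .min =  60, .step = 20, .min_sel = 0x27, .max_sel = 0x2d },
	{ .min = 200, .step = 50, .min_sel = 0x2e, .max_sel = 0x34 },
	{ .min = 500, .step =  0, .min_sel = 0x35, .max_sel = 0x3f },
};

static int range_get_value(const struct lin_range *r, int n,
			   unsigned int sel, unsigned int *val)
{
	int i;

	for (i = 0; i < n; i++) {
		if (sel >= r[i].min_sel && sel <= r[i].max_sel) {
			*val = r[i].min + (sel - r[i].min_sel) * r[i].step;
			return 0;
		}
	}
	return -1;              /* selector outside all ranges */
}

int main(void)
{
	unsigned int samples[] = { 0x00, 0x22, 0x23, 0x27, 0x2e, 0x35, 0x3f };
	unsigned int ma;
	int i;

	for (i = 0; i < 7; i++)
		if (!range_get_value(current_limit_ranges, 5, samples[i], &ma))
			printf("sel 0x%02x -> %u mA\n", samples[i], ma);
	return 0;
}
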
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
new file mode 100644
index 000000000000..ffd8bfa08179
--- /dev/null
+++ b/drivers/power/supply/bd99954-charger.c
@@ -0,0 +1,1142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ROHM BD99954 charger driver
+ *
+ * Copyright (C) 2020 Rohm Semiconductors
+ * Originally written by:
+ * Mikko Mutanen <mikko.mutanen@fi.rohmeurope.com>
+ * Markus Laine <markus.laine@fi.rohmeurope.com>
+ * Bugs added by:
+ * Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+ */
+
+/*
+ * The battery charging profile of BD99954.
+ *
+ * Curve (1) represents charging current.
+ * Curve (2) represents battery voltage.
+ *
+ * The BD99954 data sheet divides charging to three phases.
+ * a) Trickle-charge with constant current (8).
+ * b) pre-charge with constant current (6)
+ * c) fast-charge, first with constant current (5) phase. After
+ * the battery voltage has reached target level (4) we have constant
+ * voltage phase until charging current has dropped to termination
+ * level (7)
+ *
+ * V ^ ^ I
+ * . .
+ * . .
+ *(4)` `.` ` ` ` ` ` ` ` ` ` ` ` ` ` ----------------------------.
+ * . :/ .
+ * . o----+/:/ ` ` ` ` ` ` ` ` ` ` ` ` `.` ` (5)
+ * . + :: + .
+ * . + /- -- .
+ * . +`/- + .
+ * . o/- -: .
+ * . .s. +` .
+ * . .--+ `/ .
+ * . ..`` + .: .
+ * . -` + -- .
+ * . (2) ...`` + :- .
+ * . ...`` + -: .
+ *(3)` `.`."" ` ` ` `+-------- ` ` ` ` ` ` `.:` ` ` ` ` ` ` ` ` .` ` (6)
+ * . + `:. .
+ * . + -: .
+ * . + -:. .
+ * . + .--. .
+ * . (1) + `.+` ` ` `.` ` (7)
+ * -..............` ` ` ` ` ` ` ` ` ` ` ` ` ` ` ` ` + ` ` ` .` ` (8)
+ * . + -
+ * -------------------------------------------------+++++++++-->
+ * | trickle | pre | fast |
+ *
+ * Details of DT properties for different limits can be found from BD99954
+ * device tree binding documentation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include "bd99954-charger.h"
+
+struct battery_data {
+ u16 precharge_current; /* Trickle-charge Current */
+ u16 fc_reg_voltage; /* Fast Charging Regulation Voltage */
+ u16 voltage_min;
+ u16 voltage_max;
+};
+
+/* Initial field values, converted to initial register values */
+struct bd9995x_init_data {
+ u16 vsysreg_set; /* VSYS Regulation Setting */
+ u16 ibus_lim_set; /* VBUS input current limitation */
+ u16 icc_lim_set; /* VCC/VACP Input Current Limit Setting */
+ u16 itrich_set; /* Trickle-charge Current Setting */
+ u16 iprech_set; /* Pre-Charge Current Setting */
+ u16 ichg_set; /* Fast-Charge constant current */
+ u16 vfastchg_reg_set1; /* Fast Charging Regulation Voltage */
+ u16 vprechg_th_set; /* Pre-charge Voltage Threshold Setting */
+ u16 vrechg_set; /* Re-charge Battery Voltage Setting */
+ u16 vbatovp_set; /* Battery Over Voltage Threshold Setting */
+ u16 iterm_set; /* Charging termination current */
+};
+
+struct bd9995x_state {
+ u8 online;
+ u16 chgstm_status;
+ u16 vbat_vsys_status;
+ u16 vbus_vcc_status;
+};
+
+struct bd9995x_device {
+ struct i2c_client *client;
+ struct device *dev;
+ struct power_supply *charger;
+
+ struct regmap *rmap;
+ struct regmap_field *rmap_fields[F_MAX_FIELDS];
+
+ int chip_id;
+ int chip_rev;
+ struct bd9995x_init_data init_data;
+ struct bd9995x_state state;
+
+ struct mutex lock; /* Protect state data */
+};
+
+static const struct regmap_range bd9995x_readonly_reg_ranges[] = {
+ regmap_reg_range(CHGSTM_STATUS, SEL_ILIM_VAL),
+ regmap_reg_range(IOUT_DACIN_VAL, IOUT_DACIN_VAL),
+ regmap_reg_range(VCC_UCD_STATUS, VCC_IDD_STATUS),
+ regmap_reg_range(VBUS_UCD_STATUS, VBUS_IDD_STATUS),
+ regmap_reg_range(CHIP_ID, CHIP_REV),
+ regmap_reg_range(SYSTEM_STATUS, SYSTEM_STATUS),
+ regmap_reg_range(IBATP_VAL, VBAT_AVE_VAL),
+ regmap_reg_range(VTH_VAL, EXTIADP_AVE_VAL),
+};
+
+static const struct regmap_access_table bd9995x_writeable_regs = {
+ .no_ranges = bd9995x_readonly_reg_ranges,
+ .n_no_ranges = ARRAY_SIZE(bd9995x_readonly_reg_ranges),
+};
+
+static const struct regmap_range bd9995x_volatile_reg_ranges[] = {
+ regmap_reg_range(CHGSTM_STATUS, WDT_STATUS),
+ regmap_reg_range(VCC_UCD_STATUS, VCC_IDD_STATUS),
+ regmap_reg_range(VBUS_UCD_STATUS, VBUS_IDD_STATUS),
+ regmap_reg_range(INT0_STATUS, INT7_STATUS),
+ regmap_reg_range(SYSTEM_STATUS, SYSTEM_CTRL_SET),
+ regmap_reg_range(IBATP_VAL, EXTIADP_AVE_VAL), /* Measurement regs */
+};
+
+static const struct regmap_access_table bd9995x_volatile_regs = {
+ .yes_ranges = bd9995x_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bd9995x_volatile_reg_ranges),
+};
+
+static const struct regmap_range_cfg regmap_range_cfg[] = {
+ {
+ .selector_reg = MAP_SET,
+ .selector_mask = 0xFFFF,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ .range_min = 0 * 0x100,
+ .range_max = 3 * 0x100,
+ },
+};
+
+static const struct regmap_config bd9995x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .reg_stride = 1,
+
+ .max_register = 3 * 0x100,
+ .cache_type = REGCACHE_RBTREE,
+
+ .ranges = regmap_range_cfg,
+ .num_ranges = ARRAY_SIZE(regmap_range_cfg),
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .wr_table = &bd9995x_writeable_regs,
+ .volatile_table = &bd9995x_volatile_regs,
+};
+
+enum bd9995x_chrg_fault {
+ CHRG_FAULT_NORMAL,
+ CHRG_FAULT_INPUT,
+ CHRG_FAULT_THERMAL_SHUTDOWN,
+ CHRG_FAULT_TIMER_EXPIRED,
+};
+
+static int bd9995x_get_prop_batt_health(struct bd9995x_device *bd)
+{
+ int ret, tmp;
+
+ ret = regmap_field_read(bd->rmap_fields[F_BATTEMP], &tmp);
+ if (ret)
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ /* TODO: Check these against datasheet page 34 */
+
+ switch (tmp) {
+ case ROOM:
+ return POWER_SUPPLY_HEALTH_GOOD;
+ case HOT1:
+ case HOT2:
+ case HOT3:
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ case COLD1:
+ case COLD2:
+ return POWER_SUPPLY_HEALTH_COLD;
+ case TEMP_DIS:
+ case BATT_OPEN:
+ default:
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+}
+
+static int bd9995x_get_prop_charge_type(struct bd9995x_device *bd)
+{
+ int ret, tmp;
+
+ ret = regmap_field_read(bd->rmap_fields[F_CHGSTM_STATE], &tmp);
+ if (ret)
+ return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+
+ switch (tmp) {
+ case CHGSTM_TRICKLE_CHARGE:
+ case CHGSTM_PRE_CHARGE:
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ case CHGSTM_FAST_CHARGE:
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ case CHGSTM_TOP_OFF:
+ case CHGSTM_DONE:
+ case CHGSTM_SUSPEND:
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ default: /* Rest of the states are error related, no charging */
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ }
+}
+
+static bool bd9995x_get_prop_batt_present(struct bd9995x_device *bd)
+{
+ int ret, tmp;
+
+ ret = regmap_field_read(bd->rmap_fields[F_BATTEMP], &tmp);
+ if (ret)
+ return false;
+
+ return tmp != BATT_OPEN;
+}
+
+static int bd9995x_get_prop_batt_voltage(struct bd9995x_device *bd)
+{
+ int ret, tmp;
+
+ ret = regmap_field_read(bd->rmap_fields[F_VBAT_VAL], &tmp);
+ if (ret)
+ return 0;
+
+ tmp = min(tmp, 19200);
+
+ return tmp * 1000;
+}
+
+static int bd9995x_get_prop_batt_current(struct bd9995x_device *bd)
+{
+ int ret, tmp;
+
+ ret = regmap_field_read(bd->rmap_fields[F_IBATP_VAL], &tmp);
+ if (ret)
+ return 0;
+
+ return tmp * 1000;
+}
+
+#define DEFAULT_BATTERY_TEMPERATURE 250
+
+static int bd9995x_get_prop_batt_temp(struct bd9995x_device *bd)
+{
+ int ret, tmp;
+
+ ret = regmap_field_read(bd->rmap_fields[F_THERM_VAL], &tmp);
+ if (ret)
+ return DEFAULT_BATTERY_TEMPERATURE;
+
+ return (200 - tmp) * 10;
+}
+
+static int bd9995x_power_supply_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret, tmp;
+ struct bd9995x_device *bd = power_supply_get_drvdata(psy);
+ struct bd9995x_state state;
+
+ mutex_lock(&bd->lock);
+ state = bd->state;
+ mutex_unlock(&bd->lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (state.chgstm_status) {
+ case CHGSTM_TRICKLE_CHARGE:
+ case CHGSTM_PRE_CHARGE:
+ case CHGSTM_FAST_CHARGE:
+ case CHGSTM_TOP_OFF:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+
+ case CHGSTM_DONE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+
+ case CHGSTM_SUSPEND:
+ case CHGSTM_TEMPERATURE_ERROR_1:
+ case CHGSTM_TEMPERATURE_ERROR_2:
+ case CHGSTM_TEMPERATURE_ERROR_3:
+ case CHGSTM_TEMPERATURE_ERROR_4:
+ case CHGSTM_TEMPERATURE_ERROR_5:
+ case CHGSTM_TEMPERATURE_ERROR_6:
+ case CHGSTM_TEMPERATURE_ERROR_7:
+ case CHGSTM_THERMAL_SHUT_DOWN_1:
+ case CHGSTM_THERMAL_SHUT_DOWN_2:
+ case CHGSTM_THERMAL_SHUT_DOWN_3:
+ case CHGSTM_THERMAL_SHUT_DOWN_4:
+ case CHGSTM_THERMAL_SHUT_DOWN_5:
+ case CHGSTM_THERMAL_SHUT_DOWN_6:
+ case CHGSTM_THERMAL_SHUT_DOWN_7:
+ case CHGSTM_BATTERY_ERROR:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = BD9995X_MANUFACTURER;
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = state.online;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = regmap_field_read(bd->rmap_fields[F_IBATP_VAL], &tmp);
+ if (ret)
+ return ret;
+ val->intval = tmp * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_AVG:
+ ret = regmap_field_read(bd->rmap_fields[F_IBATP_AVE_VAL], &tmp);
+ if (ret)
+ return ret;
+ val->intval = tmp * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ /*
+ * Currently the DT uses this property to give the
+ * target current for fast-charging constant current phase.
+ * I think it is correct in a sense.
+ *
+ * Yet, this prop we read and return here is the programmed
+ * safety limit for combined input currents. This feels
+ * also correct in a sense.
+ *
+		 * However, this results in a mismatch between the DT value
+		 * and the value read back from sysfs.
+ */
+ ret = regmap_field_read(bd->rmap_fields[F_SEL_ILIM_VAL], &tmp);
+ if (ret)
+ return ret;
+ val->intval = tmp * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ if (!state.online) {
+ val->intval = 0;
+ break;
+ }
+
+ ret = regmap_field_read(bd->rmap_fields[F_VFASTCHG_REG_SET1],
+ &tmp);
+ if (ret)
+ return ret;
+
+ /*
+		 * The actual range is 2560 to 19200 mV, no matter what the
+		 * register says.
+ */
+ val->intval = clamp_val(tmp << 4, 2560, 19200);
+ val->intval *= 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = regmap_field_read(bd->rmap_fields[F_ITERM_SET], &tmp);
+ if (ret)
+ return ret;
+ /* Start step is 64 mA */
+ val->intval = tmp << 6;
+ /* Maximum is 1024 mA - no matter what register says */
+ val->intval = min(val->intval, 1024);
+ val->intval *= 1000;
+ break;
+
+ /* Battery properties which we access through charger */
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = bd9995x_get_prop_batt_present(bd);
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = bd9995x_get_prop_batt_voltage(bd);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = bd9995x_get_prop_batt_current(bd);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = bd9995x_get_prop_charge_type(bd);
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = bd9995x_get_prop_batt_health(bd);
+ break;
+
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = bd9995x_get_prop_batt_temp(bd);
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "bd99954";
+ break;
+
+ default:
+ return -EINVAL;
+
+ }
+
+ return 0;
+}
+
+static int bd9995x_get_chip_state(struct bd9995x_device *bd,
+ struct bd9995x_state *state)
+{
+ int i, ret, tmp;
+ struct {
+ struct regmap_field *id;
+ u16 *data;
+ } state_fields[] = {
+ {
+ bd->rmap_fields[F_CHGSTM_STATE], &state->chgstm_status,
+ }, {
+ bd->rmap_fields[F_VBAT_VSYS_STATUS],
+ &state->vbat_vsys_status,
+ }, {
+ bd->rmap_fields[F_VBUS_VCC_STATUS],
+ &state->vbus_vcc_status,
+ },
+ };
+
+
+ for (i = 0; i < ARRAY_SIZE(state_fields); i++) {
+ ret = regmap_field_read(state_fields[i].id, &tmp);
+ if (ret)
+ return ret;
+
+ *state_fields[i].data = tmp;
+ }
+
+ if (state->vbus_vcc_status & STATUS_VCC_DET ||
+ state->vbus_vcc_status & STATUS_VBUS_DET)
+ state->online = 1;
+ else
+ state->online = 0;
+
+ return 0;
+}
+
+static irqreturn_t bd9995x_irq_handler_thread(int irq, void *private)
+{
+ struct bd9995x_device *bd = private;
+ int ret, status, mask, i;
+ unsigned long tmp;
+ struct bd9995x_state state;
+
+ /*
+	 * The bd9995x does not seem to generate a large number of interrupts,
+	 * and the logic regarding which interrupts can cause relevant
+	 * status changes seems to be pretty complex.
+	 *
+	 * So let's implement a really simple and hopefully bullet-proof
+	 * handler: it does not really matter which IRQ we handle, we just go
+	 * and re-read all interesting statuses and give the framework a nudge.
+	 *
+	 * The other option would be building complex and error-prone logic
+	 * trying to decide what could have changed (resulting in the IRQ
+	 * we are now handling). During normal operation the BD99954 does
+	 * not seem to generate many interrupts, so the benefit from such
+	 * logic would probably be minimal.
+ */
+
+ ret = regmap_read(bd->rmap, INT0_STATUS, &status);
+ if (ret) {
+ dev_err(bd->dev, "Failed to read IRQ status\n");
+ return IRQ_NONE;
+ }
+
+ ret = regmap_field_read(bd->rmap_fields[F_INT0_SET], &mask);
+ if (ret) {
+ dev_err(bd->dev, "Failed to read IRQ mask\n");
+ return IRQ_NONE;
+ }
+
+ /* Handle only IRQs that are not masked */
+ status &= mask;
+ tmp = status;
+
+ /* Lowest bit does not represent any sub-registers */
+ tmp >>= 1;
+
+ /*
+ * Mask and ack IRQs we will handle (+ the idiot bit)
+ */
+ ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], 0);
+ if (ret) {
+ dev_err(bd->dev, "Failed to mask F_INT0\n");
+ return IRQ_NONE;
+ }
+
+ ret = regmap_write(bd->rmap, INT0_STATUS, status);
+ if (ret) {
+ dev_err(bd->dev, "Failed to ack F_INT0\n");
+ goto err_umask;
+ }
+
+ for_each_set_bit(i, &tmp, 7) {
+ int sub_status, sub_mask;
+ int sub_status_reg[] = {
+ INT1_STATUS, INT2_STATUS, INT3_STATUS, INT4_STATUS,
+ INT5_STATUS, INT6_STATUS, INT7_STATUS,
+ };
+ struct regmap_field *sub_mask_f[] = {
+ bd->rmap_fields[F_INT1_SET],
+ bd->rmap_fields[F_INT2_SET],
+ bd->rmap_fields[F_INT3_SET],
+ bd->rmap_fields[F_INT4_SET],
+ bd->rmap_fields[F_INT5_SET],
+ bd->rmap_fields[F_INT6_SET],
+ bd->rmap_fields[F_INT7_SET],
+ };
+
+ /* Clear sub IRQs */
+ ret = regmap_read(bd->rmap, sub_status_reg[i], &sub_status);
+ if (ret) {
+ dev_err(bd->dev, "Failed to read IRQ sub-status\n");
+ goto err_umask;
+ }
+
+ ret = regmap_field_read(sub_mask_f[i], &sub_mask);
+ if (ret) {
+ dev_err(bd->dev, "Failed to read IRQ sub-mask\n");
+ goto err_umask;
+ }
+
+ /* Ack active sub-statuses */
+ sub_status &= sub_mask;
+
+ ret = regmap_write(bd->rmap, sub_status_reg[i], sub_status);
+ if (ret) {
+ dev_err(bd->dev, "Failed to ack sub-IRQ\n");
+ goto err_umask;
+ }
+ }
+
+ ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], mask);
+ if (ret)
+ /* May as well retry once */
+ goto err_umask;
+
+ /* Read whole chip state */
+ ret = bd9995x_get_chip_state(bd, &state);
+ if (ret < 0) {
+ dev_err(bd->dev, "Failed to read chip state\n");
+ } else {
+ mutex_lock(&bd->lock);
+ bd->state = state;
+ mutex_unlock(&bd->lock);
+
+ power_supply_changed(bd->charger);
+ }
+
+ return IRQ_HANDLED;
+
+err_umask:
+ ret = regmap_field_write(bd->rmap_fields[F_INT0_SET], mask);
+ if (ret)
+ dev_err(bd->dev,
+ "Failed to un-mask F_INT0 - IRQ permanently disabled\n");
+
+ return IRQ_NONE;
+}
+
+static int __bd9995x_chip_reset(struct bd9995x_device *bd)
+{
+ int ret, state;
+ int rst_check_counter = 10;
+ u16 tmp = ALLRST | OTPLD;
+
+ ret = regmap_raw_write(bd->rmap, SYSTEM_CTRL_SET, &tmp, 2);
+ if (ret < 0)
+ return ret;
+
+ do {
+ ret = regmap_field_read(bd->rmap_fields[F_OTPLD_STATE], &state);
+ if (ret)
+ return ret;
+
+ msleep(10);
+ } while (state == 0 && --rst_check_counter);
+
+ if (!rst_check_counter) {
+ dev_err(bd->dev, "chip reset not completed\n");
+ return -ETIMEDOUT;
+ }
+
+ tmp = 0;
+ ret = regmap_raw_write(bd->rmap, SYSTEM_CTRL_SET, &tmp, 2);
+
+ return ret;
+}
+
+static int bd9995x_hw_init(struct bd9995x_device *bd)
+{
+ int ret;
+ int i;
+ struct bd9995x_state state;
+ struct bd9995x_init_data *id = &bd->init_data;
+
+ const struct {
+ enum bd9995x_fields id;
+ u16 value;
+ } init_data[] = {
+ /* Enable the charging trigger after SDP charger attached */
+ {F_SDP_CHG_TRIG_EN, 1},
+ /* Enable charging trigger after SDP charger attached */
+ {F_SDP_CHG_TRIG, 1},
+ /* Disable charging trigger by BC1.2 detection */
+ {F_VBUS_BC_DISEN, 1},
+ /* Disable charging trigger by BC1.2 detection */
+ {F_VCC_BC_DISEN, 1},
+ /* Disable automatic limitation of the input current */
+ {F_ILIM_AUTO_DISEN, 1},
+		/* Select current limitation when SDP charger attached */
+ {F_SDP_500_SEL, 1},
+ /* Select current limitation when DCP charger attached */
+ {F_DCP_2500_SEL, 1},
+ {F_VSYSREG_SET, id->vsysreg_set},
+ /* Activate USB charging and DC/DC converter */
+ {F_USB_SUS, 0},
+ /* DCDC clock: 1200 kHz */
+ {F_DCDC_CLK_SEL, 3},
+ /* Enable charging */
+ {F_CHG_EN, 1},
+ /* Disable Input current Limit setting voltage measurement */
+ {F_EXTIADPEN, 0},
+ /* Disable input current limiting */
+ {F_VSYS_PRIORITY, 1},
+ {F_IBUS_LIM_SET, id->ibus_lim_set},
+ {F_ICC_LIM_SET, id->icc_lim_set},
+ /* Charge termination current setting */
+ {F_ITERM_SET, id->iterm_set},
+ /* Trickle-charge Current Setting */
+ {F_ITRICH_SET, id->itrich_set},
+ /* Pre-charge Current setting */
+ {F_IPRECH_SET, id->iprech_set},
+ /* Fast Charge Current for constant current phase */
+ {F_ICHG_SET, id->ichg_set},
+ /* Fast Charge Voltage Regulation Setting */
+ {F_VFASTCHG_REG_SET1, id->vfastchg_reg_set1},
+ /* Set Pre-charge Voltage Threshold for trickle charging. */
+ {F_VPRECHG_TH_SET, id->vprechg_th_set},
+ {F_VRECHG_SET, id->vrechg_set},
+ {F_VBATOVP_SET, id->vbatovp_set},
+ /* Reverse buck boost voltage Setting */
+ {F_VRBOOST_SET, 0},
+ /* Disable fast-charging watchdog */
+ {F_WDT_FST, 0},
+ /* Disable pre-charging watchdog */
+ {F_WDT_PRE, 0},
+ /* Power save off */
+ {F_POWER_SAVE_MODE, 0},
+ {F_INT1_SET, INT1_ALL},
+ {F_INT2_SET, INT2_ALL},
+ {F_INT3_SET, INT3_ALL},
+ {F_INT4_SET, INT4_ALL},
+ {F_INT5_SET, INT5_ALL},
+ {F_INT6_SET, INT6_ALL},
+ {F_INT7_SET, INT7_ALL},
+ };
+
+ /*
+ * Currently we reset the charger to a known state at startup.
+ * If we ever want to let, for example, the boot code do the
+ * initialization instead, this reset should be dropped.
+ */
+ ret = __bd9995x_chip_reset(bd);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize currents/voltages and other parameters */
+ for (i = 0; i < ARRAY_SIZE(init_data); i++) {
+ ret = regmap_field_write(bd->rmap_fields[init_data[i].id],
+ init_data[i].value);
+ if (ret) {
+ dev_err(bd->dev, "failed to initialize charger (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = bd9995x_get_chip_state(bd, &state);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&bd->lock);
+ bd->state = state;
+ mutex_unlock(&bd->lock);
+
+ return 0;
+}
+
+static enum power_supply_property bd9995x_power_supply_props[] = {
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_AVG,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ /* Battery props we access through charger */
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static const struct power_supply_desc bd9995x_power_supply_desc = {
+ .name = "bd9995x-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = bd9995x_power_supply_props,
+ .num_properties = ARRAY_SIZE(bd9995x_power_supply_props),
+ .get_property = bd9995x_power_supply_get_property,
+};
+
+/*
+ * Limit configurations for vbus-input-current and vcc-vacp-input-current.
+ * The minimum limit is 0 uA, the maximum is 511 * 32000 uA = 16352000 uA.
+ * The limit is configured by writing a register value where each increment
+ * of the value equals a 32000 uA increase of the limit.
+ *
+ * E.g. value 0x0 is limit 0, value 0x1 is limit 32000, ...
+ * The setting is described by the linear_range table below (see also the
+ * worked example after it).
+ */
+static const struct linear_range input_current_limit_ranges[] = {
+ {
+ .min = 0,
+ .step = 32000,
+ .min_sel = 0x0,
+ .max_sel = 0x1ff,
+ },
+};
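+/*
+ * Worked example for the table above: with linear_range_get_selector_low*()
+ * a requested limit of 2000000 uA maps (rounding down) to selector 0x3e,
+ * i.e. 62 * 32000 uA = 1984000 uA.
+ */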
+
+/* Possible trickle, pre-charging and termination current values */
+static const struct linear_range charging_current_ranges[] = {
+ {
+ .min = 0,
+ .step = 64000,
+ .min_sel = 0x0,
+ .max_sel = 0x10,
+ }, {
+ .min = 1024000,
+ .step = 0,
+ .min_sel = 0x11,
+ .max_sel = 0x1f,
+ },
+};
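+/*
+ * In the table above, selectors 0x0...0x10 step from 0 in 64000 uA
+ * increments up to 1024000 uA; selectors 0x11...0x1f all map to the
+ * 1024000 uA maximum, hence the zero step in the second range.
+ */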
+
+/*
+ * Fast-charging voltage regulation, the re-charging start limit and the
+ * battery over-voltage protection share the same set of possible values.
+ */
+static const struct linear_range charge_voltage_regulation_ranges[] = {
+ {
+ .min = 2560000,
+ .step = 0,
+ .min_sel = 0,
+ .max_sel = 0xA0,
+ }, {
+ .min = 2560000,
+ .step = 16000,
+ .min_sel = 0xA0,
+ .max_sel = 0x4B0,
+ }, {
+ .min = 19200000,
+ .step = 0,
+ .min_sel = 0x4B0,
+ .max_sel = 0x7FF,
+ },
+};
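+/*
+ * In the table above, selectors below 0xa0 are clamped to the 2560000 uV
+ * minimum, selectors 0xa0...0x4b0 step in 16000 uV increments up to
+ * 19200000 uV and selectors above 0x4b0 are clamped to that maximum.
+ */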
+
+/* Possible VSYS voltage regulation values */
+static const struct linear_range vsys_voltage_regulation_ranges[] = {
+ {
+ .min = 2560000,
+ .step = 0,
+ .min_sel = 0,
+ .max_sel = 0x28,
+ }, {
+ .min = 2560000,
+ .step = 64000,
+ .min_sel = 0x28,
+ .max_sel = 0x12C,
+ }, {
+ .min = 19200000,
+ .step = 0,
+ .min_sel = 0x12C,
+ .max_sel = 0x1FF,
+ },
+};
+
+/* Possible settings for switching from trickle to pre-charging limits */
+static const struct linear_range trickle_to_pre_threshold_ranges[] = {
+ {
+ .min = 2048000,
+ .step = 0,
+ .min_sel = 0,
+ .max_sel = 0x20,
+ }, {
+ .min = 2048000,
+ .step = 64000,
+ .min_sel = 0x20,
+ .max_sel = 0x12C,
+ }, {
+ .min = 19200000,
+ .step = 0,
+ .min_sel = 0x12C,
+ .max_sel = 0x1FF
+ }
+};
+
+/* Possible current values for fast-charging constant current phase */
+static const struct linear_range fast_charge_current_ranges[] = {
+ {
+ .min = 0,
+ .step = 64000,
+ .min_sel = 0,
+ .max_sel = 0xFF,
+ }
+};
+
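+/*
+ * Helper structures describing how a value coming from the battery info or
+ * from a device-tree property is translated to a register selector with the
+ * linear_range helpers, and where the resulting selector is stored in
+ * bd9995x_init_data.
+ */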
+struct battery_init {
+ const char *name;
+ int *info_data;
+ const struct linear_range *range;
+ int ranges;
+ u16 *data;
+};
+
+struct dt_init {
+ char *prop;
+ const struct linear_range *range;
+ int ranges;
+ u16 *data;
+};
+
+static int bd9995x_fw_probe(struct bd9995x_device *bd)
+{
+ int ret;
+ struct power_supply_battery_info info;
+ u32 property;
+ int i;
+ int regval;
+ bool found;
+ struct bd9995x_init_data *init = &bd->init_data;
+ struct battery_init battery_inits[] = {
+ {
+ .name = "trickle-charging current",
+ .info_data = &info.tricklecharge_current_ua,
+ .range = &charging_current_ranges[0],
+ .ranges = 2,
+ .data = &init->itrich_set,
+ }, {
+ .name = "pre-charging current",
+ .info_data = &info.precharge_current_ua,
+ .range = &charging_current_ranges[0],
+ .ranges = 2,
+ .data = &init->iprech_set,
+ }, {
+ .name = "pre-to-trickle charge voltage threshold",
+ .info_data = &info.precharge_voltage_max_uv,
+ .range = &trickle_to_pre_threshold_ranges[0],
+ .ranges = 2,
+ .data = &init->vprechg_th_set,
+ }, {
+ .name = "charging termination current",
+ .info_data = &info.charge_term_current_ua,
+ .range = &charging_current_ranges[0],
+ .ranges = 2,
+ .data = &init->iterm_set,
+ }, {
+ .name = "charging re-start voltage",
+ .info_data = &info.charge_restart_voltage_uv,
+ .range = &charge_voltage_regulation_ranges[0],
+ .ranges = 2,
+ .data = &init->vrechg_set,
+ }, {
+ .name = "battery overvoltage limit",
+ .info_data = &info.overvoltage_limit_uv,
+ .range = &charge_voltage_regulation_ranges[0],
+ .ranges = 2,
+ .data = &init->vbatovp_set,
+ }, {
+ .name = "fast-charging max current",
+ .info_data = &info.constant_charge_current_max_ua,
+ .range = &fast_charge_current_ranges[0],
+ .ranges = 1,
+ .data = &init->ichg_set,
+ }, {
+ .name = "fast-charging voltage",
+ .info_data = &info.constant_charge_voltage_max_uv,
+ .range = &charge_voltage_regulation_ranges[0],
+ .ranges = 2,
+ .data = &init->vfastchg_reg_set1,
+ },
+ };
+ struct dt_init props[] = {
+ {
+ .prop = "rohm,vsys-regulation-microvolt",
+ .range = &vsys_voltage_regulation_ranges[0],
+ .ranges = 2,
+ .data = &init->vsysreg_set,
+ }, {
+ .prop = "rohm,vbus-input-current-limit-microamp",
+ .range = &input_current_limit_ranges[0],
+ .ranges = 1,
+ .data = &init->ibus_lim_set,
+ }, {
+ .prop = "rohm,vcc-input-current-limit-microamp",
+ .range = &input_current_limit_ranges[0],
+ .ranges = 1,
+ .data = &init->icc_lim_set,
+ },
+ };
+
+ /*
+ * power_supply_get_battery_info() does not support getting values
+ * from ACPI. Fix this if ACPI support is required here.
+ */
+ ret = power_supply_get_battery_info(bd->charger, &info);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(battery_inits); i++) {
+ int val = *battery_inits[i].info_data;
+ const struct linear_range *range = battery_inits[i].range;
+ int ranges = battery_inits[i].ranges;
+
+ if (val == -EINVAL)
+ continue;
+
+ ret = linear_range_get_selector_low_array(range, ranges, val,
+ &regval, &found);
+ if (ret) {
+ dev_err(bd->dev, "Unsupported value for %s\n",
+ battery_inits[i].name);
+
+ power_supply_put_battery_info(bd->charger, &info);
+ return -EINVAL;
+ }
+ if (!found) {
+ dev_warn(bd->dev,
+ "Unsupported value for %s - using smaller\n",
+ battery_inits[i].name);
+ }
+ *(battery_inits[i].data) = regval;
+ }
+
+ power_supply_put_battery_info(bd->charger, &info);
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ ret = device_property_read_u32(bd->dev, props[i].prop,
+ &property);
+ if (ret < 0) {
+ dev_err(bd->dev, "failed to read %s", props[i].prop);
+
+ return ret;
+ }
+
+ ret = linear_range_get_selector_low_array(props[i].range,
+ props[i].ranges,
+ property, &regval,
+ &found);
+ if (ret) {
+ dev_err(bd->dev, "Unsupported value for '%s'\n",
+ props[i].prop);
+
+ return -EINVAL;
+ }
+
+ if (!found) {
+ dev_warn(bd->dev,
+ "Unsupported value for '%s' - using smaller\n",
+ props[i].prop);
+ }
+
+ *(props[i].data) = regval;
+ }
+
+ return 0;
+}
+
+static void bd9995x_chip_reset(void *bd)
+{
+ __bd9995x_chip_reset(bd);
+}
+
+static int bd9995x_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct bd9995x_device *bd;
+ struct power_supply_config psy_cfg = {};
+ int ret;
+ int i;
+
+ bd = devm_kzalloc(dev, sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+
+ bd->client = client;
+ bd->dev = dev;
+ psy_cfg.drv_data = bd;
+ psy_cfg.of_node = dev->of_node;
+
+ mutex_init(&bd->lock);
+
+ bd->rmap = devm_regmap_init_i2c(client, &bd9995x_regmap_config);
+ if (IS_ERR(bd->rmap)) {
+ dev_err(dev, "Failed to setup register access via i2c\n");
+ return PTR_ERR(bd->rmap);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bd9995x_reg_fields); i++) {
+ const struct reg_field *reg_fields = bd9995x_reg_fields;
+
+ bd->rmap_fields[i] = devm_regmap_field_alloc(dev, bd->rmap,
+ reg_fields[i]);
+ if (IS_ERR(bd->rmap_fields[i])) {
+ dev_err(dev, "cannot allocate regmap field\n");
+ return PTR_ERR(bd->rmap_fields[i]);
+ }
+ }
+
+ i2c_set_clientdata(client, bd);
+
+ ret = regmap_field_read(bd->rmap_fields[F_CHIP_ID], &bd->chip_id);
+ if (ret) {
+ dev_err(dev, "Cannot read chip ID.\n");
+ return ret;
+ }
+
+ if (bd->chip_id != BD99954_ID) {
+ dev_err(dev, "Chip with ID=0x%x, not supported!\n",
+ bd->chip_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_field_read(bd->rmap_fields[F_CHIP_REV], &bd->chip_rev);
+ if (ret) {
+ dev_err(dev, "Cannot read revision.\n");
+ return ret;
+ }
+
+ dev_info(bd->dev, "Found BD99954 chip rev %d\n", bd->chip_rev);
+
+ /*
+ * We need to init the psy before we can call
+ * power_supply_get_battery_info() for it
+ */
+ bd->charger = devm_power_supply_register(bd->dev,
+ &bd9995x_power_supply_desc,
+ &psy_cfg);
+ if (IS_ERR(bd->charger)) {
+ dev_err(dev, "Failed to register power supply\n");
+ return PTR_ERR(bd->charger);
+ }
+
+ ret = bd9995x_fw_probe(bd);
+ if (ret < 0) {
+ dev_err(dev, "Cannot read device properties.\n");
+ return ret;
+ }
+
+ ret = bd9995x_hw_init(bd);
+ if (ret < 0) {
+ dev_err(dev, "Cannot initialize the chip.\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, bd9995x_chip_reset, bd);
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(dev, client->irq, NULL,
+ bd9995x_irq_handler_thread,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ BD9995X_IRQ_PIN, bd);
+}
+
+static const struct of_device_id bd9995x_of_match[] = {
+ { .compatible = "rohm,bd99954", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd9995x_of_match);
+
+static struct i2c_driver bd9995x_driver = {
+ .driver = {
+ .name = "bd9995x-charger",
+ .of_match_table = bd9995x_of_match,
+ },
+ .probe_new = bd9995x_probe,
+};
+module_i2c_driver(bd9995x_driver);
+
+MODULE_AUTHOR("Laine Markus <markus.laine@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("ROHM BD99954 charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/bd99954-charger.h b/drivers/power/supply/bd99954-charger.h
new file mode 100644
index 000000000000..f58897925383
--- /dev/null
+++ b/drivers/power/supply/bd99954-charger.h
@@ -0,0 +1,1075 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2020 ROHM Semiconductors */
+#ifndef BD99954_CHARGER_H
+#define BD99954_CHARGER_H
+
+#include <linux/regmap.h>
+
+#define BD9995X_MANUFACTURER "Rohm Semiconductor"
+#define BD9995X_IRQ_PIN "bd9995x_irq"
+
+#define BD9995X_VSYS_PRECHARGE_OFFSET_MV 200
+
+#define BD99954_ID 0x346
+#define BD99955_ID 0x221
+#define BD99956_ID 0x331
+
+/* Battery Charger Commands */
+#define CHARGING_CURRENT 0x14
+#define CHARGING_VOLTAGE 0x15
+#define PROTECT_SET 0x3E
+#define MAP_SET 0x3F
+
+/* Extended commands */
+#define CHGSTM_STATUS 0x100
+#define VBAT_VSYS_STATUS 0x101
+#define VBUS_VCC_STATUS 0x102
+#define CHGOP_STATUS 0x103
+#define WDT_STATUS 0x104
+#define CUR_ILIM_VAL 0x105
+#define SEL_ILIM_VAL 0x106
+#define IBUS_LIM_SET 0x107
+#define ICC_LIM_SET 0x108
+#define IOTG_LIM_SET 0x109
+#define VIN_CTRL_SET 0x10A
+#define CHGOP_SET1 0x10B
+#define CHGOP_SET2 0x10C
+#define VBUSCLPS_TH_SET 0x10D
+#define VCCCLPS_TH_SET 0x10E
+#define CHGWDT_SET 0x10F
+#define BATTWDT_SET 0x110
+#define VSYSREG_SET 0x111
+#define VSYSVAL_THH_SET 0x112
+#define VSYSVAL_THL_SET 0x113
+#define ITRICH_SET 0x114
+#define IPRECH_SET 0x115
+#define ICHG_SET 0x116
+#define ITERM_SET 0x117
+#define VPRECHG_TH_SET 0x118
+#define VRBOOST_SET 0x119
+#define VFASTCHG_REG_SET1 0x11A
+#define VFASTCHG_REG_SET2 0x11B
+#define VFASTCHG_REG_SET3 0x11C
+#define VRECHG_SET 0x11D
+#define VBATOVP_SET 0x11E
+#define IBATSHORT_SET 0x11F
+#define PROCHOT_CTRL_SET 0x120
+#define PROCHOT_ICRIT_SET 0x121
+#define PROCHOT_INORM_SET 0x122
+#define PROCHOT_IDCHG_SET 0x123
+#define PROCHOT_VSYS_SET 0x124
+#define PMON_IOUT_CTRL_SET 0x125
+#define PMON_DACIN_VAL 0x126
+#define IOUT_DACIN_VAL 0x127
+#define VCC_UCD_SET 0x128
+#define VCC_UCD_STATUS 0x129
+#define VCC_IDD_STATUS 0x12A
+#define VCC_UCD_FCTRL_SET 0x12B
+#define VCC_UCD_FCTRL_EN 0x12C
+#define VBUS_UCD_SET 0x130
+#define VBUS_UCD_STATUS 0x131
+#define VBUS_IDD_STATUS 0x132
+#define VBUS_UCD_FCTRL_SET 0x133
+#define VBUS_UCD_FCTRL_EN 0x134
+#define CHIP_ID 0x138
+#define CHIP_REV 0x139
+#define IC_SET1 0x13A
+#define IC_SET2 0x13B
+#define SYSTEM_STATUS 0x13C
+#define SYSTEM_CTRL_SET 0x13D
+#define VM_CTRL_SET 0x140
+#define THERM_WINDOW_SET1 0x141
+#define THERM_WINDOW_SET2 0x142
+#define THERM_WINDOW_SET3 0x143
+#define THERM_WINDOW_SET4 0x144
+#define THERM_WINDOW_SET5 0x145
+#define IBATP_TH_SET 0x146
+#define IBATM_TH_SET 0x147
+#define VBAT_TH_SET 0x148
+#define THERM_TH_SET 0x149
+#define IACP_TH_SET 0x14A
+#define VACP_TH_SET 0x14B
+#define VBUS_TH_SET 0x14C
+#define VCC_TH_SET 0x14D
+#define VSYS_TH_SET 0x14E
+#define EXTIADP_TH_SET 0x14F
+#define IBATP_VAL 0x150
+#define IBATP_AVE_VAL 0x151
+#define IBATM_VAL 0x152
+#define IBATM_AVE_VAL 0x153
+#define VBAT_VAL 0x154
+#define VBAT_AVE_VAL 0x155
+#define THERM_VAL 0x156
+#define VTH_VAL 0x157
+#define IACP_VAL 0x158
+#define IACP_AVE_VAL 0x159
+#define VACP_VAL 0x15A
+#define VACP_AVE_VAL 0x15B
+#define VBUS_VAL 0x15C
+#define VBUS_AVE_VAL 0x15D
+#define VCC_VAL 0x15E
+#define VCC_AVE_VAL 0x15F
+#define VSYS_VAL 0x160
+#define VSYS_AVE_VAL 0x161
+#define EXTIADP_VAL 0x162
+#define EXTIADP_AVE_VAL 0x163
+#define VACPCLPS_TH_SET 0x164
+#define INT0_SET 0x168
+#define INT1_SET 0x169
+#define INT2_SET 0x16A
+#define INT3_SET 0x16B
+#define INT4_SET 0x16C
+#define INT5_SET 0x16D
+#define INT6_SET 0x16E
+#define INT7_SET 0x16F
+#define INT0_STATUS 0x170
+#define INT1_STATUS 0x171
+#define INT2_STATUS 0x172
+#define INT3_STATUS 0x173
+#define INT4_STATUS 0x174
+#define INT5_STATUS 0x175
+#define INT6_STATUS 0x176
+#define INT7_STATUS 0x177
+#define OTPREG0 0x17A
+#define OTPREG1 0x17B
+#define SMBREG 0x17C
+#define DEBUG_MODE_SET 0x17F
+#define DEBUG0x14 0x214
+#define DEBUG0x1A 0x21A
+
+enum bd9995x_fields {
+ F_PREV_CHGSTM_STATE, F_CHGSTM_STATE,
+ F_VBAT_VSYS_STATUS,
+ F_VBUS_VCC_STATUS,
+ F_BATTEMP, F_VRECHG_DET, F_RBOOST_UV, F_RBOOSTS,
+ F_THERMWDT_VAL, F_CHGWDT_VAL,
+ F_CUR_ILIM_VAL,
+ F_SEL_ILIM_VAL,
+ F_IBUS_LIM_SET,
+ F_ICC_LIM_SET,
+ F_IOTG_LIM_SET,
+ F_OTG_BOTH_EN,
+ F_VRBOOST_TRIG,
+ F_VRBOOST_EN,
+ F_PP_BOTH_THRU,
+ F_VIN_ORD,
+ F_VBUS_EN,
+ F_VCC_EN,
+ F_VSYS_PRIORITY,
+ F_PPC_SUB_CAP,
+ F_PPC_CAP,
+ F_DCP_2500_SEL,
+ F_SDP_500_SEL,
+ F_ILIM_AUTO_DISEN,
+ F_VCC_BC_DISEN,
+ F_VBUS_BC_DISEN,
+ F_SDP_CHG_TRIG_EN,
+ F_SDP_CHG_TRIG,
+ F_AUTO_TOF,
+ F_AUTO_FST,
+ F_AUTO_RECH,
+ F_ILIM_RESET_EN,
+ F_DCDC_1MS_SEL,
+ F_SEL_ILIM_DIV,
+ F_BATT_LEARN,
+ F_CHG_EN,
+ F_USB_SUS,
+ F_CHOP_SS_INIT,
+ F_CHOP_ALL_INIT,
+ F_DCDC_CLK_SEL,
+ F_CHOP_SS,
+ F_CHOP_ALL,
+ F_VBUSCLPS_TH_SET,
+ F_VCCCLPS_TH_SET,
+ F_WDT_FST,
+ F_WDT_PRE,
+ F_WDT_IBAT_SHORT,
+ F_WDT_THERM,
+ F_VSYSREG_SET,
+ F_VSYSVAL_THH_SET,
+ F_VSYSVAL_THL_SET,
+ F_ITRICH_SET,
+ F_IPRECH_SET,
+ F_ICHG_SET,
+ F_ITERM_SET,
+ F_VPRECHG_TH_SET,
+ F_VRBOOST_SET,
+ F_VFASTCHG_REG_SET1,
+ F_VFASTCHG_REG_SET2,
+ F_VFASTCHG_REG_SET3,
+ F_VRECHG_SET,
+ F_VBATOVP_SET,
+ F_IBATM_SHORT_SET,
+ F_PROCHOT_DG_SET,
+ F_PROCHOT_ICRIT_DG_SET,
+ F_PROCHOT_IDCHG_DG_SET,
+ F_PROCHOT_EN,
+ F_PROCHOT_ICRIT_SET,
+ F_PROCHOT_INORM_SET,
+ F_PROCHOT_IDCHG_SET,
+ F_PROCHOT_VSYS_SET,
+ F_IMON_INSEL,
+ F_PMON_INSEL,
+ F_IOUT_OUT_EN,
+ F_IOUT_SOURCE_SEL,
+ F_IOUT_GAIN_SET,
+ F_PMON_OUT_EN,
+ F_PMON_GAIN_SET,
+ F_PMON_DACIN_VAL,
+ F_IOUT_DACIN_VAL,
+ F_VCC_BCSRETRY,
+ F_VCC_ADCRTRY,
+ F_VCC_USBDETEN,
+ F_VCC_IDRDETEN,
+ F_VCC_ENUMRDY,
+ F_VCC_ADCPOLEN,
+ F_VCC_DCDMODE,
+ F_VCC_USB_SW_EN,
+ F_VCC_USB_SW,
+ F_VCC_DCDFAIL,
+ F_VCC_CHGPORT,
+ F_VCC_PUPDET,
+ F_VCC_VBUS_VLD,
+ F_VCC_CHGDET,
+ F_VCC_OTGDET,
+ F_VCC_VBINOP,
+ F_VCC_EXTID,
+ F_VCC_IDRDET,
+ F_VCC_INDO,
+ F_VCC_UCDSWEN,
+ F_VCC_RREF_EN,
+ F_VCC_DPPU_EN,
+ F_VCC_DPREF_EN,
+ F_VCC_DMREF_EN,
+ F_VCC_DPDET_EN,
+ F_VCC_DMDET_EN,
+ F_VCC_DPSINK_EN,
+ F_VCC_DMSINK_EN,
+ F_VCC_DP_BUFF_EN,
+ F_VCC_DM_BUFF_EN,
+ F_VCC_EXTCLKENBL,
+ F_VCC_PLSTESTEN,
+ F_VCC_UCDSWEN_TSTENB,
+ F_VCC_RREF_EN_TSTENB,
+ F_VCC_DPPU_EN_TSTENB,
+ F_VCC_DPREF_EN_TSTENB,
+ F_VCC_DMREF_EN_TSTENB,
+ F_VCC_DPDET_EN_TSTENB,
+ F_VCC_DMDET_EN_TSTENB,
+ F_VCC_DPSINK_EN_TSTENB,
+ F_VCC_DMSINK_EN_TSTENB,
+ F_VCC_DP_BUFF_EN_TSTENB,
+ F_VCC_DM_BUFF_EN_TSTENB,
+ F_VBUS_BCSRETRY,
+ F_VBUS_ADCRTRY,
+ F_VBUS_USBDETEN,
+ F_VBUS_IDRDETEN,
+ F_VBUS_ENUMRDY,
+ F_VBUS_ADCPOLEN,
+ F_VBUS_DCDMODE,
+ F_VBUS_USB_SW_EN,
+ F_VBUS_USB_SW,
+ F_VBUS_DCDFAIL,
+ F_VBUS_CHGPORT,
+ F_VBUS_PUPDET,
+ F_VBUS_VBUS_VLD,
+ F_VBUS_CHGDET,
+ F_VBUS_OTGDET,
+ F_VBUS_VBINOP,
+ F_VBUS_EXTID,
+ F_VBUS_IDRDET,
+ F_VBUS_INDO,
+ F_VBUS_UCDSWEN,
+ F_VBUS_RREF_EN,
+ F_VBUS_DPPU_EN,
+ F_VBUS_DPREF_EN,
+ F_VBUS_DMREF_EN,
+ F_VBUS_DPDET_EN,
+ F_VBUS_DMDET_EN,
+ F_VBUS_DPSINK_EN,
+ F_VBUS_DMSINK_EN,
+ F_VBUS_DP_BUFF_EN,
+ F_VBUS_DM_BUFF_EN,
+ F_VBUS_EXTCLKENBL,
+ F_VBUS_PLSTESTEN,
+ F_VBUS_UCDSWEN_TSTENB,
+ F_VBUS_RREF_EN_TSTENB,
+ F_VBUS_DPPU_EN_TSTENB,
+ F_VBUS_DPREF_EN_TSTENB,
+ F_VBUS_DMREF_EN_TSTENB,
+ F_VBUS_DPDET_EN_TSTENB,
+ F_VBUS_DMDET_EN_TSTENB,
+ F_VBUS_DPSINK_EN_TSTENB,
+ F_VBUS_DMSINK_EN_TSTENB,
+ F_VBUS_DP_BUFF_EN_TSTENB,
+ F_VBUS_DM_BUFF_EN_TSTENB,
+ F_CHIP_ID,
+ F_CHIP_REV,
+ F_ONE_CELL_MODE,
+ F_cell,
+ F_VACP_AUTO_DISCHG,
+ F_VACP_LOAD,
+ F_ACOK_POL,
+ F_ACOK_DISEN,
+ F_DEBUG_SET1,
+ F_DEBUG_SET0,
+ F_MONRST_STATE,
+ F_ALMRST_STATE,
+ F_CHGRST_STATE,
+ F_OTPLD_STATE,
+ F_ALLRST_STATE,
+ F_PROTECT_SET,
+ F_MAP_SET,
+ F_ADCINTERVAL,
+ F_ADCMOD,
+ F_ADCTMOD,
+ F_EXTIADPEN,
+ F_VSYSENB,
+ F_VCCENB,
+ F_VBUSENB,
+ F_VACPENB,
+ F_IACPENB,
+ F_THERMENB,
+ F_VBATENB,
+ F_IBATMENB,
+ F_IBATPENB,
+ F_TMPTHR1B,
+ F_TMPTHR1A,
+ F_TMPTHR2B,
+ F_TMPTHR2A,
+ F_TMPTHR3B,
+ F_TMPTHR3A,
+ F_TMPTHR4B,
+ F_TMPTHR4A,
+ F_TMPTHR5B,
+ F_TMPTHR5A,
+ F_IBATP_TH_SET,
+ F_IBATM_TH_SET,
+ F_VBAT_TH_SET,
+ F_THERM_TH_SET,
+ F_IACP_TH_SET,
+ F_VACP_TH_SET,
+ F_VBUS_TH_SET,
+ F_VCC_TH_SET,
+ F_VSYS_TH_SET,
+ F_EXTIADP_TH_SET,
+ F_IBATP_VAL,
+ F_IBATP_AVE_VAL,
+ F_IBATM_VAL,
+ F_IBATM_AVE_VAL,
+ F_VBAT_VAL,
+ F_VBAT_AVE_VAL,
+ F_THERM_VAL,
+ F_VTH_VAL,
+ F_IACP_VAL,
+ F_IACP_AVE_VAL,
+ F_VACP_VAL,
+ F_VACP_AVE_VAL,
+ F_VBUS_VAL,
+ F_VBUS_AVE_VAL,
+ F_VCC_VAL,
+ F_VCC_AVE_VAL,
+ F_VSYS_VAL,
+ F_VSYS_AVE_VAL,
+ F_EXTIADP_VAL,
+ F_EXTIADP_AVE_VAL,
+ F_VACPCLPS_TH_SET,
+ F_INT7_SET,
+ F_INT6_SET,
+ F_INT5_SET,
+ F_INT4_SET,
+ F_INT3_SET,
+ F_INT2_SET,
+ F_INT1_SET,
+ F_INT0_SET,
+ F_VBUS_RBUV_DET,
+ F_VBUS_RBUV_RES,
+ F_VBUS_TH_DET,
+ F_VBUS_TH_RES,
+ F_VBUS_IIN_MOD,
+ F_VBUS_OV_DET,
+ F_VBUS_OV_RES,
+ F_VBUS_CLPS_DET,
+ F_VBUS_CLPS,
+ F_VBUS_DET,
+ F_VBUS_RES,
+ F_VCC_RBUV_DET,
+ F_VCC_RBUV_RES,
+ F_VCC_TH_DET,
+ F_VCC_TH_RES,
+ F_VCC_IIN_MOD,
+ F_VCC_OVP_DET,
+ F_VCC_OVP_RES,
+ F_VCC_CLPS_DET,
+ F_VCC_CLPS_RES,
+ F_VCC_DET,
+ F_VCC_RES,
+ F_TH_DET,
+ F_TH_RMV,
+ F_TMP_OUT_DET,
+ F_TMP_OUT_RES,
+ F_VBAT_TH_DET,
+ F_VBAT_TH_RES,
+ F_IBAT_SHORT_DET,
+ F_IBAT_SHORT_RES,
+ F_VBAT_OV_DET,
+ F_VBAT_OV_RES,
+ F_BAT_ASSIST_DET,
+ F_BAT_ASSIST_RES,
+ F_VSYS_TH_DET,
+ F_VSYS_TH_RES,
+ F_VSYS_OV_DET,
+ F_VSYS_OV_RES,
+ F_VSYS_SHT_DET,
+ F_VSYS_SHT_RES,
+ F_VSYS_UV_DET,
+ F_VSYS_UV_RES,
+ F_OTP_LOAD_DONE,
+ F_PWR_ON,
+ F_EXTIADP_TRNS,
+ F_EXTIADP_TH_DET,
+ F_EXIADP_TH_RES,
+ F_BAT_MNT_DET,
+ F_BAT_MNT_RES,
+ F_TSD_DET,
+ F_TSD_RES,
+ F_CHGWDT_EXP,
+ F_THERMWDT_EXP,
+ F_TMP_TRNS,
+ F_CHG_TRNS,
+ F_VBUS_UCD_PORT_DET,
+ F_VBUS_UCD_UCHG_DET,
+ F_VBUS_UCD_URID_RMV,
+ F_VBUS_UCD_OTG_DET,
+ F_VBUS_UCD_URID_MOD,
+ F_VCC_UCD_PORT_DET,
+ F_VCC_UCD_UCHG_DET,
+ F_VCC_UCD_URID_RMV,
+ F_VCC_UCD_OTG_DET,
+ F_VCC_UCD_URID_MOD,
+ F_PROCHOT_DET,
+ F_PROCHOT_RES,
+ F_VACP_DET,
+ F_VACP_RES,
+ F_VACP_TH_DET,
+ F_VACP_TH_RES,
+ F_IACP_TH_DET,
+ F_IACP_THE_RES,
+ F_THERM_TH_DET,
+ F_THERM_TH_RES,
+ F_IBATM_TH_DET,
+ F_IBATM_TH_RES,
+ F_IBATP_TH_DET,
+ F_IBATP_TH_RES,
+ F_INT7_STATUS,
+ F_INT6_STATUS,
+ F_INT5_STATUS,
+ F_INT4_STATUS,
+ F_INT3_STATUS,
+ F_INT2_STATUS,
+ F_INT1_STATUS,
+ F_INT0_STATUS,
+ F_ILIM_DECREASE,
+ F_RESERVE_OTPREG1,
+ F_POWER_SAVE_MODE,
+ F_DEBUG_MODE_SET,
+ F_DEBUG0x14,
+ F_DEBUG0x1A,
+ F_MAX_FIELDS
+};
+
+static const struct reg_field bd9995x_reg_fields[] = {
+ [F_PREV_CHGSTM_STATE] = REG_FIELD(CHGSTM_STATUS, 8, 14),
+ [F_CHGSTM_STATE] = REG_FIELD(CHGSTM_STATUS, 0, 6),
+ [F_VBAT_VSYS_STATUS] = REG_FIELD(VBAT_VSYS_STATUS, 0, 15),
+ [F_VBUS_VCC_STATUS] = REG_FIELD(VBUS_VCC_STATUS, 0, 12),
+ [F_BATTEMP] = REG_FIELD(CHGOP_STATUS, 8, 10),
+ [F_VRECHG_DET] = REG_FIELD(CHGOP_STATUS, 6, 6),
+ [F_RBOOST_UV] = REG_FIELD(CHGOP_STATUS, 1, 1),
+ [F_RBOOSTS] = REG_FIELD(CHGOP_STATUS, 0, 0),
+ [F_THERMWDT_VAL] = REG_FIELD(WDT_STATUS, 8, 15),
+ [F_CHGWDT_VAL] = REG_FIELD(WDT_STATUS, 0, 7),
+ [F_CUR_ILIM_VAL] = REG_FIELD(CUR_ILIM_VAL, 0, 13),
+ [F_SEL_ILIM_VAL] = REG_FIELD(SEL_ILIM_VAL, 0, 13),
+ [F_IBUS_LIM_SET] = REG_FIELD(IBUS_LIM_SET, 5, 13),
+ [F_ICC_LIM_SET] = REG_FIELD(ICC_LIM_SET, 5, 13),
+ [F_IOTG_LIM_SET] = REG_FIELD(IOTG_LIM_SET, 5, 13),
+ [F_OTG_BOTH_EN] = REG_FIELD(VIN_CTRL_SET, 15, 15),
+ [F_VRBOOST_TRIG] = REG_FIELD(VIN_CTRL_SET, 14, 14),
+ [F_VRBOOST_EN] = REG_FIELD(VIN_CTRL_SET, 12, 13),
+ [F_PP_BOTH_THRU] = REG_FIELD(VIN_CTRL_SET, 11, 11),
+ [F_VIN_ORD] = REG_FIELD(VIN_CTRL_SET, 7, 7),
+ [F_VBUS_EN] = REG_FIELD(VIN_CTRL_SET, 6, 6),
+ [F_VCC_EN] = REG_FIELD(VIN_CTRL_SET, 5, 5),
+ [F_VSYS_PRIORITY] = REG_FIELD(VIN_CTRL_SET, 4, 4),
+ [F_PPC_SUB_CAP] = REG_FIELD(VIN_CTRL_SET, 2, 3),
+ [F_PPC_CAP] = REG_FIELD(VIN_CTRL_SET, 0, 1),
+ [F_DCP_2500_SEL] = REG_FIELD(CHGOP_SET1, 15, 15),
+ [F_SDP_500_SEL] = REG_FIELD(CHGOP_SET1, 14, 14),
+ [F_ILIM_AUTO_DISEN] = REG_FIELD(CHGOP_SET1, 13, 13),
+ [F_VCC_BC_DISEN] = REG_FIELD(CHGOP_SET1, 11, 11),
+ [F_VBUS_BC_DISEN] = REG_FIELD(CHGOP_SET1, 10, 10),
+ [F_SDP_CHG_TRIG_EN] = REG_FIELD(CHGOP_SET1, 9, 9),
+ [F_SDP_CHG_TRIG] = REG_FIELD(CHGOP_SET1, 8, 8),
+ [F_AUTO_TOF] = REG_FIELD(CHGOP_SET1, 6, 6),
+ [F_AUTO_FST] = REG_FIELD(CHGOP_SET1, 5, 5),
+ [F_AUTO_RECH] = REG_FIELD(CHGOP_SET1, 3, 3),
+ [F_ILIM_RESET_EN] = REG_FIELD(CHGOP_SET2, 14, 14),
+ [F_DCDC_1MS_SEL] = REG_FIELD(CHGOP_SET2, 12, 13),
+ [F_SEL_ILIM_DIV] = REG_FIELD(CHGOP_SET2, 10, 10),
+ [F_BATT_LEARN] = REG_FIELD(CHGOP_SET2, 8, 8),
+ [F_CHG_EN] = REG_FIELD(CHGOP_SET2, 7, 7),
+ [F_USB_SUS] = REG_FIELD(CHGOP_SET2, 6, 6),
+ [F_CHOP_SS_INIT] = REG_FIELD(CHGOP_SET2, 5, 5),
+ [F_CHOP_ALL_INIT] = REG_FIELD(CHGOP_SET2, 4, 4),
+ [F_DCDC_CLK_SEL] = REG_FIELD(CHGOP_SET2, 2, 3),
+ [F_CHOP_SS] = REG_FIELD(CHGOP_SET2, 1, 1),
+ [F_CHOP_ALL] = REG_FIELD(CHGOP_SET2, 0, 0),
+ [F_VBUSCLPS_TH_SET] = REG_FIELD(VBUSCLPS_TH_SET, 7, 14),
+ [F_VCCCLPS_TH_SET] = REG_FIELD(VCCCLPS_TH_SET, 7, 14),
+ [F_WDT_FST] = REG_FIELD(CHGWDT_SET, 8, 15),
+ [F_WDT_PRE] = REG_FIELD(CHGWDT_SET, 0, 7),
+ [F_WDT_IBAT_SHORT] = REG_FIELD(BATTWDT_SET, 8, 15),
+ [F_WDT_THERM] = REG_FIELD(BATTWDT_SET, 0, 7),
+ [F_VSYSREG_SET] = REG_FIELD(VSYSREG_SET, 6, 14),
+ [F_VSYSVAL_THH_SET] = REG_FIELD(VSYSVAL_THH_SET, 6, 14),
+ [F_VSYSVAL_THL_SET] = REG_FIELD(VSYSVAL_THL_SET, 6, 14),
+ [F_ITRICH_SET] = REG_FIELD(ITRICH_SET, 6, 10),
+ [F_IPRECH_SET] = REG_FIELD(IPRECH_SET, 6, 10),
+ [F_ICHG_SET] = REG_FIELD(ICHG_SET, 6, 13),
+ [F_ITERM_SET] = REG_FIELD(ITERM_SET, 6, 10),
+ [F_VPRECHG_TH_SET] = REG_FIELD(VPRECHG_TH_SET, 6, 14),
+ [F_VRBOOST_SET] = REG_FIELD(VRBOOST_SET, 6, 14),
+ [F_VFASTCHG_REG_SET1] = REG_FIELD(VFASTCHG_REG_SET1, 4, 14),
+ [F_VFASTCHG_REG_SET2] = REG_FIELD(VFASTCHG_REG_SET2, 4, 14),
+ [F_VFASTCHG_REG_SET3] = REG_FIELD(VFASTCHG_REG_SET3, 4, 14),
+ [F_VRECHG_SET] = REG_FIELD(VRECHG_SET, 4, 14),
+ [F_VBATOVP_SET] = REG_FIELD(VBATOVP_SET, 4, 14),
+ [F_IBATM_SHORT_SET] = REG_FIELD(IBATSHORT_SET, 0, 14),
+ [F_PROCHOT_DG_SET] = REG_FIELD(PROCHOT_CTRL_SET, 14, 15),
+ [F_PROCHOT_ICRIT_DG_SET] = REG_FIELD(PROCHOT_CTRL_SET, 10, 11),
+ [F_PROCHOT_IDCHG_DG_SET] = REG_FIELD(PROCHOT_CTRL_SET, 8, 9),
+ [F_PROCHOT_EN] = REG_FIELD(PROCHOT_CTRL_SET, 0, 4),
+ [F_PROCHOT_ICRIT_SET] = REG_FIELD(PROCHOT_ICRIT_SET, 0, 14),
+ [F_PROCHOT_INORM_SET] = REG_FIELD(PROCHOT_INORM_SET, 0, 14),
+ [F_PROCHOT_IDCHG_SET] = REG_FIELD(PROCHOT_IDCHG_SET, 0, 14),
+ [F_PROCHOT_VSYS_SET] = REG_FIELD(PROCHOT_VSYS_SET, 0, 14),
+ [F_IMON_INSEL] = REG_FIELD(PMON_IOUT_CTRL_SET, 9, 9),
+ [F_PMON_INSEL] = REG_FIELD(PMON_IOUT_CTRL_SET, 8, 8),
+ [F_IOUT_OUT_EN] = REG_FIELD(PMON_IOUT_CTRL_SET, 7, 7),
+ [F_IOUT_SOURCE_SEL] = REG_FIELD(PMON_IOUT_CTRL_SET, 6, 6),
+ [F_IOUT_GAIN_SET] = REG_FIELD(PMON_IOUT_CTRL_SET, 4, 5),
+ [F_PMON_OUT_EN] = REG_FIELD(PMON_IOUT_CTRL_SET, 3, 3),
+ [F_PMON_GAIN_SET] = REG_FIELD(PMON_IOUT_CTRL_SET, 0, 2),
+ [F_PMON_DACIN_VAL] = REG_FIELD(PMON_DACIN_VAL, 0, 9),
+ [F_IOUT_DACIN_VAL] = REG_FIELD(IOUT_DACIN_VAL, 0, 11),
+ [F_VCC_BCSRETRY] = REG_FIELD(VCC_UCD_SET, 12, 12),
+ [F_VCC_ADCRTRY] = REG_FIELD(VCC_UCD_SET, 8, 8),
+ [F_VCC_USBDETEN] = REG_FIELD(VCC_UCD_SET, 7, 7),
+ [F_VCC_IDRDETEN] = REG_FIELD(VCC_UCD_SET, 6, 6),
+ [F_VCC_ENUMRDY] = REG_FIELD(VCC_UCD_SET, 5, 5),
+ [F_VCC_ADCPOLEN] = REG_FIELD(VCC_UCD_SET, 4, 4),
+ [F_VCC_DCDMODE] = REG_FIELD(VCC_UCD_SET, 3, 3),
+ [F_VCC_USB_SW_EN] = REG_FIELD(VCC_UCD_SET, 1, 1),
+ [F_VCC_USB_SW] = REG_FIELD(VCC_UCD_SET, 0, 0),
+ [F_VCC_DCDFAIL] = REG_FIELD(VCC_UCD_STATUS, 15, 15),
+ [F_VCC_CHGPORT] = REG_FIELD(VCC_UCD_STATUS, 12, 13),
+ [F_VCC_PUPDET] = REG_FIELD(VCC_UCD_STATUS, 11, 11),
+ [F_VCC_VBUS_VLD] = REG_FIELD(VCC_UCD_STATUS, 7, 7),
+ [F_VCC_CHGDET] = REG_FIELD(VCC_UCD_STATUS, 6, 6),
+ [F_VCC_OTGDET] = REG_FIELD(VCC_UCD_STATUS, 3, 3),
+ [F_VCC_VBINOP] = REG_FIELD(VCC_IDD_STATUS, 6, 6),
+ [F_VCC_EXTID] = REG_FIELD(VCC_IDD_STATUS, 5, 5),
+ [F_VCC_IDRDET] = REG_FIELD(VCC_IDD_STATUS, 4, 4),
+ [F_VCC_INDO] = REG_FIELD(VCC_IDD_STATUS, 0, 3),
+ [F_VCC_UCDSWEN] = REG_FIELD(VCC_UCD_FCTRL_SET, 10, 10),
+ [F_VCC_RREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 9, 9),
+ [F_VCC_DPPU_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 8, 8),
+ [F_VCC_DPREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 7, 7),
+ [F_VCC_DMREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 6, 6),
+ [F_VCC_DPDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 5, 5),
+ [F_VCC_DMDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 4, 4),
+ [F_VCC_DPSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 3, 3),
+ [F_VCC_DMSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 2, 2),
+ [F_VCC_DP_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 1, 1),
+ [F_VCC_DM_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 0, 0),
+ [F_VCC_EXTCLKENBL] = REG_FIELD(VCC_UCD_FCTRL_EN, 15, 15),
+ [F_VCC_PLSTESTEN] = REG_FIELD(VCC_UCD_FCTRL_EN, 14, 14),
+ [F_VCC_UCDSWEN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 10, 10),
+ [F_VCC_RREF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 9, 9),
+ [F_VCC_DPPU_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 8, 8),
+ [F_VCC_DPREF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 7, 7),
+ [F_VCC_DMREF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 6, 6),
+ [F_VCC_DPDET_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 5, 5),
+ [F_VCC_DMDET_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 4, 4),
+ [F_VCC_DPSINK_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 3, 3),
+ [F_VCC_DMSINK_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 2, 2),
+ [F_VCC_DP_BUFF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 1, 1),
+ [F_VCC_DM_BUFF_EN_TSTENB] = REG_FIELD(VCC_UCD_FCTRL_EN, 0, 0),
+
+ [F_VBUS_BCSRETRY] = REG_FIELD(VBUS_UCD_SET, 12, 12),
+ [F_VBUS_ADCRTRY] = REG_FIELD(VBUS_UCD_SET, 8, 8),
+ [F_VBUS_USBDETEN] = REG_FIELD(VBUS_UCD_SET, 7, 7),
+ [F_VBUS_IDRDETEN] = REG_FIELD(VBUS_UCD_SET, 6, 6),
+ [F_VBUS_ENUMRDY] = REG_FIELD(VBUS_UCD_SET, 5, 5),
+ [F_VBUS_ADCPOLEN] = REG_FIELD(VBUS_UCD_SET, 4, 4),
+ [F_VBUS_DCDMODE] = REG_FIELD(VBUS_UCD_SET, 3, 3),
+ [F_VBUS_USB_SW_EN] = REG_FIELD(VBUS_UCD_SET, 1, 1),
+ [F_VBUS_USB_SW] = REG_FIELD(VBUS_UCD_SET, 0, 0),
+ [F_VBUS_DCDFAIL] = REG_FIELD(VBUS_UCD_STATUS, 15, 15),
+ [F_VBUS_CHGPORT] = REG_FIELD(VBUS_UCD_STATUS, 12, 13),
+ [F_VBUS_PUPDET] = REG_FIELD(VBUS_UCD_STATUS, 11, 11),
+ [F_VBUS_VBUS_VLD] = REG_FIELD(VBUS_UCD_STATUS, 7, 7),
+ [F_VBUS_CHGDET] = REG_FIELD(VBUS_UCD_STATUS, 6, 6),
+ [F_VBUS_OTGDET] = REG_FIELD(VBUS_UCD_STATUS, 3, 3),
+ [F_VBUS_VBINOP] = REG_FIELD(VBUS_IDD_STATUS, 6, 6),
+ [F_VBUS_EXTID] = REG_FIELD(VBUS_IDD_STATUS, 5, 5),
+ [F_VBUS_IDRDET] = REG_FIELD(VBUS_IDD_STATUS, 4, 4),
+ [F_VBUS_INDO] = REG_FIELD(VBUS_IDD_STATUS, 0, 3),
+ [F_VBUS_UCDSWEN] = REG_FIELD(VCC_UCD_FCTRL_SET, 10, 10),
+ [F_VBUS_RREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 9, 9),
+ [F_VBUS_DPPU_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 8, 8),
+ [F_VBUS_DPREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 7, 7),
+ [F_VBUS_DMREF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 6, 6),
+ [F_VBUS_DPDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 5, 5),
+ [F_VBUS_DMDET_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 4, 4),
+ [F_VBUS_DPSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 3, 3),
+ [F_VBUS_DMSINK_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 2, 2),
+ [F_VBUS_DP_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 1, 1),
+ [F_VBUS_DM_BUFF_EN] = REG_FIELD(VCC_UCD_FCTRL_SET, 0, 0),
+
+ [F_VBUS_EXTCLKENBL] = REG_FIELD(VBUS_UCD_FCTRL_EN, 15, 15),
+ [F_VBUS_PLSTESTEN] = REG_FIELD(VBUS_UCD_FCTRL_EN, 14, 14),
+ [F_VBUS_UCDSWEN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 10, 10),
+ [F_VBUS_RREF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 9, 9),
+ [F_VBUS_DPPU_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 8, 8),
+ [F_VBUS_DPREF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 7, 7),
+ [F_VBUS_DMREF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 6, 6),
+ [F_VBUS_DPDET_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 5, 5),
+ [F_VBUS_DMDET_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 4, 4),
+ [F_VBUS_DPSINK_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 3, 3),
+ [F_VBUS_DMSINK_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 2, 2),
+ [F_VBUS_DP_BUFF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 1, 1),
+ [F_VBUS_DM_BUFF_EN_TSTENB] = REG_FIELD(VBUS_UCD_FCTRL_EN, 0, 0),
+
+ [F_CHIP_ID] = REG_FIELD(CHIP_ID, 0, 15),
+ [F_CHIP_REV] = REG_FIELD(CHIP_REV, 0, 15),
+ [F_ONE_CELL_MODE] = REG_FIELD(IC_SET1, 11, 11),
+ [F_cell] = REG_FIELD(IC_SET1, 1, 1),
+ [F_VACP_AUTO_DISCHG] = REG_FIELD(IC_SET1, 9, 9),
+ [F_VACP_LOAD] = REG_FIELD(IC_SET1, 8, 8),
+ [F_ACOK_POL] = REG_FIELD(IC_SET1, 1, 1),
+ [F_ACOK_DISEN] = REG_FIELD(IC_SET1, 0, 0),
+ [F_DEBUG_SET1] = REG_FIELD(IC_SET2, 4, 8),
+ [F_DEBUG_SET0] = REG_FIELD(IC_SET2, 0, 0),
+ [F_MONRST_STATE] = REG_FIELD(SYSTEM_STATUS, 6, 6),
+ [F_ALMRST_STATE] = REG_FIELD(SYSTEM_STATUS, 5, 5),
+ [F_CHGRST_STATE] = REG_FIELD(SYSTEM_STATUS, 4, 4),
+ [F_OTPLD_STATE] = REG_FIELD(SYSTEM_STATUS, 1, 1),
+ [F_ALLRST_STATE] = REG_FIELD(SYSTEM_STATUS, 0, 0),
+ [F_PROTECT_SET] = REG_FIELD(PROTECT_SET, 0, 15),
+ [F_MAP_SET] = REG_FIELD(MAP_SET, 0, 15),
+ [F_ADCINTERVAL] = REG_FIELD(VM_CTRL_SET, 14, 15),
+ [F_ADCMOD] = REG_FIELD(VM_CTRL_SET, 12, 13),
+ [F_ADCTMOD] = REG_FIELD(VM_CTRL_SET, 10, 11),
+ [F_EXTIADPEN] = REG_FIELD(VM_CTRL_SET, 9, 9),
+ [F_VSYSENB] = REG_FIELD(VM_CTRL_SET, 8, 8),
+ [F_VCCENB] = REG_FIELD(VM_CTRL_SET, 7, 7),
+ [F_VBUSENB] = REG_FIELD(VM_CTRL_SET, 6, 6),
+ [F_VACPENB] = REG_FIELD(VM_CTRL_SET, 5, 5),
+ [F_IACPENB] = REG_FIELD(VM_CTRL_SET, 4, 4),
+ [F_THERMENB] = REG_FIELD(VM_CTRL_SET, 3, 3),
+ [F_VBATENB] = REG_FIELD(VM_CTRL_SET, 2, 2),
+ [F_IBATMENB] = REG_FIELD(VM_CTRL_SET, 1, 1),
+ [F_IBATPENB] = REG_FIELD(VM_CTRL_SET, 0, 0),
+ [F_TMPTHR1B] = REG_FIELD(THERM_WINDOW_SET1, 8, 15),
+ [F_TMPTHR1A] = REG_FIELD(THERM_WINDOW_SET1, 0, 7),
+ [F_TMPTHR2B] = REG_FIELD(THERM_WINDOW_SET2, 8, 15),
+ [F_TMPTHR2A] = REG_FIELD(THERM_WINDOW_SET2, 0, 7),
+ [F_TMPTHR3B] = REG_FIELD(THERM_WINDOW_SET3, 8, 15),
+ [F_TMPTHR3A] = REG_FIELD(THERM_WINDOW_SET3, 0, 7),
+ [F_TMPTHR4B] = REG_FIELD(THERM_WINDOW_SET4, 8, 15),
+ [F_TMPTHR4A] = REG_FIELD(THERM_WINDOW_SET4, 0, 7),
+ [F_TMPTHR5B] = REG_FIELD(THERM_WINDOW_SET5, 8, 15),
+ [F_TMPTHR5A] = REG_FIELD(THERM_WINDOW_SET5, 0, 7),
+ [F_IBATP_TH_SET] = REG_FIELD(IBATP_TH_SET, 0, 14),
+ [F_IBATM_TH_SET] = REG_FIELD(IBATM_TH_SET, 0, 14),
+ [F_VBAT_TH_SET] = REG_FIELD(VBAT_TH_SET, 0, 14),
+ [F_THERM_TH_SET] = REG_FIELD(THERM_TH_SET, 0, 7),
+ [F_IACP_TH_SET] = REG_FIELD(IACP_TH_SET, 0, 14),
+ [F_VACP_TH_SET] = REG_FIELD(VACP_TH_SET, 0, 14),
+ [F_VBUS_TH_SET] = REG_FIELD(VBUS_TH_SET, 0, 14),
+ [F_VCC_TH_SET] = REG_FIELD(VCC_TH_SET, 0, 14),
+ [F_VSYS_TH_SET] = REG_FIELD(VSYS_TH_SET, 0, 14),
+ [F_EXTIADP_TH_SET] = REG_FIELD(EXTIADP_TH_SET, 0, 11),
+ [F_IBATP_VAL] = REG_FIELD(IBATP_VAL, 0, 14),
+ [F_IBATP_AVE_VAL] = REG_FIELD(IBATP_AVE_VAL, 0, 14),
+ [F_IBATM_VAL] = REG_FIELD(IBATM_VAL, 0, 14),
+ [F_IBATM_AVE_VAL] = REG_FIELD(IBATM_AVE_VAL, 0, 14),
+ [F_VBAT_VAL] = REG_FIELD(VBAT_VAL, 0, 14),
+ [F_VBAT_AVE_VAL] = REG_FIELD(VBAT_AVE_VAL, 0, 14),
+ [F_THERM_VAL] = REG_FIELD(THERM_VAL, 0, 7),
+ [F_VTH_VAL] = REG_FIELD(VTH_VAL, 0, 11),
+ [F_IACP_VAL] = REG_FIELD(IACP_VAL, 0, 14),
+ [F_IACP_AVE_VAL] = REG_FIELD(IACP_AVE_VAL, 0, 14),
+ [F_VACP_VAL] = REG_FIELD(VACP_VAL, 0, 14),
+ [F_VACP_AVE_VAL] = REG_FIELD(VACP_AVE_VAL, 0, 14),
+ [F_VBUS_VAL] = REG_FIELD(VBUS_VAL, 0, 14),
+ [F_VBUS_AVE_VAL] = REG_FIELD(VBUS_AVE_VAL, 0, 14),
+ [F_VCC_VAL] = REG_FIELD(VCC_VAL, 0, 14),
+ [F_VCC_AVE_VAL] = REG_FIELD(VCC_AVE_VAL, 0, 14),
+ [F_VSYS_VAL] = REG_FIELD(VSYS_VAL, 0, 14),
+ [F_VSYS_AVE_VAL] = REG_FIELD(VSYS_AVE_VAL, 0, 14),
+ [F_EXTIADP_VAL] = REG_FIELD(EXTIADP_VAL, 0, 11),
+ [F_EXTIADP_AVE_VAL] = REG_FIELD(EXTIADP_AVE_VAL, 0, 11),
+ [F_VACPCLPS_TH_SET] = REG_FIELD(VACPCLPS_TH_SET, 7, 14),
+ [F_INT7_SET] = REG_FIELD(INT7_SET, 0, 15),
+ [F_INT6_SET] = REG_FIELD(INT6_SET, 0, 13),
+ [F_INT5_SET] = REG_FIELD(INT5_SET, 0, 13),
+ [F_INT4_SET] = REG_FIELD(INT4_SET, 0, 9),
+ [F_INT3_SET] = REG_FIELD(INT3_SET, 0, 15),
+ [F_INT2_SET] = REG_FIELD(INT2_SET, 0, 15),
+ [F_INT1_SET] = REG_FIELD(INT1_SET, 0, 15),
+ [F_INT0_SET] = REG_FIELD(INT0_SET, 0, 7),
+ [F_VBUS_RBUV_DET] = REG_FIELD(INT1_SET, 15, 15),
+ [F_VBUS_RBUV_RES] = REG_FIELD(INT1_SET, 14, 14),
+ [F_VBUS_TH_DET] = REG_FIELD(INT1_SET, 9, 9),
+ [F_VBUS_TH_RES] = REG_FIELD(INT1_SET, 8, 8),
+ [F_VBUS_IIN_MOD] = REG_FIELD(INT1_SET, 6, 6),
+ [F_VBUS_OV_DET] = REG_FIELD(INT1_SET, 5, 5),
+ [F_VBUS_OV_RES] = REG_FIELD(INT1_SET, 4, 4),
+ [F_VBUS_CLPS_DET] = REG_FIELD(INT1_SET, 3, 3),
+ [F_VBUS_CLPS] = REG_FIELD(INT1_SET, 2, 2),
+ [F_VBUS_DET] = REG_FIELD(INT1_SET, 1, 1),
+ [F_VBUS_RES] = REG_FIELD(INT1_SET, 0, 0),
+ [F_VCC_RBUV_DET] = REG_FIELD(INT2_SET, 15, 15),
+ [F_VCC_RBUV_RES] = REG_FIELD(INT2_SET, 14, 14),
+ [F_VCC_TH_DET] = REG_FIELD(INT2_SET, 9, 9),
+ [F_VCC_TH_RES] = REG_FIELD(INT2_SET, 8, 8),
+ [F_VCC_IIN_MOD] = REG_FIELD(INT2_SET, 6, 6),
+ [F_VCC_OVP_DET] = REG_FIELD(INT2_SET, 5, 5),
+ [F_VCC_OVP_RES] = REG_FIELD(INT2_SET, 4, 4),
+ [F_VCC_CLPS_DET] = REG_FIELD(INT2_SET, 3, 3),
+ [F_VCC_CLPS_RES] = REG_FIELD(INT2_SET, 2, 2),
+ [F_VCC_DET] = REG_FIELD(INT2_SET, 1, 1),
+ [F_VCC_RES] = REG_FIELD(INT2_SET, 0, 0),
+ [F_TH_DET] = REG_FIELD(INT3_SET, 15, 15),
+ [F_TH_RMV] = REG_FIELD(INT3_SET, 14, 14),
+ [F_TMP_OUT_DET] = REG_FIELD(INT3_SET, 11, 11),
+ [F_TMP_OUT_RES] = REG_FIELD(INT3_SET, 10, 10),
+ [F_VBAT_TH_DET] = REG_FIELD(INT3_SET, 9, 9),
+ [F_VBAT_TH_RES] = REG_FIELD(INT3_SET, 8, 8),
+ [F_IBAT_SHORT_DET] = REG_FIELD(INT3_SET, 7, 7),
+ [F_IBAT_SHORT_RES] = REG_FIELD(INT3_SET, 6, 6),
+ [F_VBAT_OV_DET] = REG_FIELD(INT3_SET, 5, 5),
+ [F_VBAT_OV_RES] = REG_FIELD(INT3_SET, 4, 4),
+ [F_BAT_ASSIST_DET] = REG_FIELD(INT3_SET, 3, 3),
+ [F_BAT_ASSIST_RES] = REG_FIELD(INT3_SET, 2, 2),
+ [F_VSYS_TH_DET] = REG_FIELD(INT4_SET, 9, 9),
+ [F_VSYS_TH_RES] = REG_FIELD(INT4_SET, 8, 8),
+ [F_VSYS_OV_DET] = REG_FIELD(INT4_SET, 5, 5),
+ [F_VSYS_OV_RES] = REG_FIELD(INT4_SET, 4, 4),
+ [F_VSYS_SHT_DET] = REG_FIELD(INT4_SET, 3, 3),
+ [F_VSYS_SHT_RES] = REG_FIELD(INT4_SET, 2, 2),
+ [F_VSYS_UV_DET] = REG_FIELD(INT4_SET, 1, 1),
+ [F_VSYS_UV_RES] = REG_FIELD(INT4_SET, 0, 0),
+ [F_OTP_LOAD_DONE] = REG_FIELD(INT5_SET, 13, 13),
+ [F_PWR_ON] = REG_FIELD(INT5_SET, 12, 12),
+ [F_EXTIADP_TRNS] = REG_FIELD(INT5_SET, 11, 11),
+ [F_EXTIADP_TH_DET] = REG_FIELD(INT5_SET, 9, 9),
+ [F_EXIADP_TH_RES] = REG_FIELD(INT5_SET, 8, 8),
+ [F_BAT_MNT_DET] = REG_FIELD(INT5_SET, 7, 7),
+ [F_BAT_MNT_RES] = REG_FIELD(INT5_SET, 6, 6),
+ [F_TSD_DET] = REG_FIELD(INT5_SET, 5, 5),
+ [F_TSD_RES] = REG_FIELD(INT5_SET, 4, 4),
+ [F_CHGWDT_EXP] = REG_FIELD(INT5_SET, 3, 3),
+ [F_THERMWDT_EXP] = REG_FIELD(INT5_SET, 2, 2),
+ [F_TMP_TRNS] = REG_FIELD(INT5_SET, 1, 1),
+ [F_CHG_TRNS] = REG_FIELD(INT5_SET, 0, 0),
+ [F_VBUS_UCD_PORT_DET] = REG_FIELD(INT6_SET, 13, 13),
+ [F_VBUS_UCD_UCHG_DET] = REG_FIELD(INT6_SET, 12, 12),
+ [F_VBUS_UCD_URID_RMV] = REG_FIELD(INT6_SET, 11, 11),
+ [F_VBUS_UCD_OTG_DET] = REG_FIELD(INT6_SET, 10, 10),
+ [F_VBUS_UCD_URID_MOD] = REG_FIELD(INT6_SET, 8, 8),
+ [F_VCC_UCD_PORT_DET] = REG_FIELD(INT6_SET, 5, 5),
+ [F_VCC_UCD_UCHG_DET] = REG_FIELD(INT6_SET, 4, 4),
+ [F_VCC_UCD_URID_RMV] = REG_FIELD(INT6_SET, 3, 3),
+ [F_VCC_UCD_OTG_DET] = REG_FIELD(INT6_SET, 2, 2),
+ [F_VCC_UCD_URID_MOD] = REG_FIELD(INT6_SET, 0, 0),
+ [F_PROCHOT_DET] = REG_FIELD(INT7_SET, 15, 15),
+ [F_PROCHOT_RES] = REG_FIELD(INT7_SET, 14, 14),
+ [F_VACP_DET] = REG_FIELD(INT7_SET, 11, 11),
+ [F_VACP_RES] = REG_FIELD(INT7_SET, 10, 10),
+ [F_VACP_TH_DET] = REG_FIELD(INT7_SET, 9, 9),
+ [F_VACP_TH_RES] = REG_FIELD(INT7_SET, 8, 8),
+ [F_IACP_TH_DET] = REG_FIELD(INT7_SET, 7, 7),
+ [F_IACP_THE_RES] = REG_FIELD(INT7_SET, 6, 6),
+ [F_THERM_TH_DET] = REG_FIELD(INT7_SET, 5, 5),
+ [F_THERM_TH_RES] = REG_FIELD(INT7_SET, 4, 4),
+ [F_IBATM_TH_DET] = REG_FIELD(INT7_SET, 3, 3),
+ [F_IBATM_TH_RES] = REG_FIELD(INT7_SET, 2, 2),
+ [F_IBATP_TH_DET] = REG_FIELD(INT7_SET, 1, 1),
+ [F_IBATP_TH_RES] = REG_FIELD(INT7_SET, 0, 0),
+ [F_INT7_STATUS] = REG_FIELD(INT7_STATUS, 0, 15),
+ [F_INT6_STATUS] = REG_FIELD(INT6_STATUS, 0, 13),
+ [F_INT5_STATUS] = REG_FIELD(INT5_STATUS, 0, 13),
+ [F_INT4_STATUS] = REG_FIELD(INT4_STATUS, 0, 9),
+ [F_INT3_STATUS] = REG_FIELD(INT3_STATUS, 0, 15),
+ [F_INT2_STATUS] = REG_FIELD(INT2_STATUS, 0, 15),
+ [F_INT1_STATUS] = REG_FIELD(INT1_STATUS, 0, 15),
+ [F_INT0_STATUS] = REG_FIELD(INT0_STATUS, 0, 7),
+ [F_ILIM_DECREASE] = REG_FIELD(OTPREG0, 0, 15),
+ [F_RESERVE_OTPREG1] = REG_FIELD(OTPREG1, 0, 15),
+ [F_POWER_SAVE_MODE] = REG_FIELD(SMBREG, 0, 15),
+ [F_DEBUG_MODE_SET] = REG_FIELD(DEBUG_MODE_SET, 0, 15),
+ [F_DEBUG0x14] = REG_FIELD(DEBUG0x14, 0, 15),
+ [F_DEBUG0x1A] = REG_FIELD(DEBUG0x1A, 0, 15),
+};
+
+/* CHGSTM_STATEs */
+#define CHGSTM_SUSPEND 0x00
+#define CHGSTM_TRICKLE_CHARGE 0x01
+#define CHGSTM_PRE_CHARGE 0x02
+#define CHGSTM_FAST_CHARGE 0x03
+#define CHGSTM_TOP_OFF 0x04
+#define CHGSTM_DONE 0x05
+#define CHGSTM_OTG 0x08
+#define CHGSTM_OTG_DONE 0x09
+#define CHGSTM_TEMPERATURE_ERROR_1 0x10
+#define CHGSTM_TEMPERATURE_ERROR_2 0x11
+#define CHGSTM_TEMPERATURE_ERROR_3 0x12
+#define CHGSTM_TEMPERATURE_ERROR_4 0x13
+#define CHGSTM_TEMPERATURE_ERROR_5 0x14
+#define CHGSTM_TEMPERATURE_ERROR_6 0x15
+#define CHGSTM_TEMPERATURE_ERROR_7 0x18
+#define CHGSTM_THERMAL_SHUT_DOWN_1 0x20
+#define CHGSTM_THERMAL_SHUT_DOWN_2 0x21
+#define CHGSTM_THERMAL_SHUT_DOWN_3 0x22
+#define CHGSTM_THERMAL_SHUT_DOWN_4 0x23
+#define CHGSTM_THERMAL_SHUT_DOWN_5 0x24
+#define CHGSTM_THERMAL_SHUT_DOWN_6 0x25
+#define CHGSTM_THERMAL_SHUT_DOWN_7 0x28
+#define CHGSTM_BATTERY_ERROR 0x40
+
+/* VBAT_VSYS_STATUS */
+#define STATUS_VSYS_OV BIT(15)
+#define STATUS_VSYS_SSD BIT(14)
+#define STATUS_VSYS_SCP BIT(13)
+#define STATUS_VSYS_UVN BIT(12)
+#define STATUS_IBAT_SHORT BIT(6)
+#define STATUS_VBAT_OV BIT(3)
+#define STATUS_DEAD_BAT BIT(0)
+
+/* VBUS_VCC_STATUS */
+#define STATUS_VACP_DET BIT(12)
+#define STATUS_VCC_OVP BIT(11)
+#define STATUS_ILIM_VCC_MOD BIT(10)
+#define STATUS_VCC_CLPS BIT(9)
+#define STATUS_VCC_DET BIT(8)
+#define STATUS_VBUS_OVP BIT(3)
+#define STATUS_ILIM_VBUS_MOD BIT(2)
+#define STATUS_VBUS_CLPS BIT(1)
+#define STATUS_VBUS_DET BIT(0)
+
+/* Interrupt set/status definitions */
+
+/* INT 0 */
+#define INT0_INT7_STATUS BIT(7)
+#define INT0_INT6_STATUS BIT(6)
+#define INT0_INT5_STATUS BIT(5)
+#define INT0_INT4_STATUS BIT(4)
+#define INT0_INT3_STATUS BIT(3)
+#define INT0_INT2_STATUS BIT(2)
+#define INT0_INT1_STATUS BIT(1)
+#define INT0_INT0_STATUS BIT(0)
+#define INT0_ALL 0xff
+
+/* INT 1 */
+#define VBUS_RBUV_DET BIT(15)
+#define VBUS_RBUV_RES BIT(14)
+#define VBUS_TH_DET BIT(9)
+#define VBUS_TH_RES BIT(8)
+#define VBUS_IIN_MOD BIT(6)
+#define VBUS_OV_DET BIT(5)
+#define VBUS_OV_RES BIT(4)
+#define VBUS_CLPS_DET BIT(3)
+#define VBUS_CLPS BIT(2)
+#define VBUS_DET BIT(1)
+#define VBUS_RES BIT(0)
+#define INT1_ALL (VBUS_RBUV_DET|\
+ VBUS_RBUV_RES|\
+ VBUS_TH_DET |\
+ VBUS_TH_RES |\
+ VBUS_IIN_MOD|\
+ VBUS_OV_DET |\
+ VBUS_OV_RES |\
+ VBUS_CLPS_DET |\
+ VBUS_CLPS |\
+ VBUS_DET |\
+ VBUS_RES)
+
+/* INT 2 */
+#define VCC_RBUV_DET BIT(15)
+#define VCC_RBUV_RES BIT(14)
+#define VCC_TH_DET BIT(9)
+#define VCC_TH_RES BIT(8)
+#define VCC_IIN_MOD BIT(6)
+#define VCC_OVP_DET BIT(5)
+#define VCC_OVP_RES BIT(4)
+#define VCC_CLPS_DET BIT(3)
+#define VCC_CLPS_RES BIT(2)
+#define VCC_DET BIT(1)
+#define VCC_RES BIT(0)
+#define INT2_ALL (VCC_RBUV_DET |\
+ VCC_RBUV_RES |\
+ VCC_TH_DET |\
+ VCC_TH_RES |\
+ VCC_IIN_MOD |\
+ VCC_OVP_DET |\
+ VCC_OVP_RES |\
+ VCC_CLPS_DET |\
+ VCC_CLPS_RES |\
+ VCC_DET |\
+ VCC_RES)
+/* INT 3 */
+#define TH_DET BIT(15)
+#define TH_RMV BIT(14)
+#define TMP_OUT_DET BIT(11)
+#define TMP_OUT_RES BIT(10)
+#define VBAT_TH_DET BIT(9)
+#define VBAT_TH_RES BIT(8)
+#define IBAT_SHORT_DET BIT(7)
+#define IBAT_SHORT_RES BIT(6)
+#define VBAT_OV_DET BIT(5)
+#define VBAT_OV_RES BIT(4)
+#define BAT_ASSIST_DET BIT(3)
+#define BAT_ASSIST_RES BIT(2)
+#define INT3_ALL (TH_DET |\
+ TH_RMV |\
+ TMP_OUT_DET |\
+ TMP_OUT_RES |\
+ VBAT_TH_DET |\
+ VBAT_TH_RES |\
+ IBAT_SHORT_DET |\
+ IBAT_SHORT_RES |\
+ VBAT_OV_DET |\
+ VBAT_OV_RES |\
+ BAT_ASSIST_DET |\
+ BAT_ASSIST_RES)
+
+/* INT 4 */
+#define VSYS_TH_DET BIT(9)
+#define VSYS_TH_RES BIT(8)
+#define VSYS_OV_DET BIT(5)
+#define VSYS_OV_RES BIT(4)
+#define VSYS_SHT_DET BIT(3)
+#define VSYS_SHT_RES BIT(2)
+#define VSYS_UV_DET BIT(1)
+#define VSYS_UV_RES BIT(0)
+#define INT4_ALL (VSYS_TH_DET |\
+ VSYS_TH_RES |\
+ VSYS_OV_DET |\
+ VSYS_OV_RES |\
+ VSYS_SHT_DET |\
+ VSYS_SHT_RES |\
+ VSYS_UV_DET |\
+ VSYS_UV_RES)
+
+/* INT 5*/
+#define OTP_LOAD_DONE BIT(13)
+#define PWR_ON BIT(12)
+#define EXTIADP_TRNS BIT(11)
+#define EXTIADP_TH_DET BIT(9)
+#define EXIADP_TH_RES BIT(8)
+#define BAT_MNT_DET BIT(7)
+#define BAT_MNT_RES BIT(6)
+#define TSD_DET BIT(5)
+#define TSD_RES BIT(4)
+#define CHGWDT_EXP BIT(3)
+#define THERMWDT_EXP BIT(2)
+#define TMP_TRNS BIT(1)
+#define CHG_TRNS BIT(0)
+#define INT5_ALL (OTP_LOAD_DONE |\
+ PWR_ON |\
+ EXTIADP_TRNS |\
+ EXTIADP_TH_DET |\
+ EXIADP_TH_RES |\
+ BAT_MNT_DET |\
+ BAT_MNT_RES |\
+ TSD_DET |\
+ TSD_RES |\
+ CHGWDT_EXP |\
+ THERMWDT_EXP |\
+ TMP_TRNS |\
+ CHG_TRNS)
+
+/* INT 6*/
+#define VBUS_UCD_PORT_DET BIT(13)
+#define VBUS_UCD_UCHG_DET BIT(12)
+#define VBUS_UCD_URID_RMV BIT(11)
+#define VBUS_UCD_OTG_DET BIT(10)
+#define VBUS_UCD_URID_MOD BIT(8)
+#define VCC_UCD_PORT_DET BIT(5)
+#define VCC_UCD_UCHG_DET BIT(4)
+#define VCC_UCD_URID_RMV BIT(3)
+#define VCC_UCD_OTG_DET BIT(2)
+#define VCC_UCD_URID_MOD BIT(0)
+#define INT6_ALL (VBUS_UCD_PORT_DET |\
+ VBUS_UCD_UCHG_DET |\
+ VBUS_UCD_URID_RMV |\
+ VBUS_UCD_OTG_DET |\
+ VBUS_UCD_URID_MOD |\
+ VCC_UCD_PORT_DET |\
+ VCC_UCD_UCHG_DET |\
+ VCC_UCD_URID_RMV |\
+ VCC_UCD_OTG_DET |\
+ VCC_UCD_URID_MOD)
+
+/* INT 7 */
+#define PROCHOT_DET BIT(15)
+#define PROCHOT_RES BIT(14)
+#define VACP_DET BIT(11)
+#define VACP_RES BIT(10)
+#define VACP_TH_DET BIT(9)
+#define VACP_TH_RES BIT(8)
+#define IACP_TH_DET BIT(7)
+#define IACP_THE_RES BIT(6)
+#define THERM_TH_DET BIT(5)
+#define THERM_TH_RES BIT(4)
+#define IBATM_TH_DET BIT(3)
+#define IBATM_TH_RES BIT(2)
+#define IBATP_TH_DET BIT(1)
+#define IBATP_TH_RES BIT(0)
+#define INT7_ALL (PROCHOT_DET |\
+ PROCHOT_RES |\
+ VACP_DET |\
+ VACP_RES |\
+ VACP_TH_DET |\
+ VACP_TH_RES |\
+ IACP_TH_DET |\
+ IACP_THE_RES |\
+ THERM_TH_DET |\
+ THERM_TH_RES |\
+ IBATM_TH_DET |\
+ IBATM_TH_RES |\
+ IBATP_TH_DET |\
+ IBATP_TH_RES)
+
+/* SYSTEM_CTRL_SET*/
+#define MONRST BIT(6)
+#define ALMRST BIT(5)
+#define CHGRST BIT(4)
+#define OTPLD BIT(1)
+#define ALLRST BIT(0)
+
+/* F_BATTEMP */
+#define ROOM 0x0
+#define HOT1 0x1
+#define HOT2 0x2
+#define HOT3 0x3
+#define COLD1 0x4
+#define COLD2 0x5
+#define TEMP_DIS 0x6
+#define BATT_OPEN 0x7
+
+#endif
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 453d6332d43a..4540e913057f 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -673,7 +673,7 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi)
* { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
* struct i2c_adapter ad = { ... };
* i2c_add_adapter(&ad);
- * i2c_new_device(&ad, &bi);
+ * i2c_new_client_device(&ad, &bi);
*/
if (device_property_read_bool(bdi->dev, "disable-reset"))
return 0;
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index aebd1253dbc9..77150667e36b 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -32,6 +32,13 @@ enum bq25890_chip_version {
BQ25896,
};
+static const char *const bq25890_chip_name[] = {
+ "BQ25890",
+ "BQ25892",
+ "BQ25895",
+ "BQ25896",
+};
+
enum bq25890_fields {
F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */
F_BHOT, F_BCOLD, F_VINDPM_OFS, /* Reg01 */
@@ -119,6 +126,7 @@ static const struct regmap_access_table bq25890_writeable_regs = {
static const struct regmap_range bq25890_volatile_reg_ranges[] = {
regmap_reg_range(0x00, 0x00),
+ regmap_reg_range(0x02, 0x02),
regmap_reg_range(0x09, 0x09),
regmap_reg_range(0x0b, 0x14),
};
@@ -246,6 +254,7 @@ enum bq25890_table_ids {
/* range tables */
TBL_ICHG,
TBL_ITERM,
+ TBL_IILIM,
TBL_VREG,
TBL_BOOSTV,
TBL_SYSVMIN,
@@ -286,6 +295,7 @@ static const union {
/* TODO: BQ25896 has max ICHG 3008 mA */
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
+ [TBL_IILIM] = { .rt = {50000, 3200000, 50000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
@@ -367,18 +377,42 @@ enum bq25890_chrg_fault {
CHRG_FAULT_TIMER_EXPIRED,
};
+static bool bq25890_is_adc_property(enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq);
+
static int bq25890_power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- int ret;
struct bq25890_device *bq = power_supply_get_drvdata(psy);
struct bq25890_state state;
+ bool do_adc_conv;
+ int ret;
mutex_lock(&bq->lock);
+ /* update state in case we lost an interrupt */
+ __bq25890_handle_irq(bq);
state = bq->state;
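+ /*
+ * When the charger is offline the ADC is not running continuously, so
+ * kick a one-shot conversion for ADC-backed properties and wait for it
+ * to finish before reading the value.
+ */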
+ do_adc_conv = !state.online && bq25890_is_adc_property(psp);
+ if (do_adc_conv)
+ bq25890_field_write(bq, F_CONV_START, 1);
mutex_unlock(&bq->lock);
+ if (do_adc_conv)
+ regmap_field_read_poll_timeout(bq->rmap_fields[F_CONV_START],
+ ret, !ret, 25000, 1000000);
+
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (!state.online)
@@ -395,22 +429,24 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ if (!state.online || state.chrg_status == STATUS_NOT_CHARGING ||
+ state.chrg_status == STATUS_TERMINATION_DONE)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (state.chrg_status == STATUS_PRE_CHARGING)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ else if (state.chrg_status == STATUS_FAST_CHARGING)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else /* unreachable */
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ break;
+
case POWER_SUPPLY_PROP_MANUFACTURER:
val->strval = BQ25890_MANUFACTURER;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
- if (bq->chip_version == BQ25890)
- val->strval = "BQ25890";
- else if (bq->chip_version == BQ25892)
- val->strval = "BQ25892";
- else if (bq->chip_version == BQ25895)
- val->strval = "BQ25895";
- else if (bq->chip_version == BQ25896)
- val->strval = "BQ25896";
- else
- val->strval = "UNKNOWN";
-
+ val->strval = bq25890_chip_name[bq->chip_version];
break;
case POWER_SUPPLY_PROP_ONLINE:
@@ -430,15 +466,6 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */
- if (ret < 0)
- return ret;
-
- /* converted_val = ADC_val * 50mA (table 10.3.19) */
- val->intval = ret * 50000;
- break;
-
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
val->intval = bq25890_find_val(bq->init_data.ichg, TBL_ICHG);
break;
@@ -461,10 +488,22 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = bq25890_find_val(bq->init_data.vreg, TBL_VREG);
break;
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ val->intval = bq25890_find_val(bq->init_data.iprechg, TBL_ITERM);
+ break;
+
case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = bq25890_field_read(bq, F_IILIM);
+ if (ret < 0)
+ return ret;
+
+ val->intval = bq25890_find_val(ret, TBL_IILIM);
+ break;
+
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = bq25890_field_read(bq, F_SYSV); /* read measured value */
if (ret < 0)
@@ -474,6 +513,15 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = 2304000 + ret * 20000;
break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = bq25890_field_read(bq, F_ICHGR); /* read measured value */
+ if (ret < 0)
+ return ret;
+
+ /* converted_val = ADC_val * 50mA (table 10.3.19) */
+ val->intval = ret * -50000;
+ break;
+
default:
return -EINVAL;
}
@@ -513,74 +561,50 @@ static int bq25890_get_chip_state(struct bq25890_device *bq,
return 0;
}
-static bool bq25890_state_changed(struct bq25890_device *bq,
- struct bq25890_state *new_state)
-{
- struct bq25890_state old_state;
-
- mutex_lock(&bq->lock);
- old_state = bq->state;
- mutex_unlock(&bq->lock);
-
- return (old_state.chrg_status != new_state->chrg_status ||
- old_state.chrg_fault != new_state->chrg_fault ||
- old_state.online != new_state->online ||
- old_state.bat_fault != new_state->bat_fault ||
- old_state.boost_fault != new_state->boost_fault ||
- old_state.vsys_status != new_state->vsys_status);
-}
-
-static void bq25890_handle_state_change(struct bq25890_device *bq,
- struct bq25890_state *new_state)
+static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq)
{
+ struct bq25890_state new_state;
int ret;
- struct bq25890_state old_state;
- mutex_lock(&bq->lock);
- old_state = bq->state;
- mutex_unlock(&bq->lock);
+ ret = bq25890_get_chip_state(bq, &new_state);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ if (!memcmp(&bq->state, &new_state, sizeof(new_state)))
+ return IRQ_NONE;
- if (!new_state->online) { /* power removed */
+ if (!new_state.online && bq->state.online) { /* power removed */
/* disable ADC */
ret = bq25890_field_write(bq, F_CONV_START, 0);
if (ret < 0)
goto error;
- } else if (!old_state.online) { /* power inserted */
+ } else if (new_state.online && !bq->state.online) { /* power inserted */
/* enable ADC, to have control of charge current/voltage */
ret = bq25890_field_write(bq, F_CONV_START, 1);
if (ret < 0)
goto error;
}
- return;
+ bq->state = new_state;
+ power_supply_changed(bq->charger);
+ return IRQ_HANDLED;
error:
- dev_err(bq->dev, "Error communicating with the chip.\n");
+ dev_err(bq->dev, "Error communicating with the chip: %pe\n",
+ ERR_PTR(ret));
+ return IRQ_HANDLED;
}
static irqreturn_t bq25890_irq_handler_thread(int irq, void *private)
{
struct bq25890_device *bq = private;
- int ret;
- struct bq25890_state state;
-
- ret = bq25890_get_chip_state(bq, &state);
- if (ret < 0)
- goto handled;
-
- if (!bq25890_state_changed(bq, &state))
- goto handled;
-
- bq25890_handle_state_change(bq, &state);
+ irqreturn_t ret;
mutex_lock(&bq->lock);
- bq->state = state;
+ ret = __bq25890_handle_irq(bq);
mutex_unlock(&bq->lock);
- power_supply_changed(bq->charger);
-
-handled:
- return IRQ_HANDLED;
+ return ret;
}
static int bq25890_chip_reset(struct bq25890_device *bq)
@@ -610,7 +634,6 @@ static int bq25890_hw_init(struct bq25890_device *bq)
{
int ret;
int i;
- struct bq25890_state state;
const struct {
enum bq25890_fields id;
@@ -651,38 +674,37 @@ static int bq25890_hw_init(struct bq25890_device *bq)
}
}
- /* Configure ADC for continuous conversions. This does not enable it. */
- ret = bq25890_field_write(bq, F_CONV_RATE, 1);
+ /* Configure ADC for continuous conversions when charging */
+ ret = bq25890_field_write(bq, F_CONV_RATE, !!bq->state.online);
if (ret < 0) {
dev_dbg(bq->dev, "Config ADC failed %d\n", ret);
return ret;
}
- ret = bq25890_get_chip_state(bq, &state);
+ ret = bq25890_get_chip_state(bq, &bq->state);
if (ret < 0) {
dev_dbg(bq->dev, "Get state failed %d\n", ret);
return ret;
}
- mutex_lock(&bq->lock);
- bq->state = state;
- mutex_unlock(&bq->lock);
-
return 0;
}
-static enum power_supply_property bq25890_power_supply_props[] = {
+static const enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
};
static char *bq25890_charger_supplied_to[] = {
@@ -881,17 +903,11 @@ static int bq25890_fw_probe(struct bq25890_device *bq)
static int bq25890_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct i2c_adapter *adapter = client->adapter;
struct device *dev = &client->dev;
struct bq25890_device *bq;
int ret;
int i;
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(dev, "No support for SMBUS_BYTE_DATA\n");
- return -ENODEV;
- }
-
bq = devm_kzalloc(dev, sizeof(*bq), GFP_KERNEL);
if (!bq)
return -ENOMEM;
@@ -1004,34 +1020,34 @@ static int bq25890_suspend(struct device *dev)
* If charger is removed while in suspend, make sure ADC is disabled
* since it consumes slightly more power.
*/
- return bq25890_field_write(bq, F_CONV_START, 0);
+ return bq25890_field_write(bq, F_CONV_RATE, 0);
}
static int bq25890_resume(struct device *dev)
{
int ret;
- struct bq25890_state state;
struct bq25890_device *bq = dev_get_drvdata(dev);
- ret = bq25890_get_chip_state(bq, &state);
- if (ret < 0)
- return ret;
-
mutex_lock(&bq->lock);
- bq->state = state;
- mutex_unlock(&bq->lock);
+
+ ret = bq25890_get_chip_state(bq, &bq->state);
+ if (ret < 0)
+ goto unlock;
/* Re-enable ADC only if charger is plugged in. */
- if (state.online) {
- ret = bq25890_field_write(bq, F_CONV_START, 1);
+ if (bq->state.online) {
+ ret = bq25890_field_write(bq, F_CONV_RATE, 1);
if (ret < 0)
- return ret;
+ goto unlock;
}
/* signal userspace, maybe state changed while suspended */
power_supply_changed(bq->charger);
- return 0;
+unlock:
+ mutex_unlock(&bq->lock);
+
+ return ret;
}
#endif
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index a21e1a2673f8..2ef53dc1f2fb 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1422,7 +1422,9 @@ static int charger_manager_prepare_sysfs(struct charger_manager *cm)
}
static int cm_init_thermal_data(struct charger_manager *cm,
- struct power_supply *fuel_gauge)
+ struct power_supply *fuel_gauge,
+ enum power_supply_property *properties,
+ size_t *num_properties)
{
struct charger_desc *desc = cm->desc;
union power_supply_propval val;
@@ -1433,9 +1435,8 @@ static int cm_init_thermal_data(struct charger_manager *cm,
POWER_SUPPLY_PROP_TEMP, &val);
if (!ret) {
- cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] =
- POWER_SUPPLY_PROP_TEMP;
- cm->charger_psy_desc.num_properties++;
+ properties[*num_properties] = POWER_SUPPLY_PROP_TEMP;
+ (*num_properties)++;
cm->desc->measure_battery_temp = true;
}
#ifdef CONFIG_THERMAL
@@ -1446,9 +1447,8 @@ static int cm_init_thermal_data(struct charger_manager *cm,
return PTR_ERR(cm->tzd_batt);
/* Use external thermometer */
- cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] =
- POWER_SUPPLY_PROP_TEMP_AMBIENT;
- cm->charger_psy_desc.num_properties++;
+ properties[*num_properties] = POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ (*num_properties)++;
cm->desc->measure_battery_temp = true;
ret = 0;
}
@@ -1621,6 +1621,8 @@ static int charger_manager_probe(struct platform_device *pdev)
int j = 0;
union power_supply_propval val;
struct power_supply *fuel_gauge;
+ enum power_supply_property *properties;
+ size_t num_properties;
struct power_supply_config psy_cfg = {};
if (IS_ERR(desc)) {
@@ -1717,18 +1719,17 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy_desc.name = cm->psy_name_buf;
/* Allocate for psy properties because they may vary */
- cm->charger_psy_desc.properties =
- devm_kcalloc(&pdev->dev,
+ properties = devm_kcalloc(&pdev->dev,
ARRAY_SIZE(default_charger_props) +
NUM_CHARGER_PSY_OPTIONAL,
- sizeof(enum power_supply_property), GFP_KERNEL);
- if (!cm->charger_psy_desc.properties)
+ sizeof(*properties), GFP_KERNEL);
+ if (!properties)
return -ENOMEM;
- memcpy(cm->charger_psy_desc.properties, default_charger_props,
+ memcpy(properties, default_charger_props,
sizeof(enum power_supply_property) *
ARRAY_SIZE(default_charger_props));
- cm->charger_psy_desc.num_properties = psy_default.num_properties;
+ num_properties = ARRAY_SIZE(default_charger_props);
/* Find which optional psy-properties are available */
fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
@@ -1739,25 +1740,28 @@ static int charger_manager_probe(struct platform_device *pdev)
}
if (!power_supply_get_property(fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
- cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] =
+ properties[num_properties] =
POWER_SUPPLY_PROP_CHARGE_NOW;
- cm->charger_psy_desc.num_properties++;
+ num_properties++;
}
if (!power_supply_get_property(fuel_gauge,
POWER_SUPPLY_PROP_CURRENT_NOW,
&val)) {
- cm->charger_psy_desc.properties[cm->charger_psy_desc.num_properties] =
+ properties[num_properties] =
POWER_SUPPLY_PROP_CURRENT_NOW;
- cm->charger_psy_desc.num_properties++;
+ num_properties++;
}
- ret = cm_init_thermal_data(cm, fuel_gauge);
+ ret = cm_init_thermal_data(cm, fuel_gauge, properties, &num_properties);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize thermal data\n");
cm->desc->measure_battery_temp = false;
}
power_supply_put(fuel_gauge);
+ cm->charger_psy_desc.properties = properties;
+ cm->charger_psy_desc.num_properties = num_properties;
+
INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
/* Register sysfs entry for charger(regulator) */
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
new file mode 100644
index 000000000000..0146f1bfc29b
--- /dev/null
+++ b/drivers/power/supply/cw2015_battery.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fuel gauge driver for CellWise 2013 / 2015
+ *
+ * Copyright (C) 2012, RockChip
+ * Copyright (C) 2020, Tobias Schramm
+ *
+ * Authors: xuhuicong <xhc@rock-chips.com>
+ * Authors: Tobias Schramm <t.schramm@manjaro.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gfp.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/workqueue.h>
+
+#define CW2015_SIZE_BATINFO 64
+
+#define CW2015_RESET_TRIES 5
+
+#define CW2015_REG_VERSION 0x00
+#define CW2015_REG_VCELL 0x02
+#define CW2015_REG_SOC 0x04
+#define CW2015_REG_RRT_ALERT 0x06
+#define CW2015_REG_CONFIG 0x08
+#define CW2015_REG_MODE 0x0A
+#define CW2015_REG_BATINFO 0x10
+
+#define CW2015_MODE_SLEEP_MASK GENMASK(7, 6)
+#define CW2015_MODE_SLEEP (0x03 << 6)
+#define CW2015_MODE_NORMAL (0x00 << 6)
+#define CW2015_MODE_QUICK_START (0x03 << 4)
+#define CW2015_MODE_RESTART (0x0f << 0)
+
+#define CW2015_CONFIG_UPDATE_FLG (0x01 << 1)
+#define CW2015_ATHD(x) ((x) << 3)
+#define CW2015_MASK_ATHD GENMASK(7, 3)
+#define CW2015_MASK_SOC GENMASK(12, 0)
+
+/* reset gauge if no valid state of charge could be polled for 40s */
+#define CW2015_BAT_SOC_ERROR_MS (40 * MSEC_PER_SEC)
+/* reset gauge if state of charge stuck for half an hour during charging */
+#define CW2015_BAT_CHARGING_STUCK_MS (1800 * MSEC_PER_SEC)
+
+/* poll interval from CellWise GPL Android driver example */
+#define CW2015_DEFAULT_POLL_INTERVAL_MS 8000
+
+#define CW2015_AVERAGING_SAMPLES 3
+
+struct cw_battery {
+ struct device *dev;
+ struct workqueue_struct *battery_workqueue;
+ struct delayed_work battery_delay_work;
+ struct regmap *regmap;
+ struct power_supply *rk_bat;
+ struct power_supply_battery_info battery;
+ u8 *bat_profile;
+
+ bool charger_attached;
+ bool battery_changed;
+
+ int soc;
+ int voltage_mv;
+ int status;
+ int time_to_empty;
+ int charge_count;
+
+ u32 poll_interval_ms;
+ u8 alert_level;
+
+ unsigned int read_errors;
+ unsigned int charge_stuck_cnt;
+};
+
+static int cw_read_word(struct cw_battery *cw_bat, u8 reg, u16 *val)
+{
+ __be16 value;
+ int ret;
+
+ ret = regmap_bulk_read(cw_bat->regmap, reg, &value, sizeof(value));
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(value);
+ return 0;
+}
+
+static int cw_update_profile(struct cw_battery *cw_bat)
+{
+ int ret;
+ unsigned int reg_val;
+ u8 reset_val;
+
+ /* make sure gauge is not in sleep mode */
+ ret = regmap_read(cw_bat->regmap, CW2015_REG_MODE, &reg_val);
+ if (ret)
+ return ret;
+
+ reset_val = reg_val;
+ if ((reg_val & CW2015_MODE_SLEEP_MASK) == CW2015_MODE_SLEEP) {
+ dev_err(cw_bat->dev,
+ "Gauge is in sleep mode, can't update battery info\n");
+ return -EINVAL;
+ }
+
+ /* write new battery info */
+ ret = regmap_raw_write(cw_bat->regmap, CW2015_REG_BATINFO,
+ cw_bat->bat_profile,
+ CW2015_SIZE_BATINFO);
+ if (ret)
+ return ret;
+
+ /* set config update flag */
+ reg_val |= CW2015_CONFIG_UPDATE_FLG;
+ reg_val &= ~CW2015_MASK_ATHD;
+ reg_val |= CW2015_ATHD(cw_bat->alert_level);
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_CONFIG, reg_val);
+ if (ret)
+ return ret;
+
+ /* reset gauge to apply new battery profile */
+ reset_val &= ~CW2015_MODE_RESTART;
+ reg_val = reset_val | CW2015_MODE_RESTART;
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reg_val);
+ if (ret)
+ return ret;
+
+ /* wait for gauge to reset */
+ msleep(20);
+
+ /* clear reset flag */
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reset_val);
+ if (ret)
+ return ret;
+
+ /* wait for gauge to become ready */
+ ret = regmap_read_poll_timeout(cw_bat->regmap, CW2015_REG_SOC,
+ reg_val, reg_val <= 100,
+ 10 * USEC_PER_MSEC, 10 * USEC_PER_SEC);
+ if (ret)
+ dev_err(cw_bat->dev,
+ "Gauge did not become ready after profile upload\n");
+ else
+ dev_dbg(cw_bat->dev, "Battery profile updated\n");
+
+ return ret;
+}
+
+static int cw_init(struct cw_battery *cw_bat)
+{
+ int ret;
+ unsigned int reg_val = CW2015_MODE_SLEEP;
+
+ if ((reg_val & CW2015_MODE_SLEEP_MASK) == CW2015_MODE_SLEEP) {
+ reg_val = CW2015_MODE_NORMAL;
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reg_val);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(cw_bat->regmap, CW2015_REG_CONFIG, &reg_val);
+ if (ret)
+ return ret;
+
+ if ((reg_val & CW2015_MASK_ATHD) != CW2015_ATHD(cw_bat->alert_level)) {
+ dev_dbg(cw_bat->dev, "Setting new alert level\n");
+ reg_val &= ~CW2015_MASK_ATHD;
+		reg_val |= CW2015_ATHD(cw_bat->alert_level);
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_CONFIG, reg_val);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(cw_bat->regmap, CW2015_REG_CONFIG, &reg_val);
+ if (ret)
+ return ret;
+
+ if (!(reg_val & CW2015_CONFIG_UPDATE_FLG)) {
+ dev_dbg(cw_bat->dev,
+ "Battery profile not present, uploading battery profile\n");
+ if (cw_bat->bat_profile) {
+ ret = cw_update_profile(cw_bat);
+ if (ret) {
+ dev_err(cw_bat->dev,
+ "Failed to upload battery profile\n");
+ return ret;
+ }
+ } else {
+ dev_warn(cw_bat->dev,
+ "No profile specified, continuing without profile\n");
+ }
+ } else if (cw_bat->bat_profile) {
+ u8 bat_info[CW2015_SIZE_BATINFO];
+
+ ret = regmap_raw_read(cw_bat->regmap, CW2015_REG_BATINFO,
+ bat_info, CW2015_SIZE_BATINFO);
+ if (ret) {
+ dev_err(cw_bat->dev,
+ "Failed to read stored battery profile\n");
+ return ret;
+ }
+
+ if (memcmp(bat_info, cw_bat->bat_profile, CW2015_SIZE_BATINFO)) {
+ dev_warn(cw_bat->dev, "Replacing stored battery profile\n");
+ ret = cw_update_profile(cw_bat);
+ if (ret)
+ return ret;
+ }
+ } else {
+ dev_warn(cw_bat->dev,
+ "Can't check current battery profile, no profile provided\n");
+ }
+
+ dev_dbg(cw_bat->dev, "Battery profile configured\n");
+ return 0;
+}
+
+static int cw_power_on_reset(struct cw_battery *cw_bat)
+{
+ int ret;
+ unsigned char reset_val;
+
+ reset_val = CW2015_MODE_SLEEP;
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reset_val);
+ if (ret)
+ return ret;
+
+ /* wait for gauge to enter sleep */
+ msleep(20);
+
+ reset_val = CW2015_MODE_NORMAL;
+ ret = regmap_write(cw_bat->regmap, CW2015_REG_MODE, reset_val);
+ if (ret)
+ return ret;
+
+ ret = cw_init(cw_bat);
+ if (ret)
+ return ret;
+ return 0;
+}
+
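+/*
+ * HYSTERESIS() is true when @current lies inside the asymmetric window
+ * (@previous - @down, @previous + @up), i.e. the change from the previous
+ * reading is small enough to be ignored.
+ */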
+#define HYSTERESIS(current, previous, up, down) \
+ (((current) < (previous) + (up)) && ((current) > (previous) - (down)))
+
+static int cw_get_soc(struct cw_battery *cw_bat)
+{
+ unsigned int soc;
+ int ret;
+
+ ret = regmap_read(cw_bat->regmap, CW2015_REG_SOC, &soc);
+ if (ret)
+ return ret;
+
+ if (soc > 100) {
+ int max_error_cycles =
+ CW2015_BAT_SOC_ERROR_MS / cw_bat->poll_interval_ms;
+
+ dev_err(cw_bat->dev, "Invalid SoC %d%%\n", soc);
+ cw_bat->read_errors++;
+ if (cw_bat->read_errors > max_error_cycles) {
+ dev_warn(cw_bat->dev,
+ "Too many invalid SoC reports, resetting gauge\n");
+ cw_power_on_reset(cw_bat);
+ cw_bat->read_errors = 0;
+ }
+ return cw_bat->soc;
+ }
+ cw_bat->read_errors = 0;
+
+ /* Reset gauge if stuck while charging */
+ if (cw_bat->status == POWER_SUPPLY_STATUS_CHARGING && soc == cw_bat->soc) {
+ int max_stuck_cycles =
+ CW2015_BAT_CHARGING_STUCK_MS / cw_bat->poll_interval_ms;
+
+ cw_bat->charge_stuck_cnt++;
+ if (cw_bat->charge_stuck_cnt > max_stuck_cycles) {
+ dev_warn(cw_bat->dev,
+ "SoC stuck @%u%%, resetting gauge\n", soc);
+ cw_power_on_reset(cw_bat);
+ cw_bat->charge_stuck_cnt = 0;
+ }
+ } else {
+ cw_bat->charge_stuck_cnt = 0;
+ }
+
+	/* Ignore SoC dips (e.g. from voltage sag) while charging */
+ if (cw_bat->charger_attached && HYSTERESIS(soc, cw_bat->soc, 0, 3))
+ soc = cw_bat->soc;
+
+	/* Ignore SoC spikes while discharging */
+ if (!cw_bat->charger_attached && HYSTERESIS(soc, cw_bat->soc, 3, 0))
+ soc = cw_bat->soc;
+
+ return soc;
+}
+
+static int cw_get_voltage(struct cw_battery *cw_bat)
+{
+ int ret, i, voltage_mv;
+ u16 reg_val;
+ u32 avg = 0;
+
+ for (i = 0; i < CW2015_AVERAGING_SAMPLES; i++) {
+ ret = cw_read_word(cw_bat, CW2015_REG_VCELL, &reg_val);
+ if (ret)
+ return ret;
+
+ avg += reg_val;
+ }
+ avg /= CW2015_AVERAGING_SAMPLES;
+
+ /*
+ * 305 uV per ADC step
+ * Use 312 / 1024 as efficient approximation of 305 / 1000
+ * Negligible error of 0.1%
+ */
+ voltage_mv = avg * 312 / 1024;
+
+ dev_dbg(cw_bat->dev, "Read voltage: %d mV, raw=0x%04x\n",
+ voltage_mv, reg_val);
+ return voltage_mv;
+}
+
+static int cw_get_time_to_empty(struct cw_battery *cw_bat)
+{
+ int ret;
+ u16 value16;
+
+ ret = cw_read_word(cw_bat, CW2015_REG_RRT_ALERT, &value16);
+ if (ret)
+ return ret;
+
+ return value16 & CW2015_MASK_SOC;
+}
+
+static void cw_update_charge_status(struct cw_battery *cw_bat)
+{
+ int ret;
+
+ ret = power_supply_am_i_supplied(cw_bat->rk_bat);
+ if (ret < 0) {
+ dev_warn(cw_bat->dev, "Failed to get supply state: %d\n", ret);
+ } else {
+ bool charger_attached;
+
+ charger_attached = !!ret;
+ if (cw_bat->charger_attached != charger_attached) {
+ cw_bat->battery_changed = true;
+ if (charger_attached)
+ cw_bat->charge_count++;
+ }
+ cw_bat->charger_attached = charger_attached;
+ }
+}
+
+static void cw_update_soc(struct cw_battery *cw_bat)
+{
+ int soc;
+
+ soc = cw_get_soc(cw_bat);
+ if (soc < 0)
+ dev_err(cw_bat->dev, "Failed to get SoC from gauge: %d\n", soc);
+ else if (cw_bat->soc != soc) {
+ cw_bat->soc = soc;
+ cw_bat->battery_changed = true;
+ }
+}
+
+static void cw_update_voltage(struct cw_battery *cw_bat)
+{
+ int voltage_mv;
+
+ voltage_mv = cw_get_voltage(cw_bat);
+ if (voltage_mv < 0)
+ dev_err(cw_bat->dev, "Failed to get voltage from gauge: %d\n",
+ voltage_mv);
+ else
+ cw_bat->voltage_mv = voltage_mv;
+}
+
+static void cw_update_status(struct cw_battery *cw_bat)
+{
+ int status = POWER_SUPPLY_STATUS_DISCHARGING;
+
+ if (cw_bat->charger_attached) {
+ if (cw_bat->soc >= 100)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+
+ if (cw_bat->status != status)
+ cw_bat->battery_changed = true;
+ cw_bat->status = status;
+}
+
+static void cw_update_time_to_empty(struct cw_battery *cw_bat)
+{
+ int time_to_empty;
+
+ time_to_empty = cw_get_time_to_empty(cw_bat);
+ if (time_to_empty < 0)
+ dev_err(cw_bat->dev, "Failed to get time to empty from gauge: %d\n",
+ time_to_empty);
+ else if (cw_bat->time_to_empty != time_to_empty) {
+ cw_bat->time_to_empty = time_to_empty;
+ cw_bat->battery_changed = true;
+ }
+}
+
+static void cw_bat_work(struct work_struct *work)
+{
+ struct delayed_work *delay_work;
+ struct cw_battery *cw_bat;
+ int ret;
+ unsigned int reg_val;
+
+ delay_work = to_delayed_work(work);
+ cw_bat = container_of(delay_work, struct cw_battery, battery_delay_work);
+ ret = regmap_read(cw_bat->regmap, CW2015_REG_MODE, &reg_val);
+ if (ret) {
+ dev_err(cw_bat->dev, "Failed to read mode from gauge: %d\n", ret);
+ } else {
+ if ((reg_val & CW2015_MODE_SLEEP_MASK) == CW2015_MODE_SLEEP) {
+ int i;
+
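+			/* Gauge unexpectedly dropped into sleep mode, try to recover it */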
+ for (i = 0; i < CW2015_RESET_TRIES; i++) {
+ if (!cw_power_on_reset(cw_bat))
+ break;
+ }
+ }
+ cw_update_soc(cw_bat);
+ cw_update_voltage(cw_bat);
+ cw_update_charge_status(cw_bat);
+ cw_update_status(cw_bat);
+ cw_update_time_to_empty(cw_bat);
+ }
+ dev_dbg(cw_bat->dev, "charger_attached = %d\n", cw_bat->charger_attached);
+ dev_dbg(cw_bat->dev, "status = %d\n", cw_bat->status);
+ dev_dbg(cw_bat->dev, "soc = %d%%\n", cw_bat->soc);
+ dev_dbg(cw_bat->dev, "voltage = %dmV\n", cw_bat->voltage_mv);
+
+ if (cw_bat->battery_changed)
+ power_supply_changed(cw_bat->rk_bat);
+ cw_bat->battery_changed = false;
+
+ queue_delayed_work(cw_bat->battery_workqueue,
+ &cw_bat->battery_delay_work,
+ msecs_to_jiffies(cw_bat->poll_interval_ms));
+}
+
+static bool cw_battery_valid_time_to_empty(struct cw_battery *cw_bat)
+{
+ return cw_bat->time_to_empty > 0 &&
+ cw_bat->time_to_empty < CW2015_MASK_SOC &&
+ cw_bat->status == POWER_SUPPLY_STATUS_DISCHARGING;
+}
+
+static int cw_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct cw_battery *cw_bat;
+
+ cw_bat = power_supply_get_drvdata(psy);
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = cw_bat->soc;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = cw_bat->status;
+ break;
+
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = !!cw_bat->voltage_mv;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = cw_bat->voltage_mv * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ if (cw_battery_valid_time_to_empty(cw_bat))
+ val->intval = cw_bat->time_to_empty;
+ else
+ val->intval = 0;
+ break;
+
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ val->intval = cw_bat->charge_count;
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ if (cw_bat->battery.charge_full_design_uah > 0)
+ val->intval = cw_bat->battery.charge_full_design_uah;
+ else
+ val->intval = 0;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (cw_battery_valid_time_to_empty(cw_bat) &&
+ cw_bat->battery.charge_full_design_uah > 0) {
+ /* calculate remaining capacity */
+ val->intval = cw_bat->battery.charge_full_design_uah;
+ val->intval = val->intval * cw_bat->soc / 100;
+
+ /* estimate current based on time to empty */
+ val->intval = 60 * val->intval / cw_bat->time_to_empty;
+ } else {
+ val->intval = 0;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static enum power_supply_property cw_battery_properties[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static const struct power_supply_desc cw2015_bat_desc = {
+ .name = "cw2015-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = cw_battery_properties,
+ .num_properties = ARRAY_SIZE(cw_battery_properties),
+ .get_property = cw_battery_get_property,
+};
+
+static int cw2015_parse_properties(struct cw_battery *cw_bat)
+{
+ struct device *dev = cw_bat->dev;
+ int length;
+ int ret;
+
+ length = device_property_count_u8(dev, "cellwise,battery-profile");
+ if (length < 0) {
+ dev_warn(cw_bat->dev,
+ "No battery-profile found, using current flash contents\n");
+ } else if (length != CW2015_SIZE_BATINFO) {
+ dev_err(cw_bat->dev, "battery-profile must be %d bytes\n",
+ CW2015_SIZE_BATINFO);
+ return -EINVAL;
+ } else {
+ cw_bat->bat_profile = devm_kzalloc(dev, length, GFP_KERNEL);
+ if (!cw_bat->bat_profile)
+ return -ENOMEM;
+
+ ret = device_property_read_u8_array(dev,
+ "cellwise,battery-profile",
+ cw_bat->bat_profile,
+ length);
+ if (ret)
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "cellwise,monitor-interval-ms",
+ &cw_bat->poll_interval_ms);
+ if (ret) {
+ dev_dbg(cw_bat->dev, "Using default poll interval\n");
+ cw_bat->poll_interval_ms = CW2015_DEFAULT_POLL_INTERVAL_MS;
+ }
+
+ return 0;
+}
+
+static const struct regmap_range regmap_ranges_rd_yes[] = {
+ regmap_reg_range(CW2015_REG_VERSION, CW2015_REG_VERSION),
+ regmap_reg_range(CW2015_REG_VCELL, CW2015_REG_CONFIG),
+ regmap_reg_range(CW2015_REG_MODE, CW2015_REG_MODE),
+ regmap_reg_range(CW2015_REG_BATINFO,
+ CW2015_REG_BATINFO + CW2015_SIZE_BATINFO - 1),
+};
+
+static const struct regmap_access_table regmap_rd_table = {
+ .yes_ranges = regmap_ranges_rd_yes,
+	.n_yes_ranges = ARRAY_SIZE(regmap_ranges_rd_yes),
+};
+
+static const struct regmap_range regmap_ranges_wr_yes[] = {
+ regmap_reg_range(CW2015_REG_RRT_ALERT, CW2015_REG_CONFIG),
+ regmap_reg_range(CW2015_REG_MODE, CW2015_REG_MODE),
+ regmap_reg_range(CW2015_REG_BATINFO,
+ CW2015_REG_BATINFO + CW2015_SIZE_BATINFO - 1),
+};
+
+static const struct regmap_access_table regmap_wr_table = {
+ .yes_ranges = regmap_ranges_wr_yes,
+	.n_yes_ranges = ARRAY_SIZE(regmap_ranges_wr_yes),
+};
+
+static const struct regmap_range regmap_ranges_vol_yes[] = {
+ regmap_reg_range(CW2015_REG_VCELL, CW2015_REG_SOC + 1),
+};
+
+static const struct regmap_access_table regmap_vol_table = {
+ .yes_ranges = regmap_ranges_vol_yes,
+	.n_yes_ranges = ARRAY_SIZE(regmap_ranges_vol_yes),
+};
+
+static const struct regmap_config cw2015_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &regmap_rd_table,
+ .wr_table = &regmap_wr_table,
+ .volatile_table = &regmap_vol_table,
+ .max_register = CW2015_REG_BATINFO + CW2015_SIZE_BATINFO - 1,
+};
+
+static int cw_bat_probe(struct i2c_client *client)
+{
+ int ret;
+ struct cw_battery *cw_bat;
+ struct power_supply_config psy_cfg = { 0 };
+
+ cw_bat = devm_kzalloc(&client->dev, sizeof(*cw_bat), GFP_KERNEL);
+ if (!cw_bat)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, cw_bat);
+ cw_bat->dev = &client->dev;
+ cw_bat->soc = 1;
+
+ ret = cw2015_parse_properties(cw_bat);
+ if (ret) {
+ dev_err(cw_bat->dev, "Failed to parse cw2015 properties\n");
+ return ret;
+ }
+
+ cw_bat->regmap = devm_regmap_init_i2c(client, &cw2015_regmap_config);
+ if (IS_ERR(cw_bat->regmap)) {
+ dev_err(cw_bat->dev, "Failed to allocate regmap: %ld\n",
+ PTR_ERR(cw_bat->regmap));
+ return PTR_ERR(cw_bat->regmap);
+ }
+
+ ret = cw_init(cw_bat);
+ if (ret) {
+ dev_err(cw_bat->dev, "Init failed: %d\n", ret);
+ return ret;
+ }
+
+ psy_cfg.drv_data = cw_bat;
+ psy_cfg.fwnode = dev_fwnode(cw_bat->dev);
+
+ cw_bat->rk_bat = devm_power_supply_register(&client->dev,
+ &cw2015_bat_desc,
+ &psy_cfg);
+ if (IS_ERR(cw_bat->rk_bat)) {
+ dev_err(cw_bat->dev, "Failed to register power supply\n");
+ return PTR_ERR(cw_bat->rk_bat);
+ }
+
+ ret = power_supply_get_battery_info(cw_bat->rk_bat, &cw_bat->battery);
+ if (ret) {
+ dev_warn(cw_bat->dev,
+ "No monitored battery, some properties will be missing\n");
+ }
+
+ cw_bat->battery_workqueue = create_singlethread_workqueue("rk_battery");
+ INIT_DELAYED_WORK(&cw_bat->battery_delay_work, cw_bat_work);
+ queue_delayed_work(cw_bat->battery_workqueue,
+ &cw_bat->battery_delay_work, msecs_to_jiffies(10));
+ return 0;
+}
+
+static int __maybe_unused cw_bat_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cw_battery *cw_bat = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&cw_bat->battery_delay_work);
+ return 0;
+}
+
+static int __maybe_unused cw_bat_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct cw_battery *cw_bat = i2c_get_clientdata(client);
+
+ queue_delayed_work(cw_bat->battery_workqueue,
+ &cw_bat->battery_delay_work, 0);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cw_bat_pm_ops, cw_bat_suspend, cw_bat_resume);
+
+static int cw_bat_remove(struct i2c_client *client)
+{
+ struct cw_battery *cw_bat = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&cw_bat->battery_delay_work);
+ power_supply_put_battery_info(cw_bat->rk_bat, &cw_bat->battery);
+ return 0;
+}
+
+static const struct i2c_device_id cw_bat_id_table[] = {
+ { "cw2015", 0 },
+ { }
+};
+
+static const struct of_device_id cw2015_of_match[] = {
+ { .compatible = "cellwise,cw2015" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cw2015_of_match);
+
+static struct i2c_driver cw_bat_driver = {
+ .driver = {
+ .name = "cw2015",
+ .of_match_table = cw2015_of_match,
+ .pm = &cw_bat_pm_ops,
+ },
+ .probe_new = cw_bat_probe,
+ .remove = cw_bat_remove,
+ .id_table = cw_bat_id_table,
+};
+
+module_i2c_driver(cw_bat_driver);
+
+MODULE_AUTHOR("xhc<xhc@rock-chips.com>");
+MODULE_AUTHOR("Tobias Schramm <t.schramm@manjaro.org>");
+MODULE_DESCRIPTION("cw2015/cw2013 battery driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index bc462d1ec963..caa829738ef7 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -241,6 +241,7 @@ static int gab_probe(struct platform_device *pdev)
struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg = {};
struct gab_platform_data *pdata = pdev->dev.platform_data;
+ enum power_supply_property *properties;
int ret = 0;
int chan;
int index = ARRAY_SIZE(gab_props);
@@ -268,16 +269,16 @@ static int gab_probe(struct platform_device *pdev)
* copying the static properties and allocating extra memory for holding
* the extra configurable properties received from platform data.
*/
- psy_desc->properties = kcalloc(ARRAY_SIZE(gab_props) +
- ARRAY_SIZE(gab_chan_name),
- sizeof(*psy_desc->properties),
- GFP_KERNEL);
- if (!psy_desc->properties) {
+ properties = kcalloc(ARRAY_SIZE(gab_props) +
+ ARRAY_SIZE(gab_chan_name),
+ sizeof(*properties),
+ GFP_KERNEL);
+ if (!properties) {
ret = -ENOMEM;
goto first_mem_fail;
}
- memcpy(psy_desc->properties, gab_props, sizeof(gab_props));
+ memcpy(properties, gab_props, sizeof(gab_props));
/*
* getting channel from iio and copying the battery properties
@@ -294,13 +295,11 @@ static int gab_probe(struct platform_device *pdev)
int index2;
for (index2 = 0; index2 < index; index2++) {
- if (psy_desc->properties[index2] ==
- gab_dyn_props[chan])
+ if (properties[index2] == gab_dyn_props[chan])
break; /* already known */
}
if (index2 == index) /* really new */
- psy_desc->properties[index++] =
- gab_dyn_props[chan];
+ properties[index++] = gab_dyn_props[chan];
any = true;
}
}
@@ -317,6 +316,7 @@ static int gab_probe(struct platform_device *pdev)
* as some channels may not be supported by the device. So
* we need to take care of that.
*/
+ psy_desc->properties = properties;
psy_desc->num_properties = index;
adc_bat->psy = power_supply_register(&pdev->dev, psy_desc, &psy_cfg);
@@ -358,7 +358,7 @@ err_reg_fail:
iio_channel_release(adc_bat->channel[chan]);
}
second_mem_fail:
- kfree(psy_desc->properties);
+ kfree(properties);
first_mem_fail:
return ret;
}
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 84a206f42a8e..e7931ffb7151 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -572,27 +572,14 @@ static void lp8788_setup_adc_channel(struct device *dev,
return;
/* ADC channel for battery voltage */
- chan = iio_channel_get(dev, pdata->adc_vbatt);
+ chan = devm_iio_channel_get(dev, pdata->adc_vbatt);
pchg->chan[LP8788_VBATT] = IS_ERR(chan) ? NULL : chan;
/* ADC channel for battery temperature */
- chan = iio_channel_get(dev, pdata->adc_batt_temp);
+ chan = devm_iio_channel_get(dev, pdata->adc_batt_temp);
pchg->chan[LP8788_BATT_TEMP] = IS_ERR(chan) ? NULL : chan;
}
-static void lp8788_release_adc_channel(struct lp8788_charger *pchg)
-{
- int i;
-
- for (i = 0; i < LP8788_NUM_CHG_ADC; i++) {
- if (!pchg->chan[i])
- continue;
-
- iio_channel_release(pchg->chan[i]);
- pchg->chan[i] = NULL;
- }
-}
-
static ssize_t lp8788_show_charger_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -735,7 +722,6 @@ static int lp8788_charger_remove(struct platform_device *pdev)
flush_work(&pchg->charger_work);
lp8788_irq_unregister(pdev, pchg);
lp8788_psy_unregister(pchg);
- lp8788_release_adc_channel(pchg);
return 0;
}
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 8a59feac6468..dcedae18d7be 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -623,9 +623,19 @@ static const struct platform_device_id max14577_charger_id[] = {
};
MODULE_DEVICE_TABLE(platform, max14577_charger_id);
+static const struct of_device_id of_max14577_charger_dt_match[] = {
+ { .compatible = "maxim,max14577-charger",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX14577, },
+ { .compatible = "maxim,max77836-charger",
+ .data = (void *)MAXIM_DEVICE_TYPE_MAX77836, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_max14577_charger_dt_match);
+
static struct platform_driver max14577_charger_driver = {
.driver = {
.name = "max14577-charger",
+ .of_match_table = of_max14577_charger_dt_match,
},
.probe = max14577_charger_probe,
.remove = max14577_charger_remove,
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index 3bbb8b4c8ae7..137f9fafce8c 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -139,10 +139,9 @@ static void max14656_irq_worker(struct work_struct *work)
u8 buf[REG_TOTAL_NUM];
u8 chg_type;
- int ret = 0;
- ret = max14656_read_block_reg(chip->client, MAX14656_DEVICE_ID,
- REG_TOTAL_NUM, buf);
+ max14656_read_block_reg(chip->client, MAX14656_DEVICE_ID,
+ REG_TOTAL_NUM, buf);
if ((buf[MAX14656_STATUS_1] & STATUS1_VB_VALID_MASK) &&
(buf[MAX14656_STATUS_1] & STATUS1_CHG_TYPE_MASK)) {
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8a1f0ee493aa..48aa44665e2f 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -126,7 +126,7 @@ static void max17040_get_vcell(struct i2c_client *client)
vcell = max17040_read_reg(client, MAX17040_VCELL);
- chip->vcell = vcell;
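+	/* VCELL: ADC result in bits 15:4, 1250 uV per LSB */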
+ chip->vcell = (vcell >> 4) * 1250;
}
static void max17040_get_soc(struct i2c_client *client)
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 69ec4295d55d..f284547913d6 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -87,6 +87,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
};
static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
@@ -411,6 +412,13 @@ static int max17042_get_property(struct power_supply *psy,
return -EINVAL;
}
break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ ret = regmap_read(map, MAX17042_TTE, &data);
+ if (ret < 0)
+ return ret;
+
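+		/* MAX17042_TTE LSB is 5.625 s, report the value in seconds */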
+ val->intval = data * 5625 / 1000;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/power/supply/mp2629_charger.c b/drivers/power/supply/mp2629_charger.c
new file mode 100644
index 000000000000..bdf924b73e47
--- /dev/null
+++ b/drivers/power/supply/mp2629_charger.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MP2629 battery charger driver
+ *
+ * Copyright 2020 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/types.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/mp2629.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+#define MP2629_REG_INPUT_ILIM 0x00
+#define MP2629_REG_INPUT_VLIM 0x01
+#define MP2629_REG_CHARGE_CTRL 0x04
+#define MP2629_REG_CHARGE_ILIM 0x05
+#define MP2629_REG_PRECHARGE 0x06
+#define MP2629_REG_TERM_CURRENT 0x06
+#define MP2629_REG_CHARGE_VLIM 0x07
+#define MP2629_REG_TIMER_CTRL 0x08
+#define MP2629_REG_IMPEDANCE_COMP 0x09
+#define MP2629_REG_INTERRUPT 0x0b
+#define MP2629_REG_STATUS 0x0c
+#define MP2629_REG_FAULT 0x0d
+
+#define MP2629_MASK_INPUT_TYPE GENMASK(7, 5)
+#define MP2629_MASK_CHARGE_TYPE GENMASK(4, 3)
+#define MP2629_MASK_CHARGE_CTRL GENMASK(5, 4)
+#define MP2629_MASK_WDOG_CTRL GENMASK(5, 4)
+#define MP2629_MASK_IMPEDANCE GENMASK(7, 4)
+
+#define MP2629_INPUTSOURCE_CHANGE GENMASK(7, 5)
+#define MP2629_CHARGING_CHANGE GENMASK(4, 3)
+#define MP2629_FAULT_BATTERY BIT(3)
+#define MP2629_FAULT_THERMAL BIT(4)
+#define MP2629_FAULT_INPUT BIT(5)
+#define MP2629_FAULT_OTG BIT(6)
+
+#define MP2629_MAX_BATT_CAPACITY 100
+
+#define MP2629_PROPS(_idx, _min, _max, _step) \
+ [_idx] = { \
+ .min = _min, \
+ .max = _max, \
+ .step = _step, \
+}
+
+enum mp2629_source_type {
+ MP2629_SOURCE_TYPE_NO_INPUT,
+ MP2629_SOURCE_TYPE_NON_STD,
+ MP2629_SOURCE_TYPE_SDP,
+ MP2629_SOURCE_TYPE_CDP,
+ MP2629_SOURCE_TYPE_DCP,
+ MP2629_SOURCE_TYPE_OTG = 7,
+};
+
+enum mp2629_field {
+ INPUT_ILIM,
+ INPUT_VLIM,
+ CHARGE_ILIM,
+ CHARGE_VLIM,
+ PRECHARGE,
+ TERM_CURRENT,
+ MP2629_MAX_FIELD
+};
+
+struct mp2629_charger {
+ struct device *dev;
+ int status;
+ int fault;
+
+ struct regmap *regmap;
+ struct regmap_field *regmap_fields[MP2629_MAX_FIELD];
+ struct mutex lock;
+ struct power_supply *usb;
+ struct power_supply *battery;
+ struct iio_channel *iiochan[MP2629_ADC_CHAN_END];
+};
+
+struct mp2629_prop {
+ int reg;
+ int mask;
+ int min;
+ int max;
+ int step;
+ int shift;
+};
+
+static enum power_supply_usb_type mp2629_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_PD_DRP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN
+};
+
+static enum power_supply_property mp2629_charger_usb_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+};
+
+static enum power_supply_property mp2629_charger_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+};
+
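+/* Allowed range and step of each writable field, in microvolts / microamps */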
+static struct mp2629_prop props[] = {
+ MP2629_PROPS(INPUT_ILIM, 100000, 3250000, 50000),
+ MP2629_PROPS(INPUT_VLIM, 3800000, 5300000, 100000),
+ MP2629_PROPS(CHARGE_ILIM, 320000, 4520000, 40000),
+ MP2629_PROPS(CHARGE_VLIM, 3400000, 4670000, 10000),
+ MP2629_PROPS(PRECHARGE, 120000, 720000, 40000),
+ MP2629_PROPS(TERM_CURRENT, 80000, 680000, 40000),
+};
+
+static const struct reg_field mp2629_reg_fields[] = {
+ [INPUT_ILIM] = REG_FIELD(MP2629_REG_INPUT_ILIM, 0, 5),
+ [INPUT_VLIM] = REG_FIELD(MP2629_REG_INPUT_VLIM, 0, 3),
+ [CHARGE_ILIM] = REG_FIELD(MP2629_REG_CHARGE_ILIM, 0, 6),
+ [CHARGE_VLIM] = REG_FIELD(MP2629_REG_CHARGE_VLIM, 1, 7),
+ [PRECHARGE] = REG_FIELD(MP2629_REG_PRECHARGE, 4, 7),
+ [TERM_CURRENT] = REG_FIELD(MP2629_REG_TERM_CURRENT, 0, 3),
+};
+
+static char *adc_chan_name[] = {
+ "mp2629-batt-volt",
+ "mp2629-system-volt",
+ "mp2629-input-volt",
+ "mp2629-batt-current",
+ "mp2629-input-current",
+};
+
+static int mp2629_read_adc(struct mp2629_charger *charger,
+ enum mp2629_adc_chan ch,
+ union power_supply_propval *val)
+{
+ int ret;
+ int chval;
+
+ ret = iio_read_channel_processed(charger->iiochan[ch], &chval);
+ if (ret)
+ return ret;
+
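+	/* iio_read_channel_processed() reports mV / mA, the core expects uV / uA */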
+ val->intval = chval * 1000;
+
+ return 0;
+}
+
+static int mp2629_get_prop(struct mp2629_charger *charger,
+ enum mp2629_field fld,
+ union power_supply_propval *val)
+{
+ int ret;
+ unsigned int rval;
+
+ ret = regmap_field_read(charger->regmap_fields[fld], &rval);
+ if (ret)
+ return ret;
+
+ val->intval = rval * props[fld].step + props[fld].min;
+
+ return 0;
+}
+
+static int mp2629_set_prop(struct mp2629_charger *charger,
+ enum mp2629_field fld,
+ const union power_supply_propval *val)
+{
+ unsigned int rval;
+
+ if (val->intval < props[fld].min || val->intval > props[fld].max)
+ return -EINVAL;
+
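+	/* Scale micro-units down to register steps, relative to the field minimum */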
+ rval = (val->intval - props[fld].min) / props[fld].step;
+ return regmap_field_write(charger->regmap_fields[fld], rval);
+}
+
+static int mp2629_get_battery_capacity(struct mp2629_charger *charger,
+ union power_supply_propval *val)
+{
+ union power_supply_propval vnow, vlim;
+ int ret;
+
+ ret = mp2629_read_adc(charger, MP2629_BATT_VOLT, &vnow);
+ if (ret)
+ return ret;
+
+ ret = mp2629_get_prop(charger, CHARGE_VLIM, &vlim);
+ if (ret)
+ return ret;
+
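+	/* Rough estimate: battery voltage as a percentage of the charge voltage limit */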
+ val->intval = (vnow.intval * 100) / vlim.intval;
+ val->intval = min(val->intval, MP2629_MAX_BATT_CAPACITY);
+
+ return 0;
+}
+
+static int mp2629_charger_battery_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+ unsigned int rval;
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = mp2629_read_adc(charger, MP2629_BATT_VOLT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = mp2629_read_adc(charger, MP2629_BATT_CURRENT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = 4520000;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = 4670000;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = mp2629_get_battery_capacity(charger, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = mp2629_get_prop(charger, TERM_CURRENT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ ret = mp2629_get_prop(charger, PRECHARGE, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = mp2629_get_prop(charger, CHARGE_VLIM, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = mp2629_get_prop(charger, CHARGE_ILIM, val);
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (!charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ if (MP2629_FAULT_BATTERY & charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (MP2629_FAULT_THERMAL & charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (MP2629_FAULT_INPUT & charger->fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ rval = (rval & MP2629_MASK_CHARGE_TYPE) >> 3;
+ switch (rval) {
+ case 0x00:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case 0x01:
+		case 0x02:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+		case 0x03:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ rval = (rval & MP2629_MASK_CHARGE_TYPE) >> 3;
+ switch (rval) {
+ case 0x00:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case 0x01:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+		case 0x02:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mp2629_charger_battery_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return mp2629_set_prop(charger, TERM_CURRENT, val);
+
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return mp2629_set_prop(charger, PRECHARGE, val);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return mp2629_set_prop(charger, CHARGE_VLIM, val);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return mp2629_set_prop(charger, CHARGE_ILIM, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mp2629_charger_usb_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+ unsigned int rval;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ val->intval = !!(rval & MP2629_MASK_INPUT_TYPE);
+ break;
+
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ break;
+
+ rval = (rval & MP2629_MASK_INPUT_TYPE) >> 5;
+ switch (rval) {
+ case MP2629_SOURCE_TYPE_SDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case MP2629_SOURCE_TYPE_CDP:
+ val->intval = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case MP2629_SOURCE_TYPE_DCP:
+ val->intval = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case MP2629_SOURCE_TYPE_OTG:
+ val->intval = POWER_SUPPLY_USB_TYPE_PD_DRP;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = mp2629_read_adc(charger, MP2629_INPUT_VOLT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = mp2629_read_adc(charger, MP2629_INPUT_CURRENT, val);
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ ret = mp2629_get_prop(charger, INPUT_VLIM, val);
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = mp2629_get_prop(charger, INPUT_ILIM, val);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mp2629_charger_usb_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(psy->dev.parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return mp2629_set_prop(charger, INPUT_VLIM, val);
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return mp2629_set_prop(charger, INPUT_ILIM, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mp2629_charger_battery_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return (psp == POWER_SUPPLY_PROP_PRECHARGE_CURRENT) ||
+ (psp == POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT) ||
+ (psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT) ||
+ (psp == POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE);
+}
+
+static int mp2629_charger_usb_prop_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ return (psp == POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT) ||
+ (psp == POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT);
+}
+
+static irqreturn_t mp2629_irq_handler(int irq, void *dev_id)
+{
+ struct mp2629_charger *charger = dev_id;
+ unsigned int rval;
+ int ret;
+
+ mutex_lock(&charger->lock);
+
+ ret = regmap_read(charger->regmap, MP2629_REG_FAULT, &rval);
+ if (ret)
+ goto unlock;
+
+ if (rval) {
+ charger->fault = rval;
+ if (MP2629_FAULT_BATTERY & rval)
+ dev_err(charger->dev, "Battery fault OVP\n");
+ else if (MP2629_FAULT_THERMAL & rval)
+ dev_err(charger->dev, "Thermal shutdown fault\n");
+ else if (MP2629_FAULT_INPUT & rval)
+ dev_err(charger->dev, "no input or input OVP\n");
+ else if (MP2629_FAULT_OTG & rval)
+ dev_err(charger->dev, "VIN overloaded\n");
+
+ goto unlock;
+ }
+
+ ret = regmap_read(charger->regmap, MP2629_REG_STATUS, &rval);
+ if (ret)
+ goto unlock;
+
+ if (rval & MP2629_INPUTSOURCE_CHANGE)
+ power_supply_changed(charger->usb);
+ else if (rval & MP2629_CHARGING_CHANGE)
+ power_supply_changed(charger->battery);
+
+unlock:
+ mutex_unlock(&charger->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct power_supply_desc mp2629_usb_desc = {
+ .name = "mp2629_usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = mp2629_usb_types,
+ .num_usb_types = ARRAY_SIZE(mp2629_usb_types),
+ .properties = mp2629_charger_usb_props,
+ .num_properties = ARRAY_SIZE(mp2629_charger_usb_props),
+ .get_property = mp2629_charger_usb_get_prop,
+ .set_property = mp2629_charger_usb_set_prop,
+ .property_is_writeable = mp2629_charger_usb_prop_writeable,
+};
+
+static const struct power_supply_desc mp2629_battery_desc = {
+ .name = "mp2629_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = mp2629_charger_bat_props,
+ .num_properties = ARRAY_SIZE(mp2629_charger_bat_props),
+ .get_property = mp2629_charger_battery_get_prop,
+ .set_property = mp2629_charger_battery_set_prop,
+ .property_is_writeable = mp2629_charger_battery_prop_writeable,
+};
+
+static ssize_t batt_impedance_compensation_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(dev->parent);
+ unsigned int rval;
+ int ret;
+
+ ret = regmap_read(charger->regmap, MP2629_REG_IMPEDANCE_COMP, &rval);
+ if (ret)
+ return ret;
+
+ rval = (rval >> 4) * 10;
+ return sprintf(buf, "%d mohm\n", rval);
+}
+
+static ssize_t batt_impedance_compensation_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mp2629_charger *charger = dev_get_drvdata(dev->parent);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val > 140)
+ return -ERANGE;
+
+	/* the register works in multiples of 10 mohm, round down */
+ val = val / 10;
+ ret = regmap_update_bits(charger->regmap, MP2629_REG_IMPEDANCE_COMP,
+ MP2629_MASK_IMPEDANCE, val << 4);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(batt_impedance_compensation);
+
+static struct attribute *mp2629_charger_sysfs_attrs[] = {
+ &dev_attr_batt_impedance_compensation.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(mp2629_charger_sysfs);
+
+static void mp2629_charger_disable(void *data)
+{
+ struct mp2629_charger *charger = data;
+
+ regmap_update_bits(charger->regmap, MP2629_REG_CHARGE_CTRL,
+ MP2629_MASK_CHARGE_CTRL, 0);
+}
+
+static int mp2629_charger_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mp2629_data *ddata = dev_get_drvdata(dev->parent);
+ struct mp2629_charger *charger;
+ struct power_supply_config psy_cfg = {};
+ int ret, i, irq;
+
+ charger = devm_kzalloc(dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = ddata->regmap;
+ charger->dev = dev;
+ platform_set_drvdata(pdev, charger);
+
+ irq = platform_get_irq_optional(to_platform_device(dev->parent), 0);
+ if (irq < 0) {
+		dev_err(dev, "failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
+ for (i = 0; i < MP2629_MAX_FIELD; i++) {
+ charger->regmap_fields[i] = devm_regmap_field_alloc(dev,
+ charger->regmap, mp2629_reg_fields[i]);
+ if (IS_ERR(charger->regmap_fields[i])) {
+			dev_err(dev, "failed to allocate regmap field %d\n", i);
+ return PTR_ERR(charger->regmap_fields[i]);
+ }
+ }
+
+ for (i = 0; i < MP2629_ADC_CHAN_END; i++) {
+ charger->iiochan[i] = devm_iio_channel_get(dev,
+ adc_chan_name[i]);
+ if (IS_ERR(charger->iiochan[i])) {
+			dev_err(dev, "failed to get IIO channel %s\n", adc_chan_name[i]);
+ return PTR_ERR(charger->iiochan[i]);
+ }
+ }
+
+ ret = devm_add_action_or_reset(dev, mp2629_charger_disable, charger);
+ if (ret)
+ return ret;
+
+ charger->usb = devm_power_supply_register(dev, &mp2629_usb_desc, NULL);
+ if (IS_ERR(charger->usb)) {
+ dev_err(dev, "power supply register usb failed\n");
+ return PTR_ERR(charger->usb);
+ }
+
+ psy_cfg.drv_data = charger;
+ psy_cfg.attr_grp = mp2629_charger_sysfs_groups;
+ charger->battery = devm_power_supply_register(dev,
+ &mp2629_battery_desc, &psy_cfg);
+ if (IS_ERR(charger->battery)) {
+ dev_err(dev, "power supply register battery failed\n");
+ return PTR_ERR(charger->battery);
+ }
+
+ ret = regmap_update_bits(charger->regmap, MP2629_REG_CHARGE_CTRL,
+ MP2629_MASK_CHARGE_CTRL, BIT(4));
+ if (ret) {
+		dev_err(dev, "failed to enable charging: %d\n", ret);
+ return ret;
+ }
+
+ regmap_update_bits(charger->regmap, MP2629_REG_TIMER_CTRL,
+ MP2629_MASK_WDOG_CTRL, 0);
+
+ mutex_init(&charger->lock);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mp2629_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "mp2629-charger", charger);
+ if (ret) {
+		dev_err(dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ regmap_update_bits(charger->regmap, MP2629_REG_INTERRUPT,
+ GENMASK(6, 5), BIT(6) | BIT(5));
+
+ return 0;
+}
+
+static const struct of_device_id mp2629_charger_of_match[] = {
+ { .compatible = "mps,mp2629_charger"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2629_charger_of_match);
+
+static struct platform_driver mp2629_charger_driver = {
+ .driver = {
+ .name = "mp2629_charger",
+ .of_match_table = mp2629_charger_of_match,
+ },
+ .probe = mp2629_charger_probe,
+};
+module_platform_driver(mp2629_charger_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>");
+MODULE_DESCRIPTION("MP2629 Charger driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index ad0e9e0edb3f..e0476ec06601 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -88,7 +88,7 @@ static enum power_supply_property olpc_ac_props[] = {
};
static const struct power_supply_desc olpc_ac_desc = {
- .name = "olpc-ac",
+ .name = "olpc_ac",
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = olpc_ac_props,
.num_properties = ARRAY_SIZE(olpc_ac_props),
@@ -605,7 +605,7 @@ static const struct attribute_group *olpc_bat_sysfs_groups[] = {
*********************************************************************/
static struct power_supply_desc olpc_bat_desc = {
- .name = "olpc-battery",
+ .name = "olpc_battery",
.get_property = olpc_bat_get_property,
.use_for_apm = 1,
};
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 1a9a9fae73d3..02b37fe6061c 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -620,10 +620,18 @@ int power_supply_get_battery_info(struct power_supply *psy,
&info->voltage_min_design_uv);
of_property_read_u32(battery_np, "voltage-max-design-microvolt",
&info->voltage_max_design_uv);
+ of_property_read_u32(battery_np, "trickle-charge-current-microamp",
+ &info->tricklecharge_current_ua);
of_property_read_u32(battery_np, "precharge-current-microamp",
&info->precharge_current_ua);
+ of_property_read_u32(battery_np, "precharge-upper-limit-microvolt",
+ &info->precharge_voltage_max_uv);
of_property_read_u32(battery_np, "charge-term-current-microamp",
&info->charge_term_current_ua);
+ of_property_read_u32(battery_np, "re-charge-voltage-microvolt",
+ &info->charge_restart_voltage_uv);
+ of_property_read_u32(battery_np, "over-voltage-threshold-microvolt",
+ &info->overvoltage_limit_uv);
of_property_read_u32(battery_np, "constant-charge-current-max-microamp",
&info->constant_charge_current_max_ua);
of_property_read_u32(battery_np, "constant-charge-voltage-max-microvolt",
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index 75cf861ba492..7fe4b6b6ddc8 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -13,6 +13,11 @@ struct power_supply_hwmon {
unsigned long *props;
};
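+/* Indexed by the hwmon temp channel: 0 = device temperature, 1 = ambient */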
+static const char *const ps_temp_label[] = {
+ "temp",
+ "ambient temp",
+};
+
static int power_supply_hwmon_in_to_property(u32 attr)
{
switch (attr) {
@@ -98,6 +103,39 @@ static bool power_supply_hwmon_is_a_label(enum hwmon_sensor_types type,
return type == hwmon_temp && attr == hwmon_temp_label;
}
+struct hwmon_type_attr_list {
+ const u32 *attrs;
+ size_t n_attrs;
+};
+
+static const u32 ps_temp_attrs[] = {
+ hwmon_temp_input,
+ hwmon_temp_min, hwmon_temp_max,
+ hwmon_temp_min_alarm, hwmon_temp_max_alarm,
+};
+
+static const struct hwmon_type_attr_list ps_type_attrs[hwmon_max] = {
+ [hwmon_temp] = { ps_temp_attrs, ARRAY_SIZE(ps_temp_attrs) },
+};
+
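+/*
+ * Only expose a label when at least one data attribute of this sensor
+ * type/channel maps to a property the supply actually implements.
+ */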
+static bool power_supply_hwmon_has_input(
+ const struct power_supply_hwmon *psyhw,
+ enum hwmon_sensor_types type, int channel)
+{
+ const struct hwmon_type_attr_list *attr_list = &ps_type_attrs[type];
+ size_t i;
+
+ for (i = 0; i < attr_list->n_attrs; ++i) {
+ int prop = power_supply_hwmon_to_property(type,
+ attr_list->attrs[i], channel);
+
+ if (prop >= 0 && test_bit(prop, psyhw->props))
+ return true;
+ }
+
+ return false;
+}
+
static bool power_supply_hwmon_is_writable(enum hwmon_sensor_types type,
u32 attr)
{
@@ -124,9 +162,12 @@ static umode_t power_supply_hwmon_is_visible(const void *data,
const struct power_supply_hwmon *psyhw = data;
int prop;
-
- if (power_supply_hwmon_is_a_label(type, attr))
- return 0444;
+ if (power_supply_hwmon_is_a_label(type, attr)) {
+ if (power_supply_hwmon_has_input(psyhw, type, channel))
+ return 0444;
+ else
+ return 0;
+ }
prop = power_supply_hwmon_to_property(type, attr, channel);
if (prop < 0 || !test_bit(prop, psyhw->props))
@@ -144,7 +185,20 @@ static int power_supply_hwmon_read_string(struct device *dev,
u32 attr, int channel,
const char **str)
{
- *str = channel ? "temp" : "temp ambient";
+ switch (type) {
+ case hwmon_temp:
+ *str = ps_temp_label[channel];
+ break;
+ default:
+ /* unreachable, but see:
+ * gcc bug #51513 [1] and clang bug #978 [2]
+ *
+ * [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51513
+ * [2] https://github.com/ClangBuiltLinux/linux/issues/978
+ */
+ break;
+ }
+
return 0;
}
@@ -304,7 +358,7 @@ int power_supply_add_hwmon_sysfs(struct power_supply *psy)
goto error;
}
- ret = devm_add_action(dev, power_supply_hwmon_bitmap_free,
+ ret = devm_add_action_or_reset(dev, power_supply_hwmon_bitmap_free,
psyhw->props);
if (ret)
goto error;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index f37ad4eae60b..bc79560229b5 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -18,68 +18,211 @@
#include "power_supply.h"
-/*
- * This is because the name "current" breaks the device attr macro.
- * The "current" word resolves to "(get_current())" so instead of
- * "current" "(get_current())" appears in the sysfs.
- *
- * The source of this definition is the device.h which calls __ATTR
- * macro in sysfs.h which calls the __stringify macro.
- *
- * Only modification that the name is not tried to be resolved
- * (as a macro let's say).
- */
+#define MAX_PROP_NAME_LEN 30
+
+struct power_supply_attr {
+ const char *prop_name;
+ char attr_name[MAX_PROP_NAME_LEN + 1];
+ struct device_attribute dev_attr;
+ const char * const *text_values;
+ int text_values_len;
+};
-#define POWER_SUPPLY_ATTR(_name) \
-{ \
- .attr = { .name = #_name }, \
- .show = power_supply_show_property, \
- .store = power_supply_store_property, \
+#define _POWER_SUPPLY_ATTR(_name, _text, _len) \
+[POWER_SUPPLY_PROP_ ## _name] = \
+{ \
+ .prop_name = #_name, \
+ .attr_name = #_name "\0", \
+ .text_values = _text, \
+ .text_values_len = _len, \
}
-static struct device_attribute power_supply_attrs[];
+#define POWER_SUPPLY_ATTR(_name) _POWER_SUPPLY_ATTR(_name, NULL, 0)
+#define _POWER_SUPPLY_ENUM_ATTR(_name, _text) \
+ _POWER_SUPPLY_ATTR(_name, _text, ARRAY_SIZE(_text))
+#define POWER_SUPPLY_ENUM_ATTR(_name) \
+ _POWER_SUPPLY_ENUM_ATTR(_name, POWER_SUPPLY_ ## _name ## _TEXT)
+
+static const char * const POWER_SUPPLY_TYPE_TEXT[] = {
+ [POWER_SUPPLY_TYPE_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_TYPE_BATTERY] = "Battery",
+ [POWER_SUPPLY_TYPE_UPS] = "UPS",
+ [POWER_SUPPLY_TYPE_MAINS] = "Mains",
+ [POWER_SUPPLY_TYPE_USB] = "USB",
+ [POWER_SUPPLY_TYPE_USB_DCP] = "USB_DCP",
+ [POWER_SUPPLY_TYPE_USB_CDP] = "USB_CDP",
+ [POWER_SUPPLY_TYPE_USB_ACA] = "USB_ACA",
+ [POWER_SUPPLY_TYPE_USB_TYPE_C] = "USB_C",
+ [POWER_SUPPLY_TYPE_USB_PD] = "USB_PD",
+ [POWER_SUPPLY_TYPE_USB_PD_DRP] = "USB_PD_DRP",
+ [POWER_SUPPLY_TYPE_APPLE_BRICK_ID] = "BrickID",
+};
-static const char * const power_supply_type_text[] = {
- "Unknown", "Battery", "UPS", "Mains", "USB",
- "USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
- "USB_PD", "USB_PD_DRP", "BrickID"
+static const char * const POWER_SUPPLY_USB_TYPE_TEXT[] = {
+ [POWER_SUPPLY_USB_TYPE_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_USB_TYPE_SDP] = "SDP",
+ [POWER_SUPPLY_USB_TYPE_DCP] = "DCP",
+ [POWER_SUPPLY_USB_TYPE_CDP] = "CDP",
+ [POWER_SUPPLY_USB_TYPE_ACA] = "ACA",
+ [POWER_SUPPLY_USB_TYPE_C] = "C",
+ [POWER_SUPPLY_USB_TYPE_PD] = "PD",
+ [POWER_SUPPLY_USB_TYPE_PD_DRP] = "PD_DRP",
+ [POWER_SUPPLY_USB_TYPE_PD_PPS] = "PD_PPS",
+ [POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID] = "BrickID",
};
-static const char * const power_supply_usb_type_text[] = {
- "Unknown", "SDP", "DCP", "CDP", "ACA", "C",
- "PD", "PD_DRP", "PD_PPS", "BrickID"
+static const char * const POWER_SUPPLY_STATUS_TEXT[] = {
+ [POWER_SUPPLY_STATUS_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_STATUS_CHARGING] = "Charging",
+ [POWER_SUPPLY_STATUS_DISCHARGING] = "Discharging",
+ [POWER_SUPPLY_STATUS_NOT_CHARGING] = "Not charging",
+ [POWER_SUPPLY_STATUS_FULL] = "Full",
};
-static const char * const power_supply_status_text[] = {
- "Unknown", "Charging", "Discharging", "Not charging", "Full"
+static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = {
+ [POWER_SUPPLY_CHARGE_TYPE_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_CHARGE_TYPE_NONE] = "N/A",
+ [POWER_SUPPLY_CHARGE_TYPE_TRICKLE] = "Trickle",
+ [POWER_SUPPLY_CHARGE_TYPE_FAST] = "Fast",
+ [POWER_SUPPLY_CHARGE_TYPE_STANDARD] = "Standard",
+ [POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE] = "Adaptive",
+ [POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom",
};
-static const char * const power_supply_charge_type_text[] = {
- "Unknown", "N/A", "Trickle", "Fast", "Standard", "Adaptive", "Custom"
+static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
+ [POWER_SUPPLY_HEALTH_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_HEALTH_GOOD] = "Good",
+ [POWER_SUPPLY_HEALTH_OVERHEAT] = "Overheat",
+ [POWER_SUPPLY_HEALTH_DEAD] = "Dead",
+ [POWER_SUPPLY_HEALTH_OVERVOLTAGE] = "Over voltage",
+ [POWER_SUPPLY_HEALTH_UNSPEC_FAILURE] = "Unspecified failure",
+ [POWER_SUPPLY_HEALTH_COLD] = "Cold",
+ [POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE] = "Watchdog timer expire",
+ [POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE] = "Safety timer expire",
+ [POWER_SUPPLY_HEALTH_OVERCURRENT] = "Over current",
+ [POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED] = "Calibration required",
};
-static const char * const power_supply_health_text[] = {
- "Unknown", "Good", "Overheat", "Dead", "Over voltage",
- "Unspecified failure", "Cold", "Watchdog timer expire",
- "Safety timer expire", "Over current"
+static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = {
+ [POWER_SUPPLY_TECHNOLOGY_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_TECHNOLOGY_NiMH] = "NiMH",
+ [POWER_SUPPLY_TECHNOLOGY_LION] = "Li-ion",
+ [POWER_SUPPLY_TECHNOLOGY_LIPO] = "Li-poly",
+ [POWER_SUPPLY_TECHNOLOGY_LiFe] = "LiFe",
+ [POWER_SUPPLY_TECHNOLOGY_NiCd] = "NiCd",
+ [POWER_SUPPLY_TECHNOLOGY_LiMn] = "LiMn",
};
-static const char * const power_supply_technology_text[] = {
- "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
- "LiMn"
+static const char * const POWER_SUPPLY_CAPACITY_LEVEL_TEXT[] = {
+ [POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL] = "Critical",
+ [POWER_SUPPLY_CAPACITY_LEVEL_LOW] = "Low",
+ [POWER_SUPPLY_CAPACITY_LEVEL_NORMAL] = "Normal",
+ [POWER_SUPPLY_CAPACITY_LEVEL_HIGH] = "High",
+ [POWER_SUPPLY_CAPACITY_LEVEL_FULL] = "Full",
};
-static const char * const power_supply_capacity_level_text[] = {
- "Unknown", "Critical", "Low", "Normal", "High", "Full"
+static const char * const POWER_SUPPLY_SCOPE_TEXT[] = {
+ [POWER_SUPPLY_SCOPE_UNKNOWN] = "Unknown",
+ [POWER_SUPPLY_SCOPE_SYSTEM] = "System",
+ [POWER_SUPPLY_SCOPE_DEVICE] = "Device",
};
-static const char * const power_supply_scope_text[] = {
- "Unknown", "System", "Device"
+static struct power_supply_attr power_supply_attrs[] = {
+ /* Properties of type `int' */
+ POWER_SUPPLY_ENUM_ATTR(STATUS),
+ POWER_SUPPLY_ENUM_ATTR(CHARGE_TYPE),
+ POWER_SUPPLY_ENUM_ATTR(HEALTH),
+ POWER_SUPPLY_ATTR(PRESENT),
+ POWER_SUPPLY_ATTR(ONLINE),
+ POWER_SUPPLY_ATTR(AUTHENTIC),
+ POWER_SUPPLY_ENUM_ATTR(TECHNOLOGY),
+ POWER_SUPPLY_ATTR(CYCLE_COUNT),
+ POWER_SUPPLY_ATTR(VOLTAGE_MAX),
+ POWER_SUPPLY_ATTR(VOLTAGE_MIN),
+ POWER_SUPPLY_ATTR(VOLTAGE_MAX_DESIGN),
+ POWER_SUPPLY_ATTR(VOLTAGE_MIN_DESIGN),
+ POWER_SUPPLY_ATTR(VOLTAGE_NOW),
+ POWER_SUPPLY_ATTR(VOLTAGE_AVG),
+ POWER_SUPPLY_ATTR(VOLTAGE_OCV),
+ POWER_SUPPLY_ATTR(VOLTAGE_BOOT),
+ POWER_SUPPLY_ATTR(CURRENT_MAX),
+ POWER_SUPPLY_ATTR(CURRENT_NOW),
+ POWER_SUPPLY_ATTR(CURRENT_AVG),
+ POWER_SUPPLY_ATTR(CURRENT_BOOT),
+ POWER_SUPPLY_ATTR(POWER_NOW),
+ POWER_SUPPLY_ATTR(POWER_AVG),
+ POWER_SUPPLY_ATTR(CHARGE_FULL_DESIGN),
+ POWER_SUPPLY_ATTR(CHARGE_EMPTY_DESIGN),
+ POWER_SUPPLY_ATTR(CHARGE_FULL),
+ POWER_SUPPLY_ATTR(CHARGE_EMPTY),
+ POWER_SUPPLY_ATTR(CHARGE_NOW),
+ POWER_SUPPLY_ATTR(CHARGE_AVG),
+ POWER_SUPPLY_ATTR(CHARGE_COUNTER),
+ POWER_SUPPLY_ATTR(CONSTANT_CHARGE_CURRENT),
+ POWER_SUPPLY_ATTR(CONSTANT_CHARGE_CURRENT_MAX),
+ POWER_SUPPLY_ATTR(CONSTANT_CHARGE_VOLTAGE),
+ POWER_SUPPLY_ATTR(CONSTANT_CHARGE_VOLTAGE_MAX),
+ POWER_SUPPLY_ATTR(CHARGE_CONTROL_LIMIT),
+ POWER_SUPPLY_ATTR(CHARGE_CONTROL_LIMIT_MAX),
+ POWER_SUPPLY_ATTR(CHARGE_CONTROL_START_THRESHOLD),
+ POWER_SUPPLY_ATTR(CHARGE_CONTROL_END_THRESHOLD),
+ POWER_SUPPLY_ATTR(INPUT_CURRENT_LIMIT),
+ POWER_SUPPLY_ATTR(INPUT_VOLTAGE_LIMIT),
+ POWER_SUPPLY_ATTR(INPUT_POWER_LIMIT),
+ POWER_SUPPLY_ATTR(ENERGY_FULL_DESIGN),
+ POWER_SUPPLY_ATTR(ENERGY_EMPTY_DESIGN),
+ POWER_SUPPLY_ATTR(ENERGY_FULL),
+ POWER_SUPPLY_ATTR(ENERGY_EMPTY),
+ POWER_SUPPLY_ATTR(ENERGY_NOW),
+ POWER_SUPPLY_ATTR(ENERGY_AVG),
+ POWER_SUPPLY_ATTR(CAPACITY),
+ POWER_SUPPLY_ATTR(CAPACITY_ALERT_MIN),
+ POWER_SUPPLY_ATTR(CAPACITY_ALERT_MAX),
+ POWER_SUPPLY_ATTR(CAPACITY_ERROR_MARGIN),
+ POWER_SUPPLY_ENUM_ATTR(CAPACITY_LEVEL),
+ POWER_SUPPLY_ATTR(TEMP),
+ POWER_SUPPLY_ATTR(TEMP_MAX),
+ POWER_SUPPLY_ATTR(TEMP_MIN),
+ POWER_SUPPLY_ATTR(TEMP_ALERT_MIN),
+ POWER_SUPPLY_ATTR(TEMP_ALERT_MAX),
+ POWER_SUPPLY_ATTR(TEMP_AMBIENT),
+ POWER_SUPPLY_ATTR(TEMP_AMBIENT_ALERT_MIN),
+ POWER_SUPPLY_ATTR(TEMP_AMBIENT_ALERT_MAX),
+ POWER_SUPPLY_ATTR(TIME_TO_EMPTY_NOW),
+ POWER_SUPPLY_ATTR(TIME_TO_EMPTY_AVG),
+ POWER_SUPPLY_ATTR(TIME_TO_FULL_NOW),
+ POWER_SUPPLY_ATTR(TIME_TO_FULL_AVG),
+ POWER_SUPPLY_ENUM_ATTR(TYPE),
+ POWER_SUPPLY_ATTR(USB_TYPE),
+ POWER_SUPPLY_ENUM_ATTR(SCOPE),
+ POWER_SUPPLY_ATTR(PRECHARGE_CURRENT),
+ POWER_SUPPLY_ATTR(CHARGE_TERM_CURRENT),
+ POWER_SUPPLY_ATTR(CALIBRATE),
+ POWER_SUPPLY_ATTR(MANUFACTURE_YEAR),
+ POWER_SUPPLY_ATTR(MANUFACTURE_MONTH),
+ POWER_SUPPLY_ATTR(MANUFACTURE_DAY),
+ /* Properties of type `const char *' */
+ POWER_SUPPLY_ATTR(MODEL_NAME),
+ POWER_SUPPLY_ATTR(MANUFACTURER),
+ POWER_SUPPLY_ATTR(SERIAL_NUMBER),
};
+static struct attribute *
+__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
+
+static struct power_supply_attr *to_ps_attr(struct device_attribute *attr)
+{
+ return container_of(attr, struct power_supply_attr, dev_attr);
+}
+
+static enum power_supply_property dev_attr_psp(struct device_attribute *attr)
+{
+ return to_ps_attr(attr) - power_supply_attrs;
+}
+
static ssize_t power_supply_show_usb_type(struct device *dev,
- enum power_supply_usb_type *usb_types,
- ssize_t num_usb_types,
+ const struct power_supply_desc *desc,
union power_supply_propval *value,
char *buf)
{
@@ -88,16 +231,16 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
bool match = false;
int i;
- for (i = 0; i < num_usb_types; ++i) {
- usb_type = usb_types[i];
+ for (i = 0; i < desc->num_usb_types; ++i) {
+ usb_type = desc->usb_types[i];
if (value->intval == usb_type) {
count += sprintf(buf + count, "[%s] ",
- power_supply_usb_type_text[usb_type]);
+ POWER_SUPPLY_USB_TYPE_TEXT[usb_type]);
match = true;
} else {
count += sprintf(buf + count, "%s ",
- power_supply_usb_type_text[usb_type]);
+ POWER_SUPPLY_USB_TYPE_TEXT[usb_type]);
}
}
@@ -117,7 +260,8 @@ static ssize_t power_supply_show_property(struct device *dev,
char *buf) {
ssize_t ret;
struct power_supply *psy = dev_get_drvdata(dev);
- enum power_supply_property psp = attr - power_supply_attrs;
+ struct power_supply_attr *ps_attr = to_ps_attr(attr);
+ enum power_supply_property psp = dev_attr_psp(attr);
union power_supply_propval value;
if (psp == POWER_SUPPLY_PROP_TYPE) {
@@ -137,39 +281,15 @@ static ssize_t power_supply_show_property(struct device *dev,
}
}
+ if (ps_attr->text_values_len > 0 &&
+ value.intval < ps_attr->text_values_len && value.intval >= 0) {
+ return sprintf(buf, "%s\n", ps_attr->text_values[value.intval]);
+ }
+
switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- ret = sprintf(buf, "%s\n",
- power_supply_status_text[value.intval]);
- break;
- case POWER_SUPPLY_PROP_CHARGE_TYPE:
- ret = sprintf(buf, "%s\n",
- power_supply_charge_type_text[value.intval]);
- break;
- case POWER_SUPPLY_PROP_HEALTH:
- ret = sprintf(buf, "%s\n",
- power_supply_health_text[value.intval]);
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- ret = sprintf(buf, "%s\n",
- power_supply_technology_text[value.intval]);
- break;
- case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
- ret = sprintf(buf, "%s\n",
- power_supply_capacity_level_text[value.intval]);
- break;
- case POWER_SUPPLY_PROP_TYPE:
- ret = sprintf(buf, "%s\n",
- power_supply_type_text[value.intval]);
- break;
case POWER_SUPPLY_PROP_USB_TYPE:
- ret = power_supply_show_usb_type(dev, psy->desc->usb_types,
- psy->desc->num_usb_types,
- &value, buf);
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- ret = sprintf(buf, "%s\n",
- power_supply_scope_text[value.intval]);
+ ret = power_supply_show_usb_type(dev, psy->desc,
+ &value, buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = sprintf(buf, "%s\n", value.strval);
@@ -186,30 +306,14 @@ static ssize_t power_supply_store_property(struct device *dev,
const char *buf, size_t count) {
ssize_t ret;
struct power_supply *psy = dev_get_drvdata(dev);
- enum power_supply_property psp = attr - power_supply_attrs;
+ struct power_supply_attr *ps_attr = to_ps_attr(attr);
+ enum power_supply_property psp = dev_attr_psp(attr);
union power_supply_propval value;
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- ret = sysfs_match_string(power_supply_status_text, buf);
- break;
- case POWER_SUPPLY_PROP_CHARGE_TYPE:
- ret = sysfs_match_string(power_supply_charge_type_text, buf);
- break;
- case POWER_SUPPLY_PROP_HEALTH:
- ret = sysfs_match_string(power_supply_health_text, buf);
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- ret = sysfs_match_string(power_supply_technology_text, buf);
- break;
- case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
- ret = sysfs_match_string(power_supply_capacity_level_text, buf);
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- ret = sysfs_match_string(power_supply_scope_text, buf);
- break;
- default:
- ret = -EINVAL;
+ ret = -EINVAL;
+ if (ps_attr->text_values_len > 0) {
+ ret = __sysfs_match_string(ps_attr->text_values,
+ ps_attr->text_values_len, buf);
}
/*
@@ -235,86 +339,6 @@ static ssize_t power_supply_store_property(struct device *dev,
return count;
}
-/* Must be in the same order as POWER_SUPPLY_PROP_* */
-static struct device_attribute power_supply_attrs[] = {
- /* Properties of type `int' */
- POWER_SUPPLY_ATTR(status),
- POWER_SUPPLY_ATTR(charge_type),
- POWER_SUPPLY_ATTR(health),
- POWER_SUPPLY_ATTR(present),
- POWER_SUPPLY_ATTR(online),
- POWER_SUPPLY_ATTR(authentic),
- POWER_SUPPLY_ATTR(technology),
- POWER_SUPPLY_ATTR(cycle_count),
- POWER_SUPPLY_ATTR(voltage_max),
- POWER_SUPPLY_ATTR(voltage_min),
- POWER_SUPPLY_ATTR(voltage_max_design),
- POWER_SUPPLY_ATTR(voltage_min_design),
- POWER_SUPPLY_ATTR(voltage_now),
- POWER_SUPPLY_ATTR(voltage_avg),
- POWER_SUPPLY_ATTR(voltage_ocv),
- POWER_SUPPLY_ATTR(voltage_boot),
- POWER_SUPPLY_ATTR(current_max),
- POWER_SUPPLY_ATTR(current_now),
- POWER_SUPPLY_ATTR(current_avg),
- POWER_SUPPLY_ATTR(current_boot),
- POWER_SUPPLY_ATTR(power_now),
- POWER_SUPPLY_ATTR(power_avg),
- POWER_SUPPLY_ATTR(charge_full_design),
- POWER_SUPPLY_ATTR(charge_empty_design),
- POWER_SUPPLY_ATTR(charge_full),
- POWER_SUPPLY_ATTR(charge_empty),
- POWER_SUPPLY_ATTR(charge_now),
- POWER_SUPPLY_ATTR(charge_avg),
- POWER_SUPPLY_ATTR(charge_counter),
- POWER_SUPPLY_ATTR(constant_charge_current),
- POWER_SUPPLY_ATTR(constant_charge_current_max),
- POWER_SUPPLY_ATTR(constant_charge_voltage),
- POWER_SUPPLY_ATTR(constant_charge_voltage_max),
- POWER_SUPPLY_ATTR(charge_control_limit),
- POWER_SUPPLY_ATTR(charge_control_limit_max),
- POWER_SUPPLY_ATTR(charge_control_start_threshold),
- POWER_SUPPLY_ATTR(charge_control_end_threshold),
- POWER_SUPPLY_ATTR(input_current_limit),
- POWER_SUPPLY_ATTR(input_voltage_limit),
- POWER_SUPPLY_ATTR(input_power_limit),
- POWER_SUPPLY_ATTR(energy_full_design),
- POWER_SUPPLY_ATTR(energy_empty_design),
- POWER_SUPPLY_ATTR(energy_full),
- POWER_SUPPLY_ATTR(energy_empty),
- POWER_SUPPLY_ATTR(energy_now),
- POWER_SUPPLY_ATTR(energy_avg),
- POWER_SUPPLY_ATTR(capacity),
- POWER_SUPPLY_ATTR(capacity_alert_min),
- POWER_SUPPLY_ATTR(capacity_alert_max),
- POWER_SUPPLY_ATTR(capacity_level),
- POWER_SUPPLY_ATTR(temp),
- POWER_SUPPLY_ATTR(temp_max),
- POWER_SUPPLY_ATTR(temp_min),
- POWER_SUPPLY_ATTR(temp_alert_min),
- POWER_SUPPLY_ATTR(temp_alert_max),
- POWER_SUPPLY_ATTR(temp_ambient),
- POWER_SUPPLY_ATTR(temp_ambient_alert_min),
- POWER_SUPPLY_ATTR(temp_ambient_alert_max),
- POWER_SUPPLY_ATTR(time_to_empty_now),
- POWER_SUPPLY_ATTR(time_to_empty_avg),
- POWER_SUPPLY_ATTR(time_to_full_now),
- POWER_SUPPLY_ATTR(time_to_full_avg),
- POWER_SUPPLY_ATTR(type),
- POWER_SUPPLY_ATTR(usb_type),
- POWER_SUPPLY_ATTR(scope),
- POWER_SUPPLY_ATTR(precharge_current),
- POWER_SUPPLY_ATTR(charge_term_current),
- POWER_SUPPLY_ATTR(calibrate),
- /* Properties of type `const char *' */
- POWER_SUPPLY_ATTR(model_name),
- POWER_SUPPLY_ATTR(manufacturer),
- POWER_SUPPLY_ATTR(serial_number),
-};
-
-static struct attribute *
-__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
-
static umode_t power_supply_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int attrno)
@@ -324,6 +348,9 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj,
umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
int i;
+ if (!power_supply_attrs[attrno].prop_name)
+ return 0;
+
if (attrno == POWER_SUPPLY_PROP_TYPE)
return mode;
@@ -352,31 +379,69 @@ static const struct attribute_group *power_supply_attr_groups[] = {
NULL,
};
+static void str_to_lower(char *str)
+{
+ while (*str) {
+ *str = tolower(*str);
+ str++;
+ }
+}
+
void power_supply_init_attrs(struct device_type *dev_type)
{
int i;
dev_type->groups = power_supply_attr_groups;
- for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
- __power_supply_attrs[i] = &power_supply_attrs[i].attr;
-}
+ for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) {
+ struct device_attribute *attr;
-static char *kstruprdup(const char *str, gfp_t gfp)
-{
- char *ret, *ustr;
+ if (!power_supply_attrs[i].prop_name) {
+ pr_warn("%s: Property %d skipped because is is missing from power_supply_attrs\n",
+ __func__, i);
+ sprintf(power_supply_attrs[i].attr_name, "_err_%d", i);
+ } else {
+ str_to_lower(power_supply_attrs[i].attr_name);
+ }
- ustr = ret = kmalloc(strlen(str) + 1, gfp);
+ attr = &power_supply_attrs[i].dev_attr;
- if (!ret)
- return NULL;
+ attr->attr.name = power_supply_attrs[i].attr_name;
+ attr->show = power_supply_show_property;
+ attr->store = power_supply_store_property;
+ __power_supply_attrs[i] = &attr->attr;
+ }
+}
+
+static int add_prop_uevent(struct device *dev, struct kobj_uevent_env *env,
+ enum power_supply_property prop, char *prop_buf)
+{
+ int ret = 0;
+ struct power_supply_attr *pwr_attr;
+ struct device_attribute *dev_attr;
+ char *line;
+
+ pwr_attr = &power_supply_attrs[prop];
+ dev_attr = &pwr_attr->dev_attr;
+
+ ret = power_supply_show_property(dev, dev_attr, prop_buf);
+ if (ret == -ENODEV || ret == -ENODATA) {
+ /*
+ * When a battery is absent, we expect -ENODEV. Don't abort;
+ * send the uevent with at least the PRESENT=0 property
+ */
+ return 0;
+ }
- while (*str)
- *ustr++ = toupper(*str++);
+ if (ret < 0)
+ return ret;
- *ustr = 0;
+ line = strchr(prop_buf, '\n');
+ if (line)
+ *line = 0;
- return ret;
+ return add_uevent_var(env, "POWER_SUPPLY_%s=%s",
+ pwr_attr->prop_name, prop_buf);
}
int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -384,7 +449,6 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
struct power_supply *psy = dev_get_drvdata(dev);
int ret = 0, j;
char *prop_buf;
- char *attrname;
if (!psy || !psy->desc) {
dev_dbg(dev, "No power supply yet\n");
@@ -399,35 +463,13 @@ int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env)
if (!prop_buf)
return -ENOMEM;
- for (j = 0; j < psy->desc->num_properties; j++) {
- struct device_attribute *attr;
- char *line;
-
- attr = &power_supply_attrs[psy->desc->properties[j]];
-
- ret = power_supply_show_property(dev, attr, prop_buf);
- if (ret == -ENODEV || ret == -ENODATA) {
- /* When a battery is absent, we expect -ENODEV. Don't abort;
- send the uevent with at least the the PRESENT=0 property */
- ret = 0;
- continue;
- }
-
- if (ret < 0)
- goto out;
-
- line = strchr(prop_buf, '\n');
- if (line)
- *line = 0;
-
- attrname = kstruprdup(attr->attr.name, GFP_KERNEL);
- if (!attrname) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = add_prop_uevent(dev, env, POWER_SUPPLY_PROP_TYPE, prop_buf);
+ if (ret)
+ goto out;
- ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf);
- kfree(attrname);
+ for (j = 0; j < psy->desc->num_properties; j++) {
+ ret = add_prop_uevent(dev, env, psy->desc->properties[j],
+ prop_buf);
if (ret)
goto out;
}
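
The heart of the sysfs rework above is that each attribute now carries an optional string table and can be mapped back to its property index with container_of(). The following is a minimal, self-contained sketch of that pattern, not kernel code: the struct layout and array name mirror the patch, while the stub device_attribute, the local container_of macro and the sample tables are purely illustrative.

/*
 * Sketch only: shows how the inner device_attribute pointer is turned back
 * into the wrapping power_supply_attr and its property index, and how the
 * per-attribute string table replaces the old per-property switch.
 */
#include <stddef.h>
#include <stdio.h>

struct device_attribute { const char *name; };

struct power_supply_attr {
	const char *prop_name;
	struct device_attribute dev_attr;
	const char * const *text_values;
	int text_values_len;
};

static const char * const status_text[] = { "Unknown", "Charging", "Full" };

static struct power_supply_attr attrs[] = {
	[0] = { .prop_name = "STATUS", .text_values = status_text,
		.text_values_len = 3 },
	[1] = { .prop_name = "PRESENT" },
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	/* sysfs hands back only the inner device_attribute pointer */
	struct device_attribute *da = &attrs[0].dev_attr;
	struct power_supply_attr *pa =
		container_of(da, struct power_supply_attr, dev_attr);

	/* array index == property number, as in dev_attr_psp() */
	printf("prop %ld (%s) -> \"%s\"\n",
	       (long)(pa - attrs), pa->prop_name, pa->text_values[1]);
	return 0;
}
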
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 6acd242eed48..83b9924033bd 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/power/sbs-battery.h>
#include <linux/power_supply.h>
@@ -23,9 +23,12 @@
enum {
REG_MANUFACTURER_DATA,
+ REG_BATTERY_MODE,
REG_TEMPERATURE,
REG_VOLTAGE,
- REG_CURRENT,
+ REG_CURRENT_NOW,
+ REG_CURRENT_AVG,
+ REG_MAX_ERR,
REG_CAPACITY,
REG_TIME_TO_EMPTY,
REG_TIME_TO_FULL,
@@ -41,10 +44,15 @@ enum {
REG_DESIGN_CAPACITY_CHARGE,
REG_DESIGN_VOLTAGE_MIN,
REG_DESIGN_VOLTAGE_MAX,
+ REG_CHEMISTRY,
REG_MANUFACTURER,
REG_MODEL_NAME,
+ REG_CHARGE_CURRENT,
+ REG_CHARGE_VOLTAGE,
};
+#define REG_ADDR_MANUFACTURE_DATE 0x1B
+
/* Battery Mode defines */
#define BATTERY_MODE_OFFSET 0x03
#define BATTERY_MODE_CAPACITY_MASK BIT(15)
@@ -52,6 +60,7 @@ enum sbs_capacity_mode {
CAPACITY_MODE_AMPS = 0,
CAPACITY_MODE_WATTS = BATTERY_MODE_CAPACITY_MASK
};
+#define BATTERY_MODE_CHARGER_MASK (1<<14)
/* manufacturer access defines */
#define MANUFACTURER_ACCESS_STATUS 0x0006
@@ -79,12 +88,18 @@ static const struct chip_data {
} sbs_data[] = {
[REG_MANUFACTURER_DATA] =
SBS_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
+ [REG_BATTERY_MODE] =
+ SBS_DATA(-1, 0x03, 0, 65535),
[REG_TEMPERATURE] =
SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
[REG_VOLTAGE] =
SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
- [REG_CURRENT] =
+ [REG_CURRENT_NOW] =
SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
+ [REG_CURRENT_AVG] =
+ SBS_DATA(POWER_SUPPLY_PROP_CURRENT_AVG, 0x0B, -32768, 32767),
+ [REG_MAX_ERR] =
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, 0x0c, 0, 100),
[REG_CAPACITY] =
SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
[REG_REMAINING_CAPACITY] =
@@ -99,6 +114,10 @@ static const struct chip_data {
SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
[REG_TIME_TO_FULL] =
SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
+ [REG_CHARGE_CURRENT] =
+ SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, 0x14, 0, 65535),
+ [REG_CHARGE_VOLTAGE] =
+ SBS_DATA(POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, 0x15, 0, 65535),
[REG_STATUS] =
SBS_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
[REG_CAPACITY_LEVEL] =
@@ -119,10 +138,12 @@ static const struct chip_data {
[REG_MANUFACTURER] =
SBS_DATA(POWER_SUPPLY_PROP_MANUFACTURER, 0x20, 0, 65535),
[REG_MODEL_NAME] =
- SBS_DATA(POWER_SUPPLY_PROP_MODEL_NAME, 0x21, 0, 65535)
+ SBS_DATA(POWER_SUPPLY_PROP_MODEL_NAME, 0x21, 0, 65535),
+ [REG_CHEMISTRY] =
+ SBS_DATA(POWER_SUPPLY_PROP_TECHNOLOGY, 0x22, 0, 65535)
};
-static enum power_supply_property sbs_properties[] = {
+static const enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_HEALTH,
@@ -131,7 +152,9 @@ static enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
@@ -144,13 +167,18 @@ static enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_MODEL_NAME
};
-/* Supports special manufacturer commands from TI BQ20Z75 IC. */
-#define SBS_FLAGS_TI_BQ20Z75 BIT(0)
+/* Supports special manufacturer commands from TI BQ20Z65 and BQ20Z75 ICs. */
+#define SBS_FLAGS_TI_BQ20ZX5 BIT(0)
struct sbs_info {
struct i2c_client *client;
@@ -158,6 +186,7 @@ struct sbs_info {
bool is_present;
struct gpio_desc *gpio_detect;
bool enable_detection;
+ bool charger_broadcasts;
int last_state;
int poll_time;
u32 i2c_retry_count;
@@ -169,8 +198,48 @@ struct sbs_info {
static char model_name[I2C_SMBUS_BLOCK_MAX + 1];
static char manufacturer[I2C_SMBUS_BLOCK_MAX + 1];
+static char chemistry[I2C_SMBUS_BLOCK_MAX + 1];
static bool force_load;
+static int sbs_read_word_data(struct i2c_client *client, u8 address);
+static int sbs_write_word_data(struct i2c_client *client, u8 address, u16 value);
+
+static void sbs_disable_charger_broadcasts(struct sbs_info *chip)
+{
+ int val = sbs_read_word_data(chip->client, BATTERY_MODE_OFFSET);
+ if (val < 0)
+ goto exit;
+
+ val |= BATTERY_MODE_CHARGER_MASK;
+
+ val = sbs_write_word_data(chip->client, BATTERY_MODE_OFFSET, val);
+
+exit:
+ if (val < 0)
+ dev_err(&chip->client->dev,
+ "Failed to disable charger broadcasting: %d\n", val);
+ else
+ dev_dbg(&chip->client->dev, "%s\n", __func__);
+}
+
+static int sbs_update_presence(struct sbs_info *chip, bool is_present)
+{
+ if (chip->is_present == is_present)
+ return 0;
+
+ if (!is_present) {
+ chip->is_present = false;
+ return 0;
+ }
+
+ if (!chip->is_present && is_present && !chip->charger_broadcasts)
+ sbs_disable_charger_broadcasts(chip);
+
+ chip->is_present = true;
+
+ return 0;
+}
+
static int sbs_read_word_data(struct i2c_client *client, u8 address)
{
struct sbs_info *chip = i2c_get_clientdata(client);
@@ -288,15 +357,15 @@ static int sbs_status_correct(struct i2c_client *client, int *intval)
{
int ret;
- ret = sbs_read_word_data(client, sbs_data[REG_CURRENT].addr);
+ ret = sbs_read_word_data(client, sbs_data[REG_CURRENT_NOW].addr);
if (ret < 0)
return ret;
ret = (s16)ret;
- /* Not drawing current means full (cannot be not charging) */
- if (ret == 0)
- *intval = POWER_SUPPLY_STATUS_FULL;
+ /* Not drawing current -> not charging (i.e. idle) */
+ if (*intval != POWER_SUPPLY_STATUS_FULL && ret == 0)
+ *intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
if (*intval == POWER_SUPPLY_STATUS_FULL) {
/* Drawing or providing current when full */
@@ -309,6 +378,17 @@ static int sbs_status_correct(struct i2c_client *client, int *intval)
return 0;
}
+static bool sbs_bat_needs_calibration(struct i2c_client *client)
+{
+ int ret;
+
+ ret = sbs_read_word_data(client, sbs_data[REG_BATTERY_MODE].addr);
+ if (ret < 0)
+ return false;
+
+ return !!(ret & BIT(7));
+}
+
static int sbs_get_battery_presence_and_health(
struct i2c_client *client, enum power_supply_property psp,
union power_supply_propval *val)
@@ -328,9 +408,14 @@ static int sbs_get_battery_presence_and_health(
if (psp == POWER_SUPPLY_PROP_PRESENT)
val->intval = 1; /* battery present */
- else /* POWER_SUPPLY_PROP_HEALTH */
- /* SBS spec doesn't have a general health command. */
- val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ else { /* POWER_SUPPLY_PROP_HEALTH */
+ if (sbs_bat_needs_calibration(client)) {
+ val->intval = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
+ } else {
+ /* SBS spec doesn't have a general health command. */
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ }
return 0;
}
@@ -384,6 +469,8 @@ static int sbs_get_ti_battery_presence_and_health(
val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
else if (ret == 0x0C)
val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (sbs_bat_needs_calibration(client))
+ val->intval = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED;
else
val->intval = POWER_SUPPLY_HEALTH_GOOD;
}
@@ -492,7 +579,10 @@ static void sbs_unit_adjustment(struct i2c_client *client,
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval *= BASE_UNIT_CONVERSION;
@@ -602,6 +692,70 @@ static int sbs_get_property_index(struct i2c_client *client,
return -EINVAL;
}
+static int sbs_get_chemistry(struct i2c_client *client,
+ union power_supply_propval *val)
+{
+ enum power_supply_property psp = POWER_SUPPLY_PROP_TECHNOLOGY;
+ int ret;
+
+ ret = sbs_get_property_index(client, psp);
+ if (ret < 0)
+ return ret;
+
+ ret = sbs_get_battery_string_property(client, ret, psp,
+ chemistry);
+ if (ret < 0)
+ return ret;
+
+ if (!strncasecmp(chemistry, "LION", 4))
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ else if (!strncasecmp(chemistry, "LiP", 3))
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ else if (!strncasecmp(chemistry, "NiCd", 4))
+ val->intval = POWER_SUPPLY_TECHNOLOGY_NiCd;
+ else if (!strncasecmp(chemistry, "NiMH", 4))
+ val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
+ else
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+
+ if (val->intval == POWER_SUPPLY_TECHNOLOGY_UNKNOWN)
+ dev_warn(&client->dev, "Unknown chemistry: %s\n", chemistry);
+
+ return 0;
+}
+
+static int sbs_get_battery_manufacture_date(struct i2c_client *client,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret;
+ u16 day, month, year;
+
+ ret = sbs_read_word_data(client, REG_ADDR_MANUFACTURE_DATE);
+ if (ret < 0)
+ return ret;
+
+ day = ret & GENMASK(4, 0);
+ month = (ret & GENMASK(8, 5)) >> 5;
+ year = ((ret & GENMASK(15, 9)) >> 9) + 1980;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ val->intval = year;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+ val->intval = month;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+ val->intval = day;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int sbs_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -616,7 +770,7 @@ static int sbs_get_property(struct power_supply *psy,
return ret;
if (psp == POWER_SUPPLY_PROP_PRESENT) {
val->intval = ret;
- chip->is_present = val->intval;
+ sbs_update_presence(chip, ret);
return 0;
}
if (ret == 0)
@@ -626,7 +780,7 @@ static int sbs_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
- if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
+ if (chip->flags & SBS_FLAGS_TI_BQ20ZX5)
ret = sbs_get_ti_battery_presence_and_health(client,
psp, val);
else
@@ -639,7 +793,10 @@ static int sbs_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ ret = sbs_get_chemistry(client, val);
+ if (ret < 0)
+ break;
+
goto done; /* don't trigger power_supply_changed()! */
case POWER_SUPPLY_PROP_ENERGY_NOW:
@@ -670,12 +827,16 @@ static int sbs_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CYCLE_COUNT:
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_TEMP:
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN:
ret = sbs_get_property_index(client, psp);
if (ret < 0)
break;
@@ -703,6 +864,12 @@ static int sbs_get_property(struct power_supply *psy,
val->strval = manufacturer;
break;
+ case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
+ case POWER_SUPPLY_PROP_MANUFACTURE_MONTH:
+ case POWER_SUPPLY_PROP_MANUFACTURE_DAY:
+ ret = sbs_get_battery_manufacture_date(client, psp, val);
+ break;
+
default:
dev_err(&client->dev,
"%s: INVALID property\n", __func__);
@@ -714,7 +881,7 @@ static int sbs_get_property(struct power_supply *psy,
if (!chip->gpio_detect &&
chip->is_present != (ret >= 0)) {
- chip->is_present = (ret >= 0);
+ sbs_update_presence(chip, (ret >= 0));
power_supply_changed(chip->power_supply);
}
@@ -745,7 +912,7 @@ static void sbs_supply_changed(struct sbs_info *chip)
ret = gpiod_get_value_cansleep(chip->gpio_detect);
if (ret < 0)
return;
- chip->is_present = ret;
+ sbs_update_presence(chip, ret);
power_supply_changed(battery);
}
@@ -815,8 +982,7 @@ static const struct power_supply_desc sbs_default_desc = {
.external_power_changed = sbs_external_power_changed,
};
-static int sbs_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sbs_probe(struct i2c_client *client)
{
struct sbs_info *chip;
struct power_supply_desc *sbs_desc;
@@ -839,7 +1005,7 @@ static int sbs_probe(struct i2c_client *client,
if (!chip)
return -ENOMEM;
- chip->flags = (u32)(uintptr_t)of_device_get_match_data(&client->dev);
+ chip->flags = (u32)(uintptr_t)device_get_match_data(&client->dev);
chip->client = client;
chip->enable_detection = false;
psy_cfg.of_node = client->dev.of_node;
@@ -850,13 +1016,13 @@ static int sbs_probe(struct i2c_client *client,
/* use pdata if available, fall back to DT properties,
* or hardcoded defaults if not
*/
- rc = of_property_read_u32(client->dev.of_node, "sbs,i2c-retry-count",
- &chip->i2c_retry_count);
+ rc = device_property_read_u32(&client->dev, "sbs,i2c-retry-count",
+ &chip->i2c_retry_count);
if (rc)
chip->i2c_retry_count = 0;
- rc = of_property_read_u32(client->dev.of_node, "sbs,poll-retry-count",
- &chip->poll_retry_count);
+ rc = device_property_read_u32(&client->dev, "sbs,poll-retry-count",
+ &chip->poll_retry_count);
if (rc)
chip->poll_retry_count = 0;
@@ -866,6 +1032,9 @@ static int sbs_probe(struct i2c_client *client,
}
chip->i2c_retry_count = chip->i2c_retry_count + 1;
+ chip->charger_broadcasts = !device_property_read_bool(&client->dev,
+ "sbs,disable-charger-broadcasts");
+
chip->gpio_detect = devm_gpiod_get_optional(&client->dev,
"sbs,battery-detect", GPIOD_IN);
if (IS_ERR(chip->gpio_detect)) {
@@ -950,7 +1119,7 @@ static int sbs_suspend(struct device *dev)
if (chip->poll_time > 0)
cancel_delayed_work_sync(&chip->work);
- if (chip->flags & SBS_FLAGS_TI_BQ20Z75) {
+ if (chip->flags & SBS_FLAGS_TI_BQ20ZX5) {
/* Write to manufacturer access with sleep command. */
ret = sbs_write_word_data(client,
sbs_data[REG_MANUFACTURER_DATA].addr,
@@ -970,6 +1139,7 @@ static SIMPLE_DEV_PM_OPS(sbs_pm_ops, sbs_suspend, NULL);
#endif
static const struct i2c_device_id sbs_id[] = {
+ { "bq20z65", 0 },
{ "bq20z75", 0 },
{ "sbs-battery", 1 },
{}
@@ -979,15 +1149,19 @@ MODULE_DEVICE_TABLE(i2c, sbs_id);
static const struct of_device_id sbs_dt_ids[] = {
{ .compatible = "sbs,sbs-battery" },
{
+ .compatible = "ti,bq20z65",
+ .data = (void *)SBS_FLAGS_TI_BQ20ZX5,
+ },
+ {
.compatible = "ti,bq20z75",
- .data = (void *)SBS_FLAGS_TI_BQ20Z75,
+ .data = (void *)SBS_FLAGS_TI_BQ20ZX5,
},
{ }
};
MODULE_DEVICE_TABLE(of, sbs_dt_ids);
static struct i2c_driver sbs_battery_driver = {
- .probe = sbs_probe,
+ .probe_new = sbs_probe,
.remove = sbs_remove,
.alert = sbs_alert,
.id_table = sbs_id,
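
For reference, the ManufactureDate decoding added to sbs-battery above unpacks a single SBS word: day in bits 4:0, month in bits 8:5 and the year as an offset from 1980 in bits 15:9. A small standalone sketch, with a made-up register value, reproduces the same arithmetic outside the driver:

/* Standalone sketch, not driver code; the raw value is invented. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t raw = (40u << 9) | (6u << 5) | 15u;	/* encodes 2020-06-15 */
	unsigned int day   = raw & 0x1f;		/* GENMASK(4, 0)  */
	unsigned int month = (raw >> 5) & 0x0f;		/* GENMASK(8, 5)  */
	unsigned int year  = (raw >> 9) + 1980;		/* GENMASK(15, 9) */

	printf("%04u-%02u-%02u\n", year, month, day);	/* 2020-06-15 */
	return 0;
}
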
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index a7c8a8453db1..be42e814ea34 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -42,6 +42,8 @@
#define SC27XX_FGU_USER_AREA_SET 0xa0
#define SC27XX_FGU_USER_AREA_CLEAR 0xa4
#define SC27XX_FGU_USER_AREA_STATUS 0xa8
+#define SC27XX_FGU_VOLTAGE_BUF 0xd0
+#define SC27XX_FGU_CURRENT_BUF 0xf0
#define SC27XX_WRITE_SELCLB_EN BIT(0)
#define SC27XX_FGU_CLBCNT_MASK GENMASK(15, 0)
@@ -82,6 +84,7 @@
* @init_clbcnt: the initial coulomb counter
* @max_volt: the maximum constant input voltage in millivolt
* @min_volt: the minimum drained battery voltage in microvolt
+ * @boot_volt: the voltage measured during boot in microvolt
* @table_len: the capacity table length
* @resist_table_len: the resistance table length
* @cur_1000ma_adc: ADC value corresponding to 1000 mA
@@ -107,6 +110,7 @@ struct sc27xx_fgu_data {
int init_clbcnt;
int max_volt;
int min_volt;
+ int boot_volt;
int table_len;
int resist_table_len;
int cur_1000ma_adc;
@@ -319,6 +323,7 @@ static int sc27xx_fgu_get_boot_capacity(struct sc27xx_fgu_data *data, int *cap)
volt = sc27xx_fgu_adc_to_voltage(data, volt);
ocv = volt * 1000 - oci * data->internal_resist;
+ data->boot_volt = ocv;
/*
* Parse the capacity table to look up the correct capacity percent
@@ -376,6 +381,44 @@ static int sc27xx_fgu_get_clbcnt(struct sc27xx_fgu_data *data, int *clb_cnt)
return 0;
}
+static int sc27xx_fgu_get_vol_now(struct sc27xx_fgu_data *data, int *val)
+{
+ int ret;
+ u32 vol;
+
+ ret = regmap_read(data->regmap, data->base + SC27XX_FGU_VOLTAGE_BUF,
+ &vol);
+ if (ret)
+ return ret;
+
+ /*
+ * This is a raw ADC value read from the register, which needs to be
+ * converted to the corresponding voltage value.
+ */
+ *val = sc27xx_fgu_adc_to_voltage(data, vol);
+
+ return 0;
+}
+
+static int sc27xx_fgu_get_cur_now(struct sc27xx_fgu_data *data, int *val)
+{
+ int ret;
+ u32 cur;
+
+ ret = regmap_read(data->regmap, data->base + SC27XX_FGU_CURRENT_BUF,
+ &cur);
+ if (ret)
+ return ret;
+
+ /*
+ * This is a raw ADC value read from the register, which needs to be
+ * converted to the corresponding current value.
+ */
+ *val = sc27xx_fgu_adc_to_current(data, cur - SC27XX_FGU_CUR_BASIC_ADC);
+
+ return 0;
+}
+
static int sc27xx_fgu_get_capacity(struct sc27xx_fgu_data *data, int *cap)
{
int ret, cur_clbcnt, delta_clbcnt, delta_cap, temp;
@@ -577,7 +620,7 @@ static int sc27xx_fgu_get_property(struct power_supply *psy,
val->intval = value;
break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
ret = sc27xx_fgu_get_vbat_vol(data, &value);
if (ret)
goto error;
@@ -601,7 +644,6 @@ static int sc27xx_fgu_get_property(struct power_supply *psy,
val->intval = value;
break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
ret = sc27xx_fgu_get_current(data, &value);
if (ret)
@@ -625,6 +667,26 @@ static int sc27xx_fgu_get_property(struct power_supply *psy,
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = sc27xx_fgu_get_vol_now(data, &value);
+ if (ret)
+ goto error;
+
+ val->intval = value * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = sc27xx_fgu_get_cur_now(data, &value);
+ if (ret)
+ goto error;
+
+ val->intval = value * 1000;
+ break;
+
+ case POWER_SUPPLY_PROP_VOLTAGE_BOOT:
+ val->intval = data->boot_volt;
+ break;
+
default:
ret = -EINVAL;
break;
@@ -656,6 +718,11 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
ret = 0;
break;
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ data->total_cap = val->intval / 1000;
+ ret = 0;
+ break;
+
default:
ret = -EINVAL;
}
@@ -676,7 +743,8 @@ static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
return psp == POWER_SUPPLY_PROP_CAPACITY ||
- psp == POWER_SUPPLY_PROP_CALIBRATE;
+ psp == POWER_SUPPLY_PROP_CALIBRATE ||
+ psp == POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
}
static enum power_supply_property sc27xx_fgu_props[] = {
@@ -688,6 +756,8 @@ static enum power_supply_property sc27xx_fgu_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_BOOT,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
@@ -705,6 +775,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
.set_property = sc27xx_fgu_set_property,
.external_power_changed = sc27xx_fgu_external_power_changed,
.property_is_writeable = sc27xx_fgu_property_is_writeable,
+ .no_thermal = true,
};
static void sc27xx_fgu_adjust_cap(struct sc27xx_fgu_data *data, int cap)
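
The new VOLTAGE_NOW/CURRENT_NOW handlers in the sc27xx fuel gauge follow the usual scaling convention: the buffer registers return raw ADC codes, the driver converts them to millivolt/milliampere values, and the final multiplication by 1000 yields the microvolt/microampere units the power-supply ABI reports. A tiny standalone sketch of that chain, with an invented calibration factor and sample reading:

/* Sketch only; the 12-bit/1.2 V full-scale figures are made up. */
#include <stdio.h>

static int adc_to_mv(unsigned int raw)
{
	return raw * 1200 / 4096;	/* pretend 1.2 V full scale, 12-bit ADC */
}

int main(void)
{
	unsigned int raw = 3500;		/* stand-in for a regmap_read() result */
	int uv = adc_to_mv(raw) * 1000;		/* mV -> uV for the sysfs ABI */

	printf("voltage_now = %d uV\n", uv);
	return 0;
}
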
diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
index c1d124b8be0c..f99026d81f2a 100644
--- a/drivers/power/supply/smb347-charger.c
+++ b/drivers/power/supply/smb347-charger.c
@@ -8,6 +8,7 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
@@ -708,6 +709,9 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
bool handled = false;
int ret;
+ /* The SMB347 needs at least 20ms to set IRQSTAT_E_*IN_UV_IRQ */
+ usleep_range(25000, 35000);
+
ret = regmap_read(smb->regmap, STAT_C, &stat_c);
if (ret < 0) {
dev_warn(smb->dev, "reading STAT_C failed\n");
@@ -1138,6 +1142,7 @@ static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case IRQSTAT_A:
case IRQSTAT_C:
+ case IRQSTAT_D:
case IRQSTAT_E:
case IRQSTAT_F:
case STAT_A:
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 83c45659bc9d..e54aa2d82f50 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -1096,8 +1096,8 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
lpm_priv->tb_cache_internal = NULL;
lpm_priv->tb_cache = NULL;
} else if (tb_cache) {
- if (tb_cache != (void *)_ALIGN_UP((unsigned long)tb_cache, 128)
- || tb_cache_size != _ALIGN_UP(tb_cache_size, 128)) {
+ if (tb_cache != (void *)ALIGN((unsigned long)tb_cache, 128)
+ || tb_cache_size != ALIGN(tb_cache_size, 128)) {
dev_err(sbd_core(), "%s:%u: unaligned tb_cache\n",
__func__, __LINE__);
result = -EINVAL;
@@ -1111,12 +1111,10 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
lpm_priv->tb_cache_internal = kzalloc(
lpm_priv->tb_cache_size + 127, GFP_KERNEL);
if (!lpm_priv->tb_cache_internal) {
- dev_err(sbd_core(), "%s:%u: alloc internal tb_cache "
- "failed\n", __func__, __LINE__);
result = -ENOMEM;
goto fail_malloc;
}
- lpm_priv->tb_cache = (void *)_ALIGN_UP(
+ lpm_priv->tb_cache = (void *)ALIGN(
(unsigned long)lpm_priv->tb_cache_internal, 128);
}
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index ddaa5ea5801a..4ed131eaff51 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -858,13 +858,13 @@ static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
return 0;
}
-struct vuart_bus_priv {
+static struct vuart_bus_priv {
struct ports_bmp *bmp;
unsigned int virq;
struct mutex probe_mutex;
int use_count;
struct ps3_system_bus_device *devices[PORT_COUNT];
-} static vuart_bus_priv;
+} vuart_bus_priv;
/**
* ps3_vuart_irq_handler - first stage interrupt handler
@@ -917,7 +917,6 @@ static int ps3_vuart_bus_interrupt_get(void)
vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL);
if (!vuart_bus_priv.bmp) {
- pr_debug("%s:%d: kzalloc failed.\n", __func__, __LINE__);
result = -ENOMEM;
goto fail_bmp_malloc;
}
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 10af330153b5..451608e960a1 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -572,14 +572,12 @@ static void dma_req_free(struct kref *ref)
struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
refcount);
struct mport_cdev_priv *priv = req->priv;
- unsigned int i;
dma_unmap_sg(req->dmach->device->dev,
req->sgt.sgl, req->sgt.nents, req->dir);
sg_free_table(&req->sgt);
if (req->page_list) {
- for (i = 0; i < req->nr_pages; i++)
- put_page(req->page_list[i]);
+ unpin_user_pages(req->page_list, req->nr_pages);
kfree(req->page_list);
}
@@ -815,7 +813,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
struct mport_dma_req *req;
struct mport_dev *md = priv->md;
struct dma_chan *chan;
- int i, ret;
+ int ret;
int nents;
if (xfer->length == 0)
@@ -862,7 +860,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
goto err_req;
}
- pinned = get_user_pages_fast(
+ pinned = pin_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK,
nr_pages,
dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
@@ -870,7 +868,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
if (pinned != nr_pages) {
if (pinned < 0) {
- rmcd_error("get_user_pages_unlocked err=%ld",
+ rmcd_error("pin_user_pages_fast err=%ld",
pinned);
nr_pages = 0;
} else
@@ -951,8 +949,7 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
err_pg:
if (!req->page_list) {
- for (i = 0; i < nr_pages; i++)
- put_page(page_list[i]);
+ unpin_user_pages(page_list, nr_pages);
kfree(page_list);
}
err_req:
@@ -2384,13 +2381,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
cdev_init(&md->cdev, &mport_fops);
md->cdev.owner = THIS_MODULE;
- ret = cdev_device_add(&md->cdev, &md->dev);
- if (ret) {
- rmcd_error("Failed to register mport %d (err=%d)",
- mport->id, ret);
- goto err_cdev;
- }
-
INIT_LIST_HEAD(&md->doorbells);
spin_lock_init(&md->db_lock);
INIT_LIST_HEAD(&md->portwrites);
@@ -2410,6 +2400,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
#else
md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif
+
+ ret = cdev_device_add(&md->cdev, &md->dev);
+ if (ret) {
+ rmcd_error("Failed to register mport %d (err=%d)",
+ mport->id, ret);
+ goto err_cdev;
+ }
ret = rio_query_mport(mport, &attr);
if (!ret) {
md->properties.flags = attr.flags;
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index fbaed079b299..c4d1731295eb 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -23,6 +23,15 @@ config IMX_REMOTEPROC
It's safe to say N here.
+config INGENIC_VPU_RPROC
+ tristate "Ingenic JZ47xx VPU remoteproc support"
+ depends on MIPS || COMPILE_TEST
+ help
+ Say y or m here to support the VPU in the JZ47xx SoCs from Ingenic.
+
+ This can be either built-in or a loadable module.
+ If unsure say N.
+
config MTK_SCP
tristate "Mediatek SCP support"
depends on ARCH_MEDIATEK
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 0effd3825035..e8b886e511f0 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -10,6 +10,7 @@ remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
+obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o
obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
new file mode 100644
index 000000000000..189020d77b25
--- /dev/null
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ingenic JZ47xx remoteproc driver
+ * Copyright 2019, Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+
+#define REG_AUX_CTRL 0x0
+#define REG_AUX_MSG_ACK 0x10
+#define REG_AUX_MSG 0x14
+#define REG_CORE_MSG_ACK 0x18
+#define REG_CORE_MSG 0x1C
+
+#define AUX_CTRL_SLEEP BIT(31)
+#define AUX_CTRL_MSG_IRQ_EN BIT(3)
+#define AUX_CTRL_NMI_RESETS BIT(2)
+#define AUX_CTRL_NMI BIT(1)
+#define AUX_CTRL_SW_RESET BIT(0)
+
+struct vpu_mem_map {
+ const char *name;
+ unsigned int da;
+};
+
+struct vpu_mem_info {
+ const struct vpu_mem_map *map;
+ unsigned long len;
+ void __iomem *base;
+};
+
+static const struct vpu_mem_map vpu_mem_map[] = {
+ { "tcsm0", 0x132b0000 },
+ { "tcsm1", 0xf4000000 },
+ { "sram", 0x132f0000 },
+};
+
+/**
+ * struct vpu - Ingenic VPU remoteproc private structure
+ * @irq: interrupt number
+ * @clks: pointers to the VPU and AUX clocks
+ * @aux_base: raw pointer to the AUX interface registers
+ * @mem_info: array of struct vpu_mem_info, which contains the mapping info of
+ * each of the external memories
+ * @dev: private pointer to the device
+ */
+struct vpu {
+ int irq;
+ struct clk_bulk_data clks[2];
+ void __iomem *aux_base;
+ struct vpu_mem_info mem_info[ARRAY_SIZE(vpu_mem_map)];
+ struct device *dev;
+};
+
+static int ingenic_rproc_start(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ u32 ctrl;
+
+ enable_irq(vpu->irq);
+
+ /* Reset the AUX and enable message IRQ */
+ ctrl = AUX_CTRL_NMI_RESETS | AUX_CTRL_NMI | AUX_CTRL_MSG_IRQ_EN;
+ writel(ctrl, vpu->aux_base + REG_AUX_CTRL);
+
+ return 0;
+}
+
+static int ingenic_rproc_stop(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ disable_irq(vpu->irq);
+
+ /* Keep AUX in reset mode */
+ writel(AUX_CTRL_SW_RESET, vpu->aux_base + REG_AUX_CTRL);
+
+ return 0;
+}
+
+static void ingenic_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct vpu *vpu = rproc->priv;
+
+ writel(vqid, vpu->aux_base + REG_CORE_MSG);
+}
+
+static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
+{
+ struct vpu *vpu = rproc->priv;
+ void __iomem *va = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
+ const struct vpu_mem_info *info = &vpu->mem_info[i];
+ const struct vpu_mem_map *map = info->map;
+
+ if (da >= map->da && (da + len) < (map->da + info->len)) {
+ va = info->base + (da - map->da);
+ break;
+ }
+ }
+
+ return (__force void *)va;
+}
+
+static struct rproc_ops ingenic_rproc_ops = {
+ .start = ingenic_rproc_start,
+ .stop = ingenic_rproc_stop,
+ .kick = ingenic_rproc_kick,
+ .da_to_va = ingenic_rproc_da_to_va,
+};
+
+static irqreturn_t vpu_interrupt(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct vpu *vpu = rproc->priv;
+ u32 vring;
+
+ vring = readl(vpu->aux_base + REG_AUX_MSG);
+
+ /* Ack the interrupt */
+ writel(0, vpu->aux_base + REG_AUX_MSG_ACK);
+
+ return rproc_vq_interrupt(rproc, vring);
+}
+
+static void ingenic_rproc_disable_clks(void *data)
+{
+ struct vpu *vpu = data;
+
+ pm_runtime_resume(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
+}
+
+static int ingenic_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+ struct rproc *rproc;
+ struct vpu *vpu;
+ unsigned int i;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, "ingenic-vpu",
+ &ingenic_rproc_ops, NULL, sizeof(*vpu));
+ if (!rproc)
+ return -ENOMEM;
+
+ vpu = rproc->priv;
+ vpu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, vpu);
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
+ vpu->aux_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(vpu->aux_base)) {
+ dev_err(dev, "Failed to ioremap\n");
+ return PTR_ERR(vpu->aux_base);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ vpu_mem_map[i].name);
+
+ vpu->mem_info[i].base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(vpu->mem_info[i].base)) {
+ ret = PTR_ERR(vpu->mem_info[i].base);
+ dev_err(dev, "Failed to ioremap\n");
+ return ret;
+ }
+
+ vpu->mem_info[i].len = resource_size(mem);
+ vpu->mem_info[i].map = &vpu_mem_map[i];
+ }
+
+ vpu->clks[0].id = "vpu";
+ vpu->clks[1].id = "aux";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret) {
+ dev_err(dev, "Failed to get clocks\n");
+ return ret;
+ }
+
+ vpu->irq = platform_get_irq(pdev, 0);
+ if (vpu->irq < 0)
+ return vpu->irq;
+
+ ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, 0, "VPU", rproc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ disable_irq(vpu->irq);
+
+ /* The clocks must be enabled for the firmware to be loaded in TCSM */
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret) {
+ dev_err(dev, "Unable to start clocks\n");
+ return ret;
+ }
+
+ pm_runtime_irq_safe(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = devm_add_action_or_reset(dev, ingenic_rproc_disable_clks, vpu);
+ if (ret) {
+ dev_err(dev, "Unable to register action\n");
+ goto out_pm_put;
+ }
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "Failed to register remote processor\n");
+ goto out_pm_put;
+ }
+
+out_pm_put:
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static const struct of_device_id ingenic_rproc_of_matches[] = {
+ { .compatible = "ingenic,jz4770-vpu-rproc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
+
+static int __maybe_unused ingenic_rproc_suspend(struct device *dev)
+{
+ struct vpu *vpu = dev_get_drvdata(dev);
+
+ clk_bulk_disable(ARRAY_SIZE(vpu->clks), vpu->clks);
+
+ return 0;
+}
+
+static int __maybe_unused ingenic_rproc_resume(struct device *dev)
+{
+ struct vpu *vpu = dev_get_drvdata(dev);
+
+ return clk_bulk_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+}
+
+static const struct dev_pm_ops __maybe_unused ingenic_rproc_pm = {
+ SET_RUNTIME_PM_OPS(ingenic_rproc_suspend, ingenic_rproc_resume, NULL)
+};
+
+static struct platform_driver ingenic_rproc_driver = {
+ .probe = ingenic_rproc_probe,
+ .driver = {
+ .name = "ingenic-vpu",
+#ifdef CONFIG_PM
+ .pm = &ingenic_rproc_pm,
+#endif
+ .of_match_table = ingenic_rproc_of_matches,
+ },
+};
+module_platform_driver(ingenic_rproc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ47xx Remote Processor control driver");
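
The da_to_va() callback in the new Ingenic driver is a plain window lookup: a device address is only usable if the whole [da, da + len) range fits inside one of the statically described memories, and the host-side pointer is that window's base plus the offset. A self-contained sketch of the same lookup, where the window names and device addresses mirror the driver table but the sizes are invented and plain arrays stand in for the ioremapped regions:

/* Standalone sketch, not driver code. */
#include <stdint.h>
#include <stdio.h>

struct mem_window {
	const char *name;
	uint32_t da;		/* device-side base address */
	uint32_t len;		/* window length */
	unsigned char *base;	/* host-side mapping (ioremap result in the driver) */
};

static unsigned char tcsm0[0x4000], sram[0x1000];

static struct mem_window windows[] = {
	{ "tcsm0", 0x132b0000, sizeof(tcsm0), tcsm0 },
	{ "sram",  0x132f0000, sizeof(sram),  sram  },
};

static void *da_to_va(uint32_t da, uint32_t len)
{
	for (unsigned int i = 0; i < sizeof(windows) / sizeof(windows[0]); i++) {
		struct mem_window *w = &windows[i];

		/* the whole [da, da + len) range must fall inside one window */
		if (da >= w->da && da + len < w->da + w->len)
			return w->base + (da - w->da);
	}
	return NULL;	/* not backed by any window */
}

int main(void)
{
	printf("0x132b0100 -> %p\n", da_to_va(0x132b0100, 16));
	printf("0x20000000 -> %p\n", da_to_va(0x20000000, 16));
	return 0;
}
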
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index 2bead57c9cf9..ac13e7b046a6 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -132,8 +132,8 @@ static int scp_ipi_init(struct mtk_scp *scp)
(struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset);
scp->send_buf =
(struct mtk_share_obj __iomem *)(scp->sram_base + send_offset);
- memset_io(scp->recv_buf, 0, sizeof(scp->recv_buf));
- memset_io(scp->send_buf, 0, sizeof(scp->send_buf));
+ memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
+ memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
return 0;
}
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 60650bcc8c67..9028cea2d81e 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -42,12 +42,21 @@ static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed)
glink->edge = NULL;
}
+static void glink_subdev_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
+
+ qcom_glink_ssr_notify(glink->ssr_name);
+}
+
/**
* qcom_add_glink_subdev() - try to add a GLINK subdevice to rproc
* @rproc: rproc handle to parent the subdevice
* @glink: reference to a GLINK subdev context
+ * @ssr_name: identifier of the associated remoteproc for ssr notifications
*/
-void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
+void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
+ const char *ssr_name)
{
struct device *dev = &rproc->dev;
@@ -55,9 +64,14 @@ void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
if (!glink->node)
return;
+ glink->ssr_name = kstrdup_const(ssr_name, GFP_KERNEL);
+ if (!glink->ssr_name)
+ return;
+
glink->dev = dev;
glink->subdev.start = glink_subdev_start;
glink->subdev.stop = glink_subdev_stop;
+ glink->subdev.unprepare = glink_subdev_unprepare;
rproc_add_subdev(rproc, &glink->subdev);
}
@@ -74,6 +88,7 @@ void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glin
return;
rproc_remove_subdev(rproc, &glink->subdev);
+ kfree_const(glink->ssr_name);
of_node_put(glink->node);
}
EXPORT_SYMBOL_GPL(qcom_remove_glink_subdev);
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 58de71e4781c..34e5188187dc 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -11,6 +11,8 @@ struct qcom_sysmon;
struct qcom_rproc_glink {
struct rproc_subdev subdev;
+ const char *ssr_name;
+
struct device *dev;
struct device_node *node;
struct qcom_glink *edge;
@@ -30,7 +32,8 @@ struct qcom_rproc_ssr {
const char *name;
};
-void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink);
+void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
+ const char *ssr_name);
void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink);
int qcom_register_dump_segments(struct rproc *rproc, const struct firmware *fw);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 24a3db961d5e..d2a2574dcf35 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -431,6 +431,7 @@ static int adsp_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
@@ -460,7 +461,7 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto disable_pm;
- qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 5475d4f808a8..feb70283b6a2 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -69,13 +69,9 @@
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
-#define NAV_AXI_HALTREQ_BIT BIT(0)
-#define NAV_AXI_HALTACK_BIT BIT(1)
-#define NAV_AXI_IDLE_BIT BIT(2)
#define AXI_GATING_VALID_OVERRIDE BIT(0)
#define HALT_ACK_TIMEOUT_US 100000
-#define NAV_HALT_ACK_TIMEOUT_US 200
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
@@ -143,7 +139,7 @@ struct rproc_hexagon_res {
int version;
bool need_mem_protection;
bool has_alt_reset;
- bool has_halt_nav;
+ bool has_spare_reg;
};
struct q6v5 {
@@ -154,13 +150,11 @@ struct q6v5 {
void __iomem *rmb_base;
struct regmap *halt_map;
- struct regmap *halt_nav_map;
struct regmap *conn_map;
u32 halt_q6;
u32 halt_modem;
u32 halt_nc;
- u32 halt_nav;
u32 conn_box;
struct reset_control *mss_restart;
@@ -196,7 +190,6 @@ struct q6v5 {
phys_addr_t mpss_phys;
phys_addr_t mpss_reloc;
- void *mpss_region;
size_t mpss_size;
struct qcom_rproc_glink glink_subdev;
@@ -206,7 +199,7 @@ struct q6v5 {
struct qcom_sysmon *sysmon;
bool need_mem_protection;
bool has_alt_reset;
- bool has_halt_nav;
+ bool has_spare_reg;
int mpss_perm;
int mba_perm;
const char *hexagon_mdt_image;
@@ -427,21 +420,19 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
reset_control_assert(qproc->pdc_reset);
ret = reset_control_reset(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
- } else if (qproc->has_halt_nav) {
+ } else if (qproc->has_spare_reg) {
/*
* When the AXI pipeline is being reset with the Q6 modem partly
* operational there is possibility of AXI valid signal to
* glitch, leading to spurious transactions and Q6 hangs. A work
* around is employed by asserting the AXI_GATING_VALID_OVERRIDE
- * BIT before triggering Q6 MSS reset. Both the HALTREQ and
- * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
- * followed by a MSS deassert, while holding the PDC reset.
+ * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
+ * is withdrawn post MSS assert followed by a MSS deassert,
+ * while holding the PDC reset.
*/
reset_control_assert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
AXI_GATING_VALID_OVERRIDE, 1);
- regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
- NAV_AXI_HALTREQ_BIT, 0);
reset_control_assert(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
regmap_update_bits(qproc->conn_map, qproc->conn_box,
@@ -464,7 +455,7 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
reset_control_deassert(qproc->pdc_reset);
- } else if (qproc->has_halt_nav) {
+ } else if (qproc->has_spare_reg) {
ret = reset_control_reset(qproc->mss_restart);
} else {
ret = reset_control_deassert(qproc->mss_restart);
@@ -761,32 +752,6 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
-static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
- struct regmap *halt_map,
- u32 offset)
-{
- unsigned int val;
- int ret;
-
- /* Check if we're already idle */
- ret = regmap_read(halt_map, offset, &val);
- if (!ret && (val & NAV_AXI_IDLE_BIT))
- return;
-
- /* Assert halt request */
- regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
- NAV_AXI_HALTREQ_BIT);
-
- /* Wait for halt ack*/
- regmap_read_poll_timeout(halt_map, offset, val,
- (val & NAV_AXI_HALTACK_BIT),
- 5, NAV_HALT_ACK_TIMEOUT_US);
-
- ret = regmap_read(halt_map, offset, &val);
- if (ret || !(val & NAV_AXI_IDLE_BIT))
- dev_err(qproc->dev, "port failed halt\n");
-}
-
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
@@ -951,9 +916,6 @@ static int q6v5_mba_load(struct q6v5 *qproc)
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
- if (qproc->has_halt_nav)
- q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
- qproc->halt_nav);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
reclaim_mba:
@@ -1001,9 +963,6 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
- if (qproc->has_halt_nav)
- q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
- qproc->halt_nav);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
if (qproc->version == MSS_MSM8996) {
/*
@@ -1156,7 +1115,13 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
- ptr = qproc->mpss_region + offset;
+ ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
+ if (!ptr) {
+ dev_err(qproc->dev,
+ "unable to map memory region: %pa+%zx-%x\n",
+ &qproc->mpss_phys, offset, phdr->p_memsz);
+ goto release_firmware;
+ }
if (phdr->p_filesz && phdr->p_offset < fw->size) {
/* Firmware is large enough to be non-split */
@@ -1165,6 +1130,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
"failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
+ iounmap(ptr);
goto release_firmware;
}
@@ -1175,6 +1141,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
ret = request_firmware(&seg_fw, fw_name, qproc->dev);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", fw_name);
+ iounmap(ptr);
goto release_firmware;
}
@@ -1187,6 +1154,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
memset(ptr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
+ iounmap(ptr);
size += phdr->p_memsz;
code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
@@ -1236,7 +1204,8 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
int ret = 0;
struct q6v5 *qproc = rproc->priv;
unsigned long mask = BIT((unsigned long)segment->priv);
- void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
+ int offset = segment->da - qproc->mpss_reloc;
+ void *ptr = NULL;
/* Unlock mba before copying segments */
if (!qproc->dump_mba_loaded) {
@@ -1250,10 +1219,15 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc,
}
}
- if (!ptr || ret)
- memset(dest, 0xff, segment->size);
- else
+ if (!ret)
+ ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);
+
+ if (ptr) {
memcpy(dest, ptr, segment->size);
+ iounmap(ptr);
+ } else {
+ memset(dest, 0xff, segment->size);
+ }
qproc->dump_segment_mask |= mask;
@@ -1327,18 +1301,6 @@ static int q6v5_stop(struct rproc *rproc)
return 0;
}
-static void *q6v5_da_to_va(struct rproc *rproc, u64 da, size_t len)
-{
- struct q6v5 *qproc = rproc->priv;
- int offset;
-
- offset = da - qproc->mpss_reloc;
- if (offset < 0 || offset + len > qproc->mpss_size)
- return NULL;
-
- return qproc->mpss_region + offset;
-}
-
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
const struct firmware *mba_fw)
{
@@ -1357,6 +1319,8 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
return ret;
}
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
qproc->dump_complete_mask = 0;
@@ -1384,7 +1348,6 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
- .da_to_va = q6v5_da_to_va,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
};
@@ -1432,36 +1395,12 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
qproc->halt_modem = args.args[1];
qproc->halt_nc = args.args[2];
- if (qproc->has_halt_nav) {
- struct platform_device *nav_pdev;
-
+ if (qproc->has_spare_reg) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "qcom,halt-nav-regs",
+ "qcom,spare-regs",
1, 0, &args);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
- return -EINVAL;
- }
-
- nav_pdev = of_find_device_by_node(args.np);
- of_node_put(args.np);
- if (!nav_pdev) {
- dev_err(&pdev->dev, "failed to get mss clock device\n");
- return -EPROBE_DEFER;
- }
-
- qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
- if (!qproc->halt_nav_map) {
- dev_err(&pdev->dev, "failed to get map from device\n");
- return -EINVAL;
- }
- qproc->halt_nav = args.args[0];
-
- ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "qcom,halt-nav-regs",
- 1, 1, &args);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
+ dev_err(&pdev->dev, "failed to parse spare-regs\n");
return -EINVAL;
}
@@ -1547,7 +1486,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
return PTR_ERR(qproc->mss_restart);
}
- if (qproc->has_alt_reset || qproc->has_halt_nav) {
+ if (qproc->has_alt_reset || qproc->has_spare_reg) {
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
"pdc_reset");
if (IS_ERR(qproc->pdc_reset)) {
@@ -1566,8 +1505,17 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
struct resource r;
int ret;
+ /*
+	 * In the absence of mba/mpss sub-child nodes, extract the mba and mpss
+	 * reserved memory regions from the device's memory-region property.
+ */
child = of_get_child_by_name(qproc->dev->of_node, "mba");
- node = of_parse_phandle(child, "memory-region", 0);
+ if (!child)
+ node = of_parse_phandle(qproc->dev->of_node,
+ "memory-region", 0);
+ else
+ node = of_parse_phandle(child, "memory-region", 0);
+
ret = of_address_to_resource(node, 0, &r);
if (ret) {
dev_err(qproc->dev, "unable to resolve mba region\n");
@@ -1584,8 +1532,14 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
return -EBUSY;
}
- child = of_get_child_by_name(qproc->dev->of_node, "mpss");
- node = of_parse_phandle(child, "memory-region", 0);
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node,
+ "memory-region", 1);
+ } else {
+ child = of_get_child_by_name(qproc->dev->of_node, "mpss");
+ node = of_parse_phandle(child, "memory-region", 0);
+ }
+
ret = of_address_to_resource(node, 0, &r);
if (ret) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
@@ -1595,12 +1549,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
- qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
- if (!qproc->mpss_region) {
- dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, qproc->mpss_size);
- return -EBUSY;
- }
return 0;
}
@@ -1667,6 +1615,7 @@ static int q6v5_probe(struct platform_device *pdev)
}
rproc->auto_boot = false;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
qproc = (struct q6v5 *)rproc->priv;
qproc->dev = &pdev->dev;
@@ -1679,7 +1628,7 @@ static int q6v5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qproc);
- qproc->has_halt_nav = desc->has_halt_nav;
+ qproc->has_spare_reg = desc->has_spare_reg;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
goto free_rproc;
@@ -1759,7 +1708,7 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
- qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
+ qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
@@ -1828,8 +1777,6 @@ static const struct rproc_hexagon_res sc7180_mss = {
.active_clk_names = (char*[]){
"mnoc_axi",
"nav",
- "mss_nav",
- "mss_crypto",
NULL
},
.active_pd_names = (char*[]){
@@ -1844,7 +1791,7 @@ static const struct rproc_hexagon_res sc7180_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
- .has_halt_nav = true,
+ .has_spare_reg = true,
.version = MSS_SC7180,
};
@@ -1879,7 +1826,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
},
.need_mem_protection = true,
.has_alt_reset = true,
- .has_halt_nav = false,
+ .has_spare_reg = false,
.version = MSS_SDM845,
};
@@ -1906,7 +1853,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
- .has_halt_nav = false,
+ .has_spare_reg = false,
.version = MSS_MSM8998,
};
@@ -1936,7 +1883,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
- .has_halt_nav = false,
+ .has_spare_reg = false,
.version = MSS_MSM8996,
};
@@ -1969,7 +1916,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
- .has_halt_nav = false,
+ .has_spare_reg = false,
.version = MSS_MSM8916,
};
@@ -2010,7 +1957,7 @@ static const struct rproc_hexagon_res msm8974_mss = {
},
.need_mem_protection = false,
.has_alt_reset = false,
- .has_halt_nav = false,
+ .has_spare_reg = false,
.version = MSS_MSM8974,
};
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 7a63efb85405..61791a03f648 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -398,6 +398,7 @@ static int adsp_probe(struct platform_device *pdev)
}
rproc->auto_boot = desc->auto_boot;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
@@ -406,6 +407,8 @@ static int adsp_probe(struct platform_device *pdev)
adsp->has_aggre2_clk = desc->has_aggre2_clk;
platform_set_drvdata(pdev, adsp);
+ device_wakeup_enable(adsp->dev);
+
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
@@ -435,7 +438,7 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto detach_proxy_pds;
- qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
@@ -507,6 +510,26 @@ static const struct adsp_data sm8150_adsp_resource = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sm8250_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data msm8998_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -552,6 +575,25 @@ static const struct adsp_data sm8150_cdsp_resource = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sm8250_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
@@ -603,6 +645,26 @@ static const struct adsp_data sm8150_slpi_resource = {
.ssctl_id = 0x16,
};
+static const struct adsp_data sm8250_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .active_pd_names = (char*[]){
+ "load_state",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
static const struct adsp_data msm8998_slpi_resource = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
@@ -637,12 +699,16 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
+ { .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
+ { .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index f1924b740a10..88c76b9417fa 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -91,6 +91,9 @@ struct q6v5_wcss {
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
};
static int q6v5_wcss_reset(struct q6v5_wcss *wcss)
@@ -557,6 +560,9 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
+ qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
+ qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
+
ret = rproc_add(rproc);
if (ret)
goto free_rproc;
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index faf3822d8791..8d8996d714f0 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -46,6 +46,25 @@ struct qcom_sysmon {
struct sockaddr_qrtr ssctl;
};
+enum {
+ SSCTL_SSR_EVENT_BEFORE_POWERUP,
+ SSCTL_SSR_EVENT_AFTER_POWERUP,
+ SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+ SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+static const char * const sysmon_state_string[] = {
+ [SSCTL_SSR_EVENT_BEFORE_POWERUP] = "before_powerup",
+ [SSCTL_SSR_EVENT_AFTER_POWERUP] = "after_powerup",
+ [SSCTL_SSR_EVENT_BEFORE_SHUTDOWN] = "before_shutdown",
+ [SSCTL_SSR_EVENT_AFTER_SHUTDOWN] = "after_shutdown",
+};
+
+struct sysmon_event {
+ const char *subsys_name;
+ u32 ssr_event;
+};
+
static DEFINE_MUTEX(sysmon_lock);
static LIST_HEAD(sysmon_list);
@@ -54,13 +73,15 @@ static LIST_HEAD(sysmon_list);
* @sysmon: sysmon context
* @name: other remote's name
*/
-static void sysmon_send_event(struct qcom_sysmon *sysmon, const char *name)
+static void sysmon_send_event(struct qcom_sysmon *sysmon,
+ const struct sysmon_event *event)
{
char req[50];
int len;
int ret;
- len = snprintf(req, sizeof(req), "ssr:%s:before_shutdown", name);
+ len = snprintf(req, sizeof(req), "ssr:%s:%s", event->subsys_name,
+ sysmon_state_string[event->ssr_event]);
if (len >= sizeof(req))
return;
@@ -149,13 +170,6 @@ static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
#define SSCTL_SUBSYS_NAME_LENGTH 15
enum {
- SSCTL_SSR_EVENT_BEFORE_POWERUP,
- SSCTL_SSR_EVENT_AFTER_POWERUP,
- SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
- SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
-};
-
-enum {
SSCTL_SSR_EVENT_FORCED,
SSCTL_SSR_EVENT_GRACEFUL,
};
@@ -331,7 +345,8 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
* @sysmon: sysmon context
* @name: other remote's name
*/
-static void ssctl_send_event(struct qcom_sysmon *sysmon, const char *name)
+static void ssctl_send_event(struct qcom_sysmon *sysmon,
+ const struct sysmon_event *event)
{
struct ssctl_subsys_event_resp resp;
struct ssctl_subsys_event_req req;
@@ -346,9 +361,9 @@ static void ssctl_send_event(struct qcom_sysmon *sysmon, const char *name)
}
memset(&req, 0, sizeof(req));
- strlcpy(req.subsys_name, name, sizeof(req.subsys_name));
+ strlcpy(req.subsys_name, event->subsys_name, sizeof(req.subsys_name));
req.subsys_name_len = strlen(req.subsys_name);
- req.event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
+ req.event = event->ssr_event;
req.evt_driven_valid = true;
req.evt_driven = SSCTL_SSR_EVENT_FORCED;
@@ -424,16 +439,68 @@ static const struct qmi_ops ssctl_ops = {
.del_server = ssctl_del_server,
};
+static int sysmon_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
+ };
+
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+
+ return 0;
+}
+
+/**
+ * sysmon_start() - start callback for the sysmon remoteproc subdevice
+ * @subdev: instance of the sysmon subdevice
+ *
+ * Inform all the listeners of sysmon notifications that the rproc associated
+ * to @subdev has booted up. The rproc that booted up also needs to know
+ * which rprocs are already up and running, so send start notifications
+ * on behalf of all the online rprocs.
+ */
static int sysmon_start(struct rproc_subdev *subdev)
{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct qcom_sysmon *target;
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
+ };
+
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+
+ mutex_lock(&sysmon_lock);
+ list_for_each_entry(target, &sysmon_list, node) {
+ if (target == sysmon ||
+ target->rproc->state != RPROC_RUNNING)
+ continue;
+
+ event.subsys_name = target->name;
+
+ if (sysmon->ssctl_version == 2)
+ ssctl_send_event(sysmon, &event);
+ else if (sysmon->ept)
+ sysmon_send_event(sysmon, &event);
+ }
+ mutex_unlock(&sysmon_lock);
+
return 0;
}
static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN
+ };
- blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)sysmon->name);
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
/* Don't request graceful shutdown if we've crashed */
if (crashed)
@@ -445,6 +512,18 @@ static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
sysmon_request_shutdown(sysmon);
}
+static void sysmon_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN
+ };
+
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+}
+
/**
* sysmon_notify() - notify sysmon target of another's SSR
* @nb: notifier_block associated with sysmon instance
@@ -456,19 +535,20 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event,
{
struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb);
struct rproc *rproc = sysmon->rproc;
- const char *ssr_name = data;
+ struct sysmon_event *sysmon_event = data;
/* Skip non-running rprocs and the originating instance */
- if (rproc->state != RPROC_RUNNING || !strcmp(data, sysmon->name)) {
+ if (rproc->state != RPROC_RUNNING ||
+ !strcmp(sysmon_event->subsys_name, sysmon->name)) {
dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name);
return NOTIFY_DONE;
}
/* Only SSCTL version 2 supports SSR events */
if (sysmon->ssctl_version == 2)
- ssctl_send_event(sysmon, ssr_name);
+ ssctl_send_event(sysmon, sysmon_event);
else if (sysmon->ept)
- sysmon_send_event(sysmon, ssr_name);
+ sysmon_send_event(sysmon, sysmon_event);
return NOTIFY_DONE;
}
@@ -543,8 +623,10 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
+ sysmon->subdev.prepare = sysmon_prepare;
sysmon->subdev.start = sysmon_start;
sysmon->subdev.stop = sysmon_stop;
+ sysmon->subdev.unprepare = sysmon_unprepare;
rproc_add_subdev(rproc, &sysmon->subdev);
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 0c7afd038f0d..5d65e1a9329a 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -480,6 +480,7 @@ static int wcnss_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
wcnss = (struct qcom_wcnss *)rproc->priv;
wcnss->dev = &pdev->dev;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index e12a54e67588..9f04c30c4aaf 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -29,6 +29,7 @@
#include <linux/devcoredump.h>
#include <linux/rculist.h>
#include <linux/remoteproc.h>
+#include <linux/pm_runtime.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
@@ -517,7 +518,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* Initialise vdev subdevice */
snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
- rvdev->dev.parent = rproc->dev.parent;
+ rvdev->dev.parent = &rproc->dev;
rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
rvdev->dev.release = rproc_rvdev_release;
dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
@@ -1382,6 +1383,12 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
if (ret)
return ret;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
+ return ret;
+ }
+
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -1391,7 +1398,14 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ret = rproc_enable_iommu(rproc);
if (ret) {
dev_err(dev, "can't enable iommu: %d\n", ret);
- return ret;
+ goto put_pm_runtime;
+ }
+
+ /* Prepare rproc for firmware loading if needed */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
}
rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
@@ -1399,7 +1413,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* Load resource table, core dump segment list etc from the firmware */
ret = rproc_parse_fw(rproc, fw);
if (ret)
- goto disable_iommu;
+ goto unprepare_rproc;
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -1433,8 +1447,13 @@ clean_up_resources:
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
+unprepare_rproc:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
+put_pm_runtime:
+ pm_runtime_put(dev);
return ret;
}
@@ -1566,6 +1585,28 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
/**
+ * rproc_coredump_set_elf_info() - set coredump elf information
+ * @rproc: handle of a remote processor
+ * @class: elf class for coredump elf file
+ * @machine: elf machine for coredump elf file
+ *
+ * Set elf information which will be used for coredump elf file.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
+{
+ if (class != ELFCLASS64 && class != ELFCLASS32)
+ return -EINVAL;
+
+ rproc->elf_class = class;
+ rproc->elf_machine = machine;
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_set_elf_info);
+
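A hedged sketch of how a platform driver might use the new helper right after allocating its rproc; the device name, firmware name and my_rproc_ops below are made up for illustration only:

	rproc = rproc_alloc(&pdev->dev, "my-dsp", &my_rproc_ops, "my-dsp.elf", 0);
	if (!rproc)
		return -ENOMEM;

	/* Coredumps for this remote are 32-bit ELF with no machine type. */
	ret = rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
	if (ret)
		return ret;
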
+/**
* rproc_coredump() - perform coredump
* @rproc: rproc handle
*
@@ -1587,6 +1628,11 @@ static void rproc_coredump(struct rproc *rproc)
if (list_empty(&rproc->dump_segments))
return;
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
data_size = elf_size_of_hdr(class);
list_for_each_entry(segment, &rproc->dump_segments, node) {
data_size += elf_size_of_phdr(class) + segment->size;
@@ -1605,7 +1651,7 @@ static void rproc_coredump(struct rproc *rproc)
elf_hdr_init_ident(ehdr, class);
elf_hdr_set_e_type(class, ehdr, ET_CORE);
- elf_hdr_set_e_machine(class, ehdr, EM_NONE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
@@ -1729,6 +1775,8 @@ static void rproc_crash_handler_work(struct work_struct *work)
if (!rproc->recovery_disabled)
rproc_trigger_recovery(rproc);
+
+ pm_relax(rproc->dev.parent);
}
/**
@@ -1838,8 +1886,13 @@ void rproc_shutdown(struct rproc *rproc)
/* clean up all acquired resources */
rproc_resource_cleanup(rproc);
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
rproc_disable_iommu(rproc);
+ pm_runtime_put(dev);
+
/* Free the copy of the resource table */
kfree(rproc->cached_table);
rproc->cached_table = NULL;
@@ -1949,6 +2002,33 @@ int rproc_add(struct rproc *rproc)
}
EXPORT_SYMBOL(rproc_add);
+static void devm_rproc_remove(void *rproc)
+{
+ rproc_del(rproc);
+}
+
+/**
+ * devm_rproc_add() - resource managed rproc_add()
+ * @dev: the underlying device
+ * @rproc: the remote processor handle to register
+ *
+ * This function behaves like rproc_add(), but the registered rproc device will
+ * automatically be removed on driver detach.
+ *
+ * Returns: 0 on success, negative errno on failure
+ */
+int devm_rproc_add(struct device *dev, struct rproc *rproc)
+{
+ int err;
+
+ err = rproc_add(rproc);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(dev, devm_rproc_remove, rproc);
+}
+EXPORT_SYMBOL(devm_rproc_add);
+
/**
* rproc_type_release() - release a remote processor instance
* @dev: the rproc's device
@@ -1969,7 +2049,8 @@ static void rproc_type_release(struct device *dev)
if (rproc->index >= 0)
ida_simple_remove(&rproc_dev_index, rproc->index);
- kfree(rproc->firmware);
+ kfree_const(rproc->firmware);
+ kfree_const(rproc->name);
kfree(rproc->ops);
kfree(rproc);
}
@@ -1979,6 +2060,47 @@ static const struct device_type rproc_type = {
.release = rproc_type_release,
};
+static int rproc_alloc_firmware(struct rproc *rproc,
+ const char *name, const char *firmware)
+{
+ const char *p;
+
+ /*
+ * Allocate a firmware name if the caller gave us one to work
+ * with. Otherwise construct a new one using a default pattern.
+ */
+ if (firmware)
+ p = kstrdup_const(firmware, GFP_KERNEL);
+ else
+ p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name);
+
+ if (!p)
+ return -ENOMEM;
+
+ rproc->firmware = p;
+
+ return 0;
+}
+
+static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
+{
+ rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
+ if (!rproc->ops)
+ return -ENOMEM;
+
+ if (rproc->ops->load)
+ return 0;
+
+ /* Default to ELF loader if no load function is specified */
+ rproc->ops->load = rproc_elf_load_segments;
+ rproc->ops->parse_fw = rproc_elf_load_rsc_table;
+ rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
+ rproc->ops->sanity_check = rproc_elf_sanity_check;
+ rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
+
+ return 0;
+}
+
/**
* rproc_alloc() - allocate a remote processor handle
* @dev: the underlying device
@@ -2007,79 +2129,49 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
const char *firmware, int len)
{
struct rproc *rproc;
- char *p, *template = "rproc-%s-fw";
- int name_len;
if (!dev || !name || !ops)
return NULL;
- if (!firmware) {
- /*
- * If the caller didn't pass in a firmware name then
- * construct a default name.
- */
- name_len = strlen(name) + strlen(template) - 2 + 1;
- p = kmalloc(name_len, GFP_KERNEL);
- if (!p)
- return NULL;
- snprintf(p, name_len, template, name);
- } else {
- p = kstrdup(firmware, GFP_KERNEL);
- if (!p)
- return NULL;
- }
-
rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
- if (!rproc) {
- kfree(p);
- return NULL;
- }
-
- rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
- if (!rproc->ops) {
- kfree(p);
- kfree(rproc);
+ if (!rproc)
return NULL;
- }
- rproc->firmware = p;
- rproc->name = name;
rproc->priv = &rproc[1];
rproc->auto_boot = true;
- rproc->elf_class = ELFCLASS32;
+ rproc->elf_class = ELFCLASSNONE;
+ rproc->elf_machine = EM_NONE;
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
rproc->dev.type = &rproc_type;
rproc->dev.class = &rproc_class;
rproc->dev.driver_data = rproc;
+ idr_init(&rproc->notifyids);
+
+ rproc->name = kstrdup_const(name, GFP_KERNEL);
+ if (!rproc->name)
+ goto put_device;
+
+ if (rproc_alloc_firmware(rproc, name, firmware))
+ goto put_device;
+
+ if (rproc_alloc_ops(rproc, ops))
+ goto put_device;
/* Assign a unique device index and name */
rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
if (rproc->index < 0) {
dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
- put_device(&rproc->dev);
- return NULL;
+ goto put_device;
}
dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
atomic_set(&rproc->power, 0);
- /* Default to ELF loader if no load function is specified */
- if (!rproc->ops->load) {
- rproc->ops->load = rproc_elf_load_segments;
- rproc->ops->parse_fw = rproc_elf_load_rsc_table;
- rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
- if (!rproc->ops->sanity_check)
- rproc->ops->sanity_check = rproc_elf32_sanity_check;
- rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
- }
-
mutex_init(&rproc->lock);
- idr_init(&rproc->notifyids);
-
INIT_LIST_HEAD(&rproc->carveouts);
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
@@ -2091,7 +2183,14 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->state = RPROC_OFFLINE;
+ pm_runtime_no_callbacks(&rproc->dev);
+ pm_runtime_enable(&rproc->dev);
+
return rproc;
+
+put_device:
+ put_device(&rproc->dev);
+ return NULL;
}
EXPORT_SYMBOL(rproc_alloc);
@@ -2106,6 +2205,7 @@ EXPORT_SYMBOL(rproc_alloc);
*/
void rproc_free(struct rproc *rproc)
{
+ pm_runtime_disable(&rproc->dev);
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_free);
@@ -2171,6 +2271,46 @@ int rproc_del(struct rproc *rproc)
}
EXPORT_SYMBOL(rproc_del);
+static void devm_rproc_free(struct device *dev, void *res)
+{
+ rproc_free(*(struct rproc **)res);
+}
+
+/**
+ * devm_rproc_alloc() - resource managed rproc_alloc()
+ * @dev: the underlying device
+ * @name: name of this remote processor
+ * @ops: platform-specific handlers (mainly start/stop)
+ * @firmware: name of firmware file to load, can be NULL
+ * @len: length of private data needed by the rproc driver (in bytes)
+ *
+ * This function behaves like rproc_alloc(), but the acquired rproc device will
+ * automatically be released on driver detach.
+ *
+ * Returns: new rproc instance, or NULL on failure
+ */
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len)
+{
+ struct rproc **ptr, *rproc;
+
+ ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ rproc = rproc_alloc(dev, name, ops, firmware, len);
+ if (rproc) {
+ *ptr = rproc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return rproc;
+}
+EXPORT_SYMBOL(devm_rproc_alloc);
+
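Together with devm_rproc_add() above, the devres variants let a probe routine skip explicit rproc_free()/rproc_del() calls on its error and remove paths. A minimal sketch, assuming hypothetical my_rproc_ops and my_priv:

static int my_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;

	/* Released automatically on driver detach via devres. */
	rproc = devm_rproc_alloc(&pdev->dev, "my-dsp", &my_rproc_ops,
				 "my-dsp.elf", sizeof(struct my_priv));
	if (!rproc)
		return -ENOMEM;

	/* Likewise removed automatically on driver detach. */
	return devm_rproc_add(&pdev->dev, rproc);
}
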
/**
* rproc_add_subdev() - add a subdevice to a remoteproc
* @rproc: rproc handle to add the subdevice to
@@ -2230,6 +2370,9 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
return;
}
+ /* Prevent suspend while the remoteproc is being recovered */
+ pm_stay_awake(rproc->dev.parent);
+
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index d734cadb16e3..732770e92b99 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -269,17 +269,7 @@ static int rproc_rsc_table_show(struct seq_file *seq, void *p)
return 0;
}
-static int rproc_rsc_table_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rproc_rsc_table_show, inode->i_private);
-}
-
-static const struct file_operations rproc_rsc_table_ops = {
- .open = rproc_rsc_table_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rproc_rsc_table);
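For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates roughly the boilerplate being deleted here, which is why the debugfs registration below can switch to rproc_rsc_table_fops:

/* Roughly what DEFINE_SHOW_ATTRIBUTE(rproc_rsc_table) expands to. */
static int rproc_rsc_table_open(struct inode *inode, struct file *file)
{
	return single_open(file, rproc_rsc_table_show, inode->i_private);
}

static const struct file_operations rproc_rsc_table_fops = {
	.owner   = THIS_MODULE,
	.open    = rproc_rsc_table_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
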
/* Expose carveout content via debugfs */
static int rproc_carveouts_show(struct seq_file *seq, void *p)
@@ -299,17 +289,7 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)
return 0;
}
-static int rproc_carveouts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rproc_carveouts_show, inode->i_private);
-}
-
-static const struct file_operations rproc_carveouts_ops = {
- .open = rproc_carveouts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rproc_carveouts);
void rproc_remove_trace_file(struct dentry *tfile)
{
@@ -354,9 +334,9 @@ void rproc_create_debug_dir(struct rproc *rproc)
debugfs_create_file("crash", 0200, rproc->dbg_dir,
rproc, &rproc_crash_ops);
debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
- rproc, &rproc_rsc_table_ops);
+ rproc, &rproc_rsc_table_fops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
- rproc, &rproc_carveouts_ops);
+ rproc, &rproc_carveouts_fops);
}
void __init rproc_init_debugfs(void)
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index 16e2c496fd45..df68d87752e4 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -113,27 +113,6 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
EXPORT_SYMBOL(rproc_elf_sanity_check);
/**
- * rproc_elf_sanity_check() - Sanity Check ELF32 firmware image
- * @rproc: the remote processor handle
- * @fw: the ELF32 firmware image
- *
- * Make sure this fw image is sane.
- */
-int rproc_elf32_sanity_check(struct rproc *rproc, const struct firmware *fw)
-{
- int ret = rproc_elf_sanity_check(rproc, fw);
-
- if (ret)
- return ret;
-
- if (fw_elf_get_class(fw) == ELFCLASS32)
- return 0;
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(rproc_elf32_sanity_check);
-
-/**
* rproc_elf_get_boot_addr() - Get rproc's boot address.
* @rproc: the remote processor handle
* @fw: the ELF firmware image
@@ -248,9 +227,6 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
memset(ptr + filesz, 0, memsz - filesz);
}
- if (ret == 0)
- rproc->elf_class = class;
-
return ret;
}
EXPORT_SYMBOL(rproc_elf_load_segments);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index b389dc79da81..4ba7cb59d3e8 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -54,7 +54,6 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
-int rproc_elf32_sanity_check(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
@@ -64,6 +63,22 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
+static inline int rproc_prepare_device(struct rproc *rproc)
+{
+ if (rproc->ops->prepare)
+ return rproc->ops->prepare(rproc);
+
+ return 0;
+}
+
+static inline int rproc_unprepare_device(struct rproc *rproc)
+{
+ if (rproc->ops->unprepare)
+ return rproc->ops->unprepare(rproc);
+
+ return 0;
+}
+
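These helpers are no-ops unless a platform driver supplies the optional hooks. A minimal sketch, assuming hypothetical callbacks for hardware that is only needed while firmware is being loaded:

static int my_rproc_prepare(struct rproc *rproc)
{
	/* e.g. power up internal memories before segments are loaded */
	return 0;
}

static int my_rproc_unprepare(struct rproc *rproc)
{
	/* release whatever prepare() acquired */
	return 0;
}

static const struct rproc_ops my_rproc_ops = {
	.prepare   = my_rproc_prepare,
	.unprepare = my_rproc_unprepare,
	/* .start, .stop and the other usual ops are omitted from this sketch */
};
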
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index e61d738d9b47..dfd3808c34fd 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -337,8 +337,7 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
if (rproc->ops->kick == NULL) {
ret = -EINVAL;
- dev_err(dev, ".kick method not defined for %s",
- rproc->name);
+ dev_err(dev, ".kick method not defined for %s\n", rproc->name);
goto out;
}
@@ -376,6 +375,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
goto out;
}
}
+ } else {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /*
+		 * If we don't have a dedicated buffer, just attempt to re-assign
+		 * the reserved memory from our parent. A default memory-region
+		 * at index 0 from the parent's memory-regions is assigned for
+		 * the rvdev dev to allocate from. Failure is non-critical and
+		 * the allocations will fall back to global pools, so don't
+		 * check the return value either.
+ */
+ of_reserved_mem_device_init_by_idx(dev, np, 0);
}
/* Allocate virtio device */
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index a6cbfa452764..a3268d95a50e 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -233,7 +233,7 @@ static const struct rproc_ops st_rproc_ops = {
.parse_fw = st_rproc_parse_fw,
.load = rproc_elf_load_segments,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
- .sanity_check = rproc_elf32_sanity_check,
+ .sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 3cca8b65a8db..09bcb4d8b9e0 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -203,7 +203,7 @@ static const struct rproc_ops slim_rproc_ops = {
.da_to_va = slim_rproc_da_to_va,
.get_boot_addr = rproc_elf_get_boot_addr,
.load = rproc_elf_load_segments,
- .sanity_check = rproc_elf32_sanity_check,
+ .sanity_check = rproc_elf_sanity_check,
};
/**
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 0bdd56f02f18..062797a447c6 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -506,7 +506,7 @@ static struct rproc_ops st_rproc_ops = {
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
- .sanity_check = rproc_elf32_sanity_check,
+ .sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
@@ -626,6 +626,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (!rproc)
return -ENOMEM;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
rproc->has_iommu = false;
ddata = rproc->priv;
ddata->workqueue = create_workqueue(dev_name(dev));
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 24e6d420b26b..19926506d033 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -33,6 +33,7 @@
enum hi6220_reset_ctrl_type {
PERIPHERAL,
MEDIA,
+ AO,
};
struct hi6220_reset_data {
@@ -92,6 +93,65 @@ static const struct reset_control_ops hi6220_media_reset_ops = {
.deassert = hi6220_media_deassert,
};
+#define AO_SCTRL_SC_PW_CLKEN0 0x800
+#define AO_SCTRL_SC_PW_CLKDIS0 0x804
+
+#define AO_SCTRL_SC_PW_RSTEN0 0x810
+#define AO_SCTRL_SC_PW_RSTDIS0 0x814
+
+#define AO_SCTRL_SC_PW_ISOEN0 0x820
+#define AO_SCTRL_SC_PW_ISODIS0 0x824
+#define AO_MAX_INDEX 12
+
+static int hi6220_ao_assert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ int ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_RSTEN0, BIT(idx));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_ISOEN0, BIT(idx));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_CLKDIS0, BIT(idx));
+ return ret;
+}
+
+static int hi6220_ao_deassert(struct reset_controller_dev *rc_dev,
+ unsigned long idx)
+{
+ struct hi6220_reset_data *data = to_reset_data(rc_dev);
+ struct regmap *regmap = data->regmap;
+ int ret;
+
+ /*
+ * It was suggested to disable isolation before enabling
+ * the clocks and deasserting reset, to avoid glitches.
+ * But this order is preserved to keep it matching the
+ * vendor code.
+ */
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_RSTDIS0, BIT(idx));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_ISODIS0, BIT(idx));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(regmap, AO_SCTRL_SC_PW_CLKEN0, BIT(idx));
+ return ret;
+}
+
+static const struct reset_control_ops hi6220_ao_reset_ops = {
+ .assert = hi6220_ao_assert,
+ .deassert = hi6220_ao_deassert,
+};
+
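Clients never call these ops directly; they reach them through the reset framework. A hedged sketch of a consumer of one of the new AO resets (the NULL id and the settle delay are assumptions for illustration, not taken from the driver):

	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);	/* hi6220_ao_assert() runs here */
	usleep_range(10, 20);
	reset_control_deassert(rst);	/* hi6220_ao_deassert() runs here */
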
static int hi6220_reset_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -117,9 +177,12 @@ static int hi6220_reset_probe(struct platform_device *pdev)
if (type == MEDIA) {
data->rc_dev.ops = &hi6220_media_reset_ops;
data->rc_dev.nr_resets = MEDIA_MAX_INDEX;
- } else {
+ } else if (type == PERIPHERAL) {
data->rc_dev.ops = &hi6220_peripheral_reset_ops;
data->rc_dev.nr_resets = PERIPH_MAX_INDEX;
+ } else {
+ data->rc_dev.ops = &hi6220_ao_reset_ops;
+ data->rc_dev.nr_resets = AO_MAX_INDEX;
}
return reset_controller_register(&data->rc_dev);
@@ -134,6 +197,10 @@ static const struct of_device_id hi6220_reset_match[] = {
.compatible = "hisilicon,hi6220-mediactrl",
.data = (void *)MEDIA,
},
+ {
+ .compatible = "hisilicon,hi6220-aoctrl",
+ .data = (void *)AO,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, hi6220_reset_match);
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 1443a55a0c29..d170fe663210 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -15,6 +15,7 @@
#include <linux/regmap.h>
#include <dt-bindings/reset/imx7-reset.h>
#include <dt-bindings/reset/imx8mq-reset.h>
+#include <dt-bindings/reset/imx8mp-reset.h>
struct imx7_src_signal {
unsigned int offset, bit;
@@ -145,6 +146,18 @@ enum imx8mq_src_registers {
SRC_DDRC2_RCR = 0x1004,
};
+enum imx8mp_src_registers {
+ SRC_SUPERMIX_RCR = 0x0018,
+ SRC_AUDIOMIX_RCR = 0x001c,
+ SRC_MLMIX_RCR = 0x0028,
+ SRC_GPU2D_RCR = 0x0038,
+ SRC_GPU3D_RCR = 0x003c,
+ SRC_VPU_G1_RCR = 0x0048,
+ SRC_VPU_G2_RCR = 0x004c,
+ SRC_VPUVC8KE_RCR = 0x0050,
+ SRC_NOC_RCR = 0x0054,
+};
+
static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
[IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
[IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
@@ -253,6 +266,93 @@ static const struct imx7_src_variant variant_imx8mq = {
},
};
+static const struct imx7_src_signal imx8mp_src_signals[IMX8MP_RESET_NUM] = {
+ [IMX8MP_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
+ [IMX8MP_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
+ [IMX8MP_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
+ [IMX8MP_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
+ [IMX8MP_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
+ [IMX8MP_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
+ [IMX8MP_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
+ [IMX8MP_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
+ [IMX8MP_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
+ [IMX8MP_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
+ [IMX8MP_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
+ [IMX8MP_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
+ [IMX8MP_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
+ [IMX8MP_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
+ [IMX8MP_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
+ [IMX8MP_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
+ [IMX8MP_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
+ [IMX8MP_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
+ [IMX8MP_RESET_SW_NON_SCLR_M7C_RST] = { SRC_M4RCR, BIT(0) },
+ [IMX8MP_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
+ [IMX8MP_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
+ [IMX8MP_RESET_SUPERMIX_RESET] = { SRC_SUPERMIX_RCR, BIT(0) },
+ [IMX8MP_RESET_AUDIOMIX_RESET] = { SRC_AUDIOMIX_RCR, BIT(0) },
+ [IMX8MP_RESET_MLMIX_RESET] = { SRC_MLMIX_RCR, BIT(0) },
+ [IMX8MP_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) },
+ [IMX8MP_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
+ [IMX8MP_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX8MP_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
+ [IMX8MP_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
+ [IMX8MP_RESET_MEDIA_RESET] = { SRC_DISP_RCR, BIT(0) },
+ [IMX8MP_RESET_GPU2D_RESET] = { SRC_GPU2D_RCR, BIT(0) },
+ [IMX8MP_RESET_GPU3D_RESET] = { SRC_GPU3D_RCR, BIT(0) },
+ [IMX8MP_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
+ [IMX8MP_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
+ [IMX8MP_RESET_VPU_G1_RESET] = { SRC_VPU_G1_RCR, BIT(0) },
+ [IMX8MP_RESET_VPU_G2_RESET] = { SRC_VPU_G2_RCR, BIT(0) },
+ [IMX8MP_RESET_VPUVC8KE_RESET] = { SRC_VPUVC8KE_RCR, BIT(0) },
+ [IMX8MP_RESET_NOC_RESET] = { SRC_NOC_RCR, BIT(0) },
+};
+
+static int imx8mp_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct imx7_src *imx7src = to_imx7_src(rcdev);
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
+
+ switch (id) {
+ case IMX8MP_RESET_PCIEPHY:
+ /*
+ * wait for more than 10us to release phy g_rst and
+ * btnrst
+ */
+ if (!assert)
+ udelay(10);
+ break;
+
+ case IMX8MP_RESET_PCIE_CTRL_APPS_EN:
+ value = assert ? 0 : bit;
+ break;
+ }
+
+ return imx7_reset_update(imx7src, id, value);
+}
+
+static int imx8mp_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mp_reset_set(rcdev, id, true);
+}
+
+static int imx8mp_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mp_reset_set(rcdev, id, false);
+}
+
+static const struct imx7_src_variant variant_imx8mp = {
+ .signals = imx8mp_src_signals,
+ .signals_num = ARRAY_SIZE(imx8mp_src_signals),
+ .ops = {
+ .assert = imx8mp_reset_assert,
+ .deassert = imx8mp_reset_deassert,
+ },
+};
+
static int imx7_reset_probe(struct platform_device *pdev)
{
struct imx7_src *imx7src;
@@ -283,6 +383,7 @@ static int imx7_reset_probe(struct platform_device *pdev)
static const struct of_device_id imx7_reset_dt_ids[] = {
{ .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
{ .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
+ { .compatible = "fsl,imx8mp-src", .data = &variant_imx8mp },
{ /* sentinel */ },
};
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 0144075b11a6..373ea8d4f7a1 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -15,7 +15,6 @@
struct zynqmp_reset_data {
struct reset_controller_dev rcdev;
- const struct zynqmp_eemi_ops *eemi_ops;
};
static inline struct zynqmp_reset_data *
@@ -27,28 +26,23 @@ to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_ASSERT);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_ASSERT);
}
static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_RELEASE);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_RELEASE);
}
static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
int val, err;
- err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ err = zynqmp_pm_reset_get_status(ZYNQMP_RESET_ID + id, &val);
if (err)
return err;
@@ -58,10 +52,8 @@ static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
-
- return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
- PM_RESET_ACTION_PULSE);
+ return zynqmp_pm_reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_PULSE);
}
static const struct reset_control_ops zynqmp_reset_ops = {
@@ -79,10 +71,6 @@ static int zynqmp_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(priv->eemi_ops))
- return PTR_ERR(priv->eemi_ops);
-
platform_set_drvdata(pdev, priv);
priv->rcdev.ops = &zynqmp_reset_ops;
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index a9108ff563dc..f96716893c2a 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -24,13 +24,13 @@ config RPMSG_MTK_SCP
remote processors in MediaTek platforms.
This uses IPI and IPC to communicate with remote processors.
-config RPMSG_QCOM_GLINK_NATIVE
+config RPMSG_QCOM_GLINK
tristate
select RPMSG
config RPMSG_QCOM_GLINK_RPM
tristate "Qualcomm RPM Glink driver"
- select RPMSG_QCOM_GLINK_NATIVE
+ select RPMSG_QCOM_GLINK
depends on HAS_IOMEM
depends on MAILBOX
help
@@ -40,7 +40,7 @@ config RPMSG_QCOM_GLINK_RPM
config RPMSG_QCOM_GLINK_SMEM
tristate "Qualcomm SMEM Glink driver"
- select RPMSG_QCOM_GLINK_NATIVE
+ select RPMSG_QCOM_GLINK
depends on MAILBOX
depends on QCOM_SMEM
help
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index ae92a7fb08f6..ffe932ef6050 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -2,8 +2,9 @@
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
obj-$(CONFIG_RPMSG_MTK_SCP) += mtk_rpmsg.o
+qcom_glink-objs := qcom_glink_native.o qcom_glink_ssr.o
+obj-$(CONFIG_RPMSG_QCOM_GLINK) += qcom_glink.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
-obj-$(CONFIG_RPMSG_QCOM_GLINK_NATIVE) += qcom_glink_native.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o
diff --git a/drivers/rpmsg/qcom_glink_ssr.c b/drivers/rpmsg/qcom_glink_ssr.c
new file mode 100644
index 000000000000..dcd1ce616974
--- /dev/null
+++ b/drivers/rpmsg/qcom_glink_ssr.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Linaro Ltd.
+ */
+
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/rpmsg.h>
+#include <linux/remoteproc/qcom_rproc.h>
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * @version: The G-Link SSR protocol version
+ * @command: The G-Link SSR command - do_cleanup
+ * @seq_num: Sequence number
+ * @name_len: Length of the name of the subsystem being restarted
+ * @name: G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+ __le32 version;
+ __le32 command;
+ __le32 seq_num;
+ __le32 name_len;
+ char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * @version: The G-Link SSR protocol version
+ * @response: The G-Link SSR response to a do_cleanup command, cleanup_done
+ * @seq_num: Sequence number
+ */
+struct cleanup_done_msg {
+ __le32 version;
+ __le32 response;
+ __le32 seq_num;
+};
+
+/*
+ * G-Link SSR protocol commands
+ */
+#define GLINK_SSR_DO_CLEANUP 0
+#define GLINK_SSR_CLEANUP_DONE 1
+
+struct glink_ssr {
+ struct device *dev;
+ struct rpmsg_endpoint *ept;
+
+ struct notifier_block nb;
+
+ u32 seq_num;
+ struct completion completion;
+};
+
+/* Notifier list for all registered glink_ssr instances */
+static BLOCKING_NOTIFIER_HEAD(ssr_notifiers);
+
+/**
+ * qcom_glink_ssr_notify() - notify GLINK SSR about stopped remoteproc
+ * @ssr_name: name of the remoteproc that has been stopped
+ */
+void qcom_glink_ssr_notify(const char *ssr_name)
+{
+ blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr_name);
+}
+EXPORT_SYMBOL_GPL(qcom_glink_ssr_notify);
+
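A minimal, hedged sketch of a caller; the literal edge name is only an example, and in practice the name is presumably the ssr_name wired up through qcom_add_glink_subdev():

	/*
	 * Ask every registered glink_ssr instance to send do_cleanup for
	 * the "mpss" edge that has just gone down.
	 */
	qcom_glink_ssr_notify("mpss");
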
+static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev,
+ void *data, int len, void *priv, u32 addr)
+{
+ struct cleanup_done_msg *msg = data;
+ struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+ if (len < sizeof(*msg)) {
+ dev_err(ssr->dev, "message too short\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(msg->version) != 0)
+ return -EINVAL;
+
+ if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
+ return 0;
+
+ if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
+ dev_err(ssr->dev, "invalid sequence number of response\n");
+ return -EINVAL;
+ }
+
+ complete(&ssr->completion);
+
+ return 0;
+}
+
+static int qcom_glink_ssr_notifier_call(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb);
+ struct do_cleanup_msg msg;
+ char *ssr_name = data;
+ int ret;
+
+ ssr->seq_num++;
+ reinit_completion(&ssr->completion);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
+ msg.seq_num = cpu_to_le32(ssr->seq_num);
+ msg.name_len = cpu_to_le32(strlen(ssr_name));
+ strlcpy(msg.name, ssr_name, sizeof(msg.name));
+
+ ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
+ if (ret < 0)
+ dev_err(ssr->dev, "failed to send cleanup message\n");
+
+ ret = wait_for_completion_timeout(&ssr->completion, HZ);
+ if (!ret)
+ dev_err(ssr->dev, "timeout waiting for cleanup done message\n");
+
+ return NOTIFY_DONE;
+}
+
+static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev)
+{
+ struct glink_ssr *ssr;
+
+ ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
+ if (!ssr)
+ return -ENOMEM;
+
+ init_completion(&ssr->completion);
+
+ ssr->dev = &rpdev->dev;
+ ssr->ept = rpdev->ept;
+ ssr->nb.notifier_call = qcom_glink_ssr_notifier_call;
+
+ dev_set_drvdata(&rpdev->dev, ssr);
+
+ return blocking_notifier_chain_register(&ssr_notifiers, &ssr->nb);
+}
+
+static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev)
+{
+ struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+
+ blocking_notifier_chain_unregister(&ssr_notifiers, &ssr->nb);
+}
+
+static const struct rpmsg_device_id qcom_glink_ssr_match[] = {
+ { "glink_ssr" },
+ {}
+};
+
+static struct rpmsg_driver qcom_glink_ssr_driver = {
+ .probe = qcom_glink_ssr_probe,
+ .remove = qcom_glink_ssr_remove,
+ .callback = qcom_glink_ssr_callback,
+ .id_table = qcom_glink_ssr_match,
+ .drv = {
+ .name = "qcom_glink_ssr",
+ },
+};
+module_rpmsg_driver(qcom_glink_ssr_driver);
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index e330ec4dfc33..a6361cad608b 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -284,7 +284,7 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
/*
- * match an rpmsg channel with a channel info struct.
+ * match a rpmsg channel with a channel info struct.
* this is used to make sure we're not creating rpmsg devices for channels
* that already exist.
*/
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 376ebbf880d6..07d4f3374098 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -89,7 +89,7 @@ struct rpmsg_hdr {
u32 reserved;
u16 len;
u16 flags;
- u8 data[0];
+ u8 data[];
} __packed;
/**
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index ec873f09c763..b54d87d45c89 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1516,7 +1516,7 @@ config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
# RTC abstraction
- depends on PARISC || M68K || PPC || SUPERH32 || COMPILE_TEST
+ depends on PARISC || M68K || PPC || SUPERH || COMPILE_TEST
help
Say Y or M here to enable RTC support on systems using the generic
RTC abstraction. If you do not know what you are doing, you should
@@ -1680,6 +1680,7 @@ config RTC_DRV_MPC5121
config RTC_DRV_JZ4740
tristate "Ingenic JZ4740 SoC"
depends on MIPS || COMPILE_TEST
+ depends on OF
help
If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
controllers.
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index cc9b14ef90f1..c90457d001e9 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -106,12 +106,6 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned char buf[4];
unsigned long ticks, base, data;
- if (tm->tm_year > 206) {
- dev_dbg(info->dev, "Set time %d out of range. "
- "Please set time between 1970 to 2106.\n",
- 1900 + tm->tm_year);
- return -EINVAL;
- }
ticks = rtc_tm_to_time64(tm);
/* load 32-bit read-only counter */
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 3521d8e8dc38..803725b3a02c 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -13,6 +13,7 @@
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/rtc.h>
#include <linux/watchdog.h>
@@ -554,8 +555,9 @@ static const struct rtc_class_ops abx80x_rtc_ops = {
.ioctl = abx80x_ioctl,
};
-static int abx80x_dt_trickle_cfg(struct device_node *np)
+static int abx80x_dt_trickle_cfg(struct i2c_client *client)
{
+ struct device_node *np = client->dev.of_node;
const char *diode;
int trickle_cfg = 0;
int i, ret;
@@ -565,12 +567,14 @@ static int abx80x_dt_trickle_cfg(struct device_node *np)
if (ret)
return ret;
- if (!strcmp(diode, "standard"))
+ if (!strcmp(diode, "standard")) {
trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE;
- else if (!strcmp(diode, "schottky"))
+ } else if (!strcmp(diode, "schottky")) {
trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE;
- else
+ } else {
+ dev_dbg(&client->dev, "Invalid tc-diode value: %s\n", diode);
return -EINVAL;
+ }
ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp);
if (ret)
@@ -580,8 +584,10 @@ static int abx80x_dt_trickle_cfg(struct device_node *np)
if (trickle_resistors[i] == tmp)
break;
- if (i == sizeof(trickle_resistors))
+ if (i == sizeof(trickle_resistors)) {
+ dev_dbg(&client->dev, "Invalid tc-resistor value: %u\n", tmp);
return -EINVAL;
+ }
return (trickle_cfg | i);
}
@@ -793,7 +799,7 @@ static int abx80x_probe(struct i2c_client *client,
}
if (np && abx80x_caps[part].has_tc)
- trickle_cfg = abx80x_dt_trickle_cfg(np);
+ trickle_cfg = abx80x_dt_trickle_cfg(client);
if (trickle_cfg > 0) {
dev_info(&client->dev, "Enabling trickle charger: %02x\n",
@@ -863,9 +869,57 @@ static const struct i2c_device_id abx80x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, abx80x_id);
+#ifdef CONFIG_OF
+static const struct of_device_id abx80x_of_match[] = {
+ {
+ .compatible = "abracon,abx80x",
+ .data = (void *)ABX80X
+ },
+ {
+ .compatible = "abracon,ab0801",
+ .data = (void *)AB0801
+ },
+ {
+ .compatible = "abracon,ab0803",
+ .data = (void *)AB0803
+ },
+ {
+ .compatible = "abracon,ab0804",
+ .data = (void *)AB0804
+ },
+ {
+ .compatible = "abracon,ab0805",
+ .data = (void *)AB0805
+ },
+ {
+ .compatible = "abracon,ab1801",
+ .data = (void *)AB1801
+ },
+ {
+ .compatible = "abracon,ab1803",
+ .data = (void *)AB1803
+ },
+ {
+ .compatible = "abracon,ab1804",
+ .data = (void *)AB1804
+ },
+ {
+ .compatible = "abracon,ab1805",
+ .data = (void *)AB1805
+ },
+ {
+ .compatible = "microcrystal,rv1805",
+ .data = (void *)RV1805
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, abx80x_of_match);
+#endif
+
static struct i2c_driver abx80x_driver = {
.driver = {
.name = "rtc-abx80x",
+ .of_match_table = of_match_ptr(abx80x_of_match),
},
.probe = abx80x_probe,
.id_table = abx80x_id,
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 756af62b0486..68f0a1801a2e 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -21,6 +21,7 @@
#include <linux/rtc.h>
#include <linux/time.h>
#include <linux/acpi.h>
+#include <linux/pm_wakeirq.h>
#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
@@ -268,13 +269,11 @@ static int ftm_rtc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "can't get irq number\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, ftm_rtc_alarm_interrupt,
- IRQF_NO_SUSPEND, dev_name(&pdev->dev), rtc);
+ 0, dev_name(&pdev->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
@@ -287,6 +286,9 @@ static int ftm_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &ftm_rtc_ops;
device_init_wakeup(&pdev->dev, true);
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ dev_err(&pdev->dev, "failed to enable irq wake\n");
ret = rtc_register_device(rtc->rtc_dev);
if (ret) {
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index cb6b0ad7ec3f..27797157fcb3 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -174,7 +174,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtcdrv);
rtcdrv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtcdrv->base))
- return -ENODEV;
+ return PTR_ERR(rtcdrv->base);
rtcdrv->irq = platform_get_irq(pdev, 0);
if (rtcdrv->irq < 0)
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index e4c719085c31..9607e6b6e0b3 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -55,14 +55,8 @@ struct jz4740_rtc {
enum jz4740_rtc_type type;
struct rtc_device *rtc;
- struct clk *clk;
-
- int irq;
spinlock_t lock;
-
- unsigned int min_wakeup_pin_assert_time;
- unsigned int reset_pin_assert_time;
};
static struct device *dev_for_power_off;
@@ -259,156 +253,157 @@ static void jz4740_rtc_poweroff(struct device *dev)
static void jz4740_rtc_power_off(void)
{
- struct jz4740_rtc *rtc = dev_get_drvdata(dev_for_power_off);
- unsigned long rtc_rate;
- unsigned long wakeup_filter_ticks;
- unsigned long reset_counter_ticks;
+ jz4740_rtc_poweroff(dev_for_power_off);
+ kernel_halt();
+}
- clk_prepare_enable(rtc->clk);
+static void jz4740_rtc_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
- rtc_rate = clk_get_rate(rtc->clk);
+static const struct of_device_id jz4740_rtc_of_match[] = {
+ { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
+ { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
+
+static void jz4740_rtc_set_wakeup_params(struct jz4740_rtc *rtc,
+ struct device_node *np,
+ unsigned long rate)
+{
+ unsigned long wakeup_ticks, reset_ticks;
+ unsigned int min_wakeup_pin_assert_time = 60; /* Default: 60ms */
+ unsigned int reset_pin_assert_time = 100; /* Default: 100ms */
+
+ of_property_read_u32(np, "ingenic,reset-pin-assert-time-ms",
+ &reset_pin_assert_time);
+ of_property_read_u32(np, "ingenic,min-wakeup-pin-assert-time-ms",
+ &min_wakeup_pin_assert_time);
/*
* Set minimum wakeup pin assertion time: 100 ms.
* Range is 0 to 2 sec if RTC is clocked at 32 kHz.
*/
- wakeup_filter_ticks =
- (rtc->min_wakeup_pin_assert_time * rtc_rate) / 1000;
- if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
- wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
+ wakeup_ticks = (min_wakeup_pin_assert_time * rate) / 1000;
+ if (wakeup_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
+ wakeup_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
else
- wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
- jz4740_rtc_reg_write(rtc,
- JZ_REG_RTC_WAKEUP_FILTER, wakeup_filter_ticks);
+ wakeup_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_WAKEUP_FILTER, wakeup_ticks);
/*
* Set reset pin low-level assertion time after wakeup: 60 ms.
* Range is 0 to 125 ms if RTC is clocked at 32 kHz.
*/
- reset_counter_ticks = (rtc->reset_pin_assert_time * rtc_rate) / 1000;
- if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK)
- reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK;
+ reset_ticks = (reset_pin_assert_time * rate) / 1000;
+ if (reset_ticks < JZ_RTC_RESET_COUNTER_MASK)
+ reset_ticks &= JZ_RTC_RESET_COUNTER_MASK;
else
- reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK;
- jz4740_rtc_reg_write(rtc,
- JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
-
- jz4740_rtc_poweroff(dev_for_power_off);
- kernel_halt();
+ reset_ticks = JZ_RTC_RESET_COUNTER_MASK;
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_RESET_COUNTER, reset_ticks);
}
-static const struct of_device_id jz4740_rtc_of_match[] = {
- { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 },
- { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 },
- { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
- {},
-};
-MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
-
static int jz4740_rtc_probe(struct platform_device *pdev)
{
- int ret;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct jz4740_rtc *rtc;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- const struct of_device_id *of_id = of_match_device(
- jz4740_rtc_of_match, &pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ unsigned long rate;
+ struct clk *clk;
+ int ret, irq;
- rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
- if (of_id)
- rtc->type = (enum jz4740_rtc_type)of_id->data;
- else
- rtc->type = id->driver_data;
+ rtc->type = (enum jz4740_rtc_type)device_get_match_data(dev);
- rtc->irq = platform_get_irq(pdev, 0);
- if (rtc->irq < 0)
- return -ENOENT;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rtc->base))
return PTR_ERR(rtc->base);
- rtc->clk = devm_clk_get(&pdev->dev, "rtc");
- if (IS_ERR(rtc->clk)) {
- dev_err(&pdev->dev, "Failed to get RTC clock\n");
- return PTR_ERR(rtc->clk);
+ clk = devm_clk_get(dev, "rtc");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Failed to get RTC clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, jz4740_rtc_clk_disable, clk);
+ if (ret) {
+ dev_err(dev, "Failed to register devm action\n");
+ return ret;
}
spin_lock_init(&rtc->lock);
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(dev, 1);
- ret = dev_pm_set_wake_irq(&pdev->dev, rtc->irq);
+ ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(&pdev->dev, "Failed to set wake irq: %d\n", ret);
+ dev_err(dev, "Failed to set wake irq: %d\n", ret);
return ret;
}
- rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ rtc->rtc = devm_rtc_allocate_device(dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
- dev_err(&pdev->dev, "Failed to allocate rtc device: %d\n", ret);
+ dev_err(dev, "Failed to allocate rtc device: %d\n", ret);
return ret;
}
rtc->rtc->ops = &jz4740_rtc_ops;
rtc->rtc->range_max = U32_MAX;
+ rate = clk_get_rate(clk);
+ jz4740_rtc_set_wakeup_params(rtc, np, rate);
+
+ /* Each 1 Hz pulse should happen after (rate) ticks */
+ jz4740_rtc_reg_write(rtc, JZ_REG_RTC_REGULATOR, rate - 1);
+
ret = rtc_register_device(rtc->rtc);
if (ret)
return ret;
- ret = devm_request_irq(&pdev->dev, rtc->irq, jz4740_rtc_irq, 0,
- pdev->name, rtc);
+ ret = devm_request_irq(dev, irq, jz4740_rtc_irq, 0,
+ pdev->name, rtc);
if (ret) {
- dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret);
+ dev_err(dev, "Failed to request rtc irq: %d\n", ret);
return ret;
}
- if (np && of_device_is_system_power_controller(np)) {
- if (!pm_power_off) {
- /* Default: 60ms */
- rtc->reset_pin_assert_time = 60;
- of_property_read_u32(np,
- "ingenic,reset-pin-assert-time-ms",
- &rtc->reset_pin_assert_time);
-
- /* Default: 100ms */
- rtc->min_wakeup_pin_assert_time = 100;
- of_property_read_u32(np,
- "ingenic,min-wakeup-pin-assert-time-ms",
- &rtc->min_wakeup_pin_assert_time);
-
- dev_for_power_off = &pdev->dev;
+ if (of_device_is_system_power_controller(np)) {
+ dev_for_power_off = dev;
+
+ if (!pm_power_off)
pm_power_off = jz4740_rtc_power_off;
- } else {
- dev_warn(&pdev->dev,
- "Poweroff handler already present!\n");
- }
+ else
+ dev_warn(dev, "Poweroff handler already present!\n");
}
return 0;
}
-static const struct platform_device_id jz4740_rtc_ids[] = {
- { "jz4740-rtc", ID_JZ4740 },
- { "jz4780-rtc", ID_JZ4780 },
- {}
-};
-MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
-
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.driver = {
.name = "jz4740-rtc",
- .of_match_table = of_match_ptr(jz4740_rtc_of_match),
+ .of_match_table = jz4740_rtc_of_match,
},
- .id_table = jz4740_rtc_ids,
};
module_platform_driver(jz4740_rtc_driver);
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index 00ef16ba9480..eec881a81067 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -205,10 +205,8 @@ static int lpc24xx_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->rtc_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_warn(&pdev->dev, "can't get interrupt resource\n");
+ if (irq < 0)
return irq;
- }
rtc->clk_rtc = devm_clk_get(&pdev->dev, "rtc");
if (IS_ERR(rtc->clk_rtc)) {
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index d5a0e27dd0a0..03ebcf1c0f3d 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -78,6 +78,8 @@ struct max77686_rtc_driver_data {
int alarm_pending_status_reg;
/* RTC IRQ CHIP for regmap */
const struct regmap_irq_chip *rtc_irq_chip;
+ /* regmap configuration for the chip */
+ const struct regmap_config *regmap_config;
};
struct max77686_rtc_info {
@@ -182,6 +184,11 @@ static const struct regmap_irq_chip max77686_rtc_irq_chip = {
.num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
};
+static const struct regmap_config max77686_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static const struct max77686_rtc_driver_data max77686_drv_data = {
.delay = 16000,
.mask = 0x7f,
@@ -191,6 +198,13 @@ static const struct max77686_rtc_driver_data max77686_drv_data = {
.alarm_pending_status_reg = MAX77686_REG_STATUS2,
.rtc_i2c_addr = MAX77686_I2C_ADDR_RTC,
.rtc_irq_chip = &max77686_rtc_irq_chip,
+ .regmap_config = &max77686_rtc_regmap_config,
+};
+
+static const struct regmap_config max77620_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_write = true,
};
static const struct max77686_rtc_driver_data max77620_drv_data = {
@@ -202,6 +216,7 @@ static const struct max77686_rtc_driver_data max77620_drv_data = {
.alarm_pending_status_reg = MAX77686_INVALID_REG,
.rtc_i2c_addr = MAX77620_I2C_ADDR_RTC,
.rtc_irq_chip = &max77686_rtc_irq_chip,
+ .regmap_config = &max77620_rtc_regmap_config,
};
static const unsigned int max77802_map[REG_RTC_END] = {
@@ -658,11 +673,6 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
return ret;
}
-static const struct regmap_config max77686_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
{
struct device *parent = info->dev->parent;
@@ -698,7 +708,7 @@ static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
}
info->rtc_regmap = devm_regmap_init_i2c(info->rtc,
- &max77686_rtc_regmap_config);
+ info->drv_data->regmap_config);
if (IS_ERR(info->rtc_regmap)) {
ret = PTR_ERR(info->rtc_regmap);
dev_err(info->dev, "Failed to allocate RTC regmap: %d\n", ret);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index afce2c0b4bd6..d6802e6191cb 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -308,8 +308,10 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx_unlock(mc13xxx);
ret = rtc_register_device(priv->rtc);
- if (ret)
+ if (ret) {
+ mc13xxx_lock(mc13xxx);
goto err_irq_request;
+ }
return 0;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 3040844129ce..5c2ce71aa044 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -316,7 +316,7 @@ static int mpc5121_rtc_probe(struct platform_device *op)
rtc->regs = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(rtc->regs)) {
dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
- return -ENOSYS;
+ return PTR_ERR(rtc->regs);
}
device_init_wakeup(&op->dev, 1);
diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c
index 581b8731fb8a..d5f691c8a035 100644
--- a/drivers/rtc/rtc-mt2712.c
+++ b/drivers/rtc/rtc-mt2712.c
@@ -310,7 +310,6 @@ static const struct rtc_class_ops mt2712_rtc_ops = {
static int mt2712_rtc_probe(struct platform_device *pdev)
{
- struct resource *res;
struct mt2712_rtc *mt2712_rtc;
int ret;
@@ -319,8 +318,7 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
if (!mt2712_rtc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt2712_rtc->base = devm_ioremap_resource(&pdev->dev, res);
+ mt2712_rtc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mt2712_rtc->base))
return PTR_ERR(mt2712_rtc->base);
@@ -328,10 +326,8 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
mt2712_rtc_hw_init(mt2712_rtc);
mt2712_rtc->irq = platform_get_irq(pdev, 0);
- if (mt2712_rtc->irq < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (mt2712_rtc->irq < 0)
return mt2712_rtc->irq;
- }
platform_set_drvdata(pdev, mt2712_rtc);
@@ -356,13 +352,7 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127;
- ret = rtc_register_device(mt2712_rtc->rtc);
- if (ret) {
- dev_err(&pdev->dev, "register rtc device failed\n");
- return ret;
- }
-
- return 0;
+ return rtc_register_device(mt2712_rtc->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index cda238dfe69b..f8b1353777ba 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -9,6 +9,7 @@
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -20,7 +21,7 @@ static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
int ret;
u32 data;
- ret = regmap_write(rtc->regmap, rtc->addr_base + RTC_WRTGR, 1);
+ ret = regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
if (ret < 0)
return ret;
@@ -269,6 +270,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rtc->addr_base = res->start;
+ rtc->data = of_device_get_match_data(&pdev->dev);
+
rtc->irq = platform_get_irq(pdev, 0);
if (rtc->irq < 0)
return rtc->irq;
@@ -325,9 +328,18 @@ static int mt6397_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_rtc_suspend,
mt6397_rtc_resume);
+static const struct mtk_rtc_data mt6358_rtc_data = {
+ .wrtgr = RTC_WRTGR_MT6358,
+};
+
+static const struct mtk_rtc_data mt6397_rtc_data = {
+ .wrtgr = RTC_WRTGR_MT6397,
+};
+
static const struct of_device_id mt6397_rtc_of_match[] = {
- { .compatible = "mediatek,mt6323-rtc", },
- { .compatible = "mediatek,mt6397-rtc", },
+ { .compatible = "mediatek,mt6323-rtc", .data = &mt6397_rtc_data },
+ { .compatible = "mediatek,mt6358-rtc", .data = &mt6358_rtc_data },
+ { .compatible = "mediatek,mt6397-rtc", .data = &mt6397_rtc_data },
{ }
};
MODULE_DEVICE_TABLE(of, mt6397_rtc_of_match);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 4e50d6768f13..9c5670776c68 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -137,8 +137,7 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_wday = buf[PCF2127_REG_DW] & 0x07;
tm->tm_mon = bcd2bin(buf[PCF2127_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
tm->tm_year = bcd2bin(buf[PCF2127_REG_YR]);
- if (tm->tm_year < 70)
- tm->tm_year += 100; /* assume we are in 1970...2069 */
+ tm->tm_year += 100;
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -172,7 +171,7 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
buf[i++] = bin2bcd(tm->tm_mon + 1);
/* year */
- buf[i++] = bin2bcd(tm->tm_year % 100);
+ buf[i++] = bin2bcd(tm->tm_year - 100);
/* write register's data */
err = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_SC, buf, i);
@@ -185,30 +184,35 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-#ifdef CONFIG_RTC_INTF_DEV
static int pcf2127_rtc_ioctl(struct device *dev,
unsigned int cmd, unsigned long arg)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- int touser;
+ int val, touser = 0;
int ret;
switch (cmd) {
case RTC_VL_READ:
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &touser);
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &val);
if (ret)
return ret;
- touser = touser & PCF2127_BIT_CTRL3_BLF ? RTC_VL_BACKUP_LOW : 0;
+ if (val & PCF2127_BIT_CTRL3_BLF)
+ touser |= RTC_VL_BACKUP_LOW;
+
+ if (val & PCF2127_BIT_CTRL3_BF)
+ touser |= RTC_VL_BACKUP_SWITCH;
return put_user(touser, (unsigned int __user *)arg);
+
+ case RTC_VL_CLR:
+ return regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL3,
+ PCF2127_BIT_CTRL3_BF, 0);
+
default:
return -ENOIOCTLCMD;
}
}
-#else
-#define pcf2127_rtc_ioctl NULL
-#endif
static const struct rtc_class_ops pcf2127_rtc_ops = {
.ioctl = pcf2127_rtc_ioctl,
@@ -433,6 +437,9 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return PTR_ERR(pcf2127->rtc);
pcf2127->rtc->ops = &pcf2127_rtc_ops;
+ pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
pcf2127->wdd.parent = dev;
pcf2127->wdd.info = &pcf2127_wdt_info;
@@ -441,6 +448,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
pcf2127->wdd.min_hw_heartbeat_ms = 500;
+ pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
@@ -495,7 +503,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL3,
PCF2127_BIT_CTRL3_BTSE |
- PCF2127_BIT_CTRL3_BF |
PCF2127_BIT_CTRL3_BIE |
PCF2127_BIT_CTRL3_BLIE, 0);
if (ret) {
@@ -636,6 +643,7 @@ static int pcf2127_i2c_probe(struct i2c_client *client,
static const struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
+ .max_register = 0x1d,
};
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
@@ -703,6 +711,7 @@ static int pcf2127_spi_probe(struct spi_device *spi)
.val_bits = 8,
.read_flag_mask = 0xa0,
.write_flag_mask = 0x20,
+ .max_register = 0x1d,
};
struct regmap *regmap;
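The reworked pcf2127 ioctl above now reports both the battery-low and the battery-switch-over flags and lets userspace clear the latched switch-over bit. A minimal userspace sketch of such a consumer follows, assuming an RTC device node at /dev/rtc0; the path and the program are illustrative only and not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	unsigned int flags = 0;
	int fd = open("/dev/rtc0", O_RDONLY);	/* assumed device node */

	if (fd < 0)
		return 1;

	/* RTC_VL_READ now reports RTC_VL_BACKUP_LOW and RTC_VL_BACKUP_SWITCH */
	if (ioctl(fd, RTC_VL_READ, &flags) == 0) {
		if (flags & RTC_VL_BACKUP_LOW)
			printf("backup battery is low\n");
		if (flags & RTC_VL_BACKUP_SWITCH) {
			printf("a switch-over to the backup battery happened\n");
			/* RTC_VL_CLR clears the latched switch-over (BF) flag */
			ioctl(fd, RTC_VL_CLR);
		}
	}

	close(fd);
	return 0;
}
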
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index 24e386ecbc7e..dd1a20977478 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -356,10 +356,8 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
int err;
rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
- if (IS_ERR(rtc)) {
- err = PTR_ERR(rtc);
+ if (!rtc)
return -ENOMEM;
- }
rtc->rn5t618 = rn5t618;
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index a0ddc86c975a..ec84db0b3d7a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -755,6 +755,8 @@ static int rv3028_probe(struct i2c_client *client)
return -ENOMEM;
rv3028->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(rv3028->regmap))
+ return PTR_ERR(rv3028->regmap);
i2c_set_clientdata(client, rv3028);
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 35ee08aa7584..0263d996b8a8 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -148,10 +148,21 @@ static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
static int snvs_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
- unsigned long time = rtc_read_lp_counter(data);
+ unsigned long time;
+ int ret;
+
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+ time = rtc_read_lp_counter(data);
rtc_time64_to_tm(time, tm);
+ if (data->clk)
+ clk_disable(data->clk);
+
return 0;
}
@@ -161,6 +172,12 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long time = rtc_tm_to_time64(tm);
int ret;
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+
/* Disable RTC first */
ret = snvs_rtc_enable(data, false);
if (ret)
@@ -173,6 +190,9 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Enable RTC again */
ret = snvs_rtc_enable(data, true);
+ if (data->clk)
+ clk_disable(data->clk);
+
return ret;
}
@@ -180,6 +200,13 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
u32 lptar, lpsr;
+ int ret;
+
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
regmap_read(data->regmap, data->offset + SNVS_LPTAR, &lptar);
rtc_time64_to_tm(lptar, &alrm->time);
@@ -187,18 +214,33 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
alrm->pending = (lpsr & SNVS_LPSR_LPTA) ? 1 : 0;
+ if (data->clk)
+ clk_disable(data->clk);
+
return 0;
}
static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR,
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
- return rtc_write_sync_lp(data);
+ ret = rtc_write_sync_lp(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ return ret;
}
static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -207,6 +249,12 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned long time = rtc_tm_to_time64(&alrm->time);
int ret;
+ if (data->clk) {
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
+ }
+
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
ret = rtc_write_sync_lp(data);
if (ret)
@@ -216,6 +264,9 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Clear alarm interrupt status bit */
regmap_write(data->regmap, data->offset + SNVS_LPSR, SNVS_LPSR_LPTA);
+ if (data->clk)
+ clk_disable(data->clk);
+
return snvs_rtc_alarm_irq_enable(dev, alrm->enabled);
}
@@ -362,7 +413,7 @@ static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
if (data->clk)
- clk_disable_unprepare(data->clk);
+ clk_disable(data->clk);
return 0;
}
@@ -372,7 +423,7 @@ static int __maybe_unused snvs_rtc_resume_noirq(struct device *dev)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
if (data->clk)
- return clk_prepare_enable(data->clk);
+ return clk_enable(data->clk);
return 0;
}
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index ff6488be385f..c9bc3d4a1e66 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -416,5 +416,5 @@ module_platform_driver(stmp3xxx_rtcdrv);
MODULE_DESCRIPTION("STMP3xxx RTC Driver");
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
- "Wolfram Sang <w.sang@pengutronix.de>");
+ "Wolfram Sang <kernel@pengutronix.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index 23eae4188876..a9235f111e79 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -21,5 +21,5 @@ qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
- vfio_ccw_async.o vfio_ccw_trace.o
+ vfio_ccw_async.o vfio_ccw_trace.o vfio_ccw_chp.o
obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ca73c2e5a8f..c314e9495c1b 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -57,6 +57,7 @@ int chsc_error_from_response(int response)
case 0x0104:
return -EINVAL;
case 0x0004:
+ case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */
return -EOPNOTSUPP;
case 0x000b:
case 0x0107: /* "Channel busy" for the op 0x003d */
@@ -1336,36 +1337,35 @@ out:
EXPORT_SYMBOL_GPL(chsc_scm_info);
/**
- * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
+ * chsc_pnso() - Perform Network-Subchannel Operation
* @schid: id of the subchannel on which PNSO is performed
- * @brinfo_area: request and response block for the operation
+ * @pnso_area: request and response block for the operation
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
- * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
*
* Returns 0 on success.
*/
-int chsc_pnso_brinfo(struct subchannel_id schid,
- struct chsc_pnso_area *brinfo_area,
- struct chsc_brinfo_resume_token resume_token,
- int cnc)
+int chsc_pnso(struct subchannel_id schid,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc)
{
- memset(brinfo_area, 0, sizeof(*brinfo_area));
- brinfo_area->request.length = 0x0030;
- brinfo_area->request.code = 0x003d; /* network-subchannel operation */
- brinfo_area->m = schid.m;
- brinfo_area->ssid = schid.ssid;
- brinfo_area->sch = schid.sch_no;
- brinfo_area->cssid = schid.cssid;
- brinfo_area->oc = 0; /* Store-network-bridging-information list */
- brinfo_area->resume_token = resume_token;
- brinfo_area->n = (cnc != 0);
- if (chsc(brinfo_area))
+ memset(pnso_area, 0, sizeof(*pnso_area));
+ pnso_area->request.length = 0x0030;
+ pnso_area->request.code = 0x003d; /* network-subchannel operation */
+ pnso_area->m = schid.m;
+ pnso_area->ssid = schid.ssid;
+ pnso_area->sch = schid.sch_no;
+ pnso_area->cssid = schid.cssid;
+ pnso_area->oc = 0; /* Store-network-bridging-information list */
+ pnso_area->resume_token = resume_token;
+ pnso_area->n = (cnc != 0);
+ if (chsc(pnso_area))
return -EIO;
- return chsc_error_from_response(brinfo_area->response.code);
+ return chsc_error_from_response(pnso_area->response.code);
}
-EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
int chsc_sgib(u32 origin)
{
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 34de6d77442c..7ecf7e4c402e 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -205,52 +205,10 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
-struct chsc_brinfo_resume_token {
- u64 t1;
- u64 t2;
-} __packed;
-
-struct chsc_brinfo_naihdr {
- struct chsc_brinfo_resume_token resume_token;
- u32:32;
- u32 instance;
- u32:24;
- u8 naids;
- u32 reserved[3];
-} __packed;
-
-struct chsc_pnso_area {
- struct chsc_header request;
- u8:2;
- u8 m:1;
- u8:5;
- u8:2;
- u8 ssid:2;
- u8 fmt:4;
- u16 sch;
- u8:8;
- u8 cssid;
- u16:16;
- u8 oc;
- u32:24;
- struct chsc_brinfo_resume_token resume_token;
- u32 n:1;
- u32:31;
- u32 reserved[3];
- struct chsc_header response;
- u32:32;
- struct chsc_brinfo_naihdr naihdr;
- union {
- struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
- struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
- struct qdio_brinfo_entry_l2 l2[0];
- } entries;
-} __packed __aligned(PAGE_SIZE);
-
-int chsc_pnso_brinfo(struct subchannel_id schid,
- struct chsc_pnso_area *brinfo_area,
- struct chsc_brinfo_resume_token resume_token,
- int cnc);
+int chsc_pnso(struct subchannel_id schid,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc);
int __init chsc_get_cssid(int idx);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ccecf6b9504e..963fcc9054c6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -710,6 +710,29 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+/**
+ * ccw_device_pnso() - Perform Network-Subchannel Operation
+ * @cdev: device on which PNSO is performed
+ * @pnso_area: request and response block for the operation
+ * @resume_token: resume token for multiblock response
+ * @cnc: Boolean change-notification control
+ *
+ * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
+ *
+ * Returns 0 on success.
+ */
+int ccw_device_pnso(struct ccw_device *cdev,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc)
+{
+ struct subchannel_id schid;
+
+ ccw_device_get_schid(cdev, &schid);
+ return chsc_pnso(schid, pnso_area, resume_token, cnc);
+}
+EXPORT_SYMBOL_GPL(ccw_device_pnso);
+
/*
* Allocate zeroed dma coherent 31 bit addressable memory using
* the subchannels dma pool. Maximal size of allocation supported
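The new ccw_device_pnso() wrapper above only resolves the subchannel id and forwards to the renamed chsc_pnso(). A rough caller sketch, assuming the relocated request/response structures are reachable via asm/chsc.h (their new home is part of the wider series, not this hunk) and that cdev is a ccw_device the caller already holds; this is not code from the patch.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/ccwdev.h>
#include <asm/chsc.h>	/* assumed location of struct chsc_pnso_area */

static int example_pnso_query(struct ccw_device *cdev)
{
	struct chsc_pnso_resume_token token = {};	/* zero token: first block */
	struct chsc_pnso_area *area;
	int rc;

	/* the kerneldoc requires a zeroed page for the request/response block */
	area = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	/* cnc=1 asks for change notifications once the list has been stored */
	rc = ccw_device_pnso(cdev, area, token, 1);

	/* a real caller would parse the response and loop on the resume token */
	free_page((unsigned long)area);
	return rc;
}
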
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 77d0ea7b381b..45f9c0736be4 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -59,18 +59,6 @@ static inline int idset_contains(struct idset *set, int ssid, int id)
return test_bit(ssid * set->num_id + id, set->bitmap);
}
-static inline int idset_get_first(struct idset *set, int *ssid, int *id)
-{
- int bitnum;
-
- bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
- if (bitnum >= set->num_ssid * set->num_id)
- return 0;
- *ssid = bitnum / set->num_id;
- *id = bitnum % set->num_id;
- return 1;
-}
-
struct idset *idset_sch_new(void)
{
return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b8453b594679..eb13c479e11d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -221,9 +221,6 @@ struct qdio_q {
*/
int first_to_check;
- /* beginning position for calling the program */
- int first_to_kick;
-
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
@@ -292,6 +289,8 @@ struct qdio_irq {
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
+ unsigned int max_input_qs;
+ unsigned int max_output_qs;
void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
unsigned long poll_state;
@@ -364,16 +363,13 @@ static inline int multicast_outbound(struct qdio_q *q)
extern u64 last_ai_time;
/* prototypes for thin interrupt */
-void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_device(struct qdio_irq *irq_ptr);
void tiqdio_remove_device(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
-int tiqdio_allocate_memory(void);
-void tiqdio_free_memory(void);
-int tiqdio_register_thinints(void);
-void tiqdio_unregister_thinints(void);
+int qdio_thinint_init(void);
+void qdio_thinint_exit(void);
int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
@@ -389,8 +385,10 @@ int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data);
int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
+void qdio_shutdown_irq(struct qdio_irq *irq);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
-void qdio_release_memory(struct qdio_irq *irq_ptr);
+void qdio_free_queues(struct qdio_irq *irq_ptr);
+void qdio_free_async_data(struct qdio_irq *irq_ptr);
int qdio_setup_init(void);
void qdio_setup_exit(void);
int qdio_enable_async_operation(struct qdio_output_q *q);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index bcc3ab14e72d..610c05f59589 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -143,7 +143,7 @@ again:
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
- q->first_to_kick, count, q->irq_ptr->int_parm);
+ q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
@@ -191,7 +191,7 @@ again:
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
- q->first_to_kick, count, q->irq_ptr->int_parm);
+ q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
@@ -438,15 +438,12 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
q->sbal[start]->element[15].sflags);
}
-static inline void inbound_primed(struct qdio_q *q, unsigned int start,
- int count)
+static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
+ int count, bool auto_ack)
{
int new;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
-
- /* for QEBSM the ACK was already set by EQBS */
- if (is_qebsm(q)) {
+ if (auto_ack) {
if (!q->u.in.ack_count) {
q->u.in.ack_count = count;
q->u.in.ack_start = start;
@@ -466,15 +463,14 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start,
* or by the next inbound run.
*/
new = add_buf(start, count - 1);
- if (q->u.in.ack_count) {
- /* reset the previous ACK but first set the new one */
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
- set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
- } else {
- q->u.in.ack_count = 1;
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
- }
+ set_buf_state(q, new, SLSB_P_INPUT_ACK);
+
+ /* delete the previous ACKs */
+ if (q->u.in.ack_count)
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = 1;
q->u.in.ack_start = new;
count--;
if (!count)
@@ -508,19 +504,21 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
switch (state) {
case SLSB_P_INPUT_PRIMED:
- inbound_primed(q, start, count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
+ count);
+
+ inbound_handle_work(q, start, count, is_qebsm(q));
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
return count;
case SLSB_P_INPUT_ERROR:
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
+ count);
+
process_buffer_error(q, start, count);
- /*
- * Interrupts may be avoided as long as the error is present
- * so change the buffer state immediately to avoid starvation.
- */
- set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
+ inbound_handle_work(q, start, count, false);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
@@ -624,10 +622,9 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
return phys_aob;
}
-static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
+static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
+ unsigned int count)
{
- int start = q->first_to_kick;
-
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
@@ -644,7 +641,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
q->irq_ptr->int_parm);
/* for the next time */
- q->first_to_kick = add_buf(start, count);
q->qdio_error = 0;
}
@@ -668,9 +664,9 @@ static void __qdio_inbound_processing(struct qdio_q *q)
if (count == 0)
return;
+ qdio_kick_handler(q, start, count);
start = add_buf(start, count);
q->first_to_check = start;
- qdio_kick_handler(q, count);
if (!qdio_inbound_q_done(q, start)) {
/* means poll time is not yet over */
@@ -826,7 +822,7 @@ static void __qdio_outbound_processing(struct qdio_q *q)
count = qdio_outbound_q_moved(q, start);
if (count) {
q->first_to_check = add_buf(start, count);
- qdio_kick_handler(q, count);
+ qdio_kick_handler(q, start, count);
}
if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
@@ -880,47 +876,17 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
qdio_tasklet_schedule(out);
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+void tiqdio_inbound_processing(unsigned long data)
{
- unsigned int start = q->first_to_check;
- int count;
+ struct qdio_q *q = (struct qdio_q *)data;
- qperf_inc(q, tasklet_inbound);
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
qdio_sync_queues(q);
/* The interrupt could be caused by a PCI request: */
qdio_check_outbound_pci_queues(q->irq_ptr);
- count = qdio_inbound_q_moved(q, start);
- if (count == 0)
- return;
-
- start = add_buf(start, count);
- q->first_to_check = start;
- qdio_kick_handler(q, count);
-
- if (!qdio_inbound_q_done(q, start)) {
- qperf_inc(q, tasklet_inbound_resched);
- if (!qdio_tasklet_schedule(q))
- return;
- }
-
- qdio_stop_polling(q);
- /*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
- */
- if (!qdio_inbound_q_done(q, start)) {
- qperf_inc(q, tasklet_inbound_resched2);
- qdio_tasklet_schedule(q);
- }
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
- __tiqdio_inbound_processing(q);
+ __qdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
@@ -977,7 +943,6 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
- int count;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
@@ -992,9 +957,8 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
goto no_handler;
}
- count = sub_buf(q->first_to_check, q->first_to_kick);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
- q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+ q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/*
@@ -1154,35 +1118,27 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
/* cleanup subchannel */
spin_lock_irq(get_ccwdev_lock(cdev));
-
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
else
/* default behaviour is halt */
rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
+ spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4d", rc);
goto no_cleanup;
}
- qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
- spin_unlock_irq(get_ccwdev_lock(cdev));
wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
irq_ptr->state == QDIO_IRQ_STATE_ERR,
10 * HZ);
- spin_lock_irq(get_ccwdev_lock(cdev));
no_cleanup:
qdio_shutdown_thinint(irq_ptr);
-
- /* restore interrupt handler */
- if ((void *)cdev->handler == (void *)qdio_int_handler) {
- cdev->handler = irq_ptr->orig_handler;
- cdev->private->intparm = 0;
- }
- spin_unlock_irq(get_ccwdev_lock(cdev));
+ qdio_shutdown_irq(irq_ptr);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
@@ -1213,7 +1169,11 @@ int qdio_free(struct ccw_device *cdev)
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_release_memory(irq_ptr);
+ qdio_free_async_data(irq_ptr);
+ qdio_free_queues(irq_ptr);
+ free_page((unsigned long) irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
@@ -1229,6 +1189,7 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
{
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
+ int rc = -ENOMEM;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
@@ -1240,12 +1201,12 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr)
- goto out_err;
+ return -ENOMEM;
irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
if (qdio_allocate_dbf(irq_ptr))
- goto out_rel;
+ goto err_dbf;
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
no_output_qs);
@@ -1258,24 +1219,30 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
*/
irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
if (!irq_ptr->chsc_page)
- goto out_rel;
+ goto err_chsc;
/* qdr is used in ccw1.cda which is u32 */
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
- goto out_rel;
+ goto err_qdr;
- if (qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs))
- goto out_rel;
+ rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
+ if (rc)
+ goto err_queues;
INIT_LIST_HEAD(&irq_ptr->entry);
cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
-out_rel:
- qdio_release_memory(irq_ptr);
-out_err:
- return -ENOMEM;
+
+err_queues:
+ free_page((unsigned long) irq_ptr->qdr);
+err_qdr:
+ free_page(irq_ptr->chsc_page);
+err_chsc:
+err_dbf:
+ free_page((unsigned long) irq_ptr);
+ return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
@@ -1338,6 +1305,10 @@ int qdio_establish(struct ccw_device *cdev,
if (!irq_ptr)
return -ENODEV;
+ if (init_data->no_input_qs > irq_ptr->max_input_qs ||
+ init_data->no_output_qs > irq_ptr->max_output_qs)
+ return -EINVAL;
+
if ((init_data->no_input_qs && !init_data->input_handler) ||
(init_data->no_output_qs && !init_data->output_handler))
return -EINVAL;
@@ -1352,8 +1323,8 @@ int qdio_establish(struct ccw_device *cdev,
rc = qdio_establish_thinint(irq_ptr);
if (rc) {
+ qdio_shutdown_irq(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
}
@@ -1371,8 +1342,9 @@ int qdio_establish(struct ccw_device *cdev,
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
+ qdio_shutdown_thinint(irq_ptr);
+ qdio_shutdown_irq(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
- qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
return rc;
}
@@ -1460,25 +1432,6 @@ out:
}
EXPORT_SYMBOL_GPL(qdio_activate);
-static inline int buf_in_between(int bufnr, int start, int count)
-{
- int end = add_buf(start, count);
-
- if (end > start) {
- if (bufnr >= start && bufnr < end)
- return 1;
- else
- return 0;
- }
-
- /* wrap-around case */
- if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
- (bufnr < end))
- return 1;
- else
- return 0;
-}
-
/**
* handle_inbound - reset processed input buffers
* @q: queue containing the buffers
@@ -1489,36 +1442,18 @@ static inline int buf_in_between(int bufnr, int start, int count)
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
- int diff;
+ int overlap;
qperf_inc(q, inbound_call);
- if (!q->u.in.ack_count)
- goto set;
-
- /* protect against stop polling setting an ACK for an emptied slsb */
- if (count == QDIO_MAX_BUFFERS_PER_Q) {
- /* overwriting everything, just delete polling status */
- q->u.in.ack_count = 0;
- goto set;
- } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
- if (is_qebsm(q)) {
- /* partial overwrite, just update ack_start */
- diff = add_buf(bufnr, count);
- diff = sub_buf(diff, q->u.in.ack_start);
- q->u.in.ack_count -= diff;
- if (q->u.in.ack_count <= 0) {
- q->u.in.ack_count = 0;
- goto set;
- }
- q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
- } else {
- /* the only ACK will be deleted */
- q->u.in.ack_count = 0;
- }
+ /* If any ACKed SBALs are returned to HW, adjust ACK tracking: */
+ overlap = min(count - sub_buf(q->u.in.ack_start, bufnr),
+ q->u.in.ack_count);
+ if (overlap > 0) {
+ q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap);
+ q->u.in.ack_count -= overlap;
}
-set:
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
atomic_add(count, &q->nr_buf_used);
@@ -1627,7 +1562,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
EXPORT_SYMBOL_GPL(do_QDIO);
/**
- * qdio_start_irq - process input buffers
+ * qdio_start_irq - enable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
*
* Return codes
@@ -1770,94 +1705,6 @@ int qdio_stop_irq(struct ccw_device *cdev)
}
EXPORT_SYMBOL(qdio_stop_irq);
-/**
- * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
- * @schid: Subchannel ID.
- * @cnc: Boolean Change-Notification Control
- * @response: Response code will be stored at this address
- * @cb: Callback function will be executed for each element
- * of the address list
- * @priv: Pointer to pass to the callback function.
- *
- * Performs "Store-network-bridging-information list" operation and calls
- * the callback function for every entry in the list. If "change-
- * notification-control" is set, further changes in the address list
- * will be reported via the IPA command.
- */
-int qdio_pnso_brinfo(struct subchannel_id schid,
- int cnc, u16 *response,
- void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
- void *entry),
- void *priv)
-{
- struct chsc_pnso_area *rr;
- int rc;
- u32 prev_instance = 0;
- int isfirstblock = 1;
- int i, size, elems;
-
- rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
- if (rr == NULL)
- return -ENOMEM;
- do {
- /* on the first iteration, naihdr.resume_token will be zero */
- rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
- if (rc != 0 && rc != -EBUSY)
- goto out;
- if (rr->response.code != 1) {
- rc = -EIO;
- continue;
- } else
- rc = 0;
-
- if (cb == NULL)
- continue;
-
- size = rr->naihdr.naids;
- elems = (rr->response.length -
- sizeof(struct chsc_header) -
- sizeof(struct chsc_brinfo_naihdr)) /
- size;
-
- if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
- /* Inform the caller that they need to scrap */
- /* the data that was already reported via cb */
- rc = -EAGAIN;
- break;
- }
- isfirstblock = 0;
- prev_instance = rr->naihdr.instance;
- for (i = 0; i < elems; i++)
- switch (size) {
- case sizeof(struct qdio_brinfo_entry_l3_ipv6):
- (*cb)(priv, l3_ipv6_addr,
- &rr->entries.l3_ipv6[i]);
- break;
- case sizeof(struct qdio_brinfo_entry_l3_ipv4):
- (*cb)(priv, l3_ipv4_addr,
- &rr->entries.l3_ipv4[i]);
- break;
- case sizeof(struct qdio_brinfo_entry_l2):
- (*cb)(priv, l2_addr_lnid,
- &rr->entries.l2[i]);
- break;
- default:
- WARN_ON_ONCE(1);
- rc = -EIO;
- goto out;
- }
- } while (rr->response.code == 0x0107 || /* channel busy */
- (rr->response.code == 1 && /* list stored */
- /* resume token is non-zero => list incomplete */
- (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
- (*response) = rr->response.code;
-
-out:
- free_page((unsigned long)rr);
- return rc;
-}
-EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
-
static int __init init_QDIO(void)
{
int rc;
@@ -1868,16 +1715,11 @@ static int __init init_QDIO(void)
rc = qdio_setup_init();
if (rc)
goto out_debug;
- rc = tiqdio_allocate_memory();
+ rc = qdio_thinint_init();
if (rc)
goto out_cache;
- rc = tiqdio_register_thinints();
- if (rc)
- goto out_ti;
return 0;
-out_ti:
- tiqdio_free_memory();
out_cache:
qdio_setup_exit();
out_debug:
@@ -1887,8 +1729,7 @@ out_debug:
static void __exit exit_QDIO(void)
{
- tiqdio_unregister_thinints();
- tiqdio_free_memory();
+ qdio_thinint_exit();
qdio_setup_exit();
qdio_debug_exit();
}
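The new overlap computation in handle_inbound() replaces the buf_in_between() special cases with plain ring arithmetic. Below is a standalone worked example of that arithmetic, assuming the usual 128-entry queue; the macros mirror the kernel's add_buf()/sub_buf() helpers, but the program itself is only an illustration, not kernel code.

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
#define add_buf(bufnr, inc)	(((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec)	(((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)

int main(void)
{
	/* the driver returns 16 buffers starting at 120, i.e. 120..127 and 0..7 */
	int bufnr = 120, count = 16;
	/* four buffers are currently ACKed, starting at 126, i.e. 126..1 */
	int ack_start = 126, ack_count = 4;

	/* how many ACKed buffers fall inside the returned range? */
	int overlap = count - sub_buf(ack_start, bufnr);

	if (overlap > ack_count)
		overlap = ack_count;
	if (overlap > 0) {
		ack_start = add_buf(ack_start, overlap);
		ack_count -= overlap;
	}

	/* prints "ack_start=2 ack_count=0": all four ACKs were handed back */
	printf("ack_start=%d ack_count=%d\n", ack_start, ack_count);
	return 0;
}
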
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 3083edd61f0c..2c5cc6ec668e 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -135,6 +135,27 @@ output:
}
}
+static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
+{
+ struct qdio_q *q;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ q = queues[i];
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+}
+
+void qdio_free_queues(struct qdio_irq *irq_ptr)
+{
+ __qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
+ irq_ptr->max_input_qs = 0;
+
+ __qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
+ irq_ptr->max_output_qs = 0;
+}
+
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
struct qdio_q *q;
@@ -142,12 +163,15 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
for (i = 0; i < nr_queues; i++) {
q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
- if (!q)
+ if (!q) {
+ __qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
+ }
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
+ __qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
irq_ptr_qs[i] = q;
@@ -162,8 +186,16 @@ int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs
rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
if (rc)
return rc;
+
rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
- return rc;
+ if (rc) {
+ __qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
+ return rc;
+ }
+
+ irq_ptr->max_input_qs = nr_input_qs;
+ irq_ptr->max_output_qs = nr_output_qs;
+ return 0;
}
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
@@ -347,45 +379,28 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
-void qdio_release_memory(struct qdio_irq *irq_ptr)
+void qdio_free_async_data(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
- /*
- * Must check queue array manually since irq_ptr->nr_input_queues /
- * irq_ptr->nr_input_queues may not yet be set.
- */
- for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
- q = irq_ptr->input_qs[i];
- if (q) {
- free_page((unsigned long) q->slib);
- kmem_cache_free(qdio_q_cache, q);
- }
- }
- for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ for (i = 0; i < irq_ptr->max_output_qs; i++) {
q = irq_ptr->output_qs[i];
- if (q) {
- if (q->u.out.use_cq) {
- int n;
-
- for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
- struct qaob *aob = q->u.out.aobs[n];
- if (aob) {
- qdio_release_aob(aob);
- q->u.out.aobs[n] = NULL;
- }
- }
+ if (q->u.out.use_cq) {
+ unsigned int n;
+
+ for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
+ struct qaob *aob = q->u.out.aobs[n];
- qdio_disable_async_operation(&q->u.out);
+ if (aob) {
+ qdio_release_aob(aob);
+ q->u.out.aobs[n] = NULL;
+ }
}
- free_page((unsigned long) q->slib);
- kmem_cache_free(qdio_q_cache, q);
+
+ qdio_disable_async_operation(&q->u.out);
}
}
- free_page((unsigned long) irq_ptr->qdr);
- free_page(irq_ptr->chsc_page);
- free_page((unsigned long) irq_ptr);
}
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
@@ -480,7 +495,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
}
setup_qib(irq_ptr, init_data);
- qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
init_data->qib_param_field,
init_data->input_slib_elements,
@@ -491,6 +505,12 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+ /* set our IRQ handler */
+ spin_lock_irq(get_ccwdev_lock(cdev));
+ irq_ptr->orig_handler = cdev->handler;
+ cdev->handler = qdio_int_handler;
+ spin_unlock_irq(get_ccwdev_lock(cdev));
+
/* get qdio commands */
ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
@@ -506,12 +526,18 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
}
irq_ptr->aqueue = *ciw;
- /* set new interrupt handler */
+ return 0;
+}
+
+void qdio_shutdown_irq(struct qdio_irq *irq)
+{
+ struct ccw_device *cdev = irq->cdev;
+
+ /* restore IRQ handler */
spin_lock_irq(get_ccwdev_lock(cdev));
- irq_ptr->orig_handler = cdev->handler;
- cdev->handler = qdio_int_handler;
+ cdev->handler = irq->orig_handler;
+ cdev->private->intparm = 0;
spin_unlock_irq(get_ccwdev_lock(cdev));
- return 0;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ae50373617cd..7a440e4328cd 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -197,47 +197,21 @@ out:
return rc;
}
-/* allocate non-shared indicators and shared indicator */
-int __init tiqdio_allocate_memory(void)
-{
- q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
- sizeof(struct indicator_t),
- GFP_KERNEL);
- if (!q_indicators)
- return -ENOMEM;
- return 0;
-}
-
-void tiqdio_free_memory(void)
-{
- kfree(q_indicators);
-}
-
-int __init tiqdio_register_thinints(void)
+int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
int rc;
- rc = register_adapter_interrupt(&tiqdio_airq);
- if (rc) {
- DBF_EVENT("RTI:%x", rc);
- return rc;
- }
- return 0;
-}
-
-int qdio_establish_thinint(struct qdio_irq *irq_ptr)
-{
if (!is_thinint_irq(irq_ptr))
return 0;
- return set_subchannel_ind(irq_ptr, 0);
-}
-void qdio_setup_thinint(struct qdio_irq *irq_ptr)
-{
- if (!is_thinint_irq(irq_ptr))
- return;
irq_ptr->dsci = get_indicator();
DBF_HEX(&irq_ptr->dsci, sizeof(void *));
+
+ rc = set_subchannel_ind(irq_ptr, 0);
+ if (rc)
+ put_indicator(irq_ptr->dsci);
+
+ return rc;
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
@@ -250,8 +224,27 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
put_indicator(irq_ptr->dsci);
}
-void __exit tiqdio_unregister_thinints(void)
+int __init qdio_thinint_init(void)
+{
+ int rc;
+
+ q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
+ GFP_KERNEL);
+ if (!q_indicators)
+ return -ENOMEM;
+
+ rc = register_adapter_interrupt(&tiqdio_airq);
+ if (rc) {
+ DBF_EVENT("RTI:%x", rc);
+ kfree(q_indicators);
+ return rc;
+ }
+ return 0;
+}
+
+void __exit qdio_thinint_exit(void)
{
WARN_ON(!list_empty(&tiq_list));
unregister_adapter_interrupt(&tiqdio_airq);
+ kfree(q_indicators);
}
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
new file mode 100644
index 000000000000..a646fc81c872
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Channel path related status regions for vfio_ccw
+ *
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Farhan Ali <alifm@linux.ibm.com>
+ * Eric Farman <farman@linux.ibm.com>
+ */
+
+#include <linux/vfio.h>
+#include "vfio_ccw_private.h"
+
+static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_schib_region *region;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+
+ if (cio_update_schib(private->sch)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ memcpy(region, &private->sch->schib, sizeof(*region));
+
+ if (copy_to_user(buf, (void *)region + pos, count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&private->io_mutex);
+ return ret;
+}
+
+static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
+ .read = vfio_ccw_schib_region_read,
+ .write = vfio_ccw_schib_region_write,
+ .release = vfio_ccw_schib_region_release,
+};
+
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_SCHIB,
+ &vfio_ccw_schib_region_ops,
+ sizeof(struct ccw_schib_region),
+ VFIO_REGION_INFO_FLAG_READ,
+ private->schib_region);
+}
+
+static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+ loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+ struct ccw_crw_region *region;
+ struct vfio_ccw_crw *crw;
+ int ret;
+
+ if (pos + count > sizeof(*region))
+ return -EINVAL;
+
+ crw = list_first_entry_or_null(&private->crw,
+ struct vfio_ccw_crw, next);
+
+ if (crw)
+ list_del(&crw->next);
+
+ mutex_lock(&private->io_mutex);
+ region = private->region[i].data;
+
+ if (crw)
+ memcpy(&region->crw, &crw->crw, sizeof(region->crw));
+
+ if (copy_to_user(buf, (void *)region + pos, count))
+ ret = -EFAULT;
+ else
+ ret = count;
+
+ region->crw = 0;
+
+ mutex_unlock(&private->io_mutex);
+
+ kfree(crw);
+
+ /* Notify the guest if more CRWs are on our queue */
+ if (!list_empty(&private->crw) && private->crw_trigger)
+ eventfd_signal(private->crw_trigger, 1);
+
+ return ret;
+}
+
+static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
+ const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
+ struct vfio_ccw_region *region)
+{
+
+}
+
+static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
+ .read = vfio_ccw_crw_region_read,
+ .write = vfio_ccw_crw_region_write,
+ .release = vfio_ccw_crw_region_release,
+};
+
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
+{
+ return vfio_ccw_register_dev_region(private,
+ VFIO_REGION_SUBTYPE_CCW_CRW,
+ &vfio_ccw_crw_region_ops,
+ sizeof(struct ccw_crw_region),
+ VFIO_REGION_INFO_FLAG_READ,
+ private->crw_region);
+}
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 3645d1720c4b..b9febc581b1f 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -8,6 +8,7 @@
* Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
*/
+#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
@@ -625,23 +626,27 @@ static int ccwchain_fetch_one(struct ccwchain *chain,
* the target channel program from @orb->cmd.iova to the new ccwchain(s).
*
* Limitations:
- * 1. Supports only prefetch enabled mode.
- * 2. Supports idal(c64) ccw chaining.
- * 3. Supports 4k idaw.
+ * 1. Supports idal(c64) ccw chaining.
+ * 2. Supports 4k idaw.
*
* Returns:
* %0 on success and a negative error value on failure.
*/
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
+ /* custom ratelimit used to avoid flood during guest IPL */
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
int ret;
/*
- * XXX:
- * Only support prefetch enable mode now.
+ * We only support prefetching the channel program. We assume all channel
+ * programs executed by supported guests likewise support prefetching.
+ * Executing a channel program that does not specify prefetching will
+ * typically not cause an error, but a warning is issued to help identify
+ * the problem if something does break.
*/
- if (!orb->cmd.pfch)
- return -EOPNOTSUPP;
+ if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
+ dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB");
INIT_LIST_HEAD(&cp->ccwchain_list);
memcpy(&cp->orb, orb, sizeof(*orb));
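For reference, the hunk above uses a driver-local ratelimit state (5-second interval, burst of 1), as the comment notes, to avoid flooding the log during guest IPL. A minimal sketch of that generic include/linux/ratelimit.h pattern, with purely hypothetical names and not taken from this series, might look like:

        #include <linux/device.h>
        #include <linux/ratelimit.h>

        static void my_warn_unusual_orb(struct device *dev)
        {
                /* allow at most one warning every 5 seconds (5 * HZ jiffies) */
                static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 1);

                /* __ratelimit() returns nonzero while the budget lasts */
                if (__ratelimit(&rs))
                        dev_warn(dev, "unusual ORB flags seen, continuing anyway\n");
        }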
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 339a6bc0339b..8c625b530035 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -19,6 +19,7 @@
#include <asm/isc.h>
+#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"
@@ -26,6 +27,8 @@
struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
+static struct kmem_cache *vfio_ccw_schib_region;
+static struct kmem_cache *vfio_ccw_crw_region;
debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
@@ -105,6 +108,16 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
eventfd_signal(private->io_trigger, 1);
}
+static void vfio_ccw_crw_todo(struct work_struct *work)
+{
+ struct vfio_ccw_private *private;
+
+ private = container_of(work, struct vfio_ccw_private, crw_work);
+
+ if (!list_empty(&private->crw) && private->crw_trigger)
+ eventfd_signal(private->crw_trigger, 1);
+}
+
/*
* Css driver callbacks
*/
@@ -116,6 +129,18 @@ static void vfio_ccw_sch_irq(struct subchannel *sch)
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
+static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
+{
+ if (private->crw_region)
+ kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
+ if (private->schib_region)
+ kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+ if (private->cmd_region)
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ if (private->io_region)
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
+}
+
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
@@ -147,6 +172,18 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (!private->cmd_region)
goto out_free;
+ private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!private->schib_region)
+ goto out_free;
+
+ private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!private->crw_region)
+ goto out_free;
+
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
mutex_init(&private->io_mutex);
@@ -159,7 +196,9 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_free;
+ INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+ INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
atomic_set(&private->avail, 1);
private->state = VFIO_CCW_STATE_STANDBY;
@@ -181,10 +220,7 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
- if (private->cmd_region)
- kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
- if (private->io_region)
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ vfio_ccw_free_regions(private);
kfree(private->cp.guest_cp);
kfree(private);
return ret;
@@ -193,15 +229,20 @@ out_free:
static int vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ struct vfio_ccw_crw *crw, *temp;
vfio_ccw_sch_quiesce(sch);
+ list_for_each_entry_safe(crw, temp, &private->crw, next) {
+ list_del(&crw->next);
+ kfree(crw);
+ }
+
vfio_ccw_mdev_unreg(sch);
dev_set_drvdata(&sch->dev, NULL);
- kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ vfio_ccw_free_regions(private);
kfree(private->cp.guest_cp);
kfree(private);
@@ -258,6 +299,83 @@ out_unlock:
return rc;
}
+static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
+ unsigned int rsc,
+ unsigned int erc,
+ unsigned int rsid)
+{
+ struct vfio_ccw_crw *crw;
+
+ /*
+ * If unable to allocate a CRW, just drop the event and
+ * carry on. The guest will either see a later one or
+ * learn when it issues its own store subchannel.
+ */
+ crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
+ if (!crw)
+ return;
+
+ /*
+ * Build the CRW based on the inputs given to us.
+ */
+ crw->crw.rsc = rsc;
+ crw->crw.erc = erc;
+ crw->crw.rsid = rsid;
+
+ list_add_tail(&crw->next, &private->crw);
+ queue_work(vfio_ccw_work_q, &private->crw_work);
+}
+
+static int vfio_ccw_chp_event(struct subchannel *sch,
+ struct chp_link *link, int event)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ int mask = chp_ssd_get_mask(&sch->ssd_info, link);
+ int retry = 255;
+
+ if (!private || !mask)
+ return 0;
+
+ trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
+ VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
+ mdev_uuid(private->mdev), sch->schid.cssid,
+ sch->schid.ssid, sch->schid.sch_no,
+ mask, event);
+
+ if (cio_update_schib(sch))
+ return -ENODEV;
+
+ switch (event) {
+ case CHP_VARY_OFF:
+ /* Path logically turned off */
+ sch->opm &= ~mask;
+ sch->lpm &= ~mask;
+ if (sch->schib.pmcw.lpum & mask)
+ cio_cancel_halt_clear(sch, &retry);
+ break;
+ case CHP_OFFLINE:
+ /* Path is gone */
+ if (sch->schib.pmcw.lpum & mask)
+ cio_cancel_halt_clear(sch, &retry);
+ vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
+ link->chpid.id);
+ break;
+ case CHP_VARY_ON:
+ /* Path logically turned on */
+ sch->opm |= mask;
+ sch->lpm |= mask;
+ break;
+ case CHP_ONLINE:
+ /* Path became available */
+ sch->lpm |= mask & sch->opm;
+ vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
+ link->chpid.id);
+ break;
+ }
+
+ return 0;
+}
+
static struct css_device_id vfio_ccw_sch_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
@@ -275,6 +393,7 @@ static struct css_driver vfio_ccw_sch_driver = {
.remove = vfio_ccw_sch_remove,
.shutdown = vfio_ccw_sch_shutdown,
.sch_event = vfio_ccw_sch_event,
+ .chp_event = vfio_ccw_chp_event,
};
static int __init vfio_ccw_debug_init(void)
@@ -304,6 +423,14 @@ static void vfio_ccw_debug_exit(void)
debug_unregister(vfio_ccw_debug_trace_id);
}
+static void vfio_ccw_destroy_regions(void)
+{
+ kmem_cache_destroy(vfio_ccw_crw_region);
+ kmem_cache_destroy(vfio_ccw_schib_region);
+ kmem_cache_destroy(vfio_ccw_cmd_region);
+ kmem_cache_destroy(vfio_ccw_io_region);
+}
+
static int __init vfio_ccw_sch_init(void)
{
int ret;
@@ -336,6 +463,26 @@ static int __init vfio_ccw_sch_init(void)
goto out_err;
}
+ vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
+ sizeof(struct ccw_schib_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_schib_region), NULL);
+
+ if (!vfio_ccw_schib_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
+ sizeof(struct ccw_crw_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_crw_region), NULL);
+
+ if (!vfio_ccw_crw_region) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
@@ -346,8 +493,7 @@ static int __init vfio_ccw_sch_init(void)
return ret;
out_err:
- kmem_cache_destroy(vfio_ccw_cmd_region);
- kmem_cache_destroy(vfio_ccw_io_region);
+ vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
return ret;
@@ -357,8 +503,7 @@ static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC);
- kmem_cache_destroy(vfio_ccw_io_region);
- kmem_cache_destroy(vfio_ccw_cmd_region);
+ vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
}
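The vfio_ccw_queue_crw() helper added above follows a common allocate-queue-defer pattern: allocate with GFP_ATOMIC because the caller may not be able to sleep, drop the event on allocation failure, append to a list, and let a workqueue item signal userspace later. A hedged, stand-alone sketch of the same pattern (all names hypothetical, not part of this patch):

        #include <linux/list.h>
        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct my_event {
                struct list_head next;
                u32 payload;
        };

        struct my_dev {
                struct list_head events;        /* pending events for userspace */
                struct work_struct event_work;  /* worker signals the eventfd */
        };

        static void my_queue_event(struct my_dev *dev, u32 payload)
        {
                struct my_event *ev;

                /* may run in non-sleeping context, hence GFP_ATOMIC */
                ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
                if (!ev)
                        return; /* drop the event, as vfio_ccw_queue_crw() does */

                ev->payload = payload;
                list_add_tail(&ev->next, &dev->events);
                schedule_work(&dev->event_work);
        }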
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index f0d71ab77c50..8b3ed5b45277 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -172,8 +172,22 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &private->nb);
+ goto out_unregister;
+
+ ret = vfio_ccw_register_schib_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ ret = vfio_ccw_register_crw_dev_regions(private);
+ if (ret)
+ goto out_unregister;
+
+ return ret;
+
+out_unregister:
+ vfio_ccw_unregister_dev_regions(private);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ &private->nb);
return ret;
}
@@ -181,7 +195,6 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
struct vfio_ccw_private *private =
dev_get_drvdata(mdev_parent_dev(mdev));
- int i;
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
@@ -191,15 +204,9 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
}
cp_free(&private->cp);
+ vfio_ccw_unregister_dev_regions(private);
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
&private->nb);
-
- for (i = 0; i < private->num_regions; i++)
- private->region[i].ops->release(private, &private->region[i]);
-
- private->num_regions = 0;
- kfree(private->region);
- private->region = NULL;
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -384,17 +391,22 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
- if (info->index != VFIO_CCW_IO_IRQ_INDEX)
+ switch (info->index) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ info->count = 1;
+ info->flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
+ default:
return -EINVAL;
-
- info->count = 1;
- info->flags = VFIO_IRQ_INFO_EVENTFD;
+ }
return 0;
}
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
uint32_t flags,
+ uint32_t index,
void __user *data)
{
struct vfio_ccw_private *private;
@@ -404,7 +416,17 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- ctx = &private->io_trigger;
+
+ switch (index) {
+ case VFIO_CCW_IO_IRQ_INDEX:
+ ctx = &private->io_trigger;
+ break;
+ case VFIO_CCW_CRW_IRQ_INDEX:
+ ctx = &private->crw_trigger;
+ break;
+ default:
+ return -EINVAL;
+ }
switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
case VFIO_IRQ_SET_DATA_NONE:
@@ -482,6 +504,17 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
return 0;
}
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
+{
+ int i;
+
+ for (i = 0; i < private->num_regions; i++)
+ private->region[i].ops->release(private, &private->region[i]);
+ private->num_regions = 0;
+ kfree(private->region);
+ private->region = NULL;
+}
+
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
unsigned int cmd,
unsigned long arg)
@@ -565,7 +598,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
return ret;
data = (void __user *)(arg + minsz);
- return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
+ return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
}
case VFIO_DEVICE_RESET:
return vfio_ccw_mdev_reset(mdev);
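With the set_irqs path above now keyed on hdr.index, userspace can attach a separate eventfd to the new CRW index. A hedged userspace sketch (not part of this patch; it assumes uapi headers that already define VFIO_CCW_CRW_IRQ_INDEX):

        #include <string.h>
        #include <sys/eventfd.h>
        #include <sys/ioctl.h>
        #include <linux/vfio.h>

        static int arm_crw_eventfd(int device_fd)
        {
                /* vfio_irq_set carries the eventfd in its trailing data[] */
                char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
                struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
                int efd = eventfd(0, 0);

                if (efd < 0)
                        return -1;

                memset(buf, 0, sizeof(buf));
                set->argsz = sizeof(buf);
                set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
                set->index = VFIO_CCW_CRW_IRQ_INDEX;
                set->start = 0;
                set->count = 1;
                memcpy(set->data, &efd, sizeof(efd));

                return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
        }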
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 9b9bb4982972..8723156b29ea 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -17,6 +17,7 @@
#include <linux/eventfd.h>
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
+#include <asm/crw.h>
#include <asm/debug.h>
#include "css.h"
@@ -53,8 +54,16 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
unsigned int subtype,
const struct vfio_ccw_regops *ops,
size_t size, u32 flags, void *data);
+void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private);
int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private);
+int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private);
+
+struct vfio_ccw_crw {
+ struct list_head next;
+ struct crw crw;
+};
/**
* struct vfio_ccw_private
@@ -68,6 +77,8 @@ int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
* @io_mutex: protect against concurrent update of I/O regions
* @region: additional regions for other subchannel operations
* @cmd_region: MMIO region for asynchronous I/O commands other than START
+ * @schib_region: MMIO region for SCHIB information
+ * @crw_region: MMIO region for getting channel report words
* @num_regions: number of additional regions
* @cp: channel program for the current I/O operation
* @irb: irb info received from interrupt
@@ -86,14 +97,19 @@ struct vfio_ccw_private {
struct mutex io_mutex;
struct vfio_ccw_region *region;
struct ccw_cmd_region *cmd_region;
+ struct ccw_schib_region *schib_region;
+ struct ccw_crw_region *crw_region;
int num_regions;
struct channel_program cp;
struct irb irb;
union scsw scsw;
+ struct list_head crw;
struct eventfd_ctx *io_trigger;
+ struct eventfd_ctx *crw_trigger;
struct work_struct io_work;
+ struct work_struct crw_work;
} __aligned(8);
extern int vfio_ccw_mdev_reg(struct subchannel *sch);
diff --git a/drivers/s390/cio/vfio_ccw_trace.c b/drivers/s390/cio/vfio_ccw_trace.c
index 8c671d2519f6..4a0205905afc 100644
--- a/drivers/s390/cio/vfio_ccw_trace.c
+++ b/drivers/s390/cio/vfio_ccw_trace.c
@@ -9,6 +9,7 @@
#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"
+EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_chp_event);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index f5d31887d413..62fb30598d47 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -17,6 +17,36 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(vfio_ccw_chp_event,
+ TP_PROTO(struct subchannel_id schid,
+ int mask,
+ int event),
+ TP_ARGS(schid, mask, event),
+
+ TP_STRUCT__entry(
+ __field(u8, cssid)
+ __field(u8, ssid)
+ __field(u16, sch_no)
+ __field(int, mask)
+ __field(int, event)
+ ),
+
+ TP_fast_assign(
+ __entry->cssid = schid.cssid;
+ __entry->ssid = schid.ssid;
+ __entry->sch_no = schid.sch_no;
+ __entry->mask = mask;
+ __entry->event = event;
+ ),
+
+ TP_printk("schid=%x.%x.%04x mask=0x%x event=%d",
+ __entry->cssid,
+ __entry->ssid,
+ __entry->sch_no,
+ __entry->mask,
+ __entry->event)
+);
+
TRACE_EVENT(vfio_ccw_fsm_async_request,
TP_PROTO(struct subchannel_id schid,
int command,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 35064443e748..e71ca4a719a5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -62,8 +62,10 @@ MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
static struct device *ap_root_device;
-DEFINE_SPINLOCK(ap_list_lock);
-LIST_HEAD(ap_card_list);
+/* Hashtable of all queue devices on the AP bus */
+DEFINE_HASHTABLE(ap_queues, 8);
+/* lock used for the ap_queues hashtable */
+DEFINE_SPINLOCK(ap_queues_lock);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
@@ -414,7 +416,7 @@ static void ap_interrupt_handler(struct airq_struct *airq, bool floating)
*/
static void ap_tasklet_fn(unsigned long dummy)
{
- struct ap_card *ac;
+ int bkt;
struct ap_queue *aq;
enum ap_wait wait = AP_WAIT_NONE;
@@ -425,34 +427,30 @@ static void ap_tasklet_fn(unsigned long dummy)
if (ap_using_interrupts())
xchg(ap_airq.lsi_ptr, 0);
- spin_lock_bh(&ap_list_lock);
- for_each_ap_card(ac) {
- for_each_ap_queue(aq, ac) {
- spin_lock_bh(&aq->lock);
- wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
- spin_unlock_bh(&aq->lock);
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ spin_lock_bh(&aq->lock);
+ wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
}
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
ap_wait(wait);
}
static int ap_pending_requests(void)
{
- struct ap_card *ac;
+ int bkt;
struct ap_queue *aq;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_card(ac) {
- for_each_ap_queue(aq, ac) {
- if (aq->queue_count == 0)
- continue;
- spin_unlock_bh(&ap_list_lock);
- return 1;
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->queue_count == 0)
+ continue;
+ spin_unlock_bh(&ap_queues_lock);
+ return 1;
}
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
return 0;
}
@@ -683,24 +681,20 @@ static int ap_device_probe(struct device *dev)
}
/* Add queue/card to list of active queues/cards */
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_add(&to_ap_card(dev)->list, &ap_card_list);
- else
- list_add(&to_ap_queue(dev)->list,
- &to_ap_queue(dev)->card->queues);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_add(ap_queues, &to_ap_queue(dev)->hnode,
+ to_ap_queue(dev)->qid);
+ spin_unlock_bh(&ap_queues_lock);
ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
if (rc) {
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_del_init(&to_ap_card(dev)->list);
- else
- list_del_init(&to_ap_queue(dev)->list);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
ap_dev->drv = NULL;
}
@@ -725,16 +719,33 @@ static int ap_device_remove(struct device *dev)
ap_queue_remove(to_ap_queue(dev));
/* Remove queue/card from list of active queues/cards */
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_del_init(&to_ap_card(dev)->list);
- else
- list_del_init(&to_ap_queue(dev)->list);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
return 0;
}
+struct ap_queue *ap_get_qdev(ap_qid_t qid)
+{
+ int bkt;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->qid == qid) {
+ get_device(&aq->ap_dev.device);
+ spin_unlock_bh(&ap_queues_lock);
+ return aq;
+ }
+ }
+ spin_unlock_bh(&ap_queues_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ap_get_qdev);
+
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
@@ -1506,6 +1517,9 @@ static int __init ap_module_init(void)
return -ENODEV;
}
+ /* init ap_queue hashtable */
+ hash_init(ap_queues);
+
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
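The ap_bus.c changes above replace the card/queue lists with the generic include/linux/hashtable.h API, guarded by a single spinlock. A compact sketch of that API as it is used here (illustrative names only, not from the patch):

        #include <linux/hashtable.h>
        #include <linux/spinlock.h>

        static DEFINE_HASHTABLE(my_objects, 8);         /* 2^8 buckets */
        static DEFINE_SPINLOCK(my_objects_lock);

        struct my_obj {
                struct hlist_node hnode;
                unsigned int key;
        };

        static void my_obj_add(struct my_obj *obj)
        {
                spin_lock_bh(&my_objects_lock);
                hash_add(my_objects, &obj->hnode, obj->key);
                spin_unlock_bh(&my_objects_lock);
        }

        static struct my_obj *my_obj_find(unsigned int key)
        {
                struct my_obj *obj;
                int bkt;

                /* full scan, mirroring ap_get_qdev(); hash_for_each_possible()
                 * would restrict the walk to the key's bucket */
                spin_lock_bh(&my_objects_lock);
                hash_for_each(my_objects, bkt, obj, hnode) {
                        if (obj->key == key) {
                                spin_unlock_bh(&my_objects_lock);
                                return obj;
                        }
                }
                spin_unlock_bh(&my_objects_lock);
                return NULL;
        }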
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 8e8e37b6c0ee..053cc34d2ca2 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <linux/hashtable.h>
#include <asm/isc.h>
#include <asm/ap.h>
@@ -27,8 +28,8 @@
extern int ap_domain_index;
-extern spinlock_t ap_list_lock;
-extern struct list_head ap_card_list;
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
{
@@ -152,8 +153,6 @@ struct ap_device {
struct ap_card {
struct ap_device ap_dev;
- struct list_head list; /* Private list of AP cards. */
- struct list_head queues; /* List of assoc. AP queues */
void *private; /* ap driver private pointer. */
int raw_hwtype; /* AP raw hardware type. */
unsigned int functions; /* AP device function bitfield. */
@@ -166,7 +165,7 @@ struct ap_card {
struct ap_queue {
struct ap_device ap_dev;
- struct list_head list; /* Private list of AP queues. */
+ struct hlist_node hnode; /* Node for the ap_queues hashtable */
struct ap_card *card; /* Ptr to assoc. AP card. */
spinlock_t lock; /* Per device lock. */
void *private; /* ap driver private pointer. */
@@ -223,12 +222,6 @@ static inline void ap_release_message(struct ap_message *ap_msg)
kzfree(ap_msg->private);
}
-#define for_each_ap_card(_ac) \
- list_for_each_entry(_ac, &ap_card_list, list)
-
-#define for_each_ap_queue(_aq, _ac) \
- list_for_each_entry(_aq, &(_ac)->queues, list)
-
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
@@ -270,6 +263,16 @@ extern struct ap_perms ap_perms;
extern struct mutex ap_perms_mutex;
/*
+ * Get ap_queue device for this qid.
+ * Returns ptr to the struct ap_queue device or NULL if there
+ * was no ap_queue device with this qid found. When something is
+ * found, the reference count of the embedded device is increased.
+ * So the caller has to decrease the reference count after use
+ * with a call to put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
+
+/*
* check APQN for owned/reserved by ap bus and default driver(s).
* Checks if this APQN is or will be in use by the ap bus
* and the default set of drivers.
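The header comment above documents the reference-counting contract of the new ap_get_qdev() helper. A short caller-side sketch under that contract (the caller function is hypothetical):

        static int my_poke_queue(ap_qid_t qid)
        {
                struct ap_queue *aq;

                aq = ap_get_qdev(qid);
                if (!aq)
                        return -ENODEV;

                /* ... use aq, e.g. inspect aq->qid or queue a message ... */

                /* drop the device reference taken by ap_get_qdev() */
                put_device(&aq->ap_dev.device);
                return 0;
        }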
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 0a39dfdb6a1d..6588713319ba 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -66,9 +66,9 @@ static ssize_t request_count_show(struct device *dev,
u64 req_cnt;
req_cnt = 0;
- spin_lock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
req_cnt = atomic64_read(&ac->total_request_count);
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
@@ -76,13 +76,15 @@ static ssize_t request_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
+ struct ap_card *ac = to_ap_card(dev);
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- aq->total_request_count = 0;
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ aq->total_request_count = 0;
+ spin_unlock_bh(&ap_queues_lock);
atomic64_set(&ac->total_request_count, 0);
return count;
@@ -93,15 +95,17 @@ static DEVICE_ATTR_RW(request_count);
static ssize_t requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
unsigned int reqq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
reqq_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- reqq_cnt += aq->requestq_count;
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ reqq_cnt += aq->requestq_count;
+ spin_unlock_bh(&ap_queues_lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
}
@@ -110,15 +114,17 @@ static DEVICE_ATTR_RO(requestq_count);
static ssize_t pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
unsigned int penq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
penq_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- penq_cnt += aq->pendingq_count;
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ penq_cnt += aq->pendingq_count;
+ spin_unlock_bh(&ap_queues_lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
}
@@ -163,11 +169,6 @@ static void ap_card_device_release(struct device *dev)
{
struct ap_card *ac = to_ap_card(dev);
- if (!list_empty(&ac->list)) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&ac->list);
- spin_unlock_bh(&ap_list_lock);
- }
kfree(ac);
}
@@ -179,8 +180,6 @@ struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
- INIT_LIST_HEAD(&ac->list);
- INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = comp_type;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 0eaf1d04e8df..73b077dca3e6 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -568,11 +568,10 @@ static void ap_queue_device_release(struct device *dev)
{
struct ap_queue *aq = to_ap_queue(dev);
- if (!list_empty(&aq->list)) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&aq->list);
- spin_unlock_bh(&ap_list_lock);
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_del(&aq->hnode);
+ spin_unlock_bh(&ap_queues_lock);
+
kfree(aq);
}
@@ -590,7 +589,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->state = AP_STATE_UNBOUND;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
- INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
timer_setup(&aq->timeout, ap_request_timeout, 0);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index da47e423e1b1..2d3bca3c0141 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -20,6 +20,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
+#include <asm/chsc.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
@@ -27,8 +28,8 @@
static void qeth_bridgeport_query_support(struct qeth_card *card);
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
-static void qeth_bridge_host_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
+static void qeth_addr_change_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd);
static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
static void qeth_l2_vnicc_init(struct qeth_card *card);
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
@@ -629,6 +630,72 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
schedule_work(&card->rx_mode_work);
}
+/**
+ * qeth_l2_pnso() - perform network subchannel operation
+ * @card: qeth_card structure pointer
+ * @cnc: Boolean Change-Notification Control
+ * @cb: Callback function will be executed for each element
+ * of the address list
+ * @priv: Pointer to pass to the callback function.
+ *
+ * Collects network information in a network address list and calls the
+ * callback function for every entry in the list. If "change-notification-
+ * control" is set, further changes in the address list will be reported
+ * via the IPA command.
+ */
+static int qeth_l2_pnso(struct qeth_card *card, int cnc,
+ void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
+ void *priv)
+{
+ struct ccw_device *ddev = CARD_DDEV(card);
+ struct chsc_pnso_area *rr;
+ u32 prev_instance = 0;
+ int isfirstblock = 1;
+ int i, size, elems;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "PNSO");
+ rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
+ if (rr == NULL)
+ return -ENOMEM;
+ do {
+ /* on the first iteration, naihdr.resume_token will be zero */
+ rc = ccw_device_pnso(ddev, rr, rr->naihdr.resume_token, cnc);
+ if (rc)
+ continue;
+ if (cb == NULL)
+ continue;
+
+ size = rr->naihdr.naids;
+ if (size != sizeof(struct chsc_pnso_naid_l2)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ elems = (rr->response.length - sizeof(struct chsc_header) -
+ sizeof(struct chsc_pnso_naihdr)) / size;
+
+ if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
+ /* Inform the caller that they need to scrap */
+ /* the data that was already reported via cb */
+ rc = -EAGAIN;
+ break;
+ }
+ isfirstblock = 0;
+ prev_instance = rr->naihdr.instance;
+ for (i = 0; i < elems; i++)
+ (*cb)(priv, &rr->entries[i]);
+ } while ((rc == -EBUSY) || (!rc && /* list stored */
+ /* resume token is non-zero => list incomplete */
+ (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
+
+ if (rc)
+ QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code);
+
+ free_page((unsigned long)rr);
+ return rc;
+}
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
@@ -856,7 +923,7 @@ static int qeth_l2_control_event(struct qeth_card *card,
} else
return 1;
case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- qeth_bridge_host_event(card, cmd);
+ qeth_addr_change_event(card, cmd);
return 0;
default:
return 1;
@@ -973,8 +1040,10 @@ enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset};
* for all currently registered addresses.
*/
static void qeth_bridge_emit_host_event(struct qeth_card *card,
- enum qeth_an_event_type evtype,
- u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid)
+ enum qeth_an_event_type evtype,
+ u8 code,
+ struct net_if_token *token,
+ struct mac_addr_lnid *addr_lnid)
{
char str[7][32];
char *env[8];
@@ -1091,74 +1160,76 @@ static void qeth_bridge_state_change(struct qeth_card *card,
queue_work(card->event_wq, &data->worker);
}
-struct qeth_bridge_host_data {
+struct qeth_addr_change_data {
struct work_struct worker;
struct qeth_card *card;
- struct qeth_ipacmd_addr_change hostevs;
+ struct qeth_ipacmd_addr_change ac_event;
};
-static void qeth_bridge_host_event_worker(struct work_struct *work)
+static void qeth_addr_change_event_worker(struct work_struct *work)
{
- struct qeth_bridge_host_data *data =
- container_of(work, struct qeth_bridge_host_data, worker);
+ struct qeth_addr_change_data *data =
+ container_of(work, struct qeth_addr_change_data, worker);
int i;
- if (data->hostevs.lost_event_mask) {
+ QETH_CARD_TEXT(data->card, 4, "adrchgew");
+ if (data->ac_event.lost_event_mask) {
dev_info(&data->card->gdev->dev,
-"Address notification from the Bridge Port stopped %s (%s)\n",
- data->card->dev->name,
- (data->hostevs.lost_event_mask == 0x01)
+ "Address change notification stopped on %s (%s)\n",
+ data->card->dev->name,
+ (data->ac_event.lost_event_mask == 0x01)
? "Overflow"
- : (data->hostevs.lost_event_mask == 0x02)
+ : (data->ac_event.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
mutex_lock(&data->card->sbp_lock);
data->card->options.sbp.hostnotification = 0;
mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
- 0, NULL, NULL);
+ 0, NULL, NULL);
} else
- for (i = 0; i < data->hostevs.num_entries; i++) {
+ for (i = 0; i < data->ac_event.num_entries; i++) {
struct qeth_ipacmd_addr_change_entry *entry =
- &data->hostevs.entry[i];
+ &data->ac_event.entry[i];
qeth_bridge_emit_host_event(data->card,
- anev_reg_unreg,
- entry->change_code,
- &entry->token, &entry->addr_lnid);
+ anev_reg_unreg,
+ entry->change_code,
+ &entry->token,
+ &entry->addr_lnid);
}
kfree(data);
}
-static void qeth_bridge_host_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd)
+static void qeth_addr_change_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
{
struct qeth_ipacmd_addr_change *hostevs =
&cmd->data.addrchange;
- struct qeth_bridge_host_data *data;
+ struct qeth_addr_change_data *data;
int extrasize;
- QETH_CARD_TEXT(card, 2, "brhostev");
+ QETH_CARD_TEXT(card, 4, "adrchgev");
if (cmd->hdr.return_code != 0x0000) {
if (cmd->hdr.return_code == 0x0010) {
if (hostevs->lost_event_mask == 0x00)
hostevs->lost_event_mask = 0xff;
} else {
- QETH_CARD_TEXT_(card, 2, "BPHe%04x",
+ QETH_CARD_TEXT_(card, 2, "ACHN%04x",
cmd->hdr.return_code);
return;
}
}
extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) *
hostevs->num_entries;
- data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize,
- GFP_ATOMIC);
+ data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize,
+ GFP_ATOMIC);
if (!data) {
- QETH_CARD_TEXT(card, 2, "BPHalloc");
+ QETH_CARD_TEXT(card, 2, "ACNalloc");
return;
}
- INIT_WORK(&data->worker, qeth_bridge_host_event_worker);
+ INIT_WORK(&data->worker, qeth_addr_change_event_worker);
data->card = card;
- memcpy(&data->hostevs, hostevs,
+ memcpy(&data->ac_event, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
queue_work(card->event_wq, &data->worker);
}
@@ -1448,63 +1519,18 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL);
}
-/**
- * qeth_anset_makerc() - derive "traditional" error from hardware codes.
- * @card: qeth_card structure pointer, for debug messages.
- *
- * Returns negative errno-compatible error indication or 0 on success.
- */
-static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
-{
- int rc;
-
- if (pnso_rc == 0)
- switch (response) {
- case 0x0001:
- rc = 0;
- break;
- case 0x0004:
- case 0x0100:
- case 0x0106:
- rc = -EOPNOTSUPP;
- dev_err(&card->gdev->dev,
- "Setting address notification failed\n");
- break;
- case 0x0107:
- rc = -EAGAIN;
- break;
- default:
- rc = -EIO;
- }
- else
- rc = -EIO;
-
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc);
- QETH_CARD_TEXT_(card, 2, "SBPr%04x", response);
- }
- return rc;
-}
-
static void qeth_bridgeport_an_set_cb(void *priv,
- enum qdio_brinfo_entry_type type, void *entry)
+ struct chsc_pnso_naid_l2 *entry)
{
struct qeth_card *card = (struct qeth_card *)priv;
- struct qdio_brinfo_entry_l2 *l2entry;
u8 code;
- if (type != l2_addr_lnid) {
- WARN_ON_ONCE(1);
- return;
- }
-
- l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
- if (l2entry->addr_lnid.lnid < VLAN_N_VID)
+ if (entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
- (struct net_if_token *)&l2entry->nit,
- (struct mac_addr_lnid *)&l2entry->addr_lnid);
+ (struct net_if_token *)&entry->nit,
+ (struct mac_addr_lnid *)&entry->addr_lnid);
}
/**
@@ -1520,22 +1546,16 @@ static void qeth_bridgeport_an_set_cb(void *priv,
int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
{
int rc;
- u16 response;
- struct ccw_device *ddev;
- struct subchannel_id schid;
if (!card->options.sbp.supported_funcs)
return -EOPNOTSUPP;
- ddev = CARD_DDEV(card);
- ccw_device_get_schid(ddev, &schid);
if (enable) {
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
- rc = qdio_pnso_brinfo(schid, 1, &response,
- qeth_bridgeport_an_set_cb, card);
+ rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
} else
- rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL);
- return qeth_anset_makerc(card, rc, response);
+ rc = qeth_l2_pnso(card, 0, NULL, NULL);
+ return rc;
}
static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
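The qeth_l2_pnso() kernel-doc above describes a callback-per-entry contract: the helper loops on the resume token and invokes the callback once for each reported L2 address. A hedged sketch of a caller inside qeth_l2_main.c (the counting context and both functions are hypothetical, not part of this patch):

        struct my_pnso_ctx {
                unsigned int entries;
        };

        static void my_count_entry(void *priv, struct chsc_pnso_naid_l2 *entry)
        {
                struct my_pnso_ctx *ctx = priv;

                ctx->entries++;
        }

        static int my_count_addresses(struct qeth_card *card, unsigned int *count)
        {
                struct my_pnso_ctx ctx = { 0 };
                int rc;

                /* cnc=0: one-shot listing, no change notifications afterwards */
                rc = qeth_l2_pnso(card, 0, my_count_entry, &ctx);
                if (!rc)
                        *count = ctx.entries;
                return rc;
        }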
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 09ec846fe01d..18b713a616de 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -4,7 +4,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
/*
@@ -415,8 +415,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
- if (!zfcp_scsi_adapter_register(adapter))
- return adapter;
+ return adapter;
failed:
zfcp_adapter_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_diag.h b/drivers/s390/scsi/zfcp_diag.h
index b9c93d15f67c..3852367f15f6 100644
--- a/drivers/s390/scsi/zfcp_diag.h
+++ b/drivers/s390/scsi/zfcp_diag.h
@@ -4,7 +4,7 @@
*
* Definitions for handling diagnostics in the zfcp device driver.
*
- * Copyright IBM Corp. 2018
+ * Copyright IBM Corp. 2018, 2020
*/
#ifndef ZFCP_DIAG_H
@@ -56,11 +56,11 @@ struct zfcp_diag_adapter {
unsigned long max_age;
- struct {
+ struct zfcp_diag_adapter_port_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_port data;
} port_data;
- struct {
+ struct zfcp_diag_adapter_config_data {
struct zfcp_diag_header header;
struct fsf_qtcb_bottom_config data;
} config_data;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3d0bc000f500..db320dab1fee 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -4,7 +4,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -14,6 +14,7 @@
#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
+#include "zfcp_diag.h"
#define ZFCP_MAX_ERPS 3
@@ -768,10 +769,14 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strat_fsf_xconf(
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
return ZFCP_ERP_FAILED;
+ return ZFCP_ERP_SUCCEEDED;
+}
+
+static void
+zfcp_erp_adapter_strategy_open_ptp_port(struct zfcp_adapter *const adapter)
+{
if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
zfcp_erp_enqueue_ptp_port(adapter);
-
- return ZFCP_ERP_SUCCEEDED;
}
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
@@ -800,6 +805,59 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf_xport(
return ZFCP_ERP_SUCCEEDED;
}
+static enum zfcp_erp_act_result
+zfcp_erp_adapter_strategy_alloc_shost(struct zfcp_adapter *const adapter)
+{
+ struct zfcp_diag_adapter_config_data *const config_data =
+ &adapter->diagnostics->config_data;
+ struct zfcp_diag_adapter_port_data *const port_data =
+ &adapter->diagnostics->port_data;
+ unsigned long flags;
+ int rc;
+
+ rc = zfcp_scsi_adapter_register(adapter);
+ if (rc == -EEXIST)
+ return ZFCP_ERP_SUCCEEDED;
+ else if (rc)
+ return ZFCP_ERP_FAILED;
+
+ /*
+ * We allocated the shost for the first time. Before it was NULL,
+ * and so we deferred all updates in the xconf- and xport-data
+ * handlers. We need to make up for that now, and make all the updates
+ * that would have been done before.
+ *
+ * We can be sure that xconf- and xport-data succeeded, because
+ * otherwise this function is not called. But they might have been
+ * incomplete.
+ */
+
+ spin_lock_irqsave(&config_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_config_data(adapter, &config_data->data,
+ !!config_data->header.incomplete);
+ spin_unlock_irqrestore(&config_data->header.access_lock, flags);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ spin_lock_irqsave(&port_data->header.access_lock, flags);
+ zfcp_scsi_shost_update_port_data(adapter, &port_data->data);
+ spin_unlock_irqrestore(&port_data->header.access_lock, flags);
+ }
+
+ /*
+ * There is a remote possibility that the 'Exchange Port Data' request
+ * reports a different connectivity status than 'Exchange Config Data'.
+ * But any change to the connectivity status of the local optic that
+ * happens after the initial xconf request is expected to be reported
+ * to us, as soon as we post Status Read Buffers to the FCP channel
+ * firmware after this function. So any resulting inconsistency will
+ * only be momentary.
+ */
+ if (config_data->header.incomplete)
+ zfcp_fsf_fc_host_link_down(adapter);
+
+ return ZFCP_ERP_SUCCEEDED;
+}
+
static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
struct zfcp_erp_action *act)
{
@@ -809,6 +867,12 @@ static enum zfcp_erp_act_result zfcp_erp_adapter_strategy_open_fsf(
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
+ if (zfcp_erp_adapter_strategy_alloc_shost(act->adapter) ==
+ ZFCP_ERP_FAILED)
+ return ZFCP_ERP_FAILED;
+
+ zfcp_erp_adapter_strategy_open_ptp_port(act->adapter);
+
if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num))
return ZFCP_ERP_FAILED;
@@ -1636,6 +1700,13 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+ * if `scsi_host` is missing, xconfig/xport data has never completed
+ * yet, so we can't access it, but there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host)
atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
@@ -1673,6 +1744,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
+ /*
+ * if `scsi_host` is missing, xconfig/xport data has never completed
+ * yet, so we can't access it, but there are also no SDEVs yet
+ */
+ if (adapter->scsi_host == NULL)
+ return;
+
spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) {
atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 88294ca0e2ea..3ef5d74331c3 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -125,6 +125,7 @@ extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
struct fsf_qtcb_bottom_port *);
+extern u32 zfcp_fsf_convert_portspeed(u32 fsf_speed);
extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
extern int zfcp_fsf_status_read(struct zfcp_qdio *);
extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
@@ -134,6 +135,7 @@ extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
struct zfcp_fsf_ct_els *, unsigned int);
extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
+extern void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter);
extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
u8 tm_flags);
extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
@@ -153,6 +155,8 @@ extern int zfcp_qdio_sbal_get(struct zfcp_qdio *);
extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_qdio_req *);
extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
struct scatterlist *);
+extern void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio);
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
@@ -169,6 +173,13 @@ extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
+extern void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete);
+extern void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom);
/* zfcp_sysfs.c */
extern const struct attribute_group *zfcp_unit_attr_groups[];
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 111fe3fc32d7..c795f22249d8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -120,21 +120,25 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
-static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
+void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost = adapter->scsi_host;
+ adapter->hydra_version = 0;
+ adapter->peer_wwpn = 0;
+ adapter->peer_wwnn = 0;
+ adapter->peer_d_id = 0;
+
+ /* if there is no shost yet, we have nothing to zero-out */
+ if (shost == NULL)
+ return;
+
fc_host_port_id(shost) = 0;
fc_host_fabric_name(shost) = 0;
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
- adapter->hydra_version = 0;
snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
-
- adapter->peer_wwpn = 0;
- adapter->peer_wwnn = 0;
- adapter->peer_d_id = 0;
}
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
@@ -479,7 +483,7 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
#define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
-static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
u32 fdmi_speed = 0;
if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
@@ -509,64 +513,36 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
struct zfcp_adapter *adapter = req->adapter;
- struct Scsi_Host *shost = adapter->scsi_host;
- struct fc_els_flogi *nsp, *plogi;
+ struct fc_els_flogi *plogi;
/* adjust pointers for missing command code */
- nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
- - sizeof(u32));
plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
- sizeof(u32));
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
- "IBM");
- fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
- fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
- fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
-
adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
(u16)FSF_STATUS_READS_RECOM);
- zfcp_scsi_set_prot(adapter);
-
/* no error return above here, otherwise must fix call chains */
/* do not evaluate invalid fields */
if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
return 0;
- fc_host_port_id(shost) = ntoh24(bottom->s_id);
- fc_host_speed(shost) =
- zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
-
adapter->hydra_version = bottom->adapter_type;
- snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
- bottom->adapter_type);
switch (bottom->fc_topology) {
case FSF_TOPO_P2P:
adapter->peer_d_id = ntoh24(bottom->peer_d_id);
adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
- fc_host_port_type(shost) = FC_PORTTYPE_PTP;
- fc_host_fabric_name(shost) = 0;
break;
case FSF_TOPO_FABRIC:
- fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
- if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
- fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
- else
- fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
break;
case FSF_TOPO_AL:
- fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
- fc_host_fabric_name(shost) = 0;
- fallthrough;
default:
- fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
"Unknown or unsupported arbitrated loop "
"fibre channel topology detected\n");
@@ -584,13 +560,10 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->diagnostics->config_data.header;
struct fsf_qtcb *qtcb = req->qtcb;
struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;
- snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->lic_version);
adapter->fsf_lic_version = bottom->lic_version;
adapter->adapter_features = bottom->adapter_features;
adapter->connection_features = bottom->connection_features;
@@ -606,6 +579,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_config_data(adapter, bottom, false);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
@@ -630,6 +604,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
&adapter->status);
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_config_data(adapter, bottom, true);
if (zfcp_fsf_exchange_config_evaluate(req))
return;
break;
@@ -638,16 +614,8 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
return;
}
- if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
adapter->hardware_version = bottom->hardware_version;
- snprintf(fc_host_hardware_version(shost),
- FC_VERSION_STRING_SIZE,
- "0x%08x", bottom->hardware_version);
- memcpy(fc_host_serial_number(shost), bottom->serial_number,
- min(FC_SERIAL_NUMBER_SIZE, 17));
- EBCASC(fc_host_serial_number(shost),
- min(FC_SERIAL_NUMBER_SIZE, 17));
- }
if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
dev_err(&adapter->ccw_device->dev,
@@ -761,19 +729,10 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
- struct Scsi_Host *shost = adapter->scsi_host;
if (req->data)
memcpy(req->data, bottom, sizeof(*bottom));
- fc_host_permanent_port_name(shost) = bottom->wwpn;
- fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) =
- zfcp_fsf_convert_portspeed(bottom->supported_speed);
- memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
- FC_FC4_LIST_SIZE);
- memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
- FC_FC4_LIST_SIZE);
if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
adapter->fc_security_algorithms =
bottom->fc_security_algorithms;
@@ -800,6 +759,7 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
*/
zfcp_diag_update_xdata(diag_hdr, bottom, false);
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -808,6 +768,8 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info);
+
+ zfcp_scsi_shost_update_port_data(req->adapter, bottom);
zfcp_fsf_exchange_port_evaluate(req);
break;
}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 26702b56a7ab..3a7f3374d10a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -4,7 +4,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -342,6 +342,18 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, 0);
}
+void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
+ const struct zfcp_qdio *const qdio)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ shost->sg_tablesize = qdio->max_sbale_per_req;
+ shost->max_sectors = qdio->max_sbale_per_req * 8;
+}
+
/**
* zfcp_qdio_open - prepare and initialize response queue
* @qdio: pointer to struct zfcp_qdio
@@ -420,10 +432,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
- if (adapter->scsi_host) {
- adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
- adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
- }
+ zfcp_qdio_shost_update(adapter, qdio);
return 0;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 13d873f806e4..d58bf79892f2 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2018
+ * Copyright IBM Corp. 2002, 2020
*/
#define KMSG_COMPONENT "zfcp"
@@ -451,26 +451,39 @@ static struct scsi_host_template zfcp_scsi_host_template = {
};
/**
- * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * zfcp_scsi_adapter_register() - Allocate and register SCSI and FC host with
+ * SCSI midlayer
* @adapter: The zfcp adapter to register with the SCSI midlayer
+ *
+ * Allocates the SCSI host object for the given adapter, sets basic properties
+ * (such as the transport template, QDIO limits, ...), and registers it with
+ * the midlayer.
+ *
+ * During registration with the midlayer, the corresponding FC host object
+ * for the referenced transport class is also implicitly allocated.
+ *
+ * Upon success adapter->scsi_host is set, and upon failure it remains NULL. If
+ * adapter->scsi_host is already set, nothing is done.
+ *
+ * Return:
+ * * 0 - Allocation and registration was successful
+ * * -EEXIST - SCSI and FC host already existed; nothing was done and nothing
+ * was changed
+ * * -EIO - Allocation or registration failed
*/
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
if (adapter->scsi_host)
- return 0;
+ return -EEXIST;
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
- if (!adapter->scsi_host) {
- dev_err(&adapter->ccw_device->dev,
- "Registering the FCP device with the "
- "SCSI stack failed\n");
- return -EIO;
- }
+ if (!adapter->scsi_host)
+ goto err_out;
/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
@@ -480,14 +493,23 @@ int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
adapter->scsi_host->transportt = zfcp_scsi_transport_template;
+ /* make all basic properties known at registration time */
+ zfcp_qdio_shost_update(adapter, adapter->qdio);
+ zfcp_scsi_set_prot(adapter);
+
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
scsi_host_put(adapter->scsi_host);
- return -EIO;
+ goto err_out;
}
return 0;
+err_out:
+ adapter->scsi_host = NULL;
+ dev_err(&adapter->ccw_device->dev,
+ "Registering the FCP device with the SCSI stack failed\n");
+ return -EIO;
}
/**
@@ -841,6 +863,95 @@ void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
set_host_byte(scmd, DID_SOFT_ERROR);
}
+void zfcp_scsi_shost_update_config_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_config *const bottom,
+ const bool bottom_incomplete)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+ const struct fc_els_flogi *nsp, *plogi;
+
+ if (shost == NULL)
+ return;
+
+ snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->lic_version);
+
+ if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
+ snprintf(fc_host_hardware_version(shost),
+ FC_VERSION_STRING_SIZE,
+ "0x%08x", bottom->hardware_version);
+ memcpy(fc_host_serial_number(shost), bottom->serial_number,
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ EBCASC(fc_host_serial_number(shost),
+ min(FC_SERIAL_NUMBER_SIZE, 17));
+ }
+
+ /* adjust pointers for missing command code */
+ nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
+ - sizeof(u32));
+ plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
+ - sizeof(u32));
+
+ snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
+ "IBM");
+ fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
+ fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
+ fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ zfcp_scsi_set_prot(adapter);
+
+ /* do not evaluate invalid fields */
+ if (bottom_incomplete)
+ return;
+
+ fc_host_port_id(shost) = ntoh24(bottom->s_id);
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
+
+ snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
+ bottom->adapter_type);
+
+ switch (bottom->fc_topology) {
+ case FSF_TOPO_P2P:
+ fc_host_port_type(shost) = FC_PORTTYPE_PTP;
+ fc_host_fabric_name(shost) = 0;
+ break;
+ case FSF_TOPO_FABRIC:
+ fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
+ if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ break;
+ case FSF_TOPO_AL:
+ fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+ fallthrough;
+ default:
+ fc_host_fabric_name(shost) = 0;
+ break;
+ }
+}
+
+void zfcp_scsi_shost_update_port_data(
+ struct zfcp_adapter *const adapter,
+ const struct fsf_qtcb_bottom_port *const bottom)
+{
+ struct Scsi_Host *const shost = adapter->scsi_host;
+
+ if (shost == NULL)
+ return;
+
+ fc_host_permanent_port_name(shost) = bottom->wwpn;
+ fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
+ memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
+ FC_FC4_LIST_SIZE);
+ memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
+ FC_FC4_LIST_SIZE);
+}
+
struct fc_function_template zfcp_transport_functions = {
.show_starget_port_id = 1,
.show_starget_port_name = 1,
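The kernel-doc added to zfcp_scsi_adapter_register() above now distinguishes -EEXIST (host already registered, nothing changed) from -EIO (allocation or registration failed, adapter->scsi_host stays NULL). A minimal caller-side sketch of how that contract could be consumed; the wrapper function and its recovery semantics are illustrative assumptions, not zfcp code:

#include <linux/errno.h>

static int example_bring_up_shost(struct zfcp_adapter *adapter)
{
	int ret = zfcp_scsi_adapter_register(adapter);

	if (ret == -EEXIST)	/* host was registered earlier; keep using it */
		return 0;
	if (ret)		/* -EIO: adapter->scsi_host remained NULL */
		return ret;	/* caller may schedule another recovery attempt */
	return 0;
}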
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 7ec30ded0169..8d9662e8b717 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -216,20 +216,32 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
{
struct ccw_device *cdev = to_ccwdev(dev);
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+ int retval = 0;
if (!adapter)
return -ENODEV;
/*
+ * If `scsi_host` is missing, we can't schedule `scan_work`, as it
+ * makes use of the corresponding fc_host object. But this state is
+ * only possible if xconfig/xport data has not completed yet,
+ * and we couldn't successfully scan for ports anyway.
+ */
+ if (adapter->scsi_host == NULL) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ /*
* The user's wish is our command: immediately schedule and flush a
* worker to conduct a synchronous port scan, that is, neither
* a random delay nor a rate limit is applied here.
*/
queue_delayed_work(adapter->work_queue, &adapter->scan_work, 0);
flush_delayed_work(&adapter->scan_work);
+out:
zfcp_ccw_adapter_put(adapter);
-
- return (ssize_t) count;
+ return retval ? retval : (ssize_t) count;
}
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);
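The rescan handler above now reports an error instead of silently "succeeding" when no Scsi_Host exists yet. A generic sketch of that sysfs-store shape, with placeholder driver data and a plain work item rather than zfcp's delayed scan_work:

#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

struct example_priv {
	bool ready;
	struct workqueue_struct *wq;
	struct work_struct rescan_work;
};

static ssize_t example_rescan_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct example_priv *priv = dev_get_drvdata(dev);	/* placeholder */
	int retval = 0;

	if (!priv->ready) {		/* precondition missing: report it */
		retval = -ENODEV;
		goto out;
	}
	queue_work(priv->wq, &priv->rescan_work);
	flush_work(&priv->rescan_work);
out:
	/* sysfs convention: full count on success, negative errno otherwise */
	return retval ? retval : (ssize_t)count;
}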
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 4147d22fd448..3adfef210d8e 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -17,7 +17,6 @@
#include <linux/of_device.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/upa.h>
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 8af216287a84..21b7cb6e7e70 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -410,9 +410,7 @@ static void dax_unlock_pages(struct dax_ctx *ctx, int ccb_index, int nelem)
if (p) {
dax_dbg("freeing page %p", p);
- if (j == OUT)
- set_page_dirty(p);
- put_page(p);
+ unpin_user_pages_dirty_lock(&p, 1, j == OUT);
ctx->pages[i][j] = NULL;
}
}
@@ -425,13 +423,13 @@ static int dax_lock_page(void *va, struct page **p)
dax_dbg("uva %p", va);
- ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
+ ret = pin_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
if (ret == 1) {
dax_dbg("locked page %p, for VA %p", *p, va);
return 0;
}
- dax_dbg("get_user_pages failed, va=%p, ret=%d", va, ret);
+ dax_dbg("pin_user_pages failed, va=%p, ret=%d", va, ret);
return -1;
}
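The oradax conversion above swaps get_user_pages_fast()/put_page() for the pin_user_pages API, which pairs every pin with an explicit unpin and can mark the page dirty under the page lock. A condensed sketch of the pairing for a single writable page, independent of the driver's context structures:

#include <linux/mm.h>
#include <linux/errno.h>

/* Pin one user page for device access; returns 0 or a negative errno. */
static int example_pin_one(unsigned long uaddr, struct page **page)
{
	int ret = pin_user_pages_fast(uaddr, 1, FOLL_WRITE, page);

	if (ret == 1)
		return 0;
	return ret < 0 ? ret : -EFAULT;
}

/* Release the pin; mark dirty only if the device may have written to it. */
static void example_unpin_one(struct page *page, bool device_wrote)
{
	unpin_user_pages_dirty_lock(&page, 1, device_wrote);
}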
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 37d252f2548d..05de0ce79cb9 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -21,7 +21,6 @@
#include <asm/oplib.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#define DEBUG 1
#ifdef DEBUG
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 0068963bb933..461b3babb601 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -116,9 +116,9 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index b5b3154e2c28..bb49d83cadc7 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2237,7 +2237,7 @@ static bool __init blogic_inquiry(struct blogic_adapter *adapter)
"INQUIRE INSTALLED DEVICES ID 0 TO 7");
for (tgt_id = 0; tgt_id < 8; tgt_id++)
adapter->tgt_flags[tgt_id].tgt_exists =
- (installed_devs0to7[tgt_id] != 0 ? true : false);
+ installed_devs0to7[tgt_id] != 0;
}
/*
Issue the Inquire Setup Information command.
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 564b35473672..5853db36eceb 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index b6a0432f305a..86f1da22aaa5 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index eb72ac8136c3..2b868f8db8ff 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -814,7 +814,6 @@ int aac_probe_container(struct aac_dev *dev, int cid)
kfree(scsidev);
return -ENOMEM;
}
- scsicmd->list.next = NULL;
scsicmd->scsi_done = aac_probe_container_scsi_done;
scsicmd->device = scsidev;
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index ffe41bc111fc..34e65dea992e 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -513,15 +513,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
- if (!user_srbcmd) {
- dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
- rcode = -ENOMEM;
- goto cleanup;
- }
- if(copy_from_user(user_srbcmd, user_srb,fibsize)){
- dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
- rcode = -EFAULT;
+ user_srbcmd = memdup_user(user_srb, fibsize);
+ if (IS_ERR(user_srbcmd)) {
+ rcode = PTR_ERR(user_srbcmd);
+ user_srbcmd = NULL;
goto cleanup;
}
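memdup_user() above collapses the kmalloc() + copy_from_user() pair and returns an ERR_PTR() on failure, which is why the hunk also resets the pointer to NULL before jumping to the shared cleanup path. A minimal sketch of the idiom; the buffer use is a placeholder:

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/uaccess.h>

static int example_copy_cmd(void __user *ubuf, size_t len)
{
	void *cmd;
	int rc = 0;

	cmd = memdup_user(ubuf, len);	/* kmalloc + copy_from_user in one call */
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);	/* -ENOMEM or -EFAULT, never NULL */

	/* ... operate on cmd ... */

	kfree(cmd);
	return rc;
}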
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index ddd73f6798af..8ee4e1abe568 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2351,7 +2351,7 @@ fib_free_out:
goto out;
}
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
struct tm cur_tm;
char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
@@ -2380,7 +2380,7 @@ out:
return ret;
}
-int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
+static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
int ret = -ENOMEM;
struct fib *fibptr;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 83a60b0a8cd8..a308e86a97f1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -864,7 +864,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
return HBA_IU_TYPE_SATA_REQ;
}
-void aac_tmf_callback(void *context, struct fib *fibptr)
+static void aac_tmf_callback(void *context, struct fib *fibptr)
{
struct aac_hba_resp *err =
&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
@@ -1078,7 +1078,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
* @scsi_cmd: SCSI command block causing the reset
*
*/
-int aac_eh_host_reset(struct scsi_cmnd *cmd)
+static int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
@@ -1632,7 +1632,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct Scsi_Host *shost;
struct aac_dev *aac;
struct list_head *insert = &aac_devices;
- int error = -ENODEV;
+ int error;
int unique_id = 0;
u64 dmamask;
int mask_bits = 0;
@@ -1657,7 +1657,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = pci_enable_device(pdev);
if (error)
goto out;
- error = -ENODEV;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -1689,8 +1688,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
- if (!shost)
+ if (!shost) {
+ error = -ENOMEM;
goto out_disable_pdev;
+ }
shost->irq = pdev->irq;
shost->unique_id = unique_id;
@@ -1714,8 +1715,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
sizeof(struct fib),
GFP_KERNEL);
- if (!aac->fibs)
+ if (!aac->fibs) {
+ error = -ENOMEM;
goto out_free_host;
+ }
+
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index a336a458c978..e4a09b93d00c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -3662,8 +3662,7 @@ ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force)
return;
tstate = ahd->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahd->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -6054,14 +6053,13 @@ ahd_alloc(void *platform_arg, char *name)
{
struct ahd_softc *ahd;
- ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
+ ahd = kzalloc(sizeof(*ahd), GFP_ATOMIC);
if (!ahd) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
- memset(ahd, 0, sizeof(*ahd));
ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
if (ahd->seep_config == NULL) {
kfree(ahd);
@@ -6120,8 +6118,7 @@ ahd_set_unit(struct ahd_softc *ahd, int unit)
void
ahd_set_name(struct ahd_softc *ahd, char *name)
{
- if (ahd->name != NULL)
- kfree(ahd->name);
+ kfree(ahd->name);
ahd->name = name;
}
@@ -6182,12 +6179,9 @@ ahd_free(struct ahd_softc *ahd)
kfree(ahd->black_hole);
}
#endif
- if (ahd->name != NULL)
- kfree(ahd->name);
- if (ahd->seep_config != NULL)
- kfree(ahd->seep_config);
- if (ahd->saved_stack != NULL)
- kfree(ahd->saved_stack);
+ kfree(ahd->name);
+ kfree(ahd->seep_config);
+ kfree(ahd->saved_stack);
kfree(ahd);
return;
}
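Two cleanups recur through the aic79xx/aic7xxx hunks: kzalloc() replaces kmalloc() plus memset(), and the "if (p != NULL) kfree(p)" guards are dropped because kfree(NULL) is a defined no-op. A small illustrative sketch with made-up field names:

#include <linux/slab.h>

struct example_softc {
	char *name;
	void *seep_config;
};

static struct example_softc *example_alloc(void)
{
	/* zeroed allocation: no separate memset() needed */
	return kzalloc(sizeof(struct example_softc), GFP_KERNEL);
}

static void example_free(struct example_softc *sc)
{
	if (!sc)
		return;
	kfree(sc->name);		/* safe even if still NULL */
	kfree(sc->seep_config);		/* kfree(NULL) is a no-op */
	kfree(sc);
}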
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 84fc499cb1e6..3d4df906fa4f 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2178,8 +2178,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
if (channel == 'B')
scsi_id += 8;
tstate = ahc->enabled_targets[scsi_id];
- if (tstate != NULL)
- kfree(tstate);
+ kfree(tstate);
ahc->enabled_targets[scsi_id] = NULL;
}
#endif
@@ -4384,13 +4383,13 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
- ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
+ ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
- memset(ahc, 0, sizeof(*ahc));
+
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
kfree(ahc);
@@ -4453,8 +4452,7 @@ ahc_set_unit(struct ahc_softc *ahc, int unit)
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
- if (ahc->name != NULL)
- kfree(ahc->name);
+ kfree(ahc->name);
ahc->name = name;
}
@@ -4515,10 +4513,8 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->black_hole);
}
#endif
- if (ahc->name != NULL)
- kfree(ahc->name);
- if (ahc->seep_config != NULL)
- kfree(ahc->seep_config);
+ kfree(ahc->name);
+ kfree(ahc->seep_config);
kfree(ahc);
return;
}
@@ -4927,8 +4923,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
case 0:
break;
}
- if (scb_data->scbarray != NULL)
- kfree(scb_data->scbarray);
+ kfree(scb_data->scbarray);
}
static void
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 3ddc8852bc32..105adba559a1 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -406,7 +406,7 @@ struct asd_manuf_sec {
u8 sas_addr[SAS_ADDR_SIZE];
u8 pcba_sn[ASD_PCBA_SN_SIZE];
/* Here start the other segments */
- u8 linked_list[0];
+ u8 linked_list[];
} __attribute__ ((packed));
struct asd_manuf_phy_desc {
@@ -449,7 +449,7 @@ struct asd_ms_sb_desc {
u8 type;
u8 node_desc_index;
u8 conn_desc_index;
- u8 _recvd[0];
+ u8 _recvd[];
} __attribute__ ((packed));
#if 0
@@ -478,12 +478,12 @@ struct asd_ms_conn_desc {
u8 size_sideband_desc;
u32 _resvd;
u8 name[16];
- struct asd_ms_sb_desc sb_desc[0];
+ struct asd_ms_sb_desc sb_desc[];
} __attribute__ ((packed));
struct asd_nd_phy_desc {
u8 vp_attch_type;
- u8 attch_specific[0];
+ u8 attch_specific[];
} __attribute__ ((packed));
#if 0
@@ -503,7 +503,7 @@ struct asd_ms_node_desc {
u8 size_phy_desc;
u8 _resvd;
u8 name[16];
- struct asd_nd_phy_desc phy_desc[0];
+ struct asd_nd_phy_desc phy_desc[];
} __attribute__ ((packed));
struct asd_ms_conn_map {
@@ -518,7 +518,7 @@ struct asd_ms_conn_map {
u8 usage_model_id;
u32 _resvd;
struct asd_ms_conn_desc conn_desc[0];
- struct asd_ms_node_desc node_desc[0];
+ struct asd_ms_node_desc node_desc[];
} __attribute__ ((packed));
struct asd_ctrla_phy_entry {
@@ -542,7 +542,7 @@ struct asd_ll_el {
u8 id0;
u8 id1;
__le16 next;
- u8 something_here[0];
+ u8 something_here[];
} __attribute__ ((packed));
static int asd_poll_flash(struct asd_ha_struct *asd_ha)
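The aic94xx structures switch from the old one-element "[0]" trick to C99 flexible array members, which lets the compiler and bounds-checking tooling see the real array extent. A sketch of declaring and allocating such a structure with struct_size(); the descriptor layout here is invented for illustration:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/overflow.h>

struct example_desc {
	u8 type;
	u8 count;
	u8 payload[];			/* flexible array member: must be last */
} __attribute__ ((packed));

static struct example_desc *example_desc_alloc(u8 count)
{
	struct example_desc *d;

	/* struct_size() computes header + count * sizeof(payload[0]) safely */
	d = kzalloc(struct_size(d, payload, count), GFP_KERNEL);
	if (d)
		d->count = count;
	return d;
}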
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index a1f3e9ee4e63..65691c21f133 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -23,11 +23,11 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include "../scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 134f040d58e2..6e204a2e0c8d 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -29,11 +29,11 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/ecard.h>
-#include <asm/pgtable.h>
#include "../scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index c795537a671c..772a13e5fd91 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -14,11 +14,11 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include "../scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0f554ebb8f2c..fb4c469bd89f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -708,7 +708,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
struct bfi_msg_s *m;
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 284baa3b0c8e..766f2b5ed2ab 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -436,7 +436,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa,
return BFA_STATUS_OK;
}
-void
+static void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
struct bfa_itnim_latency_s *io_lat =
@@ -453,7 +453,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
io_lat->avg[idx] += val;
}
-void
+static void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
ioim->start_time = jiffies;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7c3eadc58b98..297a77f5806c 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -1283,7 +1283,7 @@ bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
n2n_port->reply_oxid = 0;
}
-void
+static void
bfa_fcport_get_loop_attr(struct bfa_fcs_lport_s *port)
{
int i = 0, j = 0, bit = 0, alpa_bit = 0;
@@ -4358,7 +4358,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
- };
+ }
break;
default:
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 82801b366500..fc294e1950a6 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -1575,7 +1575,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
- };
+ }
break;
case RPSM_EVENT_DELETE:
@@ -2449,7 +2449,7 @@ bfa_fcs_rport_hal_online_action(struct bfa_fcs_rport_s *rport)
bfa_fcs_itnim_brp_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
bfa_fcs_rpf_rport_online(rport);
- };
+ }
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 18b58b2f304f..6fd3383ee538 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -364,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
writel(r32, rb + FNC_PERS_REG);
}
-bfa_boolean_t
+static bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
u32 r32;
@@ -744,7 +744,7 @@ bfa_ioc_ct2_mem_init(void __iomem *rb)
writel(0, (rb + CT2_MBIST_CTL_REG));
}
-void
+static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
/* put port0, port1 MAC & AHB in reset */
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 6d2131441f0a..0b7d2e8f4a66 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -4284,7 +4284,7 @@ bfa_fcport_dportdisable(struct bfa_s *bfa)
bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
-void
+static void
bfa_fcport_ddportenable(struct bfa_s *bfa)
{
/*
@@ -4293,7 +4293,7 @@ bfa_fcport_ddportenable(struct bfa_s *bfa)
bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
}
-void
+static void
bfa_fcport_ddportdisable(struct bfa_s *bfa)
{
/*
@@ -5517,7 +5517,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
struct bfa_uf_buf_s *uf_buf;
uint8_t *buf;
- struct fchs_s *fchs;
uf_buf = (struct bfa_uf_buf_s *)
bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
@@ -5526,8 +5525,6 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
m->frm_len = be16_to_cpu(m->frm_len);
m->xfr_len = be16_to_cpu(m->xfr_len);
- fchs = (struct fchs_s *)uf_buf;
-
list_del(&uf->qe); /* dequeue from posted queue */
uf->data_ptr = buf;
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index eb0c76338295..bc5d84f87d8f 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -50,7 +50,7 @@ int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
int max_xfer_size = BFAD_MAX_SECTORS >> 1;
-int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
+static int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
/* Firmware releated */
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index fbfce02e5b93..5ae1e3f78910 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -437,7 +437,7 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
return status;
}
-int
+static int
bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
{
struct bfad_im_port_s *im_port =
@@ -562,7 +562,7 @@ bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
return 0;
}
-void
+static void
bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)
{
struct bfad_vport_s *vport = (struct bfad_vport_s *)fc_vport->dd_data;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a76c968dbac5..412dbe125e10 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -136,7 +136,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
return 0;
}
-int
+static int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 1cbb431fa682..0e33324e16f5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -945,7 +945,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
*/
if (interface->enabled)
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -965,7 +965,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
put_cpu();
fcoe_clean_pending_queue(lport);
wait_for_upload = 1;
- };
+ }
}
}
mutex_unlock(&bnx2fc_dev_lock);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 2b070f0835df..1aba5897ccb0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1081,6 +1081,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
}
static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+ __must_hold(&tgt->tgt_lock)
{
struct bnx2fc_rport *tgt = io_req->tgt;
unsigned int time_left;
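The __must_hold() annotation added to bnx2fc_abts_cleanup() (and to sas_ata_qc_issue() later in this series) documents a locking precondition for sparse; it changes no runtime behavior. A tiny sketch of the annotation on a hypothetical helper with its own lock:

#include <linux/spinlock.h>
#include <linux/compiler.h>

struct example_tgt {
	spinlock_t tgt_lock;
	int io_outstanding;
};

/* Caller must already hold tgt->tgt_lock; sparse can verify the contract. */
static int example_cleanup(struct example_tgt *tgt)
	__must_hold(&tgt->tgt_lock)
{
	return tgt->io_outstanding;	/* state protected by tgt_lock */
}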
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 524cdbcd29aa..ec7d01f6e2d5 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -959,6 +959,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
struct sk_buff *skb = NULL;
+ int ret;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);
@@ -979,16 +980,16 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
if (csk->atid < 0) {
pr_err("NO atid available.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_sock;
}
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
if (!skb) {
- cxgb3_free_atid(t3dev, csk->atid);
- cxgbi_sock_put(csk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_atid;
}
skb->sk = (struct sock *)csk;
set_arp_failure_handler(skb, act_open_arp_failure);
@@ -1010,6 +1011,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
return 0;
+
+free_atid:
+ cxgb3_free_atid(t3dev, csk->atid);
+put_sock:
+ cxgbi_sock_put(csk);
+ l2t_release(t3dev, csk->l2t);
+ csk->l2t = NULL;
+
+ return ret;
}
cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
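The cxgb3i error path above is reshaped into the usual kernel goto ladder, so every resource acquired before the failure point is released exactly once and in reverse order. A self-contained sketch of that shape with stand-in resources A and B:

#include <linux/types.h>
#include <linux/errno.h>

struct example_ctx { bool a_held, b_held; };

static int example_acquire_a(struct example_ctx *c) { c->a_held = true; return 0; }
static void example_release_a(struct example_ctx *c) { c->a_held = false; }
static int example_acquire_b(struct example_ctx *c) { c->b_held = true; return 0; }
static void example_release_b(struct example_ctx *c) { c->b_held = false; }
static int example_start(struct example_ctx *c) { return c->a_held && c->b_held ? 0 : -EINVAL; }

static int example_open(struct example_ctx *ctx)
{
	int ret;

	ret = example_acquire_a(ctx);
	if (ret)
		return ret;			/* nothing to unwind yet */

	ret = example_acquire_b(ctx);
	if (ret)
		goto release_a;

	ret = example_start(ctx);
	if (ret)
		goto release_b;

	return 0;

release_b:					/* unwind in reverse order */
	example_release_b(ctx);
release_a:
	example_release_a(ctx);
	return ret;
}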
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index bc1086ae6835..8ce8592f6a64 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,10 +1127,9 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
if (!csk)
goto rel_skb;
- if (csk)
- pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
- (&csk->saddr), (&csk->daddr), csk,
- csk->state, csk->flags, csk->tid, rpl->status);
+ pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+ (&csk->saddr), (&csk->daddr), csk,
+ csk->state, csk->flags, csk->tid, rpl->status);
if (rpl->status == CPL_ERR_ABORT_FAILED)
goto rel_skb;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index fbd2ae40dab4..fcc5aa9f6014 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3744,6 +3744,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->afu_cookie = cfg->ops->create_afu(pdev);
if (unlikely(!cfg->afu_cookie)) {
dev_err(dev, "%s: create_afu failed\n", __func__);
+ rc = -ENOMEM;
goto out_remove;
}
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 02dff3a684e0..0497ef6a9453 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -27,6 +27,7 @@
/*#define UARTDELAY 1 */
#include <linux/module.h>
+#include <linux/pgtable.h>
MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
@@ -55,7 +56,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
#include <linux/mutex.h>
#include <asm/processor.h> /* for boot_cpu_data */
-#include <asm/pgtable.h>
#include <asm/io.h> /* for virt_to_bus, etc. */
#include <scsi/scsi.h>
@@ -1120,7 +1120,7 @@ static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u6
{
struct adpt_device* d;
- if(chan < 0 || chan >= MAX_CHANNEL)
+ if (chan >= MAX_CHANNEL)
return NULL;
d = pHba->channel[chan].device[id];
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 25dae9f0b205..cb41d166e0c0 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1915,7 +1915,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
fcoe_ctlr_link_up(ctlr);
- };
+ }
} else if (fcoe_ctlr_link_down(ctlr)) {
switch (cdev->enabled) {
case FCOE_CTLR_DISABLED:
@@ -1927,7 +1927,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
stats->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
- };
+ }
}
out:
return rc;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 18584ab27c32..7910b573bacb 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -49,8 +49,8 @@
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
-LIST_HEAD(fnic_list);
-DEFINE_SPINLOCK(fnic_list_lock);
+static LIST_HEAD(fnic_list);
+static DEFINE_SPINLOCK(fnic_list_lock);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index b60795893994..27535c90b248 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2624,8 +2624,8 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
- if (fnic->internal_reset_inprogress == 0) {
- fnic->internal_reset_inprogress = 1;
+ if (!fnic->internal_reset_inprogress) {
+ fnic->internal_reset_inprogress = true;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2654,7 +2654,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
- fnic->internal_reset_inprogress = 0;
+ fnic->internal_reset_inprogress = false;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 1b88a3b53eee..a2beee6e09f0 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -254,7 +254,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
}
}
-int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
+static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
@@ -316,7 +316,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
return -ETIMEDOUT;
}
-int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
@@ -411,7 +411,7 @@ int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
-int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
@@ -422,7 +422,7 @@ int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
}
-int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
int err;
unsigned int fetch_index;
@@ -492,7 +492,7 @@ err_free_devcmd2:
}
-void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq);
@@ -503,7 +503,7 @@ void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
}
-int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
+static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
int err;
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 015af2cdabaf..442972c04e65 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -25,7 +25,7 @@
#include "vnic_wq.h"
-int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int index, enum vnic_res_type res_type)
{
wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
@@ -37,7 +37,7 @@ int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
}
-int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
+static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index fe03410268e6..7f150d52b4a6 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -329,8 +329,8 @@ static void gdth_scsi_done(struct scsi_cmnd *scp)
scp->scsi_done(scp);
}
-int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info)
+static int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd,
+ char *cmnd, int timeout, u32 *info)
{
gdth_ha_str *ha = shost_priv(sdev->host);
struct scsi_cmnd *scp;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 11df0eca0293..727f8c8f30b5 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 9a6deb21fe4d..11caa4b0d797 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -898,8 +898,11 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ if (phy->phy_attached)
+ return;
+
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
add_timer(&phy->timer);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index fa25766502a2..2e1718f9ade2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1175,15 +1175,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
}
-static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot)
+static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
- enum exec_status sts;
struct hisi_sas_complete_v1_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
@@ -1194,7 +1193,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
cmplt_hdr_data = le32_to_cpu(complete_hdr->data);
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -1260,7 +1259,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
slot_err_v1_hw(hisi_hba, task, slot);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -1309,12 +1308,9 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
out:
hisi_sas_slot_task_free(hisi_hba, task, slot);
- sts = ts->stat;
if (task->task_done)
task->task_done(task);
-
- return sts;
}
/* Interrupts */
@@ -1757,6 +1753,7 @@ static struct device_attribute *host_attrs_v1_hw[] = {
static struct scsi_host_template sht_v1_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e05faf315dcd..e7e7849a4c14 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2318,8 +2318,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
}
}
-static int
-slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2327,7 +2327,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v2_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v2_hdr *complete_hdr =
@@ -2337,7 +2336,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2406,7 +2405,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[2], error_info[3]);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -2456,12 +2455,11 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2473,15 +2471,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
@@ -3533,6 +3529,7 @@ static struct device_attribute *host_attrs_v2_hw[] = {
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 374885aa8d77..3e6b78a1f993 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -912,11 +912,15 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
return -EINVAL;
}
- /* Switch over to MSI handling , from PCI AER default */
+ /*
+ * This DSM handles some hardware-related configurations:
+ * 1. Switch over to MSI error handling in kernel
+ * 2. BIOS *may* reset some register values through this method
+ */
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
DSM_FUNC_ERR_HANDLE_MSI, NULL);
if (!obj)
- dev_warn(dev, "Switch over to MSI handling failed\n");
+ dev_warn(dev, "can not find DSM method, ignore\n");
else
ACPI_FREE(obj);
@@ -2152,8 +2156,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
}
}
-static int
-slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
@@ -2161,7 +2165,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
struct task_status_struct *ts;
struct domain_device *device;
struct sas_ha_struct *ha;
- enum exec_status sts;
struct hisi_sas_complete_v3_hdr *complete_queue =
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v3_hdr *complete_hdr =
@@ -2171,7 +2174,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
u32 dw0, dw1, dw3;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return -EINVAL;
+ return;
ts = &task->task_status;
device = task->dev;
@@ -2233,7 +2236,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
error_info[0], error_info[1],
error_info[2], error_info[3]);
if (unlikely(slot->abort))
- return ts->stat;
+ return;
goto out;
}
@@ -2278,12 +2281,11 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
dev_info(dev, "slot complete: task(%pK) aborted\n", task);
- return SAS_ABORTED_TASK;
+ return;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -2295,15 +2297,13 @@ out:
spin_unlock_irqrestore(&device->done_lock, flags);
dev_info(dev, "slot complete: task(%pK) ignored\n ",
task);
- return sts;
+ return;
}
spin_unlock_irqrestore(&device->done_lock, flags);
}
if (task->task_done)
task->task_done(task);
-
- return sts;
}
static irqreturn_t cq_thread_v3_hw(int irq_no, void *p)
@@ -2897,6 +2897,7 @@ static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
};
static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
@@ -3071,6 +3072,7 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
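The reworded comment in hw_init_v3_hw() explains that the _DSM evaluation both switches error reporting to MSI and may let firmware restore register values. A bare-bones sketch of evaluating a _DSM function and freeing the result; the GUID parameter and function index are placeholders, not the hisi_sas values:

#include <linux/acpi.h>
#include <linux/device.h>

#define EXAMPLE_DSM_FUNC	8	/* placeholder function index */

static void example_run_dsm(struct device *dev, const guid_t *guid)
{
	union acpi_object *obj;

	/* revision 0, no argument package; firmware performs the side effects */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, 0,
				EXAMPLE_DSM_FUNC, NULL);
	if (!obj)
		dev_warn(dev, "example: _DSM method not found, ignoring\n");
	else
		ACPI_FREE(obj);		/* the caller owns the returned object */
}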
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 59f0f1030c54..44e64aa21194 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2384,7 +2384,7 @@ static struct vio_driver ibmvscsi_driver = {
static struct srp_function_template ibmvscsi_transport_functions = {
};
-int __init ibmvscsi_module_init(void)
+static int __init ibmvscsi_module_init(void)
{
int ret;
@@ -2406,7 +2406,7 @@ int __init ibmvscsi_module_init(void)
return ret;
}
-void __exit ibmvscsi_module_exit(void)
+static void __exit ibmvscsi_module_exit(void)
{
vio_unregister_driver(&ibmvscsi_driver);
srp_release_transport(ibmvscsi_transport_template);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d48a8fa997b9..7d77997d26d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1164,7 +1164,7 @@ static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int pr
default:
res->ata_class = ATA_DEV_UNKNOWN;
break;
- };
+ }
}
/**
@@ -9529,8 +9529,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
}
}
- if (ioa_cfg->ipr_cmd_pool)
- dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
+ dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
kfree(ioa_cfg->ipr_cmnd_list);
kfree(ioa_cfg->ipr_cmnd_list_dma);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 680e30947671..4e6b1decbca7 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -500,19 +500,19 @@ struct sci_timer {
static inline
void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
{
- tmr->cancel = 0;
+ tmr->cancel = false;
timer_setup(&tmr->timer, fn, 0);
}
static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
{
- tmr->cancel = 0;
+ tmr->cancel = false;
mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
}
static inline void sci_del_timer(struct sci_timer *tmr)
{
- tmr->cancel = 1;
+ tmr->cancel = true;
del_timer(&tmr->timer);
}
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index c48a73a0f517..de71d240a56f 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 874dd4beed10..e5a64d4f255c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2627,7 +2627,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
if (xmit_can_sleep) {
snprintf(ihost->workq_name, sizeof(ihost->workq_name),
"iscsi_q_%d", shost->host_no);
- ihost->workq = create_singlethread_workqueue(ihost->workq_name);
+ ihost->workq = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 2, ihost->workq_name);
if (!ihost->workq)
goto free_host;
}
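The libiscsi hunk replaces create_singlethread_workqueue() with an explicit alloc_workqueue() call so the WQ_MEM_RECLAIM/WQ_UNBOUND/WQ_SYSFS properties are stated rather than implied. A stand-alone sketch of the same switch; the queue name, max_active value, and init/exit wrappers are illustrative, mirroring the flags chosen in the hunk rather than the old wrapper's ordered, single-threaded defaults:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static int example_wq_init(int host_no)
{
	/* name is formatted from host_no, as with the per-host iscsi queues */
	example_wq = alloc_workqueue("example_q_%d",
				     WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM |
				     WQ_UNBOUND,
				     2, host_no);
	return example_wq ? 0 : -ENOMEM;
}

static void example_wq_exit(void)
{
	destroy_workqueue(example_wq);
}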
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index c5a828a041e0..5d716d388707 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -160,6 +160,7 @@ qc_already_gone:
}
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
+ __must_hold(ap->lock)
{
struct sas_task *task;
struct scatterlist *sg;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 62e96d4fdcc6..c3ceb6e5b061 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -627,6 +627,19 @@ struct lpfc_ras_fwlog {
enum ras_state state; /* RAS logging running state */
};
+enum lpfc_irq_chann_mode {
+ /* Assign IRQs to all possible cpus that have hardware queues */
+ NORMAL_MODE,
+
+ /* Assign IRQs only to cpus on the same numa node as HBA */
+ NUMA_MODE,
+
+ /* Assign IRQs only on non-hyperthreaded CPUs. This is the
+ * same as NORMAL_MODE, but assigns IRQs only on physical CPUs.
+ */
+ NHT_MODE,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -835,7 +848,6 @@ struct lpfc_hba {
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
- uint32_t cfg_irq_numa;
uint32_t cfg_suppress_rsp;
uint32_t cfg_nvme_oas;
uint32_t cfg_nvme_embed_cmd;
@@ -1003,6 +1015,7 @@ struct lpfc_hba {
mempool_t *active_rrq_pool;
struct fc_host_statistics link_stats;
+ enum lpfc_irq_chann_mode irq_chann_mode;
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
@@ -1314,19 +1327,19 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
}
/**
- * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node
- * @numa_mask: Pointer to phba's numa_mask member.
+ * lpfc_next_online_cpu - Finds next online CPU on cpumask
+ * @mask: Pointer to phba's cpumask member.
* @start: starting cpu index
*
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
static inline unsigned int
-lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start)
+lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
unsigned int cpu_it;
- for_each_cpu_wrap(cpu_it, numa_mask, start) {
+ for_each_cpu_wrap(cpu_it, mask, start) {
if (cpu_online(cpu_it))
break;
}
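lpfc_next_online_cpu() above walks a cpumask starting at an arbitrary index, wrapping around until it finds an online CPU and falling back to nr_cpu_ids. A self-contained sketch of the same search outside the lpfc structures:

#include <linux/cpumask.h>

/*
 * Return the first online CPU in @mask at or after @start, wrapping around;
 * return nr_cpu_ids if the mask contains no online CPU.
 */
static unsigned int example_next_online_cpu(const struct cpumask *mask,
					    unsigned int start)
{
	unsigned int cpu;

	for_each_cpu_wrap(cpu, mask, start) {
		if (cpu_online(cpu))
			return cpu;
	}
	return nr_cpu_ids;
}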
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f089867674cb..a62c60ca6477 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -4874,7 +4874,7 @@ lpfc_request_firmware_upgrade_store(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- int val = 0, rc = -EINVAL;
+ int val = 0, rc;
/* Sanity check on user data */
if (!isdigit(buf[0]))
@@ -5701,17 +5701,69 @@ LPFC_ATTR_R(hdw_queue,
LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
"Set the number of I/O Hardware Queues");
-static inline void
-lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
+#if IS_ENABLED(CONFIG_X86)
+/**
+ * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
+ * irq_chann_mode
+ * @phba: Pointer to HBA context object.
+ **/
+static void
+lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
+{
+ unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
+ const struct cpumask *sibling_mask;
+ struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ cpumask_clear(aff_mask);
+
+ if (phba->irq_chann_mode == NUMA_MODE) {
+ /* Check if we're a NUMA architecture */
+ numa_node = dev_to_node(&phba->pcidev->dev);
+ if (numa_node == NUMA_NO_NODE) {
+ phba->irq_chann_mode = NORMAL_MODE;
+ return;
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ switch (phba->irq_chann_mode) {
+ case NUMA_MODE:
+ if (cpu_to_node(cpu) == numa_node)
+ cpumask_set_cpu(cpu, aff_mask);
+ break;
+ case NHT_MODE:
+ sibling_mask = topology_sibling_cpumask(cpu);
+ first_cpu = cpumask_first(sibling_mask);
+ if (first_cpu < nr_cpu_ids)
+ cpumask_set_cpu(first_cpu, aff_mask);
+ break;
+ default:
+ break;
+ }
+ }
+}
+#endif
+
+static void
+lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
{
#if IS_ENABLED(CONFIG_X86)
- /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- phba->cfg_irq_numa = 1;
- else
- phba->cfg_irq_numa = 0;
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_AMD:
+ /* If AMD architecture, then default is NUMA_MODE */
+ phba->irq_chann_mode = NUMA_MODE;
+ break;
+ case X86_VENDOR_INTEL:
+ /* If Intel architecture, then default is no hyperthread mode */
+ phba->irq_chann_mode = NHT_MODE;
+ break;
+ default:
+ phba->irq_chann_mode = NORMAL_MODE;
+ break;
+ }
+ lpfc_cpumask_irq_mode_init(phba);
#else
- phba->cfg_irq_numa = 0;
+ phba->irq_chann_mode = NORMAL_MODE;
#endif
}
@@ -5723,6 +5775,7 @@ lpfc_assign_default_irq_numa(struct lpfc_hba *phba)
*
* 0 = Configure number of IRQ Channels to:
* if AMD architecture, number of CPUs on HBA's NUMA node
+ * if Intel architecture, number of physical CPUs.
* otherwise, number of active CPUs.
* [1,256] = Manually specify how many IRQ Channels to use.
*
@@ -5748,35 +5801,44 @@ MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
static int
lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
{
- const struct cpumask *numa_mask;
+ const struct cpumask *aff_mask;
if (phba->cfg_use_msi != 2) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8532 use_msi = %u ignoring cfg_irq_numa\n",
phba->cfg_use_msi);
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return 0;
}
/* Check if default setting was passed */
if (val == LPFC_IRQ_CHANN_DEF)
- lpfc_assign_default_irq_numa(phba);
+ lpfc_assign_default_irq_chann(phba);
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
+ if (phba->irq_chann_mode != NORMAL_MODE) {
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
- if (cpumask_empty(numa_mask)) {
+ if (cpumask_empty(aff_mask)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "8533 Could not identify NUMA node, "
- "ignoring cfg_irq_numa\n");
- phba->cfg_irq_numa = 0;
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ "8533 Could not identify CPUS for "
+ "mode %d, ignoring\n",
+ phba->irq_chann_mode);
+ phba->irq_chann_mode = NORMAL_MODE;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
} else {
- phba->cfg_irq_chann = cpumask_weight(numa_mask);
+ phba->cfg_irq_chann = cpumask_weight(aff_mask);
+
+ /* If no hyperthread mode, then set hdwq count to
+ * aff_mask weight as well
+ */
+ if (phba->irq_chann_mode == NHT_MODE)
+ phba->cfg_hdw_queue = phba->cfg_irq_chann;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"8543 lpfc_irq_chann set to %u "
- "(numa)\n", phba->cfg_irq_chann);
+ "(mode: %d)\n", phba->cfg_irq_chann,
+ phba->irq_chann_mode);
}
} else {
if (val > LPFC_IRQ_CHANN_MAX) {
@@ -5787,7 +5849,7 @@ lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
val,
LPFC_IRQ_CHANN_MIN,
LPFC_IRQ_CHANN_MAX);
- phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN;
+ phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
return -EINVAL;
}
phba->cfg_irq_chann = val;
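For NHT_MODE, lpfc_cpumask_irq_mode_init() above builds an affinity mask containing one CPU per physical core by taking the first sibling of each thread set. The core of that computation, isolated into a hedged sketch where the mask is a local parameter rather than an HBA field:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Collect the first sibling of every core, i.e. one "physical" CPU each. */
static void example_build_physical_cpu_mask(struct cpumask *aff_mask)
{
	unsigned int cpu, first_cpu;

	cpumask_clear(aff_mask);
	for_each_possible_cpu(cpu) {
		first_cpu = cpumask_first(topology_sibling_cpumask(cpu));
		if (first_cpu < nr_cpu_ids)
			cpumask_set_cpu(first_cpu, aff_mask);
	}
}

The weight of the resulting mask is then used as the IRQ channel (and hardware queue) count, which is why the hunk above copies cfg_irq_chann into cfg_hdw_queue in NHT mode.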
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 196f6ae9952e..69d4710d95a0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -461,7 +461,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
- (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 4daae90e0c99..ae0a8252128c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2429,7 +2429,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
return 0;
if (dent == phba->debug_InjErrLBA) {
- if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
+ if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
+ (dstbuf[2] == 'f'))
tmp = (uint64_t)(-1);
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 80d1e661b0d4..3d670568a276 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7936,19 +7936,13 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (unlikely(!pring))
return;
- if ((phba->pport->load_flag & FC_UNLOADING))
+ if (phba->pport->load_flag & FC_UNLOADING)
return;
+
spin_lock_irq(&phba->hbalock);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
- if ((phba->pport->load_flag & FC_UNLOADING)) {
- if (phba->sli_rev == LPFC_SLI_REV4)
- spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
- return;
- }
-
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
cmd = &piocb->iocb;
@@ -8514,6 +8508,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irq(shost->host_lock);
if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
spin_unlock_irq(shost->host_lock);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
goto dropit;
}
spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f5952f8cd4b5..4084f7f2b821 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1360,14 +1360,14 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
- * lpfc_update_fcf_record - Update driver fcf record
* __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: Index for the lpfc_fcf_record.
* @new_fcf_record: pointer to hba fcf record.
*
* This routine updates the driver FCF priority record from the new HBA FCF
- * record. This routine is called with the host lock held.
+ * record. The hbalock is asserted to be held in the code path calling this
+ * routine.
**/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
@@ -1376,8 +1376,6 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
{
struct lpfc_fcf_pri *fcf_pri;
- lockdep_assert_held(&phba->hbalock);
-
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
fcf_pri->fcf_rec.fcf_index = fcf_index;
/* FCF record priority */
@@ -1455,7 +1453,7 @@ lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
*
* This routine updates the driver FCF record from the new HBA FCF record
* together with the address mode, vlan_id, and other information. This
- * routine is called with the host lock held.
+ * routine is called with the hbalock held.
**/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 10c5d1c3122e..6dfff0376547 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3541,7 +3541,7 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_UER_SHIFT 0
#define lpfc_mbx_set_feature_UER_MASK 0x00000001
#define lpfc_mbx_set_feature_UER_WORD word6
-#define lpfc_mbx_set_feature_mds_SHIFT 0
+#define lpfc_mbx_set_feature_mds_SHIFT 2
#define lpfc_mbx_set_feature_mds_MASK 0x00000001
#define lpfc_mbx_set_feature_mds_WORD word6
#define lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ea99483345f2..69a5249e007a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6020,29 +6020,6 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
}
/**
- * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node
- * @phba: Pointer to HBA context object.
- *
- **/
-static void
-lpfc_cpumask_of_node_init(struct lpfc_hba *phba)
-{
- unsigned int cpu, numa_node;
- struct cpumask *numa_mask = &phba->sli4_hba.numa_mask;
-
- cpumask_clear(numa_mask);
-
- /* Check if we're a NUMA architecture */
- numa_node = dev_to_node(&phba->pcidev->dev);
- if (numa_node == NUMA_NO_NODE)
- return;
-
- for_each_possible_cpu(cpu)
- if (cpu_to_node(cpu) == numa_node)
- cpumask_set_cpu(cpu, numa_mask);
-}
-
-/**
* lpfc_enable_pci_dev - Enable a generic PCI device.
* @phba: pointer to lpfc hba data structure.
*
@@ -6480,7 +6457,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
phba->sli4_hba.curr_disp_cpu = 0;
- lpfc_cpumask_of_node_init(phba);
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
@@ -6688,6 +6664,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
#endif
/* Not supported for NVMET */
phba->cfg_xri_rebalancing = 0;
+ if (phba->irq_chann_mode == NHT_MODE) {
+ phba->cfg_irq_chann =
+ phba->sli4_hba.num_present_cpu;
+ phba->cfg_hdw_queue =
+ phba->sli4_hba.num_present_cpu;
+ phba->irq_chann_mode = NORMAL_MODE;
+ }
break;
}
}
@@ -7029,7 +7012,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_possible_cpu = 0;
phba->sli4_hba.num_present_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
- cpumask_clear(&phba->sli4_hba.numa_mask);
+ cpumask_clear(&phba->sli4_hba.irq_aff_mask);
/* Free memory allocated for fast-path work queue handles */
kfree(phba->sli4_hba.hba_eq_hdl);
@@ -11284,11 +11267,12 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
* @offline: true, cpu is going offline. false, cpu is coming online.
*
* If cpu is going offline, we'll try our best effort to find the next
- * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities.
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
+ * affinities.
*
- * If cpu is coming online, reaffinitize the IRQ back to the onlineng cpu.
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
*
- * Note: Call only if cfg_irq_numa is enabled, otherwise rely on
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
* PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
*
**/
@@ -11298,14 +11282,14 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
struct lpfc_vector_map_info *cpup;
struct cpumask *aff_mask;
unsigned int cpu_select, cpu_next, idx;
- const struct cpumask *numa_mask;
+ const struct cpumask *orig_mask;
- if (!phba->cfg_irq_numa)
+ if (phba->irq_chann_mode == NORMAL_MODE)
return;
- numa_mask = &phba->sli4_hba.numa_mask;
+ orig_mask = &phba->sli4_hba.irq_aff_mask;
- if (!cpumask_test_cpu(cpu, numa_mask))
+ if (!cpumask_test_cpu(cpu, orig_mask))
return;
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -11314,9 +11298,9 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
return;
if (offline) {
- /* Find next online CPU on NUMA node */
- cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next);
+ /* Find next online CPU on original mask */
+ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
@@ -11431,7 +11415,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
char *name;
- const struct cpumask *numa_mask = NULL;
+ const struct cpumask *aff_mask = NULL;
unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
struct lpfc_hba_eq_hdl *eqhdl;
const struct cpumask *maskp;
@@ -11441,16 +11425,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Set up MSI-X multi-message vectors */
vectors = phba->cfg_irq_chann;
- if (phba->cfg_irq_numa) {
- numa_mask = &phba->sli4_hba.numa_mask;
- cpu_cnt = cpumask_weight(numa_mask);
+ if (phba->irq_chann_mode != NORMAL_MODE)
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+ if (aff_mask) {
+ cpu_cnt = cpumask_weight(aff_mask);
vectors = min(phba->cfg_irq_chann, cpu_cnt);
- /* cpu: iterates over numa_mask including offline or online
- * cpu_select: iterates over online numa_mask to set affinity
+ /* cpu: iterates over aff_mask including offline or online
+ * cpu_select: iterates over online aff_mask to set affinity
*/
- cpu = cpumask_first(numa_mask);
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ cpu = cpumask_first(aff_mask);
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else {
flags |= PCI_IRQ_AFFINITY;
}
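The loop above walks two cursors over the affinity mask: cpu visits every member, online or offline, to build the EQ map, while cpu_select only lands on online members to set IRQ affinity. A hypothetical helper in the spirit of lpfc_next_online_cpu() (the real lpfc definition may differ; this is only a sketch):

#include <linux/cpumask.h>

/* Return the first CPU in @mask that is online and numbered >= @start,
 * or nr_cpu_ids if no such CPU exists. */
static unsigned int demo_next_online_cpu(const struct cpumask *mask,
					 unsigned int start)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		if (cpu >= start)
			return cpu;
	return nr_cpu_ids;
}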
@@ -11484,7 +11470,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
eqhdl->irq = pci_irq_vector(phba->pcidev, index);
- if (phba->cfg_irq_numa) {
+ if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
lpfc_irq_set_aff(eqhdl, cpu_select);
@@ -11494,11 +11480,11 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_CPU_FIRST_IRQ,
cpu);
- /* Iterate to next offline or online cpu in numa_mask */
- cpu = cpumask_next(cpu, numa_mask);
+ /* Iterate to next offline or online cpu in aff_mask */
+ cpu = cpumask_next(cpu, aff_mask);
- /* Find next online cpu in numa_mask to set affinity */
- cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu);
+ /* Find next online cpu in aff_mask to set affinity */
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
} else if (vectors == 1) {
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e35b52b66d6c..e34e0f11bfdd 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1378,7 +1378,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*/
if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
- phba->host_gp = &phba->mbox->us.s2.host[0];
+ phba->host_gp = (struct lpfc_hgp __iomem *)
+ &phba->mbox->us.s2.host[0];
phba->hbq_put = NULL;
offset = (uint8_t *)&phba->mbox->us.s2.host -
(uint8_t *)phba->slim2p.virt;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index b46ba70f78da..b16c087ba272 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1654,11 +1654,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (vport->load_flag & FC_UNLOADING) {
- ret = -ENODEV;
- goto out_fail;
- }
-
if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
@@ -2491,38 +2486,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
rport = remote_port->private;
if (oldrport) {
- /* New remoteport record does not guarantee valid
- * host private memory area.
- */
- if (oldrport == remote_port->private) {
- /* Same remoteport - ndlp should match.
- * Just reuse.
- */
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebind lport to current "
- "remoteport x%px wwpn 0x%llx, "
- "Data: x%x x%x x%px x%px x%x "
- " x%06x\n",
- remote_port,
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- oldrport->ndlp,
- ndlp,
- ndlp->nlp_type,
- ndlp->nlp_DID);
-
- /* It's a complete rebind only if the driver
- * is registering with the same ndlp. Otherwise
- * the driver likely executed a node swap
- * prior to this registration and the ndlp to
- * remoteport binding needs to be redone.
- */
- if (prev_ndlp == ndlp)
- return 0;
-
- }
/* Sever the ndlp<->rport association
* before dropping the ndlp ref from
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 32eb5e873e9b..88760416a8cb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1030,11 +1030,6 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
- if (phba->pport->load_flag & FC_UNLOADING) {
- rc = -ENODEV;
- goto aerr;
- }
-
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -1157,9 +1152,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
- if (phba->pport->load_flag & FC_UNLOADING)
- return;
-
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9e21c4f3b009..25653baba367 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -535,7 +535,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
if (count > eq->EQ_max_eqe)
eq->EQ_max_eqe = count;
- eq->queue_claimed = 0;
+ xchg(&eq->queue_claimed, 0);
rearm_and_exit:
/* Always clear the EQ. */
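Replacing the plain store with xchg() gives the release of queue_claimed full-barrier semantics, so event-queue entries processed under the claim are ordered before the cleared flag becomes visible to another CPU. An illustrative claim/release pair, not lpfc's exact code (struct and function names are invented):

#include <linux/atomic.h>
#include <linux/types.h>

struct demo_eq {
	unsigned int queue_claimed;
};

/* cmpxchg() either wins the claim atomically or observes another owner. */
static bool demo_claim(struct demo_eq *eq)
{
	return cmpxchg(&eq->queue_claimed, 0, 1) == 0;
}

/* xchg() is a full memory barrier, unlike a plain "queue_claimed = 0". */
static void demo_release(struct demo_eq *eq)
{
	xchg(&eq->queue_claimed, 0);
}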
@@ -1245,8 +1245,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
+ * This function is called to release the driver iocb object
+ * to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
* The sqlq structure that holds the xritag and phys and virtual
@@ -1256,7 +1256,8 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* this IO was aborted then the sglq entry it put on the
* lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
* IO has good status or fails for any other reason then the sglq
- * entry is added to the free list (lpfc_els_sgl_list).
+ * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
+ * asserted held in the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
@@ -1266,8 +1267,6 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;
- lockdep_assert_held(&phba->hbalock);
-
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
@@ -1330,18 +1329,17 @@ out:
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
- * This function is called with hbalock held to release driver
- * iocb object to the iocb pool. The iotag in the iocb object
- * does not change for each use of the iocb object. This function
- * clears all other fields of the iocb object when it is freed.
+ * This function is called to release the driver iocb object to the
+ * iocb pool. The iotag in the iocb object does not change for each
+ * use of the iocb object. This function clears all other fields of
+ * the iocb object when it is freed. The hbalock is asserted held in
+ * the code path calling this routine.
**/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
- lockdep_assert_held(&phba->hbalock);
-
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
@@ -1786,17 +1784,17 @@ lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
* @nextiocb: Pointer to driver iocb object which need to be
* posted to firmware.
*
- * This function is called with hbalock held to post a new iocb to
- * the firmware. This function copies the new iocb to ring iocb slot and
- * updates the ring pointers. It adds the new iocb to txcmplq if there is
+ * This function is called to post a new iocb to the firmware. This
+ * function copies the new iocb to ring iocb slot and updates the
+ * ring pointers. It adds the new iocb to txcmplq if there is
* a completion call back for this iocb else the function will free the
- * iocb object.
+ * iocb object. The hbalock is asserted held in the code path calling
+ * this routine.
**/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
- lockdep_assert_held(&phba->hbalock);
/*
* Set up an iotag
*/
@@ -11284,6 +11282,7 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* request, this function issues abort out unconditionally. This function is
* called with hbalock held. The function returns 0 when it fails due to
* memory allocation failure or when the command iocb is an abort request.
+ * The hbalock is asserted held in the code path calling this routine.
**/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
@@ -11297,8 +11296,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflags;
struct lpfc_nodelist *ndlp;
- lockdep_assert_held(&phba->hbalock);
-
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
@@ -13808,7 +13805,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
"0369 No entry from completion queue "
"qid=%d\n", cq->queue_id);
- cq->queue_claimed = 0;
+ xchg(&cq->queue_claimed, 0);
rearm_and_exit:
phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
@@ -14389,7 +14386,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
int ecount = 0;
int hba_eqidx;
struct lpfc_eq_intr_info *eqi;
- uint32_t icnt;
/* Get the driver's phba structure from the dev_id */
hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
@@ -14417,11 +14413,12 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = phba->sli4_hba.eq_info;
- icnt = this_cpu_inc_return(eqi->icnt);
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
+
fpeq->last_cpu = raw_smp_processor_id();
- if (icnt > LPFC_EQD_ISR_TRIGGER &&
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
fpeq->q_flag & HBA_EQ_DELAY_CHK &&
phba->cfg_auto_imax &&
fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 8da7429e385a..4decb53d81c3 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -920,7 +920,7 @@ struct lpfc_sli4_hba {
struct lpfc_vector_map_info *cpu_map;
uint16_t num_possible_cpu;
uint16_t num_present_cpu;
- struct cpumask numa_mask;
+ struct cpumask irq_aff_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ca40c47cfbe0..ab0bc26c098d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.0"
+#define LPFC_DRIVER_VERSION "12.8.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 35d3e322d6d5..43edf83fdb62 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -20,9 +20,9 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pgtable.h>
#include <asm/dbdma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/macio.h>
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 8443f2f35be2..8f918df631bf 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -302,8 +302,8 @@ static struct pci_driver megaraid_pci_driver = {
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)
-DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
- NULL);
+static DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
+ NULL);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_shost_attrs[] = {
@@ -312,7 +312,7 @@ static struct device_attribute *megaraid_shost_attrs[] = {
};
-DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
+static DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 83d8c4cb1ad5..af2c7a2a9565 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -21,8 +21,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.713.01.00-rc1"
-#define MEGASAS_RELDATE "Dec 27, 2019"
+#define MEGASAS_VERSION "07.714.04.00-rc1"
+#define MEGASAS_RELDATE "Apr 14, 2020"
#define MEGASAS_MSIX_NAME_LEN 32
@@ -511,7 +511,7 @@ union MR_PROGRESS {
*/
struct MR_PD_PROGRESS {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
@@ -537,7 +537,7 @@ struct MR_PD_PROGRESS {
};
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 rbld:1;
u32 patrol:1;
u32 clear:1;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index babe85d7b537..00668335c2af 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -81,7 +81,7 @@ int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
-int rdpq_enable = 1;
+static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
@@ -89,7 +89,7 @@ unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
-unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
@@ -1982,9 +1982,9 @@ static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
if (is_target_prop) {
tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
- if (tgt_device_qd &&
- (tgt_device_qd <= instance->host->can_queue))
- device_qd = tgt_device_qd;
+ if (tgt_device_qd)
+ device_qd = min(instance->host->can_queue,
+ (int)tgt_device_qd);
}
if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 89c3685f5163..3b3d04d7671f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -85,7 +85,7 @@ u32 mega_mod64(u64 dividend, u32 divisor)
*
* @return quotient
**/
-u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
+static u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
u32 remainder;
u64 d;
@@ -367,7 +367,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
return 1;
}
-u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
+static u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
@@ -417,7 +417,7 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* div_error - Devide error code.
*/
-u32 mr_spanset_get_span_block(struct megasas_instance *instance,
+static u32 mr_spanset_get_span_block(struct megasas_instance *instance,
u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
@@ -642,7 +642,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
}
/* This Function will return Phys arm */
-u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
+static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -785,7 +785,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
* span - Span number
* block - Absolute Block number in the physical disk
*/
-u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
+static u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
struct MR_DRV_RAID_MAP_ALL *map)
@@ -1342,7 +1342,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
}
}
-u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+static u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
struct LD_LOAD_BALANCE_INFO *lbInfo,
struct IO_REQUEST_INFO *io_info,
struct MR_DRV_RAID_MAP_ALL *drv_map)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b2ad96564484..319f241da4b6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -176,7 +176,7 @@ static inline bool megasas_check_same_4gb_region
* megasas_enable_intr_fusion - Enables interrupts
* @regs: MFI register set
*/
-void
+static void
megasas_enable_intr_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
@@ -198,7 +198,7 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
* megasas_disable_intr_fusion - Disables interrupt
* @regs: MFI register set
*/
-void
+static void
megasas_disable_intr_fusion(struct megasas_instance *instance)
{
u32 mask = 0xFFFFFFFF;
@@ -2070,7 +2070,6 @@ static bool
megasas_is_prp_possible(struct megasas_instance *instance,
struct scsi_cmnd *scmd, int sge_count)
{
- int i;
u32 data_length = 0;
struct scatterlist *sg_scmd;
bool build_prp = false;
@@ -2099,63 +2098,6 @@ megasas_is_prp_possible(struct megasas_instance *instance,
build_prp = true;
}
-/*
- * Below code detects gaps/holes in IO data buffers.
- * What does holes/gaps mean?
- * Any SGE except first one in a SGL starts at non NVME page size
- * aligned address OR Any SGE except last one in a SGL ends at
- * non NVME page size boundary.
- *
- * Driver has already informed block layer by setting boundary rules for
- * bio merging done at NVME page size boundary calling kernel API
- * blk_queue_virt_boundary inside slave_config.
- * Still there is possibility of IO coming with holes to driver because of
- * IO merging done by IO scheduler.
- *
- * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
- * IO scheduling so no IO merging.
- *
- * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
- * then sending IOs with holes.
- *
- * Though driver can request block layer to disable IO merging by calling-
- * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
- * user may tune sysfs parameter- nomerges again to 0 or 1.
- *
- * If in future IO scheduling is enabled with SCSI BLK MQ,
- * this algorithm to detect holes will be required in driver
- * for SCSI BLK MQ enabled case as well.
- *
- *
- */
- scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
- if ((i != 0) && (i != (sge_count - 1))) {
- if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
- mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == 0)) {
- if ((mega_mod64((sg_dma_address(sg_scmd) +
- sg_dma_len(sg_scmd)),
- mr_nvme_pg_size))) {
- build_prp = false;
- break;
- }
- }
-
- if ((sge_count > 1) && (i == (sge_count - 1))) {
- if (mega_mod64(sg_dma_address(sg_scmd),
- mr_nvme_pg_size)) {
- build_prp = false;
- break;
- }
- }
- }
-
return build_prp;
}
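The deleted block re-validated every SGE against the NVMe page boundary; the removed comment notes that the driver already tells the block layer not to merge bios across that boundary from slave_configure. A hedged sketch of that setting (the 4096-byte page size and the function name are assumptions for illustration only):

#include <scsi/scsi_device.h>
#include <linux/blkdev.h>

static int demo_slave_configure(struct scsi_device *sdev)
{
	unsigned long nvme_pg_size = 4096;	/* assumed NVMe page size */

	/* Forbid bio merging across an NVMe page boundary, so SGLs arrive
	 * without the gaps/holes the deleted loop used to hunt for. */
	blk_queue_virt_boundary(sdev->request_queue, nvme_pg_size - 1);
	return 0;
}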
@@ -4230,7 +4172,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
bool return_ioctl)
{
int j;
@@ -4238,8 +4180,9 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
struct fusion_context *fusion;
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
u16 smid;
- bool refire_cmd = 0;
+ bool refire_cmd = false;
u8 result;
u32 opcode = 0;
@@ -4305,6 +4248,11 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
result = COMPLETE_CMD;
}
+ scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+ cmd_fusion->io_request;
+ if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
+ result = RETURN_CMD;
+
switch (result) {
case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
@@ -4533,7 +4481,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
if (!timeleft) {
dev_err(&instance->pdev->dev,
"task mgmt type 0x%x timed out\n", type);
- cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
mutex_unlock(&instance->reset_mutex);
rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
mutex_lock(&instance->reset_mutex);
@@ -4713,12 +4660,12 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, smid,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
@@ -4783,12 +4730,12 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
sdev_printk(KERN_INFO, scmd->device,
"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
scmd, devhandle);
- mr_device_priv_data->tm_busy = 1;
+ mr_device_priv_data->tm_busy = true;
ret = megasas_issue_tm(instance, devhandle,
scmd->device->channel, scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
mr_device_priv_data);
- mr_device_priv_data->tm_busy = 0;
+ mr_device_priv_data->tm_busy = false;
mutex_unlock(&instance->reset_mutex);
scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
(ret == SUCCESS) ? "SUCCESS" : "FAILED");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d57ecc7f88d8..30de4b01f703 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -774,7 +774,7 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_CPU_AFFINITY_MASK {
union {
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u8 hw_path:1;
u8 cpu0:1;
u8 cpu1:1;
@@ -866,7 +866,7 @@ struct MR_LD_RAID {
__le16 seqNum;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
u32 ldSyncRequired:1;
u32 regTypeReqOnReadIsValid:1;
u32 isEPD:1;
@@ -889,7 +889,7 @@ struct {
/* 0x30 - 0x33, Logical block size for the LD */
u32 logical_block_length;
struct {
-#ifndef MFI_BIG_ENDIAN
+#ifndef __BIG_ENDIAN_BITFIELD
/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
u32 ld_pi_exp:4;
/* 0x34, LOGICAL BLOCKS PER PHYSICAL
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 74fb50644678..f9f8f4921654 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -31,9 +31,9 @@
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/pgtable.h>
#include <asm/dbdma.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/hydra.h>
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index 84fb3fbdb0ca..e76d994dbed3 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -7,4 +7,5 @@ mpt3sas-y += mpt3sas_base.o \
mpt3sas_transport.o \
mpt3sas_ctl.o \
mpt3sas_trigger_diag.o \
- mpt3sas_warpdrive.o
+ mpt3sas_warpdrive.o \
+ mpt3sas_debugfs.o \
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 663782bb790d..beaea1933f5c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -413,7 +413,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2SGESimple32_t *sgel, *sgel_next;
u32 sgl_flags, sge_chain_count = 0;
- bool is_write = 0;
+ bool is_write = false;
u16 i = 0;
void __iomem *buffer_iomem;
phys_addr_t buffer_iomem_phys;
@@ -482,7 +482,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
if (le32_to_cpu(sgel->FlagsLength) &
(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
- is_write = 1;
+ is_write = true;
for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
@@ -2806,58 +2806,38 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
- u64 required_mask, coherent_mask;
struct sysinfo s;
- /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
-
- if (ioc->is_mcpu_endpoint)
- goto try_32bit;
+ int dma_mask;
- required_mask = dma_get_required_mask(&pdev->dev);
- if (sizeof(dma_addr_t) == 4 || required_mask == 32)
- goto try_32bit;
-
- if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(dma_mask);
+ if (ioc->is_mcpu_endpoint ||
+ sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
+ dma_get_required_mask(&pdev->dev) <= 32)
+ dma_mask = 32;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+ dma_mask = 63;
else
- coherent_mask = DMA_BIT_MASK(32);
+ dma_mask = 64;
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, coherent_mask))
- goto try_32bit;
-
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = dma_mask;
- goto out;
-
- try_32bit:
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)))
return -ENODEV;
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- out:
+ if (dma_mask > 32) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ } else {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ }
+
si_meminfo(&s);
ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->dma_mask, convert_to_kb(s.totalram));
+ dma_mask, convert_to_kb(s.totalram));
return 0;
}
-static int
-_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
- struct pci_dev *pdev)
-{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- return -ENODEV;
- }
- return 0;
-}
-
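With the helper above removed, the streaming and coherent masks are now set together from a single dma_mask value chosen up front (32, 63 or 64). For comparison only, a generic try-wide-then-fall-back pattern; the driver itself instead retries later via use_32bit_dma when an RDPQ allocation crosses a 4GB boundary:

#include <linux/dma-mapping.h>

/* Return the DMA mask width the platform accepted, or -ENODEV. */
static int demo_set_dma_mask(struct device *dev, int wide_bits)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(wide_bits)))
		return wide_bits;
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 32;
	return -ENODEV;
}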
/**
* _base_check_enable_msix - checks MSIX capabable.
* @ioc: per adapter object
@@ -4827,8 +4807,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
int j = 0;
+ int dma_alloc_count = 0;
struct chain_tracker *ct;
- struct reply_post_struct *rps;
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -4870,29 +4851,34 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->reply_post) {
- do {
- rps = &ioc->reply_post[i];
- if (rps->reply_post_free) {
- dma_pool_free(
- ioc->reply_post_free_dma_pool,
- rps->reply_post_free,
- rps->reply_post_free_dma);
- dexitprintk(ioc,
- ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
- rps->reply_post_free));
- rps->reply_post_free = NULL;
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ for (i = 0; i < count; i++) {
+ if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
+ && dma_alloc_count) {
+ if (ioc->reply_post[i].reply_post_free) {
+ dma_pool_free(
+ ioc->reply_post_free_dma_pool,
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post[i].reply_post_free_dma);
+ dexitprintk(ioc, ioc_info(ioc,
+ "reply_post_free_pool(0x%p): free\n",
+ ioc->reply_post[i].reply_post_free));
+ ioc->reply_post[i].reply_post_free =
+ NULL;
+ }
+ --dma_alloc_count;
}
- } while (ioc->rdpq_array_enable &&
- (++i < ioc->reply_queue_count));
+ }
+ dma_pool_destroy(ioc->reply_post_free_dma_pool);
if (ioc->reply_post_free_array &&
ioc->rdpq_array_enable) {
dma_pool_free(ioc->reply_post_free_array_dma_pool,
- ioc->reply_post_free_array,
- ioc->reply_post_free_array_dma);
+ ioc->reply_post_free_array,
+ ioc->reply_post_free_array_dma);
ioc->reply_post_free_array = NULL;
}
dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
- dma_pool_destroy(ioc->reply_post_free_dma_pool);
kfree(ioc->reply_post);
}
@@ -4902,8 +4888,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sg_lookup[i].pcie_sgl,
ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
- if (ioc->pcie_sgl_dma_pool)
- dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -4915,7 +4900,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
kfree(ioc->hpr_lookup);
+ ioc->hpr_lookup = NULL;
kfree(ioc->internal_lookup);
+ ioc->internal_lookup = NULL;
if (ioc->chain_lookup) {
for (i = 0; i < ioc->scsiio_depth; i++) {
for (j = ioc->chains_per_prp_buffer;
@@ -4935,7 +4922,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * is_MSB_are_same - checks whether all reply queues in a set are
+ * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
* having same upper 32bits in their base memory address.
* @reply_pool_start_address: Base address of a reply queue set
* @pool_sz: Size of single Reply Descriptor Post Queues pool size
@@ -4945,7 +4932,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
*/
static int
-is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
+mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
{
long reply_pool_end_address;
@@ -4959,6 +4946,88 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
}
/**
+ * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
+ * for reply queues.
+ * @ioc: per adapter object
+ * @sz: DMA Pool size
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
+{
+ int i = 0;
+ u32 dma_alloc_count = 0;
+ int reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
+
+ ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
+ GFP_KERNEL);
+ if (!ioc->reply_post)
+ return -ENOMEM;
+ /*
+ * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
+ * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should
+ * be within a 4GB boundary, i.e. reply queues in a set must have the
+ * same upper 32 bits in their memory address, so the driver allocates
+ * the DMA'able memory for the reply queues accordingly.
+ * The driver applies the VENTURA_SERIES limitation to the
+ * INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(count,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+ ioc->reply_post_free_dma_pool =
+ dma_pool_create("reply_post_free pool",
+ &ioc->pdev->dev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
+ ioc->reply_post[i].reply_post_free =
+ dma_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL,
+ &ioc->reply_post[i].reply_post_free_dma);
+ if (!ioc->reply_post[i].reply_post_free)
+ return -ENOMEM;
+ /*
+ * Each set of RDPQ pools must satisfy the 4GB boundary
+ * restriction:
+ * 1) Check whether the resources allocated for the RDPQ pool
+ * lie in the same 4GB range.
+ * 2) If #1 is true, continue with 64 bit DMA.
+ * 3) If #1 is false, return -EAGAIN, so the caller frees all the
+ * resources, sets the DMA mask to 32 bit and reallocates.
+ */
+ if (!mpt3sas_check_same_4gb_region(
+ (long)ioc->reply_post[i].reply_post_free, sz)) {
+ dinitprintk(ioc,
+ ioc_err(ioc, "bad Replypost free pool(0x%p)"
+ "reply_post_free_dma = (0x%llx)\n",
+ ioc->reply_post[i].reply_post_free,
+ (unsigned long long)
+ ioc->reply_post[i].reply_post_free_dma));
+ return -EAGAIN;
+ }
+ memset(ioc->reply_post[i].reply_post_free, 0,
+ RDPQ_MAX_INDEX_IN_ONE_CHUNK *
+ reply_post_free_sz);
+ dma_alloc_count--;
+
+ } else {
+ ioc->reply_post[i].reply_post_free =
+ (Mpi2ReplyDescriptorsUnion_t *)
+ ((long)ioc->reply_post[i-1].reply_post_free
+ + reply_post_free_sz);
+ ioc->reply_post[i].reply_post_free_dma =
+ (dma_addr_t)
+ (ioc->reply_post[i-1].reply_post_free_dma +
+ reply_post_free_sz);
+ }
+ }
+ return 0;
+}
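The function above hands out reply queues in chunks: one dma_pool_alloc() covers RDPQ_MAX_INDEX_IN_ONE_CHUNK queues, and queues inside a chunk address their slice by offsetting from the chunk base. A small sketch of that arithmetic (the DEMO_* names and pr_info() output are purely illustrative):

#include <linux/kernel.h>
#include <linux/printk.h>

#define DEMO_CHUNK	16	/* mirrors RDPQ_MAX_INDEX_IN_ONE_CHUNK */

static void demo_rdpq_layout(unsigned int count, unsigned int queue_bytes)
{
	unsigned int chunks = DIV_ROUND_UP(count, DEMO_CHUNK);
	unsigned int i;

	for (i = 0; i < count; i++)
		pr_info("queue %u -> allocation %u, byte offset %u (of %u allocations)\n",
			i, i / DEMO_CHUNK, (i % DEMO_CHUNK) * queue_bytes,
			chunks);
}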
+
+/**
* _base_allocate_memory_pools - allocate start of day memory pools
* @ioc: per adapter object
*
@@ -4972,10 +5041,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
u32 retry_sz;
+ u32 rdpq_sz = 0;
u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
int i, j;
+ int ret = 0;
struct chain_tracker *ct;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -5129,54 +5200,28 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
sizeof(Mpi2DefaultReplyDescriptor_t);
-
- sz = reply_post_free_sz;
+ rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
- sz *= ioc->reply_queue_count;
-
- ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
- (ioc->reply_queue_count):1,
- sizeof(struct reply_post_struct), GFP_KERNEL);
-
- if (!ioc->reply_post) {
- ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
- goto out;
- }
- ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
- &ioc->pdev->dev, sz, 16, 0);
- if (!ioc->reply_post_free_dma_pool) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
- goto out;
- }
- i = 0;
- do {
- ioc->reply_post[i].reply_post_free =
- dma_pool_zalloc(ioc->reply_post_free_dma_pool,
- GFP_KERNEL,
- &ioc->reply_post[i].reply_post_free_dma);
- if (!ioc->reply_post[i].reply_post_free) {
- ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
- goto out;
- }
- dinitprintk(ioc,
- ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth,
- 8, sz / 1024));
- dinitprintk(ioc,
- ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
- (u64)ioc->reply_post[i].reply_post_free_dma));
- total_sz += sz;
- } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
-
- if (ioc->dma_mask > 32) {
- if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
- pci_name(ioc->pdev));
- goto out;
+ rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
+ ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
+ if (ret == -EAGAIN) {
+ /*
+ * Free allocated bad RDPQ memory pools.
+ * Change dma coherent mask to 32 bit and reallocate RDPQ
+ */
+ _base_release_memory_pools(ioc);
+ ioc->use_32bit_dma = true;
+ if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
+ ioc_err(ioc,
+ "32 DMA mask failed %s\n", pci_name(ioc->pdev));
+ return -ENODEV;
}
- }
-
+ if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
+ return -ENOMEM;
+ } else if (ret == -ENOMEM)
+ return -ENOMEM;
+ total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
+ DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
ioc->scsiio_depth = ioc->hba_queue_depth -
ioc->hi_priority_depth - ioc->internal_depth;
@@ -5188,7 +5233,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
ioc->shost->can_queue));
-
/* contiguous pool for request and chains, 16 byte align, one extra "
* "frame for smid=0
*/
@@ -5405,7 +5449,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* Actual requirement is not alignment, but we need start and end of
* DMA address must have same upper 32 bit address.
*/
- if (!is_MSB_are_same((long)ioc->sense, sz)) {
+ if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
//Release Sense pool & Reallocate
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
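The renamed helper's job, here for the sense pool and above for the RDPQ pools, is to verify that a buffer does not straddle a 4GB boundary. An assumed simplification of that intent (not the driver's exact implementation):

#include <linux/kernel.h>
#include <linux/types.h>

/* True iff the first and last byte of the buffer share the same upper
 * 32 address bits, i.e. the buffer sits inside one 4GB region. */
static bool demo_same_4gb_region(dma_addr_t start, size_t sz)
{
	return upper_32_bits(start) == upper_32_bits(start + sz - 1);
}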
@@ -7158,7 +7202,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->smp_affinity_enable = smp_affinity_enable;
ioc->rdpq_array_enable_assigned = 0;
- ioc->dma_mask = 0;
+ ioc->use_32bit_dma = false;
if (ioc->is_aero_ioc)
ioc->base_readl = &_base_readl_aero;
else
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index e7197150721f..4fca3939c034 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "33.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 33
+#define MPT3SAS_DRIVER_VERSION "34.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 34
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -367,6 +367,7 @@ struct mpt3sas_nvme_cmd {
#define MPT3SAS_HIGH_IOPS_REPLY_QUEUES 8
#define MPT3SAS_HIGH_IOPS_BATCH_COUNT 16
#define MPT3SAS_GEN35_MAX_MSIX_QUEUES 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
/* OEM Specific Flags will come from OEM specific header files */
struct Mpi2ManufacturingPage10_t {
@@ -1026,7 +1027,6 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @ir_firmware: IR firmware present
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
- * @dma_mask: used to set the consistent dma mask
* @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
* pci resource handling
* @fault_reset_work_q_name: fw fault work queue
@@ -1064,6 +1064,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
* @drv_support_bitmap: driver's supported feature bit map
+ * @use_32bit_dma: Flag to use 32 bit consistent dma mask
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -1205,7 +1206,6 @@ struct MPT3SAS_ADAPTER {
u8 ir_firmware;
int bars;
u8 mask_interrupts;
- int dma_mask;
/* fw fault handler */
char fault_reset_work_q_name[20];
@@ -1254,6 +1254,7 @@ struct MPT3SAS_ADAPTER {
u8 high_iops_queues;
u32 drv_support_bitmap;
bool enable_sdev_max_qd;
+ bool use_32bit_dma;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1471,6 +1472,8 @@ struct MPT3SAS_ADAPTER {
u16 device_remove_in_progress_sz;
u8 is_gen35_ioc;
u8 is_aero_ioc;
+ struct dentry *debugfs_root;
+ struct dentry *ioc_dump;
PUT_SMID_IO_FP_HIP put_smid_scsi_io;
PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
@@ -1478,6 +1481,11 @@ struct MPT3SAS_ADAPTER {
GET_MSIX_INDEX get_msix_index_for_smlio;
};
+struct mpt3sas_debugfs_buffer {
+ void *buf;
+ u32 len;
+};
+
#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1781,6 +1789,11 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_init_debugfs(void);
+void mpt3sas_exit_debugfs(void);
+
/**
* _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device
* @device_info: bitfield providing information about the device.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debugfs.c b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
new file mode 100644
index 000000000000..a6ab1db81167
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debugfs.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface Support for MPT (Message Passing Technology) based
+ * controllers.
+ *
+ * Copyright (C) 2020 Broadcom Inc.
+ *
+ * Authors: Broadcom Inc.
+ * Sreekanth Reddy <sreekanth.reddy@broadcom.com>
+ * Suganath Prabu <suganath-prabu.subramani@broadcom.com>
+ *
+ * Send feedback to: MPT-FusionLinux.pdl@broadcom.com
+ *
+ **/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/compat.h>
+#include <linux/uio.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include "mpt3sas_base.h"
+#include <linux/debugfs.h>
+
+static struct dentry *mpt3sas_debugfs_root;
+
+/*
+ * _debugfs_iocdump_read - copy ioc dump from debugfs buffer
+ * @filp: File pointer
+ * @ubuf: Buffer to fill data
+ * @cnt: Length of the buffer
+ * @ppos: Offset in the file
+ */
+
+static ssize_t
+_debugfs_iocdump_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+
+{
+ struct mpt3sas_debugfs_buffer *debug = filp->private_data;
+
+ if (!debug || !debug->buf)
+ return 0;
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, debug->buf, debug->len);
+}
+
+/*
+ * _debugfs_iocdump_open : open the ioc_dump debugfs attribute file
+ */
+static int
+_debugfs_iocdump_open(struct inode *inode, struct file *file)
+{
+ struct MPT3SAS_ADAPTER *ioc = inode->i_private;
+ struct mpt3sas_debugfs_buffer *debug;
+
+ debug = kzalloc(sizeof(struct mpt3sas_debugfs_buffer), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->buf = (void *)ioc;
+ debug->len = sizeof(struct MPT3SAS_ADAPTER);
+ file->private_data = debug;
+ return 0;
+}
+
+/*
+ * _debugfs_iocdump_release : release the ioc_dump debugfs attribute
+ * @inode: inode structure to the corresponds device
+ * @file: File pointer
+ */
+static int
+_debugfs_iocdump_release(struct inode *inode, struct file *file)
+{
+ struct mpt3sas_debugfs_buffer *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static const struct file_operations mpt3sas_debugfs_iocdump_fops = {
+ .owner = THIS_MODULE,
+ .open = _debugfs_iocdump_open,
+ .read = _debugfs_iocdump_read,
+ .release = _debugfs_iocdump_release,
+};
+
+/*
+ * mpt3sas_init_debugfs : Create debugfs root for mpt3sas driver
+ */
+void mpt3sas_init_debugfs(void)
+{
+ mpt3sas_debugfs_root = debugfs_create_dir("mpt3sas", NULL);
+ if (!mpt3sas_debugfs_root)
+ pr_info("mpt3sas: Cannot create debugfs root\n");
+}
+
+/*
+ * mpt3sas_exit_debugfs : Remove debugfs root for mpt3sas driver
+ */
+void mpt3sas_exit_debugfs(void)
+{
+ debugfs_remove_recursive(mpt3sas_debugfs_root);
+}
+
+/*
+ * mpt3sas_setup_debugfs : Setup debugfs per HBA adapter
+ * ioc: MPT3SAS_ADAPTER object
+ */
+void
+mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ char name[64];
+
+ snprintf(name, sizeof(name), "scsi_host%d", ioc->shost->host_no);
+ if (!ioc->debugfs_root) {
+ ioc->debugfs_root =
+ debugfs_create_dir(name, mpt3sas_debugfs_root);
+ if (!ioc->debugfs_root) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create per adapter debugfs directory\n");
+ return;
+ }
+ }
+
+ snprintf(name, sizeof(name), "ioc_dump");
+ ioc->ioc_dump = debugfs_create_file(name, 0444,
+ ioc->debugfs_root, ioc, &mpt3sas_debugfs_iocdump_fops);
+ if (!ioc->ioc_dump) {
+ dev_err(&ioc->pdev->dev,
+ "Cannot create ioc_dump debugfs file\n");
+ debugfs_remove(ioc->debugfs_root);
+ return;
+ }
+
+ snprintf(name, sizeof(name), "host_recovery");
+ debugfs_create_u8(name, 0444, ioc->debugfs_root, &ioc->shost_recovery);
+
+}
+
+/*
+ * mpt3sas_destroy_debugfs : Destroy debugfs per HBA adapter
+ * @ioc: MPT3SAS_ADAPTER object
+ */
+void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc)
+{
+ debugfs_remove_recursive(ioc->debugfs_root);
+}
+
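The custom open/read/release trio above exposes the raw adapter structure through simple_read_from_buffer(). For a buffer that stays valid for the lifetime of the dentry, debugfs_create_blob() is a shorter, handler-free alternative; a hypothetical sketch (names are illustrative, and this is not what the driver does):

#include <linux/debugfs.h>

static struct debugfs_blob_wrapper demo_blob;

static void demo_expose(struct dentry *parent, void *buf, unsigned long len)
{
	demo_blob.data = buf;
	demo_blob.size = len;
	debugfs_create_blob("ioc_dump_blob", 0444, parent, &demo_blob);
}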
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 04a40afe60e3..08fc4b381056 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9928,6 +9928,7 @@ static void scsih_remove(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ mpt3sas_destroy_debugfs(ioc);
sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
@@ -10763,8 +10764,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
/* register EEDP capabilities with SCSI layer */
- if (prot_mask > 0)
- scsi_host_set_prot(shost, prot_mask);
+ if (prot_mask >= 0)
+ scsi_host_set_prot(shost, (prot_mask & 0x07));
else
scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -10814,6 +10815,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
scsi_scan_host(shost);
+ mpt3sas_setup_debugfs(ioc);
return 0;
out_add_shost_fail:
mpt3sas_base_detach(ioc);
@@ -11220,6 +11222,7 @@ scsih_init(void)
tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
_scsih_sas_control_complete);
+ mpt3sas_init_debugfs();
return 0;
}
@@ -11251,6 +11254,7 @@ scsih_exit(void)
if (hbas_to_enumerate != 2)
raid_class_release(mpt2sas_raid_template);
sas_release_transport(mpt3sas_transport_template);
+ mpt3sas_exit_debugfs();
}
/**
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index ca96d6d9c350..869b8b058a43 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mvme147hw.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 7af9173c4925..5973eed94938 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -25,7 +25,7 @@ static const struct mvs_chip_info mvs_chips[] = {
[chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
-struct device_attribute *mvst_host_attrs[];
+static struct device_attribute *mvst_host_attrs[];
#define SOC_SAS_NUM 2
@@ -759,8 +759,6 @@ static DEVICE_ATTR(interrupt_coalescing,
mvs_show_interrupt_coalescing,
mvs_store_interrupt_coalescing);
-/* task handler */
-struct task_struct *mvs_th;
static int __init mvs_init(void)
{
int rc;
@@ -785,7 +783,7 @@ static void __exit mvs_exit(void)
sas_release_transport(mvs_stt);
}
-struct device_attribute *mvst_host_attrs[] = {
+static struct device_attribute *mvst_host_attrs[] = {
&dev_attr_driver_version,
&dev_attr_interrupt_coalescing,
NULL,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 7eb88fe1eb0b..aa9ae2ae8579 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4652,7 +4652,7 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
for (i = 0; i < PMCRAID_MAX_CMD; i++) {
pinstance->cmd_list[i]->ioa_cb =
- dma_pool_alloc(
+ dma_pool_zalloc(
pinstance->control_pool,
GFP_KERNEL,
&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
@@ -4661,8 +4661,6 @@ static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
pmcraid_release_control_blocks(pinstance, i);
return -ENOMEM;
}
- memset(pinstance->cmd_list[i]->ioa_cb, 0,
- sizeof(struct pmcraid_control_block));
}
return 0;
}
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index f3f399fe10c8..e163be8af965 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -355,6 +355,7 @@ struct qedf_ctx {
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
+#define QEDF_PROBING 8
unsigned long flags; /* Miscellaneous state flags */
int fipvlan_retries;
u8 num_queues;
@@ -387,7 +388,9 @@ struct qedf_ctx {
#define QEDF_IO_WORK_MIN 64
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
+ struct delayed_work recovery_work;
struct delayed_work grcdump_work;
+ struct delayed_work stag_work;
u32 slow_sge_ios;
u32 fast_sge_ios;
@@ -403,6 +406,7 @@ struct qedf_ctx {
u32 flogi_cnt;
u32 flogi_failed;
+ u32 flogi_pending;
/* Used for fc statistics */
struct mutex stats_mutex;
@@ -468,7 +472,7 @@ extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
-#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
+#define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 87e169dcebdb..542ba9454257 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -388,14 +388,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
- if (rdata) {
- mutex_unlock(&lport->disc.disc_mutex);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (rdata)
fc_rport_login(rdata);
- fcport->rdata = rdata;
- } else {
- mutex_unlock(&lport->disc.disc_mutex);
- fcport->rdata = NULL;
- }
+ fcport->rdata = rdata;
}
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index e749a2dcaad7..0f6a15c1a04b 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1021,14 +1021,18 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
atomic_inc(&fcport->ios_to_queue);
if (fcport->retry_delay_timestamp) {
+ /* Take fcport->rport_lock for resetting the delay_timestamp */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (time_after(jiffies, fcport->retry_delay_timestamp)) {
fcport->retry_delay_timestamp = 0;
} else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
/* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY;
atomic_dec(&fcport->ios_to_queue);
goto exit_qcmd;
}
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
@@ -1134,6 +1138,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
int refcount;
u16 scope, qualifier = 0;
u8 fw_residual_flag = 0;
+ unsigned long flags = 0;
+ u16 chk_scope = 0;
if (!io_req)
return;
@@ -1267,16 +1273,8 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
/* Lower 14 bits */
qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
- if (qedf_retry_delay &&
- scope > 0 && qualifier > 0 &&
- qualifier <= 0x3FEF) {
- /* Check we don't go over the max */
- if (qualifier > QEDF_RETRY_DELAY_MAX)
- qualifier =
- QEDF_RETRY_DELAY_MAX;
- fcport->retry_delay_timestamp =
- jiffies + (qualifier * HZ / 10);
- }
+ if (qedf_retry_delay)
+ chk_scope = 1;
/* Record stats */
if (io_req->cdb_status ==
SAM_STAT_TASK_SET_FULL)
@@ -1287,6 +1285,36 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
}
if (io_req->fcp_resid)
scsi_set_resid(sc_cmd, io_req->fcp_resid);
+
+ if (chk_scope == 1) {
+ if ((scope == 1 || scope == 2) &&
+ (qualifier > 0 && qualifier <= 0x3FEF)) {
+ /* Check we don't go over the max */
+ if (qualifier > QEDF_RETRY_DELAY_MAX) {
+ qualifier = QEDF_RETRY_DELAY_MAX;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "qualifier = %d\n",
+ (fcp_rsp->retry_delay_timer &
+ 0x3FFF));
+ }
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Scope = %d and qualifier = %d",
+ scope, qualifier);
+ /* Take fcport->rport_lock to
+ * update the retry_delay_timestamp
+ */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ fcport->retry_delay_timestamp =
+ jiffies + (qualifier * HZ / 10);
+ spin_unlock_irqrestore(&fcport->rport_lock,
+ flags);
+
+ } else {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
+ scope, qualifier);
+ }
+ }
break;
default:
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
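The new chk_scope handling acts on the FCP_RSP retry delay timer field, whose low 14 bits carry the qualifier (per the comment above) and whose top bits carry the scope; the qualifier is expressed in 100 ms units, hence the qualifier * HZ / 10 conversion to jiffies. An illustrative decomposition as a standalone sketch, not qedf's code path:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Return the jiffy deadline implied by the retry delay timer field,
 * or 0 when no usable scope/qualifier combination is present. */
static unsigned long demo_retry_deadline(u16 retry_delay_timer)
{
	u16 scope     = (retry_delay_timer & 0xC000) >> 14;
	u16 qualifier = retry_delay_timer & 0x3FFF;

	if ((scope == 1 || scope == 2) && qualifier && qualifier <= 0x3FEF)
		return jiffies + (qualifier * HZ / 10);
	return 0;
}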
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 5b19f5175c5c..36b1ca2dadbb 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -28,6 +28,8 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
/*
* Driver module parameters.
@@ -282,6 +284,7 @@ static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
/* Set the source MAC we will use for FCoE traffic */
qedf_set_data_src_addr(qedf, fp);
+ qedf->flogi_pending = 0;
}
/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -307,6 +310,11 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
*/
if (resp == fc_lport_flogi_resp) {
qedf->flogi_cnt++;
+ if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+ schedule_delayed_work(&qedf->stag_work, 2);
+ return NULL;
+ }
+ qedf->flogi_pending++;
return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
arg, timeout);
}
@@ -503,6 +511,32 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
}
+static void qedf_bw_update(void *dev)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ struct qed_link_output link;
+
+ /* Get the latest status of the link */
+ qed_ops->common->get_link(qedf->cdev, &link);
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore link update, driver getting unload.\n");
+ return;
+ }
+
+ if (link.link_up) {
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ qedf_update_link_speed(qedf, &link);
+ else
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignore bw update, link is down.\n");
+
+ } else {
+ QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+ }
+}
+
static void qedf_link_update(void *dev, struct qed_link_output *link)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
@@ -629,6 +663,8 @@ static u32 qedf_get_login_failures(void *cookie)
static struct qed_fcoe_cb_ops qedf_cb_ops = {
{
.link_update = qedf_link_update,
+ .bw_update = qedf_bw_update,
+ .schedule_recovery_handler = qedf_schedule_recovery_handler,
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
@@ -850,6 +886,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
qedf = lport_priv(lport);
+ qedf->flogi_pending = 0;
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
@@ -3153,7 +3190,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
{
int rc = -EINVAL;
struct fc_lport *lport;
- struct qedf_ctx *qedf;
+ struct qedf_ctx *qedf = NULL;
struct Scsi_Host *host;
bool is_vf = false;
struct qed_ll2_params params;
@@ -3183,6 +3220,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Initialize qedf_ctx */
qedf = lport_priv(lport);
+ set_bit(QEDF_PROBING, &qedf->flags);
qedf->lport = lport;
qedf->ctlr.lp = lport;
qedf->pdev = pdev;
@@ -3197,6 +3235,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->fipvlan_compl);
mutex_init(&qedf->stats_mutex);
mutex_init(&qedf->flush_mutex);
+ qedf->flogi_pending = 0;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
"QLogic FastLinQ FCoE Module qedf %s, "
@@ -3206,9 +3245,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
} else {
/* Init pointers during recovery */
qedf = pci_get_drvdata(pdev);
+ set_bit(QEDF_PROBING, &qedf->flags);
lport = qedf->lport;
}
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
+
host = lport->host;
/* Allocate mempool for qedf_io_work structs */
@@ -3227,6 +3269,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+ INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
qedf->fipvlan_retries = qedf_fipvlan_retries;
/* Set a default prio in case DCBX doesn't converge */
if (qedf_default_prio > -1) {
@@ -3281,6 +3324,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+ goto err2;
+ }
+
/* Record BDQ producer doorbell addresses */
qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
@@ -3466,6 +3516,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->lport->host->host_no);
qedf->dpc_wq = create_workqueue(host_buf);
}
+ INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
/*
* GRC dump and sysfs parameters are not reaped during the recovery
@@ -3513,6 +3564,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
else
fc_fabric_login(lport);
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+
/* All good */
return 0;
@@ -3538,6 +3593,11 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
+ if (qedf) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
+
+ clear_bit(QEDF_PROBING, &qedf->flags);
+ }
return rc;
}
@@ -3687,11 +3747,25 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
{
struct qedf_ctx *qedf = dev;
struct qed_mfw_tlv_fcoe *fcoe = data;
- struct fc_lport *lport = qedf->lport;
- struct Scsi_Host *host = lport->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+ struct fc_lport *lport;
+ struct Scsi_Host *host;
+ struct fc_host_attrs *fc_host;
struct fc_host_statistics *hst;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is null.\n");
+ return;
+ }
+
+ if (test_bit(QEDF_PROBING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
+ return;
+ }
+
+ lport = qedf->lport;
+ host = lport->host;
+ fc_host = shost_to_fc_host(host);
+
/* Force a refresh of the fc_host stats including offload stats */
hst = qedf_fc_get_host_stats(host);
@@ -3762,11 +3836,64 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, stag_work.work);
+
+ if (!qedf) {
+ QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ return;
+ }
+ QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ qedf_ctx_soft_reset(qedf->lport);
+}
+
static void qedf_shutdown(struct pci_dev *pdev)
{
__qedf_remove(pdev, QEDF_MODE_NORMAL);
}
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+ schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, recovery_work.work);
+
+ if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+ return;
+
+ /*
+ * Call common_ops->recovery_prolog to allow the MFW to quiesce
+ * any PCI transactions.
+ */
+ qed_ops->common->recovery_prolog(qedf->cdev);
+
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+ __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+ /*
+ * Reset link and dcbx to down state since we will not get a link down
+ * event from the MFW but calling __qedf_remove will essentially be a
+ * link down event.
+ */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+ clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+ QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
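For context, a condensed sketch of the FLOGI-retry threshold wired up in the qedf_main.c hunks above: flogi_pending counts unanswered FLOGIs, is cleared when an LS_ACC arrives, and once it reaches QEDF_FLOGI_RETRY_CNT the driver stops retrying and defers a soft context reset through the new stag_work (a 2-jiffy delayed work item). The names and the threshold value below are hypothetical stand-ins for the driver's qedf_ctx fields and qedf_stag_change_work(), and the sketch assumes INIT_DELAYED_WORK() was done at probe time, as the probe hunk above does:

#include <linux/types.h>
#include <linux/workqueue.h>

#define EXAMPLE_FLOGI_RETRY_CNT 3	/* assumed threshold */

struct example_ctx {
	u8 flogi_pending;		/* consecutive unanswered FLOGIs */
	struct delayed_work stag_work;	/* deferred soft context reset */
};

static void example_stag_change_work(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, stag_work.work);

	/* The real handler calls qedf_ctx_soft_reset(qedf->lport), which
	 * also zeroes flogi_pending so the next fabric login starts clean.
	 */
	ctx->flogi_pending = 0;
}

static bool example_on_flogi_send(struct example_ctx *ctx)
{
	if (ctx->flogi_pending >= EXAMPLE_FLOGI_RETRY_CNT) {
		/* Too many unanswered FLOGIs: defer a soft reset instead
		 * of transmitting yet another one (2 jiffies, as above).
		 */
		schedule_delayed_work(&ctx->stag_work, 2);
		return false;
	}
	ctx->flogi_pending++;
	return true;			/* caller may transmit the FLOGI */
}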
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 1f4a5fb00a05..425e665ec08b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -836,6 +836,11 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
+ if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
+ QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
+ return ERR_PTR(-ENXIO);
+ }
+
ep = iscsi_create_endpoint(sizeof(struct qedi_endpoint));
if (!ep) {
QEDI_ERR(&qedi->dbg_ctx, "endpoint create fail\n");
@@ -870,12 +875,6 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
QEDI_ERR(&qedi->dbg_ctx, "Invalid endpoint\n");
}
- if (atomic_read(&qedi->link_state) != QEDI_LINK_UP) {
- QEDI_WARN(&qedi->dbg_ctx, "qedi link down\n");
- ret = -ENXIO;
- goto ep_conn_exit;
- }
-
ret = qedi_alloc_sq(qedi, qedi_ep);
if (ret)
goto ep_conn_exit;
@@ -1001,7 +1000,8 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- flush_work(&qedi_ep->offload_work);
+ if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
+ flush_work(&qedi_ep->offload_work);
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
break;
}
+ if (!abrt_conn)
+ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
+
qedi_ep->state = EP_STATE_DISCONN_START;
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
@@ -1218,6 +1221,10 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
}
iscsi_cid = (u32)path_data->handle;
+ if (iscsi_cid >= qedi->max_active_conns) {
+ ret = -EINVAL;
+ goto set_path_exit;
+ }
qedi_ep = qedi->ep_tbl[iscsi_cid];
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index b995b19865ca..81a307695cc9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -28,6 +28,10 @@
#include "qedi_gbl.h"
#include "qedi_iscsi.h"
+static uint qedi_qed_debug;
+module_param(qedi_qed_debug, uint, 0644);
+MODULE_PARM_DESC(qedi_qed_debug, " QED debug level 0 (default)");
+
static uint qedi_fw_debug;
module_param(qedi_fw_debug, uint, 0644);
MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");
@@ -41,7 +45,7 @@ module_param(qedi_io_tracing, uint, 0644);
MODULE_PARM_DESC(qedi_io_tracing,
" Enable logging of SCSI requests/completions into trace buffer. (default off).");
-uint qedi_ll2_buf_size = 0x400;
+static uint qedi_ll2_buf_size = 0x400;
module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
@@ -658,8 +662,6 @@ exit_setup_shost:
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
- struct qedi_uio_dev *udev;
- struct qedi_uio_ctrl *uctrl;
struct skb_work_list *work;
struct ethhdr *eh;
@@ -698,9 +700,6 @@ static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
"Allowed frame ethertype [0x%x] len [0x%x].\n",
eh->h_proto, skb->len);
- udev = qedi->udev;
- uctrl = udev->uctrl;
-
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -921,7 +920,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
ipv6_en = !!(block->generic.ctrl_flags &
NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
- snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
+ snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s",
block->target[index].target_name.byte);
tgt->ipv6_en = ipv6_en;
@@ -1302,13 +1301,13 @@ process_again:
"process already running\n");
}
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_update_sb_idx(fp->sb_info);
/* Check for more work */
rmb();
- if (qedi_fp_has_work(fp) == 0)
+ if (!qedi_fp_has_work(fp))
qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
else
goto process_again;
@@ -1360,7 +1359,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
+ for (i = 0; i < qedi->int_info.msix_cnt; i++) {
idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -2422,7 +2421,6 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi;
struct qed_ll2_params params;
- u32 dp_module = 0;
u8 dp_level = 0;
bool is_vf = false;
char host_buf[16];
@@ -2445,7 +2443,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI;
- qed_params.dp_module = dp_module;
+ qed_params.dp_module = qedi_qed_debug;
qed_params.dp_level = dp_level;
qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3337cd341d21..441a45349349 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -526,7 +526,7 @@ static struct pci_device_id qla1280_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
-DEFINE_MUTEX(qla1280_firmware_mutex);
+static DEFINE_MUTEX(qla1280_firmware_mutex);
struct qla_fw {
char *fwname;
@@ -535,7 +535,7 @@ struct qla_fw {
#define QL_NUM_FW_IMAGES 3
-struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
+static struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
{"qlogic/1040.bin", NULL}, /* image 0 */
{"qlogic/1280.bin", NULL}, /* image 1 */
{"qlogic/12160.bin", NULL}, /* image 2 */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2c9e5ac24692..5d93ccc73153 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -26,7 +26,8 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
+ if (!(ha->fw_dump_reading || ha->mctp_dump_reading ||
+ ha->mpi_fw_dump_reading))
return 0;
mutex_lock(&ha->optrom_mutex);
@@ -42,6 +43,10 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
} else if (ha->mctp_dumped && ha->mctp_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
MCTP_DUMP_SIZE);
+ } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) {
+ rval = memory_read_from_buffer(buf, count, &off,
+ ha->mpi_fw_dump,
+ ha->mpi_fw_dump_len);
} else if (ha->fw_dump_reading) {
rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
ha->fw_dump_len);
@@ -79,7 +84,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_md_prep(vha);
}
ha->fw_dump_reading = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
break;
case 1:
if (ha->fw_dumped && !ha->fw_dump_reading) {
@@ -103,7 +108,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_set_reset_owner(vha);
qla8044_idc_unlock(ha);
} else {
- ha->fw_dump_mpi = 1;
qla2x00_system_error(vha);
}
break;
@@ -137,6 +141,22 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
vha->host_no);
}
break;
+ case 8:
+ if (!ha->mpi_fw_dump_reading)
+ break;
+ ql_log(ql_log_info, vha, 0x70e7,
+ "MPI firmware dump cleared on (%ld).\n", vha->host_no);
+ ha->mpi_fw_dump_reading = 0;
+ ha->mpi_fw_dumped = 0;
+ break;
+ case 9:
+ if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) {
+ ha->mpi_fw_dump_reading = 1;
+ ql_log(ql_log_info, vha, 0x70e8,
+ "Raw MPI firmware dump ready for read on (%ld).\n",
+ vha->host_no);
+ }
+ break;
}
return count;
}
@@ -207,10 +227,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
/* Checksum NVRAM. */
if (IS_FWI2_CAPABLE(ha)) {
- uint32_t *iter;
+ __le32 *iter = (__force __le32 *)buf;
uint32_t chksum;
- iter = (uint32_t *)buf;
chksum = 0;
for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
chksum += le32_to_cpu(*iter);
@@ -706,7 +725,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@@ -724,6 +744,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
qla83xx_idc_unlock(vha, 0);
break;
+ } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ qla27xx_reset_mpi(vha);
} else {
/* Make sure FC side is not in reset */
WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
@@ -737,6 +759,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
}
+ break;
case 0x2025e:
if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
@@ -1898,9 +1921,8 @@ static char *mode_to_str[] = {
};
#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
-static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+static void qla_set_ini_mode(scsi_qla_host_t *vha, int op)
{
- int rc = 0;
enum {
NO_ACTION,
MODE_CHANGE_ACCEPT,
@@ -2173,8 +2195,6 @@ static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
vha->ql2xexchoffld, vha->u_ql2xexchoffld);
break;
}
-
- return rc;
}
static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 97b51c477972..88c0338a2ec7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -490,7 +490,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
>> 24;
switch (loop_id) {
case 0xFC:
- loop_id = cpu_to_le16(NPH_SNS);
+ loop_id = NPH_SNS;
break;
case 0xFA:
loop_id = vha->mgmt_svr_loop_id;
@@ -691,7 +691,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
* dump and reset the chip.
*/
if (ret) {
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
rval = -EINVAL;
@@ -896,7 +896,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
* doesn't work take FCoE dump and then
* reset the chip.
*/
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
@@ -2042,7 +2042,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->loop_id = piocb_rqst->dataword;
+ fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
sp->type = SRB_FXIOCB_BCMD;
sp->name = "bsg_fx_mgmt";
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index bf1e98f11990..19005710f7f6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -115,7 +115,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
{
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
@@ -126,26 +126,26 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
- WRT_REG_WORD(&reg->mailbox9, 0);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox9, 0);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
continue;
@@ -155,15 +155,15 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat != 0x10 && stat != 0x11) {
/* Clear this intr; it wasn't a mailbox intr */
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
@@ -189,13 +189,13 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
int
-qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
- uint32_t ram_dwords, void **nxt)
+qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
+ uint32_t ram_dwords, void **nxt)
{
int rval = QLA_FUNCTION_FAILED;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint32_t *chunk = (void *)ha->gid_list;
+ uint32_t *chunk = (uint32_t *)ha->gid_list;
uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
uint32_t stat;
ulong i, j, timer = 6000000;
@@ -206,23 +206,23 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
if (i + dwords > ram_dwords)
dwords = ram_dwords - i;
- WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
- WRT_REG_WORD(&reg->mailbox1, LSW(addr));
- WRT_REG_WORD(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
+ wrt_reg_word(&reg->mailbox1, LSW(addr));
+ wrt_reg_word(&reg->mailbox8, MSW(addr));
- WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
+ wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
- WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
- WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_word(&reg->mailbox4, MSW(dwords));
+ wrt_reg_word(&reg->mailbox5, LSW(dwords));
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
ha->flags.mbox_int = 0;
while (timer--) {
udelay(5);
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
/* Check for pending interrupts. */
if (!(stat & HSRX_RISC_INT))
@@ -231,15 +231,15 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
stat &= 0xff;
if (stat != 0x1 && stat != 0x2 &&
stat != 0x10 && stat != 0x11) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
continue;
}
set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword(&reg->hccr);
break;
}
ha->flags.mbox_int = 1;
@@ -254,9 +254,9 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
return rval;
}
for (j = 0; j < dwords; j++) {
- ram[i + j] =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
- chunk[j] : swab32(chunk[j]);
+ ram[i + j] = (__force __be32)
+ ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]));
}
}
@@ -265,8 +265,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
}
static int
-qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
- uint32_t cram_size, void **nxt)
+qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
+ uint32_t cram_size, void **nxt)
{
int rval;
@@ -286,16 +286,16 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
return rval;
}
-static uint32_t *
+static __be32 *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
- uint32_t count, uint32_t *buf)
+ uint32_t count, __be32 *buf)
{
- uint32_t __iomem *dmp_reg;
+ __le32 __iomem *dmp_reg;
- WRT_REG_DWORD(&reg->iobase_addr, iobase);
+ wrt_reg_dword(&reg->iobase_addr, iobase);
dmp_reg = &reg->iobase_window;
for ( ; count--; dmp_reg++)
- *buf++ = htonl(RD_REG_DWORD(dmp_reg));
+ *buf++ = htonl(rd_reg_dword(dmp_reg));
return buf;
}
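For context on the RD_REG_*/WRT_REG_* to rd_reg_*/wrt_reg_* conversion running through these qla_dbg.c hunks: the MMIO pointers gain __le16/__le32 annotations while the dump buffers become __be16/__be32. The accessor definitions live in the driver's headers, which are not part of the hunks shown here, so the shape below is an assumption used only to make the byte-order flow visible:

#include <linux/io.h>
#include <linux/types.h>

/* Assumed shape of the new accessors: the adapter registers are
 * little-endian, and readl()/readw() already swap to CPU order.
 */
static inline u32 rd_reg_dword(const __le32 __iomem *addr)
{
	return readl(addr);		/* LE register -> CPU-endian u32 */
}

static inline void wrt_reg_dword(__le32 __iomem *addr, u32 data)
{
	writel(data, addr);		/* CPU-endian u32 -> LE register */
}

static inline u16 rd_reg_word(const __le16 __iomem *addr)
{
	return readw(addr);
}

static inline void wrt_reg_word(__le16 __iomem *addr, u16 data)
{
	writew(data, addr);
}

/* The dump path then stores big-endian values, e.g.
 *	fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
 * i.e. __le32 MMIO -> native u32 -> __be32 in the dump image, which is
 * why the dump buffers changed type from uint32_t * to __be32 *.
 */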
@@ -303,11 +303,11 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
void
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
{
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
/* 100 usec delay is sufficient enough for hardware to pause RISC */
udelay(100);
- if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+ if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
}
@@ -324,17 +324,17 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
* Driver can proceed with the reset sequence after waiting
* for a timeout period.
*/
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
@@ -342,19 +342,19 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
/* Wait for soft-reset to complete. */
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr); /* PCI Posting. */
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(10);
@@ -368,7 +368,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
}
static int
-qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
+qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
uint32_t ram_words, void **nxt)
{
int rval;
@@ -376,7 +376,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
uint16_t mb0;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
dma_addr_t dump_dma = ha->gid_list_dma;
- uint16_t *dump = (uint16_t *)ha->gid_list;
+ __le16 *dump = (__force __le16 *)ha->gid_list;
rval = QLA_SUCCESS;
mb0 = 0;
@@ -399,11 +399,11 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
WRT_MAILBOX_REG(ha, reg, 4, words);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_INT) {
stat &= 0xff;
@@ -414,10 +414,10 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
} else if (stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
@@ -425,15 +425,15 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
mb0 = RD_MAILBOX_REG(ha, reg, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
/* clear this intr; it wasn't a mailbox intr */
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -441,7 +441,8 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
rval = mb0 & MBS_MASK;
for (idx = 0; idx < words; idx++)
- ram[cnt + idx] = swab16(dump[idx]);
+ ram[cnt + idx] =
+ cpu_to_be16(le16_to_cpu(dump[idx]));
} else {
rval = QLA_FUNCTION_FAILED;
}
@@ -453,12 +454,12 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
- uint16_t *buf)
+ __be16 *buf)
{
- uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
+ __le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
for ( ; count--; dmp_reg++)
- *buf++ = htons(RD_REG_WORD(dmp_reg));
+ *buf++ = htons(rd_reg_word(dmp_reg));
}
static inline void *
@@ -472,10 +473,10 @@ qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
}
static inline void *
-qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt;
- uint32_t *iter_reg;
+ __be32 *iter_reg;
struct qla2xxx_fce_chain *fcec = ptr;
if (!ha->fce)
@@ -499,7 +500,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -517,11 +518,11 @@ qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
-qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_offld_chain *c = ptr;
@@ -539,12 +540,12 @@ qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ptr += sizeof(struct qla2xxx_offld_chain);
memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
- return (char *)ptr + cpu_to_be32(c->size);
+ return (char *)ptr + be32_to_cpu(c->size);
}
static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
- uint32_t **last_chain)
+ __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -591,7 +592,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
}
static inline void *
-qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
struct qla2xxx_mqueue_chain *q;
struct qla2xxx_mqueue_header *qh;
@@ -662,7 +663,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
-qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
{
uint32_t cnt, que_idx;
uint8_t que_cnt;
@@ -685,13 +686,13 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
reg = ISP_QUE_REG(ha, cnt);
que_idx = cnt * 4;
mq->qregs[que_idx] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
mq->qregs[que_idx+1] =
- htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
mq->qregs[que_idx+2] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
mq->qregs[que_idx+3] =
- htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
+ htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
}
return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -706,45 +707,47 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
ql_log(ql_log_warn, vha, 0xd000,
"Failed to dump firmware (%x), dump status flags (0x%lx).\n",
rval, ha->fw_dump_cap_flags);
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
} else {
ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
}
+void qla2xxx_dump_fw(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ vha->hw->isp_ops->fw_dump(vha);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2300_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2300_fw_dump *fw;
void *nxt;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd002,
"No buffer available for dump.\n");
- goto qla2300_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -752,19 +755,19 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2300_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp23;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2300(ha)) {
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -772,74 +775,74 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
rval = QLA_FUNCTION_TIMEOUT;
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.req_q_in;
- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
cnt++, dmp_reg++)
- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2300.mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
cnt++, dmp_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x40);
+ wrt_reg_word(&reg->ctrl_status, 0x40);
qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x50);
+ wrt_reg_word(&reg->ctrl_status, 0x50);
qla2xxx_read_window(reg, 48, fw->dma_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2800);
+ wrt_reg_word(&reg->pcr, 0x2800);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2A00);
+ wrt_reg_word(&reg->pcr, 0x2A00);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2C00);
+ wrt_reg_word(&reg->pcr, 0x2C00);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2E00);
+ wrt_reg_word(&reg->pcr, 0x2E00);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset RISC. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
@@ -860,12 +863,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Get RISC SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
- sizeof(fw->risc_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->risc_ram), &nxt);
/* Get stack SRAM. */
if (rval == QLA_SUCCESS)
rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
- sizeof(fw->stack_ram) / 2, &nxt);
+ ARRAY_SIZE(fw->stack_ram), &nxt);
/* Get data SRAM. */
if (rval == QLA_SUCCESS)
@@ -876,48 +879,31 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, nxt);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2300_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
/**
* qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
* @vha: HA context
- * @hardware_locked: Called with the hardware_lock
*/
void
-qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla2100_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt, timer;
- uint16_t risc_address;
- uint16_t mb0, mb2;
+ uint16_t risc_address = 0;
+ uint16_t mb0 = 0, mb2 = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- uint16_t __iomem *dmp_reg;
- unsigned long flags;
+ __le16 __iomem *dmp_reg;
struct qla2100_fw_dump *fw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- risc_address = 0;
- mb0 = mb2 = 0;
- flags = 0;
-
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ lockdep_assert_held(&ha->hardware_lock);
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd004,
"No buffer available for dump.\n");
- goto qla2100_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -925,17 +911,17 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla2100_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp21;
qla2xxx_prep_dump(ha, ha->fw_dump);
rval = QLA_SUCCESS;
- fw->hccr = htons(RD_REG_WORD(&reg->hccr));
+ fw->hccr = htons(rd_reg_word(&reg->hccr));
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -944,61 +930,61 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
dmp_reg = &reg->u.isp2100.mailbox0;
for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
if (cnt == 8)
dmp_reg = &reg->u_end.isp2200.mailbox8;
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
}
dmp_reg = &reg->u.isp2100.unused_2[0];
- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
+ fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->ctrl_status, 0x00);
+ wrt_reg_word(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
- WRT_REG_WORD(&reg->pcr, 0x2000);
+ wrt_reg_word(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
- WRT_REG_WORD(&reg->pcr, 0x2100);
+ wrt_reg_word(&reg->pcr, 0x2100);
qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
- WRT_REG_WORD(&reg->pcr, 0x2200);
+ wrt_reg_word(&reg->pcr, 0x2200);
qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
- WRT_REG_WORD(&reg->pcr, 0x2300);
+ wrt_reg_word(&reg->pcr, 0x2300);
qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
- WRT_REG_WORD(&reg->pcr, 0x2400);
+ wrt_reg_word(&reg->pcr, 0x2400);
qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
- WRT_REG_WORD(&reg->pcr, 0x2500);
+ wrt_reg_word(&reg->pcr, 0x2500);
qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
- WRT_REG_WORD(&reg->pcr, 0x2600);
+ wrt_reg_word(&reg->pcr, 0x2600);
qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
- WRT_REG_WORD(&reg->pcr, 0x2700);
+ wrt_reg_word(&reg->pcr, 0x2700);
qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
+ wrt_reg_word(&reg->ctrl_status, 0x10);
qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
- WRT_REG_WORD(&reg->ctrl_status, 0x30);
+ wrt_reg_word(&reg->ctrl_status, 0x30);
qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
/* Reset the ISP. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
}
for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
@@ -1011,11 +997,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Pause RISC. */
if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
- (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
+ (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 30000;
- (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
+ (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt)
udelay(100);
@@ -1025,13 +1011,13 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS) {
/* Set memory configuration and timing. */
if (IS_QLA2100(ha))
- WRT_REG_WORD(&reg->mctr, 0xf1);
+ wrt_reg_word(&reg->mctr, 0xf1);
else
- WRT_REG_WORD(&reg->mctr, 0xf2);
- RD_REG_WORD(&reg->mctr); /* PCI Posting. */
+ wrt_reg_word(&reg->mctr, 0xf2);
+ rd_reg_word(&reg->mctr); /* PCI Posting. */
/* Release RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
}
}
@@ -1041,29 +1027,29 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
}
- for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
cnt++, risc_address++) {
WRT_MAILBOX_REG(ha, reg, 1, risc_address);
- WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
for (timer = 6000000; timer != 0; timer--) {
/* Check for pending interrupts. */
- if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
+ if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
mb0 = RD_MAILBOX_REG(ha, reg, 0);
mb2 = RD_MAILBOX_REG(ha, reg, 2);
- WRT_REG_WORD(&reg->semaphore, 0);
- WRT_REG_WORD(&reg->hccr,
+ wrt_reg_word(&reg->semaphore, 0);
+ wrt_reg_word(&reg->hccr,
HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ rd_reg_word(&reg->hccr);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
udelay(5);
}
@@ -1080,48 +1066,35 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
qla2xxx_dump_post_process(base_vha, rval);
-
-qla2100_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla24xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla24xx_fw_dump *fw;
void *nxt;
void *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ lockdep_assert_held(&ha->hardware_lock);
+
if (IS_P3P_TYPE(ha))
return;
- flags = 0;
ha->fw_dump_cap_flags = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
-
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd006,
"No buffer available for dump.\n");
- goto qla24xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1129,13 +1102,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla24xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp24;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1145,41 +1118,41 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1218,19 +1191,19 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1339,44 +1312,31 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla24xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla25xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla25xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd008,
"No buffer available for dump.\n");
- goto qla25xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1384,14 +1344,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla25xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp25;
qla2xxx_prep_dump(ha, ha->fw_dump);
ha->fw_dump->version = htonl(2);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1405,73 +1365,73 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1535,19 +1495,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1665,44 +1625,31 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla25xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla25xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla81xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla81xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00a,
"No buffer available for dump.\n");
- goto qla81xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
@@ -1710,12 +1657,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
- goto qla81xx_fw_dump_failed;
+ return;
}
fw = &ha->fw_dump->isp.isp81;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -1729,73 +1676,73 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7010, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1859,19 +1806,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1993,57 +1940,44 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla81xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla81xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
void
-qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla83xx_fw_dump(scsi_qla_host_t *vha)
{
int rval;
uint32_t cnt;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t __iomem *dmp_reg;
- uint32_t *iter_reg;
- uint16_t __iomem *mbx_reg;
- unsigned long flags;
+ __le32 __iomem *dmp_reg;
+ __be32 *iter_reg;
+ __le16 __iomem *mbx_reg;
struct qla83xx_fw_dump *fw;
void *nxt, *nxt_chain;
- uint32_t *last_chain = NULL;
+ __be32 *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- flags = 0;
- ha->fw_dump_cap_flags = 0;
+ lockdep_assert_held(&ha->hardware_lock);
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
+ ha->fw_dump_cap_flags = 0;
if (!ha->fw_dump) {
ql_log(ql_log_warn, vha, 0xd00c,
"No buffer available for dump!!!\n");
- goto qla83xx_fw_dump_failed;
+ return;
}
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xd00d,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
- goto qla83xx_fw_dump_failed;
+ return;
}
QLA_FW_STOPPED(ha);
fw = &ha->fw_dump->isp.isp83;
qla2xxx_prep_dump(ha, ha->fw_dump);
- fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
+ fw->host_status = htonl(rd_reg_dword(&reg->host_status));
/*
* Pause RISC. No need to track timeout, as resetting the chip
@@ -2051,24 +1985,24 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
*/
qla24xx_pause_risc(reg, ha);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
+ wrt_reg_dword(&reg->iobase_addr, 0x6000);
dmp_reg = &reg->iobase_window;
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
dmp_reg = &reg->unused_4_1[0];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
- WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
+ wrt_reg_dword(&reg->iobase_addr, 0x6010);
dmp_reg = &reg->unused_4_1[2];
- RD_REG_DWORD(dmp_reg);
- WRT_REG_DWORD(dmp_reg, 0);
+ rd_reg_dword(dmp_reg);
+ wrt_reg_dword(dmp_reg, 0);
/* select PCR and disable ecc checking and correction */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
/* Host/Risc registers. */
iter_reg = fw->host_risc_reg;
@@ -2077,73 +2011,73 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_read_window(reg, 0x7040, 16, iter_reg);
/* PCIe registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x01);
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
dmp_reg++;
- fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
- fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
+ fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
+ fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
- WRT_REG_DWORD(&reg->iobase_window, 0x00);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x00);
+ rd_reg_dword(&reg->iobase_window);
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
/* Disable interrupts. */
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
/* Shadow registers. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
- fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_addr, 0x0F70);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_select, 0xB0000000);
+ fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
- fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0100000);
+ fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
- fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0200000);
+ fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
- fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0300000);
+ fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
- fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0400000);
+ fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
- fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0500000);
+ fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
- fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0600000);
+ fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
- fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0700000);
+ fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
- fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0800000);
+ fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
- fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0900000);
+ fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
- WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
- fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
+ wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
+ fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
/* RISC I/O register. */
- WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
- fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
+ wrt_reg_dword(&reg->iobase_addr, 0x0010);
+ fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
+ for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -2239,19 +2173,19 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
+ *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -2457,16 +2391,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
+ for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
udelay(5);
if (!cnt) {
@@ -2507,14 +2441,6 @@ copy_queue:
qla83xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
-
-qla83xx_fw_dump_failed:
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#else
- ;
-#endif
}
/****************************************************************************/
@@ -2735,7 +2661,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
- uint16_t __iomem *mbx_reg;
+ __le16 __iomem *mbx_reg;
if (!ql_mask_match(level))
return;
@@ -2750,7 +2676,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
ql_dbg(level, vha, id, "Mailbox registers:\n");
for (i = 0; i < 6; i++, mbx_reg++)
ql_dbg(level, vha, id,
- "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
+ "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
}
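
The qla_dbg.c hunks above all follow the same conversion: the RD_REG_*/WRT_REG_* macros become the typed rd_reg_*/wrt_reg_* helpers (defined in the qla_def.h hunk further down), and the firmware-dump entry points drop the hardware_locked parameter in favour of asserting that the caller already holds the lock. A minimal sketch of the resulting calling convention, assuming the driver's qla_def.h declarations and <linux/lockdep.h>; the wrapper name is hypothetical and not part of the patch:

	/* hypothetical illustration only -- not taken from the patch */
	static void example_capture_host_status(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;
		struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
		u32 status;

		/*
		 * Callers must already hold the lock; the old conditional
		 * spin_lock_irqsave()/spin_unlock_irqrestore() pair is gone.
		 */
		lockdep_assert_held(&ha->hardware_lock);

		/* typed accessor: takes a __le32 __iomem *, returns CPU-order u32 */
		status = rd_reg_dword(&reg->host_status);

		/* dump buffers store big-endian values, hence the htonl() */
		ha->fw_dump->isp.isp81.host_status = htonl(status);
	}
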
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 433e95502808..54ed020e6f75 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -12,205 +12,205 @@
*/
struct qla2300_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t risc_host_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t resp_dma_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[64];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf800];
- uint16_t stack_ram[0x1000];
- uint16_t data_ram[1];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 risc_host_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 resp_dma_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[64];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf800];
+ __be16 stack_ram[0x1000];
+ __be16 data_ram[1];
};
struct qla2100_fw_dump {
- uint16_t hccr;
- uint16_t pbiu_reg[8];
- uint16_t mailbox_reg[32];
- uint16_t dma_reg[48];
- uint16_t risc_hdw_reg[16];
- uint16_t risc_gp0_reg[16];
- uint16_t risc_gp1_reg[16];
- uint16_t risc_gp2_reg[16];
- uint16_t risc_gp3_reg[16];
- uint16_t risc_gp4_reg[16];
- uint16_t risc_gp5_reg[16];
- uint16_t risc_gp6_reg[16];
- uint16_t risc_gp7_reg[16];
- uint16_t frame_buf_hdw_reg[16];
- uint16_t fpm_b0_reg[64];
- uint16_t fpm_b1_reg[64];
- uint16_t risc_ram[0xf000];
+ __be16 hccr;
+ __be16 pbiu_reg[8];
+ __be16 mailbox_reg[32];
+ __be16 dma_reg[48];
+ __be16 risc_hdw_reg[16];
+ __be16 risc_gp0_reg[16];
+ __be16 risc_gp1_reg[16];
+ __be16 risc_gp2_reg[16];
+ __be16 risc_gp3_reg[16];
+ __be16 risc_gp4_reg[16];
+ __be16 risc_gp5_reg[16];
+ __be16 risc_gp6_reg[16];
+ __be16 risc_gp7_reg[16];
+ __be16 frame_buf_hdw_reg[16];
+ __be16 fpm_b0_reg[64];
+ __be16 fpm_b1_reg[64];
+ __be16 risc_ram[0xf000];
};
struct qla24xx_fw_dump {
- uint32_t host_status;
- uint32_t host_reg[32];
- uint32_t shadow_reg[7];
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[16];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[16];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[112];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[176];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_reg[32];
+ __be32 shadow_reg[7];
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[16];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[16];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[112];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[176];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla25xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[192];
- uint32_t fb_hdw_reg[192];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[192];
+ __be32 fb_hdw_reg[192];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla81xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[32];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[128];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t rseq_gp_reg[128];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t aseq_gp_reg[128];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t cmd_dma_reg[16];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[224];
- uint32_t fb_hdw_reg[208];
- uint32_t code_ram[0x2000];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[32];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[128];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 rseq_gp_reg[128];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 aseq_gp_reg[128];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 cmd_dma_reg[16];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[224];
+ __be32 fb_hdw_reg[208];
+ __be32 code_ram[0x2000];
+ __be32 ext_mem[1];
};
struct qla83xx_fw_dump {
- uint32_t host_status;
- uint32_t host_risc_reg[48];
- uint32_t pcie_regs[4];
- uint32_t host_reg[32];
- uint32_t shadow_reg[11];
- uint32_t risc_io_reg;
- uint16_t mailbox_reg[32];
- uint32_t xseq_gp_reg[256];
- uint32_t xseq_0_reg[48];
- uint32_t xseq_1_reg[16];
- uint32_t xseq_2_reg[16];
- uint32_t rseq_gp_reg[256];
- uint32_t rseq_0_reg[32];
- uint32_t rseq_1_reg[16];
- uint32_t rseq_2_reg[16];
- uint32_t rseq_3_reg[16];
- uint32_t aseq_gp_reg[256];
- uint32_t aseq_0_reg[32];
- uint32_t aseq_1_reg[16];
- uint32_t aseq_2_reg[16];
- uint32_t aseq_3_reg[16];
- uint32_t cmd_dma_reg[64];
- uint32_t req0_dma_reg[15];
- uint32_t resp0_dma_reg[15];
- uint32_t req1_dma_reg[15];
- uint32_t xmt0_dma_reg[32];
- uint32_t xmt1_dma_reg[32];
- uint32_t xmt2_dma_reg[32];
- uint32_t xmt3_dma_reg[32];
- uint32_t xmt4_dma_reg[32];
- uint32_t xmt_data_dma_reg[16];
- uint32_t rcvt0_data_dma_reg[32];
- uint32_t rcvt1_data_dma_reg[32];
- uint32_t risc_gp_reg[128];
- uint32_t lmc_reg[128];
- uint32_t fpm_hdw_reg[256];
- uint32_t rq0_array_reg[256];
- uint32_t rq1_array_reg[256];
- uint32_t rp0_array_reg[256];
- uint32_t rp1_array_reg[256];
- uint32_t queue_control_reg[16];
- uint32_t fb_hdw_reg[432];
- uint32_t at0_array_reg[128];
- uint32_t code_ram[0x2400];
- uint32_t ext_mem[1];
+ __be32 host_status;
+ __be32 host_risc_reg[48];
+ __be32 pcie_regs[4];
+ __be32 host_reg[32];
+ __be32 shadow_reg[11];
+ __be32 risc_io_reg;
+ __be16 mailbox_reg[32];
+ __be32 xseq_gp_reg[256];
+ __be32 xseq_0_reg[48];
+ __be32 xseq_1_reg[16];
+ __be32 xseq_2_reg[16];
+ __be32 rseq_gp_reg[256];
+ __be32 rseq_0_reg[32];
+ __be32 rseq_1_reg[16];
+ __be32 rseq_2_reg[16];
+ __be32 rseq_3_reg[16];
+ __be32 aseq_gp_reg[256];
+ __be32 aseq_0_reg[32];
+ __be32 aseq_1_reg[16];
+ __be32 aseq_2_reg[16];
+ __be32 aseq_3_reg[16];
+ __be32 cmd_dma_reg[64];
+ __be32 req0_dma_reg[15];
+ __be32 resp0_dma_reg[15];
+ __be32 req1_dma_reg[15];
+ __be32 xmt0_dma_reg[32];
+ __be32 xmt1_dma_reg[32];
+ __be32 xmt2_dma_reg[32];
+ __be32 xmt3_dma_reg[32];
+ __be32 xmt4_dma_reg[32];
+ __be32 xmt_data_dma_reg[16];
+ __be32 rcvt0_data_dma_reg[32];
+ __be32 rcvt1_data_dma_reg[32];
+ __be32 risc_gp_reg[128];
+ __be32 lmc_reg[128];
+ __be32 fpm_hdw_reg[256];
+ __be32 rq0_array_reg[256];
+ __be32 rq1_array_reg[256];
+ __be32 rp0_array_reg[256];
+ __be32 rp1_array_reg[256];
+ __be32 queue_control_reg[16];
+ __be32 fb_hdw_reg[432];
+ __be32 at0_array_reg[128];
+ __be32 code_ram[0x2400];
+ __be32 ext_mem[1];
};
#define EFT_NUM_BUFFERS 4
@@ -223,44 +223,45 @@ struct qla83xx_fw_dump {
#define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b))
struct qla2xxx_fce_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- uint32_t addr_l;
- uint32_t addr_h;
- uint32_t eregs[8];
+ __be32 size;
+ __be32 addr_l;
+ __be32 addr_h;
+ __be32 eregs[8];
};
/* used by exchange off load and extended login offload */
struct qla2xxx_offld_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t size;
- u64 addr;
+ __be32 size;
+ __be32 reserved;
+ __be64 addr;
};
struct qla2xxx_mq_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
- uint32_t count;
- uint32_t qregs[4 * QLA_MQ_SIZE];
+ __be32 count;
+ __be32 qregs[4 * QLA_MQ_SIZE];
};
struct qla2xxx_mqueue_header {
- uint32_t queue;
+ __be32 queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
#define TYPE_ATIO_QUEUE 0x3
- uint32_t number;
- uint32_t size;
+ __be32 number;
+ __be32 size;
};
struct qla2xxx_mqueue_chain {
- uint32_t type;
- uint32_t chain_size;
+ __be32 type;
+ __be32 chain_size;
};
#define DUMP_CHAIN_VARIANT 0x80000000
@@ -273,28 +274,28 @@ struct qla2xxx_mqueue_chain {
struct qla2xxx_fw_dump {
uint8_t signature[4];
- uint32_t version;
+ __be32 version;
- uint32_t fw_major_version;
- uint32_t fw_minor_version;
- uint32_t fw_subminor_version;
- uint32_t fw_attributes;
+ __be32 fw_major_version;
+ __be32 fw_minor_version;
+ __be32 fw_subminor_version;
+ __be32 fw_attributes;
- uint32_t vendor;
- uint32_t device;
- uint32_t subsystem_vendor;
- uint32_t subsystem_device;
+ __be32 vendor;
+ __be32 device;
+ __be32 subsystem_vendor;
+ __be32 subsystem_device;
- uint32_t fixed_size;
- uint32_t mem_size;
- uint32_t req_q_size;
- uint32_t rsp_q_size;
+ __be32 fixed_size;
+ __be32 mem_size;
+ __be32 req_q_size;
+ __be32 rsp_q_size;
- uint32_t eft_size;
- uint32_t eft_addr_l;
- uint32_t eft_addr_h;
+ __be32 eft_size;
+ __be32 eft_addr_l;
+ __be32 eft_addr_h;
- uint32_t header_size;
+ __be32 header_size;
union {
struct qla2100_fw_dump isp21;
@@ -369,7 +370,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
-extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
+extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *,
uint32_t, void **);
extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
struct qla_hw_data *);
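
The qla_dbg.h retyping above is what makes the byte-order handling checkable: the dump structures hold big-endian values (filled via htonl()/htons() in qla_dbg.c), so annotating the fields as __be16/__be32 lets sparse (make C=1) flag a store that skips the conversion. A small illustrative fragment, not taken from the patch, using names declared above; 'ha' and 'reg' are as in the qla81xx dump path:

	/* illustrative only */
	struct qla81xx_fw_dump *fw = &ha->fw_dump->isp.isp81;

	fw->risc_io_reg = rd_reg_dword(&reg->iobase_window);        /* sparse: u32 stored into __be32 */
	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); /* correct: convert to big-endian first */
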
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 47c7a56438b5..42dbf90d4651 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -128,15 +128,50 @@ static inline uint32_t make_handle(uint16_t x, uint16_t y)
* I/O register
*/
-#define RD_REG_BYTE(addr) readb(addr)
-#define RD_REG_WORD(addr) readw(addr)
-#define RD_REG_DWORD(addr) readl(addr)
-#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr)
-#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr)
-#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data) writeb(data, addr)
-#define WRT_REG_WORD(addr, data) writew(data, addr)
-#define WRT_REG_DWORD(addr, data) writel(data, addr)
+static inline u8 rd_reg_byte(const volatile u8 __iomem *addr)
+{
+ return readb(addr);
+}
+
+static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
+{
+ return readw(addr);
+}
+
+static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+
+static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+
+static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data)
+{
+ return writeb(data, addr);
+}
+
+static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
+{
+ return writew(data, addr);
+}
+
+static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
+{
+ return writel(data, addr);
+}
/*
* ISP83XX specific remote register addresses
@@ -469,7 +504,7 @@ struct srb_iocb {
u32 rx_size;
dma_addr_t els_plogi_pyld_dma;
dma_addr_t els_resp_pyld_dma;
- uint32_t fw_status[3];
+ __le32 fw_status[3];
__le16 comp_status;
__le16 len;
} els_plogi;
@@ -520,8 +555,8 @@ struct srb_iocb {
#define MAX_IOCB_MB_REG 28
#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
- __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
+ u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
struct completion comp;
@@ -532,7 +567,7 @@ struct srb_iocb {
} nack;
struct {
__le16 comp_status;
- uint16_t rsp_pyld_len;
+ __le16 rsp_pyld_len;
uint8_t aen_op;
void *desc;
@@ -663,23 +698,23 @@ struct msg_echo_lb {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_2xxx {
- uint16_t flash_address; /* Flash BIOS address */
- uint16_t flash_data; /* Flash BIOS data */
- uint16_t unused_1[1]; /* Gap */
- uint16_t ctrl_status; /* Control/Status */
+ __le16 flash_address; /* Flash BIOS address */
+ __le16 flash_data; /* Flash BIOS data */
+ __le16 unused_1[1]; /* Gap */
+ __le16 ctrl_status; /* Control/Status */
#define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */
#define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */
#define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */
- uint16_t ictrl; /* Interrupt control */
+ __le16 ictrl; /* Interrupt control */
#define ICR_EN_INT BIT_15 /* ISP enable interrupts. */
#define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */
- uint16_t istatus; /* Interrupt status */
+ __le16 istatus; /* Interrupt status */
#define ISR_RISC_INT BIT_3 /* RISC interrupt */
- uint16_t semaphore; /* Semaphore */
- uint16_t nvram; /* NVRAM register. */
+ __le16 semaphore; /* Semaphore */
+ __le16 nvram; /* NVRAM register. */
#define NVR_DESELECT 0
#define NVR_BUSY BIT_15
#define NVR_WRT_ENABLE BIT_14 /* Write enable */
@@ -693,80 +728,80 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t unused_2[59]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 unused_2[59]; /* Gap */
} __attribute__((packed)) isp2100;
struct {
/* Request Queue */
- uint16_t req_q_in; /* In-Pointer */
- uint16_t req_q_out; /* Out-Pointer */
+ __le16 req_q_in; /* In-Pointer */
+ __le16 req_q_out; /* Out-Pointer */
/* Response Queue */
- uint16_t rsp_q_in; /* In-Pointer */
- uint16_t rsp_q_out; /* Out-Pointer */
+ __le16 rsp_q_in; /* In-Pointer */
+ __le16 rsp_q_out; /* Out-Pointer */
/* RISC to Host Status */
- uint32_t host_status;
+ __le32 host_status;
#define HSR_RISC_INT BIT_15 /* RISC interrupt */
#define HSR_RISC_PAUSED BIT_8 /* RISC Paused */
/* Host to Host Semaphore */
- uint16_t host_semaphore;
- uint16_t unused_3[17]; /* Gap */
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
- uint16_t fb_cmd;
- uint16_t unused_4[10]; /* Gap */
+ __le16 host_semaphore;
+ __le16 unused_3[17]; /* Gap */
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+ __le16 fb_cmd;
+ __le16 unused_4[10]; /* Gap */
} __attribute__((packed)) isp2300;
} u;
- uint16_t fpm_diag_config;
- uint16_t unused_5[0x4]; /* Gap */
- uint16_t risc_hw;
- uint16_t unused_5_1; /* Gap */
- uint16_t pcr; /* Processor Control Register. */
- uint16_t unused_6[0x5]; /* Gap */
- uint16_t mctr; /* Memory Configuration and Timing. */
- uint16_t unused_7[0x3]; /* Gap */
- uint16_t fb_cmd_2100; /* Unused on 23XX */
- uint16_t unused_8[0x3]; /* Gap */
- uint16_t hccr; /* Host command & control register. */
+ __le16 fpm_diag_config;
+ __le16 unused_5[0x4]; /* Gap */
+ __le16 risc_hw;
+ __le16 unused_5_1; /* Gap */
+ __le16 pcr; /* Processor Control Register. */
+ __le16 unused_6[0x5]; /* Gap */
+ __le16 mctr; /* Memory Configuration and Timing. */
+ __le16 unused_7[0x3]; /* Gap */
+ __le16 fb_cmd_2100; /* Unused on 23XX */
+ __le16 unused_8[0x3]; /* Gap */
+ __le16 hccr; /* Host command & control register. */
#define HCCR_HOST_INT BIT_7 /* Host interrupt bit */
#define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */
/* HCCR commands */
@@ -779,9 +814,9 @@ struct device_reg_2xxx {
#define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */
#define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */
- uint16_t unused_9[5]; /* Gap */
- uint16_t gpiod; /* GPIO Data register. */
- uint16_t gpioe; /* GPIO Enable register. */
+ __le16 unused_9[5]; /* Gap */
+ __le16 gpiod; /* GPIO Data register. */
+ __le16 gpioe; /* GPIO Enable register. */
#define GPIO_LED_MASK 0x00C0
#define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000
#define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040
@@ -793,95 +828,95 @@ struct device_reg_2xxx {
union {
struct {
- uint16_t unused_10[8]; /* Gap */
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23; /* Also probe reg. */
+ __le16 unused_10[8]; /* Gap */
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23; /* Also probe reg. */
} __attribute__((packed)) isp2200;
} u_end;
};
struct device_reg_25xxmq {
- uint32_t req_q_in;
- uint32_t req_q_out;
- uint32_t rsp_q_in;
- uint32_t rsp_q_out;
- uint32_t atio_q_in;
- uint32_t atio_q_out;
+ __le32 req_q_in;
+ __le32 req_q_out;
+ __le32 rsp_q_in;
+ __le32 rsp_q_out;
+ __le32 atio_q_in;
+ __le32 atio_q_out;
};
struct device_reg_fx00 {
- uint32_t mailbox0; /* 00 */
- uint32_t mailbox1; /* 04 */
- uint32_t mailbox2; /* 08 */
- uint32_t mailbox3; /* 0C */
- uint32_t mailbox4; /* 10 */
- uint32_t mailbox5; /* 14 */
- uint32_t mailbox6; /* 18 */
- uint32_t mailbox7; /* 1C */
- uint32_t mailbox8; /* 20 */
- uint32_t mailbox9; /* 24 */
- uint32_t mailbox10; /* 28 */
- uint32_t mailbox11;
- uint32_t mailbox12;
- uint32_t mailbox13;
- uint32_t mailbox14;
- uint32_t mailbox15;
- uint32_t mailbox16;
- uint32_t mailbox17;
- uint32_t mailbox18;
- uint32_t mailbox19;
- uint32_t mailbox20;
- uint32_t mailbox21;
- uint32_t mailbox22;
- uint32_t mailbox23;
- uint32_t mailbox24;
- uint32_t mailbox25;
- uint32_t mailbox26;
- uint32_t mailbox27;
- uint32_t mailbox28;
- uint32_t mailbox29;
- uint32_t mailbox30;
- uint32_t mailbox31;
- uint32_t aenmailbox0;
- uint32_t aenmailbox1;
- uint32_t aenmailbox2;
- uint32_t aenmailbox3;
- uint32_t aenmailbox4;
- uint32_t aenmailbox5;
- uint32_t aenmailbox6;
- uint32_t aenmailbox7;
+ __le32 mailbox0; /* 00 */
+ __le32 mailbox1; /* 04 */
+ __le32 mailbox2; /* 08 */
+ __le32 mailbox3; /* 0C */
+ __le32 mailbox4; /* 10 */
+ __le32 mailbox5; /* 14 */
+ __le32 mailbox6; /* 18 */
+ __le32 mailbox7; /* 1C */
+ __le32 mailbox8; /* 20 */
+ __le32 mailbox9; /* 24 */
+ __le32 mailbox10; /* 28 */
+ __le32 mailbox11;
+ __le32 mailbox12;
+ __le32 mailbox13;
+ __le32 mailbox14;
+ __le32 mailbox15;
+ __le32 mailbox16;
+ __le32 mailbox17;
+ __le32 mailbox18;
+ __le32 mailbox19;
+ __le32 mailbox20;
+ __le32 mailbox21;
+ __le32 mailbox22;
+ __le32 mailbox23;
+ __le32 mailbox24;
+ __le32 mailbox25;
+ __le32 mailbox26;
+ __le32 mailbox27;
+ __le32 mailbox28;
+ __le32 mailbox29;
+ __le32 mailbox30;
+ __le32 mailbox31;
+ __le32 aenmailbox0;
+ __le32 aenmailbox1;
+ __le32 aenmailbox2;
+ __le32 aenmailbox3;
+ __le32 aenmailbox4;
+ __le32 aenmailbox5;
+ __le32 aenmailbox6;
+ __le32 aenmailbox7;
/* Request Queue. */
- uint32_t req_q_in; /* A0 - Request Queue In-Pointer */
- uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */
+ __le32 req_q_in; /* A0 - Request Queue In-Pointer */
+ __le32 req_q_out; /* A4 - Request Queue Out-Pointer */
/* Response Queue. */
- uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */
- uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */
+ __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */
+ __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */
/* Init values shadowed on FW Up Event */
- uint32_t initval0; /* B0 */
- uint32_t initval1; /* B4 */
- uint32_t initval2; /* B8 */
- uint32_t initval3; /* BC */
- uint32_t initval4; /* C0 */
- uint32_t initval5; /* C4 */
- uint32_t initval6; /* C8 */
- uint32_t initval7; /* CC */
- uint32_t fwheartbeat; /* D0 */
- uint32_t pseudoaen; /* D4 */
+ __le32 initval0; /* B0 */
+ __le32 initval1; /* B4 */
+ __le32 initval2; /* B8 */
+ __le32 initval3; /* BC */
+ __le32 initval4; /* C0 */
+ __le32 initval5; /* C4 */
+ __le32 initval6; /* C8 */
+ __le32 initval7; /* CC */
+ __le32 fwheartbeat; /* D0 */
+ __le32 pseudoaen; /* D4 */
};
@@ -921,18 +956,18 @@ typedef union {
&(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \
&(reg)->u.isp2300.mailbox0 + (num))
#define RD_MAILBOX_REG(ha, reg, num) \
- RD_REG_WORD(MAILBOX_REG(ha, reg, num))
+ rd_reg_word(MAILBOX_REG(ha, reg, num))
#define WRT_MAILBOX_REG(ha, reg, num, data) \
- WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data)
+ wrt_reg_word(MAILBOX_REG(ha, reg, num), data)
#define FB_CMD_REG(ha, reg) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
&(reg)->fb_cmd_2100 : \
&(reg)->u.isp2300.fb_cmd)
#define RD_FB_CMD_REG(ha, reg) \
- RD_REG_WORD(FB_CMD_REG(ha, reg))
+ rd_reg_word(FB_CMD_REG(ha, reg))
#define WRT_FB_CMD_REG(ha, reg, data) \
- WRT_REG_WORD(FB_CMD_REG(ha, reg), data)
+ wrt_reg_word(FB_CMD_REG(ha, reg), data)
typedef struct {
uint32_t out_mb; /* outbound from driver */
@@ -1316,7 +1351,7 @@ typedef struct {
uint8_t port_id[4];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
- uint16_t execution_throttle;
+ __le16 execution_throttle;
uint16_t execution_count;
uint8_t reset_count;
uint8_t reserved_2;
@@ -1402,9 +1437,9 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 frame_payload_size;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1413,17 +1448,17 @@ typedef struct {
uint8_t login_timeout;
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t request_q_outpointer;
- uint16_t response_q_inpointer;
- uint16_t request_q_length;
- uint16_t response_q_length;
- __le64 request_q_address __packed;
- __le64 response_q_address __packed;
+ __le16 request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_length;
+ __le16 response_q_length;
+ __le64 request_q_address __packed;
+ __le64 response_q_address __packed;
- uint16_t lun_enables;
+ __le16 lun_enables;
uint8_t command_resource_count;
uint8_t immediate_notify_resource_count;
- uint16_t timeout;
+ __le16 timeout;
uint8_t reserved_2[2];
/*
@@ -1571,8 +1606,8 @@ typedef struct {
uint8_t firmware_options[2];
uint16_t frame_payload_size;
- uint16_t max_iocb_allocation;
- uint16_t execution_throttle;
+ __le16 max_iocb_allocation;
+ __le16 execution_throttle;
uint8_t retry_count;
uint8_t retry_delay; /* unused */
uint8_t port_name[WWN_SIZE]; /* Big endian. */
@@ -1696,7 +1731,7 @@ typedef struct {
uint8_t reset_delay;
uint8_t port_down_retry_count;
uint8_t boot_id_number;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint8_t fcode_boot_port_name[WWN_SIZE];
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
@@ -1802,7 +1837,7 @@ struct atio {
};
typedef union {
- uint16_t extended;
+ __le16 extended;
struct {
uint8_t reserved;
uint8_t standard;
@@ -1828,18 +1863,18 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
#define CF_WRITE BIT_6
#define CF_READ BIT_5
#define CF_SIMPLE_TAG BIT_3
#define CF_ORDERED_TAG BIT_2
#define CF_HEAD_TAG BIT_1
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
union {
struct dsd32 dsd32[3];
struct dsd64 dsd64[2];
@@ -1857,11 +1892,11 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
target_id_t target; /* SCSI ID */
- uint16_t lun; /* SCSI LUN */
- uint16_t control_flags; /* Control flags. */
+ __le16 lun; /* SCSI LUN */
+ __le16 control_flags; /* Control flags. */
uint16_t reserved_1;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t byte_count; /* Total byte count. */
struct dsd64 dsd[2];
@@ -1923,7 +1958,7 @@ struct crc_context {
__le16 guard_seed; /* Initial Guard Seed */
__le16 prot_opts; /* Requested Data Protection Mode */
__le16 blk_size; /* Data size in bytes */
- uint16_t runt_blk_guard; /* Guard value for runt block (tape
+ __le16 runt_blk_guard; /* Guard value for runt block (tape
* only) */
__le32 byte_count; /* Total byte count/ total data
* transfer count */
@@ -1976,13 +2011,13 @@ typedef struct {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t scsi_status; /* SCSI status. */
- uint16_t comp_status; /* Completion status. */
- uint16_t state_flags; /* State flags. */
- uint16_t status_flags; /* Status flags. */
- uint16_t rsp_info_len; /* Response Info Length. */
- uint16_t req_sense_length; /* Request sense data length. */
- uint32_t residual_length; /* Residual transfer length. */
+ __le16 scsi_status; /* SCSI status. */
+ __le16 comp_status; /* Completion status. */
+ __le16 state_flags; /* State flags. */
+ __le16 status_flags; /* Status flags. */
+ __le16 rsp_info_len; /* Response Info Length. */
+ __le16 req_sense_length; /* Request sense data length. */
+ __le32 residual_length; /* Residual transfer length. */
uint8_t rsp_info[8]; /* FCP response information. */
uint8_t req_sense_data[32]; /* Request sense data. */
} sts_entry_t;
@@ -2114,8 +2149,8 @@ typedef struct {
/* clear port changed, */
/* use sequence number. */
uint8_t reserved_1;
- uint16_t sequence_number; /* Sequence number of event */
- uint16_t lun; /* SCSI LUN */
+ __le16 sequence_number; /* Sequence number of event */
+ __le16 lun; /* SCSI LUN */
uint8_t reserved_2[48];
} mrk_entry_t;
@@ -2130,19 +2165,19 @@ typedef struct {
uint8_t entry_status; /* Entry Status. */
uint32_t handle1; /* System handle. */
target_id_t loop_id;
- uint16_t status;
- uint16_t control_flags; /* Control flags. */
+ __le16 status;
+ __le16 control_flags; /* Control flags. */
uint16_t reserved2;
- uint16_t timeout;
- uint16_t cmd_dsd_count;
- uint16_t total_dsd_count;
+ __le16 timeout;
+ __le16 cmd_dsd_count;
+ __le16 total_dsd_count;
uint8_t type;
uint8_t r_ctl;
- uint16_t rx_id;
+ __le16 rx_id;
uint16_t reserved3;
uint32_t handle2;
- uint32_t rsp_bytecount;
- uint32_t req_bytecount;
+ __le32 rsp_bytecount;
+ __le32 req_bytecount;
struct dsd64 req_dsd;
struct dsd64 rsp_dsd;
} ms_iocb_entry_t;
@@ -2170,20 +2205,20 @@ struct mbx_entry {
uint32_t handle;
target_id_t loop_id;
- uint16_t status;
- uint16_t state_flags;
- uint16_t status_flags;
+ __le16 status;
+ __le16 state_flags;
+ __le16 status_flags;
uint32_t sys_define2[2];
- uint16_t mb0;
- uint16_t mb1;
- uint16_t mb2;
- uint16_t mb3;
- uint16_t mb6;
- uint16_t mb7;
- uint16_t mb9;
- uint16_t mb10;
+ __le16 mb0;
+ __le16 mb1;
+ __le16 mb2;
+ __le16 mb3;
+ __le16 mb6;
+ __le16 mb7;
+ __le16 mb9;
+ __le16 mb10;
uint32_t reserved_2[2];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -2205,52 +2240,52 @@ struct imm_ntfy_from_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t lun;
+ __le16 lun;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t status_modifier;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
+ __le16 status_modifier;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
#define SRR_IU_DATA_IN 0x1
#define SRR_IU_DATA_OUT 0x5
#define SRR_IU_STATUS 0x7
- uint16_t srr_ox_id;
+ __le16 srr_ox_id;
uint8_t reserved_2[28];
} isp2x;
struct {
uint32_t reserved;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_2;
- uint16_t flags;
+ __le16 flags;
#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_ox_id;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_ox_id;
union {
struct {
uint8_t node_name[8];
} plogi; /* PLOGI/ADISC/PDISC */
struct {
/* PRLI word 3 bit 0-15 */
- uint16_t wd3_lo;
+ __le16 wd3_lo;
uint8_t resv0[6];
} prli;
struct {
uint8_t port_id[3];
uint8_t resv1;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t resv2;
} req_els;
} u;
@@ -2263,7 +2298,7 @@ struct imm_ntfy_from_isp {
} isp24;
} u;
uint16_t reserved_7;
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#endif
@@ -2653,8 +2688,8 @@ static const char * const port_dstate_str[] = {
#define FDMI_HBA_VENDOR_IDENTIFIER 0xe0
struct ct_fdmi_hba_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[64];
@@ -2666,11 +2701,11 @@ struct ct_fdmi_hba_attr {
uint8_t orom_version[16];
uint8_t fw_version[32];
uint8_t os_version[128];
- uint32_t max_ct_len;
+ __be32 max_ct_len;
uint8_t sym_name[256];
- uint32_t vendor_specific_info;
- uint32_t num_ports;
+ __be32 vendor_specific_info;
+ __be32 num_ports;
uint8_t fabric_name[WWN_SIZE];
uint8_t bios_name[32];
uint8_t vendor_identifier[8];
@@ -2678,12 +2713,12 @@ struct ct_fdmi_hba_attr {
};
struct ct_fdmi1_hba_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT];
};
struct ct_fdmi2_hba_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT];
};
@@ -2735,44 +2770,44 @@ struct ct_fdmi2_hba_attributes {
#define FC_CLASS_2_3 0x0C
struct ct_fdmi_port_attr {
- uint16_t type;
- uint16_t len;
+ __be16 type;
+ __be16 len;
union {
uint8_t fc4_types[32];
- uint32_t sup_speed;
- uint32_t cur_speed;
- uint32_t max_frame_size;
+ __be32 sup_speed;
+ __be32 cur_speed;
+ __be32 max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[256];
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
uint8_t port_sym_name[128];
- uint32_t port_type;
- uint32_t port_supported_cos;
+ __be32 port_type;
+ __be32 port_supported_cos;
uint8_t fabric_name[WWN_SIZE];
uint8_t port_fc4_type[32];
- uint32_t port_state;
- uint32_t num_ports;
- uint32_t port_id;
+ __be32 port_state;
+ __be32 num_ports;
+ __be32 port_id;
uint8_t smartsan_service[24];
uint8_t smartsan_guid[16];
uint8_t smartsan_version[24];
uint8_t smartsan_prod_name[16];
- uint32_t smartsan_port_info;
- uint32_t smartsan_qos_support;
- uint32_t smartsan_security_support;
+ __be32 smartsan_port_info;
+ __be32 smartsan_qos_support;
+ __be32 smartsan_security_support;
} a;
};
struct ct_fdmi1_port_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT];
};
struct ct_fdmi2_port_attributes {
- uint32_t count;
+ __be32 count;
struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT];
};
@@ -2826,8 +2861,8 @@ struct ct_cmd_hdr {
/* CT command request */
struct ct_sns_req {
struct ct_cmd_hdr header;
- uint16_t command;
- uint16_t max_rsp_size;
+ __be16 command;
+ __be16 max_rsp_size;
uint8_t fragment_id;
uint8_t reserved[3];
@@ -2884,7 +2919,7 @@ struct ct_sns_req {
struct {
uint8_t hba_identifier[8];
- uint32_t entry_count;
+ __be32 entry_count;
uint8_t port_name[8];
struct ct_fdmi2_hba_attributes attrs;
} rhba;
@@ -2939,7 +2974,7 @@ struct ct_sns_req {
/* CT command response header */
struct ct_rsp_hdr {
struct ct_cmd_hdr header;
- uint16_t response;
+ __be16 response;
uint16_t residual;
uint8_t fragment_id;
uint8_t reason_code;
@@ -3025,8 +3060,8 @@ struct ct_sns_rsp {
} gfpn_id;
struct {
- uint16_t speeds;
- uint16_t speed;
+ __be16 speeds;
+ __be16 speed;
} gpsc;
#define GFF_FCP_SCSI_OFFSET 7
@@ -3116,13 +3151,13 @@ struct fab_scan {
struct sns_cmd_pkt {
union {
struct {
- uint16_t buffer_length;
- uint16_t reserved_1;
- __le64 buffer_address __packed;
- uint16_t subcommand_length;
- uint16_t reserved_2;
- uint16_t subcommand;
- uint16_t size;
+ __le16 buffer_length;
+ __le16 reserved_1;
+ __le64 buffer_address __packed;
+ __le16 subcommand_length;
+ __le16 reserved_2;
+ __le16 subcommand;
+ __le16 size;
uint32_t reserved_3;
uint8_t param[36];
} cmd;
@@ -3148,7 +3183,7 @@ struct gid_list_info {
uint8_t area;
uint8_t domain;
uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */
- uint16_t loop_id; /* ISP23XX -- 6 bytes. */
+ __le16 loop_id; /* ISP23XX -- 6 bytes. */
uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
};
@@ -3222,7 +3257,8 @@ struct isp_operations {
int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t,
uint32_t);
- void (*fw_dump) (struct scsi_qla_host *, int);
+ void (*fw_dump)(struct scsi_qla_host *vha);
+ void (*mpi_fw_dump)(struct scsi_qla_host *, int);
int (*beacon_on) (struct scsi_qla_host *);
int (*beacon_off) (struct scsi_qla_host *);
@@ -3456,8 +3492,8 @@ struct rsp_que {
dma_addr_t dma;
response_t *ring;
response_t *ring_ptr;
- uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */
- uint32_t __iomem *rsp_q_out;
+ __le32 __iomem *rsp_q_in; /* FWI2-capable only. */
+ __le32 __iomem *rsp_q_out;
uint16_t ring_index;
uint16_t out_ptr;
uint16_t *in_ptr; /* queue shadow in index */
@@ -3483,8 +3519,8 @@ struct req_que {
dma_addr_t dma;
request_t *ring;
request_t *ring_ptr;
- uint32_t __iomem *req_q_in; /* FWI2-capable only. */
- uint32_t __iomem *req_q_out;
+ __le32 __iomem *req_q_in; /* FWI2-capable only. */
+ __le32 __iomem *req_q_out;
uint16_t ring_index;
uint16_t in_ptr;
uint16_t *out_ptr; /* queue shadow out index */
@@ -3552,7 +3588,7 @@ struct qla_qpair {
struct list_head hints_list;
uint16_t cpuid;
uint16_t retry_term_cnt;
- uint32_t retry_term_exchg_addr;
+ __le32 retry_term_exchg_addr;
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
};
@@ -3579,98 +3615,98 @@ struct rdp_req_payload {
struct rdp_rsp_payload {
struct {
- uint32_t cmd;
- uint32_t len;
+ __be32 cmd;
+ __be32 len;
} hdr;
/* LS Request Info descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t req_payload_word_0;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
} ls_req_info_desc;
/* LS Request Info descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t req_payload_word_0;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 req_payload_word_0;
} ls_req_info_desc2;
/* SFP diagnostic param descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t temperature;
- uint16_t vcc;
- uint16_t tx_bias;
- uint16_t tx_power;
- uint16_t rx_power;
- uint16_t sfp_flags;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 temperature;
+ __be16 vcc;
+ __be16 tx_bias;
+ __be16 tx_power;
+ __be16 rx_power;
+ __be16 sfp_flags;
} sfp_diag_desc;
/* Port Speed Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t speed_capab;
- uint16_t operating_speed;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 speed_capab;
+ __be16 operating_speed;
} port_speed_desc;
/* Link Error Status Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t link_fail_cnt;
- uint32_t loss_sync_cnt;
- uint32_t loss_sig_cnt;
- uint32_t prim_seq_err_cnt;
- uint32_t inval_xmit_word_cnt;
- uint32_t inval_crc_cnt;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 link_fail_cnt;
+ __be32 loss_sync_cnt;
+ __be32 loss_sig_cnt;
+ __be32 prim_seq_err_cnt;
+ __be32 inval_xmit_word_cnt;
+ __be32 inval_crc_cnt;
uint8_t pn_port_phy_type;
uint8_t reserved[3];
} ls_err_desc;
/* Port name description with diag param */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t WWNN[WWN_SIZE];
uint8_t WWPN[WWN_SIZE];
} port_name_diag_desc;
/* Port Name desc for Direct attached Fx_Port or Nx_Port */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t WWNN[WWN_SIZE];
uint8_t WWPN[WWN_SIZE];
} port_name_direct_desc;
/* Buffer Credit descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint32_t fcport_b2b;
- uint32_t attached_fcport_b2b;
- uint32_t fcport_rtt;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be32 fcport_b2b;
+ __be32 attached_fcport_b2b;
+ __be32 fcport_rtt;
} buffer_credit_desc;
/* Optical Element Data Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
- uint16_t high_alarm;
- uint16_t low_alarm;
- uint16_t high_warn;
- uint16_t low_warn;
- uint32_t element_flags;
+ __be32 desc_tag;
+ __be32 desc_len;
+ __be16 high_alarm;
+ __be16 low_alarm;
+ __be16 high_warn;
+ __be16 low_warn;
+ __be32 element_flags;
} optical_elmt_desc[5];
/* Optical Product Data Descriptor */
struct {
- uint32_t desc_tag;
- uint32_t desc_len;
+ __be32 desc_tag;
+ __be32 desc_len;
uint8_t vendor_name[16];
uint8_t part_number[16];
uint8_t serial_number[16];
@@ -3708,17 +3744,17 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
- uint32_t __iomem *atio_q_in;
- uint32_t __iomem *atio_q_out;
+ __le32 __iomem *atio_q_in;
+ __le32 __iomem *atio_q_out;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
- uint16_t saved_exchange_count;
- uint32_t saved_firmware_options_1;
- uint32_t saved_firmware_options_2;
- uint32_t saved_firmware_options_3;
+ __le16 saved_exchange_count;
+ __le32 saved_firmware_options_1;
+ __le32 saved_firmware_options_2;
+ __le32 saved_firmware_options_3;
uint8_t saved_firmware_options[2];
uint8_t saved_add_firmware_options[2];
@@ -3748,6 +3784,11 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+struct qla_hw_data_stat {
+ u32 num_fw_dump;
+ u32 num_mpi_reset;
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -4212,7 +4253,7 @@ struct qla_hw_data {
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
- uint16_t fw_seriallink_options24[4];
+ __le16 fw_seriallink_options24[4];
uint8_t serdes_version[3];
uint8_t mpi_version[3];
@@ -4230,7 +4271,6 @@ struct qla_hw_data {
uint32_t fw_dump_len;
u32 fw_dump_alloc_len;
bool fw_dumped;
- bool fw_dump_mpi;
unsigned long fw_dump_cap_flags;
#define RISC_PAUSE_CMPL 0
#define DMA_SHUTDOWN_CMPL 1
@@ -4241,6 +4281,10 @@ struct qla_hw_data {
#define ISP_MBX_RDY 6
#define ISP_SOFT_RESET_CMPL 7
int fw_dump_reading;
+ void *mpi_fw_dump;
+ u32 mpi_fw_dump_len;
+ unsigned int mpi_fw_dump_reading:1;
+ unsigned int mpi_fw_dumped:1;
int prev_minidump_failed;
dma_addr_t eft_dma;
void *eft;
@@ -4392,7 +4436,7 @@ struct qla_hw_data {
#define NUM_DSD_CHAIN 4096
uint8_t fw_type;
- __le32 file_prd_off; /* File firmware product offset */
+ uint32_t file_prd_off; /* File firmware product offset */
uint32_t md_template_size;
void *md_tmplt_hdr;
@@ -4454,6 +4498,8 @@ struct qla_hw_data {
uint16_t last_zio_threshold;
#define DEFAULT_ZIO_THRESHOLD 5
+
+ struct qla_hw_data_stat stat;
};
struct active_regions {
@@ -4698,13 +4744,13 @@ typedef struct scsi_qla_host {
struct qla27xx_image_status {
uint8_t image_status_mask;
- uint16_t generation;
+ __le16 generation;
uint8_t ver_major;
uint8_t ver_minor;
uint8_t bitmap; /* 28xx only */
uint8_t reserved[2];
- uint32_t checksum;
- uint32_t signature;
+ __le32 checksum;
+ __le32 signature;
} __packed;
/* 28xx aux image status bimap values */
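
The qla_def.h hunks above replace plain uint16_t/uint32_t fields with __le16/__le32 (firmware and NVRAM data, little-endian) or __be16/__be32 (CT/FDMI fabric payloads, big-endian) so that sparse can check byte order. A minimal sketch of the access pattern these annotations imply; the structure and helpers below are illustrative, not taken from the driver:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical wire layout: little-endian firmware fields plus one
 * big-endian fabric field, mirroring the annotations added above. */
struct demo_wire {
	__le16 exchange_count;
	__le32 firmware_options_1;
	__be32 port_state;
};

static void demo_fill(struct demo_wire *w, u16 count, u32 opts, u32 state)
{
	/* cpu_to_le*()/cpu_to_be*() on store, *_to_cpu() on load; a bare
	 * assignment now draws a sparse warning
	 * (make C=1 CF=-D__CHECK_ENDIAN__). */
	w->exchange_count = cpu_to_le16(count);
	w->firmware_options_1 = cpu_to_le32(opts);
	w->port_state = cpu_to_be32(state);
}

static bool demo_count_nonzero(const struct demo_wire *w)
{
	return le16_to_cpu(w->exchange_count) != 0;
}
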
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index f9bad5bd7198..d1e12a29c3f7 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -134,28 +134,28 @@ struct vp_database_24xx {
struct nvram_24xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
+ __le16 nvram_version;
uint16_t reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- __le16 frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t hard_address;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t link_down_on_nos;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 link_down_on_nos;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
/* Offset 56. */
@@ -178,7 +178,7 @@ struct nvram_24xx {
* BIT 11-13 = Output Emphasis 4G
* BIT 14-15 = Reserved
*/
- uint16_t seriallink_options[4];
+ __le16 seriallink_options[4];
uint16_t reserved_2[16];
@@ -218,25 +218,25 @@ struct nvram_24xx {
*
* BIT 16-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
+ __le16 boot_lun_number;
uint16_t reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
+ __le16 alt1_boot_lun_number;
uint16_t reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
+ __le16 alt2_boot_lun_number;
uint16_t reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
+ __le16 alt3_boot_lun_number;
uint16_t reserved_11;
/*
@@ -249,23 +249,23 @@ struct nvram_24xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
uint16_t reserved_13;
- uint16_t boot_id_number;
+ __le16 boot_id_number;
uint16_t reserved_14;
- uint16_t max_luns_per_target;
+ __le16 max_luns_per_target;
uint16_t reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
uint16_t reserved_16[3];
@@ -275,13 +275,13 @@ struct nvram_24xx {
uint8_t prev_drv_ver_minor;
uint8_t prev_drv_ver_subminor;
- uint16_t prev_bios_ver_major;
- uint16_t prev_bios_ver_minor;
+ __le16 prev_bios_ver_major;
+ __le16 prev_bios_ver_minor;
- uint16_t prev_efi_ver_major;
- uint16_t prev_efi_ver_minor;
+ __le16 prev_efi_ver_major;
+ __le16 prev_efi_ver_minor;
- uint16_t prev_fw_ver_major;
+ __le16 prev_fw_ver_major;
uint8_t prev_fw_ver_minor;
uint8_t prev_fw_ver_subminor;
@@ -309,7 +309,7 @@ struct nvram_24xx {
uint16_t subsystem_vendor_id;
uint16_t subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -318,46 +318,46 @@ struct nvram_24xx {
*/
#define ICB_VERSION 1
struct init_cb_24xx {
- uint16_t version;
+ __le16 version;
uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t hard_address;
+ __le16 hard_address;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t link_down_on_nos; /* Milliseconds. */
+ __le16 link_down_on_nos; /* Milliseconds. */
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
__le64 prio_request_q_address __packed;
- uint16_t msix;
- uint16_t msix_atio;
+ __le16 msix;
+ __le16 msix_atio;
uint8_t reserved_2[4];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
- __le64 atio_q_address __packed;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
+ __le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0 = Enable Hard Loop Id
@@ -378,7 +378,7 @@ struct init_cb_24xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -399,7 +399,7 @@ struct init_cb_24xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0 = Reserved
@@ -425,9 +425,9 @@ struct init_cb_24xx {
* BIT 30 = Enable request queue 0 out index shadowing
* BIT 31 = Reserved
*/
- uint32_t firmware_options_3;
- uint16_t qos;
- uint16_t rid;
+ __le32 firmware_options_3;
+ __le16 qos;
+ __le16 rid;
uint8_t reserved_3[20];
};
@@ -443,27 +443,27 @@ struct cmd_bidir {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT hanlde. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Commnad timeout. */
+ __le16 timeout; /* Command timeout. */
- uint16_t wr_dseg_count; /* Write Data segment count. */
- uint16_t rd_dseg_count; /* Read Data segment count. */
+ __le16 wr_dseg_count; /* Write Data segment count. */
+ __le16 rd_dseg_count; /* Read Data segment count. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define BD_WRAP_BACK BIT_3
#define BD_READ_DATA BIT_1
#define BD_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */
uint16_t reserved[2]; /* Reserved */
- uint32_t rd_byte_count; /* Total Byte count Read. */
- uint32_t wr_byte_count; /* Total Byte count write. */
+ __le32 rd_byte_count; /* Total Byte count Read. */
+ __le32 wr_byte_count; /* Total Byte count write. */
uint8_t port_id[3]; /* PortID of destination port.*/
uint8_t vp_index;
@@ -480,28 +480,28 @@ struct cmd_type_6 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dsd_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
/* Data segment address. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -518,16 +518,16 @@ struct cmd_type_7 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
#define FW_MAX_TIMEOUT 0x1999
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint16_t reserved_1;
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t task_mgmt_flags; /* Task management flags. */
+ __le16 task_mgmt_flags; /* Task management flags. */
#define TMF_CLEAR_ACA BIT_14
#define TMF_TARGET_RESET BIT_13
#define TMF_LUN_RESET BIT_12
@@ -547,7 +547,7 @@ struct cmd_type_7 {
uint8_t crn;
uint8_t fcp_cdb[MAX_CMDSZ]; /* SCSI command words. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -565,29 +565,29 @@ struct cmd_type_crc_2 {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
- uint16_t fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
+ __le16 fcp_rsp_dseg_len; /* FCP_RSP DSD length. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
- uint16_t fcp_cmnd_dseg_len; /* Data segment length. */
+ __le16 fcp_cmnd_dseg_len; /* Data segment length. */
__le64 fcp_cmnd_dseg_address __packed;
/* Data segment address. */
__le64 fcp_rsp_dseg_address __packed;
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
+ __le16 crc_context_len; /* Data segment length. */
uint16_t reserved_1; /* MUST be set to 0. */
};
@@ -604,32 +604,32 @@ struct sts_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t ox_id; /* OX_ID used by the firmware. */
+ __le16 comp_status; /* Completion status. */
+ __le16 ox_id; /* OX_ID used by the firmware. */
- uint32_t residual_len; /* FW calc residual transfer length. */
+ __le32 residual_len; /* FW calc residual transfer length. */
union {
uint16_t reserved_1;
- uint16_t nvme_rsp_pyld_len;
+ __le16 nvme_rsp_pyld_len;
};
- uint16_t state_flags; /* State flags. */
+ __le16 state_flags; /* State flags. */
#define SF_TRANSFERRED_DATA BIT_11
#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
- uint16_t retry_delay;
- uint16_t scsi_status; /* SCSI status. */
+ __le16 retry_delay;
+ __le16 scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
- uint32_t rsp_residual_count; /* FCP RSP residual count. */
+ __le32 rsp_residual_count; /* FCP RSP residual count. */
- uint32_t sense_len; /* FCP SENSE length. */
+ __le32 sense_len; /* FCP SENSE length. */
union {
struct {
- uint32_t rsp_data_len; /* FCP response data length */
+ __le32 rsp_data_len; /* FCP response data length */
uint8_t data[28]; /* FCP rsp/sense information */
};
struct nvme_fc_ersp_iu nvme_ersp;
@@ -672,7 +672,7 @@ struct mrk_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint8_t modifier; /* Modifier (7-0). */
#define MK_SYNC_ID_LUN 0 /* Synchronize ID/LUN */
@@ -701,24 +701,24 @@ struct ct_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t cmd_dsd_count;
+ __le16 cmd_dsd_count;
uint8_t vp_index;
uint8_t reserved_1;
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
uint16_t reserved_2;
- uint16_t rsp_dsd_count;
+ __le16 rsp_dsd_count;
uint8_t reserved_3[10];
- uint32_t rsp_byte_count;
- uint32_t cmd_byte_count;
+ __le32 rsp_byte_count;
+ __le32 cmd_byte_count;
struct dsd64 dsd[2];
};
@@ -733,17 +733,17 @@ struct purex_entry_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint16_t reserved1;
+ __le16 reserved1;
uint8_t vp_idx;
uint8_t reserved2;
- uint16_t status_flags;
- uint16_t nport_handle;
+ __le16 status_flags;
+ __le16 nport_handle;
- uint16_t frame_size;
- uint16_t trunc_frame_size;
+ __le16 frame_size;
+ __le16 trunc_frame_size;
- uint32_t rx_xchg_addr;
+ __le32 rx_xchg_addr;
uint8_t d_id[3];
uint8_t r_ctl;
@@ -754,13 +754,13 @@ struct purex_entry_24xx {
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
uint8_t els_frame_payload[20];
};
@@ -777,18 +777,18 @@ struct els_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* response only */
- uint16_t nport_handle;
+ __le16 comp_status; /* response only */
+ __le16 nport_handle;
- uint16_t tx_dsd_count;
+ __le16 tx_dsd_count;
uint8_t vp_index;
uint8_t sof_type;
#define EST_SOFI3 (1 << 4)
#define EST_SOFI2 (3 << 4)
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t rx_dsd_count;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 rx_dsd_count;
uint8_t opcode;
uint8_t reserved_2;
@@ -796,7 +796,7 @@ struct els_entry_24xx {
uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
@@ -817,10 +817,10 @@ struct els_entry_24xx {
__le32 rx_len; /* DSD 1 length. */
};
struct {
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
- uint32_t error_subcode_3;
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
};
};
};
@@ -831,19 +831,19 @@ struct els_sts_entry_24xx {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t handle; /* System handle. */
+ __le32 handle; /* System handle. */
- uint16_t comp_status;
+ __le16 comp_status;
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t reserved_1;
+ __le16 reserved_1;
uint8_t vp_index;
uint8_t sof_type;
- uint32_t rx_xchg_address; /* Receive exchange address. */
- uint16_t reserved_2;
+ __le32 rx_xchg_address; /* Receive exchange address. */
+ __le16 reserved_2;
uint8_t opcode;
uint8_t reserved_3;
@@ -851,13 +851,13 @@ struct els_sts_entry_24xx {
uint8_t d_id[3];
uint8_t s_id[3];
- uint16_t control_flags; /* Control flags. */
- uint32_t total_byte_count;
- uint32_t error_subcode_1;
- uint32_t error_subcode_2;
- uint32_t error_subcode_3;
+ __le16 control_flags; /* Control flags. */
+ __le32 total_byte_count;
+ __le32 error_subcode_1;
+ __le32 error_subcode_2;
+ __le32 error_subcode_3;
- uint32_t reserved_4[4];
+ __le32 reserved_4[4];
};
/*
* ISP queue - Mailbox Command entry structure definition.
@@ -884,12 +884,12 @@ struct logio_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_LOGIO_ERROR 0x31 /* Login/Logout IOCB error. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
- uint16_t control_flags; /* Control flags. */
+ __le16 control_flags; /* Control flags. */
/* Modifiers. */
#define LCF_INCLUDE_SNS BIT_10 /* Include SNS (FFFFFC) during LOGO. */
#define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */
@@ -918,7 +918,7 @@ struct logio_entry_24xx {
uint8_t rsp_size; /* Response size in 32bit words. */
- uint32_t io_parameter[11]; /* General I/O parameters. */
+ __le32 io_parameter[11]; /* General I/O parameters. */
#define LSC_SCODE_NOLINK 0x01
#define LSC_SCODE_NOIOCB 0x02
#define LSC_SCODE_NOXCB 0x03
@@ -946,17 +946,17 @@ struct tsk_mgmt_entry {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
uint16_t reserved_1;
- uint16_t delay; /* Activity delay in seconds. */
+ __le16 delay; /* Activity delay in seconds. */
- uint16_t timeout; /* Command timeout. */
+ __le16 timeout; /* Command timeout. */
struct scsi_lun lun; /* FCP LUN (BE). */
- uint32_t control_flags; /* Control Flags. */
+ __le32 control_flags; /* Control Flags. */
#define TCF_NOTMCMD_TO_TARGET BIT_31
#define TCF_LUN_RESET BIT_4
#define TCF_ABORT_TASK_SET BIT_3
@@ -981,15 +981,15 @@ struct abort_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
/* or Completion status. */
- uint16_t options; /* Options. */
+ __le16 options; /* Options. */
#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */
uint32_t handle_to_abort; /* System handle to abort. */
- uint16_t req_que_no;
+ __le16 req_que_no;
uint8_t reserved_1[30];
uint8_t port_id[3]; /* PortID of destination port. */
@@ -1006,16 +1006,16 @@ struct abts_entry_24xx {
uint8_t handle_count;
uint8_t entry_status;
- uint32_t handle; /* type 0x55 only */
+ __le32 handle; /* type 0x55 only */
- uint16_t comp_status; /* type 0x55 only */
- uint16_t nport_handle; /* type 0x54 only */
+ __le16 comp_status; /* type 0x55 only */
+ __le16 nport_handle; /* type 0x54 only */
- uint16_t control_flags; /* type 0x55 only */
+ __le16 control_flags; /* type 0x55 only */
uint8_t vp_idx;
uint8_t sof_type; /* sof_type is upper nibble */
- uint32_t rx_xch_addr;
+ __le32 rx_xch_addr;
uint8_t d_id[3];
uint8_t r_ctl;
@@ -1026,30 +1026,30 @@ struct abts_entry_24xx {
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
+ __le16 rx_id;
+ __le16 ox_id;
- uint32_t param;
+ __le32 param;
union {
struct {
- uint32_t subcode3;
- uint32_t rsvd;
- uint32_t subcode1;
- uint32_t subcode2;
+ __le32 subcode3;
+ __le32 rsvd;
+ __le32 subcode1;
+ __le32 subcode2;
} error;
struct {
- uint16_t rsrvd1;
+ __le16 rsrvd1;
uint8_t last_seq_id;
uint8_t seq_id_valid;
- uint16_t aborted_rx_id;
- uint16_t aborted_ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 aborted_rx_id;
+ __le16 aborted_ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} ba_acc;
struct {
uint8_t vendor_unique;
@@ -1058,7 +1058,7 @@ struct abts_entry_24xx {
} ba_rjt;
} payload;
- uint32_t rx_xch_addr_to_abort;
+ __le32 rx_xch_addr_to_abort;
} __packed;
/* ABTS payload explanation values */
@@ -1087,7 +1087,7 @@ struct abts_entry_24xx {
* ISP I/O Register Set structure definitions.
*/
struct device_reg_24xx {
- uint32_t flash_addr; /* Flash/NVRAM BIOS address. */
+ __le32 flash_addr; /* Flash/NVRAM BIOS address. */
#define FARX_DATA_FLAG BIT_31
#define FARX_ACCESS_FLASH_CONF 0x7FFD0000
#define FARX_ACCESS_FLASH_DATA 0x7FF00000
@@ -1138,9 +1138,9 @@ struct device_reg_24xx {
#define HW_EVENT_NVRAM_CHKSUM_ERR 0xF023
#define HW_EVENT_FLASH_FW_ERR 0xF024
- uint32_t flash_data; /* Flash/NVRAM BIOS data. */
+ __le32 flash_data; /* Flash/NVRAM BIOS data. */
- uint32_t ctrl_status; /* Control/Status. */
+ __le32 ctrl_status; /* Control/Status. */
#define CSRX_FLASH_ACCESS_ERROR BIT_18 /* Flash/NVRAM Access Error. */
#define CSRX_DMA_ACTIVE BIT_17 /* DMA Active status. */
#define CSRX_DMA_SHUTDOWN BIT_16 /* DMA Shutdown control status. */
@@ -1166,35 +1166,35 @@ struct device_reg_24xx {
#define CSRX_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable. */
#define CSRX_ISP_SOFT_RESET BIT_0 /* ISP soft reset. */
- uint32_t ictrl; /* Interrupt control. */
+ __le32 ictrl; /* Interrupt control. */
#define ICRX_EN_RISC_INT BIT_3 /* Enable RISC interrupts on PCI. */
- uint32_t istatus; /* Interrupt status. */
+ __le32 istatus; /* Interrupt status. */
#define ISRX_RISC_INT BIT_3 /* RISC interrupt. */
- uint32_t unused_1[2]; /* Gap. */
+ __le32 unused_1[2]; /* Gap. */
/* Request Queue. */
- uint32_t req_q_in; /* In-Pointer. */
- uint32_t req_q_out; /* Out-Pointer. */
+ __le32 req_q_in; /* In-Pointer. */
+ __le32 req_q_out; /* Out-Pointer. */
/* Response Queue. */
- uint32_t rsp_q_in; /* In-Pointer. */
- uint32_t rsp_q_out; /* Out-Pointer. */
+ __le32 rsp_q_in; /* In-Pointer. */
+ __le32 rsp_q_out; /* Out-Pointer. */
/* Priority Request Queue. */
- uint32_t preq_q_in; /* In-Pointer. */
- uint32_t preq_q_out; /* Out-Pointer. */
+ __le32 preq_q_in; /* In-Pointer. */
+ __le32 preq_q_out; /* Out-Pointer. */
- uint32_t unused_2[2]; /* Gap. */
+ __le32 unused_2[2]; /* Gap. */
/* ATIO Queue. */
- uint32_t atio_q_in; /* In-Pointer. */
- uint32_t atio_q_out; /* Out-Pointer. */
+ __le32 atio_q_in; /* In-Pointer. */
+ __le32 atio_q_out; /* Out-Pointer. */
- uint32_t host_status;
+ __le32 host_status;
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t hccr; /* Host command & control register. */
+ __le32 hccr; /* Host command & control register. */
/* HCCR statuses. */
#define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */
#define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */
@@ -1216,7 +1216,7 @@ struct device_reg_24xx {
/* Clear RISC to PCI interrupt. */
#define HCCRX_CLR_RISC_INT 0xA0000000
- uint32_t gpiod; /* GPIO Data register. */
+ __le32 gpiod; /* GPIO Data register. */
/* LED update mask. */
#define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18)
@@ -1235,7 +1235,7 @@ struct device_reg_24xx {
/* Data in/out. */
#define GPDX_DATA_INOUT (BIT_1|BIT_0)
- uint32_t gpioe; /* GPIO Enable register. */
+ __le32 gpioe; /* GPIO Enable register. */
/* Enable update mask. */
#define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16)
/* Enable update mask. */
@@ -1243,56 +1243,56 @@ struct device_reg_24xx {
/* Enable. */
#define GPEX_ENABLE (BIT_1|BIT_0)
- uint32_t iobase_addr; /* I/O Bus Base Address register. */
-
- uint32_t unused_3[10]; /* Gap. */
-
- uint16_t mailbox0;
- uint16_t mailbox1;
- uint16_t mailbox2;
- uint16_t mailbox3;
- uint16_t mailbox4;
- uint16_t mailbox5;
- uint16_t mailbox6;
- uint16_t mailbox7;
- uint16_t mailbox8;
- uint16_t mailbox9;
- uint16_t mailbox10;
- uint16_t mailbox11;
- uint16_t mailbox12;
- uint16_t mailbox13;
- uint16_t mailbox14;
- uint16_t mailbox15;
- uint16_t mailbox16;
- uint16_t mailbox17;
- uint16_t mailbox18;
- uint16_t mailbox19;
- uint16_t mailbox20;
- uint16_t mailbox21;
- uint16_t mailbox22;
- uint16_t mailbox23;
- uint16_t mailbox24;
- uint16_t mailbox25;
- uint16_t mailbox26;
- uint16_t mailbox27;
- uint16_t mailbox28;
- uint16_t mailbox29;
- uint16_t mailbox30;
- uint16_t mailbox31;
-
- uint32_t iobase_window;
- uint32_t iobase_c4;
- uint32_t iobase_c8;
- uint32_t unused_4_1[6]; /* Gap. */
- uint32_t iobase_q;
- uint32_t unused_5[2]; /* Gap. */
- uint32_t iobase_select;
- uint32_t unused_6[2]; /* Gap. */
- uint32_t iobase_sdata;
+ __le32 iobase_addr; /* I/O Bus Base Address register. */
+
+ __le32 unused_3[10]; /* Gap. */
+
+ __le16 mailbox0;
+ __le16 mailbox1;
+ __le16 mailbox2;
+ __le16 mailbox3;
+ __le16 mailbox4;
+ __le16 mailbox5;
+ __le16 mailbox6;
+ __le16 mailbox7;
+ __le16 mailbox8;
+ __le16 mailbox9;
+ __le16 mailbox10;
+ __le16 mailbox11;
+ __le16 mailbox12;
+ __le16 mailbox13;
+ __le16 mailbox14;
+ __le16 mailbox15;
+ __le16 mailbox16;
+ __le16 mailbox17;
+ __le16 mailbox18;
+ __le16 mailbox19;
+ __le16 mailbox20;
+ __le16 mailbox21;
+ __le16 mailbox22;
+ __le16 mailbox23;
+ __le16 mailbox24;
+ __le16 mailbox25;
+ __le16 mailbox26;
+ __le16 mailbox27;
+ __le16 mailbox28;
+ __le16 mailbox29;
+ __le16 mailbox30;
+ __le16 mailbox31;
+
+ __le32 iobase_window;
+ __le32 iobase_c4;
+ __le32 iobase_c8;
+ __le32 unused_4_1[6]; /* Gap. */
+ __le32 iobase_q;
+ __le32 unused_5[2]; /* Gap. */
+ __le32 iobase_select;
+ __le32 unused_6[2]; /* Gap. */
+ __le32 iobase_sdata;
};
/* RISC-RISC semaphore register PCI offet */
#define RISC_REGISTER_BASE_OFFSET 0x7010
-#define RISC_REGISTER_WINDOW_OFFET 0x6
+#define RISC_REGISTER_WINDOW_OFFSET 0x6
/* RISC-RISC semaphore/flag register (risc address 0x7016) */
@@ -1354,8 +1354,8 @@ struct mid_conf_entry_24xx {
struct mid_init_cb_24xx {
struct init_cb_24xx init_cb;
- uint16_t count;
- uint16_t options;
+ __le16 count;
+ __le16 options;
struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
};
@@ -1389,27 +1389,27 @@ struct vp_ctrl_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t vp_idx_failed;
+ __le16 vp_idx_failed;
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquireing ID. */
#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
- uint16_t command;
+ __le16 command;
#define VCE_COMMAND_ENABLE_VPS 0x00 /* Enable VPs. */
#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
- uint16_t vp_count;
+ __le16 vp_count;
uint8_t vp_idx_map[16];
- uint16_t flags;
- uint16_t id;
+ __le16 flags;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[24];
};
@@ -1425,12 +1425,12 @@ struct vp_config_entry_24xx {
uint32_t handle; /* System handle. */
- uint16_t flags;
+ __le16 flags;
#define CS_VF_BIND_VPORTS_TO_VF BIT_0
#define CS_VF_SET_QOS_OF_VPORTS BIT_1
#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
- uint16_t comp_status; /* Completion status. */
+ __le16 comp_status; /* Completion status. */
#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
#define CS_VCT_CNT_ERROR 0x02 /* Invalid VP count. */
#define CS_VCT_ERROR 0x03 /* Unknown error. */
@@ -1457,9 +1457,9 @@ struct vp_config_entry_24xx {
uint16_t reserved_vp2;
uint8_t port_name_idx2[WWN_SIZE];
uint8_t node_name_idx2[WWN_SIZE];
- uint16_t id;
+ __le16 id;
uint16_t reserved_4;
- uint16_t hopct;
+ __le16 hopct;
uint8_t reserved_5[2];
};
@@ -1486,7 +1486,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t resv1;
+ __le32 resv1;
uint8_t vp_acquired;
uint8_t vp_setup;
uint8_t vp_idx; /* Format 0=reserved */
@@ -1550,15 +1550,15 @@ struct vf_evfp_entry_24xx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t comp_status; /* Completion status. */
- uint16_t timeout; /* timeout */
- uint16_t adim_tagging_mode;
+ __le16 comp_status; /* Completion status. */
+ __le16 timeout; /* timeout */
+ __le16 adim_tagging_mode;
- uint16_t vfport_id;
+ __le16 vfport_id;
uint32_t exch_addr;
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t control_flags;
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 control_flags;
uint32_t io_parameter_0;
uint32_t io_parameter_1;
__le64 tx_address __packed; /* Data segment 0 address. */
@@ -1573,13 +1573,13 @@ struct vf_evfp_entry_24xx {
struct qla_fdt_layout {
uint8_t sig[4];
- uint16_t version;
- uint16_t len;
- uint16_t checksum;
+ __le16 version;
+ __le16 len;
+ __le16 checksum;
uint8_t unused1[2];
uint8_t model[16];
- uint16_t man_id;
- uint16_t id;
+ __le16 man_id;
+ __le16 id;
uint8_t flags;
uint8_t erase_cmd;
uint8_t alt_erase_cmd;
@@ -1588,15 +1588,15 @@ struct qla_fdt_layout {
uint8_t wrt_sts_reg_cmd;
uint8_t unprotect_sec_cmd;
uint8_t read_man_id_cmd;
- uint32_t block_size;
- uint32_t alt_block_size;
- uint32_t flash_size;
- uint32_t wrt_enable_data;
+ __le32 block_size;
+ __le32 alt_block_size;
+ __le32 flash_size;
+ __le32 wrt_enable_data;
uint8_t read_id_addr_len;
uint8_t wrt_disable_bits;
uint8_t read_dev_id_len;
uint8_t chip_erase_cmd;
- uint16_t read_timeout;
+ __le16 read_timeout;
uint8_t protect_sec_cmd;
uint8_t unused2[65];
};
@@ -1605,11 +1605,11 @@ struct qla_fdt_layout {
struct qla_flt_location {
uint8_t sig[4];
- uint16_t start_lo;
- uint16_t start_hi;
+ __le16 start_lo;
+ __le16 start_hi;
uint8_t version;
uint8_t unused[5];
- uint16_t checksum;
+ __le16 checksum;
};
#define FLT_REG_FW 0x01
@@ -1664,19 +1664,19 @@ struct qla_flt_location {
#define FLT_REG_PEP_SEC_28XX 0xF1
struct qla_flt_region {
- uint16_t code;
+ __le16 code;
uint8_t attribute;
uint8_t reserved;
- uint32_t size;
- uint32_t start;
- uint32_t end;
+ __le32 size;
+ __le32 start;
+ __le32 end;
};
struct qla_flt_header {
- uint16_t version;
- uint16_t length;
- uint16_t checksum;
- uint16_t unused;
+ __le16 version;
+ __le16 length;
+ __le16 checksum;
+ __le16 unused;
struct qla_flt_region region[0];
};
@@ -1688,18 +1688,18 @@ struct qla_flt_header {
struct qla_npiv_header {
uint8_t sig[2];
- uint16_t version;
- uint16_t entries;
- uint16_t unused[4];
- uint16_t checksum;
+ __le16 version;
+ __le16 entries;
+ __le16 unused[4];
+ __le16 checksum;
};
struct qla_npiv_entry {
- uint16_t flags;
- uint16_t vf_id;
+ __le16 flags;
+ __le16 vf_id;
uint8_t q_qos;
uint8_t f_qos;
- uint16_t unused1;
+ __le16 unused1;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
};
@@ -1729,7 +1729,7 @@ struct verify_chip_entry_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define VCO_DONT_UPDATE_FW BIT_0
#define VCO_FORCE_UPDATE BIT_1
#define VCO_DONT_RESET_UPDATE BIT_2
@@ -1737,18 +1737,18 @@ struct verify_chip_entry_84xx {
#define VCO_END_OF_DATA BIT_14
#define VCO_ENABLE_DSD BIT_15
- uint16_t reserved_1;
+ __le16 reserved_1;
- uint16_t data_seg_cnt;
- uint16_t reserved_2[3];
+ __le16 data_seg_cnt;
+ __le16 reserved_2[3];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_3[3];
- uint32_t fw_size;
- uint32_t fw_seq_size;
- uint32_t relative_offset;
+ __le32 reserved_3[3];
+ __le32 fw_size;
+ __le32 fw_seq_size;
+ __le32 relative_offset;
struct dsd64 dsd;
};
@@ -1761,22 +1761,22 @@ struct verify_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
+ __le16 comp_status;
#define CS_VCS_CHIP_FAILURE 0x3
#define CS_VCS_BAD_EXCHANGE 0x8
#define CS_VCS_SEQ_COMPLETEi 0x40
- uint16_t failure_code;
+ __le16 failure_code;
#define VFC_CHECKSUM_ERROR 0x1
#define VFC_INVALID_LEN 0x2
#define VFC_ALREADY_IN_PROGRESS 0x8
- uint16_t reserved_1[4];
+ __le16 reserved_1[4];
- uint32_t fw_ver;
- uint32_t exchange_address;
+ __le32 fw_ver;
+ __le32 exchange_address;
- uint32_t reserved_2[6];
+ __le32 reserved_2[6];
};
#define ACCESS_CHIP_IOCB_TYPE 0x2B
@@ -1788,24 +1788,24 @@ struct access_chip_84xx {
uint32_t handle;
- uint16_t options;
+ __le16 options;
#define ACO_DUMP_MEMORY 0x0
#define ACO_LOAD_MEMORY 0x1
#define ACO_CHANGE_CONFIG_PARAM 0x2
#define ACO_REQUEST_INFO 0x3
- uint16_t reserved1;
+ __le16 reserved1;
- uint16_t dseg_count;
- uint16_t reserved2[3];
+ __le16 dseg_count;
+ __le16 reserved2[3];
- uint32_t parameter1;
- uint32_t parameter2;
- uint32_t parameter3;
+ __le32 parameter1;
+ __le32 parameter2;
+ __le32 parameter3;
- uint32_t reserved3[3];
- uint32_t total_byte_cnt;
- uint32_t reserved4;
+ __le32 reserved3[3];
+ __le32 total_byte_cnt;
+ __le32 reserved4;
struct dsd64 dsd;
};
@@ -1818,11 +1818,11 @@ struct access_chip_rsp_84xx {
uint32_t handle;
- uint16_t comp_status;
- uint16_t failure_code;
- uint32_t residual_count;
+ __le16 comp_status;
+ __le16 failure_code;
+ __le32 residual_count;
- uint32_t reserved[12];
+ __le32 reserved[12];
};
/* 81XX Support **************************************************************/
@@ -1877,52 +1877,52 @@ struct access_chip_rsp_84xx {
struct nvram_81xx {
/* NVRAM header. */
uint8_t id[4];
- uint16_t nvram_version;
- uint16_t reserved_0;
+ __le16 nvram_version;
+ __le16 reserved_0;
/* Firmware Initialization Control Block. */
- uint16_t version;
- uint16_t reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
- uint16_t reserved_2;
+ __le16 version;
+ __le16 reserved_1;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint16_t login_retry_count;
- uint16_t reserved_3;
- uint16_t interrupt_delay_timer;
- uint16_t login_timeout;
+ __le16 login_retry_count;
+ __le16 reserved_3;
+ __le16 interrupt_delay_timer;
+ __le16 login_timeout;
- uint32_t firmware_options_1;
- uint32_t firmware_options_2;
- uint32_t firmware_options_3;
+ __le32 firmware_options_1;
+ __le32 firmware_options_2;
+ __le32 firmware_options_3;
- uint16_t reserved_4[4];
+ __le16 reserved_4[4];
/* Offset 64. */
uint8_t enode_mac[6];
- uint16_t reserved_5[5];
+ __le16 reserved_5[5];
/* Offset 80. */
- uint16_t reserved_6[24];
+ __le16 reserved_6[24];
/* Offset 128. */
- uint16_t ex_version;
+ __le16 ex_version;
uint8_t prio_fcf_matching_flags;
uint8_t reserved_6_1[3];
- uint16_t pri_fcf_vlan_id;
+ __le16 pri_fcf_vlan_id;
uint8_t pri_fcf_fabric_name[8];
- uint16_t reserved_6_2[7];
+ __le16 reserved_6_2[7];
uint8_t spma_mac_addr[6];
- uint16_t reserved_6_3[14];
+ __le16 reserved_6_3[14];
/* Offset 192. */
uint8_t min_supported_speed;
uint8_t reserved_7_0;
- uint16_t reserved_7[31];
+ __le16 reserved_7[31];
/*
* BIT 0 = Enable spinup delay
@@ -1955,26 +1955,26 @@ struct nvram_81xx {
* BIT 25 = Temp WWPN
* BIT 26-31 =
*/
- uint32_t host_p;
+ __le32 host_p;
uint8_t alternate_port_name[WWN_SIZE];
uint8_t alternate_node_name[WWN_SIZE];
uint8_t boot_port_name[WWN_SIZE];
- uint16_t boot_lun_number;
- uint16_t reserved_8;
+ __le16 boot_lun_number;
+ __le16 reserved_8;
uint8_t alt1_boot_port_name[WWN_SIZE];
- uint16_t alt1_boot_lun_number;
- uint16_t reserved_9;
+ __le16 alt1_boot_lun_number;
+ __le16 reserved_9;
uint8_t alt2_boot_port_name[WWN_SIZE];
- uint16_t alt2_boot_lun_number;
- uint16_t reserved_10;
+ __le16 alt2_boot_lun_number;
+ __le16 reserved_10;
uint8_t alt3_boot_port_name[WWN_SIZE];
- uint16_t alt3_boot_lun_number;
- uint16_t reserved_11;
+ __le16 alt3_boot_lun_number;
+ __le16 reserved_11;
/*
* BIT 0 = Selective Login
@@ -1986,35 +1986,35 @@ struct nvram_81xx {
* BIT 6 = Reserved
* BIT 7-31 =
*/
- uint32_t efi_parameters;
+ __le32 efi_parameters;
uint8_t reset_delay;
uint8_t reserved_12;
- uint16_t reserved_13;
+ __le16 reserved_13;
- uint16_t boot_id_number;
- uint16_t reserved_14;
+ __le16 boot_id_number;
+ __le16 reserved_14;
- uint16_t max_luns_per_target;
- uint16_t reserved_15;
+ __le16 max_luns_per_target;
+ __le16 reserved_15;
- uint16_t port_down_retry_count;
- uint16_t link_down_timeout;
+ __le16 port_down_retry_count;
+ __le16 link_down_timeout;
/* FCode parameters. */
- uint16_t fcode_parameter;
+ __le16 fcode_parameter;
- uint16_t reserved_16[3];
+ __le16 reserved_16[3];
/* Offset 352. */
uint8_t reserved_17[4];
- uint16_t reserved_18[5];
+ __le16 reserved_18[5];
uint8_t reserved_19[2];
- uint16_t reserved_20[8];
+ __le16 reserved_20[8];
/* Offset 384. */
uint8_t reserved_21[16];
- uint16_t reserved_22[3];
+ __le16 reserved_22[3];
/* Offset 406 (0x196) Enhanced Features
* BIT 0 = Extended BB credits for LR
@@ -2027,20 +2027,20 @@ struct nvram_81xx {
uint16_t reserved_24[4];
/* Offset 416. */
- uint16_t reserved_25[32];
+ __le16 reserved_25[32];
/* Offset 480. */
uint8_t model_name[16];
/* Offset 496. */
- uint16_t feature_mask_l;
- uint16_t feature_mask_h;
- uint16_t reserved_26[2];
+ __le16 feature_mask_l;
+ __le16 feature_mask_h;
+ __le16 reserved_26[2];
- uint16_t subsystem_vendor_id;
- uint16_t subsystem_device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_device_id;
- uint32_t checksum;
+ __le32 checksum;
};
/*
@@ -2049,31 +2049,31 @@ struct nvram_81xx {
*/
#define ICB_VERSION 1
struct init_cb_81xx {
- uint16_t version;
- uint16_t reserved_1;
+ __le16 version;
+ __le16 reserved_1;
- uint16_t frame_payload_size;
- uint16_t execution_throttle;
- uint16_t exchange_count;
+ __le16 frame_payload_size;
+ __le16 execution_throttle;
+ __le16 exchange_count;
- uint16_t reserved_2;
+ __le16 reserved_2;
uint8_t port_name[WWN_SIZE]; /* Big endian. */
uint8_t node_name[WWN_SIZE]; /* Big endian. */
- uint16_t response_q_inpointer;
- uint16_t request_q_outpointer;
+ __le16 response_q_inpointer;
+ __le16 request_q_outpointer;
- uint16_t login_retry_count;
+ __le16 login_retry_count;
- uint16_t prio_request_q_outpointer;
+ __le16 prio_request_q_outpointer;
- uint16_t response_q_length;
- uint16_t request_q_length;
+ __le16 response_q_length;
+ __le16 request_q_length;
- uint16_t reserved_3;
+ __le16 reserved_3;
- uint16_t prio_request_q_length;
+ __le16 prio_request_q_length;
__le64 request_q_address __packed;
__le64 response_q_address __packed;
@@ -2081,12 +2081,12 @@ struct init_cb_81xx {
uint8_t reserved_4[8];
- uint16_t atio_q_inpointer;
- uint16_t atio_q_length;
+ __le16 atio_q_inpointer;
+ __le16 atio_q_length;
__le64 atio_q_address __packed;
- uint16_t interrupt_delay_timer; /* 100us increments. */
- uint16_t login_timeout;
+ __le16 interrupt_delay_timer; /* 100us increments. */
+ __le16 login_timeout;
/*
* BIT 0-3 = Reserved
@@ -2099,7 +2099,7 @@ struct init_cb_81xx {
* BIT 14 = Node Name Option
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_1;
+ __le32 firmware_options_1;
/*
* BIT 0 = Operation Mode bit 0
@@ -2117,7 +2117,7 @@ struct init_cb_81xx {
* BIT 14 = Enable Target PRLI Control
* BIT 15-31 = Reserved
*/
- uint32_t firmware_options_2;
+ __le32 firmware_options_2;
/*
* BIT 0-3 = Reserved
@@ -2138,7 +2138,7 @@ struct init_cb_81xx {
* BIT 28 = SPMA selection bit 1
* BIT 30-31 = Reserved
*/
- uint32_t firmware_options_3;
+ __le32 firmware_options_3;
uint8_t reserved_5[8];
@@ -2216,9 +2216,9 @@ struct qla_fcp_prio_cfg {
#define FCP_PRIO_ATTR_ENABLE 0x1
#define FCP_PRIO_ATTR_PERSIST 0x2
uint8_t reserved; /* Reserved for future use */
-#define FCP_PRIO_CFG_HDR_SIZE 0x10
- struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */
-#define FCP_PRIO_CFG_ENTRY_SIZE 0x20
+#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry)
+ struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */
+ uint8_t reserved2[16];
};
#define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/
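
The device_reg_24xx conversion above types the MMIO registers as __le16/__le32 __iomem, and the .c hunks later in the patch swap the RD_REG_*/WRT_REG_* macros for lowercase rd_reg_*/wrt_reg_* helpers. A plausible sketch of what accessors of that shape wrap; the driver's real definitions live in its own headers and are not part of these hunks:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative only: readw/readl already return CPU-order values for
 * little-endian MMIO, so typed __le16/__le32 __iomem pointers document
 * the register width and byte order without changing the generated code. */
static inline u16 demo_rd_reg_word(const __le16 __iomem *addr)
{
	return readw(addr);
}

static inline void demo_wrt_reg_word(__le16 __iomem *addr, u16 data)
{
	writew(data, addr);
}

static inline u32 demo_rd_reg_dword(const __le32 __iomem *addr)
{
	return readl(addr);
}

static inline void demo_wrt_reg_dword(__le32 __iomem *addr, u32 data)
{
	writel(data, addr);
}
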
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1b93f5b4d77d..061f91b521b3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -173,6 +173,7 @@ extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
+extern int ql2xfulldump_on_mpifail;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -636,15 +637,17 @@ extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_dbg.c source file.
*/
-extern void qla2100_fw_dump(scsi_qla_host_t *, int);
-extern void qla2300_fw_dump(scsi_qla_host_t *, int);
-extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
-extern void qla8044_fw_dump(scsi_qla_host_t *, int);
-
-extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+void qla2xxx_dump_fw(scsi_qla_host_t *vha);
+void qla2100_fw_dump(scsi_qla_host_t *vha);
+void qla2300_fw_dump(scsi_qla_host_t *vha);
+void qla24xx_fw_dump(scsi_qla_host_t *vha);
+void qla25xx_fw_dump(scsi_qla_host_t *vha);
+void qla81xx_fw_dump(scsi_qla_host_t *vha);
+void qla82xx_fw_dump(scsi_qla_host_t *vha);
+void qla8044_fw_dump(scsi_qla_host_t *vha);
+
+void qla27xx_fwdump(scsi_qla_host_t *vha);
+extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int);
extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
extern int qla27xx_fwdt_template_valid(void *);
extern ulong qla27xx_fwdt_template_size(void *);
@@ -769,7 +772,7 @@ extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t);
-extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t,
uint32_t *, int);
extern uint32_t qlafx00_fw_state_show(struct device *,
@@ -871,7 +874,7 @@ extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
uint16_t *, uint16_t *);
/* 83xx related functions */
-extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
+void qla83xx_fw_dump(scsi_qla_host_t *vha);
/* Minidump related functions */
extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
@@ -933,5 +936,6 @@ extern void qla24xx_process_purex_list(struct purex_list *);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+void qla27xx_reset_mpi(scsi_qla_host_t *vha);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
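
The qla_init.c hunks below mechanically replace WRT_REG_*/RD_REG_* with the lowercase accessors while keeping the existing "write, then read back to flush the posted PCI write" idiom under the hardware lock. A self-contained sketch of that idiom, using a made-up register block and bit value; only the access pattern reflects the code that follows:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical register block and bit definition. */
struct demo_regs {
	__le16 hccr;
};

#define DEMO_HCCR_PAUSE_RISC 0x2000

static void demo_pause_risc(struct demo_regs __iomem *reg, spinlock_t *hw_lock)
{
	unsigned long flags;

	spin_lock_irqsave(hw_lock, flags);
	writew(DEMO_HCCR_PAUSE_RISC, &reg->hccr);
	readw(&reg->hccr);	/* read back to flush the posted write */
	spin_unlock_irqrestore(hw_lock, flags);
}
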
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index caa6b840e459..4576d3ae9937 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -120,7 +120,7 @@ static void qla24xx_abort_iocb_timeout(void *data)
if (sp->cmd_sp)
sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
- abt->u.abt.comp_status = CS_TIMEOUT;
+ abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
@@ -992,7 +992,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
ql_dbg(ql_dbg_disc, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
- __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ __func__, &wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
(loop_id & 0x7fff));
}
@@ -1343,7 +1343,7 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx->u.mbx.in = (void *)pd;
+ mbx->u.mbx.in = pd;
mbx->u.mbx.in_dma = pd_dma;
sp->done = qla24xx_async_gpdb_sp_done;
@@ -1791,7 +1791,7 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
- tmf->u.tmf.comp_status = CS_TIMEOUT;
+ tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
tmf->u.tmf.data = QLA_FUNCTION_FAILED;
complete(&tmf->u.tmf.comp);
}
@@ -2219,7 +2219,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Check for secure flash support */
if (IS_QLA28XX(ha)) {
- if (RD_REG_DWORD(&reg->mailbox12) & BIT_0)
+ if (rd_reg_word(&reg->mailbox12) & BIT_0)
ha->flags.secure_adapter = 1;
ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
(ha->flags.secure_adapter) ? "Yes" : "No");
@@ -2357,7 +2357,7 @@ qla2100_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2399,17 +2399,17 @@ qla2300_pci_config(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status);
/* Get the fb rev level */
ha->fb_rev = RD_FB_CMD_REG(ha, reg);
@@ -2418,13 +2418,13 @@ qla2300_pci_config(scsi_qla_host_t *vha)
pci_clear_mwi(ha->pdev);
/* Deselect FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x0);
- RD_REG_WORD(&reg->ctrl_status);
+ wrt_reg_word(&reg->ctrl_status, 0x0);
+ rd_reg_word(&reg->ctrl_status);
/* Release RISC module. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
break;
udelay(10);
@@ -2439,7 +2439,7 @@ qla2300_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_word(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2483,7 +2483,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
/* Get PCI bus information. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
+ ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -2587,36 +2587,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
if (!IS_QLA2100(ha)) {
/* Pause RISC. */
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) &
+ if ((rd_reg_word(&reg->hccr) &
HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
} else {
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
udelay(10);
}
/* Select FPM registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x20);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x20);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* FPM Soft Reset. */
- WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x100);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
/* Toggle Fpm Reset. */
if (!IS_QLA2200(ha)) {
- WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
- RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
+ wrt_reg_word(&reg->fpm_diag_config, 0x0);
+ rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
}
/* Select frame buffer registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0x10);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0x10);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset frame buffer FIFOs. */
if (IS_QLA2200(ha)) {
@@ -2634,23 +2634,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
}
/* Select RISC module registers. */
- WRT_REG_WORD(&reg->ctrl_status, 0);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, 0);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/* Wait for RISC to recover from reset. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -2661,7 +2661,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*/
udelay(20);
for (cnt = 30000; cnt; cnt--) {
- if ((RD_REG_WORD(&reg->ctrl_status) &
+ if ((rd_reg_word(&reg->ctrl_status) &
CSR_ISP_SOFT_RESET) == 0)
break;
udelay(100);
@@ -2670,13 +2670,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
udelay(10);
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
/* Release RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
@@ -2694,8 +2694,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
/* Disable RISC pause on FPM parity error. */
if (!IS_QLA2100(ha)) {
- WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2740,32 +2740,32 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset RISC. */
- WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
+ wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
+ if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
break;
udelay(10);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
"HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status),
- (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status),
+ (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
- WRT_REG_DWORD(&reg->ctrl_status,
+ wrt_reg_dword(&reg->ctrl_status,
CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
udelay(100);
/* Wait for firmware to complete NVRAM accesses. */
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2779,26 +2779,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
"HCCR: 0x%x, MailBox0 Status 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
/* Wait for soft-reset to complete. */
- RD_REG_DWORD(&reg->ctrl_status);
+ rd_reg_dword(&reg->ctrl_status);
for (cnt = 0; cnt < 60; cnt++) {
barrier();
- if ((RD_REG_DWORD(&reg->ctrl_status) &
+ if ((rd_reg_dword(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
break;
udelay(5);
}
- if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+ if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
"HCCR: 0x%x, Soft Reset status: 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_DWORD(&reg->ctrl_status));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_dword(&reg->ctrl_status));
/* If required, do an MPI FW reset now */
if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
@@ -2817,17 +2817,17 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
}
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
- RD_REG_WORD(&reg->mailbox0);
- for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ rd_reg_word(&reg->mailbox0);
+ for (cnt = 60; rd_reg_word(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
@@ -2840,8 +2840,8 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
"Host Risc 0x%x, mailbox0 0x%x\n",
- RD_REG_DWORD(&reg->hccr),
- RD_REG_WORD(&reg->mailbox0));
+ rd_reg_dword(&reg->hccr),
+ rd_reg_word(&reg->mailbox0));
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2860,9 +2860,8 @@ qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
-
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
}
static void
@@ -2870,8 +2869,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
{
struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
- WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
- WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+ wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
}
static void
@@ -2887,7 +2886,7 @@ qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
vha->hw->pdev->subsystem_device != 0x0240)
return;
- WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+ wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
udelay(100);
attempt:
@@ -2989,7 +2988,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
/*
* We need to have a delay here since the card will not respond while
@@ -2999,7 +2998,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
data = qla2x00_debounce_register(&reg->ctrl_status);
for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
udelay(5);
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
barrier();
}
@@ -3010,8 +3009,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
"Reset register cleared by chip reset.\n");
/* Reset RISC processor. */
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
/* Workaround for QLA2312 PCI parity error */
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
@@ -3339,6 +3338,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
dump_size / 1024);
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->mpi_fw_dump = (char *)fw_dump +
+ ha->fwdt[1].dump_size;
mutex_unlock(&ha->optrom_mutex);
return;
}
@@ -3650,8 +3651,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
/* Disable SRAM, Instruction RAM and GP RAM parity. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -3758,11 +3759,11 @@ enable_82xx_npiv:
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2300(ha))
/* SRAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
else
/* SRAM, Instruction RAM and GP RAM parity */
- WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+ rd_reg_word(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -4006,11 +4007,11 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
- RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
+ rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
}
void
@@ -4072,15 +4073,15 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
}
icb->firmware_options_2 |= cpu_to_le32(BIT_23);
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
} else {
- WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
+ wrt_reg_dword(&reg->isp24.req_q_in, 0);
+ wrt_reg_dword(&reg->isp24.req_q_out, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
+ wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
}
qlt_24xx_config_rings(vha);
@@ -4090,11 +4091,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
ql_dbg(ql_dbg_init, vha, 0x00fd,
"Speed set by user : %s Gbps \n",
qla2x00_get_link_speed_str(ha, ha->set_data_rate));
- icb->firmware_options_3 = (ha->set_data_rate << 13);
+ icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
}
/* PCI posting */
- RD_REG_DWORD(&ioreg->hccr);
+ rd_reg_word(&ioreg->hccr);
}
/**
@@ -4125,7 +4126,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req || !test_bit(que, ha->req_qid_map))
continue;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
*req->out_ptr = 0;
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
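This hunk only tightens the type of req->out_ptr, but the pointer itself is the shadow copy of the request-queue out index laid out immediately after the ring; on adapters where IS_SHADOW_REG_CAPABLE() is true, the I/O-path hunks further down read *req->out_ptr from host memory (which the firmware is expected to keep current) instead of doing an MMIO read of req_q_out. A rough sketch of that layout assumption, with invented names:

	struct shadow_req_example {
		void *ring;		/* request IOCB ring */
		u16 length;		/* number of IOCB slots in the ring */
		u16 *out_ptr;		/* shadow out-index, placed right after the ring */
	};

	static void shadow_req_init_example(struct shadow_req_example *req, size_t iocb_size)
	{
		/* the shadow index lives immediately past the last ring entry */
		req->out_ptr = (u16 *)((char *)req->ring + (size_t)req->length * iocb_size);
		*req->out_ptr = 0;
	}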
@@ -4142,7 +4143,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rsp = ha->rsp_q_map[que];
if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
/* Initialize response queue entries */
if (IS_QLAFX00(ha))
@@ -4181,12 +4182,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
mid_init_cb->init_cb.execution_throttle =
cpu_to_le16(ha->cur_fw_xcb_count);
ha->flags.dport_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_7) != 0;
ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
(ha->flags.dport_enabled) ? "enabled" : "disabled");
/* FA-WWPN Status */
ha->flags.fawwpn_enabled =
- (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
+ (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
+ BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
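These two hunks are the recurring byte-order fix in this series: firmware_options_1 is stored little-endian by the firmware, so masking BIT_7/BIT_6 directly only happens to work on little-endian hosts and is flagged by sparse once the field is annotated __le32. A hedged sketch of the idiom with invented names:

	struct init_cb_example {
		__le32 firmware_options_1;	/* little-endian, as the firmware writes it */
	};

	static bool dport_enabled_example(const struct init_cb_example *icb)
	{
		/* convert to CPU order before testing bits */
		return (le32_to_cpu(icb->firmware_options_1) & BIT(7)) != 0;
	}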
@@ -4565,7 +4568,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
ha->nvram_size = sizeof(*nv);
ha->nvram_base = 0;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
- if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
+ if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
ha->nvram_base = 0x80;
/* Get NVRAM data and calculate checksum. */
@@ -5079,6 +5082,54 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ fc_port_t *fcport;
+ int rval;
+
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ /* borrowing */
+ u32 *bp, sz;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct els_plogi_payload),
+ ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ ha->init_cb, sz);
+ if (rval == QLA_SUCCESS) {
+ __be32 *q = &ha->plogi_els_payld.data[0];
+
+ bp = (uint32_t *)ha->init_cb;
+ cpu_to_be32_array(q, bp, sz / 4);
+ memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
+ } else {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ goto skip_login;
+ }
+ }
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->n2n_flag) {
+ qla24xx_fcport_handle_login(vha, fcport);
+ return QLA_SUCCESS;
+ }
+ }
+
+skip_login:
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ return QLA_FUNCTION_FAILED;
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5096,7 +5147,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
int found_devs;
int found;
fc_port_t *fcport, *new_fcport;
-
uint16_t index;
uint16_t entries;
struct gid_list_info *gid;
@@ -5106,47 +5156,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
unsigned long flags;
/* Initiate N2N login. */
- if (N2N_TOPO(ha)) {
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha,
- ha->init_cb_dma, (void *)ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- __be32 *q = &ha->plogi_els_payld.data[0];
-
- bp = (uint32_t *)ha->init_cb;
- cpu_to_be32_array(q, bp, sz / 4);
-
- memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
- goto skip_login;
- }
- }
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->n2n_flag) {
- qla24xx_fcport_handle_login(vha, fcport);
- return QLA_SUCCESS;
- }
- }
-skip_login:
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- }
- return QLA_FUNCTION_FAILED;
- }
+ if (N2N_TOPO(ha))
+ return qla2x00_configure_n2n_loop(vha);
found_devs = 0;
new_fcport = NULL;
@@ -7078,10 +7089,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
- WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
- RD_REG_WORD(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
+ wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
+ rd_reg_word(&reg->hccr); /* PCI Posting. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -7102,10 +7113,10 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
- RD_REG_DWORD(&reg->hccr);
- WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
- RD_REG_DWORD(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
+ rd_reg_dword(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
+ rd_reg_dword(&reg->hccr);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (IS_NOPOLLING_TYPE(ha))
@@ -7143,7 +7154,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_24xx *icb;
struct nvram_24xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -7171,7 +7182,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
/* Get NVRAM data into cache and calculate checksum. */
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -7199,7 +7210,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->hard_address = cpu_to_le16(124);
@@ -7367,7 +7378,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
ha->login_retry_count = ql2xloginretrycount;
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Enable ZIO. */
if (!vha->flags.init_done) {
@@ -7435,7 +7446,7 @@ qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
{
- uint32_t *p = (void *)image_status;
+ __le32 *p = (__force __le32 *)image_status;
uint n = sizeof(*image_status) / sizeof(*p);
uint32_t sum = 0;
@@ -7498,7 +7509,7 @@ qla28xx_get_aux_images(
goto check_sec_image;
}
- qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
ha->flt_region_aux_img_status_pri,
sizeof(pri_aux_image_status) >> 2);
qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
@@ -7531,7 +7542,7 @@ check_sec_image:
goto check_valid_image;
}
- qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
+ qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
ha->flt_region_aux_img_status_sec,
sizeof(sec_aux_image_status) >> 2);
qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
@@ -7596,7 +7607,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_sec_image;
}
- if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+ if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
QLA_SUCCESS) {
WARN_ON_ONCE(true);
@@ -7703,7 +7714,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
ql_dbg(ql_dbg_init, vha, 0x008b,
"FW: Loading firmware from flash (%x).\n", faddr);
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x008c,
@@ -7716,18 +7727,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
ql_dbg(ql_dbg_init, vha, 0x008d,
"-> Loading segment %u...\n", j);
qla24xx_read_flash_data(vha, dcode, faddr, 10);
- risc_addr = be32_to_cpu(dcode[2]);
- risc_size = be32_to_cpu(dcode[3]);
+ risc_addr = be32_to_cpu((__force __be32)dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[3]);
if (!*srisc_addr) {
*srisc_addr = risc_addr;
- risc_attr = be32_to_cpu(dcode[9]);
+ risc_attr = be32_to_cpu((__force __be32)dcode[9]);
}
dlen = ha->fw_transfer_size >> 2;
@@ -7767,9 +7778,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
fwdt->template = NULL;
fwdt->length = 0;
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
qla24xx_read_flash_data(vha, dcode, faddr, 7);
- risc_size = be32_to_cpu(dcode[2]);
+ risc_size = be32_to_cpu((__force __be32)dcode[2]);
ql_dbg(ql_dbg_init, vha, 0x0161,
"-> fwdt%u template array at %#x (%#x dwords)\n",
j, faddr, risc_size);
@@ -7838,7 +7849,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
int rval;
int i, fragment;
- uint16_t *wcode, *fwcode;
+ uint16_t *wcode;
+ __be16 *fwcode;
uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
struct fw_blob *blob;
struct qla_hw_data *ha = vha->hw;
@@ -7858,7 +7870,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wcode = (uint16_t *)req->ring;
*srisc_addr = 0;
- fwcode = (uint16_t *)blob->fw->data;
+ fwcode = (__force __be16 *)blob->fw->data;
fwclen = 0;
/* Validate firmware image by checking version. */
@@ -7906,7 +7918,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++)
- wcode[i] = swab16(fwcode[i]);
+ wcode[i] = swab16((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
@@ -7943,7 +7955,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
ulong i;
uint j;
struct fw_blob *blob;
- uint32_t *fwcode;
+ __be32 *fwcode;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct fwdt *fwdt = ha->fwdt;
@@ -7959,8 +7971,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- fwcode = (void *)blob->fw->data;
- dcode = fwcode;
+ fwcode = (__force __be32 *)blob->fw->data;
+ dcode = (__force uint32_t *)fwcode;
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_fatal, vha, 0x0093,
"Unable to verify integrity of firmware image (%zd).\n",
@@ -7971,7 +7983,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- dcode = (void *)req->ring;
+ dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
segments = FA_RISC_CODE_SEGMENTS;
for (j = 0; j < segments; j++) {
@@ -7997,7 +8009,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dlen);
for (i = 0; i < dlen; i++)
- dcode[i] = swab32(fwcode[i]);
+ dcode[i] = swab32((__force u32)fwcode[i]);
rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
if (rval) {
@@ -8051,7 +8063,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode = fwdt->template;
for (i = 0; i < risc_size; i++)
- dcode[i] = fwcode[i];
+ dcode[i] = (__force u32)fwcode[i];
if (!qla27xx_fwdt_template_valid(dcode)) {
ql_log(ql_log_warn, vha, 0x0175,
@@ -8322,7 +8334,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
int rval;
struct init_cb_81xx *icb;
struct nvram_81xx *nv;
- uint32_t *dptr;
+ __le32 *dptr;
uint8_t *dptr1, *dptr2;
uint32_t chksum;
uint16_t cnt;
@@ -8369,7 +8381,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
"primary" : "secondary");
ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
- dptr = (uint32_t *)nv;
+ dptr = (__force __le32 *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
chksum += le32_to_cpu(*dptr);
@@ -8396,7 +8408,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
memset(nv, 0, ha->nvram_size);
nv->nvram_version = cpu_to_le16(ICB_VERSION);
nv->version = cpu_to_le16(ICB_VERSION);
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->execution_throttle = cpu_to_le16(0xFFFF);
nv->exchange_count = cpu_to_le16(0);
nv->port_name[0] = 0x21;
@@ -8440,7 +8452,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
if (IS_T10_PI_CAPABLE(ha))
- nv->frame_payload_size &= ~7;
+ nv->frame_payload_size &= cpu_to_le16(~7);
qlt_81xx_config_nvram_stage1(vha, nv);
@@ -8603,10 +8615,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
/* enable RIDA Format2 */
- icb->firmware_options_3 |= BIT_0;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_0);
/* N2N: driver will initiate Login instead of FW */
- icb->firmware_options_3 |= BIT_8;
+ icb->firmware_options_3 |= cpu_to_le32(BIT_8);
/* Determine NVMe/FCP priority for target ports */
ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
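Throughout qla_init.c the patch swaps the upper-case WRT_REG_*/RD_REG_* macros for lower-case helpers. A minimal sketch of the shape such helpers are assumed to take -- thin wrappers over readw()/writel() whose pointer type carries the __le16/__le32 annotation, so sparse can catch callers that skip a cpu_to_le*()/le*_to_cpu() conversion; the driver's real definitions live in its headers and may differ:

	static inline u16 rd_reg_word_example(const volatile __le16 __iomem *addr)
	{
		return readw(addr);
	}

	static inline void wrt_reg_dword_example(volatile __le32 __iomem *addr, u32 data)
	{
		writel(data, addr);
	}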
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 364b3db8b2dc..1fb6ccac07cc 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -40,16 +40,16 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
* register value.
*/
static __inline__ uint16_t
-qla2x00_debounce_register(volatile uint16_t __iomem *addr)
+qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
volatile uint16_t first;
volatile uint16_t second;
do {
- first = RD_REG_WORD(addr);
+ first = rd_reg_word(addr);
barrier();
cpu_relax();
- second = RD_REG_WORD(addr);
+ second = rd_reg_word(addr);
} while (first != second);
return (first);
@@ -329,7 +329,7 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair)
} else
req->ring_ptr++;
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
}
static inline int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 182bd68c79ac..8865c35d3421 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -376,7 +376,7 @@ qla2x00_start_scsi(srb_t *sp)
/* Calculate the number of request entries needed. */
req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+ cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -428,8 +428,8 @@ qla2x00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
+ wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -472,21 +472,21 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/* Set chip new ring index. */
if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
} else if (IS_QLA83XX(ha)) {
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
- WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+ wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
} else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
+ rd_reg_dword_relaxed(&reg->isp24.req_q_in);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
}
}
}
@@ -661,7 +661,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cur_dsd->address = 0;
cur_dsd->length = 0;
cur_dsd++;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
return 0;
}
@@ -755,8 +755,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
}
struct fw_dif_context {
- uint32_t ref_tag;
- uint16_t app_tag;
+ __le32 ref_tag;
+ __le16 app_tag;
uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
};
@@ -1389,7 +1389,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
struct dsd64 *cur_dsd;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t total_bytes = 0;
@@ -1456,7 +1456,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
&crc_ctx_pkt->ref_tag, tot_prot_dsds);
put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
- cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
@@ -1545,7 +1545,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
/* Fibre channel byte count */
cmd_pkt->byte_count = cpu_to_le32(total_bytes);
- fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+ fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
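This hunk shows both byte orders living in one IOCB: crc_context_len is a firmware (little-endian) field, while the FCP_DL word appended after the CDB travels in wire order, hence the __be32 pointer and htonl(). A short sketch of the pattern with invented names:

	struct crc_pkt_example {
		__le16 crc_context_len;		/* firmware reads this little-endian */
		__le32 byte_count;
	};

	static void crc_pkt_fill_example(struct crc_pkt_example *pkt, u8 *cdb_tail,
					 u16 ctx_len, u32 total)
	{
		__be32 *fcp_dl = (__be32 *)cdb_tail;	/* FCP_DL is big-endian on the wire */

		pkt->crc_context_len = cpu_to_le16(ctx_len);
		pkt->byte_count = cpu_to_le32(total);
		*fcp_dl = htonl(total);
	}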
@@ -1637,7 +1637,7 @@ qla24xx_start_scsi(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1698,7 +1698,7 @@ qla24xx_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1822,7 +1822,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -1881,7 +1881,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
req->ring_ptr++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1957,7 +1957,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2018,7 +2018,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2157,7 +2157,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
tot_dsds += nseg;
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
@@ -2214,7 +2214,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
req->ring_ptr++;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
/* Manage unprocessed RIO/ZIO commands in response queue. */
if (vha->flags.process_response_queue &&
@@ -2266,13 +2266,13 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
cnt = *req->out_ptr;
else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha))
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+ cnt = rd_reg_dword(reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+ cnt = rd_reg_dword(&reg->isp24.req_q_out);
else if (IS_QLAFX00(ha))
- cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+ cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
else
cnt = qla2x00_debounce_register(
ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -2305,8 +2305,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
pkt = req->ring_ptr;
memset(pkt, 0, REQUEST_ENTRY_SIZE);
if (IS_QLAFX00(ha)) {
- WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
- WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+ wrt_reg_byte((void __iomem *)&pkt->entry_count, req_cnt);
+ wrt_reg_word((void __iomem *)&pkt->handle, handle);
} else {
pkt->entry_count = req_cnt;
pkt->handle = handle;
@@ -2344,9 +2344,10 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
- logio->control_flags |= LCF_NVME_PRLI;
+ logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
if (sp->vha->flags.nvme_first_burst)
- logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
+ logio->io_parameter[0] =
+ cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2680,7 +2681,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- els_iocb->tx_dsd_count = 1;
+ els_iocb->tx_dsd_count = cpu_to_le16(1);
els_iocb->vp_index = vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = 0;
@@ -2700,7 +2701,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address);
- els_iocb->rx_dsd_count = 1;
+ els_iocb->rx_dsd_count = cpu_to_le16(1);
els_iocb->rx_byte_count = els_iocb->rx_len =
cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
@@ -2712,7 +2713,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(uint8_t *)els_iocb,
sizeof(*els_iocb));
} else {
- els_iocb->control_flags = 1 << 13;
+ els_iocb->control_flags = cpu_to_le16(1 << 13);
els_iocb->tx_byte_count =
cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
@@ -2787,7 +2788,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct qla_work_evt *e;
struct fc_port *conflict_fcport;
port_id_t cid; /* conflict Nport id */
- u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
@@ -2800,7 +2801,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- switch (fw_status[0]) {
+ switch (le32_to_cpu(fw_status[0])) {
case CS_DATA_UNDERRUN:
case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
@@ -2810,9 +2811,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case CS_IOCB_ERROR:
- switch (fw_status[1]) {
+ switch (le32_to_cpu(fw_status[1])) {
case LSC_SCODE_PORTID_USED:
- lid = fw_status[2] & 0xffff;
+ lid = le32_to_cpu(fw_status[2]) & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(fcport->port_name),
fcport->d_id, lid, &conflict_fcport);
@@ -2846,9 +2847,11 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
break;
case LSC_SCODE_NPORT_USED:
- cid.b.domain = (fw_status[2] >> 16) & 0xff;
- cid.b.area = (fw_status[2] >> 8) & 0xff;
- cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
+ & 0xff;
+ cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
+ & 0xff;
+ cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
@@ -3022,7 +3025,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->sys_define = 0;
els_iocb->entry_status = 0;
els_iocb->handle = sp->handle;
- els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
@@ -3216,7 +3219,7 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t tot_dsds;
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
- uint32_t *fcp_dl;
+ __be32 *fcp_dl;
uint8_t additional_cdb_len;
struct ct6_dsd *ctx;
struct scsi_qla_host *vha = sp->vha;
@@ -3310,7 +3313,7 @@ sufficient_dsds:
req_cnt = 1;
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3398,7 +3401,7 @@ sufficient_dsds:
memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
additional_cdb_len);
*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
@@ -3419,7 +3422,7 @@ sufficient_dsds:
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ cnt = (uint16_t)rd_reg_dword_relaxed(
&reg->req_q_out[0]);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3495,10 +3498,10 @@ sufficient_dsds:
if (ql2xdbwr)
qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3536,7 +3539,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(make_handle(req->id, sp->handle));
+ abt_iocb->handle = make_handle(req->id, sp->handle);
if (sp->fcport) {
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3544,10 +3547,10 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
}
abt_iocb->handle_to_abort =
- cpu_to_le32(make_handle(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
+ make_handle(le16_to_cpu(aio->u.abt.req_que_no),
+ aio->u.abt.cmd_hndl);
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
+ abt_iocb->req_que_no = aio->u.abt.req_que_no;
/* Send the command to the firmware */
wmb();
}
@@ -3562,7 +3565,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
for (i = 0; i < sz; i++)
- mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+ mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
}
static void
@@ -3586,7 +3589,7 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -3604,32 +3607,29 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
/*
* Build NVME LS request
*/
-static int
+static void
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
struct srb_iocb *nvme;
- int rval = QLA_SUCCESS;
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+ cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
- cmd_pkt->tx_dseg_count = 1;
- cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
- cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
+ cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
- cmd_pkt->rx_dseg_count = 1;
- cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
- cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
+ cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
-
- return rval;
}
static void
@@ -3894,7 +3894,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
else
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8a78d395bbc8..cf0800546740 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -89,9 +89,9 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
/* terminate exchange */
rsp_els->entry_type = ELS_IOCB_TYPE;
rsp_els->entry_count = 1;
- rsp_els->nport_handle = ~0;
+ rsp_els->nport_handle = cpu_to_le16(~0);
rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
- rsp_els->control_flags = EPD_RX_XCHG;
+ rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
ql_dbg(ql_dbg_init, vha, 0x0283,
"Sending ELS Response to terminate exchange %#x...\n",
abts->rx_xch_addr_to_abort);
@@ -141,7 +141,7 @@ qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
abts_rsp->ox_id = abts->ox_id;
abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
- abts_rsp->payload.ba_acc.high_seq_cnt = ~0;
+ abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
ql_dbg(ql_dbg_init, vha, 0x028b,
"Sending BA ACC response to ABTS %#x...\n",
@@ -204,7 +204,7 @@ qla2100_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (qla2x00_check_reg16_for_disconnect(vha, hccr))
break;
if (hccr & HCCR_RISC_PAUSE) {
@@ -216,18 +216,18 @@ qla2100_intr_handler(int irq, void *dev_id)
* bit to be cleared. Schedule a big hammer to get
* out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
- } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
+ } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
break;
- if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ if (rd_reg_word(&reg->semaphore) & BIT_0) {
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
/* Get mailbox data. */
mb[0] = RD_MAILBOX_REG(ha, reg, 0);
@@ -246,13 +246,13 @@ qla2100_intr_handler(int irq, void *dev_id)
mb[0]);
}
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
- RD_REG_WORD(&reg->semaphore);
+ wrt_reg_word(&reg->semaphore, 0);
+ rd_reg_word(&reg->semaphore);
} else {
qla2x00_process_response_queue(rsp);
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word(&reg->hccr);
}
}
qla2x00_handle_mbx_completion(ha, status);
@@ -324,14 +324,14 @@ qla2300_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSR_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_WORD(&reg->hccr);
+ hccr = rd_reg_word(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
ql_log(ql_log_warn, vha, 0x5026,
@@ -347,10 +347,10 @@ qla2300_intr_handler(int irq, void *dev_id)
* interrupt bit to be cleared. Schedule a big
* hammer to get out of the RISC PAUSED state.
*/
- WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
+ rd_reg_word(&reg->hccr);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSR_RISC_INT) == 0)
@@ -365,7 +365,7 @@ qla2300_intr_handler(int irq, void *dev_id)
status |= MBX_INTERRUPT;
/* Release mailbox registers. */
- WRT_REG_WORD(&reg->semaphore, 0);
+ wrt_reg_word(&reg->semaphore, 0);
break;
case 0x12:
mb[0] = MSW(stat);
@@ -393,8 +393,8 @@ qla2300_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
- RD_REG_WORD_RELAXED(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
+ rd_reg_word_relaxed(&reg->hccr);
}
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -412,7 +412,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -428,15 +428,15 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
+ wptr = MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
+ wptr = MAILBOX_REG(ha, reg, 8);
if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
else if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
mboxes >>= 1;
@@ -451,19 +451,19 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
int rval;
struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
/* Seed data -- mailbox1 -> mailbox7. */
if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
- wptr = (uint16_t __iomem *)&reg24->mailbox1;
+ wptr = &reg24->mailbox1;
else if (IS_QLA8044(vha->hw))
- wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+ wptr = &reg82->mailbox_out[1];
else
return;
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
- mb[cnt] = RD_REG_WORD(wptr);
+ mb[cnt] = rd_reg_word(wptr);
ql_dbg(ql_dbg_async, vha, 0x5021,
"Inter-Driver Communication %s -- "
@@ -756,6 +756,39 @@ qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
return NULL;
}
+/* Shall be called only on supported adapters. */
+static void
+qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ bool reset_isp_needed = 0;
+
+ ql_log(ql_log_warn, vha, 0x02f0,
+ "MPI Heartbeat stop. MPI reset is%s needed. "
+ "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
+ mb[0] & BIT_8 ? "" : " not",
+ mb[0], mb[1], mb[2], mb[3]);
+
+ if ((mb[1] & BIT_8) == 0)
+ return;
+
+ ql_log(ql_log_warn, vha, 0x02f1,
+ "MPI Heartbeat stop. FW dump needed\n");
+
+ if (ql2xfulldump_on_mpifail) {
+ ha->isp_ops->fw_dump(vha);
+ reset_isp_needed = 1;
+ }
+
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+
+ if (reset_isp_needed) {
+ vha->hw->flags.fw_init_done = 0;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @vha: SCSI driver HA context
@@ -785,7 +818,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
goto skip_rio;
switch (mb[0]) {
case MBA_SCSI_COMPLETION:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
+ handles[0] = make_handle(mb[2], mb[1]);
handle_cnt = 1;
break;
case MBA_CMPLT_1_16BIT:
@@ -824,10 +857,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
mb[0] = MBA_SCSI_COMPLETION;
break;
case MBA_CMPLT_2_32BIT:
- handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
- handles[1] = le32_to_cpu(
- ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
- RD_MAILBOX_REG(ha, reg, 6));
+ handles[0] = make_handle(mb[2], mb[1]);
+ handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
+ RD_MAILBOX_REG(ha, reg, 6));
handle_cnt = 2;
mb[0] = MBA_SCSI_COMPLETION;
break;
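make_handle() replaces the open-coded shift-and-or (and drops a spurious le32_to_cpu() on mailbox values that are already in CPU order). For illustration, it is assumed to pack two 16-bit mailbox words into one 32-bit completion handle roughly like this; the driver's actual helper may differ:

	static inline u32 make_handle_example(u16 high, u16 low)
	{
		return ((u32)high << 16) | low;
	}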
@@ -858,10 +890,10 @@ skip_rio:
IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
u16 m[4];
- m[0] = RD_REG_WORD(&reg24->mailbox4);
- m[1] = RD_REG_WORD(&reg24->mailbox5);
- m[2] = RD_REG_WORD(&reg24->mailbox6);
- mbx = m[3] = RD_REG_WORD(&reg24->mailbox7);
+ m[0] = rd_reg_word(&reg24->mailbox4);
+ m[1] = rd_reg_word(&reg24->mailbox5);
+ m[2] = rd_reg_word(&reg24->mailbox6);
+ mbx = m[3] = rd_reg_word(&reg24->mailbox7);
ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
@@ -871,10 +903,10 @@ skip_rio:
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
mb[1], mb[2], mb[3]);
- ha->fw_dump_mpi =
- (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- RD_REG_WORD(&reg24->mailbox7) & BIT_8;
- ha->isp_ops->fw_dump(vha, 1);
+ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ rd_reg_word(&reg24->mailbox7) & BIT_8)
+ ha->isp_ops->mpi_fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
ha->flags.fw_init_done = 0;
QLA_FW_STOPPED(ha);
@@ -979,8 +1011,8 @@ skip_rio:
ha->current_topology = 0;
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
- ? RD_REG_WORD(&reg24->mailbox4) : 0;
- mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ ? rd_reg_word(&reg24->mailbox4) : 0;
+ mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
: mbx;
ql_log(ql_log_info, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
@@ -1347,7 +1379,7 @@ global_port_update:
break;
case MBA_IDC_NOTIFY:
if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
(mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
@@ -1374,25 +1406,12 @@ global_port_update:
case MBA_IDC_AEN:
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- ha->flags.fw_init_done = 0;
- ql_log(ql_log_warn, vha, 0xffff,
- "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
- mb[0], mb[1], mb[2], mb[3]);
-
- if ((mb[1] & BIT_8) ||
- (mb[2] & BIT_8)) {
- ql_log(ql_log_warn, vha, 0xd013,
- "MPI Heartbeat stop. FW dump needed\n");
- ha->fw_dump_mpi = 1;
- ha->isp_ops->fw_dump(vha, 1);
- }
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla27xx_handle_8200_aen(vha, mb);
} else if (IS_QLA83XX(ha)) {
- mb[4] = RD_REG_WORD(&reg24->mailbox4);
- mb[5] = RD_REG_WORD(&reg24->mailbox5);
- mb[6] = RD_REG_WORD(&reg24->mailbox6);
- mb[7] = RD_REG_WORD(&reg24->mailbox7);
+ mb[4] = rd_reg_word(&reg24->mailbox4);
+ mb[5] = rd_reg_word(&reg24->mailbox5);
+ mb[6] = rd_reg_word(&reg24->mailbox6);
+ mb[7] = rd_reg_word(&reg24->mailbox7);
qla83xx_handle_8200_aen(vha, mb);
} else {
ql_dbg(ql_dbg_async, vha, 0x5052,
@@ -1646,7 +1665,7 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
for (i = 0; i < sz; i++)
- si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
+ si->u.mbx.in_mb[i] = pkt->mb[i];
res = (si->u.mbx.in_mb[0] & MBS_MASK);
@@ -1747,6 +1766,7 @@ static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *pkt, int iocb_type)
{
+ struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
const char func[] = "ELS_CT_IOCB";
const char *type;
srb_t *sp;
@@ -1796,23 +1816,22 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
- fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
- fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
+ fw_status[1] = le32_to_cpu(ese->error_subcode_1);
+ fw_status[2] = le32_to_cpu(ese->error_subcode_2);
if (iocb_type == ELS_IOCB_TYPE) {
els = &sp->u.iocb_cmd;
- els->u.els_plogi.fw_status[0] = fw_status[0];
- els->u.els_plogi.fw_status[1] = fw_status[1];
- els->u.els_plogi.fw_status[2] = fw_status[2];
- els->u.els_plogi.comp_status = fw_status[0];
+ els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
+ els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
+ els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
+ els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
if (comp_status == CS_COMPLETE) {
res = DID_OK << 16;
} else {
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
- els->u.els_plogi.len =
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count);
+ els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
+ ese->total_byte_count));
} else {
els->u.els_plogi.len = 0;
res = DID_ERROR << 16;
@@ -1821,8 +1840,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
goto els_ct_done;
}
@@ -1838,23 +1856,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
+ le32_to_cpu(ese->total_byte_count);
ql_dbg(ql_dbg_user, vha, 0x503f,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
type, sp->handle, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count));
+ le32_to_cpu(ese->total_byte_count));
} else {
ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
type, sp->handle, comp_status,
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_1),
- le16_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->error_subcode_2));
+ le32_to_cpu(ese->error_subcode_1),
+ le32_to_cpu(ese->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
}
@@ -2062,7 +2077,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
uint16_t ret = QLA_SUCCESS;
- uint16_t comp_status = le16_to_cpu(sts->comp_status);
+ __le16 comp_status = sts->comp_status;
int logit = 0;
iocb = &sp->u.iocb_cmd;
@@ -2093,7 +2108,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
/* Response already DMA'd to fd->rspaddr. */
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
} else if ((state_flags & SF_FCP_RSP_DMA)) {
/*
* Non-zero value in first 12 bytes of NVMe_RSP IU, treat this
@@ -2110,8 +2125,8 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
inbuf = (uint32_t *)&sts->nvme_ersp_data;
outbuf = (uint32_t *)fd->rspaddr;
- iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
- if (unlikely(iocb->u.nvme.rsp_pyld_len >
+ iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
+ if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
sizeof(struct nvme_fc_ersp_iu))) {
if (ql_mask_match(ql_dbg_io)) {
WARN_ONCE(1, "Unexpected response payload length %u.\n",
@@ -2121,9 +2136,9 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
iocb->u.nvme.rsp_pyld_len);
}
iocb->u.nvme.rsp_pyld_len =
- sizeof(struct nvme_fc_ersp_iu);
+ cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
}
- iter = iocb->u.nvme.rsp_pyld_len >> 2;
+ iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
for (; iter; iter--)
*outbuf++ = swab32(*inbuf++);
}
@@ -2138,7 +2153,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
"Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
tgt_xfer_len, fd->transferred_length);
logit = 1;
- } else if (comp_status == CS_DATA_UNDERRUN) {
+ } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
/*
* Do not log if this is just an underflow and there
* is no data loss.
@@ -2158,7 +2173,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
* If transport error then Failure (HBA rejects request)
* otherwise transport will handle.
*/
- switch (comp_status) {
+ switch (le16_to_cpu(comp_status)) {
case CS_COMPLETE:
break;
@@ -2300,7 +2315,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
/* Adjust ring index */
- WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
+ wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
@@ -2391,9 +2406,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
* For type 3: ref & app tag is all 'f's
* For type 0,1,2: app tag is all 'f's
*/
- if ((a_app_tag == T10_PI_APP_ESCAPE) &&
- ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
- (a_ref_tag == T10_PI_REF_ESCAPE))) {
+ if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
+ (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
+ a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd);
@@ -2751,6 +2766,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
+ u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
+
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le32_to_cpu(sts24->sense_len);
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
@@ -2765,11 +2782,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
/* Valid values of the retry delay timer are 0x1-0xffef */
- if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
- retry_delay = sts24->retry_delay & 0x3fff;
+ if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
+ retry_delay = sts24_retry_delay & 0x3fff;
ql_dbg(ql_dbg_io, sp->vha, 0x3033,
"%s: scope=%#x retry_delay=%#x\n", __func__,
- sts24->retry_delay >> 14, retry_delay);
+ sts24_retry_delay >> 14, retry_delay);
}
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
@@ -3143,7 +3160,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
uint32_t mboxes;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -3159,11 +3176,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
- wptr = (uint16_t __iomem *)&reg->mailbox1;
+ wptr = &reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
mboxes >>= 1;
wptr++;
@@ -3183,7 +3200,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = pkt->nport_handle;
sp->done(sp, 0);
}
@@ -3340,9 +3357,9 @@ process_err:
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
+ wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
} else {
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
}
@@ -3359,13 +3376,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
return;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
- RD_REG_DWORD(&reg->iobase_addr);
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
- for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_addr, 0x7C00);
+ rd_reg_dword(&reg->iobase_addr);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
+ for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0001);
+ wrt_reg_dword(&reg->iobase_window, 0x0001);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3374,11 +3391,11 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto next_test;
rval = QLA_SUCCESS;
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
- for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
+ for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
- WRT_REG_DWORD(&reg->iobase_window, 0x0003);
+ wrt_reg_dword(&reg->iobase_window, 0x0003);
udelay(10);
} else
rval = QLA_FUNCTION_TIMEOUT;
@@ -3387,13 +3404,13 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
goto done;
next_test:
- if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
+ if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
ql_log(ql_log_info, vha, 0x504c,
"Additional code -- 0x55AA.\n");
done:
- WRT_REG_DWORD(&reg->iobase_window, 0x0000);
- RD_REG_DWORD(&reg->iobase_window);
+ wrt_reg_dword(&reg->iobase_window, 0x0000);
+ rd_reg_dword(&reg->iobase_window);
}
/**
@@ -3437,14 +3454,14 @@ qla24xx_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_warn, vha, 0x504b,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3452,7 +3469,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3469,9 +3486,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3491,8 +3508,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat * 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
ndelay(3500);
}
@@ -3531,8 +3548,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
- RD_REG_DWORD_RELAXED(&reg->hccr);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
+ rd_reg_dword_relaxed(&reg->hccr);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3566,14 +3583,14 @@ qla24xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (qla2x00_check_reg32_for_disconnect(vha, stat))
break;
if (stat & HSRX_RISC_PAUSED) {
if (unlikely(pci_channel_offline(ha->pdev)))
break;
- hccr = RD_REG_DWORD(&reg->hccr);
+ hccr = rd_reg_dword(&reg->hccr);
ql_log(ql_log_info, vha, 0x5050,
"RISC paused -- HCCR=%x, Dumping firmware.\n",
@@ -3581,7 +3598,7 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2xxx_check_risc_status(vha);
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
} else if ((stat & HSRX_RISC_INT) == 0)
@@ -3598,9 +3615,9 @@ qla24xx_msix_default(int irq, void *dev_id)
break;
case INTR_ASYNC_EVENT:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox1);
- mb[2] = RD_REG_WORD(&reg->mailbox2);
- mb[3] = RD_REG_WORD(&reg->mailbox3);
+ mb[1] = rd_reg_word(&reg->mailbox1);
+ mb[2] = rd_reg_word(&reg->mailbox2);
+ mb[3] = rd_reg_word(&reg->mailbox3);
qla2x00_async_event(vha, rsp, mb);
break;
case INTR_RSP_QUE_UPDATE:
@@ -3620,7 +3637,7 @@ qla24xx_msix_default(int irq, void *dev_id)
"Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3671,7 +3688,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
reg = &ha->iobase->isp24;
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
queue_work(ha->wq, &qpair->q_work);
@@ -3932,7 +3949,7 @@ clear_risc_ints:
goto fail;
spin_lock_irq(&ha->hardware_lock);
- WRT_REG_WORD(&reg->isp.semaphore, 0);
+ wrt_reg_word(&reg->isp.semaphore, 0);
spin_unlock_irq(&ha->hardware_lock);
fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d6c991bd1bde..df31ee0d59b2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -106,7 +106,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint16_t *iptr;
- uint16_t __iomem *optr;
+ __le16 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -208,11 +208,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
- optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
+ optr = &reg->isp82.mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
- optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
+ optr = &reg->isp24.mailbox0;
else
- optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
+ optr = MAILBOX_REG(ha, &reg->isp, 0);
iptr = mcp->mb;
command = mcp->mb[0];
@@ -222,12 +222,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
- optr =
- (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
+ optr = MAILBOX_REG(ha, &reg->isp, 8);
if (mboxes & BIT_0) {
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
- WRT_REG_WORD(optr, *iptr);
+ wrt_reg_word(optr, *iptr);
}
mboxes >>= 1;
@@ -253,11 +252,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha))
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
@@ -300,7 +299,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Cmd=%x Polling Mode.\n", command);
if (IS_P3P_TYPE(ha)) {
- if (RD_REG_DWORD(&reg->isp82.hint) &
+ if (rd_reg_dword(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -311,11 +310,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
- WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
+ wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
} else if (IS_FWI2_CAPABLE(ha))
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
- WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
+ wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
@@ -413,14 +412,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
- mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
- mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
- mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
- mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
- ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
- host_status = RD_REG_DWORD(&reg->isp24.host_status);
- hccr = RD_REG_DWORD(&reg->isp24.hccr);
+ mb[0] = rd_reg_word(&reg->isp24.mailbox0);
+ mb[1] = rd_reg_word(&reg->isp24.mailbox1);
+ mb[2] = rd_reg_word(&reg->isp24.mailbox2);
+ mb[3] = rd_reg_word(&reg->isp24.mailbox3);
+ mb[7] = rd_reg_word(&reg->isp24.mailbox7);
+ ictrl = rd_reg_dword(&reg->isp24.ictrl);
+ host_status = rd_reg_dword(&reg->isp24.host_status);
+ hccr = rd_reg_dword(&reg->isp24.hccr);
ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
@@ -430,7 +429,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else {
mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
- ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ictrl = rd_reg_word(&reg->isp.ictrl);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
@@ -462,7 +461,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* a dump
*/
if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
rval = QLA_FUNCTION_TIMEOUT;
}
}
@@ -573,15 +572,15 @@ mbx_done:
if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
ql_dbg(ql_dbg_mbx, vha, 0x1198,
"host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
- RD_REG_DWORD(&reg->isp24.host_status),
- RD_REG_DWORD(&reg->isp24.ictrl),
- RD_REG_DWORD(&reg->isp24.istatus));
+ rd_reg_dword(&reg->isp24.host_status),
+ rd_reg_dword(&reg->isp24.ictrl),
+ rd_reg_dword(&reg->isp24.istatus));
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1206,
"ctrl_status=%#x ictrl=%#x istatus=%#x\n",
- RD_REG_WORD(&reg->isp.ctrl_status),
- RD_REG_WORD(&reg->isp.ictrl),
- RD_REG_WORD(&reg->isp.istatus));
+ rd_reg_word(&reg->isp.ctrl_status),
+ rd_reg_word(&reg->isp.ictrl),
+ rd_reg_word(&reg->isp.istatus));
}
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
@@ -3038,7 +3037,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
struct qla_hw_data *ha = vha->hw;
@@ -3097,7 +3096,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *iter = (void *)stats;
+ uint32_t *iter = (uint32_t *)stats;
ushort dwords = sizeof(*stats)/sizeof(*iter);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
@@ -3110,8 +3109,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mc.mb[6] = MSW(MSD(stats_dma));
mc.mb[7] = LSW(MSD(stats_dma));
mc.mb[8] = dwords;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16(options);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = options;
rval = qla24xx_send_mb_cmd(vha, &mc);
@@ -3204,7 +3203,7 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx, vha, 0x1090,
"Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(abt->nport_handle));
- if (abt->nport_handle == CS_IOCB_ERROR)
+ if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
rval = QLA_FUNCTION_PARAMETER_ERROR;
else
rval = QLA_FUNCTION_FAILED;
@@ -4427,9 +4426,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
- WRT_REG_DWORD(req->req_q_in, 0);
+ wrt_reg_dword(req->req_q_in, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(req->req_q_out, 0);
+ wrt_reg_dword(req->req_q_out, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4498,9 +4497,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
- WRT_REG_DWORD(rsp->rsp_q_out, 0);
+ wrt_reg_dword(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- WRT_REG_DWORD(rsp->rsp_q_in, 0);
+ wrt_reg_dword(rsp->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -4727,7 +4726,7 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
mbx_cmd_t *mcp = &mc;
int i;
int len;
- uint16_t *str;
+ __le16 *str;
struct qla_hw_data *ha = vha->hw;
if (!IS_P3P_TYPE(ha))
@@ -4736,14 +4735,14 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
"Entered %s.\n", __func__);
- str = (void *)version;
+ str = (__force __le16 *)version;
len = strlen(version);
mcp->mb[0] = MBC_SET_RNID_PARAMS;
mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
mcp->out_mb = MBX_1|MBX_0;
for (i = 4; i < 16 && len; i++, str++, len -= 2) {
- mcp->mb[i] = cpu_to_le16p(str);
+ mcp->mb[i] = le16_to_cpup(str);
mcp->out_mb |= 1<<i;
}
for (; i < 16; i++) {
@@ -4861,7 +4860,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
"Done %s.\n", __func__);
bp = (uint32_t *) buf;
for (i = 0; i < (bufsiz-4)/4; i++, bp++)
- *bp = le32_to_cpu(*bp);
+ *bp = le32_to_cpu((__force __le32)*bp);
}
return rval;
@@ -5411,18 +5410,18 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Write the MBC data to the registers */
- WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
- WRT_REG_WORD(&reg->mailbox1, mb[0]);
- WRT_REG_WORD(&reg->mailbox2, mb[1]);
- WRT_REG_WORD(&reg->mailbox3, mb[2]);
- WRT_REG_WORD(&reg->mailbox4, mb[3]);
+ wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+ wrt_reg_word(&reg->mailbox1, mb[0]);
+ wrt_reg_word(&reg->mailbox2, mb[1]);
+ wrt_reg_word(&reg->mailbox3, mb[2]);
+ wrt_reg_word(&reg->mailbox4, mb[3]);
- WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
+ wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
/* Poll for MBC interrupt */
for (timer = 6000000; timer; timer--) {
/* Check for pending interrupts. */
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
if (stat & HSRX_RISC_INT) {
stat &= 0xff;
@@ -5430,10 +5429,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
stat == 0x10 || stat == 0x11) {
set_bit(MBX_INTERRUPT,
&ha->mbx_cmd_flags);
- mb0 = RD_REG_WORD(&reg->mailbox0);
- WRT_REG_DWORD(&reg->hccr,
+ mb0 = rd_reg_word(&reg->mailbox0);
+ wrt_reg_dword(&reg->hccr,
HCCRX_CLR_RISC_INT);
- RD_REG_DWORD(&reg->hccr);
+ rd_reg_dword(&reg->hccr);
break;
}
}
@@ -6211,7 +6210,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1144,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
}
@@ -6256,7 +6255,7 @@ qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
mcp->mb[4]);
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
} else {
if (subcode & BIT_5)
*sector_size = mcp->mb[1];
@@ -6470,13 +6469,13 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE;
- mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[1] = fcport->loop_id;
mc.mb[2] = MSW(pd_dma);
mc.mb[3] = LSW(pd_dma);
mc.mb[6] = MSW(MSD(pd_dma));
mc.mb[7] = LSW(MSD(pd_dma));
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
- mc.mb[10] = cpu_to_le16((uint16_t)opt);
+ mc.mb[9] = vha->vp_idx;
+ mc.mb[10] = opt;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6587,7 +6586,7 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
mc.mb[6] = MSW(MSD(id_list_dma));
mc.mb[7] = LSW(MSD(id_list_dma));
mc.mb[8] = 0;
- mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[9] = vha->vp_idx;
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
@@ -6613,8 +6612,8 @@ int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(1);
- mcp->mb[2] = cpu_to_le16(value);
+ mcp->mb[1] = 1;
+ mcp->mb[2] = value;
mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -6639,7 +6638,7 @@ int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
memset(mcp->mb, 0, sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
- mcp->mb[1] = cpu_to_le16(0);
+ mcp->mb[1] = 0;
mcp->out_mb = MBX_1 | MBX_0;
mcp->in_mb = MBX_2 | MBX_0;
mcp->tov = MBX_TOV_SECONDS;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index d82e92da529a..15efe2f04b86 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -770,7 +770,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->req_q_in = &reg->isp25mq.req_q_in;
req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
- req->out_ptr = (void *)(req->ring + req->length);
+ req->out_ptr = (uint16_t *)(req->ring + req->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
@@ -884,7 +884,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
- rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
mutex_unlock(&ha->mq_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index df99911b8bb9..a8fe4f725fa0 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -46,7 +46,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
uint8_t io_lock_on;
uint16_t command = 0;
uint32_t *iptr;
- uint32_t __iomem *optr;
+ __le32 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
@@ -109,7 +109,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+ optr = &reg->ispfx00.mailbox0;
iptr = mcp->mb;
command = mcp->mb[0];
@@ -117,7 +117,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- WRT_REG_DWORD(optr, *iptr);
+ wrt_reg_dword(optr, *iptr);
mboxes >>= 1;
optr++;
@@ -676,14 +676,14 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ wrt_reg_dword(&reg->req_q_in, 0);
+ wrt_reg_dword(&reg->req_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ wrt_reg_dword(&reg->rsp_q_in, 0);
+ wrt_reg_dword(&reg->rsp_q_out, 0);
/* PCI posting */
- RD_REG_DWORD(&reg->rsp_q_out);
+ rd_reg_dword(&reg->rsp_q_out);
}
char *
@@ -912,9 +912,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* 30 seconds wait - Adjust if required */
wait_time = 30;
- pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ pseudo_aen = rd_reg_dword(&reg->pseudoaen);
if (pseudo_aen == 1) {
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
rval = qlafx00_driver_shutdown(vha, 10);
@@ -925,7 +925,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
- aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx = rd_reg_dword(&reg->aenmailbox0);
barrier();
ql_dbg(ql_dbg_mbx, vha, 0x0133,
"aenmbx: 0x%x\n", aenmbx);
@@ -944,15 +944,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
case MBA_FW_RESTART_CMPLT:
/* Set the mbx and rqstq intr code */
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
- WRT_REG_DWORD(&reg->aenmailbox0, 0);
- RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
+ wrt_reg_dword(&reg->aenmailbox0, 0);
+ rd_reg_dword_relaxed(&reg->aenmailbox0);
ql_dbg(ql_dbg_init, vha, 0x0134,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -982,13 +982,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
* 3. issue Get FW State Mbox cmd to determine fw state
* Set the mbx and rqstq intr code from Shadow Regs
*/
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->initval1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
- ha->req_que_len = RD_REG_DWORD(&reg->initval5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ha->req_que_off = rd_reg_dword(&reg->initval1);
+ ha->rsp_que_off = rd_reg_dword(&reg->initval3);
+ ha->req_que_len = rd_reg_dword(&reg->initval5);
+ ha->rsp_que_len = rd_reg_dword(&reg->initval6);
ql_dbg(ql_dbg_init, vha, 0x0135,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -1034,7 +1034,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
if (time_after_eq(jiffies, wtime)) {
ql_dbg(ql_dbg_init, vha, 0x0137,
"Init f/w failed: aen[7]: 0x%x\n",
- RD_REG_DWORD(&reg->aenmailbox7));
+ rd_reg_dword(&reg->aenmailbox7));
rval = QLA_FUNCTION_FAILED;
done = true;
break;
@@ -1428,7 +1428,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
- WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
+ wrt_reg_dword((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED);
pkt++;
}
@@ -1444,13 +1444,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha)
qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
ql_dbg(ql_dbg_disc, vha, 0x2094,
"fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
@@ -1495,7 +1495,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
(!test_bit(UNLOADING, &vha->dpc_flags)) &&
(!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
(ha->mr.fw_hbt_en)) {
- fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
ha->mr.old_fw_hbt_cnt = fw_heart_beat;
ha->mr.fw_hbt_miss_cnt = 0;
@@ -1515,7 +1515,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
/* Reset recovery to be performed in timer routine */
- aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
if (ha->mr.fw_reset_timer_exp) {
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1710,10 +1710,9 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
return;
}
-int
+void
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
- int rval = 0;
uint32_t aen_code, aen_data;
aen_code = FCH_EVT_VENDOR_UNIQUE;
@@ -1764,8 +1763,6 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
fc_host_post_event(vha->host, fc_get_event_number(),
aen_code, aen_data);
-
- return rval;
}
static void
@@ -2721,7 +2718,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
uint16_t lreq_q_in = 0;
uint16_t lreq_q_out = 0;
- lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) {
@@ -2783,7 +2780,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
/**
@@ -2814,9 +2811,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2846,13 +2843,13 @@ qlafx00_async_event(scsi_qla_host_t *vha)
break;
default:
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
- ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
- ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
- ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
- ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
+ ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
+ ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
+ ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
+ ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
ql_dbg(ql_dbg_async, vha, 0x5078,
"AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
@@ -2872,7 +2869,7 @@ static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint32_t __iomem *wptr;
+ __le32 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2882,10 +2879,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint32_t __iomem *)&reg->mailbox17;
+ wptr = &reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
wptr++;
}
}
@@ -2939,13 +2936,13 @@ qlafx00_intr_handler(int irq, void *dev_id)
break;
if (stat & QLAFX00_INTR_MB_CMPLT) {
- mb[0] = RD_REG_WORD(&reg->mailbox16);
+ mb[0] = rd_reg_dword(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
}
if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
- ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
}
@@ -3113,7 +3110,7 @@ qlafx00_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3178,7 +3175,7 @@ qlafx00_start_scsi(srb_t *sp)
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3205,7 +3202,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
+ tm_iocb.handle = make_handle(req->id, sp->handle);
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3215,7 +3212,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
sizeof(struct scsi_lun));
}
- memcpy((void *)ptm_iocb, &tm_iocb,
+ memcpy(ptm_iocb, &tm_iocb,
sizeof(struct tsk_mgmt_entry_fx00));
wmb();
}
@@ -3231,13 +3228,12 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(make_handle(req->id, sp->handle));
- abt_iocb.abort_handle =
- cpu_to_le32(make_handle(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.handle = make_handle(req->id, sp->handle);
+ abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
- memcpy((void *)pabt_iocb, &abt_iocb,
+ memcpy(pabt_iocb, &abt_iocb,
sizeof(struct abort_iocb_entry_fx00));
wmb();
}
@@ -3254,7 +3250,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
fx_iocb.entry_type = FX00_IOCB_TYPE;
- fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.handle = sp->handle;
fx_iocb.entry_count = entry_cnt;
if (sp->type == SRB_FXIOCB_DCMD) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4567f0c42486..762250891a8f 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -96,7 +96,7 @@ struct tsk_mgmt_entry_fx00 {
uint8_t sys_define;
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
uint32_t reserved_0;
@@ -121,13 +121,13 @@ struct abort_iocb_entry_fx00 {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0;
__le16 tgt_id_sts; /* Completion status. */
__le16 options;
- __le32 abort_handle; /* System handle. */
+ uint32_t abort_handle; /* System handle. */
__le32 reserved_2;
__le16 req_que_no;
@@ -166,7 +166,7 @@ struct fxdisc_entry_fx00 {
uint8_t sys_define; /* System Defined. */
uint8_t entry_status; /* Entry Status. */
- __le32 handle; /* System handle. */
+ uint32_t handle; /* System handle. */
__le32 reserved_0; /* System handle. */
__le16 func_num;
@@ -359,47 +359,47 @@ struct config_info_data {
#define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */
#define QLAFX00_SET_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
value)
#define QLAFX00_CLR_HST_INTR(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_RD_INTR_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
#define QLAFX00_CLR_INTR_REG(ha, value) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
~value)
#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_HBA_RST_REG(ha, val)\
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val)
#define QLAFX00_RD_ICNTRL_REG(ha) \
- RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+ rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
QLAFX00_ICR_ENB_MASK))
#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
- WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+ wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
QLAFX00_ICR_DIS_MASK))
#define QLAFX00_RD_REG(ha, off) \
- RD_REG_DWORD((ha)->cregbase + off)
+ rd_reg_dword((ha)->cregbase + off)
#define QLAFX00_WR_REG(ha, off, val) \
- WRT_REG_DWORD((ha)->cregbase + off, val)
+ wrt_reg_dword((ha)->cregbase + off, val)
struct qla_mt_iocb_rqst_fx00 {
__le32 reserved_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 4886d247df6f..d66d47a0f958 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -138,7 +138,7 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
priv->sp = NULL;
sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
} else {
fd->rcv_rsplen = 0;
fd->transferred_length = 0;
@@ -295,7 +295,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
sp->put_fn = qla_nvme_release_ls_cmd_kref;
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
@@ -384,7 +384,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out);
+ rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -426,11 +426,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* No data transfer how do we check buffer len == 0?? */
if (fd->io_dir == NVMEFC_FCP_READ) {
- cmd_pkt->control_flags = CF_READ_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += fd->payload_length;
vha->qla_stats.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
- cmd_pkt->control_flags = CF_WRITE_DATA;
+ cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) &&
(sp->fcport->nvme_prli_service_param &
NVME_PRLI_SP_FIRST_BURST)) {
@@ -438,7 +438,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
sp->fcport->nvme_first_burst_size) ||
(sp->fcport->nvme_first_burst_size == 0))
cmd_pkt->control_flags |=
- CF_NVME_FIRST_BURST_ENABLE;
+ cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
}
vha->qla_stats.output_bytes += fd->payload_length;
vha->qla_stats.output_requests++;
@@ -514,7 +514,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
@@ -560,7 +560,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
init_waitqueue_head(&sp->nvme_ls_waitq);
kref_init(&sp->cmd_kref);
spin_lock_init(&priv->cmd_lock);
- sp->priv = (void *)priv;
+ sp->priv = priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index ef912902d4e5..fbb844226630 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -48,26 +48,26 @@ struct cmd_nvme {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
- uint16_t timeout; /* Command timeout. */
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
+ __le16 dseg_count; /* Data segment count. */
+ __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */
uint64_t rsvd;
- uint16_t control_flags; /* Control Flags */
+ __le16 control_flags; /* Control Flags */
#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
#define CF_READ_DATA BIT_1
#define CF_WRITE_DATA BIT_0
- uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
+ __le16 nvme_cmnd_dseg_len; /* Data segment length. */
__le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */
__le64 nvme_rsp_dseg_address __packed; /* Data segment address. */
- uint32_t byte_count; /* Total byte count. */
+ __le32 byte_count; /* Total byte count. */
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
@@ -82,24 +82,24 @@ struct pt_ls4_request {
uint8_t sys_define;
uint8_t entry_status;
uint32_t handle;
- uint16_t status;
- uint16_t nport_handle;
- uint16_t tx_dseg_count;
+ __le16 status;
+ __le16 nport_handle;
+ __le16 tx_dseg_count;
uint8_t vp_index;
uint8_t rsvd;
- uint16_t timeout;
- uint16_t control_flags;
+ __le16 timeout;
+ __le16 control_flags;
#define CF_LS4_SHIFT 13
#define CF_LS4_ORIGINATOR 0
#define CF_LS4_RESPONDER 1
#define CF_LS4_RESPONDER_TERM 2
- uint16_t rx_dseg_count;
- uint16_t rsvd2;
- uint32_t exchange_address;
- uint32_t rsvd3;
- uint32_t rx_byte_count;
- uint32_t tx_byte_count;
+ __le16 rx_dseg_count;
+ __le16 rsvd2;
+ __le32 exchange_address;
+ __le32 rsvd3;
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
struct dsd64 dsd[2];
};
@@ -107,32 +107,32 @@ struct pt_ls4_request {
struct pt_ls4_rx_unsol {
uint8_t entry_type;
uint8_t entry_count;
- uint16_t rsvd0;
- uint16_t rsvd1;
+ __le16 rsvd0;
+ __le16 rsvd1;
uint8_t vp_index;
uint8_t rsvd2;
- uint16_t rsvd3;
- uint16_t nport_handle;
- uint16_t frame_size;
- uint16_t rsvd4;
- uint32_t exchange_address;
+ __le16 rsvd3;
+ __le16 nport_handle;
+ __le16 frame_size;
+ __le16 rsvd4;
+ __le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
be_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t param;
- uint32_t desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 param;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- uint32_t desc_len;
- uint32_t payload[3];
+ __le32 desc_len;
+ __le32 payload[3];
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 185c5f34d4c1..0baf55b7e88f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -370,7 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -380,47 +380,6 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
-static inline unsigned long
-qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
-{
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- /* See if we are currently pointing to the region we want to use next */
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
- /* No need to change window. PCIX and PCIEregs are in both
- * regs are in both windows.
- */
- return off;
- }
-
- if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
- /* We are in first CRB window */
- if (ha->curr_window != 0)
- WARN_ON(1);
- return off;
- }
-
- if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
- /* We are in second CRB window */
- off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
-
- if (ha->curr_window != 1)
- return off;
-
- /* We are in the QM or direct access
- * register region - do nothing
- */
- if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
- (off < QLA82XX_PCI_CAMQM_MAX))
- return off;
- }
- /* strange address given */
- ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%s: Warning: unm_nic_pci_set_crbwindow "
- "called with an unknown address(%llx).\n",
- QLA2XXX_DRIVER_NAME, off);
- return off;
-}
-
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
void __iomem **off_out)
@@ -520,7 +479,7 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
qla82xx_crb_win_lock(ha);
qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
}
- data = RD_REG_DWORD(off);
+ data = rd_reg_dword(off);
if (rv == 1) {
qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
@@ -937,17 +896,17 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
+ wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
/* Read back value to make sure write has gone through */
- RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
+ rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+ wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
data);
else
- rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+ rval = rd_reg_dword(off_value + CRB_INDIRECT_2M +
ha->nx_pcibase);
return rval;
@@ -1561,14 +1520,14 @@ qla82xx_get_table_desc(const u8 *unirom, int section)
uint32_t i;
struct qla82xx_uri_table_desc *directory =
(struct qla82xx_uri_table_desc *)&unirom[0];
- __le32 offset;
- __le32 tab_type;
- __le32 entries = cpu_to_le32(directory->num_entries);
+ uint32_t offset;
+ uint32_t tab_type;
+ uint32_t entries = le32_to_cpu(directory->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(directory->findex) +
- (i * cpu_to_le32(directory->entry_size));
- tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));
+ offset = le32_to_cpu(directory->findex) +
+ (i * le32_to_cpu(directory->entry_size));
+ tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8);
if (tab_type == section)
return (struct qla82xx_uri_table_desc *)&unirom[offset];
@@ -1582,16 +1541,17 @@ qla82xx_get_data_desc(struct qla_hw_data *ha,
u32 section, u32 idx_offset)
{
const u8 *unirom = ha->hablob->fw->data;
- int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
+ int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] +
+ idx_offset);
struct qla82xx_uri_table_desc *tab_desc = NULL;
- __le32 offset;
+ uint32_t offset;
tab_desc = qla82xx_get_table_desc(unirom, section);
if (!tab_desc)
return NULL;
- offset = cpu_to_le32(tab_desc->findex) +
- (cpu_to_le32(tab_desc->entry_size) * idx);
+ offset = le32_to_cpu(tab_desc->findex) +
+ (le32_to_cpu(tab_desc->entry_size) * idx);
return (struct qla82xx_uri_data_desc *)&unirom[offset];
}
@@ -1606,7 +1566,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha,
QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1620,7 +1580,7 @@ static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- return cpu_to_le32(uri_desc->size);
+ return le32_to_cpu(uri_desc->size);
}
return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
@@ -1636,7 +1596,7 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha)
uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
QLA82XX_URI_FIRMWARE_IDX_OFF);
if (uri_desc)
- offset = cpu_to_le32(uri_desc->findex);
+ offset = le32_to_cpu(uri_desc->findex);
}
return (u8 *)&ha->hablob->fw->data[offset];
@@ -1790,9 +1750,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
put_unaligned_le64(req->dma, &icb->request_q_address);
put_unaligned_le64(rsp->dma, &icb->response_q_address);
- WRT_REG_DWORD(&reg->req_q_out[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
- WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
+ wrt_reg_dword(&reg->req_q_out[0], 0);
+ wrt_reg_dword(&reg->rsp_q_in[0], 0);
+ wrt_reg_dword(&reg->rsp_q_out[0], 0);
}
static int
@@ -1847,8 +1807,8 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
struct qla82xx_uri_table_desc *ptab_desc = NULL;
const uint8_t *unirom = ha->hablob->fw->data;
uint32_t i;
- __le32 entries;
- __le32 flags, file_chiprev, offset;
+ uint32_t entries;
+ uint32_t flags, file_chiprev, offset;
uint8_t chiprev = ha->chip_revision;
/* Hardcoding mn_present flag for P3P */
int mn_present = 0;
@@ -1859,14 +1819,14 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
if (!ptab_desc)
return -1;
- entries = cpu_to_le32(ptab_desc->num_entries);
+ entries = le32_to_cpu(ptab_desc->num_entries);
for (i = 0; i < entries; i++) {
- offset = cpu_to_le32(ptab_desc->findex) +
- (i * cpu_to_le32(ptab_desc->entry_size));
- flags = cpu_to_le32(*((int *)&unirom[offset] +
+ offset = le32_to_cpu(ptab_desc->findex) +
+ (i * le32_to_cpu(ptab_desc->entry_size));
+ flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_FLAGS_OFF));
- file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
+ file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
QLA82XX_URI_CHIP_REV_OFF));
flagbit = mn_present ? 1 : 2;
@@ -1996,18 +1956,18 @@ void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
- uint16_t __iomem *wptr;
+ __le16 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
- wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
+ wptr = &reg->mailbox_out[1];
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
+ ha->mailbox_out[cnt] = rd_reg_word(wptr);
wptr++;
}
@@ -2069,8 +2029,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
vha = pci_get_drvdata(ha->pdev);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2082,9 +2042,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2097,7 +2057,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -2135,11 +2095,11 @@ qla82xx_msix_default(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
break;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
@@ -2151,9 +2111,9 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2166,7 +2126,7 @@ qla82xx_msix_default(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
} while (0);
qla2x00_handle_mbx_completion(ha, status);
@@ -2196,11 +2156,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
reg = &ha->iobase->isp82;
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
qla24xx_process_response_queue(vha, rsp);
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
@@ -2231,11 +2191,11 @@ qla82xx_poll(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
- host_int = RD_REG_DWORD(&reg->host_int);
+ host_int = rd_reg_dword(&reg->host_int);
if (qla2x00_check_reg32_for_disconnect(vha, host_int))
goto out;
if (host_int) {
- stat = RD_REG_DWORD(&reg->host_status);
+ stat = rd_reg_dword(&reg->host_status);
switch (stat & 0xff) {
case 0x1:
case 0x2:
@@ -2246,9 +2206,9 @@ qla82xx_poll(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -2260,7 +2220,7 @@ qla82xx_poll(int irq, void *dev_id)
stat * 0xff);
break;
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
out:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2549,8 +2509,8 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static uint32_t *
-qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+static __le32 *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t length)
{
uint32_t i;
@@ -2675,13 +2635,13 @@ qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
uint32_t offset, uint32_t length)
{
scsi_block_requests(vha->host);
- qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
+ qla82xx_read_flash_data(vha, buf, offset, length);
scsi_unblock_requests(vha->host);
return buf;
}
static int
-qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
+qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
uint32_t faddr, uint32_t dwords)
{
int ret;
@@ -2758,7 +2718,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
}
ret = qla82xx_write_flash_dword(ha, faddr,
- cpu_to_le32(*dwptr));
+ le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_p3p, vha, 0xb020,
"Unable to program flash address=%x data=%x.\n",
@@ -2818,10 +2778,10 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
if (ql2xdbwr)
qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
else {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+ while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+ wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
wmb();
}
}
@@ -3724,7 +3684,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Minidump related functions */
static int
qla82xx_minidump_process_control(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
struct qla82xx_md_entry_crb *crb_entry;
@@ -3841,12 +3801,12 @@ qla82xx_minidump_process_control(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_rdocm *ocm_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
r_addr = ocm_hdr->read_addr;
@@ -3854,7 +3814,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
+ r_value = rd_reg_dword(r_addr + ha->nx_pcibase);
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -3863,12 +3823,12 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
struct qla82xx_md_entry_mux *mux_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
r_addr = mux_hdr->read_addr;
@@ -3889,12 +3849,12 @@ qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_stride, loop_cnt, i, r_value;
struct qla82xx_md_entry_crb *crb_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
r_addr = crb_hdr->addr;
@@ -3912,7 +3872,7 @@ qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
@@ -3921,7 +3881,7 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
uint32_t c_value_w, c_value_r;
struct qla82xx_md_entry_cache *cache_hdr;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -3971,14 +3931,14 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t addr, r_addr, c_addr, t_r_addr;
uint32_t i, k, loop_count, t_value, r_cnt, r_value;
uint32_t c_value_w;
struct qla82xx_md_entry_cache *cache_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
loop_count = cache_hdr->op_count;
@@ -4006,14 +3966,14 @@ qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t s_addr, r_addr;
uint32_t r_stride, r_value, r_cnt, qid = 0;
uint32_t i, k, loop_cnt;
struct qla82xx_md_entry_queue *q_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
s_addr = q_hdr->select_addr;
@@ -4036,13 +3996,13 @@ qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
static void
qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value;
uint32_t i, loop_cnt;
struct qla82xx_md_entry_rdrom *rom_hdr;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
r_addr = rom_hdr->read_addr;
@@ -4062,7 +4022,7 @@ qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
static int
qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
- qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
+ qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
{
struct qla_hw_data *ha = vha->hw;
uint32_t r_addr, r_value, r_data;
@@ -4070,7 +4030,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
struct qla82xx_md_entry_rdmem *m_hdr;
unsigned long flags;
int rval = QLA_FUNCTION_FAILED;
- uint32_t *data_ptr = *d_ptr;
+ __le32 *data_ptr = *d_ptr;
m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
r_addr = m_hdr->read_addr;
@@ -4163,12 +4123,12 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
int no_entry_hdr = 0;
qla82xx_md_entry_hdr_t *entry_hdr;
struct qla82xx_md_template_hdr *tmplt_hdr;
- uint32_t *data_ptr;
+ __le32 *data_ptr;
uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
int i = 0, rval = QLA_FUNCTION_FAILED;
tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
- data_ptr = (uint32_t *)ha->md_dump;
+ data_ptr = ha->md_dump;
if (ha->fw_dumped) {
ql_log(ql_log_warn, vha, 0xb037,
@@ -4177,7 +4137,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb038,
@@ -4357,7 +4317,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb044,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
md_failed:
@@ -4514,7 +4474,7 @@ exit:
}
void
-qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla82xx_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 230abee10598..93344a05910a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -800,16 +800,16 @@ struct qla82xx_legacy_intr_set {
#define QLA82XX_URI_FIRMWARE_IDX_OFF 29
struct qla82xx_uri_table_desc{
- uint32_t findex;
- uint32_t num_entries;
- uint32_t entry_size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
};
struct qla82xx_uri_data_desc{
- uint32_t findex;
- uint32_t size;
- uint32_t reserved[5];
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
};
/* UNIFIED ROMIMAGE END */
@@ -829,22 +829,22 @@ struct qla82xx_uri_data_desc{
* ISP 8021 I/O Register Set structure definitions.
*/
struct device_reg_82xx {
- uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
- uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */
- uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */
+ __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */
+ __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */
+ __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */
- uint16_t mailbox_in[32]; /* Mail box In registers */
- uint16_t unused_1[32];
- uint32_t hint; /* Host interrupt register */
+ __le16 mailbox_in[32]; /* Mailbox In registers */
+ __le16 unused_1[32];
+ __le32 hint; /* Host interrupt register */
#define HINT_MBX_INT_PENDING BIT_0
- uint16_t unused_2[62];
- uint16_t mailbox_out[32]; /* Mail box Out registers */
- uint32_t unused_3[48];
+ __le16 unused_2[62];
+ __le16 mailbox_out[32]; /* Mailbox Out registers */
+ __le32 unused_3[48];
- uint32_t host_status; /* host status */
+ __le32 host_status; /* host status */
#define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */
#define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */
- uint32_t host_int; /* Interrupt status. */
+ __le32 host_int; /* Interrupt status. */
#define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. */
};
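
Reviewer note: the register block above is representative of the series as a whole. Every field the ISP reads or writes is retyped from plain uint16_t/uint32_t to the sparse-annotated __le16/__le32, so a build with `make C=1` flags any access that skips a byte-order conversion. A minimal sketch of the idiom outside this driver (the demo_* names are illustrative, not part of qla2xxx):

#include <linux/io.h>
#include <linux/types.h>

/* A little-endian MMIO block, annotated so sparse can police accesses. */
struct demo_regs {
	__le32 host_status;
	__le16 mailbox_out[32];
};

static u32 demo_read_status(struct demo_regs __iomem *regs)
{
	/*
	 * readl() already byte-swaps on big-endian hosts, so the value it
	 * returns is in CPU order; the __le32 type documents what the
	 * device actually stores at that address.
	 */
	return readl(&regs->host_status);
}
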
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index c056f466f1f4..50e57603ce3d 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1441,7 +1441,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)
if (idc_ctrl & GRACEFUL_RESET_BIT1) {
qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
(idc_ctrl & ~GRACEFUL_RESET_BIT1));
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
}
dev_ready:
@@ -2965,7 +2965,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
/* Prepare: Write pex-dma descriptor to MS memory. */
rval = qla8044_ms_mem_write_128b(vha,
- m_hdr->desc_card_addr, (void *)&dma_desc,
+ m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
(sizeof(struct qla8044_pex_dma_descriptor)/16));
if (rval) {
ql_log(ql_log_warn, vha, 0xb14a,
@@ -2987,7 +2987,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
read_size += chunk_size;
}
- *d_ptr = (void *)data_ptr;
+ *d_ptr = (uint32_t *)data_ptr;
error_exit:
if (rdmem_buffer)
@@ -3249,7 +3249,7 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
goto md_failed;
}
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
if (!ha->md_tmplt_hdr || !ha->md_dump) {
ql_log(ql_log_warn, vha, 0xb10e,
@@ -3470,7 +3470,7 @@ skip_nxt_entry:
ql_log(ql_log_info, vha, 0xb110,
"Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
@@ -3487,7 +3487,7 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
if (!qla8044_collect_md_data(vha)) {
- ha->fw_dumped = 1;
+ ha->fw_dumped = true;
ha->prev_minidump_failed = 0;
} else {
ql_log(ql_log_fatal, vha, 0xb0db,
@@ -3946,8 +3946,8 @@ qla8044_intr_handler(int irq, void *dev_id)
spin_lock_irqsave(&ha->hardware_lock, flags);
for (iter = 1; iter--; ) {
- if (RD_REG_DWORD(&reg->host_int)) {
- stat = RD_REG_DWORD(&reg->host_status);
+ if (rd_reg_dword(&reg->host_int)) {
+ stat = rd_reg_dword(&reg->host_status);
if ((stat & HSRX_RISC_INT) == 0)
break;
@@ -3961,9 +3961,9 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
case 0x12:
mb[0] = MSW(stat);
- mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
- mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
- mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ mb[1] = rd_reg_word(&reg->mailbox_out[1]);
+ mb[2] = rd_reg_word(&reg->mailbox_out[2]);
+ mb[3] = rd_reg_word(&reg->mailbox_out[3]);
qla2x00_async_event(vha, rsp, mb);
break;
case 0x13:
@@ -3976,7 +3976,7 @@ qla8044_intr_handler(int irq, void *dev_id)
break;
}
}
- WRT_REG_DWORD(&reg->host_int, 0);
+ wrt_reg_dword(&reg->host_int, 0);
}
qla2x00_handle_mbx_completion(ha, status);
@@ -4070,7 +4070,7 @@ exit_isp_reset:
}
void
-qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
+qla8044_fw_dump(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
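
Reviewer note: rd_reg_word()/rd_reg_dword() and wrt_reg_word()/wrt_reg_dword() replace the old RD_REG_*/WRT_REG_* macros throughout the patch. They are thin, typed wrappers over the standard MMIO accessors; conceptually (a sketch, not the driver's verbatim definitions) they look like:

#include <linux/io.h>
#include <linux/types.h>

static inline u16 rd_reg_word(const volatile __le16 __iomem *addr)
{
	return readw(addr);
}

static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr)
{
	return readl(addr);
}

static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data)
{
	writew(data, addr);
}

static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data)
{
	writel(data, addr);
}

The payoff is that passing a pointer of the wrong width or annotation becomes a sparse warning instead of a silent endian bug.
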
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1d9a4866f9a7..e92fad99338c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,11 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+int ql2xfulldump_on_mpifail;
+module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
+ "Set this to take full dump on MPI hang.");
+
/*
* CT6 CTX allocation cache
*/
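
Reviewer note: the new ql2xfulldump_on_mpifail parameter is registered with S_IRUGO | S_IWUSR, so besides being set at module load time it can be toggled by root at runtime through /sys/module/qla2xxx/parameters/ql2xfulldump_on_mpifail. For reference, the same declaration pattern in isolation (demo_* names are illustrative):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* An int parameter: world-readable, root-writable (0644 == S_IRUGO | S_IWUSR). */
static int demo_full_dump;
module_param(demo_full_dump, int, 0644);
MODULE_PARM_DESC(demo_full_dump, "Take a full dump instead of a reduced one.");
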
@@ -1216,9 +1221,9 @@ uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
if (IS_P3P_TYPE(ha))
- return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+ return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT);
else
- return ((RD_REG_DWORD(&reg->host_status)) ==
+ return ((rd_reg_dword(&reg->host_status)) ==
ISP_REG_DISCONNECT);
}
@@ -1902,8 +1907,8 @@ qla2x00_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
/* enable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1917,8 +1922,8 @@ qla2x00_disable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
/* disable risc and host interrupts */
- WRT_REG_WORD(&reg->ictrl, 0);
- RD_REG_WORD(&reg->ictrl);
+ wrt_reg_word(&reg->ictrl, 0);
+ rd_reg_word(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1930,8 +1935,8 @@ qla24xx_enable_intrs(struct qla_hw_data *ha)
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 1;
- WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1945,8 +1950,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->interrupts_on = 0;
- WRT_REG_DWORD(&reg->ictrl, 0);
- RD_REG_DWORD(&reg->ictrl);
+ wrt_reg_dword(&reg->ictrl, 0);
+ rd_reg_dword(&reg->ictrl);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -2518,6 +2523,7 @@ static struct isp_operations qla27xx_isp_ops = {
.read_nvram = NULL,
.write_nvram = NULL,
.fw_dump = qla27xx_fwdump,
+ .mpi_fw_dump = qla27xx_mpi_fwdump,
.beacon_on = qla24xx_beacon_on,
.beacon_off = qla24xx_beacon_off,
.beacon_blink = qla83xx_beacon_blink,
@@ -4614,7 +4620,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->flags.fce_enabled = 0;
ha->eft = NULL;
ha->eft_dma = 0;
- ha->fw_dumped = 0;
+ ha->fw_dumped = false;
ha->fw_dump_cap_flags = 0;
ha->fw_dump_reading = 0;
ha->fw_dump = NULL;
@@ -5758,7 +5764,8 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
if (!pdb) {
ql_dbg(ql_dbg_init, vha, 0x0181,
"%s: Failed allocate pdb\n", __func__);
- } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) {
+ } else if (qla24xx_get_port_database(vha,
+ le16_to_cpu(purex->nport_handle), pdb)) {
ql_dbg(ql_dbg_init, vha, 0x0181,
"%s: Failed get pdb sid=%x\n", __func__, sid);
} else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
@@ -5910,7 +5917,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
"-------- ELS REQ -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
- (void *)purex, sizeof(*purex));
+ purex, sizeof(*purex));
if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
rsp_payload_length =
@@ -5952,7 +5959,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rsp_els->entry_status = 0;
rsp_els->handle = 0;
rsp_els->nport_handle = purex->nport_handle;
- rsp_els->tx_dsd_count = 1;
+ rsp_els->tx_dsd_count = cpu_to_le16(1);
rsp_els->vp_index = purex->vp_idx;
rsp_els->sof_type = EST_SOFI3;
rsp_els->rx_xchg_address = purex->rx_xchg_addr;
@@ -5963,7 +5970,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rsp_els->d_id[1] = purex->s_id[1];
rsp_els->d_id[2] = purex->s_id[2];
- rsp_els->control_flags = EPD_ELS_ACC;
+ rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
rsp_els->rx_byte_count = 0;
rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
@@ -5975,8 +5982,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
/* Prepare Response Payload */
rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
- rsp_payload->hdr.len = cpu_to_be32(
- rsp_els->tx_byte_count - sizeof(rsp_payload->hdr));
+ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
+ sizeof(rsp_payload->hdr));
/* Link service Request Info Descriptor */
rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
@@ -6026,7 +6033,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
memset(sfp, 0, SFP_RTDI_LEN);
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
if (!rval) {
- uint16_t *trx = (void *)sfp; /* already be16 */
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
rsp_payload->sfp_diag_desc.temperature = trx[0];
rsp_payload->sfp_diag_desc.vcc = trx[1];
rsp_payload->sfp_diag_desc.tx_bias = trx[2];
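
Reviewer note: the (__force __be16 *) cast records that the raw SFP diagnostic bytes are already big-endian as read from the transceiver, so they can be stored into the big-endian RDP payload without conversion; __force suppresses the sparse warning an ordinary cast from a plain byte buffer would raise. The idiom in isolation (names are generic, not the driver's):

#include <linux/types.h>

/* Reinterpret a raw buffer whose contents are known to be big-endian. */
static __be16 demo_be16_at(const u8 *raw, unsigned int word)
{
	const __be16 *words = (__force const __be16 *)raw;

	return words[word];	/* still wire order; use be16_to_cpu() to consume */
}
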
@@ -6053,17 +6060,17 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
if (!rval) {
rsp_payload->ls_err_desc.link_fail_cnt =
- cpu_to_be32(stat->link_fail_cnt);
+ cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
rsp_payload->ls_err_desc.loss_sync_cnt =
- cpu_to_be32(stat->loss_sync_cnt);
+ cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
rsp_payload->ls_err_desc.loss_sig_cnt =
- cpu_to_be32(stat->loss_sig_cnt);
+ cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
rsp_payload->ls_err_desc.prim_seq_err_cnt =
- cpu_to_be32(stat->prim_seq_err_cnt);
+ cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
rsp_payload->ls_err_desc.inval_xmit_word_cnt =
- cpu_to_be32(stat->inval_xmit_word_cnt);
+ cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
rsp_payload->ls_err_desc.inval_crc_cnt =
- cpu_to_be32(stat->inval_crc_cnt);
+ cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
}
}
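
Reviewer note: the link-error counters arrive from the firmware in little-endian, while the RDP ELS payload is defined big-endian, hence the cpu_to_be32(le32_to_cpu(...)) pairs: convert to CPU order first, then to the wire order of the destination. Reduced to its essence (a sketch with generic names):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Re-encode one little-endian firmware counter as a big-endian wire field. */
static __be32 demo_le32_to_be32(__le32 fw_counter)
{
	return cpu_to_be32(le32_to_cpu(fw_counter));
}

On either endianness the pair reduces to a single swab32(); the practical change is that big-endian hosts previously performed no swap at all.
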
@@ -6135,7 +6142,7 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, void *pkt)
memset(sfp, 0, SFP_RTDI_LEN);
rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
if (!rval) {
- uint16_t *trx = (void *)sfp; /* already be16 */
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
/* Optical Element Descriptor, Temperature */
rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
@@ -6261,11 +6268,11 @@ send:
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
"-------- ELS RSP -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
- (void *)rsp_els, sizeof(*rsp_els));
+ rsp_els, sizeof(*rsp_els));
ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
"-------- ELS RSP PAYLOAD -------\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
- (void *)rsp_payload, rsp_payload_length);
+ rsp_payload, rsp_payload_length);
rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
@@ -6871,6 +6878,7 @@ qla2x00_do_dpc(void *data)
if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
+ base_vha->flags.online = 1;
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
@@ -7550,15 +7558,15 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
- stat = RD_REG_DWORD(&reg->hccr);
+ stat = rd_reg_word(&reg->hccr);
if (stat & HCCR_RISC_PAUSE)
risc_paused = 1;
} else if (IS_QLA23XX(ha)) {
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED)
risc_paused = 1;
} else if (IS_FWI2_CAPABLE(ha)) {
- stat = RD_REG_DWORD(&reg24->host_status);
+ stat = rd_reg_dword(&reg24->host_status);
if (stat & HSRX_RISC_PAUSED)
risc_paused = 1;
}
@@ -7567,7 +7575,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
if (risc_paused) {
ql_log(ql_log_info, base_vha, 0x9003,
"RISC paused -- mmio_enabled, Dumping firmware.\n");
- ha->isp_ops->fw_dump(base_vha, 0);
+ qla2xxx_dump_fw(base_vha);
return PCI_ERS_RESULT_NEED_RESET;
} else
@@ -7814,13 +7822,19 @@ qla2x00_module_init(void)
{
int ret = 0;
+ BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
BUILD_BUG_ON(sizeof(init_cb_t) != 96);
+ BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
BUILD_BUG_ON(sizeof(request_t) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
@@ -7828,17 +7842,70 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
+ BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
+ BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
+ BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
+ BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
+ BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
+ BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
+ BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
+ BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
+ BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
+ BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
+ BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
+ BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
+ BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
+ BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
+ BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
+ BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
+ BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
+ BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
+ BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
+ BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
+ BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
+ BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
- BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
- BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
+ BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
+ BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
+ BUILD_BUG_ON(sizeof(sw_info_t) != 32);
+ BUILD_BUG_ON(sizeof(target_id_t) != 2);
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
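
Reviewer note: the greatly expanded BUILD_BUG_ON() list pins the size of every IOCB and firmware-visible structure at compile time, so an accidental change in padding or field width (easy to introduce while retyping fields to __le16/__le32) fails the build instead of corrupting the wire format. The pattern in isolation (demo_* names are illustrative):

#include <linux/build_bug.h>
#include <linux/types.h>

/* Sketch: a 64-byte IOCB-style structure whose layout must never drift. */
struct demo_iocb {
	u8	entry_type;
	u8	entry_count;
	__le16	nport_handle;
	__le32	exchange_addr;
	u8	payload[56];
};

static void demo_check_layout(void)
{
	/* Evaluates to a build error if the size ever changes. */
	BUILD_BUG_ON(sizeof(struct demo_iocb) != 64);
}
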
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3da79ee1d88e..e161c05d7d82 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -26,24 +26,24 @@ qla2x00_lock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
while (data & NVR_BUSY) {
udelay(100);
- data = RD_REG_WORD(&reg->nvram);
+ data = rd_reg_word(&reg->nvram);
}
/* Lock resource */
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
while ((data & BIT_0) == 0) {
/* Lock failed */
udelay(100);
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0x1);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
udelay(5);
- data = RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ data = rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
}
@@ -58,8 +58,8 @@ qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) {
- WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0);
- RD_REG_WORD(&reg->u.isp2300.host_semaphore);
+ wrt_reg_word(&reg->u.isp2300.host_semaphore, 0);
+ rd_reg_word(&reg->u.isp2300.host_semaphore);
}
}
@@ -73,15 +73,15 @@ qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
- WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
@@ -120,21 +120,21 @@ qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
/* Read data from NVRAM. */
for (cnt = 0; cnt < 16; cnt++) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT | NVR_CLOCK);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
data <<= 1;
- reg_data = RD_REG_WORD(&reg->nvram);
+ reg_data = rd_reg_word(&reg->nvram);
if (reg_data & NVR_DATA_IN)
data |= BIT_0;
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
/* Deselect chip. */
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
return data;
@@ -171,8 +171,8 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_DESELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
NVRAM_DELAY();
}
@@ -183,7 +183,7 @@ qla2x00_nv_deselect(struct qla_hw_data *ha)
* @data: word to program
*/
static void
-qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
+qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data)
{
int count;
uint16_t word;
@@ -202,7 +202,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -216,8 +216,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -226,7 +226,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
qla2x00_nv_deselect(ha);
@@ -241,7 +241,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
static int
qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
- uint16_t data, uint32_t tmo)
+ __le16 data, uint32_t tmo)
{
int ret, count;
uint16_t word;
@@ -261,7 +261,7 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
/* Write data */
nv_cmd = (addr << 16) | NV_WRITE_OP;
- nv_cmd |= data;
+ nv_cmd |= (__force u16)data;
nv_cmd <<= 5;
for (count = 0; count < 27; count++) {
if (nv_cmd & BIT_31)
@@ -275,11 +275,11 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
do {
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
if (!--tmo) {
ret = QLA_FUNCTION_FAILED;
break;
@@ -308,7 +308,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
int ret, stat;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
- uint16_t wprot, wprot_old;
+ __le16 wprot, wprot_old;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
@@ -318,7 +318,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
cpu_to_le16(0x1234), 100000);
wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
- if (stat != QLA_SUCCESS || wprot != 0x1234) {
+ if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) {
/* Write enable. */
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
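
Reviewer note: observe the comparison style above. wprot keeps its __le16 type and is tested against cpu_to_le16(0x1234) rather than being converted itself; the constant is folded at compile time, so the check costs nothing and the variable stays in wire order. In isolation (a sketch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Test a little-endian flash/wire word against a constant without converting it. */
static bool demo_is_magic(__le16 word)
{
	return word == cpu_to_le16(0x1234);
}
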
@@ -347,8 +347,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -357,7 +357,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
if (wait_cnt)
@@ -407,8 +407,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
qla2x00_nv_deselect(ha);
/* Wait for NVRAM to become ready. */
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
@@ -417,7 +417,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
break;
}
NVRAM_DELAY();
- word = RD_REG_WORD(&reg->nvram);
+ word = rd_reg_word(&reg->nvram);
} while ((word & NVR_DATA_IN) == 0);
}
@@ -456,11 +456,11 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 30000;
- WRT_REG_DWORD(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_addr, addr & ~FARX_DATA_FLAG);
while (cnt--) {
- if (RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG) {
- *data = RD_REG_DWORD(&reg->flash_data);
+ if (rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG) {
+ *data = rd_reg_dword(&reg->flash_data);
return QLA_SUCCESS;
}
udelay(10);
@@ -499,11 +499,11 @@ qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
ulong cnt = 500000;
- WRT_REG_DWORD(&reg->flash_data, data);
- WRT_REG_DWORD(&reg->flash_addr, addr | FARX_DATA_FLAG);
+ wrt_reg_dword(&reg->flash_data, data);
+ wrt_reg_dword(&reg->flash_addr, addr | FARX_DATA_FLAG);
while (cnt--) {
- if (!(RD_REG_DWORD(&reg->flash_addr) & FARX_DATA_FLAG))
+ if (!(rd_reg_dword(&reg->flash_addr) & FARX_DATA_FLAG))
return QLA_SUCCESS;
udelay(10);
cond_resched();
@@ -549,11 +549,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
{
const char *loc, *locations[] = { "DEF", "PCI" };
uint32_t pcihdr, pcids;
- uint16_t cnt, chksum, *wptr;
+ uint16_t cnt, chksum;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct qla_flt_location *fltl = (void *)req->ring;
- uint32_t *dcode = (void *)req->ring;
+ uint32_t *dcode = (uint32_t *)req->ring;
uint8_t *buf = (void *)req->ring, *bcode, last_image;
/*
@@ -610,7 +611,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
if (memcmp(fltl->sig, "QFLT", 4))
goto end;
- wptr = (void *)req->ring;
+ wptr = (__force __le16 *)req->ring;
cnt = sizeof(*fltl) / sizeof(*wptr);
for (chksum = 0; cnt--; wptr++)
chksum += le16_to_cpu(*wptr);
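
Reviewer note: retyping wptr to __le16 * makes the checksum loop explicit about byte order; each on-flash word is converted with le16_to_cpu() before it is summed, so the result is identical on big- and little-endian hosts. The same check as a stand-alone helper (a sketch; the driver keeps the loop open-coded):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sum a little-endian word array; valid data checksums to zero. */
static bool demo_le16_csum_ok(const __le16 *wptr, size_t words)
{
	u16 chksum = 0;

	while (words--)
		chksum += le16_to_cpu(*wptr++);

	return chksum == 0;
}
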
@@ -671,7 +672,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
struct qla_flt_header *flt = ha->flt;
struct qla_flt_region *region = &flt->region[0];
- uint16_t *wptr, cnt, chksum;
+ __le16 *wptr;
+ uint16_t cnt, chksum;
uint32_t start;
/* Assign FCP prio region since older adapters may not have FLT, or
@@ -681,8 +683,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
ha->flt_region_flt = flt_addr;
- wptr = (uint16_t *)ha->flt;
- ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
+ wptr = (__force __le16 *)ha->flt;
+ ha->isp_ops->read_optrom(vha, flt, flt_addr << 2,
(sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
if (le16_to_cpu(*wptr) == 0xffff)
@@ -949,7 +951,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
uint16_t cnt, chksum;
- uint16_t *wptr = (void *)req->ring;
+ __le16 *wptr = (__force __le16 *)req->ring;
struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
uint8_t man_id, flash_id;
uint16_t mid = 0, fid = 0;
@@ -1042,14 +1044,14 @@ static void
qla2xxx_get_idc_param(scsi_qla_host_t *vha)
{
#define QLA82XX_IDC_PARAM_ADDR 0x003e885c
- uint32_t *wptr;
+ __le32 *wptr;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
if (!(IS_P3P_TYPE(ha)))
return;
- wptr = (uint32_t *)req->ring;
+ wptr = (__force __le32 *)req->ring;
ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8);
if (*wptr == cpu_to_le32(0xffffffff)) {
@@ -1095,7 +1097,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
{
#define NPIV_CONFIG_SIZE (16*1024)
void *data;
- uint16_t *wptr;
+ __le16 *wptr;
uint16_t cnt, chksum;
int i;
struct qla_npiv_header hdr;
@@ -1197,9 +1199,9 @@ qla24xx_unprotect_flash(scsi_qla_host_t *vha)
return qla81xx_fac_do_write_enable(vha, 1);
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
if (!ha->fdt_wrt_disable)
goto done;
@@ -1240,8 +1242,8 @@ qla24xx_protect_flash(scsi_qla_host_t *vha)
skip_wrt_protect:
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
return QLA_SUCCESS;
}
@@ -1265,7 +1267,7 @@ qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata)
}
static int
-qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
uint32_t dwords)
{
int ret;
@@ -1352,7 +1354,7 @@ next:
/* Slow write */
ret = qla24xx_write_flash_dword(ha,
- flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
+ flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
"Failed slopw write %x (%x)\n", faddr, *dwptr);
@@ -1379,11 +1381,11 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
uint32_t bytes)
{
uint32_t i;
- uint16_t *wptr;
+ __le16 *wptr;
struct qla_hw_data *ha = vha->hw;
/* Word reads to NVRAM via registers. */
- wptr = (uint16_t *)buf;
+ wptr = buf;
qla2x00_lock_nvram_access(ha);
for (i = 0; i < bytes >> 1; i++, naddr++)
wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha,
@@ -1456,7 +1458,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
{
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- uint32_t *dwptr = buf;
+ __le32 *dwptr = buf;
uint32_t i;
int ret;
@@ -1466,9 +1468,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
return ret;
/* Enable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
/* Disable NVRAM write-protection. */
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0);
@@ -1478,7 +1480,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
naddr = nvram_data_addr(ha, naddr);
bytes >>= 2;
for (i = 0; i < bytes; i++, naddr++, dwptr++) {
- if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) {
+ if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) {
ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
naddr, *dwptr);
@@ -1490,9 +1492,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr,
qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c);
/* Disable flash write. */
- WRT_REG_DWORD(&reg->ctrl_status,
- RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
- RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_dword(&reg->ctrl_status,
+ rd_reg_dword(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
+ rd_reg_dword(&reg->ctrl_status); /* PCI Posting. */
return ret;
}
@@ -1588,8 +1590,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
/* Set the modified gpio_enable values */
@@ -1598,8 +1600,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
qla2x00_flip_colors(ha, &led_color);
@@ -1614,8 +1616,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1645,8 +1647,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
- gpio_enable = RD_REG_WORD(&reg->gpioe);
- gpio_data = RD_REG_WORD(&reg->gpiod);
+ gpio_enable = rd_reg_word(&reg->gpioe);
+ gpio_data = rd_reg_word(&reg->gpiod);
}
gpio_enable |= GPIO_LED_MASK;
@@ -1654,8 +1656,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
- WRT_REG_WORD(&reg->gpioe, gpio_enable);
- RD_REG_WORD(&reg->gpioe);
+ wrt_reg_word(&reg->gpioe, gpio_enable);
+ rd_reg_word(&reg->gpioe);
}
/* Clear out previously set LED colour. */
@@ -1663,8 +1665,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
if (ha->pio_address) {
WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
- WRT_REG_WORD(&reg->gpiod, gpio_data);
- RD_REG_WORD(&reg->gpiod);
+ wrt_reg_word(&reg->gpiod, gpio_data);
+ rd_reg_word(&reg->gpiod);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1731,13 +1733,13 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
/* Save the Original GPIOD. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Set the color bits. */
qla24xx_flip_colors(ha, &led_color);
@@ -1749,8 +1751,8 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
gpio_data |= led_color;
/* Set the modified gpio_data values. */
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ gpio_data = rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1881,12 +1883,12 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
goto skip_gpio;
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Enable the gpio_data reg for update. */
gpio_data |= GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -1929,12 +1931,12 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
/* Give control back to firmware. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- gpio_data = RD_REG_DWORD(&reg->gpiod);
+ gpio_data = rd_reg_dword(&reg->gpiod);
/* Disable the gpio_data reg for update. */
gpio_data &= ~GPDX_LED_UPDATE_MASK;
- WRT_REG_DWORD(&reg->gpiod, gpio_data);
- RD_REG_DWORD(&reg->gpiod);
+ wrt_reg_dword(&reg->gpiod, gpio_data);
+ rd_reg_dword(&reg->gpiod);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
set_fw_options:
@@ -1970,10 +1972,10 @@ qla2x00_flash_enable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data |= CSR_FLASH_ENABLE;
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -1986,10 +1988,10 @@ qla2x00_flash_disable(struct qla_hw_data *ha)
uint16_t data;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- data = RD_REG_WORD(&reg->ctrl_status);
+ data = rd_reg_word(&reg->ctrl_status);
data &= ~(CSR_FLASH_ENABLE);
- WRT_REG_WORD(&reg->ctrl_status, data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/**
@@ -2008,7 +2010,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
@@ -2016,11 +2018,11 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- data = RD_REG_WORD(&reg->flash_data);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ data = rd_reg_word(&reg->flash_data);
return (uint8_t)data;
}
@@ -2028,13 +2030,13 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2049,7 +2051,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
} while (data != data2);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
data = qla2x00_debounce_register(&reg->flash_data);
}
@@ -2068,20 +2070,20 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
uint16_t bank_select;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- bank_select = RD_REG_WORD(&reg->ctrl_status);
+ bank_select = rd_reg_word(&reg->ctrl_status);
if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
/* Specify 64K address range: */
/* clear out Module Select and Flash Address bits [19:16]. */
bank_select &= ~0xf8;
bank_select |= addr >> 12 & 0xf0;
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
return;
}
@@ -2089,13 +2091,13 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
/* Setup bit 16 of flash address. */
if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) {
bank_select |= CSR_FLASH_64K_BANK;
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
} else if (((addr & BIT_16) == 0) &&
(bank_select & CSR_FLASH_64K_BANK)) {
bank_select &= ~(CSR_FLASH_64K_BANK);
- WRT_REG_WORD(&reg->ctrl_status, bank_select);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->ctrl_status, bank_select);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
/* Always perform IO mapped accesses to the FLASH registers. */
@@ -2103,10 +2105,10 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
} else {
- WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
- WRT_REG_WORD(&reg->flash_data, (uint16_t)data);
- RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_address, (uint16_t)addr);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
+ wrt_reg_word(&reg->flash_data, (uint16_t)data);
+ rd_reg_word(&reg->ctrl_status); /* PCI Posting. */
}
}
@@ -2289,12 +2291,12 @@ qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
midpoint = length / 2;
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram);
for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) {
if (ilength == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
data = qla2x00_read_flash_byte(ha, saddr);
if (saddr % 100)
@@ -2319,11 +2321,11 @@ qla2x00_suspend_hba(struct scsi_qla_host *vha)
/* Pause RISC. */
spin_lock_irqsave(&ha->hardware_lock, flags);
- WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
- RD_REG_WORD(&reg->hccr);
+ wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
+ rd_reg_word(&reg->hccr);
if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
for (cnt = 0; cnt < 30000; cnt++) {
- if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
+ if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
break;
udelay(100);
}
@@ -2362,12 +2364,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf,
midpoint = ha->optrom_size / 2;
qla2x00_flash_enable(ha);
- WRT_REG_WORD(&reg->nvram, 0);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, 0);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
for (addr = offset, data = buf; addr < length; addr++, data++) {
if (addr == midpoint) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram); /* PCI Posting. */
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram); /* PCI Posting. */
}
*data = qla2x00_read_flash_byte(ha, addr);
@@ -2399,7 +2401,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf,
sec_number = 0;
/* Reset ISP chip. */
- WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
+ wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
/* Go with write. */
@@ -2548,8 +2550,8 @@ update_flash:
}
}
} else if (addr == ha->optrom_size / 2) {
- WRT_REG_WORD(&reg->nvram, NVR_SELECT);
- RD_REG_WORD(&reg->nvram);
+ wrt_reg_word(&reg->nvram, NVR_SELECT);
+ rd_reg_word(&reg->nvram);
}
if (flash_id == 0xda && man_id == 0xc1) {
@@ -2610,7 +2612,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
/* Go with read. */
- qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2);
+ qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2);
/* Resume HBA. */
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
@@ -2662,7 +2664,7 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
for (; cnt; cnt--, flt_reg++) {
- if (flt_reg->start == start) {
+ if (le32_to_cpu(flt_reg->start) == start) {
memcpy((uint8_t *)region, flt_reg,
sizeof(struct qla_flt_region));
rval = QLA_SUCCESS;
@@ -2691,7 +2693,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
struct qla_flt_region region;
bool reset_to_rom = false;
uint32_t risc_size, risc_attr = 0;
- uint32_t *fw_array = NULL;
+ __be32 *fw_array = NULL;
/* Retrieve region info - must be a start address passed in */
rval = qla28xx_get_flash_region(vha, offset, &region);
@@ -2722,12 +2724,12 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Region %x is secure\n", region.code);
- switch (region.code) {
+ switch (le16_to_cpu(region.code)) {
case FLT_REG_FW:
case FLT_REG_FW_SEC_27XX:
case FLT_REG_MPI_PRI_28XX:
case FLT_REG_MPI_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
@@ -2761,7 +2763,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
case FLT_REG_PEP_PRI_28XX:
case FLT_REG_PEP_SEC_28XX:
- fw_array = dwptr;
+ fw_array = (__force __be32 *)dwptr;
/* 1st fw array */
risc_size = be32_to_cpu(fw_array[3]);
@@ -2892,7 +2894,8 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
if (region.attribute && buf_size_without_sfub) {
ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff,
"Sending Secure Flash MB Cmd\n");
- rval = qla28xx_secure_flash_update(vha, 0, region.code,
+ rval = qla28xx_secure_flash_update(vha, 0,
+ le16_to_cpu(region.code),
buf_size_without_sfub, sfub_dma,
sizeof(struct secure_flash_update_block) >> 2);
if (rval != QLA_SUCCESS) {
@@ -2981,11 +2984,11 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
/* Go with write. */
if (IS_QLA28XX(ha))
- rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla28xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
else
- rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
- offset >> 2, length >> 2);
+ rval = qla24xx_write_flash_data(vha, buf, offset >> 2,
+ length >> 2);
clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
scsi_unblock_requests(vha->host);
@@ -3513,7 +3516,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
} else {
for (i = 0; i < 4; i++)
- ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
+ ha->fw_revision[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision (flash) %u.%u.%u (%x).\n",
ha->fw_revision[0], ha->fw_revision[1],
@@ -3528,7 +3532,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
faddr = ha->flt_region_gold_fw;
- qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
+ qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
if (qla24xx_risc_firmware_invalid(dcode)) {
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at %#x.\n", faddr);
@@ -3537,7 +3541,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
for (i = 0; i < 4; i++)
- ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
+ ha->gold_fw_version[i] =
+ be32_to_cpu((__force __be32)dcode[4+i]);
return ret;
}
@@ -3617,7 +3622,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
/* read remaining FCP CMD config data from flash */
fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
- len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
+ len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry);
max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 622e7337affc..fbb80a043b4f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -378,7 +378,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
qlt_issue_marker(vha, ha_locked);
if ((entry->u.isp24.vp_index != 0xFF) &&
- (entry->u.isp24.nport_handle != 0xFFFF)) {
+ (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
host = qlt_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
@@ -1697,7 +1697,7 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1725,7 +1725,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
struct scsi_qla_host *vha = mcmd->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl, h;
+ __le32 f_ctl;
+ uint32_t h;
uint8_t *p;
int rc;
struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
@@ -1782,7 +1783,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -1814,7 +1815,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
- uint32_t f_ctl;
+ __le32 f_ctl;
uint8_t *p;
ql_dbg(ql_dbg_tgt, vha, 0xe006,
@@ -1857,7 +1858,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
resp->payload.ba_acct.low_seq_cnt = 0x0000;
- resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
} else {
@@ -2030,7 +2031,7 @@ static void qlt_do_tmr_work(struct work_struct *work)
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- tag = mcmd->orig_iocb.abts.exchange_addr_to_abort;
+ tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
break;
default:
tag = 0;
@@ -2110,7 +2111,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_tgt_cmd *abort_cmd;
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
- abts->exchange_addr_to_abort);
+ le32_to_cpu(abts->exchange_addr_to_abort));
if (abort_cmd && abort_cmd->qpair) {
mcmd->qpair = abort_cmd->qpair;
mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
@@ -2133,7 +2134,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
- uint32_t tag = abts->exchange_addr_to_abort;
+ uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
be_id_t s_id;
int rc;
unsigned long flags;
@@ -2223,7 +2224,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
- ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2280,7 +2281,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
ctio->entry_type = CTIO_TYPE7;
ctio->entry_count = 1;
ctio->handle = QLA_TGT_SKIP_HANDLE;
- ctio->nport_handle = cmd->sess->loop_id;
+ ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2484,7 +2485,7 @@ static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
- RD_REG_DWORD_RELAXED(req->req_q_out));
+ rd_reg_dword_relaxed(req->req_q_out));
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -2840,10 +2841,14 @@ skip_explict_conf:
cpu_to_le16(SS_SENSE_LEN_VALID);
ctio->u.status1.sense_length =
cpu_to_le16(prm->sense_buffer_len);
- for (i = 0; i < prm->sense_buffer_len/4; i++)
- ((uint32_t *)ctio->u.status1.sense_data)[i] =
- cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+ for (i = 0; i < prm->sense_buffer_len/4; i++) {
+ uint32_t v;
+ v = get_unaligned_be32(
+ &((uint32_t *)prm->sense_buffer)[i]);
+ put_unaligned_le32(v,
+ &((uint32_t *)ctio->u.status1.sense_data)[i]);
+ }
qlt_print_dif_err(prm);
} else {
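
Reviewer note: the get_unaligned_be32()/put_unaligned_le32() pair above performs an unconditional per-dword byte swap and tolerates buffers that are not 32-bit aligned, whereas the old cpu_to_be32() of a dereferenced uint32_t * swapped only on little-endian hosts and assumed alignment. A reduced sketch of the same copy (buffer names are generic):

#include <linux/types.h>
#include <asm/unaligned.h>

/*
 * Byte-reverse a buffer in 4-byte units, independent of host endianness
 * and of source/destination alignment.
 */
static void demo_swab_dwords(void *dst, const void *src, size_t len)
{
	const u8 *s = src;
	u8 *d = dst;
	size_t i;

	for (i = 0; i < len / 4; i++) {
		u32 v = get_unaligned_be32(s + i * 4);

		put_unaligned_le32(v, d + i * 4);
	}
}
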
@@ -3114,7 +3119,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
- pkt->dseg_count = prm->tot_dsds;
+ pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
@@ -3136,7 +3141,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
- pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+ pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
if (!bundling) {
cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
@@ -3573,7 +3578,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
- __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
}
/* terminate */
@@ -3647,7 +3652,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -3885,7 +3890,7 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (void *) req->outstanding_cmds[h];
+ cmd = req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -4110,7 +4115,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
- cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
+ cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
if (atio->u.isp24.fcp_cmnd.rddata &&
atio->u.isp24.fcp_cmnd.wrdata) {
@@ -5302,7 +5307,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = sess->loop_id;
+ ctio24->nport_handle = cpu_to_le16(sess->loop_id);
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -5315,13 +5320,14 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
* CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
* if the explicit confirmation is used.
*/
- ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.ox_id =
+ cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
ctio24->u.status1.scsi_status = cpu_to_le16(status);
- ctio24->u.status1.residual = get_datalen_for_atio(atio);
+ ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
if (ctio24->u.status1.residual != 0)
- ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+ ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
/* Memory Barrier */
wmb();
@@ -5550,7 +5556,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
if (unlikely(atio->u.isp24.exchange_addr ==
- ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
ql_dbg(ql_dbg_io, vha, 0x3065,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
@@ -5670,9 +5676,9 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
if (qpair == ha->base_qpair)
- ha->isp_ops->fw_dump(vha, 1);
+ ha->isp_ops->fw_dump(vha);
else
- ha->isp_ops->fw_dump(vha, 0);
+ qla2xxx_dump_fw(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -5713,8 +5719,8 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
entry->compl_status);
if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
+ if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
+ le32_to_cpu(entry->error_subcode2) == 0) {
if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
ha->tgt.tgt_ops->free_mcmd(mcmd);
return;
@@ -5928,11 +5934,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
"qla_target(%d): Async LOOP_UP occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(ha->base_qpair,
- (void *)&tgt->link_reinit_iocb,
+ &tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
@@ -5946,18 +5951,16 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
"qla_target(%d): Async event %#x occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
break;
case MBA_REJECTED_FCP_CMD:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
"qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- if (le16_to_cpu(mailbox[3]) == 1) {
+ if (mailbox[3] == 1) {
/* exchange starvation. */
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -5981,10 +5984,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
"qla_target(%d): Port update async event %#x "
"occurred: updating the ports database (m[0]=%x, m[1]=%x, "
"m[2]=%x, m[3]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
- login_code = le16_to_cpu(mailbox[2]);
+ login_code = mailbox[2];
if (login_code == 0x4) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
@@ -6661,9 +6663,14 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+
+ /*
+ * We are expecting the offline state.
+ * QLA_FUNCTION_FAILED means that the adapter is offline.
+ */
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
ql_dbg(ql_dbg_tgt, vha, 0xe081,
- "qla2x00_wait_for_hba_online() failed\n");
+ "adapter is offline\n");
}
/*
@@ -6729,7 +6736,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
return;
for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt++;
}
@@ -6764,7 +6771,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
&pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
- le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+ pkt->u.isp24.exchange_addr, pkt);
adjust_corrupted_atio(pkt);
qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
@@ -6782,14 +6789,14 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
} else
ha->tgt.atio_ring_ptr++;
- pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
}
wmb();
}
/* Adjust ring index */
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
@@ -6802,19 +6809,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (!QLA_TGT_MODE_ENABLED())
return;
- WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
- WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
- RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+ wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
+ wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
+ rd_reg_dword(ISP_ATIO_Q_OUT(vha));
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
if (IS_QLA2071(ha)) {
/* 4 ports Baker: Enable Interrupt Handshake */
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
} else {
icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= ~BIT_26;
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
}
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
@@ -6824,7 +6831,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
/* INTx|MSI */
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
icb->msix_atio = 0;
- icb->firmware_options_2 |= BIT_26;
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"%s: Use INTx for ATIOQ.\n", __func__);
}
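/*
 * Illustration only -- not part of the patch. The qla_target.c hunks above
 * wrap every value stored into a wire-format IOCB field in cpu_to_le16()/
 * cpu_to_le32(), and the sense-data loop uses get_unaligned_be32()/
 * put_unaligned_le32(), so the bytes sent to the firmware are the same on
 * big- and little-endian hosts. A minimal user-space sketch of that pattern;
 * wire_hdr, to_le16() and put_le32() are made-up stand-ins for the kernel
 * helpers and not part of the driver:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t to_le16(uint16_t v)          /* like cpu_to_le16() */
{
	uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
	uint16_t out;

	memcpy(&out, b, sizeof(out));        /* stored byte order is fixed */
	return out;
}

static void put_le32(uint32_t v, void *p)    /* like put_unaligned_le32() */
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };

	memcpy(p, b, sizeof(b));
}

struct wire_hdr {                            /* hypothetical 8-byte example */
	uint16_t nport_handle;               /* little-endian on the wire */
	uint16_t timeout;
	uint8_t  payload[4];
};

int main(void)
{
	struct wire_hdr h;

	h.nport_handle = to_le16(0x1234);
	h.timeout = to_le16(10);
	put_le32(0xdeadbeef, h.payload);
	printf("first byte of handle on the wire: 0x%02x\n",
	       ((uint8_t *)&h)[0]);          /* 0x34 on every host */
	return 0;
}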
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 3cf8590feeac..010f12523b2a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -135,37 +135,37 @@ struct nack_to_isp {
uint8_t entry_status; /* Entry Status. */
union {
struct {
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
uint8_t target_id;
uint8_t reserved_1;
- uint16_t flags;
- uint16_t resp_code;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
- uint16_t srr_reject_code;
+ __le16 flags;
+ __le16 resp_code;
+ __le16 status;
+ __le16 task_flags;
+ __le16 seq_id;
+ __le16 srr_rx_id;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
+ __le16 srr_reject_code;
uint8_t srr_reject_vendor_uniq;
uint8_t srr_reject_code_expl;
uint8_t reserved_2[24];
} isp2x;
struct {
uint32_t handle;
- uint16_t nport_handle;
+ __le16 nport_handle;
uint16_t reserved_1;
- uint16_t flags;
- uint16_t srr_rx_id;
- uint16_t status;
+ __le16 flags;
+ __le16 srr_rx_id;
+ __le16 status;
uint8_t status_subcode;
uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_flags;
+ __le32 exchange_address;
+ __le32 srr_rel_offs;
+ __le16 srr_ui;
+ __le16 srr_flags;
uint8_t reserved_4[19];
uint8_t vp_index;
uint8_t srr_reject_vendor_uniq;
@@ -175,7 +175,7 @@ struct nack_to_isp {
} isp24;
} u;
uint8_t reserved[2];
- uint16_t ox_id;
+ __le16 ox_id;
} __packed;
#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
@@ -206,16 +206,16 @@ struct ctio_to_2xxx {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
- uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t relative_offset;
- uint32_t residual;
- uint16_t reserved_1[3];
- uint16_t scsi_status;
- uint32_t transfer_length;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
+ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 relative_offset;
+ __le32 residual;
+ __le16 reserved_1[3];
+ __le16 scsi_status;
+ __le32 transfer_length;
struct dsd32 dsd[3];
} __packed;
#define ATIO_PATH_INVALID 0x07
@@ -257,7 +257,7 @@ struct fcp_hdr {
uint16_t seq_cnt;
__be16 ox_id;
uint16_t rx_id;
- uint32_t parameter;
+ __le32 parameter;
} __packed;
struct fcp_hdr_le {
@@ -267,12 +267,12 @@ struct fcp_hdr_le {
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
- uint16_t seq_cnt;
+ __le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- uint16_t rx_id;
- uint16_t ox_id;
- uint32_t parameter;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 parameter;
} __packed;
#define F_CTL_EXCH_CONTEXT_RESP BIT_23
@@ -306,7 +306,7 @@ struct atio7_fcp_cmnd {
* BUILD_BUG_ON in qlt_init().
*/
uint8_t add_cdb[4];
- /* uint32_t data_length; */
+ /* __le32 data_length; */
} __packed;
/*
@@ -316,31 +316,31 @@ struct atio7_fcp_cmnd {
struct atio_from_isp {
union {
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
- uint32_t sys_define_2; /* System defined. */
+ __le32 sys_define_2; /* System defined. */
target_id_t target;
- uint16_t rx_id;
- uint16_t flags;
- uint16_t status;
+ __le16 rx_id;
+ __le16 flags;
+ __le16 status;
uint8_t command_ref;
uint8_t task_codes;
uint8_t task_flags;
uint8_t execution_codes;
uint8_t cdb[MAX_CMDSZ];
- uint32_t data_length;
- uint16_t lun;
+ __le32 data_length;
+ __le16 lun;
uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
- uint16_t reserved_32[6];
- uint16_t ox_id;
+ __le16 reserved_32[6];
+ __le16 ox_id;
} isp2x;
struct {
- uint16_t entry_hdr;
+ __le16 entry_hdr;
uint8_t fcp_cmnd_len_low;
uint8_t fcp_cmnd_len_high:4;
uint8_t attr:4;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
struct fcp_hdr fcp_hdr;
struct atio7_fcp_cmnd fcp_cmnd;
@@ -352,7 +352,7 @@ struct atio_from_isp {
#define FCP_CMD_LENGTH_MASK 0x0fff
#define FCP_CMD_LENGTH_MIN 0x38
uint8_t data[56];
- uint32_t signature;
+ __le32 signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
@@ -395,36 +395,36 @@ struct ctio7_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t nport_handle;
+ __le16 nport_handle;
#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags;
le_id_t initiator_id;
uint8_t reserved;
- uint32_t exchange_addr;
+ __le32 exchange_addr;
union {
struct {
- uint16_t reserved1;
+ __le16 reserved1;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint32_t relative_offset;
- uint32_t reserved2;
- uint32_t transfer_length;
- uint32_t reserved3;
+ __le16 scsi_status;
+ __le32 relative_offset;
+ __le32 reserved2;
+ __le32 transfer_length;
+ __le32 reserved3;
struct dsd64 dsd;
} status0;
struct {
- uint16_t sense_length;
+ __le16 sense_length;
__le16 flags;
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
- uint16_t response_len;
- uint16_t reserved;
+ __le16 scsi_status;
+ __le16 response_len;
+ __le16 reserved;
uint8_t sense_data[24];
} status1;
} u;
@@ -440,18 +440,18 @@ struct ctio7_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System defined handle */
- uint16_t status;
- uint16_t timeout;
- uint16_t dseg_count; /* Data segment count. */
+ __le16 status;
+ __le16 timeout;
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t reserved1[5];
- uint32_t exchange_address;
- uint16_t reserved2;
- uint16_t flags;
- uint32_t residual;
- uint16_t ox_id;
- uint16_t reserved3;
- uint32_t relative_offset;
+ __le32 exchange_address;
+ __le16 reserved2;
+ __le16 flags;
+ __le32 residual;
+ __le16 ox_id;
+ __le16 reserved3;
+ __le32 relative_offset;
uint8_t reserved4[24];
} __packed;
@@ -489,29 +489,29 @@ struct ctio_crc2_to_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t nport_handle; /* N_PORT handle. */
+ __le16 nport_handle; /* N_PORT handle. */
__le16 timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
+ __le16 dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
le_id_t initiator_id; /* initiator ID */
uint8_t reserved1;
- uint32_t exchange_addr; /* rcv exchange address */
- uint16_t reserved2;
+ __le32 exchange_addr; /* rcv exchange address */
+ __le16 reserved2;
__le16 flags; /* refer to CTIO7 flags values */
- uint32_t residual;
+ __le32 residual;
__le16 ox_id;
- uint16_t scsi_status;
+ __le16 scsi_status;
__le32 relative_offset;
- uint32_t reserved5;
+ __le32 reserved5;
__le32 transfer_length; /* total fc transfer length */
- uint32_t reserved6;
+ __le32 reserved6;
__le64 crc_context_address __packed; /* Data segment address. */
- uint16_t crc_context_len; /* Data segment length. */
- uint16_t reserved_1; /* MUST be set to 0. */
+ __le16 crc_context_len; /* Data segment length. */
+ __le16 reserved_1; /* MUST be set to 0. */
};
/* CTIO Type CRC_x Status IOCB */
@@ -522,20 +522,20 @@ struct ctio_crc_from_fw {
uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */
- uint16_t status;
- uint16_t timeout; /* Command timeout. */
- uint16_t dseg_count; /* Data segment count. */
- uint32_t reserved1;
- uint16_t state_flags;
+ __le16 status;
+ __le16 timeout; /* Command timeout. */
+ __le16 dseg_count; /* Data segment count. */
+ __le32 reserved1;
+ __le16 state_flags;
#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
- uint32_t exchange_address; /* rcv exchange address */
- uint16_t reserved2;
- uint16_t flags;
- uint32_t resid_xfer_length;
- uint16_t ox_id;
+ __le32 exchange_address; /* rcv exchange address */
+ __le16 reserved2;
+ __le16 flags;
+ __le32 resid_xfer_length;
+ __le16 ox_id;
uint8_t reserved3[12];
- uint16_t runt_guard; /* reported runt blk guard */
+ __le16 runt_guard; /* reported runt blk guard */
uint8_t actual_dif[8];
uint8_t expected_dif[8];
} __packed;
@@ -558,29 +558,29 @@ struct abts_recv_from_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint8_t reserved_1[6];
- uint16_t nport_handle;
+ __le16 nport_handle;
uint8_t reserved_2[2];
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[16];
- uint32_t exchange_addr_to_abort;
+ __le32 exchange_addr_to_abort;
} __packed;
#define ABTS_PARAM_ABORT_SEQ BIT_0
struct ba_acc_le {
- uint16_t reserved;
+ __le16 reserved;
uint8_t seq_id_last;
uint8_t seq_id_valid;
#define SEQ_ID_VALID 0x80
#define SEQ_ID_INVALID 0x00
- uint16_t rx_id;
- uint16_t ox_id;
- uint16_t high_seq_cnt;
- uint16_t low_seq_cnt;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le16 high_seq_cnt;
+ __le16 low_seq_cnt;
} __packed;
struct ba_rjt_le {
@@ -604,21 +604,21 @@ struct abts_resp_to_24xx {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t reserved_1;
- uint16_t nport_handle;
- uint16_t control_flags;
+ __le16 reserved_1;
+ __le16 nport_handle;
+ __le16 control_flags;
#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
uint8_t vp_index;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
union {
struct ba_acc_le ba_acct;
struct ba_rjt_le ba_rjt;
} __packed payload;
- uint32_t reserved_4;
- uint32_t exchange_addr_to_abort;
+ __le32 reserved_4;
+ __le32 exchange_addr_to_abort;
} __packed;
/*
@@ -634,21 +634,21 @@ struct abts_resp_from_24xx_fw {
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
uint32_t handle;
- uint16_t compl_status;
+ __le16 compl_status;
#define ABTS_RESP_COMPL_SUCCESS 0
#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
- uint16_t nport_handle;
- uint16_t reserved_1;
+ __le16 nport_handle;
+ __le16 reserved_1;
uint8_t reserved_2;
uint8_t reserved_3:4;
uint8_t sof_type:4;
- uint32_t exchange_address;
+ __le32 exchange_address;
struct fcp_hdr_le fcp_hdr_le;
uint8_t reserved_4[8];
- uint32_t error_subcode1;
+ __le32 error_subcode1;
#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
- uint32_t error_subcode2;
- uint32_t exchange_addr_to_abort;
+ __le32 error_subcode2;
+ __le32 exchange_addr_to_abort;
} __packed;
/********************************************************************\
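/*
 * Illustration only -- not part of the patch. The qla_target.h hunks above
 * retype on-the-wire fields from uint16_t/uint32_t to __le16/__le32. The
 * run-time layout is unchanged; the point is that under sparse ("make C=1")
 * these types carry a "bitwise" attribute, so storing a CPU-order value
 * without cpu_to_le16()/cpu_to_le32() is flagged at build time. A minimal
 * user-space sketch of the same mechanism; wire_le16, demo_iocb and
 * to_wire16() are invented for the example:
 */
#include <stdint.h>

#ifdef __CHECKER__                       /* defined when sparse parses the file */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint16_t __bitwise wire_le16;    /* analogous to the kernel's __le16 */

static inline wire_le16 to_wire16(uint16_t v)
{
	/* real code byte-swaps on big-endian hosts; identity sketch here */
	return (__force wire_le16)v;
}

struct demo_iocb {
	wire_le16 nport_handle;          /* little-endian on the wire */
};

int main(void)
{
	struct demo_iocb iocb;

	iocb.nport_handle = to_wire16(0x1234);  /* clean under sparse */
	/* iocb.nport_handle = 0x1234; */       /* sparse would warn: different base types */
	return 0;
}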
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 6aeb1c3fb7a8..8dc82cfd38b2 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -12,6 +12,33 @@
#define IOBASE(vha) IOBAR(ISPREG(vha))
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
+/* hardware_lock assumed held. */
+static void
+qla27xx_write_remote_reg(struct scsi_qla_host *vha,
+ u32 addr, u32 data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ ql_dbg(ql_dbg_misc, vha, 0xd300,
+ "%s: addr/data = %xh/%xh\n", __func__, addr, data);
+
+ wrt_reg_dword(&reg->iobase_addr, 0x40);
+ wrt_reg_dword(&reg->iobase_c4, data);
+ wrt_reg_dword(&reg->iobase_window, addr);
+}
+
+void
+qla27xx_reset_mpi(scsi_qla_host_t *vha)
+{
+ ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
+ "Entered %s.\n", __func__);
+
+ qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
+ qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
+
+ vha->hw->stat.num_mpi_reset++;
+}
+
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
@@ -48,7 +75,7 @@ qla27xx_read8(void __iomem *window, void *buf, ulong *len)
uint8_t value = ~0;
if (buf) {
- value = RD_REG_BYTE(window);
+ value = rd_reg_byte(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -59,7 +86,7 @@ qla27xx_read16(void __iomem *window, void *buf, ulong *len)
uint16_t value = ~0;
if (buf) {
- value = RD_REG_WORD(window);
+ value = rd_reg_word(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -70,7 +97,7 @@ qla27xx_read32(void __iomem *window, void *buf, ulong *len)
uint32_t value = ~0;
if (buf) {
- value = RD_REG_DWORD(window);
+ value = rd_reg_dword(window);
}
qla27xx_insert32(value, buf, len);
}
@@ -99,7 +126,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
if (buf) {
void __iomem *window = (void __iomem *)reg + offset;
- WRT_REG_DWORD(window, data);
+ wrt_reg_dword(window, data);
}
}
@@ -892,9 +919,9 @@ static void
qla27xx_firmware_info(struct scsi_qla_host *vha,
struct qla27xx_fwdt_template *tmp)
{
- tmp->firmware_version[0] = vha->hw->fw_major_version;
- tmp->firmware_version[1] = vha->hw->fw_minor_version;
- tmp->firmware_version[2] = vha->hw->fw_subminor_version;
+ tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version);
+ tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version);
+ tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version);
tmp->firmware_version[3] = cpu_to_le32(
vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes);
tmp->firmware_version[4] = cpu_to_le32(
@@ -998,14 +1025,65 @@ qla27xx_fwdt_template_valid(void *p)
}
void
-qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
+qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
+ bool need_mpi_reset = true;
#ifndef __CHECKER__
if (!hardware_locked)
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif
+ if (!vha->hw->mpi_fw_dump) {
+ ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
+ } else if (vha->hw->mpi_fw_dumped) {
+ ql_log(ql_log_warn, vha, 0x02f4,
+ "-> MPI firmware already dumped (%p) -- ignoring request\n",
+ vha->hw->mpi_fw_dump);
+ } else {
+ struct fwdt *fwdt = &vha->hw->fwdt[1];
+ ulong len;
+ void *buf = vha->hw->mpi_fw_dump;
+
+ ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0x02f6,
+ "-> fwdt1 no template\n");
+ goto bailout;
+ }
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ goto bailout;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0x02f7,
+ "-> fwdt1 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ } else {
+ need_mpi_reset = false;
+ }
+
+ vha->hw->mpi_fw_dump_len = len;
+ vha->hw->mpi_fw_dumped = 1;
+
+ ql_log(ql_log_warn, vha, 0x02f8,
+ "-> MPI firmware dump saved to buffer (%lu/%p)\n",
+ vha->host_no, vha->hw->mpi_fw_dump);
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+ }
+
+bailout:
+ if (need_mpi_reset)
+ qla27xx_reset_mpi(vha);
+#ifndef __CHECKER__
+ if (!hardware_locked)
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
+}
+
+void
+qla27xx_fwdump(scsi_qla_host_t *vha)
+{
+ lockdep_assert_held(&vha->hw->hardware_lock);
if (!vha->hw->fw_dump) {
ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n");
@@ -1015,42 +1093,30 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
vha->hw->fw_dump);
} else {
struct fwdt *fwdt = vha->hw->fwdt;
- uint j;
ulong len;
void *buf = vha->hw->fw_dump;
- uint count = vha->hw->fw_dump_mpi ? 2 : 1;
-
- for (j = 0; j < count; j++, fwdt++, buf += len) {
- ql_log(ql_log_warn, vha, 0xd011,
- "-> fwdt%u running...\n", j);
- if (!fwdt->template) {
- ql_log(ql_log_warn, vha, 0xd012,
- "-> fwdt%u no template\n", j);
- break;
- }
- len = qla27xx_execute_fwdt_template(vha,
- fwdt->template, buf);
- if (len == 0) {
- goto bailout;
- } else if (len != fwdt->dump_size) {
- ql_log(ql_log_warn, vha, 0xd013,
- "-> fwdt%u fwdump residual=%+ld\n",
- j, fwdt->dump_size - len);
- }
+
+ ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n");
+ if (!fwdt->template) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "-> fwdt0 no template\n");
+ return;
}
- vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump;
- vha->hw->fw_dumped = 1;
+ len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf);
+ if (len == 0) {
+ return;
+ } else if (len != fwdt->dump_size) {
+ ql_log(ql_log_warn, vha, 0xd013,
+ "-> fwdt0 fwdump residual=%+ld\n",
+ fwdt->dump_size - len);
+ }
+
+ vha->hw->fw_dump_len = len;
+ vha->hw->fw_dumped = true;
ql_log(ql_log_warn, vha, 0xd015,
"-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
-
-bailout:
- vha->hw->fw_dump_mpi = 0;
-#ifndef __CHECKER__
- if (!hardware_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-#endif
}
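/*
 * Illustration only -- not part of the patch. The qla_tmpl.c rework above
 * splits the firmware dump paths: qla27xx_mpi_fwdump() keeps the conditional
 * "take hardware_lock unless the caller already holds it" handling, while
 * qla27xx_fwdump() now simply requires the lock and documents that with
 * lockdep_assert_held(). A user-space sketch of the same calling contract
 * using pthreads; hw_lock, hw_lock_held and do_dump() are invented names:
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static bool hw_lock_held;                 /* poor man's lockdep state */

static void do_dump(void)
{
	assert(hw_lock_held);             /* like lockdep_assert_held() */
	printf("dumping with the lock held\n");
}

int main(void)
{
	pthread_mutex_lock(&hw_lock);
	hw_lock_held = true;

	do_dump();                        /* caller owns the locking policy */

	hw_lock_held = false;
	pthread_mutex_unlock(&hw_lock);
	return 0;
}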
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index bba8dc90acfb..89280b3477aa 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -27,7 +27,7 @@ struct __packed qla27xx_fwdt_template {
uint32_t saved_state[16];
uint32_t reserved_3[8];
- uint32_t firmware_version[5];
+ __le32 firmware_version[5];
};
#define TEMPLATE_TYPE_FWDUMP 99
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 1f0a185b2a95..68183a96a417 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -949,6 +949,7 @@ static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1111,6 +1112,7 @@ static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item,
atomic_set(&tpg->lport_tpg_enabled, 0);
qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ qlt_stop_phase2(vha->vha_tgt.qla_tgt);
}
return count;
@@ -1958,6 +1960,20 @@ static int __init tcm_qla2xxx_init(void)
{
int ret;
+ BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32);
+ BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64);
+ BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12);
+ BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4);
+ BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64);
+ BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);
+ BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24);
+ BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64);
+
ret = tcm_qla2xxx_register_configfs();
if (ret < 0)
return ret;
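/*
 * Illustration only -- not part of the patch. The BUILD_BUG_ON() block added
 * above pins the size of every packed wire structure at 64 bytes (or its
 * documented size), so the __le16/__le32 retyping in qla_target.h cannot
 * silently change the firmware-visible layout. In plain C11 the same
 * compile-time check can be written with _Static_assert; demo_ctio below is
 * a stand-in, not the real IOCB layout:
 */
#include <stdint.h>

struct demo_ctio {
	uint8_t  entry_type;
	uint8_t  entry_count;
	uint8_t  sys_define;
	uint8_t  entry_status;
	uint32_t handle;
	uint8_t  payload[56];
} __attribute__((packed));

/* Fails the build, not the run, if someone grows or shrinks the struct. */
_Static_assert(sizeof(struct demo_ctio) == 64, "demo_ctio must be 64 bytes");

int main(void)
{
	return 0;
}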
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5504ab11decc..5dc697ce8b5d 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -966,7 +966,7 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
"%s: No such sysfs attribute\n", __func__);
rc = -ENOSYS;
goto exit_set_chap;
- };
+ }
}
if (chap_rec.chap_type == CHAP_TYPE_IN)
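/*
 * Illustration only -- not part of the patch. The ql4_os.c hunk above drops a
 * stray semicolon after a closing brace: "};" there is a brace followed by an
 * empty statement, which is legal C but pointless and regularly flagged by
 * static checkers. Minimal before/after sketch (function names invented):
 */
void with_stray_semicolon(int rc)
{
	switch (rc) {
	default:
		break;
	};              /* empty statement after the switch */
}

void without_stray_semicolon(int rc)
{
	switch (rc) {
	default:
		break;
	}               /* no trailing semicolon needed */
}

int main(void)
{
	with_stray_semicolon(0);
	without_stray_semicolon(0);
	return 0;
}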
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index d539beef3ce8..3790e8b70bba 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -30,6 +30,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/firmware.h>
+#include <linux/pgtable.h>
#include <asm/byteorder.h>
@@ -37,7 +38,6 @@
#include <asm/dma.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4c6c448dc2df..843cccb38cb7 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7,7 +7,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2018 Douglas Gilbert
+ * Copyright (C) 2001 - 2020 Douglas Gilbert
*
* For documentation see http://sg.danny.cz/sg/sdebug26.html
*/
@@ -39,6 +39,9 @@
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
+#include <linux/random.h>
+#include <linux/xarray.h>
+#include <linux/prefetch.h>
#include <net/checksum.h>
@@ -57,8 +60,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20190125";
+#define SDEBUG_VERSION "0189" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20200421";
#define MY_NAME "scsi_debug"
@@ -91,6 +94,11 @@ static const char *sdebug_version_date = "20190125";
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define WRITE_ERROR_ASC 0xc
+#define UNALIGNED_WRITE_ASCQ 0x4
+#define WRITE_BOUNDARY_ASCQ 0x5
+#define READ_INVDATA_ASCQ 0x6
+#define READ_BOUNDARY_ASCQ 0x7
+#define INSUFF_ZONE_ASCQ 0xe
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,9 +113,12 @@ static const char *sdebug_version_date = "20190125";
#define DEF_ATO 1
#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
+#define DEF_DEV_SIZE_PRE_INIT 0
#define DEF_DEV_SIZE_MB 8
+#define DEF_ZBC_DEV_SIZE_MB 128
#define DEF_DIF 0
#define DEF_DIX 0
+#define DEF_PER_HOST_STORE false
#define DEF_D_SENSE 0
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
@@ -126,6 +137,7 @@ static const char *sdebug_version_date = "20190125";
#define DEF_PHYSBLK_EXP 0
#define DEF_OPT_XFERLEN_EXP 0
#define DEF_PTYPE TYPE_DISK
+#define DEF_RANDOM false
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
@@ -142,6 +154,11 @@ static const char *sdebug_version_date = "20190125";
#define DEF_UUID_CTL 0
#define JDELAY_OVERRIDDEN -9999
+/* Default parameters for ZBC drives */
+#define DEF_ZBC_ZONE_SIZE_MB 128
+#define DEF_ZBC_MAX_OPEN_ZONES 8
+#define DEF_ZBC_NR_CONV_ZONES 1
+
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -219,21 +236,23 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
-#define F_D_IN 1
-#define F_D_OUT 2
+/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
+#define F_D_IN 1 /* Data-in command (e.g. READ) */
+#define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
#define F_D_UNKN 8
-#define F_RL_WLUN_OK 0x10
-#define F_SKIP_UA 0x20
-#define F_DELAY_OVERR 0x40
-#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
-#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
-#define F_INV_OP 0x200
-#define F_FAKE_RW 0x400
-#define F_M_ACCESS 0x800 /* media access */
-#define F_SSU_DELAY 0x1000
-#define F_SYNC_DELAY 0x2000
-
+#define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
+#define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
+#define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
+#define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
+#define F_INV_OP 0x200 /* invalid opcode (not supported) */
+#define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
+#define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
+#define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
+#define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
+
+/* Useful combinations of the above flags */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
@@ -243,6 +262,35 @@ static const char *sdebug_version_date = "20190125";
#define SDEBUG_MAX_CMD_LEN 32
+#define SDEB_XA_NOT_IN_USE XA_MARK_1
+
+/* Zone types (zbcr05 table 25) */
+enum sdebug_z_type {
+ ZBC_ZONE_TYPE_CNV = 0x1,
+ ZBC_ZONE_TYPE_SWR = 0x2,
+ ZBC_ZONE_TYPE_SWP = 0x3,
+};
+
+/* enumeration names taken from table 26, zbcr05 */
+enum sdebug_z_cond {
+ ZBC_NOT_WRITE_POINTER = 0x0,
+ ZC1_EMPTY = 0x1,
+ ZC2_IMPLICIT_OPEN = 0x2,
+ ZC3_EXPLICIT_OPEN = 0x3,
+ ZC4_CLOSED = 0x4,
+ ZC6_READ_ONLY = 0xd,
+ ZC5_FULL = 0xe,
+ ZC7_OFFLINE = 0xf,
+};
+
+struct sdeb_zone_state { /* ZBC: per zone state */
+ enum sdebug_z_type z_type;
+ enum sdebug_z_cond z_cond;
+ bool z_non_seq_resource;
+ unsigned int z_size;
+ sector_t z_start;
+ sector_t z_wp;
+};
struct sdebug_dev_info {
struct list_head dev_list;
@@ -255,15 +303,36 @@ struct sdebug_dev_info {
atomic_t num_in_q;
atomic_t stopped;
bool used;
+
+ /* For ZBC devices */
+ enum blk_zoned_model zmodel;
+ unsigned int zsize;
+ unsigned int zsize_shift;
+ unsigned int nr_zones;
+ unsigned int nr_conv_zones;
+ unsigned int nr_imp_open;
+ unsigned int nr_exp_open;
+ unsigned int nr_closed;
+ unsigned int max_open;
+ struct sdeb_zone_state *zstate;
};
struct sdebug_host_info {
struct list_head host_list;
+ int si_idx; /* sdeb_store_info (per host) xarray index */
struct Scsi_Host *shost;
struct device dev;
struct list_head dev_info_list;
};
+/* There is an xarray of pointers to this struct's objects, one per host */
+struct sdeb_store_info {
+ rwlock_t macc_lck; /* for atomic media access on this store */
+ u8 *storep; /* user data storage (ram) */
+ struct t10_pi_tuple *dif_storep; /* protection info */
+ void *map_storep; /* provisioning map */
+};
+
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
@@ -339,7 +408,7 @@ enum sdeb_opcode_index {
SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
- SDEB_I_VERIFY = 16, /* 10 only */
+ SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
@@ -352,7 +421,10 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 26, /* 10, 16 */
SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
SDEB_I_COMP_WRITE = 28,
- SDEB_I_LAST_ELEMENT = 29, /* keep this last (previous + 1) */
+ SDEB_I_PRE_FETCH = 29, /* 10, 16 */
+ SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
+ SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
+ SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
};
@@ -368,7 +440,7 @@ static const unsigned char opcode_ind_arr[256] = {
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
- 0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
+ 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
@@ -382,8 +454,10 @@ static const unsigned char opcode_ind_arr[256] = {
0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
- SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
+ SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
+ 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
+ SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
@@ -424,11 +498,25 @@ static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+
+static int sdebug_do_add_host(bool mk_new_store);
+static int sdebug_add_host_helper(int per_host_idx);
+static void sdebug_do_remove_host(bool the_end);
+static int sdebug_add_store(void);
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
+static void sdebug_erase_all_stores(bool apart_from_first);
/*
* The following are overflow arrays for cdbs that "hit" the same index in
@@ -468,6 +556,12 @@ static const struct opcode_info_t write_iarr[] = {
0xbf, 0xc7, 0, 0, 0, 0} },
};
+static const struct opcode_info_t verify_iarr[] = {
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
+ NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+};
+
static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
@@ -514,11 +608,35 @@ static const struct opcode_info_t sync_cache_iarr[] = {
0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
};
+static const struct opcode_info_t pre_fetch_iarr[] = {
+ {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
+};
+
+static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
+ {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
+ {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
+ {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
+ {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
+ {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
+ {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
+};
+
+static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
+ {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
+ {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
+};
+
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
* plus the terminating elements for logic that scans this table such as
* REPORT SUPPORTED OPERATION CODES. */
-static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
+static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -568,9 +686,10 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(verify_iarr), 0x8f, 0,
+ F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
+ verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
{ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
{32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
@@ -609,17 +728,31 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
-
-/* 29 */
+ {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
+ resp_pre_fetch, pre_fetch_iarr,
+ {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} }, /* PRE-FETCH (10) */
+
+/* 30 */
+ {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
+ resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE */
+ {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
+ {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
+ resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES */
+ {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
+/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static int sdebug_add_host = DEF_NUM_HOST;
+static int sdebug_num_hosts;
+static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
-static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
static int sdebug_dif = DEF_DIF;
static int sdebug_dix = DEF_DIX;
static int sdebug_dsense = DEF_D_SENSE;
@@ -656,6 +789,8 @@ static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
static int sdebug_uuid_ctl = DEF_UUID_CTL;
+static bool sdebug_random = DEF_RANDOM;
+static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
@@ -666,6 +801,9 @@ static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics = DEF_STATISTICS;
static bool sdebug_wp;
+/* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
+static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
+static char *sdeb_zbc_model_s;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -679,9 +817,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
-static unsigned char *fake_storep; /* ramdisk storage */
-static struct t10_pi_tuple *dif_storep; /* protection info */
-static void *map_storep; /* provisioning map */
+static struct xarray per_store_arr;
+static struct xarray *per_store_ap = &per_store_arr;
+static int sdeb_first_idx = -1; /* invalid index ==> none created */
+static int sdeb_most_recent_idx = -1;
+static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
static unsigned long map_size;
static int num_aborts;
@@ -693,10 +833,19 @@ static int dix_writes;
static int dix_reads;
static int dif_errors;
+/* ZBC global data */
+static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
+static int sdeb_zbc_zone_size_mb;
+static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
+static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
+
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
+static DEFINE_RWLOCK(atomic_rw2);
+
+static rwlock_t *ramdisk_lck_a[2];
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
@@ -717,6 +866,8 @@ static const int illegal_condition_result =
static const int device_qfull_result =
(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+static const int condition_met_result = SAM_STAT_CONDITION_MET;
+
/* Only do the extra work involved in logical block provisioning if one or
* more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
@@ -728,18 +879,25 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
-static void *lba2fake_store(unsigned long long lba)
+static void *lba2fake_store(struct sdeb_store_info *sip,
+ unsigned long long lba)
{
- lba = do_div(lba, sdebug_store_sectors);
+ struct sdeb_store_info *lsip = sip;
- return fake_storep + lba * sdebug_sector_size;
+ lba = do_div(lba, sdebug_store_sectors);
+ if (!sip || !sip->storep) {
+ WARN_ON_ONCE(true);
+ lsip = xa_load(per_store_ap, 0); /* should never be NULL */
+ }
+ return lsip->storep + lba * sdebug_sector_size;
}
-static struct t10_pi_tuple *dif_store(sector_t sector)
+static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
+ sector_t sector)
{
sector = sector_div(sector, sdebug_store_sectors);
- return dif_storep + sector;
+ return sip->dif_storep + sector;
}
static void sdebug_max_tgts_luns(void)
@@ -1041,7 +1199,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
__func__, off_dst, scsi_bufflen(scp), act_len,
scsi_get_resid(scp));
n = scsi_bufflen(scp) - (off_dst + act_len);
- scsi_set_resid(scp, min(scsi_get_resid(scp), n));
+ scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
return 0;
}
@@ -1354,13 +1512,15 @@ static int inquiry_vpd_b0(unsigned char *arr)
}
/* Block device characteristics VPD page (SBC-3) */
-static int inquiry_vpd_b1(unsigned char *arr)
+static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
memset(arr, 0, 0x3c);
arr[0] = 0;
arr[1] = 1; /* non rotating medium (e.g. solid state) */
arr[2] = 0;
arr[3] = 5; /* less than 1.8" */
+ if (devip->zmodel == BLK_ZONED_HA)
+ arr[4] = 1 << 4; /* zoned field = 01b */
return 0x3c;
}
@@ -1384,6 +1544,26 @@ static int inquiry_vpd_b2(unsigned char *arr)
return 0x4;
}
+/* Zoned block device characteristics VPD page (ZBC mandatory) */
+static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
+{
+ memset(arr, 0, 0x3c);
+ arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
+ /*
+ * Set Optimal number of open sequential write preferred zones and
+ * Optimal number of non-sequentially written sequential write
+ * preferred zones fields to 'not reported' (0xffffffff). Leave other
+ * fields set to zero, apart from Max. number of open swrz_s field.
+ */
+ put_unaligned_be32(0xffffffff, &arr[4]);
+ put_unaligned_be32(0xffffffff, &arr[8]);
+ if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
+ put_unaligned_be32(devip->max_open, &arr[12]);
+ else
+ put_unaligned_be32(0xffffffff, &arr[12]);
+ return 0x3c;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -1393,13 +1573,15 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char *arr;
unsigned char *cmd = scp->cmnd;
int alloc_len, n, ret;
- bool have_wlun, is_disk;
+ bool have_wlun, is_disk, is_zbc, is_disk_zbc;
alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
is_disk = (sdebug_ptype == TYPE_DISK);
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ is_disk_zbc = (is_disk || is_zbc);
have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
pq_pdt = TYPE_WLUN; /* present, wlun */
@@ -1437,11 +1619,14 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0x86; /* extended inquiry */
arr[n++] = 0x87; /* mode page policy */
arr[n++] = 0x88; /* SCSI ports */
- if (is_disk) { /* SBC only */
+ if (is_disk_zbc) { /* SBC or ZBC */
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits */
arr[n++] = 0xb1; /* Block characteristics */
- arr[n++] = 0xb2; /* Logical Block Prov */
+ if (is_disk)
+ arr[n++] = 0xb2; /* LB Provisioning */
+ if (is_zbc)
+ arr[n++] = 0xb6; /* ZB dev. char. */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
@@ -1480,19 +1665,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (0x88 == cmd[2]) { /* SCSI Ports */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
- } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
+ } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
- } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
+ } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
- } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
+ } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_vpd_b1(&arr[4]);
+ arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
+ } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
+ arr[1] = cmd[2]; /*sanity */
+ arr[3] = inquiry_vpd_b6(devip, &arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -1530,10 +1718,13 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
} else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
put_unaligned_be16(0x525, arr + n);
n += 2;
+ } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
+ put_unaligned_be16(0x624, arr + n);
+ n += 2;
}
put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
ret = fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_LONG_INQ_SZ));
+ min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
return ret;
}
@@ -1688,7 +1879,7 @@ static int resp_readcap16(struct scsi_cmnd *scp,
}
return fill_from_dev_buffer(scp, arr,
- min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
+ min_t(int, alloc_len, SDEBUG_READCAP16_ARR_SZ));
}
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
@@ -1762,9 +1953,9 @@ static int resp_report_tgtpgs(struct scsi_cmnd *scp,
* - The constructed command length
* - The maximum array size
*/
- rlen = min(alen,n);
+ rlen = min_t(int, alen, n);
ret = fill_from_dev_buffer(scp, arr,
- min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
+ min_t(int, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
kfree(arr);
return ret;
}
@@ -2119,7 +2310,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -2128,7 +2319,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
msense_6 = (MODE_SENSE == cmd[0]);
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
- if (is_disk && !dbd)
+ is_zbc = (devip->zmodel != BLK_ZONED_NONE);
+ if ((is_disk || is_zbc) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2140,8 +2332,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
(devip->target * 1000) - 3;
- /* for disks set DPOFUA bit and clear write protect (WP) bit */
- if (is_disk) {
+ /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
+ if (is_disk || is_zbc) {
dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
if (sdebug_wp)
dev_spec |= 0x80;
@@ -2201,7 +2393,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
bad_pcode = true;
break;
case 0x8: /* Caching page, direct access */
- if (is_disk) {
+ if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
} else
@@ -2239,6 +2431,9 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
target);
len += resp_caching_pg(ap + len, pcontrol,
target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol,
+ target);
}
len += resp_ctrl_m_pg(ap + len, pcontrol, target);
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2266,7 +2461,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
- return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
+ return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
}
#define SDEBUG_MAX_MSELECT_SZ 512
@@ -2451,14 +2646,217 @@ static int resp_log_sense(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
+ len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
return fill_from_dev_buffer(scp, arr,
- min(len, SDEBUG_MAX_INQ_ARR_SZ));
+ min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
+}
+
+static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
+{
+ return devip->nr_zones != 0;
+}
+
+static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
+ unsigned long long lba)
+{
+ return &devip->zstate[lba >> devip->zsize_shift];
+}
+
+static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
+{
+ return zsp->z_type == ZBC_ZONE_TYPE_CNV;
+}
+
+static void zbc_close_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
+ return;
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ devip->nr_imp_open--;
+ else
+ devip->nr_exp_open--;
+
+ if (zsp->z_wp == zsp->z_start) {
+ zsp->z_cond = ZC1_EMPTY;
+ } else {
+ zsp->z_cond = ZC4_CLOSED;
+ devip->nr_closed++;
+ }
+}
+
+static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
+ zbc_close_zone(devip, zsp);
+ return;
+ }
+ }
+}
+
+static void zbc_open_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool explicit)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
+ (!explicit && zc == ZC2_IMPLICIT_OPEN))
+ return;
+
+ /* Close an implicit open zone if necessary */
+ if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ else if (devip->max_open &&
+ devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
+ zbc_close_imp_open_zone(devip);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ if (explicit) {
+ zsp->z_cond = ZC3_EXPLICIT_OPEN;
+ devip->nr_exp_open++;
+ } else {
+ zsp->z_cond = ZC2_IMPLICIT_OPEN;
+ devip->nr_imp_open++;
+ }
+}
+
+static void zbc_inc_wp(struct sdebug_dev_info *devip,
+ unsigned long long lba, unsigned int num)
+{
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ zsp->z_wp += num;
+ if (zsp->z_wp >= zend)
+ zsp->z_cond = ZC5_FULL;
+ return;
+ }
+
+ while (num) {
+ if (lba != zsp->z_wp)
+ zsp->z_non_seq_resource = true;
+
+ end = lba + num;
+ if (end >= zend) {
+ n = zend - lba;
+ zsp->z_wp = zend;
+ } else if (end > zsp->z_wp) {
+ n = num;
+ zsp->z_wp = end;
+ } else {
+ n = num;
+ }
+ if (zsp->z_wp >= zend)
+ zsp->z_cond = ZC5_FULL;
+
+ num -= n;
+ lba += n;
+ if (num) {
+ zsp++;
+ zend = zsp->z_start + zsp->z_size;
+ }
+ }
}
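/*
 * Illustration only -- not part of the patch. The scsi_debug zone helpers
 * added above model a ZBC write pointer: a sequential zone starts EMPTY, is
 * implicitly opened by the first write, its write pointer advances with each
 * write, and the zone becomes FULL when the pointer reaches the zone end;
 * unaligned writes are rejected. A stripped-down user-space sketch of that
 * life cycle (struct and function names simplified and invented):
 */
#include <stdbool.h>
#include <stdio.h>

enum zcond { ZC_EMPTY, ZC_IMP_OPEN, ZC_FULL };

struct zone {
	unsigned long long start, size, wp;
	enum zcond cond;
};

/* advance the write pointer of a sequential-write-required zone */
static bool zone_write(struct zone *z, unsigned long long lba,
		       unsigned int num)
{
	if (lba != z->wp)                  /* unaligned write -> reject */
		return false;
	if (z->cond == ZC_EMPTY)
		z->cond = ZC_IMP_OPEN;     /* implicit open, as in the patch */
	z->wp += num;
	if (z->wp >= z->start + z->size)
		z->cond = ZC_FULL;
	return true;
}

int main(void)
{
	struct zone z = { .start = 0, .size = 8, .wp = 0, .cond = ZC_EMPTY };

	zone_write(&z, 0, 4);              /* EMPTY -> IMPLICIT OPEN */
	zone_write(&z, 4, 4);              /* reaches the end -> FULL */
	printf("cond=%d wp=%llu\n", z.cond, z.wp);   /* prints cond=2 wp=8 */
	return 0;
}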
-static inline int check_device_access_params(struct scsi_cmnd *scp,
- unsigned long long lba, unsigned int num, bool write)
+static int check_zbc_access_params(struct scsi_cmnd *scp,
+ unsigned long long lba, unsigned int num, bool write)
{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+ struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
+ struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
+
+ if (!write) {
+ if (devip->zmodel == BLK_ZONED_HA)
+ return 0;
+ /* For host-managed, reads cannot cross zone types boundaries */
+ if (zsp_end != zsp &&
+ zbc_zone_is_conv(zsp) &&
+ !zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ READ_INVDATA_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ /* No restrictions for writes within conventional zones */
+ if (zbc_zone_is_conv(zsp)) {
+ if (!zbc_zone_is_conv(zsp_end)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ return 0;
+ }
+
+ if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ /* Writes cannot cross sequential zone boundaries */
+ if (zsp_end != zsp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ WRITE_BOUNDARY_ASCQ);
+ return check_condition_result;
+ }
+ /* Cannot write full zones */
+ if (zsp->z_cond == ZC5_FULL) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ return check_condition_result;
+ }
+ /* Writes must be aligned to the zone WP */
+ if (lba != zsp->z_wp) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ LBA_OUT_OF_RANGE,
+ UNALIGNED_WRITE_ASCQ);
+ return check_condition_result;
+ }
+ }
+
+ /* Handle implicit open of closed and empty zones */
+ if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
+ if (devip->max_open &&
+ devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT,
+ INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ return check_condition_result;
+ }
+ zbc_open_zone(devip, zsp, false);
+ }
+
+ return 0;
+}
+
+static inline int check_device_access_params
+ (struct scsi_cmnd *scp, unsigned long long lba,
+ unsigned int num, bool write)
+{
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
+
if (lba + num > sdebug_capacity) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
@@ -2473,17 +2871,37 @@ static inline int check_device_access_params(struct scsi_cmnd *scp,
mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
return check_condition_result;
}
+ if (sdebug_dev_is_zoned(devip))
+ return check_zbc_access_params(scp, lba, num, write);
+
return 0;
}
+/*
+ * Note: if BUG_ON() fires it usually indicates a problem with the parser
+ * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
+ * that access any of the "stores" in struct sdeb_store_info should call this
+ * function with bug_if_fake_rw set to true.
+ */
+static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
+ bool bug_if_fake_rw)
+{
+ if (sdebug_fake_rw) {
+ BUG_ON(bug_if_fake_rw); /* See note above */
+ return NULL;
+ }
+ return xa_load(per_store_ap, devip->sdbg_host->si_idx);
+}
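
A minimal sketch of the calling convention described in the note above (resp_example is an invented name; the helpers, lock and error handling mirror resp_read_dt0() further down in this patch):

	static int resp_example(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
	{
		/* true: the store (sip->storep) is accessed via do_device_access() */
		struct sdeb_store_info *sip = devip2sip(devip, true);
		rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
		int ret;

		/* check_device_access_params() would normally be called first */
		read_lock(macc_lckp);
		ret = do_device_access(sip, scp, 0 /* sg_skip */, 0 /* lba */, 1, false);
		read_unlock(macc_lckp);
		return (ret == -1) ? DID_ERROR << 16 : 0;
	}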
+
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
- u32 num, bool do_write)
+static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
+ u32 sg_skip, u64 lba, u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
- struct scsi_data_buffer *sdb = &scmd->sdb;
enum dma_data_direction dir;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ u8 *fsp;
if (do_write) {
dir = DMA_TO_DEVICE;
@@ -2492,24 +2910,25 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
dir = DMA_FROM_DEVICE;
}
- if (!sdb->length)
+ if (!sdb->length || !sip)
return 0;
- if (scmd->sc_data_direction != dir)
+ if (scp->sc_data_direction != dir)
return -1;
+ fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
if (block + num > sdebug_store_sectors)
rest = block + num - sdebug_store_sectors;
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep + (block * sdebug_sector_size),
+ fsp + (block * sdebug_sector_size),
(num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep, rest * sdebug_sector_size,
+ fsp, rest * sdebug_sector_size,
sg_skip + ((num - rest) * sdebug_sector_size),
do_write);
}
@@ -2517,34 +2936,49 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
return ret;
}
-/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into lba2fake_store(lba,num) and return true. If comparison fails then
+/* Returns number of bytes copied or -1 if error. */
+static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
+{
+ struct scsi_data_buffer *sdb = &scp->sdb;
+
+ if (!sdb->length)
+ return 0;
+ if (scp->sc_data_direction != DMA_TO_DEVICE)
+ return -1;
+ return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
+ num * sdebug_sector_size, 0, true);
+}
+
+/* If sip->storep+lba compares equal to arr(num), then copy top half of
+ * arr into sip->storep+lba (unless compare_only is set) and return true.
+ * If the comparison fails then
* return false. */
-static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
+static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
+ const u8 *arr, bool compare_only)
{
bool res;
u64 block, rest = 0;
u32 store_blks = sdebug_store_sectors;
u32 lb_size = sdebug_sector_size;
+ u8 *fsp = sip->storep;
block = do_div(lba, store_blks);
if (block + num > store_blks)
rest = block + num - store_blks;
- res = !memcmp(fake_storep + (block * lb_size), arr,
- (num - rest) * lb_size);
+ res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (!res)
return res;
if (rest)
- res = memcmp(fake_storep, arr + ((num - rest) * lb_size),
+ res = memcmp(fsp, arr + ((num - rest) * lb_size),
rest * lb_size);
if (!res)
return res;
+ if (compare_only)
+ return true;
arr += num * lb_size;
- memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
+ memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
if (rest)
- memcpy(fake_storep, arr + ((num - rest) * lb_size),
- rest * lb_size);
+ memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
return res;
}
@@ -2587,24 +3021,27 @@ static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
return 0;
}
-static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
+static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
unsigned int sectors, bool read)
{
size_t resid;
void *paddr;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *dif_storep = sip->dif_storep;
const void *dif_store_end = dif_storep + sdebug_store_sectors;
struct sg_mapping_iter miter;
/* Bytes of protection data to copy into sgl */
resid = sectors * sizeof(*dif_storep);
- sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
- scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
- (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
+ sg_miter_start(&miter, scsi_prot_sglist(scp),
+ scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
+ (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
while (sg_miter_next(&miter) && resid > 0) {
- size_t len = min(miter.length, resid);
- void *start = dif_store(sector);
+ size_t len = min_t(size_t, miter.length, resid);
+ void *start = dif_store(sip, sector);
size_t rest = 0;
if (dif_store_end < start + len)
@@ -2630,30 +3067,33 @@ static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
sg_miter_stop(&miter);
}
-static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
+static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
unsigned int sectors, u32 ei_lba)
{
unsigned int i;
- struct t10_pi_tuple *sdt;
sector_t sector;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ struct t10_pi_tuple *sdt;
for (i = 0; i < sectors; i++, ei_lba++) {
int ret;
sector = start_sec + i;
- sdt = dif_store(sector);
+ sdt = dif_store(sip, sector);
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
+ ret = dif_verify(sdt, lba2fake_store(sip, sector), sector,
+ ei_lba);
if (ret) {
dif_errors++;
return ret;
}
}
- dif_copy_prot(SCpnt, start_sec, sectors, true);
+ dif_copy_prot(scp, start_sec, sectors, true);
dix_reads++;
return 0;
@@ -2661,14 +3101,15 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- struct sdebug_queued_cmd *sqcp;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+ u8 *cmd = scp->cmnd;
+ struct sdebug_queued_cmd *sqcp;
switch (cmd[0]) {
case READ_16:
@@ -2750,21 +3191,21 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- read_lock_irqsave(&atomic_rw, iflags);
+ read_lock(macc_lckp);
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
if (prot_ret) {
- read_unlock_irqrestore(&atomic_rw, iflags);
+ read_unlock(macc_lckp);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(scp, 0, lba, num, false);
- read_unlock_irqrestore(&atomic_rw, iflags);
+ ret = do_device_access(sip, scp, 0, lba, num, false);
+ read_unlock(macc_lckp);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2902,7 +3343,8 @@ static sector_t map_index_to_lba(unsigned long index)
return lba;
}
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int *num)
{
sector_t end;
unsigned int mapped;
@@ -2910,19 +3352,20 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
unsigned long next;
index = lba_to_map_index(lba);
- mapped = test_bit(index, map_storep);
+ mapped = test_bit(index, sip->map_storep);
if (mapped)
- next = find_next_zero_bit(map_storep, map_size, index);
+ next = find_next_zero_bit(sip->map_storep, map_size, index);
else
- next = find_next_bit(map_storep, map_size, index);
+ next = find_next_bit(sip->map_storep, map_size, index);
end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
*num = end - lba;
return mapped;
}
-static void map_region(sector_t lba, unsigned int len)
+static void map_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
@@ -2930,15 +3373,17 @@ static void map_region(sector_t lba, unsigned int len)
unsigned long index = lba_to_map_index(lba);
if (index < map_size)
- set_bit(index, map_storep);
+ set_bit(index, sip->map_storep);
lba = map_index_to_lba(index + 1);
}
}
-static void unmap_region(sector_t lba, unsigned int len)
+static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
+ unsigned int len)
{
sector_t end = lba + len;
+ u8 *fsp = sip->storep;
while (lba < end) {
unsigned long index = lba_to_map_index(lba);
@@ -2946,17 +3391,16 @@ static void unmap_region(sector_t lba, unsigned int len)
if (lba == map_index_to_lba(index) &&
lba + sdebug_unmap_granularity <= end &&
index < map_size) {
- clear_bit(index, map_storep);
+ clear_bit(index, sip->map_storep);
if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
- memset(fake_storep +
- lba * sdebug_sector_size,
+ memset(fsp + lba * sdebug_sector_size,
(sdebug_lbprz & 1) ? 0 : 0xff,
sdebug_sector_size *
sdebug_unmap_granularity);
}
- if (dif_storep) {
- memset(dif_storep + lba, 0xff,
- sizeof(*dif_storep) *
+ if (sip->dif_storep) {
+ memset(sip->dif_storep + lba, 0xff,
+ sizeof(*sip->dif_storep) *
sdebug_unmap_granularity);
}
}
@@ -2966,13 +3410,14 @@ static void unmap_region(sector_t lba, unsigned int len)
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
- u8 *cmd = scp->cmnd;
- u64 lba;
+ bool check_prot;
u32 num;
u32 ei_lba;
- unsigned long iflags;
int ret;
- bool check_prot;
+ u64 lba;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+ u8 *cmd = scp->cmnd;
switch (cmd[0]) {
case WRITE_16:
@@ -3025,26 +3470,32 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
}
+
+ write_lock(macc_lckp);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ write_unlock(macc_lckp);
return ret;
- write_lock_irqsave(&atomic_rw, iflags);
+ }
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
if (prot_ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
- write_unlock_irqrestore(&atomic_rw, iflags);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
+ write_unlock(macc_lckp);
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
@@ -3085,13 +3536,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u8 *lrdp = NULL;
u8 *up;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u8 wrprotect;
u16 lbdof, num_lrd, k;
u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
- unsigned long iflags;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
@@ -3153,7 +3605,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
goto err_out;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
@@ -3196,9 +3648,12 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
if (unlikely(scsi_debug_lbp()))
- map_region(lba, num);
+ map_region(sip, lba, num);
if (unlikely(-1 == ret)) {
ret = DID_ERROR << 16;
goto err_out_unlock;
@@ -3236,7 +3691,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
ret = 0;
err_out_unlock:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
err_out:
kfree(lrdp);
return ret;
@@ -3245,27 +3700,35 @@ err_out:
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
- int ret;
- unsigned long iflags;
+ struct scsi_device *sdp = scp->device;
+ struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
unsigned long long i;
- u32 lb_size = sdebug_sector_size;
u64 block, lbaa;
+ u32 lb_size = sdebug_sector_size;
+ int ret;
+ struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
+ scp->device->hostdata, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fs1p;
+ u8 *fsp;
+
+ write_lock(macc_lckp);
ret = check_device_access_params(scp, lba, num, true);
- if (ret)
+ if (ret) {
+ write_unlock(macc_lckp);
return ret;
-
- write_lock_irqsave(&atomic_rw, iflags);
+ }
if (unmap && scsi_debug_lbp()) {
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
goto out;
}
lbaa = lba;
block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
- fs1p = fake_storep + (block * lb_size);
+ fsp = sip->storep;
+ fs1p = fsp + (block * lb_size);
if (ndob) {
memset(fs1p, 0, lb_size);
ret = 0;
@@ -3273,7 +3736,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
@@ -3284,12 +3747,15 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
for (i = 1 ; i < num ; i++) {
lbaa = lba + i;
block = do_div(lbaa, sdebug_store_sectors);
- memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+ memmove(fsp + (block * lb_size), fs1p, lb_size);
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
+ /* If ZBC zone then bump its write pointer */
+ if (sdebug_dev_is_zoned(devip))
+ zbc_inc_wp(devip, lba, num);
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
return 0;
}
@@ -3401,12 +3867,12 @@ static int resp_comp_write(struct scsi_cmnd *scp,
{
u8 *cmd = scp->cmnd;
u8 *arr;
- u8 *fake_storep_hold;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
u64 lba;
u32 dnum;
u32 lb_size = sdebug_sector_size;
u8 num;
- unsigned long iflags;
int ret;
int retval = 0;
@@ -3435,14 +3901,9 @@ static int resp_comp_write(struct scsi_cmnd *scp,
return check_condition_result;
}
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
- /* trick do_device_access() to fetch both compare and write buffers
- * from data-in into arr. Safe (atomic) since write_lock held. */
- fake_storep_hold = fake_storep;
- fake_storep = arr;
- ret = do_device_access(scp, 0, 0, dnum, true);
- fake_storep = fake_storep_hold;
+ ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
retval = DID_ERROR << 16;
goto cleanup;
@@ -3450,15 +3911,15 @@ static int resp_comp_write(struct scsi_cmnd *scp,
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
- if (!comp_write_worker(lba, num, arr)) {
+ if (!comp_write_worker(sip, lba, num, arr, false)) {
mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
retval = check_condition_result;
goto cleanup;
}
if (scsi_debug_lbp())
- map_region(lba, num);
+ map_region(sip, lba, num);
cleanup:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
kfree(arr);
return retval;
}
@@ -3473,10 +3934,10 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
unsigned char *buf;
struct unmap_block_desc *desc;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
unsigned int i, payload_len, descriptors;
int ret;
- unsigned long iflags;
-
if (!scsi_debug_lbp())
 return 0; /* fib and say it's done */
@@ -3503,7 +3964,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
desc = (void *)&buf[8];
- write_lock_irqsave(&atomic_rw, iflags);
+ write_lock(macc_lckp);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -3513,13 +3974,13 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (ret)
goto out;
- unmap_region(lba, num);
+ unmap_region(sip, lba, num);
}
ret = 0;
out:
- write_unlock_irqrestore(&atomic_rw, iflags);
+ write_unlock(macc_lckp);
kfree(buf);
return ret;
@@ -3533,8 +3994,8 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u64 lba;
u32 alloc_len, mapped, num;
- u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
int ret;
+ u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
lba = get_unaligned_be64(cmd + 2);
alloc_len = get_unaligned_be32(cmd + 10);
@@ -3546,9 +4007,11 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
if (ret)
return ret;
- if (scsi_debug_lbp())
- mapped = map_state(lba, &num);
- else {
+ if (scsi_debug_lbp()) {
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+
+ mapped = map_state(sip, lba, &num);
+ } else {
mapped = 1;
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
@@ -3593,6 +4056,56 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
return res;
}
+/*
+ * Assuming the LBA+num_blocks is not out-of-range, this function returns
+ * CONDITION MET if the specified blocks will fit (or have already fitted)
+ * in the cache, and GOOD status otherwise. Since a disk with a big cache
+ * is modeled, CONDITION MET is always yielded. As a side effect, the
+ * function tries to bring the addressed range of the store into the
+ * cache(s) associated with the CPU(s).
+ */
+static int resp_pre_fetch(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 lba;
+ u64 block, rest = 0;
+ u32 nblks;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+ u8 *fsp = sip->storep;
+
+ if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
+ lba = get_unaligned_be32(cmd + 2);
+ nblks = get_unaligned_be16(cmd + 7);
+ } else { /* PRE-FETCH(16) */
+ lba = get_unaligned_be64(cmd + 2);
+ nblks = get_unaligned_be32(cmd + 10);
+ }
+ if (lba + nblks > sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+ if (!fsp)
+ goto fini;
+ /* PRE-FETCH spec says nothing about LBP or PI so skip them */
+ block = do_div(lba, sdebug_store_sectors);
+ if (block + nblks > sdebug_store_sectors)
+ rest = block + nblks - sdebug_store_sectors;
+
+ /* Try to bring the PRE-FETCH range into CPU's cache */
+ read_lock(macc_lckp);
+ prefetch_range(fsp + (sdebug_sector_size * block),
+ (nblks - rest) * sdebug_sector_size);
+ if (rest)
+ prefetch_range(fsp, rest * sdebug_sector_size);
+ read_unlock(macc_lckp);
+fini:
+ if (cmd[1] & 0x2)
+ res = SDEG_RES_IMMED_MASK;
+ return res | condition_met_result;
+}
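
A worked example of the CDB parsing above (the CDB bytes are invented for illustration only):

	/*
	 * PRE-FETCH(10) CDB:  34 02 00 00 10 00 00 00 08 00
	 *   cmd[0] = 0x34           PRE_FETCH opcode
	 *   cmd[1] & 0x2            IMMED bit set -> SDEG_RES_IMMED_MASK OR-ed in,
	 *                           so schedule_resp() skips the configured delay
	 *   cmd[2..5] = 0x00001000  lba   (get_unaligned_be32)
	 *   cmd[7..8] = 0x0008      nblks (get_unaligned_be16)
	 * Either way the command completes with CONDITION MET.
	 */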
+
#define RL_BUCKET_ELEMS 8
/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
@@ -3694,6 +4207,504 @@ static int resp_report_luns(struct scsi_cmnd *scp,
return res;
}
+static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ bool is_bytchk3 = false;
+ u8 bytchk;
+ int ret, j;
+ u32 vnum, a_num, off;
+ const u32 lb_size = sdebug_sector_size;
+ u64 lba;
+ u8 *arr;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_store_info *sip = devip2sip(devip, true);
+ rwlock_t *macc_lckp = &sip->macc_lck;
+
+ bytchk = (cmd[1] >> 1) & 0x3;
+ if (bytchk == 0) {
+ return 0; /* always claim internal verify okay */
+ } else if (bytchk == 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
+ return check_condition_result;
+ } else if (bytchk == 3) {
+ is_bytchk3 = true; /* 1 block sent, compared repeatedly */
+ }
+ switch (cmd[0]) {
+ case VERIFY_16:
+ lba = get_unaligned_be64(cmd + 2);
+ vnum = get_unaligned_be32(cmd + 10);
+ break;
+ case VERIFY: /* is VERIFY(10) */
+ lba = get_unaligned_be32(cmd + 2);
+ vnum = get_unaligned_be16(cmd + 7);
+ break;
+ default:
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ a_num = is_bytchk3 ? 1 : vnum;
+ /* Treat following check like one for read (i.e. no write) access */
+ ret = check_device_access_params(scp, lba, a_num, false);
+ if (ret)
+ return ret;
+
+ arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ /* Not changing store, so only need read access */
+ read_lock(macc_lckp);
+
+ ret = do_dout_fetch(scp, a_num, arr);
+ if (ret == -1) {
+ ret = DID_ERROR << 16;
+ goto cleanup;
+ } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, __func__, a_num * lb_size, ret);
+ }
+ if (is_bytchk3) {
+ for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
+ memcpy(arr + off, arr, lb_size);
+ }
+ ret = 0;
+ if (!comp_write_worker(sip, lba, vnum, arr, true)) {
+ mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
+ ret = check_condition_result;
+ goto cleanup;
+ }
+cleanup:
+ read_unlock(macc_lckp);
+ kfree(arr);
+ return ret;
+}
+
+#define RZONES_DESC_HD 64
+
+/* Report zones depending on start LBA and reporting options */
+static int resp_report_zones(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned int i, max_zones, rep_max_zones, nrz = 0;
+ int ret = 0;
+ u32 alloc_len, rep_opts, rep_len;
+ bool partial;
+ u64 lba, zs_lba;
+ u8 *arr = NULL, *desc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+ zs_lba = get_unaligned_be64(cmd + 2);
+ alloc_len = get_unaligned_be32(cmd + 10);
+ rep_opts = cmd[14] & 0x3f;
+ partial = cmd[14] & 0x80;
+
+ if (zs_lba >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ return check_condition_result;
+ }
+
+ max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
+ rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
+ max_zones);
+
+ arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+
+ read_lock(macc_lckp);
+
+ desc = arr + 64;
+ for (i = 0; i < max_zones; i++) {
+ lba = zs_lba + devip->zsize * i;
+ if (lba > sdebug_capacity)
+ break;
+ zsp = zbc_zone(devip, lba);
+ switch (rep_opts) {
+ case 0x00:
+ /* All zones */
+ break;
+ case 0x01:
+ /* Empty zones */
+ if (zsp->z_cond != ZC1_EMPTY)
+ continue;
+ break;
+ case 0x02:
+ /* Implicit open zones */
+ if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
+ continue;
+ break;
+ case 0x03:
+ /* Explicit open zones */
+ if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
+ continue;
+ break;
+ case 0x04:
+ /* Closed zones */
+ if (zsp->z_cond != ZC4_CLOSED)
+ continue;
+ break;
+ case 0x05:
+ /* Full zones */
+ if (zsp->z_cond != ZC5_FULL)
+ continue;
+ break;
+ case 0x06:
+ case 0x07:
+ case 0x10:
+ /*
+ * Read-only, offline, reset WP recommended are
+ * not emulated: no zones to report.
+ */
+ continue;
+ case 0x11:
+ /* non-seq-resource set */
+ if (!zsp->z_non_seq_resource)
+ continue;
+ break;
+ case 0x3f:
+ /* Not write pointer (conventional) zones */
+ if (!zbc_zone_is_conv(zsp))
+ continue;
+ break;
+ default:
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ INVALID_FIELD_IN_CDB, 0);
+ ret = check_condition_result;
+ goto fini;
+ }
+
+ if (nrz < rep_max_zones) {
+ /* Fill zone descriptor */
+ desc[0] = zsp->z_type;
+ desc[1] = zsp->z_cond << 4;
+ if (zsp->z_non_seq_resource)
+ desc[1] |= 1 << 1;
+ put_unaligned_be64((u64)zsp->z_size, desc + 8);
+ put_unaligned_be64((u64)zsp->z_start, desc + 16);
+ put_unaligned_be64((u64)zsp->z_wp, desc + 24);
+ desc += 64;
+ }
+
+ if (partial && nrz >= rep_max_zones)
+ break;
+
+ nrz++;
+ }
+
+ /* Report header */
+ put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
+ put_unaligned_be64(sdebug_capacity - 1, arr + 8);
+
+ rep_len = (unsigned long)desc - (unsigned long)arr;
+ ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
+
+fini:
+ read_unlock(macc_lckp);
+ kfree(arr);
+ return ret;
+}
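
A sketch of the buffer that the loop above builds, assuming two zones end up being reported (offsets come from the code, the zone count is illustrative):

	/*
	 *   arr[0..3]    = nrz * RZONES_DESC_HD = 128   (zone list length)
	 *   arr[8..15]   = sdebug_capacity - 1          (maximum LBA)
	 *   arr[64..127]   first zone descriptor: z_type, z_cond << 4 (plus the
	 *                  non-seq bit), z_size at +8, z_start at +16, z_wp at +24
	 *   arr[128..191]  second zone descriptor
	 * fill_from_dev_buffer() then copies min(alloc_len, rep_len) bytes out.
	 */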
+
+/* Logic transplanted from tcmu-runner, file_zbc.c */
+static void zbc_open_all(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp = &devip->zstate[0];
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++, zsp++) {
+ if (zsp->z_cond == ZC4_CLOSED)
+ zbc_open_zone(devip, &devip->zstate[i], true);
+ }
+}
+
+static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ enum sdebug_z_cond zc;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ /* Check if all closed zones can be opened */
+ if (devip->max_open &&
+ devip->nr_exp_open + devip->nr_closed > devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+ /* Open all closed zones */
+ zbc_open_all(devip);
+ goto fini;
+ }
+
+ /* Open the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zc = zsp->z_cond;
+ if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
+ goto fini;
+
+ if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
+ mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
+ INSUFF_ZONE_ASCQ);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ if (zc == ZC2_IMPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ zbc_open_zone(devip, zsp, true);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_close_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_close_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_close_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ struct sdeb_zone_state *zsp;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_close_all(devip);
+ goto fini;
+ }
+
+ /* Close specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_close_zone(devip, zsp);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_finish_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp, bool empty)
+{
+ enum sdebug_z_cond zc = zsp->z_cond;
+
+ if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
+ zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+ zsp->z_wp = zsp->z_start + zsp->z_size;
+ zsp->z_cond = ZC5_FULL;
+ }
+}
+
+static void zbc_finish_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_finish_zone(devip, &devip->zstate[i], false);
+}
+
+static int resp_finish_zone(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_finish_all(devip);
+ goto fini;
+ }
+
+ /* Finish the specified zone */
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_finish_zone(devip, zsp, true);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
+static void zbc_rwp_zone(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ enum sdebug_z_cond zc;
+
+ if (zbc_zone_is_conv(zsp))
+ return;
+
+ zc = zsp->z_cond;
+ if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
+ zbc_close_zone(devip, zsp);
+
+ if (zsp->z_cond == ZC4_CLOSED)
+ devip->nr_closed--;
+
+ zsp->z_non_seq_resource = false;
+ zsp->z_wp = zsp->z_start;
+ zsp->z_cond = ZC1_EMPTY;
+}
+
+static void zbc_rwp_all(struct sdebug_dev_info *devip)
+{
+ unsigned int i;
+
+ for (i = 0; i < devip->nr_zones; i++)
+ zbc_rwp_zone(devip, &devip->zstate[i]);
+}
+
+static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ int res = 0;
+ u64 z_id;
+ u8 *cmd = scp->cmnd;
+ bool all = cmd[14] & 0x01;
+ struct sdeb_store_info *sip = devip2sip(devip, false);
+ rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
+
+ if (!sdebug_dev_is_zoned(devip)) {
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
+ write_lock(macc_lckp);
+
+ if (all) {
+ zbc_rwp_all(devip);
+ goto fini;
+ }
+
+ z_id = get_unaligned_be64(cmd + 2);
+ if (z_id >= sdebug_capacity) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zsp = zbc_zone(devip, z_id);
+ if (z_id != zsp->z_start) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+ if (zbc_zone_is_conv(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ res = check_condition_result;
+ goto fini;
+ }
+
+ zbc_rwp_zone(devip, zsp);
+fini:
+ write_unlock(macc_lckp);
+ return res;
+}
+
static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
u32 tag = blk_mq_unique_tag(cmnd->request);
@@ -3799,6 +4810,92 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
static bool got_shared_uuid;
static uuid_t shared_uuid;
+static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
+{
+ struct sdeb_zone_state *zsp;
+ sector_t capacity = get_sdebug_capacity();
+ sector_t zstart = 0;
+ unsigned int i;
+
+ /*
+ * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
+ * a zone size allowing for at least 4 zones on the device. Otherwise,
+ * use the specified zone size checking that at least 2 zones can be
+ * created for the device.
+ */
+ if (!sdeb_zbc_zone_size_mb) {
+ devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ while (capacity < devip->zsize << 2 && devip->zsize >= 2)
+ devip->zsize >>= 1;
+ if (devip->zsize < 2) {
+ pr_err("Device capacity too small\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
+ pr_err("Zone size is not a power of 2\n");
+ return -EINVAL;
+ }
+ devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
+ >> ilog2(sdebug_sector_size);
+ if (devip->zsize >= capacity) {
+ pr_err("Zone size too large for device capacity\n");
+ return -EINVAL;
+ }
+ }
+
+ devip->zsize_shift = ilog2(devip->zsize);
+ devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
+
+ if (sdeb_zbc_nr_conv >= devip->nr_zones) {
+ pr_err("Number of conventional zones too large\n");
+ return -EINVAL;
+ }
+ devip->nr_conv_zones = sdeb_zbc_nr_conv;
+
+ if (devip->zmodel == BLK_ZONED_HM) {
+ /* zbc_max_open_zones can be 0, meaning "not reported" */
+ if (sdeb_zbc_max_open >= devip->nr_zones - 1)
+ devip->max_open = (devip->nr_zones - 1) / 2;
+ else
+ devip->max_open = sdeb_zbc_max_open;
+ }
+
+ devip->zstate = kcalloc(devip->nr_zones,
+ sizeof(struct sdeb_zone_state), GFP_KERNEL);
+ if (!devip->zstate)
+ return -ENOMEM;
+
+ for (i = 0; i < devip->nr_zones; i++) {
+ zsp = &devip->zstate[i];
+
+ zsp->z_start = zstart;
+
+ if (i < devip->nr_conv_zones) {
+ zsp->z_type = ZBC_ZONE_TYPE_CNV;
+ zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+ zsp->z_wp = (sector_t)-1;
+ } else {
+ if (devip->zmodel == BLK_ZONED_HM)
+ zsp->z_type = ZBC_ZONE_TYPE_SWR;
+ else
+ zsp->z_type = ZBC_ZONE_TYPE_SWP;
+ zsp->z_cond = ZC1_EMPTY;
+ zsp->z_wp = zsp->z_start;
+ }
+
+ if (zsp->z_start + devip->zsize < capacity)
+ zsp->z_size = devip->zsize;
+ else
+ zsp->z_size = capacity - zsp->z_start;
+
+ zstart += zsp->z_size;
+ }
+
+ return 0;
+}
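
To make the sizing arithmetic above concrete (a 512-byte logical block and zone_size_mb=4 are assumed purely for illustration):

	/*
	 *   zsize       = (4 * SZ_1M) >> ilog2(512) = 8192 blocks
	 *   zsize_shift = ilog2(8192)               = 13
	 *   nr_zones    = (capacity + 8191) >> 13
	 * The first zone_nr_conv zones become conventional; the remaining zones
	 * are sequential (SWR when host-managed, SWP when host-aware) and the
	 * last zone is truncated if the capacity is not a multiple of zsize.
	 */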
+
static struct sdebug_dev_info *sdebug_device_create(
struct sdebug_host_info *sdbg_host, gfp_t flags)
{
@@ -3818,6 +4915,16 @@ static struct sdebug_dev_info *sdebug_device_create(
}
}
devip->sdbg_host = sdbg_host;
+ if (sdeb_zbc_in_use) {
+ devip->zmodel = sdeb_zbc_model;
+ if (sdebug_device_create_zones(devip)) {
+ kfree(devip);
+ return NULL;
+ }
+ } else {
+ devip->zmodel = BLK_ZONED_NONE;
+ }
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
}
return devip;
@@ -4144,8 +5251,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
return SUCCESS;
}
-static void __init sdebug_build_parts(unsigned char *ramp,
- unsigned long store_size)
+static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
struct msdos_partition *pp;
int starts[SDEBUG_MAX_PARTS + 2];
@@ -4247,6 +5353,8 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
}
+#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -4258,8 +5366,10 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
- unsigned long iflags;
+ bool new_sd_dp;
int k, num_in_q, qdepth, inject;
+ unsigned long iflags;
+ u64 ns_from_boot = 0;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
@@ -4275,7 +5385,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- /* schedule the response at a later time if resources permit */
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
@@ -4334,13 +5443,17 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (sd_dp == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
+ new_sd_dp = true;
+ } else {
+ new_sd_dp = false;
}
+ if (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)
+ ns_from_boot = ktime_get_boottime_ns();
+
+ /* one of the resp_*() response functions is called here */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
if (cmnd->result & SDEG_RES_IMMED_MASK) {
- /*
- * This is the F_DELAY_OVERR case. No delay.
- */
cmnd->result &= ~SDEG_RES_IMMED_MASK;
delta_jiff = ndelay = 0;
}
@@ -4355,9 +5468,37 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
ktime_t kt;
if (delta_jiff > 0) {
- kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
- } else
- kt = ndelay;
+ u64 ns = jiffies_to_nsecs(delta_jiff);
+
+ if (sdebug_random && ns < U32_MAX) {
+ ns = prandom_u32_max((u32)ns);
+ } else if (sdebug_random) {
+ ns >>= 12; /* scale to 4 usec precision */
+ if (ns < U32_MAX) /* over 4 hours max */
+ ns = prandom_u32_max((u32)ns);
+ ns <<= 12;
+ }
+ kt = ns_to_ktime(ns);
+ } else { /* ndelay has a 4.2 second max */
+ kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
+ (u32)ndelay;
+ if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
+ u64 d = ktime_get_boottime_ns() - ns_from_boot;
+
+ if (kt <= d) { /* elapsed duration >= kt */
+ sqcp->a_cmnd = NULL;
+ atomic_dec(&devip->num_in_q);
+ clear_bit(k, sqp->in_use_bm);
+ if (new_sd_dp)
+ kfree(sd_dp);
+ /* call scsi_done() from this thread */
+ cmnd->scsi_done(cmnd);
+ return 0;
+ }
+ /* otherwise reduce kt by elapsed time */
+ kt -= d;
+ }
+ }
if (!sd_dp->init_hrt) {
sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
@@ -4370,6 +5511,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_HRT;
+ /* schedule the invocation of scsi_done() for a later time */
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
if (!sd_dp->init_wq) {
@@ -4427,31 +5569,36 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
-module_param_string(inq_vendor, sdebug_inq_vendor_id,
- sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
module_param_string(inq_product, sdebug_inq_product_id,
- sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
- sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
+ sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
+module_param_string(inq_vendor, sdebug_inq_vendor_id,
+ sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
+module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
-module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_start, sdebug_medium_error_start, int, S_IRUGO | S_IWUSR);
-module_param_named(medium_error_count, sdebug_medium_error_count, int, S_IRUGO | S_IWUSR);
+module_param_named(medium_error_count, sdebug_medium_error_count, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(medium_error_start, sdebug_medium_error_start, int,
+ S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
+module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(per_host_store, sdebug_per_host_store, bool,
+ S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
@@ -4462,20 +5609,24 @@ module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
-module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
+module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
+module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
+module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
+module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
+module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
-MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
@@ -4488,30 +5639,32 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
-MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
SDEBUG_VERSION "\")");
+MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(lbprz,
+ "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(lbprz,
- "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
-MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
+MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
+MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
+MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
@@ -4528,6 +5681,10 @@ MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
+MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
+MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
+MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];
@@ -4576,6 +5733,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
int f, j, l;
struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -4611,6 +5769,34 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
"first,last bits", f, l);
}
}
+
+ seq_printf(m, "this host_no=%d\n", host->host_no);
+ if (!xa_empty(per_store_ap)) {
+ bool niu;
+ int idx;
+ unsigned long l_idx;
+ struct sdeb_store_info *sip;
+
+ seq_puts(m, "\nhost list:\n");
+ j = 0;
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ idx = sdhp->si_idx;
+ seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
+ sdhp->shost->host_no, idx);
+ ++j;
+ }
+ seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
+ sdeb_most_recent_idx);
+ j = 0;
+ xa_for_each(per_store_ap, l_idx, sip) {
+ niu = xa_get_mark(per_store_ap, l_idx,
+ SDEB_XA_NOT_IN_USE);
+ idx = (int)l_idx;
+ seq_printf(m, " %d: idx=%d%s\n", j, idx,
+ (niu ? " not_in_use" : ""));
+ ++j;
+ }
+ }
return 0;
}
@@ -4734,7 +5920,13 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
{
int n;
+ /* Cannot change from or to TYPE_ZBC with sysfs */
+ if (sdebug_ptype == TYPE_ZBC)
+ return -EINVAL;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ if (n == TYPE_ZBC)
+ return -EINVAL;
sdebug_ptype = n;
return count;
}
@@ -4766,25 +5958,41 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n, idx;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ bool want_store = (n == 0);
+ struct sdebug_host_info *sdhp;
+
n = (n > 0);
sdebug_fake_rw = (sdebug_fake_rw > 0);
- if (sdebug_fake_rw != n) {
- if ((0 == n) && (NULL == fake_storep)) {
- unsigned long sz =
- (unsigned long)sdebug_dev_size_mb *
- 1048576;
-
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 9\n");
- return -ENOMEM;
+ if (sdebug_fake_rw == n)
+ return count; /* not transitioning so do nothing */
+
+ if (want_store) { /* 1 --> 0 transition, set up store */
+ if (sdeb_first_idx < 0) {
+ idx = sdebug_add_store();
+ if (idx < 0)
+ return idx;
+ } else {
+ idx = sdeb_first_idx;
+ xa_clear_mark(per_store_ap, idx,
+ SDEB_XA_NOT_IN_USE);
+ }
+ /* make all hosts use same store */
+ list_for_each_entry(sdhp, &sdebug_host_list,
+ host_list) {
+ if (sdhp->si_idx != idx) {
+ xa_set_mark(per_store_ap, sdhp->si_idx,
+ SDEB_XA_NOT_IN_USE);
+ sdhp->si_idx = idx;
}
}
- sdebug_fake_rw = n;
+ sdeb_most_recent_idx = idx;
+ } else { /* 0 --> 1 transition is trigger for shrink */
+ sdebug_erase_all_stores(true /* apart from first */);
}
+ sdebug_fake_rw = n;
return count;
}
return -EINVAL;
@@ -4832,6 +6040,24 @@ static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(dev_size_mb);
+static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
+}
+
+static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_per_host_store = v;
+ return count;
+}
+static DRIVER_ATTR_RW(per_host_store);
+
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
@@ -4957,6 +6183,10 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
int n;
bool changed;
+ /* Ignore capacity change for ZBC drives for now */
+ if (sdeb_zbc_in_use)
+ return -ENOTSUPP;
+
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
changed = (sdebug_virtual_gb != n);
sdebug_virtual_gb = n;
@@ -4984,26 +6214,42 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
+ /* absolute number of hosts currently active is what is shown */
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-static int sdebug_add_adapter(void);
-static void sdebug_remove_adapter(void);
-
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
if (delta_hosts > 0) {
do {
- sdebug_add_adapter();
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
+ }
} while (--delta_hosts);
} else if (delta_hosts < 0) {
do {
- sdebug_remove_adapter();
+ sdebug_do_remove_host(false);
} while (++delta_hosts);
}
return count;
@@ -5087,14 +6333,19 @@ static DRIVER_ATTR_RO(ato);
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
- ssize_t count;
+ ssize_t count = 0;
if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
- count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
- (int)map_size, map_storep);
+ if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
+ struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
+
+ if (sip)
+ count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
+ (int)map_size, sip->map_storep);
+ }
buf[count++] = '\n';
buf[count] = '\0';
@@ -5102,6 +6353,24 @@ static ssize_t map_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(map);
+static ssize_t random_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
+}
+
+static ssize_t random_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_random = v;
+ return count;
+}
+static DRIVER_ATTR_RW(random);
+
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
@@ -5178,12 +6447,51 @@ static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
}
static DRIVER_ATTR_RW(cdb_len);
+static const char * const zbc_model_strs_a[] = {
+ [BLK_ZONED_NONE] = "none",
+ [BLK_ZONED_HA] = "host-aware",
+ [BLK_ZONED_HM] = "host-managed",
+};
+
+static const char * const zbc_model_strs_b[] = {
+ [BLK_ZONED_NONE] = "no",
+ [BLK_ZONED_HA] = "aware",
+ [BLK_ZONED_HM] = "managed",
+};
+
+static const char * const zbc_model_strs_c[] = {
+ [BLK_ZONED_NONE] = "0",
+ [BLK_ZONED_HA] = "1",
+ [BLK_ZONED_HM] = "2",
+};
+
+static int sdeb_zbc_model_str(const char *cp)
+{
+ int res = sysfs_match_string(zbc_model_strs_a, cp);
+
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_b, cp);
+ if (res < 0) {
+ res = sysfs_match_string(zbc_model_strs_c, cp);
+ if (res < 0)
+ return -EINVAL;
+ }
+ }
+ return res;
+}
+
+static ssize_t zbc_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ zbc_model_strs_a[sdeb_zbc_model]);
+}
+static DRIVER_ATTR_RO(zbc);
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
directory) is that auxiliary actions can be triggered when an attribute
- is changed. For example see: sdebug_add_host_store() above.
+ is changed. For example see: add_host_store() above.
*/
static struct attribute *sdebug_drv_attrs[] = {
@@ -5203,6 +6511,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_scsi_level.attr,
&driver_attr_virtual_gb.attr,
&driver_attr_add_host.attr,
+ &driver_attr_per_host_store.attr,
&driver_attr_vpd_use_hostno.attr,
&driver_attr_sector_size.attr,
&driver_attr_statistics.attr,
@@ -5212,12 +6521,14 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_guard.attr,
&driver_attr_ato.attr,
&driver_attr_map.attr,
+ &driver_attr_random.attr,
&driver_attr_removable.attr,
&driver_attr_host_lock.attr,
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
&driver_attr_cdb_len.attr,
+ &driver_attr_zbc.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5226,11 +6537,13 @@ static struct device *pseudo_primary;
static int __init scsi_debug_init(void)
{
+ bool want_store = (sdebug_fake_rw == 0);
unsigned long sz;
- int host_to_add;
- int k;
- int ret;
+ int k, ret, hosts_to_add;
+ int idx = -1;
+ ramdisk_lck_a[0] = &atomic_rw;
+ ramdisk_lck_a[1] = &atomic_rw2;
atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
@@ -5304,6 +6617,40 @@ static int __init scsi_debug_init(void)
for (k = 0; k < submit_queues; ++k)
spin_lock_init(&sdebug_q_arr[k].qc_lock);
+ /*
+ * check for host managed zoned block device specified with
+ * ptype=0x14 or zbc=XXX.
+ */
+ if (sdebug_ptype == TYPE_ZBC) {
+ sdeb_zbc_model = BLK_ZONED_HM;
+ } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
+ k = sdeb_zbc_model_str(sdeb_zbc_model_s);
+ if (k < 0) {
+ ret = k;
+ goto free_vm;
+ }
+ sdeb_zbc_model = k;
+ switch (sdeb_zbc_model) {
+ case BLK_ZONED_NONE:
+ case BLK_ZONED_HA:
+ sdebug_ptype = TYPE_DISK;
+ break;
+ case BLK_ZONED_HM:
+ sdebug_ptype = TYPE_ZBC;
+ break;
+ default:
+ pr_err("Invalid ZBC model\n");
+ return -EINVAL;
+ }
+ }
+ if (sdeb_zbc_model != BLK_ZONED_NONE) {
+ sdeb_zbc_in_use = true;
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
+ }
+
+ if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
+ sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
if (sdebug_dev_size_mb < 1)
sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
sz = (unsigned long)sdebug_dev_size_mb * 1048576;
@@ -5326,36 +6673,6 @@ static int __init scsi_debug_init(void)
sdebug_cylinders_per = (unsigned long)sdebug_capacity /
(sdebug_sectors_per * sdebug_heads);
}
-
- if (sdebug_fake_rw == 0) {
- fake_storep = vzalloc(sz);
- if (NULL == fake_storep) {
- pr_err("out of memory, 1\n");
- ret = -ENOMEM;
- goto free_q_arr;
- }
- if (sdebug_num_parts > 0)
- sdebug_build_parts(fake_storep, sz);
- }
-
- if (sdebug_dix) {
- int dif_size;
-
- dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
- dif_storep = vmalloc(dif_size);
-
- pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
-
- if (dif_storep == NULL) {
- pr_err("out of mem. (DIX)\n");
- ret = -ENOMEM;
- goto free_vm;
- }
-
- memset(dif_storep, 0xff, dif_size);
- }
-
- /* Logical Block Provisioning */
if (scsi_debug_lbp()) {
sdebug_unmap_max_blocks =
clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
@@ -5371,26 +6688,16 @@ static int __init scsi_debug_init(void)
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
ret = -EINVAL;
- goto free_vm;
+ goto free_q_arr;
}
-
- map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- map_storep = vmalloc(array_size(sizeof(long),
- BITS_TO_LONGS(map_size)));
-
- pr_info("%lu provisioning blocks\n", map_size);
-
- if (map_storep == NULL) {
- pr_err("out of mem. (MAP)\n");
- ret = -ENOMEM;
- goto free_vm;
+ }
+ xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+ if (want_store) {
+ idx = sdebug_add_store();
+ if (idx < 0) {
+ ret = idx;
+ goto free_q_arr;
}
-
- bitmap_zero(map_storep, map_size);
-
- /* Map first 1KB for partition table */
- if (sdebug_num_parts)
- map_region(0, 2);
}
pseudo_primary = root_device_register("pseudo_0");
@@ -5410,18 +6717,28 @@ static int __init scsi_debug_init(void)
goto bus_unreg;
}
- host_to_add = sdebug_add_host;
+ hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
- pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
+ for (k = 0; k < hosts_to_add; k++) {
+ if (want_store && k == 0) {
+ ret = sdebug_add_host_helper(idx);
+ if (ret < 0) {
+ pr_err("add_host_helper k=%d, error=%d\n",
+ k, -ret);
+ break;
+ }
+ } else {
+ ret = sdebug_do_add_host(want_store &&
+ sdebug_per_host_store);
+ if (ret < 0) {
+ pr_err("add_host k=%d error=%d\n", k, -ret);
+ break;
+ }
}
}
-
if (sdebug_verbose)
- pr_info("built %d host(s)\n", sdebug_add_host);
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
return 0;
@@ -5430,9 +6747,7 @@ bus_unreg:
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
+ sdebug_erase_store(idx, NULL);
free_q_arr:
kfree(sdebug_q_arr);
return ret;
@@ -5440,20 +6755,18 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k = sdebug_add_host;
+ int k = sdebug_num_hosts;
stop_all_queued();
for (; k; k--)
- sdebug_remove_adapter();
+ sdebug_do_remove_host(true);
free_all_queued();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
- vfree(map_storep);
- vfree(dif_storep);
- vfree(fake_storep);
- kfree(sdebug_q_arr);
+ sdebug_erase_all_stores(false);
+ xa_destroy(per_store_ap);
}
device_initcall(scsi_debug_init);
@@ -5467,29 +6780,146 @@ static void sdebug_release_adapter(struct device *dev)
kfree(sdbg_host);
}
-static int sdebug_add_adapter(void)
+/* idx must be valid, if sip is NULL then it will be obtained using idx */
+static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{
- int k, devs_per_host;
- int error = 0;
+ if (idx < 0)
+ return;
+ if (!sip) {
+ if (xa_empty(per_store_ap))
+ return;
+ sip = xa_load(per_store_ap, idx);
+ if (!sip)
+ return;
+ }
+ vfree(sip->map_storep);
+ vfree(sip->dif_storep);
+ vfree(sip->storep);
+ xa_erase(per_store_ap, idx);
+ kfree(sip);
+}
+
+/* Assume apart_from_first==false only in shutdown case. */
+static void sdebug_erase_all_stores(bool apart_from_first)
+{
+ unsigned long idx;
+ struct sdeb_store_info *sip = NULL;
+
+ xa_for_each(per_store_ap, idx, sip) {
+ if (apart_from_first)
+ apart_from_first = false;
+ else
+ sdebug_erase_store(idx, sip);
+ }
+ if (apart_from_first)
+ sdeb_most_recent_idx = sdeb_first_idx;
+}
+
+/*
+ * Returns store xarray new element index (idx) if >=0 else negated errno.
+ * Limit the number of stores to 65536.
+ */
+static int sdebug_add_store(void)
+{
+ int res;
+ u32 n_idx;
+ unsigned long iflags;
+ unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
+ struct sdeb_store_info *sip = NULL;
+ struct xa_limit xal = { .max = 1 << 16, .min = 0 };
+
+ sip = kzalloc(sizeof(*sip), GFP_KERNEL);
+ if (!sip)
+ return -ENOMEM;
+
+ xa_lock_irqsave(per_store_ap, iflags);
+ res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
+ if (unlikely(res < 0)) {
+ xa_unlock_irqrestore(per_store_ap, iflags);
+ kfree(sip);
+ pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
+ return res;
+ }
+ sdeb_most_recent_idx = n_idx;
+ if (sdeb_first_idx < 0)
+ sdeb_first_idx = n_idx;
+ xa_unlock_irqrestore(per_store_ap, iflags);
+
+ res = -ENOMEM;
+ sip->storep = vzalloc(sz);
+ if (!sip->storep) {
+ pr_err("user data oom\n");
+ goto err;
+ }
+ if (sdebug_num_parts > 0)
+ sdebug_build_parts(sip->storep, sz);
+
+ /* DIF/DIX: what T10 calls Protection Information (PI) */
+ if (sdebug_dix) {
+ int dif_size;
+
+ dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
+ sip->dif_storep = vmalloc(dif_size);
+
+ pr_info("dif_storep %u bytes @ %pK\n", dif_size,
+ sip->dif_storep);
+
+ if (!sip->dif_storep) {
+ pr_err("DIX oom\n");
+ goto err;
+ }
+ memset(sip->dif_storep, 0xff, dif_size);
+ }
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
+ map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+ sip->map_storep = vmalloc(array_size(sizeof(long),
+ BITS_TO_LONGS(map_size)));
+
+ pr_info("%lu provisioning blocks\n", map_size);
+
+ if (!sip->map_storep) {
+ pr_err("LBP map oom\n");
+ goto err;
+ }
+
+ bitmap_zero(sip->map_storep, map_size);
+
+ /* Map first 1KB for partition table */
+ if (sdebug_num_parts)
+ map_region(sip, 0, 2);
+ }
+
+ rwlock_init(&sip->macc_lck);
+ return (int)n_idx;
+err:
+ sdebug_erase_store((int)n_idx, sip);
+ pr_warn("%s: failed, errno=%d\n", __func__, -res);
+ return res;
+}
+
+static int sdebug_add_host_helper(int per_host_idx)
+{
+ int k, devs_per_host, idx;
+ int error = -ENOMEM;
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
- if (sdbg_host == NULL) {
- pr_err("out of memory at line %d\n", __LINE__);
+ if (!sdbg_host)
return -ENOMEM;
- }
+ idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
+ if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
+ xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ sdbg_host->si_idx = idx;
INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
- if (!sdbg_devinfo) {
- pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ if (!sdbg_devinfo)
goto clean;
- }
}
spin_lock(&sdebug_host_list_lock);
@@ -5499,44 +6929,77 @@ static int sdebug_add_adapter(void)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
-
if (error)
goto clean;
- ++sdebug_add_host;
- return error;
+ ++sdebug_num_hosts;
+ return 0;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
-
kfree(sdbg_host);
+ pr_warn("%s: failed, errno=%d\n", __func__, -error);
return error;
}
-static void sdebug_remove_adapter(void)
+static int sdebug_do_add_host(bool mk_new_store)
{
+ int ph_idx = sdeb_most_recent_idx;
+
+ if (mk_new_store) {
+ ph_idx = sdebug_add_store();
+ if (ph_idx < 0)
+ return ph_idx;
+ }
+ return sdebug_add_host_helper(ph_idx);
+}
+
+static void sdebug_do_remove_host(bool the_end)
+{
+ int idx = -1;
struct sdebug_host_info *sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host2;
spin_lock(&sdebug_host_list_lock);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
- list_del(&sdbg_host->host_list);
+ idx = sdbg_host->si_idx;
+ }
+ if (!the_end && idx >= 0) {
+ bool unique = true;
+
+ list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
+ if (sdbg_host2 == sdbg_host)
+ continue;
+ if (idx == sdbg_host2->si_idx) {
+ unique = false;
+ break;
+ }
+ }
+ if (unique) {
+ xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
+ if (idx == sdeb_most_recent_idx)
+ --sdeb_most_recent_idx;
+ }
}
+ if (sdbg_host)
+ list_del(&sdbg_host->host_list);
spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
device_unregister(&sdbg_host->dev);
- --sdebug_add_host;
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -5595,6 +7058,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
const struct opcode_info_t *oip;
const struct opcode_info_t *r_oip;
struct sdebug_dev_info *devip;
+
u8 *cmd = scp->cmnd;
int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
@@ -5724,7 +7188,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
fini:
- if (F_DELAY_OVERR & flags)
+ if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
return schedule_resp(scp, devip, errsts, pfp, 0, 0);
else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
sdebug_ndelay > 10000)) {
@@ -5866,8 +7330,9 @@ static int sdebug_driver_probe(struct device *dev)
pr_err("scsi_add_host failed\n");
error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else {
scsi_scan_host(hpnt);
+ }
return error;
}
@@ -5889,6 +7354,7 @@ static int sdebug_driver_remove(struct device *dev)
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo->zstate);
kfree(sdbg_devinfo);
}
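
The scsi_debug hunks above replace the old trio of global buffers (fake_storep, dif_storep, map_storep) with per-store objects looked up through an XArray, so each simulated host can be given its own backing store. A minimal sketch of that allocate/iterate/erase pattern, using a made-up struct demo_store rather than the driver's sdeb_store_info, could look like this:

    #include <linux/xarray.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    struct demo_store {                     /* stand-in for sdeb_store_info */
            void *buf;
            size_t len;
    };

    static DEFINE_XARRAY_FLAGS(demo_stores, XA_FLAGS_ALLOC);

    static int demo_add_store(size_t len)
    {
            struct demo_store *st;
            u32 idx;
            int ret;

            st = kzalloc(sizeof(*st), GFP_KERNEL);
            if (!st)
                    return -ENOMEM;
            st->buf = vzalloc(len);
            if (!st->buf) {
                    kfree(st);
                    return -ENOMEM;
            }
            st->len = len;
            /* grab a free index in [0, 65536), as the driver's limit does */
            ret = xa_alloc(&demo_stores, &idx, st, XA_LIMIT(0, 1 << 16),
                           GFP_KERNEL);
            if (ret < 0) {
                    vfree(st->buf);
                    kfree(st);
                    return ret;
            }
            return idx;                     /* later lookups use xa_load() */
    }

    static void demo_erase_all_stores(void)
    {
            struct demo_store *st;
            unsigned long idx;

            xa_for_each(&demo_stores, idx, st) {
                    xa_erase(&demo_stores, idx);
                    vfree(st->buf);
                    kfree(st);
            }
    }

xa_alloc() hands back a free index within the given limit, which is why sdebug_add_store() in the patch can simply return that index for later lookups.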
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 978be1602f71..927b1e641842 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1412,6 +1412,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip START_UNIT, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
stu_scmd = NULL;
@@ -1478,6 +1479,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdev,
"%s: skip BDR, past eh deadline\n",
current->comm));
+ scsi_device_put(sdev);
break;
}
bdr_scmd = NULL;
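
Both scsi_error.c hunks add a scsi_device_put() in front of an early break out of shost_for_each_device(). The iterator takes a reference on each device it hands to the loop body and only drops it when it advances to the next device, so bailing out early would otherwise leak that last reference. A minimal sketch of the rule (demo_walk_devices() and the skip() callback are invented for illustration):

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static void demo_walk_devices(struct Scsi_Host *shost,
                                  bool (*skip)(struct scsi_device *))
    {
            struct scsi_device *sdev;

            shost_for_each_device(sdev, shost) {
                    if (skip(sdev)) {
                            /* drop the reference the iterator still holds */
                            scsi_device_put(sdev);
                            break;
                    }
                    /* normal per-device work would go here */
            }
    }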
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index df4905df5cd4..0ba7a65e7c8d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -202,24 +202,17 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
blk_mq_requeue_request(cmd->request, true);
}
-/*
- * Function: scsi_queue_insert()
- *
- * Purpose: Insert a command in the midlevel queue.
- *
- * Arguments: cmd - command that we are adding to queue.
- * reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
+/**
+ * scsi_queue_insert - Reinsert a command in the queue.
+ * @cmd: command that we are adding to queue.
+ * @reason: why we are inserting command to queue.
*
- * Returns: Nothing.
+ * We do this for one of two cases. Either the host is busy and it cannot accept
+ * any more commands for the time being, or the device returned QUEUE_FULL and
+ * can accept no more commands.
*
- * Notes: We do this for one of two cases. Either the host is busy
- * and it cannot accept any more commands for the time being,
- * or the device returned QUEUE_FULL and can accept no more
- * commands.
- * Notes: This could be called either from an interrupt context or a
- * normal process context.
+ * Context: This could be called either from an interrupt context or a normal
+ * process context.
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
@@ -301,16 +294,12 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(__scsi_execute);
-/*
- * Function: scsi_init_cmd_errh()
- *
- * Purpose: Initialize cmd fields related to error handling.
- *
- * Arguments: cmd - command that is ready to be queued.
+/**
+ * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
+ * @cmd: command that is ready to be queued.
*
- * Notes: This function has the job of initializing a number of
- * fields related to error handling. Typically this will
- * be called once for each command, as required.
+ * This function has the job of initializing a number of fields related to error
+ * handling. Typically this will be called once for each command, as required.
*/
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
@@ -496,17 +485,11 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-/*
- * Function: scsi_run_queue()
- *
- * Purpose: Select a proper request queue to serve next
- *
- * Arguments: q - last request's queue
- *
- * Returns: Nothing
+/**
+ * scsi_run_queue - Select a proper request queue to serve next.
+ * @q: last request's queue
*
- * Notes: The previous command was completely finished, start
- * a new one if possible.
+ * The previous command was completely finished, start a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
@@ -548,7 +531,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+static void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table,
@@ -560,7 +543,7 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
}
@@ -896,34 +879,27 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
return result;
}
-/*
- * Function: scsi_io_completion()
- *
- * Purpose: Completion processing for block device I/O requests.
- *
- * Arguments: cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: We will finish off the specified number of sectors. If we
- * are done, the command block will be released and the queue
- * function will be goosed. If we are not done then we have to
- * figure out what to do next:
- *
- * a) We can call scsi_requeue_command(). The request
- * will be unprepared and put back on the queue. Then
- * a new command will be created for it. This should
- * be used if we made forward progress, or if we want
- * to switch from READ(10) to READ(6) for example.
- *
- * b) We can call __scsi_queue_insert(). The request will
- * be put back on the queue and retried using the same
- * command as before, possibly after a delay.
- *
- * c) We can call scsi_end_request() with blk_stat other than
- * BLK_STS_OK, to fail the remainder of the request.
+/**
+ * scsi_io_completion - Completion processing for SCSI commands.
+ * @cmd: command that is finished.
+ * @good_bytes: number of processed bytes.
+ *
+ * We will finish off the specified number of sectors. If we are done, the
+ * command block will be released and the queue function will be goosed. If we
+ * are not done then we have to figure out what to do next:
+ *
+ * a) We can call scsi_io_completion_reprep(). The request will be
+ * unprepared and put back on the queue. Then a new command will
+ * be created for it. This should be used if we made forward
+ * progress, or if we want to switch from READ(10) to READ(6) for
+ * example.
+ *
+ * b) We can call scsi_io_completion_action(). The request will be
+ * put back on the queue and retried using the same command as
+ * before, possibly after a delay.
+ *
+ * c) We can call scsi_end_request() with blk_stat other than
+ * BLK_STS_OK, to fail the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
@@ -951,8 +927,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
blk_rq_sectors(req), good_bytes));
/*
- * Next deal with any sectors which we were able to correctly
- * handle. Failed, zero length commands always need to drop down
+ * Failed, zero length commands always need to drop down
* to retry code. Fast path should return in this block.
*/
if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
@@ -986,16 +961,14 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
sdev->host->hostt->dma_need_drain(rq);
}
-/*
- * Function: scsi_init_io()
- *
- * Purpose: SCSI I/O initialize function.
- *
- * Arguments: cmd - Command descriptor we wish to initialize
+/**
+ * scsi_init_io - SCSI I/O initialization function.
+ * @cmd: command descriptor we wish to initialize
*
- * Returns: BLK_STS_OK on success
- * BLK_STS_RESOURCE if the failure is retryable
- * BLK_STS_IOERR if the failure is fatal
+ * Returns:
+ * * BLK_STS_OK - on success
+ * * BLK_STS_RESOURCE - if the failure is retryable
+ * * BLK_STS_IOERR - if the failure is fatal
*/
blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
{
@@ -1086,7 +1059,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
return BLK_STS_OK;
out_free_sgtables:
- scsi_mq_free_sgtables(cmd);
+ scsi_free_sgtables(cmd);
return ret;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1217,6 +1190,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
@@ -1226,9 +1200,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
cmd->sc_data_direction = DMA_FROM_DEVICE;
if (blk_rq_is_scsi(req))
- return scsi_setup_scsi_cmnd(sdev, req);
+ ret = scsi_setup_scsi_cmnd(sdev, req);
else
- return scsi_setup_fs_cmnd(sdev, req);
+ ret = scsi_setup_fs_cmnd(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ scsi_free_sgtables(cmd);
+
+ return ret;
}
static blk_status_t
@@ -1893,6 +1872,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
unsigned int cmd_size, sgl_size;
+ struct blk_mq_tag_set *tag_set = &shost->tag_set;
sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
scsi_mq_inline_sgl_size(shost));
@@ -1901,21 +1881,21 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
cmd_size += sizeof(struct scsi_data_buffer) +
sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
- memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+ memset(tag_set, 0, sizeof(*tag_set));
if (shost->hostt->commit_rqs)
- shost->tag_set.ops = &scsi_mq_ops;
+ tag_set->ops = &scsi_mq_ops;
else
- shost->tag_set.ops = &scsi_mq_ops_no_commit;
- shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
- shost->tag_set.queue_depth = shost->can_queue;
- shost->tag_set.cmd_size = cmd_size;
- shost->tag_set.numa_node = NUMA_NO_NODE;
- shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- shost->tag_set.flags |=
+ tag_set->ops = &scsi_mq_ops_no_commit;
+ tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+ tag_set->queue_depth = shost->can_queue;
+ tag_set->cmd_size = cmd_size;
+ tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
- shost->tag_set.driver_data = shost;
+ tag_set->driver_data = shost;
- return blk_mq_alloc_tag_set(&shost->tag_set);
+ return blk_mq_alloc_tag_set(tag_set);
}
void scsi_mq_destroy_tags(struct Scsi_Host *shost)
@@ -1944,21 +1924,13 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
-/*
- * Function: scsi_block_requests()
- *
- * Purpose: Utility function used by low-level drivers to prevent further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
+/**
+ * scsi_block_requests - Utility function used by low-level drivers to prevent
+ * further commands from being queued to the device.
+ * @shost: host in question
*
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests().
*/
void scsi_block_requests(struct Scsi_Host *shost)
{
@@ -1966,25 +1938,15 @@ void scsi_block_requests(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_block_requests);
-/*
- * Function: scsi_unblock_requests()
- *
- * Purpose: Utility function used by low-level drivers to allow further
- * commands from being queued to the device.
- *
- * Arguments: shost - Host in question
- *
- * Returns: Nothing
- *
- * Lock status: No locks are assumed held.
- *
- * Notes: There is no timer nor any other means by which the requests
- * get unblocked other than the low-level driver calling
- * scsi_unblock_requests().
- *
- * This is done as an API function so that changes to the
- * internals of the scsi mid-layer won't require wholesale
- * changes to drivers that use this feature.
+/**
+ * scsi_unblock_requests - Utility function used by low-level drivers to allow
+ * further commands to be queued to the device.
+ * @shost: host in question
+ *
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests(). This is done
+ * as an API function so that changes to the internals of the scsi mid-layer
+ * won't require wholesale changes to drivers that use this feature.
*/
void scsi_unblock_requests(struct Scsi_Host *shost)
{
@@ -2865,11 +2827,27 @@ scsi_host_block(struct Scsi_Host *shost)
struct scsi_device *sdev;
int ret = 0;
+ /*
+ * Call scsi_internal_device_block_nowait so we can avoid
+ * calling synchronize_rcu() for each LUN.
+ */
shost_for_each_device(sdev, shost) {
- ret = scsi_internal_device_block(sdev);
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_internal_device_block_nowait(sdev);
+ mutex_unlock(&sdev->state_mutex);
if (ret)
break;
}
+
+ /*
+ * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
+ * calling synchronize_rcu() once is enough.
+ */
+ WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
+
+ if (!ret)
+ synchronize_rcu();
+
return ret;
}
EXPORT_SYMBOL_GPL(scsi_host_block);
@@ -2882,8 +2860,10 @@ scsi_host_unblock(struct Scsi_Host *shost, int new_state)
shost_for_each_device(sdev, shost) {
ret = scsi_internal_device_unblock(sdev, new_state);
- if (ret)
+ if (ret) {
+ scsi_device_put(sdev);
break;
+ }
}
return ret;
}
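
The scsi_host_block() hunk above swaps the per-device scsi_internal_device_block() call for the _nowait variant plus a single synchronize_rcu() once the loop is done, as the added comments explain. The general shape of that optimisation, paying one grace-period wait for the whole batch instead of one per item, is sketched below with invented demo_* names:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct demo_item {
            struct list_head node;
    };

    /* hypothetical per-item step that must not wait for a grace period */
    static int block_one_nowait(struct demo_item *it)
    {
            return 0;
    }

    static int demo_block_all(struct list_head *items)
    {
            struct demo_item *it;
            int ret = 0;

            list_for_each_entry(it, items, node) {
                    ret = block_one_nowait(it);
                    if (ret)
                            break;
            }
            if (!ret)
                    synchronize_rcu();      /* one wait covers the whole batch */
            return ret;
    }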
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b2a803c51288..f4cc08eb47ba 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1616,6 +1616,12 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class,
static struct sock *nls;
static DEFINE_MUTEX(rx_queue_mutex);
+/*
+ * conn_mutex protects the {start,bind,stop,destroy}_conn from racing
+ * against the kernel stop_connection recovery mechanism
+ */
+static DEFINE_MUTEX(conn_mutex);
+
static LIST_HEAD(sesslist);
static LIST_HEAD(sessdestroylist);
static DEFINE_SPINLOCK(sesslock);
@@ -2445,6 +2451,32 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+/*
+ * This can be called without the rx_queue_mutex, if invoked by the kernel
+ * stop work. But, in that case, it is guaranteed not to race with
+ * iscsi_destroy by conn_mutex.
+ */
+static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
+ /*
+ * It is important that this path doesn't rely on
+ * rx_queue_mutex, otherwise, a thread doing allocation on a
+ * start_session/start_connection could sleep waiting on a
+ * writeback to a failed iscsi device, that cannot be recovered
+ * because the lock is held. If we don't hold it here, the
+ * kernel stop_conn_work_fn has a chance to stop the broken
+ * session and resolve the allocation.
+ *
+ * Still, the user invoked .stop_conn() needs to be serialized
+ * with stop_conn_work_fn by a private mutex. Not pretty, but
+ * it works.
+ */
+ mutex_lock(&conn_mutex);
+ conn->transport->stop_conn(conn, flag);
+ mutex_unlock(&conn_mutex);
+
+}
+
static void stop_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn, *tmp;
@@ -2463,30 +2495,17 @@ static void stop_conn_work_fn(struct work_struct *work)
uint32_t sid = iscsi_conn_get_sid(conn);
struct iscsi_cls_session *session;
- mutex_lock(&rx_queue_mutex);
-
session = iscsi_session_lookup(sid);
if (session) {
if (system_state != SYSTEM_RUNNING) {
session->recovery_tmo = 0;
- conn->transport->stop_conn(conn,
- STOP_CONN_TERM);
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
} else {
- conn->transport->stop_conn(conn,
- STOP_CONN_RECOVER);
+ iscsi_if_stop_conn(conn, STOP_CONN_RECOVER);
}
}
list_del_init(&conn->conn_list_err);
-
- mutex_unlock(&rx_queue_mutex);
-
- /* we don't want to hold rx_queue_mutex for too long,
- * for instance if many conns failed at the same time,
- * since this stall other iscsi maintenance operations.
- * Give other users a chance to proceed.
- */
- cond_resched();
}
}
@@ -2846,8 +2865,11 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
+
+ mutex_lock(&conn_mutex);
if (transport->destroy_conn)
transport->destroy_conn(conn);
+ mutex_unlock(&conn_mutex);
return 0;
}
@@ -3689,9 +3711,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
break;
}
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->bind_conn(session, conn,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
+ mutex_unlock(&conn_mutex);
+
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3713,9 +3738,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_START_CONN:
conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid);
if (conn) {
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
conn->state = ISCSI_CONN_UP;
+ mutex_unlock(&conn_mutex);
}
else
err = -EINVAL;
@@ -3723,17 +3750,20 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_STOP_CONN:
conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
if (conn)
- transport->stop_conn(conn, ev->u.stop_conn.flag);
+ iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
else
err = -EINVAL;
break;
case ISCSI_UEVENT_SEND_PDU:
conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid);
- if (conn)
+ if (conn) {
+ mutex_lock(&conn_mutex);
ev->r.retcode = transport->send_pdu(conn,
(struct iscsi_hdr*)((char*)ev + sizeof(*ev)),
(char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size,
ev->u.send_pdu.data_size);
+ mutex_unlock(&conn_mutex);
+ }
else
err = -EINVAL;
break;
@@ -4728,7 +4758,9 @@ static __init int iscsi_transport_init(void)
goto unregister_flashnode_bus;
}
- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+ iscsi_eh_timer_workq = alloc_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 2, "iscsi_eh");
if (!iscsi_eh_timer_workq) {
err = -ENOMEM;
goto release_nls;
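
The last scsi_transport_iscsi.c hunk replaces create_singlethread_workqueue() with alloc_workqueue(), which lets the caller state the WQ_MEM_RECLAIM/WQ_UNBOUND/WQ_SYSFS flags and the max_active limit explicitly. A minimal usage sketch (queue name and work handler are made up):

    #include <linux/workqueue.h>

    static void demo_work_fn(struct work_struct *work)
    {
            /* deferred work runs here */
    }

    static DECLARE_WORK(demo_work, demo_work_fn);
    static struct workqueue_struct *demo_wq;

    static int demo_wq_init(void)
    {
            /* "%s" plus a trailing vararg mirrors the call in the hunk */
            demo_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 2,
                                      "demo_eh");
            if (!demo_wq)
                    return -ENOMEM;
            queue_work(demo_wq, &demo_work);
            return 0;
    }

    static void demo_wq_exit(void)
    {
            destroy_workqueue(demo_wq);
    }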
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 7b0383e42b4c..d90fefffe31b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -528,6 +528,21 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(max_write_same_blocks);
+static ssize_t
+zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ if (sdkp->device->type == TYPE_ZBC)
+ return sprintf(buf, "host-managed\n");
+ if (sdkp->zoned == 1)
+ return sprintf(buf, "host-aware\n");
+ if (sdkp->zoned == 2)
+ return sprintf(buf, "drive-managed\n");
+ return sprintf(buf, "none\n");
+}
+static DEVICE_ATTR_RO(zoned_cap);
+
static struct attribute *sd_disk_attrs[] = {
&dev_attr_cache_type.attr,
&dev_attr_FUA.attr,
@@ -541,6 +556,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_zeroing_mode.attr,
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
+ &dev_attr_zoned_cap.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
@@ -2962,6 +2978,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
* with partitions as regular block devices.
*/
q->limits.zoned = BLK_ZONED_NONE;
+ if (sdkp->zoned == 2 && sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Drive-managed SMR disk\n");
}
}
if (blk_queue_is_zoned(q) && sdkp->first_scan)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 713bce998b0e..3bdf0deb8f15 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -187,7 +187,7 @@ static inline void init_hpc_chain(struct ip22_hostdata *hdata)
hcp++;
dma += sizeof(struct hpc_chunk);
start += sizeof(struct hpc_chunk);
- };
+ }
hcp--;
hcp->desc.pnext = hdata->dma;
}
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index f8397978f8ab..03d43f016397 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -28,7 +28,6 @@
#include <linux/platform_device.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/delay.h>
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index de0ab5fc8474..f4c666285bba 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -399,7 +399,7 @@ void snic_handle_link_event(struct snic *);
void snic_handle_link(struct work_struct *);
int snic_queue_exch_ver_req(struct snic *);
-int snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
+void snic_io_exch_ver_cmpl_handler(struct snic *, struct snic_fw_req *);
int snic_queue_wq_desc(struct snic *, void *os_buf, u16 len);
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 449b03f3bbd3..4cd86115cfb2 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -151,7 +151,7 @@ error:
/*
* snic_io_exch_ver_cmpl_handler
*/
-int
+void
snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
struct snic_req_info *rqi = NULL;
@@ -160,7 +160,6 @@ snic_io_exch_ver_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
u32 cmnd_id, hid, max_sgs;
ulong ctx = 0;
unsigned long flags;
- int ret = 0;
SNIC_HOST_INFO(snic->shost, "Exch Ver Compl Received.\n");
snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
@@ -224,8 +223,6 @@ exch_cmpl_end:
snic_release_untagged_req(snic, rqi);
SNIC_HOST_INFO(snic->shost, "Exch_cmpl Done, hdr_stat %d.\n", hdr_stat);
-
- return ret;
} /* end of snic_io_exch_ver_cmpl_handler */
/*
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f9b589d60a46..4dcd735ea49e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -51,6 +51,8 @@
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
@@ -344,10 +346,8 @@ static int sr_done(struct scsi_cmnd *SCpnt)
case ILLEGAL_REQUEST:
if (!(SCpnt->sense_buffer[0] & 0x90))
break;
- error_sector = (SCpnt->sense_buffer[3] << 24) |
- (SCpnt->sense_buffer[4] << 16) |
- (SCpnt->sense_buffer[5] << 8) |
- SCpnt->sense_buffer[6];
+ error_sector =
+ get_unaligned_be32(&SCpnt->sense_buffer[3]);
if (SCpnt->request->bio != NULL)
block_sectors =
bio_sectors(SCpnt->request->bio);
@@ -495,13 +495,9 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
SCpnt->sdb.length = this_count * s_size;
}
- SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
- SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
- SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
- SCpnt->cmnd[5] = (unsigned char) block & 0xff;
+ put_unaligned_be32(block, &SCpnt->cmnd[2]);
SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0;
- SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
- SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
+ put_unaligned_be16(this_count, &SCpnt->cmnd[7]);
/*
* We shouldn't disconnect in the middle of a sector, so with a dumb
@@ -853,8 +849,7 @@ static void get_sectorsize(struct scsi_cd *cd)
} else {
long last_written;
- cd->capacity = 1 + ((buffer[0] << 24) | (buffer[1] << 16) |
- (buffer[2] << 8) | buffer[3]);
+ cd->capacity = 1 + get_unaligned_be32(&buffer[0]);
/*
* READ_CAPACITY doesn't return the correct size on
* certain UDF media. If last_written is larger, use
@@ -865,8 +860,7 @@ static void get_sectorsize(struct scsi_cd *cd)
if (!cdrom_get_last_written(&cd->cdi, &last_written))
cd->capacity = max_t(long, cd->capacity, last_written);
- sector_size = (buffer[4] << 24) |
- (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];
+ sector_size = get_unaligned_be32(&buffer[4]);
switch (sector_size) {
/*
* HP 4020i CD-Recorder reports 2340 byte sectors
@@ -954,13 +948,13 @@ static void get_capabilities(struct scsi_cd *cd)
}
n = data.header_length + data.block_descriptor_length;
- cd->cdi.speed = ((buffer[n + 8] << 8) + buffer[n + 9]) / 176;
+ cd->cdi.speed = get_unaligned_be16(&buffer[n + 8]) / 176;
cd->readcd_known = 1;
cd->readcd_cdda = buffer[n + 5] & 0x01;
/* print some capability bits */
sr_printk(KERN_INFO, cd,
"scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n",
- ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
+ get_unaligned_be16(&buffer[n + 14]) / 176,
cd->cdi.speed,
buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
buffer[n + 3] & 0x20 ? "dvd-ram " : "",
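
The sr.c hunks replace open-coded big-endian byte shifting with the <asm/unaligned.h> helpers that the file now includes. The two forms are equivalent; a small sketch of the read and write directions (the demo_* functions are illustrative only):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Reading: both expressions produce the same 32-bit value. */
    static u32 demo_read_be32(const u8 *p)
    {
            u32 a = ((u32)p[0] << 24) | ((u32)p[1] << 16) |
                    ((u32)p[2] << 8) | p[3];
            u32 b = get_unaligned_be32(p);

            return a == b ? a : 0;          /* always the first branch */
    }

    /* Writing: big-endian stores into a CDB-style buffer. */
    static void demo_fill_cdb(u8 *cdb, u32 lba, u16 count)
    {
            put_unaligned_be32(lba, &cdb[2]);       /* bytes 2..5 */
            put_unaligned_be16(count, &cdb[7]);     /* bytes 7..8 */
    }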
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c5f9b348b438..4bf4ab3b70f4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1457,7 +1457,6 @@ static int st_flush(struct file *filp, fl_owner_t id)
accessing this tape. */
static int st_release(struct inode *inode, struct file *filp)
{
- int result = 0;
struct scsi_tape *STp = filp->private_data;
if (STp->door_locked == ST_LOCKED_AUTO)
@@ -1470,9 +1469,9 @@ static int st_release(struct inode *inode, struct file *filp)
scsi_autopm_put_device(STp->device);
scsi_tape_put(STp);
- return result;
+ return 0;
}
-
+
/* The checks common to both reading and writing */
static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
{
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
index 5216d228cdd9..46bb905b4d6a 100644
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ b/drivers/scsi/ufs/ti-j721e-ufs.c
@@ -32,14 +32,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
- return ret;
+ goto disable_pm;
}
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
dev_err(dev, "Cannot claim MPHY clock.\n");
- return PTR_ERR(clk);
+ goto clk_err;
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
@@ -54,16 +54,23 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
- pm_runtime_put_sync(dev);
+ goto clk_err;
}
return ret;
+
+clk_err:
+ pm_runtime_put_sync(dev);
+disable_pm:
+ pm_runtime_disable(dev);
+ return ret;
}
static int ti_j721e_ufs_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
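
The ti-j721e-ufs probe changes route every failure after runtime PM has been enabled through goto labels, so the usage count is dropped and runtime PM is disabled again on the way out, and remove() now disables runtime PM as well. A hedged sketch of that unwind ordering, with demo_hw_init() standing in for the device-specific steps:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int demo_hw_init(struct device *dev)     /* hypothetical */
    {
            return 0;
    }

    static int demo_probe(struct device *dev)
    {
            int ret;

            pm_runtime_enable(dev);
            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* get_sync raised the usage count even though it failed */
                    pm_runtime_put_noidle(dev);
                    goto disable_pm;
            }

            ret = demo_hw_init(dev);
            if (ret)
                    goto err_put;

            return 0;

    err_put:
            pm_runtime_put_sync(dev);
    disable_pm:
            pm_runtime_disable(dev);
            return ret;
    }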
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 673c16596fb2..d56ce8d97d4e 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -30,6 +30,12 @@
#define ufs_mtk_device_reset_ctrl(high, res) \
ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
+ UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
+ UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
+ END_FIX
+};
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -73,9 +79,9 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
if (status == PRE_CHANGE) {
if (host->unipro_lpm)
- hba->hba_enable_delay_us = 0;
+ hba->vps->hba_enable_delay_us = 0;
else
- hba->hba_enable_delay_us = 600;
+ hba->vps->hba_enable_delay_us = 600;
}
return 0;
@@ -263,6 +269,10 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable clock-gating */
hba->caps |= UFSHCD_CAP_CLK_GATING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
+
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -555,10 +565,8 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
- if (mid == UFS_VENDOR_SAMSUNG) {
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+ if (mid == UFS_VENDOR_SAMSUNG)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
- }
/*
* Decide waiting time before gating reference clock and
@@ -575,6 +583,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u16 mid = dev_info->wmanufacturerid;
+
+ ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (mid == UFS_VENDOR_SAMSUNG)
+ hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
+}
+
/**
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -589,6 +608,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
+ .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
.dbg_register_dump = ufs_mtk_dbg_register_dump,
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 19aa5c44e0da..2e6ddb5cdfc2 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -572,7 +572,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
if (ufs_qcom_is_link_off(hba)) {
/*
@@ -587,7 +586,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
}
- return ret;
+ return 0;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
@@ -1071,6 +1070,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ hba->caps |= UFSHCD_CAP_WB_EN;
if (host->hw_ver.major >= 0x2) {
host->caps = UFS_QCOM_CAP_QUNIPRO |
@@ -1658,11 +1658,11 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
/* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_testbus_read(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
ufs_qcom_print_unipro_testbus(hba);
- usleep_range(1000, 1100);
+ udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 92a63eebdca9..2d71d232a69d 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -276,6 +276,10 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
+UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
+UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
+UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
+UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_device_type.attr,
@@ -304,6 +308,10 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
+ &dev_attr_ext_feature_sup.attr,
+ &dev_attr_wb_presv_us_en.attr,
+ &dev_attr_wb_type.attr,
+ &dev_attr_wb_shared_alloc_units.attr,
NULL,
};
@@ -373,6 +381,12 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
+UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
+UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
+UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);
+
static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_raw_device_capacity.attr,
@@ -404,6 +418,11 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
+ &dev_attr_wb_max_alloc_units.attr,
+ &dev_attr_wb_max_wb_luns.attr,
+ &dev_attr_wb_buff_cap_adj.attr,
+ &dev_attr_wb_sup_red_type.attr,
+ &dev_attr_wb_sup_wb_type.attr,
NULL,
};
@@ -603,20 +622,29 @@ static const struct attribute_group ufs_sysfs_string_descriptors_group = {
.attrs = ufs_sysfs_string_descriptors,
};
+static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
+{
+ return ((idn >= QUERY_FLAG_IDN_WB_EN) &&
+ (idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8));
+}
+
#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
+ u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
+ if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, &flag); \
+ QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
+ return sprintf(buf, "%s\n", flag ? "true" : "false"); \
} \
static DEVICE_ATTR_RO(_name)
@@ -628,6 +656,9 @@ UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
+UFS_FLAG(wb_enable, _WB_EN);
+UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
+UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -638,6 +669,9 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_phy_resource_removal.attr,
&dev_attr_busy_rtc.attr,
&dev_attr_disable_fw_update.attr,
+ &dev_attr_wb_enable.attr,
+ &dev_attr_wb_flush_en.attr,
+ &dev_attr_wb_flush_during_h8.attr,
NULL,
};
@@ -646,6 +680,12 @@ static const struct attribute_group ufs_sysfs_flags_group = {
.attrs = ufs_sysfs_device_flags,
};
+static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
+{
+ return ((idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS) &&
+ (idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE));
+}
+
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -653,9 +693,12 @@ static ssize_t _name##_show(struct device *dev, \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
int ret; \
+ u8 index = 0; \
+ if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
+ index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, 0, 0, &value); \
+ QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
if (ret) \
return -EINVAL; \
@@ -679,6 +722,11 @@ UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
+UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
+UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
+UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
+UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
+
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
@@ -697,6 +745,10 @@ static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_ffu_status.attr,
&dev_attr_psa_state.attr,
&dev_attr_psa_data_size.attr,
+ &dev_attr_wb_flush_status.attr,
+ &dev_attr_wb_avail_buf.attr,
+ &dev_attr_wb_life_time_est.attr,
+ &dev_attr_wb_cur_buf.attr,
NULL,
};
@@ -748,6 +800,8 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
+UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
+
static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_boot_lun_id.attr,
@@ -763,6 +817,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
+ &dev_attr_wb_buf_alloc_units.attr,
NULL,
};
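
The ufs-sysfs.c hunks add an index argument to the flag/attribute queries so the WriteBooster ones can be directed at a specific logical unit. ufshcd_wb_get_query_index(), used in the macros, is not shown in this hunk; given the ufs.h additions later in the patch, it presumably selects the dedicated WriteBooster LUN when the buffer is LU-dedicated and 0 otherwise. A sketch of that selector under those assumptions:

    /* Sketch only; relies on the b_wb_buffer_type, wb_dedicated_lu and
     * WB_BUF_MODE_* additions made to ufs.h later in this patch. */
    static u8 demo_wb_query_index(const struct ufs_dev_info *dev_info)
    {
            if (dev_info->b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
                    return dev_info->wb_dedicated_lu;
            return 0;       /* shared buffer: device-level addressing */
    }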
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 990cb48e2403..c70845d41449 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -64,6 +64,9 @@
#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
#define UFS_UPIU_WLUN_ID (1 << 7)
+/* WriteBooster buffer is available only for the logical unit from 0 to 7 */
+#define UFS_UPIU_MAX_WB_LUN_ID 8
+
/* Well known logical unit id in LUN field of UPIU */
enum {
UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
@@ -140,6 +143,9 @@ enum flag_idn {
QUERY_FLAG_IDN_BUSY_RTC = 0x09,
QUERY_FLAG_IDN_RESERVED3 = 0x0A,
QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
+ QUERY_FLAG_IDN_WB_EN = 0x0E,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
};
/* Attribute idn for Query requests */
@@ -168,6 +174,10 @@ enum attr_idn {
QUERY_ATTR_IDN_PSA_STATE = 0x15,
QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
+ QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
+ QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
};
/* Descriptor idn for Query requests */
@@ -191,9 +201,9 @@ enum desc_header_offset {
};
enum ufs_desc_def_size {
- QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x59,
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
- QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x2D,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
@@ -219,6 +229,7 @@ enum unit_desc_param {
UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
};
/* Device descriptor parameters offsets in bytes*/
@@ -258,6 +269,10 @@ enum device_desc_param {
DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
DEVICE_DESC_PARAM_PSA_TMT = 0x29,
DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
+ DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
+ DEVICE_DESC_PARAM_WB_TYPE = 0x54,
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
};
/* Interconnect descriptor parameters offsets in bytes*/
@@ -302,6 +317,11 @@ enum geometry_desc_param {
GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
+ GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
+ GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
+ GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
+ GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
+ GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
};
/* Health descriptor parameters offsets in bytes*/
@@ -313,6 +333,12 @@ enum health_desc_param {
HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
};
+/* WriteBooster buffer mode */
+enum {
+ WB_BUF_MODE_LU_DEDICATED = 0x0,
+ WB_BUF_MODE_SHARED = 0x1,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -333,6 +359,11 @@ enum {
UFSHCD_AMP = 3,
};
+/* Possible values for dExtendedUFSFeaturesSupport */
+enum {
+ UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
+};
+
#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
@@ -447,6 +478,8 @@ enum ufs_dev_pwr_mode {
UFS_POWERDOWN_PWR_MODE = 3,
};
+#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
+
/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
@@ -532,11 +565,17 @@ struct ufs_dev_info {
bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
u8 max_lu_supported;
+ u8 wb_dedicated_lu;
u16 wmanufacturerid;
/*UFS device Product Name */
u8 *model;
u16 wspecversion;
u32 clk_gating_wait_us;
+ u32 d_ext_ufs_feature_sup;
+ u8 b_wb_buffer_type;
+ u32 d_wb_alloc_units;
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
};
/**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index df7a1e6805a3..e3175a63c676 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -101,4 +101,11 @@ struct ufs_dev_fix {
*/
#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
+/*
+ * Some pre-3.1 UFS devices can support extended features by upgrading
+ * the firmware. Enable this quirk to make UFS core driver probe and enable
+ * supported features on such devices.
+ */
+#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 698e8d20b4ba..5db18f444ea9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -48,6 +48,8 @@
#include "unipro.h"
#include "ufs-sysfs.h"
#include "ufs_bsg.h"
+#include <asm/unaligned.h>
+#include <linux/blkdev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -92,6 +94,9 @@
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
+/* Default delay of RPM device flush delayed work */
+#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
+
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
@@ -251,6 +256,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
return tag >= 0 && tag < hba->nutrs;
@@ -272,6 +283,25 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+static inline void ufshcd_wb_config(struct ufs_hba *hba)
+{
+ int ret;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ ret = ufshcd_wb_ctrl(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: Enable WB failed: %d\n", __func__, ret);
+ else
+ dev_info(hba->dev, "%s: Write Booster Configured\n", __func__);
+ ret = ufshcd_wb_toggle_flush_during_h8(hba, true);
+ if (ret)
+ dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
+ __func__, ret);
+ ufshcd_wb_toggle_flush(hba, true);
+}
+
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
@@ -535,21 +565,21 @@ void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
-/*
+/**
* ufshcd_wait_for_register - wait for register value to change
- * @hba - per-adapter interface
- * @reg - mmio register offset
- * @mask - mask to apply to read register value
- * @val - wait condition
- * @interval_us - polling interval in microsecs
- * @timeout_ms - timeout in millisecs
- * @can_sleep - perform sleep or just spin
+ * @hba: per-adapter interface
+ * @reg: mmio register offset
+ * @mask: mask to apply to the read register value
+ * @val: value to wait for
+ * @interval_us: polling interval in microseconds
+ * @timeout_ms: timeout in milliseconds
*
- * Returns -ETIMEDOUT on error, zero on success
+ * Return:
+ * -ETIMEDOUT on error, zero on success.
*/
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep)
+ unsigned long timeout_ms)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -558,10 +588,7 @@ int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
- if (can_sleep)
- usleep_range(interval_us, interval_us + 50);
- else
- udelay(interval_us);
+ usleep_range(interval_us, interval_us + 50);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
@@ -1150,10 +1177,17 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
- if (ret)
+ if (ret) {
ufshcd_scale_clks(hba, false);
+ goto out_unprepare;
+ }
}
+ /* Enable Write Booster if we have scaled up else disable it */
+ up_write(&hba->clk_scaling_lock);
+ ufshcd_wb_ctrl(hba, scale_up);
+ down_write(&hba->clk_scaling_lock);
+
out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
out:
@@ -1319,23 +1353,6 @@ start_window:
return 0;
}
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static struct devfreq_simple_ondemand_data ufs_ondemand_data = {
- .upthreshold = 70,
- .downdifferential = 5,
-};
-
-static void *gov_data = &ufs_ondemand_data;
-#else
-static void *gov_data; /* NULL */
-#endif
-
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
@@ -1351,12 +1368,12 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
- ufshcd_vops_config_scaling_param(hba, &ufs_devfreq_profile,
- gov_data);
+ ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
+ &hba->vps->ondemand_data);
devfreq = devfreq_add_device(hba->dev,
- &ufs_devfreq_profile,
+ &hba->vps->devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
- gov_data);
+ &hba->vps->ondemand_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
@@ -2560,7 +2577,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000, true);
+ mask, ~mask, 1000, 1000);
return err;
}
@@ -2747,13 +2764,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
}
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
int ret;
int retries;
for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
- ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
if (ret)
dev_dbg(hba->dev,
"%s: failed with error %d, retries %d\n",
@@ -2774,16 +2791,17 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @hba: per-adapter instance
* @opcode: flag query to perform
* @idn: flag idn to access
+ * @index: flag index to access
* @flag_res: the flag value after the query request completes
*
* Returns 0 for success, non-zero in case of failure
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res)
+ enum flag_idn idn, u8 index, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
- int err, index = 0, selector = 0;
+ int err, selector = 0;
int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
@@ -3223,7 +3241,7 @@ static inline int ufshcd_read_desc(struct ufs_hba *hba,
struct uc_string_id {
u8 len;
u8 type;
- wchar_t uc[0];
+ wchar_t uc[];
} __packed;
/* replace non-printable or non-ASCII characters with spaces */
@@ -4137,10 +4155,10 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
int i;
int err;
- bool flag_res = 1;
+ bool flag_res = true;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -4151,7 +4169,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
/* poll for max. 1000 iterations for fDeviceInit flag to clear */
for (i = 0; i < 1000 && !err && flag_res; i++)
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (err)
dev_err(hba->dev,
@@ -4229,16 +4247,23 @@ EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
- * @can_sleep: perform sleep or just spin
*/
-static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
+ unsigned long flags;
int err;
+ /*
+ * Obtain the host lock to prevent the controller from being disabled
+ * while the UFS interrupt handler is active on another CPU.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
- 10, 1, can_sleep);
+ 10, 1);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
@@ -4259,7 +4284,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
@@ -4279,7 +4304,7 @@ int ufshcd_hba_enable(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- ufshcd_delay_us(hba->hba_enable_delay_us, 100);
+ ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
retry = 50;
@@ -4966,7 +4991,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
goto out;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
__func__, err);
@@ -5016,7 +5041,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
__func__, err);
@@ -5161,6 +5186,190 @@ out:
__func__, err);
}
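+/*
+ * Enable or disable the device WriteBooster feature by setting or
+ * clearing the fWriteBoosterEn flag (QUERY_FLAG_IDN_WB_EN).
+ */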
+static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+{
+ int ret;
+ u8 index;
+ enum query_opcode opcode;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return 0;
+
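+ /* Nothing to do if WriteBooster is already in the requested state */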
+ if (!(enable ^ hba->wb_enabled))
+ return 0;
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_EN, index, NULL);
+ if (ret) {
+ dev_err(hba->dev, "%s write booster %s failed %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+ return ret;
+ }
+
+ hba->wb_enabled = enable;
+ dev_dbg(hba->dev, "%s write booster %s %d\n",
+ __func__, enable ? "enable" : "disable", ret);
+
+ return ret;
+}
+
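+/*
+ * Allow or disallow the device to flush its WriteBooster buffer while in
+ * Hibern8, via QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8.
+ */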
+static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
+{
+ int val;
+ u8 index;
+
+ if (set)
+ val = UPIU_QUERY_OPCODE_SET_FLAG;
+ else
+ val = UPIU_QUERY_OPCODE_CLEAR_FLAG;
+
+ index = ufshcd_wb_get_query_index(hba);
+ return ufshcd_query_flag_retry(hba, val,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8,
+ index, NULL);
+}
+
+static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
+{
+ if (enable)
+ ufshcd_wb_buf_flush_enable(hba);
+ else
+ ufshcd_wb_buf_flush_disable(hba);
+}
+
+static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret)
+ dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
+ __func__, ret);
+ else
+ hba->wb_buf_flush_enabled = true;
+
+ dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
+ return ret;
+}
+
+static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
+{
+ int ret;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
+ return 0;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
+ index, NULL);
+ if (ret) {
+ dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
+ __func__, ret);
+ } else {
+ hba->wb_buf_flush_enabled = false;
+ dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ }
+
+ return ret;
+}
+
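+/*
+ * In preserve-user-space mode, keep VCC on only while there is still data
+ * to flush (dCurWriteBoosterBufferSize != 0) and the available buffer has
+ * dropped below the flush threshold.
+ */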
+static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+ u32 avail_buf)
+{
+ u32 cur_buf;
+ int ret;
+ u8 index;
+
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
+ index, 0, &cur_buf);
+ if (ret) {
+ dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!cur_buf) {
+ dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
+ cur_buf);
+ return false;
+ }
+ /* Keep VCC on so flushing continues while the available buffer is below the threshold */
+ if (avail_buf < hba->vps->wb_flush_threshold)
+ return true;
+
+ return false;
+}
+
+static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
+{
+ int ret;
+ u32 avail_buf;
+ u8 index;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return false;
+ /*
+ * The UFS device needs VCC to be ON in order to flush.
+ * With user-space reduction enabled, checking the available buffer
+ * alone is enough to decide whether to flush; the threshold used
+ * here corresponds to the buffer being more than 90% full.
+ * With user-space preservation enabled, the current buffer size must
+ * be checked as well, because the WB buffer can shrink as the device
+ * fills up. That size is reported in dCurrentWriteBoosterBufferSize,
+ * and there is no point in keeping VCC on when it is zero.
+ */
+ index = ufshcd_wb_get_query_index(hba);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
+ index, 0, &avail_buf);
+ if (ret) {
+ dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
+ __func__, ret);
+ return false;
+ }
+
+ if (!hba->dev_info.b_presrv_uspc_en) {
+ if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
+ return true;
+ return false;
+ }
+
+ return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+}
+
+static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(to_delayed_work(work),
+ struct ufs_hba,
+ rpm_dev_flush_recheck_work);
+ /*
+ * To prevent unnecessary VCC power drain after the device finishes
+ * its WriteBooster buffer flush or Auto-BKOPS, force a runtime
+ * resume/suspend cycle after a delay so that the next runtime
+ * suspend re-evaluates the flush threshold.
+ */
+ pm_runtime_get_sync(hba->dev);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
@@ -5723,7 +5932,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000, true);
+ mask, 0, 1000, 1000);
out:
return err;
}
@@ -6299,8 +6508,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
+ ufshcd_hba_stop(hba);
+
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba, false);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;
@@ -6603,6 +6813,93 @@ out:
return ret;
}
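+/*
+ * Parse the WriteBooster fields of the device descriptor and clear
+ * UFSHCD_CAP_WB_EN when WB cannot be used (feature not advertised or no
+ * buffer allocated).
+ */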
+static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ u8 lun;
+ u32 d_lu_wb_buf_alloc;
+
+ if (!ufshcd_is_wb_allowed(hba))
+ return;
+
+ if (hba->desc_size.dev_desc < DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
+ goto wb_disabled;
+
+ hba->dev_info.d_ext_ufs_feature_sup =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+
+ if (!(hba->dev_info.d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ goto wb_disabled;
+
+ /*
+ * WB may be supported by the device but left unconfigured during
+ * provisioning. In dedicated buffer mode the spec allows at most
+ * one LU to have a WB buffer configured; both shared and
+ * LU-dedicated buffer modes are handled below.
+ */
+ hba->dev_info.b_wb_buffer_type =
+ desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+
+ hba->dev_info.b_presrv_uspc_en =
+ desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
+
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_SHARED) {
+ hba->dev_info.d_wb_alloc_units =
+ get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
+ if (!hba->dev_info.d_wb_alloc_units)
+ goto wb_disabled;
+ } else {
+ for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
+ d_lu_wb_buf_alloc = 0;
+ ufshcd_read_unit_desc_param(hba,
+ lun,
+ UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
+ (u8 *)&d_lu_wb_buf_alloc,
+ sizeof(d_lu_wb_buf_alloc));
+ if (d_lu_wb_buf_alloc) {
+ hba->dev_info.wb_dedicated_lu = lun;
+ break;
+ }
+ }
+
+ if (!d_lu_wb_buf_alloc)
+ goto wb_disabled;
+ }
+ return;
+
+wb_disabled:
+ hba->caps &= ~UFSHCD_CAP_WB_EN;
+}
+
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
+{
+ struct ufs_dev_fix *f;
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+
+ if (!fixups)
+ return;
+
+ for (f = fixups; f->quirk; f++) {
+ if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
+ f->wmanufacturerid == UFS_ANY_VENDOR) &&
+ ((dev_info->model &&
+ STR_PRFX_EQUAL(f->model, dev_info->model)) ||
+ !strcmp(f->model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
+
+static void ufs_fixup_device_setup(struct ufs_hba *hba)
+{
+ /* apply the general quirk table first */
+ ufshcd_fixup_dev_quirks(hba, ufs_fixups);
+
+ /* allow vendors to fix quirks */
+ ufshcd_vops_fixup_dev_quirks(hba);
+}
+
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
@@ -6639,6 +6936,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -6647,6 +6945,17 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
goto out;
}
+ ufs_fixup_device_setup(hba);
+
+ /*
+ * Probe WB only for UFS 3.1 and UFS 2.2 devices, or for UFS devices
+ * with the quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES enabled
+ */
+ if (dev_info->wspecversion >= 0x310 ||
+ dev_info->wspecversion == 0x220 ||
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES))
+ ufshcd_wb_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -6666,21 +6975,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
dev_info->model = NULL;
}
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
- struct ufs_dev_fix *f;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- for (f = ufs_fixups; f->quirk; f++) {
- if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
- f->wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_info->model &&
- STR_PRFX_EQUAL(f->model, dev_info->model)) ||
- !strcmp(f->model, UFS_ANY_MODEL)))
- hba->dev_quirks |= f->quirk;
- }
-}
-
/**
* ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
* @hba: per-adapter instance
@@ -6996,9 +7290,6 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
bool flag;
int ret;
- /* Clear any previous UFS device information */
- memset(&hba->dev_info, 0, sizeof(hba->dev_info));
-
/* Init check for device descriptor sizes */
ufshcd_init_desc_sizes(hba);
@@ -7017,10 +7308,8 @@ static int ufshcd_device_params_init(struct ufs_hba *hba)
ufshcd_get_ref_clk_gating_wait(hba);
- ufs_fixup_device_setup(hba);
-
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
hba->dev_info.f_power_on_wp_en = flag;
/* Probe maximum power mode co-supported by both UFS host and device */
@@ -7149,6 +7438,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ ufshcd_wb_config(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -7195,6 +7485,16 @@ static const struct attribute_group *ufshcd_driver_groups[] = {
NULL,
};
+static struct ufs_hba_variant_params ufs_hba_vps = {
+ .hba_enable_delay_us = 1000,
+ .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
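+ /* keep VCC on for flushing while less than 40% of the WB buffer remains available */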
+ .devfreq_profile.polling_ms = 100,
+ .devfreq_profile.target = ufshcd_devfreq_target,
+ .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
+ .ondemand_data.upthreshold = 70,
+ .ondemand_data.downdifferential = 5,
+};
+
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
@@ -7774,7 +8074,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
@@ -7809,6 +8109,9 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
*
* Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
* in low power state which would save some power.
+ *
+ * If Write Booster is enabled and the device needs to flush the WB
+ * buffer OR if bkops status is urgent for WB, keep Vcc on.
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
@@ -7938,16 +8241,31 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
}
- }
+ /*
+ * If the device needs to run BKOPS or flush the WB buffer during
+ * Hibern8, keep the device power mode as "active power mode" and
+ * keep the VCC supply on.
+ */
+ hba->dev_info.b_rpm_dev_flush_capable =
+ hba->auto_bkops_enabled ||
+ (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
+ ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
+ ufshcd_is_auto_hibern8_enabled(hba))) &&
+ ufshcd_wb_need_flush(hba));
+ }
+
+ if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
+ if ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+ !ufshcd_is_runtime_pm(pm_op)) {
+ /* ensure that bkops is disabled */
+ ufshcd_disable_auto_bkops(hba);
+ }
- if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
- ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
- !ufshcd_is_runtime_pm(pm_op))) {
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_gating;
+ if (!hba->dev_info.b_rpm_dev_flush_capable) {
+ ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+ if (ret)
+ goto enable_gating;
+ }
}
flush_work(&hba->eeh_work);
@@ -8000,9 +8318,16 @@ enable_gating:
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
hba->clk_gating.is_suspended = false;
+ hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_release(hba);
out:
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
+ msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
+ }
+
hba->pm_op_in_progress = 0;
+
if (ret)
ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
return ret;
@@ -8055,9 +8380,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
else
goto vendor_suspend;
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is
+ * required since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
@@ -8087,6 +8416,11 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
+ if (hba->dev_info.b_rpm_dev_flush_capable) {
+ hba->dev_info.b_rpm_dev_flush_capable = false;
+ cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
+ }
+
/* Schedule clock gating in case of no access to UFS device yet */
ufshcd_release(hba);
@@ -8313,7 +8647,7 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba, true);
+ ufshcd_hba_stop(hba);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
@@ -8422,7 +8756,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
- hba->hba_enable_delay_us = 1000;
+ hba->vps = &ufs_hba_vps;
err = ufshcd_hba_init(hba);
if (err)
@@ -8560,6 +8894,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
+ INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
+ ufshcd_rpm_dev_flush_recheck_work);
+
/* Set the default auto-hiberate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6ffc08ad85f6..bf97d616e597 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -69,6 +69,7 @@
#include <scsi/scsi_eh.h>
#include "ufs.h"
+#include "ufs_quirks.h"
#include "ufshci.h"
#define UFSHCD "ufshcd"
@@ -336,6 +337,7 @@ struct ufs_hba_variant_ops {
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
int (*apply_dev_quirks)(struct ufs_hba *hba);
+ void (*fixup_dev_quirks)(struct ufs_hba *hba);
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
@@ -555,6 +557,20 @@ enum ufshcd_caps {
* for userspace to control the power management.
*/
UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
+
+ /*
+ * This capability allows the host controller driver to turn on
+ * WriteBooster if the underlying device supports it and is
+ * provisioned to be used. This can improve write performance.
+ */
+ UFSHCD_CAP_WB_EN = 1 << 7,
+};
+
+struct ufs_hba_variant_params {
+ struct devfreq_dev_profile devfreq_profile;
+ struct devfreq_simple_ondemand_data ondemand_data;
+ u16 hba_enable_delay_us;
+ u32 wb_flush_threshold;
};
/**
@@ -654,6 +670,7 @@ struct ufs_hba {
int nutmrs;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant_params *vps;
void *priv;
unsigned int irq;
bool is_irq_enabled;
@@ -675,7 +692,6 @@ struct ufs_hba {
u32 eh_flags;
u32 intr_mask;
u16 ee_ctrl_mask;
- u16 hba_enable_delay_us;
bool is_powered;
/* Work Queues */
@@ -727,6 +743,9 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
+ bool wb_buf_flush_enabled;
+ bool wb_enabled;
+ struct delayed_work rpm_dev_flush_recheck_work;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -775,6 +794,11 @@ static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}
+static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_WB_EN;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -808,7 +832,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
- unsigned long timeout_ms, bool can_sleep);
+ unsigned long timeout_ms);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
u32 reg);
@@ -845,6 +869,13 @@ static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}
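+/* WB query flags and attributes are addressed per LU in LU-dedicated buffer mode; index 0 is used otherwise */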
+static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
+{
+ if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+ return hba->dev_info.wb_dedicated_lu;
+ return 0;
+}
+
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
@@ -932,11 +963,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, bool *flag_res);
+ enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -1071,6 +1102,12 @@ static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
return 0;
}
+static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->fixup_dev_quirks)
+ hba->vops->fixup_dev_quirks(hba);
+}
+
static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c3f010df641e..8dbb4db6831a 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -908,7 +908,7 @@ static int pvscsi_host_reset(struct scsi_cmnd *cmd)
use_msg = adapter->use_msg;
if (use_msg) {
- adapter->use_msg = 0;
+ adapter->use_msg = false;
spin_unlock_irqrestore(&adapter->hw_lock, flags);
/*
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index c6727bcbc2e3..928c8adf5cb3 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -34,9 +34,9 @@
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 526e3215d8fe..ae1e248a8fb8 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -162,9 +162,8 @@ static int slim_add_device(struct slim_controller *ctrl,
sbdev->ctrl = ctrl;
INIT_LIST_HEAD(&sbdev->stream_list);
spin_lock_init(&sbdev->stream_list_lock);
-
- if (node)
- sbdev->dev.of_node = of_node_get(node);
+ sbdev->dev.of_node = of_node_get(node);
+ sbdev->dev.fwnode = of_fwnode_handle(node);
dev_set_name(&sbdev->dev, "%x:%x:%x:%x",
sbdev->e_addr.manf_id,
@@ -283,6 +282,7 @@ EXPORT_SYMBOL_GPL(slim_register_controller);
/* slim_remove_device: Remove the effect of slim_add_device() */
static void slim_remove_device(struct slim_device *sbdev)
{
+ of_node_put(sbdev->dev.of_node);
device_unregister(&sbdev->dev);
}
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index fc2575fef51b..743ee7b4e63f 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1361,12 +1361,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
- platform_set_drvdata(ngd->pdev, ctrl);
platform_device_add(ngd->pdev);
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
- ctrl->ngd = ngd;
return 0;
}
@@ -1376,12 +1374,13 @@ static int of_qcom_slim_ngd_register(struct device *parent,
static int qcom_slim_ngd_probe(struct platform_device *pdev)
{
- struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
+ struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent);
int ret;
ctrl->ctrl.dev = dev;
+ platform_set_drvdata(pdev, ctrl);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
pm_runtime_set_suspended(dev);
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 3f0261d53ad9..43665b77aa9e 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -14,13 +14,23 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
+#include <dt-bindings/power/meson-gxbb-power.h>
#include <dt-bindings/power/meson-sm1-power.h>
/* AO Offsets */
-#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
-#define AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+#define GX_AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+#define GX_AO_RTI_GEN_PWR_ISO0 (0x3b << 2)
+
+/*
+ * Meson8/Meson8b/Meson8m2 only expose the power management registers of the
+ * AO-bus as syscon. 0x3a from GX translates to 0x02, 0x3b translates to 0x03
+ * and so on.
+ */
+#define MESON8_AO_RTI_GEN_PWR_SLEEP0 (0x02 << 2)
+#define MESON8_AO_RTI_GEN_PWR_ISO0 (0x03 << 2)
/* HHI Offsets */
@@ -66,18 +76,25 @@ struct meson_ee_pwrc_domain_data {
/* TOP Power Domains */
-static struct meson_ee_pwrc_top_domain g12a_pwrc_vpu = {
- .sleep_reg = AO_RTI_GEN_PWR_SLEEP0,
+static struct meson_ee_pwrc_top_domain gx_pwrc_vpu = {
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .sleep_mask = BIT(8),
+ .iso_reg = GX_AO_RTI_GEN_PWR_SLEEP0,
+ .iso_mask = BIT(9),
+};
+
+static struct meson_ee_pwrc_top_domain meson8_pwrc_vpu = {
+ .sleep_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.sleep_mask = BIT(8),
- .iso_reg = AO_RTI_GEN_PWR_SLEEP0,
+ .iso_reg = MESON8_AO_RTI_GEN_PWR_SLEEP0,
.iso_mask = BIT(9),
};
#define SM1_EE_PD(__bit) \
{ \
- .sleep_reg = AO_RTI_GEN_PWR_SLEEP0, \
+ .sleep_reg = GX_AO_RTI_GEN_PWR_SLEEP0, \
.sleep_mask = BIT(__bit), \
- .iso_reg = AO_RTI_GEN_PWR_ISO0, \
+ .iso_reg = GX_AO_RTI_GEN_PWR_ISO0, \
.iso_mask = BIT(__bit), \
}
@@ -124,10 +141,26 @@ static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_vpu[] = {
VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
};
-static struct meson_ee_pwrc_mem_domain g12a_pwrc_mem_eth[] = {
+static struct meson_ee_pwrc_mem_domain gxbb_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
+static struct meson_ee_pwrc_mem_domain meson_pwrc_mem_eth[] = {
{ HHI_MEM_PD_REG0, GENMASK(3, 2) },
};
+static struct meson_ee_pwrc_mem_domain meson8_pwrc_audio_dsp_mem[] = {
+ { HHI_MEM_PD_REG0, GENMASK(1, 0) },
+};
+
+static struct meson_ee_pwrc_mem_domain meson8_pwrc_mem_vpu[] = {
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
+ VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
+ VPU_HHI_MEMPD(HHI_MEM_PD_REG0),
+};
+
static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_vpu[] = {
VPU_MEMPD(HHI_VPU_MEM_PD_REG0),
VPU_MEMPD(HHI_VPU_MEM_PD_REG1),
@@ -199,9 +232,35 @@ static struct meson_ee_pwrc_mem_domain sm1_pwrc_mem_audio[] = {
static bool pwrc_ee_get_power(struct meson_ee_pwrc_domain *pwrc_domain);
static struct meson_ee_pwrc_domain_desc g12a_pwrc_domains[] = {
- [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &g12a_pwrc_vpu, g12a_pwrc_mem_vpu,
+ [PWRC_G12A_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, g12a_pwrc_mem_vpu,
pwrc_ee_get_power, 11, 2),
- [PWRC_G12A_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+ [PWRC_G12A_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc gxbb_pwrc_domains[] = {
+ [PWRC_GXBB_VPU_ID] = VPU_PD("VPU", &gx_pwrc_vpu, gxbb_pwrc_mem_vpu,
+ pwrc_ee_get_power, 12, 2),
+ [PWRC_GXBB_ETHERNET_MEM_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
+};
+
+static struct meson_ee_pwrc_domain_desc meson8_pwrc_domains[] = {
+ [PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
+ meson8_pwrc_mem_vpu, pwrc_ee_get_power,
+ 0, 1),
+ [PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
+ meson_pwrc_mem_eth),
+ [PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
+ meson8_pwrc_audio_dsp_mem),
+};
+
+static struct meson_ee_pwrc_domain_desc meson8b_pwrc_domains[] = {
+ [PWRC_MESON8_VPU_ID] = VPU_PD("VPU", &meson8_pwrc_vpu,
+ meson8_pwrc_mem_vpu, pwrc_ee_get_power,
+ 11, 1),
+ [PWRC_MESON8_ETHERNET_MEM_ID] = MEM_PD("ETHERNET_MEM",
+ meson_pwrc_mem_eth),
+ [PWRC_MESON8_AUDIO_DSP_MEM_ID] = MEM_PD("AUDIO_DSP_MEM",
+ meson8_pwrc_audio_dsp_mem),
};
static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
@@ -216,7 +275,7 @@ static struct meson_ee_pwrc_domain_desc sm1_pwrc_domains[] = {
[PWRC_SM1_GE2D_ID] = TOP_PD("GE2D", &sm1_pwrc_ge2d, sm1_pwrc_mem_ge2d,
pwrc_ee_get_power),
[PWRC_SM1_AUDIO_ID] = MEM_PD("AUDIO", sm1_pwrc_mem_audio),
- [PWRC_SM1_ETH_ID] = MEM_PD("ETH", g12a_pwrc_mem_eth),
+ [PWRC_SM1_ETH_ID] = MEM_PD("ETH", meson_pwrc_mem_eth),
};
struct meson_ee_pwrc_domain {
@@ -470,6 +529,21 @@ static struct meson_ee_pwrc_domain_data meson_ee_g12a_pwrc_data = {
.domains = g12a_pwrc_domains,
};
+static struct meson_ee_pwrc_domain_data meson_ee_gxbb_pwrc_data = {
+ .count = ARRAY_SIZE(gxbb_pwrc_domains),
+ .domains = gxbb_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_m8_pwrc_data = {
+ .count = ARRAY_SIZE(meson8_pwrc_domains),
+ .domains = meson8_pwrc_domains,
+};
+
+static struct meson_ee_pwrc_domain_data meson_ee_m8b_pwrc_data = {
+ .count = ARRAY_SIZE(meson8b_pwrc_domains),
+ .domains = meson8b_pwrc_domains,
+};
+
static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
.count = ARRAY_SIZE(sm1_pwrc_domains),
.domains = sm1_pwrc_domains,
@@ -477,6 +551,22 @@ static struct meson_ee_pwrc_domain_data meson_ee_sm1_pwrc_data = {
static const struct of_device_id meson_ee_pwrc_match_table[] = {
{
+ .compatible = "amlogic,meson8-pwrc",
+ .data = &meson_ee_m8_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-pwrc",
+ .data = &meson_ee_m8b_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-pwrc",
+ .data = &meson_ee_m8b_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-pwrc",
+ .data = &meson_ee_gxbb_pwrc_data,
+ },
+ {
.compatible = "amlogic,meson-g12a-pwrc",
.data = &meson_ee_g12a_pwrc_data,
},
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index bcdcd3e7d7f1..7351f3030550 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -58,7 +58,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
* If cpu == -1, choose the current cpu, with no guarantees about
* potentially being migrated away.
*/
- if (unlikely(cpu < 0))
+ if (cpu < 0)
cpu = smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
@@ -70,6 +70,10 @@ static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
if (d)
return d;
+ d = service_select_by_cpu(d, -1);
+ if (d)
+ return d;
+
spin_lock(&dpio_list_lock);
d = list_entry(dpio_list.next, struct dpaa2_io, node);
list_del(&d->node);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 23a1377971f4..0ab85bfb116f 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -572,18 +572,6 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
#define EQAR_VB(eqar) ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
-static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
- u8 idx)
-{
- if (idx < 16)
- qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
- QMAN_RT_MODE);
- else
- qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
- (idx - 16) * 4,
- QMAN_RT_MODE);
-}
-
#define QB_RT_BIT ((u32)0x100)
/**
* qbman_swp_enqueue_direct() - Issue an enqueue command
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 1e164e03410a..9888a7061873 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -449,11 +449,6 @@ static inline int qm_eqcr_init(struct qm_portal *portal,
return 0;
}
-static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
-{
- return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
-}
-
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
struct qm_eqcr *eqcr = &portal->eqcr;
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 447146861c2c..2df20d6f85fa 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -448,7 +448,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
unsigned int i;
unsigned int j;
u32 crc;
- size_t calc_size = sizeof(struct qe_firmware);
+ size_t calc_size;
size_t length;
const struct qe_header *hdr;
@@ -480,7 +480,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
}
/* Validate the length and check if there's a CRC */
- calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+ calc_size = struct_size(firmware, microcode, firmware->count);
for (i = 0; i < firmware->count; i++)
/*
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index d6c93970df4d..cac0fb7693a0 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -519,7 +519,7 @@ int ucc_set_tdm_rxtx_clk(u32 tdm_num, enum qe_clock clock,
int clock_bits;
u32 shift;
struct qe_mux __iomem *qe_mux_reg;
- __be32 __iomem *cmxs1cr;
+ __be32 __iomem *cmxs1cr;
qe_mux_reg = &qe_immr->qmx;
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 103e2c93c342..446143241fe7 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+ifeq ($(CONFIG_ARM),y)
+obj-$(CONFIG_ARCH_MXC) += soc-imx.o
+endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
new file mode 100644
index 000000000000..fec3d672b606
--- /dev/null
+++ b/drivers/soc/imx/soc-imx.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#include <soc/imx/cpu.h>
+#include <soc/imx/revision.h>
+
+#define OCOTP_UID_H 0x420
+#define OCOTP_UID_L 0x410
+
+#define OCOTP_ULP_UID_1 0x4b0
+#define OCOTP_ULP_UID_2 0x4c0
+#define OCOTP_ULP_UID_3 0x4d0
+#define OCOTP_ULP_UID_4 0x4e0
+
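+/*
+ * Register a soc_device exposing the i.MX SoC id, silicon revision and, when
+ * an OCOTP fuse map is known for the SoC, the unique id as serial number.
+ */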
+static int __init imx_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ const char *ocotp_compat = NULL;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ struct regmap *ocotp = NULL;
+ const char *soc_id;
+ u64 soc_uid = 0;
+ u32 val;
+ int ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ of_node_put(root);
+ if (ret)
+ goto free_soc;
+
+ switch (__mxc_cpu_type) {
+ case MXC_CPU_MX1:
+ soc_id = "i.MX1";
+ break;
+ case MXC_CPU_MX21:
+ soc_id = "i.MX21";
+ break;
+ case MXC_CPU_MX25:
+ soc_id = "i.MX25";
+ break;
+ case MXC_CPU_MX27:
+ soc_id = "i.MX27";
+ break;
+ case MXC_CPU_MX31:
+ soc_id = "i.MX31";
+ break;
+ case MXC_CPU_MX35:
+ soc_id = "i.MX35";
+ break;
+ case MXC_CPU_MX51:
+ soc_id = "i.MX51";
+ break;
+ case MXC_CPU_MX53:
+ soc_id = "i.MX53";
+ break;
+ case MXC_CPU_IMX6SL:
+ ocotp_compat = "fsl,imx6sl-ocotp";
+ soc_id = "i.MX6SL";
+ break;
+ case MXC_CPU_IMX6DL:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6DL";
+ break;
+ case MXC_CPU_IMX6SX:
+ ocotp_compat = "fsl,imx6sx-ocotp";
+ soc_id = "i.MX6SX";
+ break;
+ case MXC_CPU_IMX6Q:
+ ocotp_compat = "fsl,imx6q-ocotp";
+ soc_id = "i.MX6Q";
+ break;
+ case MXC_CPU_IMX6UL:
+ ocotp_compat = "fsl,imx6ul-ocotp";
+ soc_id = "i.MX6UL";
+ break;
+ case MXC_CPU_IMX6ULL:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULL";
+ break;
+ case MXC_CPU_IMX6ULZ:
+ ocotp_compat = "fsl,imx6ull-ocotp";
+ soc_id = "i.MX6ULZ";
+ break;
+ case MXC_CPU_IMX6SLL:
+ ocotp_compat = "fsl,imx6sll-ocotp";
+ soc_id = "i.MX6SLL";
+ break;
+ case MXC_CPU_IMX7D:
+ ocotp_compat = "fsl,imx7d-ocotp";
+ soc_id = "i.MX7D";
+ break;
+ case MXC_CPU_IMX7ULP:
+ ocotp_compat = "fsl,imx7ulp-ocotp";
+ soc_id = "i.MX7ULP";
+ break;
+ case MXC_CPU_VF500:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF500";
+ break;
+ case MXC_CPU_VF510:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF510";
+ break;
+ case MXC_CPU_VF600:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF600";
+ break;
+ case MXC_CPU_VF610:
+ ocotp_compat = "fsl,vf610-ocotp";
+ soc_id = "VF610";
+ break;
+ default:
+ soc_id = "Unknown";
+ }
+ soc_dev_attr->soc_id = soc_id;
+
+ if (ocotp_compat) {
+ ocotp = syscon_regmap_lookup_by_compatible(ocotp_compat);
+ if (IS_ERR(ocotp))
+ pr_err("%s: failed to find %s regmap!\n", __func__, ocotp_compat);
+ }
+
+ if (!IS_ERR_OR_NULL(ocotp)) {
+ if (__mxc_cpu_type == MXC_CPU_IMX7ULP) {
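+ /* On i.MX7ULP the 64-bit UID is spread over four fuse words, 16 valid bits each */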
+ regmap_read(ocotp, OCOTP_ULP_UID_4, &val);
+ soc_uid = val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_3, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_2, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ regmap_read(ocotp, OCOTP_ULP_UID_1, &val);
+ soc_uid <<= 16;
+ soc_uid |= val & 0xffff;
+ } else {
+ regmap_read(ocotp, OCOTP_UID_H, &val);
+ soc_uid = val;
+ regmap_read(ocotp, OCOTP_UID_L, &val);
+ soc_uid <<= 32;
+ soc_uid |= val;
+ }
+ }
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%d.%d",
+ (imx_get_soc_revision() >> 4) & 0xf,
+ imx_get_soc_revision() & 0xf);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto free_soc;
+ }
+
+ soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid);
+ if (!soc_dev_attr->serial_number) {
+ ret = -ENOMEM;
+ goto free_rev;
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto free_serial_number;
+ }
+
+ return 0;
+
+free_serial_number:
+ kfree(soc_dev_attr->serial_number);
+free_rev:
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ return ret;
+}
+device_initcall(imx_soc_device_init);
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 719e1f189ebf..7b0759adb47d 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -53,11 +53,11 @@ static u32 __init imx8mq_soc_revision(void)
struct device_node *np;
void __iomem *ocotp_base;
u32 magic;
- u32 rev = 0;
+ u32 rev;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
if (!np)
- goto out;
+ return 0;
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
@@ -78,9 +78,8 @@ static u32 __init imx8mq_soc_revision(void)
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
iounmap(ocotp_base);
-
-out:
of_node_put(np);
+
return rev;
}
diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c
index 4608fbca20e1..707019223dd8 100644
--- a/drivers/soc/kendryte/k210-sysctl.c
+++ b/drivers/soc/kendryte/k210-sysctl.c
@@ -246,3 +246,15 @@ static void __init k210_soc_early_init(const void *fdt)
iounmap(regs);
}
SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
+
+#ifdef CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN
+/*
+ * Generic entry for the default k210.dtb embedded DTB for boards with:
+ * - Vendor ID: 0x4B5
+ * - Arch ID: 0xE59889E6A5A04149 (= "Canaan AI" in UTF-8 encoded Chinese)
+ * - Impl ID: 0x4D41495832303030 (= "MAIX2000")
+ * These values are reported by the SiPEED MAXDUINO, SiPEED MAIX GO and
+ * SiPEED Dan dock boards.
+ */
+SOC_BUILTIN_DTB_DECLARE(k210, 0x4B5, 0xE59889E6A5A04149, 0x4D41495832303030);
+#endif
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 2114b563478c..59a56cd790ec 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -44,4 +44,11 @@ config MTK_SCPSYS
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+config MTK_MMSYS
+ bool "MediaTek MMSYS Support"
+ default ARCH_MEDIATEK
+ help
+ Say yes here to add support for the MediaTek Multimedia
+ Subsystem (MMSYS).
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index b01733074ad6..01f9f873634a 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
+obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
new file mode 100644
index 000000000000..a55f25511173
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+
+#include "../../gpu/drm/mediatek/mtk_drm_ddp.h"
+#include "../../gpu/drm/mediatek/mtk_drm_ddp_comp.h"
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
+#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+#define DISP_REG_CONFIG_DPI_SEL 0x064
+
+#define OVL0_MOUT_EN_COLOR0 0x1
+#define OD_MOUT_EN_RDMA0 0x1
+#define OD1_MOUT_EN_RDMA1 BIT(16)
+#define UFOE_MOUT_EN_DSI0 0x1
+#define COLOR0_SEL_IN_OVL0 0x1
+#define OVL1_MOUT_EN_COLOR1 0x1
+#define GAMMA_MOUT_EN_RDMA1 0x1
+#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
+#define RDMA0_SOUT_DSI2 0x4
+#define RDMA0_SOUT_DSI3 0x5
+#define RDMA1_SOUT_DPI0 0x2
+#define RDMA1_SOUT_DPI1 0x3
+#define RDMA1_SOUT_DSI1 0x1
+#define RDMA1_SOUT_DSI2 0x4
+#define RDMA1_SOUT_DSI3 0x5
+#define RDMA2_SOUT_DPI0 0x2
+#define RDMA2_SOUT_DPI1 0x3
+#define RDMA2_SOUT_DSI1 0x1
+#define RDMA2_SOUT_DSI2 0x4
+#define RDMA2_SOUT_DSI3 0x5
+#define DPI0_SEL_IN_RDMA1 0x1
+#define DPI0_SEL_IN_RDMA2 0x3
+#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
+#define DSI1_SEL_IN_RDMA1 0x1
+#define DSI1_SEL_IN_RDMA2 0x4
+#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
+#define COLOR1_SEL_IN_OVL1 0x1
+
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
+#define DSI_SEL_IN_BLS 0x0
+#define DPI_SEL_IN_BLS 0x0
+#define DSI_SEL_IN_RDMA 0x1
+
+struct mtk_mmsys_driver_data {
+ const char *clk_driver;
+};
+
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .clk_driver = "clk-mt2701-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+ .clk_driver = "clk-mt2712-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6779_mmsys_driver_data = {
+ .clk_driver = "clk-mt6779-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt6797_mmsys_driver_data = {
+ .clk_driver = "clk-mt6797-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .clk_driver = "clk-mt8173-mm",
+};
+
+static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
+ .clk_driver = "clk-mt8183-mm",
+};
+
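+/*
+ * Return the MOUT_EN value that routes component 'cur' into 'next' and set
+ * *addr to the register to program; 0 means no mout setting applies.
+ */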
+static unsigned int mtk_mmsys_ddp_mout_en(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
+ value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+ value = OVL_MOUT_EN_RDMA;
+ } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD_MOUT_EN_RDMA0;
+ } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
+ value = UFOE_MOUT_EN_DSI0;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
+ value = OVL1_MOUT_EN_COLOR1;
+ } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
+ value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD1_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DSI3;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+ value = RDMA1_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+ value = RDMA2_SOUT_DSI3;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
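+/*
+ * Return the SEL_IN value that selects 'cur' as the input of 'next' and set
+ * *addr to the register to program; 0 means no input selection is needed.
+ */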
+static unsigned int mtk_mmsys_ddp_sel_in(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
+ value = COLOR0_SEL_IN_OVL0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI3_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+ value = DSI1_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI2_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI3_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
+ value = COLOR1_SEL_IN_OVL1;
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSI_SEL;
+ value = DSI_SEL_IN_BLS;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static void mtk_mmsys_ddp_sout_sel(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
+ writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+ writel_relaxed(DSI_SEL_IN_RDMA,
+ config_regs + DISP_REG_CONFIG_DSI_SEL);
+ writel_relaxed(DPI_SEL_IN_BLS,
+ config_regs + DISP_REG_CONFIG_DPI_SEL);
+ }
+}
+
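+/* Program the mout/sout/sel-in routing so that the output of 'cur' feeds 'next' */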
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ void __iomem *config_regs = dev_get_drvdata(dev);
+ unsigned int addr, value, reg;
+
+ value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ mtk_mmsys_ddp_sout_sel(config_regs, cur, next);
+
+ value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ void __iomem *config_regs = dev_get_drvdata(dev);
+ unsigned int addr, value, reg;
+
+ value = mtk_mmsys_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ value = mtk_mmsys_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+
+static int mtk_mmsys_probe(struct platform_device *pdev)
+{
+ const struct mtk_mmsys_driver_data *data;
+ struct device *dev = &pdev->dev;
+ struct platform_device *clks;
+ struct platform_device *drm;
+ void __iomem *config_regs;
+ struct resource *mem;
+ int ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ config_regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(config_regs)) {
+ ret = PTR_ERR(config_regs);
+ dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, config_regs);
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ clks = platform_device_register_data(&pdev->dev, data->clk_driver,
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(clks))
+ return PTR_ERR(clks);
+
+ drm = platform_device_register_data(&pdev->dev, "mediatek-drm",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(drm)) {
+ platform_device_unregister(clks);
+ return PTR_ERR(drm);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_match_mtk_mmsys[] = {
+ {
+ .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt6779-mmsys",
+ .data = &mt6779_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt6797-mmsys",
+ .data = &mt6797_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data,
+ },
+ {
+ .compatible = "mediatek,mt8183-mmsys",
+ .data = &mt8183_mmsys_driver_data,
+ },
+ { }
+};
+
+static struct platform_driver mtk_mmsys_drv = {
+ .driver = {
+ .name = "mtk-mmsys",
+ .of_match_table = of_match_mtk_mmsys,
+ },
+ .probe = mtk_mmsys_probe,
+};
+
+builtin_platform_driver(mtk_mmsys_drv);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 285baa7e474e..07bb261a63d2 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -35,15 +35,6 @@ config QCOM_GENI_SE
driver is also used to manage the common aspects of multiple Serial
Engines present in the QUP.
-config QCOM_GLINK_SSR
- tristate "Qualcomm Glink SSR driver"
- depends on RPMSG
- depends on QCOM_RPROC_COMMON
- help
- Say y here to enable GLINK SSR support. The GLINK SSR driver
- implements the SSR protocol for notifying the remote processor about
- neighboring subsystems going up or down.
-
config QCOM_GSBI
tristate "QCOM General Serial Bus Interface"
depends on ARCH_QCOM || COMPILE_TEST
@@ -107,7 +98,7 @@ config QCOM_RPMH
help apply the aggregated state on the resource.
config QCOM_RPMHPD
- bool "Qualcomm RPMh Power domain driver"
+ tristate "Qualcomm RPMh Power domain driver"
depends on QCOM_RPMH && QCOM_COMMAND_DB
help
QCOM RPMh Power domain driver to support power-domains with
@@ -116,8 +107,8 @@ config QCOM_RPMHPD
for the voltage rail.
config QCOM_RPMPD
- bool "Qualcomm RPM Power domain driver"
- depends on QCOM_SMD_RPM=y
+ tristate "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM
help
QCOM RPM Power domain driver to support power-domains with
performance states. The driver communicates a performance state
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 92cc4232d72c..7d7e2ecbdce6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,7 +3,6 @@ CFLAGS_rpmh-rsc.o := -I$(src)
obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
-obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index f6c3d17b05c7..fc5610603b17 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <soc/qcom/cmd-db.h>
@@ -236,6 +237,77 @@ enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
}
EXPORT_SYMBOL(cmd_db_read_slave_id);
+#ifdef CONFIG_DEBUG_FS
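+/*
+ * Dump every command DB entry (address, resource id and aux data), grouped
+ * per slave, through the "cmd-db" debugfs file.
+ */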
+static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
+{
+ int i, j;
+ const struct rsc_hdr *rsc;
+ const struct entry_header *ent;
+ const char *name;
+ u16 len, version;
+ u8 major, minor;
+
+ seq_puts(seq, "Command DB DUMP\n");
+
+ for (i = 0; i < MAX_SLV_ID; i++) {
+ rsc = &cmd_db_header->header[i];
+ if (!rsc->slv_id)
+ break;
+
+ switch (le16_to_cpu(rsc->slv_id)) {
+ case CMD_DB_HW_ARC:
+ name = "ARC";
+ break;
+ case CMD_DB_HW_VRM:
+ name = "VRM";
+ break;
+ case CMD_DB_HW_BCM:
+ name = "BCM";
+ break;
+ default:
+ name = "Unknown";
+ break;
+ }
+
+ version = le16_to_cpu(rsc->version);
+ major = version >> 8;
+ minor = version;
+
+ seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor);
+ seq_puts(seq, "-------------------------\n");
+
+ ent = rsc_to_entry_header(rsc);
+ for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) {
+ seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr),
+ (int)sizeof(ent->id), ent->id);
+
+ len = le16_to_cpu(ent->len);
+ if (len) {
+ seq_printf(seq, " [%*ph]",
+ len, rsc_offset(rsc, ent));
+ }
+ seq_putc(seq, '\n');
+ }
+ }
+
+ return 0;
+}
+
+static int open_cmd_db_debugfs(struct inode *inode, struct file *file)
+{
+ return single_open(file, cmd_db_debugfs_dump, inode->i_private);
+}
+#endif
+
+static const struct file_operations cmd_db_debugfs_ops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = open_cmd_db_debugfs,
+#endif
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int cmd_db_dev_probe(struct platform_device *pdev)
{
struct reserved_mem *rmem;
@@ -259,12 +331,14 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
+ debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops);
+
return 0;
}
static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
- { },
+ { }
};
static struct platform_driver cmd_db_dev_driver = {
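The debugfs hunk above follows the classic seq_file pattern: a dump callback handed to single_open() plus hand-rolled file_operations. For reference, a boilerplate-free equivalent (a sketch, not what this patch does) is DEFINE_SHOW_ATTRIBUTE(), which generates the open/read/llseek/release glue around single_open():

        #include <linux/debugfs.h>
        #include <linux/seq_file.h>

        static int example_show(struct seq_file *seq, void *unused)
        {
                seq_puts(seq, "Command DB DUMP\n");     /* dump body goes here */
                return 0;
        }
        DEFINE_SHOW_ATTRIBUTE(example);                 /* emits example_fops */

        /* usage: debugfs_create_file("cmd-db", 0400, NULL, NULL, &example_fops); */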
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
deleted file mode 100644
index d7babe3d67bc..000000000000
--- a/drivers/soc/qcom/glink_ssr.c
+++ /dev/null
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- * Copyright (c) 2017, Linaro Ltd.
- */
-
-#include <linux/completion.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/rpmsg.h>
-#include <linux/remoteproc/qcom_rproc.h>
-
-/**
- * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
- * version: The G-Link SSR protocol version
- * command: The G-Link SSR command - do_cleanup
- * seq_num: Sequence number
- * name_len: Length of the name of the subsystem being restarted
- * name: G-Link edge name of the subsystem being restarted
- */
-struct do_cleanup_msg {
- __le32 version;
- __le32 command;
- __le32 seq_num;
- __le32 name_len;
- char name[32];
-};
-
-/**
- * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
- * version: The G-Link SSR protocol version
- * response: The G-Link SSR response to a do_cleanup command, cleanup_done
- * seq_num: Sequence number
- */
-struct cleanup_done_msg {
- __le32 version;
- __le32 response;
- __le32 seq_num;
-};
-
-/**
- * G-Link SSR protocol commands
- */
-#define GLINK_SSR_DO_CLEANUP 0
-#define GLINK_SSR_CLEANUP_DONE 1
-
-struct glink_ssr {
- struct device *dev;
- struct rpmsg_endpoint *ept;
-
- struct notifier_block nb;
-
- u32 seq_num;
- struct completion completion;
-};
-
-static int qcom_glink_ssr_callback(struct rpmsg_device *rpdev,
- void *data, int len, void *priv, u32 addr)
-{
- struct cleanup_done_msg *msg = data;
- struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
-
- if (len < sizeof(*msg)) {
- dev_err(ssr->dev, "message too short\n");
- return -EINVAL;
- }
-
- if (le32_to_cpu(msg->version) != 0)
- return -EINVAL;
-
- if (le32_to_cpu(msg->response) != GLINK_SSR_CLEANUP_DONE)
- return 0;
-
- if (le32_to_cpu(msg->seq_num) != ssr->seq_num) {
- dev_err(ssr->dev, "invalid sequence number of response\n");
- return -EINVAL;
- }
-
- complete(&ssr->completion);
-
- return 0;
-}
-
-static int qcom_glink_ssr_notify(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- struct glink_ssr *ssr = container_of(nb, struct glink_ssr, nb);
- struct do_cleanup_msg msg;
- char *ssr_name = data;
- int ret;
-
- ssr->seq_num++;
- reinit_completion(&ssr->completion);
-
- memset(&msg, 0, sizeof(msg));
- msg.command = cpu_to_le32(GLINK_SSR_DO_CLEANUP);
- msg.seq_num = cpu_to_le32(ssr->seq_num);
- msg.name_len = cpu_to_le32(strlen(ssr_name));
- strlcpy(msg.name, ssr_name, sizeof(msg.name));
-
- ret = rpmsg_send(ssr->ept, &msg, sizeof(msg));
- if (ret < 0)
- dev_err(ssr->dev, "failed to send cleanup message\n");
-
- ret = wait_for_completion_timeout(&ssr->completion, HZ);
- if (!ret)
- dev_err(ssr->dev, "timeout waiting for cleanup done message\n");
-
- return NOTIFY_DONE;
-}
-
-static int qcom_glink_ssr_probe(struct rpmsg_device *rpdev)
-{
- struct glink_ssr *ssr;
-
- ssr = devm_kzalloc(&rpdev->dev, sizeof(*ssr), GFP_KERNEL);
- if (!ssr)
- return -ENOMEM;
-
- init_completion(&ssr->completion);
-
- ssr->dev = &rpdev->dev;
- ssr->ept = rpdev->ept;
- ssr->nb.notifier_call = qcom_glink_ssr_notify;
-
- dev_set_drvdata(&rpdev->dev, ssr);
-
- return qcom_register_ssr_notifier(&ssr->nb);
-}
-
-static void qcom_glink_ssr_remove(struct rpmsg_device *rpdev)
-{
- struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
-
- qcom_unregister_ssr_notifier(&ssr->nb);
-}
-
-static const struct rpmsg_device_id qcom_glink_ssr_match[] = {
- { "glink_ssr" },
- {}
-};
-
-static struct rpmsg_driver qcom_glink_ssr_driver = {
- .probe = qcom_glink_ssr_probe,
- .remove = qcom_glink_ssr_remove,
- .callback = qcom_glink_ssr_callback,
- .id_table = qcom_glink_ssr_match,
- .drv = {
- .name = "qcom_glink_ssr",
- },
-};
-module_rpmsg_driver(qcom_glink_ssr_driver);
-
-MODULE_ALIAS("rpmsg:glink_ssr");
-MODULE_DESCRIPTION("Qualcomm GLINK SSR notifier");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 17ad3b8698e1..bdcf16f88a97 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -155,10 +155,6 @@ static int pdr_register_listener(struct pdr_handle *pdr,
return ret;
}
- if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX)
- pr_err("PDR: %s notification state invalid: 0x%x\n",
- pds->service_path, resp.curr_state);
-
pds->state = resp.curr_state;
return 0;
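/*
 * Note on the hunk above (reviewer annotation): the deleted range check
 * could never fire. Once the value is cast to int, it is by definition
 * within [INT_MIN, INT_MAX], so both comparisons are constant-false and
 * removing them does not change behaviour.
 */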
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index f43a2e07ee83..ed2c687c16b3 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -599,6 +599,7 @@ static const struct of_device_id qmp_dt_match[] = {
{ .compatible = "qcom,sc7180-aoss-qmp", },
{ .compatible = "qcom,sdm845-aoss-qmp", },
{ .compatible = "qcom,sm8150-aoss-qmp", },
+ { .compatible = "qcom,sm8250-aoss-qmp", },
{}
};
MODULE_DEVICE_TABLE(of, qmp_dt_match);
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index 6eec32b97f83..ef60e790a750 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -22,16 +22,23 @@ struct rsc_drv;
* struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
* to the controller
*
- * @drv: the controller
- * @type: type of the TCS in this group - active, sleep, wake
- * @mask: mask of the TCSes relative to all the TCSes in the RSC
- * @offset: start of the TCS group relative to the TCSes in the RSC
- * @num_tcs: number of TCSes in this type
- * @ncpt: number of commands in each TCS
- * @lock: lock for synchronizing this TCS writes
- * @req: requests that are sent from the TCS
- * @cmd_cache: flattened cache of cmds in sleep/wake TCS
- * @slots: indicates which of @cmd_addr are occupied
+ * @drv: The controller.
+ * @type: Type of the TCS in this group - active, sleep, wake.
+ * @mask: Mask of the TCSes relative to all the TCSes in the RSC.
+ * @offset: Start of the TCS group relative to the TCSes in the RSC.
+ * @num_tcs: Number of TCSes in this type.
+ * @ncpt: Number of commands in each TCS.
+ * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY
+ * transfers (could be on a wake/sleep TCS if we are borrowing for
+ * an ACTIVE_ONLY transfer).
+ * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock,
+ * trigger
+ * End: get irq, access req,
+ * grab drv->lock, clear tcs_in_use, drop drv->lock
+ * @slots: Indicates which of @cmd_addr are occupied; only used for
+ * SLEEP / WAKE TCSs. Things are tightly packed in the
+ * case that (ncpt < MAX_CMDS_PER_TCS). That is, if ncpt = 2 and
+ * MAX_CMDS_PER_TCS = 16, then bit[2] is the first bit of the 2nd TCS.
*/
struct tcs_group {
struct rsc_drv *drv;
@@ -40,9 +47,7 @@ struct tcs_group {
u32 offset;
int num_tcs;
int ncpt;
- spinlock_t lock;
const struct tcs_request *req[MAX_TCS_PER_TYPE];
- u32 *cmd_cache;
DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
};
@@ -84,20 +89,32 @@ struct rpmh_ctrlr {
* struct rsc_drv: the Direct Resource Voter (DRV) of the
* Resource State Coordinator controller (RSC)
*
- * @name: controller identifier
- * @tcs_base: start address of the TCS registers in this controller
- * @id: instance id in the controller (Direct Resource Voter)
- * @num_tcs: number of TCSes in this DRV
- * @tcs: TCS groups
- * @tcs_in_use: s/w state of the TCS
- * @lock: synchronize state of the controller
- * @client: handle to the DRV's client.
+ * @name: Controller identifier.
+ * @tcs_base: Start address of the TCS registers in this controller.
+ * @id: Instance id in the controller (Direct Resource Voter).
+ * @num_tcs: Number of TCSes in this DRV.
+ * @rsc_pm: CPU PM notifier for controller.
+ * Used when solver mode is not present.
+ * @cpus_in_pm: Number of CPUs not in idle power collapse.
+ * Used when solver mode is not present.
+ * @tcs: TCS groups.
+ * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY
+ * transfers, but might show a sleep/wake TCS in use if
+ * it was borrowed for an active_only transfer. You
+ * must hold the lock in this struct (AKA drv->lock) in
+ * order to update this.
+ * @lock: Synchronize state of the controller. If RPMH's cache
+ * lock will also be held, the order is: drv->lock then
+ * cache_lock.
+ * @client: Handle to the DRV's client.
*/
struct rsc_drv {
const char *name;
void __iomem *tcs_base;
int id;
int num_tcs;
+ struct notifier_block rsc_pm;
+ atomic_t cpus_in_pm;
struct tcs_group tcs[TCS_TYPE_NR];
DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
spinlock_t lock;
@@ -107,7 +124,7 @@ struct rsc_drv {
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
const struct tcs_request *msg);
-int rpmh_rsc_invalidate(struct rsc_drv *drv);
+void rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg, int r);
int rpmh_flush(struct rpmh_ctrlr *ctrlr);
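The @lock documentation above pins the ordering of the two spinlocks involved in flushing: drv->lock first, then the client's cache_lock. A minimal sketch of a path that honours that order (illustrative only; the function and flow are made up, the members are the ones declared above):

        static void example_ordered_path(struct rsc_drv *drv)
        {
                struct rpmh_ctrlr *ctrlr = &drv->client;

                spin_lock(&drv->lock);                  /* outer lock */
                spin_lock(&ctrlr->cache_lock);          /* inner lock; never the reverse */
                /* ... walk ctrlr->cache and write out sleep/wake data ... */
                spin_unlock(&ctrlr->cache_lock);
                spin_unlock(&drv->lock);
        }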
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index b71822131f59..076fd27f3081 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -6,9 +6,11 @@
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
#include <linux/atomic.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
@@ -30,21 +32,41 @@
#define RSC_DRV_TCS_OFFSET 672
#define RSC_DRV_CMD_OFFSET 20
-/* DRV Configuration Information Register */
+/* DRV HW Solver Configuration Information Register */
+#define DRV_SOLVER_CONFIG 0x04
+#define DRV_HW_SOLVER_MASK 1
+#define DRV_HW_SOLVER_SHIFT 24
+
+/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG 0x0C
#define DRV_NUM_TCS_MASK 0x3F
#define DRV_NUM_TCS_SHIFT 6
#define DRV_NCPT_MASK 0x1F
#define DRV_NCPT_SHIFT 27
-/* Register offsets */
+/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE 0x00
#define RSC_DRV_IRQ_STATUS 0x04
-#define RSC_DRV_IRQ_CLEAR 0x08
-#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
+#define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */
+
+/*
+ * Offsets for per TCS Registers.
+ *
+ * TCSes start at 0x10 from tcs_base and are stored one after another.
+ * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
+ * of the below to find a register.
+ */
+#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */
#define RSC_DRV_CONTROL 0x14
-#define RSC_DRV_STATUS 0x18
-#define RSC_DRV_CMD_ENABLE 0x1C
+#define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */
+#define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */
+
+/*
+ * Offsets for per command in a TCS.
+ *
+ * Commands (up to 16) start at 0x30 in a TCS; multiply command index
+ * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
+ */
#define RSC_DRV_CMD_MSGID 0x30
#define RSC_DRV_CMD_ADDR 0x34
#define RSC_DRV_CMD_DATA 0x38
@@ -61,94 +83,179 @@
#define CMD_STATUS_ISSUED BIT(8)
#define CMD_STATUS_COMPL BIT(16)
-static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
+/*
+ * Here's a high level overview of how all the registers in RPMH work
+ * together:
+ *
+ * - The main rpmh-rsc address is the base of a register space that can
+ * be used to find overall configuration of the hardware
+ * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
+ * space are all the TCS blocks. The offset of the TCS blocks is
+ * specified in the device tree by "qcom,tcs-offset" and used to
+ * compute tcs_base.
+ * - TCS blocks come one after another. Type, count, and order are
+ * specified by the device tree as "qcom,tcs-config".
+ * - Each TCS block has some registers, then space for up to 16 commands.
+ * Note that though address space is reserved for 16 commands, fewer
+ * might be present. See ncpt (num cmds per TCS).
+ *
+ * Here's a picture:
+ *
+ * +---------------------------------------------------+
+ * |RSC |
+ * | ctrl |
+ * | |
+ * | Drvs: |
+ * | +-----------------------------------------------+ |
+ * | |DRV0 | |
+ * | | ctrl/config | |
+ * | | IRQ | |
+ * | | | |
+ * | | TCSes: | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS0 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS1 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | +------------------------------------------+ | |
+ * | | |TCS2 | | | | | | | | | | | | | | |
+ * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
+ * | | | | | | | | | | | | | | | | | |
+ * | | +------------------------------------------+ | |
+ * | | ...... | |
+ * | +-----------------------------------------------+ |
+ * | +-----------------------------------------------+ |
+ * | |DRV1 | |
+ * | | (same as DRV0) | |
+ * | +-----------------------------------------------+ |
+ * | ...... |
+ * +---------------------------------------------------+
+ */
+
+static inline void __iomem *
+tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
- return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
- RSC_DRV_CMD_OFFSET * cmd_id);
+ return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}
-static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
- u32 data)
+static inline void __iomem *
+tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
- writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
- RSC_DRV_CMD_OFFSET * cmd_id);
+ return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}
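/*
 * Worked example of the address math above (illustrative): the address of
 * RSC_DRV_CMD_ADDR for command 3 of TCS 2 is
 *
 *     tcs_base + RSC_DRV_TCS_OFFSET * 2 + RSC_DRV_CMD_OFFSET * 3 + 0x34
 *   = tcs_base + 672 * 2 + 20 * 3 + 52
 *   = tcs_base + 1456
 *
 * which is exactly what tcs_cmd_addr(drv, RSC_DRV_CMD_ADDR, 2, 3) returns.
 */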
-static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
+static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id)
{
- writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
+ return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
-static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
- u32 data)
+static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
- writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
- for (;;) {
- if (data == readl(drv->tcs_base + reg +
- RSC_DRV_TCS_OFFSET * tcs_id))
- break;
- udelay(1);
- }
+ return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
-static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
+ int cmd_id, u32 data)
{
- return !test_bit(tcs_id, drv->tcs_in_use) &&
- read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+ writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
-static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
+static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
{
- return &drv->tcs[type];
+ writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
-static int tcs_invalidate(struct rsc_drv *drv, int type)
+static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
+ u32 data)
{
- int m;
- struct tcs_group *tcs;
+ u32 new_data;
- tcs = get_tcs_of_type(drv, type);
+ writel(data, tcs_reg_addr(drv, reg, tcs_id));
+ if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data,
+ new_data == data, 1, USEC_PER_SEC))
+ pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
+ data, tcs_id, reg);
+}
- spin_lock(&tcs->lock);
- if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
- spin_unlock(&tcs->lock);
- return 0;
- }
+/**
+ * tcs_is_free() - Return if a TCS is totally free.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
+ *
+ * Context: Must be called with the drv->lock held.
+ *
+ * Return: true if the given TCS is free.
+ */
+static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
+{
+ return !test_bit(tcs_id, drv->tcs_in_use);
+}
+
+/**
+ * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
+ * @drv: The RSC controller.
+ * @type: SLEEP_TCS or WAKE_TCS
+ *
+ * This will clear the "slots" variable of the given tcs_group and also
+ * tell the hardware to forget about all entries.
+ *
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
+ */
+static void tcs_invalidate(struct rsc_drv *drv, int type)
+{
+ int m;
+ struct tcs_group *tcs = &drv->tcs[type];
+
+ /* Caller ensures nobody else is running so no lock */
+ if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
+ return;
for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
- if (!tcs_is_free(drv, m)) {
- spin_unlock(&tcs->lock);
- return -EAGAIN;
- }
write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
}
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
- spin_unlock(&tcs->lock);
-
- return 0;
}
/**
- * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
+ * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
+ * @drv: The RSC controller.
*
- * @drv: the RSC controller
+ * The caller must ensure that no other RPMH actions are happening when this
+ * function is called, since otherwise the device may immediately become
+ * used again even before this function exits.
*/
-int rpmh_rsc_invalidate(struct rsc_drv *drv)
+void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
- int ret;
-
- ret = tcs_invalidate(drv, SLEEP_TCS);
- if (!ret)
- ret = tcs_invalidate(drv, WAKE_TCS);
-
- return ret;
+ tcs_invalidate(drv, SLEEP_TCS);
+ tcs_invalidate(drv, WAKE_TCS);
}
+/**
+ * get_tcs_for_msg() - Get the tcs_group used to send the given message.
+ * @drv: The RSC controller.
+ * @msg: The message we want to send.
+ *
+ * This is normally pretty straightforward except if we are trying to send
+ * an ACTIVE_ONLY message but don't have any active_only TCSes.
+ *
+ * Return: A pointer to a tcs_group or an ERR_PTR.
+ */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
const struct tcs_request *msg)
{
- int type, ret;
+ int type;
struct tcs_group *tcs;
switch (msg->state) {
@@ -168,24 +275,33 @@ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
/*
* If we are making an active request on a RSC that does not have a
* dedicated TCS for active state use, then re-purpose a wake TCS to
- * send active votes.
- * NOTE: The driver must be aware that this RSC does not have a
- * dedicated AMC, and therefore would invalidate the sleep and wake
- * TCSes before making an active state request.
+ * send active votes. This is safe because we ensure any active-only
+ * transfers have finished before we use it (maybe by running from
+ * the last CPU in PM code).
*/
- tcs = get_tcs_of_type(drv, type);
- if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
- tcs = get_tcs_of_type(drv, WAKE_TCS);
- if (tcs->num_tcs) {
- ret = rpmh_rsc_invalidate(drv);
- if (ret)
- return ERR_PTR(ret);
- }
- }
+ tcs = &drv->tcs[type];
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
return tcs;
}
+/**
+ * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
+ * @drv: The RSC controller.
+ * @tcs_id: The global ID of this TCS.
+ *
+ * For ACTIVE_ONLY transfers we want to call back into the client when the
+ * transfer finishes. To do this we need the "request" that the client
+ * originally provided us. This function grabs the request that we stashed
+ * when we started the transfer.
+ *
+ * This only makes sense for ACTIVE_ONLY transfers since those are the only
+ * ones we track sending (the only ones we enable interrupts for and the only
+ * ones we call back to the client for).
+ *
+ * Return: The stashed request.
+ */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
int tcs_id)
{
@@ -202,7 +318,76 @@ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
}
/**
- * tcs_tx_done: TX Done interrupt handler
+ * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @trigger: If true then untrigger/retrigger. If false then just untrigger.
+ *
+ * In the normal case we only ever call with "trigger=true" to start a
+ * transfer. That will un-trigger/disable the TCS from the last transfer
+ * then trigger/enable for this transfer.
+ *
+ * If we borrowed a wake TCS for an active-only transfer we'll also call
+ * this function with "trigger=false" to just do the un-trigger/disable
+ * before using the TCS for wake purposes again.
+ *
+ * Note that the AP is only in charge of triggering active-only transfers.
+ * The AP never triggers sleep/wake values using this function.
+ */
+static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
+{
+ u32 enable;
+
+ /*
+ * HW req: Clear the DRV_CONTROL and enable TCS again
+ * While clearing ensure that the AMC mode trigger is cleared
+ * and then the mode enable is cleared.
+ */
+ enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
+ enable &= ~TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable &= ~TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+
+ if (trigger) {
+ /* Enable the AMC mode on the TCS and then trigger the TCS */
+ enable = TCS_AMC_MODE_ENABLE;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ enable |= TCS_AMC_MODE_TRIGGER;
+ write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
+ }
+}
+
+/**
+ * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @enable: If true then enable; if false then disable
+ *
+ * We only ever call this when we borrow a wake TCS for an active-only
+ * transfer. For active-only TCSes interrupts are always left enabled.
+ */
+static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
+{
+ u32 data;
+
+ data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+ if (enable)
+ data |= BIT(tcs_id);
+ else
+ data &= ~BIT(tcs_id);
+ writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
+}
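/*
 * Lifecycle of a borrowed wake TCS for one ACTIVE_ONLY transfer (a sketch
 * assembled from tcs_write() and tcs_tx_done() in this patch, not a new
 * code path):
 *
 *      write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
 *      write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
 *      enable_tcs_irq(drv, tcs_id, true);      // only for the borrowed case
 *      __tcs_buffer_write(drv, tcs_id, 0, msg);
 *      __tcs_set_trigger(drv, tcs_id, true);
 *      ... IRQ fires, transfer done ...
 *      __tcs_set_trigger(drv, tcs_id, false);  // un-trigger only
 *      enable_tcs_irq(drv, tcs_id, false);     // stop wake-vote IRQ spam
 */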
+
+/**
+ * tcs_tx_done() - TX Done interrupt handler.
+ * @irq: The IRQ number (ignored).
+ * @p: Pointer to "struct rsc_drv".
+ *
+ * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
+ * IRQ for) when a transfer is done.
+ *
+ * Return: IRQ_HANDLED
*/
static irqreturn_t tcs_tx_done(int irq, void *p)
{
@@ -212,7 +397,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
const struct tcs_request *req;
struct tcs_cmd *cmd;
- irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
+ irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);
for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
req = get_req_from_tcs(drv, i);
@@ -226,7 +411,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
u32 sts;
cmd = &req->cmds[j];
- sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
+ sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
if (!(sts & CMD_STATUS_ISSUED) ||
((req->wait_for_compl || cmd->wait) &&
!(sts & CMD_STATUS_COMPL))) {
@@ -237,13 +422,28 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
}
trace_rpmh_tx_done(drv, i, req, err);
+
+ /*
+ * If wake tcs was re-purposed for sending active
+ * votes, clear AMC trigger & enable modes and
+ * disable interrupt for this TCS
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
- write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
+ writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
spin_lock(&drv->lock);
clear_bit(i, drv->tcs_in_use);
+ /*
+ * Disable interrupt for WAKE TCS to avoid being
+ * spammed with interrupts coming when the solver
+ * sends its wake votes.
+ */
+ if (!drv->tcs[ACTIVE_TCS].num_tcs)
+ enable_tcs_irq(drv, i, false);
spin_unlock(&drv->lock);
if (req)
rpmh_tx_done(req, err);
@@ -252,6 +452,16 @@ skip:
return IRQ_HANDLED;
}
+/**
+ * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
+ * @drv: The controller.
+ * @tcs_id: The global ID of this TCS.
+ * @cmd_id: The index within the TCS to start writing.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This is used for all types of transfers (active, sleep, and wake).
+ */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
const struct tcs_request *msg)
{
@@ -265,7 +475,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
cmd_msgid |= CMD_MSGID_WRITE;
- cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id);
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
cmd = &msg->cmds[i];
@@ -281,32 +491,30 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
- cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
-static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
-{
- u32 enable;
-
- /*
- * HW req: Clear the DRV_CONTROL and enable TCS again
- * While clearing ensure that the AMC mode trigger is cleared
- * and then the mode enable is cleared.
- */
- enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
- enable &= ~TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
- enable &= ~TCS_AMC_MODE_ENABLE;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
-
- /* Enable the AMC mode on the TCS and then trigger the TCS */
- enable = TCS_AMC_MODE_ENABLE;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
- enable |= TCS_AMC_MODE_TRIGGER;
- write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
-}
-
+/**
+ * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
+ * @drv: The controller.
+ * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
+ * @msg: The message we want to send, which will contain several addr/data
+ * pairs to program (but few enough that they all fit in one TCS).
+ *
+ * This will walk through the TCSes in the group and check if any of them
+ * appear to be sending to addresses referenced in the message. If it finds
+ * one it'll return -EBUSY.
+ *
+ * Only for use for active-only transfers.
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: 0 if nothing in flight or -EBUSY if we should try again later.
+ * The caller must re-enable interrupts between tries since that's
+ * the only way tcs_is_free() will ever return true and the only way
+ * RSC_DRV_CMD_ENABLE will ever be cleared.
+ */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
const struct tcs_request *msg)
{
@@ -319,10 +527,10 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
if (tcs_is_free(drv, tcs_id))
continue;
- curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
- addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
+ addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
for (k = 0; k < msg->num_cmds; k++) {
if (addr == msg->cmds[k].addr)
return -EBUSY;
@@ -333,6 +541,15 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
return 0;
}
+/**
+ * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
+ * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
+ * we borrowed it because there are zero active-only ones).
+ *
+ * Must be called with the drv->lock held since that protects tcs_in_use.
+ *
+ * Return: The first tcs that's free.
+ */
static int find_free_tcs(struct tcs_group *tcs)
{
int i;
@@ -345,6 +562,20 @@ static int find_free_tcs(struct tcs_group *tcs)
return -EBUSY;
}
+/**
+ * tcs_write() - Store messages into a TCS right now, or return -EBUSY.
+ * @drv: The controller.
+ * @msg: The data to be sent.
+ *
+ * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it.
+ *
+ * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for
+ * the same address is already transferring returns -EBUSY which means the
+ * client should retry shortly.
+ *
+ * Return: 0 on success, -EBUSY if client should retry, or an error.
+ * Client should have interrupts enabled for a bit before retrying.
+ */
static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
@@ -356,57 +587,77 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&tcs->lock, flags);
- spin_lock(&drv->lock);
+ spin_lock_irqsave(&drv->lock, flags);
/*
* The h/w does not like if we send a request to the same address,
* when one is already in-flight or being processed.
*/
ret = check_for_req_inflight(drv, tcs, msg);
- if (ret) {
- spin_unlock(&drv->lock);
- goto done_write;
- }
+ if (ret)
+ goto unlock;
- tcs_id = find_free_tcs(tcs);
- if (tcs_id < 0) {
- ret = tcs_id;
- spin_unlock(&drv->lock);
- goto done_write;
- }
+ ret = find_free_tcs(tcs);
+ if (ret < 0)
+ goto unlock;
+ tcs_id = ret;
tcs->req[tcs_id - tcs->offset] = msg;
set_bit(tcs_id, drv->tcs_in_use);
- spin_unlock(&drv->lock);
+ if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
+ /*
+ * Clear previously programmed WAKE commands in selected
+ * repurposed TCS to avoid triggering them. tcs->slots will be
+ * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate()
+ */
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
+ write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
+ enable_tcs_irq(drv, tcs_id, true);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+ /*
+ * These two can be done after the lock is released because:
+ * - We marked "tcs_in_use" under lock.
+ * - Once "tcs_in_use" has been marked nobody else could be writing
+ * to these registers until the interrupt goes off.
+ * - The interrupt can't go off until we trigger w/ the last line
+ * of __tcs_set_trigger() below.
+ */
__tcs_buffer_write(drv, tcs_id, 0, msg);
- __tcs_trigger(drv, tcs_id);
+ __tcs_set_trigger(drv, tcs_id, true);
-done_write:
- spin_unlock_irqrestore(&tcs->lock, flags);
+ return 0;
+unlock:
+ spin_unlock_irqrestore(&drv->lock, flags);
return ret;
}
/**
- * rpmh_rsc_send_data: Validate the incoming message and write to the
- * appropriate TCS block.
+ * rpmh_rsc_send_data() - Write / trigger active-only message.
+ * @drv: The controller.
+ * @msg: The data to be sent.
*
- * @drv: the controller
- * @msg: the data to be sent
+ * NOTES:
+ * - This is only used for "ACTIVE_ONLY" since the limitations of this
+ * function don't make sense for sleep/wake cases.
+ * - To do the transfer, we will grab a whole TCS for ourselves--we don't
+ * try to share. If there are none available we'll wait indefinitely
+ * for a free one.
+ * - This function will not wait for the commands to be finished, only for
+ * data to be programmed into the RPMh. See rpmh_tx_done() which will
+ * be called when the transfer is fully complete.
+ * - This function must be called with interrupts enabled. If the hardware
+ * is busy doing someone else's transfer we need that transfer to fully
+ * finish so that we can have the hardware, and to fully finish it needs
+ * the interrupt handler to run. If the interrupt is set to run on the
+ * active CPU, this can never happen if interrupts are disabled.
*
* Return: 0 on success, -EINVAL on error.
- * Note: This call blocks until a valid data is written to the TCS.
*/
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
int ret;
- if (!msg || !msg->cmds || !msg->num_cmds ||
- msg->num_cmds > MAX_RPMH_PAYLOAD) {
- WARN_ON(1);
- return -EINVAL;
- }
-
do {
ret = tcs_write(drv, msg);
if (ret == -EBUSY) {
@@ -419,43 +670,28 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
return ret;
}
-static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
- int len)
-{
- int i, j;
-
- /* Check for already cached commands */
- for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
- if (tcs->cmd_cache[i] != cmd[0].addr)
- continue;
- if (i + len >= tcs->num_tcs * tcs->ncpt)
- goto seq_err;
- for (j = 0; j < len; j++) {
- if (tcs->cmd_cache[i + j] != cmd[j].addr)
- goto seq_err;
- }
- return i;
- }
-
- return -ENODATA;
-
-seq_err:
- WARN(1, "Message does not match previous sequence.\n");
- return -EINVAL;
-}
-
+/**
+ * find_slots() - Find a place to write the given message.
+ * @tcs: The tcs group to search.
+ * @msg: The message we want to find room for.
+ * @tcs_id: If we return 0 from the function, we return the global ID of the
+ * TCS to write to here.
+ * @cmd_id: If we return 0 from the function, we return the index of
+ * the command array of the returned TCS where the client should
+ * start writing the message.
+ *
+ * Only for use on sleep/wake TCSes since those are the only ones we maintain
+ * tcs->slots for.
+ *
+ * Return: -ENOMEM if there was no room, else 0.
+ */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
int *tcs_id, int *cmd_id)
{
int slot, offset;
int i = 0;
- /* Find if we already have the msg in our TCS */
- slot = find_match(tcs, msg->cmds, msg->num_cmds);
- if (slot >= 0)
- goto copy_data;
-
- /* Do over, until we can fit the full payload in a TCS */
+ /* Do over, until we can fit the full payload in a single TCS */
do {
slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
i, msg->num_cmds, 0);
@@ -464,11 +700,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
i += tcs->ncpt;
} while (slot + msg->num_cmds - 1 >= i);
-copy_data:
bitmap_set(tcs->slots, slot, msg->num_cmds);
- /* Copy the addresses of the resources over to the slots */
- for (i = 0; i < msg->num_cmds; i++)
- tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
offset = slot / tcs->ncpt;
*tcs_id = offset + tcs->offset;
@@ -477,52 +709,157 @@ copy_data:
return 0;
}
-static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
+/**
+ * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
+ * @drv: The controller.
+ * @msg: The data to be written to the controller.
+ *
+ * This should only be called for sleep/wake state, never active-only
+ * state.
+ *
+ * The caller must ensure that no other RPMH actions are happening and the
+ * controller is idle when this function is called since it runs lockless.
+ *
+ * Return: 0 if no error; else -error.
+ */
+int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
struct tcs_group *tcs;
int tcs_id = 0, cmd_id = 0;
- unsigned long flags;
int ret;
tcs = get_tcs_for_msg(drv, msg);
if (IS_ERR(tcs))
return PTR_ERR(tcs);
- spin_lock_irqsave(&tcs->lock, flags);
/* find the TCS id and the command in the TCS to write to */
ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
if (!ret)
__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
- spin_unlock_irqrestore(&tcs->lock, flags);
return ret;
}
/**
- * rpmh_rsc_write_ctrl_data: Write request to the controller
+ * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
+ * @drv: The controller
+ *
+ * Checks if any of the AMCs are busy in handling ACTIVE sets.
+ * This is called from the last CPU powering down, before flushing the
+ * SLEEP and WAKE sets. If any AMC is busy, the controller cannot enter
+ * power collapse, so the last CPU's PM notification is denied.
*
- * @drv: the controller
- * @msg: the data to be written to the controller
+ * Context: Must be called with the drv->lock held.
*
- * There is no response returned for writing the request to the controller.
+ * Return:
+ * * False - AMCs are idle
+ * * True - AMCs are busy
*/
-int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
+static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
- if (!msg || !msg->cmds || !msg->num_cmds ||
- msg->num_cmds > MAX_RPMH_PAYLOAD) {
- pr_err("Payload error\n");
- return -EINVAL;
+ int m;
+ struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
+
+ /*
+ * If we made an active request on a RSC that does not have a
+ * dedicated TCS for active state use, then re-purposed wake TCSes
+ * should be checked for not busy, because we used wake TCSes for
+ * active requests in this case.
+ */
+ if (!tcs->num_tcs)
+ tcs = &drv->tcs[WAKE_TCS];
+
+ for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+ if (!tcs_is_free(drv, m))
+ return true;
}
- /* Data sent to this API will not be sent immediately */
- if (msg->state == RPMH_ACTIVE_ONLY_STATE)
- return -EINVAL;
+ return false;
+}
+
+/**
+ * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
+ * @nfb: Pointer to the notifier block in struct rsc_drv.
+ * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
+ * @v: Unused
+ *
+ * This function is given to cpu_pm_register_notifier so we can be informed
+ * about when CPUs go down. When all CPUs go down we know no more active
+ * transfers will be started so we write sleep/wake sets. This function gets
+ * called from cpuidle code paths and also at system suspend time.
+ *
+ * If it's the last CPU going down and the AMCs are not busy, this writes cached sleep
+ * and wake messages to TCSes. The firmware then takes care of triggering
+ * them when entering deepest low power modes.
+ *
+ * Return: See cpu_pm_register_notifier()
+ */
+static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
+ int ret = NOTIFY_OK;
+ int cpus_in_pm;
- return tcs_ctrl_write(drv, msg);
+ switch (action) {
+ case CPU_PM_ENTER:
+ cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
+ /*
+ * NOTE: comments for num_online_cpus() point out that it's
+ * only a snapshot so we need to be careful. It should be OK
+ * for us to use, though. It's important for us not to miss
+ * if we're the last CPU going down so it would only be a
+ * problem if a CPU went offline right after we did the check
+ * AND that CPU was not idle AND that CPU was the last non-idle
+ * CPU. That can't happen. CPUs would have to come out of idle
+ * before the CPU could go offline.
+ */
+ if (cpus_in_pm < num_online_cpus())
+ return NOTIFY_OK;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ atomic_dec(&drv->cpus_in_pm);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * It's likely we're on the last CPU. Grab the drv->lock and write
+ * out the sleep/wake commands to RPMH hardware. Grabbing the lock
+ * means that if we race with another CPU coming up we are still
+ * guaranteed to be safe. If another CPU came up just after we checked
+ * and has grabbed the lock or started an active transfer then we'll
+ * notice we're busy and abort. If another CPU comes up after we start
+ * flushing it will be blocked from starting an active transfer until
+ * we're done flushing. If another CPU starts an active transfer after
+ * we release the lock we're still OK because we're no longer the last
+ * CPU.
+ */
+ if (spin_trylock(&drv->lock)) {
+ if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
+ ret = NOTIFY_BAD;
+ spin_unlock(&drv->lock);
+ } else {
+ /* Another CPU must be up */
+ return NOTIFY_OK;
+ }
+
+ if (ret == NOTIFY_BAD) {
+ /* Double-check if we're here because someone else is up */
+ if (cpus_in_pm < num_online_cpus())
+ ret = NOTIFY_OK;
+ else
+ /* We won't be called w/ CPU_PM_ENTER_FAILED */
+ atomic_dec(&drv->cpus_in_pm);
+ }
+
+ return ret;
}
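/*
 * The "last man down" check above, in miniature (illustrative only):
 *
 *      if (atomic_inc_return(&drv->cpus_in_pm) < num_online_cpus())
 *              return NOTIFY_OK;       // some other CPU is still running
 *      // else: we are (very likely) the last CPU; try to flush sleep/wake
 */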
static int rpmh_probe_tcs_config(struct platform_device *pdev,
- struct rsc_drv *drv)
+ struct rsc_drv *drv, void __iomem *base)
{
struct tcs_type_config {
u32 type;
@@ -532,15 +869,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
u32 config, max_tcs, ncpt, offset;
int i, ret, n, st = 0;
struct tcs_group *tcs;
- struct resource *res;
- void __iomem *base;
- char drv_id[10] = {0};
-
- snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
if (ret)
@@ -584,7 +912,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
tcs->type = tcs_cfg[i].type;
tcs->num_tcs = tcs_cfg[i].n;
tcs->ncpt = ncpt;
- spin_lock_init(&tcs->lock);
if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
continue;
@@ -596,19 +923,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev,
tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
tcs->offset = st;
st += tcs->num_tcs;
-
- /*
- * Allocate memory to cache sleep and wake requests to
- * avoid reading TCS register memory.
- */
- if (tcs->type == ACTIVE_TCS)
- continue;
-
- tcs->cmd_cache = devm_kcalloc(&pdev->dev,
- tcs->num_tcs * ncpt, sizeof(u32),
- GFP_KERNEL);
- if (!tcs->cmd_cache)
- return -ENOMEM;
}
drv->num_tcs = st;
@@ -620,7 +934,11 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct rsc_drv *drv;
+ struct resource *res;
+ char drv_id[10] = {0};
int ret, irq;
+ u32 solver_config;
+ void __iomem *base;
/*
* Even though RPMh doesn't directly use cmd-db, all of its children
@@ -646,7 +964,13 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
if (!drv->name)
drv->name = dev_name(&pdev->dev);
- ret = rpmh_probe_tcs_config(pdev, drv);
+ snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = rpmh_probe_tcs_config(pdev, drv, base);
if (ret)
return ret;
@@ -663,8 +987,22 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /*
+ * CPU PM notification are not required for controllers that support
+ * 'HW solver' mode where they can be in autonomous mode executing low
+ * power mode to power down.
+ */
+ solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
+ solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
+ solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
+ if (!solver_config) {
+ drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
+ cpu_pm_register_notifier(&drv->rsc_pm);
+ }
+
/* Enable the active TCS to send requests immediately */
- write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
+ writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
+ drv->tcs_base + RSC_DRV_IRQ_ENABLE);
spin_lock_init(&drv->client.cache_lock);
INIT_LIST_HEAD(&drv->client.cache);
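/*
 * The solver_config extraction above is a plain field read: with
 * DRV_HW_SOLVER_MASK = 1 and DRV_HW_SOLVER_SHIFT = 24 it reduces to
 *
 *      solver_config = (readl_relaxed(base + DRV_SOLVER_CONFIG) >> 24) & 1;
 *
 * i.e. a single "HW solver present" bit; only DRVs without it register
 * the CPU PM notifier.
 */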
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index eb0ded059d2e..f2b5b46ccd1f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -119,6 +120,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
{
struct cache_req *req;
unsigned long flags;
+ u32 old_sleep_val, old_wake_val;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
req = __find_req(ctrlr, cmd->addr);
@@ -133,26 +135,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
req->addr = cmd->addr;
req->sleep_val = req->wake_val = UINT_MAX;
- INIT_LIST_HEAD(&req->list);
list_add_tail(&req->list, &ctrlr->cache);
existing:
+ old_sleep_val = req->sleep_val;
+ old_wake_val = req->wake_val;
+
switch (state) {
case RPMH_ACTIVE_ONLY_STATE:
- if (req->sleep_val != UINT_MAX)
- req->wake_val = cmd->data;
- break;
case RPMH_WAKE_ONLY_STATE:
req->wake_val = cmd->data;
break;
case RPMH_SLEEP_STATE:
req->sleep_val = cmd->data;
break;
- default:
- break;
}
- ctrlr->dirty = true;
+ ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
+ req->wake_val != old_wake_val) &&
+ req->sleep_val != UINT_MAX &&
+ req->wake_val != UINT_MAX;
+
unlock:
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
@@ -287,6 +290,7 @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_add_tail(&req->list, &ctrlr->batch_cache);
+ ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
@@ -294,12 +298,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
struct batch_cache_req *req;
const struct rpmh_request *rpm_msg;
- unsigned long flags;
int ret = 0;
int i;
/* Send Sleep/Wake requests to the controller, expect no response */
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_for_each_entry(req, &ctrlr->batch_cache, list) {
for (i = 0; i < req->count; i++) {
rpm_msg = req->rpm_msgs + i;
@@ -309,23 +311,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
break;
}
}
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
return ret;
}
-static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
-{
- struct batch_cache_req *req, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
- list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
- kfree(req);
- INIT_LIST_HEAD(&ctrlr->batch_cache);
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-}
-
/**
* rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
* batch to finish.
@@ -442,36 +431,42 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
}
/**
- * rpmh_flush: Flushes the buffered active and sleep sets to TCS
- *
- * @ctrlr: controller making request to flush cached data
+ * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
*
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
+ * @ctrlr: Controller making request to flush cached data
*
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * Return:
+ * * 0 - Success
+ * * Error code - Otherwise
*/
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
- int ret;
+ int ret = 0;
+
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * Currently rpmh_flush() is only called when we think we're running
+ * on the last processor. If the lock is busy it means another
+ * processor is up and it's better to abort than spin.
+ */
+ if (!spin_trylock(&ctrlr->cache_lock))
+ return -EBUSY;
if (!ctrlr->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
- return 0;
+ goto exit;
}
+ /* Invalidate the TCSes first to avoid stale data */
+ rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
- return ret;
+ goto exit;
- /*
- * Nobody else should be calling this function other than system PM,
- * hence we can run without locks.
- */
list_for_each_entry(p, &ctrlr->cache, list) {
if (!is_req_valid(p)) {
pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
@@ -481,38 +476,40 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
p->sleep_val);
if (ret)
- return ret;
+ goto exit;
ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
p->wake_val);
if (ret)
- return ret;
+ goto exit;
}
ctrlr->dirty = false;
- return 0;
+exit:
+ spin_unlock(&ctrlr->cache_lock);
+ return ret;
}
/**
- * rpmh_invalidate: Invalidate all sleep and active sets
- * sets.
+ * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
*
* @dev: The device making the request
*
- * Invalidate the sleep and active values in the TCS blocks.
+ * Invalidate the sleep and wake values in batch_cache.
*/
int rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
- int ret;
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
- invalidate_batch(ctrlr);
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
- do {
- ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
- } while (ret == -EAGAIN);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rpmh_invalidate);
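/*
 * Worked example of the dirty tracking added in cache_rpm_request() above
 * (values are illustrative):
 *
 *      wake vote 0x3 cached, sleep still UINT_MAX   -> dirty unchanged
 *      sleep vote 0x0 cached, wake already 0x3      -> dirty = true
 *      same wake vote 0x3 cached again              -> dirty unchanged
 *
 * i.e. a flush is only marked necessary once a complete, changed
 * sleep/wake pair exists for some address.
 */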
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index 4d264d0672c4..e72426221a69 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
@@ -166,6 +167,24 @@ static const struct rpmhpd_desc sm8150_desc = {
.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};
+static struct rpmhpd *sm8250_rpmhpds[] = {
+ [SM8250_CX] = &sdm845_cx,
+ [SM8250_CX_AO] = &sdm845_cx_ao,
+ [SM8250_EBI] = &sdm845_ebi,
+ [SM8250_GFX] = &sdm845_gfx,
+ [SM8250_LCX] = &sdm845_lcx,
+ [SM8250_LMX] = &sdm845_lmx,
+ [SM8250_MMCX] = &sm8150_mmcx,
+ [SM8250_MMCX_AO] = &sm8150_mmcx_ao,
+ [SM8250_MX] = &sdm845_mx,
+ [SM8250_MX_AO] = &sdm845_mx_ao,
+};
+
+static const struct rpmhpd_desc sm8250_desc = {
+ .rpmhpds = sm8250_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm8250_rpmhpds),
+};
+
/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
[SC7180_CX] = &sdm845_cx,
@@ -187,8 +206,10 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
+ { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
unsigned int corner, bool sync)
@@ -460,3 +481,6 @@ static int __init rpmhpd_init(void)
return platform_driver_register(&rpmhpd_driver);
}
core_initcall(rpmhpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 2b1834c5609a..f2168e4259b2 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/of.h>
@@ -226,6 +227,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmpd_match_table);
static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
{
@@ -422,3 +424,6 @@ static int __init rpmpd_init(void)
return platform_driver_register(&rpmpd_driver);
}
core_initcall(rpmpd_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPM Power Domain Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index c7300d54e444..07183d731d74 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -474,10 +474,8 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
goto report_read_failure;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+ if (irq < 0)
return irq;
- }
smp2p->mbox_client.dev = &pdev->dev;
smp2p->mbox_client.knows_txdone = true;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index ebb49aee179b..5983c6ffb078 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -188,6 +188,10 @@ static const struct soc_id soc_id[] = {
{ 216, "MSM8674PRO" },
{ 217, "MSM8974-AA" },
{ 218, "MSM8974-AB" },
+ { 233, "MSM8936" },
+ { 239, "MSM8939" },
+ { 240, "APQ8036" },
+ { 241, "APQ8039" },
{ 246, "MSM8996" },
{ 247, "APQ8016" },
{ 248, "MSM8216" },
@@ -430,6 +434,8 @@ static int qcom_socinfo_probe(struct platform_device *pdev)
qs->attr.family = "Snapdragon";
qs->attr.machine = socinfo_machine(&pdev->dev,
le32_to_cpu(info->id));
+ qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u",
+ le32_to_cpu(info->id));
qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u",
SOCINFO_MAJOR(le32_to_cpu(info->ver)),
SOCINFO_MINOR(le32_to_cpu(info->ver)));
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 1982c7fb45fa..53cd8d2d0cd2 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -83,6 +83,13 @@ config ARCH_R8A7740
select ARM_ERRATA_754322
select RENESAS_INTC_IRQPIN
+config ARCH_R8A7742
+ bool "RZ/G1H (R8A77420)"
+ select ARCH_RCAR_GEN2
+ select ARM_ERRATA_798181 if SMP
+ select ARM_ERRATA_814220
+ select SYSC_R8A7742
+
config ARCH_R8A7743
bool "RZ/G1M (R8A77430)"
select ARCH_RCAR_GEN2
@@ -261,6 +268,10 @@ config ARCH_R8A77995
endif # ARM64
# SoC
+config SYSC_R8A7742
+ bool "RZ/G1H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A7743
bool "RZ/G1M System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index e595c3c3bd10..08296d78e2ad 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
# SoC
+obj-$(CONFIG_SYSC_R8A7742) += r8a7742-sysc.o
obj-$(CONFIG_SYSC_R8A7743) += r8a7743-sysc.o
obj-$(CONFIG_SYSC_R8A7745) += r8a7745-sysc.o
obj-$(CONFIG_SYSC_R8A77470) += r8a77470-sysc.o
diff --git a/drivers/soc/renesas/r8a7742-sysc.c b/drivers/soc/renesas/r8a7742-sysc.c
new file mode 100644
index 000000000000..219a675f83f4
--- /dev/null
+++ b/drivers/soc/renesas/r8a7742-sysc.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G1H System Controller
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7742-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7742_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7742_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7742_PD_CA15_SCU, R8A7742_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7742_PD_CA15_CPU0, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7742_PD_CA15_CPU1, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu2", 0x40, 2, R8A7742_PD_CA15_CPU2, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu3", 0x40, 3, R8A7742_PD_CA15_CPU3, R8A7742_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca7-scu", 0x100, 0, R8A7742_PD_CA7_SCU, R8A7742_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7742_PD_CA7_CPU0, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7742_PD_CA7_CPU1, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu2", 0x1c0, 2, R8A7742_PD_CA7_CPU2, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu3", 0x1c0, 3, R8A7742_PD_CA7_CPU3, R8A7742_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "rgx", 0xc0, 0, R8A7742_PD_RGX, R8A7742_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7742_sysc_info __initconst = {
+ .areas = r8a7742_areas,
+ .num_areas = ARRAY_SIZE(r8a7742_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 2af2e0dd83fe..a2b2b1768768 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -39,6 +39,7 @@ static const struct rst_config rcar_rst_gen3 __initconst = {
static const struct of_device_id rcar_rst_matches[] __initconst = {
/* RZ/G1 is handled like R-Car Gen2 */
+ { .compatible = "renesas,r8a7742-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7743-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7744-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7745-rst", .data = &rcar_rst_gen2 },
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index f0b291e02b8a..04ea87a188f1 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -273,6 +273,9 @@ finalize:
}
static const struct of_device_id rcar_sysc_matches[] __initconst = {
+#ifdef CONFIG_SYSC_R8A7742
+ { .compatible = "renesas,r8a7742-sysc", .data = &r8a7742_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A7743
{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
/* RZ/G1N is identical to RZ/G2M w.r.t. power domains. */
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 0fc3b119930a..e417f26fe155 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -49,6 +49,7 @@ struct rcar_sysc_info {
u32 extmask_val; /* SYSCEXTMASK register mask value */
};
+extern const struct rcar_sysc_info r8a7742_sysc_info;
extern const struct rcar_sysc_info r8a7743_sysc_info;
extern const struct rcar_sysc_info r8a7745_sysc_info;
extern const struct rcar_sysc_info r8a77470_sysc_info;
diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c
index a5069394cd61..44d7e1951da3 100644
--- a/drivers/soc/sifive/sifive_l2_cache.c
+++ b/drivers/soc/sifive/sifive_l2_cache.c
@@ -9,6 +9,8 @@
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/device.h>
+#include <asm/cacheinfo.h>
#include <soc/sifive/sifive_l2_cache.h>
#define SIFIVE_L2_DIRECCFIX_LOW 0x100
@@ -31,6 +33,7 @@
static void __iomem *l2_base;
static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+static struct riscv_cacheinfo_ops l2_cache_ops;
enum {
DIR_CORR = 0,
@@ -48,7 +51,7 @@ static ssize_t l2_write(struct file *file, const char __user *data,
if (kstrtouint_from_user(data, count, 0, &val))
return -EINVAL;
- if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
else
return -EINVAL;
@@ -107,6 +110,38 @@ int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+static int l2_largest_wayenabled(void)
+{
+ return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF;
+}
+
+static ssize_t number_of_ways_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", l2_largest_wayenabled());
+}
+
+static DEVICE_ATTR_RO(number_of_ways_enabled);
+
+static struct attribute *priv_attrs[] = {
+ &dev_attr_number_of_ways_enabled.attr,
+ NULL,
+};
+
+static const struct attribute_group priv_attr_group = {
+ .attrs = priv_attrs,
+};
+
+static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_leaf)
+{
+ /* We want to use private group for L2 cache only */
+ if (this_leaf->level == 2)
+ return &priv_attr_group;
+ else
+ return NULL;
+}
+
static irqreturn_t l2_int_handler(int irq, void *device)
{
unsigned int add_h, add_l;
@@ -170,6 +205,9 @@ static int __init sifive_l2_init(void)
l2_config_read();
+ l2_cache_ops.get_priv_group = l2_get_priv_group;
+ riscv_set_cacheinfo_ops(&l2_cache_ops);
+
#ifdef CONFIG_DEBUG_FS
setup_sifive_debug();
#endif
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 3693532949b8..6bc603d0b9d9 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -133,6 +133,7 @@ config SOC_TEGRA_FLOWCTRL
config SOC_TEGRA_PMC
bool
+ select GENERIC_PINCONF
config SOC_TEGRA_POWERGATE_BPMP
def_bool y
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 802717b9f6a3..d1f8dd0289e6 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -300,6 +300,59 @@ static void tegra_enable_fuse_clk(void __iomem *base)
writel(reg, base + 0x14);
}
+static ssize_t major_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_major_rev());
+}
+
+static DEVICE_ATTR_RO(major);
+
+static ssize_t minor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", tegra_get_minor_rev());
+}
+
+static DEVICE_ATTR_RO(minor);
+
+static struct attribute *tegra_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ NULL,
+};
+
+const struct attribute_group tegra_soc_attr_group = {
+ .attrs = tegra_soc_attr,
+};
+
+#ifdef CONFIG_ARCH_TEGRA_194_SOC
+static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * Displays the value in the 'pre_si_platform' field of the HIDREV
+ * register for Tegra194 devices. A value of 0 indicates that the
+ * platform type is silicon and all other non-zero values indicate
+	 * the type of simulation platform being used.
+ */
+ return sprintf(buf, "%d\n", (tegra_read_chipid() >> 20) & 0xf);
+}
+
+static DEVICE_ATTR_RO(platform);
+
+static struct attribute *tegra194_soc_attr[] = {
+ &dev_attr_major.attr,
+ &dev_attr_minor.attr,
+ &dev_attr_platform.attr,
+ NULL,
+};
+
+const struct attribute_group tegra194_soc_attr_group = {
+ .attrs = tegra194_soc_attr,
+};
+#endif
+
struct device * __init tegra_soc_device_register(void)
{
struct soc_device_attribute *attr;
@@ -310,8 +363,10 @@ struct device * __init tegra_soc_device_register(void)
return NULL;
attr->family = kasprintf(GFP_KERNEL, "Tegra");
- attr->revision = kasprintf(GFP_KERNEL, "%d", tegra_sku_info.revision);
+ attr->revision = kasprintf(GFP_KERNEL, "%s",
+ tegra_revision_name[tegra_sku_info.revision]);
attr->soc_id = kasprintf(GFP_KERNEL, "%u", tegra_get_chip_id());
+ attr->custom_attr_group = fuse->soc->soc_attr_group;
dev = soc_device_register(attr);
if (IS_ERR(dev)) {
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index d4aef9c4a94c..16aaa28573ac 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -164,4 +164,5 @@ const struct tegra_fuse_soc tegra20_fuse_soc = {
.speedo_init = tegra20_init_speedo_data,
.probe = tegra20_fuse_probe,
.info = &tegra20_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e6037f900fb7..85accef41fa1 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -111,6 +111,7 @@ const struct tegra_fuse_soc tegra30_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra30_init_speedo_data,
.info = &tegra30_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -125,6 +126,7 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra114_init_speedo_data,
.info = &tegra114_fuse_info,
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -205,6 +207,7 @@ const struct tegra_fuse_soc tegra124_fuse_soc = {
.info = &tegra124_fuse_info,
.lookups = tegra124_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra124_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -290,6 +293,7 @@ const struct tegra_fuse_soc tegra210_fuse_soc = {
.info = &tegra210_fuse_info,
.lookups = tegra210_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra210_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -319,6 +323,7 @@ const struct tegra_fuse_soc tegra186_fuse_soc = {
.info = &tegra186_fuse_info,
.lookups = tegra186_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra186_fuse_lookups),
+ .soc_attr_group = &tegra_soc_attr_group,
};
#endif
@@ -348,5 +353,6 @@ const struct tegra_fuse_soc tegra194_fuse_soc = {
.info = &tegra194_fuse_info,
.lookups = tegra194_fuse_lookups,
.num_lookups = ARRAY_SIZE(tegra194_fuse_lookups),
+ .soc_attr_group = &tegra194_soc_attr_group,
};
#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 94a059e577a1..9d4fc315a007 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -32,6 +32,8 @@ struct tegra_fuse_soc {
const struct nvmem_cell_lookup *lookups;
unsigned int num_lookups;
+
+ const struct attribute_group *soc_attr_group;
};
struct tegra_fuse {
@@ -64,6 +66,11 @@ void tegra_init_apbmisc(void);
bool __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
+u8 tegra_get_major_rev(void);
+u8 tegra_get_minor_rev(void);
+
+extern const struct attribute_group tegra_soc_attr_group;
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
#endif
@@ -110,6 +117,7 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc;
#ifdef CONFIG_ARCH_TEGRA_194_SOC
extern const struct tegra_fuse_soc tegra194_fuse_soc;
+extern const struct attribute_group tegra194_soc_attr_group;
#endif
#endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 089d9340564b..3cdd69d1bd4d 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -37,6 +37,16 @@ u8 tegra_get_chip_id(void)
return (tegra_read_chipid() >> 8) & 0xff;
}
+u8 tegra_get_major_rev(void)
+{
+ return (tegra_read_chipid() >> 4) & 0xf;
+}
+
+u8 tegra_get_minor_rev(void)
+{
+ return (tegra_read_chipid() >> 16) & 0xf;
+}
+
u32 tegra_read_straps(void)
{
WARN(!chipid, "Tegra ABP MISC not yet available\n");
@@ -65,36 +75,32 @@ static const struct of_device_id apbmisc_match[] __initconst = {
void __init tegra_init_revision(void)
{
- u32 id, chip_id, minor_rev;
- int rev;
+ u8 chip_id, minor_rev;
- id = tegra_read_chipid();
- chip_id = (id >> 8) & 0xff;
- minor_rev = (id >> 16) & 0xf;
+ chip_id = tegra_get_chip_id();
+ minor_rev = tegra_get_minor_rev();
switch (minor_rev) {
case 1:
- rev = TEGRA_REVISION_A01;
+ tegra_sku_info.revision = TEGRA_REVISION_A01;
break;
case 2:
- rev = TEGRA_REVISION_A02;
+ tegra_sku_info.revision = TEGRA_REVISION_A02;
break;
case 3:
if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
tegra_fuse_read_spare(19)))
- rev = TEGRA_REVISION_A03p;
+ tegra_sku_info.revision = TEGRA_REVISION_A03p;
else
- rev = TEGRA_REVISION_A03;
+ tegra_sku_info.revision = TEGRA_REVISION_A03;
break;
case 4:
- rev = TEGRA_REVISION_A04;
+ tegra_sku_info.revision = TEGRA_REVISION_A04;
break;
default:
- rev = TEGRA_REVISION_UNKNOWN;
+ tegra_sku_info.revision = TEGRA_REVISION_UNKNOWN;
}
- tegra_sku_info.revision = rev;
-
tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 1c533a969f54..42cf37a0556b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3063,6 +3063,7 @@ static const struct pinctrl_pin_desc tegra210_pin_descs[] = {
static const struct tegra_wake_event tegra210_wake_events[] = {
TEGRA_WAKE_IRQ("rtc", 16, 2),
+ TEGRA_WAKE_IRQ("pmu", 51, 86),
};
static const struct tegra_pmc_soc tegra210_pmc_soc = {
@@ -3193,6 +3194,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_wake_event tegra186_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -3325,6 +3327,7 @@ static const char * const tegra194_reset_sources[] = {
};
static const struct tegra_wake_event tegra194_wake_events[] = {
+ TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 4486e055794c..e192fb788836 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -91,6 +91,16 @@ config TI_K3_RINGACC
and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs
If unsure, say N.
+config TI_K3_SOCINFO
+ bool
+ depends on ARCH_K3 || COMPILE_TEST
+ select SOC_BUS
+ select MFD_SYSCON
+ help
+ Include support for the SoC bus socinfo for the TI K3 Multicore SoC
+ platforms to provide information about the SoC family and
+ variant to user space.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index bec827937a5f..1110e5c98685 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
+obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
new file mode 100644
index 000000000000..af0ba5288e58
--- /dev/null
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI K3 SoC info driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+#define CTRLMMR_WKUP_JTAGID_REG 0
+/*
+ * Bits:
+ * 31-28 VARIANT Device variant
+ * 27-12 PARTNO Part number
+ * 11-1 MFG Indicates TI as manufacturer (0x17)
+ * 0	Always 1
+ */
+#define CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT (28)
+#define CTRLMMR_WKUP_JTAGID_VARIANT_MASK GENMASK(31, 28)
+
+#define CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT (12)
+#define CTRLMMR_WKUP_JTAGID_PARTNO_MASK GENMASK(27, 12)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_SHIFT (1)
+#define CTRLMMR_WKUP_JTAGID_MFG_MASK GENMASK(11, 1)
+
+#define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17
+
+static const struct k3_soc_id {
+ unsigned int id;
+ const char *family_name;
+} k3_soc_ids[] = {
+ { 0xBB5A, "AM65X" },
+ { 0xBB64, "J721E" },
+};
+
+static int
+k3_chipinfo_partno_to_names(unsigned int partno,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(k3_soc_ids); i++)
+ if (partno == k3_soc_ids[i].id) {
+ soc_dev_attr->family = k3_soc_ids[i].family_name;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int k3_chipinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct soc_device_attribute *soc_dev_attr;
+ struct device *dev = &pdev->dev;
+ struct soc_device *soc_dev;
+ struct regmap *regmap;
+ u32 partno_id;
+ u32 variant;
+ u32 jtag_id;
+ u32 mfg;
+ int ret;
+
+ regmap = device_node_to_regmap(node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, CTRLMMR_WKUP_JTAGID_REG, &jtag_id);
+ if (ret < 0)
+ return ret;
+
+ mfg = (jtag_id & CTRLMMR_WKUP_JTAGID_MFG_MASK) >>
+ CTRLMMR_WKUP_JTAGID_MFG_SHIFT;
+
+ if (mfg != CTRLMMR_WKUP_JTAGID_MFG_TI) {
+ dev_err(dev, "Invalid MFG SoC\n");
+ return -ENODEV;
+ }
+
+ variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >>
+ CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT;
+ variant++;
+
+ partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >>
+ CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant);
+ if (!soc_dev_attr->revision) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr);
+ if (ret) {
+ dev_err(dev, "Unknown SoC JTAGID[0x%08X]\n", jtag_id);
+ ret = -ENODEV;
+ goto err_free_rev;
+ }
+
+ node = of_find_node_by_path("/");
+ of_property_read_string(node, "model", &soc_dev_attr->machine);
+ of_node_put(node);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ ret = PTR_ERR(soc_dev);
+ goto err_free_rev;
+ }
+
+ dev_info(dev, "Family:%s rev:%s JTAGID[0x%08x] Detected\n",
+ soc_dev_attr->family,
+ soc_dev_attr->revision, jtag_id);
+
+ return 0;
+
+err_free_rev:
+ kfree(soc_dev_attr->revision);
+err:
+ kfree(soc_dev_attr);
+ return ret;
+}
+
+static const struct of_device_id k3_chipinfo_of_match[] = {
+ { .compatible = "ti,am654-chipid", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver k3_chipinfo_driver = {
+ .driver = {
+ .name = "k3-chipinfo",
+ .of_match_table = k3_chipinfo_of_match,
+ },
+ .probe = k3_chipinfo_probe,
+};
+
+static int __init k3_chipinfo_init(void)
+{
+ return platform_driver_register(&k3_chipinfo_driver);
+}
+subsys_initcall(k3_chipinfo_init);
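
As an aside on the JTAGID layout documented at the top of k3-socinfo.c above: the following is a minimal, user-space sketch of the same decode, assuming 0x0BB5A02F as a made-up register value and re-deriving the masks from the GENMASK() definitions in the file (illustration only, not code from this series):

/*
 * Standalone sketch of the decode done in k3_chipinfo_probe();
 * the register value below is an assumed example, not a real readout.
 */
#include <stdio.h>

#define JTAGID_VARIANT_SHIFT	28
#define JTAGID_VARIANT_MASK	(0xFu << 28)	/* GENMASK(31, 28) */
#define JTAGID_PARTNO_SHIFT	12
#define JTAGID_PARTNO_MASK	(0xFFFFu << 12)	/* GENMASK(27, 12) */
#define JTAGID_MFG_SHIFT	1
#define JTAGID_MFG_MASK		(0x7FFu << 1)	/* GENMASK(11, 1) */

int main(void)
{
	unsigned int jtag_id = 0x0BB5A02Fu;	/* assumed sample value */
	unsigned int variant, partno, mfg;

	variant = ((jtag_id & JTAGID_VARIANT_MASK) >> JTAGID_VARIANT_SHIFT) + 1;
	partno = (jtag_id & JTAGID_PARTNO_MASK) >> JTAGID_PARTNO_SHIFT;
	mfg = (jtag_id & JTAGID_MFG_MASK) >> JTAGID_MFG_SHIFT;

	/* partno 0xBB5A maps to "AM65X" in k3_soc_ids[]; mfg 0x17 is TI */
	printf("SR%x.0 partno=0x%X mfg=0x%X\n", variant, partno, mfg);
	return 0;
}

With that sample value the sketch prints "SR1.0 partno=0xBB5A mfg=0x17", i.e. the AM65X entry of k3_soc_ids[] with TI as the manufacturer.
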
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 37f3db6c041c..aa071d96ef36 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -409,7 +409,7 @@ static int knav_gp_close_queue(struct knav_range_info *range,
return 0;
}
-struct knav_range_ops knav_gp_range_ops = {
+static struct knav_range_ops knav_gp_range_ops = {
.set_notify = knav_gp_set_notify,
.open_queue = knav_gp_open_queue,
.close_queue = knav_gp_close_queue,
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 23d90cb12ba9..226d343f0a6a 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -23,8 +23,6 @@
/* Flag stating if PM nodes mapped to the PM domain has been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
-static const struct zynqmp_eemi_ops *eemi_ops;
-
static int min_capability;
/**
@@ -76,11 +74,8 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
- ret = eemi_ops->set_requirement(pd->node_id,
+ ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
@@ -111,9 +106,6 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
u32 capabilities = min_capability;
bool may_wakeup;
- if (!eemi_ops->set_requirement)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If domain is already released there is nothing to be done */
@@ -134,7 +126,7 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
}
}
- ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
/**
* If powering down of any node inside this domain fails,
@@ -163,16 +155,13 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->request_node)
- return -ENXIO;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
return 0;
- ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
/* If requesting a node fails print and return the error */
if (ret) {
@@ -199,16 +188,13 @@ static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
int ret;
struct zynqmp_pm_domain *pd;
- if (!eemi_ops->release_node)
- return;
-
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
- ret = eemi_ops->release_node(pd->node_id);
+ ret = zynqmp_pm_release_node(pd->node_id);
/* If releasing a node fails print the error and return */
if (ret) {
pr_err("%s() %s release failed for node %d: %d\n",
@@ -266,10 +252,6 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 09227895d216..31ff49fcd078 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -30,7 +30,6 @@ struct zynqmp_pm_work_struct {
static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
static struct mbox_chan *rx_chan;
-static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -155,9 +154,6 @@ static ssize_t suspend_mode_store(struct device *dev,
{
int md, ret = -EINVAL;
- if (!eemi_ops->set_suspend_mode)
- return ret;
-
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
if (suspend_modes[md] &&
sysfs_streq(suspend_modes[md], buf)) {
@@ -166,7 +162,7 @@ static ssize_t suspend_mode_store(struct device *dev,
}
if (!ret && md != suspend_mode) {
- ret = eemi_ops->set_suspend_mode(md);
+ ret = zynqmp_pm_set_suspend_mode(md);
if (likely(!ret))
suspend_mode = md;
}
@@ -182,15 +178,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
u32 pm_api_version;
struct mbox_client *client;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
- if (!eemi_ops->get_api_version || !eemi_ops->init_finalize)
- return -ENXIO;
-
- eemi_ops->init_finalize();
- eemi_ops->get_api_version(&pm_api_version);
+ zynqmp_pm_init_finalize();
+ zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
if (pm_api_version < ZYNQMP_PM_VERSION)
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index e2cdff990e9f..b5871612613b 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -4,7 +4,8 @@
#
#Bus Objs
-soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
+soundwire-bus-objs := bus_type.o bus.o master.o slave.o mipi_disco.o stream.o \
+ sysfs_slave.o sysfs_slave_dpn.o
obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
ifdef CONFIG_DEBUG_FS
@@ -16,12 +17,9 @@ soundwire-cadence-objs := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
-soundwire-intel-objs := intel.o
+soundwire-intel-objs := intel.o intel_init.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
-soundwire-intel-init-objs := intel_init.o
-obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel-init.o
-
#Qualcomm driver
soundwire-qcom-objs := qcom.o
obj-$(CONFIG_SOUNDWIRE_QCOM) += soundwire-qcom.o
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 488c3c9e4947..24ba77226376 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -8,24 +8,54 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include "bus.h"
+#include "sysfs_local.h"
+
+static DEFINE_IDA(sdw_ida);
+
+static int sdw_get_id(struct sdw_bus *bus)
+{
+ int rc = ida_alloc(&sdw_ida, GFP_KERNEL);
+
+ if (rc < 0)
+ return rc;
+
+ bus->id = rc;
+ return 0;
+}
/**
- * sdw_add_bus_master() - add a bus Master instance
+ * sdw_bus_master_add() - add a bus Master instance
* @bus: bus instance
+ * @parent: parent device
+ * @fwnode: firmware node handle
*
* Initializes the bus instance, read properties and create child
* devices.
*/
-int sdw_add_bus_master(struct sdw_bus *bus)
+int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode)
{
struct sdw_master_prop *prop = NULL;
int ret;
- if (!bus->dev) {
- pr_err("SoundWire bus has no device\n");
+ if (!parent) {
+ pr_err("SoundWire parent device is not set\n");
return -ENODEV;
}
+ ret = sdw_get_id(bus);
+ if (ret) {
+ dev_err(parent, "Failed to get bus id\n");
+ return ret;
+ }
+
+ ret = sdw_master_device_add(bus, parent, fwnode);
+ if (ret) {
+ dev_err(parent, "Failed to add master device at link %d\n",
+ bus->link_id);
+ return ret;
+ }
+
if (!bus->ops) {
dev_err(bus->dev, "SoundWire Bus ops are not set\n");
return -EINVAL;
@@ -107,7 +137,7 @@ int sdw_add_bus_master(struct sdw_bus *bus)
return 0;
}
-EXPORT_SYMBOL(sdw_add_bus_master);
+EXPORT_SYMBOL(sdw_bus_master_add);
static int sdw_delete_slave(struct device *dev, void *data)
{
@@ -131,18 +161,20 @@ static int sdw_delete_slave(struct device *dev, void *data)
}
/**
- * sdw_delete_bus_master() - delete the bus master instance
+ * sdw_bus_master_delete() - delete the bus master instance
* @bus: bus to be deleted
*
* Remove the instance, delete the child devices.
*/
-void sdw_delete_bus_master(struct sdw_bus *bus)
+void sdw_bus_master_delete(struct sdw_bus *bus)
{
device_for_each_child(bus->dev, NULL, sdw_delete_slave);
+ sdw_master_device_del(bus);
sdw_bus_debugfs_exit(bus);
+ ida_free(&sdw_ida, bus->id);
}
-EXPORT_SYMBOL(sdw_delete_bus_master);
+EXPORT_SYMBOL(sdw_bus_master_delete);
/*
* SDW IO Calls
@@ -284,9 +316,10 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
msg->flags = flags;
msg->buf = buf;
- if (addr < SDW_REG_NO_PAGE) { /* no paging area */
+ if (addr < SDW_REG_NO_PAGE) /* no paging area */
return 0;
- } else if (addr >= SDW_REG_MAX) { /* illegal addr */
+
+ if (addr >= SDW_REG_MAX) { /* illegal addr */
pr_err("SDW: Invalid address %x passed\n", addr);
return -EINVAL;
}
@@ -306,7 +339,9 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
if (!slave) {
pr_err("SDW: No slave for paging addr\n");
return -EINVAL;
- } else if (!slave->prop.paging_support) {
+ }
+
+ if (!slave->prop.paging_support) {
dev_err(&slave->dev,
"address %x needs paging but no support\n", addr);
return -EINVAL;
@@ -375,8 +410,8 @@ sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
ret = sdw_transfer(bus, &msg);
if (ret < 0)
return ret;
- else
- return buf;
+
+ return buf;
}
static int
@@ -471,8 +506,8 @@ int sdw_read(struct sdw_slave *slave, u32 addr)
ret = sdw_nread(slave, addr, 1, &buf);
if (ret < 0)
return ret;
- else
- return buf;
+
+ return buf;
}
EXPORT_SYMBOL(sdw_read);
@@ -563,9 +598,9 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
}
if (!new_device)
- dev_info(slave->bus->dev,
- "Slave already registered, reusing dev_num:%d\n",
- slave->dev_num);
+ dev_dbg(slave->bus->dev,
+ "Slave already registered, reusing dev_num:%d\n",
+ slave->dev_num);
/* Clear the slave->dev_num to transfer message on device 0 */
dev_num = slave->dev_num;
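
The intel.c and qcom.c hunks further down convert the in-tree callers to the renamed API; as a minimal sketch of the new contract, assuming a hypothetical "foo" platform driver and a foo_sdw_ops table that are not part of this series:

/* Hypothetical controller glue; only the bus API calls mirror this series. */
static int foo_sdw_probe(struct platform_device *pdev)
{
	struct sdw_bus *bus;
	int ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	bus->link_id = pdev->id;
	bus->ops = &foo_sdw_ops;	/* assumed sdw_master_ops, not shown */

	/*
	 * Parent device and fwnode are now passed explicitly; the core
	 * allocates an id, registers the sdw-master-<id> child device
	 * and points bus->dev at it.
	 */
	ret = sdw_bus_master_add(bus, &pdev->dev, pdev->dev.fwnode);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, bus);
	return 0;
}

static int foo_sdw_remove(struct platform_device *pdev)
{
	struct sdw_bus *bus = platform_get_drvdata(pdev);

	/* Unregisters child slaves and the master device, frees the id */
	sdw_bus_master_delete(bus);
	return 0;
}

The key difference from the old sdw_add_bus_master() is that the caller no longer sets bus->dev itself; the core creates the master child device and hands it back through bus->dev.
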
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 204204a26db8..82484f741168 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -19,6 +19,9 @@ static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
int sdw_of_find_slaves(struct sdw_bus *bus);
void sdw_extract_slave_id(struct sdw_bus *bus,
u64 addr, struct sdw_slave_id *id);
+int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode);
+int sdw_master_device_del(struct sdw_bus *bus);
#ifdef CONFIG_DEBUG_FS
void sdw_bus_debugfs_init(struct sdw_bus *bus);
@@ -172,5 +175,6 @@ sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
#define SDW_UNATTACH_REQUEST_MASTER_RESET BIT(0)
void sdw_clear_slave_status(struct sdw_bus *bus, u32 request);
+int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
#endif /* __SDW_BUS_H */
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 17f096dd6806..de9a671802b8 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -7,6 +7,7 @@
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
+#include "sysfs_local.h"
/**
* sdw_get_device_id - find the matching SoundWire device id
@@ -33,10 +34,17 @@ sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv)
static int sdw_bus_match(struct device *dev, struct device_driver *ddrv)
{
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
- struct sdw_driver *drv = drv_to_sdw_driver(ddrv);
+ struct sdw_slave *slave;
+ struct sdw_driver *drv;
+ int ret = 0;
+
+ if (is_sdw_slave(dev)) {
+ slave = dev_to_sdw_dev(dev);
+ drv = drv_to_sdw_driver(ddrv);
- return !!sdw_get_device_id(slave, drv);
+ ret = !!sdw_get_device_id(slave, drv);
+ }
+ return ret;
}
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
@@ -47,7 +55,7 @@ int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
slave->id.mfg_id, slave->id.part_id);
}
-static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env)
+int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct sdw_slave *slave = dev_to_sdw_dev(dev);
char modalias[32];
@@ -63,7 +71,6 @@ static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env)
struct bus_type sdw_bus_type = {
.name = "soundwire",
.match = sdw_bus_match,
- .uevent = sdw_uevent,
};
EXPORT_SYMBOL_GPL(sdw_bus_type);
@@ -98,6 +105,11 @@ static int sdw_drv_probe(struct device *dev)
if (slave->ops && slave->ops->read_prop)
slave->ops->read_prop(slave);
+ /* init the sysfs as we have properties now */
+ ret = sdw_slave_sysfs_init(slave);
+ if (ret < 0)
+ dev_warn(dev, "Slave sysfs init failed:%d\n", ret);
+
/*
* Check for valid clk_stop_timeout, use DisCo worst case value of
* 300ms
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ecd357d1c63d..9ea87538b9ef 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -407,7 +407,9 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
if (nack) {
dev_err_ratelimited(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
- } else if (no_ack) {
+ }
+
+ if (no_ack) {
dev_dbg_ratelimited(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
}
@@ -520,7 +522,9 @@ cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
dev_err_ratelimited(cdns->dev,
"SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
return SDW_CMD_FAIL;
- } else if (no_ack) {
+ }
+
+ if (no_ack) {
dev_dbg_ratelimited(cdns->dev,
"SCP_addrpage ignored for Slave %d\n", msg->dev_num);
return SDW_CMD_IGNORED;
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index fb1140e82b86..b6cad0d59b7b 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2017-2019 Intel Corporation.
#include <linux/device.h>
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 3c83e76c6bf9..4cfdd074e310 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -669,11 +669,11 @@ static int sdw_stream_setup(struct snd_pcm_substream *substream,
/* Set stream pointer on all CODEC DAIs */
for (i = 0; i < rtd->num_codecs; i++) {
- ret = snd_soc_dai_set_sdw_stream(rtd->codec_dais[i], sdw_stream,
+ ret = snd_soc_dai_set_sdw_stream(asoc_rtd_to_codec(rtd, i), sdw_stream,
substream->stream);
if (ret < 0) {
dev_err(dai->dev, "failed to set stream pointer on codec dai %s",
- rtd->codec_dais[i]->name);
+ asoc_rtd_to_codec(rtd, i)->name);
goto release_stream;
}
}
@@ -1099,7 +1099,6 @@ static int intel_probe(struct platform_device *pdev)
sdw->cdns.registers = sdw->link_res->registers;
sdw->cdns.instance = sdw->instance;
sdw->cdns.msg_count = 0;
- sdw->cdns.bus.dev = &pdev->dev;
sdw->cdns.bus.link_id = pdev->id;
sdw_cdns_probe(&sdw->cdns);
@@ -1110,9 +1109,9 @@ static int intel_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdw);
- ret = sdw_add_bus_master(&sdw->cdns.bus);
+ ret = sdw_bus_master_add(&sdw->cdns.bus, &pdev->dev, pdev->dev.fwnode);
if (ret) {
- dev_err(&pdev->dev, "sdw_add_bus_master fail: %d\n", ret);
+ dev_err(&pdev->dev, "sdw_bus_master_add fail: %d\n", ret);
return ret;
}
@@ -1173,7 +1172,7 @@ err_interrupt:
sdw_cdns_enable_interrupt(&sdw->cdns, false);
free_irq(sdw->link_res->irq, sdw);
err_init:
- sdw_delete_bus_master(&sdw->cdns.bus);
+ sdw_bus_master_delete(&sdw->cdns.bus);
return ret;
}
@@ -1189,7 +1188,7 @@ static int intel_remove(struct platform_device *pdev)
free_irq(sdw->link_res->irq, sdw);
snd_soc_unregister_component(sdw->cdns.dev);
}
- sdw_delete_bus_master(&sdw->cdns.bus);
+ sdw_bus_master_delete(&sdw->cdns.bus);
return 0;
}
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 4b769409f6f8..d5d42795a48f 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -86,7 +86,9 @@ static struct sdw_intel_ctx
dev_err(&adev->dev, "Link count %d exceeds max %d\n",
count, SDW_MAX_LINKS);
return NULL;
- } else if (!count) {
+ }
+
+ if (!count) {
dev_warn(&adev->dev, "No SoundWire links detected\n");
return NULL;
}
diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c
new file mode 100644
index 000000000000..5f0b2189defe
--- /dev/null
+++ b/drivers/soundwire/master.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2019-2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+
+/*
+ * The sysfs for properties reflects the MIPI description as given
+ * in the MIPI DisCo spec
+ *
+ * Base file is:
+ * sdw-master-N
+ * |---- revision
+ * |---- clk_stop_modes
+ * |---- max_clk_freq
+ * |---- clk_freq
+ * |---- clk_gears
+ * |---- default_row
+ * |---- default_col
+ * |---- dynamic_shape
+ * |---- err_threshold
+ */
+
+#define sdw_master_attr(field, format_string) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
+ return sprintf(buf, format_string, md->bus->prop.field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+sdw_master_attr(revision, "0x%x\n");
+sdw_master_attr(clk_stop_modes, "0x%x\n");
+sdw_master_attr(max_clk_freq, "%d\n");
+sdw_master_attr(default_row, "%d\n");
+sdw_master_attr(default_col, "%d\n");
+sdw_master_attr(default_frame_rate, "%d\n");
+sdw_master_attr(dynamic_frame, "%d\n");
+sdw_master_attr(err_threshold, "%d\n");
+
+static ssize_t clock_frequencies_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev);
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < md->bus->prop.num_clk_freq; i++)
+ size += sprintf(buf + size, "%8d ",
+ md->bus->prop.clk_freq[i]);
+ size += sprintf(buf + size, "\n");
+
+ return size;
+}
+static DEVICE_ATTR_RO(clock_frequencies);
+
+static ssize_t clock_gears_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev);
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < md->bus->prop.num_clk_gears; i++)
+ size += sprintf(buf + size, "%8d ",
+ md->bus->prop.clk_gears[i]);
+ size += sprintf(buf + size, "\n");
+
+ return size;
+}
+static DEVICE_ATTR_RO(clock_gears);
+
+static struct attribute *master_node_attrs[] = {
+ &dev_attr_revision.attr,
+ &dev_attr_clk_stop_modes.attr,
+ &dev_attr_max_clk_freq.attr,
+ &dev_attr_default_row.attr,
+ &dev_attr_default_col.attr,
+ &dev_attr_default_frame_rate.attr,
+ &dev_attr_dynamic_frame.attr,
+ &dev_attr_err_threshold.attr,
+ &dev_attr_clock_frequencies.attr,
+ &dev_attr_clock_gears.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(master_node);
+
+static void sdw_master_device_release(struct device *dev)
+{
+ struct sdw_master_device *md = dev_to_sdw_master_device(dev);
+
+ kfree(md);
+}
+
+static const struct dev_pm_ops master_dev_pm = {
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
+ pm_generic_runtime_resume, NULL)
+};
+
+struct device_type sdw_master_type = {
+ .name = "soundwire_master",
+ .release = sdw_master_device_release,
+ .pm = &master_dev_pm,
+};
+
+/**
+ * sdw_master_device_add() - create a Linux Master Device representation.
+ * @bus: SDW bus instance
+ * @parent: parent device
+ * @fwnode: firmware node handle
+ */
+int sdw_master_device_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode)
+{
+ struct sdw_master_device *md;
+ int ret;
+
+ if (!parent)
+ return -EINVAL;
+
+ md = kzalloc(sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+
+ md->dev.bus = &sdw_bus_type;
+ md->dev.type = &sdw_master_type;
+ md->dev.parent = parent;
+ md->dev.groups = master_node_groups;
+ md->dev.of_node = parent->of_node;
+ md->dev.fwnode = fwnode;
+ md->dev.dma_mask = parent->dma_mask;
+
+ dev_set_name(&md->dev, "sdw-master-%d", bus->id);
+
+ ret = device_register(&md->dev);
+ if (ret) {
+ dev_err(parent, "Failed to add master: ret %d\n", ret);
+ /*
+ * On err, don't free but drop ref as this will be freed
+ * when release method is invoked.
+ */
+ put_device(&md->dev);
+ goto device_register_err;
+ }
+
+ /* add shortcuts to improve code readability/compactness */
+ md->bus = bus;
+ bus->dev = &md->dev;
+ bus->md = md;
+
+device_register_err:
+ return ret;
+}
+
+/**
+ * sdw_master_device_del() - delete a Linux Master Device representation.
+ * @bus: bus handle
+ *
+ * This function is the dual of sdw_master_device_add()
+ */
+int sdw_master_device_del(struct sdw_bus *bus)
+{
+ device_unregister(bus->dev);
+
+ return 0;
+}
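
For reference, this is roughly what sdw_master_attr(revision, "0x%x\n") above expands to once the preprocessor and DEVICE_ATTR_RO() have run (hand expansion for illustration, not additional code in the patch):

/* Hand expansion of sdw_master_attr(revision, "0x%x\n") from master.c */
static ssize_t revision_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct sdw_master_device *md = dev_to_sdw_master_device(dev);

	return sprintf(buf, "0x%x\n", md->bus->prop.revision);
}
static DEVICE_ATTR_RO(revision);	/* provides dev_attr_revision */

DEVICE_ATTR_RO(revision) in turn supplies the dev_attr_revision entry referenced in master_node_attrs[].
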
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index 844e6b22974f..4ae62b452b8c 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -231,16 +231,17 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
nval = fwnode_property_count_u32(node, "mipi-sdw-channel-number-list");
if (nval > 0) {
- dpn[i].num_ch = nval;
- dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch,
- sizeof(*dpn[i].ch),
+ dpn[i].num_channels = nval;
+ dpn[i].channels = devm_kcalloc(&slave->dev,
+ dpn[i].num_channels,
+ sizeof(*dpn[i].channels),
GFP_KERNEL);
- if (!dpn[i].ch)
+ if (!dpn[i].channels)
return -ENOMEM;
fwnode_property_read_u32_array(node,
"mipi-sdw-channel-number-list",
- dpn[i].ch, dpn[i].num_ch);
+ dpn[i].channels, dpn[i].num_channels);
}
nval = fwnode_property_count_u32(node, "mipi-sdw-channel-combination-list");
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index d6c9ad231873..a1c2a44a3b4d 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -765,12 +765,16 @@ static int qcom_swrm_probe(struct platform_device *pdev)
}
ctrl->irq = of_irq_get(dev->of_node, 0);
- if (ctrl->irq < 0)
- return ctrl->irq;
+ if (ctrl->irq < 0) {
+ ret = ctrl->irq;
+ goto err_init;
+ }
ctrl->hclk = devm_clk_get(dev, "iface");
- if (IS_ERR(ctrl->hclk))
- return PTR_ERR(ctrl->hclk);
+ if (IS_ERR(ctrl->hclk)) {
+ ret = PTR_ERR(ctrl->hclk);
+ goto err_init;
+ }
clk_prepare_enable(ctrl->hclk);
@@ -780,14 +784,13 @@ static int qcom_swrm_probe(struct platform_device *pdev)
mutex_init(&ctrl->port_lock);
INIT_WORK(&ctrl->slave_work, qcom_swrm_slave_wq);
- ctrl->bus.dev = dev;
ctrl->bus.ops = &qcom_swrm_ops;
ctrl->bus.port_ops = &qcom_swrm_port_ops;
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
- return ret;
+ goto err_clk;
params = &ctrl->bus.params;
params->max_dr_freq = DEFAULT_CLK_FREQ;
@@ -810,32 +813,37 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, ctrl->irq, NULL,
qcom_swrm_irq_handler,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
"soundwire", ctrl);
if (ret) {
dev_err(dev, "Failed to request soundwire irq\n");
- goto err;
+ goto err_clk;
}
- ret = sdw_add_bus_master(&ctrl->bus);
+ ret = sdw_bus_master_add(&ctrl->bus, dev, dev->fwnode);
if (ret) {
dev_err(dev, "Failed to register Soundwire controller (%d)\n",
ret);
- goto err;
+ goto err_clk;
}
qcom_swrm_init(ctrl);
ret = qcom_swrm_register_dais(ctrl);
if (ret)
- goto err;
+ goto err_master_add;
dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n",
(ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff,
ctrl->version & 0xffff);
return 0;
-err:
+
+err_master_add:
+ sdw_bus_master_delete(&ctrl->bus);
+err_clk:
clk_disable_unprepare(ctrl->hclk);
+err_init:
return ret;
}
@@ -843,7 +851,7 @@ static int qcom_swrm_remove(struct platform_device *pdev)
{
struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(&pdev->dev);
- sdw_delete_bus_master(&ctrl->bus);
+ sdw_bus_master_delete(&ctrl->bus);
clk_disable_unprepare(ctrl->hclk);
return 0;
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index aace57fae7f8..0839445ee07b 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -14,6 +14,12 @@ static void sdw_slave_release(struct device *dev)
kfree(slave);
}
+struct device_type sdw_slave_type = {
+ .name = "sdw_slave",
+ .release = sdw_slave_release,
+ .uevent = sdw_slave_uevent,
+};
+
static int sdw_slave_add(struct sdw_bus *bus,
struct sdw_slave_id *id, struct fwnode_handle *fwnode)
{
@@ -41,9 +47,9 @@ static int sdw_slave_add(struct sdw_bus *bus,
id->class_id, id->unique_id);
}
- slave->dev.release = sdw_slave_release;
slave->dev.bus = &sdw_bus_type;
slave->dev.of_node = of_node_get(to_of_node(fwnode));
+ slave->dev.type = &sdw_slave_type;
slave->bus = bus;
slave->status = SDW_SLAVE_UNATTACHED;
init_completion(&slave->enumeration_complete);
@@ -68,6 +74,8 @@ static int sdw_slave_add(struct sdw_bus *bus,
list_del(&slave->node);
mutex_unlock(&bus->bus_lock);
put_device(&slave->dev);
+
+ return ret;
}
sdw_slave_debugfs_init(slave);
diff --git a/drivers/soundwire/sysfs_local.h b/drivers/soundwire/sysfs_local.h
new file mode 100644
index 000000000000..ff60adee3c41
--- /dev/null
+++ b/drivers/soundwire/sysfs_local.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2015-2020 Intel Corporation. */
+
+#ifndef __SDW_SYSFS_LOCAL_H
+#define __SDW_SYSFS_LOCAL_H
+
+/*
+ * SDW sysfs APIs -
+ */
+
+int sdw_slave_sysfs_init(struct sdw_slave *slave);
+int sdw_slave_sysfs_dpn_init(struct sdw_slave *slave);
+
+#endif /* __SDW_SYSFS_LOCAL_H */
diff --git a/drivers/soundwire/sysfs_slave.c b/drivers/soundwire/sysfs_slave.c
new file mode 100644
index 000000000000..f510071b0add
--- /dev/null
+++ b/drivers/soundwire/sysfs_slave.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2015-2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+#include "sysfs_local.h"
+
+/*
+ * Slave sysfs
+ */
+
+/*
+ * The sysfs for Slave reflects the MIPI description as given
+ * in the MIPI DisCo spec
+ *
+ * Base file is device
+ * |---- modalias
+ * |---- dev-properties
+ * |---- mipi_revision
+ * |---- wake_capable
+ * |---- test_mode_capable
+ * |---- clk_stop_mode1
+ * |---- simple_clk_stop_capable
+ * |---- clk_stop_timeout
+ * |---- ch_prep_timeout
+ * |---- reset_behave
+ * |---- high_PHY_capable
+ * |---- paging_support
+ * |---- bank_delay_support
+ * |---- p15_behave
+ * |---- master_count
+ * |---- source_ports
+ * |---- sink_ports
+ * |---- dp0
+ * |---- max_word
+ * |---- min_word
+ * |---- words
+ * |---- BRA_flow_controlled
+ * |---- simple_ch_prep_sm
+ * |---- imp_def_interrupts
+ * |---- dpN_<sink/src>
+ * |---- max_word
+ * |---- min_word
+ * |---- words
+ * |---- type
+ * |---- max_grouping
+ * |---- simple_ch_prep_sm
+ * |---- ch_prep_timeout
+ * |---- imp_def_interrupts
+ * |---- min_ch
+ * |---- max_ch
+ * |---- channels
+ * |---- ch_combinations
+ * |---- max_async_buffer
+ * |---- block_pack_mode
+ * |---- port_encoding
+ *
+ */
+
+#define sdw_slave_attr(field, format_string) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ return sprintf(buf, format_string, slave->prop.field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+sdw_slave_attr(mipi_revision, "0x%x\n");
+sdw_slave_attr(wake_capable, "%d\n");
+sdw_slave_attr(test_mode_capable, "%d\n");
+sdw_slave_attr(clk_stop_mode1, "%d\n");
+sdw_slave_attr(simple_clk_stop_capable, "%d\n");
+sdw_slave_attr(clk_stop_timeout, "%d\n");
+sdw_slave_attr(ch_prep_timeout, "%d\n");
+sdw_slave_attr(reset_behave, "%d\n");
+sdw_slave_attr(high_PHY_capable, "%d\n");
+sdw_slave_attr(paging_support, "%d\n");
+sdw_slave_attr(bank_delay_support, "%d\n");
+sdw_slave_attr(p15_behave, "%d\n");
+sdw_slave_attr(master_count, "%d\n");
+sdw_slave_attr(source_ports, "0x%x\n");
+sdw_slave_attr(sink_ports, "0x%x\n");
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_slave_modalias(slave, buf, 256);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *slave_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(slave);
+
+static struct attribute *slave_dev_attrs[] = {
+ &dev_attr_mipi_revision.attr,
+ &dev_attr_wake_capable.attr,
+ &dev_attr_test_mode_capable.attr,
+ &dev_attr_clk_stop_mode1.attr,
+ &dev_attr_simple_clk_stop_capable.attr,
+ &dev_attr_clk_stop_timeout.attr,
+ &dev_attr_ch_prep_timeout.attr,
+ &dev_attr_reset_behave.attr,
+ &dev_attr_high_PHY_capable.attr,
+ &dev_attr_paging_support.attr,
+ &dev_attr_bank_delay_support.attr,
+ &dev_attr_p15_behave.attr,
+ &dev_attr_master_count.attr,
+ &dev_attr_source_ports.attr,
+ &dev_attr_sink_ports.attr,
+ NULL,
+};
+
+/*
+ * we don't use ATTRIBUTE_GROUPS here since we want to add a subdirectory

+ * for device-level properties
+ */
+static struct attribute_group sdw_slave_dev_attr_group = {
+ .attrs = slave_dev_attrs,
+ .name = "dev-properties",
+};
+
+/*
+ * DP0 sysfs
+ */
+
+#define sdw_dp0_attr(field, format_string) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ return sprintf(buf, format_string, slave->prop.dp0_prop->field);\
+} \
+static DEVICE_ATTR_RO(field)
+
+sdw_dp0_attr(max_word, "%d\n");
+sdw_dp0_attr(min_word, "%d\n");
+sdw_dp0_attr(BRA_flow_controlled, "%d\n");
+sdw_dp0_attr(simple_ch_prep_sm, "%d\n");
+sdw_dp0_attr(imp_def_interrupts, "0x%x\n");
+
+static ssize_t words_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ ssize_t size = 0;
+ int i;
+
+ for (i = 0; i < slave->prop.dp0_prop->num_words; i++)
+ size += sprintf(buf + size, "%d ",
+ slave->prop.dp0_prop->words[i]);
+ size += sprintf(buf + size, "\n");
+
+ return size;
+}
+static DEVICE_ATTR_RO(words);
+
+static struct attribute *dp0_attrs[] = {
+ &dev_attr_max_word.attr,
+ &dev_attr_min_word.attr,
+ &dev_attr_words.attr,
+ &dev_attr_BRA_flow_controlled.attr,
+ &dev_attr_simple_ch_prep_sm.attr,
+ &dev_attr_imp_def_interrupts.attr,
+ NULL,
+};
+
+/*
+ * we don't use ATTRIBUTE_GROUPS here since we want to add a subdirectory
+ * for dp0-level properties
+ */
+static const struct attribute_group dp0_group = {
+ .attrs = dp0_attrs,
+ .name = "dp0",
+};
+
+int sdw_slave_sysfs_init(struct sdw_slave *slave)
+{
+ int ret;
+
+ ret = devm_device_add_groups(&slave->dev, slave_groups);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_device_add_group(&slave->dev, &sdw_slave_dev_attr_group);
+ if (ret < 0)
+ return ret;
+
+ if (slave->prop.dp0_prop) {
+ ret = devm_device_add_group(&slave->dev, &dp0_group);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (slave->prop.source_ports || slave->prop.sink_ports) {
+ ret = sdw_slave_sysfs_dpn_init(slave);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/soundwire/sysfs_slave_dpn.c b/drivers/soundwire/sysfs_slave_dpn.c
new file mode 100644
index 000000000000..05a721ea9830
--- /dev/null
+++ b/drivers/soundwire/sysfs_slave_dpn.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright(c) 2015-2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_type.h>
+#include "bus.h"
+#include "sysfs_local.h"
+
+struct dpn_attribute {
+ struct device_attribute dev_attr;
+ int N;
+ int dir;
+ const char *format_string;
+};
+
+/*
+ * Since we can't use ARRAY_SIZE, hard-code number of dpN attributes.
+ * This needs to be updated when adding new attributes - an error will be
+ * flagged on a mismatch.
+ */
+#define SDW_DPN_ATTRIBUTES 15
+
+#define sdw_dpn_attribute_alloc(field) \
+static int field##_attribute_alloc(struct device *dev, \
+ struct attribute **res, \
+ int N, int dir, \
+ const char *format_string) \
+{ \
+ struct dpn_attribute *dpn_attr; \
+ \
+ dpn_attr = devm_kzalloc(dev, sizeof(*dpn_attr), GFP_KERNEL); \
+ if (!dpn_attr) \
+ return -ENOMEM; \
+ dpn_attr->N = N; \
+ dpn_attr->dir = dir; \
+ dpn_attr->format_string = format_string; \
+ dpn_attr->dev_attr.attr.name = __stringify(field); \
+ dpn_attr->dev_attr.attr.mode = 0444; \
+ dpn_attr->dev_attr.show = field##_show; \
+ \
+ *res = &dpn_attr->dev_attr.attr; \
+ \
+ return 0; \
+}
+
+#define sdw_dpn_attr(field) \
+ \
+static ssize_t field##_dpn_show(struct sdw_slave *slave, \
+ int N, \
+ int dir, \
+ const char *format_string, \
+ char *buf) \
+{ \
+ struct sdw_dpn_prop *dpn; \
+ unsigned long mask; \
+ int bit; \
+ int i; \
+ \
+ if (dir) { \
+ dpn = slave->prop.src_dpn_prop; \
+ mask = slave->prop.source_ports; \
+ } else { \
+ dpn = slave->prop.sink_dpn_prop; \
+ mask = slave->prop.sink_ports; \
+ } \
+ \
+ i = 0; \
+ for_each_set_bit(bit, &mask, 32) { \
+ if (bit == N) { \
+ return sprintf(buf, format_string, \
+ dpn[i].field); \
+ } \
+ i++; \
+ } \
+ return -EINVAL; \
+} \
+ \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ struct dpn_attribute *dpn_attr = \
+ container_of(attr, struct dpn_attribute, dev_attr); \
+ \
+ return field##_dpn_show(slave, \
+ dpn_attr->N, dpn_attr->dir, \
+ dpn_attr->format_string, \
+ buf); \
+} \
+sdw_dpn_attribute_alloc(field)
+
+sdw_dpn_attr(imp_def_interrupts);
+sdw_dpn_attr(max_word);
+sdw_dpn_attr(min_word);
+sdw_dpn_attr(type);
+sdw_dpn_attr(max_grouping);
+sdw_dpn_attr(simple_ch_prep_sm);
+sdw_dpn_attr(ch_prep_timeout);
+sdw_dpn_attr(max_ch);
+sdw_dpn_attr(min_ch);
+sdw_dpn_attr(max_async_buffer);
+sdw_dpn_attr(block_pack_mode);
+sdw_dpn_attr(port_encoding);
+
+#define sdw_dpn_array_attr(field) \
+ \
+static ssize_t field##_dpn_show(struct sdw_slave *slave, \
+ int N, \
+ int dir, \
+ const char *format_string, \
+ char *buf) \
+{ \
+ struct sdw_dpn_prop *dpn; \
+ unsigned long mask; \
+ ssize_t size = 0; \
+ int bit; \
+ int i; \
+ int j; \
+ \
+ if (dir) { \
+ dpn = slave->prop.src_dpn_prop; \
+ mask = slave->prop.source_ports; \
+ } else { \
+ dpn = slave->prop.sink_dpn_prop; \
+ mask = slave->prop.sink_ports; \
+ } \
+ \
+ i = 0; \
+ for_each_set_bit(bit, &mask, 32) { \
+ if (bit == N) { \
+ for (j = 0; j < dpn[i].num_##field; j++) \
+ size += sprintf(buf + size, \
+ format_string, \
+ dpn[i].field[j]); \
+ size += sprintf(buf + size, "\n"); \
+ return size; \
+ } \
+ i++; \
+ } \
+ return -EINVAL; \
+} \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sdw_slave *slave = dev_to_sdw_dev(dev); \
+ struct dpn_attribute *dpn_attr = \
+ container_of(attr, struct dpn_attribute, dev_attr); \
+ \
+ return field##_dpn_show(slave, \
+ dpn_attr->N, dpn_attr->dir, \
+ dpn_attr->format_string, \
+ buf); \
+} \
+sdw_dpn_attribute_alloc(field)
+
+sdw_dpn_array_attr(words);
+sdw_dpn_array_attr(ch_combinations);
+sdw_dpn_array_attr(channels);
+
+static int add_all_attributes(struct device *dev, int N, int dir)
+{
+ struct attribute **dpn_attrs;
+ struct attribute_group *dpn_group;
+ int i = 0;
+ int ret;
+
+ /* allocate attributes, last one is NULL */
+ dpn_attrs = devm_kcalloc(dev, SDW_DPN_ATTRIBUTES + 1,
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!dpn_attrs)
+ return -ENOMEM;
+
+ ret = max_word_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = min_word_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = words_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = type_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = max_grouping_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = simple_ch_prep_sm_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = ch_prep_timeout_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = imp_def_interrupts_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "0x%x\n");
+ if (ret < 0)
+ return ret;
+
+ ret = min_ch_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = max_ch_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = channels_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = ch_combinations_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = max_async_buffer_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = block_pack_mode_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ ret = port_encoding_attribute_alloc(dev, &dpn_attrs[i++],
+ N, dir, "%d\n");
+ if (ret < 0)
+ return ret;
+
+ /* paranoia check for editing mistakes */
+ if (i != SDW_DPN_ATTRIBUTES) {
+ dev_err(dev, "mismatch in attributes, allocated %d got %d\n",
+ SDW_DPN_ATTRIBUTES, i);
+ return -EINVAL;
+ }
+
+ dpn_group = devm_kzalloc(dev, sizeof(*dpn_group), GFP_KERNEL);
+ if (!dpn_group)
+ return -ENOMEM;
+
+ dpn_group->attrs = dpn_attrs;
+ dpn_group->name = devm_kasprintf(dev, GFP_KERNEL, "dp%d_%s",
+ N, dir ? "src" : "sink");
+ if (!dpn_group->name)
+ return -ENOMEM;
+
+ ret = devm_device_add_group(dev, dpn_group);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int sdw_slave_sysfs_dpn_init(struct sdw_slave *slave)
+{
+ unsigned long mask;
+ int ret;
+ int i;
+
+ mask = slave->prop.source_ports;
+ for_each_set_bit(i, &mask, 32) {
+ ret = add_all_attributes(&slave->dev, i, 1);
+ if (ret < 0)
+ return ret;
+ }
+
+ mask = slave->prop.sink_ports;
+ for_each_set_bit(i, &mask, 32) {
+ ret = add_all_attributes(&slave->dev, i, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
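
The per-port files above work by embedding a struct device_attribute inside struct dpn_attribute, so each generated show routine can recover the port number, direction and format string; hand-expanding sdw_dpn_attr(max_word) gives roughly the following (illustration only, trimmed to the show routine):

/* Hand expansion of sdw_dpn_attr(max_word) from sysfs_slave_dpn.c */
static ssize_t max_word_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct dpn_attribute *dpn_attr =
		container_of(attr, struct dpn_attribute, dev_attr);

	/* N, dir and format_string were filled in at allocation time */
	return max_word_dpn_show(slave, dpn_attr->N, dpn_attr->dir,
				 dpn_attr->format_string, buf);
}

max_word_attribute_alloc(), generated by sdw_dpn_attribute_alloc(), fills in N, dir and format_string when add_all_attributes() builds each dp<N>_src or dp<N>_sink group.
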
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 7412a3042a8d..811c97a7c858 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -135,7 +135,6 @@
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
-static const struct zynqmp_eemi_ops *eemi_ops;
/**
* struct zynqmp_qspi - Defines qspi driver instance
@@ -1015,10 +1014,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct zynqmp_qspi *xqspi;
struct device *dev = &pdev->dev;
- eemi_ops = zynqmp_pm_get_eemi_ops();
- if (IS_ERR(eemi_ops))
- return PTR_ERR(eemi_ops);
-
master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8044510d8ec6..c05a214191da 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -555,7 +555,7 @@ static int set_name(struct ashmem_area *asma, void __user *name)
/*
* Holding the ashmem_mutex while doing a copy_from_user might cause
- * an data abort which would try to access mmap_sem. If another
+	 * a data abort which would try to access mmap_lock. If another
* thread has invoked ashmem_mmap then it will be holding the
* semaphore and will be waiting for ashmem_mutex, there by leading to
* deadlock. We'll release the mutex and take the name to a local
@@ -586,7 +586,7 @@ static int get_name(struct ashmem_area *asma, void __user *name)
* Have a local variable to which we'll copy the content
* from asma with the lock held. Later we can copy this to the user
* space safely without holding any locks. So even if we proceed to
- * wait for mmap_sem, it won't lead to deadlock.
+ * wait for mmap_lock, it won't lead to deadlock.
*/
char local_name[ASHMEM_NAME_LEN];
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index f85ec5b16b65..0198b886d906 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -37,7 +37,7 @@ static void ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
}
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
- 1 << pool->order);
+ 1 << pool->order);
mutex_unlock(&pool->mutex);
}
@@ -57,7 +57,7 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
list_del(&page->lru);
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
- -(1 << pool->order));
+ -(1 << pool->order));
return page;
}
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 5801067e7c1b..2bb1c2e9cb57 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -383,8 +383,9 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
mutex_lock(&fifo->read_lock);
ret = wait_event_interruptible_timeout(fifo->read_queue,
ioread32(fifo->base_addr + XLLF_RDFO_OFFSET),
- (read_timeout >= 0) ? msecs_to_jiffies(read_timeout) :
- MAX_SCHEDULE_TIMEOUT);
+ (read_timeout >= 0) ?
+ msecs_to_jiffies(read_timeout) :
+ MAX_SCHEDULE_TIMEOUT);
if (ret <= 0) {
if (ret == 0) {
@@ -525,9 +526,10 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
mutex_lock(&fifo->write_lock);
ret = wait_event_interruptible_timeout(fifo->write_queue,
ioread32(fifo->base_addr + XLLF_TDFV_OFFSET)
- >= words_to_write,
- (write_timeout >= 0) ? msecs_to_jiffies(write_timeout) :
- MAX_SCHEDULE_TIMEOUT);
+ >= words_to_write,
+ (write_timeout >= 0) ?
+ msecs_to_jiffies(write_timeout) :
+ MAX_SCHEDULE_TIMEOUT);
if (ret <= 0) {
if (ret == 0) {
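
Aside (not part of the patch): the reflowed calls keep the existing semantics, where a negative module-parameter timeout means "wait without a deadline". A small sketch of that timeout selection and of interpreting the return value of wait_event_interruptible_timeout(), assuming a kernel-module context; the wait queue and condition flag are made up for illustration:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_queue);
static int example_data_ready;		/* hypothetical wakeup condition */

/* A negative timeout_ms means wait forever. */
static int example_wait(long timeout_ms)
{
	long ret;

	ret = wait_event_interruptible_timeout(example_queue,
					       example_data_ready,
					       (timeout_ms >= 0) ?
					       msecs_to_jiffies(timeout_ms) :
					       MAX_SCHEDULE_TIMEOUT);
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out, condition still false */
	if (ret < 0)
		return ret;		/* interrupted by a signal */

	return 0;			/* condition became true */
}
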
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index a56c8f74a27b..e85a99b68f31 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2325,7 +2325,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int retval = 0;
/*
- * 'trylock' avoids circular dependency with current->mm->mmap_sem
+ * 'trylock' avoids circular dependency with current->mm->mmap_lock
* and down-reading &dev->attach_lock should normally succeed without
* contention unless the device is in the process of being attached
* or detached.
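
Aside (not part of the patch): the renamed comment is about avoiding a lock inversion between mmap_lock, which the mm core already holds when calling a driver's .mmap handler, and a driver-private rwsem. A minimal sketch of that trylock idiom, assuming a kernel-module context; the rwsem and handler names are hypothetical, not comedi's:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_attach_lock);	/* stands in for dev->attach_lock */

/*
 * Called from an .mmap handler, where mmap_lock is already held:
 * trylock avoids sleeping on a lock whose holder might in turn be
 * waiting for mmap_lock.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!down_read_trylock(&example_attach_lock))
		return -EAGAIN;

	/* ... set up the mapping here ... */

	up_read(&example_attach_lock);
	return 0;
}
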
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index 7c8f18f55122..9b3631a654c8 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -32,8 +32,8 @@ void comedi_buf_map_get(struct comedi_buf_map *bm);
int comedi_buf_map_put(struct comedi_buf_map *bm);
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
void *buf, int len, int write);
-struct comedi_buf_map *comedi_buf_map_from_subdev_get(
- struct comedi_subdevice *s);
+struct comedi_buf_map *
+comedi_buf_map_from_subdev_get(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s);
void comedi_device_cancel_all(struct comedi_device *dev);
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 7c82d5f9778f..c1d70eec24ab 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1214,7 +1214,7 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct mite *mite = devpriv->mite;
resource_size_t daq_phys_addr;
- static const int Start_Cal_EEPROM = 0x400;
+ static const int start_cal_eeprom = 0x400;
static const unsigned int window_size = 10;
unsigned int old_iodwbsr_bits;
unsigned int old_iodwbsr1_bits;
@@ -1234,7 +1234,7 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
writel(0xf, mite->mmio + 0x30);
for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
- devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
+ devpriv->eeprom_buffer[i] = ni_readb(dev, start_cal_eeprom + i);
writel(old_iodwbsr1_bits, mite->mmio + MITE_IODWBSR_1);
writel(old_iodwbsr_bits, mite->mmio + MITE_IODWBSR);
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index 3c3f387936e8..3a280cc1892c 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -20,6 +20,12 @@
"70 2C 2E 15 10 09 48 33 53 0B 19 18 20 25\n" \
"70 2C 2E 15 10 09 48 33 53 0B 19 18 20 25"
+#define HSD20_IPS_GAMMA \
+ "D0 05 0A 09 08 05 2E 44 45 0F 17 16 2B 33\n" \
+ "D0 05 0A 09 08 05 2E 43 45 0F 16 16 2B 33"
+
+#define HSD20_IPS 1
+
/**
* enum st7789v_command - ST7789V display controller commands
*
@@ -82,14 +88,20 @@ static int init_display(struct fbtft_par *par)
/* set pixel format to RGB-565 */
write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ if (HSD20_IPS)
+ write_reg(par, PORCTRL, 0x05, 0x05, 0x00, 0x33, 0x33);
- write_reg(par, PORCTRL, 0x08, 0x08, 0x00, 0x22, 0x22);
+ else
+ write_reg(par, PORCTRL, 0x08, 0x08, 0x00, 0x22, 0x22);
/*
* VGH = 13.26V
* VGL = -10.43V
*/
- write_reg(par, GCTRL, 0x35);
+ if (HSD20_IPS)
+ write_reg(par, GCTRL, 0x75);
+ else
+ write_reg(par, GCTRL, 0x35);
/*
* VDV and VRH register values come from command write
@@ -101,13 +113,19 @@ static int init_display(struct fbtft_par *par)
* VAP = 4.1V + (VCOM + VCOM offset + 0.5 * VDV)
* VAN = -4.1V + (VCOM + VCOM offset + 0.5 * VDV)
*/
- write_reg(par, VRHS, 0x0B);
+ if (HSD20_IPS)
+ write_reg(par, VRHS, 0x13);
+ else
+ write_reg(par, VRHS, 0x0B);
/* VDV = 0V */
write_reg(par, VDVS, 0x20);
/* VCOM = 0.9V */
- write_reg(par, VCOMS, 0x20);
+ if (HSD20_IPS)
+ write_reg(par, VCOMS, 0x22);
+ else
+ write_reg(par, VCOMS, 0x20);
/* VCOM offset = 0V */
write_reg(par, VCMOFSET, 0x20);
@@ -120,6 +138,10 @@ static int init_display(struct fbtft_par *par)
write_reg(par, PWCTRL1, 0xA4, 0xA1);
write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
+
+ if (HSD20_IPS)
+ write_reg(par, MIPI_DCS_ENTER_INVERT_MODE);
+
return 0;
}
@@ -234,7 +256,7 @@ static struct fbtft_display display = {
.height = 320,
.gamma_num = 2,
.gamma_len = 14,
- .gamma = DEFAULT_GAMMA,
+ .gamma = HSD20_IPS_GAMMA,
.fbtftops = {
.init_display = init_display,
.set_var = set_var,
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 676d1ad1b50d..546ad376df99 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -1094,7 +1094,8 @@ static int swdev_port_obj_del(struct net_device *netdev,
static int
ethsw_switchdev_port_attr_set_event(struct net_device *netdev,
- struct switchdev_notifier_port_attr_info *port_attr_info)
+ struct switchdev_notifier_port_attr_info
+ *port_attr_info)
{
int err;
@@ -1277,7 +1278,8 @@ err_addr_alloc:
static int
ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
- struct switchdev_notifier_port_obj_info *port_obj_info)
+ struct switchdev_notifier_port_obj_info
+ *port_obj_info)
{
int err = -EOPNOTSUPP;
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
index f6d715787da8..f3dbe0fe2a67 100644
--- a/drivers/staging/gasket/gasket_page_table.c
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -898,7 +898,7 @@ static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
*
* Note that memory for second level page tables is allocated as needed, but
* that memory is only freed on the final close of the device file, when the
- * page tables are repartitioned, or the the device is removed. If there is an
+ * page tables are repartitioned, or the device is removed. If there is an
* error or if the full range of slots is not available, any memory
* allocated for second level page tables remains allocated until final close,
* repartition, or device removal.
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
index 5f0e089573a2..af26bc9f184a 100644
--- a/drivers/staging/gasket/gasket_sysfs.c
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -339,6 +339,7 @@ void gasket_sysfs_put_attr(struct device *device,
dev_err(device, "Unable to put unknown attribute: %s\n",
attr->attr.attr.name);
+ put_mapping(mapping);
}
EXPORT_SYMBOL(gasket_sysfs_put_attr);
@@ -372,6 +373,7 @@ ssize_t gasket_sysfs_register_store(struct device *device,
gasket_dev = mapping->gasket_dev;
if (!gasket_dev) {
dev_err(device, "Device driver may have been removed\n");
+ put_mapping(mapping);
return 0;
}
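
Aside (not part of the patch): the two hunks above add the missing put_mapping() calls so that every successful lookup is paired with a release on every exit path. A generic sketch of the same reference-balancing discipline using kref, assuming a kernel-module context; it does not use gasket's internal helpers, and all names are made up:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_mapping {		/* hypothetical refcounted object */
	struct kref refcount;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_mapping, refcount));
}

/* Every kref_get() must be balanced by a kref_put() on every return path. */
static int example_use(struct example_mapping *map, bool bail_early)
{
	kref_get(&map->refcount);

	if (bail_early) {
		kref_put(&map->refcount, example_release);	/* don't leak */
		return -ENODEV;
	}

	/* ... use the mapping ... */

	kref_put(&map->refcount, example_release);
	return 0;
}
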
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 354727f0a1fc..eb309190f5be 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -172,7 +172,7 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
static __sum16 icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
{
- unsigned short *w = ptr;
+ unsigned short *w;
__wsum sum = 0;
int i;
u16 pa;
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 04bfd9110502..ed706f39e87a 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -290,9 +290,8 @@ static int gb_hid_parse(struct hid_device *hid)
}
rdesc = kzalloc(rsize, GFP_KERNEL);
- if (!rdesc) {
+ if (!rdesc)
return -ENOMEM;
- }
ret = gb_hid_get_report_desc(ghid, rdesc);
if (ret) {
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index d6ba25f21d80..d2672b65c3f4 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1026,7 +1026,8 @@ static int gb_lights_light_config(struct gb_lights *glights, u8 id)
light->channels_count = conf.channel_count;
light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
-
+ if (!light->name)
+ return -ENOMEM;
light->channels = kcalloc(light->channels_count,
sizeof(struct gb_channel), GFP_KERNEL);
if (!light->channels)
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 583d9708a191..2471448ba42a 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -135,7 +135,7 @@ static ssize_t name##_##field##_show(struct device *dev, \
char *buf) \
{ \
struct gb_loopback *gb = dev_get_drvdata(dev); \
- /* Report 0 for min and max if no transfer successed */ \
+ /* Report 0 for min and max if no transfer succeeded */ \
if (!gb->requests_completed) \
return sprintf(buf, "0\n"); \
return sprintf(buf, "%" #type "\n", gb->name.field); \
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 4ffb334cd5cd..607378bfebb7 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -40,14 +40,6 @@
#define GB_UART_FIRMWARE_CREDITS 4096
#define GB_UART_CREDIT_WAIT_TIMEOUT_MSEC 10000
-struct gb_tty_line_coding {
- __le32 rate;
- __u8 format;
- __u8 parity;
- __u8 data_bits;
- __u8 flow_control;
-};
-
struct gb_tty {
struct gbphy_device *gbphy_dev;
struct tty_port port;
@@ -66,7 +58,7 @@ struct gb_tty {
struct mutex mutex;
u8 ctrlin; /* input control lines */
u8 ctrlout; /* output control lines */
- struct gb_tty_line_coding line_coding;
+ struct gb_uart_set_line_coding_request line_coding;
struct work_struct tx_work;
struct kfifo write_fifo;
bool close_pending;
@@ -288,12 +280,9 @@ static void gb_uart_tx_write_work(struct work_struct *work)
static int send_line_coding(struct gb_tty *tty)
{
- struct gb_uart_set_line_coding_request request;
-
- memcpy(&request, &tty->line_coding,
- sizeof(tty->line_coding));
return gb_operation_sync(tty->connection, GB_UART_TYPE_SET_LINE_CODING,
- &request, sizeof(request), NULL, 0);
+ &tty->line_coding, sizeof(tty->line_coding),
+ NULL, 0);
}
static int send_control(struct gb_tty *gb_tty, u8 control)
@@ -493,9 +482,9 @@ static int gb_tty_break_ctl(struct tty_struct *tty, int state)
static void gb_tty_set_termios(struct tty_struct *tty,
struct ktermios *termios_old)
{
+ struct gb_uart_set_line_coding_request newline;
struct gb_tty *gb_tty = tty->driver_data;
struct ktermios *termios = &tty->termios;
- struct gb_tty_line_coding newline;
u8 newctrl = gb_tty->ctrlout;
newline.rate = cpu_to_le32(tty_get_baud_rate(tty));
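
Aside (not part of the patch): the change stores the wire-format request (with fixed little-endian fields) directly instead of a separate host-side struct, so the data can be handed to gb_operation_sync() without an intermediate memcpy. A small sketch of that fixed-endianness wire-struct pattern, assuming a kernel-module context; the struct and function names are hypothetical:

#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical on-the-wire request; multi-byte fields are little-endian. */
struct example_line_coding {
	__le32 rate;
	__u8   format;
	__u8   parity;
	__u8   data_bits;
	__u8   flow_control;
};

/* Fill the wire struct once; it can then be sent as-is, with no extra copy. */
static void example_fill(struct example_line_coding *lc, u32 baud)
{
	lc->rate = cpu_to_le32(baud);	/* convert CPU order to wire order */
	lc->format = 0;
	lc->parity = 0;
	lc->data_bits = 8;
	lc->flow_control = 0;
}
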
diff --git a/drivers/staging/iio/Documentation/overview.txt b/drivers/staging/iio/Documentation/overview.txt
index 43f92b06bc3e..ebdc64f451d7 100644
--- a/drivers/staging/iio/Documentation/overview.txt
+++ b/drivers/staging/iio/Documentation/overview.txt
@@ -34,7 +34,7 @@ turned on or off (if possible) via sysfs interfaces.
fifo / ring buffers on the sensor chip. These greatly reduce the load
on the host CPU by buffering relatively large numbers of data samples
based on an internal sampling clock. Examples include VTI SCA3000
-series and Analog Device ADXL345 accelerometers. Each buffer supports
+series and Analog Devices ADXL345 accelerometers. Each buffer supports
polling to establish when data is available.
* Trigger and software buffer support. In many data analysis
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index af0bcf95ee8a..c468355b0848 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -602,11 +602,12 @@ static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
.postdisable = ad5933_ring_postdisable,
};
-static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+static int ad5933_register_ring_funcs_and_init(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct iio_buffer *buffer;
- buffer = iio_kfifo_allocate();
+ buffer = devm_iio_kfifo_allocate(dev);
if (!buffer)
return -ENOMEM;
@@ -676,6 +677,20 @@ static void ad5933_work(struct work_struct *work)
}
}
+static void ad5933_reg_disable(void *data)
+{
+ struct ad5933_state *st = data;
+
+ regulator_disable(st->reg);
+}
+
+static void ad5933_clk_disable(void *data)
+{
+ struct ad5933_state *st = data;
+
+ clk_disable_unprepare(st->mclk);
+}
+
static int ad5933_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -703,23 +718,32 @@ static int ad5933_probe(struct i2c_client *client,
dev_err(&client->dev, "Failed to enable specified VDD supply\n");
return ret;
}
- ret = regulator_get_voltage(st->reg);
+ ret = devm_add_action_or_reset(&client->dev, ad5933_reg_disable, st);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
st->vref_mv = ret / 1000;
st->mclk = devm_clk_get(&client->dev, "mclk");
- if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT) {
- ret = PTR_ERR(st->mclk);
- goto error_disable_reg;
- }
+ if (IS_ERR(st->mclk) && PTR_ERR(st->mclk) != -ENOENT)
+ return PTR_ERR(st->mclk);
if (!IS_ERR(st->mclk)) {
ret = clk_prepare_enable(st->mclk);
if (ret < 0)
- goto error_disable_reg;
+ return ret;
+
+ ret = devm_add_action_or_reset(&client->dev,
+ ad5933_clk_disable,
+ st);
+ if (ret)
+ return ret;
+
ext_clk_hz = clk_get_rate(st->mclk);
}
@@ -742,41 +766,15 @@ static int ad5933_probe(struct i2c_client *client,
indio_dev->channels = ad5933_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
- ret = ad5933_register_ring_funcs_and_init(indio_dev);
+ ret = ad5933_register_ring_funcs_and_init(&client->dev, indio_dev);
if (ret)
- goto error_disable_mclk;
+ return ret;
ret = ad5933_setup(st);
if (ret)
- goto error_unreg_ring;
-
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_unreg_ring;
-
- return 0;
-
-error_unreg_ring:
- iio_kfifo_free(indio_dev->buffer);
-error_disable_mclk:
- clk_disable_unprepare(st->mclk);
-error_disable_reg:
- regulator_disable(st->reg);
-
- return ret;
-}
-
-static int ad5933_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct ad5933_state *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- iio_kfifo_free(indio_dev->buffer);
- regulator_disable(st->reg);
- clk_disable_unprepare(st->mclk);
+ return ret;
- return 0;
+ return devm_iio_device_register(&client->dev, indio_dev);
}
static const struct i2c_device_id ad5933_id[] = {
@@ -801,7 +799,6 @@ static struct i2c_driver ad5933_driver = {
.of_match_table = ad5933_of_match,
},
.probe = ad5933_probe,
- .remove = ad5933_remove,
.id_table = ad5933_id,
};
module_i2c_driver(ad5933_driver);
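
Aside (not part of the patch): the conversion above replaces the manual error/remove teardown with devm_add_action_or_reset() callbacks, which devres runs in reverse order when a later probe step fails or when the device is unbound. A minimal sketch of that idiom, assuming a kernel-module context; the "vdd" supply name and function names are made up for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static void example_reg_disable(void *data)
{
	regulator_disable(data);	/* undo regulator_enable() */
}

/* Enable a supply and register its teardown with devres in one step. */
static int example_enable_vdd(struct device *dev)
{
	struct regulator *vdd;
	int ret;

	vdd = devm_regulator_get(dev, "vdd");	/* hypothetical supply name */
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	ret = regulator_enable(vdd);
	if (ret)
		return ret;

	/* disables the regulator automatically on unbind or later probe error */
	return devm_add_action_or_reset(dev, example_reg_disable, vdd);
}
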
diff --git a/drivers/staging/kpc2000/kpc_dma/fileops.c b/drivers/staging/kpc2000/kpc_dma/fileops.c
index 7caabdd77bbf..89753463e926 100644
--- a/drivers/staging/kpc2000/kpc_dma/fileops.c
+++ b/drivers/staging/kpc2000/kpc_dma/fileops.c
@@ -75,9 +75,9 @@ static int kpc_dma_transfer(struct dev_private_data *priv,
}
// Lock the user buffer pages in memory, and hold on to the page pointers (for the sglist)
- down_read(&current->mm->mmap_sem); /* get memory map semaphore */
+ mmap_read_lock(current->mm); /* get memory map semaphore */
rv = get_user_pages(iov_base, acd->page_count, FOLL_TOUCH | FOLL_WRITE | FOLL_GET, acd->user_pages, NULL);
- up_read(&current->mm->mmap_sem); /* release the semaphore */
+ mmap_read_unlock(current->mm); /* release the semaphore */
if (rv != acd->page_count) {
dev_err(&priv->ldev->pldev->dev, "Couldn't get_user_pages (%ld)\n", rv);
goto err_get_user_pages;
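
Aside (not part of the patch): the mmap_sem accessors are replaced here by the mmap_lock API introduced in this series. A minimal sketch of pinning user pages under the read lock, assuming a kernel-module context; the five-argument get_user_pages() form matches the call shown in this driver at the time, and the function name is hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

/* Pin nr_pages user pages starting at uaddr for writing. */
static long example_pin_pages(unsigned long uaddr, unsigned long nr_pages,
			      struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(uaddr, nr_pages, FOLL_WRITE, pages, NULL);
	mmap_read_unlock(current->mm);

	return pinned;	/* number of pages pinned, or a negative error */
}
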
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 053f485eb994..4bb1eca6f597 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -38,6 +38,8 @@ source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-vde/Kconfig"
+source "drivers/staging/media/tegra-video/Kconfig"
+
source "drivers/staging/media/ipu3/Kconfig"
source "drivers/staging/media/soc_camera/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index e01f13a1b4a2..71a47b61836d 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_ROCKCHIP_VDEC) += rkvdec/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
+obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_HANTRO) += hantro/
obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 492b76c29490..c9e7df0ea5a6 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -982,9 +982,9 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
}
mutex_unlock(&bo->mutex);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)userptr);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (!vma) {
dev_err(atomisp_dev, "find_vma failed\n");
kfree(bo->page_obj);
diff --git a/drivers/staging/media/tegra-video/Kconfig b/drivers/staging/media/tegra-video/Kconfig
new file mode 100644
index 000000000000..f6c61ec74386
--- /dev/null
+++ b/drivers/staging/media/tegra-video/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_TEGRA
+ tristate "NVIDIA Tegra VI driver"
+ depends on TEGRA_HOST1X
+ depends on VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Choose this option if you have an NVIDIA Tegra SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tegra-video.
diff --git a/drivers/staging/media/tegra-video/Makefile b/drivers/staging/media/tegra-video/Makefile
new file mode 100644
index 000000000000..dfa2ef8f99ef
--- /dev/null
+++ b/drivers/staging/media/tegra-video/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+tegra-video-objs := \
+ video.o \
+ vi.o \
+ csi.o
+
+tegra-video-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
+obj-$(CONFIG_VIDEO_TEGRA) += tegra-video.o
diff --git a/drivers/staging/media/tegra-video/TODO b/drivers/staging/media/tegra-video/TODO
new file mode 100644
index 000000000000..6ceb7549c218
--- /dev/null
+++ b/drivers/staging/media/tegra-video/TODO
@@ -0,0 +1,11 @@
+TODO list
+* Currently the driver supports only the Tegra built-in TPG with direct media
+  links from CSI to VI. Add a kernel config CONFIG_VIDEO_TEGRA_TPG and update
+  the driver to choose TPG vs. sensor media links based on CONFIG_VIDEO_TEGRA_TPG.
+* Add real camera sensor capture support.
+* Add Tegra CSI MIPI pads calibration.
+* Add MIPI clock settle time computation based on the data rate.
+* Add support for Ganged mode.
+* Add RAW10 packed video format support to Tegra210 video formats.
+* Add support for suspend and resume.
+* Make sure v4l2-compliance tests pass with all of the above implementations.
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
new file mode 100644
index 000000000000..40ea195d141d
--- /dev/null
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/device.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "csi.h"
+#include "video.h"
+
+static inline struct tegra_csi *
+host1x_client_to_csi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_csi, client);
+}
+
+static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct tegra_csi_channel, subdev);
+}
+
+/*
+ * CSI is a separate subdevice which has 6 source pads to generate
+ * test pattern. CSI subdevice pad ops are used only for TPG and
+ * allows below TPG formats.
+ */
+static const struct v4l2_mbus_framefmt tegra_csi_tpg_fmts[] = {
+ {
+ TEGRA_DEF_WIDTH,
+ TEGRA_DEF_HEIGHT,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ V4L2_FIELD_NONE,
+ V4L2_COLORSPACE_SRGB
+ },
+ {
+ TEGRA_DEF_WIDTH,
+ TEGRA_DEF_HEIGHT,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ V4L2_FIELD_NONE,
+ V4L2_COLORSPACE_SRGB
+ },
+};
+
+static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
+ { 1280, 720 },
+ { 1920, 1080 },
+ { 3840, 2160 },
+};
+
+/*
+ * V4L2 Subdevice Pad Operations
+ */
+static int csi_enum_bus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts))
+ return -EINVAL;
+
+ code->code = tegra_csi_tpg_fmts[code->index].code;
+
+ return 0;
+}
+
+static int csi_get_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+
+ fmt->format = csi_chan->format;
+
+ return 0;
+}
+
+static int csi_get_frmrate_table_index(struct tegra_csi *csi, u32 code,
+ u32 width, u32 height)
+{
+ const struct tpg_framerate *frmrate;
+ unsigned int i;
+
+ frmrate = csi->soc->tpg_frmrate_table;
+ for (i = 0; i < csi->soc->tpg_frmrate_table_size; i++) {
+ if (frmrate[i].code == code &&
+ frmrate[i].frmsize.width == width &&
+ frmrate[i].frmsize.height == height) {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
+ u32 code, u32 width, u32 height)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
+ int index;
+
+ index = csi_get_frmrate_table_index(csi_chan->csi, code,
+ width, height);
+ if (index >= 0) {
+ csi_chan->h_blank = frmrate[index].h_blank;
+ csi_chan->v_blank = frmrate[index].v_blank;
+ csi_chan->framerate = frmrate[index].framerate;
+ }
+}
+
+static int csi_enum_framesizes(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ unsigned int i;
+
+ if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
+ if (fse->code == tegra_csi_tpg_fmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
+ return -EINVAL;
+
+ fse->min_width = tegra_csi_tpg_sizes[fse->index].width;
+ fse->max_width = tegra_csi_tpg_sizes[fse->index].width;
+ fse->min_height = tegra_csi_tpg_sizes[fse->index].height;
+ fse->max_height = tegra_csi_tpg_sizes[fse->index].height;
+
+ return 0;
+}
+
+static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
+ int index;
+
+ /* one framerate per format and resolution */
+ if (fie->index > 0)
+ return -EINVAL;
+
+ index = csi_get_frmrate_table_index(csi_chan->csi, fie->code,
+ fie->width, fie->height);
+ if (index < 0)
+ return -EINVAL;
+
+ fie->interval.numerator = 1;
+ fie->interval.denominator = frmrate[index].framerate;
+
+ return 0;
+}
+
+static int csi_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct v4l2_mbus_framefmt *format = &fmt->format;
+ const struct v4l2_frmsize_discrete *sizes;
+ unsigned int i;
+
+ sizes = v4l2_find_nearest_size(tegra_csi_tpg_sizes,
+ ARRAY_SIZE(tegra_csi_tpg_sizes),
+ width, height,
+ format->width, format->width);
+ format->width = sizes->width;
+ format->height = sizes->height;
+
+ for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
+ if (format->code == tegra_csi_tpg_fmts[i].code)
+ break;
+
+ if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
+ i = 0;
+
+ format->code = tegra_csi_tpg_fmts[i].code;
+ format->field = V4L2_FIELD_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ /* update blanking intervals from frame rate table and format */
+ csi_chan_update_blank_intervals(csi_chan, format->code,
+ format->width, format->height);
+ csi_chan->format = *format;
+
+ return 0;
+}
+
+/*
+ * V4L2 Subdevice Video Operations
+ */
+static int tegra_csi_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *vfi)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+
+ vfi->interval.numerator = 1;
+ vfi->interval.denominator = csi_chan->framerate;
+
+ return 0;
+}
+
+static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
+ struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
+ struct tegra_csi *csi = csi_chan->csi;
+ int ret = 0;
+
+ csi_chan->pg_mode = chan->pg_mode;
+ if (enable) {
+ ret = pm_runtime_get_sync(csi->dev);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to get runtime PM: %d\n", ret);
+ pm_runtime_put_noidle(csi->dev);
+ return ret;
+ }
+
+ ret = csi->ops->csi_start_streaming(csi_chan);
+ if (ret < 0)
+ goto rpm_put;
+
+ return 0;
+ }
+
+ csi->ops->csi_stop_streaming(csi_chan);
+
+rpm_put:
+ pm_runtime_put(csi->dev);
+ return ret;
+}
+
+/*
+ * V4L2 Subdevice Operations
+ */
+static const struct v4l2_subdev_video_ops tegra_csi_video_ops = {
+ .s_stream = tegra_csi_s_stream,
+ .g_frame_interval = tegra_csi_g_frame_interval,
+ .s_frame_interval = tegra_csi_g_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
+ .enum_mbus_code = csi_enum_bus_code,
+ .enum_frame_size = csi_enum_framesizes,
+ .enum_frame_interval = csi_enum_frameintervals,
+ .get_fmt = csi_get_format,
+ .set_fmt = csi_set_format,
+};
+
+static const struct v4l2_subdev_ops tegra_csi_ops = {
+ .video = &tegra_csi_video_ops,
+ .pad = &tegra_csi_pad_ops,
+};
+
+static int tegra_csi_tpg_channels_alloc(struct tegra_csi *csi)
+{
+ struct device_node *node = csi->dev->of_node;
+ unsigned int port_num;
+ struct tegra_csi_channel *chan;
+ unsigned int tpg_channels = csi->soc->csi_max_channels;
+
+ /* allocate a CSI channel for each CSI x2 port */
+ for (port_num = 0; port_num < tpg_channels; port_num++) {
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ list_add_tail(&chan->list, &csi->csi_chans);
+ chan->csi = csi;
+ chan->csi_port_num = port_num;
+ chan->numlanes = 2;
+ chan->of_node = node;
+ chan->numpads = 1;
+ chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
+{
+ struct tegra_csi *csi = chan->csi;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ /* initialize the default format */
+ chan->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.colorspace = V4L2_COLORSPACE_SRGB;
+ chan->format.width = TEGRA_DEF_WIDTH;
+ chan->format.height = TEGRA_DEF_HEIGHT;
+ csi_chan_update_blank_intervals(chan, chan->format.code,
+ chan->format.width,
+ chan->format.height);
+ /* initialize V4L2 subdevice and media entity */
+ subdev = &chan->subdev;
+ v4l2_subdev_init(subdev, &tegra_csi_ops);
+ subdev->dev = csi->dev;
+ snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
+ chan->csi_port_num);
+
+ v4l2_set_subdevdata(subdev, chan);
+ subdev->fwnode = of_fwnode_handle(chan->of_node);
+ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ /* initialize media entity pads */
+ ret = media_entity_pads_init(&subdev->entity, chan->numpads,
+ chan->pads);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to initialize media entity: %d\n", ret);
+ subdev->dev = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void tegra_csi_error_recover(struct v4l2_subdev *sd)
+{
+ struct tegra_csi_channel *csi_chan = to_csi_chan(sd);
+ struct tegra_csi *csi = csi_chan->csi;
+
+ /* stop streaming during error recovery */
+ csi->ops->csi_stop_streaming(csi_chan);
+ csi->ops->csi_err_recover(csi_chan);
+ csi->ops->csi_start_streaming(csi_chan);
+}
+
+static int tegra_csi_channels_init(struct tegra_csi *csi)
+{
+ struct tegra_csi_channel *chan;
+ int ret;
+
+ list_for_each_entry(chan, &csi->csi_chans, list) {
+ ret = tegra_csi_channel_init(chan);
+ if (ret) {
+ dev_err(csi->dev,
+ "failed to initialize channel-%d: %d\n",
+ chan->csi_port_num, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
+{
+ struct v4l2_subdev *subdev;
+ struct tegra_csi_channel *chan, *tmp;
+
+ list_for_each_entry_safe(chan, tmp, &csi->csi_chans, list) {
+ subdev = &chan->subdev;
+ if (subdev->dev)
+ media_entity_cleanup(&subdev->entity);
+ list_del(&chan->list);
+ kfree(chan);
+ }
+}
+
+static int __maybe_unused csi_runtime_suspend(struct device *dev)
+{
+ struct tegra_csi *csi = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(csi->soc->num_clks, csi->clks);
+
+ return 0;
+}
+
+static int __maybe_unused csi_runtime_resume(struct device *dev)
+{
+ struct tegra_csi *csi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(csi->soc->num_clks, csi->clks);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_csi_init(struct host1x_client *client)
+{
+ struct tegra_csi *csi = host1x_client_to_csi(client);
+ struct tegra_video_device *vid = dev_get_drvdata(client->host);
+ int ret;
+
+ INIT_LIST_HEAD(&csi->csi_chans);
+
+ ret = tegra_csi_tpg_channels_alloc(csi);
+ if (ret < 0) {
+ dev_err(csi->dev,
+ "failed to allocate tpg channels: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = tegra_csi_channels_init(csi);
+ if (ret < 0)
+ goto cleanup;
+
+ vid->csi = csi;
+
+ return 0;
+
+cleanup:
+ tegra_csi_channels_cleanup(csi);
+ return ret;
+}
+
+static int tegra_csi_exit(struct host1x_client *client)
+{
+ struct tegra_csi *csi = host1x_client_to_csi(client);
+
+ tegra_csi_channels_cleanup(csi);
+
+ return 0;
+}
+
+static const struct host1x_client_ops csi_client_ops = {
+ .init = tegra_csi_init,
+ .exit = tegra_csi_exit,
+};
+
+static int tegra_csi_probe(struct platform_device *pdev)
+{
+ struct tegra_csi *csi;
+ unsigned int i;
+ int ret;
+
+ csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(csi->iomem))
+ return PTR_ERR(csi->iomem);
+
+ csi->soc = of_device_get_match_data(&pdev->dev);
+
+ csi->clks = devm_kcalloc(&pdev->dev, csi->soc->num_clks,
+ sizeof(*csi->clks), GFP_KERNEL);
+ if (!csi->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < csi->soc->num_clks; i++)
+ csi->clks[i].id = csi->soc->clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, csi->soc->num_clks, csi->clks);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get the clocks: %d\n", ret);
+ return ret;
+ }
+
+ if (!pdev->dev.pm_domain) {
+ ret = -ENOENT;
+ dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
+ return ret;
+ }
+
+ csi->dev = &pdev->dev;
+ csi->ops = csi->soc->ops;
+ platform_set_drvdata(pdev, csi);
+ pm_runtime_enable(&pdev->dev);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&csi->client.list);
+ csi->client.ops = &csi_client_ops;
+ csi->client.dev = &pdev->dev;
+
+ ret = host1x_client_register(&csi->client);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register host1x client: %d\n", ret);
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int tegra_csi_remove(struct platform_device *pdev)
+{
+ struct tegra_csi *csi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&csi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to unregister host1x client: %d\n", err);
+ return err;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_csi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-csi", .data = &tegra210_csi_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_csi_of_id_table);
+
+static const struct dev_pm_ops tegra_csi_pm_ops = {
+ SET_RUNTIME_PM_OPS(csi_runtime_suspend, csi_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_csi_driver = {
+ .driver = {
+ .name = "tegra-csi",
+ .of_match_table = tegra_csi_of_id_table,
+ .pm = &tegra_csi_pm_ops,
+ },
+ .probe = tegra_csi_probe,
+ .remove = tegra_csi_remove,
+};
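
Aside (not part of the patch): tegra_csi_s_stream() above uses the common pm_runtime_get_sync() idiom, where the usage count is incremented even on failure, so pm_runtime_put_noidle() must be called before bailing out. A minimal sketch of just that idiom, assuming a kernel-module context; the function name is made up:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Resume a device for use; drop the usage count again on any failure. */
static int example_runtime_resume_and_use(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even when resume fails */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware here ... */

	pm_runtime_put(dev);
	return 0;
}
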
diff --git a/drivers/staging/media/tegra-video/csi.h b/drivers/staging/media/tegra-video/csi.h
new file mode 100644
index 000000000000..93bd2a05797d
--- /dev/null
+++ b/drivers/staging/media/tegra-video/csi.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_CSI_H__
+#define __TEGRA_CSI_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * Each CSI brick supports a maximum of 4 lanes, which can be used either
+ * as one x4 port using both the CILA and CILB partitions of a CSI brick,
+ * or as two x2 ports with one x2 from CILA and the other x2 from
+ * CILB.
+ */
+#define CSI_PORTS_PER_BRICK 2
+
+/* each CSI channel can have one sink pad and one source pad */
+#define TEGRA_CSI_PADS_NUM 2
+
+enum tegra_csi_cil_port {
+ PORT_A = 0,
+ PORT_B,
+};
+
+enum tegra_csi_block {
+ CSI_CIL_AB = 0,
+ CSI_CIL_CD,
+ CSI_CIL_EF,
+};
+
+struct tegra_csi;
+
+/**
+ * struct tegra_csi_channel - Tegra CSI channel
+ *
+ * @list: list head for this entry
+ * @subdev: V4L2 subdevice associated with this channel
+ * @pads: media pads for the subdevice entity
+ * @numpads: number of pads.
+ * @csi: Tegra CSI device structure
+ * @of_node: csi device tree node
+ * @numlanes: number of lanes used per port/channel
+ * @csi_port_num: CSI channel port number
+ * @pg_mode: test pattern generator mode for channel
+ * @format: active format of the channel
+ * @framerate: active framerate for TPG
+ * @h_blank: horizontal blanking for TPG active format
+ * @v_blank: vertical blanking for TPG active format
+ */
+struct tegra_csi_channel {
+ struct list_head list;
+ struct v4l2_subdev subdev;
+ struct media_pad pads[TEGRA_CSI_PADS_NUM];
+ unsigned int numpads;
+ struct tegra_csi *csi;
+ struct device_node *of_node;
+ unsigned int numlanes;
+ u8 csi_port_num;
+ u8 pg_mode;
+ struct v4l2_mbus_framefmt format;
+ unsigned int framerate;
+ unsigned int h_blank;
+ unsigned int v_blank;
+};
+
+/**
+ * struct tpg_framerate - Tegra CSI TPG framerate configuration
+ *
+ * @frmsize: frame resolution
+ * @code: media bus format code
+ * @h_blank: horizontal blanking used for TPG
+ * @v_blank: vertical blanking interval used for TPG
+ * @framerate: framerate achieved with the corresponding blanking intervals,
+ * format and resolution.
+ */
+struct tpg_framerate {
+ struct v4l2_frmsize_discrete frmsize;
+ u32 code;
+ unsigned int h_blank;
+ unsigned int v_blank;
+ unsigned int framerate;
+};
+
+/**
+ * struct tegra_csi_ops - Tegra CSI operations
+ *
+ * @csi_start_streaming: programs csi hardware to enable streaming.
+ * @csi_stop_streaming: programs csi hardware to disable streaming.
+ * @csi_err_recover: csi hardware block recovery in case of any capture errors
+ * due to missing source stream or due to improper csi input from
+ * the external source.
+ */
+struct tegra_csi_ops {
+ int (*csi_start_streaming)(struct tegra_csi_channel *csi_chan);
+ void (*csi_stop_streaming)(struct tegra_csi_channel *csi_chan);
+ void (*csi_err_recover)(struct tegra_csi_channel *csi_chan);
+};
+
+/**
+ * struct tegra_csi_soc - NVIDIA Tegra CSI SoC structure
+ *
+ * @ops: csi hardware operations
+ * @csi_max_channels: supported max streaming channels
+ * @clk_names: csi and cil clock names
+ * @num_clks: total clocks count
+ * @tpg_frmrate_table: csi tpg frame rate table with blanking intervals
+ * @tpg_frmrate_table_size: size of frame rate table
+ */
+struct tegra_csi_soc {
+ const struct tegra_csi_ops *ops;
+ unsigned int csi_max_channels;
+ const char * const *clk_names;
+ unsigned int num_clks;
+ const struct tpg_framerate *tpg_frmrate_table;
+ unsigned int tpg_frmrate_table_size;
+};
+
+/**
+ * struct tegra_csi - NVIDIA Tegra CSI device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @iomem: register base
+ * @clks: clock for CSI and CIL
+ * @soc: pointer to SoC data structure
+ * @ops: csi operations
+ * @csi_chans: list head for CSI channels
+ */
+struct tegra_csi {
+ struct device *dev;
+ struct host1x_client client;
+ void __iomem *iomem;
+ struct clk_bulk_data *clks;
+ const struct tegra_csi_soc *soc;
+ const struct tegra_csi_ops *ops;
+ struct list_head csi_chans;
+};
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_csi_soc tegra210_csi_soc;
+#endif
+
+void tegra_csi_error_recover(struct v4l2_subdev *subdev);
+#endif
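
Aside (not part of the patch): tegra210_csi_soc declared here is selected in csi.c through the of_device_id .data pointer and of_device_get_match_data(). A generic sketch of that per-SoC data pattern, assuming a kernel-module context; the compatible string, struct, and values below are hypothetical placeholders:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_soc_data {		/* hypothetical per-SoC parameters */
	unsigned int max_channels;
};

static const struct example_soc_data example_soc_a_data = {
	.max_channels = 6,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-soc-a", .data = &example_soc_a_data },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_soc_data *soc;

	/* returns the .data pointer of the matching of_device_id entry */
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc)
		return -ENODEV;

	dev_info(&pdev->dev, "max channels: %u\n", soc->max_channels);
	return 0;
}
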
diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c
new file mode 100644
index 000000000000..3baa4e314203
--- /dev/null
+++ b/drivers/staging/media/tegra-video/tegra210.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+/*
+ * This source file contains Tegra210 supported video formats,
+ * VI and CSI SoC specific data, operations and registers accessors.
+ */
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/kthread.h>
+
+#include "csi.h"
+#include "vi.h"
+
+#define TEGRA_VI_SYNCPT_WAIT_TIMEOUT msecs_to_jiffies(200)
+
+/* Tegra210 VI registers */
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT 0x000
+#define VI_CFG_VI_INCR_SYNCPT_COND(x) (((x) & 0xff) << 8)
+#define VI_CSI_PP_FRAME_START(port) (5 + (port) * 4)
+#define VI_CSI_MW_ACK_DONE(port) (7 + (port) * 4)
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL 0x004
+#define VI_INCR_SYNCPT_NO_STALL BIT(8)
+#define TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x008
+#define TEGRA_VI_CFG_CG_CTRL 0x0b8
+#define VI_CG_2ND_LEVEL_EN 0x1
+
+/* Tegra210 VI CSI registers */
+#define TEGRA_VI_CSI_SW_RESET 0x000
+#define TEGRA_VI_CSI_SINGLE_SHOT 0x004
+#define SINGLE_SHOT_CAPTURE 0x1
+#define TEGRA_VI_CSI_IMAGE_DEF 0x00c
+#define BYPASS_PXL_TRANSFORM_OFFSET 24
+#define IMAGE_DEF_FORMAT_OFFSET 16
+#define IMAGE_DEF_DEST_MEM 0x1
+#define TEGRA_VI_CSI_IMAGE_SIZE 0x018
+#define IMAGE_SIZE_HEIGHT_OFFSET 16
+#define TEGRA_VI_CSI_IMAGE_SIZE_WC 0x01c
+#define TEGRA_VI_CSI_IMAGE_DT 0x020
+#define TEGRA_VI_CSI_SURFACE0_OFFSET_MSB 0x024
+#define TEGRA_VI_CSI_SURFACE0_OFFSET_LSB 0x028
+#define TEGRA_VI_CSI_SURFACE1_OFFSET_MSB 0x02c
+#define TEGRA_VI_CSI_SURFACE1_OFFSET_LSB 0x030
+#define TEGRA_VI_CSI_SURFACE2_OFFSET_MSB 0x034
+#define TEGRA_VI_CSI_SURFACE2_OFFSET_LSB 0x038
+#define TEGRA_VI_CSI_SURFACE0_STRIDE 0x054
+#define TEGRA_VI_CSI_SURFACE1_STRIDE 0x058
+#define TEGRA_VI_CSI_SURFACE2_STRIDE 0x05c
+#define TEGRA_VI_CSI_SURFACE_HEIGHT0 0x060
+#define TEGRA_VI_CSI_ERROR_STATUS 0x084
+
+/* Tegra210 CSI Pixel Parser registers: Starts from 0x838, offset 0x0 */
+#define TEGRA_CSI_INPUT_STREAM_CONTROL 0x000
+#define CSI_SKIP_PACKET_THRESHOLD_OFFSET 16
+#define TEGRA_CSI_PIXEL_STREAM_CONTROL0 0x004
+#define CSI_PP_PACKET_HEADER_SENT BIT(4)
+#define CSI_PP_DATA_IDENTIFIER_ENABLE BIT(5)
+#define CSI_PP_WORD_COUNT_SELECT_HEADER BIT(6)
+#define CSI_PP_CRC_CHECK_ENABLE BIT(7)
+#define CSI_PP_WC_CHECK BIT(8)
+#define CSI_PP_OUTPUT_FORMAT_STORE (0x3 << 16)
+#define CSI_PPA_PAD_LINE_NOPAD (0x2 << 24)
+#define CSI_PP_HEADER_EC_DISABLE (0x1 << 27)
+#define CSI_PPA_PAD_FRAME_NOPAD (0x2 << 28)
+#define TEGRA_CSI_PIXEL_STREAM_CONTROL1 0x008
+#define CSI_PP_TOP_FIELD_FRAME_OFFSET 0
+#define CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET 4
+#define TEGRA_CSI_PIXEL_STREAM_GAP 0x00c
+#define PP_FRAME_MIN_GAP_OFFSET 16
+#define TEGRA_CSI_PIXEL_STREAM_PP_COMMAND 0x010
+#define CSI_PP_ENABLE 0x1
+#define CSI_PP_DISABLE 0x2
+#define CSI_PP_RST 0x3
+#define CSI_PP_SINGLE_SHOT_ENABLE (0x1 << 2)
+#define CSI_PP_START_MARKER_FRAME_MAX_OFFSET 12
+#define TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME 0x014
+#define TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK 0x018
+#define TEGRA_CSI_PIXEL_PARSER_STATUS 0x01c
+
+/* Tegra210 CSI PHY registers */
+/* CSI_PHY_CIL_COMMAND_0 offset 0x0d0 from TEGRA_CSI_PIXEL_PARSER_0_BASE */
+#define TEGRA_CSI_PHY_CIL_COMMAND 0x0d0
+#define CSI_A_PHY_CIL_NOP 0x0
+#define CSI_A_PHY_CIL_ENABLE 0x1
+#define CSI_A_PHY_CIL_DISABLE 0x2
+#define CSI_A_PHY_CIL_ENABLE_MASK 0x3
+#define CSI_B_PHY_CIL_NOP (0x0 << 8)
+#define CSI_B_PHY_CIL_ENABLE (0x1 << 8)
+#define CSI_B_PHY_CIL_DISABLE (0x2 << 8)
+#define CSI_B_PHY_CIL_ENABLE_MASK (0x3 << 8)
+
+#define TEGRA_CSI_CIL_PAD_CONFIG0 0x000
+#define BRICK_CLOCK_A_4X (0x1 << 16)
+#define BRICK_CLOCK_B_4X (0x2 << 16)
+#define TEGRA_CSI_CIL_PAD_CONFIG1 0x004
+#define TEGRA_CSI_CIL_PHY_CONTROL 0x008
+#define TEGRA_CSI_CIL_INTERRUPT_MASK 0x00c
+#define TEGRA_CSI_CIL_STATUS 0x010
+#define TEGRA_CSI_CILX_STATUS 0x014
+#define TEGRA_CSI_CIL_SW_SENSOR_RESET 0x020
+
+#define TEGRA_CSI_PATTERN_GENERATOR_CTRL 0x000
+#define PG_MODE_OFFSET 2
+#define PG_ENABLE 0x1
+#define PG_DISABLE 0x0
+#define TEGRA_CSI_PG_BLANK 0x004
+#define PG_VBLANK_OFFSET 16
+#define TEGRA_CSI_PG_PHASE 0x008
+#define TEGRA_CSI_PG_RED_FREQ 0x00c
+#define PG_RED_VERT_INIT_FREQ_OFFSET 16
+#define PG_RED_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_RED_FREQ_RATE 0x010
+#define TEGRA_CSI_PG_GREEN_FREQ 0x014
+#define PG_GREEN_VERT_INIT_FREQ_OFFSET 16
+#define PG_GREEN_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_GREEN_FREQ_RATE 0x018
+#define TEGRA_CSI_PG_BLUE_FREQ 0x01c
+#define PG_BLUE_VERT_INIT_FREQ_OFFSET 16
+#define PG_BLUE_HOR_INIT_FREQ_OFFSET 0
+#define TEGRA_CSI_PG_BLUE_FREQ_RATE 0x020
+#define TEGRA_CSI_PG_AOHDR 0x024
+#define TEGRA_CSI_CSI_SW_STATUS_RESET 0x214
+#define TEGRA_CSI_CLKEN_OVERRIDE 0x218
+
+#define TEGRA210_CSI_PORT_OFFSET 0x34
+#define TEGRA210_CSI_CIL_OFFSET 0x0f4
+#define TEGRA210_CSI_TPG_OFFSET 0x18c
+
+#define CSI_PP_OFFSET(block) ((block) * 0x800)
+#define TEGRA210_VI_CSI_BASE(x) (0x100 + (x) * 0x100)
+
+/* Tegra210 VI registers accessors */
+static void tegra_vi_write(struct tegra_vi_channel *chan, unsigned int addr,
+ u32 val)
+{
+ writel_relaxed(val, chan->vi->iomem + addr);
+}
+
+static u32 tegra_vi_read(struct tegra_vi_channel *chan, unsigned int addr)
+{
+ return readl_relaxed(chan->vi->iomem + addr);
+}
+
+/* Tegra210 VI_CSI registers accessors */
+static void vi_csi_write(struct tegra_vi_channel *chan, unsigned int addr,
+ u32 val)
+{
+ void __iomem *vi_csi_base;
+
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(chan->portno);
+
+ writel_relaxed(val, vi_csi_base + addr);
+}
+
+static u32 vi_csi_read(struct tegra_vi_channel *chan, unsigned int addr)
+{
+ void __iomem *vi_csi_base;
+
+ vi_csi_base = chan->vi->iomem + TEGRA210_VI_CSI_BASE(chan->portno);
+
+ return readl_relaxed(vi_csi_base + addr);
+}
+
+/*
+ * Tegra210 VI channel capture operations
+ */
+static int tegra_channel_capture_setup(struct tegra_vi_channel *chan)
+{
+ u32 height = chan->format.height;
+ u32 width = chan->format.width;
+ u32 format = chan->fmtinfo->img_fmt;
+ u32 data_type = chan->fmtinfo->img_dt;
+ u32 word_count = (width * chan->fmtinfo->bit_width) / 8;
+
+ vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, 0xffffffff);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_DEF,
+ ((chan->pg_mode ? 0 : 1) << BYPASS_PXL_TRANSFORM_OFFSET) |
+ (format << IMAGE_DEF_FORMAT_OFFSET) |
+ IMAGE_DEF_DEST_MEM);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_DT, data_type);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_SIZE_WC, word_count);
+ vi_csi_write(chan, TEGRA_VI_CSI_IMAGE_SIZE,
+ (height << IMAGE_SIZE_HEIGHT_OFFSET) | width);
+ return 0;
+}
+
+static void tegra_channel_vi_soft_reset(struct tegra_vi_channel *chan)
+{
+ /* disable clock gating to enable continuous clock */
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, 0);
+ /*
+ * Soft reset the memory client interface, pixel format logic, sensor
+ * control logic, and shadow copy logic to bring VI to a clean state.
+ */
+ vi_csi_write(chan, TEGRA_VI_CSI_SW_RESET, 0xf);
+ usleep_range(100, 200);
+ vi_csi_write(chan, TEGRA_VI_CSI_SW_RESET, 0x0);
+
+ /* enable back VI clock gating */
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
+}
+
+static void tegra_channel_capture_error_recover(struct tegra_vi_channel *chan)
+{
+ struct v4l2_subdev *subdev;
+ u32 val;
+
+ /*
+ * Recover VI and CSI hardware blocks in case of missing frame start
+ * events due to the source not streaming, noisy CSI input from the
+ * external source, or many outstanding frame start or MW_ACK_DONE
+ * events, any of which can cause the CSI and VI hardware to hang.
+ * This helps to have a clean capture for the next frame.
+ */
+ val = vi_csi_read(chan, TEGRA_VI_CSI_ERROR_STATUS);
+ dev_dbg(&chan->video.dev, "TEGRA_VI_CSI_ERROR_STATUS 0x%08x\n", val);
+ vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
+ dev_dbg(&chan->video.dev,
+ "TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR 0x%08x\n", val);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
+
+ /* recover VI by issuing software reset and re-setup for capture */
+ tegra_channel_vi_soft_reset(chan);
+ tegra_channel_capture_setup(chan);
+
+ /* recover CSI block */
+ subdev = tegra_channel_get_remote_subdev(chan);
+ tegra_csi_error_recover(subdev);
+}
+
+static struct tegra_channel_buffer *
+dequeue_buf_done(struct tegra_vi_channel *chan)
+{
+ struct tegra_channel_buffer *buf = NULL;
+
+ spin_lock(&chan->done_lock);
+ if (list_empty(&chan->done)) {
+ spin_unlock(&chan->done_lock);
+ return NULL;
+ }
+
+ buf = list_first_entry(&chan->done,
+ struct tegra_channel_buffer, queue);
+ if (buf)
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->done_lock);
+
+ return buf;
+}
+
+static void release_buffer(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *vb = &buf->buf;
+
+ vb->sequence = chan->sequence++;
+ vb->field = V4L2_FIELD_NONE;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb->vb2_buf, state);
+}
+
+static int tegra_channel_capture_frame(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ u32 thresh, value, frame_start, mw_ack_done;
+ int bytes_per_line = chan->format.bytesperline;
+ int err;
+
+ /* program buffer address by using surface 0 */
+ vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_OFFSET_MSB,
+ (u64)buf->addr >> 32);
+ vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_OFFSET_LSB, buf->addr);
+ vi_csi_write(chan, TEGRA_VI_CSI_SURFACE0_STRIDE, bytes_per_line);
+
+ /*
+ * The Tegra VI block interacts with host1x syncpoints to synchronize
+ * the programmed capture state with the hardware operation.
+ * The frame start and memory write acknowledge syncpoints each have
+ * their own FIFO of depth 2.
+ *
+ * Syncpoint trigger conditions set through the VI_INCR_SYNCPT register
+ * are added to the HW syncpt FIFO. When the HW condition triggers, the
+ * condition is removed from the FIFO and the counter at that syncpoint
+ * index is incremented by the hardware. Software can then wait for the
+ * counter to reach a threshold to synchronize frame capture with the
+ * hardware capture events.
+ */
+
+ /* increase channel syncpoint threshold for FRAME_START */
+ thresh = host1x_syncpt_incr_max(chan->frame_start_sp, 1);
+
+ /* Program FRAME_START trigger condition syncpt request */
+ frame_start = VI_CSI_PP_FRAME_START(chan->portno);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(frame_start) |
+ host1x_syncpt_id(chan->frame_start_sp);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+
+ /* increase channel syncpoint threshold for MW_ACK_DONE */
+ buf->mw_ack_sp_thresh = host1x_syncpt_incr_max(chan->mw_ack_sp, 1);
+
+ /* Program MW_ACK_DONE trigger condition syncpt request */
+ mw_ack_done = VI_CSI_MW_ACK_DONE(chan->portno);
+ value = VI_CFG_VI_INCR_SYNCPT_COND(mw_ack_done) |
+ host1x_syncpt_id(chan->mw_ack_sp);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT, value);
+
+ /* enable single shot capture */
+ vi_csi_write(chan, TEGRA_VI_CSI_SINGLE_SHOT, SINGLE_SHOT_CAPTURE);
+
+ /* wait for syncpt counter to reach frame start event threshold */
+ err = host1x_syncpt_wait(chan->frame_start_sp, thresh,
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (err) {
+ dev_err_ratelimited(&chan->video.dev,
+ "frame start syncpt timeout: %d\n", err);
+ /* increment syncpoint counters for the timed-out events */
+ host1x_syncpt_incr(chan->frame_start_sp);
+ spin_lock(&chan->sp_incr_lock);
+ host1x_syncpt_incr(chan->mw_ack_sp);
+ spin_unlock(&chan->sp_incr_lock);
+ /* clear errors and recover */
+ tegra_channel_capture_error_recover(chan);
+ release_buffer(chan, buf, VB2_BUF_STATE_ERROR);
+ return err;
+ }
+
+ /* move buffer to capture done queue */
+ spin_lock(&chan->done_lock);
+ list_add_tail(&buf->queue, &chan->done);
+ spin_unlock(&chan->done_lock);
+
+ /* wake up the capture-done kthread */
+ wake_up_interruptible(&chan->done_wait);
+
+ return 0;
+}
+
+static void tegra_channel_capture_done(struct tegra_vi_channel *chan,
+ struct tegra_channel_buffer *buf)
+{
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ u32 value;
+ int ret;
+
+ /* wait for syncpt counter to reach MW_ACK_DONE event threshold */
+ ret = host1x_syncpt_wait(chan->mw_ack_sp, buf->mw_ack_sp_thresh,
+ TEGRA_VI_SYNCPT_WAIT_TIMEOUT, &value);
+ if (ret) {
+ dev_err_ratelimited(&chan->video.dev,
+ "MW_ACK_DONE syncpt timeout: %d\n", ret);
+ state = VB2_BUF_STATE_ERROR;
+ /* increment syncpoint counter for the timed-out event */
+ spin_lock(&chan->sp_incr_lock);
+ host1x_syncpt_incr(chan->mw_ack_sp);
+ spin_unlock(&chan->sp_incr_lock);
+ }
+
+ release_buffer(chan, buf, state);
+}
+
+static int chan_capture_kthread_start(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+ int err = 0;
+
+ while (1) {
+ /*
+ * The source is not streaming if the error is non-zero, so do not
+ * dequeue buffers on error and let the thread sleep until the
+ * kthread stop signal is received.
+ */
+ wait_event_interruptible(chan->start_wait,
+ kthread_should_stop() ||
+ (!list_empty(&chan->capture) &&
+ !err));
+
+ if (kthread_should_stop())
+ break;
+
+ /* dequeue the buffer and start capture */
+ spin_lock(&chan->start_lock);
+ if (list_empty(&chan->capture)) {
+ spin_unlock(&chan->start_lock);
+ continue;
+ }
+
+ buf = list_first_entry(&chan->capture,
+ struct tegra_channel_buffer, queue);
+ list_del_init(&buf->queue);
+ spin_unlock(&chan->start_lock);
+
+ err = tegra_channel_capture_frame(chan, buf);
+ if (err)
+ vb2_queue_error(&chan->queue);
+ }
+
+ return 0;
+}
+
+static int chan_capture_kthread_finish(void *data)
+{
+ struct tegra_vi_channel *chan = data;
+ struct tegra_channel_buffer *buf;
+
+ while (1) {
+ wait_event_interruptible(chan->done_wait,
+ !list_empty(&chan->done) ||
+ kthread_should_stop());
+
+ /* dequeue buffers and finish capture */
+ buf = dequeue_buf_done(chan);
+ while (buf) {
+ tegra_channel_capture_done(chan, buf);
+ buf = dequeue_buf_done(chan);
+ }
+
+ if (kthread_should_stop())
+ break;
+ }
+
+ return 0;
+}
+
+static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ struct media_pipeline *pipe = &chan->video.pipe;
+ u32 val;
+ int ret;
+
+ tegra_vi_write(chan, TEGRA_VI_CFG_CG_CTRL, VI_CG_2ND_LEVEL_EN);
+
+ /* clear errors */
+ val = vi_csi_read(chan, TEGRA_VI_CSI_ERROR_STATUS);
+ vi_csi_write(chan, TEGRA_VI_CSI_ERROR_STATUS, val);
+
+ val = tegra_vi_read(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR);
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_ERROR, val);
+
+ /*
+ * A full syncpoint FIFO stalls the host interface.
+ * Setting NO_STALL drops INCR_SYNCPT methods when the FIFOs are
+ * full and sets the corresponding condition bits in the
+ * INCR_SYNCPT_ERROR register.
+ * This allows SW to handle error recovery.
+ */
+ tegra_vi_write(chan, TEGRA_VI_CFG_VI_INCR_SYNCPT_CNTRL,
+ VI_INCR_SYNCPT_NO_STALL);
+
+ /* start the pipeline */
+ ret = media_pipeline_start(&chan->video.entity, pipe);
+ if (ret < 0)
+ goto error_pipeline_start;
+
+ tegra_channel_capture_setup(chan);
+ ret = tegra_channel_set_stream(chan, true);
+ if (ret < 0)
+ goto error_set_stream;
+
+ chan->sequence = 0;
+
+ /* start kthreads to capture data to buffer and return them */
+ chan->kthread_start_capture = kthread_run(chan_capture_kthread_start,
+ chan, "%s:0",
+ chan->video.name);
+ if (IS_ERR(chan->kthread_start_capture)) {
+ ret = PTR_ERR(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ dev_err(&chan->video.dev,
+ "failed to run capture start kthread: %d\n", ret);
+ goto error_kthread_start;
+ }
+
+ chan->kthread_finish_capture = kthread_run(chan_capture_kthread_finish,
+ chan, "%s:1",
+ chan->video.name);
+ if (IS_ERR(chan->kthread_finish_capture)) {
+ ret = PTR_ERR(chan->kthread_finish_capture);
+ chan->kthread_finish_capture = NULL;
+ dev_err(&chan->video.dev,
+ "failed to run capture finish kthread: %d\n", ret);
+ goto error_kthread_done;
+ }
+
+ return 0;
+
+error_kthread_done:
+ kthread_stop(chan->kthread_start_capture);
+error_kthread_start:
+ tegra_channel_set_stream(chan, false);
+error_set_stream:
+ media_pipeline_stop(&chan->video.entity);
+error_pipeline_start:
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void tegra210_vi_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (chan->kthread_start_capture) {
+ kthread_stop(chan->kthread_start_capture);
+ chan->kthread_start_capture = NULL;
+ }
+
+ if (chan->kthread_finish_capture) {
+ kthread_stop(chan->kthread_finish_capture);
+ chan->kthread_finish_capture = NULL;
+ }
+
+ tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR);
+ tegra_channel_set_stream(chan, false);
+ media_pipeline_stop(&chan->video.entity);
+}
+
+/*
+ * Tegra210 VI Pixel memory format enum.
+ * These format enum values get programmed into the corresponding Tegra VI
+ * channel register bits.
+ */
+enum tegra210_image_format {
+ TEGRA210_IMAGE_FORMAT_T_L8 = 16,
+
+ TEGRA210_IMAGE_FORMAT_T_R16_I = 32,
+ TEGRA210_IMAGE_FORMAT_T_B5G6R5,
+ TEGRA210_IMAGE_FORMAT_T_R5G6B5,
+ TEGRA210_IMAGE_FORMAT_T_A1B5G5R5,
+ TEGRA210_IMAGE_FORMAT_T_A1R5G5B5,
+ TEGRA210_IMAGE_FORMAT_T_B5G5R5A1,
+ TEGRA210_IMAGE_FORMAT_T_R5G5B5A1,
+ TEGRA210_IMAGE_FORMAT_T_A4B4G4R4,
+ TEGRA210_IMAGE_FORMAT_T_A4R4G4B4,
+ TEGRA210_IMAGE_FORMAT_T_B4G4R4A4,
+ TEGRA210_IMAGE_FORMAT_T_R4G4B4A4,
+
+ TEGRA210_IMAGE_FORMAT_T_A8B8G8R8 = 64,
+ TEGRA210_IMAGE_FORMAT_T_A8R8G8B8,
+ TEGRA210_IMAGE_FORMAT_T_B8G8R8A8,
+ TEGRA210_IMAGE_FORMAT_T_R8G8B8A8,
+ TEGRA210_IMAGE_FORMAT_T_A2B10G10R10,
+ TEGRA210_IMAGE_FORMAT_T_A2R10G10B10,
+ TEGRA210_IMAGE_FORMAT_T_B10G10R10A2,
+ TEGRA210_IMAGE_FORMAT_T_R10G10B10A2,
+
+ TEGRA210_IMAGE_FORMAT_T_A8Y8U8V8 = 193,
+ TEGRA210_IMAGE_FORMAT_T_V8U8Y8A8,
+
+ TEGRA210_IMAGE_FORMAT_T_A2Y10U10V10 = 197,
+ TEGRA210_IMAGE_FORMAT_T_V10U10Y10A2,
+ TEGRA210_IMAGE_FORMAT_T_Y8_U8__Y8_V8,
+ TEGRA210_IMAGE_FORMAT_T_Y8_V8__Y8_U8,
+ TEGRA210_IMAGE_FORMAT_T_U8_Y8__V8_Y8,
+ TEGRA210_IMAGE_FORMAT_T_V8_Y8__U8_Y8,
+
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N444 = 224,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N444,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N444,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N422,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8__V8_N420,
+ TEGRA210_IMAGE_FORMAT_T_Y8__U8V8_N420,
+ TEGRA210_IMAGE_FORMAT_T_Y8__V8U8_N420,
+ TEGRA210_IMAGE_FORMAT_T_X2LC10LB10LA10,
+ TEGRA210_IMAGE_FORMAT_T_A2R6R6R6R6R6,
+};
+
+#define TEGRA210_VIDEO_FMT(DATA_TYPE, BIT_WIDTH, MBUS_CODE, BPP, \
+ FORMAT, FOURCC) \
+{ \
+ TEGRA_IMAGE_DT_##DATA_TYPE, \
+ BIT_WIDTH, \
+ MEDIA_BUS_FMT_##MBUS_CODE, \
+ BPP, \
+ TEGRA210_IMAGE_FORMAT_##FORMAT, \
+ V4L2_PIX_FMT_##FOURCC, \
+}
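For illustration only (this expansion is not part of the patch itself), the first RAW10 entry in the table below,

	TEGRA210_VIDEO_FMT(RAW10, 10, SRGGB10_1X10, 2, T_R16_I, SRGGB10)

expands to a struct tegra_video_format initializer (the struct is defined in vi.h later in this series) in the field order used by the macro:

	{
		TEGRA_IMAGE_DT_RAW10,
		10,
		MEDIA_BUS_FMT_SRGGB10_1X10,
		2,
		TEGRA210_IMAGE_FORMAT_T_R16_I,
		V4L2_PIX_FMT_SRGGB10,
	}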
+
+/* Tegra210 supported video formats */
+static const struct tegra_video_format tegra210_video_formats[] = {
+ /* RAW 8 */
+ TEGRA210_VIDEO_FMT(RAW8, 8, SRGGB8_1X8, 1, T_L8, SRGGB8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SGRBG8_1X8, 1, T_L8, SGRBG8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SGBRG8_1X8, 1, T_L8, SGBRG8),
+ TEGRA210_VIDEO_FMT(RAW8, 8, SBGGR8_1X8, 1, T_L8, SBGGR8),
+ /* RAW 10 */
+ TEGRA210_VIDEO_FMT(RAW10, 10, SRGGB10_1X10, 2, T_R16_I, SRGGB10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SGRBG10_1X10, 2, T_R16_I, SGRBG10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SGBRG10_1X10, 2, T_R16_I, SGBRG10),
+ TEGRA210_VIDEO_FMT(RAW10, 10, SBGGR10_1X10, 2, T_R16_I, SBGGR10),
+ /* RAW 12 */
+ TEGRA210_VIDEO_FMT(RAW12, 12, SRGGB12_1X12, 2, T_R16_I, SRGGB12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SGRBG12_1X12, 2, T_R16_I, SGRBG12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SGBRG12_1X12, 2, T_R16_I, SGBRG12),
+ TEGRA210_VIDEO_FMT(RAW12, 12, SBGGR12_1X12, 2, T_R16_I, SBGGR12),
+ /* RGB888 */
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X24, 4, T_A8R8G8B8, RGB24),
+ TEGRA210_VIDEO_FMT(RGB888, 24, RGB888_1X32_PADHI, 4, T_A8B8G8R8,
+ XBGR32),
+ /* YUV422 */
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 2, T_U8_Y8__V8_Y8, UYVY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_1X16, 2, T_V8_Y8__U8_Y8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_1X16, 2, T_Y8_U8__Y8_V8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_1X16, 2, T_Y8_V8__Y8_U8, YVYU),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_1X16, 1, T_Y8__V8U8_N422, NV16),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, UYVY8_2X8, 2, T_U8_Y8__V8_Y8, UYVY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, VYUY8_2X8, 2, T_V8_Y8__U8_Y8, VYUY),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YUYV8_2X8, 2, T_Y8_U8__Y8_V8, YUYV),
+ TEGRA210_VIDEO_FMT(YUV422_8, 16, YVYU8_2X8, 2, T_Y8_V8__Y8_U8, YVYU),
+};
+
+/* Tegra210 VI operations */
+static const struct tegra_vi_ops tegra210_vi_ops = {
+ .vi_start_streaming = tegra210_vi_start_streaming,
+ .vi_stop_streaming = tegra210_vi_stop_streaming,
+};
+
+/* Tegra210 VI SoC data */
+const struct tegra_vi_soc tegra210_vi_soc = {
+ .video_formats = tegra210_video_formats,
+ .nformats = ARRAY_SIZE(tegra210_video_formats),
+ .ops = &tegra210_vi_ops,
+ .hw_revision = 3,
+ .vi_max_channels = 6,
+ .vi_max_clk_hz = 499200000,
+};
+
+/* Tegra210 CSI PHY register accessors */
+static void csi_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
+ u32 val)
+{
+ void __iomem *csi_pp_base;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+
+ writel_relaxed(val, csi_pp_base + addr);
+}
+
+/* Tegra210 CSI pixel parser register accessors */
+static void pp_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ writel_relaxed(val, csi_pp_base + offset + addr);
+}
+
+static u32 pp_read(struct tegra_csi *csi, u8 portno, u32 addr)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ return readl_relaxed(csi_pp_base + offset + addr);
+}
+
+/* Tegra210 CSI CIL A/B port register accessors */
+static void cil_write(struct tegra_csi *csi, u8 portno, u32 addr, u32 val)
+{
+ void __iomem *csi_cil_base;
+ unsigned int offset;
+
+ csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
+ TEGRA210_CSI_CIL_OFFSET;
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ writel_relaxed(val, csi_cil_base + offset + addr);
+}
+
+static u32 cil_read(struct tegra_csi *csi, u8 portno, u32 addr)
+{
+ void __iomem *csi_cil_base;
+ unsigned int offset;
+
+ csi_cil_base = csi->iomem + CSI_PP_OFFSET(portno >> 1) +
+ TEGRA210_CSI_CIL_OFFSET;
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET;
+
+ return readl_relaxed(csi_cil_base + offset + addr);
+}
+
+/* Tegra210 CSI test pattern generator register accessor */
+static void tpg_write(struct tegra_csi *csi, u8 portno, unsigned int addr,
+ u32 val)
+{
+ void __iomem *csi_pp_base;
+ unsigned int offset;
+
+ csi_pp_base = csi->iomem + CSI_PP_OFFSET(portno >> 1);
+ offset = (portno % CSI_PORTS_PER_BRICK) * TEGRA210_CSI_PORT_OFFSET +
+ TEGRA210_CSI_TPG_OFFSET;
+
+ writel_relaxed(val, csi_pp_base + offset + addr);
+}
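As a hedged recap of the addressing scheme used by the accessors above (the actual values of CSI_PP_OFFSET(), CSI_PORTS_PER_BRICK, TEGRA210_CSI_PORT_OFFSET and TEGRA210_CSI_TPG_OFFSET live in headers outside this hunk; CSI_PORTS_PER_BRICK is assumed to be 2 here, which matches the portno >> 1 brick selection):

	/*
	 * Illustrative decomposition for portno = 3 (second port of the
	 * second brick), assuming two ports per brick:
	 *
	 *   brick = portno >> 1 = 1   -> CSI_PP_OFFSET(1)
	 *   port  = portno % 2  = 1   -> 1 * TEGRA210_CSI_PORT_OFFSET
	 *
	 * so pp_write(csi, 3, addr, val) writes to
	 *   csi->iomem + CSI_PP_OFFSET(1) + TEGRA210_CSI_PORT_OFFSET + addr
	 * tpg_write() adds TEGRA210_CSI_TPG_OFFSET on top of the port offset,
	 * while csi_write() uses only the brick base.
	 */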
+
+/*
+ * Tegra210 CSI operations
+ */
+static void tegra210_csi_error_recover(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int portno = csi_chan->csi_port_num;
+ u32 val;
+
+ /*
+	 * Recover the CSI hardware from capture errors by issuing a
+	 * software reset to the CSI CIL sensor and pixel parser and by
+	 * clearing the error status, so the next streaming session starts
+	 * with a clean capture.
+ */
+ val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
+
+ if (csi_chan->numlanes == 4) {
+ /* reset CSI CIL sensor */
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ /*
+		 * SW_STATUS_RESET resets all status bits of the PPA, PPB,
+		 * CILA and CILB status registers as well as the debug
+		 * counters, so it can be used only when the CSI brick is
+		 * in x4 mode.
+ */
+ csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x1);
+
+ /* sleep for 20 clock cycles to drain the FIFO */
+ usleep_range(10, 20);
+
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+ csi_write(csi, portno, TEGRA_CSI_CSI_SW_STATUS_RESET, 0x0);
+ } else {
+		/* reset CSI CIL sensor */
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x1);
+ usleep_range(10, 20);
+ cil_write(csi, portno, TEGRA_CSI_CIL_SW_SENSOR_RESET, 0x0);
+
+ /* clear the errors */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS,
+ 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ }
+}
+
+static int tegra210_csi_start_streaming(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int portno = csi_chan->csi_port_num;
+ u32 val;
+
+ csi_write(csi, portno, TEGRA_CSI_CLKEN_OVERRIDE, 0);
+
+ /* clean up status */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ cil_write(csi, portno, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+
+ /* CIL PHY registers setup */
+ cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
+ cil_write(csi, portno, TEGRA_CSI_CIL_PHY_CONTROL, 0xa);
+
+ /*
+	 * The CSI unit supports connection of up to six cameras and is
+	 * organized as three identical instances of two MIPI support
+	 * blocks. Each instance has a separate 4-lane interface that can
+	 * be configured either as a single camera with 4 lanes or as a
+	 * dual camera with 2 lanes available for each camera.
+ */
+ if (csi_chan->numlanes == 4) {
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_STATUS, 0xffffffff);
+ cil_write(csi, portno + 1, TEGRA_CSI_CILX_STATUS, 0xffffffff);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+
+ cil_write(csi, portno, TEGRA_CSI_CIL_PAD_CONFIG0,
+ BRICK_CLOCK_A_4X);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PAD_CONFIG0, 0x0);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_INTERRUPT_MASK, 0x0);
+ cil_write(csi, portno + 1, TEGRA_CSI_CIL_PHY_CONTROL, 0xa);
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
+ CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_ENABLE);
+ } else {
+ val = ((portno & 1) == PORT_A) ?
+ CSI_A_PHY_CIL_ENABLE | CSI_B_PHY_CIL_NOP :
+ CSI_B_PHY_CIL_ENABLE | CSI_A_PHY_CIL_NOP;
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
+ }
+
+ /* CSI pixel parser registers setup */
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_RST);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_INTERRUPT_MASK, 0x0);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL0,
+ CSI_PP_PACKET_HEADER_SENT |
+ CSI_PP_DATA_IDENTIFIER_ENABLE |
+ CSI_PP_WORD_COUNT_SELECT_HEADER |
+ CSI_PP_CRC_CHECK_ENABLE | CSI_PP_WC_CHECK |
+ CSI_PP_OUTPUT_FORMAT_STORE | CSI_PPA_PAD_LINE_NOPAD |
+ CSI_PP_HEADER_EC_DISABLE | CSI_PPA_PAD_FRAME_NOPAD |
+ (portno & 1));
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_CONTROL1,
+ (0x1 << CSI_PP_TOP_FIELD_FRAME_OFFSET) |
+ (0x1 << CSI_PP_TOP_FIELD_FRAME_MASK_OFFSET));
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_GAP,
+ 0x14 << PP_FRAME_MIN_GAP_OFFSET);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_EXPECTED_FRAME, 0x0);
+ pp_write(csi, portno, TEGRA_CSI_INPUT_STREAM_CONTROL,
+ (0x3f << CSI_SKIP_PACKET_THRESHOLD_OFFSET) |
+ (csi_chan->numlanes - 1));
+
+ /* TPG setup */
+ if (csi_chan->pg_mode) {
+ tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
+ ((csi_chan->pg_mode - 1) << PG_MODE_OFFSET) |
+ PG_ENABLE);
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLANK,
+ csi_chan->v_blank << PG_VBLANK_OFFSET |
+ csi_chan->h_blank);
+ tpg_write(csi, portno, TEGRA_CSI_PG_PHASE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ,
+ (0x10 << PG_RED_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_RED_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_RED_FREQ_RATE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ,
+ (0x10 << PG_GREEN_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_GREEN_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_GREEN_FREQ_RATE, 0x0);
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ,
+ (0x10 << PG_BLUE_VERT_INIT_FREQ_OFFSET) |
+ (0x10 << PG_BLUE_HOR_INIT_FREQ_OFFSET));
+ tpg_write(csi, portno, TEGRA_CSI_PG_BLUE_FREQ_RATE, 0x0);
+ }
+
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_SINGLE_SHOT_ENABLE | CSI_PP_ENABLE);
+
+ return 0;
+}
+
+static void tegra210_csi_stop_streaming(struct tegra_csi_channel *csi_chan)
+{
+ struct tegra_csi *csi = csi_chan->csi;
+ unsigned int portno = csi_chan->csi_port_num;
+ u32 val;
+
+ val = pp_read(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS);
+
+ dev_dbg(csi->dev, "TEGRA_CSI_PIXEL_PARSER_STATUS 0x%08x\n", val);
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_PARSER_STATUS, val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CIL_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CIL_STATUS 0x%08x\n", val);
+ cil_write(csi, portno, TEGRA_CSI_CIL_STATUS, val);
+
+ val = cil_read(csi, portno, TEGRA_CSI_CILX_STATUS);
+ dev_dbg(csi->dev, "TEGRA_CSI_CILX_STATUS 0x%08x\n", val);
+ cil_write(csi, portno, TEGRA_CSI_CILX_STATUS, val);
+
+ pp_write(csi, portno, TEGRA_CSI_PIXEL_STREAM_PP_COMMAND,
+ (0xf << CSI_PP_START_MARKER_FRAME_MAX_OFFSET) |
+ CSI_PP_DISABLE);
+
+ if (csi_chan->pg_mode) {
+ tpg_write(csi, portno, TEGRA_CSI_PATTERN_GENERATOR_CTRL,
+ PG_DISABLE);
+ return;
+ }
+
+ if (csi_chan->numlanes == 4) {
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND,
+ CSI_A_PHY_CIL_DISABLE |
+ CSI_B_PHY_CIL_DISABLE);
+ } else {
+ val = ((portno & 1) == PORT_A) ?
+ CSI_A_PHY_CIL_DISABLE | CSI_B_PHY_CIL_NOP :
+ CSI_B_PHY_CIL_DISABLE | CSI_A_PHY_CIL_NOP;
+ csi_write(csi, portno, TEGRA_CSI_PHY_CIL_COMMAND, val);
+ }
+}
+
+/*
+ * Tegra210 CSI TPG frame rate table with horizontal and vertical
+ * blanking intervals for each supported format and resolution.
+ * The blanking intervals are values tuned by the design team for
+ * the maximum TPG clock rate.
+ */
+static const struct tpg_framerate tegra210_tpg_frmrate_table[] = {
+ {
+ .frmsize = { 1280, 720 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 120,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1920, 1080 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 60,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 3840, 2160 },
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .framerate = 20,
+ .h_blank = 8,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1280, 720 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 60,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 1920, 1080 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 30,
+ .h_blank = 512,
+ .v_blank = 8,
+ },
+ {
+ .frmsize = { 3840, 2160 },
+ .code = MEDIA_BUS_FMT_RGB888_1X32_PADHI,
+ .framerate = 8,
+ .h_blank = 8,
+ .v_blank = 8,
+ },
+};
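As a rough, illustrative sanity check of these numbers (assuming the TPG raster is simply active size plus blanking, which is not spelled out in this hunk), the 1920x1080 RAW10 entry implies a pixel rate of about

	(1920 + 512) * (1080 + 8) * 60 ≈ 158.8 Mpixels/s

and the 1280x720 RAW10 entry works out to a similar

	(1280 + 512) * (720 + 8) * 120 ≈ 156.5 Mpixels/s

which is consistent with the blanking being tuned against a fixed maximum TPG clock.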
+
+static const char * const tegra210_csi_cil_clks[] = {
+ "csi",
+ "cilab",
+ "cilcd",
+ "cile",
+ "csi_tpg",
+};
+
+/* Tegra210 CSI operations */
+static const struct tegra_csi_ops tegra210_csi_ops = {
+ .csi_start_streaming = tegra210_csi_start_streaming,
+ .csi_stop_streaming = tegra210_csi_stop_streaming,
+ .csi_err_recover = tegra210_csi_error_recover,
+};
+
+/* Tegra210 CSI SoC data */
+const struct tegra_csi_soc tegra210_csi_soc = {
+ .ops = &tegra210_csi_ops,
+ .csi_max_channels = 6,
+ .clk_names = tegra210_csi_cil_clks,
+ .num_clks = ARRAY_SIZE(tegra210_csi_cil_clks),
+ .tpg_frmrate_table = tegra210_tpg_frmrate_table,
+ .tpg_frmrate_table_size = ARRAY_SIZE(tegra210_tpg_frmrate_table),
+};
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
new file mode 100644
index 000000000000..1b5e660155f5
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/host1x.h>
+#include <linux/lcm.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <soc/tegra/pmc.h>
+
+#include "vi.h"
+#include "video.h"
+
+#define SURFACE_ALIGN_BYTES 64
+#define MAX_CID_CONTROLS 1
+
+static const struct tegra_video_format tegra_default_format = {
+ .img_dt = TEGRA_IMAGE_DT_RAW10,
+ .bit_width = 10,
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .bpp = 2,
+ .img_fmt = TEGRA_IMAGE_FORMAT_DEF,
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+};
+
+static inline struct tegra_vi *
+host1x_client_to_vi(struct host1x_client *client)
+{
+ return container_of(client, struct tegra_vi, client);
+}
+
+static inline struct tegra_channel_buffer *
+to_tegra_channel_buffer(struct vb2_v4l2_buffer *vb)
+{
+ return container_of(vb, struct tegra_channel_buffer, buf);
+}
+
+static int tegra_get_format_idx_by_code(struct tegra_vi *vi,
+ unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < vi->soc->nformats; ++i) {
+ if (vi->soc->video_formats[i].code == code)
+ return i;
+ }
+
+ return -1;
+}
+
+static u32 tegra_get_format_fourcc_by_idx(struct tegra_vi *vi,
+ unsigned int index)
+{
+ if (index >= vi->soc->nformats)
+ return -EINVAL;
+
+ return vi->soc->video_formats[index].fourcc;
+}
+
+static const struct tegra_video_format *
+tegra_get_format_by_fourcc(struct tegra_vi *vi, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vi->soc->nformats; ++i) {
+ if (vi->soc->video_formats[i].fourcc == fourcc)
+ return &vi->soc->video_formats[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * videobuf2 queue operations
+ */
+static int tegra_channel_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ if (*nplanes)
+ return sizes[0] < chan->format.sizeimage ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = chan->format.sizeimage;
+ alloc_devs[0] = chan->vi->dev;
+
+ return 0;
+}
+
+static int tegra_channel_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
+ unsigned long size = chan->format.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(chan->video.v4l2_dev,
+ "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+ buf->chan = chan;
+ buf->addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ return 0;
+}
+
+static void tegra_channel_buffer_queue(struct vb2_buffer *vb)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tegra_channel_buffer *buf = to_tegra_channel_buffer(vbuf);
+
+ /* put buffer into the capture queue */
+ spin_lock(&chan->start_lock);
+ list_add_tail(&buf->queue, &chan->capture);
+ spin_unlock(&chan->start_lock);
+
+	/* wake up the kthread for capture */
+ wake_up_interruptible(&chan->start_wait);
+}
+
+struct v4l2_subdev *
+tegra_channel_get_remote_subdev(struct tegra_vi_channel *chan)
+{
+ struct media_pad *pad;
+ struct v4l2_subdev *subdev;
+ struct media_entity *entity;
+
+ pad = media_entity_remote_pad(&chan->pad);
+ entity = pad->entity;
+ subdev = media_entity_to_v4l2_subdev(entity);
+
+ return subdev;
+}
+
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ /* stream CSI */
+ subdev = tegra_channel_get_remote_subdev(chan);
+ ret = v4l2_subdev_call(subdev, video, s_stream, on);
+ if (on && ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
+ enum vb2_buffer_state state)
+{
+ struct tegra_channel_buffer *buf, *nbuf;
+
+ spin_lock(&chan->start_lock);
+ list_for_each_entry_safe(buf, nbuf, &chan->capture, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ spin_unlock(&chan->start_lock);
+
+ spin_lock(&chan->done_lock);
+ list_for_each_entry_safe(buf, nbuf, &chan->done, queue) {
+ vb2_buffer_done(&buf->buf.vb2_buf, state);
+ list_del(&buf->queue);
+ }
+ spin_unlock(&chan->done_lock);
+}
+
+static int tegra_channel_start_streaming(struct vb2_queue *vq, u32 count)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+ int ret;
+
+ ret = pm_runtime_get_sync(chan->vi->dev);
+ if (ret < 0) {
+ dev_err(chan->vi->dev, "failed to get runtime PM: %d\n", ret);
+ pm_runtime_put_noidle(chan->vi->dev);
+ return ret;
+ }
+
+ ret = chan->vi->ops->vi_start_streaming(vq, count);
+ if (ret < 0)
+ pm_runtime_put(chan->vi->dev);
+
+ return ret;
+}
+
+static void tegra_channel_stop_streaming(struct vb2_queue *vq)
+{
+ struct tegra_vi_channel *chan = vb2_get_drv_priv(vq);
+
+ chan->vi->ops->vi_stop_streaming(vq);
+ pm_runtime_put(chan->vi->dev);
+}
+
+static const struct vb2_ops tegra_channel_queue_qops = {
+ .queue_setup = tegra_channel_queue_setup,
+ .buf_prepare = tegra_channel_buffer_prepare,
+ .buf_queue = tegra_channel_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = tegra_channel_start_streaming,
+ .stop_streaming = tegra_channel_stop_streaming,
+};
+
+/*
+ * V4L2 ioctl operations
+ */
+static int tegra_channel_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ strscpy(cap->driver, "tegra-video", sizeof(cap->driver));
+ strscpy(cap->card, chan->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(chan->vi->dev));
+
+ return 0;
+}
+
+static int tegra_channel_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ return v4l2_g_parm_cap(&chan->video, subdev, a);
+}
+
+static int tegra_channel_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ return v4l2_s_parm_cap(&chan->video, subdev, a);
+}
+
+static int tegra_channel_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *sizes)
+{
+ int ret;
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = sizes->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, sizes->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fse.code = fmtinfo->code;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ sizes->discrete.width = fse.max_width;
+ sizes->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int tegra_channel_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *ivals)
+{
+ int ret;
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ struct v4l2_subdev *subdev;
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = ivals->index,
+ .width = ivals->width,
+ .height = ivals->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, ivals->pixel_format);
+ if (!fmtinfo)
+ return -EINVAL;
+
+ fie.code = fmtinfo->code;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ ret = v4l2_subdev_call(subdev, pad, enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ ivals->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ ivals->discrete.numerator = fie.interval.numerator;
+ ivals->discrete.denominator = fie.interval.denominator;
+
+ return 0;
+}
+
+static int tegra_channel_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ unsigned int index = 0, i;
+ unsigned long *fmts_bitmap = chan->tpg_fmts_bitmap;
+
+ if (f->index >= bitmap_weight(fmts_bitmap, MAX_FORMAT_NUM))
+ return -EINVAL;
+
+ for (i = 0; i < f->index + 1; i++, index++)
+ index = find_next_bit(fmts_bitmap, MAX_FORMAT_NUM, index);
+
+ f->pixelformat = tegra_get_format_fourcc_by_idx(chan->vi, index - 1);
+
+ return 0;
+}
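A short trace of the loop above may help; assuming the TPG bitmap has exactly two bits set, say bits 3 and 7 (the actual positions depend on the format table), an enumeration with f->index == 1 proceeds as follows:

	/*
	 * i = 0: index = find_next_bit(bitmap, MAX_FORMAT_NUM, 0) = 3, then index++ -> 4
	 * i = 1: index = find_next_bit(bitmap, MAX_FORMAT_NUM, 4) = 7, then index++ -> 8
	 * loop exits, and the fourcc of format table entry (index - 1) == 7 is reported,
	 * i.e. the second set bit, as expected for f->index == 1.
	 */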
+
+static int tegra_channel_get_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ format->fmt.pix = chan->format;
+
+ return 0;
+}
+
+static void tegra_channel_fmt_align(struct tegra_vi_channel *chan,
+ struct v4l2_pix_format *pix,
+ unsigned int bpp)
+{
+ unsigned int align;
+ unsigned int min_width;
+ unsigned int max_width;
+ unsigned int width;
+ unsigned int min_bpl;
+ unsigned int max_bpl;
+ unsigned int bpl;
+
+ /*
+	 * The transfer alignment requirements are expressed in bytes.
+	 * Compute the minimum and maximum widths in bytes, clamp the
+	 * requested width (converted to bytes) to that range and convert
+	 * it back to pixels.
+ */
+ align = lcm(SURFACE_ALIGN_BYTES, bpp);
+ min_width = roundup(TEGRA_MIN_WIDTH, align);
+ max_width = rounddown(TEGRA_MAX_WIDTH, align);
+ width = roundup(pix->width * bpp, align);
+
+ pix->width = clamp(width, min_width, max_width) / bpp;
+ pix->height = clamp(pix->height, TEGRA_MIN_HEIGHT, TEGRA_MAX_HEIGHT);
+
+	/*
+	 * Clamp the requested bytes per line: round it up to the surface
+	 * alignment and keep it between the minimum line size for the
+	 * clamped width and the maximum supported line size.
+	 */
+ min_bpl = pix->width * bpp;
+ max_bpl = rounddown(TEGRA_MAX_WIDTH, SURFACE_ALIGN_BYTES);
+ bpl = roundup(pix->bytesperline, SURFACE_ALIGN_BYTES);
+
+ pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
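A worked example with the driver's own constants may make the byte/pixel round trip clearer (illustrative numbers only; SURFACE_ALIGN_BYTES is 64 and bpp is 2 for the default RAW10 format, so align = lcm(64, 2) = 64):

	/*
	 * Requested width 1921 pixels, RAW10 (bpp = 2):
	 *   width in bytes    = 1921 * 2        = 3842
	 *   roundup(3842, 64) = 3904 bytes      -> 3904 / 2 = 1952 pixels
	 *   min_bpl           = 1952 * 2        = 3904
	 *   bytesperline      = clamp(roundup(requested bpl, 64), 3904, max_bpl)
	 *   sizeimage         = bytesperline * height
	 */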
+
+static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
+ struct v4l2_pix_format *pix)
+{
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev *subdev;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev_pad_config *pad_cfg;
+
+ subdev = tegra_channel_get_remote_subdev(chan);
+ pad_cfg = v4l2_subdev_alloc_pad_config(subdev);
+ if (!pad_cfg)
+ return -ENOMEM;
+ /*
+	 * Retrieve the format information and, if the requested format
+	 * isn't supported, fall back to the current format.
+ */
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
+ if (!fmtinfo) {
+ pix->pixelformat = chan->format.pixelformat;
+ pix->colorspace = chan->format.colorspace;
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi,
+ pix->pixelformat);
+ }
+
+ pix->field = V4L2_FIELD_NONE;
+ fmt.which = V4L2_SUBDEV_FORMAT_TRY;
+ fmt.pad = 0;
+ v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
+ v4l2_subdev_call(subdev, pad, set_fmt, pad_cfg, &fmt);
+ v4l2_fill_pix_format(pix, &fmt.format);
+ tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+
+ v4l2_subdev_free_pad_config(pad_cfg);
+
+ return 0;
+}
+
+static int tegra_channel_try_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+
+ return __tegra_channel_try_format(chan, &format->fmt.pix);
+}
+
+static int tegra_channel_set_format(struct file *file, void *fh,
+ struct v4l2_format *format)
+{
+ struct tegra_vi_channel *chan = video_drvdata(file);
+ const struct tegra_video_format *fmtinfo;
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ struct v4l2_pix_format *pix = &format->fmt.pix;
+ int ret;
+
+ if (vb2_is_busy(&chan->queue))
+ return -EBUSY;
+
+	/* get the supported format via try_fmt */
+ ret = __tegra_channel_try_format(chan, pix);
+ if (ret)
+ return ret;
+
+ fmtinfo = tegra_get_format_by_fourcc(chan->vi, pix->pixelformat);
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+ v4l2_fill_mbus_format(&fmt.format, pix, fmtinfo->code);
+ subdev = tegra_channel_get_remote_subdev(chan);
+ v4l2_subdev_call(subdev, pad, set_fmt, NULL, &fmt);
+ v4l2_fill_pix_format(pix, &fmt.format);
+ tegra_channel_fmt_align(chan, pix, fmtinfo->bpp);
+
+ chan->format = *pix;
+ chan->fmtinfo = fmtinfo;
+
+ return 0;
+}
+
+static int tegra_channel_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+	/* currently the driver supports only the internal TPG */
+ if (inp->index)
+ return -EINVAL;
+
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strscpy(inp->name, "Tegra TPG", sizeof(inp->name));
+
+ return 0;
+}
+
+static int tegra_channel_g_input(struct file *file, void *priv,
+ unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int tegra_channel_s_input(struct file *file, void *priv,
+ unsigned int input)
+{
+ if (input > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops tegra_channel_ioctl_ops = {
+ .vidioc_querycap = tegra_channel_querycap,
+ .vidioc_g_parm = tegra_channel_g_parm,
+ .vidioc_s_parm = tegra_channel_s_parm,
+ .vidioc_enum_framesizes = tegra_channel_enum_framesizes,
+ .vidioc_enum_frameintervals = tegra_channel_enum_frameintervals,
+ .vidioc_enum_fmt_vid_cap = tegra_channel_enum_format,
+ .vidioc_g_fmt_vid_cap = tegra_channel_get_format,
+ .vidioc_s_fmt_vid_cap = tegra_channel_set_format,
+ .vidioc_try_fmt_vid_cap = tegra_channel_try_format,
+ .vidioc_enum_input = tegra_channel_enum_input,
+ .vidioc_g_input = tegra_channel_g_input,
+ .vidioc_s_input = tegra_channel_s_input,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * V4L2 file operations
+ */
+static const struct v4l2_file_operations tegra_channel_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * V4L2 control operations
+ */
+static int vi_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tegra_vi_channel *chan = container_of(ctrl->handler,
+ struct tegra_vi_channel,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_TEST_PATTERN:
+ /* pattern change takes effect on next stream */
+ chan->pg_mode = ctrl->val + 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vi_ctrl_ops = {
+ .s_ctrl = vi_s_ctrl,
+};
+
+static const char *const vi_pattern_strings[] = {
+ "Black/White Direct Mode",
+ "Color Patch Mode",
+};
+
+static int tegra_channel_setup_ctrl_handler(struct tegra_vi_channel *chan)
+{
+ int ret;
+
+	/* add the test pattern control to the channel's control handler */
+ v4l2_ctrl_new_std_menu_items(&chan->ctrl_handler, &vi_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(vi_pattern_strings) - 1,
+ 0, 0, vi_pattern_strings);
+ if (chan->ctrl_handler.error) {
+ dev_err(chan->vi->dev, "failed to add TPG ctrl handler: %d\n",
+ chan->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ return chan->ctrl_handler.error;
+ }
+
+ /* setup the controls */
+ ret = v4l2_ctrl_handler_setup(&chan->ctrl_handler);
+ if (ret < 0) {
+ dev_err(chan->vi->dev,
+ "failed to setup v4l2 ctrl handler: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* VI supports only 2 formats in TPG mode */
+static void vi_tpg_fmts_bitmap_init(struct tegra_vi_channel *chan)
+{
+ int index;
+
+ bitmap_zero(chan->tpg_fmts_bitmap, MAX_FORMAT_NUM);
+
+ index = tegra_get_format_idx_by_code(chan->vi,
+ MEDIA_BUS_FMT_SRGGB10_1X10);
+ bitmap_set(chan->tpg_fmts_bitmap, index, 1);
+
+ index = tegra_get_format_idx_by_code(chan->vi,
+ MEDIA_BUS_FMT_RGB888_1X32_PADHI);
+ bitmap_set(chan->tpg_fmts_bitmap, index, 1);
+}
+
+static void tegra_channel_cleanup(struct tegra_vi_channel *chan)
+{
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+ media_entity_cleanup(&chan->video.entity);
+ host1x_syncpt_free(chan->mw_ack_sp);
+ host1x_syncpt_free(chan->frame_start_sp);
+ mutex_destroy(&chan->video_lock);
+}
+
+void tegra_channels_cleanup(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan, *tmp;
+
+ if (!vi)
+ return;
+
+ list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
+ tegra_channel_cleanup(chan);
+ list_del(&chan->list);
+ kfree(chan);
+ }
+}
+
+static int tegra_channel_init(struct tegra_vi_channel *chan)
+{
+ struct tegra_vi *vi = chan->vi;
+ struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
+ unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;
+ int ret;
+
+ mutex_init(&chan->video_lock);
+ INIT_LIST_HEAD(&chan->capture);
+ INIT_LIST_HEAD(&chan->done);
+ spin_lock_init(&chan->start_lock);
+ spin_lock_init(&chan->done_lock);
+ spin_lock_init(&chan->sp_incr_lock);
+ init_waitqueue_head(&chan->start_wait);
+ init_waitqueue_head(&chan->done_wait);
+
+ /* initialize the video format */
+ chan->fmtinfo = &tegra_default_format;
+ chan->format.pixelformat = chan->fmtinfo->fourcc;
+ chan->format.colorspace = V4L2_COLORSPACE_SRGB;
+ chan->format.field = V4L2_FIELD_NONE;
+ chan->format.width = TEGRA_DEF_WIDTH;
+ chan->format.height = TEGRA_DEF_HEIGHT;
+ chan->format.bytesperline = TEGRA_DEF_WIDTH * chan->fmtinfo->bpp;
+ chan->format.sizeimage = chan->format.bytesperline * TEGRA_DEF_HEIGHT;
+ tegra_channel_fmt_align(chan, &chan->format, chan->fmtinfo->bpp);
+
+ chan->frame_start_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!chan->frame_start_sp) {
+ dev_err(vi->dev, "failed to request frame start syncpoint\n");
+ return -ENOMEM;
+ }
+
+ chan->mw_ack_sp = host1x_syncpt_request(&vi->client, flags);
+ if (!chan->mw_ack_sp) {
+ dev_err(vi->dev, "failed to request memory ack syncpoint\n");
+ ret = -ENOMEM;
+ goto free_fs_syncpt;
+ }
+
+ /* initialize the media entity */
+ chan->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&chan->video.entity, 1, &chan->pad);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize media entity: %d\n", ret);
+ goto free_mw_ack_syncpt;
+ }
+
+ ret = v4l2_ctrl_handler_init(&chan->ctrl_handler, MAX_CID_CONTROLS);
+ if (chan->ctrl_handler.error) {
+ dev_err(vi->dev,
+ "failed to initialize v4l2 ctrl handler: %d\n", ret);
+ goto cleanup_media;
+ }
+
+ /* initialize the video_device */
+ chan->video.fops = &tegra_channel_fops;
+ chan->video.v4l2_dev = &vid->v4l2_dev;
+ chan->video.release = video_device_release_empty;
+ chan->video.queue = &chan->queue;
+ snprintf(chan->video.name, sizeof(chan->video.name), "%s-%s-%u",
+ dev_name(vi->dev), "output", chan->portno);
+ chan->video.vfl_type = VFL_TYPE_VIDEO;
+ chan->video.vfl_dir = VFL_DIR_RX;
+ chan->video.ioctl_ops = &tegra_channel_ioctl_ops;
+ chan->video.ctrl_handler = &chan->ctrl_handler;
+ chan->video.lock = &chan->video_lock;
+ chan->video.device_caps = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(&chan->video, chan);
+
+ chan->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ chan->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ chan->queue.lock = &chan->video_lock;
+ chan->queue.drv_priv = chan;
+ chan->queue.buf_struct_size = sizeof(struct tegra_channel_buffer);
+ chan->queue.ops = &tegra_channel_queue_qops;
+ chan->queue.mem_ops = &vb2_dma_contig_memops;
+ chan->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ chan->queue.min_buffers_needed = 2;
+ chan->queue.dev = vi->dev;
+ ret = vb2_queue_init(&chan->queue);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to initialize vb2 queue: %d\n", ret);
+ goto free_v4l2_ctrl_hdl;
+ }
+
+ return 0;
+
+free_v4l2_ctrl_hdl:
+ v4l2_ctrl_handler_free(&chan->ctrl_handler);
+cleanup_media:
+ media_entity_cleanup(&chan->video.entity);
+free_mw_ack_syncpt:
+ host1x_syncpt_free(chan->mw_ack_sp);
+free_fs_syncpt:
+ host1x_syncpt_free(chan->frame_start_sp);
+ return ret;
+}
+
+static int tegra_vi_tpg_channels_alloc(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+ unsigned int port_num;
+ unsigned int nchannels = vi->soc->vi_max_channels;
+
+ for (port_num = 0; port_num < nchannels; port_num++) {
+ /*
+		 * Do not use devm_kzalloc here, as that memory would be
+		 * freed immediately when the device instance is unbound,
+		 * while an application might still hold the device node
+		 * open. Channel memory allocated with kzalloc is freed in
+		 * the video device release callback instead.
+ */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->vi = vi;
+ chan->portno = port_num;
+ list_add_tail(&chan->list, &vi->vi_chans);
+ }
+
+ return 0;
+}
+
+static int tegra_vi_channels_init(struct tegra_vi *vi)
+{
+ struct tegra_vi_channel *chan;
+ int ret;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ ret = tegra_channel_init(chan);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to initialize channel-%d: %d\n",
+ chan->portno, ret);
+ goto cleanup;
+ }
+ }
+
+ return 0;
+
+cleanup:
+ list_for_each_entry_continue_reverse(chan, &vi->vi_chans, list)
+ tegra_channel_cleanup(chan);
+
+ return ret;
+}
+
+void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid)
+{
+ struct tegra_vi *vi = vid->vi;
+ struct tegra_csi *csi = vid->csi;
+ struct tegra_csi_channel *csi_chan;
+ struct tegra_vi_channel *chan;
+
+ list_for_each_entry(chan, &vi->vi_chans, list) {
+ video_unregister_device(&chan->video);
+ mutex_lock(&chan->video_lock);
+ vb2_queue_release(&chan->queue);
+ mutex_unlock(&chan->video_lock);
+ }
+
+ list_for_each_entry(csi_chan, &csi->csi_chans, list)
+ v4l2_device_unregister_subdev(&csi_chan->subdev);
+}
+
+int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid)
+{
+ struct tegra_vi *vi = vid->vi;
+ struct tegra_csi *csi = vid->csi;
+ struct tegra_vi_channel *vi_chan;
+ struct tegra_csi_channel *csi_chan;
+ u32 link_flags = MEDIA_LNK_FL_ENABLED;
+ int ret;
+
+ if (!vi || !csi)
+ return -ENODEV;
+
+ csi_chan = list_first_entry(&csi->csi_chans,
+ struct tegra_csi_channel, list);
+
+ list_for_each_entry(vi_chan, &vi->vi_chans, list) {
+ struct media_entity *source = &csi_chan->subdev.entity;
+ struct media_entity *sink = &vi_chan->video.entity;
+ struct media_pad *source_pad = csi_chan->pads;
+ struct media_pad *sink_pad = &vi_chan->pad;
+
+ ret = v4l2_device_register_subdev(&vid->v4l2_dev,
+ &csi_chan->subdev);
+ if (ret) {
+ dev_err(vi->dev,
+ "failed to register subdev: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = video_register_device(&vi_chan->video,
+ VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to register video device: %d\n", ret);
+ goto cleanup;
+ }
+
+ dev_dbg(vi->dev, "creating %s:%u -> %s:%u link\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index);
+
+ ret = media_create_pad_link(source, source_pad->index,
+ sink, sink_pad->index,
+ link_flags);
+ if (ret < 0) {
+ dev_err(vi->dev,
+ "failed to create %s:%u -> %s:%u link: %d\n",
+ source->name, source_pad->index,
+ sink->name, sink_pad->index, ret);
+ goto cleanup;
+ }
+
+ ret = tegra_channel_setup_ctrl_handler(vi_chan);
+ if (ret < 0)
+ goto cleanup;
+
+ v4l2_set_subdev_hostdata(&csi_chan->subdev, vi_chan);
+ vi_tpg_fmts_bitmap_init(vi_chan);
+ csi_chan = list_next_entry(csi_chan, list);
+ }
+
+ return 0;
+
+cleanup:
+ tegra_v4l2_nodes_cleanup_tpg(vid);
+ return ret;
+}
+
+static int __maybe_unused vi_runtime_resume(struct device *dev)
+{
+ struct tegra_vi *vi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(vi->vdd);
+ if (ret) {
+ dev_err(dev, "failed to enable VDD supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_rate(vi->clk, vi->soc->vi_max_clk_hz);
+ if (ret) {
+ dev_err(dev, "failed to set vi clock rate: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ ret = clk_prepare_enable(vi->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable vi clock: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ return 0;
+
+disable_vdd:
+ regulator_disable(vi->vdd);
+ return ret;
+}
+
+static int __maybe_unused vi_runtime_suspend(struct device *dev)
+{
+ struct tegra_vi *vi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(vi->clk);
+
+ regulator_disable(vi->vdd);
+
+ return 0;
+}
+
+static int tegra_vi_init(struct host1x_client *client)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(client->host);
+ struct tegra_vi *vi = host1x_client_to_vi(client);
+ struct tegra_vi_channel *chan, *tmp;
+ int ret;
+
+ vid->media_dev.hw_revision = vi->soc->hw_revision;
+ snprintf(vid->media_dev.bus_info, sizeof(vid->media_dev.bus_info),
+ "platform:%s", dev_name(vi->dev));
+
+ INIT_LIST_HEAD(&vi->vi_chans);
+
+ ret = tegra_vi_tpg_channels_alloc(vi);
+ if (ret < 0) {
+ dev_err(vi->dev, "failed to allocate tpg channels: %d\n", ret);
+ goto free_chans;
+ }
+
+ ret = tegra_vi_channels_init(vi);
+ if (ret < 0)
+ goto free_chans;
+
+ vid->vi = vi;
+
+ return 0;
+
+free_chans:
+ list_for_each_entry_safe(chan, tmp, &vi->vi_chans, list) {
+ list_del(&chan->list);
+ kfree(chan);
+ }
+
+ return ret;
+}
+
+static int tegra_vi_exit(struct host1x_client *client)
+{
+ /*
+	 * Do not clean up the channels here, as an application might
+	 * still hold the video device nodes open. Channel cleanup happens
+	 * in the v4l2_device release callback, which is called after all
+	 * video device nodes have been released.
+ */
+
+ return 0;
+}
+
+static const struct host1x_client_ops vi_client_ops = {
+ .init = tegra_vi_init,
+ .exit = tegra_vi_exit,
+};
+
+static int tegra_vi_probe(struct platform_device *pdev)
+{
+ struct tegra_vi *vi;
+ int ret;
+
+ vi = devm_kzalloc(&pdev->dev, sizeof(*vi), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vi->iomem = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(vi->iomem))
+ return PTR_ERR(vi->iomem);
+
+ vi->soc = of_device_get_match_data(&pdev->dev);
+
+ vi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vi->clk)) {
+ ret = PTR_ERR(vi->clk);
+ dev_err(&pdev->dev, "failed to get vi clock: %d\n", ret);
+ return ret;
+ }
+
+ vi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
+ if (IS_ERR(vi->vdd)) {
+ ret = PTR_ERR(vi->vdd);
+ dev_err(&pdev->dev, "failed to get VDD supply: %d\n", ret);
+ return ret;
+ }
+
+ if (!pdev->dev.pm_domain) {
+ ret = -ENOENT;
+ dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to populate vi child device: %d\n", ret);
+ return ret;
+ }
+
+ vi->dev = &pdev->dev;
+ vi->ops = vi->soc->ops;
+ platform_set_drvdata(pdev, vi);
+ pm_runtime_enable(&pdev->dev);
+
+ /* initialize host1x interface */
+ INIT_LIST_HEAD(&vi->client.list);
+ vi->client.ops = &vi_client_ops;
+ vi->client.dev = &pdev->dev;
+
+ ret = host1x_client_register(&vi->client);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to register host1x client: %d\n", ret);
+ goto rpm_disable;
+ }
+
+ return 0;
+
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int tegra_vi_remove(struct platform_device *pdev)
+{
+ struct tegra_vi *vi = platform_get_drvdata(pdev);
+ int err;
+
+ err = host1x_client_unregister(&vi->client);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to unregister host1x client: %d\n", err);
+ return err;
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_vi_of_id_table[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-vi", .data = &tegra210_vi_soc },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_vi_of_id_table);
+
+static const struct dev_pm_ops tegra_vi_pm_ops = {
+ SET_RUNTIME_PM_OPS(vi_runtime_suspend, vi_runtime_resume, NULL)
+};
+
+struct platform_driver tegra_vi_driver = {
+ .driver = {
+ .name = "tegra-vi",
+ .of_match_table = tegra_vi_of_id_table,
+ .pm = &tegra_vi_pm_ops,
+ },
+ .probe = tegra_vi_probe,
+ .remove = tegra_vi_remove,
+};
diff --git a/drivers/staging/media/tegra-video/vi.h b/drivers/staging/media/tegra-video/vi.h
new file mode 100644
index 000000000000..6272c9a61809
--- /dev/null
+++ b/drivers/staging/media/tegra-video/vi.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_VI_H__
+#define __TEGRA_VI_H__
+
+#include <linux/host1x.h>
+#include <linux/list.h>
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-v4l2.h>
+
+#define TEGRA_MIN_WIDTH 32U
+#define TEGRA_MAX_WIDTH 32768U
+#define TEGRA_MIN_HEIGHT 32U
+#define TEGRA_MAX_HEIGHT 32768U
+
+#define TEGRA_DEF_WIDTH 1920
+#define TEGRA_DEF_HEIGHT 1080
+#define TEGRA_IMAGE_FORMAT_DEF 32
+
+#define MAX_FORMAT_NUM 64
+
+enum tegra_vi_pg_mode {
+ TEGRA_VI_PG_DISABLED = 0,
+ TEGRA_VI_PG_DIRECT,
+ TEGRA_VI_PG_PATCH,
+};
+
+/**
+ * struct tegra_vi_ops - Tegra VI operations
+ * @vi_start_streaming: starts the media pipeline and subdevice streaming,
+ *		sets up the VI for capture, and runs the capture start and
+ *		capture finish kthreads that capture frames into buffers and
+ *		return them.
+ * @vi_stop_streaming: stops the media pipeline and subdevice streaming and
+ *		returns any queued buffers.
+ */
+struct tegra_vi_ops {
+ int (*vi_start_streaming)(struct vb2_queue *vq, u32 count);
+ void (*vi_stop_streaming)(struct vb2_queue *vq);
+};
+
+/**
+ * struct tegra_vi_soc - NVIDIA Tegra Video Input SoC structure
+ *
+ * @video_formats: supported video formats
+ * @nformats: number of supported video formats
+ * @ops: VI operations
+ * @hw_revision: VI hardware revision
+ * @vi_max_channels: maximum number of supported streaming channels
+ * @vi_max_clk_hz: maximum VI clock frequency in Hz
+ */
+struct tegra_vi_soc {
+ const struct tegra_video_format *video_formats;
+ const unsigned int nformats;
+ const struct tegra_vi_ops *ops;
+ u32 hw_revision;
+ unsigned int vi_max_channels;
+ unsigned int vi_max_clk_hz;
+};
+
+/**
+ * struct tegra_vi - NVIDIA Tegra Video Input device structure
+ *
+ * @dev: device struct
+ * @client: host1x_client struct
+ * @iomem: register base
+ * @clk: main clock for VI block
+ * @vdd: vdd regulator for VI hardware, normally it is avdd_dsi_csi
+ * @soc: pointer to SoC data structure
+ * @ops: vi operations
+ * @vi_chans: list head for VI channels
+ */
+struct tegra_vi {
+ struct device *dev;
+ struct host1x_client client;
+ void __iomem *iomem;
+ struct clk *clk;
+ struct regulator *vdd;
+ const struct tegra_vi_soc *soc;
+ const struct tegra_vi_ops *ops;
+ struct list_head vi_chans;
+};
+
+/**
+ * struct tegra_vi_channel - Tegra video channel
+ *
+ * @list: list head for this entry
+ * @video: V4L2 video device associated with the video channel
+ * @video_lock: protects the @format and @queue fields
+ * @pad: media pad for the video device entity
+ *
+ * @vi: Tegra video input device structure
+ * @frame_start_sp: host1x syncpoint pointer to synchronize programmed capture
+ * start condition with hardware frame start events through host1x
+ * syncpoint counters.
+ * @mw_ack_sp: host1x syncpoint pointer to synchronize programmed memory write
+ * ack trigger condition with hardware memory write done at end of
+ * frame through host1x syncpoint counters.
+ * @sp_incr_lock: protects cpu syncpoint increment.
+ *
+ * @kthread_start_capture: kthread that starts the capture of a single frame
+ *		when a vb buffer is available. This thread programs the VI CSI
+ *		hardware for a single-frame capture and waits for the frame
+ *		start event from the hardware. On receiving the frame start
+ *		event, it wakes up the kthread_finish_capture thread to wait
+ *		for the frame data write to memory to finish. If the frame
+ *		start event is missed, this thread returns the buffer to vb
+ *		with VB2_BUF_STATE_ERROR.
+ * @start_wait: waitqueue for starting frame capture when a buffer is available.
+ * @kthread_finish_capture: kthread that finishes the buffer capture and
+ *		returns the buffer. This thread is woken up by
+ *		kthread_start_capture on the frame start event and then waits
+ *		for the MW_ACK_DONE event, which indicates that the frame data
+ *		has been written to memory. On receiving MW_ACK_DONE, the
+ *		buffer is returned to vb with VB2_BUF_STATE_DONE; if
+ *		MW_ACK_DONE is missed, the buffer is returned to vb with
+ *		VB2_BUF_STATE_ERROR.
+ * @done_wait: waitqueue for finishing capture data writes to memory.
+ *
+ * @format: active V4L2 pixel format
+ * @fmtinfo: format information corresponding to the active @format
+ * @queue: vb2 buffers queue
+ * @sequence: V4L2 buffers sequence number
+ *
+ * @capture: list of queued buffers for capture
+ * @start_lock: protects the capture queued list
+ * @done: list of capture done queued buffers
+ * @done_lock: protects the capture done queue list
+ *
+ * @portno: VI channel port number
+ *
+ * @ctrl_handler: V4L2 control handler of this video channel
+ * @tpg_fmts_bitmap: a bitmap for supported TPG formats
+ * @pg_mode: test pattern generator mode (disabled/direct/patch)
+ */
+struct tegra_vi_channel {
+ struct list_head list;
+ struct video_device video;
+ /* protects the @format and @queue fields */
+ struct mutex video_lock;
+ struct media_pad pad;
+
+ struct tegra_vi *vi;
+ struct host1x_syncpt *frame_start_sp;
+ struct host1x_syncpt *mw_ack_sp;
+ /* protects the cpu syncpoint increment */
+ spinlock_t sp_incr_lock;
+
+ struct task_struct *kthread_start_capture;
+ wait_queue_head_t start_wait;
+ struct task_struct *kthread_finish_capture;
+ wait_queue_head_t done_wait;
+
+ struct v4l2_pix_format format;
+ const struct tegra_video_format *fmtinfo;
+ struct vb2_queue queue;
+ u32 sequence;
+
+ struct list_head capture;
+ /* protects the capture queued list */
+ spinlock_t start_lock;
+ struct list_head done;
+ /* protects the capture done queue list */
+ spinlock_t done_lock;
+
+ unsigned char portno;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ DECLARE_BITMAP(tpg_fmts_bitmap, MAX_FORMAT_NUM);
+ enum tegra_vi_pg_mode pg_mode;
+};
+
+/**
+ * struct tegra_channel_buffer - video channel buffer
+ *
+ * @buf: vb2 buffer base object
+ * @queue: buffer list entry in the channel queued buffers list
+ * @chan: channel that uses the buffer
+ * @addr: Tegra IOVA buffer address for VI output
+ * @mw_ack_sp_thresh: MW_ACK_DONE syncpoint threshold corresponding
+ * to the capture buffer.
+ */
+struct tegra_channel_buffer {
+ struct vb2_v4l2_buffer buf;
+ struct list_head queue;
+ struct tegra_vi_channel *chan;
+ dma_addr_t addr;
+ u32 mw_ack_sp_thresh;
+};
+
+/*
+ * VI channel input data type enum.
+ * These data type enum values get programmed into the corresponding
+ * Tegra VI channel register bits.
+ */
+enum tegra_image_dt {
+ TEGRA_IMAGE_DT_YUV420_8 = 24,
+ TEGRA_IMAGE_DT_YUV420_10,
+
+ TEGRA_IMAGE_DT_YUV420CSPS_8 = 28,
+ TEGRA_IMAGE_DT_YUV420CSPS_10,
+ TEGRA_IMAGE_DT_YUV422_8,
+ TEGRA_IMAGE_DT_YUV422_10,
+ TEGRA_IMAGE_DT_RGB444,
+ TEGRA_IMAGE_DT_RGB555,
+ TEGRA_IMAGE_DT_RGB565,
+ TEGRA_IMAGE_DT_RGB666,
+ TEGRA_IMAGE_DT_RGB888,
+
+ TEGRA_IMAGE_DT_RAW6 = 40,
+ TEGRA_IMAGE_DT_RAW7,
+ TEGRA_IMAGE_DT_RAW8,
+ TEGRA_IMAGE_DT_RAW10,
+ TEGRA_IMAGE_DT_RAW12,
+ TEGRA_IMAGE_DT_RAW14,
+};
+
+/**
+ * struct tegra_video_format - Tegra video format description
+ *
+ * @img_dt: image data type
+ * @bit_width: format width in bits per component
+ * @code: media bus format code
+ * @bpp: bytes per pixel (when stored in memory)
+ * @img_fmt: image format
+ * @fourcc: V4L2 pixel format FourCC identifier
+ */
+struct tegra_video_format {
+ enum tegra_image_dt img_dt;
+ unsigned int bit_width;
+ unsigned int code;
+ unsigned int bpp;
+ u32 img_fmt;
+ u32 fourcc;
+};
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_vi_soc tegra210_vi_soc;
+#endif
+
+struct v4l2_subdev *
+tegra_channel_get_remote_subdev(struct tegra_vi_channel *chan);
+int tegra_channel_set_stream(struct tegra_vi_channel *chan, bool on);
+void tegra_channel_release_buffers(struct tegra_vi_channel *chan,
+ enum vb2_buffer_state state);
+void tegra_channels_cleanup(struct tegra_vi *vi);
+#endif
diff --git a/drivers/staging/media/tegra-video/video.c b/drivers/staging/media/tegra-video/video.c
new file mode 100644
index 000000000000..30816aa41e81
--- /dev/null
+++ b/drivers/staging/media/tegra-video/video.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "video.h"
+
+static void tegra_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct tegra_video_device *vid;
+
+ vid = container_of(v4l2_dev, struct tegra_video_device, v4l2_dev);
+
+	/* clean up channels here, as all video device nodes have been released */
+ tegra_channels_cleanup(vid->vi);
+
+ v4l2_device_unregister(v4l2_dev);
+ media_device_unregister(&vid->media_dev);
+ media_device_cleanup(&vid->media_dev);
+ kfree(vid);
+}
+
+static int host1x_video_probe(struct host1x_device *dev)
+{
+ struct tegra_video_device *vid;
+ int ret;
+
+ vid = kzalloc(sizeof(*vid), GFP_KERNEL);
+ if (!vid)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, vid);
+
+ vid->media_dev.dev = &dev->dev;
+ strscpy(vid->media_dev.model, "NVIDIA Tegra Video Input Device",
+ sizeof(vid->media_dev.model));
+
+ media_device_init(&vid->media_dev);
+ ret = media_device_register(&vid->media_dev);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to register media device: %d\n", ret);
+ goto cleanup;
+ }
+
+ vid->v4l2_dev.mdev = &vid->media_dev;
+ vid->v4l2_dev.release = tegra_v4l2_dev_release;
+ ret = v4l2_device_register(&dev->dev, &vid->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "V4L2 device registration failed: %d\n", ret);
+ goto unregister_media;
+ }
+
+ ret = host1x_device_init(dev);
+ if (ret < 0)
+ goto unregister_v4l2;
+
+ /*
+ * Both vi and csi channels are available now.
+ * Register v4l2 nodes and create media links for TPG.
+ */
+ ret = tegra_v4l2_nodes_setup_tpg(vid);
+ if (ret < 0) {
+ dev_err(&dev->dev,
+ "failed to setup tpg graph: %d\n", ret);
+ goto device_exit;
+ }
+
+ return 0;
+
+device_exit:
+ host1x_device_exit(dev);
+	/* the vi exit op does not clean up channels, so clean them up here */
+ tegra_channels_cleanup(vid->vi);
+unregister_v4l2:
+ v4l2_device_unregister(&vid->v4l2_dev);
+unregister_media:
+ media_device_unregister(&vid->media_dev);
+cleanup:
+ media_device_cleanup(&vid->media_dev);
+ kfree(vid);
+ return ret;
+}
+
+static int host1x_video_remove(struct host1x_device *dev)
+{
+ struct tegra_video_device *vid = dev_get_drvdata(&dev->dev);
+
+ tegra_v4l2_nodes_cleanup_tpg(vid);
+
+ host1x_device_exit(dev);
+
+ /* This calls v4l2_dev release callback on last reference */
+ v4l2_device_put(&vid->v4l2_dev);
+
+ return 0;
+}
+
+static const struct of_device_id host1x_video_subdevs[] = {
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-csi", },
+ { .compatible = "nvidia,tegra210-vi", },
+#endif
+ { }
+};
+
+static struct host1x_driver host1x_video_driver = {
+ .driver = {
+ .name = "tegra-video",
+ },
+ .probe = host1x_video_probe,
+ .remove = host1x_video_remove,
+ .subdevs = host1x_video_subdevs,
+};
+
+static struct platform_driver * const drivers[] = {
+ &tegra_csi_driver,
+ &tegra_vi_driver,
+};
+
+static int __init host1x_video_init(void)
+{
+ int err;
+
+ err = host1x_driver_register(&host1x_video_driver);
+ if (err < 0)
+ return err;
+
+ err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+ if (err < 0)
+ goto unregister_host1x;
+
+ return 0;
+
+unregister_host1x:
+ host1x_driver_unregister(&host1x_video_driver);
+ return err;
+}
+module_init(host1x_video_init);
+
+static void __exit host1x_video_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+ host1x_driver_unregister(&host1x_video_driver);
+}
+module_exit(host1x_video_exit);
+
+MODULE_AUTHOR("Sowjanya Komatineni <skomatineni@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra Host1x Video driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/tegra-video/video.h b/drivers/staging/media/tegra-video/video.h
new file mode 100644
index 000000000000..fadaf2189dc9
--- /dev/null
+++ b/drivers/staging/media/tegra-video/video.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __TEGRA_VIDEO_H__
+#define __TEGRA_VIDEO_H__
+
+#include <linux/host1x.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+#include "vi.h"
+#include "csi.h"
+
+struct tegra_video_device {
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct tegra_vi *vi;
+ struct tegra_csi *csi;
+};
+
+int tegra_v4l2_nodes_setup_tpg(struct tegra_video_device *vid);
+void tegra_v4l2_nodes_cleanup_tpg(struct tegra_video_device *vid);
+
+extern struct platform_driver tegra_vi_driver;
+extern struct platform_driver tegra_csi_driver;
+#endif
diff --git a/drivers/staging/most/usb/Kconfig b/drivers/staging/most/usb/Kconfig
index a86f1f63def4..75dc25c0e0e5 100644
--- a/drivers/staging/most/usb/Kconfig
+++ b/drivers/staging/most/usb/Kconfig
@@ -7,7 +7,7 @@ config MOST_USB
tristate "USB"
depends on USB && NET
help
- Say Y here if you want to connect via USB to network tranceiver.
+ Say Y here if you want to connect via USB to network transceiver.
This device driver depends on the networking AIM.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/most/usb/usb.c b/drivers/staging/most/usb/usb.c
index e8c5a8c98375..2640c5b326a4 100644
--- a/drivers/staging/most/usb/usb.c
+++ b/drivers/staging/most/usb/usb.c
@@ -5,7 +5,6 @@
* Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/usb.h>
@@ -140,9 +139,10 @@ static void wq_netinfo(struct work_struct *wq_obj);
static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
{
int retval;
- __le16 *dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
+ __le16 *dma_buf;
u8 req_type = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
@@ -153,7 +153,9 @@ static inline int drci_rd_reg(struct usb_device *dev, u16 reg, u16 *buf)
*buf = le16_to_cpu(*dma_buf);
kfree(dma_buf);
- return retval;
+ if (retval < 0)
+ return retval;
+ return 0;
}
/**
@@ -184,16 +186,18 @@ static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
/**
* get_stream_frame_size - calculate frame size of current configuration
+ * @dev: device structure
* @cfg: channel configuration
*/
-static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
+static unsigned int get_stream_frame_size(struct device *dev,
+ struct most_channel_config *cfg)
{
- unsigned int frame_size = 0;
+ unsigned int frame_size;
unsigned int sub_size = cfg->subbuffer_size;
if (!sub_size) {
- pr_warn("Misconfig: Subbuffer size zero.\n");
- return frame_size;
+ dev_warn(dev, "Misconfig: Subbuffer size zero.\n");
+ return 0;
}
switch (cfg->data_type) {
case MOST_CH_ISOC:
@@ -201,7 +205,7 @@ static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
break;
case MOST_CH_SYNC:
if (cfg->packets_per_xact == 0) {
- pr_warn("Misconfig: Packets per XACT zero\n");
+ dev_warn(dev, "Misconfig: Packets per XACT zero\n");
frame_size = 0;
} else if (cfg->packets_per_xact == 0xFF) {
frame_size = (USB_MTU / sub_size) * sub_size;
@@ -210,7 +214,8 @@ static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
}
break;
default:
- pr_warn("Query frame size of non-streaming channel\n");
+ dev_warn(dev, "Query frame size of non-streaming channel\n");
+ frame_size = 0;
break;
}
return frame_size;
@@ -233,11 +238,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
unsigned long flags;
spinlock_t *lock; /* temp. lock */
- if (unlikely(!iface)) {
- dev_warn(&mdev->usb_device->dev, "Poison: Bad interface.\n");
- return -EIO;
- }
- if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+ if (channel < 0 || channel >= iface->num_channels) {
dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
return -ECHRNG;
}
@@ -274,17 +275,17 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
{
struct most_channel_config *conf = &mdev->conf[channel];
- unsigned int frame_size = get_stream_frame_size(conf);
+ unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
unsigned int j, num_frames;
if (!frame_size)
- return -EIO;
+ return -EINVAL;
num_frames = mbo->buffer_length / frame_size;
if (num_frames < 1) {
dev_err(&mdev->usb_device->dev,
"Missed minimal transfer unit.\n");
- return -EIO;
+ return -EINVAL;
}
for (j = num_frames - 1; j > 0; j--)
@@ -308,11 +309,11 @@ static int hdm_remove_padding(struct most_dev *mdev, int channel,
struct mbo *mbo)
{
struct most_channel_config *const conf = &mdev->conf[channel];
- unsigned int frame_size = get_stream_frame_size(conf);
+ unsigned int frame_size = get_stream_frame_size(&mdev->dev, conf);
unsigned int j, num_frames;
if (!frame_size)
- return -EIO;
+ return -EINVAL;
num_frames = mbo->processed_length / USB_MTU;
for (j = 1; j < num_frames; j++)
@@ -386,103 +387,6 @@ static void hdm_write_completion(struct urb *urb)
* padding bytes -if necessary- and calls the completion function.
*
* Context: interrupt!
- *
- * **************************************************************************
- * Error codes returned by in urb->status
- * or in iso_frame_desc[n].status (for ISO)
- * *************************************************************************
- *
- * USB device drivers may only test urb status values in completion handlers.
- * This is because otherwise there would be a race between HCDs updating
- * these values on one CPU, and device drivers testing them on another CPU.
- *
- * A transfer's actual_length may be positive even when an error has been
- * reported. That's because transfers often involve several packets, so that
- * one or more packets could finish before an error stops further endpoint I/O.
- *
- * For isochronous URBs, the urb status value is non-zero only if the URB is
- * unlinked, the device is removed, the host controller is disabled or the total
- * transferred length is less than the requested length and the URB_SHORT_NOT_OK
- * flag is set. Completion handlers for isochronous URBs should only see
- * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
- * Individual frame descriptor status fields may report more status codes.
- *
- *
- * 0 Transfer completed successfully
- *
- * -ENOENT URB was synchronously unlinked by usb_unlink_urb
- *
- * -EINPROGRESS URB still pending, no results yet
- * (That is, if drivers see this it's a bug.)
- *
- * -EPROTO (*, **) a) bitstuff error
- * b) no response packet received within the
- * prescribed bus turn-around time
- * c) unknown USB error
- *
- * -EILSEQ (*, **) a) CRC mismatch
- * b) no response packet received within the
- * prescribed bus turn-around time
- * c) unknown USB error
- *
- * Note that often the controller hardware does not
- * distinguish among cases a), b), and c), so a
- * driver cannot tell whether there was a protocol
- * error, a failure to respond (often caused by
- * device disconnect), or some other fault.
- *
- * -ETIME (**) No response packet received within the prescribed
- * bus turn-around time. This error may instead be
- * reported as -EPROTO or -EILSEQ.
- *
- * -ETIMEDOUT Synchronous USB message functions use this code
- * to indicate timeout expired before the transfer
- * completed, and no other error was reported by HC.
- *
- * -EPIPE (**) Endpoint stalled. For non-control endpoints,
- * reset this status with usb_clear_halt().
- *
- * -ECOMM During an IN transfer, the host controller
- * received data from an endpoint faster than it
- * could be written to system memory
- *
- * -ENOSR During an OUT transfer, the host controller
- * could not retrieve data from system memory fast
- * enough to keep up with the USB data rate
- *
- * -EOVERFLOW (*) The amount of data returned by the endpoint was
- * greater than either the max packet size of the
- * endpoint or the remaining buffer size. "Babble".
- *
- * -EREMOTEIO The data read from the endpoint did not fill the
- * specified buffer, and URB_SHORT_NOT_OK was set in
- * urb->transfer_flags.
- *
- * -ENODEV Device was removed. Often preceded by a burst of
- * other errors, since the hub driver doesn't detect
- * device removal events immediately.
- *
- * -EXDEV ISO transfer only partially completed
- * (only set in iso_frame_desc[n].status, not urb->status)
- *
- * -EINVAL ISO madness, if this happens: Log off and go home
- *
- * -ECONNRESET URB was asynchronously unlinked by usb_unlink_urb
- *
- * -ESHUTDOWN The device or host controller has been disabled due
- * to some problem that could not be worked around,
- * such as a physical disconnect.
- *
- *
- * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
- * hardware problems such as bad devices (including firmware) or cables.
- *
- * (**) This is also one of several codes that different kinds of host
- * controller use to indicate a transfer has failed because of device
- * disconnect. In the interval before the hub driver starts disconnect
- * processing, devices may receive such fault reports for every request.
- *
- * See <https://www.kernel.org/doc/Documentation/driver-api/usb/error-codes.rst>
*/
static void hdm_read_completion(struct urb *urb)
{
@@ -552,36 +456,33 @@ static void hdm_read_completion(struct urb *urb)
static int hdm_enqueue(struct most_interface *iface, int channel,
struct mbo *mbo)
{
- struct most_dev *mdev;
+ struct most_dev *mdev = to_mdev(iface);
struct most_channel_config *conf;
int retval = 0;
struct urb *urb;
unsigned long length;
void *virt_address;
- if (unlikely(!iface || !mbo))
- return -EIO;
- if (unlikely(iface->num_channels <= channel || channel < 0))
+ if (!mbo)
+ return -EINVAL;
+ if (iface->num_channels <= channel || channel < 0)
return -ECHRNG;
- mdev = to_mdev(iface);
+ urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
conf = &mdev->conf[channel];
mutex_lock(&mdev->io_mutex);
if (!mdev->usb_device) {
retval = -ENODEV;
- goto unlock_io_mutex;
- }
-
- urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
- if (!urb) {
- retval = -ENOMEM;
- goto unlock_io_mutex;
+ goto err_free_urb;
}
if ((conf->direction & MOST_CH_TX) && mdev->padding_active[channel] &&
hdm_add_padding(mdev, channel, mbo)) {
- retval = -EIO;
+ retval = -EINVAL;
goto err_free_urb;
}
@@ -619,13 +520,13 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
"URB submit failed with error %d.\n", retval);
goto err_unanchor_urb;
}
- goto unlock_io_mutex;
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
err_unanchor_urb:
usb_unanchor_urb(urb);
err_free_urb:
usb_free_urb(urb);
-unlock_io_mutex:
mutex_unlock(&mdev->io_mutex);
return retval;
}
@@ -669,19 +570,20 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
struct most_dev *mdev = to_mdev(iface);
struct device *dev = &mdev->usb_device->dev;
- mdev->is_channel_healthy[channel] = true;
- mdev->clear_work[channel].channel = channel;
- mdev->clear_work[channel].mdev = mdev;
- INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
-
- if (unlikely(!iface || !conf)) {
- dev_err(dev, "Bad interface or config pointer.\n");
+ if (!conf) {
+ dev_err(dev, "Bad config pointer.\n");
return -EINVAL;
}
- if (unlikely(channel < 0 || channel >= iface->num_channels)) {
+ if (channel < 0 || channel >= iface->num_channels) {
dev_err(dev, "Channel ID out of range.\n");
return -EINVAL;
}
+
+ mdev->is_channel_healthy[channel] = true;
+ mdev->clear_work[channel].channel = channel;
+ mdev->clear_work[channel].mdev = mdev;
+ INIT_WORK(&mdev->clear_work[channel].ws, wq_clear_halt);
+
if (!conf->num_buffers || !conf->buffer_size) {
dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
return -EINVAL;
@@ -701,7 +603,7 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
mdev->padding_active[channel] = true;
- frame_size = get_stream_frame_size(conf);
+ frame_size = get_stream_frame_size(&mdev->dev, conf);
if (frame_size == 0 || frame_size > USB_MTU) {
dev_warn(dev, "Misconfig: frame size wrong\n");
return -EINVAL;
@@ -745,10 +647,8 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
unsigned char,
unsigned char *))
{
- struct most_dev *mdev;
+ struct most_dev *mdev = to_mdev(iface);
- BUG_ON(!iface);
- mdev = to_mdev(iface);
mdev->on_netinfo = on_netinfo;
if (!on_netinfo)
return;
@@ -787,22 +687,22 @@ static void wq_netinfo(struct work_struct *wq_obj)
u16 hi, mi, lo, link;
u8 hw_addr[6];
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi)) {
dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
return;
}
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi)) {
dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
return;
}
- if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo)) {
dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
return;
}
- if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+ if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link)) {
dev_err(dev, "Vendor request 'link status' failed\n");
return;
}
@@ -830,6 +730,8 @@ static void wq_clear_halt(struct work_struct *wq_obj)
struct most_dev *mdev = clear_work->mdev;
unsigned int channel = clear_work->channel;
int pipe = clear_work->pipe;
+ int snd_pipe;
+ int peer;
mutex_lock(&mdev->io_mutex);
most_stop_enqueue(&mdev->iface, channel);
@@ -847,9 +749,12 @@ static void wq_clear_halt(struct work_struct *wq_obj)
*/
if (mdev->conf[channel].data_type == MOST_CH_ASYNC &&
mdev->conf[channel].direction == MOST_CH_RX) {
- int peer = 1 - channel;
- int snd_pipe = usb_sndbulkpipe(mdev->usb_device,
- mdev->ep_address[peer]);
+ if (channel == 0)
+ peer = 1;
+ else
+ peer = 0;
+ snd_pipe = usb_sndbulkpipe(mdev->usb_device,
+ mdev->ep_address[peer]);
usb_clear_halt(mdev->usb_device, snd_pipe);
}
mdev->is_channel_healthy[channel] = true;
@@ -904,12 +809,12 @@ static int get_stat_reg_addr(const struct regs *regs, int size,
int i;
for (i = 0; i < size; i++) {
- if (!strcmp(name, regs[i].name)) {
+ if (sysfs_streq(name, regs[i].name)) {
*reg_addr = regs[i].reg;
return 0;
}
}
- return -EFAULT;
+ return -EINVAL;
}
#define get_static_reg_addr(regs, name, reg_addr) \
@@ -924,14 +829,14 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr,
u16 reg_addr;
int err;
- if (!strcmp(name, "arb_address"))
+ if (sysfs_streq(name, "arb_address"))
return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
- if (!strcmp(name, "arb_value"))
+ if (sysfs_streq(name, "arb_value"))
reg_addr = dci_obj->reg_addr;
else if (get_static_reg_addr(ro_regs, name, &reg_addr) &&
get_static_reg_addr(rw_regs, name, &reg_addr))
- return -EFAULT;
+ return -EINVAL;
err = drci_rd_reg(dci_obj->usb_device, reg_addr, &val);
if (err < 0)
@@ -948,24 +853,25 @@ static ssize_t value_store(struct device *dev, struct device_attribute *attr,
const char *name = attr->attr.name;
struct most_dci_obj *dci_obj = to_dci_obj(dev);
struct usb_device *usb_dev = dci_obj->usb_device;
- int err = kstrtou16(buf, 16, &val);
+ int err;
+ err = kstrtou16(buf, 16, &val);
if (err)
return err;
- if (!strcmp(name, "arb_address")) {
+ if (sysfs_streq(name, "arb_address")) {
dci_obj->reg_addr = val;
return count;
}
- if (!strcmp(name, "arb_value"))
+ if (sysfs_streq(name, "arb_value"))
err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
- else if (!strcmp(name, "sync_ep"))
+ else if (sysfs_streq(name, "sync_ep"))
err = start_sync_ep(usb_dev, val);
else if (!get_static_reg_addr(rw_regs, name, &reg_addr))
err = drci_wr_reg(usb_dev, reg_addr, val);
else
- return -EFAULT;
+ return -EINVAL;
if (err < 0)
return err;
@@ -1008,19 +914,13 @@ static struct attribute *dci_attrs[] = {
NULL,
};
-static struct attribute_group dci_attr_group = {
- .attrs = dci_attrs,
-};
-
-static const struct attribute_group *dci_attr_groups[] = {
- &dci_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(dci);
static void release_dci(struct device *dev)
{
struct most_dci_obj *dci = to_dci_obj(dev);
+ put_device(dev->parent);
kfree(dci);
}
@@ -1048,18 +948,23 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
struct usb_host_interface *usb_iface_desc = interface->cur_altsetting;
struct usb_device *usb_dev = interface_to_usbdev(interface);
struct device *dev = &usb_dev->dev;
- struct most_dev *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ struct most_dev *mdev;
unsigned int i;
unsigned int num_endpoints;
struct most_channel_capability *tmp_cap;
struct usb_endpoint_descriptor *ep_desc;
- int ret = 0;
+ int ret = -ENOMEM;
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
- goto err_out_of_memory;
+ return -ENOMEM;
usb_set_intfdata(interface, mdev);
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
+ if (num_endpoints > MAX_NUM_ENDPOINTS) {
+ kfree(mdev);
+ return -EINVAL;
+ }
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
@@ -1134,17 +1039,17 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
init_usb_anchor(&mdev->busy_urbs[i]);
spin_lock_init(&mdev->channel_lock[i]);
}
- dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
- le16_to_cpu(usb_dev->descriptor.idVendor),
- le16_to_cpu(usb_dev->descriptor.idProduct),
- usb_dev->bus->busnum,
- usb_dev->devnum);
-
- dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
- usb_dev->bus->busnum,
- usb_dev->devpath,
- usb_dev->config->desc.bConfigurationValue,
- usb_iface_desc->desc.bInterfaceNumber);
+ dev_dbg(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
+ le16_to_cpu(usb_dev->descriptor.idVendor),
+ le16_to_cpu(usb_dev->descriptor.idProduct),
+ usb_dev->bus->busnum,
+ usb_dev->devnum);
+
+ dev_dbg(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
+ usb_dev->bus->busnum,
+ usb_dev->devpath,
+ usb_dev->config->desc.bConfigurationValue,
+ usb_iface_desc->desc.bInterfaceNumber);
ret = most_register_interface(&mdev->iface);
if (ret)
@@ -1164,7 +1069,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
mdev->dci->dev.init_name = "dci";
mdev->dci->dev.parent = get_device(mdev->iface.dev);
- mdev->dci->dev.groups = dci_attr_groups;
+ mdev->dci->dev.groups = dci_groups;
mdev->dci->dev.release = release_dci;
if (device_register(&mdev->dci->dev)) {
mutex_unlock(&mdev->io_mutex);
@@ -1188,11 +1093,6 @@ err_free_conf:
kfree(mdev->conf);
err_free_mdev:
put_device(&mdev->dev);
-err_out_of_memory:
- if (ret == 0 || ret == -ENOMEM) {
- ret = -ENOMEM;
- dev_err(dev, "out of memory\n");
- }
return ret;
}
@@ -1225,14 +1125,43 @@ static void hdm_disconnect(struct usb_interface *interface)
kfree(mdev->cap);
kfree(mdev->conf);
kfree(mdev->ep_address);
+ put_device(&mdev->dci->dev);
put_device(&mdev->dev);
}
+static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
+{
+ struct most_dev *mdev = usb_get_intfdata(interface);
+ int i;
+
+ mutex_lock(&mdev->io_mutex);
+ for (i = 0; i < mdev->iface.num_channels; i++) {
+ most_stop_enqueue(&mdev->iface, i);
+ usb_kill_anchored_urbs(&mdev->busy_urbs[i]);
+ }
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
+static int hdm_resume(struct usb_interface *interface)
+{
+ struct most_dev *mdev = usb_get_intfdata(interface);
+ int i;
+
+ mutex_lock(&mdev->io_mutex);
+ for (i = 0; i < mdev->iface.num_channels; i++)
+ most_resume_enqueue(&mdev->iface, i);
+ mutex_unlock(&mdev->io_mutex);
+ return 0;
+}
+
static struct usb_driver hdm_usb = {
.name = "hdm_usb",
.id_table = usbid,
.probe = hdm_probe,
.disconnect = hdm_disconnect,
+ .resume = hdm_resume,
+ .suspend = hdm_suspend,
};
module_usb_driver(hdm_usb);
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 9e5cf68731bb..82aa93634eda 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -523,11 +523,10 @@
0x01000000 0 0x00000000 0x1e160000 0 0x00010000 /* io space */
>;
- #interrupt-cells = <1>;
- interrupt-map-mask = <0xF0000 0 0 1>;
- interrupt-map = <0x10000 0 0 1 &gic GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH>,
- <0x20000 0 0 1 &gic GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>,
- <0x30000 0 0 1 &gic GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 4 IRQ_TYPE_LEVEL_HIGH
+ GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH
+ GIC_SHARED 25 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
deleted file mode 100644
index a369d715378b..000000000000
--- a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Mediatek Mt7621 PCIe PHY
-
-Required properties:
-- compatible: must be "mediatek,mt7621-pci-phy"
-- reg: base address and length of the PCIe PHY block
-- #phy-cells: must be <1> for pcie0_phy and for pcie1_phy.
-
-Example:
- pcie0_phy: pcie-phy@1e149000 {
- compatible = "mediatek,mt7621-pci-phy";
- reg = <0x1e149000 0x0700>;
- #phy-cells = <1>;
- };
-
- pcie1_phy: pcie-phy@1e14a000 {
- compatible = "mediatek,mt7621-pci-phy";
- reg = <0x1e14a000 0x0700>;
- #phy-cells = <1>;
- };
-
- /* users of the PCIe phy */
-
- pcie: pcie@1e140000 {
- ...
- ...
- phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>;
- phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
- };
diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml
new file mode 100644
index 000000000000..cf32bbc45b5d
--- /dev/null
+++ b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/mediatek,mt7621-pci-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Mediatek Mt7621 PCIe PHY Device Tree Bindings
+
+maintainers:
+ - Sergio Paracuellos <sergio.paracuellos@gmail.com>
+
+properties:
+ compatible:
+ const: mediatek,mt7621-pci-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 1
+ description: selects if the phy is dual-ported
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ pcie0_phy: pcie-phy@1e149000 {
+ compatible = "mediatek,mt7621-pci-phy";
+ reg = <0x1e149000 0x0700>;
+ #phy-cells = <1>;
+ };
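
The consumer side of the binding is unchanged by the conversion from text to YAML. As a reference only, here is a sketch of a PCIe controller node consuming both PHYs, with node names, unit address and phandle arguments carried over from the example dropped from the old text binding (the controller's own properties are elided, not specified here):

    pcie: pcie@1e140000 {
            /* controller-specific properties omitted */
            phys = <&pcie0_phy 0>, <&pcie0_phy 1>, <&pcie1_phy 0>;
            phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
    };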
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index f58e3a51fc71..f961b353c22e 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -55,7 +55,7 @@
#define RALINK_PCI_IOBASE 0x002C
/* PCICFG virtual bridges */
-#define PCIE_P2P_MAX 3
+#define PCIE_P2P_CNT 3
#define PCIE_P2P_BR_DEVNUM_SHIFT(p) (16 + (p) * 4)
#define PCIE_P2P_BR_DEVNUM0_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(0)
#define PCIE_P2P_BR_DEVNUM1_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(1)
@@ -97,6 +97,7 @@
* @pcie_rst: pointer to port reset control
* @gpio_rst: gpio reset
* @slot: port slot
+ * @irq: GIC irq
* @enabled: indicates if port is enabled
*/
struct mt7621_pcie_port {
@@ -107,6 +108,7 @@ struct mt7621_pcie_port {
struct reset_control *pcie_rst;
struct gpio_desc *gpio_rst;
u32 slot;
+ int irq;
bool enabled;
};
@@ -120,6 +122,7 @@ struct mt7621_pcie_port {
* @dev: Pointer to PCIe device
* @io_map_base: virtual memory base address for io
* @ports: pointer to PCIe port information
+ * @irq_map: irq mapping info according to pcie link status
* @resets_inverted: depends on chip revision
* reset lines are inverted.
*/
@@ -135,6 +138,7 @@ struct mt7621_pcie {
} offset;
unsigned long io_map_base;
struct list_head ports;
+ int irq_map[PCIE_P2P_CNT];
bool resets_inverted;
};
@@ -279,6 +283,16 @@ static void setup_cm_memory_region(struct mt7621_pcie *pcie)
}
}
+static int mt7621_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+ struct mt7621_pcie *pcie = pdev->bus->sysdata;
+ struct device *dev = pcie->dev;
+ int irq = pcie->irq_map[slot];
+
+ dev_info(dev, "bus=%d slot=%d irq=%d\n", pdev->bus->number, slot, irq);
+ return irq;
+}
+
static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -330,6 +344,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
{
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *pnode = dev->of_node;
struct resource regs;
char name[10];
@@ -371,6 +386,12 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
port->slot = slot;
port->pcie = pcie;
+ port->irq = platform_get_irq(pdev, slot);
+ if (port->irq < 0) {
+ dev_err(dev, "Failed to get IRQ for PCIe%d\n", slot);
+ return -ENXIO;
+ }
+
INIT_LIST_HEAD(&port->list);
list_add_tail(&port->list, &pcie->ports);
@@ -502,17 +523,25 @@ static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
mt7621_pcie_reset_ep_deassert(pcie);
+ tmp = NULL;
list_for_each_entry(port, &pcie->ports, list) {
u32 slot = port->slot;
if (!mt7621_pcie_port_is_linkup(port)) {
dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
slot);
- if (slot != 1)
- phy_power_off(port->phy);
mt7621_control_assert(port);
mt7621_pcie_port_clk_disable(port);
port->enabled = false;
+
+ if (slot == 0) {
+ tmp = port;
+ continue;
+ }
+
+ if (slot == 1 && tmp && !tmp->enabled)
+ phy_power_off(tmp->phy);
+
}
}
}
@@ -576,14 +605,16 @@ static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
{
u32 pcie_link_status = 0;
- u32 n;
- int i;
- u32 p2p_br_devnum[PCIE_P2P_MAX];
+ u32 n = 0;
+ int i = 0;
+ u32 p2p_br_devnum[PCIE_P2P_CNT];
+ int irqs[PCIE_P2P_CNT];
struct mt7621_pcie_port *port;
list_for_each_entry(port, &pcie->ports, list) {
u32 slot = port->slot;
+ irqs[i++] = port->irq;
if (port->enabled)
pcie_link_status |= BIT(slot);
}
@@ -591,12 +622,16 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
if (pcie_link_status == 0)
return -1;
- n = 0;
- for (i = 0; i < PCIE_P2P_MAX; i++)
+ /*
+ * Assign device numbers from zero to the enabled ports,
+ * then assign the remaining device numbers to any disabled
+ * ports.
+ */
+ for (i = 0; i < PCIE_P2P_CNT; i++)
if (pcie_link_status & BIT(i))
p2p_br_devnum[i] = n++;
- for (i = 0; i < PCIE_P2P_MAX; i++)
+ for (i = 0; i < PCIE_P2P_CNT; i++)
if ((pcie_link_status & BIT(i)) == 0)
p2p_br_devnum[i] = n++;
@@ -606,6 +641,15 @@ static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
(p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) |
(p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT));
+ /* Assign IRQs */
+ n = 0;
+ for (i = 0; i < PCIE_P2P_CNT; i++)
+ if (pcie_link_status & BIT(i))
+ pcie->irq_map[n++] = irqs[i];
+
+ for (i = n; i < PCIE_P2P_CNT; i++)
+ pcie->irq_map[i] = -1;
+
return 0;
}
@@ -630,7 +674,7 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host,
host->busnr = pcie->busn.start;
host->dev.parent = pcie->dev;
host->ops = &mt7621_pci_ops;
- host->map_irq = of_irq_parse_and_map_pci;
+ host->map_irq = mt7621_map_irq;
host->swizzle_irq = pci_common_swizzle;
host->sysdata = pcie;
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index d0f06790d38f..caaf9e34f1ee 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -220,7 +220,7 @@ static int rt2880_pinmux_index(struct rt2880_priv *p)
/* allocate our function and group mapping index buffers */
f = p->func = devm_kcalloc(p->dev,
p->func_count,
- sizeof(struct rt2880_pmx_func),
+ sizeof(*p->func),
GFP_KERNEL);
gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int),
GFP_KERNEL);
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 313d22f6210f..c8d0c63fdd1d 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1230,6 +1230,7 @@ static int pi433_probe(struct spi_device *spi)
device->cdev = cdev_alloc();
if (!device->cdev) {
dev_dbg(device->dev, "allocation of cdev failed");
+ retval = -ENOMEM;
goto cdev_failed;
}
device->cdev->owner = THIS_MODULE;
diff --git a/drivers/staging/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 1795533cbd3a..058889687907 100644
--- a/drivers/staging/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
@@ -1559,18 +1559,17 @@ void ql_dump_stat(struct ql_adapter *qdev)
#ifdef QL_DEV_DUMP
#define DUMP_QDEV_FIELD(qdev, type, field) \
- pr_err("qdev->%-24s = " type "\n", #field, (qdev)->(field))
+ pr_err("qdev->%-24s = " type "\n", #field, (qdev)->field)
#define DUMP_QDEV_DMA_FIELD(qdev, field) \
pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
pr_err("%s[%d].%s = " type "\n", \
- #array, index, #field, (qdev)->array[index].field);
+ #array, index, #field, (qdev)->array[index].field)
void ql_dump_qdev(struct ql_adapter *qdev)
{
int i;
DUMP_QDEV_FIELD(qdev, "%lx", flags);
- DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
DUMP_QDEV_FIELD(qdev, "%p", pdev);
DUMP_QDEV_FIELD(qdev, "%p", ndev);
DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
@@ -1758,8 +1757,6 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
rx_ring->lbq.prod_idx_db_reg);
pr_err("rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
pr_err("rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
- pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
- pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
pr_err("rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
pr_err("rx_ring->sbq.base_dma = %llx\n",
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index c92820f07968..402edaeffe12 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -214,19 +214,20 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
u32 mask;
u32 value;
- direction =
- (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE;
+ if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
+ direction = DMA_TO_DEVICE;
+ else
+ direction = DMA_FROM_DEVICE;
- map = pci_map_single(qdev->pdev, ptr, size, direction);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
+ map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
+ if (dma_mapping_error(&qdev->pdev->dev, map)) {
netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
if (status)
- return status;
+ goto lock_failed;
status = ql_wait_cfg(qdev, bit);
if (status) {
@@ -235,8 +236,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
goto exit;
}
- ql_write32(qdev, ICB_L, (u32) map);
- ql_write32(qdev, ICB_H, (u32) (map >> 32));
+ ql_write32(qdev, ICB_L, (u32)map);
+ ql_write32(qdev, ICB_H, (u32)(map >> 32));
mask = CFG_Q_MASK | (bit << 16);
value = bit | (q_id << CFG_Q_SHIFT);
@@ -248,7 +249,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
status = ql_wait_cfg(qdev, bit);
exit:
ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
- pci_unmap_single(qdev->pdev, map, size, direction);
+lock_failed:
+ dma_unmap_single(&qdev->pdev->dev, map, size, direction);
return status;
}
@@ -261,52 +263,50 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ case MAC_ADDR_TYPE_CAM_MAC: {
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ MAC_ADDR_ADR | MAC_ADDR_RS |
+ type); /* type */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ break;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ MAC_ADDR_ADR | MAC_ADDR_RS |
+ type); /* type */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
+ if (status)
+ break;
+ *value++ = ql_read32(qdev, MAC_ADDR_DATA);
+ if (type == MAC_ADDR_TYPE_CAM_MAC) {
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MW, 0);
if (status)
- goto exit;
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
+ (index
+ << MAC_ADDR_IDX_SHIFT) | /* index */
+ MAC_ADDR_ADR |
+ MAC_ADDR_RS | type); /* type */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
+ MAC_ADDR_MR, 0);
+ if (status)
+ break;
*value++ = ql_read32(qdev, MAC_ADDR_DATA);
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW,
- 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- }
- break;
}
+ break;
+ }
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
@@ -314,7 +314,6 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
-exit:
return status;
}
@@ -328,107 +327,93 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
int status = 0;
switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- {
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | (addr[5]);
-
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
+ case MAC_ADDR_TYPE_MULTI_MAC: {
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ (addr[5]);
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
break;
- }
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- u32 cam_output;
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower =
- (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
+ MAC_ADDR_E);
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
+ MAC_ADDR_E);
+
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ break;
+ }
+ case MAC_ADDR_TYPE_CAM_MAC: {
+ u32 cam_output;
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ break;
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset++) | /* offset */
(index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- /* This field should also include the queue id
- * and possibly the function id. Right now we hardcode
- * the route field to NIC core.
- */
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->
- func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ type); /* type */
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
break;
- }
- case MAC_ADDR_TYPE_VLAN:
- {
- u32 enable_bit = *((u32 *) &addr[0]);
- /* For VLAN, the addr actually holds a bit that
- * either enables or disables the vlan id we are
- * addressing. It's either MAC_ADDR_E on or off.
- * That's bit-27 we're talking about.
- */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type | /* type */
- enable_bit); /* enable/disable */
+ ql_write32(qdev, MAC_ADDR_IDX,
+ (offset) | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type); /* type */
+ /* This field should also include the queue id
+ * and possibly the function id. Right now we hardcode
+ * the route field to NIC core.
+ */
+ cam_output = (CAM_OUT_ROUTE_NIC |
+ (qdev->func << CAM_OUT_FUNC_SHIFT) |
+ (0 << CAM_OUT_CQ_ID_SHIFT));
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ cam_output |= CAM_OUT_RV;
+ /* route to NIC core */
+ ql_write32(qdev, MAC_ADDR_DATA, cam_output);
+ break;
+ }
+ case MAC_ADDR_TYPE_VLAN: {
+ u32 enable_bit = *((u32 *)&addr[0]);
+ /* For VLAN, the addr actually holds a bit that
+ * either enables or disables the vlan id we are
+ * addressing. It's either MAC_ADDR_E on or off.
+ * That's bit-27 we're talking about.
+ */
+ status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
break;
- }
+ ql_write32(qdev, MAC_ADDR_IDX,
+ offset | /* offset */
+ (index << MAC_ADDR_IDX_SHIFT) | /* index */
+ type | /* type */
+ enable_bit); /* enable/disable */
+ break;
+ }
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
netif_crit(qdev, ifup, qdev->ndev,
"Address type %d not yet supported.\n", type);
status = -EPERM;
}
-exit:
return status;
}
@@ -455,7 +440,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+ status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
MAC_ADDR_TYPE_CAM_MAC,
qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -857,7 +842,7 @@ int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
if (status)
goto exit;
- *data = (u64) lo | ((u64) hi << 32);
+ *data = (u64)lo | ((u64)hi << 32);
exit:
return status;
@@ -983,14 +968,14 @@ static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
{
struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
- pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
- qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size, DMA_FROM_DEVICE);
if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
ql_lbq_block_size(qdev)) {
/* last chunk of the master page */
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
}
return lbq_desc;
@@ -1036,10 +1021,10 @@ static int qlge_refill_sb(struct rx_ring *rx_ring,
return -ENOMEM;
skb_reserve(skb, QLGE_SB_PAD);
- sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+ sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
dev_kfree_skb_any(skb);
return -EIO;
@@ -1064,10 +1049,10 @@ static int qlge_refill_lb(struct rx_ring *rx_ring,
page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
if (unlikely(!page))
return -ENOMEM;
- dma_addr = pci_map_page(qdev->pdev, page, 0,
+ dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
__free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
@@ -1224,20 +1209,20 @@ static void ql_unmap_send(struct ql_adapter *qdev,
qdev->ndev,
"unmapping OAL area.\n");
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
- maplen), PCI_DMA_TODEVICE);
+ maplen), DMA_TO_DEVICE);
}
}
@@ -1263,9 +1248,9 @@ static int ql_map_send(struct ql_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping failed with error: %d\n", err);
@@ -1310,10 +1295,10 @@ static int ql_map_send(struct ql_adapter *qdev,
* etc...
*/
/* Tack on the OAL in the eighth segment of IOCB. */
- map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+ map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -1584,8 +1569,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb_put_data(new_skb, skb->data, length);
@@ -1647,7 +1632,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
@@ -1707,8 +1692,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* Headers fit nicely into a small buffer.
*/
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
@@ -1737,10 +1722,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* buffer.
*/
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev,
+ sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
+ DMA_FROM_DEVICE);
skb_put_data(skb, sbq_desc->p.skb->data, length);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1750,9 +1735,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
sbq_desc->p.skb = NULL;
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
@@ -1787,9 +1772,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
"No skb available, drop the packet.\n");
return NULL;
}
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
qdev->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
@@ -1820,8 +1805,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
int size, i = 0;
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
* This is an non TCP/UDP IP frame, so
@@ -1936,7 +1921,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
+ struct iphdr *iph = (struct iphdr *)skb->data;
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
@@ -2317,7 +2302,7 @@ static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
u32 enable_bit = MAC_ADDR_E;
int err;
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
@@ -2348,7 +2333,7 @@ static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
u32 enable_bit = 0;
int err;
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
MAC_ADDR_TYPE_VLAN, vid);
if (err)
netif_err(qdev, ifup, qdev->ndev,
@@ -2489,7 +2474,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->total_hdrs_len =
cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
mac_iocb_ptr->net_trans_offset =
@@ -2527,7 +2512,7 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
__sum16 *check;
mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
+ mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
@@ -2558,7 +2543,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
struct ql_adapter *qdev = netdev_priv(ndev);
int tso;
struct tx_ring *tx_ring;
- u32 tx_ring_idx = (u32) skb->queue_mapping;
+ u32 tx_ring_idx = (u32)skb->queue_mapping;
tx_ring = &qdev->tx_ring[tx_ring_idx];
@@ -2585,7 +2570,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->txq_idx = tx_ring_idx;
tx_ring_desc->skb = skb;
- mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
+ mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
if (skb_vlan_tag_present(skb)) {
netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
@@ -2636,17 +2621,17 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
qdev->rx_ring_shadow_reg_area = NULL;
}
if (qdev->tx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->tx_ring_shadow_reg_area,
- qdev->tx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->tx_ring_shadow_reg_area,
+ qdev->tx_ring_shadow_reg_dma);
qdev->tx_ring_shadow_reg_area = NULL;
}
}
@@ -2654,8 +2639,8 @@ static void ql_free_shadow_space(struct ql_adapter *qdev)
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
qdev->rx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->rx_ring_shadow_reg_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
if (!qdev->rx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
@@ -2663,8 +2648,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
}
qdev->tx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
if (!qdev->tx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
@@ -2673,10 +2658,10 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
return 0;
err_wqp_sh_area:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
return -ENOMEM;
}
@@ -2702,8 +2687,8 @@ static void ql_free_tx_resources(struct ql_adapter *qdev,
struct tx_ring *tx_ring)
{
if (tx_ring->wq_base) {
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
}
kfree(tx_ring->q);
@@ -2714,8 +2699,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
struct tx_ring *tx_ring)
{
tx_ring->wq_base =
- pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
- &tx_ring->wq_base_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ &tx_ring->wq_base_dma, GFP_ATOMIC);
if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
@@ -2729,8 +2714,8 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
return 0;
err:
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
pci_alloc_err:
netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
@@ -2748,17 +2733,17 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
&lbq->queue[lbq->next_to_clean];
if (lbq_desc->p.pg_chunk.offset == last_offset)
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(lbq_desc->p.pg_chunk.page);
lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
}
if (rx_ring->master_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
+ ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
put_page(rx_ring->master_chunk.page);
rx_ring->master_chunk.page = NULL;
}
@@ -2777,9 +2762,9 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
return;
}
if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
}
@@ -2820,8 +2805,8 @@ static int qlge_init_bq(struct qlge_bq *bq)
__le64 *buf_ptr;
int i;
- bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
- &bq->base_dma);
+ bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ &bq->base_dma, GFP_ATOMIC);
if (!bq->base) {
netif_err(qdev, ifup, qdev->ndev,
"ring %u %s allocation failed.\n", rx_ring->cq_id,
@@ -2850,8 +2835,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
{
/* Free the small buffer queue. */
if (rx_ring->sbq.base) {
- pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
- rx_ring->sbq.base, rx_ring->sbq.base_dma);
+ dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ rx_ring->sbq.base, rx_ring->sbq.base_dma);
rx_ring->sbq.base = NULL;
}
@@ -2861,8 +2846,8 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
/* Free the large buffer queue. */
if (rx_ring->lbq.base) {
- pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
- rx_ring->lbq.base, rx_ring->lbq.base_dma);
+ dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ rx_ring->lbq.base, rx_ring->lbq.base_dma);
rx_ring->lbq.base = NULL;
}
@@ -2872,9 +2857,9 @@ static void ql_free_rx_resources(struct ql_adapter *qdev,
/* Free the rx queue. */
if (rx_ring->cq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->cq_size,
- rx_ring->cq_base, rx_ring->cq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ rx_ring->cq_size,
+ rx_ring->cq_base, rx_ring->cq_base_dma);
rx_ring->cq_base = NULL;
}
}
@@ -2890,8 +2875,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
* Allocate the completion queue for this rx_ring.
*/
rx_ring->cq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
- &rx_ring->cq_base_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
+ &rx_ring->cq_base_dma, GFP_ATOMIC);
if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
@@ -3008,7 +2993,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x00 for consumer index register */
- rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
+ rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
rx_ring->cnsmr_idx = 0;
rx_ring->curr_entry = rx_ring->cq_base;
@@ -3108,7 +3093,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
* Assign doorbell registers for this tx_ring.
*/
/* TX PCI doorbell mem area for tx producer index */
- tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
+ tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
tx_ring->prod_idx = 0;
/* TX PCI doorbell mem area + 0x04 */
tx_ring->valid_db_reg = doorbell_area + 0x04;
@@ -3131,7 +3116,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
ql_init_tx_ring(qdev, tx_ring);
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
- (u16) tx_ring->wq_id);
+ (u16)tx_ring->wq_id);
if (err) {
netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
@@ -3431,9 +3416,9 @@ static int ql_request_irq(struct ql_adapter *qdev)
&qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
- test_bit(QL_MSI_ENABLED,
- &qdev->
- flags) ? 0 : IRQF_SHARED,
+ test_bit(QL_MSI_ENABLED, &qdev->flags)
+ ? 0
+ : IRQF_SHARED,
intr_context->name, &qdev->rx_ring[0]);
if (status)
goto err_irq;
@@ -3463,7 +3448,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
- u8 *hash_id = (u8 *) ricb->hash_cq_id;
+ u8 *hash_id = (u8 *)ricb->hash_cq_id;
memset((void *)ricb, 0, sizeof(*ricb));
@@ -4125,11 +4110,11 @@ static struct net_device_stats *qlge_get_stats(struct net_device
/* Get RX stats. */
pkts = mcast = dropped = errors = bytes = 0;
for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- pkts += rx_ring->rx_packets;
- bytes += rx_ring->rx_bytes;
- dropped += rx_ring->rx_dropped;
- errors += rx_ring->rx_errors;
- mcast += rx_ring->rx_multicast;
+ pkts += rx_ring->rx_packets;
+ bytes += rx_ring->rx_bytes;
+ dropped += rx_ring->rx_dropped;
+ errors += rx_ring->rx_errors;
+ mcast += rx_ring->rx_multicast;
}
ndev->stats.rx_packets = pkts;
ndev->stats.rx_bytes = bytes;
@@ -4140,9 +4125,9 @@ static struct net_device_stats *qlge_get_stats(struct net_device
/* Get TX stats. */
pkts = errors = bytes = 0;
for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
- pkts += tx_ring->tx_packets;
- bytes += tx_ring->tx_bytes;
- errors += tx_ring->tx_errors;
+ pkts += tx_ring->tx_packets;
+ bytes += tx_ring->tx_bytes;
+ errors += tx_ring->tx_errors;
}
ndev->stats.tx_packets = pkts;
ndev->stats.tx_bytes = bytes;
@@ -4218,7 +4203,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
goto exit;
i = 0;
netdev_for_each_mc_addr(ha, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
+ if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
"Failed to loadmulticast address.\n");
@@ -4255,7 +4240,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
+ status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC,
qdev->func * MAX_CQ);
if (status)
@@ -4430,13 +4415,14 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
}
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
set_bit(QL_DMA64, &qdev->flags);
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
}
if (err) {
@@ -4448,8 +4434,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
pdev->needs_freset = 1;
pci_save_state(pdev);
qdev->reg_base =
- ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
if (!qdev->reg_base) {
dev_err(&pdev->dev, "Register mapping failed.\n");
err = -ENOMEM;
@@ -4458,8 +4443,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev->doorbell_area =
- ioremap(pci_resource_start(pdev, 3),
- pci_resource_len(pdev, 3));
+ ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
if (!qdev->doorbell_area) {
dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
err = -ENOMEM;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 93283c7deec4..817793b9aff2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -264,8 +264,10 @@ void expire_timeout_chk(struct adapter *padapter)
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
- DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ DBG_88E("asoc expire %pM, state = 0x%x\n",
+ (psta->hwaddr), psta->state);
+ updated = ap_free_sta(padapter, psta, true,
+ WLAN_REASON_DEAUTH_LEAVING);
} else {
/* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
if (psta->sleepq_len > (NR_XMITFRAME / pstapriv->asoc_list_cnt) &&
@@ -294,16 +296,21 @@ void expire_timeout_chk(struct adapter *padapter)
for (i = 0; i < chk_alive_num; i++) {
int ret = _FAIL;
- psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+ psta = rtw_get_stainfo_by_offset(pstapriv,
+ chk_alive_list[i]);
- if (psta->state & WIFI_SLEEP_STATE)
- ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
- else
- ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);
+ if (psta->state & WIFI_SLEEP_STATE) {
+ ret = issue_nulldata(padapter, psta->hwaddr,
+ 0, 1, 50);
+ } else {
+ ret = issue_nulldata(padapter, psta->hwaddr,
+ 0, 3, 50);
+ }
psta->keep_alive_trycnt++;
if (ret == _SUCCESS) {
- DBG_88E("asoc check, sta(%pM) is alive\n", (psta->hwaddr));
+ DBG_88E("asoc check, sta(%pM) is alive\n",
+ (psta->hwaddr));
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
continue;
@@ -315,11 +322,13 @@ void expire_timeout_chk(struct adapter *padapter)
psta->keep_alive_trycnt = 0;
- DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
+ DBG_88E("asoc expire %pM, state = 0x%x\n",
+ psta->hwaddr, psta->state);
spin_lock_bh(&pstapriv->asoc_list_lock);
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
- updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ updated = ap_free_sta(padapter, psta, true,
+ WLAN_REASON_DEAUTH_LEAVING);
spin_unlock_bh(&pstapriv->asoc_list_lock);
}
@@ -431,7 +440,8 @@ static void update_bmc_sta(struct adapter *padapter)
supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates);
- memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
+ memcpy(psta->bssrateset, &pcur_network->SupportedRates,
+ supportRateNum);
psta->bssratelen = supportRateNum;
/* b/g mode ra_bitmap */
@@ -445,7 +455,8 @@ static void update_bmc_sta(struct adapter *padapter)
tx_ra_bitmap = 0xf;
raid = networktype_to_raid(network_type);
- init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
+ init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) &
+ 0x3f;
/* ap mode */
rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
@@ -456,7 +467,8 @@ static void update_bmc_sta(struct adapter *padapter)
arg = psta->mac_id & 0x1f;
arg |= BIT(7);
tx_ra_bitmap |= ((raid << 28) & 0xf0000000);
- DBG_88E("%s, mask = 0x%x, arg = 0x%x\n", __func__, tx_ra_bitmap, arg);
+ DBG_88E("%s, mask = 0x%x, arg = 0x%x\n", __func__,
+ tx_ra_bitmap, arg);
/* bitmap[0:27] = tx_rate_bitmap */
/* bitmap[28:31]= Rate Adaptive id */
@@ -647,7 +659,8 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
/* Beacon Control related register */
- rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL,
+ (u8 *)(&bcn_interval));
UpdateBrateTbl(padapter, pnetwork->SupportedRates);
rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
@@ -657,7 +670,10 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
}
/* set channel, bwmode */
- p = rtw_get_ie((pnetwork->ies + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->ie_length - sizeof(struct ndis_802_11_fixed_ie)));
+ p = rtw_get_ie(pnetwork->ies + sizeof(struct ndis_802_11_fixed_ie),
+ _HT_ADD_INFO_IE_, &ie_len,
+ pnetwork->ie_length -
+ sizeof(struct ndis_802_11_fixed_ie));
if (p && ie_len) {
pht_info = (struct HT_info_element *)(p + 2);
@@ -682,7 +698,8 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
*/
set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
- DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode, cur_ch_offset);
+ DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode,
+ cur_ch_offset);
/* */
pmlmeext->cur_channel = cur_channel;
@@ -771,17 +788,19 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
cap = get_unaligned_le16(ie);
/* SSID */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0) {
memset(&pbss_network->ssid, 0, sizeof(struct ndis_802_11_ssid));
- memcpy(pbss_network->ssid.ssid, (p + 2), ie_len);
+ memcpy(pbss_network->ssid.ssid, p + 2, ie_len);
pbss_network->ssid.ssid_length = ie_len;
}
/* channel */
channel = 0;
pbss_network->Configuration.Length = 0;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len,
+ (pbss_network->ie_length - _BEACON_IE_OFFSET_));
if (p && ie_len > 0)
channel = *(p + 2);
@@ -789,14 +808,16 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
/* get supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p) {
memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
/* get ext_supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_,
+ &ie_len, pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p) {
memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
@@ -807,7 +828,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
/* parsing ERP_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0)
ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
@@ -824,7 +846,8 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
pairwise_cipher = 0;
psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len,
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0) {
if (rtw_parse_wpa2_ie(p, ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
@@ -844,7 +867,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2));
if ((p) && (!memcmp(p + 2, OUI1, 4))) {
if (rtw_parse_wpa_ie(p, ie_len + 2, &group_cipher,
&pairwise_cipher, NULL) == _SUCCESS) {
@@ -869,7 +892,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
if (pregistrypriv->wmm_enable) {
for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_ - (ie_len + 2));
if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
pmlmepriv->qospriv.qos_option = 1;
@@ -892,7 +915,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
}
/* parsing HT_CAP_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0) {
struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
@@ -916,7 +939,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
/* parsing HT_INFO_IE */
p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
- (pbss_network->ie_length - _BEACON_IE_OFFSET_));
+ pbss_network->ie_length - _BEACON_IE_OFFSET_);
if (p && ie_len > 0)
pHT_info_ie = p;
switch (network_type) {
@@ -1226,17 +1249,17 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
}
/*
-op_mode
-Set to 0 (HT pure) under the following conditions
- - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
-Set to 1 (HT non-member protection) if there may be non-HT STAs
- in both the primary and the secondary channel
-Set to 2 if only HT STAs are associated in BSS,
- however and at least one 20 MHz HT STA is associated
-Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
- (currently non-GF HT station is considered as non-HT STA also)
-*/
+ * op_mode
+ * Set to 0 (HT pure) under the following conditions
+ * - all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
+ * - all STAs in the BSS are 20 MHz HT in 20 MHz BSS
+ * Set to 1 (HT non-member protection) if there may be non-HT STAs
+ * in both the primary and the secondary channel
+ * Set to 2 if only HT STAs are associated in BSS,
+ * however and at least one 20 MHz HT STA is associated
+ * Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ * (currently non-GF HT station is considered as non-HT STA also)
+ */
static int rtw_ht_operation_update(struct adapter *padapter)
{
u16 cur_op_mode, new_op_mode;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index c525682d0edf..9bb3ec0cd62f 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -370,28 +370,27 @@ static u16 Efuse_GetCurrentSize(struct adapter *pAdapter)
while (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data) &&
AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_data != 0xFF) {
- if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
- hoffset = efuse_data;
+ if (efuse_data == 0xFF)
+ break;
+ if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
+ hoffset = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data);
+ if ((efuse_data & 0x0F) == 0x0F) {
efuse_addr++;
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data);
- if ((efuse_data & 0x0F) == 0x0F) {
- efuse_addr++;
- continue;
- } else {
- hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- }
+ continue;
} else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
+ hoffset = ((hoffset & 0xE0) >> 5) |
+ ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
}
- word_cnts = Efuse_CalculateWordCnts(hworden);
- /* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
} else {
- break;
+ hoffset = (efuse_data >> 4) & 0x0F;
+ hworden = efuse_data & 0x0F;
}
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ /* read next header */
+ efuse_addr = efuse_addr + (word_cnts * 2) + 1;
}
rtw_hal_set_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index e186982d5908..caf600eba03b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -253,11 +253,11 @@ int rtw_generate_ie(struct registry_priv *pregistrypriv)
}
/* DS parameter set */
- ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
+ ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&pdev_network->Configuration.DSConfig, &sz);
/* IBSS Parameter Set */
- ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
+ ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&pdev_network->Configuration.ATIMWindow, &sz);
if (rateLen > 8)
ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index d1406cc99768..32dccae186ca 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -90,7 +90,6 @@ static void SwLedBlink1(struct LED_871x *pLed)
{
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
/* Change LED according to BlinkingLedState specified. */
if (pLed->BlinkingLedState == RTW_LED_ON) {
@@ -128,9 +127,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
break;
case LED_BLINK_SCAN:
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
@@ -164,9 +161,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
break;
case LED_BLINK_TXRX:
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
@@ -188,7 +183,6 @@ static void SwLedBlink1(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
}
- pLed->BlinkTimes = 0;
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
@@ -208,12 +202,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- bStopBlinking = false;
- else
- bStopBlinking = true;
-
- if (bStopBlinking) {
+ if (pLed->BlinkingLedState != RTW_LED_ON) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 04897cd48370..8d035f67ef61 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1781,7 +1781,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
if (!p || len == 0) { /* non-HT */
- if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
+ if (pbss_network->Configuration.DSConfig <= 0)
continue;
ICS[0][pbss_network->Configuration.DSConfig] = 1;
@@ -1932,11 +1932,11 @@ static void site_survey(struct adapter *padapter)
if (pmlmeext->sitesurvey_res.ssid[i].ssid_length) {
/* todo: to issue two probe req??? */
issue_probereq(padapter,
- &(pmlmeext->sitesurvey_res.ssid[i]),
+ &pmlmeext->sitesurvey_res.ssid[i],
NULL, false);
/* msleep(SURVEY_TO>>1); */
issue_probereq(padapter,
- &(pmlmeext->sitesurvey_res.ssid[i]),
+ &pmlmeext->sitesurvey_res.ssid[i],
NULL, false);
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index c4f58507dbfd..c000382c96d9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -173,7 +173,7 @@ int ips_leave(struct adapter *padapter)
DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
- if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+ if ((psecuritypriv->dot11PrivacyAlgrthm == _WEP40_) || (psecuritypriv->dot11PrivacyAlgrthm == _WEP104_)) {
DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
for (keyid = 0; keyid < 4; keyid++) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index d4278361e002..a036ef104198 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -1525,21 +1525,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
- if (sub_skb) {
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, pdata, nSubframe_Length);
- } else {
- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
- if (sub_skb) {
- sub_skb->data = pdata;
- sub_skb->len = nSubframe_Length;
- skb_set_tail_pointer(sub_skb, nSubframe_Length);
- } else {
- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
- break;
- }
+ if (!sub_skb) {
+ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
+ break;
}
+ skb_reserve(sub_skb, 12);
+ skb_put_data(sub_skb, pdata, nSubframe_Length);
+
subframes[nr_subframes++] = sub_skb;
if (nr_subframes >= MAX_SUBFRAME_COUNT) {
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 486ee4bd4744..3d1d29e9f8e0 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -111,7 +111,7 @@ static int _rtl88e_fw_free_to_go(struct adapter *adapt)
do {
value32 = usb_read32(adapt, REG_MCUFWDL);
- if (value32 & FWDL_ChkSum_rpt)
+ if (value32 & FWDL_CHKSUM_RPT)
break;
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
@@ -146,7 +146,7 @@ int rtl88eu_download_fw(struct adapter *adapt)
struct dvobj_priv *dvobj = adapter_to_dvobj(adapt);
struct device *device = dvobj_to_dev(dvobj);
const struct firmware *fw;
- const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
+ static const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
struct rtl92c_firmware_header *pfwheader = NULL;
u8 *download_data, *fw_data;
size_t download_size;
@@ -192,7 +192,8 @@ int rtl88eu_download_fw(struct adapter *adapt)
rtl88e_firmware_selfreset(adapt);
}
_rtl88e_enable_fw_download(adapt, true);
- usb_write8(adapt, REG_MCUFWDL, usb_read8(adapt, REG_MCUFWDL) | FWDL_ChkSum_rpt);
+ usb_write8(adapt, REG_MCUFWDL,
+ usb_read8(adapt, REG_MCUFWDL) | FWDL_CHKSUM_RPT);
_rtl88e_write_fw(adapt, download_data, download_size);
_rtl88e_enable_fw_download(adapt, false);
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 698377ea60ee..28974808839d 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -5,8 +5,6 @@
*
******************************************************************************/
-/* include files */
-
#include "odm_precomp.h"
#include "phy.h"
@@ -193,7 +191,7 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
odm_DIG(pDM_Odm);
odm_CCKPacketDetectionThresh(pDM_Odm);
- if (*(pDM_Odm->pbPowerSaving))
+ if (*pDM_Odm->pbPowerSaving)
return;
odm_RefreshRateAdaptiveMask(pDM_Odm);
@@ -229,13 +227,13 @@ void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
u8 i;
struct sta_info *pEntry;
- if (*(pDM_Odm->pBandWidth) == ODM_BW40M) {
- if (*(pDM_Odm->pSecChOffset) == 1)
- pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) - 2;
- else if (*(pDM_Odm->pSecChOffset) == 2)
- pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) + 2;
+ if (*pDM_Odm->pBandWidth == ODM_BW40M) {
+ if (*pDM_Odm->pSecChOffset == 1)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
+ else if (*pDM_Odm->pSecChOffset == 2)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
} else {
- pDM_Odm->ControlChannel = *(pDM_Odm->pChannel);
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
}
for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
@@ -270,16 +268,16 @@ void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
{
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoHook_Debug==>\n"));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *(pDM_Odm->pNumTxBytesUnicast)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *(pDM_Odm->pNumRxBytesUnicast)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *(pDM_Odm->pWirelessMode)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *(pDM_Odm->pSecChOffset)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *(pDM_Odm->pSecurity)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *(pDM_Odm->pBandWidth)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *(pDM_Odm->pChannel)));
-
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *(pDM_Odm->pbScanInProcess)));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *(pDM_Odm->pbPowerSaving)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *pDM_Odm->pNumTxBytesUnicast));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *pDM_Odm->pNumRxBytesUnicast));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *pDM_Odm->pWirelessMode));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *pDM_Odm->pSecChOffset));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *pDM_Odm->pSecurity));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *pDM_Odm->pBandWidth));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *pDM_Odm->pChannel));
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *pDM_Odm->pbScanInProcess));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *pDM_Odm->pbPowerSaving));
}
void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
@@ -348,7 +346,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
return;
}
- if (*(pDM_Odm->pbScanInProcess)) {
+ if (*pDM_Odm->pbScanInProcess) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: In Scan Progress\n"));
return;
}
@@ -508,7 +506,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
{
struct adapter *adapter = pDM_Odm->Adapter;
u32 ret_value;
- struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
@@ -581,7 +579,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
{
u8 CurCCK_CCAThres;
- struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+ struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD | ODM_BB_FA_CNT)))
return;
@@ -739,7 +737,7 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
} else if (rssi_level == DM_RATR_STA_MIDDLE) {
rate_bitmap = 0x000ff000;
} else {
- if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ if (*pDM_Odm->pBandWidth == ODM_BW40M)
rate_bitmap = 0x000ff015;
else
rate_bitmap = 0x000ff005;
@@ -945,7 +943,7 @@ void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
- if (*(pDM_Odm->mp_mode) != 1)
+ if (*pDM_Odm->mp_mode != 1)
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
@@ -1035,11 +1033,11 @@ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
u64 cur_tx_bytes = 0;
u64 cur_rx_bytes = 0;
u8 bbtchange = false;
- struct xmit_priv *pxmitpriv = &(Adapter->xmitpriv);
- struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
struct registry_priv *pregpriv = &Adapter->registrypriv;
- struct mlme_ext_priv *pmlmeext = &(Adapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pregpriv->wifi_spec == 1) /* (pmlmeinfo->HT_enable == 0)) */
goto dm_CheckEdcaTurbo_EXIT;
diff --git a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
index a6f2731b076d..65a346ae3cb0 100644
--- a/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_hwconfig.c
@@ -5,8 +5,6 @@
*
******************************************************************************/
-/* include files */
-
#include "odm_precomp.h"
#define READ_AND_CONFIG READ_AND_CONFIG_MP
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index b9025815b682..920688fc9e9f 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -345,8 +345,8 @@ static void dm_txpwr_track_setpwr(struct odm_dm_struct *dm_odm)
{
if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
- ("dm_txpwr_track_setpwr CH=%d\n", *(dm_odm->pChannel)));
- phy_set_tx_power_level(dm_odm->Adapter, *(dm_odm->pChannel));
+ ("dm_txpwr_track_setpwr CH=%d\n", *dm_odm->pChannel));
+ phy_set_tx_power_level(dm_odm->Adapter, *dm_odm->pChannel);
dm_odm->BbSwingFlagOfdm = false;
dm_odm->BbSwingFlagCck = false;
}
@@ -786,7 +786,7 @@ static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8],
}
}
-static void save_adda_registers(struct adapter *adapt, u32 *addareg,
+static void save_adda_registers(struct adapter *adapt, const u32 *addareg,
u32 *backup, u32 register_num)
{
u32 i;
@@ -795,7 +795,7 @@ static void save_adda_registers(struct adapter *adapt, u32 *addareg,
backup[i] = phy_query_bb_reg(adapt, addareg[i], bMaskDWord);
}
-static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
+static void save_mac_registers(struct adapter *adapt, const u32 *mac_reg,
u32 *backup)
{
u32 i;
@@ -806,7 +806,7 @@ static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
backup[i] = usb_read32(adapt, mac_reg[i]);
}
-static void reload_adda_reg(struct adapter *adapt, u32 *adda_reg,
+static void reload_adda_reg(struct adapter *adapt, const u32 *adda_reg,
u32 *backup, u32 regiester_num)
{
u32 i;
@@ -815,8 +815,8 @@ static void reload_adda_reg(struct adapter *adapt, u32 *adda_reg,
phy_set_bb_reg(adapt, adda_reg[i], bMaskDWord, backup[i]);
}
-static void reload_mac_registers(struct adapter *adapt,
- u32 *mac_reg, u32 *backup)
+static void reload_mac_registers(struct adapter *adapt, const u32 *mac_reg,
+ u32 *backup)
{
u32 i;
@@ -826,7 +826,7 @@ static void reload_mac_registers(struct adapter *adapt,
usb_write32(adapt, mac_reg[i], backup[i]);
}
-static void path_adda_on(struct adapter *adapt, u32 *adda_reg,
+static void path_adda_on(struct adapter *adapt, const u32 *adda_reg,
bool is_path_a_on, bool is2t)
{
u32 path_on;
@@ -844,7 +844,8 @@ static void path_adda_on(struct adapter *adapt, u32 *adda_reg,
phy_set_bb_reg(adapt, adda_reg[i], bMaskDWord, path_on);
}
-static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *backup)
+static void mac_setting_calibration(struct adapter *adapt, const u32 *mac_reg,
+ u32 *backup)
{
u32 i = 0;
@@ -952,30 +953,31 @@ static void phy_iq_calibrate(struct adapter *adapt, s32 result[][8],
struct odm_dm_struct *dm_odm = &adapt->HalData->odmpriv;
u32 i;
u8 path_a_ok, path_b_ok;
- u32 adda_reg[IQK_ADDA_REG_NUM] = {
- rFPGA0_XCD_SwitchControl, rBlue_Tooth,
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN};
-
- u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
- REG_TXPAUSE, REG_BCN_CTRL,
- REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
-
+ static const u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN
+ };
+ static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
/* since 92C & 92D have the different define in IQK_BB_REG */
- u32 iqk_bb_reg_92c[IQK_BB_REG_NUM] = {
- rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
- rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
- rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
- rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD};
+ static const u32 iqk_bb_reg_92c[IQK_BB_REG_NUM] = {
+ rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
+ rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
+ rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
+ };
u32 retry_count = 9;
- if (*(dm_odm->mp_mode) == 1)
+ if (*dm_odm->mp_mode == 1)
retry_count = 9;
else
retry_count = 2;
@@ -1320,7 +1322,7 @@ void rtl88eu_phy_lc_calibrate(struct adapter *adapt)
if (singletone || carrier_sup)
return;
- while (*(dm_odm->pbScanInProcess) && timecount < timeout) {
+ while (*dm_odm->pbScanInProcess && timecount < timeout) {
mdelay(50);
timecount += 50;
}
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 00a9f692bb06..6702f263c770 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -79,7 +79,7 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
}
}
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- ptr = (u8 *)(&(tx_agc[idx1]));
+ ptr = (u8 *)(&tx_agc[idx1]);
for (idx2 = 0; idx2 < 4; idx2++) {
if (*ptr > RF6052_MAX_TX_PWR)
*ptr = RF6052_MAX_TX_PWR;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 371e746915dd..176716d3e903 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -256,7 +256,7 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, min_t(u32, rate_len, 8), cur_network->SupportedRates, &pktlen);
/* DS parameter set */
- pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen);
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&cur_network->Configuration.DSConfig, &pktlen);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) {
u32 ATIMWindow;
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 241f55b92808..1af919ff6d93 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -29,9 +29,6 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
usb_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte);
}
-/* */
-/* functions */
-/* */
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
struct hal_data_8188e *hal_data = Adapter->HalData;
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index c0114ad79788..0d3e4a6e7e85 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -50,7 +50,7 @@ struct __queue {
static inline struct list_head *get_list_head(struct __queue *queue)
{
- return &(queue->queue);
+ return &queue->queue;
}
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index dd943c831d91..be30c9434a29 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -817,7 +817,7 @@ So the following defines for 92C is not entire!!!!!!
/* 2 MCUFWDL */
#define MCUFWDL_EN BIT(0)
#define MCUFWDL_RDY BIT(1)
-#define FWDL_ChkSum_rpt BIT(2)
+#define FWDL_CHKSUM_RPT BIT(2)
#define MACINI_RDY BIT(3)
#define BBINI_RDY BIT(4)
#define RFINI_RDY BIT(5)
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 9a89791720e0..d5968ef9f43d 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -93,7 +93,7 @@ static char *translate_scan(struct adapter *padapter,
struct wlan_network *pnetwork,
char *start, char *stop)
{
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct iw_event iwe;
u16 cap;
__le16 le_tmp;
@@ -417,7 +417,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
ret = -EOPNOTSUPP;
goto exit;
}
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0);
}
@@ -444,8 +444,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
- memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.busetkipkey = false;
}
@@ -454,8 +454,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
} else { /* group key */
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16 ));
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.binstallGrpkey = true;
DBG_88E(" ~~~~set sta key:groupkey\n");
@@ -620,7 +620,7 @@ static int rtw_wx_get_name(struct net_device *dev,
u32 ht_ielen = 0;
char *p;
u8 ht_cap = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
NDIS_802_11_RATES_EX *prates = NULL;
@@ -669,7 +669,7 @@ static int rtw_wx_get_freq(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
@@ -738,7 +738,7 @@ static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
@@ -938,10 +938,10 @@ static int rtw_wx_set_wap(struct net_device *dev,
uint ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct sockaddr *temp = (struct sockaddr *)awrq;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct list_head *phead;
u8 *dst_bssid, *src_bssid;
- struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
enum ndis_802_11_auth_mode authmode;
@@ -1002,7 +1002,7 @@ static int rtw_wx_get_wap(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
wrqu->ap_addr.sa_family = ARPHRD_ETHER;
@@ -1188,8 +1188,8 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
{
struct list_head *plist, *phead;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
char *ev = extra;
char *stop = ev + wrqu->data.length;
@@ -1217,7 +1217,7 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
break;
}
- spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
plist = phead->next;
@@ -1358,7 +1358,7 @@ static int rtw_wx_get_essid(struct net_device *dev,
{
u32 len;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
@@ -1564,7 +1564,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
struct ndis_802_11_wep wep;
enum ndis_802_11_auth_mode authmode;
- struct iw_point *erq = &(wrqu->encoding);
+ struct iw_point *erq = &wrqu->encoding;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
@@ -1675,8 +1675,8 @@ static int rtw_wx_get_enc(struct net_device *dev,
{
uint key;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_point *erq = &(wrqu->encoding);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct iw_point *erq = &wrqu->encoding;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
@@ -1759,7 +1759,7 @@ static int rtw_wx_set_auth(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_param *param = (struct iw_param *)&(wrqu->param);
+ struct iw_param *param = (struct iw_param *)&wrqu->param;
int ret = 0;
switch (param->flags & IW_AUTH_INDEX) {
@@ -2012,14 +2012,9 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
if (!p->pointer || p->length != sizeof(struct ieee_param))
return -EINVAL;
- param = (struct ieee_param *)rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param))
+ return PTR_ERR(param);
switch (param->cmd) {
case IEEE_CMD_SET_WPA_PARAM:
@@ -2093,7 +2088,7 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
u8 keylen;
struct cmd_obj *pcmd;
struct setkey_parm *psetkeyparm;
- struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
int res = _SUCCESS;
DBG_88E("%s\n", __func__);
@@ -2130,7 +2125,7 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
keylen = 16;
}
- memcpy(&(psetkeyparm->key[0]), key, keylen);
+ memcpy(&psetkeyparm->key[0], key, keylen);
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *)psetkeyparm;
@@ -2173,7 +2168,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_88E("%s\n", __func__);
@@ -2245,7 +2240,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
@@ -2256,7 +2251,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
/* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
/* psecuritypriv->dot11PrivacyKeyIndex = keyid", but can rtw_set_key to cam */
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->KeyMaterial, pwep->KeyLength);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
@@ -2283,8 +2278,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
@@ -2326,8 +2321,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psta->dot118021XPrivacy = _TKIP_;
/* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
- memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
@@ -2357,8 +2352,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
param->u.crypt.key, min_t(u16, param->u.crypt.key_len, 16));
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
@@ -2398,7 +2393,7 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
{
int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned char *pbuf = param->u.bcn_ie.buf;
@@ -2436,7 +2431,7 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_88E("rtw_add_sta(aid =%d) =%pM\n", param->u.add_sta.aid, (param->sta_addr));
@@ -2489,7 +2484,7 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
{
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
int updated = 0;
@@ -2524,7 +2519,7 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
struct sta_data *psta_data = (struct sta_data *)param_ex->data;
@@ -2580,7 +2575,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
DBG_88E("rtw_get_sta_wpaie, sta_addr: %pM\n", (param->sta_addr));
@@ -2616,8 +2611,8 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
{
unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int ie_len;
DBG_88E("%s, len =%d\n", __func__, len);
@@ -2651,7 +2646,7 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
DBG_88E("%s, len =%d\n", __func__, len);
@@ -2680,7 +2675,7 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
DBG_88E("%s, len =%d\n", __func__, len);
@@ -2710,9 +2705,9 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 value;
@@ -2734,7 +2729,7 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2748,7 +2743,7 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2762,7 +2757,7 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (!check_fwstate(pmlmepriv, WIFI_AP_STATE))
return -EINVAL;
@@ -2789,14 +2784,9 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
if (!p->pointer || p->length != sizeof(struct ieee_param))
return -EINVAL;
- param = (struct ieee_param *)rtw_malloc(p->length);
- if (!param)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- kfree(param);
- return -EFAULT;
- }
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param))
+ return PTR_ERR(param);
switch (param->cmd) {
case RTL871X_HOSTAPD_FLUSH:
@@ -2882,7 +2872,7 @@ static int rtw_wx_set_priv(struct net_device *dev,
/* added for wps2.0 @20110524 */
if (dwrq->flags == 0x8766 && len > 8) {
u32 cp_sz;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 *probereq_wpsie = ext;
int probereq_wpsie_len = len;
u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index daf6db354982..bf86d03820ca 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -77,7 +77,7 @@ static int rtw_android_get_rssi(struct net_device *net, char *command,
int total_len)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pcur_network = &pmlmepriv->cur_network;
int bytes_written = 0;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d3664e508cbe..a7cd4de65b28 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -81,8 +81,8 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void _rtl92e_tx_cmd(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb);
static short _rtl92e_pci_initdescring(struct net_device *dev);
-static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv);
-static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv);
+static void _rtl92e_irq_tx_tasklet(unsigned long data);
+static void _rtl92e_irq_rx_tasklet(unsigned long data);
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv);
static int _rtl92e_up(struct net_device *dev, bool is_silent_reset);
static int _rtl92e_try_up(struct net_device *dev);
@@ -516,8 +516,9 @@ static int _rtl92e_handle_assoc_response(struct net_device *dev,
return 0;
}
-static void _rtl92e_prepare_beacon(struct r8192_priv *priv)
+static void _rtl92e_prepare_beacon(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
struct net_device *dev = priv->rtllib->dev;
struct sk_buff *pskb = NULL, *pnewskb = NULL;
struct cb_desc *tcb_desc = NULL;
@@ -1007,14 +1008,11 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
(void *)rtl92e_hw_wakeup_wq, dev);
INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
(void *)rtl92e_hw_sleep_wq, dev);
- tasklet_init(&priv->irq_rx_tasklet,
- (void(*)(unsigned long))_rtl92e_irq_rx_tasklet,
+ tasklet_init(&priv->irq_rx_tasklet, _rtl92e_irq_rx_tasklet,
(unsigned long)priv);
- tasklet_init(&priv->irq_tx_tasklet,
- (void(*)(unsigned long))_rtl92e_irq_tx_tasklet,
+ tasklet_init(&priv->irq_tx_tasklet, _rtl92e_irq_tx_tasklet,
(unsigned long)priv);
- tasklet_init(&priv->irq_prepare_beacon_tasklet,
- (void(*)(unsigned long))_rtl92e_prepare_beacon,
+ tasklet_init(&priv->irq_prepare_beacon_tasklet, _rtl92e_prepare_beacon,
(unsigned long)priv);
}
@@ -2113,13 +2111,17 @@ static void _rtl92e_tx_resume(struct net_device *dev)
}
}
-static void _rtl92e_irq_tx_tasklet(struct r8192_priv *priv)
+static void _rtl92e_irq_tx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
+
_rtl92e_tx_resume(priv->rtllib->dev);
}
-static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv)
+static void _rtl92e_irq_rx_tasklet(unsigned long data)
{
+ struct r8192_priv *priv = (struct r8192_priv *)data;
+
_rtl92e_rx_normal(priv->rtllib->dev);
rtl92e_writel(priv->rtllib->dev, INTA_MASK,
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 20e494186c9e..462835684e8b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -456,7 +456,7 @@ static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev)
if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||
!priv->rtllib->bandwidth_auto_switch.bautoswitch_enable)
return;
- if (priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz == false) {
+ if (!priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz) {
if (priv->undecorated_smoothed_pwdb <=
priv->rtllib->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz = true;
@@ -1297,7 +1297,7 @@ static void _rtl92e_dm_dig_init(struct net_device *dev)
static void _rtl92e_dm_ctrl_initgain_byrssi(struct net_device *dev)
{
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
@@ -1332,7 +1332,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_driver(struct net_device *dev)
u8 i;
static u8 fw_dig;
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm_switch)
@@ -1366,7 +1366,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
static u32 reset_cnt;
u8 i;
- if (dm_digtable.dig_enable_flag == false)
+ if (!dm_digtable.dig_enable_flag)
return;
if (dm_digtable.dig_algorithm_switch) {
@@ -1501,7 +1501,7 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
reset_cnt = 0;
}
- if (rtllib_act_scanning(priv->rtllib, true) == true) {
+ if (rtllib_act_scanning(priv->rtllib, true)) {
force_write = 1;
return;
}
@@ -2444,7 +2444,7 @@ static void _rtl92e_dm_init_dynamic_tx_power(struct net_device *dev)
static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- unsigned int txhipower_threshhold = 0;
+ unsigned int txhipower_threshold = 0;
unsigned int txlowpower_threshold = 0;
if (priv->rtllib->bdynamic_txpower_enable != true) {
@@ -2454,10 +2454,10 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
}
if ((priv->rtllib->pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) &&
(priv->rtllib->mode == IEEE_G)) {
- txhipower_threshhold = TX_POWER_ATHEROAP_THRESH_HIGH;
+ txhipower_threshold = TX_POWER_ATHEROAP_THRESH_HIGH;
txlowpower_threshold = TX_POWER_ATHEROAP_THRESH_LOW;
} else {
- txhipower_threshhold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
+ txhipower_threshold = TX_POWER_NEAR_FIELD_THRESH_HIGH;
txlowpower_threshold = TX_POWER_NEAR_FIELD_THRESH_LOW;
}
@@ -2465,7 +2465,7 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
priv->undecorated_smoothed_pwdb);
if (priv->rtllib->state == RTLLIB_LINKED) {
- if (priv->undecorated_smoothed_pwdb >= txhipower_threshhold) {
+ if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
priv->bDynamicTxHighPower = true;
priv->bDynamicTxLowPower = false;
} else {
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index d83d72594312..8abc921ecb3e 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -371,7 +371,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
if ((ieee->iw_mode == IW_MODE_ADHOC) ||
(ieee->iw_mode == IW_MODE_MASTER)) {
pHTInfoEle->ControlChl = ieee->current_network.channel;
- pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false) ?
+ pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ?
HT_EXTCHNL_OFFSET_NO_EXT :
(ieee->current_network.channel <= 6)
? HT_EXTCHNL_OFFSET_UPPER :
@@ -526,7 +526,7 @@ void HTOnAssocRsp(struct rtllib_device *ieee)
static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};
- if (pHTInfo->bCurrentHTSupport == false) {
+ if (!pHTInfo->bCurrentHTSupport) {
netdev_warn(ieee->dev, "%s(): HT_DISABLE\n", __func__);
return;
}
@@ -873,7 +873,7 @@ void HTSetConnectBwMode(struct rtllib_device *ieee,
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
- if (pHTInfo->bRegBW40MHz == false)
+ if (!pHTInfo->bRegBW40MHz)
return;
if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index e101f7b13c7e..195d963c4fbb 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -520,55 +520,68 @@ static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry *p
return true;
}
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray, u8 index)
+static void indicate_packets(struct ieee80211_device *ieee,
+ struct ieee80211_rxb *rxb)
{
- u8 i = 0, j = 0;
+ struct net_device_stats *stats = &ieee->stats;
+ struct net_device *dev = ieee->dev;
u16 ethertype;
-// if(index > 1)
-// IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__func__,index);
- for (j = 0; j < index; j++) {
-//added by amy for reorder
- struct ieee80211_rxb *prxb = prxbIndicateArray[j];
- for (i = 0; i < prxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = prxb->subframes[i];
+ u8 i;
+
+ for (i = 0; i < rxb->nr_subframes; i++) {
+ struct sk_buff *sub_skb = rxb->subframes[i];
+
+ if (!sub_skb)
+ continue;
/* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
+ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
+ if (sub_skb->len >= 8 &&
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
+ ethertype != ETH_P_AARP &&
+ ethertype != ETH_P_IPX) ||
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
- } else {
+ skb_pull(sub_skb, SNAP_SIZE);
+ } else {
/* Leave Ethernet header part of hdr and full payload */
- put_unaligned_be16(sub_skb->len, skb_push(sub_skb, 2));
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
- }
- //stats->rx_packets++;
- //stats->rx_bytes += sub_skb->len;
+ put_unaligned_be16(sub_skb->len, skb_push(sub_skb, 2));
+ }
+ memcpy(skb_push(sub_skb, ETH_ALEN), rxb->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), rxb->dst, ETH_ALEN);
+
+ stats->rx_packets++;
+ stats->rx_bytes += sub_skb->len;
+ if (is_multicast_ether_addr(rxb->dst))
+ stats->multicast++;
/* Indicate the packets to upper layer */
- if (sub_skb) {
- sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = ieee->dev;
- sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
- //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
- ieee->last_rx_ps_time = jiffies;
- netif_rx(sub_skb);
- }
- }
+ sub_skb->protocol = eth_type_trans(sub_skb, dev);
+ memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
+ sub_skb->dev = dev;
+ /* 802.11 crc not sufficient */
+ sub_skb->ip_summed = CHECKSUM_NONE;
+ ieee->last_rx_ps_time = jiffies;
+ netif_rx(sub_skb);
+ }
+}
+
+void ieee80211_indicate_packets(struct ieee80211_device *ieee,
+ struct ieee80211_rxb **prxbIndicateArray,
+ u8 index)
+{
+ u8 i;
+
+ for (i = 0; i < index; i++) {
+ struct ieee80211_rxb *prxb = prxbIndicateArray[i];
+
+ indicate_packets(ieee, prxb);
kfree(prxb);
prxb = NULL;
}
}
-
static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
struct ieee80211_rxb *prxb,
struct rx_ts_record *pTS, u16 SeqNum)
@@ -877,7 +890,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
u16 fc, type, stype, sc;
struct net_device_stats *stats;
unsigned int frag;
- u16 ethertype;
//added by amy for reorder
u8 TID = 0;
u16 SeqNum = 0;
@@ -1260,47 +1272,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
//added by amy for reorder
if (!ieee->pHTInfo->bCurRxReorderEnable || !pTS) {
-//added by amy for reorder
- for (i = 0; i < rxb->nr_subframes; i++) {
- struct sk_buff *sub_skb = rxb->subframes[i];
-
- if (sub_skb) {
- /* convert hdr + possible LLC headers into Ethernet header */
- ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
- if (sub_skb->len >= 8 &&
- ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel encapsulation and
- * replace EtherType */
- skb_pull(sub_skb, SNAP_SIZE);
- memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
- } else {
- u16 len;
- /* Leave Ethernet header part of hdr and full payload */
- len = be16_to_cpu(htons(sub_skb->len));
- memcpy(skb_push(sub_skb, 2), &len, 2);
- memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
- }
-
- stats->rx_packets++;
- stats->rx_bytes += sub_skb->len;
- if (is_multicast_ether_addr(dst)) {
- stats->multicast++;
- }
-
- /* Indicate the packets to upper layer */
- sub_skb->protocol = eth_type_trans(sub_skb, dev);
- memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
- sub_skb->dev = dev;
- sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
- //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
- ieee->last_rx_ps_time = jiffies;
- netif_rx(sub_skb);
- }
- }
+ indicate_packets(ieee, rxb);
kfree(rxb);
rxb = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 0ee054d82832..63a561ab4a76 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -372,9 +372,9 @@ ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, struct cb_desc *tcb_
return;
}
- if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
+ if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
tcb_desc->bUseShortGI = true;
- else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
+ else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
tcb_desc->bUseShortGI = true;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index f0b85338b567..2f0d0ffa6fae 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,12 +71,13 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->apsd_queue);
_init_queue(&pxmitpriv->free_xmit_queue);
/*
- * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+ * Please allocate memory with sz = (struct xmit_frame) * NR_XMITFRAME,
* and initialize free_xmit_frame below.
* Please also apply free_txobj to link_up all the xmit_frames...
*/
pxmitpriv->pallocated_frame_buf =
- kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4, GFP_ATOMIC);
+ kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
+ GFP_ATOMIC);
if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
return -ENOMEM;
@@ -126,8 +127,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
for (i = 0; i < NR_XMITBUFF; i++) {
INIT_LIST_HEAD(&pxmitbuf->list);
- pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ,
- GFP_ATOMIC);
+ pxmitbuf->pallocated_buf =
+ kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC);
if (!pxmitbuf->pallocated_buf)
return -ENOMEM;
pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
@@ -350,7 +351,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
struct sta_info *stainfo;
struct qos_priv *pqospriv = &(padapter->mlmepriv.qospriv);
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct security_priv *psecpriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
bool bmcst = is_multicast_ether_addr(pattrib->ra);
@@ -368,15 +369,14 @@ static int xmitframe_addmic(struct _adapter *padapter,
0x0, 0x0};
pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
if (bmcst) {
- if (!memcmp(psecuritypriv->XGrptxmickey
- [psecuritypriv->XGrpKeyid].skey,
+ if (!memcmp(psecpriv->XGrptxmickey
+ [psecpriv->XGrpKeyid].skey,
null_key, 16))
return -ENOMEM;
/*start to calculate the mic code*/
r8712_secmicsetkey(&micdata,
- psecuritypriv->
- XGrptxmickey[psecuritypriv->
- XGrpKeyid].skey);
+ psecpriv->XGrptxmickey
+ [psecpriv->XGrpKeyid].skey);
} else {
if (!memcmp(&stainfo->tkiptxmickey.skey[0],
null_key, 16))
@@ -416,7 +416,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
length = pattrib->last_txcmdsz -
pattrib->hdrlen -
pattrib->iv_len -
- ((psecuritypriv->sw_encrypt)
+ ((psecpriv->sw_encrypt)
? pattrib->icv_len : 0);
r8712_secmicappend(&micdata, payload,
length);
@@ -424,7 +424,7 @@ static int xmitframe_addmic(struct _adapter *padapter,
} else {
length = pxmitpriv->frag_len -
pattrib->hdrlen - pattrib->iv_len -
- ((psecuritypriv->sw_encrypt) ?
+ ((psecpriv->sw_encrypt) ?
pattrib->icv_len : 0);
r8712_secmicappend(&micdata, payload,
length);
@@ -477,75 +477,72 @@ static int make_wlanhdr(struct _adapter *padapter, u8 *hdr,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
__le16 *fctrl = &pwlanhdr->frame_ctl;
+ u8 *bssid;
memset(hdr, 0, WLANHDR_OFFSET);
SetFrameSubType(fctrl, pattrib->subtype);
- if (pattrib->subtype & WIFI_DATA_TYPE) {
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- /* to_ds = 1, fr_ds = 0; */
- SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv),
- ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* to_ds = 0, fr_ds = 1; */
- SetFrDs(fctrl);
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv),
- ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv,
- WIFI_ADHOC_MASTER_STATE)) {
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
- ETH_ALEN);
- } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
- ETH_ALEN);
- } else {
- return -EINVAL;
- }
+ if (!(pattrib->subtype & WIFI_DATA_TYPE))
+ return 0;
- if (pattrib->encrypt)
- SetPrivacy(fctrl);
- if (pqospriv->qos_option) {
- qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
- if (pattrib->priority)
- SetPriority(qc, pattrib->priority);
- SetAckpolicy(qc, pattrib->ack_policy);
- }
- /* TODO: fill HT Control Field */
- /* Update Seq Num will be handled by f/w */
- {
- struct sta_info *psta;
- bool bmcst = is_multicast_ether_addr(pattrib->ra);
-
- if (pattrib->psta) {
- psta = pattrib->psta;
- } else {
- if (bmcst)
- psta = r8712_get_bcmc_stainfo(padapter);
- else
- psta =
- r8712_get_stainfo(&padapter->stapriv,
- pattrib->ra);
- }
- if (psta) {
- psta->sta_xmitpriv.txseq_tid
- [pattrib->priority]++;
- psta->sta_xmitpriv.txseq_tid[pattrib->priority]
- &= 0xFFF;
- pattrib->seqnum = psta->sta_xmitpriv.
- txseq_tid[pattrib->priority];
- SetSeqNum(hdr, pattrib->seqnum);
- }
+ bssid = get_bssid(pmlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* to_ds = 1, fr_ds = 0; */
+ SetToDs(fctrl);
+ ether_addr_copy(pwlanhdr->addr1, bssid);
+ ether_addr_copy(pwlanhdr->addr2, pattrib->src);
+ ether_addr_copy(pwlanhdr->addr3, pattrib->dst);
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
+ ether_addr_copy(pwlanhdr->addr2, bssid);
+ ether_addr_copy(pwlanhdr->addr3, pattrib->src);
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
+ ether_addr_copy(pwlanhdr->addr2, pattrib->src);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
+ } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ ether_addr_copy(pwlanhdr->addr1, pattrib->dst);
+ ether_addr_copy(pwlanhdr->addr2, pattrib->src);
+ ether_addr_copy(pwlanhdr->addr3, bssid);
+ } else {
+ return -EINVAL;
+ }
+
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
+ if (pqospriv->qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
+ /* TODO: fill HT Control Field */
+ /* Update Seq Num will be handled by f/w */
+ {
+ struct sta_info *psta;
+ bool bmcst = is_multicast_ether_addr(pattrib->ra);
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else if (bmcst)
+ psta = r8712_get_bcmc_stainfo(padapter);
+ else
+ psta = r8712_get_stainfo(&padapter->stapriv,
+ pattrib->ra);
+
+ if (psta) {
+ u16 *txtid = psta->sta_xmitpriv.txseq_tid;
+
+ txtid[pattrib->priority]++;
+ txtid[pattrib->priority] &= 0xFFF;
+ pattrib->seqnum = txtid[pattrib->priority];
+ SetSeqNum(hdr, pattrib->seqnum);
}
}
+
return 0;
}
@@ -589,7 +586,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
addr_t addr;
u8 *pframe, *mem_start, *ptxdesc;
struct sta_info *psta;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct security_priv *psecpriv = &padapter->securitypriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
@@ -632,15 +629,13 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
case _WEP40_:
case _WEP104_:
WEP_IV(pattrib->iv, psta->txpn,
- (u8)psecuritypriv->
- PrivacyKeyIndex);
+ (u8)psecpriv->PrivacyKeyIndex);
break;
case _TKIP_:
if (bmcst)
TKIP_IV(pattrib->iv,
psta->txpn,
- (u8)psecuritypriv->
- XGrpKeyid);
+ (u8)psecpriv->XGrpKeyid);
else
TKIP_IV(pattrib->iv, psta->txpn,
0);
@@ -648,8 +643,7 @@ sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
case _AES_:
if (bmcst)
AES_IV(pattrib->iv, psta->txpn,
- (u8)psecuritypriv->
- XGrpKeyid);
+ (u8)psecpriv->XGrpKeyid);
else
AES_IV(pattrib->iv, psta->txpn,
0);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index f227828094bf..c0c0c781fe17 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -115,7 +115,7 @@ struct pkt_attrib {
u8 icv_len;
unsigned char iv[8];
unsigned char icv[8];
- u8 dst[ETH_ALEN];
+ u8 dst[ETH_ALEN] __aligned(2); /* for ether_addr_copy */
u8 src[ETH_ALEN];
u8 ta[ETH_ALEN];
u8 ra[ETH_ALEN];
diff --git a/drivers/staging/rtl8712/usb_halinit.c b/drivers/staging/rtl8712/usb_halinit.c
index 6cc4a704c3a0..313c569748e9 100644
--- a/drivers/staging/rtl8712/usb_halinit.c
+++ b/drivers/staging/rtl8712/usb_halinit.c
@@ -58,7 +58,7 @@ u8 r8712_usb_hal_bus_init(struct _adapter *adapter)
r8712_write8(adapter, SYS_ISO_CTRL + 1, val8);
val8 = r8712_read8(adapter, SYS_ISO_CTRL + 1);
val8 = val8 & 0xEF;
- /* attatch AFE PLL to MACTOP/BB/PCIe Digital */
+ /* attach AFE PLL to MACTOP/BB/PCIe Digital */
r8712_write8(adapter, SYS_ISO_CTRL + 1, val8);
val8 = r8712_read8(adapter, AFE_XTAL_CTRL + 1);
val8 = val8 & 0xFB;
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index be731f1a2209..91b65731fcaa 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -440,7 +440,7 @@ static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
@@ -532,13 +532,6 @@ struct ieee80211_ht_addt_info {
#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
-/* block-ack parameters */
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-
/*
* A-PMDU buffer sizes
* According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index efb5135ad743..bd18d1803e27 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -822,7 +822,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork->IELength = 0;
/* Added by Albert 2009/02/18 */
- /* If the the driver wants to use the bssid to create the connection. */
+ /* If the driver wants to use the bssid to create the connection. */
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index d7a58af76ea0..e65c5a870b46 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1097,9 +1097,6 @@ inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
(!adapter_to_pwrctl(padapter)->bInSuspend) &&
(!check_fwstate(&padapter->mlmepriv,
WIFI_ASOC_STATE|WIFI_UNDER_LINKING))) {
- struct pwrctrl_priv *pwrpriv;
-
- pwrpriv = adapter_to_pwrctl(padapter);
rtw_set_ips_deny(padapter, 0);
_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 1);
}
@@ -2917,12 +2914,11 @@ void rtw_append_exented_cap(struct adapter *padapter, u8 *out_ie, uint *pout_len
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
u8 cap_content[8] = {0};
- u8 *pframe;
if (phtpriv->bss_coexist)
SET_EXT_CAPABILITY_ELE_BSS_COEXIST(cap_content, 1);
- pframe = rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
+ rtw_set_ie(out_ie + *pout_len, EID_EXTCapability, 8, cap_content, pout_len);
}
inline void rtw_set_to_roam(struct adapter *adapter, u8 to_roam)
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 8f9da1d49343..d6d7198dfe45 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -1084,7 +1084,7 @@ auth_fail:
unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
{
- unsigned int seq, len, status, algthm, offset;
+ unsigned int seq, len, status, offset;
unsigned char *p;
unsigned int go2asoc = 0;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -1103,7 +1103,6 @@ unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_fram
offset = (GetPrivacy(pframe)) ? 4 : 0;
- algthm = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset));
seq = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 2));
status = le16_to_cpu(*(__le16 *)((SIZE_PTR)pframe + WLAN_HDR_A3_LEN + offset + 4));
@@ -1170,7 +1169,7 @@ authclnt_fail:
unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
{
- u16 capab_info, listen_interval;
+ u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
unsigned char reassoc, *p, *pos, *wpa_ie;
@@ -1216,8 +1215,6 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
capab_info = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN);
/* capab_info = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN)); */
- /* listen_interval = le16_to_cpu(*(unsigned short *)(pframe + WLAN_HDR_A3_LEN+2)); */
- listen_interval = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN+2);
left = pkt_len - (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
pos = pframe + (sizeof(struct ieee80211_hdr_3addr) + ie_offset);
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 5245098b9ecf..7e1da0e35812 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -10,14 +10,11 @@
#include <rtw_debug.h>
#include <linux/jiffies.h>
#include <rtw_recv.h>
+#include <net/cfg80211.h>
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
-u8 rtw_rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
-/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
-u8 rtw_bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-
static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
@@ -1625,11 +1622,11 @@ sint wlanhdr_to_ethhdr(union recv_frame *precvframe)
psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
/* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
- if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ if ((!memcmp(psnap, rfc1042_header, SNAP_SIZE) &&
(memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2)) &&
(memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2))) ||
/* eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) || */
- !memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ !memcmp(psnap, bridge_tunnel_header, SNAP_SIZE)) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
bsnaphdr = true;
} else
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 5ebf691bd743..0f95009a30b6 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -756,7 +756,7 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- if (psecuritypriv->binstallGrpkey == false) {
+ if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
if (start == 0)
@@ -1837,7 +1837,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
- if (psecuritypriv->binstallGrpkey == false) {
+ if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
if (start == 0)
@@ -2369,7 +2369,7 @@ u8 rtw_handle_tkip_countermeasure(struct adapter *adapter, const char *caller)
struct security_priv *securitypriv = &(adapter->securitypriv);
u8 status = _SUCCESS;
- if (securitypriv->btkip_countermeasure == true) {
+ if (securitypriv->btkip_countermeasure) {
unsigned long passing_ms = jiffies_to_msecs(jiffies - securitypriv->btkip_countermeasure_time);
if (passing_ms > 60*1000) {
DBG_871X_LEVEL(_drv_always_, "%s("ADPT_FMT") countermeasure time:%lus > 60s\n",
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 09d2ca30d653..e3f56c6cc882 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -553,7 +553,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
{
struct sta_info *psta;
- struct tx_servq *ptxservq;
u32 res = _SUCCESS;
NDIS_802_11_MAC_ADDRESS bcast_addr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -571,7 +570,6 @@ u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
/* default broadcast & multicast use macid 1 */
psta->mac_id = 1;
- ptxservq = &(psta->sta_xmitpriv.be_q);
exit:
return _SUCCESS;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 110338dbe372..69bcd172b298 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1271,13 +1271,13 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -1370,21 +1370,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (!p) {
- DBG_871X("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index d5793e4614bf..3705a60a0546 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -12,49 +12,6 @@
#include <Mp_Precomp.h>
/* Global variables */
-static const char *const BtProfileString[] = {
- "NONE",
- "A2DP",
- "PAN",
- "HID",
- "SCO",
-};
-
-static const char *const BtSpecString[] = {
- "1.0b",
- "1.1",
- "1.2",
- "2.0+EDR",
- "2.1+EDR",
- "3.0+HS",
- "4.0",
-};
-
-static const char *const BtLinkRoleString[] = {
- "Master",
- "Slave",
-};
-
-static const char *const h2cStaString[] = {
- "successful",
- "h2c busy",
- "rf off",
- "fw not read",
-};
-
-static const char *const ioStaString[] = {
- "success",
- "can not IO",
- "rf off",
- "fw not read",
- "wait io timeout",
- "invalid len",
- "idle Q empty",
- "insert waitQ fail",
- "unknown fail",
- "wrong level",
- "h2c stopped",
-};
BTC_COEXIST GLBtCoexist;
static u8 GLBtcWiFiInScanState;
@@ -450,7 +407,7 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
break;
case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
- *pu8 = padapter->securitypriv.dot11PrivacyAlgrthm == 0 ? false : true;
+ *pu8 = padapter->securitypriv.dot11PrivacyAlgrthm != 0;
break;
case BTC_GET_BL_WIFI_UNDER_B_MODE:
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 767e2a784f78..10250642d30a 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -1816,7 +1816,7 @@ static void phy_CrossReferenceHTAndVHTTxPowerLimit(struct adapter *padapter)
s8 tempPwrLmt = 0;
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
tempPwrLmt = pHalData->TxPwrLimit_5G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
@@ -1877,7 +1877,7 @@ void PHY_ConvertTxPowerLimitToPowerIndex(struct adapter *Adapter)
phy_CrossReferenceHTAndVHTTxPowerLimit(Adapter);
for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
- for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) {
+ for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) {
for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) {
for (rateSection = 0; rateSection < MAX_RATE_SECTION_NUM; ++rateSection) {
tempPwrLmt = pHalData->TxPwrLimit_2_4G[regulation][bw][rateSection][channel][ODM_RF_PATH_A];
@@ -1920,7 +1920,7 @@ void PHY_InitTxPowerLimit(struct adapter *Adapter)
/* DBG_871X("=====> PHY_InitTxPowerLimit()!\n"); */
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j)
+ for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
@@ -1928,7 +1928,7 @@ void PHY_InitTxPowerLimit(struct adapter *Adapter)
}
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
- for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j)
+ for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j)
for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
for (l = 0; l < MAX_RF_PATH_NUM; ++l)
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index aa6631ee4ea7..f2a9e95a1563 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -7,19 +7,6 @@
#include "odm_precomp.h"
-static const u16 dB_Invert_Table[8][12] = {
- {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
- {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
- {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
- {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
- {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
- {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
- {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125,
- 15849},
- {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119,
- 56234, 65535}
- };
-
/* Global var */
u32 OFDMSwingTable[OFDM_TABLE_SIZE] = {
diff --git a/drivers/staging/rtl8723bs/hal/odm.h b/drivers/staging/rtl8723bs/hal/odm.h
index b77d1fe33a28..16e8f66a3171 100644
--- a/drivers/staging/rtl8723bs/hal/odm.h
+++ b/drivers/staging/rtl8723bs/hal/odm.h
@@ -541,7 +541,7 @@ typedef enum tag_Operation_Mode_Definition {
/* ODM_CMNINFO_WM_MODE */
typedef enum tag_Wireless_Mode_Definition {
- ODM_WM_UNKNOW = 0x0,
+ ODM_WM_UNKNOWN = 0x0,
ODM_WM_B = BIT0,
ODM_WM_G = BIT1,
ODM_WM_A = BIT2,
diff --git a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
index f2c0707aad4c..1c6c08000e27 100644
--- a/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8723bs/hal/odm_RegDefine11N.h
@@ -31,8 +31,8 @@
#define ODM_REG_TX_ANT_CTRL_11N 0x80C
#define ODM_REG_BB_PWR_SAV5_11N 0x818
#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
-#define ODM_REG_RX_DEFUALT_A_11N 0x858
-#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_RX_DEFAULT_A_11N 0x858
+#define ODM_REG_RX_DEFAULT_B_11N 0x85A
#define ODM_REG_BB_PWR_SAV3_11N 0x85C
#define ODM_REG_ANTSEL_CTRL_11N 0x860
#define ODM_REG_RX_ANT_CTRL_11N 0x864
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index c3051ebaeb78..29c29e2e125b 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -311,39 +311,21 @@ static void rtl8723bs_recv_tasklet(unsigned long priv)
}
pkt_copy = rtw_skb_alloc(alloc_sz);
-
- if (pkt_copy) {
- pkt_copy->dev = padapter->pnetdev;
- precvframe->u.hdr.pkt = pkt_copy;
- skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
- skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
- memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
- precvframe->u.hdr.rx_head = pkt_copy->head;
- precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
- precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
- } else {
- if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
- DBG_8192C("%s: alloc_skb fail, drop frag frame\n", __func__);
- rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
- break;
- }
-
- precvframe->u.hdr.pkt = rtw_skb_clone(precvbuf->pskb);
- if (precvframe->u.hdr.pkt) {
- _pkt *pkt_clone = precvframe->u.hdr.pkt;
-
- pkt_clone->data = ptr + rx_report_sz + pattrib->shift_sz;
- skb_reset_tail_pointer(pkt_clone);
- precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail
- = pkt_clone->data;
- precvframe->u.hdr.rx_end = pkt_clone->data + skb_len;
- } else {
- DBG_8192C("%s: rtw_skb_clone fail\n", __func__);
- rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
- break;
- }
+ if (!pkt_copy) {
+ DBG_8192C("%s: alloc_skb fail, drop frame\n", __func__);
+ rtw_free_recvframe(precvframe, &precvpriv->free_recv_queue);
+ break;
}
+ pkt_copy->dev = padapter->pnetdev;
+ precvframe->u.hdr.pkt = pkt_copy;
+ skb_reserve(pkt_copy, 8 - ((SIZE_PTR)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
+ skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
+ memcpy(pkt_copy->data, (ptr + rx_report_sz + pattrib->shift_sz), skb_len);
+ precvframe->u.hdr.rx_head = pkt_copy->head;
+ precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail = pkt_copy->data;
+ precvframe->u.hdr.rx_end = skb_end_pointer(pkt_copy);
+
recvframe_put(precvframe, skb_len);
/* recvframe_pull(precvframe, drvinfo_sz + RXDESC_SIZE); */
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 7853af53051d..e42d8c18e1ae 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -544,13 +544,9 @@ static void _InitRetryFunction(struct adapter *padapter)
static void HalRxAggr8723BSdio(struct adapter *padapter)
{
- struct registry_priv *pregistrypriv;
u8 valueDMATimeout;
u8 valueDMAPageCount;
-
- pregistrypriv = &padapter->registrypriv;
-
valueDMATimeout = 0x06;
valueDMAPageCount = 0x06;
diff --git a/drivers/staging/rtl8723bs/include/hal_data.h b/drivers/staging/rtl8723bs/include/hal_data.h
index e5e667df6154..fa5d70016f05 100644
--- a/drivers/staging/rtl8723bs/include/hal_data.h
+++ b/drivers/staging/rtl8723bs/include/hal_data.h
@@ -56,9 +56,9 @@ enum RT_AMPDU_BURST {
/* Tx Power Limit Table Size */
#define MAX_REGULATION_NUM 4
#define MAX_RF_PATH_NUM_IN_POWER_LIMIT_TABLE 4
-#define MAX_2_4G_BANDWITH_NUM 4
+#define MAX_2_4G_BANDWIDTH_NUM 4
#define MAX_RATE_SECTION_NUM 10
-#define MAX_5G_BANDWITH_NUM 4
+#define MAX_5G_BANDWIDTH_NUM 4
#define MAX_BASE_NUM_IN_PHY_REG_PG_2_4G 10 /* CCK:1, OFDM:1, HT:4, VHT:4 */
#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 9 /* OFDM:1, HT:4, VHT:4 */
@@ -280,14 +280,14 @@ struct hal_com_data {
/* Power Limit Table for 2.4G */
s8 TxPwrLimit_2_4G[MAX_REGULATION_NUM]
- [MAX_2_4G_BANDWITH_NUM]
+ [MAX_2_4G_BANDWIDTH_NUM]
[MAX_RATE_SECTION_NUM]
[CHANNEL_MAX_NUMBER_2G]
[MAX_RF_PATH_NUM];
/* Power Limit Table for 5G */
s8 TxPwrLimit_5G[MAX_REGULATION_NUM]
- [MAX_5G_BANDWITH_NUM]
+ [MAX_5G_BANDWIDTH_NUM]
[MAX_RATE_SECTION_NUM]
[CHANNEL_MAX_NUMBER_5G]
[MAX_RF_PATH_NUM];
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 98c3e92245b7..a851b818ef0e 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -38,8 +38,6 @@
#define RX_MAX_QUEUE 2
#define MAX_SUBFRAME_COUNT 64
-extern u8 rtw_rfc1042_header[];
-extern u8 rtw_bridge_tunnel_header[];
/* for Rx reordering buffer control */
struct recv_reorder_ctrl
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index cd31ad2b8a7b..2fb80b6eb51d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -98,7 +98,7 @@ static struct ieee80211_channel rtw_2ghz_channels[] = {
static void rtw_2g_channels_init(struct ieee80211_channel *channels)
{
- memcpy((void*)channels, (void*)rtw_2ghz_channels,
+ memcpy((void *)channels, (void *)rtw_2ghz_channels,
sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
);
}
@@ -133,8 +133,8 @@ static struct ieee80211_supported_band *rtw_spt_band_alloc(
if (!spt_band)
goto exit;
- spt_band->channels = (struct ieee80211_channel*)(((u8 *)spt_band)+sizeof(struct ieee80211_supported_band));
- spt_band->bitrates = (struct ieee80211_rate*)(((u8 *)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
+ spt_band->channels = (struct ieee80211_channel *)(((u8 *)spt_band)+sizeof(struct ieee80211_supported_band));
+ spt_band->bitrates = (struct ieee80211_rate *)(((u8 *)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
spt_band->band = band;
spt_band->n_channels = n_channels;
spt_band->n_bitrates = n_bitrates;
@@ -152,22 +152,6 @@ exit:
return spt_band;
}
-static void rtw_spt_band_free(struct ieee80211_supported_band *spt_band)
-{
- u32 size = 0;
-
- if (!spt_band)
- return;
-
- if (spt_band->band == NL80211_BAND_2GHZ)
- {
- size = sizeof(struct ieee80211_supported_band)
- + sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
- + sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM;
- }
- kfree(spt_band);
-}
-
static const struct ieee80211_txrx_stypes
rtw_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_ADHOC] = {
@@ -358,7 +342,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
memcpy(pbuf, pnetwork->network.IEs, pnetwork->network.IELength);
len += pnetwork->network.IELength;
- *((__le64*)pbuf) = cpu_to_le64(notify_timestamp);
+ *((__le64 *)pbuf) = cpu_to_le64(notify_timestamp);
bss = cfg80211_inform_bss_frame(wiphy, notify_channel, (struct ieee80211_mgmt *)buf,
len, notify_signal, GFP_ATOMIC);
@@ -1129,7 +1113,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
{
if (mac_addr)
- memcpy(param->sta_addr, (void*)mac_addr, ETH_ALEN);
+ memcpy(param->sta_addr, (void *)mac_addr, ETH_ALEN);
ret = rtw_cfg80211_ap_set_encryption(ndev, param, param_len);
}
@@ -2485,7 +2469,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
* for two MAC addresses
*/
skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
- pdata = (unsigned char*)skb->data;
+ pdata = (unsigned char *)skb->data;
memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr));
memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr));
@@ -2540,7 +2524,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- memcpy(pframe, (void*)buf, len);
+ memcpy(pframe, (void *)buf, len);
pattrib->pktlen = len;
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -3030,7 +3014,7 @@ static int _cfg80211_rtw_mgmt_tx(struct adapter *padapter, u8 tx_ch, const u8 *b
pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- memcpy(pframe, (void*)buf, len);
+ memcpy(pframe, (void *)buf, len);
pattrib->pktlen = len;
pwlanhdr = (struct ieee80211_hdr *)pframe;
@@ -3463,7 +3447,7 @@ void rtw_wdev_free(struct wireless_dev *wdev)
if (!wdev)
return;
- rtw_spt_band_free(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
wiphy_free(wdev->wiphy);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 5059b874080e..902ac8169948 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2561,14 +2561,16 @@ static int rtw_wps_start(struct net_device *dev,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct iw_point *pdata = &wrqu->data;
u32 u32wps_start = 0;
- unsigned int uintRet = 0;
if ((true == padapter->bDriverStopped) || (true == padapter->bSurpriseRemoved) || (NULL == pdata)) {
ret = -EINVAL;
goto exit;
}
- uintRet = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
+ if (copy_from_user((void *)&u32wps_start, pdata->pointer, 4)) {
+ ret = -EFAULT;
+ goto exit;
+ }
if (u32wps_start == 0)
u32wps_start = *extra;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index d29f59bbb613..50a3c2c3a8d2 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -1057,9 +1057,9 @@ static int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
status = _netdev_open(pnetdev);
mutex_unlock(&(adapter_to_dvobj(padapter)->hw_init_mutex));
}
- }
- else
+ } else {
status = (_SUCCESS == ips_netdrv_open(padapter)) ? (0) : (-1);
+ }
return status;
}
@@ -1192,8 +1192,7 @@ void rtw_dev_unload(struct adapter *padapter)
padapter->bup = false;
DBG_871X("<=== %s\n", __func__);
- }
- else {
+ } else {
RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("%s: bup ==false\n", __func__));
DBG_871X("%s: bup ==false\n", __func__);
}
@@ -1223,8 +1222,7 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
rtw_disassoc_cmd(padapter, 0, false);
/* s2-2. indicate disconnect to os */
rtw_indicate_disconnect(padapter);
- }
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
rtw_sta_flush(padapter);
}
@@ -1270,9 +1268,8 @@ void rtw_suspend_wow(struct adapter *padapter)
padapter->bDriverStopped = false; /* for 32k command */
/* 2. disable interrupt */
- if (padapter->intf_stop) {
+ if (padapter->intf_stop)
padapter->intf_stop(padapter);
- }
/* 2.1 clean interrupt */
if (padapter->HalFunc.clear_interrupt)
@@ -1448,14 +1445,13 @@ int rtw_suspend_common(struct adapter *padapter)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
#ifdef CONFIG_WOWLAN
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
pwrpriv->wowlan_mode = true;
- } else if (pwrpriv->wowlan_pno_enable == true) {
+ else if (pwrpriv->wowlan_pno_enable == true)
pwrpriv->wowlan_mode |= pwrpriv->wowlan_pno_enable;
- }
if (pwrpriv->wowlan_mode == true)
- rtw_suspend_wow(padapter);
+ rtw_suspend_wow(padapter);
else
rtw_suspend_normal(padapter);
@@ -1522,9 +1518,8 @@ int rtw_resume_process_wow(struct adapter *padapter)
pwrpriv->bFwCurrentInPSMode = false;
- if (padapter->intf_stop) {
+ if (padapter->intf_stop)
padapter->intf_stop(padapter);
- }
if (padapter->HalFunc.clear_interrupt)
padapter->HalFunc.clear_interrupt(padapter);
@@ -1541,18 +1536,15 @@ int rtw_resume_process_wow(struct adapter *padapter)
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
- if (psta) {
+ if (psta)
set_sta_rate(padapter, psta);
- }
-
padapter->bDriverStopped = false;
DBG_871X("%s: wowmode resuming, DriverStopped:%d\n", __func__, padapter->bDriverStopped);
rtw_start_drv_threads(padapter);
- if (padapter->intf_start) {
+ if (padapter->intf_start)
padapter->intf_start(padapter);
- }
/* start netif queue */
if (pnetdev) {
@@ -1656,9 +1648,8 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
DBG_871X("%s: wowmode resuming, DriverStopped:%d\n", __func__, padapter->bDriverStopped);
rtw_start_drv_threads(padapter);
- if (padapter->intf_start) {
+ if (padapter->intf_start)
padapter->intf_start(padapter);
- }
/* start netif queue */
if (pnetdev) {
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 60c35d92ba29..eb4d1c3008fe 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -9,6 +9,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <linux/jiffies.h>
+#include <net/cfg80211.h>
void rtw_os_free_recvframe(union recv_frame *precvframe)
{
@@ -60,27 +61,20 @@ _pkt *rtw_os_alloc_msdu_pkt(union recv_frame *prframe, u16 nSubframe_Length, u8
pattrib = &prframe->u.hdr.attrib;
sub_skb = rtw_skb_alloc(nSubframe_Length + 12);
- if (sub_skb) {
- skb_reserve(sub_skb, 12);
- skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
- } else {
- sub_skb = rtw_skb_clone(prframe->u.hdr.pkt);
- if (sub_skb) {
- sub_skb->data = pdata + ETH_HLEN;
- sub_skb->len = nSubframe_Length;
- skb_set_tail_pointer(sub_skb, nSubframe_Length);
- } else {
- DBG_871X("%s(): rtw_skb_clone() Fail!!!\n", __func__);
- return NULL;
- }
+ if (!sub_skb) {
+ DBG_871X("%s(): rtw_skb_alloc() Fail!!!\n", __func__);
+ return NULL;
}
+ skb_reserve(sub_skb, 12);
+ skb_put_data(sub_skb, (pdata + ETH_HLEN), nSubframe_Length);
+
eth_type = RTW_GET_BE16(&sub_skb->data[6]);
if (sub_skb->len >= 8 &&
- ((!memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ ((!memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
- !memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ !memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE))) {
/*
* remove RFC1042 or Bridge-Tunnel encapsulation and replace
* EtherType
@@ -230,7 +224,7 @@ static void rtw_os_ksocket_send(struct adapter *padapter, union recv_frame *prec
if (rx_pid == psta->pid) {
int i;
- u16 len = *(u16*)(skb->data+ETH_HLEN+2);
+ u16 len = *(u16 *)(skb->data+ETH_HLEN+2);
DBG_871X("eth, RC: len = 0x%x\n", len);
for (i = 0; i < len; i++)
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index b093b5629171..5b1392deb0a7 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -514,7 +514,7 @@ static void rtw_dev_remove(struct sdio_func *func)
rtw_unregister_netdevs(dvobj);
- if (padapter->bSurpriseRemoved == false) {
+ if (!padapter->bSurpriseRemoved) {
int err;
/* test surprise remove */
@@ -554,12 +554,12 @@ static int rtw_sdio_suspend(struct device *dev)
struct adapter *padapter = psdpriv->if1;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
- if (padapter->bDriverStopped == true) {
+ if (padapter->bDriverStopped) {
DBG_871X("%s bDriverStopped = %d\n", __func__, padapter->bDriverStopped);
return 0;
}
- if (pwrpriv->bInSuspend == true) {
+ if (pwrpriv->bInSuspend) {
DBG_871X("%s bInSuspend = %d\n", __func__, pwrpriv->bInSuspend);
pdbgpriv->dbg_suspend_error_cnt++;
return 0;
@@ -574,7 +574,7 @@ static int rtw_resume_process(struct adapter *padapter)
struct dvobj_priv *psdpriv = padapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
- if (pwrpriv->bInSuspend == false) {
+ if (!pwrpriv->bInSuspend) {
pdbgpriv->dbg_resume_error_cnt++;
DBG_871X("%s bInSuspend = %d\n", __func__, pwrpriv->bInSuspend);
return -1;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 59568d18ce23..a1a82e59dfee 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -297,6 +297,62 @@ static int lynxfb_ops_pan_display(struct fb_var_screeninfo *var,
return hw_sm750_pan_display(crtc, var, info);
}
+static inline void lynxfb_set_visual_mode(struct fb_info *info)
+{
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ default:
+ break;
+ }
+}
+
+static inline int lynxfb_set_color_offsets(struct fb_info *info)
+{
+ lynxfb_set_visual_mode(info);
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ info->var.red.offset = 0;
+ info->var.red.length = 8;
+ info->var.green.offset = 0;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ info->var.transp.length = 0;
+ info->var.transp.offset = 0;
+ break;
+ case 16:
+ info->var.red.offset = 11;
+ info->var.red.length = 5;
+ info->var.green.offset = 5;
+ info->var.green.length = 6;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 5;
+ info->var.transp.length = 0;
+ info->var.transp.offset = 0;
+ break;
+ case 24:
+ case 32:
+ info->var.red.offset = 16;
+ info->var.red.length = 8;
+ info->var.green.offset = 8;
+ info->var.green.length = 8;
+ info->var.blue.offset = 0;
+ info->var.blue.length = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int lynxfb_ops_set_par(struct fb_info *info)
{
struct lynxfb_par *par;
@@ -328,48 +384,13 @@ static int lynxfb_ops_set_par(struct fb_info *info)
* and these data should be set before setcolreg routine
*/
- switch (var->bits_per_pixel) {
- case 8:
- fix->visual = FB_VISUAL_PSEUDOCOLOR;
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- fix->visual = FB_VISUAL_TRUECOLOR;
- break;
- case 24:
- case 32:
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- fix->visual = FB_VISUAL_TRUECOLOR;
- break;
- default:
- ret = -EINVAL;
- break;
- }
+ ret = lynxfb_set_color_offsets(info);
+
var->height = var->width = -1;
var->accel_flags = 0;/*FB_ACCELF_TEXT;*/
if (ret) {
- pr_err("pixel bpp format not satisfied\n.");
+ pr_err("bpp %d not supported\n", var->bits_per_pixel);
return ret;
}
ret = hw_sm750_crtc_setMode(crtc, var, fix);
@@ -511,10 +532,12 @@ lynxfb_resume_err:
static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
+ int ret;
struct lynxfb_par *par;
struct lynxfb_crtc *crtc;
resource_size_t request;
+ ret = 0;
par = info->par;
crtc = &par->crtc;
@@ -523,43 +546,13 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
var->yres,
var->bits_per_pixel);
- switch (var->bits_per_pixel) {
- case 8:
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 0;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- break;
- case 24:
- case 32:
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- break;
- default:
+ ret = lynxfb_set_color_offsets(info);
+
+ if (ret) {
pr_err("bpp %d not supported\n", var->bits_per_pixel);
- return -EINVAL;
+ return ret;
}
+
var->height = var->width = -1;
var->accel_flags = 0;/* FB_ACCELF_TEXT; */
@@ -709,7 +702,9 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
} else {
output->paths = sm750_crt;
crtc->channel = sm750_primary;
- /* not consider of padding stuffs for oScreen,need fix */
+ /* not consider of padding stuffs for oScreen,
+ * need fix
+ */
crtc->oScreen = sm750_dev->vidmem_size >> 1;
crtc->vScreen = sm750_dev->pvMem + crtc->oScreen;
}
@@ -893,15 +888,8 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
pr_info("fix->mmio_start = %lx\n", fix->mmio_start);
fix->mmio_len = sm750_dev->vidreg_size;
pr_info("fix->mmio_len = %x\n", fix->mmio_len);
- switch (var->bits_per_pixel) {
- case 8:
- fix->visual = FB_VISUAL_PSEUDOCOLOR;
- break;
- case 16:
- case 32:
- fix->visual = FB_VISUAL_TRUECOLOR;
- break;
- }
+
+ lynxfb_set_visual_mode(info);
/* set var */
var->activate = FB_ACTIVATE_NOW;
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index ce90adcb449d..19823c7277a4 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -59,16 +59,19 @@ struct lynx_accel {
int (*de_wait)(void);/* see if hardware ready to work */
- int (*de_fillrect)(struct lynx_accel *, u32, u32, u32, u32,
- u32, u32, u32, u32, u32);
+ int (*de_fillrect)(struct lynx_accel *,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32, u32);
- int (*de_copyarea)(struct lynx_accel *, u32, u32, u32, u32,
- u32, u32, u32, u32,
- u32, u32, u32, u32);
+ int (*de_copyarea)(struct lynx_accel *,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32);
- int (*de_imageblit)(struct lynx_accel *, const char *, u32, u32, u32, u32,
- u32, u32, u32, u32,
- u32, u32, u32, u32);
+ int (*de_imageblit)(struct lynx_accel *, const char *,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32,
+ u32, u32, u32, u32);
};
@@ -163,7 +166,7 @@ struct lynxfb_output {
*/
void *priv;
- int (*proc_setBLANK)(struct lynxfb_output*, int);
+ int (*proc_setBLANK)(struct lynxfb_output *output, int blank);
};
struct lynxfb_par {
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index b8d60701f898..7136d751cff5 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -51,7 +51,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
/* now map mmio and vidmem */
sm750_dev->pvReg = ioremap(sm750_dev->vidreg_start,
- sm750_dev->vidreg_size);
+ sm750_dev->vidreg_size);
if (!sm750_dev->pvReg) {
pr_err("mmio failed\n");
ret = -EFAULT;
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index ddbb7e97d118..7408eb29cf38 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -43,6 +43,7 @@ static struct var_t vars[] = {
{ CAPS_STOP, .u.s = {"[:dv ap 100]" } },
{ RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } },
{ PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } },
{ PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
@@ -59,6 +60,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -87,6 +90,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
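Each synthesizer driver gains the new setting through the same three-part pattern shown in the decext hunk above and repeated in the files that follow: a vars[] entry carrying the protocol string and numeric range, a kobj_attribute bound to the generic spk_var_show/spk_var_store helpers, and a slot in the synth_attrs[] array. A condensed sketch of just those three pieces; identifiers prefixed example_ are illustrative, the values mirror the decext change:

static struct var_t example_vars[] = {
	/* "[:dv pr %d] " is presumably the DECtalk pitch-range command;
	 * default 100, range 0..10000
	 */
	{ INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
};

static struct kobj_attribute example_inflection_attribute =
	__ATTR(inflection, 0644, spk_var_show, spk_var_store);

static struct attribute *example_synth_attrs[] = {
	&example_inflection_attribute.attr,
	NULL,	/* sysfs attribute lists are NULL-terminated */
};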
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 798c42dfa16c..96f24c848cc5 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -139,6 +139,7 @@ static struct var_t vars[] = {
{ CAPS_STOP, .u.s = {"[:dv ap 100]" } },
{ RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } },
{ PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } },
+ { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } },
{ PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
@@ -155,6 +156,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -183,6 +186,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index dccb4ea29d37..780214b5ca16 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -44,7 +44,7 @@ static struct var_t vars[] = {
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
- { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
{ VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } },
@@ -61,6 +61,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -89,6 +91,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index 7df1a84297f6..e393438af81b 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -24,6 +24,7 @@ static struct var_t vars[] = {
{ PAUSE, .u.s = {"PAUSE\n"} },
{ RATE, .u.n = {"RATE %d\n", 8, 1, 16, 0, 0, NULL } },
{ PITCH, .u.n = {"PITCH %d\n", 8, 0, 16, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } },
{ VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
{ TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
@@ -39,6 +40,8 @@ static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute =
@@ -65,6 +68,7 @@ static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
&vol_attribute.attr,
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index f591ec095582..9a7029539f35 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -38,6 +38,7 @@ static struct var_t vars[] = {
{ PAUSE, .u.n = {"\x01P" } },
{ RATE, .u.n = {"\x01%ds", 2, 0, 9, 0, 0, NULL } },
{ PITCH, .u.n = {"\x01%dp", 5, 0, 9, 0, 0, NULL } },
+ { INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } },
{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
{ TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
{ PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } },
@@ -57,6 +58,8 @@ static struct kobj_attribute freq_attribute =
__ATTR(freq, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store);
+static struct kobj_attribute inflection_attribute =
+ __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
@@ -96,6 +99,7 @@ static struct attribute *synth_attrs[] = {
&freq_attribute.attr,
/* &lang_attribute.attr, */
&pitch_attribute.attr,
+ &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&tone_attribute.attr,
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index fc6a9416829c..d3272c6d199a 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -42,7 +42,8 @@ enum var_id_t {
SAY_CONTROL, SAY_WORD_CTL, NO_INTERRUPT, KEY_ECHO,
SPELL_DELAY, PUNC_LEVEL, READING_PUNC,
ATTRIB_BLEEP, BLEEPS,
- RATE, PITCH, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG, DIRECT, PAUSE,
+ RATE, PITCH, INFLECTION, VOL, TONE, PUNCT, VOICE, FREQUENCY, LANG,
+ DIRECT, PAUSE,
CAPS_START, CAPS_STOP, CHARTAB,
MAXVARS
};
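A brief note on the enum edit above: INFLECTION is inserted between PITCH and VOL, which shifts the implicit values of every later enumerator, and MAXVARS (kept last) grows by one. As far as the visible code suggests this is harmless, since var_id_t values are only used at run time inside the kernel and the raw numbers are not part of any exported interface. Illustrative only, with made-up starting values, of how C renumbers the tail of such an enum:

enum example_var_id {
	EXAMPLE_PITCH = 20,	/* unchanged */
	EXAMPLE_INFLECTION,	/* new: takes 21 */
	EXAMPLE_VOL,		/* was 21, now 22 */
	EXAMPLE_MAXVARS		/* grows by one as well */
};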
diff --git a/drivers/staging/speakup/spkguide.txt b/drivers/staging/speakup/spkguide.txt
index c23549c54c3c..1e622cd34363 100644
--- a/drivers/staging/speakup/spkguide.txt
+++ b/drivers/staging/speakup/spkguide.txt
@@ -406,6 +406,7 @@ freq
full_time
jiffy_delta
pitch
+inflection
punct
rate
tone
@@ -518,9 +519,9 @@ All the entries in the Speakup sys system are readable, some are
writable by root only, and some are writable by everyone. Unless you
know what you are doing, you should probably leave the ones that are
writable by root only alone. Most of the names are self explanatory.
-Vol for controlling volume, pitch for pitch, rate for controlling speaking
-rate, etc. If you find one you aren't sure about, you can post a query
-on the Speakup list.
+Vol for controlling volume, pitch for pitch, inflection for pitch range, rate
+for controlling speaking rate, etc. If you find one you aren't sure about, you
+can post a query on the Speakup list.
6. Changing Synthesizers
diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup
index be3f5d6962e9..c6a32c434ce9 100644
--- a/drivers/staging/speakup/sysfs-driver-speakup
+++ b/drivers/staging/speakup/sysfs-driver-speakup
@@ -325,6 +325,12 @@ KernelVersion: 2.6
Contact: speakup@linux-speakup.org
Description: Gets or sets the pitch of the synthesizer. The range is 0-9.
+What: /sys/accessibility/speakup/soft/inflection
+KernelVersion: 5.8
+Contact: speakup@linux-speakup.org
+Description: Gets or sets the inflection of the synthesizer, i.e. the pitch
+ range. The range is 0-9.
+
What: /sys/accessibility/speakup/soft/punct
KernelVersion: 2.6
Contact: speakup@linux-speakup.org
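A hedged userspace sketch of exercising the new ABI entry above with plain file I/O (no speakup-specific API is involved); the path and value range follow the documentation, and error handling is kept minimal on purpose:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/accessibility/speakup/soft/inflection";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "7", 1) != 1)	/* valid values are 0-9 */
		perror("write");
	close(fd);
	return 0;
}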
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 5741d1cb6227..d7f6bec7ff06 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -37,6 +37,7 @@ static struct st_var_header var_headers[] = {
{ "bell_pos", BELL_POS, VAR_NUM, &spk_bell_pos, NULL },
{ "rate", RATE, VAR_NUM, NULL, NULL },
{ "pitch", PITCH, VAR_NUM, NULL, NULL },
+ { "inflection", INFLECTION, VAR_NUM, NULL, NULL },
{ "vol", VOL, VAR_NUM, NULL, NULL },
{ "tone", TONE, VAR_NUM, NULL, NULL },
{ "punct", PUNCT, VAR_NUM, NULL, NULL },
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index dd979ee4dcf1..99c57ceeb357 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -292,7 +292,7 @@ static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
* @tasktype: Type of taskmgmt command
* @scsidev: Scsidev that issued command
*
- * Create a cmdrsp packet and send it to the Serivce Partition
+ * Create a cmdrsp packet and send it to the Service Partition
* that will service this request.
*
* Return: Int representing whether command was queued successfully or not
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 33485184a98a..f783b632141b 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -233,7 +233,7 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
}
static void snd_bcm2835_pcm_transfer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect *rec, size_t bytes)
+ struct snd_pcm_indirect *rec, size_t bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
@@ -346,7 +346,7 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, const char *name,
&snd_bcm2835_playback_ops);
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
- chip->card->dev, 128 * 1024, 128 * 1024);
+ chip->card->dev, 128 * 1024, 128 * 1024);
if (spdif)
chip->pcm_spdif = pcm;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 597acef35d0b..4f1adddb804f 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -369,8 +369,8 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
if (dev->capture.vc_start_timestamp != -1 && pts) {
ktime_t timestamp;
- s64 runtime_us = pts -
- dev->capture.vc_start_timestamp;
+ s64 runtime_us = pts - dev->capture.vc_start_timestamp;
+
timestamp = ktime_add_us(dev->capture.kernel_start_ts,
runtime_us);
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
@@ -420,9 +420,8 @@ static int enable_camera(struct bm2835_mmal_dev *dev)
return -EINVAL;
}
- ret = vchiq_mmal_component_enable(
- dev->instance,
- dev->component[COMP_CAMERA]);
+ ret = vchiq_mmal_component_enable(dev->instance,
+ dev->component[COMP_CAMERA]);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"Failed enabling camera, ret %d\n", ret);
@@ -451,10 +450,8 @@ static int disable_camera(struct bm2835_mmal_dev *dev)
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Disabling camera\n");
- ret =
- vchiq_mmal_component_disable(
- dev->instance,
- dev->component[COMP_CAMERA]);
+ ret = vchiq_mmal_component_disable(dev->instance,
+ dev->component[COMP_CAMERA]);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"Failed disabling camera, ret %d\n", ret);
@@ -555,8 +552,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* enable the camera port */
dev->capture.port->cb_ctx = dev;
- ret =
- vchiq_mmal_port_enable(dev->instance, dev->capture.port, buffer_cb);
+ ret = vchiq_mmal_port_enable(dev->instance, dev->capture.port,
+ buffer_cb);
if (ret) {
v4l2_err(&dev->v4l2_dev,
"Failed to enable capture port - error %d. Disabling camera port again\n",
@@ -668,7 +665,7 @@ static int set_overlay_params(struct bm2835_mmal_dev *dev,
MMAL_DISPLAY_SET_ALPHA |
MMAL_DISPLAY_SET_DEST_RECT |
MMAL_DISPLAY_SET_FULLSCREEN,
- .layer = PREVIEW_LAYER,
+ .layer = 2,
.alpha = dev->overlay.global_alpha,
.fullscreen = 0,
.dest_rect = {
@@ -767,16 +764,14 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
(!on && !dev->component[COMP_PREVIEW]->enabled))
return 0; /* already in requested state */
- src =
- &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
+ src = &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
if (!on) {
/* disconnect preview ports and disable component */
ret = vchiq_mmal_port_disable(dev->instance, src);
if (!ret)
- ret =
- vchiq_mmal_port_connect_tunnel(dev->instance, src,
- NULL);
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance, src,
+ NULL);
if (ret >= 0)
ret = vchiq_mmal_component_disable(
dev->instance,
@@ -800,9 +795,8 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
if (enable_camera(dev) < 0)
return -EINVAL;
- ret = vchiq_mmal_component_enable(
- dev->instance,
- dev->component[COMP_PREVIEW]);
+ ret = vchiq_mmal_component_enable(dev->instance,
+ dev->component[COMP_PREVIEW]);
if (ret < 0)
return ret;
@@ -1001,6 +995,141 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+
+static int mmal_setup_video_component(struct bm2835_mmal_dev *dev,
+ struct v4l2_format *f)
+{
+ bool overlay_enabled = !!dev->component[COMP_PREVIEW]->enabled;
+ struct vchiq_mmal_port *preview_port;
+ int ret;
+
+ preview_port = &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
+
+ /* Preview and encode ports need to match on resolution */
+ if (overlay_enabled) {
+ /* Need to disable the overlay before we can update
+ * the resolution
+ */
+ ret = vchiq_mmal_port_disable(dev->instance, preview_port);
+ if (!ret) {
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance,
+ preview_port,
+ NULL);
+ }
+ }
+ preview_port->es.video.width = f->fmt.pix.width;
+ preview_port->es.video.height = f->fmt.pix.height;
+ preview_port->es.video.crop.x = 0;
+ preview_port->es.video.crop.y = 0;
+ preview_port->es.video.crop.width = f->fmt.pix.width;
+ preview_port->es.video.crop.height = f->fmt.pix.height;
+ preview_port->es.video.frame_rate.num =
+ dev->capture.timeperframe.denominator;
+ preview_port->es.video.frame_rate.den =
+ dev->capture.timeperframe.numerator;
+ ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
+
+ if (overlay_enabled) {
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance,
+ preview_port,
+ &dev->component[COMP_PREVIEW]->input[0]);
+ if (ret)
+ return ret;
+
+ ret = vchiq_mmal_port_enable(dev->instance, preview_port, NULL);
+ }
+
+ return ret;
+}
+
+static int mmal_setup_encode_component(struct bm2835_mmal_dev *dev,
+ struct v4l2_format *f,
+ struct vchiq_mmal_port *port,
+ struct vchiq_mmal_port *camera_port,
+ struct vchiq_mmal_component *component)
+{
+ struct mmal_fmt *mfmt = get_format(f);
+ int ret;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "vid_cap - set up encode comp\n");
+
+ /* configure buffering */
+ camera_port->current_buffer.size = camera_port->recommended_buffer.size;
+ camera_port->current_buffer.num = camera_port->recommended_buffer.num;
+
+ ret = vchiq_mmal_port_connect_tunnel(dev->instance, camera_port,
+ &component->input[0]);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s failed to create connection\n", __func__);
+ /* ensure capture is not going to be tried */
+ dev->capture.port = NULL;
+ return ret;
+ }
+
+ port->es.video.width = f->fmt.pix.width;
+ port->es.video.height = f->fmt.pix.height;
+ port->es.video.crop.x = 0;
+ port->es.video.crop.y = 0;
+ port->es.video.crop.width = f->fmt.pix.width;
+ port->es.video.crop.height = f->fmt.pix.height;
+ port->es.video.frame_rate.num =
+ dev->capture.timeperframe.denominator;
+ port->es.video.frame_rate.den =
+ dev->capture.timeperframe.numerator;
+
+ port->format.encoding = mfmt->mmal;
+ port->format.encoding_variant = 0;
+ /* Set any encoding specific parameters */
+ switch (mfmt->mmal_component) {
+ case COMP_VIDEO_ENCODE:
+ port->format.bitrate = dev->capture.encode_bitrate;
+ break;
+ case COMP_IMAGE_ENCODE:
+ /* Could set EXIF parameters here */
+ break;
+ default:
+ break;
+ }
+
+ ret = vchiq_mmal_port_set_format(dev->instance, port);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s failed to set format %dx%d fmt %08X\n",
+ __func__,
+ f->fmt.pix.width,
+ f->fmt.pix.height,
+ f->fmt.pix.pixelformat);
+ return ret;
+ }
+
+ ret = vchiq_mmal_component_enable(dev->instance, component);
+ if (ret) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "%s Failed to enable encode components\n", __func__);
+ return ret;
+ }
+
+ /* configure buffering */
+ port->current_buffer.num = 1;
+ port->current_buffer.size = f->fmt.pix.sizeimage;
+ if (port->format.encoding == MMAL_ENCODING_JPEG) {
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "JPG - buf size now %d was %d\n",
+ f->fmt.pix.sizeimage,
+ port->current_buffer.size);
+ port->current_buffer.size =
+ (f->fmt.pix.sizeimage < (100 << 10)) ?
+ (100 << 10) : f->fmt.pix.sizeimage;
+ }
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "vid_cap - cur_buf.size set to %d\n", f->fmt.pix.sizeimage);
+ port->current_buffer.alignment = 0;
+
+ return 0;
+}
+
static int mmal_setup_components(struct bm2835_mmal_dev *dev,
struct v4l2_format *f)
{
@@ -1075,8 +1204,7 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
}
remove_padding = mfmt->remove_padding;
- vchiq_mmal_port_parameter_set(dev->instance,
- camera_port,
+ vchiq_mmal_port_parameter_set(dev->instance, camera_port,
MMAL_PARAMETER_NO_IMAGE_PADDING,
&remove_padding, sizeof(remove_padding));
@@ -1096,46 +1224,7 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
if (!ret &&
camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_VIDEO]) {
- bool overlay_enabled =
- !!dev->component[COMP_PREVIEW]->enabled;
- struct vchiq_mmal_port *preview_port =
- &dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
- /* Preview and encode ports need to match on resolution */
- if (overlay_enabled) {
- /* Need to disable the overlay before we can update
- * the resolution
- */
- ret =
- vchiq_mmal_port_disable(dev->instance,
- preview_port);
- if (!ret)
- ret =
- vchiq_mmal_port_connect_tunnel(
- dev->instance,
- preview_port,
- NULL);
- }
- preview_port->es.video.width = f->fmt.pix.width;
- preview_port->es.video.height = f->fmt.pix.height;
- preview_port->es.video.crop.x = 0;
- preview_port->es.video.crop.y = 0;
- preview_port->es.video.crop.width = f->fmt.pix.width;
- preview_port->es.video.crop.height = f->fmt.pix.height;
- preview_port->es.video.frame_rate.num =
- dev->capture.timeperframe.denominator;
- preview_port->es.video.frame_rate.den =
- dev->capture.timeperframe.numerator;
- ret = vchiq_mmal_port_set_format(dev->instance, preview_port);
- if (overlay_enabled) {
- ret = vchiq_mmal_port_connect_tunnel(
- dev->instance,
- preview_port,
- &dev->component[COMP_PREVIEW]->input[0]);
- if (!ret)
- ret = vchiq_mmal_port_enable(dev->instance,
- preview_port,
- NULL);
- }
+ ret = mmal_setup_video_component(dev, f);
}
if (ret) {
@@ -1145,128 +1234,39 @@ static int mmal_setup_components(struct bm2835_mmal_dev *dev,
f->fmt.pix.pixelformat);
/* ensure capture is not going to be tried */
dev->capture.port = NULL;
- } else {
- if (encode_component) {
- v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
- "vid_cap - set up encode comp\n");
+ return ret;
+ }
- /* configure buffering */
- camera_port->current_buffer.size =
- camera_port->recommended_buffer.size;
- camera_port->current_buffer.num =
- camera_port->recommended_buffer.num;
+ if (encode_component) {
+ ret = mmal_setup_encode_component(dev, f, port,
+ camera_port,
+ encode_component);
- ret =
- vchiq_mmal_port_connect_tunnel(
- dev->instance,
- camera_port,
- &encode_component->input[0]);
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s failed to create connection\n",
- __func__);
- /* ensure capture is not going to be tried */
- dev->capture.port = NULL;
- } else {
- port->es.video.width = f->fmt.pix.width;
- port->es.video.height = f->fmt.pix.height;
- port->es.video.crop.x = 0;
- port->es.video.crop.y = 0;
- port->es.video.crop.width = f->fmt.pix.width;
- port->es.video.crop.height = f->fmt.pix.height;
- port->es.video.frame_rate.num =
- dev->capture.timeperframe.denominator;
- port->es.video.frame_rate.den =
- dev->capture.timeperframe.numerator;
-
- port->format.encoding = mfmt->mmal;
- port->format.encoding_variant = 0;
- /* Set any encoding specific parameters */
- switch (mfmt->mmal_component) {
- case COMP_VIDEO_ENCODE:
- port->format.bitrate =
- dev->capture.encode_bitrate;
- break;
- case COMP_IMAGE_ENCODE:
- /* Could set EXIF parameters here */
- break;
- default:
- break;
- }
- ret = vchiq_mmal_port_set_format(dev->instance,
- port);
- if (ret)
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s failed to set format %dx%d fmt %08X\n",
- __func__,
- f->fmt.pix.width,
- f->fmt.pix.height,
- f->fmt.pix.pixelformat
- );
- }
+ if (ret)
+ return ret;
+ } else {
+ /* configure buffering */
+ camera_port->current_buffer.num = 1;
+ camera_port->current_buffer.size = f->fmt.pix.sizeimage;
+ camera_port->current_buffer.alignment = 0;
+ }
- if (!ret) {
- ret = vchiq_mmal_component_enable(
- dev->instance,
- encode_component);
- if (ret) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "%s Failed to enable encode components\n",
- __func__);
- }
- }
- if (!ret) {
- /* configure buffering */
- port->current_buffer.num = 1;
- port->current_buffer.size =
- f->fmt.pix.sizeimage;
- if (port->format.encoding ==
- MMAL_ENCODING_JPEG) {
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "JPG - buf size now %d was %d\n",
- f->fmt.pix.sizeimage,
- port->current_buffer.size);
- port->current_buffer.size =
- (f->fmt.pix.sizeimage <
- (100 << 10)) ?
- (100 << 10) : f->fmt.pix.sizeimage;
- }
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "vid_cap - cur_buf.size set to %d\n",
- f->fmt.pix.sizeimage);
- port->current_buffer.alignment = 0;
- }
- } else {
- /* configure buffering */
- camera_port->current_buffer.num = 1;
- camera_port->current_buffer.size = f->fmt.pix.sizeimage;
- camera_port->current_buffer.alignment = 0;
- }
+ dev->capture.fmt = mfmt;
+ dev->capture.stride = f->fmt.pix.bytesperline;
+ dev->capture.width = camera_port->es.video.crop.width;
+ dev->capture.height = camera_port->es.video.crop.height;
+ dev->capture.buffersize = port->current_buffer.size;
- if (!ret) {
- dev->capture.fmt = mfmt;
- dev->capture.stride = f->fmt.pix.bytesperline;
- dev->capture.width = camera_port->es.video.crop.width;
- dev->capture.height = camera_port->es.video.crop.height;
- dev->capture.buffersize = port->current_buffer.size;
-
- /* select port for capture */
- dev->capture.port = port;
- dev->capture.camera_port = camera_port;
- dev->capture.encode_component = encode_component;
- v4l2_dbg(1, bcm2835_v4l2_debug,
- &dev->v4l2_dev,
- "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
- port->format.encoding,
- dev->capture.width, dev->capture.height,
- dev->capture.stride, dev->capture.buffersize);
- }
- }
+ /* select port for capture */
+ dev->capture.port = port;
+ dev->capture.camera_port = camera_port;
+ dev->capture.encode_component = encode_component;
+ v4l2_dbg(1, bcm2835_v4l2_debug,
+ &dev->v4l2_dev,
+ "Set dev->capture.fmt %08X, %dx%d, stride %d, size %d",
+ port->format.encoding,
+ dev->capture.width, dev->capture.height,
+ dev->capture.stride, dev->capture.buffersize);
/* todo: Need to convert the vchiq/mmal error into a v4l2 error. */
return ret;
@@ -1658,9 +1658,8 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
dev->capture.enc_level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
/* get the preview component ready */
- ret = vchiq_mmal_component_init(
- dev->instance, "ril.video_render",
- &dev->component[COMP_PREVIEW]);
+ ret = vchiq_mmal_component_init(dev->instance, "ril.video_render",
+ &dev->component[COMP_PREVIEW]);
if (ret < 0)
goto unreg_camera;
@@ -1672,9 +1671,8 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
}
/* get the image encoder component ready */
- ret = vchiq_mmal_component_init(
- dev->instance, "ril.image_encode",
- &dev->component[COMP_IMAGE_ENCODE]);
+ ret = vchiq_mmal_component_init(dev->instance, "ril.image_encode",
+ &dev->component[COMP_IMAGE_ENCODE]);
if (ret < 0)
goto unreg_preview;
@@ -1734,15 +1732,13 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
unreg_vid_encoder:
pr_err("Cleanup: Destroy video encoder\n");
- vchiq_mmal_component_finalise(
- dev->instance,
- dev->component[COMP_VIDEO_ENCODE]);
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[COMP_VIDEO_ENCODE]);
unreg_image_encoder:
pr_err("Cleanup: Destroy image encoder\n");
- vchiq_mmal_component_finalise(
- dev->instance,
- dev->component[COMP_IMAGE_ENCODE]);
+ vchiq_mmal_component_finalise(dev->instance,
+ dev->component[COMP_IMAGE_ENCODE]);
unreg_preview:
pr_err("Cleanup: Destroy video render\n");
@@ -1775,8 +1771,7 @@ static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
/* video device needs to be able to access instance data */
video_set_drvdata(vfd, dev);
- ret = video_register_device(vfd,
- VFL_TYPE_VIDEO,
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO,
video_nr[dev->camera_num]);
if (ret < 0)
return ret;
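The net effect of the bcm2835-camera.c changes above is a de-nesting refactor: the preview and encode setup paths move out of mmal_setup_components() into mmal_setup_video_component() and mmal_setup_encode_component(), and the remaining error handling switches from nested "if (!ret)" blocks to early returns. A schematic sketch of that pattern, not of the driver itself; every name below is a placeholder:

struct ctx { int dummy; };
static int step_one(struct ctx *c) { return 0; }	/* placeholder */
static int step_two(struct ctx *c) { return 0; }	/* placeholder */
static void commit(struct ctx *c) { }			/* placeholder */

/* Before: the success path is buried inside nested conditionals */
static int setup_before(struct ctx *c)
{
	int ret = step_one(c);

	if (!ret) {
		ret = step_two(c);
		if (!ret)
			commit(c);
	}
	return ret;
}

/* After: each failure returns immediately and the success path reads straight */
static int setup_after(struct ctx *c)
{
	int ret;

	ret = step_one(c);
	if (ret)
		return ret;

	ret = step_two(c);
	if (ret)
		return ret;

	commit(c);
	return 0;
}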
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
index b5fce38de038..75524adff0f5 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
@@ -30,79 +30,77 @@ enum {
CAM_PORT_COUNT
};
-#define PREVIEW_LAYER 2
-
extern int bcm2835_v4l2_debug;
struct bm2835_mmal_dev {
/* v4l2 devices */
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex mutex;
+ struct v4l2_device v4l2_dev;
+ struct video_device vdev;
+ struct mutex mutex;
/* controls */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *ctrls[V4L2_CTRL_COUNT];
- enum v4l2_scene_mode scene_mode;
- struct mmal_colourfx colourfx;
- int hflip;
- int vflip;
- int red_gain;
- int blue_gain;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrls[V4L2_CTRL_COUNT];
+ enum v4l2_scene_mode scene_mode;
+ struct mmal_colourfx colourfx;
+ int hflip;
+ int vflip;
+ int red_gain;
+ int blue_gain;
enum mmal_parameter_exposuremode exposure_mode_user;
enum v4l2_exposure_auto_type exposure_mode_v4l2_user;
/* active exposure mode may differ if selected via a scene mode */
enum mmal_parameter_exposuremode exposure_mode_active;
enum mmal_parameter_exposuremeteringmode metering_mode;
- unsigned int manual_shutter_speed;
- bool exp_auto_priority;
+ unsigned int manual_shutter_speed;
+ bool exp_auto_priority;
bool manual_iso_enabled;
u32 iso;
/* allocated mmal instance and components */
- struct vchiq_mmal_instance *instance;
- struct vchiq_mmal_component *component[COMP_COUNT];
+ struct vchiq_mmal_instance *instance;
+ struct vchiq_mmal_component *component[COMP_COUNT];
int camera_use_count;
struct v4l2_window overlay;
struct {
- unsigned int width; /* width */
- unsigned int height; /* height */
- unsigned int stride; /* stride */
- unsigned int buffersize; /* buffer size with padding */
- struct mmal_fmt *fmt;
+ unsigned int width; /* width */
+ unsigned int height; /* height */
+ unsigned int stride; /* stride */
+ unsigned int buffersize; /* buffer size with padding */
+ struct mmal_fmt *fmt;
struct v4l2_fract timeperframe;
/* H264 encode bitrate */
- int encode_bitrate;
+ int encode_bitrate;
/* H264 bitrate mode. CBR/VBR */
- int encode_bitrate_mode;
+ int encode_bitrate_mode;
/* H264 profile */
enum v4l2_mpeg_video_h264_profile enc_profile;
/* H264 level */
enum v4l2_mpeg_video_h264_level enc_level;
/* JPEG Q-factor */
- int q_factor;
+ int q_factor;
- struct vb2_queue vb_vidq;
+ struct vb2_queue vb_vidq;
/* VC start timestamp for streaming */
- s64 vc_start_timestamp;
+ s64 vc_start_timestamp;
/* Kernel start timestamp for streaming */
ktime_t kernel_start_ts;
/* Sequence number of last buffer */
- u32 sequence;
+ u32 sequence;
- struct vchiq_mmal_port *port; /* port being used for capture */
+ struct vchiq_mmal_port *port; /* port being used for capture */
/* camera port being used for capture */
- struct vchiq_mmal_port *camera_port;
+ struct vchiq_mmal_port *camera_port;
/* component being used for encode */
struct vchiq_mmal_component *encode_component;
/* number of frames remaining which driver should capture */
- unsigned int frame_count;
+ unsigned int frame_count;
/* last frame completion */
- struct completion frame_cmplt;
+ struct completion frame_cmplt;
} capture;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 5137fcf203d6..b096a12387f7 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -135,8 +135,8 @@ static const struct v4l2_to_mmal_effects_setting
};
struct v4l2_mmal_scene_config {
- enum v4l2_scene_mode v4l2_scene;
- enum mmal_parameter_exposuremode exposure_mode;
+ enum v4l2_scene_mode v4l2_scene;
+ enum mmal_parameter_exposuremode exposure_mode;
enum mmal_parameter_exposuremeteringmode metering_mode;
};
@@ -377,11 +377,9 @@ static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT;
break;
- /* todo matrix weighting not added to Linux API till 3.9
- * case V4L2_EXPOSURE_METERING_MATRIX:
- * dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
- * break;
- */
+ case V4L2_EXPOSURE_METERING_MATRIX:
+ dev->metering_mode = MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX;
+ break;
}
if (dev->scene_mode == V4L2_SCENE_MODE_NONE) {
@@ -516,42 +514,41 @@ static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
struct mmal_parameter_imagefx_parameters imagefx;
for (i = 0; i < ARRAY_SIZE(v4l2_to_mmal_effects_values); i++) {
- if (ctrl->val == v4l2_to_mmal_effects_values[i].v4l2_effect) {
- imagefx.effect =
- v4l2_to_mmal_effects_values[i].mmal_effect;
- imagefx.num_effect_params =
- v4l2_to_mmal_effects_values[i].num_effect_params;
-
- if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
- imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
-
- for (j = 0; j < imagefx.num_effect_params; j++)
- imagefx.effect_parameter[j] =
- v4l2_to_mmal_effects_values[i].effect_params[j];
-
- dev->colourfx.enable =
- v4l2_to_mmal_effects_values[i].col_fx_enable;
- if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
- dev->colourfx.u =
- v4l2_to_mmal_effects_values[i].u;
- dev->colourfx.v =
- v4l2_to_mmal_effects_values[i].v;
- }
+ if (ctrl->val != v4l2_to_mmal_effects_values[i].v4l2_effect)
+ continue;
+
+ imagefx.effect =
+ v4l2_to_mmal_effects_values[i].mmal_effect;
+ imagefx.num_effect_params =
+ v4l2_to_mmal_effects_values[i].num_effect_params;
- control = &dev->component[COMP_CAMERA]->control;
+ if (imagefx.num_effect_params > MMAL_MAX_IMAGEFX_PARAMETERS)
+ imagefx.num_effect_params = MMAL_MAX_IMAGEFX_PARAMETERS;
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
- &imagefx, sizeof(imagefx));
- if (ret)
- goto exit;
+ for (j = 0; j < imagefx.num_effect_params; j++)
+ imagefx.effect_parameter[j] =
+ v4l2_to_mmal_effects_values[i].effect_params[j];
- ret = vchiq_mmal_port_parameter_set(
- dev->instance, control,
- MMAL_PARAMETER_COLOUR_EFFECT,
- &dev->colourfx, sizeof(dev->colourfx));
+ dev->colourfx.enable =
+ v4l2_to_mmal_effects_values[i].col_fx_enable;
+ if (!v4l2_to_mmal_effects_values[i].col_fx_fixed_cbcr) {
+ dev->colourfx.u = v4l2_to_mmal_effects_values[i].u;
+ dev->colourfx.v = v4l2_to_mmal_effects_values[i].v;
}
+
+ control = &dev->component[COMP_CAMERA]->control;
+
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance, control,
+ MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
+ &imagefx, sizeof(imagefx));
+ if (ret)
+ goto exit;
+
+ ret = vchiq_mmal_port_parameter_set(
+ dev->instance, control,
+ MMAL_PARAMETER_COLOUR_EFFECT,
+ &dev->colourfx, sizeof(dev->colourfx));
}
exit:
@@ -841,8 +838,7 @@ static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
enum mmal_parameter_exposuremeteringmode metering_mode;
for (i = 0; i < ARRAY_SIZE(scene_configs); i++) {
- if (scene_configs[i].v4l2_scene ==
- ctrl->val) {
+ if (scene_configs[i].v4l2_scene == ctrl->val) {
scene = &scene_configs[i];
break;
}
@@ -1045,8 +1041,8 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
{
.id = V4L2_CID_EXPOSURE_METERING,
.type = MMAL_CONTROL_TYPE_STD_MENU,
- .min = ~0x7,
- .max = V4L2_EXPOSURE_METERING_SPOT,
+ .min = ~0xf,
+ .max = V4L2_EXPOSURE_METERING_MATRIX,
.def = V4L2_EXPOSURE_METERING_AVERAGE,
.step = 0,
.imenu = NULL,
@@ -1282,21 +1278,18 @@ int set_framerate_params(struct bm2835_mmal_dev *dev)
struct mmal_parameter_fps_range fps_range;
int ret;
+ fps_range.fps_high.num = dev->capture.timeperframe.denominator;
+ fps_range.fps_high.den = dev->capture.timeperframe.numerator;
+
if ((dev->exposure_mode_active != MMAL_PARAM_EXPOSUREMODE_OFF) &&
(dev->exp_auto_priority)) {
- /* Variable FPS. Define min FPS as 1fps.
- * Max as max defined FPS.
- */
+ /* Variable FPS. Define min FPS as 1fps. */
fps_range.fps_low.num = 1;
fps_range.fps_low.den = 1;
- fps_range.fps_high.num = dev->capture.timeperframe.denominator;
- fps_range.fps_high.den = dev->capture.timeperframe.numerator;
} else {
/* Fixed FPS - set min and max to be the same */
- fps_range.fps_low.num = fps_range.fps_high.num =
- dev->capture.timeperframe.denominator;
- fps_range.fps_low.den = fps_range.fps_high.den =
- dev->capture.timeperframe.numerator;
+ fps_range.fps_low.num = fps_range.fps_high.num;
+ fps_range.fps_low.den = fps_range.fps_high.den;
}
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
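Two details in the controls.c hunks above are worth spelling out. First, enabling V4L2_EXPOSURE_METERING_MATRIX also widens the menu mask for the V4L2_CID_EXPOSURE_METERING control: the .min field of a MMAL_CONTROL_TYPE_STD_MENU table entry appears to be used as the skip mask for the V4L2 menu-control setup, so ~0x7 exposed only items 0-2 (AVERAGE, CENTER_WEIGHTED, SPOT) and ~0xf now also exposes item 3 (MATRIX). Second, set_framerate_params() now fills fps_high once up front and derives fps_low from it. A small worked example of the mask arithmetic only; the helper name is hypothetical:

/* Bit n set in the skip mask hides menu item n; ~0xf clears bits 0..3, so
 * items 0..3 stay visible while everything above them is skipped.
 */
static bool menu_item_visible(unsigned int item, u64 skip_mask)
{
	return !(skip_mask & (1ULL << item));
}

/* menu_item_visible(V4L2_EXPOSURE_METERING_MATRIX, ~0x7) == false
 * menu_item_visible(V4L2_EXPOSURE_METERING_MATRIX, ~0xf) == true
 * (V4L2_EXPOSURE_METERING_MATRIX has the value 3)
 */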
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
index ff5398737b4a..ce88fac7c24b 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-common.h
@@ -26,13 +26,13 @@ struct mmal_msg_context;
/* mapping between v4l and mmal video modes */
struct mmal_fmt {
- u32 fourcc; /* v4l2 format id */
- int flags; /* v4l2 flags field */
- u32 mmal;
- int depth;
- u32 mmal_component; /* MMAL component index to be used to encode */
- u32 ybbp; /* depth of first Y plane for planar formats */
- bool remove_padding; /* Does the GPU have to remove padding,
+ u32 fourcc; /* v4l2 format id */
+ int flags; /* v4l2 flags field */
+ u32 mmal;
+ int depth;
+ u32 mmal_component; /* MMAL component index to be used to encode */
+ u32 ybbp; /* depth of first Y plane for planar formats */
+ bool remove_padding; /* Does the GPU have to remove padding,
* or can we do hide padding via bytesperline.
*/
};
@@ -40,10 +40,10 @@ struct mmal_fmt {
/* buffer for one video frame */
struct mmal_buffer {
/* v4l buffer data -- must be first */
- struct vb2_v4l2_buffer vb;
+ struct vb2_v4l2_buffer vb;
/* list of buffers available */
- struct list_head list;
+ struct list_head list;
void *buffer; /* buffer pointer */
unsigned long buffer_size; /* size of allocated buffer */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
index 80a99128f5f3..f4ac5a6149ea 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-parameters.h
@@ -724,11 +724,11 @@ struct mmal_parameter_imagefx_parameters {
#define MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN 16
struct mmal_parameter_camera_info_camera_t {
- u32 port_id;
- u32 max_width;
- u32 max_height;
- u32 lens_present;
- u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
+ u32 port_id;
+ u32 max_width;
+ u32 max_height;
+ u32 lens_present;
+ u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN];
};
enum mmal_parameter_camera_info_flash_type_t {
@@ -744,8 +744,8 @@ struct mmal_parameter_camera_info_flash_t {
};
struct mmal_parameter_camera_info_t {
- u32 num_cameras;
- u32 num_flashes;
+ u32 num_cameras;
+ u32 num_flashes;
struct mmal_parameter_camera_info_camera_t
cameras[MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS];
struct mmal_parameter_camera_info_flash_t
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index ff2b960d8cac..1a981e98e82b 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -60,32 +60,18 @@ struct vchi_service_handle;
* (local / remote)
*****************************************************************************/
-#ifdef __cplusplus
-extern "C" {
-#endif
-
// Routine used to initialise the vchi on both local + remote connections
extern int32_t vchi_initialise(struct vchi_instance_handle **instance_handle);
-extern int32_t vchi_exit(void);
-
extern int32_t vchi_connect(struct vchi_instance_handle *instance_handle);
//When this is called, ensure that all services have no data pending.
//Bulk transfers can remain 'queued'
extern int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle);
-// helper functions
-extern void *vchi_allocate_buffer(struct vchi_service_handle *handle, uint32_t *length);
-extern void vchi_free_buffer(struct vchi_service_handle *handle, void *address);
-extern uint32_t vchi_current_time(struct vchi_instance_handle *instance_handle);
-
/******************************************************************************
* Global service API
*****************************************************************************/
-// Routine to destroy a service
-extern int32_t vchi_service_destroy(const struct vchi_service_handle *handle);
-
// Routine to open a named service
extern int32_t vchi_service_open(struct vchi_instance_handle *instance_handle,
struct service_creation *setup,
@@ -103,23 +89,12 @@ extern int32_t vchi_service_use(const struct vchi_service_handle *handle);
// Routine to decrement ref count on a named service
extern int32_t vchi_service_release(const struct vchi_service_handle *handle);
-// Routine to set a control option for a named service
-extern int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
- enum vchi_service_option option,
- int value);
-
/* Routine to send a message from kernel memory across a service */
extern int
vchi_queue_kernel_message(struct vchi_service_handle *handle,
void *data,
unsigned int size);
-/* Routine to send a message from user memory across a service */
-extern int
-vchi_queue_user_message(struct vchi_service_handle *handle,
- void __user *data,
- unsigned int size);
-
// Routine to receive a msg from a service
// Dequeue is equivalent to hold, copy into client buffer, release
extern int32_t vchi_msg_dequeue(struct vchi_service_handle *handle,
@@ -149,54 +124,14 @@ extern int32_t vchi_msg_hold(struct vchi_service_handle *handle,
enum vchi_flags flags,
struct vchi_held_msg *message_descriptor);
-// Initialise an iterator to look through messages in place
-extern int32_t vchi_msg_look_ahead(struct vchi_service_handle *handle,
- struct vchi_msg_iter *iter,
- enum vchi_flags flags);
-
/*******************************************************************************
* Global service support API - operations on held messages
* and message iterators
******************************************************************************/
-// Routine to get the address of a held message
-extern void *vchi_held_msg_ptr(const struct vchi_held_msg *message);
-
-// Routine to get the size of a held message
-extern int32_t vchi_held_msg_size(const struct vchi_held_msg *message);
-
-// Routine to get the transmit timestamp as written into the header by the peer
-extern uint32_t vchi_held_msg_tx_timestamp(const struct vchi_held_msg *message);
-
-// Routine to get the reception timestamp, written as we parsed the header
-extern uint32_t vchi_held_msg_rx_timestamp(const struct vchi_held_msg *message);
-
// Routine to release a held message after it has been processed
extern int32_t vchi_held_msg_release(struct vchi_held_msg *message);
-// Indicates whether the iterator has a next message.
-extern int32_t vchi_msg_iter_has_next(const struct vchi_msg_iter *iter);
-
-// Return the pointer and length for the next message and advance the iterator.
-extern int32_t vchi_msg_iter_next(struct vchi_msg_iter *iter,
- void **data,
- uint32_t *msg_size);
-
-// Remove the last message returned by vchi_msg_iter_next.
-// Can only be called once after each call to vchi_msg_iter_next.
-extern int32_t vchi_msg_iter_remove(struct vchi_msg_iter *iter);
-
-// Hold the last message returned by vchi_msg_iter_next.
-// Can only be called once after each call to vchi_msg_iter_next.
-extern int32_t vchi_msg_iter_hold(struct vchi_msg_iter *iter,
- struct vchi_held_msg *message);
-
-// Return information for the next message, and hold it, advancing the iterator.
-extern int32_t vchi_msg_iter_hold_next(struct vchi_msg_iter *iter,
- void **data, // } may be NULL
- uint32_t *msg_size, // }
- struct vchi_held_msg *message);
-
/******************************************************************************
* Global bulk API
*****************************************************************************/
@@ -208,13 +143,6 @@ extern int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle,
enum vchi_flags flags,
void *transfer_handle);
-// Prepare interface for a transfer from the other side into relocatable memory.
-int32_t vchi_bulk_queue_receive_reloc(const struct vchi_service_handle *handle,
- uint32_t offset,
- uint32_t data_size,
- const enum vchi_flags flags,
- void * const bulk_handle);
-
// Routine to queue up data ready for transfer to the other (once they have signalled they are ready)
extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
const void *data_src,
@@ -226,15 +154,6 @@ extern int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
* Configuration plumbing
*****************************************************************************/
-#ifdef __cplusplus
-}
-#endif
-
-extern int32_t vchi_bulk_queue_transmit_reloc(struct vchi_service_handle *handle,
- uint32_t offset,
- uint32_t data_size,
- enum vchi_flags flags,
- void *transfer_handle);
#endif /* VCHI_H_ */
/****************************** End of file **********************************/
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c18c6ca0b6c0..38a13e4618a8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -371,14 +371,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
pagelistinfo->scatterlist = scatterlist;
pagelistinfo->scatterlist_mapped = 0;
- if (is_vmalloc_addr(buf)) {
+ if (is_vmalloc_addr((void __force *)buf)) {
unsigned long length = count;
unsigned int off = offset;
for (actual_pages = 0; actual_pages < num_pages;
actual_pages++) {
- struct page *pg = vmalloc_to_page(buf + (actual_pages *
- PAGE_SIZE));
+ struct page *pg =
+ vmalloc_to_page((void __force *)(buf +
+ (actual_pages * PAGE_SIZE)));
size_t bytes = PAGE_SIZE - off;
if (!pg) {
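The create_pagelist() change above only affects sparse annotations: buf is declared char __user *, but on this path it is checked against (and treated as) a vmalloc'd kernel address, and is_vmalloc_addr()/vmalloc_to_page() take plain kernel pointers, so the __force cast drops the __user address-space tag without changing the generated code. A minimal illustration of the idiom; the function name is illustrative:

/* Sketch only: casting away __user for a pointer known on this code path to
 * be a kernel vmalloc address. __force tells sparse the address-space change
 * is intentional and has no effect on the compiled code.
 */
static bool example_is_vmalloc_user_alias(const char __user *buf)
{
	return is_vmalloc_addr((const void __force *)buf);
}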
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index a1ea9777a444..28ea8c3a4cba 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1209,7 +1209,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* The completion must point to the
** msgbuf. */
- completion->header = msgbuf;
+ completion->header =
+ (struct vchiq_header __force *)
+ msgbuf;
}
if ((completion->reason ==
@@ -2353,7 +2355,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum vchiq_status ret = VCHIQ_SUCCESS;
char entity[16];
int *entity_uc;
- int local_uc, local_entity_uc;
+ int local_uc;
if (!arm_state)
goto out;
@@ -2377,7 +2379,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
write_lock_bh(&arm_state->susp_res_lock);
local_uc = ++arm_state->videocore_use_count;
- local_entity_uc = ++(*entity_uc);
+ ++(*entity_uc);
vchiq_log_trace(vchiq_susp_log_level,
"%s %s count %d, state count %d",
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 1640906e3929..79b75efa6868 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -14,12 +14,7 @@ static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
static int g_once_init;
static struct mutex g_connected_mutex;
-/****************************************************************************
-*
-* Function to initialize our lock.
-*
-***************************************************************************/
-
+/* Function to initialize our lock */
static void connected_init(void)
{
if (!g_once_init) {
@@ -28,15 +23,12 @@ static void connected_init(void)
}
}
-/****************************************************************************
-*
-* This function is used to defer initialization until the vchiq stack is
-* initialized. If the stack is already initialized, then the callback will
-* be made immediately, otherwise it will be deferred until
-* vchiq_call_connected_callbacks is called.
-*
-***************************************************************************/
-
+/*
+ * This function is used to defer initialization until the vchiq stack is
+ * initialized. If the stack is already initialized, then the callback will
+ * be made immediately, otherwise it will be deferred until
+ * vchiq_call_connected_callbacks is called.
+ */
void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
{
connected_init();
@@ -63,13 +55,10 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
mutex_unlock(&g_connected_mutex);
}
-/****************************************************************************
-*
-* This function is called by the vchiq stack once it has been connected to
-* the videocore and clients can start to use the stack.
-*
-***************************************************************************/
-
+/*
+ * This function is called by the vchiq stack once it has been connected to
+ * the videocore and clients can start to use the stack.
+ */
void vchiq_call_connected_callbacks(void)
{
int i;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index edcd97373809..ae9183db44ee 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -372,6 +372,10 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
service->state->id, service->handle);
status = VCHIQ_SUCCESS;
}
+
+ if (reason != VCHIQ_MESSAGE_AVAILABLE)
+ vchiq_release_message(service->handle, header);
+
return status;
}
@@ -1480,15 +1484,6 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
: VCHIQ_SRVSTATE_OPEN);
}
- service->remoteport = remoteport;
- service->client_id = ((int *)header->data)[1];
- if (make_service_callback(service, VCHIQ_SERVICE_OPENED,
- NULL, NULL) == VCHIQ_RETRY) {
- /* Bail out if not ready */
- service->remoteport = VCHIQ_PORT_FREE;
- goto bail_not_ready;
- }
-
/* Success - the message has been dealt with */
unlock_service(service);
return 1;
@@ -3147,6 +3142,12 @@ error_exit:
return status;
}
+enum vchiq_status vchiq_queue_kernel_message(unsigned int handle, void *context,
+ size_t size)
+{
+ return vchiq_queue_message(handle, memcpy_copy_callback, context, size);
+}
+
void
vchiq_release_message(unsigned int handle,
struct vchiq_header *header)
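The vchiq_core.c hunks above do three related things: make_service_callback() now releases the message header itself for every reason other than VCHIQ_MESSAGE_AVAILABLE, parse_open() no longer issues the now-unused VCHIQ_SERVICE_OPENED callback, and a vchiq_queue_kernel_message() wrapper is added so in-kernel callers can queue a plain buffer without supplying a copy callback. A hedged usage sketch of the new wrapper; the service handle and payload are illustrative:

/* Usage sketch, assuming an already-opened service identified by 'handle'.
 * The wrapper forwards to vchiq_queue_message() with the internal
 * memcpy_copy_callback, so the caller just hands over a kernel buffer.
 */
static enum vchiq_status example_send(unsigned int handle)
{
	struct {
		u32 command;
		u32 argument;
	} payload = { .command = 1, .argument = 0 };

	return vchiq_queue_kernel_message(handle, &payload, sizeof(payload));
}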
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index cedd8e721aae..1fe6cd8b86c0 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -587,6 +587,13 @@ lock_service(struct vchiq_service *service);
extern void
unlock_service(struct vchiq_service *service);
+extern enum vchiq_status
+vchiq_queue_message(unsigned int handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size);
+
/* The following functions are called from vchiq_core, and external
** implementations must be provided. */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 39b77ea19210..b62fd6d6f1ac 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -105,12 +105,8 @@ extern enum vchiq_status vchiq_close_service(unsigned int service);
extern enum vchiq_status vchiq_remove_service(unsigned int service);
extern enum vchiq_status vchiq_use_service(unsigned int service);
extern enum vchiq_status vchiq_release_service(unsigned int service);
-extern enum vchiq_status
-vchiq_queue_message(unsigned int handle,
- ssize_t (*copy_callback)(void *context, void *dest,
- size_t offset, size_t maxsize),
- void *context,
- size_t size);
+extern enum vchiq_status vchiq_queue_kernel_message(unsigned int handle,
+ void *context, size_t size);
extern void vchiq_release_message(unsigned int service,
struct vchiq_header *header);
extern enum vchiq_status vchiq_bulk_transmit(unsigned int service,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index efdd3b1c7d85..75d87b6992c4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -9,8 +9,6 @@
#include "vchiq_util.h"
-#define vchiq_status_to_vchi(status) ((int32_t)status)
-
struct shim_service {
unsigned int handle;
@@ -84,35 +82,15 @@ int32_t vchi_msg_remove(struct vchi_service_handle *handle)
}
EXPORT_SYMBOL(vchi_msg_remove);
-/***********************************************************
- * Name: vchi_msg_queue
- *
- * Arguments: struct vchi_service_handle *handle,
- * ssize_t (*copy_callback)(void *context, void *dest,
- * size_t offset, size_t maxsize),
- * void *context,
- * uint32_t data_size
- *
- * Description: Thin wrapper to queue a message onto a connection
- *
- * Returns: int32_t - success == 0
- *
- ***********************************************************/
-static
-int32_t vchi_msg_queue(struct vchi_service_handle *handle,
- ssize_t (*copy_callback)(void *context, void *dest,
- size_t offset, size_t maxsize),
- void *context,
- uint32_t data_size)
+int vchi_queue_kernel_message(struct vchi_service_handle *handle, void *data,
+ unsigned int size)
{
struct shim_service *service = (struct shim_service *)handle;
enum vchiq_status status;
while (1) {
- status = vchiq_queue_message(service->handle,
- copy_callback,
- context,
- data_size);
+ status = vchiq_queue_kernel_message(service->handle, data,
+ size);
/*
* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
@@ -125,65 +103,10 @@ int32_t vchi_msg_queue(struct vchi_service_handle *handle,
msleep(1);
}
- return vchiq_status_to_vchi(status);
-}
-
-static ssize_t
-vchi_queue_kernel_message_callback(void *context,
- void *dest,
- size_t offset,
- size_t maxsize)
-{
- memcpy(dest, context + offset, maxsize);
- return maxsize;
-}
-
-int
-vchi_queue_kernel_message(struct vchi_service_handle *handle,
- void *data,
- unsigned int size)
-{
- return vchi_msg_queue(handle,
- vchi_queue_kernel_message_callback,
- data,
- size);
+ return status;
}
EXPORT_SYMBOL(vchi_queue_kernel_message);
-struct vchi_queue_user_message_context {
- void __user *data;
-};
-
-static ssize_t
-vchi_queue_user_message_callback(void *context,
- void *dest,
- size_t offset,
- size_t maxsize)
-{
- struct vchi_queue_user_message_context *copycontext = context;
-
- if (copy_from_user(dest, copycontext->data + offset, maxsize))
- return -EFAULT;
-
- return maxsize;
-}
-
-int
-vchi_queue_user_message(struct vchi_service_handle *handle,
- void __user *data,
- unsigned int size)
-{
- struct vchi_queue_user_message_context copycontext = {
- .data = data
- };
-
- return vchi_msg_queue(handle,
- vchi_queue_user_message_callback,
- &copycontext,
- size);
-}
-EXPORT_SYMBOL(vchi_queue_user_message);
-
/***********************************************************
* Name: vchi_bulk_queue_receive
*
@@ -221,7 +144,7 @@ int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_d
break;
default:
WARN(1, "unsupported message\n");
- return vchiq_status_to_vchi(VCHIQ_ERROR);
+ return VCHIQ_ERROR;
}
while (1) {
@@ -238,7 +161,7 @@ int32_t vchi_bulk_queue_receive(struct vchi_service_handle *handle, void *data_d
msleep(1);
}
- return vchiq_status_to_vchi(status);
+ return status;
}
EXPORT_SYMBOL(vchi_bulk_queue_receive);
@@ -282,7 +205,7 @@ int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
break;
default:
WARN(1, "unsupported message\n");
- return vchiq_status_to_vchi(VCHIQ_ERROR);
+ return VCHIQ_ERROR;
}
while (1) {
@@ -300,7 +223,7 @@ int32_t vchi_bulk_queue_transmit(struct vchi_service_handle *handle,
msleep(1);
}
- return vchiq_status_to_vchi(status);
+ return status;
}
EXPORT_SYMBOL(vchi_bulk_queue_transmit);
@@ -447,7 +370,7 @@ int32_t vchi_initialise(struct vchi_instance_handle **instance_handle)
*instance_handle = (struct vchi_instance_handle *)instance;
- return vchiq_status_to_vchi(status);
+ return status;
}
EXPORT_SYMBOL(vchi_initialise);
@@ -485,7 +408,7 @@ int32_t vchi_disconnect(struct vchi_instance_handle *instance_handle)
{
struct vchiq_instance *instance = (struct vchiq_instance *)instance_handle;
- return vchiq_status_to_vchi(vchiq_shutdown(instance));
+ return vchiq_shutdown(instance);
}
EXPORT_SYMBOL(vchi_disconnect);
@@ -521,7 +444,7 @@ static enum vchiq_status shim_callback(enum vchiq_reason reason,
service->callback(service->callback_param,
VCHI_CALLBACK_MSG_AVAILABLE, NULL);
- goto done;
+ break;
case VCHIQ_BULK_TRANSMIT_DONE:
service->callback(service->callback_param,
@@ -538,10 +461,6 @@ static enum vchiq_status shim_callback(enum vchiq_reason reason,
VCHI_CALLBACK_SERVICE_CLOSED, NULL);
break;
- case VCHIQ_SERVICE_OPENED:
- /* No equivalent VCHI reason */
- break;
-
case VCHIQ_BULK_TRANSMIT_ABORTED:
service->callback(service->callback_param,
VCHI_CALLBACK_BULK_TRANSMIT_ABORTED,
@@ -560,8 +479,6 @@ static enum vchiq_status shim_callback(enum vchiq_reason reason,
}
release:
- vchiq_release_message(service->handle, header);
-done:
return VCHIQ_SUCCESS;
}
@@ -636,62 +553,12 @@ int32_t vchi_service_close(const struct vchi_service_handle *handle)
if (status == VCHIQ_SUCCESS)
service_free(service);
- ret = vchiq_status_to_vchi(status);
+ ret = status;
}
return ret;
}
EXPORT_SYMBOL(vchi_service_close);
-int32_t vchi_service_destroy(const struct vchi_service_handle *handle)
-{
- int32_t ret = -1;
- struct shim_service *service = (struct shim_service *)handle;
-
- if (service) {
- enum vchiq_status status = vchiq_remove_service(service->handle);
-
- if (status == VCHIQ_SUCCESS) {
- service_free(service);
- service = NULL;
- }
-
- ret = vchiq_status_to_vchi(status);
- }
- return ret;
-}
-EXPORT_SYMBOL(vchi_service_destroy);
-
-int32_t vchi_service_set_option(const struct vchi_service_handle *handle,
- enum vchi_service_option option,
- int value)
-{
- int32_t ret = -1;
- struct shim_service *service = (struct shim_service *)handle;
- enum vchiq_service_option vchiq_option;
-
- switch (option) {
- case VCHI_SERVICE_OPTION_TRACE:
- vchiq_option = VCHIQ_SERVICE_OPTION_TRACE;
- break;
- case VCHI_SERVICE_OPTION_SYNCHRONOUS:
- vchiq_option = VCHIQ_SERVICE_OPTION_SYNCHRONOUS;
- break;
- default:
- service = NULL;
- break;
- }
- if (service) {
- enum vchiq_status status =
- vchiq_set_service_option(service->handle,
- vchiq_option,
- value);
-
- ret = vchiq_status_to_vchi(status);
- }
- return ret;
-}
-EXPORT_SYMBOL(vchi_service_set_option);
-
int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *peer_version)
{
int32_t ret = -1;
@@ -701,7 +568,7 @@ int32_t vchi_get_peer_version(const struct vchi_service_handle *handle, short *p
enum vchiq_status status;
status = vchiq_get_peer_version(service->handle, peer_version);
- ret = vchiq_status_to_vchi(status);
+ ret = status;
}
return ret;
}
@@ -723,7 +590,7 @@ int32_t vchi_service_use(const struct vchi_service_handle *handle)
struct shim_service *service = (struct shim_service *)handle;
if (service)
- ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
+ ret = vchiq_use_service(service->handle);
return ret;
}
EXPORT_SYMBOL(vchi_service_use);
@@ -744,8 +611,7 @@ int32_t vchi_service_release(const struct vchi_service_handle *handle)
struct shim_service *service = (struct shim_service *)handle;
if (service)
- ret = vchiq_status_to_vchi(
- vchiq_release_service(service->handle));
+ ret = vchiq_release_service(service->handle);
return ret;
}
EXPORT_SYMBOL(vchi_service_release);
diff --git a/drivers/staging/vt6655/Makefile b/drivers/staging/vt6655/Makefile
index a151f30fc46f..e70357ec0af8 100644
--- a/drivers/staging/vt6655/Makefile
+++ b/drivers/staging/vt6655/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# TODO: all of these should be removed
-ccflags-y := -DLINUX -D__KERNEL__ -D__NO_VERSION__
-ccflags-y += -DHOSTAP
vt6655_stage-y += device_main.o \
card.o \
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index b4cdc0b7fee7..6b25d75d2501 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -12,12 +12,10 @@
* Date: Aug.22, 2002
*
* Functions:
- * BBuGetFrameTime - Calculate data frame transmitting time
- * BBvCalculateParameter - Calculate PhyLength, PhyService and Phy Signal
- * parameter for baseband Tx
- * BBbReadEmbedded - Embedded read baseband register via MAC
- * BBbWriteEmbedded - Embedded write baseband register via MAC
- * BBbVT3253Init - VIA VT3253 baseband chip init code
+ * bb_get_frame_time - Calculate data frame transmitting time
+ * bb_read_embedded - Embedded read baseband register via MAC
+ * bb_write_embedded - Embedded write baseband register via MAC
+ * bb_vt3253_init - VIA VT3253 baseband chip init code
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -1695,53 +1693,53 @@ static const unsigned short awcFrameTime[MAX_RATE] = {
*
* Parameters:
* In:
- * byPreambleType - Preamble Type
- * byPktType - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
- * cbFrameLength - Baseband Type
- * wRate - Tx Rate
+ * by_preamble_type - Preamble Type
+ * by_pkt_type - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
+ * cb_frame_length - Baseband Type
+ * tx_rate - Tx Rate
* Out:
*
* Return Value: FrameTime
*
*/
-unsigned int BBuGetFrameTime(unsigned char byPreambleType,
- unsigned char byPktType,
- unsigned int cbFrameLength, unsigned short wRate)
+unsigned int bb_get_frame_time(unsigned char by_preamble_type,
+ unsigned char by_pkt_type,
+ unsigned int cb_frame_length,
+ unsigned short tx_rate)
{
- unsigned int uFrameTime;
- unsigned int uPreamble;
- unsigned int uTmp;
- unsigned int uRateIdx = (unsigned int)wRate;
- unsigned int uRate = 0;
+ unsigned int frame_time;
+ unsigned int preamble;
+ unsigned int tmp;
+ unsigned int rate_idx = (unsigned int)tx_rate;
+ unsigned int rate = 0;
- if (uRateIdx > RATE_54M)
+ if (rate_idx > RATE_54M)
return 0;
- uRate = (unsigned int)awcFrameTime[uRateIdx];
+ rate = (unsigned int)awcFrameTime[rate_idx];
- if (uRateIdx <= 3) { /* CCK mode */
- if (byPreambleType == 1) /* Short */
- uPreamble = 96;
+ if (rate_idx <= 3) { /* CCK mode */
+ if (by_preamble_type == 1) /* Short */
+ preamble = 96;
else
- uPreamble = 192;
-
- uFrameTime = (cbFrameLength * 80) / uRate; /* ????? */
- uTmp = (uFrameTime * uRate) / 80;
- if (cbFrameLength != uTmp)
- uFrameTime++;
+ preamble = 192;
+ frame_time = (cb_frame_length * 80) / rate; /* ????? */
+ tmp = (frame_time * rate) / 80;
+ if (cb_frame_length != tmp)
+ frame_time++;
- return uPreamble + uFrameTime;
+ return preamble + frame_time;
}
- uFrameTime = (cbFrameLength * 8 + 22) / uRate; /* ???????? */
- uTmp = ((uFrameTime * uRate) - 22) / 8;
- if (cbFrameLength != uTmp)
- uFrameTime++;
+ frame_time = (cb_frame_length * 8 + 22) / rate; /* ???????? */
+ tmp = ((frame_time * rate) - 22) / 8;
+ if (cb_frame_length != tmp)
+ frame_time++;
- uFrameTime = uFrameTime * 4; /* ??????? */
- if (byPktType != PK_TYPE_11A)
- uFrameTime += 6; /* ?????? */
+ frame_time = frame_time * 4; /* ??????? */
+ if (by_pkt_type != PK_TYPE_11A)
+ frame_time += 6; /* ?????? */
- return 20 + uFrameTime; /* ?????? */
+ return 20 + frame_time; /* ?????? */
}
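
The renamed helper keeps the original integer math, which is easiest to sanity-check with a concrete frame. The snippet below is a stand-alone user-space model of that math, not driver code: the rate table mirrors the vnt_frame_time[] array visible in the vt6656 half of this series and is assumed to match vt6655's awcFrameTime[], and the PK_TYPE_11A value is only a stand-in.

/*
 * Stand-alone model of bb_get_frame_time()'s integer math.  The rate
 * table mirrors vnt_frame_time[] from the vt6656 driver; awcFrameTime[]
 * in vt6655 is assumed to hold the same values.
 */
#include <stdio.h>

#define PK_TYPE_11A 0	/* stand-in value; only the comparison matters */

static const unsigned short frame_time_tab[] = {
	10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216
};

static unsigned int model_frame_time(unsigned char preamble_type,
				     unsigned char pkt_type,
				     unsigned int frame_length,
				     unsigned short rate_idx)
{
	unsigned int rate = frame_time_tab[rate_idx];
	unsigned int t, tmp;

	if (rate_idx <= 3) {			/* CCK: rate in 100 kbit/s units */
		unsigned int preamble = (preamble_type == 1) ? 96 : 192;

		t = (frame_length * 80) / rate;	/* = bits / Mbit/s, in us */
		tmp = (t * rate) / 80;
		if (frame_length != tmp)
			t++;			/* round up to a whole us */
		return preamble + t;
	}

	/* OFDM: rate is 4 * Mbit/s, i.e. data bits per 4 us symbol */
	t = (frame_length * 8 + 22) / rate;	/* 16 SERVICE + 6 tail bits */
	tmp = ((t * rate) - 22) / 8;
	if (frame_length != tmp)
		t++;				/* round up to a whole symbol */
	t *= 4;					/* symbols -> us */
	if (pkt_type != PK_TYPE_11A)
		t += 6;				/* 2.4 GHz signal extension */
	return 20 + t;				/* preamble + SIGNAL */
}

int main(void)
{
	/* 1500-byte frame at 54 Mbit/s in 2.4 GHz (11g): prints 250 us */
	printf("%u us\n", model_frame_time(0, 1, 1500, 11));
	return 0;
}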
/*
@@ -1899,34 +1897,34 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
* Parameters:
* In:
* iobase - I/O base address
- * byBBAddr - address of register in Baseband
+ * by_bb_addr - address of register in Baseband
* Out:
- * pbyData - data read
+ * pby_data - data read
*
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbReadEmbedded(struct vnt_private *priv,
- unsigned char byBBAddr, unsigned char *pbyData)
+bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char *pby_data)
{
void __iomem *iobase = priv->PortOffset;
unsigned short ww;
- unsigned char byValue;
+ unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
/* turn on REGR */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
- if (byValue & BBREGCTL_DONE)
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ if (by_value & BBREGCTL_DONE)
break;
}
/* get BB data */
- VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData);
+ VNSvInPortB(iobase + MAC_REG_BBREGDATA, pby_data);
if (ww == W_MAX_TIMEOUT) {
pr_debug(" DBG_PORT80(0x30)\n");
@@ -1941,32 +1939,32 @@ bool BBbReadEmbedded(struct vnt_private *priv,
* Parameters:
* In:
* iobase - I/O base address
- * byBBAddr - address of register in Baseband
- * byData - data to write
+ * by_bb_addr - address of register in Baseband
+ * by_data - data to write
* Out:
* none
*
* Return Value: true if succeeded; false if failed.
*
*/
-bool BBbWriteEmbedded(struct vnt_private *priv,
- unsigned char byBBAddr, unsigned char byData)
+bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char by_data)
{
void __iomem *iobase = priv->PortOffset;
unsigned short ww;
- unsigned char byValue;
+ unsigned char by_value;
/* BB reg offset */
- VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
+ VNSvOutPortB(iobase + MAC_REG_BBREGADR, by_bb_addr);
/* set BB data */
- VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData);
+ VNSvOutPortB(iobase + MAC_REG_BBREGDATA, by_data);
/* turn on BBREGCTL_REGW */
MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
- if (byValue & BBREGCTL_DONE)
+ VNSvInPortB(iobase + MAC_REG_BBREGCTL, &by_value);
+ if (by_value & BBREGCTL_DONE)
break;
}
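
bb_read_embedded() and bb_write_embedded() share one access sequence: latch the baseband register offset through MAC_REG_BBREGADR, set the REGR or REGW kick bit in MAC_REG_BBREGCTL, then poll that control register until BBREGCTL_DONE appears, giving up after W_MAX_TIMEOUT iterations. The fragment below is only a shape sketch of that loop with stubbed port I/O; the register numbers, bit values and stubs are invented, and the real accessors additionally transfer MAC_REG_BBREGDATA and report false on timeout.

/* "Kick, then poll for DONE" access pattern; constants and stubs invented. */
#include <stdbool.h>
#include <stdio.h>

#define REG_CTL		0x01
#define CTL_DONE	0x01
#define MAX_POLLS	100

static unsigned char fake_regs[256];

static void outb_stub(unsigned char reg, unsigned char val)
{
	fake_regs[reg] = val;
	if (reg == REG_CTL)		/* pretend the hardware finishes instantly */
		fake_regs[REG_CTL] |= CTL_DONE;
}

static unsigned char inb_stub(unsigned char reg)
{
	return fake_regs[reg];
}

static bool kick_and_wait(void)
{
	unsigned int i;

	outb_stub(REG_CTL, 0x80);	/* start the transaction */
	for (i = 0; i < MAX_POLLS; i++) {
		if (inb_stub(REG_CTL) & CTL_DONE)
			break;		/* hardware latched the data */
	}
	return i < MAX_POLLS;		/* false means the poll timed out */
}

int main(void)
{
	printf("completed: %d\n", kick_and_wait());
	return 0;
}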
@@ -1992,29 +1990,29 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
*
*/
-bool BBbVT3253Init(struct vnt_private *priv)
+bool bb_vt3253_init(struct vnt_private *priv)
{
- bool bResult = true;
+ bool result = true;
int ii;
void __iomem *iobase = priv->PortOffset;
- unsigned char byRFType = priv->byRFType;
- unsigned char byLocalID = priv->byLocalID;
+ unsigned char by_rf_type = priv->byRFType;
+ unsigned char by_local_id = priv->byLocalID;
- if (byRFType == RF_RFMD2959) {
- if (byLocalID <= REV_ID_VT3253_A1) {
+ if (by_rf_type == RF_RFMD2959) {
+ if (by_local_id <= REV_ID_VT3253_A1) {
for (ii = 0; ii < CB_VT3253_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253InitTab_RFMD[ii][0],
byVT3253InitTab_RFMD[ii][1]);
} else {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_RFMD; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_RFMD[ii][0],
byVT3253B0_RFMD[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC4_RFMD2959[ii][0],
byVT3253B0_AGC4_RFMD2959[ii][1]);
@@ -2029,14 +2027,14 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[1] = -50;
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
- } else if ((byRFType == RF_AIROHA) || (byRFType == RF_AL2230S)) {
+ } else if ((by_rf_type == RF_AIROHA) || (by_rf_type == RF_AL2230S)) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x1C;
@@ -2047,14 +2045,14 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[1] = -48;
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
- } else if (byRFType == RF_UW2451) {
+ } else if (by_rf_type == RF_UW2451) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_UW2451[ii][0],
byVT3253B0_UW2451[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0],
byVT3253B0_AGC[ii][1]);
@@ -2069,9 +2067,9 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[1] = -50;
priv->ldBmThreshold[2] = 0;
priv->ldBmThreshold[3] = 0;
- } else if (byRFType == RF_UW2452) {
+ } else if (by_rf_type == RF_UW2452) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_UW2451[ii][0],
byVT3253B0_UW2451[ii][1]);
@@ -2080,7 +2078,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
+ /*bResult &= bb_write_embedded(iobase,0x09,0x41);*/
/* Init ANT B select,
* RX Config CR10 = 0x28->0x2A,
@@ -2088,23 +2086,23 @@ bool BBbVT3253Init(struct vnt_private *priv)
* make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
+ /*bResult &= bb_write_embedded(iobase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
+ result &= bb_write_embedded(priv, 0xd7, 0x06);
/* {{RobertYu:20050125, request by Jack */
- bResult &= BBbWriteEmbedded(priv, 0x90, 0x20);
- bResult &= BBbWriteEmbedded(priv, 0x97, 0xeb);
+ result &= bb_write_embedded(priv, 0x90, 0x20);
+ result &= bb_write_embedded(priv, 0x97, 0xeb);
/* }} */
/* {{RobertYu:20050221, request by Jack */
- bResult &= BBbWriteEmbedded(priv, 0xa6, 0x00);
- bResult &= BBbWriteEmbedded(priv, 0xa8, 0x30);
+ result &= bb_write_embedded(priv, 0xa6, 0x00);
+ result &= bb_write_embedded(priv, 0xa8, 0x30);
/* }} */
- bResult &= BBbWriteEmbedded(priv, 0xb0, 0x58);
+ result &= bb_write_embedded(priv, 0xb0, 0x58);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x14;
@@ -2117,14 +2115,14 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->ldBmThreshold[3] = 0;
/* }} RobertYu */
- } else if (byRFType == RF_VT3226) {
+ } else if (by_rf_type == RF_VT3226) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x1C;
@@ -2138,9 +2136,9 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Fix VT3226 DFC system timing issue */
MACvSetRFLE_LatchBase(iobase);
/* {{ RobertYu: 20050104 */
- } else if (byRFType == RF_AIROHA7230) {
+ } else if (by_rf_type == RF_AIROHA7230) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AIROHA2230[ii][0],
byVT3253B0_AIROHA2230[ii][1]);
@@ -2148,17 +2146,17 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Init ANT B select,TX Config CR09 = 0x61->0x45,
* 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
+ /* bResult &= bb_write_embedded(iobase,0x09,0x41);*/
/* Init ANT B select,RX Config CR10 = 0x28->0x2A,
* 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
*/
- /*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
+	/* bResult &= bb_write_embedded(iobase,0x0a,0x28);*/
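PLACEHOLDER_REMOVED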
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
+ result &= bb_write_embedded(priv, 0xd7, 0x06);
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
- bResult &= BBbWriteEmbedded(priv,
+ result &= bb_write_embedded(priv,
byVT3253B0_AGC[ii][0], byVT3253B0_AGC[ii][1]);
priv->abyBBVGA[0] = 0x1C;
@@ -2176,12 +2174,12 @@ bool BBbVT3253Init(struct vnt_private *priv)
priv->abyBBVGA[0] = 0x1C;
}
- if (byLocalID > REV_ID_VT3253_A1) {
- BBbWriteEmbedded(priv, 0x04, 0x7F);
- BBbWriteEmbedded(priv, 0x0D, 0x01);
+ if (by_local_id > REV_ID_VT3253_A1) {
+ bb_write_embedded(priv, 0x04, 0x7F);
+ bb_write_embedded(priv, 0x0D, 0x01);
}
- return bResult;
+ return result;
}
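
bb_vt3253_init() is almost entirely table-driven: each per-RF array is a list of {register, value} byte pairs streamed through bb_write_embedded(), with the boolean results folded together by &= so that a single failed write makes the whole init report failure. A compilable miniature of the idiom follows; the driver uses two-column byte arrays where the sketch uses a small struct for readability, and the table contents and writer are invented.

/* Table-driven register init in the style of bb_vt3253_init(); data invented. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct reg_val {
	unsigned char reg;
	unsigned char val;
};

static const struct reg_val init_tab[] = {
	{ 0x00, 0x31 }, { 0x4c, 0x00 }, { 0xd7, 0x06 },	/* illustrative pairs */
};

static bool write_reg(unsigned char reg, unsigned char val)
{
	printf("CR%02x <- 0x%02x\n", (unsigned int)reg, (unsigned int)val);
	return true;			/* a real writer can time out and fail */
}

static bool init_from_table(const struct reg_val *tab, size_t n)
{
	bool result = true;
	size_t i;

	for (i = 0; i < n; i++)
		result &= write_reg(tab[i].reg, tab[i].val);

	return result;			/* false if any single write failed */
}

int main(void)
{
	return init_from_table(init_tab,
			       sizeof(init_tab) / sizeof(init_tab[0])) ? 0 : 1;
}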
/*
@@ -2197,42 +2195,42 @@ bool BBbVT3253Init(struct vnt_private *priv)
*
*/
void
-BBvSetShortSlotTime(struct vnt_private *priv)
+bb_set_short_slot_time(struct vnt_private *priv)
{
- unsigned char byBBRxConf = 0;
- unsigned char byBBVGA = 0;
+ unsigned char by_bb_rx_conf = 0;
+ unsigned char by_bb_vga = 0;
- BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
+ bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
if (priv->bShortSlotTime)
- byBBRxConf &= 0xDF; /* 1101 1111 */
+ by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
- byBBRxConf |= 0x20; /* 0010 0000 */
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
/* patch for 3253B0 Baseband with Cardbus module */
- BBbReadEmbedded(priv, 0xE7, &byBBVGA);
- if (byBBVGA == priv->abyBBVGA[0])
- byBBRxConf |= 0x20; /* 0010 0000 */
+ bb_read_embedded(priv, 0xE7, &by_bb_vga);
+ if (by_bb_vga == priv->abyBBVGA[0])
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
- BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
+ bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
-void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
+void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data)
{
- unsigned char byBBRxConf = 0;
+ unsigned char by_bb_rx_conf = 0;
- BBbWriteEmbedded(priv, 0xE7, byData);
+ bb_write_embedded(priv, 0xE7, by_data);
- BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
+ bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
/* patch for 3253B0 Baseband with Cardbus module */
- if (byData == priv->abyBBVGA[0])
- byBBRxConf |= 0x20; /* 0010 0000 */
+ if (by_data == priv->abyBBVGA[0])
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
else if (priv->bShortSlotTime)
- byBBRxConf &= 0xDF; /* 1101 1111 */
+ by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
- byBBRxConf |= 0x20; /* 0010 0000 */
- priv->byBBVGACurrent = byData;
- BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
+ by_bb_rx_conf |= 0x20; /* 0010 0000 */
+ priv->byBBVGACurrent = by_data;
+ bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
/*
@@ -2248,12 +2246,12 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
*
*/
void
-BBvSoftwareReset(struct vnt_private *priv)
+bb_software_reset(struct vnt_private *priv)
{
- BBbWriteEmbedded(priv, 0x50, 0x40);
- BBbWriteEmbedded(priv, 0x50, 0);
- BBbWriteEmbedded(priv, 0x9C, 0x01);
- BBbWriteEmbedded(priv, 0x9C, 0);
+ bb_write_embedded(priv, 0x50, 0x40);
+ bb_write_embedded(priv, 0x50, 0);
+ bb_write_embedded(priv, 0x9C, 0x01);
+ bb_write_embedded(priv, 0x9C, 0);
}
/*
@@ -2269,13 +2267,13 @@ BBvSoftwareReset(struct vnt_private *priv)
*
*/
void
-BBvPowerSaveModeON(struct vnt_private *priv)
+bb_power_save_mode_on(struct vnt_private *priv)
{
- unsigned char byOrgData;
+ unsigned char by_org_data;
- BBbReadEmbedded(priv, 0x0D, &byOrgData);
- byOrgData |= BIT(0);
- BBbWriteEmbedded(priv, 0x0D, byOrgData);
+ bb_read_embedded(priv, 0x0D, &by_org_data);
+ by_org_data |= BIT(0);
+ bb_write_embedded(priv, 0x0D, by_org_data);
}
/*
@@ -2291,13 +2289,13 @@ BBvPowerSaveModeON(struct vnt_private *priv)
*
*/
void
-BBvPowerSaveModeOFF(struct vnt_private *priv)
+bb_power_save_mode_off(struct vnt_private *priv)
{
- unsigned char byOrgData;
+ unsigned char by_org_data;
- BBbReadEmbedded(priv, 0x0D, &byOrgData);
- byOrgData &= ~(BIT(0));
- BBbWriteEmbedded(priv, 0x0D, byOrgData);
+ bb_read_embedded(priv, 0x0D, &by_org_data);
+ by_org_data &= ~(BIT(0));
+ bb_write_embedded(priv, 0x0D, by_org_data);
}
/*
@@ -2306,7 +2304,7 @@ BBvPowerSaveModeOFF(struct vnt_private *priv)
* Parameters:
* In:
* priv - Device Structure
- * byAntennaMode - Antenna Mode
+ * by_antenna_mode - Antenna Mode
* Out:
* none
*
@@ -2315,22 +2313,22 @@ BBvPowerSaveModeOFF(struct vnt_private *priv)
*/
void
-BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
+bb_set_tx_antenna_mode(struct vnt_private *priv, unsigned char by_antenna_mode)
{
- unsigned char byBBTxConf;
+ unsigned char by_bb_tx_conf;
- BBbReadEmbedded(priv, 0x09, &byBBTxConf); /* CR09 */
- if (byAntennaMode == ANT_DIVERSITY) {
+ bb_read_embedded(priv, 0x09, &by_bb_tx_conf); /* CR09 */
+ if (by_antenna_mode == ANT_DIVERSITY) {
/* bit 1 is diversity */
- byBBTxConf |= 0x02;
- } else if (byAntennaMode == ANT_A) {
+ by_bb_tx_conf |= 0x02;
+ } else if (by_antenna_mode == ANT_A) {
/* bit 2 is ANTSEL */
- byBBTxConf &= 0xF9; /* 1111 1001 */
- } else if (byAntennaMode == ANT_B) {
- byBBTxConf &= 0xFD; /* 1111 1101 */
- byBBTxConf |= 0x04;
+ by_bb_tx_conf &= 0xF9; /* 1111 1001 */
+ } else if (by_antenna_mode == ANT_B) {
+ by_bb_tx_conf &= 0xFD; /* 1111 1101 */
+ by_bb_tx_conf |= 0x04;
}
- BBbWriteEmbedded(priv, 0x09, byBBTxConf); /* CR09 */
+ bb_write_embedded(priv, 0x09, by_bb_tx_conf); /* CR09 */
}
/*
@@ -2339,7 +2337,7 @@ BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
* Parameters:
* In:
* priv - Device Structure
- * byAntennaMode - Antenna Mode
+ * by_antenna_mode - Antenna Mode
* Out:
* none
*
@@ -2348,25 +2346,25 @@ BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
*/
void
-BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
+bb_set_rx_antenna_mode(struct vnt_private *priv, unsigned char by_antenna_mode)
{
- unsigned char byBBRxConf;
+ unsigned char by_bb_rx_conf;
- BBbReadEmbedded(priv, 0x0A, &byBBRxConf); /* CR10 */
- if (byAntennaMode == ANT_DIVERSITY) {
- byBBRxConf |= 0x01;
+ bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
+ if (by_antenna_mode == ANT_DIVERSITY) {
+ by_bb_rx_conf |= 0x01;
- } else if (byAntennaMode == ANT_A) {
- byBBRxConf &= 0xFC; /* 1111 1100 */
- } else if (byAntennaMode == ANT_B) {
- byBBRxConf &= 0xFE; /* 1111 1110 */
- byBBRxConf |= 0x02;
+ } else if (by_antenna_mode == ANT_A) {
+ by_bb_rx_conf &= 0xFC; /* 1111 1100 */
+ } else if (by_antenna_mode == ANT_B) {
+ by_bb_rx_conf &= 0xFE; /* 1111 1110 */
+ by_bb_rx_conf |= 0x02;
}
- BBbWriteEmbedded(priv, 0x0A, byBBRxConf); /* CR10 */
+ bb_write_embedded(priv, 0x0A, by_bb_rx_conf); /* CR10 */
}
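
Both antenna setters are plain read-modify-write cycles on a single baseband register: CR09 carries the TX selection (bit 1 = diversity, bit 2 = ANTSEL) and CR10 carries the RX selection in bits 0-1. The fragment below reproduces the CR09 masking on a local byte so the three resulting bit patterns are easy to see; the ANT_* numbering is a stand-in, not the driver's enum, and the RX path mirrors the same idea with the CR10 masks shown above.

/* CR09 TX-antenna masking as above, applied to a local register image. */
#include <stdio.h>

enum ant_mode { ANT_DIVERSITY, ANT_A, ANT_B };	/* stand-in values */

static unsigned char tx_conf(unsigned char cr09, enum ant_mode mode)
{
	if (mode == ANT_DIVERSITY) {
		cr09 |= 0x02;		/* bit 1: diversity on */
	} else if (mode == ANT_A) {
		cr09 &= 0xF9;		/* 1111 1001: clear diversity and ANTSEL */
	} else {			/* ANT_B */
		cr09 &= 0xFD;		/* 1111 1101: clear diversity */
		cr09 |= 0x04;		/* bit 2: ANTSEL selects antenna B */
	}
	return cr09;
}

int main(void)
{
	printf("div=0x%02x A=0x%02x B=0x%02x\n",
	       tx_conf(0, ANT_DIVERSITY), tx_conf(0, ANT_A), tx_conf(0, ANT_B));
	return 0;
}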
/*
- * Description: BBvSetDeepSleep
+ * Description: bb_set_deep_sleep
*
* Parameters:
* In:
@@ -2378,15 +2376,9 @@ BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode)
*
*/
void
-BBvSetDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
+bb_set_deep_sleep(struct vnt_private *priv, unsigned char by_local_id)
{
- BBbWriteEmbedded(priv, 0x0C, 0x17); /* CR12 */
- BBbWriteEmbedded(priv, 0x0D, 0xB9); /* CR13 */
+ bb_write_embedded(priv, 0x0C, 0x17); /* CR12 */
+ bb_write_embedded(priv, 0x0D, 0xB9); /* CR13 */
}
-void
-BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID)
-{
- BBbWriteEmbedded(priv, 0x0C, 0x00); /* CR12 */
- BBbWriteEmbedded(priv, 0x0D, 0x01); /* CR13 */
-}
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 0cc2e07829c5..9354ce724446 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -46,30 +46,31 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-unsigned int BBuGetFrameTime(unsigned char byPreambleType,
- unsigned char byPktType,
- unsigned int cbFrameLength,
- unsigned short wRate);
+unsigned int bb_get_frame_time(unsigned char by_preamble_type,
+ unsigned char by_pkt_type,
+ unsigned int cb_frame_length,
+			       unsigned short tx_rate);
void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
-bool BBbReadEmbedded(struct vnt_private *priv, unsigned char byBBAddr,
- unsigned char *pbyData);
-bool BBbWriteEmbedded(struct vnt_private *priv, unsigned char byBBAddr,
- unsigned char byData);
+bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char *pby_data);
+bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
+ unsigned char by_data);
-void BBvSetShortSlotTime(struct vnt_private *priv);
-void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData);
+void bb_set_short_slot_time(struct vnt_private *priv);
+void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data);
/* VT3253 Baseband */
-bool BBbVT3253Init(struct vnt_private *priv);
-void BBvSoftwareReset(struct vnt_private *priv);
-void BBvPowerSaveModeON(struct vnt_private *priv);
-void BBvPowerSaveModeOFF(struct vnt_private *priv);
-void BBvSetTxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode);
-void BBvSetRxAntennaMode(struct vnt_private *priv, unsigned char byAntennaMode);
-void BBvSetDeepSleep(struct vnt_private *priv, unsigned char byLocalID);
-void BBvExitDeepSleep(struct vnt_private *priv, unsigned char byLocalID);
+bool bb_vt3253_init(struct vnt_private *priv);
+void bb_software_reset(struct vnt_private *priv);
+void bb_power_save_mode_on(struct vnt_private *priv);
+void bb_power_save_mode_off(struct vnt_private *priv);
+void bb_set_tx_antenna_mode(struct vnt_private *priv,
+ unsigned char by_antenna_mode);
+void bb_set_rx_antenna_mode(struct vnt_private *priv,
+ unsigned char by_antenna_mode);
+void bb_set_deep_sleep(struct vnt_private *priv, unsigned char by_local_id);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index e65c9825ea5a..6148310c06d6 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -11,15 +11,12 @@
* CARDvUpdateBasicTopRate - Update BasicTopRate
* CARDbAddBasicRate - Add to BasicRateSet
* CARDbIsOFDMinBasicRate - Check if any OFDM rate is in BasicRateSet
- * CARDvSetLoopbackMode - Set Loopback mode
- * CARDbSoftwareReset - Sortware reset NIC
* CARDqGetTSFOffset - Calculate TSFOffset
* CARDbGetCurrentTSF - Read Current NIC TSF counter
* CARDqGetNextTBTT - Calculate Next Beacon TSF counter
* CARDvSetFirstNextTBTT - Set NIC Beacon time
* CARDvUpdateNextTBTT - Sync. NIC Beacon time
* CARDbRadioPowerOff - Turn Off NIC Radio Power
- * CARDbRadioPowerOn - Turn On NIC Radio Power
*
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
@@ -198,22 +195,22 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x20;
priv->abyBBVGA[2] = 0x10;
priv->abyBBVGA[3] = 0x10;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x1C)
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
MACvSetBBType(priv->PortOffset, BB_TYPE_11A);
priv->abyBBVGA[0] = 0x18;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x14) {
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
- BBbWriteEmbedded(priv, 0xE1, 0x57);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE1, 0x57);
}
} else {
MACvSetBBType(priv->PortOffset, BB_TYPE_11A);
}
- BBbWriteEmbedded(priv, 0x88, 0x03);
+ bb_write_embedded(priv, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
bySIFS = C_SIFS_A;
byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
@@ -224,19 +221,19 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
- BBbWriteEmbedded(priv, 0xE1, 0xD3);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(priv, 0x88, 0x02);
+ bb_write_embedded(priv, 0x88, 0x02);
bySlot = C_SLOT_LONG;
bySIFS = C_SIFS_BG;
byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
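
The slot and IFS assignments above follow the usual 802.11 relation DIFS = SIFS + 2 * slot time. With the standard figures (SIFS 16 us and a 9 us slot for 11a, SIFS 10 us and a 20 us long slot for 11b/g), which the C_SIFS_*/C_SLOT_* constants are assumed to match, the arithmetic works out as in the short check below.

/* DIFS = SIFS + 2 * slot, using the standard 802.11 timing figures. */
#include <stdio.h>

int main(void)
{
	unsigned int sifs_a = 16, slot_short = 9;	/* 5 GHz OFDM */
	unsigned int sifs_bg = 10, slot_long = 20;	/* 2.4 GHz, long slot */

	printf("DIFS(11a) = %u us, DIFS(11b/g, long slot) = %u us\n",
	       sifs_a + 2 * slot_short, sifs_bg + 2 * slot_long);
	return 0;	/* prints 34 us and 50 us */
}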
@@ -247,19 +244,19 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x20)
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- BBbReadEmbedded(priv, 0xE7, &byData);
+ bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x18) {
- BBbWriteEmbedded(priv, 0xE7, priv->abyBBVGA[0]);
- BBbWriteEmbedded(priv, 0xE1, 0xD3);
+ bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
+ bb_write_embedded(priv, 0xE1, 0xD3);
}
}
- BBbWriteEmbedded(priv, 0x88, 0x08);
+ bb_write_embedded(priv, 0x88, 0x08);
bySIFS = C_SIFS_BG;
if (priv->bShortSlotTime) {
@@ -310,7 +307,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->bySlot = bySlot;
VNSvOutPortB(priv->PortOffset + MAC_REG_SLOT, priv->bySlot);
- BBvSetShortSlotTime(priv);
+ bb_set_short_slot_time(priv);
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
@@ -431,7 +428,7 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
- BBvSetDeepSleep(priv, priv->byLocalID);
+ bb_set_deep_sleep(priv, priv->byLocalID);
priv->bRadioOff = true;
pr_debug("chester power off\n");
@@ -439,60 +436,6 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
LED_ACTSET); /* LED issue */
}
-/*
- * Description: Turn on Radio power
- *
- * Parameters:
- * In:
- * priv - The adapter to be turned on
- * Out:
- * none
- *
- * Return Value: true if success; otherwise false
- */
-bool CARDbRadioPowerOn(struct vnt_private *priv)
-{
- bool bResult = true;
-
- pr_debug("chester power on\n");
- if (priv->bRadioControlOff) {
- if (priv->bHWRadioOff)
- pr_debug("chester bHWRadioOff\n");
- if (priv->bRadioControlOff)
- pr_debug("chester bRadioControlOff\n");
- return false; }
-
- if (!priv->bRadioOff) {
- pr_debug("chester pbRadioOff\n");
- return true; }
-
- BBvExitDeepSleep(priv, priv->byLocalID);
-
- MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
-
- switch (priv->byRFType) {
- case RF_RFMD2959:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
- SOFTPWRCTL_SWPE1);
- break;
-
- case RF_AIROHA:
- case RF_AL2230S:
- case RF_AIROHA7230:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
- (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
- break;
- }
-
- priv->bRadioOff = false;
- pr_debug("chester power on\n");
- MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0,
- LED_ACTSET); /* LED issue */
- return bResult;
-}
-
void CARDvSafeResetTx(struct vnt_private *priv)
{
unsigned int uu;
@@ -816,54 +759,6 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
}
/*
- * Description: Set NIC Loopback mode
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * wLoopbackMode - Loopback mode to be set
- * Out:
- * none
- *
- * Return Value: none
- */
-void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode)
-{
- switch (wLoopbackMode) {
- case CARD_LB_NONE:
- case CARD_LB_MAC:
- case CARD_LB_PHY:
- break;
- default:
- break;
- }
- /* set MAC loopback */
- MACvSetLoopbackMode(priv, LOBYTE(wLoopbackMode));
- /* set Baseband loopback */
-}
-
-/*
- * Description: Software Reset NIC
- *
- * Parameters:
- * In:
- * priv - The adapter to be reset
- * Out:
- * none
- *
- * Return Value: none
- */
-bool CARDbSoftwareReset(struct vnt_private *priv)
-{
- /* reset MAC */
- if (!MACbSafeSoftwareReset(priv))
- return false;
-
- return true;
-}
-
-/*
* Description: Calculate TSF offset of two TSF input
* Get TSF Offset from RxBCN's TSF and local TSF
*
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 337266add6b2..568a2ddd6588 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -44,9 +44,6 @@ struct vnt_private;
void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type);
void CARDvUpdateBasicTopRate(struct vnt_private *priv);
bool CARDbIsOFDMinBasicRate(struct vnt_private *priv);
-void CARDvSetLoopbackMode(struct vnt_private *priv,
- unsigned short wLoopbackMode);
-bool CARDbSoftwareReset(struct vnt_private *priv);
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval);
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
@@ -58,7 +55,6 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv);
void CARDvSafeResetTx(struct vnt_private *priv);
void CARDvSafeResetRx(struct vnt_private *priv);
void CARDbRadioPowerOff(struct vnt_private *priv);
-bool CARDbRadioPowerOn(struct vnt_private *priv);
bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type);
bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
u64 qwBSSTimestamp);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index dec6f0f23b88..62a85c1ca6c4 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -173,7 +173,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
priv->byBBVGACurrent != priv->abyBBVGA[0]) {
priv->byBBVGACurrent = priv->abyBBVGA[0];
- BBvSetVGAGainOffset(priv, priv->byBBVGACurrent);
+ bb_set_vga_gain_offset(priv, priv->byBBVGACurrent);
}
/* clear NAV */
@@ -195,7 +195,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
if (priv->bEnablePSMode)
RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value);
- BBvSoftwareReset(priv);
+ bb_software_reset(priv);
if (priv->byLocalID > REV_ID_VT3253_B1) {
unsigned long flags;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 5c86cc60eb5c..41cbec4134b0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -21,18 +21,17 @@
* device_alloc_rx_buf - rx buffer pre-allocated function
* device_free_rx_buf - free rx buffer function
* device_free_tx_buf - free tx buffer function
- * device_init_rd0_ring- initial rd dma0 ring
- * device_init_rd1_ring- initial rd dma1 ring
- * device_init_td0_ring- initial tx dma0 ring buffer
- * device_init_td1_ring- initial tx dma1 ring buffer
- * device_init_registers- initial MAC & BBP & RF internal registers.
- * device_init_rings- initial tx/rx ring buffer
- * device_free_rings- free all allocated ring buffer
- * device_tx_srv- tx interrupt service function
+ * device_init_rd0_ring - initial rd dma0 ring
+ * device_init_rd1_ring - initial rd dma1 ring
+ * device_init_td0_ring - initial tx dma0 ring buffer
+ * device_init_td1_ring - initial tx dma1 ring buffer
+ * device_init_registers - initial MAC & BBP & RF internal registers.
+ * device_init_rings - initial tx/rx ring buffer
+ * device_free_rings - free all allocated ring buffer
+ * device_tx_srv - tx interrupt service function
*
* Revision History:
*/
-#undef __NO_VERSION__
#include <linux/file.h>
#include "device.h"
@@ -202,7 +201,7 @@ static void device_init_registers(struct vnt_private *priv)
unsigned char byOFDMPwrdBm = 0;
MACbShutdown(priv);
- BBvSoftwareReset(priv);
+ bb_software_reset(priv);
/* Do MACbSoftwareReset in MACvInitialize */
MACbSoftwareReset(priv);
@@ -279,8 +278,8 @@ static void device_init_registers(struct vnt_private *priv)
}
/* Set initial antenna mode */
- BBvSetTxAntennaMode(priv, priv->byTxAntennaMode);
- BBvSetRxAntennaMode(priv, priv->byRxAntennaMode);
+ bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
+ bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
/* zonetype initial */
priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];
@@ -357,16 +356,16 @@ static void device_init_registers(struct vnt_private *priv)
VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
/* initialize BBP registers */
- BBbVT3253Init(priv);
+ bb_vt3253_init(priv);
if (priv->bUpdateBBVGA) {
priv->byBBVGACurrent = priv->abyBBVGA[0];
priv->byBBVGANew = priv->byBBVGACurrent;
- BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
+ bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
}
- BBvSetRxAntennaMode(priv, priv->byRxAntennaMode);
- BBvSetTxAntennaMode(priv, priv->byTxAntennaMode);
+ bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
+ bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
/* Set BB and packet type at the same time. */
/* Set Short Slot Time, xIFS, and RSPINF. */
@@ -1001,7 +1000,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
if (priv->uBBVGADiffCount == 1) {
/* first VGA diff gain */
- BBvSetVGAGainOffset(priv, priv->byBBVGANew);
+ bb_set_vga_gain_offset(priv, priv->byBBVGANew);
dev_dbg(&priv->pcid->dev,
"First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
@@ -1017,7 +1016,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
priv->byBBVGACurrent,
(int)priv->uBBVGADiffCount);
- BBvSetVGAGainOffset(priv, priv->byBBVGANew);
+ bb_set_vga_gain_offset(priv, priv->byBBVGANew);
}
}
@@ -1445,7 +1444,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->bShortSlotTime = false;
CARDbSetPhyParameter(priv, priv->byBBType);
- BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
+ bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
}
if (changed & BSS_CHANGED_TXPOWER)
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index d6ca6e5551a7..747d79265a7c 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -419,7 +419,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
+ bb_power_save_mode_off(priv); /* RobertYu:20050106, have DC value for Calibration */
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
@@ -443,7 +443,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
SOFTPWRCTL_SWPECTI |
SOFTPWRCTL_TXPEINV));
- BBvPowerSaveModeON(priv); /* RobertYu:20050106 */
+ bb_power_save_mode_on(priv); /* RobertYu:20050106 */
/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
/* 3-wire control for power saving mode */
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 37fcc42ed000..cfab64d2b312 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -165,16 +165,21 @@ s_uGetTxRsvTime(
{
unsigned int uDataTime, uAckTime;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
- if (byPktType == PK_TYPE_11B) /* llb,CCK mode */
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopCCKBasicRate);
- else /* 11g 2.4G OFDM mode & 11a 5G OFDM mode */
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, (unsigned short)pDevice->byTopOFDMBasicRate);
-
- if (bNeedAck)
- return uDataTime + pDevice->uSIFS + uAckTime;
- else
+ uDataTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
+
+ if (!bNeedAck)
return uDataTime;
+
+ /*
+ * CCK mode - 11b
+ * OFDM mode - 11g 2.4G & 11a 5G
+ */
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14,
+ byPktType == PK_TYPE_11B ?
+ pDevice->byTopCCKBasicRate :
+ pDevice->byTopOFDMBasicRate);
+
+ return uDataTime + pDevice->uSIFS + uAckTime;
}
static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
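
The rewritten s_uGetTxRsvTime() makes the composition explicit: the reserved time is the data frame's airtime alone when no ACK is expected, and otherwise data time plus SIFS plus the 14-byte ACK's airtime at the CCK or OFDM basic rate matching the packet type. Worked through with the frame-time model sketched earlier (1500-byte frame at 54 Mbit/s, ACK at a 24 Mbit/s OFDM basic rate, 10 us SIFS, all in 2.4 GHz), the numbers below are illustrative rather than read out of the driver.

/* T_rsv = T_data + SIFS + T_ack when an ACK is expected, else T_data. */
#include <stdio.h>

int main(void)
{
	unsigned int t_data = 250;	/* us: 1500 bytes at 54 Mbit/s, 11g */
	unsigned int t_ack = 34;	/* us: 14-byte ACK at 24 Mbit/s + extension */
	unsigned int sifs = 10;		/* us: 2.4 GHz SIFS */
	int need_ack = 1;

	printf("reserved = %u us\n",
	       need_ack ? t_data + sifs + t_ack : t_data);
	return 0;			/* prints 294 us */
}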
@@ -195,24 +200,28 @@ s_uGetRTSCTSRsvTime(
unsigned short wCurrentRate
)
{
- unsigned int uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
-
- uRrvTime = uRTSTime = uCTSTime = uAckTime = uDataTime = 0;
+ unsigned int uRrvTime = 0;
+ unsigned int uRTSTime = 0;
+ unsigned int uCTSTime = 0;
+ unsigned int uAckTime = 0;
+ unsigned int uDataTime = 0;
- uDataTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
+ uDataTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, cbFrameLength, wCurrentRate);
if (byRTSRsvType == 0) { /* RTSTxRrvTime_bb */
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uRTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = uAckTime;
} else if (byRTSRsvType == 1) { /* RTSTxRrvTime_ba, only in 2.4GHZ */
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uRTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 20, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
} else if (byRTSRsvType == 2) { /* RTSTxRrvTime_aa */
- uRTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
- uCTSTime = uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uRTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 20, pDevice->byTopOFDMBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = uAckTime;
} else if (byRTSRsvType == 3) { /* CTSTxRrvTime_ba, only in 2.4GHZ */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uRrvTime = uCTSTime + uAckTime + uDataTime + 2 * pDevice->uSIFS;
return cpu_to_le16((u16)uRrvTime);
}
@@ -239,138 +248,83 @@ s_uGetDataDuration(
)
{
bool bLastFrag = false;
- unsigned int uAckTime = 0, uNextPktTime = 0;
+ unsigned int uAckTime = 0, uNextPktTime = 0, len;
if (uFragIdx == (uMACfragNum - 1))
bLastFrag = true;
+ if (uFragIdx == (uMACfragNum - 2))
+ len = cbLastFragmentSize;
+ else
+ len = cbFrameLength;
+
switch (byDurType) {
case DATADUR_B: /* DATADUR_B */
- if (((uMACfragNum == 1)) || bLastFrag) {/* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
+ if (bNeedAck) {
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ byPktType, 14,
+ pDevice->byTopCCKBasicRate);
+ }
+ /* Non Frag or Last Frag */
+ if ((uMACfragNum == 1) || bLastFrag) {
+ if (!bNeedAck)
return 0;
- }
- } else {/* First Frag or Mid Frag */
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
-
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
+ } else {
+ /* First Frag or Mid Frag */
+ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
+ len, wRate, bNeedAck);
}
- break;
+
+ return pDevice->uSIFS + uAckTime + uNextPktTime;
case DATADUR_A: /* DATADUR_A */
- if (((uMACfragNum == 1)) || bLastFrag) {/* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
+ if (bNeedAck) {
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ byPktType, 14,
+ pDevice->byTopOFDMBasicRate);
+ }
+ /* Non Frag or Last Frag */
+ if ((uMACfragNum == 1) || bLastFrag) {
+ if (!bNeedAck)
return 0;
- }
- } else {/* First Frag or Mid Frag */
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
-
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
+ } else {
+ /* First Frag or Mid Frag */
+ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
+ len, wRate, bNeedAck);
}
- break;
+
+ return pDevice->uSIFS + uAckTime + uNextPktTime;
case DATADUR_A_F0: /* DATADUR_A_F0 */
- if (((uMACfragNum == 1)) || bLastFrag) {/* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
+ case DATADUR_A_F1: /* DATADUR_A_F1 */
+ if (bNeedAck) {
+ uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ byPktType, 14,
+ pDevice->byTopOFDMBasicRate);
+ }
+ /* Non Frag or Last Frag */
+ if ((uMACfragNum == 1) || bLastFrag) {
+ if (!bNeedAck)
return 0;
- }
- } else { /* First Frag or Mid Frag */
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
-
- } else { /* (byFBOption == AUTO_FB_1) */
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
+ } else {
+ /* First Frag or Mid Frag */
+ if (wRate < RATE_18M)
+ wRate = RATE_18M;
+ else if (wRate > RATE_54M)
+ wRate = RATE_54M;
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
- }
- break;
+ wRate -= RATE_18M;
- case DATADUR_A_F1: /* DATADUR_A_F1 */
- if (((uMACfragNum == 1)) || bLastFrag) { /* Non Frag or Last Frag */
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime;
- } else {
- return 0;
- }
- } else { /* First Frag or Mid Frag */
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
-
- } else { /* (byFBOption == AUTO_FB_1) */
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if (uFragIdx == (uMACfragNum - 2))
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- else
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return pDevice->uSIFS + uAckTime + uNextPktTime;
- } else {
- return pDevice->uSIFS + uNextPktTime;
- }
+ if (byFBOption == AUTO_FB_0)
+ wRate = wFB_Opt0[FB_RATE0][wRate];
+ else
+ wRate = wFB_Opt1[FB_RATE0][wRate];
+
+ uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType,
+ len, wRate, bNeedAck);
}
- break;
+
+ return pDevice->uSIFS + uAckTime + uNextPktTime;
default:
break;
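
After the consolidation, every DATADUR_* case reduces to SIFS + the ACK airtime (if one is expected) + the next fragment's reserve time (unless this is the only or last fragment). For the fallback cases the rate is first clamped into the 18-54 Mbit/s window and then used, minus RATE_18M, as a zero-based column into the wFB_Opt0/wFB_Opt1 tables; note that the combined DATADUR_A_F0/DATADUR_A_F1 branch takes its row from FB_RATE0 for both duration types. The clamp-and-index step in isolation looks like the sketch below; the RATE_* indices follow the rate-table ordering seen earlier and are assumed, and the fallback row contents are invented.

/* Clamp to the 18-54M window, then index a fallback row; data invented. */
#include <stdio.h>

enum { RATE_18M = 7, RATE_24M, RATE_36M, RATE_48M, RATE_54M };	/* assumed indices */

/* one row of a fallback table: a fallback rate for each of 18..54M */
static const unsigned short fb_row[5] = { 4, 7, 8, 9, 10 };	/* illustrative */

static unsigned short pick_fallback(unsigned short rate)
{
	if (rate < RATE_18M)
		rate = RATE_18M;
	else if (rate > RATE_54M)
		rate = RATE_54M;

	return fb_row[rate - RATE_18M];	/* zero-based column, as in the diff */
}

int main(void)
{
	printf("fallback for 54M: %d\n", pick_fallback(RATE_54M));
	return 0;
}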
@@ -396,17 +350,17 @@ s_uGetRTSCTSDuration(
switch (byDurType) {
case RTSDUR_BB: /* RTSDuration_bb */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA: /* RTSDuration_ba */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_AA: /* RTSDuration_aa */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
@@ -415,7 +369,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_BA_F0: /* RTSDuration_ba_f0 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -424,7 +378,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -433,7 +387,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_BA_F1: /* RTSDuration_ba_f1 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2*pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -442,7 +396,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */
- uCTSTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -1040,16 +994,14 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
struct vnt_tx_desc *ptdCurr;
unsigned int cbHeaderLength = 0;
- void *pvRrvTime;
- struct vnt_mic_hdr *pMICHDR;
- void *pvRTS;
- void *pvCTS;
- void *pvTxDataHd;
+ void *pvRrvTime = NULL;
+ struct vnt_mic_hdr *pMICHDR = NULL;
+ void *pvRTS = NULL;
+ void *pvCTS = NULL;
+ void *pvTxDataHd = NULL;
unsigned short wTxBufSize; /* FFinfo size */
unsigned char byFBOption = AUTO_FB_NONE;
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
-
cbFrameSize = skb->len + 4;
if (info->control.hw_key) {
diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile
index 375f54e9f58b..f696a9d7a143 100644
--- a/drivers/staging/vt6656/Makefile
+++ b/drivers/staging/vt6656/Makefile
@@ -1,7 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-# TODO: all of these should be removed
-ccflags-y := -DLINUX -D__KERNEL__ -DEXPORT_SYMTAB -D__NO_VERSION__
-ccflags-y += -DHOSTAP
vt6656_stage-y += main_usb.o \
card.o \
@@ -13,7 +10,6 @@ vt6656_stage-y += main_usb.o \
key.o \
rf.o \
usbpipe.o \
- channel.o \
- firmware.o
+ channel.o
obj-$(CONFIG_VT6656) += vt6656_stage.o
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index a19a563d8bcc..41ae779ec61f 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -23,13 +23,15 @@
*/
#include <linux/bits.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
+#include "device.h"
#include "mac.h"
#include "baseband.h"
#include "rf.h"
#include "usbpipe.h"
-static u8 vnt_vt3184_agc[] = {
+static const u8 vnt_vt3184_agc[] = {
0x00, 0x00, 0x02, 0x02, 0x04, 0x04, 0x06, 0x06,
0x08, 0x08, 0x0a, 0x0a, 0x0c, 0x0c, 0x0e, 0x0e, /* 0x0f */
0x10, 0x10, 0x12, 0x12, 0x14, 0x14, 0x16, 0x16,
@@ -76,7 +78,7 @@ static u8 vnt_vt3184_al2230[] = {
};
/* {{RobertYu:20060515, new BB setting for VT3226D0 */
-static u8 vnt_vt3184_vt3226d0[] = {
+static const u8 vnt_vt3184_vt3226d0[] = {
0x31, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00,
0x70, 0x45, 0x2a, 0x76, 0x00, 0x00, 0x80, 0x00, /* 0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -111,198 +113,85 @@ static u8 vnt_vt3184_vt3226d0[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* 0xff */
};
-static const u16 vnt_frame_time[MAX_RATE] = {
- 10, 20, 55, 110, 24, 36, 48, 72, 96, 144, 192, 216
+struct vnt_threshold {
+ u8 bb_pre_ed_rssi;
+ u8 cr_201;
+ u8 cr_206;
};
-/*
- * Description: Calculate data frame transmitting time
- *
- * Parameters:
- * In:
- * preamble_type - Preamble Type
- * pkt_type - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
- * frame_length - Baseband Type
- * tx_rate - Tx Rate
- * Out:
- *
- * Return Value: FrameTime
- *
- */
-unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate)
-{
- unsigned int frame_time;
- unsigned int preamble;
- unsigned int rate = 0;
-
- if (tx_rate > RATE_54M)
- return 0;
-
- rate = (unsigned int)vnt_frame_time[tx_rate];
-
- if (tx_rate <= 3) {
- if (preamble_type == 1)
- preamble = 96;
- else
- preamble = 192;
-
- frame_time = DIV_ROUND_UP(frame_length * 80, rate);
- return preamble + frame_time;
- }
-
- frame_time = DIV_ROUND_UP(frame_length * 8 + 22, rate);
- frame_time = frame_time * 4;
-
- if (pkt_type != PK_TYPE_11A)
- frame_time += 6;
- return 20 + frame_time;
-}
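
The vt6656 copy being removed here already expresses the round-up with DIV_ROUND_UP(), where the vt6655 counterpart earlier in this series reconstructs the frame length from the truncated quotient and bumps the quotient when the two disagree; both come down to a ceiling division. A quick self-check of that equivalence is below, with the kernel macro re-defined locally so the snippet builds in user space.

/* DIV_ROUND_UP() vs. the divide, recompute and bump idiom: same ceiling. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int bump_if_truncated(unsigned int n, unsigned int d)
{
	unsigned int q = n / d;

	if (q * d != n)		/* something was lost to truncation */
		q++;
	return q;
}

int main(void)
{
	unsigned int n, d = 216;	/* the 54 Mbit/s rate-table entry */

	for (n = 1; n < 100000; n++)
		if (DIV_ROUND_UP(n, d) != bump_if_truncated(n, d))
			return 1;	/* never taken: the two agree */

	printf("equivalent\n");
	return 0;
}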
-
-/*
- * Description: Calculate Length, Service, and Signal fields of Phy for Tx
- *
- * Parameters:
- * In:
- * priv - Device Structure
- * frame_length - Tx Frame Length
- * tx_rate - Tx Rate
- * Out:
- * struct vnt_phy_field *phy
- * - pointer to Phy Length field
- * - pointer to Phy Service field
- * - pointer to Phy Signal field
- *
- * Return Value: none
- *
- */
-void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
-{
- u32 bit_count;
- u32 count = 0;
- u32 tmp;
- int ext_bit;
- u8 preamble_type = priv->preamble_type;
-
- bit_count = frame_length * 8;
- ext_bit = false;
-
- switch (tx_rate) {
- case RATE_1M:
- count = bit_count;
-
- phy->signal = 0x00;
-
- break;
- case RATE_2M:
- count = bit_count / 2;
-
- if (preamble_type == 1)
- phy->signal = 0x09;
- else
- phy->signal = 0x01;
-
- break;
- case RATE_5M:
- count = DIV_ROUND_UP(bit_count * 10, 55);
-
- if (preamble_type == 1)
- phy->signal = 0x0a;
- else
- phy->signal = 0x02;
-
- break;
- case RATE_11M:
- count = bit_count / 11;
- tmp = count * 11;
-
- if (tmp != bit_count) {
- count++;
-
- if ((bit_count - tmp) <= 3)
- ext_bit = true;
- }
-
- if (preamble_type == 1)
- phy->signal = 0x0b;
- else
- phy->signal = 0x03;
-
- break;
- case RATE_6M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9b;
- else
- phy->signal = 0x8b;
-
- break;
- case RATE_9M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9f;
- else
- phy->signal = 0x8f;
-
- break;
- case RATE_12M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9a;
- else
- phy->signal = 0x8a;
-
- break;
- case RATE_18M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9e;
- else
- phy->signal = 0x8e;
-
- break;
- case RATE_24M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x99;
- else
- phy->signal = 0x89;
-
- break;
- case RATE_36M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9d;
- else
- phy->signal = 0x8d;
-
- break;
- case RATE_48M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x98;
- else
- phy->signal = 0x88;
+static const struct vnt_threshold al2230_vnt_threshold[] = {
+ {0, 0x00, 0x30}, /* Max sensitivity */
+ {68, 0x00, 0x36},
+ {67, 0x00, 0x43},
+ {66, 0x00, 0x51},
+ {65, 0x00, 0x62},
+ {64, 0x00, 0x79},
+ {63, 0x00, 0x93},
+ {62, 0x00, 0xb9},
+ {61, 0x00, 0xe3},
+ {60, 0x01, 0x18},
+ {59, 0x01, 0x54},
+ {58, 0x01, 0xa0},
+ {57, 0x02, 0x20},
+ {56, 0x02, 0xa0},
+ {55, 0x03, 0x00},
+ {53, 0x06, 0x00},
+ {51, 0x09, 0x00},
+ {49, 0x0e, 0x00},
+ {47, 0x15, 0x00},
+ {46, 0x1a, 0x00},
+ {45, 0xff, 0x00}
+};
- break;
- case RATE_54M:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9c;
- else
- phy->signal = 0x8c;
- break;
- default:
- if (pkt_type == PK_TYPE_11A)
- phy->signal = 0x9c;
- else
- phy->signal = 0x8c;
- break;
- }
+static const struct vnt_threshold vt3226_vnt_threshold[] = {
+ {0, 0x00, 0x24}, /* Max sensitivity */
+ {68, 0x00, 0x2d},
+ {67, 0x00, 0x36},
+ {66, 0x00, 0x43},
+ {65, 0x00, 0x52},
+ {64, 0x00, 0x68},
+ {63, 0x00, 0x80},
+ {62, 0x00, 0x9c},
+ {61, 0x00, 0xc0},
+ {60, 0x00, 0xea},
+ {59, 0x01, 0x30},
+ {58, 0x01, 0x70},
+ {57, 0x01, 0xb0},
+ {56, 0x02, 0x30},
+ {55, 0x02, 0xc0},
+ {53, 0x04, 0x00},
+ {51, 0x07, 0x00},
+ {49, 0x0a, 0x00},
+ {47, 0x11, 0x00},
+ {45, 0x18, 0x00},
+ {43, 0x26, 0x00},
+ {42, 0x36, 0x00},
+ {41, 0xff, 0x00}
+};
- if (pkt_type == PK_TYPE_11B) {
- phy->service = 0x00;
- if (ext_bit)
- phy->service |= 0x80;
- phy->len = cpu_to_le16((u16)count);
- } else {
- phy->service = 0x00;
- phy->len = cpu_to_le16((u16)frame_length);
- }
-}
+static const struct vnt_threshold vt3342_vnt_threshold[] = {
+ {0, 0x00, 0x38}, /* Max sensitivity */
+ {66, 0x00, 0x43},
+ {65, 0x00, 0x52},
+ {64, 0x00, 0x68},
+ {63, 0x00, 0x80},
+ {62, 0x00, 0x9c},
+ {61, 0x00, 0xc0},
+ {60, 0x00, 0xea},
+ {59, 0x01, 0x30},
+ {58, 0x01, 0x70},
+ {57, 0x01, 0xb0},
+ {56, 0x02, 0x30},
+ {55, 0x02, 0xc0},
+ {53, 0x04, 0x00},
+ {51, 0x07, 0x00},
+ {49, 0x0a, 0x00},
+ {47, 0x11, 0x00},
+ {45, 0x18, 0x00},
+ {43, 0x26, 0x00},
+ {42, 0x36, 0x00},
+ {41, 0xff, 0x00}
+};
/*
* Description: Set Antenna mode
@@ -352,9 +241,10 @@ int vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode)
int vnt_vt3184_init(struct vnt_private *priv)
{
- int ret = 0;
+ int ret;
u16 length;
- u8 *addr;
+ u8 *addr = NULL;
+ const u8 *c_addr;
u8 data;
ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0, MESSAGE_REQUEST_EEPROM,
@@ -366,23 +256,15 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
- if (priv->rf_type == RF_AL2230 ||
- priv->rf_type == RF_AL2230S) {
+ if ((priv->rf_type == RF_AL2230) ||
+ (priv->rf_type == RF_AL2230S) ||
+ (priv->rf_type == RF_AIROHA7230)) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
- priv->bb_vga[0] = 0x1C;
- priv->bb_vga[1] = 0x10;
- priv->bb_vga[2] = 0x0;
- priv->bb_vga[3] = 0x0;
-
- } else if (priv->rf_type == RF_AIROHA7230) {
- priv->bb_rx_conf = vnt_vt3184_al2230[10];
- length = sizeof(vnt_vt3184_al2230);
- addr = vnt_vt3184_al2230;
-
- addr[0xd7] = 0x06;
+ if (priv->rf_type == RF_AIROHA7230)
+ addr[0xd7] = 0x06;
priv->bb_vga[0] = 0x1c;
priv->bb_vga[1] = 0x10;
@@ -390,25 +272,11 @@ int vnt_vt3184_init(struct vnt_private *priv)
priv->bb_vga[3] = 0x0;
} else if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3226D0)) {
+ (priv->rf_type == RF_VT3226D0) ||
+ (priv->rf_type == RF_VT3342A0)) {
priv->bb_rx_conf = vnt_vt3184_vt3226d0[10];
length = sizeof(vnt_vt3184_vt3226d0);
- addr = vnt_vt3184_vt3226d0;
-
- priv->bb_vga[0] = 0x20;
- priv->bb_vga[1] = 0x10;
- priv->bb_vga[2] = 0x0;
- priv->bb_vga[3] = 0x0;
-
- /* Fix VT3226 DFC system timing issue */
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL2,
- SOFTPWRCTL_RFLEOPT);
- if (ret)
- goto end;
- } else if (priv->rf_type == RF_VT3342A0) {
- priv->bb_rx_conf = vnt_vt3184_vt3226d0[10];
- length = sizeof(vnt_vt3184_vt3226d0);
- addr = vnt_vt3184_vt3226d0;
+ c_addr = vnt_vt3184_vt3226d0;
priv->bb_vga[0] = 0x20;
priv->bb_vga[1] = 0x10;
@@ -424,8 +292,11 @@ int vnt_vt3184_init(struct vnt_private *priv)
goto end;
}
+ if (addr)
+ c_addr = addr;
+
ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
- MESSAGE_REQUEST_BBREG, length, addr);
+ MESSAGE_REQUEST_BBREG, length, c_addr);
if (ret)
goto end;
@@ -435,19 +306,13 @@ int vnt_vt3184_init(struct vnt_private *priv)
if (ret)
goto end;
- if (priv->rf_type == RF_VT3226 ||
- priv->rf_type == RF_VT3342A0) {
- ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x23);
- if (ret)
- goto end;
+ if ((priv->rf_type == RF_VT3226) ||
+ (priv->rf_type == RF_VT3342A0) ||
+ (priv->rf_type == RF_VT3226D0)) {
+ data = (priv->rf_type == RF_VT3226D0) ? 0x11 : 0x23;
- ret = vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, BIT(0));
- if (ret)
- goto end;
- } else if (priv->rf_type == RF_VT3226D0) {
ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x11);
+ MAC_REG_ITRTMSET, data);
if (ret)
goto end;
@@ -507,21 +372,22 @@ int vnt_set_short_slot_time(struct vnt_private *priv)
ret = vnt_control_in_u8(priv, MESSAGE_REQUEST_BBREG, 0xe7, &bb_vga);
if (ret)
- goto end;
+ return ret;
if (bb_vga == priv->bb_vga[0])
priv->bb_rx_conf |= 0x20;
- ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a,
- priv->bb_rx_conf);
-
-end:
- return ret;
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a,
+ priv->bb_rx_conf);
}
-void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data)
+int vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data)
{
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xE7, data);
+ int ret;
+
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xE7, data);
+ if (ret)
+ return ret;
/* patch for 3253B0 Baseband with Cardbus module */
if (priv->short_slot_time)
@@ -529,7 +395,8 @@ void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data)
else
priv->bb_rx_conf |= 0x20; /* 0010 0000 */
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a, priv->bb_rx_conf);
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0a,
+ priv->bb_rx_conf);
}
/*
@@ -570,268 +437,57 @@ int vnt_exit_deep_sleep(struct vnt_private *priv)
return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x0d, 0x01);
}
-void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
+int vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
{
- u8 cr_201 = 0x0, cr_206 = 0x0;
- u8 ed_inx = priv->bb_pre_ed_index;
+ const struct vnt_threshold *threshold = NULL;
+ u8 length;
+ u8 cr_201, cr_206;
+ u8 ed_inx;
+ int ret;
switch (priv->rf_type) {
case RF_AL2230:
case RF_AL2230S:
case RF_AIROHA7230:
- if (scanning) { /* Max sensitivity */
- ed_inx = 0;
- cr_206 = 0x30;
- break;
- }
-
- if (priv->bb_pre_ed_rssi <= 45) {
- ed_inx = 20;
- cr_201 = 0xff;
- } else if (priv->bb_pre_ed_rssi <= 46) {
- ed_inx = 19;
- cr_201 = 0x1a;
- } else if (priv->bb_pre_ed_rssi <= 47) {
- ed_inx = 18;
- cr_201 = 0x15;
- } else if (priv->bb_pre_ed_rssi <= 49) {
- ed_inx = 17;
- cr_201 = 0xe;
- } else if (priv->bb_pre_ed_rssi <= 51) {
- ed_inx = 16;
- cr_201 = 0x9;
- } else if (priv->bb_pre_ed_rssi <= 53) {
- ed_inx = 15;
- cr_201 = 0x6;
- } else if (priv->bb_pre_ed_rssi <= 55) {
- ed_inx = 14;
- cr_201 = 0x3;
- } else if (priv->bb_pre_ed_rssi <= 56) {
- ed_inx = 13;
- cr_201 = 0x2;
- cr_206 = 0xa0;
- } else if (priv->bb_pre_ed_rssi <= 57) {
- ed_inx = 12;
- cr_201 = 0x2;
- cr_206 = 0x20;
- } else if (priv->bb_pre_ed_rssi <= 58) {
- ed_inx = 11;
- cr_201 = 0x1;
- cr_206 = 0xa0;
- } else if (priv->bb_pre_ed_rssi <= 59) {
- ed_inx = 10;
- cr_201 = 0x1;
- cr_206 = 0x54;
- } else if (priv->bb_pre_ed_rssi <= 60) {
- ed_inx = 9;
- cr_201 = 0x1;
- cr_206 = 0x18;
- } else if (priv->bb_pre_ed_rssi <= 61) {
- ed_inx = 8;
- cr_206 = 0xe3;
- } else if (priv->bb_pre_ed_rssi <= 62) {
- ed_inx = 7;
- cr_206 = 0xb9;
- } else if (priv->bb_pre_ed_rssi <= 63) {
- ed_inx = 6;
- cr_206 = 0x93;
- } else if (priv->bb_pre_ed_rssi <= 64) {
- ed_inx = 5;
- cr_206 = 0x79;
- } else if (priv->bb_pre_ed_rssi <= 65) {
- ed_inx = 4;
- cr_206 = 0x62;
- } else if (priv->bb_pre_ed_rssi <= 66) {
- ed_inx = 3;
- cr_206 = 0x51;
- } else if (priv->bb_pre_ed_rssi <= 67) {
- ed_inx = 2;
- cr_206 = 0x43;
- } else if (priv->bb_pre_ed_rssi <= 68) {
- ed_inx = 1;
- cr_206 = 0x36;
- } else {
- ed_inx = 0;
- cr_206 = 0x30;
- }
+ threshold = al2230_vnt_threshold;
+ length = ARRAY_SIZE(al2230_vnt_threshold);
break;
case RF_VT3226:
case RF_VT3226D0:
- if (scanning) { /* Max sensitivity */
- ed_inx = 0;
- cr_206 = 0x24;
- break;
- }
-
- if (priv->bb_pre_ed_rssi <= 41) {
- ed_inx = 22;
- cr_201 = 0xff;
- } else if (priv->bb_pre_ed_rssi <= 42) {
- ed_inx = 21;
- cr_201 = 0x36;
- } else if (priv->bb_pre_ed_rssi <= 43) {
- ed_inx = 20;
- cr_201 = 0x26;
- } else if (priv->bb_pre_ed_rssi <= 45) {
- ed_inx = 19;
- cr_201 = 0x18;
- } else if (priv->bb_pre_ed_rssi <= 47) {
- ed_inx = 18;
- cr_201 = 0x11;
- } else if (priv->bb_pre_ed_rssi <= 49) {
- ed_inx = 17;
- cr_201 = 0xa;
- } else if (priv->bb_pre_ed_rssi <= 51) {
- ed_inx = 16;
- cr_201 = 0x7;
- } else if (priv->bb_pre_ed_rssi <= 53) {
- ed_inx = 15;
- cr_201 = 0x4;
- } else if (priv->bb_pre_ed_rssi <= 55) {
- ed_inx = 14;
- cr_201 = 0x2;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 56) {
- ed_inx = 13;
- cr_201 = 0x2;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 57) {
- ed_inx = 12;
- cr_201 = 0x1;
- cr_206 = 0xb0;
- } else if (priv->bb_pre_ed_rssi <= 58) {
- ed_inx = 11;
- cr_201 = 0x1;
- cr_206 = 0x70;
- } else if (priv->bb_pre_ed_rssi <= 59) {
- ed_inx = 10;
- cr_201 = 0x1;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 60) {
- ed_inx = 9;
- cr_206 = 0xea;
- } else if (priv->bb_pre_ed_rssi <= 61) {
- ed_inx = 8;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 62) {
- ed_inx = 7;
- cr_206 = 0x9c;
- } else if (priv->bb_pre_ed_rssi <= 63) {
- ed_inx = 6;
- cr_206 = 0x80;
- } else if (priv->bb_pre_ed_rssi <= 64) {
- ed_inx = 5;
- cr_206 = 0x68;
- } else if (priv->bb_pre_ed_rssi <= 65) {
- ed_inx = 4;
- cr_206 = 0x52;
- } else if (priv->bb_pre_ed_rssi <= 66) {
- ed_inx = 3;
- cr_206 = 0x43;
- } else if (priv->bb_pre_ed_rssi <= 67) {
- ed_inx = 2;
- cr_206 = 0x36;
- } else if (priv->bb_pre_ed_rssi <= 68) {
- ed_inx = 1;
- cr_206 = 0x2d;
- } else {
- ed_inx = 0;
- cr_206 = 0x24;
- }
+ threshold = vt3226_vnt_threshold;
+ length = ARRAY_SIZE(vt3226_vnt_threshold);
break;
case RF_VT3342A0:
- if (scanning) { /* need Max sensitivity */
- ed_inx = 0;
- cr_206 = 0x38;
- break;
- }
-
- if (priv->bb_pre_ed_rssi <= 41) {
- ed_inx = 20;
- cr_201 = 0xff;
- } else if (priv->bb_pre_ed_rssi <= 42) {
- ed_inx = 19;
- cr_201 = 0x36;
- } else if (priv->bb_pre_ed_rssi <= 43) {
- ed_inx = 18;
- cr_201 = 0x26;
- } else if (priv->bb_pre_ed_rssi <= 45) {
- ed_inx = 17;
- cr_201 = 0x18;
- } else if (priv->bb_pre_ed_rssi <= 47) {
- ed_inx = 16;
- cr_201 = 0x11;
- } else if (priv->bb_pre_ed_rssi <= 49) {
- ed_inx = 15;
- cr_201 = 0xa;
- } else if (priv->bb_pre_ed_rssi <= 51) {
- ed_inx = 14;
- cr_201 = 0x7;
- } else if (priv->bb_pre_ed_rssi <= 53) {
- ed_inx = 13;
- cr_201 = 0x4;
- } else if (priv->bb_pre_ed_rssi <= 55) {
- ed_inx = 12;
- cr_201 = 0x2;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 56) {
- ed_inx = 11;
- cr_201 = 0x2;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 57) {
- ed_inx = 10;
- cr_201 = 0x1;
- cr_206 = 0xb0;
- } else if (priv->bb_pre_ed_rssi <= 58) {
- ed_inx = 9;
- cr_201 = 0x1;
- cr_206 = 0x70;
- } else if (priv->bb_pre_ed_rssi <= 59) {
- ed_inx = 8;
- cr_201 = 0x1;
- cr_206 = 0x30;
- } else if (priv->bb_pre_ed_rssi <= 60) {
- ed_inx = 7;
- cr_206 = 0xea;
- } else if (priv->bb_pre_ed_rssi <= 61) {
- ed_inx = 6;
- cr_206 = 0xc0;
- } else if (priv->bb_pre_ed_rssi <= 62) {
- ed_inx = 5;
- cr_206 = 0x9c;
- } else if (priv->bb_pre_ed_rssi <= 63) {
- ed_inx = 4;
- cr_206 = 0x80;
- } else if (priv->bb_pre_ed_rssi <= 64) {
- ed_inx = 3;
- cr_206 = 0x68;
- } else if (priv->bb_pre_ed_rssi <= 65) {
- ed_inx = 2;
- cr_206 = 0x52;
- } else if (priv->bb_pre_ed_rssi <= 66) {
- ed_inx = 1;
- cr_206 = 0x43;
- } else {
- ed_inx = 0;
- cr_206 = 0x38;
- }
+ threshold = vt3342_vnt_threshold;
+ length = ARRAY_SIZE(vt3342_vnt_threshold);
break;
}
+ if (!threshold)
+ return -EINVAL;
+
+ for (ed_inx = scanning ? 0 : length - 1; ed_inx > 0; ed_inx--) {
+ if (priv->bb_pre_ed_rssi <= threshold[ed_inx].bb_pre_ed_rssi)
+ break;
+ }
+
+ cr_201 = threshold[ed_inx].cr_201;
+ cr_206 = threshold[ed_inx].cr_206;
+
if (ed_inx == priv->bb_pre_ed_index && !scanning)
- return;
+ return 0;
priv->bb_pre_ed_index = ed_inx;
dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n",
__func__, priv->bb_pre_ed_rssi);
- if (!cr_201 && !cr_206)
- return;
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xc9, cr_201);
+ if (ret)
+ return ret;
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xc9, cr_201);
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xce, cr_206);
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0xce, cr_206);
}
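
The per-RF if/else ladders removed above collapse into a walk over the new vnt_threshold tables. A standalone userspace sketch (not driver code; the table rows and the RSSI value below are made up) of the lookup the new loop performs: index 0 is kept while scanning, otherwise the walk starts at the last row and moves down until the RSSI bound covers the current value.

#include <stdio.h>

struct vnt_threshold {
        unsigned char bb_pre_ed_rssi;   /* RSSI bound; index 0 = max sensitivity */
        unsigned char cr_201;
        unsigned char cr_206;
};

/* Condensed, illustrative table in the same shape as al2230_vnt_threshold */
static const struct vnt_threshold demo_threshold[] = {
        {0,  0x00, 0x30},
        {68, 0x00, 0x36},
        {60, 0x01, 0x18},
        {45, 0xff, 0x00},
};

int main(void)
{
        unsigned char rssi = 58;        /* hypothetical bb_pre_ed_rssi */
        int scanning = 0;
        size_t len = sizeof(demo_threshold) / sizeof(demo_threshold[0]);
        size_t ed_inx;

        /* Scanning keeps index 0 (max sensitivity); otherwise walk down
         * from the last row until the bound covers the current RSSI. */
        for (ed_inx = scanning ? 0 : len - 1; ed_inx > 0; ed_inx--) {
                if (rssi <= demo_threshold[ed_inx].bb_pre_ed_rssi)
                        break;
        }

        printf("index %zu -> cr_201 0x%02x, cr_206 0x%02x\n", ed_inx,
               demo_threshold[ed_inx].cr_201, demo_threshold[ed_inx].cr_206);
        return 0;
}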
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index dc42aa6ae1d9..12456ebc23ec 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -66,25 +66,12 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-/* Length, Service, and Signal fields of Phy for Tx */
-struct vnt_phy_field {
- u8 signal;
- u8 service;
- __le16 len;
-} __packed;
-
-unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate);
-
-void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
-
int vnt_set_short_slot_time(struct vnt_private *priv);
-void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
+int vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
int vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode);
int vnt_vt3184_init(struct vnt_private *priv);
int vnt_set_deep_sleep(struct vnt_private *priv);
int vnt_exit_deep_sleep(struct vnt_private *priv);
-void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
+int vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
#endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index dc3ab10eb630..10f3dfda83b5 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -26,7 +26,8 @@
*
*/
-#include <linux/bits.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
#include "device.h"
#include "card.h"
#include "baseband.h"
@@ -45,20 +46,12 @@ static const u16 cw_rxbcntsf_off[MAX_RATE] = {
192, 96, 34, 17, 34, 23, 17, 11, 8, 5, 4, 3
};
-/*
- * Description: Set NIC media channel
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * connection_channel - Channel to be set
- * Out:
- * none
- */
-void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
+int vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
{
+ int ret;
+
if (connection_channel > CB_MAX_CHANNEL || !connection_channel)
- return;
+ return -EINVAL;
/* clear NAV */
vnt_mac_reg_bits_on(priv, MAC_REG_MACCR, MACCR_CLRNAV);
@@ -67,284 +60,73 @@ void vnt_set_channel(struct vnt_private *priv, u32 connection_channel)
vnt_mac_reg_bits_off(priv, MAC_REG_CHANNEL,
(BIT(7) | BIT(5) | BIT(4)));
- vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
- connection_channel, 0, 0, NULL);
-
- vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
- (u8)(connection_channel | 0x80));
-}
-
-/*
- * Description: Get CCK mode basic rate
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * rate_idx - Receiving data rate
- * Out:
- * none
- *
- * Return Value: response Control frame rate
- *
- */
-static u16 vnt_get_cck_rate(struct vnt_private *priv, u16 rate_idx)
-{
- u16 ui = rate_idx;
-
- while (ui > RATE_1M) {
- if (priv->basic_rates & (1 << ui))
- return ui;
- ui--;
- }
+ ret = vnt_control_out(priv, MESSAGE_TYPE_SELECT_CHANNEL,
+ connection_channel, 0, 0, NULL);
+ if (ret)
+ return ret;
- return RATE_1M;
+ return vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG, MAC_REG_CHANNEL,
+ (u8)(connection_channel | 0x80));
}
-/*
- * Description: Get OFDM mode basic rate
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * rate_idx - Receiving data rate
- * Out:
- * none
- *
- * Return Value: response Control frame rate
- *
- */
-static u16 vnt_get_ofdm_rate(struct vnt_private *priv, u16 rate_idx)
-{
- u16 ui = rate_idx;
-
- dev_dbg(&priv->usb->dev, "%s basic rate: %d\n",
- __func__, priv->basic_rates);
-
- if (!vnt_ofdm_min_rate(priv)) {
- dev_dbg(&priv->usb->dev, "%s (NO OFDM) %d\n",
- __func__, rate_idx);
- if (rate_idx > RATE_24M)
- rate_idx = RATE_24M;
- return rate_idx;
- }
-
- while (ui > RATE_11M) {
- if (priv->basic_rates & (1 << ui)) {
- dev_dbg(&priv->usb->dev, "%s rate: %d\n",
- __func__, ui);
- return ui;
- }
- ui--;
- }
-
- dev_dbg(&priv->usb->dev, "%s basic rate: 24M\n", __func__);
+static const u8 vnt_rspinf_b_short_table[] = {
+ 0x70, 0x00, 0x00, 0x00, 0x38, 0x00, 0x09, 0x00,
+ 0x15, 0x00, 0x0a, 0x00, 0x0b, 0x00, 0x0b, 0x80
+};
- return RATE_24M;
-}
+static const u8 vnt_rspinf_b_long_table[] = {
+ 0x70, 0x00, 0x00, 0x00, 0x38, 0x00, 0x01, 0x00,
+ 0x15, 0x00, 0x02, 0x00, 0x0b, 0x00, 0x03, 0x80
+};
-/*
- * Description: Calculate TxRate and RsvTime fields for RSPINF in OFDM mode.
- *
- * Parameters:
- * In:
- * rate - Tx Rate
- * bb_type - Tx Packet type
- * Out:
- * tx_rate - pointer to RSPINF TxRate field
- * rsv_time- pointer to RSPINF RsvTime field
- *
- * Return Value: none
- *
- */
-static void vnt_calculate_ofdm_rate(u16 rate, u8 bb_type,
- u8 *tx_rate, u8 *rsv_time)
-{
- switch (rate) {
- case RATE_6M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9b;
- *rsv_time = 24;
- } else {
- *tx_rate = 0x8b;
- *rsv_time = 30;
- }
- break;
- case RATE_9M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9f;
- *rsv_time = 16;
- } else {
- *tx_rate = 0x8f;
- *rsv_time = 22;
- }
- break;
- case RATE_12M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9a;
- *rsv_time = 12;
- } else {
- *tx_rate = 0x8a;
- *rsv_time = 18;
- }
- break;
- case RATE_18M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9e;
- *rsv_time = 8;
- } else {
- *tx_rate = 0x8e;
- *rsv_time = 14;
- }
- break;
- case RATE_36M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9d;
- *rsv_time = 4;
- } else {
- *tx_rate = 0x8d;
- *rsv_time = 10;
- }
- break;
- case RATE_48M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x98;
- *rsv_time = 4;
- } else {
- *tx_rate = 0x88;
- *rsv_time = 10;
- }
- break;
- case RATE_54M:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x9c;
- *rsv_time = 4;
- } else {
- *tx_rate = 0x8c;
- *rsv_time = 10;
- }
- break;
- case RATE_24M:
- default:
- if (bb_type == BB_TYPE_11A) {
- *tx_rate = 0x99;
- *rsv_time = 8;
- } else {
- *tx_rate = 0x89;
- *rsv_time = 14;
- }
- break;
- }
-}
+static const u8 vnt_rspinf_a_table[] = {
+ 0x9b, 0x18, 0x9f, 0x10, 0x9a, 0x0a, 0x9e, 0x08, 0x99,
+ 0x08, 0x9d, 0x04, 0x98, 0x04, 0x9c, 0x04, 0x9c, 0x04
+};
-/*
- * Description: Set RSPINF
- *
- * Parameters:
- * In:
- * pDevice - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
+static const u8 vnt_rspinf_gb_table[] = {
+ 0x8b, 0x1e, 0x8f, 0x16, 0x8a, 0x12, 0x8e, 0x0e, 0x89,
+ 0x0e, 0x8d, 0x0a, 0x88, 0x0a, 0x8c, 0x0a, 0x8c, 0x0a
+};
-void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
+int vnt_set_rspinf(struct vnt_private *priv, u8 bb_type)
{
- struct vnt_phy_field phy[4];
- u8 tx_rate[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; /* For OFDM */
- u8 rsv_time[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
- u8 data[34];
- int i;
-
- /*RSPINF_b_1*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_1M),
- PK_TYPE_11B, &phy[0]);
-
- /*RSPINF_b_2*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_2M),
- PK_TYPE_11B, &phy[1]);
-
- /*RSPINF_b_5*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_5M),
- PK_TYPE_11B, &phy[2]);
-
- /*RSPINF_b_11*/
- vnt_get_phy_field(priv, 14, vnt_get_cck_rate(priv, RATE_11M),
- PK_TYPE_11B, &phy[3]);
-
- /*RSPINF_a_6*/
- vnt_calculate_ofdm_rate(RATE_6M, bb_type, &tx_rate[0], &rsv_time[0]);
-
- /*RSPINF_a_9*/
- vnt_calculate_ofdm_rate(RATE_9M, bb_type, &tx_rate[1], &rsv_time[1]);
+ const u8 *data;
+ u16 len;
+ int ret;
- /*RSPINF_a_12*/
- vnt_calculate_ofdm_rate(RATE_12M, bb_type, &tx_rate[2], &rsv_time[2]);
-
- /*RSPINF_a_18*/
- vnt_calculate_ofdm_rate(RATE_18M, bb_type, &tx_rate[3], &rsv_time[3]);
-
- /*RSPINF_a_24*/
- vnt_calculate_ofdm_rate(RATE_24M, bb_type, &tx_rate[4], &rsv_time[4]);
-
- /*RSPINF_a_36*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_36M),
- bb_type, &tx_rate[5], &rsv_time[5]);
-
- /*RSPINF_a_48*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_48M),
- bb_type, &tx_rate[6], &rsv_time[6]);
-
- /*RSPINF_a_54*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_54M),
- bb_type, &tx_rate[7], &rsv_time[7]);
-
- /*RSPINF_a_72*/
- vnt_calculate_ofdm_rate(vnt_get_ofdm_rate(priv, RATE_54M),
- bb_type, &tx_rate[8], &rsv_time[8]);
-
- put_unaligned(phy[0].len, (u16 *)&data[0]);
- data[2] = phy[0].signal;
- data[3] = phy[0].service;
-
- put_unaligned(phy[1].len, (u16 *)&data[4]);
- data[6] = phy[1].signal;
- data[7] = phy[1].service;
-
- put_unaligned(phy[2].len, (u16 *)&data[8]);
- data[10] = phy[2].signal;
- data[11] = phy[2].service;
+ if (priv->preamble_type) {
+ data = vnt_rspinf_b_short_table;
+ len = ARRAY_SIZE(vnt_rspinf_b_short_table);
+ } else {
+ data = vnt_rspinf_b_long_table;
+ len = ARRAY_SIZE(vnt_rspinf_b_long_table);
+ }
- put_unaligned(phy[3].len, (u16 *)&data[12]);
- data[14] = phy[3].signal;
- data[15] = phy[3].service;
+ /* RSPINF_b_1 to RSPINF_b_11 */
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1,
+ MESSAGE_REQUEST_MACREG, len, data);
+ if (ret)
+ return ret;
- for (i = 0; i < 9; i++) {
- data[16 + i * 2] = tx_rate[i];
- data[16 + i * 2 + 1] = rsv_time[i];
+ if (bb_type == BB_TYPE_11A) {
+ data = vnt_rspinf_a_table;
+ len = ARRAY_SIZE(vnt_rspinf_a_table);
+ } else {
+ data = vnt_rspinf_gb_table;
+ len = ARRAY_SIZE(vnt_rspinf_gb_table);
}
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_B_1,
- MESSAGE_REQUEST_MACREG, 34, &data[0]);
+ /* RSPINF_a_6 to RSPINF_a_72 */
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_RSPINF_A_6,
+ MESSAGE_REQUEST_MACREG, len, data);
}
-/*
- * Description: Update IFS
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * Out:
- * none
- *
- * Return Value: None.
- *
- */
-void vnt_update_ifs(struct vnt_private *priv)
+int vnt_update_ifs(struct vnt_private *priv)
{
u8 max_min = 0;
u8 data[4];
+ int ret;
if (priv->packet_type == PK_TYPE_11A) {
priv->slot = C_SLOT_SHORT;
@@ -367,89 +149,36 @@ void vnt_update_ifs(struct vnt_private *priv)
priv->eifs = C_EIFS;
- switch (priv->rf_type) {
- case RF_VT3226D0:
- if (priv->bb_type != BB_TYPE_11B) {
- priv->sifs -= 1;
- priv->difs -= 1;
- break;
- }
- /* fall through */
- case RF_AIROHA7230:
- case RF_AL2230:
- case RF_AL2230S:
- if (priv->bb_type != BB_TYPE_11B)
- break;
- /* fall through */
- case RF_RFMD2959:
- case RF_VT3226:
- case RF_VT3342A0:
- priv->sifs -= 3;
- priv->difs -= 3;
- break;
- case RF_MAXIM2829:
- if (priv->bb_type == BB_TYPE_11A) {
- priv->sifs -= 5;
- priv->difs -= 5;
- } else {
- priv->sifs -= 2;
- priv->difs -= 2;
- }
-
- break;
- }
-
data[0] = (u8)priv->sifs;
data[1] = (u8)priv->difs;
data[2] = (u8)priv->eifs;
data[3] = (u8)priv->slot;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_SIFS,
- MESSAGE_REQUEST_MACREG, 4, &data[0]);
+ ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_SIFS,
+ MESSAGE_REQUEST_MACREG, 4, &data[0]);
+ if (ret)
+ return ret;
max_min |= 0xa0;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0,
- MESSAGE_REQUEST_MACREG, 1, &max_min);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_CWMAXMIN0,
+ MESSAGE_REQUEST_MACREG, 1, &max_min);
}
void vnt_update_top_rates(struct vnt_private *priv)
{
- u8 top_ofdm = RATE_24M, top_cck = RATE_1M;
- u8 i;
-
- /*Determines the highest basic rate.*/
- for (i = RATE_54M; i >= RATE_6M; i--) {
- if (priv->basic_rates & (u16)(1 << i)) {
- top_ofdm = i;
- break;
- }
- }
-
- priv->top_ofdm_basic_rate = top_ofdm;
+ int pos;
- for (i = RATE_11M;; i--) {
- if (priv->basic_rates & (u16)(1 << i)) {
- top_cck = i;
- break;
- }
- if (i == RATE_1M)
- break;
- }
+ pos = fls(priv->basic_rates & GENMASK(RATE_54M, RATE_6M));
+ priv->top_ofdm_basic_rate = pos ? (pos - 1) : RATE_24M;
- priv->top_cck_basic_rate = top_cck;
+ pos = fls(priv->basic_rates & GENMASK(RATE_11M, RATE_1M));
+ priv->top_cck_basic_rate = pos ? (pos - 1) : RATE_1M;
}
-int vnt_ofdm_min_rate(struct vnt_private *priv)
+bool vnt_ofdm_min_rate(struct vnt_private *priv)
{
- int ii;
-
- for (ii = RATE_54M; ii >= RATE_6M; ii--) {
- if ((priv->basic_rates) & ((u16)BIT(ii)))
- return true;
- }
-
- return false;
+ return priv->basic_rates & GENMASK(RATE_54M, RATE_6M) ? true : false;
}
u8 vnt_get_pkt_type(struct vnt_private *priv)
@@ -481,23 +210,8 @@ u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE];
}
-/*
- * Description: Sync. TSF counter to BSS
- * Get TSF offset and write to HW
- *
- * Parameters:
- * In:
- * priv - The adapter to be sync.
- * time_stamp - Rx BCN's TSF
- * local_tsf - Local TSF
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
- u64 time_stamp, u64 local_tsf)
+int vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
+ u64 time_stamp, u64 local_tsf)
{
u64 tsf_offset = 0;
u8 data[8];
@@ -513,8 +227,8 @@ void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
data[6] = (u8)(tsf_offset >> 48);
data[7] = (u8)(tsf_offset >> 56);
- vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TSF, 0, 8, data);
+ return vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
+ MESSAGE_REQUEST_TSF, 0, 8, data);
}
/*
@@ -589,21 +303,7 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval)
return tsf;
}
-/*
- * Description: Set NIC TSF counter for first Beacon time
- * Get NEXTTBTT from adjusted TSF and Beacon Interval
- *
- * Parameters:
- * In:
- * dwIoBase - IO Base
- * beacon_interval - Beacon Interval
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
+int vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
{
u64 next_tbtt = 0;
u8 data[8];
@@ -621,29 +321,15 @@ void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval)
data[6] = (u8)(next_tbtt >> 48);
data[7] = (u8)(next_tbtt >> 56);
- vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TBTT, 0, 8, data);
+ return vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
+ MESSAGE_REQUEST_TBTT, 0, 8, data);
}
-/*
- * Description: Sync NIC TSF counter for Beacon time
- * Get NEXTTBTT and write to HW
- *
- * Parameters:
- * In:
- * priv - The adapter to be set
- * tsf - Current TSF counter
- * beacon_interval - Beacon Interval
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
- u16 beacon_interval)
+int vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
+ u16 beacon_interval)
{
u8 data[8];
+ int ret;
tsf = vnt_get_next_tbtt(tsf, beacon_interval);
@@ -656,10 +342,13 @@ void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
data[6] = (u8)(tsf >> 48);
data[7] = (u8)(tsf >> 56);
- vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
- MESSAGE_REQUEST_TBTT, 0, 8, data);
+ ret = vnt_control_out(priv, MESSAGE_TYPE_SET_TSFTBTT,
+ MESSAGE_REQUEST_TBTT, 0, 8, data);
+ if (ret)
+ return ret;
dev_dbg(&priv->usb->dev, "%s TBTT: %8llx\n", __func__, tsf);
+ return 0;
}
/*
@@ -723,9 +412,13 @@ int vnt_radio_power_on(struct vnt_private *priv)
{
int ret = 0;
- vnt_exit_deep_sleep(priv);
+ ret = vnt_exit_deep_sleep(priv);
+ if (ret)
+ return ret;
- vnt_mac_reg_bits_on(priv, MAC_REG_HOSTCR, HOSTCR_RXON);
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_HOSTCR, HOSTCR_RXON);
+ if (ret)
+ return ret;
switch (priv->rf_type) {
case RF_AL2230:
@@ -734,56 +427,69 @@ int vnt_radio_power_on(struct vnt_private *priv)
case RF_VT3226:
case RF_VT3226D0:
case RF_VT3342A0:
- vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL,
- (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
- break;
+ ret = vnt_mac_reg_bits_on(priv, MAC_REG_SOFTPWRCTL,
+ (SOFTPWRCTL_SWPE2 |
+ SOFTPWRCTL_SWPE3));
+ if (ret)
+ return ret;
}
- vnt_mac_reg_bits_off(priv, MAC_REG_GPIOCTL1, GPIO3_INTMD);
-
- return ret;
+ return vnt_mac_reg_bits_off(priv, MAC_REG_GPIOCTL1, GPIO3_INTMD);
}
-void vnt_set_bss_mode(struct vnt_private *priv)
+int vnt_set_bss_mode(struct vnt_private *priv)
{
- if (priv->rf_type == RF_AIROHA7230 && priv->bb_type == BB_TYPE_11A)
- vnt_mac_set_bb_type(priv, BB_TYPE_11G);
- else
- vnt_mac_set_bb_type(priv, priv->bb_type);
+ int ret;
+ unsigned char type = priv->bb_type;
+ unsigned char data = 0;
+ unsigned char bb_vga_0 = 0x1c;
+ unsigned char bb_vga_2_3 = 0x00;
- priv->packet_type = vnt_get_pkt_type(priv);
+ if (priv->rf_type == RF_AIROHA7230 && priv->bb_type == BB_TYPE_11A)
+ type = BB_TYPE_11G;
- if (priv->bb_type == BB_TYPE_11A)
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x88, 0x03);
- else if (priv->bb_type == BB_TYPE_11B)
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x88, 0x02);
- else if (priv->bb_type == BB_TYPE_11G)
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG, 0x88, 0x08);
+ ret = vnt_mac_set_bb_type(priv, type);
+ if (ret)
+ return ret;
- vnt_update_ifs(priv);
- vnt_set_rspinf(priv, (u8)priv->bb_type);
+ priv->packet_type = vnt_get_pkt_type(priv);
if (priv->bb_type == BB_TYPE_11A) {
- if (priv->rf_type == RF_AIROHA7230) {
- priv->bb_vga[0] = 0x20;
+ data = 0x03;
+ bb_vga_0 = 0x20;
+ bb_vga_2_3 = 0x10;
+ } else if (priv->bb_type == BB_TYPE_11B) {
+ data = 0x02;
+ } else if (priv->bb_type == BB_TYPE_11G) {
+ data = 0x08;
+ }
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
- 0xe7, priv->bb_vga[0]);
- }
+ if (data) {
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
+ 0x88, data);
+ if (ret)
+ return ret;
+ }
- priv->bb_vga[2] = 0x10;
- priv->bb_vga[3] = 0x10;
- } else {
- if (priv->rf_type == RF_AIROHA7230) {
- priv->bb_vga[0] = 0x1c;
+ ret = vnt_update_ifs(priv);
+ if (ret)
+ return ret;
- vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
- 0xe7, priv->bb_vga[0]);
- }
+ ret = vnt_set_rspinf(priv, priv->bb_type);
+ if (ret)
+ return ret;
- priv->bb_vga[2] = 0x0;
- priv->bb_vga[3] = 0x0;
+ if (priv->rf_type == RF_AIROHA7230) {
+ priv->bb_vga[0] = bb_vga_0;
+
+ ret = vnt_control_out_u8(priv, MESSAGE_REQUEST_BBREG,
+ 0xe7, priv->bb_vga[0]);
+ if (ret)
+ return ret;
}
- vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
+ priv->bb_vga[2] = bb_vga_2_3;
+ priv->bb_vga[3] = bb_vga_2_3;
+
+ return vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
}
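
vnt_update_top_rates() and vnt_ofdm_min_rate() now lean on fls()/GENMASK() instead of open-coded descending loops. Below is a userspace sketch of that arithmetic with made-up rate indices and a sample basic_rates mask; fls_like() only stands in for the kernel's fls().

#include <stdio.h>

/* Hypothetical rate bit positions, lowest to highest */
enum { RATE_1M, RATE_2M, RATE_5M, RATE_11M, RATE_6M, RATE_9M, RATE_12M,
       RATE_18M, RATE_24M, RATE_36M, RATE_48M, RATE_54M };

/* Bits l..h set, mirroring what the kernel's GENMASK() yields for this range */
#define GENMASK(h, l)   (((1u << ((h) - (l) + 1)) - 1) << (l))

/* 1-based index of the highest set bit, 0 if none (same semantics as fls()) */
static int fls_like(unsigned int x)
{
        int pos = 0;

        while (x) {
                x >>= 1;
                pos++;
        }
        return pos;
}

int main(void)
{
        unsigned int basic_rates = (1u << RATE_1M) | (1u << RATE_2M) |
                                   (1u << RATE_24M);    /* sample mask */
        int pos;

        pos = fls_like(basic_rates & GENMASK(RATE_54M, RATE_6M));
        printf("top OFDM basic rate index: %d\n", pos ? pos - 1 : RATE_24M);

        pos = fls_like(basic_rates & GENMASK(RATE_11M, RATE_1M));
        printf("top CCK basic rate index: %d\n", pos ? pos - 1 : RATE_1M);

        printf("OFDM basic rate present: %s\n",
               basic_rates & GENMASK(RATE_54M, RATE_6M) ? "true" : "false");
        return 0;
}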
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 75cd340c0cce..a524fdc60ae3 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -25,23 +25,23 @@
struct vnt_private;
-void vnt_set_channel(struct vnt_private *priv, u32 connection_channel);
-void vnt_set_rspinf(struct vnt_private *priv, u8 bb_type);
-void vnt_update_ifs(struct vnt_private *priv);
+int vnt_set_channel(struct vnt_private *priv, u32 connection_channel);
+int vnt_set_rspinf(struct vnt_private *priv, u8 bb_type);
+int vnt_update_ifs(struct vnt_private *priv);
void vnt_update_top_rates(struct vnt_private *priv);
-int vnt_ofdm_min_rate(struct vnt_private *priv);
-void vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
- u64 time_stamp, u64 local_tsf);
+bool vnt_ofdm_min_rate(struct vnt_private *priv);
+int vnt_adjust_tsf(struct vnt_private *priv, u8 rx_rate,
+ u64 time_stamp, u64 local_tsf);
bool vnt_get_current_tsf(struct vnt_private *priv, u64 *current_tsf);
bool vnt_clear_current_tsf(struct vnt_private *priv);
-void vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval);
-void vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
- u16 beacon_interval);
+int vnt_reset_next_tbtt(struct vnt_private *priv, u16 beacon_interval);
+int vnt_update_next_tbtt(struct vnt_private *priv, u64 tsf,
+ u16 beacon_interval);
u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval);
u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2);
int vnt_radio_power_off(struct vnt_private *priv);
int vnt_radio_power_on(struct vnt_private *priv);
u8 vnt_get_pkt_type(struct vnt_private *priv);
-void vnt_set_bss_mode(struct vnt_private *priv);
+int vnt_set_bss_mode(struct vnt_private *priv);
#endif /* __CARD_H__ */
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index e6ee9411f080..947530fefe94 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -73,6 +73,10 @@
#define DEVICE_VERSION "mac80211"
+#define FIRMWARE_VERSION 0x133 /* version 1.51 */
+#define FIRMWARE_NAME "vntwusb.fw"
+#define FIRMWARE_CHUNK_SIZE 0x400
+
#define CONFIG_PATH "/etc/vntconfiguration.dat"
#define MAX_UINTS 8
@@ -202,8 +206,7 @@ struct vnt_rsp_card_init {
* Enum of context types for SendPacket
*/
enum {
- CONTEXT_DATA_PACKET = 1,
- CONTEXT_MGMT_PACKET,
+ CONTEXT_DATA_PACKET = 0,
CONTEXT_BEACON_PACKET
};
@@ -234,18 +237,14 @@ struct vnt_rcb {
struct vnt_usb_send_context {
void *priv;
struct sk_buff *skb;
- struct urb *urb;
- struct ieee80211_hdr *hdr;
- unsigned int buf_len;
+ void *tx_buffer;
u32 frame_len;
u16 tx_hdr_size;
u16 tx_rate;
u8 type;
u8 pkt_no;
u8 pkt_type;
- u8 need_ack;
bool in_use;
- unsigned char data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
};
/*
@@ -288,6 +287,7 @@ struct vnt_private {
/* Variables to track resources for the BULK Out Pipe */
struct vnt_usb_send_context *tx_context[CB_MAX_TX_DESC];
+ struct usb_anchor tx_submitted;
u32 num_tx_context;
/* Variables to track resources for the Interrupt In Pipe */
@@ -344,13 +344,9 @@ struct vnt_private {
u8 ofdm_pwr_tbl[14];
u8 ofdm_a_pwr_tbl[42];
- u16 current_rate;
u16 tx_rate_fb0;
u16 tx_rate_fb1;
- u8 short_retry_limit;
- u8 long_retry_limit;
-
enum nl80211_iftype op_mode;
int short_slot_time;
@@ -383,8 +379,6 @@ struct vnt_private {
u8 bb_pre_ed_rssi;
u8 bb_pre_ed_index;
- u16 wake_up_count;
-
/* command timer */
struct delayed_work run_command_work;
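
FIRMWARE_VERSION, FIRMWARE_NAME and FIRMWARE_CHUNK_SIZE move into device.h because the loader below is folded into main_usb.c. The slicing the download loop performs is just fixed-size chunks with a short tail; a sketch with a made-up firmware size:

#include <stdio.h>

#define FIRMWARE_CHUNK_SIZE 0x400

int main(void)
{
        size_t fw_size = 0x1f00;        /* hypothetical firmware image size */
        size_t ii;

        /* Same slicing as the download loop: full chunks, then the remainder */
        for (ii = 0; ii < fw_size; ii += FIRMWARE_CHUNK_SIZE) {
                size_t length = fw_size - ii < FIRMWARE_CHUNK_SIZE ?
                                fw_size - ii : FIRMWARE_CHUNK_SIZE;

                printf("write 0x%03zx bytes at offset 0x%04zx\n", length, ii);
        }
        return 0;
}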
diff --git a/drivers/staging/vt6656/firmware.c b/drivers/staging/vt6656/firmware.c
deleted file mode 100644
index 70358d427211..000000000000
--- a/drivers/staging/vt6656/firmware.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: baseband.c
- *
- * Purpose: Implement functions to access baseband
- *
- * Author: Yiching Chen
- *
- * Date: May 20, 2004
- *
- * Functions:
- *
- * Revision History:
- *
- */
-
-#include <linux/compiler.h>
-#include "firmware.h"
-#include "usbpipe.h"
-
-#define FIRMWARE_VERSION 0x133 /* version 1.51 */
-#define FIRMWARE_NAME "vntwusb.fw"
-
-#define FIRMWARE_CHUNK_SIZE 0x400
-
-int vnt_download_firmware(struct vnt_private *priv)
-{
- struct device *dev = &priv->usb->dev;
- const struct firmware *fw;
- u16 length;
- int ii;
- int ret = 0;
-
- dev_dbg(dev, "---->Download firmware\n");
-
- ret = request_firmware(&fw, FIRMWARE_NAME, dev);
- if (ret) {
- dev_err(dev, "firmware file %s request failed (%d)\n",
- FIRMWARE_NAME, ret);
- goto end;
- }
-
- for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
- length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
-
- ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
- fw->data + ii);
- if (ret)
- goto free_fw;
-
- dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
- }
-
-free_fw:
- release_firmware(fw);
-end:
- return ret;
-}
-MODULE_FIRMWARE(FIRMWARE_NAME);
-
-int vnt_firmware_branch_to_sram(struct vnt_private *priv)
-{
- dev_dbg(&priv->usb->dev, "---->Branch to Sram\n");
-
- return vnt_control_out(priv, 1, 0x1200, 0x0000, 0, NULL);
-}
-
-int vnt_check_firmware_version(struct vnt_private *priv)
-{
- int ret = 0;
-
- ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
- MESSAGE_REQUEST_VERSION, 2,
- (u8 *)&priv->firmware_version);
- if (ret) {
- dev_dbg(&priv->usb->dev,
- "Could not get firmware version: %d.\n", ret);
- goto end;
- }
-
- dev_dbg(&priv->usb->dev, "Firmware Version [%04x]\n",
- priv->firmware_version);
-
- if (priv->firmware_version == 0xFFFF) {
- dev_dbg(&priv->usb->dev, "In Loader.\n");
- ret = -EINVAL;
- goto end;
- }
-
- if (priv->firmware_version < FIRMWARE_VERSION) {
- /* branch to loader for download new firmware */
- ret = vnt_firmware_branch_to_sram(priv);
- if (ret) {
- dev_dbg(&priv->usb->dev,
- "Could not branch to SRAM: %d.\n", ret);
- } else {
- ret = -EINVAL;
- }
- }
-
-end:
- return ret;
-}
diff --git a/drivers/staging/vt6656/firmware.h b/drivers/staging/vt6656/firmware.h
deleted file mode 100644
index 161126faf396..000000000000
--- a/drivers/staging/vt6656/firmware.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
- * All rights reserved.
- *
- * File: firmware.h
- *
- * Purpose: Version and Release Information
- *
- * Author: Yiching Chen
- *
- * Date: May 20, 2004
- *
- */
-
-#ifndef __FIRMWARE_H__
-#define __FIRMWARE_H__
-
-#include "device.h"
-
-int vnt_download_firmware(struct vnt_private *priv);
-int vnt_firmware_branch_to_sram(struct vnt_private *priv);
-int vnt_check_firmware_version(struct vnt_private *priv);
-
-#endif /* __FIRMWARE_H__ */
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index ac3b188984d0..c66cb53cfc09 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -35,7 +35,7 @@ int vnt_key_init_table(struct vnt_private *priv)
static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
struct ieee80211_key_conf *key, u32 key_type,
- u32 mode, bool onfly_latch)
+ u32 mode)
{
struct vnt_private *priv = hw->priv;
u8 broadcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -68,17 +68,11 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
entry = MAX_KEY_TABLE - 1;
key->hw_key_idx = entry;
/* fall through */
- case VNT_KEY_ALLGROUP:
- key_mode |= VNT_KEY_ALLGROUP;
- if (onfly_latch)
- key_mode |= VNT_KEY_ONFLY_ALL;
- /* fall through */
case VNT_KEY_GROUP_ADDRESS:
- key_mode |= mode;
- /* fall through */
+ key_mode = mode | (mode << 4);
+ break;
case VNT_KEY_GROUP:
- key_mode |= (mode << 4);
- key_mode |= VNT_KEY_GROUP;
+ key_mode = mode << 4;
break;
case VNT_KEY_PAIRWISE:
key_mode |= mode;
@@ -88,8 +82,7 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
return -EINVAL;
}
- if (onfly_latch)
- key_mode |= VNT_KEY_ONFLY;
+ key_mode |= key_type;
if (mode == KEY_CTL_WEP) {
if (key->keylen == WLAN_KEY_LEN_WEP40)
@@ -98,9 +91,8 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
key->key[15] |= 0x80;
}
- vnt_mac_set_keyentry(priv, key_mode, entry, key_inx, bssid, key->key);
-
- return 0;
+ return vnt_mac_set_keyentry(priv, key_mode, entry,
+ key_inx, bssid, key->key);
}
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
@@ -109,28 +101,21 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct vnt_private *priv = hw->priv;
u8 *mac_addr = NULL;
u8 key_dec_mode = 0;
- int ret = 0, u;
if (sta)
mac_addr = &sta->addr[0];
switch (key->cipher) {
- case 0:
- for (u = 0 ; u < MAX_KEY_TABLE; u++)
- vnt_mac_disable_keyentry(priv, u);
- return ret;
-
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- for (u = 0; u < MAX_KEY_TABLE; u++)
- vnt_mac_disable_keyentry(priv, u);
-
vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
- KEY_CTL_WEP, true);
+ KEY_CTL_WEP);
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- return ret;
+ return vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
+ KEY_CTL_WEP);
+
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -151,11 +136,9 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
}
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
- key_dec_mode, true);
- else
- vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
- key_dec_mode, true);
+ return vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
+ key_dec_mode);
- return 0;
+ return vnt_set_keymode(hw, mac_addr, key,
+ VNT_KEY_GROUP_ADDRESS, key_dec_mode);
}
diff --git a/drivers/staging/vt6656/key.h b/drivers/staging/vt6656/key.h
index 918c07cf86cd..1f3449e66143 100644
--- a/drivers/staging/vt6656/key.h
+++ b/drivers/staging/vt6656/key.h
@@ -25,13 +25,14 @@
#define KEY_CTL_TKIP 0x02
#define KEY_CTL_CCMP 0x03
-#define VNT_KEY_DEFAULTKEY 0x1
-#define VNT_KEY_GROUP_ADDRESS 0x2
-#define VNT_KEY_ALLGROUP 0x4
-#define VNT_KEY_GROUP 0x40
-#define VNT_KEY_PAIRWISE 0x00
-#define VNT_KEY_ONFLY 0x8000
#define VNT_KEY_ONFLY_ALL 0x4000
+#define VNT_KEY_ONFLY 0x8000
+#define VNT_KEY_ALLGROUP 0x04
+#define VNT_KEY_GROUP 0x40
+#define VNT_KEY_PAIRWISE VNT_KEY_ONFLY
+#define VNT_KEY_GROUP_ADDRESS (VNT_KEY_ALLGROUP | VNT_KEY_GROUP)
+#define VNT_KEY_DEFAULTKEY (VNT_KEY_GROUP_ADDRESS | VNT_KEY_ONFLY |\
+ VNT_KEY_ONFLY_ALL)
int vnt_key_init_table(struct vnt_private *priv);
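
With the reworked defines, the key mode written by vnt_set_keymode() becomes the cipher nibble(s) OR'd with the key_type constant rather than a chain of fall-through ORs. A userspace sketch of just that macro arithmetic (constants taken from the new key.h; the WEP cipher code is only a sample input):

#include <stdio.h>

#define KEY_CTL_WEP             0x01

/* Values as laid out in the reworked key.h */
#define VNT_KEY_ONFLY_ALL       0x4000
#define VNT_KEY_ONFLY           0x8000
#define VNT_KEY_ALLGROUP        0x04
#define VNT_KEY_GROUP           0x40
#define VNT_KEY_PAIRWISE        VNT_KEY_ONFLY
#define VNT_KEY_GROUP_ADDRESS   (VNT_KEY_ALLGROUP | VNT_KEY_GROUP)
#define VNT_KEY_DEFAULTKEY      (VNT_KEY_GROUP_ADDRESS | VNT_KEY_ONFLY | \
                                 VNT_KEY_ONFLY_ALL)

int main(void)
{
        unsigned int mode = KEY_CTL_WEP;                /* sample cipher */
        unsigned int base = mode | (mode << 4);         /* cipher in both nibbles */

        /* key_mode = cipher bits | key_type, as in the new vnt_set_keymode() */
        printf("group-address key mode: 0x%04x\n", base | VNT_KEY_GROUP_ADDRESS);
        printf("default key mode:       0x%04x\n", base | VNT_KEY_DEFAULTKEY);
        printf("pairwise key mode:      0x%04x\n", mode | VNT_KEY_PAIRWISE);
        return 0;
}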
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 5cacf6e60e90..da7067c34643 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -22,90 +22,40 @@
#include "mac.h"
#include "usbpipe.h"
-/*
- * Description:
- * Write MAC Multicast Address Mask
- *
- * Parameters:
- * In:
- * mc_filter (mac filter)
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter)
+int vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter)
{
__le64 le_mc = cpu_to_le64(mc_filter);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_MAR0,
- MESSAGE_REQUEST_MACREG, sizeof(le_mc), (u8 *)&le_mc);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_MAR0,
+ MESSAGE_REQUEST_MACREG, sizeof(le_mc),
+ (u8 *)&le_mc);
}
-/*
- * Description:
- * Shut Down MAC
- *
- * Parameters:
- * In:
- * Out:
- * none
- *
- *
- */
-void vnt_mac_shutdown(struct vnt_private *priv)
+int vnt_mac_shutdown(struct vnt_private *priv)
{
- vnt_control_out(priv, MESSAGE_TYPE_MACSHUTDOWN, 0, 0, 0, NULL);
+ return vnt_control_out(priv, MESSAGE_TYPE_MACSHUTDOWN, 0, 0, 0, NULL);
}
-void vnt_mac_set_bb_type(struct vnt_private *priv, u8 type)
+int vnt_mac_set_bb_type(struct vnt_private *priv, u8 type)
{
u8 data[2];
data[0] = type;
data[1] = EnCFG_BBType_MASK;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data),
+ data);
}
-/*
- * Description:
- * Disable the Key Entry by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx)
+int vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx)
{
- vnt_control_out(priv, MESSAGE_TYPE_CLRKEYENTRY, 0, 0,
- sizeof(entry_idx), &entry_idx);
+ return vnt_control_out(priv, MESSAGE_TYPE_CLRKEYENTRY, 0, 0,
+ sizeof(entry_idx), &entry_idx);
}
-/*
- * Description:
- * Set the Key by MISCFIFO
- *
- * Parameters:
- * In:
- * dwIoBase - Base Address for MAC
- *
- * Out:
- * none
- *
- * Return Value: none
- *
- */
-void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
- u32 key_idx, u8 *addr, u8 *key)
+int vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
+ u32 key_idx, u8 *addr, u8 *key)
{
struct vnt_mac_set_key set_key;
u16 offset;
@@ -124,9 +74,9 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
dev_dbg(&priv->usb->dev, "offset %d key ctl %d set key %24ph\n",
offset, key_ctl, (u8 *)&set_key);
- vnt_control_out(priv, MESSAGE_TYPE_SETKEY, offset,
- (u16)key_idx, sizeof(struct vnt_mac_set_key),
- (u8 *)&set_key);
+ return vnt_control_out(priv, MESSAGE_TYPE_SETKEY, offset,
+ (u16)key_idx, sizeof(struct vnt_mac_set_key),
+ (u8 *)&set_key);
}
int vnt_mac_reg_bits_off(struct vnt_private *priv, u8 reg_ofs, u8 bits)
@@ -151,76 +101,76 @@ int vnt_mac_reg_bits_on(struct vnt_private *priv, u8 reg_ofs, u8 bits)
MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word)
+int vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word)
{
u8 data[2];
data[0] = (u8)(word & 0xff);
data[1] = (u8)(word >> 8);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, reg_ofs,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, reg_ofs,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr)
+int vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr)
{
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BSSID0,
- MESSAGE_REQUEST_MACREG, ETH_ALEN, addr);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BSSID0,
+ MESSAGE_REQUEST_MACREG, ETH_ALEN, addr);
}
-void vnt_mac_enable_protect_mode(struct vnt_private *priv)
+int vnt_mac_enable_protect_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = EnCFG_ProtectMd;
data[1] = EnCFG_ProtectMd;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_disable_protect_mode(struct vnt_private *priv)
+int vnt_mac_disable_protect_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = 0;
data[1] = EnCFG_ProtectMd;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG0,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv)
+int vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = EnCFG_BarkerPream;
data[1] = EnCFG_BarkerPream;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv)
+int vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv)
{
u8 data[2];
data[0] = 0;
data[1] = EnCFG_BarkerPream;
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_MASK, MAC_REG_ENCFG2,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
-void vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval)
+int vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval)
{
u8 data[2];
data[0] = (u8)(interval & 0xff);
data[1] = (u8)(interval >> 8);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BI,
- MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE, MAC_REG_BI,
+ MESSAGE_REQUEST_MACREG, ARRAY_SIZE(data), data);
}
int vnt_mac_set_led(struct vnt_private *priv, u8 state, u8 led)
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index c532b27de37f..dae70b5c7634 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -177,7 +177,7 @@
#define EnCFG_BBType_a 0x00
#define EnCFG_BBType_b BIT(0)
#define EnCFG_BBType_g BIT(1)
-#define EnCFG_BBType_MASK (BIT(0) | BIT(1))
+#define EnCFG_BBType_MASK (EnCFG_BBType_b | EnCFG_BBType_g)
#define EnCFG_ProtectMd BIT(5)
/* Bits in the EnhanceCFG_1 register */
@@ -355,21 +355,21 @@ struct vnt_mac_set_key {
u8 key[WLAN_KEY_LEN_CCMP];
} __packed;
-void vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter);
-void vnt_mac_shutdown(struct vnt_private *priv);
-void vnt_mac_set_bb_type(struct vnt_private *priv, u8 type);
-void vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx);
-void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
- u32 key_idx, u8 *addr, u8 *key);
+int vnt_mac_set_filter(struct vnt_private *priv, u64 mc_filter);
+int vnt_mac_shutdown(struct vnt_private *priv);
+int vnt_mac_set_bb_type(struct vnt_private *priv, u8 type);
+int vnt_mac_disable_keyentry(struct vnt_private *priv, u8 entry_idx);
+int vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
+ u32 key_idx, u8 *addr, u8 *key);
int vnt_mac_reg_bits_off(struct vnt_private *priv, u8 reg_ofs, u8 bits);
int vnt_mac_reg_bits_on(struct vnt_private *priv, u8 reg_ofs, u8 bits);
-void vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word);
-void vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr);
-void vnt_mac_enable_protect_mode(struct vnt_private *priv);
-void vnt_mac_disable_protect_mode(struct vnt_private *priv);
-void vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv);
-void vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv);
-void vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval);
+int vnt_mac_write_word(struct vnt_private *priv, u8 reg_ofs, u16 word);
+int vnt_mac_set_bssid_addr(struct vnt_private *priv, u8 *addr);
+int vnt_mac_enable_protect_mode(struct vnt_private *priv);
+int vnt_mac_disable_protect_mode(struct vnt_private *priv);
+int vnt_mac_enable_barker_preamble_mode(struct vnt_private *priv);
+int vnt_mac_disable_barker_preamble_mode(struct vnt_private *priv);
+int vnt_mac_set_beacon_interval(struct vnt_private *priv, u16 interval);
int vnt_mac_set_led(struct vnt_private *priv, u8 state, u8 led);
#endif /* __MAC_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 5f78cad3b647..8bf851c53f4e 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -33,7 +33,6 @@
#include "wcmd.h"
#include "rxtx.h"
#include "rf.h"
-#include "firmware.h"
#include "usbpipe.h"
#include "channel.h"
MODULE_PARM_DESC(tx_buffers, "Number of usb tx buffers");
#define RTS_THRESH_DEF 2347
#define FRAG_THRESH_DEF 2346
-#define SHORT_RETRY_DEF 8
-#define LONG_RETRY_DEF 4
/* BasebandType[] baseband type selected
* 0: indicate 802.11a type
@@ -94,15 +91,91 @@ static void vnt_set_options(struct vnt_private *priv)
else
priv->num_rcb = vnt_rx_buffers;
- priv->short_retry_limit = SHORT_RETRY_DEF;
- priv->long_retry_limit = LONG_RETRY_DEF;
priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
priv->bb_type = BBP_TYPE_DEF;
priv->packet_type = priv->bb_type;
- priv->preamble_type = 0;
+ priv->preamble_type = PREAMBLE_LONG;
priv->exist_sw_net_addr = false;
}
+static int vnt_download_firmware(struct vnt_private *priv)
+{
+ struct device *dev = &priv->usb->dev;
+ const struct firmware *fw;
+ u16 length;
+ int ii;
+ int ret = 0;
+
+ dev_dbg(dev, "---->Download firmware\n");
+
+ ret = request_firmware(&fw, FIRMWARE_NAME, dev);
+ if (ret) {
+ dev_err(dev, "firmware file %s request failed (%d)\n",
+ FIRMWARE_NAME, ret);
+ goto end;
+ }
+
+ for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
+ length = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
+
+ ret = vnt_control_out(priv, 0, 0x1200 + ii, 0x0000, length,
+ fw->data + ii);
+ if (ret)
+ goto free_fw;
+
+ dev_dbg(dev, "Download firmware...%d %zu\n", ii, fw->size);
+ }
+
+free_fw:
+ release_firmware(fw);
+end:
+ return ret;
+}
+
+static int vnt_firmware_branch_to_sram(struct vnt_private *priv)
+{
+ dev_dbg(&priv->usb->dev, "---->Branch to Sram\n");
+
+ return vnt_control_out(priv, 1, 0x1200, 0x0000, 0, NULL);
+}
+
+static int vnt_check_firmware_version(struct vnt_private *priv)
+{
+ int ret = 0;
+
+ ret = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
+ MESSAGE_REQUEST_VERSION, 2,
+ (u8 *)&priv->firmware_version);
+ if (ret) {
+ dev_dbg(&priv->usb->dev,
+ "Could not get firmware version: %d.\n", ret);
+ goto end;
+ }
+
+ dev_dbg(&priv->usb->dev, "Firmware Version [%04x]\n",
+ priv->firmware_version);
+
+ if (priv->firmware_version == 0xFFFF) {
+ dev_dbg(&priv->usb->dev, "In Loader.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (priv->firmware_version < FIRMWARE_VERSION) {
+ /* branch to loader to download new firmware */
+ ret = vnt_firmware_branch_to_sram(priv);
+ if (ret) {
+ dev_dbg(&priv->usb->dev,
+ "Could not branch to SRAM: %d.\n", ret);
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+end:
+ return ret;
+}
+
/*
* initialization of MAC & BBP registers
*/
@@ -146,8 +219,8 @@ static int vnt_init_registers(struct vnt_private *priv)
init_cmd->exist_sw_net_addr = priv->exist_sw_net_addr;
for (ii = 0; ii < ARRAY_SIZE(init_cmd->sw_net_addr); ii++)
init_cmd->sw_net_addr[ii] = priv->current_net_addr[ii];
- init_cmd->short_retry_limit = priv->short_retry_limit;
- init_cmd->long_retry_limit = priv->long_retry_limit;
+ init_cmd->short_retry_limit = priv->hw->wiphy->retry_short;
+ init_cmd->long_retry_limit = priv->hw->wiphy->retry_long;
/* issue card_init command to device */
ret = vnt_control_out(priv, MESSAGE_TYPE_CARDINIT, 0, 0,
@@ -324,19 +397,6 @@ static int vnt_init_registers(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "Network address = %pM\n",
priv->current_net_addr);
- /*
- * set BB and packet type at the same time
- * set Short Slot Time, xIFS, and RSPINF
- */
- if (priv->bb_type == BB_TYPE_11A)
- priv->short_slot_time = true;
- else
- priv->short_slot_time = false;
-
- ret = vnt_set_short_slot_time(priv);
- if (ret)
- goto end;
-
priv->radio_ctl = priv->eeprom[EEP_OFS_RADIOCTL];
if ((priv->radio_ctl & EEP_RADIOCTL_ENABLE) != 0) {
@@ -385,17 +445,13 @@ static void vnt_free_tx_bufs(struct vnt_private *priv)
struct vnt_usb_send_context *tx_context;
int ii;
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = priv->tx_context[ii];
if (!tx_context)
continue;
- /* deallocate URBs */
- if (tx_context->urb) {
- usb_kill_urb(tx_context->urb);
- usb_free_urb(tx_context->urb);
- }
-
kfree(tx_context);
}
}
@@ -436,6 +492,8 @@ static int vnt_alloc_bufs(struct vnt_private *priv)
struct vnt_rcb *rcb;
int ii;
+ init_usb_anchor(&priv->tx_submitted);
+
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
if (!tx_context) {
@@ -446,14 +504,6 @@ static int vnt_alloc_bufs(struct vnt_private *priv)
priv->tx_context[ii] = tx_context;
tx_context->priv = priv;
tx_context->pkt_no = ii;
-
- /* allocate URBs */
- tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_context->urb) {
- ret = -ENOMEM;
- goto free_tx;
- }
-
tx_context->in_use = false;
}
@@ -683,15 +733,14 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
priv->bb_type = BB_TYPE_11G;
}
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- if (priv->bb_type == BB_TYPE_11B)
- priv->current_rate = RATE_1M;
- else
- priv->current_rate = RATE_54M;
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
+ vnt_rf_setpower(priv, conf->chandef.chan);
- vnt_rf_setpower(priv, priv->current_rate,
- conf->chandef.chan->hw_value);
- }
+ if (conf->flags & (IEEE80211_CONF_OFFCHANNEL | IEEE80211_CONF_IDLE))
+ /* Set max sensitivity */
+ vnt_update_pre_ed_threshold(priv, true);
+ else
+ vnt_update_pre_ed_threshold(priv, false);
return 0;
}
@@ -718,10 +767,10 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (conf->use_short_preamble) {
vnt_mac_enable_barker_preamble_mode(priv);
- priv->preamble_type = true;
+ priv->preamble_type = PREAMBLE_SHORT;
} else {
vnt_mac_disable_barker_preamble_mode(priv);
- priv->preamble_type = false;
+ priv->preamble_type = PREAMBLE_LONG;
}
}
@@ -740,16 +789,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_set_short_slot_time(priv);
vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
- vnt_update_pre_ed_threshold(priv, false);
}
if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
BSS_CHANGED_ERP_SLOT))
vnt_set_bss_mode(priv);
- if (changed & BSS_CHANGED_TXPOWER)
- vnt_rf_setpower(priv, priv->current_rate,
- conf->chandef.chan->hw_value);
+ if (changed & (BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH))
+ vnt_rf_setpower(priv, conf->chandef.chan);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->usb->dev,
@@ -767,10 +814,17 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
priv->op_mode != NL80211_IFTYPE_AP) {
if (conf->assoc && conf->beacon_rate) {
+ u16 ps_beacon_int = conf->beacon_int;
+
+ if (conf->dtim_period)
+ ps_beacon_int *= conf->dtim_period;
+ else if (hw->conf.listen_interval)
+ ps_beacon_int *= hw->conf.listen_interval;
+
vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
TFTCTL_TSFCNTREN);
- vnt_mac_set_beacon_interval(priv, conf->beacon_int);
+ vnt_mac_set_beacon_interval(priv, ps_beacon_int);
vnt_reset_next_tbtt(priv, conf->beacon_int);
@@ -778,7 +832,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
conf->sync_tsf, priv->current_tsf);
vnt_update_next_tbtt(priv,
- conf->sync_tsf, conf->beacon_int);
+ conf->sync_tsf, ps_beacon_int);
} else {
vnt_clear_current_tsf(priv);
@@ -868,25 +922,6 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return 0;
}
-static void vnt_sw_scan_start(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *addr)
-{
- struct vnt_private *priv = hw->priv;
-
- /* Set max sensitivity*/
- vnt_update_pre_ed_threshold(priv, true);
-}
-
-static void vnt_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct vnt_private *priv = hw->priv;
-
- /* Return sensitivity to channel level*/
- vnt_update_pre_ed_threshold(priv, false);
-}
-
static int vnt_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -932,8 +967,6 @@ static const struct ieee80211_ops vnt_mac_ops = {
.prepare_multicast = vnt_prepare_multicast,
.configure_filter = vnt_configure,
.set_key = vnt_set_key,
- .sw_scan_start = vnt_sw_scan_start,
- .sw_scan_complete = vnt_sw_scan_complete,
.get_stats = vnt_get_stats,
.get_tsf = vnt_get_tsf,
.set_tsf = vnt_set_tsf,
@@ -1010,6 +1043,8 @@ vt6656_probe(struct usb_interface *intf, const struct usb_device_id *id)
ieee80211_hw_set(priv->hw, SUPPORTS_PS);
ieee80211_hw_set(priv->hw, PS_NULLFUNC_STACK);
+ priv->hw->extra_tx_headroom =
+ sizeof(struct vnt_tx_buffer) + sizeof(struct vnt_tx_usb_header);
priv->hw->max_signal = 100;
SET_IEEE80211_DEV(priv->hw, &intf->dev);
@@ -1078,3 +1113,5 @@ static struct usb_driver vt6656_driver = {
};
module_usb_driver(vt6656_driver);
+
+MODULE_FIRMWARE(FIRMWARE_NAME);
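
vnt_bss_info_changed() now scales the programmed beacon interval by the DTIM period, falling back to the listen interval, so the power-save wakeups line up with buffered traffic. A trivial sketch with made-up values:

#include <stdio.h>

int main(void)
{
        unsigned short beacon_int = 100;        /* TU, hypothetical */
        unsigned char dtim_period = 2;          /* 0 would mean "unknown" */
        unsigned short listen_interval = 10;    /* hypothetical hw->conf value */
        unsigned short ps_beacon_int = beacon_int;

        /* Prefer waking once per DTIM; otherwise fall back to the
         * negotiated listen interval, as the new code does. */
        if (dtim_period)
                ps_beacon_int *= dtim_period;
        else if (listen_interval)
                ps_beacon_int *= listen_interval;

        printf("programmed beacon interval: %u TU\n", ps_beacon_int);
        return 0;
}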
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index 7a086c72d5a8..2f49c870272a 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -63,41 +63,29 @@ void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval)
*/
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_GO2DOZE);
- if (listen_interval >= 2) {
- /* clear always listen beacon */
- vnt_mac_reg_bits_off(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
-
- /* first time set listen next beacon */
- vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_LNBCN);
- } else {
- /* always listen beacon */
- vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
- }
+ /* always listen beacon */
+ vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
dev_dbg(&priv->usb->dev, "PS:Power Saving Mode Enable...\n");
}
-/*
- *
- * Routine Description:
- * Disable hw power saving functions
- *
- * Return Value:
- * None.
- *
- */
-
-void vnt_disable_power_saving(struct vnt_private *priv)
+int vnt_disable_power_saving(struct vnt_private *priv)
{
+ int ret;
+
/* disable power saving hw function */
- vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
- 0, 0, NULL);
+ ret = vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
+ 0, 0, NULL);
+ if (ret)
+ return ret;
/* clear AutoSleep */
vnt_mac_reg_bits_off(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* set always listen beacon */
vnt_mac_reg_bits_on(priv, MAC_REG_PSCTL, PSCTL_ALBCN);
+
+ return 0;
}
/*
diff --git a/drivers/staging/vt6656/power.h b/drivers/staging/vt6656/power.h
index 58755ae16e5a..160872026db3 100644
--- a/drivers/staging/vt6656/power.h
+++ b/drivers/staging/vt6656/power.h
@@ -18,7 +18,7 @@
#define C_PWBT 1000 /* micro sec. power up before TBTT */
-void vnt_disable_power_saving(struct vnt_private *priv);
+int vnt_disable_power_saving(struct vnt_private *priv);
void vnt_enable_power_saving(struct vnt_private *priv, u16 listen_interval);
int vnt_next_tbtt_wakeup(struct vnt_private *priv);
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 43237b7e1dbe..5b8da06e3916 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -21,22 +21,16 @@
*
*/
+#include <linux/errno.h>
#include "mac.h"
#include "rf.h"
#include "baseband.h"
#include "usbpipe.h"
#define CB_AL2230_INIT_SEQ 15
-#define AL2230_PWR_IDX_LEN 64
-
#define CB_AL7230_INIT_SEQ 16
-#define AL7230_PWR_IDX_LEN 64
-
#define CB_VT3226_INIT_SEQ 11
-#define VT3226_PWR_IDX_LEN 64
-
#define CB_VT3342_INIT_SEQ 13
-#define VT3342_PWR_IDX_LEN 64
static u8 al2230_init_table[CB_AL2230_INIT_SEQ][3] = {
{0x03, 0xf7, 0x90},
@@ -518,72 +512,45 @@ static u8 vt3342_channel_table1[CB_MAX_CHANNEL][3] = {
{0x03, 0x00, 0x04}
};
-/* Power Table */
-static const u32 al2230_power_table[AL2230_PWR_IDX_LEN] = {
- 0x04040900,
- 0x04041900,
- 0x04042900,
- 0x04043900,
- 0x04044900,
- 0x04045900,
- 0x04046900,
- 0x04047900,
- 0x04048900,
- 0x04049900,
- 0x0404a900,
- 0x0404b900,
- 0x0404c900,
- 0x0404d900,
- 0x0404e900,
- 0x0404f900,
- 0x04050900,
- 0x04051900,
- 0x04052900,
- 0x04053900,
- 0x04054900,
- 0x04055900,
- 0x04056900,
- 0x04057900,
- 0x04058900,
- 0x04059900,
- 0x0405a900,
- 0x0405b900,
- 0x0405c900,
- 0x0405d900,
- 0x0405e900,
- 0x0405f900,
- 0x04060900,
- 0x04061900,
- 0x04062900,
- 0x04063900,
- 0x04064900,
- 0x04065900,
- 0x04066900,
- 0x04067900,
- 0x04068900,
- 0x04069900,
- 0x0406a900,
- 0x0406b900,
- 0x0406c900,
- 0x0406d900,
- 0x0406e900,
- 0x0406f900,
- 0x04070900,
- 0x04071900,
- 0x04072900,
- 0x04073900,
- 0x04074900,
- 0x04075900,
- 0x04076900,
- 0x04077900,
- 0x04078900,
- 0x04079900,
- 0x0407a900,
- 0x0407b900,
- 0x0407c900,
- 0x0407d900,
- 0x0407e900,
- 0x0407f900
+enum {
+ VNT_TABLE_INIT = 0,
+ VNT_TABLE_INIT_2 = 0,
+ VNT_TABLE_0 = 1,
+ VNT_TABLE_1 = 2,
+ VNT_TABLE_2 = 1
+};
+
+struct vnt_table_info {
+ u8 *addr;
+ int length;
+};
+
+static const struct vnt_table_info vnt_table_seq[][3] = {
+ { /* RF_AL2230, RF_AL2230S init table, channel table 0 and 1 */
+ {&al2230_init_table[0][0], CB_AL2230_INIT_SEQ * 3},
+ {&al2230_channel_table0[0][0], CB_MAX_CHANNEL_24G * 3},
+ {&al2230_channel_table1[0][0], CB_MAX_CHANNEL_24G * 3}
+ }, { /* RF_AIROHA7230 init table, channel table 0 and 1 */
+ {&al7230_init_table[0][0], CB_AL7230_INIT_SEQ * 3},
+ {&al7230_channel_table0[0][0], CB_MAX_CHANNEL * 3},
+ {&al7230_channel_table1[0][0], CB_MAX_CHANNEL * 3}
+ }, { /* RF_VT3226 init table, channel table 0 and 1 */
+ {&vt3226_init_table[0][0], CB_VT3226_INIT_SEQ * 3},
+ {&vt3226_channel_table0[0][0], CB_MAX_CHANNEL_24G * 3},
+ {&vt3226_channel_table1[0][0], CB_MAX_CHANNEL_24G * 3}
+ }, { /* RF_VT3226D0 init table, channel table 0 and 1 */
+ {&vt3226d0_init_table[0][0], CB_VT3226_INIT_SEQ * 3},
+ {&vt3226_channel_table0[0][0], CB_MAX_CHANNEL_24G * 3},
+ {&vt3226_channel_table1[0][0], CB_MAX_CHANNEL_24G * 3}
+ }, { /* RF_VT3342A0 init table, channel table 0 and 1 */
+ {&vt3342a0_init_table[0][0], CB_VT3342_INIT_SEQ * 3},
+ {&vt3342_channel_table0[0][0], CB_MAX_CHANNEL * 3},
+ {&vt3342_channel_table1[0][0], CB_MAX_CHANNEL * 3}
+ }, { /* RF_AIROHA7230 init table 2 and channel table 2 */
+ {&al7230_init_table_amode[0][0], CB_AL7230_INIT_SEQ * 3},
+ {&al7230_channel_table2[0][0], CB_MAX_CHANNEL * 3},
+ {NULL, 0}
+ }
};
/*
@@ -600,124 +567,90 @@ int vnt_rf_write_embedded(struct vnt_private *priv, u32 data)
reg_data[2] = (u8)(data >> 16);
reg_data[3] = (u8)(data >> 24);
- vnt_control_out(priv, MESSAGE_TYPE_WRITE_IFRF,
- 0, 0, ARRAY_SIZE(reg_data), reg_data);
-
- return true;
-}
-
-/* Set Tx power by rate and channel number */
-int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel)
-{
- u8 power = priv->cck_pwr;
-
- if (channel == 0)
- return -EINVAL;
-
- switch (rate) {
- case RATE_1M:
- case RATE_2M:
- case RATE_5M:
- case RATE_11M:
- channel--;
-
- if (channel < sizeof(priv->cck_pwr_tbl))
- power = priv->cck_pwr_tbl[channel];
- break;
- case RATE_6M:
- case RATE_9M:
- case RATE_12M:
- case RATE_18M:
- case RATE_24M:
- case RATE_36M:
- case RATE_48M:
- case RATE_54M:
- if (channel > CB_MAX_CHANNEL_24G)
- power = priv->ofdm_a_pwr_tbl[channel - 15];
- else
- power = priv->ofdm_pwr_tbl[channel - 1];
- break;
- }
-
- return vnt_rf_set_txpower(priv, power, rate);
+ return vnt_control_out(priv, MESSAGE_TYPE_WRITE_IFRF, 0, 0,
+ ARRAY_SIZE(reg_data), reg_data);
}
static u8 vnt_rf_addpower(struct vnt_private *priv)
{
+ int base;
s32 rssi = -priv->current_rssi;
if (!rssi)
return 7;
- if (priv->rf_type == RF_VT3226D0) {
- if (rssi < -70)
- return 9;
- else if (rssi < -65)
- return 7;
- else if (rssi < -60)
- return 5;
- } else {
- if (rssi < -80)
- return 9;
- else if (rssi < -75)
- return 7;
- else if (rssi < -70)
- return 5;
- }
+ if (priv->rf_type == RF_VT3226D0)
+ base = -60;
+ else
+ base = -70;
+
+ if (rssi < base)
+ return ((rssi - base + 1) / -5) * 2 + 5;
return 0;
}
/* Set Tx power by power level and rate */
-int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
+static int vnt_rf_set_txpower(struct vnt_private *priv, u8 power,
+ struct ieee80211_channel *ch)
{
u32 power_setting = 0;
- int ret = true;
+ int ret = 0;
power += vnt_rf_addpower(priv);
if (power > VNT_RF_MAX_POWER)
power = VNT_RF_MAX_POWER;
if (priv->power == power)
- return true;
+ return 0;
priv->power = power;
switch (priv->rf_type) {
case RF_AL2230:
- if (power >= AL2230_PWR_IDX_LEN)
- return false;
+ power_setting = 0x0404090 | (power << 12);
- ret &= vnt_rf_write_embedded(priv, al2230_power_table[power]);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
- if (rate <= RATE_11M)
- ret &= vnt_rf_write_embedded(priv, 0x0001b400);
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM)
+ ret = vnt_rf_write_embedded(priv, 0x0001b400);
else
- ret &= vnt_rf_write_embedded(priv, 0x0005a400);
+ ret = vnt_rf_write_embedded(priv, 0x0005a400);
+
break;
case RF_AL2230S:
- if (power >= AL2230_PWR_IDX_LEN)
- return false;
+ power_setting = 0x0404090 | (power << 12);
- ret &= vnt_rf_write_embedded(priv, al2230_power_table[power]);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
- if (rate <= RATE_11M) {
- ret &= vnt_rf_write_embedded(priv, 0x040c1400);
- ret &= vnt_rf_write_embedded(priv, 0x00299b00);
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM) {
+ ret = vnt_rf_write_embedded(priv, 0x040c1400);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00299b00);
} else {
- ret &= vnt_rf_write_embedded(priv, 0x0005a400);
- ret &= vnt_rf_write_embedded(priv, 0x00099b00);
+ ret = vnt_rf_write_embedded(priv, 0x0005a400);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00099b00);
}
+
break;
case RF_AIROHA7230:
- if (rate <= RATE_11M)
- ret &= vnt_rf_write_embedded(priv, 0x111bb900);
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM)
+ ret = vnt_rf_write_embedded(priv, 0x111bb900);
else
- ret &= vnt_rf_write_embedded(priv, 0x221bb900);
+ ret = vnt_rf_write_embedded(priv, 0x221bb900);
- if (power >= AL7230_PWR_IDX_LEN)
- return false;
+ if (ret)
+ return ret;
/*
* 0x080F1B00 for 3 wire control TxGain(D10)
@@ -725,61 +658,68 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
*/
power_setting = 0x080c0b00 | (power << 12);
- ret &= vnt_rf_write_embedded(priv, power_setting);
-
+ ret = vnt_rf_write_embedded(priv, power_setting);
break;
case RF_VT3226:
- if (power >= VT3226_PWR_IDX_LEN)
- return false;
power_setting = ((0x3f - power) << 20) | (0x17 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
-
+ ret = vnt_rf_write_embedded(priv, power_setting);
break;
case RF_VT3226D0:
- if (power >= VT3226_PWR_IDX_LEN)
- return false;
-
- if (rate <= RATE_11M) {
- u16 hw_value = priv->hw->conf.chandef.chan->hw_value;
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM) {
+ u16 hw_value = ch->hw_value;
power_setting = ((0x3f - power) << 20) | (0xe07 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
- ret &= vnt_rf_write_embedded(priv, 0x03c6a200);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x03c6a200);
+ if (ret)
+ return ret;
dev_dbg(&priv->usb->dev,
"%s 11b channel [%d]\n", __func__, hw_value);
hw_value--;
- if (hw_value < ARRAY_SIZE(vt3226d0_lo_current_table))
- ret &= vnt_rf_write_embedded(priv,
+ if (hw_value < ARRAY_SIZE(vt3226d0_lo_current_table)) {
+ ret = vnt_rf_write_embedded(priv,
vt3226d0_lo_current_table[hw_value]);
+ if (ret)
+ return ret;
+ }
- ret &= vnt_rf_write_embedded(priv, 0x015C0800);
+ ret = vnt_rf_write_embedded(priv, 0x015C0800);
} else {
dev_dbg(&priv->usb->dev,
"@@@@ %s> 11G mode\n", __func__);
power_setting = ((0x3f - power) << 20) | (0x7 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
- ret &= vnt_rf_write_embedded(priv, 0x00C6A200);
- ret &= vnt_rf_write_embedded(priv, 0x016BC600);
- ret &= vnt_rf_write_embedded(priv, 0x00900800);
+ ret = vnt_rf_write_embedded(priv, power_setting);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00C6A200);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x016BC600);
+ if (ret)
+ return ret;
+
+ ret = vnt_rf_write_embedded(priv, 0x00900800);
}
+
break;
case RF_VT3342A0:
- if (power >= VT3342_PWR_IDX_LEN)
- return false;
-
power_setting = ((0x3f - power) << 20) | (0x27 << 8);
- ret &= vnt_rf_write_embedded(priv, power_setting);
-
+ ret = vnt_rf_write_embedded(priv, power_setting);
break;
default:
break;
@@ -787,6 +727,36 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
return ret;
}
+/* Set Tx power by channel number */
+int vnt_rf_setpower(struct vnt_private *priv,
+ struct ieee80211_channel *ch)
+{
+ u16 channel;
+ u8 power = priv->cck_pwr;
+
+ if (!ch)
+ return -EINVAL;
+
+ /* convert channel number to an array index */
+ channel = ch->hw_value - 1;
+
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM) {
+ if (channel < ARRAY_SIZE(priv->cck_pwr_tbl))
+ power = priv->cck_pwr_tbl[channel];
+ } else if (ch->band == NL80211_BAND_5GHZ) {
+ /* skip the 14 2.4 GHz channels to index the 5 GHz table */
+ channel -= 14;
+
+ if (channel < ARRAY_SIZE(priv->ofdm_a_pwr_tbl))
+ power = priv->ofdm_a_pwr_tbl[channel];
+ } else {
+ if (channel < ARRAY_SIZE(priv->ofdm_pwr_tbl))
+ power = priv->ofdm_pwr_tbl[channel];
+ }
+
+ return vnt_rf_set_txpower(priv, power, ch);
+}
+
/* Convert rssi to dbm */
void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
{
@@ -813,140 +783,73 @@ void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
int vnt_rf_table_download(struct vnt_private *priv)
{
- int ret = 0;
- u16 length1 = 0, length2 = 0, length3 = 0;
- u8 *addr1 = NULL, *addr2 = NULL, *addr3 = NULL;
- u16 length, value;
- u8 array[256];
+ int ret;
+ int idx = -1;
+ const struct vnt_table_info *table_seq;
switch (priv->rf_type) {
case RF_AL2230:
case RF_AL2230S:
- length1 = CB_AL2230_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL_24G * 3;
- length3 = CB_MAX_CHANNEL_24G * 3;
- addr1 = &al2230_init_table[0][0];
- addr2 = &al2230_channel_table0[0][0];
- addr3 = &al2230_channel_table1[0][0];
+ idx = 0;
break;
case RF_AIROHA7230:
- length1 = CB_AL7230_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL * 3;
- length3 = CB_MAX_CHANNEL * 3;
- addr1 = &al7230_init_table[0][0];
- addr2 = &al7230_channel_table0[0][0];
- addr3 = &al7230_channel_table1[0][0];
+ idx = 1;
break;
case RF_VT3226:
- length1 = CB_VT3226_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL_24G * 3;
- length3 = CB_MAX_CHANNEL_24G * 3;
- addr1 = &vt3226_init_table[0][0];
- addr2 = &vt3226_channel_table0[0][0];
- addr3 = &vt3226_channel_table1[0][0];
+ idx = 2;
break;
case RF_VT3226D0:
- length1 = CB_VT3226_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL_24G * 3;
- length3 = CB_MAX_CHANNEL_24G * 3;
- addr1 = &vt3226d0_init_table[0][0];
- addr2 = &vt3226_channel_table0[0][0];
- addr3 = &vt3226_channel_table1[0][0];
+ idx = 3;
break;
case RF_VT3342A0:
- length1 = CB_VT3342_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL * 3;
- length3 = CB_MAX_CHANNEL * 3;
- addr1 = &vt3342a0_init_table[0][0];
- addr2 = &vt3342_channel_table0[0][0];
- addr3 = &vt3342_channel_table1[0][0];
+ idx = 4;
break;
}
- /* Init Table */
- memcpy(array, addr1, length1);
+ if (idx < 0)
+ return 0;
+ table_seq = &vnt_table_seq[idx][0];
+
+ /* Init Table */
ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_RF_INIT, length1, array);
+ MESSAGE_REQUEST_RF_INIT,
+ table_seq[VNT_TABLE_INIT].length,
+ table_seq[VNT_TABLE_INIT].addr);
if (ret)
- goto end;
+ return ret;
/* Channel Table 0 */
- value = 0;
- while (length2 > 0) {
- if (length2 >= 64)
- length = 64;
- else
- length = length2;
-
- memcpy(array, addr2, length);
-
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, value,
- MESSAGE_REQUEST_RF_CH0, length, array);
- if (ret)
- goto end;
-
- length2 -= length;
- value += length;
- addr2 += length;
- }
-
- /* Channel table 1 */
- value = 0;
- while (length3 > 0) {
- if (length3 >= 64)
- length = 64;
- else
- length = length3;
-
- memcpy(array, addr3, length);
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_RF_CH0,
+ table_seq[VNT_TABLE_0].length,
+ table_seq[VNT_TABLE_0].addr);
+ if (ret)
+ return ret;
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, value,
- MESSAGE_REQUEST_RF_CH1, length, array);
- if (ret)
- goto end;
-
- length3 -= length;
- value += length;
- addr3 += length;
- }
+ /* Channel Table 1 */
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_RF_CH1,
+ table_seq[VNT_TABLE_1].length,
+ table_seq[VNT_TABLE_1].addr);
if (priv->rf_type == RF_AIROHA7230) {
- length1 = CB_AL7230_INIT_SEQ * 3;
- length2 = CB_MAX_CHANNEL * 3;
- addr1 = &al7230_init_table_amode[0][0];
- addr2 = &al7230_channel_table2[0][0];
-
- memcpy(array, addr1, length1);
+ table_seq = &vnt_table_seq[5][0];
/* Init Table 2 */
ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_RF_INIT2, length1, array);
+ MESSAGE_REQUEST_RF_INIT2,
+ table_seq[VNT_TABLE_INIT_2].length,
+ table_seq[VNT_TABLE_INIT_2].addr);
if (ret)
- goto end;
+ return ret;
- /* Channel Table 0 */
- value = 0;
- while (length2 > 0) {
- if (length2 >= 64)
- length = 64;
- else
- length = length2;
-
- memcpy(array, addr2, length);
-
- ret = vnt_control_out(priv, MESSAGE_TYPE_WRITE, value,
- MESSAGE_REQUEST_RF_CH2, length,
- array);
- if (ret)
- goto end;
-
- length2 -= length;
- value += length;
- addr2 += length;
- }
+ /* Channel Table 2 */
+ ret = vnt_control_out_blocks(priv, VNT_REG_BLOCK_SIZE,
+ MESSAGE_REQUEST_RF_CH2,
+ table_seq[VNT_TABLE_2].length,
+ table_seq[VNT_TABLE_2].addr);
}
-end:
return ret;
}
diff --git a/drivers/staging/vt6656/rf.h b/drivers/staging/vt6656/rf.h
index 7494546d71b8..493faaf4e2b5 100644
--- a/drivers/staging/vt6656/rf.h
+++ b/drivers/staging/vt6656/rf.h
@@ -41,8 +41,7 @@
#define VNT_RF_REG_LEN 0x17 /* 24 bit length */
int vnt_rf_write_embedded(struct vnt_private *priv, u32 data);
-int vnt_rf_setpower(struct vnt_private *priv, u32 rate, u32 channel);
-int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate);
+int vnt_rf_setpower(struct vnt_private *priv, struct ieee80211_channel *ch);
void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm);
int vnt_rf_table_download(struct vnt_private *priv);
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 9439d190f431..5dd6b4d2bf20 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -13,8 +13,6 @@
*
* Functions:
* vnt_generate_tx_parameter - Generate tx dma required parameter.
- * vnt_get_rtscts_duration_le- get rtx/cts required duration
- * vnt_get_rtscts_rsvtime_le- get rts/cts reserved time
* vnt_get_rsvtime- get frame reserved time
* vnt_fill_cts_head- fulfill CTS ctl header
*
@@ -38,13 +36,24 @@ static const u16 vnt_time_stampoff[2][MAX_RATE] = {
{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},
};
-#define RTSDUR_BB 0
-#define RTSDUR_BA 1
-#define RTSDUR_AA 2
-#define CTSDUR_BA 3
#define DATADUR_B 10
#define DATADUR_A 11
+static const u8 vnt_phy_signal[] = {
+ 0x00, /* RATE_1M */
+ 0x01, /* RATE_2M */
+ 0x02, /* RATE_5M */
+ 0x03, /* RATE_11M */
+ 0x8b, /* RATE_6M */
+ 0x8f, /* RATE_9M */
+ 0x8a, /* RATE_12M */
+ 0x8e, /* RATE_18M */
+ 0x89, /* RATE_24M */
+ 0x8d, /* RATE_36M */
+ 0x88, /* RATE_48M */
+ 0x8c /* RATE_54M */
+};
+
static struct vnt_usb_send_context
*vnt_get_free_context(struct vnt_private *priv)
{
@@ -60,11 +69,6 @@ static struct vnt_usb_send_context
context = priv->tx_context[ii];
if (!context->in_use) {
context->in_use = true;
- memset(context->data, 0,
- MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
-
- context->hdr = NULL;
-
return context;
}
}
@@ -78,146 +82,105 @@ static struct vnt_usb_send_context
return NULL;
}
-static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
+/* Get Length, Service, and Signal fields of Phy for Tx */
+static void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
+ u16 tx_rate, u8 pkt_type,
+ struct vnt_phy_field *phy)
{
- return cpu_to_le16(vnt_time_stampoff[priv->preamble_type % 2]
- [rate % MAX_RATE]);
-}
+ u32 bit_count;
+ u32 count = 0;
+ u32 tmp;
+ int ext_bit;
+ int i;
+ u8 mask = 0;
+ u8 preamble_type = priv->preamble_type;
+
+ bit_count = frame_length * 8;
+ ext_bit = false;
+
+ switch (tx_rate) {
+ case RATE_1M:
+ count = bit_count;
+ break;
+ case RATE_2M:
+ count = bit_count / 2;
+ break;
+ case RATE_5M:
+ count = DIV_ROUND_UP(bit_count * 10, 55);
+ break;
+ case RATE_11M:
+ count = bit_count / 11;
+ tmp = count * 11;
-static u32 vnt_get_rsvtime(struct vnt_private *priv, u8 pkt_type,
- u32 frame_length, u16 rate, int need_ack)
-{
- u32 data_time, ack_time;
+ if (tmp != bit_count) {
+ count++;
- data_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- frame_length, rate);
+ if ((bit_count - tmp) <= 3)
+ ext_bit = true;
+ }
+
+ break;
+ }
- if (pkt_type == PK_TYPE_11B)
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
- (u16)priv->top_cck_basic_rate);
- else
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type, 14,
- (u16)priv->top_ofdm_basic_rate);
+ if (tx_rate > RATE_11M) {
+ if (pkt_type == PK_TYPE_11A)
+ mask = BIT(4);
+ } else if (tx_rate > RATE_1M) {
+ if (preamble_type == PREAMBLE_SHORT)
+ mask = BIT(3);
+ }
- if (need_ack)
- return data_time + priv->sifs + ack_time;
+ i = tx_rate > RATE_54M ? RATE_54M : tx_rate;
+ phy->signal = vnt_phy_signal[i] | mask;
+ phy->service = 0x00;
- return data_time;
+ if (pkt_type == PK_TYPE_11B) {
+ if (ext_bit)
+ phy->service |= 0x80;
+ phy->len = cpu_to_le16((u16)count);
+ } else {
+ phy->len = cpu_to_le16((u16)frame_length);
+ }
}
-static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
- u32 frame_length, u16 rate, int need_ack)
+static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
- return cpu_to_le16((u16)vnt_get_rsvtime(priv, pkt_type,
- frame_length, rate, need_ack));
+ return cpu_to_le16(vnt_time_stampoff[priv->preamble_type % 2]
+ [rate % MAX_RATE]);
}
-static __le16 vnt_get_rtscts_rsvtime_le(struct vnt_private *priv, u8 rsv_type,
- u8 pkt_type, u32 frame_length,
- u16 current_rate)
+static __le16 vnt_rxtx_rsvtime_le16(struct vnt_usb_send_context *context)
{
- u32 rrv_time, rts_time, cts_time, ack_time, data_time;
-
- rrv_time = 0;
- rts_time = 0;
- cts_time = 0;
- ack_time = 0;
-
- data_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- frame_length, current_rate);
-
- if (rsv_type == 0) {
- rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_cck_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14,
- priv->top_cck_basic_rate);
- cts_time = ack_time;
-
- } else if (rsv_type == 1) {
- rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_cck_basic_rate);
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
- } else if (rsv_type == 2) {
- rts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 20, priv->top_ofdm_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type,
- pkt_type, 14,
- priv->top_ofdm_basic_rate);
- cts_time = ack_time;
-
- } else if (rsv_type == 3) {
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
- ack_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
-
- rrv_time = cts_time + ack_time + data_time + 2 * priv->sifs;
-
- return cpu_to_le16((u16)rrv_time);
- }
-
- rrv_time = rts_time + cts_time + ack_time + data_time + 3 * priv->sifs;
+ struct vnt_private *priv = context->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(context->skb);
+ struct ieee80211_rate *rate = ieee80211_get_tx_rate(priv->hw, info);
- return cpu_to_le16((u16)rrv_time);
+ return ieee80211_generic_frame_duration(priv->hw,
+ info->control.vif, info->band,
+ context->frame_len,
+ rate);
}
-static __le16 vnt_get_rtscts_duration_le(struct vnt_usb_send_context *context,
- u8 dur_type, u8 pkt_type, u16 rate)
+static __le16 vnt_get_rts_duration(struct vnt_usb_send_context *context)
{
struct vnt_private *priv = context->priv;
- u32 cts_time = 0, dur_time = 0;
- u32 frame_length = context->frame_len;
- u8 need_ack = context->need_ack;
-
- switch (dur_type) {
- case RTSDUR_BB:
- case RTSDUR_BA:
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_cck_basic_rate);
- dur_time = cts_time + 2 * priv->sifs +
- vnt_get_rsvtime(priv, pkt_type,
- frame_length, rate, need_ack);
- break;
-
- case RTSDUR_AA:
- cts_time = vnt_get_frame_time(priv->preamble_type, pkt_type,
- 14, priv->top_ofdm_basic_rate);
- dur_time = cts_time + 2 * priv->sifs +
- vnt_get_rsvtime(priv, pkt_type,
- frame_length, rate, need_ack);
- break;
-
- case CTSDUR_BA:
- dur_time = priv->sifs + vnt_get_rsvtime(priv,
- pkt_type, frame_length, rate, need_ack);
- break;
-
- default:
- break;
- }
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(context->skb);
- return cpu_to_le16((u16)dur_time);
+ return ieee80211_rts_duration(priv->hw, priv->vif,
+ context->frame_len, info);
}
-static u16 vnt_mac_hdr_pos(struct vnt_usb_send_context *tx_context,
- struct ieee80211_hdr *hdr)
+static __le16 vnt_get_cts_duration(struct vnt_usb_send_context *context)
{
- u8 *head = tx_context->data + offsetof(struct vnt_tx_buffer, fifo_head);
- u8 *hdr_pos = (u8 *)hdr;
-
- tx_context->hdr = hdr;
- if (!tx_context->hdr)
- return 0;
+ struct vnt_private *priv = context->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(context->skb);
- return (u16)(hdr_pos - head);
+ return ieee80211_ctstoself_duration(priv->hw, priv->vif,
+ context->frame_len, info);
}
-static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_datahead_g *buf)
+static void vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
+ struct vnt_tx_datahead_g *buf)
{
struct vnt_private *priv = tx_context->priv;
struct ieee80211_hdr *hdr =
@@ -236,14 +199,10 @@ static u16 vnt_rxtx_datahead_g(struct vnt_usb_send_context *tx_context,
buf->time_stamp_off_a = vnt_time_stamp_off(priv, rate);
buf->time_stamp_off_b = vnt_time_stamp_off(priv,
priv->top_cck_basic_rate);
-
- tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
-
- return le16_to_cpu(buf->duration_a);
}
-static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_datahead_ab *buf)
+static void vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
+ struct vnt_tx_datahead_ab *buf)
{
struct vnt_private *priv = tx_context->priv;
struct ieee80211_hdr *hdr =
@@ -258,14 +217,10 @@ static u16 vnt_rxtx_datahead_ab(struct vnt_usb_send_context *tx_context,
/* Get Duration and TimeStampOff */
buf->duration = hdr->duration_id;
buf->time_stamp_off = vnt_time_stamp_off(priv, rate);
-
- tx_context->tx_hdr_size = vnt_mac_hdr_pos(tx_context, &buf->hdr);
-
- return le16_to_cpu(buf->duration);
}
-static int vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
- struct ieee80211_rts *rts, __le16 duration)
+static void vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
+ struct ieee80211_rts *rts, __le16 duration)
{
struct ieee80211_hdr *hdr =
(struct ieee80211_hdr *)tx_context->skb->data;
@@ -276,72 +231,56 @@ static int vnt_fill_ieee80211_rts(struct vnt_usb_send_context *tx_context,
ether_addr_copy(rts->ra, hdr->addr1);
ether_addr_copy(rts->ta, hdr->addr2);
-
- return 0;
}
-static u16 vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context,
- struct vnt_rts_g *buf)
+static void vnt_rxtx_rts_g_head(struct vnt_usb_send_context *tx_context,
+ struct vnt_rts_g *buf)
{
struct vnt_private *priv = tx_context->priv;
u16 rts_frame_len = 20;
- u16 current_rate = tx_context->tx_rate;
vnt_get_phy_field(priv, rts_frame_len, priv->top_cck_basic_rate,
PK_TYPE_11B, &buf->b);
vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
tx_context->pkt_type, &buf->a);
- buf->duration_bb = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BB,
- PK_TYPE_11B,
- priv->top_cck_basic_rate);
- buf->duration_aa = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
- tx_context->pkt_type,
- current_rate);
- buf->duration_ba = vnt_get_rtscts_duration_le(tx_context, RTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
+ buf->duration_bb = vnt_get_rts_duration(tx_context);
+ buf->duration_aa = buf->duration_bb;
+ buf->duration_ba = buf->duration_bb;
vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration_aa);
- return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
+ vnt_rxtx_datahead_g(tx_context, &buf->data_head);
}
-static u16 vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context,
- struct vnt_rts_ab *buf)
+static void vnt_rxtx_rts_ab_head(struct vnt_usb_send_context *tx_context,
+ struct vnt_rts_ab *buf)
{
struct vnt_private *priv = tx_context->priv;
- u16 current_rate = tx_context->tx_rate;
u16 rts_frame_len = 20;
vnt_get_phy_field(priv, rts_frame_len, priv->top_ofdm_basic_rate,
tx_context->pkt_type, &buf->ab);
- buf->duration = vnt_get_rtscts_duration_le(tx_context, RTSDUR_AA,
- tx_context->pkt_type,
- current_rate);
+ buf->duration = vnt_get_rts_duration(tx_context);
vnt_fill_ieee80211_rts(tx_context, &buf->data, buf->duration);
- return vnt_rxtx_datahead_ab(tx_context, &buf->data_head);
+ vnt_rxtx_datahead_ab(tx_context, &buf->data_head);
}
-static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
- union vnt_tx_data_head *head)
+static void vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
+ union vnt_tx_data_head *head)
{
struct vnt_private *priv = tx_context->priv;
struct vnt_cts *buf = &head->cts_g;
u32 cts_frame_len = 14;
- u16 current_rate = tx_context->tx_rate;
/* Get SignalField,ServiceField,Length */
vnt_get_phy_field(priv, cts_frame_len, priv->top_cck_basic_rate,
PK_TYPE_11B, &buf->b);
/* Get CTSDuration_ba */
- buf->duration_ba =
- vnt_get_rtscts_duration_le(tx_context, CTSDUR_BA,
- tx_context->pkt_type,
- current_rate);
+ buf->duration_ba = vnt_get_cts_duration(tx_context);
/*Get CTS Frame body*/
buf->data.duration = buf->duration_ba;
buf->data.frame_control =
@@ -349,127 +288,19 @@ static u16 vnt_fill_cts_head(struct vnt_usb_send_context *tx_context,
ether_addr_copy(buf->data.ra, priv->current_net_addr);
- return vnt_rxtx_datahead_g(tx_context, &buf->data_head);
+ vnt_rxtx_datahead_g(tx_context, &buf->data_head);
}
-static u16 vnt_rxtx_rts(struct vnt_usb_send_context *tx_context,
- union vnt_tx_head *tx_head, bool need_mic)
+/* returns true if mic_hdr is needed */
+static bool vnt_fill_txkey(struct vnt_tx_buffer *tx_buffer, struct sk_buff *skb)
{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_rrv_time_rts *buf = &tx_head->tx_rts.rts;
- union vnt_tx_data_head *head = &tx_head->tx_rts.tx.head;
- u32 frame_len = tx_context->frame_len;
- u16 current_rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- buf->rts_rrv_time_aa = vnt_get_rtscts_rsvtime_le(priv, 2,
- tx_context->pkt_type, frame_len, current_rate);
- buf->rts_rrv_time_ba = vnt_get_rtscts_rsvtime_le(priv, 1,
- tx_context->pkt_type, frame_len, current_rate);
- buf->rts_rrv_time_bb = vnt_get_rtscts_rsvtime_le(priv, 0,
- tx_context->pkt_type, frame_len, current_rate);
-
- buf->rrv_time_a = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate,
- need_ack);
- buf->rrv_time_b = vnt_rxtx_rsvtime_le16(priv, PK_TYPE_11B, frame_len,
- priv->top_cck_basic_rate, need_ack);
-
- if (need_mic)
- head = &tx_head->tx_rts.tx.mic.head;
-
- return vnt_rxtx_rts_g_head(tx_context, &head->rts_g);
-}
-
-static u16 vnt_rxtx_cts(struct vnt_usb_send_context *tx_context,
- union vnt_tx_head *tx_head, bool need_mic)
-{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_rrv_time_cts *buf = &tx_head->tx_cts.cts;
- union vnt_tx_data_head *head = &tx_head->tx_cts.tx.head;
- u32 frame_len = tx_context->frame_len;
- u16 current_rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- buf->rrv_time_a = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate, need_ack);
- buf->rrv_time_b = vnt_rxtx_rsvtime_le16(priv, PK_TYPE_11B,
- frame_len, priv->top_cck_basic_rate, need_ack);
-
- buf->cts_rrv_time_ba = vnt_get_rtscts_rsvtime_le(priv, 3,
- tx_context->pkt_type, frame_len, current_rate);
-
- if (need_mic)
- head = &tx_head->tx_cts.tx.mic.head;
-
- return vnt_fill_cts_head(tx_context, head);
-}
-
-static u16 vnt_rxtx_ab(struct vnt_usb_send_context *tx_context,
- union vnt_tx_head *tx_head, bool need_rts, bool need_mic)
-{
- struct vnt_private *priv = tx_context->priv;
- struct vnt_rrv_time_ab *buf = &tx_head->tx_ab.ab;
- union vnt_tx_data_head *head = &tx_head->tx_ab.tx.head;
- u32 frame_len = tx_context->frame_len;
- u16 current_rate = tx_context->tx_rate;
- u8 need_ack = tx_context->need_ack;
-
- buf->rrv_time = vnt_rxtx_rsvtime_le16(priv, tx_context->pkt_type,
- frame_len, current_rate, need_ack);
-
- if (need_mic)
- head = &tx_head->tx_ab.tx.mic.head;
-
- if (need_rts) {
- if (tx_context->pkt_type == PK_TYPE_11B)
- buf->rts_rrv_time = vnt_get_rtscts_rsvtime_le(priv, 0,
- tx_context->pkt_type, frame_len, current_rate);
- else /* PK_TYPE_11A */
- buf->rts_rrv_time = vnt_get_rtscts_rsvtime_le(priv, 2,
- tx_context->pkt_type, frame_len, current_rate);
-
- return vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab);
- }
-
- return vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab);
-}
-
-static u16 vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context,
- struct vnt_tx_buffer *tx_buffer,
- struct vnt_mic_hdr **mic_hdr, u32 need_mic,
- bool need_rts)
-{
- if (tx_context->pkt_type == PK_TYPE_11GB ||
- tx_context->pkt_type == PK_TYPE_11GA) {
- if (need_rts) {
- if (need_mic)
- *mic_hdr =
- &tx_buffer->tx_head.tx_rts.tx.mic.hdr;
-
- return vnt_rxtx_rts(tx_context, &tx_buffer->tx_head,
- need_mic);
- }
-
- if (need_mic)
- *mic_hdr = &tx_buffer->tx_head.tx_cts.tx.mic.hdr;
-
- return vnt_rxtx_cts(tx_context, &tx_buffer->tx_head, need_mic);
- }
-
- if (need_mic)
- *mic_hdr = &tx_buffer->tx_head.tx_ab.tx.mic.hdr;
-
- return vnt_rxtx_ab(tx_context, &tx_buffer->tx_head, need_rts, need_mic);
-}
-
-static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
- u8 *key_buffer, struct ieee80211_key_conf *tx_key,
- struct sk_buff *skb, u16 payload_len,
- struct vnt_mic_hdr *mic_hdr)
-{
- struct ieee80211_hdr *hdr = tx_context->hdr;
+ struct vnt_tx_fifo_head *fifo = &tx_buffer->fifo_head;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *tx_key = info->control.hw_key;
+ struct vnt_mic_hdr *mic_hdr;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
u64 pn64;
+ u16 payload_len = skb->len;
u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
/* strip header and icv len from payload */
@@ -479,24 +310,31 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
switch (tx_key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- memcpy(key_buffer, iv, 3);
- memcpy(key_buffer + 3, tx_key->key, tx_key->keylen);
+ memcpy(fifo->tx_key, iv, 3);
+ memcpy(fifo->tx_key + 3, tx_key->key, tx_key->keylen);
if (tx_key->keylen == WLAN_KEY_LEN_WEP40) {
- memcpy(key_buffer + 8, iv, 3);
- memcpy(key_buffer + 11,
+ memcpy(fifo->tx_key + 8, iv, 3);
+ memcpy(fifo->tx_key + 11,
tx_key->key, WLAN_KEY_LEN_WEP40);
}
+ fifo->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
break;
case WLAN_CIPHER_SUITE_TKIP:
- ieee80211_get_tkip_p2k(tx_key, skb, key_buffer);
+ ieee80211_get_tkip_p2k(tx_key, skb, fifo->tx_key);
+ fifo->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
break;
case WLAN_CIPHER_SUITE_CCMP:
-
- if (!mic_hdr)
- return;
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts)
+ mic_hdr = &tx_buffer->tx_head.tx_rts.tx.mic.hdr;
+ else
+ mic_hdr = &tx_buffer->tx_head.tx_cts.tx.mic.hdr;
+ } else {
+ mic_hdr = &tx_buffer->tx_head.tx_ab.tx.mic.hdr;
+ }
mic_hdr->id = 0x59;
mic_hdr->payload_len = cpu_to_be16(payload_len);
@@ -527,12 +365,137 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
if (ieee80211_has_a4(hdr->frame_control))
ether_addr_copy(mic_hdr->addr4, hdr->addr4);
- memcpy(key_buffer, tx_key->key, WLAN_KEY_LEN_CCMP);
+ memcpy(fifo->tx_key, tx_key->key, WLAN_KEY_LEN_CCMP);
- break;
+ fifo->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
+ return true;
default:
break;
}
+
+ return false;
+}
+
+static void vnt_rxtx_rts(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+ struct vnt_tx_buffer *tx_buffer = tx_context->tx_buffer;
+ union vnt_tx_head *tx_head = &tx_buffer->tx_head;
+ struct vnt_rrv_time_rts *buf = &tx_head->tx_rts.rts;
+ union vnt_tx_data_head *head = &tx_head->tx_rts.tx.head;
+
+ buf->rts_rrv_time_aa = vnt_get_rts_duration(tx_context);
+ buf->rts_rrv_time_ba = buf->rts_rrv_time_aa;
+ buf->rts_rrv_time_bb = buf->rts_rrv_time_aa;
+
+ buf->rrv_time_a = vnt_rxtx_rsvtime_le16(tx_context);
+ buf->rrv_time_b = buf->rrv_time_a;
+
+ if (info->control.hw_key) {
+ if (vnt_fill_txkey(tx_buffer, tx_context->skb))
+ head = &tx_head->tx_rts.tx.mic.head;
+ }
+
+ vnt_rxtx_rts_g_head(tx_context, &head->rts_g);
+}
+
+static void vnt_rxtx_cts(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+ struct vnt_tx_buffer *tx_buffer = tx_context->tx_buffer;
+ union vnt_tx_head *tx_head = &tx_buffer->tx_head;
+ struct vnt_rrv_time_cts *buf = &tx_head->tx_cts.cts;
+ union vnt_tx_data_head *head = &tx_head->tx_cts.tx.head;
+
+ buf->rrv_time_a = vnt_rxtx_rsvtime_le16(tx_context);
+ buf->rrv_time_b = buf->rrv_time_a;
+
+ buf->cts_rrv_time_ba = vnt_get_cts_duration(tx_context);
+
+ if (info->control.hw_key) {
+ if (vnt_fill_txkey(tx_buffer, tx_context->skb))
+ head = &tx_head->tx_cts.tx.mic.head;
+ }
+
+ vnt_fill_cts_head(tx_context, head);
+}
+
+static void vnt_rxtx_ab(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+ struct vnt_tx_buffer *tx_buffer = tx_context->tx_buffer;
+ union vnt_tx_head *tx_head = &tx_buffer->tx_head;
+ struct vnt_rrv_time_ab *buf = &tx_head->tx_ab.ab;
+ union vnt_tx_data_head *head = &tx_head->tx_ab.tx.head;
+
+ buf->rrv_time = vnt_rxtx_rsvtime_le16(tx_context);
+
+ if (info->control.hw_key) {
+ if (vnt_fill_txkey(tx_buffer, tx_context->skb))
+ head = &tx_head->tx_ab.tx.mic.head;
+ }
+
+ if (info->control.use_rts) {
+ buf->rts_rrv_time = vnt_get_rts_duration(tx_context);
+
+ vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab);
+
+ return;
+ }
+
+ vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab);
+}
+
+static void vnt_generate_tx_parameter(struct vnt_usb_send_context *tx_context)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_context->skb);
+
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts) {
+ vnt_rxtx_rts(tx_context);
+
+ return;
+ }
+
+ vnt_rxtx_cts(tx_context);
+
+ return;
+ }
+
+ vnt_rxtx_ab(tx_context);
+}
+
+static u16 vnt_get_hdr_size(struct ieee80211_tx_info *info)
+{
+ u16 size = sizeof(struct vnt_tx_datahead_ab);
+
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts)
+ size = sizeof(struct vnt_rts_g);
+ else
+ size = sizeof(struct vnt_cts);
+ } else if (info->control.use_rts) {
+ size = sizeof(struct vnt_rts_ab);
+ }
+
+ if (info->control.hw_key) {
+ if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ size += sizeof(struct vnt_mic_hdr);
+ }
+
+ /* Get rrv_time header */
+ if (info->control.use_cts_prot) {
+ if (info->control.use_rts)
+ size += sizeof(struct vnt_rrv_time_rts);
+ else
+ size += sizeof(struct vnt_rrv_time_cts);
+ } else {
+ size += sizeof(struct vnt_rrv_time_ab);
+ }
+
+ size += sizeof(struct vnt_tx_fifo_head);
+
+ return size;
}
int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
@@ -540,30 +503,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
struct ieee80211_rate *rate;
- struct ieee80211_key_conf *tx_key;
struct ieee80211_hdr *hdr;
- struct vnt_mic_hdr *mic_hdr = NULL;
struct vnt_tx_buffer *tx_buffer;
struct vnt_tx_fifo_head *tx_buffer_head;
struct vnt_usb_send_context *tx_context;
unsigned long flags;
- u16 tx_bytes, tx_header_size, tx_body_size, current_rate, duration_id;
u8 pkt_type;
- bool need_rts = false;
- bool need_mic = false;
hdr = (struct ieee80211_hdr *)(skb->data);
rate = ieee80211_get_tx_rate(priv->hw, info);
- current_rate = rate->hw_value;
- if (priv->current_rate != current_rate &&
- !(priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
- priv->current_rate = current_rate;
- vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
- }
-
- if (current_rate > RATE_11M) {
+ if (rate->hw_value > RATE_11M) {
if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
@@ -589,17 +540,23 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
return -ENOMEM;
}
- tx_context->skb = skb;
tx_context->pkt_type = pkt_type;
- tx_context->need_ack = false;
tx_context->frame_len = skb->len + 4;
- tx_context->tx_rate = current_rate;
+ tx_context->tx_rate = rate->hw_value;
spin_unlock_irqrestore(&priv->lock, flags);
- tx_buffer = (struct vnt_tx_buffer *)tx_context->data;
+ tx_context->skb = skb_clone(skb, GFP_ATOMIC);
+ if (!tx_context->skb) {
+ tx_context->in_use = false;
+ return -ENOMEM;
+ }
+
+ tx_buffer = skb_push(skb, vnt_get_hdr_size(info));
+ tx_context->tx_buffer = tx_buffer;
tx_buffer_head = &tx_buffer->fifo_head;
- tx_body_size = skb->len;
+
+ tx_context->type = CONTEXT_DATA_PACKET;
/*Set fifo controls */
if (pkt_type == PK_TYPE_11A)
@@ -623,92 +580,43 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
cpu_to_le16(DEFAULT_MSDU_LIFETIME_RES_64us);
}
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_NEEDACK);
- tx_context->need_ack = true;
- }
if (ieee80211_has_retry(hdr->frame_control))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LRETRY);
- if (tx_rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- priv->preamble_type = PREAMBLE_SHORT;
- else
- priv->preamble_type = PREAMBLE_LONG;
-
- if (tx_rate->flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- need_rts = true;
+ if (info->control.use_rts)
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_RTS);
- }
if (ieee80211_has_a4(hdr->frame_control))
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LHEAD);
tx_buffer_head->frag_ctl =
- cpu_to_le16(ieee80211_get_hdrlen_from_skb(skb) << 10);
+ cpu_to_le16(ieee80211_hdrlen(hdr->frame_control) << 10);
- if (info->control.hw_key) {
- tx_key = info->control.hw_key;
- switch (info->control.hw_key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_LEGACY);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_TKIP);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_AES);
- need_mic = true;
- default:
- break;
- }
- tx_context->frame_len += tx_key->icv_len;
- }
+ if (info->control.hw_key)
+ tx_context->frame_len += info->control.hw_key->icv_len;
- tx_buffer_head->current_rate = cpu_to_le16(current_rate);
+ tx_buffer_head->current_rate = cpu_to_le16(rate->hw_value);
- duration_id = vnt_generate_tx_parameter(tx_context, tx_buffer, &mic_hdr,
- need_mic, need_rts);
-
- tx_header_size = tx_context->tx_hdr_size;
- if (!tx_header_size) {
- tx_context->in_use = false;
- return -ENOMEM;
- }
+ vnt_generate_tx_parameter(tx_context);
tx_buffer_head->frag_ctl |= cpu_to_le16(FRAGCTL_NONFRAG);
- tx_bytes = tx_header_size + tx_body_size;
-
- memcpy(tx_context->hdr, skb->data, tx_body_size);
-
- if (info->control.hw_key) {
- tx_key = info->control.hw_key;
- if (tx_key->keylen > 0)
- vnt_fill_txkey(tx_context, tx_buffer_head->tx_key,
- tx_key, skb, tx_body_size, mic_hdr);
- }
-
priv->seq_counter = (le16_to_cpu(hdr->seq_ctrl) &
IEEE80211_SCTL_SEQ) >> 4;
- tx_buffer->tx_byte_count = cpu_to_le16(tx_bytes);
- tx_buffer->pkt_no = tx_context->pkt_no;
- tx_buffer->type = 0x00;
-
- tx_bytes += 4;
-
- tx_context->type = CONTEXT_DATA_PACKET;
- tx_context->buf_len = tx_bytes;
-
spin_lock_irqsave(&priv->lock, flags);
- if (vnt_tx_context(priv, tx_context)) {
+ if (vnt_tx_context(priv, tx_context, skb)) {
+ dev_kfree_skb(tx_context->skb);
spin_unlock_irqrestore(&priv->lock, flags);
return -EIO;
}
+ dev_kfree_skb(skb);
+
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -716,14 +624,13 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
{
- struct vnt_beacon_buffer *beacon_buffer;
struct vnt_tx_short_buf_head *short_head;
struct ieee80211_tx_info *info;
struct vnt_usb_send_context *context;
struct ieee80211_mgmt *mgmt_hdr;
unsigned long flags;
u32 frame_size = skb->len + 4;
- u16 current_rate, count;
+ u16 current_rate;
spin_lock_irqsave(&priv->lock, flags);
@@ -738,8 +645,8 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->lock, flags);
- beacon_buffer = (struct vnt_beacon_buffer *)&context->data[0];
- short_head = &beacon_buffer->short_head;
+ mgmt_hdr = (struct ieee80211_mgmt *)skb->data;
+ short_head = skb_push(skb, sizeof(*short_head));
if (priv->bb_type == BB_TYPE_11A) {
current_rate = RATE_6M;
@@ -764,10 +671,6 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
vnt_time_stamp_off(priv, current_rate);
}
- /* Generate Beacon Header */
- mgmt_hdr = &beacon_buffer->mgmt_hdr;
- memcpy(mgmt_hdr, skb->data, skb->len);
-
/* Get Duration */
short_head->duration = mgmt_hdr->duration;
@@ -786,18 +689,11 @@ static int vnt_beacon_xmit(struct vnt_private *priv, struct sk_buff *skb)
if (priv->seq_counter > 0x0fff)
priv->seq_counter = 0;
- count = sizeof(struct vnt_tx_short_buf_head) + skb->len;
-
- beacon_buffer->tx_byte_count = cpu_to_le16(count);
- beacon_buffer->pkt_no = context->pkt_no;
- beacon_buffer->type = 0x01;
-
context->type = CONTEXT_BEACON_PACKET;
- context->buf_len = count + 4; /* USB header */
spin_lock_irqsave(&priv->lock, flags);
- if (vnt_tx_context(priv, context))
+ if (vnt_tx_context(priv, context, skb))
ieee80211_free_txskb(priv->hw, context->skb);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index 0e6226af7d41..6ca2ca32d036 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -23,6 +23,13 @@
#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
#define DEFAULT_MSDU_LIFETIME_RES_64us 8000
+/* Length, Service, and Signal fields of Phy for Tx */
+struct vnt_phy_field {
+ u8 signal;
+ u8 service;
+ __le16 len;
+} __packed;
+
/* MIC HDR data header */
struct vnt_mic_hdr {
u8 id;
@@ -70,14 +77,12 @@ struct vnt_tx_datahead_g {
__le16 duration_a;
__le16 time_stamp_off_b;
__le16 time_stamp_off_a;
- struct ieee80211_hdr hdr;
} __packed;
struct vnt_tx_datahead_ab {
struct vnt_phy_field ab;
__le16 duration;
__le16 time_stamp_off;
- struct ieee80211_hdr hdr;
} __packed;
/* RTS buffer header */
@@ -155,9 +160,6 @@ struct vnt_tx_fifo_head {
} __packed;
struct vnt_tx_buffer {
- u8 type;
- u8 pkt_no;
- __le16 tx_byte_count;
struct vnt_tx_fifo_head fifo_head;
union vnt_tx_head tx_head;
} __packed;
@@ -170,14 +172,6 @@ struct vnt_tx_short_buf_head {
__le16 time_stamp_off;
} __packed;
-struct vnt_beacon_buffer {
- u8 type;
- u8 pkt_no;
- __le16 tx_byte_count;
- struct vnt_tx_short_buf_head short_head;
- struct ieee80211_mgmt mgmt_hdr;
-} __packed;
-
int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb);
int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif);
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 91b62c3dff7b..82b774be6485 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -77,7 +77,7 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 data)
}
int vnt_control_out_blocks(struct vnt_private *priv,
- u16 block, u8 reg, u16 length, u8 *data)
+ u16 block, u8 reg, u16 length, const u8 *data)
{
int ret = 0, i;
@@ -196,32 +196,18 @@ static void vnt_int_process_data(struct vnt_private *priv)
if (int_data->tsr3 & TSR_VALID)
vnt_int_report_rate(priv, int_data->pkt3, int_data->tsr3);
- if (int_data->isr0 != 0) {
- if (int_data->isr0 & ISR_BNTX &&
- priv->op_mode == NL80211_IFTYPE_AP)
- vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);
-
- if (int_data->isr0 & ISR_TBTT &&
- priv->hw->conf.flags & IEEE80211_CONF_PS) {
- if (!priv->wake_up_count)
- priv->wake_up_count =
- priv->hw->conf.listen_interval;
+ if (!int_data->isr0)
+ return;
- if (priv->wake_up_count)
- --priv->wake_up_count;
+ if (int_data->isr0 & ISR_BNTX && priv->op_mode == NL80211_IFTYPE_AP)
+ vnt_schedule_command(priv, WLAN_CMD_BECON_SEND);
- /* Turn on wake up to listen next beacon */
- if (priv->wake_up_count == 1)
- vnt_schedule_command(priv,
- WLAN_CMD_TBTT_WAKEUP);
- }
- priv->current_tsf = le64_to_cpu(int_data->tsf);
+ priv->current_tsf = le64_to_cpu(int_data->tsf);
- low_stats->dot11RTSSuccessCount += int_data->rts_success;
- low_stats->dot11RTSFailureCount += int_data->rts_fail;
- low_stats->dot11ACKFailureCount += int_data->ack_fail;
- low_stats->dot11FCSErrorCount += int_data->fcs_err;
- }
+ low_stats->dot11RTSSuccessCount += int_data->rts_success;
+ low_stats->dot11RTSFailureCount += int_data->rts_fail;
+ low_stats->dot11ACKFailureCount += int_data->ack_fail;
+ low_stats->dot11FCSErrorCount += int_data->fcs_err;
}
static void vnt_start_interrupt_urb_complete(struct urb *urb)
@@ -442,7 +428,8 @@ static void vnt_tx_context_complete(struct urb *urb)
switch (urb->status) {
case 0:
- dev_dbg(&priv->usb->dev, "Write %d bytes\n", context->buf_len);
+ dev_dbg(&priv->usb->dev,
+ "Write %d bytes\n", urb->actual_length);
break;
case -ECONNRESET:
case -ENOENT:
@@ -467,30 +454,53 @@ static void vnt_tx_context_complete(struct urb *urb)
}
int vnt_tx_context(struct vnt_private *priv,
- struct vnt_usb_send_context *context)
+ struct vnt_usb_send_context *context,
+ struct sk_buff *skb)
{
+ struct vnt_tx_usb_header *usb;
+ struct urb *urb;
int status;
- struct urb *urb = context->urb;
+ u16 count = skb->len;
+
+ usb = skb_push(skb, sizeof(*usb));
+ usb->tx_byte_count = cpu_to_le16(count);
+ usb->pkt_no = context->pkt_no;
+ usb->type = context->type;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) {
context->in_use = false;
return -ENODEV;
}
+ if (skb->len > MAX_TOTAL_SIZE_WITH_ALL_HEADERS) {
+ context->in_use = false;
+ return -E2BIG;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ context->in_use = false;
+ return -ENOMEM;
+ }
+
usb_fill_bulk_urb(urb,
priv->usb,
usb_sndbulkpipe(priv->usb, 3),
- context->data,
- context->buf_len,
+ skb->data,
+ skb->len,
vnt_tx_context_complete,
context);
+ usb_anchor_urb(urb, &priv->tx_submitted);
+
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status);
-
+ usb_unanchor_urb(urb);
context->in_use = false;
}
+ usb_free_urb(urb);
+
return status;
}
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index 35697b58d748..52c2a928c9c1 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -41,6 +41,12 @@ struct vnt_interrupt_data {
u8 sw[2];
} __packed;
+struct vnt_tx_usb_header {
+ u8 type;
+ u8 pkt_no;
+ __le16 tx_byte_count;
+} __packed;
+
#define VNT_REG_BLOCK_SIZE 64
int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
@@ -52,11 +58,12 @@ int vnt_control_out_u8(struct vnt_private *priv, u8 reg, u8 ref_off, u8 data);
int vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data);
int vnt_control_out_blocks(struct vnt_private *priv,
- u16 block, u8 reg, u16 len, u8 *data);
+ u16 block, u8 reg, u16 len, const u8 *data);
int vnt_start_interrupt_urb(struct vnt_private *priv);
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb);
int vnt_tx_context(struct vnt_private *priv,
- struct vnt_usb_send_context *context);
+ struct vnt_usb_send_context *context,
+ struct sk_buff *skb);
#endif /* __USBPIPE_H__ */
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 2c5250ca2801..0ccc87da394e 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -122,8 +122,7 @@ void vnt_run_command(struct work_struct *work)
case WLAN_CMD_SETPOWER_START:
- vnt_rf_setpower(priv, priv->current_rate,
- priv->hw->conf.chandef.chan->hw_value);
+ vnt_rf_setpower(priv, priv->hw->conf.chandef.chan);
break;
diff --git a/drivers/staging/wfx/Makefile b/drivers/staging/wfx/Makefile
index 0d9c1ed092f6..0e0cc982ceab 100644
--- a/drivers/staging/wfx/Makefile
+++ b/drivers/staging/wfx/Makefile
@@ -7,6 +7,7 @@ wfx-y := \
bh.o \
hwio.o \
fwio.o \
+ hif_tx_mib.o \
hif_tx.o \
hif_rx.o \
queue.o \
diff --git a/drivers/staging/wfx/TODO b/drivers/staging/wfx/TODO
index efcb7c6a5aa7..42bf36d43970 100644
--- a/drivers/staging/wfx/TODO
+++ b/drivers/staging/wfx/TODO
@@ -1,60 +1,25 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
- - All structures defined in hif_api_*.h are intended to sent/received to/from
- hardware. All their members whould be declared __le32 or __le16.
- See:
- https://lore.kernel.org/lkml/20191111202852.GX26530@ZenIV.linux.org.uk
+ - The HIF API is not yet clean enough.
- - Once previous item done, it will be possible to audit the driver with
- `sparse'. It will probably find tons of problems with big endian
- architectures.
-
- - hif_api_*.h whave been imported from firmware code. Some of the structures
- are never used in driver.
-
- - Driver try to maintains power save status of the stations. However, this
- work is already done by mac80211. sta_asleep_mask and pspoll_mask should be
- dropped.
-
- - wfx_tx_queues_get() should be reworked. It currently try compute itself the
- QoS policy. However, firmware already do the job. Firmware would prefer to
- have a few packets in each queue and be able to choose itself which queue to
- use.
+ - The code that checks the correctness of received messages (in rx_helper())
+ can be improved. See:
+ https://lore.kernel.org/driverdev-devel/2302785.6C7ODC2LYm@pc-42/
- As suggested by Felix, rate control could be improved following this idea:
https://lore.kernel.org/lkml/3099559.gv3Q75KnN1@pc-42/
- - When driver is about to loose BSS, it forge its own Null Func request (see
- wfx_cqm_bssloss_sm()). It should use mechanism provided by mac80211.
-
- - AP is actually is setup after a call to wfx_bss_info_changed(). Yet,
- ieee80211_ops provide callback start_ap().
-
- - The current process for joining a network is incredibly complex. Should be
- reworked.
-
- - Monitoring mode is not implemented despite being mandatory by mac80211.
-
- - "compatible" value are not correct. They should be "vendor,chip". See:
- https://lore.kernel.org/driverdev-devel/5226570.CMH5hVlZcI@pc-42
-
- - The "state" field from wfx_vif should be replaced by "vif->type".
-
- - It seems that wfx_upload_keys() is useless.
-
- - "event_queue" from wfx_vif seems overkill. These event are rare and they
- probably could be handled in a simpler fashion.
-
- Feature called "secure link" should be either developed (using kernel
crypto API) or dropped.
+ - The device can filter multicast traffic. The code to support these
+ filters exists in the driver, but it is disabled because it has never
+ been tested.
+
- In wfx_cmd_send(), "async" allow to send command without waiting the reply.
It may help in some situation, but it is not yet used. In add, it may cause
some trouble:
https://lore.kernel.org/driverdev-devel/alpine.DEB.2.21.1910041317381.2992@hadrien/
So, fix it (by replacing the mutex with a semaphore) or drop it.
- - Chip support P2P, but driver does not implement it.
-
- - Chip support kind of Mesh, but driver does not implement it.
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index 9fcab00a3733..1cbaf8bb4fa3 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -70,7 +70,7 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
if (wfx_data_read(wdev, skb->data, alloc_len))
goto err;
- piggyback = le16_to_cpup((u16 *)(skb->data + alloc_len - 2));
+ piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
_trace_piggyback(piggyback, false);
hif = (struct hif_msg *)skb->data;
@@ -84,13 +84,12 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
// piggyback is probably correct.
return piggyback;
}
- le16_to_cpus(&hif->len);
- computed_len = round_up(hif->len - sizeof(hif->len), 16)
- + sizeof(struct hif_sl_msg)
- + sizeof(struct hif_sl_tag);
+ computed_len =
+ round_up(le16_to_cpu(hif->len) - sizeof(hif->len), 16) +
+ sizeof(struct hif_sl_msg) +
+ sizeof(struct hif_sl_tag);
} else {
- le16_to_cpus(&hif->len);
- computed_len = round_up(hif->len, 2);
+ computed_len = round_up(le16_to_cpu(hif->len), 2);
}
if (computed_len != read_len) {
dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
@@ -103,13 +102,11 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
if (!(hif->id & HIF_ID_IS_INDICATION)) {
(*is_cnf)++;
if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
- release_count = le32_to_cpu(((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs);
+ release_count = ((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
else
release_count = 1;
WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
wdev->hif.tx_buffers_used -= release_count;
- if (!wdev->hif.tx_buffers_used)
- wake_up(&wdev->hif.tx_buffers_empty);
}
_trace_hif_recv(hif, wdev->hif.tx_buffers_used);
@@ -120,9 +117,11 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
}
- skb_put(skb, hif->len);
+ skb_put(skb, le16_to_cpu(hif->len));
// wfx_handle_rx takes care on SKB livetime
wfx_handle_rx(wdev, skb);
+ if (!wdev->hif.tx_buffers_used)
+ wake_up(&wdev->hif.tx_buffers_empty);
return piggyback;
@@ -307,6 +306,35 @@ void wfx_bh_request_tx(struct wfx_dev *wdev)
queue_work(system_highpri_wq, &wdev->hif.bh);
}
+/*
+ * If an IRQ is not available, this function allows the control register to be
+ * polled manually and simulates an IRQ when an event happens.
+ *
+ * Note that the device has a bug: if an IRQ is raised while the host reads the
+ * control register, the IRQ is lost. So, use this function carefully (only
+ * during device initialisation).
+ */
+void wfx_bh_poll_irq(struct wfx_dev *wdev)
+{
+ ktime_t now, start;
+ u32 reg;
+
+ WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
+ start = ktime_get();
+ for (;;) {
+ control_reg_read(wdev, &reg);
+ now = ktime_get();
+ if (reg & 0xFFF)
+ break;
+ if (ktime_after(now, ktime_add_ms(start, 1000))) {
+ dev_err(wdev->dev, "time out while polling control register\n");
+ return;
+ }
+ udelay(200);
+ }
+ wfx_bh_request_rx(wdev);
+}
+
void wfx_bh_register(struct wfx_dev *wdev)
{
INIT_WORK(&wdev->hif.bh, bh_work);
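The comment on wfx_bh_poll_irq() above spells out its constraint: polling can swallow a real IRQ, so it is only safe before the bus IRQ is subscribed. A hypothetical caller could look like the sketch below; poll_irq matches the field used in the WARN() above, while hwbus_ops, hwbus_priv and the overall sequence are assumptions made for illustration, not the driver's actual init path.

#include "bh.h"
#include "bus.h"
#include "wfx.h"

/* Hypothetical boot-time caller; not the driver's actual init sequence. */
static int demo_boot_step(struct wfx_dev *wdev)
{
	int ret;

	wdev->poll_irq = true;		/* nothing is subscribed to the IRQ yet */
	/* ... send a boot-time HIF command here ... */
	wfx_bh_poll_irq(wdev);		/* fetch the confirmation by polling */

	/* once the firmware answers, switch to the real interrupt */
	ret = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv);
	if (ret)
		return ret;
	wdev->poll_irq = false;
	return 0;
}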
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
index 93ca98424e0b..4b73437869e1 100644
--- a/drivers/staging/wfx/bh.h
+++ b/drivers/staging/wfx/bh.h
@@ -28,5 +28,6 @@ void wfx_bh_register(struct wfx_dev *wdev);
void wfx_bh_unregister(struct wfx_dev *wdev);
void wfx_bh_request_rx(struct wfx_dev *wdev);
void wfx_bh_request_tx(struct wfx_dev *wdev);
+void wfx_bh_poll_irq(struct wfx_dev *wdev);
#endif /* WFX_BH_H */
diff --git a/drivers/staging/wfx/bus.h b/drivers/staging/wfx/bus.h
index 62d6ecabe4cb..0370b6c59863 100644
--- a/drivers/staging/wfx/bus.h
+++ b/drivers/staging/wfx/bus.h
@@ -25,6 +25,8 @@ struct hwbus_ops {
void *dst, size_t count);
int (*copy_to_io)(void *bus_priv, unsigned int addr,
const void *src, size_t count);
+ int (*irq_subscribe)(void *bus_priv);
+ int (*irq_unsubscribe)(void *bus_priv);
void (*lock)(void *bus_priv);
void (*unlock)(void *bus_priv);
size_t (*align_size)(void *bus_priv, size_t size);
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
index dedc3ff58d3e..496bfc8bbacc 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -6,10 +6,12 @@
* Copyright (c) 2010, ST-Ericsson
*/
#include <linux/module.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/irq.h>
#include "bus.h"
#include "wfx.h"
@@ -91,53 +93,58 @@ static void wfx_sdio_irq_handler(struct sdio_func *func)
{
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
- if (bus->core)
- wfx_bh_request_rx(bus->core);
- else
- WARN(!bus->core, "race condition in driver init/deinit");
+ wfx_bh_request_rx(bus->core);
}
static irqreturn_t wfx_sdio_irq_handler_ext(int irq, void *priv)
{
struct wfx_sdio_priv *bus = priv;
- if (!bus->core) {
- WARN(!bus->core, "race condition in driver init/deinit");
- return IRQ_NONE;
- }
sdio_claim_host(bus->func);
wfx_bh_request_rx(bus->core);
sdio_release_host(bus->func);
return IRQ_HANDLED;
}
-static int wfx_sdio_irq_subscribe(struct wfx_sdio_priv *bus)
+static int wfx_sdio_irq_subscribe(void *priv)
{
+ struct wfx_sdio_priv *bus = priv;
+ u32 flags;
int ret;
+ u8 cccr;
- if (bus->of_irq) {
- ret = request_irq(bus->of_irq, wfx_sdio_irq_handler_ext,
- IRQF_TRIGGER_RISING, "wfx", bus);
- } else {
+ if (!bus->of_irq) {
sdio_claim_host(bus->func);
ret = sdio_claim_irq(bus->func, wfx_sdio_irq_handler);
sdio_release_host(bus->func);
+ return ret;
}
- return ret;
+
+ sdio_claim_host(bus->func);
+ cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
+ cccr |= BIT(0);
+ cccr |= BIT(bus->func->num);
+ sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
+ sdio_release_host(bus->func);
+ flags = irq_get_trigger_type(bus->of_irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ return devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
+ wfx_sdio_irq_handler_ext, flags,
+ "wfx", bus);
}
-static int wfx_sdio_irq_unsubscribe(struct wfx_sdio_priv *bus)
+static int wfx_sdio_irq_unsubscribe(void *priv)
{
+ struct wfx_sdio_priv *bus = priv;
int ret;
- if (bus->of_irq) {
- free_irq(bus->of_irq, bus);
- ret = 0;
- } else {
- sdio_claim_host(bus->func);
- ret = sdio_release_irq(bus->func);
- sdio_release_host(bus->func);
- }
+ if (bus->of_irq)
+ devm_free_irq(&bus->func->dev, bus->of_irq, bus);
+ sdio_claim_host(bus->func);
+ ret = sdio_release_irq(bus->func);
+ sdio_release_host(bus->func);
return ret;
}
@@ -151,12 +158,20 @@ static size_t wfx_sdio_align_size(void *priv, size_t size)
static const struct hwbus_ops wfx_sdio_hwbus_ops = {
.copy_from_io = wfx_sdio_copy_from_io,
.copy_to_io = wfx_sdio_copy_to_io,
+ .irq_subscribe = wfx_sdio_irq_subscribe,
+ .irq_unsubscribe = wfx_sdio_irq_unsubscribe,
.lock = wfx_sdio_lock,
.unlock = wfx_sdio_unlock,
.align_size = wfx_sdio_align_size,
};
-static const struct of_device_id wfx_sdio_of_match[];
+static const struct of_device_id wfx_sdio_of_match[] = {
+ { .compatible = "silabs,wfx-sdio" },
+ { .compatible = "silabs,wf200" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+
static int wfx_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -165,7 +180,8 @@ static int wfx_sdio_probe(struct sdio_func *func,
int ret;
if (func->num != 1) {
- dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n", func->num);
+ dev_err(&func->dev, "SDIO function number is %d while it should always be 1 (unsupported chip?)\n",
+ func->num);
return -ENODEV;
}
@@ -207,18 +223,12 @@ static int wfx_sdio_probe(struct sdio_func *func,
goto err1;
}
- ret = wfx_sdio_irq_subscribe(bus);
- if (ret)
- goto err1;
-
ret = wfx_probe(bus->core);
if (ret)
- goto err2;
+ goto err1;
return 0;
-err2:
- wfx_sdio_irq_unsubscribe(bus);
err1:
sdio_claim_host(func);
sdio_disable_func(func);
@@ -232,7 +242,6 @@ static void wfx_sdio_remove(struct sdio_func *func)
struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
wfx_release(bus->core);
- wfx_sdio_irq_unsubscribe(bus);
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
@@ -248,15 +257,6 @@ static const struct sdio_device_id wfx_sdio_ids[] = {
};
MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
-#ifdef CONFIG_OF
-static const struct of_device_id wfx_sdio_of_match[] = {
- { .compatible = "silabs,wfx-sdio" },
- { .compatible = "silabs,wf200" },
- { },
-};
-MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
-#endif
-
struct sdio_driver wfx_sdio_driver = {
.name = "wfx-sdio",
.id_table = wfx_sdio_ids,
@@ -264,6 +264,6 @@ struct sdio_driver wfx_sdio_driver = {
.remove = wfx_sdio_remove,
.drv = {
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(wfx_sdio_of_match),
+ .of_match_table = wfx_sdio_of_match,
}
};
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index 61e99b09decb..e8da61fb096b 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -12,6 +12,7 @@
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include "bus.h"
@@ -39,7 +40,6 @@ struct wfx_spi_priv {
struct spi_device *func;
struct wfx_dev *core;
struct gpio_desc *gpio_reset;
- struct work_struct request_rx;
bool need_swab;
};
@@ -140,25 +140,30 @@ static irqreturn_t wfx_spi_irq_handler(int irq, void *priv)
{
struct wfx_spi_priv *bus = priv;
- if (!bus->core) {
- WARN(!bus->core, "race condition in driver init/deinit");
- return IRQ_NONE;
- }
- queue_work(system_highpri_wq, &bus->request_rx);
+ wfx_bh_request_rx(bus->core);
return IRQ_HANDLED;
}
-static void wfx_spi_request_rx(struct work_struct *work)
+static int wfx_spi_irq_subscribe(void *priv)
{
- struct wfx_spi_priv *bus =
- container_of(work, struct wfx_spi_priv, request_rx);
-
- wfx_bh_request_rx(bus->core);
+ struct wfx_spi_priv *bus = priv;
+ u32 flags;
+
+ flags = irq_get_trigger_type(bus->func->irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL,
+ wfx_spi_irq_handler, IRQF_ONESHOT,
+ "wfx", bus);
}
-static void wfx_flush_irq_work(void *w)
+static int wfx_spi_irq_unsubscribe(void *priv)
{
- flush_work(w);
+ struct wfx_spi_priv *bus = priv;
+
+ devm_free_irq(&bus->func->dev, bus->func->irq, bus);
+ return 0;
}
static size_t wfx_spi_align_size(void *priv, size_t size)
@@ -170,6 +175,8 @@ static size_t wfx_spi_align_size(void *priv, size_t size)
static const struct hwbus_ops wfx_spi_hwbus_ops = {
.copy_from_io = wfx_spi_copy_from_io,
.copy_to_io = wfx_spi_copy_to_io,
+ .irq_subscribe = wfx_spi_irq_subscribe,
+ .irq_unsubscribe = wfx_spi_irq_unsubscribe,
.lock = wfx_spi_lock,
.unlock = wfx_spi_unlock,
.align_size = wfx_spi_align_size,
@@ -216,22 +223,11 @@ static int wfx_spi_probe(struct spi_device *func)
usleep_range(2000, 2500);
}
- INIT_WORK(&bus->request_rx, wfx_spi_request_rx);
bus->core = wfx_init_common(&func->dev, &wfx_spi_pdata,
&wfx_spi_hwbus_ops, bus);
if (!bus->core)
return -EIO;
- ret = devm_add_action_or_reset(&func->dev, wfx_flush_irq_work,
- &bus->request_rx);
- if (ret)
- return ret;
-
- ret = devm_request_irq(&func->dev, func->irq, wfx_spi_irq_handler,
- IRQF_TRIGGER_RISING, "wfx", bus);
- if (ret)
- return ret;
-
return wfx_probe(bus->core);
}
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index c5b83fedeb55..0e959ebc38b5 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -49,7 +49,7 @@ static int wfx_drop_encrypt_data(struct wfx_dev *wdev,
}
/* Firmware strips ICV in case of MIC failure. */
- if (arg->status == HIF_STATUS_MICFAILURE)
+ if (arg->status == HIF_STATUS_RX_FAIL_MIC)
icv_len = 0;
if (skb->len < hdrlen + iv_len + icv_len) {
@@ -79,7 +79,7 @@ void wfx_rx_cb(struct wfx_vif *wvif,
ieee80211_is_beacon(frame->frame_control)))
goto drop;
- if (arg->status == HIF_STATUS_MICFAILURE)
+ if (arg->status == HIF_STATUS_RX_FAIL_MIC)
hdr->flag |= RX_FLAG_MMIC_ERROR;
else if (arg->status)
goto drop;
@@ -118,18 +118,6 @@ void wfx_rx_cb(struct wfx_vif *wvif,
arg->rx_flags.match_uc_addr &&
mgmt->u.action.category == WLAN_CATEGORY_BACK)
goto drop;
- if (ieee80211_is_beacon(frame->frame_control) &&
- !arg->status && wvif->vif &&
- ether_addr_equal(ieee80211_get_SA(frame),
- wvif->vif->bss_conf.bssid)) {
- /* Disable beacon filter once we're associated... */
- if (wvif->disable_beacon_filter &&
- (wvif->vif->bss_conf.assoc ||
- wvif->vif->bss_conf.ibss_joined)) {
- wvif->disable_beacon_filter = false;
- schedule_work(&wvif->update_filtering_work);
- }
- }
ieee80211_rx_irqsafe(wvif->wdev->hw, skb);
return;
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index 61c28bfd2a37..125dbfc1f875 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -8,10 +8,9 @@
#ifndef WFX_DATA_RX_H
#define WFX_DATA_RX_H
-#include "hif_api_cmd.h"
-
struct wfx_vif;
struct sk_buff;
+struct hif_ind_rx;
void wfx_rx_cb(struct wfx_vif *wvif,
const struct hif_ind_rx *arg, struct sk_buff *skb);
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 42183c70d4df..f042ef36b408 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -6,6 +6,7 @@
* Copyright (c) 2010, ST-Ericsson
*/
#include <net/mac80211.h>
+#include <linux/etherdevice.h>
#include "data_tx.h"
#include "wfx.h"
@@ -16,12 +17,11 @@
#include "traces.h"
#include "hif_tx_mib.h"
-#define WFX_INVALID_RATE_ID 15
-#define WFX_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
-
static int wfx_get_hw_rate(struct wfx_dev *wdev,
const struct ieee80211_tx_rate *rate)
{
+ struct ieee80211_supported_band *band;
+
if (rate->idx < 0)
return -1;
if (rate->flags & IEEE80211_TX_RC_MCS) {
@@ -33,7 +33,8 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
}
// WFx only supports 2GHz; otherwise the band information should be retrieved
// from ieee80211_tx_info
- return wdev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates[rate->idx].hw_value;
+ band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ return band->bitrates[rate->idx].hw_value;
}
/* TX policy cache implementation */
@@ -41,73 +42,13 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
static void wfx_tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
struct ieee80211_tx_rate *rates)
{
- int i;
- size_t count;
struct wfx_dev *wdev = wvif->wdev;
+ int i, rateid;
+ u8 count;
WARN(rates[0].idx < 0, "invalid rate policy");
memset(policy, 0, sizeof(*policy));
- for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
- if (rates[i].idx < 0)
- break;
- count = i;
-
- /* HACK!!! Device has problems (at least) switching from
- * 54Mbps CTS to 1Mbps. This switch takes enormous amount
- * of time (100-200 ms), leading to valuable throughput drop.
- * As a workaround, additional g-rates are injected to the
- * policy.
- */
- if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
- rates[0].idx > 4 && rates[0].count > 2 &&
- rates[1].idx < 2) {
- int mid_rate = (rates[0].idx + 4) >> 1;
-
- /* Decrease number of retries for the initial rate */
- rates[0].count -= 2;
-
- if (mid_rate != 4) {
- /* Keep fallback rate at 1Mbps. */
- rates[3] = rates[1];
-
- /* Inject 1 transmission on lowest g-rate */
- rates[2].idx = 4;
- rates[2].count = 1;
- rates[2].flags = rates[1].flags;
-
- /* Inject 1 transmission on mid-rate */
- rates[1].idx = mid_rate;
- rates[1].count = 1;
-
- /* Fallback to 1 Mbps is a really bad thing,
- * so let's try to increase probability of
- * successful transmission on the lowest g rate
- * even more
- */
- if (rates[0].count >= 3) {
- --rates[0].count;
- ++rates[2].count;
- }
-
- /* Adjust amount of rates defined */
- count += 2;
- } else {
- /* Keep fallback rate at 1Mbps. */
- rates[2] = rates[1];
-
- /* Inject 2 transmissions on lowest g-rate */
- rates[1].idx = 4;
- rates[1].count = 2;
-
- /* Adjust amount of rates defined */
- count += 1;
- }
- }
-
for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
- int rateid;
- u8 count;
-
if (rates[i].idx < 0)
break;
WARN_ON(rates[i].count > 15);
@@ -158,8 +99,7 @@ static int wfx_tx_policy_release(struct tx_policy_cache *cache,
}
static int wfx_tx_policy_get(struct wfx_vif *wvif,
- struct ieee80211_tx_rate *rates,
- bool *renew)
+ struct ieee80211_tx_rate *rates, bool *renew)
{
int idx;
struct tx_policy_cache *cache = &wvif->tx_policy_cache;
@@ -171,7 +111,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
if (list_empty(&cache->free)) {
WARN(1, "unable to get a valid Tx policy");
spin_unlock_bh(&cache->lock);
- return WFX_INVALID_RATE_ID;
+ return HIF_TX_RETRY_POLICY_INVALID;
}
idx = wfx_tx_policy_find(cache, &wanted);
if (idx >= 0) {
@@ -189,10 +129,8 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
idx = entry - cache->cache;
}
wfx_tx_policy_use(cache, &cache->cache[idx]);
- if (list_empty(&cache->free)) {
- /* Lock TX queues. */
- wfx_tx_queues_lock(wvif->wdev);
- }
+ if (list_empty(&cache->free))
+ ieee80211_stop_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock);
return idx;
}
@@ -202,15 +140,13 @@ static void wfx_tx_policy_put(struct wfx_vif *wvif, int idx)
int usage, locked;
struct tx_policy_cache *cache = &wvif->tx_policy_cache;
- if (idx == WFX_INVALID_RATE_ID)
+ if (idx == HIF_TX_RETRY_POLICY_INVALID)
return;
spin_lock_bh(&cache->lock);
locked = list_empty(&cache->free);
usage = wfx_tx_policy_release(cache, &cache->cache[idx]);
- if (locked && !usage) {
- /* Unlock TX queues. */
- wfx_tx_queues_unlock(wvif->wdev);
- }
+ if (locked && !usage)
+ ieee80211_wake_queues(wvif->wdev->hw);
spin_unlock_bh(&cache->lock);
}
@@ -218,15 +154,17 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
{
struct tx_policy *policies = wvif->tx_policy_cache.cache;
u8 tmp_rates[12];
- int i;
+ int i, is_used;
do {
spin_lock_bh(&wvif->tx_policy_cache.lock);
- for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
- if (!policies[i].uploaded &&
- memzcmp(policies[i].rates, sizeof(policies[i].rates)))
+ for (i = 0; i < ARRAY_SIZE(wvif->tx_policy_cache.cache); ++i) {
+ is_used = memzcmp(policies[i].rates,
+ sizeof(policies[i].rates));
+ if (!policies[i].uploaded && is_used)
break;
- if (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES) {
+ }
+ if (i < ARRAY_SIZE(wvif->tx_policy_cache.cache)) {
policies[i].uploaded = true;
memcpy(tmp_rates, policies[i].rates, sizeof(tmp_rates));
spin_unlock_bh(&wvif->tx_policy_cache.lock);
@@ -234,7 +172,7 @@ static int wfx_tx_policy_upload(struct wfx_vif *wvif)
} else {
spin_unlock_bh(&wvif->tx_policy_cache.lock);
}
- } while (i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES);
+ } while (i < ARRAY_SIZE(wvif->tx_policy_cache.cache));
return 0;
}
@@ -244,9 +182,7 @@ void wfx_tx_policy_upload_work(struct work_struct *work)
container_of(work, struct wfx_vif, tx_policy_upload_work);
wfx_tx_policy_upload(wvif);
-
wfx_tx_unlock(wvif->wdev);
- wfx_tx_queues_unlock(wvif->wdev);
}
void wfx_tx_policy_init(struct wfx_vif *wvif)
@@ -260,7 +196,7 @@ void wfx_tx_policy_init(struct wfx_vif *wvif)
INIT_LIST_HEAD(&cache->used);
INIT_LIST_HEAD(&cache->free);
- for (i = 0; i < HIF_MIB_NUM_TX_RATE_RETRY_POLICIES; ++i)
+ for (i = 0; i < ARRAY_SIZE(cache->cache); ++i)
list_add(&cache->cache[i].link, &cache->free);
}
@@ -281,16 +217,11 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
struct wfx_tx_priv *tx_priv,
struct ieee80211_sta *sta)
{
- u32 mask = ~BIT(tx_priv->raw_link_id);
struct wfx_sta_priv *sta_priv;
int tid = ieee80211_get_tid(hdr);
- spin_lock_bh(&wvif->ps_state_lock);
- if (ieee80211_is_auth(hdr->frame_control))
- wvif->sta_asleep_mask &= mask;
- spin_unlock_bh(&wvif->ps_state_lock);
-
if (sta) {
+ tx_priv->has_sta = true;
sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
spin_lock_bh(&sta_priv->lock);
sta_priv->buffered[tid]++;
@@ -299,9 +230,8 @@ static void wfx_tx_manage_pm(struct wfx_vif *wvif, struct ieee80211_hdr *hdr,
}
}
-static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
- struct ieee80211_sta *sta,
- struct ieee80211_hdr *hdr)
+static u8 wfx_tx_get_link_id(struct wfx_vif *wvif, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr)
{
struct wfx_sta_priv *sta_priv =
sta ? (struct wfx_sta_priv *)&sta->drv_priv : NULL;
@@ -313,7 +243,7 @@ static u8 wfx_tx_get_raw_link_id(struct wfx_vif *wvif,
return 0;
if (is_multicast_ether_addr(da))
return 0;
- return WFX_LINK_ID_NO_ASSOC;
+ return HIF_LINK_ID_NOT_ASSOCIATED;
}
static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
@@ -358,7 +288,8 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
if (rates[i].idx == -1) {
rates[i].idx = 0;
rates[i].count = 8; // == hw->max_rate_tries
- rates[i].flags = rates[i - 1].flags & IEEE80211_TX_RC_MCS;
+ rates[i].flags = rates[i - 1].flags &
+ IEEE80211_TX_RC_MCS;
break;
}
}
@@ -375,24 +306,19 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
rate_id = wfx_tx_policy_get(wvif,
tx_info->driver_rates, &tx_policy_renew);
- if (rate_id == WFX_INVALID_RATE_ID)
+ if (rate_id == HIF_TX_RETRY_POLICY_INVALID)
dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
if (tx_policy_renew) {
- /* FIXME: It's not so optimal to stop TX queues every now and
- * then. Better to reimplement task scheduling with a counter.
- */
wfx_tx_lock(wvif->wdev);
- wfx_tx_queues_lock(wvif->wdev);
- if (!schedule_work(&wvif->tx_policy_upload_work)) {
- wfx_tx_queues_unlock(wvif->wdev);
+ if (!schedule_work(&wvif->tx_policy_upload_work))
wfx_tx_unlock(wvif->wdev);
- }
}
return rate_id;
}
-static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev, struct ieee80211_tx_info *tx_info)
+static struct hif_ht_tx_parameters wfx_tx_get_tx_parms(struct wfx_dev *wdev,
+ struct ieee80211_tx_info *tx_info)
{
struct ieee80211_tx_rate *rate = &tx_info->driver_rates[0];
struct hif_ht_tx_parameters ret = { };
@@ -429,7 +355,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int queue_id = tx_info->hw_queue;
+ int queue_id = skb_get_queue_mapping(skb);
size_t offset = (size_t)skb->data & 3;
int wmsg_len = sizeof(struct hif_msg) +
sizeof(struct hif_req_tx) + offset;
@@ -441,14 +367,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
// Fill tx_priv
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
- tx_priv->raw_link_id = wfx_tx_get_raw_link_id(wvif, sta, hdr);
- tx_priv->link_id = tx_priv->raw_link_id;
if (ieee80211_has_protected(hdr->frame_control))
tx_priv->hw_key = hw_key;
- if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
- tx_priv->link_id = WFX_LINK_ID_AFTER_DTIM;
- if (sta && (sta->uapsd_queues & BIT(queue_id)))
- tx_priv->link_id = WFX_LINK_ID_UAPSD;
// Fill hif_msg
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
@@ -461,7 +381,8 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
hif_msg->id = HIF_REQ_ID_TX;
hif_msg->interface = wvif->id;
if (skb->len > wvif->wdev->hw_caps.size_inp_ch_buf) {
- dev_warn(wvif->wdev->dev, "requested frame size (%d) is larger than maximum supported (%d)\n",
+ dev_warn(wvif->wdev->dev,
+ "requested frame size (%d) is larger than maximum supported (%d)\n",
skb->len, wvif->wdev->hw_caps.size_inp_ch_buf);
skb_pull(skb, wmsg_len);
return -EIO;
@@ -472,13 +393,14 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// packet_id just needs to be unique on the device. 32 bits are more than
// necessary for that task, so we take advantage of it to add some extra
// data for debug.
- req->packet_id = queue_id << 28 |
- IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16 |
- (atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF);
+ req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
+ req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
+ req->packet_id |= queue_id << 28;
+
req->data_flags.fc_offset = offset;
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
req->data_flags.after_dtim = 1;
- req->queue_id.peer_sta_id = tx_priv->raw_link_id;
+ req->queue_id.peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
// Queue indexes are inverted between the firmware and Linux
req->queue_id.queue_id = 3 - queue_id;
req->ht_tx_parameters = wfx_tx_get_tx_parms(wvif->wdev, tx_info);
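For reference, the bit layout packed into packet_id by the lines above is: bits 31:28 queue id, bits 27:16 IEEE 802.11 sequence number, bits 15:0 running counter. A tiny, invented debug helper that unpacks it (illustration only):

#include <linux/device.h>
#include <linux/types.h>

/* Invented helper: unpack the debug fields packed into packet_id above. */
static void demo_dump_packet_id(struct device *dev, u32 packet_id)
{
	dev_dbg(dev, "queue=%u seq=%u counter=%u\n",
		packet_id >> 28,
		(packet_id >> 16) & 0xFFF,
		packet_id & 0xFFFF);
}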
@@ -486,7 +408,7 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
// Auxiliary operations
wfx_tx_manage_pm(wvif, hdr, tx_priv, sta);
- wfx_tx_queue_put(wvif->wdev, &wvif->wdev->tx_queue[queue_id], skb);
+ wfx_tx_queues_put(wvif->wdev, skb);
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
schedule_work(&wvif->update_tim_work);
wfx_bh_request_tx(wvif->wdev);
@@ -528,28 +450,59 @@ drop:
ieee80211_tx_status_irqsafe(wdev->hw, skb);
}
-void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
+static struct ieee80211_hdr *wfx_skb_hdr80211(struct sk_buff *skb)
{
- int i;
- int tx_count;
- struct sk_buff *skb;
- struct ieee80211_tx_rate *rate;
- struct ieee80211_tx_info *tx_info;
- const struct wfx_tx_priv *tx_priv;
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
- skb = wfx_pending_get(wvif->wdev, arg->packet_id);
- if (!skb) {
- dev_warn(wvif->wdev->dev,
- "received unknown packet_id (%#.8x) from chip\n",
- arg->packet_id);
- return;
+ return (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
+}
+
+static void wfx_tx_update_sta(struct wfx_vif *wvif, struct ieee80211_hdr *hdr)
+{
+ int tid = ieee80211_get_tid(hdr);
+ struct wfx_sta_priv *sta_priv;
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock(); // protect sta
+ sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
+ spin_lock_bh(&sta_priv->lock);
+ WARN(!sta_priv->buffered[tid], "inconsistent notification");
+ sta_priv->buffered[tid]--;
+ if (!sta_priv->buffered[tid])
+ ieee80211_sta_set_buffered(sta, tid, false);
+ spin_unlock_bh(&sta_priv->lock);
+ } else {
+ dev_dbg(wvif->wdev->dev, "%s: sta does not exist anymore\n",
+ __func__);
}
- tx_info = IEEE80211_SKB_CB(skb);
- tx_priv = wfx_skb_tx_priv(skb);
- _trace_tx_stats(arg, skb,
- wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+ rcu_read_unlock();
+}
+
+static void wfx_skb_dtor(struct wfx_vif *wvif, struct sk_buff *skb)
+{
+ struct hif_msg *hif = (struct hif_msg *)skb->data;
+ struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
+ unsigned int offset = sizeof(struct hif_msg) +
+ sizeof(struct hif_req_tx) +
+ req->data_flags.fc_offset;
+
+ WARN_ON(!wvif);
+ wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
+ skb_pull(skb, offset);
+ ieee80211_tx_status_irqsafe(wvif->wdev->hw, skb);
+}
+
+static void wfx_tx_fill_rates(struct wfx_dev *wdev,
+ struct ieee80211_tx_info *tx_info,
+ const struct hif_cnf_tx *arg)
+{
+ struct ieee80211_tx_rate *rate;
+ int tx_count;
+ int i;
- // You can touch to tx_priv, but don't touch to tx_info->status.
tx_count = arg->ack_failures;
if (!arg->status || arg->ack_failures)
tx_count += 1; // Also report success
@@ -558,16 +511,14 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
if (rate->idx < 0)
break;
if (tx_count < rate->count &&
- arg->status == HIF_STATUS_RETRY_EXCEEDED &&
+ arg->status == HIF_STATUS_TX_FAIL_RETRIES &&
arg->ack_failures)
- dev_dbg(wvif->wdev->dev, "all retries were not consumed: %d != %d\n",
+ dev_dbg(wdev->dev, "all retries were not consumed: %d != %d\n",
rate->count, tx_count);
if (tx_count <= rate->count && tx_count &&
- arg->txed_rate != wfx_get_hw_rate(wvif->wdev, rate))
- dev_dbg(wvif->wdev->dev,
- "inconsistent tx_info rates: %d != %d\n",
- arg->txed_rate,
- wfx_get_hw_rate(wvif->wdev, rate));
+ arg->txed_rate != wfx_get_hw_rate(wdev, rate))
+ dev_dbg(wdev->dev, "inconsistent tx_info rates: %d != %d\n",
+ arg->txed_rate, wfx_get_hw_rate(wdev, rate));
if (tx_count > rate->count) {
tx_count -= rate->count;
} else if (!tx_count) {
@@ -579,8 +530,30 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
}
}
if (tx_count)
- dev_dbg(wvif->wdev->dev,
- "%d more retries than expected\n", tx_count);
+ dev_dbg(wdev->dev, "%d more retries than expected\n", tx_count);
+}
+
+void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
+{
+ struct ieee80211_tx_info *tx_info;
+ const struct wfx_tx_priv *tx_priv;
+ struct sk_buff *skb;
+
+ skb = wfx_pending_get(wvif->wdev, arg->packet_id);
+ if (!skb) {
+ dev_warn(wvif->wdev->dev, "received unknown packet_id (%#.8x) from chip\n",
+ arg->packet_id);
+ return;
+ }
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_priv = wfx_skb_tx_priv(skb);
+ _trace_tx_stats(arg, skb,
+ wfx_pending_get_pkt_us_delay(wvif->wdev, skb));
+
+ // You can touch tx_priv, but do not touch tx_info->status.
+ wfx_tx_fill_rates(wvif->wdev, tx_info, arg);
+ if (tx_priv->has_sta)
+ wfx_tx_update_sta(wvif, wfx_skb_hdr80211(skb));
skb_trim(skb, skb->len - wfx_tx_get_icv_len(tx_priv->hw_key));
// From now, you can touch to tx_info->status, but do not touch to
@@ -590,63 +563,64 @@ void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg)
memset(tx_info->pad, 0, sizeof(tx_info->pad));
if (!arg->status) {
- if (wvif->bss_loss_state &&
- arg->packet_id == wvif->bss_loss_confirm_id)
- wfx_cqm_bssloss_sm(wvif, 0, 1, 0);
tx_info->status.tx_time =
- arg->media_delay - arg->tx_queue_delay;
+ le32_to_cpu(arg->media_delay) -
+ le32_to_cpu(arg->tx_queue_delay);
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
tx_info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
tx_info->flags |= IEEE80211_TX_STAT_ACK;
- } else if (arg->status == HIF_REQUEUE) {
- WARN(!arg->tx_result_flags.requeue, "incoherent status and result_flags");
+ } else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
+ WARN(!arg->tx_result_flags.requeue,
+ "incoherent status and result_flags");
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
schedule_work(&wvif->update_tim_work);
}
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
- } else {
- if (wvif->bss_loss_state &&
- arg->packet_id == wvif->bss_loss_confirm_id)
- wfx_cqm_bssloss_sm(wvif, 0, 0, 1);
}
- wfx_pending_remove(wvif->wdev, skb);
+ wfx_skb_dtor(wvif, skb);
}
-static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_sta *sta;
- struct wfx_sta_priv *sta_priv;
- int tid = ieee80211_get_tid(hdr);
+ struct wfx_dev *wdev = hw->priv;
+ struct sk_buff_head dropped;
+ struct wfx_queue *queue;
+ struct wfx_vif *wvif;
+ struct hif_msg *hif;
+ struct sk_buff *skb;
+ int vif_id = -1;
+ int i;
- rcu_read_lock(); // protect sta
- sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
- if (sta) {
- sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
- spin_lock_bh(&sta_priv->lock);
- WARN(!sta_priv->buffered[tid], "inconsistent notification");
- sta_priv->buffered[tid]--;
- if (!sta_priv->buffered[tid])
- ieee80211_sta_set_buffered(sta, tid, false);
- spin_unlock_bh(&sta_priv->lock);
+ if (vif)
+ vif_id = ((struct wfx_vif *)vif->drv_priv)->id;
+ skb_queue_head_init(&dropped);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (!(BIT(i) & queues))
+ continue;
+ queue = &wdev->tx_queue[i];
+ if (drop)
+ wfx_tx_queue_drop(wdev, queue, vif_id, &dropped);
+ if (wdev->chip_frozen)
+ continue;
+ if (wait_event_timeout(wdev->tx_dequeue,
+ wfx_tx_queue_empty(wdev, queue, vif_id),
+ msecs_to_jiffies(1000)) <= 0)
+ dev_warn(wdev->dev,
+ "frames queued while flushing tx queues?");
+ }
+ wfx_tx_flush(wdev);
+ if (wdev->chip_frozen)
+ wfx_pending_drop(wdev, &dropped);
+ while ((skb = skb_dequeue(&dropped)) != NULL) {
+ hif = (struct hif_msg *)skb->data;
+ wvif = wdev_to_wvif(wdev, hif->interface);
+ if (wfx_skb_tx_priv(skb)->has_sta)
+ wfx_tx_update_sta(wvif, wfx_skb_hdr80211(skb));
+ ieee80211_tx_info_clear_status(IEEE80211_SKB_CB(skb));
+ wfx_skb_dtor(wvif, skb);
}
- rcu_read_unlock();
}
-void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
-{
- struct hif_msg *hif = (struct hif_msg *)skb->data;
- struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
- struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
- unsigned int offset = sizeof(struct hif_req_tx) +
- sizeof(struct hif_msg) +
- req->data_flags.fc_offset;
-
- WARN_ON(!wvif);
- skb_pull(skb, offset);
- wfx_notify_buffered_tx(wvif, skb);
- wfx_tx_policy_put(wvif, req->tx_flags.retry_policy_index);
- ieee80211_tx_status_irqsafe(wdev->hw, skb);
-}
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index c545dd75449b..54fff24508fb 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -26,7 +26,7 @@ struct tx_policy {
};
struct tx_policy_cache {
- struct tx_policy cache[HIF_MIB_NUM_TX_RATE_RETRY_POLICIES];
+ struct tx_policy cache[HIF_TX_RETRY_POLICY_MAX];
// FIXME: use a trees and drop hash from tx_policy
struct list_head used;
struct list_head free;
@@ -36,8 +36,7 @@ struct tx_policy_cache {
struct wfx_tx_priv {
ktime_t xmit_timestamp;
struct ieee80211_key_conf *hw_key;
- u8 link_id;
- u8 raw_link_id;
+ bool has_sta;
} __packed;
void wfx_tx_policy_init(struct wfx_vif *wvif);
@@ -46,7 +45,8 @@ void wfx_tx_policy_upload_work(struct work_struct *work);
void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
void wfx_tx_confirm_cb(struct wfx_vif *wvif, const struct hif_cnf_tx *arg);
-void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
static inline struct wfx_tx_priv *wfx_skb_tx_priv(struct sk_buff *skb)
{
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index 1164aba118a1..10d649985696 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -61,19 +61,26 @@ const char *get_reg_name(unsigned long id)
static int wfx_counters_show(struct seq_file *seq, void *v)
{
- int ret;
+ int ret, i;
struct wfx_dev *wdev = seq->private;
- struct hif_mib_extended_count_table counters;
+ struct hif_mib_extended_count_table counters[3];
+
+ for (i = 0; i < ARRAY_SIZE(counters); i++) {
+ ret = hif_get_counters_table(wdev, i, counters + i);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -EIO;
+ }
- ret = hif_get_counters_table(wdev, &counters);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return -EIO;
+ seq_printf(seq, "%-24s %12s %12s %12s\n",
+ "", "global", "iface 0", "iface 1");
#define PUT_COUNTER(name) \
- seq_printf(seq, "%24s %d\n", #name ":",\
- le32_to_cpu(counters.count_##name))
+ seq_printf(seq, "%-24s %12d %12d %12d\n", #name, \
+ le32_to_cpu(counters[2].count_##name), \
+ le32_to_cpu(counters[0].count_##name), \
+ le32_to_cpu(counters[1].count_##name))
PUT_COUNTER(tx_packets);
PUT_COUNTER(tx_multicast_frames);
@@ -105,6 +112,12 @@ static int wfx_counters_show(struct seq_file *seq, void *v)
#undef PUT_COUNTER
+ for (i = 0; i < ARRAY_SIZE(counters[0].reserved); i++)
+ seq_printf(seq, "reserved[%02d]%12s %12d %12d %12d\n", i, "",
+ le32_to_cpu(counters[2].reserved[i]),
+ le32_to_cpu(counters[0].reserved[i]),
+ le32_to_cpu(counters[1].reserved[i]));
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wfx_counters);
@@ -142,7 +155,7 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
mutex_lock(&wdev->rx_stats_lock);
seq_printf(seq, "Timestamp: %dus\n", st->date);
seq_printf(seq, "Low power clock: frequency %uHz, external %s\n",
- st->pwr_clk_freq,
+ le32_to_cpu(st->pwr_clk_freq),
st->is_ext_pwr_clk ? "yes" : "no");
seq_printf(seq,
"Num. of frames: %d, PER (x10e4): %d, Throughput: %dKbps/s\n",
@@ -152,9 +165,12 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
for (i = 0; i < ARRAY_SIZE(channel_names); i++) {
if (channel_names[i])
seq_printf(seq, "%5s %8d %8d %8d %8d %8d\n",
- channel_names[i], st->nb_rx_by_rate[i],
- st->per[i], st->rssi[i] / 100,
- st->snr[i] / 100, st->cfo[i]);
+ channel_names[i],
+ le32_to_cpu(st->nb_rx_by_rate[i]),
+ le16_to_cpu(st->per[i]),
+ (s16)le16_to_cpu(st->rssi[i]) / 100,
+ (s16)le16_to_cpu(st->snr[i]) / 100,
+ (s16)le16_to_cpu(st->cfo[i]));
}
mutex_unlock(&wdev->rx_stats_lock);
@@ -162,6 +178,30 @@ static int wfx_rx_stats_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(wfx_rx_stats);
+static int wfx_tx_power_loop_show(struct seq_file *seq, void *v)
+{
+ struct wfx_dev *wdev = seq->private;
+ struct hif_tx_power_loop_info *st = &wdev->tx_power_loop_info;
+ int tmp;
+
+ mutex_lock(&wdev->tx_power_loop_info_lock);
+ tmp = le16_to_cpu(st->tx_gain_dig);
+ seq_printf(seq, "Tx gain digital: %d\n", tmp);
+ tmp = le16_to_cpu(st->tx_gain_pa);
+ seq_printf(seq, "Tx gain PA: %d\n", tmp);
+ tmp = (s16)le16_to_cpu(st->target_pout);
+ seq_printf(seq, "Target Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
+ tmp = (s16)le16_to_cpu(st->p_estimation);
+ seq_printf(seq, "FEM Pout: %d.%02d dBm\n", tmp / 4, (tmp % 4) * 25);
+ tmp = le16_to_cpu(st->vpdet);
+ seq_printf(seq, "Vpdet: %d mV\n", tmp);
+ seq_printf(seq, "Measure index: %d\n", st->measurement_index);
+ mutex_unlock(&wdev->tx_power_loop_info_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wfx_tx_power_loop);
+
static ssize_t wfx_send_pds_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -234,7 +274,7 @@ static ssize_t wfx_send_hif_msg_write(struct file *file,
request = memdup_user(user_buf, count);
if (IS_ERR(request))
return PTR_ERR(request);
- if (request->len != count) {
+ if (le16_to_cpu(request->len) != count) {
kfree(request);
return -EINVAL;
}
@@ -301,6 +341,8 @@ int wfx_debug_init(struct wfx_dev *wdev)
d = debugfs_create_dir("wfx", wdev->hw->wiphy->debugfsdir);
debugfs_create_file("counters", 0444, d, wdev, &wfx_counters_fops);
debugfs_create_file("rx_stats", 0444, d, wdev, &wfx_rx_stats_fops);
+ debugfs_create_file("tx_power_loop", 0444, d, wdev,
+ &wfx_tx_power_loop_fops);
debugfs_create_file("send_pds", 0200, d, wdev, &wfx_send_pds_fops);
debugfs_create_file("burn_slk_key", 0200, d, wdev,
&wfx_burn_slk_key_fops);
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index 9d61082c1e6c..72bb3d2a9613 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -99,16 +99,16 @@ static int sram_write_dma_safe(struct wfx_dev *wdev, u32 addr, const u8 *buf,
return ret;
}
-int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
- const struct firmware **fw, int *file_offset)
+static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
+ const struct firmware **fw, int *file_offset)
{
int keyset_file;
char filename[256];
const char *data;
int ret;
- snprintf(filename, sizeof(filename), "%s_%02X.sec", wdev->pdata.file_fw,
- keyset_chip);
+ snprintf(filename, sizeof(filename), "%s_%02X.sec",
+ wdev->pdata.file_fw, keyset_chip);
ret = firmware_request_nowarn(fw, filename, wdev->dev);
if (ret) {
dev_info(wdev->dev, "can't load %s, falling back to %s.sec\n",
@@ -325,8 +325,8 @@ static int init_gpr(struct wfx_dev *wdev)
gpr_init[i].value);
if (ret < 0)
return ret;
- dev_dbg(wdev->dev, " index %02x: %08x\n", gpr_init[i].index,
- gpr_init[i].value);
+ dev_dbg(wdev->dev, " index %02x: %08x\n",
+ gpr_init[i].index, gpr_init[i].value);
}
return 0;
}
@@ -360,7 +360,7 @@ int wfx_init_device(struct wfx_dev *wdev)
dev_dbg(wdev->dev, "initial config register value: %08x\n", reg);
hw_revision = FIELD_GET(CFG_DEVICE_ID_MAJOR, reg);
- if (hw_revision == 0 || hw_revision > 2) {
+ if (hw_revision == 0) {
dev_err(wdev->dev, "bad hardware revision number: %d\n",
hw_revision);
return -ENODEV;
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index 071b71e2a107..21cde19cff75 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -10,56 +10,54 @@
#include "hif_api_general.h"
-#define HIF_NUM_AC 4
-
#define HIF_API_SSID_SIZE API_SSID_SIZE
enum hif_requests_ids {
- HIF_REQ_ID_RESET = 0x0a,
- HIF_REQ_ID_READ_MIB = 0x05,
- HIF_REQ_ID_WRITE_MIB = 0x06,
- HIF_REQ_ID_START_SCAN = 0x07,
- HIF_REQ_ID_STOP_SCAN = 0x08,
- HIF_REQ_ID_TX = 0x04,
- HIF_REQ_ID_JOIN = 0x0b,
- HIF_REQ_ID_SET_PM_MODE = 0x10,
- HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
- HIF_REQ_ID_ADD_KEY = 0x0c,
- HIF_REQ_ID_REMOVE_KEY = 0x0d,
- HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
- HIF_REQ_ID_START = 0x17,
- HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
- HIF_REQ_ID_UPDATE_IE = 0x1b,
- HIF_REQ_ID_MAP_LINK = 0x1c,
+ HIF_REQ_ID_RESET = 0x0a,
+ HIF_REQ_ID_READ_MIB = 0x05,
+ HIF_REQ_ID_WRITE_MIB = 0x06,
+ HIF_REQ_ID_START_SCAN = 0x07,
+ HIF_REQ_ID_STOP_SCAN = 0x08,
+ HIF_REQ_ID_TX = 0x04,
+ HIF_REQ_ID_JOIN = 0x0b,
+ HIF_REQ_ID_SET_PM_MODE = 0x10,
+ HIF_REQ_ID_SET_BSS_PARAMS = 0x11,
+ HIF_REQ_ID_ADD_KEY = 0x0c,
+ HIF_REQ_ID_REMOVE_KEY = 0x0d,
+ HIF_REQ_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_REQ_ID_START = 0x17,
+ HIF_REQ_ID_BEACON_TRANSMIT = 0x18,
+ HIF_REQ_ID_UPDATE_IE = 0x1b,
+ HIF_REQ_ID_MAP_LINK = 0x1c,
};
enum hif_confirmations_ids {
- HIF_CNF_ID_RESET = 0x0a,
- HIF_CNF_ID_READ_MIB = 0x05,
- HIF_CNF_ID_WRITE_MIB = 0x06,
- HIF_CNF_ID_START_SCAN = 0x07,
- HIF_CNF_ID_STOP_SCAN = 0x08,
- HIF_CNF_ID_TX = 0x04,
- HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
- HIF_CNF_ID_JOIN = 0x0b,
- HIF_CNF_ID_SET_PM_MODE = 0x10,
- HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
- HIF_CNF_ID_ADD_KEY = 0x0c,
- HIF_CNF_ID_REMOVE_KEY = 0x0d,
- HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
- HIF_CNF_ID_START = 0x17,
- HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
- HIF_CNF_ID_UPDATE_IE = 0x1b,
- HIF_CNF_ID_MAP_LINK = 0x1c,
+ HIF_CNF_ID_RESET = 0x0a,
+ HIF_CNF_ID_READ_MIB = 0x05,
+ HIF_CNF_ID_WRITE_MIB = 0x06,
+ HIF_CNF_ID_START_SCAN = 0x07,
+ HIF_CNF_ID_STOP_SCAN = 0x08,
+ HIF_CNF_ID_TX = 0x04,
+ HIF_CNF_ID_MULTI_TRANSMIT = 0x1e,
+ HIF_CNF_ID_JOIN = 0x0b,
+ HIF_CNF_ID_SET_PM_MODE = 0x10,
+ HIF_CNF_ID_SET_BSS_PARAMS = 0x11,
+ HIF_CNF_ID_ADD_KEY = 0x0c,
+ HIF_CNF_ID_REMOVE_KEY = 0x0d,
+ HIF_CNF_ID_EDCA_QUEUE_PARAMS = 0x13,
+ HIF_CNF_ID_START = 0x17,
+ HIF_CNF_ID_BEACON_TRANSMIT = 0x18,
+ HIF_CNF_ID_UPDATE_IE = 0x1b,
+ HIF_CNF_ID_MAP_LINK = 0x1c,
};
enum hif_indications_ids {
- HIF_IND_ID_RX = 0x84,
- HIF_IND_ID_SCAN_CMPL = 0x86,
- HIF_IND_ID_JOIN_COMPLETE = 0x8f,
- HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
- HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
- HIF_IND_ID_EVENT = 0x85
+ HIF_IND_ID_RX = 0x84,
+ HIF_IND_ID_SCAN_CMPL = 0x86,
+ HIF_IND_ID_JOIN_COMPLETE = 0x8f,
+ HIF_IND_ID_SET_PM_MODE_CMPL = 0x89,
+ HIF_IND_ID_SUSPEND_RESUME_TX = 0x8c,
+ HIF_IND_ID_EVENT = 0x85
};
union hif_commands_ids {
@@ -68,27 +66,11 @@ union hif_commands_ids {
enum hif_indications_ids indication;
};
-enum hif_status {
- HIF_STATUS_SUCCESS = 0x0,
- HIF_STATUS_FAILURE = 0x1,
- HIF_INVALID_PARAMETER = 0x2,
- HIF_STATUS_WARNING = 0x3,
- HIF_ERROR_UNSUPPORTED_MSG_ID = 0x4,
- HIF_STATUS_DECRYPTFAILURE = 0x10,
- HIF_STATUS_MICFAILURE = 0x11,
- HIF_STATUS_NO_KEY_FOUND = 0x12,
- HIF_STATUS_RETRY_EXCEEDED = 0x13,
- HIF_STATUS_TX_LIFETIME_EXCEEDED = 0x14,
- HIF_REQUEUE = 0x15,
- HIF_STATUS_REFUSED = 0x16,
- HIF_STATUS_BUSY = 0x17
-};
-
struct hif_reset_flags {
- u8 reset_stat:1;
- u8 reset_all_int:1;
- u8 reserved1:6;
- u8 reserved2[3];
+ u8 reset_stat:1;
+ u8 reset_all_int:1;
+ u8 reserved1:6;
+ u8 reserved2[3];
} __packed;
struct hif_req_reset {
@@ -96,117 +78,101 @@ struct hif_req_reset {
} __packed;
struct hif_req_read_mib {
- u16 mib_id;
- u16 reserved;
+ __le16 mib_id;
+ __le16 reserved;
} __packed;
struct hif_cnf_read_mib {
- u32 status;
- u16 mib_id;
- u16 length;
- u8 mib_data[];
+ __le32 status;
+ __le16 mib_id;
+ __le16 length;
+ u8 mib_data[];
} __packed;
struct hif_req_write_mib {
- u16 mib_id;
- u16 length;
- u8 mib_data[];
+ __le16 mib_id;
+ __le16 length;
+ u8 mib_data[];
} __packed;
struct hif_cnf_write_mib {
- u32 status;
+ __le32 status;
} __packed;
struct hif_ie_flags {
- u8 beacon:1;
- u8 probe_resp:1;
- u8 probe_req:1;
- u8 reserved1:5;
- u8 reserved2;
+ u8 beacon:1;
+ u8 probe_resp:1;
+ u8 probe_req:1;
+ u8 reserved1:5;
+ u8 reserved2;
} __packed;
struct hif_ie_tlv {
- u8 type;
- u8 length;
- u8 data[];
+ u8 type;
+ u8 length;
+ u8 data[];
} __packed;
struct hif_req_update_ie {
struct hif_ie_flags ie_flags;
- u16 num_ies;
+ __le16 num_ies;
struct hif_ie_tlv ie[];
} __packed;
struct hif_cnf_update_ie {
- u32 status;
+ __le32 status;
} __packed;
struct hif_scan_type {
- u8 type:1;
- u8 mode:1;
- u8 reserved:6;
+ u8 type:1;
+ u8 mode:1;
+ u8 reserved:6;
} __packed;
struct hif_scan_flags {
- u8 fbg:1;
- u8 reserved1:1;
- u8 pre:1;
- u8 reserved2:5;
+ u8 fbg:1;
+ u8 reserved1:1;
+ u8 pre:1;
+ u8 reserved2:5;
} __packed;
struct hif_auto_scan_param {
- u16 interval;
- u8 reserved;
+ __le16 interval;
+ u8 reserved;
s8 rssi_thr;
} __packed;
struct hif_ssid_def {
- u32 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
+ __le32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
} __packed;
#define HIF_API_MAX_NB_SSIDS 2
#define HIF_API_MAX_NB_CHANNELS 14
-struct hif_req_start_scan {
- u8 band;
- struct hif_scan_type scan_type;
- struct hif_scan_flags scan_flags;
- u8 max_transmit_rate;
- struct hif_auto_scan_param auto_scan_param;
- u8 num_of_probe_requests;
- u8 probe_delay;
- u8 num_of_ssids;
- u8 num_of_channels;
- u32 min_channel_time;
- u32 max_channel_time;
- s32 tx_power_level;
- u8 ssid_and_channel_lists[];
-} __packed;
-
struct hif_req_start_scan_alt {
- u8 band;
+ u8 band;
struct hif_scan_type scan_type;
struct hif_scan_flags scan_flags;
- u8 max_transmit_rate;
+ u8 max_transmit_rate;
struct hif_auto_scan_param auto_scan_param;
- u8 num_of_probe_requests;
- u8 probe_delay;
- u8 num_of_ssids;
- u8 num_of_channels;
- u32 min_channel_time;
- u32 max_channel_time;
- s32 tx_power_level;
+ u8 num_of_probe_requests;
+ u8 probe_delay;
+ u8 num_of_ssids;
+ u8 num_of_channels;
+ __le32 min_channel_time;
+ __le32 max_channel_time;
+ __le32 tx_power_level; // signed value
struct hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
- u8 channel_list[];
+ u8 channel_list[];
} __packed;
struct hif_cnf_start_scan {
- u32 status;
+ __le32 status;
} __packed;
struct hif_cnf_stop_scan {
- u32 status;
+ __le32 status;
} __packed;
enum hif_pm_mode_status {
@@ -216,10 +182,10 @@ enum hif_pm_mode_status {
};
struct hif_ind_scan_cmpl {
- u32 status;
- u8 pm_mode;
- u8 num_channels_completed;
- u16 reserved;
+ __le32 status;
+ u8 pm_mode;
+ u8 num_channels_completed;
+ __le16 reserved;
} __packed;
enum hif_queue_id {
@@ -241,46 +207,48 @@ enum hif_stbc {
};
struct hif_queue {
- u8 queue_id:2;
- u8 peer_sta_id:4;
- u8 reserved:2;
+ u8 queue_id:2;
+ u8 peer_sta_id:4;
+ u8 reserved:2;
} __packed;
struct hif_data_flags {
- u8 more:1;
- u8 fc_offset:3;
- u8 after_dtim:1;
- u8 reserved:3;
+ u8 more:1;
+ u8 fc_offset:3;
+ u8 after_dtim:1;
+ u8 reserved:3;
} __packed;
struct hif_tx_flags {
- u8 start_exp:1;
- u8 reserved:3;
- u8 retry_policy_index:4;
+ u8 start_exp:1;
+ u8 reserved:3;
+ u8 retry_policy_index:4;
} __packed;
struct hif_ht_tx_parameters {
- u8 frame_format:4;
- u8 fec_coding:1;
- u8 short_gi:1;
- u8 reserved1:1;
- u8 stbc:1;
- u8 reserved2;
- u8 aggregation:1;
- u8 reserved3:7;
- u8 reserved4;
+ u8 frame_format:4;
+ u8 fec_coding:1;
+ u8 short_gi:1;
+ u8 reserved1:1;
+ u8 stbc:1;
+ u8 reserved2;
+ u8 aggregation:1;
+ u8 reserved3:7;
+ u8 reserved4;
} __packed;
struct hif_req_tx {
- u32 packet_id;
- u8 max_tx_rate;
+ // packet_id is not interpreted by the device, so it is not necessary to
+ // declare it little endian
+ u32 packet_id;
+ u8 max_tx_rate;
struct hif_queue queue_id;
struct hif_data_flags data_flags;
struct hif_tx_flags tx_flags;
- u32 reserved;
- u32 expire_time;
+ __le32 reserved;
+ __le32 expire_time;
struct hif_ht_tx_parameters ht_tx_parameters;
- u8 frame[];
+ u8 frame[];
} __packed;
enum hif_qos_ackplcy {
@@ -291,26 +259,29 @@ enum hif_qos_ackplcy {
};
struct hif_tx_result_flags {
- u8 aggr:1;
- u8 requeue:1;
- u8 ack_policy:2;
- u8 txop_limit:1;
- u8 reserved1:3;
- u8 reserved2;
+ u8 aggr:1;
+ u8 requeue:1;
+ u8 ack_policy:2;
+ u8 txop_limit:1;
+ u8 reserved1:3;
+ u8 reserved2;
} __packed;
struct hif_cnf_tx {
- u32 status;
- u32 packet_id;
- u8 txed_rate;
- u8 ack_failures;
+ __le32 status;
+ // packet_id is copied from struct hif_req_tx without being interpreted
+ // by the device, so it is not necessary to declare it little endian
+ u32 packet_id;
+ u8 txed_rate;
+ u8 ack_failures;
struct hif_tx_result_flags tx_result_flags;
- u32 media_delay;
- u32 tx_queue_delay;
+ __le32 media_delay;
+ __le32 tx_queue_delay;
} __packed;
struct hif_cnf_multi_transmit {
- u32 num_tx_confs;
+ u8 num_tx_confs;
+ u8 reserved[3];
struct hif_cnf_tx tx_conf_payload[];
} __packed;
@@ -323,147 +294,150 @@ enum hif_ri_flags_encrypt {
};
struct hif_rx_flags {
- u8 encryp:3;
- u8 in_aggr:1;
- u8 first_aggr:1;
- u8 last_aggr:1;
- u8 defrag:1;
- u8 beacon:1;
- u8 tim:1;
- u8 bitmap:1;
- u8 match_ssid:1;
- u8 match_bssid:1;
- u8 more:1;
- u8 reserved1:1;
- u8 ht:1;
- u8 stbc:1;
- u8 match_uc_addr:1;
- u8 match_mc_addr:1;
- u8 match_bc_addr:1;
- u8 key_type:1;
- u8 key_index:4;
- u8 reserved2:1;
- u8 peer_sta_id:4;
- u8 reserved3:2;
- u8 reserved4:1;
+ u8 encryp:3;
+ u8 in_aggr:1;
+ u8 first_aggr:1;
+ u8 last_aggr:1;
+ u8 defrag:1;
+ u8 beacon:1;
+ u8 tim:1;
+ u8 bitmap:1;
+ u8 match_ssid:1;
+ u8 match_bssid:1;
+ u8 more:1;
+ u8 reserved1:1;
+ u8 ht:1;
+ u8 stbc:1;
+ u8 match_uc_addr:1;
+ u8 match_mc_addr:1;
+ u8 match_bc_addr:1;
+ u8 key_type:1;
+ u8 key_index:4;
+ u8 reserved2:1;
+ u8 peer_sta_id:4;
+ u8 reserved3:2;
+ u8 reserved4:1;
} __packed;
struct hif_ind_rx {
- u32 status;
- u16 channel_number;
- u8 rxed_rate;
- u8 rcpi_rssi;
+ __le32 status;
+ u8 channel_number;
+ u8 reserved;
+ u8 rxed_rate;
+ u8 rcpi_rssi;
struct hif_rx_flags rx_flags;
- u8 frame[];
+ u8 frame[];
} __packed;
struct hif_req_edca_queue_params {
- u8 queue_id;
- u8 reserved1;
- u8 aifsn;
- u8 reserved2;
- u16 cw_min;
- u16 cw_max;
- u16 tx_op_limit;
- u16 allowed_medium_time;
- u32 reserved3;
+ u8 queue_id;
+ u8 reserved1;
+ u8 aifsn;
+ u8 reserved2;
+ __le16 cw_min;
+ __le16 cw_max;
+ __le16 tx_op_limit;
+ __le16 allowed_medium_time;
+ __le32 reserved3;
} __packed;
struct hif_cnf_edca_queue_params {
- u32 status;
+ __le32 status;
} __packed;
struct hif_join_flags {
- u8 reserved1:2;
- u8 force_no_beacon:1;
- u8 force_with_ind:1;
- u8 reserved2:4;
+ u8 reserved1:2;
+ u8 force_no_beacon:1;
+ u8 force_with_ind:1;
+ u8 reserved2:4;
} __packed;
struct hif_req_join {
- u8 infrastructure_bss_mode:1;
- u8 reserved1:7;
- u8 band;
- u16 channel_number;
- u8 bssid[ETH_ALEN];
- u16 atim_window;
- u8 short_preamble:1;
- u8 reserved2:7;
- u8 probe_for_join;
- u8 reserved3;
+ u8 infrastructure_bss_mode:1;
+ u8 reserved1:7;
+ u8 band;
+ u8 channel_number;
+ u8 reserved;
+ u8 bssid[ETH_ALEN];
+ __le16 atim_window;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 probe_for_join;
+ u8 reserved3;
struct hif_join_flags join_flags;
- u32 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
- u32 beacon_interval;
- u32 basic_rate_set;
+ __le32 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ __le32 beacon_interval;
+ __le32 basic_rate_set;
} __packed;
struct hif_cnf_join {
- u32 status;
+ __le32 status;
} __packed;
struct hif_ind_join_complete {
- u32 status;
+ __le32 status;
} __packed;
struct hif_bss_flags {
- u8 lost_count_only:1;
- u8 reserved:7;
+ u8 lost_count_only:1;
+ u8 reserved:7;
} __packed;
struct hif_req_set_bss_params {
struct hif_bss_flags bss_flags;
- u8 beacon_lost_count;
- u16 aid;
- u32 operational_rate_set;
+ u8 beacon_lost_count;
+ __le16 aid;
+ __le32 operational_rate_set;
} __packed;
struct hif_cnf_set_bss_params {
- u32 status;
+ __le32 status;
} __packed;
struct hif_pm_mode {
- u8 enter_psm:1;
- u8 reserved:6;
- u8 fast_psm:1;
+ u8 enter_psm:1;
+ u8 reserved:6;
+ u8 fast_psm:1;
} __packed;
struct hif_req_set_pm_mode {
struct hif_pm_mode pm_mode;
- u8 fast_psm_idle_period;
- u8 ap_psm_change_period;
- u8 min_auto_ps_poll_period;
+ u8 fast_psm_idle_period;
+ u8 ap_psm_change_period;
+ u8 min_auto_ps_poll_period;
} __packed;
struct hif_cnf_set_pm_mode {
- u32 status;
+ __le32 status;
} __packed;
struct hif_ind_set_pm_mode_cmpl {
- u32 status;
- u8 pm_mode;
- u8 reserved[3];
+ __le32 status;
+ u8 pm_mode;
+ u8 reserved[3];
} __packed;
struct hif_req_start {
- u8 mode;
- u8 band;
- u16 channel_number;
- u32 reserved1;
- u32 beacon_interval;
- u8 dtim_period;
- u8 short_preamble:1;
- u8 reserved2:7;
- u8 reserved3;
- u8 ssid_length;
- u8 ssid[HIF_API_SSID_SIZE];
- u32 basic_rate_set;
+ u8 mode;
+ u8 band;
+ u8 channel_number;
+ u8 reserved1;
+ __le32 reserved2;
+ __le32 beacon_interval;
+ u8 dtim_period;
+ u8 short_preamble:1;
+ u8 reserved3:7;
+ u8 reserved4;
+ u8 ssid_length;
+ u8 ssid[HIF_API_SSID_SIZE];
+ __le32 basic_rate_set;
} __packed;
struct hif_cnf_start {
- u32 status;
+ __le32 status;
} __packed;
enum hif_beacon {
@@ -472,46 +446,49 @@ enum hif_beacon {
};
struct hif_req_beacon_transmit {
- u8 enable_beaconing;
- u8 reserved[3];
+ u8 enable_beaconing;
+ u8 reserved[3];
} __packed;
struct hif_cnf_beacon_transmit {
- u32 status;
+ __le32 status;
} __packed;
+#define HIF_LINK_ID_MAX 14
+#define HIF_LINK_ID_NOT_ASSOCIATED (HIF_LINK_ID_MAX + 1)
+
enum hif_sta_map_direction {
HIF_STA_MAP = 0x0,
HIF_STA_UNMAP = 0x1
};
struct hif_map_link_flags {
- u8 map_direction:1;
- u8 mfpc:1;
- u8 reserved:6;
+ u8 map_direction:1;
+ u8 mfpc:1;
+ u8 reserved:6;
} __packed;
struct hif_req_map_link {
- u8 mac_addr[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
struct hif_map_link_flags map_link_flags;
- u8 peer_sta_id;
+ u8 peer_sta_id;
} __packed;
struct hif_cnf_map_link {
- u32 status;
+ __le32 status;
} __packed;
struct hif_suspend_resume_flags {
- u8 resume:1;
- u8 reserved1:2;
- u8 bc_mc_only:1;
- u8 reserved2:4;
- u8 reserved3;
+ u8 resume:1;
+ u8 reserved1:2;
+ u8 bc_mc_only:1;
+ u8 reserved2:4;
+ u8 reserved3;
} __packed;
struct hif_ind_suspend_resume_tx {
struct hif_suspend_resume_flags suspend_resume_flags;
- u16 peer_sta_set;
+ __le16 peer_sta_set;
} __packed;
@@ -541,102 +518,102 @@ enum hif_key_type {
};
struct hif_wep_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 reserved;
- u8 key_length;
- u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 reserved;
+ u8 key_length;
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
} __packed;
struct hif_wep_group_key {
- u8 key_id;
- u8 key_length;
- u8 reserved[2];
- u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 key_length;
+ u8 reserved[2];
+ u8 key_data[HIF_API_WEP_KEY_DATA_SIZE];
} __packed;
struct hif_tkip_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 reserved[2];
- u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
- u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
- u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 tx_mic_key[HIF_API_TX_MIC_KEY_SIZE];
} __packed;
struct hif_tkip_group_key {
- u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
- u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
- u8 key_id;
- u8 reserved[3];
- u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+ u8 tkip_key_data[HIF_API_TKIP_KEY_DATA_SIZE];
+ u8 rx_mic_key[HIF_API_RX_MIC_KEY_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
} __packed;
struct hif_aes_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 reserved[2];
- u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 reserved[2];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
} __packed;
struct hif_aes_group_key {
- u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
- u8 key_id;
- u8 reserved[3];
- u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
+ u8 aes_key_data[HIF_API_AES_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 rx_sequence_counter[HIF_API_RX_SEQUENCE_COUNTER_SIZE];
} __packed;
struct hif_wapi_pairwise_key {
- u8 peer_address[ETH_ALEN];
- u8 key_id;
- u8 reserved;
- u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
- u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 peer_address[ETH_ALEN];
+ u8 key_id;
+ u8 reserved;
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
} __packed;
struct hif_wapi_group_key {
- u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
- u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
- u8 key_id;
- u8 reserved[3];
+ u8 wapi_key_data[HIF_API_WAPI_KEY_DATA_SIZE];
+ u8 mic_key_data[HIF_API_MIC_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
} __packed;
struct hif_igtk_group_key {
- u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
- u8 key_id;
- u8 reserved[3];
- u8 ipn[HIF_API_IPN_SIZE];
+ u8 igtk_key_data[HIF_API_IGTK_KEY_DATA_SIZE];
+ u8 key_id;
+ u8 reserved[3];
+ u8 ipn[HIF_API_IPN_SIZE];
} __packed;
union hif_privacy_key_data {
- struct hif_wep_pairwise_key wep_pairwise_key;
- struct hif_wep_group_key wep_group_key;
- struct hif_tkip_pairwise_key tkip_pairwise_key;
- struct hif_tkip_group_key tkip_group_key;
- struct hif_aes_pairwise_key aes_pairwise_key;
- struct hif_aes_group_key aes_group_key;
- struct hif_wapi_pairwise_key wapi_pairwise_key;
- struct hif_wapi_group_key wapi_group_key;
- struct hif_igtk_group_key igtk_group_key;
+ struct hif_wep_pairwise_key wep_pairwise_key;
+ struct hif_wep_group_key wep_group_key;
+ struct hif_tkip_pairwise_key tkip_pairwise_key;
+ struct hif_tkip_group_key tkip_group_key;
+ struct hif_aes_pairwise_key aes_pairwise_key;
+ struct hif_aes_group_key aes_group_key;
+ struct hif_wapi_pairwise_key wapi_pairwise_key;
+ struct hif_wapi_group_key wapi_group_key;
+ struct hif_igtk_group_key igtk_group_key;
};
struct hif_req_add_key {
- u8 type;
- u8 entry_index;
- u8 int_id:2;
- u8 reserved1:6;
- u8 reserved2;
+ u8 type;
+ u8 entry_index;
+ u8 int_id:2;
+ u8 reserved1:6;
+ u8 reserved2;
union hif_privacy_key_data key;
} __packed;
struct hif_cnf_add_key {
- u32 status;
+ __le32 status;
} __packed;
struct hif_req_remove_key {
- u8 entry_index;
- u8 reserved[3];
+ u8 entry_index;
+ u8 reserved[3];
} __packed;
struct hif_cnf_remove_key {
- u32 status;
+ __le32 status;
} __packed;
enum hif_event_ind {
@@ -656,13 +633,13 @@ enum hif_ps_mode_error {
};
union hif_event_data {
- u8 rcpi_rssi;
- u32 ps_mode_error;
- u32 peer_sta_set;
+ u8 rcpi_rssi;
+ __le32 ps_mode_error;
+ __le32 peer_sta_set;
};
struct hif_ind_event {
- u32 event_id;
+ __le32 event_id;
union hif_event_data event_data;
} __packed;
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
index a069c3a21b4d..dba18a7ae919 100644
--- a/drivers/staging/wfx/hif_api_general.h
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -17,13 +17,13 @@
#define __packed __attribute__((__packed__))
#endif
-#define API_SSID_SIZE 32
+#define API_SSID_SIZE 32
-#define HIF_ID_IS_INDICATION 0x80
-#define HIF_COUNTER_MAX 7
+#define HIF_ID_IS_INDICATION 0x80
+#define HIF_COUNTER_MAX 7
struct hif_msg {
- u16 len;
+ __le16 len;
u8 id;
u8 reserved:1;
u8 interface:2;
@@ -33,239 +33,252 @@ struct hif_msg {
} __packed;
enum hif_general_requests_ids {
- HIF_REQ_ID_CONFIGURATION = 0x09,
- HIF_REQ_ID_CONTROL_GPIO = 0x26,
- HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
- HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
- HIF_REQ_ID_SL_CONFIGURE = 0x29,
- HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
- HIF_REQ_ID_PTA_SETTINGS = 0x2b,
- HIF_REQ_ID_PTA_PRIORITY = 0x2c,
- HIF_REQ_ID_PTA_STATE = 0x2d,
- HIF_REQ_ID_SHUT_DOWN = 0x32,
+ HIF_REQ_ID_CONFIGURATION = 0x09,
+ HIF_REQ_ID_CONTROL_GPIO = 0x26,
+ HIF_REQ_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_REQ_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_REQ_ID_SL_CONFIGURE = 0x29,
+ HIF_REQ_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_REQ_ID_PTA_SETTINGS = 0x2b,
+ HIF_REQ_ID_PTA_PRIORITY = 0x2c,
+ HIF_REQ_ID_PTA_STATE = 0x2d,
+ HIF_REQ_ID_SHUT_DOWN = 0x32,
};
enum hif_general_confirmations_ids {
- HIF_CNF_ID_CONFIGURATION = 0x09,
- HIF_CNF_ID_CONTROL_GPIO = 0x26,
- HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
- HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
- HIF_CNF_ID_SL_CONFIGURE = 0x29,
- HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
- HIF_CNF_ID_PTA_SETTINGS = 0x2b,
- HIF_CNF_ID_PTA_PRIORITY = 0x2c,
- HIF_CNF_ID_PTA_STATE = 0x2d,
- HIF_CNF_ID_SHUT_DOWN = 0x32,
+ HIF_CNF_ID_CONFIGURATION = 0x09,
+ HIF_CNF_ID_CONTROL_GPIO = 0x26,
+ HIF_CNF_ID_SET_SL_MAC_KEY = 0x27,
+ HIF_CNF_ID_SL_EXCHANGE_PUB_KEYS = 0x28,
+ HIF_CNF_ID_SL_CONFIGURE = 0x29,
+ HIF_CNF_ID_PREVENT_ROLLBACK = 0x2a,
+ HIF_CNF_ID_PTA_SETTINGS = 0x2b,
+ HIF_CNF_ID_PTA_PRIORITY = 0x2c,
+ HIF_CNF_ID_PTA_STATE = 0x2d,
+ HIF_CNF_ID_SHUT_DOWN = 0x32,
};
enum hif_general_indications_ids {
- HIF_IND_ID_EXCEPTION = 0xe0,
- HIF_IND_ID_STARTUP = 0xe1,
- HIF_IND_ID_WAKEUP = 0xe2,
- HIF_IND_ID_GENERIC = 0xe3,
- HIF_IND_ID_ERROR = 0xe4,
- HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
+ HIF_IND_ID_EXCEPTION = 0xe0,
+ HIF_IND_ID_STARTUP = 0xe1,
+ HIF_IND_ID_WAKEUP = 0xe2,
+ HIF_IND_ID_GENERIC = 0xe3,
+ HIF_IND_ID_ERROR = 0xe4,
+ HIF_IND_ID_SL_EXCHANGE_PUB_KEYS = 0xe5
};
-enum hif_hi_status {
- HI_STATUS_SUCCESS = 0x0000,
- HI_STATUS_FAILURE = 0x0001,
- HI_INVALID_PARAMETER = 0x0002,
- HI_STATUS_GPIO_WARNING = 0x0003,
- HI_ERROR_UNSUPPORTED_MSG_ID = 0x0004,
- SL_MAC_KEY_STATUS_SUCCESS = 0x005A,
- SL_MAC_KEY_STATUS_FAILED_KEY_ALREADY_BURNED = 0x006B,
- SL_MAC_KEY_STATUS_FAILED_RAM_MODE_NOT_ALLOWED = 0x007C,
- SL_MAC_KEY_STATUS_FAILED_UNKNOWN_MODE = 0x008D,
- SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS = 0x009E,
- SL_PUB_KEY_EXCHANGE_STATUS_FAILED = 0x00AF,
- PREVENT_ROLLBACK_CNF_SUCCESS = 0x1234,
- PREVENT_ROLLBACK_CNF_WRONG_MAGIC_WORD = 0x1256
-};
+#define HIF_STATUS_SUCCESS (cpu_to_le32(0x0000))
+#define HIF_STATUS_FAIL (cpu_to_le32(0x0001))
+#define HIF_STATUS_INVALID_PARAMETER (cpu_to_le32(0x0002))
+#define HIF_STATUS_WARNING (cpu_to_le32(0x0003))
+#define HIF_STATUS_UNKNOWN_REQUEST (cpu_to_le32(0x0004))
+#define HIF_STATUS_RX_FAIL_DECRYPT (cpu_to_le32(0x0010))
+#define HIF_STATUS_RX_FAIL_MIC (cpu_to_le32(0x0011))
+#define HIF_STATUS_RX_FAIL_NO_KEY (cpu_to_le32(0x0012))
+#define HIF_STATUS_TX_FAIL_RETRIES (cpu_to_le32(0x0013))
+#define HIF_STATUS_TX_FAIL_TIMEOUT (cpu_to_le32(0x0014))
+#define HIF_STATUS_TX_FAIL_REQUEUE (cpu_to_le32(0x0015))
+#define HIF_STATUS_REFUSED (cpu_to_le32(0x0016))
+#define HIF_STATUS_BUSY (cpu_to_le32(0x0017))
+#define HIF_STATUS_SLK_SET_KEY_SUCCESS (cpu_to_le32(0x005A))
+#define HIF_STATUS_SLK_SET_KEY_ALREADY_BURNED (cpu_to_le32(0x006B))
+#define HIF_STATUS_SLK_SET_KEY_DISALLOWED_MODE (cpu_to_le32(0x007C))
+#define HIF_STATUS_SLK_SET_KEY_UNKNOWN_MODE (cpu_to_le32(0x008D))
+#define HIF_STATUS_SLK_NEGO_SUCCESS (cpu_to_le32(0x009E))
+#define HIF_STATUS_SLK_NEGO_FAILED (cpu_to_le32(0x00AF))
+#define HIF_STATUS_ROLLBACK_SUCCESS (cpu_to_le32(0x1234))
+#define HIF_STATUS_ROLLBACK_FAIL (cpu_to_le32(0x1256))
enum hif_api_rate_index {
- API_RATE_INDEX_B_1MBPS = 0,
- API_RATE_INDEX_B_2MBPS = 1,
- API_RATE_INDEX_B_5P5MBPS = 2,
- API_RATE_INDEX_B_11MBPS = 3,
- API_RATE_INDEX_PBCC_22MBPS = 4,
- API_RATE_INDEX_PBCC_33MBPS = 5,
- API_RATE_INDEX_G_6MBPS = 6,
- API_RATE_INDEX_G_9MBPS = 7,
- API_RATE_INDEX_G_12MBPS = 8,
- API_RATE_INDEX_G_18MBPS = 9,
- API_RATE_INDEX_G_24MBPS = 10,
- API_RATE_INDEX_G_36MBPS = 11,
- API_RATE_INDEX_G_48MBPS = 12,
- API_RATE_INDEX_G_54MBPS = 13,
- API_RATE_INDEX_N_6P5MBPS = 14,
- API_RATE_INDEX_N_13MBPS = 15,
- API_RATE_INDEX_N_19P5MBPS = 16,
- API_RATE_INDEX_N_26MBPS = 17,
- API_RATE_INDEX_N_39MBPS = 18,
- API_RATE_INDEX_N_52MBPS = 19,
- API_RATE_INDEX_N_58P5MBPS = 20,
- API_RATE_INDEX_N_65MBPS = 21,
- API_RATE_NUM_ENTRIES = 22
+ API_RATE_INDEX_B_1MBPS = 0,
+ API_RATE_INDEX_B_2MBPS = 1,
+ API_RATE_INDEX_B_5P5MBPS = 2,
+ API_RATE_INDEX_B_11MBPS = 3,
+ API_RATE_INDEX_PBCC_22MBPS = 4,
+ API_RATE_INDEX_PBCC_33MBPS = 5,
+ API_RATE_INDEX_G_6MBPS = 6,
+ API_RATE_INDEX_G_9MBPS = 7,
+ API_RATE_INDEX_G_12MBPS = 8,
+ API_RATE_INDEX_G_18MBPS = 9,
+ API_RATE_INDEX_G_24MBPS = 10,
+ API_RATE_INDEX_G_36MBPS = 11,
+ API_RATE_INDEX_G_48MBPS = 12,
+ API_RATE_INDEX_G_54MBPS = 13,
+ API_RATE_INDEX_N_6P5MBPS = 14,
+ API_RATE_INDEX_N_13MBPS = 15,
+ API_RATE_INDEX_N_19P5MBPS = 16,
+ API_RATE_INDEX_N_26MBPS = 17,
+ API_RATE_INDEX_N_39MBPS = 18,
+ API_RATE_INDEX_N_52MBPS = 19,
+ API_RATE_INDEX_N_58P5MBPS = 20,
+ API_RATE_INDEX_N_65MBPS = 21,
+ API_RATE_NUM_ENTRIES = 22
};
enum hif_fw_type {
- HIF_FW_TYPE_ETF = 0x0,
- HIF_FW_TYPE_WFM = 0x1,
- HIF_FW_TYPE_WSM = 0x2
+ HIF_FW_TYPE_ETF = 0x0,
+ HIF_FW_TYPE_WFM = 0x1,
+ HIF_FW_TYPE_WSM = 0x2
};
struct hif_capabilities {
- u8 link_mode:2;
- u8 reserved1:6;
- u8 reserved2;
- u8 reserved3;
- u8 reserved4;
+ u8 link_mode:2;
+ u8 reserved1:6;
+ u8 reserved2;
+ u8 reserved3;
+ u8 reserved4;
} __packed;
struct hif_otp_regul_sel_mode_info {
- u8 region_sel_mode:4;
- u8 reserved:4;
+ u8 region_sel_mode:4;
+ u8 reserved:4;
} __packed;
struct hif_otp_phy_info {
- u8 phy1_region:3;
- u8 phy0_region:3;
- u8 otp_phy_ver:2;
+ u8 phy1_region:3;
+ u8 phy0_region:3;
+ u8 otp_phy_ver:2;
} __packed;
-#define API_OPN_SIZE 14
-#define API_UID_SIZE 8
-#define API_DISABLED_CHANNEL_LIST_SIZE 2
-#define API_FIRMWARE_LABEL_SIZE 128
-
struct hif_ind_startup {
- u32 status;
- u16 hardware_id;
- u8 opn[API_OPN_SIZE];
- u8 uid[API_UID_SIZE];
- u16 num_inp_ch_bufs;
- u16 size_inp_ch_buf;
- u8 num_links_ap;
- u8 num_interfaces;
- u8 mac_addr[2][ETH_ALEN];
- u8 api_version_minor;
- u8 api_version_major;
+	// Like the other structs, this one is interpreted as little endian by the
+	// device. However, this struct is also used by the driver. We prefer to
+	// declare it in native order and do the byte swap on reception.
+ __le32 status;
+ u16 hardware_id;
+ u8 opn[14];
+ u8 uid[8];
+ u16 num_inp_ch_bufs;
+ u16 size_inp_ch_buf;
+ u8 num_links_ap;
+ u8 num_interfaces;
+ u8 mac_addr[2][ETH_ALEN];
+ u8 api_version_minor;
+ u8 api_version_major;
struct hif_capabilities capabilities;
- u8 firmware_build;
- u8 firmware_minor;
- u8 firmware_major;
- u8 firmware_type;
- u8 disabled_channel_list[API_DISABLED_CHANNEL_LIST_SIZE];
+ u8 firmware_build;
+ u8 firmware_minor;
+ u8 firmware_major;
+ u8 firmware_type;
+ u8 disabled_channel_list[2];
struct hif_otp_regul_sel_mode_info regul_sel_mode_info;
struct hif_otp_phy_info otp_phy_info;
- u32 supported_rate_mask;
- u8 firmware_label[API_FIRMWARE_LABEL_SIZE];
+ u32 supported_rate_mask;
+ u8 firmware_label[128];
} __packed;
struct hif_ind_wakeup {
} __packed;
struct hif_req_configuration {
- u16 length;
- u8 pds_data[];
+ __le16 length;
+ u8 pds_data[];
} __packed;
struct hif_cnf_configuration {
- u32 status;
+ __le32 status;
} __packed;
enum hif_gpio_mode {
- HIF_GPIO_MODE_D0 = 0x0,
- HIF_GPIO_MODE_D1 = 0x1,
- HIF_GPIO_MODE_OD0 = 0x2,
- HIF_GPIO_MODE_OD1 = 0x3,
- HIF_GPIO_MODE_TRISTATE = 0x4,
- HIF_GPIO_MODE_TOGGLE = 0x5,
- HIF_GPIO_MODE_READ = 0x6
+ HIF_GPIO_MODE_D0 = 0x0,
+ HIF_GPIO_MODE_D1 = 0x1,
+ HIF_GPIO_MODE_OD0 = 0x2,
+ HIF_GPIO_MODE_OD1 = 0x3,
+ HIF_GPIO_MODE_TRISTATE = 0x4,
+ HIF_GPIO_MODE_TOGGLE = 0x5,
+ HIF_GPIO_MODE_READ = 0x6
};
struct hif_req_control_gpio {
- u8 gpio_label;
- u8 gpio_mode;
+ u8 gpio_label;
+ u8 gpio_mode;
} __packed;
-enum hif_gpio_error {
- HIF_GPIO_ERROR_0 = 0x0,
- HIF_GPIO_ERROR_1 = 0x1,
- HIF_GPIO_ERROR_2 = 0x2
-};
-
struct hif_cnf_control_gpio {
- u32 status;
- u32 value;
+ __le32 status;
+ __le32 value;
} __packed;
enum hif_generic_indication_type {
- HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
- HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
- HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2
+ HIF_GENERIC_INDICATION_TYPE_RAW = 0x0,
+ HIF_GENERIC_INDICATION_TYPE_STRING = 0x1,
+ HIF_GENERIC_INDICATION_TYPE_RX_STATS = 0x2,
+ HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO = 0x3,
};
struct hif_rx_stats {
- u32 nb_rx_frame;
- u32 nb_crc_frame;
- u32 per_total;
- u32 throughput;
- u32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
- u16 per[API_RATE_NUM_ENTRIES];
- s16 snr[API_RATE_NUM_ENTRIES];
- s16 rssi[API_RATE_NUM_ENTRIES];
- s16 cfo[API_RATE_NUM_ENTRIES];
- u32 date;
- u32 pwr_clk_freq;
- u8 is_ext_pwr_clk;
+ __le32 nb_rx_frame;
+ __le32 nb_crc_frame;
+ __le32 per_total;
+ __le32 throughput;
+ __le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
+ __le16 per[API_RATE_NUM_ENTRIES];
+ __le16 snr[API_RATE_NUM_ENTRIES]; // signed value
+ __le16 rssi[API_RATE_NUM_ENTRIES]; // signed value
+ __le16 cfo[API_RATE_NUM_ENTRIES]; // signed value
+ __le32 date;
+ __le32 pwr_clk_freq;
+ u8 is_ext_pwr_clk;
s8 current_temp;
} __packed;
+struct hif_tx_power_loop_info {
+ __le16 tx_gain_dig;
+ __le16 tx_gain_pa;
+ __le16 target_pout; // signed value
+ __le16 p_estimation; // signed value
+ __le16 vpdet;
+ u8 measurement_index;
+ u8 reserved;
+} __packed;
+
union hif_indication_data {
- struct hif_rx_stats rx_stats;
- u8 raw_data[1];
+ struct hif_rx_stats rx_stats;
+ struct hif_tx_power_loop_info tx_power_loop_info;
+ u8 raw_data[1];
};
struct hif_ind_generic {
- u32 indication_type;
+ __le32 indication_type;
union hif_indication_data indication_data;
} __packed;
-
-#define HIF_EXCEPTION_DATA_SIZE 124
-
-struct hif_ind_exception {
- u8 data[HIF_EXCEPTION_DATA_SIZE];
-} __packed;
-
-
enum hif_error {
- HIF_ERROR_FIRMWARE_ROLLBACK = 0x0,
- HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x1,
- HIF_ERROR_OUTDATED_SESSION_KEY = 0x2,
- HIF_ERROR_INVALID_SESSION_KEY = 0x3,
- HIF_ERROR_OOR_VOLTAGE = 0x4,
- HIF_ERROR_PDS_VERSION = 0x5,
- HIF_ERROR_OOR_TEMPERATURE = 0x6,
- HIF_ERROR_REQ_DURING_KEY_EXCHANGE = 0x7,
- HIF_ERROR_MULTI_TX_CNF_SECURELINK = 0x8,
- HIF_ERROR_SECURELINK_OVERFLOW = 0x9,
- HIF_ERROR_SECURELINK_DECRYPTION = 0xa
+ HIF_ERROR_FIRMWARE_ROLLBACK = 0x00,
+ HIF_ERROR_FIRMWARE_DEBUG_ENABLED = 0x01,
+ HIF_ERROR_SLK_OUTDATED_SESSION_KEY = 0x02,
+ HIF_ERROR_SLK_SESSION_KEY = 0x03,
+ HIF_ERROR_OOR_VOLTAGE = 0x04,
+ HIF_ERROR_PDS_PAYLOAD = 0x05,
+ HIF_ERROR_OOR_TEMPERATURE = 0x06,
+ HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE = 0x07,
+ HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED = 0x08,
+ HIF_ERROR_SLK_OVERFLOW = 0x09,
+ HIF_ERROR_SLK_DECRYPTION = 0x0a,
+ HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE = 0x0b,
+ HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW = 0x0c,
+ HIF_ERROR_HIF_RX_DATA_TOO_LARGE = 0x0e,
+ HIF_ERROR_HIF_TX_QUEUE_FULL = 0x0d,
+ HIF_ERROR_HIF_BUS = 0x0f,
+ HIF_ERROR_PDS_TESTFEATURE = 0x10,
};
struct hif_ind_error {
- u32 type;
- u8 data[];
+ __le32 type;
+ u8 data[];
+} __packed;
+
+struct hif_ind_exception {
+ __le32 type;
+ u8 data[];
} __packed;
enum hif_secure_link_state {
- SEC_LINK_UNAVAILABLE = 0x0,
- SEC_LINK_RESERVED = 0x1,
- SEC_LINK_EVAL = 0x2,
- SEC_LINK_ENFORCED = 0x3
+ SEC_LINK_UNAVAILABLE = 0x0,
+ SEC_LINK_RESERVED = 0x1,
+ SEC_LINK_EVAL = 0x2,
+ SEC_LINK_ENFORCED = 0x3
};
enum hif_sl_encryption_type {
@@ -282,156 +295,70 @@ struct hif_sl_msg_hdr {
struct hif_sl_msg {
struct hif_sl_msg_hdr hdr;
- u16 len;
- u8 payload[];
+ __le16 len;
+ u8 payload[];
} __packed;
-#define AES_CCM_TAG_SIZE 16
+#define AES_CCM_TAG_SIZE 16
struct hif_sl_tag {
- u8 tag[16];
+ u8 tag[16];
} __packed;
enum hif_sl_mac_key_dest {
- SL_MAC_KEY_DEST_OTP = 0x78,
- SL_MAC_KEY_DEST_RAM = 0x87
+ SL_MAC_KEY_DEST_OTP = 0x78,
+ SL_MAC_KEY_DEST_RAM = 0x87
};
-#define API_KEY_VALUE_SIZE 32
+#define API_KEY_VALUE_SIZE 32
struct hif_req_set_sl_mac_key {
- u8 otp_or_ram;
- u8 key_value[API_KEY_VALUE_SIZE];
+ u8 otp_or_ram;
+ u8 key_value[API_KEY_VALUE_SIZE];
} __packed;
struct hif_cnf_set_sl_mac_key {
- u32 status;
+ __le32 status;
} __packed;
-#define API_HOST_PUB_KEY_SIZE 32
-#define API_HOST_PUB_KEY_MAC_SIZE 64
-
enum hif_sl_session_key_alg {
- HIF_SL_CURVE25519 = 0x01,
- HIF_SL_KDF = 0x02
+ HIF_SL_CURVE25519 = 0x01,
+ HIF_SL_KDF = 0x02
};
+#define API_HOST_PUB_KEY_SIZE 32
+#define API_HOST_PUB_KEY_MAC_SIZE 64
+
struct hif_req_sl_exchange_pub_keys {
- u8 algorithm:2;
- u8 reserved1:6;
- u8 reserved2[3];
- u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
- u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
+ u8 algorithm:2;
+ u8 reserved1:6;
+ u8 reserved2[3];
+ u8 host_pub_key[API_HOST_PUB_KEY_SIZE];
+ u8 host_pub_key_mac[API_HOST_PUB_KEY_MAC_SIZE];
} __packed;
struct hif_cnf_sl_exchange_pub_keys {
- u32 status;
+ __le32 status;
} __packed;
-#define API_NCP_PUB_KEY_SIZE 32
-#define API_NCP_PUB_KEY_MAC_SIZE 64
+#define API_NCP_PUB_KEY_SIZE 32
+#define API_NCP_PUB_KEY_MAC_SIZE 64
struct hif_ind_sl_exchange_pub_keys {
- u32 status;
- u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
- u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
+ __le32 status;
+ u8 ncp_pub_key[API_NCP_PUB_KEY_SIZE];
+ u8 ncp_pub_key_mac[API_NCP_PUB_KEY_MAC_SIZE];
} __packed;
-#define API_ENCR_BMP_SIZE 32
-
struct hif_req_sl_configure {
- u8 encr_bmp[API_ENCR_BMP_SIZE];
- u8 disable_session_key_protection:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ u8 encr_bmp[32];
+ u8 disable_session_key_protection:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
} __packed;
struct hif_cnf_sl_configure {
- u32 status;
-} __packed;
-
-struct hif_req_prevent_rollback {
- u32 magic_word;
-} __packed;
-
-struct hif_cnf_prevent_rollback {
- u32 status;
-} __packed;
-
-enum hif_pta_mode {
- PTA_1W_WLAN_MASTER = 0,
- PTA_1W_COEX_MASTER = 1,
- PTA_2W = 2,
- PTA_3W = 3,
- PTA_4W = 4
-};
-
-enum hif_signal_level {
- SIGNAL_LOW = 0,
- SIGNAL_HIGH = 1
-};
-
-enum hif_coex_type {
- COEX_TYPE_GENERIC = 0,
- COEX_TYPE_BLE = 1
-};
-
-enum hif_grant_state {
- NO_GRANT = 0,
- GRANT = 1
-};
-
-struct hif_req_pta_settings {
- u8 pta_mode;
- u8 request_signal_active_level;
- u8 priority_signal_active_level;
- u8 freq_signal_active_level;
- u8 grant_signal_active_level;
- u8 coex_type;
- u8 default_grant_state;
- u8 simultaneous_rx_accesses;
- u8 priority_sampling_time;
- u8 tx_rx_sampling_time;
- u8 freq_sampling_time;
- u8 grant_valid_time;
- u8 fem_control_time;
- u8 first_slot_time;
- u16 periodic_tx_rx_sampling_time;
- u16 coex_quota;
- u16 wlan_quota;
-} __packed;
-
-struct hif_cnf_pta_settings {
- u32 status;
-} __packed;
-
-enum hif_pta_priority {
- HIF_PTA_PRIORITY_COEX_MAXIMIZED = 0x00000562,
- HIF_PTA_PRIORITY_COEX_HIGH = 0x00000462,
- HIF_PTA_PRIORITY_BALANCED = 0x00001461,
- HIF_PTA_PRIORITY_WLAN_HIGH = 0x00001851,
- HIF_PTA_PRIORITY_WLAN_MAXIMIZED = 0x00001A51
-};
-
-struct hif_req_pta_priority {
- u32 priority;
-} __packed;
-
-struct hif_cnf_pta_priority {
- u32 status;
-} __packed;
-
-enum hif_pta_state {
- PTA_OFF = 0,
- PTA_ON = 1
-};
-
-struct hif_req_pta_state {
- u32 pta_state;
-} __packed;
-
-struct hif_cnf_pta_state {
- u32 status;
+ __le32 status;
} __packed;
#endif
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index 0c67cd4c1593..6f1434795fa8 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -10,175 +10,88 @@
#include "hif_api_general.h"
-#define HIF_API_IPV4_ADDRESS_SIZE 4
-#define HIF_API_IPV6_ADDRESS_SIZE 16
+#define HIF_API_IPV4_ADDRESS_SIZE 4
+#define HIF_API_IPV6_ADDRESS_SIZE 16
enum hif_mib_ids {
- HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
- HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
- HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
- HIF_MIB_ID_CCA_CONFIG = 0x2003,
- HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
- HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
- HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
- HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
- HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
- HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
- HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
- HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
- HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
- HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
- HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
- HIF_MIB_ID_RX_FILTER = 0x201B,
- HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
- HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
- HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
- HIF_MIB_ID_TSF_COUNTER = 0x2031,
- HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
- HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
- HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
- HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
- HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
+ HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE = 0x2000,
+ HIF_MIB_ID_GL_BLOCK_ACK_INFO = 0x2001,
+ HIF_MIB_ID_GL_SET_MULTI_MSG = 0x2002,
+ HIF_MIB_ID_CCA_CONFIG = 0x2003,
+ HIF_MIB_ID_ETHERTYPE_DATAFRAME_CONDITION = 0x2010,
+ HIF_MIB_ID_PORT_DATAFRAME_CONDITION = 0x2011,
+ HIF_MIB_ID_MAGIC_DATAFRAME_CONDITION = 0x2012,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION = 0x2013,
+ HIF_MIB_ID_IPV4_ADDR_DATAFRAME_CONDITION = 0x2014,
+ HIF_MIB_ID_IPV6_ADDR_DATAFRAME_CONDITION = 0x2015,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION = 0x2016,
+ HIF_MIB_ID_CONFIG_DATA_FILTER = 0x2017,
+ HIF_MIB_ID_SET_DATA_FILTERING = 0x2018,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE = 0x2019,
+ HIF_MIB_ID_NS_IP_ADDRESSES_TABLE = 0x201A,
+ HIF_MIB_ID_RX_FILTER = 0x201B,
+ HIF_MIB_ID_BEACON_FILTER_TABLE = 0x201C,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE = 0x201D,
+ HIF_MIB_ID_GRP_SEQ_COUNTER = 0x2030,
+ HIF_MIB_ID_TSF_COUNTER = 0x2031,
+ HIF_MIB_ID_STATISTICS_TABLE = 0x2032,
+ HIF_MIB_ID_COUNTERS_TABLE = 0x2033,
+ HIF_MIB_ID_MAX_TX_POWER_LEVEL = 0x2034,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE = 0x2035,
+ HIF_MIB_ID_DOT11_MAC_ADDRESS = 0x2040,
HIF_MIB_ID_DOT11_MAX_TRANSMIT_MSDU_LIFETIME = 0x2041,
- HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
- HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
- HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
- HIF_MIB_ID_SLOT_TIME = 0x2045,
- HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
- HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
- HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
- HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
- HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
- HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
- HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
- HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
- HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
- HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
- HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
- HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
- HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
- HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
- HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
- HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
- HIF_MIB_ID_BEACON_STATS = 0x2056,
+ HIF_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME = 0x2042,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID = 0x2043,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD = 0x2044,
+ HIF_MIB_ID_SLOT_TIME = 0x2045,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL = 0x2046,
+ HIF_MIB_ID_NON_ERP_PROTECTION = 0x2047,
+ HIF_MIB_ID_TEMPLATE_FRAME = 0x2048,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD = 0x2049,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD = 0x204A,
+ HIF_MIB_ID_BLOCK_ACK_POLICY = 0x204B,
+ HIF_MIB_ID_OVERRIDE_INTERNAL_TX_RATE = 0x204C,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE = 0x204D,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION = 0x204E,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY = 0x204F,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY = 0x2050,
+ HIF_MIB_ID_SET_HT_PROTECTION = 0x2051,
+ HIF_MIB_ID_KEEP_ALIVE_PERIOD = 0x2052,
+ HIF_MIB_ID_ARP_KEEP_ALIVE_PERIOD = 0x2053,
+ HIF_MIB_ID_INACTIVITY_TIMER = 0x2054,
+ HIF_MIB_ID_INTERFACE_PROTECTION = 0x2055,
+ HIF_MIB_ID_BEACON_STATS = 0x2056,
};
-#define HIF_OP_POWER_MODE_MASK 0xf
-
enum hif_op_power_mode {
- HIF_OP_POWER_MODE_ACTIVE = 0x0,
- HIF_OP_POWER_MODE_DOZE = 0x1,
- HIF_OP_POWER_MODE_QUIESCENT = 0x2
+ HIF_OP_POWER_MODE_ACTIVE = 0x0,
+ HIF_OP_POWER_MODE_DOZE = 0x1,
+ HIF_OP_POWER_MODE_QUIESCENT = 0x2
};
struct hif_mib_gl_operational_power_mode {
- u8 power_mode:4;
- u8 reserved1:3;
- u8 wup_ind_activation:1;
- u8 reserved2[3];
-} __packed;
-
-struct hif_mib_gl_block_ack_info {
- u8 rx_buffer_size;
- u8 rx_max_num_agreements;
- u8 tx_buffer_size;
- u8 tx_max_num_agreements;
+ u8 power_mode:4;
+ u8 reserved1:3;
+ u8 wup_ind_activation:1;
+ u8 reserved2[3];
} __packed;
struct hif_mib_gl_set_multi_msg {
- u8 enable_multi_tx_conf:1;
- u8 reserved1:7;
- u8 reserved2[3];
-} __packed;
-
-enum hif_cca_thr_mode {
- HIF_CCA_THR_MODE_RELATIVE = 0x0,
- HIF_CCA_THR_MODE_ABSOLUTE = 0x1
-};
-
-struct hif_mib_gl_cca_config {
- u8 cca_thr_mode;
- u8 reserved[3];
-} __packed;
-
-#define MAX_NUMBER_DATA_FILTERS 0xA
-
-#define MAX_NUMBER_IPV4_ADDR_CONDITIONS 0x4
-#define MAX_NUMBER_IPV6_ADDR_CONDITIONS 0x4
-#define MAX_NUMBER_MAC_ADDR_CONDITIONS 0x4
-#define MAX_NUMBER_UC_MC_BC_CONDITIONS 0x4
-#define MAX_NUMBER_ETHER_TYPE_CONDITIONS 0x4
-#define MAX_NUMBER_PORT_CONDITIONS 0x4
-#define MAX_NUMBER_MAGIC_CONDITIONS 0x4
-#define MAX_NUMBER_ARP_CONDITIONS 0x2
-#define MAX_NUMBER_NS_CONDITIONS 0x2
-
-struct hif_mib_ethertype_data_frame_condition {
- u8 condition_idx;
- u8 reserved;
- u16 ether_type;
-} __packed;
-
-enum hif_udp_tcp_protocol {
- HIF_PROTOCOL_UDP = 0x0,
- HIF_PROTOCOL_TCP = 0x1,
- HIF_PROTOCOL_BOTH_UDP_TCP = 0x2
-};
-
-enum hif_which_port {
- HIF_PORT_DST = 0x0,
- HIF_PORT_SRC = 0x1,
- HIF_PORT_SRC_OR_DST = 0x2
-};
-
-struct hif_mib_ports_data_frame_condition {
- u8 condition_idx;
- u8 protocol;
- u8 which_port;
- u8 reserved1;
- u16 port_number;
- u8 reserved2[2];
-} __packed;
-
-#define HIF_API_MAGIC_PATTERN_SIZE 32
-
-struct hif_mib_magic_data_frame_condition {
- u8 condition_idx;
- u8 offset;
- u8 magic_pattern_length;
- u8 reserved;
- u8 magic_pattern[HIF_API_MAGIC_PATTERN_SIZE];
+ u8 enable_multi_tx_conf:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
} __packed;
enum hif_mac_addr_type {
- HIF_MAC_ADDR_A1 = 0x0,
- HIF_MAC_ADDR_A2 = 0x1,
- HIF_MAC_ADDR_A3 = 0x2
+ HIF_MAC_ADDR_A1 = 0x0,
+ HIF_MAC_ADDR_A2 = 0x1,
+ HIF_MAC_ADDR_A3 = 0x2
};
struct hif_mib_mac_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_type;
- u8 mac_address[ETH_ALEN];
-} __packed;
-
-enum hif_ip_addr_mode {
- HIF_IP_ADDR_SRC = 0x0,
- HIF_IP_ADDR_DST = 0x1
-};
-
-struct hif_mib_ipv4_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_mode;
- u8 reserved[2];
- u8 i_pv4_address[HIF_API_IPV4_ADDRESS_SIZE];
-} __packed;
-
-struct hif_mib_ipv6_addr_data_frame_condition {
- u8 condition_idx;
- u8 address_mode;
- u8 reserved[2];
- u8 i_pv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+ u8 condition_idx;
+ u8 address_type;
+ u8 mac_address[ETH_ALEN];
} __packed;
#define HIF_FILTER_UNICAST 0x1
@@ -186,365 +99,289 @@ struct hif_mib_ipv6_addr_data_frame_condition {
#define HIF_FILTER_BROADCAST 0x4
struct hif_mib_uc_mc_bc_data_frame_condition {
- u8 condition_idx;
- u8 allowed_frames;
- u8 reserved[2];
+ u8 condition_idx;
+ u8 allowed_frames;
+ u8 reserved[2];
} __packed;
struct hif_mib_config_data_filter {
- u8 filter_idx;
- u8 enable;
- u8 reserved1[2];
- u8 eth_type_cond;
- u8 port_cond;
- u8 magic_cond;
- u8 mac_cond;
- u8 ipv4_cond;
- u8 ipv6_cond;
- u8 uc_mc_bc_cond;
- u8 reserved2;
+ u8 filter_idx;
+ u8 enable;
+ u8 reserved1[2];
+ u8 eth_type_cond;
+ u8 port_cond;
+ u8 magic_cond;
+ u8 mac_cond;
+ u8 ipv4_cond;
+ u8 ipv6_cond;
+ u8 uc_mc_bc_cond;
+ u8 reserved2;
} __packed;
struct hif_mib_set_data_filtering {
- u8 invert_matching:1;
- u8 reserved1:7;
- u8 enable:1;
- u8 reserved2:7;
- u8 reserved3[2];
+ u8 invert_matching:1;
+ u8 reserved1:7;
+ u8 enable:1;
+ u8 reserved2:7;
+ u8 reserved3[2];
} __packed;
enum hif_arp_ns_frame_treatment {
- HIF_ARP_NS_FILTERING_DISABLE = 0x0,
- HIF_ARP_NS_FILTERING_ENABLE = 0x1,
- HIF_ARP_NS_REPLY_ENABLE = 0x2
+ HIF_ARP_NS_FILTERING_DISABLE = 0x0,
+ HIF_ARP_NS_FILTERING_ENABLE = 0x1,
+ HIF_ARP_NS_REPLY_ENABLE = 0x2
};
struct hif_mib_arp_ip_addr_table {
- u8 condition_idx;
- u8 arp_enable;
- u8 reserved[2];
- u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
-} __packed;
-
-struct hif_mib_ns_ip_addr_table {
- u8 condition_idx;
- u8 ns_enable;
- u8 reserved[2];
- u8 ipv6_address[HIF_API_IPV6_ADDRESS_SIZE];
+ u8 condition_idx;
+ u8 arp_enable;
+ u8 reserved[2];
+ u8 ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
} __packed;
struct hif_mib_rx_filter {
- u8 reserved1:1;
- u8 bssid_filter:1;
- u8 reserved2:1;
- u8 fwd_probe_req:1;
- u8 keep_alive_filter:1;
- u8 reserved3:3;
- u8 reserved4[3];
+ u8 reserved1:1;
+ u8 bssid_filter:1;
+ u8 reserved2:1;
+ u8 fwd_probe_req:1;
+ u8 keep_alive_filter:1;
+ u8 reserved3:3;
+ u8 reserved4[3];
} __packed;
-#define HIF_API_OUI_SIZE 3
-#define HIF_API_MATCH_DATA_SIZE 3
-
struct hif_ie_table_entry {
- u8 ie_id;
- u8 has_changed:1;
- u8 no_longer:1;
- u8 has_appeared:1;
- u8 reserved:1;
- u8 num_match_data:4;
- u8 oui[HIF_API_OUI_SIZE];
- u8 match_data[HIF_API_MATCH_DATA_SIZE];
+ u8 ie_id;
+ u8 has_changed:1;
+ u8 no_longer:1;
+ u8 has_appeared:1;
+ u8 reserved:1;
+ u8 num_match_data:4;
+ u8 oui[3];
+ u8 match_data[3];
} __packed;
struct hif_mib_bcn_filter_table {
- u32 num_of_info_elmts;
+ __le32 num_of_info_elmts;
struct hif_ie_table_entry ie_table[];
} __packed;
enum hif_beacon_filter {
- HIF_BEACON_FILTER_DISABLE = 0x0,
- HIF_BEACON_FILTER_ENABLE = 0x1,
- HIF_BEACON_FILTER_AUTO_ERP = 0x2
+ HIF_BEACON_FILTER_DISABLE = 0x0,
+ HIF_BEACON_FILTER_ENABLE = 0x1,
+ HIF_BEACON_FILTER_AUTO_ERP = 0x2
};
struct hif_mib_bcn_filter_enable {
- u32 enable;
- u32 bcn_count;
-} __packed;
-
-struct hif_mib_group_seq_counter {
- u32 bits4716;
- u16 bits1500;
- u16 reserved;
-} __packed;
-
-struct hif_mib_tsf_counter {
- u32 tsf_counterlo;
- u32 tsf_counterhi;
-} __packed;
-
-struct hif_mib_stats_table {
- s16 latest_snr;
- u8 latest_rcpi;
- s8 latest_rssi;
+ __le32 enable;
+ __le32 bcn_count;
} __packed;
struct hif_mib_extended_count_table {
- u32 count_plcp_errors;
- u32 count_fcs_errors;
- u32 count_tx_packets;
- u32 count_rx_packets;
- u32 count_rx_packet_errors;
- u32 count_rx_decryption_failures;
- u32 count_rx_mic_failures;
- u32 count_rx_no_key_failures;
- u32 count_tx_multicast_frames;
- u32 count_tx_frames_success;
- u32 count_tx_frame_failures;
- u32 count_tx_frames_retried;
- u32 count_tx_frames_multi_retried;
- u32 count_rx_frame_duplicates;
- u32 count_rts_success;
- u32 count_rts_failures;
- u32 count_ack_failures;
- u32 count_rx_multicast_frames;
- u32 count_rx_frames_success;
- u32 count_rx_cmacicv_errors;
- u32 count_rx_cmac_replays;
- u32 count_rx_mgmt_ccmp_replays;
- u32 count_rx_bipmic_errors;
- u32 count_rx_beacon;
- u32 count_miss_beacon;
- u32 reserved[15];
+ __le32 count_plcp_errors;
+ __le32 count_fcs_errors;
+ __le32 count_tx_packets;
+ __le32 count_rx_packets;
+ __le32 count_rx_packet_errors;
+ __le32 count_rx_decryption_failures;
+ __le32 count_rx_mic_failures;
+ __le32 count_rx_no_key_failures;
+ __le32 count_tx_multicast_frames;
+ __le32 count_tx_frames_success;
+ __le32 count_tx_frame_failures;
+ __le32 count_tx_frames_retried;
+ __le32 count_tx_frames_multi_retried;
+ __le32 count_rx_frame_duplicates;
+ __le32 count_rts_success;
+ __le32 count_rts_failures;
+ __le32 count_ack_failures;
+ __le32 count_rx_multicast_frames;
+ __le32 count_rx_frames_success;
+ __le32 count_rx_cmacicv_errors;
+ __le32 count_rx_cmac_replays;
+ __le32 count_rx_mgmt_ccmp_replays;
+ __le32 count_rx_bipmic_errors;
+ __le32 count_rx_beacon;
+ __le32 count_miss_beacon;
+ __le32 reserved[15];
} __packed;
struct hif_mib_count_table {
- u32 count_plcp_errors;
- u32 count_fcs_errors;
- u32 count_tx_packets;
- u32 count_rx_packets;
- u32 count_rx_packet_errors;
- u32 count_rx_decryption_failures;
- u32 count_rx_mic_failures;
- u32 count_rx_no_key_failures;
- u32 count_tx_multicast_frames;
- u32 count_tx_frames_success;
- u32 count_tx_frame_failures;
- u32 count_tx_frames_retried;
- u32 count_tx_frames_multi_retried;
- u32 count_rx_frame_duplicates;
- u32 count_rts_success;
- u32 count_rts_failures;
- u32 count_ack_failures;
- u32 count_rx_multicast_frames;
- u32 count_rx_frames_success;
- u32 count_rx_cmacicv_errors;
- u32 count_rx_cmac_replays;
- u32 count_rx_mgmt_ccmp_replays;
- u32 count_rx_bipmic_errors;
-} __packed;
-
-struct hif_mib_max_tx_power_level {
- s32 max_tx_power_level_rf_port1;
- s32 max_tx_power_level_rf_port2;
-} __packed;
-
-struct hif_mib_beacon_stats {
- s32 latest_tbtt_diff;
- u32 reserved[4];
+ __le32 count_plcp_errors;
+ __le32 count_fcs_errors;
+ __le32 count_tx_packets;
+ __le32 count_rx_packets;
+ __le32 count_rx_packet_errors;
+ __le32 count_rx_decryption_failures;
+ __le32 count_rx_mic_failures;
+ __le32 count_rx_no_key_failures;
+ __le32 count_tx_multicast_frames;
+ __le32 count_tx_frames_success;
+ __le32 count_tx_frame_failures;
+ __le32 count_tx_frames_retried;
+ __le32 count_tx_frames_multi_retried;
+ __le32 count_rx_frame_duplicates;
+ __le32 count_rts_success;
+ __le32 count_rts_failures;
+ __le32 count_ack_failures;
+ __le32 count_rx_multicast_frames;
+ __le32 count_rx_frames_success;
+ __le32 count_rx_cmacicv_errors;
+ __le32 count_rx_cmac_replays;
+ __le32 count_rx_mgmt_ccmp_replays;
+ __le32 count_rx_bipmic_errors;
} __packed;
struct hif_mib_mac_address {
- u8 mac_addr[ETH_ALEN];
- u16 reserved;
-} __packed;
-
-struct hif_mib_dot11_max_transmit_msdu_lifetime {
- u32 max_life_time;
-} __packed;
-
-struct hif_mib_dot11_max_receive_lifetime {
- u32 max_life_time;
+ u8 mac_addr[ETH_ALEN];
+ __le16 reserved;
} __packed;
struct hif_mib_wep_default_key_id {
- u8 wep_default_key_id;
- u8 reserved[3];
+ u8 wep_default_key_id;
+ u8 reserved[3];
} __packed;
struct hif_mib_dot11_rts_threshold {
- u32 threshold;
+ __le32 threshold;
} __packed;
struct hif_mib_slot_time {
- u32 slot_time;
+ __le32 slot_time;
} __packed;
struct hif_mib_current_tx_power_level {
- s32 power_level;
+ __le32 power_level; // signed value
} __packed;
struct hif_mib_non_erp_protection {
- u8 use_cts_to_self:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ u8 use_cts_to_self:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
} __packed;
enum hif_tmplt {
- HIF_TMPLT_PRBREQ = 0x0,
- HIF_TMPLT_BCN = 0x1,
- HIF_TMPLT_NULL = 0x2,
- HIF_TMPLT_QOSNUL = 0x3,
- HIF_TMPLT_PSPOLL = 0x4,
- HIF_TMPLT_PRBRES = 0x5,
- HIF_TMPLT_ARP = 0x6,
- HIF_TMPLT_NA = 0x7
+ HIF_TMPLT_PRBREQ = 0x0,
+ HIF_TMPLT_BCN = 0x1,
+ HIF_TMPLT_NULL = 0x2,
+ HIF_TMPLT_QOSNUL = 0x3,
+ HIF_TMPLT_PSPOLL = 0x4,
+ HIF_TMPLT_PRBRES = 0x5,
+ HIF_TMPLT_ARP = 0x6,
+ HIF_TMPLT_NA = 0x7
};
-#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
+#define HIF_API_MAX_TEMPLATE_FRAME_SIZE 700
struct hif_mib_template_frame {
- u8 frame_type;
- u8 init_rate:7;
- u8 mode:1;
- u16 frame_length;
- u8 frame[HIF_API_MAX_TEMPLATE_FRAME_SIZE];
+ u8 frame_type;
+ u8 init_rate:7;
+ u8 mode:1;
+ __le16 frame_length;
+ u8 frame[];
} __packed;
struct hif_mib_beacon_wake_up_period {
- u8 wakeup_period_min;
- u8 receive_dtim:1;
- u8 reserved1:7;
- u8 wakeup_period_max;
- u8 reserved2;
+ u8 wakeup_period_min;
+ u8 receive_dtim:1;
+ u8 reserved1:7;
+ u8 wakeup_period_max;
+ u8 reserved2;
} __packed;
struct hif_mib_rcpi_rssi_threshold {
- u8 detection:1;
- u8 rcpi_rssi:1;
- u8 upperthresh:1;
- u8 lowerthresh:1;
- u8 reserved:4;
- u8 lower_threshold;
- u8 upper_threshold;
- u8 rolling_average_count;
+ u8 detection:1;
+ u8 rcpi_rssi:1;
+ u8 upperthresh:1;
+ u8 lowerthresh:1;
+ u8 reserved:4;
+ u8 lower_threshold;
+ u8 upper_threshold;
+ u8 rolling_average_count;
} __packed;
#define DEFAULT_BA_MAX_RX_BUFFER_SIZE 16
struct hif_mib_block_ack_policy {
- u8 block_ack_tx_tid_policy;
- u8 reserved1;
- u8 block_ack_rx_tid_policy;
- u8 block_ack_rx_max_buffer_size;
-} __packed;
-
-struct hif_mib_override_int_rate {
- u8 internal_tx_rate;
- u8 non_erp_internal_tx_rate;
- u8 reserved[2];
+ u8 block_ack_tx_tid_policy;
+ u8 reserved1;
+ u8 block_ack_rx_tid_policy;
+ u8 block_ack_rx_max_buffer_size;
} __packed;
enum hif_mpdu_start_spacing {
- HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
- HIF_MPDU_START_SPACING_QUARTER = 0x1,
- HIF_MPDU_START_SPACING_HALF = 0x2,
- HIF_MPDU_START_SPACING_ONE = 0x3,
- HIF_MPDU_START_SPACING_TWO = 0x4,
- HIF_MPDU_START_SPACING_FOUR = 0x5,
- HIF_MPDU_START_SPACING_EIGHT = 0x6,
- HIF_MPDU_START_SPACING_SIXTEEN = 0x7
+ HIF_MPDU_START_SPACING_NO_RESTRIC = 0x0,
+ HIF_MPDU_START_SPACING_QUARTER = 0x1,
+ HIF_MPDU_START_SPACING_HALF = 0x2,
+ HIF_MPDU_START_SPACING_ONE = 0x3,
+ HIF_MPDU_START_SPACING_TWO = 0x4,
+ HIF_MPDU_START_SPACING_FOUR = 0x5,
+ HIF_MPDU_START_SPACING_EIGHT = 0x6,
+ HIF_MPDU_START_SPACING_SIXTEEN = 0x7
};
struct hif_mib_set_association_mode {
- u8 preambtype_use:1;
- u8 mode:1;
- u8 rateset:1;
- u8 spacing:1;
- u8 reserved1:4;
- u8 short_preamble:1;
- u8 reserved2:7;
- u8 greenfield:1;
- u8 reserved3:7;
- u8 mpdu_start_spacing;
- u32 basic_rate_set;
+ u8 preambtype_use:1;
+ u8 mode:1;
+ u8 rateset:1;
+ u8 spacing:1;
+ u8 reserved1:4;
+ u8 short_preamble:1;
+ u8 reserved2:7;
+ u8 greenfield:1;
+ u8 reserved3:7;
+ u8 mpdu_start_spacing;
+ __le32 basic_rate_set;
} __packed;
struct hif_mib_set_uapsd_information {
- u8 trig_bckgrnd:1;
- u8 trig_be:1;
- u8 trig_video:1;
- u8 trig_voice:1;
- u8 reserved1:4;
- u8 deliv_bckgrnd:1;
- u8 deliv_be:1;
- u8 deliv_video:1;
- u8 deliv_voice:1;
- u8 reserved2:4;
- u16 min_auto_trigger_interval;
- u16 max_auto_trigger_interval;
- u16 auto_trigger_step;
+ u8 trig_bckgrnd:1;
+ u8 trig_be:1;
+ u8 trig_video:1;
+ u8 trig_voice:1;
+ u8 reserved1:4;
+ u8 deliv_bckgrnd:1;
+ u8 deliv_be:1;
+ u8 deliv_video:1;
+ u8 deliv_voice:1;
+ u8 reserved2:4;
+ __le16 min_auto_trigger_interval;
+ __le16 max_auto_trigger_interval;
+ __le16 auto_trigger_step;
} __packed;
struct hif_mib_tx_rate_retry_policy {
- u8 policy_index;
- u8 short_retry_count;
- u8 long_retry_count;
- u8 first_rate_sel:2;
- u8 terminate:1;
- u8 count_init:1;
- u8 reserved1:4;
- u8 rate_recovery_count;
- u8 reserved2[3];
- u8 rates[12];
+ u8 policy_index;
+ u8 short_retry_count;
+ u8 long_retry_count;
+ u8 first_rate_sel:2;
+ u8 terminate:1;
+ u8 count_init:1;
+ u8 reserved1:4;
+ u8 rate_recovery_count;
+ u8 reserved2[3];
+ u8 rates[12];
} __packed;
-#define HIF_MIB_NUM_TX_RATE_RETRY_POLICIES 15
+#define HIF_TX_RETRY_POLICY_MAX 15
+#define HIF_TX_RETRY_POLICY_INVALID HIF_TX_RETRY_POLICY_MAX
struct hif_mib_set_tx_rate_retry_policy {
- u8 num_tx_rate_policies;
- u8 reserved[3];
+ u8 num_tx_rate_policies;
+ u8 reserved[3];
struct hif_mib_tx_rate_retry_policy tx_rate_retry_policy[];
} __packed;
struct hif_mib_protected_mgmt_policy {
- u8 pmf_enable:1;
- u8 unpmf_allowed:1;
- u8 host_enc_auth_frames:1;
- u8 reserved1:5;
- u8 reserved2[3];
-} __packed;
-
-struct hif_mib_set_ht_protection {
- u8 dual_cts_prot:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ u8 pmf_enable:1;
+ u8 unpmf_allowed:1;
+ u8 host_enc_auth_frames:1;
+ u8 reserved1:5;
+ u8 reserved2[3];
} __packed;
struct hif_mib_keep_alive_period {
- u16 keep_alive_period;
- u8 reserved[2];
-} __packed;
-
-struct hif_mib_arp_keep_alive_period {
- u16 arp_keep_alive_period;
- u8 encr_type;
- u8 reserved;
- u8 sender_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
- u8 target_ipv4_address[HIF_API_IPV4_ADDRESS_SIZE];
-} __packed;
-
-struct hif_mib_inactivity_timer {
- u8 min_active_time;
- u8 max_active_time;
- u16 reserved;
-} __packed;
-
-struct hif_mib_interface_protection {
- u8 use_cts_prot:1;
- u8 reserved1:7;
- u8 reserved2[3];
+ __le16 keep_alive_period;
+ u8 reserved[2];
} __packed;
#endif
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index 33c22c5d629d..bb156033d1e1 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -22,9 +22,9 @@ static int hif_generic_confirm(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
// All confirm messages start with status
- int status = le32_to_cpu(*((__le32 *) buf));
+ int status = le32_to_cpup((__le32 *)buf);
int cmd = hif->id;
- int len = hif->len - 4; // drop header
+ int len = le16_to_cpu(hif->len) - 4; // drop header
WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
@@ -100,10 +100,10 @@ static int hif_startup_indication(struct wfx_dev *wdev,
return -EINVAL;
}
memcpy(&wdev->hw_caps, body, sizeof(struct hif_ind_startup));
- le32_to_cpus(&wdev->hw_caps.status);
- le16_to_cpus(&wdev->hw_caps.hardware_id);
- le16_to_cpus(&wdev->hw_caps.num_inp_ch_bufs);
- le16_to_cpus(&wdev->hw_caps.size_inp_ch_buf);
+ le16_to_cpus((__le16 *)&wdev->hw_caps.hardware_id);
+ le16_to_cpus((__le16 *)&wdev->hw_caps.num_inp_ch_bufs);
+ le16_to_cpus((__le16 *)&wdev->hw_caps.size_inp_ch_buf);
+ le32_to_cpus((__le32 *)&wdev->hw_caps.supported_rate_mask);
complete(&wdev->firmware_ready);
return 0;
@@ -127,7 +127,7 @@ static int hif_keys_indication(struct wfx_dev *wdev,
u8 pubkey[API_NCP_PUB_KEY_SIZE];
// SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS is used by legacy secure link
- if (body->status && body->status != SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ if (body->status && body->status != HIF_STATUS_SLK_NEGO_SUCCESS)
		dev_warn(wdev->dev, "secure link negotiation error\n");
memcpy(pubkey, body->ncp_pub_key, sizeof(pubkey));
memreverse(pubkey, sizeof(pubkey));
@@ -158,26 +158,39 @@ static int hif_event_indication(struct wfx_dev *wdev,
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct hif_ind_event *body = buf;
- struct wfx_hif_event *event;
- int first;
+ int type = le32_to_cpu(body->event_id);
+ int cause;
- WARN_ON(!wvif);
- if (!wvif)
+ if (!wvif) {
+ dev_warn(wdev->dev, "received event for non-existent vif\n");
return 0;
+ }
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- memcpy(&event->evt, body, sizeof(struct hif_ind_event));
- spin_lock(&wvif->event_queue_lock);
- first = list_empty(&wvif->event_queue);
- list_add_tail(&event->link, &wvif->event_queue);
- spin_unlock(&wvif->event_queue_lock);
-
- if (first)
- schedule_work(&wvif->event_handler_work);
-
+ switch (type) {
+ case HIF_EVENT_IND_RCPI_RSSI:
+ wfx_event_report_rssi(wvif, body->event_data.rcpi_rssi);
+ break;
+ case HIF_EVENT_IND_BSSLOST:
+ schedule_delayed_work(&wvif->beacon_loss_work, 0);
+ break;
+ case HIF_EVENT_IND_BSSREGAINED:
+ cancel_delayed_work(&wvif->beacon_loss_work);
+ dev_dbg(wdev->dev, "ignore BSSREGAINED indication\n");
+ break;
+ case HIF_EVENT_IND_PS_MODE_ERROR:
+ cause = le32_to_cpu(body->event_data.ps_mode_error);
+ dev_warn(wdev->dev, "error while processing power save request: %d\n",
+ cause);
+ if (cause == HIF_PS_ERROR_AP_NOT_RESP_TO_POLL) {
+ wvif->bss_not_support_ps_poll = true;
+ schedule_work(&wvif->update_pm_work);
+ }
+ break;
+ default:
+ dev_warn(wdev->dev, "unhandled event indication: %.2x\n",
+ type);
+ break;
+ }
return 0;
}
@@ -224,53 +237,21 @@ static int hif_suspend_resume_indication(struct wfx_dev *wdev,
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
const struct hif_ind_suspend_resume_tx *body = buf;
- WARN_ON(!wvif);
- WARN(!body->suspend_resume_flags.bc_mc_only, "unsupported suspend/resume notification");
- if (body->suspend_resume_flags.resume)
- wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
- else
- wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
-
- return 0;
-}
-
-static int hif_error_indication(struct wfx_dev *wdev,
- const struct hif_msg *hif, const void *buf)
-{
- const struct hif_ind_error *body = buf;
- u8 *pRollback = (u8 *) body->data;
- u32 *pStatus = (u32 *) body->data;
-
- switch (body->type) {
- case HIF_ERROR_FIRMWARE_ROLLBACK:
- dev_err(wdev->dev,
- "asynchronous error: firmware rollback error %d\n",
- *pRollback);
- break;
- case HIF_ERROR_FIRMWARE_DEBUG_ENABLED:
- dev_err(wdev->dev, "asynchronous error: firmware debug feature enabled\n");
- break;
- case HIF_ERROR_OUTDATED_SESSION_KEY:
- dev_err(wdev->dev, "asynchronous error: secure link outdated key: %#.8x\n",
- *pStatus);
- break;
- case HIF_ERROR_INVALID_SESSION_KEY:
- dev_err(wdev->dev, "asynchronous error: invalid session key\n");
- break;
- case HIF_ERROR_OOR_VOLTAGE:
- dev_err(wdev->dev, "asynchronous error: out-of-range overvoltage: %#.8x\n",
- *pStatus);
- break;
- case HIF_ERROR_PDS_VERSION:
- dev_err(wdev->dev,
- "asynchronous error: wrong PDS payload or version: %#.8x\n",
- *pStatus);
- break;
- default:
- dev_err(wdev->dev, "asynchronous error: unknown (%d)\n",
- body->type);
- break;
+ if (body->suspend_resume_flags.bc_mc_only) {
+ WARN_ON(!wvif);
+ if (body->suspend_resume_flags.resume)
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_resume_mc(wvif, STA_NOTIFY_SLEEP);
+ } else {
+ WARN(body->peer_sta_set, "misunderstood indication");
+ WARN(hif->interface != 2, "misunderstood indication");
+ if (body->suspend_resume_flags.resume)
+ wfx_suspend_hot_dev(wdev, STA_NOTIFY_AWAKE);
+ else
+ wfx_suspend_hot_dev(wdev, STA_NOTIFY_SLEEP);
}
+
return 0;
}
@@ -278,13 +259,14 @@ static int hif_generic_indication(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
const struct hif_ind_generic *body = buf;
+ int type = le32_to_cpu(body->indication_type);
- switch (body->indication_type) {
+ switch (type) {
case HIF_GENERIC_INDICATION_TYPE_RAW:
return 0;
case HIF_GENERIC_INDICATION_TYPE_STRING:
dev_info(wdev->dev, "firmware says: %s\n",
- (char *) body->indication_data.raw_data);
+ (char *)body->indication_data.raw_data);
return 0;
case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
mutex_lock(&wdev->rx_stats_lock);
@@ -296,22 +278,103 @@ static int hif_generic_indication(struct wfx_dev *wdev,
sizeof(wdev->rx_stats));
mutex_unlock(&wdev->rx_stats_lock);
return 0;
+ case HIF_GENERIC_INDICATION_TYPE_TX_POWER_LOOP_INFO:
+ mutex_lock(&wdev->tx_power_loop_info_lock);
+ memcpy(&wdev->tx_power_loop_info,
+ &body->indication_data.tx_power_loop_info,
+ sizeof(wdev->tx_power_loop_info));
+ mutex_unlock(&wdev->tx_power_loop_info_lock);
+ return 0;
default:
- dev_err(wdev->dev,
- "generic_indication: unknown indication type: %#.8x\n",
- body->indication_type);
+ dev_err(wdev->dev, "generic_indication: unknown indication type: %#.8x\n",
+ type);
return -EIO;
}
}
+static const struct {
+ int val;
+ const char *str;
+ bool has_param;
+} hif_errors[] = {
+ { HIF_ERROR_FIRMWARE_ROLLBACK,
+ "rollback status" },
+ { HIF_ERROR_FIRMWARE_DEBUG_ENABLED,
+ "debug feature enabled" },
+ { HIF_ERROR_PDS_PAYLOAD,
+ "PDS version is not supported" },
+ { HIF_ERROR_PDS_TESTFEATURE,
+	  "PDS asks for an unknown test mode" },
+ { HIF_ERROR_OOR_VOLTAGE,
+ "out-of-range power supply voltage", true },
+ { HIF_ERROR_OOR_TEMPERATURE,
+ "out-of-range temperature", true },
+ { HIF_ERROR_SLK_REQ_DURING_KEY_EXCHANGE,
+ "secure link does not expect request during key exchange" },
+ { HIF_ERROR_SLK_SESSION_KEY,
+ "secure link session key is invalid" },
+ { HIF_ERROR_SLK_OVERFLOW,
+ "secure link overflow" },
+ { HIF_ERROR_SLK_WRONG_ENCRYPTION_STATE,
+ "secure link messages list does not match message encryption" },
+ { HIF_ERROR_HIF_BUS_FREQUENCY_TOO_LOW,
+ "bus clock is too slow (<1kHz)" },
+ { HIF_ERROR_HIF_RX_DATA_TOO_LARGE,
+ "HIF message too large" },
+	// The following errors only exist in old firmware versions:
+ { HIF_ERROR_HIF_TX_QUEUE_FULL,
+ "HIF messages queue is full" },
+ { HIF_ERROR_HIF_BUS,
+ "HIF bus" },
+ { HIF_ERROR_SLK_MULTI_TX_UNSUPPORTED,
+ "secure link does not support multi-tx confirmations" },
+ { HIF_ERROR_SLK_OUTDATED_SESSION_KEY,
+ "secure link session key is outdated" },
+ { HIF_ERROR_SLK_DECRYPTION,
+ "secure link params (nonce or tag) mismatch" },
+};
+
+static int hif_error_indication(struct wfx_dev *wdev,
+ const struct hif_msg *hif, const void *buf)
+{
+ const struct hif_ind_error *body = buf;
+ int type = le32_to_cpu(body->type);
+ int param = (s8)body->data[0];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hif_errors); i++)
+ if (type == hif_errors[i].val)
+ break;
+ if (i < ARRAY_SIZE(hif_errors))
+ if (hif_errors[i].has_param)
+ dev_err(wdev->dev, "asynchronous error: %s: %d\n",
+ hif_errors[i].str, param);
+ else
+ dev_err(wdev->dev, "asynchronous error: %s\n",
+ hif_errors[i].str);
+ else
+ dev_err(wdev->dev, "asynchronous error: unknown: %08x\n", type);
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
+ 16, 1, hif, le16_to_cpu(hif->len), false);
+ wdev->chip_frozen = true;
+
+ return 0;
+};
+
static int hif_exception_indication(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
- size_t len = hif->len - 4; // drop header
+ const struct hif_ind_exception *body = buf;
+ int type = le32_to_cpu(body->type);
- dev_err(wdev->dev, "firmware exception\n");
- print_hex_dump_bytes("Dump: ", DUMP_PREFIX_NONE, buf, len);
- wdev->chip_frozen = 1;
+ if (type == 4)
+ dev_err(wdev->dev, "firmware assert %d\n",
+ le32_to_cpup((__le32 *)body->data));
+ else
+ dev_err(wdev->dev, "firmware exception\n");
+ print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET,
+ 16, 1, hif, le16_to_cpu(hif->len), false);
+ wdev->chip_frozen = true;
return -1;
}
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index 77bca43aca42..893b67f2f792 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -23,8 +23,8 @@ void wfx_init_hif_cmd(struct wfx_hif_cmd *hif_cmd)
mutex_init(&hif_cmd->key_renew_lock);
}
-static void wfx_fill_header(struct hif_msg *hif, int if_id, unsigned int cmd,
- size_t size)
+static void wfx_fill_header(struct hif_msg *hif, int if_id,
+ unsigned int cmd, size_t size)
{
if (if_id == -1)
if_id = 2;
@@ -47,8 +47,8 @@ static void *wfx_alloc_hif(size_t body_len, struct hif_msg **hif)
return NULL;
}
-int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
- size_t reply_len, bool async)
+int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
+ void *reply, size_t reply_len, bool async)
{
const char *mib_name = "";
const char *mib_sep = "";
@@ -82,6 +82,9 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
if (async)
return 0;
+ if (wdev->poll_irq)
+ wfx_bh_poll_irq(wdev);
+
ret = wait_for_completion_timeout(&wdev->hif_cmd.done, 1 * HZ);
if (!ret) {
		dev_err(wdev->dev, "chip is taking abnormally long to answer\n");
@@ -91,7 +94,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
if (!ret) {
dev_err(wdev->dev, "chip did not answer\n");
wfx_pending_dump_old_frames(wdev, 3000);
- wdev->chip_frozen = 1;
+ wdev->chip_frozen = true;
reinit_completion(&wdev->hif_cmd.done);
ret = -ETIMEDOUT;
} else {
@@ -103,7 +106,7 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request, void *reply,
if (ret &&
(cmd == HIF_REQ_ID_READ_MIB || cmd == HIF_REQ_ID_WRITE_MIB)) {
- mib_name = get_mib_name(((u16 *) request)[2]);
+ mib_name = get_mib_name(((u16 *)request)[2]);
mib_sep = "/";
}
if (ret < 0)
@@ -128,7 +131,11 @@ int hif_shutdown(struct wfx_dev *wdev)
int ret;
struct hif_msg *hif;
+ if (wdev->chip_frozen)
+ return 0;
wfx_alloc_hif(0, &hif);
+ if (!hif)
+ return -ENOMEM;
wfx_fill_header(hif, -1, HIF_REQ_ID_SHUT_DOWN, 0);
ret = wfx_cmd_send(wdev, hif, NULL, 0, true);
// After this command, chip won't reply. Be sure to give enough time to
@@ -152,6 +159,8 @@ int hif_configuration(struct wfx_dev *wdev, const u8 *conf, size_t len)
struct hif_msg *hif;
struct hif_req_configuration *body = wfx_alloc_hif(buf_len, &hif);
+ if (!hif)
+ return -ENOMEM;
body->length = cpu_to_le16(len);
memcpy(body->pds_data, conf, len);
wfx_fill_header(hif, -1, HIF_REQ_ID_CONFIGURATION, buf_len);
@@ -166,6 +175,8 @@ int hif_reset(struct wfx_vif *wvif, bool reset_stat)
struct hif_msg *hif;
struct hif_req_reset *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
body->reset_flags.reset_stat = reset_stat;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_RESET, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -173,8 +184,8 @@ int hif_reset(struct wfx_vif *wvif, bool reset_stat)
return ret;
}
-int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
- size_t val_len)
+int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *val, size_t val_len)
{
int ret;
struct hif_msg *hif;
@@ -182,36 +193,43 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
struct hif_req_read_mib *body = wfx_alloc_hif(sizeof(*body), &hif);
struct hif_cnf_read_mib *reply = kmalloc(buf_len, GFP_KERNEL);
+ if (!body || !reply) {
+ ret = -ENOMEM;
+ goto out;
+ }
body->mib_id = cpu_to_le16(mib_id);
wfx_fill_header(hif, vif_id, HIF_REQ_ID_READ_MIB, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, reply, buf_len, false);
- if (!ret && mib_id != reply->mib_id) {
- dev_warn(wdev->dev,
- "%s: confirmation mismatch request\n", __func__);
+ if (!ret && mib_id != le16_to_cpu(reply->mib_id)) {
+ dev_warn(wdev->dev, "%s: confirmation mismatch request\n",
+ __func__);
ret = -EIO;
}
if (ret == -ENOMEM)
- dev_err(wdev->dev,
- "buffer is too small to receive %s (%zu < %d)\n",
- get_mib_name(mib_id), val_len, reply->length);
+ dev_err(wdev->dev, "buffer is too small to receive %s (%zu < %d)\n",
+ get_mib_name(mib_id), val_len,
+ le16_to_cpu(reply->length));
if (!ret)
- memcpy(val, &reply->mib_data, reply->length);
+ memcpy(val, &reply->mib_data, le16_to_cpu(reply->length));
else
memset(val, 0xFF, val_len);
+out:
kfree(hif);
kfree(reply);
return ret;
}
-int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id, void *val,
- size_t val_len)
+int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
+ void *val, size_t val_len)
{
int ret;
struct hif_msg *hif;
int buf_len = sizeof(struct hif_req_write_mib) + val_len;
struct hif_req_write_mib *body = wfx_alloc_hif(buf_len, &hif);
+ if (!hif)
+ return -ENOMEM;
body->mib_id = cpu_to_le16(mib_id);
body->length = cpu_to_le16(val_len);
memcpy(&body->mib_data, val, val_len);
@@ -236,6 +254,8 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
compiletime_assert(IEEE80211_MAX_SSID_LEN == HIF_API_SSID_SIZE,
"API inconsistency");
+ if (!hif)
+ return -ENOMEM;
for (i = 0; i < req->n_ssids; i++) {
memcpy(body->ssid_def[i].ssid, req->ssids[i].ssid,
IEEE80211_MAX_SSID_LEN);
@@ -268,7 +288,7 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
tmo_chan_bg = le32_to_cpu(body->max_channel_time) * USEC_PER_TU;
tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
tmo_chan_fg *= body->num_of_probe_requests;
- tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg);
+ tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -283,6 +303,8 @@ int hif_stop_scan(struct wfx_vif *wvif)
// body associated to HIF_REQ_ID_STOP_SCAN is empty
wfx_alloc_hif(0, &hif);
+ if (!hif)
+ return -ENOMEM;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_STOP_SCAN, 0);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
@@ -296,19 +318,24 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct hif_msg *hif;
struct hif_req_join *body = wfx_alloc_hif(sizeof(*body), &hif);
+ WARN_ON(!conf->beacon_int);
WARN_ON(!conf->basic_rates);
+ WARN_ON(sizeof(body->ssid) < ssidlen);
+ WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
+ if (!hif)
+ return -ENOMEM;
body->infrastructure_bss_mode = !conf->ibss_joined;
body->short_preamble = conf->use_short_preamble;
if (channel && channel->flags & IEEE80211_CHAN_NO_IR)
body->probe_for_join = 0;
else
body->probe_for_join = 1;
- body->channel_number = cpu_to_le16(channel->hw_value);
+ body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set =
cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
- if (!conf->ibss_joined && ssid) {
+ if (ssid) {
body->ssid_length = cpu_to_le32(ssidlen);
memcpy(body->ssid, ssid, ssidlen);
}
@@ -318,17 +345,17 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
return ret;
}
-int hif_set_bss_params(struct wfx_vif *wvif,
- const struct hif_req_set_bss_params *arg)
+int hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count)
{
int ret;
struct hif_msg *hif;
- struct hif_req_set_bss_params *body = wfx_alloc_hif(sizeof(*body),
- &hif);
+ struct hif_req_set_bss_params *body =
+ wfx_alloc_hif(sizeof(*body), &hif);
- memcpy(body, arg, sizeof(*body));
- cpu_to_le16s(&body->aid);
- cpu_to_le32s(&body->operational_rate_set);
+ if (!hif)
+ return -ENOMEM;
+ body->aid = cpu_to_le16(aid);
+ body->beacon_lost_count = beacon_lost_count;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_SET_BSS_PARAMS,
sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -343,6 +370,8 @@ int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg)
// FIXME: only send necessary bits
struct hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
// FIXME: swap bytes as necessary in body
memcpy(body, arg, sizeof(*body));
if (wfx_api_older_than(wdev, 1, 5))
@@ -363,6 +392,8 @@ int hif_remove_key(struct wfx_dev *wdev, int idx)
struct hif_msg *hif;
struct hif_req_remove_key *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
body->entry_index = idx;
wfx_fill_header(hif, -1, HIF_REQ_ID_REMOVE_KEY, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
@@ -382,6 +413,8 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
return -ENOMEM;
WARN_ON(arg->aifs > 255);
+ if (!hif)
+ return -ENOMEM;
body->aifsn = arg->aifs;
body->cw_min = cpu_to_le16(arg->cw_min);
body->cw_max = cpu_to_le16(arg->cw_max);
@@ -408,6 +441,8 @@ int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
if (!body)
return -ENOMEM;
+ if (!hif)
+ return -ENOMEM;
if (ps) {
body->pm_mode.enter_psm = 1;
// Firmware does not support more than 128ms
@@ -428,9 +463,12 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct hif_msg *hif;
struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
+ WARN_ON(!conf->beacon_int);
+ if (!hif)
+ return -ENOMEM;
body->dtim_period = conf->dtim_period;
body->short_preamble = conf->use_short_preamble;
- body->channel_number = cpu_to_le16(channel->hw_value);
+ body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set =
cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
@@ -449,6 +487,8 @@ int hif_beacon_transmit(struct wfx_vif *wvif, bool enable)
struct hif_req_beacon_transmit *body = wfx_alloc_hif(sizeof(*body),
&hif);
+ if (!hif)
+ return -ENOMEM;
body->enable_beaconing = enable ? 1 : 0;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_BEACON_TRANSMIT,
sizeof(*body));
@@ -463,9 +503,11 @@ int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id)
struct hif_msg *hif;
struct hif_req_map_link *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
if (mac_addr)
ether_addr_copy(body->mac_addr, mac_addr);
- body->map_link_flags = *(struct hif_map_link_flags *) &flags;
+ body->map_link_flags = *(struct hif_map_link_flags *)&flags;
body->peer_sta_id = sta_id;
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_MAP_LINK, sizeof(*body));
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -480,6 +522,8 @@ int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
int buf_len = sizeof(struct hif_req_update_ie) + ies_len;
struct hif_req_update_ie *body = wfx_alloc_hif(buf_len, &hif);
+ if (!hif)
+ return -ENOMEM;
body->ie_flags.beacon = 1;
body->num_ies = cpu_to_le16(1);
memcpy(body->ie, ies, ies_len);
@@ -489,14 +533,16 @@ int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len)
return ret;
}
-int hif_sl_send_pub_keys(struct wfx_dev *wdev, const uint8_t *pubkey,
- const uint8_t *pubkey_hmac)
+int hif_sl_send_pub_keys(struct wfx_dev *wdev,
+ const u8 *pubkey, const u8 *pubkey_hmac)
{
int ret;
struct hif_msg *hif;
struct hif_req_sl_exchange_pub_keys *body = wfx_alloc_hif(sizeof(*body),
&hif);
+ if (!hif)
+ return -ENOMEM;
body->algorithm = HIF_SL_CURVE25519;
memcpy(body->host_pub_key, pubkey, sizeof(body->host_pub_key));
memcpy(body->host_pub_key_mac, pubkey_hmac,
@@ -506,7 +552,7 @@ int hif_sl_send_pub_keys(struct wfx_dev *wdev, const uint8_t *pubkey,
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
// Compatibility with legacy secure link
- if (ret == SL_PUB_KEY_EXCHANGE_STATUS_SUCCESS)
+ if (ret == le32_to_cpu(HIF_STATUS_SLK_NEGO_SUCCESS))
ret = 0;
return ret;
}
@@ -517,6 +563,8 @@ int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
struct hif_msg *hif;
struct hif_req_sl_configure *body = wfx_alloc_hif(sizeof(*body), &hif);
+ if (!hif)
+ return -ENOMEM;
memcpy(body->encr_bmp, bitmap, sizeof(body->encr_bmp));
wfx_fill_header(hif, -1, HIF_REQ_ID_SL_CONFIGURE, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
@@ -524,21 +572,22 @@ int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap)
return ret;
}
-int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
- int destination)
+int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key, int destination)
{
int ret;
struct hif_msg *hif;
struct hif_req_set_sl_mac_key *body = wfx_alloc_hif(sizeof(*body),
&hif);
+ if (!hif)
+ return -ENOMEM;
memcpy(body->key_value, slk_key, sizeof(body->key_value));
body->otp_or_ram = destination;
wfx_fill_header(hif, -1, HIF_REQ_ID_SET_SL_MAC_KEY, sizeof(*body));
ret = wfx_cmd_send(wdev, hif, NULL, 0, false);
kfree(hif);
// Compatibility with legacy secure link
- if (ret == SL_MAC_KEY_STATUS_SUCCESS)
+ if (ret == le32_to_cpu(HIF_STATUS_SLK_SET_KEY_SUCCESS))
ret = 0;
return ret;
}
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index f8520a14c14c..e9eca9330178 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -10,12 +10,11 @@
#ifndef WFX_HIF_TX_H
#define WFX_HIF_TX_H
-#include "hif_api_cmd.h"
-
struct ieee80211_channel;
struct ieee80211_bss_conf;
struct ieee80211_tx_queue_params;
struct cfg80211_scan_request;
+struct hif_req_add_key;
struct wfx_dev;
struct wfx_vif;
@@ -48,8 +47,7 @@ int hif_stop_scan(struct wfx_vif *wvif);
int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
-int hif_set_bss_params(struct wfx_vif *wvif,
- const struct hif_req_set_bss_params *arg);
+int hif_set_bss_params(struct wfx_vif *wvif, int aid, int beacon_lost_count);
int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg);
int hif_remove_key(struct wfx_dev *wdev, int idx);
int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
@@ -59,8 +57,8 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
int hif_beacon_transmit(struct wfx_vif *wvif, bool enable);
int hif_map_link(struct wfx_vif *wvif, u8 *mac_addr, int flags, int sta_id);
int hif_update_ie_beacon(struct wfx_vif *wvif, const u8 *ies, size_t ies_len);
-int hif_sl_set_mac_key(struct wfx_dev *wdev, const u8 *slk_key,
- int destination);
+int hif_sl_set_mac_key(struct wfx_dev *wdev,
+ const u8 *slk_key, int destination);
int hif_sl_config(struct wfx_dev *wdev, const unsigned long *bitmap);
int hif_sl_send_pub_keys(struct wfx_dev *wdev,
const u8 *pubkey, const u8 *pubkey_hmac);
diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c
new file mode 100644
index 000000000000..1689cb42acc0
--- /dev/null
+++ b/drivers/staging/wfx/hif_tx_mib.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
+ *
+ * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
+ * Copyright (c) 2010, ST-Ericsson
+ * Copyright (C) 2010, ST-Ericsson SA
+ */
+
+#include <linux/etherdevice.h>
+
+#include "wfx.h"
+#include "hif_tx.h"
+#include "hif_tx_mib.h"
+#include "hif_api_mib.h"
+
+int hif_set_output_power(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_current_tx_power_level arg = {
+ .power_level = cpu_to_le32(val * 10),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
+ &arg, sizeof(arg));
+}
+
+int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval)
+{
+ struct hif_mib_beacon_wake_up_period val = {
+ .wakeup_period_min = dtim_interval,
+ .receive_dtim = 0,
+ .wakeup_period_max = listen_interval,
+ };
+
+ if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+ return -EINVAL;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
+ &val, sizeof(val));
+}
+
+int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ int rssi_thold, int rssi_hyst)
+{
+ struct hif_mib_rcpi_rssi_threshold arg = {
+ .rolling_average_count = 8,
+ .detection = 1,
+ };
+
+ if (!rssi_thold && !rssi_hyst) {
+ arg.upperthresh = 1;
+ arg.lowerthresh = 1;
+ } else {
+ arg.upper_threshold = rssi_thold + rssi_hyst;
+ arg.upper_threshold = (arg.upper_threshold + 110) * 2;
+ arg.lower_threshold = rssi_thold;
+ arg.lower_threshold = (arg.lower_threshold + 110) * 2;
+ }
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
+}
+
+int hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
+ struct hif_mib_extended_count_table *arg)
+{
+ if (wfx_api_older_than(wdev, 1, 3)) {
+ // extended_count_table is wider than count_table
+ memset(arg, 0xFF, sizeof(*arg));
+ return hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE,
+ arg, sizeof(struct hif_mib_count_table));
+ } else {
+ return hif_read_mib(wdev, vif_id,
+ HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
+ sizeof(struct hif_mib_extended_count_table));
+ }
+}
+
+int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
+{
+ struct hif_mib_mac_address msg = { };
+
+ if (mac)
+ ether_addr_copy(msg.mac_addr, mac);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
+ &msg, sizeof(msg));
+}
+
+int hif_set_rx_filter(struct wfx_vif *wvif,
+ bool filter_bssid, bool filter_prbreq)
+{
+ struct hif_mib_rx_filter val = { };
+
+ if (filter_bssid)
+ val.bssid_filter = 1;
+ if (!filter_prbreq)
+ val.fwd_probe_req = 1;
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
+ &val, sizeof(val));
+}
+
+int hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
+ const struct hif_ie_table_entry *tbl)
+{
+ int ret;
+ struct hif_mib_bcn_filter_table *val;
+ int buf_len = struct_size(val, ie_table, tbl_len);
+
+ val = kzalloc(buf_len, GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+ val->num_of_info_elmts = cpu_to_le32(tbl_len);
+ memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
+ kfree(val);
+ return ret;
+}
+
+int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count)
+{
+ struct hif_mib_bcn_filter_enable arg = {
+ .enable = cpu_to_le32(enable),
+ .bcn_count = cpu_to_le32(beacon_count),
+ };
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_BEACON_FILTER_ENABLE,
+ &arg, sizeof(arg));
+}
+
+int hif_set_operational_mode(struct wfx_dev *wdev, enum hif_op_power_mode mode)
+{
+ struct hif_mib_gl_operational_power_mode val = {
+ .power_mode = mode,
+ .wup_ind_activation = 1,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
+ &val, sizeof(val));
+}
+
+int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
+ u8 frame_type, int init_rate)
+{
+ struct hif_mib_template_frame *arg;
+
+ WARN(skb->len > HIF_API_MAX_TEMPLATE_FRAME_SIZE, "frame is too big");
+ skb_push(skb, 4);
+ arg = (struct hif_mib_template_frame *)skb->data;
+ skb_pull(skb, 4);
+ arg->init_rate = init_rate;
+ arg->frame_type = frame_type;
+ arg->frame_length = cpu_to_le16(skb->len);
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
+ arg, sizeof(*arg) + skb->len);
+}
+
+int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
+{
+ struct hif_mib_protected_mgmt_policy val = { };
+
+ WARN(required && !capable, "incoherent arguments");
+ if (capable) {
+ val.pmf_enable = 1;
+ val.host_enc_auth_frames = 1;
+ }
+ if (!required)
+ val.unpmf_allowed = 1;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_PROTECTED_MGMT_POLICY,
+ &val, sizeof(val));
+}
+
+int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy)
+{
+ struct hif_mib_block_ack_policy val = {
+ .block_ack_tx_tid_policy = tx_tid_policy,
+ .block_ack_rx_tid_policy = rx_tid_policy,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
+ &val, sizeof(val));
+}
+
+int hif_set_association_mode(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct hif_mib_set_association_mode val = {
+ .preambtype_use = 1,
+ .mode = 1,
+ .spacing = 1,
+ .short_preamble = info->use_short_preamble,
+ };
+
+ rcu_read_lock(); // protect sta
+ if (info->bssid && !info->ibss_joined)
+ sta = ieee80211_find_sta(wvif->vif, info->bssid);
+
+ // FIXME: it is strange to not retrieve all information from bss_info
+ if (sta && sta->ht_cap.ht_supported) {
+ val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
+ if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
+ val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ }
+ rcu_read_unlock();
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
+}
+
+int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ int policy_index, u8 *rates)
+{
+ struct hif_mib_set_tx_rate_retry_policy *arg;
+ size_t size = struct_size(arg, tx_rate_retry_policy, 1);
+ int ret;
+
+ arg = kzalloc(size, GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+ arg->num_tx_rate_policies = 1;
+ arg->tx_rate_retry_policy[0].policy_index = policy_index;
+ arg->tx_rate_retry_policy[0].short_retry_count = 255;
+ arg->tx_rate_retry_policy[0].long_retry_count = 255;
+ arg->tx_rate_retry_policy[0].first_rate_sel = 1;
+ arg->tx_rate_retry_policy[0].terminate = 1;
+ arg->tx_rate_retry_policy[0].count_init = 1;
+ memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
+ sizeof(arg->tx_rate_retry_policy[0].rates));
+ ret = hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
+ kfree(arg);
+ return ret;
+}
+
+int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ int idx, const u8 *mac_addr)
+{
+ struct hif_mib_mac_addr_data_frame_condition val = {
+ .condition_idx = idx,
+ .address_type = HIF_MAC_ADDR_A1,
+ };
+
+ ether_addr_copy(val.mac_address, mac_addr);
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
+ &val, sizeof(val));
+}
+
+int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif, int idx, u8 allowed_frames)
+{
+ struct hif_mib_uc_mc_bc_data_frame_condition val = {
+ .condition_idx = idx,
+ .allowed_frames = allowed_frames,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
+ &val, sizeof(val));
+}
+
+int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable, int idx,
+ int mac_filters, int frames_types_filters)
+{
+ struct hif_mib_config_data_filter val = {
+ .enable = enable,
+ .filter_idx = idx,
+ .mac_cond = mac_filters,
+ .uc_mc_bc_cond = frames_types_filters,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
+}
+
+int hif_set_data_filtering(struct wfx_vif *wvif, bool enable, bool invert)
+{
+ struct hif_mib_set_data_filtering val = {
+ .enable = enable,
+ .invert_matching = invert,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
+}
+
+int hif_keep_alive_period(struct wfx_vif *wvif, int period)
+{
+ struct hif_mib_keep_alive_period arg = {
+ .keep_alive_period = cpu_to_le16(period),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
+ &arg, sizeof(arg));
+};
+
+int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr)
+{
+ struct hif_mib_arp_ip_addr_table arg = {
+ .condition_idx = idx,
+ .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
+ };
+
+ if (addr) {
+ // Caution: type of addr is __be32
+ memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
+ arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
+ }
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+ &arg, sizeof(arg));
+}
+
+int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
+{
+ struct hif_mib_gl_set_multi_msg arg = {
+ .enable_multi_tx_conf = enable,
+ };
+
+ return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
+ &arg, sizeof(arg));
+}
+
+int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
+{
+ struct hif_mib_set_uapsd_information arg = { };
+
+ if (val & BIT(IEEE80211_AC_VO))
+ arg.trig_voice = 1;
+ if (val & BIT(IEEE80211_AC_VI))
+ arg.trig_video = 1;
+ if (val & BIT(IEEE80211_AC_BE))
+ arg.trig_be = 1;
+ if (val & BIT(IEEE80211_AC_BK))
+ arg.trig_bckgrnd = 1;
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_SET_UAPSD_INFORMATION,
+ &arg, sizeof(arg));
+}
+
+int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
+{
+ struct hif_mib_non_erp_protection arg = {
+ .use_cts_to_self = enable,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
+}
+
+int hif_slot_time(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_slot_time arg = {
+ .slot_time = cpu_to_le32(val),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
+ &arg, sizeof(arg));
+}
+
+int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_wep_default_key_id arg = {
+ .wep_default_key_id = val,
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+ &arg, sizeof(arg));
+}
+
+int hif_rts_threshold(struct wfx_vif *wvif, int val)
+{
+ struct hif_mib_dot11_rts_threshold arg = {
+ .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
+ };
+
+ return hif_write_mib(wvif->wdev, wvif->id,
+ HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
+}
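All of the MIB writers added in hif_tx_mib.c above follow one pattern: fill a packed struct hif_mib_*, converting multi-byte fields with cpu_to_le16()/cpu_to_le32(), then pass it to hif_write_mib() together with the matching HIF_MIB_ID_* constant. A minimal caller sketch, not part of the patch (wfx_example_set_erp and the 9/20 us slot-time values are illustrative assumptions):

/* Illustrative sketch only -- assumes "wfx.h" and "hif_tx_mib.h" are included */
static int wfx_example_set_erp(struct wfx_vif *wvif, bool use_cts_protection)
{
	int ret;

	/* long slot (20 us) while protecting legacy stations, else short slot (9 us) */
	ret = hif_slot_time(wvif, use_cts_protection ? 20 : 9);
	if (ret)
		return ret;
	/* turn CTS-to-self protection on or off */
	return hif_erp_use_protection(wvif, use_cts_protection);
}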
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index 26b1406f9f6c..86683de7de7c 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -9,398 +9,48 @@
#ifndef WFX_HIF_TX_MIB_H
#define WFX_HIF_TX_MIB_H
-#include <linux/etherdevice.h>
-
-#include "wfx.h"
-#include "hif_tx.h"
-#include "hif_api_mib.h"
-
-static inline int hif_set_output_power(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_current_tx_power_level arg = {
- .power_level = cpu_to_le32(val * 10),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CURRENT_TX_POWER_LEVEL,
- &arg, sizeof(arg));
-}
-
-static inline int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
- unsigned int dtim_interval,
- unsigned int listen_interval)
-{
- struct hif_mib_beacon_wake_up_period val = {
- .wakeup_period_min = dtim_interval,
- .receive_dtim = 0,
- .wakeup_period_max = cpu_to_le16(listen_interval),
- };
-
- if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
- return -EINVAL;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_WAKEUP_PERIOD,
- &val, sizeof(val));
-}
-
-static inline int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
- int rssi_thold, int rssi_hyst)
-{
- struct hif_mib_rcpi_rssi_threshold arg = {
- .rolling_average_count = 8,
- .detection = 1,
- };
-
- if (!rssi_thold && !rssi_hyst) {
- arg.upperthresh = 1;
- arg.lowerthresh = 1;
- } else {
- arg.upper_threshold = rssi_thold + rssi_hyst;
- arg.upper_threshold = (arg.upper_threshold + 110) * 2;
- arg.lower_threshold = rssi_thold;
- arg.lower_threshold = (arg.lower_threshold + 110) * 2;
- }
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_RCPI_RSSI_THRESHOLD, &arg, sizeof(arg));
-}
-
-static inline int hif_get_counters_table(struct wfx_dev *wdev,
- struct hif_mib_extended_count_table *arg)
-{
- if (wfx_api_older_than(wdev, 1, 3)) {
- // extended_count_table is wider than count_table
- memset(arg, 0xFF, sizeof(*arg));
- return hif_read_mib(wdev, 0, HIF_MIB_ID_COUNTERS_TABLE,
- arg, sizeof(struct hif_mib_count_table));
- } else {
- return hif_read_mib(wdev, 0,
- HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
- sizeof(struct hif_mib_extended_count_table));
- }
-}
-
-static inline int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
-{
- struct hif_mib_mac_address msg = { };
-
- if (mac)
- ether_addr_copy(msg.mac_addr, mac);
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
- &msg, sizeof(msg));
-}
-
-static inline int hif_set_rx_filter(struct wfx_vif *wvif, bool filter_bssid,
- bool fwd_probe_req)
-{
- struct hif_mib_rx_filter val = { };
-
- if (filter_bssid)
- val.bssid_filter = 1;
- if (fwd_probe_req)
- val.fwd_probe_req = 1;
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_RX_FILTER,
- &val, sizeof(val));
-}
-
-static inline int hif_set_beacon_filter_table(struct wfx_vif *wvif,
- int tbl_len,
- struct hif_ie_table_entry *tbl)
-{
- int ret;
- struct hif_mib_bcn_filter_table *val;
- int buf_len = struct_size(val, ie_table, tbl_len);
-
- val = kzalloc(buf_len, GFP_KERNEL);
- if (!val)
- return -ENOMEM;
- val->num_of_info_elmts = cpu_to_le32(tbl_len);
- memcpy(val->ie_table, tbl, tbl_len * sizeof(*tbl));
- ret = hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_FILTER_TABLE, val, buf_len);
- kfree(val);
- return ret;
-}
-
-static inline int hif_beacon_filter_control(struct wfx_vif *wvif,
- int enable, int beacon_count)
-{
- struct hif_mib_bcn_filter_enable arg = {
- .enable = cpu_to_le32(enable),
- .bcn_count = cpu_to_le32(beacon_count),
- };
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_BEACON_FILTER_ENABLE,
- &arg, sizeof(arg));
-}
-
-static inline int hif_set_operational_mode(struct wfx_dev *wdev,
- enum hif_op_power_mode mode)
-{
- struct hif_mib_gl_operational_power_mode val = {
- .power_mode = mode,
- .wup_ind_activation = 1,
- };
-
- return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_OPERATIONAL_POWER_MODE,
- &val, sizeof(val));
-}
-
-static inline int hif_set_template_frame(struct wfx_vif *wvif,
- struct sk_buff *skb,
- u8 frame_type, int init_rate)
-{
- struct hif_mib_template_frame *arg;
-
- skb_push(skb, 4);
- arg = (struct hif_mib_template_frame *)skb->data;
- skb_pull(skb, 4);
- arg->init_rate = init_rate;
- arg->frame_type = frame_type;
- arg->frame_length = cpu_to_le16(skb->len);
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_TEMPLATE_FRAME,
- arg, sizeof(*arg));
-}
-
-static inline int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required)
-{
- struct hif_mib_protected_mgmt_policy val = { };
-
- WARN(required && !capable, "incoherent arguments");
- if (capable) {
- val.pmf_enable = 1;
- val.host_enc_auth_frames = 1;
- }
- if (!required)
- val.unpmf_allowed = 1;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_PROTECTED_MGMT_POLICY,
- &val, sizeof(val));
-}
-
-static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
- u8 tx_tid_policy, u8 rx_tid_policy)
-{
- struct hif_mib_block_ack_policy val = {
- .block_ack_tx_tid_policy = tx_tid_policy,
- .block_ack_rx_tid_policy = rx_tid_policy,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_BLOCK_ACK_POLICY,
- &val, sizeof(val));
-}
-
-static inline int hif_set_association_mode(struct wfx_vif *wvif,
- struct ieee80211_bss_conf *info)
-{
- int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
- struct ieee80211_sta *sta = NULL;
- struct hif_mib_set_association_mode val = {
- .preambtype_use = 1,
- .mode = 1,
- .rateset = 1,
- .spacing = 1,
- .short_preamble = info->use_short_preamble,
- .basic_rate_set = cpu_to_le32(basic_rates)
- };
-
- rcu_read_lock(); // protect sta
- if (info->bssid && !info->ibss_joined)
- sta = ieee80211_find_sta(wvif->vif, info->bssid);
-
- // FIXME: it is strange to not retrieve all information from bss_info
- if (sta && sta->ht_cap.ht_supported) {
- val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
- if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
- val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
- }
- rcu_read_unlock();
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
-}
-
-static inline int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
- int policy_index, uint8_t *rates)
-{
- struct hif_mib_set_tx_rate_retry_policy *arg;
- size_t size = struct_size(arg, tx_rate_retry_policy, 1);
- int ret;
-
- arg = kzalloc(size, GFP_KERNEL);
- arg->num_tx_rate_policies = 1;
- arg->tx_rate_retry_policy[0].policy_index = policy_index;
- arg->tx_rate_retry_policy[0].short_retry_count = 255;
- arg->tx_rate_retry_policy[0].long_retry_count = 255;
- arg->tx_rate_retry_policy[0].first_rate_sel = 1;
- arg->tx_rate_retry_policy[0].terminate = 1;
- arg->tx_rate_retry_policy[0].count_init = 1;
- memcpy(&arg->tx_rate_retry_policy[0].rates, rates,
- sizeof(arg->tx_rate_retry_policy[0].rates));
- ret = hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg, size);
- kfree(arg);
- return ret;
-}
-
-static inline int hif_set_mac_addr_condition(struct wfx_vif *wvif,
- int idx, const u8 *mac_addr)
-{
- struct hif_mib_mac_addr_data_frame_condition val = {
- .condition_idx = idx,
- .address_type = HIF_MAC_ADDR_A1,
- };
-
- ether_addr_copy(val.mac_address, mac_addr);
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_MAC_ADDR_DATAFRAME_CONDITION,
- &val, sizeof(val));
-}
-
-static inline int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
- int idx, u8 allowed_frames)
-{
- struct hif_mib_uc_mc_bc_data_frame_condition val = {
- .condition_idx = idx,
- .allowed_frames = allowed_frames,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_UC_MC_BC_DATAFRAME_CONDITION,
- &val, sizeof(val));
-}
-
-static inline int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable,
- int idx, int mac_filters,
- int frames_types_filters)
-{
- struct hif_mib_config_data_filter val = {
- .enable = enable,
- .filter_idx = idx,
- .mac_cond = mac_filters,
- .uc_mc_bc_cond = frames_types_filters,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_CONFIG_DATA_FILTER, &val, sizeof(val));
-}
-
-static inline int hif_set_data_filtering(struct wfx_vif *wvif,
- bool enable, bool invert)
-{
- struct hif_mib_set_data_filtering val = {
- .enable = enable,
- .invert_matching = invert,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_DATA_FILTERING, &val, sizeof(val));
-}
-
-static inline int hif_keep_alive_period(struct wfx_vif *wvif, int period)
-{
- struct hif_mib_keep_alive_period arg = {
- .keep_alive_period = cpu_to_le16(period),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_KEEP_ALIVE_PERIOD,
- &arg, sizeof(arg));
-};
-
-static inline int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx,
- __be32 *addr)
-{
- struct hif_mib_arp_ip_addr_table arg = {
- .condition_idx = idx,
- .arp_enable = HIF_ARP_NS_FILTERING_DISABLE,
- };
-
- if (addr) {
- // Caution: type of addr is __be32
- memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
- arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
- }
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_ARP_IP_ADDRESSES_TABLE,
- &arg, sizeof(arg));
-}
-
-static inline int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable)
-{
- struct hif_mib_gl_set_multi_msg arg = {
- .enable_multi_tx_conf = enable,
- };
-
- return hif_write_mib(wdev, -1, HIF_MIB_ID_GL_SET_MULTI_MSG,
- &arg, sizeof(arg));
-}
-
-static inline int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val)
-{
- struct hif_mib_set_uapsd_information arg = { };
-
- if (val & BIT(IEEE80211_AC_VO))
- arg.trig_voice = 1;
- if (val & BIT(IEEE80211_AC_VI))
- arg.trig_video = 1;
- if (val & BIT(IEEE80211_AC_BE))
- arg.trig_be = 1;
- if (val & BIT(IEEE80211_AC_BK))
- arg.trig_bckgrnd = 1;
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_SET_UAPSD_INFORMATION,
- &arg, sizeof(arg));
-}
-
-static inline int hif_erp_use_protection(struct wfx_vif *wvif, bool enable)
-{
- struct hif_mib_non_erp_protection arg = {
- .use_cts_to_self = enable,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_NON_ERP_PROTECTION, &arg, sizeof(arg));
-}
-
-static inline int hif_slot_time(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_slot_time arg = {
- .slot_time = cpu_to_le32(val),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SLOT_TIME,
- &arg, sizeof(arg));
-}
-
-static inline int hif_dual_cts_protection(struct wfx_vif *wvif, bool enable)
-{
- struct hif_mib_set_ht_protection arg = {
- .dual_cts_prot = enable,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_SET_HT_PROTECTION,
- &arg, sizeof(arg));
-}
-
-static inline int hif_wep_default_key_id(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_wep_default_key_id arg = {
- .wep_default_key_id = val,
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
- &arg, sizeof(arg));
-}
-
-static inline int hif_rts_threshold(struct wfx_vif *wvif, int val)
-{
- struct hif_mib_dot11_rts_threshold arg = {
- .threshold = cpu_to_le32(val >= 0 ? val : 0xFFFF),
- };
-
- return hif_write_mib(wvif->wdev, wvif->id,
- HIF_MIB_ID_DOT11_RTS_THRESHOLD, &arg, sizeof(arg));
-}
+struct wfx_vif;
+struct sk_buff;
+
+int hif_set_output_power(struct wfx_vif *wvif, int val);
+int hif_set_beacon_wakeup_period(struct wfx_vif *wvif,
+ unsigned int dtim_interval,
+ unsigned int listen_interval);
+int hif_set_rcpi_rssi_threshold(struct wfx_vif *wvif,
+ int rssi_thold, int rssi_hyst);
+int hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
+ struct hif_mib_extended_count_table *arg);
+int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac);
+int hif_set_rx_filter(struct wfx_vif *wvif,
+ bool filter_bssid, bool fwd_probe_req);
+int hif_set_beacon_filter_table(struct wfx_vif *wvif, int tbl_len,
+ const struct hif_ie_table_entry *tbl);
+int hif_beacon_filter_control(struct wfx_vif *wvif,
+ int enable, int beacon_count);
+int hif_set_operational_mode(struct wfx_dev *wdev, enum hif_op_power_mode mode);
+int hif_set_template_frame(struct wfx_vif *wvif, struct sk_buff *skb,
+ u8 frame_type, int init_rate);
+int hif_set_mfp(struct wfx_vif *wvif, bool capable, bool required);
+int hif_set_block_ack_policy(struct wfx_vif *wvif,
+ u8 tx_tid_policy, u8 rx_tid_policy);
+int hif_set_association_mode(struct wfx_vif *wvif,
+ struct ieee80211_bss_conf *info);
+int hif_set_tx_rate_retry_policy(struct wfx_vif *wvif,
+ int policy_index, u8 *rates);
+int hif_set_mac_addr_condition(struct wfx_vif *wvif,
+ int idx, const u8 *mac_addr);
+int hif_set_uc_mc_bc_condition(struct wfx_vif *wvif,
+ int idx, u8 allowed_frames);
+int hif_set_config_data_filter(struct wfx_vif *wvif, bool enable, int idx,
+ int mac_filters, int frames_types_filters);
+int hif_set_data_filtering(struct wfx_vif *wvif, bool enable, bool invert);
+int hif_keep_alive_period(struct wfx_vif *wvif, int period);
+int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr);
+int hif_use_multi_tx_conf(struct wfx_dev *wdev, bool enable);
+int hif_set_uapsd_info(struct wfx_vif *wvif, unsigned long val);
+int hif_erp_use_protection(struct wfx_vif *wvif, bool enable);
+int hif_slot_time(struct wfx_vif *wvif, int val);
+int hif_wep_default_key_id(struct wfx_vif *wvif, int val);
+int hif_rts_threshold(struct wfx_vif *wvif, int val);
#endif
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
index d3a141d95a0e..777217cdf9a7 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/staging/wfx/hwio.c
@@ -106,8 +106,8 @@ err:
return ret;
}
-static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr, void *buf,
- size_t len)
+static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr,
+ void *buf, size_t len)
{
int ret;
int i;
@@ -195,8 +195,8 @@ static int indirect_write_locked(struct wfx_dev *wdev, int reg, u32 addr,
return ret;
}
-static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
- u32 *val)
+static int indirect_read32_locked(struct wfx_dev *wdev, int reg,
+ u32 addr, u32 *val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
@@ -205,15 +205,15 @@ static int indirect_read32_locked(struct wfx_dev *wdev, int reg, u32 addr,
return -ENOMEM;
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = indirect_read(wdev, reg, addr, tmp, sizeof(u32));
- *val = cpu_to_le32(*tmp);
+ *val = le32_to_cpu(*tmp);
_trace_io_ind_read32(reg, addr, *val);
wdev->hwbus_ops->unlock(wdev->hwbus_priv);
kfree(tmp);
return ret;
}
-static int indirect_write32_locked(struct wfx_dev *wdev, int reg, u32 addr,
- u32 val)
+static int indirect_write32_locked(struct wfx_dev *wdev, int reg,
+ u32 addr, u32 val)
{
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
@@ -233,7 +233,7 @@ int wfx_data_read(struct wfx_dev *wdev, void *buf, size_t len)
{
int ret;
- WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ WARN((long)buf & 3, "%s: unaligned buffer", __func__);
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv,
WFX_REG_IN_OUT_QUEUE, buf, len);
@@ -249,7 +249,7 @@ int wfx_data_write(struct wfx_dev *wdev, const void *buf, size_t len)
{
int ret;
- WARN((long) buf & 3, "%s: unaligned buffer", __func__);
+ WARN((long)buf & 3, "%s: unaligned buffer", __func__);
wdev->hwbus_ops->lock(wdev->hwbus_priv);
ret = wdev->hwbus_ops->copy_to_io(wdev->hwbus_priv,
WFX_REG_IN_OUT_QUEUE, buf, len);
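The indirect_read32_locked() hunk above fixes the direction of the byte-order conversion: the bounce buffer holds data as the device sent it (little-endian), so it must go through le32_to_cpu() to reach CPU order; cpu_to_le32() was semantically backwards even though both are no-ops on little-endian hosts. A self-contained sketch of the annotated pattern (wfx_demo_unpack_reg is an illustrative name, not driver code):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative sketch only: convert a 32-bit register value read from
 * the bus (always little-endian) into CPU byte order. The __le32
 * annotation lets sparse flag a wrong-direction conversion such as
 * cpu_to_le32() here.
 */
static inline u32 wfx_demo_unpack_reg(const __le32 *raw)
{
	return le32_to_cpu(*raw);
}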
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
index 96adfa330604..5ee2ffc5f935 100644
--- a/drivers/staging/wfx/key.c
+++ b/drivers/staging/wfx/key.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "key.h"
@@ -20,14 +21,12 @@ static int wfx_alloc_key(struct wfx_dev *wdev)
return -1;
wdev->key_map |= BIT(idx);
- wdev->keys[idx].entry_index = idx;
return idx;
}
static void wfx_free_key(struct wfx_dev *wdev, int idx)
{
WARN(!(wdev->key_map & BIT(idx)), "inconsistent key allocation");
- memset(&wdev->keys[idx], 0, sizeof(wdev->keys[idx]));
wdev->key_map &= ~BIT(idx);
}
@@ -159,7 +158,7 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
int ret;
- struct hif_req_add_key *k;
+ struct hif_req_add_key k = { };
struct ieee80211_key_seq seq;
struct wfx_dev *wdev = wvif->wdev;
int idx = wfx_alloc_key(wvif->wdev);
@@ -169,44 +168,44 @@ static int wfx_add_key(struct wfx_vif *wvif, struct ieee80211_sta *sta,
ieee80211_get_key_rx_seq(key, 0, &seq);
if (idx < 0)
return -EINVAL;
- k = &wdev->keys[idx];
- k->int_id = wvif->id;
+ k.int_id = wvif->id;
+ k.entry_index = idx;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) {
if (pairwise)
- k->type = fill_wep_pair(&k->key.wep_pairwise_key, key,
- sta->addr);
+ k.type = fill_wep_pair(&k.key.wep_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_wep_group(&k->key.wep_group_key, key);
+ k.type = fill_wep_group(&k.key.wep_group_key, key);
} else if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
if (pairwise)
- k->type = fill_tkip_pair(&k->key.tkip_pairwise_key, key,
- sta->addr);
+ k.type = fill_tkip_pair(&k.key.tkip_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_tkip_group(&k->key.tkip_group_key, key,
- &seq, wvif->vif->type);
+ k.type = fill_tkip_group(&k.key.tkip_group_key, key,
+ &seq, wvif->vif->type);
} else if (key->cipher == WLAN_CIPHER_SUITE_CCMP) {
if (pairwise)
- k->type = fill_ccmp_pair(&k->key.aes_pairwise_key, key,
- sta->addr);
+ k.type = fill_ccmp_pair(&k.key.aes_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_ccmp_group(&k->key.aes_group_key, key,
- &seq);
+ k.type = fill_ccmp_group(&k.key.aes_group_key, key,
+ &seq);
} else if (key->cipher == WLAN_CIPHER_SUITE_SMS4) {
if (pairwise)
- k->type = fill_sms4_pair(&k->key.wapi_pairwise_key, key,
- sta->addr);
+ k.type = fill_sms4_pair(&k.key.wapi_pairwise_key, key,
+ sta->addr);
else
- k->type = fill_sms4_group(&k->key.wapi_group_key, key);
+ k.type = fill_sms4_group(&k.key.wapi_group_key, key);
} else if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
- k->type = fill_aes_cmac_group(&k->key.igtk_group_key, key,
- &seq);
+ k.type = fill_aes_cmac_group(&k.key.igtk_group_key, key,
+ &seq);
} else {
dev_warn(wdev->dev, "unsupported key type %d\n", key->cipher);
wfx_free_key(wdev, idx);
return -EOPNOTSUPP;
}
- ret = hif_add_key(wdev, k);
+ ret = hif_add_key(wdev, &k);
if (ret) {
wfx_free_key(wdev, idx);
return -EOPNOTSUPP;
@@ -229,7 +228,7 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
int ret = -EOPNOTSUPP;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
mutex_lock(&wvif->wdev->conf_mutex);
if (cmd == SET_KEY)
@@ -240,29 +239,3 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-int wfx_upload_keys(struct wfx_vif *wvif)
-{
- int i;
- struct hif_req_add_key *key;
- struct wfx_dev *wdev = wvif->wdev;
-
- for (i = 0; i < ARRAY_SIZE(wdev->keys); i++) {
- if (wdev->key_map & BIT(i)) {
- key = &wdev->keys[i];
- if (key->int_id == wvif->id)
- hif_add_key(wdev, key);
- }
- }
- return 0;
-}
-
-void wfx_wep_key_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, wep_key_work);
-
- wfx_tx_flush(wvif->wdev);
- hif_wep_default_key_id(wvif, wvif->wep_default_key_id);
- wfx_pending_requeue(wvif->wdev, wvif->wep_pending_skb);
- wvif->wep_pending_skb = NULL;
- wfx_tx_unlock(wvif->wdev);
-}
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
index 9436ccdf4d3b..ff31fc9c565a 100644
--- a/drivers/staging/wfx/key.h
+++ b/drivers/staging/wfx/key.h
@@ -16,7 +16,5 @@ struct wfx_vif;
int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
-int wfx_upload_keys(struct wfx_vif *wvif);
-void wfx_wep_key_work(struct work_struct *work);
#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 3c4c240229ad..6bd96f476388 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -28,6 +28,7 @@
#include "bh.h"
#include "sta.h"
#include "key.h"
+#include "scan.h"
#include "debug.h"
#include "data_tx.h"
#include "secure_link.h"
@@ -106,7 +107,7 @@ static const struct ieee80211_supported_band wfx_band_2ghz = {
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
.mcs = {
.rx_mask = { 0xFF }, // MCS0 to MCS7
- .rx_highest = 65,
+ .rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
},
@@ -133,15 +134,19 @@ static const struct ieee80211_ops wfx_ops = {
.remove_interface = wfx_remove_interface,
.config = wfx_config,
.tx = wfx_tx,
+ .join_ibss = wfx_join_ibss,
+ .leave_ibss = wfx_leave_ibss,
.conf_tx = wfx_conf_tx,
.hw_scan = wfx_hw_scan,
.cancel_hw_scan = wfx_cancel_hw_scan,
+ .start_ap = wfx_start_ap,
+ .stop_ap = wfx_stop_ap,
.sta_add = wfx_sta_add,
.sta_remove = wfx_sta_remove,
- .sta_notify = wfx_sta_notify,
.set_tim = wfx_set_tim,
.set_key = wfx_set_key,
.set_rts_threshold = wfx_set_rts_threshold,
+ .set_default_unicast_key = wfx_set_default_unicast_key,
.bss_info_changed = wfx_bss_info_changed,
.prepare_multicast = wfx_prepare_multicast,
.configure_filter = wfx_configure_filter,
@@ -165,8 +170,8 @@ bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
return false;
}
-struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
- const char *label)
+struct gpio_desc *wfx_get_gpio(struct device *dev,
+ int override, const char *label)
{
struct gpio_desc *ret;
char label_buf[256];
@@ -187,18 +192,18 @@ struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
if (!ret || PTR_ERR(ret) == -ENOENT)
dev_warn(dev, "gpio %s is not defined\n", label);
else
- dev_warn(dev,
- "error while requesting gpio %s\n", label);
+ dev_warn(dev, "error while requesting gpio %s\n",
+ label);
ret = NULL;
} else {
- dev_dbg(dev,
- "using gpio %d for %s\n", desc_to_gpio(ret), label);
+ dev_dbg(dev, "using gpio %d for %s\n",
+ desc_to_gpio(ret), label);
}
return ret;
}
/* NOTE: wfx_send_pds() destroys buf */
-int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
{
int ret;
int start, brace_level, i;
@@ -224,16 +229,19 @@ int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len)
buf[i] = '}';
ret = hif_configuration(wdev, buf + start,
i - start + 1);
- if (ret == HIF_STATUS_FAILURE) {
- dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n", start, i);
+ if (ret > 0) {
+ dev_err(wdev->dev, "PDS bytes %d to %d: invalid data (unsupported options?)\n",
+ start, i);
return -EINVAL;
}
if (ret == -ETIMEDOUT) {
- dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n", start, i);
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip didn't reply (corrupted file?)\n",
+ start, i);
return ret;
}
if (ret) {
- dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n", start, i);
+ dev_err(wdev->dev, "PDS bytes %d to %d: chip returned an unknown error\n",
+ start, i);
return -EIO;
}
buf[i] = ',';
@@ -247,7 +255,7 @@ static int wfx_send_pdata_pds(struct wfx_dev *wdev)
{
int ret = 0;
const struct firmware *pds;
- unsigned char *tmp_buf;
+ u8 *tmp_buf;
ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
if (ret) {
@@ -266,9 +274,9 @@ static void wfx_free_common(void *data)
{
struct wfx_dev *wdev = data;
+ mutex_destroy(&wdev->tx_power_loop_info_lock);
mutex_destroy(&wdev->rx_stats_lock);
mutex_destroy(&wdev->conf_mutex);
- wfx_tx_queues_deinit(wdev);
ieee80211_free_hw(wdev->hw);
}
@@ -286,7 +294,6 @@ struct wfx_dev *wfx_init_common(struct device *dev,
SET_IEEE80211_DEV(hw, dev);
- ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -315,7 +322,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->max_ap_assoc_sta = WFX_MAX_STA_IN_AP_MODE;
+ hw->wiphy->max_ap_assoc_sta = HIF_LINK_ID_MAX;
hw->wiphy->max_scan_ssids = 2;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
@@ -338,7 +345,10 @@ struct wfx_dev *wfx_init_common(struct device *dev,
mutex_init(&wdev->conf_mutex);
mutex_init(&wdev->rx_stats_lock);
+ mutex_init(&wdev->tx_power_loop_info_lock);
init_completion(&wdev->firmware_ready);
+ INIT_DELAYED_WORK(&wdev->cooling_timeout_work,
+ wfx_cooling_timeout_work);
wfx_init_hif_cmd(&wdev->hif_cmd);
wfx_tx_queues_init(wdev);
@@ -359,23 +369,24 @@ int wfx_probe(struct wfx_dev *wdev)
// prevent bh() from touching it.
gpio_saved = wdev->pdata.gpio_wakeup;
wdev->pdata.gpio_wakeup = NULL;
+ wdev->poll_irq = true;
wfx_bh_register(wdev);
err = wfx_init_device(wdev);
if (err)
- goto err1;
+ goto err0;
- err = wait_for_completion_interruptible_timeout(&wdev->firmware_ready,
- 10 * HZ);
+ wfx_bh_poll_irq(wdev);
+ err = wait_for_completion_timeout(&wdev->firmware_ready, 1 * HZ);
if (err <= 0) {
if (err == 0) {
- dev_err(wdev->dev, "timeout while waiting for startup indication. IRQ configuration error?\n");
+ dev_err(wdev->dev, "timeout while waiting for startup indication\n");
err = -ETIMEDOUT;
} else if (err == -ERESTARTSYS) {
dev_info(wdev->dev, "probe interrupted by user\n");
}
- goto err1;
+ goto err0;
}
// FIXME: fill wiphy::hw_version
@@ -384,7 +395,7 @@ int wfx_probe(struct wfx_dev *wdev)
wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
wdev->hw_caps.api_version_major,
wdev->hw_caps.api_version_minor,
- wdev->keyset, *((u32 *) &wdev->hw_caps.capabilities));
+ wdev->keyset, *((u32 *)&wdev->hw_caps.capabilities));
snprintf(wdev->hw->wiphy->fw_version,
sizeof(wdev->hw->wiphy->fw_version),
"%d.%d.%d",
@@ -397,14 +408,14 @@ int wfx_probe(struct wfx_dev *wdev)
"unsupported firmware API version (expect 1 while firmware returns %d)\n",
wdev->hw_caps.api_version_major);
err = -ENOTSUPP;
- goto err1;
+ goto err0;
}
err = wfx_sl_init(wdev);
if (err && wdev->hw_caps.capabilities.link_mode == SEC_LINK_ENFORCED) {
dev_err(wdev->dev,
"chip require secure_link, but can't negociate it\n");
- goto err1;
+ goto err0;
}
if (wdev->hw_caps.regul_sel_mode_info.region_sel_mode) {
@@ -417,7 +428,16 @@ int wfx_probe(struct wfx_dev *wdev)
wdev->pdata.file_pds);
err = wfx_send_pdata_pds(wdev);
if (err < 0)
- goto err1;
+ goto err0;
+
+ wdev->poll_irq = false;
+ err = wdev->hwbus_ops->irq_subscribe(wdev->hwbus_priv);
+ if (err)
+ goto err0;
+
+ err = hif_use_multi_tx_conf(wdev, true);
+ if (err)
+ dev_err(wdev->dev, "misconfigured IRQ?\n");
wdev->pdata.gpio_wakeup = gpio_saved;
if (wdev->pdata.gpio_wakeup) {
@@ -432,8 +452,6 @@ int wfx_probe(struct wfx_dev *wdev)
hif_set_operational_mode(wdev, HIF_OP_POWER_MODE_DOZE);
}
- hif_use_multi_tx_conf(wdev, true);
-
for (i = 0; i < ARRAY_SIZE(wdev->addresses); i++) {
eth_zero_addr(wdev->addresses[i].addr);
macaddr = of_get_mac_address(wdev->dev->of_node);
@@ -466,8 +484,9 @@ int wfx_probe(struct wfx_dev *wdev)
err2:
ieee80211_unregister_hw(wdev->hw);
- ieee80211_free_hw(wdev->hw);
err1:
+ wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
+err0:
wfx_bh_unregister(wdev);
return err;
}
@@ -476,6 +495,7 @@ void wfx_release(struct wfx_dev *wdev)
{
ieee80211_unregister_hw(wdev->hw);
hif_shutdown(wdev);
+ wdev->hwbus_ops->irq_unsubscribe(wdev->hwbus_priv);
wfx_bh_unregister(wdev);
wfx_sl_deinit(wdev);
}
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
index 9c9410072def..f832ce409fda 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/staging/wfx/main.h
@@ -13,10 +13,10 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
-#include "bus.h"
#include "hif_api_general.h"
struct wfx_dev;
+struct hwbus_ops;
struct wfx_platform_data {
/* Keyset and ".sec" extension will be appended to this string */
@@ -41,6 +41,6 @@ void wfx_release(struct wfx_dev *wdev);
struct gpio_desc *wfx_get_gpio(struct device *dev, int override,
const char *label);
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
-int wfx_send_pds(struct wfx_dev *wdev, unsigned char *buf, size_t len);
+int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len);
#endif
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 39d9127ce4b9..3248ecefda56 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -35,6 +35,7 @@ void wfx_tx_flush(struct wfx_dev *wdev)
if (wdev->chip_frozen)
return;
+ wfx_tx_lock(wdev);
mutex_lock(&wdev->hif_cmd.lock);
ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
!wdev->hif.tx_buffers_used,
@@ -44,9 +45,10 @@ void wfx_tx_flush(struct wfx_dev *wdev)
wdev->hif.tx_buffers_used);
wfx_pending_dump_old_frames(wdev, 3000);
// FIXME: drop pending frames here
- wdev->chip_frozen = 1;
+ wdev->chip_frozen = true;
}
mutex_unlock(&wdev->hif_cmd.lock);
+ wfx_tx_unlock(wdev);
}
void wfx_tx_lock_flush(struct wfx_dev *wdev)
@@ -55,261 +57,142 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
wfx_tx_flush(wdev);
}
-void wfx_tx_queues_lock(struct wfx_dev *wdev)
+void wfx_tx_queues_init(struct wfx_dev *wdev)
{
int i;
- struct wfx_queue *queue;
+ skb_queue_head_init(&wdev->tx_pending);
+ init_waitqueue_head(&wdev->tx_dequeue);
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->queue.lock);
- if (queue->tx_locked_cnt++ == 0)
- ieee80211_stop_queue(wdev->hw, queue->queue_id);
- spin_unlock_bh(&queue->queue.lock);
+ skb_queue_head_init(&wdev->tx_queue[i].normal);
+ skb_queue_head_init(&wdev->tx_queue[i].cab);
}
}
-void wfx_tx_queues_unlock(struct wfx_dev *wdev)
+void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
{
int i;
- struct wfx_queue *queue;
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->queue.lock);
- WARN(!queue->tx_locked_cnt, "queue already unlocked");
- if (--queue->tx_locked_cnt == 0)
- ieee80211_wake_queue(wdev->hw, queue->queue_id);
- spin_unlock_bh(&queue->queue.lock);
+ WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
}
}
-/* If successful, LOCKS the TX queue! */
-void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
+static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
+ struct sk_buff_head *skb_queue, int vif_id)
{
- int i;
- bool done;
- struct wfx_queue *queue;
- struct sk_buff *item;
- struct wfx_dev *wdev = wvif->wdev;
- struct hif_msg *hif;
-
- if (wvif->wdev->chip_frozen) {
- wfx_tx_lock_flush(wdev);
- wfx_tx_queues_clear(wdev);
- return;
- }
+ struct hif_msg *hif_msg;
+ struct sk_buff *skb;
- do {
- done = true;
- wfx_tx_lock_flush(wdev);
- for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->queue.lock);
- skb_queue_walk(&queue->queue, item) {
- hif = (struct hif_msg *) item->data;
- if (hif->interface == wvif->id)
- done = false;
- }
- spin_unlock_bh(&queue->queue.lock);
- }
- if (!done) {
- wfx_tx_unlock(wdev);
- msleep(20);
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk(skb_queue, skb) {
+ hif_msg = (struct hif_msg *)skb->data;
+ if (vif_id < 0 || hif_msg->interface == vif_id) {
+ spin_unlock_bh(&skb_queue->lock);
+ return false;
}
- } while (!done);
-}
-
-static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff_head *gc_list)
-{
- int i;
- struct sk_buff *item;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
-
- spin_lock_bh(&queue->queue.lock);
- while ((item = __skb_dequeue(&queue->queue)) != NULL)
- skb_queue_head(gc_list, item);
- spin_lock_nested(&stats->pending.lock, 1);
- for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
- stats->link_map_cache[i] -= queue->link_map_cache[i];
- queue->link_map_cache[i] = 0;
- }
- spin_unlock(&stats->pending.lock);
- spin_unlock_bh(&queue->queue.lock);
-}
-
-void wfx_tx_queues_clear(struct wfx_dev *wdev)
-{
- int i;
- struct sk_buff *item;
- struct sk_buff_head gc_list;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
-
- skb_queue_head_init(&gc_list);
- for (i = 0; i < IEEE80211_NUM_ACS; ++i)
- wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
- wake_up(&stats->wait_link_id_empty);
- while ((item = skb_dequeue(&gc_list)) != NULL)
- wfx_skb_dtor(wdev, item);
-}
-
-void wfx_tx_queues_init(struct wfx_dev *wdev)
-{
- int i;
-
- memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
- memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
- skb_queue_head_init(&wdev->tx_queue_stats.pending);
- init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
-
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- wdev->tx_queue[i].queue_id = i;
- skb_queue_head_init(&wdev->tx_queue[i].queue);
}
+ spin_unlock_bh(&skb_queue->lock);
+ return true;
}
-void wfx_tx_queues_deinit(struct wfx_dev *wdev)
+bool wfx_tx_queue_empty(struct wfx_dev *wdev,
+ struct wfx_queue *queue, int vif_id)
{
- WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
- wfx_tx_queues_clear(wdev);
+ return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
+ __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
}
-int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map)
+static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
+ struct sk_buff_head *skb_queue, int vif_id,
+ struct sk_buff_head *dropped)
{
- int ret, i;
-
- if (!link_id_map)
- return 0;
-
- spin_lock_bh(&queue->queue.lock);
- if (link_id_map == (u32)-1) {
- ret = skb_queue_len(&queue->queue);
- } else {
- ret = 0;
- for (i = 0; i < ARRAY_SIZE(queue->link_map_cache); i++)
- if (link_id_map & BIT(i))
- ret += queue->link_map_cache[i];
+ struct sk_buff *skb, *tmp;
+ struct hif_msg *hif_msg;
+
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk_safe(skb_queue, skb, tmp) {
+ hif_msg = (struct hif_msg *)skb->data;
+ if (vif_id < 0 || hif_msg->interface == vif_id) {
+ __skb_unlink(skb, skb_queue);
+ skb_queue_head(dropped, skb);
+ }
}
- spin_unlock_bh(&queue->queue.lock);
- return ret;
+ spin_unlock_bh(&skb_queue->lock);
}
-void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff *skb)
+void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id, struct sk_buff_head *dropped)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
- struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
-
- WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
- spin_lock_bh(&queue->queue.lock);
- __skb_queue_tail(&queue->queue, skb);
-
- ++queue->link_map_cache[tx_priv->link_id];
-
- spin_lock_nested(&stats->pending.lock, 1);
- ++stats->link_map_cache[tx_priv->link_id];
- spin_unlock(&stats->pending.lock);
- spin_unlock_bh(&queue->queue.lock);
-}
-
-static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
- struct wfx_queue *queue,
- u32 link_id_map)
-{
- struct sk_buff *skb = NULL;
- struct sk_buff *item;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
- struct wfx_tx_priv *tx_priv;
- bool wakeup_stats = false;
-
- spin_lock_bh(&queue->queue.lock);
- skb_queue_walk(&queue->queue, item) {
- tx_priv = wfx_skb_tx_priv(item);
- if (link_id_map & BIT(tx_priv->link_id)) {
- skb = item;
- break;
- }
- }
- if (skb) {
- tx_priv = wfx_skb_tx_priv(skb);
- tx_priv->xmit_timestamp = ktime_get();
- __skb_unlink(skb, &queue->queue);
- --queue->link_map_cache[tx_priv->link_id];
-
- spin_lock_nested(&stats->pending.lock, 1);
- __skb_queue_tail(&stats->pending, skb);
- if (!--stats->link_map_cache[tx_priv->link_id])
- wakeup_stats = true;
- spin_unlock(&stats->pending.lock);
- }
- spin_unlock_bh(&queue->queue.lock);
- if (wakeup_stats)
- wake_up(&stats->wait_link_id_empty);
- return skb;
+ __wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
+ __wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
+ wake_up(&wdev->tx_dequeue);
}
-int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
+void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
- struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- WARN_ON(skb_get_queue_mapping(skb) > 3);
- spin_lock_bh(&queue->queue.lock);
- ++queue->link_map_cache[tx_priv->link_id];
-
- spin_lock_nested(&stats->pending.lock, 1);
- ++stats->link_map_cache[tx_priv->link_id];
- __skb_unlink(skb, &stats->pending);
- spin_unlock(&stats->pending.lock);
- __skb_queue_tail(&queue->queue, skb);
- spin_unlock_bh(&queue->queue.lock);
- return 0;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ skb_queue_tail(&queue->cab, skb);
+ else
+ skb_queue_tail(&queue->normal, skb);
}
-int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
-
- spin_lock_bh(&stats->pending.lock);
- __skb_unlink(skb, &stats->pending);
- spin_unlock_bh(&stats->pending.lock);
- wfx_skb_dtor(wdev, skb);
+ struct wfx_queue *queue;
+ struct sk_buff *skb;
- return 0;
+ WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
+ __func__);
+ while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
+ queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ skb_queue_head(dropped, skb);
+ }
}
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
- struct sk_buff *skb;
+ struct wfx_queue *queue;
struct hif_req_tx *req;
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
+ struct sk_buff *skb;
- spin_lock_bh(&stats->pending.lock);
- skb_queue_walk(&stats->pending, skb) {
+ spin_lock_bh(&wdev->tx_pending.lock);
+ skb_queue_walk(&wdev->tx_pending, skb) {
req = wfx_skb_txreq(skb);
if (req->packet_id == packet_id) {
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&wdev->tx_pending.lock);
+ queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ skb_unlink(skb, &wdev->tx_pending);
return skb;
}
}
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&wdev->tx_pending.lock);
WARN(1, "cannot find packet in pending queue");
return NULL;
}
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
- struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
ktime_t now = ktime_get();
struct wfx_tx_priv *tx_priv;
struct hif_req_tx *req;
struct sk_buff *skb;
bool first = true;
- spin_lock_bh(&stats->pending.lock);
- skb_queue_walk(&stats->pending, skb) {
+ spin_lock_bh(&wdev->tx_pending.lock);
+ skb_queue_walk(&wdev->tx_pending, skb) {
tx_priv = wfx_skb_tx_priv(skb);
req = wfx_skb_txreq(skb);
if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
@@ -324,7 +207,7 @@ void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
ktime_ms_delta(now, tx_priv->xmit_timestamp));
}
}
- spin_unlock_bh(&stats->pending.lock);
+ spin_unlock_bh(&wdev->tx_pending.lock);
}
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
@@ -336,138 +219,66 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
return ktime_us_delta(now, tx_priv->xmit_timestamp);
}
-bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
+ struct wfx_dev *wdev = wvif->wdev;
int i;
- struct sk_buff_head *queue;
- bool ret = true;
-
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- queue = &wdev->tx_queue[i].queue;
- spin_lock_bh(&queue->lock);
- if (!skb_queue_empty(queue))
- ret = false;
- spin_unlock_bh(&queue->lock);
- }
- return ret;
-}
-
-static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
- struct wfx_queue *queue)
-{
- struct hif_req_tx *req = wfx_skb_txreq(skb);
- struct ieee80211_key_conf *hw_key = wfx_skb_tx_priv(skb)->hw_key;
- struct ieee80211_hdr *frame =
- (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);
-
- // FIXME: mac80211 is smart enough to handle BSS loss. Driver should not
- // try to do anything about that.
- if (ieee80211_is_nullfunc(frame->frame_control)) {
- mutex_lock(&wvif->bss_loss_lock);
- if (wvif->bss_loss_state) {
- wvif->bss_loss_confirm_id = req->packet_id;
- req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
- }
- mutex_unlock(&wvif->bss_loss_lock);
- }
- // FIXME: identify the exact scenario matched by this condition. Does it
- // happen yet?
- if (ieee80211_has_protected(frame->frame_control) &&
- hw_key && hw_key->keyidx != wvif->wep_default_key_id &&
- (hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
- wfx_tx_lock(wvif->wdev);
- WARN_ON(wvif->wep_pending_skb);
- wvif->wep_default_key_id = hw_key->keyidx;
- wvif->wep_pending_skb = skb;
- if (!schedule_work(&wvif->wep_key_work))
- wfx_tx_unlock(wvif->wdev);
- return true;
- } else {
+ if (wvif->vif->type != NL80211_IFTYPE_AP)
return false;
- }
-}
-
-static int wfx_get_prio_queue(struct wfx_vif *wvif,
- u32 tx_allowed_mask, int *total)
-{
- static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
- BIT(WFX_LINK_ID_UAPSD);
- const struct ieee80211_tx_queue_params *edca;
- unsigned int score, best = -1;
- int winner = -1;
- int i;
-
- /* search for a winner using edca params */
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- int queued;
-
- edca = &wvif->edca_params[i];
- queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
- tx_allowed_mask);
- if (!queued)
- continue;
- *total += queued;
- score = ((edca->aifs + edca->cw_min) << 16) +
- ((edca->cw_max - edca->cw_min) *
- (get_random_int() & 0xFFFF));
- if (score < best && (winner < 0 || i != 3)) {
- best = score;
- winner = i;
- }
- }
-
- /* override winner if bursting */
- if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
- winner != wvif->wdev->tx_burst_idx &&
- !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
- tx_allowed_mask & urgent) &&
- wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
- winner = wvif->wdev->tx_burst_idx;
-
- return winner;
-}
-
-static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
- struct wfx_queue **queue_p,
- u32 *tx_allowed_mask_p)
-{
- int idx;
- u32 tx_allowed_mask;
- int total = 0;
-
- /* Search for unicast traffic */
- tx_allowed_mask = ~wvif->sta_asleep_mask;
- tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
- if (wvif->sta_asleep_mask)
- tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
- else
- tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
- idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
- if (idx < 0)
- return -ENOENT;
-
- *queue_p = &wvif->wdev->tx_queue[idx];
- *tx_allowed_mask_p = tx_allowed_mask;
- return 0;
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i)
+ // Note: since only AP can have mcast frames in queue and only
+ // one vif can be AP, all queued frames have the same interface id
+ if (!skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
+ return true;
+ return false;
}
-struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
+static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
- struct wfx_dev *wdev = wvif->wdev;
- struct ieee80211_tx_info *tx_info;
+ struct wfx_queue *sorted_queues[IEEE80211_NUM_ACS];
+ struct wfx_vif *wvif;
struct hif_msg *hif;
struct sk_buff *skb;
- int i;
+ int i, j;
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- skb_queue_walk(&wdev->tx_queue[i].queue, skb) {
- tx_info = IEEE80211_SKB_CB(skb);
+ // bubble sort
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ sorted_queues[i] = &wdev->tx_queue[i];
+ for (j = i; j > 0; j--)
+ if (atomic_read(&sorted_queues[j]->pending_frames) >
+ atomic_read(&sorted_queues[j - 1]->pending_frames))
+ swap(sorted_queues[j - 1], sorted_queues[j]);
+ }
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
+ if (!wvif->after_dtim_tx_allowed)
+ continue;
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb = skb_dequeue(&sorted_queues[i]->cab);
+ if (!skb)
+ continue;
+ // Note: since only an AP can have mcast frames in its queue
+ // and only one vif can be an AP, all queued frames have the
+ // same interface id
hif = (struct hif_msg *)skb->data;
- if ((tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) &&
- (hif->interface == wvif->id))
- return (struct hif_msg *)skb->data;
+ WARN_ON(hif->interface != wvif->id);
+ WARN_ON(sorted_queues[i] !=
+ &wdev->tx_queue[skb_get_queue_mapping(skb)]);
+ atomic_inc(&sorted_queues[i]->pending_frames);
+ return skb;
+ }
+ // No more multicast frames to send
+ wvif->after_dtim_tx_allowed = false;
+ schedule_work(&wvif->update_tim_work);
+ }
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb = skb_dequeue(&sorted_queues[i]->normal);
+ if (skb) {
+ WARN_ON(sorted_queues[i] !=
+ &wdev->tx_queue[skb_get_queue_mapping(skb)]);
+ atomic_inc(&sorted_queues[i]->pending_frames);
+ return skb;
}
}
return NULL;
@@ -475,90 +286,20 @@ struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif)
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
+ struct wfx_tx_priv *tx_priv;
struct sk_buff *skb;
- struct hif_msg *hif = NULL;
- struct wfx_queue *queue = NULL;
- struct wfx_queue *vif_queue = NULL;
- u32 tx_allowed_mask = 0;
- u32 vif_tx_allowed_mask = 0;
- struct wfx_vif *wvif;
- int not_found;
- int burst;
- int i;
if (atomic_read(&wdev->tx_lock))
return NULL;
- wvif = NULL;
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- if (wvif->after_dtim_tx_allowed) {
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- skb = wfx_tx_queue_get(wvif->wdev,
- &wdev->tx_queue[i],
- BIT(WFX_LINK_ID_AFTER_DTIM));
- if (skb) {
- hif = (struct hif_msg *)skb->data;
- // Cannot happen since only one vif can
- // be AP at time
- WARN_ON(wvif->id != hif->interface);
- return hif;
- }
- }
- // No more multicast to sent
- wvif->after_dtim_tx_allowed = false;
- schedule_work(&wvif->update_tim_work);
- }
- }
-
for (;;) {
- int ret = -ENOENT;
- int queue_num;
-
- wvif = NULL;
- while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- spin_lock_bh(&wvif->ps_state_lock);
-
- not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
- &vif_tx_allowed_mask);
-
- spin_unlock_bh(&wvif->ps_state_lock);
-
- if (!not_found) {
- if (queue && queue != vif_queue)
- dev_info(wdev->dev, "vifs disagree about queue priority\n");
- tx_allowed_mask |= vif_tx_allowed_mask;
- queue = vif_queue;
- ret = 0;
- }
- }
-
- if (ret)
- return NULL;
-
- queue_num = queue - wdev->tx_queue;
-
- skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
+ skb = wfx_tx_queues_get_skb(wdev);
if (!skb)
- continue;
- hif = (struct hif_msg *)skb->data;
- wvif = wdev_to_wvif(wdev, hif->interface);
- WARN_ON(!wvif);
-
- if (hif_handle_tx_data(wvif, skb, queue))
- continue; /* Handled by WSM */
-
- /* allow bursting if txop is set */
- if (wvif->edca_params[queue_num].txop)
- burst = wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
- else
- burst = 1;
-
- /* store index of bursting queue */
- if (burst > 1)
- wdev->tx_burst_idx = queue_num;
- else
- wdev->tx_burst_idx = -1;
-
- return hif;
+ return NULL;
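+ // Track the dequeued frame in tx_pending (see wfx_pending_get()
+ // and wfx_pending_drop())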
+ skb_queue_tail(&wdev->tx_pending, skb);
+ wake_up(&wdev->tx_dequeue);
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ return (struct hif_msg *)skb->data;
}
}
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 90bb060d1204..0c3b7244498e 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -9,29 +9,15 @@
#define WFX_QUEUE_H
#include <linux/skbuff.h>
-
-#include "hif_api_cmd.h"
-
-#define WFX_MAX_STA_IN_AP_MODE 14
-#define WFX_LINK_ID_NO_ASSOC 15
-#define WFX_LINK_ID_AFTER_DTIM (WFX_LINK_ID_NO_ASSOC + 1)
-#define WFX_LINK_ID_UAPSD (WFX_LINK_ID_NO_ASSOC + 2)
-#define WFX_LINK_ID_MAX (WFX_LINK_ID_NO_ASSOC + 3)
+#include <linux/atomic.h>
struct wfx_dev;
struct wfx_vif;
struct wfx_queue {
- struct sk_buff_head queue;
- int tx_locked_cnt;
- int link_map_cache[WFX_LINK_ID_MAX];
- u8 queue_id;
-};
-
-struct wfx_queue_stats {
- int link_map_cache[WFX_LINK_ID_MAX];
- struct sk_buff_head pending;
- wait_queue_head_t wait_link_id_empty;
+ struct sk_buff_head normal;
+ struct sk_buff_head cab; // Content After (DTIM) Beacon
+ atomic_t pending_frames;
};
void wfx_tx_lock(struct wfx_dev *wdev);
@@ -40,22 +26,18 @@ void wfx_tx_flush(struct wfx_dev *wdev);
void wfx_tx_lock_flush(struct wfx_dev *wdev);
void wfx_tx_queues_init(struct wfx_dev *wdev);
-void wfx_tx_queues_deinit(struct wfx_dev *wdev);
-void wfx_tx_queues_lock(struct wfx_dev *wdev);
-void wfx_tx_queues_unlock(struct wfx_dev *wdev);
-void wfx_tx_queues_clear(struct wfx_dev *wdev);
-bool wfx_tx_queues_is_empty(struct wfx_dev *wdev);
-void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif);
+void wfx_tx_queues_check_empty(struct wfx_dev *wdev);
+bool wfx_tx_queues_has_cab(struct wfx_vif *wvif);
+void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb);
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev);
-struct hif_msg *wfx_tx_queues_get_after_dtim(struct wfx_vif *wvif);
-void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff *skb);
-int wfx_tx_queue_get_num_queued(struct wfx_queue *queue, u32 link_id_map);
+bool wfx_tx_queue_empty(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id);
+void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id, struct sk_buff_head *dropped);
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
-int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
-int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped);
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
struct sk_buff *skb);
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 9aa14331affd..57ea9997800b 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -88,18 +88,22 @@ void wfx_hw_scan_work(struct work_struct *work)
struct ieee80211_scan_request *hw_req = wvif->scan_req;
int chan_cur, ret;
- mutex_lock(&wvif->scan_lock);
mutex_lock(&wvif->wdev->conf_mutex);
+ mutex_lock(&wvif->scan_lock);
+ if (wvif->join_in_progress) {
+ dev_info(wvif->wdev->dev, "%s: abort in-progress REQ_JOIN",
+ __func__);
+ wfx_reset(wvif);
+ }
update_probe_tmpl(wvif, &hw_req->req);
- wfx_fwd_probe_req(wvif, true);
chan_cur = 0;
do {
ret = send_scan_req(wvif, &hw_req->req, chan_cur);
if (ret > 0)
chan_cur += ret;
} while (ret > 0 && chan_cur < hw_req->req.n_channels);
- mutex_unlock(&wvif->wdev->conf_mutex);
mutex_unlock(&wvif->scan_lock);
+ mutex_unlock(&wvif->wdev->conf_mutex);
__ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
}
@@ -113,9 +117,6 @@ int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_AP)
return -EOPNOTSUPP;
- if (wvif->state == WFX_STATE_PRE_STA)
- return -EBUSY;
-
wvif->scan_req = hw_req;
schedule_work(&wvif->scan_work);
return 0;
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index 9d430346a58b..12e8a5b638f1 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
*/
+#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "sta.h"
@@ -37,117 +38,32 @@ u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
return ret;
}
-static void __wfx_free_event_queue(struct list_head *list)
+void wfx_cooling_timeout_work(struct work_struct *work)
{
- struct wfx_hif_event *event, *tmp;
+ struct wfx_dev *wdev = container_of(to_delayed_work(work),
+ struct wfx_dev,
+ cooling_timeout_work);
- list_for_each_entry_safe(event, tmp, list, link) {
- list_del(&event->link);
- kfree(event);
- }
-}
-
-static void wfx_free_event_queue(struct wfx_vif *wvif)
-{
- LIST_HEAD(list);
-
- spin_lock(&wvif->event_queue_lock);
- list_splice_init(&wvif->event_queue, &list);
- spin_unlock(&wvif->event_queue_lock);
-
- __wfx_free_event_queue(&list);
+ wdev->chip_frozen = true;
+ wfx_tx_unlock(wdev);
}
-void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad)
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd)
{
- int tx = 0;
-
- mutex_lock(&wvif->bss_loss_lock);
- cancel_work_sync(&wvif->bss_params_work);
-
- if (init) {
- schedule_delayed_work(&wvif->bss_loss_work, HZ);
- wvif->bss_loss_state = 0;
-
- if (!atomic_read(&wvif->wdev->tx_lock))
- tx = 1;
- } else if (good) {
- cancel_delayed_work_sync(&wvif->bss_loss_work);
- wvif->bss_loss_state = 0;
- schedule_work(&wvif->bss_params_work);
- } else if (bad) {
- /* FIXME Should we just keep going until we time out? */
- if (wvif->bss_loss_state < 3)
- tx = 1;
+ if (cmd == STA_NOTIFY_AWAKE) {
+ // Device has returned to a normal temperature
+ if (cancel_delayed_work(&wdev->cooling_timeout_work))
+ wfx_tx_unlock(wdev);
} else {
- cancel_delayed_work_sync(&wvif->bss_loss_work);
- wvif->bss_loss_state = 0;
- }
-
- /* Spit out a NULL packet to our AP if necessary */
- // FIXME: call ieee80211_beacon_loss/ieee80211_connection_loss instead
- if (tx) {
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_control control = { };
-
- wvif->bss_loss_state++;
-
- skb = ieee80211_nullfunc_get(wvif->wdev->hw, wvif->vif, false);
- if (!skb)
- goto end;
- hdr = (struct ieee80211_hdr *)skb->data;
- memset(IEEE80211_SKB_CB(skb), 0,
- sizeof(*IEEE80211_SKB_CB(skb)));
- IEEE80211_SKB_CB(skb)->control.vif = wvif->vif;
- IEEE80211_SKB_CB(skb)->driver_rates[0].idx = 0;
- IEEE80211_SKB_CB(skb)->driver_rates[0].count = 1;
- IEEE80211_SKB_CB(skb)->driver_rates[1].idx = -1;
- rcu_read_lock(); // protect control.sta
- control.sta = ieee80211_find_sta(wvif->vif, hdr->addr1);
- wfx_tx(wvif->wdev->hw, &control, skb);
- rcu_read_unlock();
+ // Device is too hot
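+ // If it does not cool down within the 10s below,
+ // wfx_cooling_timeout_work() will mark the chip as frozen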
+ schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ);
+ wfx_tx_lock(wdev);
}
-end:
- mutex_unlock(&wvif->bss_loss_lock);
-}
-
-int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable)
-{
- wvif->fwd_probe_req = enable;
- return hif_set_rx_filter(wvif, wvif->filter_bssid,
- wvif->fwd_probe_req);
-}
-
-static int wfx_set_mcast_filter(struct wfx_vif *wvif,
- struct wfx_grp_addr_table *fp)
-{
- int i;
-
- // Temporary workaround for filters
- return hif_set_data_filtering(wvif, false, true);
-
- if (!fp->enable)
- return hif_set_data_filtering(wvif, false, true);
-
- for (i = 0; i < fp->num_addresses; i++)
- hif_set_mac_addr_condition(wvif, i, fp->address_list[i]);
- hif_set_uc_mc_bc_condition(wvif, 0,
- HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
- hif_set_config_data_filter(wvif, true, 0, BIT(1),
- BIT(fp->num_addresses) - 1);
- hif_set_data_filtering(wvif, true, true);
-
- return 0;
}
-void wfx_update_filtering(struct wfx_vif *wvif)
+static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
{
- int ret;
- int bf_enable;
- int bf_count;
- int n_filter_ies;
- struct hif_ie_table_entry filter_ies[] = {
+ const struct hif_ie_table_entry filter_ies[] = {
{
.ie_id = WLAN_EID_VENDOR_SPECIFIC,
.has_changed = 1,
@@ -167,40 +83,33 @@ void wfx_update_filtering(struct wfx_vif *wvif)
}
};
- if (wvif->state == WFX_STATE_PASSIVE)
- return;
-
- if (wvif->disable_beacon_filter) {
- bf_enable = 0;
- bf_count = 1;
- n_filter_ies = 0;
- } else if (wvif->vif->type != NL80211_IFTYPE_STATION) {
- bf_enable = HIF_BEACON_FILTER_ENABLE | HIF_BEACON_FILTER_AUTO_ERP;
- bf_count = 0;
- n_filter_ies = 2;
+ if (!filter_beacon) {
+ hif_beacon_filter_control(wvif, 0, 1);
} else {
- bf_enable = HIF_BEACON_FILTER_ENABLE;
- bf_count = 0;
- n_filter_ies = 3;
+ hif_set_beacon_filter_table(wvif, 3, filter_ies);
+ hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0);
}
-
- ret = hif_set_rx_filter(wvif, wvif->filter_bssid, wvif->fwd_probe_req);
- if (!ret)
- ret = hif_set_beacon_filter_table(wvif, n_filter_ies, filter_ies);
- if (!ret)
- ret = hif_beacon_filter_control(wvif, bf_enable, bf_count);
- if (!ret)
- ret = wfx_set_mcast_filter(wvif, &wvif->mcast_filter);
- if (ret)
- dev_err(wvif->wdev->dev, "update filtering failed: %d\n", ret);
}
-static void wfx_update_filtering_work(struct work_struct *work)
+static void wfx_filter_mcast(struct wfx_vif *wvif, bool filter_mcast)
{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- update_filtering_work);
+ int i;
- wfx_update_filtering(wvif);
+ // Temporary workaround for filters
+ hif_set_data_filtering(wvif, false, true);
+ return;
+
+ if (!filter_mcast) {
+ hif_set_data_filtering(wvif, false, true);
+ return;
+ }
+ for (i = 0; i < wvif->filter_mcast_count; i++)
+ hif_set_mac_addr_condition(wvif, i, wvif->filter_mcast_addr[i]);
+ hif_set_uc_mc_bc_condition(wvif, 0,
+ HIF_FILTER_UNICAST | HIF_FILTER_BROADCAST);
+ hif_set_config_data_filter(wvif, true, 0, BIT(1),
+ BIT(wvif->filter_mcast_count) - 1);
+ hif_set_data_filtering(wvif, true, true);
}
u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
@@ -213,72 +122,127 @@ u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
int count = netdev_hw_addr_list_count(mc_list);
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- memset(&wvif->mcast_filter, 0x00, sizeof(wvif->mcast_filter));
- if (!count ||
- count > ARRAY_SIZE(wvif->mcast_filter.address_list))
+ if (count > ARRAY_SIZE(wvif->filter_mcast_addr)) {
+ wvif->filter_mcast_count = 0;
continue;
+ }
+ wvif->filter_mcast_count = count;
i = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
- ether_addr_copy(wvif->mcast_filter.address_list[i],
- ha->addr);
+ ether_addr_copy(wvif->filter_mcast_addr[i], ha->addr);
i++;
}
- wvif->mcast_filter.enable = true;
- wvif->mcast_filter.num_addresses = count;
}
return 0;
}
-void wfx_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 unused)
+void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 unused)
{
struct wfx_vif *wvif = NULL;
struct wfx_dev *wdev = hw->priv;
+ bool filter_bssid, filter_prbreq, filter_beacon, filter_mcast;
+
+ // Notes:
+ // - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
+ // - PS-Poll (FIF_PSPOLL) are never filtered
+ // - RTS, CTS and Ack (FIF_CONTROL) are always filtered
+ // - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
+ // - Firmware does not (yet) allow forwarding unicast traffic sent to
+ // other stations (a.k.a. promiscuous mode)
+ *total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS |
+ FIF_PROBE_REQ | FIF_PSPOLL;
- *total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_PROBE_REQ;
-
+ mutex_lock(&wdev->conf_mutex);
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
mutex_lock(&wvif->scan_lock);
- wvif->filter_bssid = (*total_flags &
- (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 0 : 1;
- wvif->disable_beacon_filter = !(*total_flags & FIF_PROBE_REQ);
- wfx_fwd_probe_req(wvif, true);
- wfx_update_filtering(wvif);
+
+ // Note: FIF_BCN_PRBRESP_PROMISC covers probe response and
+ // beacons from other BSS
+ if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+ filter_beacon = false;
+ else
+ filter_beacon = true;
+ wfx_filter_beacon(wvif, filter_beacon);
+
+ if (*total_flags & FIF_ALLMULTI) {
+ filter_mcast = false;
+ } else if (!wvif->filter_mcast_count) {
+ dev_dbg(wdev->dev, "disabling unconfigured multicast filter");
+ filter_mcast = false;
+ } else {
+ filter_mcast = true;
+ }
+ wfx_filter_mcast(wvif, filter_mcast);
+
+ if (*total_flags & FIF_OTHER_BSS)
+ filter_bssid = false;
+ else
+ filter_bssid = true;
+
+ // In AP mode, the chip can reply to probe requests itself
+ if (*total_flags & FIF_PROBE_REQ &&
+ wvif->vif->type == NL80211_IFTYPE_AP) {
+ dev_dbg(wdev->dev, "do not forward probe request in AP mode\n");
+ *total_flags &= ~FIF_PROBE_REQ;
+ }
+
+ if (*total_flags & FIF_PROBE_REQ)
+ filter_prbreq = false;
+ else
+ filter_prbreq = true;
+ hif_set_rx_filter(wvif, filter_bssid, filter_prbreq);
+
mutex_unlock(&wvif->scan_lock);
}
+ mutex_unlock(&wdev->conf_mutex);
}
-static int wfx_update_pm(struct wfx_vif *wvif)
+int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
{
- struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
- bool ps = conf->flags & IEEE80211_CONF_PS;
- int ps_timeout = conf->dynamic_ps_timeout;
struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
+ struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
- WARN_ON(conf->dynamic_ps_timeout < 0);
- if (wvif->state != WFX_STATE_STA || !wvif->bss_params.aid)
- return 0;
- if (!ps)
- ps_timeout = 0;
- if (wvif->uapsd_mask)
- ps_timeout = 0;
-
- // Kernel disable powersave when an AP is in use. In contrary, it is
- // absolutely necessary to enable legacy powersave for WF200 if channels
- // are differents.
+ WARN(!wvif->vif->bss_conf.assoc && enable_ps,
+ "enable_ps is reliable only if associated");
if (wdev_to_wvif(wvif->wdev, 0))
chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
if (wdev_to_wvif(wvif->wdev, 1))
chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
wvif->vif->type != NL80211_IFTYPE_AP) {
- ps = true;
- ps_timeout = 0;
+ // It is necessary to enable powersave if the channels
+ // are different.
+ if (enable_ps)
+ *enable_ps = true;
+ if (wvif->bss_not_support_ps_poll)
+ return 30;
+ else
+ return 0;
}
+ if (enable_ps)
+ *enable_ps = wvif->vif->bss_conf.ps;
+ if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps)
+ return conf->dynamic_ps_timeout;
+ else
+ return -1;
+}
+
+int wfx_update_pm(struct wfx_vif *wvif)
+{
+ int ps_timeout;
+ bool ps;
+
+ if (!wvif->vif->bss_conf.assoc)
+ return 0;
+ ps_timeout = wfx_get_ps_timeout(wvif, &ps);
+ if (!ps)
+ ps_timeout = 0;
+ WARN_ON(ps_timeout < 0);
+ if (wvif->uapsd_mask)
+ ps_timeout = 0;
if (!wait_for_completion_timeout(&wvif->set_pm_mode_complete,
TU_TO_JIFFIES(512)))
@@ -287,18 +251,25 @@ static int wfx_update_pm(struct wfx_vif *wvif)
return hif_set_pm(wvif, ps, ps_timeout);
}
+static void wfx_update_pm_work(struct work_struct *work)
+{
+ struct wfx_vif *wvif = container_of(work, struct wfx_vif,
+ update_pm_work);
+
+ wfx_update_pm(wvif);
+}
+
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
int old_uapsd = wvif->uapsd_mask;
WARN_ON(queue >= hw->queues);
mutex_lock(&wdev->conf_mutex);
assign_bit(queue, &wvif->uapsd_mask, params->uapsd);
- memcpy(&wvif->edca_params[queue], params, sizeof(*params));
hif_set_edca_queue_params(wvif, queue, params);
if (wvif->vif->type == NL80211_IFTYPE_STATION &&
old_uapsd != wvif->uapsd_mask) {
@@ -319,32 +290,9 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
-static int __wfx_flush(struct wfx_dev *wdev, bool drop)
-{
- for (;;) {
- if (drop)
- wfx_tx_queues_clear(wdev);
- if (wait_event_timeout(wdev->tx_queue_stats.wait_link_id_empty,
- wfx_tx_queues_is_empty(wdev),
- 2 * HZ) <= 0)
- return -ETIMEDOUT;
- wfx_tx_flush(wdev);
- if (wfx_tx_queues_is_empty(wdev))
- return 0;
- dev_warn(wdev->dev, "frames queued while flushing tx queues");
- }
-}
-
-void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
-{
- // FIXME: only flush requested vif and queues
- __wfx_flush(hw->priv, drop);
-}
-
/* WSM callbacks */
-static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
{
/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
* RSSI = RCPI / 2 - 110
@@ -360,100 +308,23 @@ static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
ieee80211_cqm_rssi_notify(wvif->vif, cqm_evt, rcpi_rssi, GFP_KERNEL);
}
-static void wfx_event_handler_work(struct work_struct *work)
+static void wfx_beacon_loss_work(struct work_struct *work)
{
- struct wfx_vif *wvif =
- container_of(work, struct wfx_vif, event_handler_work);
- struct wfx_hif_event *event;
-
- LIST_HEAD(list);
-
- spin_lock(&wvif->event_queue_lock);
- list_splice_init(&wvif->event_queue, &list);
- spin_unlock(&wvif->event_queue_lock);
-
- list_for_each_entry(event, &list, link) {
- switch (event->evt.event_id) {
- case HIF_EVENT_IND_BSSLOST:
- cancel_work_sync(&wvif->unjoin_work);
- mutex_lock(&wvif->scan_lock);
- wfx_cqm_bssloss_sm(wvif, 1, 0, 0);
- mutex_unlock(&wvif->scan_lock);
- break;
- case HIF_EVENT_IND_BSSREGAINED:
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
- cancel_work_sync(&wvif->unjoin_work);
- break;
- case HIF_EVENT_IND_RCPI_RSSI:
- wfx_event_report_rssi(wvif,
- event->evt.event_data.rcpi_rssi);
- break;
- case HIF_EVENT_IND_PS_MODE_ERROR:
- dev_warn(wvif->wdev->dev,
- "error while processing power save request\n");
- break;
- default:
- dev_warn(wvif->wdev->dev,
- "unhandled event indication: %.2x\n",
- event->evt.event_id);
- break;
- }
- }
- __wfx_free_event_queue(&list);
-}
-
-static void wfx_bss_loss_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- bss_loss_work.work);
-
- ieee80211_connection_loss(wvif->vif);
-}
-
-static void wfx_bss_params_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif,
- bss_params_work);
+ struct wfx_vif *wvif = container_of(to_delayed_work(work),
+ struct wfx_vif, beacon_loss_work);
+ struct ieee80211_bss_conf *bss_conf = &wvif->vif->bss_conf;
- mutex_lock(&wvif->wdev->conf_mutex);
- wvif->bss_params.bss_flags.lost_count_only = 1;
- hif_set_bss_params(wvif, &wvif->bss_params);
- wvif->bss_params.bss_flags.lost_count_only = 0;
- mutex_unlock(&wvif->wdev->conf_mutex);
+ ieee80211_beacon_loss(wvif->vif);
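+ // Keep notifying mac80211 every beacon interval until this work is
+ // cancelled (see wfx_reset())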
+ schedule_delayed_work(to_delayed_work(work),
+ msecs_to_jiffies(bss_conf->beacon_int));
}
-static void wfx_do_unjoin(struct wfx_vif *wvif)
+void wfx_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx)
{
- mutex_lock(&wvif->wdev->conf_mutex);
-
- if (!wvif->state)
- goto done;
-
- if (wvif->state == WFX_STATE_AP)
- goto done;
-
- cancel_work_sync(&wvif->update_filtering_work);
- wvif->state = WFX_STATE_PASSIVE;
-
- /* Unjoin is a reset. */
- wfx_tx_flush(wvif->wdev);
- hif_keep_alive_period(wvif, 0);
- hif_reset(wvif, false);
- wfx_tx_policy_init(wvif);
- hif_set_macaddr(wvif, wvif->vif->addr);
- wfx_free_event_queue(wvif);
- cancel_work_sync(&wvif->event_handler_work);
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- /* Disable Block ACKs */
- hif_set_block_ack_policy(wvif, 0, 0);
-
- wvif->disable_beacon_filter = false;
- wfx_update_filtering(wvif);
- memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
-
-done:
- mutex_unlock(&wvif->wdev->conf_mutex);
+ hif_wep_default_key_id(wvif, idx);
}
static void wfx_set_mfp(struct wfx_vif *wvif,
@@ -472,8 +343,7 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
rcu_read_lock();
if (bss)
- ptr = (const u16 *) ieee80211_bss_get_ie(bss,
- WLAN_EID_RSN);
+ ptr = (const u16 *)ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
if (ptr) {
ptr += pairwise_cipher_suite_count_offset;
@@ -487,6 +357,24 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
hif_set_mfp(wvif, mfpc, mfpr);
}
+void wfx_reset(struct wfx_vif *wvif)
+{
+ struct wfx_dev *wdev = wvif->wdev;
+
+ wfx_tx_lock_flush(wdev);
+ hif_reset(wvif, false);
+ wfx_tx_policy_init(wvif);
+ if (wvif_count(wdev) <= 1)
+ hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
+ wfx_tx_unlock(wdev);
+ wvif->join_in_progress = false;
+ wvif->bss_not_support_ps_poll = false;
+ cancel_delayed_work_sync(&wvif->beacon_loss_work);
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_update_pm(wvif);
+}
+
static void wfx_do_join(struct wfx_vif *wvif)
{
int ret;
@@ -498,9 +386,6 @@ static void wfx_do_join(struct wfx_vif *wvif)
wfx_tx_lock_flush(wvif->wdev);
- if (wvif->state)
- wfx_do_unjoin(wvif);
-
bss = cfg80211_get_bss(wvif->wdev->hw->wiphy, wvif->channel,
conf->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
@@ -509,109 +394,72 @@ static void wfx_do_join(struct wfx_vif *wvif)
return;
}
- mutex_lock(&wvif->wdev->conf_mutex);
-
- /* Sanity check beacon interval */
- if (!wvif->beacon_int)
- wvif->beacon_int = 1;
-
rcu_read_lock(); // protect ssidie
- if (!conf->ibss_joined)
+ if (bss)
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (ssidie) {
ssidlen = ssidie[1];
- memcpy(ssid, &ssidie[2], ssidie[1]);
+ if (ssidlen > IEEE80211_MAX_SSID_LEN)
+ ssidlen = IEEE80211_MAX_SSID_LEN;
+ memcpy(ssid, &ssidie[2], ssidlen);
}
rcu_read_unlock();
- wfx_tx_flush(wvif->wdev);
-
- if (wvif_count(wvif->wdev) <= 1)
- hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
-
wfx_set_mfp(wvif, bss);
+ cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
- wvif->wdev->tx_burst_idx = -1;
+ wvif->join_in_progress = true;
ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
if (ret) {
ieee80211_connection_loss(wvif->vif);
- wvif->join_complete_status = -1;
- /* Tx lock still held, unjoin will clear it. */
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wvif->wdev);
+ wfx_reset(wvif);
} else {
- wvif->join_complete_status = 0;
- if (wvif->vif->type == NL80211_IFTYPE_ADHOC)
- wvif->state = WFX_STATE_IBSS;
- else
- wvif->state = WFX_STATE_PRE_STA;
- wfx_tx_unlock(wvif->wdev);
-
- /* Upload keys */
- wfx_upload_keys(wvif);
-
/* Due to beacon filtering it is possible that the
* AP's beacon is not known for the mac80211 stack.
* Disable filtering temporary to make sure the stack
* receives at least one
*/
- wvif->disable_beacon_filter = true;
+ wfx_filter_beacon(wvif, false);
}
- wfx_update_filtering(wvif);
-
- mutex_unlock(&wvif->wdev->conf_mutex);
- if (bss)
- cfg80211_put_bss(wvif->wdev->hw->wiphy, bss);
-}
-
-static void wfx_unjoin_work(struct work_struct *work)
-{
- struct wfx_vif *wvif = container_of(work, struct wfx_vif, unjoin_work);
-
- wfx_do_unjoin(wvif);
wfx_tx_unlock(wvif->wdev);
}
int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
spin_lock_init(&sta_priv->lock);
sta_priv->vif_id = wvif->id;
- // FIXME: in station mode, the current API interprets new link-id as a
- // tdls peer.
- if (vif->type == NL80211_IFTYPE_STATION)
+ // In station mode, the firmware interprets new link-id as a TDLS peer.
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0;
sta_priv->link_id = ffz(wvif->link_id_map);
wvif->link_id_map |= BIT(sta_priv->link_id);
WARN_ON(!sta_priv->link_id);
- WARN_ON(sta_priv->link_id >= WFX_MAX_STA_IN_AP_MODE);
+ WARN_ON(sta_priv->link_id >= HIF_LINK_ID_MAX);
hif_map_link(wvif, sta->addr, 0, sta_priv->link_id);
- spin_lock_bh(&wvif->ps_state_lock);
- if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
- IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
- wvif->sta_asleep_mask |= BIT(sta_priv->link_id);
- spin_unlock_bh(&wvif->ps_state_lock);
return 0;
}
int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
int i;
for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
if (sta_priv->buffered[i])
- dev_warn(wvif->wdev->dev, "release station while %d pending frame on queue %d",
- sta_priv->buffered[i], i);
- // FIXME: see note in wfx_sta_add()
- if (vif->type == NL80211_IFTYPE_STATION)
+ // This is not an error if it is paired with the trace in
+ // wfx_tx_update_sta()
+ dev_dbg(wvif->wdev->dev, "release station while %d pending frames on queue %d",
+ sta_priv->buffered[i], i);
+ // See note in wfx_sta_add()
+ if (!sta_priv->link_id)
return 0;
// FIXME add a mutex?
hif_map_link(wvif, sta->addr, 1, sta_priv->link_id);
@@ -619,50 +467,10 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
-static int wfx_start_ap(struct wfx_vif *wvif)
-{
- int ret;
-
- wvif->beacon_int = wvif->vif->bss_conf.beacon_int;
- wvif->wdev->tx_burst_idx = -1;
- ret = hif_start(wvif, &wvif->vif->bss_conf, wvif->channel);
- if (ret)
- return ret;
- ret = wfx_upload_keys(wvif);
- if (ret)
- return ret;
- if (wvif_count(wvif->wdev) <= 1)
- hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
- wvif->state = WFX_STATE_AP;
- wfx_update_filtering(wvif);
- return 0;
-}
-
-static int wfx_update_beaconing(struct wfx_vif *wvif)
-{
- if (wvif->vif->type != NL80211_IFTYPE_AP)
- return 0;
- if (wvif->state == WFX_STATE_AP &&
- wvif->beacon_int == wvif->vif->bss_conf.beacon_int)
- return 0;
- wfx_tx_lock_flush(wvif->wdev);
- hif_reset(wvif, false);
- wfx_tx_policy_init(wvif);
- wvif->state = WFX_STATE_PASSIVE;
- wfx_start_ap(wvif);
- wfx_tx_unlock(wvif->wdev);
- return 0;
-}
-
static int wfx_upload_ap_templates(struct wfx_vif *wvif)
{
struct sk_buff *skb;
- if (wvif->vif->type == NL80211_IFTYPE_STATION ||
- wvif->vif->type == NL80211_IFTYPE_MONITOR ||
- wvif->vif->type == NL80211_IFTYPE_UNSPECIFIED)
- return 0;
-
skb = ieee80211_beacon_get(wvif->wdev->hw, wvif->vif);
if (!skb)
return -ENOMEM;
@@ -679,52 +487,77 @@ static int wfx_upload_ap_templates(struct wfx_vif *wvif)
return 0;
}
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+ struct wfx_dev *wdev = wvif->wdev;
+ int ret;
+
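+ // Powersave depends on the channels used by the other interfaces;
+ // refresh it on every vif (see wfx_get_ps_timeout())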
+ wvif = NULL;
+ while ((wvif = wvif_iterate(wdev, wvif)) != NULL)
+ wfx_update_pm(wvif);
+ wvif = (struct wfx_vif *)vif->drv_priv;
+ wfx_upload_ap_templates(wvif);
+ ret = hif_start(wvif, &vif->bss_conf, wvif->channel);
+ if (ret > 0)
+ return -EIO;
+ return ret;
+}
+
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
+
+ wfx_reset(wvif);
+}
+
static void wfx_join_finalize(struct wfx_vif *wvif,
struct ieee80211_bss_conf *info)
{
- struct ieee80211_sta *sta = NULL;
-
- wvif->beacon_int = info->beacon_int;
- rcu_read_lock(); // protect sta
- if (info->bssid && !info->ibss_joined)
- sta = ieee80211_find_sta(wvif->vif, info->bssid);
- if (sta)
- wvif->bss_params.operational_rate_set =
- wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
- else
- wvif->bss_params.operational_rate_set = -1;
- rcu_read_unlock();
- if (sta &&
- info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
- hif_dual_cts_protection(wvif, true);
- else
- hif_dual_cts_protection(wvif, false);
+ wvif->join_in_progress = false;
+ hif_set_association_mode(wvif, info);
+ hif_keep_alive_period(wvif, 0);
+ // beacon_loss_count is set to 7 in net/mac80211/mlme.c. Use the
+ // same value here.
+ hif_set_bss_params(wvif, info->aid, 7);
+ hif_set_beacon_wakeup_period(wvif, 1, 1);
+ wfx_update_pm(wvif);
+}
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
- cancel_work_sync(&wvif->unjoin_work);
+int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- wvif->bss_params.beacon_lost_count = 20;
- wvif->bss_params.aid = info->aid;
+ wfx_upload_ap_templates(wvif);
+ wfx_do_join(wvif);
+ return 0;
+}
- hif_set_association_mode(wvif, info);
+void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
- if (!info->ibss_joined) {
- hif_keep_alive_period(wvif, 30 /* sec */);
- hif_set_bss_params(wvif, &wvif->bss_params);
- hif_set_beacon_wakeup_period(wvif, info->dtim_period,
- info->dtim_period);
- wfx_update_pm(wvif);
+ wfx_reset(wvif);
+}
+
+static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable)
+{
+ // The driver has Content After DTIM Beacon frames queued and is
+ // waiting for a signal from the firmware. Since we are going to stop
+ // sending beacons, this signal will never happen. See also
+ // wfx_suspend_resume_mc()
+ if (!enable && wfx_tx_queues_has_cab(wvif)) {
+ wvif->after_dtim_tx_allowed = true;
+ wfx_bh_request_tx(wvif->wdev);
}
+ hif_beacon_transmit(wvif, enable);
}
-void wfx_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- bool do_join = false;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
int i;
mutex_lock(&wdev->conf_mutex);
@@ -742,92 +575,52 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BEACON ||
- changed & BSS_CHANGED_AP_PROBE_RESP ||
- changed & BSS_CHANGED_BSSID ||
- changed & BSS_CHANGED_SSID ||
- changed & BSS_CHANGED_IBSS) {
- wvif->beacon_int = info->beacon_int;
- wfx_update_beaconing(wvif);
- wfx_upload_ap_templates(wvif);
- wfx_fwd_probe_req(wvif, false);
+ if (changed & BSS_CHANGED_BASIC_RATES ||
+ changed & BSS_CHANGED_BEACON_INT ||
+ changed & BSS_CHANGED_BSSID) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ wfx_do_join(wvif);
}
- if (changed & BSS_CHANGED_BEACON_ENABLED &&
- wvif->state != WFX_STATE_IBSS)
- hif_beacon_transmit(wvif, info->enable_beacon);
+ if (changed & BSS_CHANGED_AP_PROBE_RESP ||
+ changed & BSS_CHANGED_BEACON)
+ wfx_upload_ap_templates(wvif);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ wfx_enable_beacon(wvif, info->enable_beacon);
- if (changed & BSS_CHANGED_BEACON_INFO)
+ if (changed & BSS_CHANGED_BEACON_INFO) {
+ if (vif->type != NL80211_IFTYPE_STATION)
+ dev_warn(wdev->dev, "%s: misunderstood change: BEACON_INFO\n",
+ __func__);
hif_set_beacon_wakeup_period(wvif, info->dtim_period,
info->dtim_period);
-
- /* assoc/disassoc, or maybe AID changed */
- if (changed & BSS_CHANGED_ASSOC) {
- wfx_tx_lock_flush(wdev);
- wvif->wep_default_key_id = -1;
- wfx_tx_unlock(wdev);
+ // We temporarily forwarded beacons for the join process. This is no
+ // longer necessary.
+ wfx_filter_beacon(wvif, true);
}
- if (changed & BSS_CHANGED_ASSOC && !info->assoc &&
- (wvif->state == WFX_STATE_STA || wvif->state == WFX_STATE_IBSS)) {
- /* Shedule unjoin work */
- wfx_tx_lock(wdev);
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wdev);
- } else {
- if (changed & BSS_CHANGED_BEACON_INT) {
- if (info->ibss_joined)
- do_join = true;
- else if (wvif->state == WFX_STATE_AP)
- wfx_update_beaconing(wvif);
- }
-
- if (changed & BSS_CHANGED_BSSID)
- do_join = true;
-
- if (changed & BSS_CHANGED_ASSOC ||
- changed & BSS_CHANGED_BSSID ||
- changed & BSS_CHANGED_IBSS ||
- changed & BSS_CHANGED_BASIC_RATES ||
- changed & BSS_CHANGED_HT) {
- if (info->assoc) {
- if (wvif->state < WFX_STATE_PRE_STA) {
- ieee80211_connection_loss(vif);
- mutex_unlock(&wdev->conf_mutex);
- return;
- } else if (wvif->state == WFX_STATE_PRE_STA) {
- wvif->state = WFX_STATE_STA;
- }
- } else {
- do_join = true;
- }
-
- if (info->assoc || info->ibss_joined)
- wfx_join_finalize(wvif, info);
- else
- memset(&wvif->bss_params, 0,
- sizeof(wvif->bss_params));
- }
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc || info->ibss_joined)
+ wfx_join_finalize(wvif, info);
+ else if (!info->assoc && vif->type == NL80211_IFTYPE_STATION)
+ wfx_reset(wvif);
+ else
+ dev_warn(wdev->dev, "%s: misunderstood change: ASSOC\n",
+ __func__);
}
- if (changed & BSS_CHANGED_ASSOC ||
- changed & BSS_CHANGED_ERP_CTS_PROT ||
- changed & BSS_CHANGED_ERP_PREAMBLE) {
- u8 erp_ie[3] = { WLAN_EID_ERP_INFO, 1, 0 };
+ if (changed & BSS_CHANGED_KEEP_ALIVE)
+ hif_keep_alive_period(wvif, info->max_idle_period *
+ USEC_PER_TU / USEC_PER_MSEC);
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
hif_erp_use_protection(wvif, info->use_cts_prot);
- if (info->use_cts_prot)
- erp_ie[2] |= WLAN_ERP_USE_PROTECTION;
- if (info->use_short_preamble)
- erp_ie[2] |= WLAN_ERP_BARKER_PREAMBLE;
- if (wvif->vif->type != NL80211_IFTYPE_STATION)
- hif_update_ie_beacon(wvif, erp_ie, sizeof(erp_ie));
- }
- if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_ERP_SLOT)
+ if (changed & BSS_CHANGED_ERP_SLOT)
hif_slot_time(wvif, info->use_short_slot ? 9 : 20);
- if (changed & BSS_CHANGED_ASSOC || changed & BSS_CHANGED_CQM)
+ if (changed & BSS_CHANGED_CQM)
hif_set_rcpi_rssi_threshold(wvif, info->cqm_rssi_thold,
info->cqm_rssi_hyst);
@@ -838,31 +631,6 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw,
wfx_update_pm(wvif);
mutex_unlock(&wdev->conf_mutex);
-
- if (do_join)
- wfx_do_join(wvif);
-}
-
-static void wfx_ps_notify_sta(struct wfx_vif *wvif,
- enum sta_notify_cmd notify_cmd, int link_id)
-{
- spin_lock_bh(&wvif->ps_state_lock);
- if (notify_cmd == STA_NOTIFY_SLEEP)
- wvif->sta_asleep_mask |= BIT(link_id);
- else // notify_cmd == STA_NOTIFY_AWAKE
- wvif->sta_asleep_mask &= ~BIT(link_id);
- spin_unlock_bh(&wvif->ps_state_lock);
- if (notify_cmd == STA_NOTIFY_AWAKE)
- wfx_bh_request_tx(wvif->wdev);
-}
-
-void wfx_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta)
-{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
- struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *) &sta->drv_priv;
-
- wfx_ps_notify_sta(wvif, notify_cmd, sta_priv->link_id);
}
static int wfx_update_tim(struct wfx_vif *wvif)
@@ -873,10 +641,8 @@ static int wfx_update_tim(struct wfx_vif *wvif)
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
&tim_offset, &tim_length);
- if (!skb) {
- __wfx_flush(wvif->wdev, true);
+ if (!skb)
return -ENOENT;
- }
tim_ptr = skb->data + tim_offset;
if (tim_offset && tim_length >= 6) {
@@ -886,7 +652,7 @@ static int wfx_update_tim(struct wfx_vif *wvif)
tim_ptr[2] = 0;
/* Set/reset aid0 bit */
- if (wfx_tx_queues_get_after_dtim(wvif))
+ if (wfx_tx_queues_has_cab(wvif))
tim_ptr[4] |= 1;
else
tim_ptr[4] &= ~1;
@@ -917,7 +683,9 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
{
- WARN(!wfx_tx_queues_get_after_dtim(wvif), "incorrect sequence");
+ if (notify_cmd != STA_NOTIFY_AWAKE)
+ return;
+ WARN(!wfx_tx_queues_has_cab(wvif), "incorrect sequence");
WARN(wvif->after_dtim_tx_allowed, "incorrect sequence");
wvif->after_dtim_tx_allowed = true;
wfx_bh_request_tx(wvif->wdev);
@@ -958,7 +726,7 @@ void wfx_change_chanctx(struct ieee80211_hw *hw,
int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct ieee80211_channel *ch = conf->def.chan;
WARN(wvif->channel, "channel overwrite");
@@ -971,7 +739,7 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf)
{
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct ieee80211_channel *ch = conf->def.chan;
WARN(wvif->channel != ch, "channel mismatch");
@@ -987,7 +755,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int i, ret = 0;
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
@@ -1021,33 +789,18 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif->wdev = wdev;
wvif->link_id_map = 1; // link-id 0 is reserved for multicast
- spin_lock_init(&wvif->ps_state_lock);
INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
-
- memset(&wvif->bss_params, 0, sizeof(wvif->bss_params));
-
- mutex_init(&wvif->bss_loss_lock);
- INIT_DELAYED_WORK(&wvif->bss_loss_work, wfx_bss_loss_work);
-
- wvif->wep_default_key_id = -1;
- INIT_WORK(&wvif->wep_key_work, wfx_wep_key_work);
-
- spin_lock_init(&wvif->event_queue_lock);
- INIT_LIST_HEAD(&wvif->event_queue);
- INIT_WORK(&wvif->event_handler_work, wfx_event_handler_work);
+ INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work);
init_completion(&wvif->set_pm_mode_complete);
complete(&wvif->set_pm_mode_complete);
- INIT_WORK(&wvif->update_filtering_work, wfx_update_filtering_work);
- INIT_WORK(&wvif->bss_params_work, wfx_bss_params_work);
- INIT_WORK(&wvif->unjoin_work, wfx_unjoin_work);
+ INIT_WORK(&wvif->update_pm_work, wfx_update_pm_work);
INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
mutex_init(&wvif->scan_lock);
init_completion(&wvif->scan_complete);
INIT_WORK(&wvif->scan_work, wfx_hw_scan_work);
- INIT_WORK(&wvif->tx_policy_upload_work, wfx_tx_policy_upload_work);
mutex_unlock(&wdev->conf_mutex);
hif_set_macaddr(wvif, vif->addr);
@@ -1060,50 +813,25 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
- // Combo force powersave mode. We can re-enable it now
- ret = wfx_update_pm(wvif);
}
return ret;
}
-void wfx_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct wfx_dev *wdev = hw->priv;
- struct wfx_vif *wvif = (struct wfx_vif *) vif->drv_priv;
+ struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
wait_for_completion_timeout(&wvif->set_pm_mode_complete, msecs_to_jiffies(300));
mutex_lock(&wdev->conf_mutex);
WARN(wvif->link_id_map != 1, "corrupted state");
- switch (wvif->state) {
- case WFX_STATE_PRE_STA:
- case WFX_STATE_STA:
- case WFX_STATE_IBSS:
- wfx_tx_lock_flush(wdev);
- if (!schedule_work(&wvif->unjoin_work))
- wfx_tx_unlock(wdev);
- break;
- case WFX_STATE_AP:
- wvif->sta_asleep_mask = 0;
- /* reset.link_id = 0; */
- hif_reset(wvif, false);
- break;
- default:
- break;
- }
-
- wvif->state = WFX_STATE_PASSIVE;
- wfx_tx_queues_wait_empty_vif(wvif);
- wfx_tx_unlock(wdev);
- /* FIXME: In add to reset MAC address, try to reset interface */
+ hif_reset(wvif, false);
hif_set_macaddr(wvif, NULL);
+ wfx_tx_policy_init(wvif);
- wfx_cqm_bssloss_sm(wvif, 0, 0, 0);
- cancel_work_sync(&wvif->unjoin_work);
- wfx_free_event_queue(wvif);
-
+ cancel_delayed_work_sync(&wvif->beacon_loss_work);
wdev->vif[wvif->id] = NULL;
wvif->vif = NULL;
@@ -1115,8 +843,6 @@ void wfx_remove_interface(struct ieee80211_hw *hw,
hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
hif_set_block_ack_policy(wvif, 0x00, 0x00);
- // Combo force powersave mode. We can re-enable it now
- wfx_update_pm(wvif);
}
}
@@ -1129,10 +855,5 @@ void wfx_stop(struct ieee80211_hw *hw)
{
struct wfx_dev *wdev = hw->priv;
- wfx_tx_lock_flush(wdev);
- mutex_lock(&wdev->conf_mutex);
- wfx_tx_queues_clear(wdev);
- mutex_unlock(&wdev->conf_mutex);
- wfx_tx_unlock(wdev);
- WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
+ wfx_tx_queues_check_empty(wdev);
}
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index cf99a8a74a81..8a20ad9ae017 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -10,34 +10,13 @@
#include <net/mac80211.h>
-#include "hif_api_cmd.h"
-
struct wfx_dev;
struct wfx_vif;
-enum wfx_state {
- WFX_STATE_PASSIVE = 0,
- WFX_STATE_PRE_STA,
- WFX_STATE_STA,
- WFX_STATE_IBSS,
- WFX_STATE_AP,
-};
-
-struct wfx_hif_event {
- struct list_head link;
- struct hif_ind_event evt;
-};
-
-struct wfx_grp_addr_table {
- bool enable;
- int num_addresses;
- u8 address_list[8][ETH_ALEN];
-};
-
struct wfx_sta_priv {
int link_id;
int vif_id;
- u8 buffered[IEEE80211_NUM_TIDS];
+ int buffered[IEEE80211_NUM_TIDS];
// Ensure atomicity of "buffered" and calls to ieee80211_sta_set_buffered()
spinlock_t lock;
};
@@ -47,6 +26,8 @@ int wfx_start(struct ieee80211_hw *hw);
void wfx_stop(struct ieee80211_hw *hw);
int wfx_config(struct ieee80211_hw *hw, u32 changed);
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+void wfx_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int idx);
u64 wfx_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list);
void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
@@ -54,8 +35,10 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop);
+int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+int wfx_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int wfx_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params);
void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -82,12 +65,13 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf);
// WSM Callbacks
+void wfx_cooling_timeout_work(struct work_struct *work);
+void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd);
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
+void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi);
// Other Helpers
-void wfx_cqm_bssloss_sm(struct wfx_vif *wvif, int init, int good, int bad);
-void wfx_update_filtering(struct wfx_vif *wvif);
-int wfx_fwd_probe_req(struct wfx_vif *wvif, bool enable);
+void wfx_reset(struct wfx_vif *wvif);
u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
#endif /* WFX_STA_H */
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index 30c6a13f0e22..0b6fbd518638 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -32,16 +32,16 @@
* xxx_name(XXX) \
* ...
*
- * 3. Instanciate that list_names:
+ * 3. Instantiate that list_names:
*
* list_names
*
- * 4. Redefine xxx_name() as a entry of array for __print_symbolic()
+ * 4. Redefine xxx_name() as an entry of array for __print_symbolic()
*
* #undef xxx_name
* #define xxx_name(msg) { msg, #msg },
*
- * 5. list_name can now nearlu be used with __print_symbolic() but,
+ * 5. list_name can now nearly be used with __print_symbolic() but,
* __print_symbolic() dislike last comma of list. So we define a new list
* with a dummy element:
*
@@ -104,8 +104,10 @@ hif_msg_list_enum
hif_mib_name(ARP_KEEP_ALIVE_PERIOD) \
hif_mib_name(BEACON_FILTER_ENABLE) \
hif_mib_name(BEACON_FILTER_TABLE) \
+ hif_mib_name(BEACON_STATS) \
hif_mib_name(BEACON_WAKEUP_PERIOD) \
hif_mib_name(BLOCK_ACK_POLICY) \
+ hif_mib_name(CCA_CONFIG) \
hif_mib_name(CONFIG_DATA_FILTER) \
hif_mib_name(COUNTERS_TABLE) \
hif_mib_name(CURRENT_TX_POWER_LEVEL) \
@@ -114,29 +116,32 @@ hif_msg_list_enum
hif_mib_name(DOT11_MAX_TRANSMIT_MSDU_LIFETIME) \
hif_mib_name(DOT11_RTS_THRESHOLD) \
hif_mib_name(DOT11_WEP_DEFAULT_KEY_ID) \
+ hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
+ hif_mib_name(EXTENDED_COUNTERS_TABLE) \
hif_mib_name(GL_BLOCK_ACK_INFO) \
hif_mib_name(GL_OPERATIONAL_POWER_MODE) \
hif_mib_name(GL_SET_MULTI_MSG) \
+ hif_mib_name(GRP_SEQ_COUNTER) \
hif_mib_name(INACTIVITY_TIMER) \
hif_mib_name(INTERFACE_PROTECTION) \
hif_mib_name(IPV4_ADDR_DATAFRAME_CONDITION) \
hif_mib_name(IPV6_ADDR_DATAFRAME_CONDITION) \
hif_mib_name(KEEP_ALIVE_PERIOD) \
hif_mib_name(MAC_ADDR_DATAFRAME_CONDITION) \
+ hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
+ hif_mib_name(MAX_TX_POWER_LEVEL) \
hif_mib_name(NON_ERP_PROTECTION) \
hif_mib_name(NS_IP_ADDRESSES_TABLE) \
hif_mib_name(OVERRIDE_INTERNAL_TX_RATE) \
+ hif_mib_name(PORT_DATAFRAME_CONDITION) \
hif_mib_name(PROTECTED_MGMT_POLICY) \
- hif_mib_name(RX_FILTER) \
hif_mib_name(RCPI_RSSI_THRESHOLD) \
+ hif_mib_name(RX_FILTER) \
hif_mib_name(SET_ASSOCIATION_MODE) \
hif_mib_name(SET_DATA_FILTERING) \
- hif_mib_name(ETHERTYPE_DATAFRAME_CONDITION) \
hif_mib_name(SET_HT_PROTECTION) \
- hif_mib_name(MAGIC_DATAFRAME_CONDITION) \
hif_mib_name(SET_TX_RATE_RETRY_POLICY) \
hif_mib_name(SET_UAPSD_INFORMATION) \
- hif_mib_name(PORT_DATAFRAME_CONDITION) \
hif_mib_name(SLOT_TIME) \
hif_mib_name(STATISTICS_TABLE) \
hif_mib_name(TEMPLATE_FRAME) \
@@ -169,7 +174,7 @@ DECLARE_EVENT_CLASS(hif_data,
int header_len;
__entry->tx_fill_level = tx_fill_level;
- __entry->msg_len = hif->len;
+ __entry->msg_len = le16_to_cpu(hif->len);
__entry->msg_id = hif->id;
__entry->if_id = hif->interface;
if (is_recv)
@@ -179,7 +184,7 @@ DECLARE_EVENT_CLASS(hif_data,
if (!is_recv &&
(__entry->msg_id == HIF_REQ_ID_READ_MIB ||
__entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
- __entry->mib = le16_to_cpup((u16 *) hif->body);
+ __entry->mib = le16_to_cpup((__le16 *)hif->body);
header_len = 4;
} else {
__entry->mib = -1;
@@ -193,8 +198,8 @@ DECLARE_EVENT_CLASS(hif_data,
TP_printk("%d:%d:%s_%s%s%s: %s%s (%d bytes)",
__entry->tx_fill_level,
__entry->if_id,
- __print_symbolic(__entry->msg_id, hif_msg_list),
__entry->msg_type,
+ __print_symbolic(__entry->msg_id, hif_msg_list),
__entry->mib != -1 ? "/" : "",
__entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
__print_hex(__entry->buf, __entry->buf_len),
@@ -382,8 +387,8 @@ TRACE_EVENT(tx_stats,
int i;
__entry->pkt_id = tx_cnf->packet_id;
- __entry->delay_media = tx_cnf->media_delay;
- __entry->delay_queue = tx_cnf->tx_queue_delay;
+ __entry->delay_media = le32_to_cpu(tx_cnf->media_delay);
+ __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay);
__entry->delay_fw = delay;
__entry->ack_failures = tx_cnf->ack_failures;
if (!tx_cnf->status || __entry->ack_failures)
@@ -409,7 +414,7 @@ TRACE_EVENT(tx_stats,
__entry->flags |= 0x10;
if (tx_cnf->status)
__entry->flags |= 0x20;
- if (tx_cnf->status == HIF_REQUEUE)
+ if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE)
__entry->flags |= 0x40;
),
TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/queue/total: %4dus/%4dus/%4dus",
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 8b85bb1abb9c..73e216733ce4 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -21,10 +21,7 @@
#include "main.h"
#include "queue.h"
#include "secure_link.h"
-#include "sta.h"
-#include "scan.h"
#include "hif_tx.h"
-#include "hif_api_general.h"
#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
#define USEC_PER_TU 1024
@@ -45,21 +42,24 @@ struct wfx_dev {
struct hif_ind_startup hw_caps;
struct wfx_hif hif;
struct sl_context sl;
- int chip_frozen;
+ struct delayed_work cooling_timeout_work;
+ bool poll_irq;
+ bool chip_frozen;
struct mutex conf_mutex;
struct wfx_hif_cmd hif_cmd;
struct wfx_queue tx_queue[4];
- struct wfx_queue_stats tx_queue_stats;
- int tx_burst_idx;
+ struct sk_buff_head tx_pending;
+ wait_queue_head_t tx_dequeue;
atomic_t tx_lock;
atomic_t packet_id;
u32 key_map;
- struct hif_req_add_key keys[MAX_KEY_ENTRIES];
struct hif_rx_stats rx_stats;
struct mutex rx_stats_lock;
+ struct hif_tx_power_loop_info tx_power_loop_info;
+ struct mutex tx_power_loop_info_lock;
};
struct wfx_vif {
@@ -67,42 +67,23 @@ struct wfx_vif {
struct ieee80211_vif *vif;
struct ieee80211_channel *channel;
int id;
- enum wfx_state state;
-
- int bss_loss_state;
- u32 bss_loss_confirm_id;
- struct mutex bss_loss_lock;
- struct delayed_work bss_loss_work;
u32 link_id_map;
bool after_dtim_tx_allowed;
- struct wfx_grp_addr_table mcast_filter;
+ bool join_in_progress;
- s8 wep_default_key_id;
- struct sk_buff *wep_pending_skb;
- struct work_struct wep_key_work;
+ struct delayed_work beacon_loss_work;
struct tx_policy_cache tx_policy_cache;
struct work_struct tx_policy_upload_work;
- u32 sta_asleep_mask;
- spinlock_t ps_state_lock;
struct work_struct update_tim_work;
- int beacon_int;
- bool filter_bssid;
- bool fwd_probe_req;
- bool disable_beacon_filter;
- struct work_struct update_filtering_work;
+ int filter_mcast_count;
+ u8 filter_mcast_addr[8][ETH_ALEN];
unsigned long uapsd_mask;
- struct ieee80211_tx_queue_params edca_params[IEEE80211_NUM_ACS];
- struct hif_req_set_bss_params bss_params;
- struct work_struct bss_params_work;
-
- int join_complete_status;
- struct work_struct unjoin_work;
/* avoid some operations in parallel with scan */
struct mutex scan_lock;
@@ -111,11 +92,9 @@ struct wfx_vif {
bool scan_abort;
struct ieee80211_scan_request *scan_req;
+ bool bss_not_support_ps_poll;
+ struct work_struct update_pm_work;
struct completion set_pm_mode_complete;
-
- struct list_head event_queue;
- spinlock_t event_queue_lock;
- struct work_struct event_handler_work;
};
static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
diff --git a/drivers/staging/wilc1000/hif.c b/drivers/staging/wilc1000/hif.c
index 6c7de2f8d3f2..d025a3093015 100644
--- a/drivers/staging/wilc1000/hif.c
+++ b/drivers/staging/wilc1000/hif.c
@@ -11,6 +11,8 @@
#define WILC_FALSE_FRMWR_CHANNEL 100
+#define WILC_SCAN_WID_LIST_SIZE 6
+
struct wilc_rcvd_mac_info {
u8 status;
};
@@ -151,7 +153,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
void *user_arg, struct cfg80211_scan_request *request)
{
int result = 0;
- struct wid wid_list[5];
+ struct wid wid_list[WILC_SCAN_WID_LIST_SIZE];
u32 index = 0;
u32 i, scan_timeout;
u8 *buffer;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 89183b3b178f..45ba07c6ec27 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1228,18 +1228,20 @@ void iscsit_print_session_params(struct iscsi_session *sess)
iscsi_dump_sess_ops(sess->sess_ops);
}
-static int iscsit_do_rx_data(
+int rx_data(
struct iscsi_conn *conn,
- struct iscsi_data_count *count)
+ struct kvec *iov,
+ int iov_count,
+ int data)
{
- int data = count->data_length, rx_loop = 0, total_rx = 0;
+ int rx_loop = 0, total_rx = 0;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data);
+ iov_iter_kvec(&msg.msg_iter, READ, iov, iov_count, data);
while (msg_data_left(&msg)) {
rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
@@ -1256,26 +1258,6 @@ static int iscsit_do_rx_data(
return total_rx;
}
-int rx_data(
- struct iscsi_conn *conn,
- struct kvec *iov,
- int iov_count,
- int data)
-{
- struct iscsi_data_count c;
-
- if (!conn || !conn->sock || !conn->conn_ops)
- return -1;
-
- memset(&c, 0, sizeof(struct iscsi_data_count));
- c.iov = iov;
- c.iov_count = iov_count;
- c.data_length = data;
- c.type = ISCSI_RX_DATA;
-
- return iscsit_do_rx_data(conn, &c);
-}
-
int tx_data(
struct iscsi_conn *conn,
struct kvec *iov,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3305b47fdf53..16d5a4e117a2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -545,32 +545,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+static int tcm_loop_queue_data_or_status(const char *func,
+ struct se_cmd *se_cmd, u8 scsi_status)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
- __func__, sc, sc->cmnd[0]);
-
- sc->result = SAM_STAT_GOOD;
- set_host_byte(sc, DID_OK);
- if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
- (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
- scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
- return 0;
-}
-
-static int tcm_loop_queue_status(struct se_cmd *se_cmd)
-{
- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
- struct tcm_loop_cmd, tl_se_cmd);
- struct scsi_cmnd *sc = tl_cmd->sc;
-
- pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
- __func__, sc, sc->cmnd[0]);
+ func, sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
@@ -581,7 +564,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
} else
- sc->result = se_cmd->scsi_status;
+ sc->result = scsi_status;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
@@ -591,6 +574,17 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
return 0;
}
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__,
+ se_cmd, se_cmd->scsi_status);
+}
+
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 385e4cf9cfa6..6b72afee2f8b 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -677,7 +677,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
@@ -1090,7 +1090,7 @@ int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
- if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return -ENODEV;
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
@@ -1920,7 +1920,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2177,7 +2177,7 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2263,7 +2263,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev)
{
- if (!(dev->transport->transport_flags &
+ if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ff82b21fdcce..f04352285155 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1099,21 +1099,73 @@ static ssize_t block_size_store(struct config_item *item,
static ssize_t alua_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
- u8 flags = da->da_dev->transport->transport_flags;
+ u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}
+static ssize_t alua_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
+ pr_err("dev[%p]: Unable to change SE Device alua_support:"
+ " alua_support has fixed value\n", dev);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ return count;
+}
+
static ssize_t pgr_support_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = to_attrib(item);
- u8 flags = da->da_dev->transport->transport_flags;
+ u8 flags = da->da_dev->transport_flags;
return snprintf(page, PAGE_SIZE, "%d\n",
flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}
+static ssize_t pgr_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
+ pr_err("dev[%p]: Unable to change SE Device pgr_support:"
+ " pgr_support has fixed value\n", dev);
+ return -EINVAL;
+ }
+
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ return count;
+}
+
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
@@ -1146,8 +1198,8 @@ CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
-CONFIGFS_ATTR_RO(, alua_support);
-CONFIGFS_ATTR_RO(, pgr_support);
+CONFIGFS_ATTR(, alua_support);
+CONFIGFS_ATTR(, pgr_support);
/*
* dev_attrib attributes for devices using the target core SBC/SPC
@@ -1203,12 +1255,24 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
&attr_hw_block_size,
&attr_hw_max_sectors,
&attr_hw_queue_depth,
+ &attr_emulate_pr,
&attr_alua_support,
&attr_pgr_support,
NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
+/*
+ * pr related dev_attrib attributes for devices passing through CDBs,
+ * but allowing in core pr emulation.
+ */
+struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
+ &attr_enforce_pr_isids,
+ &attr_force_pr_aptpl,
+ NULL,
+};
+EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
+
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
@@ -1642,7 +1706,7 @@ static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "Passthrough\n");
spin_lock(&dev->dev_reservation_lock);
@@ -1784,7 +1848,7 @@ static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
if (!dev->dev_attrib.emulate_pr)
return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "SPC_PASSTHROUGH\n");
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -1798,7 +1862,7 @@ static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
@@ -1811,7 +1875,7 @@ static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
struct se_device *dev = pr_to_dev(item);
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1858,7 +1922,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
u8 type = 0;
if (!dev->dev_attrib.emulate_pr ||
- (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return count;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 4cee1138284b..46b0e1ceb77f 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -100,9 +100,10 @@ out_unlock:
*/
if (unpacked_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08llx\n",
+ " Access for 0x%08llx from %s\n",
se_cmd->se_tfo->fabric_name,
- unpacked_lun);
+ unpacked_lun,
+ nacl->initiatorname);
return TCM_NON_EXISTENT_LUN;
}
@@ -174,9 +175,10 @@ out_unlock:
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08llx\n",
+ " Access for 0x%08llx for %s\n",
se_cmd->se_tfo->fabric_name,
- unpacked_lun);
+ unpacked_lun,
+ nacl->initiatorname);
return -ENODEV;
}
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
@@ -732,6 +734,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->se_hba = hba;
dev->transport = hba->backend->ops;
+ dev->transport_flags = dev->transport->transport_flags_default;
dev->prot_length = sizeof(struct t10_pi_tuple);
dev->hba_index = hba->hba_index;
@@ -1100,7 +1103,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
* emulate the response, since tcmu does not have the information
* required to process these commands.
*/
- if (!(dev->transport->transport_flags &
+ if (!(dev->transport_flags &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
if (cdb[0] == PERSISTENT_RESERVE_IN) {
cmd->execute_cmd = target_scsi3_emulate_pr_in;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5e931690e697..91e41cc55704 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -4086,7 +4086,7 @@ target_check_reservation(struct se_cmd *cmd)
return 0;
if (!dev->dev_attrib.emulate_pr)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return 0;
spin_lock(&dev->dev_reservation_lock);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index c9d92b3e777d..4e37fa9b409d 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1070,9 +1070,9 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
- TRANSPORT_FLAG_PASSTHROUGH_ALUA |
- TRANSPORT_FLAG_PASSTHROUGH_PGR,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA |
+ TRANSPORT_FLAG_PASSTHROUGH_PGR,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index d24e0a3ba3ff..62aa5fa63ac0 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -582,8 +582,7 @@ int core_tpg_add_lun(
if (ret)
goto out_kill_ref;
- if (!(dev->transport->transport_flags &
- TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 264a822c0bfa..da37af9c3a5e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1397,7 +1397,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -2012,7 +2012,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
@@ -2126,7 +2126,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index f769bb1e3735..28fb9441de7a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -882,41 +882,24 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
- struct timer_list *timer)
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
- struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- int cmd_id;
-
- if (tcmu_cmd->cmd_id)
- goto setup_timer;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
- return cmd_id;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
- udev->name, tmo / MSEC_PER_SEC);
-
-setup_timer:
if (!tmo)
- return 0;
+ return;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
if (!timer_pending(timer))
mod_timer(timer, tcmu_cmd->deadline);
- return 0;
+ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}
static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
- int ret;
/*
* For backwards compat if qfull_time_out is not set use
@@ -931,13 +914,11 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
else
tmo = TCMU_TIME_OUT;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
- if (ret)
- return ret;
+ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
- pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+ tcmu_cmd, udev->name);
return 0;
}
@@ -959,7 +940,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, ret;
+ int iov_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
@@ -1060,14 +1041,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
- &udev->cmd_timer);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
+
+ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/*
@@ -1279,50 +1267,39 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
return handled;
}
-static int tcmu_check_expired_cmd(int id, void *p, void *data)
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
- struct tcmu_dev *udev = cmd->tcmu_dev;
- u8 scsi_status;
struct se_cmd *se_cmd;
- bool is_running;
-
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
- return 0;
if (!time_after(jiffies, cmd->deadline))
- return 0;
+ return;
- is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
+ cmd->se_cmd = NULL;
- if (is_running) {
- /*
- * If cmd_time_out is disabled but qfull is set deadline
- * will only reflect the qfull timeout. Ignore it.
- */
- if (!udev->cmd_time_out)
- return 0;
+ pr_debug("Timing out inflight cmd %u on dev %s.\n",
+ cmd->cmd_id, cmd->tcmu_dev->name);
- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
- /*
- * target_complete_cmd will translate this to LUN COMM FAILURE
- */
- scsi_status = SAM_STAT_CHECK_CONDITION;
- list_del_init(&cmd->queue_entry);
- cmd->se_cmd = NULL;
- } else {
- list_del_init(&cmd->queue_entry);
- idr_remove(&udev->commands, id);
- tcmu_free_cmd(cmd);
- scsi_status = SAM_STAT_TASK_SET_FULL;
- }
+ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
- pr_debug("Timing out cmd %u on dev %s that is %s.\n",
- id, udev->name, is_running ? "inflight" : "queued");
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
- target_complete_cmd(se_cmd, scsi_status);
- return 0;
+ if (!time_after(jiffies, cmd->deadline))
+ return;
+
+ pr_debug("Timing out queued cmd %p on dev %s.\n",
+ cmd, cmd->tcmu_dev->name);
+
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
static void tcmu_device_timedout(struct tcmu_dev *udev)
@@ -1407,16 +1384,15 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
- bool drained = true;
sense_reason_t scsi_ret;
int ret;
if (list_empty(&udev->qfull_queue))
- return true;
+ return;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
@@ -1425,11 +1401,10 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
list_del_init(&tcmu_cmd->queue_entry);
- pr_debug("removing cmd %u on dev %s from queue\n",
- tcmu_cmd->cmd_id, udev->name);
+ pr_debug("removing cmd %p on dev %s from queue\n",
+ tcmu_cmd, udev->name);
if (fail) {
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
/*
* We were not able to even start the command, so
* fail with busy to allow a retry in case runner
@@ -1444,10 +1419,8 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0) {
- pr_debug("cmd %u on dev %s failed with %u\n",
- tcmu_cmd->cmd_id, udev->name, scsi_ret);
-
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ pr_debug("cmd %p on dev %s failed with %u\n",
+ tcmu_cmd, udev->name, scsi_ret);
/*
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
@@ -1462,13 +1435,11 @@ static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
* the queue
*/
list_splice_tail(&cmds, &udev->qfull_queue);
- drained = false;
break;
}
}
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
- return drained;
}
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
@@ -1652,6 +1623,8 @@ static void tcmu_dev_kref_release(struct kref *kref)
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
idr_destroy(&udev->commands);
WARN_ON(!all_expired);
@@ -2037,9 +2010,6 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
- continue;
-
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
cmd->cmd_id, udev->name,
test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
@@ -2077,6 +2047,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
del_timer(&udev->cmd_timer);
+ run_qfull_queue(udev, false);
+
mutex_unlock(&udev->cmdr_lock);
}
@@ -2617,7 +2589,9 @@ static struct configfs_attribute *tcmu_action_attrs[] = {
static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = tcmu_attach_hba,
.detach_hba = tcmu_detach_hba,
.alloc_device = tcmu_alloc_device,
@@ -2698,6 +2672,7 @@ static void find_free_blocks(void)
static void check_timedout_devices(void)
{
struct tcmu_dev *udev, *tmp_dev;
+ struct tcmu_cmd *cmd, *tmp_cmd;
LIST_HEAD(devs);
spin_lock_bh(&timed_out_udevs_lock);
@@ -2708,9 +2683,24 @@ static void check_timedout_devices(void)
spin_unlock_bh(&timed_out_udevs_lock);
mutex_lock(&udev->cmdr_lock);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ /*
+ * If cmd_time_out is disabled but qfull is set deadline
+ * will only reflect the qfull timeout. Ignore it.
+ */
+ if (udev->cmd_time_out) {
+ list_for_each_entry_safe(cmd, tmp_cmd,
+ &udev->inflight_queue,
+ queue_entry) {
+ tcmu_check_expired_ring_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->inflight_queue,
+ &udev->cmd_timer);
+ }
+ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+ queue_entry) {
+ tcmu_check_expired_queue_cmd(cmd);
+ }
tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
mutex_unlock(&udev->cmdr_lock);
@@ -2753,12 +2743,12 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
- for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
- }
- for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
+ len += sizeof(struct configfs_attribute *);
+ for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
len += sizeof(struct configfs_attribute *);
- }
len += sizeof(struct configfs_attribute *);
tcmu_attrs = kzalloc(len, GFP_KERNEL);
@@ -2767,13 +2757,12 @@ static int __init tcmu_module_init(void)
goto out_unreg_genl;
}
- for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
tcmu_attrs[i] = passthrough_attrib_attrs[i];
- }
- for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
- tcmu_attrs[i] = tcmu_attrib_attrs[k];
- i++;
- }
+ for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
+ for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = tcmu_attrib_attrs[k];
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
ret = transport_backend_register(&tcmu_ops);
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
index 8da63f38e6bd..e99d840c2511 100644
--- a/drivers/tee/Kconfig
+++ b/drivers/tee/Kconfig
@@ -3,6 +3,8 @@
config TEE
tristate "Trusted Execution Environment support"
depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
+ select CRYPTO
+ select CRYPTO_SHA1
select DMA_SHARED_BUFFER
select GENERIC_ALLOCATOR
help
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index cf2367ba08d6..20b6fd7383c5 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -233,9 +233,13 @@ int optee_open_session(struct tee_context *ctx,
msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
OPTEE_MSG_ATTR_META;
memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
- memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid));
msg_arg->params[1].u.value.c = arg->clnt_login;
+ rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
+ arg->clnt_login, arg->clnt_uuid);
+ if (rc)
+ goto out;
+
rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
if (rc)
goto out;
@@ -561,10 +565,10 @@ static int check_mem_type(unsigned long start, size_t num_pages)
if (virt_addr_valid(start))
return 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
rc = __check_mem_type(find_vma(mm, start),
start + num_pages * PAGE_SIZE);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return rc;
}
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 6aec502c495c..64637e09a095 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -6,18 +6,33 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
+#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
#include "tee_private.h"
#define TEE_NUM_DEVICES 32
#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
+#define TEE_UUID_NS_NAME_SIZE 128
+
+/*
+ * TEE Client UUID name space identifier (UUIDv4)
+ *
+ * The value here is a random UUID allocated as the name space identifier
+ * for forming client UUIDs for the TEE environment using the UUIDv5 scheme.
+ */
+static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
+ 0xa1, 0xb8, 0xec, 0x4b,
+ 0xc0, 0x8e, 0x01, 0xb6);
+
/*
* Unprivileged devices in the lower half range and privileged devices in
* the upper half range.
@@ -110,6 +125,143 @@ static int tee_release(struct inode *inode, struct file *filp)
return 0;
}
+/**
+ * uuid_v5() - Calculate UUIDv5
+ * @uuid: Resulting UUID
+ * @ns: Name space ID for UUIDv5 function
+ * @name: Name for UUIDv5 function
+ * @size: Size of name
+ *
+ * UUIDv5 is specified in RFC 4122.
+ *
+ * This implements section (for SHA-1):
+ * 4.3. Algorithm for Creating a Name-Based UUID
+ */
+static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
+ size_t size)
+{
+ unsigned char hash[SHA1_DIGEST_SIZE];
+ struct crypto_shash *shash = NULL;
+ struct shash_desc *desc = NULL;
+ int rc;
+
+ shash = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(shash)) {
+ rc = PTR_ERR(shash);
+ pr_err("shash(sha1) allocation failed\n");
+ return rc;
+ }
+
+ desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
+ GFP_KERNEL);
+ if (!desc) {
+ rc = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ desc->tfm = shash;
+
+ rc = crypto_shash_init(desc);
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_update(desc, (const u8 *)name, size);
+ if (rc < 0)
+ goto out_free_desc;
+
+ rc = crypto_shash_final(desc, hash);
+ if (rc < 0)
+ goto out_free_desc;
+
+ memcpy(uuid->b, hash, UUID_SIZE);
+
+ /* Tag for version 5 */
+ uuid->b[6] = (hash[6] & 0x0F) | 0x50;
+ uuid->b[8] = (hash[8] & 0x3F) | 0x80;
+
+out_free_desc:
+ kfree(desc);
+
+out_free_shash:
+ crypto_free_shash(shash);
+ return rc;
+}
+
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN])
+{
+ gid_t ns_grp = (gid_t)-1;
+ kgid_t grp = INVALID_GID;
+ char *name = NULL;
+ int name_len;
+ int rc;
+
+ if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
+ /* Nil UUID to be passed to TEE environment */
+ uuid_copy(uuid, &uuid_null);
+ return 0;
+ }
+
+ /*
+ * In the Linux environment the client UUID is based on UUIDv5.
+ *
+ * Determine the client UUID with the following semantics for 'name':
+ *
+ * For TEEC_LOGIN_USER:
+ * uid=<uid>
+ *
+ * For TEEC_LOGIN_GROUP:
+ * gid=<gid>
+ *
+ */
+
+ name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ switch (connection_method) {
+ case TEE_IOCTL_LOGIN_USER:
+ name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
+ current_euid().val);
+ if (name_len >= TEE_UUID_NS_NAME_SIZE) {
+ rc = -E2BIG;
+ goto out_free_name;
+ }
+ break;
+
+ case TEE_IOCTL_LOGIN_GROUP:
+ memcpy(&ns_grp, connection_data, sizeof(gid_t));
+ grp = make_kgid(current_user_ns(), ns_grp);
+ if (!gid_valid(grp) || !in_egroup_p(grp)) {
+ rc = -EPERM;
+ goto out_free_name;
+ }
+
+ name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
+ grp.val);
+ if (name_len >= TEE_UUID_NS_NAME_SIZE) {
+ rc = -E2BIG;
+ goto out_free_name;
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ goto out_free_name;
+ }
+
+ rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
+out_free_name:
+ kfree(name);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
+
static int tee_ioctl_version(struct tee_context *ctx,
struct tee_ioctl_version_data __user *uvers)
{
@@ -333,6 +485,13 @@ static int tee_ioctl_open_session(struct tee_context *ctx,
goto out;
}
+ if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
+ arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
+ pr_debug("login method not allowed for user-space client\n");
+ rc = -EPERM;
+ goto out;
+ }
+
rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
if (rc)
goto out;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index bd679b72bd05..827ac3d0fea9 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uio.h>
#include "tee_private.h"
static void tee_shm_release(struct tee_shm *shm)
@@ -161,8 +162,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
}
}
- if (ctx)
- teedev_ctx_get(ctx);
+ teedev_ctx_get(ctx);
return shm;
err_rem:
@@ -185,14 +185,15 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
size_t length, u32 flags)
{
struct tee_device *teedev = ctx->teedev;
- const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+ const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+ const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
struct tee_shm *shm;
void *ret;
int rc;
int num_pages;
unsigned long start;
- if (flags != req_flags)
+ if (flags != req_user_flags && flags != req_kernel_flags)
return ERR_PTR(-ENOTSUPP);
if (!tee_device_get(teedev))
@@ -226,7 +227,27 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
- rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
+ if (flags & TEE_SHM_USER_MAPPED) {
+ rc = get_user_pages_fast(start, num_pages, FOLL_WRITE,
+ shm->pages);
+ } else {
+ struct kvec *kiov;
+ int i;
+
+ kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
+ if (!kiov) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ for (i = 0; i < num_pages; i++) {
+ kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
+ kiov[i].iov_len = PAGE_SIZE;
+ }
+
+ rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
+ kfree(kiov);
+ }
if (rc > 0)
shm->num_pages = rc;
if (rc != num_pages) {
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index a8723b1eb8b0..8938ea81a525 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -3,9 +3,9 @@
* Copyright 2018-2020 NXP.
*/
+#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/err.h>
#include <linux/firmware/imx/sci.h>
-#include <linux/firmware/imx/types.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index 1eb757e8df3b..f02010738bb6 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -2,7 +2,6 @@
menuconfig USB4
tristate "Unified support for USB4 and Thunderbolt"
depends on PCI
- depends on X86 || COMPILE_TEST
select APPLE_PROPERTIES if EFI_STUB && X86
select CRC32
select CRYPTO
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index fbbe32ca1e69..ffcc8c3459e5 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1633,6 +1633,15 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
icm_veto_end(tb);
}
+static bool icm_tgl_is_supported(struct tb *tb)
+{
+ /*
+ * If the firmware is not running use software CM. This platform
+ * should fully support both.
+ */
+ return icm_firmware_running(tb->nhi);
+}
+
static void icm_handle_notification(struct work_struct *work)
{
struct icm_notification *n = container_of(work, typeof(*n), work);
@@ -2269,6 +2278,19 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->rtd3_veto = icm_icl_rtd3_veto;
tb->cm_ops = &icm_icl_ops;
break;
+
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ icm->is_supported = icm_tgl_is_supported;
+ icm->driver_ready = icm_icl_driver_ready;
+ icm->set_uuid = icm_icl_set_uuid;
+ icm->device_connected = icm_icl_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ icm->rtd3_veto = icm_icl_rtd3_veto;
+ tb->cm_ops = &icm_icl_ops;
+ break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 1be491ecbb45..d299dc168147 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1270,6 +1270,10 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
@@ -1285,6 +1289,7 @@ static struct pci_driver nhi_driver = {
.id_table = nhi_ids,
.probe = nhi_probe,
.remove = nhi_remove,
+ .shutdown = nhi_remove,
.driver.pm = &nhi_pm_ops,
};
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 5d276ee9b38e..80162e4b013f 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -73,6 +73,8 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
+#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
+#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index a2ce99051c51..d7d60cd9226f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -263,7 +263,7 @@ static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
* itself. To be on the safe side keep the root port in D0 during
* the whole upgrade process.
*/
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ root_port = pcie_find_root_port(sw->tb->nhi->pdev);
if (root_port)
pm_runtime_get_noresume(&root_port->dev);
}
@@ -272,7 +272,7 @@ static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
struct pci_dev *root_port;
- root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ root_port = pcie_find_root_port(sw->tb->nhi->pdev);
if (root_port)
pm_runtime_put(&root_port->dev);
}
@@ -348,12 +348,6 @@ out:
return ret;
}
-static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
- size_t bytes)
-{
- return -EPERM;
-}
-
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -399,7 +393,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
config.read_only = true;
} else {
config.name = "nvm_non_active";
- config.reg_read = tb_switch_nvm_no_read;
config.reg_write = tb_switch_nvm_write;
config.root_only = true;
}
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 436cc51c92c3..cdcc64ea2554 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -371,15 +371,14 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
* tty fields and return the kref reference.
*/
if (rc) {
- tty_port_tty_set(&hp->port, NULL);
- tty->driver_data = NULL;
- tty_port_put(&hp->port);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
- } else
+ } else {
/* We are ready... raise DTR/RTS */
if (C_BAUD(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, 1);
+ tty_port_set_initialized(&hp->port, true);
+ }
/* Force wakeup of the polling thread */
hvc_kick();
@@ -389,22 +388,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
- struct hvc_struct *hp;
+ struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
if (tty_hung_up_p(filp))
return;
- /*
- * No driver_data means that this close was issued after a failed
- * hvc_open by the tty layer's release_dev() function and we can just
- * exit cleanly because the kref reference wasn't made.
- */
- if (!tty->driver_data)
- return;
-
- hp = tty->driver_data;
-
spin_lock_irqsave(&hp->port.lock, flags);
if (--hp->port.count == 0) {
@@ -412,6 +401,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
/* We are done with the tty pointer now. */
tty_port_tty_set(&hp->port, NULL);
+ if (!tty_port_initialized(&hp->port))
+ return;
+
if (C_HUPCL(tty))
if (hp->ops->dtr_rts)
hp->ops->dtr_rts(hp, 0);
@@ -428,6 +420,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
* waking periodically to check chars_in_buffer().
*/
tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
+ tty_port_set_initialized(&hp->port, false);
} else {
if (hp->port.count < 0)
printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index ee0604cd9c6b..55105ac38f89 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -196,8 +196,6 @@ module_param(hvcs_parm_num_devs, int, 0);
static const char hvcs_driver_name[] = "hvcs";
static const char hvcs_device_node[] = "hvcs";
-static const char hvcs_driver_string[]
- = "IBM hvcs (Hypervisor Virtual Console Server) Driver";
/* Status of partner info rescan triggered via sysfs. */
static int hvcs_rescan_status;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 9d00ff5ef961..3703987c4666 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -638,16 +638,15 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
-static int mxser_change_speed(struct tty_struct *tty)
+static void mxser_change_speed(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval, fcr;
- int ret = 0;
unsigned char status;
cflag = tty->termios.c_cflag;
if (!info->ioaddr)
- return ret;
+ return;
if (mxser_set_baud_method[tty->index] == 0)
mxser_set_baud(tty, tty_get_baud_rate(tty));
@@ -803,8 +802,6 @@ static int mxser_change_speed(struct tty_struct *tty)
outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
outb(cval, info->ioaddr + UART_LCR);
-
- return ret;
}
static void mxser_check_modem_status(struct tty_struct *tty,
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index d77ed82a4840..0a29a94ec438 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -504,18 +504,7 @@ static void gsm_print_packet(const char *hdr, int addr, int cr,
else
pr_cont("(F)");
- if (dlen) {
- int ct = 0;
- while (dlen--) {
- if (ct % 8 == 0) {
- pr_cont("\n");
- pr_debug(" ");
- }
- pr_cont("%02X ", *data++);
- ct++;
- }
- }
- pr_cont("\n");
+ print_hex_dump_bytes("", DUMP_PREFIX_NONE, data, dlen);
}
@@ -673,11 +662,10 @@ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
* FIXME: lock against link layer control transmissions
*/
-static void gsm_data_kick(struct gsm_mux *gsm)
+static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
struct gsm_msg *msg, *nmsg;
int len;
- int skip_sof = 0;
list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) {
if (gsm->constipated && msg->addr)
@@ -699,18 +687,23 @@ static void gsm_data_kick(struct gsm_mux *gsm)
print_hex_dump_bytes("gsm_data_kick: ",
DUMP_PREFIX_OFFSET,
gsm->txframe, len);
-
- if (gsm->output(gsm, gsm->txframe + skip_sof,
- len - skip_sof) < 0)
+ if (gsm->output(gsm, gsm->txframe, len) < 0)
break;
/* FIXME: Can eliminate one SOF in many more cases */
gsm->tx_bytes -= msg->len;
- /* For a burst of frames skip the extra SOF within the
- burst */
- skip_sof = 1;
list_del(&msg->list);
kfree(msg);
+
+ if (dlci) {
+ tty_port_tty_wakeup(&dlci->port);
+ } else {
+ int i = 0;
+
+ for (i = 0; i < NUM_DLCI; i++)
+ if (gsm->dlci[i])
+ tty_port_tty_wakeup(&gsm->dlci[i]->port);
+ }
}
}
@@ -762,7 +755,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
/* Add to the actual output queue */
list_add_tail(&msg->list, &gsm->tx_list);
gsm->tx_bytes += msg->len;
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, dlci);
}
/**
@@ -1223,7 +1216,7 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
gsm_control_reply(gsm, CMD_FCON, NULL, 0);
/* Kick the link in case it is idling */
spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, NULL);
spin_unlock_irqrestore(&gsm->tx_lock, flags);
break;
case CMD_FCOFF:
@@ -2545,7 +2538,7 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
/* Queue poll */
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
spin_lock_irqsave(&gsm->tx_lock, flags);
- gsm_data_kick(gsm);
+ gsm_data_kick(gsm, NULL);
if (gsm->tx_bytes < TX_THRESH_LO) {
gsm_dlci_data_sweep(gsm);
}
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index e2138e7d5dc6..2540b2e4c8e8 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1885,7 +1885,7 @@ static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
*/
static __init int register_PCI(int i, struct pci_dev *dev)
{
- int num_aiops, aiop, max_num_aiops, num_chan, chan;
+ int num_aiops, aiop, max_num_aiops, chan;
unsigned int aiopio[MAX_AIOPS_PER_BOARD];
CONTROLLER_t *ctlp;
@@ -2157,8 +2157,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
/* Reset the AIOPIC, init the serial ports */
for (aiop = 0; aiop < num_aiops; aiop++) {
sResetAiopByNum(ctlp, aiop);
- num_chan = ports_per_aiop;
- for (chan = 0; chan < num_chan; chan++)
+ for (chan = 0; chan < ports_per_aiop; chan++)
init_r_port(i, aiop, chan, dev);
}
@@ -2166,11 +2165,10 @@ static __init int register_PCI(int i, struct pci_dev *dev)
if ((rcktpt_type[i] == ROCKET_TYPE_MODEM) ||
(rcktpt_type[i] == ROCKET_TYPE_MODEMII) ||
(rcktpt_type[i] == ROCKET_TYPE_MODEMIII)) {
- num_chan = ports_per_aiop;
- for (chan = 0; chan < num_chan; chan++)
+ for (chan = 0; chan < ports_per_aiop; chan++)
sPCIModemReset(ctlp, chan, 1);
msleep(500);
- for (chan = 0; chan < num_chan; chan++)
+ for (chan = 0; chan < ports_per_aiop; chan++)
sPCIModemReset(ctlp, chan, 0);
msleep(500);
rmSpeakerReset(ctlp, rocketModel[i].model);
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 45d9117cab68..fc118f649887 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1026,7 +1026,9 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->port.dev) {
uart->port.dev = up->port.dev;
- uart_get_rs485_mode(uart->port.dev, &uart->port.rs485);
+ ret = uart_get_rs485_mode(&uart->port);
+ if (ret)
+ goto err;
}
if (up->port.flags & UPF_FIXED_TYPE)
@@ -1040,7 +1042,7 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
gpios = mctrl_gpio_init(&uart->port, 0);
if (IS_ERR(gpios)) {
ret = PTR_ERR(gpios);
- goto out_unlock;
+ goto err;
} else {
uart->gpios = gpios;
}
@@ -1089,8 +1091,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
serial8250_apply_quirks(uart);
ret = uart_add_one_port(&serial8250_reg,
&uart->port);
- if (ret == 0)
- ret = uart->port.line;
+ if (ret)
+ goto err;
+
+ ret = uart->port.line;
} else {
dev_info(uart->port.dev,
"skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
@@ -1112,10 +1116,14 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
}
}
-out_unlock:
mutex_unlock(&serial_mutex);
return ret;
+
+err:
+ uart->port.dev = NULL;
+ mutex_unlock(&serial_mutex);
+ return ret;
}
EXPORT_SYMBOL(serial8250_register_8250_port);
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 59449b6500cd..ddb6aeb76dc5 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -25,13 +25,13 @@
#include "8250.h"
-#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
-#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
-#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
-#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
-#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
-#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
-#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
+#define PCI_DEVICE_ID_ACCESSIO_COM_2S 0x1052
+#define PCI_DEVICE_ID_ACCESSIO_COM_4S 0x105d
+#define PCI_DEVICE_ID_ACCESSIO_COM_8S 0x106c
+#define PCI_DEVICE_ID_ACCESSIO_COM232_8 0x10a8
+#define PCI_DEVICE_ID_ACCESSIO_COM_2SM 0x10d2
+#define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db
+#define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
@@ -755,9 +755,7 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
(kernel_ulong_t)&bd \
}
-#define EXAR_DEVICE(vend, devid, bd) { \
- PCI_VDEVICE(vend, PCI_DEVICE_ID_##devid), (kernel_ulong_t)&bd \
- }
+#define EXAR_DEVICE(vend, devid, bd) { PCI_DEVICE_DATA(vend, devid, &bd) }
#define IBM_DEVICE(devid, sdevid, bd) { \
PCI_DEVICE_SUB( \
@@ -769,14 +767,13 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
}
static const struct pci_device_id exar_pci_tbl[] = {
- EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
- EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
-
+ EXAR_DEVICE(ACCESSIO, COM_2S, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, COM_4S, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, COM_8S, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, COM232_8, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, COM_2SM, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, COM_4SM, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, COM_8SM, acces_com_8x),
CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
@@ -794,24 +791,24 @@ static const struct pci_device_id exar_pci_tbl[] = {
IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn),
/* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
- EXAR_DEVICE(EXAR, EXAR_XR17C152, pbn_exar_XR17C15x),
- EXAR_DEVICE(EXAR, EXAR_XR17C154, pbn_exar_XR17C15x),
- EXAR_DEVICE(EXAR, EXAR_XR17C158, pbn_exar_XR17C15x),
+ EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
+ EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
+ EXAR_DEVICE(EXAR, XR17C158, pbn_exar_XR17C15x),
/* Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs */
- EXAR_DEVICE(EXAR, EXAR_XR17V352, pbn_exar_XR17V35x),
- EXAR_DEVICE(EXAR, EXAR_XR17V354, pbn_exar_XR17V35x),
- EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x),
- EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358),
- EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358),
- EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x),
- EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x),
-
- EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2),
- EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4),
- EXAR_DEVICE(COMMTECH, COMMTECH_2324PCI335, pbn_fastcom335_4),
- EXAR_DEVICE(COMMTECH, COMMTECH_2328PCI335, pbn_fastcom335_8),
+ EXAR_DEVICE(EXAR, XR17V352, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, XR17V354, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, XR17V358, pbn_exar_XR17V35x),
+ EXAR_DEVICE(EXAR, XR17V4358, pbn_exar_XR17V4358),
+ EXAR_DEVICE(EXAR, XR17V8358, pbn_exar_XR17V8358),
+ EXAR_DEVICE(COMMTECH, 4222PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, 4224PCIE, pbn_exar_XR17V35x),
+ EXAR_DEVICE(COMMTECH, 4228PCIE, pbn_exar_XR17V35x),
+
+ EXAR_DEVICE(COMMTECH, 4222PCI335, pbn_fastcom335_2),
+ EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
+ EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
+ EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 31c91c2f8c6e..d1d253c4b518 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -19,6 +19,7 @@
#define CHIP_ID2 0x21
#define CHIP_ID_F81865 0x0407
#define CHIP_ID_F81866 0x1010
+#define CHIP_ID_F81966 0x0215
#define CHIP_ID_F81216AD 0x1602
#define CHIP_ID_F81216H 0x0501
#define CHIP_ID_F81216 0x0802
@@ -62,9 +63,9 @@
#define F81216_LDN_HIGH 0x4
/*
- * F81866 registers
+ * F81866/966 registers
*
- * The IRQ setting mode of F81866 is not the same with F81216 series.
+ * The IRQ setting mode of F81866/966 is not the same as the F81216 series.
* Level/Low: IRQ_MODE0:0, IRQ_MODE1:0
* Edge/High: IRQ_MODE0:1, IRQ_MODE1:0
*
@@ -155,6 +156,7 @@ static int fintek_8250_check_id(struct fintek_8250 *pdata)
switch (chip) {
case CHIP_ID_F81865:
case CHIP_ID_F81866:
+ case CHIP_ID_F81966:
case CHIP_ID_F81216AD:
case CHIP_ID_F81216H:
case CHIP_ID_F81216:
@@ -171,6 +173,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
int *max)
{
switch (pdata->pid) {
+ case CHIP_ID_F81966:
case CHIP_ID_F81865:
case CHIP_ID_F81866:
*min = F81866_LDN_LOW;
@@ -248,6 +251,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
sio_write_reg(pdata, LDN, pdata->index);
switch (pdata->pid) {
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
0);
@@ -274,6 +278,7 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
{
switch (pdata->pid) {
case CHIP_ID_F81216H: /* 128Bytes FIFO */
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
sio_write_mask_reg(pdata, FIFO_CTRL,
FIFO_MODE_MASK | RXFTHR_MODE_MASK,
@@ -291,6 +296,7 @@ static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
sio_write_reg(pdata, LDN, pdata->index);
switch (pdata->pid) {
+ case CHIP_ID_F81966:
case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
sio_write_mask_reg(pdata, F81866_UART_CLK,
F81866_UART_CLK_MASK,
@@ -327,6 +333,7 @@ static void fintek_8250_set_termios(struct uart_port *port,
case CHIP_ID_F81216H:
reg = RS485;
break;
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
reg = F81866_UART_CLK;
break;
@@ -373,6 +380,7 @@ static void fintek_8250_set_termios_handler(struct uart_8250_port *uart)
switch (pdata->pid) {
case CHIP_ID_F81216H:
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
uart->port.set_termios = fintek_8250_set_termios;
break;
@@ -443,6 +451,7 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
switch (pdata->pid) {
case CHIP_ID_F81216AD:
case CHIP_ID_F81216H:
+ case CHIP_ID_F81966:
case CHIP_ID_F81866:
case CHIP_ID_F81865:
uart->port.rs485_config = fintek_8250_rs485_config;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0804469ff052..1a74d511b02a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1869,12 +1869,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
-#define PCI_VENDOR_ID_PERICOM 0x12D8
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
-#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
-
#define PCI_VENDOR_ID_ACCESIO 0x494f
#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f77bf820b7a3..1632f7d25acc 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -681,6 +681,9 @@ int serial8250_em485_config(struct uart_port *port, struct serial_rs485 *rs485)
memset(rs485->padding, 0, sizeof(rs485->padding));
port->rs485 = *rs485;
+ gpiod_set_value(port->rs485_term_gpio,
+ rs485->flags & SER_RS485_TERMINATE_BUS);
+
/*
* Both serial8250_em485_init() and serial8250_em485_destroy()
* are idempotent.
@@ -1432,7 +1435,7 @@ static void serial8250_stop_rx(struct uart_port *port)
/**
* serial8250_em485_stop_tx() - generic ->rs485_stop_tx() callback
- * @up: uart 8250 port
+ * @p: uart 8250 port
*
* Generic callback usable by 8250 uart drivers to stop rs485 transmission.
*/
@@ -2615,6 +2618,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
+ unsigned int tolerance = port->uartclk / 100;
+
/*
* Ask the core to calculate the divisor for us.
* Allow 1% tolerance at the upper limit so uart clks marginally
@@ -2623,7 +2628,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
*/
return uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / UART_DIV_MAX,
- port->uartclk);
+ (port->uartclk + tolerance) / 16);
}
void
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index af0688156dd0..8195a31519ea 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -63,6 +63,7 @@ config SERIAL_8250_PNP
config SERIAL_8250_16550A_VARIANTS
bool "Support for variants of the 16550A serial port"
depends on SERIAL_8250
+ default !X86
help
The 8250 driver can probe for many variants of the venerable 16550A
serial port. Doing so takes additional time at boot.
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index c8186a05a453..e3d10794dbba 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -440,7 +440,7 @@ static int simple_config_check_notpicky(struct pcmcia_device *p_dev,
static int simple_config(struct pcmcia_device *link)
{
struct serial_info *info = link->priv;
- int i = -ENODEV, try;
+ int ret, try;
/*
* First pass: look for a config entry that looks normal.
@@ -472,8 +472,8 @@ found_port:
if (info->quirk && info->quirk->config)
info->quirk->config(link);
- i = pcmcia_enable_device(link);
- if (i != 0)
+ ret = pcmcia_enable_device(link);
+ if (ret != 0)
return -1;
return setup_serial(link, info, link->resource[0]->start, link->irq);
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index adf9e80e7dc9..0282ad9cdaa7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1034,13 +1034,22 @@ config SERIAL_SIFIVE_CONSOLE
boot time.)
config SERIAL_LANTIQ
- bool "Lantiq serial driver"
- depends on LANTIQ
+ tristate "Lantiq serial driver"
+ depends on (LANTIQ || X86) || COMPILE_TEST
select SERIAL_CORE
+ help
+ Support for UART on Lantiq and Intel SoCs.
+ To compile this driver as a module, select M here. The
+ module will be called lantiq.
+
+config SERIAL_LANTIQ_CONSOLE
+ bool "Console on Lantiq UART"
+ depends on SERIAL_LANTIQ=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
- Support for console and UART on Lantiq SoCs.
+ Select this option if you would like to use a Lantiq UART as the
+ system console.
config SERIAL_QE
tristate "Freescale QUICC Engine serial port support"
@@ -1462,6 +1471,7 @@ config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
depends on ARCH_STM32 || COMPILE_TEST
+ select SERIAL_MCTRL_GPIO if GPIOLIB
help
This driver is for the on-chip Serial Controller on
STMicroelectronics STM32 MCUs.
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c010f639298d..8efd7c2a34fe 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2607,6 +2607,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE);
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
+ spin_lock_init(&uap->port.lock);
amba_ports[index] = uap;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 7e7f1398019f..0c80a79d7442 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -766,8 +766,6 @@ static int ar933x_uart_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- uart_get_rs485_mode(&pdev->dev, &port->rs485);
-
port->mapbase = mem_res->start;
port->line = id;
port->irq = irq_res->start;
@@ -786,6 +784,10 @@ static int ar933x_uart_probe(struct platform_device *pdev)
baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
+ ret = uart_get_rs485_mode(port);
+ if (ret)
+ goto err_disable_clk;
+
up->gpios = mctrl_gpio_init(port, 0);
if (IS_ERR(up->gpios) && PTR_ERR(up->gpios) != -ENOSYS)
return PTR_ERR(up->gpios);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 8d7080efad9b..e43471b33710 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2491,8 +2491,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- uart_get_rs485_mode(&mpdev->dev, &port->rs485);
-
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &atmel_pops;
@@ -2506,6 +2504,10 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
+ ret = uart_get_rs485_mode(port);
+ if (ret)
+ return ret;
+
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
atmel_port->clk = clk_get(&mpdev->dev, "usart");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 5d41075964f2..90298c403042 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1231,9 +1231,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
struct lpuart_port, port);
struct dma_chan *chan = sport->dma_rx_chan;
- if (chan)
- dmaengine_terminate_all(chan);
-
+ dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
@@ -1514,17 +1512,17 @@ static void lpuart_request_dma(struct lpuart_port *sport)
{
sport->dma_tx_chan = dma_request_chan(sport->port.dev, "tx");
if (IS_ERR(sport->dma_tx_chan)) {
- dev_info_once(sport->port.dev,
- "DMA tx channel request failed, operating without tx DMA (%ld)\n",
- PTR_ERR(sport->dma_tx_chan));
+ dev_dbg_once(sport->port.dev,
+ "DMA tx channel request failed, operating without tx DMA (%ld)\n",
+ PTR_ERR(sport->dma_tx_chan));
sport->dma_tx_chan = NULL;
}
sport->dma_rx_chan = dma_request_chan(sport->port.dev, "rx");
if (IS_ERR(sport->dma_rx_chan)) {
- dev_info_once(sport->port.dev,
- "DMA rx channel request failed, operating without rx DMA (%ld)\n",
- PTR_ERR(sport->dma_rx_chan));
+ dev_dbg_once(sport->port.dev,
+ "DMA rx channel request failed, operating without rx DMA (%ld)\n",
+ PTR_ERR(sport->dma_rx_chan));
sport->dma_rx_chan = NULL;
}
}
@@ -2621,7 +2619,9 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_attach_port;
- uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
+ ret = uart_get_rs485_mode(&sport->port);
+ if (ret)
+ goto failed_get_rs485;
if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX)
dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
@@ -2634,6 +2634,7 @@ static int lpuart_probe(struct platform_device *pdev)
return 0;
+failed_get_rs485:
failed_attach_port:
failed_irq_request:
lpuart_disable_clks(sport);
@@ -2664,8 +2665,7 @@ static int lpuart_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int lpuart_suspend(struct device *dev)
+static int __maybe_unused lpuart_suspend(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
unsigned long temp;
@@ -2723,7 +2723,7 @@ static int lpuart_suspend(struct device *dev)
return 0;
}
-static int lpuart_resume(struct device *dev)
+static int __maybe_unused lpuart_resume(struct device *dev)
{
struct lpuart_port *sport = dev_get_drvdata(dev);
bool irq_wake = irqd_is_wakeup_set(irq_get_irq_data(sport->port.irq));
@@ -2754,7 +2754,6 @@ static int lpuart_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(lpuart_pm_ops, lpuart_suspend, lpuart_resume);
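The lpuart hunk above drops the CONFIG_PM_SLEEP ifdef in favour of __maybe_unused annotations. As a hedged illustration of that idiom in isolation (the "foo" driver below is invented, not part of this patch), SET_SYSTEM_SLEEP_PM_OPS inside SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM_SLEEP is set, so __maybe_unused keeps the other configuration warning-free without any preprocessor guard:

/* Minimal sketch, not a real driver: all "foo" names are placeholders. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the (hypothetical) hardware */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* bring the (hypothetical) hardware back up */
	return 0;
}

/*
 * SET_SYSTEM_SLEEP_PM_OPS expands to nothing without CONFIG_PM_SLEEP, so the
 * callbacks above are then unreferenced; __maybe_unused avoids the
 * unused-function warning without an #ifdef.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL v2");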
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index f4d68109bc8b..1265e8d86d8a 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -909,6 +909,8 @@ static irqreturn_t imx_uart_int(int irq, void *dev_id)
usr2 &= ~USR2_ORE;
if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
+ imx_uart_writel(sport, USR1_AGTIM, USR1);
+
__imx_uart_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
@@ -2252,6 +2254,8 @@ static int imx_uart_probe(struct platform_device *pdev)
return PTR_ERR(base);
rxirq = platform_get_irq(pdev, 0);
+ if (rxirq < 0)
+ return rxirq;
txirq = platform_get_irq_optional(pdev, 1);
rtsirq = platform_get_irq_optional(pdev, 2);
@@ -2302,7 +2306,11 @@ static int imx_uart_probe(struct platform_device *pdev)
sport->ucr4 = readl(sport->port.membase + UCR4);
sport->ufcr = readl(sport->port.membase + UFCR);
- uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
+ ret = uart_get_rs485_mode(&sport->port);
+ if (ret) {
+ clk_disable_unprepare(sport->clk_ipg);
+ return ret;
+ }
if (sport->port.rs485.flags & SER_RS485_ENABLED &&
(!sport->have_rtscts && !sport->have_rtsgpio))
@@ -2398,6 +2406,9 @@ static int imx_uart_probe(struct platform_device *pdev)
}
}
+ /* We need to initialize the lock even for a non-registered console */
+ spin_lock_init(&sport->port.lock);
+
imx_uart_ports[sport->port.line] = sport;
platform_set_drvdata(pdev, sport);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index c5e46ff972e4..62813e421f12 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/lantiq.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -597,6 +598,7 @@ static const struct uart_ops lqasc_pops = {
.verify_port = lqasc_verify_port,
};
+#ifdef CONFIG_SERIAL_LANTIQ_CONSOLE
static void
lqasc_console_putchar(struct uart_port *port, int ch)
{
@@ -705,6 +707,14 @@ lqasc_serial_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(lantiq, "lantiq,asc", lqasc_serial_early_console_setup);
OF_EARLYCON_DECLARE(lantiq, "intel,lgm-asc", lqasc_serial_early_console_setup);
+#define LANTIQ_SERIAL_CONSOLE (&lqasc_console)
+
+#else
+
+#define LANTIQ_SERIAL_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_LANTIQ_CONSOLE */
+
static struct uart_driver lqasc_reg = {
.owner = THIS_MODULE,
.driver_name = DRVNAME,
@@ -712,7 +722,7 @@ static struct uart_driver lqasc_reg = {
.major = 0,
.minor = 0,
.nr = MAXPORTS,
- .cons = &lqasc_console,
+ .cons = LANTIQ_SERIAL_CONSOLE,
};
static int fetch_irq_lantiq(struct device *dev, struct ltq_uart_port *ltq_port)
@@ -814,8 +824,7 @@ static void free_irq_intel(struct uart_port *port)
free_irq(ltq_port->common_irq, port);
}
-static int __init
-lqasc_probe(struct platform_device *pdev)
+static int lqasc_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
@@ -899,6 +908,13 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static int lqasc_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ return uart_remove_one_port(&lqasc_reg, port);
+}
+
static const struct ltq_soc_data soc_data_lantiq = {
.fetch_irq = fetch_irq_lantiq,
.request_irq = request_irq_lantiq,
@@ -916,8 +932,11 @@ static const struct of_device_id ltq_asc_match[] = {
{ .compatible = "intel,lgm-asc", .data = &soc_data_intel },
{},
};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
static struct platform_driver lqasc_driver = {
+ .probe = lqasc_probe,
+ .remove = lqasc_remove,
.driver = {
.name = DRVNAME,
.of_match_table = ltq_asc_match,
@@ -933,10 +952,21 @@ init_lqasc(void)
if (ret != 0)
return ret;
- ret = platform_driver_probe(&lqasc_driver, lqasc_probe);
+ ret = platform_driver_register(&lqasc_driver);
if (ret != 0)
uart_unregister_driver(&lqasc_reg);
return ret;
}
-device_initcall(init_lqasc);
+
+static void __exit exit_lqasc(void)
+{
+ platform_driver_unregister(&lqasc_driver);
+ uart_unregister_driver(&lqasc_reg);
+}
+
+module_init(init_lqasc);
+module_exit(exit_lqasc);
+
+MODULE_DESCRIPTION("Serial driver for Lantiq & Intel gateway SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index 9a836dcac157..b5898c932036 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -23,7 +23,6 @@
#include <linux/nmi.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/sizes.h>
#include <linux/soc/nxp/lpc32xx-misc.h>
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index f7d6b3c9ea45..8573fc9cb0cd 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -33,8 +33,7 @@
#include <linux/pm_wakeirq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_data/serial-omap.h>
#define OMAP_MAX_HSUART_PORTS 10
@@ -153,7 +152,7 @@ struct uart_omap_port {
u32 errata;
u32 features;
- int rts_gpio;
+ struct gpio_desc *rts_gpiod;
struct pm_qos_request pm_qos_request;
u32 latency;
@@ -303,11 +302,11 @@ static void serial_omap_stop_tx(struct uart_port *port)
serial_out(up, UART_OMAP_SCR, up->scr);
res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
1 : 0;
- if (gpio_get_value(up->rts_gpio) != res) {
+ if (gpiod_get_value(up->rts_gpiod) != res) {
if (port->rs485.delay_rts_after_send > 0)
mdelay(
port->rs485.delay_rts_after_send);
- gpio_set_value(up->rts_gpio, res);
+ gpiod_set_value(up->rts_gpiod, res);
}
} else {
/* We're asked to stop, but there's still stuff in the
@@ -412,8 +411,8 @@ static void serial_omap_start_tx(struct uart_port *port)
/* if rts not already enabled */
res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
- if (gpio_get_value(up->rts_gpio) != res) {
- gpio_set_value(up->rts_gpio, res);
+ if (gpiod_get_value(up->rts_gpiod) != res) {
+ gpiod_set_value(up->rts_gpiod, res);
if (port->rs485.delay_rts_before_send > 0)
mdelay(port->rs485.delay_rts_before_send);
}
@@ -1414,12 +1413,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
* Just as a precaution, only allow rs485
* to be enabled if the gpio pin is valid
*/
- if (gpio_is_valid(up->rts_gpio)) {
+ if (up->rts_gpiod) {
/* enable / disable rts */
val = (port->rs485.flags & SER_RS485_ENABLED) ?
SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
val = (port->rs485.flags & val) ? 1 : 0;
- gpio_set_value(up->rts_gpio, val);
+ gpiod_set_value(up->rts_gpiod, val);
} else
port->rs485.flags &= ~SER_RS485_ENABLED;
@@ -1596,18 +1595,22 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
}
static int serial_omap_probe_rs485(struct uart_omap_port *up,
- struct device_node *np)
+ struct device *dev)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
+ struct device_node *np = dev->of_node;
+ enum gpiod_flags gflags;
int ret;
rs485conf->flags = 0;
- up->rts_gpio = -EINVAL;
+ up->rts_gpiod = NULL;
if (!np)
return 0;
- uart_get_rs485_mode(up->dev, rs485conf);
+ ret = uart_get_rs485_mode(&up->port);
+ if (ret)
+ return ret;
if (of_property_read_bool(np, "rs485-rts-active-high")) {
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
@@ -1618,19 +1621,20 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
}
/* check for tx enable gpio */
- up->rts_gpio = of_get_named_gpio(np, "rts-gpio", 0);
- if (gpio_is_valid(up->rts_gpio)) {
- ret = devm_gpio_request(up->dev, up->rts_gpio, "omap-serial");
- if (ret < 0)
- return ret;
- ret = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ? 1 : 0;
- ret = gpio_direction_output(up->rts_gpio, ret);
- if (ret < 0)
+ gflags = rs485conf->flags & SER_RS485_RTS_AFTER_SEND ?
+ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ up->rts_gpiod = devm_gpiod_get_optional(dev, "rts", gflags);
+ if (IS_ERR(up->rts_gpiod)) {
+ ret = PTR_ERR(up->rts_gpiod);
+ if (ret == -EPROBE_DEFER)
return ret;
- } else if (up->rts_gpio == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
+ /*
+ * FIXME: the code historically ignored any error other than
+ * -EPROBE_DEFER and just went on without the GPIO.
+ */
+ up->rts_gpiod = NULL;
} else {
- up->rts_gpio = -EINVAL;
+ gpiod_set_consumer_name(up->rts_gpiod, "omap-serial");
}
return 0;
@@ -1703,7 +1707,7 @@ static int serial_omap_probe(struct platform_device *pdev)
dev_info(up->port.dev, "no wakeirq for uart%d\n",
up->port.line);
- ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
+ ret = serial_omap_probe_rs485(up, &pdev->dev);
if (ret < 0)
goto err_rs485;
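The omap-serial hunks above convert the RTS pin from a legacy GPIO number to a GPIO descriptor. A hedged, generic sketch of the descriptor-based consumer calls used there follows; the "foo" helper, the "rts" function name and the polarity handling are illustrative assumptions rather than code taken from the driver:

/* Minimal sketch of the gpiod consumer API; not lifted from omap-serial. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static struct gpio_desc *foo_request_rts(struct device *dev, bool assert_at_probe)
{
	enum gpiod_flags flags = assert_at_probe ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
	struct gpio_desc *desc;

	/* NULL when no "rts-gpios" property exists, ERR_PTR() on real errors. */
	desc = devm_gpiod_get_optional(dev, "rts", flags);
	if (IS_ERR(desc))
		return desc;

	if (desc)
		gpiod_set_consumer_name(desc, "foo-rts");

	return desc;
}

/*
 * Later the line is driven with gpiod_set_value(desc, 1) and read with
 * gpiod_get_value(desc); active-high vs. active-low is handled by gpiolib.
 */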
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 6bace1c6bb09..457c0bf8cbf8 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -141,9 +141,10 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport);
static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
- 32000000, 48000000, 64000000, 80000000,
- 96000000, 100000000, 102400000,
- 112000000, 120000000, 128000000};
+ 32000000, 48000000, 51200000, 64000000,
+ 80000000, 96000000, 100000000,
+ 102400000, 112000000, 120000000,
+ 128000000};
#define to_dev_port(ptr, member) \
container_of(ptr, struct qcom_geni_serial_port, member)
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 73f951d65b93..d913d9b2762a 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -154,10 +154,33 @@ struct s3c24xx_uart_port {
#define portaddrl(port, reg) \
((unsigned long *)(unsigned long)((port)->membase + (reg)))
-#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+static u32 rd_reg(struct uart_port *port, u32 reg)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ return readb_relaxed(portaddr(port, reg));
+ case UPIO_MEM32:
+ return readl_relaxed(portaddr(port, reg));
+ default:
+ return 0;
+ }
+ return 0;
+}
+
#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
-#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+static void wr_reg(struct uart_port *port, u32 reg, u32 val)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ writeb_relaxed(val, portaddr(port, reg));
+ break;
+ case UPIO_MEM32:
+ writel_relaxed(val, portaddr(port, reg));
+ break;
+ }
+}
+
#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
/* Byte-order aware bit setting/clearing functions. */
@@ -714,7 +737,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
fifocnt--;
uerstat = rd_regl(port, S3C2410_UERSTAT);
- ch = rd_regb(port, S3C2410_URXH);
+ ch = rd_reg(port, S3C2410_URXH);
if (port->flags & UPF_CONS_FLOW) {
int txe = s3c24xx_serial_txempty_nofifo(port);
@@ -826,7 +849,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
}
if (port->x_char) {
- wr_regb(port, S3C2410_UTXH, port->x_char);
+ wr_reg(port, S3C2410_UTXH, port->x_char);
port->icount.tx++;
port->x_char = 0;
goto out;
@@ -852,7 +875,7 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
break;
- wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+ wr_reg(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
count--;
@@ -916,7 +939,7 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
/* no modem control lines */
static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
{
- unsigned int umstat = rd_regb(port, S3C2410_UMSTAT);
+ unsigned int umstat = rd_reg(port, S3C2410_UMSTAT);
if (umstat & S3C2410_UMSTAT_CTS)
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
@@ -1281,14 +1304,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
- unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+ unsigned int cnt, baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
- ourport->info->def_clk_sel;
for (cnt = 0; cnt < info->num_clks; cnt++) {
- if (!(clk_sel & (1 << cnt)))
+ /* Keep selected clock if provided */
+ if (ourport->cfg->clk_sel &&
+ !(ourport->cfg->clk_sel & (1 << cnt)))
continue;
sprintf(clkname, "clk_uart_baud%d", cnt);
@@ -1974,7 +1997,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c24xx_uart_port *ourport;
int index = probe_index;
- int ret;
+ int ret, prop = 0;
if (np) {
ret = of_alias_get_id(np, "serial");
@@ -2000,10 +2023,27 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
dev_get_platdata(&pdev->dev) :
ourport->drv_data->def_cfg;
- if (np)
+ if (np) {
of_property_read_u32(np,
"samsung,uart-fifosize", &ourport->port.fifosize);
+ if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+ switch (prop) {
+ case 1:
+ ourport->port.iotype = UPIO_MEM;
+ break;
+ case 4:
+ ourport->port.iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&pdev->dev, "unsupported reg-io-width (%d)\n",
+ prop);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
if (ourport->drv_data->fifosize[index])
ourport->port.fifosize = ourport->drv_data->fifosize[index];
else if (ourport->info->fifosize)
@@ -2185,7 +2225,7 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
return NO_POLL_CHAR;
- return rd_regb(port, S3C2410_URXH);
+ return rd_reg(port, S3C2410_URXH);
}
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
@@ -2200,7 +2240,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
- wr_regb(port, S3C2410_UTXH, c);
+ wr_reg(port, S3C2410_UTXH, c);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -2212,7 +2252,7 @@ s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
- wr_regb(port, S3C2410_UTXH, ch);
+ wr_reg(port, S3C2410_UTXH, ch);
}
static void
@@ -2587,6 +2627,18 @@ module_platform_driver(samsung_serial_driver);
* Early console.
*/
+static void wr_reg_barrier(struct uart_port *port, u32 reg, u32 val)
+{
+ switch (port->iotype) {
+ case UPIO_MEM:
+ writeb(val, portaddr(port, reg));
+ break;
+ case UPIO_MEM32:
+ writel(val, portaddr(port, reg));
+ break;
+ }
+}
+
struct samsung_early_console_data {
u32 txfull_mask;
};
@@ -2612,7 +2664,7 @@ static void samsung_early_putc(struct uart_port *port, int c)
else
samsung_early_busyuart(port);
- writeb(c, port->membase + S3C2410_UTXH);
+ wr_reg_barrier(port, S3C2410_UTXH, c);
}
static void samsung_early_write(struct console *con, const char *s,
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 06e8071d5601..d2e5c6c86643 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -315,6 +315,7 @@ struct sc16is7xx_one {
struct kthread_work tx_work;
struct kthread_work reg_work;
struct sc16is7xx_one_config config;
+ bool irda_mode;
};
struct sc16is7xx_port {
@@ -327,7 +328,6 @@ struct sc16is7xx_port {
unsigned char buf[SC16IS7XX_FIFO_SIZE];
struct kthread_worker kworker;
struct task_struct *kworker_task;
- struct kthread_work irq_work;
struct mutex efr_lock;
struct sc16is7xx_one p[];
};
@@ -710,9 +710,9 @@ static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
return true;
}
-static void sc16is7xx_ist(struct kthread_work *ws)
+static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
{
- struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
+ struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
mutex_lock(&s->efr_lock);
@@ -727,13 +727,6 @@ static void sc16is7xx_ist(struct kthread_work *ws)
}
mutex_unlock(&s->efr_lock);
-}
-
-static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
-{
- struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
-
- kthread_queue_work(&s->kworker, &s->irq_work);
return IRQ_HANDLED;
}
@@ -994,6 +987,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
static int sc16is7xx_startup(struct uart_port *port)
{
+ struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
unsigned int val;
@@ -1032,6 +1026,13 @@ static int sc16is7xx_startup(struct uart_port *port)
/* Now, initialize the UART */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_WORD_LEN_8);
+ /* Enable IrDA mode if requested in DT */
+ /* This bit must be written with LCR[7] = 0 */
+ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_IRDA_BIT,
+ one->irda_mode ?
+ SC16IS7XX_MCR_IRDA_BIT : 0);
+
/* Enable the Rx and Tx FIFO */
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
@@ -1176,10 +1177,11 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
static int sc16is7xx_probe(struct device *dev,
const struct sc16is7xx_devtype *devtype,
- struct regmap *regmap, int irq, unsigned long flags)
+ struct regmap *regmap, int irq)
{
struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
unsigned long freq = 0, *pfreq = dev_get_platdata(dev);
+ unsigned int val;
u32 uartclk = 0;
int i, ret;
struct sc16is7xx_port *s;
@@ -1187,6 +1189,16 @@ static int sc16is7xx_probe(struct device *dev,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ /*
+ * This device does not have an identification register that would
+ * tell us if we are really connected to the correct device.
+ * The best we can do is to check if communication is at all possible.
+ */
+ ret = regmap_read(regmap,
+ SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
+ if (ret < 0)
+ return ret;
+
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
if (!s) {
@@ -1221,7 +1233,6 @@ static int sc16is7xx_probe(struct device *dev,
mutex_init(&s->efr_lock);
kthread_init_worker(&s->kworker);
- kthread_init_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
@@ -1302,9 +1313,33 @@ static int sc16is7xx_probe(struct device *dev,
sc16is7xx_power(&s->p[i].port, 0);
}
- /* Setup interrupt */
- ret = devm_request_irq(dev, irq, sc16is7xx_irq,
- flags, dev_name(dev), s);
+ if (dev->of_node) {
+ struct property *prop;
+ const __be32 *p;
+ u32 u;
+
+ of_property_for_each_u32(dev->of_node, "irda-mode-ports",
+ prop, p, u)
+ if (u < devtype->nr_uart)
+ s->p[u].irda_mode = true;
+ }
+
+ /*
+ * Setup interrupt. We first try to acquire the IRQ line as level IRQ.
+ * If that succeeds, we can allow sharing the interrupt as well.
+ * In case the interrupt controller doesn't support that, we fall
+ * back to a non-shared falling-edge trigger.
+ */
+ ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq,
+ IRQF_TRIGGER_LOW | IRQF_SHARED |
+ IRQF_ONESHOT,
+ dev_name(dev), s);
+ if (!ret)
+ return 0;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), s);
if (!ret)
return 0;
@@ -1378,7 +1413,6 @@ static struct regmap_config regcfg = {
static int sc16is7xx_spi_probe(struct spi_device *spi)
{
const struct sc16is7xx_devtype *devtype;
- unsigned long flags = 0;
struct regmap *regmap;
int ret;
@@ -1399,14 +1433,13 @@ static int sc16is7xx_spi_probe(struct spi_device *spi)
const struct spi_device_id *id_entry = spi_get_device_id(spi);
devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
- flags = IRQF_TRIGGER_FALLING;
}
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_spi(spi, &regcfg);
- return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq, flags);
+ return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq);
}
static int sc16is7xx_spi_remove(struct spi_device *spi)
@@ -1445,7 +1478,6 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
const struct sc16is7xx_devtype *devtype;
- unsigned long flags = 0;
struct regmap *regmap;
if (i2c->dev.of_node) {
@@ -1454,14 +1486,13 @@ static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
} else {
devtype = (struct sc16is7xx_devtype *)id->driver_data;
- flags = IRQF_TRIGGER_FALLING;
}
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_i2c(i2c, &regcfg);
- return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq, flags);
+ return sc16is7xx_probe(&i2c->dev, devtype, regmap, i2c->irq);
}
static int sc16is7xx_i2c_remove(struct i2c_client *client)
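The interrupt hunk above first asks for a shared, level-triggered threaded IRQ and, if the interrupt controller refuses, retries with a non-shared falling-edge trigger. A hedged generalization of that fallback pattern is sketched below; the handler body and all "foo" names are placeholders:

/* Minimal sketch of the level-first, edge-fallback IRQ request. */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	/* service the (hypothetical) device from thread context */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct device *dev, int irq, void *drvdata)
{
	int ret;

	/* Preferred: shared level IRQ, which tolerates several devices per line. */
	ret = devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread,
					IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_ONESHOT,
					dev_name(dev), drvdata);
	if (!ret)
		return 0;

	/* Fallback: non-shared falling-edge trigger. */
	return devm_request_threaded_irq(dev, irq, NULL, foo_irq_thread,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 dev_name(dev), drvdata);
}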
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 66a5e2faf57e..57840cf90388 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -3295,8 +3295,10 @@ EXPORT_SYMBOL(uart_remove_one_port);
* This function implements the device tree binding described in
* Documentation/devicetree/bindings/serial/rs485.txt.
*/
-void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
+int uart_get_rs485_mode(struct uart_port *port)
{
+ struct serial_rs485 *rs485conf = &port->rs485;
+ struct device *dev = port->dev;
u32 rs485_delay[2];
int ret;
@@ -3315,6 +3317,7 @@ void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
* to get to a defined state with the following properties:
*/
rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED |
+ SER_RS485_TERMINATE_BUS |
SER_RS485_RTS_AFTER_SEND);
rs485conf->flags |= SER_RS485_RTS_ON_SEND;
@@ -3328,6 +3331,23 @@ void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf)
rs485conf->flags &= ~SER_RS485_RTS_ON_SEND;
rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
}
+
+ /*
+ * Disabling termination by default is the safe choice: if many bus
+ * participants enable it, no communication is possible at all.
+ * The default works fine for short cables; users may enable termination for longer ones.
+ */
+ port->rs485_term_gpio = devm_gpiod_get_optional(dev, "rs485-term",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(port->rs485_term_gpio)) {
+ ret = PTR_ERR(port->rs485_term_gpio);
+ port->rs485_term_gpio = NULL;
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Cannot get rs485-term-gpios\n");
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
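With this change uart_get_rs485_mode() takes the uart_port itself and can fail, most notably with -EPROBE_DEFER while the optional rs485-term GPIO is not yet available, which is why the drivers earlier in this diff grow error handling around it. A hedged sketch of the new calling convention follows; the "foo" function is invented:

/* Minimal sketch of a caller of the int-returning uart_get_rs485_mode(). */
#include <linux/serial_core.h>

static int foo_setup_rs485(struct uart_port *port)
{
	int ret;

	/* Old form was: uart_get_rs485_mode(port->dev, &port->rs485); with void return. */
	ret = uart_get_rs485_mode(port);
	if (ret)
		return ret;	/* e.g. -EPROBE_DEFER for rs485-term-gpios */

	/* port->rs485 and port->rs485_term_gpio are now populated. */
	return 0;
}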
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 0b9e804e61a9..c0dfe4382898 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -2,7 +2,6 @@
#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 5e93e8d40f59..8602ff357321 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -31,6 +31,7 @@
#include <linux/tty_flip.h>
#include <linux/tty.h>
+#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
static void stm32_stop_tx(struct uart_port *port);
@@ -158,9 +159,7 @@ static int stm32_init_rs485(struct uart_port *port,
if (!pdev->dev.of_node)
return -ENODEV;
- uart_get_rs485_mode(&pdev->dev, rs485conf);
-
- return 0;
+ return uart_get_rs485_mode(port);
}
static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
@@ -510,12 +509,29 @@ static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
else
stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
+
+ mctrl_gpio_set(stm32_port->gpios, mctrl);
}
static unsigned int stm32_get_mctrl(struct uart_port *port)
{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ unsigned int ret;
+
/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
- return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+ ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+
+ return mctrl_gpio_get(stm32_port->gpios, &ret);
+}
+
+static void stm32_enable_ms(struct uart_port *port)
+{
+ mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
+}
+
+static void stm32_disable_ms(struct uart_port *port)
+{
+ mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}
/* Transmit stop */
@@ -626,6 +642,9 @@ static void stm32_shutdown(struct uart_port *port)
u32 val, isr;
int ret;
+ /* Disable modem control interrupts */
+ stm32_disable_ms(port);
+
val = USART_CR1_TXEIE | USART_CR1_TE;
val |= stm32_port->cr1_irq | USART_CR1_RE;
val |= BIT(cfg->uart_enable_bit);
@@ -764,6 +783,12 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
}
+ /* Handle modem control interrupts */
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ stm32_enable_ms(port);
+ else
+ stm32_disable_ms(port);
+
usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
/*
@@ -898,6 +923,7 @@ static const struct uart_ops stm32_uart_ops = {
.throttle = stm32_throttle,
.unthrottle = stm32_unthrottle,
.stop_rx = stm32_stop_rx,
+ .enable_ms = stm32_enable_ms,
.break_ctl = stm32_break_ctl,
.startup = stm32_startup,
.shutdown = stm32_shutdown,
@@ -931,7 +957,9 @@ static int stm32_init_port(struct stm32_port *stm32port,
port->rs485_config = stm32_config_rs485;
- stm32_init_rs485(port, pdev);
+ ret = stm32_init_rs485(port, pdev);
+ if (ret)
+ return ret;
if (stm32port->info->cfg.has_wakeup) {
stm32port->wakeirq = platform_get_irq(pdev, 1);
@@ -960,11 +988,32 @@ static int stm32_init_port(struct stm32_port *stm32port,
stm32port->port.uartclk = clk_get_rate(stm32port->clk);
if (!stm32port->port.uartclk) {
- clk_disable_unprepare(stm32port->clk);
ret = -EINVAL;
+ goto err_clk;
+ }
+
+ stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
+ if (IS_ERR(stm32port->gpios)) {
+ ret = PTR_ERR(stm32port->gpios);
+ goto err_clk;
+ }
+
+ /* Both CTS/RTS gpios and "st,hw-flow-ctrl" should not be specified */
+ if (stm32port->hw_flow_control) {
+ if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
+ mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
+ dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
+ ret = -EINVAL;
+ goto err_clk;
+ }
}
return ret;
+
+err_clk:
+ clk_disable_unprepare(stm32port->clk);
+
+ return ret;
}
static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
@@ -1375,7 +1424,18 @@ static int __maybe_unused stm32_serial_suspend(struct device *dev)
else
stm32_serial_enable_wakeup(port, false);
- pinctrl_pm_select_sleep_state(dev);
+ /*
+ * When "no_console_suspend" is enabled, keep the pinctrl default state
+ * and rely on bootloader stage to restore this state upon resume.
+ * Otherwise, apply the idle or sleep states depending on wakeup
+ * capabilities.
+ */
+ if (console_suspend_enabled || !uart_console(port)) {
+ if (device_may_wakeup(dev))
+ pinctrl_pm_select_idle_state(dev);
+ else
+ pinctrl_pm_select_sleep_state(dev);
+ }
return 0;
}
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index db8bf0d4982d..d4c916e78d40 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -274,6 +274,7 @@ struct stm32_port {
bool fifoen;
int wakeirq;
int rdr_mask; /* receive data register mask */
+ struct mctrl_gpios *gpios; /* modem control gpios */
};
static struct stm32_port stm32_ports[STM32_MAX_PORTS];
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 35e9e8faf8de..b9d672af8b65 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1235,9 +1235,7 @@ static void cdns_uart_console_write(struct console *co, const char *s,
writel(ctrl, port->membase + CDNS_UART_CR);
uart_console_write(port, s, count, cdns_uart_console_putchar);
- while ((readl(port->membase + CDNS_UART_SR) &
- (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE)) !=
- CDNS_UART_SR_TXEMPTY)
+ while (cdns_uart_tx_empty(port) != TIOCSER_TEMT)
cpu_relax();
/* restore interrupt state */
@@ -1262,6 +1260,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ unsigned long time_out;
if (!port->membase) {
pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
@@ -1272,6 +1271,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
+ /* Wait for tx_empty before setting up the console */
+ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
+
+ while (time_before(jiffies, time_out) &&
+ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
+ cpu_relax();
+
return uart_set_options(port, co, baud, parity, bits, flow);
}
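The console-setup hunk above bounds its wait for the transmitter to drain with a jiffies deadline instead of spinning indefinitely. A hedged sketch of that bounded busy-wait idiom, with an invented predicate callback and timeout parameter:

/* Minimal sketch of a jiffies-bounded busy-wait; all names are placeholders. */
#include <linux/jiffies.h>
#include <linux/processor.h>
#include <linux/types.h>

static bool foo_wait_for(bool (*ready)(void *ctx), void *ctx, unsigned long timeout_us)
{
	unsigned long deadline = jiffies + usecs_to_jiffies(timeout_us);

	while (time_before(jiffies, deadline)) {
		if (ready(ctx))
			return true;
		cpu_relax();
	}

	return ready(ctx);	/* final check once the deadline has passed */
}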
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 0dc3878794fd..7c95afa905a0 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -106,7 +106,7 @@ static void sysrq_handle_loglevel(int key)
pr_info("Loglevel set to %d\n", i);
console_loglevel = i;
}
-static struct sysrq_key_op sysrq_loglevel_op = {
+static const struct sysrq_key_op sysrq_loglevel_op = {
.handler = sysrq_handle_loglevel,
.help_msg = "loglevel(0-9)",
.action_msg = "Changing Loglevel",
@@ -119,14 +119,14 @@ static void sysrq_handle_SAK(int key)
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
schedule_work(SAK_work);
}
-static struct sysrq_key_op sysrq_SAK_op = {
+static const struct sysrq_key_op sysrq_SAK_op = {
.handler = sysrq_handle_SAK,
.help_msg = "sak(k)",
.action_msg = "SAK",
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_SAK_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_SAK_op (*(const struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_VT
@@ -135,14 +135,14 @@ static void sysrq_handle_unraw(int key)
vt_reset_unicode(fg_console);
}
-static struct sysrq_key_op sysrq_unraw_op = {
+static const struct sysrq_key_op sysrq_unraw_op = {
.handler = sysrq_handle_unraw,
.help_msg = "unraw(r)",
.action_msg = "Keyboard mode set to system default",
.enable_mask = SYSRQ_ENABLE_KEYBOARD,
};
#else
-#define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_unraw_op (*(const struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
static void sysrq_handle_crash(int key)
@@ -152,7 +152,7 @@ static void sysrq_handle_crash(int key)
panic("sysrq triggered crash\n");
}
-static struct sysrq_key_op sysrq_crash_op = {
+static const struct sysrq_key_op sysrq_crash_op = {
.handler = sysrq_handle_crash,
.help_msg = "crash(c)",
.action_msg = "Trigger a crash",
@@ -165,18 +165,20 @@ static void sysrq_handle_reboot(int key)
local_irq_enable();
emergency_restart();
}
-static struct sysrq_key_op sysrq_reboot_op = {
+static const struct sysrq_key_op sysrq_reboot_op = {
.handler = sysrq_handle_reboot,
.help_msg = "reboot(b)",
.action_msg = "Resetting",
.enable_mask = SYSRQ_ENABLE_BOOT,
};
+const struct sysrq_key_op *__sysrq_reboot_op = &sysrq_reboot_op;
+
static void sysrq_handle_sync(int key)
{
emergency_sync();
}
-static struct sysrq_key_op sysrq_sync_op = {
+static const struct sysrq_key_op sysrq_sync_op = {
.handler = sysrq_handle_sync,
.help_msg = "sync(s)",
.action_msg = "Emergency Sync",
@@ -188,7 +190,7 @@ static void sysrq_handle_show_timers(int key)
sysrq_timer_list_show();
}
-static struct sysrq_key_op sysrq_show_timers_op = {
+static const struct sysrq_key_op sysrq_show_timers_op = {
.handler = sysrq_handle_show_timers,
.help_msg = "show-all-timers(q)",
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
@@ -198,7 +200,7 @@ static void sysrq_handle_mountro(int key)
{
emergency_remount();
}
-static struct sysrq_key_op sysrq_mountro_op = {
+static const struct sysrq_key_op sysrq_mountro_op = {
.handler = sysrq_handle_mountro,
.help_msg = "unmount(u)",
.action_msg = "Emergency Remount R/O",
@@ -211,13 +213,13 @@ static void sysrq_handle_showlocks(int key)
debug_show_all_locks();
}
-static struct sysrq_key_op sysrq_showlocks_op = {
+static const struct sysrq_key_op sysrq_showlocks_op = {
.handler = sysrq_handle_showlocks,
.help_msg = "show-all-locks(d)",
.action_msg = "Show Locks Held",
};
#else
-#define sysrq_showlocks_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_showlocks_op (*(const struct sysrq_key_op *)NULL)
#endif
#ifdef CONFIG_SMP
@@ -233,7 +235,7 @@ static void showacpu(void *dummy)
raw_spin_lock_irqsave(&show_lock, flags);
pr_info("CPU%d:\n", smp_processor_id());
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_INFO);
raw_spin_unlock_irqrestore(&show_lock, flags);
}
@@ -264,7 +266,7 @@ static void sysrq_handle_showallcpus(int key)
}
}
-static struct sysrq_key_op sysrq_showallcpus_op = {
+static const struct sysrq_key_op sysrq_showallcpus_op = {
.handler = sysrq_handle_showallcpus,
.help_msg = "show-backtrace-all-active-cpus(l)",
.action_msg = "Show backtrace of all active CPUs",
@@ -282,7 +284,7 @@ static void sysrq_handle_showregs(int key)
show_regs(regs);
perf_event_print_debug();
}
-static struct sysrq_key_op sysrq_showregs_op = {
+static const struct sysrq_key_op sysrq_showregs_op = {
.handler = sysrq_handle_showregs,
.help_msg = "show-registers(p)",
.action_msg = "Show Regs",
@@ -294,7 +296,7 @@ static void sysrq_handle_showstate(int key)
show_state();
show_workqueue_state();
}
-static struct sysrq_key_op sysrq_showstate_op = {
+static const struct sysrq_key_op sysrq_showstate_op = {
.handler = sysrq_handle_showstate,
.help_msg = "show-task-states(t)",
.action_msg = "Show State",
@@ -305,7 +307,7 @@ static void sysrq_handle_showstate_blocked(int key)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
-static struct sysrq_key_op sysrq_showstate_blocked_op = {
+static const struct sysrq_key_op sysrq_showstate_blocked_op = {
.handler = sysrq_handle_showstate_blocked,
.help_msg = "show-blocked-tasks(w)",
.action_msg = "Show Blocked State",
@@ -319,21 +321,21 @@ static void sysrq_ftrace_dump(int key)
{
ftrace_dump(DUMP_ALL);
}
-static struct sysrq_key_op sysrq_ftrace_dump_op = {
+static const struct sysrq_key_op sysrq_ftrace_dump_op = {
.handler = sysrq_ftrace_dump,
.help_msg = "dump-ftrace-buffer(z)",
.action_msg = "Dump ftrace buffer",
.enable_mask = SYSRQ_ENABLE_DUMP,
};
#else
-#define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
+#define sysrq_ftrace_dump_op (*(const struct sysrq_key_op *)NULL)
#endif
static void sysrq_handle_showmem(int key)
{
show_mem(0, NULL);
}
-static struct sysrq_key_op sysrq_showmem_op = {
+static const struct sysrq_key_op sysrq_showmem_op = {
.handler = sysrq_handle_showmem,
.help_msg = "show-memory-usage(m)",
.action_msg = "Show Memory",
@@ -364,7 +366,7 @@ static void sysrq_handle_term(int key)
send_sig_all(SIGTERM);
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
}
-static struct sysrq_key_op sysrq_term_op = {
+static const struct sysrq_key_op sysrq_term_op = {
.handler = sysrq_handle_term,
.help_msg = "terminate-all-tasks(e)",
.action_msg = "Terminate All Tasks",
@@ -394,7 +396,7 @@ static void sysrq_handle_moom(int key)
{
schedule_work(&moom_work);
}
-static struct sysrq_key_op sysrq_moom_op = {
+static const struct sysrq_key_op sysrq_moom_op = {
.handler = sysrq_handle_moom,
.help_msg = "memory-full-oom-kill(f)",
.action_msg = "Manual OOM execution",
@@ -406,7 +408,7 @@ static void sysrq_handle_thaw(int key)
{
emergency_thaw_all();
}
-static struct sysrq_key_op sysrq_thaw_op = {
+static const struct sysrq_key_op sysrq_thaw_op = {
.handler = sysrq_handle_thaw,
.help_msg = "thaw-filesystems(j)",
.action_msg = "Emergency Thaw of all frozen filesystems",
@@ -419,7 +421,7 @@ static void sysrq_handle_kill(int key)
send_sig_all(SIGKILL);
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
}
-static struct sysrq_key_op sysrq_kill_op = {
+static const struct sysrq_key_op sysrq_kill_op = {
.handler = sysrq_handle_kill,
.help_msg = "kill-all-tasks(i)",
.action_msg = "Kill All Tasks",
@@ -430,7 +432,7 @@ static void sysrq_handle_unrt(int key)
{
normalize_rt_tasks();
}
-static struct sysrq_key_op sysrq_unrt_op = {
+static const struct sysrq_key_op sysrq_unrt_op = {
.handler = sysrq_handle_unrt,
.help_msg = "nice-all-RT-tasks(n)",
.action_msg = "Nice All RT Tasks",
@@ -440,7 +442,7 @@ static struct sysrq_key_op sysrq_unrt_op = {
/* Key Operations table and lock */
static DEFINE_SPINLOCK(sysrq_key_table_lock);
-static struct sysrq_key_op *sysrq_key_table[36] = {
+static const struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_loglevel_op, /* 0 */
&sysrq_loglevel_op, /* 1 */
&sysrq_loglevel_op, /* 2 */
@@ -516,9 +518,9 @@ static int sysrq_key_table_key2index(int key)
/*
* get and put functions for the table, exposed to modules.
*/
-struct sysrq_key_op *__sysrq_get_key_op(int key)
+static const struct sysrq_key_op *__sysrq_get_key_op(int key)
{
- struct sysrq_key_op *op_p = NULL;
+ const struct sysrq_key_op *op_p = NULL;
int i;
i = sysrq_key_table_key2index(key);
@@ -528,7 +530,7 @@ struct sysrq_key_op *__sysrq_get_key_op(int key)
return op_p;
}
-static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
+static void __sysrq_put_key_op(int key, const struct sysrq_key_op *op_p)
{
int i = sysrq_key_table_key2index(key);
@@ -538,7 +540,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
void __handle_sysrq(int key, bool check_mask)
{
- struct sysrq_key_op *op_p;
+ const struct sysrq_key_op *op_p;
int orig_log_level;
int orig_suppress_printk;
int i;
@@ -1061,8 +1063,8 @@ int sysrq_toggle_support(int enable_mask)
}
EXPORT_SYMBOL_GPL(sysrq_toggle_support);
-static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
- struct sysrq_key_op *remove_op_p)
+static int __sysrq_swap_key_ops(int key, const struct sysrq_key_op *insert_op_p,
+ const struct sysrq_key_op *remove_op_p)
{
int retval;
@@ -1085,13 +1087,13 @@ static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
return retval;
}
-int register_sysrq_key(int key, struct sysrq_key_op *op_p)
+int register_sysrq_key(int key, const struct sysrq_key_op *op_p)
{
return __sysrq_swap_key_ops(key, op_p, NULL);
}
EXPORT_SYMBOL(register_sysrq_key);
-int unregister_sysrq_key(int key, struct sysrq_key_op *op_p)
+int unregister_sysrq_key(int key, const struct sysrq_key_op *op_p)
{
return __sysrq_swap_key_ops(key, NULL, op_p);
}
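Because sysrq_key_op and the registration helpers are now const-qualified, callers can keep their key ops in read-only data. A hedged sketch of a module registering one key against the updated API; the key letter 'x', the messages and all "example" names are invented for illustration:

/* Minimal sketch of register_sysrq_key()/unregister_sysrq_key() with const ops. */
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sysrq.h>

static void example_sysrq_handler(int key)
{
	pr_info("example sysrq handler invoked for key %d\n", key);
}

static const struct sysrq_key_op example_sysrq_op = {
	.handler	= example_sysrq_handler,
	.help_msg	= "example(x)",
	.action_msg	= "Example action",
	.enable_mask	= SYSRQ_ENABLE_KEYBOARD,
};

static int __init example_sysrq_init(void)
{
	return register_sysrq_key('x', &example_sysrq_op);
}

static void __exit example_sysrq_exit(void)
{
	unregister_sysrq_key('x', &example_sysrq_op);
}

module_init(example_sysrq_init);
module_exit(example_sysrq_exit);
MODULE_LICENSE("GPL v2");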
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index d2a1e1228c82..9ffd42e333b8 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -605,6 +605,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
port->index = vcc_table_add(port);
if (port->index == -1) {
pr_err("VCC: no more TTY indices left for allocation\n");
+ rv = -ENOMEM;
goto free_ldc;
}
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index b28aa0d289f8..c1be96bb3ecf 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -12,7 +12,7 @@
* Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
*
* In order to prevent the following circular lock dependency:
- * &mm->mmap_sem --> cpu_hotplug.lock --> console_lock --> &mm->mmap_sem
+ * &mm->mmap_lock --> cpu_hotplug.lock --> console_lock --> &mm->mmap_lock
*
* We cannot allow page fault to happen while holding the console_lock.
* Therefore, all the userspace copy operations have to be done outside
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 15d33fa0c925..568b2171f335 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -127,7 +127,11 @@ static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
-static int npadch = -1; /* -1 or number assembled on pad */
+
+/* Handles a number being assembled on the number pad */
+static bool npadch_active;
+static unsigned int npadch_value;
+
static unsigned int diacr;
static char rep; /* flag telling character repeat */
@@ -845,12 +849,12 @@ static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
shift_state &= ~(1 << value);
/* kludge */
- if (up_flag && shift_state != old_state && npadch != -1) {
+ if (up_flag && shift_state != old_state && npadch_active) {
if (kbd->kbdmode == VC_UNICODE)
- to_utf8(vc, npadch);
+ to_utf8(vc, npadch_value);
else
- put_queue(vc, npadch & 0xff);
- npadch = -1;
+ put_queue(vc, npadch_value & 0xff);
+ npadch_active = false;
}
}
@@ -868,7 +872,7 @@ static void k_meta(struct vc_data *vc, unsigned char value, char up_flag)
static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
{
- int base;
+ unsigned int base;
if (up_flag)
return;
@@ -882,10 +886,12 @@ static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
base = 16;
}
- if (npadch == -1)
- npadch = value;
- else
- npadch = npadch * base + value;
+ if (!npadch_active) {
+ npadch_value = 0;
+ npadch_active = true;
+ }
+
+ npadch_value = npadch_value * base + value;
}
static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index d54a549c5892..31bb3647a99c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -185,47 +185,54 @@ int set_selection_user(const struct tiocl_selection __user *sel,
return set_selection_kernel(&v, tty);
}
-static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+static int vc_selection_store_chars(struct vc_data *vc, bool unicode)
{
- struct vc_data *vc = vc_cons[fg_console].d;
- int new_sel_start, new_sel_end, spc;
char *bp, *obp;
- int i, ps, pe;
- u32 c;
- int ret = 0;
- bool unicode;
-
- poke_blanked_console();
-
- v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
- v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
- v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
- v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
- ps = v->ys * vc->vc_size_row + (v->xs << 1);
- pe = v->ye * vc->vc_size_row + (v->xe << 1);
+ unsigned int i;
- if (v->sel_mode == TIOCL_SELCLEAR) {
- /* useful for screendump without selection highlights */
+ /* Allocate a new buffer before freeing the old one ... */
+ /* chars can take up to 4 bytes with unicode */
+ bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
+ GFP_KERNEL);
+ if (!bp) {
+ printk(KERN_WARNING "selection: kmalloc() failed\n");
clear_selection();
- return 0;
+ return -ENOMEM;
}
+ kfree(vc_sel.buffer);
+ vc_sel.buffer = bp;
- if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
- mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
- v->ys);
- return 0;
+ obp = bp;
+ for (i = vc_sel.start; i <= vc_sel.end; i += 2) {
+ u32 c = sel_pos(i, unicode);
+ if (unicode)
+ bp += store_utf8(c, bp);
+ else
+ *bp++ = c;
+ if (!isspace(c))
+ obp = bp;
+ if (!((i + 2) % vc->vc_size_row)) {
+ /* strip trailing blanks from line and add newline,
+ unless non-space at end of line. */
+ if (obp != bp) {
+ bp = obp;
+ *bp++ = '\r';
+ }
+ obp = bp;
+ }
}
+ vc_sel.buf_len = bp - vc_sel.buffer;
- if (ps > pe) /* make vc_sel.start <= vc_sel.end */
- swap(ps, pe);
+ return 0;
+}
- if (vc_sel.cons != vc_cons[fg_console].d) {
- clear_selection();
- vc_sel.cons = vc_cons[fg_console].d;
- }
- unicode = vt_do_kdgkbmode(fg_console) == K_UNICODE;
+static int vc_do_selection(struct vc_data *vc, unsigned short mode, int ps,
+ int pe)
+{
+ int new_sel_start, new_sel_end, spc;
+ bool unicode = vt_do_kdgkbmode(fg_console) == K_UNICODE;
- switch (v->sel_mode) {
+ switch (mode) {
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
new_sel_end = pe;
@@ -303,40 +310,44 @@ static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *
vc_sel.start = new_sel_start;
vc_sel.end = new_sel_end;
- /* Allocate a new buffer before freeing the old one ... */
- /* chars can take up to 4 bytes with unicode */
- bp = kmalloc_array((vc_sel.end - vc_sel.start) / 2 + 1, unicode ? 4 : 1,
- GFP_KERNEL);
- if (!bp) {
- printk(KERN_WARNING "selection: kmalloc() failed\n");
+ return vc_selection_store_chars(vc, unicode);
+}
+
+static int vc_selection(struct vc_data *vc, struct tiocl_selection *v,
+ struct tty_struct *tty)
+{
+ int ps, pe;
+
+ poke_blanked_console();
+
+ if (v->sel_mode == TIOCL_SELCLEAR) {
+ /* useful for screendump without selection highlights */
clear_selection();
- return -ENOMEM;
+ return 0;
}
- kfree(vc_sel.buffer);
- vc_sel.buffer = bp;
- obp = bp;
- for (i = vc_sel.start; i <= vc_sel.end; i += 2) {
- c = sel_pos(i, unicode);
- if (unicode)
- bp += store_utf8(c, bp);
- else
- *bp++ = c;
- if (!isspace(c))
- obp = bp;
- if (! ((i + 2) % vc->vc_size_row)) {
- /* strip trailing blanks from line and add newline,
- unless non-space at end of line. */
- if (obp != bp) {
- bp = obp;
- *bp++ = '\r';
- }
- obp = bp;
- }
+ v->xs = min_t(u16, v->xs - 1, vc->vc_cols - 1);
+ v->ys = min_t(u16, v->ys - 1, vc->vc_rows - 1);
+ v->xe = min_t(u16, v->xe - 1, vc->vc_cols - 1);
+ v->ye = min_t(u16, v->ye - 1, vc->vc_rows - 1);
+
+ if (mouse_reporting() && (v->sel_mode & TIOCL_SELMOUSEREPORT)) {
+ mouse_report(tty, v->sel_mode & TIOCL_SELBUTTONMASK, v->xs,
+ v->ys);
+ return 0;
}
- vc_sel.buf_len = bp - vc_sel.buffer;
- return ret;
+ ps = v->ys * vc->vc_size_row + (v->xs << 1);
+ pe = v->ye * vc->vc_size_row + (v->xe << 1);
+ if (ps > pe) /* make vc_sel.start <= vc_sel.end */
+ swap(ps, pe);
+
+ if (vc_sel.cons != vc) {
+ clear_selection();
+ vc_sel.cons = vc;
+ }
+
+ return vc_do_selection(vc, v->sel_mode, ps, pe);
}
int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
@@ -345,7 +356,7 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
mutex_lock(&vc_sel.lock);
console_lock();
- ret = __set_selection_kernel(v, tty);
+ ret = vc_selection(vc_cons[fg_console].d, v, tty);
console_unlock();
mutex_unlock(&vc_sel.lock);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 6e725c6c6256..73efb80815db 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -398,7 +398,7 @@ static void uio_dev_del_attributes(struct uio_device *idev)
static int uio_get_minor(struct uio_device *idev)
{
- int retval = -ENOMEM;
+ int retval;
mutex_lock(&minor_lock);
retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL);
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index f6ab3f28c838..6e27fe4fcca3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -44,7 +44,6 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int ret = 0;
int dmem_region = priv->dmem_region_start;
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
@@ -68,7 +67,7 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
mutex_unlock(&priv->alloc_lock);
/* Wait until the Runtime PM code has woken up the device */
pm_runtime_get_sync(&priv->pdev->dev);
- return ret;
+ return 0;
}
static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 3c5169eb23f5..4dae2320b103 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -361,6 +361,7 @@ hv_uio_remove(struct hv_device *dev)
if (!pdata)
return 0;
+ sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
hv_set_drvdata(dev, NULL);
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 5685ba11480b..e701ab56b0a7 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -138,7 +138,7 @@ static int cdns_ti_probe(struct platform_device *pdev)
error = pm_runtime_get_sync(dev);
if (error < 0) {
dev_err(dev, "pm_runtime_get_sync failed: %d\n", error);
- goto err_get;
+ goto err;
}
/* assert RESET */
@@ -185,7 +185,6 @@ static int cdns_ti_probe(struct platform_device *pdev)
err:
pm_runtime_put_sync(data->dev);
-err_get:
pm_runtime_disable(data->dev);
return error;
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 4aafba20f450..19bbb5b7e6b6 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -82,8 +82,6 @@ static void cdns3_exit_roles(struct cdns3 *cdns)
cdns3_drd_exit(cdns);
}
-static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns);
-
/**
* cdns3_core_init_role - initialize role of operation
* @cdns: Pointer to cdns3 structure
@@ -193,12 +191,12 @@ err:
}
/**
- * cdsn3_hw_role_state_machine - role switch state machine based on hw events.
+ * cdns3_hw_role_state_machine - role switch state machine based on hw events.
* @cdns: Pointer to controller structure.
*
* Returns next role to be entered based on hw events.
*/
-static enum usb_role cdsn3_hw_role_state_machine(struct cdns3 *cdns)
+static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
{
enum usb_role role;
int id, vbus;
@@ -291,14 +289,10 @@ int cdns3_hw_role_switch(struct cdns3 *cdns)
enum usb_role real_role, current_role;
int ret = 0;
- /* Do nothing if role based on syfs. */
- if (cdns->role_override)
- return 0;
-
pm_runtime_get_sync(cdns->dev);
current_role = cdns->role;
- real_role = cdsn3_hw_role_state_machine(cdns);
+ real_role = cdns3_hw_role_state_machine(cdns);
/* Do nothing if nothing changed */
if (current_role == real_role)
@@ -353,39 +347,6 @@ static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
pm_runtime_get_sync(cdns->dev);
- /*
- * FIXME: switch role framework should be extended to meet
- * requirements. Driver assumes that role can be controlled
- * by SW or HW. Temporary workaround is to use USB_ROLE_NONE to
- * switch from SW to HW control.
- *
- * For dr_mode == USB_DR_MODE_OTG:
- * if user sets USB_ROLE_HOST or USB_ROLE_DEVICE then driver
- * sets role_override flag and forces that role.
- * if user sets USB_ROLE_NONE, driver clears role_override and lets
- * HW state machine take over.
- *
- * For dr_mode != USB_DR_MODE_OTG:
- * Assumptions:
- * 1. Restricted user control between NONE and dr_mode.
- * 2. Driver doesn't need to rely on role_override flag.
- * 3. Driver needs to ensure that HW state machine is never called
- * if dr_mode != USB_DR_MODE_OTG.
- */
- if (role == USB_ROLE_NONE)
- cdns->role_override = 0;
- else
- cdns->role_override = 1;
-
- /*
- * HW state might have changed so driver need to trigger
- * HW state machine if dr_mode == USB_DR_MODE_OTG.
- */
- if (!cdns->role_override && cdns->dr_mode == USB_DR_MODE_OTG) {
- cdns3_hw_role_switch(cdns);
- goto pm_put;
- }
-
if (cdns->role == role)
goto pm_put;
@@ -528,6 +489,8 @@ static int cdns3_probe(struct platform_device *pdev)
sw_desc.get = cdns3_role_get;
sw_desc.allow_userspace_control = true;
sw_desc.driver_data = cdns;
+ if (device_property_read_bool(dev, "usb-role-switch"))
+ sw_desc.fwnode = dev->fwnode;
cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(cdns->role_sw)) {
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 969eb94de204..1ad1f1fe61e9 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -62,7 +62,6 @@ struct cdns3_role_driver {
* This field based on firmware setting, kernel configuration
* and hardware configuration.
* @role_sw: pointer to role switch object.
- * @role_override: set 1 if role rely on SW.
*/
struct cdns3 {
struct device *dev;
@@ -90,7 +89,6 @@ struct cdns3 {
struct mutex mutex;
enum usb_dr_mode dr_mode;
struct usb_role_switch *role_sw;
- int role_override;
};
int cdns3_hw_role_switch(struct cdns3 *cdns);
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 16ad485f0b69..58089841ed52 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -329,7 +329,7 @@ int cdns3_drd_init(struct cdns3 *cdns)
cdns->otg_v1_regs = NULL;
cdns->otg_regs = regs;
writel(1, &cdns->otg_v0_regs->simulate);
- dev_info(cdns->dev, "DRD version v0 (%08x)\n",
+ dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
readl(&cdns->otg_v0_regs->version));
} else {
cdns->otg_v0_regs = NULL;
@@ -337,7 +337,7 @@ int cdns3_drd_init(struct cdns3 *cdns)
cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
cdns->version = CDNS3_CONTROLLER_V1;
writel(1, &cdns->otg_v1_regs->simulate);
- dev_info(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+ dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
readl(&cdns->otg_v1_regs->did),
readl(&cdns->otg_v1_regs->rid));
}
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index e71240b386b4..82645a2a0f52 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -332,13 +332,6 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
case TEST_K:
case TEST_SE0_NAK:
case TEST_PACKET:
- cdns3_ep0_complete_setup(priv_dev, 0, 1);
- /**
- * Little delay to give the controller some time
- * for sending status stage.
- * This time should be less then 3ms.
- */
- mdelay(1);
cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
USB_CMD_STMODE |
USB_STS_TMODE_SEL(tmode - 1));
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 4d43f3b28309..5e24c2e57c0d 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -512,8 +512,8 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
}
static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep,
- struct cdns3_request *priv_req)
+ struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
{
if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
priv_req->flags & REQUEST_INTERNAL) {
@@ -552,8 +552,8 @@ static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_d
}
static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep,
- struct cdns3_request *priv_req)
+ struct cdns3_endpoint *priv_ep,
+ struct cdns3_request *priv_req)
{
int deferred = 0;
@@ -1905,7 +1905,7 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
}
static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep)
+ struct cdns3_endpoint *priv_ep)
{
if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
return;
@@ -1926,7 +1926,7 @@ static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
}
static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
- struct cdns3_endpoint *priv_ep)
+ struct cdns3_endpoint *priv_ep)
{
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
@@ -2965,7 +2965,7 @@ static int cdns3_init_eps(struct cdns3_device *priv_dev)
priv_ep->flags = 0;
- dev_info(priv_dev->dev, "Initialized %s support: %s %s\n",
+ dev_dbg(priv_dev->dev, "Initialized %s support: %s %s\n",
priv_ep->name,
priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
priv_ep->endpoint.caps.type_iso ? "ISO" : "");
@@ -3069,6 +3069,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
priv_dev->gadget.name = "usb-ss-gadget";
priv_dev->gadget.sg_supported = 1;
priv_dev->gadget.quirk_avoids_skb_reserve = 1;
+ priv_dev->gadget.irq = cdns->dev_irq;
spin_lock_init(&priv_dev->lock);
INIT_WORK(&priv_dev->pending_status_wq,
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index d53db520e209..8bafcfc6080d 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -18,17 +18,6 @@ config USB_CHIPIDEA
if USB_CHIPIDEA
-config USB_CHIPIDEA_OF
- tristate
- depends on OF
- default USB_CHIPIDEA
-
-config USB_CHIPIDEA_PCI
- tristate
- depends on USB_PCI
- depends on NOP_USB_XCEIV
- default USB_CHIPIDEA
-
config USB_CHIPIDEA_UDC
bool "ChipIdea device controller"
depends on USB_GADGET
@@ -43,4 +32,30 @@ config USB_CHIPIDEA_HOST
help
Say Y here to enable host controller functionality of the
ChipIdea driver.
+
+config USB_CHIPIDEA_PCI
+ tristate "Enable PCI glue driver" if EMBEDDED
+ depends on USB_PCI
+ depends on NOP_USB_XCEIV
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_MSM
+ tristate "Enable MSM hsusb glue driver" if EMBEDDED
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_IMX
+ tristate "Enable i.MX USB glue driver" if EMBEDDED
+ depends on OF
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_GENERIC
+ tristate "Enable generic USB2 glue driver" if EMBEDDED
+ default USB_CHIPIDEA
+
+config USB_CHIPIDEA_TEGRA
+ tristate "Enable Tegra UDC glue driver" if EMBEDDED
+ depends on OF
+ depends on USB_CHIPIDEA_UDC
+ default USB_CHIPIDEA
+
endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 12df94f78f72..fae779a23866 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -8,11 +8,8 @@ ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
# Glue/Bridge layers go here
-obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_usb2.o
-obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_msm.o
-obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc_zevio.o
-
-obj-$(CONFIG_USB_CHIPIDEA_PCI) += ci_hdrc_pci.o
-
-obj-$(CONFIG_USB_CHIPIDEA_OF) += usbmisc_imx.o ci_hdrc_imx.o
-obj-$(CONFIG_USB_CHIPIDEA_OF) += ci_hdrc_tegra.o
+obj-$(CONFIG_USB_CHIPIDEA_GENERIC) += ci_hdrc_usb2.o
+obj-$(CONFIG_USB_CHIPIDEA_MSM) += ci_hdrc_msm.o
+obj-$(CONFIG_USB_CHIPIDEA_PCI) += ci_hdrc_pci.o
+obj-$(CONFIG_USB_CHIPIDEA_IMX) += ci_hdrc_imx.o usbmisc_imx.o
+obj-$(CONFIG_USB_CHIPIDEA_TEGRA) += ci_hdrc_tegra.o
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 644ecaef17ee..0697eb980e5f 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -25,6 +25,7 @@
#define TD_PAGE_COUNT 5
#define CI_HDRC_PAGE_SIZE 4096ul /* page size for TD's */
#define ENDPT_MAX 32
+#define CI_MAX_BUF_SIZE (TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)
/******************************************************************************
* REGISTERS
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index a479af3ae31d..5ae16368a0c7 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -271,6 +271,7 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
struct device *dev = ci->dev->parent;
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
+ struct imx_usbmisc_data *mdata = data->usbmisc_data;
switch (event) {
case CI_HDRC_IMX_HSIC_ACTIVE_EVENT:
@@ -284,11 +285,19 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event)
}
break;
case CI_HDRC_IMX_HSIC_SUSPEND_EVENT:
- ret = imx_usbmisc_hsic_set_connect(data->usbmisc_data);
+ ret = imx_usbmisc_hsic_set_connect(mdata);
if (ret)
dev_err(dev,
"hsic_set_connect failed, err=%d\n", ret);
break;
+ case CI_HDRC_CONTROLLER_VBUS_EVENT:
+ if (ci->vbus_active)
+ ret = imx_usbmisc_charger_detection(mdata, true);
+ else
+ ret = imx_usbmisc_charger_detection(mdata, false);
+ if (ci->usb_phy)
+ schedule_work(&ci->usb_phy->chg_work);
+ break;
default:
break;
}
@@ -414,6 +423,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
}
pdata.usb_phy = data->phy;
+ if (data->usbmisc_data)
+ data->usbmisc_data->usb_phy = data->phy;
if ((of_device_is_compatible(np, "fsl,imx53-usb") ||
of_device_is_compatible(np, "fsl,imx51-usb")) && pdata.usb_phy &&
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index c2051aeba13f..727d02b6dbd3 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -24,6 +24,7 @@ struct imx_usbmisc_data {
unsigned int hsic:1; /* HSIC controller */
unsigned int ext_id:1; /* ID from external event */
unsigned int ext_vbus:1; /* Vbus from external event */
+ struct usb_phy *usb_phy;
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
@@ -31,5 +32,6 @@ int imx_usbmisc_init_post(struct imx_usbmisc_data *data);
int imx_usbmisc_set_wakeup(struct imx_usbmisc_data *data, bool enabled);
int imx_usbmisc_hsic_set_connect(struct imx_usbmisc_data *data);
int imx_usbmisc_hsic_set_clk(struct imx_usbmisc_data *data, bool on);
+int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect);
#endif /* __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H */
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index c044fba463e4..89e1d82d739b 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -28,13 +28,19 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
.flags = CI_HDRC_DISABLE_STREAMING,
};
-static struct ci_hdrc_platform_data ci_zynq_pdata = {
+static const struct ci_hdrc_platform_data ci_zynq_pdata = {
.capoffset = DEF_CAPOFFSET,
};
+static const struct ci_hdrc_platform_data ci_zevio_pdata = {
+ .capoffset = DEF_CAPOFFSET,
+ .flags = CI_HDRC_REGS_SHARED | CI_HDRC_FORCE_FULLSPEED,
+};
+
static const struct of_device_id ci_hdrc_usb2_of_match[] = {
- { .compatible = "chipidea,usb2"},
- { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+ { .compatible = "chipidea,usb2" },
+ { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata },
+ { .compatible = "lsi,zevio-usb", .data = &ci_zevio_pdata },
{ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
@@ -64,13 +70,14 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->clk = devm_clk_get(dev, NULL);
- if (!IS_ERR(priv->clk)) {
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(dev, "failed to enable the clock: %d\n", ret);
- return ret;
- }
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable the clock: %d\n", ret);
+ return ret;
}
ci_pdata->name = dev_name(dev);
@@ -94,8 +101,7 @@ static int ci_hdrc_usb2_probe(struct platform_device *pdev)
return 0;
clk_err:
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
deleted file mode 100644
index e1634da4a4b1..000000000000
--- a/drivers/usb/chipidea/ci_hdrc_zevio.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
- *
- * Based off drivers/usb/chipidea/ci_hdrc_msm.c
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/chipidea.h>
-
-#include "ci.h"
-
-static struct ci_hdrc_platform_data ci_hdrc_zevio_platdata = {
- .name = "ci_hdrc_zevio",
- .flags = CI_HDRC_REGS_SHARED | CI_HDRC_FORCE_FULLSPEED,
- .capoffset = DEF_CAPOFFSET,
-};
-
-static int ci_hdrc_zevio_probe(struct platform_device *pdev)
-{
- struct platform_device *ci_pdev;
-
- dev_dbg(&pdev->dev, "ci_hdrc_zevio_probe\n");
-
- ci_pdev = ci_hdrc_add_device(&pdev->dev,
- pdev->resource, pdev->num_resources,
- &ci_hdrc_zevio_platdata);
-
- if (IS_ERR(ci_pdev)) {
- dev_err(&pdev->dev, "ci_hdrc_add_device failed!\n");
- return PTR_ERR(ci_pdev);
- }
-
- platform_set_drvdata(pdev, ci_pdev);
-
- return 0;
-}
-
-static int ci_hdrc_zevio_remove(struct platform_device *pdev)
-{
- struct platform_device *ci_pdev = platform_get_drvdata(pdev);
-
- ci_hdrc_remove_device(ci_pdev);
-
- return 0;
-}
-
-static const struct of_device_id ci_hdrc_zevio_dt_ids[] = {
- { .compatible = "lsi,zevio-usb", },
- { /* sentinel */ }
-};
-
-static struct platform_driver ci_hdrc_zevio_driver = {
- .probe = ci_hdrc_zevio_probe,
- .remove = ci_hdrc_zevio_remove,
- .driver = {
- .name = "zevio_usb",
- .of_match_table = ci_hdrc_zevio_dt_ids,
- },
-};
-
-MODULE_DEVICE_TABLE(of, ci_hdrc_zevio_dt_ids);
-module_platform_driver(ci_hdrc_zevio_driver);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ae0bdc036464..9a7c53d09ab4 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -3,42 +3,16 @@
* core.c - ChipIdea USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2020 NXP
*
* Author: David Lopo
- */
-
-/*
- * Description: ChipIdea USB IP core family device controller
- *
- * This driver is composed of several blocks:
- * - HW: hardware interface
- * - DBG: debug facilities (optional)
- * - UTIL: utilities
- * - ISR: interrupts handling
- * - ENDPT: endpoint operations (Gadget API)
- * - GADGET: gadget operations (Gadget API)
- * - BUS: bus glue code, bus abstraction layer
+ * Peter Chen <peter.chen@nxp.com>
*
- * Compile Options
- * - STALL_IN: non-empty bulk-in pipes cannot be halted
- * if defined mass storage compliance succeeds but with warnings
- * => case 4: Hi > Dn
- * => case 5: Hi > Di
- * => case 8: Hi <> Do
- * if undefined usbtest 13 fails
- * - TRACE: enable function tracing (depends on DEBUG)
- *
- * Main Features
- * - Chapter 9 & Mass Storage Compliance with Gadget File Storage
- * - Chapter 9 Compliance with Gadget Zero (STALL_IN undefined)
- * - Normal & LPM support
- *
- * USBTEST Report
- * - OK: 0-12, 13 (STALL_IN defined) & 14
- * - Not Supported: 15 & 16 (ISO)
- *
- * TODO List
- * - Suspend & Remote Wakeup
+ * Main Features:
+ * - All four transfer types are supported; usbtest passes
+ * - USB certification for gadget: CH9 and Mass Storage tests pass
+ * - Low power mode
+ * - USB wakeup
*/
#include <linux/delay.h>
#include <linux/device.h>
@@ -272,7 +246,7 @@ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
ci->rev = ci_get_revision(ci);
dev_dbg(ci->dev,
- "ChipIdea HDRC found, revision: %d, lpm: %d; cap: %p op: %p\n",
+ "revision: %d, lpm: %d; cap: %px op: %px\n",
ci->rev, ci->hw_bank.lpm, ci->hw_bank.cap, ci->hw_bank.op);
/* setup lock mode ? */
@@ -666,6 +640,7 @@ static int ci_usb_role_switch_set(struct usb_role_switch *sw,
static struct usb_role_switch_desc ci_role_switch = {
.set = ci_usb_role_switch_set,
.get = ci_usb_role_switch_get,
+ .allow_userspace_control = true,
};
static int ci_get_platdata(struct device *dev,
@@ -1149,8 +1124,11 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci_otg_is_fsm_mode(ci)) {
/* only update vbus status for peripheral */
- if (ci->role == CI_ROLE_GADGET)
+ if (ci->role == CI_ROLE_GADGET) {
+ /* Pull down DP for possible charger detection */
+ hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
ci_handle_vbus_change(ci);
+ }
ret = ci_role_start(ci, ci->role);
if (ret) {
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 921bcf14dc06..db0cfde0cc3c 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -338,7 +338,7 @@ static int hw_usb_reset(struct ci_hdrc *ci)
*****************************************************************************/
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
- unsigned length)
+ unsigned int length, struct scatterlist *s)
{
int i;
u32 temp;
@@ -366,7 +366,13 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
}
- temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+ if (s) {
+ temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
+ node->td_remaining_size = CI_MAX_BUF_SIZE - length;
+ } else {
+ temp = (u32) (hwreq->req.dma + hwreq->req.actual);
+ }
+
if (length) {
node->ptr->page[0] = cpu_to_le32(temp);
for (i = 1; i < TD_PAGE_COUNT; i++) {
@@ -400,6 +406,122 @@ static inline u8 _usb_addr(struct ci_hw_ep *ep)
return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}
+static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
+ struct ci_hw_req *hwreq)
+{
+ unsigned int rest = hwreq->req.length;
+ int pages = TD_PAGE_COUNT;
+ int ret = 0;
+
+ if (rest == 0) {
+ ret = add_td_to_list(hwep, hwreq, 0, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * The first buffer might not be page aligned.
+ * In that case we have to span into one extra td.
+ */
+ if (hwreq->req.dma % PAGE_SIZE)
+ pages--;
+
+ while (rest > 0) {
+ unsigned int count = min(hwreq->req.length - hwreq->req.actual,
+ (unsigned int)(pages * CI_HDRC_PAGE_SIZE));
+
+ ret = add_td_to_list(hwep, hwreq, count, NULL);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
+ if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
+ && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
+ ret = add_td_to_list(hwep, hwreq, 0, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
+ struct scatterlist *s)
+{
+ unsigned int rest = sg_dma_len(s);
+ int ret = 0;
+
+ hwreq->req.actual = 0;
+ while (rest > 0) {
+ unsigned int count = min_t(unsigned int, rest,
+ CI_MAX_BUF_SIZE);
+
+ ret = add_td_to_list(hwep, hwreq, count, s);
+ if (ret < 0)
+ return ret;
+
+ rest -= count;
+ }
+
+ return ret;
+}
+
+static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
+{
+ int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
+ / CI_HDRC_PAGE_SIZE;
+ int i;
+
+ node->ptr->token +=
+ cpu_to_le32(sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
+
+ for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
+ u32 page = (u32) sg_dma_address(s) +
+ (i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;
+
+ page &= ~TD_RESERVED_MASK;
+ node->ptr->page[i] = cpu_to_le32(page);
+ }
+}
+
+static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
+{
+ struct usb_request *req = &hwreq->req;
+ struct scatterlist *s = req->sg;
+ int ret = 0, i = 0;
+ struct td_node *node = NULL;
+
+ if (!s || req->zero || req->length == 0) {
+ dev_err(hwep->ci->dev, "operation not supported for sg\n");
+ return -EINVAL;
+ }
+
+ while (i++ < req->num_mapped_sgs) {
+ if (sg_dma_address(s) % PAGE_SIZE) {
+ dev_err(hwep->ci->dev, "sg buffer is not page aligned\n");
+ return -EINVAL;
+ }
+
+ if (node && (node->td_remaining_size >= sg_dma_len(s))) {
+ ci_add_buffer_entry(node, s);
+ node->td_remaining_size -= sg_dma_len(s);
+ } else {
+ ret = prepare_td_per_sg(hwep, hwreq, s);
+ if (ret)
+ return ret;
+
+ node = list_entry(hwreq->tds.prev,
+ struct td_node, td);
+ }
+
+ s = sg_next(s);
+ }
+
+ return ret;
+}
+
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
@@ -411,8 +533,6 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
struct ci_hdrc *ci = hwep->ci;
int ret = 0;
- unsigned rest = hwreq->req.length;
- int pages = TD_PAGE_COUNT;
struct td_node *firstnode, *lastnode;
/* don't queue twice */
@@ -426,35 +546,13 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
if (ret)
return ret;
- /*
- * The first buffer could be not page aligned.
- * In that case we have to span into one extra td.
- */
- if (hwreq->req.dma % PAGE_SIZE)
- pages--;
-
- if (rest == 0) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
-
- while (rest > 0) {
- unsigned count = min(hwreq->req.length - hwreq->req.actual,
- (unsigned)(pages * CI_HDRC_PAGE_SIZE));
- ret = add_td_to_list(hwep, hwreq, count);
- if (ret < 0)
- goto done;
-
- rest -= count;
- }
+ if (hwreq->req.num_mapped_sgs)
+ ret = prepare_td_for_sg(hwep, hwreq);
+ else
+ ret = prepare_td_for_non_sg(hwep, hwreq);
- if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
- && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
- ret = add_td_to_list(hwep, hwreq, 0);
- if (ret < 0)
- goto done;
- }
+ if (ret)
+ return ret;
firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
@@ -1561,6 +1659,7 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
@@ -1570,10 +1669,14 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
usb_phy_set_charger_state(ci->usb_phy, is_active ?
USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
+ if (ci->platdata->notify_event)
+ ret = ci->platdata->notify_event(ci,
+ CI_HDRC_CONTROLLER_VBUS_EVENT);
+
if (ci->driver)
ci_hdrc_gadget_connect(_gadget, is_active);
- return 0;
+ return ret;
}
static int ci_udc_wakeup(struct usb_gadget *_gadget)
@@ -1936,6 +2039,7 @@ static int udc_start(struct ci_hdrc *ci)
ci->gadget.max_speed = USB_SPEED_HIGH;
ci->gadget.name = ci->platdata->name;
ci->gadget.otg_caps = otg_caps;
+ ci->gadget.sg_supported = 1;
if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
ci->gadget.quirk_avoids_skb_reserve = 1;
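(Illustration only, not part of the patch.) The scatter-gather path added above splits each mapped sg entry into TDs of at most CI_MAX_BUF_SIZE bytes (with the ci.h values, TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE = 5 * 4096 = 20480) and records the unused tail of the last TD in td_remaining_size, so that ci_add_buffer_entry() can fold a following page-aligned sg entry into the same TD. The stand-alone user-space sketch below mirrors only that size arithmetic; the 48 KiB buffer length is a made-up input for the example.

    #include <stdio.h>

    #define CI_HDRC_PAGE_SIZE	4096u
    #define TD_PAGE_COUNT	5u
    #define CI_MAX_BUF_SIZE	(TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)	/* 20480 bytes */

    int main(void)
    {
    	unsigned int rest = 48 * 1024;	/* hypothetical 48 KiB sg entry */
    	unsigned int td = 0;

    	/* same split loop as prepare_td_per_sg(), minus the hardware setup */
    	while (rest > 0) {
    		unsigned int count = rest < CI_MAX_BUF_SIZE ? rest : CI_MAX_BUF_SIZE;

    		printf("td%u: %5u bytes, td_remaining_size %5u\n",
    		       td++, count, CI_MAX_BUF_SIZE - count);
    		rest -= count;
    	}

    	/* output: td0 20480, td1 20480, td2 8192 (12288 bytes left free in td2) */
    	return 0;
    }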
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index ebb11b625bb8..5193df1e18c7 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -61,16 +61,14 @@ struct td_node {
struct list_head td;
dma_addr_t dma;
struct ci_hw_td *ptr;
+ int td_remaining_size;
};
/**
* struct ci_hw_req - usb request representation
* @req: request structure for gadget drivers
* @queue: link to QH list
- * @ptr: transfer descriptor for this request
- * @dma: dma address for the transfer descriptor
- * @zptr: transfer descriptor for the zero packet
- * @zdma: dma address of the zero packet's transfer descriptor
+ * @tds: link to TD list
*/
struct ci_hw_req {
struct usb_request req;
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index e81e33c26e6c..f136876cb4a3 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/usb/otg.h>
#include "ci_hdrc_imx.h"
@@ -99,6 +100,33 @@
#define MX7D_USB_VBUS_WAKEUP_SOURCE_AVALID MX7D_USB_VBUS_WAKEUP_SOURCE(1)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID MX7D_USB_VBUS_WAKEUP_SOURCE(2)
#define MX7D_USB_VBUS_WAKEUP_SOURCE_SESS_END MX7D_USB_VBUS_WAKEUP_SOURCE(3)
+#define MX7D_USBNC_AUTO_RESUME BIT(2)
+/* The default DM/DP value is pull-down */
+#define MX7D_USBNC_USB_CTRL2_OPMODE(v) (v << 6)
+#define MX7D_USBNC_USB_CTRL2_OPMODE_NON_DRIVING MX7D_USBNC_USB_CTRL2_OPMODE(1)
+#define MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK (BIT(7) | BIT(6))
+#define MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN BIT(8)
+#define MX7D_USBNC_USB_CTRL2_DP_OVERRIDE_VAL BIT(12)
+#define MX7D_USBNC_USB_CTRL2_DP_OVERRIDE_EN BIT(13)
+#define MX7D_USBNC_USB_CTRL2_DM_OVERRIDE_VAL BIT(14)
+#define MX7D_USBNC_USB_CTRL2_DM_OVERRIDE_EN BIT(15)
+#define MX7D_USBNC_USB_CTRL2_DP_DM_MASK (BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15))
+
+#define MX7D_USB_OTG_PHY_CFG1 0x30
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL BIT(0)
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 BIT(1)
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 BIT(2)
+#define MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB BIT(3)
+#define MX7D_USB_OTG_PHY_CFG2_DRVVBUS0 BIT(16)
+
+#define MX7D_USB_OTG_PHY_CFG2 0x34
+
+#define MX7D_USB_OTG_PHY_STATUS 0x3c
+#define MX7D_USB_OTG_PHY_STATUS_LINE_STATE0 BIT(0)
+#define MX7D_USB_OTG_PHY_STATUS_LINE_STATE1 BIT(1)
+#define MX7D_USB_OTG_PHY_STATUS_VBUS_VLD BIT(3)
+#define MX7D_USB_OTG_PHY_STATUS_CHRGDET BIT(29)
#define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
MX6_BM_ID_WAKEUP)
@@ -114,6 +142,8 @@ struct usbmisc_ops {
int (*hsic_set_connect)(struct imx_usbmisc_data *data);
/* It's called during suspend/resume */
int (*hsic_set_clk)(struct imx_usbmisc_data *data, bool enabled);
+ /* usb charger detection */
+ int (*charger_detection)(struct imx_usbmisc_data *data);
};
struct imx_usbmisc {
@@ -609,10 +639,263 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
reg |= MX6_BM_PWR_POLARITY;
writel(reg, usbmisc->base);
- reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
- reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
- writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID,
- usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ /* SoC non-burst setting */
+ reg = readl(usbmisc->base);
+ writel(reg | MX6_BM_NON_BURST_SETTING, usbmisc->base);
+
+ if (!data->hsic) {
+ reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
+ writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID
+ | MX7D_USBNC_AUTO_RESUME,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ }
+
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ usbmisc_imx7d_set_wakeup(data, false);
+
+ return 0;
+}
+
+static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ struct usb_phy *usb_phy = data->usb_phy;
+ int val;
+ unsigned long flags;
+
+ /* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ usleep_range(1000, 2000);
+
+ /*
+ * Per BC 1.2, check voltage of D+:
+ * DCP: if greater than VDAT_REF;
+ * CDP: if less than VDAT_REF.
+ */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (val & MX7D_USB_OTG_PHY_STATUS_CHRGDET) {
+ dev_dbg(data->dev, "It is a dedicated charging port\n");
+ usb_phy->chg_type = DCP_TYPE;
+ } else {
+ dev_dbg(data->dev, "It is a charging downstream port\n");
+ usb_phy->chg_type = CDP_TYPE;
+ }
+
+ return 0;
+}
+
+static void imx7_disable_charger_detector(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~(MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL);
+ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+
+ /* Set OPMODE to be 2'b00 and disable its override */
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
+ writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);
+
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ writel(val & ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+}
+
+static int imx7d_charger_data_contact_detect(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 val;
+ int i, data_pin_contact_count = 0;
+
+ /* Enable Data Contact Detect (DCD) per the USB BC 1.2 */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ for (i = 0; i < 100; i = i + 1) {
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (!(val & MX7D_USB_OTG_PHY_STATUS_LINE_STATE0)) {
+ if (data_pin_contact_count++ > 5)
+ /* Data pin makes contact */
+ break;
+ usleep_range(5000, 10000);
+ } else {
+ data_pin_contact_count = 0;
+ usleep_range(5000, 6000);
+ }
+ }
+
+ /* Disable DCD after the data contact check has finished */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ writel(val & ~MX7D_USB_OTG_PHY_CFG2_CHRG_DCDENB,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ if (i == 100) {
+ dev_err(data->dev,
+ "VBUS is coming from a dedicated power supply.\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ struct usb_phy *usb_phy = data->usb_phy;
+ unsigned long flags;
+ u32 val;
+
+ /* VDP_SRC is connected to D+ and IDM_SINK is connected to D- */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_CHRGSEL;
+ writel(val | MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0 |
+ MX7D_USB_OTG_PHY_CFG2_CHRG_VDATDETENB0,
+ usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ usleep_range(1000, 2000);
+
+ /* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (!(val & MX7D_USB_OTG_PHY_STATUS_CHRGDET)) {
+ dev_dbg(data->dev, "It is a standard downstream port\n");
+ usb_phy->chg_type = SDP_TYPE;
+ }
+
+ return 0;
+}
+
+/**
+ * Whole charger detection process:
+ * 1. OPMODE override to be non-driving
+ * 2. Data contact check
+ * 3. Primary detection
+ * 4. Secondary detection
+ * 5. Disable charger detection
+ */
+static int imx7d_charger_detection(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ struct usb_phy *usb_phy = data->usb_phy;
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ /* Check if vbus is valid */
+ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
+ if (!(val & MX7D_USB_OTG_PHY_STATUS_VBUS_VLD)) {
+ dev_err(data->dev, "vbus is not valid\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Keep OPMODE to be non-driving mode during the whole
+ * charger detection process.
+ */
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ val &= ~MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_MASK;
+ val |= MX7D_USBNC_USB_CTRL2_OPMODE_NON_DRIVING;
+ writel(val, usbmisc->base + MX7D_USBNC_USB_CTRL2);
+
+ val = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ writel(val | MX7D_USBNC_USB_CTRL2_OPMODE_OVERRIDE_EN,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ spin_unlock_irqrestore(&usbmisc->lock, flags);
+
+ ret = imx7d_charger_data_contact_detect(data);
+ if (ret)
+ return ret;
+
+ ret = imx7d_charger_primary_detection(data);
+ if (!ret && usb_phy->chg_type != SDP_TYPE)
+ ret = imx7d_charger_secondary_detection(data);
+
+ imx7_disable_charger_detector(data);
+
+ return ret;
+}
+
+static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
+{
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+ unsigned long flags;
+ u32 reg;
+
+ if (data->index >= 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&usbmisc->lock, flags);
+ reg = readl(usbmisc->base);
+ if (data->disable_oc) {
+ reg |= MX6_BM_OVER_CUR_DIS;
+ } else {
+ reg &= ~MX6_BM_OVER_CUR_DIS;
+
+ /*
+ * If the polarity is not configured, keep it as set up by the
+ * bootloader.
+ */
+ if (data->oc_pol_configured && data->oc_pol_active_low)
+ reg |= MX6_BM_OVER_CUR_POLARITY;
+ else if (data->oc_pol_configured)
+ reg &= ~MX6_BM_OVER_CUR_POLARITY;
+ }
+ /* If the polarity is not set, keep it as set up by the bootloader */
+ if (data->pwr_pol == 1)
+ reg |= MX6_BM_PWR_POLARITY;
+
+ writel(reg, usbmisc->base);
+
+ /* SoC non-burst setting */
+ reg = readl(usbmisc->base);
+ writel(reg | MX6_BM_NON_BURST_SETTING, usbmisc->base);
+
+ if (data->hsic) {
+ reg = readl(usbmisc->base);
+ writel(reg | MX6_BM_UTMI_ON_CLOCK, usbmisc->base);
+
+ reg = readl(usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
+ reg |= MX6_BM_HSIC_EN | MX6_BM_HSIC_CLK_ON;
+ writel(reg, usbmisc->base + MX6_USB_HSIC_CTRL_OFFSET);
+
+ /*
+ * For the non-HSIC controller, autoresume is enabled
+ * in the MXS PHY driver (usbphy_ctrl bit 18).
+ */
+ reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ writel(reg | MX7D_USBNC_AUTO_RESUME,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ } else {
+ reg = readl(usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ reg &= ~MX7D_USB_VBUS_WAKEUP_SOURCE_MASK;
+ writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID,
+ usbmisc->base + MX7D_USBNC_USB_CTRL2);
+ }
spin_unlock_irqrestore(&usbmisc->lock, flags);
@@ -659,6 +942,14 @@ static const struct usbmisc_ops imx6sx_usbmisc_ops = {
static const struct usbmisc_ops imx7d_usbmisc_ops = {
.init = usbmisc_imx7d_init,
.set_wakeup = usbmisc_imx7d_set_wakeup,
+ .charger_detection = imx7d_charger_detection,
+};
+
+static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
+ .init = usbmisc_imx7ulp_init,
+ .set_wakeup = usbmisc_imx7d_set_wakeup,
+ .hsic_set_connect = usbmisc_imx6_hsic_set_connect,
+ .hsic_set_clk = usbmisc_imx6_hsic_set_clk,
};
static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
@@ -737,6 +1028,39 @@ int imx_usbmisc_hsic_set_clk(struct imx_usbmisc_data *data, bool on)
return usbmisc->ops->hsic_set_clk(data, on);
}
EXPORT_SYMBOL_GPL(imx_usbmisc_hsic_set_clk);
+
+int imx_usbmisc_charger_detection(struct imx_usbmisc_data *data, bool connect)
+{
+ struct imx_usbmisc *usbmisc;
+ struct usb_phy *usb_phy;
+ int ret = 0;
+
+ if (!data)
+ return -EINVAL;
+
+ usbmisc = dev_get_drvdata(data->dev);
+ usb_phy = data->usb_phy;
+ if (!usbmisc->ops->charger_detection)
+ return -ENOTSUPP;
+
+ if (connect) {
+ ret = usbmisc->ops->charger_detection(data);
+ if (ret) {
+ dev_err(data->dev,
+ "Error occurred during charger detection: %d\n",
+ ret);
+ usb_phy->chg_state = USB_CHARGER_ABSENT;
+ } else {
+ usb_phy->chg_state = USB_CHARGER_PRESENT;
+ }
+ } else {
+ usb_phy->chg_state = USB_CHARGER_ABSENT;
+ usb_phy->chg_type = UNKNOWN_TYPE;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_usbmisc_charger_detection);
+
static const struct of_device_id usbmisc_imx_dt_ids[] = {
{
.compatible = "fsl,imx25-usbmisc",
@@ -780,7 +1104,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
},
{
.compatible = "fsl,imx7ulp-usbmisc",
- .data = &imx7d_usbmisc_ops,
+ .data = &imx7ulp_usbmisc_ops,
},
{ /* sentinel */ }
};
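(Illustration only, not part of the patch.) imx7d_charger_detection() above follows the usual BC 1.2 ordering: data contact detect, then primary detection (standard downstream port versus charging port), then secondary detection (dedicated versus charging downstream port). The stand-alone sketch below reduces this to the two MX7D_USB_OTG_PHY_STATUS_CHRGDET readings the code checks; the helper name bc12_classify() and its boolean inputs are invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    enum chg_type { SDP_TYPE, CDP_TYPE, DCP_TYPE };

    /* chrgdet: charger-detect comparator state after primary/secondary detection */
    static enum chg_type bc12_classify(bool primary_chrgdet, bool secondary_chrgdet)
    {
    	if (!primary_chrgdet)		/* D- stayed below VDAT_REF */
    		return SDP_TYPE;	/* standard downstream port */
    	if (secondary_chrgdet)		/* D+ above VDAT_REF */
    		return DCP_TYPE;	/* dedicated charging port */
    	return CDP_TYPE;		/* charging downstream port */
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       bc12_classify(false, false),	/* SDP */
    	       bc12_classify(true, false),	/* CDP */
    	       bc12_classify(true, true));	/* DCP */
    	return 0;
    }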
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ded8d93834ca..f67088bb8218 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -584,7 +584,7 @@ static void acm_softint(struct work_struct *work)
}
if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
- for (i = 0; i < ACM_NR; i++)
+ for (i = 0; i < acm->rx_buflimit; i++)
if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
acm_submit_read_urb(acm, i, GFP_NOIO);
}
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 0d8e3f3804a3..084c48c5848f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -468,7 +468,8 @@ static int usblp_release(struct inode *inode, struct file *file)
usb_autopm_put_interface(usblp->intf);
if (!usblp->present) /* finish cleanup from disconnect */
- usblp_cleanup(usblp);
+ usblp_cleanup(usblp); /* any URBs must be dead */
+
mutex_unlock(&usblp_mutex);
return 0;
}
@@ -1375,9 +1376,11 @@ static void usblp_disconnect(struct usb_interface *intf)
usblp_unlink_urbs(usblp);
mutex_unlock(&usblp->mut);
+ usb_poison_anchored_urbs(&usblp->urbs);
if (!usblp->used)
usblp_cleanup(usblp);
+
mutex_unlock(&usblp_mutex);
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index f0a259937da8..1547aa6e5314 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -159,6 +159,7 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
* usb_hcd_pci_probe - initialize PCI-based HCDs
* @dev: USB Host Controller being probed
* @id: pci hotplug id connecting controller to HCD framework
+ * @driver: USB HC driver handle
* Context: !in_interrupt()
*
* Allocates basic PCI resources for this USB host controller, and
@@ -169,9 +170,9 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
*
* Return: 0 if successful.
*/
-int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id,
+ const struct hc_driver *driver)
{
- struct hc_driver *driver;
struct usb_hcd *hcd;
int retval;
int hcd_irq = 0;
@@ -181,7 +182,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!id)
return -EINVAL;
- driver = (struct hc_driver *)id->driver_data;
+
if (!driver)
return -EINVAL;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index aa45840d8273..de624c47e190 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/genalloc.h>
#include <linux/io.h>
+#include <linux/kcov.h>
#include <linux/phy/phy.h>
#include <linux/usb.h>
@@ -1645,7 +1646,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
+ kcov_remote_start_usb((u64)urb->dev->bus->busnum);
urb->complete(urb);
+ kcov_remote_stop();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fc748c731832..b1e14beaac5f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -93,7 +93,7 @@ module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
-static bool use_both_schemes = 1;
+static bool use_both_schemes = true;
module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_both_schemes,
"try the other device initialization scheme if the "
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index a97dd1ba964e..73f4482d833a 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* usb hub driver head file
*
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index 2ae90158ded7..fdd4897401e2 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* drivers/usb/core/otg_whitelist.h
*
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 9f4320b9d7fc..a2ca38e25e0c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -1262,8 +1262,10 @@ void usb_create_sysfs_intf_files(struct usb_interface *intf)
if (!alt->string && !(udev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
alt->string = usb_cache_string(udev, alt->desc.iInterface);
- if (alt->string && device_create_file(&intf->dev, &dev_attr_interface))
- ; /* We don't actually care if the function fails. */
+ if (alt->string && device_create_file(&intf->dev, &dev_attr_interface)) {
+ /* This is not a serious error */
+ dev_dbg(&intf->dev, "interface string descriptor file not created\n");
+ }
intf->sysfs_files_created = 1;
}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 64ed4023a8c8..19e4c550bc73 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Released under the GPLv2 only.
*/
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 78a4925aa118..fec17a2d2447 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -524,10 +524,25 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
greset |= GRSTCTL_CSFTRST;
dwc2_writel(hsotg, greset, GRSTCTL);
- if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL, GRSTCTL_CSFTRST, 10000)) {
- dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL GRSTCTL_CSFTRST\n",
- __func__);
- return -EBUSY;
+ if ((hsotg->hw_params.snpsid & DWC2_CORE_REV_MASK) <
+ (DWC2_CORE_REV_4_20a & DWC2_CORE_REV_MASK)) {
+ if (dwc2_hsotg_wait_bit_clear(hsotg, GRSTCTL,
+ GRSTCTL_CSFTRST, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST\n",
+ __func__);
+ return -EBUSY;
+ }
+ } else {
+ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
+ GRSTCTL_CSFTRST_DONE, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! Soft Reset timeout GRSTCTL_CSFTRST_DONE\n",
+ __func__);
+ return -EBUSY;
+ }
+ greset = dwc2_readl(hsotg, GRSTCTL);
+ greset &= ~GRSTCTL_CSFTRST;
+ greset |= GRSTCTL_CSFTRST_DONE;
+ dwc2_writel(hsotg, greset, GRSTCTL);
}
/* Wait for AHB master IDLE state */
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 99b0bdfe0012..132d687f1590 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* core.h - DesignWare HS OTG Controller common declarations
*
@@ -1103,8 +1103,10 @@ struct dwc2_hsotg {
#define DWC2_CORE_REV_3_00a 0x4f54300a
#define DWC2_CORE_REV_3_10a 0x4f54310a
#define DWC2_CORE_REV_4_00a 0x4f54400a
+#define DWC2_CORE_REV_4_20a 0x4f54420a
#define DWC2_FS_IOT_REV_1_00a 0x5531100a
#define DWC2_HS_IOT_REV_1_00a 0x5532100a
+#define DWC2_CORE_REV_MASK 0x0000ffff
/* DWC OTG HW Core ID */
#define DWC2_OTG_ID 0x4f540000
@@ -1309,6 +1311,8 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
+int dwc2_check_core_version(struct dwc2_hsotg *hsotg);
+
/*
* Common core Functions.
* The following functions support managing the DWC_otg controller in either
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 876ff31261d5..55f1d14fc414 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -416,10 +416,13 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
if (ret && (ret != -ENOTSUPP))
dev_err(hsotg->dev, "exit power_down failed\n");
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
call_gadget(hsotg, resume);
+ } else {
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
}
- /* Change to L0 state */
- hsotg->lx_state = DWC2_L0;
} else {
if (hsotg->params.power_down)
return;
diff --git a/drivers/usb/dwc2/debug.h b/drivers/usb/dwc2/debug.h
index a8c565b6bc34..47252c56d410 100644
--- a/drivers/usb/dwc2/debug.h
+++ b/drivers/usb/dwc2/debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* debug.h - Designware USB2 DRD controller debug header
*
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 1224fa9df604..ea02ee63ac6d 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* hcd.h - DesignWare HS OTG Controller host-mode declarations
*
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index c4027bbcedec..c3d6dde2aca4 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* hw.h - DesignWare HS OTG Controller hardware definitions
*
@@ -126,6 +126,7 @@
#define GRSTCTL HSOTG_REG(0x010)
#define GRSTCTL_AHBIDLE BIT(31)
#define GRSTCTL_DMAREQ BIT(30)
+#define GRSTCTL_CSFTRST_DONE BIT(29)
#define GRSTCTL_TXFNUM_MASK (0x1f << 6)
#define GRSTCTL_TXFNUM_SHIFT 6
#define GRSTCTL_TXFNUM_LIMIT 0x1f
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 8ccc83f7eb3f..ce736d67c7c3 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -782,25 +782,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
u32 grxfsiz;
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
- */
-
- hw->snpsid = dwc2_readl(hsotg, GSNPSID);
- if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
- (hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
- (hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
- hw->snpsid);
- return -ENODEV;
- }
-
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
- hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
- hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
-
hwcfg1 = dwc2_readl(hsotg, GHWCFG1);
hwcfg2 = dwc2_readl(hsotg, GHWCFG2);
hwcfg3 = dwc2_readl(hsotg, GHWCFG3);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 69972750e161..e571c8ae65ec 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -363,6 +363,37 @@ static bool dwc2_check_core_endianness(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_check_core_version() - Check core version
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+int dwc2_check_core_version(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+ * 0x45f4xxxx, 0x5531xxxx or 0x5532xxxx
+ */
+
+ hw->snpsid = dwc2_readl(hsotg, GSNPSID);
+ if ((hw->snpsid & GSNPSID_ID_MASK) != DWC2_OTG_ID &&
+ (hw->snpsid & GSNPSID_ID_MASK) != DWC2_FS_IOT_ID &&
+ (hw->snpsid & GSNPSID_ID_MASK) != DWC2_HS_IOT_ID) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+ return 0;
+}
+
+/**
* dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
* driver
*
@@ -445,6 +476,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
"snps,need-phy-for-wake");
/*
+ * Before performing any core-related operations,
+ * check the core version.
+ */
+ retval = dwc2_check_core_version(hsotg);
+ if (retval)
+ goto error;
+
+ /*
* Reset before dwc2_get_hwparams() so it can read the real power-on
* reset values from the registers.
*/
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index edc17155cb2b..25c686a752b0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -85,7 +85,9 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
* specified or set to OTG, then set the mode to peripheral.
*/
if (mode == USB_DR_MODE_OTG &&
- dwc->revision >= DWC3_REVISION_330A)
+ (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) ||
+ !device_property_read_bool(dwc->dev, "usb-role-switch")) &&
+ !DWC3_VER_IS_PRIOR(DWC3, 330A))
mode = USB_DR_MODE_PERIPHERAL;
}
@@ -121,17 +123,19 @@ static void __dwc3_set_mode(struct work_struct *work)
if (dwc->dr_mode != USB_DR_MODE_OTG)
return;
+ pm_runtime_get_sync(dwc->dev);
+
if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
dwc3_otg_update(dwc, 0);
if (!dwc->desired_dr_role)
- return;
+ goto out;
if (dwc->desired_dr_role == dwc->current_dr_role)
- return;
+ goto out;
if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG && dwc->edev)
- return;
+ goto out;
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_HOST:
@@ -190,6 +194,9 @@ static void __dwc3_set_mode(struct work_struct *work)
break;
}
+out:
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
}
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
@@ -257,7 +264,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
* take a little more than 50ms. Set the polling rate at 20ms
* for 10 times instead.
*/
- if (dwc3_is_usb31(dwc) && dwc->revision >= DWC3_USB31_REVISION_190A)
+ if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
retries = 10;
do {
@@ -265,8 +272,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
if (!(reg & DWC3_DCTL_CSFTRST))
goto done;
- if (dwc3_is_usb31(dwc) &&
- dwc->revision >= DWC3_USB31_REVISION_190A)
+ if (DWC3_VER_IS_WITHIN(DWC31, 190A, ANY) || DWC3_IP_IS(DWC32))
msleep(20);
else
udelay(1);
@@ -283,7 +289,7 @@ done:
* is cleared, we must wait at least 50ms before accessing the PHY
* domain (synchronization delay).
*/
- if (dwc3_is_usb31(dwc) && dwc->revision <= DWC3_USB31_REVISION_180A)
+ if (DWC3_VER_IS_WITHIN(DWC31, ANY, 180A))
msleep(50);
return 0;
@@ -298,7 +304,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
u32 reg;
u32 dft;
- if (dwc->revision < DWC3_REVISION_250A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
return;
if (dwc->fladj == 0)
@@ -579,7 +585,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
* will be '0' when the core is reset. Application needs to set it
* to '1' after the core initialization is completed.
*/
- if (dwc->revision > DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
/*
@@ -670,7 +676,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
* be '0' when the core is reset. Application needs to set it to
* '1' after the core initialization is completed.
*/
- if (dwc->revision > DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
reg |= DWC3_GUSB2PHYCFG_SUSPHY;
/*
@@ -719,15 +725,13 @@ static bool dwc3_core_is_valid(struct dwc3 *dwc)
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+ dwc->ip = DWC3_GSNPS_ID(reg);
/* This should read as U3 followed by revision number */
- if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
- /* Detected DWC_usb3 IP */
+ if (DWC3_IP_IS(DWC3)) {
dwc->revision = reg;
- } else if ((reg & DWC3_GSNPSID_MASK) == 0x33310000) {
- /* Detected DWC_usb31 IP */
+ } else if (DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) {
dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
- dwc->revision |= DWC3_REVISION_IS_DWC31;
dwc->version_type = dwc3_readl(dwc->regs, DWC3_VER_TYPE);
} else {
return false;
@@ -760,8 +764,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
*/
if ((dwc->dr_mode == USB_DR_MODE_HOST ||
dwc->dr_mode == USB_DR_MODE_OTG) &&
- (dwc->revision >= DWC3_REVISION_210A &&
- dwc->revision <= DWC3_REVISION_250A))
+ DWC3_VER_IS_WITHIN(DWC3, 210A, 250A))
reg |= DWC3_GCTL_DSBLCLKGTNG | DWC3_GCTL_SOFITPSYNC;
else
reg &= ~DWC3_GCTL_DSBLCLKGTNG;
@@ -804,7 +807,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
* and falls back to high-speed mode which causes
* the device to enter a Connect/Disconnect loop
*/
- if (dwc->revision < DWC3_REVISION_190A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 190A))
reg |= DWC3_GCTL_U2RSTECN;
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -957,7 +960,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
goto err0a;
if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
- dwc->revision > DWC3_REVISION_194A) {
+ !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
if (!dwc->dis_u3_susphy_quirk) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
reg |= DWC3_GUSB3PIPECTL_SUSPHY;
@@ -1004,20 +1007,20 @@ static int dwc3_core_init(struct dwc3 *dwc)
* the DWC_usb3 controller. It is NOT available in the
* DWC_usb31 controller.
*/
- if (!dwc3_is_usb31(dwc) && dwc->revision >= DWC3_REVISION_310A) {
+ if (DWC3_VER_IS_WITHIN(DWC3, 310A, ANY)) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL2);
reg |= DWC3_GUCTL2_RST_ACTBITLATER;
dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
}
- if (dwc->revision >= DWC3_REVISION_250A) {
+ if (!DWC3_VER_IS_PRIOR(DWC3, 250A)) {
reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
/*
* Enable hardware control of sending remote wakeup
* in HS when the device is in the L1 state.
*/
- if (dwc->revision >= DWC3_REVISION_290A)
+ if (!DWC3_VER_IS_PRIOR(DWC3, 290A))
reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
if (dwc->dis_tx_ipgap_linecheck_quirk)
@@ -1049,7 +1052,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
* Must config both number of packets and max burst settings to enable
* RX and/or TX threshold.
*/
- if (dwc3_is_usb31(dwc) && dwc->dr_mode == USB_DR_MODE_HOST) {
+ if (!DWC3_IP_IS(DWC3) && dwc->dr_mode == USB_DR_MODE_HOST) {
u8 rx_thr_num = dwc->rx_thr_num_pkt_prd;
u8 rx_maxburst = dwc->rx_max_burst_prd;
u8 tx_thr_num = dwc->tx_thr_num_pkt_prd;
@@ -1371,10 +1374,9 @@ static void dwc3_get_properties(struct dwc3 *dwc)
/* check whether the core supports IMOD */
bool dwc3_has_imod(struct dwc3 *dwc)
{
- return ((dwc3_is_usb3(dwc) &&
- dwc->revision >= DWC3_REVISION_300A) ||
- (dwc3_is_usb31(dwc) &&
- dwc->revision >= DWC3_USB31_REVISION_120A));
+ return DWC3_VER_IS_WITHIN(DWC3, 300A, ANY) ||
+ DWC3_VER_IS_WITHIN(DWC31, 120A, ANY) ||
+ DWC3_IP_IS(DWC32);
}
static void dwc3_check_params(struct dwc3 *dwc)
@@ -1395,7 +1397,7 @@ static void dwc3_check_params(struct dwc3 *dwc)
* affected version.
*/
if (!dwc->imod_interval &&
- (dwc->revision == DWC3_REVISION_300A))
+ DWC3_VER_IS(DWC3, 300A))
dwc->imod_interval = 1;
/* Check the maximum_speed parameter */
@@ -1417,7 +1419,7 @@ static void dwc3_check_params(struct dwc3 *dwc)
/*
* default to superspeed plus if we are capable.
*/
- if (dwc3_is_usb31(dwc) &&
+ if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) &&
(DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 4c171a8e215f..013f42a2b5dc 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* core.h - DesignWare USB3 DRD Core Header
*
@@ -69,6 +69,7 @@
#define DWC3_GEVNTCOUNT_EHB BIT(31)
#define DWC3_GSNPSID_MASK 0xffff0000
#define DWC3_GSNPSREV_MASK 0xffff
+#define DWC3_GSNPS_ID(p) (((p) & DWC3_GSNPSID_MASK) >> 16)
/* DWC3 registers memory space boundries */
#define DWC3_XHCI_REGS_START 0x0
@@ -365,6 +366,9 @@
#define DWC3_GHWPARAMS6_SRPSUPPORT BIT(10)
#define DWC3_GHWPARAMS6_EN_FPGA BIT(7)
+/* DWC_usb32 only */
+#define DWC3_GHWPARAMS6_MDWIDTH(n) ((n) & (0x3 << 8))
+
/* Global HWPARAMS7 Register */
#define DWC3_GHWPARAMS7_RAM1_DEPTH(n) ((n) & 0xffff)
#define DWC3_GHWPARAMS7_RAM2_DEPTH(n) (((n) >> 16) & 0xffff)
@@ -491,6 +495,7 @@
#define DWC3_DGCMD_SELECTED_FIFO_FLUSH 0x09
#define DWC3_DGCMD_ALL_FIFO_FLUSH 0x0a
#define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
+#define DWC3_DGCMD_SET_ENDPOINT_PRIME 0x0d
#define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
@@ -697,6 +702,10 @@ struct dwc3_ep {
#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
#define DWC3_EP_PENDING_REQUEST BIT(5)
#define DWC3_EP_DELAY_START BIT(6)
+#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7)
+#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
+#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
+#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -949,7 +958,8 @@ struct dwc3_scratchpad_array {
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
- * @revision: revision register contents
+ * @ip: controller's ID
+ * @revision: controller's version of an IP
* @version_type: VERSIONTYPE register contents, a sub release of a revision
* @dr_mode: requested mode of operation
* @current_dr_role: current role of operation when in dual-role mode
@@ -1110,15 +1120,15 @@ struct dwc3 {
u32 u1u2;
u32 maximum_speed;
- /*
- * All 3.1 IP version constants are greater than the 3.0 IP
- * version constants. This works for most version checks in
- * dwc3. However, in the future, this may not apply as
- * features may be developed on newer versions of the 3.0 IP
- * that are not in the 3.1 IP.
- */
+ u32 ip;
+
+#define DWC3_IP 0x5533
+#define DWC31_IP 0x3331
+#define DWC32_IP 0x3332
+
u32 revision;
+#define DWC3_REVISION_ANY 0x0
#define DWC3_REVISION_173A 0x5533173a
#define DWC3_REVISION_175A 0x5533175a
#define DWC3_REVISION_180A 0x5533180a
@@ -1143,20 +1153,20 @@ struct dwc3 {
#define DWC3_REVISION_310A 0x5533310a
#define DWC3_REVISION_330A 0x5533330a
-/*
- * NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
- * just so dwc31 revisions are always larger than dwc3.
- */
-#define DWC3_REVISION_IS_DWC31 0x80000000
-#define DWC3_USB31_REVISION_110A (0x3131302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_120A (0x3132302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_160A (0x3136302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_170A (0x3137302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_180A (0x3138302a | DWC3_REVISION_IS_DWC31)
-#define DWC3_USB31_REVISION_190A (0x3139302a | DWC3_REVISION_IS_DWC31)
+#define DWC31_REVISION_ANY 0x0
+#define DWC31_REVISION_110A 0x3131302a
+#define DWC31_REVISION_120A 0x3132302a
+#define DWC31_REVISION_160A 0x3136302a
+#define DWC31_REVISION_170A 0x3137302a
+#define DWC31_REVISION_180A 0x3138302a
+#define DWC31_REVISION_190A 0x3139302a
+
+#define DWC32_REVISION_ANY 0x0
+#define DWC32_REVISION_100A 0x3130302a
u32 version_type;
+#define DWC31_VERSIONTYPE_ANY 0x0
#define DWC31_VERSIONTYPE_EA01 0x65613031
#define DWC31_VERSIONTYPE_EA02 0x65613032
#define DWC31_VERSIONTYPE_EA03 0x65613033
@@ -1298,6 +1308,10 @@ struct dwc3_event_depevt {
#define DEPEVT_STREAMEVT_FOUND 1
#define DEPEVT_STREAMEVT_NOTFOUND 2
+/* Stream event parameter */
+#define DEPEVT_STREAM_PRIME 0xfffe
+#define DEPEVT_STREAM_NOSTREAM 0x0
+
/* Control-only Status */
#define DEPEVT_STATUS_CONTROL_DATA 1
#define DEPEVT_STATUS_CONTROL_STATUS 2
@@ -1400,17 +1414,26 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
-/* check whether we are on the DWC_usb3 core */
-static inline bool dwc3_is_usb3(struct dwc3 *dwc)
-{
- return !(dwc->revision & DWC3_REVISION_IS_DWC31);
-}
+#define DWC3_IP_IS(_ip) \
+ (dwc->ip == _ip##_IP)
-/* check whether we are on the DWC_usb31 core */
-static inline bool dwc3_is_usb31(struct dwc3 *dwc)
-{
- return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
-}
+#define DWC3_VER_IS(_ip, _ver) \
+ (DWC3_IP_IS(_ip) && dwc->revision == _ip##_REVISION_##_ver)
+
+#define DWC3_VER_IS_PRIOR(_ip, _ver) \
+ (DWC3_IP_IS(_ip) && dwc->revision < _ip##_REVISION_##_ver)
+
+#define DWC3_VER_IS_WITHIN(_ip, _from, _to) \
+ (DWC3_IP_IS(_ip) && \
+ dwc->revision >= _ip##_REVISION_##_from && \
+ (!(_ip##_REVISION_##_to) || \
+ dwc->revision <= _ip##_REVISION_##_to))
+
+#define DWC3_VER_TYPE_IS_WITHIN(_ip, _ver, _from, _to) \
+ (DWC3_VER_IS(_ip, _ver) && \
+ dwc->version_type >= _ip##_VERSIONTYPE_##_from && \
+ (!(_ip##_VERSIONTYPE_##_to) || \
+ dwc->version_type <= _ip##_VERSIONTYPE_##_to))
bool dwc3_has_imod(struct dwc3 *dwc);
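(Illustration only, not part of the patch.) The helpers above replace raw dwc->revision comparisons with IP-aware checks; a _REVISION_ANY bound of 0x0 makes one side of DWC3_VER_IS_WITHIN() open-ended. The stand-alone sketch below copies two macro bodies and a few constants from this hunk; the trimmed struct, the local dwc pointer and the 0x3230302a test value are invented for the example.

    #include <stdio.h>

    struct dwc3 { unsigned int ip; unsigned int revision; };	/* trimmed for the demo */

    #define DWC31_IP		0x3331
    #define DWC31_REVISION_ANY	0x0
    #define DWC31_REVISION_190A	0x3139302a

    #define DWC3_IP_IS(_ip)		(dwc->ip == _ip##_IP)

    #define DWC3_VER_IS_WITHIN(_ip, _from, _to)		\
    	(DWC3_IP_IS(_ip) &&				\
    	 dwc->revision >= _ip##_REVISION_##_from &&	\
    	 (!(_ip##_REVISION_##_to) ||			\
    	  dwc->revision <= _ip##_REVISION_##_to))

    int main(void)
    {
    	/* hypothetical DWC_usb31 core with a revision newer than 1.90a */
    	struct dwc3 core = { .ip = DWC31_IP, .revision = 0x3230302a };
    	struct dwc3 *dwc = &core;

    	/* ANY (0x0) disables the upper bound, so any revision >= 1.90a matches */
    	printf("%d\n", DWC3_VER_IS_WITHIN(DWC31, 190A, ANY));	/* prints 1 */
    	return 0;
    }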
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 4a13ceaf4093..d8f600e0e88f 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* debug.h - DesignWare USB3 DRD Controller Debug Header
*
@@ -68,6 +68,8 @@ dwc3_gadget_generic_cmd_string(u8 cmd)
return "All FIFO Flush";
case DWC3_DGCMD_SET_ENDPOINT_NRDY:
return "Set Endpoint NRDY";
+ case DWC3_DGCMD_SET_ENDPOINT_PRIME:
+ return "Set Endpoint Prime";
case DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK:
return "Run SoC Bus Loopback Test";
default:
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4fe8b1e1485c..6d9de334e46a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -635,13 +635,18 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
struct dwc3_ep *dep = s->private;
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
+ int mdwidth;
u32 val;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
/* Convert to bytes */
- val *= DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
+ val *= mdwidth;
val >>= 3;
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -654,13 +659,18 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
struct dwc3_ep *dep = s->private;
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
+ int mdwidth;
u32 val;
spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
/* Convert to bytes */
- val *= DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
+ val *= mdwidth;
val >>= 3;
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);
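Both debugfs handlers report FIFO space that the controller counts in MDWIDTH-wide words; on DWC_usb32 the bus width is split between GHWPARAMS0 and GHWPARAMS6, so the two fields are summed before converting words to bytes. A standalone sketch of that conversion, with illustrative parameter names and sample values:

#include <stdio.h>

/*
 * Words-to-bytes conversion as done in the handlers above:
 * total width in bits = base MDWIDTH (+ upper MDWIDTH bits on DWC_usb32),
 * then bytes = words * width_in_bits / 8.
 */
static unsigned int fifo_bytes(unsigned int words, unsigned int mdwidth,
			       unsigned int mdwidth_hi, int is_dwc32)
{
	if (is_dwc32)
		mdwidth += mdwidth_hi;

	return (words * mdwidth) >> 3;
}

int main(void)
{
	/* e.g. 128 words of a 64-bit wide FIFO RAM -> 1024 bytes */
	printf("%u\n", fifo_bytes(128, 64, 0, 0));
	return 0;
}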
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 7db1ffc92bbd..2e483448d695 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -56,7 +56,7 @@ static irqreturn_t dwc3_otg_thread_irq(int irq, void *_dwc)
spin_lock(&dwc->lock);
if (dwc->otg_restart_host) {
dwc3_otg_host_init(dwc);
- dwc->otg_restart_host = 0;
+ dwc->otg_restart_host = false;
}
spin_unlock(&dwc->lock);
@@ -82,7 +82,7 @@ static irqreturn_t dwc3_otg_irq(int irq, void *_dwc)
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST &&
!(reg & DWC3_OEVT_DEVICEMODE))
- dwc->otg_restart_host = 1;
+ dwc->otg_restart_host = true;
dwc3_writel(dwc->regs, DWC3_OEVT, reg);
ret = IRQ_WAKE_THREAD;
}
@@ -653,6 +653,6 @@ void dwc3_drd_exit(struct dwc3 *dwc)
break;
}
- if (!dwc->edev)
+ if (dwc->otg_irq)
free_irq(dwc->otg_irq, dwc);
}
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 1e14a6f4884b..6505f7bd69e2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
/* USBSS register offsets */
@@ -34,6 +35,7 @@
struct dwc3_keystone {
struct device *dev;
void __iomem *usbss;
+ struct phy *usb3_phy;
};
static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
@@ -95,8 +97,38 @@ static int kdwc3_probe(struct platform_device *pdev)
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
- pm_runtime_enable(kdwc->dev);
+ /* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */
+ kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(kdwc->usb3_phy)) {
+ error = PTR_ERR(kdwc->usb3_phy);
+ if (error != -EPROBE_DEFER)
+ dev_err(dev, "couldn't get usb3 phy: %d\n", error);
+
+ return error;
+ }
+
+ phy_pm_runtime_get_sync(kdwc->usb3_phy);
+ error = phy_reset(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy reset failed: %d\n", error);
+ return error;
+ }
+
+ error = phy_init(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy init failed: %d\n", error);
+ return error;
+ }
+
+ error = phy_power_on(kdwc->usb3_phy);
+ if (error < 0) {
+ dev_err(dev, "usb3 phy power on failed: %d\n", error);
+ phy_exit(kdwc->usb3_phy);
+ return error;
+ }
+
+ pm_runtime_enable(kdwc->dev);
error = pm_runtime_get_sync(kdwc->dev);
if (error < 0) {
dev_err(kdwc->dev, "pm_runtime_get_sync failed, error %d\n",
@@ -138,6 +170,9 @@ err_core:
err_irq:
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
+ phy_power_off(kdwc->usb3_phy);
+ phy_exit(kdwc->usb3_phy);
+ phy_pm_runtime_put_sync(kdwc->usb3_phy);
return error;
}
@@ -163,6 +198,10 @@ static int kdwc3_remove(struct platform_device *pdev)
pm_runtime_put_sync(kdwc->dev);
pm_runtime_disable(kdwc->dev);
+ phy_power_off(kdwc->usb3_phy);
+ phy_exit(kdwc->usb3_phy);
+ phy_pm_runtime_put_sync(kdwc->usb3_phy);
+
platform_set_drvdata(pdev, NULL);
return 0;
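The AM65 PSC constraint dictates the ordering above: the SERDES PHY has to be reset, initialized and powered on before the USB subsystem comes up, and torn down in the reverse order on both the probe error path and remove. A hedged kernel-style sketch of that pairing using only the generic PHY consumer API; the helper names are illustrative and error handling is trimmed to the essentials:

#include <linux/phy/phy.h>

/* Bring-up order mirrored from the probe path above */
static int example_usb3_phy_up(struct phy *usb3_phy)
{
	int ret;

	phy_pm_runtime_get_sync(usb3_phy);

	ret = phy_reset(usb3_phy);
	if (ret)
		return ret;

	ret = phy_init(usb3_phy);
	if (ret)
		return ret;

	ret = phy_power_on(usb3_phy);
	if (ret)
		phy_exit(usb3_phy);	/* undo phy_init() on failure */

	return ret;
}

/* Tear-down is the mirror image, shared by the error path and remove */
static void example_usb3_phy_down(struct phy *usb3_phy)
{
	phy_power_off(usb3_phy);
	phy_exit(usb3_phy);
	phy_pm_runtime_put_sync(usb3_phy);
}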
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index b81d085bc534..1f7f4d88ed9d 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -30,7 +30,7 @@
#include <linux/usb/role.h>
#include <linux/regulator/consumer.h>
-/* USB2 Ports Control Registers */
+/* USB2 Ports Control Registers, offsets are per-port */
#define U2P_REG_SIZE 0x20
@@ -50,14 +50,16 @@
/* USB Glue Control Registers */
-#define USB_R0 0x80
+#define G12A_GLUE_OFFSET 0x80
+
+#define USB_R0 0x00
#define USB_R0_P30_LANE0_TX2RX_LOOPBACK BIT(17)
#define USB_R0_P30_LANE0_EXT_PCLK_REQ BIT(18)
#define USB_R0_P30_PCS_RX_LOS_MASK_VAL_MASK GENMASK(28, 19)
#define USB_R0_U2D_SS_SCALEDOWN_MODE_MASK GENMASK(30, 29)
#define USB_R0_U2D_ACT BIT(31)
-#define USB_R1 0x84
+#define USB_R1 0x04
#define USB_R1_U3H_BIGENDIAN_GS BIT(0)
#define USB_R1_U3H_PME_ENABLE BIT(1)
#define USB_R1_U3H_HUB_PORT_OVERCURRENT_MASK GENMASK(4, 2)
@@ -69,23 +71,23 @@
#define USB_R1_U3H_FLADJ_30MHZ_REG_MASK GENMASK(24, 19)
#define USB_R1_P30_PCS_TX_SWING_FULL_MASK GENMASK(31, 25)
-#define USB_R2 0x88
+#define USB_R2 0x08
#define USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK GENMASK(25, 20)
#define USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK GENMASK(31, 26)
-#define USB_R3 0x8c
+#define USB_R3 0x0c
#define USB_R3_P30_SSC_ENABLE BIT(0)
#define USB_R3_P30_SSC_RANGE_MASK GENMASK(3, 1)
#define USB_R3_P30_SSC_REF_CLK_SEL_MASK GENMASK(12, 4)
#define USB_R3_P30_REF_SSP_EN BIT(13)
-#define USB_R4 0x90
+#define USB_R4 0x10
#define USB_R4_P21_PORT_RESET_0 BIT(0)
#define USB_R4_P21_SLEEP_M0 BIT(1)
#define USB_R4_MEM_PD_MASK GENMASK(3, 2)
#define USB_R4_P21_ONLY BIT(4)
-#define USB_R5 0x94
+#define USB_R5 0x14
#define USB_R5_ID_DIG_SYNC BIT(0)
#define USB_R5_ID_DIG_REG BIT(1)
#define USB_R5_ID_DIG_CFG_MASK GENMASK(3, 2)
@@ -96,15 +98,12 @@
#define USB_R5_ID_DIG_TH_MASK GENMASK(15, 8)
#define USB_R5_ID_DIG_CNT_MASK GENMASK(23, 16)
-enum {
- USB2_HOST_PHY = 0,
- USB2_OTG_PHY,
- USB3_HOST_PHY,
- PHY_COUNT,
-};
+#define PHY_COUNT 3
+#define USB2_OTG_PHY 1
-static const char *phy_names[PHY_COUNT] = {
- "usb2-phy0", "usb2-phy1", "usb3-phy0",
+static struct clk_bulk_data meson_gxl_clocks[] = {
+ { .id = "usb_ctrl" },
+ { .id = "ddr" },
};
static struct clk_bulk_data meson_g12a_clocks[] = {
@@ -117,27 +116,133 @@ static struct clk_bulk_data meson_a1_clocks[] = {
{ .id = "xtal_usb_ctrl" },
};
+static const char *meson_gxm_phy_names[] = {
+ "usb2-phy0", "usb2-phy1", "usb2-phy2",
+};
+
+static const char *meson_g12a_phy_names[] = {
+ "usb2-phy0", "usb2-phy1", "usb3-phy0",
+};
+
+/*
+ * Amlogic A1 has a single physical PHY, in slot 1, but still has the
+ * two U2 PHY control register blocks like G12A.
+ * Handling the first PHY on slot 1 would require extensive code
+ * changes, and the current management is generic enough to handle it
+ * correctly when only the "usb2-phy1" phy is specified, in line with
+ * the DT bindings.
+ */
+static const char *meson_a1_phy_names[] = {
+ "usb2-phy0", "usb2-phy1"
+};
+
+struct dwc3_meson_g12a;
+
struct dwc3_meson_g12a_drvdata {
bool otg_switch_supported;
+ bool otg_phy_host_port_disable;
struct clk_bulk_data *clks;
int num_clks;
+ const char **phy_names;
+ int num_phys;
+ int (*setup_regmaps)(struct dwc3_meson_g12a *priv, void __iomem *base);
+ int (*usb2_init_phy)(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+ int (*set_phy_mode)(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+ int (*usb_init)(struct dwc3_meson_g12a *priv);
+ int (*usb_post_init)(struct dwc3_meson_g12a *priv);
+};
+
+static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base);
+static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base);
+
+static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode);
+
+static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode);
+static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode);
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv);
+static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv);
+
+static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv);
+
+/*
+ * For GXL and GXM SoCs:
+ * USB PHY muxing between the DWC2 Device controller and the DWC3 Host
+ * controller is buggy when switching from Device to Host while the USB
+ * port is unpopulated: it causes the DWC3 to hard crash.
+ * When populated (including OTG switching with the ID pin), the switch
+ * works reliably, as on the G12A platforms.
+ * In order to still switch from Host to Device on a USB Type-A port,
+ * a U2_PORT_DISABLE bit has been added to disconnect the DWC3 Host
+ * controller from the port, but when it is used the DWC3 controller
+ * must be reset to recover usage of the port.
+ */
+
+static struct dwc3_meson_g12a_drvdata gxl_drvdata = {
+ .otg_switch_supported = true,
+ .otg_phy_host_port_disable = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_gxl_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_gxl_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
+};
+
+static struct dwc3_meson_g12a_drvdata gxm_drvdata = {
+ .otg_switch_supported = true,
+ .otg_phy_host_port_disable = true,
+ .clks = meson_gxl_clocks,
+ .num_clks = ARRAY_SIZE(meson_gxl_clocks),
+ .phy_names = meson_gxm_phy_names,
+ .num_phys = ARRAY_SIZE(meson_gxm_phy_names),
+ .setup_regmaps = dwc3_meson_gxl_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_gxl_set_phy_mode,
+ .usb_init = dwc3_meson_gxl_usb_init,
+ .usb_post_init = dwc3_meson_gxl_usb_post_init,
};
static struct dwc3_meson_g12a_drvdata g12a_drvdata = {
.otg_switch_supported = true,
.clks = meson_g12a_clocks,
.num_clks = ARRAY_SIZE(meson_g12a_clocks),
+ .phy_names = meson_g12a_phy_names,
+ .num_phys = ARRAY_SIZE(meson_g12a_phy_names),
+ .setup_regmaps = dwc3_meson_g12a_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_g12a_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
};
static struct dwc3_meson_g12a_drvdata a1_drvdata = {
.otg_switch_supported = false,
.clks = meson_a1_clocks,
.num_clks = ARRAY_SIZE(meson_a1_clocks),
+ .phy_names = meson_a1_phy_names,
+ .num_phys = ARRAY_SIZE(meson_a1_phy_names),
+ .setup_regmaps = dwc3_meson_g12a_setup_regmaps,
+ .usb2_init_phy = dwc3_meson_g12a_usb2_init_phy,
+ .set_phy_mode = dwc3_meson_g12a_set_phy_mode,
+ .usb_init = dwc3_meson_g12a_usb_init,
};
struct dwc3_meson_g12a {
struct device *dev;
- struct regmap *regmap;
+ struct regmap *u2p_regmap[PHY_COUNT];
+ struct regmap *usb_glue_regmap;
struct reset_control *reset;
struct phy *phys[PHY_COUNT];
enum usb_dr_mode otg_mode;
@@ -150,49 +255,78 @@ struct dwc3_meson_g12a {
const struct dwc3_meson_g12a_drvdata *drvdata;
};
-static void dwc3_meson_g12a_usb2_set_mode(struct dwc3_meson_g12a *priv,
- int i, enum phy_mode mode)
+static int dwc3_meson_gxl_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
+{
+ return phy_set_mode(priv->phys[i], mode);
+}
+
+static int dwc3_meson_gxl_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode)
+{
+ /* On GXL, the PHY must be started in device mode for the DWC2 init */
+ return priv->drvdata->set_phy_mode(priv, i,
+ (i == USB2_OTG_PHY) ? PHY_MODE_USB_DEVICE
+ : PHY_MODE_USB_HOST);
+}
+
+static int dwc3_meson_g12a_set_phy_mode(struct dwc3_meson_g12a *priv,
+ int i, enum phy_mode mode)
{
if (mode == PHY_MODE_USB_HOST)
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_HOST_DEVICE,
U2P_R0_HOST_DEVICE);
else
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
U2P_R0_HOST_DEVICE, 0);
+
+ return 0;
}
-static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv)
+static int dwc3_meson_g12a_usb2_init_phy(struct dwc3_meson_g12a *priv, int i,
+ enum phy_mode mode)
{
- int i;
+ int ret;
- if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
- priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
- else
- priv->otg_phy_mode = PHY_MODE_USB_HOST;
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_POWER_ON_RESET,
+ U2P_R0_POWER_ON_RESET);
- for (i = 0 ; i < USB3_HOST_PHY ; ++i) {
- if (!priv->phys[i])
- continue;
+ if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
+ U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
- U2P_R0_POWER_ON_RESET,
- U2P_R0_POWER_ON_RESET);
+ ret = priv->drvdata->set_phy_mode(priv, i, mode);
+ } else
+ ret = priv->drvdata->set_phy_mode(priv, i,
+ PHY_MODE_USB_HOST);
- if (priv->drvdata->otg_switch_supported && i == USB2_OTG_PHY) {
- regmap_update_bits(priv->regmap,
- U2P_R0 + (U2P_REG_SIZE * i),
- U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS,
- U2P_R0_ID_PULLUP | U2P_R0_DRV_VBUS);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->u2p_regmap[i], U2P_R0,
+ U2P_R0_POWER_ON_RESET, 0);
+
+ return 0;
+}
- dwc3_meson_g12a_usb2_set_mode(priv, i,
- priv->otg_phy_mode);
- } else
- dwc3_meson_g12a_usb2_set_mode(priv, i,
- PHY_MODE_USB_HOST);
+static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
+{
+ int i, ret;
- regmap_update_bits(priv->regmap, U2P_R0 + (U2P_REG_SIZE * i),
- U2P_R0_POWER_ON_RESET, 0);
+ for (i = 0; i < priv->drvdata->num_phys; ++i) {
+ if (!priv->phys[i])
+ continue;
+
+ if (!strstr(priv->drvdata->phy_names[i], "usb2"))
+ continue;
+
+ ret = priv->drvdata->usb2_init_phy(priv, i, mode);
+ if (ret)
+ return ret;
}
return 0;
@@ -200,7 +334,7 @@ static int dwc3_meson_g12a_usb2_init(struct dwc3_meson_g12a *priv)
static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
{
- regmap_update_bits(priv->regmap, USB_R3,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R3,
USB_R3_P30_SSC_RANGE_MASK |
USB_R3_P30_REF_SSP_EN,
USB_R3_P30_SSC_ENABLE |
@@ -208,61 +342,77 @@ static void dwc3_meson_g12a_usb3_init(struct dwc3_meson_g12a *priv)
USB_R3_P30_REF_SSP_EN);
udelay(2);
- regmap_update_bits(priv->regmap, USB_R2,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R2,
USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK,
FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_3P5DB_MASK, 0x15));
- regmap_update_bits(priv->regmap, USB_R2,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R2,
USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK,
FIELD_PREP(USB_R2_P30_PCS_TX_DEEMPH_6DB_MASK, 0x20));
udelay(2);
- regmap_update_bits(priv->regmap, USB_R1,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT,
USB_R1_U3H_HOST_PORT_POWER_CONTROL_PRESENT);
- regmap_update_bits(priv->regmap, USB_R1,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_P30_PCS_TX_SWING_FULL_MASK,
FIELD_PREP(USB_R1_P30_PCS_TX_SWING_FULL_MASK, 127));
}
-static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv)
+static void dwc3_meson_g12a_usb_otg_apply_mode(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
{
- if (priv->otg_phy_mode == PHY_MODE_USB_DEVICE) {
- regmap_update_bits(priv->regmap, USB_R0,
+ if (mode == PHY_MODE_USB_DEVICE) {
+ if (priv->otg_mode != USB_DR_MODE_OTG &&
+ priv->drvdata->otg_phy_host_port_disable)
+ /* Isolate the OTG PHY port from the Host Controller */
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
+ FIELD_PREP(USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK,
+ BIT(USB2_OTG_PHY)));
+
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_ACT, USB_R0_U2D_ACT);
- regmap_update_bits(priv->regmap, USB_R0,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_SS_SCALEDOWN_MODE_MASK, 0);
- regmap_update_bits(priv->regmap, USB_R4,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R4,
USB_R4_P21_SLEEP_M0, USB_R4_P21_SLEEP_M0);
} else {
- regmap_update_bits(priv->regmap, USB_R0,
+ if (priv->otg_mode != USB_DR_MODE_OTG &&
+ priv->drvdata->otg_phy_host_port_disable) {
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
+ USB_R1_U3H_HOST_U2_PORT_DISABLE_MASK, 0);
+ msleep(500);
+ }
+ regmap_update_bits(priv->usb_glue_regmap, USB_R0,
USB_R0_U2D_ACT, 0);
- regmap_update_bits(priv->regmap, USB_R4,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R4,
USB_R4_P21_SLEEP_M0, 0);
}
}
-static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
+static int dwc3_meson_g12a_usb_init_glue(struct dwc3_meson_g12a *priv,
+ enum phy_mode mode)
{
int ret;
- ret = dwc3_meson_g12a_usb2_init(priv);
+ ret = dwc3_meson_g12a_usb2_init(priv, mode);
if (ret)
return ret;
- regmap_update_bits(priv->regmap, USB_R1,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R1,
USB_R1_U3H_FLADJ_30MHZ_REG_MASK,
FIELD_PREP(USB_R1_U3H_FLADJ_30MHZ_REG_MASK, 0x20));
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_EN_0,
USB_R5_ID_DIG_EN_0);
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_EN_1,
USB_R5_ID_DIG_EN_1);
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_TH_MASK,
FIELD_PREP(USB_R5_ID_DIG_TH_MASK, 0xff));
@@ -270,12 +420,13 @@ static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
if (priv->usb3_ports)
dwc3_meson_g12a_usb3_init(priv);
- dwc3_meson_g12a_usb_otg_apply_mode(priv);
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
return 0;
}
-static const struct regmap_config phy_meson_g12a_usb3_regmap_conf = {
+static const struct regmap_config phy_meson_g12a_usb_glue_regmap_conf = {
+ .name = "usb-glue",
.reg_bits = 8,
.val_bits = 32,
.reg_stride = 4,
@@ -284,17 +435,19 @@ static const struct regmap_config phy_meson_g12a_usb3_regmap_conf = {
static int dwc3_meson_g12a_get_phys(struct dwc3_meson_g12a *priv)
{
+ const char *phy_name;
int i;
- for (i = 0 ; i < PHY_COUNT ; ++i) {
- priv->phys[i] = devm_phy_optional_get(priv->dev, phy_names[i]);
+ for (i = 0 ; i < priv->drvdata->num_phys ; ++i) {
+ phy_name = priv->drvdata->phy_names[i];
+ priv->phys[i] = devm_phy_optional_get(priv->dev, phy_name);
if (!priv->phys[i])
continue;
if (IS_ERR(priv->phys[i]))
return PTR_ERR(priv->phys[i]);
- if (i == USB3_HOST_PHY)
+ if (strstr(phy_name, "usb3"))
priv->usb3_ports++;
else
priv->usb2_ports++;
@@ -310,7 +463,7 @@ static enum phy_mode dwc3_meson_g12a_get_id(struct dwc3_meson_g12a *priv)
{
u32 reg;
- regmap_read(priv->regmap, USB_R5, &reg);
+ regmap_read(priv->usb_glue_regmap, USB_R5, &reg);
if (reg & (USB_R5_ID_DIG_SYNC | USB_R5_ID_DIG_REG))
return PHY_MODE_USB_DEVICE;
@@ -342,9 +495,11 @@ static int dwc3_meson_g12a_otg_mode_set(struct dwc3_meson_g12a *priv,
priv->otg_phy_mode = mode;
- dwc3_meson_g12a_usb2_set_mode(priv, USB2_OTG_PHY, mode);
+ ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY, mode);
+ if (ret)
+ return ret;
- dwc3_meson_g12a_usb_otg_apply_mode(priv);
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, mode);
return 0;
}
@@ -364,6 +519,13 @@ static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw,
if (mode == priv->otg_phy_mode)
return 0;
+ if (priv->drvdata->otg_phy_host_port_disable)
+ dev_warn_once(priv->dev, "Manual OTG switch is broken on this "\
+ "SoC, when manual switching from "\
+ "Host to device, DWC3 controller "\
+ "will need to be resetted in order "\
+ "to recover usage of the Host port");
+
return dwc3_meson_g12a_otg_mode_set(priv, mode);
}
@@ -386,7 +548,8 @@ static irqreturn_t dwc3_meson_g12a_irq_thread(int irq, void *data)
dev_warn(priv->dev, "Failed to switch OTG mode\n");
}
- regmap_update_bits(priv->regmap, USB_R5, USB_R5_ID_DIG_IRQ, 0);
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
+ USB_R5_ID_DIG_IRQ, 0);
return IRQ_HANDLED;
}
@@ -421,7 +584,7 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
if (priv->otg_mode == USB_DR_MODE_OTG) {
/* Ack irq before registering */
- regmap_update_bits(priv->regmap, USB_R5,
+ regmap_update_bits(priv->usb_glue_regmap, USB_R5,
USB_R5_ID_DIG_IRQ, 0);
irq = platform_get_irq(pdev, 0);
@@ -457,6 +620,77 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
return 0;
}
+static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base)
+{
+ /* GXL controls the PHY mode in the PHY registers unlike G12A */
+ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev, base,
+ &phy_meson_g12a_usb_glue_regmap_conf);
+ if (IS_ERR(priv->usb_glue_regmap))
+ return PTR_ERR(priv->usb_glue_regmap);
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv,
+ void __iomem *base)
+{
+ int i;
+
+ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev,
+ base + G12A_GLUE_OFFSET,
+ &phy_meson_g12a_usb_glue_regmap_conf);
+ if (IS_ERR(priv->usb_glue_regmap))
+ return PTR_ERR(priv->usb_glue_regmap);
+
+ /* Create a regmap for each USB2 PHY control register set */
+ for (i = 0; i < priv->usb2_ports; i++) {
+ struct regmap_config u2p_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = U2P_R1,
+ };
+
+ u2p_regmap_config.name = devm_kasprintf(priv->dev, GFP_KERNEL,
+ "u2p-%d", i);
+ if (!u2p_regmap_config.name)
+ return -ENOMEM;
+
+ priv->u2p_regmap[i] = devm_regmap_init_mmio(priv->dev,
+ base + (i * U2P_REG_SIZE),
+ &u2p_regmap_config);
+ if (IS_ERR(priv->u2p_regmap[i]))
+ return PTR_ERR(priv->u2p_regmap[i]);
+ }
+
+ return 0;
+}
+
+static int dwc3_meson_g12a_usb_init(struct dwc3_meson_g12a *priv)
+{
+ return dwc3_meson_g12a_usb_init_glue(priv, priv->otg_phy_mode);
+}
+
+static int dwc3_meson_gxl_usb_init(struct dwc3_meson_g12a *priv)
+{
+ return dwc3_meson_g12a_usb_init_glue(priv, PHY_MODE_USB_DEVICE);
+}
+
+static int dwc3_meson_gxl_usb_post_init(struct dwc3_meson_g12a *priv)
+{
+ int ret;
+
+ ret = priv->drvdata->set_phy_mode(priv, USB2_OTG_PHY,
+ priv->otg_phy_mode);
+ if (ret)
+ return ret;
+
+ dwc3_meson_g12a_usb_otg_apply_mode(priv, priv->otg_phy_mode);
+
+ return 0;
+}
+
static int dwc3_meson_g12a_probe(struct platform_device *pdev)
{
struct dwc3_meson_g12a *priv;
@@ -473,10 +707,8 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- priv->regmap = devm_regmap_init_mmio(dev, base,
- &phy_meson_g12a_usb3_regmap_conf);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ priv->drvdata = of_device_get_match_data(&pdev->dev);
+ priv->dev = dev;
priv->vbus = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(priv->vbus)) {
@@ -485,8 +717,6 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
priv->vbus = NULL;
}
- priv->drvdata = of_device_get_match_data(&pdev->dev);
-
ret = devm_clk_bulk_get(dev,
priv->drvdata->num_clks,
priv->drvdata->clks);
@@ -499,13 +729,12 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
return ret;
platform_set_drvdata(pdev, priv);
- priv->dev = dev;
- priv->reset = devm_reset_control_get(dev, NULL);
+ priv->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(priv->reset)) {
ret = PTR_ERR(priv->reset);
dev_err(dev, "failed to get device reset, err=%d\n", ret);
- return ret;
+ goto err_disable_clks;
}
ret = reset_control_reset(priv->reset);
@@ -516,6 +745,10 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (ret)
goto err_disable_clks;
+ ret = priv->drvdata->setup_regmaps(priv, base);
+ if (ret)
+ goto err_disable_clks;
+
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
@@ -525,7 +758,14 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
/* Get dr_mode */
priv->otg_mode = usb_get_dr_mode(dev);
- dwc3_meson_g12a_usb_init(priv);
+ if (priv->otg_mode == USB_DR_MODE_PERIPHERAL)
+ priv->otg_phy_mode = PHY_MODE_USB_DEVICE;
+ else
+ priv->otg_phy_mode = PHY_MODE_USB_HOST;
+
+ ret = priv->drvdata->usb_init(priv);
+ if (ret)
+ goto err_disable_clks;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
@@ -541,6 +781,12 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
goto err_phys_exit;
}
+ if (priv->drvdata->usb_post_init) {
+ ret = priv->drvdata->usb_post_init(priv);
+ if (ret)
+ goto err_phys_power;
+ }
+
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret)
goto err_phys_power;
@@ -642,7 +888,9 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
reset_control_deassert(priv->reset);
- dwc3_meson_g12a_usb_init(priv);
+ ret = priv->drvdata->usb_init(priv);
+ if (ret)
+ return ret;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
@@ -675,6 +923,14 @@ static const struct dev_pm_ops dwc3_meson_g12a_dev_pm_ops = {
static const struct of_device_id dwc3_meson_g12a_match[] = {
{
+ .compatible = "amlogic,meson-gxl-usb-ctrl",
+ .data = &gxl_drvdata,
+ },
+ {
+ .compatible = "amlogic,meson-gxm-usb-ctrl",
+ .data = &gxm_drvdata,
+ },
+ {
.compatible = "amlogic,meson-g12a-usb-ctrl",
.data = &g12a_drvdata,
},
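With the split regmaps, each USB2 port control block gets its own regmap at base + port * U2P_REG_SIZE, and the glue block gets one at base + G12A_GLUE_OFFSET on G12A (at base directly on GXL), so the USB_Rx macros become per-block offsets instead of absolute ones. A standalone sketch of the resulting address math; U2P_REG_SIZE, G12A_GLUE_OFFSET and USB_R1 mirror the defines above, U2P_R0 is assumed to be the first per-port register at offset 0, and the base address is illustrative:

#include <stdio.h>

#define U2P_REG_SIZE		0x20
#define G12A_GLUE_OFFSET	0x80

#define U2P_R0			0x00	/* per-port offset (assumed first register) */
#define USB_R1			0x04	/* per-glue-block offset */

/* Absolute MMIO address of a per-port U2P register */
static unsigned long u2p_reg(unsigned long base, int port, unsigned int reg)
{
	return base + port * U2P_REG_SIZE + reg;
}

/* Absolute MMIO address of a glue register (glue_offset is 0 on GXL) */
static unsigned long glue_reg(unsigned long base, unsigned long glue_offset,
			      unsigned int reg)
{
	return base + glue_offset + reg;
}

int main(void)
{
	unsigned long base = 0xffe09000;	/* illustrative base address */

	printf("U2P_R0 of port 1: %#lx\n", u2p_reg(base, 1, U2P_R0));
	printf("USB_R1 on G12A:   %#lx\n", glue_reg(base, G12A_GLUE_OFFSET, USB_R1));
	printf("USB_R1 on GXL:    %#lx\n", glue_reg(base, 0, USB_R1));
	return 0;
}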
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index e64754be47b4..8852fbfdead4 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -27,7 +27,6 @@ struct dwc3_of_simple {
struct clk_bulk_data *clks;
int num_clocks;
struct reset_control *resets;
- bool pulse_resets;
bool need_reset;
};
@@ -38,7 +37,6 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- bool shared_resets = false;
simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
if (!simple)
@@ -54,13 +52,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
simple->need_reset = true;
- if (of_device_is_compatible(np, "amlogic,meson-axg-dwc3") ||
- of_device_is_compatible(np, "amlogic,meson-gxl-dwc3")) {
- shared_resets = true;
- simple->pulse_resets = true;
- }
-
- simple->resets = of_reset_control_array_get(np, shared_resets, true,
+ simple->resets = of_reset_control_array_get(np, false, true,
true);
if (IS_ERR(simple->resets)) {
ret = PTR_ERR(simple->resets);
@@ -68,15 +60,9 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
return ret;
}
- if (simple->pulse_resets) {
- ret = reset_control_reset(simple->resets);
- if (ret)
- goto err_resetc_put;
- } else {
- ret = reset_control_deassert(simple->resets);
- if (ret)
- goto err_resetc_put;
- }
+ ret = reset_control_deassert(simple->resets);
+ if (ret)
+ goto err_resetc_put;
ret = clk_bulk_get_all(simple->dev, &simple->clks);
if (ret < 0)
@@ -102,8 +88,7 @@ err_clk_put:
clk_bulk_put_all(simple->num_clocks, simple->clks);
err_resetc_assert:
- if (!simple->pulse_resets)
- reset_control_assert(simple->resets);
+ reset_control_assert(simple->resets);
err_resetc_put:
reset_control_put(simple->resets);
@@ -118,8 +103,7 @@ static void __dwc3_of_simple_teardown(struct dwc3_of_simple *simple)
clk_bulk_put_all(simple->num_clocks, simple->clks);
simple->num_clocks = 0;
- if (!simple->pulse_resets)
- reset_control_assert(simple->resets);
+ reset_control_assert(simple->resets);
reset_control_put(simple->resets);
@@ -191,8 +175,6 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "xlnx,zynqmp-dwc3" },
{ .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
- { .compatible = "amlogic,meson-axg-dwc3" },
- { .compatible = "amlogic,meson-gxl-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ /* Sentinel */ }
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 585cb3deea7a..80c3ef134e41 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -95,7 +95,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
* Wait until device controller is ready. Only applies to 1.94a and
* later RTL.
*/
- if (dwc->revision >= DWC3_REVISION_194A) {
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
while (--retries) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
if (reg & DWC3_DSTS_DCNRD)
@@ -122,7 +122,7 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
* The following code is racy when called from dwc3_gadget_wakeup,
* and is not needed, at least on newer versions
*/
- if (dwc->revision >= DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
return 0;
/* wait for a change in DSTS */
@@ -273,7 +273,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
{
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
- u32 timeout = 1000;
+ u32 timeout = 5000;
u32 saved_config = 0;
u32 reg;
@@ -356,6 +356,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
ret = 0;
break;
case DEPEVT_TRANSFER_NO_RESOURCE:
+ dev_WARN(dwc->dev, "No resource for %s\n",
+ dep->name);
ret = -EINVAL;
break;
case DEPEVT_TRANSFER_BUS_EXPIRY:
@@ -387,9 +389,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
- if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
- dep->flags |= DWC3_EP_TRANSFER_STARTED;
- dwc3_gadget_ep_get_transfer_index(dep);
+ if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
+ if (ret == 0)
+ dep->flags |= DWC3_EP_TRANSFER_STARTED;
+
+ if (ret != -ETIMEDOUT)
+ dwc3_gadget_ep_get_transfer_index(dep);
}
if (saved_config) {
@@ -415,7 +420,8 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
* IN transfers due to a mishandled error condition. Synopsys
* STAR 9000614252.
*/
- if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
+ if (dep->direction &&
+ !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
(dwc->gadget.speed >= USB_SPEED_SUPER))
cmd |= DWC3_DEPCMD_CLEARPENDIN;
@@ -573,6 +579,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+ | DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
}
@@ -603,6 +610,9 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
+static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ bool interrupt);
+
/**
* __dwc3_gadget_ep_enable - initializes a hw endpoint
* @dep: endpoint to be initialized
@@ -663,7 +673,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
* Issue StartTransfer here with no-op TRB so we can always rely on No
* Response Update Transfer command.
*/
- if ((usb_endpoint_xfer_bulk(desc) && !dep->stream_capable) ||
+ if (usb_endpoint_xfer_bulk(desc) ||
usb_endpoint_xfer_int(desc)) {
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_trb *trb;
@@ -682,6 +692,29 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0)
return ret;
+
+ if (dep->stream_capable) {
+ /*
+ * For streams, at start, there may be a race where the
+ * host primes the endpoint before the function driver
+ * queues a request to initiate a stream. In that case,
+ * the controller will not see the prime to generate the
+ * ERDY and start stream. To workaround this, issue a
+ * no-op TRB as normal, but end it immediately. As a
+ * result, when the function driver queues the request,
+ * the next START_TRANSFER command will cause the
+ * controller to generate an ERDY to initiate the
+ * stream.
+ */
+ dwc3_stop_active_transfer(dep, true, true);
+
+ /*
+ * All stream eps will reinitiate stream on NoStream
+ * rejection until we can determine that the host can
+ * prime after the first transfer.
+ */
+ dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
+ }
}
out:
@@ -690,8 +723,6 @@ out:
return 0;
}
-static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
- bool interrupt);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
@@ -912,7 +943,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
- unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
+ unsigned stream_id, unsigned short_not_ok,
+ unsigned no_interrupt, unsigned is_last)
{
struct dwc3 *dwc = dep->dwc;
struct usb_gadget *gadget = &dwc->gadget;
@@ -1005,6 +1037,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
+ else if (dep->stream_capable && is_last)
+ trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
@@ -1032,6 +1066,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
unsigned stream_id = req->request.stream_id;
unsigned short_not_ok = req->request.short_not_ok;
unsigned no_interrupt = req->request.no_interrupt;
+ unsigned is_last = req->request.is_last;
if (req->request.num_sgs > 0) {
length = sg_dma_len(req->start_sg);
@@ -1052,7 +1087,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->num_trbs++;
__dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
- stream_id, short_not_ok, no_interrupt);
+ stream_id, short_not_ok, no_interrupt, is_last);
}
static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
@@ -1097,7 +1132,8 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
maxp - rem, false, 1,
req->request.stream_id,
req->request.short_not_ok,
- req->request.no_interrupt);
+ req->request.no_interrupt,
+ req->request.is_last);
} else {
dwc3_prepare_one_trb(dep, req, chain, i);
}
@@ -1141,7 +1177,8 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
false, 1, req->request.stream_id,
req->request.short_not_ok,
- req->request.no_interrupt);
+ req->request.no_interrupt,
+ req->request.is_last);
} else if (req->request.zero && req->request.length &&
(IS_ALIGNED(req->request.length, maxp))) {
struct dwc3 *dwc = dep->dwc;
@@ -1158,7 +1195,8 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
__dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
false, 1, req->request.stream_id,
req->request.short_not_ok,
- req->request.no_interrupt);
+ req->request.no_interrupt,
+ req->request.is_last);
} else {
dwc3_prepare_one_trb(dep, req, false, 0);
}
@@ -1194,6 +1232,14 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+
+ /*
+ * Don't prepare beyond a transfer. In DWC_usb32, its transfer
+ * burst capability may try to read and use TRBs beyond the
+ * active transfer instead of stopping.
+ */
+ if (dep->stream_capable && req->request.is_last)
+ return;
}
list_for_each_entry_safe(req, n, &dep->pending_list, list) {
@@ -1217,9 +1263,19 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
if (!dwc3_calc_trbs_left(dep))
return;
+
+ /*
+ * Don't prepare beyond a transfer. In DWC_usb32, its transfer
+ * burst capability may try to read and use TRBs beyond the
+ * active transfer instead of stopping.
+ */
+ if (dep->stream_capable && req->request.is_last)
+ return;
}
}
+static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
+
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -1259,17 +1315,26 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
- /*
- * FIXME we need to iterate over the list of requests
- * here and stop, unmap, free and del each of the linked
- * requests instead of what we do now.
- */
- if (req->trb)
- memset(req->trb, 0, sizeof(struct dwc3_trb));
- dwc3_gadget_del_and_unmap_request(dep, req, ret);
+ struct dwc3_request *tmp;
+
+ if (ret == -EAGAIN)
+ return ret;
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ /* If ep isn't started, then there's no end transfer pending */
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+
return ret;
}
+ if (dep->stream_capable && req->request.is_last)
+ dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
return 0;
}
@@ -1402,17 +1467,15 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
int ret;
int i;
- if (list_empty(&dep->pending_list)) {
+ if (list_empty(&dep->pending_list) &&
+ list_empty(&dep->started_list)) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
return -EAGAIN;
}
- if (!dwc->dis_start_transfer_quirk && dwc3_is_usb31(dwc) &&
- (dwc->revision <= DWC3_USB31_REVISION_160A ||
- (dwc->revision == DWC3_USB31_REVISION_170A &&
- dwc->version_type >= DWC31_VERSIONTYPE_EA01 &&
- dwc->version_type <= DWC31_VERSIONTYPE_EA06))) {
-
+ if (!dwc->dis_start_transfer_quirk &&
+ (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
+ DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction)
return dwc3_gadget_start_isoc_quirk(dep);
}
@@ -1425,6 +1488,27 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
break;
}
+ /*
+ * After a number of unsuccessful start attempts due to bus-expiry
+ * status, issue END_TRANSFER command and retry on the next XferNotReady
+ * event.
+ */
+ if (ret == -EAGAIN) {
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd;
+
+ cmd = DWC3_DEPCMD_ENDTRANSFER |
+ DWC3_DEPCMD_CMDIOC |
+ DWC3_DEPCMD_PARAM(dep->resource_index);
+
+ dep->resource_index = 0;
+ memset(&params, 0, sizeof(params));
+
+ ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ if (!ret)
+ dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
+ }
+
return ret;
}
@@ -1457,6 +1541,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
req->status = DWC3_REQUEST_STATUS_QUEUED;
+ if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
+ return 0;
+
/* Start the transfer only after the END_TRANSFER is completed */
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
dep->flags |= DWC3_EP_DELAY_START;
@@ -1508,6 +1595,10 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
{
int i;
+ /* If req->trb is not set, then the request has not started */
+ if (!req->trb)
+ return;
+
/*
* If request was already started, this means we had to
* stop the transfer. With that we also need to ignore
@@ -1556,39 +1647,40 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
- list_for_each_entry(r, &dep->pending_list, list) {
+ list_for_each_entry(r, &dep->cancelled_list, list) {
if (r == req)
- break;
+ goto out;
}
- if (r != req) {
- list_for_each_entry(r, &dep->started_list, list) {
- if (r == req)
- break;
+ list_for_each_entry(r, &dep->pending_list, list) {
+ if (r == req) {
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ goto out;
}
+ }
+
+ list_for_each_entry(r, &dep->started_list, list) {
if (r == req) {
+ struct dwc3_request *t;
+
/* wait until it is processed */
dwc3_stop_active_transfer(dep, true, true);
- if (!r->trb)
- goto out0;
+ /*
+ * Remove any started request if the transfer is
+ * cancelled.
+ */
+ list_for_each_entry_safe(r, t, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(r);
- dwc3_gadget_move_cancelled_request(req);
- if (dep->flags & DWC3_EP_TRANSFER_STARTED)
- goto out0;
- else
- goto out1;
+ goto out;
}
- dev_err(dwc->dev, "request %pK was not queued to %s\n",
- request, ep->name);
- ret = -EINVAL;
- goto out0;
}
-out1:
- dwc3_gadget_giveback(dep, req, -ECONNRESET);
-
-out0:
+ dev_err(dwc->dev, "request %pK was not queued to %s\n",
+ request, ep->name);
+ ret = -EINVAL;
+out:
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1598,6 +1690,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3 *dwc = dep->dwc;
+ struct dwc3_request *req;
+ struct dwc3_request *tmp;
int ret;
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
@@ -1634,13 +1728,37 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
else
dep->flags |= DWC3_EP_STALL;
} else {
+ /*
+ * Don't issue CLEAR_STALL command to control endpoints. The
+ * controller automatically clears the STALL when it receives
+ * the SETUP token.
+ */
+ if (dep->number <= 1) {
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return 0;
+ }
ret = dwc3_send_clear_stall_ep_cmd(dep);
- if (ret)
+ if (ret) {
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
- else
- dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+ return ret;
+ }
+
+ dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
+
+ dwc3_stop_active_transfer(dep, true, true);
+
+ list_for_each_entry_safe(req, tmp, &dep->started_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ list_for_each_entry_safe(req, tmp, &dep->pending_list, list)
+ dwc3_gadget_move_cancelled_request(req);
+
+ if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) {
+ dep->flags &= ~DWC3_EP_DELAY_START;
+ dwc3_gadget_ep_cleanup_cancelled_requests(dep);
+ }
}
return ret;
@@ -1756,7 +1874,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
}
/* Recent versions do this automatically */
- if (dwc->revision < DWC3_REVISION_194A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
/* write zeroes to Link Change Request */
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
@@ -1818,12 +1936,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
- if (dwc->revision <= DWC3_REVISION_187A) {
+ if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
}
- if (dwc->revision >= DWC3_REVISION_194A)
+ if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
reg &= ~DWC3_DCTL_KEEP_CONNECT;
reg |= DWC3_DCTL_RUN_STOP;
@@ -1897,7 +2015,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
DWC3_DEVTEN_USBRSTEN |
DWC3_DEVTEN_DISCONNEVTEN);
- if (dwc->revision < DWC3_REVISION_250A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A))
reg |= DWC3_DEVTEN_ULSTCNGEN;
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
@@ -1942,6 +2060,8 @@ static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
nump = min_t(u32, nump, 16);
@@ -1978,10 +2098,10 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
* bursts of data without going through any sort of endpoint throttling.
*/
reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
- if (dwc3_is_usb31(dwc))
- reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
- else
+ if (DWC3_IP_IS(DWC3))
reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
+ else
+ reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
@@ -2154,7 +2274,7 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
* STAR#9000525659: Clock Domain Crossing on DCTL in
* USB 2.0 Mode
*/
- if (dwc->revision < DWC3_REVISION_220A &&
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
!dwc->dis_metastability_quirk) {
reg |= DWC3_DCFG_SUPERSPEED;
} else {
@@ -2172,18 +2292,18 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
reg |= DWC3_DCFG_SUPERSPEED;
break;
case USB_SPEED_SUPER_PLUS:
- if (dwc3_is_usb31(dwc))
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- else
+ if (DWC3_IP_IS(DWC3))
reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
break;
default:
dev_err(dwc->dev, "invalid speed (%d)\n", speed);
- if (dwc->revision & DWC3_REVISION_IS_DWC31)
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- else
+ if (DWC3_IP_IS(DWC3))
reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
}
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2226,14 +2346,17 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
+
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth /= 8;
size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
- if (dwc3_is_usb31(dwc))
- size = DWC31_GTXFIFOSIZ_TXFDEP(size);
- else
+ if (DWC3_IP_IS(DWC3))
size = DWC3_GTXFIFOSIZ_TXFDEP(size);
+ else
+ size = DWC31_GTXFIFOSIZ_TXFDEP(size);
/* FIFO Depth is in MDWDITH bytes. Multiply */
size *= mdwidth;
@@ -2270,16 +2393,18 @@ static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
int size;
mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ if (DWC3_IP_IS(DWC32))
+ mdwidth += DWC3_GHWPARAMS6_MDWIDTH(dwc->hwparams.hwparams6);
/* MDWIDTH is represented in bits, convert to bytes */
mdwidth /= 8;
/* All OUT endpoints share a single RxFIFO space */
size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
- if (dwc3_is_usb31(dwc))
- size = DWC31_GRXFIFOSIZ_RXFDEP(size);
- else
+ if (DWC3_IP_IS(DWC3))
size = DWC3_GRXFIFOSIZ_RXFDEP(size);
+ else
+ size = DWC31_GRXFIFOSIZ_RXFDEP(size);
/* FIFO depth is in MDWDITH bytes */
size *= mdwidth;
@@ -2531,10 +2656,8 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
- if (!dwc3_gadget_ep_request_completed(req)) {
- __dwc3_gadget_kick_transfer(dep);
+ if (!dwc3_gadget_ep_request_completed(req))
goto out;
- }
dwc3_gadget_giveback(dep, req, status);
@@ -2558,41 +2681,53 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
}
}
+static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
+{
+ struct dwc3_request *req;
+
+ if (!list_empty(&dep->pending_list))
+ return true;
+
+ /*
+ * We only need to check the first entry of the started list. We can
+ * assume the completed requests are removed from the started list.
+ */
+ req = next_request(&dep->started_list);
+ if (!req)
+ return false;
+
+ return !dwc3_gadget_ep_request_completed(req);
+}
+
static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
dep->frame_number = event->parameters;
}
-static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
- const struct dwc3_event_depevt *event)
+static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event, int status)
{
struct dwc3 *dwc = dep->dwc;
- unsigned status = 0;
- bool stop = false;
-
- dwc3_gadget_endpoint_frame_from_event(dep, event);
-
- if (event->status & DEPEVT_STATUS_BUSERR)
- status = -ECONNRESET;
-
- if (event->status & DEPEVT_STATUS_MISSED_ISOC) {
- status = -EXDEV;
-
- if (list_empty(&dep->started_list))
- stop = true;
- }
+ bool no_started_trb = true;
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
- if (stop)
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
+ goto out;
+
+ if (status == -EXDEV && list_empty(&dep->started_list))
dwc3_stop_active_transfer(dep, true, true);
+ else if (dwc3_gadget_ep_should_continue(dep))
+ if (__dwc3_gadget_kick_transfer(dep) == 0)
+ no_started_trb = false;
+out:
/*
* WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
* See dwc3_gadget_linksts_change_interrupt() for 1st half.
*/
- if (dwc->revision < DWC3_REVISION_183A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
u32 reg;
int i;
@@ -2603,7 +2738,7 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
continue;
if (!list_empty(&dep->started_list))
- return;
+ return no_started_trb;
}
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -2612,15 +2747,124 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
dwc->u1u2 = 0;
}
+
+ return no_started_trb;
+}
+
+static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ int status = 0;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ dwc3_gadget_endpoint_frame_from_event(dep, event);
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ if (event->status & DEPEVT_STATUS_MISSED_ISOC)
+ status = -EXDEV;
+
+ dwc3_gadget_endpoint_trbs_complete(dep, event, status);
+}
+
+static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ int status = 0;
+
+ dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+
+ if (event->status & DEPEVT_STATUS_BUSERR)
+ status = -ECONNRESET;
+
+ if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
}
static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event)
{
dwc3_gadget_endpoint_frame_from_event(dep, event);
+
+ /*
+ * The XferNotReady event is generated only once before the endpoint
+ * starts. It will be generated again when END_TRANSFER command is
+ * issued. For some controller versions, the XferNotReady event may be
+ * generated while the END_TRANSFER command is still in process. Ignore
+ * it and wait for the next XferNotReady event after the command is
+ * completed.
+ */
+ if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
+ return;
+
(void) __dwc3_gadget_start_isoc(dep);
}
+static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
+ const struct dwc3_event_depevt *event)
+{
+ struct dwc3 *dwc = dep->dwc;
+
+ if (event->status == DEPEVT_STREAMEVT_FOUND) {
+ dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
+ goto out;
+ }
+
+ /* Note: NoStream rejection event param value is 0 and not 0xFFFF */
+ switch (event->parameters) {
+ case DEPEVT_STREAM_PRIME:
+ /*
+ * If the host can properly transition the endpoint state from
+ * idle to prime after a NoStream rejection, there's no need to
+ * force restarting the endpoint to reinitiate the stream. To
+ * simplify the check, assume the host follows the USB spec if
+ * it primed the endpoint more than once.
+ */
+ if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
+ if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
+ dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
+ else
+ dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
+ }
+
+ break;
+ case DEPEVT_STREAM_NOSTREAM:
+ if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
+ !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
+ break;
+
+ /*
+ * If the host rejects a stream due to no active stream, by the
+ * USB and xHCI spec, the endpoint will be put back to idle
+ * state. When the host is ready (buffer added/updated), it will
+ * prime the endpoint to inform the usb device controller. This
+ * triggers the device controller to issue ERDY to restart the
+ * stream. However, some hosts don't follow this and keep the
+ * endpoint in the idle state. No prime will come even though the
+ * host streams are updated, and the device controller will not be
+ * triggered to generate an ERDY to move the next stream data. To
+ * work around this and maintain compatibility with various hosts,
+ * force reinitiating the stream until the host is ready, instead
+ * of waiting for the host to prime the endpoint.
+ */
+ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
+ unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
+
+ dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
+ } else {
+ dep->flags |= DWC3_EP_DELAY_START;
+ dwc3_stop_active_transfer(dep, true, true);
+ return;
+ }
+ break;
+ }
+
+out:
+ dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
+}
+
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
@@ -2665,8 +2909,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_DELAY_START;
}
break;
- case DWC3_DEPEVT_STREAMEVT:
case DWC3_DEPEVT_XFERCOMPLETE:
+ dwc3_gadget_endpoint_transfer_complete(dep, event);
+ break;
+ case DWC3_DEPEVT_STREAMEVT:
+ dwc3_gadget_endpoint_stream_event(dep, event);
+ break;
case DWC3_DEPEVT_RXTXFIFOEVT:
break;
}
@@ -2758,6 +3006,14 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
+ /*
+ * The END_TRANSFER command will cause the controller to generate a
+ * NoStream Event, and it's not due to the host DP NoStream rejection.
+ * Ignore the next NoStream event.
+ */
+ if (dep->stream_capable)
+ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
+
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
@@ -2838,7 +3094,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
* STAR#9000466709: RTL: Device : Disconnect event not
* generated if setup packet pending in FIFO
*/
- if (dwc->revision < DWC3_REVISION_188A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
if (dwc->setup_packet_pending)
dwc3_gadget_disconnect_interrupt(dwc);
}
@@ -2897,7 +3153,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
* STAR#9000483510: RTL: SS : USB3 reset event may
* not be generated always when the link enters poll
*/
- if (dwc->revision < DWC3_REVISION_190A)
+ if (DWC3_VER_IS_PRIOR(DWC3, 190A))
dwc3_gadget_reset_interrupt(dwc);
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2925,7 +3181,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
/* Enable USB2 LPM Capability */
- if ((dwc->revision > DWC3_REVISION_194A) &&
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
(speed != DWC3_DSTS_SUPERSPEED) &&
(speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
@@ -2944,11 +3200,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
* BESL value in the LPM token is less than or equal to LPM
* NYET threshold.
*/
- WARN_ONCE(dwc->revision < DWC3_REVISION_240A
- && dwc->has_lpm_erratum,
+ WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
"LPM Erratum not available on dwc3 revisions < 2.40a\n");
- if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
+ if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
dwc3_gadget_dctl_write_safe(dwc, reg);
@@ -3019,7 +3274,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
* operational mode
*/
pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
- if ((dwc->revision < DWC3_REVISION_250A) &&
+ if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
(next == DWC3_LINK_STATE_RESUME)) {
@@ -3045,7 +3300,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
* STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
* core send LGO_Ux entering U0
*/
- if (dwc->revision < DWC3_REVISION_183A) {
+ if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
if (next == DWC3_LINK_STATE_U0) {
u32 u1u2;
u32 reg;
@@ -3156,7 +3411,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break;
case DWC3_DEVICE_EVENT_EOPF:
/* It changed to be suspend event for version 2.30a and above */
- if (dwc->revision >= DWC3_REVISION_230A) {
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
/*
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
@@ -3401,7 +3656,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
* is less than super speed because we don't have means, yet, to tell
* composite.c that we are USB 2.0 + LPM ECN.
*/
- if (dwc->revision < DWC3_REVISION_220A &&
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
!dwc->dis_metastability_quirk)
dev_info(dwc->dev, "changing max_speed on rev %08x\n",
dwc->revision);
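The stream workaround in gadget.c reads most easily as a small state machine over the endpoint flags: DWC3_EP_FORCE_RESTART_STREAM is set when a stream-capable endpoint is enabled, DWC3_EP_FIRST_STREAM_PRIMED records that the host has primed once, and DWC3_EP_IGNORE_NEXT_NOSTREAM suppresses the NoStream event that an END_TRANSFER command generates by itself. A standalone sketch of those transitions with stand-in flag bits; the DWC3_EP_WAIT_TRANSFER_COMPLETE gate is omitted for brevity:

#include <stdio.h>

/* Stand-in flag bits mirroring DWC3_EP_FORCE_RESTART_STREAM and friends */
#define EP_FORCE_RESTART_STREAM		(1 << 0)
#define EP_FIRST_STREAM_PRIMED		(1 << 1)
#define EP_IGNORE_NEXT_NOSTREAM		(1 << 2)

/* Host primed the endpoint (DEPEVT_STREAM_PRIME) */
static void on_prime(unsigned int *flags)
{
	if (*flags & EP_FORCE_RESTART_STREAM) {
		if (*flags & EP_FIRST_STREAM_PRIMED)
			*flags &= ~EP_FORCE_RESTART_STREAM;	/* host behaves, stop forcing */
		else
			*flags |= EP_FIRST_STREAM_PRIMED;
	}
	*flags &= ~EP_IGNORE_NEXT_NOSTREAM;
}

/* Host rejected with NoStream (DEPEVT_STREAM_NOSTREAM); returns 1 to reinitiate */
static int on_nostream(unsigned int *flags)
{
	if (*flags & EP_IGNORE_NEXT_NOSTREAM) {
		*flags &= ~EP_IGNORE_NEXT_NOSTREAM;
		return 0;	/* self-induced by END_TRANSFER, ignore */
	}
	return !!(*flags & EP_FORCE_RESTART_STREAM);
}

int main(void)
{
	unsigned int flags = EP_FORCE_RESTART_STREAM;	/* set at ep enable */

	on_prime(&flags);	/* first prime observed */
	printf("restart on NoStream: %d\n", on_nostream(&flags));	/* 1 */
	on_prime(&flags);	/* second prime: host follows the spec */
	printf("restart on NoStream: %d\n", on_nostream(&flags));	/* 0 */
	return 0;
}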
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index fbc7d8013f0b..24dca3872022 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* gadget.h - DesignWare USB3 DRD Gadget Header
*
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 86dbd012b984..bef1c1ac2067 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -104,7 +104,7 @@ int dwc3_host_init(struct dwc3 *dwc)
*
* This following flag tells XHCI to do just that.
*/
- if (dwc->revision <= DWC3_REVISION_300A)
+ if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A))
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index 70acdf94a0bf..9bbe5d4bf076 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* io.h - DesignWare USB3 DRD IO Header
*
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 3054b89512ff..4c4fc6c41d9b 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* trace.h - DesignWare USB3 DRD Controller Trace Support
*
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 171280c80228..04ba11fff0ed 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -18,7 +18,6 @@
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
index 6e2b7266a695..8b4d71de45fc 100644
--- a/drivers/usb/early/xhci-dbc.h
+++ b/drivers/usb/early/xhci-dbc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xhci-dbc.h - xHCI debug capability early driver
*
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index cb4950cf1cdc..5c1eb96a5c57 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -96,40 +96,43 @@ function_descriptors(struct usb_function *f,
}
/**
- * next_ep_desc() - advance to the next EP descriptor
+ * next_desc() - advance to the next desc_type descriptor
 * @t: current pointer within descriptor array
+ * @desc_type: descriptor type
*
- * Return: next EP descriptor or NULL
+ * Return: next desc_type descriptor or NULL
*
- * Iterate over @t until either EP descriptor found or
+ * Iterate over @t until either a desc_type descriptor is found or
 * NULL (which indicates the end of the list) is encountered
*/
static struct usb_descriptor_header**
-next_ep_desc(struct usb_descriptor_header **t)
+next_desc(struct usb_descriptor_header **t, u8 desc_type)
{
for (; *t; t++) {
- if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+ if ((*t)->bDescriptorType == desc_type)
return t;
}
return NULL;
}
/*
- * for_each_ep_desc()- iterate over endpoint descriptors in the
- * descriptors list
- * @start: pointer within descriptor array.
- * @ep_desc: endpoint descriptor to use as the loop cursor
+ * for_each_desc() - iterate over desc_type descriptors in the
+ * descriptors list
+ * @start: pointer within descriptor array.
+ * @iter_desc: desc_type descriptor to use as the loop cursor
+ * @desc_type: wanted descriptor type
*/
-#define for_each_ep_desc(start, ep_desc) \
- for (ep_desc = next_ep_desc(start); \
- ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+#define for_each_desc(start, iter_desc, desc_type) \
+ for (iter_desc = next_desc(start, desc_type); \
+ iter_desc; iter_desc = next_desc(iter_desc + 1, desc_type))
/**
- * config_ep_by_speed() - configures the given endpoint
+ * config_ep_by_speed_and_alt() - configures the given endpoint
* according to gadget speed.
* @g: pointer to the gadget
* @f: usb function
* @_ep: the endpoint to configure
+ * @alt: alternate setting number
*
* Return: error code, 0 on success
*
@@ -142,11 +145,13 @@ next_ep_desc(struct usb_descriptor_header **t)
* Note: the supplied function should hold all the descriptors
* for supported speeds
*/
-int config_ep_by_speed(struct usb_gadget *g,
- struct usb_function *f,
- struct usb_ep *_ep)
+int config_ep_by_speed_and_alt(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep,
+ u8 alt)
{
struct usb_endpoint_descriptor *chosen_desc = NULL;
+ struct usb_interface_descriptor *int_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
@@ -182,8 +187,21 @@ int config_ep_by_speed(struct usb_gadget *g,
default:
speed_desc = f->fs_descriptors;
}
+
+ /* find correct alternate setting descriptor */
+ for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
+ int_desc = (struct usb_interface_descriptor *)*d_spd;
+
+ if (int_desc->bAlternateSetting == alt) {
+ speed_desc = d_spd;
+ goto intf_found;
+ }
+ }
+ return -EIO;
+
+intf_found:
/* find descriptors */
- for_each_ep_desc(speed_desc, d_spd) {
+ for_each_desc(speed_desc, d_spd, USB_DT_ENDPOINT) {
chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
if (chosen_desc->bEndpointAddress == _ep->address)
goto ep_found;
@@ -237,6 +255,32 @@ ep_found:
}
return 0;
}
+EXPORT_SYMBOL_GPL(config_ep_by_speed_and_alt);
+
+/**
+ * config_ep_by_speed() - configures the given endpoint
+ * according to gadget speed.
+ * @g: pointer to the gadget
+ * @f: usb function
+ * @_ep: the endpoint to configure
+ *
+ * Return: error code, 0 on success
+ *
+ * This function chooses the right descriptors for a given
+ * endpoint according to gadget speed and saves it in the
+ * endpoint desc field. If the endpoint already has a descriptor
+ * assigned to it - overwrites it with currently corresponding
+ * descriptor. The endpoint maxpacket field is updated according
+ * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors
+ * for supported speeds
+ */
+int config_ep_by_speed(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep)
+{
+ return config_ep_by_speed_and_alt(g, f, _ep, 0);
+}
EXPORT_SYMBOL_GPL(config_ep_by_speed);
/**
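
A hedged usage sketch of the new entry point: a function driver's set_alt() handler can now pick the descriptor matching both the current speed and the requested alternate setting, with config_ep_by_speed() remaining as the alt-0 shorthand. The helper name below is hypothetical:

	#include <linux/usb/composite.h>
	#include <linux/usb/gadget.h>

	/* Hypothetical fragment, not from this patch. */
	static int demo_enable_ep(struct usb_function *f, struct usb_ep *ep, u8 alt)
	{
		struct usb_gadget *g = f->config->cdev->gadget;
		int ret;

		/* select the endpoint descriptor for the current speed and this alt */
		ret = config_ep_by_speed_and_alt(g, f, ep, alt);
		if (ret)
			return ret;

		return usb_ep_enable(ep);
	}
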
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 6a9aa4413d64..9dc06a4e1b30 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -13,8 +13,6 @@
int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev)
{
- unsigned primary_lang;
- unsigned sub_lang;
u16 num;
int ret;
@@ -22,17 +20,7 @@ int check_user_usb_string(const char *name,
if (ret)
return ret;
- primary_lang = num & 0x3ff;
- sub_lang = num >> 10;
-
- /* simple sanity check for valid langid */
- switch (primary_lang) {
- case 0:
- case 0x62 ... 0xfe:
- case 0x100 ... 0x3ff:
- return -EINVAL;
- }
- if (!sub_lang)
+ if (!usb_validate_langid(num))
return -EINVAL;
stringtab_dev->language = num;
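
usb_validate_langid() is introduced elsewhere in this series; it is assumed to perform the same LANGID sanity check that the removed lines did. A standalone reconstruction of that check, for reference only:

	#include <linux/types.h>

	/* Reconstructed from the code removed above; the real helper lives in the
	 * USB core, this copy is purely illustrative. */
	static bool demo_validate_langid(u16 langid)
	{
		u16 primary_lang = langid & 0x3ff;	/* bits 9:0   */
		u16 sub_lang = langid >> 10;		/* bits 15:10 */

		switch (primary_lang) {
		case 0:
		case 0x62 ... 0xfe:
		case 0x100 ... 0x3ff:
			return false;
		}
		return sub_lang != 0;
	}
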
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 7c152c28b26c..200596ea9557 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -723,6 +723,20 @@ static void acm_free_func(struct usb_function *f)
kfree(acm);
}
+static void acm_resume(struct usb_function *f)
+{
+ struct f_acm *acm = func_to_acm(f);
+
+ gserial_resume(&acm->port);
+}
+
+static void acm_suspend(struct usb_function *f)
+{
+ struct f_acm *acm = func_to_acm(f);
+
+ gserial_suspend(&acm->port);
+}
+
static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
{
struct f_serial_opts *opts;
@@ -750,6 +764,8 @@ static struct usb_function *acm_alloc_func(struct usb_function_instance *fi)
acm->port_num = opts->port_num;
acm->port.func.unbind = acm_unbind;
acm->port.func.free_func = acm_free_func;
+ acm->port.func.resume = acm_resume;
+ acm->port.func.suspend = acm_suspend;
return &acm->port.func;
}
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index b81a91d504bd..cfcc4e81fb77 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -291,8 +291,6 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
eem->port.out_ep = ep;
- status = -ENOMEM;
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 10f01f974f67..490d353d5fde 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -32,7 +32,7 @@
#include <linux/usb/functionfs.h>
#include <linux/aio.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/poll.h>
#include <linux/eventfd.h>
@@ -824,13 +824,9 @@ static void ffs_user_copy_worker(struct work_struct *work)
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
- mm_segment_t oldfs = get_fs();
-
- set_fs(USER_DS);
- use_mm(io_data->mm);
+ kthread_use_mm(io_data->mm);
ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
- unuse_mm(io_data->mm);
- set_fs(oldfs);
+ kthread_unuse_mm(io_data->mm);
}
io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
@@ -2508,7 +2504,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
os_descs_count = get_unaligned_le32(data);
data += 4;
len -= 4;
- };
+ }
/* Read descriptors */
raw_descs = data;
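
The AIO completion worker no longer needs <linux/mmu_context.h> or the get_fs()/set_fs(USER_DS) dance: kthread_use_mm()/kthread_unuse_mm() temporarily adopt the submitter's mm so copy_to_iter() can resolve the user addresses directly. The pattern in isolation (everything except the kthread helpers and copy_to_iter() is illustrative):

	#include <linux/kthread.h>
	#include <linux/mm_types.h>
	#include <linux/uio.h>

	static size_t demo_copy_to_user_iter(struct mm_struct *mm, const void *buf,
					     size_t len, struct iov_iter *to)
	{
		size_t copied;

		kthread_use_mm(mm);		/* borrow the issuing task's mm */
		copied = copy_to_iter(buf, len, to);
		kthread_unuse_mm(mm);

		return copied;
	}
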
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 1406255d0865..e62713846350 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -348,6 +348,20 @@ static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
usb_free_all_descriptors(f);
}
+static void gser_resume(struct usb_function *f)
+{
+ struct f_gser *gser = func_to_gser(f);
+
+ gserial_resume(&gser->port);
+}
+
+static void gser_suspend(struct usb_function *f)
+{
+ struct f_gser *gser = func_to_gser(f);
+
+ gserial_suspend(&gser->port);
+}
+
static struct usb_function *gser_alloc(struct usb_function_instance *fi)
{
struct f_gser *gser;
@@ -369,6 +383,8 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi)
gser->port.func.set_alt = gser_set_alt;
gser->port.func.disable = gser_disable;
gser->port.func.free_func = gser_free;
+ gser->port.func.resume = gser_resume;
+ gser->port.func.suspend = gser_suspend;
return &gser->port.func;
}
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 36504931b2d1..2979cbe4d95f 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -531,6 +531,7 @@ static int uasp_prepare_r_request(struct usbg_cmd *cmd)
stream->req_in->sg = se_cmd->t_data_sg;
}
+ stream->req_in->is_last = 1;
stream->req_in->complete = uasp_status_data_cmpl;
stream->req_in->length = se_cmd->data_length;
stream->req_in->context = cmd;
@@ -554,6 +555,7 @@ static void uasp_prepare_status(struct usbg_cmd *cmd)
*/
iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
iu->status = se_cmd->scsi_status;
+ stream->req_status->is_last = 1;
stream->req_status->context = cmd;
stream->req_status->length = se_cmd->scsi_sense_length + 16;
stream->req_status->buf = iu;
@@ -991,6 +993,7 @@ static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
req->sg = se_cmd->t_data_sg;
}
+ req->is_last = 1;
req->complete = usbg_data_write_cmpl;
req->length = se_cmd->data_length;
req->context = cmd;
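
The is_last flag is new on struct usb_request in this series (it accompanies the dwc3 stream-transfer work); setting it appears to mark a request as the final one of a stream so the UDC can complete the stream once it finishes. A minimal, hedged sketch of the queueing pattern:

	#include <linux/gfp.h>
	#include <linux/usb/gadget.h>

	/* Hypothetical helper; only req->is_last and usb_ep_queue() are real. */
	static int demo_queue_final(struct usb_ep *ep, struct usb_request *req)
	{
		req->is_last = 1;	/* last request of this stream transfer */
		return usb_ep_queue(ep, req, GFP_ATOMIC);
	}
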
diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h
index a81a17765558..1db972d4beeb 100644
--- a/drivers/usb/gadget/function/f_uvc.h
+++ b/drivers/usb/gadget/function/f_uvc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* f_uvc.h -- USB Video Class Gadget driver
*
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index c7e3a70ce6c1..f6167f7fea82 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* RNDIS Definitions for Remote NDIS
*
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
index 81d3d4ed6dfb..5ea6b86f1fda 100644
--- a/drivers/usb/gadget/function/u_audio.h
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_audio.h -- interface to USB gadget "ALSA sound card" utilities
*
diff --git a/drivers/usb/gadget/function/u_ecm.h b/drivers/usb/gadget/function/u_ecm.h
index 098ece573a5e..77cfb89932be 100644
--- a/drivers/usb/gadget/function/u_ecm.h
+++ b/drivers/usb/gadget/function/u_ecm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_ecm.h
*
diff --git a/drivers/usb/gadget/function/u_eem.h b/drivers/usb/gadget/function/u_eem.h
index 921386a375cf..3bd85dfcd71c 100644
--- a/drivers/usb/gadget/function/u_eem.h
+++ b/drivers/usb/gadget/function/u_eem.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_eem.h
*
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 332307d54292..10dd640684e2 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_ether.h -- interface to USB gadget "ethernet link" utilities
*
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index d8b92485b727..bd92b5703013 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_ether_configfs.h
*
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index f9b0cf67360d..f102ec23f3af 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_fs.h
*
diff --git a/drivers/usb/gadget/function/u_gether.h b/drivers/usb/gadget/function/u_gether.h
index ce4f07626f96..2f7a373ed449 100644
--- a/drivers/usb/gadget/function/u_gether.h
+++ b/drivers/usb/gadget/function/u_gether.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_gether.h
*
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index 1594bfa312eb..84e6da302499 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_hid.h
*
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
index 29bf006c0a13..f6e14af7f566 100644
--- a/drivers/usb/gadget/function/u_midi.h
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_midi.h
*
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 70da3201a1d0..5408854d8407 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_ncm.h
*
diff --git a/drivers/usb/gadget/function/u_phonet.h b/drivers/usb/gadget/function/u_phonet.h
index 12fb613f85d1..c53233b37192 100644
--- a/drivers/usb/gadget/function/u_phonet.h
+++ b/drivers/usb/gadget/function/u_phonet.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_phonet.h - interface to Phonet
*
diff --git a/drivers/usb/gadget/function/u_printer.h b/drivers/usb/gadget/function/u_printer.h
index 78797764f478..318205fb778e 100644
--- a/drivers/usb/gadget/function/u_printer.h
+++ b/drivers/usb/gadget/function/u_printer.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_printer.h
*
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index 1e148b76f339..a8c409b2f52f 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_rndis.h
*
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 8167d379e115..3cfc6e2eba71 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -120,6 +120,8 @@ struct gs_port {
wait_queue_head_t drain_wait; /* wait while writes drain */
bool write_busy;
wait_queue_head_t close_wait;
+ bool suspended; /* port suspended */
+ bool start_delayed; /* delay start when suspended */
/* REVISIT this state ... */
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -630,13 +632,19 @@ static int gs_open(struct tty_struct *tty, struct file *file)
/* if connected, start the I/O stream */
if (port->port_usb) {
- struct gserial *gser = port->port_usb;
-
- pr_debug("gs_open: start ttyGS%d\n", port->port_num);
- gs_start_io(port);
-
- if (gser->connect)
- gser->connect(gser);
+		/* if port is suspended, wait for resume to start the I/O stream */
+ if (!port->suspended) {
+ struct gserial *gser = port->port_usb;
+
+ pr_debug("gs_open: start ttyGS%d\n", port->port_num);
+ gs_start_io(port);
+
+ if (gser->connect)
+ gser->connect(gser);
+ } else {
+ pr_debug("delay start of ttyGS%d\n", port->port_num);
+ port->start_delayed = true;
+ }
}
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -680,7 +688,7 @@ raced_with_open:
pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
gser = port->port_usb;
- if (gser && gser->disconnect)
+ if (gser && !port->suspended && gser->disconnect)
gser->disconnect(gser);
/* wait for circular write buffer to drain, disconnect, or at
@@ -708,6 +716,7 @@ raced_with_open:
else
kfifo_reset(&port->port_write_buf);
+ port->start_delayed = false;
port->port.count = 0;
port->port.tty = NULL;
@@ -1403,6 +1412,38 @@ void gserial_disconnect(struct gserial *gser)
}
EXPORT_SYMBOL_GPL(gserial_disconnect);
+void gserial_suspend(struct gserial *gser)
+{
+ struct gs_port *port = gser->ioport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->suspended = true;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gserial_suspend);
+
+void gserial_resume(struct gserial *gser)
+{
+ struct gs_port *port = gser->ioport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ port->suspended = false;
+ if (!port->start_delayed) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ return;
+ }
+
+ pr_debug("delayed start ttyGS%d\n", port->port_num);
+ gs_start_io(port);
+ if (gser->connect)
+ gser->connect(gser);
+ port->start_delayed = false;
+ spin_unlock_irqrestore(&port->port_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gserial_resume);
+
static int userial_init(void)
{
unsigned i;
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index e5b08ab8cf7a..cadb76eecbc7 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_serial.h - interface to USB gadget "serial port"/TTY utilities
*
@@ -68,6 +68,8 @@ ssize_t gserial_get_console(unsigned char port_num, char *page);
/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);
+void gserial_suspend(struct gserial *p);
+void gserial_resume(struct gserial *p);
/* functions are bound to configurations by a config or gadget driver */
int gser_bind_config(struct usb_configuration *c, u8 port_num);
diff --git a/drivers/usb/gadget/function/u_tcm.h b/drivers/usb/gadget/function/u_tcm.h
index 3f7ccecb0f9b..2cd15d9a1c0d 100644
--- a/drivers/usb/gadget/function/u_tcm.h
+++ b/drivers/usb/gadget/function/u_tcm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_tcm.h
*
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 6f1a9d73defe..39c0e29e1b46 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_uac1.h - Utility definitions for UAC1 function
*
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
index 5c1bdf46fe32..b5df9bcbbeba 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.h
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
*
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 82048791eb6e..b5035711172d 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_uac2.h
*
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 16da49a2fcf2..9a01a7d4f17f 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* u_uvc.h
*
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 1473d25ff17a..23ee25383c1f 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* uvc_gadget.h -- USB Video Class Gadget driver
*
@@ -77,6 +77,8 @@ struct uvc_video {
struct uvc_device *uvc;
struct usb_ep *ep;
+ struct work_struct pump;
+
/* Frame parameters */
u8 bpp;
u32 fcc;
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 341391dbc81f..7e1d7ca29bf2 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* uvc_configfs.h
*
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 495f0ec663ea..4ca89eab6159 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -169,7 +169,9 @@ uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
if (ret < 0)
return ret;
- return uvcg_video_pump(video);
+ schedule_work(&video->pump);
+
+ return ret;
}
static int
diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h
index 452d71059b3f..1576005b61fd 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.h
+++ b/drivers/usb/gadget/function/uvc_v4l2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* uvc_v4l2.h -- USB Video Class Gadget driver
*
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 5c042f380708..633e23d58d86 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -142,44 +142,12 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
return ret;
}
-/*
- * I somehow feel that synchronisation won't be easy to achieve here. We have
- * three events that control USB requests submission:
- *
- * - USB request completion: the completion handler will resubmit the request
- * if a video buffer is available.
- *
- * - USB interface setting selection: in response to a SET_INTERFACE request,
- * the handler will start streaming if a video buffer is available and if
- * video is not currently streaming.
- *
- * - V4L2 buffer queueing: the driver will start streaming if video is not
- * currently streaming.
- *
- * Race conditions between those 3 events might lead to deadlocks or other
- * nasty side effects.
- *
- * The "video currently streaming" condition can't be detected by the irqqueue
- * being empty, as a request can still be in flight. A separate "queue paused"
- * flag is thus needed.
- *
- * The paused flag will be set when we try to retrieve the irqqueue head if the
- * queue is empty, and cleared when we queue a buffer.
- *
- * The USB request completion handler will get the buffer at the irqqueue head
- * under protection of the queue spinlock. If the queue is empty, the streaming
- * paused flag will be set. Right after releasing the spinlock a userspace
- * application can queue a buffer. The flag will then cleared, and the ioctl
- * handler will restart the video stream.
- */
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
struct uvc_video *video = req->context;
struct uvc_video_queue *queue = &video->queue;
- struct uvc_buffer *buf;
unsigned long flags;
- int ret;
switch (req->status) {
case 0:
@@ -188,39 +156,20 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
case -ESHUTDOWN: /* disconnect from host. */
uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
- goto requeue;
+ break;
default:
uvcg_info(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
- goto requeue;
}
- spin_lock_irqsave(&video->queue.irqlock, flags);
- buf = uvcg_queue_head(&video->queue);
- if (buf == NULL) {
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
- goto requeue;
- }
-
- video->encode(req, video, buf);
-
- ret = uvcg_video_ep_queue(video, req);
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
-
- if (ret < 0) {
- uvcg_queue_cancel(queue, 0);
- goto requeue;
- }
-
- return;
-
-requeue:
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
+
+ schedule_work(&video->pump);
}
static int
@@ -294,18 +243,15 @@ error:
* This function fills the available USB requests (listed in req_free) with
* video data from the queued buffers.
*/
-int uvcg_video_pump(struct uvc_video *video)
+static void uvcg_video_pump(struct work_struct *work)
{
+ struct uvc_video *video = container_of(work, struct uvc_video, pump);
struct uvc_video_queue *queue = &video->queue;
struct usb_request *req;
struct uvc_buffer *buf;
unsigned long flags;
int ret;
- /* FIXME TODO Race between uvcg_video_pump and requests completion
- * handler ???
- */
-
while (1) {
/* Retrieve the first available USB request, protected by the
* request lock.
@@ -313,7 +259,7 @@ int uvcg_video_pump(struct uvc_video *video)
spin_lock_irqsave(&video->req_lock, flags);
if (list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags);
- return 0;
+ return;
}
req = list_first_entry(&video->req_free, struct usb_request,
list);
@@ -345,7 +291,7 @@ int uvcg_video_pump(struct uvc_video *video)
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
- return 0;
+ return;
}
/*
@@ -363,6 +309,9 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
}
if (!enable) {
+ cancel_work_sync(&video->pump);
+ uvcg_queue_cancel(&video->queue, 0);
+
for (i = 0; i < UVC_NUM_REQUESTS; ++i)
if (video->req[i])
usb_ep_dequeue(video->ep, video->req[i]);
@@ -384,7 +333,9 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
} else
video->encode = uvc_video_encode_isoc;
- return uvcg_video_pump(video);
+ schedule_work(&video->pump);
+
+ return ret;
}
/*
@@ -394,6 +345,7 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);
+ INIT_WORK(&video->pump, uvcg_video_pump);
video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index dff12103f696..03adeefa343b 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* uvc_video.h -- USB Video Class Gadget driver
*
@@ -14,8 +14,6 @@
struct uvc_video;
-int uvcg_video_pump(struct uvc_video *video);
-
int uvcg_video_enable(struct uvc_video *video, int enable);
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
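
With uvcg_video_pump() now a work handler, the qbuf ioctl, the request completion handler, and the enable path all schedule the same work item, and disable cancels it before tearing the queue down. The generic shape of that conversion (names here are illustrative; the real field is video->pump above):

	#include <linux/workqueue.h>

	struct demo_video {
		struct work_struct pump;
		/* buffer queue, free-request list, locks, ... */
	};

	static void demo_pump(struct work_struct *work)
	{
		struct demo_video *video = container_of(work, struct demo_video, pump);

		(void)video;	/* fill free USB requests from queued buffers here */
	}

	static void demo_init(struct demo_video *video)
	{
		INIT_WORK(&video->pump, demo_pump);
	}

	static void demo_kick(struct demo_video *video)
	{
		schedule_work(&video->pump);	/* from qbuf, completion, enable */
	}

	static void demo_disable(struct demo_video *video)
	{
		cancel_work_sync(&video->pump);	/* before cancelling the queue */
	}
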
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 3afddd3bea6e..9ee0bfe7bcda 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -21,7 +21,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
@@ -462,9 +462,9 @@ static void ep_user_copy_worker(struct work_struct *work)
struct kiocb *iocb = priv->iocb;
size_t ret;
- use_mm(mm);
+ kthread_use_mm(mm);
ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
- unuse_mm(mm);
+ kthread_unuse_mm(mm);
if (!ret)
ret = -EFAULT;
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index f18f77584fc2..9ed22c5fb7fe 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -229,18 +229,8 @@ static struct usb_composite_driver msg_driver = {
.unbind = msg_unbind,
};
+module_usb_composite_driver(msg_driver);
+
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Michal Nazarewicz");
MODULE_LICENSE("GPL");
-
-static int __init msg_init(void)
-{
- return usb_composite_probe(&msg_driver);
-}
-module_init(msg_init);
-
-static void __exit msg_cleanup(void)
-{
- usb_composite_unregister(&msg_driver);
-}
-module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index f8d35dd60c34..cdf96911e4b1 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -134,11 +134,15 @@ static irqreturn_t ast_vhub_irq(int irq, void *data)
}
/* Handle device interrupts */
- for (i = 0; i < vhub->max_ports; i++) {
- u32 dev_mask = VHUB_IRQ_DEVICE1 << i;
+ if (istat & vhub->port_irq_mask) {
+ unsigned long bitmap = istat;
+ int offset = VHUB_IRQ_DEV1_BIT;
+ int size = VHUB_IRQ_DEV1_BIT + vhub->max_ports;
- if (istat & dev_mask)
+ for_each_set_bit_from(offset, &bitmap, size) {
+ i = offset - VHUB_IRQ_DEV1_BIT;
ast_vhub_dev_irq(&vhub->ports[i].dev);
+ }
}
/* Handle top-level vHub EP0 interrupts */
@@ -332,6 +336,8 @@ static int ast_vhub_probe(struct platform_device *pdev)
spin_lock_init(&vhub->lock);
vhub->pdev = pdev;
+ vhub->port_irq_mask = GENMASK(VHUB_IRQ_DEV1_BIT + vhub->max_ports - 1,
+ VHUB_IRQ_DEV1_BIT);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vhub->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -402,7 +408,9 @@ static int ast_vhub_probe(struct platform_device *pdev)
goto err;
/* Init hub emulation */
- ast_vhub_init_hub(vhub);
+ rc = ast_vhub_init_hub(vhub);
+ if (rc)
+ goto err;
/* Initialize HW */
ast_vhub_init_hw(vhub);
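
The interrupt handler now walks only the set device bits instead of testing every VHUB_IRQ_DEVICEn mask in turn; the for_each_set_bit_from() idiom in standalone form (the per-port handler is hypothetical):

	#include <linux/bitops.h>
	#include <linux/printk.h>

	static void demo_dispatch_dev_irqs(unsigned long istat, int first_bit,
					   int nr_ports)
	{
		unsigned long bitmap = istat;
		int bit = first_bit;

		for_each_set_bit_from(bit, &bitmap, first_bit + nr_ports) {
			int port = bit - first_bit;

			pr_debug("device %d interrupt\n", port);
			/* ast_vhub_dev_irq(&vhub->ports[port].dev) in the driver */
		}
	}
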
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 6e565c3dbb5b..6497185ec4e7 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -50,6 +50,7 @@
#define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
enum {
+ AST_VHUB_STR_INDEX_MAX = 4,
AST_VHUB_STR_MANUF = 3,
AST_VHUB_STR_PRODUCT = 2,
AST_VHUB_STR_SERIAL = 1,
@@ -72,13 +73,6 @@ static const struct usb_device_descriptor ast_vhub_dev_desc = {
.bNumConfigurations = 1,
};
-/* Patches to the above when forcing USB1 mode */
-static void ast_vhub_patch_dev_desc_usb1(struct usb_device_descriptor *desc)
-{
- desc->bcdUSB = cpu_to_le16(0x0100);
- desc->bDeviceProtocol = 0;
-}
-
/*
* Configuration descriptor: same comments as above
* regarding handling USB1 mode.
@@ -302,31 +296,81 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
if (len > dsize)
len = dsize;
- /* Patch it if forcing USB1 */
- if (desc_type == USB_DT_DEVICE && ep->vhub->force_usb1)
- ast_vhub_patch_dev_desc_usb1(ep->buf);
-
/* Shoot it from the EP buffer */
return ast_vhub_reply(ep, NULL, len);
}
+static struct usb_gadget_strings*
+ast_vhub_str_of_container(struct usb_gadget_string_container *container)
+{
+ return (struct usb_gadget_strings *)container->stash;
+}
+
+static int ast_vhub_collect_languages(struct ast_vhub *vhub, void *buf,
+ size_t size)
+{
+ int rc, hdr_len, nlangs, max_langs;
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+ struct usb_string_descriptor *sdesc = buf;
+
+ nlangs = 0;
+ hdr_len = sizeof(struct usb_descriptor_header);
+ max_langs = (size - hdr_len) / sizeof(sdesc->wData[0]);
+ list_for_each_entry(container, &vhub->vhub_str_desc, list) {
+ if (nlangs >= max_langs)
+ break;
+
+ lang_str = ast_vhub_str_of_container(container);
+ sdesc->wData[nlangs++] = cpu_to_le16(lang_str->language);
+ }
+
+ rc = hdr_len + nlangs * sizeof(sdesc->wData[0]);
+ sdesc->bLength = rc;
+ sdesc->bDescriptorType = USB_DT_STRING;
+
+ return rc;
+}
+
+static struct usb_gadget_strings *ast_vhub_lookup_string(struct ast_vhub *vhub,
+ u16 lang_id)
+{
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+
+ list_for_each_entry(container, &vhub->vhub_str_desc, list) {
+ lang_str = ast_vhub_str_of_container(container);
+ if (lang_str->language == lang_id)
+ return lang_str;
+ }
+
+ return NULL;
+}
+
static int ast_vhub_rep_string(struct ast_vhub_ep *ep,
u8 string_id, u16 lang_id,
u16 len)
{
- int rc = usb_gadget_get_string(&ep->vhub->vhub_str_desc,
- string_id, ep->buf);
+ int rc;
+ u8 buf[256];
+ struct ast_vhub *vhub = ep->vhub;
+ struct usb_gadget_strings *lang_str;
- /*
- * This should never happen unless we put too big strings in
- * the array above
- */
- BUG_ON(rc >= AST_VHUB_EP0_MAX_PACKET);
+ if (string_id == 0) {
+ rc = ast_vhub_collect_languages(vhub, buf, sizeof(buf));
+ } else {
+ lang_str = ast_vhub_lookup_string(vhub, lang_id);
+ if (!lang_str)
+ return std_req_stall;
- if (rc < 0)
+ rc = usb_gadget_get_string(lang_str, string_id, buf);
+ }
+
+ if (rc < 0 || rc >= AST_VHUB_EP0_MAX_PACKET)
return std_req_stall;
/* Shoot it from the EP buffer */
+ memcpy(ep->buf, buf, rc);
return ast_vhub_reply(ep, NULL, min_t(u16, rc, len));
}
@@ -832,11 +876,148 @@ void ast_vhub_hub_reset(struct ast_vhub *vhub)
writel(0, vhub->regs + AST_VHUB_EP1_STS_CHG);
}
-static void ast_vhub_init_desc(struct ast_vhub *vhub)
+static void ast_vhub_of_parse_dev_desc(struct ast_vhub *vhub,
+ const struct device_node *vhub_np)
+{
+ u16 id;
+ u32 data;
+
+ if (!of_property_read_u32(vhub_np, "vhub-vendor-id", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.idVendor = cpu_to_le16(id);
+ }
+ if (!of_property_read_u32(vhub_np, "vhub-product-id", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.idProduct = cpu_to_le16(id);
+ }
+ if (!of_property_read_u32(vhub_np, "vhub-device-revision", &data)) {
+ id = (u16)data;
+ vhub->vhub_dev_desc.bcdDevice = cpu_to_le16(id);
+ }
+}
+
+static void ast_vhub_fixup_usb1_dev_desc(struct ast_vhub *vhub)
+{
+ vhub->vhub_dev_desc.bcdUSB = cpu_to_le16(0x0100);
+ vhub->vhub_dev_desc.bDeviceProtocol = 0;
+}
+
+static struct usb_gadget_string_container*
+ast_vhub_str_container_alloc(struct ast_vhub *vhub)
+{
+ unsigned int size;
+ struct usb_string *str_array;
+ struct usb_gadget_strings *lang_str;
+ struct usb_gadget_string_container *container;
+
+ size = sizeof(*container);
+ size += sizeof(struct usb_gadget_strings);
+ size += sizeof(struct usb_string) * AST_VHUB_STR_INDEX_MAX;
+ container = devm_kzalloc(&vhub->pdev->dev, size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
+
+ lang_str = ast_vhub_str_of_container(container);
+ str_array = (struct usb_string *)(lang_str + 1);
+ lang_str->strings = str_array;
+ return container;
+}
+
+static void ast_vhub_str_deep_copy(struct usb_gadget_strings *dest,
+ const struct usb_gadget_strings *src)
{
+ struct usb_string *src_array = src->strings;
+ struct usb_string *dest_array = dest->strings;
+
+ dest->language = src->language;
+ if (src_array && dest_array) {
+ do {
+ *dest_array = *src_array;
+ dest_array++;
+ src_array++;
+ } while (src_array->s);
+ }
+}
+
+static int ast_vhub_str_alloc_add(struct ast_vhub *vhub,
+ const struct usb_gadget_strings *src_str)
+{
+ struct usb_gadget_strings *dest_str;
+ struct usb_gadget_string_container *container;
+
+ container = ast_vhub_str_container_alloc(vhub);
+ if (IS_ERR(container))
+ return PTR_ERR(container);
+
+ dest_str = ast_vhub_str_of_container(container);
+ ast_vhub_str_deep_copy(dest_str, src_str);
+ list_add_tail(&container->list, &vhub->vhub_str_desc);
+
+ return 0;
+}
+
+static const struct {
+ const char *name;
+ u8 id;
+} str_id_map[] = {
+ {"manufacturer", AST_VHUB_STR_MANUF},
+ {"product", AST_VHUB_STR_PRODUCT},
+ {"serial-number", AST_VHUB_STR_SERIAL},
+ {},
+};
+
+static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
+ const struct device_node *desc_np)
+{
+ u32 langid;
+ int ret = 0;
+ int i, offset;
+ const char *str;
+ struct device_node *child;
+ struct usb_string str_array[AST_VHUB_STR_INDEX_MAX];
+ struct usb_gadget_strings lang_str = {
+ .strings = (struct usb_string *)str_array,
+ };
+
+ for_each_child_of_node(desc_np, child) {
+ if (of_property_read_u32(child, "reg", &langid))
+ continue; /* no language identifier specified */
+
+ if (!usb_validate_langid(langid))
+ continue; /* invalid language identifier */
+
+ lang_str.language = langid;
+ for (i = offset = 0; str_id_map[i].name; i++) {
+ str = of_get_property(child, str_id_map[i].name, NULL);
+ if (str) {
+ str_array[offset].s = str;
+ str_array[offset].id = str_id_map[i].id;
+ offset++;
+ }
+ }
+ str_array[offset].id = 0;
+ str_array[offset].s = NULL;
+
+ ret = ast_vhub_str_alloc_add(vhub, &lang_str);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ast_vhub_init_desc(struct ast_vhub *vhub)
+{
+ int ret;
+ struct device_node *desc_np;
+ const struct device_node *vhub_np = vhub->pdev->dev.of_node;
+
/* Initialize vhub Device Descriptor. */
memcpy(&vhub->vhub_dev_desc, &ast_vhub_dev_desc,
sizeof(vhub->vhub_dev_desc));
+ ast_vhub_of_parse_dev_desc(vhub, vhub_np);
+ if (vhub->force_usb1)
+ ast_vhub_fixup_usb1_dev_desc(vhub);
/* Initialize vhub Configuration Descriptor. */
memcpy(&vhub->vhub_conf_desc, &ast_vhub_conf_desc,
@@ -848,15 +1029,20 @@ static void ast_vhub_init_desc(struct ast_vhub *vhub)
vhub->vhub_hub_desc.bNbrPorts = vhub->max_ports;
/* Initialize vhub String Descriptors. */
- memcpy(&vhub->vhub_str_desc, &ast_vhub_strings,
- sizeof(vhub->vhub_str_desc));
+ INIT_LIST_HEAD(&vhub->vhub_str_desc);
+ desc_np = of_get_child_by_name(vhub_np, "vhub-strings");
+ if (desc_np)
+ ret = ast_vhub_of_parse_str_desc(vhub, desc_np);
+ else
+ ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
+
+ return ret;
}
-void ast_vhub_init_hub(struct ast_vhub *vhub)
+int ast_vhub_init_hub(struct ast_vhub *vhub)
{
vhub->speed = USB_SPEED_UNKNOWN;
INIT_WORK(&vhub->wake_work, ast_vhub_wake_work);
- ast_vhub_init_desc(vhub);
+ return ast_vhub_init_desc(vhub);
}
-
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index fac79ef6d669..2e5a1ef14a75 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -51,14 +51,11 @@
#define VHUB_CTRL_UPSTREAM_CONNECT (1 << 0)
/* IER & ISR */
+#define VHUB_IRQ_DEV1_BIT 9
#define VHUB_IRQ_USB_CMD_DEADLOCK (1 << 18)
#define VHUB_IRQ_EP_POOL_NAK (1 << 17)
#define VHUB_IRQ_EP_POOL_ACK_STALL (1 << 16)
-#define VHUB_IRQ_DEVICE5 (1 << 13)
-#define VHUB_IRQ_DEVICE4 (1 << 12)
-#define VHUB_IRQ_DEVICE3 (1 << 11)
-#define VHUB_IRQ_DEVICE2 (1 << 10)
-#define VHUB_IRQ_DEVICE1 (1 << 9)
+#define VHUB_IRQ_DEVICE1 (1 << (VHUB_IRQ_DEV1_BIT))
#define VHUB_IRQ_BUS_RESUME (1 << 8)
#define VHUB_IRQ_BUS_SUSPEND (1 << 7)
#define VHUB_IRQ_BUS_RESET (1 << 6)
@@ -402,6 +399,7 @@ struct ast_vhub {
/* Per-port info */
struct ast_vhub_port *ports;
u32 max_ports;
+ u32 port_irq_mask;
/* Generic EP data structures */
struct ast_vhub_ep *epns;
@@ -423,7 +421,7 @@ struct ast_vhub {
struct usb_device_descriptor vhub_dev_desc;
struct ast_vhub_full_cdesc vhub_conf_desc;
struct usb_hub_descriptor vhub_hub_desc;
- struct usb_gadget_strings vhub_str_desc;
+ struct list_head vhub_str_desc;
};
/* Standard request handlers result codes */
@@ -533,7 +531,7 @@ int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...);
__VA_ARGS__)
/* hub.c */
-void ast_vhub_init_hub(struct ast_vhub *vhub);
+int ast_vhub_init_hub(struct ast_vhub *vhub);
enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
struct usb_ctrlrequest *crq);
enum std_req_rc ast_vhub_class_hub_request(struct ast_vhub_ep *ep,
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index b771a854e29c..d69f61ff0181 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2043,10 +2043,56 @@ static const struct usba_udc_errata at91sam9g45_errata = {
.pulse_bias = at91sam9g45_pulse_bias,
};
+static const struct usba_ep_config ep_config_sam9[] __initconst = {
+ { .nr_banks = 1 }, /* ep 0 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
+ { .nr_banks = 3, .can_dma = 1 }, /* ep 3 */
+ { .nr_banks = 3, .can_dma = 1 }, /* ep 4 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
+};
+
+static const struct usba_ep_config ep_config_sama5[] __initconst = {
+ { .nr_banks = 1 }, /* ep 0 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 1 */
+ { .nr_banks = 3, .can_dma = 1, .can_isoc = 1 }, /* ep 2 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 3 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 4 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 5 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 6 */
+ { .nr_banks = 2, .can_dma = 1, .can_isoc = 1 }, /* ep 7 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 8 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 9 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 10 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 11 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 12 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 13 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 14 */
+ { .nr_banks = 2, .can_isoc = 1 }, /* ep 15 */
+};
+
+static const struct usba_udc_config udc_at91sam9rl_cfg = {
+ .errata = &at91sam9rl_errata,
+ .config = ep_config_sam9,
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+};
+
+static const struct usba_udc_config udc_at91sam9g45_cfg = {
+ .errata = &at91sam9g45_errata,
+ .config = ep_config_sam9,
+ .num_ep = ARRAY_SIZE(ep_config_sam9),
+};
+
+static const struct usba_udc_config udc_sama5d3_cfg = {
+ .config = ep_config_sama5,
+ .num_ep = ARRAY_SIZE(ep_config_sama5),
+};
+
static const struct of_device_id atmel_udc_dt_ids[] = {
- { .compatible = "atmel,at91sam9rl-udc", .data = &at91sam9rl_errata },
- { .compatible = "atmel,at91sam9g45-udc", .data = &at91sam9g45_errata },
- { .compatible = "atmel,sama5d3-udc" },
+ { .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg },
+ { .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg },
+ { .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg },
{ /* sentinel */ }
};
@@ -2055,18 +2101,19 @@ MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids);
static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
- u32 val;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *pp;
int i, ret;
struct usba_ep *eps, *ep;
+ const struct usba_udc_config *udc_config;
match = of_match_node(atmel_udc_dt_ids, np);
if (!match)
return ERR_PTR(-EINVAL);
- udc->errata = match->data;
+ udc_config = match->data;
+ udc->errata = udc_config->errata;
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
if (IS_ERR(udc->pmc))
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
@@ -2082,8 +2129,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
if (fifo_mode == 0) {
pp = NULL;
- while ((pp = of_get_next_child(np, pp)))
- udc->num_ep++;
+ udc->num_ep = udc_config->num_ep;
udc->configured_ep = 1;
} else {
udc->num_ep = usba_config_fifo_table(udc);
@@ -2100,52 +2146,38 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
pp = NULL;
i = 0;
- while ((pp = of_get_next_child(np, pp)) && i < udc->num_ep) {
+ while (i < udc->num_ep) {
+ const struct usba_ep_config *ep_cfg = &udc_config->config[i];
+
ep = &eps[i];
- ret = of_property_read_u32(pp, "reg", &val);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret);
- goto err;
- }
- ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : val;
+ ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : i;
+
+ /* Only the first EP is 64 bytes */
+ if (ep->index == 0)
+ ep->fifo_size = 64;
+ else
+ ep->fifo_size = 1024;
- ret = of_property_read_u32(pp, "atmel,fifo-size", &val);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret);
- goto err;
- }
if (fifo_mode) {
- if (val < udc->fifo_cfg[i].fifo_size) {
+ if (ep->fifo_size < udc->fifo_cfg[i].fifo_size)
dev_warn(&pdev->dev,
- "Using max fifo-size value from DT\n");
- ep->fifo_size = val;
- } else {
+ "Using default max fifo-size value\n");
+ else
ep->fifo_size = udc->fifo_cfg[i].fifo_size;
- }
- } else {
- ep->fifo_size = val;
}
- ret = of_property_read_u32(pp, "atmel,nb-banks", &val);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret);
- goto err;
- }
+ ep->nr_banks = ep_cfg->nr_banks;
if (fifo_mode) {
- if (val < udc->fifo_cfg[i].nr_banks) {
+ if (ep->nr_banks < udc->fifo_cfg[i].nr_banks)
dev_warn(&pdev->dev,
- "Using max nb-banks value from DT\n");
- ep->nr_banks = val;
- } else {
+ "Using default max nb-banks value\n");
+ else
ep->nr_banks = udc->fifo_cfg[i].nr_banks;
- }
- } else {
- ep->nr_banks = val;
}
- ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
- ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
+ ep->can_dma = ep_cfg->can_dma;
+ ep->can_isoc = ep_cfg->can_isoc;
sprintf(ep->name, "ep%d", ep->index);
ep->ep.name = ep->name;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index a0225e4543d4..48e332439ed5 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -290,6 +290,12 @@ struct usba_ep {
#endif
};
+struct usba_ep_config {
+ u8 nr_banks;
+ unsigned int can_dma:1;
+ unsigned int can_isoc:1;
+};
+
struct usba_request {
struct usb_request req;
struct list_head queue;
@@ -307,6 +313,12 @@ struct usba_udc_errata {
void (*pulse_bias)(struct usba_udc *udc);
};
+struct usba_udc_config {
+ const struct usba_udc_errata *errata;
+ const struct usba_ep_config *config;
+ const int num_ep;
+};
+
struct usba_udc {
/* Protect hw registers from concurrent modifications */
spinlock_t lock;
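
Endpoint capabilities are now carried by per-SoC tables hung off the of_device_id .data pointer instead of per-endpoint DT properties; the general match-data pattern looks like this (names are illustrative, the real lookup is the of_match_node() call in atmel_udc_of_init()):

	#include <linux/of.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		const struct usba_udc_config *cfg = of_device_get_match_data(&pdev->dev);

		if (!cfg)
			return -EINVAL;

		/* cfg->num_ep entries of cfg->config[] describe the endpoints */
		return 0;
	}
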
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9b11046480fe..2e28dde8376f 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1297,6 +1297,8 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
+ if (udc->gadget->irq)
+ synchronize_irq(udc->gadget->irq);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 6e3e3ebf715f..0eeaead5acea 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -187,31 +187,31 @@ static const struct {
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
/* and now some generic EPs so we have enough in multi config */
- EP_INFO("ep3out",
+ EP_INFO("ep-aout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep4in",
+ EP_INFO("ep-bin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep5out",
+ EP_INFO("ep-cout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep6out",
+ EP_INFO("ep-dout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep7in",
+ EP_INFO("ep-ein",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep8out",
+ EP_INFO("ep-fout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep9in",
+ EP_INFO("ep-gin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep10out",
+ EP_INFO("ep-hout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep11out",
+ EP_INFO("ep-iout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep12in",
+ EP_INFO("ep-jin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep13out",
+ EP_INFO("ep-kout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
- EP_INFO("ep14in",
+ EP_INFO("ep-lin",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep15out",
+ EP_INFO("ep-mout",
USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
@@ -427,6 +427,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
/* caller must hold lock */
static void set_link_state(struct dummy_hcd *dum_hcd)
+ __must_hold(&dum->lock)
{
struct dummy *dum = dum_hcd->dum;
unsigned int power_bit;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index febabde62f71..b2638e83bb49 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2440,8 +2440,8 @@ static int fsl_udc_probe(struct platform_device *pdev)
udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2;
udc_controller->irq = platform_get_irq(pdev, 0);
- if (!udc_controller->irq) {
- ret = -ENODEV;
+ if (udc_controller->irq <= 0) {
+ ret = udc_controller->irq ? : -ENODEV;
goto err_iounmap;
}
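
platform_get_irq() returns a negative errno on failure, and 0 is still treated as "no interrupt" here, hence the widened check; the idiom on its own:

	#include <linux/errno.h>
	#include <linux/platform_device.h>

	static int demo_get_irq(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;		/* propagate the errno */
		if (!irq)
			return -ENODEV;		/* legacy "no irq" value */

		return irq;
	}
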
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index aaf975c809bf..7164ad9800f1 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -48,7 +48,6 @@
#define DRIVER_DESC "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
static const char driver_name[] = DRIVER_NAME;
-static const char driver_desc[] = DRIVER_DESC;
#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index cb997b82c008..465d0b7c6522 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1614,17 +1614,17 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep,
const struct usb_endpoint_descriptor *desc)
{
struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
- struct lpc32xx_udc *udc = ep->udc;
+ struct lpc32xx_udc *udc;
u16 maxpacket;
u32 tmp;
unsigned long flags;
/* Verify EP data */
if ((!_ep) || (!ep) || (!desc) ||
- (desc->bDescriptorType != USB_DT_ENDPOINT)) {
- dev_dbg(udc->dev, "bad ep or descriptor\n");
+ (desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
- }
+
+ udc = ep->udc;
maxpacket = usb_endpoint_maxp(desc);
if ((maxpacket == 0) || (maxpacket > ep->maxpacket)) {
dev_dbg(udc->dev, "bad ep descriptor's packet size\n");
@@ -1872,7 +1872,7 @@ static int lpc32xx_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
{
struct lpc32xx_ep *ep = container_of(_ep, struct lpc32xx_ep, ep);
- struct lpc32xx_udc *udc = ep->udc;
+ struct lpc32xx_udc *udc;
unsigned long flags;
if ((!ep) || (ep->hwep_num <= 1))
@@ -1882,6 +1882,7 @@ static int lpc32xx_ep_set_halt(struct usb_ep *_ep, int value)
if (ep->is_in)
return -EAGAIN;
+ udc = ep->udc;
spin_lock_irqsave(&udc->lock, flags);
if (value == 1) {
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 75d16a8902e6..931e6362a13d 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1667,7 +1667,7 @@ static int m66592_probe(struct platform_device *pdev)
err_add_udc:
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
-
+ m66592->ep0_req = NULL;
clean_up3:
if (m66592->pdata->on_chip) {
clk_disable(m66592->clk);
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 8fbc083b6732..23f33946d80c 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -901,7 +901,7 @@ loop:
}
set_current_state(TASK_RUNNING);
- dev_info(udc->dev, "SPI thread exiting");
+ dev_info(udc->dev, "SPI thread exiting\n");
return 0;
}
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 35e02a8d0091..5bb0568b934e 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1548,7 +1548,7 @@ static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num,
delegate = true;
/* delegate USB standard requests to the gadget driver */
- if (delegate == true) {
+ if (delegate) {
/* USB requests handled by gadget */
if (setup->wLength) {
/* DATA phase from gadget, STATUS phase from u3d */
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 5af0fe9c61d7..928057b206f1 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -54,7 +54,7 @@ static const char * const ep_name[] = {
*
* If use_dma is disabled, pio will be used instead.
*/
-static bool use_dma = 0;
+static bool use_dma = false;
module_param(use_dma, bool, 0644);
/*
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index bf87c6c0d7f6..4139da885651 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2576,7 +2576,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
case USB_ENDPOINT_XFER_INT:
ep->ep.caps.type_int = true;
break;
- };
+ }
if (addr & USB_DIR_IN)
ep->ep.caps.dir_in = true;
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 0507a2ca0f55..80002d97b59d 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -251,10 +251,6 @@ static void s3c2410_udc_done(struct s3c2410_ep *ep,
static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
struct s3c2410_ep *ep, int status)
{
- /* Sanity check */
- if (&ep->queue == NULL)
- return;
-
while (!list_empty(&ep->queue)) {
struct s3c2410_request *req;
req = list_entry(ep->queue.next, struct s3c2410_request,
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index dfabc54cdc27..bbe1a04686da 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -158,6 +158,30 @@
#define SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK GENMASK(7, 0)
#define SSPX_CORE_CNT32_POLL_TBURST_MAX(x) ((x) & \
SSPX_CORE_CNT32_POLL_TBURST_MAX_MASK)
+#define SSPX_CORE_CNT56 0x6fc
+#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(x) ((x) & \
+ SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK)
+#define SSPX_CORE_CNT57 0x700
+#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(x) ((x) & \
+ SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK)
+#define SSPX_CORE_CNT65 0x720
+#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK)
+#define SSPX_CORE_CNT66 0x724
+#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK)
+#define SSPX_CORE_CNT67 0x728
+#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(x) ((x) & \
+ SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK)
+#define SSPX_CORE_CNT72 0x73c
+#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK GENMASK(19, 0)
+#define SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(x) ((x) & \
+ SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK)
#define SSPX_CORE_PADCTL4 0x750
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK GENMASK(19, 0)
#define SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3(x) ((x) & \
@@ -492,6 +516,7 @@ struct tegra_xudc {
bool powergated;
struct usb_phy **usbphy;
+ struct usb_phy *curr_usbphy;
struct notifier_block vbus_nb;
struct completion disconnect_complete;
@@ -530,6 +555,7 @@ struct tegra_xudc_soc {
bool invalid_seq_num;
bool pls_quirk;
bool port_reset_quirk;
+ bool port_speed_quirk;
bool has_ipfs;
};
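
The SSPX_CORE_CNT* tuning added below follows the usual read-modify-write pattern on 20-bit register fields: clear the GENMASK()-defined field, then OR in the new value through the field macro. In isolation (readl()/writel() stand in for the driver's xudc_readl()/xudc_writel() accessors):

	#include <linux/bits.h>
	#include <linux/io.h>

	#define DEMO_FIELD_MASK		GENMASK(19, 0)
	#define DEMO_FIELD(x)		((x) & DEMO_FIELD_MASK)

	static void demo_update_field(void __iomem *reg, u32 new_val)
	{
		u32 val = readl(reg);

		val &= ~DEMO_FIELD_MASK;	/* clear the 20-bit field */
		val |= DEMO_FIELD(new_val);	/* insert the new value   */
		writel(val, reg);
	}
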
@@ -599,6 +625,78 @@ static inline void dump_trb(struct tegra_xudc *xudc, const char *type,
trb->control);
}
+static void tegra_xudc_limit_port_speed(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ /* limit port speed to gen 1 */
+ val = xudc_readl(xudc, SSPX_CORE_CNT56);
+ val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x260);
+ xudc_writel(xudc, val, SSPX_CORE_CNT56);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT57);
+ val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x6D6);
+ xudc_writel(xudc, val, SSPX_CORE_CNT57);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT65);
+ val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0x4B0);
+	xudc_writel(xudc, val, SSPX_CORE_CNT65);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT66);
+ val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT66);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT67);
+ val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x4B0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT67);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT72);
+ val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
+ val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x10);
+ xudc_writel(xudc, val, SSPX_CORE_CNT72);
+}
+
+static void tegra_xudc_restore_port_speed(struct tegra_xudc *xudc)
+{
+ u32 val;
+
+ /* restore port speed to gen2 */
+ val = xudc_readl(xudc, SSPX_CORE_CNT56);
+ val &= ~(SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT56_SCD_BIT0_TRPT_MAX(0x438);
+ xudc_writel(xudc, val, SSPX_CORE_CNT56);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT57);
+ val &= ~(SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX_MASK);
+ val |= SSPX_CORE_CNT57_SCD_BIT1_TRPT_MAX(0x528);
+ xudc_writel(xudc, val, SSPX_CORE_CNT57);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT65);
+ val &= ~(SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT65_TX_SCD_END_TRPT_MID(0xE10);
+ xudc_writel(xudc, val, SSPX_CORE_CNT65);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT66);
+ val &= ~(SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT66_TX_SCD_BIT0_TRPT_MID(0x348);
+ xudc_writel(xudc, val, SSPX_CORE_CNT66);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT67);
+ val &= ~(SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID_MASK);
+ val |= SSPX_CORE_CNT67_TX_SCD_BIT1_TRPT_MID(0x5a0);
+ xudc_writel(xudc, val, SSPX_CORE_CNT67);
+
+ val = xudc_readl(xudc, SSPX_CORE_CNT72);
+ val &= ~(SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT_MASK);
+ val |= SSPX_CORE_CNT72_SCD_LFPS_TIMEOUT(0x1c21);
+ xudc_writel(xudc, val, SSPX_CORE_CNT72);
+}
+
static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc)
{
int err;
@@ -631,6 +729,9 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc)
reinit_completion(&xudc->disconnect_complete);
+ if (xudc->soc->port_speed_quirk)
+ tegra_xudc_restore_port_speed(xudc);
+
phy_set_mode_ext(xudc->curr_utmi_phy, PHY_MODE_USB_OTG, USB_ROLE_NONE);
pls = (xudc_readl(xudc, PORTSC) & PORTSC_PLS_MASK) >>
@@ -719,6 +820,7 @@ static int tegra_xudc_vbus_notify(struct notifier_block *nb,
if (!xudc->suspended && phy_index != -1) {
xudc->curr_utmi_phy = xudc->utmi_phy[phy_index];
xudc->curr_usb3_phy = xudc->usb3_phy[phy_index];
+ xudc->curr_usbphy = usbphy;
schedule_work(&xudc->usb_role_sw_work);
}
@@ -2042,6 +2144,20 @@ static int tegra_xudc_gadget_stop(struct usb_gadget *gadget)
return 0;
}
+static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
+ unsigned int m_a)
+{
+ int ret = 0;
+ struct tegra_xudc *xudc = to_xudc(gadget);
+
+ dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);
+
+ if (xudc->curr_usbphy->chg_type == SDP_TYPE)
+ ret = usb_phy_set_power(xudc->curr_usbphy, m_a);
+
+ return ret;
+}
+
static int tegra_xudc_set_selfpowered(struct usb_gadget *gadget, int is_on)
{
struct tegra_xudc *xudc = to_xudc(gadget);
@@ -2058,6 +2174,7 @@ static struct usb_gadget_ops tegra_xudc_gadget_ops = {
.pullup = tegra_xudc_gadget_pullup,
.udc_start = tegra_xudc_gadget_start,
.udc_stop = tegra_xudc_gadget_stop,
+ .vbus_draw = tegra_xudc_gadget_vbus_draw,
.set_selfpowered = tegra_xudc_set_selfpowered,
};
@@ -3274,6 +3391,9 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
xudc_writel(xudc, val, BLCG);
}
+ if (xudc->soc->port_speed_quirk)
+ tegra_xudc_limit_port_speed(xudc);
+
/* Set a reasonable U3 exit timer value. */
val = xudc_readl(xudc, SSPX_CORE_PADCTL4);
val &= ~(SSPX_CORE_PADCTL4_RXDAT_VLD_TIMEOUT_U3_MASK);
@@ -3506,6 +3626,7 @@ static struct tegra_xudc_soc tegra210_xudc_soc_data = {
.invalid_seq_num = true,
.pls_quirk = true,
.port_reset_quirk = true,
+ .port_speed_quirk = false,
.has_ipfs = true,
};
@@ -3519,6 +3640,21 @@ static struct tegra_xudc_soc tegra186_xudc_soc_data = {
.invalid_seq_num = false,
.pls_quirk = false,
.port_reset_quirk = false,
+ .port_speed_quirk = false,
+ .has_ipfs = false,
+};
+
+static struct tegra_xudc_soc tegra194_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = true,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .port_speed_quirk = true,
.has_ipfs = false,
};
@@ -3531,6 +3667,10 @@ static const struct of_device_id tegra_xudc_of_match[] = {
.compatible = "nvidia,tegra186-xudc",
.data = &tegra186_xudc_soc_data
},
+ {
+ .compatible = "nvidia,tegra194-xudc",
+ .data = &tegra194_xudc_soc_data
+ },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index b1cfc8279c3d..709553bdb233 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1732,6 +1732,7 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
* Process setup packet and delegate to gadget layer.
*/
static void xudc_handle_setup(struct xusb_udc *udc)
+ __must_hold(&udc->lock)
{
struct xusb_ep *ep0 = &udc->ep[0];
struct usb_ctrlrequest setup;
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 7c24d1ce1088..58a4d3325090 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -65,3 +65,27 @@ usb_gadget_get_string (const struct usb_gadget_strings *table, int id, u8 *buf)
return buf [0];
}
EXPORT_SYMBOL_GPL(usb_gadget_get_string);
+
+/**
+ * usb_validate_langid - validate a USB language identifier
+ * @langid: USB language identifier
+ *
+ * Returns true for a valid language identifier, false otherwise.
+ */
+bool usb_validate_langid(u16 langid)
+{
+ u16 primary_lang = langid & 0x3ff; /* bits [9:0] */
+ u16 sub_lang = langid >> 10; /* bits [15:10] */
+
+ switch (primary_lang) {
+ case 0:
+ case 0x62 ... 0xfe:
+ case 0x100 ... 0x3ff:
+ return false;
+ }
+ if (!sub_lang)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(usb_validate_langid);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 55bdfdf11e4c..62c348062e48 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,8 +40,17 @@ config USB_XHCI_DBGCAP
config USB_XHCI_PCI
tristate
depends on USB_PCI
+ depends on USB_XHCI_PCI_RENESAS || !USB_XHCI_PCI_RENESAS
default y
+config USB_XHCI_PCI_RENESAS
+ tristate "Support for additional Renesas xHCI controller with firwmare"
+ ---help---
+ Say 'Y' to enable support for the Renesas xHCI controller with
+ firmware. Make sure you have the firmware for the device
+ installed on your system for this device to work.
+ If unsure, say 'N'.
+
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
select USB_XHCI_RCAR if ARCH_RENESAS
@@ -97,6 +106,26 @@ config USB_XHCI_TEGRA
endif # USB_XHCI_HCD
+config USB_EHCI_BRCMSTB
+ tristate
+
+config USB_BRCMSTB
+ tristate "Broadcom STB USB support"
+ depends on (ARCH_BRCMSTB && PHY_BRCM_USB) || COMPILE_TEST
+ select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
+ select USB_EHCI_BRCMSTB if USB_EHCI_HCD
+ select USB_XHCI_PLATFORM if USB_XHCI_HCD
+ help
+ Enables support for XHCI, EHCI and OHCI host controllers
+ found in Broadcom STB SoCs.
+
+ To compile these drivers as modules, choose M here: the
+ modules will be called ohci-platform.ko, ehci-brcm.ko and
+ xhci-plat-hcd.ko
+
+ Disabling this will keep the controllers and corresponding
+ PHYs powered down.
+
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
depends on HAS_DMA && HAS_IOMEM
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index b191361257cc..bc731332fed9 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
+obj-$(CONFIG_USB_EHCI_BRCMSTB) += ehci-brcm.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
+obj-$(CONFIG_USB_XHCI_PCI_RENESAS) += xhci-pci-renesas.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_HISTB) += xhci-histb.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk.o
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
new file mode 100644
index 000000000000..3e0ebe8cc649
--- /dev/null
+++ b/drivers/usb/host/ehci-brcm.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Broadcom */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/iopoll.h>
+
+#include "ehci.h"
+
+#define hcd_to_ehci_priv(h) ((struct brcm_priv *)hcd_to_ehci(h)->priv)
+
+struct brcm_priv {
+ struct clk *clk;
+};
+
+/*
+ * ehci_brcm_wait_for_sof
+ * Wait for start of next microframe, then wait extra delay microseconds
+ */
+static inline void ehci_brcm_wait_for_sof(struct ehci_hcd *ehci, u32 delay)
+{
+ u32 frame_idx = ehci_readl(ehci, &ehci->regs->frame_index);
+ u32 val;
+ int res;
+
+ /* Wait for next microframe (every 125 usecs) */
+ res = readl_relaxed_poll_timeout(&ehci->regs->frame_index, val,
+ val != frame_idx, 1, 130);
+ if (res)
+ ehci_err(ehci, "Error waiting for SOF\n");
+ udelay(delay);
+}
+
+/*
+ * ehci_brcm_hub_control
+ * The EHCI controller has a bug where it can violate the SOF
+ * interval between the first two SOFs transmitted after resume
+ * if the resume occurs near the end of the microframe. This causes
+ * the controller to detect babble on the suspended port, which
+ * eventually causes the controller to reset the port.
+ * The fix is to intercept the ehci-hcd request to complete RESUME and
+ * align it to the start of the next microframe.
+ * See SWLINUX-1909 for more details.
+ */
+static int ehci_brcm_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int ports = HCS_N_PORTS(ehci->hcs_params);
+ u32 __iomem *status_reg;
+ unsigned long flags;
+ int retval, irq_disabled = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ /*
+ * RESUME is cleared when GetPortStatus() is called 20ms after start
+ * of RESUME
+ */
+ if ((typeReq == GetPortStatus) &&
+ (wIndex && wIndex <= ports) &&
+ ehci->reset_done[wIndex-1] &&
+ time_after_eq(jiffies, ehci->reset_done[wIndex-1]) &&
+ (ehci_readl(ehci, status_reg) & PORT_RESUME)) {
+
+ /*
+ * to make sure we are not interrupted until RESUME bit
+ * is cleared, disable interrupts on current CPU
+ */
+ ehci_dbg(ehci, "SOF alignment workaround\n");
+ irq_disabled = 1;
+ local_irq_save(flags);
+ ehci_brcm_wait_for_sof(ehci, 5);
+ }
+ retval = ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+ if (irq_disabled)
+ local_irq_restore(flags);
+ return retval;
+}
+
+static int ehci_brcm_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ int len;
+
+ ehci->big_endian_mmio = 1;
+
+ ehci->caps = (void __iomem *)hcd->regs;
+ len = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
+ ehci->regs = (void __iomem *)(hcd->regs + len);
+
+ /* This fixes the lockup during reboot due to prior interrupts */
+ ehci_writel(ehci, CMD_RESET, &ehci->regs->command);
+ mdelay(10);
+
+ /*
+ * SWLINUX-1705: Avoid OUT packet underflows during high memory
+ * bus usage
+ * port_status[0x0f] = Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x90
+ */
+ ehci_writel(ehci, 0x00800040, &ehci->regs->port_status[0x10]);
+ ehci_writel(ehci, 0x00000001, &ehci->regs->port_status[0x12]);
+
+ return ehci_setup(hcd);
+}
+
+static struct hc_driver __read_mostly ehci_brcm_hc_driver;
+
+static const struct ehci_driver_overrides brcm_overrides __initconst = {
+ .reset = ehci_brcm_reset,
+ .extra_priv_size = sizeof(struct brcm_priv),
+};
+
+static int ehci_brcm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res_mem;
+ struct brcm_priv *priv;
+ struct usb_hcd *hcd;
+ int irq;
+ int err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return irq ? irq : -EINVAL;
+
+ /* Hook the hub control routine to work around a bug */
+ ehci_brcm_hc_driver.hub_control = ehci_brcm_hub_control;
+
+ /* initialize hcd */
+ hcd = usb_create_hcd(&ehci_brcm_hc_driver, dev, dev_name(dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hcd);
+ priv = hcd_to_ehci_priv(hcd);
+
+ priv->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ goto err_hcd;
+ }
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ goto err_hcd;
+
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
+ if (IS_ERR(hcd->regs)) {
+ err = PTR_ERR(hcd->regs);
+ goto err_clk;
+ }
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err)
+ goto err_clk;
+
+ device_wakeup_enable(hcd->self.controller);
+ device_enable_async_suspend(hcd->self.controller);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(priv->clk);
+err_hcd:
+ usb_put_hcd(hcd);
+
+ return err;
+}
+
+static int ehci_brcm_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
+
+ usb_remove_hcd(hcd);
+ clk_disable_unprepare(priv->clk);
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+static int __maybe_unused ehci_brcm_suspend(struct device *dev)
+{
+ int ret;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
+ bool do_wakeup = device_may_wakeup(dev);
+
+ ret = ehci_suspend(hcd, do_wakeup);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused ehci_brcm_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct brcm_priv *priv = hcd_to_ehci_priv(hcd);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+ /*
+ * SWLINUX-1705: Avoid OUT packet underflows during high memory
+ * bus usage
+ * port_status[0x0f] = Broadcom-proprietary USB_EHCI_INSNREG00
+ * @ 0x90
+ */
+ ehci_writel(ehci, 0x00800040, &ehci->regs->port_status[0x10]);
+ ehci_writel(ehci, 0x00000001, &ehci->regs->port_status[0x12]);
+
+ ehci_resume(hcd, false);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ehci_brcm_pm_ops, ehci_brcm_suspend,
+ ehci_brcm_resume);
+
+static const struct of_device_id brcm_ehci_of_match[] = {
+ { .compatible = "brcm,ehci-brcm-v2", },
+ { .compatible = "brcm,bcm7445-ehci", },
+ {}
+};
+
+static struct platform_driver ehci_brcm_driver = {
+ .probe = ehci_brcm_probe,
+ .remove = ehci_brcm_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "ehci-brcm",
+ .pm = &ehci_brcm_pm_ops,
+ .of_match_table = brcm_ehci_of_match,
+ }
+};
+
+static int __init ehci_brcm_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ ehci_init_driver(&ehci_brcm_hc_driver, &brcm_overrides);
+ return platform_driver_register(&ehci_brcm_driver);
+}
+module_init(ehci_brcm_init);
+
+static void __exit ehci_brcm_exit(void)
+{
+ platform_driver_unregister(&ehci_brcm_driver);
+}
+module_exit(ehci_brcm_exit);
+
+MODULE_ALIAS("platform:ehci-brcm");
+MODULE_DESCRIPTION("EHCI Broadcom STB driver");
+MODULE_AUTHOR("Al Cooper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 9d18c6e6ab27..c95341d472f4 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
* Copyright (c) 2005 MontaVista Software
*/
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 1300c457d9ed..cffdc8d01b2a 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -108,7 +108,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
struct ehci_hcd *ehci;
struct ehci_hcd_mv *ehci_mv;
struct resource *r;
- int retval = -ENODEV;
+ int retval;
u32 offset;
u32 status;
@@ -143,8 +143,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
-
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ehci_mv->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(ehci_mv->base)) {
@@ -168,12 +166,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(r);
hcd->regs = ehci_mv->op_regs;
- hcd->irq = platform_get_irq(pdev, 0);
- if (!hcd->irq) {
- dev_err(&pdev->dev, "Cannot get irq.");
- retval = -ENODEV;
+ retval = platform_get_irq(pdev, 0);
+ if (retval < 0)
goto err_disable_clk;
- }
+ hcd->irq = retval;
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps __iomem *) ehci_mv->cap_regs;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c9f91e6c72b6..dc2676320527 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,12 +36,12 @@ static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = {
static int ehci_mxc_drv_probe(struct platform_device *pdev)
{
- struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
struct ehci_mxc_priv *priv;
- struct device *dev = &pdev->dev;
struct ehci_hcd *ehci;
if (!pdata) {
@@ -50,13 +50,15 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_alloc;
@@ -69,14 +71,14 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
priv = (struct ehci_mxc_priv *) ehci->priv;
/* enable clocks */
- priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
+ priv->usbclk = devm_clk_get(dev, "ipg");
if (IS_ERR(priv->usbclk)) {
ret = PTR_ERR(priv->usbclk);
goto err_alloc;
}
clk_prepare_enable(priv->usbclk);
- priv->ahbclk = devm_clk_get(&pdev->dev, "ahb");
+ priv->ahbclk = devm_clk_get(dev, "ahb");
if (IS_ERR(priv->ahbclk)) {
ret = PTR_ERR(priv->ahbclk);
goto err_clk_ahb;
@@ -84,13 +86,12 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
clk_prepare_enable(priv->ahbclk);
/* "dr" device has its own clock on i.MX51 */
- priv->phyclk = devm_clk_get(&pdev->dev, "phy");
+ priv->phyclk = devm_clk_get(dev, "phy");
if (IS_ERR(priv->phyclk))
priv->phyclk = NULL;
if (priv->phyclk)
clk_prepare_enable(priv->phyclk);
-
/* call platform specific init function */
if (pdata->init) {
ret = pdata->init(pdev);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 1a48ab1bd3b2..3c3820ad9092 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -360,23 +360,21 @@ static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
if (is_bypassed_id(pdev))
return -ENODEV;
- return usb_hcd_pci_probe(pdev, id);
+ return usb_hcd_pci_probe(pdev, id, &ehci_pci_hc_driver);
}
static void ehci_pci_remove(struct pci_dev *pdev)
{
pci_clear_mwi(pdev);
- usb_hcd_pci_remove(pdev);
+ usb_hcd_pci_remove(pdev);
}
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
- .driver_data = (unsigned long) &ehci_pci_hc_driver,
}, {
PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
- .driver_data = (unsigned long) &ehci_pci_hc_driver,
},
{ /* end: all zeroes */ }
};
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index e4fc3f66d43b..e9a49007cce4 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -455,6 +455,10 @@ static int ehci_platform_resume(struct device *dev)
ehci_resume(hcd, priv->reset_on_resume);
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
if (priv->quirk_poll)
quirk_poll_init(priv);
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 10d51daa6a1b..e077b2ca53c5 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -480,7 +480,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
err = -ENODEV;
goto cleanup_phy;
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 229b3de319e6..eabf22a78eae 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2001-2002 by David Brownell
*/
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 2ce5031d866d..81fbc019a9b3 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Freescale QUICC Engine USB Host Controller Driver
*
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 7b9cf0a38d6e..96d16752a73e 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Macros and prototypes for i.MX21
*
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 22117a6aeb4a..585222af24ff 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -277,21 +277,24 @@ static const struct ohci_driver_overrides pci_overrides __initconst = {
static const struct pci_device_id pci_ids[] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
- .driver_data = (unsigned long) &ohci_pci_hc_driver,
}, {
/* The device in the ConneXT I/O hub has no class reg */
PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
- .driver_data = (unsigned long) &ohci_pci_hc_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
+static int ohci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return usb_hcd_pci_probe(dev, id, &ohci_pci_hc_driver);
+}
+
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver ohci_pci_driver = {
.name = hcd_name,
.id_table = pci_ids,
- .probe = usb_hcd_pci_probe,
+ .probe = ohci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 7addfc2cbadc..4a8456f12a73 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -299,6 +299,11 @@ static int ohci_platform_resume(struct device *dev)
}
ohci_resume(hcd, false);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
return 0;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index c158cda9e4b9..cff965240327 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -157,9 +157,10 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
* the call to usb_hcd_setup_local_mem() below does just that.
*/
- if (usb_hcd_setup_local_mem(hcd, mem->start,
- mem->start - mem->parent->start,
- resource_size(mem)) < 0)
+ retval = usb_hcd_setup_local_mem(hcd, mem->start,
+ mem->start - mem->parent->start,
+ resource_size(mem));
+ if (retval < 0)
goto err5;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 27c26ca10bfd..b85a39588f9d 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index beb2efa71341..0b949acfa258 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -16,6 +16,9 @@
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+
+#include <soc/bcm2835/raspberrypi-firmware.h>
+
#include "pci-quirks.h"
#include "xhci-ext-caps.h"
@@ -205,7 +208,7 @@ static void usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info;
- info.need_pll_quirk = 0;
+ info.need_pll_quirk = false;
spin_lock_irqsave(&amd_lock, flags);
@@ -229,10 +232,10 @@ static void usb_amd_find_chipset_info(void)
case AMD_CHIPSET_SB800:
case AMD_CHIPSET_HUDSON2:
case AMD_CHIPSET_BOLTON:
- info.need_pll_quirk = 1;
+ info.need_pll_quirk = true;
break;
default:
- info.need_pll_quirk = 0;
+ info.need_pll_quirk = false;
break;
}
@@ -529,7 +532,7 @@ void usb_amd_dev_put(void)
amd_chipset.nb_type = 0;
memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
amd_chipset.isoc_reqs = 0;
- amd_chipset.need_pll_quirk = 0;
+ amd_chipset.need_pll_quirk = false;
spin_unlock_irqrestore(&amd_lock, flags);
@@ -1243,11 +1246,24 @@ iounmap:
static void quirk_usb_early_handoff(struct pci_dev *pdev)
{
+ int ret;
+
/* Skip Netlogic mips SoC's internal PCI USB controller.
* This device does not need/support EHCI/OHCI handoff
*/
if (pdev->vendor == 0x184e) /* vendor Netlogic */
return;
+
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
+ ret = rpi_firmware_init_vl805(pdev);
+ if (ret) {
+ /* Firmware might be outdated, or something failed */
+ dev_warn(&pdev->dev,
+ "Failed to load VL805's firmware: %d. Will continue to attempt to work, but bad things might happen. You should fix this...\n",
+ ret);
+ }
+ }
+
if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 51973a923526..ab081475c113 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* R8A66597 HCD (Host Controller Driver)
*
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index e9209e3e6248..995bc52d2d22 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -81,7 +81,6 @@ static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
-static struct list_head u132_static_list;
/*
* end of the global variables protected by u132_module_lock
*/
@@ -177,7 +176,6 @@ struct u132_ring {
};
struct u132 {
struct kref kref;
- struct list_head u132_list;
struct mutex sw_lock;
struct mutex scheduler_lock;
struct u132_platform_data *board;
@@ -254,7 +252,6 @@ static void u132_hcd_delete(struct kref *kref)
struct usb_hcd *hcd = u132_to_hcd(u132);
u132->going += 1;
mutex_lock(&u132_module_lock);
- list_del_init(&u132->u132_list);
u132_instances -= 1;
mutex_unlock(&u132_module_lock);
dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
@@ -3089,7 +3086,6 @@ static int u132_probe(struct platform_device *pdev)
retval = 0;
hcd->rsrc_start = 0;
mutex_lock(&u132_module_lock);
- list_add_tail(&u132->u132_list, &u132_static_list);
u132->sequence_num = ++u132_instances;
mutex_unlock(&u132_module_lock);
u132_u132_init_kref(u132);
@@ -3192,7 +3188,6 @@ static struct platform_driver u132_platform_driver = {
static int __init u132_hcd_init(void)
{
int retval;
- INIT_LIST_HEAD(&u132_static_list);
u132_instances = 0;
u132_exiting = 0;
mutex_init(&u132_module_lock);
@@ -3213,14 +3208,9 @@ static int __init u132_hcd_init(void)
module_init(u132_hcd_init);
static void __exit u132_hcd_exit(void)
{
- struct u132 *u132;
- struct u132 *temp;
mutex_lock(&u132_module_lock);
u132_exiting += 1;
mutex_unlock(&u132_module_lock);
- list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
- platform_device_unregister(u132->platform_dev);
- }
platform_driver_unregister(&u132_platform_driver);
printk(KERN_INFO "u132-hcd driver deregistered\n");
wait_event(u132_hcd_wait, u132_instances == 0);
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 957c87efc746..9b88745d247f 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -287,17 +287,21 @@ static const struct hc_driver uhci_driver = {
static const struct pci_device_id uhci_pci_ids[] = { {
/* handle any USB UHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
- .driver_data = (unsigned long) &uhci_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
+static int uhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return usb_hcd_pci_probe(dev, id, &uhci_driver);
+}
+
static struct pci_driver uhci_pci_driver = {
.name = hcd_name,
.id_table = uhci_pci_ids,
- .probe = usb_hcd_pci_probe,
+ .probe = uhci_pci_probe,
.remove = usb_hcd_pci_remove,
.shutdown = uhci_shutdown,
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
index f7a4e2492b00..56db635fcd6e 100644
--- a/drivers/usb/host/xhci-debugfs.h
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xhci-debugfs.h - xHCI debugfs interface
*
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 268328c20681..fa59b242cd51 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xHCI host controller driver
*
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index acd56517215a..a93cfe817904 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index ca0a3a5721dd..3be021793cc8 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Marvell
*
diff --git a/drivers/usb/host/xhci-pci-renesas.c b/drivers/usb/host/xhci-pci-renesas.c
new file mode 100644
index 000000000000..59b1965ad0a3
--- /dev/null
+++ b/drivers/usb/host/xhci-pci-renesas.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2020 Linaro Limited */
+
+#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include "xhci.h"
+#include "xhci-trace.h"
+#include "xhci-pci.h"
+
+#define RENESAS_FW_VERSION 0x6C
+#define RENESAS_ROM_CONFIG 0xF0
+#define RENESAS_FW_STATUS 0xF4
+#define RENESAS_FW_STATUS_MSB 0xF5
+#define RENESAS_ROM_STATUS 0xF6
+#define RENESAS_ROM_STATUS_MSB 0xF7
+#define RENESAS_DATA0 0xF8
+#define RENESAS_DATA1 0xFC
+
+#define RENESAS_FW_VERSION_FIELD GENMASK(23, 7)
+#define RENESAS_FW_VERSION_OFFSET 8
+
+#define RENESAS_FW_STATUS_DOWNLOAD_ENABLE BIT(0)
+#define RENESAS_FW_STATUS_LOCK BIT(1)
+#define RENESAS_FW_STATUS_RESULT GENMASK(6, 4)
+ #define RENESAS_FW_STATUS_INVALID 0
+ #define RENESAS_FW_STATUS_SUCCESS BIT(4)
+ #define RENESAS_FW_STATUS_ERROR BIT(5)
+#define RENESAS_FW_STATUS_SET_DATA0 BIT(8)
+#define RENESAS_FW_STATUS_SET_DATA1 BIT(9)
+
+#define RENESAS_ROM_STATUS_ACCESS BIT(0)
+#define RENESAS_ROM_STATUS_ERASE BIT(1)
+#define RENESAS_ROM_STATUS_RELOAD BIT(2)
+#define RENESAS_ROM_STATUS_RESULT GENMASK(6, 4)
+ #define RENESAS_ROM_STATUS_NO_RESULT 0
+ #define RENESAS_ROM_STATUS_SUCCESS BIT(4)
+ #define RENESAS_ROM_STATUS_ERROR BIT(5)
+#define RENESAS_ROM_STATUS_SET_DATA0 BIT(8)
+#define RENESAS_ROM_STATUS_SET_DATA1 BIT(9)
+#define RENESAS_ROM_STATUS_ROM_EXISTS BIT(15)
+
+#define RENESAS_ROM_ERASE_MAGIC 0x5A65726F
+#define RENESAS_ROM_WRITE_MAGIC 0x53524F4D
+
+#define RENESAS_RETRY 10000
+#define RENESAS_DELAY 10
+
+#define ROM_VALID_01 0x2013
+#define ROM_VALID_02 0x2026
+
+static int renesas_verify_fw_version(struct pci_dev *pdev, u32 version)
+{
+ switch (version) {
+ case ROM_VALID_01:
+ case ROM_VALID_02:
+ return 0;
+ }
+ dev_err(&pdev->dev, "FW has invalid version :%d\n", version);
+ return -EINVAL;
+}
+
+static int renesas_fw_download_image(struct pci_dev *dev,
+ const u32 *fw, size_t step, bool rom)
+{
+ size_t i;
+ int err;
+ u8 fw_status;
+ bool data0_or_data1;
+ u32 status_reg;
+
+ if (rom)
+ status_reg = RENESAS_ROM_STATUS_MSB;
+ else
+ status_reg = RENESAS_FW_STATUS_MSB;
+
+ /*
+ * The hardware alternates between two 32-bit pages.
+ * (This is because each row of the firmware is 8 bytes.)
+ *
+ * For even steps we use DATA0, for odd steps DATA1.
+ */
+ data0_or_data1 = (step & 1) == 1;
+
+ /* step+1. Read "Set DATAX" and confirm it is cleared. */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(dev, status_reg, &fw_status);
+ if (err) {
+ dev_err(&dev->dev, "Read Status failed: %d\n",
+ pcibios_err_to_errno(err));
+ return pcibios_err_to_errno(err);
+ }
+ if (!(fw_status & BIT(data0_or_data1)))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ dev_err(&dev->dev, "Timeout for Set DATAX step: %zd\n", step);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * step+2. Write FW data to "DATAX".
+ * "LSB is left" => force little endian
+ */
+ err = pci_write_config_dword(dev, data0_or_data1 ?
+ RENESAS_DATA1 : RENESAS_DATA0,
+ (__force u32)cpu_to_le32(fw[step]));
+ if (err) {
+ dev_err(&dev->dev, "Write to DATAX failed: %d\n",
+ pcibios_err_to_errno(err));
+ return pcibios_err_to_errno(err);
+ }
+
+ udelay(100);
+
+ /* step+3. Set "Set DATAX". */
+ err = pci_write_config_byte(dev, status_reg, BIT(data0_or_data1));
+ if (err) {
+ dev_err(&dev->dev, "Write config for DATAX failed: %d\n",
+ pcibios_err_to_errno(err));
+ return pcibios_err_to_errno(err);
+ }
+
+ return 0;
+}
+
+static int renesas_fw_verify(const void *fw_data,
+ size_t length)
+{
+ u16 fw_version_pointer;
+ u16 fw_version;
+
+ /*
+ * The firmware's data format is described in
+ * "6.3 Data Format" R19UH0078EJ0500 Rev.5.00 page 124
+ */
+
+ /*
+ * The bootrom chips of the big brother have sizes up to 64k, let's
+ * assume that's the biggest the firmware can get.
+ */
+ if (length < 0x1000 || length >= 0x10000) {
+ pr_err("firmware is size %zd is not (4k - 64k).",
+ length);
+ return -EINVAL;
+ }
+
+ /* The First 2 bytes are fixed value (55aa). "LSB on Left" */
+ if (get_unaligned_le16(fw_data) != 0x55aa) {
+ pr_err("no valid firmware header found.");
+ return -EINVAL;
+ }
+
+ /* verify the firmware version position and print it. */
+ fw_version_pointer = get_unaligned_le16(fw_data + 4);
+ if (fw_version_pointer + 2 >= length) {
+ pr_err("fw ver pointer is outside of the firmware image");
+ return -EINVAL;
+ }
+
+ fw_version = get_unaligned_le16(fw_data + fw_version_pointer);
+ pr_err("got firmware version: %02x.", fw_version);
+
+ return 0;
+}
+
+static bool renesas_check_rom(struct pci_dev *pdev)
+{
+ u16 rom_status;
+ int retval;
+
+ /* Check if external ROM exists */
+ retval = pci_read_config_word(pdev, RENESAS_ROM_STATUS, &rom_status);
+ if (retval)
+ return false;
+
+ rom_status &= RENESAS_ROM_STATUS_ROM_EXISTS;
+ if (rom_status) {
+ dev_dbg(&pdev->dev, "External ROM exists\n");
+ return true; /* External ROM exists */
+ }
+
+ return false;
+}
+
+static int renesas_check_rom_state(struct pci_dev *pdev)
+{
+ u16 rom_state;
+ u32 version;
+ int err;
+
+ /* check FW version */
+ err = pci_read_config_dword(pdev, RENESAS_FW_VERSION, &version);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ version &= RENESAS_FW_VERSION_FIELD;
+ version = version >> RENESAS_FW_VERSION_OFFSET;
+
+ err = renesas_verify_fw_version(pdev, version);
+ if (err)
+ return err;
+
+ /*
+ * Test if the ROM is present and loaded; if so, we can skip everything
+ */
+ err = pci_read_config_word(pdev, RENESAS_ROM_STATUS, &rom_state);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ if (rom_state & BIT(15)) {
+ /* ROM exists */
+ dev_dbg(&pdev->dev, "ROM exists\n");
+
+ /* Check the "Result Code" Bits (6:4) and act accordingly */
+ switch (rom_state & RENESAS_ROM_STATUS_RESULT) {
+ case RENESAS_ROM_STATUS_SUCCESS:
+ return 0;
+
+ case RENESAS_ROM_STATUS_NO_RESULT: /* No result yet */
+ return 0;
+
+ case RENESAS_ROM_STATUS_ERROR: /* Error State */
+ default: /* All other states are marked as "Reserved states" */
+ dev_err(&pdev->dev, "Invalid ROM..");
+ break;
+ }
+ }
+
+ return -EIO;
+}
+
+static int renesas_fw_check_running(struct pci_dev *pdev)
+{
+ u8 fw_state;
+ int err;
+
+ /* Check if the device has a ROM with firmware loaded; if so, skip everything */
+ err = renesas_check_rom(pdev);
+ if (err) { /* we have rom */
+ err = renesas_check_rom_state(pdev);
+ if (!err)
+ return err;
+ }
+
+ /*
+ * Test if the device actually needs the firmware, as most
+ * BIOSes will already have initialized the device for us, in
+ * which case it is already running.
+ */
+ err = pci_read_config_byte(pdev, RENESAS_FW_STATUS, &fw_state);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /*
+ * Check if "FW Download Lock" is locked. If it is and the FW is
+ * ready we can simply continue. If the FW is not ready, we have
+ * to give up.
+ */
+ if (fw_state & RENESAS_FW_STATUS_LOCK) {
+ dev_dbg(&pdev->dev, "FW Download Lock is engaged.");
+
+ if (fw_state & RENESAS_FW_STATUS_SUCCESS)
+ return 0;
+
+ dev_err(&pdev->dev,
+ "FW Download Lock is set and FW is not ready. Giving Up.");
+ return -EIO;
+ }
+
+ /*
+ * Check if "FW Download Enable" is set. If someone (us?) tampered
+ * with it and it can't be reset, we have to give up too... and
+ * ask for a forgiveness and a reboot.
+ */
+ if (fw_state & RENESAS_FW_STATUS_DOWNLOAD_ENABLE) {
+ dev_err(&pdev->dev,
+ "FW Download Enable is stale. Giving Up (poweroff/reboot needed).");
+ return -EIO;
+ }
+
+ /* Otherwise, Check the "Result Code" Bits (6:4) and act accordingly */
+ switch (fw_state & RENESAS_FW_STATUS_RESULT) {
+ case 0: /* No result yet */
+ dev_dbg(&pdev->dev, "FW is not ready/loaded yet.");
+
+ /* tell the caller that this device needs the firmware. */
+ return 1;
+
+ case RENESAS_FW_STATUS_SUCCESS: /* Success, device should be working. */
+ dev_dbg(&pdev->dev, "FW is ready.");
+ return 0;
+
+ case RENESAS_FW_STATUS_ERROR: /* Error State */
+ dev_err(&pdev->dev,
+ "hardware is in an error state. Giving up (poweroff/reboot needed).");
+ return -ENODEV;
+
+ default: /* All other states are marked as "Reserved states" */
+ dev_err(&pdev->dev,
+ "hardware is in an invalid state %lx. Giving up (poweroff/reboot needed).",
+ (fw_state & RENESAS_FW_STATUS_RESULT) >> 4);
+ return -EINVAL;
+ }
+}
+
+static int renesas_fw_download(struct pci_dev *pdev,
+ const struct firmware *fw)
+{
+ const u32 *fw_data = (const u32 *)fw->data;
+ size_t i;
+ int err;
+ u8 fw_status;
+
+ /*
+ * For more information and the big picture: please look at the
+ * "Firmware Download Sequence" in "7.1 FW Download Interface"
+ * of R19UH0078EJ0500 Rev.5.00 page 131
+ */
+
+ /*
+ * 0. Set "FW Download Enable" bit in the
+ * "FW Download Control & Status Register" at 0xF4
+ */
+ err = pci_write_config_byte(pdev, RENESAS_FW_STATUS,
+ RENESAS_FW_STATUS_DOWNLOAD_ENABLE);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /* 1 - 10 follow one step after the other. */
+ for (i = 0; i < fw->size / 4; i++) {
+ err = renesas_fw_download_image(pdev, fw_data, i, false);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware Download Step %zd failed at position %zd bytes with (%d).",
+ i, i * 4, err);
+ return err;
+ }
+ }
+
+ /*
+ * This sequence continues until the last data is written to
+ * "DATA0" or "DATA1". Naturally, we wait until "SET DATA0/1"
+ * is cleared by the hardware beforehand.
+ */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_FW_STATUS_MSB,
+ &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (!(fw_status & (BIT(0) | BIT(1))))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY)
+ dev_warn(&pdev->dev, "Final Firmware Download step timed out.");
+
+ /*
+ * 11. After finishing writing the last data of FW, the
+ * System Software must clear "FW Download Enable"
+ */
+ err = pci_write_config_byte(pdev, RENESAS_FW_STATUS, 0);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /* 12. Read "Result Code" and confirm it is good. */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_FW_STATUS, &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (fw_status & RENESAS_FW_STATUS_SUCCESS)
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ /* Timed out / Error - let's see if we can fix this */
+ err = renesas_fw_check_running(pdev);
+ switch (err) {
+ case 0: /*
+ * we shouldn't end up here.
+ * maybe it took a little bit longer.
+ * But all should be well?
+ */
+ break;
+
+ case 1: /* No result yet */
+ dev_err(&pdev->dev, "FW Load timedout");
+ return -ETIMEDOUT;
+
+ default:
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void renesas_rom_erase(struct pci_dev *pdev)
+{
+ int retval, i;
+ u8 status;
+
+ dev_dbg(&pdev->dev, "Performing ROM Erase...\n");
+ retval = pci_write_config_dword(pdev, RENESAS_DATA0,
+ RENESAS_ROM_ERASE_MAGIC);
+ if (retval) {
+ dev_err(&pdev->dev, "ROM erase, magic word write failed: %d\n",
+ pcibios_err_to_errno(retval));
+ return;
+ }
+
+ retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (retval) {
+ dev_err(&pdev->dev, "ROM status read failed: %d\n",
+ pcibios_err_to_errno(retval));
+ return;
+ }
+ status |= RENESAS_ROM_STATUS_ERASE;
+ retval = pci_write_config_byte(pdev, RENESAS_ROM_STATUS, status);
+ if (retval) {
+ dev_err(&pdev->dev, "ROM erase set word write failed\n");
+ return;
+ }
+
+ /* sleep a bit while ROM is erased */
+ msleep(20);
+
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ retval = pci_read_config_byte(pdev, RENESAS_ROM_STATUS,
+ &status);
+ status &= RENESAS_ROM_STATUS_ERASE;
+ if (!status)
+ break;
+
+ mdelay(RENESAS_DELAY);
+ }
+
+ if (i == RENESAS_RETRY)
+ dev_dbg(&pdev->dev, "Chip erase timedout: %x\n", status);
+
+ dev_dbg(&pdev->dev, "ROM Erase... Done success\n");
+}
+
+static bool renesas_setup_rom(struct pci_dev *pdev, const struct firmware *fw)
+{
+ const u32 *fw_data = (const u32 *)fw->data;
+ int err, i;
+ u8 status;
+
+ /* 2. Write magic word to Data0 */
+ err = pci_write_config_dword(pdev, RENESAS_DATA0,
+ RENESAS_ROM_WRITE_MAGIC);
+ if (err)
+ return false;
+
+ /* 3. Set External ROM access */
+ err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS,
+ RENESAS_ROM_STATUS_ACCESS);
+ if (err)
+ goto remove_bypass;
+
+ /* 4. Check the result */
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (err)
+ goto remove_bypass;
+ status &= GENMASK(6, 4);
+ if (status) {
+ dev_err(&pdev->dev,
+ "setting external rom failed: %x\n", status);
+ goto remove_bypass;
+ }
+
+ /* 5 to 16 Write FW to DATA0/1 while checking SetData0/1 */
+ for (i = 0; i < fw->size / 4; i++) {
+ err = renesas_fw_download_image(pdev, fw_data, i, true);
+ if (err) {
+ dev_err(&pdev->dev,
+ "ROM Download Step %d failed at position %d bytes with (%d)\n",
+ i, i * 4, err);
+ goto remove_bypass;
+ }
+ }
+
+ /*
+ * wait till DATA0/1 is cleared
+ */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS_MSB,
+ &status);
+ if (err)
+ goto remove_bypass;
+ if (!(status & (BIT(0) | BIT(1))))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ dev_err(&pdev->dev, "Final Firmware ROM Download step timed out\n");
+ goto remove_bypass;
+ }
+
+ /* 17. Remove bypass */
+ err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS, 0);
+ if (err)
+ return false;
+
+ udelay(10);
+
+ /* 18. check result */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (err) {
+ dev_err(&pdev->dev, "Read ROM status failed:%d\n",
+ pcibios_err_to_errno(err));
+ return false;
+ }
+ status &= RENESAS_ROM_STATUS_RESULT;
+ if (status == RENESAS_ROM_STATUS_SUCCESS) {
+ dev_dbg(&pdev->dev, "Download ROM success\n");
+ break;
+ }
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) { /* Timed out */
+ dev_err(&pdev->dev,
+ "Download to external ROM TO: %x\n", status);
+ return false;
+ }
+
+ dev_dbg(&pdev->dev, "Download to external ROM succeeded\n");
+
+ /* Last step set Reload */
+ err = pci_write_config_byte(pdev, RENESAS_ROM_STATUS,
+ RENESAS_ROM_STATUS_RELOAD);
+ if (err) {
+ dev_err(&pdev->dev, "Set ROM execute failed: %d\n",
+ pcibios_err_to_errno(err));
+ return false;
+ }
+
+ /*
+ * wait till Reload is cleared
+ */
+ for (i = 0; i < RENESAS_RETRY; i++) {
+ err = pci_read_config_byte(pdev, RENESAS_ROM_STATUS, &status);
+ if (err)
+ return false;
+ if (!(status & RENESAS_ROM_STATUS_RELOAD))
+ break;
+
+ udelay(RENESAS_DELAY);
+ }
+ if (i == RENESAS_RETRY) {
+ dev_err(&pdev->dev, "ROM Exec timed out: %x\n", status);
+ return false;
+ }
+
+ return true;
+
+remove_bypass:
+ pci_write_config_byte(pdev, RENESAS_ROM_STATUS, 0);
+ return false;
+}
+
+static int renesas_load_fw(struct pci_dev *pdev, const struct firmware *fw)
+{
+ int err = 0;
+ bool rom;
+
+ /* Check if the device has external ROM */
+ rom = renesas_check_rom(pdev);
+ if (rom) {
+ /* perform chip erase first */
+ renesas_rom_erase(pdev);
+
+ /* let's try loading the firmware into the ROM first */
+ rom = renesas_setup_rom(pdev, fw);
+ if (!rom) {
+ dev_dbg(&pdev->dev,
+ "ROM load failed, falling back on FW load\n");
+ } else {
+ dev_dbg(&pdev->dev,
+ "ROM load success\n");
+ goto exit;
+ }
+ }
+
+ err = renesas_fw_download(pdev, fw);
+
+exit:
+ if (err)
+ dev_err(&pdev->dev, "firmware failed to download (%d).", err);
+ return err;
+}
+
+int renesas_xhci_check_request_fw(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct xhci_driver_data *driver_data =
+ (struct xhci_driver_data *)id->driver_data;
+ const char *fw_name = driver_data->firmware;
+ const struct firmware *fw;
+ int err;
+
+ err = renesas_fw_check_running(pdev);
+ /* Continue ahead if the firmware is already running. */
+ if (err == 0)
+ return 0;
+
+ if (err != 1)
+ return err;
+
+ pci_dev_get(pdev);
+ err = request_firmware(&fw, fw_name, &pdev->dev);
+ pci_dev_put(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "request_firmware failed: %d\n", err);
+ return err;
+ }
+
+ err = renesas_fw_verify(fw->data, fw->size);
+ if (err)
+ goto exit;
+
+ err = renesas_load_fw(pdev, fw);
+exit:
+ release_firmware(fw);
+ return err;
+}
+EXPORT_SYMBOL_GPL(renesas_xhci_check_request_fw);
+
+void renesas_xhci_pci_exit(struct pci_dev *dev)
+{
+}
+EXPORT_SYMBOL_GPL(renesas_xhci_pci_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 766b74723e64..ef513c2fb843 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -15,6 +15,7 @@
#include "xhci.h"
#include "xhci-trace.h"
+#include "xhci-pci.h"
#define SSIC_PORT_NUM 2
#define SSIC_PORT_CFG2 0x880c
@@ -87,7 +88,16 @@ static int xhci_pci_reinit(struct xhci_hcd *xhci, struct pci_dev *pdev)
static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xhci_driver_data *driver_data;
+ const struct pci_device_id *id;
+
+ id = pci_match_id(pdev->driver->id_table, pdev);
+
+ if (id && id->driver_data) {
+ driver_data = (struct xhci_driver_data *)id->driver_data;
+ xhci->quirks |= driver_data->quirks;
+ }
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
@@ -327,10 +337,15 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int retval;
struct xhci_hcd *xhci;
- struct hc_driver *driver;
struct usb_hcd *hcd;
+ struct xhci_driver_data *driver_data;
- driver = (struct hc_driver *)id->driver_data;
+ driver_data = (struct xhci_driver_data *)id->driver_data;
+ if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) {
+ retval = renesas_xhci_check_request_fw(dev, id);
+ if (retval)
+ return retval;
+ }
/* Prevent runtime suspending between USB-2 and USB-3 initialization */
pm_runtime_get_noresume(&dev->dev);
@@ -341,7 +356,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* to say USB 2.0, but I'm not sure what the implications would be in
* the other parts of the HCD code.
*/
- retval = usb_hcd_pci_probe(dev, id);
+ retval = usb_hcd_pci_probe(dev, id, &xhci_pci_hc_driver);
if (retval)
goto put_runtime_pm;
@@ -349,8 +364,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB 2.0 roothub is stored in the PCI device now. */
hcd = dev_get_drvdata(&dev->dev);
xhci = hcd_to_xhci(hcd);
- xhci->shared_hcd = usb_create_shared_hcd(driver, &dev->dev,
- pci_name(dev), hcd);
+ xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev,
+ pci_name(dev), hcd);
if (!xhci->shared_hcd) {
retval = -ENOMEM;
goto dealloc_usb2_hcd;
@@ -392,6 +407,9 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ if (xhci->quirks & XHCI_RENESAS_FW_QUIRK)
+ renesas_xhci_pci_exit(dev);
+
xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
@@ -543,15 +561,26 @@ static void xhci_pci_shutdown(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
+static const struct xhci_driver_data renesas_data = {
+ .quirks = XHCI_RENESAS_FW_QUIRK,
+ .firmware = "renesas_usb_fw.mem",
+};
+
/* PCI driver selection metadata; PCI hotplugging uses this */
-static const struct pci_device_id pci_ids[] = { {
+static const struct pci_device_id pci_ids[] = {
+ { PCI_DEVICE(0x1912, 0x0014),
+ .driver_data = (unsigned long)&renesas_data,
+ },
+ { PCI_DEVICE(0x1912, 0x0015),
+ .driver_data = (unsigned long)&renesas_data,
+ },
/* handle any USB 3.0 xHCI controller */
- PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
- .driver_data = (unsigned long) &xhci_pci_hc_driver,
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_XHCI, ~0),
},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
+MODULE_FIRMWARE("renesas_usb_fw.mem");
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver xhci_pci_driver = {
diff --git a/drivers/usb/host/xhci-pci.h b/drivers/usb/host/xhci-pci.h
new file mode 100644
index 000000000000..acd7cf0a1706
--- /dev/null
+++ b/drivers/usb/host/xhci-pci.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2020 Linaro Limited */
+
+#ifndef XHCI_PCI_H
+#define XHCI_PCI_H
+
+#if IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS)
+int renesas_xhci_check_request_fw(struct pci_dev *dev,
+ const struct pci_device_id *id);
+void renesas_xhci_pci_exit(struct pci_dev *dev);
+
+#else
+static inline int renesas_xhci_check_request_fw(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return 0;
+}
+
+static inline void renesas_xhci_pci_exit(struct pci_dev *dev) { }
+
+#endif
+
+struct xhci_driver_data {
+ u64 quirks;
+ const char *firmware;
+};
+
+#endif
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ea460b9682d5..f6b4089bfc4a 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -112,6 +112,10 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
};
+static const struct xhci_plat_priv xhci_plat_brcm = {
+ .quirks = XHCI_RESET_ON_RESUME,
+};
+
static const struct of_device_id usb_xhci_of_match[] = {
{
.compatible = "generic-xhci",
@@ -147,6 +151,12 @@ static const struct of_device_id usb_xhci_of_match[] = {
}, {
.compatible = "renesas,rcar-gen3-xhci",
.data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "brcm,xhci-brcm-v2",
+ .data = &xhci_plat_brcm,
+ }, {
+ .compatible = "brcm,bcm7445-xhci",
+ .data = &xhci_plat_brcm,
},
{},
};
@@ -409,7 +419,15 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
if (ret)
return ret;
- return xhci_resume(xhci, 0);
+ ret = xhci_resume(xhci, 0);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
}
static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev)
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5681723fc9cd..b49f6447bd3a 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xhci-plat.h - xHCI host controller driver platform Bus Glue.
*
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index 012744a63a49..048ad3b8a6c7 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* drivers/usb/host/xhci-rcar.h
*
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index b19582b2a72c..627abd236dbe 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xHCI host controller driver
*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 86cfefdd6632..2c6c4f8d1ee1 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* xHCI host controller driver
@@ -1873,6 +1873,7 @@ struct xhci_hcd {
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
+#define XHCI_RENESAS_FW_QUIRK BIT_ULL(36)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/isp1760/isp1760-core.h b/drivers/usb/isp1760/isp1760-core.h
index 97cb4d7a3e1c..d9a0a4cc467c 100644
--- a/drivers/usb/isp1760/isp1760-core.h
+++ b/drivers/usb/isp1760/isp1760-core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the NXP ISP1760 chip
*
diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
index 1f00c3850cf7..fedc4f5cded0 100644
--- a/drivers/usb/isp1760/isp1760-regs.h
+++ b/drivers/usb/isp1760/isp1760-regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the NXP ISP1760 chip
*
diff --git a/drivers/usb/isp1760/isp1760-udc.h b/drivers/usb/isp1760/isp1760-udc.h
index 2d0b88747701..d2df650d54e9 100644
--- a/drivers/usb/isp1760/isp1760-udc.h
+++ b/drivers/usb/isp1760/isp1760-udc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for the NXP ISP1761 device controller
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index 8a5e6bb07d05..c0fb9e1c5361 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* sisusb - usb kernel driver for Net2280/SiS315 based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index ace09985dae4..aa33bc81ee52 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* $XFree86$ */
/* $XdotOrg$ */
/*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_struct.h b/drivers/usb/misc/sisusbvga/sisusb_struct.h
index 706d77090e00..3df64d2a9d43 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_struct.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_struct.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* General structure definitions for universal mode switching modules
*
diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h
index 4bf77736914f..1584efbbd704 100644
--- a/drivers/usb/misc/usb_u132.h
+++ b/drivers/usb/misc/usb_u132.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common Header File for the Elan Digital Systems U132 adapter
* this file should be included by both the "ftdi-u132" and
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index 6087be236a35..d49db92ab26c 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3.h - MediaTek USB3 DRD header
*
diff --git a/drivers/usb/mtu3/mtu3_debug.h b/drivers/usb/mtu3/mtu3_debug.h
index e96a69234d05..fb6b28277c9b 100644
--- a/drivers/usb/mtu3/mtu3_debug.h
+++ b/drivers/usb/mtu3/mtu3_debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_debug.h - debug header
*
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index 5e58c4dbd54a..760fe7d69c6b 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_dr.h - dual role switch and host glue layer header
*
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 8382d066749e..bf34f784f84b 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
*
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
index 9cfde201db63..66e1c0ab5a99 100644
--- a/drivers/usb/mtu3/mtu3_qmu.h
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mtu3_qmu.h - Queue Management Unit driver header
*
diff --git a/drivers/usb/mtu3/mtu3_trace.h b/drivers/usb/mtu3/mtu3_trace.h
index 050e30f0fbd4..1b897636daf2 100644
--- a/drivers/usb/mtu3/mtu3_trace.h
+++ b/drivers/usb/mtu3/mtu3_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* mtu3_trace.h - trace support
*
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index e021485c83ae..c8e67d15b510 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2005-2006 by Texas Instruments
*/
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index e64dd30e80e7..c4fe1f4cd17a 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -30,11 +30,11 @@ static irqreturn_t jz4740_musb_interrupt(int irq, void *__hci)
irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE;
struct musb *musb = __hci;
- spin_lock_irqsave(&musb->lock, flags);
-
if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller)
retval_dma = dma_controller_irq(irq, musb->dma_controller);
+ spin_lock_irqsave(&musb->lock, flags);
+
musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
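
Note on the jz4740 hunk above: the DMA interrupt handler is now dispatched before musb->lock is taken, presumably because dma_controller_irq() acquires that same lock internally and therefore must not be called with it already held. A minimal sketch of the resulting ordering, reusing names from the hunk and driver (only the lock placement is the point here):

	static irqreturn_t demo_musb_interrupt(int irq, void *__hci)
	{
		struct musb *musb = __hci;
		irqreturn_t retval = IRQ_NONE, retval_dma = IRQ_NONE;
		unsigned long flags;

		/* Sub-handler takes musb->lock on its own; call it unlocked. */
		if (IS_ENABLED(CONFIG_USB_INVENTRA_DMA) && musb->dma_controller)
			retval_dma = dma_controller_irq(irq, musb->dma_controller);

		spin_lock_irqsave(&musb->lock, flags);
		musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
		musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
		musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
		if (musb->int_usb || musb->int_tx || musb->int_rx)
			retval = musb_interrupt(musb);
		spin_unlock_irqrestore(&musb->lock, flags);

		return (retval == IRQ_HANDLED || retval_dma == IRQ_HANDLED) ?
			IRQ_HANDLED : IRQ_NONE;
	}
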
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 6196b0e8d77d..eebeadd26946 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -208,6 +208,12 @@ static irqreturn_t generic_interrupt(int irq, void *__hci)
musb->int_rx = musb_clearw(musb->mregs, MUSB_INTRRX);
musb->int_tx = musb_clearw(musb->mregs, MUSB_INTRTX);
+ if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
+ /* ep0 FADDR must be 0 when (re)entering peripheral mode */
+ musb_ep_select(musb->mregs, 0);
+ musb_writeb(musb->mregs, MUSB_FADDR, 0);
+ }
+
if (musb->int_usb || musb->int_tx || musb->int_rx)
retval = musb_interrupt(musb);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index d590110539ab..384a8039a7fd 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1795,7 +1795,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
EXPORT_SYMBOL_GPL(musb_interrupt);
#ifndef CONFIG_MUSB_PIO_ONLY
-static bool use_dma = 1;
+static bool use_dma = true;
/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
@@ -2877,6 +2877,13 @@ static int musb_resume(struct device *dev)
musb_enable_interrupts(musb);
musb_platform_enable(musb);
+ /* session might be disabled in suspend */
+ if (musb->port_mode == MUSB_HOST &&
+ !(musb->ops->quirks & MUSB_PRESERVE_SESSION)) {
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ }
+
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 290a2bc46606..dbe5623db1e0 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver defines
*
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index c444a80fe1da..e5b3506c7b3f 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver debug defines
*
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 7b6281ab62ed..30a89aa8a3e7 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -168,6 +168,11 @@ static ssize_t musb_test_mode_write(struct file *file,
u8 test;
char buf[24];
+ memset(buf, 0x00, sizeof(buf));
+
+ if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
@@ -176,11 +181,6 @@ static ssize_t musb_test_mode_write(struct file *file,
goto ret;
}
- memset(buf, 0x00, sizeof(buf));
-
- if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
if (strstarts(buf, "force host full-speed"))
test = MUSB_TEST_FORCE_HOST | MUSB_TEST_FORCE_FS;
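
Note on the musb_debugfs hunk above: the user copy is moved ahead of pm_runtime_get_sync(), so a faulting copy_from_user() can return -EFAULT before any runtime-PM reference is taken (previously that early return presumably left the reference count unbalanced, since only the later "ret" path drops it). A minimal sketch of the safe ordering, with hypothetical demo_* names:

	#include <linux/device.h>
	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/pm_runtime.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	struct demo_ctx { struct device *dev; };	/* hypothetical */

	static ssize_t demo_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
	{
		struct demo_ctx *ctx = file->private_data;
		char buf[24];

		/* Anything that may bail out with a bare return goes first. */
		memset(buf, 0x00, sizeof(buf));
		if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
			return -EFAULT;

		pm_runtime_get_sync(ctx->dev);
		/* ... parse buf and poke the hardware ... */
		pm_runtime_put(ctx->dev);

		return count;
	}
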
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 4b4d8dc5d3f2..7d67b69df0a0 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver DMA controller abstraction
*
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index d02663660813..f49f25b3bf56 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver peripheral defines
*
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8736f4251a22..8b7d22a0c0fb 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1774,9 +1774,15 @@ void musb_host_rx(struct musb *musb, u8 epnum)
status = -EPIPE;
} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
- musb_dbg(musb, "end %d RX proto error", epnum);
+ dev_err(musb->controller, "ep%d RX three-strikes error", epnum);
- status = -EPROTO;
+ /*
+ * The three-strikes error could only happen when the USB
+ * device is not accessible, for example detached or powered
+ * off. So return the fatal error -ESHUTDOWN so hopefully the
+ * USB device drivers won't immediately resubmit the same URB.
+ */
+ status = -ESHUTDOWN;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
rx_csr &= ~MUSB_RXCSR_H_ERROR;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 2999845632ce..32336571f05c 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver host defines
*
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index f17aabd95a50..12874d3b2a64 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver register I/O
*
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 5cd7264fc2cb..5fa110978f1a 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MUSB OTG driver register defines
*
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index b193daf69685..380ebc77eab1 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* musb_trace.h - MUSB Controller Trace Support
*
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 859008fa0e3c..939a0361ae88 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2005-2006 by Texas Instruments
*/
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
index fd8025bbece7..8a253564fb18 100644
--- a/drivers/usb/musb/tusb6010.h
+++ b/drivers/usb/musb/tusb6010.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
*
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index 43d410f6641b..fbcc28ad9964 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. */
#include <linux/usb/otg-fsm.h>
diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c
index 3ea1f5b9bcf8..8f62dc2a90ff 100644
--- a/drivers/usb/phy/phy-jz4770.c
+++ b/drivers/usb/phy/phy-jz4770.c
@@ -125,13 +125,13 @@ static int jz4770_phy_init(struct usb_phy *phy)
err = regulator_enable(priv->vcc_supply);
if (err) {
- dev_err(priv->dev, "Unable to enable VCC: %d", err);
+ dev_err(priv->dev, "Unable to enable VCC: %d\n", err);
return err;
}
err = clk_prepare_enable(priv->clk);
if (err) {
- dev_err(priv->dev, "Unable to start clock: %d", err);
+ dev_err(priv->dev, "Unable to start clock: %d\n", err);
return err;
}
@@ -191,7 +191,7 @@ static int jz4770_phy_probe(struct platform_device *pdev)
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
- dev_err(dev, "Failed to map registers");
+ dev_err(dev, "Failed to map registers\n");
return PTR_ERR(priv->base);
}
@@ -199,7 +199,7 @@ static int jz4770_phy_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock");
+ dev_err(dev, "Failed to get clock\n");
return err;
}
@@ -207,14 +207,14 @@ static int jz4770_phy_probe(struct platform_device *pdev)
if (IS_ERR(priv->vcc_supply)) {
err = PTR_ERR(priv->vcc_supply);
if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get regulator");
+ dev_err(dev, "Failed to get regulator\n");
return err;
}
err = usb_add_phy(&priv->phy, USB_PHY_TYPE_USB2);
if (err) {
if (err != -EPROBE_DEFER)
- dev_err(dev, "Unable to register PHY");
+ dev_err(dev, "Unable to register PHY\n");
return err;
}
diff --git a/drivers/usb/phy/phy-mv-usb.h b/drivers/usb/phy/phy-mv-usb.h
index 96701a1229ad..5d5c0abb0c3a 100644
--- a/drivers/usb/phy/phy-mv-usb.h
+++ b/drivers/usb/phy/phy-mv-usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
*/
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index ef1735d014da..eb34d762a63d 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index c3d3cc35cee0..7d3700bf41d9 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 65dc19ca528e..56b7106d254d 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3b130529408b..a4ae9f97d9cd 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-1.0+
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Renesas USB driver
*
diff --git a/drivers/usb/renesas_usbhs/rcar2.h b/drivers/usb/renesas_usbhs/rcar2.h
index 7d88732c5bff..046d07edb36f 100644
--- a/drivers/usb/renesas_usbhs/rcar2.h
+++ b/drivers/usb/renesas_usbhs/rcar2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
extern const struct renesas_usbhs_platform_info usbhs_rcar_gen2_plat_info;
diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h
index c7c5ec1e3af2..d13db30bd21b 100644
--- a/drivers/usb/renesas_usbhs/rcar3.h
+++ b/drivers/usb/renesas_usbhs/rcar3.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
extern const struct renesas_usbhs_platform_info usbhs_rcar_gen3_plat_info;
diff --git a/drivers/usb/renesas_usbhs/rza.h b/drivers/usb/renesas_usbhs/rza.h
index 1ca42a6fd480..a29b75fef057 100644
--- a/drivers/usb/renesas_usbhs/rza.h
+++ b/drivers/usb/renesas_usbhs/rza.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#include "common.h"
extern const struct renesas_usbhs_platform_info usbhs_rza1_plat_info;
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 5b17709821df..27d92af29635 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -49,8 +49,10 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
- if (!ret)
+ if (!ret) {
sw->role = role;
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+ }
mutex_unlock(&sw->lock);
diff --git a/drivers/usb/serial/belkin_sa.h b/drivers/usb/serial/belkin_sa.h
index a13a98d284f2..89ec30c63cc6 100644
--- a/drivers/usb/serial/belkin_sa.h
+++ b/drivers/usb/serial/belkin_sa.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for Belkin USB Serial Adapter Driver
*
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index c5ecdcd51ffc..89675ee29645 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -73,6 +73,8 @@
#define CH341_LCR_CS6 0x01
#define CH341_LCR_CS5 0x00
+#define CH341_QUIRK_LIMITED_PRESCALER BIT(0)
+
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7523) },
@@ -87,6 +89,7 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+ unsigned long quirks;
};
static void ch341_set_termios(struct tty_struct *tty,
@@ -159,9 +162,11 @@ static const speed_t ch341_min_rates[] = {
* 2 <= div <= 256 if fact = 0, or
* 9 <= div <= 256 if fact = 1
*/
-static int ch341_get_divisor(speed_t speed)
+static int ch341_get_divisor(struct ch341_private *priv)
{
unsigned int fact, div, clk_div;
+ speed_t speed = priv->baud_rate;
+ bool force_fact0 = false;
int ps;
/*
@@ -187,8 +192,12 @@ static int ch341_get_divisor(speed_t speed)
clk_div = CH341_CLK_DIV(ps, fact);
div = CH341_CLKRATE / (clk_div * speed);
+ /* Some devices require a lower base clock if ps < 3. */
+ if (ps < 3 && (priv->quirks & CH341_QUIRK_LIMITED_PRESCALER))
+ force_fact0 = true;
+
/* Halve base clock (fact = 0) if required. */
- if (div < 9 || div > 255) {
+ if (div < 9 || div > 255 || force_fact0) {
div /= 2;
clk_div *= 2;
fact = 0;
@@ -227,7 +236,7 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
if (!priv->baud_rate)
return -EINVAL;
- val = ch341_get_divisor(priv->baud_rate);
+ val = ch341_get_divisor(priv);
if (val < 0)
return -EINVAL;
@@ -308,6 +317,54 @@ out: kfree(buffer);
return r;
}
+static int ch341_detect_quirks(struct usb_serial_port *port)
+{
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ struct usb_device *udev = port->serial->dev;
+ const unsigned int size = 2;
+ unsigned long quirks = 0;
+ char *buffer;
+ int r;
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ /*
+ * A subset of CH34x devices does not support all features. The
+ * prescaler is limited and there is no support for sending a RS232
+ * break condition. A read failure when trying to set up the latter is
+ * used to detect these devices.
+ */
+ r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), CH341_REQ_READ_REG,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ CH341_REG_BREAK, 0, buffer, size, DEFAULT_TIMEOUT);
+ if (r == -EPIPE) {
+ dev_dbg(&port->dev, "break control not supported\n");
+ quirks = CH341_QUIRK_LIMITED_PRESCALER;
+ r = 0;
+ goto out;
+ }
+
+ if (r != size) {
+ if (r >= 0)
+ r = -EIO;
+ dev_err(&port->dev, "failed to read break control: %d\n", r);
+ goto out;
+ }
+
+ r = 0;
+out:
+ kfree(buffer);
+
+ if (quirks) {
+ dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks);
+ priv->quirks |= quirks;
+ }
+
+ return r;
+}
+
static int ch341_port_probe(struct usb_serial_port *port)
{
struct ch341_private *priv;
@@ -330,6 +387,11 @@ static int ch341_port_probe(struct usb_serial_port *port)
goto error;
usb_set_serial_port_data(port, priv);
+
+ r = ch341_detect_quirks(port);
+ if (r < 0)
+ goto error;
+
return 0;
error: kfree(priv);
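
Note on the ch341 hunk above: quirky CH34x variants are detected by attempting a harmless vendor read of the break-control register and interpreting a stalled control transfer (-EPIPE) as "feature not supported", which sets CH341_QUIRK_LIMITED_PRESCALER and later forces fact = 0 in the divisor calculation. A condensed sketch of that probe-and-flag pattern (CH341_REQ_READ_REG, CH341_REG_BREAK, DEFAULT_TIMEOUT and the quirk flag are the driver-internal constants used above):

	#include <linux/slab.h>
	#include <linux/usb.h>

	static int demo_probe_break_support(struct usb_device *udev,
					    unsigned long *quirks)
	{
		u8 *buf;
		int r;

		buf = kmalloc(2, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				    CH341_REQ_READ_REG,
				    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				    CH341_REG_BREAK, 0, buf, 2, DEFAULT_TIMEOUT);
		if (r == -EPIPE) {
			/* Stalled read: limited variant, no break support. */
			*quirks |= CH341_QUIRK_LIMITED_PRESCALER;
			r = 0;
		} else if (r >= 0 && r != 2) {
			r = -EIO;	/* unexpected short read */
		} else if (r == 2) {
			r = 0;		/* register readable: full-featured part */
		}
		/* any other negative value is a real transfer error */

		kfree(buf);
		return r;
	}
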
diff --git a/drivers/usb/serial/io_16654.h b/drivers/usb/serial/io_16654.h
index 4980f72dc56f..f18501f056cf 100644
--- a/drivers/usb/serial/io_16654.h
+++ b/drivers/usb/serial/io_16654.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* 16654.H Definitions for 16C654 UART used on EdgePorts
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index 2e7fedbaf2ff..43ba53a3a6fa 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* io_edgeport.h Edgeport Linux Interface definitions
diff --git a/drivers/usb/serial/io_ionsp.h b/drivers/usb/serial/io_ionsp.h
index 4b8e4823bd45..db4fce815c97 100644
--- a/drivers/usb/serial/io_ionsp.h
+++ b/drivers/usb/serial/io_ionsp.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* IONSP.H Definitions for I/O Networks Serial Protocol
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index 9bbcee37524e..50b899d55ed0 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*****************************************************************************
*
* Copyright (C) 1997-2002 Inside Out Networks, Inc.
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 0d1a5bb4636e..52cbc353051f 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/************************************************************************
*
* USBVEND.H Vendor-specific USB definitions
diff --git a/drivers/usb/serial/iuu_phoenix.h b/drivers/usb/serial/iuu_phoenix.h
index b400b262f72e..87992b24d904 100644
--- a/drivers/usb/serial/iuu_phoenix.h
+++ b/drivers/usb/serial/iuu_phoenix.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Infinity Unlimited USB Phoenix driver
*
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 0084edf518e8..e3d09a83cab1 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for MCT (Magic Control Technology) USB-RS232 Converter Driver
*
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8bfffca3e4ae..254a8bbeea67 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1157,6 +1157,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1031, 0xff), /* Telit LE910C1-EUX */
+ .driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
diff --git a/drivers/usb/serial/oti6858.h b/drivers/usb/serial/oti6858.h
index 1226bf2347eb..5c25836fdcd9 100644
--- a/drivers/usb/serial/oti6858.h
+++ b/drivers/usb/serial/oti6858.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
*/
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 52db5519aaf0..7d3090ee7e0c 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Prolific PL2303 USB to serial adaptor driver header file
*/
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ce0401d3137f..d147feae83e6 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */
{DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */
{DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
{DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 13be21aad2f4..4b9845807bee 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -270,6 +270,10 @@ static void usb_wwan_indat_callback(struct urb *urb)
if (status) {
dev_dbg(dev, "%s: nonzero status: %d on endpoint %02x.\n",
__func__, status, endpoint);
+
+ /* don't resubmit on fatal errors */
+ if (status == -ESHUTDOWN || status == -ENOENT)
+ return;
} else {
if (urb->actual_length) {
tty_insert_flip_string(&port->port, data,
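
Note on the musb_host and usb_wwan hunks above: the host controller now reports an unreachable device with the fatal -ESHUTDOWN instead of -EPROTO, and the WWAN bulk-in completion handler stops resubmitting when it sees -ESHUTDOWN or -ENOENT, which breaks the resubmit loop against a detached or powered-off modem. A minimal sketch of that completion-handler pattern:

	#include <linux/usb.h>

	static void demo_read_callback(struct urb *urb)
	{
		int status = urb->status;

		if (status) {
			dev_dbg(&urb->dev->dev, "nonzero urb status: %d\n", status);
			/* Fatal statuses: device gone or URB killed, do not requeue. */
			if (status == -ESHUTDOWN || status == -ENOENT)
				return;
		} else {
			/* consume urb->transfer_buffer, urb->actual_length bytes */
		}

		if (usb_submit_urb(urb, GFP_ATOMIC))
			dev_err(&urb->dev->dev, "resubmit failed\n");
	}
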
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 4bd69d047036..622d639ce74e 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* USB HandSpring Visor driver
*
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index 269e727a92f9..7e63074c9128 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* USB ConnectTech WhiteHEAT driver
*
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 16ce06039a4d..a6505ceb6693 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Header File
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 2dbf9c7d9749..dcd7b7e5eda8 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Header file for Special Initializers for certain USB Mass Storage devices
*
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index 072f1ffda2af..1d102463a66c 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Protocol Functions Header File
diff --git a/drivers/usb/storage/scsiglue.h b/drivers/usb/storage/scsiglue.h
index 2bc5ea045bf7..2a79c3ed4d86 100644
--- a/drivers/usb/storage/scsiglue.h
+++ b/drivers/usb/storage/scsiglue.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* SCSI Connecting Glue Header File
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index e605cbc3d8bf..b9f78ef3edc3 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -129,15 +129,11 @@ int sierra_ms_init(struct us_data *us)
int result, retries;
struct swoc_info *swocInfo;
struct usb_device *udev;
- struct Scsi_Host *sh;
retries = 3;
result = 0;
udev = us->pusb_dev;
- sh = us_to_host(us);
- scsi_get_host_dev(sh);
-
/* Force Modem mode */
if (swi_tru_install == TRU_FORCE_MODEM) {
usb_stor_dbg(us, "SWIMS: Forcing Modem Mode\n");
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index fb3bb4ee4ccf..74ffd0d7e7b6 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Transport Functions Header File
diff --git a/drivers/usb/storage/unusual_alauda.h b/drivers/usb/storage/unusual_alauda.h
index 0ec8c99a4976..13f61ec88cde 100644
--- a/drivers/usb/storage/unusual_alauda.h
+++ b/drivers/usb/storage/unusual_alauda.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Alauda-based card readers
*/
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index fb99e526cd48..0547daf116a2 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for devices based on the Cypress USB/ATA bridge
* with support for ATACB
diff --git a/drivers/usb/storage/unusual_datafab.h b/drivers/usb/storage/unusual_datafab.h
index fdab5e7d68ca..5335b5d2bd79 100644
--- a/drivers/usb/storage/unusual_datafab.h
+++ b/drivers/usb/storage/unusual_datafab.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Datafab USB Compact Flash reader
*/
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f6c3681fa2e9..b6a9a7451620 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Unusual Devices File
diff --git a/drivers/usb/storage/unusual_ene_ub6250.h b/drivers/usb/storage/unusual_ene_ub6250.h
index 9134b91fbd73..a3b32abc2b2f 100644
--- a/drivers/usb/storage/unusual_ene_ub6250.h
+++ b/drivers/usb/storage/unusual_ene_ub6250.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
#if defined(CONFIG_USB_STORAGE_ENE_UB6250) || \
defined(CONFIG_USB_STORAGE_ENE_UB6250_MODULE)
diff --git a/drivers/usb/storage/unusual_freecom.h b/drivers/usb/storage/unusual_freecom.h
index 949231c7a36b..9ca686364a93 100644
--- a/drivers/usb/storage/unusual_freecom.h
+++ b/drivers/usb/storage/unusual_freecom.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Freecom USB/IDE adaptor
*/
diff --git a/drivers/usb/storage/unusual_isd200.h b/drivers/usb/storage/unusual_isd200.h
index d03a02cc904e..f248190bd666 100644
--- a/drivers/usb/storage/unusual_isd200.h
+++ b/drivers/usb/storage/unusual_isd200.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for In-System Design, Inc. ISD200 ASIC
*/
diff --git a/drivers/usb/storage/unusual_jumpshot.h b/drivers/usb/storage/unusual_jumpshot.h
index c323338881ef..44878f849c1c 100644
--- a/drivers/usb/storage/unusual_jumpshot.h
+++ b/drivers/usb/storage/unusual_jumpshot.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Lexar "Jumpshot" Compact Flash reader
*/
diff --git a/drivers/usb/storage/unusual_karma.h b/drivers/usb/storage/unusual_karma.h
index 8f1eebd71d2c..9fbed4cbc895 100644
--- a/drivers/usb/storage/unusual_karma.h
+++ b/drivers/usb/storage/unusual_karma.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Rio Karma
*/
diff --git a/drivers/usb/storage/unusual_onetouch.h b/drivers/usb/storage/unusual_onetouch.h
index c76d4e990f7b..cdfee8f6cf37 100644
--- a/drivers/usb/storage/unusual_onetouch.h
+++ b/drivers/usb/storage/unusual_onetouch.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for the Maxtor OneTouch USB hard drive's button
*/
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 7e14c2d7cf73..945dcb19d31d 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for Realtek RTS51xx USB card reader
*
diff --git a/drivers/usb/storage/unusual_sddr09.h b/drivers/usb/storage/unusual_sddr09.h
index 650cf2862754..bfb650974129 100644
--- a/drivers/usb/storage/unusual_sddr09.h
+++ b/drivers/usb/storage/unusual_sddr09.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for SanDisk SDDR-09 SmartMedia reader
*/
diff --git a/drivers/usb/storage/unusual_sddr55.h b/drivers/usb/storage/unusual_sddr55.h
index e89df2cea7bd..6d6f76eb0630 100644
--- a/drivers/usb/storage/unusual_sddr55.h
+++ b/drivers/usb/storage/unusual_sddr55.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for SanDisk SDDR-55 SmartMedia reader
*/
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 37157ed9a881..162b09d69f62 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Attached SCSI devices - Unusual Devices File
*
diff --git a/drivers/usb/storage/unusual_usbat.h b/drivers/usb/storage/unusual_usbat.h
index 05abf6870b8f..f9d3e5efc39d 100644
--- a/drivers/usb/storage/unusual_usbat.h
+++ b/drivers/usb/storage/unusual_usbat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Unusual Devices File for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*/
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 5850d624cac7..0451fac1adce 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Driver for USB Mass Storage compliant devices
* Main Header File
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index b4f2aac7ae8a..559dd06117e7 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -64,7 +64,8 @@ config TYPEC_HD3SS3220
config TYPEC_TPS6598X
tristate "TI TPS6598x USB Power Delivery controller driver"
depends on I2C
- select REGMAP_I2C
+ depends on REGMAP_I2C
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
help
Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
Delivery controller.
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 8d894bdff77d..c9234748537a 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -917,6 +917,12 @@ EXPORT_SYMBOL_GPL(typec_unregister_cable);
/* ------------------------------------------------------------------------- */
/* USB Type-C ports */
+static const char * const typec_orientations[] = {
+ [TYPEC_ORIENTATION_NONE] = "unknown",
+ [TYPEC_ORIENTATION_NORMAL] = "normal",
+ [TYPEC_ORIENTATION_REVERSE] = "reverse",
+};
+
static const char * const typec_roles[] = {
[TYPEC_SINK] = "sink",
[TYPEC_SOURCE] = "source",
@@ -1248,18 +1254,9 @@ static ssize_t orientation_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct typec_port *p = to_typec_port(dev);
- enum typec_orientation orientation = typec_get_orientation(p);
-
- switch (orientation) {
- case TYPEC_ORIENTATION_NORMAL:
- return sprintf(buf, "%s\n", "normal");
- case TYPEC_ORIENTATION_REVERSE:
- return sprintf(buf, "%s\n", "reverse");
- case TYPEC_ORIENTATION_NONE:
- default:
- return sprintf(buf, "%s\n", "unknown");
- }
+ struct typec_port *port = to_typec_port(dev);
+
+ return sprintf(buf, "%s\n", typec_orientations[port->orientation]);
}
static DEVICE_ATTR_RO(orientation);
@@ -1452,6 +1449,21 @@ void typec_set_pwr_opmode(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
/**
+ * typec_find_orientation - Convert orientation string to enum typec_orientation
+ * @name: Orientation string
+ *
+ * This routine is used to find the typec_orientation by its string name @name.
+ *
+ * Returns the orientation value on success, otherwise negative error code.
+ */
+int typec_find_orientation(const char *name)
+{
+ return match_string(typec_orientations, ARRAY_SIZE(typec_orientations),
+ name);
+}
+EXPORT_SYMBOL_GPL(typec_find_orientation);
+
+/**
* typec_find_port_power_role - Get the typec port power capability
* @name: port power capability string
*
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 1ac0a3eb7dd8..962bc69a6a59 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -92,6 +92,9 @@ struct pmc_usb_port {
u8 usb2_port;
u8 usb3_port;
+
+ enum typec_orientation sbu_orientation;
+ enum typec_orientation hsl_orientation;
};
struct pmc_usb {
@@ -101,6 +104,22 @@ struct pmc_usb {
struct pmc_usb_port *port;
};
+static int sbu_orientation(struct pmc_usb_port *port)
+{
+ if (port->sbu_orientation)
+ return port->sbu_orientation - 1;
+
+ return port->orientation - 1;
+}
+
+static int hsl_orientation(struct pmc_usb_port *port)
+{
+ if (port->hsl_orientation)
+ return port->hsl_orientation - 1;
+
+ return port->orientation - 1;
+}
+
static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
{
u8 response[4];
@@ -152,8 +171,9 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
+
+ req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
+ req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
PMC_USB_ALTMODE_DP_MODE_SHIFT;
@@ -177,8 +197,9 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= (port->orientation - 1) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
+
+ req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
+ req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3)
req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE;
@@ -215,8 +236,8 @@ static int pmc_usb_connect(struct pmc_usb_port *port)
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT;
- msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_HSL_SHIFT;
- msg[1] |= (port->orientation - 1) << PMC_USB_MSG_ORI_AUX_SHIFT;
+ msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT;
+ msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT;
return pmc_usb_command(port, msg, sizeof(msg));
}
@@ -300,6 +321,7 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
struct usb_role_switch_desc desc = { };
struct typec_switch_desc sw_desc = { };
struct typec_mux_desc mux_desc = { };
+ const char *str;
int ret;
ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port);
@@ -310,6 +332,14 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
if (ret)
return ret;
+ ret = fwnode_property_read_string(fwnode, "sbu-orientation", &str);
+ if (!ret)
+ port->sbu_orientation = typec_find_orientation(str);
+
+ ret = fwnode_property_read_string(fwnode, "hsl-orientation", &str);
+ if (!ret)
+ port->hsl_orientation = typec_find_orientation(str);
+
port->num = index;
port->pmc = pmc;
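
Note on the intel_pmc_mux hunk above: sbu_orientation() and hsl_orientation() let the "sbu-orientation" and "hsl-orientation" firmware properties override the connector orientation per signal pair, falling back to port->orientation when no override was given. The "- 1" presumably maps enum typec_orientation (NORMAL = 1, REVERSE = 2) onto the 0/1 encoding the PMC message fields expect, which is also why TYPEC_ORIENTATION_NONE (0) doubles as "no override" in the test.
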
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index b498960ff72b..b28facece43c 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -9,14 +9,13 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/extcon.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/proc_fs.h>
#include <linux/regulator/consumer.h>
@@ -83,7 +82,7 @@ struct fusb302_chip {
struct work_struct irq_work;
bool irq_suspended;
bool irq_while_suspended;
- int gpio_int_n;
+ struct gpio_desc *gpio_int_n;
int gpio_int_n_irq;
struct extcon_dev *extcon;
@@ -1618,30 +1617,17 @@ done:
static int init_gpio(struct fusb302_chip *chip)
{
- struct device_node *node;
+ struct device *dev = chip->dev;
int ret = 0;
- node = chip->dev->of_node;
- chip->gpio_int_n = of_get_named_gpio(node, "fcs,int_n", 0);
- if (!gpio_is_valid(chip->gpio_int_n)) {
- ret = chip->gpio_int_n;
- dev_err(chip->dev, "cannot get named GPIO Int_N, ret=%d", ret);
- return ret;
- }
- ret = devm_gpio_request(chip->dev, chip->gpio_int_n, "fcs,int_n");
- if (ret < 0) {
- dev_err(chip->dev, "cannot request GPIO Int_N, ret=%d", ret);
- return ret;
- }
- ret = gpio_direction_input(chip->gpio_int_n);
- if (ret < 0) {
- dev_err(chip->dev,
- "cannot set GPIO Int_N to input, ret=%d", ret);
- return ret;
+ chip->gpio_int_n = devm_gpiod_get(dev, "fcs,int_n", GPIOD_IN);
+ if (IS_ERR(chip->gpio_int_n)) {
+ dev_err(dev, "failed to request gpio_int_n\n");
+ return PTR_ERR(chip->gpio_int_n);
}
- ret = gpio_to_irq(chip->gpio_int_n);
+ ret = gpiod_to_irq(chip->gpio_int_n);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"cannot request IRQ for GPIO Int_N, ret=%d", ret);
return ret;
}
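
Note on the fusb302 hunk above: the open-coded legacy GPIO sequence (of_get_named_gpio(), devm_gpio_request(), gpio_direction_input(), gpio_to_irq()) collapses into the gpiod consumer API, where devm_gpiod_get(dev, "fcs,int_n", GPIOD_IN) both resolves the mapping and configures the line as an input, and gpiod_to_irq() replaces gpio_to_irq(). A minimal sketch of the pattern:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int demo_init_int_gpio(struct device *dev,
				      struct gpio_desc **descp, int *irqp)
	{
		struct gpio_desc *desc;
		int irq;

		desc = devm_gpiod_get(dev, "fcs,int_n", GPIOD_IN);
		if (IS_ERR(desc)) {
			dev_err(dev, "failed to request gpio_int_n\n");
			return PTR_ERR(desc);
		}

		irq = gpiod_to_irq(desc);
		if (irq < 0) {
			dev_err(dev, "cannot map Int_N gpio to an IRQ: %d\n", irq);
			return irq;
		}

		*descp = desc;
		*irqp = irq;
		return 0;
	}
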
diff --git a/drivers/usb/typec/tcpm/fusb302_reg.h b/drivers/usb/typec/tcpm/fusb302_reg.h
index 00b39d365478..edc0e4b0f1e6 100644
--- a/drivers/usb/typec/tcpm/fusb302_reg.h
+++ b/drivers/usb/typec/tcpm/fusb302_reg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2016-2017 Google, Inc
*
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index 0698addd1185..b7c9fe5caabe 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -12,6 +12,7 @@
#include <linux/regmap.h>
#include <linux/interrupt.h>
#include <linux/usb/typec.h>
+#include <linux/usb/role.h>
/* Register offsets */
#define TPS_REG_VID 0x00
@@ -94,6 +95,7 @@ struct tps6598x {
struct typec_port *port;
struct typec_partner *partner;
struct usb_pd_identity partner_identity;
+ struct usb_role_switch *role_sw;
};
/*
@@ -190,6 +192,23 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps)
return 0;
}
+static void tps6598x_set_data_role(struct tps6598x *tps,
+ enum typec_data_role role, bool connected)
+{
+ enum usb_role role_val;
+
+ if (role == TYPEC_HOST)
+ role_val = USB_ROLE_HOST;
+ else
+ role_val = USB_ROLE_DEVICE;
+
+ if (!connected)
+ role_val = USB_ROLE_NONE;
+
+ usb_role_switch_set_role(tps->role_sw, role_val);
+ typec_set_data_role(tps->port, role);
+}
+
static int tps6598x_connect(struct tps6598x *tps, u32 status)
{
struct typec_partner_desc desc;
@@ -220,7 +239,7 @@ static int tps6598x_connect(struct tps6598x *tps, u32 status)
typec_set_pwr_opmode(tps->port, mode);
typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
- typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+ tps6598x_set_data_role(tps, TPS_STATUS_DATAROLE(status), true);
tps->partner = typec_register_partner(tps->port, &desc);
if (IS_ERR(tps->partner))
@@ -240,7 +259,7 @@ static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB);
typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
- typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+ tps6598x_set_data_role(tps, TPS_STATUS_DATAROLE(status), false);
}
static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
@@ -328,7 +347,7 @@ static int tps6598x_dr_set(struct typec_port *port, enum typec_data_role role)
goto out_unlock;
}
- typec_set_data_role(tps->port, role);
+ tps6598x_set_data_role(tps, role, true);
out_unlock:
mutex_unlock(&tps->lock);
@@ -452,6 +471,7 @@ static int tps6598x_probe(struct i2c_client *client)
{
struct typec_capability typec_cap = { };
struct tps6598x *tps;
+ struct fwnode_handle *fwnode;
u32 status;
u32 conf;
u32 vid;
@@ -495,11 +515,22 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ fwnode = device_get_named_child_node(&client->dev, "connector");
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ tps->role_sw = fwnode_usb_role_switch_get(fwnode);
+ if (IS_ERR(tps->role_sw)) {
+ ret = PTR_ERR(tps->role_sw);
+ goto err_fwnode_put;
+ }
+
typec_cap.revision = USB_TYPEC_REV_1_2;
typec_cap.pd_revision = 0x200;
typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
typec_cap.driver_data = tps;
typec_cap.ops = &tps6598x_ops;
+ typec_cap.fwnode = fwnode;
switch (TPS_SYSCONF_PORTINFO(conf)) {
case TPS_PORTINFO_SINK_ACCESSORY:
@@ -525,12 +556,16 @@ static int tps6598x_probe(struct i2c_client *client)
typec_cap.data = TYPEC_PORT_DFP;
break;
default:
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_role_put;
}
tps->port = typec_register_port(&client->dev, &typec_cap);
- if (IS_ERR(tps->port))
- return PTR_ERR(tps->port);
+ if (IS_ERR(tps->port)) {
+ ret = PTR_ERR(tps->port);
+ goto err_role_put;
+ }
+ fwnode_handle_put(fwnode);
if (status & TPS_STATUS_PLUG_PRESENT) {
ret = tps6598x_connect(tps, status);
@@ -545,12 +580,19 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret) {
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
- return ret;
+ goto err_role_put;
}
i2c_set_clientdata(client, tps);
return 0;
+
+err_role_put:
+ usb_role_switch_put(tps->role_sw);
+err_fwnode_put:
+ fwnode_handle_put(fwnode);
+
+ return ret;
}
static int tps6598x_remove(struct i2c_client *client)
@@ -559,10 +601,17 @@ static int tps6598x_remove(struct i2c_client *client)
tps6598x_disconnect(tps, 0);
typec_unregister_port(tps->port);
+ usb_role_switch_put(tps->role_sw);
return 0;
}
+static const struct of_device_id tps6598x_of_match[] = {
+ { .compatible = "ti,tps6598x", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps6598x_of_match);
+
static const struct i2c_device_id tps6598x_id[] = {
{ "tps6598x" },
{ }
@@ -572,6 +621,7 @@ MODULE_DEVICE_TABLE(i2c, tps6598x_id);
static struct i2c_driver tps6598x_i2c_driver = {
.driver = {
.name = "tps6598x",
+ .of_match_table = tps6598x_of_match,
},
.probe_new = tps6598x_probe,
.remove = tps6598x_remove,
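
Note on the tps6598x hunk above: once probe() starts taking references (the "connector" child fwnode and the USB role switch), every later failure has to release them in reverse order, hence the new err_role_put/err_fwnode_put labels, and the fwnode reference is dropped on the success path once the port has been registered. A compressed sketch of that unwind shape, with demo_register_port() as a placeholder for the typec registration step (note that device_get_named_child_node() returns NULL when the child is absent):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/property.h>
	#include <linux/usb/role.h>

	static int demo_register_port(struct device *dev, struct fwnode_handle *fw)
	{
		return 0;	/* placeholder for typec_register_port() etc. */
	}

	static int demo_probe(struct device *dev)
	{
		struct fwnode_handle *fwnode;
		struct usb_role_switch *sw;
		int ret;

		fwnode = device_get_named_child_node(dev, "connector");
		if (!fwnode)
			return -ENODEV;

		sw = fwnode_usb_role_switch_get(fwnode);
		if (IS_ERR(sw)) {
			ret = PTR_ERR(sw);
			goto err_fwnode_put;
		}

		ret = demo_register_port(dev, fwnode);
		if (ret)
			goto err_role_put;

		fwnode_handle_put(fwnode);	/* registration holds what it needs */
		return 0;

	err_role_put:
		usb_role_switch_put(sw);
	err_fwnode_put:
		fwnode_handle_put(fwnode);
		return ret;
	}
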
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index b35e15a1f02c..8a8eb5cb8e0f 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -7,6 +7,10 @@ typec_ucsi-y := ucsi.o
typec_ucsi-$(CONFIG_TRACING) += trace.o
+ifneq ($(CONFIG_POWER_SUPPLY),)
+ typec_ucsi-y += psy.o
+endif
+
ifneq ($(CONFIG_TYPEC_DP_ALTMODE),)
typec_ucsi-y += displayport.o
endif
diff --git a/drivers/usb/typec/ucsi/psy.c b/drivers/usb/typec/ucsi/psy.c
new file mode 100644
index 000000000000..26ed0b520749
--- /dev/null
+++ b/drivers/usb/typec/ucsi/psy.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Power Supply for UCSI
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: K V, Abhilash <abhilash.k.v@intel.com>
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/property.h>
+#include <linux/usb/pd.h>
+
+#include "ucsi.h"
+
+/* Power Supply access to expose source power information */
+enum ucsi_psy_online_states {
+ UCSI_PSY_OFFLINE = 0,
+ UCSI_PSY_FIXED_ONLINE,
+ UCSI_PSY_PROG_ONLINE,
+};
+
+static enum power_supply_property ucsi_psy_props[] = {
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+};
+
+static int ucsi_psy_get_online(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ val->intval = UCSI_PSY_OFFLINE;
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED &&
+ (con->status.flags & UCSI_CONSTAT_PWR_DIR) == TYPEC_SINK)
+ val->intval = UCSI_PSY_FIXED_ONLINE;
+ return 0;
+}
+
+static int ucsi_psy_get_voltage_min(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ pdo = con->src_pdos[0];
+ val->intval = pdo_fixed_voltage(pdo) * 1000;
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ val->intval = UCSI_TYPEC_VSAFE5V * 1000;
+ break;
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_voltage_max(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ if (con->num_pdos > 0) {
+ pdo = con->src_pdos[con->num_pdos - 1];
+ val->intval = pdo_fixed_voltage(pdo) * 1000;
+ } else {
+ val->intval = 0;
+ }
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ val->intval = UCSI_TYPEC_VSAFE5V * 1000;
+ break;
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_voltage_now(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ int index;
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ index = rdo_index(con->rdo);
+ if (index > 0) {
+ pdo = con->src_pdos[index - 1];
+ val->intval = pdo_fixed_voltage(pdo) * 1000;
+ } else {
+ val->intval = 0;
+ }
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ val->intval = UCSI_TYPEC_VSAFE5V * 1000;
+ break;
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_current_max(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u32 pdo;
+
+ switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
+ case UCSI_CONSTAT_PWR_OPMODE_PD:
+ if (con->num_pdos > 0) {
+ pdo = con->src_pdos[con->num_pdos - 1];
+ val->intval = pdo_max_current(pdo) * 1000;
+ } else {
+ val->intval = 0;
+ }
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ val->intval = UCSI_TYPEC_1_5_CURRENT * 1000;
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ val->intval = UCSI_TYPEC_3_0_CURRENT * 1000;
+ break;
+ case UCSI_CONSTAT_PWR_OPMODE_BC:
+ case UCSI_CONSTAT_PWR_OPMODE_DEFAULT:
+ /* UCSI can't tell b/w DCP/CDP or USB2/3x1/3x2 SDP chargers */
+ default:
+ val->intval = 0;
+ break;
+ }
+ return 0;
+}
+
+static int ucsi_psy_get_current_now(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u16 flags = con->status.flags;
+
+ if (UCSI_CONSTAT_PWR_OPMODE(flags) == UCSI_CONSTAT_PWR_OPMODE_PD)
+ val->intval = rdo_op_current(con->rdo) * 1000;
+ else
+ val->intval = 0;
+ return 0;
+}
+
+static int ucsi_psy_get_usb_type(struct ucsi_connector *con,
+ union power_supply_propval *val)
+{
+ u16 flags = con->status.flags;
+
+ val->intval = POWER_SUPPLY_USB_TYPE_C;
+ if (flags & UCSI_CONSTAT_CONNECTED &&
+ UCSI_CONSTAT_PWR_OPMODE(flags) == UCSI_CONSTAT_PWR_OPMODE_PD)
+ val->intval = POWER_SUPPLY_USB_TYPE_PD;
+
+ return 0;
+}
+
+static int ucsi_psy_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ucsi_connector *con = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucsi_psy_get_usb_type(con, val);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return ucsi_psy_get_online(con, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return ucsi_psy_get_voltage_min(con, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ return ucsi_psy_get_voltage_max(con, val);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return ucsi_psy_get_voltage_now(con, val);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucsi_psy_get_current_max(con, val);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ucsi_psy_get_current_now(con, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum power_supply_usb_type ucsi_psy_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_C,
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_PD_PPS,
+};
+
+int ucsi_register_port_psy(struct ucsi_connector *con)
+{
+ struct power_supply_config psy_cfg = {};
+ struct device *dev = con->ucsi->dev;
+ char *psy_name;
+
+ psy_cfg.drv_data = con;
+ psy_cfg.fwnode = dev_fwnode(dev);
+
+ psy_name = devm_kasprintf(dev, GFP_KERNEL, "ucsi-source-psy-%s%d",
+ dev_name(dev), con->num);
+ if (!psy_name)
+ return -ENOMEM;
+
+ con->psy_desc.name = psy_name;
+ con->psy_desc.type = POWER_SUPPLY_TYPE_USB,
+ con->psy_desc.usb_types = ucsi_psy_usb_types;
+ con->psy_desc.num_usb_types = ARRAY_SIZE(ucsi_psy_usb_types);
+ con->psy_desc.properties = ucsi_psy_props,
+ con->psy_desc.num_properties = ARRAY_SIZE(ucsi_psy_props),
+ con->psy_desc.get_property = ucsi_psy_get_prop;
+
+ con->psy = power_supply_register(dev, &con->psy_desc, &psy_cfg);
+
+ return PTR_ERR_OR_ZERO(con->psy);
+}
+
+void ucsi_unregister_port_psy(struct ucsi_connector *con)
+{
+ if (IS_ERR_OR_NULL(con->psy))
+ return;
+
+ power_supply_unregister(con->psy);
+}
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 48ad1dc1b1b2..cb62ad835761 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -35,16 +35,16 @@ const char *ucsi_cmd_str(u64 raw_cmd)
const char *ucsi_cci_str(u32 cci)
{
- if (cci & GENMASK(7, 0)) {
- if (cci & BIT(29))
+ if (UCSI_CCI_CONNECTOR(cci)) {
+ if (cci & UCSI_CCI_ACK_COMPLETE)
return "Event pending (ACK completed)";
- if (cci & BIT(31))
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
return "Event pending (command completed)";
return "Connector Change";
}
- if (cci & BIT(29))
+ if (cci & UCSI_CCI_ACK_COMPLETE)
return "ACK completed";
- if (cci & BIT(31))
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
return "Command completed";
return "";
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index ddf2ad3752de..d0c63afaf345 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -492,19 +492,45 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
}
}
+static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+{
+ struct ucsi *ucsi = con->ucsi;
+ u64 command;
+ int ret;
+
+ command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
+ command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
+ command |= UCSI_GET_PDOS_SRC_PDOS;
+ ret = ucsi_run_command(ucsi, command, con->src_pdos,
+ sizeof(con->src_pdos));
+ if (ret < 0) {
+ dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
+ return;
+ }
+ con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
+ if (ret == 0)
+ dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
case UCSI_CONSTAT_PWR_OPMODE_PD:
+ con->rdo = con->status.request_data_obj;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
+ ucsi_get_pdos(con, 1);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
+ con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_1_5A);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0:
+ con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_3_0A);
break;
default:
+ con->rdo = 0;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_USB);
break;
}
@@ -566,6 +592,8 @@ static void ucsi_partner_change(struct ucsi_connector *con)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
@@ -611,7 +639,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
- if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE)
+ if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
+ con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE)
ucsi_pwr_opmode_change(con);
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
@@ -627,6 +656,8 @@ static void ucsi_handle_connector_change(struct work_struct *work)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
@@ -905,6 +936,10 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
cap->driver_data = con;
cap->ops = &ucsi_ops;
+ ret = ucsi_register_port_psy(con);
+ if (ret)
+ return ret;
+
/* Register the connector */
con->port = typec_register_port(ucsi->dev, cap);
if (IS_ERR(con->port))
@@ -927,6 +962,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
@@ -1029,6 +1066,7 @@ err_unregister:
for (con = ucsi->connector; con->port; con++) {
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
typec_unregister_port(con->port);
con->port = NULL;
}
@@ -1152,6 +1190,7 @@ void ucsi_unregister(struct ucsi *ucsi)
ucsi_unregister_partner(&ucsi->connector[i]);
ucsi_unregister_altmodes(&ucsi->connector[i],
UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
typec_unregister_port(ucsi->connector[i].port);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 8e831108f481..cba6f77bea61 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -5,6 +5,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
@@ -21,7 +22,7 @@ struct ucsi_altmode;
#define UCSI_MESSAGE_OUT 32
/* Command Status and Connector Change Indication (CCI) bits */
-#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 0)) >> 1)
+#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
#define UCSI_CCI_LENGTH(_c_) (((_c_) & GENMASK(15, 8)) >> 8)
#define UCSI_CCI_NOT_SUPPORTED BIT(25)
#define UCSI_CCI_CANCEL_COMPLETE BIT(26)
@@ -130,6 +131,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_ALTMODE_OFFSET(_r_) ((u64)(_r_) << 32)
#define UCSI_GET_ALTMODE_NUM_ALTMODES(_r_) ((u64)(_r_) << 40)
+/* GET_PDOS command bits */
+#define UCSI_GET_PDOS_PARTNER_PDO(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PDOS_NUM_PDOS(_r_) ((u64)(_r_) << 32)
+#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
+
/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
@@ -294,6 +300,11 @@ struct ucsi {
#define UCSI_MAX_SVID 5
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
+#define UCSI_MAX_PDOS (4)
+
+#define UCSI_TYPEC_VSAFE5V 5000
+#define UCSI_TYPEC_1_5_CURRENT 1500
+#define UCSI_TYPEC_3_0_CURRENT 3000
struct ucsi_connector {
int num;
@@ -313,6 +324,11 @@ struct ucsi_connector {
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+ u32 rdo;
+ u32 src_pdos[UCSI_MAX_PDOS];
+ int num_pdos;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
@@ -321,6 +337,14 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
void ucsi_altmode_update_active(struct ucsi_connector *con);
int ucsi_resume(struct ucsi *ucsi);
+#if IS_ENABLED(CONFIG_POWER_SUPPLY)
+int ucsi_register_port_psy(struct ucsi_connector *con);
+void ucsi_unregister_port_psy(struct ucsi_connector *con);
+#else
+static inline int ucsi_register_port_psy(struct ucsi_connector *con) { return 0; }
+static inline void ucsi_unregister_port_psy(struct ucsi_connector *con) { }
+#endif /* CONFIG_POWER_SUPPLY */
+
#if IS_ENABLED(CONFIG_TYPEC_DP_ALTMODE)
struct typec_altmode *
ucsi_register_displayport(struct ucsi_connector *con,
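The new ucsi_get_pdos() helper packs the whole GET_PDOS request into a single 64-bit command word using the bit-field macros added to ucsi.h above. Below is a minimal userspace sketch of that packing; the UCSI_COMMAND() opcode field and the connector-number field are not part of this hunk, so the 0x10 opcode and the bit-16 connector position are assumptions made for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Field macros copied from the ucsi.h hunk above. */
#define UCSI_GET_PDOS_PARTNER_PDO(_r_)	((uint64_t)(_r_) << 23)
#define UCSI_GET_PDOS_NUM_PDOS(_r_)	((uint64_t)(_r_) << 32)
#define UCSI_GET_PDOS_SRC_PDOS		((uint64_t)1 << 34)
#define UCSI_MAX_PDOS			4

/* Assumed for illustration only: opcode in bits 0-7, connector number at bit 16. */
#define UCSI_GET_PDOS_OPCODE		0x10
#define UCSI_COMMAND(_cmd_)		((uint64_t)(_cmd_) & 0xff)
#define UCSI_CONNECTOR_NUMBER(_num_)	((uint64_t)(_num_) << 16)

int main(void)
{
	int connector = 1, is_partner = 1;
	uint64_t command;

	command  = UCSI_COMMAND(UCSI_GET_PDOS_OPCODE);
	command |= UCSI_CONNECTOR_NUMBER(connector);
	command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
	command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);	/* count field is 0-based */
	command |= UCSI_GET_PDOS_SRC_PDOS;			/* ask for source PDOs */

	printf("GET_PDOS command word: 0x%016llx\n",
	       (unsigned long long)command);
	return 0;
}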
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index e8140065c8a5..3e1ceb8e9f2b 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -10,7 +10,7 @@ if VDPA
config VDPA_SIM
tristate "vDPA device simulator"
- depends on RUNTIME_TESTING_MENU && HAS_DMA && VHOST_DPN
+ depends on RUNTIME_TESTING_MENU && HAS_DMA
select VHOST_RING
default n
help
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index e24371d644b5..94bf0328b68d 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -185,6 +185,9 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
void ifcvf_reset(struct ifcvf_hw *hw)
{
+ hw->config_cb.callback = NULL;
+ hw->config_cb.private = NULL;
+
ifcvf_set_status(hw, 0);
/* flush set_status, make sure VF is stopped, reset */
ifcvf_get_status(hw);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index e80307092351..f4554412e607 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -27,6 +27,7 @@
((1ULL << VIRTIO_NET_F_MAC) | \
(1ULL << VIRTIO_F_ANY_LAYOUT) | \
(1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VIRTIO_NET_F_STATUS) | \
(1ULL << VIRTIO_F_ORDER_PLATFORM) | \
(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
(1ULL << VIRTIO_NET_F_MRG_RXBUF))
@@ -81,6 +82,9 @@ struct ifcvf_hw {
void __iomem *net_cfg;
struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
void __iomem * const *base;
+ char config_msix_name[256];
+ struct vdpa_callback config_cb;
+
};
struct ifcvf_adapter {
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index abf6a061cab6..f5a60c14b979 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -18,6 +18,16 @@
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"
+static irqreturn_t ifcvf_config_changed(int irq, void *arg)
+{
+ struct ifcvf_hw *vf = arg;
+
+ if (vf->config_cb.callback)
+ return vf->config_cb.callback(vf->config_cb.private);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
struct vring_info *vring = arg;
@@ -28,6 +38,68 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static void ifcvf_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int i;
+
+
+ for (i = 0; i < queues; i++)
+ devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+
+ ifcvf_free_irq_vectors(pdev);
+}
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int vector, i, ret, irq;
+
+ ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
+ IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
+ return ret;
+ }
+
+ snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
+ pci_name(pdev));
+ vector = 0;
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_config_changed, 0,
+ vf->config_msix_name, vf);
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
+ pci_name(pdev), i);
+ vector = i + IFCVF_MSI_QUEUE_OFF;
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_intr_handler, 0,
+ vf->vring[i].msix_name,
+ &vf->vring[i]);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to request irq for vq %d\n", i);
+ ifcvf_free_irq(adapter, i);
+
+ return ret;
+ }
+
+ vf->vring[i].irq = irq;
+ }
+
+ return 0;
+}
+
static int ifcvf_start_datapath(void *private)
{
struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
@@ -118,17 +190,37 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
struct ifcvf_adapter *adapter;
struct ifcvf_hw *vf;
+ u8 status_old;
+ int ret;
vf = vdpa_to_vf(vdpa_dev);
adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+ status_old = ifcvf_get_status(vf);
- if (status == 0) {
+ if (status_old == status)
+ return;
+
+ if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
ifcvf_stop_datapath(adapter);
+ ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
+ }
+
+ if (status == 0) {
ifcvf_reset_vring(adapter);
return;
}
- if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ ret = ifcvf_request_irq(adapter);
+ if (ret) {
+ status = ifcvf_get_status(vf);
+ status |= VIRTIO_CONFIG_S_FAILED;
+ ifcvf_set_status(vf, status);
+ return;
+ }
+
if (ifcvf_start_datapath(adapter) < 0)
IFCVF_ERR(adapter->pdev,
"Failed to set ifcvf vdpa status %u\n",
@@ -254,7 +346,10 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
struct vdpa_callback *cb)
{
- /* We don't support config interrupt */
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->config_cb.callback = cb->callback;
+ vf->config_cb.private = cb->private;
}
/*
@@ -284,38 +379,6 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_config_cb = ifcvf_vdpa_set_config_cb,
};
-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct ifcvf_hw *vf = &adapter->vf;
- int vector, i, ret, irq;
-
-
- for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
- snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
- pci_name(pdev), i);
- vector = i + IFCVF_MSI_QUEUE_OFF;
- irq = pci_irq_vector(pdev, vector);
- ret = devm_request_irq(&pdev->dev, irq,
- ifcvf_intr_handler, 0,
- vf->vring[i].msix_name,
- &vf->vring[i]);
- if (ret) {
- IFCVF_ERR(pdev,
- "Failed to request irq for vq %d\n", i);
- return ret;
- }
- vf->vring[i].irq = irq;
- }
-
- return 0;
-}
-
-static void ifcvf_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -349,13 +412,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
- IFCVF_MAX_INTR, PCI_IRQ_MSIX);
- if (ret < 0) {
- IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
- return ret;
- }
-
ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
if (ret) {
IFCVF_ERR(pdev,
@@ -379,12 +435,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
adapter->pdev = pdev;
adapter->vdpa.dma_dev = &pdev->dev;
- ret = ifcvf_request_irq(adapter);
- if (ret) {
- IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
- goto err;
- }
-
ret = ifcvf_init_hw(vf, pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
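The reworked ifcvf_vdpa_set_status() above requests the MSI-X vectors only on the transition into DRIVER_OK and frees them only on the transition out of it, instead of doing both at probe/remove time. A rough, self-contained model of that edge-triggered logic (stub functions stand in for ifcvf_request_irq()/ifcvf_free_irq(); this is an illustration, not driver code) is:

#include <stdio.h>

#define DRIVER_OK 0x04	/* stands in for VIRTIO_CONFIG_S_DRIVER_OK */

/* Stand-ins for ifcvf_request_irq() / ifcvf_free_irq() in the hunk above. */
static void request_irqs(void) { puts("request config + queue vectors"); }
static void free_irqs(void)    { puts("free config + queue vectors"); }

/* Edge-triggered status handling, modelled on ifcvf_vdpa_set_status(). */
static void set_status(unsigned char *hw_status, unsigned char status)
{
	unsigned char old = *hw_status;

	if (old == status)
		return;

	if ((old & DRIVER_OK) && !(status & DRIVER_OK))
		free_irqs();	/* DRIVER_OK cleared: stop datapath, drop vectors */

	if ((status & DRIVER_OK) && !(old & DRIVER_OK))
		request_irqs();	/* DRIVER_OK newly set: vectors needed before starting */

	*hw_status = status;
}

int main(void)
{
	unsigned char hw = 0;

	set_status(&hw, DRIVER_OK);	/* requests vectors */
	set_status(&hw, DRIVER_OK);	/* unchanged: no-op */
	set_status(&hw, 0);		/* frees vectors */
	return 0;
}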
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 01c456f7c1f7..c7334cc65bb2 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -101,7 +101,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
- vq->ready = 0;
+ vq->ready = false;
vq->desc_addr = 0;
vq->driver_addr = 0;
vq->device_addr = 0;
@@ -131,9 +131,10 @@ static void vdpasim_work(struct work_struct *work)
vdpasim, work);
struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
- size_t read, write, total_write;
- int err;
+ ssize_t read, write;
+ size_t total_write;
int pkts = 0;
+ int err;
spin_lock(&vdpasim->lock);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 8ad14e5c02bf..917fd84c1c6f 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -110,7 +110,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
"%s-%s", dev_driver_string(parent->dev),
group->name);
if (ret) {
- kfree(type);
+ kobject_put(&type->kobj);
return ERR_PTR(ret);
}
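The one-line fix above matters because kobject_init_and_add() hands ownership of the object's lifetime to the refcount: once initialised, the memory must be released through the kobject's release callback via kobject_put(), not kfree(), or the release path is skipped. A tiny refcount model of that ownership rule, using plain C stand-ins rather than the kobject API, might look like:

#include <stdio.h>
#include <stdlib.h>

/* Plain C stand-ins for kobject refcounting; not the kobject API itself. */
struct obj {
	int refcount;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	puts("release callback runs and frees the object");
	free(o);
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		o->release(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 1;	/* object initialised; refcount now owns the memory */
	o->release = obj_release;

	/* Error path: drop the reference; never free() an initialised object directly. */
	obj_put(o);
	return 0;
}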
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 6c6b37b5c04e..7c0779018b1b 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -26,6 +26,7 @@
#include <linux/vfio.h>
#include <linux/vgaarb.h>
#include <linux/nospec.h>
+#include <linux/sched/mm.h>
#include "vfio_pci_private.h"
@@ -184,6 +185,7 @@ no_mmap:
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
/*
* INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -519,6 +521,10 @@ static void vfio_pci_release(void *device_data)
vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
+ if (vdev->err_trigger)
+ eventfd_ctx_put(vdev->err_trigger);
+ if (vdev->req_trigger)
+ eventfd_ctx_put(vdev->req_trigger);
}
mutex_unlock(&vdev->reflck->lock);
@@ -736,6 +742,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
return 0;
}
+struct vfio_devices {
+ struct vfio_device **devices;
+ int cur_index;
+ int max_index;
+};
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -809,7 +821,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
- u16 orig_cmd;
+ u16 cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -829,10 +841,7 @@ static long vfio_pci_ioctl(void *device_data,
* Is it really there? Enable memory decode for
* implicit access in pci_map_rom().
*/
- pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig_cmd | PCI_COMMAND_MEMORY);
-
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
io = pci_map_rom(pdev, &size);
if (io) {
info.flags = VFIO_REGION_INFO_FLAG_READ;
@@ -840,8 +849,8 @@ static long vfio_pci_ioctl(void *device_data,
} else {
info.size = 0;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
- pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
@@ -984,8 +993,16 @@ static long vfio_pci_ioctl(void *device_data,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- return vdev->reset_works ?
- pci_try_reset_function(vdev->pdev) : -EINVAL;
+ int ret;
+
+ if (!vdev->reset_works)
+ return -EINVAL;
+
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ ret = pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+
+ return ret;
} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
struct vfio_pci_hot_reset_info hdr;
@@ -1065,8 +1082,9 @@ reset_info_exit:
int32_t *group_fds;
struct vfio_pci_group_entry *groups;
struct vfio_pci_group_info info;
+ struct vfio_devices devs = { .cur_index = 0 };
bool slot = false;
- int i, count = 0, ret = 0;
+ int i, group_idx, mem_idx = 0, count = 0, ret = 0;
minsz = offsetofend(struct vfio_pci_hot_reset, count);
@@ -1118,9 +1136,9 @@ reset_info_exit:
* user interface and store the group and iommu ID. This
* ensures the group is held across the reset.
*/
- for (i = 0; i < hdr.count; i++) {
+ for (group_idx = 0; group_idx < hdr.count; group_idx++) {
struct vfio_group *group;
- struct fd f = fdget(group_fds[i]);
+ struct fd f = fdget(group_fds[group_idx]);
if (!f.file) {
ret = -EBADF;
break;
@@ -1133,8 +1151,9 @@ reset_info_exit:
break;
}
- groups[i].group = group;
- groups[i].id = vfio_external_user_iommu_id(group);
+ groups[group_idx].group = group;
+ groups[group_idx].id =
+ vfio_external_user_iommu_id(group);
}
kfree(group_fds);
@@ -1153,13 +1172,63 @@ reset_info_exit:
ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
vfio_pci_validate_devs,
&info, slot);
- if (!ret)
- /* User has access, do the reset */
- ret = pci_reset_bus(vdev->pdev);
+ if (ret)
+ goto hot_reset_release;
+
+ devs.max_index = count;
+ devs.devices = kcalloc(count, sizeof(struct vfio_device *),
+ GFP_KERNEL);
+ if (!devs.devices) {
+ ret = -ENOMEM;
+ goto hot_reset_release;
+ }
+
+ /*
+ * We need to get memory_lock for each device, but devices
+ * can share mmap_lock, therefore we need to zap and hold
+ * the vma_lock for each device, and only then get each
+ * memory_lock.
+ */
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+ vfio_pci_try_zap_and_vma_lock_cb,
+ &devs, slot);
+ if (ret)
+ goto hot_reset_release;
+
+ for (; mem_idx < devs.cur_index; mem_idx++) {
+ struct vfio_pci_device *tmp;
+
+ tmp = vfio_device_data(devs.devices[mem_idx]);
+
+ ret = down_write_trylock(&tmp->memory_lock);
+ if (!ret) {
+ ret = -EBUSY;
+ goto hot_reset_release;
+ }
+ mutex_unlock(&tmp->vma_lock);
+ }
+
+ /* User has access, do the reset */
+ ret = pci_reset_bus(vdev->pdev);
hot_reset_release:
- for (i--; i >= 0; i--)
- vfio_group_put_external_user(groups[i].group);
+ for (i = 0; i < devs.cur_index; i++) {
+ struct vfio_device *device;
+ struct vfio_pci_device *tmp;
+
+ device = devs.devices[i];
+ tmp = vfio_device_data(device);
+
+ if (i < mem_idx)
+ up_write(&tmp->memory_lock);
+ else
+ mutex_unlock(&tmp->vma_lock);
+ vfio_device_put(device);
+ }
+ kfree(devs.devices);
+
+ for (group_idx--; group_idx >= 0; group_idx--)
+ vfio_group_put_external_user(groups[group_idx].group);
kfree(groups);
return ret;
@@ -1299,6 +1368,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}
+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
+{
+ struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+ /*
+ * Lock ordering:
+ * vma_lock is nested under mmap_lock for vm_ops callback paths.
+ * The memory_lock semaphore is used by both code paths calling
+ * into this function to zap vmas and the vm_ops.fault callback
+ * to protect the memory enable state of the device.
+ *
+ * When zapping vmas we need to maintain the mmap_lock => vma_lock
+ * ordering, which requires using vma_lock to walk vma_list to
+ * acquire an mm, then dropping vma_lock to get the mmap_lock and
+ * reacquiring vma_lock. This logic is derived from similar
+ * requirements in uverbs_user_mmap_disassociate().
+ *
+ * mmap_lock must always be the top-level lock when it is taken.
+ * Therefore we can only hold the memory_lock write lock when
+ * vma_list is empty, as we'd need to take mmap_lock to clear
+ * entries. vma_list can only be guaranteed empty when holding
+ * vma_lock, thus memory_lock is nested under vma_lock.
+ *
+ * This enables the vm_ops.fault callback to acquire vma_lock,
+ * followed by memory_lock read lock, while already holding
+ * mmap_lock without risk of deadlock.
+ */
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock))
+ return 0;
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ while (!list_empty(&vdev->vma_list)) {
+ mmap_vma = list_first_entry(&vdev->vma_list,
+ struct vfio_pci_mmap_vma,
+ vma_next);
+ mm = mmap_vma->vma->vm_mm;
+ if (mmget_not_zero(mm))
+ break;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ mm = NULL;
+ }
+ if (!mm)
+ return 1;
+ mutex_unlock(&vdev->vma_lock);
+
+ if (try) {
+ if (!mmap_read_trylock(mm)) {
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mmap_read_lock(mm);
+ }
+ if (mmget_still_valid(mm)) {
+ if (try) {
+ if (!mutex_trylock(&vdev->vma_lock)) {
+ mmap_read_unlock(mm);
+ mmput(mm);
+ return 0;
+ }
+ } else {
+ mutex_lock(&vdev->vma_lock);
+ }
+ list_for_each_entry_safe(mmap_vma, tmp,
+ &vdev->vma_list, vma_next) {
+ struct vm_area_struct *vma = mmap_vma->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ }
+ mutex_unlock(&vdev->vma_lock);
+ }
+ mmap_read_unlock(mm);
+ mmput(mm);
+ }
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
+{
+ vfio_pci_zap_and_vma_lock(vdev, false);
+ down_write(&vdev->memory_lock);
+ mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
+{
+ u16 cmd;
+
+ down_write(&vdev->memory_lock);
+ pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY))
+ pci_write_config_word(vdev->pdev, PCI_COMMAND,
+ cmd | PCI_COMMAND_MEMORY);
+
+ return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
+{
+ pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+ up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
+ struct vm_area_struct *vma)
+{
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+ if (!mmap_vma)
+ return -ENOMEM;
+
+ mmap_vma->vma = vma;
+ list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+ return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access and therefore
+ * our vma_list only tracks mappings accessed since last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ struct vfio_pci_mmap_vma *mmap_vma;
+
+ mutex_lock(&vdev->vma_lock);
+ list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+ if (mmap_vma->vma == vma) {
+ list_del(&mmap_vma->vma_next);
+ kfree(mmap_vma);
+ break;
+ }
+ }
+ mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_device *vdev = vma->vm_private_data;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+
+ mutex_lock(&vdev->vma_lock);
+ down_read(&vdev->memory_lock);
+
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ ret = VM_FAULT_SIGBUS;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ if (__vfio_pci_add_vma(vdev, vma)) {
+ ret = VM_FAULT_OOM;
+ mutex_unlock(&vdev->vma_lock);
+ goto up_out;
+ }
+
+ mutex_unlock(&vdev->vma_lock);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ ret = VM_FAULT_SIGBUS;
+
+up_out:
+ up_read(&vdev->memory_lock);
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+ .open = vfio_pci_mmap_open,
+ .close = vfio_pci_mmap_close,
+ .fault = vfio_pci_mmap_fault,
+};
+
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
struct vfio_pci_device *vdev = device_data;
@@ -1357,8 +1622,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
- return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- req_len, vma->vm_page_prot);
+ /*
+ * See remap_pfn_range(), called from vfio_pci_mmap_fault(), but we
+ * can't change vm_flags within the fault handler. Set them now.
+ */
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &vfio_pci_mmap_ops;
+
+ return 0;
}
static void vfio_pci_request(void *device_data, unsigned int count)
@@ -1608,6 +1879,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
+ mutex_init(&vdev->vma_lock);
+ INIT_LIST_HEAD(&vdev->vma_list);
+ init_rwsem(&vdev->memory_lock);
ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
if (ret)
@@ -1861,12 +2135,6 @@ static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck)
kref_put_mutex(&reflck->kref, vfio_pci_reflck_release, &reflck_lock);
}
-struct vfio_devices {
- struct vfio_device **devices;
- int cur_index;
- int max_index;
-};
-
static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
@@ -1897,6 +2165,39 @@ static int vfio_pci_get_unused_devs(struct pci_dev *pdev, void *data)
return 0;
}
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
+{
+ struct vfio_devices *devs = data;
+ struct vfio_device *device;
+ struct vfio_pci_device *vdev;
+
+ if (devs->cur_index == devs->max_index)
+ return -ENOSPC;
+
+ device = vfio_device_get_from_dev(&pdev->dev);
+ if (!device)
+ return -EINVAL;
+
+ if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ vdev = vfio_device_data(device);
+
+ /*
+ * Locking multiple devices is prone to deadlock; run away and
+ * unwind if we hit contention.
+ */
+ if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
+ vfio_device_put(device);
+ return -EBUSY;
+ }
+
+ devs->devices[devs->cur_index++] = device;
+ return 0;
+}
+
/*
* If a bus or slot reset is available for the provided device and:
* - All of the devices affected by that bus or slot reset are unused
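The hot-reset path above deliberately uses try-locks: it attempts to take every affected device's vma_lock (and later memory_lock) without blocking, and fully unwinds on the first contention rather than risking deadlock across devices. A minimal userspace sketch of that all-or-nothing pattern, using pthread mutexes as stand-ins for the per-device locks (build with -pthread), could be:

#include <pthread.h>
#include <stdio.h>

#define NDEV 3	/* pretend three devices share the bus/slot */

static pthread_mutex_t dev_lock[NDEV] = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
};

/* Try-lock every device; on the first contention drop everything and bail. */
static int try_lock_all(void)
{
	int i;

	for (i = 0; i < NDEV; i++) {
		if (pthread_mutex_trylock(&dev_lock[i])) {
			while (--i >= 0)
				pthread_mutex_unlock(&dev_lock[i]);
			return -1;	/* caller returns -EBUSY instead of blocking */
		}
	}
	return 0;
}

static void unlock_all(void)
{
	for (int i = 0; i < NDEV; i++)
		pthread_mutex_unlock(&dev_lock[i]);
}

int main(void)
{
	if (!try_lock_all()) {
		puts("all devices locked, reset may proceed");
		unlock_all();
	} else {
		puts("contention: unwound without blocking");
	}
	return 0;
}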
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 90c0b80f8acf..8746c943247a 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -395,6 +395,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
*(__le32 *)(&p->write[off]) = cpu_to_le32(write);
}
+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+ return cmd & PCI_COMMAND_MEMORY;
+}
+
/*
* Restore the *real* BARs after we detect a FLR or backdoor reset.
* (backdoor = some device specific technique that we didn't catch)
@@ -556,13 +564,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
new_cmd = le32_to_cpu(val);
+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+ new_io = !!(new_cmd & PCI_COMMAND_IO);
+
phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
- phys_io = !!(phys_cmd & PCI_COMMAND_IO);
- virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
- new_io = !!(new_cmd & PCI_COMMAND_IO);
+ if (!new_mem)
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
+ else
+ down_write(&vdev->memory_lock);
/*
* If the user is writing mem/io enable (new_mem/io) and we
@@ -579,8 +592,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
}
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
- if (count < 0)
+ if (count < 0) {
+ if (offset == PCI_COMMAND)
+ up_write(&vdev->memory_lock);
return count;
+ }
/*
* Save current memory/io enable bits in vconfig to allow for
@@ -591,6 +607,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
*virt_cmd &= cpu_to_le16(~mask);
*virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+ up_write(&vdev->memory_lock);
}
/* Emulate INTx disable */
@@ -828,8 +846,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_EXP_DEVCAP,
&cap);
- if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+ if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}
/*
@@ -907,8 +928,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
pos - offset + PCI_AF_CAP,
&cap);
- if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+ if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+ vfio_pci_zap_and_down_write_memory_lock(vdev);
pci_try_reset_function(vdev->pdev);
+ up_write(&vdev->memory_lock);
+ }
}
return count;
@@ -1462,7 +1486,12 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
if (ret)
return ret;
- if (cap <= PCI_CAP_ID_MAX) {
+ /*
+ * ID 0 is a NULL capability, conflicting with our fake
+ * PCI_CAP_ID_BASIC. As it has no content, consider it
+ * hidden for now.
+ */
+ if (cap && cap <= PCI_CAP_ID_MAX) {
len = pci_cap_length[cap];
if (len == 0xFF) { /* Variable length */
len = vfio_cap_len(vdev, cap, pos);
@@ -1728,8 +1757,11 @@ void vfio_config_free(struct vfio_pci_device *vdev)
vdev->vconfig = NULL;
kfree(vdev->pci_config_map);
vdev->pci_config_map = NULL;
- kfree(vdev->msi_perm);
- vdev->msi_perm = NULL;
+ if (vdev->msi_perm) {
+ free_perm_bits(vdev->msi_perm);
+ kfree(vdev->msi_perm);
+ vdev->msi_perm = NULL;
+ }
}
/*
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 2056f3f85f59..1d9fb2592945 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -249,6 +249,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
struct pci_dev *pdev = vdev->pdev;
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
int ret;
+ u16 cmd;
if (!is_irq_none(vdev))
return -EINVAL;
@@ -258,13 +259,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
return -ENOMEM;
/* return the number of supported vectors if we can't get all: */
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
if (ret < nvec) {
if (ret > 0)
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
kfree(vdev->ctx);
return ret;
}
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
vdev->num_ctx = nvec;
vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -287,6 +291,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
struct pci_dev *pdev = vdev->pdev;
struct eventfd_ctx *trigger;
int irq, ret;
+ u16 cmd;
if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
@@ -295,7 +300,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
if (vdev->ctx[vector].trigger) {
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
free_irq(irq, vdev->ctx[vector].trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(vdev->ctx[vector].trigger);
vdev->ctx[vector].trigger = NULL;
@@ -323,6 +332,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
* such a reset it would be unsuccessful. To avoid this, restore the
* cached value of the message prior to enabling.
*/
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
if (msix) {
struct msi_msg msg;
@@ -332,6 +342,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
ret = request_irq(irq, vfio_msihandler, 0,
vdev->ctx[vector].name, trigger);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
if (ret) {
kfree(vdev->ctx[vector].name);
eventfd_ctx_put(trigger);
@@ -376,6 +387,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
int i;
+ u16 cmd;
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -384,7 +396,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+ cmd = vfio_pci_memory_lock_and_enable(vdev);
pci_free_irq_vectors(pdev);
+ vfio_pci_memory_unlock_and_restore(vdev, cmd);
/*
* Both disable paths above use pci_intx_for_msi() to clear DisINTx
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index ed20d73cc27c..65c61710c0e9 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -67,7 +67,7 @@ static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
*
* This is not fast path anyway.
*/
- sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE);
+ sizealigned = ALIGN(posoff + count, PAGE_SIZE);
ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
if (!ptr)
return -EFAULT;
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 36ec69081ecd..86a02aff8735 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -92,6 +92,11 @@ struct vfio_pci_vf_token {
int users;
};
+struct vfio_pci_mmap_vma {
+ struct vm_area_struct *vma;
+ struct list_head vma_next;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_NUM_BARS];
@@ -132,6 +137,9 @@ struct vfio_pci_device {
struct list_head ioeventfds_list;
struct vfio_pci_vf_token *vf_token;
struct notifier_block nb;
+ struct mutex vma_lock;
+ struct list_head vma_list;
+ struct rw_semaphore memory_lock;
};
#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -174,6 +182,13 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
pci_power_t state);
+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+ *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+ u16 cmd);
+
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
#else
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index a87992892a9f..916b184df3a5 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -162,6 +162,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
size_t x_start = 0, x_end = 0;
resource_size_t end;
void __iomem *io;
+ struct resource *res = &vdev->pdev->resource[bar];
ssize_t done;
if (pci_resource_start(pdev, bar))
@@ -177,6 +178,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
count = min(count, (size_t)(end - pos));
+ if (res->flags & IORESOURCE_MEM) {
+ down_read(&vdev->memory_lock);
+ if (!__vfio_pci_memory_enabled(vdev)) {
+ up_read(&vdev->memory_lock);
+ return -EIO;
+ }
+ }
+
if (bar == PCI_ROM_RESOURCE) {
/*
* The ROM can fill less space than the BAR, so we start the
@@ -184,13 +193,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
* filling large ROM BARs much faster.
*/
io = pci_map_rom(pdev, &x_start);
- if (!io)
- return -ENOMEM;
+ if (!io) {
+ done = -ENOMEM;
+ goto out;
+ }
x_end = end;
} else {
int ret = vfio_pci_setup_barmap(vdev, bar);
- if (ret)
- return ret;
+ if (ret) {
+ done = ret;
+ goto out;
+ }
io = vdev->barmap[bar];
}
@@ -207,6 +220,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
if (bar == PCI_ROM_RESOURCE)
pci_unmap_rom(pdev, io);
+out:
+ if (res->flags & IORESOURCE_MEM)
+ up_read(&vdev->memory_lock);
return done;
}
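From userspace, the effect of the check above is that reads and writes to a memory BAR region of the vfio device fd now fail with EIO while PCI memory decode is disabled, instead of touching a disabled BAR. A hedged sketch of how a caller might handle that follows; read_bar_u32() is a hypothetical helper, and the device fd and region offset are assumed to come from the usual VFIO_DEVICE_GET_REGION_INFO setup, which is not shown.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical helper: 'device_fd' and 'bar_offset' would normally come
 * from VFIO_GROUP_GET_DEVICE_FD and VFIO_DEVICE_GET_REGION_INFO.
 */
static int read_bar_u32(int device_fd, off_t bar_offset, uint32_t *val)
{
	ssize_t n = pread(device_fd, val, sizeof(*val), bar_offset);

	if (n == (ssize_t)sizeof(*val))
		return 0;
	if (n < 0 && errno == EIO)	/* memory decode is currently disabled */
		return -EIO;
	return n < 0 ? -errno : -EINVAL;
}

int main(void)
{
	uint32_t val;
	int ret = read_bar_u32(-1, 0, &val);	/* no real device opened here */

	printf("read_bar_u32() = %d (%s)\n", ret,
	       ret == -EIO ? "BAR blocked: memory decode disabled" : "other error");
	return 0;
}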
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 765e0e5d83ed..580099afeaff 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -85,6 +85,7 @@ struct vfio_group {
atomic_t opened;
wait_queue_head_t container_q;
bool noiommu;
+ unsigned int dev_counter;
struct kvm *kvm;
struct blocking_notifier_head notifier;
};
@@ -555,6 +556,7 @@ struct vfio_device *vfio_group_create_device(struct vfio_group *group,
mutex_lock(&group->device_lock);
list_add(&device->group_next, &group->device_list);
+ group->dev_counter++;
mutex_unlock(&group->device_lock);
return device;
@@ -567,6 +569,7 @@ static void vfio_device_release(struct kref *kref)
struct vfio_group *group = device->group;
list_del(&device->group_next);
+ group->dev_counter--;
mutex_unlock(&group->device_lock);
dev_set_drvdata(device->dev, NULL);
@@ -1945,6 +1948,9 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
if (!group)
return -ENODEV;
+ if (group->dev_counter > 1)
+ return -EINVAL;
+
ret = vfio_group_add_container_user(group);
if (ret)
goto err_pin_pages;
@@ -1952,7 +1958,8 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
container = group->container;
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
- ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
+ ret = driver->ops->pin_pages(container->iommu_data,
+ group->iommu_group, user_pfn,
npage, prot, phys_pfn);
else
ret = -ENOTTY;
@@ -2050,8 +2057,8 @@ int vfio_group_pin_pages(struct vfio_group *group,
driver = container->iommu_driver;
if (likely(driver && driver->ops->pin_pages))
ret = driver->ops->pin_pages(container->iommu_data,
- user_iova_pfn, npage,
- prot, phys_pfn);
+ group->iommu_group, user_iova_pfn,
+ npage, prot, phys_pfn);
else
ret = -ENOTTY;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index cc1d64765ce7..5e556ac9102a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -27,7 +27,7 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
@@ -69,8 +69,11 @@ struct vfio_iommu {
struct rb_root dma_list;
struct blocking_notifier_head notifier;
unsigned int dma_avail;
+ uint64_t pgsize_bitmap;
bool v2;
bool nesting;
+ bool dirty_page_tracking;
+ bool pinned_page_dirty_scope;
};
struct vfio_domain {
@@ -91,12 +94,14 @@ struct vfio_dma {
bool lock_cap; /* capable(CAP_IPC_LOCK) */
struct task_struct *task;
struct rb_root pfn_list; /* Ex-user pinned pfn list */
+ unsigned long *bitmap;
};
struct vfio_group {
struct iommu_group *iommu_group;
struct list_head next;
bool mdev_group; /* An mdev group */
+ bool pinned_page_dirty_scope;
};
struct vfio_iova {
@@ -112,7 +117,7 @@ struct vfio_pfn {
struct rb_node node;
dma_addr_t iova; /* Device address */
unsigned long pfn; /* Host pfn */
- atomic_t ref_count;
+ unsigned int ref_count;
};
struct vfio_regions {
@@ -125,8 +130,25 @@ struct vfio_regions {
#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
(!list_empty(&iommu->domain_list))
+#define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
+
+/*
+ * The number-of-bits argument to bitmap_set() is an unsigned int, which
+ * __bitmap_set() casts to a signed int for the unaligned multi-bit case.
+ * The maximum supported bitmap size is therefore 2^31 bits, i.e. 2^31 / 2^3
+ * bits-per-byte = 2^28 bytes (256 MB), which covers 2^31 * 2^12 = 2^43
+ * (8 TB) of memory on a 4K page system.
+ */
+#define DIRTY_BITMAP_PAGES_MAX ((u64)INT_MAX)
+#define DIRTY_BITMAP_SIZE_MAX DIRTY_BITMAP_BYTES(DIRTY_BITMAP_PAGES_MAX)
+
static int put_pfn(unsigned long pfn, int prot);
+static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+ struct iommu_group *iommu_group);
+
+static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
/*
* This code handles mapping and unmapping of user data buffers
* into DMA'ble space using the IOMMU
@@ -175,6 +197,81 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
rb_erase(&old->node, &iommu->dma_list);
}
+
+static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
+{
+ uint64_t npages = dma->size / pgsize;
+
+ if (npages > DIRTY_BITMAP_PAGES_MAX)
+ return -EINVAL;
+
+ /*
+ * Allocate an extra 64 bits of slack so that bitmap_shift_left() can
+ * shift and merge an unaligned number of pages with the bitmap of an
+ * adjacent vfio_dma range.
+ */
+ dma->bitmap = kvzalloc(DIRTY_BITMAP_BYTES(npages) + sizeof(u64),
+ GFP_KERNEL);
+ if (!dma->bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vfio_dma_bitmap_free(struct vfio_dma *dma)
+{
+ kfree(dma->bitmap);
+ dma->bitmap = NULL;
+}
+
+static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
+{
+ struct rb_node *p;
+ unsigned long pgshift = __ffs(pgsize);
+
+ for (p = rb_first(&dma->pfn_list); p; p = rb_next(p)) {
+ struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, node);
+
+ bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
+ }
+}
+
+static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+ int ret;
+
+ ret = vfio_dma_bitmap_alloc(dma, pgsize);
+ if (ret) {
+ struct rb_node *p;
+
+ for (p = rb_prev(n); p; p = rb_prev(p)) {
+ struct vfio_dma *dma = rb_entry(p,
+ struct vfio_dma, node);
+
+ vfio_dma_bitmap_free(dma);
+ }
+ return ret;
+ }
+ vfio_dma_populate_bitmap(dma, pgsize);
+ }
+ return 0;
+}
+
+static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
+{
+ struct rb_node *n;
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+ vfio_dma_bitmap_free(dma);
+ }
+}
+
/*
* Helper Functions for host iova-pfn list
*/
@@ -233,7 +330,7 @@ static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
vpfn->iova = iova;
vpfn->pfn = pfn;
- atomic_set(&vpfn->ref_count, 1);
+ vpfn->ref_count = 1;
vfio_link_pfn(dma, vpfn);
return 0;
}
@@ -251,7 +348,7 @@ static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
if (vpfn)
- atomic_inc(&vpfn->ref_count);
+ vpfn->ref_count++;
return vpfn;
}
@@ -259,7 +356,8 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
{
int ret = 0;
- if (atomic_dec_and_test(&vpfn->ref_count)) {
+ vpfn->ref_count--;
+ if (!vpfn->ref_count) {
ret = put_pfn(vpfn->pfn, dma->prot);
vfio_remove_from_pfn_list(dma, vpfn);
}
@@ -278,11 +376,11 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
if (!mm)
return -ESRCH; /* process exited */
- ret = down_write_killable(&mm->mmap_sem);
+ ret = mmap_write_lock_killable(mm);
if (!ret) {
ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
dma->lock_cap);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
if (async)
@@ -317,6 +415,32 @@ static int put_pfn(unsigned long pfn, int prot)
return 0;
}
+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long vaddr, unsigned long *pfn,
+ bool write_fault)
+{
+ int ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ if (ret) {
+ bool unlocked = false;
+
+ ret = fixup_user_fault(NULL, mm, vaddr,
+ FAULT_FLAG_REMOTE |
+ (write_fault ? FAULT_FLAG_WRITE : 0),
+ &unlocked);
+ if (unlocked)
+ return -EAGAIN;
+
+ if (ret)
+ return ret;
+
+ ret = follow_pfn(vma, vaddr, pfn);
+ }
+
+ return ret;
+}
+
static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
int prot, unsigned long *pfn)
{
@@ -328,7 +452,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
if (prot & IOMMU_WRITE)
flags |= FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM,
page, NULL, NULL);
if (ret == 1) {
@@ -339,15 +463,19 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
vaddr = untagged_addr(vaddr);
+retry:
vma = find_vma_intersection(mm, vaddr, vaddr + 1);
if (vma && vma->vm_flags & VM_PFNMAP) {
- if (!follow_pfn(vma, vaddr, pfn) &&
- is_invalid_reserved_pfn(*pfn))
- ret = 0;
+ ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+ if (ret == -EAGAIN)
+ goto retry;
+
+ if (!ret && !is_invalid_reserved_pfn(*pfn))
+ ret = -EFAULT;
}
done:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
@@ -501,11 +629,13 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
}
static int vfio_iommu_type1_pin_pages(void *iommu_data,
+ struct iommu_group *iommu_group,
unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn)
{
struct vfio_iommu *iommu = iommu_data;
+ struct vfio_group *group;
int i, j, ret;
unsigned long remote_vaddr;
struct vfio_dma *dma;
@@ -566,9 +696,26 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
vfio_unpin_page_external(dma, iova, do_accounting);
goto pin_unwind;
}
- }
+ if (iommu->dirty_page_tracking) {
+ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
+
+ /*
+ * Bitmap populated with the smallest supported page
+ * size
+ */
+ bitmap_set(dma->bitmap,
+ (iova - dma->iova) >> pgshift, 1);
+ }
+ }
ret = i;
+
+ group = vfio_iommu_find_iommu_group(iommu, iommu_group);
+ if (!group->pinned_page_dirty_scope) {
+ group->pinned_page_dirty_scope = true;
+ update_pinned_page_dirty_scope(iommu);
+ }
+
goto pin_done;
pin_unwind:
@@ -800,19 +947,19 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unmap_unpin(iommu, dma, true);
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
+ vfio_dma_bitmap_free(dma);
kfree(dma);
iommu->dma_avail++;
}
-static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
- unsigned long bitmap = ULONG_MAX;
- mutex_lock(&iommu->lock);
+ iommu->pgsize_bitmap = ULONG_MAX;
+
list_for_each_entry(domain, &iommu->domain_list, next)
- bitmap &= domain->domain->pgsize_bitmap;
- mutex_unlock(&iommu->lock);
+ iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
/*
* In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -822,36 +969,143 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
* granularity while iommu driver can use the sub-PAGE_SIZE size
* to map the buffer.
*/
- if (bitmap & ~PAGE_MASK) {
- bitmap &= PAGE_MASK;
- bitmap |= PAGE_SIZE;
+ if (iommu->pgsize_bitmap & ~PAGE_MASK) {
+ iommu->pgsize_bitmap &= PAGE_MASK;
+ iommu->pgsize_bitmap |= PAGE_SIZE;
}
+}
+
+static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
+ struct vfio_dma *dma, dma_addr_t base_iova,
+ size_t pgsize)
+{
+ unsigned long pgshift = __ffs(pgsize);
+ unsigned long nbits = dma->size >> pgshift;
+ unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
+ unsigned long copy_offset = bit_offset / BITS_PER_LONG;
+ unsigned long shift = bit_offset % BITS_PER_LONG;
+ unsigned long leftover;
+
+ /*
+ * mark all pages dirty if any IOMMU capable device is not able
+ * to report dirty pages and all pages are pinned and mapped.
+ */
+ if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+ bitmap_set(dma->bitmap, 0, nbits);
- return bitmap;
+ if (shift) {
+ bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
+ nbits + shift);
+
+ if (copy_from_user(&leftover,
+ (void __user *)(bitmap + copy_offset),
+ sizeof(leftover)))
+ return -EFAULT;
+
+ bitmap_or(dma->bitmap, dma->bitmap, &leftover, shift);
+ }
+
+ if (copy_to_user((void __user *)(bitmap + copy_offset), dma->bitmap,
+ DIRTY_BITMAP_BYTES(nbits + shift)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
+ dma_addr_t iova, size_t size, size_t pgsize)
+{
+ struct vfio_dma *dma;
+ struct rb_node *n;
+ unsigned long pgshift = __ffs(pgsize);
+ int ret;
+
+ /*
+ * GET_BITMAP request must fully cover vfio_dma mappings. Multiple
+ * vfio_dma mappings may be clubbed by specifying large ranges, but
+ * there must not be any previous mappings bisected by the range.
+ * An error will be returned if these conditions are not met.
+ */
+ dma = vfio_find_dma(iommu, iova, 1);
+ if (dma && dma->iova != iova)
+ return -EINVAL;
+
+ dma = vfio_find_dma(iommu, iova + size - 1, 0);
+ if (dma && dma->iova + dma->size != iova + size)
+ return -EINVAL;
+
+ for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+ struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
+ if (dma->iova < iova)
+ continue;
+
+ if (dma->iova > iova + size - 1)
+ break;
+
+ ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
+ if (ret)
+ return ret;
+
+ /*
+ * Re-populate the bitmap with the currently pinned pages (which are
+ * always considered dirty), excluding pages that have since been
+ * unpinned and pages that were only marked dirty by vfio_dma_rw().
+ */
+ bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
+ vfio_dma_populate_bitmap(dma, pgsize);
+ }
+ return 0;
+}
+
+static int verify_bitmap_size(uint64_t npages, uint64_t bitmap_size)
+{
+ if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
+ (bitmap_size < DIRTY_BITMAP_BYTES(npages)))
+ return -EINVAL;
+
+ return 0;
}
static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
- struct vfio_iommu_type1_dma_unmap *unmap)
+ struct vfio_iommu_type1_dma_unmap *unmap,
+ struct vfio_bitmap *bitmap)
{
- uint64_t mask;
struct vfio_dma *dma, *dma_last = NULL;
- size_t unmapped = 0;
+ size_t unmapped = 0, pgsize;
int ret = 0, retries = 0;
+ unsigned long pgshift;
+
+ mutex_lock(&iommu->lock);
- mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+ pgshift = __ffs(iommu->pgsize_bitmap);
+ pgsize = (size_t)1 << pgshift;
+
+ if (unmap->iova & (pgsize - 1)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!unmap->size || unmap->size & (pgsize - 1)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
- if (unmap->iova & mask)
- return -EINVAL;
- if (!unmap->size || unmap->size & mask)
- return -EINVAL;
if (unmap->iova + unmap->size - 1 < unmap->iova ||
- unmap->size > SIZE_MAX)
- return -EINVAL;
+ unmap->size > SIZE_MAX) {
+ ret = -EINVAL;
+ goto unlock;
+ }
- WARN_ON(mask & PAGE_MASK);
-again:
- mutex_lock(&iommu->lock);
+ /* When dirty tracking is enabled, allow only min supported pgsize */
+ if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+ (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ WARN_ON((pgsize - 1) & PAGE_MASK);
+again:
/*
* vfio-iommu-type1 (v1) - User mappings were coalesced together to
* avoid tracking individual mappings. This means that the granularity
@@ -929,8 +1183,17 @@ again:
blocking_notifier_call_chain(&iommu->notifier,
VFIO_IOMMU_NOTIFY_DMA_UNMAP,
&nb_unmap);
+ mutex_lock(&iommu->lock);
goto again;
}
+
+ if (unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+ ret = update_user_bitmap(bitmap->data, iommu, dma,
+ unmap->iova, pgsize);
+ if (ret)
+ break;
+ }
+
unmapped += dma->size;
vfio_remove_dma(iommu, dma);
}
@@ -1037,31 +1300,35 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
unsigned long vaddr = map->vaddr;
size_t size = map->size;
int ret = 0, prot = 0;
- uint64_t mask;
+ size_t pgsize;
struct vfio_dma *dma;
/* Verify that none of our __u64 fields overflow */
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
return -EINVAL;
- mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
-
- WARN_ON(mask & PAGE_MASK);
-
/* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
prot |= IOMMU_WRITE;
if (map->flags & VFIO_DMA_MAP_FLAG_READ)
prot |= IOMMU_READ;
- if (!prot || !size || (size | iova | vaddr) & mask)
- return -EINVAL;
+ mutex_lock(&iommu->lock);
- /* Don't allow IOVA or virtual address wrap */
- if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
- return -EINVAL;
+ pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
- mutex_lock(&iommu->lock);
+ WARN_ON((pgsize - 1) & PAGE_MASK);
+
+ if (!prot || !size || (size | iova | vaddr) & (pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Don't allow IOVA or virtual address wrap */
+ if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
if (vfio_find_dma(iommu, iova, size)) {
ret = -EEXIST;
@@ -1129,6 +1396,12 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
else
ret = vfio_pin_map_dma(iommu, dma, size);
+ if (!ret && iommu->dirty_page_tracking) {
+ ret = vfio_dma_bitmap_alloc(dma, pgsize);
+ if (ret)
+ vfio_remove_dma(iommu, dma);
+ }
+
out_unlock:
mutex_unlock(&iommu->lock);
return ret;
@@ -1267,6 +1540,51 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL;
}
+static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+ struct iommu_group *iommu_group)
+{
+ struct vfio_domain *domain;
+ struct vfio_group *group = NULL;
+
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ group = find_iommu_group(domain, iommu_group);
+ if (group)
+ return group;
+ }
+
+ if (iommu->external_domain)
+ group = find_iommu_group(iommu->external_domain, iommu_group);
+
+ return group;
+}
+
+static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *domain;
+ struct vfio_group *group;
+
+ list_for_each_entry(domain, &iommu->domain_list, next) {
+ list_for_each_entry(group, &domain->group_list, next) {
+ if (!group->pinned_page_dirty_scope) {
+ iommu->pinned_page_dirty_scope = false;
+ return;
+ }
+ }
+ }
+
+ if (iommu->external_domain) {
+ domain = iommu->external_domain;
+ list_for_each_entry(group, &domain->group_list, next) {
+ if (!group->pinned_page_dirty_scope) {
+ iommu->pinned_page_dirty_scope = false;
+ return;
+ }
+ }
+ }
+
+ iommu->pinned_page_dirty_scope = true;
+}
+
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
phys_addr_t *base)
{
@@ -1667,12 +1985,23 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (!iommu->external_domain) {
INIT_LIST_HEAD(&domain->group_list);
iommu->external_domain = domain;
+ vfio_update_pgsize_bitmap(iommu);
} else {
kfree(domain);
}
list_add(&group->next,
&iommu->external_domain->group_list);
+ /*
+ * A non-IOMMU-backed group cannot dirty memory directly;
+ * it can only use interfaces that provide dirty
+ * tracking.
+ * The iommu scope can only be promoted with the
+ * addition of a dirty tracking group.
+ */
+ group->pinned_page_dirty_scope = true;
+ if (!iommu->pinned_page_dirty_scope)
+ update_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
return 0;
@@ -1792,9 +2121,17 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
list_add(&domain->next, &iommu->domain_list);
+ vfio_update_pgsize_bitmap(iommu);
done:
/* Delete the old one and insert new iova list */
vfio_iommu_iova_insert_copy(iommu, &iova_copy);
+
+ /*
+ * An iommu backed group can dirty memory directly and therefore
+ * demotes the iommu scope until it declares itself dirty tracking
+ * capable via the page pinning interface.
+ */
+ iommu->pinned_page_dirty_scope = false;
mutex_unlock(&iommu->lock);
vfio_iommu_resv_free(&group_resv_regions);
@@ -1947,6 +2284,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain;
struct vfio_group *group;
+ bool update_dirty_scope = false;
LIST_HEAD(iova_copy);
mutex_lock(&iommu->lock);
@@ -1954,6 +2292,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (iommu->external_domain) {
group = find_iommu_group(iommu->external_domain, iommu_group);
if (group) {
+ update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
@@ -1983,6 +2322,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
continue;
vfio_iommu_detach_group(domain, group);
+ update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
/*
@@ -2003,6 +2343,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
list_del(&domain->next);
kfree(domain);
vfio_iommu_aper_expand(iommu, &iova_copy);
+ vfio_update_pgsize_bitmap(iommu);
}
break;
}
@@ -2013,6 +2354,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
vfio_iommu_iova_free(&iova_copy);
detach_group_done:
+ /*
+ * Removal of a group without dirty tracking may allow the iommu scope
+ * to be promoted.
+ */
+ if (update_dirty_scope)
+ update_pinned_page_dirty_scope(iommu);
mutex_unlock(&iommu->lock);
}
@@ -2135,8 +2482,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
size_t size;
int iovas = 0, i = 0, ret;
- mutex_lock(&iommu->lock);
-
list_for_each_entry(iova, &iommu->iova_list, list)
iovas++;
@@ -2145,17 +2490,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
* Return 0 as a container with a single mdev device
* will have an empty list
*/
- ret = 0;
- goto out_unlock;
+ return 0;
}
size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
cap_iovas = kzalloc(size, GFP_KERNEL);
- if (!cap_iovas) {
- ret = -ENOMEM;
- goto out_unlock;
- }
+ if (!cap_iovas)
+ return -ENOMEM;
cap_iovas->nr_iovas = iovas;
@@ -2168,11 +2510,25 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
kfree(cap_iovas);
-out_unlock:
- mutex_unlock(&iommu->lock);
return ret;
}
+static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_iommu_type1_info_cap_migration cap_mig;
+
+ cap_mig.header.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION;
+ cap_mig.header.version = 1;
+
+ cap_mig.flags = 0;
+ /* support minimum pgsize */
+ cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+ cap_mig.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX;
+
+ return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
+}
+
static long vfio_iommu_type1_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -2214,11 +2570,18 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
info.cap_offset = 0; /* output, no-recopy necessary */
}
+ mutex_lock(&iommu->lock);
info.flags = VFIO_IOMMU_INFO_PGSIZES;
- info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+ info.iova_pgsizes = iommu->pgsize_bitmap;
+
+ ret = vfio_iommu_migration_build_caps(iommu, &caps);
+
+ if (!ret)
+ ret = vfio_iommu_iova_build_caps(iommu, &caps);
+
+ mutex_unlock(&iommu->lock);
- ret = vfio_iommu_iova_build_caps(iommu, &caps);
if (ret)
return ret;
@@ -2261,22 +2624,143 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
struct vfio_iommu_type1_dma_unmap unmap;
- long ret;
+ struct vfio_bitmap bitmap = { 0 };
+ int ret;
minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
if (copy_from_user(&unmap, (void __user *)arg, minsz))
return -EFAULT;
- if (unmap.argsz < minsz || unmap.flags)
+ if (unmap.argsz < minsz ||
+ unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
return -EINVAL;
- ret = vfio_dma_do_unmap(iommu, &unmap);
+ if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+ unsigned long pgshift;
+
+ if (unmap.argsz < (minsz + sizeof(bitmap)))
+ return -EINVAL;
+
+ if (copy_from_user(&bitmap,
+ (void __user *)(arg + minsz),
+ sizeof(bitmap)))
+ return -EFAULT;
+
+ if (!access_ok((void __user *)bitmap.data, bitmap.size))
+ return -EINVAL;
+
+ pgshift = __ffs(bitmap.pgsize);
+ ret = verify_bitmap_size(unmap.size >> pgshift,
+ bitmap.size);
+ if (ret)
+ return ret;
+ }
+
+ ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
if (ret)
return ret;
return copy_to_user((void __user *)arg, &unmap, minsz) ?
-EFAULT : 0;
+ } else if (cmd == VFIO_IOMMU_DIRTY_PAGES) {
+ struct vfio_iommu_type1_dirty_bitmap dirty;
+ uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+ int ret = 0;
+
+ if (!iommu->v2)
+ return -EACCES;
+
+ minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap,
+ flags);
+
+ if (copy_from_user(&dirty, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (dirty.argsz < minsz || dirty.flags & ~mask)
+ return -EINVAL;
+
+ /* only one flag should be set at a time */
+ if (__ffs(dirty.flags) != __fls(dirty.flags))
+ return -EINVAL;
+
+ if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+ size_t pgsize;
+
+ mutex_lock(&iommu->lock);
+ pgsize = 1 << __ffs(iommu->pgsize_bitmap);
+ if (!iommu->dirty_page_tracking) {
+ ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
+ if (!ret)
+ iommu->dirty_page_tracking = true;
+ }
+ mutex_unlock(&iommu->lock);
+ return ret;
+ } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+ mutex_lock(&iommu->lock);
+ if (iommu->dirty_page_tracking) {
+ iommu->dirty_page_tracking = false;
+ vfio_dma_bitmap_free_all(iommu);
+ }
+ mutex_unlock(&iommu->lock);
+ return 0;
+ } else if (dirty.flags &
+ VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+ struct vfio_iommu_type1_dirty_bitmap_get range;
+ unsigned long pgshift;
+ size_t data_size = dirty.argsz - minsz;
+ size_t iommu_pgsize;
+
+ if (!data_size || data_size < sizeof(range))
+ return -EINVAL;
+
+ if (copy_from_user(&range, (void __user *)(arg + minsz),
+ sizeof(range)))
+ return -EFAULT;
+
+ if (range.iova + range.size < range.iova)
+ return -EINVAL;
+ if (!access_ok((void __user *)range.bitmap.data,
+ range.bitmap.size))
+ return -EINVAL;
+
+ pgshift = __ffs(range.bitmap.pgsize);
+ ret = verify_bitmap_size(range.size >> pgshift,
+ range.bitmap.size);
+ if (ret)
+ return ret;
+
+ mutex_lock(&iommu->lock);
+
+ iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+
+ /* allow only smallest supported pgsize */
+ if (range.bitmap.pgsize != iommu_pgsize) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (range.iova & (iommu_pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (!range.size || range.size & (iommu_pgsize - 1)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (iommu->dirty_page_tracking)
+ ret = vfio_iova_dirty_bitmap(range.bitmap.data,
+ iommu, range.iova, range.size,
+ range.bitmap.pgsize);
+ else
+ ret = -EINVAL;
+out_unlock:
+ mutex_unlock(&iommu->lock);
+
+ return ret;
+ }
}
return -ENOTTY;
@@ -2333,7 +2817,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
return -EPERM;
if (kthread)
- use_mm(mm);
+ kthread_use_mm(mm);
else if (current->mm != mm)
goto out;
@@ -2344,14 +2828,23 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
vaddr = dma->vaddr + offset;
- if (write)
+ if (write) {
*copied = copy_to_user((void __user *)vaddr, data,
count) ? 0 : count;
- else
+ if (*copied && iommu->dirty_page_tracking) {
+ unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
+ /*
+ * Bitmap populated with the smallest supported page
+ * size
+ */
+ bitmap_set(dma->bitmap, offset >> pgshift,
+ *copied >> pgshift);
+ }
+ } else
*copied = copy_from_user(data, (void __user *)vaddr,
count) ? 0 : count;
if (kthread)
- unuse_mm(mm);
+ kthread_unuse_mm(mm);
out:
mmput(mm);
return *copied ? 0 : -EFAULT;
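
The vfio_iommu_type1 changes above introduce the dirty-page-tracking ABI: VFIO_IOMMU_DIRTY_PAGES with START/STOP/GET_BITMAP flags, plus DMA unmap with VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP, reporting dirtiness at the smallest IOMMU-supported page size. Below is a minimal userspace sketch of the GET_BITMAP flow, not part of the patch; it assumes the UAPI structures added by this series (struct vfio_iommu_type1_dirty_bitmap and struct vfio_iommu_type1_dirty_bitmap_get), a type1 v2 container fd on which tracking was already started with the START flag, and purely illustrative iova/size values.

/*
 * Hedged sketch: query the dirty bitmap for one IOVA range.
 * bitmap.pgsize must be the smallest supported page size, and the
 * bitmap buffer must cover one bit per page, rounded up to u64 units.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int get_dirty_bitmap(int container, uint64_t iova, uint64_t size,
			    uint64_t pgsize)
{
	uint64_t npages = size / pgsize;
	uint64_t bmap_bytes = ((npages + 63) / 64) * 8;	/* u64-aligned, 1 bit/page */
	struct vfio_iommu_type1_dirty_bitmap *db;
	struct vfio_iommu_type1_dirty_bitmap_get *range;
	int ret;

	db = calloc(1, sizeof(*db) + sizeof(*range));
	if (!db)
		return -1;

	db->argsz = sizeof(*db) + sizeof(*range);
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;

	range = (struct vfio_iommu_type1_dirty_bitmap_get *)db->data;
	range->iova = iova;
	range->size = size;
	range->bitmap.pgsize = pgsize;
	range->bitmap.size = bmap_bytes;
	range->bitmap.data = calloc(1, bmap_bytes);
	if (!range->bitmap.data) {
		free(db);
		return -1;
	}

	ret = ioctl(container, VFIO_IOMMU_DIRTY_PAGES, db);

	/* ... consume range->bitmap.data here ... */
	free(range->bitmap.data);
	free(db);
	return ret;
}
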
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index c4f273793595..2c75d164b827 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -13,15 +13,6 @@ config VHOST_RING
This option is selected by any driver which needs to access
the host side of a virtio ring.
-config VHOST_DPN
- bool
- depends on !ARM || AEABI
- default y
- help
- Anything selecting VHOST or VHOST_RING must depend on VHOST_DPN.
- This excludes the deprecated ARM ABI since that forces a 4 byte
- alignment on all structs - incompatible with virtio spec requirements.
-
config VHOST
tristate
select VHOST_IOTLB
@@ -37,7 +28,7 @@ if VHOST_MENU
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
- depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) && VHOST_DPN
+ depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
select VHOST
---help---
This kernel module can be loaded in host kernel to accelerate
@@ -49,7 +40,7 @@ config VHOST_NET
config VHOST_SCSI
tristate "VHOST_SCSI TCM fabric driver"
- depends on TARGET_CORE && EVENTFD && VHOST_DPN
+ depends on TARGET_CORE && EVENTFD
select VHOST
default n
---help---
@@ -58,7 +49,7 @@ config VHOST_SCSI
config VHOST_VSOCK
tristate "vhost virtio-vsock driver"
- depends on VSOCKETS && EVENTFD && VHOST_DPN
+ depends on VSOCKETS && EVENTFD
select VHOST
select VIRTIO_VSOCKETS_COMMON
default n
@@ -72,7 +63,7 @@ config VHOST_VSOCK
config VHOST_VDPA
tristate "Vhost driver for vDPA-based backend"
- depends on EVENTFD && VHOST_DPN
+ depends on EVENTFD
select VHOST
depends on VDPA
help
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 516519dcc8ff..e992decfec53 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1327,7 +1327,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
UIO_MAXIOV + VHOST_NET_BATCH,
- VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
NULL);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c39952243fd3..6fb4d7ecfa19 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
- VHOST_SCSI_WEIGHT, 0, NULL);
+ VHOST_SCSI_WEIGHT, 0, true, NULL);
vhost_scsi_init_inflight(vs, NULL);
@@ -2280,6 +2280,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.fabric_name = "vhost",
+ .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
.tpg_get_tag = vhost_scsi_get_tpgt,
.tpg_check_demo_mode = vhost_scsi_check_true,
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 9a3a09005e03..0466921f4772 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
- VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
+ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
f->private_data = n;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 0968361e3b77..7580e34f76c1 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -15,12 +15,14 @@
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
+#include <linux/kernel.h>
#include "vhost.h"
@@ -70,6 +72,7 @@ struct vhost_vdpa {
int nvqs;
int virtio_id;
int minor;
+ struct eventfd_ctx *config_ctx;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -101,6 +104,17 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
return IRQ_HANDLED;
}
+static irqreturn_t vhost_vdpa_config_cb(void *private)
+{
+ struct vhost_vdpa *v = private;
+ struct eventfd_ctx *config_ctx = v->config_ctx;
+
+ if (config_ctx)
+ eventfd_signal(config_ctx, 1);
+
+ return IRQ_HANDLED;
+}
+
static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -288,6 +302,36 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
return 0;
}
+static void vhost_vdpa_config_put(struct vhost_vdpa *v)
+{
+ if (v->config_ctx)
+ eventfd_ctx_put(v->config_ctx);
+}
+
+static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+{
+ struct vdpa_callback cb;
+ int fd;
+ struct eventfd_ctx *ctx;
+
+ cb.callback = vhost_vdpa_config_cb;
+ cb.private = v->vdpa;
+ if (copy_from_user(&fd, argp, sizeof(fd)))
+ return -EFAULT;
+
+ ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+ swap(ctx, v->config_ctx);
+
+ if (!IS_ERR_OR_NULL(ctx))
+ eventfd_ctx_put(ctx);
+
+ if (IS_ERR(v->config_ctx)) {
+ long ret = PTR_ERR(v->config_ctx);
+
+ v->config_ctx = NULL;
+ return ret;
+ }
+
+ v->vdpa->config->set_config_cb(v->vdpa, &cb);
+
+ return 0;
+}
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -395,6 +439,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_SET_LOG_FD:
r = -ENOIOCTLCMD;
break;
+ case VHOST_VDPA_SET_CONFIG_CALL:
+ r = vhost_vdpa_set_config_call(v, argp);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -527,7 +574,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
if (!npages)
return -EINVAL;
- down_read(&dev->mm->mmap_sem);
+ mmap_read_lock(dev->mm);
locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -580,7 +627,7 @@ out:
vhost_vdpa_unmap(v, msg->iova, msg->size);
atomic64_sub(npages, &dev->mm->pinned_vm);
}
- up_read(&dev->mm->mmap_sem);
+ mmap_read_unlock(dev->mm);
free_page((unsigned long)page_list);
return ret;
}
@@ -694,7 +741,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
vqs[i] = &v->vqs[i];
vqs[i]->handle_kick = handle_vq_kick;
}
- vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
+ vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
vhost_vdpa_process_iotlb_msg);
dev->iotlb = vhost_iotlb_alloc(0, 0);
@@ -729,6 +776,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
vhost_dev_stop(&v->vdev);
vhost_vdpa_iotlb_free(v);
vhost_vdpa_free_domain(v);
+ vhost_vdpa_config_put(v);
vhost_dev_cleanup(&v->vdev);
kfree(v->vdev.vqs);
mutex_unlock(&d->mutex);
@@ -739,12 +787,74 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
return 0;
}
+#ifdef CONFIG_MMU
+static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+{
+ struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_notification_area notify;
+ struct vm_area_struct *vma = vmf->vma;
+ u16 index = vma->vm_pgoff;
+
+ notify = ops->get_vq_notification(vdpa, index);
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+ notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+ vma->vm_page_prot))
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+ .fault = vhost_vdpa_fault,
+};
+
+static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct vhost_vdpa *v = vma->vm_file->private_data;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_notification_area notify;
+ int index = vma->vm_pgoff;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (vma->vm_flags & VM_READ)
+ return -EINVAL;
+ if (index > 65535)
+ return -EINVAL;
+ if (!ops->get_vq_notification)
+ return -ENOTSUPP;
+
+ /* To be safe and easily modelled by userspace, we only
+ * support doorbells that sit at a page boundary and do
+ * not share their page with other registers.
+ */
+ notify = ops->get_vq_notification(vdpa, index);
+ if (notify.addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+ if (vma->vm_end - vma->vm_start != notify.size)
+ return -ENOTSUPP;
+
+ vma->vm_ops = &vhost_vdpa_vm_ops;
+ return 0;
+}
+#endif /* CONFIG_MMU */
+
static const struct file_operations vhost_vdpa_fops = {
.owner = THIS_MODULE,
.open = vhost_vdpa_open,
.release = vhost_vdpa_release,
.write_iter = vhost_vdpa_chr_write_iter,
.unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
+#ifdef CONFIG_MMU
+ .mmap = vhost_vdpa_mmap,
+#endif /* CONFIG_MMU */
.compat_ioctl = compat_ptr_ioctl,
};
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 21a59b598ed8..d7b8df3edffc 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -14,7 +14,6 @@
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -166,11 +165,16 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
void *key)
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+ struct vhost_work *work = &poll->work;
if (!(key_to_poll(key) & poll->mask))
return 0;
- vhost_poll_queue(poll);
+ if (!poll->dev->use_worker)
+ work->fn(work);
+ else
+ vhost_poll_queue(poll);
+
return 0;
}
@@ -330,10 +334,8 @@ static int vhost_worker(void *data)
struct vhost_dev *dev = data;
struct vhost_work *work, *work_next;
struct llist_node *node;
- mm_segment_t oldfs = get_fs();
- set_fs(USER_DS);
- use_mm(dev->mm);
+ kthread_use_mm(dev->mm);
for (;;) {
/* mb paired w/ kthread_stop */
@@ -361,8 +363,7 @@ static int vhost_worker(void *data)
schedule();
}
}
- unuse_mm(dev->mm);
- set_fs(oldfs);
+ kthread_unuse_mm(dev->mm);
return 0;
}
@@ -454,6 +455,7 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
int iov_limit, int weight, int byte_weight,
+ bool use_worker,
int (*msg_handler)(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg))
{
@@ -471,6 +473,7 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->iov_limit = iov_limit;
dev->weight = weight;
dev->byte_weight = byte_weight;
+ dev->use_worker = use_worker;
dev->msg_handler = msg_handler;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
@@ -534,6 +537,36 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
+static void vhost_attach_mm(struct vhost_dev *dev)
+{
+ /* No owner, become one */
+ if (dev->use_worker) {
+ dev->mm = get_task_mm(current);
+ } else {
+ /* A vDPA device does not use a worker thread, so there is
+ * no need to hold the address space of the mm. This helps
+ * to avoid a deadlock in the case of mmap(), which may hold
+ * a refcount on the file and depend on the release method
+ * to remove the vma.
+ */
+ dev->mm = current->mm;
+ mmgrab(dev->mm);
+ }
+}
+
+static void vhost_detach_mm(struct vhost_dev *dev)
+{
+ if (!dev->mm)
+ return;
+
+ if (dev->use_worker)
+ mmput(dev->mm);
+ else
+ mmdrop(dev->mm);
+
+ dev->mm = NULL;
+}
+
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
@@ -546,21 +579,24 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
goto err_mm;
}
- /* No owner, become one */
- dev->mm = get_task_mm(current);
+ vhost_attach_mm(dev);
+
dev->kcov_handle = kcov_common_handle();
- worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
- if (IS_ERR(worker)) {
- err = PTR_ERR(worker);
- goto err_worker;
- }
+ if (dev->use_worker) {
+ worker = kthread_create(vhost_worker, dev,
+ "vhost-%d", current->pid);
+ if (IS_ERR(worker)) {
+ err = PTR_ERR(worker);
+ goto err_worker;
+ }
- dev->worker = worker;
- wake_up_process(worker); /* avoid contributing to loadavg */
+ dev->worker = worker;
+ wake_up_process(worker); /* avoid contributing to loadavg */
- err = vhost_attach_cgroups(dev);
- if (err)
- goto err_cgroup;
+ err = vhost_attach_cgroups(dev);
+ if (err)
+ goto err_cgroup;
+ }
err = vhost_dev_alloc_iovecs(dev);
if (err)
@@ -568,12 +604,12 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
return 0;
err_cgroup:
- kthread_stop(worker);
- dev->worker = NULL;
+ if (dev->worker) {
+ kthread_stop(dev->worker);
+ dev->worker = NULL;
+ }
err_worker:
- if (dev->mm)
- mmput(dev->mm);
- dev->mm = NULL;
+ vhost_detach_mm(dev);
dev->kcov_handle = 0;
err_mm:
return err;
@@ -670,9 +706,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
dev->worker = NULL;
dev->kcov_handle = 0;
}
- if (dev->mm)
- mmput(dev->mm);
- dev->mm = NULL;
+ vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -882,7 +916,7 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
#define vhost_put_user(vq, x, ptr) \
({ \
- int ret = -EFAULT; \
+ int ret; \
if (!vq->iotlb) { \
ret = __put_user(x, ptr); \
} else { \
@@ -1244,9 +1278,9 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
}
static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used)
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used)
{
return access_ok(desc, vhost_get_desc_size(vq, num)) &&
@@ -1574,7 +1608,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
- eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
@@ -1590,7 +1624,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
- ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+ ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
if (IS_ERR(ctx)) {
r = PTR_ERR(ctx);
break;
@@ -1602,7 +1636,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
- ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+ ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
if (IS_ERR(ctx)) {
r = PTR_ERR(ctx);
break;
@@ -1727,7 +1761,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
r = get_user(fd, (int __user *)argp);
if (r < 0)
break;
- ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
+ ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
if (IS_ERR(ctx)) {
r = PTR_ERR(ctx);
break;
@@ -1762,15 +1796,14 @@ static int set_bit_to_user(int nr, void __user *addr)
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
- r = get_user_pages_fast(log, 1, FOLL_WRITE, &page);
+ r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
base = kmap_atomic(page);
set_bit(bit, base);
kunmap_atomic(base);
- set_page_dirty_lock(page);
- put_page(page);
+ unpin_user_pages_dirty_lock(&page, 1, true);
return 0;
}
@@ -2301,7 +2334,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
unsigned count)
{
- struct vring_used_elem __user *used;
+ vring_used_elem_t __user *used;
u16 old, new;
int start;
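
The vring ioctls above now compare the passed descriptor against the named VHOST_FILE_UNBIND constant rather than a bare -1; the userspace contract itself is unchanged. For reference, a hedged sketch of detaching a call eventfd from virtqueue 0 follows, where vhost_fd is assumed to be an already configured vhost device fd.

/* Hedged sketch: unbind the call eventfd of virtqueue 0. */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int unbind_vring_call(int vhost_fd)
{
	struct vhost_vring_file file = {
		.index = 0,			/* virtqueue 0 */
		.fd = VHOST_FILE_UNBIND,	/* previously spelled as a literal -1 */
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_CALL, &file);
}
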
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f8403bd46b85..c8e96a095d3b 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -67,9 +67,9 @@ struct vhost_virtqueue {
/* The actual ring of buffers. */
struct mutex mutex;
unsigned int num;
- struct vring_desc __user *desc;
- struct vring_avail __user *avail;
- struct vring_used __user *used;
+ vring_desc_t __user *desc;
+ vring_avail_t __user *avail;
+ vring_used_t __user *used;
const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
struct eventfd_ctx *call_ctx;
@@ -154,6 +154,7 @@ struct vhost_dev {
int weight;
int byte_weight;
u64 kcov_handle;
+ bool use_worker;
int (*msg_handler)(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg);
};
@@ -161,6 +162,7 @@ struct vhost_dev {
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
int nvqs, int iov_limit, int weight, int byte_weight,
+ bool use_worker,
int (*msg_handler)(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index ba8e0d6cfd97..e059a9a47cdf 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -620,9 +620,9 @@ static inline int xfer_to_user(const struct vringh *vrh,
*/
int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used)
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used)
{
/* Sane power of 2 please! */
if (!num || num > 0xffff || (num & (num - 1))) {
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index fb4e944c4d0d..a483cec31d5c 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -632,7 +632,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
- VHOST_VSOCK_WEIGHT, NULL);
+ VHOST_VSOCK_WEIGHT, true, NULL);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index cac3e35d7630..92d80aa0c0ef 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -433,6 +433,27 @@ struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
EXPORT_SYMBOL(backlight_device_get_by_type);
/**
+ * backlight_device_get_by_name - Get backlight device by name
+ * @name: Device name
+ *
+ * This function looks up a backlight device by its name. It takes a reference
+ * on the backlight device; it is the caller's responsibility to drop that
+ * reference by calling backlight_put().
+ *
+ * Returns:
+ * A pointer to the backlight device if found, otherwise NULL.
+ */
+struct backlight_device *backlight_device_get_by_name(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_name(backlight_class, name);
+
+ return dev ? to_backlight_device(dev) : NULL;
+}
+EXPORT_SYMBOL(backlight_device_get_by_name);
+
+/**
* backlight_device_unregister - unregisters a backlight device object.
* @bd: the backlight device object to be unregistered and freed.
*
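
backlight_device_get_by_name() above complements backlight_device_get_by_type() with a by-name lookup that returns a referenced device. A hedged in-kernel usage sketch follows; the "backlight0" name and the calling context are invented for illustration.

/* Illustrative only: look up a backlight by name and set it to half brightness. */
#include <linux/backlight.h>
#include <linux/errno.h>

static int example_dim_named_backlight(void)
{
	struct backlight_device *bd;
	int ret;

	bd = backlight_device_get_by_name("backlight0");	/* hypothetical name */
	if (!bd)
		return -ENODEV;

	ret = backlight_device_set_brightness(bd, bd->props.max_brightness / 2);

	backlight_put(bd);	/* drop the reference taken by the lookup */
	return ret;
}
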
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 8554b4aa980c..46f97d1c3d21 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -14,13 +14,11 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/lcd.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-
#include <linux/spi/spi.h>
-#include <linux/spi/l4f00242t03.h>
struct l4f00242t03_priv {
struct spi_device *spi;
@@ -28,16 +26,18 @@ struct l4f00242t03_priv {
int lcd_state;
struct regulator *io_reg;
struct regulator *core_reg;
+ struct gpio_desc *reset;
+ struct gpio_desc *enable;
};
-static void l4f00242t03_reset(unsigned int gpio)
+static void l4f00242t03_reset(struct gpio_desc *gpiod)
{
pr_debug("l4f00242t03_reset.\n");
- gpio_set_value(gpio, 1);
+ gpiod_set_value(gpiod, 1);
mdelay(100);
- gpio_set_value(gpio, 0);
+ gpiod_set_value(gpiod, 0);
mdelay(10); /* tRES >= 100us */
- gpio_set_value(gpio, 1);
+ gpiod_set_value(gpiod, 1);
mdelay(20);
}
@@ -45,7 +45,6 @@ static void l4f00242t03_reset(unsigned int gpio)
static void l4f00242t03_lcd_init(struct spi_device *spi)
{
- struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
int ret;
@@ -76,21 +75,20 @@ static void l4f00242t03_lcd_init(struct spi_device *spi)
return;
}
- l4f00242t03_reset(pdata->reset_gpio);
+ l4f00242t03_reset(priv->reset);
- gpio_set_value(pdata->data_enable_gpio, 1);
+ gpiod_set_value(priv->enable, 1);
msleep(60);
spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
}
static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
{
- struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
struct l4f00242t03_priv *priv = spi_get_drvdata(spi);
dev_dbg(&spi->dev, "Powering down LCD\n");
- gpio_set_value(pdata->data_enable_gpio, 0);
+ gpiod_set_value(priv->enable, 0);
regulator_disable(priv->io_reg);
regulator_disable(priv->core_reg);
@@ -168,13 +166,6 @@ static struct lcd_ops l4f_ops = {
static int l4f00242t03_probe(struct spi_device *spi)
{
struct l4f00242t03_priv *priv;
- struct l4f00242t03_pdata *pdata = dev_get_platdata(&spi->dev);
- int ret;
-
- if (pdata == NULL) {
- dev_err(&spi->dev, "Uninitialized platform data.\n");
- return -EINVAL;
- }
priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
GFP_KERNEL);
@@ -187,21 +178,21 @@ static int l4f00242t03_probe(struct spi_device *spi)
priv->spi = spi;
- ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd l4f00242t03 reset");
- if (ret) {
+ priv->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset)) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- return ret;
+ return PTR_ERR(priv->reset);
}
+ gpiod_set_consumer_name(priv->reset, "lcd l4f00242t03 reset");
- ret = devm_gpio_request_one(&spi->dev, pdata->data_enable_gpio,
- GPIOF_OUT_INIT_LOW, "lcd l4f00242t03 data enable");
- if (ret) {
+ priv->enable = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->enable)) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- return ret;
+ return PTR_ERR(priv->enable);
}
+ gpiod_set_consumer_name(priv->enable, "lcd l4f00242t03 data enable");
priv->io_reg = devm_regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index f68920131a4a..e94932c69f54 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -456,7 +456,7 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = regulator_enable(lp->enable);
if (ret < 0) {
dev_err(lp->dev, "failed to enable vddio: %d\n", ret);
- return ret;
+ goto disable_supply;
}
/*
@@ -471,24 +471,34 @@ static int lp855x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
ret = lp855x_configure(lp);
if (ret) {
dev_err(lp->dev, "device config err: %d", ret);
- return ret;
+ goto disable_vddio;
}
ret = lp855x_backlight_register(lp);
if (ret) {
dev_err(lp->dev,
"failed to register backlight. err: %d\n", ret);
- return ret;
+ goto disable_vddio;
}
ret = sysfs_create_group(&lp->dev->kobj, &lp855x_attr_group);
if (ret) {
dev_err(lp->dev, "failed to register sysfs. err: %d\n", ret);
- return ret;
+ goto disable_vddio;
}
backlight_update_status(lp->bl);
+
return 0;
+
+disable_vddio:
+ if (lp->enable)
+ regulator_disable(lp->enable);
+disable_supply:
+ if (lp->supply)
+ regulator_disable(lp->supply);
+
+ return ret;
}
static int lp855x_remove(struct i2c_client *cl)
@@ -497,6 +507,8 @@ static int lp855x_remove(struct i2c_client *cl)
lp->bl->props.brightness = 0;
backlight_update_status(lp->bl);
+ if (lp->enable)
+ regulator_disable(lp->enable);
if (lp->supply)
regulator_disable(lp->supply);
sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index 3d276b30a78c..4c8c34b99441 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -15,16 +15,21 @@
/* From DT binding */
#define WLED_MAX_STRINGS 4
+#define MOD_A 0
+#define MOD_B 1
#define WLED_DEFAULT_BRIGHTNESS 2048
#define WLED_SOFT_START_DLY_US 10000
#define WLED3_SINK_REG_BRIGHT_MAX 0xFFF
+#define WLED5_SINK_REG_BRIGHT_MAX_12B 0xFFF
+#define WLED5_SINK_REG_BRIGHT_MAX_15B 0x7FFF
/* WLED3/WLED4 control registers */
#define WLED3_CTRL_REG_FAULT_STATUS 0x08
#define WLED3_CTRL_REG_ILIM_FAULT_BIT BIT(0)
#define WLED3_CTRL_REG_OVP_FAULT_BIT BIT(1)
#define WLED4_CTRL_REG_SC_FAULT_BIT BIT(2)
+#define WLED5_CTRL_REG_OVP_PRE_ALARM_BIT BIT(4)
#define WLED3_CTRL_REG_INT_RT_STS 0x10
#define WLED3_CTRL_REG_OVP_FAULT_STATUS BIT(1)
@@ -40,6 +45,7 @@
#define WLED3_CTRL_REG_OVP 0x4d
#define WLED3_CTRL_REG_OVP_MASK GENMASK(1, 0)
+#define WLED5_CTRL_REG_OVP_MASK GENMASK(3, 0)
#define WLED3_CTRL_REG_ILIMIT 0x4e
#define WLED3_CTRL_REG_ILIMIT_MASK GENMASK(2, 0)
@@ -101,6 +107,44 @@
#define WLED4_SINK_REG_BRIGHT(n) (0x57 + (n * 0x10))
+/* WLED5 specific control registers */
+#define WLED5_CTRL_REG_OVP_INT_CTL 0x5f
+#define WLED5_CTRL_REG_OVP_INT_TIMER_MASK GENMASK(2, 0)
+
+/* WLED5 specific sink registers */
+#define WLED5_SINK_REG_MOD_A_EN 0x50
+#define WLED5_SINK_REG_MOD_B_EN 0x60
+#define WLED5_SINK_REG_MOD_EN_MASK BIT(7)
+
+#define WLED5_SINK_REG_MOD_A_SRC_SEL 0x51
+#define WLED5_SINK_REG_MOD_B_SRC_SEL 0x61
+#define WLED5_SINK_REG_MOD_SRC_SEL_HIGH 0
+#define WLED5_SINK_REG_MOD_SRC_SEL_EXT 0x03
+#define WLED5_SINK_REG_MOD_SRC_SEL_MASK GENMASK(1, 0)
+
+#define WLED5_SINK_REG_MOD_A_BRIGHTNESS_WIDTH_SEL 0x52
+#define WLED5_SINK_REG_MOD_B_BRIGHTNESS_WIDTH_SEL 0x62
+#define WLED5_SINK_REG_BRIGHTNESS_WIDTH_12B 0
+#define WLED5_SINK_REG_BRIGHTNESS_WIDTH_15B 1
+
+#define WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB 0x53
+#define WLED5_SINK_REG_MOD_A_BRIGHTNESS_MSB 0x54
+#define WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB 0x63
+#define WLED5_SINK_REG_MOD_B_BRIGHTNESS_MSB 0x64
+
+#define WLED5_SINK_REG_MOD_SYNC_BIT 0x65
+#define WLED5_SINK_REG_SYNC_MOD_A_BIT BIT(0)
+#define WLED5_SINK_REG_SYNC_MOD_B_BIT BIT(1)
+#define WLED5_SINK_REG_SYNC_MASK GENMASK(1, 0)
+
+/* WLED5 specific per-'string' registers below */
+#define WLED5_SINK_REG_STR_FULL_SCALE_CURR(n) (0x72 + (n * 0x10))
+
+#define WLED5_SINK_REG_STR_SRC_SEL(n) (0x73 + (n * 0x10))
+#define WLED5_SINK_REG_SRC_SEL_MOD_A 0
+#define WLED5_SINK_REG_SRC_SEL_MOD_B 1
+#define WLED5_SINK_REG_SRC_SEL_MASK GENMASK(1, 0)
+
struct wled_var_cfg {
const u32 *values;
u32 (*fn)(u32);
@@ -125,6 +169,8 @@ struct wled_config {
u32 num_strings;
u32 string_i_limit;
u32 enabled_strings[WLED_MAX_STRINGS];
+ u32 mod_sel;
+ u32 cabc_sel;
bool cs_out_en;
bool ext_gen;
bool cabc;
@@ -147,14 +193,39 @@ struct wled {
u32 max_brightness;
u32 short_count;
u32 auto_detect_count;
+ u32 version;
bool disabled_by_short;
bool has_short_detect;
+ bool cabc_disabled;
int short_irq;
int ovp_irq;
struct wled_config cfg;
struct delayed_work ovp_work;
+
+ /* Configures the brightness. Applicable for wled3, wled4 and wled5 */
int (*wled_set_brightness)(struct wled *wled, u16 brightness);
+
+ /* Configures the cabc register. Applicable for wled4 and wled5 */
+ int (*wled_cabc_config)(struct wled *wled, bool enable);
+
+ /*
+ * Toggles the sync bit for the brightness update to take place.
+ * Applicable for WLED3, WLED4 and WLED5.
+ */
+ int (*wled_sync_toggle)(struct wled *wled);
+
+ /*
+ * Time to wait before checking the OVP status after wled module enable.
+ * Applicable for WLED4 and WLED5.
+ */
+ int (*wled_ovp_delay)(struct wled *wled);
+
+ /*
+ * Determines if the auto string detection is required.
+ * Applicable for WLED4 and WLED5
+ */
+ bool (*wled_auto_detection_required)(struct wled *wled);
};
static int wled3_set_brightness(struct wled *wled, u16 brightness)
@@ -198,6 +269,28 @@ static int wled4_set_brightness(struct wled *wled, u16 brightness)
return 0;
}
+static int wled5_set_brightness(struct wled *wled, u16 brightness)
+{
+ int rc, offset;
+ u16 low_limit = wled->max_brightness * 1 / 1000;
+ u8 v[2];
+
+ /* WLED5's lower limit is 0.1% */
+ if (brightness < low_limit)
+ brightness = low_limit;
+
+ v[0] = brightness & 0xff;
+ v[1] = (brightness >> 8) & 0x7f;
+
+ offset = (wled->cfg.mod_sel == MOD_A) ?
+ WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB :
+ WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB;
+
+ rc = regmap_bulk_write(wled->regmap, wled->sink_addr + offset,
+ v, 2);
+ return rc;
+}
+
static void wled_ovp_work(struct work_struct *work)
{
struct wled *wled = container_of(work,
@@ -237,7 +330,7 @@ static int wled_module_enable(struct wled *wled, int val)
return 0;
}
-static int wled_sync_toggle(struct wled *wled)
+static int wled3_sync_toggle(struct wled *wled)
{
int rc;
unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
@@ -255,6 +348,88 @@ static int wled_sync_toggle(struct wled *wled)
return rc;
}
+static int wled5_sync_toggle(struct wled *wled)
+{
+ int rc;
+ u8 val;
+
+ val = (wled->cfg.mod_sel == MOD_A) ? WLED5_SINK_REG_SYNC_MOD_A_BIT :
+ WLED5_SINK_REG_SYNC_MOD_B_BIT;
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED5_SINK_REG_MOD_SYNC_BIT,
+ WLED5_SINK_REG_SYNC_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ return regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED5_SINK_REG_MOD_SYNC_BIT,
+ WLED5_SINK_REG_SYNC_MASK, 0);
+}
+
+static int wled_ovp_fault_status(struct wled *wled, bool *fault_set)
+{
+ int rc;
+ u32 int_rt_sts, fault_sts;
+
+ *fault_set = false;
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS,
+ &int_rt_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read INT_RT_STS rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = regmap_read(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FAULT_STATUS,
+ &fault_sts);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to read FAULT_STATUS rc=%d\n", rc);
+ return rc;
+ }
+
+ if (int_rt_sts & WLED3_CTRL_REG_OVP_FAULT_STATUS)
+ *fault_set = true;
+
+ if (wled->version == 4 && (fault_sts & WLED3_CTRL_REG_OVP_FAULT_BIT))
+ *fault_set = true;
+
+ if (wled->version == 5 && (fault_sts & (WLED3_CTRL_REG_OVP_FAULT_BIT |
+ WLED5_CTRL_REG_OVP_PRE_ALARM_BIT)))
+ *fault_set = true;
+
+ if (*fault_set)
+ dev_dbg(wled->dev, "WLED OVP fault detected, int_rt_sts=0x%x fault_sts=0x%x\n",
+ int_rt_sts, fault_sts);
+
+ return rc;
+}
+
+static int wled4_ovp_delay(struct wled *wled)
+{
+ return WLED_SOFT_START_DLY_US;
+}
+
+static int wled5_ovp_delay(struct wled *wled)
+{
+ int rc, delay_us;
+ u32 val;
+ u8 ovp_timer_ms[8] = {1, 2, 4, 8, 12, 16, 20, 24};
+
+ /* For WLED5, get the delay based on OVP timer */
+ rc = regmap_read(wled->regmap, wled->ctrl_addr +
+ WLED5_CTRL_REG_OVP_INT_CTL, &val);
+ if (!rc)
+ delay_us =
+ ovp_timer_ms[val & WLED5_CTRL_REG_OVP_INT_TIMER_MASK] * 1000;
+ else
+ delay_us = 2 * WLED_SOFT_START_DLY_US;
+
+ dev_dbg(wled->dev, "delay_time_us: %d\n", delay_us);
+
+ return delay_us;
+}
+
static int wled_update_status(struct backlight_device *bl)
{
struct wled *wled = bl_get_data(bl);
@@ -275,7 +450,7 @@ static int wled_update_status(struct backlight_device *bl)
goto unlock_mutex;
}
- rc = wled_sync_toggle(wled);
+ rc = wled->wled_sync_toggle(wled);
if (rc < 0) {
dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
goto unlock_mutex;
@@ -298,6 +473,50 @@ unlock_mutex:
return rc;
}
+static int wled4_cabc_config(struct wled *wled, bool enable)
+{
+ int i, j, rc;
+ u8 val;
+
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ j = wled->cfg.enabled_strings[i];
+
+ val = enable ? WLED4_SINK_REG_STR_CABC_MASK : 0;
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_CABC(j),
+ WLED4_SINK_REG_STR_CABC_MASK, val);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int wled5_cabc_config(struct wled *wled, bool enable)
+{
+ int rc, offset;
+ u8 reg;
+
+ if (wled->cabc_disabled)
+ return 0;
+
+ reg = enable ? wled->cfg.cabc_sel : 0;
+ offset = (wled->cfg.mod_sel == MOD_A) ? WLED5_SINK_REG_MOD_A_SRC_SEL :
+ WLED5_SINK_REG_MOD_B_SRC_SEL;
+
+ rc = regmap_update_bits(wled->regmap, wled->sink_addr + offset,
+ WLED5_SINK_REG_MOD_SRC_SEL_MASK, reg);
+ if (rc < 0) {
+ pr_err("Error in configuring CABC rc=%d\n", rc);
+ return rc;
+ }
+
+ if (!wled->cfg.cabc_sel)
+ wled->cabc_disabled = true;
+
+ return 0;
+}
+
#define WLED_SHORT_DLY_MS 20
#define WLED_SHORT_CNT_MAX 5
#define WLED_SHORT_RESET_CNT_DLY_US USEC_PER_SEC
@@ -345,9 +564,10 @@ unlock_mutex:
static void wled_auto_string_detection(struct wled *wled)
{
- int rc = 0, i;
- u32 sink_config = 0, int_sts;
+ int rc = 0, i, delay_time_us;
+ u32 sink_config = 0;
u8 sink_test = 0, sink_valid = 0, val;
+ bool fault_set;
/* Read configured sink configuration */
rc = regmap_read(wled->regmap, wled->sink_addr +
@@ -376,14 +596,9 @@ static void wled_auto_string_detection(struct wled *wled)
}
if (wled->cfg.cabc) {
- for (i = 0; i < wled->cfg.num_strings; i++) {
- rc = regmap_update_bits(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_CABC(i),
- WLED4_SINK_REG_STR_CABC_MASK,
- 0);
- if (rc < 0)
- goto failed_detect;
- }
+ rc = wled->wled_cabc_config(wled, false);
+ if (rc < 0)
+ goto failed_detect;
}
/* Disable all sinks */
@@ -427,18 +642,17 @@ static void wled_auto_string_detection(struct wled *wled)
goto failed_detect;
}
- usleep_range(WLED_SOFT_START_DLY_US,
- WLED_SOFT_START_DLY_US + 1000);
+ delay_time_us = wled->wled_ovp_delay(wled);
+ usleep_range(delay_time_us, delay_time_us + 1000);
- rc = regmap_read(wled->regmap, wled->ctrl_addr +
- WLED3_CTRL_REG_INT_RT_STS, &int_sts);
+ rc = wled_ovp_fault_status(wled, &fault_set);
if (rc < 0) {
- dev_err(wled->dev, "Error in reading WLED3_CTRL_INT_RT_STS rc=%d\n",
+ dev_err(wled->dev, "Error in getting OVP fault_sts, rc=%d\n",
rc);
goto failed_detect;
}
- if (int_sts & WLED3_CTRL_REG_OVP_FAULT_STATUS)
+ if (fault_set)
dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n",
i + 1);
else
@@ -478,30 +692,30 @@ static void wled_auto_string_detection(struct wled *wled)
}
/* Enable valid sinks */
- for (i = 0; i < wled->cfg.num_strings; i++) {
- if (wled->cfg.cabc) {
- rc = regmap_update_bits(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_CABC(i),
- WLED4_SINK_REG_STR_CABC_MASK,
- WLED4_SINK_REG_STR_CABC_MASK);
- if (rc < 0)
+ if (wled->version == 4) {
+ for (i = 0; i < wled->cfg.num_strings; i++) {
+ if (sink_config &
+ BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
+ val = WLED4_SINK_REG_STR_MOD_MASK;
+ else
+ /* Disable modulator_en for unused sink */
+ val = 0;
+
+ rc = regmap_write(wled->regmap, wled->sink_addr +
+ WLED4_SINK_REG_STR_MOD_EN(i), val);
+ if (rc < 0) {
+ dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
+ rc);
goto failed_detect;
- }
-
- if (sink_config & BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i))
- val = WLED4_SINK_REG_STR_MOD_MASK;
- else
- val = 0x0; /* Disable modulator_en for unused sink */
-
- rc = regmap_write(wled->regmap, wled->sink_addr +
- WLED4_SINK_REG_STR_MOD_EN(i), val);
- if (rc < 0) {
- dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n",
- rc);
- goto failed_detect;
+ }
}
}
+ /* Enable CABC */
+ rc = wled->wled_cabc_config(wled, true);
+ if (rc < 0)
+ goto failed_detect;
+
/* Restore the feedback setting */
rc = regmap_write(wled->regmap,
wled->ctrl_addr + WLED3_CTRL_REG_FEEDBACK_CONTROL, 0);
@@ -534,7 +748,8 @@ failed_detect:
#define WLED_AUTO_DETECT_OVP_COUNT 5
#define WLED_AUTO_DETECT_CNT_DLY_US USEC_PER_SEC
-static bool wled_auto_detection_required(struct wled *wled)
+
+static bool wled4_auto_detection_required(struct wled *wled)
{
s64 elapsed_time_us;
@@ -567,32 +782,39 @@ static bool wled_auto_detection_required(struct wled *wled)
return false;
}
+static bool wled5_auto_detection_required(struct wled *wled)
+{
+ if (!wled->cfg.auto_detection_enabled)
+ return false;
+
+ /*
+ * Unlike WLED4, WLED5 has an OVP fault density interrupt configuration,
+ * i.e. it counts the number of OVP alarms over a certain duration before
+ * triggering the OVP fault interrupt. By default, 32 OVP fault events
+ * must be counted within a 12 ms interval before the interrupt fires.
+ * Seeing a single OVP fault interrupt therefore qualifies as a real OVP
+ * fault condition and is enough to run the auto-detection algorithm.
+ */
+ return true;
+}
+
static int wled_auto_detection_at_init(struct wled *wled)
{
int rc;
- u32 fault_status, rt_status;
+ bool fault_set;
if (!wled->cfg.auto_detection_enabled)
return 0;
- rc = regmap_read(wled->regmap,
- wled->ctrl_addr + WLED3_CTRL_REG_INT_RT_STS,
- &rt_status);
+ rc = wled_ovp_fault_status(wled, &fault_set);
if (rc < 0) {
- dev_err(wled->dev, "Failed to read RT status rc=%d\n", rc);
- return rc;
- }
-
- rc = regmap_read(wled->regmap,
- wled->ctrl_addr + WLED3_CTRL_REG_FAULT_STATUS,
- &fault_status);
- if (rc < 0) {
- dev_err(wled->dev, "Failed to read fault status rc=%d\n", rc);
+ dev_err(wled->dev, "Error in getting OVP fault_sts, rc=%d\n",
+ rc);
return rc;
}
- if ((rt_status & WLED3_CTRL_REG_OVP_FAULT_STATUS) ||
- (fault_status & WLED3_CTRL_REG_OVP_FAULT_BIT)) {
+ if (fault_set) {
mutex_lock(&wled->lock);
wled_auto_string_detection(wled);
mutex_unlock(&wled->lock);
@@ -629,7 +851,7 @@ static irqreturn_t wled_ovp_irq_handler(int irq, void *_wled)
int_sts, fault_sts);
if (fault_sts & WLED3_CTRL_REG_OVP_FAULT_BIT) {
- if (wled_auto_detection_required(wled)) {
+ if (wled->wled_auto_detection_required(wled)) {
mutex_lock(&wled->lock);
wled_auto_string_detection(wled);
mutex_unlock(&wled->lock);
@@ -811,17 +1033,12 @@ static int wled4_setup(struct wled *wled)
wled->cfg.string_i_limit);
if (rc < 0)
return rc;
-
- addr = wled->sink_addr +
- WLED4_SINK_REG_STR_CABC(j);
- rc = regmap_update_bits(wled->regmap, addr,
- WLED4_SINK_REG_STR_CABC_MASK,
- wled->cfg.cabc ?
- WLED4_SINK_REG_STR_CABC_MASK : 0);
- if (rc < 0)
- return rc;
}
+ rc = wled4_cabc_config(wled, wled->cfg.cabc);
+ if (rc < 0)
+ return rc;
+
rc = regmap_update_bits(wled->regmap, wled->ctrl_addr +
WLED3_CTRL_REG_MOD_EN,
WLED3_CTRL_REG_MOD_EN_MASK,
@@ -835,7 +1052,7 @@ static int wled4_setup(struct wled *wled)
if (rc < 0)
return rc;
- rc = wled_sync_toggle(wled);
+ rc = wled->wled_sync_toggle(wled);
if (rc < 0) {
dev_err(wled->dev, "Failed to toggle sync reg rc:%d\n", rc);
return rc;
@@ -857,6 +1074,119 @@ static const struct wled_config wled4_config_defaults = {
.auto_detection_enabled = false,
};
+static int wled5_setup(struct wled *wled)
+{
+ int rc, temp, i, j, offset;
+ u8 sink_en = 0;
+ u16 addr;
+ u32 val;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_OVP,
+ WLED5_CTRL_REG_OVP_MASK, wled->cfg.ovp);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_ILIMIT,
+ WLED3_CTRL_REG_ILIMIT_MASK,
+ wled->cfg.boost_i_limit);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->ctrl_addr + WLED3_CTRL_REG_FREQ,
+ WLED3_CTRL_REG_FREQ_MASK,
+ wled->cfg.switch_freq);
+ if (rc < 0)
+ return rc;
+
+ /* Per sink/string configuration */
+ for (i = 0; i < wled->cfg.num_strings; ++i) {
+ j = wled->cfg.enabled_strings[i];
+ addr = wled->sink_addr +
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED4_SINK_REG_STR_FULL_SCALE_CURR_MASK,
+ wled->cfg.string_i_limit);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr + WLED5_SINK_REG_STR_SRC_SEL(j);
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED5_SINK_REG_SRC_SEL_MASK,
+ wled->cfg.mod_sel == MOD_A ?
+ WLED5_SINK_REG_SRC_SEL_MOD_A :
+ WLED5_SINK_REG_SRC_SEL_MOD_B);
+
+ temp = j + WLED4_SINK_REG_CURR_SINK_SHFT;
+ sink_en |= 1 << temp;
+ }
+
+ rc = wled5_cabc_config(wled, wled->cfg.cabc_sel ? true : false);
+ if (rc < 0)
+ return rc;
+
+ /* Enable one of the modulators A or B based on mod_sel */
+ addr = wled->sink_addr + WLED5_SINK_REG_MOD_A_EN;
+ val = (wled->cfg.mod_sel == MOD_A) ? WLED5_SINK_REG_MOD_EN_MASK : 0;
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED5_SINK_REG_MOD_EN_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ addr = wled->sink_addr + WLED5_SINK_REG_MOD_B_EN;
+ val = (wled->cfg.mod_sel == MOD_B) ? WLED5_SINK_REG_MOD_EN_MASK : 0;
+ rc = regmap_update_bits(wled->regmap, addr,
+ WLED5_SINK_REG_MOD_EN_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ offset = (wled->cfg.mod_sel == MOD_A) ?
+ WLED5_SINK_REG_MOD_A_BRIGHTNESS_WIDTH_SEL :
+ WLED5_SINK_REG_MOD_B_BRIGHTNESS_WIDTH_SEL;
+
+ addr = wled->sink_addr + offset;
+ val = (wled->max_brightness == WLED5_SINK_REG_BRIGHT_MAX_15B) ?
+ WLED5_SINK_REG_BRIGHTNESS_WIDTH_15B :
+ WLED5_SINK_REG_BRIGHTNESS_WIDTH_12B;
+ rc = regmap_write(wled->regmap, addr, val);
+ if (rc < 0)
+ return rc;
+
+ rc = regmap_update_bits(wled->regmap,
+ wled->sink_addr + WLED4_SINK_REG_CURR_SINK,
+ WLED4_SINK_REG_CURR_SINK_MASK, sink_en);
+ if (rc < 0)
+ return rc;
+
+ /* This updates only FSC configuration in WLED5 */
+ rc = wled->wled_sync_toggle(wled);
+ if (rc < 0) {
+ pr_err("Failed to toggle sync reg rc:%d\n", rc);
+ return rc;
+ }
+
+ rc = wled_auto_detection_at_init(wled);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static const struct wled_config wled5_config_defaults = {
+ .boost_i_limit = 5,
+ .string_i_limit = 10,
+ .ovp = 4,
+ .num_strings = 4,
+ .switch_freq = 11,
+ .mod_sel = 0,
+ .cabc_sel = 0,
+ .cabc = false,
+ .external_pfet = false,
+ .auto_detection_enabled = false,
+};
+
static const u32 wled3_boost_i_limit_values[] = {
105, 385, 525, 805, 980, 1260, 1400, 1680,
};
@@ -875,6 +1205,16 @@ static const struct wled_var_cfg wled4_boost_i_limit_cfg = {
.size = ARRAY_SIZE(wled4_boost_i_limit_values),
};
+static inline u32 wled5_boost_i_limit_values_fn(u32 idx)
+{
+ return 525 + (idx * 175);
+}
+
+static const struct wled_var_cfg wled5_boost_i_limit_cfg = {
+ .fn = wled5_boost_i_limit_values_fn,
+ .size = 8,
+};
+
static const u32 wled3_ovp_values[] = {
35, 32, 29, 27,
};
@@ -893,6 +1233,21 @@ static const struct wled_var_cfg wled4_ovp_cfg = {
.size = ARRAY_SIZE(wled4_ovp_values),
};
+static inline u32 wled5_ovp_values_fn(u32 idx)
+{
+ /*
+ * 0000 - 38.5 V
+ * 0001 - 37 V
+ * ...
+ * 1111 - 16 V
+ */
+ return 38500 - (idx * 1500);
+}
+
+static const struct wled_var_cfg wled5_ovp_cfg = {
+ .fn = wled5_ovp_values_fn,
+ .size = 16,
+};
+
static u32 wled3_num_strings_values_fn(u32 idx)
{
return idx + 1;
@@ -940,6 +1295,14 @@ static const struct wled_var_cfg wled4_string_cfg = {
.size = 16,
};
+static const struct wled_var_cfg wled5_mod_sel_cfg = {
+ .size = 2,
+};
+
+static const struct wled_var_cfg wled5_cabc_sel_cfg = {
+ .size = 4,
+};
+
static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx)
{
if (idx >= cfg->size)
@@ -951,7 +1314,7 @@ static u32 wled_values(const struct wled_var_cfg *cfg, u32 idx)
return idx;
}
-static int wled_configure(struct wled *wled, int version)
+static int wled_configure(struct wled *wled)
{
struct wled_config *cfg = &wled->cfg;
struct device *dev = wled->dev;
@@ -1016,6 +1379,44 @@ static int wled_configure(struct wled *wled, int version)
},
};
+ const struct wled_u32_opts wled5_opts[] = {
+ {
+ .name = "qcom,current-boost-limit",
+ .val_ptr = &cfg->boost_i_limit,
+ .cfg = &wled5_boost_i_limit_cfg,
+ },
+ {
+ .name = "qcom,current-limit-microamp",
+ .val_ptr = &cfg->string_i_limit,
+ .cfg = &wled4_string_i_limit_cfg,
+ },
+ {
+ .name = "qcom,ovp-millivolt",
+ .val_ptr = &cfg->ovp,
+ .cfg = &wled5_ovp_cfg,
+ },
+ {
+ .name = "qcom,switching-freq",
+ .val_ptr = &cfg->switch_freq,
+ .cfg = &wled3_switch_freq_cfg,
+ },
+ {
+ .name = "qcom,num-strings",
+ .val_ptr = &cfg->num_strings,
+ .cfg = &wled4_num_strings_cfg,
+ },
+ {
+ .name = "qcom,modulator-sel",
+ .val_ptr = &cfg->mod_sel,
+ .cfg = &wled5_mod_sel_cfg,
+ },
+ {
+ .name = "qcom,cabc-sel",
+ .val_ptr = &cfg->cabc_sel,
+ .cfg = &wled5_cabc_sel_cfg,
+ },
+ };
+
const struct wled_bool_opts bool_opts[] = {
{ "qcom,cs-out", &cfg->cs_out_en, },
{ "qcom,ext-gen", &cfg->ext_gen, },
@@ -1035,12 +1436,13 @@ static int wled_configure(struct wled *wled, int version)
if (rc)
wled->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
- switch (version) {
+ switch (wled->version) {
case 3:
u32_opts = wled3_opts;
size = ARRAY_SIZE(wled3_opts);
*cfg = wled3_config_defaults;
wled->wled_set_brightness = wled3_set_brightness;
+ wled->wled_sync_toggle = wled3_sync_toggle;
wled->max_string_count = 3;
wled->sink_addr = wled->ctrl_addr;
break;
@@ -1050,6 +1452,31 @@ static int wled_configure(struct wled *wled, int version)
size = ARRAY_SIZE(wled4_opts);
*cfg = wled4_config_defaults;
wled->wled_set_brightness = wled4_set_brightness;
+ wled->wled_sync_toggle = wled3_sync_toggle;
+ wled->wled_cabc_config = wled4_cabc_config;
+ wled->wled_ovp_delay = wled4_ovp_delay;
+ wled->wled_auto_detection_required =
+ wled4_auto_detection_required;
+ wled->max_string_count = 4;
+
+ prop_addr = of_get_address(dev->of_node, 1, NULL, NULL);
+ if (!prop_addr) {
+ dev_err(wled->dev, "invalid IO resources\n");
+ return -EINVAL;
+ }
+ wled->sink_addr = be32_to_cpu(*prop_addr);
+ break;
+
+ case 5:
+ u32_opts = wled5_opts;
+ size = ARRAY_SIZE(wled5_opts);
+ *cfg = wled5_config_defaults;
+ wled->wled_set_brightness = wled5_set_brightness;
+ wled->wled_sync_toggle = wled5_sync_toggle;
+ wled->wled_cabc_config = wled5_cabc_config;
+ wled->wled_ovp_delay = wled5_ovp_delay;
+ wled->wled_auto_detection_required =
+ wled5_auto_detection_required;
wled->max_string_count = 4;
prop_addr = of_get_address(dev->of_node, 1, NULL, NULL);
@@ -1186,7 +1613,6 @@ static int wled_probe(struct platform_device *pdev)
struct backlight_device *bl;
struct wled *wled;
struct regmap *regmap;
- int version;
u32 val;
int rc;
@@ -1203,18 +1629,22 @@ static int wled_probe(struct platform_device *pdev)
wled->regmap = regmap;
wled->dev = &pdev->dev;
- version = (uintptr_t)of_device_get_match_data(&pdev->dev);
- if (!version) {
+ wled->version = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (!wled->version) {
dev_err(&pdev->dev, "Unknown device version\n");
return -ENODEV;
}
mutex_init(&wled->lock);
- rc = wled_configure(wled, version);
+ rc = wled_configure(wled);
if (rc)
return rc;
- switch (version) {
+ val = WLED3_SINK_REG_BRIGHT_MAX;
+ of_property_read_u32(pdev->dev.of_node, "max-brightness", &val);
+ wled->max_brightness = val;
+
+ switch (wled->version) {
case 3:
wled->cfg.auto_detection_enabled = false;
rc = wled3_setup(wled);
@@ -1233,6 +1663,18 @@ static int wled_probe(struct platform_device *pdev)
}
break;
+ case 5:
+ wled->has_short_detect = true;
+ if (wled->cfg.cabc_sel)
+ wled->max_brightness = WLED5_SINK_REG_BRIGHT_MAX_12B;
+
+ rc = wled5_setup(wled);
+ if (rc) {
+ dev_err(&pdev->dev, "wled5_setup failed\n");
+ return rc;
+ }
+ break;
+
default:
dev_err(wled->dev, "Invalid WLED version\n");
break;
@@ -1254,7 +1696,7 @@ static int wled_probe(struct platform_device *pdev)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.brightness = val;
- props.max_brightness = WLED3_SINK_REG_BRIGHT_MAX;
+ props.max_brightness = wled->max_brightness;
bl = devm_backlight_device_register(&pdev->dev, wled->name,
&pdev->dev, wled,
&wled_ops, &props);
@@ -1277,6 +1719,7 @@ static const struct of_device_id wled_match_table[] = {
{ .compatible = "qcom,pm8941-wled", .data = (void *)3 },
{ .compatible = "qcom,pmi8998-wled", .data = (void *)4 },
{ .compatible = "qcom,pm660l-wled", .data = (void *)4 },
+ { .compatible = "qcom,pm8150l-wled", .data = (void *)5 },
{}
};
MODULE_DEVICE_TABLE(of, wled_match_table);
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 00dddf6e08b0..504cda38763e 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -24,7 +24,6 @@
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/gio_device.h>
#include <video/newport.h>
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index fa88e8b9a83d..844ada978bb7 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2008,7 +2008,7 @@ config FB_PS3_DEFAULT_SIZE_M
config FB_XILINX
tristate "Xilinx frame buffer support"
- depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
+ depends on FB && (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index a3af49529173..09a9ad901dad 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -30,7 +30,6 @@
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <asm/pgtable.h>
#include "acornfb.h"
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index 51f5d1c56fd9..f253daa05d9d 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -58,7 +58,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/io.h>
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index c3a3e344cee3..3df64a973194 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -42,7 +42,6 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_ZORRO
#include <linux/zorro.h>
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 513f58f28b0f..42d37bed518a 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -47,7 +47,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <asm/pgtable.h>
#ifdef __arm__
#include <asm/mach-types.h>
diff --git a/drivers/video/fbdev/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
index 75df6aabac21..030e85c11a78 100644
--- a/drivers/video/fbdev/fb-puv3.c
+++ b/drivers/video/fbdev/fb-puv3.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/sizes.h>
-#include <asm/pgtable.h>
#include <mach/hardware.h>
/* Platform_data reserved for unifb registers. */
diff --git a/drivers/video/fbdev/hitfb.c b/drivers/video/fbdev/hitfb.c
index 009e5d2aa100..bbb0f1d953cc 100644
--- a/drivers/video/fbdev/hitfb.c
+++ b/drivers/video/fbdev/hitfb.c
@@ -23,7 +23,6 @@
#include <asm/machvec.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/hd64461.h>
#include <cpu/dac.h>
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index e6ea853c1723..f5a676bfd67a 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -70,7 +70,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <video/vga.h>
#include <video/neomagic.h>
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 834f63edf700..9df78fb77267 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -44,7 +44,7 @@
#define GPU_CMD_BUF_SIZE (2 * 1024 * 1024)
#define GPU_FB_START (64 * 1024)
#define GPU_IOIF (0x0d000000UL)
-#define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64)
+#define GPU_ALIGN_UP(x) ALIGN((x), 64)
#define GPU_MAX_LINE_LENGTH (65536 - 64)
#define GPU_INTR_STATUS_VSYNC_0 0 /* vsync on head A */
@@ -1015,7 +1015,7 @@ static int ps3fb_probe(struct ps3_system_bus_device *dev)
}
#endif
- max_ps3fb_size = _ALIGN_UP(GPU_IOIF, 256*1024*1024) - GPU_IOIF;
+ max_ps3fb_size = ALIGN(GPU_IOIF, 256*1024*1024) - GPU_IOIF;
if (ps3fb_videomemory.size > max_ps3fb_size) {
dev_info(&dev->core, "Limiting ps3fb mem size to %lu bytes\n",
max_ps3fb_size);
diff --git a/drivers/video/fbdev/q40fb.c b/drivers/video/fbdev/q40fb.c
index 79ff14a35c85..079a2a7fb2c5 100644
--- a/drivers/video/fbdev/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
@@ -23,7 +23,6 @@
#include <asm/q40_master.h>
#include <linux/fb.h>
#include <linux/module.h>
-#include <asm/pgtable.h>
#define Q40_PHYS_SCREEN_ADDR 0xFE800000
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index aab312a7d9da..3c8ae87f0ea7 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -55,7 +55,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include "savagefb.h"
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 69a32dfc318a..4c1e14615001 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -78,6 +78,23 @@ config VIRTIO_BALLOON
If unsure, say M.
+config VIRTIO_MEM
+ tristate "Virtio mem driver"
+ default m
+ depends on X86_64
+ depends on VIRTIO
+ depends on MEMORY_HOTPLUG_SPARSE
+ depends on MEMORY_HOTREMOVE
+ select CONTIG_ALLOC
+ help
+ This driver provides access to virtio-mem paravirtualized memory
+ devices, allowing memory to be hotplugged and hotunplugged.
+
+ This driver was only tested under x86-64, but should theoretically
+ work on all architectures that support memory hotplug and hotremove.
+
+ If unsure, say M.
+
config VIRTIO_INPUT
tristate "Virtio input driver"
depends on VIRTIO
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 29a1386ecc03..4d993791f2d7 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -7,3 +7,4 @@ virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
+obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 51086a5afdd4..1f157d2f4952 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -1107,11 +1107,18 @@ static int virtballoon_restore(struct virtio_device *vdev)
static int virtballoon_validate(struct virtio_device *vdev)
{
- /* Tell the host whether we care about poisoned pages. */
+ /*
+ * Inform the hypervisor that our pages are poisoned or
+ * initialized. If we cannot do that then we should disable
+ * page reporting as it could potentially change the contents
+ * of our free pages.
+ */
if (!want_init_on_free() &&
(IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
!page_poisoning_enabled()))
__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
+ else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
+ __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
return 0;
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
new file mode 100644
index 000000000000..50c689f25045
--- /dev/null
+++ b/drivers/virtio/virtio_mem.c
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Virtio-mem device driver.
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Author(s): David Hildenbrand <david@redhat.com>
+ */
+
+#include <linux/virtio.h>
+#include <linux/virtio_mem.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/memory_hotplug.h>
+#include <linux/memory.h>
+#include <linux/hrtimer.h>
+#include <linux/crash_dump.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/lockdep.h>
+
+#include <acpi/acpi_numa.h>
+
+static bool unplug_online = true;
+module_param(unplug_online, bool, 0644);
+MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
+
+enum virtio_mem_mb_state {
+ /* Unplugged, not added to Linux. Can be reused later. */
+ VIRTIO_MEM_MB_STATE_UNUSED = 0,
+ /* (Partially) plugged, not added to Linux. Error on add_memory(). */
+ VIRTIO_MEM_MB_STATE_PLUGGED,
+ /* Fully plugged, fully added to Linux, offline. */
+ VIRTIO_MEM_MB_STATE_OFFLINE,
+ /* Partially plugged, fully added to Linux, offline. */
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
+ /* Fully plugged, fully added to Linux, online (!ZONE_MOVABLE). */
+ VIRTIO_MEM_MB_STATE_ONLINE,
+ /* Partially plugged, fully added to Linux, online (!ZONE_MOVABLE). */
+ VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
+ /*
+ * Fully plugged, fully added to Linux, online (ZONE_MOVABLE).
+ * We are not allowed to allocate (unplug) parts of this block that
+ * are not movable (similar to gigantic pages). We will never allow
+ * to online OFFLINE_PARTIAL to ZONE_MOVABLE (as they would contain
+ * unmovable parts).
+ */
+ VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE,
+ VIRTIO_MEM_MB_STATE_COUNT
+};
+
+struct virtio_mem {
+ struct virtio_device *vdev;
+
+ /* We might first have to unplug all memory when starting up. */
+ bool unplug_all_required;
+
+ /* Workqueue that processes the plug/unplug requests. */
+ struct work_struct wq;
+ atomic_t config_changed;
+
+ /* Virtqueue for guest->host requests. */
+ struct virtqueue *vq;
+
+ /* Wait for a host response to a guest request. */
+ wait_queue_head_t host_resp;
+
+ /* Space for one guest request and the host response. */
+ struct virtio_mem_req req;
+ struct virtio_mem_resp resp;
+
+ /* The current size of the device. */
+ uint64_t plugged_size;
+ /* The requested size of the device. */
+ uint64_t requested_size;
+
+ /* The device block size (for communicating with the device). */
+ uint64_t device_block_size;
+ /* The translated node id. NUMA_NO_NODE in case not specified. */
+ int nid;
+ /* Physical start address of the memory region. */
+ uint64_t addr;
+ /* Maximum region size in bytes. */
+ uint64_t region_size;
+
+ /* The subblock size. */
+ uint64_t subblock_size;
+ /* The number of subblocks per memory block. */
+ uint32_t nb_sb_per_mb;
+
+ /* Id of the first memory block of this device. */
+ unsigned long first_mb_id;
+ /* Id of the last memory block of this device. */
+ unsigned long last_mb_id;
+ /* Id of the last usable memory block of this device. */
+ unsigned long last_usable_mb_id;
+	/* Id of the next memory block to prepare when needed. */
+ unsigned long next_mb_id;
+
+ /* The parent resource for all memory added via this device. */
+ struct resource *parent_resource;
+
+ /* Summary of all memory block states. */
+ unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
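+	/*
+	 * Limit on offline (but already added) memory blocks: don't add more
+	 * blocks to Linux before existing ones get onlined, to avoid OOM.
+	 */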
+#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD 10
+
+ /*
+ * One byte state per memory block.
+ *
+	 * Allocated via vmalloc(). When preparing new blocks, resized
+	 * (alloc+copy+free) when needed - i.e., when the state for the next
+	 * memory block would cross a page boundary.
+ *
+ * With 128MB memory blocks, we have states for 512GB of memory in one
+ * page.
+ */
+ uint8_t *mb_state;
+
+ /*
+	 * nb_sb_per_mb bits per memory block. Handled similarly to mb_state.
+ *
+ * With 4MB subblocks, we manage 128GB of memory in one page.
+ */
+ unsigned long *sb_bitmap;
+
+ /*
+ * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
+ *
+ * When this lock is held the pointers can't change, ONLINE and
+ * OFFLINE blocks can't change the state and no subblocks will get
+ * plugged/unplugged.
+ */
+ struct mutex hotplug_mutex;
+ bool hotplug_active;
+
+	/* An error occurred that we cannot handle - stop processing requests. */
+ bool broken;
+
+ /* The driver is being removed. */
+ spinlock_t removal_lock;
+ bool removing;
+
+ /* Timer for retrying to plug/unplug memory. */
+ struct hrtimer retry_timer;
+ unsigned int retry_timer_ms;
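+	/* Retry interval backs off from 50 seconds up to 5 minutes. */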
+#define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000
+#define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000
+
+ /* Memory notifier (online/offline events). */
+ struct notifier_block memory_notifier;
+
+ /* Next device in the list of virtio-mem devices. */
+ struct list_head next;
+};
+
+/*
+ * We have to share a single online_page callback among all virtio-mem
+ * devices. We use RCU to iterate the list in the callback.
+ */
+static DEFINE_MUTEX(virtio_mem_mutex);
+static LIST_HEAD(virtio_mem_devices);
+
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
+
+/*
+ * Register a virtio-mem device so it will be considered for the online_page
+ * callback.
+ */
+static int register_virtio_mem_device(struct virtio_mem *vm)
+{
+ int rc = 0;
+
+ /* First device registers the callback. */
+ mutex_lock(&virtio_mem_mutex);
+ if (list_empty(&virtio_mem_devices))
+ rc = set_online_page_callback(&virtio_mem_online_page_cb);
+ if (!rc)
+ list_add_rcu(&vm->next, &virtio_mem_devices);
+ mutex_unlock(&virtio_mem_mutex);
+
+ return rc;
+}
+
+/*
+ * Unregister a virtio-mem device so it will no longer be considered for the
+ * online_page callback.
+ */
+static void unregister_virtio_mem_device(struct virtio_mem *vm)
+{
+ /* Last device unregisters the callback. */
+ mutex_lock(&virtio_mem_mutex);
+ list_del_rcu(&vm->next);
+ if (list_empty(&virtio_mem_devices))
+ restore_online_page_callback(&virtio_mem_online_page_cb);
+ mutex_unlock(&virtio_mem_mutex);
+
+ synchronize_rcu();
+}
+
+/*
+ * Calculate the memory block id of a given address.
+ */
+static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
+{
+ return addr / memory_block_size_bytes();
+}
+
+/*
+ * Calculate the physical start address of a given memory block id.
+ */
+static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
+{
+ return mb_id * memory_block_size_bytes();
+}
+
+/*
+ * Calculate the subblock id of a given address.
+ */
+static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
+ unsigned long addr)
+{
+ const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
+ const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
+
+ return (addr - mb_addr) / vm->subblock_size;
+}
+
+/*
+ * Set the state of a memory block, taking care of the state counter.
+ */
+static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
+ enum virtio_mem_mb_state state)
+{
+ const unsigned long idx = mb_id - vm->first_mb_id;
+ enum virtio_mem_mb_state old_state;
+
+ old_state = vm->mb_state[idx];
+ vm->mb_state[idx] = state;
+
+ BUG_ON(vm->nb_mb_state[old_state] == 0);
+ vm->nb_mb_state[old_state]--;
+ vm->nb_mb_state[state]++;
+}
+
+/*
+ * Get the state of a memory block.
+ */
+static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ const unsigned long idx = mb_id - vm->first_mb_id;
+
+ return vm->mb_state[idx];
+}
+
+/*
+ * Prepare the state array for the next memory block.
+ */
+static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
+{
+ unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
+ unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
+ int old_pages = PFN_UP(old_bytes);
+ int new_pages = PFN_UP(new_bytes);
+ uint8_t *new_mb_state;
+
+ if (vm->mb_state && old_pages == new_pages)
+ return 0;
+
+ new_mb_state = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_mb_state)
+ return -ENOMEM;
+
+ mutex_lock(&vm->hotplug_mutex);
+ if (vm->mb_state)
+ memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
+ vfree(vm->mb_state);
+ vm->mb_state = new_mb_state;
+ mutex_unlock(&vm->hotplug_mutex);
+
+ return 0;
+}
+
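+/*
+ * Iterate over all memory blocks of the device that are in the given state.
+ * The per-state counter lets the walk stop early once no blocks in that
+ * state remain.
+ */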
+#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
+ for (_mb_id = _vm->first_mb_id; \
+ _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
+ _mb_id++) \
+ if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+
+#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
+ for (_mb_id = _vm->next_mb_id - 1; \
+ _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
+ _mb_id--) \
+ if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+
+/*
+ * Mark all selected subblocks plugged.
+ *
+ * Will not modify the state of the memory block.
+ */
+static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
+{
+ const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+ __bitmap_set(vm->sb_bitmap, bit, count);
+}
+
+/*
+ * Mark all selected subblocks unplugged.
+ *
+ * Will not modify the state of the memory block.
+ */
+static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
+{
+ const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+ __bitmap_clear(vm->sb_bitmap, bit, count);
+}
+
+/*
+ * Test if all selected subblocks are plugged.
+ */
+static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
+{
+ const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+ if (count == 1)
+ return test_bit(bit, vm->sb_bitmap);
+
+ /* TODO: Helper similar to bitmap_set() */
+ return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
+ bit + count;
+}
+
+/*
+ * Test if all selected subblocks are unplugged.
+ */
+static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
+{
+ const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+
+ /* TODO: Helper similar to bitmap_set() */
+ return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
+}
+
+/*
+ * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
+ * none.
+ */
+static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;
+
+ return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
+ bit;
+}
+
+/*
+ * Prepare the subblock bitmap for the next memory block.
+ */
+static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
+{
+ const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
+ const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
+ const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
+ int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
+ int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
+ unsigned long *new_sb_bitmap, *old_sb_bitmap;
+
+ if (vm->sb_bitmap && old_pages == new_pages)
+ return 0;
+
+ new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_sb_bitmap)
+ return -ENOMEM;
+
+ mutex_lock(&vm->hotplug_mutex);
+	if (vm->sb_bitmap)
+ memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);
+
+ old_sb_bitmap = vm->sb_bitmap;
+ vm->sb_bitmap = new_sb_bitmap;
+ mutex_unlock(&vm->hotplug_mutex);
+
+ vfree(old_sb_bitmap);
+ return 0;
+}
+
+/*
+ * Try to add a memory block to Linux. This will usually only fail
+ * if out of memory.
+ *
+ * Must not be called with the vm->hotplug_mutex held (possible deadlock with
+ * onlining code).
+ *
+ * Will not modify the state of the memory block.
+ */
+static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+ int nid = vm->nid;
+
+ if (nid == NUMA_NO_NODE)
+ nid = memory_add_physaddr_to_nid(addr);
+
+ dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
+ return add_memory(nid, addr, memory_block_size_bytes());
+}
+
+/*
+ * Try to remove a memory block from Linux. Will only fail if the memory block
+ * is not offline.
+ *
+ * Must not be called with the vm->hotplug_mutex held (possible deadlock with
+ * onlining code).
+ *
+ * Will not modify the state of the memory block.
+ */
+static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+ int nid = vm->nid;
+
+ if (nid == NUMA_NO_NODE)
+ nid = memory_add_physaddr_to_nid(addr);
+
+ dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
+ return remove_memory(nid, addr, memory_block_size_bytes());
+}
+
+/*
+ * Try to offline and remove a memory block from Linux.
+ *
+ * Must not be called with the vm->hotplug_mutex held (possible deadlock with
+ * onlining code).
+ *
+ * Will not modify the state of the memory block.
+ */
+static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+ int nid = vm->nid;
+
+ if (nid == NUMA_NO_NODE)
+ nid = memory_add_physaddr_to_nid(addr);
+
+ dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
+ mb_id);
+ return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
+}
+
+/*
+ * Trigger the workqueue so the device can perform its magic.
+ */
+static void virtio_mem_retry(struct virtio_mem *vm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vm->removal_lock, flags);
+ if (!vm->removing)
+ queue_work(system_freezable_wq, &vm->wq);
+ spin_unlock_irqrestore(&vm->removal_lock, flags);
+}
+
+static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
+{
+ int node = NUMA_NO_NODE;
+
+#if defined(CONFIG_ACPI_NUMA)
+ if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
+ node = pxm_to_node(node_id);
+#endif
+ return node;
+}
+
+/*
+ * Test if a virtio-mem device overlaps with the given range. Can be called
+ * from (notifier) callbacks lockless.
+ */
+static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
+ unsigned long start, unsigned long size)
+{
+ unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
+ unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
+ memory_block_size_bytes();
+
+ return start < dev_end && dev_start < start + size;
+}
+
+/*
+ * Test if a virtio-mem device owns a memory block. Can be called from
+ * (notifier) callbacks lockless.
+ */
+static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
+{
+ return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
+}
+
+static int virtio_mem_notify_going_online(struct virtio_mem *vm,
+ unsigned long mb_id,
+ enum zone_type zone)
+{
+ switch (virtio_mem_mb_get_state(vm, mb_id)) {
+ case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
+ /*
+ * We won't allow to online a partially plugged memory block
+ * to the MOVABLE zone - it would contain unmovable parts.
+ */
+ if (zone == ZONE_MOVABLE) {
+ dev_warn_ratelimited(&vm->vdev->dev,
+ "memory block has holes, MOVABLE not supported\n");
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_OK;
+ case VIRTIO_MEM_MB_STATE_OFFLINE:
+ return NOTIFY_OK;
+ default:
+ break;
+ }
+ dev_warn_ratelimited(&vm->vdev->dev,
+ "memory block onlining denied\n");
+ return NOTIFY_BAD;
+}
+
+static void virtio_mem_notify_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ switch (virtio_mem_mb_get_state(vm, mb_id)) {
+ case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ break;
+ case VIRTIO_MEM_MB_STATE_ONLINE:
+ case VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE:
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ /*
+ * Trigger the workqueue, maybe we can now unplug memory. Also,
+ * when we offline and remove a memory block, this will re-trigger
+ * us immediately - which is often nice because the removal of
+ * the memory block (e.g., memmap) might have freed up memory
+ * on other memory blocks we manage.
+ */
+ virtio_mem_retry(vm);
+}
+
+static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id,
+ enum zone_type zone)
+{
+ unsigned long nb_offline;
+
+ switch (virtio_mem_mb_get_state(vm, mb_id)) {
+ case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
+ BUG_ON(zone == ZONE_MOVABLE);
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+ break;
+ case VIRTIO_MEM_MB_STATE_OFFLINE:
+ if (zone == ZONE_MOVABLE)
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE);
+ else
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
+
+ /* see if we can add new blocks now that we onlined one block */
+ if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
+ virtio_mem_retry(vm);
+}
+
+static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+ struct page *page;
+ unsigned long pfn;
+ int sb_id, i;
+
+ for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+ if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ continue;
+ /*
+ * Drop our reference to the pages so the memory can get
+ * offlined and add the unplugged pages to the managed
+ * page counters (so offlining code can correctly subtract
+ * them again).
+ */
+ pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+ sb_id * vm->subblock_size);
+ adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+ for (i = 0; i < nr_pages; i++) {
+ page = pfn_to_page(pfn + i);
+ if (WARN_ON(!page_ref_dec_and_test(page)))
+ dump_page(page, "unplugged page referenced");
+ }
+ }
+}
+
+static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
+{
+ const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+ unsigned long pfn;
+ int sb_id, i;
+
+ for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+ if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ continue;
+ /*
+ * Get the reference we dropped when going offline and
+ * subtract the unplugged pages from the managed page
+ * counters.
+ */
+ pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+ sb_id * vm->subblock_size);
+ adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+ for (i = 0; i < nr_pages; i++)
+ page_ref_inc(pfn_to_page(pfn + i));
+ }
+}
+
+/*
+ * This callback will either be called synchronously from add_memory() or
+ * asynchronously (e.g., triggered via user space). We have to be careful
+ * with locking when calling add_memory().
+ */
+static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
+ unsigned long action, void *arg)
+{
+ struct virtio_mem *vm = container_of(nb, struct virtio_mem,
+ memory_notifier);
+ struct memory_notify *mhp = arg;
+ const unsigned long start = PFN_PHYS(mhp->start_pfn);
+ const unsigned long size = PFN_PHYS(mhp->nr_pages);
+ const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
+ enum zone_type zone;
+ int rc = NOTIFY_OK;
+
+ if (!virtio_mem_overlaps_range(vm, start, size))
+ return NOTIFY_DONE;
+
+ /*
+ * Memory is onlined/offlined in memory block granularity. We cannot
+ * cross virtio-mem device boundaries and memory block boundaries. Bail
+ * out if this ever changes.
+ */
+ if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
+ !IS_ALIGNED(start, memory_block_size_bytes())))
+ return NOTIFY_BAD;
+
+ /*
+ * Avoid circular locking lockdep warnings. We lock the mutex
+ * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
+	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
+ * between both notifier calls and will bail out. False positive.
+ */
+ lockdep_off();
+
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ mutex_lock(&vm->hotplug_mutex);
+ if (vm->removing) {
+ rc = notifier_from_errno(-EBUSY);
+ mutex_unlock(&vm->hotplug_mutex);
+ break;
+ }
+ vm->hotplug_active = true;
+ virtio_mem_notify_going_offline(vm, mb_id);
+ break;
+ case MEM_GOING_ONLINE:
+ mutex_lock(&vm->hotplug_mutex);
+ if (vm->removing) {
+ rc = notifier_from_errno(-EBUSY);
+ mutex_unlock(&vm->hotplug_mutex);
+ break;
+ }
+ vm->hotplug_active = true;
+ zone = page_zonenum(pfn_to_page(mhp->start_pfn));
+ rc = virtio_mem_notify_going_online(vm, mb_id, zone);
+ break;
+ case MEM_OFFLINE:
+ virtio_mem_notify_offline(vm, mb_id);
+ vm->hotplug_active = false;
+ mutex_unlock(&vm->hotplug_mutex);
+ break;
+ case MEM_ONLINE:
+ zone = page_zonenum(pfn_to_page(mhp->start_pfn));
+ virtio_mem_notify_online(vm, mb_id, zone);
+ vm->hotplug_active = false;
+ mutex_unlock(&vm->hotplug_mutex);
+ break;
+ case MEM_CANCEL_OFFLINE:
+ if (!vm->hotplug_active)
+ break;
+ virtio_mem_notify_cancel_offline(vm, mb_id);
+ vm->hotplug_active = false;
+ mutex_unlock(&vm->hotplug_mutex);
+ break;
+ case MEM_CANCEL_ONLINE:
+ if (!vm->hotplug_active)
+ break;
+ vm->hotplug_active = false;
+ mutex_unlock(&vm->hotplug_mutex);
+ break;
+ default:
+ break;
+ }
+
+ lockdep_on();
+
+ return rc;
+}
+
+/*
+ * Set a range of pages PG_offline. Remember pages that were never onlined
+ * (via generic_online_page()) using PageDirty().
+ */
+static void virtio_mem_set_fake_offline(unsigned long pfn,
+ unsigned int nr_pages, bool onlined)
+{
+ for (; nr_pages--; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ __SetPageOffline(page);
+ if (!onlined) {
+ SetPageDirty(page);
+ /* FIXME: remove after cleanups */
+ ClearPageReserved(page);
+ }
+ }
+}
+
+/*
+ * Clear PG_offline from a range of pages. If the pages were never onlined
+ * (via generic_online_page()), clear PageDirty().
+ */
+static void virtio_mem_clear_fake_offline(unsigned long pfn,
+ unsigned int nr_pages, bool onlined)
+{
+ for (; nr_pages--; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+
+ __ClearPageOffline(page);
+ if (!onlined)
+ ClearPageDirty(page);
+ }
+}
+
+/*
+ * Release a range of fake-offline pages to the buddy, effectively
+ * fake-onlining them.
+ */
+static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
+{
+ const int order = MAX_ORDER - 1;
+ int i;
+
+ /*
+ * We are always called with subblock granularity, which is at least
+ * aligned to MAX_ORDER - 1.
+ */
+ for (i = 0; i < nr_pages; i += 1 << order) {
+ struct page *page = pfn_to_page(pfn + i);
+
+ /*
+ * If the page is PageDirty(), it was kept fake-offline when
+ * onlining the memory block. Otherwise, it was allocated
+ * using alloc_contig_range(). All pages in a subblock are
+ * alike.
+ */
+ if (PageDirty(page)) {
+ virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+ false);
+ generic_online_page(page, order);
+ } else {
+ virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+ true);
+ free_contig_range(pfn + i, 1 << order);
+ adjust_managed_page_count(page, 1 << order);
+ }
+ }
+}
+
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+{
+ const unsigned long addr = page_to_phys(page);
+ const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
+ struct virtio_mem *vm;
+ int sb_id;
+
+ /*
+ * We exploit here that subblocks have at least MAX_ORDER - 1
+	 * size/alignment and that this callback is called with such a
+ * size/alignment. So we cannot cross subblocks and therefore
+ * also not memory blocks.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
+ if (!virtio_mem_owned_mb(vm, mb_id))
+ continue;
+
+ sb_id = virtio_mem_phys_to_sb_id(vm, addr);
+ /*
+ * If plugged, online the pages, otherwise, set them fake
+ * offline (PageOffline).
+ */
+ if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ generic_online_page(page, order);
+ else
+ virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
+ false);
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ /* not virtio-mem memory, but e.g., a DIMM. online it */
+ generic_online_page(page, order);
+}
+
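+/*
+ * Send a request to the device and wait for the response. Only a single
+ * request is in flight at a time (requests are issued from the workqueue),
+ * so the req/resp buffers embedded in *vm are sufficient.
+ */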
+static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
+ const struct virtio_mem_req *req)
+{
+ struct scatterlist *sgs[2], sg_req, sg_resp;
+ unsigned int len;
+ int rc;
+
+ /* don't use the request residing on the stack (vaddr) */
+ vm->req = *req;
+
+ /* out: buffer for request */
+ sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
+ sgs[0] = &sg_req;
+
+ /* in: buffer for response */
+ sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
+ sgs[1] = &sg_resp;
+
+ rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
+ if (rc < 0)
+ return rc;
+
+ virtqueue_kick(vm->vq);
+
+ /* wait for a response */
+ wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
+
+ return virtio16_to_cpu(vm->vdev, vm->resp.type);
+}
+
+static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
+{
+ const uint64_t nb_vm_blocks = size / vm->device_block_size;
+ const struct virtio_mem_req req = {
+ .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
+ .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
+ .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
+ };
+
+ if (atomic_read(&vm->config_changed))
+ return -EAGAIN;
+
+ switch (virtio_mem_send_request(vm, &req)) {
+ case VIRTIO_MEM_RESP_ACK:
+ vm->plugged_size += size;
+ return 0;
+ case VIRTIO_MEM_RESP_NACK:
+ return -EAGAIN;
+ case VIRTIO_MEM_RESP_BUSY:
+ return -ETXTBSY;
+ case VIRTIO_MEM_RESP_ERROR:
+ return -EINVAL;
+ default:
+ return -ENOMEM;
+ }
+}
+
+static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
+{
+ const uint64_t nb_vm_blocks = size / vm->device_block_size;
+ const struct virtio_mem_req req = {
+ .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
+ .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
+ .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
+ };
+
+ if (atomic_read(&vm->config_changed))
+ return -EAGAIN;
+
+ switch (virtio_mem_send_request(vm, &req)) {
+ case VIRTIO_MEM_RESP_ACK:
+ vm->plugged_size -= size;
+ return 0;
+ case VIRTIO_MEM_RESP_BUSY:
+ return -ETXTBSY;
+ case VIRTIO_MEM_RESP_ERROR:
+ return -EINVAL;
+ default:
+ return -ENOMEM;
+ }
+}
+
+static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
+{
+ const struct virtio_mem_req req = {
+ .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
+ };
+
+ switch (virtio_mem_send_request(vm, &req)) {
+ case VIRTIO_MEM_RESP_ACK:
+ vm->unplug_all_required = false;
+ vm->plugged_size = 0;
+ /* usable region might have shrunk */
+ atomic_set(&vm->config_changed, 1);
+ return 0;
+ case VIRTIO_MEM_RESP_BUSY:
+ return -ETXTBSY;
+ default:
+ return -ENOMEM;
+ }
+}
+
+/*
+ * Plug selected subblocks. Updates the plugged state, but not the state
+ * of the memory block.
+ */
+static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
+ int sb_id, int count)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
+ sb_id * vm->subblock_size;
+ const uint64_t size = count * vm->subblock_size;
+ int rc;
+
+ dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
+ sb_id, sb_id + count - 1);
+
+ rc = virtio_mem_send_plug_request(vm, addr, size);
+ if (!rc)
+ virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
+ return rc;
+}
+
+/*
+ * Unplug selected subblocks. Updates the plugged state, but not the state
+ * of the memory block.
+ */
+static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
+ int sb_id, int count)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
+ sb_id * vm->subblock_size;
+ const uint64_t size = count * vm->subblock_size;
+ int rc;
+
+ dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
+ mb_id, sb_id, sb_id + count - 1);
+
+ rc = virtio_mem_send_unplug_request(vm, addr, size);
+ if (!rc)
+ virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
+ return rc;
+}
+
+/*
+ * Unplug the desired number of plugged subblocks of an offline or not-added
+ * memory block. Will fail if any subblock cannot get unplugged (instead of
+ * skipping it).
+ *
+ * Will not modify the state of the memory block.
+ *
+ * Note: can fail after some subblocks were unplugged.
+ */
+static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb)
+{
+ int sb_id, count;
+ int rc;
+
+ sb_id = vm->nb_sb_per_mb - 1;
+ while (*nb_sb) {
+ /* Find the next candidate subblock */
+ while (sb_id >= 0 &&
+ virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
+ sb_id--;
+ if (sb_id < 0)
+ break;
+ /* Try to unplug multiple subblocks at a time */
+ count = 1;
+ while (count < *nb_sb && sb_id > 0 &&
+ virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
+ count++;
+ sb_id--;
+ }
+
+ rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+ if (rc)
+ return rc;
+ *nb_sb -= count;
+ sb_id--;
+ }
+
+ return 0;
+}
+
+/*
+ * Unplug all plugged subblocks of an offline or not-added memory block.
+ *
+ * Will not modify the state of the memory block.
+ *
+ * Note: can fail after some subblocks were unplugged.
+ */
+static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
+{
+ uint64_t nb_sb = vm->nb_sb_per_mb;
+
+ return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
+}
+
+/*
+ * Prepare tracking data for the next memory block.
+ */
+static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
+ unsigned long *mb_id)
+{
+ int rc;
+
+ if (vm->next_mb_id > vm->last_usable_mb_id)
+ return -ENOSPC;
+
+ /* Resize the state array if required. */
+ rc = virtio_mem_mb_state_prepare_next_mb(vm);
+ if (rc)
+ return rc;
+
+ /* Resize the subblock bitmap if required. */
+ rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
+ if (rc)
+ return rc;
+
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
+ *mb_id = vm->next_mb_id++;
+ return 0;
+}
+
+/*
+ * Don't add too many blocks that are not onlined yet to avoid running OOM.
+ */
+static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
+{
+ unsigned long nb_offline;
+
+ nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
+ return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
+}
+
+/*
+ * Try to plug the desired number of subblocks and add the memory block
+ * to Linux.
+ *
+ * Will modify the state of the memory block.
+ */
+static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
+{
+ const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
+ int rc, rc2;
+
+ if (WARN_ON_ONCE(!count))
+ return -EINVAL;
+
+ /*
+	 * Plug the requested number of subblocks before adding it to Linux,
+ * so that onlining will directly online all plugged subblocks.
+ */
+ rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
+ if (rc)
+ return rc;
+
+ /*
+ * Mark the block properly offline before adding it to Linux,
+ * so the memory notifiers will find the block in the right state.
+ */
+ if (count == vm->nb_sb_per_mb)
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE);
+ else
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+
+	/* Add the memory block to Linux - if that fails, try to unplug. */
+ rc = virtio_mem_mb_add(vm, mb_id);
+ if (rc) {
+ enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;
+
+ dev_err(&vm->vdev->dev,
+ "adding memory block %lu failed with %d\n", mb_id, rc);
+ rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);
+
+ /*
+ * TODO: Linux MM does not properly clean up yet in all cases
+ * where adding of memory failed - especially on -ENOMEM.
+ */
+ if (rc2)
+ new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
+ virtio_mem_mb_set_state(vm, mb_id, new_state);
+ return rc;
+ }
+
+ *nb_sb -= count;
+ return 0;
+}
+
+/*
+ * Try to plug the desired number of subblocks of a memory block that
+ * is already added to Linux.
+ *
+ * Will modify the state of the memory block.
+ *
+ * Note: Can fail after some subblocks were successfully plugged.
+ */
+static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
+ uint64_t *nb_sb, bool online)
+{
+ unsigned long pfn, nr_pages;
+ int sb_id, count;
+ int rc;
+
+ if (WARN_ON_ONCE(!*nb_sb))
+ return -EINVAL;
+
+ while (*nb_sb) {
+ sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
+ if (sb_id >= vm->nb_sb_per_mb)
+ break;
+ count = 1;
+ while (count < *nb_sb &&
+ sb_id + count < vm->nb_sb_per_mb &&
+ !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
+ 1))
+ count++;
+
+ rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
+ if (rc)
+ return rc;
+ *nb_sb -= count;
+ if (!online)
+ continue;
+
+ /* fake-online the pages if the memory block is online */
+ pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+ sb_id * vm->subblock_size);
+ nr_pages = PFN_DOWN(count * vm->subblock_size);
+ virtio_mem_fake_online(pfn, nr_pages);
+ }
+
+ if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (online)
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE);
+ else
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE);
+ }
+
+ return rc;
+}
+
+/*
+ * Try to plug the requested amount of memory.
+ */
+static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ uint64_t nb_sb = diff / vm->subblock_size;
+ unsigned long mb_id;
+ int rc;
+
+ if (!nb_sb)
+ return 0;
+
+ /* Don't race with onlining/offlining */
+ mutex_lock(&vm->hotplug_mutex);
+
+ /* Try to plug subblocks of partially plugged online blocks. */
+ virtio_mem_for_each_mb_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
+ rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ cond_resched();
+ }
+
+ /* Try to plug subblocks of partially plugged offline blocks. */
+ virtio_mem_for_each_mb_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
+ rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ cond_resched();
+ }
+
+ /*
+ * We won't be working on online/offline memory blocks from this point,
+ * so we can't race with memory onlining/offlining. Drop the mutex.
+ */
+ mutex_unlock(&vm->hotplug_mutex);
+
+ /* Try to plug and add unused blocks */
+ virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
+ if (virtio_mem_too_many_mb_offline(vm))
+ return -ENOSPC;
+
+ rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+ if (rc || !nb_sb)
+ return rc;
+ cond_resched();
+ }
+
+ /* Try to prepare, plug and add new blocks */
+ while (nb_sb) {
+ if (virtio_mem_too_many_mb_offline(vm))
+ return -ENOSPC;
+
+ rc = virtio_mem_prepare_next_mb(vm, &mb_id);
+ if (rc)
+ return rc;
+ rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+ if (rc)
+ return rc;
+ cond_resched();
+ }
+
+ return 0;
+out_unlock:
+ mutex_unlock(&vm->hotplug_mutex);
+ return rc;
+}
+
+/*
+ * Unplug the desired number of plugged subblocks of an offline memory block.
+ * Will fail if any subblock cannot get unplugged (instead of skipping it).
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ *
+ * Note: Can fail after some subblocks were successfully unplugged.
+ */
+static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
+{
+ int rc;
+
+ rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
+
+ /* some subblocks might have been unplugged even on failure */
+ if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ if (rc)
+ return rc;
+
+ if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ /*
+ * Remove the block from Linux - this should never fail.
+		 * Prevent the block from getting onlined by marking it
+ * unplugged. Temporarily drop the mutex, so
+ * any pending GOING_ONLINE requests can be serviced/rejected.
+ */
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_UNUSED);
+
+ mutex_unlock(&vm->hotplug_mutex);
+ rc = virtio_mem_mb_remove(vm, mb_id);
+ BUG_ON(rc);
+ mutex_lock(&vm->hotplug_mutex);
+ }
+ return 0;
+}
+
+/*
+ * Unplug the given plugged subblocks of an online memory block.
+ *
+ * Will modify the state of the memory block.
+ */
+static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
+{
+ const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
+ unsigned long start_pfn;
+ int rc;
+
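+	/*
+	 * Allocate (isolate) the range via alloc_contig_range() first, so
+	 * nobody else can use these pages while we hand them back.
+	 */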
+ start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+ sb_id * vm->subblock_size);
+ rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
+ MIGRATE_MOVABLE, GFP_KERNEL);
+ if (rc == -ENOMEM)
+ /* whoops, out of memory */
+ return rc;
+ if (rc)
+ return -EBUSY;
+
+ /* Mark it as fake-offline before unplugging it */
+ virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
+ adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
+
+ /* Try to unplug the allocated memory */
+ rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+ if (rc) {
+ /* Return the memory to the buddy. */
+ virtio_mem_fake_online(start_pfn, nr_pages);
+ return rc;
+ }
+
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+ return 0;
+}
+
+/*
+ * Unplug the desired number of plugged subblocks of an online memory block.
+ * Will skip subblocks that are busy.
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ *
+ * Note: Can fail after some subblocks were successfully unplugged. Can
+ * return 0 even if subblocks were busy and could not get unplugged.
+ */
+static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
+{
+ int rc, sb_id;
+
+ /* If possible, try to unplug the complete block in one shot. */
+ if (*nb_sb >= vm->nb_sb_per_mb &&
+ virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
+ vm->nb_sb_per_mb);
+ if (!rc) {
+ *nb_sb -= vm->nb_sb_per_mb;
+ goto unplugged;
+ } else if (rc != -EBUSY)
+ return rc;
+ }
+
+ /* Fallback to single subblocks. */
+ for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
+ /* Find the next candidate subblock */
+ while (sb_id >= 0 &&
+ !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ sb_id--;
+ if (sb_id < 0)
+ break;
+
+ rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
+ if (rc == -EBUSY)
+ continue;
+ else if (rc)
+ return rc;
+ *nb_sb -= 1;
+ }
+
+unplugged:
+ /*
+ * Once all subblocks of a memory block were unplugged, offline and
+ * remove it. This will usually not fail, as no memory is in use
+ * anymore - however some other notifiers might NACK the request.
+ */
+ if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ mutex_unlock(&vm->hotplug_mutex);
+ rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
+ mutex_lock(&vm->hotplug_mutex);
+ if (!rc)
+ virtio_mem_mb_set_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_UNUSED);
+ }
+
+ return 0;
+}
+
+/*
+ * Try to unplug the requested amount of memory.
+ */
+static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ uint64_t nb_sb = diff / vm->subblock_size;
+ unsigned long mb_id;
+ int rc;
+
+ if (!nb_sb)
+ return 0;
+
+ /*
+ * We'll drop the mutex a couple of times when it is safe to do so.
+	 * This might result in some blocks switching state (online/offline)
+	 * and we could miss them in this run - we will retry later.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+
+ /* Try to unplug subblocks of partially plugged offline blocks. */
+ virtio_mem_for_each_mb_state_rev(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
+ rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
+ &nb_sb);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ cond_resched();
+ }
+
+ /* Try to unplug subblocks of plugged offline blocks. */
+ virtio_mem_for_each_mb_state_rev(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE) {
+ rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
+ &nb_sb);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ cond_resched();
+ }
+
+ if (!unplug_online) {
+ mutex_unlock(&vm->hotplug_mutex);
+ return 0;
+ }
+
+ /* Try to unplug subblocks of partially plugged online blocks. */
+ virtio_mem_for_each_mb_state_rev(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
+ rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
+ &nb_sb);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ mutex_unlock(&vm->hotplug_mutex);
+ cond_resched();
+ mutex_lock(&vm->hotplug_mutex);
+ }
+
+ /* Try to unplug subblocks of plugged online blocks. */
+ virtio_mem_for_each_mb_state_rev(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_ONLINE) {
+ rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
+ &nb_sb);
+ if (rc || !nb_sb)
+ goto out_unlock;
+ mutex_unlock(&vm->hotplug_mutex);
+ cond_resched();
+ mutex_lock(&vm->hotplug_mutex);
+ }
+
+ mutex_unlock(&vm->hotplug_mutex);
+ return nb_sb ? -EBUSY : 0;
+out_unlock:
+ mutex_unlock(&vm->hotplug_mutex);
+ return rc;
+}
+
+/*
+ * Try to unplug all blocks that couldn't be unplugged before, for example,
+ * because the hypervisor was busy.
+ */
+static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
+{
+ unsigned long mb_id;
+ int rc;
+
+ virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
+ rc = virtio_mem_mb_unplug(vm, mb_id);
+ if (rc)
+ return rc;
+ virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+ }
+
+ return 0;
+}
+
+/*
+ * Update all parts of the config that could have changed.
+ */
+static void virtio_mem_refresh_config(struct virtio_mem *vm)
+{
+ const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ uint64_t new_plugged_size, usable_region_size, end_addr;
+
+ /* the plugged_size is just a reflection of what _we_ did previously */
+ virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
+ &new_plugged_size);
+ if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
+ vm->plugged_size = new_plugged_size;
+
+ /* calculate the last usable memory block id */
+ virtio_cread(vm->vdev, struct virtio_mem_config,
+ usable_region_size, &usable_region_size);
+ end_addr = vm->addr + usable_region_size;
+ end_addr = min(end_addr, phys_limit);
+ vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
+
+ /* see if there is a request to change the size */
+ virtio_cread(vm->vdev, struct virtio_mem_config, requested_size,
+ &vm->requested_size);
+
+ dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
+ dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
+}
+
+/*
+ * Workqueue function for handling plug/unplug requests and config updates.
+ */
+static void virtio_mem_run_wq(struct work_struct *work)
+{
+ struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
+ uint64_t diff;
+ int rc;
+
+ hrtimer_cancel(&vm->retry_timer);
+
+ if (vm->broken)
+ return;
+
+retry:
+ rc = 0;
+
+ /* Make sure we start with a clean state if there are leftovers. */
+ if (unlikely(vm->unplug_all_required))
+ rc = virtio_mem_send_unplug_all_request(vm);
+
+ if (atomic_read(&vm->config_changed)) {
+ atomic_set(&vm->config_changed, 0);
+ virtio_mem_refresh_config(vm);
+ }
+
+ /* Unplug any leftovers from previous runs */
+ if (!rc)
+ rc = virtio_mem_unplug_pending_mb(vm);
+
+ if (!rc && vm->requested_size != vm->plugged_size) {
+ if (vm->requested_size > vm->plugged_size) {
+ diff = vm->requested_size - vm->plugged_size;
+ rc = virtio_mem_plug_request(vm, diff);
+ } else {
+ diff = vm->plugged_size - vm->requested_size;
+ rc = virtio_mem_unplug_request(vm, diff);
+ }
+ }
+
+ switch (rc) {
+ case 0:
+ vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
+ break;
+ case -ENOSPC:
+ /*
+ * We cannot add any more memory (alignment, physical limit)
+ * or we have too many offline memory blocks.
+ */
+ break;
+ case -ETXTBSY:
+ /*
+ * The hypervisor cannot process our request right now
+		 * (e.g., out of memory, migrating).
+ */
+ case -EBUSY:
+ /*
+ * We cannot free up any memory to unplug it (all plugged memory
+ * is busy).
+ */
+ case -ENOMEM:
+ /* Out of memory, try again later. */
+ hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
+ HRTIMER_MODE_REL);
+ break;
+ case -EAGAIN:
+ /* Retry immediately (e.g., the config changed). */
+ goto retry;
+ default:
+ /* Unknown error, mark as broken */
+ dev_err(&vm->vdev->dev,
+ "unknown error, marking device broken: %d\n", rc);
+ vm->broken = true;
+ }
+}
+
+static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
+{
+ struct virtio_mem *vm = container_of(timer, struct virtio_mem,
+ retry_timer);
+
+ virtio_mem_retry(vm);
+ vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
+ VIRTIO_MEM_RETRY_TIMER_MAX_MS);
+ return HRTIMER_NORESTART;
+}
+
+static void virtio_mem_handle_response(struct virtqueue *vq)
+{
+ struct virtio_mem *vm = vq->vdev->priv;
+
+ wake_up(&vm->host_resp);
+}
+
+static int virtio_mem_init_vq(struct virtio_mem *vm)
+{
+ struct virtqueue *vq;
+
+ vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
+ "guest-request");
+ if (IS_ERR(vq))
+ return PTR_ERR(vq);
+ vm->vq = vq;
+
+ return 0;
+}
+
+static int virtio_mem_init(struct virtio_mem *vm)
+{
+ const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ uint16_t node_id;
+
+ if (!vm->vdev->config->get) {
+ dev_err(&vm->vdev->dev, "config access disabled\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We don't want to (un)plug or reuse any memory when in kdump. The
+ * memory is still accessible (but not mapped).
+ */
+ if (is_kdump_kernel()) {
+ dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
+ return -EBUSY;
+ }
+
+ /* Fetch all properties that can't change. */
+ virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
+ &vm->plugged_size);
+ virtio_cread(vm->vdev, struct virtio_mem_config, block_size,
+ &vm->device_block_size);
+ virtio_cread(vm->vdev, struct virtio_mem_config, node_id,
+ &node_id);
+ vm->nid = virtio_mem_translate_node_id(vm, node_id);
+ virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
+ virtio_cread(vm->vdev, struct virtio_mem_config, region_size,
+ &vm->region_size);
+
+ /*
+ * We always hotplug memory in memory block granularity. This way,
+	 * we have to wait for exactly one memory block to be onlined.
+ */
+ if (vm->device_block_size > memory_block_size_bytes()) {
+ dev_err(&vm->vdev->dev,
+ "The block size is not supported (too big).\n");
+ return -EINVAL;
+ }
+
+ /* bad device setup - warn only */
+ if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
+ dev_warn(&vm->vdev->dev,
+ "The alignment of the physical start address can make some memory unusable.\n");
+ if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
+ dev_warn(&vm->vdev->dev,
+ "The alignment of the physical end address can make some memory unusable.\n");
+ if (vm->addr + vm->region_size > phys_limit)
+ dev_warn(&vm->vdev->dev,
+ "Some memory is not addressable. This can make some memory unusable.\n");
+
+ /*
+ * Calculate the subblock size:
+	 * - At least MAX_ORDER - 1 pages / pageblock_order pages in size.
+ * - At least the device block size.
+ * In the worst case, a single subblock per memory block.
+ */
+ vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
+ pageblock_order);
+ vm->subblock_size = max_t(uint64_t, vm->device_block_size,
+ vm->subblock_size);
+ vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
+
+ /* Round up to the next full memory block */
+ vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
+ memory_block_size_bytes());
+ vm->next_mb_id = vm->first_mb_id;
+ vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
+ vm->region_size) - 1;
+
+ dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
+ dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
+ dev_info(&vm->vdev->dev, "device block size: 0x%llx",
+ (unsigned long long)vm->device_block_size);
+ dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
+ memory_block_size_bytes());
+ dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
+ (unsigned long long)vm->subblock_size);
+ if (vm->nid != NUMA_NO_NODE)
+ dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
+
+ return 0;
+}
+
+static int virtio_mem_create_resource(struct virtio_mem *vm)
+{
+ /*
+ * When force-unloading the driver and removing the device, we
+ * could have a garbage pointer. Duplicate the string.
+ */
+ const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
+ name, IORESOURCE_SYSTEM_RAM);
+ if (!vm->parent_resource) {
+ kfree(name);
+ dev_warn(&vm->vdev->dev, "could not reserve device region\n");
+ dev_info(&vm->vdev->dev,
+ "reloading the driver is not supported\n");
+ return -EBUSY;
+ }
+
+ /* The memory is not actually busy - make add_memory() work. */
+ vm->parent_resource->flags &= ~IORESOURCE_BUSY;
+ return 0;
+}
+
+static void virtio_mem_delete_resource(struct virtio_mem *vm)
+{
+ const char *name;
+
+ if (!vm->parent_resource)
+ return;
+
+ name = vm->parent_resource->name;
+ release_resource(vm->parent_resource);
+ kfree(vm->parent_resource);
+ kfree(name);
+ vm->parent_resource = NULL;
+}
+
+static int virtio_mem_probe(struct virtio_device *vdev)
+{
+ struct virtio_mem *vm;
+ int rc;
+
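+	/* the request/response layout is fixed by the virtio-mem ABI */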
+ BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
+ BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
+
+ vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ init_waitqueue_head(&vm->host_resp);
+ vm->vdev = vdev;
+ INIT_WORK(&vm->wq, virtio_mem_run_wq);
+ mutex_init(&vm->hotplug_mutex);
+ INIT_LIST_HEAD(&vm->next);
+ spin_lock_init(&vm->removal_lock);
+ hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ vm->retry_timer.function = virtio_mem_timer_expired;
+ vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
+
+ /* register the virtqueue */
+ rc = virtio_mem_init_vq(vm);
+ if (rc)
+ goto out_free_vm;
+
+ /* initialize the device by querying the config */
+ rc = virtio_mem_init(vm);
+ if (rc)
+ goto out_del_vq;
+
+ /* create the parent resource for all memory */
+ rc = virtio_mem_create_resource(vm);
+ if (rc)
+ goto out_del_vq;
+
+ /*
+ * If we still have memory plugged, we have to unplug all memory first.
+ * Registering our parent resource makes sure that this memory isn't
+ * actually in use (e.g., trying to reload the driver).
+ */
+ if (vm->plugged_size) {
+		vm->unplug_all_required = true;
+ dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
+ }
+
+ /* register callbacks */
+ vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
+ rc = register_memory_notifier(&vm->memory_notifier);
+ if (rc)
+ goto out_del_resource;
+ rc = register_virtio_mem_device(vm);
+ if (rc)
+ goto out_unreg_mem;
+
+ virtio_device_ready(vdev);
+
+ /* trigger a config update to start processing the requested_size */
+ atomic_set(&vm->config_changed, 1);
+ queue_work(system_freezable_wq, &vm->wq);
+
+ return 0;
+out_unreg_mem:
+ unregister_memory_notifier(&vm->memory_notifier);
+out_del_resource:
+ virtio_mem_delete_resource(vm);
+out_del_vq:
+ vdev->config->del_vqs(vdev);
+out_free_vm:
+ kfree(vm);
+ vdev->priv = NULL;
+
+ return rc;
+}
+
+static void virtio_mem_remove(struct virtio_device *vdev)
+{
+ struct virtio_mem *vm = vdev->priv;
+ unsigned long mb_id;
+ int rc;
+
+ /*
+ * Make sure the workqueue won't be triggered anymore and no memory
+ * blocks can be onlined/offlined until we're finished here.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+ spin_lock_irq(&vm->removal_lock);
+ vm->removing = true;
+ spin_unlock_irq(&vm->removal_lock);
+ mutex_unlock(&vm->hotplug_mutex);
+
+ /* wait until the workqueue stopped */
+ cancel_work_sync(&vm->wq);
+ hrtimer_cancel(&vm->retry_timer);
+
+ /*
+	 * Once we unregister our callbacks below, user space could online
+	 * partially plugged offline blocks. Make sure to remove them first.
+ */
+ virtio_mem_for_each_mb_state(vm, mb_id,
+ VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
+ rc = virtio_mem_mb_remove(vm, mb_id);
+ BUG_ON(rc);
+ virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+ }
+ /*
+	 * Once we unregister our callbacks, user space can no longer
+ * offline partially plugged online memory blocks. No need to worry
+ * about them.
+ */
+
+ /* unregister callbacks */
+ unregister_virtio_mem_device(vm);
+ unregister_memory_notifier(&vm->memory_notifier);
+
+ /*
+ * There is no way we could reliably remove all memory we have added to
+ * the system. And there is no way to stop the driver/device from going
+ * away. Warn at least.
+ */
+ if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+ dev_warn(&vdev->dev, "device still has system memory added\n");
+ else
+ virtio_mem_delete_resource(vm);
+
+ /* remove all tracking data - no locking needed */
+ vfree(vm->mb_state);
+ vfree(vm->sb_bitmap);
+
+ /* reset the device and cleanup the queues */
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+
+ kfree(vm);
+ vdev->priv = NULL;
+}
+
+static void virtio_mem_config_changed(struct virtio_device *vdev)
+{
+ struct virtio_mem *vm = vdev->priv;
+
+ atomic_set(&vm->config_changed, 1);
+ virtio_mem_retry(vm);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_mem_freeze(struct virtio_device *vdev)
+{
+ /*
+ * When restarting the VM, all memory is usually unplugged. Don't
+	 * allow suspend/hibernate.
+ */
+ dev_err(&vdev->dev, "save/restore not supported.\n");
+ return -EPERM;
+}
+
+static int virtio_mem_restore(struct virtio_device *vdev)
+{
+ return -EPERM;
+}
+#endif
+
+static unsigned int virtio_mem_features[] = {
+#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
+ VIRTIO_MEM_F_ACPI_PXM,
+#endif
+};
+
+static struct virtio_device_id virtio_mem_id_table[] = {
+ { VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_mem_driver = {
+ .feature_table = virtio_mem_features,
+ .feature_table_size = ARRAY_SIZE(virtio_mem_features),
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = virtio_mem_id_table,
+ .probe = virtio_mem_probe,
+ .remove = virtio_mem_remove,
+ .config_changed = virtio_mem_config_changed,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtio_mem_freeze,
+ .restore = virtio_mem_restore,
+#endif
+};
+
+module_virtio_driver(virtio_mem_driver);
+MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
+MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
+MODULE_DESCRIPTION("Virtio-mem driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 97d5725fd9a2..9d16aaffca9d 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -466,10 +466,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
int irq = platform_get_irq(vm_dev->pdev, 0);
int i, err, queue_idx = 0;
- if (irq < 0) {
- dev_err(&vdev->dev, "Cannot get IRQ resource\n");
+ if (irq < 0)
return irq;
- }
err = request_irq(irq, vm_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vm_dev);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7abcc50838b8..db93cedd262f 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
+#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"
/*
diff --git a/drivers/visorbus/controlvmchannel.h b/drivers/visorbus/controlvmchannel.h
index 8c57562a070a..c87213554427 100644
--- a/drivers/visorbus/controlvmchannel.h
+++ b/drivers/visorbus/controlvmchannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/visorbus/vbuschannel.h b/drivers/visorbus/vbuschannel.h
index b1dce26166bf..4aaf6564eb9f 100644
--- a/drivers/visorbus/vbuschannel.h
+++ b/drivers/visorbus/vbuschannel.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/visorbus/visorbus_private.h b/drivers/visorbus/visorbus_private.h
index 366380b7f8d9..6956de605827 100644
--- a/drivers/visorbus/visorbus_private.h
+++ b/drivers/visorbus/visorbus_private.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index aa09f8527776..bf2ec59c1f9d 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -54,10 +54,10 @@ MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");
struct hdq_data {
struct device *dev;
void __iomem *hdq_base;
- /* lock status update */
+ /* lock read/write/break operations */
struct mutex hdq_mutex;
+ /* interrupt status and a lock for it */
u8 hdq_irqstatus;
- /* device lock */
spinlock_t hdq_spinlock;
/* mode: 0-HDQ 1-W1 */
int mode;
@@ -120,13 +120,18 @@ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
}
/* Clear saved irqstatus after using an interrupt */
-static void hdq_reset_irqstatus(struct hdq_data *hdq_data)
+static u8 hdq_reset_irqstatus(struct hdq_data *hdq_data, u8 bits)
{
unsigned long irqflags;
+ u8 status;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- hdq_data->hdq_irqstatus = 0;
+ status = hdq_data->hdq_irqstatus;
+ /* this is a read-modify-write */
+ hdq_data->hdq_irqstatus &= ~bits;
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
+
+ return status;
}
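With hdq_reset_irqstatus() now taking a bit mask, a caller snapshots the accumulated status and clears only the bits it consumed, so a flag latched by the ISR for a different event is not lost. A minimal sketch of the intended calling pattern, using only symbols already present in this file (hdq_wait_tx() itself is a hypothetical helper, not part of the patch):

static int hdq_wait_tx(struct hdq_data *hdq_data)
{
        u8 status;
        int ret;

        /* wait until the ISR has ORed TXCOMPLETE into hdq_irqstatus */
        ret = wait_event_timeout(hdq_wait_queue,
                                 (hdq_data->hdq_irqstatus &
                                  OMAP_HDQ_INT_STATUS_TXCOMPLETE),
                                 OMAP_HDQ_TIMEOUT);

        /* snapshot the status, clear only the TXCOMPLETE bit */
        status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);

        if (!ret || !(status & OMAP_HDQ_INT_STATUS_TXCOMPLETE))
                return -ETIMEDOUT;

        return 0;
}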
/* write out a byte and fill *status with HDQ_INT_STATUS */
@@ -135,6 +140,16 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
int ret;
u8 tmp_status;
+ ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
+ if (ret < 0) {
+ ret = -EINTR;
+ goto rtn;
+ }
+
+ if (hdq_data->hdq_irqstatus)
+ dev_err(hdq_data->dev, "TX irqstatus not cleared (%02x)\n",
+ hdq_data->hdq_irqstatus);
+
*status = 0;
hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);
@@ -144,18 +159,19 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
/* wait for the TXCOMPLETE bit */
ret = wait_event_timeout(hdq_wait_queue,
- hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
+ (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
+ OMAP_HDQ_TIMEOUT);
+ *status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
if (ret == 0) {
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
ret = -ETIMEDOUT;
goto out;
}
- *status = hdq_data->hdq_irqstatus;
/* check irqstatus */
if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
- " TXCOMPLETE/RXCOMPLETE, %x", *status);
+ " TXCOMPLETE/RXCOMPLETE, %x\n", *status);
ret = -ETIMEDOUT;
goto out;
}
@@ -166,11 +182,12 @@ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
OMAP_HDQ_FLAG_CLEAR, &tmp_status);
if (ret) {
dev_dbg(hdq_data->dev, "timeout waiting GO bit"
- " return to zero, %x", tmp_status);
+ " return to zero, %x\n", tmp_status);
}
out:
- hdq_reset_irqstatus(hdq_data);
+ mutex_unlock(&hdq_data->hdq_mutex);
+rtn:
return ret;
}
@@ -181,9 +198,9 @@ static irqreturn_t hdq_isr(int irq, void *_hdq)
unsigned long irqflags;
spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
- hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
+ hdq_data->hdq_irqstatus |= hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
- dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);
+ dev_dbg(hdq_data->dev, "hdq_isr: %x\n", hdq_data->hdq_irqstatus);
if (hdq_data->hdq_irqstatus &
(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
@@ -230,6 +247,10 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
goto rtn;
}
+ if (hdq_data->hdq_irqstatus)
+ dev_err(hdq_data->dev, "break irqstatus not cleared (%02x)\n",
+ hdq_data->hdq_irqstatus);
+
/* set the INIT and GO bit */
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
@@ -238,18 +259,19 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
/* wait for the TIMEOUT bit */
ret = wait_event_timeout(hdq_wait_queue,
- hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
+ (hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_TIMEOUT),
+ OMAP_HDQ_TIMEOUT);
+ tmp_status = hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TIMEOUT);
if (ret == 0) {
dev_dbg(hdq_data->dev, "break wait elapsed\n");
ret = -EINTR;
goto out;
}
- tmp_status = hdq_data->hdq_irqstatus;
/* check irqstatus */
if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
- dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
- tmp_status);
+ dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x\n",
+ tmp_status);
ret = -ETIMEDOUT;
goto out;
}
@@ -275,10 +297,9 @@ static int omap_hdq_break(struct hdq_data *hdq_data)
&tmp_status);
if (ret)
dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
- " return to zero, %x", tmp_status);
+ " return to zero, %x\n", tmp_status);
out:
- hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
@@ -309,12 +330,15 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
*/
wait_event_timeout(hdq_wait_queue,
(hdq_data->hdq_irqstatus
- & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
+ & (OMAP_HDQ_INT_STATUS_RXCOMPLETE |
+ OMAP_HDQ_INT_STATUS_TIMEOUT)),
OMAP_HDQ_TIMEOUT);
-
+ status = hdq_reset_irqstatus(hdq_data,
+ OMAP_HDQ_INT_STATUS_RXCOMPLETE |
+ OMAP_HDQ_INT_STATUS_TIMEOUT);
hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
OMAP_HDQ_CTRL_STATUS_DIR);
- status = hdq_data->hdq_irqstatus;
+
/* check irqstatus */
if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
dev_dbg(hdq_data->dev, "timeout waiting for"
@@ -322,11 +346,12 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
ret = -ETIMEDOUT;
goto out;
}
+ } else { /* interrupt had occurred before hdq_read_byte was called */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
}
/* the data is ready. Read it in! */
*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
- hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
return ret;
@@ -367,15 +392,15 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
OMAP_HDQ_TIMEOUT);
+ /* Must clear irqstatus for another RXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
+
if (err == 0) {
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
goto out;
}
id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);
- /* Must clear irqstatus for another RXCOMPLETE interrupt */
- hdq_reset_irqstatus(hdq_data);
-
/* read comp_bit */
hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
@@ -383,6 +408,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_RXCOMPLETE),
OMAP_HDQ_TIMEOUT);
+ /* Must clear irqstatus for another RXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_RXCOMPLETE);
+
if (err == 0) {
dev_dbg(hdq_data->dev, "RX wait elapsed\n");
goto out;
@@ -409,6 +437,9 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
(hdq_data->hdq_irqstatus
& OMAP_HDQ_INT_STATUS_TXCOMPLETE),
OMAP_HDQ_TIMEOUT);
+ /* Must clear irqstatus for another TXCOMPLETE interrupt */
+ hdq_reset_irqstatus(hdq_data, OMAP_HDQ_INT_STATUS_TXCOMPLETE);
+
if (err == 0) {
dev_dbg(hdq_data->dev, "TX wait elapsed\n");
goto out;
@@ -418,7 +449,6 @@ static u8 omap_w1_triplet(void *_hdq, u8 bdir)
OMAP_HDQ_CTRL_STATUS_SINGLE);
out:
- hdq_reset_irqstatus(hdq_data);
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
pm_runtime_mark_last_busy(hdq_data->dev);
@@ -464,7 +494,7 @@ static u8 omap_w1_read_byte(void *_hdq)
ret = hdq_read_byte(hdq_data, &val);
if (ret)
- ret = -1;
+ val = -1;
pm_runtime_mark_last_busy(hdq_data->dev);
pm_runtime_put_autosuspend(hdq_data->dev);
diff --git a/drivers/w1/slaves/w1_ds2430.c b/drivers/w1/slaves/w1_ds2430.c
index 6fb0563fb2ae..75bb8a88620b 100644
--- a/drivers/w1/slaves/w1_ds2430.c
+++ b/drivers/w1/slaves/w1_ds2430.c
@@ -290,6 +290,6 @@ static struct w1_family w1_family_14 = {
module_w1_family(w1_family_14);
MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com>");
-MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256kb EEPROM");
+MODULE_DESCRIPTION("w1 family 14 driver for DS2430, 256b EEPROM");
MODULE_LICENSE("GPL");
MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2430));
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index e028e0092799..c1b4eda16719 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
+#include <linux/string.h>
#include <linux/w1.h>
@@ -25,7 +26,8 @@
#define W1_THERM_DS1825 0x3B
#define W1_THERM_DS28EA00 0x42
-/* Allow the strong pullup to be disabled, but default to enabled.
+/*
+ * Allow the strong pullup to be disabled, but default to enabled.
* If it was disabled a parasite powered device might not get the require
* current to do a temperature conversion. If it is enabled parasite powered
* devices have a better chance of getting the current required.
@@ -41,42 +43,211 @@
static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
+/* Counter for devices supporting bulk reading */
+static u16 bulk_read_device_counter; /* =0 as per C standard */
+
+/* This command should be in public header w1.h but is not */
+#define W1_RECALL_EEPROM 0xB8
+
+/* Number of tries for an operation */
+#define W1_THERM_MAX_TRY 5
+
+/* ms delay to retry bus mutex */
+#define W1_THERM_RETRY_DELAY 20
+
+/* delay in ms to write in EEPROM */
+#define W1_THERM_EEPROM_WRITE_DELAY 10
+
+#define EEPROM_CMD_WRITE "save" /* cmd to write eeprom via sysfs */
+#define EEPROM_CMD_READ "restore" /* cmd to read eeprom via sysfs */
+#define BULK_TRIGGER_CMD "trigger" /* cmd to trigger a bulk read */
+
+#define MIN_TEMP -55 /* min temperature that can be measured */
+#define MAX_TEMP 125 /* max temperature that can be measured */
+
+/* Helpers Macros */
+
+/*
+ * return a pointer to the slave's w1_therm_family_converter struct:
+ * always test family data existence before using this macro
+ */
+#define SLAVE_SPECIFIC_FUNC(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->specific_functions)
+
+/*
+ * return the power mode of the sl slave: 1-ext, 0-parasite, <0 unknown
+ * always test family data existence before using this macro
+ */
+#define SLAVE_POWERMODE(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->external_powered)
+
+/*
+ * return the resolution in bits of the sl slave: <0 unknown
+ * always test family data existence before using this macro
+ */
+#define SLAVE_RESOLUTION(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->resolution)
+
+/*
+ * return whether or not a CONVERT T command has been issued to the slave
+ * * 0: no bulk read is pending
+ * * -1: conversion is in progress
+ * * 1: conversion done, result to be read
+ */
+#define SLAVE_CONVERT_TRIGGERED(sl) \
+ (((struct w1_therm_family_data *)(sl->family_data))->convert_triggered)
+
+/* return the address of the refcnt in the family data */
+#define THERM_REFCNT(family_data) \
+ (&((struct w1_therm_family_data *)family_data)->refcnt)
+
+/* Structs definition */
+
+/**
+ * struct w1_therm_family_converter - bind device specific functions
+ * @broken: flag for non-registered families
+ * @reserved: not used here
+ * @f: pointer to the device binding structure
+ * @convert: pointer to the device conversion function
+ * @get_conversion_time: pointer to the device conversion time function
+ * @set_resolution: pointer to the device set_resolution function
+ * @get_resolution: pointer to the device get_resolution function
+ * @write_data: pointer to the device writing function (2 or 3 bytes)
+ * @bulk_read: true if device family support bulk read, false otherwise
+ */
+struct w1_therm_family_converter {
+ u8 broken;
+ u16 reserved;
+ struct w1_family *f;
+ int (*convert)(u8 rom[9]);
+ int (*get_conversion_time)(struct w1_slave *sl);
+ int (*set_resolution)(struct w1_slave *sl, int val);
+ int (*get_resolution)(struct w1_slave *sl);
+ int (*write_data)(struct w1_slave *sl, const u8 *data);
+ bool bulk_read;
+};
+
+/**
+ * struct w1_therm_family_data - device data
+ * @rom: ROM device id (64bit Lasered ROM code + 1 CRC byte)
+ * @refcnt: ref count
+ * @external_powered: 1 device powered externally,
+ * 0 device parasite powered,
+ * -x error or undefined
+ * @resolution: current device resolution
+ * @convert_triggered: conversion state of the device
+ * @specific_functions: pointer to the struct of device specific functions
+ */
struct w1_therm_family_data {
uint8_t rom[9];
atomic_t refcnt;
+ int external_powered;
+ int resolution;
+ int convert_triggered;
+ struct w1_therm_family_converter *specific_functions;
};
+/**
+ * struct therm_info - store temperature reading
+ * @rom: read device data (8 data bytes + 1 CRC byte)
+ * @crc: computed crc from rom
+ * @verdict: 1 if the CRC matches, 0 otherwise
+ */
struct therm_info {
u8 rom[9];
u8 crc;
u8 verdict;
};
-/* return the address of the refcnt in the family data */
-#define THERM_REFCNT(family_data) \
- (&((struct w1_therm_family_data *)family_data)->refcnt)
+/* Hardware Functions declaration */
-static int w1_therm_add_slave(struct w1_slave *sl)
-{
- sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
- GFP_KERNEL);
- if (!sl->family_data)
- return -ENOMEM;
- atomic_set(THERM_REFCNT(sl->family_data), 1);
- return 0;
-}
+/**
+ * reset_select_slave() - reset and select a slave
+ * @sl: the slave to select
+ *
+ * Resets the bus and selects the slave by sending a ROM MATCH cmd.
+ * w1_reset_select_slave() from w1_io.c could not be used here because
+ * it sends a SKIP ROM command if only one device is on the line.
+ * At the beginning of such a process, sl->master->slave_count is 1 even if
+ * more devices are on the line, causing collisions on the line.
+ *
+ * Context: The w1 master lock must be held.
+ *
+ * Return: 0 if success, negative kernel error code otherwise.
+ */
+static int reset_select_slave(struct w1_slave *sl);
-static void w1_therm_remove_slave(struct w1_slave *sl)
-{
- int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
+/**
+ * convert_t() - Query the device for temperature conversion and read
+ * @sl: pointer to the slave to read
+ * @info: pointer to a structure to store the read results
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int convert_t(struct w1_slave *sl, struct therm_info *info);
- while (refcnt) {
- msleep(1000);
- refcnt = atomic_read(THERM_REFCNT(sl->family_data));
- }
- kfree(sl->family_data);
- sl->family_data = NULL;
-}
+/**
+ * read_scratchpad() - read the data in device RAM
+ * @sl: pointer to the slave to read
+ * @info: pointer to a structure to store the read results
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int read_scratchpad(struct w1_slave *sl, struct therm_info *info);
+
+/**
+ * write_scratchpad() - write nb_bytes in the device RAM
+ * @sl: pointer to the slave to write in
+ * @data: pointer to an array of 3 bytes, as 3 bytes MUST be written
+ * @nb_bytes: number of bytes to be written (2 for DS18S20, 3 otherwise)
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int write_scratchpad(struct w1_slave *sl, const u8 *data, u8 nb_bytes);
+
+/**
+ * copy_scratchpad() - Copy the content of scratchpad in device EEPROM
+ * @sl: slave involved
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int copy_scratchpad(struct w1_slave *sl);
+
+/**
+ * recall_eeprom() - Restore EEPROM data to device RAM
+ * @sl: slave involved
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int recall_eeprom(struct w1_slave *sl);
+
+/**
+ * read_powermode() - Query the power mode of the slave
+ * @sl: slave to retrieve the power mode
+ *
+ * Ask the device to get its power mode (external or parasite)
+ * and store the power status in the &struct w1_therm_family_data.
+ *
+ * Return:
+ * * 0 parasite powered device
+ * * 1 externally powered device
+ * * <0 kernel error code
+ */
+static int read_powermode(struct w1_slave *sl);
+
+/**
+ * trigger_bulk_read() - function to trigger a bulk read on the bus
+ * @dev_master: the device master of the bus
+ *
+ * Send a SKIP ROM followed by a CONVERT T command on the bus.
+ * It also sets the status flag in each slave's &struct w1_therm_family_data
+ * to signal that a conversion is in progress.
+ *
+ * Return: 0 if success, -kernel error code otherwise
+ */
+static int trigger_bulk_read(struct w1_master *dev_master);
+
+/* Sysfs interface declaration */
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
@@ -87,21 +258,103 @@ static ssize_t w1_slave_store(struct device *device,
static ssize_t w1_seq_show(struct device *device,
struct device_attribute *attr, char *buf);
+static ssize_t temperature_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t ext_power_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t resolution_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t resolution_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t eeprom_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t alarms_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t alarms_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t therm_bulk_read_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
+static ssize_t therm_bulk_read_show(struct device *device,
+ struct device_attribute *attr, char *buf);
+
+/* Attributes declarations */
+
static DEVICE_ATTR_RW(w1_slave);
static DEVICE_ATTR_RO(w1_seq);
+static DEVICE_ATTR_RO(temperature);
+static DEVICE_ATTR_RO(ext_power);
+static DEVICE_ATTR_RW(resolution);
+static DEVICE_ATTR_WO(eeprom);
+static DEVICE_ATTR_RW(alarms);
+
+static DEVICE_ATTR_RW(therm_bulk_read); /* attribute at master level */
+
+/* Interface Functions declaration */
+
+/**
+ * w1_therm_add_slave() - Called when a new slave is discovered
+ * @sl: slave just discovered by the master.
+ *
+ * Called by the master when the slave is discovered on the bus. Used to
+ * initialize slave state before the beginning of any communication.
+ *
+ * Return: 0 - If success, negative kernel code otherwise
+ */
+static int w1_therm_add_slave(struct w1_slave *sl);
+
+/**
+ * w1_therm_remove_slave() - Called when a slave is removed
+ * @sl: slave to be removed.
+ *
+ * Called by the master when the slave is considered not to be on the bus
+ * anymore. Used to free memory.
+ */
+static void w1_therm_remove_slave(struct w1_slave *sl);
+
+/* Family attributes */
static struct attribute *w1_therm_attrs[] = {
&dev_attr_w1_slave.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_ext_power.attr,
+ &dev_attr_resolution.attr,
+ &dev_attr_eeprom.attr,
+ &dev_attr_alarms.attr,
+ NULL,
+};
+
+static struct attribute *w1_ds18s20_attrs[] = {
+ &dev_attr_w1_slave.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_ext_power.attr,
+ &dev_attr_eeprom.attr,
+ &dev_attr_alarms.attr,
NULL,
};
static struct attribute *w1_ds28ea00_attrs[] = {
&dev_attr_w1_slave.attr,
&dev_attr_w1_seq.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_ext_power.attr,
+ &dev_attr_resolution.attr,
+ &dev_attr_eeprom.attr,
+ &dev_attr_alarms.attr,
NULL,
};
+/* Attribute groups */
+
ATTRIBUTE_GROUPS(w1_therm);
+ATTRIBUTE_GROUPS(w1_ds18s20);
ATTRIBUTE_GROUPS(w1_ds28ea00);
#if IS_REACHABLE(CONFIG_HWMON)
@@ -154,6 +407,8 @@ static const struct hwmon_chip_info w1_chip_info = {
#define W1_CHIPINFO NULL
#endif
+/* Family operations */
+
static struct w1_family_ops w1_therm_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
@@ -161,6 +416,13 @@ static struct w1_family_ops w1_therm_fops = {
.chip_info = W1_CHIPINFO,
};
+static struct w1_family_ops w1_ds18s20_fops = {
+ .add_slave = w1_therm_add_slave,
+ .remove_slave = w1_therm_remove_slave,
+ .groups = w1_ds18s20_groups,
+ .chip_info = W1_CHIPINFO,
+};
+
static struct w1_family_ops w1_ds28ea00_fops = {
.add_slave = w1_therm_add_slave,
.remove_slave = w1_therm_remove_slave,
@@ -168,9 +430,11 @@ static struct w1_family_ops w1_ds28ea00_fops = {
.chip_info = W1_CHIPINFO,
};
+/* Family binding operations struct */
+
static struct w1_family w1_therm_family_DS18S20 = {
.fid = W1_THERM_DS18S20,
- .fops = &w1_therm_fops,
+ .fops = &w1_ds18s20_fops,
};
static struct w1_family w1_therm_family_DS18B20 = {
@@ -193,377 +457,813 @@ static struct w1_family w1_therm_family_DS1825 = {
.fops = &w1_therm_fops,
};
-struct w1_therm_family_converter {
- u8 broken;
- u16 reserved;
- struct w1_family *f;
- int (*convert)(u8 rom[9]);
- int (*precision)(struct device *device, int val);
- int (*eeprom)(struct device *device);
-};
+/* Device dependent func */
+
+static inline int w1_DS18B20_convert_time(struct w1_slave *sl)
+{
+ int ret;
+
+ if (!sl->family_data)
+ return -ENODEV; /* device unknown */
+
+ /* return time in ms for conversion operation */
+ switch (SLAVE_RESOLUTION(sl)) {
+ case 9:
+ ret = 95;
+ break;
+ case 10:
+ ret = 190;
+ break;
+ case 11:
+ ret = 375;
+ break;
+ case 12:
+ default:
+ ret = 750;
+ }
+ return ret;
+}
+
+static inline int w1_DS18S20_convert_time(struct w1_slave *sl)
+{
+ (void)(sl);
+ return 750; /* always 750ms for DS18S20 */
+}
+
+static inline int w1_DS18B20_write_data(struct w1_slave *sl,
+ const u8 *data)
+{
+ return write_scratchpad(sl, data, 3);
+}
+
+static inline int w1_DS18S20_write_data(struct w1_slave *sl,
+ const u8 *data)
+{
+ /* No config register */
+ return write_scratchpad(sl, data, 2);
+}
+
+static inline int w1_DS18B20_set_resolution(struct w1_slave *sl, int val)
+{
+ int ret;
+ u8 new_config_register[3]; /* array of data to be written */
+ struct therm_info info;
+
+ /* resolution of DS18B20 is in the range [9..12] bits */
+ if (val < 9 || val > 12)
+ return -EINVAL;
+
+ val -= 9; /* subtract 9, the lowest resolution in bits */
+ val = (val << 5); /* shift to position bit 5 & bit 6 */
+
+ /*
+ * Read the scratchpad to change only the required bits
+ * (bit5 & bit 6 from byte 4)
+ */
+ ret = read_scratchpad(sl, &info);
+ if (!ret) {
+ new_config_register[0] = info.rom[2];
+ new_config_register[1] = info.rom[3];
+ /* config register is byte 4 & mask 0b10011111 */
+ new_config_register[2] = (info.rom[4] & 0x9F) |
+ (u8) val;
+ } else
+ return ret;
+
+ /* Write data in the device RAM */
+ ret = w1_DS18B20_write_data(sl, new_config_register);
+
+ return ret;
+}
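The (val - 9) << 5 computation above reproduces the usual DS18B20 configuration-register encoding, where the resolution lives in bits 6:5 of scratchpad byte 4. A small self-contained check of that mapping (hypothetical test code, not part of the patch):

#include <assert.h>

/* mirror of the shift done in w1_DS18B20_set_resolution() */
static unsigned char res_to_cfg_bits(int res)
{
        return (unsigned char)((res - 9) << 5);
}

int main(void)
{
        assert(res_to_cfg_bits(9)  == 0x00);    /*  9 bit */
        assert(res_to_cfg_bits(10) == 0x20);    /* 10 bit */
        assert(res_to_cfg_bits(11) == 0x40);    /* 11 bit */
        assert(res_to_cfg_bits(12) == 0x60);    /* 12 bit */
        return 0;
}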
+
+static inline int w1_DS18B20_get_resolution(struct w1_slave *sl)
+{
+ int ret;
+ u8 config_register;
+ struct therm_info info;
+
+ ret = read_scratchpad(sl, &info);
+
+ if (!ret) {
+ config_register = info.rom[4]; /* config register is byte 4 */
+ config_register &= 0x60; /* 0b01100000 keep only bit 5 & 6 */
+ config_register = (config_register >> 5); /* shift */
+ config_register += 9; /* add 9, the lowest resolution in bits */
+ ret = (int) config_register;
+ }
+ return ret;
+}
-/* write configuration to eeprom */
-static inline int w1_therm_eeprom(struct device *device);
+/**
+ * w1_DS18B20_convert_temp() - temperature computation for DS18B20
+ * @rom: data read from device RAM (8 data bytes + 1 CRC byte)
+ *
+ * Can be called for any DS18B20 compliant device.
+ *
+ * Return: value in millidegrees Celsius.
+ */
+static inline int w1_DS18B20_convert_temp(u8 rom[9])
+{
+ s16 t = le16_to_cpup((__le16 *)rom);
-/* Set precision for conversion */
-static inline int w1_DS18B20_precision(struct device *device, int val);
-static inline int w1_DS18S20_precision(struct device *device, int val);
+ return t*1000/16;
+}
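A worked example of the conversion: with the common DS18B20 encoding, a raw scratchpad value of 0x0191 corresponds to +25.0625 degC, which the integer math above truncates to 25062 millidegrees. A hypothetical standalone check (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* same math as w1_DS18B20_convert_temp(), without the kernel helpers */
static int ds18b20_convert(const uint8_t rom[9])
{
        int16_t t = (int16_t)(rom[0] | (rom[1] << 8));  /* little-endian raw value */

        return t * 1000 / 16;   /* 1/16 degC per LSB, result in millidegrees */
}

int main(void)
{
        uint8_t rom[9] = { 0x91, 0x01 };        /* raw 0x0191 = +25.0625 degC */

        assert(ds18b20_convert(rom) == 25062);  /* truncated to millidegrees */
        return 0;
}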
-/* The return value is millidegrees Centigrade. */
-static inline int w1_DS18B20_convert_temp(u8 rom[9]);
-static inline int w1_DS18S20_convert_temp(u8 rom[9]);
+/**
+ * w1_DS18S20_convert_temp() - temperature computation for DS18S20
+ * @rom: data read from device RAM (8 data bytes + 1 CRC byte)
+ *
+ * Can be called for any DS18S20 compliant device.
+ *
+ * Return: value in millidegrees Celsius.
+ */
+static inline int w1_DS18S20_convert_temp(u8 rom[9])
+{
+ int t, h;
+
+ if (!rom[7]) {
+ pr_debug("%s: Invalid argument for conversion\n", __func__);
+ return 0;
+ }
+
+ if (rom[1] == 0)
+ t = ((s32)rom[0] >> 1)*1000;
+ else
+ t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);
+
+ t -= 250;
+ h = 1000*((s32)rom[7] - (s32)rom[6]);
+ h /= (s32)rom[7];
+ t += h;
+
+ return t;
+}
+
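The DS18S20 path applies the extended-resolution correction T = raw/2 - 0.25 + (COUNT_PER_C - COUNT_REMAIN)/COUNT_PER_C. A hypothetical standalone check with raw 0x32 (25.0 degC), COUNT_REMAIN = 4 and COUNT_PER_C = 16, which should give 25.5 degC (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* same math as w1_DS18S20_convert_temp(), positive-temperature path only */
static int ds18s20_convert(const uint8_t rom[9])
{
        int t = ((int)rom[0] >> 1) * 1000;      /* raw is 0.5 degC per LSB */

        t -= 250;                               /* -0.25 degC */
        t += 1000 * ((int)rom[7] - (int)rom[6]) / (int)rom[7];
        return t;
}

int main(void)
{
        uint8_t rom[9] = { 0x32, 0x00, 0, 0, 0, 0, 0x04, 0x10, 0 };

        assert(ds18s20_convert(rom) == 25500);  /* 25.5 degC in millidegrees */
        return 0;
}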
+/* Device capability description */
static struct w1_therm_family_converter w1_therm_families[] = {
{
- .f = &w1_therm_family_DS18S20,
- .convert = w1_DS18S20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS18S20,
+ .convert = w1_DS18S20_convert_temp,
+ .get_conversion_time = w1_DS18S20_convert_time,
+ .set_resolution = NULL, /* no config register */
+ .get_resolution = NULL, /* no config register */
+ .write_data = w1_DS18S20_write_data,
+ .bulk_read = true
},
{
- .f = &w1_therm_family_DS1822,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS1822,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = true
},
{
- .f = &w1_therm_family_DS18B20,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18B20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS18B20,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = true
},
{
- .f = &w1_therm_family_DS28EA00,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS28EA00,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = false
},
{
- .f = &w1_therm_family_DS1825,
- .convert = w1_DS18B20_convert_temp,
- .precision = w1_DS18S20_precision,
- .eeprom = w1_therm_eeprom
+ .f = &w1_therm_family_DS1825,
+ .convert = w1_DS18B20_convert_temp,
+ .get_conversion_time = w1_DS18B20_convert_time,
+ .set_resolution = w1_DS18B20_set_resolution,
+ .get_resolution = w1_DS18B20_get_resolution,
+ .write_data = w1_DS18B20_write_data,
+ .bulk_read = true
}
};
-static inline int w1_therm_eeprom(struct device *device)
+/* Helpers Functions */
+
+/**
+ * device_family() - Retrieve a pointer to &struct w1_therm_family_converter
+ * @sl: slave to retrieve the device specific structure
+ *
+ * Return: pointer to the slave's family converter, NULL if not known
+ */
+static struct w1_therm_family_converter *device_family(struct w1_slave *sl)
{
- struct w1_slave *sl = dev_to_w1_slave(device);
- struct w1_master *dev = sl->master;
- u8 rom[9], external_power;
- int ret, max_trying = 10;
- u8 *family_data = sl->family_data;
+ struct w1_therm_family_converter *ret = NULL;
+ int i;
- if (!sl->family_data) {
- ret = -ENODEV;
- goto error;
+ for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
+ if (w1_therm_families[i].f->fid == sl->family->fid) {
+ ret = &w1_therm_families[i];
+ break;
+ }
}
+ return ret;
+}
- /* prevent the slave from going away in sleep */
- atomic_inc(THERM_REFCNT(family_data));
+/**
+ * bus_mutex_lock() - Acquire the mutex
+ * @lock: w1 bus mutex to acquire
+ *
+ * It tries to acquire the mutex up to W1_THERM_MAX_TRY times, waiting
+ * W1_THERM_RETRY_DELAY ms between two attempts.
+ *
+ * Return: true if the mutex is acquired and locked, false otherwise
+ */
+static inline bool bus_mutex_lock(struct mutex *lock)
+{
+ int max_trying = W1_THERM_MAX_TRY;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto dec_refcnt;
+ /* try to acquire the mutex; if not, sleep W1_THERM_RETRY_DELAY ms before retrying */
+ while (mutex_lock_interruptible(lock) != 0 && max_trying > 0) {
+ unsigned long sleep_rem;
- memset(rom, 0, sizeof(rom));
+ sleep_rem = msleep_interruptible(W1_THERM_RETRY_DELAY);
+ if (!sleep_rem)
+ max_trying--;
+ }
- while (max_trying--) {
- if (!w1_reset_select_slave(sl)) {
- unsigned int tm = 10;
- unsigned long sleep_rem;
+ if (!max_trying)
+ return false; /* Didn't acquire the bus mutex */
+
+ return true;
+}
- /* check if in parasite mode */
- w1_write_8(dev, W1_READ_PSUPPLY);
- external_power = w1_read_8(dev);
+/**
+ * bulk_read_support() - check if the slave supports bulk read
+ * @sl: device to check the ability
+ *
+ * Return: true if bulk read is supported, false if not or error
+ */
+static inline bool bulk_read_support(struct w1_slave *sl)
+{
+ if (SLAVE_SPECIFIC_FUNC(sl))
+ return SLAVE_SPECIFIC_FUNC(sl)->bulk_read;
+
+ dev_info(&sl->dev,
+ "%s: Device not supported by the driver\n", __func__);
+
+ return false; /* No device family */
+}
+
+/**
+ * conversion_time() - get the Tconv for the slave
+ * @sl: device to get the conversion time
+ *
+ * On devices supporting resolution settings, the conversion time depends
+ * on the resolution setting. This helper function gets the slave timing,
+ * depending on its current setting.
+ *
+ * Return: conversion time in ms, negative values are kernel error codes
+ */
+static inline int conversion_time(struct w1_slave *sl)
+{
+ if (SLAVE_SPECIFIC_FUNC(sl))
+ return SLAVE_SPECIFIC_FUNC(sl)->get_conversion_time(sl);
+
+ dev_info(&sl->dev,
+ "%s: Device not supported by the driver\n", __func__);
+
+ return -ENODEV; /* No device family */
+}
+
+/**
+ * temperature_from_RAM() - Convert the read info to temperature
+ * @sl: device that sent the RAM data
+ * @rom: read value on the slave device RAM
+ *
+ * Device dependent, the function binds the correct computation method.
+ *
+ * Return: temperature in 1/1000degC, 0 on error.
+ */
+static inline int temperature_from_RAM(struct w1_slave *sl, u8 rom[9])
+{
+ if (SLAVE_SPECIFIC_FUNC(sl))
+ return SLAVE_SPECIFIC_FUNC(sl)->convert(rom);
- if (w1_reset_select_slave(sl))
- continue;
+ dev_info(&sl->dev,
+ "%s: Device not supported by the driver\n", __func__);
- /* 10ms strong pullup/delay after the copy command */
- if (w1_strong_pullup == 2 ||
- (!external_power && w1_strong_pullup))
- w1_next_pullup(dev, tm);
+ return 0; /* No device family */
+}
+
+/**
+ * int_to_short() - Safe casting of int to short
+ *
+ * @i: integer to be converted to short
+ *
+ * Device registers use 1 byte to store a signed integer.
+ * This helper function clamps the int and casts it to a signed byte,
+ * using the min/max values that the device can measure as limits.
+ * The min/max values are defined by the MIN_TEMP/MAX_TEMP macros.
+ *
+ * Return: a value clamped to the MIN_TEMP/MAX_TEMP range
+ */
+static inline s8 int_to_short(int i)
+{
+ /* Prepare to cast to short by eliminating out of range values */
+ i = i > MAX_TEMP ? MAX_TEMP : i;
+ i = i < MIN_TEMP ? MIN_TEMP : i;
+ return (s8) i;
+}
- w1_write_8(dev, W1_COPY_SCRATCHPAD);
+/* Interface Functions */
- if (external_power) {
- mutex_unlock(&dev->bus_mutex);
+static int w1_therm_add_slave(struct w1_slave *sl)
+{
+ struct w1_therm_family_converter *sl_family_conv;
+
+ /* Allocate memory */
+ sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
+ GFP_KERNEL);
+ if (!sl->family_data)
+ return -ENOMEM;
+
+ atomic_set(THERM_REFCNT(sl->family_data), 1);
+
+ /* Get a pointer to the device specific function struct */
+ sl_family_conv = device_family(sl);
+ if (!sl_family_conv) {
+ kfree(sl->family_data);
+ return -ENODEV;
+ }
+ /* save this pointer to the device structure */
+ SLAVE_SPECIFIC_FUNC(sl) = sl_family_conv;
+
+ if (bulk_read_support(sl)) {
+ /*
+ * add the sys entry to trigger bulk_read
+ * at master level only the 1st time
+ */
+ if (!bulk_read_device_counter) {
+ int err = device_create_file(&sl->master->dev,
+ &dev_attr_therm_bulk_read);
+
+ if (err)
+ dev_warn(&sl->dev,
+ "%s: Device has been added, but bulk read is unavailable. err=%d\n",
+ __func__, err);
+ }
+ /* Increment the counter */
+ bulk_read_device_counter++;
+ }
+
+ /* Getting the power mode of the device {external, parasite} */
+ SLAVE_POWERMODE(sl) = read_powermode(sl);
+
+ if (SLAVE_POWERMODE(sl) < 0) {
+ /* no error returned as device has been added */
+ dev_warn(&sl->dev,
+ "%s: Device has been added, but power_mode may be corrupted. err=%d\n",
+ __func__, SLAVE_POWERMODE(sl));
+ }
+
+ /* Getting the resolution of the device */
+ if (SLAVE_SPECIFIC_FUNC(sl)->get_resolution) {
+ SLAVE_RESOLUTION(sl) =
+ SLAVE_SPECIFIC_FUNC(sl)->get_resolution(sl);
+ if (SLAVE_RESOLUTION(sl) < 0) {
+ /* no error returned as device has been added */
+ dev_warn(&sl->dev,
+ "%s:Device has been added, but resolution may be corrupted. err=%d\n",
+ __func__, SLAVE_RESOLUTION(sl));
+ }
+ }
+
+ /* Finally initialize convert_triggered flag */
+ SLAVE_CONVERT_TRIGGERED(sl) = 0;
+
+ return 0;
+}
+
+static void w1_therm_remove_slave(struct w1_slave *sl)
+{
+ int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
+
+ if (bulk_read_support(sl)) {
+ bulk_read_device_counter--;
+ /* Delete the entry if no more devices support the feature */
+ if (!bulk_read_device_counter)
+ device_remove_file(&sl->master->dev,
+ &dev_attr_therm_bulk_read);
+ }
+
+ while (refcnt) {
+ msleep(1000);
+ refcnt = atomic_read(THERM_REFCNT(sl->family_data));
+ }
+ kfree(sl->family_data);
+ sl->family_data = NULL;
+}
+
+/* Hardware Functions */
+
+/* Safe version of reset_select_slave - avoid using the one in w1_io.c */
+static int reset_select_slave(struct w1_slave *sl)
+{
+ u8 match[9] = { W1_MATCH_ROM, };
+ u64 rn = le64_to_cpu(*((u64 *)&sl->reg_num));
+
+ if (w1_reset_bus(sl->master))
+ return -ENODEV;
+
+ memcpy(&match[1], &rn, 8);
+ w1_write_block(sl->master, match, 9);
+
+ return 0;
+}
+
+static int convert_t(struct w1_slave *sl, struct therm_info *info)
+{
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int t_conv;
+ int ret = -ENODEV;
+ bool strong_pullup;
- sleep_rem = msleep_interruptible(tm);
+ if (!sl->family_data)
+ goto error;
+
+ strong_pullup = (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
+
+ /* get the conversion duration, which is device dependent */
+ t_conv = conversion_time(sl);
+
+ memset(info->rom, 0, sizeof(info->rom));
+
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
+
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
+
+ while (max_trying-- && ret) { /* ret should be 0 */
+
+ info->verdict = 0;
+ info->crc = 0;
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ unsigned long sleep_rem;
+
+ /* strong pullup (or delay) for t_conv ms after the convert */
+ if (strong_pullup)
+ w1_next_pullup(dev_master, t_conv);
+
+ w1_write_8(dev_master, W1_CONVERT_TEMP);
+
+ if (strong_pullup) { /* some devices need pullup */
+ sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
- goto dec_refcnt;
+ goto mt_unlock;
}
+ mutex_unlock(&dev_master->bus_mutex);
+ } else { /* no device needs pullup */
+ mutex_unlock(&dev_master->bus_mutex);
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto dec_refcnt;
- } else if (!w1_strong_pullup) {
- sleep_rem = msleep_interruptible(tm);
+ sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
- goto mt_unlock;
+ goto dec_refcnt;
}
}
-
- break;
+ ret = read_scratchpad(sl, info);
+ goto dec_refcnt;
}
+
}
mt_unlock:
- mutex_unlock(&dev->bus_mutex);
+ mutex_unlock(&dev_master->bus_mutex);
dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
+ atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
-/* DS18S20 does not feature configuration register */
-static inline int w1_DS18S20_precision(struct device *device, int val)
+static int read_scratchpad(struct w1_slave *sl, struct therm_info *info)
{
- return 0;
-}
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
-static inline int w1_DS18B20_precision(struct device *device, int val)
-{
- struct w1_slave *sl = dev_to_w1_slave(device);
- struct w1_master *dev = sl->master;
- u8 rom[9], crc;
- int ret, max_trying = 10;
- u8 *family_data = sl->family_data;
- uint8_t precision_bits;
- uint8_t mask = 0x60;
+ info->verdict = 0;
- if (val > 12 || val < 9) {
- pr_warn("Unsupported precision\n");
- ret = -EINVAL;
+ if (!sl->family_data)
goto error;
- }
- if (!sl->family_data) {
- ret = -ENODEV;
- goto error;
- }
+ memset(info->rom, 0, sizeof(info->rom));
/* prevent the slave from going away in sleep */
- atomic_inc(THERM_REFCNT(family_data));
+ atomic_inc(THERM_REFCNT(sl->family_data));
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
+ }
- memset(rom, 0, sizeof(rom));
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ u8 nb_bytes_read;
+
+ w1_write_8(dev_master, W1_READ_SCRATCHPAD);
+
+ nb_bytes_read = w1_read_block(dev_master, info->rom, 9);
+ if (nb_bytes_read != 9) {
+ dev_warn(&sl->dev,
+ "w1_read_block(): returned %u instead of 9.\n",
+ nb_bytes_read);
+ ret = -EIO;
+ }
+
+ info->crc = w1_calc_crc8(info->rom, 8);
+
+ if (info->rom[8] == info->crc) {
+ info->verdict = 1;
+ ret = 0;
+ } else
+ ret = -EIO; /* CRC mismatch */
+ }
- /* translate precision to bitmask (see datasheet page 9) */
- switch (val) {
- case 9:
- precision_bits = 0x00;
- break;
- case 10:
- precision_bits = 0x20;
- break;
- case 11:
- precision_bits = 0x40;
- break;
- case 12:
- default:
- precision_bits = 0x60;
- break;
}
+ mutex_unlock(&dev_master->bus_mutex);
- while (max_trying--) {
- crc = 0;
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
+}
- if (!w1_reset_select_slave(sl)) {
- int count = 0;
+static int write_scratchpad(struct w1_slave *sl, const u8 *data, u8 nb_bytes)
+{
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
- /* read values to only alter precision bits */
- w1_write_8(dev, W1_READ_SCRATCHPAD);
- count = w1_read_block(dev, rom, 9);
- if (count != 9)
- dev_warn(device, "w1_read_block() returned %u instead of 9.\n", count);
+ if (!sl->family_data)
+ goto error;
- crc = w1_calc_crc8(rom, 8);
- if (rom[8] == crc) {
- rom[4] = (rom[4] & ~mask) | (precision_bits & mask);
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
- if (!w1_reset_select_slave(sl)) {
- w1_write_8(dev, W1_WRITE_SCRATCHPAD);
- w1_write_8(dev, rom[2]);
- w1_write_8(dev, rom[3]);
- w1_write_8(dev, rom[4]);
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
- break;
- }
- }
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ w1_write_8(dev_master, W1_WRITE_SCRATCHPAD);
+ w1_write_block(dev_master, data, nb_bytes);
+ ret = 0;
}
}
+ mutex_unlock(&dev_master->bus_mutex);
- mutex_unlock(&dev->bus_mutex);
dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
+ atomic_dec(THERM_REFCNT(sl->family_data));
error:
return ret;
}
-static inline int w1_DS18B20_convert_temp(u8 rom[9])
+static int copy_scratchpad(struct w1_slave *sl)
{
- s16 t = le16_to_cpup((__le16 *)rom);
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int t_write, ret = -ENODEV;
+ bool strong_pullup;
- return t*1000/16;
-}
+ if (!sl->family_data)
+ goto error;
-static inline int w1_DS18S20_convert_temp(u8 rom[9])
-{
- int t, h;
+ t_write = W1_THERM_EEPROM_WRITE_DELAY;
+ strong_pullup = (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
- if (!rom[7])
- return 0;
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
- if (rom[1] == 0)
- t = ((s32)rom[0] >> 1)*1000;
- else
- t = 1000*(-1*(s32)(0x100-rom[0]) >> 1);
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
- t -= 250;
- h = 1000*((s32)rom[7] - (s32)rom[6]);
- h /= (s32)rom[7];
- t += h;
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ unsigned long sleep_rem;
- return t;
-}
+ /* 10ms strong pullup (or delay) after the copy command */
+ if (strong_pullup)
+ w1_next_pullup(dev_master, t_write);
-static inline int w1_convert_temp(u8 rom[9], u8 fid)
-{
- int i;
+ w1_write_8(dev_master, W1_COPY_SCRATCHPAD);
- for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i)
- if (w1_therm_families[i].f->fid == fid)
- return w1_therm_families[i].convert(rom);
+ if (strong_pullup) {
+ sleep_rem = msleep_interruptible(t_write);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto mt_unlock;
+ }
+ }
+ ret = 0;
+ }
- return 0;
+ }
+
+mt_unlock:
+ mutex_unlock(&dev_master->bus_mutex);
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
}
-static ssize_t w1_slave_store(struct device *device,
- struct device_attribute *attr, const char *buf,
- size_t size)
+static int recall_eeprom(struct w1_slave *sl)
{
- int val, ret;
- struct w1_slave *sl = dev_to_w1_slave(device);
- int i;
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
- ret = kstrtoint(buf, 0, &val);
- if (ret)
- return ret;
+ if (!sl->family_data)
+ goto error;
- for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
- if (w1_therm_families[i].f->fid == sl->family->fid) {
- /* zero value indicates to write current configuration to eeprom */
- if (val == 0)
- ret = w1_therm_families[i].eeprom(device);
- else
- ret = w1_therm_families[i].precision(device, val);
- break;
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(sl->family_data));
+
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto dec_refcnt;
+ }
+
+ while (max_trying-- && ret) { /* ret should be 0 */
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+
+ w1_write_8(dev_master, W1_RECALL_EEPROM);
+
+ ret = 1; /* Slave will pull line to 0 */
+ while (ret)
+ ret = 1 - w1_touch_bit(dev_master, 1);
}
+
}
- return ret ? : size;
+
+ mutex_unlock(&dev_master->bus_mutex);
+
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
}
-static ssize_t read_therm(struct device *device,
- struct w1_slave *sl, struct therm_info *info)
+static int read_powermode(struct w1_slave *sl)
{
- struct w1_master *dev = sl->master;
- u8 external_power;
- int ret, max_trying = 10;
- u8 *family_data = sl->family_data;
+ struct w1_master *dev_master = sl->master;
+ int max_trying = W1_THERM_MAX_TRY;
+ int ret = -ENODEV;
- if (!family_data) {
- ret = -ENODEV;
+ if (!sl->family_data)
goto error;
- }
/* prevent the slave from going away in sleep */
- atomic_inc(THERM_REFCNT(family_data));
+ atomic_inc(THERM_REFCNT(sl->family_data));
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
goto dec_refcnt;
+ }
- memset(info->rom, 0, sizeof(info->rom));
+ while ((max_trying--) && (ret < 0)) {
+ /* safe version to select slave */
+ if (!reset_select_slave(sl)) {
+ w1_write_8(dev_master, W1_READ_PSUPPLY);
+ /*
+ * Emit a read time slot and read only one bit,
+ * 1 is externally powered,
+ * 0 is parasite powered
+ */
+ ret = w1_touch_bit(dev_master, 1);
+ /* ret should be either 1 or 0 */
+ }
+ }
+ mutex_unlock(&dev_master->bus_mutex);
- while (max_trying--) {
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(sl->family_data));
+error:
+ return ret;
+}
- info->verdict = 0;
- info->crc = 0;
+static int trigger_bulk_read(struct w1_master *dev_master)
+{
+ struct w1_slave *sl = NULL; /* used to iterate through slaves */
+ int max_trying = W1_THERM_MAX_TRY;
+ int t_conv = 0;
+ int ret = -ENODEV;
+ bool strong_pullup = false;
+
+ /*
+ * Check whether there are parasite powered devices on the bus,
+ * and compute the conversion duration for these devices
+ * so we can apply a strong pullup if required
+ */
+ list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
+ if (!sl->family_data)
+ goto error;
+ if (bulk_read_support(sl)) {
+ int t_cur = conversion_time(sl);
+
+ t_conv = t_cur > t_conv ? t_cur : t_conv;
+ strong_pullup = strong_pullup ||
+ (w1_strong_pullup == 2 ||
+ (!SLAVE_POWERMODE(sl) &&
+ w1_strong_pullup));
+ }
+ }
- if (!w1_reset_select_slave(sl)) {
- int count = 0;
- unsigned int tm = 750;
- unsigned long sleep_rem;
+ /*
+ * t_conv is the max conversion time required on the bus.
+ * If it's 0, no device supports the bulk read feature.
+ */
+ if (!t_conv)
+ goto error;
- w1_write_8(dev, W1_READ_PSUPPLY);
- external_power = w1_read_8(dev);
+ if (!bus_mutex_lock(&dev_master->bus_mutex)) {
+ ret = -EAGAIN; /* Didn't acquire the mutex */
+ goto error;
+ }
- if (w1_reset_select_slave(sl))
- continue;
+ while ((max_trying--) && (ret < 0)) { /* ret should end up 0 */
- /* 750ms strong pullup (or delay) after the convert */
- if (w1_strong_pullup == 2 ||
- (!external_power && w1_strong_pullup))
- w1_next_pullup(dev, tm);
+ if (!w1_reset_bus(dev_master)) { /* Just reset the bus */
+ unsigned long sleep_rem;
- w1_write_8(dev, W1_CONVERT_TEMP);
+ w1_write_8(dev_master, W1_SKIP_ROM);
- if (external_power) {
- mutex_unlock(&dev->bus_mutex);
+ if (strong_pullup) /* Apply pullup if required */
+ w1_next_pullup(dev_master, t_conv);
- sleep_rem = msleep_interruptible(tm);
- if (sleep_rem != 0) {
- ret = -EINTR;
- goto dec_refcnt;
- }
+ w1_write_8(dev_master, W1_CONVERT_TEMP);
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto dec_refcnt;
- } else if (!w1_strong_pullup) {
- sleep_rem = msleep_interruptible(tm);
+ /* set a flag to indicate that a CONVERT T is pending */
+ list_for_each_entry(sl,
+ &dev_master->slist, w1_slave_entry) {
+ if (bulk_read_support(sl))
+ SLAVE_CONVERT_TRIGGERED(sl) = -1;
+ }
+
+ if (strong_pullup) { /* some device need pullup */
+ sleep_rem = msleep_interruptible(t_conv);
if (sleep_rem != 0) {
ret = -EINTR;
goto mt_unlock;
}
- }
-
- if (!w1_reset_select_slave(sl)) {
-
- w1_write_8(dev, W1_READ_SCRATCHPAD);
- count = w1_read_block(dev, info->rom, 9);
- if (count != 9) {
- dev_warn(device, "w1_read_block() "
- "returned %u instead of 9.\n",
- count);
+ mutex_unlock(&dev_master->bus_mutex);
+ } else {
+ mutex_unlock(&dev_master->bus_mutex);
+ sleep_rem = msleep_interruptible(t_conv);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto set_flag;
}
-
- info->crc = w1_calc_crc8(info->rom, 8);
-
- if (info->rom[8] == info->crc)
- info->verdict = 1;
}
+ ret = 0;
+ goto set_flag;
}
-
- if (info->verdict)
- break;
}
mt_unlock:
- mutex_unlock(&dev->bus_mutex);
-dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
+ mutex_unlock(&dev_master->bus_mutex);
+set_flag:
+ /* set a flag to record that the conversion is done */
+ list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
+ if (bulk_read_support(sl))
+ SLAVE_CONVERT_TRIGGERED(sl) = 1;
+ }
error:
return ret;
}
+/* Sysfs Interface definition */
+
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -572,43 +1272,405 @@ static ssize_t w1_slave_show(struct device *device,
u8 *family_data = sl->family_data;
int ret, i;
ssize_t c = PAGE_SIZE;
- u8 fid = sl->family->fid;
- ret = read_therm(device, sl, &info);
- if (ret)
- return ret;
+ if (bulk_read_support(sl)) {
+ if (SLAVE_CONVERT_TRIGGERED(sl) < 0) {
+ dev_dbg(device,
+ "%s: Conversion in progress, retry later\n",
+ __func__);
+ return 0;
+ } else if (SLAVE_CONVERT_TRIGGERED(sl) > 0) {
+ /* A bulk read has been issued, read the device RAM */
+ ret = read_scratchpad(sl, &info);
+ SLAVE_CONVERT_TRIGGERED(sl) = 0;
+ } else
+ ret = convert_t(sl, &info);
+ } else
+ ret = convert_t(sl, &info);
+
+ if (ret < 0) {
+ dev_dbg(device,
+ "%s: Temperature data may be corrupted. err=%d\n",
+ __func__, ret);
+ return 0;
+ }
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", info.rom[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
info.crc, (info.verdict) ? "YES" : "NO");
+
if (info.verdict)
memcpy(family_data, info.rom, sizeof(info.rom));
else
- dev_warn(device, "Read failed CRC check\n");
+ dev_warn(device, "%s: Read failed CRC check\n", __func__);
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
((u8 *)family_data)[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
- w1_convert_temp(info.rom, fid));
+ temperature_from_RAM(sl, info.rom));
+
ret = PAGE_SIZE - c;
return ret;
}
+static ssize_t w1_slave_store(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ int val, ret = 0;
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ ret = kstrtoint(buf, 10, &val); /* converting user entry to int */
+
+ if (ret) { /* conversion error */
+ dev_info(device,
+ "%s: conversion error. err= %d\n", __func__, ret);
+ return size; /* return size to avoid being called again */
+ }
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return size; /* No device family */
+ }
+
+ if (val == 0) /* val=0 : trigger a EEPROM save */
+ ret = copy_scratchpad(sl);
+ else {
+ if (SLAVE_SPECIFIC_FUNC(sl)->set_resolution)
+ ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
+ }
+
+ if (ret) {
+ dev_info(device,
+ "%s: writing error %d\n", __func__, ret);
+ /* return size to avoid call back again */
+ } else
+ SLAVE_RESOLUTION(sl) = val;
+
+ return size; /* always return size to avoid being called again */
+}
+
+static ssize_t temperature_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct therm_info info;
+ int ret = 0;
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+
+ if (bulk_read_support(sl)) {
+ if (SLAVE_CONVERT_TRIGGERED(sl) < 0) {
+ dev_dbg(device,
+ "%s: Conversion in progress, retry later\n",
+ __func__);
+ return 0;
+ } else if (SLAVE_CONVERT_TRIGGERED(sl) > 0) {
+ /* A bulk read has been issued, read the device RAM */
+ ret = read_scratchpad(sl, &info);
+ SLAVE_CONVERT_TRIGGERED(sl) = 0;
+ } else
+ ret = convert_t(sl, &info);
+ } else
+ ret = convert_t(sl, &info);
+
+ if (ret < 0) {
+ dev_dbg(device,
+ "%s: Temperature data may be corrupted. err=%d\n",
+ __func__, ret);
+ return 0;
+ }
+
+ return sprintf(buf, "%d\n", temperature_from_RAM(sl, info.rom));
+}
+
+static ssize_t ext_power_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if (!sl->family_data) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+
+ /* Getting the power mode of the device {external, parasite} */
+ SLAVE_POWERMODE(sl) = read_powermode(sl);
+
+ if (SLAVE_POWERMODE(sl) < 0) {
+ dev_dbg(device,
+ "%s: Power_mode may be corrupted. err=%d\n",
+ __func__, SLAVE_POWERMODE(sl));
+ }
+ return sprintf(buf, "%d\n", SLAVE_POWERMODE(sl));
+}
+
+static ssize_t resolution_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return 0; /* No device family */
+ }
+
+ /* get the correct function depending on the device */
+ SLAVE_RESOLUTION(sl) = SLAVE_SPECIFIC_FUNC(sl)->get_resolution(sl);
+ if (SLAVE_RESOLUTION(sl) < 0) {
+ dev_dbg(device,
+ "%s: Resolution may be corrupted. err=%d\n",
+ __func__, SLAVE_RESOLUTION(sl));
+ }
+
+ return sprintf(buf, "%d\n", SLAVE_RESOLUTION(sl));
+}
+
+static ssize_t resolution_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int val;
+ int ret = 0;
+
+ ret = kstrtoint(buf, 10, &val); /* converting user entry to int */
+
+ if (ret) { /* conversion error */
+ dev_info(device,
+ "%s: conversion error. err= %d\n", __func__, ret);
+ return size; /* return size to avoid being called again */
+ }
+
+ if ((!sl->family_data) || (!SLAVE_SPECIFIC_FUNC(sl))) {
+ dev_info(device,
+ "%s: Device not supported by the driver\n", __func__);
+ return size; /* No device family */
+ }
+
+ /*
+ * Don't validate the value entered by the user;
+ * only the device knows what is correct or not
+ */
+
+ /* get the correct function depending on the device */
+ ret = SLAVE_SPECIFIC_FUNC(sl)->set_resolution(sl, val);
+
+ if (ret) {
+ dev_info(device,
+ "%s: writing error %d\n", __func__, ret);
+ /* return size to avoid being called again */
+ } else
+ SLAVE_RESOLUTION(sl) = val;
+
+ return size;
+}
+
+static ssize_t eeprom_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int ret = -EINVAL; /* Invalid argument */
+
+ if (size == sizeof(EEPROM_CMD_WRITE)) {
+ if (!strncmp(buf, EEPROM_CMD_WRITE, sizeof(EEPROM_CMD_WRITE)-1))
+ ret = copy_scratchpad(sl);
+ } else if (size == sizeof(EEPROM_CMD_READ)) {
+ if (!strncmp(buf, EEPROM_CMD_READ, sizeof(EEPROM_CMD_READ)-1))
+ ret = recall_eeprom(sl);
+ }
+
+ if (ret)
+ dev_info(device, "%s: error in process %d\n", __func__, ret);
+
+ return size;
+}
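From userspace the new eeprom attribute is driven with the literal strings defined above ("save" copies the scratchpad to EEPROM, "restore" recalls it). A minimal hypothetical sketch, assuming a DS18B20 registered as 28-000005e2fdc3 under the usual /sys/bus/w1/devices/ layout (both the path and the slave id are illustrative):

#include <stdio.h>

int main(void)
{
        /* the slave id below is illustrative, substitute the real one */
        FILE *f = fopen("/sys/bus/w1/devices/28-000005e2fdc3/eeprom", "w");

        if (!f) {
                perror("eeprom");
                return 1;
        }
        fprintf(f, "save\n");   /* or "restore" to recall the EEPROM content */
        fclose(f);
        return 0;
}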
+
+static ssize_t alarms_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int ret;
+ s8 th = 0, tl = 0;
+ struct therm_info scratchpad;
+
+ ret = read_scratchpad(sl, &scratchpad);
+
+ if (!ret) {
+ th = scratchpad.rom[2]; /* TH is byte 2 */
+ tl = scratchpad.rom[3]; /* TL is byte 3 */
+ } else {
+ dev_info(device,
+ "%s: error reading alarms register %d\n",
+ __func__, ret);
+ }
+
+ return sprintf(buf, "%hd %hd\n", tl, th);
+}
+
+static ssize_t alarms_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct therm_info info;
+ u8 new_config_register[3]; /* array of data to be written */
+ int temp, ret;
+ char *token = NULL;
+ s8 tl, th, tt; /* 1 byte per value + a temp for reordering */
+ char *p_args, *orig;
+
+ p_args = orig = kmalloc(size, GFP_KERNEL);
+ /* Safe string copy as buf is const */
+ if (!p_args) {
+ dev_warn(device,
+ "%s: error unable to allocate memory %d\n",
+ __func__, -ENOMEM);
+ return size;
+ }
+ strcpy(p_args, buf);
+
+ /* Split string using space char */
+ token = strsep(&p_args, " ");
+
+ if (!token) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, -EINVAL);
+ goto free_m;
+ }
+
+ /* Convert 1st entry to int */
+ ret = kstrtoint(token, 10, &temp);
+ if (ret) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, ret);
+ goto free_m;
+ }
+
+ tl = int_to_short(temp);
+
+ /* Split string using space char */
+ token = strsep(&p_args, " ");
+ if (!token) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, -EINVAL);
+ goto free_m;
+ }
+ /* Convert 2nd entry to int */
+ ret = kstrtoint(token, 10, &temp);
+ if (ret) {
+ dev_info(device,
+ "%s: error parsing args %d\n", __func__, ret);
+ goto free_m;
+ }
+
+ /* Prepare to cast to short by eliminating out of range values */
+ th = int_to_short(temp);
+
+ /* Reorder th and tl if required */
+ if (tl > th) {
+ tt = tl; tl = th; th = tt;
+ }
+
+ /*
+ * Read the scratchpad to change only the required bits
+ * (th : byte 2 - tl: byte 3)
+ */
+ ret = read_scratchpad(sl, &info);
+ if (!ret) {
+ new_config_register[0] = th; /* Byte 2 */
+ new_config_register[1] = tl; /* Byte 3 */
+ new_config_register[2] = info.rom[4];/* Byte 4 */
+ } else {
+ dev_info(device,
+ "%s: error reading from the slave device %d\n",
+ __func__, ret);
+ goto free_m;
+ }
+
+ /* Write data in the device RAM */
+ if (!SLAVE_SPECIFIC_FUNC(sl)) {
+ dev_info(device,
+ "%s: Device not supported by the driver %d\n",
+ __func__, -ENODEV);
+ goto free_m;
+ }
+
+ ret = SLAVE_SPECIFIC_FUNC(sl)->write_data(sl, new_config_register);
+ if (ret)
+ dev_info(device,
+ "%s: error writing to the slave device %d\n",
+ __func__, ret);
+
+free_m:
+ /* free allocated memory */
+ kfree(orig);
+
+ return size;
+}
+
+static ssize_t therm_bulk_read_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct w1_master *dev_master = dev_to_w1_master(device);
+ int ret = -EINVAL; /* Invalid argument */
+
+ if (size == sizeof(BULK_TRIGGER_CMD))
+ if (!strncmp(buf, BULK_TRIGGER_CMD,
+ sizeof(BULK_TRIGGER_CMD)-1))
+ ret = trigger_bulk_read(dev_master);
+
+ if (ret)
+ dev_info(device,
+ "%s: unable to trigger a bulk read on the bus. err=%d\n",
+ __func__, ret);
+
+ return size;
+}
+
+static ssize_t therm_bulk_read_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct w1_master *dev_master = dev_to_w1_master(device);
+ struct w1_slave *sl = NULL;
+ int ret = 0;
+
+ list_for_each_entry(sl, &dev_master->slist, w1_slave_entry) {
+ if (sl->family_data) {
+ if (bulk_read_support(sl)) {
+ if (SLAVE_CONVERT_TRIGGERED(sl) == -1) {
+ ret = -1;
+ goto show_result;
+ }
+ if (SLAVE_CONVERT_TRIGGERED(sl) == 1)
+ /* continue to check other slaves */
+ ret = 1;
+ }
+ }
+ }
+show_result:
+ return sprintf(buf, "%d\n", ret);
+}
+
#if IS_REACHABLE(CONFIG_HWMON)
static int w1_read_temp(struct device *device, u32 attr, int channel,
long *val)
{
struct w1_slave *sl = dev_get_drvdata(device);
struct therm_info info;
- u8 fid = sl->family->fid;
int ret;
switch (attr) {
case hwmon_temp_input:
- ret = read_therm(device, sl, &info);
+ ret = convert_t(sl, &info);
if (ret)
return ret;
@@ -617,7 +1679,7 @@ static int w1_read_temp(struct device *device, u32 attr, int channel,
return ret;
}
- *val = w1_convert_temp(info.rom, fid);
+ *val = temperature_from_RAM(sl, info.rom);
ret = 0;
break;
default:
@@ -666,7 +1728,7 @@ static ssize_t w1_seq_show(struct device *device,
if (ack != W1_42_SUCCESS_CONFIRM_BYTE)
goto error;
- /* In case the bus fails to send 0xFF, limit*/
+ /* In case the bus fails to send 0xFF, limit */
for (i = 0; i <= 64; i++) {
if (w1_reset_bus(sl->master))
goto error;
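
The alarms_store() handler added above takes two space-separated integers (the TL and TH alarm temperatures, reordered by the driver if given high-before-low), and therm_bulk_read_show() reports -1, 0 or 1 depending on whether a triggered bulk read failed, finished or is still pending on at least one slave. A minimal userspace sketch for driving the alarms attribute; the slave id in the path is hypothetical and depends on the sensor's ROM id:

#include <stdio.h>

int main(void)
{
	/* hypothetical slave path; substitute the real 28-xxxxxxxxxxxx id */
	const char *path = "/sys/bus/w1/devices/28-000005e2fdc3/alarms";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* low alarm first, high alarm second; the driver swaps them if reversed */
	fprintf(f, "%d %d\n", -20, 70);
	return fclose(f) ? 1 : 0;
}
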
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b739c476955b..55b910c453da 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -678,6 +678,7 @@ config TS4800_WATCHDOG
config TS72XX_WATCHDOG
tristate "TS-72XX SBC Watchdog"
depends on MACH_TS72XX || COMPILE_TEST
+ select WATCHDOG_CORE
help
Technologic Systems TS-7200, TS-7250 and TS-7260 boards have a
watchdog timer implemented in an external CPLD chip. Say Y here
@@ -867,6 +868,19 @@ config DIGICOLOR_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called digicolor_wdt.
+config ARM_SMC_WATCHDOG
+ tristate "ARM Secure Monitor Call based watchdog support"
+ depends on ARM || ARM64
+ depends on OF
+ depends on HAVE_ARM_SMCCC
+ select WATCHDOG_CORE
+ help
+ Say Y here to include support for a watchdog timer
+ implemented by the EL3 Secure Monitor on ARM platforms.
+ Requires firmware support.
+ To compile this driver as a module, choose M here: the
+ module will be called arm_smc_wdt.
+
config LPC18XX_WATCHDOG
tristate "LPC18xx/43xx Watchdog"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 6de2e4ceef19..97bed1d3d97c 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
obj-$(CONFIG_RTD119X_WATCHDOG) += rtd119x_wdt.o
obj-$(CONFIG_SPRD_WATCHDOG) += sprd_wdt.o
obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
+obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
diff --git a/drivers/watchdog/arm_smc_wdt.c b/drivers/watchdog/arm_smc_wdt.c
new file mode 100644
index 000000000000..8f3d0c3a005f
--- /dev/null
+++ b/drivers/watchdog/arm_smc_wdt.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Secure Monitor Call watchdog driver
+ *
+ * Copyright 2020 Google LLC.
+ * Julius Werner <jwerner@chromium.org>
+ * Based on mtk_wdt.c
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <uapi/linux/psci.h>
+
+#define DRV_NAME "arm_smc_wdt"
+#define DRV_VERSION "1.0"
+
+enum smcwd_call {
+ SMCWD_INIT = 0,
+ SMCWD_SET_TIMEOUT = 1,
+ SMCWD_ENABLE = 2,
+ SMCWD_PET = 3,
+ SMCWD_GET_TIMELEFT = 4,
+};
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int timeout;
+
+static int smcwd_call(struct watchdog_device *wdd, enum smcwd_call call,
+ unsigned long arg, struct arm_smccc_res *res)
+{
+ struct arm_smccc_res local_res;
+
+ if (!res)
+ res = &local_res;
+
+ arm_smccc_smc((u32)(uintptr_t)watchdog_get_drvdata(wdd), call, arg, 0,
+ 0, 0, 0, 0, res);
+
+ if (res->a0 == PSCI_RET_NOT_SUPPORTED)
+ return -ENODEV;
+ if (res->a0 == PSCI_RET_INVALID_PARAMS)
+ return -EINVAL;
+ if (res->a0 != PSCI_RET_SUCCESS)
+ return -EIO;
+ return 0;
+}
+
+static int smcwd_ping(struct watchdog_device *wdd)
+{
+ return smcwd_call(wdd, SMCWD_PET, 0, NULL);
+}
+
+static unsigned int smcwd_get_timeleft(struct watchdog_device *wdd)
+{
+ struct arm_smccc_res res;
+
+ smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, &res);
+ if (res.a0)
+ return 0;
+ return res.a1;
+}
+
+static int smcwd_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
+{
+ int res;
+
+ res = smcwd_call(wdd, SMCWD_SET_TIMEOUT, timeout, NULL);
+ if (!res)
+ wdd->timeout = timeout;
+ return res;
+}
+
+static int smcwd_stop(struct watchdog_device *wdd)
+{
+ return smcwd_call(wdd, SMCWD_ENABLE, 0, NULL);
+}
+
+static int smcwd_start(struct watchdog_device *wdd)
+{
+ return smcwd_call(wdd, SMCWD_ENABLE, 1, NULL);
+}
+
+static const struct watchdog_info smcwd_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+};
+
+static const struct watchdog_ops smcwd_ops = {
+ .start = smcwd_start,
+ .stop = smcwd_stop,
+ .ping = smcwd_ping,
+ .set_timeout = smcwd_set_timeout,
+};
+
+static const struct watchdog_ops smcwd_timeleft_ops = {
+ .start = smcwd_start,
+ .stop = smcwd_stop,
+ .ping = smcwd_ping,
+ .set_timeout = smcwd_set_timeout,
+ .get_timeleft = smcwd_get_timeleft,
+};
+
+static int smcwd_probe(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd;
+ int err;
+ struct arm_smccc_res res;
+ u32 smc_func_id;
+
+ wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
+ if (!wdd)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, wdd);
+
+ if (of_property_read_u32(pdev->dev.of_node, "arm,smc-id",
+ &smc_func_id))
+ smc_func_id = 0x82003D06;
+ watchdog_set_drvdata(wdd, (void *)(uintptr_t)smc_func_id);
+
+ err = smcwd_call(wdd, SMCWD_INIT, 0, &res);
+ if (err < 0)
+ return err;
+
+ wdd->info = &smcwd_info;
+ /* get_timeleft is optional */
+ if (smcwd_call(wdd, SMCWD_GET_TIMELEFT, 0, NULL))
+ wdd->ops = &smcwd_ops;
+ else
+ wdd->ops = &smcwd_timeleft_ops;
+ wdd->timeout = res.a2;
+ wdd->max_timeout = res.a2;
+ wdd->min_timeout = res.a1;
+ wdd->parent = &pdev->dev;
+
+ watchdog_stop_on_reboot(wdd);
+ watchdog_stop_on_unregister(wdd);
+ watchdog_set_nowayout(wdd, nowayout);
+ watchdog_init_timeout(wdd, timeout, &pdev->dev);
+ err = smcwd_set_timeout(wdd, wdd->timeout);
+ if (err)
+ return err;
+
+ err = devm_watchdog_register_device(&pdev->dev, wdd);
+ if (err)
+ return err;
+
+ dev_info(&pdev->dev,
+ "Watchdog registered (timeout=%d sec, nowayout=%d)\n",
+ wdd->timeout, nowayout);
+
+ return 0;
+}
+
+static const struct of_device_id smcwd_dt_ids[] = {
+ { .compatible = "arm,smc-wdt" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, smcwd_dt_ids);
+
+static struct platform_driver smcwd_driver = {
+ .probe = smcwd_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = smcwd_dt_ids,
+ },
+};
+
+module_platform_driver(smcwd_driver);
+
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
+
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Julius Werner <jwerner@chromium.org>");
+MODULE_DESCRIPTION("ARM Secure Monitor Call Watchdog Driver");
+MODULE_VERSION(DRV_VERSION);
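
For reference, the firmware interface this new driver assumes (pieced together from smcwd_call() and smcwd_probe() above, so treat it as an inference rather than a specification): the SMC function ID comes from the optional "arm,smc-id" property of the "arm,smc-wdt" node (default 0x82003D06), the SMCWD_* sub-function goes in a1 and its argument in a2, the firmware answers with a PSCI-style status in a0, and SMCWD_INIT additionally returns the supported minimum/maximum timeout in a1/a2. A hedged kernel-side sketch of that handshake:

#include <linux/arm-smccc.h>
#include <uapi/linux/psci.h>

/* Illustration only: query the timeout range the way smcwd_probe() does. */
static int example_smcwd_query_range(u32 smc_func_id,
				     unsigned int *min_s, unsigned int *max_s)
{
	struct arm_smccc_res res;

	/* a0 = function id, a1 = SMCWD_INIT (0), remaining arguments unused */
	arm_smccc_smc(smc_func_id, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != PSCI_RET_SUCCESS)
		return -EIO;

	*min_s = res.a1;	/* minimum supported timeout, in seconds */
	*max_s = res.a2;	/* maximum supported timeout, in seconds */
	return 0;
}
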
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 0ad15d55071c..706fb09c2f24 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -35,6 +35,15 @@ struct da9062_watchdog {
bool use_sw_pm;
};
+static unsigned int da9062_wdt_read_timeout(struct da9062_watchdog *wdt)
+{
+ unsigned int val;
+
+ regmap_read(wdt->hw->regmap, DA9062AA_CONTROL_D, &val);
+
+ return wdt_timeout[val & DA9062AA_TWDSCALE_MASK];
+}
+
static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
{
unsigned int i;
@@ -58,11 +67,6 @@ static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt,
unsigned int regval)
{
struct da9062 *chip = wdt->hw;
- int ret;
-
- ret = da9062_reset_watchdog_timer(wdt);
- if (ret)
- return ret;
regmap_update_bits(chip->regmap,
DA9062AA_CONTROL_D,
@@ -183,7 +187,7 @@ MODULE_DEVICE_TABLE(of, da9062_compatible_id_table);
static int da9062_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int ret;
+ unsigned int timeout;
struct da9062 *chip;
struct da9062_watchdog *wdt;
@@ -213,11 +217,19 @@ static int da9062_wdt_probe(struct platform_device *pdev)
watchdog_set_drvdata(&wdt->wdtdev, wdt);
dev_set_drvdata(dev, &wdt->wdtdev);
- ret = devm_watchdog_register_device(dev, &wdt->wdtdev);
- if (ret < 0)
- return ret;
+ timeout = da9062_wdt_read_timeout(wdt);
+ if (timeout)
+ wdt->wdtdev.timeout = timeout;
+
+ /* Set timeout from DT value if available */
+ watchdog_init_timeout(&wdt->wdtdev, 0, dev);
+
+ if (timeout) {
+ da9062_wdt_set_timeout(&wdt->wdtdev, wdt->wdtdev.timeout);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdtdev.status);
+ }
- return da9062_wdt_ping(&wdt->wdtdev);
+ return devm_watchdog_register_device(dev, &wdt->wdtdev);
}
static int __maybe_unused da9062_wdt_suspend(struct device *dev)
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 3d65e92a4e3f..423584252606 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -46,15 +46,16 @@ static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
}
/*
- * Return 0 if watchdog is disabled, else non zero.
+ * Read the currently active timeout.
+ * Zero means the watchdog is disabled.
*/
-static unsigned int da9063_wdt_is_running(struct da9063 *da9063)
+static unsigned int da9063_wdt_read_timeout(struct da9063 *da9063)
{
unsigned int val;
regmap_read(da9063->regmap, DA9063_REG_CONTROL_D, &val);
- return val & DA9063_TWDSCALE_MASK;
+ return wdt_timeout[val & DA9063_TWDSCALE_MASK];
}
static int da9063_wdt_disable_timer(struct da9063 *da9063)
@@ -191,6 +192,7 @@ static int da9063_wdt_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct da9063 *da9063;
struct watchdog_device *wdd;
+ unsigned int timeout;
if (!dev->parent)
return -EINVAL;
@@ -214,13 +216,19 @@ static int da9063_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(wdd, 128);
watchdog_set_drvdata(wdd, da9063);
- /* Set default timeout, maybe override it with DT value, scale it */
wdd->timeout = DA9063_WDG_TIMEOUT;
+
+ /* Use pre-configured timeout if watchdog is already running. */
+ timeout = da9063_wdt_read_timeout(da9063);
+ if (timeout)
+ wdd->timeout = timeout;
+
+ /* Set timeout, maybe override it with DT value, scale it */
watchdog_init_timeout(wdd, 0, dev);
da9063_wdt_set_timeout(wdd, wdd->timeout);
- /* Change the timeout to the default value if the watchdog is running */
- if (da9063_wdt_is_running(da9063)) {
+ /* Update timeout if the watchdog is already running. */
+ if (timeout) {
da9063_wdt_update_timeout(da9063, wdd->timeout);
set_bit(WDOG_HW_RUNNING, &wdd->status);
}
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 1fe472f56cb3..b84f80f7d342 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -2,7 +2,7 @@
/*
* Watchdog driver for IMX2 and later processors
*
- * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <w.sang@pengutronix.de>
+ * Copyright (C) 2010 Wolfram Sang, Pengutronix e.K. <kernel@pengutronix.de>
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* some parts adapted by similar drivers from Darius Augulis and Vladimir
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 60a32469f7de..e9ee22a7cb45 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -175,6 +175,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev)
wdog->timeout = DEFAULT_TIMEOUT;
watchdog_init_timeout(wdog, 0, dev);
+
+ ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout);
+ if (ret)
+ return ret;
+
watchdog_stop_on_reboot(wdog);
watchdog_stop_on_unregister(wdog);
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 22f335e1e164..60ed6252e5f4 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -29,6 +29,7 @@
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/m54xxsim.h>
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 9b91882fe3c4..1616f93dfad7 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -273,6 +273,7 @@ static int omap_wdt_probe(struct platform_device *pdev)
ret = watchdog_register_device(&wdev->wdog);
if (ret) {
+ pm_runtime_put(wdev->dev);
pm_runtime_disable(wdev->dev);
return ret;
}
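
The omap_wdt fix above is the usual probe error-path balancing rule: a successful pm_runtime_get_sync() must be undone with pm_runtime_put() before pm_runtime_disable() when probe bails out. A generic sketch of that pattern; example_register() is a placeholder for whatever registration call may fail:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_register(struct device *dev)
{
	return 0;	/* stand-in for the registration that may fail */
}

static int example_probe_pm(struct device *dev)
{
	int ret;

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);	/* power the block up for probing */

	ret = example_register(dev);
	if (ret) {
		pm_runtime_put(dev);	/* drop the reference taken above */
		pm_runtime_disable(dev);
		return ret;
	}

	pm_runtime_put_sync(dev);	/* probe done; let runtime PM manage it */
	return 0;
}
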
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index dc3c06a92f93..1b9a6dc8f982 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -141,7 +141,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
default:
return -EINVAL;
- };
+ }
return 0;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 61212fc7f0c7..727f11eb46b2 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -13,12 +13,16 @@ config XEN_BALLOON
config XEN_BALLOON_MEMORY_HOTPLUG
bool "Memory hotplug support for Xen balloon driver"
depends on XEN_BALLOON && MEMORY_HOTPLUG
+ default y
help
Memory hotplug support for Xen balloon driver allows expanding memory
available for the system above limit declared at system startup.
It is very useful on critical systems which require long
uptimes without rebooting.
+ It's also very useful for non-PV domains, which can use it to obtain
+ unpopulated physical memory ranges for mapping foreign memory or grants.
+
Memory could be hotplugged in following steps:
1) target domain: ensure that memory auto online policy is in
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 0c142bcab79d..77c57568e5d7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -59,7 +59,6 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index ec975decb5de..b96b11e2b571 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -93,10 +93,8 @@ static int setup_cpu_watcher(struct notifier_block *notifier,
(void)register_xenbus_watch(&cpu_watch);
for_each_possible_cpu(cpu) {
- if (vcpu_online(cpu) == 0) {
- device_offline(get_cpu_device(cpu));
- set_cpu_present(cpu, false);
- }
+ if (vcpu_online(cpu) == 0)
+ disable_hotplug_cpu(cpu);
}
return NOTIFY_DONE;
@@ -119,5 +117,5 @@ static int __init setup_vcpu_hotplug_event(void)
return 0;
}
-arch_initcall(setup_vcpu_hotplug_event);
+late_initcall(setup_vcpu_hotplug_event);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 50651e566564..64a9025a87be 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -625,7 +625,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, op.vaddr);
if (!vma || vma->vm_ops != &gntdev_vmops)
goto out_unlock;
@@ -639,7 +639,7 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
rv = 0;
out_unlock:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
@@ -1014,7 +1014,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
* to the PTE from going stale.
*
* Since this vma's mappings can't be touched without the
- * mmap_sem, and we are holding it now, there is no need for
+ * mmap_lock, and we are holding it now, there is no need for
* the notifier_range locking pattern.
*/
mmu_interval_read_begin(&map->notifier);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7b36b51cdb9f..8d06bf1cc347 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -64,7 +64,6 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
-#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
/* External tools reserve first few grant table entries. */
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 59e85e408c23..dd911e1ff782 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -168,7 +168,7 @@ static const struct pci_device_id platform_pci_tbl[] = {
{0,}
};
-static struct dev_pm_ops platform_pm_ops = {
+static const struct dev_pm_ops platform_pm_ops = {
.resume_noirq = platform_pci_resume,
};
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index c6070e70dd73..a250d118144a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -26,8 +26,6 @@
#include <linux/moduleparam.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -278,7 +276,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
if (rc || list_empty(&pagelist))
goto out;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
{
struct page *page = list_first_entry(&pagelist,
@@ -303,7 +301,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
out_up:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
out:
free_page_list(&pagelist);
@@ -499,7 +497,7 @@ static long privcmd_ioctl_mmap_batch(
}
}
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = find_vma(mm, m.addr);
if (!vma ||
@@ -555,7 +553,7 @@ static long privcmd_ioctl_mmap_batch(
BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_batch_fn, &state));
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (state.global_error) {
/* Write back errors in second pass. */
@@ -576,7 +574,7 @@ out:
return ret;
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
goto out;
}
@@ -741,7 +739,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
return -EPERM;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = find_vma(mm, kdata.addr);
if (!vma || vma->vm_ops != &privcmd_vm_ops) {
@@ -820,7 +818,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
}
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
kfree(pfns);
return rc;
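
The gntdev/privcmd hunks above are part of the tree-wide mmap_sem to mmap_lock conversion: the open-coded rwsem calls on current->mm->mmap_sem become the mmap_read_lock()/mmap_write_lock() wrapper API, with no change in locking semantics. A small sketch of the read-side pattern in its new spelling (the old calls are shown in comments):

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static bool example_vaddr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem); */
	vma = find_vma(mm, addr);
	mapped = vma && addr >= vma->vm_start;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem); */

	return mapped;
}
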
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index cf4ce3e9358d..9eae1fceec1e 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -24,7 +24,7 @@
#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
-struct pvcalls_back_global {
+static struct pvcalls_back_global {
struct list_head frontends;
struct semaphore frontends_lock;
} pvcalls_back_global;
@@ -1088,7 +1088,8 @@ static void set_backend_state(struct xenbus_device *dev,
case XenbusStateInitialised:
switch (state) {
case XenbusStateConnected:
- backend_connect(dev);
+ if (backend_connect(dev))
+ return;
xenbus_switch_state(dev, XenbusStateConnected);
break;
case XenbusStateClosing:
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index 0968859c29d0..108edbcbc040 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -64,7 +64,7 @@ static void xen_get_runstate_snapshot_cpu_delta(
do {
state_time = get64(&state->state_entry_time);
rmb(); /* Hypervisor might update data. */
- *res = READ_ONCE(*state);
+ *res = __READ_ONCE(*state);
rmb(); /* Hypervisor might update data. */
} while (get64(&state->state_entry_time) != state_time ||
(state_time & XEN_RUNSTATE_UPDATE));
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index da51a5d34e6e..059de92aea7d 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -10,6 +10,8 @@
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
*/
+#define dev_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
@@ -154,9 +156,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
* (as if device didn't respond) */
u32 value = 0, tmp_val;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x\n",
- pci_name(dev), size, offset);
+ dev_dbg(&dev->dev, "read %d bytes at 0x%x\n", size, offset);
if (!valid_request(offset, size)) {
err = XEN_PCI_ERR_invalid_offset;
@@ -195,9 +195,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
}
out:
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x = %x\n",
- pci_name(dev), size, offset, value);
+ dev_dbg(&dev->dev, "read %d bytes at 0x%x = %x\n", size, offset, value);
*ret_val = value;
return xen_pcibios_err_to_errno(err);
@@ -212,10 +210,8 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
u32 tmp_val;
int field_start, field_end;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG
- DRV_NAME ": %s: write request %d bytes at 0x%x = %x\n",
- pci_name(dev), size, offset, value);
+ dev_dbg(&dev->dev, "write request %d bytes at 0x%x = %x\n",
+ size, offset, value);
if (!valid_request(offset, size))
return XEN_PCI_ERR_invalid_offset;
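
The xen-pciback hunks here and below all move from hand-rolled printk(KERN_DEBUG DRV_NAME ": %s: ...", pci_name(dev)) logging to the dev_dbg()/dev_warn() helpers, with a per-file dev_fmt() define supplying the driver prefix; the device name that used to be printed via pci_name() is added automatically by dev_printk(). A minimal sketch of the convention (the literal prefix string is an assumption standing in for whatever DRV_NAME expands to in pciback.h):

#define dev_fmt(fmt) "xen-pciback: " fmt	/* assumed prefix; see DRV_NAME */

#include <linux/device.h>
#include <linux/pci.h>

static void example_log_read(struct pci_dev *dev, int size, int offset)
{
	/* printed as "<driver> <pci address>: xen-pciback: read N bytes at 0x..." */
	dev_dbg(&dev->dev, "read %d bytes at 0x%x\n", size, offset);
}
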
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index fb4fccb4aecc..ac45cdc38e85 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -67,53 +68,39 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
dev_data = pci_get_drvdata(dev);
if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "enable\n");
err = pci_enable_device(dev);
if (err)
return err;
if (dev_data)
dev_data->enable_intx = 1;
} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "disable\n");
pci_disable_device(dev);
if (dev_data)
dev_data->enable_intx = 0;
}
if (!dev->is_busmaster && is_master_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "set bus master\n");
pci_set_master(dev);
} else if (dev->is_busmaster && !is_master_cmd(value)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "clear bus master\n");
pci_clear_master(dev);
}
if (!(cmd->val & PCI_COMMAND_INVALIDATE) &&
(value & PCI_COMMAND_INVALIDATE)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG
- DRV_NAME ": %s: enable memory-write-invalidate\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "enable memory-write-invalidate\n");
err = pci_set_mwi(dev);
if (err) {
- pr_warn("%s: cannot enable memory-write-invalidate (%d)\n",
- pci_name(dev), err);
+ dev_warn(&dev->dev, "cannot enable memory-write-invalidate (%d)\n",
+ err);
value &= ~PCI_COMMAND_INVALIDATE;
}
} else if ((cmd->val & PCI_COMMAND_INVALIDATE) &&
!(value & PCI_COMMAND_INVALIDATE)) {
- if (unlikely(verbose_request))
- printk(KERN_DEBUG
- DRV_NAME ": %s: disable memory-write-invalidate\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "disable memory-write-invalidate\n");
pci_clear_mwi(dev);
}
@@ -157,8 +144,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
- pr_warn(DRV_NAME ": driver data not found for %s\n",
- pci_name(dev));
+ dev_warn(&dev->dev, "driver data not found\n");
return XEN_PCI_ERR_op_failed;
}
@@ -194,8 +180,7 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
u32 mask;
if (unlikely(!bar)) {
- pr_warn(DRV_NAME ": driver data not found for %s\n",
- pci_name(dev));
+ dev_warn(&dev->dev, "driver data not found\n");
return XEN_PCI_ERR_op_failed;
}
@@ -228,8 +213,7 @@ static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
struct pci_bar_info *bar = data;
if (unlikely(!bar)) {
- pr_warn(DRV_NAME ": driver data not found for %s\n",
- pci_name(dev));
+ dev_warn(&dev->dev, "driver data not found\n");
return XEN_PCI_ERR_op_failed;
}
@@ -433,8 +417,8 @@ int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
default:
err = -EINVAL;
- pr_err("%s: Unsupported header type %d!\n",
- pci_name(dev), dev->hdr_type);
+ dev_err(&dev->dev, "Unsupported header type %d!\n",
+ dev->hdr_type);
break;
}
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index ed593d1042a6..7dc281086302 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -6,6 +6,8 @@
* Author: Chris Bookholt <hap10@epoch.ncsc.mil>
*/
+#define dev_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
@@ -35,8 +37,8 @@ static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
if (match_one_device(&tmp_quirk->devid, dev) != NULL)
goto out;
tmp_quirk = NULL;
- printk(KERN_DEBUG DRV_NAME
- ": quirk didn't match any device known\n");
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "quirk didn't match any device known\n");
out:
return tmp_quirk;
}
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 7af93d65ed51..e876c3d6dad1 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/module.h>
#include <linux/init.h>
@@ -626,11 +627,11 @@ static void pcistub_remove(struct pci_dev *dev)
if (found_psdev->pdev) {
int domid = xen_find_device_domain_owner(dev);
- pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
+ dev_warn(&dev->dev, "****** removing device %s while still in-use by domain %d! ******\n",
pci_name(found_psdev->dev), domid);
- pr_warn("****** driver domain may still access this device's i/o resources!\n");
- pr_warn("****** shutdown driver domain before binding device\n");
- pr_warn("****** to other drivers or domains\n");
+ dev_warn(&dev->dev, "****** driver domain may still access this device's i/o resources!\n");
+ dev_warn(&dev->dev, "****** shutdown driver domain before binding device\n");
+ dev_warn(&dev->dev, "****** to other drivers or domains\n");
/* N.B. This ends up calling pcistub_put_pci_dev which ends up
* doing the FLR. */
@@ -711,14 +712,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
&aer_op->domain, &aer_op->bus, &aer_op->devfn);
if (!ret) {
- dev_err(&psdev->dev->dev,
- DRV_NAME ": failed to get pcifront device\n");
+ dev_err(&psdev->dev->dev, "failed to get pcifront device\n");
return PCI_ERS_RESULT_NONE;
}
wmb();
- dev_dbg(&psdev->dev->dev,
- DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
+ dev_dbg(&psdev->dev->dev, "aer_op %x dom %x bus %x devfn %x\n",
aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
/*local flag to mark there's aer request, xen_pcibk callback will use
* this flag to judge whether we need to check pci-front give aer
@@ -754,8 +753,7 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
if (test_bit(_XEN_PCIF_active,
(unsigned long *)&sh_info->flags)) {
- dev_dbg(&psdev->dev->dev,
- "schedule pci_conf service in " DRV_NAME "\n");
+ dev_dbg(&psdev->dev->dev, "schedule pci_conf service\n");
xen_pcibk_test_and_schedule_op(psdev->pdev);
}
@@ -786,13 +784,12 @@ static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
PCI_FUNC(dev->devfn));
if (!psdev || !psdev->pdev) {
- dev_err(&dev->dev,
- DRV_NAME " device is not found/assigned\n");
+ dev_err(&dev->dev, "device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ dev_err(&dev->dev, "device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto end;
@@ -844,13 +841,12 @@ static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
PCI_FUNC(dev->devfn));
if (!psdev || !psdev->pdev) {
- dev_err(&dev->dev,
- DRV_NAME " device is not found/assigned\n");
+ dev_err(&dev->dev, "device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ dev_err(&dev->dev, "device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto end;
@@ -902,13 +898,12 @@ static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
PCI_FUNC(dev->devfn));
if (!psdev || !psdev->pdev) {
- dev_err(&dev->dev,
- DRV_NAME " device is not found/assigned\n");
+ dev_err(&dev->dev, "device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ dev_err(&dev->dev, "device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto end;
@@ -956,13 +951,12 @@ static void xen_pcibk_error_resume(struct pci_dev *dev)
PCI_FUNC(dev->devfn));
if (!psdev || !psdev->pdev) {
- dev_err(&dev->dev,
- DRV_NAME " device is not found/assigned\n");
+ dev_err(&dev->dev, "device is not found/assigned\n");
goto end;
}
if (!psdev->pdev->sh_info) {
- dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ dev_err(&dev->dev, "device is not connected or owned"
" by HVM, kill it\n");
kill_domain_by_device(psdev);
goto end;
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 7c95516a860f..f1ed2dbf685c 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -186,8 +186,6 @@ void xen_pcibk_do_op(struct work_struct *data);
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
-extern int verbose_request;
-
void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
#endif
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 787966f44589..e11a7438e1a2 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/moduleparam.h>
#include <linux/wait.h>
@@ -14,9 +15,6 @@
#include <linux/sched.h>
#include "pciback.h"
-int verbose_request;
-module_param(verbose_request, int, 0644);
-
static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);
/* Ensure a device has the fake IRQ handler "turned on/off" and is
@@ -147,9 +145,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
struct xen_pcibk_dev_data *dev_data;
int status;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
-
if (dev->msi_enabled)
status = -EALREADY;
else if (dev->msix_enabled)
@@ -158,9 +153,8 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
status = pci_enable_msi(dev);
if (status) {
- pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
- pci_name(dev), pdev->xdev->otherend_id,
- status);
+ dev_warn_ratelimited(&dev->dev, "error enabling MSI for guest %u: err %d\n",
+ pdev->xdev->otherend_id, status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
@@ -169,9 +163,8 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
* the local domain's IRQ number. */
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
- op->value);
+
+ dev_dbg(&dev->dev, "MSI: %d\n", op->value);
dev_data = pci_get_drvdata(dev);
if (dev_data)
@@ -184,10 +177,6 @@ static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
- pci_name(dev));
-
if (dev->msi_enabled) {
struct xen_pcibk_dev_data *dev_data;
@@ -198,9 +187,9 @@ int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
dev_data->ack_intr = 1;
}
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
- op->value);
+
+ dev_dbg(&dev->dev, "MSI: %d\n", op->value);
+
return 0;
}
@@ -213,9 +202,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
struct msix_entry *entries;
u16 cmd;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
- pci_name(dev));
+ dev_dbg(&dev->dev, "enable MSI-X\n");
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
@@ -248,17 +235,13 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
if (entries[i].vector) {
op->msix_entries[i].vector =
xen_pirq_from_irq(entries[i].vector);
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: " \
- "MSI-X[%d]: %d\n",
- pci_name(dev), i,
- op->msix_entries[i].vector);
+ dev_dbg(&dev->dev, "MSI-X[%d]: %d\n", i,
+ op->msix_entries[i].vector);
}
}
} else
- pr_warn_ratelimited("%s: error enabling MSI-X for guest %u: err %d!\n",
- pci_name(dev), pdev->xdev->otherend_id,
- result);
+ dev_warn_ratelimited(&dev->dev, "error enabling MSI-X for guest %u: err %d!\n",
+ pdev->xdev->otherend_id, result);
kfree(entries);
op->value = result;
@@ -273,10 +256,6 @@ static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
- pci_name(dev));
-
if (dev->msix_enabled) {
struct xen_pcibk_dev_data *dev_data;
@@ -291,9 +270,9 @@ int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
- if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
- pci_name(dev), op->value);
+
+ dev_dbg(&dev->dev, "MSI-X: %d\n", op->value);
+
return 0;
}
#endif
@@ -424,7 +403,7 @@ static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
dev_data->handled++;
if ((dev_data->handled % 1000) == 0) {
if (xen_test_irq_shared(irq)) {
- pr_info("%s IRQ line is not shared "
+ dev_info(&dev->dev, "%s IRQ line is not shared "
"with other domains. Turning ISR off\n",
dev_data->irq_name);
dev_data->ack_intr = 0;
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index f6ba18191c0f..5447b5ab7c76 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -7,6 +7,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#include <linux/list.h>
#include <linux/slab.h>
@@ -105,9 +106,8 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev_entry, list);
if (match_slot(dev, t->dev)) {
- pr_info("vpci: %s: assign to virtual slot %d func %d\n",
- pci_name(dev), slot,
- PCI_FUNC(dev->devfn));
+ dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
+ slot, PCI_FUNC(dev->devfn));
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
func = PCI_FUNC(dev->devfn);
@@ -119,8 +119,8 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
/* Assign to a new slot on the virtual PCI bus */
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot])) {
- pr_info("vpci: %s: assign to virtual slot %d\n",
- pci_name(dev), slot);
+ dev_info(&dev->dev, "vpci: assign to virtual slot %d\n",
+ slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 8c4d05b687b7..38725d97d909 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -31,6 +31,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define dev_fmt pr_fmt
#define DPRINTK(fmt, args...) \
pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
@@ -51,7 +52,6 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
@@ -608,7 +608,7 @@ int xenbus_dev_suspend(struct device *dev)
if (drv->suspend)
err = drv->suspend(xdev);
if (err)
- pr_warn("suspend %s failed: %i\n", dev_name(dev), err);
+ dev_warn(dev, "suspend failed: %i\n", err);
return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
@@ -627,8 +627,7 @@ int xenbus_dev_resume(struct device *dev)
drv = to_xenbus_driver(dev->driver);
err = talk_to_otherend(xdev);
if (err) {
- pr_warn("resume (talk_to_otherend) %s failed: %i\n",
- dev_name(dev), err);
+ dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
return err;
}
@@ -637,15 +636,14 @@ int xenbus_dev_resume(struct device *dev)
if (drv->resume) {
err = drv->resume(xdev);
if (err) {
- pr_warn("resume %s failed: %i\n", dev_name(dev), err);
+ dev_warn(dev, "resume failed: %i\n", err);
return err;
}
}
err = watch_otherend(xdev);
if (err) {
- pr_warn("resume (watch_otherend) %s failed: %d.\n",
- dev_name(dev), err);
+ dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
return err;
}
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 9b2fbe69bccc..2ba699897e6d 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -48,7 +48,6 @@
#include <linux/semaphore.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/hypervisor.h>
#include <xen/xenbus.h>
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 8a1650bbe18f..15379089853b 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/events.h>
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 04f86b8c100e..8cd471da3255 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -45,7 +45,7 @@ config ARCH_USE_GNU_PROPERTY
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
- depends on (ARM || (SUPERH32 && !MMU) || C6X)
+ depends on (ARM || (SUPERH && !MMU) || C6X)
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 10359bea7070..75c4e4043d1d 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -18,6 +18,7 @@ kafs-y := \
file.o \
flock.o \
fsclient.o \
+ fs_operation.o \
fs_probe.o \
inode.o \
main.o \
@@ -30,6 +31,7 @@ kafs-y := \
server_list.o \
super.o \
vlclient.o \
+ vl_alias.o \
vl_list.o \
vl_probe.o \
vl_rotate.o \
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b6d49d646ade..432cb4b23961 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
-#define AFS_MAXCELLNAME 64 /* Maximum length of a cell name */
+#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */
#define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */
#define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */
@@ -146,7 +146,6 @@ struct afs_file_status {
struct afs_status_cb {
struct afs_file_status status;
struct afs_callback callback;
- unsigned int cb_break; /* Pre-op callback break counter */
bool have_status; /* True if status record was retrieved */
bool have_cb; /* True if cb record was retrieved */
bool have_error; /* True if status.abort_code indicates an error */
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index e9b8029920ec..9c65ffb8a523 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -22,6 +22,7 @@ enum AFSVL_Operations {
VLGETENTRYBYNAMEU = 527, /* AFS Get VLDB entry by name (UUID-variant) */
VLGETADDRSU = 533, /* AFS Get addrs for fileserver */
YVLGETENDPOINTS = 64002, /* YFS Get endpoints for file/volume server */
+ YVLGETCELLNAME = 64014, /* YFS Get actual cell name */
VLGETCAPABILITIES = 65537, /* AFS Get server capabilities */
};
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 2dca8df1a18d..7d9b23d981bf 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -21,192 +21,17 @@
#include "internal.h"
/*
- * Create volume and callback interests on a server.
- */
-static struct afs_cb_interest *afs_create_interest(struct afs_server *server,
- struct afs_vnode *vnode)
-{
- struct afs_vol_interest *new_vi, *vi;
- struct afs_cb_interest *new;
- struct hlist_node **pp;
-
- new_vi = kzalloc(sizeof(struct afs_vol_interest), GFP_KERNEL);
- if (!new_vi)
- return NULL;
-
- new = kzalloc(sizeof(struct afs_cb_interest), GFP_KERNEL);
- if (!new) {
- kfree(new_vi);
- return NULL;
- }
-
- new_vi->usage = 1;
- new_vi->vid = vnode->volume->vid;
- INIT_HLIST_NODE(&new_vi->srv_link);
- INIT_HLIST_HEAD(&new_vi->cb_interests);
-
- refcount_set(&new->usage, 1);
- new->sb = vnode->vfs_inode.i_sb;
- new->vid = vnode->volume->vid;
- new->server = afs_get_server(server, afs_server_trace_get_new_cbi);
- INIT_HLIST_NODE(&new->cb_vlink);
-
- write_lock(&server->cb_break_lock);
-
- for (pp = &server->cb_volumes.first; *pp; pp = &(*pp)->next) {
- vi = hlist_entry(*pp, struct afs_vol_interest, srv_link);
- if (vi->vid < new_vi->vid)
- continue;
- if (vi->vid > new_vi->vid)
- break;
- vi->usage++;
- goto found_vi;
- }
-
- new_vi->srv_link.pprev = pp;
- new_vi->srv_link.next = *pp;
- if (*pp)
- (*pp)->pprev = &new_vi->srv_link.next;
- *pp = &new_vi->srv_link;
- vi = new_vi;
- new_vi = NULL;
-found_vi:
-
- new->vol_interest = vi;
- hlist_add_head(&new->cb_vlink, &vi->cb_interests);
-
- write_unlock(&server->cb_break_lock);
- kfree(new_vi);
- return new;
-}
-
-/*
- * Set up an interest-in-callbacks record for a volume on a server and
- * register it with the server.
- * - Called with vnode->io_lock held.
- */
-int afs_register_server_cb_interest(struct afs_vnode *vnode,
- struct afs_server_list *slist,
- unsigned int index)
-{
- struct afs_server_entry *entry = &slist->servers[index];
- struct afs_cb_interest *cbi, *vcbi, *new, *old;
- struct afs_server *server = entry->server;
-
-again:
- vcbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock));
- if (vcbi && likely(vcbi == entry->cb_interest))
- return 0;
-
- read_lock(&slist->lock);
- cbi = afs_get_cb_interest(entry->cb_interest);
- read_unlock(&slist->lock);
-
- if (vcbi) {
- if (vcbi == cbi) {
- afs_put_cb_interest(afs_v2net(vnode), cbi);
- return 0;
- }
-
- /* Use a new interest in the server list for the same server
- * rather than an old one that's still attached to a vnode.
- */
- if (cbi && vcbi->server == cbi->server) {
- write_seqlock(&vnode->cb_lock);
- old = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- rcu_assign_pointer(vnode->cb_interest, cbi);
- write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), old);
- return 0;
- }
-
- /* Re-use the one attached to the vnode. */
- if (!cbi && vcbi->server == server) {
- write_lock(&slist->lock);
- if (entry->cb_interest) {
- write_unlock(&slist->lock);
- afs_put_cb_interest(afs_v2net(vnode), cbi);
- goto again;
- }
-
- entry->cb_interest = cbi;
- write_unlock(&slist->lock);
- return 0;
- }
- }
-
- if (!cbi) {
- new = afs_create_interest(server, vnode);
- if (!new)
- return -ENOMEM;
-
- write_lock(&slist->lock);
- if (!entry->cb_interest) {
- entry->cb_interest = afs_get_cb_interest(new);
- cbi = new;
- new = NULL;
- } else {
- cbi = afs_get_cb_interest(entry->cb_interest);
- }
- write_unlock(&slist->lock);
- afs_put_cb_interest(afs_v2net(vnode), new);
- }
-
- ASSERT(cbi);
-
- /* Change the server the vnode is using. This entails scrubbing any
- * interest the vnode had in the previous server it was using.
- */
- write_seqlock(&vnode->cb_lock);
-
- old = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- rcu_assign_pointer(vnode->cb_interest, cbi);
- vnode->cb_s_break = cbi->server->cb_s_break;
- vnode->cb_v_break = vnode->volume->cb_v_break;
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
-
- write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), old);
- return 0;
-}
-
-/*
- * Remove an interest on a server.
- */
-void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
-{
- struct afs_vol_interest *vi;
-
- if (cbi && refcount_dec_and_test(&cbi->usage)) {
- if (!hlist_unhashed(&cbi->cb_vlink)) {
- write_lock(&cbi->server->cb_break_lock);
-
- hlist_del_init(&cbi->cb_vlink);
- vi = cbi->vol_interest;
- cbi->vol_interest = NULL;
- if (--vi->usage == 0)
- hlist_del(&vi->srv_link);
- else
- vi = NULL;
-
- write_unlock(&cbi->server->cb_break_lock);
- if (vi)
- kfree_rcu(vi, rcu);
- afs_put_server(net, cbi->server, afs_server_trace_put_cbi);
- }
- kfree_rcu(cbi, rcu);
- }
-}
-
-/*
- * allow the fileserver to request callback state (re-)initialisation
+ * Allow the fileserver to request callback state (re-)initialisation.
+ * Unfortunately, UUIDs are not guaranteed unique.
*/
void afs_init_callback_state(struct afs_server *server)
{
- server->cb_s_break++;
+ rcu_read_lock();
+ do {
+ server->cb_s_break++;
+ server = rcu_dereference(server->uuid_next);
+ } while (0);
+ rcu_read_unlock();
}
/*
@@ -238,69 +63,109 @@ void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason
}
/*
+ * Look up a volume by volume ID under RCU conditions.
+ */
+static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
+ afs_volid_t vid)
+{
+ struct afs_volume *volume = NULL;
+ struct rb_node *p;
+ int seq = 0;
+
+ do {
+ /* Unfortunately, rbtree walking doesn't give reliable results
+ * under just the RCU read lock, so we have to check for
+ * changes.
+ */
+ read_seqbegin_or_lock(&cell->volume_lock, &seq);
+
+ p = rcu_dereference_raw(cell->volumes.rb_node);
+ while (p) {
+ volume = rb_entry(p, struct afs_volume, cell_node);
+
+ if (volume->vid < vid)
+ p = rcu_dereference_raw(p->rb_left);
+ else if (volume->vid > vid)
+ p = rcu_dereference_raw(p->rb_right);
+ else
+ break;
+ volume = NULL;
+ }
+
+ } while (need_seqretry(&cell->volume_lock, seq));
+
+ done_seqretry(&cell->volume_lock, seq);
+ return volume;
+}
+
+/*
* allow the fileserver to explicitly break one callback
* - happens when
* - the backing file is changed
* - a lock is released
*/
-static void afs_break_one_callback(struct afs_server *server,
+static void afs_break_one_callback(struct afs_volume *volume,
struct afs_fid *fid)
{
- struct afs_vol_interest *vi;
- struct afs_cb_interest *cbi;
- struct afs_iget_data data;
+ struct super_block *sb;
struct afs_vnode *vnode;
struct inode *inode;
- read_lock(&server->cb_break_lock);
- hlist_for_each_entry(vi, &server->cb_volumes, srv_link) {
- if (vi->vid < fid->vid)
- continue;
- if (vi->vid > fid->vid) {
- vi = NULL;
- break;
- }
- //atomic_inc(&vi->usage);
- break;
+ if (fid->vnode == 0 && fid->unique == 0) {
+ /* The callback break applies to an entire volume. */
+ write_lock(&volume->cb_v_break_lock);
+ volume->cb_v_break++;
+ trace_afs_cb_break(fid, volume->cb_v_break,
+ afs_cb_break_for_volume_callback, false);
+ write_unlock(&volume->cb_v_break_lock);
+ return;
}
- /* TODO: Find all matching volumes if we couldn't match the server and
- * break them anyway.
+ /* See if we can find a matching inode - even an I_NEW inode needs to
+ * be marked as it can have its callback broken before we finish
+ * setting up the local inode.
*/
- if (!vi)
- goto out;
+ sb = rcu_dereference(volume->sb);
+ if (!sb)
+ return;
+
+ inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
+ if (inode) {
+ vnode = AFS_FS_I(inode);
+ afs_break_callback(vnode, afs_cb_break_for_callback);
+ } else {
+ trace_afs_cb_miss(fid, afs_cb_break_for_callback);
+ }
+}
+
+static void afs_break_some_callbacks(struct afs_server *server,
+ struct afs_callback_break *cbb,
+ size_t *_count)
+{
+ struct afs_callback_break *residue = cbb;
+ struct afs_volume *volume;
+ afs_volid_t vid = cbb->fid.vid;
+ size_t i;
- /* Step through all interested superblocks. There may be more than one
- * because of cell aliasing.
+ volume = afs_lookup_volume_rcu(server->cell, vid);
+
+ /* TODO: Find all matching volumes if we couldn't match the server and
+ * break them anyway.
*/
- hlist_for_each_entry(cbi, &vi->cb_interests, cb_vlink) {
- if (fid->vnode == 0 && fid->unique == 0) {
- /* The callback break applies to an entire volume. */
- struct afs_super_info *as = AFS_FS_S(cbi->sb);
- struct afs_volume *volume = as->volume;
- write_lock(&volume->cb_v_break_lock);
- volume->cb_v_break++;
- trace_afs_cb_break(fid, volume->cb_v_break,
- afs_cb_break_for_volume_callback, false);
- write_unlock(&volume->cb_v_break_lock);
+ for (i = *_count; i > 0; cbb++, i--) {
+ if (cbb->fid.vid == vid) {
+ _debug("- Fid { vl=%08llx n=%llu u=%u }",
+ cbb->fid.vid,
+ cbb->fid.vnode,
+ cbb->fid.unique);
+ --*_count;
+ if (volume)
+ afs_break_one_callback(volume, &cbb->fid);
} else {
- data.volume = NULL;
- data.fid = *fid;
- inode = ilookup5_nowait(cbi->sb, fid->vnode,
- afs_iget5_test, &data);
- if (inode) {
- vnode = AFS_FS_I(inode);
- afs_break_callback(vnode, afs_cb_break_for_callback);
- iput(inode);
- } else {
- trace_afs_cb_miss(fid, afs_cb_break_for_callback);
- }
+ *residue++ = *cbb;
}
}
-
-out:
- read_unlock(&server->cb_break_lock);
}
/*
@@ -313,29 +178,11 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
ASSERT(server != NULL);
- /* TODO: Sort the callback break list by volume ID */
+ rcu_read_lock();
- for (; count > 0; callbacks++, count--) {
- _debug("- Fid { vl=%08llx n=%llu u=%u }",
- callbacks->fid.vid,
- callbacks->fid.vnode,
- callbacks->fid.unique);
- afs_break_one_callback(server, &callbacks->fid);
- }
+ while (count > 0)
+ afs_break_some_callbacks(server, callbacks, &count);
- _leave("");
+ rcu_read_unlock();
return;
}
-
-/*
- * Clear the callback interests in a server list.
- */
-void afs_clear_callback_interests(struct afs_net *net, struct afs_server_list *slist)
-{
- int i;
-
- for (i = 0; i < slist->nr_servers; i++) {
- afs_put_cb_interest(net, slist->servers[i].cb_interest);
- slist->servers[i].cb_interest = NULL;
- }
-}
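
afs_lookup_volume_rcu() added above walks the cell's volume rbtree with only rcu_read_lock() held (afs_break_callbacks() now takes it), so it uses the read_seqbegin_or_lock()/need_seqretry() pattern to detect concurrent rebalancing and retry, taking cell->volume_lock on the second pass. A generic sketch of that lockless-then-locked lookup; the node type and key field are illustrative, the real code keys on afs_volume::vid:

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>

struct example_node {
	struct rb_node	link;
	u64		key;
};

/* Caller holds rcu_read_lock(); the returned node is only RCU-safe. */
static struct example_node *example_lookup_rcu(struct rb_root *root,
					       seqlock_t *lock, u64 key)
{
	struct example_node *found;
	struct rb_node *p;
	int seq = 0;

	do {
		/* Lockless first pass; need_seqretry() forces a locked retry
		 * if the tree was modified while we walked it.
		 */
		read_seqbegin_or_lock(lock, &seq);

		found = NULL;
		p = rcu_dereference_raw(root->rb_node);
		while (p) {
			struct example_node *n =
				rb_entry(p, struct example_node, link);

			if (key < n->key) {
				p = rcu_dereference_raw(p->rb_left);
			} else if (key > n->key) {
				p = rcu_dereference_raw(p->rb_right);
			} else {
				found = n;
				break;
			}
		}
	} while (need_seqretry(lock, seq));

	done_seqretry(lock, seq);
	return found;
}
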
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 78ba5f932287..005921e3b38d 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -161,9 +161,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
atomic_set(&cell->usage, 2);
INIT_WORK(&cell->manager, afs_manage_cell);
- INIT_LIST_HEAD(&cell->proc_volumes);
- rwlock_init(&cell->proc_lock);
+ cell->volumes = RB_ROOT;
+ INIT_HLIST_HEAD(&cell->proc_volumes);
+ seqlock_init(&cell->volume_lock);
+ cell->fs_servers = RB_ROOT;
+ seqlock_init(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
+ cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
/* Provide a VL server list, filling it in if we were given a list of
* addresses to use.
@@ -481,7 +485,9 @@ static void afs_cell_destroy(struct rcu_head *rcu)
ASSERTCMP(atomic_read(&cell->usage), ==, 0);
+ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
+ afs_put_cell(cell->net, cell->alias_of);
key_put(cell->anonymous_key);
kfree(cell);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 380ad5ace7cf..bef413818af7 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -118,8 +118,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
{
_enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);
- call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall);
-
switch (call->operation_ID) {
case CBCallBack:
call->type = &afs_SRXCBCallBack;
@@ -150,49 +148,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
}
/*
- * Record a probe to the cache manager from a server.
- */
-static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
-{
- _enter("");
-
- if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) &&
- !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) {
- if (server->cm_epoch == call->epoch)
- return 0;
-
- if (!server->probe.said_rebooted) {
- pr_notice("kAFS: FS rebooted %pU\n", &server->uuid);
- server->probe.said_rebooted = true;
- }
- }
-
- spin_lock(&server->probe_lock);
-
- if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
- server->cm_epoch = call->epoch;
- server->probe.cm_epoch = call->epoch;
- goto out;
- }
-
- if (server->probe.cm_probed &&
- call->epoch != server->probe.cm_epoch &&
- !server->probe.said_inconsistent) {
- pr_notice("kAFS: FS endpoints inconsistent %pU\n",
- &server->uuid);
- server->probe.said_inconsistent = true;
- }
-
- if (!server->probe.cm_probed || call->epoch == server->cm_epoch)
- server->probe.cm_epoch = server->cm_epoch;
-
-out:
- server->probe.cm_probed = true;
- spin_unlock(&server->probe_lock);
- return 0;
-}
-
-/*
* Find the server record by peer address and record a probe to the cache
* manager from a server.
*/
@@ -210,7 +165,7 @@ static int afs_find_cm_server_by_peer(struct afs_call *call)
}
call->server = server;
- return afs_record_cm_probe(call, server);
+ return 0;
}
/*
@@ -231,7 +186,7 @@ static int afs_find_cm_server_by_uuid(struct afs_call *call,
}
call->server = server;
- return afs_record_cm_probe(call, server);
+ return 0;
}
/*
@@ -268,7 +223,9 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
* to maintain cache coherency.
*/
if (call->server) {
- trace_afs_server(call->server, atomic_read(&call->server->usage),
+ trace_afs_server(call->server,
+ atomic_read(&call->server->ref),
+ atomic_read(&call->server->active),
afs_server_trace_callback);
afs_break_callbacks(call->server, call->count, call->request);
}
@@ -305,8 +262,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
if (call->count > AFSCBMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_cb_fid_count);
+ return afs_protocol_error(call, afs_eproto_cb_fid_count);
call->buffer = kmalloc(array3_size(call->count, 3, 4),
GFP_KERNEL);
@@ -351,8 +307,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
call->count2 = ntohl(call->tmp);
_debug("CB count: %u", call->count2);
if (call->count2 != call->count && call->count2 != 0)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_cb_count);
+ return afs_protocol_error(call, afs_eproto_cb_count);
call->iter = &call->def_iter;
iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
call->unmarshall++;
@@ -509,7 +464,8 @@ static int afs_deliver_cb_probe(struct afs_call *call)
}
/*
- * allow the fileserver to quickly find out if the fileserver has been rebooted
+ * Allow the fileserver to quickly find out if the cache manager has been
+ * rebooted.
*/
static void SRXAFSCB_ProbeUuid(struct work_struct *work)
{
@@ -581,7 +537,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_uuid(call, call->request);
+ return afs_find_cm_server_by_peer(call);
}
/*
@@ -672,8 +628,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
if (call->count > YFSCBMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_cb_fid_count);
+ return afs_protocol_error(call, afs_eproto_cb_fid_count);
size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid));
call->buffer = kmalloc(size, GFP_KERNEL);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index d1e1caa23c8b..aa1d34141ea3 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -99,8 +99,6 @@ struct afs_lookup_cookie {
bool found;
bool one_only;
unsigned short nr_fids;
- struct inode **inodes;
- struct afs_status_cb *statuses;
struct afs_fid fids[50];
};
@@ -618,8 +616,8 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
}
} else if (cookie->name.len == nlen &&
memcmp(cookie->name.name, name, nlen) == 0) {
- cookie->fids[0].vnode = ino;
- cookie->fids[0].unique = dtype;
+ cookie->fids[1].vnode = ino;
+ cookie->fids[1].unique = dtype;
cookie->found = 1;
if (cookie->one_only)
return -1;
@@ -631,6 +629,111 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
}
/*
+ * Deal with the result of a successful lookup operation. Turn all the files
+ * into inodes and save the first one - which is the one we actually want.
+ */
+static void afs_do_lookup_success(struct afs_operation *op)
+{
+ struct afs_vnode_param *vp;
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ u32 abort_code;
+ int i;
+
+ _enter("");
+
+ for (i = 0; i < op->nr_files; i++) {
+ switch (i) {
+ case 0:
+ vp = &op->file[0];
+ abort_code = vp->scb.status.abort_code;
+ if (abort_code != 0) {
+ op->abort_code = abort_code;
+ op->error = afs_abort_to_error(abort_code);
+ }
+ break;
+
+ case 1:
+ vp = &op->file[1];
+ break;
+
+ default:
+ vp = &op->more_files[i - 2];
+ break;
+ }
+
+ if (!vp->scb.have_status && !vp->scb.have_error)
+ continue;
+
+ _debug("do [%u]", i);
+ if (vp->vnode) {
+ if (!test_bit(AFS_VNODE_UNSET, &vp->vnode->flags))
+ afs_vnode_commit_status(op, vp);
+ } else if (vp->scb.status.abort_code == 0) {
+ inode = afs_iget(op, vp);
+ if (!IS_ERR(inode)) {
+ vnode = AFS_FS_I(inode);
+ afs_cache_permit(vnode, op->key,
+ 0 /* Assume vnode->cb_break is 0 */ +
+ op->cb_v_break,
+ &vp->scb);
+ vp->vnode = vnode;
+ vp->put_vnode = true;
+ }
+ } else {
+ _debug("- abort %d %llx:%llx.%x",
+ vp->scb.status.abort_code,
+ vp->fid.vid, vp->fid.vnode, vp->fid.unique);
+ }
+ }
+
+ _leave("");
+}
+
+static const struct afs_operation_ops afs_inline_bulk_status_operation = {
+ .issue_afs_rpc = afs_fs_inline_bulk_status,
+ .issue_yfs_rpc = yfs_fs_inline_bulk_status,
+ .success = afs_do_lookup_success,
+};
+
+static const struct afs_operation_ops afs_fetch_status_operation = {
+ .issue_afs_rpc = afs_fs_fetch_status,
+ .issue_yfs_rpc = yfs_fs_fetch_status,
+ .success = afs_do_lookup_success,
+};
+
+/*
+ * See if we know that the server we expect to use doesn't support
+ * FS.InlineBulkStatus.
+ */
+static bool afs_server_supports_ibulk(struct afs_vnode *dvnode)
+{
+ struct afs_server_list *slist;
+ struct afs_volume *volume = dvnode->volume;
+ struct afs_server *server;
+ bool ret = true;
+ int i;
+
+ if (!test_bit(AFS_VOLUME_MAYBE_NO_IBULK, &volume->flags))
+ return true;
+
+ rcu_read_lock();
+ slist = rcu_dereference(volume->servers);
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ server = slist->servers[i].server;
+ if (server == dvnode->cb_server) {
+ if (test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags))
+ ret = false;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
* Do a lookup in a directory. We make use of bulk lookup to query a slew of
* files in one go and create inodes for them. The inode of the file we were
* asked for is returned.
@@ -639,16 +742,13 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct key *key)
{
struct afs_lookup_cookie *cookie;
- struct afs_cb_interest *dcbi, *cbi = NULL;
- struct afs_super_info *as = dir->i_sb->s_fs_info;
- struct afs_status_cb *scb;
- struct afs_iget_data iget_data;
- struct afs_fs_cursor fc;
- struct afs_server *server;
+ struct afs_vnode_param *vp;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
- int ret, i;
+ long ret;
+ int i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -656,72 +756,74 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
if (!cookie)
return ERR_PTR(-ENOMEM);
+ for (i = 0; i < ARRAY_SIZE(cookie->fids); i++)
+ cookie->fids[i].vid = dvnode->fid.vid;
cookie->ctx.actor = afs_lookup_filldir;
cookie->name = dentry->d_name;
- cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */
-
- read_seqlock_excl(&dvnode->cb_lock);
- dcbi = rcu_dereference_protected(dvnode->cb_interest,
- lockdep_is_held(&dvnode->cb_lock.lock));
- if (dcbi) {
- server = dcbi->server;
- if (server &&
- test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags))
- cookie->one_only = true;
- }
- read_sequnlock_excl(&dvnode->cb_lock);
+ cookie->nr_fids = 2; /* slot 0 is saved for the fid we actually want
+ * and slot 1 for the directory */
- for (i = 0; i < 50; i++)
- cookie->fids[i].vid = as->volume->vid;
+ if (!afs_server_supports_ibulk(dvnode))
+ cookie->one_only = true;
/* search the directory */
ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
- if (ret < 0) {
- inode = ERR_PTR(ret);
+ if (ret < 0)
goto out;
- }
dentry->d_fsdata = (void *)(unsigned long)data_version;
- inode = ERR_PTR(-ENOENT);
+ ret = -ENOENT;
if (!cookie->found)
goto out;
/* Check to see if we already have an inode for the primary fid. */
- iget_data.fid = cookie->fids[0];
- iget_data.volume = dvnode->volume;
- iget_data.cb_v_break = dvnode->volume->cb_v_break;
- iget_data.cb_s_break = 0;
- inode = ilookup5(dir->i_sb, cookie->fids[0].vnode,
- afs_iget5_test, &iget_data);
+ inode = ilookup5(dir->i_sb, cookie->fids[1].vnode,
+ afs_ilookup5_test_by_fid, &cookie->fids[1]);
if (inode)
- goto out;
+ goto out; /* We do */
- /* Need space for examining all the selected files */
- inode = ERR_PTR(-ENOMEM);
- cookie->statuses = kvcalloc(cookie->nr_fids, sizeof(struct afs_status_cb),
- GFP_KERNEL);
- if (!cookie->statuses)
+ /* Okay, we didn't find it. We need to query the server - and whilst
+ * we're doing that, we're going to attempt to look up a bunch of other
+ * vnodes also.
+ */
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto out;
+ }
- cookie->inodes = kcalloc(cookie->nr_fids, sizeof(struct inode *),
- GFP_KERNEL);
- if (!cookie->inodes)
- goto out_s;
+ afs_op_set_vnode(op, 0, dvnode);
+ afs_op_set_fid(op, 1, &cookie->fids[1]);
- for (i = 1; i < cookie->nr_fids; i++) {
- scb = &cookie->statuses[i];
+ op->nr_files = cookie->nr_fids;
+ _debug("nr_files %u", op->nr_files);
- /* Find any inodes that already exist and get their
- * callback counters.
- */
- iget_data.fid = cookie->fids[i];
- ti = ilookup5_nowait(dir->i_sb, iget_data.fid.vnode,
- afs_iget5_test, &iget_data);
- if (!IS_ERR_OR_NULL(ti)) {
- vnode = AFS_FS_I(ti);
- scb->cb_break = afs_calc_vnode_cb_break(vnode);
- cookie->inodes[i] = ti;
+ /* Need space for examining all the selected files */
+ op->error = -ENOMEM;
+ if (op->nr_files > 2) {
+ op->more_files = kvcalloc(op->nr_files - 2,
+ sizeof(struct afs_vnode_param),
+ GFP_KERNEL);
+ if (!op->more_files)
+ goto out_op;
+
+ for (i = 2; i < op->nr_files; i++) {
+ vp = &op->more_files[i - 2];
+ vp->fid = cookie->fids[i];
+
+ /* Find any inodes that already exist and get their
+ * callback counters.
+ */
+ ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode,
+ afs_ilookup5_test_by_fid, &vp->fid);
+ if (!IS_ERR_OR_NULL(ti)) {
+ vnode = AFS_FS_I(ti);
+ vp->dv_before = vnode->status.data_version;
+ vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
+ vp->vnode = vnode;
+ vp->put_vnode = true;
+ }
}
}
@@ -729,120 +831,40 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* lookups contained therein are stored in the reply without aborting
* the whole operation.
*/
- if (cookie->one_only)
- goto no_inline_bulk_status;
-
- inode = ERR_PTR(-ERESTARTSYS);
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- while (afs_select_fileserver(&fc)) {
- if (test_bit(AFS_SERVER_FL_NO_IBULK,
- &fc.cbi->server->flags)) {
- fc.ac.abort_code = RX_INVALID_OPERATION;
- fc.ac.error = -ECONNABORTED;
- break;
- }
- iget_data.cb_v_break = dvnode->volume->cb_v_break;
- iget_data.cb_s_break = fc.cbi->server->cb_s_break;
- afs_fs_inline_bulk_status(&fc,
- afs_v2net(dvnode),
- cookie->fids,
- cookie->statuses,
- cookie->nr_fids, NULL);
- }
-
- if (fc.ac.error == 0)
- cbi = afs_get_cb_interest(fc.cbi);
- if (fc.ac.abort_code == RX_INVALID_OPERATION)
- set_bit(AFS_SERVER_FL_NO_IBULK, &fc.cbi->server->flags);
- inode = ERR_PTR(afs_end_vnode_operation(&fc));
+ op->error = -ENOTSUPP;
+ if (!cookie->one_only) {
+ op->ops = &afs_inline_bulk_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
}
- if (!IS_ERR(inode))
- goto success;
- if (fc.ac.abort_code != RX_INVALID_OPERATION)
- goto out_c;
-
-no_inline_bulk_status:
- /* We could try FS.BulkStatus next, but this aborts the entire op if
- * any of the lookups fails - so, for the moment, revert to
- * FS.FetchStatus for just the primary fid.
- */
- inode = ERR_PTR(-ERESTARTSYS);
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- while (afs_select_fileserver(&fc)) {
- iget_data.cb_v_break = dvnode->volume->cb_v_break;
- iget_data.cb_s_break = fc.cbi->server->cb_s_break;
- scb = &cookie->statuses[0];
- afs_fs_fetch_status(&fc,
- afs_v2net(dvnode),
- cookie->fids,
- scb,
- NULL);
- }
-
- if (fc.ac.error == 0)
- cbi = afs_get_cb_interest(fc.cbi);
- inode = ERR_PTR(afs_end_vnode_operation(&fc));
+ if (op->error == -ENOTSUPP) {
+ /* We could try FS.BulkStatus next, but this aborts the entire
+ * op if any of the lookups fails - so, for the moment, revert
+ * to FS.FetchStatus for op->file[1].
+ */
+ op->fetch_status.which = 1;
+ op->ops = &afs_fetch_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
}
+ inode = ERR_PTR(op->error);
- if (IS_ERR(inode))
- goto out_c;
-
-success:
- /* Turn all the files into inodes and save the first one - which is the
- * one we actually want.
- */
- scb = &cookie->statuses[0];
- if (scb->status.abort_code != 0)
- inode = ERR_PTR(afs_abort_to_error(scb->status.abort_code));
-
- for (i = 0; i < cookie->nr_fids; i++) {
- struct afs_status_cb *scb = &cookie->statuses[i];
-
- if (!scb->have_status && !scb->have_error)
- continue;
-
- if (cookie->inodes[i]) {
- struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
-
- if (test_bit(AFS_VNODE_UNSET, &iv->flags))
- continue;
-
- afs_vnode_commit_status(&fc, iv,
- scb->cb_break, NULL, scb);
- continue;
- }
-
- if (scb->status.abort_code != 0)
- continue;
-
- iget_data.fid = cookie->fids[i];
- ti = afs_iget(dir->i_sb, key, &iget_data, scb, cbi, dvnode);
- if (!IS_ERR(ti))
- afs_cache_permit(AFS_FS_I(ti), key,
- 0 /* Assume vnode->cb_break is 0 */ +
- iget_data.cb_v_break,
- scb);
- if (i == 0) {
- inode = ti;
- } else {
- if (!IS_ERR(ti))
- iput(ti);
- }
+out_op:
+ if (op->error == 0) {
+ inode = &op->file[1].vnode->vfs_inode;
+ op->file[1].vnode = NULL;
}
-out_c:
- afs_put_cb_interest(afs_v2net(dvnode), cbi);
- if (cookie->inodes) {
- for (i = 0; i < cookie->nr_fids; i++)
- iput(cookie->inodes[i]);
- kfree(cookie->inodes);
- }
-out_s:
- kvfree(cookie->statuses);
+ if (op->file[0].scb.have_status)
+ dentry->d_fsdata = (void *)(unsigned long)op->file[0].scb.status.data_version;
+ else
+ dentry->d_fsdata = (void *)(unsigned long)op->file[0].dv_before;
+ ret = afs_put_operation(op);
out:
kfree(cookie);
- return inode;
+ _leave("");
+ return inode ?: ERR_PTR(ret);
}
/*
@@ -958,6 +980,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
if (!IS_ERR_OR_NULL(inode))
fid = AFS_FS_I(inode)->fid;
+ _debug("splice %p", dentry->d_inode);
d = d_splice_alias(inode, dentry);
if (!IS_ERR_OR_NULL(d)) {
d->d_fsdata = dentry->d_fsdata;
@@ -965,6 +988,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
} else {
trace_afs_lookup(dvnode, &dentry->d_name, &fid);
}
+ _leave("");
return d;
}
@@ -1215,130 +1239,97 @@ void afs_d_release(struct dentry *dentry)
/*
* Create a new inode for create/mkdir/symlink
*/
-static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
- struct dentry *new_dentry,
- struct afs_iget_data *new_data,
- struct afs_status_cb *new_scb)
+static void afs_vnode_new_inode(struct afs_operation *op)
{
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *vnode;
struct inode *inode;
- if (fc->ac.error < 0)
- return;
+ _enter("");
+
+ ASSERTCMP(op->error, ==, 0);
- inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
- new_data, new_scb, fc->cbi, fc->vnode);
+ inode = afs_iget(op, vp);
if (IS_ERR(inode)) {
/* ENOMEM or EINTR at a really inconvenient time - just abandon
* the new directory on the server.
*/
- fc->ac.error = PTR_ERR(inode);
+ op->error = PTR_ERR(inode);
return;
}
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- if (fc->ac.error == 0)
- afs_cache_permit(vnode, fc->key, vnode->cb_break, new_scb);
- d_instantiate(new_dentry, inode);
+ if (!op->error)
+ afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb);
+ d_instantiate(op->dentry, inode);
}
-static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
- struct afs_iget_data *iget_data)
+static void afs_create_success(struct afs_operation *op)
{
- iget_data->volume = fc->vnode->volume;
- iget_data->cb_v_break = fc->vnode->volume->cb_v_break;
- iget_data->cb_s_break = fc->cbi->server->cb_s_break;
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+ afs_vnode_new_inode(op);
}
-/*
- * Note that a dentry got changed. We need to set d_fsdata to the data version
- * number derived from the result of the operation. It doesn't matter if
- * d_fsdata goes backwards as we'll just revalidate.
- */
-static void afs_update_dentry_version(struct afs_fs_cursor *fc,
- struct dentry *dentry,
- struct afs_status_cb *scb)
+static void afs_create_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid,
+ op->create.reason);
+ up_write(&dvnode->validate_lock);
+}
+
+static void afs_create_put(struct afs_operation *op)
{
- if (fc->ac.error == 0)
- dentry->d_fsdata =
- (void *)(unsigned long)scb->status.data_version;
+ _enter("op=%08x", op->debug_id);
+
+ if (op->error)
+ d_drop(op->dentry);
}
+static const struct afs_operation_ops afs_mkdir_operation = {
+ .issue_afs_rpc = afs_fs_make_dir,
+ .issue_yfs_rpc = yfs_fs_make_dir,
+ .success = afs_create_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_create_put,
+};
+
/*
* create a directory on an AFS filesystem
*/
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- struct afs_iget_data iget_data;
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct key *key;
- afs_dataversion_t data_version;
- int ret;
-
- mode |= S_IFDIR;
_enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
-
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ d_drop(dentry);
+ return PTR_ERR(op);
}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_prep_for_new_inode(&fc, &iget_data);
- afs_fs_create(&fc, dentry->d_name.name, mode,
- &scb[0], &iget_data.fid, &scb[1]);
- }
-
- afs_check_for_remote_deletion(&fc, dvnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
- }
-
- if (ret == 0) {
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
- afs_edit_dir_for_create);
- up_write(&dvnode->validate_lock);
- }
-
- key_put(key);
- kfree(scb);
- _leave(" = 0");
- return 0;
-
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
-error:
- d_drop(dentry);
- _leave(" = %d", ret);
- return ret;
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
+ op->dentry = dentry;
+ op->create.mode = S_IFDIR | mode;
+ op->create.reason = afs_edit_dir_for_mkdir;
+ op->ops = &afs_mkdir_operation;
+ return afs_do_sync_operation(op);
}
/*
@@ -1356,76 +1347,86 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
}
}
+static void afs_rmdir_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+}
+
+static void afs_rmdir_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_dir_remove_subdir(op->dentry);
+
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_remove(dvnode, &op->dentry->d_name,
+ afs_edit_dir_for_rmdir);
+ up_write(&dvnode->validate_lock);
+}
+
+static void afs_rmdir_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->file[1].vnode)
+ up_write(&op->file[1].vnode->rmdir_lock);
+}
+
+static const struct afs_operation_ops afs_rmdir_operation = {
+ .issue_afs_rpc = afs_fs_remove_dir,
+ .issue_yfs_rpc = yfs_fs_remove_dir,
+ .success = afs_rmdir_success,
+ .edit_dir = afs_rmdir_edit_dir,
+ .put = afs_rmdir_put,
+};
+
/*
* remove a directory from an AFS filesystem
*/
static int afs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
- struct key *key;
- afs_dataversion_t data_version;
int ret;
_enter("{%llx:%llu},{%pd}",
dvnode->fid.vid, dvnode->fid.vnode, dentry);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error;
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
+
+ op->dentry = dentry;
+ op->ops = &afs_rmdir_operation;
/* Try to make sure we have a callback promise on the victim. */
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
- ret = afs_validate(vnode, key);
+ ret = afs_validate(vnode, op->key);
if (ret < 0)
- goto error_key;
+ goto error;
}
if (vnode) {
ret = down_write_killable(&vnode->rmdir_lock);
if (ret < 0)
- goto error_key;
+ goto error;
+ op->file[1].vnode = vnode;
}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_remove(&fc, vnode, dentry->d_name.name, true, scb);
- }
-
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, scb);
- afs_update_dentry_version(&fc, dentry, scb);
- ret = afs_end_vnode_operation(&fc);
- if (ret == 0) {
- afs_dir_remove_subdir(dentry);
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_remove(dvnode, &dentry->d_name,
- afs_edit_dir_for_rmdir);
- up_write(&dvnode->validate_lock);
- }
- }
+ return afs_do_sync_operation(op);
- if (vnode)
- up_write(&vnode->rmdir_lock);
-error_key:
- key_put(key);
error:
- kfree(scb);
- return ret;
+ return afs_put_operation(op);
}
/*
@@ -1438,52 +1439,90 @@ error:
* However, if we didn't have a callback promise outstanding, or it was
* outstanding on a different server, then it won't break it either...
*/
-static int afs_dir_remove_link(struct afs_vnode *dvnode, struct dentry *dentry,
- struct key *key)
+static void afs_dir_remove_link(struct afs_operation *op)
{
- int ret = 0;
+ struct afs_vnode *dvnode = op->file[0].vnode;
+ struct afs_vnode *vnode = op->file[1].vnode;
+ struct dentry *dentry = op->dentry;
+ int ret;
- if (d_really_is_positive(dentry)) {
- struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
+ if (op->error != 0 ||
+ (op->file[1].scb.have_status && op->file[1].scb.have_error))
+ return;
+ if (d_really_is_positive(dentry))
+ return;
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- /* Already done */
- } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
- write_seqlock(&vnode->cb_lock);
- drop_nlink(&vnode->vfs_inode);
- if (vnode->vfs_inode.i_nlink == 0) {
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- __afs_break_callback(vnode, afs_cb_break_for_unlink);
- }
- write_sequnlock(&vnode->cb_lock);
- ret = 0;
- } else {
- afs_break_callback(vnode, afs_cb_break_for_unlink);
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ /* Already done */
+ } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ write_seqlock(&vnode->cb_lock);
+ drop_nlink(&vnode->vfs_inode);
+ if (vnode->vfs_inode.i_nlink == 0) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ __afs_break_callback(vnode, afs_cb_break_for_unlink);
+ }
+ write_sequnlock(&vnode->cb_lock);
+ } else {
+ afs_break_callback(vnode, afs_cb_break_for_unlink);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- kdebug("AFS_VNODE_DELETED");
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ _debug("AFS_VNODE_DELETED");
- ret = afs_validate(vnode, key);
- if (ret == -ESTALE)
- ret = 0;
- }
- _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
+ ret = afs_validate(vnode, op->key);
+ if (ret != -ESTALE)
+ op->error = ret;
}
- return ret;
+ _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error);
+}
+
+static void afs_unlink_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_vnode_commit_status(op, &op->file[1]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+ afs_dir_remove_link(op);
+}
+
+static void afs_unlink_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_remove(dvnode, &op->dentry->d_name,
+ afs_edit_dir_for_unlink);
+ up_write(&dvnode->validate_lock);
+}
+
+static void afs_unlink_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->unlink.need_rehash && op->error < 0 && op->error != -ENOENT)
+ d_rehash(op->dentry);
}
+static const struct afs_operation_ops afs_unlink_operation = {
+ .issue_afs_rpc = afs_fs_remove_file,
+ .issue_yfs_rpc = yfs_fs_remove_file,
+ .success = afs_unlink_success,
+ .edit_dir = afs_unlink_edit_dir,
+ .put = afs_unlink_put,
+};
+
/*
* Remove a file or symlink from an AFS filesystem.
*/
static int afs_unlink(struct inode *dir, struct dentry *dentry)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- struct key *key;
- bool need_rehash = false;
int ret;
_enter("{%llx:%llu},{%pd}",
@@ -1492,269 +1531,176 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (dentry->d_name.len >= AFSNAMEMAX)
return -ENAMETOOLONG;
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
/* Try to make sure we have a callback promise on the victim. */
- ret = afs_validate(vnode, key);
- if (ret < 0)
- goto error_key;
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0) {
+ op->error = ret;
+ goto error;
+ }
spin_lock(&dentry->d_lock);
if (d_count(dentry) > 1) {
spin_unlock(&dentry->d_lock);
/* Start asynchronous writeout of the inode */
write_inode_now(d_inode(dentry), 0);
- ret = afs_sillyrename(dvnode, vnode, dentry, key);
- goto error_key;
+ op->error = afs_sillyrename(dvnode, vnode, dentry, op->key);
+ goto error;
}
if (!d_unhashed(dentry)) {
/* Prevent a race with RCU lookup. */
__d_drop(dentry);
- need_rehash = true;
+ op->unlink.need_rehash = true;
}
spin_unlock(&dentry->d_lock);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- afs_dataversion_t data_version = dvnode->status.data_version + 1;
- afs_dataversion_t data_version_2 = vnode->status.data_version;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
-
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
- !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
- yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
- &scb[0], &scb[1]);
- if (fc.ac.error != -ECONNABORTED ||
- fc.ac.abort_code != RXGEN_OPCODE)
- continue;
- set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
- }
-
- afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]);
- }
+ op->file[1].vnode = vnode;
+ op->dentry = dentry;
+ op->ops = &afs_unlink_operation;
+ return afs_do_sync_operation(op);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
- &data_version_2, &scb[1]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- ret = afs_end_vnode_operation(&fc);
- if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
- ret = afs_dir_remove_link(dvnode, dentry, key);
-
- if (ret == 0) {
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_remove(dvnode, &dentry->d_name,
- afs_edit_dir_for_unlink);
- up_write(&dvnode->validate_lock);
- }
- }
-
- if (need_rehash && ret < 0 && ret != -ENOENT)
- d_rehash(dentry);
-
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error:
- _leave(" = %d", ret);
- return ret;
+ return afs_put_operation(op);
}
+static const struct afs_operation_ops afs_create_operation = {
+ .issue_afs_rpc = afs_fs_create_file,
+ .issue_yfs_rpc = yfs_fs_create_file,
+ .success = afs_create_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_create_put,
+};
+
/*
* create a regular file on an AFS filesystem
*/
static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
- struct afs_iget_data iget_data;
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct key *key;
- afs_dataversion_t data_version;
- int ret;
-
- mode |= S_IFREG;
+ int ret = -ENAMETOOLONG;
- _enter("{%llx:%llu},{%pd},%ho,",
+ _enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
- ret = -ENAMETOOLONG;
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto error;
}
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error_scb;
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_prep_for_new_inode(&fc, &iget_data);
- afs_fs_create(&fc, dentry->d_name.name, mode,
- &scb[0], &iget_data.fid, &scb[1]);
- }
+ op->dentry = dentry;
+ op->create.mode = S_IFREG | mode;
+ op->create.reason = afs_edit_dir_for_create;
+ op->ops = &afs_create_operation;
+ return afs_do_sync_operation(op);
- afs_check_for_remote_deletion(&fc, dvnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
- }
-
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
- afs_edit_dir_for_create);
- up_write(&dvnode->validate_lock);
-
- kfree(scb);
- key_put(key);
- _leave(" = 0");
- return 0;
-
-error_scb:
- kfree(scb);
-error_key:
- key_put(key);
error:
d_drop(dentry);
_leave(" = %d", ret);
return ret;
}
+static void afs_link_success(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
+
+ _enter("op=%08x", op->debug_id);
+ afs_vnode_commit_status(op, dvp);
+ afs_vnode_commit_status(op, vp);
+ afs_update_dentry_version(op, dvp, op->dentry);
+ if (op->dentry_2->d_parent == op->dentry->d_parent)
+ afs_update_dentry_version(op, dvp, op->dentry_2);
+ ihold(&vp->vnode->vfs_inode);
+ d_instantiate(op->dentry, &vp->vnode->vfs_inode);
+}
+
+static void afs_link_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->error)
+ d_drop(op->dentry);
+}
+
+static const struct afs_operation_ops afs_link_operation = {
+ .issue_afs_rpc = afs_fs_link,
+ .issue_yfs_rpc = yfs_fs_link,
+ .success = afs_link_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_link_put,
+};
+
/*
* create a hard link between files in an AFS filesystem
*/
static int afs_link(struct dentry *from, struct inode *dir,
struct dentry *dentry)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_vnode *vnode = AFS_FS_I(d_inode(from));
- struct key *key;
- afs_dataversion_t data_version;
- int ret;
+ int ret = -ENAMETOOLONG;
_enter("{%llx:%llu},{%llx:%llu},{%pd}",
vnode->fid.vid, vnode->fid.vnode,
dvnode->fid.vid, dvnode->fid.vnode,
dentry);
- ret = -ENAMETOOLONG;
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto error;
-
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
- afs_end_vnode_operation(&fc);
- goto error_key;
- }
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
- afs_fs_link(&fc, vnode, dentry->d_name.name,
- &scb[0], &scb[1]);
- }
-
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
- NULL, &scb[1]);
- ihold(&vnode->vfs_inode);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- d_instantiate(dentry, &vnode->vfs_inode);
-
- mutex_unlock(&vnode->io_lock);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
}
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid,
- afs_edit_dir_for_link);
- up_write(&dvnode->validate_lock);
+ afs_op_set_vnode(op, 0, dvnode);
+ afs_op_set_vnode(op, 1, vnode);
+ op->file[0].dv_delta = 1;
- key_put(key);
- kfree(scb);
- _leave(" = 0");
- return 0;
+ op->dentry = dentry;
+ op->dentry_2 = from;
+ op->ops = &afs_link_operation;
+ op->create.reason = afs_edit_dir_for_link;
+ return afs_do_sync_operation(op);
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
return ret;
}
+static const struct afs_operation_ops afs_symlink_operation = {
+ .issue_afs_rpc = afs_fs_symlink,
+ .issue_yfs_rpc = yfs_fs_symlink,
+ .success = afs_create_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_create_put,
+};
+
/*
* create a symlink in an AFS filesystem
*/
static int afs_symlink(struct inode *dir, struct dentry *dentry,
const char *content)
{
- struct afs_iget_data iget_data;
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct key *key;
- afs_dataversion_t data_version;
int ret;
_enter("{%llx:%llu},{%pd},%s",
@@ -1769,62 +1715,115 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
if (strlen(content) >= AFSPATHMAX)
goto error;
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto error;
-
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_prep_for_new_inode(&fc, &iget_data);
- afs_fs_symlink(&fc, dentry->d_name.name, content,
- &scb[0], &iget_data.fid, &scb[1]);
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
- afs_check_for_remote_deletion(&fc, dvnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
- }
-
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
- afs_edit_dir_for_symlink);
- up_write(&dvnode->validate_lock);
+ op->dentry = dentry;
+ op->ops = &afs_symlink_operation;
+ op->create.reason = afs_edit_dir_for_symlink;
+ op->create.symlink = content;
+ return afs_do_sync_operation(op);
- key_put(key);
- kfree(scb);
- _leave(" = 0");
- return 0;
-
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
return ret;
}
+static void afs_rename_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+
+ afs_vnode_commit_status(op, &op->file[0]);
+ if (op->file[1].vnode != op->file[0].vnode)
+ afs_vnode_commit_status(op, &op->file[1]);
+}
+
+static void afs_rename_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode *orig_dvnode = orig_dvp->vnode;
+ struct afs_vnode *new_dvnode = new_dvp->vnode;
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry));
+ struct dentry *old_dentry = op->dentry;
+ struct dentry *new_dentry = op->dentry_2;
+ struct inode *new_inode;
+
+ _enter("op=%08x", op->debug_id);
+
+ if (op->rename.rehash) {
+ d_rehash(op->rename.rehash);
+ op->rename.rehash = NULL;
+ }
+
+ down_write(&orig_dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+ orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
+ afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
+ afs_edit_dir_for_rename_0);
+
+ if (new_dvnode != orig_dvnode) {
+ up_write(&orig_dvnode->validate_lock);
+ down_write(&new_dvnode->validate_lock);
+ }
+
+ if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
+ new_dvnode->status.data_version == new_dvp->dv_before + new_dvp->dv_delta) {
+ if (!op->rename.new_negative)
+ afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
+ afs_edit_dir_for_rename_1);
+
+ afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
+ &vnode->fid, afs_edit_dir_for_rename_2);
+ }
+
+ new_inode = d_inode(new_dentry);
+ if (new_inode) {
+ spin_lock(&new_inode->i_lock);
+ if (new_inode->i_nlink > 0)
+ drop_nlink(new_inode);
+ spin_unlock(&new_inode->i_lock);
+ }
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parent's data_version.
+ *
+ * Note that if we ever implement RENAME_EXCHANGE, we'll have
+ * to update both dentries with opposing dir versions.
+ */
+ afs_update_dentry_version(op, new_dvp, op->dentry);
+ afs_update_dentry_version(op, new_dvp, op->dentry_2);
+
+ d_move(old_dentry, new_dentry);
+
+ up_write(&new_dvnode->validate_lock);
+}
+
+static void afs_rename_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->rename.rehash)
+ d_rehash(op->rename.rehash);
+ dput(op->rename.tmp);
+ if (op->error)
+ d_rehash(op->dentry);
+}
+
+static const struct afs_operation_ops afs_rename_operation = {
+ .issue_afs_rpc = afs_fs_rename,
+ .issue_yfs_rpc = yfs_fs_rename,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_edit_dir,
+ .put = afs_rename_put,
+};
+
/*
* rename a file in an AFS filesystem and/or move it between directories
*/
@@ -1832,15 +1831,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
- struct dentry *tmp = NULL, *rehash = NULL;
- struct inode *new_inode;
- struct key *key;
- afs_dataversion_t orig_data_version;
- afs_dataversion_t new_data_version;
- bool new_negative = d_is_negative(new_dentry);
int ret;
if (flags)
@@ -1860,16 +1852,19 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dvnode->fid.vid, new_dvnode->fid.vnode,
new_dentry);
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
+ op = afs_alloc_operation(NULL, orig_dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- key = afs_request_key(orig_dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ afs_op_set_vnode(op, 0, orig_dvnode);
+ afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
+ op->file[0].dv_delta = 1;
+ op->file[1].dv_delta = 1;
+
+ op->dentry = old_dentry;
+ op->dentry_2 = new_dentry;
+ op->rename.new_negative = d_is_negative(new_dentry);
+ op->ops = &afs_rename_operation;
/* For non-directories, check whether the target is busy and if so,
* make a copy of the dentry and then do a silly-rename. If the
@@ -1882,26 +1877,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
if (!d_unhashed(new_dentry)) {
d_drop(new_dentry);
- rehash = new_dentry;
+ op->rename.rehash = new_dentry;
}
if (d_count(new_dentry) > 2) {
/* copy the target dentry's name */
ret = -ENOMEM;
- tmp = d_alloc(new_dentry->d_parent,
- &new_dentry->d_name);
- if (!tmp)
- goto error_rehash;
+ op->rename.tmp = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!op->rename.tmp)
+ goto error;
ret = afs_sillyrename(new_dvnode,
AFS_FS_I(d_inode(new_dentry)),
- new_dentry, key);
+ new_dentry, op->key);
if (ret)
- goto error_rehash;
+ goto error;
- new_dentry = tmp;
- rehash = NULL;
- new_negative = true;
+ op->dentry_2 = op->rename.tmp;
+ op->rename.rehash = NULL;
+ op->rename.new_negative = true;
}
}
@@ -1916,98 +1911,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
d_drop(old_dentry);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
- orig_data_version = orig_dvnode->status.data_version + 1;
-
- if (orig_dvnode != new_dvnode) {
- if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
- afs_end_vnode_operation(&fc);
- goto error_rehash_old;
- }
- new_data_version = new_dvnode->status.data_version + 1;
- } else {
- new_data_version = orig_data_version;
- }
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode);
- fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
- afs_fs_rename(&fc, old_dentry->d_name.name,
- new_dvnode, new_dentry->d_name.name,
- &scb[0], &scb[1]);
- }
+ return afs_do_sync_operation(op);
- afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break,
- &orig_data_version, &scb[0]);
- if (new_dvnode != orig_dvnode) {
- afs_vnode_commit_status(&fc, new_dvnode, fc.cb_break_2,
- &new_data_version, &scb[1]);
- mutex_unlock(&new_dvnode->io_lock);
- }
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_rehash_old;
- }
-
- if (ret == 0) {
- if (rehash)
- d_rehash(rehash);
- down_write(&orig_dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
- orig_dvnode->status.data_version == orig_data_version)
- afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
- afs_edit_dir_for_rename_0);
- if (orig_dvnode != new_dvnode) {
- up_write(&orig_dvnode->validate_lock);
-
- down_write(&new_dvnode->validate_lock);
- }
- if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
- orig_dvnode->status.data_version == new_data_version) {
- if (!new_negative)
- afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
- afs_edit_dir_for_rename_1);
-
- afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
- &vnode->fid, afs_edit_dir_for_rename_2);
- }
-
- new_inode = d_inode(new_dentry);
- if (new_inode) {
- spin_lock(&new_inode->i_lock);
- if (new_inode->i_nlink > 0)
- drop_nlink(new_inode);
- spin_unlock(&new_inode->i_lock);
- }
-
- /* Now we can update d_fsdata on the dentries to reflect their
- * new parent's data_version.
- *
- * Note that if we ever implement RENAME_EXCHANGE, we'll have
- * to update both dentries with opposing dir versions.
- */
- afs_update_dentry_version(&fc, old_dentry, &scb[1]);
- afs_update_dentry_version(&fc, new_dentry, &scb[1]);
- d_move(old_dentry, new_dentry);
- up_write(&new_dvnode->validate_lock);
- goto error_tmp;
- }
-
-error_rehash_old:
- d_rehash(new_dentry);
-error_rehash:
- if (rehash)
- d_rehash(rehash);
-error_tmp:
- if (tmp)
- dput(tmp);
- key_put(key);
-error_scb:
- kfree(scb);
error:
- _leave(" = %d", ret);
- return ret;
+ return afs_put_operation(op);
}
/*
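[Editorial note: the directory-op conversions above all reduce to one shape: allocate an afs_operation, attach the vnodes/fids involved, point op->ops at a table of callbacks and hand the rest to afs_do_sync_operation(), which issues the RPC and invokes the table's success, edit_dir and put hooks at the appropriate points. The following is a minimal userspace analogue of that shape, offered purely as an illustration; the names op_ctx, op_ops and do_sync_operation are invented here and are not the kernel's.]

/* --- Editorial sketch, not part of the patch --- */
#include <stdio.h>
#include <stdlib.h>

struct op_ctx;

struct op_ops {
	void (*issue)(struct op_ctx *op);	/* perform the remote call */
	void (*success)(struct op_ctx *op);	/* commit results on success */
	void (*edit_dir)(struct op_ctx *op);	/* patch the cached directory */
	void (*put)(struct op_ctx *op);		/* cleanup, runs on every path */
};

struct op_ctx {
	const struct op_ops *ops;
	const char *name;
	int error;
};

/* Generic driver: callers fill in op->ops and delegate the boilerplate. */
static int do_sync_operation(struct op_ctx *op)
{
	int error;

	op->ops->issue(op);
	if (op->error == 0) {
		op->ops->success(op);
		if (op->ops->edit_dir)
			op->ops->edit_dir(op);
	}
	if (op->ops->put)
		op->ops->put(op);
	error = op->error;
	free(op);
	return error;
}

static void mkdir_issue(struct op_ctx *op)	{ printf("issue mkdir %s\n", op->name); }
static void mkdir_success(struct op_ctx *op)	{ printf("commit status for %s\n", op->name); }
static void mkdir_edit_dir(struct op_ctx *op)	{ printf("add %s to cached dir\n", op->name); }

static const struct op_ops mkdir_ops = {
	.issue    = mkdir_issue,
	.success  = mkdir_success,
	.edit_dir = mkdir_edit_dir,
};

int main(void)
{
	struct op_ctx *op = calloc(1, sizeof(*op));

	if (!op)
		return 1;
	op->ops = &mkdir_ops;
	op->name = "newdir";
	return do_sync_operation(op);
}

[The per-operation code then shrinks to filling in the context and the ops table, as seen in afs_mkdir(), afs_unlink() and the rest above.]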
diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
index d94e2b7cddff..b14e3d9a25e2 100644
--- a/fs/afs/dir_silly.c
+++ b/fs/afs/dir_silly.c
@@ -12,6 +12,47 @@
#include <linux/fsnotify.h>
#include "internal.h"
+static void afs_silly_rename_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static void afs_silly_rename_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry));
+ struct dentry *old = op->dentry;
+ struct dentry *new = op->dentry_2;
+
+ spin_lock(&old->d_lock);
+ old->d_flags |= DCACHE_NFSFS_RENAMED;
+ spin_unlock(&old->d_lock);
+ if (dvnode->silly_key != op->key) {
+ key_put(dvnode->silly_key);
+ dvnode->silly_key = key_get(op->key);
+ }
+
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) {
+ afs_edit_dir_remove(dvnode, &old->d_name,
+ afs_edit_dir_for_silly_0);
+ afs_edit_dir_add(dvnode, &new->d_name,
+ &vnode->fid, afs_edit_dir_for_silly_1);
+ }
+ up_write(&dvnode->validate_lock);
+}
+
+static const struct afs_operation_ops afs_silly_rename_operation = {
+ .issue_afs_rpc = afs_fs_rename,
+ .issue_yfs_rpc = yfs_fs_rename,
+ .success = afs_silly_rename_success,
+ .edit_dir = afs_silly_rename_edit_dir,
+};
+
/*
* Actually perform the silly rename step.
*/
@@ -19,56 +60,22 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
struct dentry *old, struct dentry *new,
struct key *key)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
- afs_dataversion_t dir_data_version;
- int ret = -ERESTARTSYS;
+ struct afs_operation *op;
_enter("%pd,%pd", old, new);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- trace_afs_silly_rename(vnode, false);
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- dir_data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_rename(&fc, old->d_name.name,
- dvnode, new->d_name.name,
- scb, scb);
- }
-
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &dir_data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, dvnode);
- if (ret == 0) {
- spin_lock(&old->d_lock);
- old->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&old->d_lock);
- if (dvnode->silly_key != key) {
- key_put(dvnode->silly_key);
- dvnode->silly_key = key_get(key);
- }
-
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == dir_data_version) {
- afs_edit_dir_remove(dvnode, &old->d_name,
- afs_edit_dir_for_silly_0);
- afs_edit_dir_add(dvnode, &new->d_name,
- &vnode->fid, afs_edit_dir_for_silly_1);
- }
- up_write(&dvnode->validate_lock);
- }
+ op->dentry = old;
+ op->dentry_2 = new;
+ op->ops = &afs_silly_rename_operation;
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ trace_afs_silly_rename(vnode, false);
+ return afs_do_sync_operation(op);
}
/**
@@ -139,65 +146,66 @@ out:
return ret;
}
+static void afs_silly_unlink_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[1].vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_vnode_commit_status(op, &op->file[1]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+
+ drop_nlink(&vnode->vfs_inode);
+ if (vnode->vfs_inode.i_nlink == 0) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
+}
+
+static void afs_silly_unlink_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_remove(dvnode, &op->dentry->d_name,
+ afs_edit_dir_for_unlink);
+ up_write(&dvnode->validate_lock);
+}
+
+static const struct afs_operation_ops afs_silly_unlink_operation = {
+ .issue_afs_rpc = afs_fs_remove_file,
+ .issue_yfs_rpc = yfs_fs_remove_file,
+ .success = afs_silly_unlink_success,
+ .edit_dir = afs_silly_unlink_edit_dir,
+};
+
/*
* Tell the server to remove a sillyrename file.
*/
static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode,
struct dentry *dentry, struct key *key)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
- int ret = -ERESTARTSYS;
+ struct afs_operation *op;
_enter("");
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- trace_afs_silly_rename(vnode, true);
- if (afs_begin_vnode_operation(&fc, dvnode, key, false)) {
- afs_dataversion_t dir_data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
-
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
- !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
- yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
- &scb[0], &scb[1]);
- if (fc.ac.error != -ECONNABORTED ||
- fc.ac.abort_code != RXGEN_OPCODE)
- continue;
- set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
- }
-
- afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]);
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ afs_op_set_vnode(op, 1, vnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &dir_data_version, &scb[0]);
- ret = afs_end_vnode_operation(&fc);
- if (ret == 0) {
- drop_nlink(&vnode->vfs_inode);
- if (vnode->vfs_inode.i_nlink == 0) {
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- }
- }
- if (ret == 0) {
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == dir_data_version)
- afs_edit_dir_remove(dvnode, &dentry->d_name,
- afs_edit_dir_for_unlink);
- up_write(&dvnode->validate_lock);
- }
- }
+ op->dentry = dentry;
+ op->ops = &afs_silly_unlink_operation;
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ trace_afs_silly_rename(vnode, true);
+ return afs_do_sync_operation(op);
}
/*
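[Editorial note: every edit_dir hook introduced by this patch (mkdir, rmdir, unlink, rename, silly rename) guards the local directory edit with the same test: the cached contents must still be marked valid and the directory's data version must have advanced from dv_before by exactly dv_delta, i.e. the only server-side change was the one this operation made. A stripped-down sketch of that predicate, with simplified types invented for illustration:]

/* --- Editorial sketch, not part of the patch --- */
#include <stdbool.h>
#include <stdint.h>

struct dir_state {
	uint64_t data_version;	/* version of the cached directory contents */
	bool     contents_valid;
};

/* A local edit is only safe if the version observed after the RPC equals
 * the version sampled before it plus the delta this operation expects to
 * cause; anything else means someone else changed the directory too.
 */
static bool may_edit_locally(const struct dir_state *dir,
			     uint64_t dv_before, uint64_t dv_delta)
{
	return dir->contents_valid &&
	       dir->data_version == dv_before + dv_delta;
}

[If the check fails, the cached directory is simply left to be refetched rather than edited in place.]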
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 7503899c0a1b..b79879aacc02 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,6 +10,99 @@
#include <linux/dns_resolver.h>
#include "internal.h"
+static atomic_t afs_autocell_ino;
+
+/*
+ * iget5() comparator for inode created by autocell operations
+ *
+ * These pseudo inodes don't match anything.
+ */
+static int afs_iget5_pseudo_test(struct inode *inode, void *opaque)
+{
+ return 0;
+}
+
+/*
+ * iget5() inode initialiser
+ */
+static int afs_iget5_pseudo_set(struct inode *inode, void *opaque)
+{
+ struct afs_super_info *as = AFS_FS_S(inode->i_sb);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_fid *fid = opaque;
+
+ vnode->volume = as->volume;
+ vnode->fid = *fid;
+ inode->i_ino = fid->vnode;
+ inode->i_generation = fid->unique;
+ return 0;
+}
+
+/*
+ * Create an inode for a dynamic root directory or an autocell dynamic
+ * automount dir.
+ */
+struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
+{
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = {};
+
+ _enter("");
+
+ if (as->volume)
+ fid.vid = as->volume->vid;
+ if (root) {
+ fid.vnode = 1;
+ fid.unique = 1;
+ } else {
+ fid.vnode = atomic_inc_return(&afs_autocell_ino);
+ fid.unique = 0;
+ }
+
+ inode = iget5_locked(sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
+ inode, inode->i_ino, fid.vid, fid.vnode, fid.unique);
+
+ vnode = AFS_FS_I(inode);
+
+ /* there shouldn't be an existing inode */
+ BUG_ON(!(inode->i_state & I_NEW));
+
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ if (root) {
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ } else {
+ inode->i_op = &afs_autocell_inode_operations;
+ }
+ set_nlink(inode, 2);
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ if (!root) {
+ set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+ inode->i_flags |= S_AUTOMOUNT;
+ }
+
+ inode->i_flags |= S_NOATIME;
+ unlock_new_inode(inode);
+ _leave(" = %p", inode);
+ return inode;
+}
+
/*
* Probe to see if a cell may exist. This prevents positive dentries from
* being created unnecessarily.
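[Editorial note: afs_iget_pseudo_dir() above leans on iget5_locked()'s split between a test callback (here one that never matches, so pseudo inodes never alias an existing inode) and a set callback that initialises a freshly allocated one. A rough userspace analogue of that lookup-or-create split follows; the linked-list store and all names are invented for this sketch:]

/* --- Editorial sketch, not part of the patch --- */
#include <stdlib.h>

struct node {
	struct node *next;
	int key;
};

/* Find a node matching @key via @test, or allocate one and let @set
 * initialise it - a crude analogue of the iget5_locked() contract.
 */
static struct node *find_or_create(struct node **head, int key,
				   int (*test)(struct node *, int),
				   int (*set)(struct node *, int))
{
	struct node *n;

	for (n = *head; n; n = n->next)
		if (test(n, key))
			return n;

	n = calloc(1, sizeof(*n));
	if (!n || set(n, key) < 0) {
		free(n);
		return NULL;
	}
	n->next = *head;
	*head = n;
	return n;
}

/* A test that never matches hands every caller a fresh object, which is
 * the trick the pseudo-dir code above relies on. */
static int never_matches(struct node *n, int key) { (void)n; (void)key; return 0; }
static int set_key(struct node *n, int key) { n->key = key; return 0; }

int main(void)
{
	struct node *head = NULL;
	struct node *a = find_or_create(&head, 1, never_matches, set_key);
	struct node *b = find_or_create(&head, 1, never_matches, set_key);

	return (a && b && a != b) ? 0 : 1;	/* two distinct pseudo nodes */
}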
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 8415733f7bc1..506c47471b42 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -69,7 +69,7 @@ static const struct vm_operations_struct afs_vm_ops = {
*/
void afs_put_wb_key(struct afs_wb_key *wbk)
{
- if (refcount_dec_and_test(&wbk->usage)) {
+ if (wbk && refcount_dec_and_test(&wbk->usage)) {
key_put(wbk->key);
kfree(wbk);
}
@@ -220,14 +220,35 @@ static void afs_file_readpage_read_complete(struct page *page,
}
#endif
+static void afs_fetch_data_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_stat_v(vnode, n_fetches);
+ atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+}
+
+static void afs_fetch_data_put(struct afs_operation *op)
+{
+ afs_put_read(op->fetch.req);
+}
+
+static const struct afs_operation_ops afs_fetch_data_operation = {
+ .issue_afs_rpc = afs_fs_fetch_data,
+ .issue_yfs_rpc = yfs_fs_fetch_data,
+ .success = afs_fetch_data_success,
+ .put = afs_fetch_data_put,
+};
+
/*
* Fetch file data from the volume.
*/
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x,,,",
vnode->volume->name,
@@ -236,34 +257,15 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *re
vnode->fid.unique,
key_serial(key));
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_data(&fc, scb, req);
- }
-
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, vnode);
- if (ret == 0) {
- afs_stat_v(vnode, n_fetches);
- atomic_long_add(req->actual_len,
- &afs_v2net(vnode)->n_fetch_bytes);
- }
-
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->fetch.req = afs_get_read(req);
+ op->ops = &afs_fetch_data_operation;
+ return afs_do_sync_operation(op);
}
/*
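[Editorial note: afs_fetch_data() above hands the read request to the operation via afs_get_read() and lets the operation's .put hook release it in afs_fetch_data_put(), so the request is dropped exactly once regardless of which path the operation takes. A bare-bones refcount sketch of that handoff; the helpers below are invented for illustration:]

/* --- Editorial sketch, not part of the patch --- */
#include <stdlib.h>

struct read_req {
	unsigned int usage;
	/* buffers, offsets, lengths ... */
};

static struct read_req *req_get(struct read_req *req)
{
	req->usage++;
	return req;
}

static void req_put(struct read_req *req)
{
	if (req && --req->usage == 0)
		free(req);
}

struct fetch_op {
	struct read_req *req;	/* the op owns one reference */
};

/* The op takes its own count up front and drops it in its cleanup hook,
 * so the caller's reference and the op's reference stay independent. */
static void fetch_op_start(struct fetch_op *op, struct read_req *req)
{
	op->req = req_get(req);
}

static void fetch_op_put(struct fetch_op *op)
{
	req_put(op->req);
}

int main(void)
{
	struct read_req *req = calloc(1, sizeof(*req));
	struct fetch_op op = { 0 };

	if (!req)
		return 1;
	req->usage = 1;		/* caller's reference */
	fetch_op_start(&op, req);
	fetch_op_put(&op);	/* op drops its reference */
	req_put(req);		/* caller drops theirs; req is freed here */
	return 0;
}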
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0f2a94ba73cb..71eea2a908c7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -70,7 +70,8 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
*/
void afs_lock_op_done(struct afs_call *call)
{
- struct afs_vnode *vnode = call->lvnode;
+ struct afs_operation *op = call->op;
+ struct afs_vnode *vnode = op->file[0].vnode;
if (call->error == 0) {
spin_lock(&vnode->lock);
@@ -172,15 +173,28 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
vnode->lock_key = NULL;
}
+static void afs_lock_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static const struct afs_operation_ops afs_set_lock_operation = {
+ .issue_afs_rpc = afs_fs_set_lock,
+ .issue_yfs_rpc = yfs_fs_set_lock,
+ .success = afs_lock_success,
+};
+
/*
* Get a lock on a file
*/
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
afs_lock_type_t type)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x,%u",
vnode->volume->name,
@@ -189,35 +203,29 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
vnode->fid.unique,
key_serial(key), type);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_set_lock(&fc, type, scb);
- }
-
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, vnode);
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->lock.type = type;
+ op->ops = &afs_set_lock_operation;
+ return afs_do_sync_operation(op);
}
+static const struct afs_operation_ops afs_extend_lock_operation = {
+ .issue_afs_rpc = afs_fs_extend_lock,
+ .issue_yfs_rpc = yfs_fs_extend_lock,
+ .success = afs_lock_success,
+};
+
/*
* Extend a lock on a file
*/
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x",
vnode->volume->name,
@@ -226,35 +234,29 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
vnode->fid.unique,
key_serial(key));
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
- while (afs_select_current_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_extend_lock(&fc, scb);
- }
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, vnode);
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_extend_lock_operation;
+ return afs_do_sync_operation(op);
}
+static const struct afs_operation_ops afs_release_lock_operation = {
+ .issue_afs_rpc = afs_fs_release_lock,
+ .issue_yfs_rpc = yfs_fs_release_lock,
+ .success = afs_lock_success,
+};
+
/*
* Release a lock on a file
*/
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x",
vnode->volume->name,
@@ -263,25 +265,15 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
vnode->fid.unique,
key_serial(key));
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
- while (afs_select_current_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_release_lock(&fc, scb);
- }
+ afs_op_set_vnode(op, 0, vnode);
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_release_lock_operation;
+ return afs_do_sync_operation(op);
}
/*
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
new file mode 100644
index 000000000000..2d2dff5688a4
--- /dev/null
+++ b/fs/afs/fs_operation.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Fileserver-directed operation handling.
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include "internal.h"
+
+static atomic_t afs_operation_debug_counter;
+
+/*
+ * Create an operation against a volume.
+ */
+struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume)
+{
+ struct afs_operation *op;
+
+ _enter("");
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return ERR_PTR(-ENOMEM);
+
+ if (!key) {
+ key = afs_request_key(volume->cell);
+ if (IS_ERR(key)) {
+ kfree(op);
+ return ERR_CAST(key);
+ }
+ } else {
+ key_get(key);
+ }
+
+ op->key = key;
+ op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op);
+ op->net = volume->cell->net;
+ op->cb_v_break = volume->cb_v_break;
+ op->debug_id = atomic_inc_return(&afs_operation_debug_counter);
+ op->error = -EDESTADDRREQ;
+ op->ac.error = SHRT_MAX;
+
+ _leave(" = [op=%08x]", op->debug_id);
+ return op;
+}
+
+/*
+ * Lock the vnode(s) being operated upon.
+ */
+static bool afs_get_io_locks(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+ struct afs_vnode *vnode2 = op->file[1].vnode;
+
+ _enter("");
+
+ if (op->flags & AFS_OPERATION_UNINTR) {
+ mutex_lock(&vnode->io_lock);
+ op->flags |= AFS_OPERATION_LOCK_0;
+ _leave(" = t [1]");
+ return true;
+ }
+
+ if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2)
+ vnode2 = NULL;
+
+ if (vnode2 > vnode)
+ swap(vnode, vnode2);
+
+ if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
+ op->error = -EINTR;
+ op->flags |= AFS_OPERATION_STOP;
+ _leave(" = f [I 0]");
+ return false;
+ }
+ op->flags |= AFS_OPERATION_LOCK_0;
+
+ if (vnode2) {
+ if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
+ op->error = -EINTR;
+ op->flags |= AFS_OPERATION_STOP;
+ mutex_unlock(&vnode->io_lock);
+ op->flags &= ~AFS_OPERATION_LOCK_0;
+ _leave(" = f [I 1]");
+ return false;
+ }
+ op->flags |= AFS_OPERATION_LOCK_1;
+ }
+
+ _leave(" = t [2]");
+ return true;
+}
+
+static void afs_drop_io_locks(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+ struct afs_vnode *vnode2 = op->file[1].vnode;
+
+ _enter("");
+
+ if (op->flags & AFS_OPERATION_LOCK_1)
+ mutex_unlock(&vnode2->io_lock);
+ if (op->flags & AFS_OPERATION_LOCK_0)
+ mutex_unlock(&vnode->io_lock);
+}
+
+static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
+ unsigned int index)
+{
+ struct afs_vnode *vnode = vp->vnode;
+
+ if (vnode) {
+ vp->fid = vnode->fid;
+ vp->dv_before = vnode->status.data_version;
+ vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ op->flags |= AFS_OPERATION_CUR_ONLY;
+ }
+
+ if (vp->fid.vnode)
+ _debug("PREP[%u] {%llx:%llu.%u}",
+ index, vp->fid.vid, vp->fid.vnode, vp->fid.unique);
+}
+
+/*
+ * Begin an operation on the fileserver.
+ *
+ * Fileserver operations are serialised on the server by vnode, so we serialise
+ * them here also using the io_lock.
+ */
+bool afs_begin_vnode_operation(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+ ASSERT(vnode);
+
+ _enter("");
+
+ if (op->file[0].need_io_lock)
+ if (!afs_get_io_locks(op))
+ return false;
+
+ afs_prepare_vnode(op, &op->file[0], 0);
+ afs_prepare_vnode(op, &op->file[1], 1);
+ op->cb_v_break = op->volume->cb_v_break;
+ _leave(" = true");
+ return true;
+}
+
+/*
+ * Tidy up a filesystem cursor and unlock the vnode.
+ */
+static void afs_end_vnode_operation(struct afs_operation *op)
+{
+ _enter("");
+
+ if (op->error == -EDESTADDRREQ ||
+ op->error == -EADDRNOTAVAIL ||
+ op->error == -ENETUNREACH ||
+ op->error == -EHOSTUNREACH)
+ afs_dump_edestaddrreq(op);
+
+ afs_drop_io_locks(op);
+
+ if (op->error == -ECONNABORTED)
+ op->error = afs_abort_to_error(op->ac.abort_code);
+}
+
+/*
+ * Wait for an in-progress operation to complete.
+ */
+void afs_wait_for_operation(struct afs_operation *op)
+{
+ _enter("");
+
+ while (afs_select_fileserver(op)) {
+ op->cb_s_break = op->server->cb_s_break;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
+ op->ops->issue_yfs_rpc)
+ op->ops->issue_yfs_rpc(op);
+ else
+ op->ops->issue_afs_rpc(op);
+
+ op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
+ }
+
+ if (op->error == 0) {
+ _debug("success");
+ op->ops->success(op);
+ }
+
+ afs_end_vnode_operation(op);
+
+ if (op->error == 0 && op->ops->edit_dir) {
+ _debug("edit_dir");
+ op->ops->edit_dir(op);
+ }
+ _leave("");
+}
+
+/*
+ * Dispose of an operation.
+ */
+int afs_put_operation(struct afs_operation *op)
+{
+ int i, ret = op->error;
+
+ _enter("op=%08x,%d", op->debug_id, ret);
+
+ if (op->ops && op->ops->put)
+ op->ops->put(op);
+ if (op->file[0].put_vnode)
+ iput(&op->file[0].vnode->vfs_inode);
+ if (op->file[1].put_vnode)
+ iput(&op->file[1].vnode->vfs_inode);
+
+ if (op->more_files) {
+ for (i = 0; i < op->nr_files - 2; i++)
+ if (op->more_files[i].put_vnode)
+ iput(&op->more_files[i].vnode->vfs_inode);
+ kfree(op->more_files);
+ }
+
+ afs_end_cursor(&op->ac);
+ afs_put_serverlist(op->net, op->server_list);
+ afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
+ kfree(op);
+ return ret;
+}
+
+int afs_do_sync_operation(struct afs_operation *op)
+{
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ return afs_put_operation(op);
+}
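Taken together with the call sites in file.c and flock.c above, the calling convention this file expects is: allocate an operation, bind the vnode(s) being operated on, point ->ops at a static table of RPC issuers and handlers, and hand the whole thing to afs_do_sync_operation(), which begins, waits for and finally disposes of the operation, returning its error code. A minimal sketch of such a caller; afs_example_operation, afs_example_success, afs_fs_example_rpc, yfs_fs_example_rpc and afs_example() are placeholders, not names from this patch:

static void afs_example_success(struct afs_operation *op)
{
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_example_operation = {
	.issue_afs_rpc	= afs_fs_example_rpc,	/* placeholder AFS RPC issuer */
	.issue_yfs_rpc	= yfs_fs_example_rpc,	/* placeholder YFS RPC issuer */
	.success	= afs_example_success,
};

static int afs_example(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);		/* file[0] is the primary vnode */
	op->ops = &afs_example_operation;
	return afs_do_sync_operation(op);	/* begin + wait + put, returns op->error */
}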
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 37d1bba57b00..b34f74b0f319 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS fileserver probing
*
- * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2018, 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -11,15 +11,86 @@
#include "internal.h"
#include "protocol_yfs.h"
-static bool afs_fs_probe_done(struct afs_server *server)
+static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
+static unsigned int afs_fs_probe_slow_poll_interval = 5 * 60 * HZ;
+
+/*
+ * Start the probe polling timer. We have to supply it with an inc on the
+ * outstanding server count.
+ */
+static void afs_schedule_fs_probe(struct afs_net *net,
+ struct afs_server *server, bool fast)
+{
+ unsigned long atj;
+
+ if (!net->live)
+ return;
+
+ atj = server->probed_at;
+ atj += fast ? afs_fs_probe_fast_poll_interval : afs_fs_probe_slow_poll_interval;
+
+ afs_inc_servers_outstanding(net);
+ if (timer_reduce(&net->fs_probe_timer, atj))
+ afs_dec_servers_outstanding(net);
+}
+
+/*
+ * Handle the completion of a set of probes.
+ */
+static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server)
+{
+ bool responded = server->probe.responded;
+
+ write_seqlock(&net->fs_lock);
+ if (responded) {
+ list_add_tail(&server->probe_link, &net->fs_probe_slow);
+ } else {
+ server->rtt = UINT_MAX;
+ clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
+ list_add_tail(&server->probe_link, &net->fs_probe_fast);
+ }
+ write_sequnlock(&net->fs_lock);
+
+ afs_schedule_fs_probe(net, server, !responded);
+}
+
+/*
+ * Handle the completion of a probe.
+ */
+static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server)
+{
+ _enter("");
+
+ if (atomic_dec_and_test(&server->probe_outstanding))
+ afs_finished_fs_probe(net, server);
+
+ wake_up_all(&server->probe_wq);
+}
+
+/*
+ * Handle inability to send a probe due to ENOMEM when trying to allocate a
+ * call struct.
+ */
+static void afs_fs_probe_not_done(struct afs_net *net,
+ struct afs_server *server,
+ struct afs_addr_cursor *ac)
{
- if (!atomic_dec_and_test(&server->probe_outstanding))
- return false;
+ struct afs_addr_list *alist = ac->alist;
+ unsigned int index = ac->index;
+
+ _enter("");
+
+ trace_afs_io_error(0, -ENOMEM, afs_io_error_fs_probe_fail);
+ spin_lock(&server->probe_lock);
- wake_up_var(&server->probe_outstanding);
- clear_bit_unlock(AFS_SERVER_FL_PROBING, &server->flags);
- wake_up_bit(&server->flags, AFS_SERVER_FL_PROBING);
- return true;
+ server->probe.local_failure = true;
+ if (server->probe.error == 0)
+ server->probe.error = -ENOMEM;
+
+ set_bit(index, &alist->failed);
+
+ spin_unlock(&server->probe_lock);
+ return afs_done_one_fs_probe(net, server);
}
/*
@@ -30,10 +101,8 @@ void afs_fileserver_probe_result(struct afs_call *call)
{
struct afs_addr_list *alist = call->alist;
struct afs_server *server = call->server;
- unsigned int server_index = call->server_index;
unsigned int index = call->addr_ix;
unsigned int rtt_us = 0;
- bool have_result = false;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
@@ -52,8 +121,9 @@ void afs_fileserver_probe_result(struct afs_call *call)
goto responded;
case -ENOMEM:
case -ENONET:
+ clear_bit(index, &alist->responded);
server->probe.local_failure = true;
- afs_io_error(call, afs_io_error_fs_probe_fail);
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
case -ERFKILL:
@@ -72,12 +142,11 @@ void afs_fileserver_probe_result(struct afs_call *call)
server->probe.error == -ETIMEDOUT ||
server->probe.error == -ETIME))
server->probe.error = ret;
- afs_io_error(call, afs_io_error_fs_probe_fail);
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
goto out;
}
responded:
- set_bit(index, &alist->responded);
clear_bit(index, &alist->failed);
if (call->service_id == YFS_FS_SERVICE) {
@@ -95,39 +164,34 @@ responded:
rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
if (rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
alist->preferred = index;
- have_result = true;
}
smp_wmb(); /* Set rtt before responded. */
server->probe.responded = true;
- set_bit(AFS_SERVER_FL_PROBED, &server->flags);
+ set_bit(index, &alist->responded);
+ set_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
out:
spin_unlock(&server->probe_lock);
- _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
- server_index, index, &alist->addrs[index].transport, rtt_us, ret);
+ _debug("probe %pU [%u] %pISpc rtt=%u ret=%d",
+ &server->uuid, index, &alist->addrs[index].transport,
+ rtt_us, ret);
- have_result |= afs_fs_probe_done(server);
- if (have_result)
- wake_up_all(&server->probe_wq);
+ return afs_done_one_fs_probe(call->net, server);
}
/*
- * Probe all of a fileserver's addresses to find out the best route and to
- * query its capabilities.
+ * Probe one or all of a fileserver's addresses to find out the best route and
+ * to query its capabilities.
*/
-static int afs_do_probe_fileserver(struct afs_net *net,
- struct afs_server *server,
- struct key *key,
- unsigned int server_index,
- struct afs_error *_e)
+void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct key *key, bool all)
{
struct afs_addr_cursor ac = {
.index = 0,
};
- struct afs_call *call;
- bool in_progress = false;
_enter("%pU", &server->uuid);
@@ -137,50 +201,25 @@ static int afs_do_probe_fileserver(struct afs_net *net,
afs_get_addrlist(ac.alist);
read_unlock(&server->fs_lock);
- atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+ server->probed_at = jiffies;
+ atomic_set(&server->probe_outstanding, all ? ac.alist->nr_addrs : 1);
memset(&server->probe, 0, sizeof(server->probe));
server->probe.rtt = UINT_MAX;
- for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
- call = afs_fs_get_capabilities(net, server, &ac, key, server_index);
- if (!IS_ERR(call)) {
- afs_put_call(call);
- in_progress = true;
- } else {
- afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
- }
- }
-
- if (!in_progress)
- afs_fs_probe_done(server);
- afs_put_addrlist(ac.alist);
- return in_progress;
-}
+ ac.index = ac.alist->preferred;
+ if (ac.index < 0 || ac.index >= ac.alist->nr_addrs)
+ all = true;
-/*
- * Send off probes to all unprobed servers.
- */
-int afs_probe_fileservers(struct afs_net *net, struct key *key,
- struct afs_server_list *list)
-{
- struct afs_server *server;
- struct afs_error e;
- bool in_progress = false;
- int i;
-
- e.error = 0;
- e.responded = false;
- for (i = 0; i < list->nr_servers; i++) {
- server = list->servers[i].server;
- if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
- continue;
-
- if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) &&
- afs_do_probe_fileserver(net, server, key, i, &e))
- in_progress = true;
+ if (all) {
+ for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++)
+ if (!afs_fs_get_capabilities(net, server, &ac, key))
+ afs_fs_probe_not_done(net, server, &ac);
+ } else {
+ if (!afs_fs_get_capabilities(net, server, &ac, key))
+ afs_fs_probe_not_done(net, server, &ac);
}
- return in_progress ? 0 : e.error;
+ afs_put_addrlist(ac.alist);
}
/*
@@ -190,7 +229,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
{
struct wait_queue_entry *waits;
struct afs_server *server;
- unsigned int rtt = UINT_MAX;
+ unsigned int rtt = UINT_MAX, rtt_s;
bool have_responders = false;
int pref = -1, i;
@@ -200,7 +239,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
for (i = 0; i < slist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = slist->servers[i].server;
- if (!test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+ if (!atomic_read(&server->probe_outstanding))
__clear_bit(i, &untried);
if (server->probe.responded)
have_responders = true;
@@ -230,7 +269,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
server = slist->servers[i].server;
if (server->probe.responded)
goto stop;
- if (test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+ if (atomic_read(&server->probe_outstanding))
still_probing = true;
}
}
@@ -246,10 +285,11 @@ stop:
for (i = 0; i < slist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = slist->servers[i].server;
- if (server->probe.responded &&
- server->probe.rtt < rtt) {
+ rtt_s = READ_ONCE(server->rtt);
+ if (test_bit(AFS_SERVER_FL_RESPONDING, &server->flags) &&
+ rtt_s < rtt) {
pref = i;
- rtt = server->probe.rtt;
+ rtt = rtt_s;
}
remove_wait_queue(&server->probe_wq, &waits[i]);
@@ -265,3 +305,156 @@ stop:
slist->preferred = pref;
return 0;
}
+
+/*
+ * Probe timer. We hold an increment on the servers_outstanding count that we
+ * need to pass along to the work item.
+ */
+void afs_fs_probe_timer(struct timer_list *timer)
+{
+ struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);
+
+ if (!queue_work(afs_wq, &net->fs_prober))
+ afs_dec_servers_outstanding(net);
+}
+
+/*
+ * Dispatch a probe to a server.
+ */
+static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server, bool all)
+ __releases(&net->fs_lock)
+{
+ struct key *key = NULL;
+
+ /* We remove it from the queues here - it will be added back to
+ * one of the queues on the completion of the probe.
+ */
+ list_del_init(&server->probe_link);
+
+ afs_get_server(server, afs_server_trace_get_probe);
+ write_sequnlock(&net->fs_lock);
+
+ afs_fs_probe_fileserver(net, server, key, all);
+ afs_put_server(net, server, afs_server_trace_put_probe);
+}
+
+/*
+ * Probe a server immediately without waiting for its due time to come
+ * round. This is used when all of the addresses have been tried.
+ */
+void afs_probe_fileserver(struct afs_net *net, struct afs_server *server)
+{
+ write_seqlock(&net->fs_lock);
+ if (!list_empty(&server->probe_link))
+ return afs_dispatch_fs_probe(net, server, true);
+ write_sequnlock(&net->fs_lock);
+}
+
+/*
+ * Dispatch probes at regular intervals to keep NAT mappings alive.
+ */
+void afs_fs_probe_dispatcher(struct work_struct *work)
+{
+ struct afs_net *net = container_of(work, struct afs_net, fs_prober);
+ struct afs_server *fast, *slow, *server;
+ unsigned long nowj, timer_at, poll_at;
+ bool first_pass = true, set_timer = false;
+
+ if (!net->live)
+ return;
+
+ _enter("");
+
+ if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) {
+ _leave(" [none]");
+ return;
+ }
+
+again:
+ write_seqlock(&net->fs_lock);
+
+ fast = slow = server = NULL;
+ nowj = jiffies;
+ timer_at = nowj + MAX_JIFFY_OFFSET;
+
+ if (!list_empty(&net->fs_probe_fast)) {
+ fast = list_first_entry(&net->fs_probe_fast, struct afs_server, probe_link);
+ poll_at = fast->probed_at + afs_fs_probe_fast_poll_interval;
+ if (time_before(nowj, poll_at)) {
+ timer_at = poll_at;
+ set_timer = true;
+ fast = NULL;
+ }
+ }
+
+ if (!list_empty(&net->fs_probe_slow)) {
+ slow = list_first_entry(&net->fs_probe_slow, struct afs_server, probe_link);
+ poll_at = slow->probed_at + afs_fs_probe_slow_poll_interval;
+ if (time_before(nowj, poll_at)) {
+ if (time_before(poll_at, timer_at))
+ timer_at = poll_at;
+ set_timer = true;
+ slow = NULL;
+ }
+ }
+
+ server = fast ?: slow;
+ if (server)
+ _debug("probe %pU", &server->uuid);
+
+ if (server && (first_pass || !need_resched())) {
+ afs_dispatch_fs_probe(net, server, server == fast);
+ first_pass = false;
+ goto again;
+ }
+
+ write_sequnlock(&net->fs_lock);
+
+ if (server) {
+ if (!queue_work(afs_wq, &net->fs_prober))
+ afs_dec_servers_outstanding(net);
+ _leave(" [requeue]");
+ } else if (set_timer) {
+ if (timer_reduce(&net->fs_probe_timer, timer_at))
+ afs_dec_servers_outstanding(net);
+ _leave(" [timer]");
+ } else {
+ afs_dec_servers_outstanding(net);
+ _leave(" [quiesce]");
+ }
+}
+
+/*
+ * Wait up to 2 seconds for a probe on a particular fileserver to complete.
+ */
+int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr)
+{
+ struct wait_queue_entry wait;
+ unsigned long timo = 2 * HZ;
+
+ if (atomic_read(&server->probe_outstanding) == 0)
+ goto dont_wait;
+
+ init_wait_entry(&wait, 0);
+ for (;;) {
+ prepare_to_wait_event(&server->probe_wq, &wait,
+ is_intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (timo == 0 ||
+ server->probe.responded ||
+ atomic_read(&server->probe_outstanding) == 0 ||
+ (is_intr && signal_pending(current)))
+ break;
+ timo = schedule_timeout(timo);
+ }
+
+ finish_wait(&server->probe_wq, &wait);
+
+dont_wait:
+ if (server->probe.responded)
+ return 0;
+ if (is_intr && signal_pending(current))
+ return -ERESTARTSYS;
+ if (timo == 0)
+ return -ETIME;
+ return -EDESTADDRREQ;
+}
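The scheduling above keeps struct afs_net pinned across deferred work by threading a count on servers_outstanding through the timer and the work item: whichever stage fails to hand the count on is responsible for dropping it. Reduced to its essentials (the two functions below are illustrative placeholders; the fields and helpers are the ones used in the patch):

static void afs_example_schedule_probe(struct afs_net *net, unsigned long expires_at)
{
	/* Take a count for the timer to hold... */
	afs_inc_servers_outstanding(net);

	/* ...but if the timer was already pending, the earlier arming already
	 * carries a count, so drop the extra one again (timer_reduce() returns
	 * true if the timer was pending).
	 */
	if (timer_reduce(&net->fs_probe_timer, expires_at))
		afs_dec_servers_outstanding(net);
}

static void afs_example_probe_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);

	/* Pass the count on to the dispatcher work item, or drop it if the
	 * work item was already queued (queue_work() returns false then).
	 */
	if (!queue_work(afs_wq, &net->fs_prober))
		afs_dec_servers_outstanding(net);
}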
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index d2b3798c1932..acb4d0ca2649 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -13,12 +13,6 @@
#include "internal.h"
#include "afs_fs.h"
#include "xdr_fs.h"
-#include "protocol_yfs.h"
-
-static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
-{
- call->cbi = afs_get_cb_interest(cbi);
-}
/*
* decode an AFSFid block
@@ -56,16 +50,15 @@ static void xdr_dump_bad(const __be32 *bp)
/*
* decode an AFSFetchStatus block
*/
-static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
- struct afs_call *call,
- struct afs_status_cb *scb)
+static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp;
struct afs_file_status *status = &scb->status;
bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
u64 data_version, size;
u32 type, abort_code;
- int ret;
abort_code = ntohl(xdr->abort_code);
@@ -79,7 +72,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
*/
status->abort_code = abort_code;
scb->have_error = true;
- goto good;
+ goto advance;
}
pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
@@ -89,7 +82,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
if (abort_code != 0 && inline_error) {
status->abort_code = abort_code;
scb->have_error = true;
- goto good;
+ goto advance;
}
type = ntohl(xdr->type);
@@ -125,15 +118,13 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
data_version |= (u64)ntohl(xdr->data_version_hi) << 32;
status->data_version = data_version;
scb->have_status = true;
-good:
- ret = 0;
advance:
*_bp = (const void *)*_bp + sizeof(*xdr);
- return ret;
+ return;
bad:
xdr_dump_bad(*_bp);
- ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+ afs_protocol_error(call, afs_eproto_bad_status);
goto advance;
}
@@ -243,8 +234,10 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
/*
* deliver reply data to an FS.FetchStatus
*/
-static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
+static int afs_deliver_fs_fetch_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
const __be32 *bp;
int ret;
@@ -254,11 +247,9 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -267,54 +258,39 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
/*
* FS.FetchStatus operation type
*/
-static const struct afs_call_type afs_RXFSFetchStatus_vnode = {
- .name = "FS.FetchStatus(vnode)",
+static const struct afs_call_type afs_RXFSFetchStatus = {
+ .name = "FS.FetchStatus",
.op = afs_FS_FetchStatus,
- .deliver = afs_deliver_fs_fetch_status_vnode,
+ .deliver = afs_deliver_fs_fetch_status,
.destructor = afs_flat_call_destructor,
};
/*
* fetch the status information for a file
*/
-int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
- struct afs_volsync *volsync)
+void afs_fs_fetch_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_file_status(fc, scb, volsync);
-
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchStatus,
16, (21 + 3 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = volsync;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHSTATUS);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
-
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -322,7 +298,9 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb
*/
static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
- struct afs_read *req = call->read_request;
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
const __be32 *bp;
unsigned int size;
int ret;
@@ -419,14 +397,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
- req->data_version = call->out_scb->status.data_version;
- req->file_size = call->out_scb->status.size;
+ req->data_version = vp->scb.status.data_version;
+ req->file_size = vp->scb.status.size;
call->unmarshall++;
@@ -449,14 +425,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
return 0;
}
-static void afs_fetch_data_destructor(struct afs_call *call)
-{
- struct afs_read *req = call->read_request;
-
- afs_put_read(req);
- afs_flat_call_destructor(call);
-}
-
/*
* FS.FetchData operation type
*/
@@ -464,102 +432,79 @@ static const struct afs_call_type afs_RXFSFetchData = {
.name = "FS.FetchData",
.op = afs_FS_FetchData,
.deliver = afs_deliver_fs_fetch_data,
- .destructor = afs_fetch_data_destructor,
+ .destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSFetchData64 = {
.name = "FS.FetchData64",
.op = afs_FS_FetchData64,
.deliver = afs_deliver_fs_fetch_data,
- .destructor = afs_fetch_data_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
* fetch data from a very large file
*/
-static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
- struct afs_status_cb *scb,
- struct afs_read *req)
+static void afs_fs_fetch_data64(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
- call->read_request = afs_get_read(req);
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA64);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
bp[4] = htonl(upper_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->pos));
bp[6] = 0;
bp[7] = htonl(lower_32_bits(req->len));
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* fetch data from a file
*/
-int afs_fs_fetch_data(struct afs_fs_cursor *fc,
- struct afs_status_cb *scb,
- struct afs_read *req)
+void afs_fs_fetch_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_read *req = op->fetch.req;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_data(fc, scb, req);
-
if (upper_32_bits(req->pos) ||
upper_32_bits(req->len) ||
upper_32_bits(req->pos + req->len))
- return afs_fs_fetch_data64(fc, scb, req);
+ return afs_fs_fetch_data64(op);
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
- call->read_request = afs_get_read(req);
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
bp[4] = htonl(lower_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->len));
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
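FS.FetchData carries 32-bit offsets and lengths, so the wrapper above switches to FS.FetchData64 whenever the position, the length or their sum does not fit in 32 bits. A runnable illustration of that check in plain userspace C (the two macros mimic the kernel's upper_32_bits()/lower_32_bits() helpers):

#include <stdio.h>
#include <stdint.h>

#define upper_32_bits(n)	((uint32_t)((uint64_t)(n) >> 32))
#define lower_32_bits(n)	((uint32_t)(n))

int main(void)
{
	uint64_t pos = 6ULL << 30;	/* 6GiB offset: needs the 64-bit RPC */
	uint64_t len = 4096;

	if (upper_32_bits(pos) || upper_32_bits(len) || upper_32_bits(pos + len))
		printf("FS.FetchData64: pos=%u:%u\n",
		       upper_32_bits(pos), lower_32_bits(pos));
	else
		printf("FS.FetchData: pos=%u\n", lower_32_bits(pos));
	return 0;
}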
/*
@@ -567,6 +512,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_create_vnode(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -576,15 +524,11 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFid(&bp, call->out_fid);
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFid(&bp, &op->file[1].fid);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_AFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -600,6 +544,52 @@ static const struct afs_call_type afs_RXFSCreateFile = {
.destructor = afs_flat_call_destructor,
};
+/*
+ * Create a file.
+ */
+void afs_fs_create_file(struct afs_operation *op)
+{
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_call *call;
+ size_t namesz, reqsz, padsz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = name->len;
+ padsz = (4 - (namesz & 3)) & 3;
+ reqsz = (5 * 4) + namesz + padsz + (6 * 4);
+
+ call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile,
+ reqsz, (3 + 21 + 21 + 3 + 6) * 4);
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(FSCREATEFILE);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
+ *bp++ = htonl(namesz);
+ memcpy(bp, name->name, namesz);
+ bp = (void *) bp + namesz;
+ if (padsz > 0) {
+ memset(bp, 0, padsz);
+ bp = (void *) bp + padsz;
+ }
+ *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
+ *bp++ = 0; /* owner */
+ *bp++ = 0; /* group */
+ *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */
+ *bp++ = 0; /* segment size */
+
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
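Request marshalling in this file rounds every string up to the next 4-byte XDR boundary: (4 - (namesz & 3)) & 3 gives the number of zero bytes to append, and the (5 * 4) in reqsz covers the opcode, the three-word FID and the length word that precede the name. A small runnable check of the padding arithmetic (standalone userspace C, not kernel code):

#include <stdio.h>
#include <stddef.h>

/* Zero bytes needed to pad a string of namesz bytes to a 4-byte boundary */
static size_t xdr_padsz(size_t namesz)
{
	return (4 - (namesz & 3)) & 3;
}

int main(void)
{
	size_t lens[] = { 1, 3, 4, 5, 8, 11 };
	size_t i;

	/* e.g. namesz=5 -> padsz=3 (8 bytes on the wire), namesz=8 -> padsz=0 */
	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("namesz=%2zu padsz=%zu padded=%zu\n",
		       lens[i], xdr_padsz(lens[i]), lens[i] + xdr_padsz(lens[i]));
	return 0;
}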
static const struct afs_call_type afs_RXFSMakeDir = {
.name = "FS.MakeDir",
.op = afs_FS_MakeDir,
@@ -608,80 +598,58 @@ static const struct afs_call_type afs_RXFSMakeDir = {
};
/*
- * create a file or make a directory
+ * Create a new directory
*/
-int afs_fs_create(struct afs_fs_cursor *fc,
- const char *name,
- umode_t mode,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void afs_fs_make_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)){
- if (S_ISDIR(mode))
- return yfs_fs_make_dir(fc, name, mode, dvnode_scb,
- newfid, new_scb);
- else
- return yfs_fs_create_file(fc, name, mode, dvnode_scb,
- newfid, new_scb);
- }
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz + (6 * 4);
- call = afs_alloc_flat_call(
- net, S_ISDIR(mode) ? &afs_RXFSMakeDir : &afs_RXFSCreateFile,
- reqsz, (3 + 21 + 21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSMakeDir,
+ reqsz, (3 + 21 + 21 + 3 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
- *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(FSMAKEDIR);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
- *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
- *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
+ *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */
*bp++ = 0; /* segment size */
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
- * Deliver reply data to any operation that returns directory status and volume
- * sync.
+ * Deliver reply data to any operation that returns status and volume sync.
*/
-static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call)
+static int afs_deliver_fs_file_status_and_vol(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
const __be32 *bp;
int ret;
@@ -691,81 +659,108 @@ static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
}
/*
- * FS.RemoveDir/FS.RemoveFile operation type
+ * FS.RemoveFile operation type
*/
static const struct afs_call_type afs_RXFSRemoveFile = {
.name = "FS.RemoveFile",
.op = afs_FS_RemoveFile,
- .deliver = afs_deliver_fs_dir_status_and_vol,
+ .deliver = afs_deliver_fs_file_status_and_vol,
.destructor = afs_flat_call_destructor,
};
+/*
+ * Remove a file.
+ */
+void afs_fs_remove_file(struct afs_operation *op)
+{
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_call *call;
+ size_t namesz, reqsz, padsz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = name->len;
+ padsz = (4 - (namesz & 3)) & 3;
+ reqsz = (5 * 4) + namesz + padsz;
+
+ call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveFile,
+ reqsz, (21 + 6) * 4);
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(FSREMOVEFILE);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
+ *bp++ = htonl(namesz);
+ memcpy(bp, name->name, namesz);
+ bp = (void *) bp + namesz;
+ if (padsz > 0) {
+ memset(bp, 0, padsz);
+ bp = (void *) bp + padsz;
+ }
+
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
static const struct afs_call_type afs_RXFSRemoveDir = {
.name = "FS.RemoveDir",
.op = afs_FS_RemoveDir,
- .deliver = afs_deliver_fs_dir_status_and_vol,
+ .deliver = afs_deliver_fs_file_status_and_vol,
.destructor = afs_flat_call_destructor,
};
/*
- * remove a file or directory
+ * Remove a directory.
*/
-int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, bool isdir, struct afs_status_cb *dvnode_scb)
+void afs_fs_remove_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_remove(fc, vnode, name, isdir, dvnode_scb);
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz;
- call = afs_alloc_flat_call(
- net, isdir ? &afs_RXFSRemoveDir : &afs_RXFSRemoveFile,
- reqsz, (21 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveDir,
+ reqsz, (21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
- *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(FSREMOVEDIR);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -773,6 +768,9 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int afs_deliver_fs_link(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -784,13 +782,9 @@ static int afs_deliver_fs_link(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -809,56 +803,44 @@ static const struct afs_call_type afs_RXFSLink = {
/*
* make a hard link
*/
-int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name,
- struct afs_status_cb *dvnode_scb,
- struct afs_status_cb *vnode_scb)
+void afs_fs_link(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_link(fc, vnode, name, dvnode_scb, vnode_scb);
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz + (3 * 4);
- call = afs_alloc_flat_call(net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSLINK);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+
+ trace_afs_make_fs_call1(call, &vp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -866,6 +848,9 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int afs_deliver_fs_symlink(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -877,14 +862,10 @@ static int afs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFid(&bp, call->out_fid);
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFid(&bp, &vp->fid);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -903,75 +884,58 @@ static const struct afs_call_type afs_RXFSSymlink = {
/*
* create a symbolic link
*/
-int afs_fs_symlink(struct afs_fs_cursor *fc,
- const char *name,
- const char *contents,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void afs_fs_symlink(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz, c_namesz, c_padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_symlink(fc, name, contents, dvnode_scb,
- newfid, new_scb);
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
- c_namesz = strlen(contents);
+ c_namesz = strlen(op->create.symlink);
c_padsz = (4 - (c_namesz & 3)) & 3;
reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4);
- call = afs_alloc_flat_call(net, &afs_RXFSSymlink, reqsz,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSSymlink, reqsz,
(3 + 21 + 21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSYMLINK);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
*bp++ = htonl(c_namesz);
- memcpy(bp, contents, c_namesz);
+ memcpy(bp, op->create.symlink, c_namesz);
bp = (void *) bp + c_namesz;
if (c_padsz > 0) {
memset(bp, 0, c_padsz);
bp = (void *) bp + c_padsz;
}
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
- *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(S_IRWXUGO); /* unix mode */
*bp++ = 0; /* segment size */
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -979,6 +943,9 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_rename(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
const __be32 *bp;
int ret;
@@ -986,17 +953,13 @@ static int afs_deliver_fs_rename(struct afs_call *call)
if (ret < 0)
return ret;
+ bp = call->buffer;
/* If the two dirs are the same, we have two copies of the same status
* report, so we just decode it twice.
*/
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1015,31 +978,22 @@ static const struct afs_call_type afs_RXFSRename = {
/*
* Rename/move a file or directory.
*/
-int afs_fs_rename(struct afs_fs_cursor *fc,
- const char *orig_name,
- struct afs_vnode *new_dvnode,
- const char *new_name,
- struct afs_status_cb *orig_dvnode_scb,
- struct afs_status_cb *new_dvnode_scb)
+void afs_fs_rename(struct afs_operation *op)
{
- struct afs_vnode *orig_dvnode = fc->vnode;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
struct afs_call *call;
- struct afs_net *net = afs_v2net(orig_dvnode);
size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_rename(fc, orig_name,
- new_dvnode, new_name,
- orig_dvnode_scb,
- new_dvnode_scb);
-
_enter("");
- o_namesz = strlen(orig_name);
+ o_namesz = orig_name->len;
o_padsz = (4 - (o_namesz & 3)) & 3;
- n_namesz = strlen(new_name);
+ n_namesz = new_name->len;
n_padsz = (4 - (n_namesz & 3)) & 3;
reqsz = (4 * 4) +
@@ -1047,51 +1001,46 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
(3 * 4) +
4 + n_namesz + n_padsz;
- call = afs_alloc_flat_call(net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = orig_dvnode_scb;
- call->out_scb = new_dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSRENAME);
- *bp++ = htonl(orig_dvnode->fid.vid);
- *bp++ = htonl(orig_dvnode->fid.vnode);
- *bp++ = htonl(orig_dvnode->fid.unique);
+ *bp++ = htonl(orig_dvp->fid.vid);
+ *bp++ = htonl(orig_dvp->fid.vnode);
+ *bp++ = htonl(orig_dvp->fid.unique);
*bp++ = htonl(o_namesz);
- memcpy(bp, orig_name, o_namesz);
+ memcpy(bp, orig_name->name, o_namesz);
bp = (void *) bp + o_namesz;
if (o_padsz > 0) {
memset(bp, 0, o_padsz);
bp = (void *) bp + o_padsz;
}
- *bp++ = htonl(new_dvnode->fid.vid);
- *bp++ = htonl(new_dvnode->fid.vnode);
- *bp++ = htonl(new_dvnode->fid.unique);
+ *bp++ = htonl(new_dvp->fid.vid);
+ *bp++ = htonl(new_dvp->fid.vnode);
+ *bp++ = htonl(new_dvp->fid.unique);
*bp++ = htonl(n_namesz);
- memcpy(bp, new_name, n_namesz);
+ memcpy(bp, new_name->name, n_namesz);
bp = (void *) bp + n_namesz;
if (n_padsz > 0) {
memset(bp, 0, n_padsz);
bp = (void *) bp + n_padsz;
}
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
- * deliver reply data to an FS.StoreData
+ * Deliver reply data to FS.StoreData or FS.StoreStatus
*/
static int afs_deliver_fs_store_data(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
const __be32 *bp;
int ret;
@@ -1103,10 +1052,8 @@ static int afs_deliver_fs_store_data(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1132,90 +1079,69 @@ static const struct afs_call_type afs_RXFSStoreData64 = {
/*
* store a set of pages to a very large file
*/
-static int afs_fs_store_data64(struct afs_fs_cursor *fc,
- struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to,
- loff_t size, loff_t pos, loff_t i_size,
- struct afs_status_cb *scb)
+static void afs_fs_store_data64(struct afs_operation *op,
+ loff_t pos, loff_t size, loff_t i_size)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData64,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64,
(4 + 6 + 3 * 2) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
+ return afs_op_nomem(op);
- call->key = fc->key;
- call->mapping = mapping;
- call->first = first;
- call->last = last;
- call->first_offset = offset;
- call->last_to = to;
call->send_pages = true;
- call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA64);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
*bp++ = htonl(AFS_SET_MTIME); /* mask */
- *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
*bp++ = 0; /* segment size */
- *bp++ = htonl(pos >> 32);
- *bp++ = htonl((u32) pos);
- *bp++ = htonl(size >> 32);
- *bp++ = htonl((u32) size);
- *bp++ = htonl(i_size >> 32);
- *bp++ = htonl((u32) i_size);
-
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(upper_32_bits(pos));
+ *bp++ = htonl(lower_32_bits(pos));
+ *bp++ = htonl(upper_32_bits(size));
+ *bp++ = htonl(lower_32_bits(size));
+ *bp++ = htonl(upper_32_bits(i_size));
+ *bp++ = htonl(lower_32_bits(i_size));
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* store a set of pages
*/
-int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to,
- struct afs_status_cb *scb)
+void afs_fs_store_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
loff_t size, pos, i_size;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_store_data(fc, mapping, first, last, offset, to, scb);
-
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- size = (loff_t)to - (loff_t)offset;
- if (first != last)
- size += (loff_t)(last - first) << PAGE_SHIFT;
- pos = (loff_t)first << PAGE_SHIFT;
- pos += offset;
+ size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset;
+ if (op->store.first != op->store.last)
+ size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT;
+ pos = (loff_t)op->store.first << PAGE_SHIFT;
+ pos += op->store.first_offset;
- i_size = i_size_read(&vnode->vfs_inode);
+ i_size = i_size_read(&vp->vnode->vfs_inode);
if (pos + size > i_size)
i_size = size + pos;
@@ -1223,73 +1149,38 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
(unsigned long long) size, (unsigned long long) pos,
(unsigned long long) i_size);
- if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32)
- return afs_fs_store_data64(fc, mapping, first, last, offset, to,
- size, pos, i_size, scb);
+ if (upper_32_bits(pos) || upper_32_bits(i_size) || upper_32_bits(size) ||
+ upper_32_bits(pos + size))
+ return afs_fs_store_data64(op, pos, size, i_size);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
(4 + 6 + 3) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
+ return afs_op_nomem(op);
- call->key = fc->key;
- call->mapping = mapping;
- call->first = first;
- call->last = last;
- call->first_offset = offset;
- call->last_to = to;
call->send_pages = true;
- call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
*bp++ = htonl(AFS_SET_MTIME); /* mask */
- *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
*bp++ = 0; /* segment size */
- *bp++ = htonl(pos);
- *bp++ = htonl(size);
- *bp++ = htonl(i_size);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
-}
-
-/*
- * deliver reply data to an FS.StoreStatus
- */
-static int afs_deliver_fs_store_status(struct afs_call *call)
-{
- const __be32 *bp;
- int ret;
-
- _enter("");
+ *bp++ = htonl(lower_32_bits(pos));
+ *bp++ = htonl(lower_32_bits(size));
+ *bp++ = htonl(lower_32_bits(i_size));
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
-
- _leave(" = 0 [done]");
- return 0;
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
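
A note on the marshalling above: the open-coded ">> 32" / "(u32)" splits are replaced by the generic upper_32_bits()/lower_32_bits() helpers. A minimal sketch of how one 64-bit field becomes two XDR words (illustrative only; example_xdr_encode_u64() is not a helper added by this patch):

	static void example_xdr_encode_u64(__be32 **_bp, u64 x)
	{
		__be32 *bp = *_bp;

		*bp++ = htonl(upper_32_bits(x));	/* high 32 bits first */
		*bp++ = htonl(lower_32_bits(x));	/* then the low 32 bits */
		*_bp = bp;
	}

This matches the order used by FS.StoreData64 above: each 64-bit position, length and file size is emitted as an upper word followed by a lower word.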
/*
@@ -1298,21 +1189,21 @@ static int afs_deliver_fs_store_status(struct afs_call *call)
static const struct afs_call_type afs_RXFSStoreStatus = {
.name = "FS.StoreStatus",
.op = afs_FS_StoreStatus,
- .deliver = afs_deliver_fs_store_status,
+ .deliver = afs_deliver_fs_store_data,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSStoreData_as_Status = {
.name = "FS.StoreData",
.op = afs_FS_StoreData,
- .deliver = afs_deliver_fs_store_status,
+ .deliver = afs_deliver_fs_store_data,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
.name = "FS.StoreData64",
.op = afs_FS_StoreData64,
- .deliver = afs_deliver_fs_store_status,
+ .deliver = afs_deliver_fs_store_data,
.destructor = afs_flat_call_destructor,
};
@@ -1320,85 +1211,74 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
* set the attributes on a very large file, using FS.StoreData rather than
* FS.StoreStatus so as to alter the file size also
*/
-static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+static void afs_fs_setattr_size64(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData64_as_Status,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64_as_Status,
(4 + 6 + 3 * 2) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA64);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
xdr_encode_AFS_StoreStatus(&bp, attr);
- *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */
- *bp++ = htonl((u32) attr->ia_size);
- *bp++ = 0; /* size of write */
+ *bp++ = htonl(upper_32_bits(attr->ia_size)); /* position of start of write */
+ *bp++ = htonl(lower_32_bits(attr->ia_size));
+ *bp++ = 0; /* size of write */
*bp++ = 0;
- *bp++ = htonl(attr->ia_size >> 32); /* new file length */
- *bp++ = htonl((u32) attr->ia_size);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(upper_32_bits(attr->ia_size)); /* new file length */
+ *bp++ = htonl(lower_32_bits(attr->ia_size));
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* set the attributes on a file, using FS.StoreData rather than FS.StoreStatus
* so as to alter the file size also
*/
-static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+static void afs_fs_setattr_size(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- if (attr->ia_size >> 32)
- return afs_fs_setattr_size64(fc, attr, scb);
+ if (upper_32_bits(attr->ia_size))
+ return afs_fs_setattr_size64(op);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData_as_Status,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
(4 + 6 + 3) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
xdr_encode_AFS_StoreStatus(&bp, attr);
@@ -1406,57 +1286,44 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
*bp++ = 0; /* size of write */
*bp++ = htonl(attr->ia_size); /* new file length */
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* set the attributes on a file, using FS.StoreData if there's a change in file
* size, and FS.StoreStatus otherwise
*/
-int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+void afs_fs_setattr(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_setattr(fc, attr, scb);
-
if (attr->ia_valid & ATTR_SIZE)
- return afs_fs_setattr_size(fc, attr, scb);
+ return afs_fs_setattr_size(op);
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreStatus,
(4 + 6) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTORESTATUS);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
- xdr_encode_AFS_StoreStatus(&bp, attr);
+ xdr_encode_AFS_StoreStatus(&bp, op->setattr.attr);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1464,6 +1331,7 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
*/
static int afs_deliver_fs_get_volume_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
char *p;
u32 size;
@@ -1485,7 +1353,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
+ xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs);
call->unmarshall++;
afs_extract_to_tmp(call);
/* Fall through */
@@ -1499,8 +1367,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_volname_len);
+ return afs_protocol_error(call, afs_eproto_volname_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1529,8 +1396,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_offline_msg_len);
+ return afs_protocol_error(call, afs_eproto_offline_msg_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1560,8 +1426,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_motd_len);
+ return afs_protocol_error(call, afs_eproto_motd_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1601,37 +1466,26 @@ static const struct afs_call_type afs_RXFSGetVolumeStatus = {
/*
* fetch the status of a volume
*/
-int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
- struct afs_volume_status *vs)
+void afs_fs_get_volume_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_get_volume_status(fc, vs);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSGetVolumeStatus, 2 * 4,
max(12 * 4, AFSOPAQUEMAX + 1));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_volstatus = vs;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSGETVOLUMESTATUS);
- bp[1] = htonl(vnode->fid.vid);
+ bp[1] = htonl(vp->fid.vid);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1639,6 +1493,7 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
int ret;
@@ -1650,7 +1505,7 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1691,114 +1546,80 @@ static const struct afs_call_type afs_RXFSReleaseLock = {
/*
* Set a lock on a file
*/
-int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type,
- struct afs_status_cb *scb)
+void afs_fs_set_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_set_lock(fc, type, scb);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSETLOCK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
- *bp++ = htonl(type);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_calli(call, &vnode->fid, type);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+ *bp++ = htonl(op->lock.type);
+
+ trace_afs_make_fs_calli(call, &vp->fid, op->lock.type);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* extend a lock on a file
*/
-int afs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void afs_fs_extend_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_extend_lock(fc, scb);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSEXTENDLOCK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* release a lock on a file
*/
-int afs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void afs_fs_release_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_release_lock(fc, scb);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSRELEASELOCK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1842,7 +1663,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net,
bp = call->request;
*bp++ = htonl(FSGIVEUPALLCALLBACKS);
- /* Can't take a ref on server */
+ call->server = afs_use_server(server, afs_server_trace_give_up_cb);
afs_make_call(ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, ac);
}
@@ -1905,14 +1726,13 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
};
/*
- * Probe a fileserver for the capabilities that it supports. This can
- * return up to 196 words.
+ * Probe a fileserver for the capabilities that it supports. This RPC can
+ * reply with up to 196 words. The operation is asynchronous and, if we managed
+ * to allocate a call, true is returned and the result is delivered through the
+ * ->done() handler - otherwise we return false to indicate we didn't even try.
*/
-struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
- struct afs_server *server,
- struct afs_addr_cursor *ac,
- struct key *key,
- unsigned int server_index)
+bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_cursor *ac, struct key *key)
{
struct afs_call *call;
__be32 *bp;
@@ -1921,11 +1741,10 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
call = afs_alloc_flat_call(net, &afs_RXFSGetCapabilities, 1 * 4, 16 * 4);
if (!call)
- return ERR_PTR(-ENOMEM);
+ return false;
call->key = key;
- call->server = afs_get_server(server, afs_server_trace_get_caps);
- call->server_index = server_index;
+ call->server = afs_use_server(server, afs_server_trace_get_caps);
call->upgrade = true;
call->async = true;
call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
@@ -1934,87 +1753,10 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
bp = call->request;
*bp++ = htonl(FSGETCAPABILITIES);
- /* Can't take a ref on server */
trace_afs_make_fs_call(call, NULL);
afs_make_call(ac, call, GFP_NOFS);
- return call;
-}
-
-/*
- * Deliver reply data to an FS.FetchStatus with no vnode.
- */
-static int afs_deliver_fs_fetch_status(struct afs_call *call)
-{
- const __be32 *bp;
- int ret;
-
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
-
- _leave(" = 0 [done]");
- return 0;
-}
-
-/*
- * FS.FetchStatus operation type
- */
-static const struct afs_call_type afs_RXFSFetchStatus = {
- .name = "FS.FetchStatus",
- .op = afs_FS_FetchStatus,
- .deliver = afs_deliver_fs_fetch_status,
- .destructor = afs_flat_call_destructor,
-};
-
-/*
- * Fetch the status information for a fid without needing a vnode handle.
- */
-int afs_fs_fetch_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fid,
- struct afs_status_cb *scb,
- struct afs_volsync *volsync)
-{
- struct afs_call *call;
- __be32 *bp;
-
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_status(fc, net, fid, scb, volsync);
-
- _enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), fid->vid, fid->vnode);
-
- call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_fid = fid;
- call->out_scb = scb;
- call->out_volsync = volsync;
-
- /* marshall the parameters */
- bp = call->request;
- bp[0] = htonl(FSFETCHSTATUS);
- bp[1] = htonl(fid->vid);
- bp[2] = htonl(fid->vnode);
- bp[3] = htonl(fid->unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ afs_put_call(call);
+ return true;
}
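
To illustrate the changed contract (hypothetical caller, not from this patch): the probe is now fire-and-forget from the caller's point of view - the call ref is put before returning, the reply is consumed by the call type's ->done() hook, and the boolean only reports whether a probe was actually dispatched.

	static bool example_probe_fileserver(struct afs_net *net,
					     struct afs_server *server,
					     struct afs_addr_cursor *ac,
					     struct key *key)
	{
		bool dispatched = afs_fs_get_capabilities(net, server, ac, key);

		if (!dispatched) {
			/* No call was allocated, so ->done() will never run for
			 * this probe; the caller has to account for the miss.
			 */
			_debug("capabilities probe not dispatched");
		}
		return dispatched;
	}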
/*
@@ -2022,6 +1764,7 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
struct afs_status_cb *scb;
const __be32 *bp;
u32 tmp;
@@ -2043,10 +1786,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
tmp = ntohl(call->tmp);
- _debug("status count: %u/%u", tmp, call->count2);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_count);
+ _debug("status count: %u/%u", tmp, op->nr_files);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_count);
call->count = 0;
call->unmarshall++;
@@ -2060,14 +1802,23 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
if (ret < 0)
return ret;
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
- ret = xdr_decode_AFSFetchStatus(&bp, call, scb);
- if (ret < 0)
- return ret;
+ xdr_decode_AFSFetchStatus(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_counts;
call->count = 0;
@@ -2084,9 +1835,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_cb_count);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_cb_count);
call->count = 0;
call->unmarshall++;
more_cbs:
@@ -2100,11 +1850,22 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
_debug("unmarshall CB array");
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
xdr_decode_AFSCallBack(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_cbs;
afs_extract_to_buf(call, 6 * sizeof(__be32));
@@ -2117,7 +1878,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
@@ -2129,6 +1890,16 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return 0;
}
+static void afs_done_fs_inline_bulk_status(struct afs_call *call)
+{
+ if (call->error == -ECONNABORTED &&
+ call->abort_code == RX_INVALID_OPERATION) {
+ set_bit(AFS_SERVER_FL_NO_IBULK, &call->server->flags);
+ if (call->op)
+ set_bit(AFS_VOLUME_MAYBE_NO_IBULK, &call->op->volume->flags);
+ }
+}
+
/*
* FS.InlineBulkStatus operation type
*/
@@ -2136,58 +1907,53 @@ static const struct afs_call_type afs_RXFSInlineBulkStatus = {
.name = "FS.InlineBulkStatus",
.op = afs_FS_InlineBulkStatus,
.deliver = afs_deliver_fs_inline_bulk_status,
+ .done = afs_done_fs_inline_bulk_status,
.destructor = afs_flat_call_destructor,
};
/*
* Fetch the status information for up to 50 files
*/
-int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fids,
- struct afs_status_cb *statuses,
- unsigned int nr_fids,
- struct afs_volsync *volsync)
+void afs_fs_inline_bulk_status(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
__be32 *bp;
int i;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_inline_bulk_status(fc, net, fids, statuses,
- nr_fids, volsync);
+ if (test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags)) {
+ op->error = -ENOTSUPP;
+ return;
+ }
_enter(",%x,{%llx:%llu},%u",
- key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files);
- call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus,
- (2 + nr_fids * 3) * 4,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSInlineBulkStatus,
+ (2 + op->nr_files * 3) * 4,
21 * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = statuses;
- call->out_volsync = volsync;
- call->count2 = nr_fids;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSINLINEBULKSTATUS);
- *bp++ = htonl(nr_fids);
- for (i = 0; i < nr_fids; i++) {
- *bp++ = htonl(fids[i].vid);
- *bp++ = htonl(fids[i].vnode);
- *bp++ = htonl(fids[i].unique);
+ *bp++ = htonl(op->nr_files);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+ for (i = 0; i < op->nr_files - 2; i++) {
+ *bp++ = htonl(op->more_files[i].fid.vid);
+ *bp++ = htonl(op->more_files[i].fid.vnode);
+ *bp++ = htonl(op->more_files[i].fid.unique);
}
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &fids[0]);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -2195,6 +1961,8 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_fetch_acl(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_acl *acl;
const __be32 *bp;
unsigned int size;
@@ -2220,7 +1988,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL);
if (!acl)
return -ENOMEM;
- call->ret_acl = acl;
+ op->acl = acl;
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
call->unmarshall++;
@@ -2243,10 +2011,8 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
@@ -2258,12 +2024,6 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
return 0;
}
-static void afs_destroy_fs_fetch_acl(struct afs_call *call)
-{
- kfree(call->ret_acl);
- afs_flat_call_destructor(call);
-}
-
/*
* FS.FetchACL operation type
*/
@@ -2271,68 +2031,33 @@ static const struct afs_call_type afs_RXFSFetchACL = {
.name = "FS.FetchACL",
.op = afs_FS_FetchACL,
.deliver = afs_deliver_fs_fetch_acl,
- .destructor = afs_destroy_fs_fetch_acl,
};
/*
* Fetch the ACL for a file.
*/
-struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc,
- struct afs_status_cb *scb)
+void afs_fs_fetch_acl(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSFetchACL, 16, (21 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return ERR_PTR(-ENOMEM);
- }
-
- call->key = fc->key;
- call->ret_acl = NULL;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchACL, 16, (21 + 6) * 4);
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHACL);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return (struct afs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
-}
-
-/*
- * Deliver reply data to any operation that returns file status and volume
- * sync.
- */
-static int afs_deliver_fs_file_status_and_vol(struct afs_call *call)
-{
- const __be32 *bp;
- int ret;
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
-
- _leave(" = 0 [done]");
- return 0;
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
/*
@@ -2348,42 +2073,34 @@ static const struct afs_call_type afs_RXFSStoreACL = {
/*
* Store the ACL for a file.
*/
-int afs_fs_store_acl(struct afs_fs_cursor *fc, const struct afs_acl *acl,
- struct afs_status_cb *scb)
+void afs_fs_store_acl(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ const struct afs_acl *acl = op->acl;
size_t size;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreACL,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreACL,
5 * 4 + size, (21 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSSTOREACL);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
bp[4] = htonl(acl->size);
memcpy(&bp[5], acl->data, acl->size);
if (acl->size != size)
memset((void *)&bp[5] + acl->size, 0, size - acl->size);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 281470fe1183..cd0a0060950b 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -67,16 +67,18 @@ static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
/*
* Initialise an inode from the vnode status.
*/
-static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
- struct afs_cb_interest *cbi,
- struct afs_vnode *parent_vnode,
- struct afs_status_cb *scb)
+static int afs_inode_init_from_status(struct afs_operation *op,
+ struct afs_vnode_param *vp,
+ struct afs_vnode *vnode)
{
- struct afs_cb_interest *old_cbi = NULL;
- struct afs_file_status *status = &scb->status;
+ struct afs_file_status *status = &vp->scb.status;
struct inode *inode = AFS_VNODE_TO_I(vnode);
struct timespec64 t;
+ _enter("{%llx:%llu.%u} %s",
+ vp->fid.vid, vp->fid.vnode, vp->fid.unique,
+ op->type ? op->type->name : "???");
+
_debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
status->type,
status->nlink,
@@ -86,12 +88,15 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
write_seqlock(&vnode->cb_lock);
+ vnode->cb_v_break = op->cb_v_break;
+ vnode->cb_s_break = op->cb_s_break;
vnode->status = *status;
t = status->mtime_client;
inode->i_ctime = t;
inode->i_mtime = t;
inode->i_atime = t;
+ inode->i_flags |= S_NOATIME;
inode->i_uid = make_kuid(&init_user_ns, status->owner);
inode->i_gid = make_kgid(&init_user_ns, status->group);
set_nlink(&vnode->vfs_inode, status->nlink);
@@ -128,9 +133,9 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
inode_nohighmem(inode);
break;
default:
- dump_vnode(vnode, parent_vnode);
+ dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL);
write_sequnlock(&vnode->cb_lock);
- return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type);
+ return afs_protocol_error(NULL, afs_eproto_file_type);
}
afs_set_i_size(vnode, status->size);
@@ -138,39 +143,36 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
vnode->invalid_before = status->data_version;
inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
- if (!scb->have_cb) {
+ if (!vp->scb.have_cb) {
/* it's a symlink we just created (the fileserver
* didn't give us a callback) */
vnode->cb_expires_at = ktime_get_real_seconds();
} else {
- vnode->cb_expires_at = scb->callback.expires_at;
- old_cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- if (cbi != old_cbi)
- rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(cbi));
- else
- old_cbi = NULL;
+ vnode->cb_expires_at = vp->scb.callback.expires_at;
+ vnode->cb_server = op->server;
set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), old_cbi);
return 0;
}
/*
* Update the core inode struct from a returned status record.
*/
-static void afs_apply_status(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- struct afs_status_cb *scb,
- const afs_dataversion_t *expected_version)
+static void afs_apply_status(struct afs_operation *op,
+ struct afs_vnode_param *vp)
{
- struct afs_file_status *status = &scb->status;
+ struct afs_file_status *status = &vp->scb.status;
+ struct afs_vnode *vnode = vp->vnode;
struct timespec64 t;
umode_t mode;
bool data_changed = false;
+ _enter("{%llx:%llu.%u} %s",
+ vp->fid.vid, vp->fid.vnode, vp->fid.unique,
+ op->type ? op->type->name : "???");
+
BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags));
if (status->type != vnode->status.type) {
@@ -179,7 +181,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
vnode->fid.vnode,
vnode->fid.unique,
status->type, vnode->status.type);
- afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status);
+ afs_protocol_error(NULL, afs_eproto_bad_status);
return;
}
@@ -209,14 +211,13 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
vnode->status = *status;
- if (expected_version &&
- *expected_version != status->data_version) {
+ if (vp->dv_before + vp->dv_delta != status->data_version) {
if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s\n",
vnode->fid.vid, vnode->fid.vnode,
- (unsigned long long)*expected_version,
+ (unsigned long long)vp->dv_before + vp->dv_delta,
(unsigned long long)status->data_version,
- fc->type ? fc->type->name : "???");
+ op->type ? op->type->name : "???");
vnode->invalid_before = status->data_version;
if (vnode->status.type == AFS_FTYPE_DIR) {
@@ -243,22 +244,15 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
/*
* Apply a callback to a vnode.
*/
-static void afs_apply_callback(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- struct afs_status_cb *scb,
- unsigned int cb_break)
+static void afs_apply_callback(struct afs_operation *op,
+ struct afs_vnode_param *vp)
{
- struct afs_cb_interest *old;
- struct afs_callback *cb = &scb->callback;
+ struct afs_callback *cb = &vp->scb.callback;
+ struct afs_vnode *vnode = vp->vnode;
- if (!afs_cb_is_broken(cb_break, vnode, fc->cbi)) {
+ if (!afs_cb_is_broken(vp->cb_break_before, vnode)) {
vnode->cb_expires_at = cb->expires_at;
- old = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- if (old != fc->cbi) {
- rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(fc->cbi));
- afs_put_cb_interest(afs_v2net(vnode), old);
- }
+ vnode->cb_server = op->server;
set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
}
@@ -267,106 +261,108 @@ static void afs_apply_callback(struct afs_fs_cursor *fc,
* Apply the received status and callback to an inode all in the same critical
* section to avoid races with afs_validate().
*/
-void afs_vnode_commit_status(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- unsigned int cb_break,
- const afs_dataversion_t *expected_version,
- struct afs_status_cb *scb)
+void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *vp)
{
- if (fc->ac.error != 0)
- return;
+ struct afs_vnode *vnode = vp->vnode;
+
+ _enter("");
+
+ ASSERTCMP(op->error, ==, 0);
write_seqlock(&vnode->cb_lock);
- if (scb->have_error) {
- if (scb->status.abort_code == VNOVNODE) {
+ if (vp->scb.have_error) {
+ if (vp->scb.status.abort_code == VNOVNODE) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
clear_nlink(&vnode->vfs_inode);
__afs_break_callback(vnode, afs_cb_break_for_deleted);
}
} else {
- if (scb->have_status)
- afs_apply_status(fc, vnode, scb, expected_version);
- if (scb->have_cb)
- afs_apply_callback(fc, vnode, scb, cb_break);
+ if (vp->scb.have_status)
+ afs_apply_status(op, vp);
+ if (vp->scb.have_cb)
+ afs_apply_callback(op, vp);
}
write_sequnlock(&vnode->cb_lock);
- if (fc->ac.error == 0 && scb->have_status)
- afs_cache_permit(vnode, fc->key, cb_break, scb);
+ if (op->error == 0 && vp->scb.have_status)
+ afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
}
+static void afs_fetch_status_success(struct afs_operation *op)
+{
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_vnode *vnode = vp->vnode;
+ int ret;
+
+ if (vnode->vfs_inode.i_state & I_NEW) {
+ ret = afs_inode_init_from_status(op, vp, vnode);
+ op->error = ret;
+ if (ret == 0)
+ afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
+ } else {
+ afs_vnode_commit_status(op, vp);
+ }
+}
+
+static const struct afs_operation_ops afs_fetch_status_operation = {
+ .issue_afs_rpc = afs_fs_fetch_status,
+ .issue_yfs_rpc = yfs_fs_fetch_status,
+ .success = afs_fetch_status_success,
+};
+
/*
* Fetch file status from the volume.
*/
int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool is_new,
afs_access_t *_caller_access)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s,{%llx:%llu.%u,S=%lx}",
vnode->volume->name,
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
vnode->flags);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ afs_op_set_vnode(op, 0, vnode);
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_file_status(&fc, scb, NULL);
- }
-
- if (fc.error) {
- /* Do nothing. */
- } else if (is_new) {
- ret = afs_inode_init_from_status(vnode, key, fc.cbi,
- NULL, scb);
- fc.error = ret;
- if (ret == 0)
- afs_cache_permit(vnode, key, fc.cb_break, scb);
- } else {
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- }
- afs_check_for_remote_deletion(&fc, vnode);
- ret = afs_end_vnode_operation(&fc);
- }
+ op->nr_files = 1;
+ op->ops = &afs_fetch_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
- if (ret == 0 && _caller_access)
- *_caller_access = scb->status.caller_access;
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ if (_caller_access)
+ *_caller_access = op->file[0].scb.status.caller_access;
+ return afs_put_operation(op);
}
/*
- * iget5() comparator
+ * ilookup() comparator
*/
-int afs_iget5_test(struct inode *inode, void *opaque)
+int afs_ilookup5_test_by_fid(struct inode *inode, void *opaque)
{
- struct afs_iget_data *iget_data = opaque;
struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_fid *fid = opaque;
- return memcmp(&vnode->fid, &iget_data->fid, sizeof(iget_data->fid)) == 0;
+ return (fid->vnode == vnode->fid.vnode &&
+ fid->vnode_hi == vnode->fid.vnode_hi &&
+ fid->unique == vnode->fid.unique);
}
/*
- * iget5() comparator for inode created by autocell operations
- *
- * These pseudo inodes don't match anything.
+ * iget5() comparator
*/
-static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque)
+static int afs_iget5_test(struct inode *inode, void *opaque)
{
- return 0;
+ struct afs_vnode_param *vp = opaque;
+ //struct afs_vnode *vnode = AFS_FS_I(inode);
+
+ return afs_ilookup5_test_by_fid(inode, &vp->fid);
}
/*
@@ -374,99 +370,22 @@ static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque)
*/
static int afs_iget5_set(struct inode *inode, void *opaque)
{
- struct afs_iget_data *iget_data = opaque;
+ struct afs_vnode_param *vp = opaque;
+ struct afs_super_info *as = AFS_FS_S(inode->i_sb);
struct afs_vnode *vnode = AFS_FS_I(inode);
- vnode->fid = iget_data->fid;
- vnode->volume = iget_data->volume;
- vnode->cb_v_break = iget_data->cb_v_break;
- vnode->cb_s_break = iget_data->cb_s_break;
+ vnode->volume = as->volume;
+ vnode->fid = vp->fid;
/* YFS supports 96-bit vnode IDs, but Linux only supports
* 64-bit inode numbers.
*/
- inode->i_ino = iget_data->fid.vnode;
- inode->i_generation = iget_data->fid.unique;
+ inode->i_ino = vnode->fid.vnode;
+ inode->i_generation = vnode->fid.unique;
return 0;
}
/*
- * Create an inode for a dynamic root directory or an autocell dynamic
- * automount dir.
- */
-struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
-{
- struct afs_super_info *as;
- struct afs_vnode *vnode;
- struct inode *inode;
- static atomic_t afs_autocell_ino;
-
- struct afs_iget_data iget_data = {
- .cb_v_break = 0,
- .cb_s_break = 0,
- };
-
- _enter("");
-
- as = sb->s_fs_info;
- if (as->volume) {
- iget_data.volume = as->volume;
- iget_data.fid.vid = as->volume->vid;
- }
- if (root) {
- iget_data.fid.vnode = 1;
- iget_data.fid.unique = 1;
- } else {
- iget_data.fid.vnode = atomic_inc_return(&afs_autocell_ino);
- iget_data.fid.unique = 0;
- }
-
- inode = iget5_locked(sb, iget_data.fid.vnode,
- afs_iget5_pseudo_dir_test, afs_iget5_set,
- &iget_data);
- if (!inode) {
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
- }
-
- _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
- inode, inode->i_ino, iget_data.fid.vid, iget_data.fid.vnode,
- iget_data.fid.unique);
-
- vnode = AFS_FS_I(inode);
-
- /* there shouldn't be an existing inode */
- BUG_ON(!(inode->i_state & I_NEW));
-
- inode->i_size = 0;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (root) {
- inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &simple_dir_operations;
- } else {
- inode->i_op = &afs_autocell_inode_operations;
- }
- set_nlink(inode, 2);
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
- inode->i_blocks = 0;
- inode_set_iversion_raw(inode, 0);
- inode->i_generation = 0;
-
- set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- if (!root) {
- set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT;
- }
-
- inode->i_flags |= S_NOATIME;
- unlock_new_inode(inode);
- _leave(" = %p", inode);
- return inode;
-}
-
-/*
* Get a cache cookie for an inode.
*/
static void afs_get_inode_cache(struct afs_vnode *vnode)
@@ -501,58 +420,41 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
/*
* inode retrieval
*/
-struct inode *afs_iget(struct super_block *sb, struct key *key,
- struct afs_iget_data *iget_data,
- struct afs_status_cb *scb,
- struct afs_cb_interest *cbi,
- struct afs_vnode *parent_vnode)
+struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
{
- struct afs_super_info *as;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct super_block *sb = dvp->vnode->vfs_inode.i_sb;
struct afs_vnode *vnode;
- struct afs_fid *fid = &iget_data->fid;
struct inode *inode;
int ret;
- _enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique);
+ _enter(",{%llx:%llu.%u},,", vp->fid.vid, vp->fid.vnode, vp->fid.unique);
- as = sb->s_fs_info;
- iget_data->volume = as->volume;
-
- inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set,
- iget_data);
+ inode = iget5_locked(sb, vp->fid.vnode, afs_iget5_test, afs_iget5_set, vp);
if (!inode) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
- _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }",
- inode, fid->vid, fid->vnode, fid->unique);
-
vnode = AFS_FS_I(inode);
+ _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }",
+ inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+
/* deal with an existing inode */
if (!(inode->i_state & I_NEW)) {
_leave(" = %p", inode);
return inode;
}
- if (!scb) {
- /* it's a remotely extant inode */
- ret = afs_fetch_status(vnode, key, true, NULL);
- if (ret < 0)
- goto bad_inode;
- } else {
- ret = afs_inode_init_from_status(vnode, key, cbi, parent_vnode,
- scb);
- if (ret < 0)
- goto bad_inode;
- }
+ ret = afs_inode_init_from_status(op, vp, vnode);
+ if (ret < 0)
+ goto bad_inode;
afs_get_inode_cache(vnode);
/* success */
clear_bit(AFS_VNODE_UNSET, &vnode->flags);
- inode->i_flags |= S_NOATIME;
unlock_new_inode(inode);
_leave(" = %p", inode);
return inode;
@@ -564,11 +466,79 @@ bad_inode:
return ERR_PTR(ret);
}
+static int afs_iget5_set_root(struct inode *inode, void *opaque)
+{
+ struct afs_super_info *as = AFS_FS_S(inode->i_sb);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+
+ vnode->volume = as->volume;
+ vnode->fid.vid = as->volume->vid;
+ vnode->fid.vnode = 1;
+ vnode->fid.unique = 1;
+ inode->i_ino = 1;
+ inode->i_generation = 1;
+ return 0;
+}
+
+/*
+ * Set up the root inode for a volume. This is always vnode 1, unique 1 within
+ * the volume.
+ */
+struct inode *afs_root_iget(struct super_block *sb, struct key *key)
+{
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_operation *op;
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ int ret;
+
+ _enter(",{%llx},,", as->volume->vid);
+
+ inode = iget5_locked(sb, 1, NULL, afs_iget5_set_root, NULL);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid);
+
+ BUG_ON(!(inode->i_state & I_NEW));
+
+ vnode = AFS_FS_I(inode);
+ vnode->cb_v_break = as->volume->cb_v_break;
+
+ op = afs_alloc_operation(key, as->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
+ goto error;
+ }
+
+ afs_op_set_vnode(op, 0, vnode);
+
+ op->nr_files = 1;
+ op->ops = &afs_fetch_status_operation;
+ ret = afs_do_sync_operation(op);
+ if (ret < 0)
+ goto error;
+
+ afs_get_inode_cache(vnode);
+
+ clear_bit(AFS_VNODE_UNSET, &vnode->flags);
+ unlock_new_inode(inode);
+ _leave(" = %p", inode);
+ return inode;
+
+error:
+ iget_failed(inode);
+ _leave(" = %d [bad]", ret);
+ return ERR_PTR(ret);
+}
+
/*
* mark the data attached to an inode as obsolete due to a write on the server
* - might also want to ditch all the outstanding writes and dirty pages
*/
-void afs_zap_data(struct afs_vnode *vnode)
+static void afs_zap_data(struct afs_vnode *vnode)
{
_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
@@ -586,12 +556,30 @@ void afs_zap_data(struct afs_vnode *vnode)
}
/*
+ * Get the server reinit counter for a vnode's current server.
+ */
+static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+{
+ struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+ struct afs_server *server;
+ int i;
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ server = slist->servers[i].server;
+ if (server == vnode->cb_server) {
+ *_s_break = READ_ONCE(server->cb_s_break);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
* Check the validity of a vnode/inode.
*/
bool afs_check_validity(struct afs_vnode *vnode)
{
- struct afs_cb_interest *cbi;
- struct afs_server *server;
struct afs_volume *volume = vnode->volume;
enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
time64_t now = ktime_get_real_seconds();
@@ -604,11 +592,8 @@ bool afs_check_validity(struct afs_vnode *vnode)
cb_v_break = READ_ONCE(volume->cb_v_break);
cb_break = vnode->cb_break;
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
- cbi = rcu_dereference(vnode->cb_interest);
- server = rcu_dereference(cbi->server);
- cb_s_break = READ_ONCE(server->cb_s_break);
-
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+ afs_get_s_break_rcu(vnode, &cb_s_break)) {
if (vnode->cb_s_break != cb_s_break ||
vnode->cb_v_break != cb_v_break) {
vnode->cb_s_break = cb_s_break;
@@ -755,7 +740,6 @@ int afs_drop_inode(struct inode *inode)
*/
void afs_evict_inode(struct inode *inode)
{
- struct afs_cb_interest *cbi;
struct afs_vnode *vnode;
vnode = AFS_FS_I(inode);
@@ -772,15 +756,6 @@ void afs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- write_seqlock(&vnode->cb_lock);
- cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- if (cbi) {
- afs_put_cb_interest(afs_i2net(inode), cbi);
- rcu_assign_pointer(vnode->cb_interest, NULL);
- }
- write_sequnlock(&vnode->cb_lock);
-
while (!list_empty(&vnode->wb_keys)) {
struct afs_wb_key *wbk = list_entry(vnode->wb_keys.next,
struct afs_wb_key, vnode_link);
@@ -808,16 +783,24 @@ void afs_evict_inode(struct inode *inode)
_leave("");
}
+static void afs_setattr_success(struct afs_operation *op)
+{
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static const struct afs_operation_ops afs_setattr_operation = {
+ .issue_afs_rpc = afs_fs_setattr,
+ .issue_yfs_rpc = yfs_fs_setattr,
+ .success = afs_setattr_success,
+};
+
/*
* set the attributes of an inode
*/
int afs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- struct key *key;
- int ret = -ENOMEM;
_enter("{%llx:%llu},{n=%pd},%x",
vnode->fid.vid, vnode->fid.vnode, dentry,
@@ -829,48 +812,22 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
-
/* flush any dirty data outstanding on a regular file */
if (S_ISREG(vnode->vfs_inode.i_mode))
filemap_write_and_wait(vnode->vfs_inode.i_mapping);
- if (attr->ia_valid & ATTR_FILE) {
- key = afs_file_key(attr->ia_file);
- } else {
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
- }
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ?
+ afs_file_key(attr->ia_file) : NULL),
+ vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- if (attr->ia_valid & ATTR_SIZE)
- data_version++;
+ afs_op_set_vnode(op, 0, vnode);
+ op->setattr.attr = attr;
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_setattr(&fc, attr, scb);
- }
-
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ if (attr->ia_valid & ATTR_SIZE)
+ op->file[0].dv_delta = 1;
- if (!(attr->ia_valid & ATTR_FILE))
- key_put(key);
-
-error_scb:
- kfree(scb);
-error:
- _leave(" = %d", ret);
- return ret;
+ op->ops = &afs_setattr_operation;
+ return afs_do_sync_operation(op);
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 80255513e230..0c9806ef2a19 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -59,13 +59,6 @@ struct afs_fs_context {
struct key *key; /* key to use for secure mounting */
};
-struct afs_iget_data {
- struct afs_fid fid;
- struct afs_volume *volume; /* volume on which resides */
- unsigned int cb_v_break; /* Pre-fetch volume break count */
- unsigned int cb_s_break; /* Pre-fetch server break count */
-};
-
enum afs_call_state {
AFS_CALL_CL_REQUESTING, /* Client: Request is being sent */
AFS_CALL_CL_AWAIT_REPLY, /* Client: Awaiting reply */
@@ -90,7 +83,6 @@ struct afs_addr_list {
unsigned char nr_ipv4; /* Number of IPv4 addresses */
enum dns_record_source source:8;
enum dns_lookup_status status:8;
- unsigned long probed; /* Mask of servers that have been probed */
unsigned long failed; /* Mask of addrs that failed locally/ICMP */
unsigned long responded; /* Mask of addrs that responded */
struct sockaddr_rxrpc addrs[];
@@ -111,10 +103,7 @@ struct afs_call {
struct afs_net *net; /* The network namespace */
struct afs_server *server; /* The fileserver record if fs op (pins ref) */
struct afs_vlserver *vlserver; /* The vlserver record if vl op */
- struct afs_cb_interest *cbi; /* Callback interest for server used */
- struct afs_vnode *lvnode; /* vnode being locked */
void *request; /* request data (first part) */
- struct address_space *mapping; /* Pages being written from */
struct iov_iter def_iter; /* Default buffer/data iterator */
struct iov_iter *iter; /* Iterator currently in use */
union { /* Convenience for ->def_iter */
@@ -126,32 +115,19 @@ struct afs_call {
long ret0; /* Value to reply with instead of 0 */
struct afs_addr_list *ret_alist;
struct afs_vldb_entry *ret_vldb;
- struct afs_acl *ret_acl;
+ char *ret_str;
};
- struct afs_fid *out_fid;
- struct afs_status_cb *out_dir_scb;
- struct afs_status_cb *out_scb;
- struct yfs_acl *out_yacl;
- struct afs_volsync *out_volsync;
- struct afs_volume_status *out_volstatus;
- struct afs_read *read_request;
+ struct afs_operation *op;
unsigned int server_index;
- pgoff_t first; /* first page in mapping to deal with */
- pgoff_t last; /* last page in mapping to deal with */
atomic_t usage;
enum afs_call_state state;
spinlock_t state_lock;
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
- u32 epoch;
unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
- unsigned first_offset; /* offset into mapping[first] */
- union {
- unsigned last_to; /* amount of mapping[last] */
- unsigned count2; /* count used in unmarshalling */
- };
+ unsigned count2; /* count used in unmarshalling */
unsigned char unmarshall; /* unmarshalling phase */
unsigned char addr_ix; /* Address in ->alist */
bool drop_ref; /* T if need to drop ref for incoming call */
@@ -161,6 +137,7 @@ struct afs_call {
bool upgrade; /* T to request service upgrade */
bool have_reply_time; /* T if have got reply_time */
bool intr; /* T if interruptible */
+ bool unmarshalling_error; /* T if an unmarshalling error occurred */
u16 service_id; /* Actual service ID (after upgrade) */
unsigned int debug_id; /* Trace ID */
u32 operation_ID; /* operation ID for an incoming call */
@@ -291,6 +268,7 @@ struct afs_net {
struct timer_list cells_timer;
atomic_t cells_outstanding;
seqlock_t cells_lock;
+ struct mutex cells_alias_lock;
struct mutex proc_cells_lock;
struct hlist_head proc_cells;
@@ -299,9 +277,10 @@ struct afs_net {
* cell, but in practice, people create aliases and subsets and there's
* no easy way to distinguish them.
*/
- seqlock_t fs_lock; /* For fs_servers */
+ seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */
struct rb_root fs_servers; /* afs_server (by server UUID or address) */
- struct list_head fs_updates; /* afs_server (by update_at) */
+ struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */
+ struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
struct hlist_head fs_addresses4; /* afs_server (by lowest IPv4 addr) */
@@ -310,6 +289,9 @@ struct afs_net {
struct work_struct fs_manager;
struct timer_list fs_timer;
+
+ struct work_struct fs_prober;
+ struct timer_list fs_probe_timer;
atomic_t servers_outstanding;
/* File locking renewal management */
@@ -360,8 +342,10 @@ enum afs_cell_state {
* for authentication and encryption. The cell name is not typically used in
* the protocol.
*
- * There is no easy way to determine if two cells are aliases or one is a
- * subset of another.
+ * Two cells are determined to be aliases if they have an explicit alias (YFS
+ * only), share any VL servers or have at least one volume in common. "In
+ * common" means that the address lists of the VL servers or of the fileservers
+ * share at least one endpoint.
*/
struct afs_cell {
union {
@@ -369,6 +353,8 @@ struct afs_cell {
struct rb_node net_node; /* Node in net->cells */
};
struct afs_net *net;
+ struct afs_cell *alias_of; /* The cell this is an alias of */
+ struct afs_volume *root_volume; /* The root.cell volume if there is one */
struct key *anonymous_key; /* anonymous user key for this cell */
struct work_struct manager; /* Manager for init/deinit/dns */
struct hlist_node proc_link; /* /proc cell list link */
@@ -381,15 +367,21 @@ struct afs_cell {
unsigned long flags;
#define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
#define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */
+#define AFS_CELL_FL_CHECK_ALIAS 2 /* Need to check for aliases */
enum afs_cell_state state;
short error;
enum dns_record_source dns_source:8; /* Latest source of data from lookup */
enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
unsigned int dns_lookup_count; /* Counter of DNS lookups */
+ /* The volumes belonging to this cell */
+ struct rb_root volumes; /* Tree of volumes in this cell */
+ struct hlist_head proc_volumes; /* procfs volume list */
+ seqlock_t volume_lock; /* For volumes */
+
/* Active fileserver interaction state. */
- struct list_head proc_volumes; /* procfs volume list */
- rwlock_t proc_lock;
+ struct rb_root fs_servers; /* afs_server (by server UUID) */
+ seqlock_t fs_lock; /* For fs_servers */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -471,6 +463,7 @@ struct afs_vldb_entry {
#define AFS_VLDB_QUERY_ERROR 4 /* - VL server returned error */
uuid_t fs_server[AFS_NMAXNSERVERS];
+ u32 addr_version[AFS_NMAXNSERVERS]; /* Registration change counters */
u8 fs_mask[AFS_NMAXNSERVERS];
#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
@@ -492,94 +485,64 @@ struct afs_server {
};
struct afs_addr_list __rcu *addresses;
- struct rb_node uuid_rb; /* Link in net->servers */
+ struct afs_cell *cell; /* Cell to which belongs (pins ref) */
+ struct rb_node uuid_rb; /* Link in net->fs_servers */
+ struct afs_server __rcu *uuid_next; /* Next server with same UUID */
+ struct afs_server *uuid_prev; /* Previous server with same UUID */
+ struct list_head probe_link; /* Link in net->fs_probe_list */
struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
struct afs_server *gc_next; /* Next server in manager's list */
- time64_t put_time; /* Time at which last put */
- time64_t update_at; /* Time at which to next update the record */
+ time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
-#define AFS_SERVER_FL_NOT_READY 1 /* The record is not ready for use */
-#define AFS_SERVER_FL_NOT_FOUND 2 /* VL server says no such server */
-#define AFS_SERVER_FL_VL_FAIL 3 /* Failed to access VL server */
-#define AFS_SERVER_FL_UPDATING 4
-#define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */
-#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */
-#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */
+#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */
+#define AFS_SERVER_FL_UPDATING 1
+#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */
+#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */
+#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */
+#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
-#define AFS_SERVER_FL_IS_YFS 9 /* Server is YFS not AFS */
-#define AFS_SERVER_FL_NO_RM2 10 /* Fileserver doesn't support YFS.RemoveFile2 */
-#define AFS_SERVER_FL_HAVE_EPOCH 11 /* ->epoch is valid */
- atomic_t usage;
+#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
+#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
+#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
+ atomic_t ref; /* Object refcount */
+ atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
- u32 cm_epoch; /* Server RxRPC epoch */
+ unsigned int rtt; /* Server's current RTT in uS */
unsigned int debug_id; /* Debugging ID for traces */
/* file service access */
rwlock_t fs_lock; /* access lock */
/* callback promise management */
- struct hlist_head cb_volumes; /* List of volume interests on this server */
unsigned cb_s_break; /* Break-everything counter. */
- rwlock_t cb_break_lock; /* Volume finding lock */
/* Probe state */
+ unsigned long probed_at; /* Time last probe was dispatched (jiffies) */
wait_queue_head_t probe_wq;
atomic_t probe_outstanding;
spinlock_t probe_lock;
struct {
- unsigned int rtt; /* RTT as ktime/64 */
+ unsigned int rtt; /* RTT in uS */
u32 abort_code;
- u32 cm_epoch;
short error;
bool responded:1;
bool is_yfs:1;
bool not_yfs:1;
bool local_failure:1;
- bool cm_probed:1;
- bool said_rebooted:1;
- bool said_inconsistent:1;
} probe;
};
/*
- * Volume collation in the server's callback interest list.
- */
-struct afs_vol_interest {
- struct hlist_node srv_link; /* Link in server->cb_volumes */
- struct hlist_head cb_interests; /* List of callback interests on the server */
- union {
- struct rcu_head rcu;
- afs_volid_t vid; /* Volume ID to match */
- };
- unsigned int usage;
-};
-
-/*
- * Interest by a superblock on a server.
- */
-struct afs_cb_interest {
- struct hlist_node cb_vlink; /* Link in vol_interest->cb_interests */
- struct afs_vol_interest *vol_interest;
- struct afs_server *server; /* Server on which this interest resides */
- struct super_block *sb; /* Superblock on which inodes reside */
- union {
- struct rcu_head rcu;
- afs_volid_t vid; /* Volume ID to match */
- };
- refcount_t usage;
-};
-
-/*
- * Replaceable server list.
+ * Replaceable volume server list.
*/
struct afs_server_entry {
struct afs_server *server;
- struct afs_cb_interest *cb_interest;
};
struct afs_server_list {
+ afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
refcount_t usage;
unsigned char nr_servers;
unsigned char preferred; /* Preferred server */
@@ -593,11 +556,16 @@ struct afs_server_list {
* Live AFS volume management.
*/
struct afs_volume {
- afs_volid_t vid; /* volume ID */
+ union {
+ struct rcu_head rcu;
+ afs_volid_t vid; /* volume ID */
+ };
atomic_t usage;
time64_t update_at; /* Time at which to next update */
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
- struct list_head proc_link; /* Link in cell->vl_proc */
+ struct rb_node cell_node; /* Link in cell->volumes */
+ struct hlist_node proc_link; /* Link in cell->proc_volumes */
+ struct super_block __rcu *sb; /* Superblock on which inodes reside */
unsigned long flags;
#define AFS_VOLUME_NEEDS_UPDATE 0 /* - T if an update needs performing */
#define AFS_VOLUME_UPDATING 1 /* - T if an update is in progress */
@@ -605,10 +573,11 @@ struct afs_volume {
#define AFS_VOLUME_DELETED 3 /* - T if volume appears deleted */
#define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
#define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
+#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie *cache; /* caching cookie */
#endif
- struct afs_server_list *servers; /* List of servers on which volume resides */
+ struct afs_server_list __rcu *servers; /* List of servers on which volume resides */
rwlock_t servers_lock; /* Lock for ->servers */
unsigned int servers_seq; /* Incremented each time ->servers changes */
@@ -616,7 +585,6 @@ struct afs_volume {
rwlock_t cb_v_break_lock;
afs_voltype_t type; /* type of volume */
- short error;
char type_force; /* force volume type (suppress R/O -> R/W) */
u8 name_len;
u8 name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */
@@ -677,11 +645,11 @@ struct afs_vnode {
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
- struct afs_cb_interest __rcu *cb_interest; /* Server on which this resides */
+ void *cb_server; /* Server with callback/filelock */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
- seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */
+ seqlock_t cb_lock; /* Lock for ->cb_server, ->status, ->cb_*break */
time64_t cb_expires_at; /* time at which callback expires */
};
@@ -758,29 +726,117 @@ struct afs_vl_cursor {
};
/*
- * Cursor for iterating over a set of fileservers.
+ * Fileserver operation methods.
+ */
+struct afs_operation_ops {
+ void (*issue_afs_rpc)(struct afs_operation *op);
+ void (*issue_yfs_rpc)(struct afs_operation *op);
+ void (*success)(struct afs_operation *op);
+ void (*aborted)(struct afs_operation *op);
+ void (*edit_dir)(struct afs_operation *op);
+ void (*put)(struct afs_operation *op);
+};
+
+struct afs_vnode_param {
+ struct afs_vnode *vnode;
+ struct afs_fid fid; /* Fid to access */
+ struct afs_status_cb scb; /* Returned status and callback promise */
+ afs_dataversion_t dv_before; /* Data version before the call */
+ unsigned int cb_break_before; /* cb_break + cb_s_break before the call */
+ u8 dv_delta; /* Expected change in data version */
+ bool put_vnode; /* T if we have a ref on the vnode */
+ bool need_io_lock; /* T if we need the I/O lock on this */
+};
+
+/*
+ * Fileserver operation wrapper, handling server and address rotation
+ * asynchronously. May make simultaneous calls to multiple servers.
*/
-struct afs_fs_cursor {
+struct afs_operation {
+ struct afs_net *net; /* Network namespace */
+ struct key *key; /* Key for the cell */
const struct afs_call_type *type; /* Type of call done */
+ const struct afs_operation_ops *ops;
+
+ /* Parameters/results for the operation */
+ struct afs_volume *volume; /* Volume being accessed */
+ struct afs_vnode_param file[2];
+ struct afs_vnode_param *more_files;
+ struct afs_volsync volsync;
+ struct dentry *dentry; /* Dentry to be altered */
+ struct dentry *dentry_2; /* Second dentry to be altered */
+ struct timespec64 mtime; /* Modification time to record */
+ short nr_files; /* Number of entries in file[], more_files */
+ short error;
+ unsigned int abort_code;
+ unsigned int debug_id;
+
+ unsigned int cb_v_break; /* Volume break counter before op */
+ unsigned int cb_s_break; /* Server break counter before op */
+
+ union {
+ struct {
+ int which; /* Which ->file[] to fetch for */
+ } fetch_status;
+ struct {
+ int reason; /* enum afs_edit_dir_reason */
+ mode_t mode;
+ const char *symlink;
+ } create;
+ struct {
+ bool need_rehash;
+ } unlink;
+ struct {
+ struct dentry *rehash;
+ struct dentry *tmp;
+ bool new_negative;
+ } rename;
+ struct {
+ struct afs_read *req;
+ } fetch;
+ struct {
+ afs_lock_type_t type;
+ } lock;
+ struct {
+ struct address_space *mapping; /* Pages being written from */
+ pgoff_t first; /* first page in mapping to deal with */
+ pgoff_t last; /* last page in mapping to deal with */
+ unsigned first_offset; /* offset into mapping[first] */
+ unsigned last_to; /* amount of mapping[last] */
+ } store;
+ struct {
+ struct iattr *attr;
+ } setattr;
+ struct afs_acl *acl;
+ struct yfs_acl *yacl;
+ struct {
+ struct afs_volume_status vs;
+ struct kstatfs *buf;
+ } volstatus;
+ };
+
+ /* Fileserver iteration state */
struct afs_addr_cursor ac;
- struct afs_vnode *vnode;
struct afs_server_list *server_list; /* Current server list (pins ref) */
- struct afs_cb_interest *cbi; /* Server on which this resides (pins ref) */
- struct key *key; /* Key for the server */
+ struct afs_server *server; /* Server we're using (ref pinned by server_list) */
+ struct afs_call *call;
unsigned long untried; /* Bitmask of untried servers */
- unsigned int cb_break; /* cb_break + cb_s_break before the call */
- unsigned int cb_break_2; /* cb_break + cb_s_break (2nd vnode) */
short index; /* Current server */
- short error;
- unsigned short flags;
-#define AFS_FS_CURSOR_STOP 0x0001 /* Set to cease iteration */
-#define AFS_FS_CURSOR_VBUSY 0x0002 /* Set if seen VBUSY */
-#define AFS_FS_CURSOR_VMOVED 0x0004 /* Set if seen VMOVED */
-#define AFS_FS_CURSOR_VNOVOL 0x0008 /* Set if seen VNOVOL */
-#define AFS_FS_CURSOR_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */
-#define AFS_FS_CURSOR_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
-#define AFS_FS_CURSOR_INTR 0x0040 /* Set if op is interruptible */
unsigned short nr_iterations; /* Number of server iterations */
+
+ unsigned int flags;
+#define AFS_OPERATION_STOP 0x0001 /* Set to cease iteration */
+#define AFS_OPERATION_VBUSY 0x0002 /* Set if seen VBUSY */
+#define AFS_OPERATION_VMOVED 0x0004 /* Set if seen VMOVED */
+#define AFS_OPERATION_VNOVOL 0x0008 /* Set if seen VNOVOL */
+#define AFS_OPERATION_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */
+#define AFS_OPERATION_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
+#define AFS_OPERATION_UNINTR 0x0040 /* Set if op is uninterruptible */
+#define AFS_OPERATION_DOWNGRADE 0x0080 /* Set to retry with downgraded opcode */
+#define AFS_OPERATION_LOCK_0 0x0100 /* Set if have io_lock on file[0] */
+#define AFS_OPERATION_LOCK_1 0x0200 /* Set if have io_lock on file[1] */
+#define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */
+#define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */
};
/*
@@ -838,29 +894,15 @@ extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break *);
-extern int afs_register_server_cb_interest(struct afs_vnode *,
- struct afs_server_list *, unsigned int);
-extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *);
-extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *);
-
-static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi)
-{
- if (cbi)
- refcount_inc(&cbi->usage);
- return cbi;
-}
-
static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
{
return vnode->cb_break + vnode->cb_v_break;
}
static inline bool afs_cb_is_broken(unsigned int cb_break,
- const struct afs_vnode *vnode,
- const struct afs_cb_interest *cbi)
+ const struct afs_vnode *vnode)
{
- return !cbi || cb_break != (vnode->cb_break +
- vnode->volume->cb_v_break);
+ return cb_break != (vnode->cb_break + vnode->volume->cb_v_break);
}
/*
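
With the cb_interest layer gone, staleness checking reduces to the two helpers above. A hedged sketch of the intended pattern (mirroring the callers in security.c later in this diff): snapshot the break sum before relying on cached state, then re-check it afterwards.

/* Sketch: detect a callback break across a critical section. */
static bool afs_example_still_valid(struct afs_vnode *vnode)
{
	unsigned int cb_break = afs_calc_vnode_cb_break(vnode);

	/* ... use the cached status/permits here ... */

	/* True if neither the vnode's nor the volume's break counter has
	 * advanced, i.e. the callback promise still stands.
	 */
	return !afs_cb_is_broken(cb_break, vnode);
}
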
@@ -952,72 +994,81 @@ extern int afs_flock(struct file *, int, struct file_lock *);
/*
* fsclient.c
*/
-extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *,
- struct afs_volsync *);
-extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *);
-extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *);
-extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t,
- struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
-extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool,
- struct afs_status_cb *);
-extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
- struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
-extern int afs_fs_rename(struct afs_fs_cursor *, const char *,
- struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int afs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
- pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *);
-extern int afs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *);
-extern int afs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
-extern int afs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *);
-extern int afs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int afs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *);
+extern void afs_fs_fetch_status(struct afs_operation *);
+extern void afs_fs_fetch_data(struct afs_operation *);
+extern void afs_fs_create_file(struct afs_operation *);
+extern void afs_fs_make_dir(struct afs_operation *);
+extern void afs_fs_remove_file(struct afs_operation *);
+extern void afs_fs_remove_dir(struct afs_operation *);
+extern void afs_fs_link(struct afs_operation *);
+extern void afs_fs_symlink(struct afs_operation *);
+extern void afs_fs_rename(struct afs_operation *);
+extern void afs_fs_store_data(struct afs_operation *);
+extern void afs_fs_setattr(struct afs_operation *);
+extern void afs_fs_get_volume_status(struct afs_operation *);
+extern void afs_fs_set_lock(struct afs_operation *);
+extern void afs_fs_extend_lock(struct afs_operation *);
+extern void afs_fs_release_lock(struct afs_operation *);
extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *);
-extern struct afs_call *afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
- struct afs_addr_cursor *, struct key *,
- unsigned int);
-extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- unsigned int, struct afs_volsync *);
-extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- struct afs_volsync *);
+extern bool afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
+ struct afs_addr_cursor *, struct key *);
+extern void afs_fs_inline_bulk_status(struct afs_operation *);
struct afs_acl {
u32 size;
u8 data[];
};
-extern struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int afs_fs_store_acl(struct afs_fs_cursor *, const struct afs_acl *,
- struct afs_status_cb *);
+extern void afs_fs_fetch_acl(struct afs_operation *);
+extern void afs_fs_store_acl(struct afs_operation *);
+
+/*
+ * fs_operation.c
+ */
+extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *);
+extern int afs_put_operation(struct afs_operation *);
+extern bool afs_begin_vnode_operation(struct afs_operation *);
+extern void afs_wait_for_operation(struct afs_operation *);
+extern int afs_do_sync_operation(struct afs_operation *);
+
+static inline void afs_op_nomem(struct afs_operation *op)
+{
+ op->error = -ENOMEM;
+}
+
+static inline void afs_op_set_vnode(struct afs_operation *op, unsigned int n,
+ struct afs_vnode *vnode)
+{
+ op->file[n].vnode = vnode;
+ op->file[n].need_io_lock = true;
+}
+
+static inline void afs_op_set_fid(struct afs_operation *op, unsigned int n,
+ const struct afs_fid *fid)
+{
+ op->file[n].fid = *fid;
+}
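
Taken together, the declarations and helpers above imply a calling convention for fileserver operations. A hedged sketch follows, assuming afs_alloc_operation() returns an ERR_PTR on failure and that afs_do_sync_operation() rotates servers, waits for the result and puts the op; example_ops is a hypothetical afs_operation_ops table, not one defined by this patch:

static int afs_example_sync_op(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);		/* target vnode; requests the io_lock */
	op->ops = &example_ops;			/* hypothetical ops table */

	/* Issues the AFS or YFS RPC via the ops table, rotating through
	 * servers/addresses as needed, waits for completion and returns
	 * op->error.
	 */
	return afs_do_sync_operation(op);
}
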
/*
* fs_probe.c
*/
extern void afs_fileserver_probe_result(struct afs_call *);
-extern int afs_probe_fileservers(struct afs_net *, struct key *, struct afs_server_list *);
+extern void afs_fs_probe_fileserver(struct afs_net *, struct afs_server *, struct key *, bool);
extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
+extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
+extern void afs_fs_probe_dispatcher(struct work_struct *);
+extern int afs_wait_for_one_fs_probe(struct afs_server *, bool);
/*
* inode.c
*/
-extern void afs_vnode_commit_status(struct afs_fs_cursor *,
- struct afs_vnode *,
- unsigned int,
- const afs_dataversion_t *,
- struct afs_status_cb *);
+extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
-extern int afs_iget5_test(struct inode *, void *);
+extern int afs_ilookup5_test_by_fid(struct inode *, void *);
extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
-extern struct inode *afs_iget(struct super_block *, struct key *,
- struct afs_iget_data *, struct afs_status_cb *,
- struct afs_cb_interest *,
- struct afs_vnode *);
-extern void afs_zap_data(struct afs_vnode *);
+extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
+extern struct inode *afs_root_iget(struct super_block *, struct key *);
extern bool afs_check_validity(struct afs_vnode *);
extern int afs_validate(struct afs_vnode *, struct key *);
extern int afs_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -1104,11 +1155,8 @@ static inline void afs_put_sysnames(struct afs_sysnames *sysnames) {}
/*
* rotate.c
*/
-extern bool afs_begin_vnode_operation(struct afs_fs_cursor *, struct afs_vnode *,
- struct key *, bool);
-extern bool afs_select_fileserver(struct afs_fs_cursor *);
-extern bool afs_select_current_fileserver(struct afs_fs_cursor *);
-extern int afs_end_vnode_operation(struct afs_fs_cursor *);
+extern bool afs_select_fileserver(struct afs_operation *);
+extern void afs_dump_edestaddrreq(const struct afs_operation *);
/*
* rxrpc.c
@@ -1128,12 +1176,17 @@ extern void afs_flat_call_destructor(struct afs_call *);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, bool);
-extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause);
+extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
-static inline void afs_set_fc_call(struct afs_call *call, struct afs_fs_cursor *fc)
+static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
+ gfp_t gfp)
{
- call->intr = fc->flags & AFS_FS_CURSOR_INTR;
- fc->type = call->type;
+ op->call = call;
+ op->type = call->type;
+ call->op = op;
+ call->key = op->key;
+ call->intr = !(op->flags & AFS_OPERATION_UNINTR);
+ afs_make_call(&op->ac, call, gfp);
}
static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
@@ -1241,13 +1294,33 @@ extern spinlock_t afs_server_peer_lock;
extern struct afs_server *afs_find_server(struct afs_net *,
const struct sockaddr_rxrpc *);
extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
-extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *);
+extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
+extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace);
+extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
+extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace);
extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
extern void afs_manage_servers(struct work_struct *);
extern void afs_servers_timer(struct timer_list *);
+extern void afs_fs_probe_timer(struct timer_list *);
extern void __net_exit afs_purge_servers(struct afs_net *);
-extern bool afs_check_server_record(struct afs_fs_cursor *, struct afs_server *);
+extern bool afs_check_server_record(struct afs_operation *, struct afs_server *);
+
+static inline void afs_inc_servers_outstanding(struct afs_net *net)
+{
+ atomic_inc(&net->servers_outstanding);
+}
+
+static inline void afs_dec_servers_outstanding(struct afs_net *net)
+{
+ if (atomic_dec_and_test(&net->servers_outstanding))
+ wake_up_var(&net->servers_outstanding);
+}
+
+static inline bool afs_is_probing_server(struct afs_server *server)
+{
+ return list_empty(&server->probe_link);
+}
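
The server record now carries both a ref count and an active count (see the struct change earlier in this diff), with get/use/unuse/put helpers declared above. A hedged sketch of the assumed pairing, reusing trace points that appear elsewhere in this patch:

static void afs_example_with_server(struct afs_net *net, struct afs_server *server)
{
	/* Pin the record as actively in use for the duration. */
	server = afs_use_server(server, afs_server_trace_get_by_addr);

	/* ... issue calls to this fileserver ... */

	/* Drop the active count; unuse_time is presumably stamped so the
	 * manager can garbage-collect idle records later.
	 */
	afs_unuse_server(net, server, afs_server_trace_put_call);
}
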
/*
* server_list.c
@@ -1279,6 +1352,12 @@ extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uu
extern struct afs_call *afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *,
struct key *, struct afs_vlserver *, unsigned int);
extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *);
+extern char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *);
+
+/*
+ * vl_alias.c
+ */
+extern int afs_cell_detect_alias(struct afs_cell *, struct key *);
/*
* vl_probe.c
@@ -1322,18 +1401,12 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
/*
* volume.c
*/
-static inline struct afs_volume *__afs_get_volume(struct afs_volume *volume)
-{
- if (volume)
- atomic_inc(&volume->usage);
- return volume;
-}
-
extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
extern void afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
-extern void afs_put_volume(struct afs_cell *, struct afs_volume *);
-extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *);
+extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
+extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
+extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
/*
* write.c
@@ -1362,36 +1435,24 @@ extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
/*
* yfsclient.c
*/
-extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *,
- struct afs_volsync *);
-extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *);
-extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *,
- struct afs_fid *, struct afs_status_cb *);
-extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *,
- struct afs_fid *, struct afs_status_cb *);
-extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool,
- struct afs_status_cb *);
-extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
- struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
-extern int yfs_fs_rename(struct afs_fs_cursor *, const char *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
- pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *);
-extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *);
-extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
-extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *);
-extern int yfs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int yfs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- struct afs_volsync *);
-extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- unsigned int, struct afs_volsync *);
+extern void yfs_fs_fetch_file_status(struct afs_operation *);
+extern void yfs_fs_fetch_data(struct afs_operation *);
+extern void yfs_fs_create_file(struct afs_operation *);
+extern void yfs_fs_make_dir(struct afs_operation *);
+extern void yfs_fs_remove_file2(struct afs_operation *);
+extern void yfs_fs_remove_file(struct afs_operation *);
+extern void yfs_fs_remove_dir(struct afs_operation *);
+extern void yfs_fs_link(struct afs_operation *);
+extern void yfs_fs_symlink(struct afs_operation *);
+extern void yfs_fs_rename(struct afs_operation *);
+extern void yfs_fs_store_data(struct afs_operation *);
+extern void yfs_fs_setattr(struct afs_operation *);
+extern void yfs_fs_get_volume_status(struct afs_operation *);
+extern void yfs_fs_set_lock(struct afs_operation *);
+extern void yfs_fs_extend_lock(struct afs_operation *);
+extern void yfs_fs_release_lock(struct afs_operation *);
+extern void yfs_fs_fetch_status(struct afs_operation *);
+extern void yfs_fs_inline_bulk_status(struct afs_operation *);
struct yfs_acl {
struct afs_acl *acl; /* Dir/file/symlink ACL */
@@ -1404,10 +1465,8 @@ struct yfs_acl {
};
extern void yfs_free_opaque_acl(struct yfs_acl *);
-extern struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *, struct yfs_acl *,
- struct afs_status_cb *);
-extern int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *, const struct afs_acl *,
- struct afs_status_cb *);
+extern void yfs_fs_fetch_opaque_acl(struct afs_operation *);
+extern void yfs_fs_store_opaque_acl2(struct afs_operation *);
/*
* Miscellaneous inline functions.
@@ -1422,15 +1481,29 @@ static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
return &vnode->vfs_inode;
}
-static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc,
+static inline void afs_check_for_remote_deletion(struct afs_operation *op,
struct afs_vnode *vnode)
{
- if (fc->ac.error == -ENOENT) {
+ if (op->error == -ENOENT) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
afs_break_callback(vnode, afs_cb_break_for_deleted);
}
}
+/*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static inline void afs_update_dentry_version(struct afs_operation *op,
+ struct afs_vnode_param *dir_vp,
+ struct dentry *dentry)
+{
+ if (!op->error)
+ dentry->d_fsdata =
+ (void *)(unsigned long)dir_vp->scb.status.data_version;
+}
+
static inline int afs_io_error(struct afs_call *call, enum afs_io_error where)
{
trace_afs_io_error(call->debug_id, -EIO, where);
diff --git a/fs/afs/main.c b/fs/afs/main.c
index c9c45d7078bd..9c79c91e8005 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -82,12 +82,14 @@ static int __net_init afs_net_init(struct net *net_ns)
INIT_WORK(&net->cells_manager, afs_manage_cells);
timer_setup(&net->cells_timer, afs_cells_timer, 0);
+ mutex_init(&net->cells_alias_lock);
mutex_init(&net->proc_cells_lock);
INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
net->fs_servers = RB_ROOT;
- INIT_LIST_HEAD(&net->fs_updates);
+ INIT_LIST_HEAD(&net->fs_probe_fast);
+ INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
INIT_HLIST_HEAD(&net->fs_addresses4);
@@ -96,6 +98,8 @@ static int __net_init afs_net_init(struct net *net_ns)
INIT_WORK(&net->fs_manager, afs_manage_servers);
timer_setup(&net->fs_timer, afs_servers_timer, 0);
+ INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
+ timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
ret = -ENOMEM;
sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
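
The two new lines above wire a prober work item and a probe timer into the per-network state. A hedged sketch of the usual timer-to-work handoff this implies (the real handler lives in fs_probe.c; the function name here is illustrative only):

static void example_fs_probe_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);

	/* Defer to process context; the dispatcher then walks the
	 * fs_probe_fast/fs_probe_slow lists to find servers that are due
	 * another probe.
	 */
	schedule_work(&net->fs_prober);
}
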
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 468e1713bce1..e817fc740ba0 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
if (v == SEQ_START_TOKEN) {
/* display header on line 1 */
- seq_puts(m, "USE TTL SV NAME\n");
+ seq_puts(m, "USE TTL SV ST NAME\n");
return 0;
}
@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
vllist = rcu_dereference(cell->vl_servers);
/* display one cell per line on subsequent lines */
- seq_printf(m, "%3u %6lld %2u %s\n",
+ seq_printf(m, "%3u %6lld %2u %2u %s\n",
atomic_read(&cell->usage),
cell->dns_expiry - ktime_get_real_seconds(),
vllist->nr_servers,
+ cell->state,
cell->name);
return 0;
}
@@ -208,11 +209,10 @@ static const char afs_vol_types[3][3] = {
*/
static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
- struct afs_volume *vol = list_entry(v, struct afs_volume, proc_link);
+ struct afs_volume *vol = hlist_entry(v, struct afs_volume, proc_link);
/* Display header on line 1 */
- if (v == &cell->proc_volumes) {
+ if (v == SEQ_START_TOKEN) {
seq_puts(m, "USE VID TY NAME\n");
return 0;
}
@@ -230,8 +230,8 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
{
struct afs_cell *cell = PDE_DATA(file_inode(m->file));
- read_lock(&cell->proc_lock);
- return seq_list_start_head(&cell->proc_volumes, *_pos);
+ rcu_read_lock();
+ return seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos);
}
static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
@@ -239,15 +239,13 @@ static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
{
struct afs_cell *cell = PDE_DATA(file_inode(m->file));
- return seq_list_next(v, &cell->proc_volumes, _pos);
+ return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos);
}
static void afs_proc_cell_volumes_stop(struct seq_file *m, void *v)
__releases(cell->proc_lock)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
-
- read_unlock(&cell->proc_lock);
+ rcu_read_unlock();
}
static const struct seq_operations afs_proc_cell_volumes_ops = {
@@ -378,20 +376,26 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
int i;
if (v == SEQ_START_TOKEN) {
- seq_puts(m, "UUID USE ADDR\n");
+ seq_puts(m, "UUID REF ACT\n");
return 0;
}
server = list_entry(v, struct afs_server, proc_link);
alist = rcu_dereference(server->addresses);
- seq_printf(m, "%pU %3d %pISpc%s\n",
+ seq_printf(m, "%pU %3d %3d\n",
&server->uuid,
- atomic_read(&server->usage),
- &alist->addrs[0].transport,
- alist->preferred == 0 ? "*" : "");
- for (i = 1; i < alist->nr_addrs; i++)
- seq_printf(m, " %pISpc%s\n",
- &alist->addrs[i].transport,
+ atomic_read(&server->ref),
+ atomic_read(&server->active));
+ seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n",
+ server->flags, server->rtt, server->cb_s_break);
+ seq_printf(m, " - probe: last=%d out=%d\n",
+ (int)(jiffies - server->probed_at) / HZ,
+ atomic_read(&server->probe_outstanding));
+ seq_printf(m, " - ALIST v=%u rsp=%lx f=%lx\n",
+ alist->version, alist->responded, alist->failed);
+ for (i = 0; i < alist->nr_addrs; i++)
+ seq_printf(m, " [%x] %pISpc%s\n",
+ i, &alist->addrs[i].transport,
alist->preferred == i ? "*" : "");
return 0;
}
@@ -563,6 +567,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames)
if (sysnames->subs[i] != afs_init_sysname &&
sysnames->subs[i] != sysnames->blank)
kfree(sysnames->subs[i]);
+ kfree(sysnames);
}
}
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index 32be9c698348..b5bd03b1d3c7 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -8,7 +8,7 @@
#define YFS_FS_SERVICE 2500
#define YFS_CM_SERVICE 2501
-#define YFSCBMAX 1024
+#define YFSCBMAX 1024
enum YFS_CM_Operations {
YFSCBProbe = 206, /* probe client */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 2a3305e42b14..6a0935cb822f 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -15,60 +15,32 @@
#include "afs_fs.h"
/*
- * Begin an operation on the fileserver.
- *
- * Fileserver operations are serialised on the server by vnode, so we serialise
- * them here also using the io_lock.
- */
-bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- struct key *key, bool intr)
-{
- memset(fc, 0, sizeof(*fc));
- fc->vnode = vnode;
- fc->key = key;
- fc->ac.error = SHRT_MAX;
- fc->error = -EDESTADDRREQ;
-
- if (intr) {
- fc->flags |= AFS_FS_CURSOR_INTR;
- if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
- fc->error = -EINTR;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
- }
- } else {
- mutex_lock(&vnode->io_lock);
- }
-
- if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
- fc->flags |= AFS_FS_CURSOR_CUR_ONLY;
- return true;
-}
-
-/*
* Begin iteration through a server list, starting with the vnode's last used
* server if possible, or the last recorded good server if not.
*/
-static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
+static bool afs_start_fs_iteration(struct afs_operation *op,
struct afs_vnode *vnode)
{
- struct afs_cb_interest *cbi;
+ struct afs_server *server;
+ void *cb_server;
int i;
- read_lock(&vnode->volume->servers_lock);
- fc->server_list = afs_get_serverlist(vnode->volume->servers);
- read_unlock(&vnode->volume->servers_lock);
+ read_lock(&op->volume->servers_lock);
+ op->server_list = afs_get_serverlist(
+ rcu_dereference_protected(op->volume->servers,
+ lockdep_is_held(&op->volume->servers_lock)));
+ read_unlock(&op->volume->servers_lock);
- fc->untried = (1UL << fc->server_list->nr_servers) - 1;
- fc->index = READ_ONCE(fc->server_list->preferred);
+ op->untried = (1UL << op->server_list->nr_servers) - 1;
+ op->index = READ_ONCE(op->server_list->preferred);
- cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock));
- if (cbi) {
+ cb_server = vnode->cb_server;
+ if (cb_server) {
/* See if the vnode's preferred record is still available */
- for (i = 0; i < fc->server_list->nr_servers; i++) {
- if (fc->server_list->servers[i].cb_interest == cbi) {
- fc->index = i;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ server = op->server_list->servers[i].server;
+ if (server == cb_server) {
+ op->index = i;
goto found_interest;
}
}
@@ -77,21 +49,18 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
* serving this vnode, then we can't switch to another server
* and have to return an error.
*/
- if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- fc->error = -ESTALE;
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+ op->error = -ESTALE;
return false;
}
/* Note that the callback promise is effectively broken */
write_seqlock(&vnode->cb_lock);
- ASSERTCMP(cbi, ==, rcu_access_pointer(vnode->cb_interest));
- rcu_assign_pointer(vnode->cb_interest, NULL);
+ ASSERTCMP(cb_server, ==, vnode->cb_server);
+ vnode->cb_server = NULL;
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
vnode->cb_break++;
write_sequnlock(&vnode->cb_lock);
-
- afs_put_cb_interest(afs_v2net(vnode), cbi);
- cbi = NULL;
}
found_interest:
@@ -118,12 +87,12 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
/*
* Sleep and retry the operation to the same fileserver.
*/
-static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
+static bool afs_sleep_and_retry(struct afs_operation *op)
{
- if (fc->flags & AFS_FS_CURSOR_INTR) {
+ if (!(op->flags & AFS_OPERATION_UNINTR)) {
msleep_interruptible(1000);
if (signal_pending(current)) {
- fc->error = -ERESTARTSYS;
+ op->error = -ERESTARTSYS;
return false;
}
} else {
@@ -137,26 +106,26 @@ static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
* Select the fileserver to use. May be called multiple times to rotate
* through the fileservers.
*/
-bool afs_select_fileserver(struct afs_fs_cursor *fc)
+bool afs_select_fileserver(struct afs_operation *op)
{
struct afs_addr_list *alist;
struct afs_server *server;
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *vnode = op->file[0].vnode;
struct afs_error e;
u32 rtt;
- int error = fc->ac.error, i;
+ int error = op->ac.error, i;
_enter("%lx[%d],%lx[%d],%d,%d",
- fc->untried, fc->index,
- fc->ac.tried, fc->ac.index,
- error, fc->ac.abort_code);
+ op->untried, op->index,
+ op->ac.tried, op->ac.index,
+ error, op->ac.abort_code);
- if (fc->flags & AFS_FS_CURSOR_STOP) {
+ if (op->flags & AFS_OPERATION_STOP) {
_leave(" = f [stopped]");
return false;
}
- fc->nr_iterations++;
+ op->nr_iterations++;
/* Evaluate the result of the previous operation, if there was one. */
switch (error) {
@@ -166,8 +135,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case 0:
default:
/* Success or local failure. Stop. */
- fc->error = error;
- fc->flags |= AFS_FS_CURSOR_STOP;
+ op->error = error;
+ op->flags |= AFS_OPERATION_STOP;
_leave(" = f [okay/local %d]", error);
return false;
@@ -175,42 +144,42 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
/* The far side rejected the operation on some grounds. This
* might involve the server being busy or the volume having been moved.
*/
- switch (fc->ac.abort_code) {
+ switch (op->ac.abort_code) {
case VNOVOL:
/* This fileserver doesn't know about the volume.
* - May indicate that the VL is wrong - retry once and compare
* the results.
* - May indicate that the fileserver couldn't attach to the vol.
*/
- if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
- fc->error = -EREMOTEIO;
+ if (op->flags & AFS_OPERATION_VNOVOL) {
+ op->error = -EREMOTEIO;
goto next_server;
}
- write_lock(&vnode->volume->servers_lock);
- fc->server_list->vnovol_mask |= 1 << fc->index;
- write_unlock(&vnode->volume->servers_lock);
+ write_lock(&op->volume->servers_lock);
+ op->server_list->vnovol_mask |= 1 << op->index;
+ write_unlock(&op->volume->servers_lock);
- set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- error = afs_check_volume_status(vnode->volume, fc);
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
+ error = afs_check_volume_status(op->volume, op);
if (error < 0)
goto failed_set_error;
- if (test_bit(AFS_VOLUME_DELETED, &vnode->volume->flags)) {
- fc->error = -ENOMEDIUM;
+ if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) {
+ op->error = -ENOMEDIUM;
goto failed;
}
/* If the server list didn't change, then assume that
* it's the fileserver having trouble.
*/
- if (vnode->volume->servers == fc->server_list) {
- fc->error = -EREMOTEIO;
+ if (rcu_access_pointer(op->volume->servers) == op->server_list) {
+ op->error = -EREMOTEIO;
goto next_server;
}
/* Try again */
- fc->flags |= AFS_FS_CURSOR_VNOVOL;
+ op->flags |= AFS_OPERATION_VNOVOL;
_leave(" = t [vnovol]");
return true;
@@ -220,20 +189,20 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case VONLINE:
case VDISKFULL:
case VOVERQUOTA:
- fc->error = afs_abort_to_error(fc->ac.abort_code);
+ op->error = afs_abort_to_error(op->ac.abort_code);
goto next_server;
case VOFFLINE:
- if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags)) {
- afs_busy(vnode->volume, fc->ac.abort_code);
- clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
+ if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) {
+ afs_busy(op->volume, op->ac.abort_code);
+ clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
}
- if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
- fc->error = -EADV;
+ if (op->flags & AFS_OPERATION_NO_VSLEEP) {
+ op->error = -EADV;
goto failed;
}
- if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- fc->error = -ESTALE;
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+ op->error = -ESTALE;
goto failed;
}
goto busy;
@@ -244,17 +213,17 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
/* Retry after going round all the servers unless we
* have a file lock we need to maintain.
*/
- if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
- fc->error = -EBUSY;
+ if (op->flags & AFS_OPERATION_NO_VSLEEP) {
+ op->error = -EBUSY;
goto failed;
}
- if (!test_and_set_bit(AFS_VOLUME_BUSY, &vnode->volume->flags)) {
- afs_busy(vnode->volume, fc->ac.abort_code);
- clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags);
+ if (!test_and_set_bit(AFS_VOLUME_BUSY, &op->volume->flags)) {
+ afs_busy(op->volume, op->ac.abort_code);
+ clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
}
busy:
- if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- if (!afs_sleep_and_retry(fc))
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+ if (!afs_sleep_and_retry(op))
goto failed;
/* Retry with same server & address */
@@ -262,7 +231,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
return true;
}
- fc->flags |= AFS_FS_CURSOR_VBUSY;
+ op->flags |= AFS_OPERATION_VBUSY;
goto next_server;
case VMOVED:
@@ -273,15 +242,15 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* We also limit the number of VMOVED hops we will
* honour, just in case someone sets up a loop.
*/
- if (fc->flags & AFS_FS_CURSOR_VMOVED) {
- fc->error = -EREMOTEIO;
+ if (op->flags & AFS_OPERATION_VMOVED) {
+ op->error = -EREMOTEIO;
goto failed;
}
- fc->flags |= AFS_FS_CURSOR_VMOVED;
+ op->flags |= AFS_OPERATION_VMOVED;
- set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags);
- set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- error = afs_check_volume_status(vnode->volume, fc);
+ set_bit(AFS_VOLUME_WAIT, &op->volume->flags);
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
+ error = afs_check_volume_status(op->volume, op);
if (error < 0)
goto failed_set_error;
@@ -294,23 +263,23 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
*
* TODO: Retry a few times with sleeps.
*/
- if (vnode->volume->servers == fc->server_list) {
- fc->error = -ENOMEDIUM;
+ if (rcu_access_pointer(op->volume->servers) == op->server_list) {
+ op->error = -ENOMEDIUM;
goto failed;
}
goto restart_from_beginning;
default:
- clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags);
- clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
- fc->error = afs_abort_to_error(fc->ac.abort_code);
+ clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
+ clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
+ op->error = afs_abort_to_error(op->ac.abort_code);
goto failed;
}
case -ETIMEDOUT:
case -ETIME:
- if (fc->error != -EDESTADDRREQ)
+ if (op->error != -EDESTADDRREQ)
goto iterate_address;
/* Fall through */
case -ERFKILL:
@@ -320,103 +289,94 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case -EHOSTDOWN:
case -ECONNREFUSED:
_debug("no conn");
- fc->error = error;
+ op->error = error;
goto iterate_address;
case -ECONNRESET:
_debug("call reset");
- fc->error = error;
+ op->error = error;
goto failed;
}
restart_from_beginning:
_debug("restart");
- afs_end_cursor(&fc->ac);
- afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
- fc->cbi = NULL;
- afs_put_serverlist(afs_v2net(vnode), fc->server_list);
- fc->server_list = NULL;
+ afs_end_cursor(&op->ac);
+ op->server = NULL;
+ afs_put_serverlist(op->net, op->server_list);
+ op->server_list = NULL;
start:
_debug("start");
/* See if we need to do an update of the volume record. Note that the
* volume may have moved or even have been deleted.
*/
- error = afs_check_volume_status(vnode->volume, fc);
+ error = afs_check_volume_status(op->volume, op);
if (error < 0)
goto failed_set_error;
- if (!afs_start_fs_iteration(fc, vnode))
+ if (!afs_start_fs_iteration(op, vnode))
goto failed;
- _debug("__ VOL %llx __", vnode->volume->vid);
- error = afs_probe_fileservers(afs_v2net(vnode), fc->key, fc->server_list);
- if (error < 0)
- goto failed_set_error;
+ _debug("__ VOL %llx __", op->volume->vid);
pick_server:
- _debug("pick [%lx]", fc->untried);
+ _debug("pick [%lx]", op->untried);
- error = afs_wait_for_fs_probes(fc->server_list, fc->untried);
+ error = afs_wait_for_fs_probes(op->server_list, op->untried);
if (error < 0)
goto failed_set_error;
/* Pick the untried server with the lowest RTT. If we have outstanding
* callbacks, we stick with the server we're already using if we can.
*/
- if (fc->cbi) {
- _debug("cbi %u", fc->index);
- if (test_bit(fc->index, &fc->untried))
+ if (op->server) {
+ _debug("server %u", op->index);
+ if (test_bit(op->index, &op->untried))
goto selected_server;
- afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
- fc->cbi = NULL;
- _debug("nocbi");
+ op->server = NULL;
+ _debug("no server");
}
- fc->index = -1;
+ op->index = -1;
rtt = U32_MAX;
- for (i = 0; i < fc->server_list->nr_servers; i++) {
- struct afs_server *s = fc->server_list->servers[i].server;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_server *s = op->server_list->servers[i].server;
- if (!test_bit(i, &fc->untried) || !s->probe.responded)
+ if (!test_bit(i, &op->untried) ||
+ !test_bit(AFS_SERVER_FL_RESPONDING, &s->flags))
continue;
if (s->probe.rtt < rtt) {
- fc->index = i;
+ op->index = i;
rtt = s->probe.rtt;
}
}
- if (fc->index == -1)
+ if (op->index == -1)
goto no_more_servers;
selected_server:
- _debug("use %d", fc->index);
- __clear_bit(fc->index, &fc->untried);
+ _debug("use %d", op->index);
+ __clear_bit(op->index, &op->untried);
/* We're starting on a different fileserver from the list. We need to
* check it, create a callback intercept, find its address list and
* probe its capabilities before we use it.
*/
- ASSERTCMP(fc->ac.alist, ==, NULL);
- server = fc->server_list->servers[fc->index].server;
+ ASSERTCMP(op->ac.alist, ==, NULL);
+ server = op->server_list->servers[op->index].server;
- if (!afs_check_server_record(fc, server))
+ if (!afs_check_server_record(op, server))
goto failed;
_debug("USING SERVER: %pU", &server->uuid);
- /* Make sure we've got a callback interest record for this server. We
- * have to link it in before we send the request as we can be sent a
- * break request before we've finished decoding the reply and
- * installing the vnode.
- */
- error = afs_register_server_cb_interest(vnode, fc->server_list,
- fc->index);
- if (error < 0)
- goto failed_set_error;
-
- fc->cbi = afs_get_cb_interest(
- rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock)));
+ op->flags |= AFS_OPERATION_RETRY_SERVER;
+ op->server = server;
+ if (vnode->cb_server != server) {
+ vnode->cb_server = server;
+ vnode->cb_s_break = server->cb_s_break;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
read_lock(&server->fs_lock);
alist = rcu_dereference_protected(server->addresses,
@@ -424,44 +384,68 @@ selected_server:
afs_get_addrlist(alist);
read_unlock(&server->fs_lock);
- memset(&fc->ac, 0, sizeof(fc->ac));
+retry_server:
+ memset(&op->ac, 0, sizeof(op->ac));
- if (!fc->ac.alist)
- fc->ac.alist = alist;
+ if (!op->ac.alist)
+ op->ac.alist = alist;
else
afs_put_addrlist(alist);
- fc->ac.index = -1;
+ op->ac.index = -1;
iterate_address:
- ASSERT(fc->ac.alist);
+ ASSERT(op->ac.alist);
/* Iterate over the current server's address list to try and find an
* address on which it will respond to us.
*/
- if (!afs_iterate_addresses(&fc->ac))
- goto next_server;
+ if (!afs_iterate_addresses(&op->ac))
+ goto out_of_addresses;
- _debug("address [%u] %u/%u", fc->index, fc->ac.index, fc->ac.alist->nr_addrs);
+ _debug("address [%u] %u/%u %pISp",
+ op->index, op->ac.index, op->ac.alist->nr_addrs,
+ &op->ac.alist->addrs[op->ac.index].transport);
_leave(" = t");
return true;
+out_of_addresses:
+ /* We've now had a failure to respond on all of a server's addresses -
+ * immediately probe them again and consider retrying the server.
+ */
+ afs_probe_fileserver(op->net, op->server);
+ if (op->flags & AFS_OPERATION_RETRY_SERVER) {
+ alist = op->ac.alist;
+ error = afs_wait_for_one_fs_probe(
+ op->server, !(op->flags & AFS_OPERATION_UNINTR));
+ switch (error) {
+ case 0:
+ op->flags &= ~AFS_OPERATION_RETRY_SERVER;
+ goto retry_server;
+ case -ERESTARTSYS:
+ goto failed_set_error;
+ case -ETIME:
+ case -EDESTADDRREQ:
+ goto next_server;
+ }
+ }
+
next_server:
_debug("next");
- afs_end_cursor(&fc->ac);
+ afs_end_cursor(&op->ac);
goto pick_server;
no_more_servers:
/* That's all the servers poked to no good effect. Try again if some
* of them were busy.
*/
- if (fc->flags & AFS_FS_CURSOR_VBUSY)
+ if (op->flags & AFS_OPERATION_VBUSY)
goto restart_from_beginning;
e.error = -EDESTADDRREQ;
e.responded = false;
- for (i = 0; i < fc->server_list->nr_servers; i++) {
- struct afs_server *s = fc->server_list->servers[i].server;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_server *s = op->server_list->servers[i].server;
afs_prioritise_error(&e, READ_ONCE(s->probe.error),
s->probe.abort_code);
@@ -470,101 +454,18 @@ no_more_servers:
error = e.error;
failed_set_error:
- fc->error = error;
+ op->error = error;
failed:
- fc->flags |= AFS_FS_CURSOR_STOP;
- afs_end_cursor(&fc->ac);
- _leave(" = f [failed %d]", fc->error);
- return false;
-}
-
-/*
- * Select the same fileserver we used for a vnode before and only that
- * fileserver. We use this when we have a lock on that file, which is backed
- * only by the fileserver we obtained it from.
- */
-bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
-{
- struct afs_vnode *vnode = fc->vnode;
- struct afs_cb_interest *cbi;
- struct afs_addr_list *alist;
- int error = fc->ac.error;
-
- _enter("");
-
- cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock));
-
- switch (error) {
- case SHRT_MAX:
- if (!cbi) {
- fc->error = -ESTALE;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
- }
-
- fc->cbi = afs_get_cb_interest(cbi);
-
- read_lock(&cbi->server->fs_lock);
- alist = rcu_dereference_protected(cbi->server->addresses,
- lockdep_is_held(&cbi->server->fs_lock));
- afs_get_addrlist(alist);
- read_unlock(&cbi->server->fs_lock);
- if (!alist) {
- fc->error = -ESTALE;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
- }
-
- memset(&fc->ac, 0, sizeof(fc->ac));
- fc->ac.alist = alist;
- fc->ac.index = -1;
- goto iterate_address;
-
- case 0:
- default:
- /* Success or local failure. Stop. */
- fc->error = error;
- fc->flags |= AFS_FS_CURSOR_STOP;
- _leave(" = f [okay/local %d]", error);
- return false;
-
- case -ECONNABORTED:
- fc->error = afs_abort_to_error(fc->ac.abort_code);
- fc->flags |= AFS_FS_CURSOR_STOP;
- _leave(" = f [abort]");
- return false;
-
- case -ERFKILL:
- case -EADDRNOTAVAIL:
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -EHOSTDOWN:
- case -ECONNREFUSED:
- case -ETIMEDOUT:
- case -ETIME:
- _debug("no conn");
- fc->error = error;
- goto iterate_address;
- }
-
-iterate_address:
- /* Iterate over the current server's address list to try and find an
- * address on which it will respond to us.
- */
- if (afs_iterate_addresses(&fc->ac)) {
- _leave(" = t");
- return true;
- }
-
- afs_end_cursor(&fc->ac);
+ op->flags |= AFS_OPERATION_STOP;
+ afs_end_cursor(&op->ac);
+ _leave(" = f [failed %d]", op->error);
return false;
}
/*
* Dump cursor state in the case of the error being EDESTADDRREQ.
*/
-static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
+void afs_dump_edestaddrreq(const struct afs_operation *op)
{
static int count;
int i;
@@ -576,13 +477,14 @@ static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
rcu_read_lock();
pr_notice("EDESTADDR occurred\n");
- pr_notice("FC: cbb=%x cbb2=%x fl=%hx err=%hd\n",
- fc->cb_break, fc->cb_break_2, fc->flags, fc->error);
+ pr_notice("FC: cbb=%x cbb2=%x fl=%x err=%hd\n",
+ op->file[0].cb_break_before,
+ op->file[1].cb_break_before, op->flags, op->error);
pr_notice("FC: ut=%lx ix=%d ni=%u\n",
- fc->untried, fc->index, fc->nr_iterations);
+ op->untried, op->index, op->nr_iterations);
- if (fc->server_list) {
- const struct afs_server_list *sl = fc->server_list;
+ if (op->server_list) {
+ const struct afs_server_list *sl = op->server_list;
pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n",
sl->nr_servers, sl->preferred, sl->vnovol_mask);
for (i = 0; i < sl->nr_servers; i++) {
@@ -596,41 +498,16 @@ static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
a->version,
a->nr_ipv4, a->nr_addrs, a->max_addrs,
a->preferred);
- pr_notice("FC: - pr=%lx R=%lx F=%lx\n",
- a->probed, a->responded, a->failed);
- if (a == fc->ac.alist)
+ pr_notice("FC: - R=%lx F=%lx\n",
+ a->responded, a->failed);
+ if (a == op->ac.alist)
pr_notice("FC: - current\n");
}
}
}
pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
- fc->ac.tried, fc->ac.index, fc->ac.abort_code, fc->ac.error,
- fc->ac.responded, fc->ac.nr_iterations);
+ op->ac.tried, op->ac.index, op->ac.abort_code, op->ac.error,
+ op->ac.responded, op->ac.nr_iterations);
rcu_read_unlock();
}
-
-/*
- * Tidy up a filesystem cursor and unlock the vnode.
- */
-int afs_end_vnode_operation(struct afs_fs_cursor *fc)
-{
- struct afs_net *net = afs_v2net(fc->vnode);
-
- if (fc->error == -EDESTADDRREQ ||
- fc->error == -EADDRNOTAVAIL ||
- fc->error == -ENETUNREACH ||
- fc->error == -EHOSTUNREACH)
- afs_dump_edestaddrreq(fc);
-
- mutex_unlock(&fc->vnode->io_lock);
-
- afs_end_cursor(&fc->ac);
- afs_put_cb_interest(net, fc->cbi);
- afs_put_serverlist(net, fc->server_list);
-
- if (fc->error == -ECONNABORTED)
- fc->error = afs_abort_to_error(fc->ac.abort_code);
-
- return fc->error;
-}
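
The rewritten pick_server block above now selects the untried server that is marked AFS_SERVER_FL_RESPONDING and has the lowest probed RTT. A standalone illustration of that policy with simplified stand-in types (not the kernel's structures):

#include <limits.h>

struct example_server {
	unsigned int	rtt;		/* RTT reported by the last probe */
	int		responding;	/* non-zero if the server answered */
};

/* Return the index of the untried, responding server with the lowest RTT,
 * or -1 if none qualifies (the kernel then falls through to the error path).
 */
static int pick_lowest_rtt(const struct example_server *servers, int nr,
			   unsigned long untried)
{
	unsigned int best_rtt = UINT_MAX;
	int i, best = -1;

	for (i = 0; i < nr; i++) {
		if (!(untried & (1UL << i)) || !servers[i].responding)
			continue;
		if (servers[i].rtt < best_rtt) {
			best = i;
			best_rtt = servers[i].rtt;
		}
	}
	return best;
}
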
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index e313dae01674..8fc8fb406a5a 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -181,8 +181,7 @@ void afs_put_call(struct afs_call *call)
if (call->type->destructor)
call->type->destructor(call);
- afs_put_server(call->net, call->server, afs_server_trace_put_call);
- afs_put_cb_interest(call->net, call->cbi);
+ afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
afs_put_addrlist(call->alist);
kfree(call->request);
@@ -281,18 +280,19 @@ static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
struct bio_vec *bv, pgoff_t first, pgoff_t last,
unsigned offset)
{
+ struct afs_operation *op = call->op;
struct page *pages[AFS_BVEC_MAX];
unsigned int nr, n, i, to, bytes = 0;
nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
- n = find_get_pages_contig(call->mapping, first, nr, pages);
+ n = find_get_pages_contig(op->store.mapping, first, nr, pages);
ASSERTCMP(n, ==, nr);
msg->msg_flags |= MSG_MORE;
for (i = 0; i < nr; i++) {
to = PAGE_SIZE;
if (first + i >= last) {
- to = call->last_to;
+ to = op->store.last_to;
msg->msg_flags &= ~MSG_MORE;
}
bv[i].bv_page = pages[i];
@@ -322,13 +322,14 @@ static void afs_notify_end_request_tx(struct sock *sock,
*/
static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
{
+ struct afs_operation *op = call->op;
struct bio_vec bv[AFS_BVEC_MAX];
unsigned int bytes, nr, loop, offset;
- pgoff_t first = call->first, last = call->last;
+ pgoff_t first = op->store.first, last = op->store.last;
int ret;
- offset = call->first_offset;
- call->first_offset = 0;
+ offset = op->store.first_offset;
+ op->store.first_offset = 0;
do {
afs_load_bvec(call, msg, bv, first, last, offset);
@@ -338,7 +339,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
bytes = msg->msg_iter.count;
nr = msg->msg_iter.nr_segs;
- ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg,
+ ret = rxrpc_kernel_send_data(op->net->socket, call->rxcall, msg,
bytes, afs_notify_end_request_tx);
for (loop = 0; loop < nr; loop++)
put_page(bv[loop].bv_page);
@@ -348,7 +349,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
first += nr;
} while (first <= last);
- trace_afs_sent_pages(call, call->first, last, first, ret);
+ trace_afs_sent_pages(call, op->store.first, last, first, ret);
return ret;
}
@@ -383,16 +384,18 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
*/
tx_total_len = call->request_size;
if (call->send_pages) {
- if (call->last == call->first) {
- tx_total_len += call->last_to - call->first_offset;
+ struct afs_operation *op = call->op;
+
+ if (op->store.last == op->store.first) {
+ tx_total_len += op->store.last_to - op->store.first_offset;
} else {
/* It looks mathematically like you should be able to
* combine the following lines with the ones above, but
* unsigned arithmetic is fun when it wraps...
*/
- tx_total_len += PAGE_SIZE - call->first_offset;
- tx_total_len += call->last_to;
- tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
+ tx_total_len += PAGE_SIZE - op->store.first_offset;
+ tx_total_len += op->store.last_to;
+ tx_total_len += (op->store.last - op->store.first - 1) * PAGE_SIZE;
}
}
@@ -538,13 +541,15 @@ static void afs_deliver_to_call(struct afs_call *call)
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
+ if (ret == 0 && call->unmarshalling_error)
+ ret = -EBADMSG;
switch (ret) {
case 0:
afs_queue_call_work(call);
if (state == AFS_CALL_CL_PROC_REPLY) {
- if (call->cbi)
+ if (call->op)
set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
- &call->cbi->server->flags);
+ &call->op->server->flags);
goto call_complete;
}
ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY);
@@ -957,9 +962,11 @@ int afs_extract_data(struct afs_call *call, bool want_more)
/*
* Log protocol error production.
*/
-noinline int afs_protocol_error(struct afs_call *call, int error,
+noinline int afs_protocol_error(struct afs_call *call,
enum afs_eproto_cause cause)
{
- trace_afs_protocol_error(call, error, cause);
- return error;
+ trace_afs_protocol_error(call, cause);
+ if (call)
+ call->unmarshalling_error = true;
+ return -EBADMSG;
}
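
[Editor's note: the comment retained in afs_make_call() above ("unsigned arithmetic is fun when it wraps") is why the single-page and multi-page byte counts stay in separate branches. A minimal userspace sketch, under the assumption of a 32-bit page-index type feeding a 64-bit byte total (types and constants here are illustrative, not kernel definitions), of how folding the branches miscounts when last == first:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u	/* 32-bit constant, like PAGE_SIZE on a 32-bit box */

/* Two-branch form, mirroring the patch: exact when last == first. */
static uint64_t tx_len(uint32_t first, uint32_t last,
		       uint32_t first_offset, uint32_t last_to)
{
	if (last == first)
		return last_to - first_offset;
	return (uint64_t)(SKETCH_PAGE_SIZE - first_offset) + last_to +
	       (uint64_t)(last - first - 1) * SKETCH_PAGE_SIZE;
}

/* Folded form: (last - first - 1) wraps in the 32-bit index type when
 * last == first, and the wrap is not cancelled once it is widened into
 * the 64-bit running total. */
static uint64_t tx_len_folded(uint32_t first, uint32_t last,
			      uint32_t first_offset, uint32_t last_to)
{
	uint64_t total = 0;

	total += SKETCH_PAGE_SIZE - first_offset;
	total += last_to;
	total += (last - first - 1) * SKETCH_PAGE_SIZE;	/* wraps in 32 bits */
	return total;
}

int main(void)
{
	/* One page, bytes 100..500: the correct length is 400. */
	printf("two-branch: %llu\n", (unsigned long long)tx_len(7, 7, 100, 500));
	printf("folded:     %llu\n", (unsigned long long)tx_len_folded(7, 7, 100, 500));
	return 0;
}

The two-branch form sidesteps the wrap entirely, which is the choice the patch keeps.]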
diff --git a/fs/afs/security.c b/fs/afs/security.c
index ce9de1e6742b..90d852704328 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -170,8 +170,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
break;
}
- if (afs_cb_is_broken(cb_break, vnode,
- rcu_dereference(vnode->cb_interest))) {
+ if (afs_cb_is_broken(cb_break, vnode)) {
changed = true;
break;
}
@@ -201,7 +200,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
}
- if (afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)))
+ if (afs_cb_is_broken(cb_break, vnode))
goto someone_else_changed_it;
/* We need a ref on any permits list we want to copy as we'll have to
@@ -281,8 +280,7 @@ found:
rcu_read_lock();
spin_lock(&vnode->lock);
zap = rcu_access_pointer(vnode->permit_cache);
- if (!afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)) &&
- zap == permits)
+ if (!afs_cb_is_broken(cb_break, vnode) && zap == permits)
rcu_assign_pointer(vnode->permit_cache, replacement);
else
zap = replacement;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 11b90ac7ea30..039e3488511c 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -12,19 +12,11 @@
#include "protocol_yfs.h"
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
-static unsigned afs_server_update_delay = 30; /* Time till VLDB recheck in secs */
static atomic_t afs_server_debug_id;
-static void afs_inc_servers_outstanding(struct afs_net *net)
-{
- atomic_inc(&net->servers_outstanding);
-}
-
-static void afs_dec_servers_outstanding(struct afs_net *net)
-{
- if (atomic_dec_and_test(&net->servers_outstanding))
- wake_up_var(&net->servers_outstanding);
-}
+static struct afs_server *afs_maybe_use_server(struct afs_server *,
+ enum afs_server_trace);
+static void __afs_put_server(struct afs_net *, struct afs_server *);
/*
* Find a server by one of its addresses.
@@ -41,7 +33,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
do {
if (server)
- afs_put_server(net, server, afs_server_trace_put_find_rsq);
+ afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
server = NULL;
read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
@@ -79,9 +71,9 @@ struct afs_server *afs_find_server(struct afs_net *net,
}
server = NULL;
+ continue;
found:
- if (server && !atomic_inc_not_zero(&server->usage))
- server = NULL;
+ server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);
} while (need_seqretry(&net->fs_addr_lock, seq));
@@ -92,7 +84,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
}
/*
- * Look up a server by its UUID
+ * Look up a server by its UUID and mark it active.
*/
struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
{
@@ -108,7 +100,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
* changes.
*/
if (server)
- afs_put_server(net, server, afs_server_trace_put_uuid_rsq);
+ afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
server = NULL;
read_seqbegin_or_lock(&net->fs_lock, &seq);
@@ -123,7 +115,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
} else if (diff > 0) {
p = p->rb_right;
} else {
- afs_get_server(server, afs_server_trace_get_by_uuid);
+ afs_use_server(server, afs_server_trace_get_by_uuid);
break;
}
@@ -138,13 +130,16 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
}
/*
- * Install a server record in the namespace tree
+ * Install a server record in the namespace tree. If there's a clash, we stick
+ * it into a list anchored on whichever afs_server struct is actually in the
+ * tree.
*/
-static struct afs_server *afs_install_server(struct afs_net *net,
+static struct afs_server *afs_install_server(struct afs_cell *cell,
struct afs_server *candidate)
{
const struct afs_addr_list *alist;
- struct afs_server *server;
+ struct afs_server *server, *next;
+ struct afs_net *net = cell->net;
struct rb_node **pp, *p;
int diff;
@@ -160,12 +155,30 @@ static struct afs_server *afs_install_server(struct afs_net *net,
_debug("- consider %p", p);
server = rb_entry(p, struct afs_server, uuid_rb);
diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
- if (diff < 0)
+ if (diff < 0) {
pp = &(*pp)->rb_left;
- else if (diff > 0)
+ } else if (diff > 0) {
pp = &(*pp)->rb_right;
- else
- goto exists;
+ } else {
+ if (server->cell == cell)
+ goto exists;
+
+ /* We have the same UUID representing servers in
+ * different cells. Append the new server to the list.
+ */
+ for (;;) {
+ next = rcu_dereference_protected(
+ server->uuid_next,
+ lockdep_is_held(&net->fs_lock.lock));
+ if (!next)
+ break;
+ server = next;
+ }
+ rcu_assign_pointer(server->uuid_next, candidate);
+ candidate->uuid_prev = server;
+ server = candidate;
+ goto added_dup;
+ }
}
server = candidate;
@@ -173,6 +186,7 @@ static struct afs_server *afs_install_server(struct afs_net *net,
rb_insert_color(&server->uuid_rb, &net->fs_servers);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+added_dup:
write_seqlock(&net->fs_addr_lock);
alist = rcu_dereference_protected(server->addresses,
lockdep_is_held(&net->fs_addr_lock.lock));
@@ -199,13 +213,14 @@ exists:
}
/*
- * allocate a new server record
+ * Allocate a new server record and mark it active.
*/
-static struct afs_server *afs_alloc_server(struct afs_net *net,
+static struct afs_server *afs_alloc_server(struct afs_cell *cell,
const uuid_t *uuid,
struct afs_addr_list *alist)
{
struct afs_server *server;
+ struct afs_net *net = cell->net;
_enter("");
@@ -213,20 +228,21 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
if (!server)
goto enomem;
- atomic_set(&server->usage, 1);
+ atomic_set(&server->ref, 1);
+ atomic_set(&server->active, 1);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
RCU_INIT_POINTER(server->addresses, alist);
server->addr_version = alist->version;
server->uuid = *uuid;
- server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
rwlock_init(&server->fs_lock);
- INIT_HLIST_HEAD(&server->cb_volumes);
- rwlock_init(&server->cb_break_lock);
init_waitqueue_head(&server->probe_wq);
+ INIT_LIST_HEAD(&server->probe_link);
spin_lock_init(&server->probe_lock);
+ server->cell = cell;
+ server->rtt = UINT_MAX;
afs_inc_servers_outstanding(net);
- trace_afs_server(server, 1, afs_server_trace_alloc);
+ trace_afs_server(server, 1, 1, afs_server_trace_alloc);
_leave(" = %p", server);
return server;
@@ -264,7 +280,7 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
* Get or create a fileserver record.
*/
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
- const uuid_t *uuid)
+ const uuid_t *uuid, u32 addr_version)
{
struct afs_addr_list *alist;
struct afs_server *server, *candidate;
@@ -272,26 +288,34 @@ struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
_enter("%p,%pU", cell->net, uuid);
server = afs_find_server_by_uuid(cell->net, uuid);
- if (server)
+ if (server) {
+ if (server->addr_version != addr_version)
+ set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
return server;
+ }
alist = afs_vl_lookup_addrs(cell, key, uuid);
if (IS_ERR(alist))
return ERR_CAST(alist);
- candidate = afs_alloc_server(cell->net, uuid, alist);
+ candidate = afs_alloc_server(cell, uuid, alist);
if (!candidate) {
afs_put_addrlist(alist);
return ERR_PTR(-ENOMEM);
}
- server = afs_install_server(cell->net, candidate);
+ server = afs_install_server(cell, candidate);
if (server != candidate) {
afs_put_addrlist(alist);
kfree(candidate);
+ } else {
+ /* Immediately dispatch an asynchronous probe to each interface
+ * on the fileserver. This will make sure the repeat-probing
+ * service is started.
+ */
+ afs_fs_probe_fileserver(cell->net, server, key, true);
}
- _leave(" = %p{%d}", server, atomic_read(&server->usage));
return server;
}
@@ -327,9 +351,38 @@ void afs_servers_timer(struct timer_list *timer)
struct afs_server *afs_get_server(struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int u = atomic_inc_return(&server->usage);
+ unsigned int u = atomic_inc_return(&server->ref);
+
+ trace_afs_server(server, u, atomic_read(&server->active), reason);
+ return server;
+}
+
+/*
+ * Try to get a reference on a server object.
+ */
+static struct afs_server *afs_maybe_use_server(struct afs_server *server,
+ enum afs_server_trace reason)
+{
+ unsigned int r = atomic_fetch_add_unless(&server->ref, 1, 0);
+ unsigned int a;
+
+ if (r == 0)
+ return NULL;
+
+ a = atomic_inc_return(&server->active);
+ trace_afs_server(server, r, a, reason);
+ return server;
+}
+
+/*
+ * Get an active count on a server object.
+ */
+struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
+{
+ unsigned int r = atomic_inc_return(&server->ref);
+ unsigned int a = atomic_inc_return(&server->active);
- trace_afs_server(server, u, reason);
+ trace_afs_server(server, r, a, reason);
return server;
}
@@ -344,32 +397,57 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
if (!server)
return;
- server->put_time = ktime_get_real_seconds();
-
- usage = atomic_dec_return(&server->usage);
+ usage = atomic_dec_return(&server->ref);
+ trace_afs_server(server, usage, atomic_read(&server->active), reason);
+ if (unlikely(usage == 0))
+ __afs_put_server(net, server);
+}
- trace_afs_server(server, usage, reason);
+/*
+ * Drop an active count on a server object without updating the last-unused
+ * time.
+ */
+void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason)
+{
+ if (server) {
+ unsigned int active = atomic_dec_return(&server->active);
- if (likely(usage > 0))
- return;
+ if (active == 0)
+ afs_set_server_timer(net, afs_server_gc_delay);
+ afs_put_server(net, server, reason);
+ }
+}
- afs_set_server_timer(net, afs_server_gc_delay);
+/*
+ * Drop an active count on a server object.
+ */
+void afs_unuse_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason)
+{
+ if (server) {
+ server->unuse_time = ktime_get_real_seconds();
+ afs_unuse_server_notime(net, server, reason);
+ }
}
static void afs_server_rcu(struct rcu_head *rcu)
{
struct afs_server *server = container_of(rcu, struct afs_server, rcu);
- trace_afs_server(server, atomic_read(&server->usage),
- afs_server_trace_free);
+ trace_afs_server(server, atomic_read(&server->ref),
+ atomic_read(&server->active), afs_server_trace_free);
afs_put_addrlist(rcu_access_pointer(server->addresses));
kfree(server);
}
-/*
- * destroy a dead server
- */
-static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+static void __afs_put_server(struct afs_net *net, struct afs_server *server)
+{
+ call_rcu(&server->rcu, afs_server_rcu);
+ afs_dec_servers_outstanding(net);
+}
+
+static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server)
{
struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
struct afs_addr_cursor ac = {
@@ -378,19 +456,18 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
.error = 0,
};
- trace_afs_server(server, atomic_read(&server->usage),
- afs_server_trace_give_up_cb);
+ afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+}
+/*
+ * destroy a dead server
+ */
+static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+{
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
- afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+ afs_give_up_callbacks(net, server);
- wait_var_event(&server->probe_outstanding,
- atomic_read(&server->probe_outstanding) == 0);
-
- trace_afs_server(server, atomic_read(&server->usage),
- afs_server_trace_destroy);
- call_rcu(&server->rcu, afs_server_rcu);
- afs_dec_servers_outstanding(net);
+ afs_put_server(net, server, afs_server_trace_destroy);
}
/*
@@ -398,32 +475,49 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
*/
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
- struct afs_server *server;
- bool deleted;
- int usage;
+ struct afs_server *server, *next, *prev;
+ int active;
while ((server = gc_list)) {
gc_list = server->gc_next;
write_seqlock(&net->fs_lock);
- usage = 1;
- deleted = atomic_try_cmpxchg(&server->usage, &usage, 0);
- trace_afs_server(server, usage, afs_server_trace_gc);
- if (deleted) {
- rb_erase(&server->uuid_rb, &net->fs_servers);
- hlist_del_rcu(&server->proc_link);
- }
- write_sequnlock(&net->fs_lock);
- if (deleted) {
- write_seqlock(&net->fs_addr_lock);
+ active = atomic_read(&server->active);
+ if (active == 0) {
+ trace_afs_server(server, atomic_read(&server->ref),
+ active, afs_server_trace_gc);
+ next = rcu_dereference_protected(
+ server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
+ prev = server->uuid_prev;
+ if (!prev) {
+ /* The one at the front is in the tree */
+ if (!next) {
+ rb_erase(&server->uuid_rb, &net->fs_servers);
+ } else {
+ rb_replace_node_rcu(&server->uuid_rb,
+ &next->uuid_rb,
+ &net->fs_servers);
+ next->uuid_prev = NULL;
+ }
+ } else {
+ /* This server is not at the front */
+ rcu_assign_pointer(prev->uuid_next, next);
+ if (next)
+ next->uuid_prev = prev;
+ }
+
+ list_del(&server->probe_link);
+ hlist_del_rcu(&server->proc_link);
if (!hlist_unhashed(&server->addr4_link))
hlist_del_rcu(&server->addr4_link);
if (!hlist_unhashed(&server->addr6_link))
hlist_del_rcu(&server->addr6_link);
- write_sequnlock(&net->fs_addr_lock);
- afs_destroy_server(net, server);
}
+ write_sequnlock(&net->fs_lock);
+
+ if (active == 0)
+ afs_destroy_server(net, server);
}
}
@@ -452,15 +546,14 @@ void afs_manage_servers(struct work_struct *work)
for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
struct afs_server *server =
rb_entry(cursor, struct afs_server, uuid_rb);
- int usage = atomic_read(&server->usage);
+ int active = atomic_read(&server->active);
- _debug("manage %pU %u", &server->uuid, usage);
+ _debug("manage %pU %u", &server->uuid, active);
- ASSERTCMP(usage, >=, 1);
- ASSERTIFCMP(purging, usage, ==, 1);
+ ASSERTIFCMP(purging, active, ==, 0);
- if (usage == 1) {
- time64_t expire_at = server->put_time;
+ if (active == 0) {
+ time64_t expire_at = server->unuse_time;
if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
!test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
@@ -525,26 +618,27 @@ void afs_purge_servers(struct afs_net *net)
/*
* Get an update for a server's address list.
*/
-static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
+static noinline bool afs_update_server_record(struct afs_operation *op,
+ struct afs_server *server)
{
struct afs_addr_list *alist, *discard;
_enter("");
- trace_afs_server(server, atomic_read(&server->usage), afs_server_trace_update);
+ trace_afs_server(server, atomic_read(&server->ref), atomic_read(&server->active),
+ afs_server_trace_update);
- alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
- &server->uuid);
+ alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
if (IS_ERR(alist)) {
if ((PTR_ERR(alist) == -ERESTARTSYS ||
PTR_ERR(alist) == -EINTR) &&
- !(fc->flags & AFS_FS_CURSOR_INTR) &&
+ (op->flags & AFS_OPERATION_UNINTR) &&
server->addresses) {
_leave(" = t [intr]");
return true;
}
- fc->error = PTR_ERR(alist);
- _leave(" = f [%d]", fc->error);
+ op->error = PTR_ERR(alist);
+ _leave(" = f [%d]", op->error);
return false;
}
@@ -558,7 +652,6 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a
write_unlock(&server->fs_lock);
}
- server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
afs_put_addrlist(discard);
_leave(" = t");
return true;
@@ -567,10 +660,8 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a
/*
* See if a server's address list needs updating.
*/
-bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
+bool afs_check_server_record(struct afs_operation *op, struct afs_server *server)
{
- time64_t now = ktime_get_real_seconds();
- long diff;
bool success;
int ret, retries = 0;
@@ -579,25 +670,29 @@ bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server
ASSERT(server);
retry:
- diff = READ_ONCE(server->update_at) - now;
- if (diff > 0) {
- _leave(" = t [not now %ld]", diff);
- return true;
- }
+ if (test_bit(AFS_SERVER_FL_UPDATING, &server->flags))
+ goto wait;
+ if (test_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags))
+ goto update;
+ _leave(" = t [good]");
+ return true;
+update:
if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
- success = afs_update_server_record(fc, server);
+ clear_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
+ success = afs_update_server_record(op, server);
clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
_leave(" = %d", success);
return success;
}
+wait:
ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
- (fc->flags & AFS_FS_CURSOR_INTR) ?
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ (op->flags & AFS_OPERATION_UNINTR) ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
- fc->error = ret;
+ op->error = ret;
_leave(" = f [intr]");
return false;
}
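
[Editor's note: the server.c changes above split the old single usage count into two counters: "ref" keeps the structure in memory, "active" says whether anything is still using it, and the GC timer is armed only when the active count drops to zero. A rough userspace sketch of that pattern (C11 atomics; the names and the gc/free stand-ins are assumptions, not the kernel interfaces):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_uint ref;	/* keeps the structure allocated */
	atomic_uint active;	/* counts users that may issue operations */
};

static void schedule_gc(void)
{
	printf("gc timer armed\n");	/* stand-in for afs_set_server_timer() */
}

static struct obj *obj_use(struct obj *o)
{
	atomic_fetch_add(&o->ref, 1);
	atomic_fetch_add(&o->active, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	/* Last reference gone: release the memory (call_rcu() in the patch). */
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}

static void obj_unuse(struct obj *o)
{
	/* Last active user gone: the object becomes a GC candidate, but the
	 * memory stays around until the ref count also reaches zero. */
	if (atomic_fetch_sub(&o->active, 1) == 1)
		schedule_gc();
	obj_put(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	obj_use(o);	/* ref=1, active=1 */
	obj_unuse(o);	/* active 1->0: gc armed; ref 1->0: freed */
	return 0;
}
]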
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 888d91d195d9..ed9056703505 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -14,11 +14,9 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
int i;
if (slist && refcount_dec_and_test(&slist->usage)) {
- for (i = 0; i < slist->nr_servers; i++) {
- afs_put_cb_interest(net, slist->servers[i].cb_interest);
- afs_put_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
- }
+ for (i = 0; i < slist->nr_servers; i++)
+ afs_unuse_server(net, slist->servers[i].server,
+ afs_server_trace_put_slist);
kfree(slist);
}
}
@@ -46,12 +44,16 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
refcount_set(&slist->usage, 1);
rwlock_init(&slist->lock);
+ for (i = 0; i < AFS_MAXTYPES; i++)
+ slist->vids[i] = vldb->vid[i];
+
 /* Make sure a record exists for each server in the list. */
for (i = 0; i < vldb->nr_servers; i++) {
if (!(vldb->fs_mask[i] & type_mask))
continue;
- server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
+ server = afs_lookup_server(cell, key, &vldb->fs_server[i],
+ vldb->addr_version[i]);
if (IS_ERR(server)) {
ret = PTR_ERR(server);
if (ret == -ENOENT ||
@@ -123,31 +125,5 @@ changed:
}
}
- /* Keep the old callback interest records where possible so that we
- * maintain callback interception.
- */
- i = 0;
- j = 0;
- while (i < old->nr_servers && j < new->nr_servers) {
- if (new->servers[j].server == old->servers[i].server) {
- struct afs_cb_interest *cbi = old->servers[i].cb_interest;
- if (cbi) {
- new->servers[j].cb_interest = cbi;
- refcount_inc(&cbi->usage);
- }
- i++;
- j++;
- continue;
- }
-
- if (new->servers[j].server < old->servers[i].server) {
- j++;
- continue;
- }
-
- i++;
- continue;
- }
-
return true;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index dda7a9a66848..b552357b1d13 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -352,7 +352,9 @@ static int afs_validate_fc(struct fs_context *fc)
{
struct afs_fs_context *ctx = fc->fs_private;
struct afs_volume *volume;
+ struct afs_cell *cell;
struct key *key;
+ int ret;
if (!ctx->dyn_root) {
if (ctx->no_cell) {
@@ -365,6 +367,7 @@ static int afs_validate_fc(struct fs_context *fc)
return -EDESTADDRREQ;
}
+ reget_key:
/* We try to do the mount securely. */
key = afs_request_key(ctx->cell);
if (IS_ERR(key))
@@ -373,10 +376,26 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = key;
if (ctx->volume) {
- afs_put_volume(ctx->cell, ctx->volume);
+ afs_put_volume(ctx->net, ctx->volume,
+ afs_volume_trace_put_validate_fc);
ctx->volume = NULL;
}
+ if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &ctx->cell->flags)) {
+ ret = afs_cell_detect_alias(ctx->cell, key);
+ if (ret < 0)
+ return ret;
+ if (ret == 1) {
+ _debug("switch to alias");
+ key_put(ctx->key);
+ ctx->key = NULL;
+ cell = afs_get_cell(ctx->cell->alias_of);
+ afs_put_cell(ctx->net, ctx->cell);
+ ctx->cell = cell;
+ goto reget_key;
+ }
+ }
+
volume = afs_create_volume(ctx);
if (IS_ERR(volume))
return PTR_ERR(volume);
@@ -421,7 +440,6 @@ static int afs_set_super(struct super_block *sb, struct fs_context *fc)
static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
{
struct afs_super_info *as = AFS_FS_S(sb);
- struct afs_iget_data iget_data;
struct inode *inode = NULL;
int ret;
@@ -446,13 +464,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
- iget_data.fid.vid = as->volume->vid;
- iget_data.fid.vnode = 1;
- iget_data.fid.vnode_hi = 0;
- iget_data.fid.unique = 1;
- iget_data.cb_v_break = as->volume->cb_v_break;
- iget_data.cb_s_break = 0;
- inode = afs_iget(sb, ctx->key, &iget_data, NULL, NULL, NULL);
+ inode = afs_root_iget(sb, ctx->key);
}
if (IS_ERR(inode))
@@ -473,6 +485,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
goto error;
} else {
sb->s_d_op = &afs_fs_dentry_operations;
+ rcu_assign_pointer(as->volume->sb, sb);
}
_leave(" = 0");
@@ -496,7 +509,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
as->dyn_root = true;
} else {
as->cell = afs_get_cell(ctx->cell);
- as->volume = __afs_get_volume(ctx->volume);
+ as->volume = afs_get_volume(ctx->volume,
+ afs_volume_trace_get_alloc_sbi);
}
}
return as;
@@ -505,8 +519,9 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
static void afs_destroy_sbi(struct afs_super_info *as)
{
if (as) {
- afs_put_volume(as->cell, as->volume);
- afs_put_cell(afs_net(as->net_ns), as->cell);
+ struct afs_net *net = afs_net(as->net_ns);
+ afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
+ afs_put_cell(net, as->cell);
put_net(as->net_ns);
kfree(as);
}
@@ -515,7 +530,6 @@ static void afs_destroy_sbi(struct afs_super_info *as)
static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = AFS_FS_S(sb);
- struct afs_net *net = afs_net(as->net_ns);
if (as->dyn_root)
afs_dynroot_depopulate(sb);
@@ -524,7 +538,7 @@ static void afs_kill_super(struct super_block *sb)
* deactivating the superblock.
*/
if (as->volume)
- afs_clear_callback_interests(net, as->volume->servers);
+ rcu_assign_pointer(as->volume->sb, NULL);
kill_anon_super(sb);
if (as->volume)
afs_deactivate_volume(as->volume);
@@ -592,7 +606,7 @@ static void afs_free_fc(struct fs_context *fc)
struct afs_fs_context *ctx = fc->fs_private;
afs_destroy_sbi(fc->s_fs_info);
- afs_put_volume(ctx->cell, ctx->volume);
+ afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
afs_put_cell(ctx->net, ctx->cell);
key_put(ctx->key);
kfree(ctx);
@@ -674,7 +688,6 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->volume = NULL;
vnode->lock_key = NULL;
vnode->permit_cache = NULL;
- RCU_INIT_POINTER(vnode->cb_interest, NULL);
#ifdef CONFIG_AFS_FSCACHE
vnode->cache = NULL;
#endif
@@ -704,22 +717,38 @@ static void afs_destroy_inode(struct inode *inode)
_debug("DESTROY INODE %p", inode);
- ASSERTCMP(rcu_access_pointer(vnode->cb_interest), ==, NULL);
-
atomic_dec(&afs_count_active_inodes);
}
+static void afs_get_volume_status_success(struct afs_operation *op)
+{
+ struct afs_volume_status *vs = &op->volstatus.vs;
+ struct kstatfs *buf = op->volstatus.buf;
+
+ if (vs->max_quota == 0)
+ buf->f_blocks = vs->part_max_blocks;
+ else
+ buf->f_blocks = vs->max_quota;
+
+ if (buf->f_blocks > vs->blocks_in_use)
+ buf->f_bavail = buf->f_bfree =
+ buf->f_blocks - vs->blocks_in_use;
+}
+
+static const struct afs_operation_ops afs_get_volume_status_operation = {
+ .issue_afs_rpc = afs_fs_get_volume_status,
+ .issue_yfs_rpc = yfs_fs_get_volume_status,
+ .success = afs_get_volume_status_success,
+};
+
/*
* return information about an AFS volume
*/
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct afs_super_info *as = AFS_FS_S(dentry->d_sb);
- struct afs_fs_cursor fc;
- struct afs_volume_status vs;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- struct key *key;
- int ret;
buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = AFS_BLOCK_SIZE;
@@ -732,31 +761,13 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key))
- return PTR_ERR(key);
+ op = afs_alloc_operation(NULL, as->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- fc.flags |= AFS_FS_CURSOR_NO_VSLEEP;
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_get_volume_status(&fc, &vs);
- }
-
- afs_check_for_remote_deletion(&fc, fc.vnode);
- ret = afs_end_vnode_operation(&fc);
- }
-
- key_put(key);
-
- if (ret == 0) {
- if (vs.max_quota == 0)
- buf->f_blocks = vs.part_max_blocks;
- else
- buf->f_blocks = vs.max_quota;
- buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
- }
-
- return ret;
+ afs_op_set_vnode(op, 0, vnode);
+ op->nr_files = 1;
+ op->volstatus.buf = buf;
+ op->ops = &afs_get_volume_status_operation;
+ return afs_do_sync_operation(op);
}
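
[Editor's note: afs_statfs() above now follows the afs_operation pattern: allocate an operation, attach the vnode, point ->ops at a table of issue/success handlers and let a generic driver run it synchronously. A stripped-down sketch of that shape (illustrative only; the real afs_operation and afs_do_sync_operation carry much more state and error handling):

#include <stdio.h>

struct op;

struct op_ops {
	int  (*issue)(struct op *op);	/* send the RPC */
	void (*success)(struct op *op);	/* digest the reply */
};

struct op {
	const struct op_ops *ops;
	int error;
	long blocks_in_use;
};

static int do_sync_operation(struct op *op)
{
	op->error = op->ops->issue(op);
	if (op->error == 0)
		op->ops->success(op);
	return op->error;
}

/* A pretend "get volume status" operation. */
static int volstatus_issue(struct op *op)
{
	op->blocks_in_use = 1234;	/* pretend reply from the server */
	return 0;
}

static void volstatus_success(struct op *op)
{
	printf("volume uses %ld blocks\n", op->blocks_in_use);
}

static const struct op_ops volstatus_ops = {
	.issue   = volstatus_issue,
	.success = volstatus_success,
};

int main(void)
{
	struct op op = { .ops = &volstatus_ops };

	return do_sync_operation(&op);
}
]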
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
new file mode 100644
index 000000000000..5082ef04e99c
--- /dev/null
+++ b/fs/afs/vl_alias.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* AFS cell alias detection
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <keys/rxrpc-type.h>
+#include "internal.h"
+
+/*
+ * Sample a volume.
+ */
+static struct afs_volume *afs_sample_volume(struct afs_cell *cell, struct key *key,
+ const char *name, unsigned int namelen)
+{
+ struct afs_volume *volume;
+ struct afs_fs_context fc = {
+ .type = 0, /* Explicitly leave it to the VLDB */
+ .volnamesz = namelen,
+ .volname = name,
+ .net = cell->net,
+ .cell = cell,
+ .key = key, /* This might need to be something */
+ };
+
+ volume = afs_create_volume(&fc);
+ _leave(" = %p", volume);
+ return volume;
+}
+
+/*
+ * Compare two addresses.
+ */
+static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a,
+ const struct sockaddr_rxrpc *srx_b)
+{
+ short port_a, port_b;
+ int addr_a, addr_b, diff;
+
+ diff = (short)srx_a->transport_type - (short)srx_b->transport_type;
+ if (diff)
+ goto out;
+
+ switch (srx_a->transport_type) {
+ case AF_INET: {
+ const struct sockaddr_in *a = &srx_a->transport.sin;
+ const struct sockaddr_in *b = &srx_b->transport.sin;
+ addr_a = ntohl(a->sin_addr.s_addr);
+ addr_b = ntohl(b->sin_addr.s_addr);
+ diff = addr_a - addr_b;
+ if (diff == 0) {
+ port_a = ntohs(a->sin_port);
+ port_b = ntohs(b->sin_port);
+ diff = port_a - port_b;
+ }
+ break;
+ }
+
+ case AF_INET6: {
+ const struct sockaddr_in6 *a = &srx_a->transport.sin6;
+ const struct sockaddr_in6 *b = &srx_b->transport.sin6;
+ diff = memcmp(&a->sin6_addr, &b->sin6_addr, 16);
+ if (diff == 0) {
+ port_a = ntohs(a->sin6_port);
+ port_b = ntohs(b->sin6_port);
+ diff = port_a - port_b;
+ }
+ break;
+ }
+
+ default:
+ WARN_ON(1);
+ diff = 1;
+ }
+
+out:
+ return diff;
+}
+
+/*
+ * Compare the address lists of a pair of fileservers.
+ */
+static int afs_compare_fs_alists(const struct afs_server *server_a,
+ const struct afs_server *server_b)
+{
+ const struct afs_addr_list *la, *lb;
+ int a = 0, b = 0, addr_matches = 0;
+
+ la = rcu_dereference(server_a->addresses);
+ lb = rcu_dereference(server_b->addresses);
+
+ while (a < la->nr_addrs && b < lb->nr_addrs) {
+ const struct sockaddr_rxrpc *srx_a = &la->addrs[a];
+ const struct sockaddr_rxrpc *srx_b = &lb->addrs[b];
+ int diff = afs_compare_addrs(srx_a, srx_b);
+
+ if (diff < 0) {
+ a++;
+ } else if (diff > 0) {
+ b++;
+ } else {
+ addr_matches++;
+ a++;
+ b++;
+ }
+ }
+
+ return addr_matches;
+}
+
+/*
+ * Compare the fileserver lists of two volumes. The server lists are sorted in
+ * order of ascending UUID.
+ */
+static int afs_compare_volume_slists(const struct afs_volume *vol_a,
+ const struct afs_volume *vol_b)
+{
+ const struct afs_server_list *la, *lb;
+ int i, a = 0, b = 0, uuid_matches = 0, addr_matches = 0;
+
+ la = rcu_dereference(vol_a->servers);
+ lb = rcu_dereference(vol_b->servers);
+
+ for (i = 0; i < AFS_MAXTYPES; i++)
+ if (la->vids[i] != lb->vids[i])
+ return 0;
+
+ while (a < la->nr_servers && b < lb->nr_servers) {
+ const struct afs_server *server_a = la->servers[a].server;
+ const struct afs_server *server_b = lb->servers[b].server;
+ int diff = memcmp(&server_a->uuid, &server_b->uuid, sizeof(uuid_t));
+
+ if (diff < 0) {
+ a++;
+ } else if (diff > 0) {
+ b++;
+ } else {
+ uuid_matches++;
+ addr_matches += afs_compare_fs_alists(server_a, server_b);
+ a++;
+ b++;
+ }
+ }
+
+ _leave(" = %d [um %d]", addr_matches, uuid_matches);
+ return addr_matches;
+}
+
+/*
+ * Compare root.cell volumes.
+ */
+static int afs_compare_cell_roots(struct afs_cell *cell)
+{
+ struct afs_cell *p;
+
+ _enter("");
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(p, &cell->net->proc_cells, proc_link) {
+ if (p == cell || p->alias_of)
+ continue;
+ if (!p->root_volume)
+ continue; /* Ignore cells that don't have a root.cell volume. */
+
+ if (afs_compare_volume_slists(cell->root_volume, p->root_volume) != 0)
+ goto is_alias;
+ }
+
+ rcu_read_unlock();
+ _leave(" = 0");
+ return 0;
+
+is_alias:
+ rcu_read_unlock();
+ cell->alias_of = afs_get_cell(p);
+ return 1;
+}
+
+/*
+ * Query the new cell for a volume from a cell we're already using.
+ */
+static int afs_query_for_alias_one(struct afs_cell *cell, struct key *key,
+ struct afs_cell *p)
+{
+ struct afs_volume *volume, *pvol = NULL;
+ int ret;
+
+ /* Arbitrarily pick a volume from the list. */
+ read_seqlock_excl(&p->volume_lock);
+ if (!RB_EMPTY_ROOT(&p->volumes))
+ pvol = afs_get_volume(rb_entry(p->volumes.rb_node,
+ struct afs_volume, cell_node),
+ afs_volume_trace_get_query_alias);
+ read_sequnlock_excl(&p->volume_lock);
+ if (!pvol)
+ return 0;
+
+ _enter("%s:%s", cell->name, pvol->name);
+
+ /* And see if it's in the new cell. */
+ volume = afs_sample_volume(cell, key, pvol->name, pvol->name_len);
+ if (IS_ERR(volume)) {
+ afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias);
+ if (PTR_ERR(volume) != -ENOMEDIUM)
+ return PTR_ERR(volume);
+ /* That volume is not in the new cell, so not an alias */
+ return 0;
+ }
+
+ /* The new cell has a like-named volume also - compare volume ID,
+ * server and address lists.
+ */
+ ret = 0;
+ if (pvol->vid == volume->vid) {
+ rcu_read_lock();
+ if (afs_compare_volume_slists(volume, pvol))
+ ret = 1;
+ rcu_read_unlock();
+ }
+
+ afs_put_volume(cell->net, volume, afs_volume_trace_put_query_alias);
+ afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias);
+ return ret;
+}
+
+/*
+ * Query the new cell for volumes we know exist in cells we're already using.
+ */
+static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
+{
+ struct afs_cell *p;
+
+ _enter("%s", cell->name);
+
+ if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0)
+ return -ERESTARTSYS;
+
+ hlist_for_each_entry(p, &cell->net->proc_cells, proc_link) {
+ if (p == cell || p->alias_of)
+ continue;
+ if (RB_EMPTY_ROOT(&p->volumes))
+ continue;
+ if (p->root_volume)
+ continue; /* Ignore cells that have a root.cell volume. */
+ afs_get_cell(p);
+ mutex_unlock(&cell->net->proc_cells_lock);
+
+ if (afs_query_for_alias_one(cell, key, p) != 0)
+ goto is_alias;
+
+ if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
+ afs_put_cell(cell->net, p);
+ return -ERESTARTSYS;
+ }
+
+ afs_put_cell(cell->net, p);
+ }
+
+ mutex_unlock(&cell->net->proc_cells_lock);
+ _leave(" = 0");
+ return 0;
+
+is_alias:
+ cell->alias_of = p; /* Transfer our ref */
+ return 1;
+}
+
+/*
+ * Ask the cell's VL servers what the canonical name of the cell is.
+ */
+static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
+{
+ struct afs_vl_cursor vc;
+ char *cell_name = ERR_PTR(-EDESTADDRREQ);
+ bool skipped = false, not_skipped = false;
+ int ret;
+
+ if (!afs_begin_vlserver_operation(&vc, cell, key))
+ return ERR_PTR(-ERESTARTSYS);
+
+ while (afs_select_vlserver(&vc)) {
+ if (!test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) {
+ vc.ac.error = -EOPNOTSUPP;
+ skipped = true;
+ continue;
+ }
+ not_skipped = true;
+ cell_name = afs_yfsvl_get_cell_name(&vc);
+ }
+
+ ret = afs_end_vlserver_operation(&vc);
+ if (skipped && !not_skipped)
+ ret = -EOPNOTSUPP;
+ return ret < 0 ? ERR_PTR(ret) : cell_name;
+}
+
+static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
+{
+ struct afs_cell *master;
+ char *cell_name;
+
+ cell_name = afs_vl_get_cell_name(cell, key);
+ if (IS_ERR(cell_name))
+ return PTR_ERR(cell_name);
+
+ if (strcmp(cell_name, cell->name) == 0) {
+ kfree(cell_name);
+ return 0;
+ }
+
+ master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
+ NULL, false);
+ kfree(cell_name);
+ if (IS_ERR(master))
+ return PTR_ERR(master);
+
+ cell->alias_of = master; /* Transfer our ref */
+ return 1;
+}
+
+static int afs_do_cell_detect_alias(struct afs_cell *cell, struct key *key)
+{
+ struct afs_volume *root_volume;
+ int ret;
+
+ _enter("%s", cell->name);
+
+ ret = yfs_check_canonical_cell_name(cell, key);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+
+ /* Try and get the root.cell volume for comparison with other cells */
+ root_volume = afs_sample_volume(cell, key, "root.cell", 9);
+ if (!IS_ERR(root_volume)) {
+ cell->root_volume = root_volume;
+ return afs_compare_cell_roots(cell);
+ }
+
+ if (PTR_ERR(root_volume) != -ENOMEDIUM)
+ return PTR_ERR(root_volume);
+
+ /* Okay, this cell doesn't have a root.cell volume. We need to
+ * locate some other random volume and use that to check.
+ */
+ return afs_query_for_alias(cell, key);
+}
+
+/*
+ * Check to see if a new cell is an alias of a cell we already have. At this
+ * point we have the cell's volume server list.
+ *
+ * Returns 0 if we didn't detect an alias, 1 if we found an alias, and an error
+ * if we had problems gathering the data required. In the case that we did
+ * detect an alias, cell->alias_of is set to point to the assumed master.
+ */
+int afs_cell_detect_alias(struct afs_cell *cell, struct key *key)
+{
+ struct afs_net *net = cell->net;
+ int ret;
+
+ if (mutex_lock_interruptible(&net->cells_alias_lock) < 0)
+ return -ERESTARTSYS;
+
+ if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &cell->flags)) {
+ ret = afs_do_cell_detect_alias(cell, key);
+ if (ret >= 0)
+ clear_bit_unlock(AFS_CELL_FL_CHECK_ALIAS, &cell->flags);
+ } else {
+ ret = cell->alias_of ? 1 : 0;
+ }
+
+ mutex_unlock(&net->cells_alias_lock);
+
+ if (ret == 1)
+ pr_notice("kAFS: Cell %s is an alias of %s\n",
+ cell->name, cell->alias_of->name);
+ return ret;
+}
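
[Editor's note: afs_compare_volume_slists() above relies on both server lists being sorted by UUID, so a single merge-style walk can count how many servers the two volumes share. A standalone sketch of that walk, with plain integers standing in for uuid_t:

#include <stdio.h>

/* Walk two sorted lists in step and count common entries. */
static int count_matches(const int *a, int na, const int *b, int nb)
{
	int i = 0, j = 0, matches = 0;

	while (i < na && j < nb) {
		if (a[i] < b[j]) {
			i++;
		} else if (a[i] > b[j]) {
			j++;
		} else {
			matches++;
			i++;
			j++;
		}
	}
	return matches;
}

int main(void)
{
	int cell_a[] = { 3, 5, 8, 13 };
	int cell_b[] = { 5, 8, 21 };

	printf("%d servers in common\n", count_matches(cell_a, 4, cell_b, 3));
	return 0;
}
]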
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index 72eacc14e6e1..f405ca8b240a 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -151,6 +151,10 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
vc->error = error;
vc->flags |= AFS_VL_CURSOR_RETRY;
goto next_server;
+
+ case -EOPNOTSUPP:
+ _debug("notsupp");
+ goto next_server;
}
restart_from_beginning:
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 516e9a3bb5b4..fd82850cd424 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -82,6 +82,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
for (j = 0; j < 6; j++)
uuid->node[j] = (u8)ntohl(xdr->node[j]);
+ entry->addr_version[n] = ntohl(uvldb->serverUnique[i]);
entry->nr_servers++;
}
@@ -447,8 +448,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
call->count2 = ntohl(*bp); /* Type or next count */
if (call->count > YFS_MAXENDPOINTS)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt_num);
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_num);
alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
if (!alist)
@@ -468,8 +468,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type);
}
size += sizeof(__be32);
@@ -487,21 +486,20 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt4_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_fsendpt4_len);
afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt6_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_fsendpt6_len);
afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
bp += 6;
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type);
}
/* Got either the type of the next entry or the count of
@@ -519,8 +517,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (!call->count)
goto end;
if (call->count > YFS_MAXENDPOINTS)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type);
afs_extract_to_buf(call, 1 * sizeof(__be32));
call->unmarshall = 3;
@@ -547,8 +544,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type);
}
if (call->count > 1)
@@ -566,19 +562,18 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt4_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_vlendpt4_len);
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt6_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_vlendpt6_len);
bp += 6;
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type);
}
/* Got either the type of the next entry or the count of
@@ -650,3 +645,114 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
afs_make_call(&vc->ac, call, GFP_KERNEL);
return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
}
+
+/*
+ * Deliver reply data to a YFSVL.GetCellName operation.
+ */
+static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
+{
+ char *cell_name;
+ u32 namesz, paddedsz;
+ int ret;
+
+ _enter("{%u,%zu/%u}",
+ call->unmarshall, iov_iter_count(call->iter), call->count);
+
+ switch (call->unmarshall) {
+ case 0:
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ /* Fall through - and extract the cell name length */
+ case 1:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ namesz = ntohl(call->tmp);
+ if (namesz > AFS_MAXCELLNAME)
+ return afs_protocol_error(call, afs_eproto_cellname_len);
+ paddedsz = (namesz + 3) & ~3;
+ call->count = namesz;
+ call->count2 = paddedsz - namesz;
+
+ cell_name = kmalloc(namesz + 1, GFP_KERNEL);
+ if (!cell_name)
+ return -ENOMEM;
+ cell_name[namesz] = 0;
+ call->ret_str = cell_name;
+
+ afs_extract_begin(call, cell_name, namesz);
+ call->unmarshall++;
+
+ /* Fall through - and extract cell name */
+ case 2:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ afs_extract_discard(call, call->count2);
+ call->unmarshall++;
+
+ /* Fall through - and extract padding */
+ case 3:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+ call->unmarshall++;
+ break;
+ }
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+static void afs_destroy_yfsvl_get_cell_name(struct afs_call *call)
+{
+ kfree(call->ret_str);
+ afs_flat_call_destructor(call);
+}
+
+/*
+ * YFSVL.GetCellName operation type
+ */
+static const struct afs_call_type afs_YFSVLGetCellName = {
+ .name = "YFSVL.GetCellName",
+ .op = afs_YFSVL_GetCellName,
+ .deliver = afs_deliver_yfsvl_get_cell_name,
+ .destructor = afs_destroy_yfsvl_get_cell_name,
+};
+
+/*
+ * Ask a YFS volume location server for the canonical name of the cell it is
+ * serving, so that the result can be compared against the name the client is
+ * using and cell aliases detected.
+ */
+char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
+{
+ struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(net, &afs_YFSVLGetCellName, 1 * 4, 0);
+ if (!call)
+ return ERR_PTR(-ENOMEM);
+
+ call->key = vc->key;
+ call->ret_str = NULL;
+ call->max_lifespan = AFS_VL_MAX_LIFESPAN;
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(YVLGETCELLNAME);
+
+ /* Can't take a ref on server */
+ trace_afs_make_vl_call(call);
+ afs_make_call(&vc->ac, call, GFP_KERNEL);
+ return (char *)afs_wait_for_call_to_complete(call, &vc->ac);
+}
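
[Editor's note: the GetCellName unmarshaller above rounds the wire length of the name up to a multiple of four and discards the padding, per the usual XDR string layout. A throwaway userspace check of the (namesz + 3) & ~3 arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int namesz;

	for (namesz = 1; namesz <= 8; namesz++) {
		unsigned int paddedsz = (namesz + 3) & ~3u;

		printf("namesz=%u paddedsz=%u padding discarded=%u\n",
		       namesz, paddedsz, paddedsz - namesz);
	}
	return 0;
}
]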
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 4310336b9bb8..9bc0509e3634 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -13,6 +13,56 @@ unsigned __read_mostly afs_volume_gc_delay = 10;
unsigned __read_mostly afs_volume_record_life = 60 * 60;
/*
+ * Insert a volume into a cell. If there's an existing volume record, that is
+ * returned instead with a ref held.
+ */
+static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
+ struct afs_volume *volume)
+{
+ struct afs_volume *p;
+ struct rb_node *parent = NULL, **pp;
+
+ write_seqlock(&cell->volume_lock);
+
+ pp = &cell->volumes.rb_node;
+ while (*pp) {
+ parent = *pp;
+ p = rb_entry(parent, struct afs_volume, cell_node);
+ if (p->vid < volume->vid) {
+ pp = &(*pp)->rb_left;
+ } else if (p->vid > volume->vid) {
+ pp = &(*pp)->rb_right;
+ } else {
+ volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
+ goto found;
+ }
+ }
+
+ rb_link_node_rcu(&volume->cell_node, parent, pp);
+ rb_insert_color(&volume->cell_node, &cell->volumes);
+ hlist_add_head_rcu(&volume->proc_link, &cell->proc_volumes);
+
+found:
+ write_sequnlock(&cell->volume_lock);
+ return volume;
+
+}
+
+static void afs_remove_volume_from_cell(struct afs_volume *volume)
+{
+ struct afs_cell *cell = volume->cell;
+
+ if (!hlist_unhashed(&volume->proc_link)) {
+ trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ afs_volume_trace_remove);
+ write_seqlock(&cell->volume_lock);
+ hlist_del_rcu(&volume->proc_link);
+ rb_erase(&volume->cell_node, &cell->volumes);
+ write_sequnlock(&cell->volume_lock);
+ }
+}
+
+/*
* Allocate a volume record and load it up from a vldb record.
*/
static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
@@ -39,7 +89,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
volume->name_len = vldb->name_len;
atomic_set(&volume->usage, 1);
- INIT_LIST_HEAD(&volume->proc_link);
+ INIT_HLIST_NODE(&volume->proc_link);
rwlock_init(&volume->servers_lock);
rwlock_init(&volume->cb_v_break_lock);
memcpy(volume->name, vldb->name, vldb->name_len + 1);
@@ -51,7 +101,8 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
}
refcount_set(&slist->usage, 1);
- volume->servers = slist;
+ rcu_assign_pointer(volume->servers, slist);
+ trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
return volume;
error_1:
@@ -62,6 +113,25 @@ error_0:
}
/*
+ * Look up or allocate a volume record.
+ */
+static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params,
+ struct afs_vldb_entry *vldb,
+ unsigned long type_mask)
+{
+ struct afs_volume *candidate, *volume;
+
+ candidate = afs_alloc_volume(params, vldb, type_mask);
+ if (IS_ERR(candidate))
+ return candidate;
+
+ volume = afs_insert_volume_into_cell(params->cell, candidate);
+ if (volume != candidate)
+ afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup);
+ return volume;
+}
+
+/*
* Look up a VLDB record for a volume.
*/
static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell,
@@ -138,7 +208,7 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params)
}
type_mask = 1UL << params->type;
- volume = afs_alloc_volume(params, vldb, type_mask);
+ volume = afs_lookup_volume(params, vldb, type_mask);
error:
kfree(vldb);
@@ -156,23 +226,42 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
ASSERTCMP(volume->cache, ==, NULL);
#endif
- afs_put_serverlist(net, volume->servers);
+ afs_remove_volume_from_cell(volume);
+ afs_put_serverlist(net, rcu_access_pointer(volume->servers));
afs_put_cell(net, volume->cell);
- kfree(volume);
+ trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ afs_volume_trace_free);
+ kfree_rcu(volume, rcu);
_leave(" [destroyed]");
}
/*
- * Drop a reference on a volume record.
+ * Get a reference on a volume record.
*/
-void afs_put_volume(struct afs_cell *cell, struct afs_volume *volume)
+struct afs_volume *afs_get_volume(struct afs_volume *volume,
+ enum afs_volume_trace reason)
{
if (volume) {
- _enter("%s", volume->name);
+ int u = atomic_inc_return(&volume->usage);
+ trace_afs_volume(volume->vid, u, reason);
+ }
+ return volume;
+}
+
- if (atomic_dec_and_test(&volume->usage))
- afs_destroy_volume(cell->net, volume);
+/*
+ * Drop a reference on a volume record.
+ */
+void afs_put_volume(struct afs_net *net, struct afs_volume *volume,
+ enum afs_volume_trace reason)
+{
+ if (volume) {
+ afs_volid_t vid = volume->vid;
+ int u = atomic_dec_return(&volume->usage);
+ trace_afs_volume(vid, u, reason);
+ if (u == 0)
+ afs_destroy_volume(net, volume);
}
}
@@ -188,10 +277,6 @@ void afs_activate_volume(struct afs_volume *volume)
NULL, 0,
volume, 0, true);
#endif
-
- write_lock(&volume->cell->proc_lock);
- list_add_tail(&volume->proc_link, &volume->cell->proc_volumes);
- write_unlock(&volume->cell->proc_lock);
}
/*
@@ -201,10 +286,6 @@ void afs_deactivate_volume(struct afs_volume *volume)
{
_enter("%s", volume->name);
- write_lock(&volume->cell->proc_lock);
- list_del_init(&volume->proc_link);
- write_unlock(&volume->cell->proc_lock);
-
#ifdef CONFIG_AFS_FSCACHE
fscache_relinquish_cookie(volume->cache, NULL,
test_bit(AFS_VOLUME_DELETED, &volume->flags));
@@ -256,17 +337,17 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
write_lock(&volume->servers_lock);
discard = new;
- old = volume->servers;
+ old = rcu_dereference_protected(volume->servers,
+ lockdep_is_held(&volume->servers_lock));
if (afs_annotate_server_list(new, old)) {
new->seq = volume->servers_seq + 1;
- volume->servers = new;
+ rcu_assign_pointer(volume->servers, new);
smp_wmb();
volume->servers_seq++;
discard = old;
}
volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
- clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
write_unlock(&volume->servers_lock);
ret = 0;
@@ -281,25 +362,27 @@ error:
/*
* Make sure the volume record is up to date.
*/
-int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc)
+int afs_check_volume_status(struct afs_volume *volume, struct afs_operation *op)
{
- time64_t now = ktime_get_real_seconds();
int ret, retries = 0;
_enter("");
- if (volume->update_at <= now)
- set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
-
retry:
- if (!test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags) &&
- !test_bit(AFS_VOLUME_WAIT, &volume->flags)) {
- _leave(" = 0");
- return 0;
- }
-
+ if (test_bit(AFS_VOLUME_WAIT, &volume->flags))
+ goto wait;
+ if (volume->update_at <= ktime_get_real_seconds() ||
+ test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags))
+ goto update;
+ _leave(" = 0");
+ return 0;
+
+update:
if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) {
- ret = afs_update_volume_status(volume, fc->key);
+ clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
+ ret = afs_update_volume_status(volume, op->key);
+ if (ret < 0)
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags);
clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags);
wake_up_bit(&volume->flags, AFS_VOLUME_WAIT);
@@ -307,14 +390,15 @@ retry:
return ret;
}
+wait:
if (!test_bit(AFS_VOLUME_WAIT, &volume->flags)) {
_leave(" = 0 [no wait]");
return 0;
}
ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT,
- (fc->flags & AFS_FS_CURSOR_INTR) ?
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ (op->flags & AFS_OPERATION_UNINTR) ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
_leave(" = %d", ret);
return ret;
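
[Editor's note: afs_check_server_record() and afs_check_volume_status() above now share the same shape: whoever wins the test_and_set_bit_lock() does the refresh, clears the NEEDS_UPDATE request first, and everyone else waits for the UPDATING bit to clear. A simplified userspace sketch of that single-updater pattern (spinning where the kernel sleeps in wait_on_bit(); names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool updating;
static atomic_bool needs_update;

static void do_update(void)
{
	printf("refreshing record\n");
}

static void wait_for_clear(atomic_bool *flag)
{
	while (atomic_load(flag))
		;	/* the kernel sleeps in wait_on_bit() instead of spinning */
}

static void check_record(void)
{
	if (!atomic_load(&needs_update))
		return;				/* record already fresh */

	if (!atomic_exchange(&updating, true)) {
		/* We won the race: clear the request before updating so a
		 * failure can re-request it, as the patch does. */
		atomic_store(&needs_update, false);
		do_update();
		atomic_store(&updating, false);	/* wake_up_bit() in the patch */
	} else {
		wait_for_clear(&updating);	/* someone else is doing it */
	}
}

int main(void)
{
	atomic_store(&needs_update, true);
	check_record();		/* performs the refresh */
	check_record();		/* sees a fresh record and returns at once */
	return 0;
}
]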
diff --git a/fs/afs/write.c b/fs/afs/write.c
index cb76566763db..768497f82aee 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -349,82 +349,112 @@ static void afs_pages_written_back(struct afs_vnode *vnode,
}
/*
- * write to a file
+ * Find a key to use for the writeback. We cached the keys used to author the
+ * writes on the vnode. *_wbk will contain the last writeback key used or NULL
+ * and we need to start from there if it's set.
*/
-static int afs_store_data(struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to)
+static int afs_get_writeback_key(struct afs_vnode *vnode,
+ struct afs_wb_key **_wbk)
{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
struct afs_wb_key *wbk = NULL;
struct list_head *p;
int ret = -ENOKEY, ret2;
- _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
- vnode->volume->name,
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- first, last, offset, to);
-
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- return -ENOMEM;
-
spin_lock(&vnode->wb_lock);
- p = vnode->wb_keys.next;
+ if (*_wbk)
+ p = (*_wbk)->vnode_link.next;
+ else
+ p = vnode->wb_keys.next;
- /* Iterate through the list looking for a valid key to use. */
-try_next_key:
while (p != &vnode->wb_keys) {
wbk = list_entry(p, struct afs_wb_key, vnode_link);
_debug("wbk %u", key_serial(wbk->key));
ret2 = key_validate(wbk->key);
- if (ret2 == 0)
- goto found_key;
+ if (ret2 == 0) {
+ refcount_inc(&wbk->usage);
+ _debug("USE WB KEY %u", key_serial(wbk->key));
+ break;
+ }
+
+ wbk = NULL;
if (ret == -ENOKEY)
ret = ret2;
p = p->next;
}
spin_unlock(&vnode->wb_lock);
- afs_put_wb_key(wbk);
- kfree(scb);
- _leave(" = %d [no keys]", ret);
- return ret;
+ if (*_wbk)
+ afs_put_wb_key(*_wbk);
+ *_wbk = wbk;
+ return 0;
+}
-found_key:
- refcount_inc(&wbk->usage);
- spin_unlock(&vnode->wb_lock);
+static void afs_store_data_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
- _debug("USE WB KEY %u", key_serial(wbk->key));
+ afs_vnode_commit_status(op, &op->file[0]);
+ if (op->error == 0) {
+ afs_pages_written_back(vnode, op->store.first, op->store.last);
+ afs_stat_v(vnode, n_stores);
+ atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
+ (op->store.first * PAGE_SIZE + op->store.first_offset),
+ &afs_v2net(vnode)->n_store_bytes);
+ }
+}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
- afs_dataversion_t data_version = vnode->status.data_version + 1;
+static const struct afs_operation_ops afs_store_data_operation = {
+ .issue_afs_rpc = afs_fs_store_data,
+ .issue_yfs_rpc = yfs_fs_store_data,
+ .success = afs_store_data_success,
+};
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
- }
+/*
+ * write to a file
+ */
+static int afs_store_data(struct address_space *mapping,
+ pgoff_t first, pgoff_t last,
+ unsigned offset, unsigned to)
+{
+ struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct afs_operation *op;
+ struct afs_wb_key *wbk = NULL;
+ int ret;
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- if (fc.ac.error == 0)
- afs_pages_written_back(vnode, first, last);
- ret = afs_end_vnode_operation(&fc);
+ _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
+ vnode->volume->name,
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ first, last, offset, to);
+
+ ret = afs_get_writeback_key(vnode, &wbk);
+ if (ret) {
+ _leave(" = %d [no keys]", ret);
+ return ret;
}
- switch (ret) {
- case 0:
- afs_stat_v(vnode, n_stores);
- atomic_long_add((last * PAGE_SIZE + to) -
- (first * PAGE_SIZE + offset),
- &afs_v2net(vnode)->n_store_bytes);
- break;
+ op = afs_alloc_operation(wbk->key, vnode->volume);
+ if (IS_ERR(op)) {
+ afs_put_wb_key(wbk);
+ return -ENOMEM;
+ }
+
+ afs_op_set_vnode(op, 0, vnode);
+ op->file[0].dv_delta = 1;
+ op->store.mapping = mapping;
+ op->store.first = first;
+ op->store.last = last;
+ op->store.first_offset = offset;
+ op->store.last_to = to;
+ op->mtime = vnode->vfs_inode.i_mtime;
+ op->ops = &afs_store_data_operation;
+
+try_next_key:
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+
+ switch (op->error) {
case -EACCES:
case -EPERM:
case -ENOKEY:
@@ -432,16 +462,19 @@ found_key:
case -EKEYREJECTED:
case -EKEYREVOKED:
_debug("next");
- spin_lock(&vnode->wb_lock);
- p = wbk->vnode_link.next;
- afs_put_wb_key(wbk);
- goto try_next_key;
+
+ ret = afs_get_writeback_key(vnode, &wbk);
+ if (ret == 0) {
+ key_put(op->key);
+ op->key = key_get(wbk->key);
+ goto try_next_key;
+ }
+ break;
}
afs_put_wb_key(wbk);
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ _leave(" = %d", op->error);
+ return afs_put_operation(op);
}
/*
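
[Editor's note: afs_store_data() above now loops over the vnode's cached writeback keys: if the server rejects the current key with a permission-type error, afs_get_writeback_key() hands back the next candidate and the operation is reissued. A toy sketch of that retry flow (the error values and the key list are stand-ins, not the real key handling):

#include <stdio.h>

/* Pretend server response for a given key: negative keys are rejected. */
static int try_store_with_key(int key)
{
	return key < 0 ? -13 /* EACCES */ : 0;
}

int main(void)
{
	int keys[] = { -1, -2, 42 };	/* two revoked keys, then a good one */
	unsigned int i;
	int ret = -126;			/* ENOKEY: no usable key at all */

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		ret = try_store_with_key(keys[i]);
		if (ret != -13)
			break;	/* success, or an error no other key will fix */
	}
	printf("store finished with %d\n", ret);
	return 0;
}
]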
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 7af41fd5f3ee..84f3c4f57531 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -35,6 +35,25 @@ ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
/*
+ * Deal with the result of a successful fetch ACL operation.
+ */
+static void afs_acl_success(struct afs_operation *op)
+{
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static void afs_acl_put(struct afs_operation *op)
+{
+ kfree(op->acl);
+}
+
+static const struct afs_operation_ops afs_fetch_acl_operation = {
+ .issue_afs_rpc = afs_fs_fetch_acl,
+ .success = afs_acl_success,
+ .put = afs_acl_put,
+};
+
+/*
* Get a file's ACL.
*/
static int afs_xattr_get_acl(const struct xattr_handler *handler,
@@ -42,37 +61,23 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
struct inode *inode, const char *name,
void *buffer, size_t size)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_acl *acl = NULL;
- struct key *key;
- int ret = -ENOMEM;
+ int ret;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- goto error;
-
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
+ return -ENOMEM;
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- acl = afs_fs_fetch_acl(&fc, scb);
- }
+ afs_op_set_vnode(op, 0, vnode);
+ op->ops = &afs_fetch_acl_operation;
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ acl = op->acl;
+ op->acl = NULL;
+ ret = afs_put_operation(op);
if (ret == 0) {
ret = acl->size;
@@ -80,18 +85,37 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
if (acl->size <= size)
memcpy(buffer, acl->data, acl->size);
else
- ret = -ERANGE;
+ op->error = -ERANGE;
}
- kfree(acl);
}
- key_put(key);
-error_scb:
- kfree(scb);
-error:
+ kfree(acl);
return ret;
}
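One detail worth noting in the converted getter above: afs_acl_put() frees op->acl when the operation is released, so the xattr handler detaches the buffer before dropping the op and frees it itself once the contents have been copied out. Roughly:

	afs_wait_for_operation(op);
	acl = op->acl;
	op->acl = NULL;			/* detach: afs_acl_put()'s kfree(op->acl) then frees NULL */
	ret = afs_put_operation(op);
	/* ...copy acl->data out to the caller's buffer... */
	kfree(acl);

The YFS variant further down takes the opposite approach for the fetch case: yfs_fetch_opaque_acl_operation deliberately has no .put handler, so the caller-supplied yacl stays owned by afs_xattr_get_yfs() and is released there with yfs_free_opaque_acl().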
+static bool afs_make_acl(struct afs_operation *op,
+ const void *buffer, size_t size)
+{
+ struct afs_acl *acl;
+
+ acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
+ if (!acl) {
+ afs_op_nomem(op);
+ return false;
+ }
+
+ acl->size = size;
+ memcpy(acl->data, buffer, size);
+ op->acl = acl;
+ return true;
+}
+
+static const struct afs_operation_ops afs_store_acl_operation = {
+ .issue_afs_rpc = afs_fs_store_acl,
+ .success = afs_acl_success,
+ .put = afs_acl_put,
+};
+
/*
* Set a file's AFS3 ACL.
*/
@@ -100,55 +124,22 @@ static int afs_xattr_set_acl(const struct xattr_handler *handler,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_acl *acl = NULL;
- struct key *key;
- int ret = -ENOMEM;
if (flags == XATTR_CREATE)
return -EINVAL;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- goto error;
-
- acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
- if (!acl)
- goto error_scb;
-
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_acl;
- }
-
- acl->size = size;
- memcpy(acl->data, buffer, size);
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
+ return -ENOMEM;
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_store_acl(&fc, acl, scb);
- }
+ afs_op_set_vnode(op, 0, vnode);
+ if (!afs_make_acl(op, buffer, size))
+ return afs_put_operation(op);
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
- key_put(key);
-error_acl:
- kfree(acl);
-error_scb:
- kfree(scb);
-error:
- return ret;
+ op->ops = &afs_store_acl_operation;
+ return afs_do_sync_operation(op);
}
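The two ACL entry points above also illustrate the two calling styles used throughout this patch. Store-style paths that need nothing back from the operation hand it to afs_do_sync_operation(), which is defined elsewhere in the patch and appears to roll begin/wait/put into a single call; fetch-style paths keep the op alive until its results have been pulled off it. Side by side:

	/* store style: nothing to read back afterwards */
	op->ops = &afs_store_acl_operation;
	return afs_do_sync_operation(op);

	/* fetch style: consume the op's results before releasing it */
	op->ops = &afs_fetch_acl_operation;
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);
	/* ...take op->acl... */
	return afs_put_operation(op);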
static const struct xattr_handler afs_xattr_afs_acl_handler = {
@@ -157,6 +148,17 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = {
.set = afs_xattr_set_acl,
};
+static void yfs_acl_put(struct afs_operation *op)
+{
+ yfs_free_opaque_acl(op->yacl);
+}
+
+static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = {
+ .issue_yfs_rpc = yfs_fs_fetch_opaque_acl,
+ .success = afs_acl_success,
+ /* Don't free op->yacl in .put here */
+};
+
/*
* Get a file's YFS ACL.
*/
@@ -165,11 +167,9 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
struct inode *inode, const char *name,
void *buffer, size_t size)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct yfs_acl *yacl = NULL;
- struct key *key;
char buf[16], *data;
int which = 0, dsize, ret = -ENOMEM;
@@ -193,75 +193,62 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
else if (which == 3)
yacl->flags |= YFS_ACL_WANT_VOL_ACL;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
goto error_yacl;
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ afs_op_set_vnode(op, 0, vnode);
+ op->yacl = yacl;
+ op->ops = &yfs_fetch_opaque_acl_operation;
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ ret = afs_put_operation(op);
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- yfs_fs_fetch_opaque_acl(&fc, yacl, scb);
+ if (ret == 0) {
+ switch (which) {
+ case 0:
+ data = yacl->acl->data;
+ dsize = yacl->acl->size;
+ break;
+ case 1:
+ data = buf;
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
+ break;
+ case 2:
+ data = buf;
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
+ break;
+ case 3:
+ data = yacl->vol_acl->data;
+ dsize = yacl->vol_acl->size;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto error_yacl;
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
- if (ret < 0)
- goto error_key;
-
- switch (which) {
- case 0:
- data = yacl->acl->data;
- dsize = yacl->acl->size;
- break;
- case 1:
- data = buf;
- dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
- break;
- case 2:
- data = buf;
- dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
- break;
- case 3:
- data = yacl->vol_acl->data;
- dsize = yacl->vol_acl->size;
- break;
- default:
- ret = -EOPNOTSUPP;
- goto error_key;
- }
-
- ret = dsize;
- if (size > 0) {
- if (dsize > size) {
- ret = -ERANGE;
- goto error_key;
+ ret = dsize;
+ if (size > 0) {
+ if (dsize <= size)
+ memcpy(buffer, data, dsize);
+ else
+ ret = -ERANGE;
}
- memcpy(buffer, data, dsize);
}
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error_yacl:
yfs_free_opaque_acl(yacl);
error:
return ret;
}
+static const struct afs_operation_ops yfs_store_opaque_acl2_operation = {
+ .issue_yfs_rpc = yfs_fs_store_opaque_acl2,
+ .success = afs_acl_success,
+ .put = yfs_acl_put,
+};
+
/*
* Set a file's YFS ACL.
*/
@@ -270,56 +257,23 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_acl *acl = NULL;
- struct key *key;
- int ret = -ENOMEM;
if (flags == XATTR_CREATE ||
strcmp(name, "acl") != 0)
return -EINVAL;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- goto error;
-
- acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
- if (!acl)
- goto error_scb;
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
+ return -ENOMEM;
- acl->size = size;
- memcpy(acl->data, buffer, size);
+ afs_op_set_vnode(op, 0, vnode);
+ if (!afs_make_acl(op, buffer, size))
+ return afs_put_operation(op);
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_acl;
- }
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- yfs_fs_store_opaque_acl2(&fc, acl, scb);
- }
-
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
-error_acl:
- kfree(acl);
- key_put(key);
-error_scb:
- kfree(scb);
-error:
- return ret;
+ op->ops = &yfs_store_opaque_acl2_operation;
+ return afs_do_sync_operation(op);
}
static const struct xattr_handler afs_xattr_yfs_handler = {
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index fe413e7a5cf4..52d5af5fcd44 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -15,13 +15,6 @@
#include "xdr_fs.h"
#include "protocol_yfs.h"
-static const struct afs_fid afs_zero_fid;
-
-static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
-{
- call->cbi = afs_get_cb_interest(cbi);
-}
-
#define xdr_size(x) (sizeof(*x) / sizeof(__be32))
static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid)
@@ -79,6 +72,11 @@ static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len)
return bp + len / sizeof(__be32);
}
+static __be32 *xdr_encode_name(__be32 *bp, const struct qstr *p)
+{
+ return xdr_encode_string(bp, p->name, p->len);
+}
+
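xdr_encode_name() is a small convenience added here so the marshalling code can take the length straight from a struct qstr instead of calling strlen() on a bare name string, as the later hunks in this file do:

	const struct qstr *name = &op->dentry->d_name;

	bp = xdr_encode_YFSFid(bp, &dvp->fid);
	bp = xdr_encode_name(bp, name);	/* previously: xdr_encode_string(bp, name, strlen(name)) */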
static s64 linux_to_yfs_time(const struct timespec64 *t)
{
/* Convert to 100ns intervals. */
@@ -179,21 +177,20 @@ static void xdr_dump_bad(const __be32 *bp)
/*
* Decode a YFSFetchStatus block
*/
-static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
- struct afs_call *call,
- struct afs_status_cb *scb)
+static void xdr_decode_YFSFetchStatus(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
struct afs_file_status *status = &scb->status;
u32 type;
- int ret;
status->abort_code = ntohl(xdr->abort_code);
if (status->abort_code != 0) {
if (status->abort_code == VNOVNODE)
status->nlink = 0;
scb->have_error = true;
- goto good;
+ goto advance;
}
type = ntohl(xdr->type);
@@ -221,15 +218,13 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
status->size = xdr_to_u64(xdr->size);
status->data_version = xdr_to_u64(xdr->data_version);
scb->have_status = true;
-good:
- ret = 0;
advance:
*_bp += xdr_size(xdr);
- return ret;
+ return;
bad:
xdr_dump_bad(*_bp);
- ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+ afs_protocol_error(call, afs_eproto_bad_status);
goto advance;
}
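The decoder change above sets the pattern for the rest of the file: xdr_decode_YFSFetchStatus() no longer returns an error, a bad status block is reported through afs_protocol_error() (whose -EBADMSG argument has been dropped, presumably because the helper now supplies it), and the buffer pointer is still advanced. Deliver functions can therefore chain the decodes without checking each one:

	bp = call->buffer;
	xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
	xdr_decode_YFSCallBack(&bp, call, &vp->scb);
	xdr_decode_YFSVolSync(&bp, &op->volsync);
	return 0;	/* any protocol error was already recorded against the call */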
@@ -339,6 +334,7 @@ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp,
*/
static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
int ret;
@@ -348,11 +344,9 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack(&bp, call, call->out_scb);
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb);
+ xdr_decode_YFSCallBack(&bp, call, &op->file[0].scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -364,6 +358,7 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
*/
static int yfs_deliver_status_and_volsync(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
int ret;
@@ -372,10 +367,8 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -394,44 +387,33 @@ static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = {
/*
* Fetch the status information for a file.
*/
-int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
- struct afs_volsync *volsync)
+void yfs_fs_fetch_file_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus_vnode,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = volsync;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
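The conversion of yfs_fs_fetch_file_status() above is repeated for every RPC wrapper in this file, so the common shape is worth spelling out once. The wrappers become void, take only the afs_operation, and leave key handling, server selection and waiting to the operation core (the afs_use_fs_server(), afs_set_fc_call() and afs_wait_for_call_to_complete() calls all disappear). A sketch of that shape, with yfs_fs_frob(), yfs_RXYFSFrob and YFSFROB standing in as hypothetical names:

	void yfs_fs_frob(struct afs_operation *op)
	{
		struct afs_vnode_param *vp = &op->file[0];
		struct afs_call *call;
		__be32 *bp;

		call = afs_alloc_flat_call(op->net, &yfs_RXYFSFrob,
					   sizeof(__be32) * 2 +
					   sizeof(struct yfs_xdr_YFSFid),
					   sizeof(struct yfs_xdr_YFSFetchStatus) +
					   sizeof(struct yfs_xdr_YFSVolSync));
		if (!call)
			return afs_op_nomem(op);	/* presumably records -ENOMEM on the op */

		/* marshall the parameters */
		bp = call->request;
		bp = xdr_encode_u32(bp, YFSFROB);
		bp = xdr_encode_u32(bp, 0);	/* RPC flags */
		bp = xdr_encode_YFSFid(bp, &vp->fid);
		yfs_check_req(call, bp);

		trace_afs_make_fs_call(call, &vp->fid);
		afs_make_op_call(op, call, GFP_NOFS);
	}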
@@ -439,7 +421,9 @@ int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb
*/
static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
{
- struct afs_read *req = call->read_request;
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
const __be32 *bp;
unsigned int size;
int ret;
@@ -534,14 +518,12 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack(&bp, call, call->out_scb);
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
- req->data_version = call->out_scb->status.data_version;
- req->file_size = call->out_scb->status.size;
+ req->data_version = vp->scb.status.data_version;
+ req->file_size = vp->scb.status.size;
call->unmarshall++;
/* Fall through */
@@ -565,12 +547,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
return 0;
}
-static void yfs_fetch_data_destructor(struct afs_call *call)
-{
- afs_put_read(call->read_request);
- afs_flat_call_destructor(call);
-}
-
/*
* YFS.FetchData64 operation type
*/
@@ -578,25 +554,24 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = {
.name = "YFS.FetchData64",
.op = yfs_FS_FetchData64,
.deliver = yfs_deliver_fs_fetch_data64,
- .destructor = yfs_fetch_data_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
* Fetch data from a file.
*/
-int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
- struct afs_read *req)
+void yfs_fs_fetch_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},%llx,%llx",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode,
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode,
req->pos, req->len);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_u64) * 2,
@@ -604,27 +579,19 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
- call->read_request = afs_get_read(req);
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_u64(bp, req->pos);
bp = xdr_encode_u64(bp, req->len);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -632,6 +599,9 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
*/
static int yfs_deliver_fs_create_vnode(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -643,15 +613,11 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_YFSFid(&bp, call->out_fid);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack(&bp, call, call->out_scb);
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFid(&bp, &op->file[1].fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -670,26 +636,20 @@ static const struct afs_call_type afs_RXFSCreateFile = {
/*
* Create a file.
*/
-int yfs_fs_create_file(struct afs_fs_cursor *fc,
- const char *name,
- umode_t mode,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void yfs_fs_create_file(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz, reqsz, rplsz;
+ size_t reqsz, rplsz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
reqsz = (sizeof(__be32) +
sizeof(__be32) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
sizeof(struct yfs_xdr_YFSStoreStatus) +
sizeof(__be32));
rplsz = (sizeof(struct yfs_xdr_YFSFid) +
@@ -698,30 +658,22 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile, reqsz, rplsz);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSCREATEFILE);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, op->create.mode);
bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
static const struct afs_call_type yfs_RXFSMakeDir = {
@@ -734,26 +686,20 @@ static const struct afs_call_type yfs_RXFSMakeDir = {
/*
* Make a directory.
*/
-int yfs_fs_make_dir(struct afs_fs_cursor *fc,
- const char *name,
- umode_t mode,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void yfs_fs_make_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz, reqsz, rplsz;
+ size_t reqsz, rplsz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
reqsz = (sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
sizeof(struct yfs_xdr_YFSStoreStatus));
rplsz = (sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
@@ -761,29 +707,21 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc,
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz);
+ call = afs_alloc_flat_call(op->net, &yfs_RXFSMakeDir, reqsz, rplsz);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSMAKEDIR);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, op->create.mode);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -791,6 +729,9 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_remove_file2(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_fid fid;
const __be32 *bp;
int ret;
@@ -802,20 +743,24 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
-
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
xdr_decode_YFSFid(&bp, &fid);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
/* Was deleted if vnode->status.abort_code == VNOVNODE. */
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
return 0;
}
+static void yfs_done_fs_remove_file2(struct afs_call *call)
+{
+ if (call->error == -ECONNABORTED &&
+ call->abort_code == RX_INVALID_OPERATION) {
+ set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
+ call->op->flags |= AFS_OPERATION_DOWNGRADE;
+ }
+}
+
/*
* YFS.RemoveFile2 operation type.
*/
@@ -823,55 +768,44 @@ static const struct afs_call_type yfs_RXYFSRemoveFile2 = {
.name = "YFS.RemoveFile2",
.op = yfs_FS_RemoveFile2,
.deliver = yfs_deliver_fs_remove_file2,
+ .done = yfs_done_fs_remove_file2,
.destructor = afs_flat_call_destructor,
};
/*
* Remove a file and retrieve new file status.
*/
-int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, struct afs_status_cb *dvnode_scb,
- struct afs_status_cb *vnode_scb)
+void yfs_fs_remove_file2(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ struct afs_vnode_param *dvp = &op->file[0];
+ const struct qstr *name = &op->dentry->d_name;
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
-
- call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile2,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz),
+ xdr_strlen(name->len),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSREMOVEFILE2);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -879,6 +813,8 @@ int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_remove(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
const __be32 *bp;
int ret;
@@ -889,11 +825,8 @@ static int yfs_deliver_fs_remove(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
-
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
return 0;
}
@@ -907,6 +840,43 @@ static const struct afs_call_type yfs_RXYFSRemoveFile = {
.destructor = afs_flat_call_destructor,
};
+/*
+ * Remove a file.
+ */
+void yfs_fs_remove_file(struct afs_operation *op)
+{
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ if (!test_bit(AFS_SERVER_FL_NO_RM2, &op->server->flags))
+ return yfs_fs_remove_file2(op);
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSREMOVEFILE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ yfs_check_req(call, bp);
+
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
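The new .done handler, together with yfs_fs_remove_file() added above, gives the removal path a downgrade route for servers that abort YFS.RemoveFile2 with RX_INVALID_OPERATION: the abort marks the server and flags the operation, and later removals skip straight to plain YFS.RemoveFile. Condensed from the two hunks:

	/* in the .done handler, on abort: */
	if (call->error == -ECONNABORTED &&
	    call->abort_code == RX_INVALID_OPERATION) {
		set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
		call->op->flags |= AFS_OPERATION_DOWNGRADE;
	}

	/* in yfs_fs_remove_file(), before marshalling: */
	if (!test_bit(AFS_SERVER_FL_NO_RM2, &op->server->flags))
		return yfs_fs_remove_file2(op);

How AFS_OPERATION_DOWNGRADE itself is consumed (presumably a reissue of the current removal with the plain variant) is handled by the operation core outside this file.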
static const struct afs_call_type yfs_RXYFSRemoveDir = {
.name = "YFS.RemoveDir",
.op = yfs_FS_RemoveDir,
@@ -915,48 +885,37 @@ static const struct afs_call_type yfs_RXYFSRemoveDir = {
};
/*
- * remove a file or directory
+ * Remove a directory.
*/
-int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, bool isdir,
- struct afs_status_cb *dvnode_scb)
+void yfs_fs_remove_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
- call = afs_alloc_flat_call(
- net, isdir ? &yfs_RXYFSRemoveDir : &yfs_RXYFSRemoveFile,
- sizeof(__be32) +
- sizeof(struct yfs_xdr_RPCFlags) +
- sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz),
- sizeof(struct yfs_xdr_YFSFetchStatus) +
- sizeof(struct yfs_xdr_YFSVolSync));
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveDir,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
- bp = xdr_encode_u32(bp, isdir ? YFSREMOVEDIR : YFSREMOVEFILE);
+ bp = xdr_encode_u32(bp, YFSREMOVEDIR);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -964,6 +923,9 @@ int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_link(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -974,13 +936,9 @@ static int yfs_deliver_fs_link(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
}
@@ -998,50 +956,39 @@ static const struct afs_call_type yfs_RXYFSLink = {
/*
* Make a hard link.
*/
-int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name,
- struct afs_status_cb *dvnode_scb,
- struct afs_status_cb *vnode_scb)
+void yfs_fs_link(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
- size_t namesz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
- call = afs_alloc_flat_call(net, &yfs_RXYFSLink,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSLink,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSLINK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &vp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1049,6 +996,9 @@ int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_symlink(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -1060,14 +1010,10 @@ static int yfs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_YFSFid(&bp, call->out_fid);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFid(&bp, &vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1086,28 +1032,22 @@ static const struct afs_call_type yfs_RXYFSSymlink = {
/*
* Create a symbolic link.
*/
-int yfs_fs_symlink(struct afs_fs_cursor *fc,
- const char *name,
- const char *contents,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *vnode_scb)
+void yfs_fs_symlink(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz, contents_sz;
+ size_t contents_sz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
- contents_sz = strlen(contents);
- call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink,
+ contents_sz = strlen(op->create.symlink);
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSSymlink,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
xdr_strlen(contents_sz) +
sizeof(struct yfs_xdr_YFSStoreStatus),
sizeof(struct yfs_xdr_YFSFid) +
@@ -1115,28 +1055,20 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSYMLINK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_string(bp, contents, contents_sz);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_string(bp, op->create.symlink, contents_sz);
bp = xdr_encode_YFSStoreStatus_mode(bp, S_IRWXUGO);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1144,6 +1076,9 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_rename(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
const __be32 *bp;
int ret;
@@ -1154,14 +1089,12 @@ static int yfs_deliver_fs_rename(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
-
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ /* If the two dirs are the same, we have two copies of the same status
+ * report, so we just decode it twice.
+ */
+ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
}
@@ -1179,55 +1112,42 @@ static const struct afs_call_type yfs_RXYFSRename = {
/*
* Rename a file or directory.
*/
-int yfs_fs_rename(struct afs_fs_cursor *fc,
- const char *orig_name,
- struct afs_vnode *new_dvnode,
- const char *new_name,
- struct afs_status_cb *orig_dvnode_scb,
- struct afs_status_cb *new_dvnode_scb)
+void yfs_fs_rename(struct afs_operation *op)
{
- struct afs_vnode *orig_dvnode = fc->vnode;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
struct afs_call *call;
- struct afs_net *net = afs_v2net(orig_dvnode);
- size_t o_namesz, n_namesz;
__be32 *bp;
_enter("");
- o_namesz = strlen(orig_name);
- n_namesz = strlen(new_name);
- call = afs_alloc_flat_call(net, &yfs_RXYFSRename,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(o_namesz) +
+ xdr_strlen(orig_name->len) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(n_namesz),
+ xdr_strlen(new_name->len),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = orig_dvnode_scb;
- call->out_scb = new_dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSRENAME);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &orig_dvnode->fid);
- bp = xdr_encode_string(bp, orig_name, o_namesz);
- bp = xdr_encode_YFSFid(bp, &new_dvnode->fid);
- bp = xdr_encode_string(bp, new_name, n_namesz);
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1243,27 +1163,23 @@ static const struct afs_call_type yfs_RXYFSStoreData64 = {
/*
* Store a set of pages to a large file.
*/
-int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to,
- struct afs_status_cb *scb)
+void yfs_fs_store_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
loff_t size, pos, i_size;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- size = (loff_t)to - (loff_t)offset;
- if (first != last)
- size += (loff_t)(last - first) << PAGE_SHIFT;
- pos = (loff_t)first << PAGE_SHIFT;
- pos += offset;
+ size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset;
+ if (op->store.first != op->store.last)
+ size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT;
+ pos = (loff_t)op->store.first << PAGE_SHIFT;
+ pos += op->store.first_offset;
- i_size = i_size_read(&vnode->vfs_inode);
+ i_size = i_size_read(&vp->vnode->vfs_inode);
if (pos + size > i_size)
i_size = size + pos;
@@ -1271,7 +1187,7 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
(unsigned long long)size, (unsigned long long)pos,
(unsigned long long)i_size);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64,
sizeof(__be32) +
sizeof(__be32) +
sizeof(struct yfs_xdr_YFSFid) +
@@ -1280,33 +1196,24 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->mapping = mapping;
- call->first = first;
- call->last = last;
- call->first_offset = offset;
- call->last_to = to;
+ return afs_op_nomem(op);
+
+ call->key = op->key;
call->send_pages = true;
- call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTOREDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
- bp = xdr_encode_YFSStoreStatus_mtime(bp, &vnode->vfs_inode.i_mtime);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
+ bp = xdr_encode_YFSStoreStatus_mtime(bp, &op->mtime);
bp = xdr_encode_u64(bp, pos);
bp = xdr_encode_u64(bp, size);
bp = xdr_encode_u64(bp, i_size);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
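The size/position arithmetic in yfs_fs_store_data() is unchanged in substance, only re-sourced from op->store. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12) and op->store.first = 2, .last = 4, .first_offset = 100, .last_to = 300:

	pos  = (2 << 12) + 100                = 8292
	size = (300 - 100) + ((4 - 2) << 12)  = 8392

so the write covers byte 100 of page 2 up to byte 300 of page 4, and the file length marshalled is max(i_size_read(inode), pos + size) = 16684 when the file was previously shorter than that.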
@@ -1330,18 +1237,17 @@ static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = {
* Set the attributes on a file, using YFS.StoreData64 rather than
* YFS.StoreStatus so as to alter the file size also.
*/
-static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+static void yfs_fs_setattr_size(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64_as_Status,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSStoreStatus) +
@@ -1349,72 +1255,59 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTOREDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_YFS_StoreStatus(bp, attr);
bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
bp = xdr_encode_u64(bp, 0); /* size of write */
bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* Set the attributes on a file, using YFS.StoreData64 if there's a change in
* file size, and YFS.StoreStatus otherwise.
*/
-int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+void yfs_fs_setattr(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
if (attr->ia_valid & ATTR_SIZE)
- return yfs_fs_setattr_size(fc, attr, scb);
+ return yfs_fs_setattr_size(op);
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSStoreStatus),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTORESTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_YFS_StoreStatus(bp, attr);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1422,6 +1315,7 @@ int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
*/
static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
char *p;
u32 size;
@@ -1443,7 +1337,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
+ xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs);
call->unmarshall++;
afs_extract_to_tmp(call);
/* Fall through */
@@ -1457,8 +1351,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_volname_len);
+ return afs_protocol_error(call, afs_eproto_volname_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1487,8 +1380,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_offline_msg_len);
+ return afs_protocol_error(call, afs_eproto_offline_msg_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1518,8 +1410,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_motd_len);
+ return afs_protocol_error(call, afs_eproto_motd_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1560,17 +1451,15 @@ static const struct afs_call_type yfs_RXYFSGetVolumeStatus = {
/*
* fetch the status of a volume
*/
-int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
- struct afs_volume_status *vs)
+void yfs_fs_get_volume_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSGetVolumeStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_u64),
max_t(size_t,
@@ -1578,23 +1467,17 @@ int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
sizeof(__be32),
AFSOPAQUEMAX + 1));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_volstatus = vs;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_u64(bp, vnode->fid.vid);
+ bp = xdr_encode_u64(bp, vp->fid.vid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1632,118 +1515,93 @@ static const struct afs_call_type yfs_RXYFSReleaseLock = {
/*
* Set a lock on a file
*/
-int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type,
- struct afs_status_cb *scb)
+void yfs_fs_set_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSSetLock,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSETLOCK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
- bp = xdr_encode_u32(bp, type);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
+ bp = xdr_encode_u32(bp, op->lock.type);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_calli(call, &vnode->fid, type);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_calli(call, &vp->fid, op->lock.type);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* extend a lock on a file
*/
-int yfs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void yfs_fs_extend_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSExtendLock,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSEXTENDLOCK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* release a lock on a file
*/
-int yfs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void yfs_fs_release_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSReleaseLock,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSRELEASELOCK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1759,45 +1617,33 @@ static const struct afs_call_type yfs_RXYFSFetchStatus = {
/*
* Fetch the status information for a fid without needing a vnode handle.
*/
-int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fid,
- struct afs_status_cb *scb,
- struct afs_volsync *volsync)
+void yfs_fs_fetch_status(struct afs_operation *op)
{
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), fid->vid, fid->vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = volsync;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1805,6 +1651,7 @@ int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
struct afs_status_cb *scb;
const __be32 *bp;
u32 tmp;
@@ -1826,10 +1673,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
tmp = ntohl(call->tmp);
- _debug("status count: %u/%u", tmp, call->count2);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_count);
+ _debug("status count: %u/%u", tmp, op->nr_files);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_count);
call->count = 0;
call->unmarshall++;
@@ -1843,14 +1689,23 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
if (ret < 0)
return ret;
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
- ret = xdr_decode_YFSFetchStatus(&bp, call, scb);
- if (ret < 0)
- return ret;
+ xdr_decode_YFSFetchStatus(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_counts;
call->count = 0;
@@ -1867,9 +1722,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_cb_count);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_cb_count);
call->count = 0;
call->unmarshall++;
more_cbs:
@@ -1883,11 +1737,22 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
_debug("unmarshall CB array");
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
xdr_decode_YFSCallBack(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_cbs;
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
@@ -1900,7 +1765,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
call->unmarshall++;
/* Fall through */
@@ -1926,50 +1791,39 @@ static const struct afs_call_type yfs_RXYFSInlineBulkStatus = {
/*
* Fetch the status information for up to 1024 files
*/
-int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fids,
- struct afs_status_cb *statuses,
- unsigned int nr_fids,
- struct afs_volsync *volsync)
+void yfs_fs_inline_bulk_status(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
__be32 *bp;
int i;
_enter(",%x,{%llx:%llu},%u",
- key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files);
- call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSInlineBulkStatus,
sizeof(__be32) +
sizeof(__be32) +
sizeof(__be32) +
- sizeof(struct yfs_xdr_YFSFid) * nr_fids,
+ sizeof(struct yfs_xdr_YFSFid) * op->nr_files,
sizeof(struct yfs_xdr_YFSFetchStatus));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = statuses;
- call->out_volsync = volsync;
- call->count2 = nr_fids;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS);
bp = xdr_encode_u32(bp, 0); /* RPCFlags */
- bp = xdr_encode_u32(bp, nr_fids);
- for (i = 0; i < nr_fids; i++)
- bp = xdr_encode_YFSFid(bp, &fids[i]);
+ bp = xdr_encode_u32(bp, op->nr_files);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
+ for (i = 0; i < op->nr_files - 2; i++)
+ bp = xdr_encode_YFSFid(bp, &op->more_files[i].fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &fids[0]);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
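With the out_scb array gone, yfs_deliver_fs_inline_bulk_status() has to route each decoded status and callback to the right per-vnode buffer itself; the mapping mirrors the order in which the fids were marshalled above (dvp first, then vp, then more_files):

	switch (call->count) {
	case 0:					/* op->file[0] (dvp) */
		scb = &op->file[0].scb;
		break;
	case 1:					/* op->file[1] (vp) */
		scb = &op->file[1].scb;
		break;
	default:
		scb = &op->more_files[call->count - 2].scb;
		break;
	}

The same switch appears twice in the deliver routine, once for the status array and once for the callback array, and the count checks now compare against op->nr_files rather than a value stashed in call->count2.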
@@ -1977,7 +1831,9 @@ int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
{
- struct yfs_acl *yacl = call->out_yacl;
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct yfs_acl *yacl = op->yacl;
struct afs_acl *acl;
const __be32 *bp;
unsigned int size;
@@ -2067,10 +1923,8 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
bp = call->buffer;
yacl->inherit_flag = ntohl(*bp++);
yacl->num_cleaned = ntohl(*bp++);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
call->unmarshall++;
/* Fall through */
@@ -2105,45 +1959,33 @@ static const struct afs_call_type yfs_RXYFSFetchOpaqueACL = {
/*
* Fetch the YFS advanced ACLs for a file.
*/
-struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *fc,
- struct yfs_acl *yacl,
- struct afs_status_cb *scb)
+void yfs_fs_fetch_opaque_acl(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchOpaqueACL,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchOpaqueACL,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return ERR_PTR(-ENOMEM);
- }
-
- call->key = fc->key;
- call->out_yacl = yacl;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHOPAQUEACL);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return (struct yfs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
/*
@@ -2159,46 +2001,38 @@ static const struct afs_call_type yfs_RXYFSStoreOpaqueACL2 = {
/*
* Fetch the YFS ACL for a file.
*/
-int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl,
- struct afs_status_cb *scb)
+void yfs_fs_store_opaque_acl2(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_acl *acl = op->acl;
size_t size;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreOpaqueACL2,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32) + size,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTOREOPAQUEACL2);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_u32(bp, acl->size);
memcpy(bp, acl->data, acl->size);
if (acl->size != size)
memset((void *)bp + acl->size, 0, size - acl->size);
yfs_check_req(call, bp);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
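
The fs/afs/yfsclient.c hunks above convert the YFS RPC helpers from the old afs_fs_cursor interface to the afs_operation descriptor: the fid list, status buffers and volsync now live on the op, allocation failure is reported through afs_op_nomem(), and the call is issued with afs_make_op_call(). A minimal sketch of the status-block index mapping used by the bulk-status decode; yfs_scb_for_index() is a hypothetical helper, not part of the patch.

	/*
	 * Illustrative only: mirrors the switch in
	 * yfs_deliver_fs_inline_bulk_status() above.
	 */
	static struct afs_status_cb *yfs_scb_for_index(struct afs_operation *op,
						       unsigned int index)
	{
		/* Slots 0 and 1 live in op->file[]; later fids in op->more_files[]. */
		if (index < 2)
			return &op->file[index].scb;
		return &op->more_files[index - 2].scb;
	}
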
diff --git a/fs/aio.c b/fs/aio.c
index 6483f9274d5e..7ecddc2f38db 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -27,7 +27,6 @@
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -520,7 +519,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_size = nr_pages * PAGE_SIZE;
pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
return -EINTR;
@@ -529,7 +528,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0, &unused, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
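
The fs/aio.c hunk is part of the tree-wide switch from taking mm->mmap_sem directly to the mmap locking API wrappers. A minimal sketch of the pattern, assuming only that the elided body modifies the address space; with_mmap_write_lock() is a made-up name.

	#include <linux/mm.h>

	static int with_mmap_write_lock(struct mm_struct *mm)
	{
		if (mmap_write_lock_killable(mm))	/* was down_write_killable(&mm->mmap_sem) */
			return -EINTR;

		/* ... modify the address space ... */

		mmap_write_unlock(mm);			/* was up_write(&mm->mmap_sem) */
		return 0;
	}
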
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 8035d2a44561..54f0ce444272 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -15,6 +15,7 @@
#include <linux/time.h>
#include <linux/namei.h>
#include <linux/poll.h>
+#include <linux/fiemap.h>
static int bad_file_open(struct inode *inode, struct file *filp)
{
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 8e8346a81723..3e84e9bb9084 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -151,7 +151,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
return -ENOMEM;
/* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
return retval;
@@ -174,7 +174,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
if (retval < 0)
return retval;
- install_exec_creds(bprm);
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
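
The binfmt_aout.c change follows the exec rework: the point of no return is now begin_new_exec() rather than flush_old_exec(), and the separate install_exec_creds() call disappears because the credential switch appears to happen inside begin_new_exec(). A hedged sketch of the resulting loader shape; load_foo_binary() is a made-up name.

	#include <linux/binfmts.h>

	static int load_foo_binary(struct linux_binprm *bprm)
	{
		int retval;

		retval = begin_new_exec(bprm);	/* was flush_old_exec(bprm) */
		if (retval)
			return retval;

		setup_new_exec(bprm);
		/* no separate install_exec_creds(bprm) call any more */

		/* ... map segments, set up the stack, start the thread ... */
		return 0;
	}
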
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8945671fe0e5..9fe3b51c116a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -208,7 +208,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
size_t len = strlen(k_platform) + 1;
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
- if (__copy_to_user(u_platform, k_platform, len))
+ if (copy_to_user(u_platform, k_platform, len))
return -EFAULT;
}
@@ -221,7 +221,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
size_t len = strlen(k_base_platform) + 1;
u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
- if (__copy_to_user(u_base_platform, k_base_platform, len))
+ if (copy_to_user(u_base_platform, k_base_platform, len))
return -EFAULT;
}
@@ -231,7 +231,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
u_rand_bytes = (elf_addr_t __user *)
STACK_ALLOC(p, sizeof(k_rand_bytes));
- if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+ if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
/* Create the ELF interpreter info */
@@ -279,8 +279,8 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
NEW_AUX_ENT(AT_BASE_PLATFORM,
(elf_addr_t)(unsigned long)u_base_platform);
}
- if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
- NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
+ if (bprm->have_execfd) {
+ NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
@@ -314,21 +314,21 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
return -EFAULT;
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
- if (__put_user(argc, sp++))
+ if (put_user(argc, sp++))
return -EFAULT;
/* Populate list of argv pointers back to argv strings. */
p = mm->arg_end = mm->arg_start;
while (argc-- > 0) {
size_t len;
- if (__put_user((elf_addr_t)p, sp++))
+ if (put_user((elf_addr_t)p, sp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- if (__put_user(0, sp++))
+ if (put_user(0, sp++))
return -EFAULT;
mm->arg_end = p;
@@ -336,14 +336,14 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
mm->env_end = mm->env_start = p;
while (envc-- > 0) {
size_t len;
- if (__put_user((elf_addr_t)p, sp++))
+ if (put_user((elf_addr_t)p, sp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- if (__put_user(0, sp++))
+ if (put_user(0, sp++))
return -EFAULT;
mm->env_end = p;
@@ -353,8 +353,6 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
return 0;
}
-#ifndef elf_map
-
static unsigned long elf_map(struct file *filep, unsigned long addr,
const struct elf_phdr *eppnt, int prot, int type,
unsigned long total_size)
@@ -394,8 +392,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
return(map_addr);
}
-#endif /* !elf_map */
-
static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
int i, first_idx = -1, last_idx = -1;
@@ -975,7 +971,7 @@ out_free_interp:
goto out_free_dentry;
/* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
goto out_free_dentry;
@@ -989,7 +985,6 @@ out_free_interp:
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
- install_exec_creds(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
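
binfmt_elf.c drops the unchecked __put_user()/__copy_to_user() variants when building the new stack and now publishes the executable descriptor through bprm->have_execfd/bprm->execfd for AT_EXECFD. A small illustrative helper (not in the patch) for the checked-write pattern.

	#include <linux/elf.h>
	#include <linux/uaccess.h>

	/* push_stack_word() is hypothetical; only the pattern matters. */
	static int push_stack_word(elf_addr_t __user *sp, elf_addr_t val)
	{
		return put_user(val, sp) ? -EFAULT : 0;	/* was an unchecked __put_user() */
	}
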
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d9501a86cec9..0f45521b237c 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -338,7 +338,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;
/* flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
goto error;
@@ -434,7 +434,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
current->mm->start_stack = current->mm->start_brk + stack_size;
#endif
- install_exec_creds(bprm);
if (create_elf_fdpic_tables(bprm, current->mm,
&exec_params, &interp_params) < 0)
goto error;
@@ -537,7 +536,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
platform_len = strlen(k_platform) + 1;
sp -= platform_len;
u_platform = (char __user *) sp;
- if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
+ if (copy_to_user(u_platform, k_platform, platform_len) != 0)
return -EFAULT;
}
@@ -552,7 +551,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
platform_len = strlen(k_base_platform) + 1;
sp -= platform_len;
u_base_platform = (char __user *) sp;
- if (__copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
+ if (copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
return -EFAULT;
}
@@ -589,7 +588,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0) +
(k_base_platform ? 1 : 0) + AT_VECTOR_SIZE_ARCH;
- if (bprm->interp_flags & BINPRM_FLAGS_EXECFD)
+ if (bprm->have_execfd)
nitems++;
csp = sp;
@@ -604,11 +603,13 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* put the ELF interpreter info on the stack */
#define NEW_AUX_ENT(id, val) \
do { \
- struct { unsigned long _id, _val; } __user *ent; \
+ struct { unsigned long _id, _val; } __user *ent, v; \
\
ent = (void __user *) csp; \
- __put_user((id), &ent[nr]._id); \
- __put_user((val), &ent[nr]._val); \
+ v._id = (id); \
+ v._val = (val); \
+ if (copy_to_user(ent + nr, &v, sizeof(v))) \
+ return -EFAULT; \
nr++; \
} while (0)
@@ -629,10 +630,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
(elf_addr_t) (unsigned long) u_base_platform);
}
- if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
+ if (bprm->have_execfd) {
nr = 0;
csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
+ NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
}
nr = 0;
@@ -675,7 +676,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* stack argc */
csp -= sizeof(unsigned long);
- __put_user(bprm->argc, (unsigned long __user *) csp);
+ if (put_user(bprm->argc, (unsigned long __user *) csp))
+ return -EFAULT;
BUG_ON(csp != sp);
@@ -689,25 +691,29 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
p = (char __user *) current->mm->arg_start;
for (loop = bprm->argc; loop > 0; loop--) {
- __put_user((elf_caddr_t) p, argv++);
+ if (put_user((elf_caddr_t) p, argv++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(NULL, argv);
+ if (put_user(NULL, argv))
+ return -EFAULT;
current->mm->arg_end = (unsigned long) p;
/* fill in the envv[] array */
current->mm->env_start = (unsigned long) p;
for (loop = bprm->envc; loop > 0; loop--) {
- __put_user((elf_caddr_t)(unsigned long) p, envp++);
+ if (put_user((elf_caddr_t)(unsigned long) p, envp++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(NULL, envp);
+ if (put_user(NULL, envp))
+ return -EFAULT;
current->mm->env_end = (unsigned long) p;
mm->start_stack = (unsigned long) sp;
@@ -849,8 +855,8 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
dyn = (Elf32_Dyn __user *)params->dynamic_addr;
- __get_user(d_tag, &dyn[tmp - 1].d_tag);
- if (d_tag != 0)
+ if (get_user(d_tag, &dyn[tmp - 1].d_tag) ||
+ d_tag != 0)
goto dynamic_error;
break;
}
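
In binfmt_elf_fdpic.c the NEW_AUX_ENT macro now stages each auxv pair in a kernel-side struct and transfers it with a single checked copy_to_user(). The same pattern as a stand-alone sketch; write_aux_ent() and struct aux_ent are illustrative only.

	#include <linux/uaccess.h>

	struct aux_ent { unsigned long id, val; };

	static int write_aux_ent(struct aux_ent __user *ent,
				 unsigned long id, unsigned long val)
	{
		struct aux_ent v = { .id = id, .val = val };

		/* one checked transfer instead of two unchecked __put_user() calls */
		return copy_to_user(ent, &v, sizeof(v)) ? -EFAULT : 0;
	}
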
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index 466497860c62..06b9b9fddf70 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -48,10 +48,6 @@ static int load_em86(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
/* Unlike in the script case, we don't have to do any hairy
* parsing to find our interpreter... it's hardcoded!
*/
@@ -68,15 +64,15 @@ static int load_em86(struct linux_binprm *bprm)
* user environment and arguments are stored.
*/
remove_arg_zero(bprm);
- retval = copy_strings_kernel(1, &bprm->filename, bprm);
+ retval = copy_string_kernel(bprm->filename, bprm);
if (retval < 0) return retval;
bprm->argc++;
if (i_arg) {
- retval = copy_strings_kernel(1, &i_arg, bprm);
+ retval = copy_string_kernel(i_arg, bprm);
if (retval < 0) return retval;
bprm->argc++;
}
- retval = copy_strings_kernel(1, &i_name, bprm);
+ retval = copy_string_kernel(i_name, bprm);
if (retval < 0) return retval;
bprm->argc++;
@@ -89,13 +85,8 @@ static int load_em86(struct linux_binprm *bprm)
if (IS_ERR(file))
return PTR_ERR(file);
- bprm->file = file;
-
- retval = prepare_binprm(bprm);
- if (retval < 0)
- return retval;
-
- return search_binary_handler(bprm);
+ bprm->interpreter = file;
+ return 0;
}
static struct linux_binfmt em86_format = {
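
binfmt_em86.c shows the new handler contract: argv strings are added one at a time with copy_string_kernel(), and instead of recursing through prepare_binprm()/search_binary_handler() the handler stores the interpreter's file in bprm->interpreter and returns 0 so the exec core finishes the job. A hypothetical condensation of that tail.

	#include <linux/binfmts.h>
	#include <linux/fs.h>

	static int hand_off_to_interpreter(struct linux_binprm *bprm, struct file *interp)
	{
		int retval;

		retval = copy_string_kernel(bprm->filename, bprm); /* was copy_strings_kernel(1, ...) */
		if (retval < 0)
			return retval;
		bprm->argc++;

		bprm->interpreter = interp;	/* exec core loads and runs it; no recursion here */
		return 0;
	}
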
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 831a2b25ba79..f2f9086ebe98 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -138,35 +138,40 @@ static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start
current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
sp = (unsigned long __user *)current->mm->start_stack;
- __put_user(bprm->argc, sp++);
+ if (put_user(bprm->argc, sp++))
+ return -EFAULT;
if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) {
unsigned long argv, envp;
argv = (unsigned long)(sp + 2);
envp = (unsigned long)(sp + 2 + bprm->argc + 1);
- __put_user(argv, sp++);
- __put_user(envp, sp++);
+ if (put_user(argv, sp++) || put_user(envp, sp++))
+ return -EFAULT;
}
current->mm->arg_start = (unsigned long)p;
for (i = bprm->argc; i > 0; i--) {
- __put_user((unsigned long)p, sp++);
+ if (put_user((unsigned long)p, sp++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(0, sp++);
+ if (put_user(0, sp++))
+ return -EFAULT;
current->mm->arg_end = (unsigned long)p;
current->mm->env_start = (unsigned long) p;
for (i = bprm->envc; i > 0; i--) {
- __put_user((unsigned long)p, sp++);
+ if (put_user((unsigned long)p, sp++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(0, sp++);
+ if (put_user(0, sp++))
+ return -EFAULT;
current->mm->env_end = (unsigned long)p;
return 0;
@@ -534,7 +539,7 @@ static int load_flat_file(struct linux_binprm *bprm,
/* Flush all traces of the currently running executable */
if (id == 0) {
- ret = flush_old_exec(bprm);
+ ret = begin_new_exec(bprm);
if (ret)
goto err;
@@ -854,7 +859,7 @@ static int load_flat_file(struct linux_binprm *bprm,
#endif /* CONFIG_BINFMT_FLAT_OLD */
}
- flush_icache_range(start_code, end_code);
+ flush_icache_user_range(start_code, end_code);
/* zero the BSS, BRK and stack areas */
if (clear_user((void __user *)(datapos + data_len), bss_len +
@@ -963,8 +968,6 @@ static int load_flat_binary(struct linux_binprm *bprm)
}
}
- install_exec_creds(bprm);
-
set_binfmt(&flat_format);
#ifdef CONFIG_MMU
@@ -998,7 +1001,8 @@ static int load_flat_binary(struct linux_binprm *bprm)
unsigned long __user *sp;
current->mm->start_stack -= sizeof(unsigned long);
sp = (unsigned long __user *)current->mm->start_stack;
- __put_user(start_addr, sp);
+ if (put_user(start_addr, sp))
+ return -EFAULT;
start_addr = libinfo.lib_list[i].entry;
}
}
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index cdb45829354d..3880a82da1dc 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -134,7 +134,6 @@ static int load_misc_binary(struct linux_binprm *bprm)
Node *fmt;
struct file *interp_file = NULL;
int retval;
- int fd_binary = -1;
retval = -ENOEXEC;
if (!enabled)
@@ -160,51 +159,25 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto ret;
}
- if (fmt->flags & MISC_FMT_OPEN_BINARY) {
+ if (fmt->flags & MISC_FMT_OPEN_BINARY)
+ bprm->have_execfd = 1;
- /* if the binary should be opened on behalf of the
- * interpreter than keep it open and assign descriptor
- * to it
- */
- fd_binary = get_unused_fd_flags(0);
- if (fd_binary < 0) {
- retval = fd_binary;
- goto ret;
- }
- fd_install(fd_binary, bprm->file);
-
- /* if the binary is not readable than enforce mm->dumpable=0
- regardless of the interpreter's permissions */
- would_dump(bprm, bprm->file);
-
- allow_write_access(bprm->file);
- bprm->file = NULL;
-
- /* mark the bprm that fd should be passed to interp */
- bprm->interp_flags |= BINPRM_FLAGS_EXECFD;
- bprm->interp_data = fd_binary;
-
- } else {
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
- }
/* make argv[1] be the path to the binary */
- retval = copy_strings_kernel(1, &bprm->interp, bprm);
+ retval = copy_string_kernel(bprm->interp, bprm);
if (retval < 0)
- goto error;
+ goto ret;
bprm->argc++;
/* add the interp as argv[0] */
- retval = copy_strings_kernel(1, &fmt->interpreter, bprm);
+ retval = copy_string_kernel(fmt->interpreter, bprm);
if (retval < 0)
- goto error;
+ goto ret;
bprm->argc++;
/* Update interp in case binfmt_script needs it. */
retval = bprm_change_interp(fmt->interpreter, bprm);
if (retval < 0)
- goto error;
+ goto ret;
if (fmt->flags & MISC_FMT_OPEN_FILE) {
interp_file = file_clone_open(fmt->interp_file);
@@ -215,38 +188,16 @@ static int load_misc_binary(struct linux_binprm *bprm)
}
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
- goto error;
-
- bprm->file = interp_file;
- if (fmt->flags & MISC_FMT_CREDENTIALS) {
- loff_t pos = 0;
-
- /*
- * No need to call prepare_binprm(), it's already been
- * done. bprm->buf is stale, update from interp_file.
- */
- memset(bprm->buf, 0, BINPRM_BUF_SIZE);
- retval = kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE,
- &pos);
- } else
- retval = prepare_binprm(bprm);
-
- if (retval < 0)
- goto error;
+ goto ret;
- retval = search_binary_handler(bprm);
- if (retval < 0)
- goto error;
+ bprm->interpreter = interp_file;
+ if (fmt->flags & MISC_FMT_CREDENTIALS)
+ bprm->execfd_creds = 1;
+ retval = 0;
ret:
dput(fmt->dentry);
return retval;
-error:
- if (fd_binary > 0)
- ksys_close(fd_binary);
- bprm->interp_flags = 0;
- bprm->interp_data = 0;
- goto ret;
}
/* Command parsers */
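
binfmt_misc no longer opens and installs the executable's descriptor itself; it only marks the bprm and lets the exec core provide AT_EXECFD and, for the credentials flag, base the new creds on the binary rather than the interpreter. A rough sketch; the MISC_FMT_* flags are private to fs/binfmt_misc.c, so the helper is purely illustrative.

	#include <linux/binfmts.h>

	static void foo_mark_binfmt_misc_flags(struct linux_binprm *bprm, int misc_flags)
	{
		if (misc_flags & MISC_FMT_OPEN_BINARY)
			bprm->have_execfd = 1;	/* exec core installs the fd and exposes AT_EXECFD */
		if (misc_flags & MISC_FMT_CREDENTIALS)
			bprm->execfd_creds = 1;	/* creds follow the binary, not the interpreter */
	}
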
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index e9e6a6f4a35f..1b6625e95958 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -16,14 +16,14 @@
#include <linux/fs.h>
static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
-static inline char *next_non_spacetab(char *first, const char *last)
+static inline const char *next_non_spacetab(const char *first, const char *last)
{
for (; first <= last; first++)
if (!spacetab(*first))
return first;
return NULL;
}
-static inline char *next_terminator(char *first, const char *last)
+static inline const char *next_terminator(const char *first, const char *last)
{
for (; first <= last; first++)
if (spacetab(*first) || !*first)
@@ -33,8 +33,7 @@ static inline char *next_terminator(char *first, const char *last)
static int load_script(struct linux_binprm *bprm)
{
- const char *i_arg, *i_name;
- char *cp, *buf_end;
+ const char *i_name, *i_sep, *i_arg, *i_end, *buf_end;
struct file *file;
int retval;
@@ -43,20 +42,6 @@ static int load_script(struct linux_binprm *bprm)
return -ENOEXEC;
/*
- * If the script filename will be inaccessible after exec, typically
- * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give
- * up now (on the assumption that the interpreter will want to load
- * this file).
- */
- if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
- return -ENOENT;
-
- /* Release since we are not mapping a binary into memory. */
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
- /*
* This section handles parsing the #! line into separate
* interpreter path and argument strings. We must be careful
* because bprm->buf is not yet guaranteed to be NUL-terminated
@@ -71,39 +56,43 @@ static int load_script(struct linux_binprm *bprm)
* parse them on its own.
*/
buf_end = bprm->buf + sizeof(bprm->buf) - 1;
- cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
- if (!cp) {
- cp = next_non_spacetab(bprm->buf + 2, buf_end);
- if (!cp)
+ i_end = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+ if (!i_end) {
+ i_end = next_non_spacetab(bprm->buf + 2, buf_end);
+ if (!i_end)
return -ENOEXEC; /* Entire buf is spaces/tabs */
/*
* If there is no later space/tab/NUL we must assume the
* interpreter path is truncated.
*/
- if (!next_terminator(cp, buf_end))
+ if (!next_terminator(i_end, buf_end))
return -ENOEXEC;
- cp = buf_end;
+ i_end = buf_end;
}
- /* NUL-terminate the buffer and any trailing spaces/tabs. */
- *cp = '\0';
- while (cp > bprm->buf) {
- cp--;
- if ((*cp == ' ') || (*cp == '\t'))
- *cp = '\0';
- else
- break;
- }
- for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
- if (*cp == '\0')
+ /* Trim any trailing spaces/tabs from i_end */
+ while (spacetab(i_end[-1]))
+ i_end--;
+
+ /* Skip over leading spaces/tabs */
+ i_name = next_non_spacetab(bprm->buf+2, i_end);
+ if (!i_name || (i_name == i_end))
return -ENOEXEC; /* No interpreter name found */
- i_name = cp;
+
+ /* Is there an optional argument? */
i_arg = NULL;
- for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++)
- /* nothing */ ;
- while ((*cp == ' ') || (*cp == '\t'))
- *cp++ = '\0';
- if (*cp)
- i_arg = cp;
+ i_sep = next_terminator(i_name, i_end);
+ if (i_sep && (*i_sep != '\0'))
+ i_arg = next_non_spacetab(i_sep, i_end);
+
+ /*
+ * If the script filename will be inaccessible after exec, typically
+ * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give
+ * up now (on the assumption that the interpreter will want to load
+ * this file).
+ */
+ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
+ return -ENOENT;
+
/*
* OK, we've parsed out the interpreter name and
* (optional) argument.
@@ -117,17 +106,19 @@ static int load_script(struct linux_binprm *bprm)
retval = remove_arg_zero(bprm);
if (retval)
return retval;
- retval = copy_strings_kernel(1, &bprm->interp, bprm);
+ retval = copy_string_kernel(bprm->interp, bprm);
if (retval < 0)
return retval;
bprm->argc++;
+ *((char *)i_end) = '\0';
if (i_arg) {
- retval = copy_strings_kernel(1, &i_arg, bprm);
+ *((char *)i_sep) = '\0';
+ retval = copy_string_kernel(i_arg, bprm);
if (retval < 0)
return retval;
bprm->argc++;
}
- retval = copy_strings_kernel(1, &i_name, bprm);
+ retval = copy_string_kernel(i_name, bprm);
if (retval)
return retval;
bprm->argc++;
@@ -142,11 +133,8 @@ static int load_script(struct linux_binprm *bprm)
if (IS_ERR(file))
return PTR_ERR(file);
- bprm->file = file;
- retval = prepare_binprm(bprm);
- if (retval < 0)
- return retval;
- return search_binary_handler(bprm);
+ bprm->interpreter = file;
+ return 0;
}
static struct linux_binfmt script_format = {
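
The binfmt_script.c rewrite parses the #! line through const pointers and only writes the terminating NULs into bprm->buf once parsing has succeeded, immediately before the i_name/i_arg strings (which still point into that buffer) are copied. A hypothetical condensation of just the splitting step, reusing the next_non_spacetab()/next_terminator() helpers shown above.

	static int split_shebang(const char *buf, const char *end,
				 const char **i_name, const char **i_arg)
	{
		const char *i_sep;

		*i_name = next_non_spacetab(buf + 2, end);	/* skip "#!" and leading blanks */
		if (!*i_name || *i_name == end)
			return -ENOEXEC;			/* no interpreter name found */

		*i_arg = NULL;
		i_sep = next_terminator(*i_name, end);
		if (i_sep && *i_sep != '\0')
			*i_arg = next_non_spacetab(i_sep, end);	/* optional argument */

		return 0;
	}
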
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 602bf3af9fb4..87f60a48f750 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -5,6 +5,7 @@
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/fiemap.h>
#include "ulist.h"
/*
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 768c8be4c765..31ac8c682f19 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7828,14 +7828,12 @@ const struct iomap_dio_ops btrfs_dops = {
.submit_io = btrfs_submit_direct,
};
-#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
-
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
int ret;
- ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
if (ret)
return ret;
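
btrfs_fiemap() now calls the common fiemap_prep() helper, which validates the caller's flags and bounds the requested range, in place of the filesystem-local fiemap_check_flags() check. A minimal sketch of the entry pattern; foo_fiemap() is illustrative, not btrfs code.

	#include <linux/fiemap.h>

	static int foo_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			      u64 start, u64 len)
	{
		int ret;

		ret = fiemap_prep(inode, fieinfo, start, &len, 0);	/* was fiemap_check_flags() */
		if (ret)
			return ret;

		/* ... walk extents and call fiemap_fill_next_extent() ... */
		return 0;
	}
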
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 0a0823d378db..50c635dc7f71 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
export.o caps.o snap.o xattr.o quota.o io.o \
mds_client.o mdsmap.o strings.o ceph_frag.o \
- debugfs.o util.o
+ debugfs.o util.o metric.o
ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 26be6520d3fb..e0465741c591 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -22,7 +22,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
struct ceph_inode_info *ci = ceph_inode(inode);
spin_lock(&ci->i_ceph_lock);
- if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0))
+ if (__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 0))
set_cached_acl(inode, type, acl);
else
forget_cached_acl(inode, type);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6f4678d98df7..01ad09733ac7 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -11,10 +11,12 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
+#include <linux/ktime.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>
@@ -216,6 +218,9 @@ static int ceph_sync_readpages(struct ceph_fs_client *fsc,
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
+ ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
ceph_osdc_put_request(req);
dout("readpages result %d\n", rc);
return rc;
@@ -299,6 +304,7 @@ static int ceph_readpage(struct file *filp, struct page *page)
static void finish_read(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_data *osd_data;
int rc = req->r_result <= 0 ? req->r_result : 0;
int bytes = req->r_result >= 0 ? req->r_result : 0;
@@ -336,6 +342,10 @@ unlock:
put_page(page);
bytes -= PAGE_SIZE;
}
+
+ ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
kfree(osd_data->pages);
}
@@ -643,6 +653,9 @@ static int ceph_sync_writepages(struct ceph_fs_client *fsc,
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
ceph_osdc_put_request(req);
if (rc == 0)
rc = len;
@@ -794,6 +807,9 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_clear_error_write(ci);
}
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
/*
* We lost the cache cap, need to truncate the page before
* it is unlocked, otherwise we'd truncate it later in the
@@ -1852,6 +1868,10 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!err)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, err);
+
out_put:
ceph_osdc_put_request(req);
if (err == -ECANCELED)
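
The fs/ceph/addr.c hunks feed OSD read/write completion times into the new client metrics. A hedged sketch of the wrapper pattern, assuming (as this series does) that libceph stamps r_start_latency/r_end_latency on the request; foo_wait_and_account_read() is a made-up helper.

	static int foo_wait_and_account_read(struct ceph_fs_client *fsc,
					     struct ceph_osd_request *req)
	{
		int rc;

		rc = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!rc)
			rc = ceph_osdc_wait_request(&fsc->client->osdc, req);

		/* r_start_latency/r_end_latency assumed stamped by the OSD client */
		ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
					 req->r_end_latency, rc);
		return rc;
	}
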
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f1acde6fb9a6..972c13aa4225 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -597,6 +597,27 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
}
}
+/**
+ * change_auth_cap_ses - move inode to appropriate lists when auth caps change
+ * @ci: inode to be moved
+ * @session: new auth caps session
+ */
+static void change_auth_cap_ses(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session)
+{
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item))
+ return;
+
+ spin_lock(&session->s_mdsc->cap_dirty_lock);
+ if (!list_empty(&ci->i_dirty_item))
+ list_move(&ci->i_dirty_item, &session->s_cap_dirty);
+ if (!list_empty(&ci->i_flushing_item))
+ list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
+ spin_unlock(&session->s_mdsc->cap_dirty_lock);
+}
+
/*
* Add a capability under the given MDS session.
*
@@ -727,6 +748,9 @@ void ceph_add_cap(struct inode *inode,
if (flags & CEPH_CAP_FLAG_AUTH) {
if (!ci->i_auth_cap ||
ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
+ if (ci->i_auth_cap &&
+ ci->i_auth_cap->session != cap->session)
+ change_auth_cap_ses(ci, cap->session);
ci->i_auth_cap = cap;
cap->mds_wanted = wanted;
}
@@ -912,6 +936,20 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
return 0;
}
+int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
+ int touch)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ int r;
+
+ r = __ceph_caps_issued_mask(ci, mask, touch);
+ if (r)
+ ceph_update_cap_hit(&fsc->mdsc->metric);
+ else
+ ceph_update_cap_mis(&fsc->mdsc->metric);
+ return r;
+}
+
/*
* Return true if mask caps are currently being revoked by an MDS.
*/
@@ -1109,8 +1147,10 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap)
+ if (ci->i_auth_cap == cap) {
+ WARN_ON_ONCE(!list_empty(&ci->i_dirty_item));
ci->i_auth_cap = NULL;
+ }
/* remove from session list */
spin_lock(&session->s_cap_lock);
@@ -1167,6 +1207,7 @@ struct cap_msg_args {
u64 xattr_version;
u64 change_attr;
struct ceph_buffer *xattr_buf;
+ struct ceph_buffer *old_xattr_buf;
struct timespec64 atime, mtime, ctime, btime;
int op, caps, wanted, dirty;
u32 seq, issue_seq, mseq, time_warp_seq;
@@ -1175,6 +1216,7 @@ struct cap_msg_args {
kgid_t gid;
umode_t mode;
bool inline_data;
+ bool wake;
};
/*
@@ -1304,44 +1346,29 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
}
/*
- * Send a cap msg on the given inode. Update our caps state, then
- * drop i_ceph_lock and send the message.
+ * Prepare to send a cap message to an MDS. Update the cap state, and populate
+ * the arg struct with the parameters that will need to be sent. This should
+ * be done under the i_ceph_lock to guard against changes to cap state.
*
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
- *
- * Return non-zero if delayed release, or we experienced an error
- * such that the caller should requeue + retry later.
- *
- * called with i_ceph_lock, then drops it.
- * caller should hold snap_rwsem (read), s_mutex.
*/
-static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
- int op, int flags, int used, int want, int retain,
- int flushing, u64 flush_tid, u64 oldest_flush_tid)
- __releases(cap->ci->i_ceph_lock)
+static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
+ int op, int flags, int used, int want, int retain,
+ int flushing, u64 flush_tid, u64 oldest_flush_tid)
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
- struct ceph_buffer *old_blob = NULL;
- struct cap_msg_args arg;
int held, revoking;
- int wake = 0;
- int ret;
- /* Don't send anything if it's still being created. Return delayed */
- if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
- spin_unlock(&ci->i_ceph_lock);
- dout("%s async create in flight for %p\n", __func__, inode);
- return 1;
- }
+ lockdep_assert_held(&ci->i_ceph_lock);
held = cap->issued | cap->implemented;
revoking = cap->implemented & ~cap->issued;
retain &= ~revoking;
- dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
- inode, cap, cap->session,
+ dout("%s %p cap %p session %p %s -> %s (revoking %s)\n",
+ __func__, inode, cap, cap->session,
ceph_cap_string(held), ceph_cap_string(held & retain),
ceph_cap_string(revoking));
BUG_ON((retain & CEPH_CAP_PIN) == 0);
@@ -1349,60 +1376,62 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
ci->i_ceph_flags &= ~CEPH_I_FLUSH;
cap->issued &= retain; /* drop bits we don't want */
- if (cap->implemented & ~cap->issued) {
- /*
- * Wake up any waiters on wanted -> needed transition.
- * This is due to the weird transition from buffered
- * to sync IO... we need to flush dirty pages _before_
- * allowing sync writes to avoid reordering.
- */
- wake = 1;
- }
+ /*
+ * Wake up any waiters on wanted -> needed transition. This is due to
+ * the weird transition from buffered to sync IO... we need to flush
+ * dirty pages _before_ allowing sync writes to avoid reordering.
+ */
+ arg->wake = cap->implemented & ~cap->issued;
cap->implemented &= cap->issued | used;
cap->mds_wanted = want;
- arg.session = cap->session;
- arg.ino = ceph_vino(inode).ino;
- arg.cid = cap->cap_id;
- arg.follows = flushing ? ci->i_head_snapc->seq : 0;
- arg.flush_tid = flush_tid;
- arg.oldest_flush_tid = oldest_flush_tid;
-
- arg.size = inode->i_size;
- ci->i_reported_size = arg.size;
- arg.max_size = ci->i_wanted_max_size;
- if (cap == ci->i_auth_cap)
- ci->i_requested_max_size = arg.max_size;
+ arg->session = cap->session;
+ arg->ino = ceph_vino(inode).ino;
+ arg->cid = cap->cap_id;
+ arg->follows = flushing ? ci->i_head_snapc->seq : 0;
+ arg->flush_tid = flush_tid;
+ arg->oldest_flush_tid = oldest_flush_tid;
+
+ arg->size = inode->i_size;
+ ci->i_reported_size = arg->size;
+ arg->max_size = ci->i_wanted_max_size;
+ if (cap == ci->i_auth_cap) {
+ if (want & CEPH_CAP_ANY_FILE_WR)
+ ci->i_requested_max_size = arg->max_size;
+ else
+ ci->i_requested_max_size = 0;
+ }
if (flushing & CEPH_CAP_XATTR_EXCL) {
- old_blob = __ceph_build_xattrs_blob(ci);
- arg.xattr_version = ci->i_xattrs.version;
- arg.xattr_buf = ci->i_xattrs.blob;
+ arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
+ arg->xattr_version = ci->i_xattrs.version;
+ arg->xattr_buf = ci->i_xattrs.blob;
} else {
- arg.xattr_buf = NULL;
+ arg->xattr_buf = NULL;
+ arg->old_xattr_buf = NULL;
}
- arg.mtime = inode->i_mtime;
- arg.atime = inode->i_atime;
- arg.ctime = inode->i_ctime;
- arg.btime = ci->i_btime;
- arg.change_attr = inode_peek_iversion_raw(inode);
+ arg->mtime = inode->i_mtime;
+ arg->atime = inode->i_atime;
+ arg->ctime = inode->i_ctime;
+ arg->btime = ci->i_btime;
+ arg->change_attr = inode_peek_iversion_raw(inode);
- arg.op = op;
- arg.caps = cap->implemented;
- arg.wanted = want;
- arg.dirty = flushing;
+ arg->op = op;
+ arg->caps = cap->implemented;
+ arg->wanted = want;
+ arg->dirty = flushing;
- arg.seq = cap->seq;
- arg.issue_seq = cap->issue_seq;
- arg.mseq = cap->mseq;
- arg.time_warp_seq = ci->i_time_warp_seq;
+ arg->seq = cap->seq;
+ arg->issue_seq = cap->issue_seq;
+ arg->mseq = cap->mseq;
+ arg->time_warp_seq = ci->i_time_warp_seq;
- arg.uid = inode->i_uid;
- arg.gid = inode->i_gid;
- arg.mode = inode->i_mode;
+ arg->uid = inode->i_uid;
+ arg->gid = inode->i_gid;
+ arg->mode = inode->i_mode;
- arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+ arg->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
if (!(flags & CEPH_CLIENT_CAPS_PENDING_CAPSNAP) &&
!list_empty(&ci->i_cap_snaps)) {
struct ceph_cap_snap *capsnap;
@@ -1415,27 +1444,35 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
}
}
}
- arg.flags = flags;
-
- spin_unlock(&ci->i_ceph_lock);
+ arg->flags = flags;
+}
- ceph_buffer_put(old_blob);
+/*
+ * Send a cap msg on the given inode.
+ *
+ * Caller should hold snap_rwsem (read), s_mutex.
+ */
+static void __send_cap(struct ceph_mds_client *mdsc, struct cap_msg_args *arg,
+ struct ceph_inode_info *ci)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int ret;
- ret = send_cap_msg(&arg);
+ ret = send_cap_msg(arg);
if (ret < 0) {
pr_err("error sending cap msg, ino (%llx.%llx) "
"flushing %s tid %llu, requeue\n",
- ceph_vinop(inode), ceph_cap_string(flushing),
- flush_tid);
+ ceph_vinop(inode), ceph_cap_string(arg->dirty),
+ arg->flush_tid);
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
spin_unlock(&ci->i_ceph_lock);
}
- if (wake)
- wake_up_all(&ci->i_cap_wq);
+ ceph_buffer_put(arg->old_xattr_buf);
- return ret;
+ if (arg->wake)
+ wake_up_all(&ci->i_cap_wq);
}
static inline int __send_flush_snap(struct inode *inode,
@@ -1456,6 +1493,7 @@ static inline int __send_flush_snap(struct inode *inode,
arg.max_size = 0;
arg.xattr_version = capsnap->xattr_version;
arg.xattr_buf = capsnap->xattr_blob;
+ arg.old_xattr_buf = NULL;
arg.atime = capsnap->atime;
arg.mtime = capsnap->mtime;
@@ -1479,6 +1517,7 @@ static inline int __send_flush_snap(struct inode *inode,
arg.inline_data = capsnap->inline_data;
arg.flags = 0;
+ arg.wake = false;
return send_cap_msg(&arg);
}
@@ -1676,6 +1715,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
ceph_cap_string(was | mask));
ci->i_dirty_caps |= mask;
if (was == 0) {
+ struct ceph_mds_session *session = ci->i_auth_cap->session;
+
WARN_ON_ONCE(ci->i_prealloc_cap_flush);
swap(ci->i_prealloc_cap_flush, *pcf);
@@ -1688,7 +1729,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
&ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
- list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ list_add(&ci->i_dirty_item, &session->s_cap_dirty);
spin_unlock(&mdsc->cap_dirty_lock);
if (ci->i_flushing_caps == 0) {
ihold(inode);
@@ -1731,30 +1772,33 @@ static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
* Remove cap_flush from the mdsc's or inode's flushing cap list.
* Return true if caller needs to wake up flush waiters.
*/
-static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci,
- struct ceph_cap_flush *cf)
+static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
+ struct ceph_cap_flush *cf)
{
struct ceph_cap_flush *prev;
bool wake = cf->wake;
- if (mdsc) {
- /* are there older pending cap flushes? */
- if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
- prev = list_prev_entry(cf, g_list);
- prev->wake = true;
- wake = false;
- }
- list_del(&cf->g_list);
- } else if (ci) {
- if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
- prev = list_prev_entry(cf, i_list);
- prev->wake = true;
- wake = false;
- }
- list_del(&cf->i_list);
- } else {
- BUG_ON(1);
+
+ if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
+ prev = list_prev_entry(cf, g_list);
+ prev->wake = true;
+ wake = false;
}
+ list_del(&cf->g_list);
+ return wake;
+}
+
+static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
+ struct ceph_cap_flush *cf)
+{
+ struct ceph_cap_flush *prev;
+ bool wake = cf->wake;
+
+ if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
+ prev = list_prev_entry(cf, i_list);
+ prev->wake = true;
+ wake = false;
+ }
+ list_del(&cf->i_list);
return wake;
}
@@ -1953,6 +1997,9 @@ retry_locked:
}
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ int mflags = 0;
+ struct cap_msg_args arg;
+
cap = rb_entry(p, struct ceph_cap, ci_node);
/* avoid looping forever */
@@ -2030,12 +2077,24 @@ ack:
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
session);
+ session = ceph_get_mds_session(session);
spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
}
- mutex_lock(&session->s_mutex);
+ if (session) {
+ mutex_lock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ } else {
+ /*
+ * Because we take the reference while
+ * holding the i_ceph_lock, it should
+ * never be NULL. Throw a warning if it
+ * ever is.
+ */
+ WARN_ON_ONCE(true);
+ }
goto retry;
}
}
@@ -2070,6 +2129,9 @@ ack:
flushing = ci->i_dirty_caps;
flush_tid = __mark_caps_flushing(inode, session, false,
&oldest_flush_tid);
+ if (flags & CHECK_CAPS_FLUSH &&
+ list_empty(&session->s_cap_dirty))
+ mflags |= CEPH_CLIENT_CAPS_SYNC;
} else {
flushing = 0;
flush_tid = 0;
@@ -2080,9 +2142,12 @@ ack:
mds = cap->mds; /* remember mds, so we don't repeat */
- /* __send_cap drops i_ceph_lock */
- __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, 0, cap_used, want,
- retain, flushing, flush_tid, oldest_flush_tid);
+ __prep_cap(&arg, cap, CEPH_CAP_OP_UPDATE, mflags, cap_used,
+ want, retain, flushing, flush_tid, oldest_flush_tid);
+ spin_unlock(&ci->i_ceph_lock);
+
+ __send_cap(mdsc, &arg, ci);
+
goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
@@ -2121,6 +2186,7 @@ retry:
retry_locked:
if (ci->i_dirty_caps && ci->i_auth_cap) {
struct ceph_cap *cap = ci->i_auth_cap;
+ struct cap_msg_args arg;
if (session != cap->session) {
spin_unlock(&ci->i_ceph_lock);
@@ -2148,11 +2214,13 @@ retry_locked:
flush_tid = __mark_caps_flushing(inode, session, true,
&oldest_flush_tid);
- /* __send_cap drops i_ceph_lock */
- __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, CEPH_CLIENT_CAPS_SYNC,
+ __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH, CEPH_CLIENT_CAPS_SYNC,
__ceph_caps_used(ci), __ceph_caps_wanted(ci),
(cap->issued | cap->implemented),
flushing, flush_tid, oldest_flush_tid);
+ spin_unlock(&ci->i_ceph_lock);
+
+ __send_cap(mdsc, &arg, ci);
} else {
if (!list_empty(&ci->i_cap_flush_list)) {
struct ceph_cap_flush *cf =
@@ -2354,15 +2422,19 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
first_tid = cf->tid + 1;
if (cf->caps) {
+ struct cap_msg_args arg;
+
dout("kick_flushing_caps %p cap %p tid %llu %s\n",
inode, cap, cf->tid, ceph_cap_string(cf->caps));
- __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+ __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH,
(cf->tid < last_snap_flush ?
CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0),
__ceph_caps_used(ci),
__ceph_caps_wanted(ci),
(cap->issued | cap->implemented),
cf->caps, cf->tid, oldest_flush_tid);
+ spin_unlock(&ci->i_ceph_lock);
+ __send_cap(mdsc, &arg, ci);
} else {
struct ceph_cap_snap *capsnap =
container_of(cf, struct ceph_cap_snap,
@@ -2446,6 +2518,8 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *cap;
u64 oldest_flush_tid;
+ lockdep_assert_held(&session->s_mutex);
+
dout("kick_flushing_caps mds%d\n", session->s_mds);
spin_lock(&mdsc->cap_dirty_lock);
@@ -2685,6 +2759,11 @@ out_unlock:
if (snap_rwsem_locked)
up_read(&mdsc->snap_rwsem);
+ if (!ret)
+ ceph_update_cap_mis(&mdsc->metric);
+ else if (ret == 1)
+ ceph_update_cap_hit(&mdsc->metric);
+
dout("get_cap_refs %p ret %d got %s\n", inode,
ret, ceph_cap_string(*got));
return ret;
@@ -2937,7 +3016,8 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
* If we are releasing a WR cap (from a sync write), finalize any affected
* cap_snap, and wake up any waiters.
*/
-void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
+static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
+ bool skip_checking_caps)
{
struct inode *inode = &ci->vfs_inode;
int last = 0, put = 0, flushsnaps = 0, wake = 0;
@@ -2993,7 +3073,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
last ? " last" : "", put ? " put" : "");
- if (last)
+ if (last && !skip_checking_caps)
ceph_check_caps(ci, 0, NULL);
else if (flushsnaps)
ceph_flush_snaps(ci, NULL);
@@ -3003,6 +3083,16 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
iput(inode);
}
+void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
+{
+ __ceph_put_cap_refs(ci, had, false);
+}
+
+void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
+{
+ __ceph_put_cap_refs(ci, had, true);
+}
+
/*
* Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
* context. Adjust per-snap dirty page accounting as appropriate.
@@ -3301,10 +3391,6 @@ static void handle_cap_grant(struct inode *inode,
ci->i_requested_max_size = 0;
}
wake = true;
- } else if (ci->i_wanted_max_size > ci->i_max_size &&
- ci->i_wanted_max_size > ci->i_requested_max_size) {
- /* CEPH_CAP_OP_IMPORT */
- wake = true;
}
}
@@ -3380,9 +3466,18 @@ static void handle_cap_grant(struct inode *inode,
fill_inline = true;
}
- if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
+ if (ci->i_auth_cap == cap &&
+ le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
if (newcaps & ~extra_info->issued)
wake = true;
+
+ if (ci->i_requested_max_size > max_size ||
+ !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
+ /* re-request max_size if necessary */
+ ci->i_requested_max_size = 0;
+ wake = true;
+ }
+
ceph_kick_flushing_inode_caps(session, ci);
spin_unlock(&ci->i_ceph_lock);
up_read(&session->s_mdsc->snap_rwsem);
@@ -3442,15 +3537,26 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
bool wake_mdsc = false;
list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
+ /* Is this the one that was flushed? */
if (cf->tid == flush_tid)
cleaned = cf->caps;
- if (cf->caps == 0) /* capsnap */
+
+ /* Is this a capsnap? */
+ if (cf->caps == 0)
continue;
+
if (cf->tid <= flush_tid) {
- if (__finish_cap_flush(NULL, ci, cf))
- wake_ci = true;
+ /*
+ * An earlier or current tid. The FLUSH_ACK should
+ * represent a superset of this flush's caps.
+ */
+ wake_ci |= __detach_cap_flush_from_ci(ci, cf);
list_add_tail(&cf->i_list, &to_remove);
} else {
+ /*
+ * This is a later one. Any caps in it are still dirty
+ * so don't count them as cleaned.
+ */
cleaned &= ~cf->caps;
if (!cleaned)
break;
@@ -3470,10 +3576,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
spin_lock(&mdsc->cap_dirty_lock);
- list_for_each_entry(cf, &to_remove, i_list) {
- if (__finish_cap_flush(mdsc, NULL, cf))
- wake_mdsc = true;
- }
+ list_for_each_entry(cf, &to_remove, i_list)
+ wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc, cf);
if (ci->i_flushing_caps == 0) {
if (list_empty(&ci->i_cap_flush_list)) {
@@ -3565,17 +3669,15 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
dout(" removing %p cap_snap %p follows %lld\n",
inode, capsnap, follows);
list_del(&capsnap->ci_item);
- if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
- wake_ci = true;
+ wake_ci |= __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
spin_lock(&mdsc->cap_dirty_lock);
if (list_empty(&ci->i_cap_flush_list))
list_del_init(&ci->i_flushing_item);
- if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
- wake_mdsc = true;
-
+ wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc,
+ &capsnap->cap_flush);
spin_unlock(&mdsc->cap_dirty_lock);
}
spin_unlock(&ci->i_ceph_lock);
@@ -3595,10 +3697,9 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
*
* caller hold s_mutex.
*/
-static void handle_cap_trunc(struct inode *inode,
+static bool handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
struct ceph_mds_session *session)
- __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -3609,7 +3710,9 @@ static void handle_cap_trunc(struct inode *inode,
int implemented = 0;
int dirty = __ceph_caps_dirty(ci);
int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
- int queue_trunc = 0;
+ bool queue_trunc = false;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
issued |= implemented | dirty;
@@ -3617,10 +3720,7 @@ static void handle_cap_trunc(struct inode *inode,
inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
- spin_unlock(&ci->i_ceph_lock);
-
- if (queue_trunc)
- ceph_queue_vmtruncate(inode);
+ return queue_trunc;
}
/*
@@ -3694,15 +3794,9 @@ retry:
tcap->issue_seq = t_seq - 1;
tcap->issued |= issued;
tcap->implemented |= issued;
- if (cap == ci->i_auth_cap)
+ if (cap == ci->i_auth_cap) {
ci->i_auth_cap = tcap;
-
- if (!list_empty(&ci->i_cap_flush_list) &&
- ci->i_auth_cap == tcap) {
- spin_lock(&mdsc->cap_dirty_lock);
- list_move_tail(&ci->i_flushing_item,
- &tcap->session->s_cap_flushing);
- spin_unlock(&mdsc->cap_dirty_lock);
+ change_auth_cap_ses(ci, tcap->session);
}
}
__ceph_remove_cap(cap, false);
@@ -3771,7 +3865,6 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
struct ceph_mds_cap_peer *ph,
struct ceph_mds_session *session,
struct ceph_cap **target_cap, int *old_issued)
- __acquires(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap, *ocap, *new_cap = NULL;
@@ -3796,14 +3889,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
inode, ci, mds, mseq, peer);
-
retry:
- spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
if (!new_cap) {
spin_unlock(&ci->i_ceph_lock);
new_cap = ceph_get_cap(mdsc, NULL);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
cap = new_cap;
@@ -3838,9 +3930,6 @@ retry:
__ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
- /* make sure we re-request max_size, if necessary */
- ci->i_requested_max_size = 0;
-
*old_issued = issued;
*target_cap = cap;
}
@@ -3869,6 +3958,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
size_t snaptrace_len;
void *p, *end;
struct cap_extra_info extra_info = {};
+ bool queue_trunc;
dout("handle_caps from mds%d\n", session->s_mds);
@@ -4016,6 +4106,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
} else {
down_read(&mdsc->snap_rwsem);
}
+ spin_lock(&ci->i_ceph_lock);
handle_cap_import(mdsc, inode, h, peer, session,
&cap, &extra_info.issued);
handle_cap_grant(inode, session, cap,
@@ -4052,7 +4143,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
break;
case CEPH_CAP_OP_TRUNC:
- handle_cap_trunc(inode, h, session);
+ queue_trunc = handle_cap_trunc(inode, h, session);
+ spin_unlock(&ci->i_ceph_lock);
+ if (queue_trunc)
+ ceph_queue_vmtruncate(inode);
break;
default:
@@ -4121,15 +4215,16 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
/*
* Flush all dirty caps to the mds
*/
-void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
+static void flush_dirty_session_caps(struct ceph_mds_session *s)
{
+ struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_inode_info *ci;
struct inode *inode;
dout("flush_dirty_caps\n");
spin_lock(&mdsc->cap_dirty_lock);
- while (!list_empty(&mdsc->cap_dirty)) {
- ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
+ while (!list_empty(&s->s_cap_dirty)) {
+ ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
i_dirty_item);
inode = &ci->vfs_inode;
ihold(inode);
@@ -4143,6 +4238,35 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
dout("flush_dirty_caps done\n");
}
+static void iterate_sessions(struct ceph_mds_client *mdsc,
+ void (*cb)(struct ceph_mds_session *))
+{
+ int mds;
+
+ mutex_lock(&mdsc->mutex);
+ for (mds = 0; mds < mdsc->max_sessions; ++mds) {
+ struct ceph_mds_session *s;
+
+ if (!mdsc->sessions[mds])
+ continue;
+
+ s = ceph_get_mds_session(mdsc->sessions[mds]);
+ if (!s)
+ continue;
+
+ mutex_unlock(&mdsc->mutex);
+ cb(s);
+ ceph_put_mds_session(s);
+ mutex_lock(&mdsc->mutex);
+ }
+ mutex_unlock(&mdsc->mutex);
+}
+
+void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
+{
+ iterate_sessions(mdsc, flush_dirty_session_caps);
+}
+
void __ceph_touch_fmode(struct ceph_inode_info *ci,
struct ceph_mds_client *mdsc, int fmode)
{
@@ -4269,6 +4393,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
cap->issued &= ~drop;
cap->implemented &= ~drop;
cap->mds_wanted = wanted;
+ if (cap == ci->i_auth_cap &&
+ !(wanted & CEPH_CAP_ANY_FILE_WR))
+ ci->i_requested_max_size = 0;
} else {
dout("encode_inode_release %p cap %p %s"
" (force)\n", inode, cap,
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index dcaed75de9e6..070ed8481340 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -7,6 +7,8 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/math64.h>
+#include <linux/ktime.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/mon_client.h>
@@ -18,6 +20,7 @@
#ifdef CONFIG_DEBUG_FS
#include "mds_client.h"
+#include "metric.h"
static int mdsmap_show(struct seq_file *s, void *p)
{
@@ -124,6 +127,87 @@ static int mdsc_show(struct seq_file *s, void *p)
return 0;
}
+#define CEPH_METRIC_SHOW(name, total, avg, min, max, sq) { \
+ s64 _total, _avg, _min, _max, _sq, _st; \
+ _avg = ktime_to_us(avg); \
+ _min = ktime_to_us(min == KTIME_MAX ? 0 : min); \
+ _max = ktime_to_us(max); \
+ _total = total - 1; \
+ _sq = _total > 0 ? DIV64_U64_ROUND_CLOSEST(sq, _total) : 0; \
+ _st = int_sqrt64(_sq); \
+ _st = ktime_to_us(_st); \
+ seq_printf(s, "%-14s%-12lld%-16lld%-16lld%-16lld%lld\n", \
+ name, total, _avg, _min, _max, _st); \
+}
+
+static int metric_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client_metric *m = &mdsc->metric;
+ int i, nr_caps = 0;
+ s64 total, sum, avg, min, max, sq;
+
+ seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n");
+ seq_printf(s, "-----------------------------------------------------------------------------------\n");
+
+ spin_lock(&m->read_latency_lock);
+ total = m->total_reads;
+ sum = m->read_latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->read_latency_min;
+ max = m->read_latency_max;
+ sq = m->read_latency_sq_sum;
+ spin_unlock(&m->read_latency_lock);
+ CEPH_METRIC_SHOW("read", total, avg, min, max, sq);
+
+ spin_lock(&m->write_latency_lock);
+ total = m->total_writes;
+ sum = m->write_latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->write_latency_min;
+ max = m->write_latency_max;
+ sq = m->write_latency_sq_sum;
+ spin_unlock(&m->write_latency_lock);
+ CEPH_METRIC_SHOW("write", total, avg, min, max, sq);
+
+ spin_lock(&m->metadata_latency_lock);
+ total = m->total_metadatas;
+ sum = m->metadata_latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->metadata_latency_min;
+ max = m->metadata_latency_max;
+ sq = m->metadata_latency_sq_sum;
+ spin_unlock(&m->metadata_latency_lock);
+ CEPH_METRIC_SHOW("metadata", total, avg, min, max, sq);
+
+ seq_printf(s, "\n");
+ seq_printf(s, "item total miss hit\n");
+ seq_printf(s, "-------------------------------------------------\n");
+
+ seq_printf(s, "%-14s%-16lld%-16lld%lld\n", "d_lease",
+ atomic64_read(&m->total_dentries),
+ percpu_counter_sum(&m->d_lease_mis),
+ percpu_counter_sum(&m->d_lease_hit));
+
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *s;
+
+ s = __ceph_lookup_mds_session(mdsc, i);
+ if (!s)
+ continue;
+ nr_caps += s->s_nr_caps;
+ ceph_put_mds_session(s);
+ }
+ mutex_unlock(&mdsc->mutex);
+ seq_printf(s, "%-14s%-16d%-16lld%lld\n", "caps", nr_caps,
+ percpu_counter_sum(&m->i_caps_mis),
+ percpu_counter_sum(&m->i_caps_hit));
+
+ return 0;
+}
+
static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
{
struct seq_file *s = p;
@@ -222,6 +306,7 @@ DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
+DEFINE_SHOW_ATTRIBUTE(metric);
/*
@@ -255,6 +340,7 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_mdsmap);
debugfs_remove(fsc->debugfs_mds_sessions);
debugfs_remove(fsc->debugfs_caps);
+ debugfs_remove(fsc->debugfs_metric);
debugfs_remove(fsc->debugfs_mdsc);
}
@@ -295,11 +381,17 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc,
&mdsc_fops);
+ fsc->debugfs_metric = debugfs_create_file("metrics",
+ 0400,
+ fsc->client->debugfs_dir,
+ fsc,
+ &metric_fops);
+
fsc->debugfs_caps = debugfs_create_file("caps",
- 0400,
- fsc->client->debugfs_dir,
- fsc,
- &caps_fops);
+ 0400,
+ fsc->client->debugfs_dir,
+ fsc,
+ &caps_fops);
}
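
The new "metrics" debugfs file above (typically exposed in the client's directory under /sys/kernel/debug/ceph/) reports each latency class as a count, average, min, max and a standard deviation reconstructed from the running sum of squared deviations divided by (total - 1). A minimal userspace sketch of that arithmetic, assuming nanosecond inputs and made-up sample values (not kernel code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static void show_latency(const char *name, uint64_t total,
                         uint64_t sum_ns, uint64_t sq_sum_ns2)
{
        uint64_t avg_us = total ? sum_ns / total / 1000 : 0;
        uint64_t var_ns2 = total > 1 ? sq_sum_ns2 / (total - 1) : 0;
        uint64_t stdev_us = (uint64_t)sqrt((double)var_ns2) / 1000;

        printf("%-14s%-12llu%-16llu%llu\n", name,
               (unsigned long long)total,
               (unsigned long long)avg_us,
               (unsigned long long)stdev_us);
}

int main(void)
{
        printf("item          total       avg_lat(us)     stdev(us)\n");
        /* e.g. three reads of 1ms, 2ms, 3ms: sum = 6e6 ns, sq_sum = 2e12 ns^2 */
        show_latency("read", 3, 6000000ULL, 2000000000000ULL);
        return 0;
}
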
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 4c4202c93b71..39f5311404b0 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -38,6 +38,8 @@ static int __dir_lease_try_check(const struct dentry *dentry);
static int ceph_d_init(struct dentry *dentry)
{
struct ceph_dentry_info *di;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
if (!di)
@@ -48,6 +50,9 @@ static int ceph_d_init(struct dentry *dentry)
di->time = jiffies;
dentry->d_fsdata = di;
INIT_LIST_HEAD(&di->lease_list);
+
+ atomic64_inc(&mdsc->metric.total_dentries);
+
return 0;
}
@@ -344,8 +349,9 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
__ceph_dir_is_complete_ordered(ci) &&
- __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
+ __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
int shared_gen = atomic_read(&ci->i_shared_gen);
+
spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(file, ctx, shared_gen);
if (err != -EAGAIN)
@@ -762,7 +768,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
!is_root_ceph_dentry(dir, dentry) &&
ceph_test_mount_opt(fsc, DCACHE) &&
__ceph_dir_is_complete(ci) &&
- (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
+ __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
@@ -1203,11 +1209,12 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
op = CEPH_MDS_OP_RENAMESNAP;
else
return -EROFS;
+ } else if (old_dir != new_dir) {
+ err = ceph_quota_check_rename(mdsc, d_inode(old_dentry),
+ new_dir);
+ if (err)
+ return err;
}
- /* don't allow cross-quota renames */
- if ((old_dir != new_dir) &&
- (!ceph_quota_is_same_realm(old_dir, new_dir)))
- return -EXDEV;
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
@@ -1709,6 +1716,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
+ percpu_counter_inc(&mdsc->metric.d_lease_mis);
+
op = ceph_snap(dir) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
@@ -1740,6 +1749,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
dout("d_revalidate %p lookup result=%d\n",
dentry, err);
}
+ } else {
+ percpu_counter_inc(&mdsc->metric.d_lease_hit);
}
dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
@@ -1782,9 +1793,12 @@ static int ceph_d_delete(const struct dentry *dentry)
static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
dout("d_release %p\n", dentry);
+ atomic64_dec(&fsc->mdsc->metric.total_dentries);
+
spin_lock(&dentry->d_lock);
__dentry_lease_unlist(di);
dentry->d_fsdata = NULL;
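
The dentry hooks above only adjust counters: every ceph_d_init() adds to total_dentries, every ceph_d_release() subtracts, and d_revalidate outcomes land in the d_lease hit/miss percpu counters. A rough userspace sketch of the same bookkeeping, with plain C11 atomics standing in for atomic64_t and the percpu counters (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* plain atomics standing in for atomic64_t and the percpu counters */
static atomic_long total_dentries, lease_hit, lease_mis;

static void d_init(void)    { atomic_fetch_add(&total_dentries, 1); }
static void d_release(void) { atomic_fetch_sub(&total_dentries, 1); }

static void d_revalidate(int lease_still_valid)
{
        atomic_fetch_add(lease_still_valid ? &lease_hit : &lease_mis, 1);
}

int main(void)
{
        d_init(); d_init();
        d_revalidate(1); d_revalidate(0); d_revalidate(1);
        d_release();
        printf("d_lease total=%ld mis=%ld hit=%ld\n",
               atomic_load(&total_dentries),
               atomic_load(&lease_mis), atomic_load(&lease_hit));
        return 0;
}
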
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 79dc06881e78..e088843a7734 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -172,9 +172,16 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino)
static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
{
struct inode *inode = __lookup_inode(sb, ino);
+ int err;
+
if (IS_ERR(inode))
return ERR_CAST(inode);
- if (inode->i_nlink == 0) {
+ /* We need LINK caps to reliably check i_nlink */
+ err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
+ if (err)
+ return ERR_PTR(err);
+ /* -ESTALE if inode has been unlinked and no file is open */
+ if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
iput(inode);
return ERR_PTR(-ESTALE);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index afdfca965a7f..160644ddaeed 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -11,11 +11,13 @@
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
+#include <linux/ktime.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
+#include "metric.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
@@ -906,6 +908,12 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ret = ceph_osdc_start_request(osdc, req, false);
if (!ret)
ret = ceph_osdc_wait_request(osdc, req);
+
+ ceph_update_read_latency(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ ret);
+
ceph_osdc_put_request(req);
i_size = i_size_read(inode);
@@ -1044,6 +1052,8 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct inode *inode = req->r_inode;
struct ceph_aio_request *aio_req = req->r_priv;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_client_metric *metric = &fsc->mdsc->metric;
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
@@ -1051,6 +1061,16 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
dout("ceph_aio_complete_req %p rc %d bytes %u\n",
inode, rc, osd_data->bvec_pos.iter.bi_size);
+ /* r_start_latency == 0 means the request was not submitted */
+ if (req->r_start_latency) {
+ if (aio_req->write)
+ ceph_update_write_latency(metric, req->r_start_latency,
+ req->r_end_latency, rc);
+ else
+ ceph_update_read_latency(metric, req->r_start_latency,
+ req->r_end_latency, rc);
+ }
+
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
BUG_ON(!aio_req->write);
@@ -1179,6 +1199,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_client_metric *metric = &fsc->mdsc->metric;
struct ceph_vino vino;
struct ceph_osd_request *req;
struct bio_vec *bvecs;
@@ -1295,6 +1316,13 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (!ret)
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ if (write)
+ ceph_update_write_latency(metric, req->r_start_latency,
+ req->r_end_latency, ret);
+ else
+ ceph_update_read_latency(metric, req->r_start_latency,
+ req->r_end_latency, ret);
+
size = i_size_read(inode);
if (!write) {
if (ret == -ENOENT)
@@ -1466,6 +1494,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (!ret)
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, ret);
out:
ceph_osdc_put_request(req);
if (ret != 0) {
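
All of the read/write paths above follow the same pattern: the OSD request carries a start timestamp, a completion timestamp is taken when it finishes, and the delta is fed to the metric only when the request was actually submitted and its return code is not being filtered out. A userspace sketch of that pattern, using clock_gettime() as a stand-in for ktime_get() and a hard-coded outcome:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
        int64_t r_start_latency = now_ns();    /* stamped when the request is issued */
        int64_t r_end_latency;
        int rc;

        usleep(2000);                          /* stand-in for the OSD round trip */
        rc = 0;                                /* request outcome */
        r_end_latency = now_ns();

        /* r_start_latency == 0 would mean the request was never submitted */
        if (r_start_latency && (rc >= 0 || rc == -ENOENT || rc == -ETIMEDOUT))
                printf("read latency: %lld us\n",
                       (long long)((r_end_latency - r_start_latency) / 1000));
        return 0;
}
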
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7fef94fd1e55..357c937699d5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2288,8 +2288,8 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
dout("do_getattr inode %p mask %s mode 0%o\n",
inode, ceph_cap_string(mask), inode->i_mode);
- if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
- return 0;
+ if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
+ return 0;
mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 7c63abf5bea9..a50497142e59 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
+#include <linux/ktime.h>
#include "super.h"
#include "mds_client.h"
@@ -658,6 +659,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
if (refcount_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
ceph_auth_destroy_authorizer(s->s_auth.authorizer);
+ WARN_ON(mutex_is_locked(&s->s_mutex));
xa_destroy(&s->s_delegated_inos);
kfree(s);
}
@@ -753,6 +755,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
+ INIT_LIST_HEAD(&s->s_cap_dirty);
INIT_LIST_HEAD(&s->s_cap_flushing);
mdsc->sessions[mds] = s;
@@ -801,7 +804,7 @@ void ceph_mdsc_release_request(struct kref *kref)
struct ceph_mds_request *req = container_of(kref,
struct ceph_mds_request,
r_kref);
- ceph_mdsc_release_dir_caps(req);
+ ceph_mdsc_release_dir_caps_no_check(req);
destroy_reply_info(&req->r_reply_info);
if (req->r_request)
ceph_msg_put(req->r_request);
@@ -2201,6 +2204,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
mutex_init(&req->r_fill_mutex);
req->r_mdsc = mdsc;
req->r_started = jiffies;
+ req->r_start_latency = ktime_get();
req->r_resend_mds = -1;
INIT_LIST_HEAD(&req->r_unsafe_dir_item);
INIT_LIST_HEAD(&req->r_unsafe_target_item);
@@ -2547,6 +2551,8 @@ out:
static void complete_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ req->r_end_latency = ktime_get();
+
if (req->r_callback)
req->r_callback(mdsc, req);
complete_all(&req->r_completion);
@@ -3155,6 +3161,9 @@ out_err:
/* kick calling process */
complete_request(mdsc, req);
+
+ ceph_update_metadata_latency(&mdsc->metric, req->r_start_latency,
+ req->r_end_latency, err);
out:
ceph_mdsc_put_request(req);
return;
@@ -3393,6 +3402,18 @@ void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
}
}
+void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
+{
+ int dcaps;
+
+ dcaps = xchg(&req->r_dir_caps, 0);
+ if (dcaps) {
+ dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+ ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
+ dcaps);
+ }
+}
+
/*
* called under session->mutex.
*/
@@ -3425,7 +3446,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
if (req->r_session->s_mds != session->s_mds)
continue;
- ceph_mdsc_release_dir_caps(req);
+ ceph_mdsc_release_dir_caps_no_check(req);
__send_request(mdsc, session, req, true);
}
@@ -3760,8 +3781,6 @@ fail:
* recovering MDS might have.
*
* This is a relatively heavyweight operation, but it's rare.
- *
- * called with mdsc->mutex held.
*/
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
@@ -4015,7 +4034,11 @@ static void check_new_map(struct ceph_mds_client *mdsc,
oldstate != CEPH_MDS_STATE_STARTING)
pr_info("mds%d recovery completed\n", s->s_mds);
kick_requests(mdsc, i);
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&s->s_mutex);
+ mutex_lock(&mdsc->mutex);
ceph_kick_flushing_caps(mdsc, s);
+ mutex_unlock(&s->s_mutex);
wake_up_session_caps(s, RECONNECT);
}
}
@@ -4323,6 +4346,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc;
+ int err;
mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
if (!mdsc)
@@ -4331,8 +4355,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
if (!mdsc->mdsmap) {
- kfree(mdsc);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_mdsc;
}
fsc->mdsc = mdsc;
@@ -4364,13 +4388,15 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
INIT_LIST_HEAD(&mdsc->cap_flush_list);
- INIT_LIST_HEAD(&mdsc->cap_dirty);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
mdsc->num_cap_flushing = 0;
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
atomic_set(&mdsc->cap_reclaim_pending, 0);
+ err = ceph_metric_init(&mdsc->metric);
+ if (err)
+ goto err_mdsmap;
spin_lock_init(&mdsc->dentry_list_lock);
INIT_LIST_HEAD(&mdsc->dentry_leases);
@@ -4389,6 +4415,12 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
strscpy(mdsc->nodename, utsname()->nodename,
sizeof(mdsc->nodename));
return 0;
+
+err_mdsmap:
+ kfree(mdsc->mdsmap);
+err_mdsc:
+ kfree(mdsc);
+ return err;
}
/*
@@ -4646,6 +4678,8 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
ceph_mdsc_stop(mdsc);
+ ceph_metric_destroy(&mdsc->metric);
+
fsc->mdsc = NULL;
kfree(mdsc);
dout("mdsc_destroy %p done\n", mdsc);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 903d9edfd4bf..5e0c4073a6be 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -10,12 +10,15 @@
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/utsname.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>
+#include "metric.h"
+
/* The first 8 bits are reserved for old ceph releases */
enum ceph_feature_type {
CEPHFS_FEATURE_MIMIC = 8,
@@ -196,8 +199,12 @@ struct ceph_mds_session {
struct list_head s_cap_releases; /* waiting cap_release messages */
struct work_struct s_cap_release_work;
- /* protected by mutex */
+ /* See ceph_inode_info->i_dirty_item. */
+ struct list_head s_cap_dirty; /* inodes w/ dirty caps */
+
+ /* See ceph_inode_info->i_flushing_item. */
struct list_head s_cap_flushing; /* inodes w/ flushing caps */
+
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
@@ -297,6 +304,8 @@ struct ceph_mds_request {
unsigned long r_timeout; /* optional. jiffies, 0 is "wait forever" */
unsigned long r_started; /* start time to measure timeout against */
+ ktime_t r_start_latency; /* start time to measure latency */
+ ktime_t r_end_latency; /* finish time to measure latency */
unsigned long r_request_started; /* start time for mds request only,
used to measure lease durations */
@@ -419,7 +428,6 @@ struct ceph_mds_client {
u64 last_cap_flush_tid;
struct list_head cap_flush_list;
- struct list_head cap_dirty; /* inodes with dirty caps */
struct list_head cap_dirty_migrating; /* ...that are migration... */
int num_cap_flushing; /* # caps we are flushing */
spinlock_t cap_dirty_lock; /* protects above items */
@@ -454,6 +462,8 @@ struct ceph_mds_client {
struct list_head dentry_leases; /* fifo list */
struct list_head dentry_dir_leases; /* lru list */
+ struct ceph_client_metric metric;
+
spinlock_t snapid_map_lock;
struct rb_root snapid_map_tree;
struct list_head snapid_map_lru;
@@ -497,6 +507,7 @@ extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req);
extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req);
+extern void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
kref_get(&req->r_kref);
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
new file mode 100644
index 000000000000..9217f35bc2b9
--- /dev/null
+++ b/fs/ceph/metric.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/types.h>
+#include <linux/percpu_counter.h>
+#include <linux/math64.h>
+
+#include "metric.h"
+
+int ceph_metric_init(struct ceph_client_metric *m)
+{
+ int ret;
+
+ if (!m)
+ return -EINVAL;
+
+ atomic64_set(&m->total_dentries, 0);
+ ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
+ if (ret)
+ goto err_d_lease_mis;
+
+ ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
+ if (ret)
+ goto err_i_caps_hit;
+
+ ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
+ if (ret)
+ goto err_i_caps_mis;
+
+ spin_lock_init(&m->read_latency_lock);
+ m->read_latency_sq_sum = 0;
+ m->read_latency_min = KTIME_MAX;
+ m->read_latency_max = 0;
+ m->total_reads = 0;
+ m->read_latency_sum = 0;
+
+ spin_lock_init(&m->write_latency_lock);
+ m->write_latency_sq_sum = 0;
+ m->write_latency_min = KTIME_MAX;
+ m->write_latency_max = 0;
+ m->total_writes = 0;
+ m->write_latency_sum = 0;
+
+ spin_lock_init(&m->metadata_latency_lock);
+ m->metadata_latency_sq_sum = 0;
+ m->metadata_latency_min = KTIME_MAX;
+ m->metadata_latency_max = 0;
+ m->total_metadatas = 0;
+ m->metadata_latency_sum = 0;
+
+ return 0;
+
+err_i_caps_mis:
+ percpu_counter_destroy(&m->i_caps_hit);
+err_i_caps_hit:
+ percpu_counter_destroy(&m->d_lease_mis);
+err_d_lease_mis:
+ percpu_counter_destroy(&m->d_lease_hit);
+
+ return ret;
+}
+
+void ceph_metric_destroy(struct ceph_client_metric *m)
+{
+ if (!m)
+ return;
+
+ percpu_counter_destroy(&m->i_caps_mis);
+ percpu_counter_destroy(&m->i_caps_hit);
+ percpu_counter_destroy(&m->d_lease_mis);
+ percpu_counter_destroy(&m->d_lease_hit);
+}
+
+static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
+ ktime_t *min, ktime_t *max,
+ ktime_t *sq_sump, ktime_t lat)
+{
+ ktime_t total, avg, sq, lsum;
+
+ total = ++(*totalp);
+ lsum = (*lsump += lat);
+
+ if (unlikely(lat < *min))
+ *min = lat;
+ if (unlikely(lat > *max))
+ *max = lat;
+
+ if (unlikely(total == 1))
+ return;
+
+ /* the sq is (lat - old_avg) * (lat - new_avg) */
+ avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
+ sq = lat - avg;
+ avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
+ sq = sq * (lat - avg);
+ *sq_sump += sq;
+}
+
+void ceph_update_read_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ktime_t lat = ktime_sub(r_end, r_start);
+
+ if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
+ return;
+
+ spin_lock(&m->read_latency_lock);
+ __update_latency(&m->total_reads, &m->read_latency_sum,
+ &m->read_latency_min, &m->read_latency_max,
+ &m->read_latency_sq_sum, lat);
+ spin_unlock(&m->read_latency_lock);
+}
+
+void ceph_update_write_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ktime_t lat = ktime_sub(r_end, r_start);
+
+ if (unlikely(rc && rc != -ETIMEDOUT))
+ return;
+
+ spin_lock(&m->write_latency_lock);
+ __update_latency(&m->total_writes, &m->write_latency_sum,
+ &m->write_latency_min, &m->write_latency_max,
+ &m->write_latency_sq_sum, lat);
+ spin_unlock(&m->write_latency_lock);
+}
+
+void ceph_update_metadata_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ktime_t lat = ktime_sub(r_end, r_start);
+
+ if (unlikely(rc && rc != -ENOENT))
+ return;
+
+ spin_lock(&m->metadata_latency_lock);
+ __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
+ &m->metadata_latency_min, &m->metadata_latency_max,
+ &m->metadata_latency_sq_sum, lat);
+ spin_unlock(&m->metadata_latency_lock);
+}
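
__update_latency() above keeps a running variance: for each sample it accumulates (lat - old_avg) * (lat - new_avg), which is Welford's online method and equals the conventional two-pass sum of squared deviations. A short userspace check of that identity with made-up latency samples:

#include <stdio.h>

int main(void)
{
        const double lat[] = { 1000, 2000, 3000, 1500, 2500 };   /* fake samples */
        const int n = sizeof(lat) / sizeof(lat[0]);
        double sum = 0, sq_sum = 0, avg = 0, check = 0;
        int i;

        for (i = 0; i < n; i++) {
                double old_avg;

                sum += lat[i];
                old_avg = i ? (sum - lat[i]) / i : lat[i];   /* avg before this sample */
                avg = sum / (i + 1);                         /* avg after this sample  */
                if (i)
                        sq_sum += (lat[i] - old_avg) * (lat[i] - avg);
        }

        for (i = 0; i < n; i++)                              /* conventional two-pass sum */
                check += (lat[i] - avg) * (lat[i] - avg);

        printf("one-pass sq_sum=%.1f  two-pass=%.1f\n", sq_sum, check);
        return 0;
}
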
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
new file mode 100644
index 000000000000..ccd81285a450
--- /dev/null
+++ b/fs/ceph/metric.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_MDS_METRIC_H
+#define _FS_CEPH_MDS_METRIC_H
+
+#include <linux/types.h>
+#include <linux/percpu_counter.h>
+#include <linux/ktime.h>
+
+/* This is the global metrics structure */
+struct ceph_client_metric {
+ atomic64_t total_dentries;
+ struct percpu_counter d_lease_hit;
+ struct percpu_counter d_lease_mis;
+
+ struct percpu_counter i_caps_hit;
+ struct percpu_counter i_caps_mis;
+
+ spinlock_t read_latency_lock;
+ u64 total_reads;
+ ktime_t read_latency_sum;
+ ktime_t read_latency_sq_sum;
+ ktime_t read_latency_min;
+ ktime_t read_latency_max;
+
+ spinlock_t write_latency_lock;
+ u64 total_writes;
+ ktime_t write_latency_sum;
+ ktime_t write_latency_sq_sum;
+ ktime_t write_latency_min;
+ ktime_t write_latency_max;
+
+ spinlock_t metadata_latency_lock;
+ u64 total_metadatas;
+ ktime_t metadata_latency_sum;
+ ktime_t metadata_latency_sq_sum;
+ ktime_t metadata_latency_min;
+ ktime_t metadata_latency_max;
+};
+
+extern int ceph_metric_init(struct ceph_client_metric *m);
+extern void ceph_metric_destroy(struct ceph_client_metric *m);
+
+static inline void ceph_update_cap_hit(struct ceph_client_metric *m)
+{
+ percpu_counter_inc(&m->i_caps_hit);
+}
+
+static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
+{
+ percpu_counter_inc(&m->i_caps_mis);
+}
+
+extern void ceph_update_read_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc);
+extern void ceph_update_write_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc);
+extern void ceph_update_metadata_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc);
+#endif /* _FS_CEPH_MDS_METRIC_H */
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 19507e2fdb57..198ddde5c1e6 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -264,7 +264,7 @@ restart:
return NULL;
}
-bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+static bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
{
struct ceph_mds_client *mdsc = ceph_inode_to_client(old)->mdsc;
struct ceph_snap_realm *old_realm, *new_realm;
@@ -361,8 +361,6 @@ restart:
spin_unlock(&ci->i_ceph_lock);
switch (op) {
case QUOTA_CHECK_MAX_FILES_OP:
- exceeded = (max && (rvalue >= max));
- break;
case QUOTA_CHECK_MAX_BYTES_OP:
exceeded = (max && (rvalue + delta > max));
break;
@@ -417,7 +415,7 @@ bool ceph_quota_is_max_files_exceeded(struct inode *inode)
WARN_ON(!S_ISDIR(inode->i_mode));
- return check_quota_exceeded(inode, QUOTA_CHECK_MAX_FILES_OP, 0);
+ return check_quota_exceeded(inode, QUOTA_CHECK_MAX_FILES_OP, 1);
}
/*
@@ -518,3 +516,59 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
return is_updated;
}
+/*
+ * ceph_quota_check_rename - check if a rename can be executed
+ * @mdsc: MDS client instance
+ * @old: inode to be copied
+ * @new: destination inode (directory)
+ *
+ * This function verifies if a rename (e.g. moving a file or directory) can be
+ * executed. It forces an rstat update in the @new target directory (and in the
+ * source @old as well, if it's a directory). The actual check is done both for
+ * max_files and max_bytes.
+ *
+ * This function returns 0 if it's OK to do the rename, or, if quotas are
+ * exceeded, -EXDEV (if @old is a directory) or -EDQUOT.
+ */
+int ceph_quota_check_rename(struct ceph_mds_client *mdsc,
+ struct inode *old, struct inode *new)
+{
+ struct ceph_inode_info *ci_old = ceph_inode(old);
+ int ret = 0;
+
+ if (ceph_quota_is_same_realm(old, new))
+ return 0;
+
+ /*
+ * Get the latest rstat for target directory (and for source, if a
+ * directory)
+ */
+ ret = ceph_do_getattr(new, CEPH_STAT_RSTAT, false);
+ if (ret)
+ return ret;
+
+ if (S_ISDIR(old->i_mode)) {
+ ret = ceph_do_getattr(old, CEPH_STAT_RSTAT, false);
+ if (ret)
+ return ret;
+ ret = check_quota_exceeded(new, QUOTA_CHECK_MAX_BYTES_OP,
+ ci_old->i_rbytes);
+ if (!ret)
+ ret = check_quota_exceeded(new,
+ QUOTA_CHECK_MAX_FILES_OP,
+ ci_old->i_rfiles +
+ ci_old->i_rsubdirs);
+ if (ret)
+ ret = -EXDEV;
+ } else {
+ ret = check_quota_exceeded(new, QUOTA_CHECK_MAX_BYTES_OP,
+ i_size_read(old));
+ if (!ret)
+ ret = check_quota_exceeded(new,
+ QUOTA_CHECK_MAX_FILES_OP, 1);
+ if (ret)
+ ret = -EDQUOT;
+ }
+
+ return ret;
+}
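
ceph_quota_check_rename() above charges the whole subtree (rbytes, rfiles + rsubdirs) against the target realm when a directory crosses realms, or a single file's size and one file slot otherwise, and returns -EXDEV for directories or -EDQUOT for files when the target quota would be exceeded. A condensed userspace sketch of that decision (types and limits are hypothetical, not the kernel helpers):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct realm_quota { uint64_t max_bytes, max_files; };   /* 0 means "no limit" */

static bool exceeds(uint64_t max, uint64_t used, uint64_t delta)
{
        return max && used + delta > max;
}

static int quota_check_rename(const struct realm_quota *q,
                              uint64_t used_bytes, uint64_t used_files,
                              bool src_is_dir, uint64_t src_bytes,
                              uint64_t src_entries)
{
        if (exceeds(q->max_bytes, used_bytes, src_bytes) ||
            exceeds(q->max_files, used_files, src_is_dir ? src_entries : 1))
                return src_is_dir ? -EXDEV : -EDQUOT;
        return 0;
}

int main(void)
{
        struct realm_quota q = { .max_bytes = 1 << 20, .max_files = 10 };

        printf("small file : %d\n", quota_check_rename(&q, 0, 0, false, 4096, 0));
        printf("big subtree: %d\n", quota_check_rename(&q, 0, 0, true, 2 << 20, 3));
        return 0;
}
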
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 60aac3aee055..5a6cdd39bc10 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -128,6 +128,7 @@ struct ceph_fs_client {
struct dentry *debugfs_congestion_kb;
struct dentry *debugfs_bdi;
struct dentry *debugfs_mdsc, *debugfs_mdsmap;
+ struct dentry *debugfs_metric;
struct dentry *debugfs_mds_sessions;
#endif
@@ -350,7 +351,25 @@ struct ceph_inode_info {
struct rb_root i_caps; /* cap list */
struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */
- struct list_head i_dirty_item, i_flushing_item;
+
+ /*
+ * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty
+ * is protected by the mdsc->cap_dirty_lock, but each individual item
+ * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty
+ * requires the mdsc->cap_dirty_lock. List presence for an item can
+ * be tested under the i_ceph_lock. Changing anything requires both.
+ */
+ struct list_head i_dirty_item;
+
+ /*
+ * Link to session's s_cap_flushing list. Protected in a similar
+ * fashion to i_dirty_item, but also by the s_mutex for changes. The
+ * s_cap_flushing list can be walked while holding either the s_mutex
+ * or mdsc->cap_dirty_lock. List presence can also be checked while
+ * holding the i_ceph_lock for this inode.
+ */
+ struct list_head i_flushing_item;
+
/* we need to track cap writeback on a per-cap-bit basis, to allow
* overlapping, pipelined cap flushes to the mds. we can probably
* reduce the tid to 8 bits if we're concerned about inode size. */
@@ -644,6 +663,8 @@ static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
+extern int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
+ int t);
extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
struct ceph_cap *cap);
@@ -656,12 +677,12 @@ static inline int ceph_caps_issued(struct ceph_inode_info *ci)
return issued;
}
-static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
- int touch)
+static inline int ceph_caps_issued_mask_metric(struct ceph_inode_info *ci,
+ int mask, int touch)
{
int r;
spin_lock(&ci->i_ceph_lock);
- r = __ceph_caps_issued_mask(ci, mask, touch);
+ r = __ceph_caps_issued_mask_metric(ci, mask, touch);
spin_unlock(&ci->i_ceph_lock);
return r;
}
@@ -1074,6 +1095,8 @@ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
bool snap_rwsem_locked);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
+extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
+ int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
extern void ceph_flush_snaps(struct ceph_inode_info *ci,
@@ -1189,13 +1212,14 @@ extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
extern bool ceph_quota_is_max_files_exceeded(struct inode *inode);
-extern bool ceph_quota_is_same_realm(struct inode *old, struct inode *new);
extern bool ceph_quota_is_max_bytes_exceeded(struct inode *inode,
loff_t newlen);
extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
loff_t newlen);
extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
struct kstatfs *buf);
+extern int ceph_quota_check_rename(struct ceph_mds_client *mdsc,
+ struct inode *old, struct inode *new);
extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
#endif /* _FS_CEPH_SUPER_H */
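
The *_metric variants declared above are thin wrappers: they perform the original cap-mask check and then record a cap hit or miss based on the result, leaving callers otherwise unchanged. A userspace sketch of that wrapper shape, with plain counters in place of the percpu ones (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned long caps_hit, caps_mis;       /* stand-ins for the percpu counters */

static bool caps_issued_mask(unsigned int issued, unsigned int mask)
{
        return (issued & mask) == mask;
}

static bool caps_issued_mask_metric(unsigned int issued, unsigned int mask)
{
        bool ok = caps_issued_mask(issued, mask);

        if (ok)
                caps_hit++;
        else
                caps_mis++;
        return ok;
}

int main(void)
{
        caps_issued_mask_metric(0x5, 0x1);     /* all requested bits present: hit  */
        caps_issued_mask_metric(0x5, 0x2);     /* a requested bit missing:    miss */
        printf("caps hit=%lu mis=%lu\n", caps_hit, caps_mis);
        return 0;
}
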
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 7b8a070a782d..71ee34d160c3 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -856,7 +856,7 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
if (ci->i_xattrs.version == 0 ||
!((req_mask & CEPH_CAP_XATTR_SHARED) ||
- __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
+ __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) {
spin_unlock(&ci->i_ceph_lock);
/* security module gets xattr while filling trace */
@@ -914,7 +914,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
ci->i_xattrs.version, ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
- !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
+ !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
spin_unlock(&ci->i_ceph_lock);
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
if (err)
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 916567d770f5..3ad1a98fd567 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -221,6 +221,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
int i, j;
+ const char *security_types[] = {"Unspecified", "LANMAN", "NTLM",
+ "NTLMv2", "RawNTLMSSP", "Kerberos"};
seq_puts(m,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -375,6 +377,10 @@ skip_rdma:
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
+
+ seq_printf(m,"Security type: %s\n",
+ security_types[server->ops->select_sectype(server, ses->sectype)]);
+
if (server->rdma)
seq_printf(m, "RDMA\n\t");
seq_printf(m, "TCP status: %d Instance: %d\n\tLocal Users To "
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 100b0056a369..5e66dab712d0 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -8,6 +8,12 @@
#ifndef _H_CIFS_DEBUG
#define _H_CIFS_DEBUG
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "CIFS: " fmt
+
void cifs_dump_mem(char *label, void *data, int length);
void cifs_dump_detail(void *buf, struct TCP_Server_Info *ptcp_info);
void cifs_dump_mids(struct TCP_Server_Info *);
@@ -46,92 +52,81 @@ extern int cifsFYI;
*/
/* Information level messages, minor events */
-#define cifs_info_func(ratefunc, fmt, ...) \
-do { \
- pr_info_ ## ratefunc("CIFS: " fmt, ##__VA_ARGS__); \
-} while (0)
+#define cifs_info_func(ratefunc, fmt, ...) \
+ pr_info_ ## ratefunc(fmt, ##__VA_ARGS__)
-#define cifs_info(fmt, ...) \
-do { \
- cifs_info_func(ratelimited, fmt, ##__VA_ARGS__); \
-} while (0)
+#define cifs_info(fmt, ...) \
+ cifs_info_func(ratelimited, fmt, ##__VA_ARGS__)
/* information message: e.g., configuration, major event */
-#define cifs_dbg_func(ratefunc, type, fmt, ...) \
-do { \
- if ((type) & FYI && cifsFYI & CIFS_INFO) { \
- pr_debug_ ## ratefunc("%s: " \
- fmt, __FILE__, ##__VA_ARGS__); \
- } else if ((type) & VFS) { \
- pr_err_ ## ratefunc("CIFS VFS: " \
- fmt, ##__VA_ARGS__); \
- } else if ((type) & NOISY && (NOISY != 0)) { \
- pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
- } \
+#define cifs_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: " fmt, \
+ __FILE__, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("VFS: " fmt, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
+ } \
} while (0)
-#define cifs_dbg(type, fmt, ...) \
-do { \
- if ((type) & ONCE) \
- cifs_dbg_func(once, \
- type, fmt, ##__VA_ARGS__); \
- else \
- cifs_dbg_func(ratelimited, \
- type, fmt, ##__VA_ARGS__); \
+#define cifs_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_dbg_func(once, type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_dbg_func(ratelimited, type, fmt, ##__VA_ARGS__); \
} while (0)
-#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
-do { \
- const char *sn = ""; \
- if (server && server->hostname) \
- sn = server->hostname; \
- if ((type) & FYI && cifsFYI & CIFS_INFO) { \
- pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
- __FILE__, sn, ##__VA_ARGS__); \
- } else if ((type) & VFS) { \
- pr_err_ ## ratefunc("CIFS VFS: \\\\%s " fmt, \
- sn, ##__VA_ARGS__); \
- } else if ((type) & NOISY && (NOISY != 0)) { \
- pr_debug_ ## ratefunc("\\\\%s " fmt, \
- sn, ##__VA_ARGS__); \
- } \
+#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ const char *sn = ""; \
+ if (server && server->hostname) \
+ sn = server->hostname; \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
+ __FILE__, sn, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
+ sn, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc("\\\\%s " fmt, \
+ sn, ##__VA_ARGS__); \
+ } \
} while (0)
-#define cifs_server_dbg(type, fmt, ...) \
-do { \
- if ((type) & ONCE) \
- cifs_server_dbg_func(once, \
- type, fmt, ##__VA_ARGS__); \
- else \
- cifs_server_dbg_func(ratelimited, \
- type, fmt, ##__VA_ARGS__); \
+#define cifs_server_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_server_dbg_func(once, type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_server_dbg_func(ratelimited, type, fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...) \
-do { \
- const char *tn = ""; \
- if (tcon && tcon->treeName) \
- tn = tcon->treeName; \
- if ((type) & FYI && cifsFYI & CIFS_INFO) { \
- pr_debug_ ## ratefunc("%s: %s " fmt, \
- __FILE__, tn, ##__VA_ARGS__); \
- } else if ((type) & VFS) { \
- pr_err_ ## ratefunc("CIFS VFS: %s " fmt, \
- tn, ##__VA_ARGS__); \
- } else if ((type) & NOISY && (NOISY != 0)) { \
- pr_debug_ ## ratefunc("%s " fmt, \
- tn, ##__VA_ARGS__); \
- } \
+#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ const char *tn = ""; \
+ if (tcon && tcon->treeName) \
+ tn = tcon->treeName; \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: %s " fmt, \
+ __FILE__, tn, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("VFS: %s " fmt, tn, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc("%s " fmt, tn, ##__VA_ARGS__); \
+ } \
} while (0)
-#define cifs_tcon_dbg(type, fmt, ...) \
-do { \
- if ((type) & ONCE) \
- cifs_tcon_dbg_func(once, \
- type, fmt, ##__VA_ARGS__); \
- else \
- cifs_tcon_dbg_func(ratelimited, \
- type, fmt, ##__VA_ARGS__); \
+#define cifs_tcon_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_tcon_dbg_func(once, type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_tcon_dbg_func(ratelimited, type, fmt, \
+ ##__VA_ARGS__); \
} while (0)
/*
@@ -159,9 +154,7 @@ do { \
} while (0)
#define cifs_info(fmt, ...) \
-do { \
- pr_info("CIFS: "fmt, ##__VA_ARGS__); \
-} while (0)
+ pr_info(fmt, ##__VA_ARGS__)
#endif
#endif /* _H_CIFS_DEBUG */
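
Most of the churn in this header comes from defining pr_fmt once, so every pr_* call in CIFS picks up the "CIFS: " prefix automatically instead of repeating it at each call site. A minimal userspace demonstration of the same preprocessor trick (printf standing in for the kernel logging backends):

#include <stdio.h>

#define pr_fmt(fmt) "CIFS: " fmt

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        pr_info("mounting %s\n", "//srv/share");         /* -> "CIFS: mounting ..." */
        pr_err("VFS: reconnect failed rc = %d\n", -112);
        return 0;
}
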
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 97b7497c13ef..874a551f339c 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -520,7 +520,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
- cifs_dbg(VFS, "%s: could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
return rc;
}
@@ -624,7 +624,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
- cifs_dbg(VFS, "%s: could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
return rc;
}
@@ -723,7 +723,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
/* calculate ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
if (rc) {
- cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
+ cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
goto unlock;
}
@@ -783,7 +783,7 @@ calc_seckey(struct cifs_ses *ses)
ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
if (!ctx_arc4) {
- cifs_dbg(VFS, "could not allocate arc4 context\n");
+ cifs_dbg(VFS, "Could not allocate arc4 context\n");
return -ENOMEM;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c31f362fa098..889f9c71049b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -534,6 +534,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",signloosely");
if (tcon->nocase)
seq_puts(s, ",nocase");
+ if (tcon->nodelete)
+ seq_puts(s, ",nodelete");
if (tcon->local_lease)
seq_puts(s, ",locallease");
if (tcon->retry)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c9e2e6bbca13..c7a311d28d3d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.26"
+#define CIFS_VERSION "2.27"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 39b708d9d86d..e133bb3e172f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -562,6 +562,7 @@ struct smb_vol {
bool override_gid:1;
bool dynperm:1;
bool noperm:1;
+ bool nodelete:1;
bool mode_ace:1;
bool no_psx_acl:1; /* set if posix acl support should be disabled */
bool cifs_acl:1;
@@ -1029,6 +1030,7 @@ struct cifs_ses {
#define CIFS_MAX_CHANNELS 16
struct cifs_chan chans[CIFS_MAX_CHANNELS];
+ struct cifs_chan *binding_chan;
size_t chan_count;
size_t chan_max;
atomic_t chan_seq; /* round robin state */
@@ -1036,23 +1038,31 @@ struct cifs_ses {
/*
* When binding a new channel, we need to access the channel which isn't fully
- * established yet (one past the established count)
+ * established yet.
*/
static inline
struct cifs_chan *cifs_ses_binding_channel(struct cifs_ses *ses)
{
if (ses->binding)
- return &ses->chans[ses->chan_count];
+ return ses->binding_chan;
else
return NULL;
}
+/*
+ * Returns the server pointer of the session. When binding a new
+ * channel, this returns the server of the channel that isn't fully
+ * established yet.
+ *
+ * This function should be used for negprot/sess.setup codepaths. For
+ * the other requests see cifs_pick_channel().
+ */
static inline
struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
{
if (ses->binding)
- return ses->chans[ses->chan_count].server;
+ return ses->binding_chan->server;
else
return ses->server;
}
@@ -1136,6 +1146,7 @@ struct cifs_tcon {
bool retry:1;
bool nocase:1;
bool nohandlecache:1; /* if strange server resource prob can turn off */
+ bool nodelete:1;
bool seal:1; /* transport encryption for this mounted share */
bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol
for this mount even if server would support */
@@ -1333,6 +1344,7 @@ struct cifs_io_parms {
__u64 offset;
unsigned int length;
struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
};
struct cifs_aio_ctx {
@@ -1380,6 +1392,7 @@ struct cifs_readdata {
struct cifs_readdata *rdata,
struct iov_iter *iter);
struct kvec iov[2];
+ struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbd_mr *mr;
#endif
@@ -1406,6 +1419,7 @@ struct cifs_writedata {
pid_t pid;
unsigned int bytes;
int result;
+ struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbd_mr *mr;
#endif
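
cifsglob.h above grows per-request server pointers and a binding_chan so multichannel code can address one specific channel, while requests outside of negprot/session setup are expected to go through cifs_pick_channel() instead. A userspace sketch of a simple round-robin picker in that spirit (illustrative only; this is not the actual cifs selection logic):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct chan { int id; };

struct session {
        struct chan chans[4];
        size_t chan_count;
        atomic_uint chan_seq;          /* round robin state */
};

static struct chan *pick_channel(struct session *ses)
{
        unsigned int idx;

        if (!ses->chan_count)
                return NULL;
        idx = atomic_fetch_add(&ses->chan_seq, 1) % ses->chan_count;
        return &ses->chans[idx];
}

int main(void)
{
        struct session ses = { .chans = { {0}, {1}, {2} }, .chan_count = 3 };
        int i;

        for (i = 0; i < 5; i++)
                printf("send on channel %d\n", pick_channel(&ses)->id);
        return 0;
}
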
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 12a895e02db4..bd92070ca30c 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -45,25 +45,25 @@ extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
extern unsigned int _get_xid(void);
extern void _free_xid(unsigned int);
-#define get_xid() \
-({ \
+#define get_xid() \
+({ \
unsigned int __xid = _get_xid(); \
- cifs_dbg(FYI, "CIFS VFS: in %s as Xid: %u with uid: %d\n", \
+ cifs_dbg(FYI, "VFS: in %s as Xid: %u with uid: %d\n", \
__func__, __xid, \
from_kuid(&init_user_ns, current_fsuid())); \
- trace_smb3_enter(__xid, __func__); \
- __xid; \
+ trace_smb3_enter(__xid, __func__); \
+ __xid; \
})
-#define free_xid(curr_xid) \
-do { \
- _free_xid(curr_xid); \
- cifs_dbg(FYI, "CIFS VFS: leaving %s (xid = %u) rc = %d\n", \
- __func__, curr_xid, (int)rc); \
- if (rc) \
+#define free_xid(curr_xid) \
+do { \
+ _free_xid(curr_xid); \
+ cifs_dbg(FYI, "VFS: leaving %s (xid = %u) rc = %d\n", \
+ __func__, curr_xid, (int)rc); \
+ if (rc) \
trace_smb3_exit_err(curr_xid, __func__, (int)rc); \
- else \
- trace_smb3_exit_done(curr_xid, __func__); \
+ else \
+ trace_smb3_exit_done(curr_xid, __func__); \
} while (0)
extern int init_cifs_idmap(void);
extern void exit_cifs_idmap(void);
@@ -89,16 +89,20 @@ extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
+extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
extern int cifs_call_async(struct TCP_Server_Info *server,
struct smb_rqst *rqst,
mid_receive_t *receive, mid_callback_t *callback,
mid_handle_t *handle, void *cbdata, const int flags,
const struct cifs_credits *exist_credits);
+extern struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses);
extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct smb_rqst *rqst, int *resp_buf_type,
const int flags, struct kvec *resp_iov);
extern int compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const int flags, const int num_rqst,
struct smb_rqst *rqst, int *resp_buf_type,
struct kvec *resp_iov);
@@ -589,6 +593,8 @@ void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset);
+struct cifs_chan *
+cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
int cifs_try_adding_channels(struct cifs_ses *ses);
int cifs_ses_add_channel(struct cifs_ses *ses,
struct cifs_server_iface *iface);
@@ -616,6 +622,10 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
return dfs_cache_find(xid, ses, nls_codepage, remap, old_path,
referral, NULL);
}
+
+int match_target_ip(struct TCP_Server_Info *server,
+ const char *share, size_t share_len,
+ bool *result);
#endif
static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
diff --git a/fs/cifs/cifsroot.c b/fs/cifs/cifsroot.c
index 37edbfb8e096..9e91a5a40aae 100644
--- a/fs/cifs/cifsroot.c
+++ b/fs/cifs/cifsroot.c
@@ -56,7 +56,7 @@ static int __init cifs_root_setup(char *line)
/* len is strlen(unc) + '\0' */
len = s - line + 1;
if (len > sizeof(root_dev)) {
- printk(KERN_ERR "Root-CIFS: UNC path too long\n");
+ pr_err("Root-CIFS: UNC path too long\n");
return 1;
}
strlcpy(root_dev, line, len);
@@ -66,7 +66,7 @@ static int __init cifs_root_setup(char *line)
sizeof(root_opts), "%s,%s",
DEFAULT_MNT_OPTS, s + 1);
if (n >= sizeof(root_opts)) {
- printk(KERN_ERR "Root-CIFS: mount options string too long\n");
+ pr_err("Root-CIFS: mount options string too long\n");
root_opts[sizeof(root_opts)-1] = '\0';
return 1;
}
@@ -83,7 +83,7 @@ __setup("cifsroot=", cifs_root_setup);
int __init cifs_root_data(char **dev, char **opts)
{
if (!root_dev[0] || root_server_addr == htonl(INADDR_NONE)) {
- printk(KERN_ERR "Root-CIFS: no SMB server address\n");
+ pr_err("Root-CIFS: no SMB server address\n");
return -1;
}
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5014a82391ff..bf41ee048396 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -129,6 +129,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
struct cifs_tcon *tcon)
{
int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
char *tree;
@@ -141,15 +142,14 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
if (!tree)
return -ENOMEM;
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
- tcon->ses->server->hostname);
- rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
- goto out;
- }
-
if (!tcon->dfs_path) {
- rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
+ server->hostname);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ }
goto out;
}
@@ -157,13 +157,13 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
if (rc)
goto out;
- extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
- &tcp_host_len);
+ extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
for (it = dfs_cache_get_tgt_iterator(&tl); it;
it = dfs_cache_get_next_tgt(&tl, it)) {
const char *share, *prefix;
size_t share_len, prefix_len;
+ bool target_match;
rc = dfs_cache_get_tgt_share(it, &share, &share_len, &prefix,
&prefix_len);
@@ -177,19 +177,38 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
if (dfs_host_len != tcp_host_len
|| strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
- cifs_dbg(FYI, "%s: skipping %.*s, doesn't match %.*s",
+ cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n",
__func__,
(int)dfs_host_len, dfs_host,
(int)tcp_host_len, tcp_host);
- continue;
- }
- scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len, share);
+ rc = match_target_ip(server, dfs_host, dfs_host_len,
+ &target_match);
+ if (rc) {
+ cifs_dbg(VFS, "%s: failed to match target ip: %d\n",
+ __func__, rc);
+ break;
+ }
+
+ if (!target_match) {
+ cifs_dbg(FYI, "%s: skipping target\n", __func__);
+ continue;
+ }
+ }
- rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
- if (!rc) {
- rc = update_super_prepath(tcon, prefix, prefix_len);
- break;
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%.*s\\IPC$",
+ (int)share_len, share);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len,
+ share);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ if (!rc) {
+ rc = update_super_prepath(tcon, prefix,
+ prefix_len);
+ break;
+ }
}
if (rc == -EREMOTE)
break;
@@ -262,8 +281,8 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
- cifs_dbg(FYI, "%s: aborting reconnect due to a received"
- " signal by the process\n", __func__);
+ cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+ __func__);
return -ERESTARTSYS;
}
@@ -324,7 +343,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
- printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
+ pr_warn_once("reconnect tcon failed rc = %d\n", rc);
goto out;
}
@@ -557,7 +576,7 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
/* If server requires signing, does client allow it? */
if (srv_sign_required) {
if (!mnt_sign_enabled) {
- cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!");
+ cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
return -ENOTSUPP;
}
server->sign = true;
@@ -566,14 +585,14 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
/* If client requires signing, does server allow it? */
if (mnt_sign_required) {
if (!srv_sign_enabled) {
- cifs_dbg(VFS, "Server does not support signing!");
+ cifs_dbg(VFS, "Server does not support signing!\n");
return -ENOTSUPP;
}
server->sign = true;
}
if (cifs_rdma_enabled(server) && server->sign)
- cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
+ cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
return 0;
}
@@ -703,7 +722,7 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
if (should_set_ext_sec_flag(ses->sectype)) {
- cifs_dbg(FYI, "Requesting extended security.");
+ cifs_dbg(FYI, "Requesting extended security\n");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
@@ -2375,7 +2394,7 @@ int
CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec)
{
- int rc = -EACCES;
+ int rc;
WRITE_REQ *pSMB = NULL;
int wct;
int smb_hdr_len;
@@ -3868,7 +3887,7 @@ GetExtAttrRetry:
struct file_chattr_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count != 16) {
- cifs_dbg(FYI, "Illegal size ret in GetExtAttr\n");
+ cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n");
rc = -EIO;
goto GetExtAttrOut;
}
@@ -4244,7 +4263,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QFileInfo = %d", rc);
+ cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4411,7 +4430,7 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc);
+ cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4493,7 +4512,7 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc);
+ cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4913,7 +4932,7 @@ GetInodeNumberRetry:
struct file_internal_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count < 8) {
- cifs_dbg(FYI, "Illegal size ret in QryIntrnlInf\n");
+ cifs_dbg(FYI, "Invalid size ret in QryIntrnlInf\n");
rc = -EIO;
goto GetInodeNumOut;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ad8fb53b3682..5fac34f192af 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -75,7 +75,7 @@ enum {
Opt_forceuid, Opt_noforceuid,
Opt_forcegid, Opt_noforcegid,
Opt_noblocksend, Opt_noautotune, Opt_nolease,
- Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
+ Opt_hard, Opt_soft, Opt_perm, Opt_noperm, Opt_nodelete,
Opt_mapposix, Opt_nomapposix,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
Opt_nosfu, Opt_nodfs, Opt_posixpaths,
@@ -141,6 +141,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_soft, "soft" },
{ Opt_perm, "perm" },
{ Opt_noperm, "noperm" },
+ { Opt_nodelete, "nodelete" },
{ Opt_mapchars, "mapchars" }, /* SFU style */
{ Opt_nomapchars, "nomapchars" },
{ Opt_mapposix, "mapposix" }, /* SFM style */
@@ -426,8 +427,7 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
}
static inline int reconn_setup_dfs_targets(struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tl,
- struct dfs_cache_tgt_iterator **it)
+ struct dfs_cache_tgt_list *tl)
{
if (!cifs_sb->origin_fullpath)
return -EOPNOTSUPP;
@@ -472,7 +472,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
} else {
cifs_sb = CIFS_SB(sb);
- rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
+ rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list);
if (rc && (rc != -EOPNOTSUPP)) {
cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
__func__);
@@ -572,26 +572,26 @@ cifs_reconnect(struct TCP_Server_Info *server)
try_to_freeze();
mutex_lock(&server->srv_mutex);
+#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* Set up next DFS target server (if any) for reconnect. If DFS
* feature is disabled, then we will retry last server we
* connected to before.
*/
+ reconn_inval_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
+#endif
+ rc = reconn_set_ipaddr(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
+
if (cifs_rdma_enabled(server))
rc = smbd_reconnect(server);
else
rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- reconn_inval_dfs_target(server, cifs_sb, &tgt_list,
- &tgt_it);
-#endif
- rc = reconn_set_ipaddr(server);
- if (rc) {
- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
- __func__, rc);
- }
mutex_unlock(&server->srv_mutex);
msleep(3000);
} else {
@@ -879,8 +879,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* function has finished processing it is a bug.
*/
if (mid->mid_flags & MID_DELETED)
- printk_once(KERN_WARNING
- "trying to dequeue a deleted mid\n");
+ pr_warn_once("trying to dequeue a deleted mid\n");
else {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
@@ -1229,9 +1228,8 @@ next_pdu:
smb2_add_credits_from_hdr(bufs[i], server);
cifs_dbg(FYI, "Received oplock break\n");
} else {
- cifs_server_dbg(VFS, "No task to wake, unknown frame "
- "received! NumMids %d\n",
- atomic_read(&midCount));
+ cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
+ atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", bufs[i],
HEADER_SIZE(server));
smb2_add_credits_from_hdr(bufs[i], server);
@@ -1476,9 +1474,7 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
cifs_dbg(VFS, "vers=1.0 (cifs) not permitted when mounting with smb3\n");
return 1;
}
- cifs_dbg(VFS, "Use of the less secure dialect vers=1.0 "
- "is not recommended unless required for "
- "access to very old servers\n");
+ cifs_dbg(VFS, "Use of the less secure dialect vers=1.0 is not recommended unless required for access to very old servers\n");
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
break;
@@ -1545,7 +1541,7 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
size_t len;
if (unlikely(!devname || !*devname)) {
- cifs_dbg(VFS, "Device name not specified.\n");
+ cifs_dbg(VFS, "Device name not specified\n");
return -EINVAL;
}
@@ -1695,13 +1691,13 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case 0:
break;
case -ENOMEM:
- cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
+ cifs_dbg(VFS, "Unable to allocate memory for devname\n");
goto cifs_parse_mount_err;
case -EINVAL:
- cifs_dbg(VFS, "Malformed UNC in devname.\n");
+ cifs_dbg(VFS, "Malformed UNC in devname\n");
goto cifs_parse_mount_err;
default:
- cifs_dbg(VFS, "Unknown error parsing devname.\n");
+ cifs_dbg(VFS, "Unknown error parsing devname\n");
goto cifs_parse_mount_err;
}
@@ -1761,6 +1757,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_noperm:
vol->noperm = 1;
break;
+ case Opt_nodelete:
+ vol->nodelete = 1;
+ break;
case Opt_mapchars:
vol->sfu_remap = true;
vol->remap = false; /* disable SFM mapping */
@@ -1909,7 +1908,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->seal = 1;
break;
case Opt_noac:
- pr_warn("CIFS: Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
+ pr_warn("Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
break;
case Opt_fsc:
#ifndef CONFIG_CIFS_FSCACHE
@@ -1965,9 +1964,13 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
break;
case Opt_multichannel:
vol->multichannel = true;
+ /* if number of channels not specified, default to 2 */
+ if (vol->max_channels < 2)
+ vol->max_channels = 2;
break;
case Opt_nomultichannel:
vol->multichannel = false;
+ vol->max_channels = 1;
break;
case Opt_compress:
vol->compression = UNKNOWN_TYPE;
@@ -2156,7 +2159,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (strnlen(string, CIFS_MAX_USERNAME_LEN) >
CIFS_MAX_USERNAME_LEN) {
- pr_warn("CIFS: username too long\n");
+ pr_warn("username too long\n");
goto cifs_parse_mount_err;
}
@@ -2222,7 +2225,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
if (vol->password == NULL) {
- pr_warn("CIFS: no memory for password\n");
+ pr_warn("no memory for password\n");
goto cifs_parse_mount_err;
}
@@ -2246,7 +2249,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (!cifs_convert_address(dstaddr, string,
strlen(string))) {
- pr_err("CIFS: bad ip= option (%s).\n", string);
+ pr_err("bad ip= option (%s)\n", string);
goto cifs_parse_mount_err;
}
got_ip = true;
@@ -2258,14 +2261,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
== CIFS_MAX_DOMAINNAME_LEN) {
- pr_warn("CIFS: domain name too long\n");
+ pr_warn("domain name too long\n");
goto cifs_parse_mount_err;
}
kfree(vol->domainname);
vol->domainname = kstrdup(string, GFP_KERNEL);
if (!vol->domainname) {
- pr_warn("CIFS: no memory for domainname\n");
+ pr_warn("no memory for domainname\n");
goto cifs_parse_mount_err;
}
cifs_dbg(FYI, "Domain name set\n");
@@ -2278,7 +2281,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (!cifs_convert_address(
(struct sockaddr *)&vol->srcaddr,
string, strlen(string))) {
- pr_warn("CIFS: Could not parse srcaddr: %s\n",
+ pr_warn("Could not parse srcaddr: %s\n",
string);
goto cifs_parse_mount_err;
}
@@ -2289,7 +2292,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto out_nomem;
if (strnlen(string, 1024) >= 65) {
- pr_warn("CIFS: iocharset name too long.\n");
+ pr_warn("iocharset name too long\n");
goto cifs_parse_mount_err;
}
@@ -2298,7 +2301,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->iocharset = kstrdup(string,
GFP_KERNEL);
if (!vol->iocharset) {
- pr_warn("CIFS: no memory for charset\n");
+ pr_warn("no memory for charset\n");
goto cifs_parse_mount_err;
}
}
@@ -2329,7 +2332,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
* set at top of the function
*/
if (i == RFC1001_NAME_LEN && string[i] != 0)
- pr_warn("CIFS: netbiosname longer than 15 truncated.\n");
+ pr_warn("netbiosname longer than 15 truncated\n");
break;
case Opt_servern:
/* servernetbiosname specified override *SMBSERVER */
@@ -2355,7 +2358,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
/* The string has 16th byte zero still from
set at top of the function */
if (i == RFC1001_NAME_LEN && string[i] != 0)
- pr_warn("CIFS: server netbiosname longer than 15 truncated.\n");
+ pr_warn("server netbiosname longer than 15 truncated\n");
break;
case Opt_ver:
/* version of mount userspace tools, not dialect */
@@ -2366,17 +2369,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
/* If interface changes in mount.cifs bump to new ver */
if (strncasecmp(string, "1", 1) == 0) {
if (strlen(string) > 1) {
- pr_warn("Bad mount helper ver=%s. Did "
- "you want SMB1 (CIFS) dialect "
- "and mean to type vers=1.0 "
- "instead?\n", string);
+ pr_warn("Bad mount helper ver=%s. Did you want SMB1 (CIFS) dialect and mean to type vers=1.0 instead?\n",
+ string);
goto cifs_parse_mount_err;
}
/* This is the default */
break;
}
/* For all other value, error */
- pr_warn("CIFS: Invalid mount helper version specified\n");
+ pr_warn("Invalid mount helper version specified\n");
goto cifs_parse_mount_err;
case Opt_vers:
/* protocol version (dialect) */
@@ -2419,7 +2420,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
if (!sloppy && invalid) {
- pr_err("CIFS: Unknown mount option \"%s\"\n", invalid);
+ pr_err("Unknown mount option \"%s\"\n", invalid);
goto cifs_parse_mount_err;
}
@@ -2455,7 +2456,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
slash = strchr(&vol->UNC[2], '\\');
len = slash - &vol->UNC[2];
if (!cifs_convert_address(dstaddr, &vol->UNC[2], len)) {
- pr_err("Unable to determine destination address.\n");
+ pr_err("Unable to determine destination address\n");
goto cifs_parse_mount_err;
}
}
@@ -2466,20 +2467,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (uid_specified)
vol->override_uid = override_uid;
else if (override_uid == 1)
- pr_notice("CIFS: ignoring forceuid mount option specified with no uid= option.\n");
+ pr_notice("ignoring forceuid mount option specified with no uid= option\n");
if (gid_specified)
vol->override_gid = override_gid;
else if (override_gid == 1)
- pr_notice("CIFS: ignoring forcegid mount option specified with no gid= option.\n");
+ pr_notice("ignoring forcegid mount option specified with no gid= option\n");
if (got_version == false)
- pr_warn_once("No dialect specified on mount. Default has changed"
- " to a more secure dialect, SMB2.1 or later (e.g. "
- "SMB3.1.1), from CIFS (SMB1). To use the less secure "
- "SMB1 dialect to access old servers which do not "
- "support SMB3.1.1 (or even SMB3 or SMB2.1) specify "
- "vers=1.0 on mount.\n");
+ pr_warn_once("No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount.\n");
kfree(mountdata_copy);
return 0;
@@ -2496,8 +2492,8 @@ cifs_parse_mount_err:
* specified, or if srcaddr is specified and
* matches the IP address of the rhs argument.
*/
-static bool
-srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
+bool
+cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
switch (srcaddr->sa_family) {
case AF_UNSPEC:
@@ -2588,7 +2584,7 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
return false; /* don't expect to be here */
}
- if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
+ if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
return false;
return true;
@@ -3197,8 +3193,8 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
strlen(ses->domainName),
GFP_KERNEL);
if (!vol->domainname) {
- cifs_dbg(FYI, "Unable to allocate %zd bytes for "
- "domain\n", len);
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
+ len);
rc = -ENOMEM;
kfree(vol->username);
vol->username = NULL;
@@ -3363,6 +3359,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return 0;
if (tcon->no_lease != volume_info->no_lease)
return 0;
+ if (tcon->nodelete != volume_info->nodelete)
+ return 0;
return 1;
}
@@ -3519,10 +3517,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
if (volume_info->linux_ext) {
if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
- printk_once(KERN_WARNING
- "SMB3.11 POSIX Extensions are experimental\n");
+ pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
} else {
- cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
+ cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
rc = -EOPNOTSUPP;
goto out_fail;
}
@@ -3580,6 +3577,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
}
+ if (volume_info->no_lease) {
+ if (ses->server->vals->protocol_id == 0) {
+ cifs_dbg(VFS,
+ "SMB2 or later required for nolease option\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
+ } else {
+ tcon->no_lease = volume_info->no_lease;
+ }
+ }
+
/*
* We can have only one retry value for a connection to a share so for
* resources mounted more than once to the same server share the last
@@ -3588,8 +3595,8 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->retry = volume_info->retry;
tcon->nocase = volume_info->nocase;
tcon->nohandlecache = volume_info->nohandlecache;
+ tcon->nodelete = volume_info->nodelete;
tcon->local_lease = volume_info->local_lease;
- tcon->no_lease = volume_info->no_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
spin_lock(&cifs_tcp_ses_lock);
@@ -4736,8 +4743,7 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol,
rc = cifs_are_all_path_components_accessible(server, xid, tcon,
cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
if (rc != 0) {
- cifs_server_dbg(VFS, "cannot query dirs between root and final path, "
- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
rc = 0;
}
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index a67f88bf7ae1..df81c718d2fa 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -198,7 +198,7 @@ static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
if (c != '0')
return -EINVAL;
- cifs_dbg(FYI, "clearing dfs cache");
+ cifs_dbg(FYI, "clearing dfs cache\n");
down_write(&htable_rw_lock);
flush_cache_ents();
@@ -234,8 +234,8 @@ static inline void dump_tgts(const struct cache_entry *ce)
static inline void dump_ce(const struct cache_entry *ce)
{
- cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
- "interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
+ cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path,
ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
ce->etime.tv_nsec,
IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
@@ -453,11 +453,11 @@ static void remove_oldest_entry(void)
}
if (!to_del) {
- cifs_dbg(FYI, "%s: no entry to remove", __func__);
+ cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
return;
}
- cifs_dbg(FYI, "%s: removing entry", __func__);
+ cifs_dbg(FYI, "%s: removing entry\n", __func__);
dump_ce(to_del);
flush_cache_ent(to_del);
}
@@ -696,8 +696,8 @@ static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
}
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
- cifs_dbg(FYI, "%s: reached max cache size (%d)", __func__,
- CACHE_MAX_ENTRIES);
+ cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
+ __func__, CACHE_MAX_ENTRIES);
down_write(&htable_rw_lock);
remove_oldest_entry();
up_write(&htable_rw_lock);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 17a4f49c34f5..8277859d12a3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -857,7 +857,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
tcon->need_reopen_files = false;
- cifs_dbg(FYI, "Reopen persistent handles");
+ cifs_dbg(FYI, "Reopen persistent handles\n");
INIT_LIST_HEAD(&tmp_list);
/* list all files open on tree connection, reopen resilient handles */
@@ -1853,7 +1853,7 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
unsigned int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
write_size, *offset, dentry);
@@ -2056,7 +2056,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
if (rc)
- cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
+ cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
return cfile;
}
@@ -2292,8 +2292,6 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
struct address_space *mapping, struct writeback_control *wbc)
{
int rc;
- struct TCP_Server_Info *server =
- tlink_tcon(wdata->cfile->tlink)->ses->server;
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
@@ -2305,14 +2303,15 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
wdata->pid = wdata->cfile->pid;
- rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+ rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
if (rc)
return rc;
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
else
- rc = server->ops->async_writev(wdata, cifs_writedata_release);
+ rc = wdata->server->ops->async_writev(wdata,
+ cifs_writedata_release);
return rc;
}
@@ -2349,7 +2348,8 @@ static int cifs_writepages(struct address_space *mapping,
range_whole = true;
scanned = true;
}
- server = cifs_sb_master_tcon(cifs_sb)->ses->server;
+ server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
+
retry:
while (!done && index <= end) {
unsigned int i, nr_pages, found_pages, wsize;
@@ -2403,6 +2403,7 @@ retry:
wdata->credits = credits_on_stack;
wdata->cfile = cfile;
+ wdata->server = server;
cfile = NULL;
if (!wdata->cfile) {
@@ -2806,8 +2807,7 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
unsigned int wsize;
struct cifs_credits credits;
int rc;
- struct TCP_Server_Info *server =
- tlink_tcon(wdata->cfile->tlink)->ses->server;
+ struct TCP_Server_Info *server = wdata->server;
do {
if (wdata->cfile->invalidHandle) {
@@ -2893,7 +2893,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
else
pid = current->tgid;
- server = tlink_tcon(open_file->tlink)->ses->server;
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
xid = get_xid();
do {
@@ -2923,11 +2923,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
from, &pagevec, cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
- "direct_writev couldn't get user pages "
- "(rc=%zd) iter type %d iov_offset %zd "
- "count %zd\n",
- result, iov_iter_type(from),
- from->iov_offset, from->count);
+ "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
+ result, iov_iter_type(from),
+ from->iov_offset, from->count);
dump_stack();
rc = result;
@@ -2999,6 +2997,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
wdata->cfile = cifsFileInfo_get(open_file);
+ wdata->server = server;
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
@@ -3540,8 +3539,10 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
unsigned int rsize;
struct cifs_credits credits;
int rc;
- struct TCP_Server_Info *server =
- tlink_tcon(rdata->cfile->tlink)->ses->server;
+ struct TCP_Server_Info *server;
+
+ /* XXX: should we pick a new channel here? */
+ server = rdata->server;
do {
if (rdata->cfile->invalidHandle) {
@@ -3620,7 +3621,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
size_t start;
struct iov_iter direct_iov = ctx->iter;
- server = tlink_tcon(open_file->tlink)->ses->server;
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -3654,12 +3655,10 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
- "couldn't get user pages (rc=%zd)"
- " iter type %d"
- " iov_offset %zd count %zd\n",
- result, iov_iter_type(&direct_iov),
- direct_iov.iov_offset,
- direct_iov.count);
+ "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
+ result, iov_iter_type(&direct_iov),
+ direct_iov.iov_offset,
+ direct_iov.count);
dump_stack();
rc = result;
@@ -3706,6 +3705,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
rdata->tailsz = PAGE_SIZE;
}
+ rdata->server = server;
rdata->cfile = cifsFileInfo_get(open_file);
rdata->nr_pages = npages;
rdata->offset = offset;
@@ -4018,7 +4018,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
unsigned int xid;
char *cur_offset;
struct cifsFileInfo *open_file;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
__u32 pid;
@@ -4035,7 +4035,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
}
open_file = file->private_data;
tcon = tlink_tcon(open_file->tlink);
- server = tcon->ses->server;
+ server = cifs_pick_channel(tcon->ses);
if (!server->ops->sync_read) {
free_xid(xid);
@@ -4074,6 +4074,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
io_parms.tcon = tcon;
io_parms.offset = *offset;
io_parms.length = current_read_size;
+ io_parms.server = server;
rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
&bytes_read, &cur_offset,
&buf_type);
@@ -4376,7 +4377,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
pid = current->tgid;
rc = 0;
- server = tlink_tcon(open_file->tlink)->ses->server;
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
__func__, file, mapping, num_pages);
@@ -4447,6 +4448,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->server = server;
rdata->mapping = mapping;
rdata->offset = offset;
rdata->bytes = bytes;
@@ -4828,7 +4830,7 @@ static int cifs_swap_activate(struct swap_info_struct *sis,
}
*span = sis->pages;
- printk_once(KERN_WARNING "Swap support over SMB3 is experimental\n");
+ pr_warn_once("Swap support over SMB3 is experimental\n");
/*
* TODO: consider adding ACL (or documenting how) to prevent other
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 5d2965a23730..5072bcaf4be1 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -25,6 +25,7 @@
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/wait_bit.h>
+#include <linux/fiemap.h>
#include <asm/div64.h>
#include "cifsfs.h"
@@ -447,7 +448,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
struct cifs_tcon *tcon;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
char buf[24];
unsigned int bytes_read;
char *pbuf;
@@ -1155,7 +1156,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
/* some servers mistakenly claim POSIX support */
if (rc != -EOPNOTSUPP)
goto iget_no_retry;
- cifs_dbg(VFS, "server does not support POSIX extensions");
+ cifs_dbg(VFS, "server does not support POSIX extensions\n");
tcon->unix_ext = false;
}
@@ -1418,6 +1419,11 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
xid = get_xid();
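+ /* the nodelete mount option prohibits removing files on this share */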
+ if (tcon->nodelete) {
+ rc = -EACCES;
+ goto unlink_out;
+ }
+
/* Unlink can be called from rename so we can not take the
* sb->s_vfs_rename_mutex here */
full_path = build_path_from_dentry(dentry);
@@ -1746,6 +1752,12 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
goto rmdir_exit;
}
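+ /* nodelete likewise prevents removing directories on this share */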
+ if (tcon->nodelete) {
+ rc = -EACCES;
+ cifs_put_tlink(tlink);
+ goto rmdir_exit;
+ }
+
rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
cifs_put_tlink(tlink);
@@ -1999,7 +2011,7 @@ cifs_invalidate_mapping(struct inode *inode)
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
rc = invalidate_inode_pages2(inode->i_mapping);
if (rc)
- cifs_dbg(VFS, "%s: could not invalidate inode %p\n",
+ cifs_dbg(VFS, "%s: Could not invalidate inode %p\n",
__func__, inode);
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index a25ef35b023e..c381d2d03ef6 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -308,7 +308,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int oplock = 0;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
FILE_ALL_INFO file_info;
@@ -352,7 +352,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int oplock = 0;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
@@ -389,7 +389,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
@@ -450,7 +450,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 550ce9020a3e..56791a692c8b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,6 +32,9 @@
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
+#ifdef CONFIG_CIFS_DFS_UPCALL
+#include "dns_resolve.h"
+#endif
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
@@ -421,7 +424,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
if (data_offset >
len - sizeof(struct file_notify_information)) {
- cifs_dbg(FYI, "invalid data_offset %u\n",
+ cifs_dbg(FYI, "Invalid data_offset %u\n",
data_offset);
return true;
}
@@ -449,7 +452,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
large dirty files cached on the client */
if ((NT_STATUS_INVALID_HANDLE) ==
le32_to_cpu(pSMB->hdr.Status.CifsError)) {
- cifs_dbg(FYI, "invalid handle on oplock break\n");
+ cifs_dbg(FYI, "Invalid handle on oplock break\n");
return true;
} else if (ERRbadfid ==
le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
@@ -530,9 +533,9 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
cifs_sb->mnt_cifs_serverino_autodisabled = true;
- cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
+ cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
tcon ? tcon->treeName : "new server");
- cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
+ cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
}
@@ -874,7 +877,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
while (count && npages < max_pages) {
rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
if (rc < 0) {
- cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
+ cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
break;
}
@@ -933,7 +936,7 @@ cifs_alloc_hash(const char *name,
*shash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(*shash)) {
- cifs_dbg(VFS, "could not allocate crypto %s\n", name);
+ cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
rc = PTR_ERR(*shash);
*shash = NULL;
*sdesc = NULL;
@@ -1083,6 +1086,51 @@ void cifs_put_tcp_super(struct super_block *sb)
}
#ifdef CONFIG_CIFS_DFS_UPCALL
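+/*
+ * Resolve the server name embedded in @share via DNS and report in *result
+ * whether it matches the address this TCP connection is connected to.
+ */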
+int match_target_ip(struct TCP_Server_Info *server,
+ const char *share, size_t share_len,
+ bool *result)
+{
+ int rc;
+ char *target, *tip = NULL;
+ struct sockaddr tipaddr;
+
+ *result = false;
+
+ target = kzalloc(share_len + 3, GFP_KERNEL);
+ if (!target) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
+
+ cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
+
+ rc = dns_resolve_server_name_to_ip(target, &tip);
+ if (rc < 0)
+ goto out;
+
+ cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
+
+ if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
+ cifs_dbg(VFS, "%s: failed to convert target ip address\n",
+ __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
+ &tipaddr);
+ cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
+ rc = 0;
+
+out:
+ kfree(target);
+ kfree(tip);
+
+ return rc;
+}
+
static void tcon_super_cb(struct super_block *sb, void *arg)
{
struct super_cb_data *sd = arg;
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 9b41436fb8db..b7ca4960d4ca 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -957,15 +957,15 @@ struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
sec = 2 * st->TwoSeconds;
min = st->Minutes;
if ((sec > 59) || (min > 59))
- cifs_dbg(VFS, "illegal time min %d sec %lld\n", min, sec);
+ cifs_dbg(VFS, "Invalid time min %d sec %lld\n", min, sec);
sec += (min * 60);
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
- cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
+ cifs_dbg(VFS, "Invalid hours %d\n", st->Hours);
day = sd->Day;
month = sd->Month;
if (day < 1 || day > 31 || month < 1 || month > 12) {
- cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, day);
+ cifs_dbg(VFS, "Invalid date, month %d day: %d\n", month, day);
day = clamp(day, 1, 31);
month = clamp(month, 1, 12);
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 50f776a8d4ba..6df0922e7e30 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -53,7 +53,7 @@ static void dump_cifs_file_struct(struct file *file, char *label)
return;
}
if (cf->invalidHandle)
- cifs_dbg(FYI, "invalid handle\n");
+ cifs_dbg(FYI, "Invalid handle\n");
if (cf->srch_inf.endOfSearch)
cifs_dbg(FYI, "end of search\n");
if (cf->srch_inf.emptyDir)
@@ -246,7 +246,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
*/
fattr->cf_mode = le32_to_cpu(info->Mode) & ~S_IFMT;
- cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o",
+ cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o\n",
le32_to_cpu(info->DeviceId),
le32_to_cpu(info->ReparseTag),
le32_to_cpu(info->Mode));
@@ -478,7 +478,7 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
if (old_entry + next_offset < old_entry) {
- cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+ cifs_dbg(VFS, "Invalid offset %u\n", next_offset);
return NULL;
}
new_entry = old_entry + next_offset;
@@ -515,7 +515,7 @@ static void cifs_fill_dirent_posix(struct cifs_dirent *de,
/* payload should have already been checked at this point */
if (posix_info_parse(info, NULL, &parsed) < 0) {
- cifs_dbg(VFS, "invalid POSIX info payload");
+ cifs_dbg(VFS, "Invalid POSIX info payload\n");
return;
}
@@ -968,7 +968,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
} else if (current_entry != NULL) {
cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
} else {
- cifs_dbg(FYI, "could not find entry\n");
+ cifs_dbg(FYI, "Could not find entry\n");
goto rddir2_exit;
}
cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 43a88e26d26b..5d05bd2822d2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -122,7 +122,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
tries++;
if (tries > 3*ses->chan_max) {
- cifs_dbg(FYI, "too many attempt at opening channels (%d channels left to open)\n",
+ cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
left);
break;
}
@@ -150,6 +150,22 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
return ses->chan_count - old_chan_count;
}
+/*
+ * If server is a channel of ses, return the corresponding enclosing
+ * cifs_chan, otherwise return NULL.
+ */
+struct cifs_chan *
+cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
+{
+ int i;
+
+ for (i = 0; i < ses->chan_count; i++) {
+ if (ses->chans[i].server == server)
+ return &ses->chans[i];
+ }
+ return NULL;
+}
+
int
cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
{
@@ -162,12 +178,14 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
int rc;
unsigned int xid = get_xid();
- cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ",
- ses, iface->speed, iface->rdma_capable ? "yes" : "no");
if (iface->sockaddr.ss_family == AF_INET)
- cifs_dbg(FYI, "ip:%pI4)\n", &ipv4->sin_addr);
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
+ ses, iface->speed, iface->rdma_capable ? "yes" : "no",
+ &ipv4->sin_addr);
else
- cifs_dbg(FYI, "ip:%pI6)\n", &ipv6->sin6_addr);
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
+ ses, iface->speed, iface->rdma_capable ? "yes" : "no",
+ &ipv6->sin6_addr);
/*
* Setup a smb_vol with mostly the same info as the existing
@@ -198,7 +216,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
vol.UNC = unc;
vol.prepath = "";
- /* Re-use same version as master connection */
+ /* Reuse same version as master connection */
vol.vals = ses->server->vals;
vol.ops = ses->server->ops;
@@ -229,7 +247,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
mutex_lock(&ses->session_mutex);
- chan = &ses->chans[ses->chan_count];
+ chan = ses->binding_chan = &ses->chans[ses->chan_count];
chan->server = cifs_get_tcp_session(&vol);
if (IS_ERR(chan->server)) {
rc = PTR_ERR(chan->server);
@@ -261,7 +279,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
goto out;
/* success, put it on the list
- * XXX: sharing ses between 2 tcp server is not possible, the
+ * XXX: sharing ses between 2 tcp servers is not possible, the
* way "internal" linked lists works in linux makes element
* only able to belong to one list
*
@@ -274,6 +292,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
atomic_set(&ses->chan_seq, 0);
out:
ses->binding = false;
+ ses->binding_chan = NULL;
mutex_unlock(&ses->session_mutex);
if (rc && chan->server)
@@ -569,15 +588,15 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tioffset > blob_len || tioffset + tilen > blob_len) {
- cifs_dbg(VFS, "tioffset + tilen too high %u + %u",
- tioffset, tilen);
+ cifs_dbg(VFS, "tioffset + tilen too high %u + %u\n",
+ tioffset, tilen);
return -EINVAL;
}
if (tilen) {
ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
GFP_KERNEL);
if (!ses->auth_key.response) {
- cifs_dbg(VFS, "Challenge target info alloc failure");
+ cifs_dbg(VFS, "Challenge target info alloc failure\n");
return -ENOMEM;
}
ses->auth_key.len = tilen;
@@ -970,7 +989,7 @@ sess_auth_lanman(struct sess_data *sess_data)
/* Calculate hash with password and copy into bcc_ptr.
* Encryption Key (stored as in cryptkey) gets used if the
- * security mode bit in Negottiate Protocol response states
+ * security mode bit in Negotiate Protocol response states
* to use challenge/response method (i.e. Password bit is 1).
*/
rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
@@ -1303,9 +1322,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
* sending us a response in an expected form
*/
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cifs_dbg(VFS,
- "incorrect version of cifs.upcall (expected %d but got %d)",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)\n",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto out_put_spnego_key;
}
@@ -1313,8 +1331,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL);
if (!ses->auth_key.response) {
- cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory",
- msg->sesskey_len);
+ cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
+ msg->sesskey_len);
rc = -ENOMEM;
goto out_put_spnego_key;
}
@@ -1657,8 +1675,7 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
type = cifs_select_sectype(ses->server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
- cifs_dbg(VFS,
- "Unable to select appropriate authentication method!");
+ cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
return -EINVAL;
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index b130efaf8feb..197ed455e657 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -247,7 +247,7 @@ check2ndT2(char *buf)
/* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
- cifs_dbg(FYI, "invalid transact2 word count\n");
+ cifs_dbg(FYI, "Invalid transact2 word count\n");
return -EINVAL;
}
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index a8c301ae00ed..0a116fc490a9 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -47,6 +47,18 @@ free_set_inf_compound(struct smb_rqst *rqst)
}
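+/*
+ * Working state for smb2_compound_op(): kzalloc'ed there so that these
+ * request and iovec arrays no longer live on the kernel stack.
+ */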
+struct cop_vars {
+ struct cifs_open_parms oparms;
+ struct kvec rsp_iov[3];
+ struct smb_rqst rqst[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec close_iov[1];
+ struct smb2_file_rename_info rename_info;
+ struct smb2_file_link_info link_info;
+};
+
static int
smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
@@ -54,35 +66,36 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
__u32 create_options, umode_t mode, void *ptr, int command,
struct cifsFileInfo *cfile)
{
+ struct cop_vars *vars = NULL;
+ struct kvec *rsp_iov;
+ struct smb_rqst *rqst;
int rc;
__le16 *utf16_path = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
- struct cifs_open_parms oparms;
struct cifs_fid fid;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server;
int num_rqst = 0;
- struct smb_rqst rqst[3];
int resp_buftype[3];
- struct kvec rsp_iov[3];
- struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
- struct kvec qi_iov[1];
- struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
- struct kvec close_iov[1];
struct smb2_query_info_rsp *qi_rsp = NULL;
int flags = 0;
__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
unsigned int size[2];
void *data[2];
- struct smb2_file_rename_info rename_info;
- struct smb2_file_link_info link_info;
int len;
+ vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+ if (vars == NULL)
+ return -ENOMEM;
+ rqst = &vars->rqst[0];
+ rsp_iov = &vars->rsp_iov[0];
+
+ server = cifs_pick_channel(ses);
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
- memset(rsp_iov, 0, sizeof(rsp_iov));
/* We already have a handle so we can skip the open */
if (cfile)
@@ -95,19 +108,18 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
goto finished;
}
- memset(&oparms, 0, sizeof(struct cifs_open_parms));
- oparms.tcon = tcon;
- oparms.desired_access = desired_access;
- oparms.disposition = create_disposition;
- oparms.create_options = cifs_create_options(cifs_sb, create_options);
- oparms.fid = &fid;
- oparms.reconnect = false;
- oparms.mode = mode;
-
- memset(&open_iov, 0, sizeof(open_iov));
- rqst[num_rqst].rq_iov = open_iov;
+ vars->oparms.tcon = tcon;
+ vars->oparms.desired_access = desired_access;
+ vars->oparms.disposition = create_disposition;
+ vars->oparms.create_options = cifs_create_options(cifs_sb, create_options);
+ vars->oparms.fid = &fid;
+ vars->oparms.reconnect = false;
+ vars->oparms.mode = mode;
+
+ rqst[num_rqst].rq_iov = &vars->open_iov[0];
rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
- rc = SMB2_open_init(tcon, &rqst[num_rqst], &oplock, &oparms,
+ rc = SMB2_open_init(tcon, server,
+ &rqst[num_rqst], &oplock, &vars->oparms,
utf16_path);
kfree(utf16_path);
if (rc)
@@ -121,12 +133,12 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
/* Operation */
switch (command) {
case SMB2_OP_QUERY_INFO:
- memset(&qi_iov, 0, sizeof(qi_iov));
- rqst[num_rqst].rq_iov = qi_iov;
+ rqst[num_rqst].rq_iov = &vars->qi_iov[0];
rqst[num_rqst].rq_nvec = 1;
if (cfile)
- rc = SMB2_query_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[num_rqst],
cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FILE_ALL_INFORMATION,
@@ -134,10 +146,11 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
else {
- rc = SMB2_query_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[num_rqst],
COMPOUND_FID,
COMPOUND_FID,
- FILE_ALL_INFORMATION,
+ FILE_ALL_INFORMATION,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
@@ -164,14 +177,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_RMDIR:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 1;
size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
data[0] = &delete_pending[0];
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_DISPOSITION_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -182,14 +195,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_EOF:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 1;
size[0] = 8; /* sizeof __le64 */
data[0] = ptr;
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -200,8 +213,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_INFO:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 1;
@@ -209,13 +221,15 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
data[0] = ptr;
if (cfile)
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
cfile->fid.persistent_fid,
cfile->fid.volatile_fid, current->tgid,
FILE_BASIC_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
else {
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_BASIC_INFORMATION,
@@ -233,30 +247,31 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
full_path);
break;
case SMB2_OP_RENAME:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 2;
len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
- rename_info.ReplaceIfExists = 1;
- rename_info.RootDirectory = 0;
- rename_info.FileNameLength = cpu_to_le32(len);
+ vars->rename_info.ReplaceIfExists = 1;
+ vars->rename_info.RootDirectory = 0;
+ vars->rename_info.FileNameLength = cpu_to_le32(len);
size[0] = sizeof(struct smb2_file_rename_info);
- data[0] = &rename_info;
+ data[0] = &vars->rename_info;
size[1] = len + 2 /* null */;
data[1] = (__le16 *)ptr;
if (cfile)
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
current->tgid, FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
else {
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
COMPOUND_FID, COMPOUND_FID,
current->tgid, FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -271,23 +286,23 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_HARDLINK:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 2;
len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
- link_info.ReplaceIfExists = 0;
- link_info.RootDirectory = 0;
- link_info.FileNameLength = cpu_to_le32(len);
+ vars->link_info.ReplaceIfExists = 0;
+ vars->link_info.RootDirectory = 0;
+ vars->link_info.FileNameLength = cpu_to_le32(len);
size[0] = sizeof(struct smb2_file_link_info);
- data[0] = &link_info;
+ data[0] = &vars->link_info;
size[1] = len + 2 /* null */;
data[1] = (__le16 *)ptr;
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_LINK_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -308,10 +323,10 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (cfile)
goto after_close;
/* Close */
- memset(&close_iov, 0, sizeof(close_iov));
- rqst[num_rqst].rq_iov = close_iov;
+ rqst[num_rqst].rq_iov = &vars->close_iov[0];
rqst[num_rqst].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_close_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, false);
smb2_set_related(&rqst[num_rqst]);
if (rc)
@@ -322,11 +337,13 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (cfile) {
cifsFileInfo_put(cfile);
cfile = NULL;
- rc = compound_send_recv(xid, ses, flags, num_rqst - 2,
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst - 2,
&rqst[1], &resp_buftype[1],
&rsp_iov[1]);
} else
- rc = compound_send_recv(xid, ses, flags, num_rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst,
rqst, resp_buftype,
rsp_iov);
@@ -336,8 +353,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_open_free(&rqst[0]);
if (rc == -EREMCHG) {
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n", tcon->treeName);
tcon->need_reconnect = true;
}
@@ -420,6 +436,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ kfree(vars);
return rc;
}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 497afb0b9960..6a39451973f8 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -110,14 +110,14 @@ static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
/* Make sure that negotiate contexts start after gss security blob */
nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
if (nc_offset < non_ctxlen) {
- printk_once(KERN_WARNING "invalid negotiate context offset\n");
+ pr_warn_once("Invalid negotiate context offset\n");
return 0;
}
size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;
/* Verify that at least minimal negotiate contexts fit within frame */
if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
- printk_once(KERN_WARNING "negotiate context goes beyond end\n");
+ pr_warn_once("negotiate context goes beyond end\n");
return 0;
}
@@ -190,14 +190,14 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
return 1;
if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
- cifs_dbg(VFS, "Illegal structure size %u\n",
+ cifs_dbg(VFS, "Invalid structure size %u\n",
le16_to_cpu(shdr->StructureSize));
return 1;
}
command = le16_to_cpu(shdr->Command);
if (command >= NUMBER_OF_SMB2_COMMANDS) {
- cifs_dbg(VFS, "Illegal SMB2 command %d\n", command);
+ cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
return 1;
}
@@ -205,7 +205,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
/* error packets have 9 byte structure size */
- cifs_dbg(VFS, "Illegal response size %u for command %d\n",
+ cifs_dbg(VFS, "Invalid response size %u for command %d\n",
le16_to_cpu(pdu->StructureSize2), command);
return 1;
} else if (command == SMB2_OPLOCK_BREAK_HE
@@ -213,7 +213,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
&& (le16_to_cpu(pdu->StructureSize2) != 44)
&& (le16_to_cpu(pdu->StructureSize2) != 36)) {
/* special case for SMB2.1 lease break message */
- cifs_dbg(VFS, "Illegal response size %d for oplock break\n",
+ cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
le16_to_cpu(pdu->StructureSize2));
return 1;
}
@@ -864,14 +864,14 @@ ok:
d = server->secmech.sdescsha512;
rc = crypto_shash_init(&d->shash);
if (rc) {
- cifs_dbg(VFS, "%s: could not init sha512 shash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
return rc;
}
rc = crypto_shash_update(&d->shash, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: could not update sha512 shash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
return rc;
}
@@ -879,7 +879,7 @@ ok:
rc = crypto_shash_update(&d->shash,
iov[i].iov_base, iov[i].iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: could not update sha512 shash\n",
+ cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
__func__);
return rc;
}
@@ -887,7 +887,7 @@ ok:
rc = crypto_shash_final(&d->shash, ses->preauth_sha_hash);
if (rc) {
- cifs_dbg(VFS, "%s: could not finalize sha512 shash\n",
+ cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
__func__);
return rc;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f829f4165d38..736d86b8a910 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -12,6 +12,7 @@
#include <linux/uuid.h>
#include <linux/sort.h>
#include <crypto/aead.h>
+#include <linux/fiemap.h>
#include "cifsfs.h"
#include "cifsglob.h"
#include "smb2pdu.h"
@@ -79,7 +80,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
- printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
+ pr_warn_once("server overflowed SMB3 credits\n");
}
server->in_flight--;
if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
@@ -708,7 +709,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = pfid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, &utf16_path);
if (rc)
goto oshr_free;
smb2_set_next_command(tcon, &rqst[0]);
@@ -717,7 +719,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qi_iov;
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
COMPOUND_FID, FILE_ALL_INFORMATION,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
@@ -727,7 +730,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
- rc = compound_send_recv(xid, ses, flags, 2, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 2, rqst,
resp_buftype, rsp_iov);
mutex_lock(&tcon->crfid.fid_mutex);
@@ -767,8 +771,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
if (rc) {
if (rc == -EREMCHG) {
tcon->need_reconnect = true;
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
}
goto oshr_exit;
}
@@ -1102,6 +1106,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
__le16 *utf16_path = NULL;
int ea_name_len = strlen(ea_name);
int flags = 0;
@@ -1190,7 +1195,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto sea_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -1216,7 +1222,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
size[0] = len;
data[0] = ea;
- rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -1228,10 +1235,12 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
memset(&close_iov, 0, sizeof(close_iov));
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
/* no need to bump num_remote_opens because handle immediately closed */
@@ -1452,6 +1461,16 @@ req_res_key_exit:
return rc;
}
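+/*
+ * Working state for smb2_ioctl_query_info(): heap-allocated to keep the
+ * three-request compound buffers off the kernel stack.
+ */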
+struct iqi_vars {
+ struct smb_rqst rqst[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec close_iov[1];
+};
+
static int
smb2_ioctl_query_info(const unsigned int xid,
struct cifs_tcon *tcon,
@@ -1459,7 +1478,11 @@ smb2_ioctl_query_info(const unsigned int xid,
__le16 *path, int is_dir,
unsigned long p)
{
+ struct iqi_vars *vars;
+ struct smb_rqst *rqst;
+ struct kvec *rsp_iov;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
char __user *arg = (char __user *)p;
struct smb_query_info qi;
struct smb_query_info __user *pqi;
@@ -1468,45 +1491,47 @@ smb2_ioctl_query_info(const unsigned int xid,
struct smb2_query_info_rsp *qi_rsp = NULL;
struct smb2_ioctl_rsp *io_rsp = NULL;
void *buffer = NULL;
- struct smb_rqst rqst[3];
int resp_buftype[3];
- struct kvec rsp_iov[3];
- struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct cifs_open_parms oparms;
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_fid fid;
- struct kvec qi_iov[1];
- struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
- struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
- struct kvec close_iov[1];
unsigned int size[2];
void *data[2];
int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
- memset(rqst, 0, sizeof(rqst));
+ vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+ if (vars == NULL)
+ return -ENOMEM;
+ rqst = &vars->rqst[0];
+ rsp_iov = &vars->rsp_iov[0];
+
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
- memset(rsp_iov, 0, sizeof(rsp_iov));
if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
- return -EFAULT;
+ goto e_fault;
- if (qi.output_buffer_length > 1024)
+ if (qi.output_buffer_length > 1024) {
+ kfree(vars);
return -EINVAL;
+ }
- if (!ses || !(ses->server))
+ if (!ses || !server) {
+ kfree(vars);
return -EIO;
+ }
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
buffer = memdup_user(arg + sizeof(struct smb_query_info),
qi.output_buffer_length);
- if (IS_ERR(buffer))
+ if (IS_ERR(buffer)) {
+ kfree(vars);
return PTR_ERR(buffer);
+ }
/* Open */
- memset(&open_iov, 0, sizeof(open_iov));
- rqst[0].rq_iov = open_iov;
+ rqst[0].rq_iov = &vars->open_iov[0];
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
memset(&oparms, 0, sizeof(oparms));
@@ -1537,7 +1562,8 @@ smb2_ioctl_query_info(const unsigned int xid,
oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
}
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, path);
if (rc)
goto iqinf_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -1548,11 +1574,11 @@ smb2_ioctl_query_info(const unsigned int xid,
if (!capable(CAP_SYS_ADMIN))
rc = -EPERM;
else {
- memset(&io_iov, 0, sizeof(io_iov));
- rqst[1].rq_iov = io_iov;
+ rqst[1].rq_iov = &vars->io_iov[0];
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
- rc = SMB2_ioctl_init(tcon, &rqst[1],
+ rc = SMB2_ioctl_init(tcon, server,
+ &rqst[1],
COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, buffer,
qi.output_buffer_length,
@@ -1565,31 +1591,32 @@ smb2_ioctl_query_info(const unsigned int xid,
if (!capable(CAP_SYS_ADMIN))
rc = -EPERM;
else {
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[1].rq_iov = si_iov;
+ rqst[1].rq_iov = &vars->si_iov[0];
rqst[1].rq_nvec = 1;
size[0] = 8;
data[0] = buffer;
- rc = SMB2_set_info_init(tcon, &rqst[1],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[1],
COMPOUND_FID, COMPOUND_FID,
current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
}
} else if (qi.flags == PASSTHRU_QUERY_INFO) {
- memset(&qi_iov, 0, sizeof(qi_iov));
- rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_iov = &vars->qi_iov[0];
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
COMPOUND_FID, qi.file_info_class,
qi.info_type, qi.additional_information,
qi.input_buffer_length,
qi.output_buffer_length, buffer);
} else { /* unknown flags */
- cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
+ cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
+ qi.flags);
rc = -EINVAL;
}
@@ -1599,16 +1626,17 @@ smb2_ioctl_query_info(const unsigned int xid,
smb2_set_related(&rqst[1]);
/* Close */
- memset(&close_iov, 0, sizeof(close_iov));
- rqst[2].rq_iov = close_iov;
+ rqst[2].rq_iov = &vars->close_iov[0];
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto iqinf_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
if (rc)
goto iqinf_exit;
@@ -1649,6 +1677,7 @@ smb2_ioctl_query_info(const unsigned int xid,
}
iqinf_exit:
+ kfree(vars);
kfree(buffer);
SMB2_open_free(&rqst[0]);
if (qi.flags & PASSTHRU_FSCTL)
@@ -1719,7 +1748,7 @@ smb2_copychunk_range(const unsigned int xid,
if (rc == 0) {
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
- cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
+ cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
rc = -EIO;
goto cchunk_out;
}
@@ -1733,12 +1762,12 @@ smb2_copychunk_range(const unsigned int xid,
*/
if (le32_to_cpu(retbuf->TotalBytesWritten) >
le32_to_cpu(pcchunk->Length)) {
- cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
+ cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
rc = -EIO;
goto cchunk_out;
}
if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
- cifs_tcon_dbg(VFS, "invalid num chunks written\n");
+ cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
rc = -EIO;
goto cchunk_out;
}
@@ -2159,6 +2188,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct smb2_query_directory_rsp *qd_rsp = NULL;
struct smb2_create_rsp *op_rsp = NULL;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
@@ -2183,7 +2213,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto qdf_free;
smb2_set_next_command(tcon, &rqst[0]);
@@ -2196,7 +2227,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qd_iov;
rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
- rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
+ rc = SMB2_query_directory_init(xid, tcon, server,
+ &rqst[1],
COMPOUND_FID, COMPOUND_FID,
0, srch_inf->info_level);
if (rc)
@@ -2204,7 +2236,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
- rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
+ rc = compound_send_recv(xid, tcon->ses, server,
+ flags, 2, rqst,
resp_buftype, rsp_iov);
/* If the open failed there is nothing to do */
@@ -2409,6 +2442,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
struct smb_rqst rqst[3];
int resp_buftype[3];
@@ -2439,7 +2473,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto qic_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -2448,7 +2483,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qi_iov;
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID, COMPOUND_FID,
class, type, 0,
output_len, 0,
NULL);
@@ -2461,19 +2497,21 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto qic_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
if (rc) {
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
if (rc == -EREMCHG) {
tcon->need_reconnect = true;
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
}
goto qic_exit;
}
@@ -2753,15 +2791,15 @@ parse_reparse_point(struct reparse_data_buffer *buf,
struct cifs_sb_info *cifs_sb)
{
if (plen < sizeof(struct reparse_data_buffer)) {
- cifs_dbg(VFS, "reparse buffer is too small. Must be "
- "at least 8 bytes but was %d\n", plen);
+ cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
+ plen);
return -EIO;
}
if (plen < le16_to_cpu(buf->ReparseDataLength) +
sizeof(struct reparse_data_buffer)) {
- cifs_dbg(VFS, "srv returned invalid reparse buf "
- "length: %d\n", plen);
+ cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
+ plen);
return -EIO;
}
@@ -2776,8 +2814,8 @@ parse_reparse_point(struct reparse_data_buffer *buf,
(struct reparse_symlink_data_buffer *)buf,
plen, target_path, cifs_sb);
default:
- cifs_dbg(VFS, "srv returned unknown symlink buffer "
- "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
+ cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
+ le32_to_cpu(buf->ReparseTag));
return -EOPNOTSUPP;
}
}
@@ -2798,6 +2836,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec err_iov = {NULL, 0};
struct smb2_err_rsp *err_buf = NULL;
struct smb2_symlink_err_rsp *symlink;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
unsigned int sub_len;
unsigned int sub_offset;
unsigned int print_len;
@@ -2843,7 +2882,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto querty_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -2854,7 +2894,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = io_iov;
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
- rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
+ rc = SMB2_ioctl_init(tcon, server,
+ &rqst[1], fid.persistent_fid,
fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
true /* is_fctl */, NULL, 0,
CIFSMaxBufSize -
@@ -2872,13 +2913,15 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto querty_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, tcon->ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
create_rsp = rsp_iov[0].iov_base;
@@ -3407,8 +3450,9 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
int i, num, rc, flags, last_blob;
u64 next;
- if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
- return -EBADR;
+ rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
+ if (rc)
+ return rc;
xid = get_xid();
again:
@@ -4571,7 +4615,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
int rc = -EPERM;
FILE_ALL_INFO *buf = NULL;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
__u32 oplock = 0;
struct cifs_fid fid;
struct cifs_open_parms oparms;
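The one-character change to smb2_make_node() above is easy to overlook but deliberate: cifs_io_parms is growing a per-request server pointer in this series, and a stack instance left uninitialized would hand the I/O path a garbage channel. A minimal, generic illustration of why the {0} initializer matters (the structure below is a stand-in, not the cifs type):

    #include <stdio.h>

    struct io_parms {
        void *server;          /* new member: NULL means "pick a channel for me" */
        unsigned long offset;
        unsigned int length;
    };

    int main(void)
    {
        struct io_parms uninit;           /* indeterminate contents */
        struct io_parms zeroed = {0};     /* every member, including server, is zero/NULL */

        /* Reading uninit.server would be undefined behaviour; zeroed.server
         * is guaranteed NULL, so later code can safely treat NULL as
         * "no channel chosen yet". */
        printf("zeroed.server = %p\n", zeroed.server);
        (void)uninit;
        return 0;
    }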
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b30aa3cdd845..ded96b529a4d 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -85,7 +85,7 @@ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
- if (!tcon)
+ if (!tcon || !tcon->ses)
return 0;
if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
(tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
@@ -98,14 +98,13 @@ int smb3_encryption_required(const struct cifs_tcon *tcon)
static void
smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
- const struct cifs_tcon *tcon)
+ const struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server)
{
shdr->ProtocolId = SMB2_PROTO_NUMBER;
shdr->StructureSize = cpu_to_le16(64);
shdr->Command = smb2_cmd;
- if (tcon && tcon->ses && tcon->ses->server) {
- struct TCP_Server_Info *server = tcon->ses->server;
-
+ if (server) {
spin_lock(&server->req_lock);
/* Request up to 10 credits but don't go over the limit. */
if (server->credits >= server->max_credits)
@@ -125,8 +124,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
- if ((tcon->ses) && (tcon->ses->server) &&
- (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
shdr->CreditCharge = cpu_to_le16(1);
/* else CreditCharge MBZ */
@@ -148,8 +146,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
/* if (tcon->share_flags & SHI1005_FLAGS_DFS)
shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
- if (tcon->ses && tcon->ses->server && tcon->ses->server->sign &&
- !smb3_encryption_required(tcon))
+ if (server && server->sign && !smb3_encryption_required(tcon))
shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
return;
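With the server now passed in explicitly, smb2_hdr_assemble() keeps the crediting behaviour described by the comment in the hunk: ask the server for up to 10 more credits, but never push the grant past the configured ceiling. A standalone sketch of that clamp (simplified, without the req_lock locking of the real path):

    #include <stdio.h>

    /* Ask for more credits only while below the cap, never more than 10. */
    static int credit_request(int granted, int max_credits)
    {
        int headroom = max_credits - granted;

        if (headroom <= 0)
            return 0;
        return headroom < 10 ? headroom : 10;
    }

    int main(void)
    {
        printf("%d\n", credit_request(500, 512));  /* headroom 12 -> ask for 10 */
        printf("%d\n", credit_request(510, 512));  /* headroom 2  -> ask for 2  */
        printf("%d\n", credit_request(512, 512));  /* at the cap  -> ask for 0  */
        return 0;
    }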
@@ -160,6 +157,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
struct cifs_tcon *tcon)
{
int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
char *tree;
@@ -172,15 +170,15 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
if (!tree)
return -ENOMEM;
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
- tcon->ses->server->hostname);
- rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
- goto out;
- }
-
if (!tcon->dfs_path) {
- rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
+ server->hostname);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon,
+ nlsc);
+ }
goto out;
}
@@ -188,13 +186,13 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
if (rc)
goto out;
- extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
- &tcp_host_len);
+ extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
for (it = dfs_cache_get_tgt_iterator(&tl); it;
it = dfs_cache_get_next_tgt(&tl, it)) {
const char *share, *prefix;
size_t share_len, prefix_len;
+ bool target_match;
rc = dfs_cache_get_tgt_share(it, &share, &share_len, &prefix,
&prefix_len);
@@ -208,19 +206,38 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
if (dfs_host_len != tcp_host_len
|| strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
- cifs_dbg(FYI, "%s: skipping %.*s, doesn't match %.*s",
+ cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n",
__func__,
(int)dfs_host_len, dfs_host,
(int)tcp_host_len, tcp_host);
- continue;
- }
- scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len, share);
+ rc = match_target_ip(server, dfs_host, dfs_host_len,
+ &target_match);
+ if (rc) {
+ cifs_dbg(VFS, "%s: failed to match target ip: %d\n",
+ __func__, rc);
+ break;
+ }
- rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
- if (!rc) {
- rc = update_super_prepath(tcon, prefix, prefix_len);
- break;
+ if (!target_match) {
+ cifs_dbg(FYI, "%s: skipping target\n", __func__);
+ continue;
+ }
+ }
+
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%.*s\\IPC$",
+ (int)share_len, share);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len,
+ share);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ if (!rc) {
+ rc = update_super_prepath(tcon, prefix,
+ prefix_len);
+ break;
+ }
}
if (rc == -EREMOTE)
break;
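The reworked loop above no longer discards a DFS target just because its hostname differs from the one the TCP session was established to; when the names differ it falls back to comparing the target against the address the connection is actually using (match_target_ip() in the hunk) before skipping it. A compressed sketch of that decision, with a stubbed resolver standing in for the address comparison:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>   /* POSIX strcasecmp */

    /* Stand-in for "does this hostname resolve to the IP we are connected to?" */
    static bool resolves_to_connected_ip(const char *host)
    {
        /* assumption for the example: only this alias maps to our server */
        return strcmp(host, "fileserver-alias.example.com") == 0;
    }

    static bool target_usable(const char *tcp_host, const char *dfs_host)
    {
        if (strcasecmp(tcp_host, dfs_host) == 0)
            return true;                           /* same name, use it directly */
        return resolves_to_connected_ip(dfs_host); /* different name, same box? */
    }

    int main(void)
    {
        printf("%d\n", target_usable("srv1.example.com", "SRV1.example.com"));
        printf("%d\n", target_usable("srv1.example.com", "fileserver-alias.example.com"));
        printf("%d\n", target_usable("srv1.example.com", "other.example.com"));
        return 0;
    }

The hostnames above are made up for the example; the point is only the two-step match: name first, resolved address second.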
@@ -247,12 +264,12 @@ static inline int __smb2_reconnect(const struct nls_table *nlsc,
#endif
static int
-smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server)
{
int rc;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
- struct TCP_Server_Info *server;
int retries;
/*
@@ -281,12 +298,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
}
}
if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
- (!tcon->ses->server))
+ (!tcon->ses->server) || !server)
return -EIO;
ses = tcon->ses;
- server = ses->server;
-
retries = server->nr_targets;
/*
@@ -314,8 +329,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
- cifs_dbg(FYI, "%s: aborting reconnect due to a received"
- " signal by the process\n", __func__);
+ cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+ __func__);
return -ERESTARTSYS;
}
@@ -360,15 +375,31 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
goto out;
}
+ /*
+ * If we are reconnecting an extra channel, bind
+ */
+ if (server->is_channel) {
+ ses->binding = true;
+ ses->binding_chan = cifs_ses_find_chan(ses, server);
+ }
+
rc = cifs_negotiate_protocol(0, tcon->ses);
if (!rc && tcon->ses->need_reconnect) {
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
if ((rc == -EACCES) && !tcon->retry) {
rc = -EHOSTDOWN;
+ ses->binding = false;
+ ses->binding_chan = NULL;
mutex_unlock(&tcon->ses->session_mutex);
goto failed;
}
}
+ /*
+ * End of channel binding
+ */
+ ses->binding = false;
+ ses->binding_chan = NULL;
+
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out;
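Reconnecting an extra channel reuses the ordinary negotiate and session-setup calls, so the code above brackets them with ses->binding (and ses->binding_chan): the setup path can then tell that it is binding another channel to an existing session rather than creating a new one, and the flags are cleared on every exit, including the error path. A schematic of the bracket with toy types (not the cifs structures):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_ses {
        bool binding;          /* true while an extra channel is being bound */
        int  bound_channels;
    };

    static int toy_session_setup(struct toy_ses *ses)
    {
        if (ses->binding)
            ses->bound_channels++;   /* bind to the existing session */
        /* else: a full session setup would run here */
        return 0;
    }

    static int toy_reconnect_channel(struct toy_ses *ses, bool is_extra_channel)
    {
        int rc;

        if (is_extra_channel)
            ses->binding = true;     /* enter binding mode */

        rc = toy_session_setup(ses);

        ses->binding = false;        /* always leave binding mode, even on error */
        return rc;
    }

    int main(void)
    {
        struct toy_ses ses = { .binding = false, .bound_channels = 0 };

        toy_reconnect_channel(&ses, true);
        printf("bound_channels = %d\n", ses.bound_channels);
        return 0;
    }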
@@ -384,7 +415,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
/* If sess reconnected but tcon didn't, something strange ... */
- printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
+ pr_warn_once("reconnect tcon failed rc = %d\n", rc);
goto out;
}
@@ -419,7 +450,9 @@ failed:
}
static void
-fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
+fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ void *buf,
unsigned int *total_len)
{
struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf;
@@ -432,7 +465,7 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
*/
memset(buf, 0, 256);
- smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon);
+ smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon, server);
spdu->StructureSize2 = cpu_to_le16(parmsize);
*total_len = parmsize + sizeof(struct smb2_sync_hdr);
@@ -444,7 +477,8 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
* function must have filled in request_buf pointer.
*/
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf, unsigned int *total_len)
+ struct TCP_Server_Info *server,
+ void **request_buf, unsigned int *total_len)
{
/* BB eventually switch this to SMB2 specific small buf size */
if (smb2_command == SMB2_SET_INFO)
@@ -456,7 +490,7 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
return -ENOMEM;
}
- fill_small_buf(smb2_command, tcon,
+ fill_small_buf(smb2_command, tcon, server,
(struct smb2_sync_hdr *)(*request_buf),
total_len);
@@ -470,27 +504,30 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
}
static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
void **request_buf, unsigned int *total_len)
{
int rc;
- rc = smb2_reconnect(smb2_command, tcon);
+ rc = smb2_reconnect(smb2_command, tcon, server);
if (rc)
return rc;
- return __smb2_plain_req_init(smb2_command, tcon, request_buf,
+ return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
total_len);
}
static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
void **request_buf, unsigned int *total_len)
{
/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
- return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf,
- total_len);
+ return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+ request_buf, total_len);
}
- return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len);
+ return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+ request_buf, total_len);
}
/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
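smb2_ioctl_req_init() special-cases FSCTL_VALIDATE_NEGOTIATE_INFO so that building that one ioctl never goes through smb2_reconnect(); validate-negotiate is itself issued while the session and tree connection are being (re)established, so routing it back through the reconnect check would presumably re-enter the path it is part of. A toy version of the guard, away from the cifs types:

    #include <stdio.h>

    enum toy_opcode { TOY_IOCTL_GENERIC, TOY_IOCTL_VALIDATE_NEGOTIATE };

    static int toy_reconnect_if_needed(void)
    {
        /* the session would be re-established here if it had dropped */
        return 0;
    }

    static int toy_build_request(enum toy_opcode op)
    {
        printf("building request %d\n", op);
        return 0;
    }

    static int toy_ioctl_req_init(enum toy_opcode op)
    {
        int rc;

        /* validate-negotiate runs as part of session setup itself,
         * so skip the reconnect check for it */
        if (op == TOY_IOCTL_VALIDATE_NEGOTIATE)
            return toy_build_request(op);

        rc = toy_reconnect_if_needed();
        if (rc)
            return rc;
        return toy_build_request(op);
    }

    int main(void)
    {
        toy_ioctl_req_init(TOY_IOCTL_VALIDATE_NEGOTIATE);
        toy_ioctl_req_init(TOY_IOCTL_GENERIC);
        return 0;
    }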
@@ -626,13 +663,13 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
/* If invalid preauth context warn but use what we requested, SHA-512 */
if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
- printk_once(KERN_WARNING "server sent bad preauth context\n");
+ pr_warn_once("server sent bad preauth context\n");
return;
}
if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
- printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
+ pr_warn_once("Invalid SMB3 hash algorithm count\n");
if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
- printk_once(KERN_WARNING "unknown SMB3 hash algorithm\n");
+ pr_warn_once("unknown SMB3 hash algorithm\n");
}
static void decode_compress_ctx(struct TCP_Server_Info *server,
@@ -642,15 +679,15 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
/* sizeof compress context is a one element compression capbility struct */
if (len < 10) {
- printk_once(KERN_WARNING "server sent bad compression cntxt\n");
+ pr_warn_once("server sent bad compression cntxt\n");
return;
}
if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
- printk_once(KERN_WARNING "illegal SMB3 compress algorithm count\n");
+ pr_warn_once("Invalid SMB3 compress algorithm count\n");
return;
}
if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
- printk_once(KERN_WARNING "unknown compression algorithm\n");
+ pr_warn_once("unknown compression algorithm\n");
return;
}
server->compress_algorithm = ctxt->CompressionAlgorithms[0];
@@ -663,18 +700,18 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
- printk_once(KERN_WARNING "server sent bad crypto ctxt len\n");
+ pr_warn_once("server sent bad crypto ctxt len\n");
return -EINVAL;
}
if (le16_to_cpu(ctxt->CipherCount) != 1) {
- printk_once(KERN_WARNING "illegal SMB3.11 cipher count\n");
+ pr_warn_once("Invalid SMB3.11 cipher count\n");
return -EINVAL;
}
cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
(ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM)) {
- printk_once(KERN_WARNING "invalid SMB3.11 cipher returned\n");
+ pr_warn_once("Invalid SMB3.11 cipher returned\n");
return -EINVAL;
}
server->cipher_type = ctxt->Ciphers[0];
@@ -774,7 +811,7 @@ create_posix_buf(umode_t mode)
buf->Name[14] = 0xCD;
buf->Name[15] = 0x7C;
buf->Mode = cpu_to_le32(mode);
- cifs_dbg(FYI, "mode on posix create 0%o", mode);
+ cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
return buf;
}
@@ -786,7 +823,7 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
iov[num].iov_base = create_posix_buf(mode);
if (mode == ACL_NO_MODE)
- cifs_dbg(FYI, "illegal mode\n");
+ cifs_dbg(FYI, "Invalid mode\n");
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
@@ -838,7 +875,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
return -EIO;
}
- rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -896,7 +934,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
@@ -904,9 +943,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
if (rc == -EOPNOTSUPP) {
- cifs_server_dbg(VFS, "Dialect not supported by server. Consider "
- "specifying vers=1.0 or vers=2.0 on mount for accessing"
- " older servers\n");
+ cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
goto neg_exit;
} else if (rc != 0)
goto neg_exit;
@@ -939,8 +976,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
} else if (le16_to_cpu(rsp->DialectRevision) !=
server->vals->protocol_id) {
/* if requested single dialect ensure returned dialect matched */
- cifs_server_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
- le16_to_cpu(rsp->DialectRevision));
+ cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
+ le16_to_cpu(rsp->DialectRevision));
return -EIO;
}
@@ -957,8 +994,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
else {
- cifs_server_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
- le16_to_cpu(rsp->DialectRevision));
+ cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
+ le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit;
}
@@ -1116,15 +1153,16 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rc = 0;
goto out_free_inbuf;
} else if (rc != 0) {
- cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
+ cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
+ rc);
rc = -EIO;
goto out_free_inbuf;
}
rc = -EIO;
if (rsplen != sizeof(*pneg_rsp)) {
- cifs_tcon_dbg(VFS, "invalid protocol negotiate response size: %d\n",
- rsplen);
+ cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
+ rsplen);
/* relax check since Mac returns max bufsize allowed on ioctl */
if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
@@ -1208,8 +1246,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
struct TCP_Server_Info *server = cifs_ses_server(ses);
unsigned int total_len;
- rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
+ (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -1286,6 +1325,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
/* BB add code to build os and lm fields */
rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+ cifs_ses_server(sess_data->ses),
&rqst,
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
@@ -1357,9 +1397,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
* sending us a response in an expected form
*/
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cifs_dbg(VFS,
- "bad cifs.upcall version. Expected %d got %d",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto out_put_spnego_key;
}
@@ -1369,8 +1408,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL);
if (!ses->auth_key.response) {
- cifs_dbg(VFS,
- "Kerberos can't allocate (%u bytes) memory",
+ cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
msg->sesskey_len);
rc = -ENOMEM;
goto out_put_spnego_key;
@@ -1584,8 +1622,7 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
- cifs_dbg(VFS,
- "Unable to select appropriate authentication method!");
+ cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
return -EINVAL;
}
@@ -1673,7 +1710,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
if (ses->need_reconnect)
goto smb2_session_already_dead;
- rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -1694,7 +1732,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
@@ -1735,7 +1774,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
__le16 *unc_path = NULL;
int flags = 0;
unsigned int total_len;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server;
+
+ /* always use master channel */
+ server = ses->server;
cifs_dbg(FYI, "TCON\n");
@@ -1756,8 +1798,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
tcon->tid = 0;
atomic_set(&tcon->num_remote_opens, 0);
- rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
+ (void **) &req, &total_len);
if (rc) {
kfree(unc_path);
return rc;
@@ -1796,7 +1838,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
/* Need 64 for max size write so ask for more in case not there yet */
req->sync_hdr.CreditRequest = cpu_to_le16(64);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
@@ -1881,8 +1924,9 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
close_shroot_lease(&tcon->crfid);
- rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
+ (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -1898,7 +1942,8 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -2452,6 +2497,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
int flags = 0;
unsigned int total_len;
__le16 *utf16_path = NULL;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
cifs_dbg(FYI, "mkdir\n");
@@ -2460,13 +2506,14 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
if (!utf16_path)
return -ENOMEM;
- if (!ses || !(ses->server)) {
+ if (!ses || !server) {
rc = -EIO;
goto err_free_path;
}
/* resource #2: request */
- rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
goto err_free_path;
@@ -2552,7 +2599,8 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
/* resource #4: response buffer */
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
@@ -2581,10 +2629,10 @@ err_free_path:
}
int
-SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
+SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst, __u8 *oplock,
struct cifs_open_parms *oparms, __le16 *path)
{
- struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_create_req *req;
unsigned int n_iov = 2;
__u32 file_attributes = 0;
@@ -2595,7 +2643,8 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
__le16 *copy_path;
int rc;
- rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -2767,9 +2816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
{
struct smb_rqst rqst;
struct smb2_create_rsp *rsp = NULL;
- struct TCP_Server_Info *server;
struct cifs_tcon *tcon = oparms->tcon;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct kvec iov[SMB2_CREATE_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
@@ -2777,9 +2826,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
int flags = 0;
cifs_dbg(FYI, "create/open\n");
- if (ses && (ses->server))
- server = ses->server;
- else
+ if (!ses || !server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -2790,14 +2837,16 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
- rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst, oplock, oparms, path);
if (rc)
goto creat_exit;
trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
oparms->create_options, oparms->desired_access);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2812,8 +2861,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
trace_smb3_open_err(xid, tcon->tid, ses->Suid,
oparms->create_options, oparms->desired_access, rc);
if (rc == -EREMCHG) {
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
tcon->need_reconnect = true;
}
goto creat_exit;
@@ -2849,7 +2898,8 @@ creat_exit:
}
int
-SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size)
@@ -2860,7 +2910,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
int rc;
char *in_data_buf;
- rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
+ rc = smb2_ioctl_req_init(opcode, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -2922,7 +2973,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
* response size smaller.
*/
req->MaxOutputResponse = cpu_to_le32(max_response_size);
-
+ req->sync_hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(max_response_size, SMB2_MAX_BUFFER_SIZE));
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
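Charging credits from the requested output size keeps large-response ioctls within SMB3 crediting rules: roughly one credit per 64 KiB of the biggest buffer involved, rounded up. A small worked example of the same arithmetic, assuming the usual 65536-byte value of SMB2_MAX_BUFFER_SIZE:

    #include <stdio.h>

    #define TOY_MAX_BUFFER_SIZE 65536u                  /* assumed SMB2_MAX_BUFFER_SIZE */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int sizes[] = { 1024, 65536, 65537, 1048576 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("max_response_size %7u -> credit charge %u\n",
                   sizes[i], DIV_ROUND_UP(sizes[i], TOY_MAX_BUFFER_SIZE));
        /* prints 1, 1, 2 and 16 respectively */
        return 0;
    }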
@@ -2960,12 +3011,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb_rqst rqst;
struct smb2_ioctl_rsp *rsp = NULL;
struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
struct kvec iov[SMB2_IOCTL_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
- struct TCP_Server_Info *server;
cifs_dbg(FYI, "SMB2 IOCTL\n");
@@ -2976,14 +3027,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (plen)
*plen = 0;
- if (tcon)
- ses = tcon->ses;
- else
+ if (!tcon)
return -EIO;
+ ses = tcon->ses;
if (!ses)
return -EIO;
- server = ses->server;
+
+ server = cifs_pick_channel(ses);
if (!server)
return -EIO;
@@ -2995,12 +3046,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
- rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+ rc = SMB2_ioctl_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid, opcode,
is_fsctl, in_data, indatalen, max_out_data_len);
if (rc)
goto ioctl_exit;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -3088,7 +3141,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
}
int
-SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, bool query_attrs)
{
struct smb2_close_req *req;
@@ -3096,7 +3150,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3127,6 +3182,7 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_rqst rqst;
struct smb2_close_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype = CIFS_NO_BUFFER;
@@ -3136,7 +3192,7 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
cifs_dbg(FYI, "Close\n");
- if (!ses || !(ses->server))
+ if (!ses || !server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -3152,12 +3208,14 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
query_attrs = true;
trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
- rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid,
+ rc = SMB2_close_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid,
query_attrs);
if (rc)
goto close_exit;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -3225,7 +3283,7 @@ smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
}
if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
- cifs_dbg(VFS, "illegal server response, bad offset to data\n");
+ cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
return -EINVAL;
}
@@ -3257,7 +3315,8 @@ smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
}
int
-SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type, u32 additional_info,
size_t output_len, size_t input_len, void *input)
@@ -3267,8 +3326,8 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3320,7 +3379,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
if (!ses)
return -EIO;
- server = ses->server;
+ server = cifs_pick_channel(ses);
if (!server)
return -EIO;
@@ -3332,7 +3391,8 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid,
info_class, info_type, additional_info,
output_len, 0, NULL);
if (rc)
@@ -3341,7 +3401,8 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -3426,15 +3487,17 @@ SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
static int
SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
- struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid,
- u32 completion_filter, bool watch_tree)
+ struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ u64 persistent_fid, u64 volatile_fid,
+ u32 completion_filter, bool watch_tree)
{
struct smb2_change_notify_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3461,6 +3524,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
u32 completion_filter)
{
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct smb_rqst rqst;
struct kvec iov[1];
struct kvec rsp_iov = {NULL, 0};
@@ -3469,7 +3533,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
cifs_dbg(FYI, "change notify\n");
- if (!ses || !(ses->server))
+ if (!ses || !server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -3480,14 +3544,16 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = SMB2_notify_init(xid, &rqst, tcon, persistent_fid, volatile_fid,
+ rc = SMB2_notify_init(xid, &rqst, tcon, server,
+ persistent_fid, volatile_fid,
completion_filter, watch_tree);
if (rc)
goto cnotify_exit;
trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
(u8)watch_tree, completion_filter);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
@@ -3577,7 +3643,7 @@ void smb2_reconnect_server(struct work_struct *work)
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
- rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
if (!rc)
cifs_reopen_persistent_handles(tcon);
else
@@ -3617,7 +3683,8 @@ SMB2_echo(struct TCP_Server_Info *server)
return rc;
}
- rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
+ rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
+ (void **)&req, &total_len);
if (rc)
return rc;
@@ -3644,14 +3711,16 @@ SMB2_flush_free(struct smb_rqst *rqst)
int
SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
- struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid)
+ struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ u64 persistent_fid, u64 volatile_fid)
{
struct smb2_flush_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3672,6 +3741,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb_rqst rqst;
struct kvec iov[1];
struct kvec rsp_iov = {NULL, 0};
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int resp_buftype = CIFS_NO_BUFFER;
int flags = 0;
int rc = 0;
@@ -3688,12 +3758,14 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = SMB2_flush_init(xid, &rqst, tcon, persistent_fid, volatile_fid);
+ rc = SMB2_flush_init(xid, &rqst, tcon, server,
+ persistent_fid, volatile_fid);
if (rc)
goto flush_exit;
trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
@@ -3721,14 +3793,13 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
int rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_sync_hdr *shdr;
- struct TCP_Server_Info *server;
+ struct TCP_Server_Info *server = io_parms->server;
- rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
- total_len);
+ rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
+ (void **) &req, total_len);
if (rc)
return rc;
- server = io_parms->tcon->ses->server;
if (server == NULL)
return -ECONNABORTED;
@@ -3757,8 +3828,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
struct smbd_buffer_descriptor_v1 *v1;
- bool need_invalidate =
- io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;
+ bool need_invalidate = server->dialect == SMB30_PROT_ID;
rdata->mr = smbd_register_mr(
server->smbd_conn, rdata->pages,
@@ -3815,7 +3885,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
+ struct TCP_Server_Info *server = rdata->server;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
struct cifs_credits credits = { .value = 0, .instance = 0 };
@@ -3827,6 +3897,10 @@ smb2_readv_callback(struct mid_q_entry *mid)
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
+ WARN_ONCE(rdata->server != mid->server,
+ "rdata server %p != mid server %p",
+ rdata->server, mid->server);
+
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
__func__, mid->mid, mid->mid_state, rdata->result,
rdata->bytes);
@@ -3904,20 +3978,23 @@ smb2_async_readv(struct cifs_readdata *rdata)
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 1 };
struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
unsigned int total_len;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
+ if (!rdata->server)
+ rdata->server = cifs_pick_channel(tcon->ses);
+
io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
+ io_parms.server = server = rdata->server;
io_parms.offset = rdata->offset;
io_parms.length = rdata->bytes;
io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
io_parms.pid = rdata->pid;
- server = io_parms.tcon->ses->server;
-
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
if (rc)
@@ -3945,7 +4022,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
}
kref_get(&rdata->refcount);
- rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
+ rc = cifs_call_async(server, &rqst,
cifs_readv_receive, smb2_readv_callback,
smb3_handle_read_data, rdata, flags,
&rdata->credits);
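Both the async read path here and the read/write paths below default the per-request server lazily: if the caller already pinned a channel (for instance a retry that should stay on the channel the request was first built for), that choice is honoured, otherwise a channel is picked at submission time. The pattern reduced to its essentials, with placeholder types and a placeholder round-robin policy:

    #include <stdio.h>

    struct toy_server { int id; };

    static struct toy_server channels[2] = { { .id = 0 }, { .id = 1 } };

    static struct toy_server *toy_pick_channel(void)
    {
        static int next;
        return &channels[next++ % 2];     /* placeholder selection policy */
    }

    struct toy_io_parms {
        struct toy_server *server;        /* NULL: let the submit path choose */
    };

    static int toy_submit(struct toy_io_parms *io)
    {
        if (!io->server)                  /* honour a caller-pinned channel */
            io->server = toy_pick_channel();
        printf("submitting on channel %d\n", io->server->id);
        return 0;
    }

    int main(void)
    {
        struct toy_io_parms unpinned = { .server = NULL };
        struct toy_io_parms pinned   = { .server = &channels[0] };

        toy_submit(&unpinned);   /* a channel gets picked here */
        toy_submit(&pinned);     /* stays on channel 0 */
        return 0;
    }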
@@ -3977,6 +4054,9 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
int flags = CIFS_LOG_ERROR;
struct cifs_ses *ses = io_parms->tcon->ses;
+ if (!io_parms->server)
+ io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+
*nbytes = 0;
rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
if (rc)
@@ -3992,7 +4072,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, io_parms->server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -4048,11 +4129,15 @@ smb2_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
+ struct TCP_Server_Info *server = wdata->server;
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
struct cifs_credits credits = { .value = 0, .instance = 0 };
+ WARN_ONCE(wdata->server != mid->server,
+ "wdata server %p != mid server %p",
+ wdata->server, mid->server);
+
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
@@ -4108,8 +4193,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
tcon->tid, tcon->ses->Suid, wdata->offset,
wdata->bytes, wdata->result);
if (wdata->result == -ENOSPC)
- printk_once(KERN_WARNING "Out of space writing to %s\n",
- tcon->treeName);
+ pr_warn_once("Out of space writing to %s\n",
+ tcon->treeName);
} else
trace_smb3_write_done(0 /* no xid */,
wdata->cfile->fid.persistent_fid,
@@ -4130,12 +4215,16 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct smb2_write_req *req = NULL;
struct smb2_sync_hdr *shdr;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
+ struct TCP_Server_Info *server = wdata->server;
struct kvec iov[1];
struct smb_rqst rqst = { };
unsigned int total_len;
- rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
+ if (!wdata->server)
+ server = wdata->server = cifs_pick_channel(tcon->ses);
+
+ rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4274,20 +4363,24 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
struct kvec rsp_iov;
int flags = 0;
unsigned int total_len;
+ struct TCP_Server_Info *server;
*nbytes = 0;
if (n_vec < 1)
return rc;
- rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
- &total_len);
+ if (!io_parms->server)
+ io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+ server = io_parms->server;
+ if (server == NULL)
+ return -ECONNABORTED;
+
+ rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
- if (io_parms->tcon->ses->server == NULL)
- return -ECONNABORTED;
-
if (smb3_encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -4316,7 +4409,8 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
rqst.rq_iov = iov;
rqst.rq_nvec = n_vec + 1;
- rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
+ rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
+ &rqst,
&resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -4490,11 +4584,12 @@ num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
* Readdir/FindFirst
*/
int SMB2_query_directory_init(const unsigned int xid,
- struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
int index, int info_level)
{
- struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_query_directory_req *req;
unsigned char *bufptr;
__le16 asteriks = cpu_to_le16('*');
@@ -4505,8 +4600,8 @@ int SMB2_query_directory_init(const unsigned int xid,
struct kvec *iov = rqst->rq_iov;
int len, rc;
- rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4632,7 +4727,7 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
- cifs_tcon_dbg(VFS, "illegal search buffer type\n");
+ cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
return 0;
}
@@ -4649,6 +4744,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec rsp_iov;
int rc = 0;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
if (!ses || !(ses->server))
@@ -4662,13 +4758,15 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
- rc = SMB2_query_directory_init(xid, tcon, &rqst, persistent_fid,
+ rc = SMB2_query_directory_init(xid, tcon, server,
+ &rqst, persistent_fid,
volatile_fid, index,
srch_inf->info_level);
if (rc)
goto qdir_exit;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -4705,17 +4803,19 @@ qdir_exit:
}
int
-SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
- u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
- u8 info_type, u32 additional_info,
- void **data, unsigned int *size)
+SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid, u32 pid,
+ u8 info_class, u8 info_type, u32 additional_info,
+ void **data, unsigned int *size)
{
struct smb2_set_info_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int i, total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4766,9 +4866,10 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
- if (!ses || !(ses->server))
+ if (!ses || !server)
return -EIO;
if (!num)
@@ -4785,7 +4886,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = num;
- rc = SMB2_set_info_init(tcon, &rqst, persistent_fid, volatile_fid, pid,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid, pid,
info_class, info_type, additional_info,
data, size);
if (rc) {
@@ -4794,7 +4896,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
}
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
&rsp_iov);
SMB2_set_info_free(&rqst);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -4857,6 +4960,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct smb2_oplock_break *req = NULL;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = CIFS_OBREAK_OP;
unsigned int total_len;
struct kvec iov[1];
@@ -4864,8 +4968,8 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buf_type;
cifs_dbg(FYI, "SMB2_oplock_break\n");
- rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4886,7 +4990,8 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -4929,8 +5034,10 @@ copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
}
static int
-build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
- int outbuf_len, u64 persistent_fid, u64 volatile_fid)
+build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ int level, int outbuf_len, u64 persistent_fid,
+ u64 volatile_fid)
{
int rc;
struct smb2_query_info_req *req;
@@ -4938,11 +5045,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
cifs_dbg(FYI, "Query FSInfo level %d\n", level);
- if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
+ if ((tcon->ses == NULL) || server == NULL)
return -EIO;
- rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4972,10 +5079,12 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
FILE_SYSTEM_POSIX_INFO *info = NULL;
int flags = 0;
- rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
+ rc = build_qfs_info_req(&iov, tcon, server,
+ FS_POSIX_INFORMATION,
sizeof(FILE_SYSTEM_POSIX_INFO),
persistent_fid, volatile_fid);
if (rc)
@@ -4988,7 +5097,8 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -5020,10 +5130,12 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct smb2_fs_full_size_info *info = NULL;
int flags = 0;
- rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
+ rc = build_qfs_info_req(&iov, tcon, server,
+ FS_FULL_SIZE_INFORMATION,
sizeof(struct smb2_fs_full_size_info),
persistent_fid, volatile_fid);
if (rc)
@@ -5036,7 +5148,8 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -5068,6 +5181,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype, max_len, min_len;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
unsigned int rsp_len, offset;
int flags = 0;
@@ -5088,7 +5202,8 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
return -EINVAL;
}
- rc = build_qfs_info_req(&iov, tcon, level, max_len,
+ rc = build_qfs_info_req(&iov, tcon, server,
+ level, max_len,
persistent_fid, volatile_fid);
if (rc)
return rc;
@@ -5100,7 +5215,8 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -5153,10 +5269,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int count;
int flags = CIFS_NO_RSP_BUF;
unsigned int total_len;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
- rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -5182,7 +5300,8 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
- rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
+ rc = cifs_send_recv(xid, tcon->ses, server,
+ &rqst, &resp_buf_type, flags,
&rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -5227,10 +5346,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buf_type;
__u64 *please_key_high;
__u64 *please_key_low;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
cifs_dbg(FYI, "SMB2_lease_break\n");
- rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -5253,7 +5373,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
please_key_low = (__u64 *)lease_key;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 10acf90f858d..3b0e6acf9d7d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -143,8 +143,17 @@ struct smb2_transform_hdr {
__u64 SessionId;
} __packed;
+/* See MS-SMB2 2.2.42 */
+struct smb2_compression_transform_hdr {
+ __le32 ProtocolId; /* 0xFC 'S' 'M' 'B' */
+ __le32 OriginalCompressedSegmentSize;
+ __le16 CompressionAlgorithm;
+ __le16 Flags;
+ __le16 Length; /* if chained it is length, else offset */
+} __packed;
+
/* See MS-SMB2 2.2.42.1 */
-struct compression_playload_header {
+struct compression_payload_header {
__le16 AlgorithmId;
__le16 Reserved;
__le32 Length;
@@ -333,7 +342,7 @@ struct smb2_encryption_neg_context {
#define SMB3_COMPRESS_LZ77 cpu_to_le16(0x0002)
#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
/* Pattern scanning algorithm See MS-SMB2 3.1.4.4.1 */
-#define SMB3_COMPRESS_PATTERN cpu_to_le16(0x0004)
+#define SMB3_COMPRESS_PATTERN cpu_to_le16(0x0004) /* Pattern_V1 */
/* Compression Flags */
#define SMB2_COMPRESSION_CAPABILITIES_FLAG_NONE cpu_to_le32(0x00000000)
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 087d5f14320b..71ba74792c9e 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -143,7 +143,9 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
struct smb2_file_all_info *buf,
struct create_posix_rsp *posix,
struct kvec *err_iov, int *resp_buftype);
-extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_open_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
__u8 *oplock, struct cifs_open_parms *oparms,
__le16 *path);
extern void SMB2_open_free(struct smb_rqst *rqst);
@@ -151,7 +153,9 @@ extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
char **out_data, u32 *plen /* returned data len */);
-extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size);
@@ -165,19 +169,25 @@ extern int __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_file_network_open_info *pbuf);
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
-extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
- u64 persistent_fid, u64 volatile_fid, bool query_attrs);
+extern int SMB2_close_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ bool query_attrs);
extern void SMB2_close_free(struct smb_rqst *rqst);
extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
u64 persistent_file_id, u64 volatile_file_id);
extern void SMB2_flush_free(struct smb_rqst *rqst);
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct smb2_file_all_info *data);
-extern int SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_query_info_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type,
u32 additional_info, size_t output_len,
@@ -201,6 +211,7 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf);
extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
int index, int info_level);
@@ -208,7 +219,9 @@ extern void SMB2_query_directory_free(struct smb_rqst *rqst);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
__le64 *eof);
-extern int SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_set_info_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 pid,
u8 info_class, u8 info_type, u32 additional_info,
void **data, unsigned int *size);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 1a5834a5d597..b029ed31ef91 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -294,15 +294,12 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
{
- log_rdma_event(INFO, "resp message min_version %u max_version %u "
- "negotiated_version %u credits_requested %u "
- "credits_granted %u status %u max_readwrite_size %u "
- "preferred_send_size %u max_receive_size %u "
- "max_fragmented_size %u\n",
- resp->min_version, resp->max_version, resp->negotiated_version,
- resp->credits_requested, resp->credits_granted, resp->status,
- resp->max_readwrite_size, resp->preferred_send_size,
- resp->max_receive_size, resp->max_fragmented_size);
+ log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
+ resp->min_version, resp->max_version,
+ resp->negotiated_version, resp->credits_requested,
+ resp->credits_granted, resp->status,
+ resp->max_readwrite_size, resp->preferred_send_size,
+ resp->max_receive_size, resp->max_fragmented_size);
}
/*
@@ -450,10 +447,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbd_connection *info = response->info;
int data_length = 0;
- log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
- "byte_len=%d pkey_index=%x\n",
- response, response->type, wc->status, wc->opcode,
- wc->byte_len, wc->pkey_index);
+ log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
+ response, response->type, wc->status, wc->opcode,
+ wc->byte_len, wc->pkey_index);
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
@@ -519,12 +515,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
wake_up_interruptible(&info->wait_send_queue);
}
- log_incoming(INFO, "data flags %d data_offset %d "
- "data_length %d remaining_data_length %d\n",
- le16_to_cpu(data_transfer->flags),
- le32_to_cpu(data_transfer->data_offset),
- le32_to_cpu(data_transfer->data_length),
- le32_to_cpu(data_transfer->remaining_data_length));
+ log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
+ le16_to_cpu(data_transfer->flags),
+ le32_to_cpu(data_transfer->data_offset),
+ le32_to_cpu(data_transfer->data_length),
+ le32_to_cpu(data_transfer->remaining_data_length));
/* Send a KEEP_ALIVE response right away if requested */
info->keep_alive_requested = KEEP_ALIVE_NONE;
@@ -632,14 +627,10 @@ static int smbd_ia_open(
}
if (!frwr_is_supported(&info->id->device->attrs)) {
- log_rdma_event(ERR,
- "Fast Registration Work Requests "
- "(FRWR) is not supported\n");
- log_rdma_event(ERR,
- "Device capability flags = %llx "
- "max_fast_reg_page_list_len = %u\n",
- info->id->device->attrs.device_cap_flags,
- info->id->device->attrs.max_fast_reg_page_list_len);
+ log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
+ log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
+ info->id->device->attrs.device_cap_flags,
+ info->id->device->attrs.max_fast_reg_page_list_len);
rc = -EPROTONOSUPPORT;
goto out2;
}
@@ -898,13 +889,12 @@ wait_send_queue:
packet->remaining_data_length = cpu_to_le32(remaining_data_length);
packet->padding = 0;
- log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
- "data_offset=%d data_length=%d remaining_data_length=%d\n",
- le16_to_cpu(packet->credits_requested),
- le16_to_cpu(packet->credits_granted),
- le32_to_cpu(packet->data_offset),
- le32_to_cpu(packet->data_length),
- le32_to_cpu(packet->remaining_data_length));
+ log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
+ le16_to_cpu(packet->credits_requested),
+ le16_to_cpu(packet->credits_granted),
+ le32_to_cpu(packet->data_offset),
+ le32_to_cpu(packet->data_length),
+ le32_to_cpu(packet->remaining_data_length));
/* Map the packet to DMA */
header_length = sizeof(struct smbd_data_transfer);
@@ -1078,11 +1068,9 @@ static int smbd_negotiate(struct smbd_connection *info)
response->type = SMBD_NEGOTIATE_RESP;
rc = smbd_post_recv(info, response);
- log_rdma_event(INFO,
- "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
- "iov.lkey=%x\n",
- rc, response->sge.addr,
- response->sge.length, response->sge.lkey);
+ log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x iov.lkey=%x\n",
+ rc, response->sge.addr,
+ response->sge.length, response->sge.lkey);
if (rc)
return rc;
@@ -1540,25 +1528,19 @@ static struct smbd_connection *_smbd_get_connection(
if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
- log_rdma_event(ERR,
- "consider lowering send_credit_target = %d. "
- "Possible CQE overrun, device "
- "reporting max_cpe %d max_qp_wr %d\n",
- smbd_send_credit_target,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
+ smbd_send_credit_target,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
goto config_failed;
}
if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
- log_rdma_event(ERR,
- "consider lowering receive_credit_max = %d. "
- "Possible CQE overrun, device "
- "reporting max_cpe %d max_qp_wr %d\n",
- smbd_receive_credit_max,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
+ smbd_receive_credit_max,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
goto config_failed;
}
@@ -1865,11 +1847,9 @@ again:
to_read -= to_copy;
data_read += to_copy;
- log_read(INFO, "_get_first_reassembly memcpy %d bytes "
- "data_transfer_length-offset=%d after that "
- "to_read=%d data_read=%d offset=%d\n",
- to_copy, data_length - offset,
- to_read, data_read, offset);
+ log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
+ to_copy, data_length - offset,
+ to_read, data_read, offset);
}
spin_lock_irq(&info->reassembly_queue_lock);
@@ -1878,10 +1858,9 @@ again:
spin_unlock_irq(&info->reassembly_queue_lock);
info->first_entry_offset = offset;
- log_read(INFO, "returning to thread data_read=%d "
- "reassembly_data_length=%d first_entry_offset=%d\n",
- data_read, info->reassembly_data_length,
- info->first_entry_offset);
+ log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
+ data_read, info->reassembly_data_length,
+ info->first_entry_offset);
read_rfc1002_done:
return data_read;
}
@@ -1952,7 +1931,7 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
if (iov_iter_rw(&msg->msg_iter) == WRITE) {
/* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
+ cifs_dbg(VFS, "Invalid msg iter dir %u\n",
iov_iter_rw(&msg->msg_iter));
rc = -EINVAL;
goto out;
@@ -1974,7 +1953,7 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
default:
/* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
+ cifs_dbg(VFS, "Invalid msg type %d\n",
iov_iter_type(&msg->msg_iter));
rc = -EINVAL;
}
@@ -2043,10 +2022,9 @@ next_rqst:
dump_smb(iov[i].iov_base, iov[i].iov_len);
- log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
- "rq_tailsz=%d buflen=%lu\n",
- rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
- rqst->rq_tailsz, smb_rqst_len(server, rqst));
+ log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
+ rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+ rqst->rq_tailsz, smb_rqst_len(server, rqst));
start = i = 0;
buflen = 0;
@@ -2056,11 +2034,9 @@ next_rqst:
if (i > start) {
remaining_data_length -=
(buflen-iov[i].iov_len);
- log_write(INFO, "sending iov[] from start=%d "
- "i=%d nvecs=%d "
- "remaining_data_length=%d\n",
- start, i, i-start,
- remaining_data_length);
+ log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
+ start, i, i - start,
+ remaining_data_length);
rc = smbd_post_send_data(
info, &iov[start], i-start,
remaining_data_length);
@@ -2069,10 +2045,9 @@ next_rqst:
} else {
/* iov[start] is too big, break it */
nvecs = (buflen+max_iov_size-1)/max_iov_size;
- log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
- " break to %d vectors\n",
- start, iov[start].iov_base,
- buflen, nvecs);
+ log_write(INFO, "iov[%d] iov_base=%p buflen=%d break to %d vectors\n",
+ start, iov[start].iov_base,
+ buflen, nvecs);
for (j = 0; j < nvecs; j++) {
vec.iov_base =
(char *)iov[start].iov_base +
@@ -2084,11 +2059,9 @@ next_rqst:
max_iov_size*(nvecs-1);
remaining_data_length -= vec.iov_len;
log_write(INFO,
- "sending vec j=%d iov_base=%p"
- " iov_len=%zu "
- "remaining_data_length=%d\n",
- j, vec.iov_base, vec.iov_len,
- remaining_data_length);
+ "sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
+ j, vec.iov_base, vec.iov_len,
+ remaining_data_length);
rc = smbd_post_send_data(
info, &vec, 1,
remaining_data_length);
@@ -2106,11 +2079,9 @@ next_rqst:
if (i == rqst->rq_nvec) {
/* send out all remaining vecs */
remaining_data_length -= buflen;
- log_write(INFO,
- "sending iov[] from start=%d i=%d "
- "nvecs=%d remaining_data_length=%d\n",
- start, i, i-start,
- remaining_data_length);
+ log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
+ start, i, i - start,
+ remaining_data_length);
rc = smbd_post_send_data(info, &iov[start],
i-start, remaining_data_length);
if (rc)
@@ -2134,10 +2105,9 @@ next_rqst:
if (j == nvecs-1)
size = buflen - j*max_iov_size;
remaining_data_length -= size;
- log_write(INFO, "sending pages i=%d offset=%d size=%d"
- " remaining_data_length=%d\n",
- i, j*max_iov_size+offset, size,
- remaining_data_length);
+ log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
+ i, j * max_iov_size + offset, size,
+ remaining_data_length);
rc = smbd_post_send_page(
info, rqst->rq_pages[i],
j*max_iov_size + offset,
@@ -2211,11 +2181,9 @@ static void smbd_mr_recovery_work(struct work_struct *work)
info->pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR,
- "ib_alloc_mr failed mr_type=%x "
- "max_frmr_depth=%x\n",
- info->mr_type,
- info->max_frmr_depth);
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+ info->mr_type,
+ info->max_frmr_depth);
smbd_disconnect_rdma_connection(info);
continue;
}
@@ -2278,9 +2246,8 @@ static int allocate_mr_list(struct smbd_connection *info)
smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
- "max_frmr_depth=%x\n",
- info->mr_type, info->max_frmr_depth);
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+ info->mr_type, info->max_frmr_depth);
goto out;
}
smbdirect_mr->sgl = kcalloc(
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 99760063e000..d11e31064679 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -112,7 +112,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
if (now < midEntry->when_alloc)
- cifs_server_dbg(VFS, "invalid mid allocation time\n");
+ cifs_server_dbg(VFS, "Invalid mid allocation time\n");
roundtrip_time = now - midEntry->when_alloc;
if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
@@ -151,12 +151,12 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
midEntry->when_sent, midEntry->when_received);
if (cifsFYI & CIFS_TIMER) {
- pr_debug(" CIFS slow rsp: cmd %d mid %llu",
- midEntry->command, midEntry->mid);
- cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
- now - midEntry->when_alloc,
- now - midEntry->when_sent,
- now - midEntry->when_received);
+ pr_debug("slow rsp: cmd %d mid %llu",
+ midEntry->command, midEntry->mid);
+ cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
+ now - midEntry->when_alloc,
+ now - midEntry->when_sent,
+ now - midEntry->when_received);
}
}
#endif
@@ -473,8 +473,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
return -ENOMEM;
if (!server->ops->init_transform_rq) {
- cifs_server_dbg(VFS, "Encryption requested but transform "
- "callback is missing\n");
+ cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
return -EIO;
}
@@ -989,8 +988,35 @@ cifs_cancelled_callback(struct mid_q_entry *mid)
DeleteMidQEntry(mid);
}
+/*
+ * Return a channel (master if none) of @ses that can be used to send
+ * regular requests.
+ *
+ * If we are currently binding a new channel (negprot/sess.setup),
+ * return the new incomplete channel.
+ */
+struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+{
+ uint index = 0;
+
+ if (!ses)
+ return NULL;
+
+ if (!ses->binding) {
+ /* round robin */
+ if (ses->chan_count > 1) {
+ index = (uint)atomic_inc_return(&ses->chan_seq);
+ index %= ses->chan_count;
+ }
+ return ses->chans[index].server;
+ } else {
+ return cifs_ses_server(ses);
+ }
+}
+
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const int flags, const int num_rqst, struct smb_rqst *rqst,
int *resp_buf_type, struct kvec *resp_iov)
{
@@ -1002,30 +1028,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
};
unsigned int instance;
char *buf;
- struct TCP_Server_Info *server;
optype = flags & CIFS_OP_MASK;
for (i = 0; i < num_rqst; i++)
resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
- if ((ses == NULL) || (ses->server == NULL)) {
+ if (!ses || !ses->server || !server) {
cifs_dbg(VFS, "Null session\n");
return -EIO;
}
- if (!ses->binding) {
- uint index = 0;
-
- if (ses->chan_count > 1) {
- index = (uint)atomic_inc_return(&ses->chan_seq);
- index %= ses->chan_count;
- }
- server = ses->chans[index].server;
- } else {
- server = cifs_ses_server(ses);
- }
-
if (server->tcpStatus == CifsExiting)
return -ENOENT;
@@ -1220,11 +1233,12 @@ out:
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct smb_rqst *rqst, int *resp_buf_type, const int flags,
struct kvec *resp_iov)
{
- return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
- resp_iov);
+ return compound_send_recv(xid, ses, server, flags, 1,
+ rqst, resp_buf_type, resp_iov);
}
int
@@ -1259,7 +1273,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rqst.rq_iov = new_iov;
rqst.rq_nvec = n_vec + 1;
- rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, resp_buf_type, flags, resp_iov);
if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
kfree(new_iov);
return rc;
@@ -1296,8 +1311,8 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
use ses->maxReq */
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
- len);
+ cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+ len);
return -EIO;
}
@@ -1437,8 +1452,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
use ses->maxReq */
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
- len);
+ cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+ len);
return -EIO;
}
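
For orientation, the new multichannel calling convention introduced above works roughly as in the sketch below; the enclosing caller, locals and flags are illustrative assumptions, and only cifs_pick_channel() and cifs_send_recv() come from this patch:

	/* Hedged sketch: pick a channel once, then pass it explicitly. */
	struct TCP_Server_Info *server = cifs_pick_channel(ses);
	struct kvec rsp_iov;
	int resp_buftype, rc;

	/* ... build rqst with the matching *_init() helper, handing it the
	 * same server so the request is framed for that channel ... */
	rc = cifs_send_recv(xid, ses, server, &rqst,
			    &resp_buftype, flags, &rsp_iov);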
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index e61f3fe8e32a..2d24c765cbd7 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -117,6 +117,11 @@
#define arch_setup_additional_pages compat_arch_setup_additional_pages
#endif
+#ifdef compat_elf_read_implies_exec
+#undef elf_read_implies_exec
+#define elf_read_implies_exec compat_elf_read_implies_exec
+#endif
+
/*
* Rename a few of the symbols that binfmt_elf.c will define.
* These are all local so the names don't really matter, but it
diff --git a/fs/coredump.c b/fs/coredump.c
index 478a0d810136..7237f07ff6be 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -393,7 +393,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
* of ->siglock provides a memory barrier.
*
* do_exit:
- * The caller holds mm->mmap_sem. This means that the task which
+ * The caller holds mm->mmap_lock. This means that the task which
* uses this mm can't pass exit_mm(), so it can't exit or clear
* its ->mm.
*
@@ -401,7 +401,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
* It does list_replace_rcu(&leader->tasks, &current->tasks),
* we must see either old or new leader, this does not matter.
* However, it can change p->sighand, so lock_task_sighand(p)
- * must be used. Since p->mm != NULL and we hold ->mmap_sem
+ * must be used. Since p->mm != NULL and we hold ->mmap_lock
* it can't fail.
*
* Note also that "g" can be the old leader with ->mm == NULL
@@ -445,12 +445,12 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
core_state->dumper.task = tsk;
core_state->dumper.next = NULL;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
if (!mm->core_state)
core_waiters = zap_threads(tsk, mm, core_state, exit_code);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (core_waiters > 0) {
struct core_thread *ptr;
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index f0d73d86cc1a..034e6973cead 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* internal.h - declarations internal to debugfs
*
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 416d9de35679..04fe9f525ac7 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -97,7 +97,6 @@ do { \
__LINE__, __FILE__, #x, jiffies); \
{do} \
printk("\n"); \
- BUG(); \
panic("DLM: Record message above and reboot.\n"); \
} \
}
@@ -421,7 +420,7 @@ struct dlm_message {
int m_bastmode;
int m_asts;
int m_result; /* 0 or -EXXX */
- char m_extra[0]; /* name or lvb */
+ char m_extra[]; /* name or lvb */
};
@@ -450,7 +449,7 @@ struct dlm_rcom {
uint64_t rc_id; /* match reply with request */
uint64_t rc_seq; /* sender's ls_recover_seq */
uint64_t rc_seq_reply; /* remote ls_recover_seq */
- char rc_buf[0];
+ char rc_buf[];
};
union dlm_packet {
@@ -506,7 +505,7 @@ struct rcom_lock {
__le16 rl_wait_type;
__le16 rl_namelen;
char rl_name[DLM_RESNAME_MAXLEN];
- char rl_lvb[0];
+ char rl_lvb[];
};
/*
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index afb8340918b8..e93670ecfae5 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -197,8 +197,6 @@ static struct kset *dlm_kset;
static int do_uevent(struct dlm_ls *ls, int in)
{
- int error;
-
if (in)
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
else
@@ -209,20 +207,12 @@ static int do_uevent(struct dlm_ls *ls, int in)
/* dlm_controld will see the uevent, do the necessary group management
and then write to sysfs to wake us */
- error = wait_event_interruptible(ls->ls_uevent_wait,
- test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
+ wait_event(ls->ls_uevent_wait,
+ test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
- log_rinfo(ls, "group event done %d %d", error, ls->ls_uevent_result);
-
- if (error)
- goto out;
+ log_rinfo(ls, "group event done %d", ls->ls_uevent_result);
- error = ls->ls_uevent_result;
- out:
- if (error)
- log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
- error, ls->ls_uevent_result);
- return error;
+ return ls->ls_uevent_result;
}
static int dlm_uevent(struct kset *kset, struct kobject *kobj,
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index e3d9f72c640d..4daf5dc2b51c 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -563,7 +563,7 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
lock = 1;
reply = 1;
break;
- };
+ }
spin_lock(&ls->ls_recover_lock);
status = ls->ls_recover_status;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 5264bac75115..e5cefa90b1ce 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -46,7 +46,7 @@ struct dlm_lock_params32 {
__u32 bastaddr;
__u32 lksb;
char lvb[DLM_USER_LVB_LEN];
- char name[0];
+ char name[];
};
struct dlm_write_request32 {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 78e41c7c3d05..df466ef81ddd 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -23,6 +23,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
+#include <linux/uio.h>
DEFINE_PER_CPU(int, eventfd_wake_count);
@@ -216,32 +217,32 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct eventfd_ctx *ctx = file->private_data;
- ssize_t res;
__u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
- if (count < sizeof(ucnt))
+ if (iov_iter_count(to) < sizeof(ucnt))
return -EINVAL;
-
spin_lock_irq(&ctx->wqh.lock);
- res = -EAGAIN;
- if (ctx->count > 0)
- res = sizeof(ucnt);
- else if (!(file->f_flags & O_NONBLOCK)) {
+ if (!ctx->count) {
+ if ((file->f_flags & O_NONBLOCK) ||
+ (iocb->ki_flags & IOCB_NOWAIT)) {
+ spin_unlock_irq(&ctx->wqh.lock);
+ return -EAGAIN;
+ }
__add_wait_queue(&ctx->wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ctx->count > 0) {
- res = sizeof(ucnt);
+ if (ctx->count)
break;
- }
if (signal_pending(current)) {
- res = -ERESTARTSYS;
- break;
+ __remove_wait_queue(&ctx->wqh, &wait);
+ __set_current_state(TASK_RUNNING);
+ spin_unlock_irq(&ctx->wqh.lock);
+ return -ERESTARTSYS;
}
spin_unlock_irq(&ctx->wqh.lock);
schedule();
@@ -250,17 +251,14 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (likely(res > 0)) {
- eventfd_ctx_do_read(ctx, &ucnt);
- if (waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
- }
+ eventfd_ctx_do_read(ctx, &ucnt);
+ if (waitqueue_active(&ctx->wqh))
+ wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
spin_unlock_irq(&ctx->wqh.lock);
-
- if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+ if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
return -EFAULT;
- return res;
+ return sizeof(ucnt);
}
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -329,7 +327,7 @@ static const struct file_operations eventfd_fops = {
#endif
.release = eventfd_release,
.poll = eventfd_poll,
- .read = eventfd_read,
+ .read_iter = eventfd_read,
.write = eventfd_write,
.llseek = noop_llseek,
};
@@ -406,6 +404,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
static int do_eventfd(unsigned int count, int flags)
{
struct eventfd_ctx *ctx;
+ struct file *file;
int fd;
/* Check the EFD_* constants for consistency. */
@@ -425,11 +424,24 @@ static int do_eventfd(unsigned int count, int flags)
ctx->flags = flags;
ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
- fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
- O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+ flags &= EFD_SHARED_FCNTL_FLAGS;
+ flags |= O_RDWR;
+ fd = get_unused_fd_flags(flags);
if (fd < 0)
- eventfd_free_ctx(ctx);
+ goto err;
+
+ file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ fd = PTR_ERR(file);
+ goto err;
+ }
+ file->f_mode |= FMODE_NOWAIT;
+ fd_install(fd, file);
+ return fd;
+err:
+ eventfd_free_ctx(ctx);
return fd;
}
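
The eventfd_read() conversion to ->read_iter above (together with FMODE_NOWAIT) keeps the userspace contract unchanged; a minimal, self-contained illustration of that contract, independent of the patch, is:

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/eventfd.h>

	int main(void)
	{
		uint64_t val;
		int fd = eventfd(3, EFD_NONBLOCK);	/* counter starts at 3 */

		if (fd < 0)
			return 1;
		/* reads must supply at least 8 bytes; the read returns the
		 * whole counter and resets it to 0 */
		if (read(fd, &val, sizeof(val)) == sizeof(val))
			printf("counter=%llu\n", (unsigned long long)val);
		/* counter is now 0; with EFD_NONBLOCK this fails with EAGAIN */
		if (read(fd, &val, sizeof(val)) < 0)
			perror("read");
		close(fd);
		return 0;
	}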
diff --git a/fs/exec.c b/fs/exec.c
index 2c465119affc..e6e8a9a70327 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -72,6 +72,8 @@
#include <trace/events/sched.h>
+static int bprm_creds_from_file(struct linux_binprm *bprm);
+
int suid_dumpable = 0;
static LIST_HEAD(formats);
@@ -250,7 +252,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
return -ENOMEM;
vma_set_anonymous(vma);
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
err = -EINTR;
goto err_free;
}
@@ -272,11 +274,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
goto err;
mm->stack_vm = mm->total_vm = 1;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
err_free:
bprm->vma = NULL;
vm_area_free(vma);
@@ -588,24 +590,48 @@ out:
}
/*
- * Like copy_strings, but get argv and its values from kernel memory.
+ * Copy an argument/environment string from the kernel to the process's stack.
*/
-int copy_strings_kernel(int argc, const char *const *__argv,
- struct linux_binprm *bprm)
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
- int r;
- mm_segment_t oldfs = get_fs();
- struct user_arg_ptr argv = {
- .ptr.native = (const char __user *const __user *)__argv,
- };
+ int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
+ unsigned long pos = bprm->p;
+
+ if (len == 0)
+ return -EFAULT;
+ if (!valid_arg_len(bprm, len))
+ return -E2BIG;
+
+ /* We're going to work our way backwards. */
+ arg += len;
+ bprm->p -= len;
+ if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
+ return -E2BIG;
- set_fs(KERNEL_DS);
- r = copy_strings(argc, argv, bprm);
- set_fs(oldfs);
+ while (len > 0) {
+ unsigned int bytes_to_copy = min_t(unsigned int, len,
+ min_not_zero(offset_in_page(pos), PAGE_SIZE));
+ struct page *page;
+ char *kaddr;
+
+ pos -= bytes_to_copy;
+ arg -= bytes_to_copy;
+ len -= bytes_to_copy;
+
+ page = get_arg_page(bprm, pos, 1);
+ if (!page)
+ return -E2BIG;
+ kaddr = kmap_atomic(page);
+ flush_arg_page(bprm, pos & PAGE_MASK, page);
+ memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
+ flush_kernel_dcache_page(page);
+ kunmap_atomic(kaddr);
+ put_arg_page(page);
+ }
- return r;
+ return 0;
}
-EXPORT_SYMBOL(copy_strings_kernel);
+EXPORT_SYMBOL(copy_string_kernel);
#ifdef CONFIG_MMU
@@ -737,7 +763,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vm_flags = VM_STACK_FLAGS;
@@ -799,7 +825,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
ret = -EFAULT;
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
@@ -1027,14 +1053,17 @@ out:
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
+#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
+ defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
if (res > 0)
- flush_icache_range(addr, addr + len);
+ flush_icache_user_range(addr, addr + len);
return res;
}
EXPORT_SYMBOL(read_code);
+#endif
/*
* Maps the mm_struct mm into the current task struct.
@@ -1051,22 +1080,23 @@ static int exec_mmap(struct mm_struct *mm)
tsk = current;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
+ if (old_mm)
+ sync_mm_rss(old_mm);
ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
if (ret)
return ret;
if (old_mm) {
- sync_mm_rss(old_mm);
/*
* Make sure that if there is a core dump in progress
* for the old mm, we get out and die instead of going
- * through with the exec. We must hold mmap_sem around
+ * through with the exec. We must hold mmap_lock around
* checking core_state and changing tsk->mm.
*/
- down_read(&old_mm->mmap_sem);
+ mmap_read_lock(old_mm);
if (unlikely(old_mm->core_state)) {
- up_read(&old_mm->mmap_sem);
+ mmap_read_unlock(old_mm);
mutex_unlock(&tsk->signal->exec_update_mutex);
return -EINTR;
}
@@ -1082,7 +1112,7 @@ static int exec_mmap(struct mm_struct *mm)
vmacache_flush(tsk);
task_unlock(tsk);
if (old_mm) {
- up_read(&old_mm->mmap_sem);
+ mmap_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
mm_update_next_owner(old_mm);
@@ -1093,12 +1123,6 @@ static int exec_mmap(struct mm_struct *mm)
return 0;
}
-/*
- * This function makes sure the current process has its own signal table,
- * so that flush_signal_handlers can later reset the handlers without
- * disturbing other processes. (Other processes might share the signal
- * table via the CLONE_SIGHAND option to clone().)
- */
static int de_thread(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
@@ -1176,7 +1200,6 @@ static int de_thread(struct task_struct *tsk)
tsk->start_boottime = leader->start_boottime;
BUG_ON(!same_thread_group(leader, tsk));
- BUG_ON(has_group_leader_pid(tsk));
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
@@ -1186,11 +1209,8 @@ static int de_thread(struct task_struct *tsk)
/* Become a process group leader with the old leader's pid.
* The old leader becomes a thread of this thread group.
- * Note: The old leader also uses this pid until release_task
- * is called. Odd but simple and correct.
*/
- tsk->pid = leader->pid;
- change_pid(tsk, PIDTYPE_PID, task_pid(leader));
+ exchange_tids(tsk, leader);
transfer_pid(leader, tsk, PIDTYPE_TGID);
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
@@ -1240,6 +1260,12 @@ killed:
}
+/*
+ * This function makes sure the current process has its own signal table,
+ * so that flush_signal_handlers can later reset the handlers without
+ * disturbing other processes. (Other processes might share the signal
+ * table via the CLONE_SIGHAND option to clone().)
+ */
static int unshare_sighand(struct task_struct *me)
{
struct sighand_struct *oldsighand = me->sighand;
@@ -1296,13 +1322,23 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
* Calling this is the point of no return. None of the failures will be
* seen by userspace since either the process is already taking a fatal
* signal (via de_thread() or coredump), or will have SEGV raised
- * (after exec_mmap()) by search_binary_handlers (see below).
+ * (after exec_mmap()) by search_binary_handler (see below).
*/
-int flush_old_exec(struct linux_binprm * bprm)
+int begin_new_exec(struct linux_binprm * bprm)
{
struct task_struct *me = current;
int retval;
+ /* Once we are committed compute the creds */
+ retval = bprm_creds_from_file(bprm);
+ if (retval)
+ return retval;
+
+ /*
+ * Ensure all future errors are fatal.
+ */
+ bprm->point_of_no_return = true;
+
/*
* Make this the only thread in the thread group.
*/
@@ -1317,7 +1353,10 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
set_mm_exe_file(bprm->mm, bprm->file);
+ /* If the binary is not readable then enforce mm->dumpable=0 */
would_dump(bprm, bprm->file);
+ if (bprm->have_execfd)
+ would_dump(bprm, bprm->executable);
/*
* Release all of the old mmap stuff
@@ -1327,13 +1366,6 @@ int flush_old_exec(struct linux_binprm * bprm)
if (retval)
goto out;
- /*
- * After setting bprm->called_exec_mmap (to mark that current is
- * using the prepared mm now), we have nothing left of the original
- * process. If anything from here on returns an error, the check
- * in search_binary_handler() will SEGV current.
- */
- bprm->called_exec_mmap = 1;
bprm->mm = NULL;
#ifdef CONFIG_POSIX_TIMERS
@@ -1346,7 +1378,7 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
retval = unshare_sighand(me);
if (retval)
- goto out;
+ goto out_unlock;
set_fs(USER_DS);
me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
@@ -1361,12 +1393,84 @@ int flush_old_exec(struct linux_binprm * bprm)
* undergoing exec(2).
*/
do_close_on_exec(me->files);
+
+ if (bprm->secureexec) {
+ /* Make sure parent cannot signal privileged process. */
+ me->pdeath_signal = 0;
+
+ /*
+ * For secureexec, reset the stack limit to sane default to
+ * avoid bad behavior from the prior rlimits. This has to
+ * happen before arch_pick_mmap_layout(), which examines
+ * RLIMIT_STACK, but after the point of no return to avoid
+ * needing to clean up the change on failure.
+ */
+ if (bprm->rlim_stack.rlim_cur > _STK_LIM)
+ bprm->rlim_stack.rlim_cur = _STK_LIM;
+ }
+
+ me->sas_ss_sp = me->sas_ss_size = 0;
+
+ /*
+ * Figure out dumpability. Note that this checking only of current
+ * is wrong, but userspace depends on it. This should be testing
+ * bprm->secureexec instead.
+ */
+ if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
+ !(uid_eq(current_euid(), current_uid()) &&
+ gid_eq(current_egid(), current_gid())))
+ set_dumpable(current->mm, suid_dumpable);
+ else
+ set_dumpable(current->mm, SUID_DUMP_USER);
+
+ perf_event_exec();
+ __set_task_comm(me, kbasename(bprm->filename), true);
+
+ /* An exec changes our domain. We are no longer part of the thread
+ group */
+ WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
+ flush_signal_handlers(me, 0);
+
+ /*
+ * install the new credentials for this executable
+ */
+ security_bprm_committing_creds(bprm);
+
+ commit_creds(bprm->cred);
+ bprm->cred = NULL;
+
+ /*
+ * Disable monitoring for regular users
+ * when executing setuid binaries. Must
+ * wait until new credentials are committed
+ * by commit_creds() above
+ */
+ if (get_dumpable(me->mm) != SUID_DUMP_USER)
+ perf_event_exit_task(me);
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+ * ptrace_attach() from altering our determination of the task's
+ * credentials; any time after this it may be unlocked.
+ */
+ security_bprm_committed_creds(bprm);
+
+ /* Pass the opened binary to the interpreter. */
+ if (bprm->have_execfd) {
+ retval = get_unused_fd_flags(0);
+ if (retval < 0)
+ goto out_unlock;
+ fd_install(retval, bprm->executable);
+ bprm->executable = NULL;
+ bprm->execfd = retval;
+ }
return 0;
+out_unlock:
+ mutex_unlock(&me->signal->exec_update_mutex);
out:
return retval;
}
-EXPORT_SYMBOL(flush_old_exec);
+EXPORT_SYMBOL(begin_new_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
@@ -1391,58 +1495,20 @@ EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
- /*
- * Once here, prepare_binrpm() will not be called any more, so
- * the final state of setuid/setgid/fscaps can be merged into the
- * secureexec flag.
- */
- bprm->secureexec |= bprm->cap_elevated;
-
- if (bprm->secureexec) {
- /* Make sure parent cannot signal privileged process. */
- current->pdeath_signal = 0;
-
- /*
- * For secureexec, reset the stack limit to sane default to
- * avoid bad behavior from the prior rlimits. This has to
- * happen before arch_pick_mmap_layout(), which examines
- * RLIMIT_STACK, but after the point of no return to avoid
- * needing to clean up the change on failure.
- */
- if (bprm->rlim_stack.rlim_cur > _STK_LIM)
- bprm->rlim_stack.rlim_cur = _STK_LIM;
- }
-
- arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
-
- current->sas_ss_sp = current->sas_ss_size = 0;
+ /* Setup things that can depend upon the personality */
+ struct task_struct *me = current;
- /*
- * Figure out dumpability. Note that this checking only of current
- * is wrong, but userspace depends on it. This should be testing
- * bprm->secureexec instead.
- */
- if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
- !(uid_eq(current_euid(), current_uid()) &&
- gid_eq(current_egid(), current_gid())))
- set_dumpable(current->mm, suid_dumpable);
- else
- set_dumpable(current->mm, SUID_DUMP_USER);
+ arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
arch_setup_new_exec();
- perf_event_exec();
- __set_task_comm(current, kbasename(bprm->filename), true);
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
*/
- current->mm->task_size = TASK_SIZE;
-
- /* An exec changes our domain. We are no longer part of the thread
- group */
- WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
- flush_signal_handlers(current, 0);
+ me->mm->task_size = TASK_SIZE;
+ mutex_unlock(&me->signal->exec_update_mutex);
+ mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);
@@ -1458,7 +1524,7 @@ EXPORT_SYMBOL(finalize_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
- * install_exec_creds() commits the new creds and drops the lock.
+ * setup_new_exec() commits the new creds and drops the lock.
* Or, if exec fails before, free_bprm() should release ->cred and
* and unlock.
*/
@@ -1479,8 +1545,6 @@ static void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
- if (bprm->called_exec_mmap)
- mutex_unlock(&current->signal->exec_update_mutex);
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1488,6 +1552,8 @@ static void free_bprm(struct linux_binprm *bprm)
allow_write_access(bprm->file);
fput(bprm->file);
}
+ if (bprm->executable)
+ fput(bprm->executable);
/* If a binfmt changed the interp, free it. */
if (bprm->interp != bprm->filename)
kfree(bprm->interp);
@@ -1507,35 +1573,6 @@ int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
EXPORT_SYMBOL(bprm_change_interp);
/*
- * install the new credentials for this executable
- */
-void install_exec_creds(struct linux_binprm *bprm)
-{
- security_bprm_committing_creds(bprm);
-
- commit_creds(bprm->cred);
- bprm->cred = NULL;
-
- /*
- * Disable monitoring for regular users
- * when executing setuid binaries. Must
- * wait until new credentials are committed
- * by commit_creds() above
- */
- if (get_dumpable(current->mm) != SUID_DUMP_USER)
- perf_event_exit_task(current);
- /*
- * cred_guard_mutex must be held at least to this point to prevent
- * ptrace_attach() from altering our determination of the task's
- * credentials; any time after this it may be unlocked.
- */
- security_bprm_committed_creds(bprm);
- mutex_unlock(&current->signal->exec_update_mutex);
- mutex_unlock(&current->signal->cred_guard_mutex);
-}
-EXPORT_SYMBOL(install_exec_creds);
-
-/*
* determine how safe it is to execute the proposed program
* - the caller must hold ->cred_guard_mutex to protect against
* PTRACE_ATTACH or seccomp thread-sync
@@ -1572,29 +1609,21 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
spin_unlock(&p->fs->lock);
}
-static void bprm_fill_uid(struct linux_binprm *bprm)
+static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
+ /* Handle suid and sgid on files */
struct inode *inode;
unsigned int mode;
kuid_t uid;
kgid_t gid;
- /*
- * Since this can be called multiple times (via prepare_binprm),
- * we must clear any previous work done when setting set[ug]id
- * bits from any earlier bprm->file uses (for example when run
- * first for a setuid script then again for its interpreter).
- */
- bprm->cred->euid = current_euid();
- bprm->cred->egid = current_egid();
-
- if (!mnt_may_suid(bprm->file->f_path.mnt))
+ if (!mnt_may_suid(file->f_path.mnt))
return;
if (task_no_new_privs(current))
return;
- inode = bprm->file->f_path.dentry->d_inode;
+ inode = file->f_path.dentry->d_inode;
mode = READ_ONCE(inode->i_mode);
if (!(mode & (S_ISUID|S_ISGID)))
return;
@@ -1625,30 +1654,31 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
}
/*
+ * Compute bprm->cred based upon the final binary.
+ */
+static int bprm_creds_from_file(struct linux_binprm *bprm)
+{
+ /* Compute creds based on which file? */
+ struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
+
+ bprm_fill_uid(bprm, file);
+ return security_bprm_creds_from_file(bprm, file);
+}
+
+/*
* Fill the binprm structure from the inode.
- * Check permissions, then read the first BINPRM_BUF_SIZE bytes
+ * Read the first BINPRM_BUF_SIZE bytes
*
* This may be called multiple times for binary chains (scripts for example).
*/
-int prepare_binprm(struct linux_binprm *bprm)
+static int prepare_binprm(struct linux_binprm *bprm)
{
- int retval;
loff_t pos = 0;
- bprm_fill_uid(bprm);
-
- /* fill in binprm security blob */
- retval = security_bprm_set_creds(bprm);
- if (retval)
- return retval;
- bprm->called_set_creds = 1;
-
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
-EXPORT_SYMBOL(prepare_binprm);
-
/*
* Arguments are '\0' separated strings found at the location bprm->p
* points to; chop off the first by relocating bprm->p to right after
@@ -1694,15 +1724,15 @@ EXPORT_SYMBOL(remove_arg_zero);
/*
* cycle the list of binary formats handler, until one recognizes the image
*/
-int search_binary_handler(struct linux_binprm *bprm)
+static int search_binary_handler(struct linux_binprm *bprm)
{
bool need_retry = IS_ENABLED(CONFIG_MODULES);
struct linux_binfmt *fmt;
int retval;
- /* This allows 4 levels of binfmt rewrites before failing hard. */
- if (bprm->recursion_depth > 5)
- return -ELOOP;
+ retval = prepare_binprm(bprm);
+ if (retval < 0)
+ return retval;
retval = security_bprm_check(bprm);
if (retval)
@@ -1716,19 +1746,11 @@ int search_binary_handler(struct linux_binprm *bprm)
continue;
read_unlock(&binfmt_lock);
- bprm->recursion_depth++;
retval = fmt->load_binary(bprm);
- bprm->recursion_depth--;
read_lock(&binfmt_lock);
put_binfmt(fmt);
- if (retval < 0 && bprm->called_exec_mmap) {
- /* we got to flush_old_exec() and failed after it */
- read_unlock(&binfmt_lock);
- force_sigsegv(SIGSEGV);
- return retval;
- }
- if (retval != -ENOEXEC || !bprm->file) {
+ if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
read_unlock(&binfmt_lock);
return retval;
}
@@ -1747,12 +1769,11 @@ int search_binary_handler(struct linux_binprm *bprm)
return retval;
}
-EXPORT_SYMBOL(search_binary_handler);
static int exec_binprm(struct linux_binprm *bprm)
{
pid_t old_pid, old_vpid;
- int ret;
+ int ret, depth;
/* Need to fetch pid before load_binary changes it */
old_pid = current->pid;
@@ -1760,15 +1781,38 @@ static int exec_binprm(struct linux_binprm *bprm)
old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
rcu_read_unlock();
- ret = search_binary_handler(bprm);
- if (ret >= 0) {
- audit_bprm(bprm);
- trace_sched_process_exec(current, old_pid, bprm);
- ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
- proc_exec_connector(current);
+ /* This allows 4 levels of binfmt rewrites before failing hard. */
+ for (depth = 0;; depth++) {
+ struct file *exec;
+ if (depth > 5)
+ return -ELOOP;
+
+ ret = search_binary_handler(bprm);
+ if (ret < 0)
+ return ret;
+ if (!bprm->interpreter)
+ break;
+
+ exec = bprm->file;
+ bprm->file = bprm->interpreter;
+ bprm->interpreter = NULL;
+
+ allow_write_access(exec);
+ if (unlikely(bprm->have_execfd)) {
+ if (bprm->executable) {
+ fput(exec);
+ return -ENOEXEC;
+ }
+ bprm->executable = exec;
+ } else
+ fput(exec);
}
- return ret;
+ audit_bprm(bprm);
+ trace_sched_process_exec(current, old_pid, bprm);
+ ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
+ proc_exec_connector(current);
+ return 0;
}
/*
@@ -1861,11 +1905,12 @@ static int __do_execve_file(int fd, struct filename *filename,
if (retval < 0)
goto out;
- retval = prepare_binprm(bprm);
- if (retval < 0)
+ /* Set the unchanging part of bprm->cred */
+ retval = security_bprm_creds_for_exec(bprm);
+ if (retval)
goto out;
- retval = copy_strings_kernel(1, &bprm->filename, bprm);
+ retval = copy_string_kernel(bprm->filename, bprm);
if (retval < 0)
goto out;
@@ -1897,6 +1942,14 @@ static int __do_execve_file(int fd, struct filename *filename,
return retval;
out:
+ /*
+ * If past the point of no return ensure the the code never
+ * returns to the userspace process. Use an existing fatal
+ * signal if present otherwise terminate the process with
+ * SIGSEGV.
+ */
+ if (bprm->point_of_no_return && !fatal_signal_pending(current))
+ force_sigsegv(SIGSEGV);
if (bprm->mm) {
acct_arg_size(bprm, 0);
mmput(bprm->mm);
diff --git a/fs/exfat/Kconfig b/fs/exfat/Kconfig
index 2d3636dc5b8c..5a65071b5ecf 100644
--- a/fs/exfat/Kconfig
+++ b/fs/exfat/Kconfig
@@ -16,6 +16,7 @@ config EXFAT_DEFAULT_IOCHARSET
depends on EXFAT_FS
help
Set this to the default input/output character set to use for
- converting between the encoding is used for user visible filename and
- UTF-16 character that exfat filesystem use, and can be overridden with
- the "iocharset" mount option for exFAT filesystems.
+ converting between the encoding that is used for user-visible
+ filenames and the UTF-16 character encoding that the exFAT
+ filesystem uses. This can be overridden with the "iocharset" mount
+ option for exFAT filesystems.
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 6774a5a6ded8..4055eb00ea9b 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -58,9 +58,8 @@ static int exfat_allocate_bitmap(struct super_block *sb,
need_map_size = ((EXFAT_DATA_CLUSTER_COUNT(sbi) - 1) / BITS_PER_BYTE)
+ 1;
if (need_map_size != map_size) {
- exfat_msg(sb, KERN_ERR,
- "bogus allocation bitmap size(need : %u, cur : %lld)",
- need_map_size, map_size);
+ exfat_err(sb, "bogus allocation bitmap size(need : %u, cur : %lld)",
+ need_map_size, map_size);
/*
* Only allowed when bogus allocation
* bitmap size is large
@@ -192,8 +191,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
(1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
if (ret_discard == -EOPNOTSUPP) {
- exfat_msg(sb, KERN_ERR,
- "discard not supported by device, disabling");
+ exfat_err(sb, "discard not supported by device, disabling");
opts->discard = 0;
}
}
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index 4b91afb0f051..de43534aa299 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -32,35 +32,30 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
struct exfat_chain *p_dir, int entry, unsigned short *uniname)
{
int i;
- struct exfat_dentry *ep;
struct exfat_entry_set_cache *es;
- es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
+ es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
if (!es)
return;
- if (es->num_entries < 3)
- goto free_es;
-
- ep += 2;
-
/*
* First entry : file entry
* Second entry : stream-extension entry
* Third entry : first file-name entry
* So, the index of first file-name dentry should start from 2.
*/
- for (i = 2; i < es->num_entries; i++, ep++) {
+ for (i = 2; i < es->num_entries; i++) {
+ struct exfat_dentry *ep = exfat_get_dentry_cached(es, i);
+
/* end of name entry */
if (exfat_get_entry_type(ep) != TYPE_EXTEND)
- goto free_es;
+ break;
exfat_extract_uni_name(ep, uniname);
uniname += EXFAT_FILE_NAME_LEN;
}
-free_es:
- kfree(es);
+ exfat_free_dentry_set(es, false);
}
/* read a directory entry from the opened directory */
@@ -137,12 +132,12 @@ static int exfat_readdir(struct inode *inode, struct exfat_dir_entry *dir_entry)
ep->dentry.file.create_tz,
ep->dentry.file.create_time,
ep->dentry.file.create_date,
- ep->dentry.file.create_time_ms);
+ ep->dentry.file.create_time_cs);
exfat_get_entry_time(sbi, &dir_entry->mtime,
ep->dentry.file.modify_tz,
ep->dentry.file.modify_time,
ep->dentry.file.modify_date,
- ep->dentry.file.modify_time_ms);
+ ep->dentry.file.modify_time_cs);
exfat_get_entry_time(sbi, &dir_entry->atime,
ep->dentry.file.access_tz,
ep->dentry.file.access_time,
@@ -461,12 +456,12 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
&ep->dentry.file.create_tz,
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
- &ep->dentry.file.create_time_ms);
+ &ep->dentry.file.create_time_cs);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_ms);
+ &ep->dentry.file.modify_time_cs);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.access_tz,
&ep->dentry.file.access_time,
@@ -496,7 +491,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
int ret = 0;
int i, num_entries;
sector_t sector;
- unsigned short chksum;
+ u16 chksum;
struct exfat_dentry *ep, *fep;
struct buffer_head *fbh, *bh;
@@ -505,7 +500,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
return -EIO;
num_entries = fep->dentry.file.num_ext + 1;
- chksum = exfat_calc_chksum_2byte(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
+ chksum = exfat_calc_chksum16(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
for (i = 1; i < num_entries; i++) {
ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL);
@@ -513,7 +508,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
ret = -EIO;
goto release_fbh;
}
- chksum = exfat_calc_chksum_2byte(ep, DENTRY_SIZE, chksum,
+ chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum,
CS_DEFAULT);
brelse(bh);
}
@@ -590,62 +585,33 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
return 0;
}
-int exfat_update_dir_chksum_with_entry_set(struct super_block *sb,
- struct exfat_entry_set_cache *es, int sync)
+void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es)
{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct buffer_head *bh;
- sector_t sec = es->sector;
- unsigned int off = es->offset;
- int chksum_type = CS_DIR_ENTRY, i, num_entries = es->num_entries;
- unsigned int buf_off = (off - es->offset);
- unsigned int remaining_byte_in_sector, copy_entries, clu;
+ int chksum_type = CS_DIR_ENTRY, i;
unsigned short chksum = 0;
+ struct exfat_dentry *ep;
- for (i = 0; i < num_entries; i++) {
- chksum = exfat_calc_chksum_2byte(&es->entries[i], DENTRY_SIZE,
- chksum, chksum_type);
+ for (i = 0; i < es->num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+ chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum,
+ chksum_type);
chksum_type = CS_DEFAULT;
}
+ ep = exfat_get_dentry_cached(es, 0);
+ ep->dentry.file.checksum = cpu_to_le16(chksum);
+ es->modified = true;
+}
- es->entries[0].dentry.file.checksum = cpu_to_le16(chksum);
+void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
+{
+ int i;
- while (num_entries) {
- /* write per sector base */
- remaining_byte_in_sector = (1 << sb->s_blocksize_bits) - off;
- copy_entries = min_t(int,
- EXFAT_B_TO_DEN(remaining_byte_in_sector),
- num_entries);
- bh = sb_bread(sb, sec);
- if (!bh)
- goto err_out;
- memcpy(bh->b_data + off,
- (unsigned char *)&es->entries[0] + buf_off,
- EXFAT_DEN_TO_B(copy_entries));
- exfat_update_bh(sb, bh, sync);
- brelse(bh);
- num_entries -= copy_entries;
-
- if (num_entries) {
- /* get next sector */
- if (exfat_is_last_sector_in_cluster(sbi, sec)) {
- clu = exfat_sector_to_cluster(sbi, sec);
- if (es->alloc_flag == ALLOC_NO_FAT_CHAIN)
- clu++;
- else if (exfat_get_next_cluster(sb, &clu))
- goto err_out;
- sec = exfat_cluster_to_sector(sbi, clu);
- } else {
- sec++;
- }
- off = 0;
- buf_off += EXFAT_DEN_TO_B(copy_entries);
- }
+ for (i = 0; i < es->num_bh; i++) {
+ if (es->modified)
+ exfat_update_bh(es->sb, es->bh[i], sync);
+ brelse(es->bh[i]);
}
-
- return 0;
-err_out:
- return -EIO;
+ kfree(es);
}
static int exfat_walk_fat_chain(struct super_block *sb,
@@ -720,9 +686,8 @@ static int exfat_dir_readahead(struct super_block *sb, sector_t sec)
return 0;
if (sec < sbi->data_start_sector) {
- exfat_msg(sb, KERN_ERR,
- "requested sector is invalid(sect:%llu, root:%llu)",
- (unsigned long long)sec, sbi->data_start_sector);
+ exfat_err(sb, "requested sector is invalid(sect:%llu, root:%llu)",
+ (unsigned long long)sec, sbi->data_start_sector);
return -EIO;
}
@@ -750,7 +715,7 @@ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
sector_t sec;
if (p_dir->dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry\n");
+ exfat_err(sb, "abnormal access to deleted dentry");
return NULL;
}
@@ -821,39 +786,45 @@ static bool exfat_validate_entry(unsigned int type,
}
}
+struct exfat_dentry *exfat_get_dentry_cached(
+ struct exfat_entry_set_cache *es, int num)
+{
+ int off = es->start_off + num * DENTRY_SIZE;
+ struct buffer_head *bh = es->bh[EXFAT_B_TO_BLK(off, es->sb)];
+ char *p = bh->b_data + EXFAT_BLK_OFFSET(off, es->sb);
+
+ return (struct exfat_dentry *)p;
+}
+
/*
* Returns a set of dentries for a file or dir.
*
- * Note that this is a copy (dump) of dentries so that user should
- * call write_entry_set() to apply changes made in this entry set
- * to the real device.
+ * Note: it provides a direct pointer to bh->data via exfat_get_dentry_cached().
+ * User should call exfat_free_dentry_set() after setting 'modified' to apply
+ * changes made in this entry set to the real device.
*
* in:
* sb+p_dir+entry: indicates a file/dir
* type: specifies how many dentries should be included.
- * out:
- * file_ep: will point the first dentry(= file dentry) on success
* return:
* pointer of entry set on success,
* NULL on failure.
*/
struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, unsigned int type,
- struct exfat_dentry **file_ep)
+ struct exfat_chain *p_dir, int entry, unsigned int type)
{
- int ret;
+ int ret, i, num_bh;
unsigned int off, byte_offset, clu = 0;
- unsigned int entry_type;
sector_t sec;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_entry_set_cache *es;
- struct exfat_dentry *ep, *pos;
- unsigned char num_entries;
+ struct exfat_dentry *ep;
+ int num_entries;
enum exfat_validate_dentry_mode mode = ES_MODE_STARTED;
struct buffer_head *bh;
if (p_dir->dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "access to deleted dentry\n");
+ exfat_err(sb, "access to deleted dentry");
return NULL;
}
@@ -862,11 +833,18 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
if (ret)
return NULL;
+ es = kzalloc(sizeof(*es), GFP_KERNEL);
+ if (!es)
+ return NULL;
+ es->sb = sb;
+ es->modified = false;
+
/* byte offset in cluster */
byte_offset = EXFAT_CLU_OFFSET(byte_offset, sbi);
/* byte offset in sector */
off = EXFAT_BLK_OFFSET(byte_offset, sb);
+ es->start_off = off;
/* sector offset in cluster */
sec = EXFAT_B_TO_BLK(byte_offset, sb);
@@ -874,72 +852,46 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
bh = sb_bread(sb, sec);
if (!bh)
- return NULL;
-
- ep = (struct exfat_dentry *)(bh->b_data + off);
- entry_type = exfat_get_entry_type(ep);
+ goto free_es;
+ es->bh[es->num_bh++] = bh;
- if (entry_type != TYPE_FILE && entry_type != TYPE_DIR)
- goto release_bh;
+ ep = exfat_get_dentry_cached(es, 0);
+ if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
+ goto free_es;
num_entries = type == ES_ALL_ENTRIES ?
ep->dentry.file.num_ext + 1 : type;
- es = kmalloc(struct_size(es, entries, num_entries), GFP_KERNEL);
- if (!es)
- goto release_bh;
-
es->num_entries = num_entries;
- es->sector = sec;
- es->offset = off;
- es->alloc_flag = p_dir->flags;
-
- pos = &es->entries[0];
-
- while (num_entries) {
- if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
- goto free_es;
-
- /* copy dentry */
- memcpy(pos, ep, sizeof(struct exfat_dentry));
-
- if (--num_entries == 0)
- break;
-
- if (((off + DENTRY_SIZE) & (sb->s_blocksize - 1)) <
- (off & (sb->s_blocksize - 1))) {
- /* get the next sector */
- if (exfat_is_last_sector_in_cluster(sbi, sec)) {
- if (es->alloc_flag == ALLOC_NO_FAT_CHAIN)
- clu++;
- else if (exfat_get_next_cluster(sb, &clu))
- goto free_es;
- sec = exfat_cluster_to_sector(sbi, clu);
- } else {
- sec++;
- }
- brelse(bh);
- bh = sb_bread(sb, sec);
- if (!bh)
+ num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
+ for (i = 1; i < num_bh; i++) {
+ /* get the next sector */
+ if (exfat_is_last_sector_in_cluster(sbi, sec)) {
+ if (p_dir->flags == ALLOC_NO_FAT_CHAIN)
+ clu++;
+ else if (exfat_get_next_cluster(sb, &clu))
goto free_es;
- off = 0;
- ep = (struct exfat_dentry *)bh->b_data;
+ sec = exfat_cluster_to_sector(sbi, clu);
} else {
- ep++;
- off += DENTRY_SIZE;
+ sec++;
}
- pos++;
+
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ goto free_es;
+ es->bh[es->num_bh++] = bh;
}
- if (file_ep)
- *file_ep = &es->entries[0];
- brelse(bh);
+ /* validate cached dentries */
+ for (i = 1; i < num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+ if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
+ goto free_es;
+ }
return es;
free_es:
- kfree(es);
-release_bh:
- brelse(bh);
+ exfat_free_dentry_set(es, false);
return NULL;
}
@@ -1048,7 +1000,7 @@ rewind:
}
if (entry_type == TYPE_STREAM) {
- unsigned short name_hash;
+ u16 name_hash;
if (step != DIRENT_STEP_STRM) {
step = DIRENT_STEP_FILE;
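
The new dentry-set API above replaces the copy-based entry cache with direct buffer_head access; a hedged sketch of the intended call pattern follows (the surrounding function and the sync flag are assumptions, the four helpers are the ones defined in this patch):

	struct exfat_entry_set_cache *es;
	struct exfat_dentry *ep;

	es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
	if (!es)
		return -EIO;
	/* entries are read and modified in place in the cached buffer_heads */
	ep = exfat_get_dentry_cached(es, 0);
	/* ... modify fields of ep as needed ... */
	exfat_update_dir_chksum_with_entry_set(es);	/* recomputes checksum, sets es->modified */
	exfat_free_dentry_set(es, sync);		/* writes dirty buffers, then releases */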
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index d67fb8a6f770..595f3117f492 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -71,10 +71,8 @@ enum {
#define MAX_NAME_LENGTH 255 /* max len of file name excluding NULL */
#define MAX_VFSNAME_BUF_SIZE ((MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE)
-#define FAT_CACHE_SIZE 128
-#define FAT_CACHE_HASH_SIZE 64
-#define BUF_CACHE_SIZE 256
-#define BUF_CACHE_HASH_SIZE 64
+/* Enough size to hold 256 dentry (even 512 Byte sector) */
+#define DIR_CACHE_SIZE (256*sizeof(struct exfat_dentry)/512+1)
#define EXFAT_HINT_NONE -1
#define EXFAT_MIN_SUBDIR 2
@@ -139,7 +137,7 @@ struct exfat_dentry_namebuf {
struct exfat_uni_name {
/* +3 for null and for converting */
unsigned short name[MAX_NAME_LENGTH + 3];
- unsigned short name_hash;
+ u16 name_hash;
unsigned char name_len;
};
@@ -170,14 +168,12 @@ struct exfat_hint {
};
struct exfat_entry_set_cache {
- /* sector number that contains file_entry */
- sector_t sector;
- /* byte offset in the sector */
- unsigned int offset;
- /* flag in stream entry. 01 for cluster chain, 03 for contig. */
- int alloc_flag;
+ struct super_block *sb;
+ bool modified;
+ unsigned int start_off;
+ int num_bh;
+ struct buffer_head *bh[DIR_CACHE_SIZE];
unsigned int num_entries;
- struct exfat_dentry entries[];
};
struct exfat_dir_entry {
@@ -231,7 +227,7 @@ struct exfat_sb_info {
unsigned int root_dir; /* root dir cluster */
unsigned int dentries_per_clu; /* num of dentries per cluster */
unsigned int vol_flag; /* volume dirty flag */
- struct buffer_head *pbr_bh; /* buffer_head of PBR sector */
+ struct buffer_head *boot_bh; /* buffer_head of BOOT sector */
unsigned int map_clu; /* allocation bitmap start cluster */
unsigned int map_sectors; /* num of allocation bitmap sectors */
@@ -451,8 +447,7 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
int entry, int order, int num_entries);
int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
int entry);
-int exfat_update_dir_chksum_with_entry_set(struct super_block *sb,
- struct exfat_entry_set_cache *es, int sync);
+void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es);
int exfat_calc_num_entries(struct exfat_uni_name *p_uniname);
int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
@@ -463,9 +458,11 @@ int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
sector_t *sector);
+struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es,
+ int num);
struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, unsigned int type,
- struct exfat_dentry **file_ep);
+ struct exfat_chain *p_dir, int entry, unsigned int type);
+void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync);
int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir);
/* inode.c */
@@ -492,8 +489,6 @@ int exfat_nls_to_utf16(struct super_block *sb,
struct exfat_uni_name *uniname, int *p_lossy);
int exfat_create_upcase_table(struct super_block *sb);
void exfat_free_upcase_table(struct exfat_sb_info *sbi);
-unsigned short exfat_high_surrogate(unicode_t u);
-unsigned short exfat_low_surrogate(unicode_t u);
/* exfat/misc.c */
void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
@@ -505,13 +500,20 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
fmt, ## args)
void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...)
__printf(3, 4) __cold;
+#define exfat_err(sb, fmt, ...) \
+ exfat_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+#define exfat_warn(sb, fmt, ...) \
+ exfat_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define exfat_info(sb, fmt, ...) \
+ exfat_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__)
+
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 tz, __le16 time, __le16 date, u8 time_ms);
+ u8 tz, __le16 time, __le16 date, u8 time_cs);
void exfat_truncate_atime(struct timespec64 *ts);
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 *tz, __le16 *time, __le16 *date, u8 *time_ms);
-unsigned short exfat_calc_chksum_2byte(void *data, int len,
- unsigned short chksum, int type);
+ u8 *tz, __le16 *time, __le16 *date, u8 *time_cs);
+u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type);
+u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type);
void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync);
void exfat_chain_set(struct exfat_chain *ec, unsigned int dir,
unsigned int size, unsigned char flags);
diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
index 2a841010e649..350ce59cc324 100644
--- a/fs/exfat/exfat_raw.h
+++ b/fs/exfat/exfat_raw.h
@@ -8,12 +8,15 @@
#include <linux/types.h>
-#define PBR_SIGNATURE 0xAA55
+#define BOOT_SIGNATURE 0xAA55
+#define EXBOOT_SIGNATURE 0xAA550000
+#define STR_EXFAT "EXFAT " /* size should be 8 */
#define EXFAT_MAX_FILE_LEN 255
#define VOL_CLEAN 0x0000
#define VOL_DIRTY 0x0002
+#define ERR_MEDIUM 0x0004
#define EXFAT_EOF_CLUSTER 0xFFFFFFFFu
#define EXFAT_BAD_CLUSTER 0xFFFFFFF7u
@@ -55,7 +58,7 @@
/* checksum types */
#define CS_DIR_ENTRY 0
-#define CS_PBR_SECTOR 1
+#define CS_BOOT_SECTOR 1
#define CS_DEFAULT 2
/* file attributes */
@@ -69,57 +72,35 @@
#define ATTR_RWMASK (ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME | \
ATTR_SUBDIR | ATTR_ARCHIVE)
-#define PBR64_JUMP_BOOT_LEN 3
-#define PBR64_OEM_NAME_LEN 8
-#define PBR64_RESERVED_LEN 53
+#define BOOTSEC_JUMP_BOOT_LEN 3
+#define BOOTSEC_FS_NAME_LEN 8
+#define BOOTSEC_OLDBPB_LEN 53
#define EXFAT_FILE_NAME_LEN 15
-/* EXFAT BIOS parameter block (64 bytes) */
-struct bpb64 {
- __u8 jmp_boot[PBR64_JUMP_BOOT_LEN];
- __u8 oem_name[PBR64_OEM_NAME_LEN];
- __u8 res_zero[PBR64_RESERVED_LEN];
-} __packed;
-
-/* EXFAT EXTEND BIOS parameter block (56 bytes) */
-struct bsx64 {
- __le64 vol_offset;
- __le64 vol_length;
- __le32 fat_offset;
- __le32 fat_length;
- __le32 clu_offset;
- __le32 clu_count;
- __le32 root_cluster;
- __le32 vol_serial;
- __u8 fs_version[2];
- __le16 vol_flags;
- __u8 sect_size_bits;
- __u8 sect_per_clus_bits;
- __u8 num_fats;
- __u8 phy_drv_no;
- __u8 perc_in_use;
- __u8 reserved2[7];
-} __packed;
-
-/* EXFAT PBR[BPB+BSX] (120 bytes) */
-struct pbr64 {
- struct bpb64 bpb;
- struct bsx64 bsx;
-} __packed;
-
-/* Common PBR[Partition Boot Record] (512 bytes) */
-struct pbr {
- union {
- __u8 raw[64];
- struct bpb64 f64;
- } bpb;
- union {
- __u8 raw[56];
- struct bsx64 f64;
- } bsx;
- __u8 boot_code[390];
- __le16 signature;
+/* EXFAT: Main and Backup Boot Sector (512 bytes) */
+struct boot_sector {
+ __u8 jmp_boot[BOOTSEC_JUMP_BOOT_LEN];
+ __u8 fs_name[BOOTSEC_FS_NAME_LEN];
+ __u8 must_be_zero[BOOTSEC_OLDBPB_LEN];
+ __le64 partition_offset;
+ __le64 vol_length;
+ __le32 fat_offset;
+ __le32 fat_length;
+ __le32 clu_offset;
+ __le32 clu_count;
+ __le32 root_cluster;
+ __le32 vol_serial;
+ __u8 fs_revision[2];
+ __le16 vol_flags;
+ __u8 sect_size_bits;
+ __u8 sect_per_clus_bits;
+ __u8 num_fats;
+ __u8 drv_sel;
+ __u8 percent_in_use;
+ __u8 reserved[7];
+ __u8 boot_code[390];
+ __le16 signature;
} __packed;
struct exfat_dentry {
@@ -136,8 +117,8 @@ struct exfat_dentry {
__le16 modify_date;
__le16 access_time;
__le16 access_date;
- __u8 create_time_ms;
- __u8 modify_time_ms;
+ __u8 create_time_cs;
+ __u8 modify_time_cs;
__u8 create_tz;
__u8 modify_tz;
__u8 access_tz;
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index a855b1769a96..4e5c5c9c0f2d 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -169,9 +169,8 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
return 0;
/* check cluster validation */
- if (p_chain->dir < 2 && p_chain->dir >= sbi->num_clusters) {
- exfat_msg(sb, KERN_ERR, "invalid start cluster (%u)",
- p_chain->dir);
+ if (!is_valid_cluster(sbi, p_chain->dir)) {
+ exfat_err(sb, "invalid start cluster (%u)", p_chain->dir);
return -EIO;
}
@@ -305,8 +304,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
return 0;
release_bhs:
- exfat_msg(sb, KERN_ERR, "failed zeroed sect %llu\n",
- (unsigned long long)blknr);
+ exfat_err(sb, "failed zeroed sect %llu\n", (unsigned long long)blknr);
for (i = 0; i < n; i++)
bforget(bhs[i]);
return err;
@@ -337,9 +335,8 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
/* find new cluster */
if (hint_clu == EXFAT_EOF_CLUSTER) {
if (sbi->clu_srch_ptr < EXFAT_FIRST_CLUSTER) {
- exfat_msg(sb, KERN_ERR,
- "sbi->clu_srch_ptr is invalid (%u)\n",
- sbi->clu_srch_ptr);
+ exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)\n",
+ sbi->clu_srch_ptr);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
}
@@ -349,8 +346,8 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
/* check cluster validation */
- if (hint_clu < EXFAT_FIRST_CLUSTER && hint_clu >= sbi->num_clusters) {
- exfat_msg(sb, KERN_ERR, "hint_cluster is invalid (%u)\n",
+ if (!is_valid_cluster(sbi, hint_clu)) {
+ exfat_err(sb, "hint_cluster is invalid (%u)",
hint_clu);
hint_clu = EXFAT_FIRST_CLUSTER;
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index c9db8eb0cfc3..fce03f318787 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -96,11 +96,9 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
unsigned int num_clusters_new, num_clusters_phys;
unsigned int last_clu = EXFAT_FREE_CLUSTER;
struct exfat_chain clu;
- struct exfat_dentry *ep, *ep2;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- struct exfat_entry_set_cache *es = NULL;
int evict = (ei->dir.dir == DIR_DELETED) ? 1 : 0;
/* check if the given file ID is opened */
@@ -153,28 +151,31 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
/* update the directory entry */
if (!evict) {
struct timespec64 ts;
+ struct exfat_dentry *ep, *ep2;
+ struct exfat_entry_set_cache *es;
es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES, &ep);
+ ES_ALL_ENTRIES);
if (!es)
return -EIO;
- ep2 = ep + 1;
+ ep = exfat_get_dentry_cached(es, 0);
+ ep2 = exfat_get_dentry_cached(es, 1);
ts = current_time(inode);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_ms);
+ &ep->dentry.file.modify_time_cs);
ep->dentry.file.attr = cpu_to_le16(ei->attr);
/* File size should be zero if there is no cluster allocated */
if (ei->start_clu == EXFAT_EOF_CLUSTER) {
- ep->dentry.stream.valid_size = 0;
- ep->dentry.stream.size = 0;
+ ep2->dentry.stream.valid_size = 0;
+ ep2->dentry.stream.size = 0;
} else {
- ep->dentry.stream.valid_size = cpu_to_le64(new_size);
- ep->dentry.stream.size = ep->dentry.stream.valid_size;
+ ep2->dentry.stream.valid_size = cpu_to_le64(new_size);
+ ep2->dentry.stream.size = ep->dentry.stream.valid_size;
}
if (new_size == 0) {
@@ -185,10 +186,8 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
}
- if (exfat_update_dir_chksum_with_entry_set(sb, es,
- inode_needs_sync(inode)))
- return -EIO;
- kfree(es);
+ exfat_update_dir_chksum_with_entry_set(es);
+ exfat_free_dentry_set(es, inode_needs_sync(inode));
}
/* cut off from the FAT chain */
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 785ead346543..cf9ca6c4d046 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -19,7 +19,6 @@
static int __exfat_write_inode(struct inode *inode, int sync)
{
- int ret = -EIO;
unsigned long long on_disk_size;
struct exfat_dentry *ep, *ep2;
struct exfat_entry_set_cache *es = NULL;
@@ -43,11 +42,11 @@ static int __exfat_write_inode(struct inode *inode, int sync)
exfat_set_vol_flags(sb, VOL_DIRTY);
/* get the directory entry of given file or directory */
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES,
- &ep);
+ es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES);
if (!es)
return -EIO;
- ep2 = ep + 1;
+ ep = exfat_get_dentry_cached(es, 0);
+ ep2 = exfat_get_dentry_cached(es, 1);
ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode));
@@ -56,12 +55,12 @@ static int __exfat_write_inode(struct inode *inode, int sync)
&ep->dentry.file.create_tz,
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
- &ep->dentry.file.create_time_ms);
+ &ep->dentry.file.create_time_cs);
exfat_set_entry_time(sbi, &inode->i_mtime,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_ms);
+ &ep->dentry.file.modify_time_cs);
exfat_set_entry_time(sbi, &inode->i_atime,
&ep->dentry.file.access_tz,
&ep->dentry.file.access_time,
@@ -77,9 +76,9 @@ static int __exfat_write_inode(struct inode *inode, int sync)
ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
- ret = exfat_update_dir_chksum_with_entry_set(sb, es, sync);
- kfree(es);
- return ret;
+ exfat_update_dir_chksum_with_entry_set(es);
+ exfat_free_dentry_set(es, sync);
+ return 0;
}
int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -110,8 +109,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
int ret, modified = false;
unsigned int last_clu;
struct exfat_chain new_clu;
- struct exfat_dentry *ep;
- struct exfat_entry_set_cache *es = NULL;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
@@ -222,34 +219,28 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
num_clusters += num_to_be_allocated;
*clu = new_clu.dir;
- if (ei->dir.dir != DIR_DELETED) {
+ if (ei->dir.dir != DIR_DELETED && modified) {
+ struct exfat_dentry *ep;
+ struct exfat_entry_set_cache *es;
+
es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES, &ep);
+ ES_ALL_ENTRIES);
if (!es)
return -EIO;
/* get stream entry */
- ep++;
+ ep = exfat_get_dentry_cached(es, 1);
/* update directory entry */
- if (modified) {
- if (ep->dentry.stream.flags != ei->flags)
- ep->dentry.stream.flags = ei->flags;
-
- if (le32_to_cpu(ep->dentry.stream.start_clu) !=
- ei->start_clu)
- ep->dentry.stream.start_clu =
- cpu_to_le32(ei->start_clu);
-
- ep->dentry.stream.valid_size =
- cpu_to_le64(i_size_read(inode));
- ep->dentry.stream.size =
- ep->dentry.stream.valid_size;
- }
-
- if (exfat_update_dir_chksum_with_entry_set(sb, es,
- inode_needs_sync(inode)))
- return -EIO;
- kfree(es);
+ ep->dentry.stream.flags = ei->flags;
+ ep->dentry.stream.start_clu =
+ cpu_to_le32(ei->start_clu);
+ ep->dentry.stream.valid_size =
+ cpu_to_le64(i_size_read(inode));
+ ep->dentry.stream.size =
+ ep->dentry.stream.valid_size;
+
+ exfat_update_dir_chksum_with_entry_set(es);
+ exfat_free_dentry_set(es, inode_needs_sync(inode));
} /* end of if != DIR_DELETED */
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index ebd2cbe3cbc1..17d41f3d3709 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -32,7 +32,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- exfat_msg(sb, KERN_ERR, "error, %pV\n", &vaf);
+ exfat_err(sb, "error, %pV", &vaf);
va_end(args);
}
@@ -41,7 +41,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
sb->s_id);
} else if (opts->errors == EXFAT_ERRORS_RO && !sb_rdonly(sb)) {
sb->s_flags |= SB_RDONLY;
- exfat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
+ exfat_err(sb, "Filesystem has been set read-only");
}
}
@@ -75,7 +75,7 @@ static void exfat_adjust_tz(struct timespec64 *ts, u8 tz_off)
/* Convert a EXFAT time/date pair to a UNIX date (seconds since 1 1 70). */
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 tz, __le16 time, __le16 date, u8 time_ms)
+ u8 tz, __le16 time, __le16 date, u8 time_cs)
{
u16 t = le16_to_cpu(time);
u16 d = le16_to_cpu(date);
@@ -84,10 +84,10 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
t >> 11, (t >> 5) & 0x003F, (t & 0x001F) << 1);
- /* time_ms field represent 0 ~ 199(1990 ms) */
- if (time_ms) {
- ts->tv_sec += time_ms / 100;
- ts->tv_nsec = (time_ms % 100) * 10 * NSEC_PER_MSEC;
+ /* time_cs field represents 0 ~ 199 cs (1990 ms) */
+ if (time_cs) {
+ ts->tv_sec += time_cs / 100;
+ ts->tv_nsec = (time_cs % 100) * 10 * NSEC_PER_MSEC;
} else
ts->tv_nsec = 0;
@@ -101,7 +101,7 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
/* Convert linear UNIX date to a EXFAT time/date pair. */
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 *tz, __le16 *time, __le16 *date, u8 *time_ms)
+ u8 *tz, __le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
u16 t, d;
@@ -113,9 +113,9 @@ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
*time = cpu_to_le16(t);
*date = cpu_to_le16(d);
- /* time_ms field represent 0 ~ 199(1990 ms) */
- if (time_ms)
- *time_ms = (tm.tm_sec & 1) * 100 +
+ /* time_cs field represents 0 ~ 199 cs (1990 ms) */
+ if (time_cs)
+ *time_cs = (tm.tm_sec & 1) * 100 +
ts->tv_nsec / (10 * NSEC_PER_MSEC);
/*
@@ -136,17 +136,29 @@ void exfat_truncate_atime(struct timespec64 *ts)
ts->tv_nsec = 0;
}
-unsigned short exfat_calc_chksum_2byte(void *data, int len,
- unsigned short chksum, int type)
+u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type)
{
int i;
- unsigned char *c = (unsigned char *)data;
+ u8 *c = (u8 *)data;
for (i = 0; i < len; i++, c++) {
- if (((i == 2) || (i == 3)) && (type == CS_DIR_ENTRY))
+ if (unlikely(type == CS_DIR_ENTRY && (i == 2 || i == 3)))
continue;
- chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) +
- (unsigned short)*c;
+ chksum = ((chksum << 15) | (chksum >> 1)) + *c;
+ }
+ return chksum;
+}
+
+u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ for (i = 0; i < len; i++, c++) {
+ if (unlikely(type == CS_BOOT_SECTOR &&
+ (i == 106 || i == 107 || i == 112)))
+ continue;
+ chksum = ((chksum << 31) | (chksum >> 1)) + *c;
}
return chksum;
}
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index a2659a8a68a1..5b0f35329d63 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -147,16 +147,10 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
return charlen;
/*
- * Convert to UTF-16: code points above U+FFFF are encoded as
- * surrogate pairs.
* exfat_toupper() works only for code points up to the U+FFFF.
*/
- if (u > 0xFFFF) {
- hash = partial_name_hash(exfat_high_surrogate(u), hash);
- hash = partial_name_hash(exfat_low_surrogate(u), hash);
- } else {
- hash = partial_name_hash(exfat_toupper(sb, u), hash);
- }
+ hash = partial_name_hash(u <= 0xFFFF ? exfat_toupper(sb, u) : u,
+ hash);
}
qstr->hash = end_name_hash(hash);
@@ -185,14 +179,9 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
if (u_a <= 0xFFFF && u_b <= 0xFFFF) {
if (exfat_toupper(sb, u_a) != exfat_toupper(sb, u_b))
return 1;
- } else if (u_a > 0xFFFF && u_b > 0xFFFF) {
- if (exfat_low_surrogate(u_a) !=
- exfat_low_surrogate(u_b) ||
- exfat_high_surrogate(u_a) !=
- exfat_high_surrogate(u_b))
- return 1;
} else {
- return 1;
+ if (u_a != u_b)
+ return 1;
}
}
@@ -611,8 +600,6 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
int ret, dentry, num_entries, count;
struct exfat_chain cdir;
struct exfat_uni_name uni_name;
- struct exfat_dentry *ep, *ep2;
- struct exfat_entry_set_cache *es = NULL;
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(dir);
@@ -671,10 +658,14 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
info->num_subdirs = count;
} else {
- es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES, &ep);
+ struct exfat_dentry *ep, *ep2;
+ struct exfat_entry_set_cache *es;
+
+ es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES);
if (!es)
return -EIO;
- ep2 = ep + 1;
+ ep = exfat_get_dentry_cached(es, 0);
+ ep2 = exfat_get_dentry_cached(es, 1);
info->type = exfat_get_entry_type(ep);
info->attr = le16_to_cpu(ep->dentry.file.attr);
@@ -692,7 +683,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
exfat_fs_error(sb,
"non-zero size file starts with zero cluster (size : %llu, p_dir : %u, entry : 0x%08x)",
i_size_read(dir), ei->dir.dir, ei->entry);
- kfree(es);
+ exfat_free_dentry_set(es, false);
return -EIO;
}
@@ -700,18 +691,18 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
ep->dentry.file.create_tz,
ep->dentry.file.create_time,
ep->dentry.file.create_date,
- ep->dentry.file.create_time_ms);
+ ep->dentry.file.create_time_cs);
exfat_get_entry_time(sbi, &info->mtime,
ep->dentry.file.modify_tz,
ep->dentry.file.modify_time,
ep->dentry.file.modify_date,
- ep->dentry.file.modify_time_ms);
+ ep->dentry.file.modify_time_cs);
exfat_get_entry_time(sbi, &info->atime,
ep->dentry.file.access_tz,
ep->dentry.file.access_time,
ep->dentry.file.access_date,
0);
- kfree(es);
+ exfat_free_dentry_set(es, false);
if (info->type == TYPE_DIR) {
exfat_chain_set(&cdir, info->start_clu,
@@ -778,8 +769,8 @@ static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry,
if (d_unhashed(alias)) {
WARN_ON(alias->d_name.hash_len !=
dentry->d_name.hash_len);
- exfat_msg(sb, KERN_INFO,
- "rehashed a dentry(%p) in read lookup", alias);
+ exfat_info(sb, "rehashed a dentry(%p) in read lookup",
+ alias);
d_drop(dentry);
d_rehash(alias);
} else if (!S_ISDIR(i_mode)) {
@@ -824,7 +815,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
exfat_chain_dup(&cdir, &ei->dir);
entry = ei->entry;
if (ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry");
+ exfat_err(sb, "abnormal access to deleted dentry");
err = -ENOENT;
goto unlock;
}
@@ -979,7 +970,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
entry = ei->entry;
if (ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry");
+ exfat_err(sb, "abnormal access to deleted dentry");
err = -ENOENT;
goto unlock;
}
@@ -991,9 +982,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
err = exfat_check_dir_empty(sb, &clu_to_free);
if (err) {
if (err == -EIO)
- exfat_msg(sb, KERN_ERR,
- "failed to exfat_check_dir_empty : err(%d)",
- err);
+ exfat_err(sb, "failed to exfat_check_dir_empty : err(%d)",
+ err);
goto unlock;
}
@@ -1014,9 +1004,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
if (err) {
- exfat_msg(sb, KERN_ERR,
- "failed to exfat_remove_entries : err(%d)",
- err);
+ exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
goto unlock;
}
ei->dir.dir = DIR_DELETED;
@@ -1245,8 +1233,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
return -EINVAL;
if (ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR,
- "abnormal access to deleted source dentry");
+ exfat_err(sb, "abnormal access to deleted source dentry");
return -ENOENT;
}
@@ -1268,8 +1255,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
new_ei = EXFAT_I(new_inode);
if (new_ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR,
- "abnormal access to deleted target dentry");
+ exfat_err(sb, "abnormal access to deleted target dentry");
goto out;
}
@@ -1431,8 +1417,7 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(new_inode->i_mode))
drop_nlink(new_inode);
} else {
- exfat_msg(sb, KERN_WARNING,
- "abnormal access to an inode dropped");
+ exfat_warn(sb, "abnormal access to an inode dropped");
WARN_ON(new_inode->i_nlink == 0);
}
new_inode->i_ctime = EXFAT_I(new_inode)->i_crtime =
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index 6d1c3ae130ff..57b5a7a4d1f7 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -503,21 +503,17 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
unilen = utf8s_to_utf16s(p_cstring, len, UTF16_HOST_ENDIAN,
(wchar_t *)uniname, MAX_NAME_LENGTH + 2);
if (unilen < 0) {
- exfat_msg(sb, KERN_ERR,
- "failed to %s (err : %d) nls len : %d",
- __func__, unilen, len);
+ exfat_err(sb, "failed to %s (err : %d) nls len : %d",
+ __func__, unilen, len);
return unilen;
}
if (unilen > MAX_NAME_LENGTH) {
- exfat_msg(sb, KERN_ERR,
- "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
- __func__, len, unilen, MAX_NAME_LENGTH);
+ exfat_err(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
+ __func__, len, unilen, MAX_NAME_LENGTH);
return -ENAMETOOLONG;
}
- p_uniname->name_len = unilen & 0xFF;
-
for (i = 0; i < unilen; i++) {
if (*uniname < 0x0020 ||
exfat_wstrchr(bad_uni_chars, *uniname))
@@ -529,7 +525,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
*uniname = '\0';
p_uniname->name_len = unilen;
- p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0,
+ p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
CS_DEFAULT);
if (p_lossy)
@@ -537,22 +533,9 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
return unilen;
}
-#define PLANE_SIZE 0x00010000
#define SURROGATE_MASK 0xfffff800
#define SURROGATE_PAIR 0x0000d800
#define SURROGATE_LOW 0x00000400
-#define SURROGATE_BITS 0x000003ff
-
-unsigned short exfat_high_surrogate(unicode_t u)
-{
- return ((u - PLANE_SIZE) >> 10) + SURROGATE_PAIR;
-}
-
-unsigned short exfat_low_surrogate(unicode_t u)
-{
- return ((u - PLANE_SIZE) & SURROGATE_BITS) | SURROGATE_PAIR |
- SURROGATE_LOW;
-}
static int __exfat_utf16_to_nls(struct super_block *sb,
struct exfat_uni_name *p_uniname, unsigned char *p_cstring,
@@ -638,7 +621,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
*uniname = '\0';
p_uniname->name_len = unilen;
- p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0,
+ p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
CS_DEFAULT);
if (p_lossy)
@@ -670,7 +653,8 @@ static int exfat_load_upcase_table(struct super_block *sb,
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned int sect_size = sb->s_blocksize;
- unsigned int i, index = 0, checksum = 0;
+ unsigned int i, index = 0;
+ u32 chksum = 0;
int ret;
unsigned char skip = false;
unsigned short *upcase_table;
@@ -687,9 +671,8 @@ static int exfat_load_upcase_table(struct super_block *sb,
bh = sb_bread(sb, sector);
if (!bh) {
- exfat_msg(sb, KERN_ERR,
- "failed to read sector(0x%llx)\n",
- (unsigned long long)sector);
+ exfat_err(sb, "failed to read sector(0x%llx)\n",
+ (unsigned long long)sector);
ret = -EIO;
goto free_table;
}
@@ -697,13 +680,6 @@ static int exfat_load_upcase_table(struct super_block *sb,
for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
unsigned short uni = get_unaligned_le16(bh->b_data + i);
- checksum = ((checksum & 1) ? 0x80000000 : 0) +
- (checksum >> 1) +
- *(((unsigned char *)bh->b_data) + i);
- checksum = ((checksum & 1) ? 0x80000000 : 0) +
- (checksum >> 1) +
- *(((unsigned char *)bh->b_data) + (i + 1));
-
if (skip) {
index += uni;
skip = false;
@@ -716,15 +692,15 @@ static int exfat_load_upcase_table(struct super_block *sb,
index++;
}
}
+ chksum = exfat_calc_chksum32(bh->b_data, i, chksum, CS_DEFAULT);
brelse(bh);
}
- if (index >= 0xFFFF && utbl_checksum == checksum)
+ if (index >= 0xFFFF && utbl_checksum == chksum)
return 0;
- exfat_msg(sb, KERN_ERR,
- "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)\n",
- index, checksum, utbl_checksum);
+ exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)",
+ index, chksum, utbl_checksum);
ret = -EINVAL;
free_table:
exfat_free_upcase_table(sbi);
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index a846ff555656..e650e65536f8 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -49,7 +49,7 @@ static void exfat_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
exfat_set_vol_flags(sb, VOL_CLEAN);
exfat_free_bitmap(sbi);
- brelse(sbi->pbr_bh);
+ brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
call_rcu(&sbi->rcu, exfat_delayed_free);
@@ -101,8 +101,8 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct pbr64 *bpb = (struct pbr64 *)sbi->pbr_bh->b_data;
- bool sync = 0;
+ struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
+ bool sync;
/* flags are not changed */
if (sbi->vol_flag == new_flag)
@@ -116,18 +116,18 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
if (sb_rdonly(sb))
return 0;
- bpb->bsx.vol_flags = cpu_to_le16(new_flag);
+ p_boot->vol_flags = cpu_to_le16(new_flag);
- if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->pbr_bh))
+ if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->boot_bh))
sync = true;
else
sync = false;
- set_buffer_uptodate(sbi->pbr_bh);
- mark_buffer_dirty(sbi->pbr_bh);
+ set_buffer_uptodate(sbi->boot_bh);
+ mark_buffer_dirty(sbi->boot_bh);
if (sync)
- sync_dirty_buffer(sbi->pbr_bh);
+ sync_dirty_buffer(sbi->boot_bh);
return 0;
}
@@ -273,9 +273,8 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_charset:
exfat_free_iocharset(sbi);
- opts->iocharset = kstrdup(param->string, GFP_KERNEL);
- if (!opts->iocharset)
- return -ENOMEM;
+ opts->iocharset = param->string;
+ param->string = NULL;
break;
case Opt_errors:
opts->errors = result.uint_32;
@@ -366,151 +365,208 @@ static int exfat_read_root(struct inode *inode)
return 0;
}
-static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb)
+static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct pbr *p_pbr = (struct pbr *) (sbi->pbr_bh)->b_data;
- unsigned short logical_sect = 0;
-
- logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits;
if (!is_power_of_2(logical_sect) ||
logical_sect < 512 || logical_sect > 4096) {
- exfat_msg(sb, KERN_ERR, "bogus logical sector size %u",
- logical_sect);
- return NULL;
+ exfat_err(sb, "bogus logical sector size %u", logical_sect);
+ return -EIO;
}
if (logical_sect < sb->s_blocksize) {
- exfat_msg(sb, KERN_ERR,
- "logical sector size too small for device (logical sector size = %u)",
- logical_sect);
- return NULL;
+ exfat_err(sb, "logical sector size too small for device (logical sector size = %u)",
+ logical_sect);
+ return -EIO;
}
if (logical_sect > sb->s_blocksize) {
- brelse(sbi->pbr_bh);
- sbi->pbr_bh = NULL;
+ brelse(sbi->boot_bh);
+ sbi->boot_bh = NULL;
if (!sb_set_blocksize(sb, logical_sect)) {
- exfat_msg(sb, KERN_ERR,
- "unable to set blocksize %u", logical_sect);
- return NULL;
+ exfat_err(sb, "unable to set blocksize %u",
+ logical_sect);
+ return -EIO;
}
- sbi->pbr_bh = sb_bread(sb, 0);
- if (!sbi->pbr_bh) {
- exfat_msg(sb, KERN_ERR,
- "unable to read boot sector (logical sector size = %lu)",
- sb->s_blocksize);
- return NULL;
+ sbi->boot_bh = sb_bread(sb, 0);
+ if (!sbi->boot_bh) {
+ exfat_err(sb, "unable to read boot sector (logical sector size = %lu)",
+ sb->s_blocksize);
+ return -EIO;
}
-
- p_pbr = (struct pbr *)sbi->pbr_bh->b_data;
}
- return p_pbr;
+ return 0;
}
-/* mount the file system volume */
-static int __exfat_fill_super(struct super_block *sb)
+static int exfat_read_boot_sector(struct super_block *sb)
{
- int ret;
- struct pbr *p_pbr;
- struct pbr64 *p_bpb;
+ struct boot_sector *p_boot;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
/* set block size to read super block */
sb_min_blocksize(sb, 512);
/* read boot sector */
- sbi->pbr_bh = sb_bread(sb, 0);
- if (!sbi->pbr_bh) {
- exfat_msg(sb, KERN_ERR, "unable to read boot sector");
+ sbi->boot_bh = sb_bread(sb, 0);
+ if (!sbi->boot_bh) {
+ exfat_err(sb, "unable to read boot sector");
return -EIO;
}
+ p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
- /* PRB is read */
- p_pbr = (struct pbr *)sbi->pbr_bh->b_data;
-
- /* check the validity of PBR */
- if (le16_to_cpu((p_pbr->signature)) != PBR_SIGNATURE) {
- exfat_msg(sb, KERN_ERR, "invalid boot record signature");
- ret = -EINVAL;
- goto free_bh;
+ /* check the validity of BOOT */
+ if (le16_to_cpu((p_boot->signature)) != BOOT_SIGNATURE) {
+ exfat_err(sb, "invalid boot record signature");
+ return -EINVAL;
}
-
- /* check logical sector size */
- p_pbr = exfat_read_pbr_with_logical_sector(sb);
- if (!p_pbr) {
- ret = -EIO;
- goto free_bh;
+ if (memcmp(p_boot->fs_name, STR_EXFAT, BOOTSEC_FS_NAME_LEN)) {
+ exfat_err(sb, "invalid fs_name"); /* fs_name may unprintable */
+ return -EINVAL;
}
/*
- * res_zero field must be filled with zero to prevent mounting
+ * must_be_zero field must be filled with zero to prevent mounting
* from FAT volume.
*/
- if (memchr_inv(p_pbr->bpb.f64.res_zero, 0,
- sizeof(p_pbr->bpb.f64.res_zero))) {
- ret = -EINVAL;
- goto free_bh;
- }
+ if (memchr_inv(p_boot->must_be_zero, 0, sizeof(p_boot->must_be_zero)))
+ return -EINVAL;
- p_bpb = (struct pbr64 *)p_pbr;
- if (!p_bpb->bsx.num_fats) {
- exfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
- ret = -EINVAL;
- goto free_bh;
+ if (p_boot->num_fats != 1 && p_boot->num_fats != 2) {
+ exfat_err(sb, "bogus number of FAT structure");
+ return -EINVAL;
}
- sbi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits;
- sbi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits;
- sbi->cluster_size_bits = sbi->sect_per_clus_bits + sb->s_blocksize_bits;
+ sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits;
+ sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits;
+ sbi->cluster_size_bits = p_boot->sect_per_clus_bits +
+ p_boot->sect_size_bits;
sbi->cluster_size = 1 << sbi->cluster_size_bits;
- sbi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length);
- sbi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset);
- sbi->FAT2_start_sector = p_bpb->bsx.num_fats == 1 ?
- sbi->FAT1_start_sector :
- sbi->FAT1_start_sector + sbi->num_FAT_sectors;
- sbi->data_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset);
- sbi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length);
+ sbi->num_FAT_sectors = le32_to_cpu(p_boot->fat_length);
+ sbi->FAT1_start_sector = le32_to_cpu(p_boot->fat_offset);
+ sbi->FAT2_start_sector = le32_to_cpu(p_boot->fat_offset);
+ if (p_boot->num_fats == 2)
+ sbi->FAT2_start_sector += sbi->num_FAT_sectors;
+ sbi->data_start_sector = le32_to_cpu(p_boot->clu_offset);
+ sbi->num_sectors = le64_to_cpu(p_boot->vol_length);
/* because the cluster index starts with 2 */
- sbi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) +
+ sbi->num_clusters = le32_to_cpu(p_boot->clu_count) +
EXFAT_RESERVED_CLUSTERS;
- sbi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster);
+ sbi->root_dir = le32_to_cpu(p_boot->root_cluster);
sbi->dentries_per_clu = 1 <<
(sbi->cluster_size_bits - DENTRY_SIZE_BITS);
- sbi->vol_flag = le16_to_cpu(p_bpb->bsx.vol_flags);
+ sbi->vol_flag = le16_to_cpu(p_boot->vol_flags);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
- if (le16_to_cpu(p_bpb->bsx.vol_flags) & VOL_DIRTY) {
- sbi->vol_flag |= VOL_DIRTY;
- exfat_msg(sb, KERN_WARNING,
- "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ /* check consistencies */
+ if (sbi->num_FAT_sectors << p_boot->sect_size_bits <
+ sbi->num_clusters * 4) {
+ exfat_err(sb, "bogus fat length");
+ return -EINVAL;
}
+ if (sbi->data_start_sector <
+ sbi->FAT1_start_sector + sbi->num_FAT_sectors * p_boot->num_fats) {
+ exfat_err(sb, "bogus data start sector");
+ return -EINVAL;
+ }
+ if (sbi->vol_flag & VOL_DIRTY)
+ exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ if (sbi->vol_flag & ERR_MEDIUM)
+ exfat_warn(sb, "Medium has reported failures. Some data may be lost.");
/* exFAT file size is limited by a disk volume size */
sb->s_maxbytes = (u64)(sbi->num_clusters - EXFAT_RESERVED_CLUSTERS) <<
sbi->cluster_size_bits;
+ /* check logical sector size */
+ if (exfat_calibrate_blocksize(sb, 1 << p_boot->sect_size_bits))
+ return -EIO;
+
+ return 0;
+}
+
+static int exfat_verify_boot_region(struct super_block *sb)
+{
+ struct buffer_head *bh = NULL;
+ u32 chksum = 0;
+ __le32 *p_sig, *p_chksum;
+ int sn, i;
+
+ /* read boot sector sub-regions */
+ for (sn = 0; sn < 11; sn++) {
+ bh = sb_bread(sb, sn);
+ if (!bh)
+ return -EIO;
+
+ if (sn != 0 && sn <= 8) {
+ /* extended boot sector sub-regions */
+ p_sig = (__le32 *)&bh->b_data[sb->s_blocksize - 4];
+ if (le32_to_cpu(*p_sig) != EXBOOT_SIGNATURE)
+ exfat_warn(sb, "Invalid exboot-signature(sector = %d): 0x%08x",
+ sn, le32_to_cpu(*p_sig));
+ }
+
+ chksum = exfat_calc_chksum32(bh->b_data, sb->s_blocksize,
+ chksum, sn ? CS_DEFAULT : CS_BOOT_SECTOR);
+ brelse(bh);
+ }
+
+ /* boot checksum sub-regions */
+ bh = sb_bread(sb, sn);
+ if (!bh)
+ return -EIO;
+
+ for (i = 0; i < sb->s_blocksize; i += sizeof(u32)) {
+ p_chksum = (__le32 *)&bh->b_data[i];
+ if (le32_to_cpu(*p_chksum) != chksum) {
+ exfat_err(sb, "Invalid boot checksum (boot checksum : 0x%08x, checksum : 0x%08x)",
+ le32_to_cpu(*p_chksum), chksum);
+ brelse(bh);
+ return -EINVAL;
+ }
+ }
+ brelse(bh);
+ return 0;
+}
+
+/* mount the file system volume */
+static int __exfat_fill_super(struct super_block *sb)
+{
+ int ret;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ ret = exfat_read_boot_sector(sb);
+ if (ret) {
+ exfat_err(sb, "failed to read boot sector");
+ goto free_bh;
+ }
+
+ ret = exfat_verify_boot_region(sb);
+ if (ret) {
+ exfat_err(sb, "invalid boot region");
+ goto free_bh;
+ }
+
ret = exfat_create_upcase_table(sb);
if (ret) {
- exfat_msg(sb, KERN_ERR, "failed to load upcase table");
+ exfat_err(sb, "failed to load upcase table");
goto free_bh;
}
ret = exfat_load_bitmap(sb);
if (ret) {
- exfat_msg(sb, KERN_ERR, "failed to load alloc-bitmap");
+ exfat_err(sb, "failed to load alloc-bitmap");
goto free_upcase_table;
}
ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
if (ret) {
- exfat_msg(sb, KERN_ERR, "failed to scan clusters");
+ exfat_err(sb, "failed to scan clusters");
goto free_alloc_bitmap;
}
@@ -521,7 +577,7 @@ free_alloc_bitmap:
free_upcase_table:
exfat_free_upcase_table(sbi);
free_bh:
- brelse(sbi->pbr_bh);
+ brelse(sbi->boot_bh);
return ret;
}
@@ -539,8 +595,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
struct request_queue *q = bdev_get_queue(sb->s_bdev);
if (!blk_queue_discard(q)) {
- exfat_msg(sb, KERN_WARNING,
- "mounting with \"discard\" option, but the device does not support discard");
+ exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
opts->discard = 0;
}
}
@@ -555,7 +610,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
err = __exfat_fill_super(sb);
if (err) {
- exfat_msg(sb, KERN_ERR, "failed to recognize exfat type");
+ exfat_err(sb, "failed to recognize exfat type");
goto check_nls_io;
}
@@ -567,8 +622,8 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
else {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
- exfat_msg(sb, KERN_ERR, "IO charset %s not found",
- sbi->options.iocharset);
+ exfat_err(sb, "IO charset %s not found",
+ sbi->options.iocharset);
err = -EINVAL;
goto free_table;
}
@@ -581,7 +636,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
root_inode = new_inode(sb);
if (!root_inode) {
- exfat_msg(sb, KERN_ERR, "failed to allocate root inode.");
+ exfat_err(sb, "failed to allocate root inode");
err = -ENOMEM;
goto free_table;
}
@@ -590,7 +645,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
inode_set_iversion(root_inode, 1);
err = exfat_read_root(root_inode);
if (err) {
- exfat_msg(sb, KERN_ERR, "failed to initialize root inode.");
+ exfat_err(sb, "failed to initialize root inode");
goto put_inode;
}
@@ -599,7 +654,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
- exfat_msg(sb, KERN_ERR, "failed to get the root dentry");
+ exfat_err(sb, "failed to get the root dentry");
err = -ENOMEM;
goto put_inode;
}
@@ -613,7 +668,7 @@ put_inode:
free_table:
exfat_free_upcase_table(sbi);
exfat_free_bitmap(sbi);
- brelse(sbi->pbr_bh);
+ brelse(sbi->boot_bh);
check_nls_io:
unload_nls(sbi->nls_io);
@@ -630,7 +685,12 @@ static int exfat_get_tree(struct fs_context *fc)
static void exfat_free(struct fs_context *fc)
{
- kfree(fc->s_fs_info);
+ struct exfat_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) {
+ exfat_free_iocharset(sbi);
+ kfree(sbi);
+ }
}
static const struct fs_context_operations exfat_context_ops = {
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 39c4772e96c9..60378ddf1424 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -79,7 +79,7 @@ out_unlock:
/*
* The lock ordering for ext2 DAX fault paths is:
*
- * mmap_sem (MM)
+ * mmap_lock (MM)
* sb_start_pagefault (vfs, freeze)
* ext2_inode_info->dax_sem
* address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
@@ -196,9 +196,7 @@ const struct file_operations ext2_file_operations = {
};
const struct inode_operations ext2_file_inode_operations = {
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
.getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 2875c0a705b5..c8b371c82b4f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -36,6 +36,7 @@
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
+#include <linux/fiemap.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index ccfbbf59e2fc..ba3e3e075891 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -136,9 +136,7 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT2_FS_XATTR
inode->i_op = &ext2_special_inode_operations;
-#endif
mark_inode_dirty(inode);
err = ext2_add_nondir(dentry, inode);
}
@@ -413,9 +411,7 @@ const struct inode_operations ext2_dir_inode_operations = {
.rmdir = ext2_rmdir,
.mknod = ext2_mknod,
.rename = ext2_rename,
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
.getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
@@ -424,9 +420,7 @@ const struct inode_operations ext2_dir_inode_operations = {
};
const struct inode_operations ext2_special_inode_operations = {
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
.getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 00cdb8679486..948d3a441403 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -25,16 +25,12 @@ const struct inode_operations ext2_symlink_inode_operations = {
.get_link = page_get_link,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
};
const struct inode_operations ext2_fast_symlink_inode_operations = {
.get_link = simple_get_link,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
};
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index 16272e6ddcf4..7925f596e8e2 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -100,6 +100,7 @@ static inline void ext2_xattr_destroy_cache(struct mb_cache *cache)
}
#define ext2_xattr_handlers NULL
+#define ext2_listxattr NULL
# endif /* CONFIG_EXT2_FS_XATTR */
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 2a592e38cdfe..1afa5a4bcb5f 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -99,13 +99,13 @@ config EXT4_DEBUG
Enables run-time debugging support for the ext4 filesystem.
If you select Y here, then you will be able to turn on debugging
- with a command such as:
- echo 1 > /sys/module/ext4/parameters/mballoc_debug
+ using dynamic debug control for mb_debug() / ext_debug() msgs.
config EXT4_KUNIT_TESTS
- tristate "KUnit tests for ext4"
+ tristate "KUnit tests for ext4" if !KUNIT_ALL_TESTS
select EXT4_FS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the ext4 KUnit tests.
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 8c7bbf3e566d..76f634d185f1 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -215,9 +215,8 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
value, size, xattr_flags);
kfree(value);
- if (!error) {
+ if (!error)
set_cached_acl(inode, type, acl);
- }
return error;
}
@@ -256,7 +255,7 @@ retry:
if (!error && update_mode) {
inode->i_mode = mode;
inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ error = ext4_mark_inode_dirty(handle, inode);
}
out_stop:
ext4_journal_stop(handle);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index a32e5f7b5385..1ba46d87cdf1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -903,10 +903,11 @@ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
return bg_start;
if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
- colour = (current->pid % 16) *
+ colour = (task_pid_nr(current) % 16) *
(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
else
- colour = (current->pid % 16) * ((last_block - bg_start) / 16);
+ colour = (task_pid_nr(current) % 16) *
+ ((last_block - bg_start) / 16);
return bg_start + colour;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 15b062efcff1..b08841f70b69 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -36,6 +36,7 @@
#include <crypto/hash.h>
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
+#include <linux/fiemap.h>
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
@@ -80,14 +81,22 @@
#define ext4_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
+ /*
+ * Turn on EXT_DEBUG to enable ext4_ext_show_path/leaf/move in extents.c
+ */
+#define EXT_DEBUG__
+
/*
- * Turn on EXT_DEBUG to get lots of info about extents operations.
+ * Dynamic printk for controlled extents debugging.
*/
-#define EXT_DEBUG__
-#ifdef EXT_DEBUG
-#define ext_debug(fmt, ...) printk(fmt, ##__VA_ARGS__)
+#ifdef CONFIG_EXT4_DEBUG
+#define ext_debug(ino, fmt, ...) \
+ pr_debug("[%s/%d] EXT4-fs (%s): ino %lu: (%s, %d): %s:" fmt, \
+ current->comm, task_pid_nr(current), \
+ ino->i_sb->s_id, ino->i_ino, __FILE__, __LINE__, \
+ __func__, ##__VA_ARGS__)
#else
-#define ext_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#define ext_debug(ino, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
/* data type for block offset of block group */
@@ -142,6 +151,8 @@ enum SHIFT_DIRECTION {
#define EXT4_MB_USE_ROOT_BLOCKS 0x1000
/* Use blocks from reserved pool */
#define EXT4_MB_USE_RESERVED 0x2000
+/* Do strict check for free blocks while retrying block allocation */
+#define EXT4_MB_STRICT_CHECK 0x4000
struct ext4_allocation_request {
/* target inode for block we're allocating */
@@ -171,10 +182,10 @@ struct ext4_allocation_request {
* well as to store the information returned by ext4_map_blocks(). It
* takes less room on the stack than a struct buffer_head.
*/
-#define EXT4_MAP_NEW (1 << BH_New)
-#define EXT4_MAP_MAPPED (1 << BH_Mapped)
-#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
-#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
+#define EXT4_MAP_NEW BIT(BH_New)
+#define EXT4_MAP_MAPPED BIT(BH_Mapped)
+#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten)
+#define EXT4_MAP_BOUNDARY BIT(BH_Boundary)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
@@ -417,7 +428,7 @@ struct flex_groups {
/* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */
#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
#define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
-#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */
+#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
#define EXT4_FL_USER_VISIBLE 0x705BDFFF /* User visible flags */
@@ -490,6 +501,7 @@ enum {
/* 22 was formerly EXT4_INODE_EOFBLOCKS */
EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */
+ EXT4_INODE_CASEFOLD = 30, /* Casefolded directory */
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
};
@@ -535,6 +547,7 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(EA_INODE);
CHECK_FLAG_VALUE(INLINE_DATA);
CHECK_FLAG_VALUE(PROJINHERIT);
+ CHECK_FLAG_VALUE(CASEFOLD);
CHECK_FLAG_VALUE(RESERVED);
}
@@ -609,8 +622,6 @@ enum {
#define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020
/* Don't normalize allocation size (used for fallocate) */
#define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040
- /* Request will not result in inode size update (user for fallocate) */
-#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
/* Convert written extents to unwritten */
#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0100
/* Write zeros to newly created written extents */
@@ -632,6 +643,7 @@ enum {
*/
#define EXT4_EX_NOCACHE 0x40000000
#define EXT4_EX_FORCE_CACHE 0x20000000
+#define EXT4_EX_NOFAIL 0x10000000
/*
* Flags used by ext4_free_blocks
@@ -2051,7 +2063,7 @@ struct ext4_dir_entry_2 {
__le32 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__u8 name_len; /* Name length */
- __u8 file_type;
+ __u8 file_type; /* See file type macros EXT4_FT_* below */
char name[EXT4_NAME_LEN]; /* File name */
};
@@ -3354,7 +3366,7 @@ struct ext4_extent;
*/
#define EXT_MAX_BLOCKS 0xffffffff
-extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
+extern void ext4_ext_tree_init(handle_t *handle, struct inode *inode);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 1c216fcc202a..44e59881a1f0 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -170,10 +170,13 @@ struct partial_cluster {
(EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
#define EXT_LAST_INDEX(__hdr__) \
(EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
-#define EXT_MAX_EXTENT(__hdr__) \
- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+#define EXT_MAX_EXTENT(__hdr__) \
+ ((le16_to_cpu((__hdr__)->eh_max)) ? \
+ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \
+ : 0)
#define EXT_MAX_INDEX(__hdr__) \
- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+ ((le16_to_cpu((__hdr__)->eh_max)) ? \
+ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0)
static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode)
{
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 4b9002f0e84c..00dc668e052b 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -222,7 +222,10 @@ ext4_mark_iloc_dirty(handle_t *handle,
int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc);
-int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
+#define ext4_mark_inode_dirty(__h, __i) \
+ __ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__)
+int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
+ const char *func, unsigned int line);
int ext4_expand_extra_isize(struct inode *inode,
unsigned int new_extra_isize,
@@ -335,12 +338,6 @@ static inline handle_t *__ext4_journal_start(struct inode *inode,
handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
int type);
-static inline void ext4_journal_free_reserved(handle_t *handle)
-{
- if (ext4_handle_valid(handle))
- jbd2_journal_free_reserved(handle);
-}
-
static inline handle_t *ext4_journal_current_handle(void)
{
return journal_current_handle();
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2b4b94542e34..7d088ff1e902 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -297,11 +297,14 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
{
struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
+ int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
+
+ if (nofail)
+ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
- EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
- (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
+ flags);
}
static int
@@ -487,8 +490,12 @@ __read_extent_tree_block(const char *function, unsigned int line,
{
struct buffer_head *bh;
int err;
+ gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
+
+ if (flags & EXT4_EX_NOFAIL)
+ gfp_flags |= __GFP_NOFAIL;
- bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
+ bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
@@ -600,22 +607,22 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
int k, l = path->p_depth;
- ext_debug("path:");
+ ext_debug(inode, "path:");
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
- ext_debug(" %d->%llu",
+ ext_debug(inode, " %d->%llu",
le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
- ext_debug(" %d:[%d]%d:%llu ",
+ ext_debug(inode, " %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
ext4_ext_pblock(path->p_ext));
} else
- ext_debug(" []");
+ ext_debug(inode, " []");
}
- ext_debug("\n");
+ ext_debug(inode, "\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
@@ -631,14 +638,14 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
eh = path[depth].p_hdr;
ex = EXT_FIRST_EXTENT(eh);
- ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
+ ext_debug(inode, "Displaying leaf extents\n");
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
- ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
+ ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
}
- ext_debug("\n");
+ ext_debug(inode, "\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
@@ -651,10 +658,9 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
struct ext4_extent_idx *idx;
idx = path[level].p_idx;
while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
- ext_debug("%d: move %d:%llu in new index %llu\n", level,
- le32_to_cpu(idx->ei_block),
- ext4_idx_pblock(idx),
- newblock);
+ ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
+ level, le32_to_cpu(idx->ei_block),
+ ext4_idx_pblock(idx), newblock);
idx++;
}
@@ -663,7 +669,7 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
ex = path[depth].p_ext;
while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
- ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
+ ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
le32_to_cpu(ex->ee_block),
ext4_ext_pblock(ex),
ext4_ext_is_unwritten(ex),
@@ -707,7 +713,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
struct ext4_extent_idx *r, *l, *m;
- ext_debug("binsearch for %u(idx): ", block);
+ ext_debug(inode, "binsearch for %u(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
r = EXT_LAST_INDEX(eh);
@@ -717,13 +723,13 @@ ext4_ext_binsearch_idx(struct inode *inode,
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
- m, le32_to_cpu(m->ei_block),
- r, le32_to_cpu(r->ei_block));
+ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
+ le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
+ r, le32_to_cpu(r->ei_block));
}
path->p_idx = l - 1;
- ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
+ ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
@@ -774,7 +780,7 @@ ext4_ext_binsearch(struct inode *inode,
return;
}
- ext_debug("binsearch for %u: ", block);
+ ext_debug(inode, "binsearch for %u: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
r = EXT_LAST_EXTENT(eh);
@@ -785,13 +791,13 @@ ext4_ext_binsearch(struct inode *inode,
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
- m, le32_to_cpu(m->ee_block),
- r, le32_to_cpu(r->ee_block));
+ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
+ le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
+ r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
- ext_debug(" -> %d:%llu:[%d]%d ",
+ ext_debug(inode, " -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_pblock(path->p_ext),
ext4_ext_is_unwritten(path->p_ext),
@@ -816,7 +822,7 @@ ext4_ext_binsearch(struct inode *inode,
}
-int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
struct ext4_extent_header *eh;
@@ -826,7 +832,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
eh->eh_magic = EXT4_EXT_MAGIC;
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
ext4_mark_inode_dirty(handle, inode);
- return 0;
}
struct ext4_ext_path *
@@ -838,6 +843,10 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
+ gfp_t gfp_flags = GFP_NOFS;
+
+ if (flags & EXT4_EX_NOFAIL)
+ gfp_flags |= __GFP_NOFAIL;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
@@ -858,7 +867,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
if (!path) {
/* account possible depth increase */
path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
- GFP_NOFS);
+ gfp_flags);
if (unlikely(!path))
return ERR_PTR(-ENOMEM);
path[0].p_maxdepth = depth + 1;
@@ -871,7 +880,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
ext4_cache_extents(inode, eh);
/* walk through the tree */
while (i) {
- ext_debug("depth %d: num %d, max %d\n",
+ ext_debug(inode, "depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
ext4_ext_binsearch_idx(inode, path + ppos, block);
@@ -948,18 +957,20 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
- ext_debug("insert new index %d after: %llu\n", logical, ptr);
+ ext_debug(inode, "insert new index %d after: %llu\n",
+ logical, ptr);
ix = curp->p_idx + 1;
} else {
/* insert before */
- ext_debug("insert new index %d before: %llu\n", logical, ptr);
+ ext_debug(inode, "insert new index %d before: %llu\n",
+ logical, ptr);
ix = curp->p_idx;
}
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
BUG_ON(len < 0);
if (len > 0) {
- ext_debug("insert new index %d: "
+ ext_debug(inode, "insert new index %d: "
"move %d indices from 0x%p to 0x%p\n",
logical, len, ix, ix + 1);
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
@@ -1008,9 +1019,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
ext4_fsblk_t newblock, oldblock;
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
+ gfp_t gfp_flags = GFP_NOFS;
int err = 0;
size_t ext_size = 0;
+ if (flags & EXT4_EX_NOFAIL)
+ gfp_flags |= __GFP_NOFAIL;
+
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
@@ -1022,12 +1037,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
}
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
- ext_debug("leaf will be split."
+ ext_debug(inode, "leaf will be split."
" next leaf starts at %d\n",
le32_to_cpu(border));
} else {
border = newext->ee_block;
- ext_debug("leaf will be added."
+ ext_debug(inode, "leaf will be added."
" next leaf starts at %d\n",
le32_to_cpu(border));
}
@@ -1044,12 +1059,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 * We need this to handle errors and to free the
 * allocated blocks on failure.
*/
- ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
+ ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
if (!ablocks)
return -ENOMEM;
/* allocate all needed blocks */
- ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
+ ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
for (a = 0; a < depth - at; a++) {
newblock = ext4_ext_new_meta_block(handle, inode, path,
newext, &err, flags);
@@ -1135,7 +1150,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
goto cleanup;
}
if (k)
- ext_debug("create %d intermediate indices\n", k);
+ ext_debug(inode, "create %d intermediate indices\n", k);
/* insert new index into current index block */
/* current depth stored in i var */
i = depth - 1;
@@ -1162,7 +1177,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
fidx->ei_block = border;
ext4_idx_store_pblock(fidx, oldblock);
- ext_debug("int.index at %d (block %llu): %u -> %llu\n",
+ ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
i, newblock, le32_to_cpu(border), oldblock);
/* move remainder of path[i] to the new index block */
@@ -1176,7 +1191,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
}
/* start copy indexes */
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
- ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
+ ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
ext4_ext_show_move(inode, path, newblock, i);
if (m) {
@@ -1313,13 +1328,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
EXT_FIRST_INDEX(neh)->ei_block =
EXT_FIRST_EXTENT(neh)->ee_block;
}
- ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
+ ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
le16_add_cpu(&neh->eh_depth, 1);
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
@@ -1955,7 +1970,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
/* Try to append newex to the ex */
if (ext4_can_extents_be_merged(inode, ex, newext)) {
- ext_debug("append [%d]%d block to %u:[%d]%d"
+ ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
"(from %llu)\n",
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
@@ -1980,7 +1995,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
prepend:
/* Try to prepend newex to the ex */
if (ext4_can_extents_be_merged(inode, newext, ex)) {
- ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
+ ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
"(from %llu)\n",
le32_to_cpu(newext->ee_block),
ext4_ext_is_unwritten(newext),
@@ -2018,20 +2033,20 @@ prepend:
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
- ext_debug("next leaf block - %u\n", next);
+ ext_debug(inode, "next leaf block - %u\n", next);
BUG_ON(npath != NULL);
- npath = ext4_find_extent(inode, next, NULL, 0);
+ npath = ext4_find_extent(inode, next, NULL, gb_flags);
if (IS_ERR(npath))
return PTR_ERR(npath);
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
- ext_debug("next leaf isn't full(%d)\n",
+ ext_debug(inode, "next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
goto has_space;
}
- ext_debug("next leaf has no free space(%d,%d)\n",
+ ext_debug(inode, "next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
}
@@ -2057,7 +2072,7 @@ has_space:
if (!nearex) {
/* there is no extent in this leaf, create first one */
- ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
+ ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
@@ -2067,7 +2082,7 @@ has_space:
if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
/* Insert after */
- ext_debug("insert %u:%llu:[%d]%d before: "
+ ext_debug(inode, "insert %u:%llu:[%d]%d before: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
@@ -2078,7 +2093,7 @@ has_space:
} else {
/* Insert before */
BUG_ON(newext->ee_block == nearex->ee_block);
- ext_debug("insert %u:%llu:[%d]%d after: "
+ ext_debug(inode, "insert %u:%llu:[%d]%d after: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
@@ -2088,7 +2103,7 @@ has_space:
}
len = EXT_LAST_EXTENT(eh) - nearex + 1;
if (len > 0) {
- ext_debug("insert %u:%llu:[%d]%d: "
+ ext_debug(inode, "insert %u:%llu:[%d]%d: "
"move %d extents from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
@@ -2232,7 +2247,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
return;
hole_len = min(es.es_lblk - hole_start, hole_len);
}
- ext_debug(" -> %u:%u\n", hole_start, hole_len);
+ ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
EXTENT_STATUS_HOLE);
}
@@ -2269,7 +2284,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
err = ext4_ext_dirty(handle, inode, path);
if (err)
return err;
- ext_debug("index is empty, remove it, free block %llu\n", leaf);
+ ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
trace_ext4_ext_rm_idx(inode, leaf);
ext4_free_blocks(handle, inode, NULL, leaf, 1,
@@ -2548,7 +2563,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ext4_fsblk_t pblk;
/* the header must be checked already in ext4_ext_remove_space() */
- ext_debug("truncate since %u in leaf to %u\n", start, end);
+ ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
@@ -2574,7 +2589,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
else
unwritten = 0;
- ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
+ ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
unwritten, ex_ee_len);
path[depth].p_ext = ex;
@@ -2582,7 +2597,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
b = ex_ee_block+ex_ee_len - 1 < end ?
ex_ee_block+ex_ee_len - 1 : end;
- ext_debug(" border %u:%u\n", a, b);
+ ext_debug(inode, " border %u:%u\n", a, b);
/* If this extent is beyond the end of the hole, skip it */
if (end < ex_ee_block) {
@@ -2691,7 +2706,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (err)
goto out;
- ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
+ ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
ext4_ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
@@ -2768,7 +2783,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
partial.lblk = 0;
partial.state = initial;
- ext_debug("truncate since %u to %u\n", start, end);
+ ext_debug(inode, "truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
@@ -2793,7 +2808,8 @@ again:
ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */
- path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
+ path = ext4_find_extent(inode, end, NULL,
+ EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
@@ -2879,7 +2895,7 @@ again:
le16_to_cpu(path[k].p_hdr->eh_entries)+1;
} else {
path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
- GFP_NOFS);
+ GFP_NOFS | __GFP_NOFAIL);
if (path == NULL) {
ext4_journal_stop(handle);
return -ENOMEM;
@@ -2909,7 +2925,7 @@ again:
/* this is index block */
if (!path[i].p_hdr) {
- ext_debug("initialize header\n");
+ ext_debug(inode, "initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
}
@@ -2917,7 +2933,7 @@ again:
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
- ext_debug("init index ptr: hdr 0x%p, num %d\n",
+ ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
path[i].p_hdr,
le16_to_cpu(path[i].p_hdr->eh_entries));
} else {
@@ -2925,13 +2941,13 @@ again:
path[i].p_idx--;
}
- ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
+ ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
struct buffer_head *bh;
/* go to the next level */
- ext_debug("move to level %d (block %llu)\n",
+ ext_debug(inode, "move to level %d (block %llu)\n",
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode,
@@ -2967,7 +2983,7 @@ again:
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
- ext_debug("return to level %d\n", i);
+ ext_debug(inode, "return to level %d\n", i);
}
}
@@ -3135,8 +3151,7 @@ static int ext4_split_extent_at(handle_t *handle,
BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
(EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
- ext_debug("ext4_split_extents_at: inode %lu, logical"
- "block %llu\n", inode->i_ino, (unsigned long long)split);
+ ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
ext4_ext_show_leaf(inode, path);
@@ -3244,6 +3259,10 @@ out:
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
+ /*
+ * Ignore the ext4_ext_dirty return value since we are already in the error
+ * and err is a non-zero error code.
+ */
ext4_ext_dirty(handle, inode, path + path->p_depth);
return err;
}
@@ -3300,7 +3319,7 @@ static int ext4_split_extent(handle_t *handle,
* Update path is required because previous ext4_split_extent_at() may
* result in split of original leaf or extent zeroout.
*/
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
@@ -3369,9 +3388,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
int err = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
- ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)map->m_lblk, map_len);
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ (unsigned long long)map->m_lblk, map_len);
sbi = EXT4_SB(inode->i_sb);
eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
@@ -3503,7 +3521,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
}
if (allocated) {
/* Mark the block containing both extents as dirty */
- ext4_ext_dirty(handle, inode, path + depth);
+ err = ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
@@ -3623,8 +3641,7 @@ static int ext4_split_convert_extents(handle_t *handle,
unsigned int ee_len;
int split_flag = 0, depth;
- ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
- __func__, inode->i_ino,
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)map->m_lblk, map->m_len);
eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
@@ -3670,8 +3687,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
- ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)ee_block, ee_len);
/* If extent is larger than requested it is a clear sign that we still
@@ -3741,8 +3757,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
- ext_debug("%s: inode %lu, logical"
- "block %llu, max_blocks %u\n", __func__, inode->i_ino,
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
@@ -3794,16 +3809,13 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
-#ifdef EXT_DEBUG
- struct ext4_ext_path *path = *ppath;
-#endif
+ struct ext4_ext_path __maybe_unused *path = *ppath;
int ret = 0;
int err = 0;
- ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
- "block %llu, max_blocks %u, flags %x, allocated %u\n",
- inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
- flags, allocated);
+ ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
+ (unsigned long long)map->m_lblk, map->m_len, flags,
+ allocated);
ext4_ext_show_leaf(inode, path);
/*
@@ -3815,39 +3827,38 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
allocated, newblock);
- /* get_block() before submit the IO, split the extent */
+ /* get_block() before submitting IO, split the extent */
if (flags & EXT4_GET_BLOCKS_PRE_IO) {
ret = ext4_split_convert_extents(handle, inode, map, ppath,
flags | EXT4_GET_BLOCKS_CONVERT);
- if (ret <= 0)
- goto out;
+ if (ret < 0) {
+ err = ret;
+ goto out2;
+ }
+ /*
+ * shouldn't get a 0 return when splitting an extent unless
+ * m_len is 0 (bug) or extent has been corrupted
+ */
+ if (unlikely(ret == 0)) {
+ EXT4_ERROR_INODE(inode,
+ "unexpected ret == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+ goto out2;
+ }
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io complete, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
- if (flags & EXT4_GET_BLOCKS_ZERO) {
- if (allocated > map->m_len)
- allocated = map->m_len;
- err = ext4_issue_zeroout(inode, map->m_lblk, newblock,
- allocated);
- if (err < 0)
- goto out2;
- }
- ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
+ err = ext4_convert_unwritten_extents_endio(handle, inode, map,
ppath);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
- else
- err = ret;
- map->m_flags |= EXT4_MAP_MAPPED;
- map->m_pblk = newblock;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
- goto out2;
+ if (err < 0)
+ goto out2;
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ goto map_out;
}
- /* buffered IO case */
+ /* buffered IO cases */
/*
* repeat fallocate creation request
* we already have an unwritten extent
@@ -3870,29 +3881,39 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
goto out1;
}
- /* buffered write, writepage time, convert*/
+ /*
+ * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
+ * For buffered writes, at writepage time, etc. Convert a
+ * discovered unwritten extent to written.
+ */
ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
-out:
- if (ret <= 0) {
+ if (ret < 0) {
err = ret;
goto out2;
- } else
- allocated = ret;
- map->m_flags |= EXT4_MAP_NEW;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
+ }
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ /*
+ * shouldn't get a 0 return when converting an unwritten extent
+ * unless m_len is 0 (bug) or extent has been corrupted
+ */
+ if (unlikely(ret == 0)) {
+ EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+ goto out2;
+ }
+out:
+ allocated = ret;
+ map->m_flags |= EXT4_MAP_NEW;
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
out1:
+ map->m_pblk = newblock;
if (allocated > map->m_len)
allocated = map->m_len;
- ext4_ext_show_leaf(inode, path);
- map->m_pblk = newblock;
map->m_len = allocated;
+ ext4_ext_show_leaf(inode, path);
out2:
return err ? err : allocated;
}
@@ -4024,15 +4045,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path = NULL;
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_fsblk_t newblock = 0;
+ ext4_fsblk_t newblock = 0, pblk;
int err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_lblk_t cluster_offset;
- ext_debug("blocks %u/%u requested for inode %lu\n",
- map->m_lblk, map->m_len, inode->i_ino);
+ ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
@@ -4040,7 +4060,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
- goto out2;
+ goto out;
}
depth = ext_depth(inode);
@@ -4056,7 +4076,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
(unsigned long) map->m_lblk, depth,
path[depth].p_block);
err = -EFSCORRUPTED;
- goto out2;
+ goto out;
}
ex = path[depth].p_ext;
@@ -4079,8 +4099,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
newblock = map->m_lblk - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (map->m_lblk - ee_block);
- ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
- ee_block, ee_len, newblock);
+ ext_debug(inode, "%u fit into %u:%d -> %llu\n",
+ map->m_lblk, ee_block, ee_len, newblock);
/*
* If the extent is initialized check whether the
@@ -4090,8 +4110,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
err = convert_initialized_extent(handle,
inode, map, &path, &allocated);
- goto out2;
+ goto out;
} else if (!ext4_ext_is_unwritten(ex)) {
+ map->m_flags |= EXT4_MAP_MAPPED;
+ map->m_pblk = newblock;
+ if (allocated > map->m_len)
+ allocated = map->m_len;
+ map->m_len = allocated;
+ ext4_ext_show_leaf(inode, path);
goto out;
}
@@ -4102,7 +4128,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
err = ret;
else
allocated = ret;
- goto out2;
+ goto out;
}
}
@@ -4127,7 +4153,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
map->m_pblk = 0;
map->m_len = min_t(unsigned int, map->m_len, hole_len);
- goto out2;
+ goto out;
}
/*
@@ -4151,12 +4177,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ar.lleft = map->m_lblk;
err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
if (err)
- goto out2;
+ goto out;
ar.lright = map->m_lblk;
ex2 = NULL;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
if (err)
- goto out2;
+ goto out;
/* Check if the extent after searching to the right implies a
* cluster we can use. */
@@ -4217,17 +4243,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ar.flags |= EXT4_MB_USE_RESERVED;
newblock = ext4_mb_new_blocks(handle, &ar, &err);
if (!newblock)
- goto out2;
- ext_debug("allocate new block: goal %llu, found %llu/%u\n",
- ar.goal, newblock, allocated);
+ goto out;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
+ ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
+ ar.goal, newblock, ar.len, allocated);
if (ar.len > allocated)
ar.len = allocated;
got_allocated_blocks:
/* try to insert new extent into found leaf and return */
- ext4_ext_store_pblock(&newex, newblock + offset);
+ pblk = newblock + offset;
+ ext4_ext_store_pblock(&newex, pblk);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
@@ -4252,16 +4279,9 @@ got_allocated_blocks:
EXT4_C2B(sbi, allocated_clusters),
fb_flags);
}
- goto out2;
+ goto out;
}
- /* previous routine could use block we allocated */
- newblock = ext4_ext_pblock(&newex);
- allocated = ext4_ext_get_actual_len(&newex);
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_flags |= EXT4_MAP_NEW;
-
/*
* Reduce the reserved cluster count to reflect successful deferred
* allocation of delayed allocated clusters or direct allocation of
@@ -4307,14 +4327,14 @@ got_allocated_blocks:
ext4_update_inode_fsync_trans(handle, inode, 1);
else
ext4_update_inode_fsync_trans(handle, inode, 0);
-out:
- if (allocated > map->m_len)
- allocated = map->m_len;
+
+ map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
+ map->m_pblk = pblk;
+ map->m_len = ar.len;
+ allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
- map->m_flags |= EXT4_MAP_MAPPED;
- map->m_pblk = newblock;
- map->m_len = allocated;
-out2:
+
+out:
ext4_ext_drop_refs(path);
kfree(path);
@@ -4353,7 +4373,14 @@ retry:
}
if (err)
return err;
- return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+retry_remove_space:
+ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+ if (err == -ENOMEM) {
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_remove_space;
+ }
+ return err;
}
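
The retry_remove_space loop above retries the extent-tree removal when it hits transient memory pressure, yielding and waiting roughly HZ/50 (about 20 ms) before trying again. Purely as an illustration of that shape, here is a hedged userspace sketch; do_remove(), the function name, and the use of nanosleep() in place of cond_resched()/congestion_wait() are assumptions, not part of the patch:

#include <errno.h>
#include <time.h>

/* do_remove() is assumed to exist and to return 0 or a negative errno value. */
static int remove_with_retry(int (*do_remove)(void))
{
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 20 * 1000 * 1000 };
    int err;

    for (;;) {
        err = do_remove();
        if (err != -ENOMEM)
            return err;    /* success, or an error that retrying won't fix */
        /* Transient memory pressure: back off ~20 ms, then try again. */
        nanosleep(&ts, NULL);
    }
}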
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
@@ -4363,7 +4390,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
struct inode *inode = file_inode(file);
handle_t *handle;
int ret = 0;
- int ret2 = 0;
+ int ret2 = 0, ret3 = 0;
int retries = 0;
int depth = 0;
struct ext4_map_blocks map;
@@ -4423,10 +4450,11 @@ retry:
if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
}
- ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
- ret2 = ext4_journal_stop(handle);
- if (ret2)
+ ret3 = ext4_journal_stop(handle);
+ ret2 = ret3 ? ret3 : ret2;
+ if (unlikely(ret2))
break;
}
if (ret == -ENOSPC &&
@@ -4490,7 +4518,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
inode_lock(inode);
/*
- * Indirect files do not support unwritten extnets
+ * Indirect files do not support unwritten extents
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
ret = -EOPNOTSUPP;
@@ -4507,8 +4535,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- if (mode & FALLOC_FL_KEEP_SIZE)
- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
/* Wait all existing dio workers, newcomers will block on i_mutex */
inode_dio_wait(inode);
@@ -4577,7 +4603,9 @@ static long ext4_zero_range(struct file *file, loff_t offset,
inode->i_mtime = inode->i_ctime = current_time(inode);
if (new_size)
ext4_update_inode_size(inode, new_size);
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret))
+ goto out_handle;
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
@@ -4587,6 +4615,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
+out_handle:
ext4_journal_stop(handle);
out_mutex:
inode_unlock(inode);
@@ -4647,8 +4676,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- if (mode & FALLOC_FL_KEEP_SIZE)
- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
inode_lock(inode);
@@ -4700,8 +4727,7 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len)
{
unsigned int max_blocks;
- int ret = 0;
- int ret2 = 0;
+ int ret = 0, ret2 = 0, ret3 = 0;
struct ext4_map_blocks map;
unsigned int blkbits = inode->i_blkbits;
unsigned int credits = 0;
@@ -4734,9 +4760,13 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
- ext4_mark_inode_dirty(handle, inode);
- if (credits)
- ret2 = ext4_journal_stop(handle);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (credits) {
+ ret3 = ext4_journal_stop(handle);
+ if (unlikely(ret3))
+ ret2 = ret3;
+ }
+
if (ret <= 0 || ret2)
break;
}
@@ -4854,11 +4884,9 @@ static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
return 0;
}
-static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len, bool from_es_cache)
+int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
{
- ext4_lblk_t start_blk;
- u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR;
int error = 0;
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
@@ -4868,12 +4896,6 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
}
- if (from_es_cache)
- ext4_fiemap_flags &= FIEMAP_FLAG_XATTR;
-
- if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
- return -EBADR;
-
/*
* For bitmap files the maximum size limit could be smaller than
* s_maxbytes, so check len here manually instead of just relying on the
@@ -4885,40 +4907,20 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
- error = iomap_fiemap(inode, fieinfo, start, len,
- &ext4_iomap_xattr_ops);
- } else if (!from_es_cache) {
- error = iomap_fiemap(inode, fieinfo, start, len,
- &ext4_iomap_report_ops);
- } else {
- ext4_lblk_t len_blks;
- __u64 last_blk;
-
- start_blk = start >> inode->i_sb->s_blocksize_bits;
- last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
- if (last_blk >= EXT_MAX_BLOCKS)
- last_blk = EXT_MAX_BLOCKS-1;
- len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
-
- /*
- * Walk the extent tree gathering extent information
- * and pushing extents back to the user.
- */
- error = ext4_fill_es_cache_info(inode, start_blk, len_blks,
- fieinfo);
+ return iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_xattr_ops);
}
- return error;
-}
-int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len)
-{
- return _ext4_fiemap(inode, fieinfo, start, len, false);
+ return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
}
int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
+ ext4_lblk_t start_blk, len_blks;
+ __u64 last_blk;
+ int error = 0;
+
if (ext4_has_inline_data(inode)) {
int has_inline;
@@ -4929,9 +4931,33 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
return 0;
}
- return _ext4_fiemap(inode, fieinfo, start, len, true);
-}
+ if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ error = ext4_ext_precache(inode);
+ if (error)
+ return error;
+ fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
+ }
+
+ error = fiemap_prep(inode, fieinfo, start, &len, 0);
+ if (error)
+ return error;
+
+ error = ext4_fiemap_check_ranges(inode, start, &len);
+ if (error)
+ return error;
+ start_blk = start >> inode->i_sb->s_blocksize_bits;
+ last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
+ if (last_blk >= EXT_MAX_BLOCKS)
+ last_blk = EXT_MAX_BLOCKS-1;
+ len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
+
+ /*
+ * Walk the extent tree gathering extent information
+ * and pushing extents back to the user.
+ */
+ return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
+}
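
For readers following the block arithmetic in ext4_get_es_cache() above, a small standalone sketch of the byte-range to logical-block-range conversion may help. It is illustrative only: the clamp to EXT_MAX_BLOCKS is omitted and every name here is made up.

#include <stdint.h>
#include <stdio.h>

/* Convert a byte range to the logical blocks it touches (illustrative only). */
static void byte_range_to_blocks(uint64_t start, uint64_t len,
                                 unsigned int blkbits,
                                 uint64_t *first_blk, uint64_t *nr_blks)
{
    uint64_t last_blk = (start + len - 1) >> blkbits;

    *first_blk = start >> blkbits;
    *nr_blks = last_blk - *first_blk + 1;
}

int main(void)
{
    uint64_t first, nr;

    /* 4 KiB blocks (blkbits = 12): bytes [5000, 15000) touch blocks 1..3. */
    byte_range_to_blocks(5000, 10000, 12, &first, &nr);
    printf("first=%llu nr=%llu\n",
           (unsigned long long)first, (unsigned long long)nr); /* first=1 nr=3 */
    return 0;
}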
/*
* ext4_access_path:
@@ -5304,7 +5330,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index d996b44d2265..e75171535375 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -1054,7 +1054,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
/* record the first block of the first delonly extent seen */
- if (rc->first_do_lblk_found == false) {
+ if (!rc->first_do_lblk_found) {
rc->first_do_lblk = i;
rc->first_do_lblk_found = true;
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0d624250a62b..2a01e31a032c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -287,6 +287,7 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
bool truncate = false;
u8 blkbits = inode->i_blkbits;
ext4_lblk_t written_blk, end_blk;
+ int ret;
/*
* Note that EXT4_I(inode)->i_disksize can get extended up to
@@ -327,8 +328,14 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
goto truncate;
}
- if (ext4_update_inode_size(inode, offset + written))
- ext4_mark_inode_dirty(handle, inode);
+ if (ext4_update_inode_size(inode, offset + written)) {
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret)) {
+ written = ret;
+ ext4_journal_stop(handle);
+ goto truncate;
+ }
+ }
/*
* We may need to truncate allocated but not written blocks beyond EOF.
@@ -495,6 +502,12 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret <= 0)
return ret;
+ /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
+ if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
offset = iocb->ki_pos;
count = ret;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 35ff9a56db67..1d668c8f131f 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -44,30 +44,28 @@
*/
static int ext4_sync_parent(struct inode *inode)
{
- struct dentry *dentry = NULL;
- struct inode *next;
+ struct dentry *dentry, *next;
int ret = 0;
if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
return 0;
- inode = igrab(inode);
+ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return 0;
while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
- dentry = d_find_any_alias(inode);
- if (!dentry)
- break;
- next = igrab(d_inode(dentry->d_parent));
+
+ next = dget_parent(dentry);
dput(dentry);
- if (!next)
- break;
- iput(inode);
- inode = next;
+ dentry = next;
+ inode = dentry->d_inode;
+
/*
* The directory inode may have gone through rmdir by now. But
* the inode itself and its blocks are still allocated (we hold
- * a reference to the inode so it didn't go through
- * ext4_evict_inode()) and so we are safe to flush metadata
- * blocks and the inode.
+ * a reference to the inode via its dentry), so it didn't go
+ * through ext4_evict_inode() and so we are safe to flush
+ * metadata blocks and the inode.
*/
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
@@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode)
if (ret)
break;
}
- iput(inode);
+ dput(dentry);
return ret;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 499f08d8522e..54d324e80fe5 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1246,6 +1246,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
ext4_error_err(sb, -err,
"couldn't read orphan inode %lu (err %d)",
ino, err);
+ brelse(bitmap_bh);
return inode;
}
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 107f0043f67f..be2b66eb65f7 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -467,7 +467,9 @@ static int ext4_splice_branch(handle_t *handle,
/*
* OK, we spliced it into the inode itself on a direct block.
*/
- ext4_mark_inode_dirty(handle, ar->inode);
+ err = ext4_mark_inode_dirty(handle, ar->inode);
+ if (unlikely(err))
+ goto err_out;
jbd_debug(5, "splicing direct\n");
}
return err;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f35e289e17aa..c3a1ad2db122 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1260,7 +1260,7 @@ out:
int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
struct inode *dir, struct inode *inode)
{
- int ret, inline_size, no_expand;
+ int ret, ret2, inline_size, no_expand;
void *inline_start;
struct ext4_iloc iloc;
@@ -1314,7 +1314,9 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
out:
ext4_write_unlock_xattr(dir, &no_expand);
- ext4_mark_inode_dirty(handle, dir);
+ ret2 = ext4_mark_inode_dirty(handle, dir);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
brelse(iloc.bh);
return ret;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 52be85f96159..40ec5c7ef0d3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -221,6 +221,16 @@ void ext4_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
/*
+ * For inodes with journalled data, transaction commit could have
+ * dirtied the inode. Flush worker is ignoring it because of I_FREEING
+ * flag but we still need to remove the inode from the writeback lists.
+ */
+ if (!list_empty_careful(&inode->i_io_list)) {
+ WARN_ON_ONCE(!ext4_should_journal_data(inode));
+ inode_io_list_del(inode);
+ }
+
+ /*
* Protect us against freezing - iput() caller didn't have to have any
* protection against it
*/
@@ -432,11 +442,9 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
} else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
}
up_read((&EXT4_I(inode)->i_data_sem));
@@ -493,9 +501,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
#endif
map->m_flags = 0;
- ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
- "logical block %lu\n", inode->i_ino, flags, map->m_len,
- (unsigned long) map->m_lblk);
+ ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
+ flags, map->m_len, (unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
@@ -541,11 +548,9 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
} else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
}
if (retval > 0) {
unsigned int status;
@@ -726,6 +731,9 @@ out_sem:
return ret;
}
}
+
+ if (retval < 0)
+ ext_debug(inode, "failed with err %d\n", retval);
return retval;
}
@@ -1296,7 +1304,7 @@ static int ext4_write_end(struct file *file,
* filesystems.
*/
if (i_size_changed || inline_data)
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
@@ -1526,6 +1534,7 @@ struct mpage_da_data {
struct ext4_map_blocks map;
struct ext4_io_submit io_submit; /* IO submission data */
unsigned int do_map:1;
+ unsigned int scanned_until_end:1;
};
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
@@ -1541,6 +1550,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
if (mpd->first_page >= mpd->next_page)
return;
+ mpd->scanned_until_end = 0;
index = mpd->first_page;
end = mpd->next_page - 1;
if (invalidate) {
@@ -1681,8 +1691,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
invalid_block = ~0;
map->m_flags = 0;
- ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
- "logical block %lu\n", inode->i_ino, map->m_len,
+ ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
(unsigned long) map->m_lblk);
/* Lookup extent status tree firstly */
@@ -2078,7 +2087,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
return err;
}
-#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
+#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
/*
* mballoc gives us at most this number of blocks...
@@ -2188,7 +2197,11 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
if (err < 0)
return err;
}
- return lblk < blocks;
+ if (lblk >= blocks) {
+ mpd->scanned_until_end = 1;
+ return 0;
+ }
+ return 1;
}
/*
@@ -2311,7 +2324,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
* mapping, or maybe the page was submitted for IO.
* So we return to call further extent mapping.
*/
- if (err < 0 || map_bh == true)
+ if (err < 0 || map_bh)
goto out;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
@@ -2358,7 +2371,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (map->m_flags & (1 << BH_Delay))
+ if (map->m_flags & BIT(BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
@@ -2546,7 +2559,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
tag);
if (nr_pages == 0)
- goto out;
+ break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2601,6 +2614,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
pagevec_release(&pvec);
cond_resched();
}
+ mpd->scanned_until_end = 1;
return 0;
out:
pagevec_release(&pvec);
@@ -2619,7 +2633,6 @@ static int ext4_writepages(struct address_space *mapping,
struct inode *inode = mapping->host;
int needed_blocks, rsv_blocks = 0, ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
- bool done;
struct blk_plug plug;
bool give_up_on_write = false;
@@ -2705,7 +2718,6 @@ static int ext4_writepages(struct address_space *mapping,
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
- done = false;
blk_start_plug(&plug);
/*
@@ -2715,6 +2727,7 @@ retry:
* started.
*/
mpd.do_map = 0;
+ mpd.scanned_until_end = 0;
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
ret = -ENOMEM;
@@ -2730,7 +2743,7 @@ retry:
if (ret < 0)
goto unplug;
- while (!done && mpd.first_page <= mpd.last_page) {
+ while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
/* For each extent of pages we use new io_end */
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
@@ -2765,20 +2778,9 @@ retry:
trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
ret = mpage_prepare_extent_to_map(&mpd);
- if (!ret) {
- if (mpd.map.m_len)
- ret = mpage_map_and_submit_extent(handle, &mpd,
+ if (!ret && mpd.map.m_len)
+ ret = mpage_map_and_submit_extent(handle, &mpd,
&give_up_on_write);
- else {
- /*
- * We scanned the whole range (or exhausted
- * nr_to_write), submitted what was mapped and
- * didn't find anything needing mapping. We are
- * done.
- */
- done = true;
- }
- }
/*
* Caution: If the handle is synchronous,
* ext4_journal_stop() can wait for transaction commit
@@ -3077,7 +3079,7 @@ static int ext4_da_write_end(struct file *file,
 * new_i_size is less than inode->i_size
 * but greater than i_disksize (hint: delalloc)
*/
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
}
}
@@ -3094,7 +3096,7 @@ static int ext4_da_write_end(struct file *file,
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
- if (!ret)
+ if (unlikely(ret2 && !ret))
ret = ret2;
return ret ? ret : copied;
@@ -3883,6 +3885,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t len)
{
handle_t *handle;
+ int ret;
+
loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode));
@@ -3896,10 +3900,10 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
if (IS_ERR(handle))
return PTR_ERR(handle);
ext4_update_i_disksize(inode, size);
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
- return 0;
+ return ret;
}
static void ext4_wait_dax_page(struct ext4_inode_info *ei)
@@ -3951,7 +3955,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
- int ret = 0;
+ int ret = 0, ret2 = 0;
trace_ext4_punch_hole(inode, offset, length, 0);
@@ -4074,7 +4078,9 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2))
+ ret = ret2;
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
@@ -4143,7 +4149,7 @@ int ext4_truncate(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int credits;
- int err = 0;
+ int err = 0, err2;
handle_t *handle;
struct address_space *mapping = inode->i_mapping;
@@ -4231,7 +4237,9 @@ out_stop:
ext4_orphan_del(handle, inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2 && !err))
+ err = err2;
ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
@@ -4857,21 +4865,22 @@ static int ext4_inode_blocks_set(handle_t *handle,
return 0;
}
-struct other_inode {
- unsigned long orig_ino;
- struct ext4_inode *raw_inode;
-};
-
-static int other_inode_match(struct inode * inode, unsigned long ino,
- void *data)
+static void __ext4_update_other_inode_time(struct super_block *sb,
+ unsigned long orig_ino,
+ unsigned long ino,
+ struct ext4_inode *raw_inode)
{
- struct other_inode *oi = (struct other_inode *) data;
+ struct inode *inode;
+
+ inode = find_inode_by_ino_rcu(sb, ino);
+ if (!inode)
+ return;
- if ((inode->i_ino != ino) ||
- (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
+ if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_INODE)) ||
((inode->i_state & I_DIRTY_TIME) == 0))
- return 0;
+ return;
+
spin_lock(&inode->i_lock);
if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_INODE)) == 0) &&
@@ -4882,16 +4891,15 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
- EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
- ext4_inode_csum_set(inode, oi->raw_inode, ei);
+ EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
- trace_ext4_other_inode_update_time(inode, oi->orig_ino);
- return -1;
+ trace_ext4_other_inode_update_time(inode, orig_ino);
+ return;
}
spin_unlock(&inode->i_lock);
- return -1;
}
/*
@@ -4901,24 +4909,24 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
static void ext4_update_other_inodes_time(struct super_block *sb,
unsigned long orig_ino, char *buf)
{
- struct other_inode oi;
unsigned long ino;
int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
int inode_size = EXT4_INODE_SIZE(sb);
- oi.orig_ino = orig_ino;
/*
* Calculate the first inode in the inode table block. Inode
* numbers are one-based. That is, the first inode in a block
* (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
*/
ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
+ rcu_read_lock();
for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
if (ino == orig_ino)
continue;
- oi.raw_inode = (struct ext4_inode *) buf;
- (void) find_inode_nowait(sb, ino, other_inode_match, &oi);
+ __ext4_update_other_inode_time(sb, orig_ino, ino,
+ (struct ext4_inode *)buf);
}
+ rcu_read_unlock();
}
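
The mask expression that seeds 'ino' above rounds an inode number down to the first inode of its inode-table block. A tiny worked check (hypothetical test code, assuming the 4k-block/256-byte-inode case from the comment, i.e. 16 inodes per block):

#include <assert.h>

/* First inode of the table block containing 'ino'; inode numbers are 1-based. */
static unsigned long first_ino_in_block(unsigned long ino,
                                        unsigned long inodes_per_block)
{
    /* inodes_per_block must be a power of two for the mask trick to work. */
    return ((ino - 1) & ~(inodes_per_block - 1)) + 1;
}

int main(void)
{
    /* 4k blocks and 256-byte inodes give 16 inodes per table block. */
    assert(first_ino_in_block(1, 16) == 1);
    assert(first_ino_in_block(16, 16) == 1);
    assert(first_ino_in_block(17, 16) == 17);
    assert(first_ino_in_block(37, 16) == 33);
    return 0;
}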
/*
@@ -5289,6 +5297,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_gid = attr->ia_gid;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
+ if (unlikely(error))
+ return error;
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -5774,7 +5784,8 @@ out_unlock:
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
*/
-int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
+ const char *func, unsigned int line)
{
struct ext4_iloc iloc;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -5784,13 +5795,18 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
- return err;
+ goto out;
if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
iloc, handle);
- return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+out:
+ if (unlikely(err))
+ ext4_error_inode_err(inode, func, line, 0, err,
+ "mark_inode_dirty error");
+ return err;
}
/*
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0746532ba463..2162db0c747d 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -754,14 +754,6 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
fieinfo.fi_extents_max = fiemap.fm_extent_count;
fieinfo.fi_extents_start = ufiemap->fm_extents;
- if (fiemap.fm_extent_count != 0 &&
- !access_ok(fieinfo.fi_extents_start,
- fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
- return -EFAULT;
-
- if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
- filemap_write_and_wait(inode->i_mapping);
-
error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start,
fiemap.fm_length);
fiemap.fm_flags = fieinfo.fi_flags;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 30d5d97548c4..a9083113a8c0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -18,13 +18,6 @@
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>
-#ifdef CONFIG_EXT4_DEBUG
-ushort ext4_mballoc_debug __read_mostly;
-
-module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
-MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
-#endif
-
/*
* MUSTDO:
* - test ext4_ext_search_left() and ext4_ext_search_right()
@@ -356,6 +349,36 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
ext4_group_t group);
+static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
+
+/*
+ * The algorithm using this percpu seq counter goes below:
+ * 1. We sample the percpu discard_pa_seq counter before trying for block
+ * allocation in ext4_mb_new_blocks().
+ * 2. We increment this percpu discard_pa_seq counter when we either allocate
+ * or free these blocks i.e. while marking those blocks as used/free in
+ * mb_mark_used()/mb_free_blocks().
+ * 3. We also increment this percpu seq counter when we successfully identify
+ * that the bb_prealloc_list is not empty and hence proceed for discarding
+ * of those PAs inside ext4_mb_discard_group_preallocations().
+ *
+ * To keep the regular fast path of block allocation unaffected, as a small
+ * optimization we only sample the percpu seq counter on the local cpu. Only
+ * when block allocation fails and no freed blocks were found do we sum the
+ * percpu seq counter across all cpus using ext4_get_discard_pa_seq_sum()
+ * below. This happens after making sure that all the PAs on
+ * grp->bb_prealloc_list got freed, or that the list was empty to begin with.
+ */
+static DEFINE_PER_CPU(u64, discard_pa_seq);
+static inline u64 ext4_get_discard_pa_seq_sum(void)
+{
+ int __cpu;
+ u64 __seq = 0;
+
+ for_each_possible_cpu(__cpu)
+ __seq += per_cpu(discard_pa_seq, __cpu);
+ return __seq;
+}
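
To make the sampling scheme described in the comment above more concrete, here is a rough userspace analogue. It is only a sketch of the idea, not the patch's implementation: try_alloc(), NSLOTS, and the slot index stand in for the allocator path, NR_CPUS, and the current CPU.

#include <stdatomic.h>
#include <stdbool.h>

#define NSLOTS 8                        /* stand-in for NR_CPUS */

static _Atomic unsigned long seq[NSLOTS];

/* Called whenever blocks are freed or preallocations are discarded. */
static void note_blocks_freed(int slot)
{
    atomic_fetch_add(&seq[slot], 1);    /* like this_cpu_inc(discard_pa_seq) */
}

/* Expensive all-slot sum, like ext4_get_discard_pa_seq_sum(). */
static unsigned long seq_sum(void)
{
    unsigned long sum = 0;

    for (int i = 0; i < NSLOTS; i++)
        sum += atomic_load(&seq[i]);
    return sum;
}

/* try_alloc() is assumed to exist and to return true on success. */
static bool alloc_with_retry(int slot, bool (*try_alloc)(void))
{
    /* Cheap, local sample on the fast path. */
    unsigned long sampled = atomic_load(&seq[slot]);

    if (try_alloc())
        return true;
    /*
     * Slow path only: pay for the all-slot sum. If it no longer matches the
     * sampled value, frees or discards may have raced with us, so one more
     * attempt is worthwhile. Deliberately coarse, like the heuristic in the
     * comment above.
     */
    if (seq_sum() != sampled)
        return try_alloc();
    return false;
}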
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
@@ -493,6 +516,8 @@ static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
+ if (unlikely(e4b->bd_info->bb_bitmap == NULL))
+ return;
if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
unsigned char *b1, *b2;
int i;
@@ -511,6 +536,31 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
}
}
+static void mb_group_bb_bitmap_alloc(struct super_block *sb,
+ struct ext4_group_info *grp, ext4_group_t group)
+{
+ struct buffer_head *bh;
+
+ grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
+ if (!grp->bb_bitmap)
+ return;
+
+ bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR_OR_NULL(bh)) {
+ kfree(grp->bb_bitmap);
+ grp->bb_bitmap = NULL;
+ return;
+ }
+
+ memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
+ put_bh(bh);
+}
+
+static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
+{
+ kfree(grp->bb_bitmap);
+}
+
#else
static inline void mb_free_blocks_double(struct inode *inode,
struct ext4_buddy *e4b, int first, int count)
@@ -526,6 +576,17 @@ static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
return;
}
+
+static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
+ struct ext4_group_info *grp, ext4_group_t group)
+{
+ return;
+}
+
+static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
+{
+ return;
+}
#endif
#ifdef AGGRESSIVE_CHECK
@@ -820,14 +881,14 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
char *bitmap;
struct ext4_group_info *grinfo;
- mb_debug(1, "init page %lu\n", page->index);
-
inode = page->mapping->host;
sb = inode->i_sb;
ngroups = ext4_get_groups_count(sb);
blocksize = i_blocksize(inode);
blocks_per_page = PAGE_SIZE / blocksize;
+ mb_debug(sb, "init page %lu\n", page->index);
+
groups_per_page = blocks_per_page >> 1;
if (groups_per_page == 0)
groups_per_page = 1;
@@ -867,7 +928,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
bh[i] = NULL;
goto out;
}
- mb_debug(1, "read bitmap for group %u\n", group);
+ mb_debug(sb, "read bitmap for group %u\n", group);
}
/* wait for I/O completion */
@@ -912,7 +973,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
if ((first_block + i) & 1) {
/* this is block of buddy */
BUG_ON(incore == NULL);
- mb_debug(1, "put buddy for group %u in page %lu/%x\n",
+ mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
group, page->index, i * blocksize);
trace_ext4_mb_buddy_bitmap_load(sb, group);
grinfo = ext4_get_group_info(sb, group);
@@ -932,7 +993,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
} else {
/* this is block of bitmap */
BUG_ON(incore != NULL);
- mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
+ mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
group, page->index, i * blocksize);
trace_ext4_mb_bitmap_load(sb, group);
@@ -1038,7 +1099,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
int ret = 0;
might_sleep();
- mb_debug(1, "init group %u\n", group);
+ mb_debug(sb, "init group %u\n", group);
this_grp = ext4_get_group_info(sb, group);
/*
* This ensures that we don't reinit the buddy cache
@@ -1110,7 +1171,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
struct inode *inode = sbi->s_buddy_cache;
might_sleep();
- mb_debug(1, "load group %u\n", group);
+ mb_debug(sb, "load group %u\n", group);
blocks_per_page = PAGE_SIZE / sb->s_blocksize;
grp = ext4_get_group_info(sb, group);
@@ -1430,6 +1491,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
mb_check_buddy(e4b);
mb_free_blocks_double(inode, e4b, first, count);
+ this_cpu_inc(discard_pa_seq);
e4b->bd_info->bb_free += count;
if (first < e4b->bd_info->bb_first_free)
e4b->bd_info->bb_first_free = first;
@@ -1571,6 +1633,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
mb_check_buddy(e4b);
mb_mark_used_double(e4b, start, len);
+ this_cpu_inc(discard_pa_seq);
e4b->bd_info->bb_free -= len;
if (e4b->bd_info->bb_first_free == start)
e4b->bd_info->bb_first_free += len;
@@ -1670,6 +1733,14 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
spin_unlock(&sbi->s_md_lock);
}
+ /*
+ * As we've just preallocated more space than
+ * user requested originally, we store allocated
+ * space in a special descriptor.
+ */
+ if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
+ ext4_mb_new_preallocation(ac);
+
}
/*
@@ -1918,7 +1989,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
- BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+ BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
if (EXT4_SB(sb)->s_mb_stats)
atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
@@ -2035,15 +2106,14 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
}
/*
- * This is now called BEFORE we load the buddy bitmap.
+ * This is also called BEFORE we load the buddy bitmap.
* Returns either 1 or 0 indicating that the group is either suitable
- * for the allocation or not. In addition it can also return negative
- * error code when something goes wrong.
+ * for the allocation or not.
*/
-static int ext4_mb_good_group(struct ext4_allocation_context *ac,
+static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
ext4_group_t group, int cr)
{
- unsigned free, fragments;
+ ext4_grpblk_t free, fragments;
int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
@@ -2051,23 +2121,16 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
free = grp->bb_free;
if (free == 0)
- return 0;
+ return false;
if (cr <= 2 && free < ac->ac_g_ex.fe_len)
- return 0;
+ return false;
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
- return 0;
-
- /* We only do this if the grp has never been initialized */
- if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
- int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
- if (ret)
- return ret;
- }
+ return false;
fragments = grp->bb_fragments;
if (fragments == 0)
- return 0;
+ return false;
switch (cr) {
case 0:
@@ -2077,38 +2140,80 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
(flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
((group % flex_size) == 0))
- return 0;
+ return false;
if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
(free / fragments) >= ac->ac_g_ex.fe_len)
- return 1;
+ return true;
if (grp->bb_largest_free_order < ac->ac_2order)
- return 0;
+ return false;
- return 1;
+ return true;
case 1:
if ((free / fragments) >= ac->ac_g_ex.fe_len)
- return 1;
+ return true;
break;
case 2:
if (free >= ac->ac_g_ex.fe_len)
- return 1;
+ return true;
break;
case 3:
- return 1;
+ return true;
default:
BUG();
}
- return 0;
+ return false;
+}
+
+/*
+ * This could return negative error code if something goes wrong
+ * during ext4_mb_init_group(). This should not be called with
+ * ext4_lock_group() held.
+ */
+static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
+ ext4_group_t group, int cr)
+{
+ struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
+ struct super_block *sb = ac->ac_sb;
+ bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
+ ext4_grpblk_t free;
+ int ret = 0;
+
+ if (should_lock)
+ ext4_lock_group(sb, group);
+ free = grp->bb_free;
+ if (free == 0)
+ goto out;
+ if (cr <= 2 && free < ac->ac_g_ex.fe_len)
+ goto out;
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ goto out;
+ if (should_lock)
+ ext4_unlock_group(sb, group);
+
+ /* We only do this if the grp has never been initialized */
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+ if (ret)
+ return ret;
+ }
+
+ if (should_lock)
+ ext4_lock_group(sb, group);
+ ret = ext4_mb_good_group(ac, group, cr);
+out:
+ if (should_lock)
+ ext4_unlock_group(sb, group);
+ return ret;
}
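
The helper above takes the group lock for the cheap prefilter only when EXT4_MB_STRICT_CHECK is set, drops it around the potentially sleeping ext4_mb_init_group() call, and retakes it for the authoritative ext4_mb_good_group() check. A minimal sketch of that conditional-lock idiom follows; check_candidate(), expensive_init(), full_check() and struct candidate are hypothetical names, not ext4 APIs.

	/* Sketch only: conditional-lock prefilter around a step that may sleep. */
	struct candidate {
		spinlock_t lock;
		bool initialized;
		bool cheap_invariant;
	};

	static int check_candidate(struct candidate *c, bool strict)
	{
		int ret = 0;

		if (strict)
			spin_lock(&c->lock);
		if (!c->cheap_invariant)		/* fast reject, no heavy work */
			goto out;
		if (strict)
			spin_unlock(&c->lock);

		if (!c->initialized) {
			ret = expensive_init(c);	/* may sleep, so it must run unlocked */
			if (ret)
				return ret;
		}

		if (strict)
			spin_lock(&c->lock);
		ret = full_check(c);			/* authoritative verdict, lock held */
	out:
		if (strict)
			spin_unlock(&c->lock);
		return ret;
	}
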
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
ext4_group_t ngroups, group, i;
- int cr;
+ int cr = -1;
int err = 0, first_err = 0;
struct ext4_sb_info *sbi;
struct super_block *sb;
@@ -2189,7 +2294,7 @@ repeat:
group = 0;
/* This now checks without needing the buddy page */
- ret = ext4_mb_good_group(ac, group, cr);
+ ret = ext4_mb_good_group_nolock(ac, group, cr);
if (ret <= 0) {
if (!first_err)
first_err = ret;
@@ -2207,11 +2312,9 @@ repeat:
* block group
*/
ret = ext4_mb_good_group(ac, group, cr);
- if (ret <= 0) {
+ if (ret == 0) {
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
- if (!first_err)
- first_err = ret;
continue;
}
@@ -2260,6 +2363,10 @@ repeat:
out:
if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
err = first_err;
+
+ mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
+ ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
+ ac->ac_flags, cr, err);
return err;
}
@@ -2452,20 +2559,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
meta_group_info[i]->bb_free_root = RB_ROOT;
meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
-#ifdef DOUBLE_CHECK
- {
- struct buffer_head *bh;
- meta_group_info[i]->bb_bitmap =
- kmalloc(sb->s_blocksize, GFP_NOFS);
- BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
- bh = ext4_read_block_bitmap(sb, group);
- BUG_ON(IS_ERR_OR_NULL(bh));
- memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
- sb->s_blocksize);
- put_bh(bh);
- }
-#endif
-
+ mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
return 0;
exit_group_info:
@@ -2702,7 +2796,7 @@ out:
}
/* need to called with the ext4 group lock held */
-static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
+static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
struct ext4_prealloc_space *pa;
struct list_head *cur, *tmp;
@@ -2714,9 +2808,7 @@ static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
count++;
kmem_cache_free(ext4_pspace_cachep, pa);
}
- if (count)
- mb_debug(1, "mballoc: %u PAs left\n", count);
-
+ return count;
}
int ext4_mb_release(struct super_block *sb)
@@ -2727,16 +2819,18 @@ int ext4_mb_release(struct super_block *sb)
struct ext4_group_info *grinfo, ***group_info;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+ int count;
if (sbi->s_group_info) {
for (i = 0; i < ngroups; i++) {
cond_resched();
grinfo = ext4_get_group_info(sb, i);
-#ifdef DOUBLE_CHECK
- kfree(grinfo->bb_bitmap);
-#endif
+ mb_group_bb_bitmap_free(grinfo);
ext4_lock_group(sb, i);
- ext4_mb_cleanup_pa(grinfo);
+ count = ext4_mb_cleanup_pa(grinfo);
+ if (count)
+ mb_debug(sb, "mballoc: %d PAs left\n",
+ count);
ext4_unlock_group(sb, i);
kmem_cache_free(cachep, grinfo);
}
@@ -2809,7 +2903,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
struct ext4_group_info *db;
int err, count = 0, count2 = 0;
- mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
+ mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
entry->efd_count, entry->efd_group, entry);
err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
@@ -2849,7 +2943,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
kmem_cache_free(ext4_free_data_cachep, entry);
ext4_mb_unload_buddy(&e4b);
- mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
+ mb_debug(sb, "freed %d blocks in %d structures\n", count,
+ count2);
}
/*
@@ -2909,23 +3004,26 @@ int __init ext4_init_mballoc(void)
ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
SLAB_RECLAIM_ACCOUNT);
if (ext4_pspace_cachep == NULL)
- return -ENOMEM;
+ goto out;
ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
SLAB_RECLAIM_ACCOUNT);
- if (ext4_ac_cachep == NULL) {
- kmem_cache_destroy(ext4_pspace_cachep);
- return -ENOMEM;
- }
+ if (ext4_ac_cachep == NULL)
+ goto out_pa_free;
ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
SLAB_RECLAIM_ACCOUNT);
- if (ext4_free_data_cachep == NULL) {
- kmem_cache_destroy(ext4_pspace_cachep);
- kmem_cache_destroy(ext4_ac_cachep);
- return -ENOMEM;
- }
+ if (ext4_free_data_cachep == NULL)
+ goto out_ac_free;
+
return 0;
+
+out_ac_free:
+ kmem_cache_destroy(ext4_ac_cachep);
+out_pa_free:
+ kmem_cache_destroy(ext4_pspace_cachep);
+out:
+ return -ENOMEM;
}
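
The reworked init path above follows the usual kernel goto-unwind pattern: each allocation gets a label that frees everything set up before it, so every failure exits through a single, ordered cleanup chain. The same shape in a small self-contained sketch, with plain malloc()/free() standing in for the kmem caches:

	#include <stdlib.h>

	struct ctx {
		void *a;
		void *b;
		void *c;
	};

	/* Sketch only: staged error unwinding with goto labels. */
	static int ctx_init(struct ctx *ctx)
	{
		ctx->a = malloc(64);
		if (!ctx->a)
			goto out;
		ctx->b = malloc(64);
		if (!ctx->b)
			goto out_free_a;
		ctx->c = malloc(64);
		if (!ctx->c)
			goto out_free_b;
		return 0;

	out_free_b:
		free(ctx->b);
	out_free_a:
		free(ctx->a);
	out:
		return -1;	/* a kernel version would return -ENOMEM */
	}
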
void ext4_exit_mballoc(void)
@@ -3077,8 +3175,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
BUG_ON(lg == NULL);
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
- mb_debug(1, "#%u: goal %u blocks for locality group\n",
- current->pid, ac->ac_g_ex.fe_len);
+ mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
}
/*
@@ -3276,8 +3373,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
- mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
- (unsigned) orig_size, (unsigned) start);
+ mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
+ orig_size, start);
}
static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
@@ -3366,7 +3463,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
BUG_ON(pa->pa_free < len);
pa->pa_free -= len;
- mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
+ mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
}
/*
@@ -3390,7 +3487,8 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
* in on-disk bitmap -- see ext4_mb_release_context()
* Other CPUs are prevented from allocating from this pa by lg_mutex
*/
- mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
+ mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
+ pa->pa_lstart-len, len, pa);
}
/*
@@ -3425,7 +3523,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
/*
* search goal blocks in preallocated space
*/
-static noinline_for_stack int
+static noinline_for_stack bool
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
@@ -3437,7 +3535,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* only data can be preallocated */
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
- return 0;
+ return false;
/* first, try per-file preallocation */
rcu_read_lock();
@@ -3464,7 +3562,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
spin_unlock(&pa->pa_lock);
ac->ac_criteria = 10;
rcu_read_unlock();
- return 1;
+ return true;
}
spin_unlock(&pa->pa_lock);
}
@@ -3472,12 +3570,12 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* can we use group allocation? */
if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
- return 0;
+ return false;
/* inode may have no locality group for some reason */
lg = ac->ac_lg;
if (lg == NULL)
- return 0;
+ return false;
order = fls(ac->ac_o_ex.fe_len) - 1;
if (order > PREALLOC_TB_SIZE - 1)
/* The max size of hash table is PREALLOC_TB_SIZE */
@@ -3506,9 +3604,9 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
if (cpa) {
ext4_mb_use_group_pa(ac, cpa);
ac->ac_criteria = 20;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -3573,7 +3671,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_set_bits(bitmap, start, len);
preallocated += len;
}
- mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
+ mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
}
static void ext4_mb_pa_callback(struct rcu_head *head)
@@ -3649,7 +3747,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
/*
* creates new preallocated space for given inode
*/
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
@@ -3662,10 +3760,9 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
BUG_ON(ac->ac_status != AC_STATUS_FOUND);
BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
+ BUG_ON(ac->ac_pa == NULL);
- pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
- if (pa == NULL)
- return -ENOMEM;
+ pa = ac->ac_pa;
if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
int winl;
@@ -3709,15 +3806,14 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
pa->pa_len = ac->ac_b_ex.fe_len;
pa->pa_free = pa->pa_len;
- atomic_set(&pa->pa_count, 1);
spin_lock_init(&pa->pa_lock);
INIT_LIST_HEAD(&pa->pa_inode_list);
INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_type = MB_INODE_PA;
- mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+ mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
+ pa->pa_len, pa->pa_lstart);
trace_ext4_mb_new_inode_pa(ac, pa);
ext4_mb_use_inode_pa(ac, pa);
@@ -3729,21 +3825,17 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa->pa_obj_lock = &ei->i_prealloc_lock;
pa->pa_inode = ac->ac_inode;
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
spin_lock(pa->pa_obj_lock);
list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
spin_unlock(pa->pa_obj_lock);
-
- return 0;
}
/*
* creates new preallocated space for locality group inodes belongs to
*/
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
@@ -3755,11 +3847,9 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
BUG_ON(ac->ac_status != AC_STATUS_FOUND);
BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
+ BUG_ON(ac->ac_pa == NULL);
- BUG_ON(ext4_pspace_cachep == NULL);
- pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
- if (pa == NULL)
- return -ENOMEM;
+ pa = ac->ac_pa;
/* preallocation can change ac_b_ex, thus we store actually
* allocated blocks for history */
@@ -3769,15 +3859,14 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
pa->pa_lstart = pa->pa_pstart;
pa->pa_len = ac->ac_b_ex.fe_len;
pa->pa_free = pa->pa_len;
- atomic_set(&pa->pa_count, 1);
spin_lock_init(&pa->pa_lock);
INIT_LIST_HEAD(&pa->pa_inode_list);
INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_type = MB_GROUP_PA;
- mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+ mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
+ pa->pa_len, pa->pa_lstart);
trace_ext4_mb_new_group_pa(ac, pa);
ext4_mb_use_group_pa(ac, pa);
@@ -3790,26 +3879,20 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
pa->pa_obj_lock = &lg->lg_prealloc_lock;
pa->pa_inode = NULL;
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
/*
* We will later add the new pa to the right bucket
* after updating the pa_free in ext4_mb_release_context
*/
- return 0;
}
-static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
+static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
- int err;
-
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
- err = ext4_mb_new_group_pa(ac);
+ ext4_mb_new_group_pa(ac);
else
- err = ext4_mb_new_inode_pa(ac);
- return err;
+ ext4_mb_new_inode_pa(ac);
}
/*
@@ -3844,7 +3927,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
if (bit >= end)
break;
next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
- mb_debug(1, " free preallocated %u/%u in group %u\n",
+ mb_debug(sb, "free preallocated %u/%u in group %u\n",
(unsigned) ext4_group_first_block_no(sb, group) + bit,
(unsigned) next - bit, (unsigned) group);
free += next - bit;
@@ -3858,10 +3941,10 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
}
if (free != pa->pa_free) {
ext4_msg(e4b->bd_sb, KERN_CRIT,
- "pa %p: logic %lu, phys. %lu, len %lu",
+ "pa %p: logic %lu, phys. %lu, len %d",
pa, (unsigned long) pa->pa_lstart,
(unsigned long) pa->pa_pstart,
- (unsigned long) pa->pa_len);
+ pa->pa_len);
ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
free, pa->pa_free);
/*
@@ -3915,10 +3998,9 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
int busy = 0;
int free = 0;
- mb_debug(1, "discard preallocation for group %u\n", group);
-
+ mb_debug(sb, "discard preallocation for group %u\n", group);
if (list_empty(&grp->bb_prealloc_list))
- return 0;
+ goto out_dbg;
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh)) {
@@ -3926,7 +4008,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
ext4_error_err(sb, -err,
"Error %d reading block bitmap for %u",
err, group);
- return 0;
+ goto out_dbg;
}
err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -3934,7 +4016,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
ext4_warning(sb, "Error %d loading buddy information for %u",
err, group);
put_bh(bitmap_bh);
- return 0;
+ goto out_dbg;
}
if (needed == 0)
@@ -3943,6 +4025,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
INIT_LIST_HEAD(&list);
repeat:
ext4_lock_group(sb, group);
+ this_cpu_inc(discard_pa_seq);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
spin_lock(&pa->pa_lock);
@@ -3979,6 +4062,8 @@ repeat:
/* found anything to free? */
if (list_empty(&list)) {
BUG_ON(free != 0);
+ mb_debug(sb, "Someone else may have freed PA for this group %u\n",
+ group);
goto out;
}
@@ -4003,6 +4088,9 @@ out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
+out_dbg:
+ mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
+ free, group, grp->bb_free);
return free;
}
@@ -4031,7 +4119,8 @@ void ext4_discard_preallocations(struct inode *inode)
return;
}
- mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
+ mb_debug(sb, "discard preallocation for inode %lu\n",
+ inode->i_ino);
trace_ext4_discard_preallocations(inode);
INIT_LIST_HEAD(&list);
@@ -4119,22 +4208,74 @@ repeat:
}
}
+static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
+{
+ struct ext4_prealloc_space *pa;
+
+ BUG_ON(ext4_pspace_cachep == NULL);
+ pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
+ if (!pa)
+ return -ENOMEM;
+ atomic_set(&pa->pa_count, 1);
+ ac->ac_pa = pa;
+ return 0;
+}
+
+static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
+{
+ struct ext4_prealloc_space *pa = ac->ac_pa;
+
+ BUG_ON(!pa);
+ ac->ac_pa = NULL;
+ WARN_ON(!atomic_dec_and_test(&pa->pa_count));
+ kmem_cache_free(ext4_pspace_cachep, pa);
+}
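
With these two helpers the preallocation descriptor is allocated up front, before the regular allocator runs, and is only linked into the group and inode lists once an allocation actually succeeds; every path that does not end up publishing it hands it back through ext4_mb_pa_free(). A compact sketch of that allocate-early/commit-late shape, using hypothetical names (struct record, reserve_record(), release_record(), do_work() and commit_record() are illustrative, not ext4 functions):

	/* Sketch only: allocate the bookkeeping object before the operation,
	 * commit it on success, free it on any failure. */
	static int do_operation(struct ctx *ctx)
	{
		struct record *rec = reserve_record();	/* like ext4_mb_pa_alloc() */
		int err;

		if (!rec)
			return -ENOMEM;

		err = do_work(ctx);			/* like ext4_mb_regular_allocator() */
		if (err) {
			release_record(rec);		/* like ext4_mb_pa_free() */
			return err;
		}

		commit_record(ctx, rec);		/* like ext4_mb_new_preallocation() */
		return 0;
	}
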
+
#ifdef CONFIG_EXT4_DEBUG
+static inline void ext4_mb_show_pa(struct super_block *sb)
+{
+ ext4_group_t i, ngroups;
+
+ if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ return;
+
+ ngroups = ext4_get_groups_count(sb);
+ mb_debug(sb, "groups: ");
+ for (i = 0; i < ngroups; i++) {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, i);
+ struct ext4_prealloc_space *pa;
+ ext4_grpblk_t start;
+ struct list_head *cur;
+ ext4_lock_group(sb, i);
+ list_for_each(cur, &grp->bb_prealloc_list) {
+ pa = list_entry(cur, struct ext4_prealloc_space,
+ pa_group_list);
+ spin_lock(&pa->pa_lock);
+ ext4_get_group_no_and_offset(sb, pa->pa_pstart,
+ NULL, &start);
+ spin_unlock(&pa->pa_lock);
+ mb_debug(sb, "PA:%u:%d:%d\n", i, start,
+ pa->pa_len);
+ }
+ ext4_unlock_group(sb, i);
+ mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
+ grp->bb_fragments);
+ }
+}
+
static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
- ext4_group_t ngroups, i;
- if (!ext4_mballoc_debug ||
- (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
+ if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
return;
- ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
+ mb_debug(sb, "Can't allocate:"
" Allocation context details:");
- ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
+ mb_debug(sb, "status %u flags 0x%x",
ac->ac_status, ac->ac_flags);
- ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
- "goal %lu/%lu/%lu@%lu, "
+ mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
+ "goal %lu/%lu/%lu@%lu, "
"best %lu/%lu/%lu@%lu cr %d",
(unsigned long)ac->ac_o_ex.fe_group,
(unsigned long)ac->ac_o_ex.fe_start,
@@ -4149,37 +4290,17 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(unsigned long)ac->ac_b_ex.fe_len,
(unsigned long)ac->ac_b_ex.fe_logical,
(int)ac->ac_criteria);
- ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
- ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
- ngroups = ext4_get_groups_count(sb);
- for (i = 0; i < ngroups; i++) {
- struct ext4_group_info *grp = ext4_get_group_info(sb, i);
- struct ext4_prealloc_space *pa;
- ext4_grpblk_t start;
- struct list_head *cur;
- ext4_lock_group(sb, i);
- list_for_each(cur, &grp->bb_prealloc_list) {
- pa = list_entry(cur, struct ext4_prealloc_space,
- pa_group_list);
- spin_lock(&pa->pa_lock);
- ext4_get_group_no_and_offset(sb, pa->pa_pstart,
- NULL, &start);
- spin_unlock(&pa->pa_lock);
- printk(KERN_ERR "PA:%u:%d:%u \n", i,
- start, pa->pa_len);
- }
- ext4_unlock_group(sb, i);
-
- if (grp->bb_free == 0)
- continue;
- printk(KERN_ERR "%u: %d/%d \n",
- i, grp->bb_free, grp->bb_fragments);
- }
- printk(KERN_ERR "\n");
+ mb_debug(sb, "%u found", ac->ac_found);
+ ext4_mb_show_pa(sb);
}
#else
+static inline void ext4_mb_show_pa(struct super_block *sb)
+{
+ return;
+}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
+ ext4_mb_show_pa(ac->ac_sb);
return;
}
#endif
@@ -4282,7 +4403,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
* locality group. this is a policy, actually */
ext4_mb_group_or_file(ac);
- mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
+ mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
"left: %u/%u, right %u/%u to %swritable\n",
(unsigned) ar->len, (unsigned) ar->logical,
(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
@@ -4303,7 +4424,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
struct list_head discard_list;
struct ext4_prealloc_space *pa, *tmp;
- mb_debug(1, "discard locality group preallocation\n");
+ mb_debug(sb, "discard locality group preallocation\n");
INIT_LIST_HEAD(&discard_list);
@@ -4486,6 +4607,30 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
return freed;
}
+static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
+ struct ext4_allocation_context *ac, u64 *seq)
+{
+ int freed;
+ u64 seq_retry = 0;
+ bool ret = false;
+
+ freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
+ if (freed) {
+ ret = true;
+ goto out_dbg;
+ }
+ seq_retry = ext4_get_discard_pa_seq_sum();
+ if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
+ ac->ac_flags |= EXT4_MB_STRICT_CHECK;
+ *seq = seq_retry;
+ ret = true;
+ }
+
+out_dbg:
+ mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
+ return ret;
+}
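
The retry decision above hinges on a per-cpu sequence counter: each discard pass does this_cpu_inc(discard_pa_seq) (see the hunk in ext4_mb_discard_group_preallocations() above), and the allocator samples the summed counter before it starts. If our own discard freed nothing but the sum has moved, another CPU discarded preallocations in the meantime, so one more pass, now in strict mode, is worth trying. A rough user-space analogue with C11 atomics, using a single global counter instead of per-cpu values, purely for illustration:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	static _Atomic uint64_t discard_seq;

	static void note_discard(void)
	{
		atomic_fetch_add(&discard_seq, 1);	/* like this_cpu_inc(discard_pa_seq) */
	}

	/* Sketch only: retry when our discard freed something, when we have not
	 * yet tried strict mode, or when someone else discarded since we sampled. */
	static bool should_retry(uint64_t *seq, bool *strict, int freed)
	{
		uint64_t now;

		if (freed)
			return true;

		now = atomic_load(&discard_seq);	/* like ext4_get_discard_pa_seq_sum() */
		if (!*strict || now != *seq) {
			*strict = true;
			*seq = now;
			return true;
		}
		return false;
	}
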
+
/*
* Main entry point into mballoc to allocate blocks
* it tries to use preallocation first, then falls back
@@ -4494,13 +4639,13 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
struct ext4_allocation_request *ar, int *errp)
{
- int freed;
struct ext4_allocation_context *ac = NULL;
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block = 0;
unsigned int inquota = 0;
unsigned int reserv_clstrs = 0;
+ u64 seq;
might_sleep();
sb = ar->inode->i_sb;
@@ -4525,6 +4670,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ar->len = ar->len >> 1;
}
if (!ar->len) {
+ ext4_mb_show_pa(sb);
*errp = -ENOSPC;
return 0;
}
@@ -4562,26 +4708,32 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
}
ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
+ seq = *this_cpu_ptr(&discard_pa_seq);
if (!ext4_mb_use_preallocated(ac)) {
ac->ac_op = EXT4_MB_HISTORY_ALLOC;
ext4_mb_normalize_request(ac, ar);
+
+ *errp = ext4_mb_pa_alloc(ac);
+ if (*errp)
+ goto errout;
repeat:
/* allocate space in core */
*errp = ext4_mb_regular_allocator(ac);
- if (*errp)
- goto discard_and_exit;
-
- /* as we've just preallocated more space than
- * user requested originally, we store allocated
- * space in a special descriptor */
- if (ac->ac_status == AC_STATUS_FOUND &&
- ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
- *errp = ext4_mb_new_preallocation(ac);
+ /*
+ * The pa allocated above is added to grp->bb_prealloc_list only
+ * when we are able to allocate some blocks, i.e. when
+ * ac->ac_status == AC_STATUS_FOUND.
+ * An error from above means ac->ac_status != AC_STATUS_FOUND,
+ * so we have to free the pa here.
+ */
if (*errp) {
- discard_and_exit:
+ ext4_mb_pa_free(ac);
ext4_discard_allocated_blocks(ac);
goto errout;
}
+ if (ac->ac_status == AC_STATUS_FOUND &&
+ ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
+ ext4_mb_pa_free(ac);
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
@@ -4593,9 +4745,13 @@ repeat:
ar->len = ac->ac_b_ex.fe_len;
}
} else {
- freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
- if (freed)
+ if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
goto repeat;
+ /*
+ * If block allocation fails, the pa allocated above
+ * needs to be freed here.
+ */
+ ext4_mb_pa_free(ac);
*errp = -ENOSPC;
}
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 88c98f17e3d9..6b4d17c2935d 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -24,19 +24,15 @@
#include "ext4.h"
/*
+ * mb_debug() dynamic printk messages can be used to debug mballoc code.
*/
#ifdef CONFIG_EXT4_DEBUG
-extern ushort ext4_mballoc_debug;
-
-#define mb_debug(n, fmt, ...) \
-do { \
- if ((n) <= ext4_mballoc_debug) { \
- printk(KERN_DEBUG "(%s, %d): %s: " fmt, \
- __FILE__, __LINE__, __func__, ##__VA_ARGS__); \
- } \
-} while (0)
+#define mb_debug(sb, fmt, ...) \
+ pr_debug("[%s/%d] EXT4-fs (%s): (%s, %d): %s: " fmt, \
+ current->comm, task_pid_nr(current), sb->s_id, \
+ __FILE__, __LINE__, __func__, ##__VA_ARGS__)
#else
-#define mb_debug(n, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#define mb_debug(sb, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
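
Since mb_debug() is now built on pr_debug(), its output is controlled by dynamic debug rather than by the old ext4_mballoc_debug module parameter. On kernels with CONFIG_DYNAMIC_DEBUG the messages can be enabled at run time with something along the lines of
	echo 'file mballoc.c +p' > /sys/kernel/debug/dynamic_debug/control
(see Documentation/admin-guide/dynamic-debug-howto.rst for the exact query syntax); without dynamic debug, pr_debug() calls compile out unless DEBUG is defined for the file.
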
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index fb6520f37135..c5e3fc998211 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -287,7 +287,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
struct inode *tmp_inode)
{
- int retval;
+ int retval, retval2 = 0;
__le32 i_data[3];
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);
@@ -342,7 +342,9 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* i_blocks when freeing the indirect meta-data blocks
*/
retval = free_ind_block(handle, inode, i_data);
- ext4_mark_inode_dirty(handle, inode);
+ retval2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(retval2 && !retval))
+ retval = retval2;
err_out:
return retval;
@@ -601,7 +603,7 @@ int ext4_ind_migrate(struct inode *inode)
ext4_lblk_t start, end;
ext4_fsblk_t blk;
handle_t *handle;
- int ret;
+ int ret, ret2 = 0;
if (!ext4_has_feature_extents(inode->i_sb) ||
(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
@@ -655,7 +657,9 @@ int ext4_ind_migrate(struct inode *inode)
memset(ei->i_data, 0, sizeof(ei->i_data));
for (i = start; i <= end; i++)
ei->i_data[i] = cpu_to_le32(blk++);
- ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
errout:
ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a8aca4772aaa..56738b538ddf 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1993,7 +1993,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
{
unsigned int blocksize = dir->i_sb->s_blocksize;
int csum_size = 0;
- int err;
+ int err, err2;
if (ext4_has_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
@@ -2028,12 +2028,12 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
- ext4_mark_inode_dirty(handle, dir);
+ err2 = ext4_mark_inode_dirty(handle, dir);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_dirblock(handle, dir, bh);
if (err)
ext4_std_error(dir->i_sb, err);
- return 0;
+ return err ? err : err2;
}
/*
@@ -2223,7 +2223,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
- ext4_mark_inode_dirty(handle, dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
+ if (unlikely(retval))
+ goto out;
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
@@ -2576,12 +2578,12 @@ static int ext4_add_nondir(handle_t *handle,
struct inode *inode = *inodep;
int err = ext4_add_entry(handle, dentry, inode);
if (!err) {
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
d_instantiate_new(dentry, inode);
*inodep = NULL;
- return 0;
+ return err;
}
drop_nlink(inode);
ext4_orphan_add(handle, inode);
@@ -2775,7 +2777,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
- int err, credits, retries = 0;
+ int err, err2 = 0, credits, retries = 0;
if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
@@ -2808,7 +2810,9 @@ out_clear_inode:
clear_nlink(inode);
ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
- ext4_mark_inode_dirty(handle, inode);
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2))
+ err = err2;
ext4_journal_stop(handle);
iput(inode);
goto out_retry;
@@ -3148,10 +3152,12 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_size = 0;
ext4_orphan_add(handle, inode);
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ retval = ext4_mark_inode_dirty(handle, inode);
+ if (retval)
+ goto end_rmdir;
ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
- ext4_mark_inode_dirty(handle, dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
@@ -3221,7 +3227,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
goto end_unlink;
dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
- ext4_mark_inode_dirty(handle, dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+ goto end_unlink;
if (inode->i_nlink == 0)
ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
dentry->d_name.len, dentry->d_name.name);
@@ -3230,7 +3238,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ retval = ext4_mark_inode_dirty(handle, inode);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
@@ -3419,7 +3427,7 @@ retry:
err = ext4_add_entry(handle, dentry, inode);
if (!err) {
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
/* this can happen only for tmpfile being
* linked the first time
*/
@@ -3531,7 +3539,7 @@ static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent,
static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
unsigned ino, unsigned file_type)
{
- int retval;
+ int retval, retval2;
BUFFER_TRACE(ent->bh, "get write access");
retval = ext4_journal_get_write_access(handle, ent->bh);
@@ -3543,19 +3551,19 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
inode_inc_iversion(ent->dir);
ent->dir->i_ctime = ent->dir->i_mtime =
current_time(ent->dir);
- ext4_mark_inode_dirty(handle, ent->dir);
+ retval = ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
- retval = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh);
- if (unlikely(retval)) {
- ext4_std_error(ent->dir->i_sb, retval);
- return retval;
+ retval2 = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh);
+ if (unlikely(retval2)) {
+ ext4_std_error(ent->dir->i_sb, retval2);
+ return retval2;
}
}
brelse(ent->bh);
ent->bh = NULL;
- return 0;
+ return retval;
}
static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
@@ -3790,7 +3798,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
EXT4_FT_CHRDEV);
if (retval)
goto end_rename;
- ext4_mark_inode_dirty(handle, whiteout);
+ retval = ext4_mark_inode_dirty(handle, whiteout);
+ if (unlikely(retval))
+ goto end_rename;
}
if (!new.bh) {
retval = ext4_add_entry(handle, new.dentry, old.inode);
@@ -3811,7 +3821,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
* rename.
*/
old.inode->i_ctime = current_time(old.inode);
- ext4_mark_inode_dirty(handle, old.inode);
+ retval = ext4_mark_inode_dirty(handle, old.inode);
+ if (unlikely(retval))
+ goto end_rename;
if (!whiteout) {
/*
@@ -3840,12 +3852,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
} else {
ext4_inc_count(handle, new.dir);
ext4_update_dx_flag(new.dir);
- ext4_mark_inode_dirty(handle, new.dir);
+ retval = ext4_mark_inode_dirty(handle, new.dir);
+ if (unlikely(retval))
+ goto end_rename;
}
}
- ext4_mark_inode_dirty(handle, old.dir);
+ retval = ext4_mark_inode_dirty(handle, old.dir);
+ if (unlikely(retval))
+ goto end_rename;
if (new.inode) {
- ext4_mark_inode_dirty(handle, new.inode);
+ retval = ext4_mark_inode_dirty(handle, new.inode);
+ if (unlikely(retval))
+ goto end_rename;
if (!new.inode->i_nlink)
ext4_orphan_add(handle, new.inode);
}
@@ -3979,8 +3997,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
ctime = current_time(old.inode);
old.inode->i_ctime = ctime;
new.inode->i_ctime = ctime;
- ext4_mark_inode_dirty(handle, old.inode);
- ext4_mark_inode_dirty(handle, new.inode);
+ retval = ext4_mark_inode_dirty(handle, old.inode);
+ if (unlikely(retval))
+ goto end_rename;
+ retval = ext4_mark_inode_dirty(handle, new.inode);
+ if (unlikely(retval))
+ goto end_rename;
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9824cd8203e8..c668f6b42374 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -93,11 +93,11 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* i_mmap_rwsem (inode->i_mmap_rwsem)!
*
* page fault path:
- * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
+ * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
* page lock -> i_data_sem (rw)
*
* buffered write path:
- * sb_start_write -> i_mutex -> mmap_sem
+ * sb_start_write -> i_mutex -> mmap_lock
* sb_start_write -> i_mutex -> transaction start -> page lock ->
* i_data_sem (rw)
*
@@ -107,7 +107,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* i_data_sem (rw)
*
* direct IO:
- * sb_start_write -> i_mutex -> mmap_sem
+ * sb_start_write -> i_mutex -> mmap_lock
* sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
*
* writepages:
@@ -3718,7 +3718,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
int blocksize, clustersize;
unsigned int db_count;
unsigned int i;
- int needs_recovery, has_huge_files, has_bigalloc;
+ int needs_recovery, has_huge_files;
__u64 blocks_count;
int err = 0;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
@@ -4010,17 +4010,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
+ /* can't mount with both data=journal and dioread_nolock. */
clear_opt(sb, DIOREAD_NOLOCK);
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and delalloc");
goto failed_mount;
}
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and dioread_nolock");
- goto failed_mount;
- }
if (test_opt(sb, DAX)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and dax");
@@ -4237,8 +4233,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Handle clustersize */
clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
- has_bigalloc = ext4_has_feature_bigalloc(sb);
- if (has_bigalloc) {
+ if (ext4_has_feature_bigalloc(sb)) {
if (clustersize < blocksize) {
ext4_msg(sb, KERN_ERR,
"cluster size (%d) smaller than "
@@ -5925,7 +5920,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
S_NOATIME | S_IMMUTABLE);
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
unlock_inode:
inode_unlock(inode);
@@ -6027,12 +6022,14 @@ static int ext4_quota_off(struct super_block *sb, int type)
* this is not a hard failure and quotas are already disabled.
*/
handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
goto out_unlock;
+ }
EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out_unlock:
inode_unlock(inode);
@@ -6090,7 +6087,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
{
struct inode *inode = sb_dqopt(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
- int err, offset = off & (sb->s_blocksize - 1);
+ int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
int retries = 0;
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
@@ -6138,9 +6135,11 @@ out:
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
- ext4_mark_inode_dirty(handle, inode);
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2 && !err))
+ err = err2;
}
- return len;
+ return err ? err : len;
}
#endif
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 21df43a25328..9b29a40738ac 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1327,7 +1327,7 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
int blocksize = ea_inode->i_sb->s_blocksize;
int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
int csize, wsize = 0;
- int ret = 0;
+ int ret = 0, ret2 = 0;
int retries = 0;
retry:
@@ -1385,7 +1385,9 @@ retry:
ext4_update_i_disksize(ea_inode, wsize);
inode_unlock(ea_inode);
- ext4_mark_inode_dirty(handle, ea_inode);
+ ret2 = ext4_mark_inode_dirty(handle, ea_inode);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
out:
brelse(bh);
@@ -1800,8 +1802,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
if (EXT4_I(inode)->i_file_acl) {
/* The inode already has an extended attribute block. */
bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
- if (IS_ERR(bs->bh))
- return PTR_ERR(bs->bh);
+ if (IS_ERR(bs->bh)) {
+ error = PTR_ERR(bs->bh);
+ bs->bh = NULL;
+ return error;
+ }
ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index bb68d21e1f8c..d13c5c6a9787 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -127,3 +127,13 @@ config F2FS_FS_ZSTD
default y
help
Support ZSTD compress algorithm, if unsure, say Y.
+
+config F2FS_FS_LZORLE
+ bool "LZO-RLE compression support"
+ depends on F2FS_FS_COMPRESSION
+ depends on F2FS_FS_LZO
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ default y
+ help
+ Support LZO-RLE compress algorithm, if unsure, say Y.
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index b96823c59b15..124868c13f80 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/acl.h
*
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 852890b72d6a..236064930251 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -86,6 +86,8 @@ repeat:
return ERR_PTR(err);
}
+ f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
+
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
@@ -220,6 +222,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
.is_por = (type == META_POR),
};
struct blk_plug plug;
+ int err;
if (unlikely(type == META_POR))
fio.op_flags &= ~REQ_META;
@@ -263,8 +266,11 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
}
fio.page = page;
- f2fs_submit_page_bio(&fio);
- f2fs_put_page(page, 0);
+ err = f2fs_submit_page_bio(&fio);
+ f2fs_put_page(page, err ? 1 : 0);
+
+ if (!err)
+ f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
}
out:
blk_finish_plug(&plug);
@@ -889,8 +895,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
int i;
int err;
- sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
- GFP_KERNEL);
+ sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
+ GFP_KERNEL);
if (!sbi->ckpt)
return -ENOMEM;
/*
@@ -1160,10 +1166,12 @@ static int block_operations(struct f2fs_sb_info *sbi)
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
- struct blk_plug plug;
int err = 0, cnt = 0;
- blk_start_plug(&plug);
+ /*
+ * Let's flush inline_data in dirty node pages.
+ */
+ f2fs_flush_inline_data(sbi);
retry_flush_quotas:
f2fs_lock_all(sbi);
@@ -1192,7 +1200,7 @@ retry_flush_dents:
f2fs_unlock_all(sbi);
err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
if (err)
- goto out;
+ return err;
cond_resched();
goto retry_flush_quotas;
}
@@ -1208,7 +1216,7 @@ retry_flush_dents:
f2fs_unlock_all(sbi);
err = f2fs_sync_inode_meta(sbi);
if (err)
- goto out;
+ return err;
cond_resched();
goto retry_flush_quotas;
}
@@ -1224,7 +1232,7 @@ retry_flush_nodes:
if (err) {
up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
- goto out;
+ return err;
}
cond_resched();
goto retry_flush_nodes;
@@ -1236,8 +1244,6 @@ retry_flush_nodes:
*/
__prepare_cp_block(sbi);
up_write(&sbi->node_change);
-out:
- blk_finish_plug(&plug);
return err;
}
@@ -1260,6 +1266,9 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
if (unlikely(f2fs_cp_error(sbi)))
break;
+ if (type == F2FS_DIRTY_META)
+ f2fs_sync_meta_pages(sbi, META, LONG_MAX,
+ FS_CP_META_IO);
io_schedule_timeout(DEFAULT_IO_TIMEOUT);
}
finish_wait(&sbi->cp_wait, &wait);
@@ -1553,7 +1562,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return 0;
f2fs_warn(sbi, "Start checkpoint disabled!");
}
- mutex_lock(&sbi->cp_mutex);
+ if (cpc->reason != CP_RESIZE)
+ mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1622,7 +1632,8 @@ stop:
f2fs_update_time(sbi, CP_TIME);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
- mutex_unlock(&sbi->cp_mutex);
+ if (cpc->reason != CP_RESIZE)
+ mutex_unlock(&sbi->cp_mutex);
return err;
}
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index df7b2d15eacd..1e02a8c106b0 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -65,15 +65,6 @@ static void f2fs_set_compressed_page(struct page *page,
page->mapping = inode->i_mapping;
}
-static void f2fs_put_compressed_page(struct page *page)
-{
- set_page_private(page, (unsigned long)NULL);
- ClearPagePrivate(page);
- page->mapping = NULL;
- unlock_page(page);
- put_page(page);
-}
-
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
int i;
@@ -98,8 +89,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
f2fs_drop_rpages(cc, len, true);
}
-static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
- struct address_space *mapping,
+static void f2fs_put_rpages_mapping(struct address_space *mapping,
pgoff_t start, int len)
{
int i;
@@ -236,7 +226,12 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
if (!cc->private)
return -ENOMEM;
- cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
+ /*
+ * we do not set cc->clen to LZ4_compressBound(inputsize) to
+ * cover the worst compression case, because the lz4 compressor
+ * can handle the output budget properly.
+ */
+ cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
return 0;
}
@@ -252,11 +247,9 @@ static int lz4_compress_pages(struct compress_ctx *cc)
len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
cc->clen, cc->private);
- if (!len) {
- printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
- return -EIO;
- }
+ if (!len)
+ return -EAGAIN;
+
cc->clen = len;
return 0;
}
@@ -366,6 +359,13 @@ static int zstd_compress_pages(struct compress_ctx *cc)
return -EIO;
}
+ /*
+ * there is compressed data remaining in the intermediate buffer
+ * because there was no more space left in cbuf.cdata
+ */
+ if (ret)
+ return -EAGAIN;
+
cc->clen = outbuf.pos;
return 0;
}
@@ -451,6 +451,31 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
};
#endif
+#ifdef CONFIG_F2FS_FS_LZO
+#ifdef CONFIG_F2FS_FS_LZORLE
+static int lzorle_compress_pages(struct compress_ctx *cc)
+{
+ int ret;
+
+ ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+ &cc->clen, cc->private);
+ if (ret != LZO_E_OK) {
+ printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+ return -EIO;
+ }
+ return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_lzorle_ops = {
+ .init_compress_ctx = lzo_init_compress_ctx,
+ .destroy_compress_ctx = lzo_destroy_compress_ctx,
+ .compress_pages = lzorle_compress_pages,
+ .decompress_pages = lzo_decompress_pages,
+};
+#endif
+#endif
+
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
&f2fs_lzo_ops,
@@ -467,6 +492,11 @@ static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#else
NULL,
#endif
+#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
+ &f2fs_lzorle_ops,
+#else
+ NULL,
+#endif
};
bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -476,17 +506,47 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
-static struct page *f2fs_grab_page(void)
+static mempool_t *compress_page_pool = NULL;
+static int num_compress_pages = 512;
+module_param(num_compress_pages, uint, 0444);
+MODULE_PARM_DESC(num_compress_pages,
+ "Number of intermediate compress pages to preallocate");
+
+int f2fs_init_compress_mempool(void)
+{
+ compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
+ if (!compress_page_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void f2fs_destroy_compress_mempool(void)
+{
+ mempool_destroy(compress_page_pool);
+}
+
+static struct page *f2fs_compress_alloc_page(void)
{
struct page *page;
- page = alloc_page(GFP_NOFS);
- if (!page)
- return NULL;
+ page = mempool_alloc(compress_page_pool, GFP_NOFS);
lock_page(page);
+
return page;
}
+static void f2fs_compress_free_page(struct page *page)
+{
+ if (!page)
+ return;
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ page->mapping = NULL;
+ unlock_page(page);
+ mempool_free(page, compress_page_pool);
+}
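
Backing these helpers with a mempool rather than bare alloc_page() keeps num_compress_pages pages in reserve, so compressed writeback can still make forward progress when the page allocator is under pressure: mempool_alloc() with a sleeping gfp mask falls back to that reserve instead of failing. A tiny user-space analogue of the reserve-then-fallback idea (single-threaded and illustrative only, not the actual mempool implementation):

	#include <stdlib.h>

	#define RESERVE 8

	static void *reserve[RESERVE];
	static int nr_reserved;

	/* Sketch only: pre-fill a small reserve of buffers. */
	static void pool_init(void)
	{
		while (nr_reserved < RESERVE) {
			void *p = malloc(4096);

			if (!p)
				break;
			reserve[nr_reserved++] = p;
		}
	}

	static void *pool_alloc(void)
	{
		void *p = malloc(4096);			/* try the normal allocator first */

		if (!p && nr_reserved)
			p = reserve[--nr_reserved];	/* fall back to the reserve */
		return p;
	}

	static void pool_free(void *p)
	{
		if (nr_reserved < RESERVE)
			reserve[nr_reserved++] = p;	/* refill the reserve first */
		else
			free(p);
	}
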
+
static int f2fs_compress_pages(struct compress_ctx *cc)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
@@ -516,7 +576,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
}
for (i = 0; i < cc->nr_cpages; i++) {
- cc->cpages[i] = f2fs_grab_page();
+ cc->cpages[i] = f2fs_compress_alloc_page();
if (!cc->cpages[i]) {
ret = -ENOMEM;
goto out_free_cpages;
@@ -561,7 +621,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
vunmap(cc->rbuf);
for (i = nr_cpages; i < cc->nr_cpages; i++) {
- f2fs_put_compressed_page(cc->cpages[i]);
+ f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -581,7 +641,7 @@ out_vunmap_rbuf:
out_free_cpages:
for (i = 0; i < cc->nr_cpages; i++) {
if (cc->cpages[i])
- f2fs_put_compressed_page(cc->cpages[i]);
+ f2fs_compress_free_page(cc->cpages[i]);
}
kfree(cc->cpages);
cc->cpages = NULL;
@@ -788,6 +848,8 @@ static bool cluster_may_compress(struct compress_ctx *cc)
return false;
if (!f2fs_cluster_is_full(cc))
return false;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
+ return false;
return __cluster_may_compress(cc);
}
@@ -879,7 +941,7 @@ retry:
if (!PageUptodate(page)) {
f2fs_unlock_rpages(cc, i + 1);
- f2fs_put_rpages_mapping(cc, mapping, start_idx,
+ f2fs_put_rpages_mapping(mapping, start_idx,
cc->cluster_size);
f2fs_destroy_compress_ctx(cc);
goto retry;
@@ -914,7 +976,7 @@ retry:
unlock_pages:
f2fs_unlock_rpages(cc, i);
release_pages:
- f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
+ f2fs_put_rpages_mapping(mapping, start_idx, i);
f2fs_destroy_compress_ctx(cc);
return ret;
}
@@ -954,6 +1016,55 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
return first_index;
}
+int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
+{
+ void *fsdata = NULL;
+ struct page *pagep;
+ int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
+ pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
+ log_cluster_size;
+ int err;
+
+ err = f2fs_is_compressed_cluster(inode, start_idx);
+ if (err < 0)
+ return err;
+
+ /* truncate normal cluster */
+ if (!err)
+ return f2fs_do_truncate_blocks(inode, from, lock);
+
+ /* truncate compressed cluster */
+ err = f2fs_prepare_compress_overwrite(inode, &pagep,
+ start_idx, &fsdata);
+
+ /* should not be a normal cluster */
+ f2fs_bug_on(F2FS_I_SB(inode), err == 0);
+
+ if (err <= 0)
+ return err;
+
+ if (err > 0) {
+ struct page **rpages = fsdata;
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int i;
+
+ for (i = cluster_size - 1; i >= 0; i--) {
+ loff_t start = rpages[i]->index << PAGE_SHIFT;
+
+ if (from <= start) {
+ zero_user_segment(rpages[i], 0, PAGE_SIZE);
+ } else {
+ zero_user_segment(rpages[i], from - start,
+ PAGE_SIZE);
+ break;
+ }
+ }
+
+ f2fs_compress_write_end(inode, fsdata, start_idx, true);
+ }
+ return 0;
+}
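
As a concrete example of the index math in f2fs_truncate_partial_cluster() (with assumed parameters: 4KiB pages, so PAGE_SHIFT = 12, and log_cluster_size = 2, i.e. four pages per cluster): a truncation point of from = 100000 bytes gives from >> (12 + 2) = 6 as the cluster number and 6 << 2 = 24 as start_idx, the first page index of the cluster spanning pages 24-27. The zeroing loop then clears every page of that cluster lying wholly at or beyond from, clears the page containing from only from the in-page offset (from - start) onward, and the cluster is written back through f2fs_compress_write_end().
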
+
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
@@ -985,7 +1096,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
- if (!f2fs_trylock_op(sbi))
+ if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
return -EAGAIN;
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -1092,7 +1203,8 @@ unlock_continue:
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ if (!IS_NOQUOTA(inode))
+ f2fs_unlock_op(sbi);
spin_lock(&fi->i_size_lock);
if (fi->last_disk_size < psize)
@@ -1118,7 +1230,8 @@ out_put_cic:
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
- f2fs_unlock_op(sbi);
+ if (!IS_NOQUOTA(inode))
+ f2fs_unlock_op(sbi);
return -EAGAIN;
}
@@ -1132,7 +1245,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
if (unlikely(bio->bi_status))
mapping_set_error(cic->inode->i_mapping, -EIO);
- f2fs_put_compressed_page(page);
+ f2fs_compress_free_page(page);
dec_page_count(sbi, F2FS_WB_DATA);
@@ -1293,7 +1406,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page;
- page = f2fs_grab_page();
+ page = f2fs_compress_alloc_page();
if (!page)
goto out_free;
@@ -1313,7 +1426,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
continue;
}
- dic->tpages[i] = f2fs_grab_page();
+ dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i])
goto out_free;
}
@@ -1335,8 +1448,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
if (!dic->tpages[i])
continue;
- unlock_page(dic->tpages[i]);
- put_page(dic->tpages[i]);
+ f2fs_compress_free_page(dic->tpages[i]);
}
kfree(dic->tpages);
}
@@ -1345,7 +1457,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
for (i = 0; i < dic->nr_cpages; i++) {
if (!dic->cpages[i])
continue;
- f2fs_put_compressed_page(dic->cpages[i]);
+ f2fs_compress_free_page(dic->cpages[i]);
}
kfree(dic->cpages);
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 03ec97f28235..326c63879ddc 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,6 +19,7 @@
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
+#include <linux/fiemap.h>
#include "f2fs.h"
#include "node.h"
@@ -114,7 +115,8 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
STEP_DECRYPT,
- STEP_DECOMPRESS,
+ STEP_DECOMPRESS_NOWQ, /* handle normal cluster data inplace */
+ STEP_DECOMPRESS, /* handle compressed cluster data in workqueue */
STEP_VERITY,
};
@@ -513,6 +515,34 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi,
__submit_bio(sbi, bio, type);
}
+static void __attach_io_flag(struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = fio->sbi;
+ unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
+ unsigned int io_flag, fua_flag, meta_flag;
+
+ if (fio->type == DATA)
+ io_flag = sbi->data_io_flag;
+ else if (fio->type == NODE)
+ io_flag = sbi->node_io_flag;
+ else
+ return;
+
+ fua_flag = io_flag & temp_mask;
+ meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
+
+ /*
+ * data/node io flag bits per temp:
+ * REQ_META | REQ_FUA |
+ * 5 | 4 | 3 | 2 | 1 | 0 |
+ * Cold | Warm | Hot | Cold | Warm | Hot |
+ */
+ if ((1 << fio->temp) & meta_flag)
+ fio->op_flags |= REQ_META;
+ if ((1 << fio->temp) & fua_flag)
+ fio->op_flags |= REQ_FUA;
+}
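
Reading the bit layout documented above: the low NR_TEMP_TYPE bits of the io flag choose which temperatures get REQ_FUA and the next NR_TEMP_TYPE bits choose which get REQ_META. Assuming NR_TEMP_TYPE is 3 with hot as temperature 0, a data_io_flag value of 0x09 (binary 001001) sets bit 0 and bit 3, so hot data IO is issued with both REQ_FUA and REQ_META while warm and cold data IO are left untouched; node IO is controlled independently through node_io_flag.
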
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -520,6 +550,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
if (!io->bio)
return;
+ __attach_io_flag(fio);
bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op))
@@ -661,6 +692,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
if (fio->io_wbc && !is_read_io(fio->op))
wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ __attach_io_flag(fio);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
@@ -847,6 +879,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_PAGES);
+ __attach_io_flag(fio);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
add_bio_entry(fio->sbi, bio, page, fio->temp);
@@ -967,7 +1000,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (f2fs_compressed_file(inode))
- post_read_steps |= 1 << STEP_DECOMPRESS;
+ post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
if (f2fs_need_verity(inode, first_idx))
post_read_steps |= 1 << STEP_VERITY;
@@ -1010,6 +1043,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
}
ClearPageError(page);
inc_page_count(sbi, F2FS_RD_DATA);
+ f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
__submit_bio(sbi, bio, DATA);
return 0;
}
@@ -1808,6 +1842,25 @@ static int f2fs_xattr_fiemap(struct inode *inode,
return (err < 0 ? err : 0);
}
+static loff_t max_inode_blocks(struct inode *inode)
+{
+ loff_t result = ADDRS_PER_INODE(inode);
+ loff_t leaf_count = ADDRS_PER_BLOCK(inode);
+
+ /* two direct node blocks */
+ result += (leaf_count * 2);
+
+ /* two indirect node blocks */
+ leaf_count *= NIDS_PER_BLOCK;
+ result += (leaf_count * 2);
+
+ /* one double indirect node block */
+ leaf_count *= NIDS_PER_BLOCK;
+ result += leaf_count;
+
+ return result;
+}
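
To put a number on max_inode_blocks(): taking the commonly quoted 4KiB-block values of roughly 923 addresses in the inode, 1018 addresses per direct node block and 1018 node IDs per indirect block (these depend on inline xattr and other options, so treat them as an assumption), the sum is 923 + 2*1018 + 2*1018^2 + 1018^3, about 1.06e9 blocks or roughly 3.9 TiB of data addressable from a single inode. Using this instead of sbi->max_file_blocks means the FIEMAP_EXTENT_LAST check reflects what one inode's node tree can actually reach.
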
+
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -1817,6 +1870,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
int ret = 0;
+ bool compr_cluster = false;
+ unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
ret = f2fs_precache_extents(inode);
@@ -1824,7 +1879,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret;
}
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
+ ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
if (ret)
return ret;
@@ -1851,6 +1906,9 @@ next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
+ if (compr_cluster)
+ map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
+
ret = get_data_block(inode, start_blk, &map_bh, 0,
F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
@@ -1861,7 +1919,7 @@ next:
start_blk = next_pgofs;
if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
- F2FS_I_SB(inode)->max_file_blocks))
+ max_inode_blocks(inode)))
goto prep_next;
flags |= FIEMAP_EXTENT_LAST;
@@ -1873,11 +1931,38 @@ next:
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size, flags);
+ if (ret)
+ goto out;
+ size = 0;
}
- if (start_blk > last_blk || ret)
+ if (start_blk > last_blk)
goto out;
+ if (compr_cluster) {
+ compr_cluster = false;
+
+
+ logical = blk_to_logical(inode, start_blk - 1);
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = blk_to_logical(inode, cluster_size);
+
+ flags |= FIEMAP_EXTENT_ENCODED;
+
+ start_blk += cluster_size - 1;
+
+ if (start_blk > last_blk)
+ goto out;
+
+ goto prep_next;
+ }
+
+ if (map_bh.b_blocknr == COMPRESS_ADDR) {
+ compr_cluster = true;
+ start_blk++;
+ goto prep_next;
+ }
+
logical = blk_to_logical(inode, start_blk);
phys = blk_to_logical(inode, map_bh.b_blocknr);
size = map_bh.b_size;
@@ -2015,6 +2100,7 @@ submit_and_realloc:
goto submit_and_realloc;
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+ f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
ClearPageError(page);
*last_block_in_bio = block_nr;
goto out;
@@ -2113,6 +2199,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page = dic->cpages[i];
block_t blkaddr;
+ struct bio_post_read_ctx *ctx;
blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i + 1);
@@ -2130,16 +2217,16 @@ submit_and_realloc:
page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- bio = NULL;
dic->failed = true;
if (refcount_sub_and_test(dic->nr_cpages - i,
- &dic->ref))
+ &dic->ref)) {
f2fs_decompress_end_io(dic->rpages,
cc->cluster_size, true,
false);
- f2fs_free_dic(dic);
+ f2fs_free_dic(dic);
+ }
f2fs_put_dnode(&dn);
- *bio_ret = bio;
+ *bio_ret = NULL;
return ret;
}
}
@@ -2149,7 +2236,14 @@ submit_and_realloc:
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
+ /* tag STEP_DECOMPRESS to handle IO in wq */
+ ctx = bio->bi_private;
+ if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
+ ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
+
inc_page_count(sbi, F2FS_RD_DATA);
+ f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
ClearPageError(page);
*last_block_in_bio = blkaddr;
}
@@ -2623,8 +2717,8 @@ write:
f2fs_available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
- /* Dentry blocks are controlled by checkpoint */
- if (S_ISDIR(inode->i_mode)) {
+ /* Dentry/quota blocks are controlled by checkpoint */
+ if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
fio.need_lock = LOCK_DONE;
err = f2fs_do_write_data_page(&fio);
goto done;
@@ -2766,7 +2860,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
- int cycled;
int range_whole = 0;
xa_mark_t tag;
int nwritten = 0;
@@ -2784,17 +2877,12 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
@@ -2959,12 +3047,13 @@ next:
}
}
#endif
- if ((!cycled && !done) || retry) {
- cycled = 1;
+ if (retry) {
index = 0;
- end = writeback_index - 1;
+ end = -1;
goto retry;
}
+ if (wbc->range_cyclic && !done)
+ done_index = 0;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
@@ -3493,6 +3582,9 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else if (err < 0) {
f2fs_write_failed(mapping, offset + count);
}
+ } else {
+ if (err > 0)
+ f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
}
out:
@@ -3576,6 +3668,37 @@ static int f2fs_set_data_page_dirty(struct page *page)
return 0;
}
+
+static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
+{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct dnode_of_data dn;
+ sector_t start_idx, blknr = 0;
+ int ret;
+
+ start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+ if (ret)
+ return 0;
+
+ if (dn.data_blkaddr != COMPRESS_ADDR) {
+ dn.ofs_in_node += block - start_idx;
+ blknr = f2fs_data_blkaddr(&dn);
+ if (!__is_valid_data_blkaddr(blknr))
+ blknr = 0;
+ }
+
+ f2fs_put_dnode(&dn);
+
+ return blknr;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
@@ -3587,6 +3710,9 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
filemap_write_and_wait(mapping);
+ if (f2fs_compressed_file(inode))
+ return f2fs_bmap_compress(inode, block);
+
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
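
The data.c hunks above teach f2fs_fiemap() about compressed clusters, reporting each one as a single FIEMAP_EXTENT_ENCODED extent and accounting read I/O via f2fs_update_iostat(). A rough userspace sketch of how that surfaces through the generic FS_IOC_FIEMAP ioctl (not part of the patch; the extent budget and output format are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i, n = 32;			/* arbitrary extent budget */
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = ~0ULL;			/* map the whole file */
	fm->fm_extent_count = n;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}

	for (i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		/* compressed f2fs clusters now show up with this flag set */
		printf("logical %llu phys %llu len %llu%s\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length,
		       (fe->fe_flags & FIEMAP_EXTENT_ENCODED) ? " [encoded]" : "");
	}

	free(fm);
	close(fd);
	return 0;
}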
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 44bfc464df78..d35976785e8c 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -70,6 +70,111 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de)
return DT_UNKNOWN;
}
+/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
+int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname)
+{
+#ifdef CONFIG_UNICODE
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+
+ if (IS_CASEFOLDED(dir)) {
+ fname->cf_name.name = f2fs_kmalloc(sbi, F2FS_NAME_LEN,
+ GFP_NOFS);
+ if (!fname->cf_name.name)
+ return -ENOMEM;
+ fname->cf_name.len = utf8_casefold(sbi->s_encoding,
+ fname->usr_fname,
+ fname->cf_name.name,
+ F2FS_NAME_LEN);
+ if ((int)fname->cf_name.len <= 0) {
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+ if (f2fs_has_strict_mode(sbi))
+ return -EINVAL;
+ /* fall back to treating name as opaque byte sequence */
+ }
+ }
+#endif
+ return 0;
+}
+
+static int __f2fs_setup_filename(const struct inode *dir,
+ const struct fscrypt_name *crypt_name,
+ struct f2fs_filename *fname)
+{
+ int err;
+
+ memset(fname, 0, sizeof(*fname));
+
+ fname->usr_fname = crypt_name->usr_fname;
+ fname->disk_name = crypt_name->disk_name;
+#ifdef CONFIG_FS_ENCRYPTION
+ fname->crypto_buf = crypt_name->crypto_buf;
+#endif
+ if (crypt_name->is_ciphertext_name) {
+ /* hash was decoded from the no-key name */
+ fname->hash = cpu_to_le32(crypt_name->hash);
+ } else {
+ err = f2fs_init_casefolded_name(dir, fname);
+ if (err) {
+ f2fs_free_filename(fname);
+ return err;
+ }
+ f2fs_hash_filename(dir, fname);
+ }
+ return 0;
+}
+
+/*
+ * Prepare to search for @iname in @dir. This is similar to
+ * fscrypt_setup_filename(), but this also handles computing the casefolded name
+ * and the f2fs dirhash if needed, then packing all the information about this
+ * filename up into a 'struct f2fs_filename'.
+ */
+int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct f2fs_filename *fname)
+{
+ struct fscrypt_name crypt_name;
+ int err;
+
+ err = fscrypt_setup_filename(dir, iname, lookup, &crypt_name);
+ if (err)
+ return err;
+
+ return __f2fs_setup_filename(dir, &crypt_name, fname);
+}
+
+/*
+ * Prepare to look up @dentry in @dir. This is similar to
+ * fscrypt_prepare_lookup(), but this also handles computing the casefolded name
+ * and the f2fs dirhash if needed, then packing all the information about this
+ * filename up into a 'struct f2fs_filename'.
+ */
+int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct f2fs_filename *fname)
+{
+ struct fscrypt_name crypt_name;
+ int err;
+
+ err = fscrypt_prepare_lookup(dir, dentry, &crypt_name);
+ if (err)
+ return err;
+
+ return __f2fs_setup_filename(dir, &crypt_name, fname);
+}
+
+void f2fs_free_filename(struct f2fs_filename *fname)
+{
+#ifdef CONFIG_FS_ENCRYPTION
+ kfree(fname->crypto_buf.name);
+ fname->crypto_buf.name = NULL;
+#endif
+#ifdef CONFIG_UNICODE
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+#endif
+}
+
static unsigned long dir_block_index(unsigned int level,
int dir_level, unsigned int idx)
{
@@ -84,8 +189,7 @@ static unsigned long dir_block_index(unsigned int level,
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
struct page *dentry_page,
- struct fscrypt_name *fname,
- f2fs_hash_t namehash,
+ const struct f2fs_filename *fname,
int *max_slots,
struct page **res_page)
{
@@ -96,7 +200,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
make_dentry_ptr_block(dir, &d, dentry_blk);
- de = f2fs_find_target_dentry(fname, namehash, max_slots, &d);
+ de = f2fs_find_target_dentry(&d, fname, max_slots);
if (de)
*res_page = dentry_page;
@@ -107,112 +211,57 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for.
- *
- * Returns: 0 if the directory entry matches, more than 0 if it
- * doesn't match or less than zero on error.
*/
-int f2fs_ci_compare(const struct inode *parent, const struct qstr *name,
- const struct qstr *entry, bool quick)
+static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
+ const u8 *de_name, u32 de_name_len)
{
- const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb);
+ const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
const struct unicode_map *um = sbi->s_encoding;
- int ret;
-
- if (quick)
- ret = utf8_strncasecmp_folded(um, name, entry);
- else
- ret = utf8_strncasecmp(um, name, entry);
+ struct qstr entry = QSTR_INIT(de_name, de_name_len);
+ int res;
- if (ret < 0) {
- /* Handle invalid character sequence as either an error
- * or as an opaque byte sequence.
+ res = utf8_strncasecmp_folded(um, name, &entry);
+ if (res < 0) {
+ /*
+ * In strict mode, ignore invalid names. In non-strict mode,
+ * fall back to treating them as opaque byte sequences.
*/
- if (f2fs_has_strict_mode(sbi))
- return -EINVAL;
-
- if (name->len != entry->len)
- return 1;
-
- return !!memcmp(name->name, entry->name, name->len);
+ if (f2fs_has_strict_mode(sbi) || name->len != entry.len)
+ return false;
+ return !memcmp(name->name, entry.name, name->len);
}
-
- return ret;
+ return res == 0;
}
+#endif /* CONFIG_UNICODE */
-static void f2fs_fname_setup_ci_filename(struct inode *dir,
- const struct qstr *iname,
- struct fscrypt_str *cf_name)
+static inline bool f2fs_match_name(const struct inode *dir,
+ const struct f2fs_filename *fname,
+ const u8 *de_name, u32 de_name_len)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
-
- if (!IS_CASEFOLDED(dir)) {
- cf_name->name = NULL;
- return;
- }
+ struct fscrypt_name f;
- cf_name->name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, GFP_NOFS);
- if (!cf_name->name)
- return;
-
- cf_name->len = utf8_casefold(sbi->s_encoding,
- iname, cf_name->name,
- F2FS_NAME_LEN);
- if ((int)cf_name->len <= 0) {
- kvfree(cf_name->name);
- cf_name->name = NULL;
- }
-}
-#endif
-
-static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d,
- struct f2fs_dir_entry *de,
- struct fscrypt_name *fname,
- struct fscrypt_str *cf_str,
- unsigned long bit_pos,
- f2fs_hash_t namehash)
-{
#ifdef CONFIG_UNICODE
- struct inode *parent = d->inode;
- struct f2fs_sb_info *sbi = F2FS_I_SB(parent);
- struct qstr entry;
-#endif
-
- if (de->hash_code != namehash)
- return false;
+ if (fname->cf_name.name) {
+ struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);
-#ifdef CONFIG_UNICODE
- entry.name = d->filename[bit_pos];
- entry.len = de->name_len;
-
- if (sbi->s_encoding && IS_CASEFOLDED(parent)) {
- if (cf_str->name) {
- struct qstr cf = {.name = cf_str->name,
- .len = cf_str->len};
- return !f2fs_ci_compare(parent, &cf, &entry, true);
- }
- return !f2fs_ci_compare(parent, fname->usr_fname, &entry,
- false);
+ return f2fs_match_ci_name(dir, &cf, de_name, de_name_len);
}
#endif
- if (fscrypt_match_name(fname, d->filename[bit_pos],
- le16_to_cpu(de->name_len)))
- return true;
- return false;
+ f.usr_fname = fname->usr_fname;
+ f.disk_name = fname->disk_name;
+#ifdef CONFIG_FS_ENCRYPTION
+ f.crypto_buf = fname->crypto_buf;
+#endif
+ return fscrypt_match_name(&f, de_name, de_name_len);
}
-struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
- f2fs_hash_t namehash, int *max_slots,
- struct f2fs_dentry_ptr *d)
+struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ const struct f2fs_filename *fname, int *max_slots)
{
struct f2fs_dir_entry *de;
- struct fscrypt_str cf_str = { .name = NULL, .len = 0 };
unsigned long bit_pos = 0;
int max_len = 0;
-#ifdef CONFIG_UNICODE
- f2fs_fname_setup_ci_filename(d->inode, fname->usr_fname, &cf_str);
-#endif
-
if (max_slots)
*max_slots = 0;
while (bit_pos < d->max) {
@@ -229,7 +278,9 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
continue;
}
- if (f2fs_match_name(d, de, fname, &cf_str, bit_pos, namehash))
+ if (de->hash_code == fname->hash &&
+ f2fs_match_name(d->inode, fname, d->filename[bit_pos],
+ le16_to_cpu(de->name_len)))
goto found;
if (max_slots && max_len > *max_slots)
@@ -243,33 +294,27 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
found:
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
-
-#ifdef CONFIG_UNICODE
- kvfree(cf_str.name);
-#endif
return de;
}
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
- struct fscrypt_name *fname,
+ const struct f2fs_filename *fname,
struct page **res_page)
{
- struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
- int s = GET_DENTRY_SLOTS(name.len);
+ int s = GET_DENTRY_SLOTS(fname->disk_name.len);
unsigned int nbucket, nblock;
unsigned int bidx, end_block;
struct page *dentry_page;
struct f2fs_dir_entry *de = NULL;
bool room = false;
int max_slots;
- f2fs_hash_t namehash = f2fs_dentry_hash(dir, &name, fname);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- le32_to_cpu(namehash) % nbucket);
+ le32_to_cpu(fname->hash) % nbucket);
end_block = bidx + nblock;
for (; bidx < end_block; bidx++) {
@@ -285,8 +330,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
}
- de = find_in_block(dir, dentry_page, fname, namehash,
- &max_slots, res_page);
+ de = find_in_block(dir, dentry_page, fname, &max_slots,
+ res_page);
if (de)
break;
@@ -295,8 +340,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
f2fs_put_page(dentry_page, 0);
}
- if (!de && room && F2FS_I(dir)->chash != namehash) {
- F2FS_I(dir)->chash = namehash;
+ if (!de && room && F2FS_I(dir)->chash != fname->hash) {
+ F2FS_I(dir)->chash = fname->hash;
F2FS_I(dir)->clevel = level;
}
@@ -304,7 +349,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page)
+ const struct f2fs_filename *fname,
+ struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
@@ -353,18 +399,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page)
{
struct f2fs_dir_entry *de = NULL;
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
int err;
-#ifdef CONFIG_UNICODE
- if (f2fs_has_strict_mode(F2FS_I_SB(dir)) && IS_CASEFOLDED(dir) &&
- utf8_validate(F2FS_I_SB(dir)->s_encoding, child)) {
- *res_page = ERR_PTR(-EINVAL);
- return NULL;
- }
-#endif
-
- err = fscrypt_setup_filename(dir, child, 1, &fname);
+ err = f2fs_setup_filename(dir, child, 1, &fname);
if (err) {
if (err == -ENOENT)
*res_page = NULL;
@@ -375,7 +413,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
de = __f2fs_find_entry(dir, &fname, res_page);
- fscrypt_free_filename(&fname);
+ f2fs_free_filename(&fname);
return de;
}
@@ -416,7 +454,8 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
f2fs_put_page(page, 1);
}
-static void init_dent_inode(const struct qstr *name, struct page *ipage)
+static void init_dent_inode(const struct f2fs_filename *fname,
+ struct page *ipage)
{
struct f2fs_inode *ri;
@@ -424,16 +463,16 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
- ri->i_namelen = cpu_to_le32(name->len);
- memcpy(ri->i_name, name->name, name->len);
+ ri->i_namelen = cpu_to_le32(fname->disk_name.len);
+ memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
set_page_dirty(ipage);
}
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d)
{
- struct qstr dot = QSTR_INIT(".", 1);
- struct qstr dotdot = QSTR_INIT("..", 2);
+ struct fscrypt_str dot = FSTR_INIT(".", 1);
+ struct fscrypt_str dotdot = FSTR_INIT("..", 2);
/* update dirent of "." */
f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);
@@ -467,8 +506,7 @@ static int make_empty_dir(struct inode *inode,
}
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *new_name, const struct qstr *orig_name,
- struct page *dpage)
+ const struct f2fs_filename *fname, struct page *dpage)
{
struct page *page;
int err;
@@ -493,7 +531,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
- err = f2fs_init_security(inode, dir, orig_name, page);
+ err = f2fs_init_security(inode, dir,
+ fname ? fname->usr_fname : NULL, page);
if (err)
goto put_error;
@@ -508,8 +547,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
return page;
}
- if (new_name) {
- init_dent_inode(new_name, page);
+ if (fname) {
+ init_dent_inode(fname, page);
if (IS_ENCRYPTED(dir))
file_set_enc_name(inode);
}
@@ -577,11 +616,11 @@ next:
}
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
- struct fscrypt_name *fname)
+ const struct f2fs_filename *fname)
{
struct f2fs_dentry_ptr d;
unsigned int bit_pos;
- int slots = GET_DENTRY_SLOTS(fname_len(fname));
+ int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));
@@ -591,8 +630,8 @@ bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
}
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
- const struct qstr *name, f2fs_hash_t name_hash,
- unsigned int bit_pos)
+ const struct fscrypt_str *name, f2fs_hash_t name_hash,
+ unsigned int bit_pos)
{
struct f2fs_dir_entry *de;
int slots = GET_DENTRY_SLOTS(name->len);
@@ -612,15 +651,13 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
}
}
-int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
unsigned int level;
unsigned int current_depth;
unsigned long bidx, block;
- f2fs_hash_t dentry_hash;
unsigned int nbucket, nblock;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
@@ -629,11 +666,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
int slots, err = 0;
level = 0;
- slots = GET_DENTRY_SLOTS(new_name->len);
- dentry_hash = f2fs_dentry_hash(dir, new_name, NULL);
+ slots = GET_DENTRY_SLOTS(fname->disk_name.len);
current_depth = F2FS_I(dir)->i_current_depth;
- if (F2FS_I(dir)->chash == dentry_hash) {
+ if (F2FS_I(dir)->chash == fname->hash) {
level = F2FS_I(dir)->clevel;
F2FS_I(dir)->chash = 0;
}
@@ -655,7 +691,7 @@ start:
nblock = bucket_blocks(level);
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- (le32_to_cpu(dentry_hash) % nbucket));
+ (le32_to_cpu(fname->hash) % nbucket));
for (block = bidx; block <= (bidx + nblock - 1); block++) {
dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
@@ -679,8 +715,7 @@ add_dentry:
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, new_name,
- orig_name, NULL);
+ page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -688,7 +723,8 @@ add_dentry:
}
make_dentry_ptr_block(NULL, &d, dentry_blk);
- f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
+ bit_pos);
set_page_dirty(dentry_page);
@@ -712,21 +748,15 @@ fail:
return err;
}
-int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
{
- struct qstr new_name;
int err = -EAGAIN;
- new_name.name = fname_name(fname);
- new_name.len = fname_len(fname);
-
if (f2fs_has_inline_dentry(dir))
- err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
- inode, ino, mode);
+ err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
if (err == -EAGAIN)
- err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
- inode, ino, mode);
+ err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
@@ -739,12 +769,12 @@ int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
struct page *page = NULL;
struct f2fs_dir_entry *de = NULL;
int err;
- err = fscrypt_setup_filename(dir, name, 0, &fname);
+ err = f2fs_setup_filename(dir, name, 0, &fname);
if (err)
return err;
@@ -767,7 +797,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
} else {
err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
}
- fscrypt_free_filename(&fname);
+ f2fs_free_filename(&fname);
return err;
}
@@ -777,7 +807,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, NULL, NULL, NULL);
+ page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -1080,17 +1110,41 @@ const struct file_operations f2fs_dir_operations = {
static int f2fs_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
- struct qstr qstr = {.name = str, .len = len };
const struct dentry *parent = READ_ONCE(dentry->d_parent);
- const struct inode *inode = READ_ONCE(parent->d_inode);
+ const struct inode *dir = READ_ONCE(parent->d_inode);
+ const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ struct qstr entry = QSTR_INIT(str, len);
+ char strbuf[DNAME_INLINE_LEN];
+ int res;
+
+ if (!dir || !IS_CASEFOLDED(dir))
+ goto fallback;
- if (!inode || !IS_CASEFOLDED(inode)) {
- if (len != name->len)
- return -1;
- return memcmp(str, name->name, len);
+ /*
+ * If the dentry name is stored in-line, then it may be concurrently
+ * modified by a rename. If this happens, the VFS will eventually retry
+ * the lookup, so it doesn't matter what ->d_compare() returns.
+ * However, it's unsafe to call utf8_strncasecmp() with an unstable
+ * string. Therefore, we have to copy the name into a temporary buffer.
+ */
+ if (len <= DNAME_INLINE_LEN - 1) {
+ memcpy(strbuf, str, len);
+ strbuf[len] = 0;
+ entry.name = strbuf;
+ /* prevent compiler from optimizing out the temporary buffer */
+ barrier();
}
- return f2fs_ci_compare(inode, name, &qstr, false);
+ res = utf8_strncasecmp(sbi->s_encoding, name, &entry);
+ if (res >= 0)
+ return res;
+
+ if (f2fs_has_strict_mode(sbi))
+ return -EINVAL;
+fallback:
+ if (len != name->len)
+ return 1;
+ return !!memcmp(str, name->name, len);
}
static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str)
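
The dir.c rework replaces the fscrypt_name/qstr plumbing with struct f2fs_filename, and f2fs_find_target_dentry() now rejects entries on the precomputed dirhash before doing any name comparison. A simplified standalone sketch of that hash-first match order (not part of the patch; the casefold and encryption paths are omitted and all types are stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct sketch_filename {
	const uint8_t *disk_name;	/* on-disk (possibly encrypted) name */
	uint32_t disk_len;
	uint32_t hash;			/* precomputed dirhash */
};

static bool sketch_match(const struct sketch_filename *fname,
			 uint32_t de_hash, const uint8_t *de_name,
			 uint32_t de_len)
{
	/* cheap rejection first: differing hashes can never match */
	if (de_hash != fname->hash)
		return false;

	/* hashes can collide, so confirm with the actual bytes */
	if (de_len != fname->disk_len)
		return false;
	return memcmp(de_name, fname->disk_name, de_len) == 0;
}

int main(void)
{
	static const uint8_t name[] = "foo.txt";
	struct sketch_filename f = { name, sizeof(name) - 1, 0x1234 };

	/* matching hash and bytes -> found; exit status 0 */
	return sketch_match(&f, 0x1234, name, sizeof(name) - 1) ? 0 : 1;
}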
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 5c0149d2f46a..b35a50f4953c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/f2fs.h
*
@@ -139,6 +139,7 @@ struct f2fs_mount_info {
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */
+ block_t unusable_cap_perc; /* percentage for cap */
block_t unusable_cap; /* Amount of space allowed to be
* unusable when disabling checkpoint
*/
@@ -194,6 +195,7 @@ enum {
#define CP_DISCARD 0x00000010
#define CP_TRIMMED 0x00000020
#define CP_PAUSE 0x00000040
+#define CP_RESIZE 0x00000080
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
@@ -428,6 +430,10 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
+#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 18, __u64)
+#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 19, __u64)
#define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL
#define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL
@@ -506,6 +512,42 @@ static inline int get_inline_xattr_addrs(struct inode *inode);
* For INODE and NODE manager
*/
/* for directory operations */
+
+struct f2fs_filename {
+ /*
+ * The filename the user specified. This is NULL for some
+ * filesystem-internal operations, e.g. converting an inline directory
+ * to a non-inline one, or roll-forward recovering an encrypted dentry.
+ */
+ const struct qstr *usr_fname;
+
+ /*
+ * The on-disk filename. For encrypted directories, this is encrypted.
+ * This may be NULL for lookups in an encrypted dir without the key.
+ */
+ struct fscrypt_str disk_name;
+
+ /* The dirhash of this filename */
+ f2fs_hash_t hash;
+
+#ifdef CONFIG_FS_ENCRYPTION
+ /*
+ * For lookups in encrypted directories: either the buffer backing
+ * disk_name, or a buffer that holds the decoded no-key name.
+ */
+ struct fscrypt_str crypto_buf;
+#endif
+#ifdef CONFIG_UNICODE
+ /*
+ * For casefolded directories: the casefolded name, but it's left NULL
+ * if the original name is not valid Unicode or if the filesystem is
+ * doing an internal operation where usr_fname is also NULL. In these
+ * cases we fall back to treating the name as an opaque byte sequence.
+ */
+ struct fscrypt_str cf_name;
+#endif
+};
+
struct f2fs_dentry_ptr {
struct inode *inode;
void *bitmap;
@@ -1088,8 +1130,9 @@ enum cp_reason_type {
};
enum iostat_type {
- APP_DIRECT_IO, /* app direct IOs */
- APP_BUFFERED_IO, /* app buffered IOs */
+ /* WRITE IO */
+ APP_DIRECT_IO, /* app direct write IOs */
+ APP_BUFFERED_IO, /* app buffered write IOs */
APP_WRITE_IO, /* app write IOs */
APP_MAPPED_IO, /* app mapped IOs */
FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
@@ -1100,6 +1143,19 @@ enum iostat_type {
FS_CP_DATA_IO, /* data IOs from checkpoint */
FS_CP_NODE_IO, /* node IOs from checkpoint */
FS_CP_META_IO, /* meta IOs from checkpoint */
+
+ /* READ IO */
+ APP_DIRECT_READ_IO, /* app direct read IOs */
+ APP_BUFFERED_READ_IO, /* app buffered read IOs */
+ APP_READ_IO, /* app read IOs */
+ APP_MAPPED_READ_IO, /* app mapped read IOs */
+ FS_DATA_READ_IO, /* data read IOs */
+ FS_GDATA_READ_IO, /* data read IOs from background gc */
+ FS_CDATA_READ_IO, /* compressed data read IOs */
+ FS_NODE_READ_IO, /* node read IOs */
+ FS_META_READ_IO, /* meta read IOs */
+
+ /* other */
FS_DISCARD, /* discard */
NR_IO_TYPE,
};
@@ -1269,6 +1325,7 @@ enum compress_algorithm_type {
COMPRESS_LZO,
COMPRESS_LZ4,
COMPRESS_ZSTD,
+ COMPRESS_LZORLE,
COMPRESS_MAX,
};
@@ -1418,7 +1475,6 @@ struct f2fs_sb_info {
unsigned int segs_per_sec; /* segments per section */
unsigned int secs_per_zone; /* sections per zone */
unsigned int total_sections; /* total section count */
- struct mutex resize_mutex; /* for resize exclusion */
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
loff_t max_file_blocks; /* max block index of file */
@@ -1504,8 +1560,15 @@ struct f2fs_sb_info {
/* For app/fs IO statistics */
spinlock_t iostat_lock;
- unsigned long long write_iostat[NR_IO_TYPE];
+ unsigned long long rw_iostat[NR_IO_TYPE];
+ unsigned long long prev_rw_iostat[NR_IO_TYPE];
bool iostat_enable;
+ unsigned long iostat_next_period;
+ unsigned int iostat_period_ms;
+
+ /* to attach REQ_META|REQ_FUA flags */
+ unsigned int data_io_flag;
+ unsigned int node_io_flag;
/* For sysfs suppport */
struct kobject s_kobj;
@@ -2902,12 +2965,12 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
-static inline bool is_dot_dotdot(const struct qstr *str)
+static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
- if (str->len == 1 && str->name[0] == '.')
+ if (len == 1 && name[0] == '.')
return true;
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ if (len == 2 && name[0] == '.' && name[1] == '.')
return true;
return false;
@@ -2935,18 +2998,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
- void *ret;
-
if (time_to_inject(sbi, FAULT_KMALLOC)) {
f2fs_show_injection_info(sbi, FAULT_KMALLOC);
return NULL;
}
- ret = kmalloc(size, flags);
- if (ret)
- return ret;
-
- return kvmalloc(size, flags);
+ return kmalloc(size, flags);
}
static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
@@ -2996,29 +3053,45 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
sizeof((f2fs_inode)->field)) \
<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
+#define DEFAULT_IOSTAT_PERIOD_MS 3000
+#define MIN_IOSTAT_PERIOD_MS 100
+/* maximum period of iostat tracing is 1 day */
+#define MAX_IOSTAT_PERIOD_MS 8640000
+
static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
int i;
spin_lock(&sbi->iostat_lock);
- for (i = 0; i < NR_IO_TYPE; i++)
- sbi->write_iostat[i] = 0;
+ for (i = 0; i < NR_IO_TYPE; i++) {
+ sbi->rw_iostat[i] = 0;
+ sbi->prev_rw_iostat[i] = 0;
+ }
spin_unlock(&sbi->iostat_lock);
}
+extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
+
static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
enum iostat_type type, unsigned long long io_bytes)
{
if (!sbi->iostat_enable)
return;
spin_lock(&sbi->iostat_lock);
- sbi->write_iostat[type] += io_bytes;
+ sbi->rw_iostat[type] += io_bytes;
if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
- sbi->write_iostat[APP_BUFFERED_IO] =
- sbi->write_iostat[APP_WRITE_IO] -
- sbi->write_iostat[APP_DIRECT_IO];
+ sbi->rw_iostat[APP_BUFFERED_IO] =
+ sbi->rw_iostat[APP_WRITE_IO] -
+ sbi->rw_iostat[APP_DIRECT_IO];
+
+ if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
+ sbi->rw_iostat[APP_BUFFERED_READ_IO] =
+ sbi->rw_iostat[APP_READ_IO] -
+ sbi->rw_iostat[APP_DIRECT_READ_IO];
spin_unlock(&sbi->iostat_lock);
+
+ f2fs_record_iostat(sbi);
}
#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
@@ -3064,6 +3137,7 @@ static inline void f2fs_clear_page_private(struct page *page)
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
+int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
@@ -3099,31 +3173,32 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
-extern int f2fs_ci_compare(const struct inode *parent,
- const struct qstr *name,
- const struct qstr *entry,
- bool quick);
-
/*
* dir.c
*/
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
-struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
- f2fs_hash_t namehash, int *max_slots,
- struct f2fs_dentry_ptr *d);
+int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname);
+int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct f2fs_filename *fname);
+int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct f2fs_filename *fname);
+void f2fs_free_filename(struct f2fs_filename *fname);
+struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *new_name,
- const struct qstr *orig_name, struct page *dpage);
+ const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page);
+ const struct f2fs_filename *fname,
+ struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
@@ -3132,14 +3207,13 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
- struct fscrypt_name *fname);
+ const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
- const struct qstr *name, f2fs_hash_t name_hash,
+ const struct fscrypt_str *name, f2fs_hash_t name_hash,
unsigned int bit_pos);
-int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
+int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
-int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
+int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode);
@@ -3169,8 +3243,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
- const struct qstr *name_info, struct fscrypt_name *fname);
+void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
/*
* node.c
@@ -3202,6 +3275,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id);
@@ -3645,7 +3719,7 @@ static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
-static inline void update_sit_info(struct f2fs_sb_info *sbi) {}
+static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif
extern const struct file_operations f2fs_dir_operations;
@@ -3678,11 +3752,11 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page);
+ const struct f2fs_filename *fname,
+ struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage);
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
+int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
struct page *page, struct inode *dir,
@@ -3781,8 +3855,11 @@ int f2fs_prepare_compress_overwrite(struct inode *inode,
struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
pgoff_t index, unsigned copied);
+int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
+int f2fs_init_compress_mempool(void);
+void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
@@ -3816,6 +3893,8 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
+static inline int f2fs_init_compress_mempool(void) { return 0; }
+static inline void f2fs_destroy_compress_mempool(void) { }
#endif
static inline void set_compress_context(struct inode *inode)
@@ -3962,6 +4041,10 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode,
{
int diff = F2FS_I(inode)->i_cluster_size - blocks;
+ /* don't update i_compr_blocks if saved blocks were released */
+ if (!add && !F2FS_I(inode)->i_compr_blocks)
+ return;
+
if (add) {
F2FS_I(inode)->i_compr_blocks += diff;
stat_add_compr_blocks(inode, diff);
@@ -4003,8 +4086,6 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return true;
if (f2fs_is_multi_device(sbi))
return true;
- if (f2fs_compressed_file(inode))
- return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 6ab8f621a3c5..3268f8dd59bb 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -40,6 +40,10 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
ret = filemap_fault(vmf);
up_read(&F2FS_I(inode)->i_mmap_sem);
+ if (!ret)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
+ F2FS_BLKSIZE);
+
trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
return ret;
@@ -165,9 +169,11 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
{
struct dentry *dentry;
- inode = igrab(inode);
- dentry = d_find_any_alias(inode);
- iput(inode);
+ /*
+ * Make sure to get the non-deleted alias. The alias associated with
+ * the open file descriptor being fsync()'ed may be deleted already.
+ */
+ dentry = d_find_alias(inode);
if (!dentry)
return 0;
@@ -557,6 +563,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
bool compressed_cluster = false;
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ bool released = !F2FS_I(dn->inode)->i_compr_blocks;
if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
base = get_extra_isize(dn->inode);
@@ -595,7 +602,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_invalidate_blocks(sbi, blkaddr);
- nr_free++;
+
+ if (!released || blkaddr != COMPRESS_ADDR)
+ nr_free++;
}
if (compressed_cluster)
@@ -643,9 +652,6 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
}
- if (f2fs_compressed_file(inode))
- return 0;
-
page = f2fs_get_lock_data_page(inode, index, true);
if (IS_ERR(page))
return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
@@ -661,7 +667,7 @@ truncate_out:
return 0;
}
-static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
+int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -729,23 +735,28 @@ free_partial:
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
u64 free_from = from;
+ int err;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
* for compressed file, only support cluster size
* aligned truncation.
*/
- if (f2fs_compressed_file(inode)) {
- size_t cluster_shift = PAGE_SHIFT +
- F2FS_I(inode)->i_log_cluster_size;
- size_t cluster_mask = (1 << cluster_shift) - 1;
+ if (f2fs_compressed_file(inode))
+ free_from = round_up(from,
+ F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
+#endif
- free_from = from >> cluster_shift;
- if (from & cluster_mask)
- free_from++;
- free_from <<= cluster_shift;
- }
+ err = f2fs_do_truncate_blocks(inode, free_from, lock);
+ if (err)
+ return err;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (from != free_from)
+ err = f2fs_truncate_partial_cluster(inode, from, lock);
+#endif
- return do_truncate_blocks(inode, free_from, lock);
+ return err;
}
int f2fs_truncate(struct inode *inode)
@@ -968,9 +979,7 @@ const struct inode_operations f2fs_file_inode_operations = {
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
.fiemap = f2fs_fiemap,
};
@@ -1649,7 +1658,11 @@ next_alloc:
down_write(&sbi->pin_sem);
map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+
+ f2fs_lock_op(sbi);
f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+ f2fs_unlock_op(sbi);
+
err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
up_write(&sbi->pin_sem);
@@ -2219,8 +2232,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (in != F2FS_GOING_DOWN_FULLSYNC) {
ret = mnt_want_write_file(filp);
- if (ret)
+ if (ret) {
+ if (ret == -EROFS) {
+ ret = 0;
+ f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ trace_f2fs_shutdown(sbi, in, ret);
+ }
return ret;
+ }
}
switch (in) {
@@ -3301,7 +3321,6 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
__u64 block_count;
- int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3313,9 +3332,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
sizeof(block_count)))
return -EFAULT;
- ret = f2fs_resize_fs(sbi, block_count);
-
- return ret;
+ return f2fs_resize_fs(sbi, block_count);
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
@@ -3419,6 +3436,326 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
return put_user(blocks, (u64 __user *)arg);
}
+static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ unsigned int released_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ block_t blkaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ continue;
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)))
+ return -EFSCORRUPTED;
+ }
+
+ while (count) {
+ int compr_blocks = 0;
+
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+ blkaddr = f2fs_data_blkaddr(dn);
+
+ if (i == 0) {
+ if (blkaddr == COMPRESS_ADDR)
+ continue;
+ dn->ofs_in_node += cluster_size;
+ goto next;
+ }
+
+ if (__is_valid_data_blkaddr(blkaddr))
+ compr_blocks++;
+
+ if (blkaddr != NEW_ADDR)
+ continue;
+
+ dn->data_blkaddr = NULL_ADDR;
+ f2fs_set_data_blkaddr(dn);
+ }
+
+ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
+ dec_valid_block_count(sbi, dn->inode,
+ cluster_size - compr_blocks);
+
+ released_blocks += cluster_size - compr_blocks;
+next:
+ count -= cluster_size;
+ }
+
+ return released_blocks;
+}
+
+static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ pgoff_t page_idx = 0, last_idx;
+ unsigned int released_blocks = 0;
+ int ret;
+ int writecount;
+
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+ inode_lock(inode);
+
+ writecount = atomic_read(&inode->i_writecount);
+ if ((filp->f_mode & FMODE_WRITE && writecount != 1) || writecount) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (IS_IMMUTABLE(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret)
+ goto out;
+
+ if (!F2FS_I(inode)->i_compr_blocks)
+ goto out;
+
+ F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
+ f2fs_set_inode_flags(inode);
+ inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+ while (page_idx < last_idx) {
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT) {
+ page_idx = f2fs_get_next_page_offset(&dn,
+ page_idx);
+ ret = 0;
+ continue;
+ }
+ break;
+ }
+
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+ count = round_up(count, F2FS_I(inode)->i_cluster_size);
+
+ ret = release_compress_blocks(&dn, count);
+
+ f2fs_put_dnode(&dn);
+
+ if (ret < 0)
+ break;
+
+ page_idx += count;
+ released_blocks += ret;
+ }
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+out:
+ inode_unlock(inode);
+
+ mnt_drop_write_file(filp);
+
+ if (ret >= 0) {
+ ret = put_user(released_blocks, (u64 __user *)arg);
+ } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
+ "iblocks=%llu, released=%u, compr_blocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, inode->i_blocks,
+ released_blocks,
+ F2FS_I(inode)->i_compr_blocks);
+ }
+
+ return ret;
+}
+
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ unsigned int reserved_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ block_t blkaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ continue;
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)))
+ return -EFSCORRUPTED;
+ }
+
+ while (count) {
+ int compr_blocks = 0;
+ blkcnt_t reserved;
+ int ret;
+
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+ blkaddr = f2fs_data_blkaddr(dn);
+
+ if (i == 0) {
+ if (blkaddr == COMPRESS_ADDR)
+ continue;
+ dn->ofs_in_node += cluster_size;
+ goto next;
+ }
+
+ if (__is_valid_data_blkaddr(blkaddr)) {
+ compr_blocks++;
+ continue;
+ }
+
+ dn->data_blkaddr = NEW_ADDR;
+ f2fs_set_data_blkaddr(dn);
+ }
+
+ reserved = cluster_size - compr_blocks;
+ ret = inc_valid_block_count(sbi, dn->inode, &reserved);
+ if (ret)
+ return ret;
+
+ if (reserved != cluster_size - compr_blocks)
+ return -ENOSPC;
+
+ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+
+ reserved_blocks += reserved;
+next:
+ count -= cluster_size;
+ }
+
+ return reserved_blocks;
+}
+
+static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ pgoff_t page_idx = 0, last_idx;
+ unsigned int reserved_blocks = 0;
+ int ret;
+
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ if (F2FS_I(inode)->i_compr_blocks)
+ goto out;
+
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+ inode_lock(inode);
+
+ if (!IS_IMMUTABLE(inode)) {
+ ret = -EINVAL;
+ goto unlock_inode;
+ }
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+ while (page_idx < last_idx) {
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT) {
+ page_idx = f2fs_get_next_page_offset(&dn,
+ page_idx);
+ ret = 0;
+ continue;
+ }
+ break;
+ }
+
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+ count = round_up(count, F2FS_I(inode)->i_cluster_size);
+
+ ret = reserve_compress_blocks(&dn, count);
+
+ f2fs_put_dnode(&dn);
+
+ if (ret < 0)
+ break;
+
+ page_idx += count;
+ reserved_blocks += ret;
+ }
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+
+ if (ret >= 0) {
+ F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
+ f2fs_set_inode_flags(inode);
+ inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+unlock_inode:
+ inode_unlock(inode);
+out:
+ mnt_drop_write_file(filp);
+
+ if (ret >= 0) {
+ ret = put_user(reserved_blocks, (u64 __user *)arg);
+ } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
+ "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, inode->i_blocks,
+ reserved_blocks,
+ F2FS_I(inode)->i_compr_blocks);
+ }
+
+ return ret;
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -3501,6 +3838,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_set_volume_name(filp, arg);
case F2FS_IOC_GET_COMPRESS_BLOCKS:
return f2fs_get_compress_blocks(filp, arg);
+ case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
+ return f2fs_release_compress_blocks(filp, arg);
+ case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+ return f2fs_reserve_compress_blocks(filp, arg);
default:
return -ENOTTY;
}
@@ -3510,11 +3851,17 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ int ret;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- return generic_file_read_iter(iocb, iter);
+ ret = generic_file_read_iter(iocb, iter);
+
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
+
+ return ret;
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -3662,6 +4009,8 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GET_VOLUME_NAME:
case F2FS_IOC_SET_VOLUME_NAME:
case F2FS_IOC_GET_COMPRESS_BLOCKS:
+ case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
+ case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
break;
default:
return -ENOIOCTLCMD;
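
The file.c additions wire up F2FS_IOC_RELEASE_COMPRESS_BLOCKS and F2FS_IOC_RESERVE_COMPRESS_BLOCKS, which hand the blocks saved by compression back to the filesystem and later re-reserve them. A hedged userspace sketch of the round trip (not part of the patch; the 0xf5 ioctl magic is assumed to match F2FS_IOCTL_MAGIC, and the request numbers 18/19 come from the f2fs.h hunk above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

/* local copies; assumed F2FS_IOCTL_MAGIC == 0xf5, numbers from the hunk above */
#ifndef F2FS_IOC_RELEASE_COMPRESS_BLOCKS
#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS	_IOR(0xf5, 18, __u64)
#endif
#ifndef F2FS_IOC_RESERVE_COMPRESS_BLOCKS
#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS	_IOR(0xf5, 19, __u64)
#endif

int main(int argc, char **argv)
{
	__u64 blocks = 0;
	int fd;

	if (argc != 2)
		return 1;

	/*
	 * The release path in this patch returns -EBUSY while the inode has
	 * any writers, so use a read-only descriptor.
	 */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* give the compression savings back; the inode becomes immutable */
	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks) < 0)
		perror("release");
	else
		printf("released %llu blocks\n", (unsigned long long)blocks);

	/* re-reserve the blocks to clear the immutable flag again */
	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks) < 0)
		perror("reserve");
	else
		printf("reserved %llu blocks\n", (unsigned long long)blocks);

	close(fd);
	return 0;
}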
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 26248c8936db..5b95d5a146eb 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -13,6 +13,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/sched/signal.h>
#include "f2fs.h"
#include "node.h"
@@ -737,6 +738,10 @@ got_it:
goto put_encrypted_page;
f2fs_put_page(fio.encrypted_page, 0);
f2fs_put_page(page, 1);
+
+ f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
+
return 0;
put_encrypted_page:
f2fs_put_page(fio.encrypted_page, 1);
@@ -840,6 +845,10 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_put_page(mpage, 1);
goto up_out;
}
+
+ f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
+
lock_page(mpage);
if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
!PageUptodate(mpage))) {
@@ -1399,12 +1408,29 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
-static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
- unsigned int end)
+static int free_segment_range(struct f2fs_sb_info *sbi,
+ unsigned int secs, bool gc_only)
{
- int type;
- unsigned int segno, next_inuse;
+ unsigned int segno, next_inuse, start, end;
+ struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
+ int gc_mode, gc_type;
int err = 0;
+ int type;
+
+ /* Force block allocation for GC */
+ MAIN_SECS(sbi) -= secs;
+ start = MAIN_SECS(sbi) * sbi->segs_per_sec;
+ end = MAIN_SEGS(sbi) - 1;
+
+ mutex_lock(&DIRTY_I(sbi)->seglist_lock);
+ for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
+ if (SIT_I(sbi)->last_victim[gc_mode] >= start)
+ SIT_I(sbi)->last_victim[gc_mode] = 0;
+
+ for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
+ if (sbi->next_victim_seg[gc_type] >= start)
+ sbi->next_victim_seg[gc_type] = NULL_SEGNO;
+ mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
/* Move out cursegs from the target range */
for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
@@ -1417,18 +1443,24 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- down_write(&sbi->gc_lock);
do_garbage_collect(sbi, segno, &gc_list, FG_GC);
- up_write(&sbi->gc_lock);
put_gc_inode(&gc_list);
- if (get_valid_blocks(sbi, segno, true))
- return -EAGAIN;
+ if (!gc_only && get_valid_blocks(sbi, segno, true)) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
}
+ if (gc_only)
+ goto out;
- err = f2fs_sync_fs(sbi->sb, 1);
+ err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
- return err;
+ goto out;
next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
if (next_inuse <= end) {
@@ -1436,6 +1468,8 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
next_inuse);
f2fs_bug_on(sbi, 1);
}
+out:
+ MAIN_SECS(sbi) += secs;
return err;
}
@@ -1481,6 +1515,7 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
+ MAIN_SECS(sbi) += secs;
FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
@@ -1502,8 +1537,8 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
__u64 old_block_count, shrunk_blocks;
+ struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
unsigned int secs;
- int gc_mode, gc_type;
int err = 0;
__u32 rem;
@@ -1538,10 +1573,27 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
return -EINVAL;
}
- freeze_bdev(sbi->sb->s_bdev);
-
shrunk_blocks = old_block_count - block_count;
secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
+
+ /* stop other GC */
+ if (!down_write_trylock(&sbi->gc_lock))
+ return -EAGAIN;
+
+ /* stop CP to protect MAIN_SEC in free_segment_range */
+ f2fs_lock_op(sbi);
+ err = free_segment_range(sbi, secs, true);
+ f2fs_unlock_op(sbi);
+ up_write(&sbi->gc_lock);
+ if (err)
+ return err;
+
+ set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+
+ freeze_super(sbi->sb);
+ down_write(&sbi->gc_lock);
+ mutex_lock(&sbi->cp_mutex);
+
spin_lock(&sbi->stat_lock);
if (shrunk_blocks + valid_user_blocks(sbi) +
sbi->current_reserved_blocks + sbi->unusable_block_count +
@@ -1550,69 +1602,44 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
else
sbi->user_block_count -= shrunk_blocks;
spin_unlock(&sbi->stat_lock);
- if (err) {
- thaw_bdev(sbi->sb->s_bdev, sbi->sb);
- return err;
- }
-
- mutex_lock(&sbi->resize_mutex);
- set_sbi_flag(sbi, SBI_IS_RESIZEFS);
-
- mutex_lock(&DIRTY_I(sbi)->seglist_lock);
-
- MAIN_SECS(sbi) -= secs;
-
- for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
- if (SIT_I(sbi)->last_victim[gc_mode] >=
- MAIN_SECS(sbi) * sbi->segs_per_sec)
- SIT_I(sbi)->last_victim[gc_mode] = 0;
-
- for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
- if (sbi->next_victim_seg[gc_type] >=
- MAIN_SECS(sbi) * sbi->segs_per_sec)
- sbi->next_victim_seg[gc_type] = NULL_SEGNO;
-
- mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
+ if (err)
+ goto out_err;
- err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
- MAIN_SEGS(sbi) - 1);
+ err = free_segment_range(sbi, secs, false);
if (err)
- goto out;
+ goto recover_out;
update_sb_metadata(sbi, -secs);
err = f2fs_commit_super(sbi, false);
if (err) {
update_sb_metadata(sbi, secs);
- goto out;
+ goto recover_out;
}
- mutex_lock(&sbi->cp_mutex);
update_fs_metadata(sbi, -secs);
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- mutex_unlock(&sbi->cp_mutex);
- err = f2fs_sync_fs(sbi->sb, 1);
+ err = f2fs_write_checkpoint(sbi, &cpc);
if (err) {
- mutex_lock(&sbi->cp_mutex);
update_fs_metadata(sbi, secs);
- mutex_unlock(&sbi->cp_mutex);
update_sb_metadata(sbi, secs);
f2fs_commit_super(sbi, false);
}
-out:
+recover_out:
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
- MAIN_SECS(sbi) += secs;
spin_lock(&sbi->stat_lock);
sbi->user_block_count += shrunk_blocks;
spin_unlock(&sbi->stat_lock);
}
+out_err:
+ mutex_unlock(&sbi->cp_mutex);
+ up_write(&sbi->gc_lock);
+ thaw_super(sbi->sb);
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
- mutex_unlock(&sbi->resize_mutex);
- thaw_bdev(sbi->sb->s_bdev, sbi->sb);
return err;
}
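
The gc.c changes restructure f2fs_resize_fs() to run a GC-only pre-pass via free_segment_range(sbi, secs, true) before freezing the superblock and shrinking for real. A minimal userspace sketch of the ioctl that drives this path (not part of the patch; the 0xf5 magic is assumed, and request number 16 appears in the f2fs.h context above):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

#ifndef F2FS_IOC_RESIZE_FS
#define F2FS_IOC_RESIZE_FS	_IOW(0xf5, 16, __u64)	/* assumed magic 0xf5 */
#endif

int main(int argc, char **argv)
{
	__u64 block_count;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> <new-block-count>\n", argv[0]);
		return 1;
	}
	block_count = strtoull(argv[2], NULL, 0);

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* needs CAP_SYS_ADMIN; in this patch the target may only shrink */
	if (ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count) < 0) {
		perror("F2FS_IOC_RESIZE_FS");
		return 1;
	}

	printf("resized to %llu blocks\n", (unsigned long long)block_count);
	close(fd);
	return 0;
}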
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index bbac9d3787bd..db3c61046aa4 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/gc.h
*
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 8c4ea5003ef8..de841aaf3c43 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -67,22 +67,9 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
-static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
- struct fscrypt_name *fname)
+static u32 TEA_hash_name(const u8 *p, size_t len)
{
- __u32 hash;
- f2fs_hash_t f2fs_hash;
- const unsigned char *p;
__u32 in[8], buf[4];
- const unsigned char *name = name_info->name;
- size_t len = name_info->len;
-
- /* encrypted bigname case */
- if (fname && !fname->disk_name.name)
- return cpu_to_le32(fname->hash);
-
- if (is_dot_dotdot(name_info))
- return 0;
/* Initialize the default seed for the hash checksum functions */
buf[0] = 0x67452301;
@@ -90,7 +77,6 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
- p = name;
while (1) {
str2hashbuf(p, len, in, 4);
TEA_transform(buf, in);
@@ -99,41 +85,43 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
break;
len -= 16;
}
- hash = buf[0];
- f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
- return f2fs_hash;
+ return buf[0] & ~F2FS_HASH_COL_BIT;
}
-f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
- const struct qstr *name_info, struct fscrypt_name *fname)
+/*
+ * Compute @fname->hash. For all directories, @fname->disk_name must be set.
+ * For casefolded directories, @fname->usr_fname must be set, and also
+ * @fname->cf_name if the filename is valid Unicode.
+ */
+void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
{
-#ifdef CONFIG_UNICODE
- struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
- const struct unicode_map *um = sbi->s_encoding;
- int r, dlen;
- unsigned char *buff;
- struct qstr folded;
+ const u8 *name = fname->disk_name.name;
+ size_t len = fname->disk_name.len;
- if (!name_info->len || !IS_CASEFOLDED(dir))
- goto opaque_seq;
+ WARN_ON_ONCE(!name);
- buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- dlen = utf8_casefold(um, name_info, buff, PATH_MAX);
- if (dlen < 0) {
- kvfree(buff);
- goto opaque_seq;
+ if (is_dot_dotdot(name, len)) {
+ fname->hash = 0;
+ return;
}
- folded.name = buff;
- folded.len = dlen;
- r = __f2fs_dentry_hash(&folded, fname);
-
- kvfree(buff);
- return r;
-opaque_seq:
+#ifdef CONFIG_UNICODE
+ if (IS_CASEFOLDED(dir)) {
+ /*
+ * If the casefolded name is provided, hash it instead of the
+ * on-disk name. If the casefolded name is *not* provided, that
+ * should only be because the name wasn't valid Unicode, so fall
+ * back to treating the name as an opaque byte sequence.
+ */
+ WARN_ON_ONCE(!fname->usr_fname->name);
+ if (fname->cf_name.name) {
+ name = fname->cf_name.name;
+ len = fname->cf_name.len;
+ } else {
+ name = fname->usr_fname->name;
+ len = fname->usr_fname->len;
+ }
+ }
#endif
- return __f2fs_dentry_hash(name_info, fname);
+ fname->hash = cpu_to_le32(TEA_hash_name(name, len));
}
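
The hunk above reduces hashing to two steps: pick which byte string represents the name (casefolded form if one exists, otherwise the user-supplied or on-disk name) and mask the collision bit out of the result. The user-space sketch below shows only that selection logic; the struct layout, field names and the FNV-1a stand-in hash are illustrative assumptions, not the kernel's TEA-based routine.

	/*
	 * Illustrative sketch, not kernel code: choose which name to hash the
	 * way the reworked f2fs_hash_filename() does, then mask the collision
	 * bit.  FNV-1a stands in for the TEA-based hash only to keep the
	 * example self-contained.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define HASH_COL_BIT 0x80000000u	/* mirrors F2FS_HASH_COL_BIT */

	struct name_buf {
		const unsigned char *name;
		size_t len;
	};

	struct filename {			/* loosely modelled on struct f2fs_filename */
		struct name_buf disk_name;	/* always set */
		struct name_buf usr_fname;	/* as typed by the user */
		struct name_buf cf_name;	/* casefolded form, may be missing */
		int dir_is_casefolded;
		uint32_t hash;
	};

	static uint32_t fnv1a(const unsigned char *p, size_t len)
	{
		uint32_t h = 2166136261u;

		while (len--) {
			h ^= *p++;
			h *= 16777619u;
		}
		return h;
	}

	static void hash_filename(struct filename *f)
	{
		const unsigned char *name = f->disk_name.name;
		size_t len = f->disk_name.len;

		if (f->dir_is_casefolded) {
			/* Prefer the casefolded name; fall back to the user
			 * name when casefolding failed (invalid Unicode). */
			if (f->cf_name.name) {
				name = f->cf_name.name;
				len = f->cf_name.len;
			} else {
				name = f->usr_fname.name;
				len = f->usr_fname.len;
			}
		}
		f->hash = fnv1a(name, len) & ~HASH_COL_BIT;
	}

	int main(void)
	{
		struct filename f = {
			.disk_name = { (const unsigned char *)"Foo.TXT", 7 },
			.usr_fname = { (const unsigned char *)"Foo.TXT", 7 },
			.cf_name   = { (const unsigned char *)"foo.txt", 7 },
			.dir_is_casefolded = 1,
		};

		hash_filename(&f);
		printf("hash = %08x\n", f.hash);
		return 0;
	}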
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 4167e5408151..dbade310dc79 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
+#include <linux/fiemap.h>
#include "f2fs.h"
#include "node.h"
@@ -305,15 +306,14 @@ process_inline:
}
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page)
+ const struct f2fs_filename *fname,
+ struct page **res_page)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
- struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
struct f2fs_dir_entry *de;
struct f2fs_dentry_ptr d;
struct page *ipage;
void *inline_dentry;
- f2fs_hash_t namehash;
ipage = f2fs_get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage)) {
@@ -321,12 +321,10 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
return NULL;
}
- namehash = f2fs_dentry_hash(dir, &name, fname);
-
inline_dentry = inline_data_addr(dir, ipage);
make_dentry_ptr_inline(dir, &d, inline_dentry);
- de = f2fs_find_target_dentry(fname, namehash, NULL, &d);
+ de = f2fs_find_target_dentry(&d, fname, NULL);
unlock_page(ipage);
if (de)
*res_page = ipage;
@@ -443,7 +441,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
while (bit_pos < d.max) {
struct f2fs_dir_entry *de;
- struct qstr new_name;
+ struct f2fs_filename fname;
nid_t ino;
umode_t fake_mode;
@@ -459,14 +457,19 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
continue;
}
- new_name.name = d.filename[bit_pos];
- new_name.len = le16_to_cpu(de->name_len);
+ /*
+ * We only need the disk_name and hash to move the dentry.
+ * We don't need the original or casefolded filenames.
+ */
+ memset(&fname, 0, sizeof(fname));
+ fname.disk_name.name = d.filename[bit_pos];
+ fname.disk_name.len = le16_to_cpu(de->name_len);
+ fname.hash = de->hash_code;
ino = le32_to_cpu(de->ino);
fake_mode = f2fs_get_de_type(de) << S_SHIFT;
- err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
- ino, fake_mode);
+ err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
if (err)
goto punch_dentry_pages;
@@ -543,7 +546,7 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
void *inline_dentry = NULL;
int err = 0;
@@ -552,19 +555,19 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
f2fs_lock_op(sbi);
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+ err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
if (err)
goto out;
ipage = f2fs_get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage)) {
err = PTR_ERR(ipage);
- goto out;
+ goto out_fname;
}
if (f2fs_has_enough_room(dir, ipage, &fname)) {
f2fs_put_page(ipage, 1);
- goto out;
+ goto out_fname;
}
inline_dentry = inline_data_addr(dir, ipage);
@@ -572,22 +575,22 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
err = do_convert_inline_dir(dir, ipage, inline_dentry);
if (!err)
f2fs_put_page(ipage, 1);
+out_fname:
+ f2fs_free_filename(&fname);
out:
f2fs_unlock_op(sbi);
return err;
}
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
- f2fs_hash_t name_hash;
void *inline_dentry = NULL;
struct f2fs_dentry_ptr d;
- int slots = GET_DENTRY_SLOTS(new_name->len);
+ int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
struct page *page = NULL;
int err = 0;
@@ -609,8 +612,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, new_name,
- orig_name, ipage);
+ page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -619,8 +621,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
- name_hash = f2fs_dentry_hash(dir, new_name, NULL);
- f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
+ bit_pos);
set_page_dirty(ipage);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index f54119da2217..e94e02c6580a 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -482,7 +482,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
nid_t ino = -1;
int err = 0;
unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags);
@@ -491,19 +491,20 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- err = fscrypt_prepare_lookup(dir, dentry, &fname);
+ err = f2fs_prepare_lookup(dir, dentry, &fname);
if (err == -ENOENT)
goto out_splice;
if (err)
goto out;
de = __f2fs_find_entry(dir, &fname, &page);
- fscrypt_free_filename(&fname);
+ f2fs_free_filename(&fname);
if (!de) {
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto out;
}
+ err = -ENOENT;
goto out_splice;
}
@@ -549,7 +550,7 @@ out_splice:
#endif
new = d_splice_alias(inode, dentry);
err = PTR_ERR_OR_ZERO(new);
- trace_f2fs_lookup_end(dir, dentry, ino, err);
+ trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err);
return new;
out_iput:
iput(inode);
@@ -564,7 +565,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = d_inode(dentry);
struct f2fs_dir_entry *de;
struct page *page;
- int err = -ENOENT;
+ int err;
trace_f2fs_unlink_enter(dir, dentry);
@@ -1287,9 +1288,7 @@ const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
.get_link = f2fs_encrypted_get_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
};
const struct inode_operations f2fs_dir_inode_operations = {
@@ -1307,9 +1306,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
.fiemap = f2fs_fiemap,
};
@@ -1317,9 +1314,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
.get_link = f2fs_get_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
};
const struct inode_operations f2fs_special_inode_operations = {
@@ -1327,7 +1322,5 @@ const struct inode_operations f2fs_special_inode_operations = {
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
};
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ecbd6bd14a49..03e24df1c84f 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1300,7 +1300,13 @@ static int read_node_page(struct page *page, int op_flags)
}
fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
- return f2fs_submit_page_bio(&fio);
+
+ err = f2fs_submit_page_bio(&fio);
+
+ if (!err)
+ f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
+
+ return err;
}
/*
@@ -1514,8 +1520,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
trace_f2fs_writepage(page, NODE);
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
+ ClearPageUptodate(page);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ unlock_page(page);
+ return 0;
+ }
goto redirty_out;
+ }
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
@@ -1801,6 +1814,53 @@ static bool flush_dirty_inode(struct page *page)
return true;
}
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
+{
+ pgoff_t index = 0;
+ struct pagevec pvec;
+ int nr_pages;
+ int ret = 0;
+
+ pagevec_init(&pvec);
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec,
+ NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (!IS_DNODE(page))
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ /* flush inline_data, if it's async context. */
+ if (is_inline_node(page)) {
+ clear_inline_node(page);
+ unlock_page(page);
+ flush_inline_data(sbi, ino_of_node(page));
+ continue;
+ }
+ unlock_page(page);
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+ return ret;
+}
+
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type)
@@ -1864,8 +1924,8 @@ continue_unlock:
goto continue_unlock;
}
- /* flush inline_data */
- if (is_inline_node(page)) {
+ /* flush inline_data, if it's async context. */
+ if (do_balance && is_inline_node(page)) {
clear_inline_node(page);
unlock_page(page);
flush_inline_data(sbi, ino_of_node(page));
@@ -2482,7 +2542,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct free_nid *i, *next;
int nr = nr_shrink;
if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
@@ -2491,17 +2550,23 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
if (!mutex_trylock(&nm_i->build_lock))
return 0;
- spin_lock(&nm_i->nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 ||
- nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
- break;
+ while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
+ struct free_nid *i, *next;
+ unsigned int batch = SHRINK_NID_BATCH_SIZE;
- __remove_free_nid(sbi, i, FREE_NID);
- kmem_cache_free(free_nid_slab, i);
- nr_shrink--;
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+ if (!nr_shrink || !batch ||
+ nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
+ break;
+ __remove_free_nid(sbi, i, FREE_NID);
+ kmem_cache_free(free_nid_slab, i);
+ nr_shrink--;
+ batch--;
+ }
+ spin_unlock(&nm_i->nid_list_lock);
}
- spin_unlock(&nm_i->nid_list_lock);
+
mutex_unlock(&nm_i->build_lock);
return nr - nr_shrink;
@@ -2928,7 +2993,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
return 0;
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
- nm_i->nat_bits = f2fs_kzalloc(sbi,
+ nm_i->nat_bits = f2fs_kvzalloc(sbi,
nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -3061,9 +3126,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
int i;
nm_i->free_nid_bitmap =
- f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
- nm_i->nat_blocks),
- GFP_KERNEL);
+ f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
+ nm_i->nat_blocks),
+ GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
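
The free-nid shrinker change above swaps one long lock hold for repeated short ones: take the list lock, free at most SHRINK_NID_BATCH_SIZE entries, drop the lock, and loop. Below is a minimal user-space sketch of that pattern with a pthread mutex and a singly linked list; all names and the batch size are assumptions made for the example.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BATCH 8			/* analogous to SHRINK_NID_BATCH_SIZE */

	struct node {
		struct node *next;
		int id;
	};

	static struct node *head;
	static int count;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* free up to nr_shrink entries, but never drop below 'keep' */
	static int shrink(int nr_shrink, int keep)
	{
		int freed = 0;

		while (nr_shrink && count > keep) {
			int batch = BATCH;

			pthread_mutex_lock(&lock);
			while (head && nr_shrink && batch && count > keep) {
				struct node *victim = head;

				head = victim->next;
				free(victim);
				count--;
				nr_shrink--;
				batch--;
				freed++;
			}
			pthread_mutex_unlock(&lock);
			/* lock dropped here: other lockers can run between batches */
		}
		return freed;
	}

	int main(void)
	{
		int freed;

		for (int i = 0; i < 100; i++) {
			struct node *n = malloc(sizeof(*n));

			n->id = i;
			n->next = head;
			head = n;
			count++;
		}
		freed = shrink(50, 20);
		printf("freed %d, left %d\n", freed, count);
		return 0;
	}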
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index e05af5df5648..69e5859e993c 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/node.h
*
@@ -15,6 +15,9 @@
#define FREE_NID_PAGES 8
#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
+/* size of free nid batch when shrinking */
+#define SHRINK_NID_BATCH_SIZE 8
+
#define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */
/* maximum readahead size for node during getting data blocks */
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index dd804c07eeb0..ae5310f02e7f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -107,13 +107,51 @@ static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
kmem_cache_free(fsync_entry_slab, entry);
}
+static int init_recovered_filename(const struct inode *dir,
+ struct f2fs_inode *raw_inode,
+ struct f2fs_filename *fname,
+ struct qstr *usr_fname)
+{
+ int err;
+
+ memset(fname, 0, sizeof(*fname));
+ fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+ fname->disk_name.name = raw_inode->i_name;
+
+ if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
+ return -ENAMETOOLONG;
+
+ if (!IS_ENCRYPTED(dir)) {
+ usr_fname->name = fname->disk_name.name;
+ usr_fname->len = fname->disk_name.len;
+ fname->usr_fname = usr_fname;
+ }
+
+ /* Compute the hash of the filename */
+ if (IS_CASEFOLDED(dir)) {
+ err = f2fs_init_casefolded_name(dir, fname);
+ if (err)
+ return err;
+ f2fs_hash_filename(dir, fname);
+#ifdef CONFIG_UNICODE
+ /* Case-sensitive match is fine for recovery */
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+#endif
+ } else {
+ f2fs_hash_filename(dir, fname);
+ }
+ return 0;
+}
+
static int recover_dentry(struct inode *inode, struct page *ipage,
struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
+ struct qstr usr_fname;
struct page *page;
struct inode *dir, *einode;
struct fsync_inode_entry *entry;
@@ -132,16 +170,9 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
}
dir = entry->inode;
-
- memset(&fname, 0, sizeof(struct fscrypt_name));
- fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
- fname.disk_name.name = raw_inode->i_name;
-
- if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
- WARN_ON(1);
- err = -ENAMETOOLONG;
+ err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
+ if (err)
goto out;
- }
retry:
de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index b7a9421472a7..196f31503511 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1029,9 +1029,9 @@ static void f2fs_submit_discard_endio(struct bio *bio)
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
unsigned long flags;
- dc->error = blk_status_to_errno(bio->bi_status);
-
spin_lock_irqsave(&dc->lock, flags);
+ if (!dc->error)
+ dc->error = blk_status_to_errno(bio->bi_status);
dc->bio_ref--;
if (!dc->bio_ref && dc->state == D_SUBMIT) {
dc->state = D_DONE;
@@ -1101,7 +1101,6 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
} else if (discard_type == DPOLICY_FSTRIM) {
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
- dpolicy->max_requests = UINT_MAX;
dpolicy->io_aware = false;
/* we need to issue all to keep CP_TRIMMED_FLAG */
dpolicy->granularity = 1;
@@ -1215,12 +1214,14 @@ submit:
len = total_len;
}
- if (!err && len)
+ if (!err && len) {
+ dcc->undiscard_blks -= len;
__update_discard_tree_range(sbi, bdev, lstart, start, len);
+ }
return err;
}
-static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
+static void __insert_discard_tree(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len,
struct rb_node **insert_p,
@@ -1229,7 +1230,6 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct rb_node **p;
struct rb_node *parent = NULL;
- struct discard_cmd *dc = NULL;
bool leftmost = true;
if (insert_p && insert_parent) {
@@ -1241,12 +1241,8 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
lstart, &leftmost);
do_insert:
- dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
+ __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
p, leftmost);
- if (!dc)
- return NULL;
-
- return dc;
}
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
@@ -1463,6 +1459,8 @@ next:
return issued;
}
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy);
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
@@ -1471,12 +1469,14 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int i, issued = 0;
+ int i, issued;
bool io_interrupted = false;
if (dpolicy->timeout)
f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
+retry:
+ issued = 0;
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
if (dpolicy->timeout &&
f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
@@ -1523,6 +1523,11 @@ next:
break;
}
+ if (dpolicy->type == DPOLICY_UMOUNT && issued) {
+ __wait_all_discard_cmd(sbi, dpolicy);
+ goto retry;
+ }
+
if (!issued && io_interrupted)
issued = -1;
@@ -3102,6 +3107,14 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
type = CURSEG_COLD_DATA;
}
+ /*
+ * We need to wait for node_write to avoid block allocation during
+ * checkpoint. This can only happen to quota writes which can cause
+ * the below discard race condition.
+ */
+ if (IS_DATASEG(type))
+ down_write(&sbi->node_write);
+
down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
@@ -3167,6 +3180,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
up_read(&SM_I(sbi)->curseg_lock);
+ if (IS_DATASEG(type))
+ up_write(&sbi->node_write);
+
if (put_pin_sem)
up_read(&sbi->pin_sem);
}
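
The discard endio hunk near the top of this file moves the error assignment under the command's lock and keeps only the first failure, since one discard command can now be backed by several split bios. A small sketch of that convention, with invented names:

	#include <pthread.h>
	#include <stdio.h>

	struct cmd {
		pthread_mutex_t lock;
		int error;	/* first error seen, 0 if none */
		int bio_ref;	/* outstanding split bios */
		int done;
	};

	static void bio_endio(struct cmd *dc, int err)
	{
		pthread_mutex_lock(&dc->lock);
		if (!dc->error)		/* first error wins; later ones are dropped */
			dc->error = err;
		if (--dc->bio_ref == 0)
			dc->done = 1;	/* last bio completes the command */
		pthread_mutex_unlock(&dc->lock);
	}

	int main(void)
	{
		struct cmd dc = { PTHREAD_MUTEX_INITIALIZER, 0, 3, 0 };

		bio_endio(&dc, 0);
		bio_endio(&dc, -5);	/* -EIO: recorded */
		bio_endio(&dc, -22);	/* -EINVAL: ignored, -EIO kept */
		printf("done=%d error=%d\n", dc.done, dc.error);
		return 0;
	}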
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 7a83bd530812..cba16cca5189 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/segment.h
*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8a9955902d84..20e56b0fa46a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -285,6 +285,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).s_resgid));
}
+static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
+{
+ if (!F2FS_OPTION(sbi).unusable_cap_perc)
+ return;
+
+ if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
+ F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
+ else
+ F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
+ F2FS_OPTION(sbi).unusable_cap_perc;
+
+ f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
+ F2FS_OPTION(sbi).unusable_cap,
+ F2FS_OPTION(sbi).unusable_cap_perc);
+}
+
static void init_once(void *foo)
{
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
@@ -471,11 +487,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
- if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
+ if (!strcmp(name, "on")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
- } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
+ } else if (!strcmp(name, "off")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
- } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
+ } else if (!strcmp(name, "sync")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
} else {
kvfree(name);
@@ -635,16 +651,14 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
- if (strlen(name) == 8 &&
- !strncmp(name, "adaptive", 8)) {
+ if (!strcmp(name, "adaptive")) {
if (f2fs_sb_has_blkzoned(sbi)) {
f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
kvfree(name);
return -EINVAL;
}
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
- } else if (strlen(name) == 3 &&
- !strncmp(name, "lfs", 3)) {
+ } else if (!strcmp(name, "lfs")) {
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
} else {
kvfree(name);
@@ -769,14 +783,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
- if (strlen(name) == 10 &&
- !strncmp(name, "user-based", 10)) {
+ if (!strcmp(name, "user-based")) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
- } else if (strlen(name) == 3 &&
- !strncmp(name, "off", 3)) {
+ } else if (!strcmp(name, "off")) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
- } else if (strlen(name) == 8 &&
- !strncmp(name, "fs-based", 8)) {
+ } else if (!strcmp(name, "fs-based")) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
} else {
kvfree(name);
@@ -789,11 +800,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
- if (strlen(name) == 7 &&
- !strncmp(name, "default", 7)) {
+ if (!strcmp(name, "default")) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
- } else if (strlen(name) == 5 &&
- !strncmp(name, "reuse", 5)) {
+ } else if (!strcmp(name, "reuse")) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
} else {
kvfree(name);
@@ -805,14 +814,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
- if (strlen(name) == 5 &&
- !strncmp(name, "posix", 5)) {
+ if (!strcmp(name, "posix")) {
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
- } else if (strlen(name) == 6 &&
- !strncmp(name, "strict", 6)) {
+ } else if (!strcmp(name, "strict")) {
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
- } else if (strlen(name) == 9 &&
- !strncmp(name, "nobarrier", 9)) {
+ } else if (!strcmp(name, "nobarrier")) {
F2FS_OPTION(sbi).fsync_mode =
FSYNC_MODE_NOBARRIER;
} else {
@@ -832,12 +838,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
return -EINVAL;
if (arg < 0 || arg > 100)
return -EINVAL;
- if (arg == 100)
- F2FS_OPTION(sbi).unusable_cap =
- sbi->user_block_count;
- else
- F2FS_OPTION(sbi).unusable_cap =
- (sbi->user_block_count / 100) * arg;
+ F2FS_OPTION(sbi).unusable_cap_perc = arg;
set_opt(sbi, DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable_cap:
@@ -860,17 +861,18 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
- if (strlen(name) == 3 && !strcmp(name, "lzo")) {
+ if (!strcmp(name, "lzo")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZO;
- } else if (strlen(name) == 3 &&
- !strcmp(name, "lz4")) {
+ } else if (!strcmp(name, "lz4")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZ4;
- } else if (strlen(name) == 4 &&
- !strcmp(name, "zstd")) {
+ } else if (!strcmp(name, "zstd")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_ZSTD;
+ } else if (!strcmp(name, "lzo-rle")) {
+ F2FS_OPTION(sbi).compress_algorithm =
+ COMPRESS_LZORLE;
} else {
kfree(name);
return -EINVAL;
@@ -1330,7 +1332,8 @@ static int f2fs_statfs_project(struct super_block *sb,
limit >>= sb->s_blocksize_bits;
if (limit && buf->f_blocks > limit) {
- curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
+ curblock = (dquot->dq_dqb.dqb_curspace +
+ dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
buf->f_blocks = limit;
buf->f_bfree = buf->f_bavail =
(buf->f_blocks > curblock) ?
@@ -1465,6 +1468,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
case COMPRESS_ZSTD:
algtype = "zstd";
break;
+ case COMPRESS_LZORLE:
+ algtype = "lzo-rle";
+ break;
}
seq_printf(seq, ",compress_algorithm=%s", algtype);
@@ -1880,6 +1886,7 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
+ adjust_unusable_cap_perc(sbi);
*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
return 0;
restore_gc:
@@ -3062,7 +3069,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
- FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
+ FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
BITS_TO_LONGS(FDEV(devi).nr_blkz)
* sizeof(unsigned long),
GFP_KERNEL);
@@ -3449,7 +3456,6 @@ try_onemore:
init_rwsem(&sbi->gc_lock);
mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
- mutex_init(&sbi->resize_mutex);
init_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change);
@@ -3460,6 +3466,7 @@ try_onemore:
/* init iostat info */
spin_lock_init(&sbi->iostat_lock);
sbi->iostat_enable = false;
+ sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
for (i = 0; i < NR_PAGE_TYPE; i++) {
int n = (i == META) ? 1: NR_TEMP_TYPE;
@@ -3557,6 +3564,7 @@ try_onemore:
sbi->reserved_blocks = 0;
sbi->current_reserved_blocks = 0;
limit_reserve_root(sbi);
+ adjust_unusable_cap_perc(sbi);
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -3927,7 +3935,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_bioset();
if (err)
goto free_bio_enrty_cache;
+ err = f2fs_init_compress_mempool();
+ if (err)
+ goto free_bioset;
return 0;
+free_bioset:
+ f2fs_destroy_bioset();
free_bio_enrty_cache:
f2fs_destroy_bio_entry_cache();
free_post_read:
@@ -3955,6 +3968,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_compress_mempool();
f2fs_destroy_bioset();
f2fs_destroy_bio_entry_cache();
f2fs_destroy_post_read_processing();
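
Storing checkpoint=disable as a percentage and converting it in adjust_unusable_cap_perc() lets the same arithmetic run at mount and at remount, when user_block_count is finally known. The conversion itself is just the integer math below (plain C, assumed names); 100% is special-cased so truncating division cannot leave the cap short of the full block count.

	#include <stdio.h>

	static unsigned int unusable_cap(unsigned int user_blocks, unsigned int perc)
	{
		if (!perc)
			return 0;		/* option not set */
		if (perc == 100)
			return user_blocks;	/* avoid rounding the cap down */
		return (user_blocks / 100) * perc;
	}

	int main(void)
	{
		printf("%u\n", unusable_cap(1000003, 100));	/* 1000003 */
		printf("%u\n", unusable_cap(1000003, 30));	/* 300000 (rounded down) */
		return 0;
	}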
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 3162f46b3c9b..e877c59b9fdb 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -15,6 +15,7 @@
#include "f2fs.h"
#include "segment.h"
#include "gc.h"
+#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
@@ -372,7 +373,6 @@ out:
return count;
}
-
if (!strcmp(a->attr.name, "iostat_enable")) {
sbi->iostat_enable = !!t;
if (!sbi->iostat_enable)
@@ -380,6 +380,15 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "iostat_period_ms")) {
+ if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
+ return -EINVAL;
+ spin_lock(&sbi->iostat_lock);
+ sbi->iostat_period_ms = (unsigned int)t;
+ spin_unlock(&sbi->iostat_lock);
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -538,6 +547,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_period_ms, iostat_period_ms);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
@@ -545,6 +555,8 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
@@ -618,6 +630,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_idle_interval),
ATTR_LIST(umount_discard_timeout),
ATTR_LIST(iostat_enable),
+ ATTR_LIST(iostat_period_ms),
ATTR_LIST(readdir_ra),
ATTR_LIST(gc_pin_file_thresh),
ATTR_LIST(extension_list),
@@ -625,6 +638,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
#endif
+ ATTR_LIST(data_io_flag),
+ ATTR_LIST(node_io_flag),
ATTR_LIST(dirty_segments),
ATTR_LIST(free_segments),
ATTR_LIST(unusable),
@@ -754,6 +769,33 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
return 0;
}
+void f2fs_record_iostat(struct f2fs_sb_info *sbi)
+{
+ unsigned long long iostat_diff[NR_IO_TYPE];
+ int i;
+
+ if (time_is_after_jiffies(sbi->iostat_next_period))
+ return;
+
+ /* Need double check under the lock */
+ spin_lock(&sbi->iostat_lock);
+ if (time_is_after_jiffies(sbi->iostat_next_period)) {
+ spin_unlock(&sbi->iostat_lock);
+ return;
+ }
+ sbi->iostat_next_period = jiffies +
+ msecs_to_jiffies(sbi->iostat_period_ms);
+
+ for (i = 0; i < NR_IO_TYPE; i++) {
+ iostat_diff[i] = sbi->rw_iostat[i] -
+ sbi->prev_rw_iostat[i];
+ sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
+ }
+ spin_unlock(&sbi->iostat_lock);
+
+ trace_f2fs_iostat(sbi, iostat_diff);
+}
+
static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
void *offset)
{
@@ -766,33 +808,58 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
seq_printf(seq, "time: %-16llu\n", now);
- /* print app IOs */
+ /* print app write IOs */
+ seq_puts(seq, "[WRITE]\n");
seq_printf(seq, "app buffered: %-16llu\n",
- sbi->write_iostat[APP_BUFFERED_IO]);
+ sbi->rw_iostat[APP_BUFFERED_IO]);
seq_printf(seq, "app direct: %-16llu\n",
- sbi->write_iostat[APP_DIRECT_IO]);
+ sbi->rw_iostat[APP_DIRECT_IO]);
seq_printf(seq, "app mapped: %-16llu\n",
- sbi->write_iostat[APP_MAPPED_IO]);
+ sbi->rw_iostat[APP_MAPPED_IO]);
- /* print fs IOs */
+ /* print fs write IOs */
seq_printf(seq, "fs data: %-16llu\n",
- sbi->write_iostat[FS_DATA_IO]);
+ sbi->rw_iostat[FS_DATA_IO]);
seq_printf(seq, "fs node: %-16llu\n",
- sbi->write_iostat[FS_NODE_IO]);
+ sbi->rw_iostat[FS_NODE_IO]);
seq_printf(seq, "fs meta: %-16llu\n",
- sbi->write_iostat[FS_META_IO]);
+ sbi->rw_iostat[FS_META_IO]);
seq_printf(seq, "fs gc data: %-16llu\n",
- sbi->write_iostat[FS_GC_DATA_IO]);
+ sbi->rw_iostat[FS_GC_DATA_IO]);
seq_printf(seq, "fs gc node: %-16llu\n",
- sbi->write_iostat[FS_GC_NODE_IO]);
+ sbi->rw_iostat[FS_GC_NODE_IO]);
seq_printf(seq, "fs cp data: %-16llu\n",
- sbi->write_iostat[FS_CP_DATA_IO]);
+ sbi->rw_iostat[FS_CP_DATA_IO]);
seq_printf(seq, "fs cp node: %-16llu\n",
- sbi->write_iostat[FS_CP_NODE_IO]);
+ sbi->rw_iostat[FS_CP_NODE_IO]);
seq_printf(seq, "fs cp meta: %-16llu\n",
- sbi->write_iostat[FS_CP_META_IO]);
+ sbi->rw_iostat[FS_CP_META_IO]);
+
+ /* print app read IOs */
+ seq_puts(seq, "[READ]\n");
+ seq_printf(seq, "app buffered: %-16llu\n",
+ sbi->rw_iostat[APP_BUFFERED_READ_IO]);
+ seq_printf(seq, "app direct: %-16llu\n",
+ sbi->rw_iostat[APP_DIRECT_READ_IO]);
+ seq_printf(seq, "app mapped: %-16llu\n",
+ sbi->rw_iostat[APP_MAPPED_READ_IO]);
+
+ /* print fs read IOs */
+ seq_printf(seq, "fs data: %-16llu\n",
+ sbi->rw_iostat[FS_DATA_READ_IO]);
+ seq_printf(seq, "fs gc data: %-16llu\n",
+ sbi->rw_iostat[FS_GDATA_READ_IO]);
+ seq_printf(seq, "fs compr_data: %-16llu\n",
+ sbi->rw_iostat[FS_CDATA_READ_IO]);
+ seq_printf(seq, "fs node: %-16llu\n",
+ sbi->rw_iostat[FS_NODE_READ_IO]);
+ seq_printf(seq, "fs meta: %-16llu\n",
+ sbi->rw_iostat[FS_META_READ_IO]);
+
+ /* print other IOs */
+ seq_puts(seq, "[OTHER]\n");
seq_printf(seq, "fs discard: %-16llu\n",
- sbi->write_iostat[FS_DISCARD]);
+ sbi->rw_iostat[FS_DISCARD]);
return 0;
}
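
f2fs_record_iostat() above gates the periodic trace with a double check: a cheap unlocked test of the next-period timestamp, repeated under the lock so only one caller per period computes and publishes the deltas. The sketch below reproduces that pattern in user space; the names, the clock source and the fixed period are assumptions for the example.

	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	#define NR_TYPES 3

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct timespec next_period;
	static long period_ms = 3000;
	static unsigned long long cur[NR_TYPES], prev[NR_TYPES];

	static int after(const struct timespec *a, const struct timespec *b)
	{
		return a->tv_sec > b->tv_sec ||
		       (a->tv_sec == b->tv_sec && a->tv_nsec > b->tv_nsec);
	}

	static void record_iostat(void)
	{
		unsigned long long diff[NR_TYPES];
		struct timespec now;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if (!after(&now, &next_period))
			return;			/* fast path, no lock taken */

		pthread_mutex_lock(&lock);
		if (!after(&now, &next_period)) {	/* someone beat us to it */
			pthread_mutex_unlock(&lock);
			return;
		}
		next_period = now;
		next_period.tv_sec += period_ms / 1000;
		next_period.tv_nsec += (period_ms % 1000) * 1000000L;
		if (next_period.tv_nsec >= 1000000000L) {
			next_period.tv_sec++;
			next_period.tv_nsec -= 1000000000L;
		}

		for (int i = 0; i < NR_TYPES; i++) {
			diff[i] = cur[i] - prev[i];
			prev[i] = cur[i];
		}
		pthread_mutex_unlock(&lock);

		for (int i = 0; i < NR_TYPES; i++)
			printf("type %d: +%llu\n", i, diff[i]);
	}

	int main(void)
	{
		cur[0] = 4096; cur[1] = 8192; cur[2] = 12288;
		record_iostat();
		return 0;
	}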
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
index e8075fc5b228..789f6aa727fc 100644
--- a/fs/f2fs/trace.h
+++ b/fs/f2fs/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* f2fs IO tracer
*
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 938fcd20565d..416d652774a3 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/xattr.h
*
@@ -136,6 +136,7 @@ extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
#else
#define f2fs_xattr_handlers NULL
+#define f2fs_listxattr NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *page, int flags)
@@ -148,11 +149,6 @@ static inline int f2fs_getxattr(struct inode *inode, int index,
{
return -EOPNOTSUPP;
}
-static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
- size_t buffer_size)
-{
- return -EOPNOTSUPP;
-}
static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
#endif
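
Defining f2fs_listxattr to NULL in the !CONFIG_F2FS_FS_XATTR branch is what lets namei.c drop every "#ifdef CONFIG_F2FS_FS_XATTR" around the .listxattr assignments. The tiny sketch below shows the same trick with a hypothetical feature and made-up names:

	#include <stdio.h>

	#define HAVE_FROBNICATE 0	/* flip to 1 to compile the feature in */

	struct file_ops {
		void (*frobnicate)(void);
	};

	#if HAVE_FROBNICATE
	static void my_frobnicate(void)
	{
		puts("frobnicating");
	}
	#else
	#define my_frobnicate NULL	/* same trick as "#define f2fs_listxattr NULL" */
	#endif

	/* no #ifdef needed around the assignment anymore */
	static const struct file_ops ops = {
		.frobnicate = my_frobnicate,
	};

	int main(void)
	{
		if (ops.frobnicate)
			ops.frobnicate();
		else
			puts("frobnicate not supported");
		return 0;
	}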
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 3647c65a0f48..bbfe18c07417 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -632,20 +632,80 @@ error:
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
-/* 128kb is the whole sectors for FAT12 and FAT16 */
-#define FAT_READA_SIZE (128 * 1024)
+struct fatent_ra {
+ sector_t cur;
+ sector_t limit;
+
+ unsigned int ra_blocks;
+ sector_t ra_advance;
+ sector_t ra_next;
+ sector_t ra_limit;
+};
-static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
- unsigned long reada_blocks)
+static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
+ struct fat_entry *fatent, int ent_limit)
{
- const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
- sector_t blocknr;
- int i, offset;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const struct fatent_operations *ops = sbi->fatent_ops;
+ sector_t blocknr, block_end;
+ int offset;
+ /*
+ * This is the sequential read, so ra_pages * 2 (but try to
+ * align the optimal hardware IO size).
+ * [BTW, 128kb covers the whole sectors for FAT12 and FAT16]
+ */
+ unsigned long ra_pages = sb->s_bdi->ra_pages;
+ unsigned int reada_blocks;
+ if (ra_pages > sb->s_bdi->io_pages)
+ ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
+ reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);
+
+ /* Initialize the range for sequential read */
ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
+ ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
+ ra->cur = 0;
+ ra->limit = (block_end + 1) - blocknr;
- for (i = 0; i < reada_blocks; i++)
- sb_breadahead(sb, blocknr + i);
+ /* Advancing the window at half size */
+ ra->ra_blocks = reada_blocks >> 1;
+ ra->ra_advance = ra->cur;
+ ra->ra_next = ra->cur;
+ ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
+}
+
+/* Assuming to be called before reading a new block (increments ->cur). */
+static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
+ struct fat_entry *fatent)
+{
+ if (ra->ra_next >= ra->ra_limit)
+ return;
+
+ if (ra->cur >= ra->ra_advance) {
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const struct fatent_operations *ops = sbi->fatent_ops;
+ struct blk_plug plug;
+ sector_t blocknr, diff;
+ int offset;
+
+ ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
+
+ diff = blocknr - ra->cur;
+ blk_start_plug(&plug);
+ /*
+ * FIXME: we would want to directly use the bio with
+ * pages to reduce the number of segments.
+ */
+ for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
+ sb_breadahead(sb, ra->ra_next + diff);
+ blk_finish_plug(&plug);
+
+ /* Advance the readahead window */
+ ra->ra_advance += ra->ra_blocks;
+ ra->ra_limit += min_t(sector_t,
+ ra->ra_blocks, ra->limit - ra->ra_limit);
+ }
+ ra->cur++;
}
int fat_count_free_clusters(struct super_block *sb)
@@ -653,27 +713,20 @@ int fat_count_free_clusters(struct super_block *sb)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
- unsigned long reada_blocks, reada_mask, cur_block;
+ struct fatent_ra fatent_ra;
int err = 0, free;
lock_fat(sbi);
if (sbi->free_clusters != -1 && sbi->free_clus_valid)
goto out;
- reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
- reada_mask = reada_blocks - 1;
- cur_block = 0;
-
free = 0;
fatent_init(&fatent);
fatent_set_entry(&fatent, FAT_START_ENT);
+ fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
while (fatent.entry < sbi->max_cluster) {
/* readahead of fat blocks */
- if ((cur_block & reada_mask) == 0) {
- unsigned long rest = sbi->fat_length - cur_block;
- fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
- }
- cur_block++;
+ fat_ent_reada(sb, &fatent_ra, &fatent);
err = fat_ent_read_block(sb, &fatent);
if (err)
@@ -707,9 +760,9 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
+ struct fatent_ra fatent_ra;
u64 ent_start, ent_end, minlen, trimmed = 0;
u32 free = 0;
- unsigned long reada_blocks, reada_mask, cur_block = 0;
int err = 0;
/*
@@ -727,19 +780,13 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
if (ent_end >= sbi->max_cluster)
ent_end = sbi->max_cluster - 1;
- reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
- reada_mask = reada_blocks - 1;
-
fatent_init(&fatent);
lock_fat(sbi);
fatent_set_entry(&fatent, ent_start);
+ fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
while (fatent.entry <= ent_end) {
/* readahead of fat blocks */
- if ((cur_block & reada_mask) == 0) {
- unsigned long rest = sbi->fat_length - cur_block;
- fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
- }
- cur_block++;
+ fat_ent_reada(sb, &fatent_ra, &fatent);
err = fat_ent_read_block(sb, &fatent);
if (err)
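
The fat_ra_init()/fat_ent_reada() rewrite above replaces the fixed 128 KiB readahead with a sliding window: the first call prefetches a full window, and every time the cursor reaches the advance mark the window grows by another half, clamped to the end of the FAT. The simulation below traces the window arithmetic only; the function and field names are made up, and printf stands in for sb_breadahead().

	#include <stdio.h>

	struct ra {
		unsigned long cur, limit;		/* blocks consumed / total */
		unsigned long ra_blocks;		/* half a window */
		unsigned long ra_advance, ra_next, ra_limit;
	};

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	static void ra_init(struct ra *ra, unsigned long window, unsigned long nr_blocks)
	{
		ra->cur = 0;
		ra->limit = nr_blocks;
		ra->ra_blocks = window / 2;
		ra->ra_advance = ra->cur;
		ra->ra_next = ra->cur;
		ra->ra_limit = ra->cur + min_ul(window, ra->limit);
	}

	/* called once before each block is actually read */
	static void ra_step(struct ra *ra, unsigned long first_blocknr)
	{
		if (ra->ra_next >= ra->ra_limit)
			return;			/* nothing left to prefetch */

		if (ra->cur >= ra->ra_advance) {
			printf("readahead blocks %lu..%lu\n",
			       first_blocknr + ra->ra_next,
			       first_blocknr + ra->ra_limit - 1);
			ra->ra_next = ra->ra_limit;
			/* advance the window by half its size, clamp to the end */
			ra->ra_advance += ra->ra_blocks;
			ra->ra_limit += min_ul(ra->ra_blocks,
					       ra->limit - ra->ra_limit);
		}
		ra->cur++;
	}

	int main(void)
	{
		struct ra ra;

		ra_init(&ra, 32, 100);		/* 32-block window over a 100-block FAT */
		for (unsigned long i = 0; i < 100; i++)
			ra_step(&ra, 1000);	/* FAT starts at block 1000 */
		return 0;
	}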
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index e6e68b2274a5..a0cf99debb1e 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b,
goto out;
}
+ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) {
+ if (!silent)
+ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors");
+ goto out;
+ }
+
error = 0;
out:
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a750381d554a..a605c3dddabc 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1125,6 +1125,7 @@ void inode_io_list_del(struct inode *inode)
inode_io_list_del_locked(inode, wb);
spin_unlock(&wb->list_lock);
}
+EXPORT_SYMBOL(inode_io_list_del);
/*
* mark an inode as under writeback on the sb
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8ccc97356cb5..02b3c36b3676 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -342,7 +342,7 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
list_add_tail(&req->intr_entry, &fiq->interrupts);
/*
* Pairs with smp_mb() implied by test_and_set_bit()
- * from request_end().
+ * from fuse_request_end().
*/
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
@@ -764,16 +764,15 @@ static int fuse_check_page(struct page *page)
{
if (page_mapcount(page) ||
page->mapping != NULL ||
- page_count(page) != 1 ||
(page->flags & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
- 1 << PG_reclaim))) {
- pr_warn("trying to steal weird page\n");
- pr_warn(" page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
+ 1 << PG_reclaim |
+ 1 << PG_waiters))) {
+ dump_page(page, "fuse: trying to steal weird page");
return 1;
}
return 0;
@@ -1977,8 +1976,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- BUG_ON(nbuf >= pipe->ring_size);
- BUG_ON(tail == head);
+ if (WARN_ON(nbuf >= count || tail == head))
+ goto out_free;
+
ibuf = &pipe->bufs[tail & mask];
obuf = &bufs[nbuf];
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index de1e2fde60bd..26f028bc760b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1689,8 +1689,18 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
struct inode *inode = d_inode(path->dentry);
struct fuse_conn *fc = get_fuse_conn(inode);
- if (!fuse_allow_current_process(fc))
+ if (!fuse_allow_current_process(fc)) {
+ if (!request_mask) {
+ /*
+ * If user explicitly requested *nothing* then don't
+ * error out, but return st_dev only.
+ */
+ stat->result_mask = 0;
+ stat->dev = inode->i_sb->s_dev;
+ return 0;
+ }
return -EACCES;
+ }
return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
}
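
The fuse_getattr() change above only matters to callers that ask for no attributes at all, which is something statx(2) can express with a zero mask. A minimal user-space caller is sketched below; it assumes Linux with statx support (glibc 2.28 or later) and, of course, only exercises the new FUSE path when run against a FUSE mount where allow_other denies the caller.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	int main(int argc, char **argv)
	{
		struct statx stx;
		const char *path = argc > 1 ? argv[1] : ".";

		/* mask = 0: request nothing beyond what comes for free (st_dev) */
		if (statx(AT_FDCWD, path, 0, 0, &stx) != 0) {
			perror("statx");
			return 1;
		}
		printf("dev = %u:%u (mask returned: %#x)\n",
		       stx.stx_dev_major, stx.stx_dev_minor, stx.stx_mask);
		return 0;
	}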
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bac51c32d660..e573b0cd2737 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -357,7 +357,7 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
struct fuse_writepage_args {
struct fuse_io_args ia;
- struct list_head writepages_entry;
+ struct rb_node writepages_entry;
struct list_head queue_entry;
struct fuse_writepage_args *next;
struct inode *inode;
@@ -366,17 +366,23 @@ struct fuse_writepage_args {
static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
pgoff_t idx_from, pgoff_t idx_to)
{
- struct fuse_writepage_args *wpa;
+ struct rb_node *n;
+
+ n = fi->writepages.rb_node;
- list_for_each_entry(wpa, &fi->writepages, writepages_entry) {
+ while (n) {
+ struct fuse_writepage_args *wpa;
pgoff_t curr_index;
+ wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
WARN_ON(get_fuse_inode(wpa->inode) != fi);
curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
- if (idx_from < curr_index + wpa->ia.ap.num_pages &&
- curr_index <= idx_to) {
+ if (idx_from >= curr_index + wpa->ia.ap.num_pages)
+ n = n->rb_right;
+ else if (idx_to < curr_index)
+ n = n->rb_left;
+ else
return wpa;
- }
}
return NULL;
}
@@ -445,9 +451,6 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (is_bad_inode(inode))
return -EIO;
- if (fc->no_flush)
- return 0;
-
err = write_inode_now(inode, 1);
if (err)
return err;
@@ -460,6 +463,10 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (err)
return err;
+ err = 0;
+ if (fc->no_flush)
+ goto inval_attr_out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.lock_owner = fuse_lock_owner_id(fc, id);
@@ -475,6 +482,14 @@ static int fuse_flush(struct file *file, fl_owner_t id)
fc->no_flush = 1;
err = 0;
}
+
+inval_attr_out:
+ /*
+ * In memory i_blocks is not maintained by fuse, if writeback cache is
+ * enabled, i_blocks from cached attr may not be accurate.
+ */
+ if (!err && fc->writeback_cache)
+ fuse_invalidate_attr(inode);
return err;
}
@@ -712,6 +727,7 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc,
spin_unlock(&io->lock);
ia->ap.args.end = fuse_aio_complete_req;
+ ia->ap.args.may_block = io->should_dirty;
err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
if (err)
fuse_aio_complete_req(fc, &ia->ap.args, err);
@@ -1570,7 +1586,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc,
struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- list_del(&wpa->writepages_entry);
+ rb_erase(&wpa->writepages_entry, &fi->writepages);
for (i = 0; i < ap->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
@@ -1658,6 +1674,36 @@ __acquires(fi->lock)
}
}
+static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
+{
+ pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
+ pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ WARN_ON(!wpa->ia.ap.num_pages);
+ while (*p) {
+ struct fuse_writepage_args *curr;
+ pgoff_t curr_index;
+
+ parent = *p;
+ curr = rb_entry(parent, struct fuse_writepage_args,
+ writepages_entry);
+ WARN_ON(curr->inode != wpa->inode);
+ curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
+
+ if (idx_from >= curr_index + curr->ia.ap.num_pages)
+ p = &(*p)->rb_right;
+ else if (idx_to < curr_index)
+ p = &(*p)->rb_left;
+ else
+ return (void) WARN_ON(true);
+ }
+
+ rb_link_node(&wpa->writepages_entry, parent, p);
+ rb_insert_color(&wpa->writepages_entry, root);
+}
+
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
int error)
{
@@ -1676,7 +1722,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
wpa->next = next->next;
next->next = NULL;
next->ia.ff = fuse_file_get(wpa->ia.ff);
- list_add(&next->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, next);
/*
* Skip fuse_flush_writepages() to make it easy to crop requests
@@ -1811,7 +1857,7 @@ static int fuse_writepage_locked(struct page *page)
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fi->lock);
- list_add(&wpa->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, wpa);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
@@ -1923,10 +1969,10 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
WARN_ON(new_ap->num_pages != 0);
spin_lock(&fi->lock);
- list_del(&new_wpa->writepages_entry);
+ rb_erase(&new_wpa->writepages_entry, &fi->writepages);
old_wpa = fuse_find_writeback(fi, page->index, page->index);
if (!old_wpa) {
- list_add(&new_wpa->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, new_wpa);
spin_unlock(&fi->lock);
return false;
}
@@ -2041,7 +2087,7 @@ static int fuse_writepages_fill(struct page *page,
wpa->inode = inode;
spin_lock(&fi->lock);
- list_add(&wpa->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, wpa);
spin_unlock(&fi->lock);
data->wpa = wpa;
@@ -3235,13 +3281,11 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
return -EXDEV;
- if (fc->writeback_cache) {
- inode_lock(inode_in);
- err = fuse_writeback_range(inode_in, pos_in, pos_in + len);
- inode_unlock(inode_in);
- if (err)
- return err;
- }
+ inode_lock(inode_in);
+ err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
+ inode_unlock(inode_in);
+ if (err)
+ return err;
inode_lock(inode_out);
@@ -3249,11 +3293,27 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (err)
goto out;
- if (fc->writeback_cache) {
- err = fuse_writeback_range(inode_out, pos_out, pos_out + len);
- if (err)
- goto out;
- }
+ /*
+ * Write out dirty pages in the destination file before sending the COPY
+ * request to userspace. After the request is completed, truncate off
+ * pages (including partial ones) from the cache that have been copied,
+ * since these contain stale data at that point.
+ *
+ * This should be mostly correct, but if the COPY writes to partial
+ * pages (at the start or end) and the parts not covered by the COPY are
+ * written through a memory map after calling fuse_writeback_range(),
+ * then these partial page modifications will be lost on truncation.
+ *
+ * It is unlikely that someone would rely on such mixed style
+ * modifications. Yet this does give less guarantees than if the
+ * copying was performed with write(2).
+ *
+ * To fix this a i_mmap_sem style lock could be used to prevent new
+ * faults while the copy is ongoing.
+ */
+ err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
+ if (err)
+ goto out;
if (is_unstable)
set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
@@ -3274,6 +3334,10 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (err)
goto out;
+ truncate_inode_pages_range(inode_out->i_mapping,
+ ALIGN_DOWN(pos_out, PAGE_SIZE),
+ ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
+
if (fc->writeback_cache) {
fuse_write_update_size(inode_out, pos_out + outarg.size);
file_update_time(file_out);
@@ -3351,5 +3415,5 @@ void fuse_init_file_inode(struct inode *inode)
INIT_LIST_HEAD(&fi->queued_writes);
fi->writectr = 0;
init_waitqueue_head(&fi->page_waitq);
- INIT_LIST_HEAD(&fi->writepages);
+ fi->writepages = RB_ROOT;
}
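
The file.c changes above replace the linear writepages list with a tree keyed on non-overlapping page-index ranges, so both insert and lookup walk right when the query starts past a node's range, left when it ends before it, and stop on intersection. The sketch below shows that comparison logic with a plain unbalanced BST for brevity where the kernel uses an rb-tree; all names are invented.

	#include <stdio.h>
	#include <stdlib.h>

	struct wpa {
		unsigned long first, last;	/* inclusive page-index range */
		struct wpa *left, *right;
	};

	static struct wpa *insert(struct wpa *root, unsigned long first,
				  unsigned long last)
	{
		if (!root) {
			struct wpa *n = calloc(1, sizeof(*n));

			n->first = first;
			n->last = last;
			return n;
		}
		if (first > root->last)
			root->right = insert(root->right, first, last);
		else if (last < root->first)
			root->left = insert(root->left, first, last);
		/* an overlapping insert would be a bug; silently ignored here */
		return root;
	}

	static struct wpa *find(struct wpa *root, unsigned long from, unsigned long to)
	{
		while (root) {
			if (from > root->last)
				root = root->right;
			else if (to < root->first)
				root = root->left;
			else
				return root;	/* [from,to] intersects this range */
		}
		return NULL;
	}

	int main(void)
	{
		struct wpa *root = NULL;

		root = insert(root, 0, 3);
		root = insert(root, 10, 10);
		root = insert(root, 20, 27);

		printf("%s\n", find(root, 5, 9) ? "writeback in flight" : "clean");
		printf("%s\n", find(root, 25, 40) ? "writeback in flight" : "clean");
		return 0;
	}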
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ca344bf71404..740a8a7d7ae6 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -111,7 +111,7 @@ struct fuse_inode {
wait_queue_head_t page_waitq;
/* List of writepage requestst (pending or sent) */
- struct list_head writepages;
+ struct rb_root writepages;
};
/* readdir cache (directory only) */
@@ -249,6 +249,7 @@ struct fuse_args {
bool out_argvar:1;
bool page_zeroing:1;
bool page_replace:1;
+ bool may_block:1;
struct fuse_in_arg in_args[3];
struct fuse_arg out_args[2];
void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 95d712d44ca1..5b4aebf5821f 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -321,6 +321,8 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
loff_t offset, loff_t len)
{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+ struct fuse_inode *fi;
struct inode *inode;
pgoff_t pg_start;
pgoff_t pg_end;
@@ -329,6 +331,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
if (!inode)
return -ENOENT;
+ fi = get_fuse_inode(inode);
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
+ spin_unlock(&fi->lock);
+
fuse_invalidate_attr(inode);
forget_all_cached_acls(inode);
if (offset >= 0) {
@@ -1113,7 +1120,7 @@ EXPORT_SYMBOL_GPL(fuse_dev_free);
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
- struct fuse_dev *fud;
+ struct fuse_dev *fud = NULL;
struct fuse_conn *fc = get_fuse_conn_super(sb);
struct inode *root;
struct dentry *root_dentry;
@@ -1155,9 +1162,12 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
if (sb->s_user_ns != &init_user_ns)
sb->s_xattr = fuse_no_acl_xattr_handlers;
- fud = fuse_dev_alloc_install(fc);
- if (!fud)
- goto err;
+ if (ctx->fudptr) {
+ err = -ENOMEM;
+ fud = fuse_dev_alloc_install(fc);
+ if (!fud)
+ goto err;
+ }
fc->dev = sb->s_dev;
fc->sb = sb;
@@ -1191,7 +1201,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
mutex_lock(&fuse_mutex);
err = -EINVAL;
- if (*ctx->fudptr)
+ if (ctx->fudptr && *ctx->fudptr)
goto err_unlock;
err = fuse_ctl_add_conn(fc);
@@ -1200,7 +1210,8 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
list_add_tail(&fc->entry, &fuse_conn_list);
sb->s_root = root_dentry;
- *ctx->fudptr = fud;
+ if (ctx->fudptr)
+ *ctx->fudptr = fud;
mutex_unlock(&fuse_mutex);
return 0;
@@ -1208,7 +1219,8 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
mutex_unlock(&fuse_mutex);
dput(root_dentry);
err_dev_free:
- fuse_dev_free(fud);
+ if (fud)
+ fuse_dev_free(fud);
err:
return err;
}
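
fuse_fill_super_common() now treats ctx->fudptr as optional: a NULL pointer means the caller (virtiofs) allocates and installs its own devices, so the helper skips that step and publishes nothing. A generic sketch of that optional out-pointer convention, with invented names:

	#include <stdio.h>
	#include <stdlib.h>

	struct dev { int id; };

	static int fill_common(struct dev **devptr)
	{
		struct dev *d = NULL;

		if (devptr) {			/* caller wants us to allocate */
			d = malloc(sizeof(*d));
			if (!d)
				return -1;
			d->id = 42;
		}

		/* ... shared setup that does not depend on d ... */

		if (devptr)
			*devptr = d;		/* publish only at the very end */
		return 0;
	}

	int main(void)
	{
		struct dev *d = NULL;

		if (fill_common(&d) == 0 && d)
			printf("allocated dev %d\n", d->id);
		free(d);

		/* virtiofs-style caller: manages its devices itself */
		if (fill_common(NULL) == 0)
			puts("setup done, no dev allocated here");
		return 0;
	}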
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index bade74768903..4c4ef5d69298 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -60,6 +60,12 @@ struct virtio_fs_forget {
struct virtio_fs_forget_req req;
};
+struct virtio_fs_req_work {
+ struct fuse_req *req;
+ struct virtio_fs_vq *fsvq;
+ struct work_struct done_work;
+};
+
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
struct fuse_req *req, bool in_flight);
@@ -485,19 +491,67 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
}
/* Work function for request completion */
+static void virtio_fs_request_complete(struct fuse_req *req,
+ struct virtio_fs_vq *fsvq)
+{
+ struct fuse_pqueue *fpq = &fsvq->fud->pq;
+ struct fuse_conn *fc = fsvq->fud->fc;
+ struct fuse_args *args;
+ struct fuse_args_pages *ap;
+ unsigned int len, i, thislen;
+ struct page *page;
+
+ /*
+ * TODO verify that server properly follows FUSE protocol
+ * (oh.uniq, oh.len)
+ */
+ args = req->args;
+ copy_args_from_argbuf(args, req);
+
+ if (args->out_pages && args->page_zeroing) {
+ len = args->out_args[args->out_numargs - 1].size;
+ ap = container_of(args, typeof(*ap), args);
+ for (i = 0; i < ap->num_pages; i++) {
+ thislen = ap->descs[i].length;
+ if (len < thislen) {
+ WARN_ON(ap->descs[i].offset);
+ page = ap->pages[i];
+ zero_user_segment(page, len, thislen);
+ len = 0;
+ } else {
+ len -= thislen;
+ }
+ }
+ }
+
+ spin_lock(&fpq->lock);
+ clear_bit(FR_SENT, &req->flags);
+ spin_unlock(&fpq->lock);
+
+ fuse_request_end(fc, req);
+ spin_lock(&fsvq->lock);
+ dec_in_flight_req(fsvq);
+ spin_unlock(&fsvq->lock);
+}
+
+static void virtio_fs_complete_req_work(struct work_struct *work)
+{
+ struct virtio_fs_req_work *w =
+ container_of(work, typeof(*w), done_work);
+
+ virtio_fs_request_complete(w->req, w->fsvq);
+ kfree(w);
+}
+
static void virtio_fs_requests_done_work(struct work_struct *work)
{
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
done_work);
struct fuse_pqueue *fpq = &fsvq->fud->pq;
- struct fuse_conn *fc = fsvq->fud->fc;
struct virtqueue *vq = fsvq->vq;
struct fuse_req *req;
- struct fuse_args_pages *ap;
struct fuse_req *next;
- struct fuse_args *args;
- unsigned int len, i, thislen;
- struct page *page;
+ unsigned int len;
LIST_HEAD(reqs);
/* Collect completed requests off the virtqueue */
@@ -515,38 +569,20 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
/* End requests */
list_for_each_entry_safe(req, next, &reqs, list) {
- /*
- * TODO verify that server properly follows FUSE protocol
- * (oh.uniq, oh.len)
- */
- args = req->args;
- copy_args_from_argbuf(args, req);
-
- if (args->out_pages && args->page_zeroing) {
- len = args->out_args[args->out_numargs - 1].size;
- ap = container_of(args, typeof(*ap), args);
- for (i = 0; i < ap->num_pages; i++) {
- thislen = ap->descs[i].length;
- if (len < thislen) {
- WARN_ON(ap->descs[i].offset);
- page = ap->pages[i];
- zero_user_segment(page, len, thislen);
- len = 0;
- } else {
- len -= thislen;
- }
- }
- }
-
- spin_lock(&fpq->lock);
- clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
- spin_unlock(&fpq->lock);
- fuse_request_end(fc, req);
- spin_lock(&fsvq->lock);
- dec_in_flight_req(fsvq);
- spin_unlock(&fsvq->lock);
+ /* blocking async request completes in a worker context */
+ if (req->args->may_block) {
+ struct virtio_fs_req_work *w;
+
+ w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
+ INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
+ w->fsvq = fsvq;
+ w->req = req;
+ schedule_work(&w->done_work);
+ } else {
+ virtio_fs_request_complete(req, fsvq);
+ }
}
}
@@ -1067,7 +1103,7 @@ static int virtio_fs_fill_super(struct super_block *sb)
err = -ENOMEM;
/* Allocate fuse_dev for hiprio and notification queues */
- for (i = 0; i < VQ_REQUEST; i++) {
+ for (i = 0; i < fs->nvqs; i++) {
struct virtio_fs_vq *fsvq = &fs->vqs[i];
fsvq->fud = fuse_dev_alloc();
@@ -1075,18 +1111,15 @@ static int virtio_fs_fill_super(struct super_block *sb)
goto err_free_fuse_devs;
}
- ctx.fudptr = (void **)&fs->vqs[VQ_REQUEST].fud;
+ /* virtiofs allocates and installs its own fuse devices */
+ ctx.fudptr = NULL;
err = fuse_fill_super_common(sb, &ctx);
if (err < 0)
goto err_free_fuse_devs;
- fc = fs->vqs[VQ_REQUEST].fud->fc;
-
for (i = 0; i < fs->nvqs; i++) {
struct virtio_fs_vq *fsvq = &fs->vqs[i];
- if (i == VQ_REQUEST)
- continue; /* already initialized */
fuse_dev_install(fsvq->fud, fc);
}
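
The virtio_fs.c split above keeps the virtqueue completion path non-blocking: requests flagged may_block are handed to a worker, everything else completes inline. The sketch below uses POSIX threads in place of the kernel workqueue; names are illustrative, and the joins exist only so the example process does not exit before its workers run (in the kernel the work items simply run asynchronously).

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct req {
		int id;
		int may_block;
	};

	static void complete(struct req *r)
	{
		/* in the kernel this would copy args back and end the request */
		printf("req %d completed%s\n", r->id,
		       r->may_block ? " (worker)" : " (inline)");
		free(r);
	}

	static void *worker(void *arg)
	{
		complete(arg);
		return NULL;
	}

	static void requests_done(struct req **reqs, int n)
	{
		pthread_t workers[16];	/* example capacity, n <= 16 here */
		int nworkers = 0;

		for (int i = 0; i < n; i++) {
			if (reqs[i]->may_block)
				pthread_create(&workers[nworkers++], NULL,
					       worker, reqs[i]);
			else
				complete(reqs[i]);
		}
		for (int i = 0; i < nworkers; i++)
			pthread_join(workers[i], NULL);
	}

	int main(void)
	{
		struct req *reqs[3];

		for (int i = 0; i < 3; i++) {
			reqs[i] = malloc(sizeof(*reqs[i]));
			reqs[i]->id = i;
			reqs[i]->may_block = (i == 1);
		}
		requests_done(reqs, 3);
		return 0;
	}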
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 3f717285ee48..756d05779200 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -134,7 +134,9 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
struct gfs2_sbd *sdp = sb->s_fs_info;
struct inode *inode;
- inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
+ if (!inum->no_formal_ino)
+ return ERR_PTR(-ESTALE);
+ inode = gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino,
GFS2_BLKST_DINODE);
if (IS_ERR(inode))
return ERR_CAST(inode);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index bf70e3b14938..2299dcc417ea 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -125,12 +125,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
- if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ kfree(gl->gl_lksb.sb_lvbptr);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- } else {
- kfree(gl->gl_lksb.sb_lvbptr);
+ else
kmem_cache_free(gfs2_glock_cachep, gl);
- }
}
/**
@@ -164,7 +163,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- BUG_ON(atomic_read(&gl->gl_revokes));
+ gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
@@ -465,6 +464,15 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
gl->gl_tchange = jiffies;
}
+static void gfs2_set_demote(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ set_bit(GLF_DEMOTE, &gl->gl_flags);
+ smp_mb();
+ wake_up(&sdp->sd_async_glock_wait);
+}
+
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -626,7 +634,8 @@ __acquires(&gl->gl_lockref.lock)
*/
if ((atomic_read(&gl->gl_ail_count) != 0) &&
(!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
- gfs2_assert_warn(sdp, !atomic_read(&gl->gl_ail_count));
+ gfs2_glock_assert_warn(gl,
+ !atomic_read(&gl->gl_ail_count));
gfs2_dump_glock(NULL, gl, true);
}
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
@@ -756,20 +765,127 @@ out_unlock:
return;
}
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
+{
+ struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+ if (ri->ri_magic == 0)
+ ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
+ if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
+ ri->ri_generation_deleted = cpu_to_be64(generation);
+}
+
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
+{
+ struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+ if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
+ return false;
+ return generation <= be64_to_cpu(ri->ri_generation_deleted);
+}
+
+static void gfs2_glock_poke(struct gfs2_glock *gl)
+{
+ int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
+ struct gfs2_holder gh;
+ int error;
+
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, flags, &gh);
+ if (!error)
+ gfs2_glock_dq(&gh);
+}
+
+static bool gfs2_try_evict(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip;
+ bool evicted = false;
+
+ /*
+ * If there is contention on the iopen glock and we have an inode, try
+ * to grab and release the inode so that it can be evicted. This will
+ * allow the remote node to go ahead and delete the inode without us
+ * having to do it, which will avoid rgrp glock thrashing.
+ *
+	 * The remote node is likely still holding the corresponding inode
+	 * glock, so its delete will run before we get to verify below that
+	 * the delete has happened.
+ */
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip && !igrab(&ip->i_inode))
+ ip = NULL;
+ spin_unlock(&gl->gl_lockref.lock);
+ if (ip) {
+ struct gfs2_glock *inode_gl = NULL;
+
+ gl->gl_no_formal_ino = ip->i_no_formal_ino;
+ set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ d_prune_aliases(&ip->i_inode);
+ iput(&ip->i_inode);
+
+ /* If the inode was evicted, gl->gl_object will now be NULL. */
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip) {
+ inode_gl = ip->i_gl;
+ lockref_get(&inode_gl->gl_lockref);
+ clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ }
+ spin_unlock(&gl->gl_lockref.lock);
+ if (inode_gl) {
+ gfs2_glock_poke(inode_gl);
+ gfs2_glock_put(inode_gl);
+ }
+ evicted = !ip;
+ }
+ return evicted;
+}
+
static void delete_work_func(struct work_struct *work)
{
- struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct inode *inode;
u64 no_addr = gl->gl_name.ln_number;
+ spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+
/* If someone's using this glock to create a new dinode, the block must
have been freed by another node, then re-used, in which case our
iopen callback is too late after the fact. Ignore it. */
if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
goto out;
- inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+ /*
+ * If we can evict the inode, give the remote node trying to
+ * delete the inode some time before verifying that the delete
+		 * has happened. Otherwise, if we cause contention on the inode
+		 * glock immediately, the remote node will think that we still
+		 * have the inode in use, and so it will give up waiting.
+ *
+ * If we can't evict the inode, signal to the remote node that
+ * the inode is still in use. We'll later try to delete the
+ * inode locally in gfs2_evict_inode.
+ *
+ * FIXME: We only need to verify that the remote node has
+ * deleted the inode because nodes before this remote delete
+ * rework won't cooperate. At a later time, when we no longer
+ * care about compatibility with such nodes, we can skip this
+ * step entirely.
+ */
+ if (gfs2_try_evict(gl)) {
+ if (gfs2_queue_delete_work(gl, 5 * HZ))
+ return;
+ }
+ goto out;
+ }
+
+ inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+ GFS2_BLKST_UNLINKED);
if (!IS_ERR_OR_NULL(inode)) {
d_prune_aliases(inode);
iput(inode);
@@ -800,7 +916,7 @@ static void glock_work_func(struct work_struct *work)
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
- set_bit(GLF_DEMOTE, &gl->gl_flags);
+ gfs2_set_demote(gl);
}
}
run_queue(gl, 0);
@@ -931,7 +1047,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
- INIT_WORK(&gl->gl_delete, delete_work_func);
+ INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
mapping = gfs2_glock2aspace(gl);
if (mapping) {
@@ -1145,9 +1261,10 @@ wait_for_dlm:
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
unsigned long delay, bool remote)
{
- int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
-
- set_bit(bit, &gl->gl_flags);
+ if (delay)
+ set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+ else
+ gfs2_set_demote(gl);
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
@@ -1754,6 +1871,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
rhashtable_walk_exit(&iter);
}
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+ bool queued;
+
+ spin_lock(&gl->gl_lockref.lock);
+ queued = queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, delay);
+ if (queued)
+ set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+ return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+ if (cancel_delayed_work_sync(&gl->gl_delete)) {
+ clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ gfs2_glock_put(gl);
+ }
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+ return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+ flush_delayed_work(&gl->gl_delete);
+ gfs2_glock_queue_work(gl, 0);
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+ glock_hash_walk(flush_delete_work, sdp);
+ flush_workqueue(gfs2_delete_workqueue);
+}
+
/**
* thaw_glock - thaw out a glock which has an unprocessed reply waiting
* @gl: The glock to thaw
@@ -1836,7 +1991,7 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
int ret;
ret = gfs2_truncatei_resume(ip);
- gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
+ gfs2_glock_assert_withdraw(gl, ret == 0);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1978,7 +2133,13 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
char gflags_buf[32];
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
+ unsigned long nrpages = 0;
+
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ struct address_space *mapping = gfs2_glock2aspace(gl);
+ nrpages = mapping->nrpages;
+ }
memset(fs_id_buf, 0, sizeof(fs_id_buf));
if (fsid && sdp) /* safety precaution */
sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
@@ -1987,15 +2148,16 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
- "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number,
- gflags2str(gflags_buf, gl),
- state2str(gl->gl_target),
- state2str(gl->gl_demote_state), dtime,
- atomic_read(&gl->gl_ail_count),
- atomic_read(&gl->gl_revokes),
- (int)gl->gl_lockref.count, gl->gl_hold_time);
+ "v:%d r:%d m:%ld p:%lu\n",
+ fs_id_buf, state2str(gl->gl_state),
+ gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number,
+ gflags2str(gflags_buf, gl),
+ state2str(gl->gl_target),
+ state2str(gl->gl_demote_state), dtime,
+ atomic_read(&gl->gl_ail_count),
+ atomic_read(&gl->gl_revokes),
+ (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
list_for_each_entry(gh, &gl->gl_holders, gh_list)
dump_holder(seq, gh, fs_id_buf);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index b8adaf80e4c5..53813364517b 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -205,6 +205,15 @@ extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
gfs2_dump_glock(NULL, gl, true); \
BUG(); } } while(0)
+#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \
+ gfs2_dump_glock(NULL, gl, true); \
+ gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
+ while (0)
+#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \
+ gfs2_dump_glock(NULL, gl, true); \
+ gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
+ while (0)
+
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
@@ -235,6 +244,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
+extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
+extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
@@ -306,4 +319,7 @@ static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
spin_unlock(&gl->gl_lockref.lock);
}
+extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+
#endif /* __GLOCK_DOT_H__ */
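[Editor's note] A hedged usage sketch of the delayed delete-work API declared above, based on the call sites changed later in this patch (rgrp.c, super.c, inode.c): the queueing side hands a glock reference to the work item and drops it if queueing fails, while a local lookup cancels any pending remote-triggered delete. Function names below are illustrative, not part of the patch.

/* Illustrative caller: hand a reference to the delete work item. */
static void sketch_schedule_remote_delete(struct gfs2_glock *io_gl)
{
	gfs2_glock_hold(io_gl);
	if (!gfs2_queue_delete_work(io_gl, 0))
		gfs2_glock_put(io_gl);	/* work already queued; drop our ref */
}

/* Illustrative caller: a local user of the inode showed up, so any
 * pending delete work (and its reference) must be cancelled. */
static void sketch_local_inode_in_use(struct gfs2_glock *io_gl)
{
	gfs2_cancel_delete_work(io_gl);
}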
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 9e9c7a4b8c66..c84887769b5a 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -91,6 +91,8 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
memset(&tr, 0, sizeof(tr));
INIT_LIST_HEAD(&tr.tr_buf);
INIT_LIST_HEAD(&tr.tr_databuf);
+ INIT_LIST_HEAD(&tr.tr_ail1_list);
+ INIT_LIST_HEAD(&tr.tr_ail2_list);
tr.tr_revokes = atomic_read(&gl->gl_ail_count);
if (!tr.tr_revokes) {
@@ -268,7 +270,7 @@ static int inode_go_sync(struct gfs2_glock *gl)
struct gfs2_inode *ip = gfs2_glock2inode(gl);
int isreg = ip && S_ISREG(ip->i_inode.i_mode);
struct address_space *metamapping = gfs2_glock2aspace(gl);
- int error = 0;
+ int error = 0, ret;
if (isreg) {
if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
@@ -289,8 +291,10 @@ static int inode_go_sync(struct gfs2_glock *gl)
error = filemap_fdatawait(mapping);
mapping_set_error(mapping, error);
}
- error = filemap_fdatawait(metamapping);
- mapping_set_error(metamapping, error);
+ ret = filemap_fdatawait(metamapping);
+ mapping_set_error(metamapping, ret);
+ if (!error)
+ error = ret;
gfs2_ail_empty_gl(gl);
/*
* Writeback of the data mapping may cause the dirty flag to be set
@@ -608,11 +612,17 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
gl->gl_lockref.count++;
- if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ if (!queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, 0))
gl->gl_lockref.count--;
}
}
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+ return !gfs2_delete_work_queued(gl);
+}
+
/**
* inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
* @gl: glock being freed
@@ -692,7 +702,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_lock = inode_go_lock,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
- .go_flags = GLOF_ASPACE | GLOF_LRU,
+ .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
.go_free = inode_go_free,
};
@@ -716,6 +726,7 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN,
.go_callback = iopen_go_callback,
+ .go_demote_ok = iopen_go_demote_ok,
.go_flags = GLOF_LRU | GLOF_NONDISK,
};
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 84a824293a78..03ab11fab962 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -345,6 +345,7 @@ enum {
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
GLF_INODE_CREATING = 16, /* Inode creation occurring */
+ GLF_PENDING_DELETE = 17,
GLF_FREEING = 18, /* Wait for glock to be freed */
};
@@ -378,8 +379,11 @@ struct gfs2_glock {
atomic_t gl_revokes;
struct delayed_work gl_work;
union {
- /* For inode and iopen glocks only */
- struct work_struct gl_delete;
+ /* For iopen glocks only */
+ struct {
+ struct delayed_work gl_delete;
+ u64 gl_no_formal_ino;
+ };
/* For rgrp glocks only */
struct {
loff_t start;
@@ -398,6 +402,7 @@ enum {
GIF_ORDERED = 4,
GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
+ GIF_DEFERRED_DELETE = 7,
};
struct gfs2_inode {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 5acd3ce30759..370c3a4b31ac 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/security.h>
+#include <linux/fiemap.h>
#include <linux/uaccess.h>
#include "gfs2.h"
@@ -114,6 +115,10 @@ static void gfs2_set_iop(struct inode *inode)
* placeholder because it doesn't otherwise make sense), the on-disk block type
* is verified to be @blktype.
*
+ * When @no_formal_ino is non-zero, this function will return ERR_PTR(-ESTALE)
+ * if it detects that @no_formal_ino doesn't match the actual inode generation
+ * number. However, that mismatch isn't always detected unless @type is DT_UNKNOWN.
+ *
* Returns: A VFS inode, or an error
*/
@@ -157,6 +162,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (error)
goto fail;
+ error = -ESTALE;
+ if (no_formal_ino &&
+ gfs2_inode_already_deleted(ip->i_gl, no_formal_ino))
+ goto fail;
+
if (blktype != GFS2_BLKST_FREE) {
error = gfs2_check_blk_type(sdp, no_addr,
blktype);
@@ -170,6 +180,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error))
goto fail;
+ gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_glock_put(io_gl);
io_gl = NULL;
@@ -188,13 +199,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
inode->i_mode = DT2IF(type);
}
+ if (gfs2_holder_initialized(&i_gh))
+ gfs2_glock_dq_uninit(&i_gh);
+
gfs2_set_iop(inode);
+ }
- unlock_new_inode(inode);
+ if (no_formal_ino && ip->i_no_formal_ino &&
+ no_formal_ino != ip->i_no_formal_ino) {
+ if (inode->i_state & I_NEW)
+ goto fail;
+ iput(inode);
+ return ERR_PTR(-ESTALE);
}
- if (gfs2_holder_initialized(&i_gh))
- gfs2_glock_dq_uninit(&i_gh);
+ if (inode->i_state & I_NEW)
+ unlock_new_inode(inode);
+
return inode;
fail:
@@ -206,23 +227,26 @@ fail:
return ERR_PTR(error);
}
+/**
+ * gfs2_lookup_by_inum - look up an inode by inode number
+ * @sdp: The super block
+ * @no_addr: The inode number
+ * @no_formal_ino: The inode generation number (0 for any)
+ * @blktype: Requested block type (see gfs2_inode_lookup)
+ */
struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
- u64 *no_formal_ino, unsigned int blktype)
+ u64 no_formal_ino, unsigned int blktype)
{
struct super_block *sb = sdp->sd_vfs;
struct inode *inode;
int error;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, no_formal_ino,
+ blktype);
if (IS_ERR(inode))
return inode;
- /* Two extra checks for NFS only */
if (no_formal_ino) {
- error = -ESTALE;
- if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino)
- goto fail_iput;
-
error = -EIO;
if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
goto fail_iput;
@@ -724,6 +748,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock2;
+ gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_set_iop(inode);
insert_inode_hash(inode);
@@ -780,7 +805,8 @@ fail_gunlock2:
fail_free_inode:
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
- gfs2_glock_put(ip->i_gl);
+ if (free_vfs_inode) /* else evict will do the put for us */
+ gfs2_glock_put(ip->i_gl);
}
gfs2_rs_delete(ip, NULL);
gfs2_qa_put(ip);
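[Editor's note] The signature change above (passing @no_formal_ino by value) moves the generation check into the lookup itself: 0 means "accept any generation", while a non-zero value yields -ESTALE when the generation doesn't match or the inode is already known to be deleted. A minimal sketch of an exporter-style caller, mirroring the gfs2_get_dentry() change earlier in this patch; the wrapper name is hypothetical.

/* Hypothetical NFS-handle lookup built on the new by-value interface. */
static struct inode *sketch_lookup_nfs_handle(struct gfs2_sbd *sdp,
					      u64 no_addr, u64 no_formal_ino)
{
	if (!no_formal_ino)		/* legacy handle without a generation */
		return ERR_PTR(-ESTALE);
	/* A non-zero generation is verified against the on-disk inode and
	 * against deletes recorded in the iopen glock's LVB. */
	return gfs2_lookup_by_inum(sdp, no_addr, no_formal_ino,
				   GFS2_BLKST_DINODE);
}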
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 580adbf0b5e1..b52ecf4ffe63 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -92,7 +92,7 @@ extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
u64 no_addr, u64 no_formal_ino,
unsigned int blktype);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
- u64 *no_formal_ino,
+ u64 no_formal_ino,
unsigned int blktype);
extern int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0644e58c6191..3e4734431783 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -30,6 +30,7 @@
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
+#include "trans.h"
static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
@@ -145,9 +146,6 @@ static void dump_ail_list(struct gfs2_sbd *sdp)
struct gfs2_bufdata *bd;
struct buffer_head *bh;
- fs_err(sdp, "Error: In gfs2_ail1_flush for ten minutes! t=%d\n",
- current->journal_info ? 1 : 0);
-
list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
bd_ail_st_list) {
@@ -197,6 +195,8 @@ void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
restart:
ret = 0;
if (time_after(jiffies, flush_start + (HZ * 600))) {
+ fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
+ __func__, current->journal_info ? 1 : 0);
dump_ail_list(sdp);
goto out;
}
@@ -379,7 +379,7 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
list_del(&tr->tr_list);
gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
spin_unlock(&sdp->sd_ail_lock);
@@ -864,19 +864,41 @@ static void ail_drain(struct gfs2_sbd *sdp)
gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
list_del(&tr->tr_list);
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
while (!list_empty(&sdp->sd_ail2_list)) {
tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
tr_list);
gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
list_del(&tr->tr_list);
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
spin_unlock(&sdp->sd_ail_lock);
}
/**
+ * empty_ail1_list - try to start IO and empty the ail1 list
+ * @sdp: Pointer to GFS2 superblock
+ */
+static void empty_ail1_list(struct gfs2_sbd *sdp)
+{
+ unsigned long start = jiffies;
+
+ for (;;) {
+ if (time_after(jiffies, start + (HZ * 600))) {
+ fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
+ __func__, current->journal_info ? 1 : 0);
+ dump_ail_list(sdp);
+ return;
+ }
+ gfs2_ail1_start(sdp);
+ gfs2_ail1_wait(sdp);
+ if (gfs2_ail1_empty(sdp, 0))
+ return;
+ }
+}
+
+/**
* gfs2_log_flush - flush incore transaction(s)
* @sdp: the filesystem
* @gl: The glock structure to flush. If NULL, flush the whole incore log
@@ -912,8 +934,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
tr = sdp->sd_log_tr;
if (tr) {
sdp->sd_log_tr = NULL;
- INIT_LIST_HEAD(&tr->tr_ail1_list);
- INIT_LIST_HEAD(&tr->tr_ail2_list);
tr->tr_first = sdp->sd_log_flush_head;
if (unlikely (state == SFS_FROZEN))
if (gfs2_assert_withdraw_delayed(sdp,
@@ -965,12 +985,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
- for (;;) {
- gfs2_ail1_start(sdp);
- gfs2_ail1_wait(sdp);
- if (gfs2_ail1_empty(sdp, 0))
- break;
- }
+ empty_ail1_list(sdp);
if (gfs2_withdrawn(sdp))
goto out;
atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
@@ -994,7 +1009,7 @@ out:
trace_gfs2_log_flush(sdp, 0, flags);
up_write(&sdp->sd_log_flush_lock);
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
/**
@@ -1003,8 +1018,10 @@ out:
* @new: New transaction to be merged
*/
-static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
+static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
+ struct gfs2_trans *old = sdp->sd_log_tr;
+
WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
old->tr_num_buf_new += new->tr_num_buf_new;
@@ -1016,6 +1033,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
list_splice_tail_init(&new->tr_buf, &old->tr_buf);
+
+ spin_lock(&sdp->sd_ail_lock);
+ list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
+ list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
+ spin_unlock(&sdp->sd_ail_lock);
}
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
@@ -1027,7 +1049,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_lock(sdp);
if (sdp->sd_log_tr) {
- gfs2_merge_trans(sdp->sd_log_tr, tr);
+ gfs2_merge_trans(sdp, tr);
} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
sdp->sd_log_tr = tr;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index a1a295b739fb..733470ca6be9 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -143,6 +143,12 @@ static int __init init_gfs2_fs(void)
if (!gfs2_qadata_cachep)
goto fail_cachep7;
+ gfs2_trans_cachep = kmem_cache_create("gfs2_trans",
+ sizeof(struct gfs2_trans),
+ 0, 0, NULL);
+ if (!gfs2_trans_cachep)
+ goto fail_cachep8;
+
error = register_shrinker(&gfs2_qd_shrinker);
if (error)
goto fail_shrinker;
@@ -194,6 +200,8 @@ fail_fs2:
fail_fs1:
unregister_shrinker(&gfs2_qd_shrinker);
fail_shrinker:
+ kmem_cache_destroy(gfs2_trans_cachep);
+fail_cachep8:
kmem_cache_destroy(gfs2_qadata_cachep);
fail_cachep7:
kmem_cache_destroy(gfs2_quotad_cachep);
@@ -236,6 +244,7 @@ static void __exit exit_gfs2_fs(void)
rcu_barrier();
mempool_destroy(gfs2_page_pool);
+ kmem_cache_destroy(gfs2_trans_cachep);
kmem_cache_destroy(gfs2_qadata_cachep);
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e2b69ffcc6a8..094f5fe7c009 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -880,7 +880,7 @@ fail:
}
static const match_table_t nolock_tokens = {
- { Opt_jid, "jid=%d\n", },
+ { Opt_jid, "jid=%d", },
{ Opt_err, NULL },
};
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index a321c34e3d6e..074f228ea839 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1835,7 +1835,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
*/
ip = gl->gl_object;
- if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ if (ip || !gfs2_queue_delete_work(gl, 0))
gfs2_glock_put(gl);
else
found++;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 956fced0a8ec..32d8d26126a1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -626,7 +626,7 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
}
}
- flush_workqueue(gfs2_delete_workqueue);
+ gfs2_flush_delete_work(sdp);
if (!log_write_allowed && current == sdp->sd_quotad_process)
fs_warn(sdp, "The quotad daemon is withdrawing.\n");
else if (sdp->sd_quotad_process)
@@ -1054,7 +1054,7 @@ static int gfs2_drop_inode(struct inode *inode)
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
gfs2_glock_hold(gl);
- if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ if (!gfs2_queue_delete_work(gl, 0))
gfs2_glock_queue_put(gl);
return false;
}
@@ -1258,6 +1258,55 @@ static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
gfs2_glock_put(gl);
}
+static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_holder *gh = &ip->i_iopen_gh;
+ long timeout = 5 * HZ;
+ int error;
+
+ gh->gh_flags |= GL_NOCACHE;
+ gfs2_glock_dq_wait(gh);
+
+ /*
+ * If there are no other lock holders, we'll get the lock immediately.
+ * Otherwise, the other nodes holding the lock will be notified about
+ * our locking request. If they don't have the inode open, they'll
+ * evict the cached inode and release the lock. Otherwise, if they
+ * poke the inode glock, we'll take this as an indication that they
+ * still need the iopen glock and that they'll take care of deleting
+ * the inode when they're done. As a last resort, if another node
+ * keeps holding the iopen glock without showing any activity on the
+ * inode glock, we'll eventually time out.
+ *
+ * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
+ * locking request as an optimization to notify lock holders as soon as
+ * possible. Without that flag, they'd be notified implicitly by the
+ * second locking request.
+ */
+
+ gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
+ error = gfs2_glock_nq(gh);
+ if (error != GLR_TRYFAILED)
+ return !error;
+
+ gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
+ error = gfs2_glock_nq(gh);
+ if (error)
+ return false;
+
+ timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+ !test_bit(HIF_WAIT, &gh->gh_iflags) ||
+ test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+ timeout);
+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+ gfs2_glock_dq(gh);
+ return false;
+ }
+ return true;
+}
+
/**
* gfs2_evict_inode - Remove an inode from cache
* @inode: The inode to evict
@@ -1299,9 +1348,12 @@ static void gfs2_evict_inode(struct inode *inode)
if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
gfs2_holder_mark_uninitialized(&gh);
- goto alloc_failed;
+ goto out_delete;
}
+ if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
+ goto out;
+
/* Deletes should never happen under memory pressure anymore. */
if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
goto out;
@@ -1315,6 +1367,8 @@ static void gfs2_evict_inode(struct inode *inode)
goto out;
}
+ if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
+ goto out_truncate;
error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
if (error)
goto out_truncate;
@@ -1331,16 +1385,13 @@ static void gfs2_evict_inode(struct inode *inode)
if (inode->i_nlink)
goto out_truncate;
-alloc_failed:
+out_delete:
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
- ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_wait(&ip->i_iopen_gh);
- gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
- &ip->i_iopen_gh);
- error = gfs2_glock_nq(&ip->i_iopen_gh);
- if (error)
+ if (!gfs2_upgrade_iopen_glock(inode)) {
+ gfs2_holder_uninit(&ip->i_iopen_gh);
goto out_truncate;
+ }
}
if (S_ISDIR(inode->i_mode) &&
@@ -1368,6 +1419,7 @@ alloc_failed:
that subsequent inode creates don't see an old gl_object. */
glock_clear_object(ip->i_gl, ip);
error = gfs2_dinode_dealloc(ip);
+ gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
goto out_unlock;
out_truncate:
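[Editor's note] The eviction path above upgrades the iopen glock in two steps: a LM_FLAG_TRY_1CB request that immediately notifies the other holders, then an asynchronous request bounded by a timeout. The sketch below condenses that pattern; it omits the gfs2-specific wakeup condition (GLF_DEMOTE on the inode glock) and is not a drop-in replacement.

/* Condensed illustration of gfs2_upgrade_iopen_glock()'s locking pattern. */
static bool sketch_upgrade_with_timeout(struct gfs2_holder *gh,
					wait_queue_head_t *async_wait)
{
	int error;

	/* Step 1: try-lock; LM_FLAG_TRY_1CB also notifies remote holders. */
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	/* Step 2: queue an async request and give the holders ~5 seconds. */
	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	if (gfs2_glock_nq(gh))
		return false;
	wait_event_interruptible_timeout(*async_wait,
			!test_bit(HIF_WAIT, &gh->gh_iflags), 5 * HZ);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return true;
}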
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index ffe840505082..a3dfa3aa87ad 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -37,7 +37,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
return -EROFS;
- tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
+ tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
if (!tr)
return -ENOMEM;
@@ -52,6 +52,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
INIT_LIST_HEAD(&tr->tr_databuf);
INIT_LIST_HEAD(&tr->tr_buf);
+ INIT_LIST_HEAD(&tr->tr_ail1_list);
+ INIT_LIST_HEAD(&tr->tr_ail2_list);
sb_start_intwrite(sdp->sd_vfs);
@@ -65,7 +67,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
fail:
sb_end_intwrite(sdp->sd_vfs);
- kfree(tr);
+ kmem_cache_free(gfs2_trans_cachep, tr);
return error;
}
@@ -93,7 +95,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
gfs2_log_release(sdp, tr->tr_reserved);
if (alloced) {
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
sb_end_intwrite(sdp->sd_vfs);
}
return;
@@ -109,7 +111,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
gfs2_log_commit(sdp, tr);
if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
up_read(&sdp->sd_log_flush_lock);
if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
@@ -276,3 +278,14 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
gfs2_log_unlock(sdp);
}
+void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+ if (tr == NULL)
+ return;
+
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_databuf));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_buf));
+ kmem_cache_free(gfs2_trans_cachep, tr);
+}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 6071334de035..83199ce5a5c5 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -42,5 +42,6 @@ extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+extern void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
#endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index aa087a5675af..1cd0328cae20 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -32,6 +32,7 @@ struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
struct kmem_cache *gfs2_quotad_cachep __read_mostly;
struct kmem_cache *gfs2_qadata_cachep __read_mostly;
+struct kmem_cache *gfs2_trans_cachep __read_mostly;
mempool_t *gfs2_page_pool __read_mostly;
void gfs2_assert_i(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index a3542560da6f..6d9157efe16c 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -172,6 +172,7 @@ extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
extern struct kmem_cache *gfs2_quotad_cachep;
extern struct kmem_cache *gfs2_qadata_cachep;
+extern struct kmem_cache *gfs2_trans_cachep;
extern mempool_t *gfs2_page_pool;
extern struct workqueue_struct *gfs2_control_wq;
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index e285d6b3bba4..d39246865c51 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -53,7 +53,7 @@ void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
return;
}
brelse(bh);
- };
+ }
blk_start_plug(&plug);
while (n > 0) {
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 2de0d3492d15..077c25128eb7 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -9,6 +9,7 @@
#include "hpfs_fn.h"
#include <linux/mpage.h>
+#include <linux/fiemap.h>
#define BLOCKS(size) (((size) + 511) >> 9)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index f3420a643b4f..ef5313f9c78f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -187,7 +187,7 @@ out:
}
/*
- * Called under down_write(mmap_sem).
+ * Called under mmap_write_lock(mm).
*/
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/fs/inode.c b/fs/inode.c
index 9fcec07a9d7c..72c4c347afb7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -497,7 +497,7 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
- hlist_add_head(&inode->i_hash, b);
+ hlist_add_head_rcu(&inode->i_hash, b);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
@@ -513,7 +513,7 @@ void __remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
- hlist_del_init(&inode->i_hash);
+ hlist_del_init_rcu(&inode->i_hash);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
@@ -1107,7 +1107,7 @@ again:
*/
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW;
- hlist_add_head(&inode->i_hash, head);
+ hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
if (!creating)
inode_sb_list_add(inode);
@@ -1201,7 +1201,7 @@ again:
inode->i_ino = ino;
spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
- hlist_add_head(&inode->i_hash, head);
+ hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
inode_sb_list_add(inode);
spin_unlock(&inode_hash_lock);
@@ -1244,15 +1244,10 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
struct hlist_head *b = inode_hashtable + hash(sb, ino);
struct inode *inode;
- spin_lock(&inode_hash_lock);
- hlist_for_each_entry(inode, b, i_hash) {
- if (inode->i_ino == ino && inode->i_sb == sb) {
- spin_unlock(&inode_hash_lock);
+ hlist_for_each_entry_rcu(inode, b, i_hash) {
+ if (inode->i_ino == ino && inode->i_sb == sb)
return 0;
- }
}
- spin_unlock(&inode_hash_lock);
-
return 1;
}
@@ -1281,6 +1276,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
static unsigned int counter;
ino_t res;
+ rcu_read_lock();
spin_lock(&iunique_lock);
do {
if (counter <= max_reserved)
@@ -1288,6 +1284,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
res = counter++;
} while (!test_inode_iunique(sb, res));
spin_unlock(&iunique_lock);
+ rcu_read_unlock();
return res;
}
@@ -1456,6 +1453,84 @@ out:
}
EXPORT_SYMBOL(find_inode_nowait);
+/**
+ * find_inode_rcu - find an inode in the inode cache
+ * @sb: Super block of file system to search
+ * @hashval: Key to hash
+ * @test: Function to test match on an inode
+ * @data: Data for test function
+ *
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * where the helper function @test will return 0 if the inode does not match
+ * and 1 if it does. The @test function must be responsible for taking the
+ * i_lock spin_lock and checking i_state for an inode being freed or being
+ * initialized.
+ *
+ * If successful, this will return the inode for which the @test function
+ * returned 1 and NULL otherwise.
+ *
+ * The @test function is not permitted to take a ref on any inode presented.
+ * It is also not permitted to sleep.
+ *
+ * The caller must hold the RCU read lock.
+ */
+struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *), void *data)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "suspicious find_inode_rcu() usage");
+
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
+ if (inode->i_sb == sb &&
+ !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
+ test(inode, data))
+ return inode;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(find_inode_rcu);
+
+/**
+ * find_inode_by_ino_rcu - Find an inode in the inode cache by inode number
+ * @sb: Super block of file system to search
+ * @ino: The inode number to match
+ *
+ * Search the inode cache of @sb for an inode with inode number @ino,
+ * skipping any inode that is being freed or is still being initialized.
+ *
+ * If successful, this will return the matching inode and NULL otherwise.
+ *
+ * The caller is not permitted to take a ref on the returned inode, and
+ * is not permitted to sleep while inspecting it.
+ *
+ * The caller must hold the RCU read lock.
+ */
+struct inode *find_inode_by_ino_rcu(struct super_block *sb,
+ unsigned long ino)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
+ struct inode *inode;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "suspicious find_inode_by_ino_rcu() usage");
+
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
+ if (inode->i_ino == ino &&
+ inode->i_sb == sb &&
+ !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
+ return inode;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(find_inode_by_ino_rcu);
+
int insert_inode_locked(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
@@ -1480,7 +1555,7 @@ int insert_inode_locked(struct inode *inode)
if (likely(!old)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW | I_CREATING;
- hlist_add_head(&inode->i_hash, head);
+ hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
@@ -1540,6 +1615,7 @@ static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const struct super_operations *op = inode->i_sb->s_op;
+ unsigned long state;
int drop;
WARN_ON(inode->i_state & I_NEW);
@@ -1555,16 +1631,20 @@ static void iput_final(struct inode *inode)
return;
}
+ state = inode->i_state;
if (!drop) {
- inode->i_state |= I_WILL_FREE;
+ WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
spin_unlock(&inode->i_lock);
+
write_inode_now(inode, 1);
+
spin_lock(&inode->i_lock);
- WARN_ON(inode->i_state & I_NEW);
- inode->i_state &= ~I_WILL_FREE;
+ state = inode->i_state;
+ WARN_ON(state & I_NEW);
+ state &= ~I_WILL_FREE;
}
- inode->i_state |= I_FREEING;
+ WRITE_ONCE(inode->i_state, state | I_FREEING);
if (!list_empty(&inode->i_lru))
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
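[Editor's note] The new RCU lookup helpers above leave all locking to the caller: the RCU read lock must be held across the call, the returned inode is only stable for the duration of the read-side critical section, and taking a real reference requires revalidation (e.g. under i_lock). A minimal, hypothetical caller sketch:

/* Hypothetical peek at the inode cache; illustrative only. */
static bool sketch_inode_is_cached(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	bool cached;

	rcu_read_lock();
	inode = find_inode_by_ino_rcu(sb, ino);
	/* The inode may be freed after rcu_read_unlock(); only peek here,
	 * and revalidate under i_lock before taking a reference. */
	cached = inode != NULL;
	rcu_read_unlock();
	return cached;
}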
diff --git a/fs/internal.h b/fs/internal.h
index b89d78f10396..9b863a7bd708 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -142,8 +142,6 @@ extern int dentry_needs_remove_privs(struct dentry *dentry);
/*
* fs-writeback.c
*/
-extern void inode_io_list_del(struct inode *inode);
-
extern long get_nr_dirty_inodes(void);
extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 4023c9846860..0b65a912b036 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -10,7 +10,6 @@
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
@@ -112,6 +111,7 @@ struct io_wq {
unsigned long state;
free_work_fn *free_work;
+ io_wq_work_fn *do_work;
struct task_struct *manager;
struct user_struct *user;
@@ -170,8 +170,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
dropped_lock = true;
}
__set_current_state(TASK_RUNNING);
- set_fs(KERNEL_DS);
- unuse_mm(worker->mm);
+ kthread_unuse_mm(worker->mm);
mmput(worker->mm);
worker->mm = NULL;
}
@@ -418,18 +417,15 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
if (worker->mm) {
- unuse_mm(worker->mm);
+ kthread_unuse_mm(worker->mm);
mmput(worker->mm);
worker->mm = NULL;
}
- if (!work->mm) {
- set_fs(KERNEL_DS);
+ if (!work->mm)
return;
- }
+
if (mmget_not_zero(work->mm)) {
- use_mm(work->mm);
- if (!worker->mm)
- set_fs(USER_DS);
+ kthread_use_mm(work->mm);
worker->mm = work->mm;
/* hang on to this mm */
work->mm = NULL;
@@ -528,7 +524,7 @@ get_next:
hash = io_get_work_hash(work);
linked = old_work = work;
- linked->func(&linked);
+ wq->do_work(&linked);
linked = (old_work == linked) ? NULL : linked;
work = next_hashed;
@@ -785,7 +781,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
struct io_wq_work *old_work = work;
work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ wq->do_work(&work);
work = (work == old_work) ? NULL : work;
wq->free_work(old_work);
} while (work);
@@ -1023,7 +1019,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
int ret = -ENOMEM, node;
struct io_wq *wq;
- if (WARN_ON_ONCE(!data->free_work))
+ if (WARN_ON_ONCE(!data->free_work || !data->do_work))
return ERR_PTR(-EINVAL);
wq = kzalloc(sizeof(*wq), GFP_KERNEL);
@@ -1037,6 +1033,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
}
wq->free_work = data->free_work;
+ wq->do_work = data->do_work;
/* caller must already hold a reference to this */
wq->user = data->user;
@@ -1093,7 +1090,7 @@ err:
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
{
- if (data->free_work != wq->free_work)
+ if (data->free_work != wq->free_work || data->do_work != wq->do_work)
return false;
return refcount_inc_not_zero(&wq->use_refs);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 5ba12de7572f..8e138fa88b9f 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -85,7 +85,6 @@ static inline void wq_list_del(struct io_wq_work_list *list,
struct io_wq_work {
struct io_wq_work_node list;
- void (*func)(struct io_wq_work **);
struct files_struct *files;
struct mm_struct *mm;
const struct cred *creds;
@@ -94,11 +93,6 @@ struct io_wq_work {
pid_t task_pid;
};
-#define INIT_IO_WORK(work, _func) \
- do { \
- *(work) = (struct io_wq_work){ .func = _func }; \
- } while (0) \
-
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
if (!work->list.next)
@@ -108,10 +102,12 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
}
typedef void (free_work_fn)(struct io_wq_work *);
+typedef void (io_wq_work_fn)(struct io_wq_work **);
struct io_wq_data {
struct user_struct *user;
+ io_wq_work_fn *do_work;
free_work_fn *free_work;
};
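[Editor's note] With ->func removed from struct io_wq_work, the submission callback is now supplied once per workqueue through struct io_wq_data, next to free_work, and io_wq_create() rejects data that lacks either callback. A hedged sketch of how an io-wq creator wires this up; the callback bodies and names are placeholders (io_uring's real callbacks live in fs/io_uring.c).

/* Placeholder callbacks matching the typedefs above. */
static void sketch_do_work(struct io_wq_work **workptr)
{
	/* execute *workptr; may replace it with a follow-up work item */
}

static void sketch_free_work(struct io_wq_work *work)
{
	/* release resources attached to a completed/cancelled work item */
}

static struct io_wq *sketch_create_wq(struct user_struct *user,
				      unsigned int bounded)
{
	struct io_wq_data data = {
		.user      = user,
		.do_work   = sketch_do_work,	/* per-wq, replaces work->func */
		.free_work = sketch_free_work,
	};

	return io_wq_create(bounded, &data);
}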
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 9d4bd0d3a080..155f3d830ddb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -55,7 +55,6 @@
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
@@ -529,7 +528,6 @@ enum {
REQ_F_INFLIGHT_BIT,
REQ_F_CUR_POS_BIT,
REQ_F_NOWAIT_BIT,
- REQ_F_IOPOLL_COMPLETED_BIT,
REQ_F_LINK_TIMEOUT_BIT,
REQ_F_TIMEOUT_BIT,
REQ_F_ISREG_BIT,
@@ -541,6 +539,8 @@ enum {
REQ_F_POLLED_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_NO_FILE_TABLE_BIT,
+ REQ_F_QUEUE_TIMEOUT_BIT,
+ REQ_F_WORK_INITIALIZED_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -572,8 +572,6 @@ enum {
REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
/* must not punt to workers */
REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
- /* polled IO has completed */
- REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
/* has linked timeout */
REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
/* timeout request */
@@ -596,6 +594,10 @@ enum {
REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
/* doesn't need file table for this request */
REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
+ /* needs to queue linked timeout */
+ REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
+ /* io_wq_work is initialized */
+ REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
};
struct async_poll {
@@ -634,6 +636,8 @@ struct io_kiocb {
struct io_async_ctx *io;
int cflags;
u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
u16 buf_index;
@@ -698,6 +702,8 @@ struct io_op_def {
unsigned needs_mm : 1;
/* needs req->file assigned */
unsigned needs_file : 1;
+ /* don't fail if file grab fails */
+ unsigned needs_file_no_error : 1;
/* hash wq insertion if file is a regular file */
unsigned hash_reg_file : 1;
/* unbound wq insertion if file is a non-regular file */
@@ -804,6 +810,8 @@ static const struct io_op_def io_op_defs[] = {
.needs_fs = 1,
},
[IORING_OP_CLOSE] = {
+ .needs_file = 1,
+ .needs_file_no_error = 1,
.file_table = 1,
},
[IORING_OP_FILES_UPDATE] = {
@@ -904,6 +912,19 @@ EXPORT_SYMBOL(io_uring_get_socket);
static void io_file_put_work(struct work_struct *work);
+/*
+ * Note: io_req_init_async() must be called before any member of
+ * io_wq_work is touched for the first time.
+ */
+static inline void io_req_init_async(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ return;
+
+ memset(&req->work, 0, sizeof(req->work));
+ req->flags |= REQ_F_WORK_INITIALIZED;
+}
+
static inline bool io_async_submit(struct io_ring_ctx *ctx)
{
return ctx->flags & IORING_SETUP_SQPOLL;
@@ -1030,6 +1051,9 @@ static inline void io_req_work_grab_env(struct io_kiocb *req,
static inline void io_req_work_drop_env(struct io_kiocb *req)
{
+ if (!(req->flags & REQ_F_WORK_INITIALIZED))
+ return;
+
if (req->work.mm) {
mmdrop(req->work.mm);
req->work.mm = NULL;
@@ -1576,16 +1600,6 @@ static void io_free_req(struct io_kiocb *req)
io_queue_async_work(nxt);
}
-static void io_link_work_cb(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *link;
-
- link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
- io_queue_linked_timeout(link);
- io_wq_submit_work(workptr);
-}
-
static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
{
struct io_kiocb *link;
@@ -1597,7 +1611,7 @@ static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
*workptr = &nxt->work;
link = io_prep_linked_timeout(nxt);
if (link)
- nxt->work.func = io_link_work_cb;
+ nxt->flags |= REQ_F_QUEUE_TIMEOUT;
}
/*
@@ -1782,7 +1796,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
* If we find a request that requires polling, break out
* and complete those lists first, if we have entries there.
*/
- if (req->flags & REQ_F_IOPOLL_COMPLETED) {
+ if (READ_ONCE(req->iopoll_completed)) {
list_move_tail(&req->list, &done);
continue;
}
@@ -1963,7 +1977,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
req_set_fail_links(req);
req->result = res;
if (res != -EAGAIN)
- req->flags |= REQ_F_IOPOLL_COMPLETED;
+ WRITE_ONCE(req->iopoll_completed, 1);
}
/*
@@ -1996,7 +2010,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
* For fast devices, IO may have already completed. If it has, add
* it to the front so we find it first.
*/
- if (req->flags & REQ_F_IOPOLL_COMPLETED)
+ if (READ_ONCE(req->iopoll_completed))
list_add(&req->list, &ctx->poll_list);
else
list_add_tail(&req->list, &ctx->poll_list);
@@ -2064,6 +2078,10 @@ static bool io_file_supports_async(struct file *file, int rw)
if (S_ISREG(mode) && file->f_op != &io_uring_fops)
return true;
+ /* any ->read/write should understand O_NONBLOCK */
+ if (file->f_flags & O_NONBLOCK)
+ return true;
+
if (!(file->f_mode & FMODE_NOWAIT))
return false;
@@ -2106,8 +2124,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
kiocb->ki_ioprio = get_current_ioprio();
/* don't allow async punt if RWF_NOWAIT was requested */
- if ((kiocb->ki_flags & IOCB_NOWAIT) ||
- (req->file->f_flags & O_NONBLOCK))
+ if (kiocb->ki_flags & IOCB_NOWAIT)
req->flags |= REQ_F_NOWAIT;
if (force_nonblock)
@@ -2121,6 +2138,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
req->result = 0;
+ req->iopoll_completed = 0;
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
@@ -2359,8 +2377,14 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
bool needs_lock)
{
- if (req->flags & REQ_F_BUFFER_SELECTED)
+ if (req->flags & REQ_F_BUFFER_SELECTED) {
+ struct io_buffer *kbuf;
+
+ kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
+ iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
+ iov[0].iov_len = kbuf->len;
return 0;
+ }
if (!req->rw.len)
return 0;
else if (req->rw.len > 1)
@@ -2742,7 +2766,8 @@ copy_iov:
if (ret)
goto out_free;
/* any defer here is final, must blocking retry */
- if (!file_can_poll(req->file))
+ if (!(req->flags & REQ_F_NOWAIT) &&
+ !file_can_poll(req->file))
req->flags |= REQ_F_MUST_PUNT;
return -EAGAIN;
}
@@ -2762,6 +2787,8 @@ static int __io_splice_prep(struct io_kiocb *req,
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
sp->file_in = NULL;
sp->len = READ_ONCE(sqe->len);
@@ -2776,8 +2803,14 @@ static int __io_splice_prep(struct io_kiocb *req,
return ret;
req->flags |= REQ_F_NEED_CLEANUP;
- if (!S_ISREG(file_inode(sp->file_in)->i_mode))
+ if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
+ /*
+		 * Splice operations will be punted async, and we need to modify
+		 * io_wq_work.flags here, so initialize io_wq_work first.
+ */
+ io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_UNBOUND;
+ }
return 0;
}
@@ -2886,23 +2919,15 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static bool io_req_cancelled(struct io_kiocb *req)
-{
- if (req->work.flags & IO_WQ_WORK_CANCEL) {
- req_set_fail_links(req);
- io_cqring_add_event(req, -ECANCELED);
- io_put_req(req);
- return true;
- }
-
- return false;
-}
-
-static void __io_fsync(struct io_kiocb *req)
+static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
loff_t end = req->sync.off + req->sync.len;
int ret;
+ /* fsync always requires a blocking context */
+ if (force_nonblock)
+ return -EAGAIN;
+
ret = vfs_fsync_range(req->file, req->sync.off,
end > 0 ? end : LLONG_MAX,
req->sync.flags & IORING_FSYNC_DATASYNC);
@@ -2910,58 +2935,16 @@ static void __io_fsync(struct io_kiocb *req)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req(req);
-}
-
-static void io_fsync_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_fsync(req);
- io_steal_work(req, workptr);
-}
-
-static int io_fsync(struct io_kiocb *req, bool force_nonblock)
-{
- /* fsync always requires a blocking context */
- if (force_nonblock) {
- req->work.func = io_fsync_finish;
- return -EAGAIN;
- }
- __io_fsync(req);
return 0;
}
-static void __io_fallocate(struct io_kiocb *req)
-{
- int ret;
-
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
- ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
- req->sync.len);
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
- if (ret < 0)
- req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
-}
-
-static void io_fallocate_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_fallocate(req);
- io_steal_work(req, workptr);
-}
-
static int io_fallocate_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->sync.off = READ_ONCE(sqe->off);
req->sync.len = READ_ONCE(sqe->addr);
@@ -2972,66 +2955,74 @@ static int io_fallocate_prep(struct io_kiocb *req,
static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
+ int ret;
+
/* fallocate always requiring blocking context */
- if (force_nonblock) {
- req->work.func = io_fallocate_finish;
+ if (force_nonblock)
return -EAGAIN;
- }
- __io_fallocate(req);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
+ ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+ req->sync.len);
+ current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ io_put_req(req);
return 0;
}
-static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
const char __user *fname;
int ret;
- if (sqe->ioprio || sqe->buf_index)
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (req->flags & REQ_F_FIXED_FILE)
+ if (unlikely(sqe->ioprio || sqe->buf_index))
+ return -EINVAL;
+ if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
- req->open.dfd = READ_ONCE(sqe->fd);
- req->open.how.mode = READ_ONCE(sqe->len);
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
- req->open.how.flags = READ_ONCE(sqe->open_flags);
- if (force_o_largefile())
+ /* open.how should be already initialised */
+ if (!(req->open.how.flags & O_PATH) && force_o_largefile())
req->open.how.flags |= O_LARGEFILE;
+ req->open.dfd = READ_ONCE(sqe->fd);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
req->open.filename = getname(fname);
if (IS_ERR(req->open.filename)) {
ret = PTR_ERR(req->open.filename);
req->open.filename = NULL;
return ret;
}
-
req->open.nofile = rlimit(RLIMIT_NOFILE);
req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
+static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ u64 flags, mode;
+
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
+ mode = READ_ONCE(sqe->len);
+ flags = READ_ONCE(sqe->open_flags);
+ req->open.how = build_open_how(flags, mode);
+ return __io_openat_prep(req, sqe);
+}
+
static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct open_how __user *how;
- const char __user *fname;
size_t len;
int ret;
- if (sqe->ioprio || sqe->buf_index)
- return -EINVAL;
- if (req->flags & REQ_F_FIXED_FILE)
- return -EBADF;
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
-
- req->open.dfd = READ_ONCE(sqe->fd);
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
len = READ_ONCE(sqe->len);
-
if (len < OPEN_HOW_SIZE_VER0)
return -EINVAL;
@@ -3040,19 +3031,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ret)
return ret;
- if (!(req->open.how.flags & O_PATH) && force_o_largefile())
- req->open.how.flags |= O_LARGEFILE;
-
- req->open.filename = getname(fname);
- if (IS_ERR(req->open.filename)) {
- ret = PTR_ERR(req->open.filename);
- req->open.filename = NULL;
- return ret;
- }
-
- req->open.nofile = rlimit(RLIMIT_NOFILE);
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
+ return __io_openat_prep(req, sqe);
}
static int io_openat2(struct io_kiocb *req, bool force_nonblock)
@@ -3092,7 +3071,6 @@ err:
static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
- req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
return io_openat2(req, force_nonblock);
}
@@ -3181,7 +3159,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
p->addr = READ_ONCE(sqe->addr);
p->len = READ_ONCE(sqe->len);
- if (!access_ok(u64_to_user_ptr(p->addr), p->len))
+ if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
return -EFAULT;
p->bgid = READ_ONCE(sqe->buf_group);
@@ -3259,6 +3237,8 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#if defined(CONFIG_EPOLL)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->epoll.epfd = READ_ONCE(sqe->fd);
req->epoll.op = READ_ONCE(sqe->len);
@@ -3303,6 +3283,8 @@ static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
if (sqe->ioprio || sqe->buf_index || sqe->off)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->madvise.addr = READ_ONCE(sqe->addr);
req->madvise.len = READ_ONCE(sqe->len);
@@ -3337,6 +3319,8 @@ static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
if (sqe->ioprio || sqe->buf_index || sqe->addr)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->fadvise.offset = READ_ONCE(sqe->off);
req->fadvise.len = READ_ONCE(sqe->len);
@@ -3370,6 +3354,8 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
@@ -3410,10 +3396,14 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
/*
* If we queue this for async, it must not be cancellable. That would
- * leave the 'file' in an undeterminate state.
+	 * leave the 'file' in an indeterminate state, and we need to modify
+	 * io_wq_work.flags here, so initialize io_wq_work first.
*/
+ io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
sqe->rw_flags || sqe->buf_index)
return -EINVAL;
@@ -3421,53 +3411,41 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
- return 0;
-}
-
-/* only called when __close_fd_get_file() is done */
-static void __io_close_finish(struct io_kiocb *req)
-{
- int ret;
-
- ret = filp_close(req->close.put_file, req->work.files);
- if (ret < 0)
- req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- fput(req->close.put_file);
- io_put_req(req);
-}
-
-static void io_close_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ if ((req->file && req->file->f_op == &io_uring_fops) ||
+ req->close.fd == req->ctx->ring_fd)
+ return -EBADF;
- /* not cancellable, don't do io_req_cancelled() */
- __io_close_finish(req);
- io_steal_work(req, workptr);
+ req->close.put_file = NULL;
+ return 0;
}
static int io_close(struct io_kiocb *req, bool force_nonblock)
{
+ struct io_close *close = &req->close;
int ret;
- req->close.put_file = NULL;
- ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
- if (ret < 0)
- return (ret == -ENOENT) ? -EBADF : ret;
+ /* might already be done during nonblock submission */
+ if (!close->put_file) {
+ ret = __close_fd_get_file(close->fd, &close->put_file);
+ if (ret < 0)
+ return (ret == -ENOENT) ? -EBADF : ret;
+ }
/* if the file has a flush method, be safe and punt to async */
- if (req->close.put_file->f_op->flush && force_nonblock) {
+ if (close->put_file->f_op->flush && force_nonblock) {
/* avoid grabbing files - we don't need the files */
req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT;
- req->work.func = io_close_finish;
return -EAGAIN;
}
- /*
- * No ->flush(), safely close from here and just punt the
- * fput() to async context.
- */
- __io_close_finish(req);
+ /* No ->flush() or already async, safely close from here */
+ ret = filp_close(close->put_file, req->work.files);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ fput(close->put_file);
+ close->put_file = NULL;
+ io_put_req(req);
return 0;
}
@@ -3489,38 +3467,20 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static void __io_sync_file_range(struct io_kiocb *req)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
int ret;
+ /* sync_file_range always requires a blocking context */
+ if (force_nonblock)
+ return -EAGAIN;
+
ret = sync_file_range(req->file, req->sync.off, req->sync.len,
req->sync.flags);
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
io_put_req(req);
-}
-
-
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_sync_file_range(req);
- io_steal_work(req, workptr);
-}
-
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
-{
- /* sync_file_range always requires a blocking context */
- if (force_nonblock) {
- req->work.func = io_sync_file_range_finish;
- return -EAGAIN;
- }
-
- __io_sync_file_range(req);
return 0;
}
@@ -3546,6 +3506,9 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_async_ctx *io = req->io;
int ret;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
@@ -3575,9 +3538,6 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
struct socket *sock;
int ret;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_async_ctx io;
@@ -3631,9 +3591,6 @@ static int io_send(struct io_kiocb *req, bool force_nonblock)
struct socket *sock;
int ret;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_sr_msg *sr = &req->sr_msg;
@@ -3786,6 +3743,9 @@ static int io_recvmsg_prep(struct io_kiocb *req,
struct io_async_ctx *io = req->io;
int ret;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
@@ -3814,9 +3774,6 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
struct socket *sock;
int ret, cflags = 0;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_buffer *kbuf;
@@ -3878,9 +3835,6 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock)
struct socket *sock;
int ret, cflags = 0;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_sr_msg *sr = &req->sr_msg;
@@ -3948,49 +3902,30 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-static int __io_accept(struct io_kiocb *req, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock)
{
struct io_accept *accept = &req->accept;
- unsigned file_flags;
+ unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
int ret;
- file_flags = force_nonblock ? O_NONBLOCK : 0;
+ if (req->file->f_flags & O_NONBLOCK)
+ req->flags |= REQ_F_NOWAIT;
+
ret = __sys_accept4_file(req->file, file_flags, accept->addr,
accept->addr_len, accept->flags,
accept->nofile);
if (ret == -EAGAIN && force_nonblock)
return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- if (ret < 0)
+ if (ret < 0) {
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
req_set_fail_links(req);
+ }
io_cqring_add_event(req, ret);
io_put_req(req);
return 0;
}
-static void io_accept_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_accept(req, false);
- io_steal_work(req, workptr);
-}
-
-static int io_accept(struct io_kiocb *req, bool force_nonblock)
-{
- int ret;
-
- ret = __io_accept(req, force_nonblock);
- if (ret == -EAGAIN && force_nonblock) {
- req->work.func = io_accept_finish;
- return -EAGAIN;
- }
- return 0;
-}
-
static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_connect *conn = &req->connect;
@@ -4329,7 +4264,8 @@ static void io_async_task_func(struct callback_head *cb)
spin_unlock_irq(&ctx->completion_lock);
/* restore ->work in case we need to retry again */
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll);
if (!canceled) {
@@ -4426,7 +4362,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
return false;
req->flags |= REQ_F_POLLED;
- memcpy(&apoll->work, &req->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&apoll->work, &req->work, sizeof(req->work));
had_io = req->io != NULL;
get_task_struct(current);
@@ -4451,7 +4388,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
if (!had_io)
io_poll_remove_double(req);
spin_unlock_irq(&ctx->completion_lock);
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&req->work, &apoll->work, sizeof(req->work));
kfree(apoll);
return false;
}
@@ -4496,7 +4434,9 @@ static bool io_poll_remove_one(struct io_kiocb *req)
* io_req_work_drop_env below when dropping the
* final reference.
*/
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ memcpy(&req->work, &apoll->work,
+ sizeof(req->work));
kfree(apoll);
}
}
@@ -4945,6 +4885,8 @@ static int io_req_defer_prep(struct io_kiocb *req,
if (!sqe)
return 0;
+ io_req_init_async(req);
+
if (io_op_defs[req->opcode].file_table) {
ret = io_grab_files(req);
if (unlikely(ret))
@@ -5382,12 +5324,26 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return 0;
}
+static void io_arm_async_linked_timeout(struct io_kiocb *req)
+{
+ struct io_kiocb *link;
+
+ /* link head's timeout is queued in io_queue_async_work() */
+ if (!(req->flags & REQ_F_QUEUE_TIMEOUT))
+ return;
+
+ link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
+ io_queue_linked_timeout(link);
+}
+
static void io_wq_submit_work(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
int ret = 0;
+ io_arm_async_linked_timeout(req);
+
/* if NO_CANCEL is set, we must still run the work */
if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
IO_WQ_WORK_CANCEL) {
@@ -5438,19 +5394,20 @@ static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
return -EBADF;
fd = array_index_nospec(fd, ctx->nr_user_files);
file = io_file_from_index(ctx, fd);
- if (!file)
- return -EBADF;
- req->fixed_file_refs = ctx->file_data->cur_refs;
- percpu_ref_get(req->fixed_file_refs);
+ if (file) {
+ req->fixed_file_refs = ctx->file_data->cur_refs;
+ percpu_ref_get(req->fixed_file_refs);
+ }
} else {
trace_io_uring_file_get(ctx, fd);
file = __io_file_get(state, fd);
- if (unlikely(!file))
- return -EBADF;
}
- *out_file = file;
- return 0;
+ if (file || io_op_defs[req->opcode].needs_file_no_error) {
+ *out_file = file;
+ return 0;
+ }
+ return -EBADF;
}
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
@@ -5584,7 +5541,8 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
again:
linked_timeout = io_prep_linked_timeout(req);
- if (req->work.creds && req->work.creds != current_cred()) {
+ if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
+ req->work.creds != current_cred()) {
if (old_creds)
revert_creds(old_creds);
if (old_creds == req->work.creds)
@@ -5607,6 +5565,8 @@ again:
goto exit;
}
punt:
+ io_req_init_async(req);
+
if (io_op_defs[req->opcode].file_table) {
ret = io_grab_files(req);
if (ret)
@@ -5859,7 +5819,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
refcount_set(&req->refs, 2);
req->task = NULL;
req->result = 0;
- INIT_IO_WORK(&req->work, io_wq_submit_work);
if (unlikely(req->opcode >= IORING_OP_LAST))
return -EINVAL;
@@ -5867,7 +5826,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (io_op_defs[req->opcode].needs_mm && !current->mm) {
if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
return -EFAULT;
- use_mm(ctx->sqo_mm);
+ kthread_use_mm(ctx->sqo_mm);
}
sqe_flags = READ_ONCE(sqe->flags);
@@ -5881,6 +5840,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
id = READ_ONCE(sqe->personality);
if (id) {
+ io_req_init_async(req);
req->work.creds = idr_find(&ctx->personality_idr, id);
if (unlikely(!req->work.creds))
return -EINVAL;
@@ -5981,7 +5941,7 @@ static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
struct mm_struct *mm = current->mm;
if (mm) {
- unuse_mm(mm);
+ kthread_unuse_mm(mm);
mmput(mm);
}
}
@@ -5990,15 +5950,12 @@ static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
const struct cred *old_cred;
- mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned long timeout;
int ret = 0;
complete(&ctx->sq_thread_comp);
- old_fs = get_fs();
- set_fs(USER_DS);
old_cred = override_creds(ctx->creds);
timeout = jiffies + ctx->sq_thread_idle;
@@ -6103,7 +6060,6 @@ static int io_sq_thread(void *data)
if (current->task_works)
task_work_run();
- set_fs(old_fs);
io_sq_thread_drop_mm(ctx);
revert_creds(old_cred);
@@ -6879,6 +6835,7 @@ static int io_init_wq_offload(struct io_ring_ctx *ctx,
data.user = ctx->user;
data.free_work = io_free_work;
+ data.do_work = io_wq_submit_work;
if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
/* Do QD, or 4 * CPUS, whatever is smallest */
@@ -7160,8 +7117,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
ret = 0;
if (!pages || nr_pages > got_pages) {
- kfree(vmas);
- kfree(pages);
+ kvfree(vmas);
+ kvfree(pages);
pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
vmas = kvmalloc_array(nr_pages,
@@ -7186,7 +7143,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
}
ret = 0;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
pret = pin_user_pages(ubuf, nr_pages,
FOLL_WRITE | FOLL_LONGTERM,
pages, vmas);
@@ -7204,7 +7161,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
} else {
ret = pret < 0 ? pret : -EFAULT;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (ret) {
/*
* if we did partial map, or found file backed vmas,
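
For reference on the REQ_F_WORK_INITIALIZED guards and io_req_init_async() calls introduced above, a minimal sketch of what such a lazy initializer could look like (hypothetical body; the real helper in this series may differ):

/* Sketch only: initialize req->work on first use so requests that never
 * go async skip the setup cost, and so ->work (e.g. ->work.creds) is only
 * consulted once REQ_F_WORK_INITIALIZED marks it valid.
 */
static inline void io_req_init_async(struct io_kiocb *req)
{
	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;

	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}
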
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 5e80b40bc1b5..d69786d1dd91 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -18,6 +18,7 @@
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <linux/sched/signal.h>
+#include <linux/fiemap.h>
#include "internal.h"
@@ -148,61 +149,55 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
EXPORT_SYMBOL(fiemap_fill_next_extent);
/**
- * fiemap_check_flags - check validity of requested flags for fiemap
+ * fiemap_prep - check validity of requested flags for fiemap
+ * @inode: Inode to operate on
* @fieinfo: Fiemap context passed into ->fiemap
- * @fs_flags: Set of fiemap flags that the file system understands
+ * @start: Start of the mapped range
+ * @len: Length of the mapped range, can be truncated by this function.
+ * @supported_flags: Set of fiemap flags that the file system understands
*
- * Called from file system ->fiemap callback. This will compute the
- * intersection of valid fiemap flags and those that the fs supports. That
- * value is then compared against the user supplied flags. In case of bad user
- * flags, the invalid values will be written into the fieinfo structure, and
- * -EBADR is returned, which tells ioctl_fiemap() to return those values to
- * userspace. For this reason, a return code of -EBADR should be preserved.
+ * This function must be called from each ->fiemap instance to validate the
+ * fiemap request against the file system parameters.
*
- * Returns 0 on success, -EBADR on bad flags.
+ * Returns 0 on success, or a negative error on failure.
*/
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags)
{
+ u64 maxbytes = inode->i_sb->s_maxbytes;
u32 incompat_flags;
+ int ret = 0;
- incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);
- if (incompat_flags) {
- fieinfo->fi_flags = incompat_flags;
- return -EBADR;
- }
- return 0;
-}
-EXPORT_SYMBOL(fiemap_check_flags);
-
-static int fiemap_check_ranges(struct super_block *sb,
- u64 start, u64 len, u64 *new_len)
-{
- u64 maxbytes = (u64) sb->s_maxbytes;
-
- *new_len = len;
-
- if (len == 0)
+ if (*len == 0)
return -EINVAL;
-
if (start > maxbytes)
return -EFBIG;
/*
* Shrink request scope to what the fs can actually handle.
*/
- if (len > maxbytes || (maxbytes - len) < start)
- *new_len = maxbytes - start;
+ if (*len > maxbytes || (maxbytes - *len) < start)
+ *len = maxbytes - start;
+
+ supported_flags |= FIEMAP_FLAG_SYNC;
+ supported_flags &= FIEMAP_FLAGS_COMPAT;
+ incompat_flags = fieinfo->fi_flags & ~supported_flags;
+ if (incompat_flags) {
+ fieinfo->fi_flags = incompat_flags;
+ return -EBADR;
+ }
- return 0;
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
+ ret = filemap_write_and_wait(inode->i_mapping);
+ return ret;
}
+EXPORT_SYMBOL(fiemap_prep);
static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
{
struct fiemap fiemap;
struct fiemap_extent_info fieinfo = { 0, };
struct inode *inode = file_inode(filp);
- struct super_block *sb = inode->i_sb;
- u64 len;
int error;
if (!inode->i_op->fiemap)
@@ -214,24 +209,13 @@ static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
return -EINVAL;
- error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
- &len);
- if (error)
- return error;
-
fieinfo.fi_flags = fiemap.fm_flags;
fieinfo.fi_extents_max = fiemap.fm_extent_count;
fieinfo.fi_extents_start = ufiemap->fm_extents;
- if (fiemap.fm_extent_count != 0 &&
- !access_ok(fieinfo.fi_extents_start,
- fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
- return -EFAULT;
-
- if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
- filemap_write_and_wait(inode->i_mapping);
+ error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start,
+ fiemap.fm_length);
- error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
fiemap.fm_flags = fieinfo.fi_flags;
fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
@@ -307,8 +291,7 @@ static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
* If you use this function directly, you need to do your own locking. Use
* generic_block_fiemap if you want the locking done for you.
*/
-
-int __generic_block_fiemap(struct inode *inode,
+static int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, loff_t start,
loff_t len, get_block_t *get_block)
{
@@ -320,7 +303,7 @@ int __generic_block_fiemap(struct inode *inode,
bool past_eof = false, whole_file = false;
int ret = 0;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
if (ret)
return ret;
@@ -453,7 +436,6 @@ int __generic_block_fiemap(struct inode *inode,
return ret;
}
-EXPORT_SYMBOL(__generic_block_fiemap);
/**
* generic_block_fiemap - FIEMAP for block based inodes
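
To illustrate the calling convention of the new fiemap_prep() helper above, a hedged sketch of a filesystem ->fiemap instance (hypothetical example_fiemap() and flag choice, not taken from any real filesystem; the iomap conversion below shows a real caller):

static int example_fiemap(struct inode *inode,
			  struct fiemap_extent_info *fieinfo,
			  u64 start, u64 len)
{
	int ret;

	/* Validates flags, clamps the range to s_maxbytes and, when
	 * FIEMAP_FLAG_SYNC is requested, flushes dirty pagecache.
	 */
	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	/* ... walk extents and report them via fiemap_fill_next_extent() ... */
	return 0;
}
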
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index d55e8f491a5e..aab070df4a21 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -6,6 +6,7 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
+#include <linux/fiemap.h>
struct fiemap_ctx {
struct fiemap_extent_info *fi;
@@ -65,7 +66,7 @@ iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
- loff_t start, loff_t len, const struct iomap_ops *ops)
+ u64 start, u64 len, const struct iomap_ops *ops)
{
struct fiemap_ctx ctx;
loff_t ret;
@@ -74,16 +75,10 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
ctx.fi = fi;
ctx.prev.type = IOMAP_HOLE;
- ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
+ ret = fiemap_prep(inode, fi, start, &len, 0);
if (ret)
return ret;
- if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
- ret = filemap_write_and_wait(inode->i_mapping);
- if (ret)
- return ret;
- }
-
while (len > 0) {
ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
iomap_fiemap_actor);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3dccc23cf010..e91aad3637a2 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -541,17 +541,24 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
}
EXPORT_SYMBOL(jbd2_journal_start);
-static void __jbd2_journal_unreserve_handle(handle_t *handle)
+static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
journal_t *journal = handle->h_journal;
WARN_ON(!handle->h_reserved);
sub_reserved_credits(journal, handle->h_total_credits);
+ if (t)
+ atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}
void jbd2_journal_free_reserved(handle_t *handle)
{
- __jbd2_journal_unreserve_handle(handle);
+ journal_t *journal = handle->h_journal;
+
+ /* Get j_state_lock to pin running transaction if it exists */
+ read_lock(&journal->j_state_lock);
+ __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
+ read_unlock(&journal->j_state_lock);
jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);
@@ -722,7 +729,8 @@ static void stop_this_handle(handle_t *handle)
atomic_sub(handle->h_total_credits,
&transaction->t_outstanding_credits);
if (handle->h_rsv_handle)
- __jbd2_journal_unreserve_handle(handle->h_rsv_handle);
+ __jbd2_journal_unreserve_handle(handle->h_rsv_handle,
+ transaction);
if (atomic_dec_and_test(&transaction->t_updates))
wake_up(&journal->j_wait_updates);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 34366db3620d..06b342d8462b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -652,9 +652,9 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
* The following is done to give a different lockdep key to
* @of->mutex for files which implement mmap. This is a rather
* crude way to avoid false positive lockdep warning around
- * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
+ * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
* reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
- * which mm->mmap_sem nests, while holding @of->mutex. As each
+ * which mm->mmap_lock nests, while holding @of->mutex. As each
* open file has a separate mutex, it's okay as long as those don't
* happen on the same file. At this point, we can't easily give
* each file a separate locking class. Let's differentiate on
@@ -1010,7 +1010,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (key) {
- lockdep_init_map(&kn->dep_map, "kn->count", key, 0);
+ lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
kn->flags |= KERNFS_LOCKDEP;
}
#endif
diff --git a/fs/locks.c b/fs/locks.c
index 1d4f4d5da704..7df0f9fa66f4 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1557,6 +1557,9 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
bool rc;
+ if (lease->fl_lmops->lm_breaker_owns_lease
+ && lease->fl_lmops->lm_breaker_owns_lease(lease))
+ return false;
if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
rc = false;
goto trace;
@@ -2823,7 +2826,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
{
struct inode *inode = NULL;
unsigned int fl_pid;
- struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
+ struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
fl_pid = locks_translate_pid(fl, proc_pidns);
/*
@@ -2901,7 +2904,7 @@ static int locks_show(struct seq_file *f, void *v)
{
struct locks_iterator *iter = f->private;
struct file_lock *fl, *bfl;
- struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
+ struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
fl = hlist_entry(v, struct file_lock, fl_link);
diff --git a/fs/namei.c b/fs/namei.c
index d81f73ff1a8b..72d4219c93ac 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -271,7 +271,7 @@ static int check_acl(struct inode *inode, int mask)
/* no ->get_acl() calls in RCU mode... */
if (is_uncached_acl(acl))
return -ECHILD;
- return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
+ return posix_acl_permission(inode, acl, mask);
}
acl = get_acl(inode, ACL_TYPE_ACCESS);
@@ -288,37 +288,51 @@ static int check_acl(struct inode *inode, int mask)
}
/*
- * This does the basic permission checking
+ * This does the basic UNIX permission checking.
+ *
+ * Note that the POSIX ACL check cares about the MAY_NOT_BLOCK bit,
+ * for RCU walking.
*/
static int acl_permission_check(struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
- if (likely(uid_eq(current_fsuid(), inode->i_uid)))
+ /* Are we the owner? If so, ACLs don't matter */
+ if (likely(uid_eq(current_fsuid(), inode->i_uid))) {
+ mask &= 7;
mode >>= 6;
- else {
- if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
- int error = check_acl(inode, mask);
- if (error != -EAGAIN)
- return error;
- }
+ return (mask & ~mode) ? -EACCES : 0;
+ }
- if (in_group_p(inode->i_gid))
- mode >>= 3;
+ /* Do we have ACLs? */
+ if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
+ int error = check_acl(inode, mask);
+ if (error != -EAGAIN)
+ return error;
}
+ /* Only RWX matters for group/other mode bits */
+ mask &= 7;
+
/*
- * If the DACs are ok we don't need any capability check.
+ * Are the group permissions different from
+ * the other permissions in the bits we care
+ * about? Need to check group ownership if so.
*/
- if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
- return 0;
- return -EACCES;
+ if (mask & (mode ^ (mode >> 3))) {
+ if (in_group_p(inode->i_gid))
+ mode >>= 3;
+ }
+
+ /* Bits in 'mode' clear that we require? */
+ return (mask & ~mode) ? -EACCES : 0;
}
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @inode: inode to check access rights for
- * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC,
+ * %MAY_NOT_BLOCK ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
diff --git a/fs/namespace.c b/fs/namespace.c
index 6d499ab254b7..f30ed401cc6d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -684,9 +684,6 @@ bool __is_local_mountpoint(struct dentry *dentry)
struct mount *mnt;
bool is_covered = false;
- if (!d_mountpoint(dentry))
- goto out;
-
down_read(&namespace_sem);
lock_ns_list(ns);
list_for_each_entry(mnt, &ns->list, mnt_list) {
@@ -698,7 +695,7 @@ bool __is_local_mountpoint(struct dentry *dentry)
}
unlock_ns_list(ns);
up_read(&namespace_sem);
-out:
+
return is_covered;
}
@@ -1937,6 +1934,9 @@ struct vfsmount *clone_private_mount(const struct path *path)
if (IS_ERR(new_mnt))
return ERR_CAST(new_mnt);
+ /* Longterm mount to be removed by kern_unmount*() */
+ new_mnt->mnt_ns = MNT_NS_INTERNAL;
+
return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
@@ -3863,6 +3863,19 @@ void kern_unmount(struct vfsmount *mnt)
}
EXPORT_SYMBOL(kern_unmount);
+void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (mnt[i])
+ real_mount(mnt[i])->mnt_ns = NULL;
+ synchronize_rcu_expedited();
+ for (i = 0; i < num; i++)
+ mntput(mnt[i]);
+}
+EXPORT_SYMBOL(kern_unmount_array);
+
bool our_mnt(struct vfsmount *mnt)
{
return check_mnt(real_mount(mnt));
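
As a usage sketch for the kern_unmount_array() helper added above (hypothetical caller; the intended in-tree user is not part of this hunk), showing how several longterm mounts can be torn down behind a single RCU grace period:

static void example_put_mounts(struct vfsmount *m1, struct vfsmount *m2)
{
	struct vfsmount *mnts[] = { m1, m2 };

	/* One synchronize_rcu_expedited() for the whole batch, instead of
	 * one per kern_unmount() call.
	 */
	kern_unmount_array(mnts, ARRAY_SIZE(mnts));
}
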
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index a57e7c72c7f4..1b79dd5cf661 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -446,7 +446,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
struct inode *inode = mapping->host;
struct nfs_direct_req *dreq;
struct nfs_lock_context *l_ctx;
- ssize_t result = -EINVAL, requested;
+ ssize_t result, requested;
size_t count = iov_iter_count(iter);
nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
@@ -731,6 +731,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (request_commit) {
kref_get(&req->wb_kref);
+ memcpy(&req->wb_verf, &hdr->verf.verifier,
+ sizeof(req->wb_verf));
nfs_mark_request_commit(req, hdr->lseg, &cinfo,
hdr->ds_commit_idx);
}
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 963800037609..e87d500ad95a 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -39,7 +39,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#include <linux/string.h>
#include <linux/kmod.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/socket.h>
#include <linux/seq_file.h>
#include <linux/inet.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b9d0921cb4fe..0bf1f835de01 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -833,6 +833,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
do_update |= cache_validity & NFS_INO_INVALID_ATIME;
if (request_mask & (STATX_CTIME|STATX_MTIME))
do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE;
+ if (request_mask & STATX_BLOCKS)
+ do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
if (do_update) {
/* Update the attribute cache */
if (!(server->flags & NFS_MOUNT_NOAC))
@@ -1764,7 +1766,8 @@ out_noforce:
status = nfs_post_op_update_inode_locked(inode, fattr,
NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME
- | NFS_INO_INVALID_MTIME);
+ | NFS_INO_INVALID_MTIME
+ | NFS_INO_INVALID_BLOCKS);
return status;
}
@@ -1871,7 +1874,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ATIME
| NFS_INO_REVAL_FORCED
- | NFS_INO_REVAL_PAGECACHE);
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_INVALID_BLOCKS);
/* Do atomic weak cache consistency updates */
nfs_wcc_update_inode(inode, fattr);
@@ -2033,8 +2037,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
} else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
- else
+ else {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_BLOCKS
+ | NFS_INO_REVAL_FORCED);
cache_revalidated = false;
+ }
/* Update attrtimeo value if we're out of the unstable period */
if (attr_changed) {
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index a46d1d5d16d8..2397ceedba8a 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -179,11 +179,11 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
if (nfs_lookup_is_soft_revalidate(dentry))
task_flags |= RPC_TASK_TIMEOUT;
- dprintk("NFS call lookup %pd2\n", dentry);
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
return -ENOMEM;
+ dprintk("NFS call lookup %pd2\n", dentry);
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
nfs_refresh_inode(dir, res.dir_attr);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9056f3dd380e..e32717fd1169 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7909,7 +7909,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
}
static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
- .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
+ .rpc_call_done = nfs4_bind_one_conn_to_session_done,
};
/*
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 7e7a97ae21ed..547cec79899f 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -961,6 +961,97 @@ TRACE_EVENT(nfs_readpage_done,
)
);
+TRACE_EVENT(nfs_readpage_short,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct nfs_pgio_header *hdr
+ ),
+
+ TP_ARGS(task, hdr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, arg_count)
+ __field(u32, res_count)
+ __field(bool, eof)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = hdr->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
+
+ __entry->status = task->tk_status;
+ __entry->offset = hdr->args.offset;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
+ __entry->eof = hdr->res.eof;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u status=%d%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle,
+ (long long)__entry->offset, __entry->arg_count,
+ __entry->res_count, __entry->status,
+ __entry->eof ? " eof" : ""
+ )
+);
+
+TRACE_EVENT(nfs_pgio_error,
+ TP_PROTO(
+ const struct nfs_pgio_header *hdr,
+ int error,
+ loff_t pos
+ ),
+
+ TP_ARGS(hdr, error, pos),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, arg_count)
+ __field(u32, res_count)
+ __field(loff_t, pos)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = hdr->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
+
+ __entry->status = error;
+ __entry->offset = hdr->args.offset;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
+ __entry->pos = pos;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ ),
+
+ TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u pos=%llu status=%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid, __entry->fhandle,
+ (long long)__entry->offset, __entry->arg_count, __entry->res_count,
+ __entry->pos, __entry->status
+ )
+);
+
TRACE_DEFINE_ENUM(NFS_UNSTABLE);
TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
@@ -1312,7 +1403,12 @@ TRACE_EVENT(nfs_xdr_status,
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
+ __field(int, version)
__field(unsigned long, error)
+ __string(program,
+ xdr->rqst->rq_task->tk_client->cl_program->name)
+ __string(procedure,
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
),
TP_fast_assign(
@@ -1322,13 +1418,19 @@ TRACE_EVENT(nfs_xdr_status,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->version = task->tk_client->cl_vers;
__entry->error = error;
+ __assign_str(program,
+ task->tk_client->cl_program->name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
),
TP_printk(
- "task:%u@%d xid=0x%08x error=%ld (%s)",
+ "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)",
__entry->task_id, __entry->client_id, __entry->xid,
- -__entry->error, nfs_show_status(__entry->error)
+ __get_str(program), __entry->version,
+ __get_str(procedure), -__entry->error,
+ nfs_show_status(__entry->error)
)
);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6ca421cbe19c..6ea4cac41e46 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -24,6 +24,7 @@
#include "internal.h"
#include "pnfs.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -64,6 +65,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
unsigned int new = pos - hdr->io_start;
+ trace_nfs_pgio_error(hdr, error, pos);
if (hdr->good_bytes > new) {
hdr->good_bytes = new;
clear_bit(NFS_IOHDR_EOF, &hdr->flags);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 13b22e898116..eb854f1f86e2 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -264,6 +264,8 @@ static void nfs_readpage_retry(struct rpc_task *task,
/* This is a short read! */
nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
+ trace_nfs_readpage_short(task, hdr);
+
/* Has the server at least made some progress? */
if (resp->count == 0) {
nfs_set_pgio_error(hdr, -EIO, argp->offset);
diff --git a/fs/nfs/sysfs.h b/fs/nfs/sysfs.h
index f1b27411dcc0..ebcbdc40483b 100644
--- a/fs/nfs/sysfs.h
+++ b/fs/nfs/sysfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019 Hammerspace Inc
*/
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 10ec5ecdf117..65c331f75e9c 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -78,6 +78,8 @@ enum {
/* Checksum this amount of the request */
#define RC_CSUMLEN (256U)
+int nfsd_drc_slab_create(void);
+void nfsd_drc_slab_free(void);
int nfsd_reply_cache_init(struct nfsd_net *);
void nfsd_reply_cache_shutdown(struct nfsd_net *);
int nfsd_cache_lookup(struct svc_rqst *);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 09aa545825bd..9217cb64bf0e 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -139,7 +139,6 @@ struct nfsd_net {
* Duplicate reply cache
*/
struct nfsd_drc_bucket *drc_hashtbl;
- struct kmem_cache *drc_slab;
/* max number of entries allowed in the cache */
unsigned int max_drc_entries;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 5cf91322de0f..7fbe9840a03e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -38,6 +38,7 @@
#include "nfsd.h"
#include "state.h"
#include "netns.h"
+#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
@@ -904,16 +905,20 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
- (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
+ (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
+ trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
+ }
args.client_name = clp->cl_cred.cr_principal;
args.prognumber = conn->cb_prog;
args.protocol = XPRT_TRANSPORT_TCP;
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt)
+ if (!conn->cb_xprt) {
+ trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
+ }
clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -925,32 +930,27 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
/* Create RPC client */
client = rpc_create(&args);
if (IS_ERR(client)) {
- dprintk("NFSD: couldn't create callback client: %ld\n",
- PTR_ERR(client));
+ trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
return PTR_ERR(client);
}
cred = get_backchannel_cred(clp, client, ses);
if (!cred) {
+ trace_nfsd_cb_setup_err(clp, -ENOMEM);
rpc_shutdown_client(client);
return -ENOMEM;
}
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
+ trace_nfsd_cb_setup(clp);
return 0;
}
-static void warn_no_callback_path(struct nfs4_client *clp, int reason)
-{
- dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
- (int)clp->cl_name.len, clp->cl_name.data, reason);
-}
-
static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
clp->cl_cb_state = NFSD4_CB_DOWN;
- warn_no_callback_path(clp, reason);
+ trace_nfsd_cb_state(clp);
}
static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
@@ -958,17 +958,20 @@ static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
clp->cl_cb_state = NFSD4_CB_FAULT;
- warn_no_callback_path(clp, reason);
+ trace_nfsd_cb_state(clp);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
+ trace_nfsd_cb_done(clp, task->tk_status);
if (task->tk_status)
nfsd4_mark_cb_down(clp, task->tk_status);
- else
+ else {
clp->cl_cb_state = NFSD4_CB_UP;
+ trace_nfsd_cb_state(clp);
+ }
}
static void nfsd4_cb_probe_release(void *calldata)
@@ -993,6 +996,7 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
void nfsd4_probe_callback(struct nfs4_client *clp)
{
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+ trace_nfsd_cb_state(clp);
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
nfsd4_run_cb(&clp->cl_cb_null);
}
@@ -1009,6 +1013,7 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
spin_lock(&clp->cl_lock);
memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
spin_unlock(&clp->cl_lock);
+ trace_nfsd_cb_state(clp);
}
/*
@@ -1165,8 +1170,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
- dprintk("%s: minorversion=%d\n", __func__,
- clp->cl_minorversion);
+ trace_nfsd_cb_done(clp, task->tk_status);
if (!nfsd4_cb_sequence_done(task, cb))
return;
@@ -1271,6 +1275,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
* kill the old client:
*/
if (clp->cl_cb_client) {
+ trace_nfsd_cb_shutdown(clp);
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
@@ -1301,6 +1306,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses);
if (err) {
nfsd4_mark_cb_down(clp, err);
+ if (c)
+ svc_xprt_put(c->cn_xprt);
return;
}
}
@@ -1314,6 +1321,8 @@ nfsd4_run_cb_work(struct work_struct *work)
struct rpc_clnt *clnt;
int flags;
+ trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
+
if (cb->cb_need_restart) {
cb->cb_need_restart = false;
} else {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0e75f7fb5fec..a09c35f0f6f0 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1155,7 +1155,7 @@ extern void nfs_sb_deactive(struct super_block *sb);
#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
-/**
+/*
* Support one copy source server for now.
*/
static __be32
@@ -1245,10 +1245,9 @@ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
mntput(ss_mnt);
}
-/**
- * nfsd4_setup_inter_ssc
- *
+/*
* Verify COPY destination stateid.
+ *
* Connect to the source server with NFSv4.1.
* Create the source struct file for nfsd_copy_range.
* Called with COPY cstate:
@@ -2302,6 +2301,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
}
check_if_stalefh_allowed(args);
+ rqstp->rq_lease_breaker = (void **)&cstate->clp;
+
trace_nfsd_compound(rqstp, args->opcnt);
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c107caa56525..bb3d2c32664a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -51,6 +51,7 @@
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -167,9 +168,6 @@ renew_client_locked(struct nfs4_client *clp)
return;
}
- dprintk("renewing client (clientid %08x/%08x)\n",
- clp->cl_clientid.cl_boot,
- clp->cl_clientid.cl_id);
list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = ktime_get_boottime_seconds();
}
@@ -1922,8 +1920,7 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
*/
if (clid->cl_boot == (u32)nn->boot_time)
return 0;
- dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n",
- clid->cl_boot, clid->cl_id, nn->boot_time);
+ trace_nfsd_clid_stale(clid);
return 1;
}
@@ -2406,6 +2403,11 @@ static void states_stop(struct seq_file *s, void *v)
spin_unlock(&clp->cl_lock);
}
+static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
+{
+ seq_printf(s, "filename: \"%pD2\"", f->nf_file);
+}
+
static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
struct inode *inode = f->nf_inode;
@@ -2422,6 +2424,12 @@ static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}
+static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
+{
+ seq_printf(s, "0x%.8x", stid->si_generation);
+ seq_printf(s, "%12phN", &stid->si_opaque);
+}
+
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_ol_stateid *ols;
@@ -2437,7 +2445,9 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
nf = st->sc_file;
file = find_any_file(nf);
- seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: open, ");
access = bmap_to_share_mode(ols->st_access_bmap);
deny = bmap_to_share_mode(ols->st_deny_bmap);
@@ -2451,6 +2461,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_superblock(s, file);
seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
nfsd_file_put(file);
@@ -2470,7 +2482,9 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
nf = st->sc_file;
file = find_any_file(nf);
- seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: lock, ");
/*
* Note: a lock stateid isn't really the same thing as a lock,
@@ -2482,6 +2496,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_superblock(s, file);
/* XXX: open stateid? */
seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
nfsd_file_put(file);
@@ -2499,7 +2515,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
nf = st->sc_file;
file = nf->fi_deleg_file;
- seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: deleg, ");
/* Kinda dead code as long as we only support read delegs: */
seq_printf(s, "access: %s, ",
@@ -2508,6 +2526,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
/* XXX: lease time, whether it's being recalled. */
nfs4_show_superblock(s, file);
+ seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
seq_printf(s, " }\n");
return 0;
@@ -2521,11 +2541,15 @@ static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
file = ls->ls_file;
- seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: layout, ");
/* XXX: What else would be useful? */
nfs4_show_superblock(s, file);
+ seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
seq_printf(s, " }\n");
return 0;
@@ -2845,14 +2869,12 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r
conn->cb_prog = se->se_callback_prog;
conn->cb_ident = se->se_callback_ident;
memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
+ trace_nfsd_cb_args(clp, conn);
return;
out_err:
conn->cb_addr.ss_family = AF_UNSPEC;
conn->cb_addrlen = 0;
- dprintk("NFSD: this client (clientid %08x/%08x) "
- "will not receive delegations\n",
- clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
-
+ trace_nfsd_cb_nodelegs(clp);
return;
}
@@ -3458,6 +3480,45 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
return nfs_ok;
}
+static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
+{
+ struct nfsd4_conn *c;
+
+ list_for_each_entry(c, &s->se_conns, cn_persession) {
+ if (c->cn_xprt == xpt) {
+ return c;
+ }
+ }
+ return NULL;
+}
+
+static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
+ struct nfsd4_session *session, u32 req)
+{
+ struct nfs4_client *clp = session->se_client;
+ struct svc_xprt *xpt = rqst->rq_xprt;
+ struct nfsd4_conn *c;
+ __be32 status;
+
+ /* Following the last paragraph of RFC 5661 Section 18.34.3: */
+ spin_lock(&clp->cl_lock);
+ c = __nfsd4_find_conn(xpt, session);
+ if (!c)
+ status = nfserr_noent;
+ else if (req == c->cn_flags)
+ status = nfs_ok;
+ else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
+ c->cn_flags != NFS4_CDFC4_BACK)
+ status = nfs_ok;
+ else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
+ c->cn_flags != NFS4_CDFC4_FORE)
+ status = nfs_ok;
+ else
+ status = nfserr_inval;
+ spin_unlock(&clp->cl_lock);
+ return status;
+}
+
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -3479,6 +3540,9 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
+ status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
+ if (status == nfs_ok || status == nfserr_inval)
+ goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
goto out;
@@ -3544,18 +3608,6 @@ out:
return status;
}
-static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
-{
- struct nfsd4_conn *c;
-
- list_for_each_entry(c, &s->se_conns, cn_persession) {
- if (c->cn_xprt == xpt) {
- return c;
- }
- }
- return NULL;
-}
-
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
@@ -3879,23 +3931,18 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (clp_used_exchangeid(conf))
goto out;
if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
- char addr_str[INET6_ADDRSTRLEN];
- rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
- sizeof(addr_str));
- dprintk("NFSD: setclientid: string in use by client "
- "at %s\n", addr_str);
+ trace_nfsd_clid_inuse_err(conf);
goto out;
}
}
unconf = find_unconfirmed_client_by_name(&clname, nn);
if (unconf)
unhash_client_locked(unconf);
+ /* We need to handle only case 1: probable callback update */
if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
- /* case 1: probable callback update */
copy_clid(new, conf);
gen_confirm(new, nn);
- } else /* case 4 (new client) or cases 2, 3 (client reboot): */
- ;
+ }
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
add_to_unconfirmed(new);
@@ -4076,7 +4123,6 @@ out_free_openowner_slab:
out_free_client_slab:
kmem_cache_destroy(client_slab);
out:
- dprintk("nfsd4: out of memory while initializing nfsv4\n");
return -ENOMEM;
}
@@ -4508,6 +4554,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
struct nfs4_file *fp = dp->dl_stid.sc_file;
+ trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
+
/*
* We don't want the locks code to timeout the lease for us;
* we'll remove it ourself if a delegation isn't returned
@@ -4522,6 +4570,19 @@ nfsd_break_deleg_cb(struct file_lock *fl)
return ret;
}
+static bool nfsd_breaker_owns_lease(struct file_lock *fl)
+{
+ struct nfs4_delegation *dl = fl->fl_owner;
+ struct svc_rqst *rqst;
+ struct nfs4_client *clp;
+
+ if (!i_am_nfsd())
+ return false;
+ rqst = kthread_data(current);
+ clp = *(rqst->rq_lease_breaker);
+ return dl->dl_stid.sc_client == clp;
+}
+
static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
struct list_head *dispose)
@@ -4533,6 +4594,7 @@ nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
}
static const struct lock_manager_operations nfsd_lease_mng_ops = {
+ .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
};
@@ -5018,8 +5080,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
- dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
- STATEID_VAL(&dp->dl_stid.sc_stateid));
+ trace_nfsd_deleg_open(&dp->dl_stid.sc_stateid);
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
nfs4_put_stid(&dp->dl_stid);
return;
@@ -5136,9 +5197,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
nfs4_open_delegation(current_fh, open, stp);
nodeleg:
status = nfs_ok;
-
- dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
- STATEID_VAL(&stp->st_stid.sc_stateid));
+ trace_nfsd_deleg_none(&stp->st_stid.sc_stateid);
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
@@ -5192,8 +5251,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- dprintk("process_renew(%08x/%08x): starting\n",
- clid->cl_boot, clid->cl_id);
+ trace_nfsd_clid_renew(clid);
status = lookup_clientid(clid, cstate, nn, false);
if (status)
goto out;
@@ -5214,6 +5272,7 @@ nfsd4_end_grace(struct nfsd_net *nn)
if (nn->grace_ended)
return;
+ trace_nfsd_grace_complete(nn);
nn->grace_ended = true;
/*
* If the server goes down again right now, an NFSv4
@@ -5279,13 +5338,10 @@ nfs4_laundromat(struct nfsd_net *nn)
copy_stateid_t *cps_t;
int i;
- dprintk("NFSD: laundromat service - starting\n");
-
if (clients_still_reclaiming(nn)) {
new_timeo = 0;
goto out;
}
- dprintk("NFSD: end of grace period\n");
nfsd4_end_grace(nn);
INIT_LIST_HEAD(&reaplist);
@@ -5307,8 +5363,7 @@ nfs4_laundromat(struct nfsd_net *nn)
break;
}
if (mark_client_expired_locked(clp)) {
- dprintk("NFSD: client in use (clientid %08x)\n",
- clp->cl_clientid.cl_id);
+ trace_nfsd_clid_expired(&clp->cl_clientid);
continue;
}
list_add(&clp->cl_lru, &reaplist);
@@ -5316,8 +5371,7 @@ nfs4_laundromat(struct nfsd_net *nn)
spin_unlock(&nn->client_lock);
list_for_each_safe(pos, next, &reaplist) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
- dprintk("NFSD: purging unused client (clientid %08x)\n",
- clp->cl_clientid.cl_id);
+ trace_nfsd_clid_purged(&clp->cl_clientid);
list_del_init(&clp->cl_lru);
expire_client(clp);
}
@@ -5407,7 +5461,6 @@ laundromat_main(struct work_struct *laundry)
laundromat_work);
t = nfs4_laundromat(nn);
- dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t);
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
@@ -5948,8 +6001,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
struct nfs4_stid *s;
struct nfs4_ol_stateid *stp = NULL;
- dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
- seqid, STATEID_VAL(stateid));
+ trace_nfsd_preprocess(seqid, stateid);
*stpp = NULL;
status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
@@ -6018,9 +6070,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
oo->oo_flags |= NFS4_OO_CONFIRMED;
nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
- dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
- __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
-
+ trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
nfsd4_client_record_create(oo->oo_owner.so_client);
status = nfs_ok;
put_stateid:
@@ -7072,7 +7122,7 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
- dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data);
+ trace_nfsd_clid_reclaim(nn, name.len, name.data);
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
@@ -7122,7 +7172,7 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
unsigned int strhashval;
struct nfs4_client_reclaim *crp = NULL;
- dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);
+ trace_nfsd_clid_find(nn, name.len, name.data);
strhashval = clientstr_hashval(name);
list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
@@ -7686,6 +7736,9 @@ nfsd_recall_delegations(struct list_head *reaplist)
list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
list_del_init(&dp->dl_recall_lru);
clp = dp->dl_stid.sc_client;
+
+ trace_nfsd_deleg_recall(&dp->dl_stid.sc_stateid);
+
/*
* We skipped all entries that had a zero dl_time before,
* so we can now reset the dl_time back to 0. If a delegation
@@ -7868,6 +7921,7 @@ nfs4_state_start_net(struct net *net)
goto skip_grace;
printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
nn->nfsd4_grace, net->ns.inum);
+ trace_nfsd_grace_start(nn);
queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
return 0;
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 96352ab7bd81..0a0cf1fd77d3 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -20,8 +20,7 @@
#include "nfsd.h"
#include "cache.h"
-
-#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
+#include "trace.h"
/*
* We use this value to determine the number of hash buckets from the max
@@ -36,6 +35,8 @@ struct nfsd_drc_bucket {
spinlock_t cache_lock;
};
+static struct kmem_cache *drc_slab;
+
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
struct shrink_control *sc);
@@ -95,7 +96,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
{
struct svc_cacherep *rp;
- rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
+ rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
@@ -129,7 +130,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
atomic_dec(&nn->num_drc_entries);
nn->drc_mem_usage -= sizeof(*rp);
}
- kmem_cache_free(nn->drc_slab, rp);
+ kmem_cache_free(drc_slab, rp);
}
static void
@@ -141,6 +142,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
spin_unlock(&b->cache_lock);
}
+int nfsd_drc_slab_create(void)
+{
+ drc_slab = kmem_cache_create("nfsd_drc",
+ sizeof(struct svc_cacherep), 0, 0, NULL);
+ return drc_slab ? 0: -ENOMEM;
+}
+
+void nfsd_drc_slab_free(void)
+{
+ kmem_cache_destroy(drc_slab);
+}
+
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
@@ -159,18 +172,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
if (status)
goto out_nomem;
- nn->drc_slab = kmem_cache_create("nfsd_drc",
- sizeof(struct svc_cacherep), 0, 0, NULL);
- if (!nn->drc_slab)
- goto out_shrinker;
-
nn->drc_hashtbl = kcalloc(hashsize,
sizeof(*nn->drc_hashtbl), GFP_KERNEL);
if (!nn->drc_hashtbl) {
nn->drc_hashtbl = vzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)));
if (!nn->drc_hashtbl)
- goto out_slab;
+ goto out_shrinker;
}
for (i = 0; i < hashsize; i++) {
@@ -180,8 +188,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
nn->drc_hashsize = hashsize;
return 0;
-out_slab:
- kmem_cache_destroy(nn->drc_slab);
out_shrinker:
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
@@ -209,8 +215,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
nn->drc_hashtbl = NULL;
nn->drc_hashsize = 0;
- kmem_cache_destroy(nn->drc_slab);
- nn->drc_slab = NULL;
}
/*
@@ -323,8 +327,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
const struct svc_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
- key->c_key.k_csum != rp->c_key.k_csum)
+ key->c_key.k_csum != rp->c_key.k_csum) {
++nn->payload_misses;
+ trace_nfsd_drc_mismatch(nn, key, rp);
+ }
return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
@@ -377,15 +383,22 @@ out:
return ret;
}
-/*
+/**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
+ *
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
* a suitable one isn't there, then drop the cache_lock and allocate a
* new one, then search again in case one got inserted while this thread
* didn't hold the lock.
+ *
+ * Return values:
+ * %RC_DOIT: Process the request normally
+ * %RC_REPLY: Reply from cache
+ * %RC_DROPIT: Do not process the request further
*/
-int
-nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp, *found;
@@ -399,7 +412,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
nfsdstats.rcnocache++;
- return rtn;
+ goto out;
}
csum = nfsd_cache_csum(rqstp);
@@ -409,10 +422,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* preallocate an entry.
*/
rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
- if (!rp) {
- dprintk("nfsd: unable to allocate DRC entry!\n");
- return rtn;
- }
+ if (!rp)
+ goto out;
spin_lock(&b->cache_lock);
found = nfsd_cache_insert(b, rp, nn);
@@ -431,8 +442,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
/* go ahead and prune the cache */
prune_bucket(b, nn);
- out:
+
+out_unlock:
spin_unlock(&b->cache_lock);
+out:
return rtn;
found_entry:
@@ -442,13 +455,13 @@ found_entry:
/* Request being processed */
if (rp->c_state == RC_INPROG)
- goto out;
+ goto out_trace;
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
- goto out;
+ goto out_trace;
/* Compose RPC reply header */
switch (rp->c_type) {
@@ -460,21 +473,26 @@ found_entry:
break;
case RC_REPLBUFF:
if (!nfsd_cache_append(rqstp, &rp->c_replvec))
- goto out; /* should not happen */
+ goto out_unlock; /* should not happen */
rtn = RC_REPLY;
break;
default:
- printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
- nfsd_reply_cache_free_locked(b, rp, nn);
+ WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
}
- goto out;
+out_trace:
+ trace_nfsd_drc_found(nn, rqstp, rtn);
+ goto out_unlock;
}
-/*
- * Update a cache entry. This is called from nfsd_dispatch when
- * the procedure has been executed and the complete reply is in
- * rqstp->rq_res.
+/**
+ * nfsd_cache_update - Update an entry in the duplicate reply cache.
+ * @rqstp: svc_rqst with a finished Reply
+ * @cachetype: which cache to update
+ * @statp: Reply's status code
+ *
+ * This is called from nfsd_dispatch when the procedure has been
+ * executed and the complete reply is in rqstp->rq_res.
*
* We're copying around data here rather than swapping buffers because
* the toplevel loop requires max-sized buffers, which would be a waste
@@ -487,8 +505,7 @@ found_entry:
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
-void
-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp = rqstp->rq_cacherep;
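The nfscache.c hunks above move the DRC entry cache from a per-net-namespace nn->drc_slab to a single module-wide drc_slab, created once in nfsd_drc_slab_create() and torn down in nfsd_drc_slab_free(). As an illustrative aside (not part of the patch), here is a minimal sketch of the kmem_cache lifecycle this relies on, using a hypothetical struct foo:

#include <linux/slab.h>

struct foo {
        int a;
};

static struct kmem_cache *foo_slab;

static int foo_slab_create(void)
{
        /* one cache for the whole module, analogous to drc_slab above */
        foo_slab = kmem_cache_create("foo_slab", sizeof(struct foo), 0, 0, NULL);
        return foo_slab ? 0 : -ENOMEM;
}

static void foo_slab_free(void)
{
        /* kmem_cache_destroy() tolerates a NULL cache pointer */
        kmem_cache_destroy(foo_slab);
}

static struct foo *foo_alloc(void)
{
        return kmem_cache_alloc(foo_slab, GFP_KERNEL);  /* per-object allocation */
}

static void foo_free(struct foo *f)
{
        kmem_cache_free(foo_slab, f);
}
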
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3bb2db947d29..b68e96681522 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -238,7 +238,7 @@ static inline struct net *netns(struct file *file)
return file_inode(file)->i_sb->s_fs_info;
}
-/**
+/*
* write_unlock_ip - Release all locks used by a client
*
* Experimental.
@@ -277,7 +277,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
return nlmsvc_unlock_all_by_ip(sap);
}
-/**
+/*
* write_unlock_fs - Release all locks on a local file system
*
* Experimental.
@@ -327,7 +327,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
return error;
}
-/**
+/*
* write_filehandle - Get a variable-length NFS file handle by path
*
* On input, the buffer contains a '\n'-terminated C string comprised of
@@ -402,7 +402,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
return mesg - buf;
}
-/**
+/*
* write_threads - Start NFSD, or report the current number of running threads
*
* Input:
@@ -452,7 +452,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
-/**
+/*
* write_pool_threads - Set or report the current number of threads per pool
*
* Input:
@@ -661,7 +661,7 @@ out:
return tlen + len;
}
-/**
+/*
* write_versions - Set or report the available NFS protocol versions
*
* Input:
@@ -811,7 +811,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size,
return -EINVAL;
}
-/**
+/*
* write_ports - Pass a socket file descriptor or transport name to listen on
*
* Input:
@@ -867,7 +867,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
int nfsd_max_blksize;
-/**
+/*
* write_maxblksize - Set or report the current NFS blksize
*
* Input:
@@ -917,7 +917,7 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize);
}
-/**
+/*
* write_maxconn - Set or report the current max number of connections
*
* Input:
@@ -998,7 +998,7 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
return rv;
}
-/**
+/*
* write_leasetime - Set or report the current NFSv4 lease time
*
* Input:
@@ -1025,7 +1025,7 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
}
-/**
+/*
* write_gracetime - Set or report current NFSv4 grace period time
*
* As above, but sets the time of the NFSv4 grace period.
@@ -1069,7 +1069,7 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
nfs4_recoverydir());
}
-/**
+/*
* write_recoverydir - Set or report the pathname of the recovery directory
*
* Input:
@@ -1101,7 +1101,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
return rv;
}
-/**
+/*
* write_v4_end_grace - release grace period for nfsd's v4.x lock manager
*
* Input:
@@ -1533,6 +1533,9 @@ static int __init init_nfsd(void)
goto out_free_slabs;
nfsd_fault_inject_init(); /* nfsd fault injection controls */
nfsd_stat_init(); /* Statistics */
+ retval = nfsd_drc_slab_create();
+ if (retval)
+ goto out_free_stat;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
retval = create_proc_exports_entry();
if (retval)
@@ -1546,6 +1549,8 @@ out_free_all:
remove_proc_entry("fs/nfs", NULL);
out_free_lockd:
nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+out_free_stat:
nfsd_stat_shutdown();
nfsd_fault_inject_cleanup();
nfsd4_exit_pnfs();
@@ -1560,6 +1565,7 @@ out_unregister_pernet:
static void __exit exit_nfsd(void)
{
+ nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
nfsd_stat_shutdown();
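init_nfsd() above gains one more setup step, nfsd_drc_slab_create(), together with a matching out_free_stat label so the error path still unwinds earlier steps in reverse order. The shape of that pattern, sketched with hypothetical step_*() helpers rather than the real nfsd ones:

static int __init demo_init(void)
{
        int ret;

        ret = step_a_init();            /* hypothetical */
        if (ret)
                return ret;

        ret = step_b_init();            /* a new step slots in here */
        if (ret)
                goto out_undo_a;

        ret = step_c_init();
        if (ret)
                goto out_undo_b;
        return 0;

out_undo_b:
        step_b_exit();                  /* unwind strictly in reverse order */
out_undo_a:
        step_a_exit();
        return ret;
}
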
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 2ab5569126b8..36cdd81b6688 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -88,6 +88,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_destroy(struct net *net);
+bool i_am_nfsd(void);
+
struct nfsdfs_client {
struct kref cl_ref;
void (*cl_release)(struct kref *kref);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ca9fd348548b..b603dfcdd361 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -601,6 +601,11 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
.svo_module = THIS_MODULE,
};
+bool i_am_nfsd(void)
+{
+ return kthread_func(current) == nfsd;
+}
+
int nfsd_create_serv(struct net *net)
{
int error;
@@ -1011,6 +1016,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*statp = rpc_garbage_args;
return 1;
}
+ rqstp->rq_lease_breaker = NULL;
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 68d3f30ee760..3b408532a5dc 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -64,13 +64,6 @@ typedef struct {
refcount_t sc_count;
} copy_stateid_t;
-#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
-#define STATEID_VAL(s) \
- (s)->si_opaque.so_clid.cl_boot, \
- (s)->si_opaque.so_clid.cl_id, \
- (s)->si_opaque.so_id, \
- (s)->si_generation
-
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 78c574251c60..1861db1bdc67 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -277,6 +277,7 @@ DECLARE_EVENT_CLASS(nfsd_stateid_class,
DEFINE_EVENT(nfsd_stateid_class, nfsd_##name, \
TP_PROTO(stateid_t *stp), \
TP_ARGS(stp))
+
DEFINE_STATEID_EVENT(layoutstate_alloc);
DEFINE_STATEID_EVENT(layoutstate_unhash);
DEFINE_STATEID_EVENT(layoutstate_free);
@@ -288,6 +289,138 @@ DEFINE_STATEID_EVENT(layout_recall_done);
DEFINE_STATEID_EVENT(layout_recall_fail);
DEFINE_STATEID_EVENT(layout_recall_release);
+DEFINE_STATEID_EVENT(deleg_open);
+DEFINE_STATEID_EVENT(deleg_none);
+DEFINE_STATEID_EVENT(deleg_break);
+DEFINE_STATEID_EVENT(deleg_recall);
+
+DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
+ TP_PROTO(u32 seqid, const stateid_t *stp),
+ TP_ARGS(seqid, stp),
+ TP_STRUCT__entry(
+ __field(u32, seqid)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ ),
+ TP_fast_assign(
+ __entry->seqid = seqid;
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ ),
+ TP_printk("seqid=%u client %08x:%08x stateid %08x:%08x",
+ __entry->seqid, __entry->cl_boot, __entry->cl_id,
+ __entry->si_id, __entry->si_generation)
+)
+
+#define DEFINE_STATESEQID_EVENT(name) \
+DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \
+ TP_PROTO(u32 seqid, const stateid_t *stp), \
+ TP_ARGS(seqid, stp))
+
+DEFINE_STATESEQID_EVENT(preprocess);
+DEFINE_STATESEQID_EVENT(open_confirm);
+
+DECLARE_EVENT_CLASS(nfsd_clientid_class,
+ TP_PROTO(const clientid_t *clid),
+ TP_ARGS(clid),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clid->cl_boot;
+ __entry->cl_id = clid->cl_id;
+ ),
+ TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
+)
+
+#define DEFINE_CLIENTID_EVENT(name) \
+DEFINE_EVENT(nfsd_clientid_class, nfsd_clid_##name, \
+ TP_PROTO(const clientid_t *clid), \
+ TP_ARGS(clid))
+
+DEFINE_CLIENTID_EVENT(expired);
+DEFINE_CLIENTID_EVENT(purged);
+DEFINE_CLIENTID_EVENT(renew);
+DEFINE_CLIENTID_EVENT(stale);
+
+DECLARE_EVENT_CLASS(nfsd_net_class,
+ TP_PROTO(const struct nfsd_net *nn),
+ TP_ARGS(nn),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ ),
+ TP_printk("boot_time=%16llx", __entry->boot_time)
+)
+
+#define DEFINE_NET_EVENT(name) \
+DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
+ TP_PROTO(const struct nfsd_net *nn), \
+ TP_ARGS(nn))
+
+DEFINE_NET_EVENT(grace_start);
+DEFINE_NET_EVENT(grace_complete);
+
+DECLARE_EVENT_CLASS(nfsd_clid_class,
+ TP_PROTO(const struct nfsd_net *nn,
+ unsigned int namelen,
+ const unsigned char *namedata),
+ TP_ARGS(nn, namelen, namedata),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(unsigned int, namelen)
+ __dynamic_array(unsigned char, name, namelen)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->namelen = namelen;
+ memcpy(__get_dynamic_array(name), namedata, namelen);
+ ),
+ TP_printk("boot_time=%16llx nfs4_clientid=%.*s",
+ __entry->boot_time, __entry->namelen, __get_str(name))
+)
+
+#define DEFINE_CLID_EVENT(name) \
+DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
+ TP_PROTO(const struct nfsd_net *nn, \
+ unsigned int namelen, \
+ const unsigned char *namedata), \
+ TP_ARGS(nn, namelen, namedata))
+
+DEFINE_CLID_EVENT(find);
+DEFINE_CLID_EVENT(reclaim);
+
+TRACE_EVENT(nfsd_clid_inuse_err,
+ TP_PROTO(const struct nfs4_client *clp),
+ TP_ARGS(clp),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, namelen)
+ __dynamic_array(unsigned char, name, clp->cl_name.len)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->addr, &clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ __entry->namelen = clp->cl_name.len;
+ memcpy(__get_dynamic_array(name), clp->cl_name.data,
+ clp->cl_name.len);
+ ),
+ TP_printk("nfs4_clientid %.*s already in use by %pISpc, client %08x:%08x",
+ __entry->namelen, __get_str(name), __entry->addr,
+ __entry->cl_boot, __entry->cl_id)
+)
+
TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
@@ -432,6 +565,218 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
__entry->nlink, __entry->mode, __entry->mask)
);
+#include "cache.h"
+
+TRACE_DEFINE_ENUM(RC_DROPIT);
+TRACE_DEFINE_ENUM(RC_REPLY);
+TRACE_DEFINE_ENUM(RC_DOIT);
+
+#define show_drc_retval(x) \
+ __print_symbolic(x, \
+ { RC_DROPIT, "DROPIT" }, \
+ { RC_REPLY, "REPLY" }, \
+ { RC_DOIT, "DOIT" })
+
+TRACE_EVENT(nfsd_drc_found,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_rqst *rqstp,
+ int result
+ ),
+ TP_ARGS(nn, rqstp, result),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(unsigned long, result)
+ __field(u32, xid)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->result = result;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x result=%s",
+ __entry->boot_time, __entry->xid,
+ show_drc_retval(__entry->result))
+
+);
+
+TRACE_EVENT(nfsd_drc_mismatch,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_cacherep *key,
+ const struct svc_cacherep *rp
+ ),
+ TP_ARGS(nn, key, rp),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(u32, xid)
+ __field(u32, cached)
+ __field(u32, ingress)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->xid = be32_to_cpu(key->c_key.k_xid);
+ __entry->cached = (__force u32)key->c_key.k_csum;
+ __entry->ingress = (__force u32)rp->c_key.k_csum;
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x",
+ __entry->boot_time, __entry->xid, __entry->cached,
+ __entry->ingress)
+);
+
+TRACE_EVENT(nfsd_cb_args,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfs4_cb_conn *conn
+ ),
+ TP_ARGS(clp, conn),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, prog)
+ __field(u32, ident)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __entry->prog = conn->cb_prog;
+ __entry->ident = conn->cb_ident;
+ memcpy(__entry->addr, &conn->cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client %08x:%08x callback addr=%pISpc prog=%u ident=%u",
+ __entry->cl_boot, __entry->cl_id,
+ __entry->addr, __entry->prog, __entry->ident)
+);
+
+TRACE_EVENT(nfsd_cb_nodelegs,
+ TP_PROTO(const struct nfs4_client *clp),
+ TP_ARGS(clp),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ ),
+ TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
+)
+
+TRACE_DEFINE_ENUM(NFSD4_CB_UP);
+TRACE_DEFINE_ENUM(NFSD4_CB_UNKNOWN);
+TRACE_DEFINE_ENUM(NFSD4_CB_DOWN);
+TRACE_DEFINE_ENUM(NFSD4_CB_FAULT);
+
+#define show_cb_state(val) \
+ __print_symbolic(val, \
+ { NFSD4_CB_UP, "UP" }, \
+ { NFSD4_CB_UNKNOWN, "UNKNOWN" }, \
+ { NFSD4_CB_DOWN, "DOWN" }, \
+ { NFSD4_CB_FAULT, "FAULT"})
+
+DECLARE_EVENT_CLASS(nfsd_cb_class,
+ TP_PROTO(const struct nfs4_client *clp),
+ TP_ARGS(clp),
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->state = clp->cl_cb_state;
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x state=%s",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ show_cb_state(__entry->state))
+);
+
+#define DEFINE_NFSD_CB_EVENT(name) \
+DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name, \
+ TP_PROTO(const struct nfs4_client *clp), \
+ TP_ARGS(clp))
+
+DEFINE_NFSD_CB_EVENT(setup);
+DEFINE_NFSD_CB_EVENT(state);
+DEFINE_NFSD_CB_EVENT(shutdown);
+
+TRACE_EVENT(nfsd_cb_setup_err,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ long error
+ ),
+ TP_ARGS(clp, error),
+ TP_STRUCT__entry(
+ __field(long, error)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x error=%ld",
+ __entry->addr, __entry->cl_boot, __entry->cl_id, __entry->error)
+);
+
+TRACE_EVENT(nfsd_cb_work,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const char *procedure
+ ),
+ TP_ARGS(clp, procedure),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __string(procedure, procedure)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_str(procedure, procedure)
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x procedure=%s",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_str(procedure))
+);
+
+TRACE_EVENT(nfsd_cb_done,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ int status
+ ),
+ TP_ARGS(clp, status),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(int, status)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __entry->status = status;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x status=%d",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __entry->status)
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
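The trace.h additions above all follow the kernel's DECLARE_EVENT_CLASS/DEFINE_EVENT pattern: each DEFINE_*_EVENT(name) line expands into a trace_nfsd_<name>() helper with the class's prototype, which the nfs4state.c and nfscache.c hunks then call. As a sketch only (the event name below is hypothetical and not added by this patch), a further event reusing nfsd_net_class would look like:

/* in fs/nfsd/trace.h, next to grace_start/grace_complete (hypothetical) */
DEFINE_NET_EVENT(grace_abort);

/* at a call site in fs/nfsd/nfs4state.c (hypothetical) */
trace_nfsd_grace_abort(nn);     /* records nn->boot_time, printed as "boot_time=%16llx" */
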
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index ceeb3b441844..28009ec54420 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
+#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
@@ -996,7 +997,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
unsigned int blkbits = inode->i_blkbits;
int ret, n;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
if (ret)
return ret;
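This hunk (like the ocfs2 and overlayfs fiemap hunks further down) replaces fiemap_check_flags() with fiemap_prep(), which validates the caller's fi_flags against what the filesystem supports, trims the requested range against i_size, and handles the FIEMAP_FLAG_SYNC writeback itself. A minimal sketch of the resulting ->fiemap handler shape, for a hypothetical filesystem that supports no extra flags and reports a single made-up extent:

#include <linux/fiemap.h>
#include <linux/fs.h>

static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                       u64 start, u64 len)
{
        int ret;

        /* validate fi_flags and clamp start/len; 0 = no filesystem-specific flags */
        ret = fiemap_prep(inode, fieinfo, start, &len, 0);
        if (ret)
                return ret;

        /* report one extent covering the request (physical == logical, hypothetical) */
        ret = fiemap_fill_next_extent(fieinfo, start, start, len,
                                      FIEMAP_EXTENT_LAST);
        return ret < 0 ? ret : 0;
}
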
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 445eef41bfaf..91b58c897f92 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
if (!nilfs->ns_writer)
return -ENOMEM;
+ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
+
err = nilfs_segctor_start_thread(nilfs->ns_writer);
if (err) {
kfree(nilfs->ns_writer);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index c18459cea6f4..85eda539b35f 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -70,7 +70,7 @@ static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
return !memcmp(fne1->name, fne2->name, fne1->name_len);
}
-static bool should_merge(struct fsnotify_event *old_fsn,
+static bool fanotify_should_merge(struct fsnotify_event *old_fsn,
struct fsnotify_event *new_fsn)
{
struct fanotify_event *old, *new;
@@ -129,7 +129,7 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
return 0;
list_for_each_entry_reverse(test_event, list, list) {
- if (should_merge(test_event, event)) {
+ if (fanotify_should_merge(test_event, event)) {
FANOTIFY_E(test_event)->mask |= new->mask;
return 1;
}
@@ -232,6 +232,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
+
+ /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
+ marks_ignored_mask |= mark->ignored_mask;
+
/*
* If the event is on dir and this mark doesn't care about
* events on dir, don't send it!
@@ -249,7 +253,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
continue;
marks_mask |= mark->mask;
- marks_ignored_mask |= mark->ignored_mask;
}
test_mask = event_mask & marks_mask & ~marks_ignored_mask;
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 35bfbf4a7aac..8ce7ccfc4b0d 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -89,7 +89,7 @@ struct fanotify_name_event {
__kernel_fsid_t fsid;
struct fanotify_fh dir_fh;
u8 name_len;
- char name[0];
+ char name[];
};
static inline struct fanotify_name_event *
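The char name[0] -> char name[] change converts a GNU zero-length array into a C99 flexible array member, the form the kernel has been standardizing on because it lets the compiler and bounds checks reason about the allocation. Purely as a sketch (the structure below is hypothetical, not fanotify's), such objects are typically sized with struct_size() at allocation time:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct name_rec {
        u8 name_len;
        char name[];            /* flexible array member, as in fanotify_name_event */
};

static struct name_rec *name_rec_alloc(const char *src, u8 len)
{
        /* struct_size() = sizeof(*rec) + (len + 1) * sizeof(rec->name[0]), overflow-checked */
        struct name_rec *rec = kmalloc(struct_size(rec, name, len + 1), GFP_KERNEL);

        if (!rec)
                return NULL;
        rec->name_len = len;
        memcpy(rec->name, src, len);
        rec->name[len] = '\0';
        return rec;
}
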
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 42cb794c62ac..63b5dffdca9e 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -328,7 +328,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
ret = -EFAULT;
/*
* Sanity check copy size in case get_one_event() and
- * fill_event_metadata() event_len sizes ever get out of sync.
+ * event_len sizes ever get out of sync.
*/
if (WARN_ON_ONCE(metadata.event_len > count))
goto out_close_fd;
@@ -487,8 +487,10 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
group = file->private_data;
- if (count > sizeof(response))
- count = sizeof(response);
+ if (count < sizeof(response))
+ return -EINVAL;
+
+ count = sizeof(response);
pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
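With the fanotify_write() change above, a write shorter than sizeof(struct fanotify_response) now fails with -EINVAL instead of being silently accepted, so permission-event listeners must always submit the full structure. A small userspace sketch of answering a permission event (the surrounding event loop and fanotify_init()/fanotify_mark() setup are assumed):

#include <sys/fanotify.h>
#include <unistd.h>

/* reply to a FAN_OPEN_PERM / FAN_ACCESS_PERM event described by 'metadata' */
static int allow_event(int fanotify_fd,
                       const struct fanotify_event_metadata *metadata)
{
        struct fanotify_response resp = {
                .fd = metadata->fd,
                .response = FAN_ALLOW,          /* or FAN_DENY to refuse access */
        };

        /* the whole structure must be written; short writes now return EINVAL */
        if (write(fanotify_fd, &resp, sizeof(resp)) != sizeof(resp))
                return -1;
        return close(metadata->fd);             /* release the event's file descriptor */
}
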
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index ef83f4020554..f0d6b54be412 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
#include <linux/exportfs.h>
#include "inotify/inotify.h"
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 133f723aca07..a4a4b1c64d32 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -25,6 +25,7 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
group->ops->free_group_priv(group);
mem_cgroup_put(group->memcg);
+ mutex_destroy(&group->mark_mutex);
kfree(group);
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 81ffc8629fc4..f88bbcc9efeb 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -764,20 +764,18 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
struct fsnotify_group *group;
struct inotify_inode_mark *i_mark;
struct fd f;
- int ret = 0;
+ int ret = -EINVAL;
f = fdget(fd);
if (unlikely(!f.file))
return -EBADF;
/* verify that this is indeed an inotify instance */
- ret = -EINVAL;
if (unlikely(f.file->f_op != &inotify_fops))
goto out;
group = f.file->private_data;
- ret = -EINVAL;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
goto out;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 1d96216dffd1..8387937b9d01 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -325,13 +325,16 @@ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
}
bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
+ __releases(&fsnotify_mark_srcu)
{
int type;
fsnotify_foreach_obj_type(type) {
/* This can fail if mark is being removed */
- if (!fsnotify_get_mark_safe(iter_info->marks[type]))
+ if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
+ __release(&fsnotify_mark_srcu);
goto fail;
+ }
}
/*
@@ -350,6 +353,7 @@ fail:
}
void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
+ __acquires(&fsnotify_mark_srcu)
{
int type;
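The __releases()/__acquires()/__release() markers added above are sparse lock-context annotations: they document that fsnotify_prepare_user_wait() leaves the fsnotify_mark_srcu read-side section and fsnotify_finish_user_wait() re-enters it, so the apparent imbalance is intentional rather than a bug. A generic sketch of the same annotation pattern on a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);

/* takes foo_lock; the matching unlock happens later in foo_end() */
static void foo_begin(void)
        __acquires(&foo_lock)
{
        spin_lock(&foo_lock);
}

/* releases the lock taken by foo_begin() */
static void foo_end(void)
        __releases(&foo_lock)
{
        spin_unlock(&foo_lock);
}
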
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 1177c33df895..aca16624b370 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config OCFS2_FS
tristate "OCFS2 file system support"
- depends on NET && SYSFS && CONFIGFS_FS
+ depends on INET && SYSFS && CONFIGFS_FS
select JBD2
select CRC32
select QUOTA
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index e3e2d1b2af51..a94852af5510 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -733,8 +733,6 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
return 0;
}
-#define OCFS2_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
-
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len)
{
@@ -746,7 +744,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
- ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS);
+ ret = fiemap_prep(inode, fieinfo, map_start, &map_len, 0);
if (ret)
return ret;
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3a44e461828a..25cabbfe87fc 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -62,7 +62,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
last_index = (size - 1) >> PAGE_SHIFT;
/*
- * There are cases that lead to the page no longer bebongs to the
+ * There are cases that lead to the page no longer belonging to the
* mapping.
* 1) pagecache truncates locally due to memory pressure.
* 2) pagecache truncates when another is taking EX lock against
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 2bb916d68576..538e839590ef 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -168,10 +168,7 @@ static DEFINE_SPINLOCK(orangefs_bufmap_lock);
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
- int i;
-
- for (i = 0; i < bufmap->page_count; i++)
- put_page(bufmap->page_array[i]);
+ unpin_user_pages(bufmap->page_array, bufmap->page_count);
}
static void
@@ -268,7 +265,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
int offset = 0, ret, i;
/* map the pages */
- ret = get_user_pages_fast((unsigned long)user_desc->ptr,
+ ret = pin_user_pages_fast((unsigned long)user_desc->ptr,
bufmap->page_count, FOLL_WRITE, bufmap->page_array);
if (ret < 0)
@@ -280,7 +277,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
for (i = 0; i < ret; i++) {
SetPageError(bufmap->page_array[i]);
- put_page(bufmap->page_array[i]);
+ unpin_user_page(bufmap->page_array[i]);
}
return -ENOMEM;
}
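The orangefs hunks above move from get_user_pages_fast()/put_page() to the FOLL_PIN interface: pin_user_pages_fast() for pages whose contents the kernel will read or write, paired with unpin_user_page()/unpin_user_pages() for release. A hedged sketch of that pairing, with hypothetical helper names:

#include <linux/mm.h>

/* pin an nr_pages-long user buffer so the kernel may write into it */
static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                struct page **pages)
{
        int pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);

        if (pinned < 0)
                return pinned;                  /* nothing was pinned */
        if (pinned != nr_pages) {
                /* partial pin: drop what we got, as orangefs does on its error path */
                unpin_user_pages(pages, pinned);
                return -ENOMEM;
        }
        return 0;
}

static void demo_unpin_user_buffer(struct page **pages, int nr_pages)
{
        unpin_user_pages(pages, nr_pages);      /* matches pin_user_pages_fast() */
}
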
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index c010c1fddafc..289b648ae196 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -79,7 +79,7 @@ DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq);
static int __init orangefs_init(void)
{
- int ret = -1;
+ int ret;
__u32 i = 0;
if (op_timeout_secs < 0)
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 9709cf22cab3..79dd052c7dbf 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -47,7 +47,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
ssize_t list_size, size, value_size = 0;
char *buf, *name, *value = NULL;
- int uninitialized_var(error);
+ int error = 0;
size_t slen;
if (!(old->d_inode->i_opflags & IOP_XATTR) ||
@@ -584,9 +584,10 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
.link = c->link
};
- err = ovl_lock_rename_workdir(c->workdir, c->destdir);
- if (err)
- return err;
+ /* workdir and destdir could be the same when copying up to indexdir */
+ err = -EIO;
+ if (lock_rename(c->workdir, c->destdir) != NULL)
+ goto unlock;
err = ovl_prep_cu_creds(c->dentry, &cc);
if (err)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 279009dee366..1bba4813f9cb 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -62,35 +62,59 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir)
}
/* caller holds i_mutex on workdir */
-static struct dentry *ovl_whiteout(struct dentry *workdir)
+static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
{
int err;
struct dentry *whiteout;
+ struct dentry *workdir = ofs->workdir;
struct inode *wdir = workdir->d_inode;
- whiteout = ovl_lookup_temp(workdir);
- if (IS_ERR(whiteout))
- return whiteout;
+ if (!ofs->whiteout) {
+ whiteout = ovl_lookup_temp(workdir);
+ if (IS_ERR(whiteout))
+ goto out;
- err = ovl_do_whiteout(wdir, whiteout);
- if (err) {
- dput(whiteout);
- whiteout = ERR_PTR(err);
+ err = ovl_do_whiteout(wdir, whiteout);
+ if (err) {
+ dput(whiteout);
+ whiteout = ERR_PTR(err);
+ goto out;
+ }
+ ofs->whiteout = whiteout;
}
+ if (ofs->share_whiteout) {
+ whiteout = ovl_lookup_temp(workdir);
+ if (IS_ERR(whiteout))
+ goto out;
+
+ err = ovl_do_link(ofs->whiteout, wdir, whiteout);
+ if (!err)
+ goto out;
+
+ if (err != -EMLINK) {
+ pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n",
+ ofs->whiteout->d_inode->i_nlink, err);
+ ofs->share_whiteout = false;
+ }
+ dput(whiteout);
+ }
+ whiteout = ofs->whiteout;
+ ofs->whiteout = NULL;
+out:
return whiteout;
}
/* Caller must hold i_mutex on both workdir and dir */
-int ovl_cleanup_and_whiteout(struct dentry *workdir, struct inode *dir,
+int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir,
struct dentry *dentry)
{
- struct inode *wdir = workdir->d_inode;
+ struct inode *wdir = ofs->workdir->d_inode;
struct dentry *whiteout;
int err;
int flags = 0;
- whiteout = ovl_whiteout(workdir);
+ whiteout = ovl_whiteout(ofs);
err = PTR_ERR(whiteout);
if (IS_ERR(whiteout))
return err;
@@ -262,6 +286,8 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
inode = ovl_get_inode(dentry->d_sb, &oip);
if (IS_ERR(inode))
return PTR_ERR(inode);
+ if (inode == oip.newinode)
+ ovl_set_flag(OVL_UPPERDATA, inode);
} else {
WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
dput(newdentry);
@@ -715,6 +741,7 @@ static bool ovl_matches_upper(struct dentry *dentry, struct dentry *upper)
static int ovl_remove_and_whiteout(struct dentry *dentry,
struct list_head *list)
{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct dentry *workdir = ovl_workdir(dentry);
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct dentry *upper;
@@ -748,7 +775,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
goto out_dput_upper;
}
- err = ovl_cleanup_and_whiteout(workdir, d_inode(upperdir), upper);
+ err = ovl_cleanup_and_whiteout(ofs, d_inode(upperdir), upper);
if (err)
goto out_d_drop;
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index ed5c1078919c..8f4286450f92 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -204,7 +204,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && ofs->upper_mnt)
+ if (d_is_dir(dentry) && ovl_upper_mnt(ofs))
return ovl_connect_layer(dentry);
/* Lower file handle for indexed and non-upper dir/non-dir */
@@ -231,12 +231,9 @@ static int ovl_dentry_to_fid(struct dentry *dentry, u32 *fid, int buflen)
if (IS_ERR(fh))
return PTR_ERR(fh);
- err = -EOVERFLOW;
len = OVL_FH_LEN(fh);
- if (len > buflen)
- goto fail;
-
- memcpy(fid, fh, len);
+ if (len <= buflen)
+ memcpy(fid, fh, len);
err = len;
out:
@@ -244,9 +241,8 @@ out:
return err;
fail:
- pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
- dentry, err, buflen, fh ? (int)fh->fb.len : 0,
- fh ? fh->fb.type : 0);
+ pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n",
+ dentry, err);
goto out;
}
@@ -254,7 +250,7 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
struct inode *parent)
{
struct dentry *dentry;
- int bytes = *max_len << 2;
+ int bytes, buflen = *max_len << 2;
/* TODO: encode connectable file handles */
if (parent)
@@ -264,12 +260,14 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
if (WARN_ON(!dentry))
return FILEID_INVALID;
- bytes = ovl_dentry_to_fid(dentry, fid, bytes);
+ bytes = ovl_dentry_to_fid(dentry, fid, buflen);
dput(dentry);
if (bytes <= 0)
return FILEID_INVALID;
*max_len = bytes >> 2;
+ if (bytes > buflen)
+ return FILEID_INVALID;
return OVL_FILEID_V1;
}
@@ -679,10 +677,10 @@ static struct dentry *ovl_upper_fh_to_d(struct super_block *sb,
struct dentry *dentry;
struct dentry *upper;
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
return ERR_PTR(-EACCES);
- upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true);
+ upper = ovl_decode_real_fh(fh, ovl_upper_mnt(ofs), true);
if (IS_ERR_OR_NULL(upper))
return upper;
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 87c362f65448..01820e654a21 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -10,6 +10,7 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/splice.h>
+#include <linux/security.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include "overlayfs.h"
@@ -39,10 +40,22 @@ static struct file *ovl_open_realfile(const struct file *file,
struct file *realfile;
const struct cred *old_cred;
int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY;
+ int acc_mode = ACC_MODE(flags);
+ int err;
+
+ if (flags & O_APPEND)
+ acc_mode |= MAY_APPEND;
old_cred = ovl_override_creds(inode->i_sb);
- realfile = open_with_fake_path(&file->f_path, flags, realinode,
- current_cred());
+ err = inode_permission(realinode, MAY_OPEN | acc_mode);
+ if (err) {
+ realfile = ERR_PTR(err);
+ } else if (!inode_owner_or_capable(realinode)) {
+ realfile = ERR_PTR(-EPERM);
+ } else {
+ realfile = open_with_fake_path(&file->f_path, flags, realinode,
+ current_cred());
+ }
revert_creds(old_cred);
pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
@@ -219,9 +232,8 @@ static void ovl_file_accessed(struct file *file)
touch_atime(&file->f_path);
}
-static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb)
+static rwf_t ovl_iocb_to_rwf(int ifl)
{
- int ifl = iocb->ki_flags;
rwf_t flags = 0;
if (ifl & IOCB_NOWAIT)
@@ -283,7 +295,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb));
+ ovl_iocb_to_rwf(iocb->ki_flags));
} else {
struct ovl_aio_req *aio_req;
@@ -336,7 +348,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (is_sync_kiocb(iocb)) {
file_start_write(real.file);
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb));
+ ovl_iocb_to_rwf(iocb->ki_flags));
file_end_write(real.file);
/* Update size */
ovl_copyattr(ovl_inode_real(inode), inode);
@@ -520,7 +532,9 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,
return ret;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_ioctl(real.file, cmd, arg);
+ ret = security_file_ioctl(real.file, cmd, arg);
+ if (!ret)
+ ret = vfs_ioctl(real.file, cmd, arg);
revert_creds(old_cred);
fdput(real);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 981f11ec51bc..8be6cd264f66 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -10,6 +10,7 @@
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/ratelimit.h>
+#include <linux/fiemap.h>
#include "overlayfs.h"
@@ -456,7 +457,7 @@ int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
if (flags & S_ATIME) {
struct ovl_fs *ofs = inode->i_sb->s_fs_info;
struct path upperpath = {
- .mnt = ofs->upper_mnt,
+ .mnt = ovl_upper_mnt(ofs),
.dentry = ovl_upperdentry_dereference(OVL_I(inode)),
};
@@ -479,10 +480,6 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -EOPNOTSUPP;
old_cred = ovl_override_creds(inode->i_sb);
-
- if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
- filemap_write_and_wait(realinode->i_mapping);
-
err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
revert_creds(old_cred);
@@ -908,7 +905,7 @@ struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
* Does overlay inode need to be hashed by lower inode?
*/
static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
- struct dentry *lower, struct dentry *index)
+ struct dentry *lower, bool index)
{
struct ovl_fs *ofs = sb->s_fs_info;
@@ -921,7 +918,7 @@ static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
return true;
/* Yes, if won't be copied up */
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
return true;
/* No, if lower hardlink is or will be broken on copy up */
@@ -957,7 +954,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
oip->index);
int fsid = bylower ? lowerpath->layer->fsid : 0;
- bool is_dir, metacopy = false;
+ bool is_dir;
unsigned long ino = 0;
int err = oip->newinode ? -EEXIST : -ENOMEM;
@@ -1018,15 +1015,6 @@ struct inode *ovl_get_inode(struct super_block *sb,
if (oip->index)
ovl_set_flag(OVL_INDEX, inode);
- if (upperdentry) {
- err = ovl_check_metacopy_xattr(upperdentry);
- if (err < 0)
- goto out_err;
- metacopy = err;
- if (!metacopy)
- ovl_set_flag(OVL_UPPERDATA, inode);
- }
-
OVL_I(inode)->redirect = oip->redirect;
if (bylower)
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 0db23baf98e7..3566282a9199 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -191,16 +191,36 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
}
+static struct dentry *ovl_lookup_positive_unlocked(const char *name,
+ struct dentry *base, int len,
+ bool drop_negative)
+{
+ struct dentry *ret = lookup_one_len_unlocked(name, base, len);
+
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ if (drop_negative && ret->d_lockref.count == 1) {
+ spin_lock(&ret->d_lock);
+ /* Recheck condition under lock */
+ if (d_is_negative(ret) && ret->d_lockref.count == 1)
+ __d_drop(ret);
+ spin_unlock(&ret->d_lock);
+ }
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+
static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
const char *name, unsigned int namelen,
size_t prelen, const char *post,
- struct dentry **ret)
+ struct dentry **ret, bool drop_negative)
{
struct dentry *this;
int err;
bool last_element = !post[0];
- this = lookup_positive_unlocked(name, base, namelen);
+ this = ovl_lookup_positive_unlocked(name, base, namelen, drop_negative);
if (IS_ERR(this)) {
err = PTR_ERR(this);
this = NULL;
@@ -276,7 +296,7 @@ out_err:
}
static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
- struct dentry **ret)
+ struct dentry **ret, bool drop_negative)
{
/* Counting down from the end, since the prefix can change */
size_t rem = d->name.len - 1;
@@ -285,7 +305,7 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
if (d->name.name[0] != '/')
return ovl_lookup_single(base, d, d->name.name, d->name.len,
- 0, "", ret);
+ 0, "", ret, drop_negative);
while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
const char *s = d->name.name + d->name.len - rem;
@@ -298,7 +318,8 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
return -EIO;
err = ovl_lookup_single(base, d, s, thislen,
- d->name.len - rem, next, &base);
+ d->name.len - rem, next, &base,
+ drop_negative);
dput(dentry);
if (err)
return err;
@@ -468,7 +489,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
if (IS_ERR_OR_NULL(fh))
return ERR_CAST(fh);
- upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true);
+ upper = ovl_decode_real_fh(fh, ovl_upper_mnt(ofs), true);
kfree(fh);
if (IS_ERR_OR_NULL(upper))
@@ -484,12 +505,6 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
return upper;
}
-/* Is this a leftover from create/whiteout of directory index entry? */
-static bool ovl_is_temp_index(struct dentry *index)
-{
- return index->d_name.name[0] == '#';
-}
-
/*
* Verify that an index entry name matches the origin file handle stored in
* OVL_XATTR_ORIGIN and that origin file handle can be decoded to lower path.
@@ -507,11 +522,6 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
if (!d_inode(index))
return 0;
- /* Cleanup leftover from index create/cleanup attempt */
- err = -ESTALE;
- if (ovl_is_temp_index(index))
- goto fail;
-
err = -EINVAL;
if (index->d_name.len < sizeof(struct ovl_fb)*2)
goto fail;
@@ -823,7 +833,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *this;
unsigned int i;
int err;
- bool metacopy = false;
+ bool uppermetacopy = false;
struct ovl_lookup_data d = {
.sb = dentry->d_sb,
.name = dentry->d_name,
@@ -841,7 +851,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
old_cred = ovl_override_creds(dentry->d_sb);
upperdir = ovl_dentry_upper(dentry->d_parent);
if (upperdir) {
- err = ovl_lookup_layer(upperdir, &d, &upperdentry);
+ err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
if (err)
goto out;
@@ -869,7 +879,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
goto out_put_upper;
if (d.metacopy)
- metacopy = true;
+ uppermetacopy = true;
}
if (d.redirect) {
@@ -899,13 +909,19 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
else
d.last = lower.layer->idx == roe->numlower;
- err = ovl_lookup_layer(lower.dentry, &d, &this);
+ err = ovl_lookup_layer(lower.dentry, &d, &this, false);
if (err)
goto out_put;
if (!this)
continue;
+ if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
+ err = -EPERM;
+ pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
+ goto out_put;
+ }
+
/*
* If no origin fh is stored in upper of a merge dir, store fh
* of lower dir and set upper parent "impure".
@@ -940,21 +956,21 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
origin = this;
}
- if (d.metacopy)
- metacopy = true;
- /*
- * Do not store intermediate metacopy dentries in chain,
- * except top most lower metacopy dentry
- */
if (d.metacopy && ctr) {
+ /*
+ * Do not store intermediate metacopy dentries in
+ * lower chain, except top most lower metacopy dentry.
+ * Continue the loop so that if there is an absolute
+ * redirect on this dentry, poe can be reset to roe.
+ */
dput(this);
- continue;
+ this = NULL;
+ } else {
+ stack[ctr].dentry = this;
+ stack[ctr].layer = lower.layer;
+ ctr++;
}
- stack[ctr].dentry = this;
- stack[ctr].layer = lower.layer;
- ctr++;
-
/*
* Following redirects can have security consequences: it's like
* a symlink into the lower layer without the permission checks.
@@ -982,22 +998,17 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
}
- if (metacopy) {
- /*
- * Found a metacopy dentry but did not find corresponding
- * data dentry
- */
- if (d.metacopy) {
- err = -EIO;
- goto out_put;
- }
-
- err = -EPERM;
- if (!ofs->config.metacopy) {
- pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n",
- dentry);
- goto out_put;
- }
+ /*
+ * For regular non-metacopy upper dentries, there is no lower
+ * path based lookup, hence ctr will be zero. If a dentry is found
+ * using ORIGIN xattr on upper, install it in stack.
+ *
+ * For metacopy dentry, path based lookup will find lower dentries.
+ * Just make sure a corresponding data dentry has been found.
+ */
+ if (d.metacopy || (uppermetacopy && !ctr)) {
+ err = -EIO;
+ goto out_put;
} else if (!d.is_dir && upperdentry && !ctr && origin_path) {
if (WARN_ON(stack != NULL)) {
err = -EIO;
@@ -1005,25 +1016,30 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
stack = origin_path;
ctr = 1;
+ origin = origin_path->dentry;
origin_path = NULL;
}
/*
- * Lookup index by lower inode and verify it matches upper inode.
- * We only trust dir index if we verified that lower dir matches
- * origin, otherwise dir index entries may be inconsistent and we
- * ignore them.
+ * Always lookup index if there is no-upperdentry.
+ *
+ * For the case of upperdentry, we have set origin by now if it
+ * needed to be set. There are basically three cases.
+ *
+ * For directories, lookup index by lower inode and verify it matches
+ * upper inode. We only trust dir index if we verified that lower dir
+ * matches origin, otherwise dir index entries may be inconsistent
+ * and we ignore them.
+ *
+ * For regular upper, we already set origin if upper had ORIGIN
+ * xattr. There is no verification though as there is no path
+ * based dentry lookup in lower in this case.
*
- * For non-dir upper metacopy dentry, we already set "origin" if we
- * verified that lower matched upper origin. If upper origin was
- * not present (because lower layer did not support fh encode/decode),
- * or indexing is not enabled, do not set "origin" and skip looking up
- * index. This case should be handled in same way as a non-dir upper
- * without ORIGIN is handled.
+ * For metacopy upper, we set a verified origin already if index
+ * is enabled and if upper had an ORIGIN xattr.
*
- * Always lookup index of non-dir non-metacopy and non-upper.
*/
- if (ctr && (!upperdentry || (!d.is_dir && !metacopy)))
+ if (!upperdentry && ctr)
origin = stack[0].dentry;
if (origin && ovl_indexdir(dentry->d_sb) &&
@@ -1074,6 +1090,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free_oe;
+ if (upperdentry && !uppermetacopy)
+ ovl_set_flag(OVL_UPPERDATA, inode);
}
ovl_dentry_update_reval(dentry, upperdentry,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index e6f3670146ed..b725c7f15ff4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -355,6 +355,9 @@ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len);
static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
{
+ if (fh_len < sizeof(struct ovl_fh))
+ return -EINVAL;
+
return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET);
}
@@ -394,8 +397,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
void ovl_cache_free(struct list_head *list);
void ovl_dir_cache_free(struct inode *inode);
int ovl_check_d_type_supported(struct path *realpath);
-void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
- struct dentry *dentry, int level);
+int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ struct dentry *dentry, int level);
int ovl_indexdir_cleanup(struct ovl_fs *ofs);
/* inode.c */
@@ -421,7 +424,7 @@ struct ovl_inode_params {
struct inode *newinode;
struct dentry *upperdentry;
struct ovl_path *lowerpath;
- struct dentry *index;
+ bool index;
unsigned int numlower;
char *redirect;
struct dentry *lowerdata;
@@ -455,7 +458,7 @@ static inline void ovl_copyflags(struct inode *from, struct inode *to)
/* dir.c */
extern const struct inode_operations ovl_dir_inode_operations;
-int ovl_cleanup_and_whiteout(struct dentry *workdir, struct inode *dir,
+int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir,
struct dentry *dentry);
struct ovl_cattr {
dev_t rdev;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 5762d802fe01..b429c80879ee 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -46,7 +46,6 @@ struct ovl_path {
/* private information held for overlayfs's superblock */
struct ovl_fs {
- struct vfsmount *upper_mnt;
unsigned int numlayer;
/* Number of unique fs among layers including upper fs */
unsigned int numfs;
@@ -68,8 +67,8 @@ struct ovl_fs {
/* Did we take the inuse lock? */
bool upperdir_locked;
bool workdir_locked;
+ bool share_whiteout;
/* Traps in ovl inode cache */
- struct inode *upperdir_trap;
struct inode *workbasedir_trap;
struct inode *workdir_trap;
struct inode *indexdir_trap;
@@ -77,8 +76,15 @@ struct ovl_fs {
int xino_mode;
/* For allocation of non-persistent inode numbers */
atomic_long_t last_ino;
+ /* Whiteout dentry cache */
+ struct dentry *whiteout;
};
+static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
+{
+ return ofs->layers[0].mnt;
+}
+
static inline struct ovl_fs *OVL_FS(struct super_block *sb)
{
return (struct ovl_fs *)sb->s_fs_info;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index e452ff7d583d..6918b98faeb6 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -297,7 +297,7 @@ static inline int ovl_dir_read(struct path *realpath,
struct file *realfile;
int err;
- realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
+ realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE);
if (IS_ERR(realfile))
return PTR_ERR(realfile);
@@ -743,8 +743,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
struct ovl_cache_entry *p;
+ const struct cred *old_cred;
int err;
+ old_cred = ovl_override_creds(dentry->d_sb);
if (!ctx->pos)
ovl_dir_reset(file);
@@ -758,17 +760,20 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
(ovl_same_fs(dentry->d_sb) &&
(ovl_is_impure_dir(file) ||
OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
- return ovl_iterate_real(file, ctx);
+ err = ovl_iterate_real(file, ctx);
+ } else {
+ err = iterate_dir(od->realfile, ctx);
}
- return iterate_dir(od->realfile, ctx);
+ goto out;
}
if (!od->cache) {
struct ovl_dir_cache *cache;
cache = ovl_cache_get(dentry);
+ err = PTR_ERR(cache);
if (IS_ERR(cache))
- return PTR_ERR(cache);
+ goto out;
od->cache = cache;
ovl_seek_cursor(od, ctx->pos);
@@ -780,7 +785,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
if (!p->ino) {
err = ovl_cache_update_ino(&file->f_path, p);
if (err)
- return err;
+ goto out;
}
if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
break;
@@ -788,7 +793,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
od->cursor = p->l_node.next;
ctx->pos++;
}
- return 0;
+ err = 0;
+out:
+ revert_creds(old_cred);
+ return err;
}
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
@@ -831,6 +839,19 @@ out_unlock:
return res;
}
+static struct file *ovl_dir_open_realfile(struct file *file,
+ struct path *realpath)
+{
+ struct file *res;
+ const struct cred *old_cred;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE));
+ revert_creds(old_cred);
+
+ return res;
+}
+
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
@@ -853,7 +874,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
struct path upperpath;
ovl_path_upper(dentry, &upperpath);
- realfile = ovl_path_open(&upperpath, O_RDONLY);
+ realfile = ovl_dir_open_realfile(file, &upperpath);
inode_lock(inode);
if (!od->upperfile) {
@@ -904,7 +925,7 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
return -ENOMEM;
type = ovl_path_real(file->f_path.dentry, &realpath);
- realfile = ovl_path_open(&realpath, file->f_flags);
+ realfile = ovl_dir_open_realfile(file, &realpath);
if (IS_ERR(realfile)) {
kfree(od);
return PTR_ERR(realfile);
@@ -1071,14 +1092,13 @@ out:
ovl_cache_free(&list);
}
-void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
struct dentry *dentry, int level)
{
int err;
if (!d_is_dir(dentry) || level > 1) {
- ovl_cleanup(dir, dentry);
- return;
+ return ovl_cleanup(dir, dentry);
}
err = ovl_do_rmdir(dir, dentry);
@@ -1088,8 +1108,10 @@ void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
inode_unlock(dir);
ovl_workdir_cleanup_recurse(&path, level + 1);
inode_lock_nested(dir, I_MUTEX_PARENT);
- ovl_cleanup(dir, dentry);
+ err = ovl_cleanup(dir, dentry);
}
+
+ return err;
}
int ovl_indexdir_cleanup(struct ovl_fs *ofs)
@@ -1098,7 +1120,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
struct dentry *indexdir = ofs->indexdir;
struct dentry *index = NULL;
struct inode *dir = indexdir->d_inode;
- struct path path = { .mnt = ofs->upper_mnt, .dentry = indexdir };
+ struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir };
LIST_HEAD(list);
struct rb_root root = RB_ROOT;
struct ovl_cache_entry *p;
@@ -1128,6 +1150,13 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
index = NULL;
break;
}
+ /* Cleanup leftover from index create/cleanup attempt */
+ if (index->d_name.name[0] == '#') {
+ err = ovl_workdir_cleanup(dir, path.mnt, index, 1);
+ if (err)
+ break;
+ goto next;
+ }
err = ovl_verify_index(ofs, index);
if (!err) {
goto next;
@@ -1146,7 +1175,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
* Whiteout orphan index to block future open by
* handle after overlay nlink dropped to zero.
*/
- err = ovl_cleanup_and_whiteout(indexdir, dir, index);
+ err = ovl_cleanup_and_whiteout(ofs, dir, index);
} else {
/* Cleanup orphan index entries */
err = ovl_cleanup(dir, index);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 732ad5495c92..91476bc422f9 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -211,24 +211,28 @@ static void ovl_destroy_inode(struct inode *inode)
static void ovl_free_fs(struct ovl_fs *ofs)
{
+ struct vfsmount **mounts;
unsigned i;
iput(ofs->workbasedir_trap);
iput(ofs->indexdir_trap);
iput(ofs->workdir_trap);
- iput(ofs->upperdir_trap);
+ dput(ofs->whiteout);
dput(ofs->indexdir);
dput(ofs->workdir);
if (ofs->workdir_locked)
ovl_inuse_unlock(ofs->workbasedir);
dput(ofs->workbasedir);
if (ofs->upperdir_locked)
- ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
- mntput(ofs->upper_mnt);
- for (i = 1; i < ofs->numlayer; i++) {
+ ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
+
+ /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */
+ mounts = (struct vfsmount **) ofs->layers;
+ for (i = 0; i < ofs->numlayer; i++) {
iput(ofs->layers[i].trap);
- mntput(ofs->layers[i].mnt);
+ mounts[i] = ofs->layers[i].mnt;
}
+ kern_unmount_array(mounts, ofs->numlayer);
kfree(ofs->layers);
for (i = 0; i < ofs->numfs; i++)
free_anon_bdev(ofs->fs[i].pseudo_dev);
@@ -257,12 +261,12 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
struct super_block *upper_sb;
int ret;
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
return 0;
/*
- * If this is a sync(2) call or an emergency sync, all the super blocks
- * will be iterated, including upper_sb, so no need to do anything.
+ * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
+ * All the super blocks will be iterated, including upper_sb.
*
* If this is a syncfs(2) call, then we do need to call
* sync_filesystem() on upper_sb, but enough if we do it when being
@@ -271,7 +275,7 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
if (!wait)
return 0;
- upper_sb = ofs->upper_mnt->mnt_sb;
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
down_read(&upper_sb->s_umount);
ret = sync_filesystem(upper_sb);
@@ -309,7 +313,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
/* Will this overlay be forced to mount/remount ro? */
static bool ovl_force_readonly(struct ovl_fs *ofs)
{
- return (!ofs->upper_mnt || !ofs->workdir);
+ return (!ovl_upper_mnt(ofs) || !ofs->workdir);
}
static const char *ovl_redirect_mode_def(void)
@@ -364,11 +368,20 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
static int ovl_remount(struct super_block *sb, int *flags, char *data)
{
struct ovl_fs *ofs = sb->s_fs_info;
+ struct super_block *upper_sb;
+ int ret = 0;
if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
return -EROFS;
- return 0;
+ if (*flags & SB_RDONLY && !sb_rdonly(sb)) {
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+ down_read(&upper_sb->s_umount);
+ ret = sync_filesystem(upper_sb);
+ up_read(&upper_sb->s_umount);
+ }
+
+ return ret;
}
static const struct super_operations ovl_super_operations = {
@@ -470,6 +483,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
char *p;
int err;
bool metacopy_opt = false, redirect_opt = false;
+ bool nfs_export_opt = false, index_opt = false;
config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
if (!config->redirect_mode)
@@ -519,18 +533,22 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
case OPT_INDEX_ON:
config->index = true;
+ index_opt = true;
break;
case OPT_INDEX_OFF:
config->index = false;
+ index_opt = true;
break;
case OPT_NFS_EXPORT_ON:
config->nfs_export = true;
+ nfs_export_opt = true;
break;
case OPT_NFS_EXPORT_OFF:
config->nfs_export = false;
+ nfs_export_opt = true;
break;
case OPT_XINO_ON:
@@ -552,6 +570,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
case OPT_METACOPY_OFF:
config->metacopy = false;
+ metacopy_opt = true;
break;
default:
@@ -601,6 +620,48 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
}
}
+ /* Resolve nfs_export -> index dependency */
+ if (config->nfs_export && !config->index) {
+ if (nfs_export_opt && index_opt) {
+ pr_err("conflicting options: nfs_export=on,index=off\n");
+ return -EINVAL;
+ }
+ if (index_opt) {
+ /*
+ * There was an explicit index=off that resulted
+ * in this conflict.
+ */
+ pr_info("disabling nfs_export due to index=off\n");
+ config->nfs_export = false;
+ } else {
+ /* Automatically enable index otherwise. */
+ config->index = true;
+ }
+ }
+
+ /* Resolve nfs_export -> !metacopy dependency */
+ if (config->nfs_export && config->metacopy) {
+ if (nfs_export_opt && metacopy_opt) {
+ pr_err("conflicting options: nfs_export=on,metacopy=on\n");
+ return -EINVAL;
+ }
+ if (metacopy_opt) {
+ /*
+ * There was an explicit metacopy=on that resulted
+ * in this conflict.
+ */
+ pr_info("disabling nfs_export due to metacopy=on\n");
+ config->nfs_export = false;
+ } else {
+ /*
+ * There was an explicit nfs_export=on that resulted
+ * in this conflict.
+ */
+ pr_info("disabling metacopy due to nfs_export=on\n");
+ config->metacopy = false;
+ }
+ }
+
return 0;
}
@@ -611,15 +672,12 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
const char *name, bool persist)
{
struct inode *dir = ofs->workbasedir->d_inode;
- struct vfsmount *mnt = ofs->upper_mnt;
+ struct vfsmount *mnt = ovl_upper_mnt(ofs);
struct dentry *work;
int err;
bool retried = false;
- bool locked = false;
inode_lock_nested(dir, I_MUTEX_PARENT);
- locked = true;
-
retry:
work = lookup_one_len(name, ofs->workbasedir, strlen(name));
@@ -680,9 +738,7 @@ retry:
goto out_err;
}
out_unlock:
- if (locked)
- inode_unlock(dir);
-
+ inode_unlock(dir);
return work;
out_dput:
@@ -779,11 +835,11 @@ static int ovl_lower_dir(const char *name, struct path *path,
err = ovl_mount_dir_noesc(name, path);
if (err)
- goto out;
+ return err;
err = ovl_check_namelen(path, ofs, name);
if (err)
- goto out_put;
+ return err;
*stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
@@ -805,11 +861,6 @@ static int ovl_lower_dir(const char *name, struct path *path,
ofs->xino_mode = -1;
return 0;
-
-out_put:
- path_put_init(path);
-out:
- return err;
}
/* Workdir should not be subdir of upperdir and vice versa */
@@ -1016,7 +1067,7 @@ static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
}
static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
- struct path *upperpath)
+ struct ovl_layer *upper_layer, struct path *upperpath)
{
struct vfsmount *upper_mnt;
int err;
@@ -1036,7 +1087,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
if (err)
goto out;
- err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap,
+ err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap,
"upperdir");
if (err)
goto out;
@@ -1050,9 +1101,23 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
/* Don't inherit atime flags */
upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
- ofs->upper_mnt = upper_mnt;
+ upper_layer->mnt = upper_mnt;
+ upper_layer->idx = 0;
+ upper_layer->fsid = 0;
- if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+ /*
+ * Inherit SB_NOSEC flag from upperdir.
+ *
+ * This optimization changes behavior when a security related attribute
+ * (suid/sgid/security.*) is changed on an underlying layer. This is
+ * okay because we don't yet have guarantees in that case, but it will
+ * need careful treatment once we want to honour changes to underlying
+ * filesystems.
+ */
+ if (upper_mnt->mnt_sb->s_flags & SB_NOSEC)
+ sb->s_flags |= SB_NOSEC;
+
+ if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) {
ofs->upperdir_locked = true;
} else {
err = ovl_report_in_use(ofs, "upperdir");
@@ -1128,7 +1193,7 @@ out_unlock:
static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
struct path *workpath)
{
- struct vfsmount *mnt = ofs->upper_mnt;
+ struct vfsmount *mnt = ovl_upper_mnt(ofs);
struct dentry *temp;
bool rename_whiteout;
bool d_type;
@@ -1272,7 +1337,7 @@ out:
static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
struct ovl_entry *oe, struct path *upperpath)
{
- struct vfsmount *mnt = ofs->upper_mnt;
+ struct vfsmount *mnt = ovl_upper_mnt(ofs);
int err;
err = mnt_want_write(mnt);
@@ -1328,7 +1393,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
{
unsigned int i;
- if (!ofs->config.nfs_export && !ofs->upper_mnt)
+ if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs))
return true;
for (i = 0; i < ofs->numfs; i++) {
@@ -1388,18 +1453,13 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
}
static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
- struct path *stack, unsigned int numlower)
+ struct path *stack, unsigned int numlower,
+ struct ovl_layer *layers)
{
int err;
unsigned int i;
- struct ovl_layer *layers;
err = -ENOMEM;
- layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
- if (!layers)
- goto out;
- ofs->layers = layers;
-
ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
if (ofs->fs == NULL)
goto out;
@@ -1407,11 +1467,6 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
/* idx/fsid 0 are reserved for upper fs even with lower only overlay */
ofs->numfs++;
- layers[0].mnt = ofs->upper_mnt;
- layers[0].idx = 0;
- layers[0].fsid = 0;
- ofs->numlayer = 1;
-
/*
* All lower layers that share the same fs as upper layer, use the same
* pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower
@@ -1424,8 +1479,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
goto out;
}
- if (ofs->upper_mnt) {
- ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
+ if (ovl_upper_mnt(ofs)) {
+ ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb;
ofs->fs[0].is_lower = false;
}
@@ -1480,7 +1535,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
* inode number or a non persistent inode number allocated from a
* dedicated range.
*/
- if (ofs->numfs - !ofs->upper_mnt == 1) {
+ if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) {
if (ofs->config.xino == OVL_XINO_ON)
pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
ofs->xino_mode = 0;
@@ -1509,44 +1564,30 @@ out:
}
static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
- struct ovl_fs *ofs)
+ const char *lower, unsigned int numlower,
+ struct ovl_fs *ofs, struct ovl_layer *layers)
{
int err;
- char *lowertmp, *lower;
struct path *stack = NULL;
- unsigned int stacklen, numlower = 0, i;
+ unsigned int i;
struct ovl_entry *oe;
- err = -ENOMEM;
- lowertmp = kstrdup(ofs->config.lowerdir, GFP_KERNEL);
- if (!lowertmp)
- goto out_err;
-
- err = -EINVAL;
- stacklen = ovl_split_lowerdirs(lowertmp);
- if (stacklen > OVL_MAX_STACK) {
- pr_err("too many lower directories, limit is %d\n",
- OVL_MAX_STACK);
- goto out_err;
- } else if (!ofs->config.upperdir && stacklen == 1) {
+ if (!ofs->config.upperdir && numlower == 1) {
pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
- goto out_err;
+ return ERR_PTR(-EINVAL);
} else if (!ofs->config.upperdir && ofs->config.nfs_export &&
ofs->config.redirect_follow) {
pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
- err = -ENOMEM;
- stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+ stack = kcalloc(numlower, sizeof(struct path), GFP_KERNEL);
if (!stack)
- goto out_err;
+ return ERR_PTR(-ENOMEM);
err = -EINVAL;
- lower = lowertmp;
- for (numlower = 0; numlower < stacklen; numlower++) {
- err = ovl_lower_dir(lower, &stack[numlower], ofs,
- &sb->s_stack_depth);
+ for (i = 0; i < numlower; i++) {
+ err = ovl_lower_dir(lower, &stack[i], ofs, &sb->s_stack_depth);
if (err)
goto out_err;
@@ -1560,7 +1601,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
goto out_err;
}
- err = ovl_get_layers(sb, ofs, stack, numlower);
+ err = ovl_get_layers(sb, ofs, stack, numlower, layers);
if (err)
goto out_err;
@@ -1578,7 +1619,6 @@ out:
for (i = 0; i < numlower; i++)
path_put(&stack[i]);
kfree(stack);
- kfree(lowertmp);
return oe;
@@ -1629,8 +1669,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
{
int i, err;
- if (ofs->upper_mnt) {
- err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
+ if (ovl_upper_mnt(ofs)) {
+ err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
"upperdir");
if (err)
return err;
@@ -1702,7 +1742,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
struct dentry *root_dentry;
struct ovl_entry *oe;
struct ovl_fs *ofs;
+ struct ovl_layer *layers;
struct cred *cred;
+ char *splitlower = NULL;
+ unsigned int numlower;
int err;
sb->s_d_op = &ovl_dentry_operations;
@@ -1716,6 +1759,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!cred)
goto out_err;
+ /* Is there a reason anyone would want not to share whiteouts? */
+ ofs->share_whiteout = true;
+
ofs->config.index = ovl_index_def;
ofs->config.nfs_export = ovl_nfs_export_def;
ofs->config.xino = ovl_xino_def();
@@ -1731,6 +1777,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_err;
}
+ err = -ENOMEM;
+ splitlower = kstrdup(ofs->config.lowerdir, GFP_KERNEL);
+ if (!splitlower)
+ goto out_err;
+
+ numlower = ovl_split_lowerdirs(splitlower);
+ if (numlower > OVL_MAX_STACK) {
+ pr_err("too many lower directories, limit is %d\n",
+ OVL_MAX_STACK);
+ goto out_err;
+ }
+
+ layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
+ if (!layers)
+ goto out_err;
+
+ ofs->layers = layers;
+ /* Layer 0 is reserved for upper even if there's no upper */
+ ofs->numlayer = 1;
+
sb->s_stack_depth = 0;
sb->s_maxbytes = MAX_LFS_FILESIZE;
atomic_long_set(&ofs->last_ino, 1);
@@ -1752,7 +1818,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_err;
}
- err = ovl_get_upper(sb, ofs, &upperpath);
+ err = ovl_get_upper(sb, ofs, &layers[0], &upperpath);
if (err)
goto out_err;
@@ -1763,31 +1829,35 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!ofs->workdir)
sb->s_flags |= SB_RDONLY;
- sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
- sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
+ sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
+ sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
}
- oe = ovl_get_lowerstack(sb, ofs);
+ oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
err = PTR_ERR(oe);
if (IS_ERR(oe))
goto out_err;
/* If the upper fs is nonexistent, we mark overlayfs r/o too */
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
sb->s_flags |= SB_RDONLY;
if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
+ /* index dir will act also as workdir */
+ dput(ofs->workdir);
+ ofs->workdir = NULL;
+ iput(ofs->workdir_trap);
+ ofs->workdir_trap = NULL;
+
err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
if (err)
goto out_free_oe;
/* Force r/o mount with no index dir */
- if (!ofs->indexdir) {
- dput(ofs->workdir);
- ofs->workdir = NULL;
+ if (ofs->indexdir)
+ ofs->workdir = dget(ofs->indexdir);
+ else
sb->s_flags |= SB_RDONLY;
- }
-
}
err = ovl_check_overlapping_layers(sb, ofs);
@@ -1797,7 +1867,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
/* Show index=off in /proc/mounts for forced r/o mount */
if (!ofs->indexdir) {
ofs->config.index = false;
- if (ofs->upper_mnt && ofs->config.nfs_export) {
+ if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) {
pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
@@ -1818,6 +1888,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
sb->s_flags |= SB_POSIXACL;
+ sb->s_iflags |= SB_I_SKIP_SYNC;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, upperpath.dentry, oe);
@@ -1825,6 +1896,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_free_oe;
mntput(upperpath.mnt);
+ kfree(splitlower);
sb->s_root = root_dentry;
@@ -1834,6 +1906,7 @@ out_free_oe:
ovl_entry_stack_free(oe);
kfree(oe);
out_err:
+ kfree(splitlower);
path_put(&upperpath);
ovl_free_fs(ofs);
out:
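
The option-dependency resolution added to ovl_parse_opt() above is driven purely by the mount data string: nfs_export=on implies index=on unless index=off was given explicitly, in which case the combination is rejected. A minimal userspace sketch of exercising it with mount(2); the /lower, /upper, /work and /merged paths are placeholders, not taken from this patch.

/* sketch: nfs_export=on auto-enables index; adding an explicit ",index=off"
 * makes ovl_parse_opt() above fail the mount with -EINVAL */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work,"
			   "nfs_export=on";

	if (mount("overlay", "/merged", "overlay", 0, opts))
		perror("mount");
	return 0;
}
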
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 36b60788ee47..56c1f89f20c9 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -18,13 +18,13 @@
int ovl_want_write(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
- return mnt_want_write(ofs->upper_mnt);
+ return mnt_want_write(ovl_upper_mnt(ofs));
}
void ovl_drop_write(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
- mnt_drop_write(ofs->upper_mnt);
+ mnt_drop_write(ovl_upper_mnt(ofs));
}
struct dentry *ovl_workdir(struct dentry *dentry)
@@ -150,7 +150,7 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
- path->mnt = ofs->upper_mnt;
+ path->mnt = ovl_upper_mnt(ofs);
path->dentry = ovl_dentry_upper(dentry);
}
@@ -459,7 +459,32 @@ bool ovl_is_whiteout(struct dentry *dentry)
struct file *ovl_path_open(struct path *path, int flags)
{
- return dentry_open(path, flags | O_NOATIME, current_cred());
+ struct inode *inode = d_inode(path->dentry);
+ int err, acc_mode;
+
+ if (flags & ~(O_ACCMODE | O_LARGEFILE))
+ BUG();
+
+ switch (flags & O_ACCMODE) {
+ case O_RDONLY:
+ acc_mode = MAY_READ;
+ break;
+ case O_WRONLY:
+ acc_mode = MAY_WRITE;
+ break;
+ default:
+ BUG();
+ }
+
+ err = inode_permission(inode, acc_mode | MAY_OPEN);
+ if (err)
+ return ERR_PTR(err);
+
+ /* O_NOATIME is an optimization, don't fail if not permitted */
+ if (inode_owner_or_capable(inode))
+ flags |= O_NOATIME;
+
+ return dentry_open(path, flags, current_cred());
}
/* Caller should hold ovl_inode->lock */
@@ -707,7 +732,8 @@ static void ovl_cleanup_index(struct dentry *dentry)
index = NULL;
} else if (ovl_index_all(dentry->d_sb)) {
/* Whiteout orphan index to block future open by handle */
- err = ovl_cleanup_and_whiteout(indexdir, dir, index);
+ err = ovl_cleanup_and_whiteout(OVL_FS(dentry->d_sb),
+ dir, index);
} else {
/* Cleanup orphan index entries */
err = ovl_cleanup(dir, index);
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 249672bf54fe..95882b3f5f62 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -350,7 +350,7 @@ posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
const struct posix_acl_entry *pa, *pe, *mask_obj;
int found = 0;
- want &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK;
+ want &= MAY_READ | MAY_WRITE | MAY_EXEC;
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch(pa->e_tag) {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 8e16f14bb05a..55ecbeb3a721 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -92,7 +92,6 @@
#include <linux/user_namespace.h>
#include <linux/fs_struct.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include "internal.h"
@@ -248,8 +247,8 @@ void render_sigset_t(struct seq_file *m, const char *header,
seq_putc(m, '\n');
}
-static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
- sigset_t *catch)
+static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *sigign,
+ sigset_t *sigcatch)
{
struct k_sigaction *k;
int i;
@@ -257,9 +256,9 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
k = p->sighand->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
if (k->sa.sa_handler == SIG_IGN)
- sigaddset(ign, i);
+ sigaddset(sigign, i);
else if (k->sa.sa_handler != SIG_DFL)
- sigaddset(catch, i);
+ sigaddset(sigcatch, i);
}
}
@@ -728,7 +727,7 @@ static int children_seq_show(struct seq_file *seq, void *v)
{
struct inode *inode = file_inode(seq->file);
- seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode)));
+ seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode->i_sb)));
return 0;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index eb2255e95f62..d86c0afc8a85 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -697,13 +697,21 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
* May current process learn task's sched/cmdline info (for hide_pid_min=1)
* or euid/egid (for hide_pid_min=2)?
*/
-static bool has_pid_permissions(struct pid_namespace *pid,
+static bool has_pid_permissions(struct proc_fs_info *fs_info,
struct task_struct *task,
- int hide_pid_min)
+ enum proc_hidepid hide_pid_min)
{
- if (pid->hide_pid < hide_pid_min)
+ /*
+ * If 'hidepid' mount option is set force a ptrace check,
+ * we indicate that we are using a filesystem syscall
+ * by passing PTRACE_MODE_READ_FSCREDS
+ */
+ if (fs_info->hide_pid == HIDEPID_NOT_PTRACEABLE)
+ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+
+ if (fs_info->hide_pid < hide_pid_min)
return true;
- if (in_group_p(pid->pid_gid))
+ if (in_group_p(fs_info->pid_gid))
return true;
return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
}
@@ -711,18 +719,18 @@ static bool has_pid_permissions(struct pid_namespace *pid,
static int proc_pid_permission(struct inode *inode, int mask)
{
- struct pid_namespace *pid = proc_pid_ns(inode);
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
bool has_perms;
task = get_proc_task(inode);
if (!task)
return -ESRCH;
- has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS);
+ has_perms = has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS);
put_task_struct(task);
if (!has_perms) {
- if (pid->hide_pid == HIDEPID_INVISIBLE) {
+ if (fs_info->hide_pid == HIDEPID_INVISIBLE) {
/*
* Let's make getdents(), stat(), and open()
* consistent with each other. If a process
@@ -746,7 +754,7 @@ static const struct inode_operations proc_def_inode_operations = {
static int proc_single_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
struct pid *pid = proc_pid(inode);
struct task_struct *task;
int ret;
@@ -1415,7 +1423,7 @@ static const struct file_operations proc_fail_nth_operations = {
static int sched_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
struct task_struct *p;
p = get_proc_task(inode);
@@ -1909,7 +1917,7 @@ int pid_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- struct pid_namespace *pid = proc_pid_ns(inode);
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
generic_fillattr(inode, stat);
@@ -1919,7 +1927,7 @@ int pid_getattr(const struct path *path, struct kstat *stat,
rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task) {
- if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
+ if (!has_pid_permissions(fs_info, task, HIDEPID_INVISIBLE)) {
rcu_read_unlock();
/*
* This doesn't prevent learning whether PID exists,
@@ -2104,11 +2112,11 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
- status = down_read_killable(&mm->mmap_sem);
+ status = mmap_read_lock_killable(mm);
if (!status) {
exact_vma_exists = !!find_exact_vma(mm, vm_start,
vm_end);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
}
@@ -2155,7 +2163,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
- rc = down_read_killable(&mm->mmap_sem);
+ rc = mmap_read_lock_killable(mm);
if (rc)
goto out_mmput;
@@ -2166,7 +2174,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
path_get(path);
rc = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_mmput:
mmput(mm);
@@ -2256,7 +2264,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
goto out_put_task;
result = ERR_PTR(-EINTR);
- if (down_read_killable(&mm->mmap_sem))
+ if (mmap_read_lock_killable(mm))
goto out_put_mm;
result = ERR_PTR(-ENOENT);
@@ -2269,7 +2277,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
(void *)(unsigned long)vma->vm_file->f_mode);
out_no_vma:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_put_mm:
mmput(mm);
out_put_task:
@@ -2314,7 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
if (!mm)
goto out_put_task;
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret) {
mmput(mm);
goto out_put_task;
@@ -2325,11 +2333,11 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
/*
* We need two passes here:
*
- * 1) Collect vmas of mapped files with mmap_sem taken
- * 2) Release mmap_sem and instantiate entries
+ * 1) Collect vmas of mapped files with mmap_lock taken
+ * 2) Release mmap_lock and instantiate entries
*
* otherwise we get lockdep complained, since filldir()
- * routine might require mmap_sem taken in might_fault().
+ * routine might require mmap_lock taken in might_fault().
*/
for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
@@ -2341,7 +2349,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
goto out_put_task;
}
@@ -2350,7 +2358,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p->end = vma->vm_end;
p->mode = vma->vm_file->f_mode;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
for (i = 0; i < nr_files; i++) {
@@ -2470,7 +2478,7 @@ static int proc_timers_open(struct inode *inode, struct file *file)
return -ENOMEM;
tp->pid = proc_pid(inode);
- tp->ns = proc_pid_ns(inode);
+ tp->ns = proc_pid_ns(inode->i_sb);
return 0;
}
@@ -2770,6 +2778,15 @@ static const struct pid_entry smack_attr_dir_stuff[] = {
LSM_DIR_OPS(smack);
#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+static const struct pid_entry apparmor_attr_dir_stuff[] = {
+ ATTR("apparmor", "current", 0666),
+ ATTR("apparmor", "prev", 0444),
+ ATTR("apparmor", "exec", 0666),
+};
+LSM_DIR_OPS(apparmor);
+#endif
+
static const struct pid_entry attr_dir_stuff[] = {
ATTR(NULL, "current", 0666),
ATTR(NULL, "prev", 0444),
@@ -2781,6 +2798,10 @@ static const struct pid_entry attr_dir_stuff[] = {
DIR("smack", 0555,
proc_smack_attr_dir_inode_ops, proc_smack_attr_dir_ops),
#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+ DIR("apparmor", 0555,
+ proc_apparmor_attr_dir_inode_ops, proc_apparmor_attr_dir_ops),
+#endif
};
static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
@@ -3312,6 +3333,7 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
{
struct task_struct *task;
unsigned tgid;
+ struct proc_fs_info *fs_info;
struct pid_namespace *ns;
struct dentry *result = ERR_PTR(-ENOENT);
@@ -3319,7 +3341,8 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
if (tgid == ~0U)
goto out;
- ns = dentry->d_sb->s_fs_info;
+ fs_info = proc_sb_info(dentry->d_sb);
+ ns = fs_info->pid_ns;
rcu_read_lock();
task = find_task_by_pid_ns(tgid, ns);
if (task)
@@ -3328,7 +3351,14 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
if (!task)
goto out;
+ /* Limit procfs to only ptraceable tasks */
+ if (fs_info->hide_pid == HIDEPID_NOT_PTRACEABLE) {
+ if (!has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS))
+ goto out_put_task;
+ }
+
result = proc_pid_instantiate(dentry, task, NULL);
+out_put_task:
put_task_struct(task);
out:
return result;
@@ -3354,20 +3384,8 @@ retry:
pid = find_ge_pid(iter.tgid, ns);
if (pid) {
iter.tgid = pid_nr_ns(pid, ns);
- iter.task = pid_task(pid, PIDTYPE_PID);
- /* What we to know is if the pid we have find is the
- * pid of a thread_group_leader. Testing for task
- * being a thread_group_leader is the obvious thing
- * todo but there is a window when it fails, due to
- * the pid transfer logic in de_thread.
- *
- * So we perform the straight forward test of seeing
- * if the pid we have found is the pid of a thread
- * group leader, and don't worry if the task we have
- * found doesn't happen to be a thread group leader.
- * As we don't care in the case of readdir.
- */
- if (!iter.task || !has_group_leader_pid(iter.task)) {
+ iter.task = pid_task(pid, PIDTYPE_TGID);
+ if (!iter.task) {
iter.tgid += 1;
goto retry;
}
@@ -3383,20 +3401,21 @@ retry:
int proc_pid_readdir(struct file *file, struct dir_context *ctx)
{
struct tgid_iter iter;
- struct pid_namespace *ns = proc_pid_ns(file_inode(file));
+ struct proc_fs_info *fs_info = proc_sb_info(file_inode(file)->i_sb);
+ struct pid_namespace *ns = proc_pid_ns(file_inode(file)->i_sb);
loff_t pos = ctx->pos;
if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
return 0;
if (pos == TGID_OFFSET - 2) {
- struct inode *inode = d_inode(ns->proc_self);
+ struct inode *inode = d_inode(fs_info->proc_self);
if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
return 0;
ctx->pos = pos = pos + 1;
}
if (pos == TGID_OFFSET - 1) {
- struct inode *inode = d_inode(ns->proc_thread_self);
+ struct inode *inode = d_inode(fs_info->proc_thread_self);
if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
return 0;
ctx->pos = pos = pos + 1;
@@ -3410,7 +3429,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
unsigned int len;
cond_resched();
- if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
+ if (!has_pid_permissions(fs_info, iter.task, HIDEPID_INVISIBLE))
continue;
len = snprintf(name, sizeof(name), "%u", iter.tgid);
@@ -3610,6 +3629,7 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
struct task_struct *task;
struct task_struct *leader = get_proc_task(dir);
unsigned tid;
+ struct proc_fs_info *fs_info;
struct pid_namespace *ns;
struct dentry *result = ERR_PTR(-ENOENT);
@@ -3620,7 +3640,8 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
if (tid == ~0U)
goto out;
- ns = dentry->d_sb->s_fs_info;
+ fs_info = proc_sb_info(dentry->d_sb);
+ ns = fs_info->pid_ns;
rcu_read_lock();
task = find_task_by_pid_ns(tid, ns);
if (task)
@@ -3734,7 +3755,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
/* f_version caches the tgid value that the last readdir call couldn't
* return. lseek aka telldir automagically resets f_version to 0.
*/
- ns = proc_pid_ns(inode);
+ ns = proc_pid_ns(inode->i_sb);
tid = (int)file->f_version;
file->f_version = 0;
for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
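
The new apparmor_attr_dir_stuff[] entries above add /proc/<pid>/attr/apparmor/{current,prev,exec} when CONFIG_SECURITY_APPARMOR is enabled. A small sketch that reads the calling task's current label through that interface; it assumes AppArmor is built in and enabled, otherwise the open fails.

/* sketch: read the AppArmor label of the current task via the new
 * /proc/self/attr/apparmor/current entry (AppArmor must be enabled) */
#include <stdio.h>

int main(void)
{
	char label[256];
	FILE *f = fopen("/proc/self/attr/apparmor/current", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(label, sizeof(label), f))
		printf("apparmor label: %s", label);
	fclose(f);
	return 0;
}
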
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 4ed6dabdf6ff..2f9fa179194d 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -269,6 +269,11 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
+ struct proc_fs_info *fs_info = proc_sb_info(dir->i_sb);
+
+ if (fs_info->pidonly == PROC_PIDONLY_ON)
+ return ERR_PTR(-ENOENT);
+
return proc_lookup_de(dir, dentry, PDE(dir));
}
@@ -325,6 +330,10 @@ int proc_readdir_de(struct file *file, struct dir_context *ctx,
int proc_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
+
+ if (fs_info->pidonly == PROC_PIDONLY_ON)
+ return 1;
return proc_readdir_de(file, ctx, PDE(inode));
}
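
With subset=pid, proc_lookup() above returns -ENOENT for everything that is not per-PID and proc_readdir() emits no entries. A sketch that probes such a mount; the mount point is passed as an argument because no path is defined by this patch.

/* sketch: on a proc instance mounted with "subset=pid", a non-PID entry
 * such as meminfo is expected to fail the lookup with ENOENT */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[4096];
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <proc-mountpoint>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "%s/meminfo", argv[1]);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		printf("%s: %s\n", path, strerror(errno));
	else
		close(fd);
	return 0;
}
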
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index fb4cace9ea41..f40c2532c057 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
+#include <linux/bug.h>
#include <linux/uaccess.h>
@@ -165,15 +166,28 @@ void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock
deactivate_super(old_sb);
}
+static inline const char *hidepid2str(enum proc_hidepid v)
+{
+ switch (v) {
+ case HIDEPID_OFF: return "off";
+ case HIDEPID_NO_ACCESS: return "noaccess";
+ case HIDEPID_INVISIBLE: return "invisible";
+ case HIDEPID_NOT_PTRACEABLE: return "ptraceable";
+ }
+ WARN_ONCE(1, "bad hide_pid value: %d\n", v);
+ return "unknown";
+}
+
static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
- struct super_block *sb = root->d_sb;
- struct pid_namespace *pid = sb->s_fs_info;
+ struct proc_fs_info *fs_info = proc_sb_info(root->d_sb);
- if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
- seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
- if (pid->hide_pid != HIDEPID_OFF)
- seq_printf(seq, ",hidepid=%u", pid->hide_pid);
+ if (!gid_eq(fs_info->pid_gid, GLOBAL_ROOT_GID))
+ seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, fs_info->pid_gid));
+ if (fs_info->hide_pid != HIDEPID_OFF)
+ seq_printf(seq, ",hidepid=%s", hidepid2str(fs_info->hide_pid));
+ if (fs_info->pidonly != PROC_PIDONLY_OFF)
+ seq_printf(seq, ",subset=pid");
return 0;
}
@@ -464,6 +478,7 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
static int proc_reg_open(struct inode *inode, struct file *file)
{
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
typeof_member(struct proc_ops, proc_open) open;
@@ -477,6 +492,9 @@ static int proc_reg_open(struct inode *inode, struct file *file)
return rv;
}
+ if (fs_info->pidonly == PROC_PIDONLY_ON)
+ return -ENOENT;
+
/*
* Ensure that
* 1) PDE's ->release hook will be called no matter what
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index ecc63ce01be7..e9a6841fc25b 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -17,7 +17,6 @@
#include <linux/cma.h>
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "internal.h"
void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 14c2badb8fd9..13452b32e2bd 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -22,7 +22,6 @@
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index df2143e05c57..42c5128c7d1c 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bpf-cgroup.h>
+#include <linux/mount.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@@ -564,6 +565,10 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
if (!table->proc_handler)
goto out;
+ /* don't even try if the size is too large */
+ if (count > KMALLOC_MAX_SIZE)
+ return -ENOMEM;
+
if (write) {
kbuf = memdup_user_nul(ubuf, count);
if (IS_ERR(kbuf)) {
@@ -1703,3 +1708,147 @@ int __init proc_sys_init(void)
return sysctl_init();
}
+
+struct sysctl_alias {
+ const char *kernel_param;
+ const char *sysctl_param;
+};
+
+/*
+ * Historically some settings had both sysctl and a command line parameter.
+ * With the generic sysctl. parameter support, we can handle them at a single
+ * place and only keep the historical name for compatibility. This is not meant
+ * to add brand new aliases. When adding existing aliases, consider whether
+ * the possibly different moment of changing the value (e.g. from early_param
+ * to the moment do_sysctl_args() is called) is an issue for the specific
+ * parameter.
+ */
+static const struct sysctl_alias sysctl_aliases[] = {
+ {"hardlockup_all_cpu_backtrace", "kernel.hardlockup_all_cpu_backtrace" },
+ {"hung_task_panic", "kernel.hung_task_panic" },
+ {"numa_zonelist_order", "vm.numa_zonelist_order" },
+ {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" },
+ {"softlockup_panic", "kernel.softlockup_panic" },
+ { }
+};
+
+static const char *sysctl_find_alias(char *param)
+{
+ const struct sysctl_alias *alias;
+
+ for (alias = &sysctl_aliases[0]; alias->kernel_param != NULL; alias++) {
+ if (strcmp(alias->kernel_param, param) == 0)
+ return alias->sysctl_param;
+ }
+
+ return NULL;
+}
+
+/* Set sysctl value passed on kernel command line. */
+static int process_sysctl_arg(char *param, char *val,
+ const char *unused, void *arg)
+{
+ char *path;
+ struct vfsmount **proc_mnt = arg;
+ struct file_system_type *proc_fs_type;
+ struct file *file;
+ int len;
+ int err;
+ loff_t pos = 0;
+ ssize_t wret;
+
+ if (strncmp(param, "sysctl", sizeof("sysctl") - 1) == 0) {
+ param += sizeof("sysctl") - 1;
+
+ if (param[0] != '/' && param[0] != '.')
+ return 0;
+
+ param++;
+ } else {
+ param = (char *) sysctl_find_alias(param);
+ if (!param)
+ return 0;
+ }
+
+ /*
+ * To set sysctl options, we use a temporary mount of proc, look up the
+ * respective sys/ file and write to it. To avoid mounting it when no
+ * options were given, we mount it only when the first sysctl option is
+ * found. Why not a persistent mount? There are problems with a
+ * persistent mount of proc in that it forces userspace not to use any
+ * proc mount options.
+ */
+ if (!*proc_mnt) {
+ proc_fs_type = get_fs_type("proc");
+ if (!proc_fs_type) {
+ pr_err("Failed to find procfs to set sysctl from command line\n");
+ return 0;
+ }
+ *proc_mnt = kern_mount(proc_fs_type);
+ put_filesystem(proc_fs_type);
+ if (IS_ERR(*proc_mnt)) {
+ pr_err("Failed to mount procfs to set sysctl from command line\n");
+ return 0;
+ }
+ }
+
+ path = kasprintf(GFP_KERNEL, "sys/%s", param);
+ if (!path)
+ panic("%s: Failed to allocate path for %s\n", __func__, param);
+ strreplace(path, '.', '/');
+
+ file = file_open_root((*proc_mnt)->mnt_root, *proc_mnt, path, O_WRONLY, 0);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ if (err == -ENOENT)
+ pr_err("Failed to set sysctl parameter '%s=%s': parameter not found\n",
+ param, val);
+ else if (err == -EACCES)
+ pr_err("Failed to set sysctl parameter '%s=%s': permission denied (read-only?)\n",
+ param, val);
+ else
+ pr_err("Error %pe opening proc file to set sysctl parameter '%s=%s'\n",
+ file, param, val);
+ goto out;
+ }
+ len = strlen(val);
+ wret = kernel_write(file, val, len, &pos);
+ if (wret < 0) {
+ err = wret;
+ if (err == -EINVAL)
+ pr_err("Failed to set sysctl parameter '%s=%s': invalid value\n",
+ param, val);
+ else
+ pr_err("Error %pe writing to proc file to set sysctl parameter '%s=%s'\n",
+ ERR_PTR(err), param, val);
+ } else if (wret != len) {
+ pr_err("Wrote only %zd bytes of %d writing to proc file %s to set sysctl parameter '%s=%s\n",
+ wret, len, path, param, val);
+ }
+
+ err = filp_close(file, NULL);
+ if (err)
+ pr_err("Error %pe closing proc file to set sysctl parameter '%s=%s\n",
+ ERR_PTR(err), param, val);
+out:
+ kfree(path);
+ return 0;
+}
+
+void do_sysctl_args(void)
+{
+ char *command_line;
+ struct vfsmount *proc_mnt = NULL;
+
+ command_line = kstrdup(saved_command_line, GFP_KERNEL);
+ if (!command_line)
+ panic("%s: Failed to allocate copy of command line\n", __func__);
+
+ parse_args("Setting sysctl args", command_line,
+ NULL, 0, -1, -1, &proc_mnt, process_sysctl_arg);
+
+ if (proc_mnt)
+ kern_unmount(proc_mnt);
+
+ kfree(command_line);
+}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index cdbe9293ea55..5e444d4f9717 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -32,21 +32,86 @@
struct proc_fs_context {
struct pid_namespace *pid_ns;
unsigned int mask;
- int hidepid;
+ enum proc_hidepid hidepid;
int gid;
+ enum proc_pidonly pidonly;
};
enum proc_param {
Opt_gid,
Opt_hidepid,
+ Opt_subset,
};
static const struct fs_parameter_spec proc_fs_parameters[] = {
fsparam_u32("gid", Opt_gid),
- fsparam_u32("hidepid", Opt_hidepid),
+ fsparam_string("hidepid", Opt_hidepid),
+ fsparam_string("subset", Opt_subset),
{}
};
+static inline int valid_hidepid(unsigned int value)
+{
+ return (value == HIDEPID_OFF ||
+ value == HIDEPID_NO_ACCESS ||
+ value == HIDEPID_INVISIBLE ||
+ value == HIDEPID_NOT_PTRACEABLE);
+}
+
+static int proc_parse_hidepid_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct proc_fs_context *ctx = fc->fs_private;
+ struct fs_parameter_spec hidepid_u32_spec = fsparam_u32("hidepid", Opt_hidepid);
+ struct fs_parse_result result;
+ int base = (unsigned long)hidepid_u32_spec.data;
+
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "proc: unexpected type of hidepid value\n");
+
+ if (!kstrtouint(param->string, base, &result.uint_32)) {
+ if (!valid_hidepid(result.uint_32))
+ return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string);
+ ctx->hidepid = result.uint_32;
+ return 0;
+ }
+
+ if (!strcmp(param->string, "off"))
+ ctx->hidepid = HIDEPID_OFF;
+ else if (!strcmp(param->string, "noaccess"))
+ ctx->hidepid = HIDEPID_NO_ACCESS;
+ else if (!strcmp(param->string, "invisible"))
+ ctx->hidepid = HIDEPID_INVISIBLE;
+ else if (!strcmp(param->string, "ptraceable"))
+ ctx->hidepid = HIDEPID_NOT_PTRACEABLE;
+ else
+ return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string);
+
+ return 0;
+}
+
+static int proc_parse_subset_param(struct fs_context *fc, char *value)
+{
+ struct proc_fs_context *ctx = fc->fs_private;
+
+ while (value) {
+ char *ptr = strchr(value, ',');
+
+ if (ptr != NULL)
+ *ptr++ = '\0';
+
+ if (*value != '\0') {
+ if (!strcmp(value, "pid")) {
+ ctx->pidonly = PROC_PIDONLY_ON;
+ } else {
+ return invalf(fc, "proc: unsupported subset option - %s\n", value);
+ }
+ }
+ value = ptr;
+ }
+
+ return 0;
+}
+
static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct proc_fs_context *ctx = fc->fs_private;
@@ -63,10 +128,13 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_hidepid:
- ctx->hidepid = result.uint_32;
- if (ctx->hidepid < HIDEPID_OFF ||
- ctx->hidepid > HIDEPID_INVISIBLE)
- return invalfc(fc, "hidepid value must be between 0 and 2.\n");
+ if (proc_parse_hidepid_param(fc, param))
+ return -EINVAL;
+ break;
+
+ case Opt_subset:
+ if (proc_parse_subset_param(fc, param->string) < 0)
+ return -EINVAL;
break;
default:
@@ -77,26 +145,33 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
return 0;
}
-static void proc_apply_options(struct super_block *s,
+static void proc_apply_options(struct proc_fs_info *fs_info,
struct fs_context *fc,
- struct pid_namespace *pid_ns,
struct user_namespace *user_ns)
{
struct proc_fs_context *ctx = fc->fs_private;
if (ctx->mask & (1 << Opt_gid))
- pid_ns->pid_gid = make_kgid(user_ns, ctx->gid);
+ fs_info->pid_gid = make_kgid(user_ns, ctx->gid);
if (ctx->mask & (1 << Opt_hidepid))
- pid_ns->hide_pid = ctx->hidepid;
+ fs_info->hide_pid = ctx->hidepid;
+ if (ctx->mask & (1 << Opt_subset))
+ fs_info->pidonly = ctx->pidonly;
}
static int proc_fill_super(struct super_block *s, struct fs_context *fc)
{
- struct pid_namespace *pid_ns = get_pid_ns(s->s_fs_info);
+ struct proc_fs_context *ctx = fc->fs_private;
struct inode *root_inode;
+ struct proc_fs_info *fs_info;
int ret;
- proc_apply_options(s, fc, pid_ns, current_user_ns());
+ fs_info = kzalloc(sizeof(*fs_info), GFP_KERNEL);
+ if (!fs_info)
+ return -ENOMEM;
+
+ fs_info->pid_ns = get_pid_ns(ctx->pid_ns);
+ proc_apply_options(fs_info, fc, current_user_ns());
/* User space would break if executables or devices appear on proc */
s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
@@ -106,6 +181,7 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc)
s->s_magic = PROC_SUPER_MAGIC;
s->s_op = &proc_sops;
s->s_time_gran = 1;
+ s->s_fs_info = fs_info;
/*
* procfs isn't actually a stacking filesystem; however, there is
@@ -113,7 +189,7 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc)
* top of it
*/
s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
-
+
/* procfs dentries and inodes don't require IO to create */
s->s_shrink.seeks = 0;
@@ -140,19 +216,17 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc)
static int proc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
- struct pid_namespace *pid = sb->s_fs_info;
+ struct proc_fs_info *fs_info = proc_sb_info(sb);
sync_filesystem(sb);
- proc_apply_options(sb, fc, pid, current_user_ns());
+ proc_apply_options(fs_info, fc, current_user_ns());
return 0;
}
static int proc_get_tree(struct fs_context *fc)
{
- struct proc_fs_context *ctx = fc->fs_private;
-
- return get_tree_keyed(fc, proc_fill_super, ctx->pid_ns);
+ return get_tree_nodev(fc, proc_fill_super);
}
static void proc_fs_context_free(struct fs_context *fc)
@@ -188,22 +262,19 @@ static int proc_init_fs_context(struct fs_context *fc)
static void proc_kill_sb(struct super_block *sb)
{
- struct pid_namespace *ns;
+ struct proc_fs_info *fs_info = proc_sb_info(sb);
- ns = (struct pid_namespace *)sb->s_fs_info;
- if (ns->proc_self)
- dput(ns->proc_self);
- if (ns->proc_thread_self)
- dput(ns->proc_thread_self);
- kill_anon_super(sb);
+ if (!fs_info) {
+ kill_anon_super(sb);
+ return;
+ }
- /* Make the pid namespace safe for the next mount of proc */
- ns->proc_self = NULL;
- ns->proc_thread_self = NULL;
- ns->pid_gid = GLOBAL_ROOT_GID;
- ns->hide_pid = 0;
+ dput(fs_info->proc_self);
+ dput(fs_info->proc_thread_self);
- put_pid_ns(ns);
+ kill_anon_super(sb);
+ put_pid_ns(fs_info->pid_ns);
+ kfree(fs_info);
}
static struct file_system_type proc_fs_type = {
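
proc_parse_hidepid_param() above keeps accepting the legacy numeric values while adding the names off, noaccess, invisible and ptraceable, and since the options now live in per-superblock proc_fs_info, separate proc instances can be mounted with different settings. A sketch of mounting such an instance; /tmp/proc is a hypothetical, pre-created mount point and CAP_SYS_ADMIN is required.

/* sketch: mount an additional procfs instance using the new string options */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("proc", "/tmp/proc", "proc", 0,
		  "hidepid=ptraceable,subset=pid"))
		perror("mount");
	return 0;
}
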
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 57c0a1047250..ca5158fa561c 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -12,7 +12,7 @@ static const char *proc_self_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name;
@@ -36,10 +36,10 @@ static unsigned self_inum __ro_after_init;
int proc_setup_self(struct super_block *s)
{
struct inode *root_inode = d_inode(s->s_root);
- struct pid_namespace *ns = proc_pid_ns(root_inode);
+ struct proc_fs_info *fs_info = proc_sb_info(s);
struct dentry *self;
int ret = -ENOMEM;
-
+
inode_lock(root_inode);
self = d_alloc_name(s->s_root, "self");
if (self) {
@@ -62,7 +62,7 @@ int proc_setup_self(struct super_block *s)
if (ret)
pr_err("proc_fill_super: can't allocate /proc/self\n");
else
- ns->proc_self = self;
+ fs_info->proc_self = self;
return ret;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6ad407d5efe2..dbda4499a859 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -145,7 +145,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
return NULL;
}
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mmap_read_lock_killable(mm)) {
mmput(mm);
put_task_struct(priv->task);
priv->task = NULL;
@@ -188,7 +188,7 @@ static void m_stop(struct seq_file *m, void *v)
return;
release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
put_task_struct(priv->task);
priv->task = NULL;
@@ -593,7 +593,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (pmd_trans_unstable(pmd))
goto out;
/*
- * The mmap_sem held all the way back in m_start() is what
+ * The mmap_lock held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
* in here.
*/
@@ -752,7 +752,7 @@ static void smap_gather_stats(struct vm_area_struct *vma,
}
}
#endif
- /* mmap_sem is held in m_start */
+ /* mmap_lock is held in m_start */
walk_page_vma(vma, &smaps_walk_ops, mss);
}
@@ -847,7 +847,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret)
goto out_put_mm;
@@ -866,7 +866,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
__show_smap(m, &mss, true);
release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_put_mm:
mmput(mm);
@@ -1140,7 +1140,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
};
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1150,11 +1150,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
* resident set size to this mm's current rss value.
*/
reset_mm_hiwater_rss(mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
goto out_mm;
}
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mmap_read_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1163,8 +1163,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
- up_read(&mm->mmap_sem);
- if (down_write_killable(&mm->mmap_sem)) {
+ mmap_read_unlock(mm);
+ if (mmap_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1183,14 +1183,14 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
* failed like if
* get_proc_task() fails?
*/
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
goto out_mm;
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
}
- downgrade_write(&mm->mmap_sem);
+ mmap_write_downgrade(mm);
break;
}
@@ -1203,7 +1203,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, 0, -1);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_mm:
mmput(mm);
}
@@ -1564,11 +1564,11 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret)
goto out_free;
ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
start_vaddr = end;
len = min(count, PM_ENTRY_BYTES * pm.pos);
@@ -1827,7 +1827,7 @@ static int show_numa_map(struct seq_file *m, void *v)
if (is_vm_hugetlb_page(vma))
seq_puts(m, " huge");
- /* mmap_sem is held by m_start */
+ /* mmap_lock is held by m_start */
walk_page_vma(vma, &show_numa_ops, md);
if (!md->pages)
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 7907e6419e57..a6d21fc0033c 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -25,7 +25,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
struct rb_node *p;
unsigned long bytes = 0, sbytes = 0, slack = 0, size;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
@@ -77,7 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"Shared:\t%8lu bytes\n",
bytes, slack, sbytes);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
unsigned long task_vsize(struct mm_struct *mm)
@@ -86,12 +86,12 @@ unsigned long task_vsize(struct mm_struct *mm)
struct rb_node *p;
unsigned long vsize = 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
vsize += vma->vm_end - vma->vm_start;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return vsize;
}
@@ -104,7 +104,7 @@ unsigned long task_statm(struct mm_struct *mm,
struct rb_node *p;
unsigned long size = kobjsize(mm);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
size += kobjsize(vma);
@@ -119,7 +119,7 @@ unsigned long task_statm(struct mm_struct *mm,
>> PAGE_SHIFT;
*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
>> PAGE_SHIFT;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
size >>= PAGE_SHIFT;
size += *text + *data;
*resident = size;
@@ -211,7 +211,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mmap_read_lock_killable(mm)) {
mmput(mm);
return ERR_PTR(-EINTR);
}
@@ -221,7 +221,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (n-- == 0)
return p;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return NULL;
}
@@ -231,7 +231,7 @@ static void m_stop(struct seq_file *m, void *_vml)
struct proc_maps_private *priv = m->private;
if (!IS_ERR_OR_NULL(_vml)) {
- up_read(&priv->mm->mmap_sem);
+ mmap_read_unlock(priv->mm);
mmput(priv->mm);
}
if (priv->task) {
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index f61ae53533f5..ac284f409568 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -12,7 +12,7 @@ static const char *proc_thread_self_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
pid_t tgid = task_tgid_nr_ns(current, ns);
pid_t pid = task_pid_nr_ns(current, ns);
char *name;
@@ -36,7 +36,7 @@ static unsigned thread_self_inum __ro_after_init;
int proc_setup_thread_self(struct super_block *s)
{
struct inode *root_inode = d_inode(s->s_root);
- struct pid_namespace *ns = proc_pid_ns(root_inode);
+ struct proc_fs_info *fs_info = proc_sb_info(s);
struct dentry *thread_self;
int ret = -ENOMEM;
@@ -60,9 +60,9 @@ int proc_setup_thread_self(struct super_block *s)
inode_unlock(root_inode);
if (ret)
- pr_err("proc_fill_super: can't allocate /proc/thread_self\n");
+ pr_err("proc_fill_super: can't allocate /proc/thread-self\n");
else
- ns->proc_thread_self = thread_self;
+ fs_info->proc_thread_self = thread_self;
return ret;
}
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index c663202da8de..c3a345c28a93 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -27,7 +27,6 @@
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index e4d70c0dffe9..3059a9394c2d 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -37,23 +37,23 @@ static __poll_t mounts_poll(struct file *file, poll_table *wait)
return res;
}
-struct proc_fs_info {
+struct proc_fs_opts {
int flag;
const char *str;
};
static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
- static const struct proc_fs_info fs_info[] = {
+ static const struct proc_fs_opts fs_opts[] = {
{ SB_SYNCHRONOUS, ",sync" },
{ SB_DIRSYNC, ",dirsync" },
{ SB_MANDLOCK, ",mand" },
{ SB_LAZYTIME, ",lazytime" },
{ 0, NULL }
};
- const struct proc_fs_info *fs_infop;
+ const struct proc_fs_opts *fs_infop;
- for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+ for (fs_infop = fs_opts; fs_infop->flag; fs_infop++) {
if (sb->s_flags & fs_infop->flag)
seq_puts(m, fs_infop->str);
}
@@ -63,7 +63,7 @@ static int show_sb_opts(struct seq_file *m, struct super_block *sb)
static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
- static const struct proc_fs_info mnt_info[] = {
+ static const struct proc_fs_opts mnt_opts[] = {
{ MNT_NOSUID, ",nosuid" },
{ MNT_NODEV, ",nodev" },
{ MNT_NOEXEC, ",noexec" },
@@ -72,9 +72,9 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{ MNT_RELATIME, ",relatime" },
{ 0, NULL }
};
- const struct proc_fs_info *fs_infop;
+ const struct proc_fs_opts *fs_infop;
- for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+ for (fs_infop = mnt_opts; fs_infop->flag; fs_infop++) {
if (mnt->mnt_flags & fs_infop->flag)
seq_puts(m, fs_infop->str);
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0031070b3692..1509775da040 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1066,7 +1066,7 @@ research:
} else {
/* paste hole to the indirect item */
/*
- * If kmalloc failed, max_to_insert becomes
+ * If kcalloc failed, max_to_insert becomes
* zero and it means we only have space for
* one block
*/
diff --git a/fs/select.c b/fs/select.c
index 11d0285d46b7..7aef49552d4c 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -766,22 +766,38 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
* which has a pointer to the sigset_t itself followed by a size_t containing
* the sigset size.
*/
+struct sigset_argpack {
+ sigset_t __user *p;
+ size_t size;
+};
+
+static inline int get_sigset_argpack(struct sigset_argpack *to,
+ struct sigset_argpack __user *from)
+{
+ // the path is hot enough for overhead of copy_from_user() to matter
+ if (from) {
+ if (!user_read_access_begin(from, sizeof(*from)))
+ return -EFAULT;
+ unsafe_get_user(to->p, &from->p, Efault);
+ unsafe_get_user(to->size, &from->size, Efault);
+ user_read_access_end();
+ }
+ return 0;
+Efault:
+ user_access_end();
+ return -EFAULT;
+}
+
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
void __user *, sig)
{
- size_t sigsetsize = 0;
- sigset_t __user *up = NULL;
-
- if (sig) {
- if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
- || __get_user(up, (sigset_t __user * __user *)sig)
- || __get_user(sigsetsize,
- (size_t __user *)(sig+sizeof(void *))))
- return -EFAULT;
- }
+ struct sigset_argpack x = {NULL, 0};
+
+ if (get_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC);
+ return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
}
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
@@ -790,18 +806,12 @@ SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *,
fd_set __user *, exp, struct old_timespec32 __user *, tsp,
void __user *, sig)
{
- size_t sigsetsize = 0;
- sigset_t __user *up = NULL;
-
- if (sig) {
- if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
- || __get_user(up, (sigset_t __user * __user *)sig)
- || __get_user(sigsetsize,
- (size_t __user *)(sig+sizeof(void *))))
- return -EFAULT;
- }
+ struct sigset_argpack x = {NULL, 0};
+
+ if (get_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC);
+ return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
}
#endif
@@ -1325,24 +1335,37 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
return poll_select_finish(&end_time, tsp, type, ret);
}
+struct compat_sigset_argpack {
+ compat_uptr_t p;
+ compat_size_t size;
+};
+static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
+ struct compat_sigset_argpack __user *from)
+{
+ if (from) {
+ if (!user_read_access_begin(from, sizeof(*from)))
+ return -EFAULT;
+ unsafe_get_user(to->p, &from->p, Efault);
+ unsafe_get_user(to->size, &from->size, Efault);
+ user_read_access_end();
+ }
+ return 0;
+Efault:
+ user_access_end();
+ return -EFAULT;
+}
+
COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
struct __kernel_timespec __user *, tsp, void __user *, sig)
{
- compat_size_t sigsetsize = 0;
- compat_uptr_t up = 0;
-
- if (sig) {
- if (!access_ok(sig,
- sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
- __get_user(up, (compat_uptr_t __user *)sig) ||
- __get_user(sigsetsize,
- (compat_size_t __user *)(sig+sizeof(up))))
- return -EFAULT;
- }
+ struct compat_sigset_argpack x = {0, 0};
+
+ if (get_compat_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
- sigsetsize, PT_TIMESPEC);
+ return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
+ x.size, PT_TIMESPEC);
}
#if defined(CONFIG_COMPAT_32BIT_TIME)
@@ -1351,20 +1374,13 @@ COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
struct old_timespec32 __user *, tsp, void __user *, sig)
{
- compat_size_t sigsetsize = 0;
- compat_uptr_t up = 0;
-
- if (sig) {
- if (!access_ok(sig,
- sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
- __get_user(up, (compat_uptr_t __user *)sig) ||
- __get_user(sigsetsize,
- (compat_size_t __user *)(sig+sizeof(up))))
- return -EFAULT;
- }
+ struct compat_sigset_argpack x = {0, 0};
+
+ if (get_compat_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
- sigsetsize, PT_OLD_TIMESPEC);
+ return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
+ x.size, PT_OLD_TIMESPEC);
}
#endif
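
For context on the sigset_argpack helpers above: a syscall can only take six arguments, so pselect6() receives the signal mask and its size through a small two-word structure pointed to by the sixth argument, which get_sigset_argpack() / get_compat_sigset_argpack() now read in one user_read_access section. A minimal userspace sketch of that calling convention (an illustration only; the raw-syscall wrapper and the literal size are assumptions for this sketch, and on 64-bit the kernel expects its own 8-byte sigset size rather than sizeof(sigset_t) from glibc):

#include <signal.h>
#include <stddef.h>
#include <sys/select.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

struct sigset_argpack {                 /* mirrors the kernel-side struct */
        const sigset_t *p;
        size_t size;
};

static long pselect6_raw(int n, fd_set *in, fd_set *out, fd_set *ex,
                         struct timespec *ts, const sigset_t *mask)
{
        /* 8 == kernel sigset size on 64-bit (assumption for this sketch) */
        struct sigset_argpack x = { mask, mask ? 8 : 0 };

        return syscall(SYS_pselect6, n, in, out, ex, ts, &x);
}
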
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 70f5fdf99bf6..4e6239f33c06 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -6,6 +6,8 @@
* initial implementation -- AV, Oct 2001.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cache.h>
#include <linux/fs.h>
#include <linux/export.h>
@@ -233,9 +235,8 @@ Fill:
p = m->op->next(m, p, &m->index);
if (pos == m->index) {
- pr_info_ratelimited("buggy seq_file .next function %ps "
- "did not updated position index\n",
- m->op->next);
+ pr_info_ratelimited("buggy .next function %ps did not update position index\n",
+ m->op->next);
m->index++;
}
if (!p || IS_ERR(p)) {
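
The pr_fmt() define added at the top of fs/seq_file.c gives every pr_*() call in the file a common module-name prefix, which is why the literal "seq_file" wording could be dropped from the ratelimited message. A standalone userspace stand-in of the mechanism (KBUILD_MODNAME and pr_info are mocked here; in the kernel they come from Kbuild and printk.h):

#include <stdio.h>

#define KBUILD_MODNAME "seq_file"               /* provided by Kbuild in-tree */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        /* prints: seq_file: buggy .next function example_next did not update position index */
        pr_info("buggy .next function %s did not update position index\n",
                "example_next");
        return 0;
}
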
diff --git a/fs/super.c b/fs/super.c
index bf3b7685b52a..904459b35119 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
*/
void deactivate_super(struct super_block *s)
{
- if (!atomic_add_unless(&s->s_active, -1, 1)) {
+ if (!atomic_add_unless(&s->s_active, -1, 1)) {
down_write(&s->s_umount);
deactivate_locked_super(s);
}
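
The deactivate_super() hunk changes nothing functional, but the line it touches is worth spelling out: atomic_add_unless(&s->s_active, -1, 1) decrements the active count unless it is already 1, so the branch body only runs for the put that holds the last active reference. A userspace C11 stand-in of that primitive (names and types are simplifications for the sketch, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Add @a to @v unless @v currently equals @u; return true iff the add
 * happened.  deactivate_super() relies on the "false" case to detect
 * the final reference and fall into the locked teardown path. */
static bool atomic_add_unless_sketch(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        do {
                if (c == u)
                        return false;
        } while (!atomic_compare_exchange_weak(v, &c, c + a));

        return true;
}
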
diff --git a/fs/sync.c b/fs/sync.c
index c6f6f5be5682..1373a610dc78 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -76,7 +76,8 @@ static void sync_inodes_one_sb(struct super_block *sb, void *arg)
static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
- if (!sb_rdonly(sb) && sb->s_op->sync_fs)
+ if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
+ sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, *(int *)arg);
}
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f275fcda62fb..eb6897ab78e7 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -492,6 +492,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
kernfs_put(kn);
return ret;
}
+EXPORT_SYMBOL_GPL(sysfs_remove_file_self);
void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
{
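
sysfs_remove_file_self() lets an attribute's own ->store() handler remove the file it is being called from without deadlocking, and the new EXPORT_SYMBOL_GPL makes that usable from modules. A hedged sketch of the typical pattern (delete_store and the surrounding object are hypothetical, not taken from this series):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t delete_store(struct kobject *kobj, struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        /* Returns true only for the caller that wins the removal race
         * and may therefore proceed to tear the object down. */
        if (sysfs_remove_file_self(kobj, &attr->attr))
                kobject_put(kobj);
        return count;
}
static struct kobj_attribute delete_attr = __ATTR_WO(delete);
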
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e39fdec8a0b0..52de29000c7e 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
pte_t *ptep, pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ mmap_assert_locked(mm);
ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
pte_t *pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ mmap_assert_locked(mm);
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
@@ -369,13 +369,13 @@ static inline bool userfaultfd_signal_pending(unsigned int flags)
* FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
* recommendation in __lock_page_or_retry is not an understatement.
*
- * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
+ * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
* before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
* not set.
*
* If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
* set, VM_FAULT_RETRY can still be returned if and only if there are
- * fatal_signal_pending()s, and the mmap_sem must be released before
+ * fatal_signal_pending()s, and the mmap_lock must be released before
* returning it.
*/
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
@@ -396,16 +396,16 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
* FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
* the no_page_table() helper in follow_page_mask(), but the
* shmem_vm_ops->fault method is invoked even during
- * coredumping without mmap_sem and it ends up here.
+ * coredumping without mmap_lock and it ends up here.
*/
if (current->flags & (PF_EXITING|PF_DUMPCORE))
goto out;
/*
- * Coredumping runs without mmap_sem so we can only check that
- * the mmap_sem is held, if PF_DUMPCORE was not set.
+ * Coredumping runs without mmap_lock so we can only check that
+ * the mmap_lock is held, if PF_DUMPCORE was not set.
*/
- WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+ mmap_assert_locked(mm);
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
@@ -422,7 +422,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
/*
* If it's already released don't get it. This avoids to loop
* in __get_user_pages if userfaultfd_release waits on the
- * caller of handle_userfault to release the mmap_sem.
+ * caller of handle_userfault to release the mmap_lock.
*/
if (unlikely(READ_ONCE(ctx->released))) {
/*
@@ -481,7 +481,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
- /* take the reference before dropping the mmap_sem */
+ /* take the reference before dropping the mmap_lock */
userfaultfd_ctx_get(ctx);
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
@@ -514,7 +514,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
vmf->address,
vmf->flags, reason);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (likely(must_wait && !READ_ONCE(ctx->released) &&
!userfaultfd_signal_pending(vmf->flags))) {
@@ -637,7 +637,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct mm_struct *mm = release_new_ctx->mm;
/* the various vma->vm_userfaultfd_ctx still points to it */
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* no task can run (and in turn coredump) yet */
VM_WARN_ON(!mmget_still_valid(mm));
for (vma = mm->mmap; vma; vma = vma->vm_next)
@@ -645,7 +645,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
userfaultfd_ctx_put(release_new_ctx);
}
@@ -799,7 +799,7 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
userfaultfd_ctx_get(ctx);
WRITE_ONCE(ctx->mmap_changing, true);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
msg_init(&ewq.msg);
@@ -890,11 +890,11 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* Flush page faults out of all CPUs. NOTE: all page faults
* must be retried without returning VM_FAULT_SIGBUS if
* userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
- * changes while handle_userfault released the mmap_sem. So
+ * changes while handle_userfault released the mmap_lock. So
* it's critical that released is set to true (above), before
- * taking the mmap_sem for writing.
+ * taking the mmap_lock for writing.
*/
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -920,7 +920,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mmput(mm);
wakeup:
/*
@@ -1248,7 +1248,7 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
/*
* To be sure waitqueue_active() is not reordered by the CPU
* before the pagetable update, use an explicit SMP memory
- * barrier here. PT lock release or up_read(mmap_sem) still
+ * barrier here. PT lock release or mmap_read_unlock(mm) still
* have release semantics that can allow the
* waitqueue_active() to be reordered before the pte update.
*/
@@ -1345,7 +1345,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
@@ -1492,7 +1492,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mmput(mm);
if (!ret) {
__u64 ioctls_out;
@@ -1547,7 +1547,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
@@ -1664,7 +1664,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mmput(mm);
out:
return ret;
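
The userfaultfd changes above are part of the tree-wide mmap_sem -> mmap_lock API conversion: each open-coded down_read()/up_read()/down_write()/up_write() on the mm's semaphore becomes a named wrapper, and the rwsem_is_locked() assertions become mmap_assert_locked(). Roughly, the wrappers introduced by that series look like the following sketch (simplified from include/linux/mmap_lock.h; the member name shown assumes the field rename that lands later in the same series):

static inline void mmap_write_lock(struct mm_struct *mm)
{
        down_write(&mm->mmap_lock);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
        up_write(&mm->mmap_lock);
}

static inline void mmap_read_lock(struct mm_struct *mm)
{
        down_read(&mm->mmap_lock);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
        up_read(&mm->mmap_lock);
}

static inline void mmap_assert_locked(struct mm_struct *mm)
{
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}
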
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 403c90309a8f..00db81eac80d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1173,7 +1173,7 @@ xfs_file_llseek(
* Locking for serialisation of IO during page faults. This results in a lock
* ordering of:
*
- * mmap_sem (MM)
+ * mmap_lock (MM)
* sb_start_pagefault(vfs, freeze)
* i_mmaplock (XFS - truncate serialisation)
* page_lock (MM)
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 64f5f9a440ae..4c91fb25ec66 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -145,17 +145,17 @@ xfs_ilock_attr_map_shared(
*
* i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
*
- * mmap_sem locking order:
+ * mmap_lock locking order:
*
- * i_rwsem -> page lock -> mmap_sem
- * mmap_sem -> i_mmap_lock -> page_lock
+ * i_rwsem -> page lock -> mmap_lock
+ * mmap_lock -> i_mmap_lock -> page_lock
*
- * The difference in mmap_sem locking order mean that we cannot hold the
+ * The difference in mmap_lock locking order means that we cannot hold the
* i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
- * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
+ * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
* in get_user_pages() to map the user pages into the kernel address space for
* direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
- * page faults already hold the mmap_sem.
+ * page faults already hold the mmap_lock.
*
* Hence to serialise fully against both syscall and mmap based IO, we need to
* take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
@@ -1630,7 +1630,7 @@ xfs_release(
return 0;
/*
* If we can't get the iolock just skip truncating the blocks
- * past EOF because we could deadlock with the mmap_sem
+ * past EOF because we could deadlock with the mmap_lock
* otherwise. We'll get another chance to drop them once the
* last reference to the inode is dropped, so we'll never leak
* blocks permanently.
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a40f88cf3ab7..a190212ca85d 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1236,64 +1236,26 @@ xfs_ioctl_setattr_xflags(
return 0;
}
-/*
- * If we are changing DAX flags, we have to ensure the file is clean and any
- * cached objects in the address space are invalidated and removed. This
- * requires us to lock out other IO and page faults similar to a truncate
- * operation. The locks need to be held until the transaction has been committed
- * so that the cache invalidation is atomic with respect to the DAX flag
- * manipulation.
- */
-static int
-xfs_ioctl_setattr_dax_invalidate(
+static void
+xfs_ioctl_setattr_prepare_dax(
struct xfs_inode *ip,
- struct fsxattr *fa,
- int *join_flags)
+ struct fsxattr *fa)
{
- struct inode *inode = VFS_I(ip);
- struct super_block *sb = inode->i_sb;
- int error;
-
- *join_flags = 0;
-
- /*
- * It is only valid to set the DAX flag on regular files and
- * directories on filesystems where the block size is equal to the page
- * size. On directories it serves as an inherited hint so we don't
- * have to check the device for dax support or flush pagecache.
- */
- if (fa->fsx_xflags & FS_XFLAG_DAX) {
- struct xfs_buftarg *target = xfs_inode_buftarg(ip);
-
- if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
- return -EINVAL;
- }
-
- /* If the DAX state is not changing, we have nothing to do here. */
- if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
- return 0;
- if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
- return 0;
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
if (S_ISDIR(inode->i_mode))
- return 0;
-
- /* lock, flush and invalidate mapping in preparation for flag change */
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
- error = filemap_write_and_wait(inode->i_mapping);
- if (error)
- goto out_unlock;
- error = invalidate_inode_pages2(inode->i_mapping);
- if (error)
- goto out_unlock;
-
- *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
- return 0;
+ return;
-out_unlock:
- xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
- return error;
+ if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) ||
+ (mp->m_flags & XFS_MOUNT_DAX_NEVER))
+ return;
+ if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
+ !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) ||
+ (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
+ (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)))
+ d_mark_dontcache(inode);
}
/*
@@ -1301,17 +1263,10 @@ out_unlock:
* have permission to do so. On success, return a clean transaction and the
* inode locked exclusively ready for further operation specific checks. On
* failure, return an error without modifying or locking the inode.
- *
- * The inode might already be IO locked on call. If this is the case, it is
- * indicated in @join_flags and we take full responsibility for ensuring they
- * are unlocked from now on. Hence if we have an error here, we still have to
- * unlock them. Otherwise, once they are joined to the transaction, they will
- * be unlocked on commit/cancel.
*/
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
- struct xfs_inode *ip,
- int join_flags)
+ struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
@@ -1328,8 +1283,7 @@ xfs_ioctl_setattr_get_trans(
goto out_unlock;
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
- join_flags = 0;
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
/*
* CAP_FOWNER overrides the following restrictions:
@@ -1350,8 +1304,6 @@ xfs_ioctl_setattr_get_trans(
out_cancel:
xfs_trans_cancel(tp);
out_unlock:
- if (join_flags)
- xfs_iunlock(ip, join_flags);
return ERR_PTR(error);
}
@@ -1476,7 +1428,6 @@ xfs_ioctl_setattr(
struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
int code;
- int join_flags = 0;
trace_xfs_ioctl_setattr(ip);
@@ -1500,18 +1451,9 @@ xfs_ioctl_setattr(
return code;
}
- /*
- * Changing DAX config may require inode locking for mapping
- * invalidation. These need to be held all the way to transaction commit
- * or cancel time, so need to be passed through to
- * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
- * appropriately.
- */
- code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
- if (code)
- goto error_free_dquots;
+ xfs_ioctl_setattr_prepare_dax(ip, fa);
- tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+ tp = xfs_ioctl_setattr_get_trans(ip);
if (IS_ERR(tp)) {
code = PTR_ERR(tp);
goto error_free_dquots;
@@ -1639,7 +1581,6 @@ xfs_ioc_setxflags(
struct fsxattr fa;
struct fsxattr old_fa;
unsigned int flags;
- int join_flags = 0;
int error;
if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1656,18 +1597,9 @@ xfs_ioc_setxflags(
if (error)
return error;
- /*
- * Changing DAX config may require inode locking for mapping
- * invalidation. These need to be held all the way to transaction commit
- * or cancel time, so need to be passed through to
- * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
- * appropriately.
- */
- error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
- if (error)
- goto out_drop_write;
+ xfs_ioctl_setattr_prepare_dax(ip, &fa);
- tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+ tp = xfs_ioctl_setattr_get_trans(ip);
if (IS_ERR(tp)) {
error = PTR_ERR(tp);
goto out_drop_write;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index d66528fa3657..80a13c8561d8 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -25,13 +25,14 @@
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iversion.h>
+#include <linux/fiemap.h>
/*
- * Directories have different lock order w.r.t. mmap_sem compared to regular
+ * Directories have different lock order w.r.t. mmap_lock compared to regular
* files. This is due to readdir potentially triggering page faults on a user
* buffer inside filldir(), and this happens with the ilock on the directory
* held. For regular files, the lock order is the other way around - the
- * mmap_sem is taken during the page fault, and then we lock the ilock to do
+ * mmap_lock is taken during the page fault, and then we lock the ilock to do
* block mapping. Hence we need a different class for the directory ilock so
* that lockdep can tell them apart.
*/
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index d79b821ed1c7..07bc42d62673 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1330,7 +1330,7 @@ static int zonefs_read_super(struct super_block *sb)
goto unmap;
}
- uuid_copy(&sbi->s_uuid, (uuid_t *)super->s_uuid);
+ import_uuid(&sbi->s_uuid, super->s_uuid);
ret = 0;
unmap:
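
import_uuid(), used in the zonefs hunk above, copies a raw 16-byte on-disk field into a uuid_t and so replaces the cast-based uuid_copy() call. A self-contained userspace stand-in (the uuid_t/u8 definitions below are simplifications for the sketch, not the kernel's headers):

#include <string.h>

#define UUID_SIZE 16

typedef unsigned char u8;
typedef struct { u8 b[UUID_SIZE]; } uuid_t;

/* Equivalent of the kernel helper: dst is a uuid_t, src is raw bytes,
 * e.g. an s_uuid field read straight from an on-disk superblock. */
static inline void import_uuid(uuid_t *dst, const u8 *src)
{
        memcpy(dst, src, UUID_SIZE);
}
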
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 1dc8d262035b..459d6981ca96 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20200430
+#define ACPI_CA_VERSION 0x20200528
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4defed58ea33..aa236b9e6f24 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -815,8 +815,9 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8
#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9
#define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10
+#define ACPI_ADR_SPACE_PLATFORM_RT (acpi_adr_space_type) 11
-#define ACPI_NUM_PREDEFINED_REGIONS 11
+#define ACPI_NUM_PREDEFINED_REGIONS 12
/*
* Special Address Spaces
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
deleted file mode 100644
index 58046ddc08d0..000000000000
--- a/include/asm-generic/5level-fixup.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _5LEVEL_FIXUP_H
-#define _5LEVEL_FIXUP_H
-
-#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED 1
-
-#define P4D_SHIFT PGDIR_SHIFT
-#define P4D_SIZE PGDIR_SIZE
-#define P4D_MASK PGDIR_MASK
-#define MAX_PTRS_PER_P4D 1
-#define PTRS_PER_P4D 1
-
-#define p4d_t pgd_t
-
-#define pud_alloc(mm, p4d, address) \
- ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
- NULL : pud_offset(p4d, address))
-
-#define p4d_alloc(mm, pgd, address) (pgd)
-#define p4d_alloc_track(mm, pgd, address, mask) (pgd)
-#define p4d_offset(pgd, start) (pgd)
-
-#ifndef __ASSEMBLY__
-static inline int p4d_none(p4d_t p4d)
-{
- return 0;
-}
-
-static inline int p4d_bad(p4d_t p4d)
-{
- return 0;
-}
-
-static inline int p4d_present(p4d_t p4d)
-{
- return 1;
-}
-#endif
-
-#define p4d_ERROR(p4d) do { } while (0)
-#define p4d_clear(p4d) pgd_clear(p4d)
-#define p4d_val(p4d) pgd_val(p4d)
-#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
-#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
-#define p4d_page(p4d) pgd_page(p4d)
-#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
-
-#define __p4d(x) __pgd(x)
-#define set_p4d(p4dp, p4d) set_pgd(p4dp, p4d)
-
-#undef p4d_free_tlb
-#define p4d_free_tlb(tlb, x, addr) do { } while (0)
-#define p4d_free(mm, x) do { } while (0)
-
-#undef p4d_addr_end
-#define p4d_addr_end(addr, end) (end)
-
-#endif
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index e8730c6b9fe2..379986e40159 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -18,1623 +18,1624 @@
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
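
The rest of this file is a mechanical substitution: every kasan_check_read()/kasan_check_write() becomes instrument_atomic_read()/instrument_atomic_write(), and the helpers are forced __always_inline so the instrumentation never appears as a separate stack frame. The instrument_* helpers fold both the KASAN and KCSAN checks into one call; roughly (a sketch of the <linux/instrumented.h> definitions, not a verbatim copy):

static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
        kasan_check_read(v, size);
        kcsan_check_atomic_read(v, size);
}

static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
        kasan_check_write(v, size);
        kcsan_check_atomic_write(v, size);
}
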
-static inline int
+static __always_inline int
atomic_read(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read(v);
}
#define atomic_read atomic_read
#if defined(arch_atomic_read_acquire)
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic_read_acquire(v);
}
#define atomic_read_acquire atomic_read_acquire
#endif
-static inline void
+static __always_inline void
atomic_set(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set(v, i);
}
#define atomic_set atomic_set
#if defined(arch_atomic_set_release)
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_set_release(v, i);
}
#define atomic_set_release atomic_set_release
#endif
-static inline void
+static __always_inline void
atomic_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_add(i, v);
}
#define atomic_add atomic_add
#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return(i, v);
}
#define atomic_add_return atomic_add_return
#endif
#if defined(arch_atomic_add_return_acquire)
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_acquire(i, v);
}
#define atomic_add_return_acquire atomic_add_return_acquire
#endif
#if defined(arch_atomic_add_return_release)
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_release(i, v);
}
#define atomic_add_return_release atomic_add_return_release
#endif
#if defined(arch_atomic_add_return_relaxed)
-static inline int
+static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_return_relaxed(i, v);
}
#define atomic_add_return_relaxed atomic_add_return_relaxed
#endif
#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add(i, v);
}
#define atomic_fetch_add atomic_fetch_add
#endif
#if defined(arch_atomic_fetch_add_acquire)
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_acquire(i, v);
}
#define atomic_fetch_add_acquire atomic_fetch_add_acquire
#endif
#if defined(arch_atomic_fetch_add_release)
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_release(i, v);
}
#define atomic_fetch_add_release atomic_fetch_add_release
#endif
#if defined(arch_atomic_fetch_add_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_relaxed(i, v);
}
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_sub(i, v);
}
#define atomic_sub atomic_sub
#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return(i, v);
}
#define atomic_sub_return atomic_sub_return
#endif
#if defined(arch_atomic_sub_return_acquire)
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_acquire(i, v);
}
#define atomic_sub_return_acquire atomic_sub_return_acquire
#endif
#if defined(arch_atomic_sub_return_release)
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_release(i, v);
}
#define atomic_sub_return_release atomic_sub_return_release
#endif
#if defined(arch_atomic_sub_return_relaxed)
-static inline int
+static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_return_relaxed(i, v);
}
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#endif
#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub(i, v);
}
#define atomic_fetch_sub atomic_fetch_sub
#endif
#if defined(arch_atomic_fetch_sub_acquire)
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_acquire(i, v);
}
#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
#endif
#if defined(arch_atomic_fetch_sub_release)
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_release(i, v);
}
#define atomic_fetch_sub_release atomic_fetch_sub_release
#endif
#if defined(arch_atomic_fetch_sub_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_sub_relaxed(i, v);
}
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#endif
#if defined(arch_atomic_inc)
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_inc(v);
}
#define atomic_inc atomic_inc
#endif
#if defined(arch_atomic_inc_return)
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
#define atomic_inc_return atomic_inc_return
#endif
#if defined(arch_atomic_inc_return_acquire)
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_acquire(v);
}
#define atomic_inc_return_acquire atomic_inc_return_acquire
#endif
#if defined(arch_atomic_inc_return_release)
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_release(v);
}
#define atomic_inc_return_release atomic_inc_return_release
#endif
#if defined(arch_atomic_inc_return_relaxed)
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_return_relaxed(v);
}
#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#endif
#if defined(arch_atomic_fetch_inc)
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc(v);
}
#define atomic_fetch_inc atomic_fetch_inc
#endif
#if defined(arch_atomic_fetch_inc_acquire)
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_acquire(v);
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
#endif
#if defined(arch_atomic_fetch_inc_release)
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_release(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
#endif
#if defined(arch_atomic_fetch_inc_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
#endif
#if defined(arch_atomic_dec)
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_dec(v);
}
#define atomic_dec atomic_dec
#endif
#if defined(arch_atomic_dec_return)
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
#define atomic_dec_return atomic_dec_return
#endif
#if defined(arch_atomic_dec_return_acquire)
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_acquire(v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
#endif
#if defined(arch_atomic_dec_return_release)
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_release(v);
}
#define atomic_dec_return_release atomic_dec_return_release
#endif
#if defined(arch_atomic_dec_return_relaxed)
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#endif
#if defined(arch_atomic_fetch_dec)
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec(v);
}
#define atomic_fetch_dec atomic_fetch_dec
#endif
#if defined(arch_atomic_fetch_dec_acquire)
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_acquire(v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
#endif
#if defined(arch_atomic_fetch_dec_release)
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_release(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
#endif
#if defined(arch_atomic_fetch_dec_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_and(i, v);
}
#define atomic_and atomic_and
#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and(i, v);
}
#define atomic_fetch_and atomic_fetch_and
#endif
#if defined(arch_atomic_fetch_and_acquire)
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_acquire(i, v);
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
#endif
#if defined(arch_atomic_fetch_and_release)
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_release(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
#endif
#if defined(arch_atomic_fetch_and_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#endif
#if defined(arch_atomic_andnot)
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_andnot(i, v);
}
#define atomic_andnot atomic_andnot
#endif
#if defined(arch_atomic_fetch_andnot)
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot(i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
#endif
#if defined(arch_atomic_fetch_andnot_acquire)
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_acquire(i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#endif
#if defined(arch_atomic_fetch_andnot_release)
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_release(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#endif
#if defined(arch_atomic_fetch_andnot_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_or(i, v);
}
#define atomic_or atomic_or
#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or(i, v);
}
#define atomic_fetch_or atomic_fetch_or
#endif
#if defined(arch_atomic_fetch_or_acquire)
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_acquire(i, v);
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
#endif
#if defined(arch_atomic_fetch_or_release)
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_release(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
#endif
#if defined(arch_atomic_fetch_or_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic_xor(i, v);
}
#define atomic_xor atomic_xor
#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor(i, v);
}
#define atomic_fetch_xor atomic_fetch_xor
#endif
#if defined(arch_atomic_fetch_xor_acquire)
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_acquire(i, v);
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#endif
#if defined(arch_atomic_fetch_xor_release)
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_release(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
#endif
#if defined(arch_atomic_fetch_xor_relaxed)
-static inline int
+static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#endif
#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg(v, i);
}
#define atomic_xchg atomic_xchg
#endif
#if defined(arch_atomic_xchg_acquire)
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_acquire(v, i);
}
#define atomic_xchg_acquire atomic_xchg_acquire
#endif
#if defined(arch_atomic_xchg_release)
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_release(v, i);
}
#define atomic_xchg_release atomic_xchg_release
#endif
#if defined(arch_atomic_xchg_relaxed)
-static inline int
+static __always_inline int
atomic_xchg_relaxed(atomic_t *v, int i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_relaxed atomic_xchg_relaxed
#endif
#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg(v, old, new);
}
#define atomic_cmpxchg atomic_cmpxchg
#endif
#if defined(arch_atomic_cmpxchg_acquire)
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_acquire(v, old, new);
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#endif
#if defined(arch_atomic_cmpxchg_release)
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_release(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
#endif
#if defined(arch_atomic_cmpxchg_relaxed)
-static inline int
+static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
#endif
#if defined(arch_atomic_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg(v, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
#endif
#if defined(arch_atomic_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_acquire(v, old, new);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
#endif
#if defined(arch_atomic_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_release(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
#endif
#if defined(arch_atomic_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic_sub_and_test)
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
#define atomic_sub_and_test atomic_sub_and_test
#endif
#if defined(arch_atomic_dec_and_test)
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
#define atomic_dec_and_test atomic_dec_and_test
#endif
#if defined(arch_atomic_inc_and_test)
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
#define atomic_inc_and_test atomic_inc_and_test
#endif
#if defined(arch_atomic_add_negative)
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
#define atomic_add_negative atomic_add_negative
#endif
#if defined(arch_atomic_fetch_add_unless)
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_fetch_add_unless(v, a, u);
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
#if defined(arch_atomic_add_unless)
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_add_unless(v, a, u);
}
#define atomic_add_unless atomic_add_unless
#endif
#if defined(arch_atomic_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_not_zero(v);
}
#define atomic_inc_not_zero atomic_inc_not_zero
#endif
#if defined(arch_atomic_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_inc_unless_negative(v);
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
#endif
#if defined(arch_atomic_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_unless_positive(v);
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
#endif
#if defined(arch_atomic_dec_if_positive)
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic_dec_if_positive(v);
}
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-static inline s64
+static __always_inline s64
atomic64_read(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read(v);
}
#define atomic64_read atomic64_read
#if defined(arch_atomic64_read_acquire)
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
- kasan_check_read(v, sizeof(*v));
+ instrument_atomic_read(v, sizeof(*v));
return arch_atomic64_read_acquire(v);
}
#define atomic64_read_acquire atomic64_read_acquire
#endif
-static inline void
+static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set(v, i);
}
#define atomic64_set atomic64_set
#if defined(arch_atomic64_set_release)
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_set_release(v, i);
}
#define atomic64_set_release atomic64_set_release
#endif
-static inline void
+static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_add(i, v);
}
#define atomic64_add atomic64_add
#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return(i, v);
}
#define atomic64_add_return atomic64_add_return
#endif
#if defined(arch_atomic64_add_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_acquire(i, v);
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
#endif
#if defined(arch_atomic64_add_return_release)
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_release(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
#endif
#if defined(arch_atomic64_add_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add(i, v);
}
#define atomic64_fetch_add atomic64_fetch_add
#endif
#if defined(arch_atomic64_fetch_add_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_acquire(i, v);
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#endif
#if defined(arch_atomic64_fetch_add_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_release(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
#endif
#if defined(arch_atomic64_fetch_add_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_sub(i, v);
}
#define atomic64_sub atomic64_sub
#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return(i, v);
}
#define atomic64_sub_return atomic64_sub_return
#endif
#if defined(arch_atomic64_sub_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_acquire(i, v);
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#endif
#if defined(arch_atomic64_sub_return_release)
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_release(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
#endif
#if defined(arch_atomic64_sub_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#endif
#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub(i, v);
}
#define atomic64_fetch_sub atomic64_fetch_sub
#endif
#if defined(arch_atomic64_fetch_sub_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_acquire(i, v);
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#endif
#if defined(arch_atomic64_fetch_sub_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_release(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#endif
#if defined(arch_atomic64_fetch_sub_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#endif
#if defined(arch_atomic64_inc)
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
#define atomic64_inc atomic64_inc
#endif
#if defined(arch_atomic64_inc_return)
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
#define atomic64_inc_return atomic64_inc_return
#endif
#if defined(arch_atomic64_inc_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_acquire(v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif
#if defined(arch_atomic64_inc_return_release)
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_release(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif
#if defined(arch_atomic64_inc_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#endif
#if defined(arch_atomic64_fetch_inc)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc(v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif
#if defined(arch_atomic64_fetch_inc_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_acquire(v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif
#if defined(arch_atomic64_fetch_inc_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_release(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif
#if defined(arch_atomic64_fetch_inc_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#endif
#if defined(arch_atomic64_dec)
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
#define atomic64_dec atomic64_dec
#endif
#if defined(arch_atomic64_dec_return)
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
#define atomic64_dec_return atomic64_dec_return
#endif
#if defined(arch_atomic64_dec_return_acquire)
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_acquire(v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif
#if defined(arch_atomic64_dec_return_release)
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_release(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif
#if defined(arch_atomic64_dec_return_relaxed)
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#endif
#if defined(arch_atomic64_fetch_dec)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec(v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif
#if defined(arch_atomic64_fetch_dec_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_acquire(v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif
#if defined(arch_atomic64_fetch_dec_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_release(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif
#if defined(arch_atomic64_fetch_dec_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_and(i, v);
}
#define atomic64_and atomic64_and
#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and(i, v);
}
#define atomic64_fetch_and atomic64_fetch_and
#endif
#if defined(arch_atomic64_fetch_and_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_acquire(i, v);
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#endif
#if defined(arch_atomic64_fetch_and_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_release(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
#endif
#if defined(arch_atomic64_fetch_and_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#endif
#if defined(arch_atomic64_andnot)
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_andnot(i, v);
}
#define atomic64_andnot atomic64_andnot
#endif
#if defined(arch_atomic64_fetch_andnot)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot(i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif
#if defined(arch_atomic64_fetch_andnot_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_acquire(i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif
#if defined(arch_atomic64_fetch_andnot_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_release(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif
#if defined(arch_atomic64_fetch_andnot_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_or(i, v);
}
#define atomic64_or atomic64_or
#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or(i, v);
}
#define atomic64_fetch_or atomic64_fetch_or
#endif
#if defined(arch_atomic64_fetch_or_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_acquire(i, v);
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#endif
#if defined(arch_atomic64_fetch_or_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_release(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
#endif
#if defined(arch_atomic64_fetch_or_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#endif
-static inline void
+static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
arch_atomic64_xor(i, v);
}
#define atomic64_xor atomic64_xor
#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor(i, v);
}
#define atomic64_fetch_xor atomic64_fetch_xor
#endif
#if defined(arch_atomic64_fetch_xor_acquire)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_acquire(i, v);
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif
#if defined(arch_atomic64_fetch_xor_release)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_release(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#endif
#if defined(arch_atomic64_fetch_xor_relaxed)
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#endif
#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg(v, i);
}
#define atomic64_xchg atomic64_xchg
#endif
#if defined(arch_atomic64_xchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_acquire(v, i);
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
#endif
#if defined(arch_atomic64_xchg_release)
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_release(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
#endif
#if defined(arch_atomic64_xchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_relaxed atomic64_xchg_relaxed
#endif
#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg(v, old, new);
}
#define atomic64_cmpxchg atomic64_cmpxchg
#endif
#if defined(arch_atomic64_cmpxchg_acquire)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_acquire(v, old, new);
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
#endif
#if defined(arch_atomic64_cmpxchg_release)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_release(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
#endif
#if defined(arch_atomic64_cmpxchg_relaxed)
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_try_cmpxchg)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg(v, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif
#if defined(arch_atomic64_try_cmpxchg_acquire)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif
#if defined(arch_atomic64_try_cmpxchg_release)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_release(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif
#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
- kasan_check_write(v, sizeof(*v));
- kasan_check_write(old, sizeof(*old));
+ instrument_atomic_write(v, sizeof(*v));
+ instrument_atomic_write(old, sizeof(*old));
return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
#endif
#if defined(arch_atomic64_sub_and_test)
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
#define atomic64_sub_and_test atomic64_sub_and_test
#endif
#if defined(arch_atomic64_dec_and_test)
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
#define atomic64_dec_and_test atomic64_dec_and_test
#endif
#if defined(arch_atomic64_inc_and_test)
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
#define atomic64_inc_and_test atomic64_inc_and_test
#endif
#if defined(arch_atomic64_add_negative)
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
#define atomic64_add_negative atomic64_add_negative
#endif
#if defined(arch_atomic64_fetch_add_unless)
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_fetch_add_unless(v, a, u);
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif
#if defined(arch_atomic64_add_unless)
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_add_unless(v, a, u);
}
#define atomic64_add_unless atomic64_add_unless
#endif
#if defined(arch_atomic64_inc_not_zero)
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
#define atomic64_inc_not_zero atomic64_inc_not_zero
#endif
#if defined(arch_atomic64_inc_unless_negative)
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_inc_unless_negative(v);
}
#define atomic64_inc_unless_negative atomic64_inc_unless_negative
#endif
#if defined(arch_atomic64_dec_unless_positive)
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_unless_positive(v);
}
#define atomic64_dec_unless_positive atomic64_dec_unless_positive
#endif
#if defined(arch_atomic64_dec_if_positive)
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
- kasan_check_write(v, sizeof(*v));
+ instrument_atomic_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
@@ -1644,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1653,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1662,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1671,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1680,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1689,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1698,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1707,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1716,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1725,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1734,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_release(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1743,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
})
#endif
@@ -1751,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
})
#define sync_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_double(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
})
@@ -1780,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v)
#define cmpxchg_double_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+ instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
})
#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// b29b625d5de9280f680e42c7be859b55b15e5f6a
+// 89bf97f3a7509b740845e51ddf31055b48a81f40
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 881c7e27af28..073cf40f431b 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -6,6 +6,7 @@
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
@@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t;
#ifdef CONFIG_64BIT
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic64_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic64_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic64_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic64_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic64_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic64_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic64_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic64_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic64_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic64_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic64_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic64_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic64_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic64_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic64_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic64_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic64_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic64_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic64_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic64_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic64_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic64_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic64_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic64_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic64_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic64_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic64_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic64_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic64_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic64_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic64_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic64_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic64_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic64_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic64_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic64_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic64_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic64_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic64_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic64_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic64_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic64_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic64_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic64_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic64_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic64_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic64_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic64_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic64_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic64_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic64_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic64_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic64_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic64_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic64_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic64_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic64_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic64_dec_if_positive(v);
@@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#else /* CONFIG_64BIT */
-static inline long
+static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
return atomic_read(v);
}
-static inline long
+static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
return atomic_read_acquire(v);
}
-static inline void
+static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
atomic_set(v, i);
}
-static inline void
+static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
atomic_set_release(v, i);
}
-static inline void
+static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
atomic_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
return atomic_add_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
return atomic_add_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
return atomic_add_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
return atomic_add_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
return atomic_fetch_add(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_add_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
return atomic_fetch_add_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_add_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
atomic_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
return atomic_sub_return(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
return atomic_sub_return_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
return atomic_sub_return_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
return atomic_sub_return_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
return atomic_fetch_sub(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_sub_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
return atomic_fetch_sub_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_sub_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
atomic_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
return atomic_inc_return(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
return atomic_inc_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
return atomic_inc_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
return atomic_inc_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
return atomic_fetch_inc(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
return atomic_fetch_inc_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
return atomic_fetch_inc_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
return atomic_fetch_inc_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
atomic_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
return atomic_dec_return(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
return atomic_dec_return_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
return atomic_dec_return_release(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
return atomic_dec_return_relaxed(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
return atomic_fetch_dec(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
return atomic_fetch_dec_acquire(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
return atomic_fetch_dec_release(v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
return atomic_fetch_dec_relaxed(v);
}
-static inline void
+static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
atomic_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
return atomic_fetch_and(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_and_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
return atomic_fetch_and_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_and_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
atomic_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
return atomic_fetch_andnot(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_andnot_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
atomic_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
return atomic_fetch_or(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_or_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
return atomic_fetch_or_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_or_relaxed(i, v);
}
-static inline void
+static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
atomic_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
return atomic_fetch_xor(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
return atomic_fetch_xor_acquire(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
return atomic_fetch_xor_release(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
return atomic_fetch_xor_relaxed(i, v);
}
-static inline long
+static __always_inline long
atomic_long_xchg(atomic_long_t *v, long i)
{
return atomic_xchg(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_acquire(atomic_long_t *v, long i)
{
return atomic_xchg_acquire(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_release(atomic_long_t *v, long i)
{
return atomic_xchg_release(v, i);
}
-static inline long
+static __always_inline long
atomic_long_xchg_relaxed(atomic_long_t *v, long i)
{
return atomic_xchg_relaxed(v, i);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_acquire(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_release(v, old, new);
}
-static inline long
+static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
return atomic_cmpxchg_relaxed(v, old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_acquire(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_release(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
}
-static inline bool
+static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
return atomic_sub_and_test(i, v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
return atomic_dec_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
return atomic_inc_and_test(v);
}
-static inline bool
+static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
return atomic_add_negative(i, v);
}
-static inline long
+static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_fetch_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
return atomic_add_unless(v, a, u);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
return atomic_inc_not_zero(v);
}
-static inline bool
+static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
return atomic_inc_unless_negative(v);
}
-static inline bool
+static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
return atomic_dec_unless_positive(v);
}
-static inline long
+static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
return atomic_dec_if_positive(v);
@@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* CONFIG_64BIT */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// 77558968132ce4f911ad53f6f52ce423006f6268
+// a624200981f552b2c6be4f32fe44da8289f30d87
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 85b28eb80b11..2eacaf7d62f6 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -128,10 +128,10 @@ do { \
#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
@@ -183,10 +183,10 @@ do { \
#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1 = READ_ONCE(*p); \
+ __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
- ___p1; \
+ (typeof(*p))___p1; \
})
#endif
@@ -229,14 +229,14 @@ do { \
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
cpu_relax(); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#endif
@@ -250,10 +250,10 @@ do { \
*/
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
- typeof(*ptr) _val; \
+ __unqual_scalar_typeof(*ptr) _val; \
_val = smp_cond_load_relaxed(ptr, cond_expr); \
smp_acquire__after_ctrl_dep(); \
- _val; \
+ (typeof(*ptr))_val; \
})
#endif
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 18ce3c9e8eec..fb2cb33a4013 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* set_bit - Atomically set a bit in memory
@@ -25,7 +25,7 @@
*/
static inline void set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
}
@@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
}
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index ec53fdeea9ec..b9bec468ae03 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -22,7 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit_lock(nr, addr);
}
@@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 95ff28d128a1..20f788a25ef9 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -11,7 +11,7 @@
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
-#include <linux/kasan-checks.h>
+#include <linux/instrumented.h>
/**
* __set_bit - Set a bit in memory
@@ -24,7 +24,7 @@
*/
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
@@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
@@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
@@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_set_bit(nr, addr);
}
@@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_clear_bit(nr, addr);
}
@@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ instrument_write(addr + BIT_WORD(nr), sizeof(long));
return arch___test_and_change_bit(nr, addr);
}
@@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index cac7404b2bdd..907fa5d16494 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_CACHEFLUSH_H
-#define __ASM_CACHEFLUSH_H
-
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
-
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#ifndef _ASM_GENERIC_CACHEFLUSH_H
+#define _ASM_GENERIC_CACHEFLUSH_H
/*
* The cache doesn't need to be flushed when TLB entries change when
@@ -45,12 +40,14 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
}
#endif
-#ifndef flush_dcache_page
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
+
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
@@ -69,6 +66,10 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
}
#endif
+#ifndef flush_icache_user_range
+#define flush_icache_user_range flush_icache_range
+#endif
+
#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
@@ -76,8 +77,8 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
}
#endif
-#ifndef flush_icache_user_range
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
@@ -100,7 +101,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
+ flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
@@ -109,4 +110,4 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
memcpy(dst, src, len)
#endif
-#endif /* __ASM_CACHEFLUSH_H */
+#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d39ac997dda8..8b1e020e9a03 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -448,17 +448,15 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#define IO_SPACE_LIMIT 0xffff
#endif
-#include <linux/logic_pio.h>
-
/*
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
* implemented on hardware that needs an additional delay for I/O accesses to
* take effect.
*/
-#ifndef inb
-#define inb inb
-static inline u8 inb(unsigned long addr)
+#if !defined(inb) && !defined(_inb)
+#define _inb _inb
+static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -469,9 +467,9 @@ static inline u8 inb(unsigned long addr)
}
#endif
-#ifndef inw
-#define inw inw
-static inline u16 inw(unsigned long addr)
+#if !defined(inw) && !defined(_inw)
+#define _inw _inw
+static inline u16 _inw(unsigned long addr)
{
u16 val;
@@ -482,9 +480,9 @@ static inline u16 inw(unsigned long addr)
}
#endif
-#ifndef inl
-#define inl inl
-static inline u32 inl(unsigned long addr)
+#if !defined(inl) && !defined(_inl)
+#define _inl _inl
+static inline u32 _inl(unsigned long addr)
{
u32 val;
@@ -495,9 +493,9 @@ static inline u32 inl(unsigned long addr)
}
#endif
-#ifndef outb
-#define outb outb
-static inline void outb(u8 value, unsigned long addr)
+#if !defined(outb) && !defined(_outb)
+#define _outb _outb
+static inline void _outb(u8 value, unsigned long addr)
{
__io_pbw();
__raw_writeb(value, PCI_IOBASE + addr);
@@ -505,9 +503,9 @@ static inline void outb(u8 value, unsigned long addr)
}
#endif
-#ifndef outw
-#define outw outw
-static inline void outw(u16 value, unsigned long addr)
+#if !defined(outw) && !defined(_outw)
+#define _outw _outw
+static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
__raw_writew(cpu_to_le16(value), PCI_IOBASE + addr);
@@ -515,9 +513,9 @@ static inline void outw(u16 value, unsigned long addr)
}
#endif
-#ifndef outl
-#define outl outl
-static inline void outl(u32 value, unsigned long addr)
+#if !defined(outl) && !defined(_outl)
+#define _outl _outl
+static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
__raw_writel(cpu_to_le32(value), PCI_IOBASE + addr);
@@ -525,6 +523,32 @@ static inline void outl(u32 value, unsigned long addr)
}
#endif
+#include <linux/logic_pio.h>
+
+#ifndef inb
+#define inb _inb
+#endif
+
+#ifndef inw
+#define inw _inw
+#endif
+
+#ifndef inl
+#define inl _inl
+#endif
+
+#ifndef outb
+#define outb _outb
+#endif
+
+#ifndef outw
+#define outw _outw
+#endif
+
+#ifndef outl
+#define outl _outl
+#endif
+
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
@@ -948,7 +972,7 @@ static inline void iounmap(void __iomem *addr)
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);
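
Note: the io.h change turns the user-visible inb()/outb() family into thin #define aliases of always-available _inb()/_outb() helpers, and moves the <linux/logic_pio.h> include to the point between the two, so a PIO dispatcher included there can take over inb() while still reaching the raw MMIO-backed helpers. A rough sketch of such a dispatcher, in the spirit of what logic_pio does; MY_PIO_BASE, my_bus_read8() and my_inb() are assumptions, not from this patch.

    /* Must sit where <linux/logic_pio.h> is now included: after the _inb()
     * definitions, before the final "#ifndef inb / #define inb _inb" fallback. */
    #define inb my_inb
    static inline u8 my_inb(unsigned long addr)
    {
            if (addr >= MY_PIO_BASE)          /* assumed: a non-MMIO port range */
                    return my_bus_read8(addr); /* assumed host-bridge accessor */
            return _inb(addr);                 /* generic MMIO path kept above */
    }
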
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
deleted file mode 100644
index 829bdb0d6327..000000000000
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PGTABLE_NOP4D_HACK_H
-#define _PGTABLE_NOP4D_HACK_H
-
-#ifndef __ASSEMBLY__
-#include <asm-generic/5level-fixup.h>
-
-#define __PAGETABLE_PUD_FOLDED 1
-
-/*
- * Having the pud type consist of a pgd gets the size right, and allows
- * us to conceptually access the pgd entry that this pud is folded into
- * without casting.
- */
-typedef struct { pgd_t pgd; } pud_t;
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PTRS_PER_PUD 1
-#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pud is never bad, and a pud always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline void pgd_clear(pgd_t *pgd) { }
-#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
-
-#define pgd_populate(mm, pgd, pud) do { } while (0)
-#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
-/*
- * (puds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
-
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
-{
- return (pud_t *)pgd;
-}
-
-#define pud_val(x) (pgd_val((x).pgd))
-#define __pud(x) ((pud_t) { __pgd(x) })
-
-#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
-#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
-
-/*
- * allocating and freeing a pud is trivial: the 1-entry pud is
- * inside the pgd, so has no extra memory associated with it.
- */
-#define pud_alloc_one(mm, address) NULL
-#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, a) do { } while (0)
-
-#undef pud_addr_end
-#define pud_addr_end(addr, end) (end)
-
-#endif /* __ASSEMBLY__ */
-#endif /* _PGTABLE_NOP4D_HACK_H */
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 0d9b28cba16d..3e13acd019ae 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -45,6 +45,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index d3776cb494c0..a9d751fbda9e 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -4,9 +4,6 @@
#ifndef __ASSEMBLY__
-#ifdef __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nop4d-hack.h>
-#else
#include <asm-generic/pgtable-nop4d.h>
#define __PAGETABLE_PUD_FOLDED 1
@@ -46,6 +43,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
return (pud_t *)p4d;
}
+#define pud_offset pud_offset
#define pud_val(x) (p4d_val((x).p4d))
#define __pud(x) ((pud_t) { __p4d(x) })
@@ -65,5 +63,4 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
#define pud_addr_end(addr, end) (end)
#endif /* __ASSEMBLY__ */
-#endif /* !__ARCH_USE_5LEVEL_HACK */
#endif /* _PGTABLE_NOPUD_H */
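
Note: the added "#define pmd_offset pmd_offset" and "#define pud_offset pud_offset" lines use the usual self-define idiom so generic code can test with the preprocessor whether a folded or architecture-specific helper already exists. A minimal sketch of the consuming side; the fallback body is illustrative and only approximates the generic one.

    #ifndef pmd_offset
    /* Provide a default only when neither the architecture nor the folded
     * pgtable-nopmd.h header above defined (and self-defined) pmd_offset. */
    static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
    {
            return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
    }
    #define pmd_offset pmd_offset
    #endif
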
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
deleted file mode 100644
index 7056a25479d6..000000000000
--- a/include/asm-generic/pgtable.h
+++ /dev/null
@@ -1,1322 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_PGTABLE_H
-#define _ASM_GENERIC_PGTABLE_H
-
-#include <linux/pfn.h>
-
-#ifndef __ASSEMBLY__
-#ifdef CONFIG_MMU
-
-#include <linux/mm_types.h>
-#include <linux/bug.h>
-#include <linux/errno.h>
-#include <asm-generic/pgtable_uffd.h>
-
-#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
- defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
-#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
-#endif
-
-/*
- * On almost all architectures and configurations, 0 can be used as the
- * upper ceiling to free_pgtables(): on many architectures it has the same
- * effect as using TASK_SIZE. However, there is one configuration which
- * must impose a more careful limit, to avoid freeing kernel pgtables.
- */
-#ifndef USER_PGTABLES_CEILING
-#define USER_PGTABLES_CEILING 0UL
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty);
-extern int pudp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp,
- pud_t entry, int dirty);
-#else
-static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp,
- pmd_t entry, int dirty)
-{
- BUILD_BUG();
- return 0;
-}
-static inline int pudp_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pud_t *pudp,
- pud_t entry, int dirty)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- int r = 1;
- if (!pte_young(pte))
- r = 0;
- else
- set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
- return r;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
- int r = 1;
- if (!pmd_young(pmd))
- r = 0;
- else
- set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
- return r;
-}
-#else
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-int ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-#else
-/*
- * Despite relevant to THP only, this API is called from generic rmap code
- * under PageTransHuge(), hence needs a dummy implementation for !THP
- */
-static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp)
-{
- BUILD_BUG();
- return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep)
-{
- pte_t pte = *ptep;
- pte_clear(mm, address, ptep);
- return pte;
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmdp)
-{
- pmd_t pmd = *pmdp;
- pmd_clear(pmdp);
- return pmd;
-}
-#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
-static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long address,
- pud_t *pudp)
-{
- pud_t pud = *pudp;
-
- pud_clear(pudp);
- return pud;
-}
-#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
-static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp,
- int full)
-{
- return pmdp_huge_get_and_clear(mm, address, pmdp);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pud_t *pudp,
- int full)
-{
- return pudp_huge_get_and_clear(mm, address, pudp);
-}
-#endif
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
- unsigned long address, pte_t *ptep,
- int full)
-{
- pte_t pte;
- pte = ptep_get_and_clear(mm, address, ptep);
- return pte;
-}
-#endif
-
-
-/*
- * If two threads concurrently fault at the same page, the thread that
- * won the race updates the PTE and its local TLB/Cache. The other thread
- * gives up, simply does nothing, and continues; on architectures where
- * software can update TLB, local TLB can be updated here to avoid next page
- * fault. This function updates TLB only, do nothing with cache or others.
- * It is the difference with function update_mmu_cache.
- */
-#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
-static inline void update_mmu_tlb(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
-{
-}
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#endif
-
-/*
- * Some architectures may be able to avoid expensive synchronization
- * primitives when modifications are made to PTE's which are already
- * not present, or in the process of an address space destruction.
- */
-#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-static inline void pte_clear_not_present_full(struct mm_struct *mm,
- unsigned long address,
- pte_t *ptep,
- int full)
-{
- pte_clear(mm, address, ptep);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep);
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
-extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp);
-extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
- unsigned long address,
- pud_t *pudp);
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-struct mm_struct;
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-/*
- * On some architectures hardware does not set page access bit when accessing
- * memory page, it is responsibilty of software setting this bit. It brings
- * out extra page fault penalty to track page access bit. For optimization page
- * access bit can be set during all page fault flow on these arches.
- * To be differentiate with macro pte_mkyoung, this macro is used on platforms
- * where software maintains page access bit.
- */
-#ifndef pte_sw_mkyoung
-static inline pte_t pte_sw_mkyoung(pte_t pte)
-{
- return pte;
-}
-#define pte_sw_mkyoung pte_sw_mkyoung
-#endif
-
-#ifndef pte_savedwrite
-#define pte_savedwrite pte_write
-#endif
-
-#ifndef pte_mk_savedwrite
-#define pte_mk_savedwrite pte_mkwrite
-#endif
-
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
-#ifndef pmd_savedwrite
-#define pmd_savedwrite pmd_write
-#endif
-
-#ifndef pmd_mk_savedwrite
-#define pmd_mk_savedwrite pmd_mkwrite
-#endif
-
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- pmd_t old_pmd = *pmdp;
- set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
-}
-#else
-static inline void pmdp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pmd_t *pmdp)
-{
- BUILD_BUG();
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline void pudp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pud_t *pudp)
-{
- pud_t old_pud = *pudp;
-
- set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
-}
-#else
-static inline void pudp_set_wrprotect(struct mm_struct *mm,
- unsigned long address, pud_t *pudp)
-{
- BUILD_BUG();
-}
-#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
-#endif
-
-#ifndef pmdp_collapse_flush
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp);
-#else
-static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
-{
- BUILD_BUG();
- return *pmdp;
-}
-#define pmdp_collapse_flush pmdp_collapse_flush
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable);
-#endif
-
-#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * This is an implementation of pmdp_establish() that is only suitable for an
- * architecture that doesn't have hardware dirty/accessed bits. In this case we
- * can't race with CPU which sets these bits and non-atomic aproach is fine.
- */
-static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
- unsigned long address, pmd_t *pmdp, pmd_t pmd)
-{
- pmd_t old_pmd = *pmdp;
- set_pmd_at(vma->vm_mm, address, pmdp, pmd);
- return old_pmd;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PMDP_INVALIDATE
-extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp);
-#endif
-
-#ifndef __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t pte_a, pte_t pte_b)
-{
- return pte_val(pte_a) == pte_val(pte_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTE_UNUSED
-/*
- * Some architectures provide facilities to virtualization guests
- * so that they can flag allocated pages as unused. This allows the
- * host to transparently reclaim unused pages. This function returns
- * whether the pte's page is unused.
- */
-static inline int pte_unused(pte_t pte)
-{
- return 0;
-}
-#endif
-
-#ifndef pte_access_permitted
-#define pte_access_permitted(pte, write) \
- (pte_present(pte) && (!(write) || pte_write(pte)))
-#endif
-
-#ifndef pmd_access_permitted
-#define pmd_access_permitted(pmd, write) \
- (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
-#endif
-
-#ifndef pud_access_permitted
-#define pud_access_permitted(pud, write) \
- (pud_present(pud) && (!(write) || pud_write(pud)))
-#endif
-
-#ifndef p4d_access_permitted
-#define p4d_access_permitted(p4d, write) \
- (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
-#endif
-
-#ifndef pgd_access_permitted
-#define pgd_access_permitted(pgd, write) \
- (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
-#endif
-
-#ifndef __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
- return pmd_val(pmd_a) == pmd_val(pmd_b);
-}
-
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
-{
- return pud_val(pud_a) == pud_val(pud_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_P4D_SAME
-static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
-{
- return p4d_val(p4d_a) == p4d_val(p4d_b);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PGD_SAME
-static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
-{
- return pgd_val(pgd_a) == pgd_val(pgd_b);
-}
-#endif
-
-/*
- * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
- * TLB flush will be required as a result of the "set". For example, use
- * in scenarios where it is known ahead of time that the routine is
- * setting non-present entries, or re-setting an existing entry to the
- * same value. Otherwise, use the typical "set" helpers and flush the
- * TLB.
- */
-#define set_pte_safe(ptep, pte) \
-({ \
- WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
- set_pte(ptep, pte); \
-})
-
-#define set_pmd_safe(pmdp, pmd) \
-({ \
- WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
- set_pmd(pmdp, pmd); \
-})
-
-#define set_pud_safe(pudp, pud) \
-({ \
- WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
- set_pud(pudp, pud); \
-})
-
-#define set_p4d_safe(p4dp, p4d) \
-({ \
- WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
- set_p4d(p4dp, p4d); \
-})
-
-#define set_pgd_safe(pgdp, pgd) \
-({ \
- WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
- set_pgd(pgdp, pgd); \
-})
-
-#ifndef __HAVE_ARCH_DO_SWAP_PAGE
-/*
- * Some architectures support metadata associated with a page. When a
- * page is being swapped out, this metadata must be saved so it can be
- * restored when the page is swapped back in. SPARC M7 and newer
- * processors support an ADI (Application Data Integrity) tag for the
- * page as metadata for the page. arch_do_swap_page() can restore this
- * metadata when a page is swapped back in.
- */
-static inline void arch_do_swap_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t pte, pte_t oldpte)
-{
-
-}
-#endif
-
-#ifndef __HAVE_ARCH_UNMAP_ONE
-/*
- * Some architectures support metadata associated with a page. When a
- * page is being swapped out, this metadata must be saved so it can be
- * restored when the page is swapped back in. SPARC M7 and newer
- * processors support an ADI (Application Data Integrity) tag for the
- * page as metadata for the page. arch_unmap_one() can save this
- * metadata on a swap-out of a page.
- */
-static inline int arch_unmap_one(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t orig_pte)
-{
- return 0;
-}
-#endif
-
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
-#ifndef __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) (pte)
-#endif
-
-#ifndef pte_accessible
-# define pte_accessible(mm, pte) ((void)(pte), 1)
-#endif
-
-#ifndef flush_tlb_fix_spurious_fault
-#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
-#endif
-
-#ifndef pgprot_nx
-#define pgprot_nx(prot) (prot)
-#endif
-
-#ifndef pgprot_noncached
-#define pgprot_noncached(prot) (prot)
-#endif
-
-#ifndef pgprot_writecombine
-#define pgprot_writecombine pgprot_noncached
-#endif
-
-#ifndef pgprot_writethrough
-#define pgprot_writethrough pgprot_noncached
-#endif
-
-#ifndef pgprot_device
-#define pgprot_device pgprot_noncached
-#endif
-
-#ifndef pgprot_modify
-#define pgprot_modify pgprot_modify
-static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
-{
- if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
- newprot = pgprot_noncached(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
- newprot = pgprot_writecombine(newprot);
- if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
- newprot = pgprot_device(newprot);
- return newprot;
-}
-#endif
-
-/*
- * When walking page tables, get the address of the next boundary,
- * or the end address of the range if that comes earlier. Although no
- * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
- */
-
-#define pgd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-
-#ifndef p4d_addr_end
-#define p4d_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pud_addr_end
-#define pud_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-#ifndef pmd_addr_end
-#define pmd_addr_end(addr, end) \
-({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
- (__boundary - 1 < (end) - 1)? __boundary: (end); \
-})
-#endif
-
-/*
- * When walking page tables, we usually want to skip any p?d_none entries;
- * and any p?d_bad entries - reporting the error before resetting to none.
- * Do the tests inline, but report and clear the bad entry in mm/memory.c.
- */
-void pgd_clear_bad(pgd_t *);
-
-#ifndef __PAGETABLE_P4D_FOLDED
-void p4d_clear_bad(p4d_t *);
-#else
-#define p4d_clear_bad(p4d) do { } while (0)
-#endif
-
-#ifndef __PAGETABLE_PUD_FOLDED
-void pud_clear_bad(pud_t *);
-#else
-#define pud_clear_bad(p4d) do { } while (0)
-#endif
-
-void pmd_clear_bad(pmd_t *);
-
-static inline int pgd_none_or_clear_bad(pgd_t *pgd)
-{
- if (pgd_none(*pgd))
- return 1;
- if (unlikely(pgd_bad(*pgd))) {
- pgd_clear_bad(pgd);
- return 1;
- }
- return 0;
-}
-
-static inline int p4d_none_or_clear_bad(p4d_t *p4d)
-{
- if (p4d_none(*p4d))
- return 1;
- if (unlikely(p4d_bad(*p4d))) {
- p4d_clear_bad(p4d);
- return 1;
- }
- return 0;
-}
-
-static inline int pud_none_or_clear_bad(pud_t *pud)
-{
- if (pud_none(*pud))
- return 1;
- if (unlikely(pud_bad(*pud))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-static inline int pmd_none_or_clear_bad(pmd_t *pmd)
-{
- if (pmd_none(*pmd))
- return 1;
- if (unlikely(pmd_bad(*pmd))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep)
-{
- /*
- * Get the current pte state, but zero it out to make it
- * non-present, preventing the hardware from asynchronously
- * updating it.
- */
- return ptep_get_and_clear(vma->vm_mm, addr, ptep);
-}
-
-static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- /*
- * The pte is non-present, so there's no hardware state to
- * preserve.
- */
- set_pte_at(vma->vm_mm, addr, ptep, pte);
-}
-
-#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-/*
- * Start a pte protection read-modify-write transaction, which
- * protects against asynchronous hardware modifications to the pte.
- * The intention is not to prevent the hardware from making pte
- * updates, but to prevent any updates it may make from being lost.
- *
- * This does not protect against other software modifications of the
- * pte; the appropriate pte lock must be held over the transation.
- *
- * Note that this interface is intended to be batchable, meaning that
- * ptep_modify_prot_commit may not actually update the pte, but merely
- * queue the update to be done at some later time. The update must be
- * actually committed before the pte lock is released, however.
- */
-static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep)
-{
- return __ptep_modify_prot_start(vma, addr, ptep);
-}
-
-/*
- * Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
- */
-static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
- unsigned long addr,
- pte_t *ptep, pte_t old_pte, pte_t pte)
-{
- __ptep_modify_prot_commit(vma, addr, ptep, pte);
-}
-#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
-#endif /* CONFIG_MMU */
-
-/*
- * No-op macros that just return the current protection value. Defined here
- * because these macros can be used used even if CONFIG_MMU is not defined.
- */
-#ifndef pgprot_encrypted
-#define pgprot_encrypted(prot) (prot)
-#endif
-
-#ifndef pgprot_decrypted
-#define pgprot_decrypted(prot) (prot)
-#endif
-
-/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables and
- * other process state with the actual context switch code for
- * paravirtualized guests. By convention, only one of the batched
- * update (lazy) modes (CPU, MMU) should be active at any given time,
- * entry should never be nested, and entry and exits should always be
- * paired. This is for sanity of maintaining and reasoning about the
- * kernel code. In this case, the exit (end of the context switch) is
- * in architecture-specific code, and so doesn't need a generic
- * definition.
- */
-#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
-#define arch_start_context_switch(prev) do {} while (0)
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
-#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
-static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline int pmd_swp_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-#endif
-#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
-static inline int pte_soft_dirty(pte_t pte)
-{
- return 0;
-}
-
-static inline int pmd_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pte_t pte_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline int pte_swp_soft_dirty(pte_t pte)
-{
- return 0;
-}
-
-static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
-{
- return pte;
-}
-
-static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-
-static inline int pmd_swp_soft_dirty(pmd_t pmd)
-{
- return 0;
-}
-
-static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
-{
- return pmd;
-}
-#endif
-
-#ifndef __HAVE_PFNMAP_TRACKING
-/*
- * Interfaces that can be used by architecture code to keep track of
- * memory type of pfn mappings specified by the remap_pfn_range,
- * vmf_insert_pfn.
- */
-
-/*
- * track_pfn_remap is called when a _new_ pfn mapping is being established
- * by remap_pfn_range() for physical range indicated by pfn and size.
- */
-static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size)
-{
- return 0;
-}
-
-/*
- * track_pfn_insert is called when a _new_ single pfn is established
- * by vmf_insert_pfn().
- */
-static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
-{
-}
-
-/*
- * track_pfn_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
- */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
-{
- return 0;
-}
-
-/*
- * untrack_pfn is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case pfn, size are zero).
- */
-static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
-{
-}
-
-/*
- * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
- */
-static inline void untrack_pfn_moved(struct vm_area_struct *vma)
-{
-}
-#else
-extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size);
-extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
-extern int track_pfn_copy(struct vm_area_struct *vma);
-extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
-extern void untrack_pfn_moved(struct vm_area_struct *vma);
-#endif
-
-#ifdef __HAVE_COLOR_ZERO_PAGE
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- unsigned long offset_from_zero_pfn = pfn - zero_pfn;
- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
-}
-
-#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
-
-#else
-static inline int is_zero_pfn(unsigned long pfn)
-{
- extern unsigned long zero_pfn;
- return pfn == zero_pfn;
-}
-
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
- extern unsigned long zero_pfn;
- return zero_pfn;
-}
-#endif
-
-#ifdef CONFIG_MMU
-
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_huge(pmd_t pmd)
-{
- return 0;
-}
-#ifndef pmd_write
-static inline int pmd_write(pmd_t pmd)
-{
- BUG();
- return 0;
-}
-#endif /* pmd_write */
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
- BUG();
- return 0;
-}
-#endif /* pud_write */
-
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
-#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
-static inline int pud_trans_huge(pud_t pud)
-{
- return 0;
-}
-#endif
-
-/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
-static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
-{
- pud_t pudval = READ_ONCE(*pud);
-
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
- return 1;
- if (unlikely(pud_bad(pudval))) {
- pud_clear_bad(pud);
- return 1;
- }
- return 0;
-}
-
-/* See pmd_trans_unstable for discussion. */
-static inline int pud_trans_unstable(pud_t *pud)
-{
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
-#else
- return 0;
-#endif
-}
-
-#ifndef pmd_read_atomic
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
- /*
- * Depend on compiler for an atomic pmd read. NOTE: this is
- * only going to work, if the pmdval_t isn't larger than
- * an unsigned long.
- */
- return *pmdp;
-}
-#endif
-
-#ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
-#endif
-/*
- * This function is meant to be used by sites walking pagetables with
- * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
- * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
- * into a null pmd and the transhuge page fault can convert a null pmd
- * into an hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_sem in read mode the pmd becomes
- * stable and stops changing under us only if it's not null and not a
- * transhuge pmd. When those races occurs and this function makes a
- * difference vs the standard pmd_none_or_clear_bad, the result is
- * undefined so behaving like if the pmd was none is safe (because it
- * can return none anyway). The compiler level barrier() is critically
- * important to compute the two checks atomically on the same pmdval.
- *
- * For 32bit kernels with a 64bit large pmd_t this automatically takes
- * care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_sem is hold for reading by the
- * caller (a special atomic read not done by "gcc" as in the generic
- * version above, is also needed when THP is disabled because the page
- * fault can populate the pmd from under us).
- */
-static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
-{
- pmd_t pmdval = pmd_read_atomic(pmd);
- /*
- * The barrier will stabilize the pmdval in a register or on
- * the stack so that it will stop changing under the code.
- *
- * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
- * pmd_read_atomic is allowed to return a not atomic pmdval
- * (for example pointing to an hugepage that has never been
- * mapped in the pmd). The below checks will only care about
- * the low part of the pmd with 32bit PAE x86 anyway, with the
- * exception of pmd_none(). So the important thing is that if
- * the low part of the pmd is found null, the high part will
- * be also null or the pmd_none() check below would be
- * confused.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- barrier();
-#endif
- /*
- * !pmd_present() checks for pmd migration entries
- *
- * The complete check uses is_pmd_migration_entry() in linux/swapops.h
- * But using that requires moving current function and pmd_trans_unstable()
- * to linux/swapops.h to resovle dependency, which is too much code move.
- *
- * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
- * because !pmd_present() pages can only be under migration not swapped
- * out.
- *
- * pmd_none() is preseved for future condition checks on pmd migration
- * entries and not confusing with this function name, although it is
- * redundant with !pmd_present().
- */
- if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
- (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
- return 1;
- if (unlikely(pmd_bad(pmdval))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-/*
- * This is a noop if Transparent Hugepage Support is not built into
- * the kernel. Otherwise it is equivalent to
- * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
- * places that already verified the pmd is not none and they want to
- * walk ptes while holding the mmap sem in read mode (write mode don't
- * need this). If THP is not enabled, the pmd can't go away under the
- * code even if MADV_DONTNEED runs, but if THP is enabled we need to
- * run a pmd_trans_unstable before walking the ptes after
- * split_huge_pmd returns (because it may have run when the pmd become
- * null, but then a page fault can map in a THP and not a regular page).
- */
-static inline int pmd_trans_unstable(pmd_t *pmd)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return pmd_none_or_trans_huge_or_clear_bad(pmd);
-#else
- return 0;
-#endif
-}
-
-#ifndef CONFIG_NUMA_BALANCING
-/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
- * the only case the kernel cares is for NUMA balancing and is only ever set
- * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by by default, implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
- */
-static inline int pte_protnone(pte_t pte)
-{
- return 0;
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
- return 0;
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
-#endif /* CONFIG_MMU */
-
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-
-#ifndef __PAGETABLE_P4D_FOLDED
-int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
-int p4d_clear_huge(p4d_t *p4d);
-#else
-static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
-#endif /* !__PAGETABLE_P4D_FOLDED */
-
-int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
-int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
-int pud_clear_huge(pud_t *pud);
-int pmd_clear_huge(pmd_t *pmd);
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
-int pud_free_pmd_page(pud_t *pud, unsigned long addr);
-int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
-{
- return 0;
-}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
-static inline int pud_clear_huge(pud_t *pud)
-{
- return 0;
-}
-static inline int pmd_clear_huge(pmd_t *pmd)
-{
- return 0;
-}
-static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0;
-}
-static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
-{
- return 0;
-}
-static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
-
-#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * ARCHes with special requirements for evicting THP backing TLB entries can
- * implement this. Otherwise also, it can help optimize normal TLB flush in
- * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB TLB if flush span is greater than a threshold, which will
- * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desitable.
- * e.g. see arch/arc: flush_pmd_tlb_range
- */
-#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#else
-#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
-#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
-#endif
-#endif
-
-struct file;
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
-
-#ifndef CONFIG_X86_ESPFIX64
-static inline void init_espfix_bsp(void) { }
-#endif
-
-extern void __init pgtable_cache_init(void);
-
-#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
-static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
-{
- return true;
-}
-
-static inline bool arch_has_pfn_modify_check(void)
-{
- return false;
-}
-#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
-
-/*
- * Architecture PAGE_KERNEL_* fallbacks
- *
- * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
- * because they really don't support them, or the port needs to be updated to
- * reflect the required functionality. Below are a set of relatively safe
- * fallbacks, as best effort, which we can count on in lieu of the architectures
- * not defining them on their own yet.
- */
-
-#ifndef PAGE_KERNEL_RO
-# define PAGE_KERNEL_RO PAGE_KERNEL
-#endif
-
-#ifndef PAGE_KERNEL_EXEC
-# define PAGE_KERNEL_EXEC PAGE_KERNEL
-#endif
-
-/*
- * Page Table Modification bits for pgtbl_mod_mask.
- *
- * These are used by the p?d_alloc_track*() set of functions an in the generic
- * vmalloc/ioremap code to track at which page-table levels entries have been
- * modified. Based on that the code can better decide when vmalloc and ioremap
- * mapping changes need to be synchronized to other page-tables in the system.
- */
-#define __PGTBL_PGD_MODIFIED 0
-#define __PGTBL_P4D_MODIFIED 1
-#define __PGTBL_PUD_MODIFIED 2
-#define __PGTBL_PMD_MODIFIED 3
-#define __PGTBL_PTE_MODIFIED 4
-
-#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
-#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
-#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
-#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
-#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
-
-/* Page-Table Modification Mask */
-typedef unsigned int pgtbl_mod_mask;
-
-#endif /* !__ASSEMBLY__ */
-
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
-#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
-#endif
-
-/*
- * On some architectures it depends on the mm if the p4d/pud or pmd
- * layer of the page table hierarchy is folded or not.
- */
-#ifndef mm_p4d_folded
-#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
-#endif
-
-#ifndef mm_pud_folded
-#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
-#endif
-
-#ifndef mm_pmd_folded
-#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
-#endif
-
-/*
- * p?d_leaf() - true if this entry is a final mapping to a physical address.
- * This differs from p?d_huge() by the fact that they are always available (if
- * the architecture supports large pages at the appropriate level) even
- * if CONFIG_HUGETLB_PAGE is not defined.
- * Only meaningful when called on a valid entry.
- */
-#ifndef pgd_leaf
-#define pgd_leaf(x) 0
-#endif
-#ifndef p4d_leaf
-#define p4d_leaf(x) 0
-#endif
-#ifndef pud_leaf
-#define pud_leaf(x) 0
-#endif
-#ifndef pmd_leaf
-#define pmd_leaf(x) 0
-#endif
-
-#endif /* _ASM_GENERIC_PGTABLE_H */
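
Note: asm-generic/pgtable.h is removed because its content now lives in include/linux/pgtable.h (the same move as the <asm/pgtable.h> to <linux/pgtable.h> include switch in asm-generic/io.h earlier in this diff); the helpers themselves are unchanged by the move. As the deleted comments explain, a walker holding mmap_sem only for read must treat a pmd that can still change (none, transhuge, or a migration entry) as if it were none. A hedged sketch of the usual caller pattern around pmd_trans_unstable(); walk_one_pmd() is invented for illustration.

    static int walk_one_pmd(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte;

            if (pmd_trans_unstable(pmd))
                    return 0;       /* behave as if pmd_none(): skip this range */

            pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
            /* ... inspect or modify *pte here ... */
            pte_unmap_unlock(pte, ptl);
            return 0;
    }
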
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0a9d042e075a..de1ccdcd5703 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -668,10 +668,6 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
-
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
-
/**
* ttm_bo_io
*
diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h
new file mode 100644
index 000000000000..f19cf8ccbdd2
--- /dev/null
+++ b/include/dt-bindings/clock/agilex-clock.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#ifndef __AGILEX_CLOCK_H
+#define __AGILEX_CLOCK_H
+
+/* fixed rate clocks */
+#define AGILEX_OSC1 0
+#define AGILEX_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX_CB_INTOSC_LS_CLK 2
+#define AGILEX_L4_SYS_FREE_CLK 3
+#define AGILEX_F2S_FREE_CLK 4
+
+/* PLL clocks */
+#define AGILEX_MAIN_PLL_CLK 5
+#define AGILEX_MAIN_PLL_C0_CLK 6
+#define AGILEX_MAIN_PLL_C1_CLK 7
+#define AGILEX_MAIN_PLL_C2_CLK 8
+#define AGILEX_MAIN_PLL_C3_CLK 9
+#define AGILEX_PERIPH_PLL_CLK 10
+#define AGILEX_PERIPH_PLL_C0_CLK 11
+#define AGILEX_PERIPH_PLL_C1_CLK 12
+#define AGILEX_PERIPH_PLL_C2_CLK 13
+#define AGILEX_PERIPH_PLL_C3_CLK 14
+#define AGILEX_MPU_FREE_CLK 15
+#define AGILEX_MPU_CCU_CLK 16
+#define AGILEX_BOOT_CLK 17
+
+/* fixed factor clocks */
+#define AGILEX_L3_MAIN_FREE_CLK 18
+#define AGILEX_NOC_FREE_CLK 19
+#define AGILEX_S2F_USR0_CLK 20
+#define AGILEX_NOC_CLK 21
+#define AGILEX_EMAC_A_FREE_CLK 22
+#define AGILEX_EMAC_B_FREE_CLK 23
+#define AGILEX_EMAC_PTP_FREE_CLK 24
+#define AGILEX_GPIO_DB_FREE_CLK 25
+#define AGILEX_SDMMC_FREE_CLK 26
+#define AGILEX_S2F_USER0_FREE_CLK 27
+#define AGILEX_S2F_USER1_FREE_CLK 28
+#define AGILEX_PSI_REF_FREE_CLK 29
+
+/* Gate clocks */
+#define AGILEX_MPU_CLK 30
+#define AGILEX_MPU_L2RAM_CLK 31
+#define AGILEX_MPU_PERIPH_CLK 32
+#define AGILEX_L4_MAIN_CLK 33
+#define AGILEX_L4_MP_CLK 34
+#define AGILEX_L4_SP_CLK 35
+#define AGILEX_CS_AT_CLK 36
+#define AGILEX_CS_TRACE_CLK 37
+#define AGILEX_CS_PDBG_CLK 38
+#define AGILEX_CS_TIMER_CLK 39
+#define AGILEX_S2F_USER0_CLK 40
+#define AGILEX_EMAC0_CLK 41
+#define AGILEX_EMAC1_CLK 43
+#define AGILEX_EMAC2_CLK 44
+#define AGILEX_EMAC_PTP_CLK 45
+#define AGILEX_GPIO_DB_CLK 46
+#define AGILEX_NAND_CLK 47
+#define AGILEX_PSI_REF_CLK 48
+#define AGILEX_S2F_USER1_CLK 49
+#define AGILEX_SDMMC_CLK 50
+#define AGILEX_SPI_M_CLK 51
+#define AGILEX_USB_CLK 52
+#define AGILEX_NUM_CLKS 53
+
+#endif /* __AGILEX_CLOCK_H */
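
Note: these dt-bindings indices are shared between the device tree and the clock driver: a consumer references <provider-phandle INDEX>, and the driver registers a clk_hw at the same slot. A minimal provider-side sketch using the common onecell helper; sketch_register_clks() and the fixed rate are hypothetical, only the AGILEX_* macros come from the header above.

    #include <linux/clk-provider.h>
    #include <linux/device.h>
    #include <linux/of.h>
    #include <dt-bindings/clock/agilex-clock.h>

    static int sketch_register_clks(struct device *dev, struct device_node *np)
    {
            struct clk_hw_onecell_data *data;

            data = devm_kzalloc(dev, struct_size(data, hws, AGILEX_NUM_CLKS),
                                GFP_KERNEL);
            if (!data)
                    return -ENOMEM;
            data->num = AGILEX_NUM_CLKS;

            /* each binding index is simply an array slot */
            data->hws[AGILEX_MPU_CLK] =
                    clk_hw_register_fixed_rate(dev, "mpu_clk", NULL, 0, 1000000000);

            return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
    }
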
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 38b5554153c8..eba17106608b 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -12,6 +12,7 @@
#define PMC_TYPE_SYSTEM 1
#define PMC_TYPE_PERIPHERAL 2
#define PMC_TYPE_GCK 3
+#define PMC_TYPE_PROGRAMMABLE 4
#define PMC_SLOW 0
#define PMC_MCK 1
@@ -20,6 +21,9 @@
#define PMC_MCK2 4
#define PMC_I2S0_MUX 5
#define PMC_I2S1_MUX 6
+#define PMC_PLLACK 7
+#define PMC_PLLBCK 8
+#define PMC_AUDIOPLLCK 9
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
diff --git a/include/dt-bindings/clock/bt1-ccu.h b/include/dt-bindings/clock/bt1-ccu.h
new file mode 100644
index 000000000000..5f166d27a00a
--- /dev/null
+++ b/include/dt-bindings/clock/bt1-ccu.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU clock indices
+ */
+#ifndef __DT_BINDINGS_CLOCK_BT1_CCU_H
+#define __DT_BINDINGS_CLOCK_BT1_CCU_H
+
+#define CCU_CPU_PLL 0
+#define CCU_SATA_PLL 1
+#define CCU_DDR_PLL 2
+#define CCU_PCIE_PLL 3
+#define CCU_ETH_PLL 4
+
+#define CCU_AXI_MAIN_CLK 0
+#define CCU_AXI_DDR_CLK 1
+#define CCU_AXI_SATA_CLK 2
+#define CCU_AXI_GMAC0_CLK 3
+#define CCU_AXI_GMAC1_CLK 4
+#define CCU_AXI_XGMAC_CLK 5
+#define CCU_AXI_PCIE_M_CLK 6
+#define CCU_AXI_PCIE_S_CLK 7
+#define CCU_AXI_USB_CLK 8
+#define CCU_AXI_HWA_CLK 9
+#define CCU_AXI_SRAM_CLK 10
+
+#define CCU_SYS_SATA_REF_CLK 0
+#define CCU_SYS_APB_CLK 1
+#define CCU_SYS_GMAC0_TX_CLK 2
+#define CCU_SYS_GMAC0_PTP_CLK 3
+#define CCU_SYS_GMAC1_TX_CLK 4
+#define CCU_SYS_GMAC1_PTP_CLK 5
+#define CCU_SYS_XGMAC_REF_CLK 6
+#define CCU_SYS_XGMAC_PTP_CLK 7
+#define CCU_SYS_USB_CLK 8
+#define CCU_SYS_PVT_CLK 9
+#define CCU_SYS_HWA_CLK 10
+#define CCU_SYS_UART_CLK 11
+#define CCU_SYS_I2C1_CLK 12
+#define CCU_SYS_I2C2_CLK 13
+#define CCU_SYS_GPIO_CLK 14
+#define CCU_SYS_TIMER0_CLK 15
+#define CCU_SYS_TIMER1_CLK 16
+#define CCU_SYS_TIMER2_CLK 17
+#define CCU_SYS_WDT_CLK 18
+
+#endif /* __DT_BINDINGS_CLOCK_BT1_CCU_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
index 38145bdcd975..b58370d146e2 100644
--- a/include/dt-bindings/clock/imx7ulp-clock.h
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -58,7 +58,10 @@
#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
-#define IMX7ULP_CLK_SCG1_END 46
+#define IMX7ULP_CLK_CORE 46
+#define IMX7ULP_CLK_HSRUN_CORE 47
+
+#define IMX7ULP_CLK_SCG1_END 48
/* PCC2 */
#define IMX7ULP_CLK_DMA1 0
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 47ab082238b4..7a23f289b27f 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -296,6 +296,94 @@
#define IMX8MP_CLK_ARM 287
#define IMX8MP_CLK_A53_CORE 288
-#define IMX8MP_CLK_END 289
+#define IMX8MP_SYS_PLL1_40M_CG 289
+#define IMX8MP_SYS_PLL1_80M_CG 290
+#define IMX8MP_SYS_PLL1_100M_CG 291
+#define IMX8MP_SYS_PLL1_133M_CG 292
+#define IMX8MP_SYS_PLL1_160M_CG 293
+#define IMX8MP_SYS_PLL1_200M_CG 294
+#define IMX8MP_SYS_PLL1_266M_CG 295
+#define IMX8MP_SYS_PLL1_400M_CG 296
+#define IMX8MP_SYS_PLL2_50M_CG 297
+#define IMX8MP_SYS_PLL2_100M_CG 298
+#define IMX8MP_SYS_PLL2_125M_CG 299
+#define IMX8MP_SYS_PLL2_166M_CG 300
+#define IMX8MP_SYS_PLL2_200M_CG 301
+#define IMX8MP_SYS_PLL2_250M_CG 302
+#define IMX8MP_SYS_PLL2_333M_CG 303
+#define IMX8MP_SYS_PLL2_500M_CG 304
+
+#define IMX8MP_CLK_M7_CORE 305
+#define IMX8MP_CLK_ML_CORE 306
+#define IMX8MP_CLK_GPU3D_CORE 307
+#define IMX8MP_CLK_GPU3D_SHADER_CORE 308
+#define IMX8MP_CLK_GPU2D_CORE 309
+#define IMX8MP_CLK_AUDIO_AXI 310
+#define IMX8MP_CLK_HSIO_AXI 311
+#define IMX8MP_CLK_MEDIA_ISP 312
+
+#define IMX8MP_CLK_END 313
+
+#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2 2
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK3 3
+#define IMX8MP_CLK_AUDIOMIX_SAI2_IPG 4
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1 5
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2 6
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK3 7
+#define IMX8MP_CLK_AUDIOMIX_SAI3_IPG 8
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1 9
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2 10
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK3 11
+#define IMX8MP_CLK_AUDIOMIX_SAI5_IPG 12
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1 13
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2 14
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK3 15
+#define IMX8MP_CLK_AUDIOMIX_SAI6_IPG 16
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1 17
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2 18
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK3 19
+#define IMX8MP_CLK_AUDIOMIX_SAI7_IPG 20
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1 21
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2 22
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK3 23
+#define IMX8MP_CLK_AUDIOMIX_ASRC_IPG 24
+#define IMX8MP_CLK_AUDIOMIX_PDM_IPG 25
+#define IMX8MP_CLK_AUDIOMIX_SDMA2_ROOT 26
+#define IMX8MP_CLK_AUDIOMIX_SDMA3_ROOT 27
+#define IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT 28
+#define IMX8MP_CLK_AUDIOMIX_DSP_ROOT 29
+#define IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT 30
+#define IMX8MP_CLK_AUDIOMIX_EARC_IPG 31
+#define IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG 32
+#define IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG 33
+#define IMX8MP_CLK_AUDIOMIX_EDMA_ROOT 34
+#define IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT 35
+#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
+#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
+#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
+#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
+#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
+#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2_SEL 43
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1_SEL 44
+#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2_SEL 45
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK1_SEL 46
+#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK2_SEL 47
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1_SEL 48
+#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2_SEL 49
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1_SEL 50
+#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2_SEL 51
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1_SEL 52
+#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2_SEL 53
+#define IMX8MP_CLK_AUDIOMIX_PDM_SEL 54
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL 55
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL 56
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS 57
+#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT 58
+
+#define IMX8MP_CLK_AUDIOMIX_END 59
#endif
diff --git a/include/dt-bindings/clock/intel,lgm-clk.h b/include/dt-bindings/clock/intel,lgm-clk.h
new file mode 100644
index 000000000000..92f5be6490bb
--- /dev/null
+++ b/include/dt-bindings/clock/intel,lgm-clk.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ * Lei Chuanhua <Chuanhua.lei@intel.com>
+ * Zhu Yixin <Yixin.zhu@intel.com>
+ */
+#ifndef __INTEL_LGM_CLK_H
+#define __INTEL_LGM_CLK_H
+
+/* PLL clocks */
+#define LGM_CLK_OSC 1
+#define LGM_CLK_PLLPP 2
+#define LGM_CLK_PLL2 3
+#define LGM_CLK_PLL0CZ 4
+#define LGM_CLK_PLL0B 5
+#define LGM_CLK_PLL1 6
+#define LGM_CLK_LJPLL3 7
+#define LGM_CLK_LJPLL4 8
+#define LGM_CLK_PLL0CM0 9
+#define LGM_CLK_PLL0CM1 10
+
+/* clocks from PLLs */
+
+/* ROPLL clocks */
+#define LGM_CLK_PP_HW 15
+#define LGM_CLK_PP_UC 16
+#define LGM_CLK_PP_FXD 17
+#define LGM_CLK_PP_TBM 18
+
+/* PLL2 clocks */
+#define LGM_CLK_DDR 20
+
+/* PLL0CZ */
+#define LGM_CLK_CM 25
+#define LGM_CLK_IC 26
+#define LGM_CLK_SDXC3 27
+
+/* PLL0B */
+#define LGM_CLK_NGI 30
+#define LGM_CLK_NOC4 31
+#define LGM_CLK_SW 32
+#define LGM_CLK_QSPI 33
+#define LGM_CLK_CQEM LGM_CLK_SW
+#define LGM_CLK_EMMC5 LGM_CLK_NOC4
+
+/* PLL1 */
+#define LGM_CLK_CT 35
+#define LGM_CLK_DSP 36
+#define LGM_CLK_VIF 37
+
+/* LJPLL3 */
+#define LGM_CLK_CML 40
+#define LGM_CLK_SERDES 41
+#define LGM_CLK_POOL 42
+#define LGM_CLK_PTP 43
+
+/* LJPLL4 */
+#define LGM_CLK_PCIE 45
+#define LGM_CLK_SATA LGM_CLK_PCIE
+
+/* PLL0CM0 */
+#define LGM_CLK_CPU0 50
+
+/* PLL0CM1 */
+#define LGM_CLK_CPU1 55
+
+/* Miscellaneous clocks */
+#define LGM_CLK_EMMC4 60
+#define LGM_CLK_SDXC2 61
+#define LGM_CLK_EMMC 62
+#define LGM_CLK_SDXC 63
+#define LGM_CLK_SLIC 64
+#define LGM_CLK_DCL 65
+#define LGM_CLK_DOCSIS 66
+#define LGM_CLK_PCM 67
+#define LGM_CLK_DDR_PHY 68
+#define LGM_CLK_PONDEF 69
+#define LGM_CLK_PL25M 70
+#define LGM_CLK_PL10M 71
+#define LGM_CLK_PL1544K 72
+#define LGM_CLK_PL2048K 73
+#define LGM_CLK_PL8K 74
+#define LGM_CLK_PON_NTR 75
+#define LGM_CLK_SYNC0 76
+#define LGM_CLK_SYNC1 77
+#define LGM_CLK_PROGDIV 78
+#define LGM_CLK_OD0 79
+#define LGM_CLK_OD1 80
+#define LGM_CLK_CBPHY0 81
+#define LGM_CLK_CBPHY1 82
+#define LGM_CLK_CBPHY2 83
+#define LGM_CLK_CBPHY3 84
+
+/* Gate clocks */
+/* Gate CLK0 */
+#define LGM_GCLK_C55 100
+#define LGM_GCLK_QSPI 101
+#define LGM_GCLK_EIP197 102
+#define LGM_GCLK_VAULT 103
+#define LGM_GCLK_TOE 104
+#define LGM_GCLK_SDXC 105
+#define LGM_GCLK_EMMC 106
+#define LGM_GCLK_SPI_DBG 107
+#define LGM_GCLK_DMA3 108
+
+/* Gate CLK1 */
+#define LGM_GCLK_DMA0 120
+#define LGM_GCLK_LEDC0 121
+#define LGM_GCLK_LEDC1 122
+#define LGM_GCLK_I2S0 123
+#define LGM_GCLK_I2S1 124
+#define LGM_GCLK_EBU 125
+#define LGM_GCLK_PWM 126
+#define LGM_GCLK_I2C0 127
+#define LGM_GCLK_I2C1 128
+#define LGM_GCLK_I2C2 129
+#define LGM_GCLK_I2C3 130
+#define LGM_GCLK_SSC0 131
+#define LGM_GCLK_SSC1 132
+#define LGM_GCLK_SSC2 133
+#define LGM_GCLK_SSC3 134
+#define LGM_GCLK_GPTC0 135
+#define LGM_GCLK_GPTC1 136
+#define LGM_GCLK_GPTC2 137
+#define LGM_GCLK_GPTC3 138
+#define LGM_GCLK_ASC0 139
+#define LGM_GCLK_ASC1 140
+#define LGM_GCLK_ASC2 141
+#define LGM_GCLK_ASC3 142
+#define LGM_GCLK_PCM0 143
+#define LGM_GCLK_PCM1 144
+#define LGM_GCLK_PCM2 145
+
+/* Gate CLK2 */
+#define LGM_GCLK_PCIE10 150
+#define LGM_GCLK_PCIE11 151
+#define LGM_GCLK_PCIE30 152
+#define LGM_GCLK_PCIE31 153
+#define LGM_GCLK_PCIE20 154
+#define LGM_GCLK_PCIE21 155
+#define LGM_GCLK_PCIE40 156
+#define LGM_GCLK_PCIE41 157
+#define LGM_GCLK_XPCS0 158
+#define LGM_GCLK_XPCS1 159
+#define LGM_GCLK_XPCS2 160
+#define LGM_GCLK_XPCS3 161
+#define LGM_GCLK_SATA0 162
+#define LGM_GCLK_SATA1 163
+#define LGM_GCLK_SATA2 164
+#define LGM_GCLK_SATA3 165
+
+/* Gate CLK3 */
+#define LGM_GCLK_ARCEM4 170
+#define LGM_GCLK_IDMAR1 171
+#define LGM_GCLK_IDMAT0 172
+#define LGM_GCLK_IDMAT1 173
+#define LGM_GCLK_IDMAT2 174
+#define LGM_GCLK_PPV4 175
+#define LGM_GCLK_GSWIPO 176
+#define LGM_GCLK_CQEM 177
+#define LGM_GCLK_XPCS5 178
+#define LGM_GCLK_USB1 179
+#define LGM_GCLK_USB2 180
+
+#endif /* __INTEL_LGM_CLK_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
new file mode 100644
index 000000000000..20664776f497
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+#ifndef __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H
+#define __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H
+
+#define MMP2_CLK_AUDIO_SYSCLK 0
+#define MMP2_CLK_AUDIO_SSPA0 1
+#define MMP2_CLK_AUDIO_SSPA1 2
+
+#define MMP2_CLK_AUDIO_NR_CLKS 3
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 06bb7fe4c62f..87f5ad5df72f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -29,6 +29,8 @@
#define MMP3_CLK_PLL1_P 28
#define MMP3_CLK_PLL2_P 29
#define MMP3_CLK_PLL3 30
+#define MMP2_CLK_I2S0 31
+#define MMP2_CLK_I2S1 32
/* apb periphrals */
#define MMP2_CLK_TWSI0 60
@@ -87,6 +89,7 @@
#define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D
#define MMP3_CLK_GPU_2D 125
#define MMP3_CLK_SDH4 126
+#define MMP2_CLK_AUDIO 127
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 68862aaf977e..4c5965ae1df4 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -107,6 +107,7 @@
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_SYS 174
#define CLKID_VPU 190
#define CLKID_VDEC_1 196
#define CLKID_VDEC_HCODEC 199
diff --git a/include/dt-bindings/clock/mt6765-clk.h b/include/dt-bindings/clock/mt6765-clk.h
new file mode 100644
index 000000000000..eb97e568518e
--- /dev/null
+++ b/include/dt-bindings/clock/mt6765-clk.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_MT6765_H
+#define _DT_BINDINGS_CLK_MT6765_H
+
+/* FIX Clks */
+#define CLK_TOP_CLK26M 0
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL_L 0
+#define CLK_APMIXED_ARMPLL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_MFGPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_UNIV2PLL 6
+#define CLK_APMIXED_MSDCPLL 7
+#define CLK_APMIXED_APLL1 8
+#define CLK_APMIXED_MPLL 9
+#define CLK_APMIXED_ULPOSC1 10
+#define CLK_APMIXED_ULPOSC2 11
+#define CLK_APMIXED_SSUSB26M 12
+#define CLK_APMIXED_APPLL26M 13
+#define CLK_APMIXED_MIPIC0_26M 14
+#define CLK_APMIXED_MDPLLGP26M 15
+#define CLK_APMIXED_MMSYS_F26M 16
+#define CLK_APMIXED_UFS26M 17
+#define CLK_APMIXED_MIPIC1_26M 18
+#define CLK_APMIXED_MEMPLL26M 19
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 20
+#define CLK_APMIXED_MIPID0_26M 21
+#define CLK_APMIXED_NR_CLK 22
+
+/* TOPCKGEN */
+#define CLK_TOP_SYSPLL 0
+#define CLK_TOP_SYSPLL_D2 1
+#define CLK_TOP_SYSPLL1_D2 2
+#define CLK_TOP_SYSPLL1_D4 3
+#define CLK_TOP_SYSPLL1_D8 4
+#define CLK_TOP_SYSPLL1_D16 5
+#define CLK_TOP_SYSPLL_D3 6
+#define CLK_TOP_SYSPLL2_D2 7
+#define CLK_TOP_SYSPLL2_D4 8
+#define CLK_TOP_SYSPLL2_D8 9
+#define CLK_TOP_SYSPLL_D5 10
+#define CLK_TOP_SYSPLL3_D2 11
+#define CLK_TOP_SYSPLL3_D4 12
+#define CLK_TOP_SYSPLL_D7 13
+#define CLK_TOP_SYSPLL4_D2 14
+#define CLK_TOP_SYSPLL4_D4 15
+#define CLK_TOP_USB20_192M 16
+#define CLK_TOP_USB20_192M_D4 17
+#define CLK_TOP_USB20_192M_D8 18
+#define CLK_TOP_USB20_192M_D16 19
+#define CLK_TOP_USB20_192M_D32 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_MPLL 35
+#define CLK_TOP_DA_MPLL_104M_DIV 36
+#define CLK_TOP_DA_MPLL_52M_DIV 37
+#define CLK_TOP_MFGPLL 38
+#define CLK_TOP_MSDCPLL 39
+#define CLK_TOP_MSDCPLL_D2 40
+#define CLK_TOP_APLL1 41
+#define CLK_TOP_APLL1_D2 42
+#define CLK_TOP_APLL1_D4 43
+#define CLK_TOP_APLL1_D8 44
+#define CLK_TOP_ULPOSC1 45
+#define CLK_TOP_ULPOSC1_D2 46
+#define CLK_TOP_ULPOSC1_D4 47
+#define CLK_TOP_ULPOSC1_D8 48
+#define CLK_TOP_ULPOSC1_D16 49
+#define CLK_TOP_ULPOSC1_D32 50
+#define CLK_TOP_DMPLL 51
+#define CLK_TOP_F_FRTC 52
+#define CLK_TOP_F_F26M 53
+#define CLK_TOP_AXI 54
+#define CLK_TOP_MM 55
+#define CLK_TOP_SCP 56
+#define CLK_TOP_MFG 57
+#define CLK_TOP_F_FUART 58
+#define CLK_TOP_SPI 59
+#define CLK_TOP_MSDC50_0 60
+#define CLK_TOP_MSDC30_1 61
+#define CLK_TOP_AUDIO 62
+#define CLK_TOP_AUD_1 63
+#define CLK_TOP_AUD_ENGEN1 64
+#define CLK_TOP_F_FDISP_PWM 65
+#define CLK_TOP_SSPM 66
+#define CLK_TOP_DXCC 67
+#define CLK_TOP_I2C 68
+#define CLK_TOP_F_FPWM 69
+#define CLK_TOP_F_FSENINF 70
+#define CLK_TOP_AES_FDE 71
+#define CLK_TOP_F_BIST2FPC 72
+#define CLK_TOP_ARMPLL_DIVIDER_PLL0 73
+#define CLK_TOP_ARMPLL_DIVIDER_PLL1 74
+#define CLK_TOP_ARMPLL_DIVIDER_PLL2 75
+#define CLK_TOP_DA_USB20_48M_DIV 76
+#define CLK_TOP_DA_UNIV_48M_DIV 77
+#define CLK_TOP_APLL12_DIV0 78
+#define CLK_TOP_APLL12_DIV1 79
+#define CLK_TOP_APLL12_DIV2 80
+#define CLK_TOP_APLL12_DIV3 81
+#define CLK_TOP_ARMPLL_DIVIDER_PLL0_EN 82
+#define CLK_TOP_ARMPLL_DIVIDER_PLL1_EN 83
+#define CLK_TOP_ARMPLL_DIVIDER_PLL2_EN 84
+#define CLK_TOP_FMEM_OCC_DRC_EN 85
+#define CLK_TOP_USB20_48M_EN 86
+#define CLK_TOP_UNIVPLL_48M_EN 87
+#define CLK_TOP_MPLL_104M_EN 88
+#define CLK_TOP_MPLL_52M_EN 89
+#define CLK_TOP_F_UFS_MP_SAP_CFG_EN 90
+#define CLK_TOP_F_BIST2FPC_EN 91
+#define CLK_TOP_MD_32K 92
+#define CLK_TOP_MD_26M 93
+#define CLK_TOP_MD2_32K 94
+#define CLK_TOP_MD2_26M 95
+#define CLK_TOP_AXI_SEL 96
+#define CLK_TOP_MEM_SEL 97
+#define CLK_TOP_MM_SEL 98
+#define CLK_TOP_SCP_SEL 99
+#define CLK_TOP_MFG_SEL 100
+#define CLK_TOP_ATB_SEL 101
+#define CLK_TOP_CAMTG_SEL 102
+#define CLK_TOP_CAMTG1_SEL 103
+#define CLK_TOP_CAMTG2_SEL 104
+#define CLK_TOP_CAMTG3_SEL 105
+#define CLK_TOP_UART_SEL 106
+#define CLK_TOP_SPI_SEL 107
+#define CLK_TOP_MSDC50_0_HCLK_SEL 108
+#define CLK_TOP_MSDC50_0_SEL 109
+#define CLK_TOP_MSDC30_1_SEL 110
+#define CLK_TOP_AUDIO_SEL 111
+#define CLK_TOP_AUD_INTBUS_SEL 112
+#define CLK_TOP_AUD_1_SEL 113
+#define CLK_TOP_AUD_ENGEN1_SEL 114
+#define CLK_TOP_DISP_PWM_SEL 115
+#define CLK_TOP_SSPM_SEL 116
+#define CLK_TOP_DXCC_SEL 117
+#define CLK_TOP_USB_TOP_SEL 118
+#define CLK_TOP_SPM_SEL 119
+#define CLK_TOP_I2C_SEL 120
+#define CLK_TOP_PWM_SEL 121
+#define CLK_TOP_SENINF_SEL 122
+#define CLK_TOP_AES_FDE_SEL 123
+#define CLK_TOP_PWRAP_ULPOSC_SEL 124
+#define CLK_TOP_CAMTM_SEL 125
+#define CLK_TOP_NR_CLK 126
+
+/* INFRACFG */
+#define CLK_IFR_ICUSB 0
+#define CLK_IFR_GCE 1
+#define CLK_IFR_THERM 2
+#define CLK_IFR_I2C_AP 3
+#define CLK_IFR_I2C_CCU 4
+#define CLK_IFR_I2C_SSPM 5
+#define CLK_IFR_I2C_RSV 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_GCE_26M 16
+#define CLK_IFR_CQ_DMA_FPC 17
+#define CLK_IFR_BTIF 18
+#define CLK_IFR_SPI0 19
+#define CLK_IFR_MSDC0 20
+#define CLK_IFR_MSDC1 21
+#define CLK_IFR_TRNG 22
+#define CLK_IFR_AUXADC 23
+#define CLK_IFR_CCIF1_AP 24
+#define CLK_IFR_CCIF1_MD 25
+#define CLK_IFR_AUXADC_MD 26
+#define CLK_IFR_AP_DMA 27
+#define CLK_IFR_DEVICE_APC 28
+#define CLK_IFR_CCIF_AP 29
+#define CLK_IFR_AUDIO 30
+#define CLK_IFR_CCIF_MD 31
+#define CLK_IFR_RG_PWM_FBCLK6 32
+#define CLK_IFR_DISP_PWM 33
+#define CLK_IFR_CLDMA_BCLK 34
+#define CLK_IFR_AUDIO_26M_BCLK 35
+#define CLK_IFR_SPI1 36
+#define CLK_IFR_I2C4 37
+#define CLK_IFR_SPI2 38
+#define CLK_IFR_SPI3 39
+#define CLK_IFR_I2C5 40
+#define CLK_IFR_I2C5_ARBITER 41
+#define CLK_IFR_I2C5_IMM 42
+#define CLK_IFR_I2C1_ARBITER 43
+#define CLK_IFR_I2C1_IMM 44
+#define CLK_IFR_I2C2_ARBITER 45
+#define CLK_IFR_I2C2_IMM 46
+#define CLK_IFR_SPI4 47
+#define CLK_IFR_SPI5 48
+#define CLK_IFR_CQ_DMA 49
+#define CLK_IFR_FAES_FDE 50
+#define CLK_IFR_MSDC0_SELF 51
+#define CLK_IFR_MSDC1_SELF 52
+#define CLK_IFR_I2C6 53
+#define CLK_IFR_AP_MSDC0 54
+#define CLK_IFR_MD_MSDC0 55
+#define CLK_IFR_MSDC0_SRC 56
+#define CLK_IFR_MSDC1_SRC 57
+#define CLK_IFR_AES_TOP0_BCLK 58
+#define CLK_IFR_MCU_PM_BCLK 59
+#define CLK_IFR_CCIF2_AP 60
+#define CLK_IFR_CCIF2_MD 61
+#define CLK_IFR_CCIF3_AP 62
+#define CLK_IFR_CCIF3_MD 63
+#define CLK_IFR_NR_CLK 64
+
+/* AUDIO */
+#define CLK_AUDIO_AFE 0
+#define CLK_AUDIO_22M 1
+#define CLK_AUDIO_APLL_TUNER 2
+#define CLK_AUDIO_ADC 3
+#define CLK_AUDIO_DAC 4
+#define CLK_AUDIO_DAC_PREDIS 5
+#define CLK_AUDIO_TML 6
+#define CLK_AUDIO_I2S1_BCLK 7
+#define CLK_AUDIO_I2S2_BCLK 8
+#define CLK_AUDIO_I2S3_BCLK 9
+#define CLK_AUDIO_I2S4_BCLK 10
+#define CLK_AUDIO_NR_CLK 11
+
+/* MIPI_RX_ANA_CSI0A */
+
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI0A_NR_CLK 1
+
+/* MMSYS_CONFIG */
+
+#define CLK_MM_MDP_RDMA0 0
+#define CLK_MM_MDP_CCORR0 1
+#define CLK_MM_MDP_RSZ0 2
+#define CLK_MM_MDP_RSZ1 3
+#define CLK_MM_MDP_TDSHP0 4
+#define CLK_MM_MDP_WROT0 5
+#define CLK_MM_MDP_WDMA0 6
+#define CLK_MM_DISP_OVL0 7
+#define CLK_MM_DISP_OVL0_2L 8
+#define CLK_MM_DISP_RSZ0 9
+#define CLK_MM_DISP_RDMA0 10
+#define CLK_MM_DISP_WDMA0 11
+#define CLK_MM_DISP_COLOR0 12
+#define CLK_MM_DISP_CCORR0 13
+#define CLK_MM_DISP_AAL0 14
+#define CLK_MM_DISP_GAMMA0 15
+#define CLK_MM_DISP_DITHER0 16
+#define CLK_MM_DSI0 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_SMI_COMMON 19
+#define CLK_MM_SMI_LARB0 20
+#define CLK_MM_SMI_COMM0 21
+#define CLK_MM_SMI_COMM1 22
+#define CLK_MM_CAM_MDP 23
+#define CLK_MM_SMI_IMG 24
+#define CLK_MM_SMI_CAM 25
+#define CLK_MM_IMG_DL_RELAY 26
+#define CLK_MM_IMG_DL_ASYNC_TOP 27
+#define CLK_MM_DIG_DSI 28
+#define CLK_MM_F26M_HRTWT 29
+#define CLK_MM_NR_CLK 30
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB2 0
+#define CLK_IMG_DIP 1
+#define CLK_IMG_FDVT 2
+#define CLK_IMG_DPE 3
+#define CLK_IMG_RSC 4
+#define CLK_IMG_NR_CLK 5
+
+/* VENCSYS */
+
+#define CLK_VENC_SET0_LARB 0
+#define CLK_VENC_SET1_VENC 1
+#define CLK_VENC_SET2_JPGENC 2
+#define CLK_VENC_SET3_VDEC 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB3 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM 2
+#define CLK_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAMSV0 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAM_CCU 8
+#define CLK_CAM_NR_CLK 9
+
+#endif /* _DT_BINDINGS_CLK_MT6765_H */
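
Each block above (APMIXEDSYS, TOPCKGEN, INFRACFG, AUDIO, and so on) restarts its numbering at 0 because each one is a separate clock-controller node in the device tree, so an index is only meaningful together with the matching provider phandle. A minimal, hypothetical consumer sketch; the provider labels (topckgen, infracfg), the node, and the clock-names are placeholders, only the indices come from this header:

    #include <dt-bindings/clock/mt6765-clk.h>

    i2c0 {
            /* mux clock from TOPCKGEN, bus gate from INFRACFG */
            clocks = <&topckgen CLK_TOP_I2C_SEL>,
                     <&infracfg CLK_IFR_I2C_AP>;
            clock-names = "main", "bus";    /* illustrative names */
    };
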
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
new file mode 100644
index 000000000000..0634467c4ce5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8939_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8939_H
+
+#define GPLL0 0
+#define GPLL0_VOTE 1
+#define BIMC_PLL 2
+#define BIMC_PLL_VOTE 3
+#define GPLL1 4
+#define GPLL1_VOTE 5
+#define GPLL2 6
+#define GPLL2_VOTE 7
+#define PCNOC_BFDCD_CLK_SRC 8
+#define SYSTEM_NOC_BFDCD_CLK_SRC 9
+#define CAMSS_AHB_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define CSI0_CLK_SRC 12
+#define CSI1_CLK_SRC 13
+#define GFX3D_CLK_SRC 14
+#define VFE0_CLK_SRC 15
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27
+#define BLSP1_UART1_APPS_CLK_SRC 28
+#define BLSP1_UART2_APPS_CLK_SRC 29
+#define CCI_CLK_SRC 30
+#define CAMSS_GP0_CLK_SRC 31
+#define CAMSS_GP1_CLK_SRC 32
+#define JPEG0_CLK_SRC 33
+#define MCLK0_CLK_SRC 34
+#define MCLK1_CLK_SRC 35
+#define CSI0PHYTIMER_CLK_SRC 36
+#define CSI1PHYTIMER_CLK_SRC 37
+#define CPP_CLK_SRC 38
+#define CRYPTO_CLK_SRC 39
+#define GP1_CLK_SRC 40
+#define GP2_CLK_SRC 41
+#define GP3_CLK_SRC 42
+#define BYTE0_CLK_SRC 43
+#define ESC0_CLK_SRC 44
+#define MDP_CLK_SRC 45
+#define PCLK0_CLK_SRC 46
+#define VSYNC_CLK_SRC 47
+#define PDM2_CLK_SRC 48
+#define SDCC1_APPS_CLK_SRC 49
+#define SDCC2_APPS_CLK_SRC 50
+#define APSS_TCU_CLK_SRC 51
+#define USB_HS_SYSTEM_CLK_SRC 52
+#define VCODEC0_CLK_SRC 53
+#define GCC_BLSP1_AHB_CLK 54
+#define GCC_BLSP1_SLEEP_CLK 55
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67
+#define GCC_BLSP1_UART1_APPS_CLK 68
+#define GCC_BLSP1_UART2_APPS_CLK 69
+#define GCC_BOOT_ROM_AHB_CLK 70
+#define GCC_CAMSS_CCI_AHB_CLK 71
+#define GCC_CAMSS_CCI_CLK 72
+#define GCC_CAMSS_CSI0_AHB_CLK 73
+#define GCC_CAMSS_CSI0_CLK 74
+#define GCC_CAMSS_CSI0PHY_CLK 75
+#define GCC_CAMSS_CSI0PIX_CLK 76
+#define GCC_CAMSS_CSI0RDI_CLK 77
+#define GCC_CAMSS_CSI1_AHB_CLK 78
+#define GCC_CAMSS_CSI1_CLK 79
+#define GCC_CAMSS_CSI1PHY_CLK 80
+#define GCC_CAMSS_CSI1PIX_CLK 81
+#define GCC_CAMSS_CSI1RDI_CLK 82
+#define GCC_CAMSS_CSI_VFE0_CLK 83
+#define GCC_CAMSS_GP0_CLK 84
+#define GCC_CAMSS_GP1_CLK 85
+#define GCC_CAMSS_ISPIF_AHB_CLK 86
+#define GCC_CAMSS_JPEG0_CLK 87
+#define GCC_CAMSS_JPEG_AHB_CLK 88
+#define GCC_CAMSS_JPEG_AXI_CLK 89
+#define GCC_CAMSS_MCLK0_CLK 90
+#define GCC_CAMSS_MCLK1_CLK 91
+#define GCC_CAMSS_MICRO_AHB_CLK 92
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 93
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 94
+#define GCC_CAMSS_AHB_CLK 95
+#define GCC_CAMSS_TOP_AHB_CLK 96
+#define GCC_CAMSS_CPP_AHB_CLK 97
+#define GCC_CAMSS_CPP_CLK 98
+#define GCC_CAMSS_VFE0_CLK 99
+#define GCC_CAMSS_VFE_AHB_CLK 100
+#define GCC_CAMSS_VFE_AXI_CLK 101
+#define GCC_CRYPTO_AHB_CLK 102
+#define GCC_CRYPTO_AXI_CLK 103
+#define GCC_CRYPTO_CLK 104
+#define GCC_OXILI_GMEM_CLK 105
+#define GCC_GP1_CLK 106
+#define GCC_GP2_CLK 107
+#define GCC_GP3_CLK 108
+#define GCC_MDSS_AHB_CLK 109
+#define GCC_MDSS_AXI_CLK 110
+#define GCC_MDSS_BYTE0_CLK 111
+#define GCC_MDSS_ESC0_CLK 112
+#define GCC_MDSS_MDP_CLK 113
+#define GCC_MDSS_PCLK0_CLK 114
+#define GCC_MDSS_VSYNC_CLK 115
+#define GCC_MSS_CFG_AHB_CLK 116
+#define GCC_OXILI_AHB_CLK 117
+#define GCC_OXILI_GFX3D_CLK 118
+#define GCC_PDM2_CLK 119
+#define GCC_PDM_AHB_CLK 120
+#define GCC_PRNG_AHB_CLK 121
+#define GCC_SDCC1_AHB_CLK 122
+#define GCC_SDCC1_APPS_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_GTCU_AHB_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_SMMU_CFG_CLK 129
+#define GCC_VENUS_TBU_CLK 130
+#define GCC_VFE_TBU_CLK 131
+#define GCC_USB2A_PHY_SLEEP_CLK 132
+#define GCC_USB_HS_AHB_CLK 133
+#define GCC_USB_HS_SYSTEM_CLK 134
+#define GCC_VENUS0_AHB_CLK 135
+#define GCC_VENUS0_AXI_CLK 136
+#define GCC_VENUS0_VCODEC0_CLK 137
+#define BIMC_DDR_CLK_SRC 138
+#define GCC_APSS_TCU_CLK 139
+#define GCC_GFX_TCU_CLK 140
+#define BIMC_GPU_CLK_SRC 141
+#define GCC_BIMC_GFX_CLK 142
+#define GCC_BIMC_GPU_CLK 143
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146
+#define ULTAUDIO_XO_CLK_SRC 147
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 148
+#define CODEC_DIGCODEC_CLK_SRC 149
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152
+#define GCC_ULTAUDIO_STC_XO_CLK 153
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158
+#define GCC_CODEC_DIGCODEC_CLK 159
+#define GCC_MSS_Q6_BIMC_AXI_CLK 160
+#define GPLL3 161
+#define GPLL3_VOTE 162
+#define GPLL4 163
+#define GPLL4_VOTE 164
+#define GPLL5 165
+#define GPLL5_VOTE 166
+#define GPLL6 167
+#define GPLL6_VOTE 168
+#define BYTE1_CLK_SRC 169
+#define GCC_MDSS_BYTE1_CLK 170
+#define ESC1_CLK_SRC 171
+#define GCC_MDSS_ESC1_CLK 172
+#define PCLK1_CLK_SRC 173
+#define GCC_MDSS_PCLK1_CLK 174
+#define GCC_GFX_TBU_CLK 175
+#define GCC_CPP_TBU_CLK 176
+#define GCC_MDP_RT_TBU_CLK 177
+#define USB_FS_SYSTEM_CLK_SRC 178
+#define USB_FS_IC_CLK_SRC 179
+#define GCC_USB_FS_AHB_CLK 180
+#define GCC_USB_FS_IC_CLK 181
+#define GCC_USB_FS_SYSTEM_CLK 182
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 183
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
+#define GCC_OXILI_TIMER_CLK 185
+
+/* Indexes for GDSCs */
+#define BIMC_GDSC 0
+#define VENUS_GDSC 1
+#define MDSS_GDSC 2
+#define JPEG_GDSC 3
+#define VFE_GDSC 4
+#define OXILI_GDSC 5
+#define VENUS_CORE0_GDSC 6
+#define VENUS_CORE1_GDSC 7
+
+#endif
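
Alongside the clock indices, this header exports GDSC indices, so the same gcc node can be referenced both as a clock provider and as a power-domain provider. A hedged sketch; the gcc label, the consumer node, and the clock-names are illustrative:

    #include <dt-bindings/clock/qcom,gcc-msm8939.h>

    mdss {
            clocks = <&gcc GCC_MDSS_AHB_CLK>,
                     <&gcc GCC_MDSS_AXI_CLK>;
            clock-names = "iface", "bus";   /* illustrative names */
            power-domains = <&gcc MDSS_GDSC>;
    };
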
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 63e02dc32a0b..6a73a174f049 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -183,6 +183,7 @@
#define GCC_MSS_SNOC_AXI_CLK 174
#define GCC_MSS_MNOC_BIMC_AXI_CLK 175
#define GCC_BIMC_GFX_CLK 176
+#define UFS_UNIPRO_CORE_CLK_SRC 177
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
index 1258fd05db68..992b67b7e5e4 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc7180.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -137,6 +137,7 @@
#define GCC_MSS_NAV_AXI_CLK 127
#define GCC_MSS_Q6_MEMNOC_AXI_CLK 128
#define GCC_MSS_SNOC_AXI_CLK 129
+#define GCC_SEC_CTRL_CLK_SRC 130
/* GCC resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
diff --git a/include/dt-bindings/clock/r8a7742-cpg-mssr.h b/include/dt-bindings/clock/r8a7742-cpg-mssr.h
new file mode 100644
index 000000000000..e68191c24881
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7742-cpg-mssr.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7742 CPG Core Clocks */
+#define R8A7742_CLK_Z 0
+#define R8A7742_CLK_Z2 1
+#define R8A7742_CLK_ZG 2
+#define R8A7742_CLK_ZTR 3
+#define R8A7742_CLK_ZTRD2 4
+#define R8A7742_CLK_ZT 5
+#define R8A7742_CLK_ZX 6
+#define R8A7742_CLK_ZS 7
+#define R8A7742_CLK_HP 8
+#define R8A7742_CLK_B 9
+#define R8A7742_CLK_LB 10
+#define R8A7742_CLK_P 11
+#define R8A7742_CLK_CL 12
+#define R8A7742_CLK_M2 13
+#define R8A7742_CLK_ZB3 14
+#define R8A7742_CLK_ZB3D2 15
+#define R8A7742_CLK_DDR 16
+#define R8A7742_CLK_SDH 17
+#define R8A7742_CLK_SD0 18
+#define R8A7742_CLK_SD1 19
+#define R8A7742_CLK_SD2 20
+#define R8A7742_CLK_SD3 21
+#define R8A7742_CLK_MMC0 22
+#define R8A7742_CLK_MMC1 23
+#define R8A7742_CLK_MP 24
+#define R8A7742_CLK_QSPI 25
+#define R8A7742_CLK_CP 26
+#define R8A7742_CLK_RCAN 27
+#define R8A7742_CLK_R 28
+#define R8A7742_CLK_OSC 29
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__ */
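
The constants above cover only the CPG core clocks; module clocks are addressed by their MSSR number instead, using the CPG_CORE and CPG_MOD specifier types pulled in from renesas-cpg-mssr.h. A hypothetical consumer sketch; the cpg label, the nodes, and the module number are placeholders:

    #include <dt-bindings/clock/r8a7742-cpg-mssr.h>

    sdhi0 {
            /* core clock by index from this header ... */
            clocks = <&cpg CPG_CORE R8A7742_CLK_SD0>;
    };

    scif0 {
            /* ... or a module clock by its MSSR number (illustrative) */
            clocks = <&cpg CPG_MOD 721>;
    };
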
diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h
index 901ba59676c2..4e030421641f 100644
--- a/include/dt-bindings/clock/sprd,sc9863a-clk.h
+++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h
@@ -308,6 +308,11 @@
#define CLK_MCPHY_CFG_EB 14
#define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1)
+#define CLK_MIPI_CSI 0
+#define CLK_MIPI_CSI_S 1
+#define CLK_MIPI_CSI_M 2
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI_M + 1)
+
#define CLK_SIM0_EB 0
#define CLK_IIS0_EB 1
#define CLK_IIS1_EB 2
diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h
index df59aaf5bf34..a93426f008ac 100644
--- a/include/dt-bindings/clock/tegra114-car.h
+++ b/include/dt-bindings/clock/tegra114-car.h
@@ -272,10 +272,10 @@
#define TEGRA114_CLK_AUDIO3 242
#define TEGRA114_CLK_AUDIO4 243
#define TEGRA114_CLK_SPDIF 244
-#define TEGRA114_CLK_CLK_OUT_1 245
-#define TEGRA114_CLK_CLK_OUT_2 246
-#define TEGRA114_CLK_CLK_OUT_3 247
-#define TEGRA114_CLK_BLINK 248
+/* 245 */
+/* 246 */
+/* 247 */
+/* 248 */
#define TEGRA114_CLK_OSC 249
/* 250 */
/* 251 */
@@ -335,9 +335,9 @@
#define TEGRA114_CLK_AUDIO3_MUX 303
#define TEGRA114_CLK_AUDIO4_MUX 304
#define TEGRA114_CLK_SPDIF_MUX 305
-#define TEGRA114_CLK_CLK_OUT_1_MUX 306
-#define TEGRA114_CLK_CLK_OUT_2_MUX 307
-#define TEGRA114_CLK_CLK_OUT_3_MUX 308
+/* 306 */
+/* 307 */
+/* 308 */
#define TEGRA114_CLK_DSIA_MUX 309
#define TEGRA114_CLK_DSIB_MUX 310
#define TEGRA114_CLK_XUSB_SS_DIV2 311
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index 2a9acd592bff..c59f9de01b4d 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -271,10 +271,10 @@
#define TEGRA124_CLK_AUDIO3 242
#define TEGRA124_CLK_AUDIO4 243
#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
+/* 245 */
+/* 246 */
+/* 247 */
+/* 248 */
#define TEGRA124_CLK_OSC 249
/* 250 */
/* 251 */
@@ -334,9 +334,9 @@
#define TEGRA124_CLK_AUDIO3_MUX 303
#define TEGRA124_CLK_AUDIO4_MUX 304
#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 306 */
+/* 307 */
+/* 308 */
/* 309 */
/* 310 */
#define TEGRA124_CLK_SOR0_LVDS 311 /* deprecated */
diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h
index b21a0eb32921..fe541f627965 100644
--- a/include/dt-bindings/clock/tegra20-car.h
+++ b/include/dt-bindings/clock/tegra20-car.h
@@ -131,7 +131,7 @@
#define TEGRA20_CLK_CCLK 108
#define TEGRA20_CLK_HCLK 109
#define TEGRA20_CLK_PCLK 110
-#define TEGRA20_CLK_BLINK 111
+/* 111 */
#define TEGRA20_CLK_PLL_A 112
#define TEGRA20_CLK_PLL_A_OUT0 113
#define TEGRA20_CLK_PLL_C 114
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 7a8f10b9a66d..ab8b8a737a0a 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -306,10 +306,10 @@
#define TEGRA210_CLK_AUDIO3 274
#define TEGRA210_CLK_AUDIO4 275
#define TEGRA210_CLK_SPDIF 276
-#define TEGRA210_CLK_CLK_OUT_1 277
-#define TEGRA210_CLK_CLK_OUT_2 278
-#define TEGRA210_CLK_CLK_OUT_3 279
-#define TEGRA210_CLK_BLINK 280
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
#define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */
#define TEGRA210_CLK_SOR0_OUT 281
#define TEGRA210_CLK_SOR1_OUT 282
@@ -351,14 +351,14 @@
#define TEGRA210_CLK_PLL_P_OUT_XUSB 317
#define TEGRA210_CLK_XUSB_SSP_SRC 318
#define TEGRA210_CLK_PLL_RE_OUT1 319
-/* 320 */
-/* 321 */
+#define TEGRA210_CLK_PLL_MB_UD 320
+#define TEGRA210_CLK_PLL_P_UD 321
#define TEGRA210_CLK_ISP 322
#define TEGRA210_CLK_PLL_A_OUT_ADSP 323
#define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324
/* 325 */
#define TEGRA210_CLK_OSC 326
-/* 327 */
+#define TEGRA210_CLK_CSI_TPG 327
/* 328 */
/* 329 */
/* 330 */
@@ -388,9 +388,9 @@
#define TEGRA210_CLK_AUDIO3_MUX 353
#define TEGRA210_CLK_AUDIO4_MUX 354
#define TEGRA210_CLK_SPDIF_MUX 355
-#define TEGRA210_CLK_CLK_OUT_1_MUX 356
-#define TEGRA210_CLK_CLK_OUT_2_MUX 357
-#define TEGRA210_CLK_CLK_OUT_3_MUX 358
+/* 356 */
+/* 357 */
+/* 358 */
#define TEGRA210_CLK_DSIA_MUX 359
#define TEGRA210_CLK_DSIB_MUX 360
/* 361 */
diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
index 7b542c10fc27..f193663e6f28 100644
--- a/include/dt-bindings/clock/tegra30-car.h
+++ b/include/dt-bindings/clock/tegra30-car.h
@@ -232,11 +232,11 @@
#define TEGRA30_CLK_AUDIO3 204
#define TEGRA30_CLK_AUDIO4 205
#define TEGRA30_CLK_SPDIF 206
-#define TEGRA30_CLK_CLK_OUT_1 207 /* (extern1) */
-#define TEGRA30_CLK_CLK_OUT_2 208 /* (extern2) */
-#define TEGRA30_CLK_CLK_OUT_3 209 /* (extern3) */
+/* 207 */
+/* 208 */
+/* 209 */
#define TEGRA30_CLK_SCLK 210
-#define TEGRA30_CLK_BLINK 211
+/* 211 */
#define TEGRA30_CLK_CCLK_G 212
#define TEGRA30_CLK_CCLK_LP 213
#define TEGRA30_CLK_TWD 214
@@ -262,9 +262,9 @@
/* 297 */
/* 298 */
/* 299 */
-#define TEGRA30_CLK_CLK_OUT_1_MUX 300
-#define TEGRA30_CLK_CLK_OUT_2_MUX 301
-#define TEGRA30_CLK_CLK_OUT_3_MUX 302
+/* 300 */
+/* 301 */
+/* 302 */
#define TEGRA30_CLK_AUDIO0_MUX 303
#define TEGRA30_CLK_AUDIO1_MUX 304
#define TEGRA30_CLK_AUDIO2_MUX 305
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h
index bbaebaf7adb9..0367c8c02e16 100644
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ b/include/dt-bindings/clock/x1000-cgu.h
@@ -12,33 +12,41 @@
#ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__
#define __DT_BINDINGS_CLOCK_X1000_CGU_H__
-#define X1000_CLK_EXCLK 0
-#define X1000_CLK_RTCLK 1
-#define X1000_CLK_APLL 2
-#define X1000_CLK_MPLL 3
-#define X1000_CLK_SCLKA 4
-#define X1000_CLK_CPUMUX 5
-#define X1000_CLK_CPU 6
-#define X1000_CLK_L2CACHE 7
-#define X1000_CLK_AHB0 8
-#define X1000_CLK_AHB2PMUX 9
-#define X1000_CLK_AHB2 10
-#define X1000_CLK_PCLK 11
-#define X1000_CLK_DDR 12
-#define X1000_CLK_MAC 13
-#define X1000_CLK_MSCMUX 14
-#define X1000_CLK_MSC0 15
-#define X1000_CLK_MSC1 16
-#define X1000_CLK_SSIPLL 17
-#define X1000_CLK_SSIMUX 18
-#define X1000_CLK_SFC 19
-#define X1000_CLK_I2C0 20
-#define X1000_CLK_I2C1 21
-#define X1000_CLK_I2C2 22
-#define X1000_CLK_UART0 23
-#define X1000_CLK_UART1 24
-#define X1000_CLK_UART2 25
-#define X1000_CLK_SSI 26
-#define X1000_CLK_PDMA 27
+#define X1000_CLK_EXCLK 0
+#define X1000_CLK_RTCLK 1
+#define X1000_CLK_APLL 2
+#define X1000_CLK_MPLL 3
+#define X1000_CLK_OTGPHY 4
+#define X1000_CLK_SCLKA 5
+#define X1000_CLK_CPUMUX 6
+#define X1000_CLK_CPU 7
+#define X1000_CLK_L2CACHE 8
+#define X1000_CLK_AHB0 9
+#define X1000_CLK_AHB2PMUX 10
+#define X1000_CLK_AHB2 11
+#define X1000_CLK_PCLK 12
+#define X1000_CLK_DDR 13
+#define X1000_CLK_MAC 14
+#define X1000_CLK_LCD 15
+#define X1000_CLK_MSCMUX 16
+#define X1000_CLK_MSC0 17
+#define X1000_CLK_MSC1 18
+#define X1000_CLK_OTG 19
+#define X1000_CLK_SSIPLL 20
+#define X1000_CLK_SSIPLL_DIV2 21
+#define X1000_CLK_SSIMUX 22
+#define X1000_CLK_EMC 23
+#define X1000_CLK_EFUSE 24
+#define X1000_CLK_SFC 25
+#define X1000_CLK_I2C0 26
+#define X1000_CLK_I2C1 27
+#define X1000_CLK_I2C2 28
+#define X1000_CLK_UART0 29
+#define X1000_CLK_UART1 30
+#define X1000_CLK_UART2 31
+#define X1000_CLK_TCU 32
+#define X1000_CLK_SSI 33
+#define X1000_CLK_OST 34
+#define X1000_CLK_PDMA 35
#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/x1830-cgu.h
new file mode 100644
index 000000000000..801e1d09c881
--- /dev/null
+++ b/include/dt-bindings/clock/x1830-cgu.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,x1830-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the x1830 programmer's manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_X1830_CGU_H__
+#define __DT_BINDINGS_CLOCK_X1830_CGU_H__
+
+#define X1830_CLK_EXCLK 0
+#define X1830_CLK_RTCLK 1
+#define X1830_CLK_APLL 2
+#define X1830_CLK_MPLL 3
+#define X1830_CLK_EPLL 4
+#define X1830_CLK_VPLL 5
+#define X1830_CLK_OTGPHY 6
+#define X1830_CLK_SCLKA 7
+#define X1830_CLK_CPUMUX 8
+#define X1830_CLK_CPU 9
+#define X1830_CLK_L2CACHE 10
+#define X1830_CLK_AHB0 11
+#define X1830_CLK_AHB2PMUX 12
+#define X1830_CLK_AHB2 13
+#define X1830_CLK_PCLK 14
+#define X1830_CLK_DDR 15
+#define X1830_CLK_MAC 16
+#define X1830_CLK_LCD 17
+#define X1830_CLK_MSCMUX 18
+#define X1830_CLK_MSC0 19
+#define X1830_CLK_MSC1 20
+#define X1830_CLK_SSIPLL 21
+#define X1830_CLK_SSIPLL_DIV2 22
+#define X1830_CLK_SSIMUX 23
+#define X1830_CLK_EMC 24
+#define X1830_CLK_EFUSE 25
+#define X1830_CLK_OTG 26
+#define X1830_CLK_SSI0 27
+#define X1830_CLK_SMB0 28
+#define X1830_CLK_SMB1 29
+#define X1830_CLK_SMB2 30
+#define X1830_CLK_UART0 31
+#define X1830_CLK_UART1 32
+#define X1830_CLK_SSI1 33
+#define X1830_CLK_SFC 34
+#define X1830_CLK_PDMA 35
+#define X1830_CLK_TCU 36
+#define X1830_CLK_DTRNG 37
+#define X1830_CLK_OST 38
+
+#endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */
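
As with the X1000 list just above, consumers reference the ingenic,x1830-cgu node with a single cell taken from this table. A hedged usage sketch; the cgu label, the node, the clock-names value, and the rate are placeholders:

    #include <dt-bindings/clock/x1830-cgu.h>

    msc0 {
            clocks = <&cgu X1830_CLK_MSC0>;
            clock-names = "mmc";                    /* illustrative name */
            assigned-clocks = <&cgu X1830_CLK_MSC0>;
            assigned-clock-rates = <48000000>;      /* illustrative rate */
    };
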
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
index 4e61f6485097..54278d5c1856 100644
--- a/include/dt-bindings/firmware/imx/rsrc.h
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -547,4 +547,88 @@
#define IMX_SC_R_ATTESTATION 545
#define IMX_SC_R_LAST 546
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC CONTROL
+ */
+#define IMX_SC_C_TEMP 0
+#define IMX_SC_C_TEMP_HI 1
+#define IMX_SC_C_TEMP_LOW 2
+#define IMX_SC_C_PXL_LINK_MST1_ADDR 3
+#define IMX_SC_C_PXL_LINK_MST2_ADDR 4
+#define IMX_SC_C_PXL_LINK_MST_ENB 5
+#define IMX_SC_C_PXL_LINK_MST1_ENB 6
+#define IMX_SC_C_PXL_LINK_MST2_ENB 7
+#define IMX_SC_C_PXL_LINK_SLV1_ADDR 8
+#define IMX_SC_C_PXL_LINK_SLV2_ADDR 9
+#define IMX_SC_C_PXL_LINK_MST_VLD 10
+#define IMX_SC_C_PXL_LINK_MST1_VLD 11
+#define IMX_SC_C_PXL_LINK_MST2_VLD 12
+#define IMX_SC_C_SINGLE_MODE 13
+#define IMX_SC_C_ID 14
+#define IMX_SC_C_PXL_CLK_POLARITY 15
+#define IMX_SC_C_LINESTATE 16
+#define IMX_SC_C_PCIE_G_RST 17
+#define IMX_SC_C_PCIE_BUTTON_RST 18
+#define IMX_SC_C_PCIE_PERST 19
+#define IMX_SC_C_PHY_RESET 20
+#define IMX_SC_C_PXL_LINK_RATE_CORRECTION 21
+#define IMX_SC_C_PANIC 22
+#define IMX_SC_C_PRIORITY_GROUP 23
+#define IMX_SC_C_TXCLK 24
+#define IMX_SC_C_CLKDIV 25
+#define IMX_SC_C_DISABLE_50 26
+#define IMX_SC_C_DISABLE_125 27
+#define IMX_SC_C_SEL_125 28
+#define IMX_SC_C_MODE 29
+#define IMX_SC_C_SYNC_CTRL0 30
+#define IMX_SC_C_KACHUNK_CNT 31
+#define IMX_SC_C_KACHUNK_SEL 32
+#define IMX_SC_C_SYNC_CTRL1 33
+#define IMX_SC_C_DPI_RESET 34
+#define IMX_SC_C_MIPI_RESET 35
+#define IMX_SC_C_DUAL_MODE 36
+#define IMX_SC_C_VOLTAGE 37
+#define IMX_SC_C_PXL_LINK_SEL 38
+#define IMX_SC_C_OFS_SEL 39
+#define IMX_SC_C_OFS_AUDIO 40
+#define IMX_SC_C_OFS_PERIPH 41
+#define IMX_SC_C_OFS_IRQ 42
+#define IMX_SC_C_RST0 43
+#define IMX_SC_C_RST1 44
+#define IMX_SC_C_SEL0 45
+#define IMX_SC_C_CALIB0 46
+#define IMX_SC_C_CALIB1 47
+#define IMX_SC_C_CALIB2 48
+#define IMX_SC_C_IPG_DEBUG 49
+#define IMX_SC_C_IPG_DOZE 50
+#define IMX_SC_C_IPG_WAIT 51
+#define IMX_SC_C_IPG_STOP 52
+#define IMX_SC_C_IPG_STOP_MODE 53
+#define IMX_SC_C_IPG_STOP_ACK 54
+#define IMX_SC_C_SYNC_CTRL 55
+#define IMX_SC_C_OFS_AUDIO_ALT 56
+#define IMX_SC_C_DSP_BYP 57
+#define IMX_SC_C_CLK_GEN_EN 58
+#define IMX_SC_C_INTF_SEL 59
+#define IMX_SC_C_RXC_DLY 60
+#define IMX_SC_C_TIMER_SEL 61
+#define IMX_SC_C_LAST 62
+
#endif /* __DT_BINDINGS_RSCRC_IMX_H */
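
The new IMX_SC_PM_CLK_* values form the second cell of an SCU clock specifier: the first cell names the resource (one of the IMX_SC_R_* values in the unchanged part of this header) and the second selects which of that resource's clocks is meant. A rough sketch, assuming an SCU clock provider labelled clk with #clock-cells = <2> and a power-domain provider labelled pd; the consumer node and the names are placeholders:

    #include <dt-bindings/firmware/imx/rsrc.h>

    lpuart0 {
            /* peripheral clock of the UART0 resource, resolved by SCU firmware */
            clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>;
            clock-names = "ipg";                    /* illustrative name */
            power-domains = <&pd IMX_SC_R_UART_0>;
    };
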
diff --git a/include/dt-bindings/interconnect/imx8mm.h b/include/dt-bindings/interconnect/imx8mm.h
new file mode 100644
index 000000000000..8f10bb06cb59
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mm.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Copyright (c) 2019-2020, NXP
+ * Author: Alexandre Bailon <abailon@baylibre.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MM_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MM_H
+
+#define IMX8MM_ICN_NOC 1
+#define IMX8MM_ICS_DRAM 2
+#define IMX8MM_ICS_OCRAM 3
+#define IMX8MM_ICM_A53 4
+
+#define IMX8MM_ICM_VPU_H1 5
+#define IMX8MM_ICM_VPU_G1 6
+#define IMX8MM_ICM_VPU_G2 7
+#define IMX8MM_ICN_VIDEO 8
+
+#define IMX8MM_ICM_GPU2D 9
+#define IMX8MM_ICM_GPU3D 10
+#define IMX8MM_ICN_GPU 11
+
+#define IMX8MM_ICM_CSI 12
+#define IMX8MM_ICM_LCDIF 13
+#define IMX8MM_ICN_MIPI 14
+
+#define IMX8MM_ICM_USB1 15
+#define IMX8MM_ICM_USB2 16
+#define IMX8MM_ICM_PCIE 17
+#define IMX8MM_ICN_HSIO 18
+
+#define IMX8MM_ICM_SDMA2 19
+#define IMX8MM_ICM_SDMA3 20
+#define IMX8MM_ICN_AUDIO 21
+
+#define IMX8MM_ICN_ENET 22
+#define IMX8MM_ICM_ENET 23
+
+#define IMX8MM_ICN_MAIN 24
+#define IMX8MM_ICM_NAND 25
+#define IMX8MM_ICM_SDMA1 26
+#define IMX8MM_ICM_USDHC1 27
+#define IMX8MM_ICM_USDHC2 28
+#define IMX8MM_ICM_USDHC3 29
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MM_H */
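
These IDs name initiator (ICM), target (ICS), and NoC (ICN) nodes for the generic interconnect binding: a consumer lists <provider master, provider slave> pairs describing the path whose bandwidth it wants scaled. A hypothetical sketch, assuming the provider node is labelled noc; the consumer node and the name string are placeholders:

    #include <dt-bindings/interconnect/imx8mm.h>

    lcdif {
            /* scale the LCDIF -> DRAM path */
            interconnects = <&noc IMX8MM_ICM_LCDIF &noc IMX8MM_ICS_DRAM>;
            interconnect-names = "dram";
    };
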
diff --git a/include/dt-bindings/interconnect/imx8mn.h b/include/dt-bindings/interconnect/imx8mn.h
new file mode 100644
index 000000000000..307b977100b6
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mn.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MN_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MN_H
+
+#define IMX8MN_ICN_NOC 1
+#define IMX8MN_ICS_DRAM 2
+#define IMX8MN_ICS_OCRAM 3
+#define IMX8MN_ICM_A53 4
+
+#define IMX8MN_ICM_GPU 5
+#define IMX8MN_ICN_GPU 6
+
+#define IMX8MN_ICM_CSI1 7
+#define IMX8MN_ICM_CSI2 8
+#define IMX8MN_ICM_ISI 9
+#define IMX8MN_ICM_LCDIF 10
+#define IMX8MN_ICN_MIPI 11
+
+#define IMX8MN_ICM_USB 12
+
+#define IMX8MN_ICM_SDMA2 13
+#define IMX8MN_ICM_SDMA3 14
+#define IMX8MN_ICN_AUDIO 15
+
+#define IMX8MN_ICN_ENET 16
+#define IMX8MN_ICM_ENET 17
+
+#define IMX8MN_ICM_NAND 18
+#define IMX8MN_ICM_SDMA1 19
+#define IMX8MN_ICM_USDHC1 20
+#define IMX8MN_ICM_USDHC2 21
+#define IMX8MN_ICM_USDHC3 22
+#define IMX8MN_ICN_MAIN 23
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MN_H */
diff --git a/include/dt-bindings/interconnect/imx8mq.h b/include/dt-bindings/interconnect/imx8mq.h
new file mode 100644
index 000000000000..1a4cae7f8be2
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mq.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019-2020, NXP
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MQ_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MQ_H
+
+#define IMX8MQ_ICN_NOC 1
+#define IMX8MQ_ICS_DRAM 2
+#define IMX8MQ_ICS_OCRAM 3
+#define IMX8MQ_ICM_A53 4
+
+#define IMX8MQ_ICM_VPU 5
+#define IMX8MQ_ICN_VIDEO 6
+
+#define IMX8MQ_ICM_GPU 7
+#define IMX8MQ_ICN_GPU 8
+
+#define IMX8MQ_ICM_DCSS 9
+#define IMX8MQ_ICN_DCSS 10
+
+#define IMX8MQ_ICM_USB1 11
+#define IMX8MQ_ICM_USB2 12
+#define IMX8MQ_ICN_USB 13
+
+#define IMX8MQ_ICM_CSI1 14
+#define IMX8MQ_ICM_CSI2 15
+#define IMX8MQ_ICM_LCDIF 16
+#define IMX8MQ_ICN_DISPLAY 17
+
+#define IMX8MQ_ICM_SDMA2 18
+#define IMX8MQ_ICN_AUDIO 19
+
+#define IMX8MQ_ICN_ENET 20
+#define IMX8MQ_ICM_ENET 21
+
+#define IMX8MQ_ICM_SDMA1 22
+#define IMX8MQ_ICM_NAND 23
+#define IMX8MQ_ICM_USDHC1 24
+#define IMX8MQ_ICM_USDHC2 25
+#define IMX8MQ_ICM_PCIE1 26
+#define IMX8MQ_ICM_PCIE2 27
+#define IMX8MQ_ICN_MAIN 28
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MQ_H */
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
new file mode 100644
index 000000000000..4c23eefed5f3
--- /dev/null
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_MAILBOX_IPCC_H
+#define __DT_BINDINGS_MAILBOX_IPCC_H
+
+/* Signal IDs for MPROC protocol */
+#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
+#define IPCC_MPROC_SIGNAL_SMP2P 2
+#define IPCC_MPROC_SIGNAL_PING 3
+
+/* Client IDs */
+#define IPCC_CLIENT_AOP 0
+#define IPCC_CLIENT_TZ 1
+#define IPCC_CLIENT_MPSS 2
+#define IPCC_CLIENT_LPASS 3
+#define IPCC_CLIENT_SLPI 4
+#define IPCC_CLIENT_SDC 5
+#define IPCC_CLIENT_CDSP 6
+#define IPCC_CLIENT_NPU 7
+#define IPCC_CLIENT_APSS 8
+#define IPCC_CLIENT_GPU 9
+#define IPCC_CLIENT_CVP 10
+#define IPCC_CLIENT_CAM 11
+#define IPCC_CLIENT_VPU 12
+#define IPCC_CLIENT_PCIE0 13
+#define IPCC_CLIENT_PCIE1 14
+#define IPCC_CLIENT_PCIE2 15
+#define IPCC_CLIENT_SPSS 16
+
+#endif
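
IPCC channels are addressed by a (client, signal) pair, so a mailbox specifier carries one value from each of the two lists above. A hedged sketch; the ipcc label and the consumer node are placeholders, and #mbox-cells = <2> on the provider is assumed:

    #include <dt-bindings/mailbox/qcom-ipcc.h>

    glink-edge {
            /* GLINK doorbell towards the LPASS subsystem */
            mboxes = <&ipcc IPCC_CLIENT_LPASS IPCC_MPROC_SIGNAL_GLINK_QMP>;
    };
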
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 1f3f866fae7b..3727ef72138b 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -17,5 +17,6 @@
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
#define PHY_TYPE_DP 6
+#define PHY_TYPE_XPCS 7
#endif /* _DT_BINDINGS_PHY */
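
PHY_TYPE_XPCS extends the generic PHY-type values that combo/multi-protocol PHY providers accept in their specifier. A hypothetical sketch, assuming a provider with #phy-cells = <1> whose single cell is the PHY type; the labels and names are placeholders:

    #include <dt-bindings/phy/phy.h>

    ethernet0 {
            phys = <&combo_phy PHY_TYPE_XPCS>;
            phy-names = "xpcs";                     /* illustrative name */
    };
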
diff --git a/include/dt-bindings/pinctrl/pads-imx8dxl.h b/include/dt-bindings/pinctrl/pads-imx8dxl.h
new file mode 100644
index 000000000000..b1d7b84c3e0a
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pads-imx8dxl.h
@@ -0,0 +1,639 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019~2020 NXP
+ */
+
+#ifndef _IMX8DXL_PADS_H
+#define _IMX8DXL_PADS_H
+
+/* pin id */
+#define IMX8DXL_PCIE_CTRL0_PERST_B 0
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B 1
+#define IMX8DXL_PCIE_CTRL0_WAKE_B 2
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP 3
+#define IMX8DXL_USB_SS3_TC0 4
+#define IMX8DXL_USB_SS3_TC1 5
+#define IMX8DXL_USB_SS3_TC2 6
+#define IMX8DXL_USB_SS3_TC3 7
+#define IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO 8
+#define IMX8DXL_EMMC0_CLK 9
+#define IMX8DXL_EMMC0_CMD 10
+#define IMX8DXL_EMMC0_DATA0 11
+#define IMX8DXL_EMMC0_DATA1 12
+#define IMX8DXL_EMMC0_DATA2 13
+#define IMX8DXL_EMMC0_DATA3 14
+#define IMX8DXL_EMMC0_DATA4 15
+#define IMX8DXL_EMMC0_DATA5 16
+#define IMX8DXL_EMMC0_DATA6 17
+#define IMX8DXL_EMMC0_DATA7 18
+#define IMX8DXL_EMMC0_STROBE 19
+#define IMX8DXL_EMMC0_RESET_B 20
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 21
+#define IMX8DXL_USDHC1_RESET_B 22
+#define IMX8DXL_USDHC1_VSELECT 23
+#define IMX8DXL_CTL_NAND_RE_P_N 24
+#define IMX8DXL_USDHC1_WP 25
+#define IMX8DXL_USDHC1_CD_B 26
+#define IMX8DXL_CTL_NAND_DQS_P_N 27
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP 28
+#define IMX8DXL_ENET0_RGMII_TXC 29
+#define IMX8DXL_ENET0_RGMII_TX_CTL 30
+#define IMX8DXL_ENET0_RGMII_TXD0 31
+#define IMX8DXL_ENET0_RGMII_TXD1 32
+#define IMX8DXL_ENET0_RGMII_TXD2 33
+#define IMX8DXL_ENET0_RGMII_TXD3 34
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 35
+#define IMX8DXL_ENET0_RGMII_RXC 36
+#define IMX8DXL_ENET0_RGMII_RX_CTL 37
+#define IMX8DXL_ENET0_RGMII_RXD0 38
+#define IMX8DXL_ENET0_RGMII_RXD1 39
+#define IMX8DXL_ENET0_RGMII_RXD2 40
+#define IMX8DXL_ENET0_RGMII_RXD3 41
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 42
+#define IMX8DXL_ENET0_REFCLK_125M_25M 43
+#define IMX8DXL_ENET0_MDIO 44
+#define IMX8DXL_ENET0_MDC 45
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT 46
+#define IMX8DXL_ENET1_RGMII_TXC 47
+#define IMX8DXL_ENET1_RGMII_TXD2 48
+#define IMX8DXL_ENET1_RGMII_TX_CTL 49
+#define IMX8DXL_ENET1_RGMII_TXD3 50
+#define IMX8DXL_ENET1_RGMII_RXC 51
+#define IMX8DXL_ENET1_RGMII_RXD3 52
+#define IMX8DXL_ENET1_RGMII_RXD2 53
+#define IMX8DXL_ENET1_RGMII_RXD1 54
+#define IMX8DXL_ENET1_RGMII_TXD0 55
+#define IMX8DXL_ENET1_RGMII_TXD1 56
+#define IMX8DXL_ENET1_RGMII_RXD0 57
+#define IMX8DXL_ENET1_RGMII_RX_CTL 58
+#define IMX8DXL_ENET1_REFCLK_125M_25M 59
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB 60
+#define IMX8DXL_SPI3_SCK 61
+#define IMX8DXL_SPI3_SDO 62
+#define IMX8DXL_SPI3_SDI 63
+#define IMX8DXL_SPI3_CS0 64
+#define IMX8DXL_SPI3_CS1 65
+#define IMX8DXL_MCLK_IN1 66
+#define IMX8DXL_MCLK_IN0 67
+#define IMX8DXL_MCLK_OUT0 68
+#define IMX8DXL_UART1_TX 69
+#define IMX8DXL_UART1_RX 70
+#define IMX8DXL_UART1_RTS_B 71
+#define IMX8DXL_UART1_CTS_B 72
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK 73
+#define IMX8DXL_SPI0_SCK 74
+#define IMX8DXL_SPI0_SDI 75
+#define IMX8DXL_SPI0_SDO 76
+#define IMX8DXL_SPI0_CS1 77
+#define IMX8DXL_SPI0_CS0 78
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT 79
+#define IMX8DXL_ADC_IN1 80
+#define IMX8DXL_ADC_IN0 81
+#define IMX8DXL_ADC_IN3 82
+#define IMX8DXL_ADC_IN2 83
+#define IMX8DXL_ADC_IN5 84
+#define IMX8DXL_ADC_IN4 85
+#define IMX8DXL_FLEXCAN0_RX 86
+#define IMX8DXL_FLEXCAN0_TX 87
+#define IMX8DXL_FLEXCAN1_RX 88
+#define IMX8DXL_FLEXCAN1_TX 89
+#define IMX8DXL_FLEXCAN2_RX 90
+#define IMX8DXL_FLEXCAN2_TX 91
+#define IMX8DXL_UART0_RX 92
+#define IMX8DXL_UART0_TX 93
+#define IMX8DXL_UART2_TX 94
+#define IMX8DXL_UART2_RX 95
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH 96
+#define IMX8DXL_JTAG_TRST_B 97
+#define IMX8DXL_PMIC_I2C_SCL 98
+#define IMX8DXL_PMIC_I2C_SDA 99
+#define IMX8DXL_PMIC_INT_B 100
+#define IMX8DXL_SCU_GPIO0_00 101
+#define IMX8DXL_SCU_GPIO0_01 102
+#define IMX8DXL_SCU_PMIC_STANDBY 103
+#define IMX8DXL_SCU_BOOT_MODE1 104
+#define IMX8DXL_SCU_BOOT_MODE0 105
+#define IMX8DXL_SCU_BOOT_MODE2 106
+#define IMX8DXL_SNVS_TAMPER_OUT1 107
+#define IMX8DXL_SNVS_TAMPER_OUT2 108
+#define IMX8DXL_SNVS_TAMPER_OUT3 109
+#define IMX8DXL_SNVS_TAMPER_OUT4 110
+#define IMX8DXL_SNVS_TAMPER_IN0 111
+#define IMX8DXL_SNVS_TAMPER_IN1 112
+#define IMX8DXL_SNVS_TAMPER_IN2 113
+#define IMX8DXL_SNVS_TAMPER_IN3 114
+#define IMX8DXL_SPI1_SCK 115
+#define IMX8DXL_SPI1_SDO 116
+#define IMX8DXL_SPI1_SDI 117
+#define IMX8DXL_SPI1_CS0 118
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD 119
+#define IMX8DXL_QSPI0A_DATA1 120
+#define IMX8DXL_QSPI0A_DATA0 121
+#define IMX8DXL_QSPI0A_DATA3 122
+#define IMX8DXL_QSPI0A_DATA2 123
+#define IMX8DXL_QSPI0A_SS0_B 124
+#define IMX8DXL_QSPI0A_DQS 125
+#define IMX8DXL_QSPI0A_SCLK 126
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A 127
+#define IMX8DXL_QSPI0B_SCLK 128
+#define IMX8DXL_QSPI0B_DQS 129
+#define IMX8DXL_QSPI0B_DATA1 130
+#define IMX8DXL_QSPI0B_DATA0 131
+#define IMX8DXL_QSPI0B_DATA3 132
+#define IMX8DXL_QSPI0B_DATA2 133
+#define IMX8DXL_QSPI0B_SS0_B 134
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B 135
+
+/* format: <pin_id mux_mode> */
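
Each macro below expands to the two cells named in the comment (pin id, mux mode); board device trees then append a third cell carrying the pad configuration. A hedged sketch using the UART1 entries defined further down; the group node name and the 0x06000020 configuration value are placeholders:

    pinctrl_lpuart1: lpuart1grp {
            fsl,pins = <
                    IMX8DXL_UART1_RX_ADMA_UART1_RX  0x06000020
                    IMX8DXL_UART1_TX_ADMA_UART1_TX  0x06000020
            >;
    };
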
+#define IMX8DXL_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8DXL_PCIE_CTRL0_PERST_B 0
+#define IMX8DXL_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 IMX8DXL_PCIE_CTRL0_PERST_B 4
+#define IMX8DXL_PCIE_CTRL0_PERST_B_LSIO_GPIO7_IO00 IMX8DXL_PCIE_CTRL0_PERST_B 5
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8DXL_PCIE_CTRL0_CLKREQ_B 0
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 IMX8DXL_PCIE_CTRL0_CLKREQ_B 4
+#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO7_IO01 IMX8DXL_PCIE_CTRL0_CLKREQ_B 5
+#define IMX8DXL_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8DXL_PCIE_CTRL0_WAKE_B 0
+#define IMX8DXL_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 IMX8DXL_PCIE_CTRL0_WAKE_B 4
+#define IMX8DXL_PCIE_CTRL0_WAKE_B_LSIO_GPIO7_IO02 IMX8DXL_PCIE_CTRL0_WAKE_B 5
+#define IMX8DXL_USB_SS3_TC0_ADMA_I2C1_SCL IMX8DXL_USB_SS3_TC0 0
+#define IMX8DXL_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8DXL_USB_SS3_TC0 1
+#define IMX8DXL_USB_SS3_TC0_CONN_USB_OTG2_PWR IMX8DXL_USB_SS3_TC0 2
+#define IMX8DXL_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8DXL_USB_SS3_TC0 4
+#define IMX8DXL_USB_SS3_TC0_LSIO_GPIO7_IO03 IMX8DXL_USB_SS3_TC0 5
+#define IMX8DXL_USB_SS3_TC1_ADMA_I2C1_SCL IMX8DXL_USB_SS3_TC1 0
+#define IMX8DXL_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8DXL_USB_SS3_TC1 1
+#define IMX8DXL_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8DXL_USB_SS3_TC1 4
+#define IMX8DXL_USB_SS3_TC1_LSIO_GPIO7_IO04 IMX8DXL_USB_SS3_TC1 5
+#define IMX8DXL_USB_SS3_TC2_ADMA_I2C1_SDA IMX8DXL_USB_SS3_TC2 0
+#define IMX8DXL_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8DXL_USB_SS3_TC2 1
+#define IMX8DXL_USB_SS3_TC2_CONN_USB_OTG2_OC IMX8DXL_USB_SS3_TC2 2
+#define IMX8DXL_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8DXL_USB_SS3_TC2 4
+#define IMX8DXL_USB_SS3_TC2_LSIO_GPIO7_IO05 IMX8DXL_USB_SS3_TC2 5
+#define IMX8DXL_USB_SS3_TC3_ADMA_I2C1_SDA IMX8DXL_USB_SS3_TC3 0
+#define IMX8DXL_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8DXL_USB_SS3_TC3 1
+#define IMX8DXL_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8DXL_USB_SS3_TC3 4
+#define IMX8DXL_USB_SS3_TC3_LSIO_GPIO7_IO06 IMX8DXL_USB_SS3_TC3 5
+#define IMX8DXL_EMMC0_CLK_CONN_EMMC0_CLK IMX8DXL_EMMC0_CLK 0
+#define IMX8DXL_EMMC0_CLK_CONN_NAND_READY_B IMX8DXL_EMMC0_CLK 1
+#define IMX8DXL_EMMC0_CLK_LSIO_GPIO4_IO07 IMX8DXL_EMMC0_CLK 4
+#define IMX8DXL_EMMC0_CMD_CONN_EMMC0_CMD IMX8DXL_EMMC0_CMD 0
+#define IMX8DXL_EMMC0_CMD_CONN_NAND_DQS IMX8DXL_EMMC0_CMD 1
+#define IMX8DXL_EMMC0_CMD_LSIO_GPIO4_IO08 IMX8DXL_EMMC0_CMD 4
+#define IMX8DXL_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8DXL_EMMC0_DATA0 0
+#define IMX8DXL_EMMC0_DATA0_CONN_NAND_DATA00 IMX8DXL_EMMC0_DATA0 1
+#define IMX8DXL_EMMC0_DATA0_LSIO_GPIO4_IO09 IMX8DXL_EMMC0_DATA0 4
+#define IMX8DXL_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8DXL_EMMC0_DATA1 0
+#define IMX8DXL_EMMC0_DATA1_CONN_NAND_DATA01 IMX8DXL_EMMC0_DATA1 1
+#define IMX8DXL_EMMC0_DATA1_LSIO_GPIO4_IO10 IMX8DXL_EMMC0_DATA1 4
+#define IMX8DXL_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8DXL_EMMC0_DATA2 0
+#define IMX8DXL_EMMC0_DATA2_CONN_NAND_DATA02 IMX8DXL_EMMC0_DATA2 1
+#define IMX8DXL_EMMC0_DATA2_LSIO_GPIO4_IO11 IMX8DXL_EMMC0_DATA2 4
+#define IMX8DXL_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8DXL_EMMC0_DATA3 0
+#define IMX8DXL_EMMC0_DATA3_CONN_NAND_DATA03 IMX8DXL_EMMC0_DATA3 1
+#define IMX8DXL_EMMC0_DATA3_LSIO_GPIO4_IO12 IMX8DXL_EMMC0_DATA3 4
+#define IMX8DXL_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8DXL_EMMC0_DATA4 0
+#define IMX8DXL_EMMC0_DATA4_CONN_NAND_DATA04 IMX8DXL_EMMC0_DATA4 1
+#define IMX8DXL_EMMC0_DATA4_LSIO_GPIO4_IO13 IMX8DXL_EMMC0_DATA4 4
+#define IMX8DXL_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8DXL_EMMC0_DATA5 0
+#define IMX8DXL_EMMC0_DATA5_CONN_NAND_DATA05 IMX8DXL_EMMC0_DATA5 1
+#define IMX8DXL_EMMC0_DATA5_LSIO_GPIO4_IO14 IMX8DXL_EMMC0_DATA5 4
+#define IMX8DXL_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8DXL_EMMC0_DATA6 0
+#define IMX8DXL_EMMC0_DATA6_CONN_NAND_DATA06 IMX8DXL_EMMC0_DATA6 1
+#define IMX8DXL_EMMC0_DATA6_LSIO_GPIO4_IO15 IMX8DXL_EMMC0_DATA6 4
+#define IMX8DXL_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8DXL_EMMC0_DATA7 0
+#define IMX8DXL_EMMC0_DATA7_CONN_NAND_DATA07 IMX8DXL_EMMC0_DATA7 1
+#define IMX8DXL_EMMC0_DATA7_LSIO_GPIO4_IO16 IMX8DXL_EMMC0_DATA7 4
+#define IMX8DXL_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8DXL_EMMC0_STROBE 0
+#define IMX8DXL_EMMC0_STROBE_CONN_NAND_CLE IMX8DXL_EMMC0_STROBE 1
+#define IMX8DXL_EMMC0_STROBE_LSIO_GPIO4_IO17 IMX8DXL_EMMC0_STROBE 4
+#define IMX8DXL_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8DXL_EMMC0_RESET_B 0
+#define IMX8DXL_EMMC0_RESET_B_CONN_NAND_WP_B IMX8DXL_EMMC0_RESET_B 1
+#define IMX8DXL_EMMC0_RESET_B_LSIO_GPIO4_IO18 IMX8DXL_EMMC0_RESET_B 4
+#define IMX8DXL_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8DXL_USDHC1_RESET_B 0
+#define IMX8DXL_USDHC1_RESET_B_CONN_NAND_RE_N IMX8DXL_USDHC1_RESET_B 1
+#define IMX8DXL_USDHC1_RESET_B_ADMA_SPI2_SCK IMX8DXL_USDHC1_RESET_B 2
+#define IMX8DXL_USDHC1_RESET_B_CONN_NAND_WE_B IMX8DXL_USDHC1_RESET_B 3
+#define IMX8DXL_USDHC1_RESET_B_LSIO_GPIO4_IO19 IMX8DXL_USDHC1_RESET_B 4
+#define IMX8DXL_USDHC1_RESET_B_LSIO_GPIO7_IO08 IMX8DXL_USDHC1_RESET_B 5
+#define IMX8DXL_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8DXL_USDHC1_VSELECT 0
+#define IMX8DXL_USDHC1_VSELECT_CONN_NAND_RE_P IMX8DXL_USDHC1_VSELECT 1
+#define IMX8DXL_USDHC1_VSELECT_ADMA_SPI2_SDO IMX8DXL_USDHC1_VSELECT 2
+#define IMX8DXL_USDHC1_VSELECT_CONN_NAND_RE_B IMX8DXL_USDHC1_VSELECT 3
+#define IMX8DXL_USDHC1_VSELECT_LSIO_GPIO4_IO20 IMX8DXL_USDHC1_VSELECT 4
+#define IMX8DXL_USDHC1_VSELECT_LSIO_GPIO7_IO09 IMX8DXL_USDHC1_VSELECT 5
+#define IMX8DXL_USDHC1_WP_CONN_USDHC1_WP IMX8DXL_USDHC1_WP 0
+#define IMX8DXL_USDHC1_WP_CONN_NAND_DQS_N IMX8DXL_USDHC1_WP 1
+#define IMX8DXL_USDHC1_WP_ADMA_SPI2_SDI IMX8DXL_USDHC1_WP 2
+#define IMX8DXL_USDHC1_WP_CONN_NAND_ALE IMX8DXL_USDHC1_WP 3
+#define IMX8DXL_USDHC1_WP_LSIO_GPIO4_IO21 IMX8DXL_USDHC1_WP 4
+#define IMX8DXL_USDHC1_WP_LSIO_GPIO7_IO10 IMX8DXL_USDHC1_WP 5
+#define IMX8DXL_USDHC1_CD_B_CONN_USDHC1_CD_B IMX8DXL_USDHC1_CD_B 0
+#define IMX8DXL_USDHC1_CD_B_CONN_NAND_DQS_P IMX8DXL_USDHC1_CD_B 1
+#define IMX8DXL_USDHC1_CD_B_ADMA_SPI2_CS0 IMX8DXL_USDHC1_CD_B 2
+#define IMX8DXL_USDHC1_CD_B_CONN_NAND_DQS IMX8DXL_USDHC1_CD_B 3
+#define IMX8DXL_USDHC1_CD_B_LSIO_GPIO4_IO22 IMX8DXL_USDHC1_CD_B 4
+#define IMX8DXL_USDHC1_CD_B_LSIO_GPIO7_IO11 IMX8DXL_USDHC1_CD_B 5
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8DXL_ENET0_RGMII_TXC 0
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8DXL_ENET0_RGMII_TXC 1
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8DXL_ENET0_RGMII_TXC 2
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_NAND_CE1_B IMX8DXL_ENET0_RGMII_TXC 3
+#define IMX8DXL_ENET0_RGMII_TXC_LSIO_GPIO4_IO29 IMX8DXL_ENET0_RGMII_TXC 4
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_USDHC2_CLK IMX8DXL_ENET0_RGMII_TXC 5
+#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8DXL_ENET0_RGMII_TX_CTL 0
+#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_USDHC1_RESET_B IMX8DXL_ENET0_RGMII_TX_CTL 3
+#define IMX8DXL_ENET0_RGMII_TX_CTL_LSIO_GPIO4_IO30 IMX8DXL_ENET0_RGMII_TX_CTL 4
+#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_USDHC2_CMD IMX8DXL_ENET0_RGMII_TX_CTL 5
+#define IMX8DXL_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8DXL_ENET0_RGMII_TXD0 0
+#define IMX8DXL_ENET0_RGMII_TXD0_CONN_USDHC1_VSELECT IMX8DXL_ENET0_RGMII_TXD0 3
+#define IMX8DXL_ENET0_RGMII_TXD0_LSIO_GPIO4_IO31 IMX8DXL_ENET0_RGMII_TXD0 4
+#define IMX8DXL_ENET0_RGMII_TXD0_CONN_USDHC2_DATA0 IMX8DXL_ENET0_RGMII_TXD0 5
+#define IMX8DXL_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8DXL_ENET0_RGMII_TXD1 0
+#define IMX8DXL_ENET0_RGMII_TXD1_CONN_USDHC1_WP IMX8DXL_ENET0_RGMII_TXD1 3
+#define IMX8DXL_ENET0_RGMII_TXD1_LSIO_GPIO5_IO00 IMX8DXL_ENET0_RGMII_TXD1 4
+#define IMX8DXL_ENET0_RGMII_TXD1_CONN_USDHC2_DATA1 IMX8DXL_ENET0_RGMII_TXD1 5
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8DXL_ENET0_RGMII_TXD2 0
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_NAND_CE0_B IMX8DXL_ENET0_RGMII_TXD2 2
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_USDHC1_CD_B IMX8DXL_ENET0_RGMII_TXD2 3
+#define IMX8DXL_ENET0_RGMII_TXD2_LSIO_GPIO5_IO01 IMX8DXL_ENET0_RGMII_TXD2 4
+#define IMX8DXL_ENET0_RGMII_TXD2_CONN_USDHC2_DATA2 IMX8DXL_ENET0_RGMII_TXD2 5
+#define IMX8DXL_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8DXL_ENET0_RGMII_TXD3 0
+#define IMX8DXL_ENET0_RGMII_TXD3_CONN_NAND_RE_B IMX8DXL_ENET0_RGMII_TXD3 2
+#define IMX8DXL_ENET0_RGMII_TXD3_LSIO_GPIO5_IO02 IMX8DXL_ENET0_RGMII_TXD3 4
+#define IMX8DXL_ENET0_RGMII_TXD3_CONN_USDHC2_DATA3 IMX8DXL_ENET0_RGMII_TXD3 5
+#define IMX8DXL_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8DXL_ENET0_RGMII_RXC 0
+#define IMX8DXL_ENET0_RGMII_RXC_CONN_NAND_WE_B IMX8DXL_ENET0_RGMII_RXC 2
+#define IMX8DXL_ENET0_RGMII_RXC_CONN_USDHC1_CLK IMX8DXL_ENET0_RGMII_RXC 3
+#define IMX8DXL_ENET0_RGMII_RXC_LSIO_GPIO5_IO03 IMX8DXL_ENET0_RGMII_RXC 4
+#define IMX8DXL_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8DXL_ENET0_RGMII_RX_CTL 0
+#define IMX8DXL_ENET0_RGMII_RX_CTL_CONN_USDHC1_CMD IMX8DXL_ENET0_RGMII_RX_CTL 3
+#define IMX8DXL_ENET0_RGMII_RX_CTL_LSIO_GPIO5_IO04 IMX8DXL_ENET0_RGMII_RX_CTL 4
+#define IMX8DXL_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8DXL_ENET0_RGMII_RXD0 0
+#define IMX8DXL_ENET0_RGMII_RXD0_CONN_USDHC1_DATA0 IMX8DXL_ENET0_RGMII_RXD0 3
+#define IMX8DXL_ENET0_RGMII_RXD0_LSIO_GPIO5_IO05 IMX8DXL_ENET0_RGMII_RXD0 4
+#define IMX8DXL_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8DXL_ENET0_RGMII_RXD1 0
+#define IMX8DXL_ENET0_RGMII_RXD1_CONN_USDHC1_DATA1 IMX8DXL_ENET0_RGMII_RXD1 3
+#define IMX8DXL_ENET0_RGMII_RXD1_LSIO_GPIO5_IO06 IMX8DXL_ENET0_RGMII_RXD1 4
+#define IMX8DXL_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8DXL_ENET0_RGMII_RXD2 0
+#define IMX8DXL_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8DXL_ENET0_RGMII_RXD2 1
+#define IMX8DXL_ENET0_RGMII_RXD2_CONN_USDHC1_DATA2 IMX8DXL_ENET0_RGMII_RXD2 3
+#define IMX8DXL_ENET0_RGMII_RXD2_LSIO_GPIO5_IO07 IMX8DXL_ENET0_RGMII_RXD2 4
+#define IMX8DXL_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8DXL_ENET0_RGMII_RXD3 0
+#define IMX8DXL_ENET0_RGMII_RXD3_CONN_NAND_ALE IMX8DXL_ENET0_RGMII_RXD3 2
+#define IMX8DXL_ENET0_RGMII_RXD3_CONN_USDHC1_DATA3 IMX8DXL_ENET0_RGMII_RXD3 3
+#define IMX8DXL_ENET0_RGMII_RXD3_LSIO_GPIO5_IO08 IMX8DXL_ENET0_RGMII_RXD3 4
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8DXL_ENET0_REFCLK_125M_25M 0
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8DXL_ENET0_REFCLK_125M_25M 1
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_EQOS_PPS_IN IMX8DXL_ENET0_REFCLK_125M_25M 2
+#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_EQOS_PPS_OUT IMX8DXL_ENET0_REFCLK_125M_25M 3
+#define IMX8DXL_ENET0_REFCLK_125M_25M_LSIO_GPIO5_IO09 IMX8DXL_ENET0_REFCLK_125M_25M 4
+#define IMX8DXL_ENET0_MDIO_CONN_ENET0_MDIO IMX8DXL_ENET0_MDIO 0
+#define IMX8DXL_ENET0_MDIO_ADMA_I2C3_SDA IMX8DXL_ENET0_MDIO 1
+#define IMX8DXL_ENET0_MDIO_CONN_EQOS_MDIO IMX8DXL_ENET0_MDIO 2
+#define IMX8DXL_ENET0_MDIO_LSIO_GPIO5_IO10 IMX8DXL_ENET0_MDIO 4
+#define IMX8DXL_ENET0_MDIO_LSIO_GPIO7_IO16 IMX8DXL_ENET0_MDIO 5
+#define IMX8DXL_ENET0_MDC_CONN_ENET0_MDC IMX8DXL_ENET0_MDC 0
+#define IMX8DXL_ENET0_MDC_ADMA_I2C3_SCL IMX8DXL_ENET0_MDC 1
+#define IMX8DXL_ENET0_MDC_CONN_EQOS_MDC IMX8DXL_ENET0_MDC 2
+#define IMX8DXL_ENET0_MDC_LSIO_GPIO5_IO11 IMX8DXL_ENET0_MDC 4
+#define IMX8DXL_ENET0_MDC_LSIO_GPIO7_IO17 IMX8DXL_ENET0_MDC 5
+#define IMX8DXL_ENET1_RGMII_TXC_LSIO_GPIO0_IO00 IMX8DXL_ENET1_RGMII_TXC 0
+#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RCLK50M_OUT IMX8DXL_ENET1_RGMII_TXC 1
+#define IMX8DXL_ENET1_RGMII_TXC_ADMA_LCDIF_D00 IMX8DXL_ENET1_RGMII_TXC 2
+#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RGMII_TXC IMX8DXL_ENET1_RGMII_TXC 3
+#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RCLK50M_IN IMX8DXL_ENET1_RGMII_TXC 4
+#define IMX8DXL_ENET1_RGMII_TXD2_ADMA_LCDIF_D01 IMX8DXL_ENET1_RGMII_TXD2 2
+#define IMX8DXL_ENET1_RGMII_TXD2_CONN_EQOS_RGMII_TXD2 IMX8DXL_ENET1_RGMII_TXD2 3
+#define IMX8DXL_ENET1_RGMII_TXD2_LSIO_GPIO0_IO01 IMX8DXL_ENET1_RGMII_TXD2 4
+#define IMX8DXL_ENET1_RGMII_TX_CTL_ADMA_LCDIF_D02 IMX8DXL_ENET1_RGMII_TX_CTL 2
+#define IMX8DXL_ENET1_RGMII_TX_CTL_CONN_EQOS_RGMII_TX_CTL IMX8DXL_ENET1_RGMII_TX_CTL 3
+#define IMX8DXL_ENET1_RGMII_TX_CTL_LSIO_GPIO0_IO02 IMX8DXL_ENET1_RGMII_TX_CTL 4
+#define IMX8DXL_ENET1_RGMII_TXD3_ADMA_LCDIF_D03 IMX8DXL_ENET1_RGMII_TXD3 2
+#define IMX8DXL_ENET1_RGMII_TXD3_CONN_EQOS_RGMII_TXD3 IMX8DXL_ENET1_RGMII_TXD3 3
+#define IMX8DXL_ENET1_RGMII_TXD3_LSIO_GPIO0_IO03 IMX8DXL_ENET1_RGMII_TXD3 4
+#define IMX8DXL_ENET1_RGMII_RXC_ADMA_LCDIF_D04 IMX8DXL_ENET1_RGMII_RXC 2
+#define IMX8DXL_ENET1_RGMII_RXC_CONN_EQOS_RGMII_RXC IMX8DXL_ENET1_RGMII_RXC 3
+#define IMX8DXL_ENET1_RGMII_RXC_LSIO_GPIO0_IO04 IMX8DXL_ENET1_RGMII_RXC 4
+#define IMX8DXL_ENET1_RGMII_RXD3_ADMA_LCDIF_D05 IMX8DXL_ENET1_RGMII_RXD3 2
+#define IMX8DXL_ENET1_RGMII_RXD3_CONN_EQOS_RGMII_RXD3 IMX8DXL_ENET1_RGMII_RXD3 3
+#define IMX8DXL_ENET1_RGMII_RXD3_LSIO_GPIO0_IO05 IMX8DXL_ENET1_RGMII_RXD3 4
+#define IMX8DXL_ENET1_RGMII_RXD2_ADMA_LCDIF_D06 IMX8DXL_ENET1_RGMII_RXD2 2
+#define IMX8DXL_ENET1_RGMII_RXD2_CONN_EQOS_RGMII_RXD2 IMX8DXL_ENET1_RGMII_RXD2 3
+#define IMX8DXL_ENET1_RGMII_RXD2_LSIO_GPIO0_IO06 IMX8DXL_ENET1_RGMII_RXD2 4
+#define IMX8DXL_ENET1_RGMII_RXD2_LSIO_GPIO6_IO00 IMX8DXL_ENET1_RGMII_RXD2 5
+#define IMX8DXL_ENET1_RGMII_RXD1_ADMA_LCDIF_D07 IMX8DXL_ENET1_RGMII_RXD1 2
+#define IMX8DXL_ENET1_RGMII_RXD1_CONN_EQOS_RGMII_RXD1 IMX8DXL_ENET1_RGMII_RXD1 3
+#define IMX8DXL_ENET1_RGMII_RXD1_LSIO_GPIO0_IO07 IMX8DXL_ENET1_RGMII_RXD1 4
+#define IMX8DXL_ENET1_RGMII_RXD1_LSIO_GPIO6_IO01 IMX8DXL_ENET1_RGMII_RXD1 5
+#define IMX8DXL_ENET1_RGMII_TXD0_ADMA_LCDIF_D08 IMX8DXL_ENET1_RGMII_TXD0 2
+#define IMX8DXL_ENET1_RGMII_TXD0_CONN_EQOS_RGMII_TXD0 IMX8DXL_ENET1_RGMII_TXD0 3
+#define IMX8DXL_ENET1_RGMII_TXD0_LSIO_GPIO0_IO08 IMX8DXL_ENET1_RGMII_TXD0 4
+#define IMX8DXL_ENET1_RGMII_TXD0_LSIO_GPIO6_IO02 IMX8DXL_ENET1_RGMII_TXD0 5
+#define IMX8DXL_ENET1_RGMII_TXD1_ADMA_LCDIF_D09 IMX8DXL_ENET1_RGMII_TXD1 2
+#define IMX8DXL_ENET1_RGMII_TXD1_CONN_EQOS_RGMII_TXD1 IMX8DXL_ENET1_RGMII_TXD1 3
+#define IMX8DXL_ENET1_RGMII_TXD1_LSIO_GPIO0_IO09 IMX8DXL_ENET1_RGMII_TXD1 4
+#define IMX8DXL_ENET1_RGMII_TXD1_LSIO_GPIO6_IO03 IMX8DXL_ENET1_RGMII_TXD1 5
+#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_SPDIF0_RX IMX8DXL_ENET1_RGMII_RXD0 0
+#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_MQS_R IMX8DXL_ENET1_RGMII_RXD0 1
+#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_LCDIF_D10 IMX8DXL_ENET1_RGMII_RXD0 2
+#define IMX8DXL_ENET1_RGMII_RXD0_CONN_EQOS_RGMII_RXD0 IMX8DXL_ENET1_RGMII_RXD0 3
+#define IMX8DXL_ENET1_RGMII_RXD0_LSIO_GPIO0_IO10 IMX8DXL_ENET1_RGMII_RXD0 4
+#define IMX8DXL_ENET1_RGMII_RXD0_LSIO_GPIO6_IO04 IMX8DXL_ENET1_RGMII_RXD0 5
+#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_SPDIF0_TX IMX8DXL_ENET1_RGMII_RX_CTL 0
+#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_MQS_L IMX8DXL_ENET1_RGMII_RX_CTL 1
+#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_LCDIF_D11 IMX8DXL_ENET1_RGMII_RX_CTL 2
+#define IMX8DXL_ENET1_RGMII_RX_CTL_CONN_EQOS_RGMII_RX_CTL IMX8DXL_ENET1_RGMII_RX_CTL 3
+#define IMX8DXL_ENET1_RGMII_RX_CTL_LSIO_GPIO0_IO11 IMX8DXL_ENET1_RGMII_RX_CTL 4
+#define IMX8DXL_ENET1_RGMII_RX_CTL_LSIO_GPIO6_IO05 IMX8DXL_ENET1_RGMII_RX_CTL 5
+#define IMX8DXL_ENET1_REFCLK_125M_25M_ADMA_SPDIF0_EXT_CLK IMX8DXL_ENET1_REFCLK_125M_25M 0
+#define IMX8DXL_ENET1_REFCLK_125M_25M_ADMA_LCDIF_D12 IMX8DXL_ENET1_REFCLK_125M_25M 2
+#define IMX8DXL_ENET1_REFCLK_125M_25M_CONN_EQOS_REFCLK_125M_25M IMX8DXL_ENET1_REFCLK_125M_25M 3
+#define IMX8DXL_ENET1_REFCLK_125M_25M_LSIO_GPIO0_IO12 IMX8DXL_ENET1_REFCLK_125M_25M 4
+#define IMX8DXL_ENET1_REFCLK_125M_25M_LSIO_GPIO6_IO06 IMX8DXL_ENET1_REFCLK_125M_25M 5
+#define IMX8DXL_SPI3_SCK_ADMA_SPI3_SCK IMX8DXL_SPI3_SCK 0
+#define IMX8DXL_SPI3_SCK_ADMA_LCDIF_D13 IMX8DXL_SPI3_SCK 2
+#define IMX8DXL_SPI3_SCK_LSIO_GPIO0_IO13 IMX8DXL_SPI3_SCK 4
+#define IMX8DXL_SPI3_SCK_ADMA_LCDIF_D00 IMX8DXL_SPI3_SCK 5
+#define IMX8DXL_SPI3_SDO_ADMA_SPI3_SDO IMX8DXL_SPI3_SDO 0
+#define IMX8DXL_SPI3_SDO_ADMA_LCDIF_D14 IMX8DXL_SPI3_SDO 2
+#define IMX8DXL_SPI3_SDO_LSIO_GPIO0_IO14 IMX8DXL_SPI3_SDO 4
+#define IMX8DXL_SPI3_SDO_ADMA_LCDIF_D01 IMX8DXL_SPI3_SDO 5
+#define IMX8DXL_SPI3_SDI_ADMA_SPI3_SDI IMX8DXL_SPI3_SDI 0
+#define IMX8DXL_SPI3_SDI_ADMA_LCDIF_D15 IMX8DXL_SPI3_SDI 2
+#define IMX8DXL_SPI3_SDI_LSIO_GPIO0_IO15 IMX8DXL_SPI3_SDI 4
+#define IMX8DXL_SPI3_SDI_ADMA_LCDIF_D02 IMX8DXL_SPI3_SDI 5
+#define IMX8DXL_SPI3_CS0_ADMA_SPI3_CS0 IMX8DXL_SPI3_CS0 0
+#define IMX8DXL_SPI3_CS0_ADMA_ACM_MCLK_OUT1 IMX8DXL_SPI3_CS0 1
+#define IMX8DXL_SPI3_CS0_ADMA_LCDIF_HSYNC IMX8DXL_SPI3_CS0 2
+#define IMX8DXL_SPI3_CS0_LSIO_GPIO0_IO16 IMX8DXL_SPI3_CS0 4
+#define IMX8DXL_SPI3_CS0_ADMA_LCDIF_CS IMX8DXL_SPI3_CS0 5
+#define IMX8DXL_SPI3_CS1_ADMA_SPI3_CS1 IMX8DXL_SPI3_CS1 0
+#define IMX8DXL_SPI3_CS1_ADMA_I2C3_SCL IMX8DXL_SPI3_CS1 1
+#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_RESET IMX8DXL_SPI3_CS1 2
+#define IMX8DXL_SPI3_CS1_ADMA_SPI2_CS0 IMX8DXL_SPI3_CS1 3
+#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_D16 IMX8DXL_SPI3_CS1 4
+#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_RD_E IMX8DXL_SPI3_CS1 5
+#define IMX8DXL_MCLK_IN1_ADMA_ACM_MCLK_IN1 IMX8DXL_MCLK_IN1 0
+#define IMX8DXL_MCLK_IN1_ADMA_I2C3_SDA IMX8DXL_MCLK_IN1 1
+#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_EN IMX8DXL_MCLK_IN1 2
+#define IMX8DXL_MCLK_IN1_ADMA_SPI2_SCK IMX8DXL_MCLK_IN1 3
+#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_D17 IMX8DXL_MCLK_IN1 4
+#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_D03 IMX8DXL_MCLK_IN1 5
+#define IMX8DXL_MCLK_IN0_ADMA_ACM_MCLK_IN0 IMX8DXL_MCLK_IN0 0
+#define IMX8DXL_MCLK_IN0_ADMA_LCDIF_VSYNC IMX8DXL_MCLK_IN0 2
+#define IMX8DXL_MCLK_IN0_ADMA_SPI2_SDI IMX8DXL_MCLK_IN0 3
+#define IMX8DXL_MCLK_IN0_LSIO_GPIO0_IO19 IMX8DXL_MCLK_IN0 4
+#define IMX8DXL_MCLK_IN0_ADMA_LCDIF_RS IMX8DXL_MCLK_IN0 5
+#define IMX8DXL_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 IMX8DXL_MCLK_OUT0 0
+#define IMX8DXL_MCLK_OUT0_ADMA_LCDIF_CLK IMX8DXL_MCLK_OUT0 2
+#define IMX8DXL_MCLK_OUT0_ADMA_SPI2_SDO IMX8DXL_MCLK_OUT0 3
+#define IMX8DXL_MCLK_OUT0_LSIO_GPIO0_IO20 IMX8DXL_MCLK_OUT0 4
+#define IMX8DXL_MCLK_OUT0_ADMA_LCDIF_WR_RWN IMX8DXL_MCLK_OUT0 5
+#define IMX8DXL_UART1_TX_ADMA_UART1_TX IMX8DXL_UART1_TX 0
+#define IMX8DXL_UART1_TX_LSIO_PWM0_OUT IMX8DXL_UART1_TX 1
+#define IMX8DXL_UART1_TX_LSIO_GPT0_CAPTURE IMX8DXL_UART1_TX 2
+#define IMX8DXL_UART1_TX_LSIO_GPIO0_IO21 IMX8DXL_UART1_TX 4
+#define IMX8DXL_UART1_TX_ADMA_LCDIF_D04 IMX8DXL_UART1_TX 5
+#define IMX8DXL_UART1_RX_ADMA_UART1_RX IMX8DXL_UART1_RX 0
+#define IMX8DXL_UART1_RX_LSIO_PWM1_OUT IMX8DXL_UART1_RX 1
+#define IMX8DXL_UART1_RX_LSIO_GPT0_COMPARE IMX8DXL_UART1_RX 2
+#define IMX8DXL_UART1_RX_LSIO_GPT1_CLK IMX8DXL_UART1_RX 3
+#define IMX8DXL_UART1_RX_LSIO_GPIO0_IO22 IMX8DXL_UART1_RX 4
+#define IMX8DXL_UART1_RX_ADMA_LCDIF_D05 IMX8DXL_UART1_RX 5
+#define IMX8DXL_UART1_RTS_B_ADMA_UART1_RTS_B IMX8DXL_UART1_RTS_B 0
+#define IMX8DXL_UART1_RTS_B_LSIO_PWM2_OUT IMX8DXL_UART1_RTS_B 1
+#define IMX8DXL_UART1_RTS_B_ADMA_LCDIF_D16 IMX8DXL_UART1_RTS_B 2
+#define IMX8DXL_UART1_RTS_B_LSIO_GPT1_CAPTURE IMX8DXL_UART1_RTS_B 3
+#define IMX8DXL_UART1_RTS_B_LSIO_GPT0_CLK IMX8DXL_UART1_RTS_B 4
+#define IMX8DXL_UART1_RTS_B_ADMA_LCDIF_D06 IMX8DXL_UART1_RTS_B 5
+#define IMX8DXL_UART1_CTS_B_ADMA_UART1_CTS_B IMX8DXL_UART1_CTS_B 0
+#define IMX8DXL_UART1_CTS_B_LSIO_PWM3_OUT IMX8DXL_UART1_CTS_B 1
+#define IMX8DXL_UART1_CTS_B_ADMA_LCDIF_D17 IMX8DXL_UART1_CTS_B 2
+#define IMX8DXL_UART1_CTS_B_LSIO_GPT1_COMPARE IMX8DXL_UART1_CTS_B 3
+#define IMX8DXL_UART1_CTS_B_LSIO_GPIO0_IO24 IMX8DXL_UART1_CTS_B 4
+#define IMX8DXL_UART1_CTS_B_ADMA_LCDIF_D07 IMX8DXL_UART1_CTS_B 5
+#define IMX8DXL_SPI0_SCK_ADMA_SPI0_SCK IMX8DXL_SPI0_SCK 0
+#define IMX8DXL_SPI0_SCK_ADMA_SAI0_TXC IMX8DXL_SPI0_SCK 1
+#define IMX8DXL_SPI0_SCK_M40_I2C0_SCL IMX8DXL_SPI0_SCK 2
+#define IMX8DXL_SPI0_SCK_M40_GPIO0_IO00 IMX8DXL_SPI0_SCK 3
+#define IMX8DXL_SPI0_SCK_LSIO_GPIO1_IO04 IMX8DXL_SPI0_SCK 4
+#define IMX8DXL_SPI0_SCK_ADMA_LCDIF_D08 IMX8DXL_SPI0_SCK 5
+#define IMX8DXL_SPI0_SDI_ADMA_SPI0_SDI IMX8DXL_SPI0_SDI 0
+#define IMX8DXL_SPI0_SDI_ADMA_SAI0_TXD IMX8DXL_SPI0_SDI 1
+#define IMX8DXL_SPI0_SDI_M40_TPM0_CH0 IMX8DXL_SPI0_SDI 2
+#define IMX8DXL_SPI0_SDI_M40_GPIO0_IO02 IMX8DXL_SPI0_SDI 3
+#define IMX8DXL_SPI0_SDI_LSIO_GPIO1_IO05 IMX8DXL_SPI0_SDI 4
+#define IMX8DXL_SPI0_SDI_ADMA_LCDIF_D09 IMX8DXL_SPI0_SDI 5
+#define IMX8DXL_SPI0_SDO_ADMA_SPI0_SDO IMX8DXL_SPI0_SDO 0
+#define IMX8DXL_SPI0_SDO_ADMA_SAI0_TXFS IMX8DXL_SPI0_SDO 1
+#define IMX8DXL_SPI0_SDO_M40_I2C0_SDA IMX8DXL_SPI0_SDO 2
+#define IMX8DXL_SPI0_SDO_M40_GPIO0_IO01 IMX8DXL_SPI0_SDO 3
+#define IMX8DXL_SPI0_SDO_LSIO_GPIO1_IO06 IMX8DXL_SPI0_SDO 4
+#define IMX8DXL_SPI0_SDO_ADMA_LCDIF_D10 IMX8DXL_SPI0_SDO 5
+#define IMX8DXL_SPI0_CS1_ADMA_SPI0_CS1 IMX8DXL_SPI0_CS1 0
+#define IMX8DXL_SPI0_CS1_ADMA_SAI0_RXC IMX8DXL_SPI0_CS1 1
+#define IMX8DXL_SPI0_CS1_ADMA_SAI1_TXD IMX8DXL_SPI0_CS1 2
+#define IMX8DXL_SPI0_CS1_ADMA_LCD_PWM0_OUT IMX8DXL_SPI0_CS1 3
+#define IMX8DXL_SPI0_CS1_LSIO_GPIO1_IO07 IMX8DXL_SPI0_CS1 4
+#define IMX8DXL_SPI0_CS1_ADMA_LCDIF_D11 IMX8DXL_SPI0_CS1 5
+#define IMX8DXL_SPI0_CS0_ADMA_SPI0_CS0 IMX8DXL_SPI0_CS0 0
+#define IMX8DXL_SPI0_CS0_ADMA_SAI0_RXD IMX8DXL_SPI0_CS0 1
+#define IMX8DXL_SPI0_CS0_M40_TPM0_CH1 IMX8DXL_SPI0_CS0 2
+#define IMX8DXL_SPI0_CS0_M40_GPIO0_IO03 IMX8DXL_SPI0_CS0 3
+#define IMX8DXL_SPI0_CS0_LSIO_GPIO1_IO08 IMX8DXL_SPI0_CS0 4
+#define IMX8DXL_SPI0_CS0_ADMA_LCDIF_D12 IMX8DXL_SPI0_CS0 5
+#define IMX8DXL_ADC_IN1_ADMA_ADC_IN1 IMX8DXL_ADC_IN1 0
+#define IMX8DXL_ADC_IN1_M40_I2C0_SDA IMX8DXL_ADC_IN1 1
+#define IMX8DXL_ADC_IN1_M40_GPIO0_IO01 IMX8DXL_ADC_IN1 2
+#define IMX8DXL_ADC_IN1_ADMA_I2C0_SDA IMX8DXL_ADC_IN1 3
+#define IMX8DXL_ADC_IN1_LSIO_GPIO1_IO09 IMX8DXL_ADC_IN1 4
+#define IMX8DXL_ADC_IN1_ADMA_LCDIF_D13 IMX8DXL_ADC_IN1 5
+#define IMX8DXL_ADC_IN0_ADMA_ADC_IN0 IMX8DXL_ADC_IN0 0
+#define IMX8DXL_ADC_IN0_M40_I2C0_SCL IMX8DXL_ADC_IN0 1
+#define IMX8DXL_ADC_IN0_M40_GPIO0_IO00 IMX8DXL_ADC_IN0 2
+#define IMX8DXL_ADC_IN0_ADMA_I2C0_SCL IMX8DXL_ADC_IN0 3
+#define IMX8DXL_ADC_IN0_LSIO_GPIO1_IO10 IMX8DXL_ADC_IN0 4
+#define IMX8DXL_ADC_IN0_ADMA_LCDIF_D14 IMX8DXL_ADC_IN0 5
+#define IMX8DXL_ADC_IN3_ADMA_ADC_IN3 IMX8DXL_ADC_IN3 0
+#define IMX8DXL_ADC_IN3_M40_UART0_TX IMX8DXL_ADC_IN3 1
+#define IMX8DXL_ADC_IN3_M40_GPIO0_IO03 IMX8DXL_ADC_IN3 2
+#define IMX8DXL_ADC_IN3_ADMA_ACM_MCLK_OUT0 IMX8DXL_ADC_IN3 3
+#define IMX8DXL_ADC_IN3_LSIO_GPIO1_IO11 IMX8DXL_ADC_IN3 4
+#define IMX8DXL_ADC_IN3_ADMA_LCDIF_D15 IMX8DXL_ADC_IN3 5
+#define IMX8DXL_ADC_IN2_ADMA_ADC_IN2 IMX8DXL_ADC_IN2 0
+#define IMX8DXL_ADC_IN2_M40_UART0_RX IMX8DXL_ADC_IN2 1
+#define IMX8DXL_ADC_IN2_M40_GPIO0_IO02 IMX8DXL_ADC_IN2 2
+#define IMX8DXL_ADC_IN2_ADMA_ACM_MCLK_IN0 IMX8DXL_ADC_IN2 3
+#define IMX8DXL_ADC_IN2_LSIO_GPIO1_IO12 IMX8DXL_ADC_IN2 4
+#define IMX8DXL_ADC_IN2_ADMA_LCDIF_D16 IMX8DXL_ADC_IN2 5
+#define IMX8DXL_ADC_IN5_ADMA_ADC_IN5 IMX8DXL_ADC_IN5 0
+#define IMX8DXL_ADC_IN5_M40_TPM0_CH1 IMX8DXL_ADC_IN5 1
+#define IMX8DXL_ADC_IN5_M40_GPIO0_IO05 IMX8DXL_ADC_IN5 2
+#define IMX8DXL_ADC_IN5_ADMA_LCDIF_LCDBUSY IMX8DXL_ADC_IN5 3
+#define IMX8DXL_ADC_IN5_LSIO_GPIO1_IO13 IMX8DXL_ADC_IN5 4
+#define IMX8DXL_ADC_IN5_ADMA_LCDIF_D17 IMX8DXL_ADC_IN5 5
+#define IMX8DXL_ADC_IN4_ADMA_ADC_IN4 IMX8DXL_ADC_IN4 0
+#define IMX8DXL_ADC_IN4_M40_TPM0_CH0 IMX8DXL_ADC_IN4 1
+#define IMX8DXL_ADC_IN4_M40_GPIO0_IO04 IMX8DXL_ADC_IN4 2
+#define IMX8DXL_ADC_IN4_ADMA_LCDIF_LCDRESET IMX8DXL_ADC_IN4 3
+#define IMX8DXL_ADC_IN4_LSIO_GPIO1_IO14 IMX8DXL_ADC_IN4 4
+#define IMX8DXL_FLEXCAN0_RX_ADMA_FLEXCAN0_RX IMX8DXL_FLEXCAN0_RX 0
+#define IMX8DXL_FLEXCAN0_RX_ADMA_SAI2_RXC IMX8DXL_FLEXCAN0_RX 1
+#define IMX8DXL_FLEXCAN0_RX_ADMA_UART0_RTS_B IMX8DXL_FLEXCAN0_RX 2
+#define IMX8DXL_FLEXCAN0_RX_ADMA_SAI1_TXC IMX8DXL_FLEXCAN0_RX 3
+#define IMX8DXL_FLEXCAN0_RX_LSIO_GPIO1_IO15 IMX8DXL_FLEXCAN0_RX 4
+#define IMX8DXL_FLEXCAN0_RX_LSIO_GPIO6_IO08 IMX8DXL_FLEXCAN0_RX 5
+#define IMX8DXL_FLEXCAN0_TX_ADMA_FLEXCAN0_TX IMX8DXL_FLEXCAN0_TX 0
+#define IMX8DXL_FLEXCAN0_TX_ADMA_SAI2_RXD IMX8DXL_FLEXCAN0_TX 1
+#define IMX8DXL_FLEXCAN0_TX_ADMA_UART0_CTS_B IMX8DXL_FLEXCAN0_TX 2
+#define IMX8DXL_FLEXCAN0_TX_ADMA_SAI1_TXFS IMX8DXL_FLEXCAN0_TX 3
+#define IMX8DXL_FLEXCAN0_TX_LSIO_GPIO1_IO16 IMX8DXL_FLEXCAN0_TX 4
+#define IMX8DXL_FLEXCAN0_TX_LSIO_GPIO6_IO09 IMX8DXL_FLEXCAN0_TX 5
+#define IMX8DXL_FLEXCAN1_RX_ADMA_FLEXCAN1_RX IMX8DXL_FLEXCAN1_RX 0
+#define IMX8DXL_FLEXCAN1_RX_ADMA_SAI2_RXFS IMX8DXL_FLEXCAN1_RX 1
+#define IMX8DXL_FLEXCAN1_RX_ADMA_FTM_CH2 IMX8DXL_FLEXCAN1_RX 2
+#define IMX8DXL_FLEXCAN1_RX_ADMA_SAI1_TXD IMX8DXL_FLEXCAN1_RX 3
+#define IMX8DXL_FLEXCAN1_RX_LSIO_GPIO1_IO17 IMX8DXL_FLEXCAN1_RX 4
+#define IMX8DXL_FLEXCAN1_RX_LSIO_GPIO6_IO10 IMX8DXL_FLEXCAN1_RX 5
+#define IMX8DXL_FLEXCAN1_TX_ADMA_FLEXCAN1_TX IMX8DXL_FLEXCAN1_TX 0
+#define IMX8DXL_FLEXCAN1_TX_ADMA_SAI3_RXC IMX8DXL_FLEXCAN1_TX 1
+#define IMX8DXL_FLEXCAN1_TX_ADMA_DMA0_REQ_IN0 IMX8DXL_FLEXCAN1_TX 2
+#define IMX8DXL_FLEXCAN1_TX_ADMA_SAI1_RXD IMX8DXL_FLEXCAN1_TX 3
+#define IMX8DXL_FLEXCAN1_TX_LSIO_GPIO1_IO18 IMX8DXL_FLEXCAN1_TX 4
+#define IMX8DXL_FLEXCAN1_TX_LSIO_GPIO6_IO11 IMX8DXL_FLEXCAN1_TX 5
+#define IMX8DXL_FLEXCAN2_RX_ADMA_FLEXCAN2_RX IMX8DXL_FLEXCAN2_RX 0
+#define IMX8DXL_FLEXCAN2_RX_ADMA_SAI3_RXD IMX8DXL_FLEXCAN2_RX 1
+#define IMX8DXL_FLEXCAN2_RX_ADMA_UART3_RX IMX8DXL_FLEXCAN2_RX 2
+#define IMX8DXL_FLEXCAN2_RX_ADMA_SAI1_RXFS IMX8DXL_FLEXCAN2_RX 3
+#define IMX8DXL_FLEXCAN2_RX_LSIO_GPIO1_IO19 IMX8DXL_FLEXCAN2_RX 4
+#define IMX8DXL_FLEXCAN2_RX_LSIO_GPIO6_IO12 IMX8DXL_FLEXCAN2_RX 5
+#define IMX8DXL_FLEXCAN2_TX_ADMA_FLEXCAN2_TX IMX8DXL_FLEXCAN2_TX 0
+#define IMX8DXL_FLEXCAN2_TX_ADMA_SAI3_RXFS IMX8DXL_FLEXCAN2_TX 1
+#define IMX8DXL_FLEXCAN2_TX_ADMA_UART3_TX IMX8DXL_FLEXCAN2_TX 2
+#define IMX8DXL_FLEXCAN2_TX_ADMA_SAI1_RXC IMX8DXL_FLEXCAN2_TX 3
+#define IMX8DXL_FLEXCAN2_TX_LSIO_GPIO1_IO20 IMX8DXL_FLEXCAN2_TX 4
+#define IMX8DXL_FLEXCAN2_TX_LSIO_GPIO6_IO13 IMX8DXL_FLEXCAN2_TX 5
+#define IMX8DXL_UART0_RX_ADMA_UART0_RX IMX8DXL_UART0_RX 0
+#define IMX8DXL_UART0_RX_ADMA_MQS_R IMX8DXL_UART0_RX 1
+#define IMX8DXL_UART0_RX_ADMA_FLEXCAN0_RX IMX8DXL_UART0_RX 2
+#define IMX8DXL_UART0_RX_SCU_UART0_RX IMX8DXL_UART0_RX 3
+#define IMX8DXL_UART0_RX_LSIO_GPIO1_IO21 IMX8DXL_UART0_RX 4
+#define IMX8DXL_UART0_RX_LSIO_GPIO6_IO14 IMX8DXL_UART0_RX 5
+#define IMX8DXL_UART0_TX_ADMA_UART0_TX IMX8DXL_UART0_TX 0
+#define IMX8DXL_UART0_TX_ADMA_MQS_L IMX8DXL_UART0_TX 1
+#define IMX8DXL_UART0_TX_ADMA_FLEXCAN0_TX IMX8DXL_UART0_TX 2
+#define IMX8DXL_UART0_TX_SCU_UART0_TX IMX8DXL_UART0_TX 3
+#define IMX8DXL_UART0_TX_LSIO_GPIO1_IO22 IMX8DXL_UART0_TX 4
+#define IMX8DXL_UART0_TX_LSIO_GPIO6_IO15 IMX8DXL_UART0_TX 5
+#define IMX8DXL_UART2_TX_ADMA_UART2_TX IMX8DXL_UART2_TX 0
+#define IMX8DXL_UART2_TX_ADMA_FTM_CH1 IMX8DXL_UART2_TX 1
+#define IMX8DXL_UART2_TX_ADMA_FLEXCAN1_TX IMX8DXL_UART2_TX 2
+#define IMX8DXL_UART2_TX_LSIO_GPIO1_IO23 IMX8DXL_UART2_TX 4
+#define IMX8DXL_UART2_TX_LSIO_GPIO6_IO16 IMX8DXL_UART2_TX 5
+#define IMX8DXL_UART2_RX_ADMA_UART2_RX IMX8DXL_UART2_RX 0
+#define IMX8DXL_UART2_RX_ADMA_FTM_CH0 IMX8DXL_UART2_RX 1
+#define IMX8DXL_UART2_RX_ADMA_FLEXCAN1_RX IMX8DXL_UART2_RX 2
+#define IMX8DXL_UART2_RX_LSIO_GPIO1_IO24 IMX8DXL_UART2_RX 4
+#define IMX8DXL_UART2_RX_LSIO_GPIO6_IO17 IMX8DXL_UART2_RX 5
+#define IMX8DXL_JTAG_TRST_B_SCU_JTAG_TRST_B IMX8DXL_JTAG_TRST_B 0
+#define IMX8DXL_JTAG_TRST_B_SCU_WDOG0_WDOG_OUT IMX8DXL_JTAG_TRST_B 1
+#define IMX8DXL_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8DXL_PMIC_I2C_SCL 0
+#define IMX8DXL_PMIC_I2C_SCL_SCU_GPIO0_IOXX_PMIC_A35_ON IMX8DXL_PMIC_I2C_SCL 1
+#define IMX8DXL_PMIC_I2C_SCL_LSIO_GPIO2_IO01 IMX8DXL_PMIC_I2C_SCL 4
+#define IMX8DXL_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8DXL_PMIC_I2C_SDA 0
+#define IMX8DXL_PMIC_I2C_SDA_SCU_GPIO0_IOXX_PMIC_GPU_ON IMX8DXL_PMIC_I2C_SDA 1
+#define IMX8DXL_PMIC_I2C_SDA_LSIO_GPIO2_IO02 IMX8DXL_PMIC_I2C_SDA 4
+#define IMX8DXL_PMIC_INT_B_SCU_DSC_PMIC_INT_B IMX8DXL_PMIC_INT_B 0
+#define IMX8DXL_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8DXL_SCU_GPIO0_00 0
+#define IMX8DXL_SCU_GPIO0_00_SCU_UART0_RX IMX8DXL_SCU_GPIO0_00 1
+#define IMX8DXL_SCU_GPIO0_00_M40_UART0_RX IMX8DXL_SCU_GPIO0_00 2
+#define IMX8DXL_SCU_GPIO0_00_ADMA_UART3_RX IMX8DXL_SCU_GPIO0_00 3
+#define IMX8DXL_SCU_GPIO0_00_LSIO_GPIO2_IO03 IMX8DXL_SCU_GPIO0_00 4
+#define IMX8DXL_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8DXL_SCU_GPIO0_01 0
+#define IMX8DXL_SCU_GPIO0_01_SCU_UART0_TX IMX8DXL_SCU_GPIO0_01 1
+#define IMX8DXL_SCU_GPIO0_01_M40_UART0_TX IMX8DXL_SCU_GPIO0_01 2
+#define IMX8DXL_SCU_GPIO0_01_ADMA_UART3_TX IMX8DXL_SCU_GPIO0_01 3
+#define IMX8DXL_SCU_GPIO0_01_SCU_WDOG0_WDOG_OUT IMX8DXL_SCU_GPIO0_01 4
+#define IMX8DXL_SCU_PMIC_STANDBY_SCU_DSC_PMIC_STANDBY IMX8DXL_SCU_PMIC_STANDBY 0
+#define IMX8DXL_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8DXL_SCU_BOOT_MODE1 0
+#define IMX8DXL_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8DXL_SCU_BOOT_MODE0 0
+#define IMX8DXL_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8DXL_SCU_BOOT_MODE2 0
+#define IMX8DXL_SCU_BOOT_MODE2_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8DXL_SCU_BOOT_MODE2 1
+#define IMX8DXL_SNVS_TAMPER_OUT1_LSIO_GPIO2_IO05_IN IMX8DXL_SNVS_TAMPER_OUT1 4
+#define IMX8DXL_SNVS_TAMPER_OUT1_LSIO_GPIO6_IO19_IN IMX8DXL_SNVS_TAMPER_OUT1 5
+#define IMX8DXL_SNVS_TAMPER_OUT2_LSIO_GPIO2_IO06_IN IMX8DXL_SNVS_TAMPER_OUT2 4
+#define IMX8DXL_SNVS_TAMPER_OUT2_LSIO_GPIO6_IO20_IN IMX8DXL_SNVS_TAMPER_OUT2 5
+#define IMX8DXL_SNVS_TAMPER_OUT3_ADMA_SAI2_RXC IMX8DXL_SNVS_TAMPER_OUT3 2
+#define IMX8DXL_SNVS_TAMPER_OUT3_LSIO_GPIO2_IO07_IN IMX8DXL_SNVS_TAMPER_OUT3 4
+#define IMX8DXL_SNVS_TAMPER_OUT3_LSIO_GPIO6_IO21_IN IMX8DXL_SNVS_TAMPER_OUT3 5
+#define IMX8DXL_SNVS_TAMPER_OUT4_ADMA_SAI2_RXD IMX8DXL_SNVS_TAMPER_OUT4 2
+#define IMX8DXL_SNVS_TAMPER_OUT4_LSIO_GPIO2_IO08_IN IMX8DXL_SNVS_TAMPER_OUT4 4
+#define IMX8DXL_SNVS_TAMPER_OUT4_LSIO_GPIO6_IO22_IN IMX8DXL_SNVS_TAMPER_OUT4 5
+#define IMX8DXL_SNVS_TAMPER_IN0_ADMA_SAI2_RXFS IMX8DXL_SNVS_TAMPER_IN0 2
+#define IMX8DXL_SNVS_TAMPER_IN0_LSIO_GPIO2_IO09_IN IMX8DXL_SNVS_TAMPER_IN0 4
+#define IMX8DXL_SNVS_TAMPER_IN0_LSIO_GPIO6_IO23_IN IMX8DXL_SNVS_TAMPER_IN0 5
+#define IMX8DXL_SNVS_TAMPER_IN1_ADMA_SAI3_RXC IMX8DXL_SNVS_TAMPER_IN1 2
+#define IMX8DXL_SNVS_TAMPER_IN1_LSIO_GPIO2_IO10_IN IMX8DXL_SNVS_TAMPER_IN1 4
+#define IMX8DXL_SNVS_TAMPER_IN1_LSIO_GPIO6_IO24_IN IMX8DXL_SNVS_TAMPER_IN1 5
+#define IMX8DXL_SNVS_TAMPER_IN2_ADMA_SAI3_RXD IMX8DXL_SNVS_TAMPER_IN2 2
+#define IMX8DXL_SNVS_TAMPER_IN2_LSIO_GPIO2_IO11_IN IMX8DXL_SNVS_TAMPER_IN2 4
+#define IMX8DXL_SNVS_TAMPER_IN2_LSIO_GPIO6_IO25_IN IMX8DXL_SNVS_TAMPER_IN2 5
+#define IMX8DXL_SNVS_TAMPER_IN3_ADMA_SAI3_RXFS IMX8DXL_SNVS_TAMPER_IN3 2
+#define IMX8DXL_SNVS_TAMPER_IN3_LSIO_GPIO2_IO12_IN IMX8DXL_SNVS_TAMPER_IN3 4
+#define IMX8DXL_SNVS_TAMPER_IN3_LSIO_GPIO6_IO26_IN IMX8DXL_SNVS_TAMPER_IN3 5
+#define IMX8DXL_SPI1_SCK_ADMA_I2C2_SDA IMX8DXL_SPI1_SCK 2
+#define IMX8DXL_SPI1_SCK_ADMA_SPI1_SCK IMX8DXL_SPI1_SCK 3
+#define IMX8DXL_SPI1_SCK_LSIO_GPIO3_IO00 IMX8DXL_SPI1_SCK 4
+#define IMX8DXL_SPI1_SDO_ADMA_I2C2_SCL IMX8DXL_SPI1_SDO 2
+#define IMX8DXL_SPI1_SDO_ADMA_SPI1_SDO IMX8DXL_SPI1_SDO 3
+#define IMX8DXL_SPI1_SDO_LSIO_GPIO3_IO01 IMX8DXL_SPI1_SDO 4
+#define IMX8DXL_SPI1_SDI_ADMA_I2C3_SCL IMX8DXL_SPI1_SDI 2
+#define IMX8DXL_SPI1_SDI_ADMA_SPI1_SDI IMX8DXL_SPI1_SDI 3
+#define IMX8DXL_SPI1_SDI_LSIO_GPIO3_IO02 IMX8DXL_SPI1_SDI 4
+#define IMX8DXL_SPI1_CS0_ADMA_I2C3_SDA IMX8DXL_SPI1_CS0 2
+#define IMX8DXL_SPI1_CS0_ADMA_SPI1_CS0 IMX8DXL_SPI1_CS0 3
+#define IMX8DXL_SPI1_CS0_LSIO_GPIO3_IO03 IMX8DXL_SPI1_CS0 4
+#define IMX8DXL_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8DXL_QSPI0A_DATA1 0
+#define IMX8DXL_QSPI0A_DATA1_LSIO_GPIO3_IO10 IMX8DXL_QSPI0A_DATA1 4
+#define IMX8DXL_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8DXL_QSPI0A_DATA0 0
+#define IMX8DXL_QSPI0A_DATA0_LSIO_GPIO3_IO09 IMX8DXL_QSPI0A_DATA0 4
+#define IMX8DXL_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8DXL_QSPI0A_DATA3 0
+#define IMX8DXL_QSPI0A_DATA3_LSIO_GPIO3_IO12 IMX8DXL_QSPI0A_DATA3 4
+#define IMX8DXL_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8DXL_QSPI0A_DATA2 0
+#define IMX8DXL_QSPI0A_DATA2_LSIO_GPIO3_IO11 IMX8DXL_QSPI0A_DATA2 4
+#define IMX8DXL_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8DXL_QSPI0A_SS0_B 0
+#define IMX8DXL_QSPI0A_SS0_B_LSIO_GPIO3_IO14 IMX8DXL_QSPI0A_SS0_B 4
+#define IMX8DXL_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8DXL_QSPI0A_DQS 0
+#define IMX8DXL_QSPI0A_DQS_LSIO_GPIO3_IO13 IMX8DXL_QSPI0A_DQS 4
+#define IMX8DXL_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8DXL_QSPI0A_SCLK 0
+#define IMX8DXL_QSPI0A_SCLK_LSIO_GPIO3_IO16 IMX8DXL_QSPI0A_SCLK 4
+#define IMX8DXL_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8DXL_QSPI0B_SCLK 0
+#define IMX8DXL_QSPI0B_SCLK_LSIO_GPIO3_IO17 IMX8DXL_QSPI0B_SCLK 4
+#define IMX8DXL_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8DXL_QSPI0B_DQS 0
+#define IMX8DXL_QSPI0B_DQS_LSIO_GPIO3_IO22 IMX8DXL_QSPI0B_DQS 4
+#define IMX8DXL_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8DXL_QSPI0B_DATA1 0
+#define IMX8DXL_QSPI0B_DATA1_LSIO_GPIO3_IO19 IMX8DXL_QSPI0B_DATA1 4
+#define IMX8DXL_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8DXL_QSPI0B_DATA0 0
+#define IMX8DXL_QSPI0B_DATA0_LSIO_GPIO3_IO18 IMX8DXL_QSPI0B_DATA0 4
+#define IMX8DXL_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8DXL_QSPI0B_DATA3 0
+#define IMX8DXL_QSPI0B_DATA3_LSIO_GPIO3_IO21 IMX8DXL_QSPI0B_DATA3 4
+#define IMX8DXL_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8DXL_QSPI0B_DATA2 0
+#define IMX8DXL_QSPI0B_DATA2_LSIO_GPIO3_IO20 IMX8DXL_QSPI0B_DATA2 4
+#define IMX8DXL_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8DXL_QSPI0B_SS0_B 0
+#define IMX8DXL_QSPI0B_SS0_B_LSIO_GPIO3_IO23 IMX8DXL_QSPI0B_SS0_B 4
+#define IMX8DXL_QSPI0B_SS0_B_LSIO_QSPI0A_SS1_B IMX8DXL_QSPI0B_SS0_B 5
+
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP 0
+#define IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO_PAD IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A 0
+#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B 0
+
+#endif
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index 6d6bac1c26d7..5f291045e8fd 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -9,13 +9,6 @@
#ifndef __DT_BINDINGS_ROCKCHIP_PINCTRL_H__
#define __DT_BINDINGS_ROCKCHIP_PINCTRL_H__
-#define RK_GPIO0 0
-#define RK_GPIO1 1
-#define RK_GPIO2 2
-#define RK_GPIO3 3
-#define RK_GPIO4 4
-#define RK_GPIO6 6
-
#define RK_PA0 0
#define RK_PA1 1
#define RK_PA2 2
@@ -50,9 +43,5 @@
#define RK_PD7 31
#define RK_FUNC_GPIO 0
-#define RK_FUNC_1 1 /* deprecated */
-#define RK_FUNC_2 2 /* deprecated */
-#define RK_FUNC_3 3 /* deprecated */
-#define RK_FUNC_4 4 /* deprecated */
#endif
diff --git a/include/dt-bindings/power/marvell,mmp2.h b/include/dt-bindings/power/marvell,mmp2.h
new file mode 100644
index 000000000000..c53d2b3e1057
--- /dev/null
+++ b/include/dt-bindings/power/marvell,mmp2.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_MMP2_POWER_H
+#define __DTS_MARVELL_MMP2_POWER_H
+
+#define MMP2_POWER_DOMAIN_GPU 0
+#define MMP2_POWER_DOMAIN_AUDIO 1
+#define MMP3_POWER_DOMAIN_CAMERA 2
+
+#define MMP2_NR_POWER_DOMAINS 3
+
+#endif
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
new file mode 100644
index 000000000000..1262dac696c0
--- /dev/null
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXBB_POWER_H
+#define _DT_BINDINGS_MESON_GXBB_POWER_H
+
+#define PWRC_GXBB_VPU_ID 0
+#define PWRC_GXBB_ETHERNET_MEM_ID 1
+
+#endif
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
new file mode 100644
index 000000000000..dd8b2ddb82a7
--- /dev/null
+++ b/include/dt-bindings/power/meson8-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON8_POWER_H
+#define _DT_BINDINGS_MESON8_POWER_H
+
+#define PWRC_MESON8_VPU_ID 0
+#define PWRC_MESON8_ETHERNET_MEM_ID 1
+#define PWRC_MESON8_AUDIO_DSP_MEM_ID 2
+
+#endif /* _DT_BINDINGS_MESON8_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 3f74096d5a7c..dc146e44228b 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -28,6 +28,18 @@
#define SM8150_MMCX 9
#define SM8150_MMCX_AO 10
+/* SM8250 Power Domain Indexes */
+#define SM8250_CX 0
+#define SM8250_CX_AO 1
+#define SM8250_EBI 2
+#define SM8250_GFX 3
+#define SM8250_LCX 4
+#define SM8250_LMX 5
+#define SM8250_MMCX 6
+#define SM8250_MMCX_AO 7
+#define SM8250_MX 8
+#define SM8250_MX_AO 9
+
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
diff --git a/include/dt-bindings/power/r8a7742-sysc.h b/include/dt-bindings/power/r8a7742-sysc.h
new file mode 100644
index 000000000000..1b1bd3cf95db
--- /dev/null
+++ b/include/dt-bindings/power/r8a7742-sysc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7742_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7742_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7742_PD_CA15_CPU0 0
+#define R8A7742_PD_CA15_CPU1 1
+#define R8A7742_PD_CA15_CPU2 2
+#define R8A7742_PD_CA15_CPU3 3
+#define R8A7742_PD_CA7_CPU0 5
+#define R8A7742_PD_CA7_CPU1 6
+#define R8A7742_PD_CA7_CPU2 7
+#define R8A7742_PD_CA7_CPU3 8
+#define R8A7742_PD_CA15_SCU 12
+#define R8A7742_PD_RGX 20
+#define R8A7742_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A7742_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7742_SYSC_H__ */
diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
index ea5058618863..883bfd3bcbad 100644
--- a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
@@ -69,7 +69,7 @@
#define RESET_SYS_CPU_L2 58
#define RESET_SYS_CPU_P 59
#define RESET_SYS_CPU_MBIST 60
-/* 61 */
+#define RESET_ACODEC 61
/* 62 */
/* 63 */
/* RESET2 */
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
new file mode 100644
index 000000000000..3578e83026bc
--- /dev/null
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU reset indices
+ */
+#ifndef __DT_BINDINGS_RESET_BT1_CCU_H
+#define __DT_BINDINGS_RESET_BT1_CCU_H
+
+#define CCU_AXI_MAIN_RST 0
+#define CCU_AXI_DDR_RST 1
+#define CCU_AXI_SATA_RST 2
+#define CCU_AXI_GMAC0_RST 3
+#define CCU_AXI_GMAC1_RST 4
+#define CCU_AXI_XGMAC_RST 5
+#define CCU_AXI_PCIE_M_RST 6
+#define CCU_AXI_PCIE_S_RST 7
+#define CCU_AXI_USB_RST 8
+#define CCU_AXI_HWA_RST 9
+#define CCU_AXI_SRAM_RST 10
+
+#define CCU_SYS_SATA_REF_RST 0
+#define CCU_SYS_APB_RST 1
+
+#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
diff --git a/include/dt-bindings/reset/imx8mp-reset.h b/include/dt-bindings/reset/imx8mp-reset.h
new file mode 100644
index 000000000000..2e8c9104b666
--- /dev/null
+++ b/include/dt-bindings/reset/imx8mp-reset.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MP_H
+#define DT_BINDING_RESET_IMX8MP_H
+
+#define IMX8MP_RESET_A53_CORE_POR_RESET0 0
+#define IMX8MP_RESET_A53_CORE_POR_RESET1 1
+#define IMX8MP_RESET_A53_CORE_POR_RESET2 2
+#define IMX8MP_RESET_A53_CORE_POR_RESET3 3
+#define IMX8MP_RESET_A53_CORE_RESET0 4
+#define IMX8MP_RESET_A53_CORE_RESET1 5
+#define IMX8MP_RESET_A53_CORE_RESET2 6
+#define IMX8MP_RESET_A53_CORE_RESET3 7
+#define IMX8MP_RESET_A53_DBG_RESET0 8
+#define IMX8MP_RESET_A53_DBG_RESET1 9
+#define IMX8MP_RESET_A53_DBG_RESET2 10
+#define IMX8MP_RESET_A53_DBG_RESET3 11
+#define IMX8MP_RESET_A53_ETM_RESET0 12
+#define IMX8MP_RESET_A53_ETM_RESET1 13
+#define IMX8MP_RESET_A53_ETM_RESET2 14
+#define IMX8MP_RESET_A53_ETM_RESET3 15
+#define IMX8MP_RESET_A53_SOC_DBG_RESET 16
+#define IMX8MP_RESET_A53_L2RESET 17
+#define IMX8MP_RESET_SW_NON_SCLR_M7C_RST 18
+#define IMX8MP_RESET_OTG1_PHY_RESET 19
+#define IMX8MP_RESET_OTG2_PHY_RESET 20
+#define IMX8MP_RESET_SUPERMIX_RESET 21
+#define IMX8MP_RESET_AUDIOMIX_RESET 22
+#define IMX8MP_RESET_MLMIX_RESET 23
+#define IMX8MP_RESET_PCIEPHY 24
+#define IMX8MP_RESET_PCIEPHY_PERST 25
+#define IMX8MP_RESET_PCIE_CTRL_APPS_EN 26
+#define IMX8MP_RESET_PCIE_CTRL_APPS_TURNOFF 27
+#define IMX8MP_RESET_HDMI_PHY_APB_RESET 28
+#define IMX8MP_RESET_MEDIA_RESET 29
+#define IMX8MP_RESET_GPU2D_RESET 30
+#define IMX8MP_RESET_GPU3D_RESET 31
+#define IMX8MP_RESET_GPU_RESET 32
+#define IMX8MP_RESET_VPU_RESET 33
+#define IMX8MP_RESET_VPU_G1_RESET 34
+#define IMX8MP_RESET_VPU_G2_RESET 35
+#define IMX8MP_RESET_VPUVC8KE_RESET 36
+#define IMX8MP_RESET_NOC_RESET 37
+
+#define IMX8MP_RESET_NUM 38
+
+#endif
diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h
index 9a301082d361..a5b570737582 100644
--- a/include/dt-bindings/reset/imx8mq-reset.h
+++ b/include/dt-bindings/reset/imx8mq-reset.h
@@ -28,36 +28,36 @@
#define IMX8MQ_RESET_A53_L2RESET 17
#define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST 18
#define IMX8MQ_RESET_OTG1_PHY_RESET 19
-#define IMX8MQ_RESET_OTG2_PHY_RESET 20
-#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21
-#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22
-#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23
-#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24
-#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25
-#define IMX8MQ_RESET_PCIEPHY 26
-#define IMX8MQ_RESET_PCIEPHY_PERST 27
-#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28
-#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29
-#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */
+#define IMX8MQ_RESET_OTG2_PHY_RESET 20 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY 26 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY_PERST 27 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM/i.MX8MN does NOT support */
#define IMX8MQ_RESET_DISP_RESET 31
#define IMX8MQ_RESET_GPU_RESET 32
-#define IMX8MQ_RESET_VPU_RESET 33
-#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_DDRC1_PRST 44
-#define IMX8MQ_RESET_DDRC1_CORE_RESET 45
-#define IMX8MQ_RESET_DDRC1_PHY_RESET 46
-#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */
-#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */
+#define IMX8MQ_RESET_VPU_RESET 33 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC1_PRST 44 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC1_CORE_RESET 45 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC1_PHY_RESET 46 /* i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM/i.MX8MN does NOT support */
+#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM/i.MX8MN does NOT support */
#define IMX8MQ_RESET_NUM 50
diff --git a/include/dt-bindings/reset/qcom,gcc-msm8939.h b/include/dt-bindings/reset/qcom,gcc-msm8939.h
new file mode 100644
index 000000000000..fa41ffeae7a2
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-msm8939.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_MSM_GCC_8939_H
+#define _DT_BINDINGS_RESET_MSM_GCC_8939_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_IMEM_BCR 9
+#define GCC_SMMU_BCR 10
+#define GCC_APSS_TCU_BCR 11
+#define GCC_SMMU_XPU_BCR 12
+#define GCC_PCNOC_TBU_BCR 13
+#define GCC_PRNG_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_CRYPTO_BCR 16
+#define GCC_SEC_CTRL_BCR 17
+#define GCC_AUDIO_CORE_BCR 18
+#define GCC_ULT_AUDIO_BCR 19
+#define GCC_DEHR_BCR 20
+#define GCC_SYSTEM_NOC_BCR 21
+#define GCC_PCNOC_BCR 22
+#define GCC_TCSR_BCR 23
+#define GCC_QDSS_BCR 24
+#define GCC_DCD_BCR 25
+#define GCC_MSG_RAM_BCR 26
+#define GCC_MPM_BCR 27
+#define GCC_SPMI_BCR 28
+#define GCC_SPDM_BCR 29
+#define GCC_MM_SPDM_BCR 30
+#define GCC_BIMC_BCR 31
+#define GCC_RBCPR_BCR 32
+#define GCC_TLMM_BCR 33
+#define GCC_USB_HS_BCR 34
+#define GCC_USB2A_PHY_BCR 35
+#define GCC_SDCC1_BCR 36
+#define GCC_SDCC2_BCR 37
+#define GCC_PDM_BCR 38
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 39
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49
+#define GCC_MMSS_BCR 50
+#define GCC_VENUS0_BCR 51
+#define GCC_MDSS_BCR 52
+#define GCC_CAMSS_PHY0_BCR 53
+#define GCC_CAMSS_CSI0_BCR 54
+#define GCC_CAMSS_CSI0PHY_BCR 55
+#define GCC_CAMSS_CSI0RDI_BCR 56
+#define GCC_CAMSS_CSI0PIX_BCR 57
+#define GCC_CAMSS_PHY1_BCR 58
+#define GCC_CAMSS_CSI1_BCR 59
+#define GCC_CAMSS_CSI1PHY_BCR 60
+#define GCC_CAMSS_CSI1RDI_BCR 61
+#define GCC_CAMSS_CSI1PIX_BCR 62
+#define GCC_CAMSS_ISPIF_BCR 63
+#define GCC_CAMSS_CCI_BCR 64
+#define GCC_CAMSS_MCLK0_BCR 65
+#define GCC_CAMSS_MCLK1_BCR 66
+#define GCC_CAMSS_GP0_BCR 67
+#define GCC_CAMSS_GP1_BCR 68
+#define GCC_CAMSS_TOP_BCR 69
+#define GCC_CAMSS_MICRO_BCR 70
+#define GCC_CAMSS_JPEG_BCR 71
+#define GCC_CAMSS_VFE_BCR 72
+#define GCC_CAMSS_CSI_VFE0_BCR 73
+#define GCC_OXILI_BCR 74
+#define GCC_GMEM_BCR 75
+#define GCC_CAMSS_AHB_BCR 76
+#define GCC_MDP_TBU_BCR 77
+#define GCC_GFX_TBU_BCR 78
+#define GCC_GFX_TCU_BCR 79
+#define GCC_MSS_TBU_AXI_BCR 80
+#define GCC_MSS_TBU_GSS_AXI_BCR 81
+#define GCC_MSS_TBU_Q6_AXI_BCR 82
+#define GCC_GTCU_AHB_BCR 83
+#define GCC_SMMU_CFG_BCR 84
+#define GCC_VFE_TBU_BCR 85
+#define GCC_VENUS_TBU_BCR 86
+#define GCC_JPEG_TBU_BCR 87
+#define GCC_PRONTO_TBU_BCR 88
+#define GCC_SMMU_CATS_BCR 89
+#define GCC_BLSP1_UART3_BCR 90
+#define GCC_CAMSS_CSI2_BCR 91
+#define GCC_CAMSS_CSI2PHY_BCR 92
+#define GCC_CAMSS_CSI2RDI_BCR 93
+#define GCC_CAMSS_CSI2PIX_BCR 94
+#define GCC_USB_FS_BCR 95
+#define GCC_BLSP1_QUP4_SPI_APPS_CBCR 96
+#define GCC_CAMSS_MCLK2_BCR 97
+#define GCC_CPP_TBU_BCR 98
+#define GCC_MDP_RT_TBU_BCR 99
+
+#endif
diff --git a/include/dt-bindings/reset/realtek,rtd1195.h b/include/dt-bindings/reset/realtek,rtd1195.h
new file mode 100644
index 000000000000..27902abf935b
--- /dev/null
+++ b/include/dt-bindings/reset/realtek,rtd1195.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+/*
+ * Realtek RTD1195 reset controllers
+ *
+ * Copyright (c) 2017 Andreas Färber
+ */
+#ifndef DT_BINDINGS_RESET_RTD1195_H
+#define DT_BINDINGS_RESET_RTD1195_H
+
+/* soft reset 1 */
+#define RTD1195_RSTN_MISC 0
+#define RTD1195_RSTN_RNG 1
+#define RTD1195_RSTN_USB3_POW 2
+#define RTD1195_RSTN_GSPI 3
+#define RTD1195_RSTN_USB3_P0_MDIO 4
+#define RTD1195_RSTN_VE_H265 5
+#define RTD1195_RSTN_USB 6
+#define RTD1195_RSTN_USB_PHY0 8
+#define RTD1195_RSTN_USB_PHY1 9
+#define RTD1195_RSTN_HDMIRX 11
+#define RTD1195_RSTN_HDMI 12
+#define RTD1195_RSTN_ETN 14
+#define RTD1195_RSTN_AIO 15
+#define RTD1195_RSTN_GPU 16
+#define RTD1195_RSTN_VE_H264 17
+#define RTD1195_RSTN_VE_JPEG 18
+#define RTD1195_RSTN_TVE 19
+#define RTD1195_RSTN_VO 20
+#define RTD1195_RSTN_LVDS 21
+#define RTD1195_RSTN_SE 22
+#define RTD1195_RSTN_DCU 23
+#define RTD1195_RSTN_DC_PHY 24
+#define RTD1195_RSTN_CP 25
+#define RTD1195_RSTN_MD 26
+#define RTD1195_RSTN_TP 27
+#define RTD1195_RSTN_AE 28
+#define RTD1195_RSTN_NF 29
+#define RTD1195_RSTN_MIPI 30
+
+/* soft reset 2 */
+#define RTD1195_RSTN_ACPU 0
+#define RTD1195_RSTN_VCPU 1
+#define RTD1195_RSTN_PCR 9
+#define RTD1195_RSTN_CR 10
+#define RTD1195_RSTN_EMMC 11
+#define RTD1195_RSTN_SDIO 12
+#define RTD1195_RSTN_I2C_5 18
+#define RTD1195_RSTN_RTC 20
+#define RTD1195_RSTN_I2C_4 23
+#define RTD1195_RSTN_I2C_3 24
+#define RTD1195_RSTN_I2C_2 25
+#define RTD1195_RSTN_I2C_1 26
+#define RTD1195_RSTN_UR1 28
+
+/* soft reset 3 */
+#define RTD1195_RSTN_SB2 0
+
+/* iso soft reset */
+#define RTD1195_ISO_RSTN_VFD 0
+#define RTD1195_ISO_RSTN_IR 1
+#define RTD1195_ISO_RSTN_CEC0 2
+#define RTD1195_ISO_RSTN_CEC1 3
+#define RTD1195_ISO_RSTN_DP 4
+#define RTD1195_ISO_RSTN_CBUSTX 5
+#define RTD1195_ISO_RSTN_CBUSRX 6
+#define RTD1195_ISO_RSTN_EFUSE 7
+#define RTD1195_ISO_RSTN_UR0 8
+#define RTD1195_ISO_RSTN_GMAC 9
+#define RTD1195_ISO_RSTN_GPHY 10
+#define RTD1195_ISO_RSTN_I2C_0 11
+#define RTD1195_ISO_RSTN_I2C_6 12
+#define RTD1195_ISO_RSTN_CBUS 13
+
+#endif
diff --git a/include/dt-bindings/reset/realtek,rtd1295.h b/include/dt-bindings/reset/realtek,rtd1295.h
index 2c0cb6afe816..dd89e4c80264 100644
--- a/include/dt-bindings/reset/realtek,rtd1295.h
+++ b/include/dt-bindings/reset/realtek,rtd1295.h
@@ -75,6 +75,9 @@
#define RTD1295_RSTN_CBUS_TX 30
#define RTD1295_RSTN_SDS_PHY 31
+/* soft reset 3 */
+#define RTD1295_RSTN_SB2 0
+
/* soft reset 4 */
#define RTD1295_RSTN_DCPHY_CRT 0
#define RTD1295_RSTN_DCPHY_ALERT_RX 1
diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h
index 3fee04f81439..988d90d77f53 100644
--- a/include/keys/big_key-type.h
+++ b/include/keys/big_key-type.h
@@ -18,5 +18,6 @@ extern void big_key_revoke(struct key *key);
extern void big_key_destroy(struct key *key);
extern void big_key_describe(const struct key *big_key, struct seq_file *m);
extern long big_key_read(const struct key *key, char *buffer, size_t buflen);
+extern int big_key_update(struct key *key, struct key_preparsed_payload *prep);
#endif /* _KEYS_BIG_KEY_TYPE_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index be61fcddc02a..386c31432789 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -27,7 +27,7 @@
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
- char data[0] __aligned(__alignof__(u64)); /* actual data */
+ char data[] __aligned(__alignof__(u64)); /* actual data */
};
extern struct key_type key_type_user;
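The user-type.h hunk above is part of the treewide switch from zero-length arrays (data[0]) to C99 flexible array members (data[]); sizeof(struct user_key_payload) still excludes the trailing data, so existing allocation code is unaffected. A minimal sketch of how such a payload is typically sized follows (not part of the patch; the helper name and its arguments are made up):

/* Illustrative sketch only, not from this patch. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <keys/user-type.h>

static struct user_key_payload *alloc_payload(const void *src,
					      unsigned short datalen)
{
	struct user_key_payload *upayload;

	/*
	 * sizeof(*upayload) excludes data[]; struct_size() adds the
	 * datalen trailing bytes with overflow checking.
	 */
	upayload = kmalloc(struct_size(upayload, data, datalen), GFP_KERNEL);
	if (!upayload)
		return NULL;
	upayload->datalen = datalen;
	memcpy(upayload->data, src, datalen);
	return upayload;
}

struct_size() from <linux/overflow.h> is the usual way to express "header plus N trailing elements" for structures that end in a flexible array member.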
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 9b0c46a6ca1f..47e61e1d5337 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -175,7 +175,7 @@ struct kunit_suite {
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
- /* private - internal use only */
+ /* private: internal use only */
struct dentry *debugfs;
char *log;
};
@@ -232,12 +232,12 @@ void __kunit_test_suites_exit(struct kunit_suite **suites);
* kunit_test_suites() - used to register one or more &struct kunit_suite
* with KUnit.
*
- * @suites: a statically allocated list of &struct kunit_suite.
+ * @suites_list...: a statically allocated list of &struct kunit_suite.
*
- * Registers @suites with the test framework. See &struct kunit_suite for
+ * Registers @suites_list with the test framework. See &struct kunit_suite for
* more information.
*
- * When builtin, KUnit tests are all run as late_initcalls; this means
+ * When builtin, KUnit tests are all run as late_initcalls; this means
* that they cannot test anything where tests must run at a different init
* phase. One significant restriction resulting from this is that KUnit
* cannot reliably test anything that is initialized in the late_init phase;
@@ -253,8 +253,8 @@ void __kunit_test_suites_exit(struct kunit_suite **suites);
* tests from the same place, and at the very least to do so after
* everything else is definitely initialized.
*/
-#define kunit_test_suites(...) \
- static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \
+#define kunit_test_suites(suites_list...) \
+ static struct kunit_suite *suites[] = {suites_list, NULL}; \
static int kunit_test_suites_init(void) \
{ \
return __kunit_test_suites_init(suites); \
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
new file mode 100644
index 000000000000..bcb6aa27cfa6
--- /dev/null
+++ b/include/linux/atomic-arch-fallback.h
@@ -0,0 +1,2291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#ifndef arch_xchg_relaxed
+#define arch_xchg_relaxed arch_xchg
+#define arch_xchg_acquire arch_xchg
+#define arch_xchg_release arch_xchg
+#else /* arch_xchg_relaxed */
+
+#ifndef arch_xchg_acquire
+#define arch_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg_release
+#define arch_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_xchg
+#define arch_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_xchg_relaxed */
+
+#ifndef arch_cmpxchg_relaxed
+#define arch_cmpxchg_relaxed arch_cmpxchg
+#define arch_cmpxchg_acquire arch_cmpxchg
+#define arch_cmpxchg_release arch_cmpxchg
+#else /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg_acquire
+#define arch_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg_release
+#define arch_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg
+#define arch_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg_relaxed */
+
+#ifndef arch_cmpxchg64_relaxed
+#define arch_cmpxchg64_relaxed arch_cmpxchg64
+#define arch_cmpxchg64_acquire arch_cmpxchg64
+#define arch_cmpxchg64_release arch_cmpxchg64
+#else /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_cmpxchg64_acquire
+#define arch_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64_release
+#define arch_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef arch_cmpxchg64
+#define arch_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* arch_cmpxchg64_relaxed */
+
+#ifndef arch_atomic_read_acquire
+static __always_inline int
+arch_atomic_read_acquire(const atomic_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic_read_acquire arch_atomic_read_acquire
+#endif
+
+#ifndef arch_atomic_set_release
+static __always_inline void
+arch_atomic_set_release(atomic_t *v, int i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic_set_release arch_atomic_set_release
+#endif
+
+#ifndef arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return
+#else /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_add_return_acquire
+static __always_inline int
+arch_atomic_add_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
+#endif
+
+#ifndef arch_atomic_add_return_release
+static __always_inline int
+arch_atomic_add_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+}
+#define arch_atomic_add_return_release arch_atomic_add_return_release
+#endif
+
+#ifndef arch_atomic_add_return
+static __always_inline int
+arch_atomic_add_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_add_return arch_atomic_add_return
+#endif
+
+#endif /* arch_atomic_add_return_relaxed */
+
+#ifndef arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
+#else /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_fetch_add_acquire
+static __always_inline int
+arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic_fetch_add_release
+static __always_inline int
+arch_atomic_fetch_add_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+#endif
+
+#ifndef arch_atomic_fetch_add
+static __always_inline int
+arch_atomic_fetch_add(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#endif
+
+#endif /* arch_atomic_fetch_add_relaxed */
+
+#ifndef arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
+#else /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_sub_return_acquire
+static __always_inline int
+arch_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#endif
+
+#ifndef arch_atomic_sub_return_release
+static __always_inline int
+arch_atomic_sub_return_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+}
+#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+#endif
+
+#ifndef arch_atomic_sub_return
+static __always_inline int
+arch_atomic_sub_return(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_sub_return arch_atomic_sub_return
+#endif
+
+#endif /* arch_atomic_sub_return_relaxed */
+
+#ifndef arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
+#else /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_fetch_sub_acquire
+static __always_inline int
+arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic_fetch_sub_release
+static __always_inline int
+arch_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+#endif
+
+#ifndef arch_atomic_fetch_sub
+static __always_inline int
+arch_atomic_fetch_sub(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#endif
+
+#endif /* arch_atomic_fetch_sub_relaxed */
+
+#ifndef arch_atomic_inc
+static __always_inline void
+arch_atomic_inc(atomic_t *v)
+{
+ arch_atomic_add(1, v);
+}
+#define arch_atomic_inc arch_atomic_inc
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+#ifdef arch_atomic_inc_return
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return
+#define arch_atomic_inc_return_release arch_atomic_inc_return
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
+#endif /* arch_atomic_inc_return */
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ return arch_atomic_add_return(1, v);
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ return arch_atomic_add_return_acquire(1, v);
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ return arch_atomic_add_return_release(1, v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return_relaxed
+static __always_inline int
+arch_atomic_inc_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_add_return_relaxed(1, v);
+}
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#endif
+
+#else /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_inc_return_acquire
+static __always_inline int
+arch_atomic_inc_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#endif
+
+#ifndef arch_atomic_inc_return_release
+static __always_inline int
+arch_atomic_inc_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+}
+#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#endif
+
+#ifndef arch_atomic_inc_return
+static __always_inline int
+arch_atomic_inc_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_inc_return arch_atomic_inc_return
+#endif
+
+#endif /* arch_atomic_inc_return_relaxed */
+
+#ifndef arch_atomic_fetch_inc_relaxed
+#ifdef arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
+#endif /* arch_atomic_fetch_inc */
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ return arch_atomic_fetch_add(1, v);
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_add_acquire(1, v);
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ return arch_atomic_fetch_add_release(1, v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc_relaxed
+static __always_inline int
+arch_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_add_relaxed(1, v);
+}
+#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_fetch_inc_acquire
+static __always_inline int
+arch_atomic_fetch_inc_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic_fetch_inc_release
+static __always_inline int
+arch_atomic_fetch_inc_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+}
+#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#endif
+
+#ifndef arch_atomic_fetch_inc
+static __always_inline int
+arch_atomic_fetch_inc(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#endif
+
+#endif /* arch_atomic_fetch_inc_relaxed */
+
+#ifndef arch_atomic_dec
+static __always_inline void
+arch_atomic_dec(atomic_t *v)
+{
+ arch_atomic_sub(1, v);
+}
+#define arch_atomic_dec arch_atomic_dec
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+#ifdef arch_atomic_dec_return
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return
+#define arch_atomic_dec_return_release arch_atomic_dec_return
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
+#endif /* arch_atomic_dec_return */
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ return arch_atomic_sub_return(1, v);
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ return arch_atomic_sub_return_acquire(1, v);
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ return arch_atomic_sub_return_release(1, v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return_relaxed
+static __always_inline int
+arch_atomic_dec_return_relaxed(atomic_t *v)
+{
+ return arch_atomic_sub_return_relaxed(1, v);
+}
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
+#endif
+
+#else /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_dec_return_acquire
+static __always_inline int
+arch_atomic_dec_return_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#endif
+
+#ifndef arch_atomic_dec_return_release
+static __always_inline int
+arch_atomic_dec_return_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+}
+#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#endif
+
+#ifndef arch_atomic_dec_return
+static __always_inline int
+arch_atomic_dec_return(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_dec_return arch_atomic_dec_return
+#endif
+
+#endif /* arch_atomic_dec_return_relaxed */
+
+#ifndef arch_atomic_fetch_dec_relaxed
+#ifdef arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
+#endif /* arch_atomic_fetch_dec */
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ return arch_atomic_fetch_sub(1, v);
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_acquire(1, v);
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_release(1, v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec_relaxed
+static __always_inline int
+arch_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ return arch_atomic_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_dec_acquire
+static __always_inline int
+arch_atomic_fetch_dec_acquire(atomic_t *v)
+{
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic_fetch_dec_release
+static __always_inline int
+arch_atomic_fetch_dec_release(atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+}
+#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
+#endif
+
+#ifndef arch_atomic_fetch_dec
+static __always_inline int
+arch_atomic_fetch_dec(atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#endif
+
+#endif /* arch_atomic_fetch_dec_relaxed */
+
+#ifndef arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
+#else /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_fetch_and_acquire
+static __always_inline int
+arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic_fetch_and_release
+static __always_inline int
+arch_atomic_fetch_and_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#endif
+
+#ifndef arch_atomic_fetch_and
+static __always_inline int
+arch_atomic_fetch_and(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#endif
+
+#endif /* arch_atomic_fetch_and_relaxed */
+
+#ifndef arch_atomic_andnot
+static __always_inline void
+arch_atomic_andnot(int i, atomic_t *v)
+{
+ arch_atomic_and(~i, v);
+}
+#define arch_atomic_andnot arch_atomic_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+#ifdef arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
+#endif /* arch_atomic_fetch_andnot */
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and(~i, v);
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_acquire(~i, v);
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_release(~i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot_relaxed
+static __always_inline int
+arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ return arch_atomic_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_andnot_acquire
+static __always_inline int
+arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic_fetch_andnot_release
+static __always_inline int
+arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic_fetch_andnot
+static __always_inline int
+arch_atomic_fetch_andnot(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#endif
+
+#endif /* arch_atomic_fetch_andnot_relaxed */
+
+#ifndef arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
+#else /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_or_acquire
+static __always_inline int
+arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic_fetch_or_release
+static __always_inline int
+arch_atomic_fetch_or_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+#endif
+
+#ifndef arch_atomic_fetch_or
+static __always_inline int
+arch_atomic_fetch_or(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#endif
+
+#endif /* arch_atomic_fetch_or_relaxed */
+
+#ifndef arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
+#else /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_fetch_xor_acquire
+static __always_inline int
+arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic_fetch_xor_release
+static __always_inline int
+arch_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+#endif
+
+#ifndef arch_atomic_fetch_xor
+static __always_inline int
+arch_atomic_fetch_xor(int i, atomic_t *v)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#endif
+
+#endif /* arch_atomic_fetch_xor_relaxed */
+
+#ifndef arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg
+#define arch_atomic_xchg_release arch_atomic_xchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg
+#else /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_xchg_acquire
+static __always_inline int
+arch_atomic_xchg_acquire(atomic_t *v, int i)
+{
+ int ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#endif
+
+#ifndef arch_atomic_xchg_release
+static __always_inline int
+arch_atomic_xchg_release(atomic_t *v, int i)
+{
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, i);
+}
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#endif
+
+#ifndef arch_atomic_xchg
+static __always_inline int
+arch_atomic_xchg(atomic_t *v, int i)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_xchg arch_atomic_xchg
+#endif
+
+#endif /* arch_atomic_xchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
+#else /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_cmpxchg_acquire
+static __always_inline int
+arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_cmpxchg_release
+static __always_inline int
+arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_cmpxchg
+static __always_inline int
+arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#endif
+
+#endif /* arch_atomic_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+#ifdef arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
+#endif /* arch_atomic_try_cmpxchg */
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ int r, o = *old;
+ r = arch_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic_try_cmpxchg_release
+static __always_inline bool
+arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic_try_cmpxchg
+static __always_inline bool
+arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+#endif
+
+#endif /* arch_atomic_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic_sub_and_test
+/**
+ * arch_atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_sub_and_test(int i, atomic_t *v)
+{
+ return arch_atomic_sub_return(i, v) == 0;
+}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#endif
+
+#ifndef arch_atomic_dec_and_test
+/**
+ * arch_atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic_dec_and_test(atomic_t *v)
+{
+ return arch_atomic_dec_return(v) == 0;
+}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#endif
+
+#ifndef arch_atomic_inc_and_test
+/**
+ * arch_atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic_inc_and_test(atomic_t *v)
+{
+ return arch_atomic_inc_return(v) == 0;
+}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#endif
+
+#ifndef arch_atomic_add_negative
+/**
+ * arch_atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic_add_negative(int i, atomic_t *v)
+{
+ return arch_atomic_add_return(i, v) < 0;
+}
+#define arch_atomic_add_negative arch_atomic_add_negative
+#endif
+
+#ifndef arch_atomic_fetch_add_unless
+/**
+ * arch_atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline int
+arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+#endif
+
+#ifndef arch_atomic_add_unless
+/**
+ * arch_atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return arch_atomic_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic_add_unless arch_atomic_add_unless
+#endif
+
+#ifndef arch_atomic_inc_not_zero
+/**
+ * arch_atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic_inc_not_zero(atomic_t *v)
+{
+ return arch_atomic_add_unless(v, 1, 0);
+}
+#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#endif
+
+#ifndef arch_atomic_inc_unless_negative
+static __always_inline bool
+arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+#endif
+
+#ifndef arch_atomic_dec_unless_positive
+static __always_inline bool
+arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int c = arch_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+#endif
+
+#ifndef arch_atomic_dec_if_positive
+static __always_inline int
+arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int dec, c = arch_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+#endif
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef arch_atomic64_read_acquire
+static __always_inline s64
+arch_atomic64_read_acquire(const atomic64_t *v)
+{
+ return smp_load_acquire(&(v)->counter);
+}
+#define arch_atomic64_read_acquire arch_atomic64_read_acquire
+#endif
+
+#ifndef arch_atomic64_set_release
+static __always_inline void
+arch_atomic64_set_release(atomic64_t *v, s64 i)
+{
+ smp_store_release(&(v)->counter, i);
+}
+#define arch_atomic64_set_release arch_atomic64_set_release
+#endif
+
+#ifndef arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
+#else /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_add_return_acquire
+static __always_inline s64
+arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
+#endif
+
+#ifndef arch_atomic64_add_return_release
+static __always_inline s64
+arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+}
+#define arch_atomic64_add_return_release arch_atomic64_add_return_release
+#endif
+
+#ifndef arch_atomic64_add_return
+static __always_inline s64
+arch_atomic64_add_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_add_return arch_atomic64_add_return
+#endif
+
+#endif /* arch_atomic64_add_return_relaxed */
+
+#ifndef arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
+#else /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_fetch_add_acquire
+static __always_inline s64
+arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_add_release
+static __always_inline s64
+arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+#endif
+
+#ifndef arch_atomic64_fetch_add
+static __always_inline s64
+arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#endif
+
+#endif /* arch_atomic64_fetch_add_relaxed */
+
+#ifndef arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
+#else /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_sub_return_acquire
+static __always_inline s64
+arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#endif
+
+#ifndef arch_atomic64_sub_return_release
+static __always_inline s64
+arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+#endif
+
+#ifndef arch_atomic64_sub_return
+static __always_inline s64
+arch_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#endif
+
+#endif /* arch_atomic64_sub_return_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
+#else /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_fetch_sub_acquire
+static __always_inline s64
+arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_sub_release
+static __always_inline s64
+arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+#endif
+
+#ifndef arch_atomic64_fetch_sub
+static __always_inline s64
+arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#endif /* arch_atomic64_fetch_sub_relaxed */
+
+#ifndef arch_atomic64_inc
+static __always_inline void
+arch_atomic64_inc(atomic64_t *v)
+{
+ arch_atomic64_add(1, v);
+}
+#define arch_atomic64_inc arch_atomic64_inc
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+#ifdef arch_atomic64_inc_return
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
+#endif /* arch_atomic64_inc_return */
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ return arch_atomic64_add_return(1, v);
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_add_return_acquire(1, v);
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ return arch_atomic64_add_return_release(1, v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return_relaxed
+static __always_inline s64
+arch_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_add_return_relaxed(1, v);
+}
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#endif
+
+#else /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_inc_return_acquire
+static __always_inline s64
+arch_atomic64_inc_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#endif
+
+#ifndef arch_atomic64_inc_return_release
+static __always_inline s64
+arch_atomic64_inc_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+}
+#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#endif
+
+#ifndef arch_atomic64_inc_return
+static __always_inline s64
+arch_atomic64_inc_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_inc_return arch_atomic64_inc_return
+#endif
+
+#endif /* arch_atomic64_inc_return_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+#ifdef arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
+#endif /* arch_atomic64_fetch_inc */
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add(1, v);
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_acquire(1, v);
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_release(1, v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc_relaxed
+static __always_inline s64
+arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_add_relaxed(1, v);
+}
+#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_fetch_inc_acquire
+static __always_inline s64
+arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_inc_release
+static __always_inline s64
+arch_atomic64_fetch_inc_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#endif
+
+#ifndef arch_atomic64_fetch_inc
+static __always_inline s64
+arch_atomic64_fetch_inc(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#endif
+
+#endif /* arch_atomic64_fetch_inc_relaxed */
+
+#ifndef arch_atomic64_dec
+static __always_inline void
+arch_atomic64_dec(atomic64_t *v)
+{
+ arch_atomic64_sub(1, v);
+}
+#define arch_atomic64_dec arch_atomic64_dec
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+#ifdef arch_atomic64_dec_return
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
+#endif /* arch_atomic64_dec_return */
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ return arch_atomic64_sub_return(1, v);
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_acquire(1, v);
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_release(1, v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return_relaxed
+static __always_inline s64
+arch_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_sub_return_relaxed(1, v);
+}
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
+#endif
+
+#else /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_dec_return_acquire
+static __always_inline s64
+arch_atomic64_dec_return_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#endif
+
+#ifndef arch_atomic64_dec_return_release
+static __always_inline s64
+arch_atomic64_dec_return_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+}
+#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#endif
+
+#ifndef arch_atomic64_dec_return
+static __always_inline s64
+arch_atomic64_dec_return(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_dec_return arch_atomic64_dec_return
+#endif
+
+#endif /* arch_atomic64_dec_return_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+#ifdef arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
+#endif /* arch_atomic64_fetch_dec */
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub(1, v);
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_acquire(1, v);
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_release(1, v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec_relaxed
+static __always_inline s64
+arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ return arch_atomic64_fetch_sub_relaxed(1, v);
+}
+#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_dec_acquire
+static __always_inline s64
+arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_dec_release
+static __always_inline s64
+arch_atomic64_fetch_dec_release(atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
+#endif
+
+#ifndef arch_atomic64_fetch_dec
+static __always_inline s64
+arch_atomic64_fetch_dec(atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#endif
+
+#endif /* arch_atomic64_fetch_dec_relaxed */
+
+#ifndef arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
+#else /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_fetch_and_acquire
+static __always_inline s64
+arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_and_release
+static __always_inline s64
+arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#endif
+
+#ifndef arch_atomic64_fetch_and
+static __always_inline s64
+arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#endif
+
+#endif /* arch_atomic64_fetch_and_relaxed */
+
+#ifndef arch_atomic64_andnot
+static __always_inline void
+arch_atomic64_andnot(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(~i, v);
+}
+#define arch_atomic64_andnot arch_atomic64_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+#ifdef arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
+#endif /* arch_atomic64_fetch_andnot */
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and(~i, v);
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_acquire(~i, v);
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_release(~i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_relaxed
+static __always_inline s64
+arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_fetch_and_relaxed(~i, v);
+}
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_andnot_acquire
+static __always_inline s64
+arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_andnot_release
+static __always_inline s64
+arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#endif
+
+#ifndef arch_atomic64_fetch_andnot
+static __always_inline s64
+arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#endif
+
+#endif /* arch_atomic64_fetch_andnot_relaxed */
+
+#ifndef arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
+#else /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_or_acquire
+static __always_inline s64
+arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_or_release
+static __always_inline s64
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+#endif
+
+#ifndef arch_atomic64_fetch_or
+static __always_inline s64
+arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#endif
+
+#endif /* arch_atomic64_fetch_or_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
+#else /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_fetch_xor_acquire
+static __always_inline s64
+arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#endif
+
+#ifndef arch_atomic64_fetch_xor_release
+static __always_inline s64
+arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+#endif
+
+#ifndef arch_atomic64_fetch_xor
+static __always_inline s64
+arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#endif /* arch_atomic64_fetch_xor_relaxed */
+
+#ifndef arch_atomic64_xchg_relaxed
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg
+#define arch_atomic64_xchg_release arch_atomic64_xchg
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
+#else /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_xchg_acquire
+static __always_inline s64
+arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+ s64 ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#endif
+
+#ifndef arch_atomic64_xchg_release
+static __always_inline s64
+arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, i);
+}
+#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+#endif
+
+#ifndef arch_atomic64_xchg
+static __always_inline s64
+arch_atomic64_xchg(atomic64_t *v, s64 i)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, i);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_xchg arch_atomic64_xchg
+#endif
+
+#endif /* arch_atomic64_xchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
+#else /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_cmpxchg_acquire
+static __always_inline s64
+arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_cmpxchg_release
+static __always_inline s64
+arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_cmpxchg
+static __always_inline s64
+arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#endif
+
+#endif /* arch_atomic64_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+#ifdef arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
+#endif /* arch_atomic64_try_cmpxchg */
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_relaxed
+static __always_inline bool
+arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ s64 r, o = *old;
+ r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+}
+#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_try_cmpxchg_acquire
+static __always_inline bool
+arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg_release
+static __always_inline bool
+arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
+#endif
+
+#ifndef arch_atomic64_try_cmpxchg
+static __always_inline bool
+arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
+#endif
+
+#endif /* arch_atomic64_try_cmpxchg_relaxed */
+
+#ifndef arch_atomic64_sub_and_test
+/**
+ * arch_atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_sub_return(i, v) == 0;
+}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#endif
+
+#ifndef arch_atomic64_dec_and_test
+/**
+ * arch_atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __always_inline bool
+arch_atomic64_dec_and_test(atomic64_t *v)
+{
+ return arch_atomic64_dec_return(v) == 0;
+}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#endif
+
+#ifndef arch_atomic64_inc_and_test
+/**
+ * arch_atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __always_inline bool
+arch_atomic64_inc_and_test(atomic64_t *v)
+{
+ return arch_atomic64_inc_return(v) == 0;
+}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#endif
+
+#ifndef arch_atomic64_add_negative
+/**
+ * arch_atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+static __always_inline bool
+arch_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ return arch_atomic64_add_return(i, v) < 0;
+}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
+#endif
+
+#ifndef arch_atomic64_fetch_add_unless
+/**
+ * arch_atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static __always_inline s64
+arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+#ifndef arch_atomic64_add_unless
+/**
+ * arch_atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static __always_inline bool
+arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ return arch_atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
+#endif
+
+#ifndef arch_atomic64_inc_not_zero
+/**
+ * arch_atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static __always_inline bool
+arch_atomic64_inc_not_zero(atomic64_t *v)
+{
+ return arch_atomic64_add_unless(v, 1, 0);
+}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#endif
+
+#ifndef arch_atomic64_inc_unless_negative
+static __always_inline bool
+arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+#endif
+
+#ifndef arch_atomic64_dec_unless_positive
+static __always_inline bool
+arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 c = arch_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+#endif
+
+#ifndef arch_atomic64_dec_if_positive
+static __always_inline s64
+arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 dec, c = arch_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// 90cd26cfd69d2250303d654955a0cc12620fb91b
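[Editorial note -- illustrative addition, not part of the commit being diffed.]
The conditional fallbacks in the generated header above (arch_atomic_fetch_add_unless(),
arch_atomic_inc_unless_negative(), arch_atomic_dec_if_positive(), and their atomic64
counterparts) are all built on the same read/try_cmpxchg loop: read the counter once,
test the condition, and retry the cmpxchg until it succeeds, relying on try_cmpxchg()
writing the observed value back into the expected-value argument on failure. A minimal
sketch of that idiom, written against the generic atomic_*() wrappers rather than the
arch_ layer, follows; the saturating_inc() helper is hypothetical and is shown only to
illustrate the pattern.

	/* Illustrative sketch only -- not part of the patch above. */
	#include <linux/atomic.h>
	#include <linux/limits.h>

	static inline bool saturating_inc(atomic_t *v)
	{
		int c = atomic_read(v);			/* single initial read */

		do {
			if (unlikely(c == INT_MAX))	/* refuse to wrap around */
				return false;
			/* on failure, try_cmpxchg updates c to the current value */
		} while (!atomic_try_cmpxchg(v, &c, c + 1));

		return true;
	}

On success atomic_try_cmpxchg() returns true and the new value is stored; on failure it
returns false after updating c, so the loop re-checks the bound against the value that
actually won the race instead of re-reading the counter explicitly.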
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
index a7d240e465c0..2c4927bf7b8d 100644
--- a/include/linux/atomic-fallback.h
+++ b/include/linux/atomic-fallback.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
+#include <linux/compiler.h>
+
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
@@ -76,7 +78,7 @@
#endif /* cmpxchg64_relaxed */
#ifndef atomic_read_acquire
-static inline int
+static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v)
#endif
#ifndef atomic_set_release
-static inline void
+static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
smp_store_release(&(v)->counter, i);
@@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i)
#else /* atomic_add_return_relaxed */
#ifndef atomic_add_return_acquire
-static inline int
+static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
int ret = atomic_add_return_relaxed(i, v);
@@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_add_return_release
-static inline int
+static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_add_return
-static inline int
+static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
int ret;
@@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v)
#else /* atomic_fetch_add_relaxed */
#ifndef atomic_fetch_add_acquire
-static inline int
+static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_add_relaxed(i, v);
@@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add_release
-static inline int
+static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_add
-static inline int
+static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
int ret;
@@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v)
#else /* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_acquire
-static inline int
+static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
int ret = atomic_sub_return_relaxed(i, v);
@@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return_release
-static inline int
+static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v)
#endif
#ifndef atomic_sub_return
-static inline int
+static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
int ret;
@@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v)
#else /* atomic_fetch_sub_relaxed */
#ifndef atomic_fetch_sub_acquire
-static inline int
+static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_sub_relaxed(i, v);
@@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub_release
-static inline int
+static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_sub
-static inline int
+static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
int ret;
@@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v)
#endif /* atomic_fetch_sub_relaxed */
#ifndef atomic_inc
-static inline void
+static __always_inline void
atomic_inc(atomic_t *v)
{
atomic_add(1, v);
@@ -278,7 +280,7 @@ atomic_inc(atomic_t *v)
#endif /* atomic_inc_return */
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
return atomic_add_return(1, v);
@@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v)
#endif
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
return atomic_add_return_acquire(1, v);
@@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
return atomic_add_return_release(1, v);
@@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return_relaxed
-static inline int
+static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
return atomic_add_return_relaxed(1, v);
@@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v)
#else /* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_acquire
-static inline int
+static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
int ret = atomic_inc_return_relaxed(v);
@@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v)
#endif
#ifndef atomic_inc_return_release
-static inline int
+static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v)
#endif
#ifndef atomic_inc_return
-static inline int
+static __always_inline int
atomic_inc_return(atomic_t *v)
{
int ret;
@@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v)
#endif /* atomic_fetch_inc */
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
return atomic_fetch_add(1, v);
@@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
return atomic_fetch_add_acquire(1, v);
@@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
return atomic_fetch_add_release(1, v);
@@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_relaxed
-static inline int
+static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
return atomic_fetch_add_relaxed(1, v);
@@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v)
#else /* atomic_fetch_inc_relaxed */
#ifndef atomic_fetch_inc_acquire
-static inline int
+static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
int ret = atomic_fetch_inc_relaxed(v);
@@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_inc_release
-static inline int
+static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
__atomic_release_fence();
@@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v)
#endif
#ifndef atomic_fetch_inc
-static inline int
+static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
int ret;
@@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v)
#endif /* atomic_fetch_inc_relaxed */
#ifndef atomic_dec
-static inline void
+static __always_inline void
atomic_dec(atomic_t *v)
{
atomic_sub(1, v);
@@ -449,7 +451,7 @@ atomic_dec(atomic_t *v)
#endif /* atomic_dec_return */
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
return atomic_sub_return(1, v);
@@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v)
#endif
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
return atomic_sub_return_acquire(1, v);
@@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
return atomic_sub_return_release(1, v);
@@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return_relaxed
-static inline int
+static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
return atomic_sub_return_relaxed(1, v);
@@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v)
#else /* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_acquire
-static inline int
+static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
int ret = atomic_dec_return_relaxed(v);
@@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v)
#endif
#ifndef atomic_dec_return_release
-static inline int
+static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
__atomic_release_fence();
@@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v)
#endif
#ifndef atomic_dec_return
-static inline int
+static __always_inline int
atomic_dec_return(atomic_t *v)
{
int ret;
@@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v)
#endif /* atomic_fetch_dec */
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
return atomic_fetch_sub(1, v);
@@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
return atomic_fetch_sub_acquire(1, v);
@@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
return atomic_fetch_sub_release(1, v);
@@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_relaxed
-static inline int
+static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
return atomic_fetch_sub_relaxed(1, v);
@@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v)
#else /* atomic_fetch_dec_relaxed */
#ifndef atomic_fetch_dec_acquire
-static inline int
+static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
int ret = atomic_fetch_dec_relaxed(v);
@@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
#endif
#ifndef atomic_fetch_dec_release
-static inline int
+static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
__atomic_release_fence();
@@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v)
#endif
#ifndef atomic_fetch_dec
-static inline int
+static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
int ret;
@@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v)
#else /* atomic_fetch_and_relaxed */
#ifndef atomic_fetch_and_acquire
-static inline int
+static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_and_relaxed(i, v);
@@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and_release
-static inline int
+static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_and
-static inline int
+static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
int ret;
@@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v)
#endif /* atomic_fetch_and_relaxed */
#ifndef atomic_andnot
-static inline void
+static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
@@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v)
#endif /* atomic_fetch_andnot */
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
return atomic_fetch_and(~i, v);
@@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
return atomic_fetch_and_acquire(~i, v);
@@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
return atomic_fetch_and_release(~i, v);
@@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_relaxed
-static inline int
+static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
return atomic_fetch_and_relaxed(~i, v);
@@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v)
#else /* atomic_fetch_andnot_relaxed */
#ifndef atomic_fetch_andnot_acquire
-static inline int
+static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_andnot_relaxed(i, v);
@@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot_release
-static inline int
+static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_andnot
-static inline int
+static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
int ret;
@@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v)
#else /* atomic_fetch_or_relaxed */
#ifndef atomic_fetch_or_acquire
-static inline int
+static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_or_relaxed(i, v);
@@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or_release
-static inline int
+static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_or
-static inline int
+static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
int ret;
@@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v)
#else /* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_acquire
-static inline int
+static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
int ret = atomic_fetch_xor_relaxed(i, v);
@@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor_release
-static inline int
+static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
__atomic_release_fence();
@@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v)
#endif
#ifndef atomic_fetch_xor
-static inline int
+static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
int ret;
@@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v)
#else /* atomic_xchg_relaxed */
#ifndef atomic_xchg_acquire
-static inline int
+static __always_inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
int ret = atomic_xchg_relaxed(v, i);
@@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
#endif
#ifndef atomic_xchg_release
-static inline int
+static __always_inline int
atomic_xchg_release(atomic_t *v, int i)
{
__atomic_release_fence();
@@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i)
#endif
#ifndef atomic_xchg
-static inline int
+static __always_inline int
atomic_xchg(atomic_t *v, int i)
{
int ret;
@@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i)
#else /* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_acquire
-static inline int
+static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
int ret = atomic_cmpxchg_relaxed(v, old, new);
@@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg_release
-static inline int
+static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
__atomic_release_fence();
@@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
#endif
#ifndef atomic_cmpxchg
-static inline int
+static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
@@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
#endif /* atomic_try_cmpxchg */
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -965,7 +967,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
#else /* atomic_try_cmpxchg_relaxed */
#ifndef atomic_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
@@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
__atomic_release_fence();
@@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
#endif
#ifndef atomic_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
bool ret;
@@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
return atomic_sub_return(i, v) == 0;
@@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
return atomic_dec_return(v) == 0;
@@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
return atomic_inc_return(v) == 0;
@@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
return atomic_add_return(i, v) < 0;
@@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline int
+static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
@@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
return atomic_fetch_add_unless(v, a, u) != u;
@@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
return atomic_add_unless(v, 1, 0);
@@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v)
#endif
#ifndef atomic_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
int c = atomic_read(v);
@@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v)
#endif
#ifndef atomic_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
int c = atomic_read(v);
@@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v)
#endif
#ifndef atomic_dec_if_positive
-static inline int
+static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
int dec, c = atomic_read(v);
@@ -1178,15 +1180,12 @@ atomic_dec_if_positive(atomic_t *v)
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#ifndef atomic64_read_acquire
-static inline s64
+static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
return smp_load_acquire(&(v)->counter);
@@ -1195,7 +1194,7 @@ atomic64_read_acquire(const atomic64_t *v)
#endif
#ifndef atomic64_set_release
-static inline void
+static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
smp_store_release(&(v)->counter, i);
@@ -1210,7 +1209,7 @@ atomic64_set_release(atomic64_t *v, s64 i)
#else /* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_acquire
-static inline s64
+static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_add_return_relaxed(i, v);
@@ -1221,7 +1220,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return_release
-static inline s64
+static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1231,7 +1230,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_add_return
-static inline s64
+static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1252,7 +1251,7 @@ atomic64_add_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_add_relaxed */
#ifndef atomic64_fetch_add_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_add_relaxed(i, v);
@@ -1263,7 +1262,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add_release
-static inline s64
+static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1273,7 +1272,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_add
-static inline s64
+static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1294,7 +1293,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
#else /* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_acquire
-static inline s64
+static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_sub_return_relaxed(i, v);
@@ -1305,7 +1304,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return_release
-static inline s64
+static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1315,7 +1314,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_sub_return
-static inline s64
+static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1336,7 +1335,7 @@ atomic64_sub_return(s64 i, atomic64_t *v)
#else /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_fetch_sub_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_sub_relaxed(i, v);
@@ -1347,7 +1346,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub_release
-static inline s64
+static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1357,7 +1356,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_sub
-static inline s64
+static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1372,7 +1371,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_sub_relaxed */
#ifndef atomic64_inc
-static inline void
+static __always_inline void
atomic64_inc(atomic64_t *v)
{
atomic64_add(1, v);
@@ -1388,7 +1387,7 @@ atomic64_inc(atomic64_t *v)
#endif /* atomic64_inc_return */
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
return atomic64_add_return(1, v);
@@ -1397,7 +1396,7 @@ atomic64_inc_return(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
return atomic64_add_return_acquire(1, v);
@@ -1406,7 +1405,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
return atomic64_add_return_release(1, v);
@@ -1415,7 +1414,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
return atomic64_add_return_relaxed(1, v);
@@ -1426,7 +1425,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
#else /* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_acquire
-static inline s64
+static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_inc_return_relaxed(v);
@@ -1437,7 +1436,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_inc_return_release
-static inline s64
+static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1447,7 +1446,7 @@ atomic64_inc_return_release(atomic64_t *v)
#endif
#ifndef atomic64_inc_return
-static inline s64
+static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
s64 ret;
@@ -1469,7 +1468,7 @@ atomic64_inc_return(atomic64_t *v)
#endif /* atomic64_fetch_inc */
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
return atomic64_fetch_add(1, v);
@@ -1478,7 +1477,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
return atomic64_fetch_add_acquire(1, v);
@@ -1487,7 +1486,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
return atomic64_fetch_add_release(1, v);
@@ -1496,7 +1495,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
return atomic64_fetch_add_relaxed(1, v);
@@ -1507,7 +1506,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v)
#else /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_fetch_inc_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_inc_relaxed(v);
@@ -1518,7 +1517,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc_release
-static inline s64
+static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1528,7 +1527,7 @@ atomic64_fetch_inc_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_inc
-static inline s64
+static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
s64 ret;
@@ -1543,7 +1542,7 @@ atomic64_fetch_inc(atomic64_t *v)
#endif /* atomic64_fetch_inc_relaxed */
#ifndef atomic64_dec
-static inline void
+static __always_inline void
atomic64_dec(atomic64_t *v)
{
atomic64_sub(1, v);
@@ -1559,7 +1558,7 @@ atomic64_dec(atomic64_t *v)
#endif /* atomic64_dec_return */
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
return atomic64_sub_return(1, v);
@@ -1568,7 +1567,7 @@ atomic64_dec_return(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
return atomic64_sub_return_acquire(1, v);
@@ -1577,7 +1576,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
return atomic64_sub_return_release(1, v);
@@ -1586,7 +1585,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_relaxed
-static inline s64
+static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
return atomic64_sub_return_relaxed(1, v);
@@ -1597,7 +1596,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
#else /* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_acquire
-static inline s64
+static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
s64 ret = atomic64_dec_return_relaxed(v);
@@ -1608,7 +1607,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
#endif
#ifndef atomic64_dec_return_release
-static inline s64
+static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1618,7 +1617,7 @@ atomic64_dec_return_release(atomic64_t *v)
#endif
#ifndef atomic64_dec_return
-static inline s64
+static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
s64 ret;
@@ -1640,7 +1639,7 @@ atomic64_dec_return(atomic64_t *v)
#endif /* atomic64_fetch_dec */
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
return atomic64_fetch_sub(1, v);
@@ -1649,7 +1648,7 @@ atomic64_fetch_dec(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
return atomic64_fetch_sub_acquire(1, v);
@@ -1658,7 +1657,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
return atomic64_fetch_sub_release(1, v);
@@ -1667,7 +1666,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
return atomic64_fetch_sub_relaxed(1, v);
@@ -1678,7 +1677,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v)
#else /* atomic64_fetch_dec_relaxed */
#ifndef atomic64_fetch_dec_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
s64 ret = atomic64_fetch_dec_relaxed(v);
@@ -1689,7 +1688,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec_release
-static inline s64
+static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
__atomic_release_fence();
@@ -1699,7 +1698,7 @@ atomic64_fetch_dec_release(atomic64_t *v)
#endif
#ifndef atomic64_fetch_dec
-static inline s64
+static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
s64 ret;
@@ -1720,7 +1719,7 @@ atomic64_fetch_dec(atomic64_t *v)
#else /* atomic64_fetch_and_relaxed */
#ifndef atomic64_fetch_and_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_and_relaxed(i, v);
@@ -1731,7 +1730,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and_release
-static inline s64
+static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1741,7 +1740,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_and
-static inline s64
+static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1756,7 +1755,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_and_relaxed */
#ifndef atomic64_andnot
-static inline void
+static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
atomic64_and(~i, v);
@@ -1772,7 +1771,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
#endif /* atomic64_fetch_andnot */
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
return atomic64_fetch_and(~i, v);
@@ -1781,7 +1780,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_acquire(~i, v);
@@ -1790,7 +1789,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_release(~i, v);
@@ -1799,7 +1798,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_relaxed
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
return atomic64_fetch_and_relaxed(~i, v);
@@ -1810,7 +1809,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
#else /* atomic64_fetch_andnot_relaxed */
#ifndef atomic64_fetch_andnot_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_andnot_relaxed(i, v);
@@ -1821,7 +1820,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot_release
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1831,7 +1830,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_andnot
-static inline s64
+static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1852,7 +1851,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
#else /* atomic64_fetch_or_relaxed */
#ifndef atomic64_fetch_or_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_or_relaxed(i, v);
@@ -1863,7 +1862,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or_release
-static inline s64
+static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1873,7 +1872,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_or
-static inline s64
+static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1894,7 +1893,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
#else /* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_acquire
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
s64 ret = atomic64_fetch_xor_relaxed(i, v);
@@ -1905,7 +1904,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor_release
-static inline s64
+static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
__atomic_release_fence();
@@ -1915,7 +1914,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v)
#endif
#ifndef atomic64_fetch_xor
-static inline s64
+static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 ret;
@@ -1936,7 +1935,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
#else /* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_acquire
-static inline s64
+static __always_inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
s64 ret = atomic64_xchg_relaxed(v, i);
@@ -1947,7 +1946,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg_release
-static inline s64
+static __always_inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
__atomic_release_fence();
@@ -1957,7 +1956,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i)
#endif
#ifndef atomic64_xchg
-static inline s64
+static __always_inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
s64 ret;
@@ -1978,7 +1977,7 @@ atomic64_xchg(atomic64_t *v, s64 i)
#else /* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_acquire
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
@@ -1989,7 +1988,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg_release
-static inline s64
+static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
__atomic_release_fence();
@@ -1999,7 +1998,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
#endif
#ifndef atomic64_cmpxchg
-static inline s64
+static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
s64 ret;
@@ -2021,7 +2020,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
#endif /* atomic64_try_cmpxchg */
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2034,7 +2033,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2047,7 +2046,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2060,7 +2059,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_relaxed
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
s64 r, o = *old;
@@ -2075,7 +2074,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
#else /* atomic64_try_cmpxchg_relaxed */
#ifndef atomic64_try_cmpxchg_acquire
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
@@ -2086,7 +2085,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg_release
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
__atomic_release_fence();
@@ -2096,7 +2095,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
#endif
#ifndef atomic64_try_cmpxchg
-static inline bool
+static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
bool ret;
@@ -2120,7 +2119,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return atomic64_sub_return(i, v) == 0;
@@ -2137,7 +2136,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
+static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
return atomic64_dec_return(v) == 0;
@@ -2154,7 +2153,7 @@ atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
return atomic64_inc_return(v) == 0;
@@ -2172,7 +2171,7 @@ atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
+static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
@@ -2190,7 +2189,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline s64
+static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c = atomic64_read(v);
@@ -2215,7 +2214,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
+static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
return atomic64_fetch_add_unless(v, a, u) != u;
@@ -2231,7 +2230,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
+static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
return atomic64_add_unless(v, 1, 0);
@@ -2240,7 +2239,7 @@ atomic64_inc_not_zero(atomic64_t *v)
#endif
#ifndef atomic64_inc_unless_negative
-static inline bool
+static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2256,7 +2255,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
#endif
#ifndef atomic64_dec_unless_positive
-static inline bool
+static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
s64 c = atomic64_read(v);
@@ -2272,7 +2271,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
#endif
#ifndef atomic64_dec_if_positive
-static inline s64
+static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
s64 dec, c = atomic64_read(v);
@@ -2288,8 +2287,5 @@ atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_dec_if_positive atomic64_dec_if_positive
#endif
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 25de4a2804d70f57e994fe3b419148658bb5378a
+// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
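For reference, every generated fallback in the hunks above follows one construction: the _acquire form is the _relaxed form followed by an acquire fence, the _release form is a release fence followed by the _relaxed form, and the fully ordered form brackets the relaxed operation with full fences; the only change in this patch is that each helper is now forced inline. A minimal hand-written sketch of that pattern, using a hypothetical my_fetch_inc_*() pair and the acquire/release fence helpers this file already relies on:

static __always_inline int
my_fetch_inc_acquire(atomic_t *v)
{
	int ret = atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();	/* order later accesses after the RMW */
	return ret;
}

static __always_inline int
my_fetch_inc_release(atomic_t *v)
{
	__atomic_release_fence();	/* order earlier accesses before the RMW */
	return atomic_fetch_inc_relaxed(v);
}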
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 4c0d009a46f0..571a11008ab5 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -25,6 +25,12 @@
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
@@ -71,7 +77,12 @@
__ret; \
})
+#ifdef ARCH_ATOMIC
+#include <linux/atomic-arch-fallback.h>
+#include <asm-generic/atomic-instrumented.h>
+#else
#include <linux/atomic-fallback.h>
+#endif
#include <asm-generic/atomic-long.h>
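A short sketch of how the relocated atomic_cond_read_acquire() helper is typically used; wait_for_ready() is hypothetical, and VAL is the name smp_cond_load_acquire() gives to the freshly loaded value inside the condition expression:

static void wait_for_ready(atomic_t *ready)
{
	/* spin until *ready becomes non-zero, then provide acquire ordering */
	atomic_cond_read_acquire(ready, VAL != 0);
}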
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index c7d6b2e8c3b5..56e4580d4f55 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -190,6 +190,7 @@ extern void backlight_force_update(struct backlight_device *bd,
extern int backlight_register_notifier(struct notifier_block *nb);
extern int backlight_unregister_notifier(struct notifier_block *nb);
extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
+struct backlight_device *backlight_device_get_by_name(const char *name);
extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness);
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
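A brief sketch of the new lookup-by-name helper next to the existing brightness setter; the device name "edp_backlight" is only illustrative:

struct backlight_device *bd = backlight_device_get_by_name("edp_backlight");

if (bd)
	backlight_device_set_brightness(bd, 128);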
diff --git a/include/linux/bch.h b/include/linux/bch.h
index aa765af85c38..85fdce83d4e2 100644
--- a/include/linux/bch.h
+++ b/include/linux/bch.h
@@ -33,6 +33,7 @@
* @cache: log-based polynomial representation buffer
* @elp: error locator polynomial
* @poly_2t: temporary polynomials of degree 2t
+ * @swap_bits: swap bits within data and syndrome bytes
*/
struct bch_control {
unsigned int m;
@@ -51,16 +52,18 @@ struct bch_control {
int *cache;
struct gf_poly *elp;
struct gf_poly *poly_2t[4];
+ bool swap_bits;
};
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits);
-void free_bch(struct bch_control *bch);
+void bch_free(struct bch_control *bch);
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc);
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc);
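A sketch of the renamed BCH API with the same parameters a caller of the old init_bch()/encode_bch() pair would have used; example_bch_encode() and the (m, t) choice are illustrative only:

static int example_bch_encode(const uint8_t *data, unsigned int len, uint8_t *ecc)
{
	/* m=13, t=4, default primitive polynomial, pre-rename bit order */
	struct bch_control *bch = bch_init(13, 4, 0, false);

	if (!bch)
		return -EINVAL;
	/* ecc must provide DIV_ROUND_UP(13 * 4, 8) = 7 bytes */
	bch_encode(bch, data, len, ecc);
	bch_free(bch);
	return 0;
}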
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index a345d9fed3d8..4a20b7517dd0 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -26,35 +26,27 @@ struct linux_binprm {
unsigned long p; /* current top of mem */
unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
+ /* Should an execfd be passed to userspace? */
+ have_execfd:1,
+
+ /* Use the creds of a script (see binfmt_misc) */
+ execfd_creds:1,
/*
- * True after the bprm_set_creds hook has been called once
- * (multiple calls can be made via prepare_binprm() for
- * binfmt_script/misc).
- */
- called_set_creds:1,
- /*
- * True if most recent call to the commoncaps bprm_set_creds
- * hook (due to multiple prepare_binprm() calls from the
- * binfmt_script/misc handlers) resulted in elevated
- * privileges.
- */
- cap_elevated:1,
- /*
- * Set by bprm_set_creds hook to indicate a privilege-gaining
- * exec has happened. Used to sanitize execution environment
- * and to set AT_SECURE auxv for glibc.
+ * Set by bprm_creds_for_exec hook to indicate a
+ * privilege-gaining exec has happened. Used to set
+ * AT_SECURE auxv for glibc.
*/
secureexec:1,
/*
- * Set by flush_old_exec, when exec_mmap has been called.
- * This is past the point of no return, when the
- * exec_update_mutex has been taken.
+ * Set when errors can no longer be returned to the
+ * original userspace.
*/
- called_exec_mmap:1;
+ point_of_no_return:1;
#ifdef __alpha__
unsigned int taso:1;
#endif
- unsigned int recursion_depth; /* only for search_binary_handler() */
+ struct file * executable; /* Executable to pass to the interpreter */
+ struct file * interpreter;
struct file * file;
struct cred *cred; /* new credentials */
int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
@@ -65,7 +57,7 @@ struct linux_binprm {
of the time same as filename, but could be
different for binfmt_{misc,script} */
unsigned interp_flags;
- unsigned interp_data;
+ int execfd; /* File descriptor of the executable */
unsigned long loader, exec;
struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
@@ -76,10 +68,6 @@ struct linux_binprm {
#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
-/* fd of the binary should be passed to the interpreter */
-#define BINPRM_FLAGS_EXECFD_BIT 1
-#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
-
/* filename of the binary will be inaccessible after exec */
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
@@ -123,10 +111,8 @@ static inline void insert_binfmt(struct linux_binfmt *fmt)
extern void unregister_binfmt(struct linux_binfmt *);
-extern int prepare_binprm(struct linux_binprm *);
extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *);
-extern int flush_old_exec(struct linux_binprm * bprm);
+extern int begin_new_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
extern void finalize_exec(struct linux_binprm *bprm);
extern void would_dump(struct linux_binprm *, struct file *);
@@ -144,9 +130,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
-extern int copy_strings_kernel(int argc, const char *const *argv,
- struct linux_binprm *bprm);
-extern void install_exec_creds(struct linux_binprm *bprm);
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 9acf654f0b19..99f2ac30b1d9 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -72,7 +72,7 @@ static inline int get_bitmask_order(unsigned int count)
static __always_inline unsigned long hweight_long(unsigned long w)
{
- return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+ return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
/**
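For illustration, hweight_long() simply dispatches to hweight32() or hweight64() based on the word size; the cast only makes the promotion to __u64 explicit where unsigned long is 32 bits wide. A trivial usage sketch:

unsigned long mask = 0xf0f0UL;
unsigned int set_bits = hweight_long(mask);	/* counts the set bits: 8 here */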
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 750621e41d1c..1aa8009f6d06 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -15,8 +15,14 @@
/*
* __read_mostly is used to keep rarely changing variables out of frequently
- * updated cachelines. If an architecture doesn't support it, ignore the
- * hint.
+ * updated cachelines. Its use should be reserved for data that is used
+ * frequently in hot paths. Performance traces can help decide when to use
+ * this. You want __read_mostly data to be tightly packed, so that in the
+ * best case multiple frequently read variables for a hot path will be next
+ * to each other in order to reduce the number of cachelines needed to
+ * execute a critical path. We should be mindful and selective of its use.
+ * i.e.: if you're going to use it, please supply a *good* justification in your

+ * commit log
*/
#ifndef __read_mostly
#define __read_mostly
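A one-line usage sketch of the annotation the new comment describes; rx_copybreak is a hypothetical tunable that is read in the hot receive path but written only at module load:

static unsigned int rx_copybreak __read_mostly = 256;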
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 525b7c3f1c81..2247e71beb83 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -53,6 +53,8 @@ struct ceph_options {
unsigned long osd_keepalive_timeout; /* jiffies */
unsigned long osd_request_timeout; /* jiffies */
+ u32 osd_req_flags; /* CEPH_OSD_FLAG_*, applied to each OSD request */
+
/*
* any type that can't be simply compared or doesn't need
* to be compared should go beyond this point,
@@ -64,6 +66,7 @@ struct ceph_options {
int num_mon;
char *name;
struct ceph_crypto_key *key;
+ struct rb_root crush_locs;
};
/*
@@ -188,7 +191,7 @@ static inline int calc_pages_for(u64 off, u64 len)
#define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b))
#define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \
-static void insert_##name(struct rb_root *root, type *t) \
+static bool __insert_##name(struct rb_root *root, type *t) \
{ \
struct rb_node **n = &root->rb_node; \
struct rb_node *parent = NULL; \
@@ -206,11 +209,17 @@ static void insert_##name(struct rb_root *root, type *t) \
else if (cmp > 0) \
n = &(*n)->rb_right; \
else \
- BUG(); \
+ return false; \
} \
\
rb_link_node(&t->nodefld, parent, n); \
rb_insert_color(&t->nodefld, root); \
+ return true; \
+} \
+static void __maybe_unused insert_##name(struct rb_root *root, type *t) \
+{ \
+ if (!__insert_##name(root, t)) \
+ BUG(); \
} \
static void erase_##name(struct rb_root *root, type *t) \
{ \
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index dbb8a6959a73..ce4ffeb384d7 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[0];
+ struct ceph_entity_inst mon_inst[];
};
struct ceph_mon_client;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 9d9f745b98a1..c60b59e9291b 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -8,6 +8,7 @@
#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -135,6 +136,7 @@ struct ceph_osd_req_op {
struct {
u64 expected_object_size;
u64 expected_write_size;
+ u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} alloc_hint;
struct {
u64 snapid;
@@ -164,6 +166,7 @@ struct ceph_osd_request_target {
bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool used_replica;
bool paused;
u32 epoch;
@@ -213,6 +216,8 @@ struct ceph_osd_request {
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
+ ktime_t r_start_latency; /* ktime_t */
+ ktime_t r_end_latency; /* ktime_t */
int r_attempts;
u32 r_map_dne_bound;
@@ -468,7 +473,8 @@ extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
- u64 expected_write_size);
+ u64 expected_write_size,
+ u32 flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 5e601975745f..3f4498fef6ad 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -302,9 +302,26 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
const struct ceph_pg *raw_pgid);
+struct crush_loc {
+ char *cl_type_name;
+ char *cl_name;
+};
+
+struct crush_loc_node {
+ struct rb_node cl_node;
+ struct crush_loc cl_loc; /* pointers into cl_data */
+ char cl_data[];
+};
+
+int ceph_parse_crush_location(char *crush_location, struct rb_root *locs);
+int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2);
+void ceph_clear_crush_locs(struct rb_root *locs);
+
+int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
+ struct rb_root *locs);
+
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
-
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 88ed3c5c04c5..3a518fd0eaad 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -465,6 +465,19 @@ enum {
const char *ceph_osd_watch_op_name(int o);
enum {
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2,
+ CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4,
+ CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8,
+ CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16,
+ CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32,
+ CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64,
+ CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128,
+ CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256,
+ CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512,
+};
+
+enum {
CEPH_OSD_BACKOFF_OP_BLOCK = 1,
CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2,
CEPH_OSD_BACKOFF_OP_UNBLOCK = 3,
@@ -517,6 +530,7 @@ struct ceph_osd_op {
struct {
__le64 expected_object_size;
__le64 expected_write_size;
+ __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
} __attribute__ ((packed)) alloc_hint;
struct {
__le64 snapid;
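A sketch of how the new allocation-hint flags combine with the extra parameter added to osd_req_op_alloc_hint_init() above; req, which and the two size values are assumed to come from an already prepared ceph_osd_request:

u32 hint_flags = CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY |
		 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;

osd_req_op_alloc_hint_init(req, which, expected_object_size,
			   expected_write_size, hint_flags);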
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 2b1b35240074..3f01d43f0598 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state);
extern void tegra210_put_utmipll_in_iddq(void);
extern void tegra210_put_utmipll_out_iddq(void);
extern int tegra210_clk_handle_mbist_war(unsigned int id);
+extern void tegra210_clk_emc_dll_enable(bool flag);
+extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+extern void tegra210_clk_emc_update_setting(u32 emc_src_value);
struct clk;
@@ -143,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
void *cb_arg);
int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+struct tegra210_clk_emc_config {
+ unsigned long rate;
+ bool same_freq;
+ u32 value;
+
+ unsigned long parent_rate;
+ u8 parent;
+};
+
+struct tegra210_clk_emc_provider {
+ struct module *owner;
+ struct device *dev;
+
+ struct tegra210_clk_emc_config *configs;
+ unsigned int num_configs;
+
+ int (*set_rate)(struct device *dev,
+ const struct tegra210_clk_emc_config *config);
+};
+
+int tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider);
+void tegra210_clk_emc_detach(struct clk *clk);
+
#endif /* __LINUX_CLK_TEGRA_H_ */
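A sketch of how an EMC driver might populate the new provider structure and hand it to the clock framework; emc_set_rate(), emc_configs and the "emc" clock lookup are illustrative assumptions, not part of this patch:

static int emc_set_rate(struct device *dev,
			const struct tegra210_clk_emc_config *config)
{
	/* program the EMC for config->rate using config->value, etc. */
	return 0;
}

static struct tegra210_clk_emc_provider emc_provider = {
	.owner		= THIS_MODULE,
	.configs	= emc_configs,			/* hypothetical table */
	.num_configs	= ARRAY_SIZE(emc_configs),
	.set_rate	= emc_set_rate,
};

/* with emc_clk obtained e.g. via devm_clk_get(dev, "emc"): */
err = tegra210_clk_emc_attach(emc_clk, &emc_provider);
if (err)
	return err;
/* ... and tegra210_clk_emc_detach(emc_clk) on teardown */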
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 790c0c6b8552..ee37256ec8bd 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
@@ -24,6 +24,15 @@
#define __no_sanitize_address
#endif
+#if __has_feature(thread_sanitizer)
+/* emulate gcc's __SANITIZE_THREAD__ flag */
+#define __SANITIZE_THREAD__
+#define __no_sanitize_thread \
+ __attribute__((no_sanitize("thread")))
+#else
+#define __no_sanitize_thread
+#endif
+
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index d7ee4c6bad48..7dd4e0349ef3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,7 +10,8 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-#if GCC_VERSION < 40600
+/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
+#if GCC_VERSION < 40800
# error Sorry, your compiler is too old - please upgrade it.
#endif
@@ -126,9 +127,7 @@
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
-#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
-#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
#if GCC_VERSION >= 70000
@@ -145,6 +144,12 @@
#define __no_sanitize_address
#endif
+#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
+#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#else
+#define __no_sanitize_thread
+#endif
+
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
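Both compilers now provide a __no_sanitize_thread attribute (or a no-op fallback), letting hot or fragile helpers opt out of KCSAN instrumentation. A minimal, hypothetical usage sketch:

static __no_sanitize_thread unsigned long peek_word(const unsigned long *p)
{
	return *p;	/* plain load, deliberately left uninstrumented */
}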
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 6325d64e3c3b..30827f82ad62 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -230,60 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
-#include <uapi/linux/types.h>
-
-#define __READ_ONCE_SIZE \
-({ \
- switch (size) { \
- case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
- case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
- case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
- case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
- default: \
- barrier(); \
- __builtin_memcpy((void *)res, (const void *)p, size); \
- barrier(); \
- } \
-})
-
-static __always_inline
-void __read_once_size(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-#ifdef CONFIG_KASAN
-/*
- * We can't declare function 'inline' because __no_sanitize_address confilcts
- * with inlining. Attempt to inline it may cause a build failure.
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
- */
-# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
-#else
-# define __no_kasan_or_inline __always_inline
-#endif
-
-static __no_kasan_or_inline
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
- __READ_ONCE_SIZE;
-}
-
-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
-{
- switch (size) {
- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
- default:
- barrier();
- __builtin_memcpy((void *)p, (const void *)res, size);
- barrier();
- }
-}
-
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
@@ -293,11 +239,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* statements.
*
* These two macros will also work on aggregate data types like structs or
- * unions. If the size of the accessed data type exceeds the word size of
- * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
- * fall back to memcpy(). There's at least two memcpy()s: one for the
- * __builtin_memcpy() and then one for the macro doing the copy of variable
- * - '__u' allocated on the stack.
+ * unions.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
@@ -308,24 +250,79 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
*/
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+
+/**
+ * data_race - mark an expression as containing intentional data races
+ *
+ * This data_race() macro is useful for situations in which data races
+ * should be forgiven. One example is diagnostic code that accesses
+ * shared variables but is not a part of the core synchronization design.
+ *
+ * This macro *does not* affect normal code generation, but is a hint
+ * to tooling that data races here are to be ignored.
+ */
+#define data_race(expr) \
+({ \
+ __unqual_scalar_typeof(({ expr; })) __v = ({ \
+ __kcsan_disable_current(); \
+ expr; \
+ }); \
+ __kcsan_enable_current(); \
+ __v; \
+})
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity or dependency ordering guarantees. Note that this may result
+ * in tears!
+ */
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
-#define __READ_ONCE(x, check) \
+#define __READ_ONCE_SCALAR(x) \
({ \
- union { typeof(x) __val; char __c[1]; } __u; \
- if (check) \
- __read_once_size(&(x), __u.__c, sizeof(x)); \
- else \
- __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
- smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
- __u.__val; \
+ __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
+ smp_read_barrier_depends(); \
+ (typeof(x))__x; \
})
-#define READ_ONCE(x) __READ_ONCE(x, 1)
+
+#define READ_ONCE(x) \
+({ \
+ compiletime_assert_rwonce_type(x); \
+ __READ_ONCE_SCALAR(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+ *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+ compiletime_assert_rwonce_type(x); \
+ __WRITE_ONCE(x, val); \
+} while (0)
+
+static __no_sanitize_or_inline
+unsigned long __read_once_word_nocheck(const void *addr)
+{
+ return __READ_ONCE(*(unsigned long *)addr);
+}
/*
- * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
- * to hide memory access from KASAN.
+ * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
+ * word from memory atomically but without telling KASAN/KCSAN. This is
+ * usually used by unwinding code when walking the stack of a running process.
*/
-#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
+#define READ_ONCE_NOCHECK(x) \
+({ \
+ unsigned long __x; \
+ compiletime_assert(sizeof(x) == sizeof(__x), \
+ "Unsupported access size for READ_ONCE_NOCHECK()."); \
+ __x = __read_once_word_nocheck(&(x)); \
+ smp_read_barrier_depends(); \
+ (typeof(x))__x; \
+})
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
@@ -334,14 +331,6 @@ unsigned long read_word_at_a_time(const void *addr)
return *(unsigned long *)addr;
}
-#define WRITE_ONCE(x, val) \
-({ \
- union { typeof(x) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(x)) (val) }; \
- __write_once_size(&(x), __u.__c, sizeof(x)); \
- __u.__val; \
-})
-
#endif /* __KERNEL__ */
/*
@@ -406,6 +395,16 @@ static inline void *offset_to_ptr(const int *off)
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+ compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+ "Unsupported access size for {READ,WRITE}_ONCE().")
+
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
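A sketch of how the reworked annotations above are meant to be used together; worker(), stop_worker() and do_work() are hypothetical, and note that READ_ONCE()/WRITE_ONCE() only provide marked, tear-free accesses, not acquire/release ordering:

static int stop;			/* polled by the worker, set elsewhere */
static unsigned long progress;		/* diagnostic only, races tolerated */

static void worker(void)
{
	while (!READ_ONCE(stop)) {	/* re-read every iteration, never torn */
		do_work();
		data_race(progress++);	/* intentional race, hidden from KCSAN only */
	}
}

static void stop_worker(void)
{
	WRITE_ONCE(stop, 1);		/* marked store: not elided or torn */
}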
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6fcf73200b67..21aed0981edf 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -171,6 +171,38 @@ struct ftrace_likely_data {
*/
#define noinline_for_stack noinline
+/*
+ * Sanitizer helper attributes: Because using __always_inline and
+ * __no_sanitize_* conflict, provide helper attributes that will either expand
+ * to __no_sanitize_* in compilation units where instrumentation is enabled
+ * (__SANITIZE_*__), or __always_inline in compilation units without
+ * instrumentation (__SANITIZE_*__ undefined).
+ */
+#ifdef __SANITIZE_ADDRESS__
+/*
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
+ * with inlining. Attempt to inline it may cause a build failure.
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
+ * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
+ */
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kasan_or_inline
+#else
+# define __no_kasan_or_inline __always_inline
+#endif
+
+#define __no_kcsan __no_sanitize_thread
+#ifdef __SANITIZE_THREAD__
+# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
+# define __no_sanitize_or_inline __no_kcsan_or_inline
+#else
+# define __no_kcsan_or_inline __always_inline
+#endif
+
+#ifndef __no_sanitize_or_inline
+#define __no_sanitize_or_inline __always_inline
+#endif
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -218,6 +250,53 @@ struct ftrace_likely_data {
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
+/*
+ * We build this out of a couple of helper macros in a vain attempt to
+ * help you keep your lunch down while reading it.
+ */
+#define __pick_scalar_type(x, type, otherwise) \
+ __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
+
+/*
+ * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
+ * so we include the naked type here as well as the signed/unsigned variants.
+ */
+#define __pick_integer_type(x, type, otherwise) \
+ __pick_scalar_type(x, type, \
+ __pick_scalar_type(x, unsigned type, \
+ __pick_scalar_type(x, signed type, otherwise)))
+
+#define __unqual_scalar_typeof(x) typeof( \
+ __pick_integer_type(x, char, \
+ __pick_integer_type(x, short, \
+ __pick_integer_type(x, int, \
+ __pick_integer_type(x, long, \
+ __pick_integer_type(x, long long, x))))))
+#else
+/*
+ * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __scalar_type_to_expr_cases(char), \
+ __scalar_type_to_expr_cases(short), \
+ __scalar_type_to_expr_cases(int), \
+ __scalar_type_to_expr_cases(long), \
+ __scalar_type_to_expr_cases(long long), \
+ default: (x)))
+#endif
+
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
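A small sketch of what __unqual_scalar_typeof() evaluates to; reg_val and struct foo are illustrative:

const volatile int reg_val = 0;
struct foo { int x; } s;

__unqual_scalar_typeof(reg_val) tmp;	/* plain 'int': qualifiers stripped */
__unqual_scalar_typeof(s) copy;		/* non-scalar: stays 'struct foo' */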
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 193cc9dbf448..e3e9f0e3a878 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -100,10 +100,12 @@ union coresight_dev_subtype {
};
/**
- * struct coresight_platform_data - data harvested from the DT specification
- * @nr_inport: number of input ports for this component.
- * @nr_outport: number of output ports for this component.
- * @conns: Array of nr_outport connections from this component
+ * struct coresight_platform_data - data harvested from the firmware
+ * specification.
+ *
+ * @nr_inport: Number of elements for the input connections.
+ * @nr_outport: Number of elements for the output connections.
+ * @conns: Sparse array of nr_outport connections from this component.
*/
struct coresight_platform_data {
int nr_inport;
@@ -140,12 +142,28 @@ struct coresight_desc {
* @child_fwnode: remote component's fwnode handle.
* @child_dev: a @coresight_device representation of the component
connected to @outport.
+ * @link: Representation of the connection as a sysfs link.
*/
struct coresight_connection {
int outport;
int child_port;
struct fwnode_handle *child_fwnode;
struct coresight_device *child_dev;
+ struct coresight_sysfs_link *link;
+};
+
+/**
+ * struct coresight_sysfs_link - representation of a connection in sysfs.
+ * @orig: Originating (master) coresight device for the link.
+ * @orig_name: Name to use for the link orig->target.
+ * @target: Target (slave) coresight device for the link.
+ * @target_name: Name to use for the link target->orig.
+ */
+struct coresight_sysfs_link {
+ struct coresight_device *orig;
+ const char *orig_name;
+ struct coresight_device *target;
+ const char *target_name;
};
/**
@@ -165,6 +183,9 @@ struct coresight_connection {
* @ea: Device attribute for sink representation under PMU directory.
* @ect_dev: Associated cross trigger device. Not part of the trace data
* path or connections.
+ * @nr_links: number of sysfs links created to other components from this
+ * device. These will appear in the "connections" group.
+ * @has_conns_grp: Have added a "connections" group for sysfs links.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
@@ -180,6 +201,9 @@ struct coresight_device {
struct dev_ext_attribute *ea;
/* cross trigger handling */
struct coresight_device *ect_dev;
+ /* sysfs links between components */
+ int nr_links;
+ bool has_conns_grp;
};
/*
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 67d5950bd878..3494f6763597 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -367,7 +367,7 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
- int (*set_boost)(int state);
+ int (*set_boost)(struct cpufreq_policy *policy, int state);
};
/* flags */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 24b3a77810b6..191772d4a4d7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -102,6 +102,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
@@ -142,6 +143,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_XEN_STARTING,
CPUHP_AP_ARM_KVMPV_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
CPUHP_AP_X86_TBOOT_DYING,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index bc156285d097..a5192b718dbe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -5,9 +5,10 @@
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
+#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <asm/pgtable.h> /* for pgprot_t */
+#include <linux/pgtable.h> /* for pgprot_t */
#ifdef CONFIG_CRASH_DUMP
#define ELFCORE_ADDR_MAX (-1ULL)
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 54741295c70b..33c16f2de7f6 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -87,7 +87,7 @@ struct crush_rule_mask {
struct crush_rule {
__u32 len;
struct crush_rule_mask mask;
- struct crush_rule_step steps[0];
+ struct crush_rule_step steps[];
};
#define crush_rule_size(len) (sizeof(struct crush_rule) + \
@@ -301,6 +301,12 @@ struct crush_map {
__u32 *choose_tries;
#else
+ /* device/bucket type id -> type name (CrushWrapper::type_map) */
+ struct rb_root type_names;
+
+ /* device/bucket id -> name (CrushWrapper::name_map) */
+ struct rb_root names;
+
/* CrushWrapper::choose_args */
struct rb_root choose_args;
#endif
@@ -342,4 +348,10 @@ struct crush_work {
struct crush_work_bucket **work; /* Per-bucket working store */
};
+#ifdef __KERNEL__
+/* osdmap.c */
+void clear_crush_names(struct rb_root *root);
+void clear_choose_args(struct crush_map *c);
+#endif
+
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index d7af5d243f24..6904d4e0b2e0 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -5,7 +5,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
-#include <asm/pgtable.h>
/* Flag for synchronous flush */
#define DAXDEV_F_SYNC (1UL << 0)
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 5aad06b4ca7b..3028b644b4fb 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -109,7 +109,8 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#define dev_info(dev, fmt, ...) \
_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define dev_dbg(dev, fmt, ...) \
dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#elif defined(DEBUG)
@@ -181,7 +182,8 @@ do { \
dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
#define dev_info_ratelimited(dev, fmt, ...) \
dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index af48d9da3916..8750f2dc5613 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -332,6 +332,8 @@ void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+u64 dm_start_time_ns_from_clone(struct bio *bio);
+
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -557,13 +559,8 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#ifdef CONFIG_DM_DEBUG
-#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
+#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#else
-#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
-#endif
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 3c8b7d274bd9..29d255fdd5d6 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -119,6 +119,11 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
int dm_bufio_issue_flush(struct dm_bufio_client *c);
/*
+ * Send a discard request to the underlying device.
+ */
+int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
+
+/*
* Like dm_bufio_release but also move the buffer to the new
* block. dm_bufio_write_dirty_buffers is needed to commit the new block.
*/
@@ -132,6 +137,13 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block);
/*
+ * Free the given range of buffers.
+ * This is just a hint; if a buffer is in use or dirty, this function
+ * does nothing.
+ */
+void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks);
+
+/*
* Set the minimum number of buffers before cleanup happens.
*/
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
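A minimal sketch of how a device-mapper client might combine the two new dm-bufio helpers above; the function and its caller are hypothetical:

static int example_discard_range(struct dm_bufio_client *c,
				 sector_t block, sector_t n_blocks)
{
	int ret;

	/* pass the discard down to the underlying device */
	ret = dm_bufio_issue_discard(c, block, n_blocks);
	if (ret)
		return ret;

	/* hint only: buffers that are in use or dirty are left alone */
	dm_bufio_forget_buffers(c, block, n_blocks);
	return 0;
}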
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 24b8684aa21d..136f984df0d9 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -67,6 +67,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
}
u64 dma_direct_get_required_mask(struct device *dev);
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+ u64 *phys_mask);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 330ad58fbf4d..78f677cf45ab 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
+/**
+ * dma_map_sgtable - Map the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the map operation
+ *
+ * Maps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After success the
+ * ownership for the buffer is transferred to the DMA domain. One has to
+ * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
+ * ownership of the buffer back to the CPU domain before touching the
+ * buffer by the CPU.
+ *
+ * Returns 0 on success or -EINVAL on error during mapping the buffer.
+ */
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ int nents;
+
+ nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+ if (nents <= 0)
+ return -EINVAL;
+ sgt->nents = nents;
+ return 0;
+}
+
+/**
+ * dma_unmap_sgtable - Unmap the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ * @attrs: Optional DMA attributes for the unmap operation
+ *
+ * Unmaps a buffer described by a scatterlist stored in the given sg_table
+ * object for the @dir DMA operation by the @dev device. After this function
+ * the ownership of the buffer is transferred back to the CPU domain.
+ */
+static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
+}
+
+/**
+ * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the CPU domain, so it is safe to perform any access to it
+ * by the CPU. Before doing any further DMA operations, one has to transfer
+ * the ownership of the buffer back to the DMA domain by calling the
+ * dma_sync_sgtable_for_device().
+ */
+static inline void dma_sync_sgtable_for_cpu(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
+/**
+ * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
+ * @dev: The device for which to perform the DMA operation
+ * @sgt: The sg_table object describing the buffer
+ * @dir: DMA direction
+ *
+ * Performs the needed cache synchronization and moves the ownership of the
+ * buffer back to the DMA domain, so it is safe to perform the DMA operation.
+ * Once finished, one has to call dma_sync_sgtable_for_cpu() or
+ * dma_unmap_sgtable().
+ */
+static inline void dma_sync_sgtable_for_device(struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
+}
+
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
@@ -630,9 +710,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);
-bool dma_in_atomic_pool(void *start, size_t size);
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
-bool dma_free_from_pool(void *start, size_t size);
+void *dma_alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, gfp_t flags);
+bool dma_free_from_pool(struct device *dev, void *start, size_t size);
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
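A minimal sketch of the ownership protocol described in the dma_map_sgtable()/dma_sync_sgtable_for_cpu() kernel-doc above, assuming a device pointer and an already-populated sg_table (everything else is hypothetical):

static int example_dma_read(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;	/* -EINVAL if the mapping failed */

	/* ... program the hardware and wait for the transfer ... */

	/* hand ownership back to the CPU before reading the data */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

	/* ... CPU processes the buffer ... */

	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	return 0;
}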
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index b59f1b6be3e9..ca09a4e07d2d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -3,7 +3,7 @@
#define _LINUX_DMA_NONCOHERENT_H 1
#include <linux/dma-mapping.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 4cf02ecd67de..abcd5fde30eb 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -48,7 +48,7 @@ struct _ddebug {
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
extern int ddebug_remove_module(const char *mod_name);
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 594d4e78654f..69b136e4dd2b 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -54,7 +54,7 @@
.popsection ;
#define ELFNOTE(name, type, desc) \
- ELFNOTE_START(name, type, "") \
+ ELFNOTE_START(name, type, "a") \
desc ; \
ELFNOTE_END
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index 8fbe4f97ffad..1e7bf78cb382 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -67,5 +67,5 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
-#endif /* IS_ENABLED(ETHTOOL_NETLINK) */
+#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
new file mode 100644
index 000000000000..4e624c466583
--- /dev/null
+++ b/include/linux/fiemap.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FIEMAP_H
+#define _LINUX_FIEMAP_H 1
+
+#include <uapi/linux/fiemap.h>
+#include <linux/fs.h>
+
+struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+ struct fiemap_extent __user *fi_extents_start; /* Start of
+ fiemap_extent array */
+};
+
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags);
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
+
+int generic_block_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo, u64 start, u64 len,
+ get_block_t *get_block);
+
+#endif /* _LINUX_FIEMAP_H 1 */
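A hedged sketch of the shape a filesystem's ->fiemap handler takes with the declarations now living in this header; the single-extent body is purely illustrative:

static int example_fiemap(struct inode *inode,
			  struct fiemap_extent_info *fieinfo,
			  u64 start, u64 len)
{
	int ret;

	/* validate flags and clamp the requested range first */
	ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/* report one extent covering the whole range (illustrative only) */
	ret = fiemap_fill_next_extent(fieinfo, start, 0, len,
				      FIEMAP_EXTENT_LAST);
	return ret < 0 ? ret : 0;	/* 1 just means the user buffer is full */
}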
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 4bbd0afd91b7..cb3e2c06ed8a 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -12,7 +12,6 @@
struct firmware {
size_t size;
const u8 *data;
- struct page **pages;
/* firmware loader private fields */
void *priv;
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 17ba4e405129..3fa418a4ca67 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -11,7 +11,6 @@
#define _SC_SCI_H
#include <linux/firmware/imx/ipc.h>
-#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
#include <linux/firmware/imx/svc/pm.h>
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
deleted file mode 100644
index 80821100e85f..000000000000
--- a/include/linux/firmware/imx/types.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017~2018 NXP
- *
- * Header file containing types used across multiple service APIs.
- */
-
-#ifndef _SC_TYPES_H
-#define _SC_TYPES_H
-
-/*
- * This type is used to indicate a control.
- */
-enum imx_sc_ctrl {
- IMX_SC_C_TEMP = 0,
- IMX_SC_C_TEMP_HI = 1,
- IMX_SC_C_TEMP_LOW = 2,
- IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
- IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
- IMX_SC_C_PXL_LINK_MST_ENB = 5,
- IMX_SC_C_PXL_LINK_MST1_ENB = 6,
- IMX_SC_C_PXL_LINK_MST2_ENB = 7,
- IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
- IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
- IMX_SC_C_PXL_LINK_MST_VLD = 10,
- IMX_SC_C_PXL_LINK_MST1_VLD = 11,
- IMX_SC_C_PXL_LINK_MST2_VLD = 12,
- IMX_SC_C_SINGLE_MODE = 13,
- IMX_SC_C_ID = 14,
- IMX_SC_C_PXL_CLK_POLARITY = 15,
- IMX_SC_C_LINESTATE = 16,
- IMX_SC_C_PCIE_G_RST = 17,
- IMX_SC_C_PCIE_BUTTON_RST = 18,
- IMX_SC_C_PCIE_PERST = 19,
- IMX_SC_C_PHY_RESET = 20,
- IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
- IMX_SC_C_PANIC = 22,
- IMX_SC_C_PRIORITY_GROUP = 23,
- IMX_SC_C_TXCLK = 24,
- IMX_SC_C_CLKDIV = 25,
- IMX_SC_C_DISABLE_50 = 26,
- IMX_SC_C_DISABLE_125 = 27,
- IMX_SC_C_SEL_125 = 28,
- IMX_SC_C_MODE = 29,
- IMX_SC_C_SYNC_CTRL0 = 30,
- IMX_SC_C_KACHUNK_CNT = 31,
- IMX_SC_C_KACHUNK_SEL = 32,
- IMX_SC_C_SYNC_CTRL1 = 33,
- IMX_SC_C_DPI_RESET = 34,
- IMX_SC_C_MIPI_RESET = 35,
- IMX_SC_C_DUAL_MODE = 36,
- IMX_SC_C_VOLTAGE = 37,
- IMX_SC_C_PXL_LINK_SEL = 38,
- IMX_SC_C_OFS_SEL = 39,
- IMX_SC_C_OFS_AUDIO = 40,
- IMX_SC_C_OFS_PERIPH = 41,
- IMX_SC_C_OFS_IRQ = 42,
- IMX_SC_C_RST0 = 43,
- IMX_SC_C_RST1 = 44,
- IMX_SC_C_SEL0 = 45,
- IMX_SC_C_LAST
-};
-
-#endif /* _SC_TYPES_H */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 013ae4819deb..682dbf694007 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -54,32 +54,25 @@
* Secure monitor software doesn't recognize the request.
*
* INTEL_SIP_SMC_STATUS_OK:
- * FPGA configuration completed successfully,
- * In case of FPGA configuration write operation, it means secure monitor
- * software can accept the next chunk of FPGA configuration data.
+ * Secure monitor software accepts the service client's request.
*
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY:
- * In case of FPGA configuration write operation, it means secure monitor
- * software is still processing previous data & can't accept the next chunk
- * of data. Service driver needs to issue
- * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the
- * completed block(s).
+ * INTEL_SIP_SMC_STATUS_BUSY:
+ * Secure monitor software is still processing service client's request.
*
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR:
- * There is error during the FPGA configuration process.
+ * INTEL_SIP_SMC_STATUS_REJECTED:
+ * Secure monitor software rejects the service client's request.
*
- * INTEL_SIP_SMC_REG_ERROR:
- * There is error during a read or write operation of the protected registers.
+ * INTEL_SIP_SMC_STATUS_ERROR:
+ * There is an error during processing of the service request.
*
* INTEL_SIP_SMC_RSU_ERROR:
- * There is error during a remote status update.
+ * There is an error during processing of the remote status update request.
*/
#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
#define INTEL_SIP_SMC_STATUS_OK 0x0
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2
-#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4
-#define INTEL_SIP_SMC_REG_ERROR 0x5
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
#define INTEL_SIP_SMC_RSU_ERROR 0x7
/**
@@ -95,7 +88,7 @@
* a2-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1
@@ -115,8 +108,8 @@
* a3-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1: 64bit physical address of 1st completed memory block if any completed
* block, otherwise zero value.
* a2: 64bit physical address of 2nd completed memory block if any completed
@@ -133,15 +126,15 @@
*
* Sync call used by service driver at EL1 to track the completed write
* transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE
- * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY.
+ * call returns INTEL_SIP_SMC_STATUS_BUSY.
*
* Call register usage:
* a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE.
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1: 64bit physical address of 1st completed memory block.
* a2: 64bit physical address of 2nd completed memory block if
* any completed block, otherwise zero value.
@@ -164,8 +157,8 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or
- * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or
+ * INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4
@@ -183,7 +176,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
* a1: start of physical address of reserved memory block.
* a2: size of reserved memory block.
* a3: not used.
@@ -203,7 +196,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
* a1-7: not used.
*
* Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR.
+ * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR.
* a1-3: not used.
*/
#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 59bc6e2af693..64213c3e82f5 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -18,45 +18,37 @@
/**
* Status of the sent command, in bit number
*
- * SVC_COMMAND_STATUS_RECONFIG_REQUEST_OK:
- * Secure firmware accepts the request of FPGA reconfiguration.
+ * SVC_STATUS_OK:
+ * Secure firmware accepts the request issued by one of service clients.
*
- * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED:
- * Service client successfully submits FPGA configuration
- * data buffer to secure firmware.
+ * SVC_STATUS_BUFFER_SUBMITTED:
+ * Service client successfully submits data buffer to secure firmware.
*
- * SVC_COMMAND_STATUS_RECONFIG_BUFFER_DONE:
+ * SVC_STATUS_BUFFER_DONE:
* Secure firmware completes data process, ready to accept the
* next WRITE transaction.
*
- * SVC_COMMAND_STATUS_RECONFIG_COMPLETED:
- * Secure firmware completes FPGA configuration successfully, FPGA should
- * be in user mode.
+ * SVC_STATUS_COMPLETED:
+ * Secure firmware completes service request successfully. In case of
+ * FPGA configuration, FPGA should be in user mode.
*
- * SVC_COMMAND_STATUS_RECONFIG_BUSY:
- * FPGA configuration is still in process.
+ * SVC_STATUS_BUSY:
+ * Service request is still in process.
*
- * SVC_COMMAND_STATUS_RECONFIG_ERROR:
- * Error encountered during FPGA configuration.
+ * SVC_STATUS_ERROR:
+ * Error encountered during the process of the service request.
*
- * SVC_STATUS_RSU_OK:
- * Secure firmware accepts the request of remote status update (RSU).
- *
- * SVC_STATUS_RSU_ERROR:
- * Error encountered during remote system update.
- *
- * SVC_STATUS_RSU_NO_SUPPORT:
- * Secure firmware doesn't support RSU retry or notify feature.
+ * SVC_STATUS_NO_SUPPORT:
+ * Secure firmware doesn't support requested features such as RSU retry
+ * or RSU notify.
*/
-#define SVC_STATUS_RECONFIG_REQUEST_OK 0
-#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
-#define SVC_STATUS_RECONFIG_BUFFER_DONE 2
-#define SVC_STATUS_RECONFIG_COMPLETED 3
-#define SVC_STATUS_RECONFIG_BUSY 4
-#define SVC_STATUS_RECONFIG_ERROR 5
-#define SVC_STATUS_RSU_OK 6
-#define SVC_STATUS_RSU_ERROR 7
-#define SVC_STATUS_RSU_NO_SUPPORT 8
+#define SVC_STATUS_OK 0
+#define SVC_STATUS_BUFFER_SUBMITTED 1
+#define SVC_STATUS_BUFFER_DONE 2
+#define SVC_STATUS_COMPLETED 3
+#define SVC_STATUS_BUSY 4
+#define SVC_STATUS_ERROR 5
+#define SVC_STATUS_NO_SUPPORT 6
/**
* Flag bit for COMMAND_RECONFIG
@@ -84,32 +76,29 @@ struct stratix10_svc_chan;
* @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting
*
* @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status
- * is SVC_STATUS_RECONFIG_REQUEST_OK
+ * is SVC_STATUS_OK
*
* @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the
- * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED,
- * or SVC_STATUS_RECONFIG_ERROR
+ * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or SVC_STATUS_ERROR
*
* @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return
- * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
- * SVC_STATUS_RECONFIG_ERROR
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
*
* @COMMAND_RECONFIG_STATUS: check the status of the configuration, return
- * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or
- * SVC_STATUS_RECONFIG_ERROR
+ * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR
*
* @COMMAND_RSU_STATUS: request remote system update boot log, return status
* is log data or SVC_STATUS_RSU_ERROR
*
* @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot,
- * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
* @COMMAND_RSU_NOTIFY: report the status of hard processor system
- * software to firmware, return status is SVC_STATUS_RSU_OK or
- * SVC_STATUS_RSU_ERROR
+ * software to firmware, return status is SVC_STATUS_OK or
+ * SVC_STATUS_ERROR
*
* @COMMAND_RSU_RETRY: query firmware for the current image's retry counter,
- * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
COMMAND_NOOP = 0,
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
index 2549a2db56aa..be5984bda592 100644
--- a/include/linux/firmware/trusted_foundations.h
+++ b/include/linux/firmware/trusted_foundations.h
@@ -32,6 +32,7 @@
#define TF_PM_MODE_LP1_NO_MC_CLK 2
#define TF_PM_MODE_LP2 3
#define TF_PM_MODE_LP2_NOFLUSH_L2 4
+#define TF_PM_MODE_NONE 5
struct trusted_foundations_platform_data {
unsigned int version_major;
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 8efa5ac22d7e..5968df82b991 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -42,6 +42,8 @@
#define ZYNQMP_PM_MAX_QOS 100U
+#define GSS_NUM_REGS (4)
+
/* Node capabilities */
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
@@ -62,6 +64,7 @@
enum pm_api_id {
PM_GET_API_VERSION = 1,
+ PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
PM_RELEASE_NODE,
PM_SET_REQUIREMENT,
@@ -107,6 +110,12 @@ enum pm_ioctl_id {
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_WRITE_GGS = 12,
+ IOCTL_READ_GGS = 13,
+ IOCTL_WRITE_PGGS = 14,
+ IOCTL_READ_PGGS = 15,
+ /* Set healthy bit value */
+ IOCTL_SET_BOOT_HEALTH_STATUS = 17,
};
enum pm_query_id {
@@ -279,6 +288,18 @@ enum dll_reset_type {
PM_DLL_RESET_PULSE,
};
+enum zynqmp_pm_shutdown_type {
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+};
+
+enum zynqmp_pm_shutdown_subtype {
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -293,49 +314,199 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-struct zynqmp_eemi_ops {
- int (*get_api_version)(u32 *version);
- int (*get_chipid)(u32 *idcode, u32 *version);
- int (*fpga_load)(const u64 address, const u32 size, const u32 flags);
- int (*fpga_get_status)(u32 *value);
- int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
- int (*clock_enable)(u32 clock_id);
- int (*clock_disable)(u32 clock_id);
- int (*clock_getstate)(u32 clock_id, u32 *state);
- int (*clock_setdivider)(u32 clock_id, u32 divider);
- int (*clock_getdivider)(u32 clock_id, u32 *divider);
- int (*clock_setrate)(u32 clock_id, u64 rate);
- int (*clock_getrate)(u32 clock_id, u64 *rate);
- int (*clock_setparent)(u32 clock_id, u32 parent_id);
- int (*clock_getparent)(u32 clock_id, u32 *parent_id);
- int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
- int (*reset_assert)(const enum zynqmp_pm_reset reset,
- const enum zynqmp_pm_reset_action assert_flag);
- int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status);
- int (*init_finalize)(void);
- int (*set_suspend_mode)(u32 mode);
- int (*request_node)(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack);
- int (*release_node)(const u32 node);
- int (*set_requirement)(const u32 node,
- const u32 capabilities,
- const u32 qos,
- const enum zynqmp_pm_request_ack ack);
- int (*aes)(const u64 address, u32 *out);
-};
int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 *ret_payload);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
-const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
+int zynqmp_pm_get_api_version(u32 *version);
+int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
+int zynqmp_pm_clock_enable(u32 clock_id);
+int zynqmp_pm_clock_disable(u32 clock_id);
+int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
+int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
+int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
+int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
+int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
+int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
+int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
+int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
+int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode);
+int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
+int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
+int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
+int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
+int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag);
+int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
+int zynqmp_pm_init_finalize(void);
+int zynqmp_pm_set_suspend_mode(u32 mode);
+int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos, const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_release_node(const u32 node);
+int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
+int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_write_ggs(u32 index, u32 value);
+int zynqmp_pm_read_ggs(u32 index, u32 *value);
+int zynqmp_pm_write_pggs(u32 index, u32 value);
+int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
+int zynqmp_pm_set_boot_health_status(u32 value);
#else
static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
{
return ERR_PTR(-ENODEV);
}
+static inline int zynqmp_pm_get_api_version(u32 *version)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
+ u32 *out)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_enable(u32 clock_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_disable(u32 clock_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_init_finalize(void)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_release_node(const u32 node)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_requirement(const u32 node,
+ const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
+{
+ return -ENODEV;
+}
+static inline int zynqmp_pm_set_boot_health_status(u32 value)
+{
+ return -ENODEV;
+}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
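A hedged sketch of what the conversion from the zynqmp_eemi_ops table to direct exports means for a caller; the helper below is hypothetical:

static int example_enable_and_check(u32 clock_id)
{
	u32 state = 0;
	int ret;

	ret = zynqmp_pm_clock_enable(clock_id);
	if (ret)
		return ret;	/* -ENODEV when the firmware is not reachable */

	ret = zynqmp_pm_clock_getstate(clock_id, &state);
	if (ret)
		return ret;

	return state ? 0 : -EIO;
}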
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
index 7fc95d5c95bb..141ac3f251e6 100644
--- a/include/linux/fpga/adi-axi-common.h
+++ b/include/linux/fpga/adi-axi-common.h
@@ -11,9 +11,13 @@
#ifndef ADI_AXI_COMMON_H_
#define ADI_AXI_COMMON_H_
-#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_VERSION 0x0000
#define ADI_AXI_PCORE_VER(major, minor, patch) \
(((major) << 16) | ((minor) << 8) | (patch))
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
#endif /* ADI_AXI_COMMON_H_ */
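A small hypothetical example of decoding a version register with the new helper macros:

static void example_report_version(struct device *dev, u32 version)
{
	dev_info(dev, "IP core v%u.%u.%u\n",
		 ADI_AXI_PCORE_VER_MAJOR(version),
		 ADI_AXI_PCORE_VER_MINOR(version),
		 ADI_AXI_PCORE_VER_PATCH(version));
}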
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9780ac31387c..8e1f8f93108f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -24,7 +24,6 @@
#include <linux/capability.h>
#include <linux/semaphore.h>
#include <linux/fcntl.h>
-#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>
@@ -48,6 +47,7 @@ struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct export_operations;
+struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
struct kiocb;
@@ -1048,6 +1048,7 @@ struct lock_manager_operations {
bool (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lock *);
};
struct lock_manager {
@@ -1412,6 +1413,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+
/* Possible states of 'frozen' field */
enum {
SB_UNFROZEN = 0, /* FS is unfrozen */
@@ -1679,10 +1682,10 @@ static inline int sb_start_write_trylock(struct super_block *sb)
*
* Since page fault freeze protection behaves as a lock, users have to preserve
* ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
* handling code implies lock dependency:
*
- * mmap_sem
+ * mmap_lock
* -> sb_start_pagefault
*/
static inline void sb_start_pagefault(struct super_block *sb)
@@ -1755,19 +1758,6 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
umode_t mode);
extern bool may_open_dev(const struct path *path);
-/*
- * VFS FS_IOC_FIEMAP helper definitions.
- */
-struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
-};
-int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
- u64 phys, u64 len, u32 flags);
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
/*
* This is the "filldir" function type, used by readdir() to let
@@ -3094,6 +3084,9 @@ extern struct inode *find_inode_nowait(struct super_block *,
int (*match)(struct inode *,
unsigned long, void *),
void *data);
+extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
+ int (*)(struct inode *, void *), void *);
+extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -3323,14 +3316,6 @@ static inline int vfs_fstat(int fd, struct kstat *stat)
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);
-extern int __generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len,
- get_block_t *get_block);
-extern int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start,
- u64 len, get_block_t *get_block);
-
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h
index a0e2e6b19b57..154e541ce57e 100644
--- a/include/linux/fsl/bestcomm/bestcomm.h
+++ b/include/linux/fsl/bestcomm/bestcomm.h
@@ -27,7 +27,7 @@
*/
struct bcom_bd {
u32 status;
- u32 data[0]; /* variable payload size */
+ u32 data[]; /* variable payload size */
};
/* ======================================================================== */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dfbbf7a7208b..e339dac91ee6 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -342,9 +342,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
extern int stack_tracer_enabled;
-int stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index e0abafbb17f8..9506f8ec0974 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -171,5 +171,7 @@ struct fwnode_operations {
#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
extern u32 fw_devlink_get_flags(void);
+void fw_devlink_pause(void);
+void fw_devlink_resume(void);
#endif
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index b8fc92c177eb..c4f272af7af5 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -253,6 +253,19 @@ struct gpio_irq_chip {
* Store old irq_chip irq_disable callback
*/
void (*irq_disable)(struct irq_data *data);
+ /**
+ * @irq_unmask:
+ *
+ * Store old irq_chip irq_unmask callback
+ */
+ void (*irq_unmask)(struct irq_data *data);
+
+ /**
+ * @irq_mask:
+ *
+ * Store old irq_chip irq_mask callback
+ */
+ void (*irq_mask)(struct irq_data *data);
};
/**
@@ -267,9 +280,9 @@ struct gpio_irq_chip {
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
- * (same as GPIOF_DIR_XXX), or negative error.
- * It is recommended to always implement this function, even on
- * input-only or output-only gpio chips.
+ * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
+ * or negative error. It is recommended to always implement this
+ * function, even on input-only or output-only gpio chips.
* @direction_input: configures signal "offset" as input, or returns error
* This can be omitted on input-only or output-only gpio chips.
* @direction_output: configures signal "offset" as output, or returns error
@@ -349,30 +362,30 @@ struct gpio_chip {
struct module *owner;
int (*request)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
void (*free)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*get_direction)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*direction_input)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*direction_output)(struct gpio_chip *gc,
- unsigned offset, int value);
+ unsigned int offset, int value);
int (*get)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
void (*set)(struct gpio_chip *gc,
- unsigned offset, int value);
+ unsigned int offset, int value);
void (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
int (*set_config)(struct gpio_chip *gc,
- unsigned offset,
+ unsigned int offset,
unsigned long config);
int (*to_irq)(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
void (*dbg_show)(struct seq_file *s,
struct gpio_chip *gc);
@@ -459,7 +472,7 @@ struct gpio_chip {
};
extern const char *gpiochip_is_requested(struct gpio_chip *gc,
- unsigned offset);
+ unsigned int offset);
/* add/remove chips */
extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
@@ -599,6 +612,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
unsigned int offset);
+int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
+ struct irq_domain *domain);
+
#ifdef CONFIG_LOCKDEP
/*
@@ -657,9 +673,9 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc,
}
#endif /* CONFIG_LOCKDEP */
-int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset);
-void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset);
-int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config);
/**
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 1ebe5be05d5f..781a053abbb9 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -20,8 +20,11 @@ enum gpio_lookup_flags {
/**
* struct gpiod_lookup - lookup table
- * @chip_label: name of the chip the GPIO belongs to
- * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @key: either the name of the chip the GPIO belongs to, or the GPIO line name
+ * Note that GPIO line names are not guaranteed to be globally unique,
+ * so this will use the first match found!
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or
+ * U16_MAX to indicate that @key is a GPIO line name
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
* @flags: bitmask of gpio_lookup_flags GPIO_* values
@@ -30,7 +33,7 @@ enum gpio_lookup_flags {
* functions using platform data.
*/
struct gpiod_lookup {
- const char *chip_label;
+ const char *key;
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
@@ -63,17 +66,17 @@ struct gpiod_hog {
/*
* Simple definition of a single GPIO under a con_id
*/
-#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
- GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
+#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags)
/*
* Use this macro if you need to have several GPIOs under the same con_id.
* Each GPIO needs to use a different index and can be accessed using
* gpiod_get_index()
*/
-#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
+#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
{ \
- .chip_label = _chip_label, \
+ .key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
.idx = _idx, \
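A hedged sketch of a board-file lookup table using the renamed key field; the device and line names are invented. With chip_hwnum set to U16_MAX the key is treated as a GPIO line name rather than a chip label:

static struct gpiod_lookup_table example_gpios = {
	.dev_id = "example-device",
	.table = {
		/* key is a chip label, hwnum is relative to that chip */
		GPIO_LOOKUP("gpiochip0", 12, "reset", GPIO_ACTIVE_LOW),
		/* key is a GPIO line name, so hwnum is U16_MAX */
		GPIO_LOOKUP("sdcard-detect", U16_MAX, "cd", GPIO_ACTIVE_HIGH),
		{ },
	},
};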
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
new file mode 100644
index 000000000000..4c1e6b34e824
--- /dev/null
+++ b/include/linux/gpio/regmap.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_GPIO_REGMAP_H
+#define _LINUX_GPIO_REGMAP_H
+
+struct device;
+struct gpio_regmap;
+struct irq_domain;
+struct regmap;
+
+#define GPIO_REGMAP_ADDR_ZERO ((unsigned long)(-1))
+#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO)
+
+/**
+ * struct gpio_regmap_config - Description of a generic regmap gpio_chip.
+ * @parent: The parent device
+ * @regmap: The regmap used to access the registers
+ * @label: (Optional) Descriptive name for GPIO controller.
+ * If not given, the name of the device is used.
+ * @ngpio: Number of GPIOs
+ * @names: (Optional) Array of names for gpios
+ * @reg_dat_base: (Optional) (in) register base address
+ * @reg_set_base: (Optional) set register base address
+ * @reg_clr_base: (Optional) clear register base address
+ * @reg_dir_in_base: (Optional) in setting register base address
+ * @reg_dir_out_base: (Optional) out setting register base address
+ * @reg_stride: (Optional) May be set if the registers (of the
+ * same type, dat, set, etc) are not consecutive.
+ * @ngpio_per_reg: Number of GPIOs per register
+ * @irq_domain: (Optional) IRQ domain if the controller is
+ * interrupt-capable
+ * @reg_mask_xlate: (Optional) Translates base address and GPIO
+ * offset to a register/bitmask pair. If not
+ * given the default gpio_regmap_simple_xlate()
+ * is used.
+ *
+ * The ->reg_mask_xlate translates a given base address and GPIO offset to
+ * register and mask pair. The base address is one of the given register
+ * base addresses in this structure.
+ *
+ * Although all register base addresses are marked as optional, there are
+ * several rules:
+ * 1. if you only have @reg_dat_base set, then it is input-only
+ * 2. if you only have @reg_set_base set, then it is output-only
+ * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then
+ * you have to set both @reg_dat_base and @reg_set_base
+ * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have
+ * two different registers for setting and clearing the output. This is
+ * also valid for the output-only case.
+ * 5. @reg_dir_in_base and @reg_dir_out_base are exclusive; is there really
+ * hardware which has redundant registers?
+ *
+ * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO
+ * which forces the address to the value 0.
+ */
+struct gpio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+
+ const char *label;
+ int ngpio;
+ const char *const *names;
+
+ unsigned int reg_dat_base;
+ unsigned int reg_set_base;
+ unsigned int reg_clr_base;
+ unsigned int reg_dir_in_base;
+ unsigned int reg_dir_out_base;
+ int reg_stride;
+ int ngpio_per_reg;
+ struct irq_domain *irq_domain;
+
+ int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask);
+};
+
+struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
+void gpio_regmap_unregister(struct gpio_regmap *gpio);
+struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
+ const struct gpio_regmap_config *config);
+void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data);
+void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
+
+#endif /* _LINUX_GPIO_REGMAP_H */
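A hedged sketch of registering a regmap-backed GPIO chip through the new API; register offsets and the probe shape are invented for illustration:

static int example_gpio_probe(struct device *dev, struct regmap *regmap)
{
	struct gpio_regmap_config config = {};

	config.parent = dev;
	config.regmap = regmap;
	config.ngpio = 32;
	config.reg_dat_base = GPIO_REGMAP_ADDR(0x04);	/* input values */
	config.reg_set_base = GPIO_REGMAP_ADDR(0x08);	/* output values */
	config.reg_dir_out_base = GPIO_REGMAP_ADDR(0x0c);

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
}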
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
index dfbc6c39a74b..aeb8f9243545 100644
--- a/include/linux/greybus/greybus_protocols.h
+++ b/include/linux/greybus/greybus_protocols.h
@@ -345,7 +345,7 @@ struct gb_cap_get_ims_certificate_request {
struct gb_cap_get_ims_certificate_response {
__u8 result_code;
- __u8 certificate[0];
+ __u8 certificate[];
} __packed;
/* CAP authenticate request/response */
@@ -358,7 +358,7 @@ struct gb_cap_authenticate_request {
struct gb_cap_authenticate_response {
__u8 result_code;
__u8 response[64];
- __u8 signature[0];
+ __u8 signature[];
} __packed;
@@ -642,7 +642,7 @@ struct gb_hid_get_report_request {
struct gb_hid_set_report_request {
__u8 report_type;
__u8 report_id;
- __u8 report[0];
+ __u8 report[];
} __packed;
/* HID input report request, via interrupt pipe */
@@ -680,7 +680,7 @@ struct gb_i2c_transfer_op {
struct gb_i2c_transfer_request {
__le16 op_count;
- struct gb_i2c_transfer_op ops[0]; /* op_count of these */
+ struct gb_i2c_transfer_op ops[]; /* op_count of these */
} __packed;
struct gb_i2c_transfer_response {
__u8 data[0]; /* inbound data */
@@ -908,7 +908,7 @@ struct gb_spi_transfer_request {
__u8 chip_select; /* of the spi device */
__u8 mode; /* of the spi device */
__le16 count;
- struct gb_spi_transfer transfers[0]; /* count of these */
+ struct gb_spi_transfer transfers[]; /* count of these */
} __packed;
struct gb_spi_transfer_response {
@@ -1188,7 +1188,7 @@ struct gb_svc_pwrmon_rail_count_get_response {
struct gb_svc_pwrmon_rail_names_get_response {
__u8 status;
- __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+ __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
} __packed;
#define GB_SVC_PWRMON_TYPE_CURR 0x01
@@ -1281,7 +1281,7 @@ struct gb_svc_intf_oops_request {
struct gb_raw_send_request {
__le32 len;
- __u8 data[0];
+ __u8 data[];
} __packed;
@@ -1300,7 +1300,7 @@ struct gb_raw_send_request {
/* Represents data from AP -> Module */
struct gb_uart_send_data_request {
__le16 size;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* recv-data-request flags */
@@ -1313,7 +1313,7 @@ struct gb_uart_send_data_request {
struct gb_uart_recv_data_request {
__le16 size;
__u8 flags;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_uart_receive_credits_request {
@@ -1382,14 +1382,14 @@ struct gb_loopback_transfer_request {
__le32 len;
__le32 reserved0;
__le32 reserved1;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_loopback_transfer_response {
__le32 len;
__le32 reserved0;
__le32 reserved1;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* SDIO */
@@ -1530,13 +1530,13 @@ struct gb_sdio_transfer_request {
__le16 data_blocks;
__le16 data_blksz;
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_sdio_transfer_response {
__le16 data_blocks;
__le16 data_blksz;
- __u8 data[0];
+ __u8 data[];
} __packed;
/* event request: generated by module and is defined as unidirectional */
@@ -1572,7 +1572,7 @@ struct gb_camera_configure_streams_request {
__u8 flags;
#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
__le16 padding;
- struct gb_camera_stream_config_request config[0];
+ struct gb_camera_stream_config_request config[];
} __packed;
/* Greybus Camera Configure Streams response payload */
@@ -1593,7 +1593,7 @@ struct gb_camera_configure_streams_response {
__u8 flags;
__u8 padding[2];
__le32 data_rate;
- struct gb_camera_stream_config_response config[0];
+ struct gb_camera_stream_config_response config[];
};
/* Greybus Camera Capture request payload - response has no payload */
@@ -1602,7 +1602,7 @@ struct gb_camera_capture_request {
__u8 streams;
__u8 padding;
__le16 num_frames;
- __u8 settings[0];
+ __u8 settings[];
} __packed;
/* Greybus Camera Flush response payload - request has no payload */
@@ -1616,7 +1616,7 @@ struct gb_camera_metadata_request {
__le16 frame_number;
__u8 stream;
__u8 padding;
- __u8 metadata[0];
+ __u8 metadata[];
} __packed;
/* Lights */
@@ -1993,7 +1993,7 @@ struct gb_audio_integer64 {
struct gb_audio_enumerated {
__le32 items;
__le16 names_length;
- __u8 names[0];
+ __u8 names[];
} __packed;
struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
@@ -2033,7 +2033,7 @@ struct gb_audio_widget {
__u8 type; /* GB_AUDIO_WIDGET_TYPE_* */
__u8 state; /* GB_AUDIO_WIDGET_STATE_* */
__u8 ncontrols;
- struct gb_audio_control ctl[0]; /* 'ncontrols' entries */
+ struct gb_audio_control ctl[]; /* 'ncontrols' entries */
} __packed;
struct gb_audio_route {
@@ -2059,7 +2059,7 @@ struct gb_audio_topology {
* struct gb_audio_widget widgets[num_widgets];
* struct gb_audio_route routes[num_routes];
*/
- __u8 data[0];
+ __u8 data[];
} __packed;
struct gb_audio_get_topology_size_response {
@@ -2157,7 +2157,7 @@ struct gb_audio_streaming_event_request {
struct gb_audio_send_data_request {
__le64 timestamp;
- __u8 data[0];
+ __u8 data[];
} __packed;
@@ -2171,7 +2171,7 @@ struct gb_audio_send_data_request {
struct gb_log_send_log_request {
__le16 len;
- __u8 msg[0];
+ __u8 msg[];
} __packed;
#endif /* __GREYBUS_PROTOCOLS_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ea5cdbd8c2c3..d6e82e3de027 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -32,8 +32,65 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#include <asm/kmap_types.h>
#ifdef CONFIG_HIGHMEM
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>
+#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+static inline void kmap_flush_tlb(unsigned long addr) { }
+#endif
+
+#ifndef kmap_prot
+#define kmap_prot PAGE_KERNEL
+#endif
+
+void *kmap_high(struct page *page);
+static inline void *kmap(struct page *page)
+{
+ void *addr;
+
+ might_sleep();
+ if (!PageHighMem(page))
+ addr = page_address(page);
+ else
+ addr = kmap_high(page);
+ kmap_flush_tlb((unsigned long)addr);
+ return addr;
+}
+
+void kunmap_high(struct page *page);
+
+static inline void kunmap(struct page *page)
+{
+ might_sleep();
+ if (!PageHighMem(page))
+ return;
+ kunmap_high(page);
+}
+
+/*
+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+ * no global lock is needed and because the kmap code must perform a global TLB
+ * invalidation when the kmap pool wraps.
+ *
+ * However, when holding an atomic kmap it is not legal to sleep, so atomic
+ * kmaps are appropriate for short, tight code paths only.
+ *
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ preempt_disable();
+ pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
+ return kmap_atomic_high_prot(page, prot);
+}
+#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
+
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
@@ -77,15 +134,21 @@ static inline struct page *kmap_to_page(void *addr)
static inline unsigned long totalhigh_pages(void) { return 0UL; }
-#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
might_sleep();
return page_address(page);
}
+static inline void kunmap_high(struct page *page)
+{
+}
+
static inline void kunmap(struct page *page)
{
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(page_address(page));
+#endif
}
static inline void *kmap_atomic(struct page *page)
@@ -96,16 +159,20 @@ static inline void *kmap_atomic(struct page *page)
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
{
- pagefault_enable();
- preempt_enable();
+ /*
+ * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+ * handles re-enabling faults + preemption
+ */
+#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+#endif
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_flush_unused() do {} while(0)
-#endif
#endif /* CONFIG_HIGHMEM */
@@ -149,7 +216,9 @@ static inline void kmap_atomic_idx_pop(void)
#define kunmap_atomic(addr) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
+ kunmap_atomic_high(addr); \
+ pagefault_enable(); \
+ preempt_enable(); \
} while (0)
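
A minimal usage sketch of the two interfaces consolidated above (the helpers are hypothetical, not part of this patch): kmap()/kunmap() may sleep and operate on the page, while kmap_atomic()/kunmap_atomic() run with pagefaults and preemption disabled and operate on the returned kernel virtual address.

#include <linux/highmem.h>
#include <linux/string.h>

static void zero_page_sleepable(struct page *page)
{
        void *vaddr = kmap(page);          /* may sleep; mapping lives until kunmap() */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);                      /* takes the page */
}

static void zero_page_atomic(struct page *page)
{
        void *vaddr = kmap_atomic(page);   /* pagefaults/preemption disabled */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap_atomic(vaddr);              /* takes the virtual address, not the page */
}
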
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index e912b9dc4633..f4a09ed223ac 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -10,7 +10,7 @@
#define LINUX_HMM_H
#include <linux/kconfig.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/device.h>
#include <linux/migrate.h>
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfbb0a87c5f0..71f20776b06c 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -248,7 +248,7 @@ static inline int is_swap_pmd(pmd_t pmd)
return !pmd_none(pmd) && !pmd_present(pmd);
}
-/* mmap_sem must be held on entry */
+/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0cced410e0bd..50650d0d01b9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -9,7 +9,7 @@
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
struct ctl_table;
struct user_struct;
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 6058c3844a76..d7d4250cd1e4 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -80,6 +80,10 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
+int hw_breakpoint_weight(struct perf_event *bp);
+int arch_reserve_bp_slot(struct perf_event *bp);
+void arch_release_bp_slot(struct perf_event *bp);
+void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5a127c0ed200..a3a838dcf8e4 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -133,62 +133,4 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
-#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, _extend_name, _type, _mask_all) \
- { \
- .type = (_type), \
- .differential = (_channel2 == -1 ? 0 : 1), \
- .indexed = 1, \
- .channel = (_channel1), \
- .channel2 = (_channel2), \
- .address = (_address), \
- .extend_name = (_extend_name), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_OFFSET), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = _mask_all, \
- .scan_index = (_si), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = (_bits), \
- .storagebits = (_storagebits), \
- .shift = (_shift), \
- .endianness = IIO_BE, \
- }, \
- }
-
-#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
- _storagebits, _shift, "shorted", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \
- _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_VOLTAGE, 0)
-
-#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
- __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
- _storagebits, _shift, NULL, IIO_TEMP, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
-#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
- _shift) \
- __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
- _storagebits, _shift, "supply", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SAMP_FREQ))
-
#endif
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
new file mode 100644
index 000000000000..c5d48e1c2d36
--- /dev/null
+++ b/include/linux/iio/adc/adi-axi-adc.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices Generic AXI ADC IP core driver/library
+ * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
+ *
+ * Copyright 2012-2020 Analog Devices Inc.
+ */
+#ifndef __ADI_AXI_ADC_H__
+#define __ADI_AXI_ADC_H__
+
+struct device;
+struct iio_chan_spec;
+
+/**
+ * struct adi_axi_adc_chip_info - Chip specific information
+ * @name Chip name
+ * @id Chip ID (usually product ID)
+ * @channels Channel specifications of type @struct iio_chan_spec
+ * @num_channels Number of @channels
+ * @scale_table Supported scales by the chip; tuples of 2 ints
+ * @num_scales Number of scales in the table
+ * @max_rate Maximum sampling rate supported by the device
+ */
+struct adi_axi_adc_chip_info {
+ const char *name;
+ unsigned int id;
+
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+
+ unsigned long max_rate;
+};
+
+/**
+ * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
+ * @chip_info chip info details for the client ADC
+ * @preenable_setup op to run in the client before enabling the AXI ADC
+ * @reg_access IIO debugfs_reg_access hook for the client ADC
+ * @read_raw IIO read_raw hook for the client ADC
+ * @write_raw IIO write_raw hook for the client ADC
+ */
+struct adi_axi_adc_conv {
+ const struct adi_axi_adc_chip_info *chip_info;
+
+ int (*preenable_setup)(struct adi_axi_adc_conv *conv);
+ int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*read_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+ int (*write_raw)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask);
+};
+
+struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
+ size_t sizeof_priv);
+
+void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
+
+#endif
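
A hedged sketch of a client converter driver using the two entry points declared above; the SPI probe wrapper, state structure and chip_info values are hypothetical.

#include <linux/err.h>
#include <linux/iio/adc/adi-axi-adc.h>
#include <linux/spi/spi.h>

struct my_adc_state {                          /* hypothetical per-device state */
        struct spi_device *spi;
};

static const struct adi_axi_adc_chip_info my_chip_info = {
        .name = "my-adc",                      /* hypothetical part */
        .id = 0x1234,                          /* hypothetical product ID */
};

static int my_adc_probe(struct spi_device *spi)
{
        struct adi_axi_adc_conv *conv;
        struct my_adc_state *st;

        /* allocates the conv object plus sizeof(*st) bytes of private data */
        conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
        if (IS_ERR(conv))
                return PTR_ERR(conv);

        st = adi_axi_adc_conv_priv(conv);      /* pointer to that private area */
        st->spi = spi;
        conv->chip_info = &my_chip_info;

        return 0;
}
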
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 016d8a068353..ff15c61bf319 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -11,7 +11,7 @@
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index b3a57444a886..0e503db71289 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -14,4 +14,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel);
void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
+ const char *channel);
+
#endif
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index a4d2d8061ef6..a63dc07b7350 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -94,12 +94,6 @@ struct iio_buffer {
unsigned int watermark;
/* private: */
- /*
- * @scan_el_attrs: Control of scan elements if that scan mode
- * control method is used.
- */
- struct attribute_group *scan_el_attrs;
-
/* @scan_timestamp: Does the scan mode include a timestamp. */
bool scan_timestamp;
@@ -115,9 +109,6 @@ struct iio_buffer {
*/
struct attribute_group scan_el_group;
- /* @stufftoread: Flag to indicate new data. */
- bool stufftoread;
-
/* @attrs: Standard attributes of the buffer. */
const struct attribute **attrs;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 2bde8c912d4d..c4118dcb8e05 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -64,15 +64,6 @@ void iio_channel_release(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get(struct device *dev,
const char *consumer_channel);
/**
- * devm_iio_channel_release() - Resource managed version of
- * iio_channel_release().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: The channel to be released.
- */
-void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
-
-/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -106,15 +97,6 @@ void iio_channel_release_all(struct iio_channel *chan);
*/
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
-/**
- * devm_iio_channel_release_all() - Resource managed version of
- * iio_channel_release_all().
- * @dev: Pointer to consumer device for which resource
- * is allocared.
- * @chan: Array channel to be released.
- */
-void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
-
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
index 44d48bb1d39f..e8255c2e33bc 100644
--- a/include/linux/iio/hw-consumer.h
+++ b/include/linux/iio/hw-consumer.h
@@ -14,7 +14,6 @@ struct iio_hw_consumer;
struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
-void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 25c87507a1fa..a1be82e74c93 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -492,7 +492,7 @@ struct iio_buffer_setup_ops {
* @buffer: [DRIVER] any buffer present
* @buffer_list: [INTERN] list of all buffers currently attached
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [DRIVER] lock used to prevent simultaneous device state
+ * @mlock: [INTERN] lock used to prevent simultaneous device state
* changes
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
@@ -593,9 +593,6 @@ void iio_device_unregister(struct iio_dev *indio_dev);
* calls iio_device_register() internally. Refer to that function for more
* information.
*
- * If an iio_dev registered with this function needs to be unregistered
- * separately, devm_iio_device_unregister() must be used.
- *
* RETURNS:
* 0 on success, negative error number on failure.
*/
@@ -603,7 +600,6 @@ void iio_device_unregister(struct iio_dev *indio_dev);
__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
-void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
@@ -694,13 +690,9 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
}
void iio_device_free(struct iio_dev *indio_dev);
-int devm_iio_device_match(struct device *dev, void *res, void *data);
struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
-void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
const char *fmt, ...);
-void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
-
/**
* iio_buffer_enabled() - helper function to test if the buffer is enabled
* @indio_dev: IIO device structure for device
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index dd8219138c2e..2df67448f0d1 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -83,10 +83,13 @@ struct adis_data {
* @trig: IIO trigger object data
* @data: ADIS chip variant specific data
* @burst: ADIS burst transfer information
+ * @burst_extra_len: Burst extra length. Should only be used by devices that can
+ * dynamically change their burst mode length.
* @state_lock: Lock used by the device to protect state
* @msg: SPI message object
* @xfer: SPI transfer objects to be used for a @msg
* @current_page: Some ADIS devices have registers, this selects current page
+ * @irq_flag: IRQ handling flags as passed to request_irq()
* @buffer: Data buffer for information read from the device
* @tx: DMA safe TX buffer for SPI transfers
* @rx: DMA safe RX buffer for SPI transfers
@@ -97,7 +100,7 @@ struct adis {
const struct adis_data *data;
struct adis_burst *burst;
-
+ unsigned int burst_extra_len;
/**
* The state_lock is meant to be used during operations that require
* a sequence of SPI R/W in order to protect the SPI transfer
@@ -113,6 +116,7 @@ struct adis {
struct spi_message msg;
struct spi_transfer *xfer;
unsigned int current_page;
+ unsigned long irq_flag;
void *buffer;
uint8_t tx[10] ____cacheline_aligned;
@@ -331,6 +335,65 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
return ret;
}
+int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
+ const u32 val, u8 size);
+/**
+ * adis_update_bits_base() - ADIS Update bits function - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @size: Size of the register to update
+ *
+ * Updates the desired bits of @reg in accordance with @mask and @val.
+ */
+static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
+ const u32 mask, const u32 val, u8 size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_update_bits_base(adis, reg, mask, val, size);
+ mutex_unlock(&adis->state_lock);
+ return ret;
+}
+
+/**
+ * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates sizeof(@val) at compile time and calls
+ * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for
+ * @val can lead to undesired behavior if the register to update is 16-bit.
+ */
+#define adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
+ __builtin_choose_expr(sizeof(val) == 4, \
+ adis_update_bits_base(adis, reg, mask, val, 4), \
+ adis_update_bits_base(adis, reg, mask, val, 2)); \
+})
+
+/**
+ * __adis_update_bits() - Wrapper macro for __adis_update_bits_base - Unlocked version
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * This macro evaluates sizeof(@val) at compile time and calls
+ * __adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for
+ * @val can lead to undesired behavior if the register to update is 16-bit.
+ */
+#define __adis_update_bits(adis, reg, mask, val) ({ \
+ BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
+ __builtin_choose_expr(sizeof(val) == 4, \
+ __adis_update_bits_base(adis, reg, mask, val, 4), \
+ __adis_update_bits_base(adis, reg, mask, val, 2)); \
+})
+
int adis_enable_irq(struct adis *adis, bool enable);
int __adis_check_status(struct adis *adis);
int __adis_initial_startup(struct adis *adis);
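
A hedged example of the compile-time width selection implemented by the adis_update_bits() macros above; the register address and control bit are hypothetical. Passing a properly typed variable (here a u16) rather than a bare constant is what routes the call to the 16-bit update path.

#include <linux/bits.h>
#include <linux/iio/imu/adis.h>

#define MY_REG_MSC_CTRL         0x38           /* hypothetical register */
#define MY_MSC_CTRL_FILTER_EN   BIT(3)         /* hypothetical control bit */

static int my_set_filter(struct adis *adis, bool enable)
{
        u16 val = enable ? MY_MSC_CTRL_FILTER_EN : 0;

        /* sizeof(u16) == 2, so this expands to adis_update_bits_base(..., 2) */
        return adis_update_bits(adis, MY_REG_MSC_CTRL, MY_MSC_CTRL_FILTER_EN, val);
}
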
@@ -441,18 +504,25 @@ int adis_single_conversion(struct iio_dev *indio_dev,
* @en burst mode enabled
* @reg_cmd register command that triggers burst
* @extra_len extra length to account in the SPI RX buffer
+ * @burst_max_len holds the maximum burst size when the device supports
+ * more than one burst mode with different sizes
*/
struct adis_burst {
bool en;
unsigned int reg_cmd;
- unsigned int extra_len;
+ const u32 extra_len;
+ const u32 burst_max_len;
};
+int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler);
int adis_setup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
void adis_cleanup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev);
+int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
void adis_remove_trigger(struct adis *adis);
@@ -461,6 +531,13 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
#else /* CONFIG_IIO_BUFFER */
+static inline int
+devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler)
+{
+ return 0;
+}
+
static inline int adis_setup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
{
@@ -472,6 +549,12 @@ static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
{
}
+static inline int devm_adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
static inline int adis_probe_trigger(struct adis *adis,
struct iio_dev *indio_dev)
{
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index 764659e01b68..1fc1efa7799d 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -9,6 +9,5 @@ struct iio_buffer *iio_kfifo_allocate(void);
void iio_kfifo_free(struct iio_buffer *r);
struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
-void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
#endif
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 84995e2967ac..cad8325903f9 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -141,9 +141,6 @@ int __devm_iio_trigger_register(struct device *dev,
**/
void iio_trigger_unregister(struct iio_trigger *trig_info);
-void devm_iio_trigger_unregister(struct device *dev,
- struct iio_trigger *trig_info);
-
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 238ad30ce166..e99c91799359 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -18,7 +18,5 @@ int devm_iio_triggered_buffer_setup(struct device *dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
const struct iio_buffer_setup_ops *ops);
-void devm_iio_triggered_buffer_cleanup(struct device *dev,
- struct iio_dev *indio_dev);
#endif
diff --git a/include/linux/ima.h b/include/linux/ima.h
index aefe758f4466..9164e1534ec9 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -18,6 +18,7 @@ extern int ima_file_check(struct file *file, int mask);
extern void ima_post_create_tmpfile(struct inode *inode);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
@@ -70,6 +71,12 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
+static inline int ima_file_mprotect(struct vm_area_struct *vma,
+ unsigned long prot)
+{
+ return 0;
+}
+
static inline int ima_load_data(enum kernel_load_data_id id)
{
return 0;
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
deleted file mode 100644
index 3614a13a8297..000000000000
--- a/include/linux/input/gp2ap002a00f.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GP2AP002A00F_H_
-#define _GP2AP002A00F_H_
-
-#include <linux/i2c.h>
-
-#define GP2A_I2C_NAME "gp2ap002a00f"
-
-/**
- * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
- * @vout_gpio: The gpio connected to the object detected pin (VOUT)
- * @wakeup: Set to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
- int vout_gpio;
- bool wakeup;
- int (*hw_setup)(struct i2c_client *client);
- int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 9e409bb13642..3b8580bd33c1 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis)
bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+ input_mt_report_slot_state(dev, 0, false);
+}
+
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
void input_mt_drop_unused(struct input_dev *dev);
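
A hedged sketch of the new helper in a multitouch driver's contact-release path; the slot index is assumed to come from the (hypothetical) caller and the rest is the usual input-MT sequence.

#include <linux/input/mt.h>

static void my_report_release(struct input_dev *dev, int slot)
{
        input_mt_slot(dev, slot);
        /* shorthand for input_mt_report_slot_state(dev, 0, false) */
        input_mt_report_slot_inactive(dev);
        input_sync(dev);
}
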
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..43e6ea591975
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation that
+ * the compiler cannot emit for: KASAN, KCSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+ kasan_check_read(v, size);
+ kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+ kasan_check_write(v, size);
+ kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory, that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory, that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
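
A hedged sketch of where these wrappers sit in a usercopy path; my_copy_to_user() is illustrative only, with raw_copy_to_user() standing in for the uninstrumented arch primitive.

#include <linux/instrumented.h>
#include <linux/uaccess.h>

static inline unsigned long
my_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        /* check the kernel-side read before the uninstrumented copy runs */
        instrument_copy_to_user(to, from, n);
        return raw_copy_to_user(to, from, n);
}
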
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 980234ae0312..4100bd224f5c 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -19,6 +19,7 @@
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
+#include <linux/ioasid.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -42,6 +43,9 @@
#define DMA_FL_PTE_PRESENT BIT_ULL(0)
#define DMA_FL_PTE_XD BIT_ULL(63)
+#define ADDR_WIDTH_5LEVEL (57)
+#define ADDR_WIDTH_4LEVEL (48)
+
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
@@ -166,6 +170,7 @@
#define ecap_smpwc(e) (((e) >> 48) & 0x1)
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_vcs(e) (((e) >> 44) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
@@ -191,6 +196,9 @@
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
+/* Virtual command interface capability */
+#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
+
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
@@ -284,6 +292,9 @@
/* PRS_REG */
#define DMA_PRS_PPR ((u32)1)
+#define DMA_PRS_PRO ((u32)2)
+
+#define DMA_VCS_PAS ((u64)1)
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
@@ -324,6 +335,8 @@ enum {
#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+#define QI_IWD_FENCE (((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
#define QI_IOTLB_DID(did) (((u64)did) << 16)
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
@@ -331,7 +344,7 @@ enum {
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am))
+#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
#define QI_CC_FM(fm) (((u64)fm) << 48)
#define QI_CC_SID(sid) (((u64)sid) << 32)
@@ -350,16 +363,21 @@ enum {
#define QI_PC_DID(did) (((u64)did) << 16)
#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS 0
+#define QI_PC_PASID_SEL 1
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am))
+#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_EIOTLB_DID(did) (((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL 1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
+
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
@@ -480,6 +498,23 @@ struct context_entry {
u64 hi;
};
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
+
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE BIT(2)
+
struct dmar_domain {
int nid; /* node id */
@@ -529,6 +564,7 @@ struct intel_iommu {
u64 reg_size; /* size of hw register set */
u64 cap;
u64 ecap;
+ u64 vccap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
@@ -549,6 +585,8 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU_SVM
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
+ struct completion prq_complete;
+ struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -571,6 +609,7 @@ struct device_domain_info {
struct list_head auxiliary_domains; /* auxiliary domains
* attached to this device
*/
+ u32 segment; /* PCI segment number */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
u16 pfsid; /* SRIOV physical function source ID */
@@ -595,6 +634,12 @@ static inline void __iommu_flush_cache(
clflush_cache_range(addr, size);
}
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct dmar_domain, domain);
+}
+
/*
* 0: readable
* 1: writable
@@ -653,9 +698,23 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u16 qdep, u64 addr, unsigned mask);
+
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
unsigned long npages, bool ih);
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u32 pasid, u16 qdep, u64 addr,
+ unsigned int size_order, u64 granu);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+ int pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+ unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN BIT(0)
extern int dmar_ir_support(void);
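
A hedged sketch of the reworked qi_submit_sync() calling convention only: the caller passes the iommu explicitly, may batch several descriptors via @count, and can request page-request draining with QI_OPT_WAIT_DRAIN. The descriptor body is left schematic; real callers fill qw0..qw3 per the VT-d spec (see the qi_flush_*() helpers above).

#include <linux/intel-iommu.h>

static int my_submit_one(struct intel_iommu *iommu)
{
        struct qi_desc desc = {};       /* qw0..qw3 to be filled per the spec */

        /* one descriptor; wait for page requests to drain before returning */
        return qi_submit_sync(iommu, &desc, 1, QI_OPT_WAIT_DRAIN);
}
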
@@ -667,12 +726,19 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct dmar_domain *find_domain(struct device *dev);
+struct device_domain_info *get_domain_info(struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+ struct iommu_gpasid_bind_data *data);
+int intel_svm_unbind_gpasid(struct device *dev, int pasid);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+int intel_svm_get_pasid(struct iommu_sva *handle);
struct svm_dev_ops;
struct intel_svm_dev {
@@ -680,6 +746,8 @@ struct intel_svm_dev {
struct rcu_head rcu;
struct device *dev;
struct svm_dev_ops *ops;
+ struct iommu_sva sva;
+ int pasid;
int users;
u16 did;
u16 dev_iotlb:1;
@@ -689,9 +757,11 @@ struct intel_svm_dev {
struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
+
struct intel_iommu *iommu;
int flags;
int pasid;
+ int gpasid; /* In case guest PASID differs from host PASID */
struct list_head devs;
struct list_head list;
};
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index d7c403d0dd27..c9e7e601950d 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -21,7 +21,6 @@ struct svm_dev_ops {
#define SVM_REQ_EXEC (1<<1)
#define SVM_REQ_PRIV (1<<0)
-
/*
* The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
* PASID for the current process. Even if a PASID already exists, a new one
@@ -44,90 +43,17 @@ struct svm_dev_ops {
* do such IOTLB flushes automatically.
*/
#define SVM_FLAG_SUPERVISOR_MODE (1<<1)
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-
-/**
- * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev: Device to be granted access
- * @pasid: Address for allocated PASID
- * @flags: Flags. Later for requesting supervisor mode, etc.
- * @ops: Callbacks to device driver
- *
- * This function attempts to enable PASID support for the given device.
- * If the @pasid argument is non-%NULL, a PASID is allocated for access
- * to the MM of the current process.
- *
- * By using a %NULL value for the @pasid argument, this function can
- * be used to simply validate that PASID support is available for the
- * given device — i.e. that it is behind an IOMMU which has the
- * requisite support, and is enabled.
- *
- * Page faults are handled transparently by the IOMMU code, and there
- * should be no need for the device driver to be involved. If a page
- * fault cannot be handled (i.e. is an invalid address rather than
- * just needs paging in), then the page request will be completed by
- * the core IOMMU code with appropriate status, and the device itself
- * can then report the resulting fault to its driver via whatever
- * mechanism is appropriate.
- *
- * Multiple calls from the same process may result in the same PASID
- * being re-used. A reference count is kept.
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
- struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
+/*
+ * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
+ * processes. Compared to the host bind, the primary differences are:
+ * 1. mm life cycle management
+ * 2. fault reporting
*/
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev: Device for which PASID was allocated
- * @pasid: PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
+#define SVM_FLAG_GUEST_MODE (1<<2)
+/*
+ * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
+ * which requires guest and host PASID translation in both directions.
*/
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
- int flags, struct svm_dev_ops *ops)
-{
- return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
- BUG();
-}
-
-static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+#define SVM_FLAG_GUEST_PASID (1<<3)
#endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index d70a914cba11..3a63d98613fc 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -28,9 +28,14 @@ struct device;
struct icc_path *icc_get(struct device *dev, const int src_id,
const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
#else
@@ -46,10 +51,31 @@ static inline struct icc_path *of_icc_get(struct device *dev,
return NULL;
}
+static inline struct icc_path *devm_of_icc_get(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
+{
+ return NULL;
+}
+
static inline void icc_put(struct icc_path *path)
{
}
+static inline int icc_enable(struct icc_path *path)
+{
+ return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+ return 0;
+}
+
static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
return 0;
@@ -59,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag)
{
}
+static inline const char *icc_get_name(struct icc_path *path)
+{
+ return NULL;
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_H */
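
A hedged consumer sketch combining the new devm getter with the new enable/disable pair; the path name and bandwidth values are hypothetical (icc_set_bw() takes average and peak bandwidth in kBps).

#include <linux/err.h>
#include <linux/interconnect.h>

static int my_icc_setup(struct device *dev)
{
        struct icc_path *path;
        int ret;

        path = devm_of_icc_get(dev, "memory");  /* hypothetical path name */
        if (IS_ERR(path))
                return PTR_ERR(path);

        ret = icc_set_bw(path, 100000, 200000); /* avg/peak kBps, hypothetical */
        if (ret)
                return ret;

        return icc_enable(path);                /* icc_disable() on the idle path */
}
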
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index b336622612f3..0beaa3eba155 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
/*
@@ -99,7 +100,6 @@ io_mapping_unmap(void __iomem *vaddr)
#else
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
/* Create the io_mapping object*/
static inline struct io_mapping *
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index a5c219c29b10..4d1d3c3469e9 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -177,7 +177,7 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- loff_t start, loff_t len, const struct iomap_ops *ops);
+ u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
const struct iomap_ops *ops);
loff_t iomap_seek_data(struct inode *inode, loff_t offset,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7ef8b0bda695..5f0b7859d2eb 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -53,8 +53,6 @@ struct iommu_fault_event;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
- void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
@@ -171,25 +169,6 @@ enum iommu_dev_features {
#define IOMMU_PASID_INVALID (-1U)
-/**
- * struct iommu_sva_ops - device driver callbacks for an SVA context
- *
- * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
- * @mm_exit returns, the device must not issue any more transaction
- * with the PASID given as argument.
- *
- * The @mm_exit handler is allowed to sleep. Be careful about the
- * locks taken in @mm_exit, because they might lead to deadlocks if
- * they are also held when dropping references to the mm. Consider the
- * following call chain:
- * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
- * Using mmput_async() prevents this scenario.
- *
- */
-struct iommu_sva_ops {
- iommu_mm_exit_handler_t mm_exit;
-};
-
#ifdef CONFIG_IOMMU_API
/**
@@ -223,8 +202,10 @@ struct iommu_iotlb_gather {
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
- * @add_device: add device to iommu grouping
- * @remove_device: remove device from iommu grouping
+ * @probe_device: Add device to iommu driver handling
+ * @release_device: Remove device from iommu driver handling
+ * @probe_finalize: Do final setup work after the device is added to an IOMMU
+ * group and attached to the groups domain
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
@@ -248,6 +229,10 @@ struct iommu_iotlb_gather {
* @cache_invalidate: invalidate translation caches
* @sva_bind_gpasid: bind guest pasid and mm
* @sva_unbind_gpasid: unbind guest pasid and mm
+ * @def_domain_type: device default domain type, return value:
+ * - IOMMU_DOMAIN_IDENTITY: must use an identity domain
+ * - IOMMU_DOMAIN_DMA: must use a dma domain
+ * - 0: use the default setting
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
*/
@@ -269,8 +254,9 @@ struct iommu_ops {
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
- int (*add_device)(struct device *dev);
- void (*remove_device)(struct device *dev);
+ struct iommu_device *(*probe_device)(struct device *dev);
+ void (*release_device)(struct device *dev);
+ void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
int (*domain_get_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
@@ -318,6 +304,8 @@ struct iommu_ops {
int (*sva_unbind_gpasid)(struct device *dev, int pasid);
+ int (*def_domain_type)(struct device *dev);
+
unsigned long pgsize_bitmap;
struct module *owner;
};
@@ -369,6 +357,7 @@ struct iommu_fault_param {
*
* @fault_param: IOMMU detected device fault reporting data
* @fwspec: IOMMU fwspec data
+ * @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
@@ -378,6 +367,7 @@ struct dev_iommu {
struct mutex lock;
struct iommu_fault_param *fault_param;
struct iommu_fwspec *fwspec;
+ struct iommu_device *iommu_dev;
void *priv;
};
@@ -430,6 +420,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
+extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
@@ -466,12 +457,26 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
+/**
+ * iommu_map_sgtable - Map the given buffer to the IOMMU domain
+ * @domain: The IOMMU domain to perform the mapping
+ * @iova: The start address to map the buffer
+ * @sgt: The sg_table object describing the buffer
+ * @prot: IOMMU protection bits
+ *
+ * Creates a mapping at @iova for the buffer described by a scatterlist
+ * stored in the given sg_table object in the provided IOMMU domain.
+ */
+static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+ unsigned long iova, struct sg_table *sgt, int prot)
+{
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+}
+
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
-extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
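
A hedged sketch of the new iommu_map_sgtable() helper; the domain, IOVA and sg_table are assumed to be set up by the (hypothetical) caller, and partial-mapping cleanup is reduced to a bare error return.

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int my_map_buffer(struct iommu_domain *domain, unsigned long iova,
                         struct sg_table *sgt, size_t len)
{
        size_t mapped;

        mapped = iommu_map_sgtable(domain, iova, sgt,
                                   IOMMU_READ | IOMMU_WRITE);
        if (mapped < len)
                return -ENOMEM;         /* a real caller would iommu_unmap() here */

        return 0;
}
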
@@ -515,7 +520,6 @@ extern int iommu_page_response(struct device *dev,
struct iommu_page_response *msg);
extern int iommu_group_id(struct iommu_group *group);
-extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
@@ -605,7 +609,6 @@ struct iommu_fwspec {
*/
struct iommu_sva {
struct device *dev;
- const struct iommu_sva_ops *ops;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -653,8 +656,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm,
void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);
#else /* CONFIG_IOMMU_API */
@@ -793,16 +794,6 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group,
return -ENODEV;
}
-static inline int iommu_request_dm_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iommu_request_dma_domain_for_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
static inline void iommu_set_default_passthrough(bool cmd_line)
{
}
@@ -1058,12 +1049,6 @@ static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}
-static inline int iommu_sva_set_ops(struct iommu_sva *handle,
- const struct iommu_sva_ops *ops)
-{
- return -EINVAL;
-}
-
static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
return IOMMU_PASID_INVALID;
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index a9b9170b5dd2..6c2b06fe8beb 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -103,6 +103,7 @@ struct resource {
#define IORESOURCE_MEM_32BIT (3<<3)
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+#define IORESOURCE_MEM_DRIVER_MANAGED (1<<7)
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
@@ -301,5 +302,11 @@ struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+#ifdef CONFIG_IO_STRICT_DEVMEM
+void revoke_devmem(struct resource *res);
+#else
+static inline void revoke_devmem(struct resource *res) { };
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index c309f43bde45..a06a78c67f19 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -68,6 +68,8 @@ struct ipc_namespace {
struct user_namespace *user_ns;
struct ucounts *ucounts;
+ struct llist_node mnt_llist;
+
struct ns_common ns;
} __randomize_layout;
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 657a83b943f0..98338dc6b5d2 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -165,9 +165,9 @@ static inline int kallsyms_show_value(void)
#endif /*CONFIG_KALLSYMS*/
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
- printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
+ printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
#endif /*_LINUX_KALLSYMS_H*/
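
A minimal, hypothetical caller showing the updated signature, with the log level now chosen at the call site rather than hard-coded in the helper.

#include <linux/kallsyms.h>
#include <linux/kernel.h>

static void my_dump_caller(void)
{
        /* prints "[<address>] symbol+0x.../0x..." at KERN_DEBUG level */
        print_ip_sym(KERN_DEBUG, _RET_IP_);
}
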
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 31314ca7c635..82522e996c76 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -11,8 +11,8 @@ struct task_struct;
#ifdef CONFIG_KASAN
+#include <linux/pgtable.h>
#include <asm/kasan.h>
-#include <asm/pgtable.h>
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index cc8fa109cfa3..9d12c970f18f 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_KCONFIG_H
#define __LINUX_KCONFIG_H
+/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
+
#include <generated/autoconf.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
new file mode 100644
index 000000000000..7b0b9c44f5f3
--- /dev/null
+++ b/include/linux/kcsan-checks.h
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_CHECKS_H
+#define _LINUX_KCSAN_CHECKS_H
+
+/* Note: Only include what is already included by compiler.h. */
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+/*
+ * ACCESS TYPE MODIFIERS
+ *
+ * <none>: normal read access;
+ * WRITE : write access;
+ * ATOMIC: access is atomic;
+ * ASSERT: access is not a regular access, but an assertion;
+ * SCOPED: access is a scoped access;
+ */
+#define KCSAN_ACCESS_WRITE 0x1
+#define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4
+#define KCSAN_ACCESS_SCOPED 0x8
+
+/*
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
+ * even in compilation units that selectively disable KCSAN, but must use KCSAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KCSAN
+/**
+ * __kcsan_check_access - check generic access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ */
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+
+/**
+ * kcsan_disable_current - disable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_disable_current(void);
+
+/**
+ * kcsan_enable_current - re-enable KCSAN for the current context
+ *
+ * Supports nesting.
+ */
+void kcsan_enable_current(void);
+void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */
+
+/**
+ * kcsan_nestable_atomic_begin - begin nestable atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_nestable_atomic_begin(void);
+
+/**
+ * kcsan_nestable_atomic_end - end nestable atomic region
+ */
+void kcsan_nestable_atomic_end(void);
+
+/**
+ * kcsan_flat_atomic_begin - begin flat atomic region
+ *
+ * Accesses within the atomic region may appear to race with other accesses but
+ * should be considered atomic.
+ */
+void kcsan_flat_atomic_begin(void);
+
+/**
+ * kcsan_flat_atomic_end - end flat atomic region
+ */
+void kcsan_flat_atomic_end(void);
+
+/**
+ * kcsan_atomic_next - consider following accesses as atomic
+ *
+ * Force treating the next n memory accesses for the current context as atomic
+ * operations.
+ *
+ * @n: number of following memory accesses to treat as atomic.
+ */
+void kcsan_atomic_next(int n);
+
+/**
+ * kcsan_set_access_mask - set access mask
+ *
+ * Set the access mask for all accesses for the current context if non-zero.
+ * Only value changes to bits set in the mask will be reported.
+ *
+ * @mask: bitmask
+ */
+void kcsan_set_access_mask(unsigned long mask);
+
+/* Scoped access information. */
+struct kcsan_scoped_access {
+ struct list_head list;
+ const volatile void *ptr;
+ size_t size;
+ int type;
+};
+/*
+ * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
+ * out of scope; relies on attribute "cleanup", which is supported by all
+ * compilers that support KCSAN.
+ */
+#define __kcsan_cleanup_scoped \
+ __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access)))
+
+/**
+ * kcsan_begin_scoped_access - begin scoped access
+ *
+ * Begin scoped access and initialize @sa, which will cause KCSAN to
+ * continuously check the memory range in the current thread until
+ * kcsan_end_scoped_access() is called for @sa.
+ *
+ * Scoped accesses are implemented by appending @sa to an internal list for the
+ * current execution context, and then checked on every call into the KCSAN
+ * runtime.
+ *
+ * @ptr: address of access
+ * @size: size of access
+ * @type: access type modifier
+ * @sa: struct kcsan_scoped_access to use for the scope of the access
+ */
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa);
+
+/**
+ * kcsan_end_scoped_access - end scoped access
+ *
+ * End a scoped access, which will stop KCSAN checking the memory range.
+ * Requires that kcsan_begin_scoped_access() was previously called once for @sa.
+ *
+ * @sa: a previously initialized struct kcsan_scoped_access
+ */
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
+
+
+#else /* CONFIG_KCSAN */
+
+static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+
+static inline void kcsan_disable_current(void) { }
+static inline void kcsan_enable_current(void) { }
+static inline void kcsan_enable_current_nowarn(void) { }
+static inline void kcsan_nestable_atomic_begin(void) { }
+static inline void kcsan_nestable_atomic_end(void) { }
+static inline void kcsan_flat_atomic_begin(void) { }
+static inline void kcsan_flat_atomic_end(void) { }
+static inline void kcsan_atomic_next(int n) { }
+static inline void kcsan_set_access_mask(unsigned long mask) { }
+
+struct kcsan_scoped_access { };
+#define __kcsan_cleanup_scoped __maybe_unused
+static inline struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa) { return sa; }
+static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
+
+#endif /* CONFIG_KCSAN */
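
A hedged sketch of the nestable atomic region documented above, marking a tolerated racy read in a hypothetical statistics path so KCSAN treats it as atomic instead of reporting a data race.

#include <linux/kcsan-checks.h>

static unsigned long stats_counter;     /* hypothetical, written concurrently */

static unsigned long my_read_stat(void)
{
        unsigned long v;

        kcsan_nestable_atomic_begin();
        v = stats_counter;              /* racy read, acceptable for statistics */
        kcsan_nestable_atomic_end();

        return v;
}
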
+
+#ifdef __SANITIZE_THREAD__
+/*
+ * Only calls into the runtime when the particular compilation unit has KCSAN
+ * instrumentation enabled. May be used in header files.
+ */
+#define kcsan_check_access __kcsan_check_access
+
+/*
+ * Only use these to disable KCSAN for accesses in the current compilation unit;
+ * calls into libraries may still perform KCSAN checks.
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+ int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size) \
+ __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...) do { } while (0)
+#define kcsan_check_atomic_write(...) do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size) \
+ kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#endif
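/*
 * A minimal sketch of the explicit check macros above, assuming a kernel
 * context; shared_count and the two helpers are illustrative only. In
 * practice these macros are mostly used by instrumentation wrappers
 * rather than called directly.
 */
#include <linux/kcsan-checks.h>

static unsigned long shared_count;

static void set_count(unsigned long v)
{
	/* Explicitly report this plain write to KCSAN. */
	kcsan_check_write(&shared_count, sizeof(shared_count));
	shared_count = v;
}

static unsigned long get_count(void)
{
	/* Explicitly report this plain read to KCSAN. */
	kcsan_check_read(&shared_count, sizeof(shared_count));
	return shared_count;
}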
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent code,
+ * where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer but multiple concurrent
+ * readers, all these accesses must be marked to avoid data races; yet a
+ * concurrent marked write racing with the single writer would still be a
+ * bug. Unfortunately, because such writes are marked, they are no longer
+ * data races and cannot be detected as such. For cases like these, we can
+ * use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ * WRITE_ONCE(shared_foo, ...);
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void reader(void) {
+ * // update_foo_lock does not need to be held!
+ * ... = READ_ONCE(shared_foo);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking if there is a clear scope in which no concurrent writes are expected.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for the implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */
+#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix
+#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \
+ struct kcsan_scoped_access __kcsan_scoped_name(id, _) \
+ __kcsan_cleanup_scoped; \
+ struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \
+ __maybe_unused = kcsan_begin_scoped_access( \
+ &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \
+ &__kcsan_scoped_name(id, _))
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to @var for the duration of the
+ * scope in which it is introduced. This provides a better way to fully cover
+ * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and
+ * increases the likelihood for KCSAN to detect racing accesses.
+ *
+ * For example, it allows finding race-condition bugs that only occur due to
+ * state changes within the scope itself:
+ *
+ * .. code-block:: c
+ *
+ * void writer(void) {
+ * spin_lock(&update_foo_lock);
+ * {
+ * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo);
+ * WRITE_ONCE(shared_foo, 42);
+ * ...
+ * // shared_foo should still be 42 here!
+ * }
+ * spin_unlock(&update_foo_lock);
+ * }
+ * void buggy(void) {
+ * if (READ_ONCE(shared_foo) == 42)
+ * WRITE_ONCE(shared_foo, 1); // bug!
+ * }
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__)
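/*
 * For illustration only: with __COUNTER__ evaluating to 0, the statement
 * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo) expands roughly to the
 * following, where __kcsan_cleanup_scoped (defined earlier in this
 * header) ends the scoped access when the enclosing scope is left.
 */
struct kcsan_scoped_access __kcsan_scoped_0_ __kcsan_cleanup_scoped;
struct kcsan_scoped_access *__kcsan_scoped_0_dummy_p __maybe_unused =
	kcsan_begin_scoped_access(&(shared_foo), sizeof(shared_foo),
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT,
				  &__kcsan_scoped_0_);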
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor
+ * writers). This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, consider the case where exclusive access is expected after
+ * determining that no other users of an object are left, but the object is
+ * not actually freed. We can
+ * check that this property actually holds as follows:
+ *
+ * .. code-block:: c
+ *
+ * if (refcount_dec_and_test(&obj->refcnt)) {
+ * ASSERT_EXCLUSIVE_ACCESS(*obj);
+ * do_some_cleanup(obj);
+ * release_for_reuse(obj);
+ * }
+ *
+ * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough
+ * checking if there is a clear scope in which no concurrent accesses are expected.
+ *
+ * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better
+ * fit to detect use-after-free bugs.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS(var) \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT)
+
+/**
+ * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope
+ *
+ * Scoped variant of ASSERT_EXCLUSIVE_ACCESS().
+ *
+ * Assert that there are no concurrent accesses to @var (no readers nor writers)
+ * for the entire duration of the scope in which it is introduced. This provides
+ * a better way to fully cover the enclosing scope, compared to multiple
+ * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect
+ * racing accesses.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \
+ __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__)
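/*
 * A minimal sketch reusing the object example from ASSERT_EXCLUSIVE_ACCESS()
 * above (obj, its refcount and the cleanup helpers are illustrative): the
 * scoped form keeps the exclusive-access assertion live for the whole
 * cleanup sequence instead of checking a single point.
 */
	if (refcount_dec_and_test(&obj->refcnt)) {
		ASSERT_EXCLUSIVE_ACCESS_SCOPED(*obj);
		do_some_cleanup(obj);		/* still covered by the assert */
		release_for_reuse(obj);		/* ...and so is this */
	}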
+
+/**
+ * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var
+ *
+ * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER().
+ *
+ * Assert that there are no concurrent writes to a subset of bits in @var;
+ * concurrent readers are permitted. This assertion captures more detailed
+ * bit-level properties, compared to the other (word granularity) assertions.
+ * Only the bits set in @mask are checked for concurrent modifications, while
+ * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits
+ * are ignored.
+ *
+ * Use this for variables where some bits must not be modified concurrently,
+ * yet other bits are expected to be modified concurrently.
+ *
+ * For example, variables where, after initialization, some bits are read-only,
+ * but other bits may still be modified concurrently. A reader may wish to
+ * assert that this is true as follows:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed
+ * to access the masked bits only, and KCSAN optimistically assumes it is
+ * therefore safe, even in the presence of data races, and marking it with
+ * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that
+ * it may still be advisable to do so, since we cannot reason about all compiler
+ * optimizations when it comes to bit manipulations (on the reader and writer
+ * side). If you are sure nothing can go wrong, we can write the above simply
+ * as:
+ *
+ * .. code-block:: c
+ *
+ * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK);
+ * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT;
+ *
+ * Another example where this may be used is when certain bits of @var may
+ * only be modified when holding the appropriate lock, but other bits may still
+ * be modified concurrently. Writers, where other bits may change concurrently,
+ * could use the assertion as follows:
+ *
+ * .. code-block:: c
+ *
+ * spin_lock(&foo_lock);
+ * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK);
+ * old_flags = flags;
+ * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT);
+ * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... }
+ * spin_unlock(&foo_lock);
+ *
+ * @var: variable to assert on
+ * @mask: only check for modifications to bits set in @mask
+ */
+#define ASSERT_EXCLUSIVE_BITS(var, mask) \
+ do { \
+ kcsan_set_access_mask(mask); \
+ __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\
+ kcsan_set_access_mask(0); \
+ kcsan_atomic_next(1); \
+ } while (0)
+
+#endif /* _LINUX_KCSAN_CHECKS_H */
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
new file mode 100644
index 000000000000..53340d8789f9
--- /dev/null
+++ b/include/linux/kcsan.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_KCSAN_H
+#define _LINUX_KCSAN_H
+
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KCSAN
+
+/*
+ * Context for each thread of execution: for tasks, this is stored in
+ * task_struct, and interrupts access internal per-CPU storage.
+ */
+struct kcsan_ctx {
+ int disable_count; /* disable counter */
+ int atomic_next; /* number of following atomic ops */
+
+ /*
+ * We distinguish between: (a) nestable atomic regions that may contain
+ * other nestable regions; and (b) flat atomic regions that do not keep
+ * track of nesting. Both (a) and (b) are entirely independent of each
+ * other, and a flat region may be started in a nestable region or
+ * vice-versa.
+ *
+ * This is required because, for example, in the annotations for
+ * seqlocks, we declare seqlock writer critical sections as (a) nestable
+ * atomic regions and reader critical sections as (b) flat atomic
+ * regions, yet we have encountered cases where seqlock reader critical
+ * sections are contained within writer critical sections (the opposite
+ * may be possible, too).
+ *
+ * To support these cases, we independently track the depth of nesting
+ * for (a), and whether the leaf level is flat for (b).
+ */
+ int atomic_nest_count;
+ bool in_flat_atomic;
+
+ /*
+ * Access mask for all accesses if non-zero.
+ */
+ unsigned long access_mask;
+
+ /* List of scoped accesses. */
+ struct list_head scoped_accesses;
+};
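/*
 * A minimal sketch of how the two kinds of regions tracked above may
 * nest; do_update() and do_nested_read() are illustrative stand-ins for
 * seqlock-style writer and reader annotations, not actual kernel code.
 */
#include <linux/kcsan-checks.h>

static void do_update(void) { }
static void do_nested_read(void) { }

static void writer_side(void)
{
	kcsan_nestable_atomic_begin();	/* e.g. a seqlock write section */
	do_update();

	kcsan_flat_atomic_begin();	/* a flat (reader-style) region inside */
	do_nested_read();
	kcsan_flat_atomic_end();

	kcsan_nestable_atomic_end();
}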
+
+/**
+ * kcsan_init - initialize KCSAN runtime
+ */
+void kcsan_init(void);
+
+#else /* CONFIG_KCSAN */
+
+static inline void kcsan_init(void) { }
+
+#endif /* CONFIG_KCSAN */
+
+#endif /* _LINUX_KCSAN_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 9b7a8d74a9d6..82d91547d122 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -520,6 +520,12 @@ static inline u32 int_sqrt64(u64 x)
}
#endif
+#ifdef CONFIG_SMP
+extern unsigned int sysctl_oops_all_cpu_backtrace;
+#else
+#define sysctl_oops_all_cpu_backtrace 0
+#endif /* CONFIG_SMP */
+
extern void bust_spinlocks(int yes);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
extern int panic_timeout;
@@ -528,6 +534,8 @@ extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern int panic_on_warn;
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
extern int sysctl_panic_on_rcu_stall;
extern int sysctl_panic_on_stackoverflow;
@@ -596,6 +604,7 @@ extern enum system_states {
#define TAINT_AUX 16
#define TAINT_RANDSTRUCT 17
#define TAINT_FLAGS_COUNT 18
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
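/*
 * A minimal sketch of validating a taint mask against the new
 * TAINT_FLAGS_MAX bound; set_panic_on_taint() and the check shown are
 * illustrative only, not the actual command-line parsing added by this
 * series.
 */
#include <linux/errno.h>
#include <linux/kernel.h>

static int set_panic_on_taint(unsigned long mask)
{
	if (mask & ~TAINT_FLAGS_MAX)	/* reject bits beyond the known flags */
		return -EINVAL;

	panic_on_taint = mask;
	return 0;
}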
struct taint_flag {
char c_true; /* character printed when tainted */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index fc8d83e91379..6cba088bee24 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -29,7 +29,7 @@
#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
-#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_NUM_ENVP 64 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
#ifdef CONFIG_UEVENT_HELPER
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 8bbcaad7ef0f..65b81e0c494d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -5,6 +5,8 @@
#include <linux/err.h>
#include <linux/sched.h>
+struct mm_struct;
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -57,6 +59,7 @@ bool kthread_should_stop(void);
bool kthread_should_park(void);
bool __kthread_should_park(struct task_struct *k);
bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
@@ -198,6 +201,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+void kthread_use_mm(struct mm_struct *mm);
+void kthread_unuse_mm(struct mm_struct *mm);
+
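/*
 * A minimal sketch of the new kthread_use_mm()/kthread_unuse_mm() pair,
 * assuming the caller is a kernel thread with no mm of its own; the mm,
 * user_ptr and buffer parameters are illustrative only.
 */
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>

static int kthread_copy_from_user_mm(struct mm_struct *mm,
				     const void __user *user_ptr,
				     void *buf, size_t len)
{
	int ret;

	kthread_use_mm(mm);		/* adopt the user address space */
	ret = copy_from_user(buf, user_ptr, len) ? -EFAULT : 0;
	kthread_unuse_mm(mm);		/* and drop it again */

	return ret;
}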
struct cgroup_subsys_state;
#ifdef CONFIG_BLK_CGROUP
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index e894e74905f3..2614247a9781 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -195,9 +195,6 @@ struct klp_patch {
int klp_enable_patch(struct klp_patch *);
-void arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj);
-
/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
@@ -234,6 +231,11 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
struct klp_state *klp_get_prev_state(unsigned long id);
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -242,6 +244,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}
+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symindex, unsigned int secindex,
+ const char *objname)
+{
+ return 0;
+}
+
#endif /* CONFIG_LIVEPATCH */
#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 5616b2567aa7..fb3ce6cec997 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -49,7 +49,8 @@ LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
-LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 44d5422c18e4..3e62dab77699 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -34,40 +34,48 @@
*
* Security hooks for program execution operations.
*
- * @bprm_set_creds:
- * Save security information in the bprm->security field, typically based
- * on information about the bprm->file, for later use by the apply_creds
- * hook. This hook may also optionally check permissions (e.g. for
- * transitions between security domains).
- * This hook may be called multiple times during a single execve, e.g. for
- * interpreters. The hook can tell whether it has already been called by
- * checking to see if @bprm->security is non-NULL. If so, then the hook
- * may decide either to retain the security information saved earlier or
- * to replace it. The hook must set @bprm->secureexec to 1 if a "secure
- * exec" has happened as a result of this hook call. The flag is used to
- * indicate the need for a sanitized execution environment, and is also
- * passed in the ELF auxiliary table on the initial stack to indicate
- * whether libc should enable secure mode.
+ * @bprm_creds_for_exec:
+ * If the setup in prepare_exec_creds did not set up @bprm->cred->security
+ * properly for executing @bprm->file, update the LSM's portion of
+ * @bprm->cred->security to be what commit_creds needs to install for the
+ * new program. This hook may also optionally check permissions
+ * (e.g. for transitions between security domains).
+ * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
+ * request libc enable secure mode.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_creds_from_file:
+ * If @file is setpcap, suid, sgid or otherwise marked to change
+ * privilege upon exec, update @bprm->cred to reflect that change.
+ * This is called after finding the binary that will be executed without
+ * an interpreter. This ensures that the credentials will not be derived
+ * from a script that the binary will need to reopen, which when
+ * reopened may end up being a completely different file. This
+ * hook may also optionally check permissions (e.g. for transitions
+ * between security domains).
+ * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
+ * request libc enable secure mode.
+ * The hook must add to @bprm->per_clear any personality flags that
+ * should be cleared from current->personality.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_check_security:
* This hook mediates the point when a search for a binary handler will
- * begin. It allows a check the @bprm->security value which is set in the
- * preceding set_creds call. The primary difference from set_creds is
- * that the argv list and envp list are reliably available in @bprm. This
- * hook may be called multiple times during a single execve; and in each
- * pass set_creds is called first.
+ * begin. It allows a check against the @bprm->cred->security value
+ * which was set in the preceding creds_for_exec call. The argv list and
+ * envp list are reliably available in @bprm. This hook may be called
+ * multiple times during a single execve.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_committing_creds:
* Prepare to install the new security attributes of a process being
* transformed by an execve operation, based on the old credentials
* pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
- * This hook is a good place to perform state changes on the process such
- * as closing open file descriptors to which access will no longer be
- * granted when the attributes are changed. This is called immediately
- * before commit_creds().
+ * the bprm_creds_for_exec hook. @bprm points to the linux_binprm
+ * structure. This hook is a good place to perform state changes on the
+ * process such as closing open file descriptors to which access will no
+ * longer be granted when the attributes are changed. This is called
+ * immediately before commit_creds().
* @bprm_committed_creds:
* Tidy up after the installation of the new security attributes of a
* process being transformed by an execve operation. The new credentials
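/*
 * A minimal sketch of an LSM wiring up the renamed and new bprm hooks
 * described above; the "example" names and empty hook bodies are
 * illustrative only, not part of this patch.
 */
#include <linux/binfmts.h>
#include <linux/lsm_hooks.h>

static int example_bprm_creds_for_exec(struct linux_binprm *bprm)
{
	/* Update bprm->cred->security for the new program if required. */
	return 0;
}

static int example_bprm_creds_from_file(struct linux_binprm *bprm,
					struct file *file)
{
	/* React to setuid/setgid-style privilege changes on @file. */
	return 0;
}

static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(bprm_creds_for_exec, example_bprm_creds_for_exec),
	LSM_HOOK_INIT(bprm_creds_from_file, example_bprm_creds_from_file),
};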
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 93d9ada74ddd..375515803cd8 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -314,19 +314,13 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern int remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
+extern int offline_and_remove_memory(int nid, u64 start, u64 size);
#else
-static inline bool is_mem_section_removable(unsigned long pfn,
- unsigned long nr_pages)
-{
- return false;
-}
-
static inline void try_offline_node(int nid) {}
static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
@@ -349,6 +343,8 @@ extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
+extern int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern void remove_pfn_range_from_zone(struct zone *zone,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8165278c348a..ea9c15b60a96 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -31,7 +31,7 @@ struct mm_struct;
 * Locking policy for interleave:
* In process context there is no locking because only the process accesses
* its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem.
+ * mmap_lock.
*
* Freeing policy:
* Mempolicy objects are reference counted. A mempolicy will be freed when
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index d01d1299e49d..ab76cdd06199 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -70,11 +70,11 @@ struct mfd_cell {
size_t pdata_size;
/* device properties passed to the sub devices drivers */
- struct property_entry *properties;
+ const struct property_entry *properties;
/*
* Device Tree compatible string
- * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details
*/
const char *of_compatible;
diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h
new file mode 100644
index 000000000000..89b706900b57
--- /dev/null
+++ b/include/linux/mfd/mp2629.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Monolithic Power Systems, Inc
+ */
+
+#ifndef __MP2629_H__
+#define __MP2629_H__
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct mp2629_data {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+enum mp2629_adc_chan {
+ MP2629_BATT_VOLT,
+ MP2629_SYSTEM_VOLT,
+ MP2629_INPUT_VOLT,
+ MP2629_BATT_CURRENT,
+ MP2629_INPUT_CURRENT,
+ MP2629_ADC_CHAN_END
+};
+
+#endif
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
new file mode 100644
index 000000000000..c5a11b7458d4
--- /dev/null
+++ b/include/linux/mfd/mt6358/core.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_CORE_H__
+#define __MFD_MT6358_CORE_H__
+
+#define MT6358_REG_WIDTH 16
+
+struct irq_top_t {
+ int hwirq_base;
+ unsigned int num_int_regs;
+ unsigned int num_int_bits;
+ unsigned int en_reg;
+ unsigned int en_reg_shift;
+ unsigned int sta_reg;
+ unsigned int sta_reg_shift;
+ unsigned int top_offset;
+};
+
+struct pmic_irq_data {
+ unsigned int num_top;
+ unsigned int num_pmic_irqs;
+ unsigned short top_int_status_reg;
+ bool *enable_hwirq;
+ bool *cache_hwirq;
+};
+
+enum mt6358_irq_top_status_shift {
+ MT6358_BUCK_TOP = 0,
+ MT6358_LDO_TOP,
+ MT6358_PSC_TOP,
+ MT6358_SCK_TOP,
+ MT6358_BM_TOP,
+ MT6358_HK_TOP,
+ MT6358_AUD_TOP,
+ MT6358_MISC_TOP,
+};
+
+enum mt6358_irq_numbers {
+ MT6358_IRQ_VPROC11_OC = 0,
+ MT6358_IRQ_VPROC12_OC,
+ MT6358_IRQ_VCORE_OC,
+ MT6358_IRQ_VGPU_OC,
+ MT6358_IRQ_VMODEM_OC,
+ MT6358_IRQ_VDRAM1_OC,
+ MT6358_IRQ_VS1_OC,
+ MT6358_IRQ_VS2_OC,
+ MT6358_IRQ_VPA_OC,
+ MT6358_IRQ_VCORE_PREOC,
+ MT6358_IRQ_VFE28_OC = 16,
+ MT6358_IRQ_VXO22_OC,
+ MT6358_IRQ_VRF18_OC,
+ MT6358_IRQ_VRF12_OC,
+ MT6358_IRQ_VEFUSE_OC,
+ MT6358_IRQ_VCN33_OC,
+ MT6358_IRQ_VCN28_OC,
+ MT6358_IRQ_VCN18_OC,
+ MT6358_IRQ_VCAMA1_OC,
+ MT6358_IRQ_VCAMA2_OC,
+ MT6358_IRQ_VCAMD_OC,
+ MT6358_IRQ_VCAMIO_OC,
+ MT6358_IRQ_VLDO28_OC,
+ MT6358_IRQ_VA12_OC,
+ MT6358_IRQ_VAUX18_OC,
+ MT6358_IRQ_VAUD28_OC,
+ MT6358_IRQ_VIO28_OC,
+ MT6358_IRQ_VIO18_OC,
+ MT6358_IRQ_VSRAM_PROC11_OC,
+ MT6358_IRQ_VSRAM_PROC12_OC,
+ MT6358_IRQ_VSRAM_OTHERS_OC,
+ MT6358_IRQ_VSRAM_GPU_OC,
+ MT6358_IRQ_VDRAM2_OC,
+ MT6358_IRQ_VMC_OC,
+ MT6358_IRQ_VMCH_OC,
+ MT6358_IRQ_VEMC_OC,
+ MT6358_IRQ_VSIM1_OC,
+ MT6358_IRQ_VSIM2_OC,
+ MT6358_IRQ_VIBR_OC,
+ MT6358_IRQ_VUSB_OC,
+ MT6358_IRQ_VBIF28_OC,
+ MT6358_IRQ_PWRKEY = 48,
+ MT6358_IRQ_HOMEKEY,
+ MT6358_IRQ_PWRKEY_R,
+ MT6358_IRQ_HOMEKEY_R,
+ MT6358_IRQ_NI_LBAT_INT,
+ MT6358_IRQ_CHRDET,
+ MT6358_IRQ_CHRDET_EDGE,
+ MT6358_IRQ_VCDT_HV_DET,
+ MT6358_IRQ_RTC = 64,
+ MT6358_IRQ_FG_BAT0_H = 80,
+ MT6358_IRQ_FG_BAT0_L,
+ MT6358_IRQ_FG_CUR_H,
+ MT6358_IRQ_FG_CUR_L,
+ MT6358_IRQ_FG_ZCV,
+ MT6358_IRQ_FG_BAT1_H,
+ MT6358_IRQ_FG_BAT1_L,
+ MT6358_IRQ_FG_N_CHARGE_L,
+ MT6358_IRQ_FG_IAVG_H,
+ MT6358_IRQ_FG_IAVG_L,
+ MT6358_IRQ_FG_TIME_H,
+ MT6358_IRQ_FG_DISCHARGE,
+ MT6358_IRQ_FG_CHARGE,
+ MT6358_IRQ_BATON_LV = 96,
+ MT6358_IRQ_BATON_HT,
+ MT6358_IRQ_BATON_BAT_IN,
+ MT6358_IRQ_BATON_BAT_OUT,
+ MT6358_IRQ_BIF,
+ MT6358_IRQ_BAT_H = 112,
+ MT6358_IRQ_BAT_L,
+ MT6358_IRQ_BAT2_H,
+ MT6358_IRQ_BAT2_L,
+ MT6358_IRQ_BAT_TEMP_H,
+ MT6358_IRQ_BAT_TEMP_L,
+ MT6358_IRQ_AUXADC_IMP,
+ MT6358_IRQ_NAG_C_DLTV,
+ MT6358_IRQ_AUDIO = 128,
+ MT6358_IRQ_ACCDET = 133,
+ MT6358_IRQ_ACCDET_EINT0,
+ MT6358_IRQ_ACCDET_EINT1,
+ MT6358_IRQ_SPI_CMD_ALERT = 144,
+ MT6358_IRQ_NR,
+};
+
+#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC
+#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC
+#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY
+#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC
+#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H
+#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H
+#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO
+#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT
+
+#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1)
+#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1)
+#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1)
+#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1)
+#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1)
+#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1)
+#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1)
+#define MT6358_IRQ_MISC_BITS \
+ (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1)
+
+#define MT6358_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6358_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \
+ .num_int_bits = MT6358_IRQ_##sp##_BITS, \
+ .en_reg = MT6358_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6358_##sp##_TOP, \
+}
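/*
 * A minimal sketch of how MT6358_TOP_GEN() is intended to be used to
 * build the per-top table of struct irq_top_t; the array name is an
 * illustrative assumption, while the register and enum names it expands
 * to are defined in this header and in <linux/mfd/mt6358/registers.h>
 * below.
 */
#include <linux/mfd/mt6358/core.h>
#include <linux/mfd/mt6358/registers.h>

static const struct irq_top_t mt6358_ints[] = {
	MT6358_TOP_GEN(BUCK),
	MT6358_TOP_GEN(LDO),
	MT6358_TOP_GEN(PSC),
	MT6358_TOP_GEN(SCK),
	MT6358_TOP_GEN(BM),
	MT6358_TOP_GEN(HK),
	MT6358_TOP_GEN(AUD),
	MT6358_TOP_GEN(MISC),
};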
+
+#endif /* __MFD_MT6358_CORE_H__ */
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
new file mode 100644
index 000000000000..2ad0b312aa28
--- /dev/null
+++ b/include/linux/mfd/mt6358/registers.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6358_REGISTERS_H__
+#define __MFD_MT6358_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6358_SWCID 0xa
+#define MT6358_MISC_TOP_INT_CON0 0x188
+#define MT6358_MISC_TOP_INT_STATUS0 0x194
+#define MT6358_TOP_INT_STATUS0 0x19e
+#define MT6358_SCK_TOP_INT_CON0 0x52e
+#define MT6358_SCK_TOP_INT_STATUS0 0x53a
+#define MT6358_EOSC_CALI_CON0 0x540
+#define MT6358_EOSC_CALI_CON1 0x542
+#define MT6358_RTC_MIX_CON0 0x544
+#define MT6358_RTC_MIX_CON1 0x546
+#define MT6358_RTC_MIX_CON2 0x548
+#define MT6358_RTC_DSN_ID 0x580
+#define MT6358_RTC_DSN_REV0 0x582
+#define MT6358_RTC_DBI 0x584
+#define MT6358_RTC_DXI 0x586
+#define MT6358_RTC_BBPU 0x588
+#define MT6358_RTC_IRQ_STA 0x58a
+#define MT6358_RTC_IRQ_EN 0x58c
+#define MT6358_RTC_CII_EN 0x58e
+#define MT6358_RTC_AL_MASK 0x590
+#define MT6358_RTC_TC_SEC 0x592
+#define MT6358_RTC_TC_MIN 0x594
+#define MT6358_RTC_TC_HOU 0x596
+#define MT6358_RTC_TC_DOM 0x598
+#define MT6358_RTC_TC_DOW 0x59a
+#define MT6358_RTC_TC_MTH 0x59c
+#define MT6358_RTC_TC_YEA 0x59e
+#define MT6358_RTC_AL_SEC 0x5a0
+#define MT6358_RTC_AL_MIN 0x5a2
+#define MT6358_RTC_AL_HOU 0x5a4
+#define MT6358_RTC_AL_DOM 0x5a6
+#define MT6358_RTC_AL_DOW 0x5a8
+#define MT6358_RTC_AL_MTH 0x5aa
+#define MT6358_RTC_AL_YEA 0x5ac
+#define MT6358_RTC_OSC32CON 0x5ae
+#define MT6358_RTC_POWERKEY1 0x5b0
+#define MT6358_RTC_POWERKEY2 0x5b2
+#define MT6358_RTC_PDN1 0x5b4
+#define MT6358_RTC_PDN2 0x5b6
+#define MT6358_RTC_SPAR0 0x5b8
+#define MT6358_RTC_SPAR1 0x5ba
+#define MT6358_RTC_PROT 0x5bc
+#define MT6358_RTC_DIFF 0x5be
+#define MT6358_RTC_CALI 0x5c0
+#define MT6358_RTC_WRTGR 0x5c2
+#define MT6358_RTC_CON 0x5c4
+#define MT6358_RTC_SEC_CTRL 0x5c6
+#define MT6358_RTC_INT_CNT 0x5c8
+#define MT6358_RTC_SEC_DAT0 0x5ca
+#define MT6358_RTC_SEC_DAT1 0x5cc
+#define MT6358_RTC_SEC_DAT2 0x5ce
+#define MT6358_RTC_SEC_DSN_ID 0x600
+#define MT6358_RTC_SEC_DSN_REV0 0x602
+#define MT6358_RTC_SEC_DBI 0x604
+#define MT6358_RTC_SEC_DXI 0x606
+#define MT6358_RTC_TC_SEC_SEC 0x608
+#define MT6358_RTC_TC_MIN_SEC 0x60a
+#define MT6358_RTC_TC_HOU_SEC 0x60c
+#define MT6358_RTC_TC_DOM_SEC 0x60e
+#define MT6358_RTC_TC_DOW_SEC 0x610
+#define MT6358_RTC_TC_MTH_SEC 0x612
+#define MT6358_RTC_TC_YEA_SEC 0x614
+#define MT6358_RTC_SEC_CK_PDN 0x616
+#define MT6358_RTC_SEC_WRTGR 0x618
+#define MT6358_PSC_TOP_INT_CON0 0x910
+#define MT6358_PSC_TOP_INT_STATUS0 0x91c
+#define MT6358_BM_TOP_INT_CON0 0xc32
+#define MT6358_BM_TOP_INT_CON1 0xc38
+#define MT6358_BM_TOP_INT_STATUS0 0xc4a
+#define MT6358_BM_TOP_INT_STATUS1 0xc4c
+#define MT6358_HK_TOP_INT_CON0 0xf92
+#define MT6358_HK_TOP_INT_STATUS0 0xf9e
+#define MT6358_BUCK_TOP_INT_CON0 0x1318
+#define MT6358_BUCK_TOP_INT_STATUS0 0x1324
+#define MT6358_BUCK_VPROC11_CON0 0x1388
+#define MT6358_BUCK_VPROC11_DBG0 0x139e
+#define MT6358_BUCK_VPROC11_DBG1 0x13a0
+#define MT6358_BUCK_VPROC11_ELR0 0x13a6
+#define MT6358_BUCK_VPROC12_CON0 0x1408
+#define MT6358_BUCK_VPROC12_DBG0 0x141e
+#define MT6358_BUCK_VPROC12_DBG1 0x1420
+#define MT6358_BUCK_VPROC12_ELR0 0x1426
+#define MT6358_BUCK_VCORE_CON0 0x1488
+#define MT6358_BUCK_VCORE_DBG0 0x149e
+#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_ELR0 0x14aa
+#define MT6358_BUCK_VGPU_CON0 0x1508
+#define MT6358_BUCK_VGPU_DBG0 0x151e
+#define MT6358_BUCK_VGPU_DBG1 0x1520
+#define MT6358_BUCK_VGPU_ELR0 0x1526
+#define MT6358_BUCK_VMODEM_CON0 0x1588
+#define MT6358_BUCK_VMODEM_DBG0 0x159e
+#define MT6358_BUCK_VMODEM_DBG1 0x15a0
+#define MT6358_BUCK_VMODEM_ELR0 0x15a6
+#define MT6358_BUCK_VDRAM1_CON0 0x1608
+#define MT6358_BUCK_VDRAM1_DBG0 0x161e
+#define MT6358_BUCK_VDRAM1_DBG1 0x1620
+#define MT6358_BUCK_VDRAM1_ELR0 0x1626
+#define MT6358_BUCK_VS1_CON0 0x1688
+#define MT6358_BUCK_VS1_DBG0 0x169e
+#define MT6358_BUCK_VS1_DBG1 0x16a0
+#define MT6358_BUCK_VS1_ELR0 0x16ae
+#define MT6358_BUCK_VS2_CON0 0x1708
+#define MT6358_BUCK_VS2_DBG0 0x171e
+#define MT6358_BUCK_VS2_DBG1 0x1720
+#define MT6358_BUCK_VS2_ELR0 0x172e
+#define MT6358_BUCK_VPA_CON0 0x1788
+#define MT6358_BUCK_VPA_CON1 0x178a
+#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1
+#define MT6358_BUCK_VPA_DBG0 0x1792
+#define MT6358_BUCK_VPA_DBG1 0x1794
+#define MT6358_VPROC_ANA_CON0 0x180c
+#define MT6358_VCORE_VGPU_ANA_CON0 0x1828
+#define MT6358_VMODEM_ANA_CON0 0x1888
+#define MT6358_VDRAM1_ANA_CON0 0x1896
+#define MT6358_VS1_ANA_CON0 0x18a2
+#define MT6358_VS2_ANA_CON0 0x18ae
+#define MT6358_VPA_ANA_CON0 0x18ba
+#define MT6358_LDO_TOP_INT_CON0 0x1a50
+#define MT6358_LDO_TOP_INT_CON1 0x1a56
+#define MT6358_LDO_TOP_INT_STATUS0 0x1a68
+#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a
+#define MT6358_LDO_VXO22_CON0 0x1a88
+#define MT6358_LDO_VXO22_CON1 0x1a96
+#define MT6358_LDO_VA12_CON0 0x1a9c
+#define MT6358_LDO_VA12_CON1 0x1aaa
+#define MT6358_LDO_VAUX18_CON0 0x1ab0
+#define MT6358_LDO_VAUX18_CON1 0x1abe
+#define MT6358_LDO_VAUD28_CON0 0x1ac4
+#define MT6358_LDO_VAUD28_CON1 0x1ad2
+#define MT6358_LDO_VIO28_CON0 0x1ad8
+#define MT6358_LDO_VIO28_CON1 0x1ae6
+#define MT6358_LDO_VIO18_CON0 0x1aec
+#define MT6358_LDO_VIO18_CON1 0x1afa
+#define MT6358_LDO_VDRAM2_CON0 0x1b08
+#define MT6358_LDO_VDRAM2_CON1 0x1b16
+#define MT6358_LDO_VEMC_CON0 0x1b1c
+#define MT6358_LDO_VEMC_CON1 0x1b2a
+#define MT6358_LDO_VUSB_CON0_0 0x1b30
+#define MT6358_LDO_VUSB_CON1 0x1b40
+#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46
+#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60
+#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68
+#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70
+#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72
+#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74
+#define MT6358_LDO_GON1_ELR_NUM 0x1b76
+#define MT6358_LDO_VDRAM2_ELR0 0x1b78
+#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88
+#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2
+#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4
+#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
+#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
+#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
+#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
+#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
+#define MT6358_LDO_VSRAM_CON0 0x1bee
+#define MT6358_LDO_VSRAM_CON1 0x1bf0
+#define MT6358_LDO_VSRAM_CON2 0x1bf2
+#define MT6358_LDO_VSRAM_CON3 0x1bf4
+#define MT6358_LDO_VFE28_CON0 0x1c08
+#define MT6358_LDO_VFE28_CON1 0x1c16
+#define MT6358_LDO_VFE28_CON2 0x1c18
+#define MT6358_LDO_VFE28_CON3 0x1c1a
+#define MT6358_LDO_VRF18_CON0 0x1c1c
+#define MT6358_LDO_VRF18_CON1 0x1c2a
+#define MT6358_LDO_VRF18_CON2 0x1c2c
+#define MT6358_LDO_VRF18_CON3 0x1c2e
+#define MT6358_LDO_VRF12_CON0 0x1c30
+#define MT6358_LDO_VRF12_CON1 0x1c3e
+#define MT6358_LDO_VRF12_CON2 0x1c40
+#define MT6358_LDO_VRF12_CON3 0x1c42
+#define MT6358_LDO_VEFUSE_CON0 0x1c44
+#define MT6358_LDO_VEFUSE_CON1 0x1c52
+#define MT6358_LDO_VEFUSE_CON2 0x1c54
+#define MT6358_LDO_VEFUSE_CON3 0x1c56
+#define MT6358_LDO_VCN18_CON0 0x1c58
+#define MT6358_LDO_VCN18_CON1 0x1c66
+#define MT6358_LDO_VCN18_CON2 0x1c68
+#define MT6358_LDO_VCN18_CON3 0x1c6a
+#define MT6358_LDO_VCAMA1_CON0 0x1c6c
+#define MT6358_LDO_VCAMA1_CON1 0x1c7a
+#define MT6358_LDO_VCAMA1_CON2 0x1c7c
+#define MT6358_LDO_VCAMA1_CON3 0x1c7e
+#define MT6358_LDO_VCAMA2_CON0 0x1c88
+#define MT6358_LDO_VCAMA2_CON1 0x1c96
+#define MT6358_LDO_VCAMA2_CON2 0x1c98
+#define MT6358_LDO_VCAMA2_CON3 0x1c9a
+#define MT6358_LDO_VCAMD_CON0 0x1c9c
+#define MT6358_LDO_VCAMD_CON1 0x1caa
+#define MT6358_LDO_VCAMD_CON2 0x1cac
+#define MT6358_LDO_VCAMD_CON3 0x1cae
+#define MT6358_LDO_VCAMIO_CON0 0x1cb0
+#define MT6358_LDO_VCAMIO_CON1 0x1cbe
+#define MT6358_LDO_VCAMIO_CON2 0x1cc0
+#define MT6358_LDO_VCAMIO_CON3 0x1cc2
+#define MT6358_LDO_VMC_CON0 0x1cc4
+#define MT6358_LDO_VMC_CON1 0x1cd2
+#define MT6358_LDO_VMC_CON2 0x1cd4
+#define MT6358_LDO_VMC_CON3 0x1cd6
+#define MT6358_LDO_VMCH_CON0 0x1cd8
+#define MT6358_LDO_VMCH_CON1 0x1ce6
+#define MT6358_LDO_VMCH_CON2 0x1ce8
+#define MT6358_LDO_VMCH_CON3 0x1cea
+#define MT6358_LDO_VIBR_CON0 0x1d08
+#define MT6358_LDO_VIBR_CON1 0x1d16
+#define MT6358_LDO_VIBR_CON2 0x1d18
+#define MT6358_LDO_VIBR_CON3 0x1d1a
+#define MT6358_LDO_VCN33_CON0_0 0x1d1c
+#define MT6358_LDO_VCN33_CON0_1 0x1d2a
+#define MT6358_LDO_VCN33_CON1 0x1d2c
+#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1
+#define MT6358_LDO_VCN33_CON2 0x1d2e
+#define MT6358_LDO_VCN33_CON3 0x1d30
+#define MT6358_LDO_VLDO28_CON0_0 0x1d32
+#define MT6358_LDO_VLDO28_CON0_1 0x1d40
+#define MT6358_LDO_VLDO28_CON1 0x1d42
+#define MT6358_LDO_VLDO28_CON2 0x1d44
+#define MT6358_LDO_VLDO28_CON3 0x1d46
+#define MT6358_LDO_VSIM1_CON0 0x1d48
+#define MT6358_LDO_VSIM1_CON1 0x1d56
+#define MT6358_LDO_VSIM1_CON2 0x1d58
+#define MT6358_LDO_VSIM1_CON3 0x1d5a
+#define MT6358_LDO_VSIM2_CON0 0x1d5c
+#define MT6358_LDO_VSIM2_CON1 0x1d6a
+#define MT6358_LDO_VSIM2_CON2 0x1d6c
+#define MT6358_LDO_VSIM2_CON3 0x1d6e
+#define MT6358_LDO_VCN28_CON0 0x1d88
+#define MT6358_LDO_VCN28_CON1 0x1d96
+#define MT6358_LDO_VCN28_CON2 0x1d98
+#define MT6358_LDO_VCN28_CON3 0x1d9a
+#define MT6358_VRTC28_CON0 0x1d9c
+#define MT6358_LDO_VBIF28_CON0 0x1d9e
+#define MT6358_LDO_VBIF28_CON1 0x1dac
+#define MT6358_LDO_VBIF28_CON2 0x1dae
+#define MT6358_LDO_VBIF28_CON3 0x1db0
+#define MT6358_VCAMA1_ANA_CON0 0x1e08
+#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VCN33_ANA_CON0 0x1e28
+#define MT6358_VSIM1_ANA_CON0 0x1e2c
+#define MT6358_VSIM2_ANA_CON0 0x1e30
+#define MT6358_VUSB_ANA_CON0 0x1e34
+#define MT6358_VEMC_ANA_CON0 0x1e38
+#define MT6358_VLDO28_ANA_CON0 0x1e3c
+#define MT6358_VIO28_ANA_CON0 0x1e40
+#define MT6358_VIBR_ANA_CON0 0x1e44
+#define MT6358_VMCH_ANA_CON0 0x1e48
+#define MT6358_VMC_ANA_CON0 0x1e4c
+#define MT6358_VRF18_ANA_CON0 0x1e88
+#define MT6358_VCN18_ANA_CON0 0x1e8c
+#define MT6358_VCAMIO_ANA_CON0 0x1e90
+#define MT6358_VIO18_ANA_CON0 0x1e94
+#define MT6358_VEFUSE_ANA_CON0 0x1e98
+#define MT6358_VRF12_ANA_CON0 0x1e9c
+#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0
+#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4
+#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6
+#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8
+#define MT6358_VDRAM2_ANA_CON0 0x1eaa
+#define MT6358_VCAMD_ANA_CON0 0x1eae
+#define MT6358_VA12_ANA_CON0 0x1eb2
+#define MT6358_AUD_TOP_INT_CON0 0x2228
+#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+
+#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h
new file mode 100644
index 000000000000..ea1304035d4d
--- /dev/null
+++ b/include/linux/mfd/mt6360.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ */
+
+#ifndef __MT6360_H__
+#define __MT6360_H__
+
+#include <linux/regmap.h>
+
+enum {
+ MT6360_SLAVE_PMU = 0,
+ MT6360_SLAVE_PMIC,
+ MT6360_SLAVE_LDO,
+ MT6360_SLAVE_TCPC,
+ MT6360_SLAVE_MAX,
+};
+
+#define MT6360_PMU_SLAVEID (0x34)
+#define MT6360_PMIC_SLAVEID (0x1A)
+#define MT6360_LDO_SLAVEID (0x64)
+#define MT6360_TCPC_SLAVEID (0x4E)
+
+struct mt6360_pmu_data {
+ struct i2c_client *i2c[MT6360_SLAVE_MAX];
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ unsigned int chip_rev;
+};
+
+/* PMU register definition */
+#define MT6360_PMU_DEV_INFO (0x00)
+#define MT6360_PMU_CORE_CTRL1 (0x01)
+#define MT6360_PMU_RST1 (0x02)
+#define MT6360_PMU_CRCEN (0x03)
+#define MT6360_PMU_RST_PAS_CODE1 (0x04)
+#define MT6360_PMU_RST_PAS_CODE2 (0x05)
+#define MT6360_PMU_CORE_CTRL2 (0x06)
+#define MT6360_PMU_TM_PAS_CODE1 (0x07)
+#define MT6360_PMU_TM_PAS_CODE2 (0x08)
+#define MT6360_PMU_TM_PAS_CODE3 (0x09)
+#define MT6360_PMU_TM_PAS_CODE4 (0x0A)
+#define MT6360_PMU_IRQ_IND (0x0B)
+#define MT6360_PMU_IRQ_MASK (0x0C)
+#define MT6360_PMU_IRQ_SET (0x0D)
+#define MT6360_PMU_SHDN_CTRL (0x0E)
+#define MT6360_PMU_TM_INF (0x0F)
+#define MT6360_PMU_I2C_CTRL (0x10)
+#define MT6360_PMU_CHG_CTRL1 (0x11)
+#define MT6360_PMU_CHG_CTRL2 (0x12)
+#define MT6360_PMU_CHG_CTRL3 (0x13)
+#define MT6360_PMU_CHG_CTRL4 (0x14)
+#define MT6360_PMU_CHG_CTRL5 (0x15)
+#define MT6360_PMU_CHG_CTRL6 (0x16)
+#define MT6360_PMU_CHG_CTRL7 (0x17)
+#define MT6360_PMU_CHG_CTRL8 (0x18)
+#define MT6360_PMU_CHG_CTRL9 (0x19)
+#define MT6360_PMU_CHG_CTRL10 (0x1A)
+#define MT6360_PMU_CHG_CTRL11 (0x1B)
+#define MT6360_PMU_CHG_CTRL12 (0x1C)
+#define MT6360_PMU_CHG_CTRL13 (0x1D)
+#define MT6360_PMU_CHG_CTRL14 (0x1E)
+#define MT6360_PMU_CHG_CTRL15 (0x1F)
+#define MT6360_PMU_CHG_CTRL16 (0x20)
+#define MT6360_PMU_CHG_AICC_RESULT (0x21)
+#define MT6360_PMU_DEVICE_TYPE (0x22)
+#define MT6360_PMU_QC_CONTROL1 (0x23)
+#define MT6360_PMU_QC_CONTROL2 (0x24)
+#define MT6360_PMU_QC30_CONTROL1 (0x25)
+#define MT6360_PMU_QC30_CONTROL2 (0x26)
+#define MT6360_PMU_USB_STATUS1 (0x27)
+#define MT6360_PMU_QC_STATUS1 (0x28)
+#define MT6360_PMU_QC_STATUS2 (0x29)
+#define MT6360_PMU_CHG_PUMP (0x2A)
+#define MT6360_PMU_CHG_CTRL17 (0x2B)
+#define MT6360_PMU_CHG_CTRL18 (0x2C)
+#define MT6360_PMU_CHRDET_CTRL1 (0x2D)
+#define MT6360_PMU_CHRDET_CTRL2 (0x2E)
+#define MT6360_PMU_DPDN_CTRL (0x2F)
+#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30)
+#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31)
+#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32)
+#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33)
+#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34)
+#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35)
+#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36)
+#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37)
+#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38)
+#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39)
+#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A)
+#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B)
+#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C)
+#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D)
+#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E)
+#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F)
+#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40)
+#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41)
+#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42)
+#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43)
+#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44)
+#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45)
+#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46)
+#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47)
+#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48)
+#define MT6360_PMU_BC12_CTRL (0x49)
+#define MT6360_PMU_CHG_STAT (0x4A)
+#define MT6360_PMU_RESV1 (0x4B)
+#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E)
+#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F)
+#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50)
+#define MT6360_PMU_TYPEC_OTP_CTRL (0x51)
+#define MT6360_PMU_ADC_BAT_DATA_H (0x52)
+#define MT6360_PMU_ADC_BAT_DATA_L (0x53)
+#define MT6360_PMU_IMID_BACKBST_ON (0x54)
+#define MT6360_PMU_IMID_BACKBST_OFF (0x55)
+#define MT6360_PMU_ADC_CONFIG (0x56)
+#define MT6360_PMU_ADC_EN2 (0x57)
+#define MT6360_PMU_ADC_IDLE_T (0x58)
+#define MT6360_PMU_ADC_RPT_1 (0x5A)
+#define MT6360_PMU_ADC_RPT_2 (0x5B)
+#define MT6360_PMU_ADC_RPT_3 (0x5C)
+#define MT6360_PMU_ADC_RPT_ORG1 (0x5D)
+#define MT6360_PMU_ADC_RPT_ORG2 (0x5E)
+#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F)
+#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60)
+#define MT6360_PMU_CHG_CTRL19 (0x61)
+#define MT6360_PMU_VDDASUPPLY (0x62)
+#define MT6360_PMU_BC12_MANUAL (0x63)
+#define MT6360_PMU_CHGDET_FUNC (0x64)
+#define MT6360_PMU_FOD_CTRL (0x65)
+#define MT6360_PMU_CHG_CTRL20 (0x66)
+#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67)
+#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68)
+#define MT6360_PMU_RESV2 (0x69)
+#define MT6360_PMU_USBID_CTRL1 (0x6D)
+#define MT6360_PMU_USBID_CTRL2 (0x6E)
+#define MT6360_PMU_USBID_CTRL3 (0x6F)
+#define MT6360_PMU_FLED_CFG (0x70)
+#define MT6360_PMU_RESV3 (0x71)
+#define MT6360_PMU_FLED1_CTRL (0x72)
+#define MT6360_PMU_FLED_STRB_CTRL (0x73)
+#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74)
+#define MT6360_PMU_FLED1_TOR_CTRL (0x75)
+#define MT6360_PMU_FLED2_CTRL (0x76)
+#define MT6360_PMU_RESV4 (0x77)
+#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78)
+#define MT6360_PMU_FLED2_TOR_CTRL (0x79)
+#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A)
+#define MT6360_PMU_FLED_VMID_RTM (0x7B)
+#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C)
+#define MT6360_PMU_FLED_PWSEL (0x7D)
+#define MT6360_PMU_FLED_EN (0x7E)
+#define MT6360_PMU_FLED_Hidden1 (0x7F)
+#define MT6360_PMU_RGB_EN (0x80)
+#define MT6360_PMU_RGB1_ISNK (0x81)
+#define MT6360_PMU_RGB2_ISNK (0x82)
+#define MT6360_PMU_RGB3_ISNK (0x83)
+#define MT6360_PMU_RGB_ML_ISNK (0x84)
+#define MT6360_PMU_RGB1_DIM (0x85)
+#define MT6360_PMU_RGB2_DIM (0x86)
+#define MT6360_PMU_RGB3_DIM (0x87)
+#define MT6360_PMU_RESV5 (0x88)
+#define MT6360_PMU_RGB12_Freq (0x89)
+#define MT6360_PMU_RGB34_Freq (0x8A)
+#define MT6360_PMU_RGB1_Tr (0x8B)
+#define MT6360_PMU_RGB1_Tf (0x8C)
+#define MT6360_PMU_RGB1_TON_TOFF (0x8D)
+#define MT6360_PMU_RGB2_Tr (0x8E)
+#define MT6360_PMU_RGB2_Tf (0x8F)
+#define MT6360_PMU_RGB2_TON_TOFF (0x90)
+#define MT6360_PMU_RGB3_Tr (0x91)
+#define MT6360_PMU_RGB3_Tf (0x92)
+#define MT6360_PMU_RGB3_TON_TOFF (0x93)
+#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94)
+#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95)
+#define MT6360_PMU_RESV6 (0x97)
+#define MT6360_PMU_SPARE1 (0x9A)
+#define MT6360_PMU_SPARE2 (0xA0)
+#define MT6360_PMU_SPARE3 (0xB0)
+#define MT6360_PMU_SPARE4 (0xC0)
+#define MT6360_PMU_CHG_IRQ1 (0xD0)
+#define MT6360_PMU_CHG_IRQ2 (0xD1)
+#define MT6360_PMU_CHG_IRQ3 (0xD2)
+#define MT6360_PMU_CHG_IRQ4 (0xD3)
+#define MT6360_PMU_CHG_IRQ5 (0xD4)
+#define MT6360_PMU_CHG_IRQ6 (0xD5)
+#define MT6360_PMU_QC_IRQ (0xD6)
+#define MT6360_PMU_FOD_IRQ (0xD7)
+#define MT6360_PMU_BASE_IRQ (0xD8)
+#define MT6360_PMU_FLED_IRQ1 (0xD9)
+#define MT6360_PMU_FLED_IRQ2 (0xDA)
+#define MT6360_PMU_RGB_IRQ (0xDB)
+#define MT6360_PMU_BUCK1_IRQ (0xDC)
+#define MT6360_PMU_BUCK2_IRQ (0xDD)
+#define MT6360_PMU_LDO_IRQ1 (0xDE)
+#define MT6360_PMU_LDO_IRQ2 (0xDF)
+#define MT6360_PMU_CHG_STAT1 (0xE0)
+#define MT6360_PMU_CHG_STAT2 (0xE1)
+#define MT6360_PMU_CHG_STAT3 (0xE2)
+#define MT6360_PMU_CHG_STAT4 (0xE3)
+#define MT6360_PMU_CHG_STAT5 (0xE4)
+#define MT6360_PMU_CHG_STAT6 (0xE5)
+#define MT6360_PMU_QC_STAT (0xE6)
+#define MT6360_PMU_FOD_STAT (0xE7)
+#define MT6360_PMU_BASE_STAT (0xE8)
+#define MT6360_PMU_FLED_STAT1 (0xE9)
+#define MT6360_PMU_FLED_STAT2 (0xEA)
+#define MT6360_PMU_RGB_STAT (0xEB)
+#define MT6360_PMU_BUCK1_STAT (0xEC)
+#define MT6360_PMU_BUCK2_STAT (0xED)
+#define MT6360_PMU_LDO_STAT1 (0xEE)
+#define MT6360_PMU_LDO_STAT2 (0xEF)
+#define MT6360_PMU_CHG_MASK1 (0xF0)
+#define MT6360_PMU_CHG_MASK2 (0xF1)
+#define MT6360_PMU_CHG_MASK3 (0xF2)
+#define MT6360_PMU_CHG_MASK4 (0xF3)
+#define MT6360_PMU_CHG_MASK5 (0xF4)
+#define MT6360_PMU_CHG_MASK6 (0xF5)
+#define MT6360_PMU_QC_MASK (0xF6)
+#define MT6360_PMU_FOD_MASK (0xF7)
+#define MT6360_PMU_BASE_MASK (0xF8)
+#define MT6360_PMU_FLED_MASK1 (0xF9)
+#define MT6360_PMU_FLED_MASK2 (0xFA)
+#define MT6360_PMU_FAULTB_MASK (0xFB)
+#define MT6360_PMU_BUCK1_MASK (0xFC)
+#define MT6360_PMU_BUCK2_MASK (0xFD)
+#define MT6360_PMU_LDO_MASK1 (0xFE)
+#define MT6360_PMU_LDO_MASK2 (0xFF)
+#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2)
+
+/* MT6360_PMU_IRQ_SET */
+#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1)
+#define MT6360_IRQ_RETRIG BIT(2)
+
+#define CHIP_VEN_MASK (0xF0)
+#define CHIP_VEN_MT6360 (0x50)
+#define CHIP_REV_MASK (0x0F)
+
+#endif /* __MT6360_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index fc88d315bdde..949268581b36 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -8,9 +8,11 @@
#define __MFD_MT6397_CORE_H__
#include <linux/mutex.h>
+#include <linux/notifier.h>
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6358_CHIP_ID = 0x58,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
@@ -54,6 +56,7 @@ enum mt6397_irq_numbers {
struct mt6397_chip {
struct device *dev;
struct regmap *regmap;
+ struct notifier_block pm_nb;
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
@@ -63,8 +66,10 @@ struct mt6397_chip {
u16 int_con[2];
u16 int_status[2];
u16 chip_id;
+ void *irq_data;
};
+int mt6358_irq_init(struct mt6397_chip *chip);
int mt6397_irq_init(struct mt6397_chip *chip);
#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index 7dfb63b81373..66989a16221a 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -18,7 +18,9 @@
#define RTC_BBPU_CBUSY BIT(6)
#define RTC_BBPU_KEY (0x43 << 8)
-#define RTC_WRTGR 0x003c
+#define RTC_WRTGR_MT6358 0x003a
+#define RTC_WRTGR_MT6397 0x003c
+#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397
#define RTC_IRQ_STA 0x0002
#define RTC_IRQ_STA_AL BIT(0)
@@ -65,6 +67,10 @@
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+struct mtk_rtc_data {
+ u32 wrtgr;
+};
+
struct mt6397_rtc {
struct device *dev;
struct rtc_device *rtc_dev;
@@ -74,6 +80,7 @@ struct mt6397_rtc {
struct regmap *regmap;
int irq;
u32 addr_base;
+ const struct mtk_rtc_data *data;
};
#endif /* _LINUX_MFD_MT6397_RTC_H_ */
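/*
 * A minimal sketch of how the per-chip mtk_rtc_data added above might be
 * consumed to pick the right write-trigger offset; the mt6358 instance
 * and the helper function are illustrative only.
 */
#include <linux/mfd/mt6397/rtc.h>
#include <linux/regmap.h>

static const struct mtk_rtc_data mt6358_rtc_data = {
	.wrtgr = RTC_WRTGR_MT6358,
};

static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
{
	/* Commit pending RTC register writes on this chip variant. */
	return regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
}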
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
index 3c67983678ec..744dce63946e 100644
--- a/include/linux/mfd/stmfx.h
+++ b/include/linux/mfd/stmfx.h
@@ -109,6 +109,7 @@ struct stmfx {
struct device *dev;
struct regmap *map;
struct regulator *vdd;
+ int irq;
struct irq_domain *irq_domain;
struct mutex lock; /* IRQ bus lock */
u8 irq_src;
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 3d7c3c26eeb9..c4a940d98912 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -331,8 +331,6 @@ struct mhi_controller_config {
* @wlock: Lock for protecting device wakeup
* @mhi_link_info: Device bandwidth info
* @st_worker: State transition worker
- * @fw_worker: Firmware download worker
- * @syserr_worker: System error worker
* @state_event: State change event
* @status_cb: CB function to notify power states of the device (required)
* @wake_get: CB function to assert device wake (optional)
@@ -412,8 +410,6 @@ struct mhi_controller {
spinlock_t wlock;
struct mhi_link_info mhi_link_info;
struct work_struct st_worker;
- struct work_struct fw_worker;
- struct work_struct syserr_worker;
wait_queue_head_t state_event;
void (*status_cb)(struct mhi_controller *mhi_cntrl,
@@ -573,6 +569,13 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
enum mhi_state state);
/**
+ * mhi_notify - Notify the MHI client driver about client device status
+ * @mhi_dev: MHI device instance
+ * @cb_reason: MHI callback reason
+ */
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
+
+/**
* mhi_prepare_for_power_up - Do pre-initialization before power up.
* This is optional, call this before power up if
* the controller does not want bus framework to
@@ -609,6 +612,18 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 20372de0b587..06e066e04a4b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -573,7 +573,6 @@ struct mlx4_caps {
int reserved_eqs;
int num_comp_vectors;
int num_mpts;
- int max_fmr_maps;
int num_mtts;
int fmr_reserved_mtts;
int reserved_mtts;
@@ -707,17 +706,6 @@ struct mlx4_mw {
int enabled;
};
-struct mlx4_fmr {
- struct mlx4_mr mr;
- struct mlx4_mpt_entry *mpt;
- __be64 *mtts;
- dma_addr_t dma_handle;
- int max_pages;
- int max_maps;
- int maps;
- u8 page_shift;
-};
-
struct mlx4_uar {
unsigned long pfn;
int index;
@@ -1412,14 +1400,6 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
- int npages, u64 iova, u32 *lkey, u32 *rkey);
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
- int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
- u32 *lkey, u32 *rkey);
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
int mlx4_test_async(struct mlx4_dev *dev);
@@ -1522,6 +1502,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enable);
+
+struct mlx4_mpt_entry;
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 1a56dc079c32..116bd9bb347f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1321,7 +1321,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 stat_rate_support[0x10];
u8 reserved_at_1f0[0x1];
u8 pci_sync_for_fw_update_event[0x1];
- u8 reserved_at_1f2[0xa];
+ u8 reserved_at_1f2[0x6];
+ u8 init2_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1fa[0x3];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -3688,7 +3690,8 @@ struct mlx5_ifc_dctc_bits {
u8 ecn[0x2];
u8 dscp[0x6];
- u8 reserved_at_1c0[0x40];
+ u8 reserved_at_1c0[0x20];
+ u8 ece[0x20];
};
enum {
@@ -7938,7 +7941,7 @@ struct mlx5_ifc_create_dct_out_bits {
u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_at_60[0x20];
+ u8 ece[0x20];
};
struct mlx5_ifc_create_dct_in_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 1af5e460b5f6..b8992b861ae6 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -66,6 +66,7 @@ enum mlx5_qp_optpar {
MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15,
MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
MLX5_QP_OPTPAR_SRQN = 1 << 18,
MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
@@ -321,6 +322,7 @@ struct mlx5_av {
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
+ u8 xmit_port;
};
static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
@@ -493,72 +495,6 @@ struct mlx5_core_dct {
struct completion drained;
};
-struct mlx5_qp_path {
- u8 fl_free_ar;
- u8 rsvd3;
- __be16 pkey_index;
- u8 rsvd0;
- u8 grh_mlid;
- __be16 rlid;
- u8 ackto_lt;
- u8 mgid_index;
- u8 static_rate;
- u8 hop_limit;
- __be32 tclass_flowlabel;
- union {
- u8 rgid[16];
- u8 rip[16];
- };
- u8 f_dscp_ecn_prio;
- u8 ecn_dscp;
- __be16 udp_sport;
- u8 dci_cfi_prio_sl;
- u8 port;
- u8 rmac[6];
-};
-
-/* FIXME: use mlx5_ifc.h qpc */
-struct mlx5_qp_context {
- __be32 flags;
- __be32 flags_pd;
- u8 mtu_msgmax;
- u8 rq_size_stride;
- __be16 sq_crq_size;
- __be32 qp_counter_set_usr_page;
- __be32 wire_qpn;
- __be32 log_pg_sz_remote_qpn;
- struct mlx5_qp_path pri_path;
- struct mlx5_qp_path alt_path;
- __be32 params1;
- u8 reserved2[4];
- __be32 next_send_psn;
- __be32 cqn_send;
- __be32 deth_sqpn;
- u8 reserved3[4];
- __be32 last_acked_psn;
- __be32 ssn;
- __be32 params2;
- __be32 rnr_nextrecvpsn;
- __be32 xrcd;
- __be32 cqn_recv;
- __be64 db_rec_addr;
- __be32 qkey;
- __be32 rq_type_srqn;
- __be32 rmsn;
- __be16 hw_sq_wqe_counter;
- __be16 sw_sq_wqe_counter;
- __be16 hw_rcyclic_byte_counter;
- __be16 hw_rq_counter;
- __be16 sw_rcyclic_byte_counter;
- __be16 sw_rq_counter;
- u8 rsvd0[5];
- u8 cgs;
- u8 cs_req;
- u8 cs_res;
- __be64 dc_access_key;
- u8 rsvd1[24];
-};
-
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 66e0977f970a..dc7b87310c10 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
@@ -28,6 +29,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
+#include <linux/pgtable.h>
struct mempolicy;
struct anon_vma;
@@ -92,7 +94,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
/*
@@ -401,7 +402,7 @@ extern pgprot_t protection_map[16];
* @FAULT_FLAG_WRITE: Fault was a write fault.
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
* @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
* @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
* @FAULT_FLAG_TRIED: The fault has been tried once.
* @FAULT_FLAG_USER: The fault originated in userspace.
@@ -451,10 +452,10 @@ extern pgprot_t protection_map[16];
* fault_flag_allow_retry_first - check ALLOW_RETRY the first time
*
* This is mostly used for places where we want to try to avoid taking
- * the mmap_sem for too long a time when waiting for another condition
+ * the mmap_lock for too long a time when waiting for another condition
* to change, in which case we can try to be polite to release the
- * mmap_sem in the first round to avoid potential starvation of other
- * processes that would also want the mmap_sem.
+ * mmap_lock in the first round to avoid potential starvation of other
+ * processes that would also want the mmap_lock.
*
* Return: true if the page fault allows retry and this is the first
* attempt of the fault handling; false otherwise.
@@ -581,7 +582,7 @@ struct vm_operations_struct {
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
- * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
@@ -776,6 +777,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
}
extern void kvfree(const void *addr);
+extern void kvfree_sensitive(const void *addr, size_t len);
/*
* Mapcount of compound page as a whole, does not include mapped sub-pages.
@@ -1372,7 +1374,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid)
static inline bool cpupid_pid_unset(int cpupid)
{
- return 1;
+ return true;
}
static inline void page_cpupid_reset_last(struct page *page)
@@ -1705,6 +1707,8 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
+long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1725,7 +1729,7 @@ struct frame_vector {
unsigned int nr_frames; /* Number of frames stored in ptrs array */
bool got_ref; /* Did we pin pages by getting page ref? */
bool is_pfns; /* Does array contain pages or pfns? */
- void *ptrs[0]; /* Array of pinned pfns / pages. Use
+ void *ptrs[]; /* Array of pinned pfns / pages. Use
* pfns_vector_pages() or pfns_vector_pfns()
* for access */
};
@@ -1823,10 +1827,16 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
/*
* doesn't attempt to fault and will return short.
*/
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+
+static inline bool get_user_page_fast_only(unsigned long addr,
+ unsigned int gup_flags, struct page **pagep)
+{
+ return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
+}
/*
* per-process(per-mm_struct) statistics.
*/
@@ -2069,11 +2079,6 @@ int __pte_alloc_kernel(pmd_t *pmd);
#if defined(CONFIG_MMU)
-/*
- * The following ifdef needed to get the 5level-fixup.h header to work.
- * Remove it when 5level-fixup.h has been removed.
- */
-#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
@@ -2102,8 +2107,6 @@ static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
return p4d_offset(pgd, address);
}
-#endif /* !__ARCH_HAS_5LEVEL_HACK */
-
static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
unsigned long address,
pgtbl_mod_mask *mod_mask)
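
A minimal usage sketch of the renamed fast-only GUP helper added above; try_grab_user_page() and its fallback policy are hypothetical and not part of this patch.

static struct page *try_grab_user_page(unsigned long addr, bool write)
{
	struct page *page;

	/* No faulting, no mmap_lock; caller falls back to get_user_pages() on failure */
	if (!get_user_page_fast_only(addr, write ? FOLL_WRITE : 0, &page))
		return NULL;
	return page;
}
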
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ef6d3aface8a..64ede5f150dc 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -344,7 +344,7 @@ struct vm_area_struct {
* can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
* or brk vma (with NULL file) can only be in an anon_vma list.
*/
- struct list_head anon_vma_chain; /* Serialized by mmap_sem &
+ struct list_head anon_vma_chain; /* Serialized by mmap_lock &
* page_table_lock */
struct anon_vma *anon_vma; /* Serialized by page_table_lock */
@@ -440,7 +440,7 @@ struct mm_struct {
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
- struct rw_semaphore mmap_sem;
+ struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
* are globally strung together off
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
new file mode 100644
index 000000000000..0707671851a8
--- /dev/null
+++ b/include/linux/mmap_lock.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_MMAP_LOCK_H
+#define _LINUX_MMAP_LOCK_H
+
+#include <linux/mmdebug.h>
+
+#define MMAP_LOCK_INITIALIZER(name) \
+ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
+
+static inline void mmap_init_lock(struct mm_struct *mm)
+{
+ init_rwsem(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock(struct mm_struct *mm)
+{
+ down_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
+{
+ down_write_nested(&mm->mmap_lock, subclass);
+}
+
+static inline int mmap_write_lock_killable(struct mm_struct *mm)
+{
+ return down_write_killable(&mm->mmap_lock);
+}
+
+static inline bool mmap_write_trylock(struct mm_struct *mm)
+{
+ return down_write_trylock(&mm->mmap_lock) != 0;
+}
+
+static inline void mmap_write_unlock(struct mm_struct *mm)
+{
+ up_write(&mm->mmap_lock);
+}
+
+static inline void mmap_write_downgrade(struct mm_struct *mm)
+{
+ downgrade_write(&mm->mmap_lock);
+}
+
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ down_read(&mm->mmap_lock);
+}
+
+static inline int mmap_read_lock_killable(struct mm_struct *mm)
+{
+ return down_read_killable(&mm->mmap_lock);
+}
+
+static inline bool mmap_read_trylock(struct mm_struct *mm)
+{
+ return down_read_trylock(&mm->mmap_lock) != 0;
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ up_read(&mm->mmap_lock);
+}
+
+static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
+{
+ if (down_read_trylock(&mm->mmap_lock)) {
+ rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
+ return true;
+ }
+ return false;
+}
+
+static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
+{
+ up_read_non_owner(&mm->mmap_lock);
+}
+
+static inline void mmap_assert_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+static inline void mmap_assert_write_locked(struct mm_struct *mm)
+{
+ lockdep_assert_held_write(&mm->mmap_lock);
+ VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+}
+
+#endif /* _LINUX_MMAP_LOCK_H */
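
As a usage sketch (assuming the mm->mmap/vm_next VMA list of this kernel), the open-coded rwsem idiom maps onto the new wrappers as follows; count_vmas() is illustrative only.

static unsigned int count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned int nr = 0;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		nr++;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return nr;
}
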
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index d9a543a9e1cc..c51a84132d7c 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -4,11 +4,6 @@
#include <asm/mmu_context.h>
-struct mm_struct;
-
-void use_mm(struct mm_struct *mm);
-void unuse_mm(struct mm_struct *mm);
-
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 736f6918335e..fc68f3570e19 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>
@@ -121,7 +122,7 @@ struct mmu_notifier_ops {
/*
* invalidate_range_start() and invalidate_range_end() must be
- * paired and are called only when the mmap_sem and/or the
+ * paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
* can't guarantee that no additional references are taken to
* the pages in the range, it has to implement the
@@ -212,13 +213,13 @@ struct mmu_notifier_ops {
};
/*
- * The notifier chains are protected by mmap_sem and/or the reverse map
+ * The notifier chains are protected by mmap_lock and/or the reverse map
* semaphores. Notifier chains are only changed when all reverse maps and
- * the mmap_sem locks are taken.
+ * the mmap_lock locks are taken.
*
* Therefore notifier chains can only be traversed when either
*
- * 1. mmap_sem is held.
+ * 1. mmap_lock is held.
* 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
* 3. No other concurrent thread can access the list (release)
*/
@@ -277,9 +278,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
struct mmu_notifier *ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = mmu_notifier_get_locked(ops, mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
void mmu_notifier_put(struct mmu_notifier *subscription);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index df1f08486d81..c4c37fd12104 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -660,9 +660,21 @@ struct deferred_split {
* per-zone basis.
*/
typedef struct pglist_data {
+ /*
+ * node_zones contains just the zones for THIS node. Not all of the
+ * zones may be populated, but it is the full list. It is referenced by
+ * this node's node_zonelists as well as other nodes' node_zonelists.
+ */
struct zone node_zones[MAX_NR_ZONES];
+
+ /*
+ * node_zonelists contains references to all zones in all nodes.
+ * Generally the first zones will be references to this node's
+ * node_zones.
+ */
struct zonelist node_zonelists[MAX_ZONELISTS];
- int nr_zones;
+
+ int nr_zones; /* number of populated zones in this node */
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 580e554fb6dc..8d764aab29de 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -532,6 +532,8 @@ enum dmi_field {
DMI_BIOS_VENDOR,
DMI_BIOS_VERSION,
DMI_BIOS_DATE,
+ DMI_BIOS_RELEASE,
+ DMI_EC_FIRMWARE_RELEASE,
DMI_SYS_VENDOR,
DMI_PRODUCT_NAME,
DMI_PRODUCT_VERSION,
diff --git a/include/linux/module.h b/include/linux/module.h
index d849d06e4d44..2e6670860d27 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -866,14 +866,6 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
-#ifdef CONFIG_STRICT_MODULE_RWX
-extern void module_enable_ro(const struct module *mod, bool after_init);
-extern void module_disable_ro(const struct module *mod);
-#else
-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-static inline void module_disable_ro(const struct module *mod) { }
-#endif
-
#ifdef CONFIG_GENERIC_BUG
void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
struct module *);
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 7edac8c7a9c1..de657bd211fa 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -111,4 +111,6 @@ extern unsigned int sysctl_mount_max;
extern bool path_is_mountpoint(const struct path *path);
+extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 886e30441c90..d890805f5494 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -98,7 +98,7 @@ struct nand_bbt_descr {
/*
* Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
- * was allocated dynamicaly and must be freed in nand_release(). Has no meaning
+ * was allocated dynamically and must be freed in nand_cleanup(). Has no meaning
* in nand_chip.bbt_options.
*/
#define NAND_BBT_DYNAMICSTRUCT 0x80000000
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index c98a21108688..fd1ecb821106 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -138,7 +138,7 @@ struct cfi_ident {
uint16_t InterfaceDesc;
uint16_t MaxBufWriteSize;
uint8_t NumEraseRegions;
- uint32_t EraseRegionInfo[0]; /* Not host ordered */
+ uint32_t EraseRegionInfo[]; /* Not host ordered */
} __packed;
/* Extended Query Structure for both PRI and ALT */
@@ -165,7 +165,7 @@ struct cfi_pri_intelext {
uint16_t ProtRegAddr;
uint8_t FactProtRegSize;
uint8_t UserProtRegSize;
- uint8_t extra[0];
+ uint8_t extra[];
} __packed;
struct cfi_intelext_otpinfo {
@@ -286,7 +286,7 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[0]; /* per-chip data structure for each chip */
+ struct flchip chips[]; /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 2d1f4a61f4ac..157357ec1441 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -200,6 +200,8 @@ struct mtd_debug_info {
*
* @node: list node used to add an MTD partition to the parent partition list
* @offset: offset of the partition relatively to the parent offset
+ * @size: partition size. Should be equal to mtd->size unless
+ * MTD_SLC_ON_MLC_EMULATION is set
* @flags: original flags (before the mtdpart logic decided to tweak them based
* on flash constraints, like eraseblock/pagesize alignment)
*
@@ -209,6 +211,7 @@ struct mtd_debug_info {
struct mtd_part {
struct list_head node;
u64 offset;
+ u64 size;
u32 flags;
};
@@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
static inline int mtd_wunit_per_eb(struct mtd_info *mtd)
{
- return mtd->erasesize / mtd->writesize;
+ struct mtd_info *master = mtd_get_master(mtd);
+
+ return master->erasesize / mtd->writesize;
}
static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index e545c050d3e8..b74a539ec581 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -37,6 +37,7 @@
* master MTD flag set for the corresponding MTD partition.
* For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
*
* Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
@@ -48,6 +49,7 @@ struct mtd_partition {
uint64_t size; /* partition size */
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ uint32_t add_flags; /* flags to add to the partition */
struct device_node *of_node;
};
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index df5b9fddea16..2e3f43788d48 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[0];
+ struct flchip chips[];
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 1e76196f9829..65b1c1c18b41 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -83,14 +83,14 @@ struct nand_chip;
/*
* Constants for ECC_MODES
*/
-typedef enum {
+enum nand_ecc_mode {
+ NAND_ECC_INVALID,
NAND_ECC_NONE,
NAND_ECC_SOFT,
NAND_ECC_HW,
NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
NAND_ECC_ON_DIE,
-} nand_ecc_modes_t;
+};
enum nand_ecc_algo {
NAND_ECC_UNKNOWN,
@@ -119,85 +119,73 @@ enum nand_ecc_algo {
#define NAND_ECC_MAXIMIZE BIT(1)
/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 BIT(1)
+
+/*
* When using software implementation of Hamming, we can specify which byte
* ordering should be used.
*/
#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
- */
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
/*
* Chip requires ready check on read (for auto-incremented sequential read).
* True only for small page devices; large page devices do not support
* autoincrement.
*/
-#define NAND_NEED_READRDY 0x00000100
+#define NAND_NEED_READRDY BIT(8)
/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+#define NAND_BROKEN_XD BIT(10)
/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+#define NAND_ROM BIT(11)
/* Device supports subpage reads */
-#define NAND_SUBPAGE_READ 0x00001000
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
/*
* Some MLC NANDs need data scrambling to limit bitflips caused by repeated
* patterns.
*/
-#define NAND_NEED_SCRAMBLING 0x00002000
+#define NAND_NEED_SCRAMBLING BIT(13)
/* Device needs 3rd row address cycle */
-#define NAND_ROW_ADDR_3 0x00004000
-
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
-
-/* Macros to identify the above */
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
-
-/*
- * There are different places where the manufacturer stores the factory bad
- * block markers.
- *
- * Position within the block: Each of these pages needs to be checked for a
- * bad block marking pattern.
- */
-#define NAND_BBM_FIRSTPAGE 0x01000000
-#define NAND_BBM_SECONDPAGE 0x02000000
-#define NAND_BBM_LASTPAGE 0x04000000
-
-/* Position within the OOB data of the page */
-#define NAND_BBM_POS_SMALL 5
-#define NAND_BBM_POS_LARGE 0
+#define NAND_ROW_ADDR_3 BIT(14)
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
+#define NAND_SKIP_BBTSCAN BIT(16)
/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
/*
* Autodetect nand buswidth with readid/onfi.
* This supposes the driver will configure the hardware in 8-bit mode
* when calling nand_scan_ident, and update its configuration
* before calling nand_scan_tail.
*/
-#define NAND_BUSWIDTH_AUTO 0x00080000
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
/*
* This option could be defined by controller drivers to protect against
* kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
*/
-#define NAND_USE_BOUNCE_BUFFER 0x00100000
+#define NAND_USES_DMA BIT(20)
/*
* In case your controller is implementing ->legacy.cmd_ctrl() and is relying
@@ -207,26 +195,49 @@ enum nand_ecc_algo {
* If your controller already takes care of this delay, you don't need to set
* this flag.
*/
-#define NAND_WAIT_TCCS 0x00200000
+#define NAND_WAIT_TCCS BIT(21)
/*
* Whether the NAND chip is a boot medium. Drivers might use this information
* to select ECC algorithms supported by the boot ROM or similar restrictions.
*/
-#define NAND_IS_BOOT_MEDIUM 0x00400000
+#define NAND_IS_BOOT_MEDIUM BIT(22)
/*
* Do not try to tweak the timings at runtime. This is needed when the
* controller initializes the timings on itself or when it relies on
* configuration done by the bootloader.
*/
-#define NAND_KEEP_TIMINGS 0x00800000
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
#define NAND_CI_CELLTYPE_SHIFT 2
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
+
/**
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
@@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = { \
* @write_oob: function to write chip OOB data
*/
struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
+ enum nand_ecc_mode mode;
enum nand_ecc_algo algo;
int steps;
int size;
@@ -491,13 +502,17 @@ enum nand_data_interface_type {
/**
* struct nand_data_interface - NAND interface timing
* @type: type of the timing
- * @timings: The timing, type according to @type
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
*/
struct nand_data_interface {
enum nand_data_interface_type type;
- union {
- struct nand_sdr_timings sdr;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ };
} timings;
};
@@ -694,6 +709,7 @@ struct nand_op_instr {
/**
* struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
* @instrs: array of instructions
* @ninstrs: length of the @instrs array
* @first_instr_start_off: offset to start from for the first instruction
@@ -709,6 +725,7 @@ struct nand_op_instr {
* controller driver.
*/
struct nand_subop {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
unsigned int first_instr_start_off;
@@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page);
int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
u8 *subfeature_param);
-/* Default read_page_raw implementation */
+/* read_page_raw implementations */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
-/* Default write_page_raw implementation */
+/* write_page_raw implementations */
int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
unsigned int offset_in_page, const void *buf,
unsigned int len, bool force_8bit);
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
- bool force_8bit);
+ bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
@@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip);
* successful nand_scan().
*/
void nand_cleanup(struct nand_chip *chip);
-/* Unregister the MTD device and calls nand_cleanup() */
-void nand_release(struct nand_chip *chip);
/*
* External helper for controller drivers that have to implement the WAITRDY
@@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
/**
* nand_get_data_buf() - Get the internal page buffer
* @chip: NAND chip object
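
A hedged sketch of how a controller driver might combine the BIT()-based option flags above at probe time; the setup function itself is hypothetical, while chip->options and NAND_HAS_SUBPAGE_READ() come from rawnand.h.

static void example_nand_setup(struct nand_chip *chip)
{
	/* DMA-capable controller that fully programs its own timings */
	chip->options |= NAND_USES_DMA | NAND_KEEP_TIMINGS;

	if (NAND_HAS_SUBPAGE_READ(chip))
		pr_debug("subpage reads available\n");
}
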
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 1e2af0ec1f03..60bac2c0ec45 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -20,6 +20,7 @@
*/
/* Flash opcodes. */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_WREN 0x06 /* Write enable */
#define SPINOR_OP_RDSR 0x05 /* Read status register */
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
@@ -80,7 +81,6 @@
/* Used for SST flashes only. */
#define SPINOR_OP_BP 0x02 /* Byte program */
-#define SPINOR_OP_WRDI 0x04 /* Write disable */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
/* Used for S3AN flashes only */
@@ -302,7 +302,7 @@ struct spi_nor;
* @read: read data from the SPI NOR.
* @write: write data to the SPI NOR.
* @erase: erase a sector of the SPI NOR at the offset @offs; if
- * not provided by the driver, spi-nor will send the erase
+ * not provided by the driver, SPI NOR will send the erase
* opcode via write_reg().
*/
struct spi_nor_controller_ops {
@@ -327,16 +327,16 @@ struct spi_nor_manufacturer;
struct spi_nor_flash_parameter;
/**
- * struct spi_nor - Structure for defining a the SPI NOR layer
- * @mtd: point to a mtd_info structure
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
- * @dev: point to a spi device, or a spi nor controller device.
- * @spimem: point to the spi mem device
+ * @dev: pointer to an SPI device or an SPI NOR controller device
+ * @spimem: pointer to the SPI memory device
* @bouncebuf: bounce buffer used when the buffer passed by the MTD
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
- * @info: spi-nor part JDEC MFR id and other info
- * @manufacturer: spi-nor manufacturer
+ * @info: SPI NOR part JEDEC MFR ID and other info
+ * @manufacturer: SPI NOR manufacturer
* @page_size: the page size of the SPI NOR
* @addr_width: number of address bytes
* @erase_opcode: the opcode for erasing a sector
@@ -344,17 +344,17 @@ struct spi_nor_flash_parameter;
* @read_dummy: the dummy needed by the read operation
* @program_opcode: the program opcode
* @sst_write_second: used by the SST write operation
- * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @flags: flag options for the current SPI NOR (SNOR_F_*)
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
- * @reg_proto the SPI protocol for read_reg/write_reg/erase operations
+ * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
* @controller_ops: SPI NOR controller driver specific operations.
- * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
+ * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
* hooks, or dynamically when parsing the SFDP tables.
* @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes.
- * @priv: the private data
+ * @priv: pointer to the private data
*/
struct spi_nor {
struct mtd_info mtd;
diff --git a/include/linux/net.h b/include/linux/net.h
index e10f378194a5..016a9c5faa34 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -264,7 +264,8 @@ do { \
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define net_dbg_ratelimited(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1a96e9c4ec36..5b364a2e0006 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4942,7 +4942,8 @@ do { \
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_netdev_dbg(__dev, format, ##args); \
@@ -5012,7 +5013,8 @@ do { \
#define netif_info(priv, type, dev, fmt, args...) \
netif_level(info, priv, type, dev, fmt, ##args)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 82d8fb422092..4dba3c948932 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -38,7 +38,7 @@ struct nfs4_ace {
struct nfs4_acl {
uint32_t naces;
- struct nfs4_ace aces[0];
+ struct nfs4_ace aces[];
};
#define NFS4_MAXLABELLEN 2048
@@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_NOFILEHANDLE:
case NFS4ERR_MOVED:
return false;
- };
+ }
return true;
}
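
Sketch only: with a proper flexible array member, an nfs4_acl holding 'naces' entries can be sized safely with struct_size() from linux/overflow.h; this allocator is illustrative and not part of the patch.

static struct nfs4_acl *alloc_acl(u32 naces, gfp_t gfp)
{
	struct nfs4_acl *acl;

	/* Allocates the header plus naces trailing nfs4_ace entries */
	acl = kzalloc(struct_size(acl, aces, naces), gfp);
	if (acl)
		acl->naces = naces;
	return acl;
}
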
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 73eda45f1cfd..6ee9119acc5d 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -230,6 +230,7 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */
#define NFS_INO_DATA_INVAL_DEFER \
BIT(13) /* Deferred cache invalidation */
+#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index e5f3e7d8d3d5..5fd0a9ef425f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 {
struct nfs4_secinfo_flavors {
unsigned int num_flavors;
- struct nfs4_secinfo4 flavors[0];
+ struct nfs4_secinfo4 flavors[];
};
struct nfs4_secinfo_arg {
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 8c13538aeffe..191b524e5c0d 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -478,7 +478,7 @@ void ntb_unregister_client(struct ntb_client *client);
int ntb_register_device(struct ntb_dev *ntb);
/**
- * ntb_register_device() - unregister a ntb device
+ * ntb_unregister_device() - unregister an NTB device
* @ntb: NTB device context.
*
* The device will be removed from the list of ntb devices. If the ntb device
@@ -1351,7 +1351,7 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val)
* @sidx: Scratchpad index.
* @spad_addr: OUT - The address of the peer scratchpad register.
*
- * Return the address of the peer doorbell register. This may be used, for
+ * Return the address of the peer scratchpad register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
*
* Return: Zero on success, otherwise an error number.
@@ -1373,7 +1373,7 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
*
* Read the peer scratchpad register, and return the value.
*
- * Return: The value of the local scratchpad register.
+ * Return: The value of the peer scratchpad register.
*/
static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 60f541912ccf..8216a4156263 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -3,6 +3,7 @@
#define __OF_RESERVED_MEM_H
#include <linux/device.h>
+#include <linux/of.h>
struct of_phandle_args;
struct reserved_mem_ops;
@@ -33,6 +34,9 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
+int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name);
void of_reserved_mem_device_release(struct device *dev);
void fdt_init_reserved_mem(void);
@@ -45,6 +49,14 @@ static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
{
return -ENOSYS;
}
+
+static inline int of_reserved_mem_device_init_by_name(struct device *dev,
+ struct device_node *np,
+ const char *name)
+{
+ return -ENOSYS;
+}
+
static inline void of_reserved_mem_device_release(struct device *pdev) { }
static inline void fdt_init_reserved_mem(void) { }
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..6be1aa559b1e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy)
* not onlined when onlining the section).
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow to offline unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require it to re-mark the pages PageOffline() and not give them to the
+ * buddy via online_page_callback_t.
*/
PAGE_TYPE_OPS(Offline, offline)
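
A rough sketch of the notifier pattern described in the comment above, under the assumption that the driver can identify its own PageOffline() pages; the ownership check is left as a placeholder and the callback name is hypothetical.

static int drv_memory_notifier(struct notifier_block *nb,
			       unsigned long action, void *arg)
{
	struct memory_notify *mhp = arg;
	unsigned long pfn;

	for (pfn = mhp->start_pfn; pfn < mhp->start_pfn + mhp->nr_pages; pfn++) {
		struct page *page = pfn_to_page(pfn);

		if (!PageOffline(page) /* || not owned by this driver */)
			continue;
		if (action == MEM_GOING_OFFLINE)
			page_ref_dec(page);	/* let offlining treat it as free */
		else if (action == MEM_CANCEL_OFFLINE)
			page_ref_inc(page);	/* offlining aborted, take it back */
	}
	return NOTIFY_OK;
}
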
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8e085713150c..cf2468da68e9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -538,7 +538,7 @@ static inline int lock_page_killable(struct page *page)
* lock_page_or_retry - Lock the page, unless this would block and the
* caller indicated that it can handle a retry.
*
- * Return value and mmap_sem implications depend on flags; see
+ * Return value and mmap_lock implications depend on flags; see
* __lock_page_or_retry().
*/
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 13932ce8b37b..1fb508c19e83 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -325,18 +325,10 @@ struct pardev_cb {
unsigned int flags;
};
-/* parport_register_device declares that a device is connected to a
- port, and tells the kernel all it needs to know.
- - pf is the preemption function (may be NULL for no callback)
- - kf is the wake-up function (may be NULL for no callback)
- - irq_func is the interrupt handler (may be NULL for no interrupts)
- - handle is a user pointer that gets handed to callback functions. */
-struct pardevice *parport_register_device(struct parport *port,
- const char *name,
- int (*pf)(void *), void (*kf)(void *),
- void (*irq_func)(void *),
- int flags, void *handle);
-
+/*
+ * parport_register_dev_model declares that a device is connected to a
+ * port, and tells the kernel all it needs to know.
+ */
struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
const struct pardev_cb *par_dev_cb, int cnt);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 2d155bfb8fbf..5ba475ca9078 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -27,7 +27,7 @@ extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
struct pci_ecam_ops;
extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
- struct pci_ecam_ops **ecam_ops);
+ const struct pci_ecam_ops **ecam_ops);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
@@ -107,10 +107,12 @@ static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
#endif
extern const guid_t pci_acpi_dsm_guid;
-#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05
-#define DEVICE_LABEL_DSM 0x07
-#define RESET_DELAY_DSM 0x08
-#define FUNCTION_DELAY_DSM 0x09
+
+/* _DSM Definitions for PCI */
+#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05
+#define DSM_PCI_DEVICE_NAME 0x07
+#define DSM_PCI_POWER_ON_RESET_DELAY 0x08
+#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09
#ifdef CONFIG_PCIE_EDR
void pci_acpi_add_edr_notifier(struct pci_dev *pdev);
@@ -125,10 +127,4 @@ static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
#endif /* CONFIG_ACPI */
-#ifdef CONFIG_ACPI_APEI
-extern bool aer_acpi_firmware_first(void);
-#else
-static inline bool aer_acpi_firmware_first(void) { return false; }
-#endif
-
#endif /* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index d08f0869f121..f75c307f346d 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -6,11 +6,14 @@
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
+bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
#else /* CONFIG_PCI_ATS */
+static inline bool pci_ats_supported(struct pci_dev *d)
+{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
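
Hedged sketch: an IOMMU driver gating ATS on the new pci_ats_supported() helper instead of open-coding the capability checks; the wrapper function is illustrative and PAGE_SHIFT is just a common choice for the page-shift argument.

static void example_enable_ats(struct pci_dev *pdev)
{
	if (!pci_ats_supported(pdev))
		return;

	if (pci_enable_ats(pdev, PAGE_SHIFT))
		dev_warn(&pdev->dev, "failed to enable ATS\n");
}
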
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index a73164c85e78..1af5cb02ef7f 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -29,7 +29,7 @@ struct pci_config_window {
struct resource res;
struct resource busr;
void *priv;
- struct pci_ecam_ops *ops;
+ const struct pci_ecam_ops *ops;
union {
void __iomem *win; /* 64-bit single mapping */
void __iomem **winp; /* 32-bit per-bus mapping */
@@ -40,29 +40,28 @@ struct pci_config_window {
/* create and free pci_config_window */
struct pci_config_window *pci_ecam_create(struct device *dev,
struct resource *cfgres, struct resource *busr,
- struct pci_ecam_ops *ops);
+ const struct pci_ecam_ops *ops);
void pci_ecam_free(struct pci_config_window *cfg);
/* map_bus when ->sysdata is an instance of pci_config_window */
void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
/* default ECAM ops */
-extern struct pci_ecam_ops pci_generic_ecam_ops;
+extern const struct pci_ecam_ops pci_generic_ecam_ops;
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
-extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
-extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
-extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
-extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
-extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
-extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
-extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
+extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */
+extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */
+extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
+extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
#endif
-#ifdef CONFIG_PCI_HOST_COMMON
+#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev,
- struct pci_ecam_ops *ops);
+int pci_host_common_probe(struct platform_device *pdev);
int pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
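
Sketch of the host-driver pattern implied by the new pci_host_common_probe() prototype: the ECAM ops are no longer passed in by the caller, so a driver typically supplies them via its of_device_id match data. The driver and compatible names below follow the generic host driver; treat the match-data lookup as an assumption rather than something stated in this patch.

static const struct of_device_id example_ecam_of_match[] = {
	{ .compatible = "pci-host-ecam-generic", .data = &pci_generic_ecam_ops },
	{ },
};

static struct platform_driver example_ecam_driver = {
	.driver = {
		.name = "example-ecam",
		.of_match_table = example_ecam_of_match,
	},
	.probe = pci_host_common_probe,
	.remove = pci_host_common_remove,
};
builtin_platform_driver(example_ecam_driver);
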
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index e0ed9d01f6e5..cc66bec8be90 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -66,19 +66,27 @@ struct pci_epc_ops {
};
/**
+ * struct pci_epc_mem_window - address window of the endpoint controller
+ * @phys_base: physical base address of the PCI address window
+ * @size: the size of the PCI address window
+ * @page_size: size of each page
+ */
+struct pci_epc_mem_window {
+ phys_addr_t phys_base;
+ size_t size;
+ size_t page_size;
+};
+
+/**
* struct pci_epc_mem - address space of the endpoint controller
- * @phys_base: physical base address of the PCI address space
- * @size: the size of the PCI address space
+ * @window: address window of the endpoint controller
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
- * @page_size: size of each page
* @lock: mutex to protect bitmap
*/
struct pci_epc_mem {
- phys_addr_t phys_base;
- size_t size;
+ struct pci_epc_mem_window window;
unsigned long *bitmap;
- size_t page_size;
int pages;
/* mutex to protect against concurrent access for memory allocation*/
struct mutex lock;
@@ -89,7 +97,11 @@ struct pci_epc_mem {
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
* @ops: function pointers for performing endpoint operations
- * @mem: address space of the endpoint controller
+ * @windows: array of address space of the endpoint controller
+ * @mem: first window of the endpoint controller, which corresponds to
+ * the default address space of an endpoint controller that supports a
+ * single window.
+ * @num_windows: number of windows supported by the device
* @max_functions: max number of functions that can be configured in this EPC
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
@@ -100,7 +112,9 @@ struct pci_epc {
struct device dev;
struct list_head pci_epf;
const struct pci_epc_ops *ops;
+ struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
+ unsigned int num_windows;
u8 max_functions;
struct config_group *group;
/* mutex to protect against concurrent access of EP controller */
@@ -137,9 +151,6 @@ struct pci_epc_features {
#define devm_pci_epc_create(dev, ops) \
__devm_pci_epc_create((dev), (ops), THIS_MODULE)
-#define pci_epc_mem_init(epc, phys_addr, size) \
- __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE)
-
static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
{
dev_set_drvdata(&epc->dev, data);
@@ -195,8 +206,11 @@ unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
-int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size,
- size_t page_size);
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
+ size_t size, size_t page_size);
+int pci_epc_multi_mem_init(struct pci_epc *epc,
+ struct pci_epc_mem_window *window,
+ unsigned int num_windows);
void pci_epc_mem_exit(struct pci_epc *epc);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
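
Illustrative only: an endpoint controller driver describing two outbound windows and handing them to the new multi-window initializer. The addresses, sizes, and helper name are made up; only pci_epc_mem_window and pci_epc_multi_mem_init() come from this patch.

static struct pci_epc_mem_window example_windows[] = {
	{ .phys_base = 0x80000000, .size = SZ_64M, .page_size = SZ_4K },
	{ .phys_base = 0x90000000, .size = SZ_1M,  .page_size = SZ_64K },
};

static int example_epc_mem_setup(struct pci_epc *epc)
{
	return pci_epc_multi_mem_init(epc, example_windows,
				      ARRAY_SIZE(example_windows));
}
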
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 83ce1cdf5676..c79d83304e52 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -100,9 +100,21 @@ enum {
PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif
- /* Resources assigned to buses behind the bridge */
+/* PCI-to-PCI (P2P) bridge windows */
+#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+
+/* CardBus bridge windows */
+#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
+#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
+#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
+#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
+
+/* Total number of bridge resources for P2P and CardBus */
#define PCI_BRIDGE_RESOURCE_NUM 4
+ /* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
PCI_BRIDGE_RESOURCE_NUM - 1,
@@ -279,7 +291,7 @@ struct pci_cap_saved_data {
u16 cap_nr;
bool cap_extended;
unsigned int size;
- u32 data[0];
+ u32 data[];
};
struct pci_cap_saved_state {
@@ -420,8 +432,6 @@ struct pci_dev {
* mappings to make sure they cannot access arbitrary memory.
*/
unsigned int untrusted:1;
- unsigned int __aer_firmware_first_valid:1;
- unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
unsigned int irq_managed:1;
@@ -532,7 +542,7 @@ struct pci_host_bridge {
resource_size_t start,
resource_size_t size,
resource_size_t align);
- unsigned long private[0] ____cacheline_aligned;
+ unsigned long private[] ____cacheline_aligned;
};
#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@ -1025,7 +1035,6 @@ void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
-struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
@@ -2048,6 +2057,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
+
+int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
int pci_iov_add_virtfn(struct pci_dev *dev, int id);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
int pci_num_vf(struct pci_dev *dev);
@@ -2073,6 +2084,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
+
+static inline int pci_iov_sysfs_link(struct pci_dev *dev,
+ struct pci_dev *virtfn, int id)
+{
+ return -ENODEV;
+}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
@@ -2143,17 +2160,23 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
+/**
+ * pcie_find_root_port - Get the PCIe root port device
+ * @dev: PCI device
+ *
+ * Traverse up the parent chain and return the PCIe Root Port PCI Device
+ * for a given PCI/PCIe Device.
+ */
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
- while (1) {
- if (!pci_is_pcie(dev))
- break;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- return dev;
- if (!dev->bus->self)
- break;
- dev = dev->bus->self;
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
+
+ while (bridge) {
+ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
+ return bridge;
+ bridge = pci_upstream_bridge(bridge);
}
+
return NULL;
}
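
Example caller (hypothetical): report which Root Port a device sits under, now that pcie_find_root_port() simply walks pci_upstream_bridge() for any PCI/PCIe device.

static void example_report_root_port(struct pci_dev *pdev)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);

	if (rp)
		pci_info(pdev, "root port: %s\n", pci_name(rp));
	else
		pci_info(pdev, "no PCIe root port found\n");
}
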
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1dfc4e1dcb94..9a57e6717e5c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1832,6 +1832,12 @@
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+#define PCI_VENDOR_ID_PERICOM 0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+
#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
new file mode 100644
index 000000000000..32b6c52d41b9
--- /dev/null
+++ b/include/linux/pgtable.h
@@ -0,0 +1,1438 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGTABLE_H
+#define _LINUX_PGTABLE_H
+
+#include <linux/pfn.h>
+#include <asm/pgtable.h>
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#include <linux/mm_types.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <asm-generic/pgtable_uffd.h>
+
+#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
+ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
+#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
+#endif
+
+/*
+ * On almost all architectures and configurations, 0 can be used as the
+ * upper ceiling to free_pgtables(): on many architectures it has the same
+ * effect as using TASK_SIZE. However, there is one configuration which
+ * must impose a more careful limit, to avoid freeing kernel pgtables.
+ */
+#ifndef USER_PGTABLES_CEILING
+#define USER_PGTABLES_CEILING 0UL
+#endif
+
+/*
+ * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
+ *
+ * The pXx_index() functions return the index of the entry in the page
+ * table page which would control the given virtual address.
+ *
+ * As these functions may be used by the same code for different levels of
+ * the page table folding, they are always available, regardless of
+ * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
+ * because in such cases PTRS_PER_PxD equals 1.
+ */
+
+static inline unsigned long pte_index(unsigned long address)
+{
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+}
+
+#ifndef pmd_index
+static inline unsigned long pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
+#define pmd_index pmd_index
+#endif
+
+#ifndef pud_index
+static inline unsigned long pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+#define pud_index pud_index
+#endif
+
+#ifndef pgd_index
+/* Must be a compile-time constant, so implement it as a macro */
+#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+#endif
+
+#ifndef pte_offset_kernel
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
+}
+#define pte_offset_kernel pte_offset_kernel
+#endif
+
+#if defined(CONFIG_HIGHPTE)
+#define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
+ pte_index((address)))
+#define pte_unmap(pte) kunmap_atomic((pte))
+#else
+#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
+#define pte_unmap(pte) ((void)(pte)) /* NOP */
+#endif
+
+/* Find an entry in the second-level page table.. */
+#ifndef pmd_offset
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+#define pmd_offset pmd_offset
+#endif
+
+#ifndef pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+}
+#define pud_offset pud_offset
+#endif
+
+static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
+{
+ return (pgd + pgd_index(address));
+};
+
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#ifndef pgd_offset
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
+#endif
+
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+/*
+ * In many cases it is known that a virtual address is mapped at PMD or PTE
+ * level, so instead of traversing all the page table levels, we can get a
+ * pointer to the PMD entry in user or kernel page table or translate a virtual
+ * address to the pointer in the PTE in the kernel page tables with simple
+ * helpers.
+ */
+static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long va)
+{
+ return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
+}
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ pmd_t *pmd = pmd_off_k(vaddr);
+
+ return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
+}
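+/*
+ * Usage sketch (not part of this header): look up the kernel PTE backing a
+ * kernel virtual address and test whether it is present. Purely illustrative;
+ * the helper name is made up.
+ */
+static inline bool kaddr_is_present(unsigned long vaddr)
+{
+	pte_t *ptep = virt_to_kpte(vaddr);
+
+	return ptep && pte_present(*ptep);
+}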
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty);
+#else
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ pud_t entry, int dirty)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ int r = 1;
+ if (!pte_young(pte))
+ r = 0;
+ else
+ set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+ return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ int r = 1;
+ if (!pmd_young(pmd))
+ r = 0;
+ else
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+ return r;
+}
+#else
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+/*
+ * Although relevant to THP only, this API is called from generic rmap code
+ * under PageTransHuge(), hence it needs a dummy implementation for !THP.
+ */
+static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+ pmd_clear(pmdp);
+ return pmd;
+}
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pud_t *pudp)
+{
+ pud_t pud = *pudp;
+
+ pud_clear(pudp);
+ return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ int full)
+{
+ return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp,
+ int full)
+{
+ return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep,
+ int full)
+{
+ pte_t pte;
+ pte = ptep_get_and_clear(mm, address, ptep);
+ return pte;
+}
+#endif
+
+
+/*
+ * If two threads concurrently fault at the same page, the thread that
+ * won the race updates the PTE and its local TLB/cache. The other thread
+ * gives up, simply does nothing, and continues; on architectures where
+ * software can update the TLB, the local TLB can be updated here to avoid
+ * the next page fault. This function updates the TLB only and does nothing
+ * with the cache or anything else; that is what distinguishes it from
+ * update_mmu_cache().
+ */
+#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+static inline void update_mmu_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTE's which are already
+ * not present, or in the process of an address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ int full)
+{
+ pte_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
+extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pud_t *pudp);
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+/*
+ * On some architectures the hardware does not set the page access bit when a
+ * memory page is accessed; it is the responsibility of software to set it.
+ * Tracking the access bit this way adds an extra page-fault penalty, so as an
+ * optimization the bit can be set on every page-fault path on such
+ * architectures. To distinguish it from the pte_mkyoung() macro, this macro is
+ * used on platforms where software maintains the page access bit.
+ */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
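
/*
 * Illustrative sketch (editor's addition, not part of the patch): how a fault
 * handler is expected to use pte_sw_mkyoung() when building a new PTE, in the
 * style of the anonymous-fault path in mm/memory.c. The helper name below is
 * hypothetical.
 */
static inline pte_t example_mk_fault_pte(struct vm_area_struct *vma,
					 struct page *page, bool write)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	/* No-op unless the architecture maintains the access bit in software. */
	entry = pte_sw_mkyoung(entry);
	if (write)
		entry = pte_mkwrite(pte_mkdirty(entry));
	return entry;
}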
+
+#ifndef pte_savedwrite
+#define pte_savedwrite pte_write
+#endif
+
+#ifndef pte_mk_savedwrite
+#define pte_mk_savedwrite pte_mkwrite
+#endif
+
+#ifndef pte_clear_savedwrite
+#define pte_clear_savedwrite pte_wrprotect
+#endif
+
+#ifndef pmd_savedwrite
+#define pmd_savedwrite pmd_write
+#endif
+
+#ifndef pmd_mk_savedwrite
+#define pmd_mk_savedwrite pmd_mkwrite
+#endif
+
+#ifndef pmd_clear_savedwrite
+#define pmd_clear_savedwrite pmd_wrprotect
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ pud_t old_pud = *pudp;
+
+ set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pud_t *pudp)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
+
+#ifndef pmdp_collapse_flush
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#else
+static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ BUILD_BUG();
+ return *pmdp;
+}
+#define pmdp_collapse_flush pmdp_collapse_flush
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable);
+#endif
+
+#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * This is an implementation of pmdp_establish() that is only suitable for an
+ * architecture that doesn't have hardware dirty/accessed bits. In this case we
+ * can't race with a CPU that sets these bits, so a non-atomic approach is fine.
+ */
+static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ pmd_t old_pmd = *pmdp;
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ return old_pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE
+extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
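
/*
 * Illustrative sketch (editor's addition): the p?d_access_permitted() helpers
 * are meant for fast-path checks such as get_user_pages_fast(), where both
 * presence and, optionally, write permission must hold. The function name is
 * hypothetical; FOLL_WRITE is the existing flag from linux/mm.h.
 */
static inline bool example_can_follow_pte(pte_t pte, unsigned int gup_flags)
{
	/* FOLL_WRITE requests a writable mapping; read-only access needs none. */
	return pte_access_permitted(pte, gup_flags & FOLL_WRITE);
}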
+
+#ifndef __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+ return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+ return pud_val(pud_a) == pud_val(pud_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
+{
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
+{
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
+}
+#endif
+
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
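
/*
 * Illustrative sketch (editor's addition): the _safe() variants are intended
 * for code that populates brand-new (not-present) entries, e.g. while building
 * early kernel mappings, where no TLB flush can possibly be needed. The
 * function below is hypothetical.
 */
static __init void example_populate_new_ptes(pte_t *ptep, unsigned long pfn,
					     unsigned int nr, pgprot_t prot)
{
	unsigned int i;

	for (i = 0; i < nr; i++, ptep++, pfn++) {
		/* Entries are known not-present: no flush, only a WARN if not. */
		set_pte_safe(ptep, pfn_pte(pfn, prot));
	}
}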
+
+#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as metadata for the page. arch_do_swap_page() can restore this
+ * metadata when a page is swapped back in.
+ */
+static inline void arch_do_swap_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte)
+{
+
+}
+#endif
+
+#ifndef __HAVE_ARCH_UNMAP_ONE
+/*
+ * Some architectures support metadata associated with a page. When a
+ * page is being swapped out, this metadata must be saved so it can be
+ * restored when the page is swapped back in. SPARC M7 and newer
+ * processors support an ADI (Application Data Integrity) tag for the
+ * page as its metadata. arch_unmap_one() can save this
+ * metadata on a swap-out of a page.
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t orig_pte)
+{
+ return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+#ifndef pte_accessible
+# define pte_accessible(mm, pte) ((void)(pte), 1)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
+#ifndef pgprot_nx
+#define pgprot_nx(prot) (prot)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot) (prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+ newprot = pgprot_noncached(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+ newprot = pgprot_writecombine(newprot);
+ if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+ newprot = pgprot_device(newprot);
+ return newprot;
+}
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d) do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud) do { } while (0)
+#endif
+
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+ if (p4d_none(*p4d))
+ return 1;
+ if (unlikely(p4d_bad(*p4d))) {
+ p4d_clear_bad(p4d);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
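
/*
 * Illustrative sketch (editor's addition): the canonical walk loop that the
 * p?d_addr_end() and p?d_none_or_clear_bad() helpers above are built for,
 * modelled on the walkers in mm/memory.c; the per-PMD work is elided and the
 * function name is hypothetical.
 */
static inline void example_walk_pmd_range(pud_t *pud, unsigned long addr,
					  unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;	/* empty, or bad entry reported and cleared */
		/* ... operate on the page-table page below *pmd ... */
	} while (pmd++, addr = next, addr != end);
}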
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * Get the current pte state, but zero it out to make it
+ * non-present, preventing the hardware from asynchronously
+ * updating it.
+ */
+ return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * The pte is non-present, so there's no hardware state to
+ * preserve.
+ */
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+ __ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
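
/*
 * Illustrative sketch (editor's addition): the read-modify-write pattern the
 * transaction helpers above are designed for, simplified from protection
 * changing code such as mm/mprotect.c; the caller holds the PTE lock and the
 * function name is hypothetical.
 */
static inline void example_wrprotect_pte(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t oldpte, newpte;

	oldpte = ptep_modify_prot_start(vma, addr, ptep);
	newpte = pte_wrprotect(oldpte);
	/* Hardware A/D updates made in between must not be lost. */
	ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
}
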
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
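
/*
 * Illustrative sketch (editor's addition): how callers bracket a batch of PTE
 * updates with the lazy-MMU hooks; the PTE lock is assumed to be held and the
 * function name is hypothetical.
 */
static inline void example_set_pte_range(struct mm_struct *mm, pte_t *ptep,
					 unsigned long addr, unsigned long pfn,
					 unsigned int nr, pgprot_t prot)
{
	unsigned int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++, ptep++, pfn++, addr += PAGE_SIZE)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, prot));
	/* A batching hypervisor may flush all queued updates here. */
	arch_leave_lazy_mmu_mode();
}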
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exit should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev) do {} while (0)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+ return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of
+ * the memory type of pfn mappings established by remap_pfn_range() and
+ * vmf_insert_pfn().
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vmf_insert_pfn().
+ */
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
+{
+}
+
+/*
+ * track_pfn_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * untrack_pfn is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * for the entire vma (in which case pfn and size are zero).
+ */
+static inline void untrack_pfn(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
+#else
+extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long addr,
+ unsigned long size);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
+extern int track_pfn_copy(struct vm_area_struct *vma);
+extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
+#endif
+
+#ifdef __HAVE_COLOR_ZERO_PAGE
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ unsigned long offset_from_zero_pfn = pfn - zero_pfn;
+ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
+}
+
+#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
+
+#else
+static inline int is_zero_pfn(unsigned long pfn)
+{
+ extern unsigned long zero_pfn;
+ return pfn == zero_pfn;
+}
+
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+ extern unsigned long zero_pfn;
+ return zero_pfn;
+}
+#endif
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return 0;
+}
+#ifndef pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ BUG();
+ return 0;
+}
+#endif /* pmd_write */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+static inline int pud_trans_huge(pud_t pud)
+{
+ return 0;
+}
+#endif
+
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ return 1;
+ if (unlikely(pud_bad(pudval))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+ return 0;
+#endif
+}
+
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ /*
+ * Depend on the compiler for an atomic pmd read. NOTE: this is
+ * only going to work if pmdval_t isn't larger than
+ * an unsigned long.
+ */
+ return *pmdp;
+}
+#endif
+
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_lock held in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into a hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_lock in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd was none is safe (because it
+ * can return none anyway). The compiler-level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_lock is held for reading by the
+ * caller (a special atomic read, not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+ /*
+ * The barrier will stabilize the pmdval in a register or on
+ * the stack so that it will stop changing under the code.
+ *
+ * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+ * pmd_read_atomic is allowed to return a not atomic pmdval
+ * (for example pointing to an hugepage that has never been
+ * mapped in the pmd). The below checks will only care about
+ * the low part of the pmd with 32bit PAE x86 anyway, with the
+ * exception of pmd_none(). So the important thing is that if
+ * the low part of the pmd is found null, the high part will
+ * also be null or the pmd_none() check below would be
+ * confused.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+ /*
+ * !pmd_present() checks for pmd migration entries.
+ *
+ * The complete check uses is_pmd_migration_entry() in linux/swapops.h,
+ * but using that would require moving this function and pmd_trans_unstable()
+ * to linux/swapops.h to resolve the dependency, which is too much code to move.
+ *
+ * !pmd_present() is currently equivalent to is_pmd_migration_entry(),
+ * because !pmd_present() pages can only be under migration, not swapped
+ * out.
+ *
+ * pmd_none() is preserved for future condition checks on pmd migration
+ * entries, and to avoid confusion with this function's name, although it is
+ * redundant with !pmd_present().
+ */
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+ (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+ return 1;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that have already verified the pmd is not none and want to
+ * walk ptes while holding the mmap_lock in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run pmd_trans_unstable before walking the ptes after
+ * split_huge_pmd returns (because it may have run while the pmd became
+ * null, but then a page fault can map in a THP and not a regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+ return 0;
+#endif
+}
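
/*
 * Illustrative sketch (editor's addition): how a PTE walker running with the
 * mmap_lock held for reading typically uses pmd_trans_unstable() before
 * mapping the PTE page, in the style of the walkers in mm/; the per-PTE work
 * is elided and the function name is hypothetical.
 */
static inline int example_scan_ptes(struct vm_area_struct *vma, pmd_t *pmd,
				    unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	if (pmd_trans_unstable(pmd))
		return 0;	/* behave as if the pmd were none; caller may retry */

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE, pte++) {
		/* ... examine *pte here ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}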
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
+ * the only case the kernel cares about is NUMA balancing, and it is only ever
+ * set when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
+ * _PAGE_PROTNONE, so by default implement the helper as "always no". It
+ * is the responsibility of the caller to distinguish between PROT_NONE
+ * protections and NUMA hinting fault protections.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return 0;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifndef __PAGETABLE_P4D_FOLDED
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
+int p4d_clear_huge(p4d_t *p4d);
+#else
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+#endif /* !__PAGETABLE_P4D_FOLDED */
+
+int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
+int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
+int pud_clear_huge(pud_t *pud);
+int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
+#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
+static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+static inline int p4d_clear_huge(p4d_t *p4d)
+{
+ return 0;
+}
+static inline int pud_clear_huge(pud_t *pud)
+{
+ return 0;
+}
+static inline int pmd_clear_huge(pmd_t *pmd)
+{
+ return 0;
+}
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0;
+}
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return 0;
+}
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise it can also help optimize the normal TLB flush in
+ * the THP regime: the stock flush_tlb_range() typically has an optimization to
+ * nuke the entire TLB if the flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single THP flush would
+ * invalidate the entire TLB, which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
+#endif
+#endif
+
+struct file;
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
+extern void __init pgtable_cache_init(void);
+
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks that we can count on until those architectures
+ * define the flags on their own.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+/*
+ * Page Table Modification bits for pgtbl_mod_mask.
+ *
+ * These are used by the p?d_alloc_track*() set of functions and in the generic
+ * vmalloc/ioremap code to track at which page-table levels entries have been
+ * modified. Based on that the code can better decide when vmalloc and ioremap
+ * mapping changes need to be synchronized to other page-tables in the system.
+ */
+#define __PGTBL_PGD_MODIFIED 0
+#define __PGTBL_P4D_MODIFIED 1
+#define __PGTBL_PUD_MODIFIED 2
+#define __PGTBL_PMD_MODIFIED 3
+#define __PGTBL_PTE_MODIFIED 4
+
+#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
+#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
+#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
+#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
+#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
+
+/* Page-Table Modification Mask */
+typedef unsigned int pgtbl_mod_mask;
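
/*
 * Illustrative sketch (editor's addition): how a mapping routine accumulates
 * the modification mask while allocating page-table levels. The function name
 * is hypothetical; a caller would later test the mask (e.g. for PGD/P4D bits)
 * to decide whether other page tables need synchronizing, via whatever hook
 * the architecture provides for that purpose.
 */
static inline void example_note_table_alloc(pgtbl_mod_mask *mask, bool new_pud,
					    bool new_pmd)
{
	if (new_pud)
		*mask |= PGTBL_PUD_MODIFIED;
	if (new_pmd)
		*mask |= PGTBL_PMD_MODIFIED;
}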
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() in that it is always available (if
+ * the architecture supports large pages at the appropriate level) even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x) 0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x) 0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x) 0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x) 0
+#endif
+
+#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h
index 5973a6313529..e23b52df93ec 100644
--- a/include/linux/phy/omap_usb.h
+++ b/include/linux/phy/omap_usb.h
@@ -2,68 +2,14 @@
/*
* omap_usb.h -- omap usb2 phy header file
*
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
#ifndef __DRIVERS_OMAP_USB2_H
#define __DRIVERS_OMAP_USB2_H
-#include <linux/io.h>
-#include <linux/usb/otg.h>
-
-struct usb_dpll_params {
- u16 m;
- u8 n;
- u8 freq:3;
- u8 sd;
- u32 mf;
-};
-
-enum omap_usb_phy_type {
- TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
- TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
- TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */
-};
-
-struct omap_usb {
- struct usb_phy phy;
- struct phy_companion *comparator;
- void __iomem *pll_ctrl_base;
- void __iomem *phy_base;
- struct device *dev;
- struct device *control_dev;
- struct clk *wkupclk;
- struct clk *optclk;
- u8 flags;
- enum omap_usb_phy_type type;
- struct regmap *syscon_phy_power; /* ctrl. reg. acces */
- unsigned int power_reg; /* power reg. index within syscon */
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-struct usb_phy_data {
- const char *label;
- u8 flags;
- u32 mask;
- u32 power_on;
- u32 power_off;
-};
-
-/* Driver Flags */
-#define OMAP_USB2_HAS_START_SRP (1 << 0)
-#define OMAP_USB2_HAS_SET_VBUS (1 << 1)
-#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2)
-
-#define OMAP_DEV_PHY_PD BIT(0)
-#define OMAP_USB2_PHY_PD BIT(28)
-
-#define AM437X_USB2_PHY_PD BIT(0)
-#define AM437X_USB2_OTG_PD BIT(1)
-#define AM437X_USB2_OTGVDET_EN BIT(19)
-#define AM437X_USB2_OTGSESSEND_EN BIT(20)
+#include <linux/usb/phy_companion.h>
#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
@@ -76,15 +22,4 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator)
}
#endif
-static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset)
-{
- return __raw_readl(addr + offset);
-}
-
-static inline void omap_usb_writel(void __iomem *addr, unsigned offset,
- u32 data)
-{
- __raw_writel(data, addr + offset);
-}
-
#endif /* __DRIVERS_OMAP_USB_H */
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 93543cbc0e6b..176d6cf80e7c 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -102,6 +102,7 @@ extern void attach_pid(struct task_struct *task, enum pid_type);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
+extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 4956e362e55e..5a5cb45ac57e 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -17,12 +17,6 @@
struct fs_pin;
-enum { /* definitions for pid_namespace's hide_pid field */
- HIDEPID_OFF = 0,
- HIDEPID_NO_ACCESS = 1,
- HIDEPID_INVISIBLE = 2,
-};
-
struct pid_namespace {
struct kref kref;
struct idr idr;
@@ -32,17 +26,11 @@ struct pid_namespace {
struct kmem_cache *pid_cachep;
unsigned int level;
struct pid_namespace *parent;
-#ifdef CONFIG_PROC_FS
- struct dentry *proc_self;
- struct dentry *proc_thread_self;
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
#endif
struct user_namespace *user_ns;
struct ucounts *ucounts;
- kgid_t pid_gid;
- int hide_pid;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
} __randomize_layout;
diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h
deleted file mode 100644
index addd48cac625..000000000000
--- a/include/linux/platform_data/clk-integrator.h
+++ /dev/null
@@ -1,2 +0,0 @@
-void integrator_impd1_clk_init(void __iomem *base, unsigned int id);
-void integrator_impd1_clk_exit(unsigned int id);
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
index 3c606c450d05..ff1be737bad6 100644
--- a/include/linux/platform_data/gpio-dwapb.h
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -12,7 +12,6 @@ struct dwapb_port_property {
unsigned int ngpio;
unsigned int gpio_base;
int irq[32];
- bool has_irq;
bool irq_shared;
};
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index 08e639e047e5..03e92c71b3fa 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */
* Newer ones also support 4-bit ECC, but are awkward
* using it with large page chips.
*/
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_mode ecc_mode;
u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index deb849bcf0ec..08675b16f9e1 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -49,7 +49,7 @@ struct s3c2410_platform_nand {
unsigned int ignore_unset_ecc:1;
- nand_ecc_modes_t ecc_mode;
+ enum nand_ecc_mode ecc_mode;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 747861816f4f..d5c4a329321d 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -42,6 +42,18 @@ struct dev_pm_opp_supply {
};
/**
+ * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
+ * @avg: Average bandwidth corresponding to this OPP (in icc units)
+ * @peak: Peak bandwidth corresponding to this OPP (in icc units)
+ *
+ * This structure stores the bandwidth values for a single interconnect path.
+ */
+struct dev_pm_opp_icc_bw {
+ u32 avg;
+ u32 peak;
+};
+
+/**
* struct dev_pm_opp_info - OPP freq/voltage/current values
* @rate: Target clk rate in hz
* @supplies: Array of voltage/current values for all power supplies
@@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
void dev_pm_opp_of_register_em(struct cpumask *cpus);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
@@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np,
{
return -ENOTSUPP;
}
+
+static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
+{
+ return -ENOTSUPP;
+}
#endif
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index dcd5a71e6c67..ac1345a48ad0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -61,6 +61,7 @@ enum {
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_OVERCURRENT,
+ POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED,
};
enum {
@@ -139,6 +140,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_MAX,
@@ -158,6 +160,9 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -223,9 +228,9 @@ struct power_supply_config {
struct power_supply_desc {
const char *name;
enum power_supply_type type;
- enum power_supply_usb_type *usb_types;
+ const enum power_supply_usb_type *usb_types;
size_t num_usb_types;
- enum power_supply_property *properties;
+ const enum power_supply_property *properties;
size_t num_properties;
/*
@@ -346,8 +351,12 @@ struct power_supply_battery_info {
int charge_full_design_uah; /* microAmp-hours */
int voltage_min_design_uv; /* microVolts */
int voltage_max_design_uv; /* microVolts */
+ int tricklecharge_current_ua; /* microAmps */
int precharge_current_ua; /* microAmps */
+ int precharge_voltage_max_uv; /* microVolts */
int charge_term_current_ua; /* microAmps */
+ int charge_restart_voltage_uv; /* microVolts */
+ int overvoltage_limit_uv; /* microVolts */
int constant_charge_current_max_ua; /* microAmps */
int constant_charge_voltage_max_uv; /* microVolts */
int factory_internal_resistance_uohm; /* microOhms */
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 3cc2f178bf06..fc8f03c54543 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -399,7 +399,8 @@ extern int kptr_restrict;
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#include <linux/dynamic_debug.h>
/**
@@ -535,7 +536,8 @@ extern int kptr_restrict;
#endif
/* If you are writing a driver, please use dev_dbg instead */
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define pr_debug_ratelimited(fmt, ...) \
do { \
@@ -582,7 +584,8 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
#endif
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index ad4ff711fc02..d1eed1b43651 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -42,6 +42,34 @@ struct proc_ops {
unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
} __randomize_layout;
+/* definitions for hide_pid field */
+enum proc_hidepid {
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+ HIDEPID_NOT_PTRACEABLE = 4, /* Limit pids to only ptraceable pids */
+};
+
+/* definitions for proc mount option pidonly */
+enum proc_pidonly {
+ PROC_PIDONLY_OFF = 0,
+ PROC_PIDONLY_ON = 1,
+};
+
+struct proc_fs_info {
+ struct pid_namespace *pid_ns;
+ struct dentry *proc_self; /* For /proc/self */
+ struct dentry *proc_thread_self; /* For /proc/thread-self */
+ kgid_t pid_gid;
+ enum proc_hidepid hide_pid;
+ enum proc_pidonly pidonly;
+};
+
+static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
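
/*
 * Illustrative sketch (editor's addition): consulting the new per-mount
 * options from procfs code; the permission check is simplified and the
 * function name is hypothetical.
 */
static inline bool example_pid_entries_visible(struct super_block *sb,
					       bool may_ptrace)
{
	struct proc_fs_info *fs_info = proc_sb_info(sb);

	if (fs_info->hide_pid == HIDEPID_INVISIBLE && !may_ptrace)
		return false;
	return true;
}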
+
#ifdef CONFIG_PROC_FS
typedef int (*proc_write_t)(struct file *, char *, size_t);
@@ -177,9 +205,9 @@ int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
/* get the associated pid namespace for a file in procfs */
-static inline struct pid_namespace *proc_pid_ns(const struct inode *inode)
+static inline struct pid_namespace *proc_pid_ns(struct super_block *sb)
{
- return inode->i_sb->s_fs_info;
+ return proc_sb_info(sb)->pid_ns;
}
bool proc_ns_file(const struct file *file);
diff --git a/include/linux/property.h b/include/linux/property.h
index c7b5f3db36aa..10d03572f52e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -444,6 +444,7 @@ int software_node_register_node_group(const struct software_node **node_group);
void software_node_unregister_node_group(const struct software_node **node_group);
int software_node_register(const struct software_node *node);
+void software_node_unregister(const struct software_node *node);
int software_node_notify(struct device *dev, unsigned long action);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 48325d7790f8..8cb76405cbce 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -828,6 +828,7 @@ struct qed_common_cb_ops {
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
void (*get_protocol_tlv_data)(void *dev, void *data);
+ void (*bw_update)(void *dev);
};
struct qed_selftest_ops {
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 584077565f12..2d3ddd2b85e0 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -98,7 +98,6 @@ struct qed_rdma_device {
u64 max_mr_size;
u32 max_cqe;
u32 max_mw;
- u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd;
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 7375bb3da140..df587d181844 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -506,6 +506,27 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
WRITE_ONCE(old->pprev, LIST_POISON2);
}
+/**
+ * hlists_swap_heads_rcu - swap the lists the hlist heads point to
+ * @left: The hlist head on the left
+ * @right: The hlist head on the right
+ *
+ * The lists start out as [@left  ][node1 ... ] and
+ *                        [@right ][node2 ... ]
+ * The lists end up as    [@left  ][node2 ... ]
+ *                        [@right ][node1 ... ]
+ */
+static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
+{
+ struct hlist_node *node1 = left->first;
+ struct hlist_node *node2 = right->first;
+
+ rcu_assign_pointer(left->first, node2);
+ rcu_assign_pointer(right->first, node1);
+ WRITE_ONCE(node2->pprev, &left->first);
+ WRITE_ONCE(node1->pprev, &right->first);
+}
+
/*
* return the first or the next element in an RCU protected hlist
*/
diff --git a/include/linux/regset.h b/include/linux/regset.h
index bf0243779738..46d6ae68c455 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos,
if (*kbuf) {
memset(*kbuf, 0, copy);
*kbuf += copy;
- } else if (__clear_user(*ubuf, copy))
+ } else if (clear_user(*ubuf, copy))
return -EFAULT;
else
*ubuf += copy;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c07d7958c53..e7b7bab8b235 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -73,7 +73,7 @@ struct resource_table {
u32 ver;
u32 num;
u32 reserved[2];
- u32 offset[0];
+ u32 offset[];
} __packed;
/**
@@ -87,7 +87,7 @@ struct resource_table {
*/
struct fw_rsc_hdr {
u32 type;
- u8 data[0];
+ u8 data[];
} __packed;
/**
@@ -306,7 +306,7 @@ struct fw_rsc_vdev {
u8 status;
u8 num_of_vrings;
u8 reserved[2];
- struct fw_rsc_vdev_vring vring[0];
+ struct fw_rsc_vdev_vring vring[];
} __packed;
struct rproc;
@@ -355,6 +355,8 @@ enum rsc_handling_status {
/**
* struct rproc_ops - platform-specific device handlers
+ * @prepare: prepare device for code loading
+ * @unprepare: unprepare device after stop
* @start: power on the device and boot it
* @stop: power off the device
* @kick: kick a virtqueue (virtqueue id given as a parameter)
@@ -373,6 +375,8 @@ enum rsc_handling_status {
* panic at least the returned number of milliseconds
*/
struct rproc_ops {
+ int (*prepare)(struct rproc *rproc);
+ int (*unprepare)(struct rproc *rproc);
int (*start)(struct rproc *rproc);
int (*stop)(struct rproc *rproc);
void (*kick)(struct rproc *rproc, int vqid);
@@ -489,7 +493,7 @@ struct rproc {
struct list_head node;
struct iommu_domain *domain;
const char *name;
- char *firmware;
+ const char *firmware;
void *priv;
struct rproc_ops *ops;
struct device dev;
@@ -518,6 +522,7 @@ struct rproc {
struct list_head dump_segments;
int nb_vdev;
u8 elf_class;
+ u16 elf_machine;
};
/**
@@ -599,6 +604,11 @@ int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
+int devm_rproc_add(struct device *dev, struct rproc *rproc);
+
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
struct rproc_mem_entry *
@@ -622,6 +632,7 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
struct rproc_dump_segment *segment,
void *dest),
void *priv);
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 988d176472df..3a6adfa70fb0 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -77,7 +77,7 @@ struct anon_vma {
struct anon_vma_chain {
struct vm_area_struct *vma;
struct anon_vma *anon_vma;
- struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
struct rb_node rb; /* locked by anon_vma->rwsem */
unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index 96e26d94719f..daded9fddf36 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -12,6 +12,7 @@ struct qcom_glink;
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
struct device_node *node);
void qcom_glink_smem_unregister(struct qcom_glink *glink);
+void qcom_glink_ssr_notify(const char *ssr_name);
#else
@@ -23,7 +24,7 @@ qcom_glink_smem_register(struct device *parent,
}
static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
-
+static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
#endif
#endif
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 65b8142a7fed..e8780d4e4636 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1080,11 +1080,7 @@ struct pcr_ops {
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
- int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
- int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val);
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
- void (*full_on)(struct rtsx_pcr *pcr);
- void (*power_saving)(struct rtsx_pcr *pcr);
void (*enable_ocp)(struct rtsx_pcr *pcr);
void (*disable_ocp)(struct rtsx_pcr *pcr);
void (*init_ocp)(struct rtsx_pcr *pcr);
@@ -1108,13 +1104,6 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
#define L1_SNOOZE_TEST_EN BIT(5)
#define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6)
-enum dev_aspm_mode {
- DEV_ASPM_DYNAMIC,
- DEV_ASPM_BACKDOOR,
- DEV_ASPM_STATIC,
- DEV_ASPM_DISABLE,
-};
-
/*
* struct rtsx_cr_option - card reader option
* @dev_flags: device flags
@@ -1125,7 +1114,6 @@ enum dev_aspm_mode {
* @ltr_active_latency: ltr mode active latency
* @ltr_idle_latency: ltr mode idle latency
* @ltr_l1off_latency: ltr mode l1off latency
- * @dev_aspm_mode: device aspm mode
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
@@ -1142,7 +1130,6 @@ struct rtsx_cr_option {
u32 ltr_active_latency;
u32 ltr_idle_latency;
u32 ltr_l1off_latency;
- enum dev_aspm_mode dev_aspm_mode;
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
@@ -1320,18 +1307,6 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
return (u8 *)(pcr->host_cmds_ptr);
}
-static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
- u8 mask, u8 append)
-{
- int err;
- u8 val;
-
- err = pci_read_config_byte(pcr->pci, addr, &val);
- if (err < 0)
- return err;
- return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
-}
-
static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
{
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6eec50fb36c8..4f922afb607a 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+/*
+ * Loop over each sg element in the given sg_table object.
+ */
+#define for_each_sgtable_sg(sgt, sg, i) \
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+
+/*
+ * Loop over each sg element in the given *DMA mapped* sg_table object.
+ * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses
+ * of each element.
+ */
+#define for_each_sgtable_dma_sg(sgt, sg, i) \
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
@@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* @sglist: sglist to iterate over
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_page() to get each page pointer.
+ * Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
@@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
/**
* for_each_sg_dma_page - iterate over the pages of the given sg list
* @sglist: sglist to iterate over
- * @dma_iter: page iterator to hold current page
+ * @dma_iter: DMA page iterator to hold current page
* @dma_nents: maximum number of sg entries to iterate over, this is the value
* returned from dma_map_sg
- * @pgoffset: starting page offset
+ * @pgoffset: starting page offset (in pages)
*
* Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ * Each iteration operates on a PAGE_SIZE unit.
*/
#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
pgoffset); \
__sg_page_iter_dma_next(dma_iter);)
+/**
+ * for_each_sgtable_page - iterate over all pages in the sg_table object
+ * @sgt: sg_table object to iterate over
+ * @piter: page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all the memory pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_page(). Each iteration operates on a PAGE_SIZE unit.
+ */
+#define for_each_sgtable_page(sgt, piter, pgoffset) \
+ for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+
+/**
+ * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
+ * @sgt: sg_table object to iterate over
+ * @dma_iter: DMA page iterator to hold current page
+ * @pgoffset: starting page offset (in pages)
+ *
+ * Iterates over all the DMA-mapped pages in the buffer described by
+ * a scatterlist stored in the given sg_table object.
+ * See also for_each_sg_dma_page(). Each iteration operates on a PAGE_SIZE
+ * unit.
+ */
+#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \
+ for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
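
/*
 * Illustrative sketch (editor's addition): typical use of the new sg_table
 * iteration helpers; allocation, DMA mapping and error handling are omitted,
 * pages are assumed to be lowmem, and the function name is hypothetical.
 */
static void example_use_sgtable(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	/* CPU-side walk: visit every original entry of the table. */
	for_each_sgtable_sg(sgt, sg, i)
		memset(sg_virt(sg), 0, sg->length);

	/* Device-side walk: only the DMA-mapped entries are meaningful. */
	for_each_sgtable_dma_sg(sgt, sg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		dev_dbg(dev, "programming DMA chunk %pad + %u\n", &addr, len);
	}
}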
+
+
/*
* Mapping sg iterator
*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 57a5ce9f33c5..4ea612e9ad27 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -31,6 +31,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -1197,6 +1198,9 @@ struct task_struct {
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KCSAN
+ struct kcsan_ctx kcsan_ctx;
+#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
@@ -1247,6 +1251,9 @@ struct task_struct {
/* KCOV sequence number: */
int kcov_sequence;
+
+ /* Collect coverage from softirq context: */
+ unsigned int kcov_softirq;
#endif
#ifdef CONFIG_MEMCG
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index 95fb9e025247..00c45a0e6abe 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *);
* task), SP is the stack pointer of the first frame that should be shown in the back
* trace (or NULL if the entire call-chain of the task should be shown).
*/
-extern void show_stack(struct task_struct *task, unsigned long *sp);
+extern void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl);
extern void sched_show_task(struct task_struct *p);
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index a132d875d351..480a4d1b7dd8 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -53,7 +53,7 @@ void mmdrop(struct mm_struct *mm);
/*
* This has to be called after a get_task_mm()/mmget_not_zero()
- * followed by taking the mmap_sem for writing before modifying the
+ * followed by taking the mmap_lock for writing before modifying the
* vmas or anything the coredump pretends not to change from under it.
*
* It also has to be called when mmgrab() is used in the context of
@@ -61,14 +61,14 @@ void mmdrop(struct mm_struct *mm);
* the context of the process to run down_write() on that pinned mm.
*
* NOTE: find_extend_vma() called from GUP context is the only place
- * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * that can modify the "mm" (notably the vm_start/end) under mmap_lock
* for reading and outside the context of the process, so it is also
- * the only case that holds the mmap_sem for reading that must call
- * this function. Generally if the mmap_sem is hold for reading
+ * the only case that holds the mmap_lock for reading that must call
+ * this function. Generally, if the mmap_lock is held for reading
* there's no need of this check after get_task_mm()/mmget_not_zero().
*
* This function can be obsoleted and the check can be removed, after
- * the coredump code will hold the mmap_sem for writing before
+ * the coredump code will hold the mmap_lock for writing before
* invoking the ->core_dump methods.
*/
static inline bool mmget_still_valid(struct mm_struct *mm)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3e5b090c16d4..0ee5e696c5d8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -654,17 +654,6 @@ static inline bool thread_group_leader(struct task_struct *p)
return p->exit_signal >= 0;
}
-/* Do to the insanities of de_thread it is possible for a process
- * to have the pid of the thread group leader without actually being
- * the thread group leader. For iteration through the pids in proc
- * all we care about is that we have a task with the appropriate
- * pid, we don't actually care if we have the right task.
- */
-static inline bool has_group_leader_pid(struct task_struct *p)
-{
- return task_pid(p) == task_tgid(p);
-}
-
static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 7b4d3a49b6c5..660ac49f2b53 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -7,6 +7,13 @@
struct ctl_table;
#ifdef CONFIG_DETECT_HUNG_TASK
+
+#ifdef CONFIG_SMP
+extern unsigned int sysctl_hung_task_all_cpu_backtrace;
+#else
+#define sysctl_hung_task_all_cpu_backtrace 0
+#endif /* CONFIG_SMP */
+
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 5c873a59b387..ce2f5c28b2df 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -4,6 +4,10 @@
*
* Copyright (C) 2018 ARM Ltd.
*/
+
+#ifndef _LINUX_SCMI_PROTOCOL_H
+#define _LINUX_SCMI_PROTOCOL_H
+
#include <linux/device.h>
#include <linux/types.h>
@@ -319,3 +323,5 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {}
typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *);
int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn);
void scmi_protocol_unregister(int protocol_id);
+
+#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index ecb004711acf..afbf8037d8db 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -4,6 +4,10 @@
*
* Copyright (C) 2014 ARM Ltd.
*/
+
+#ifndef _LINUX_SCPI_PROTOCOL_H
+#define _LINUX_SCPI_PROTOCOL_H
+
#include <linux/types.h>
struct scpi_opp {
@@ -71,3 +75,5 @@ struct scpi_ops *get_scpi_ops(void);
#else
static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
#endif
+
+#endif /* _LINUX_SCPI_PROTOCOL_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 6aa229b252ce..b3f2cb21b4f2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -140,7 +140,7 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_set_creds(struct linux_binprm *bprm);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
@@ -276,7 +276,8 @@ int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
-int security_bprm_set_creds(struct linux_binprm *bprm);
+int security_bprm_creds_for_exec(struct linux_binprm *bprm);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
@@ -569,9 +570,15 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
}
-static inline int security_bprm_set_creds(struct linux_binprm *bprm)
+static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
- return cap_bprm_set_creds(bprm);
+ return 0;
+}
+
+static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
+ struct file *file)
+{
+ return cap_bprm_creds_from_file(bprm, file);
}
static inline int security_bprm_check(struct linux_binprm *bprm)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 1672cf6f7614..813614d4b71f 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -145,6 +145,25 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#define DEFINE_SEQ_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ int ret = seq_open(file, &__name ## _sops); \
+ if (!ret && inode->i_private) { \
+ struct seq_file *seq_f = file->private_data; \
+ seq_f->private = inode->i_private; \
+ } \
+ return ret; \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = seq_release, \
+}
+
#define DEFINE_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
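DEFINE_SEQ_ATTRIBUTE() above generates <name>_open() and <name>_fops from a caller-provided <name>_sops table, wiring inode->i_private through to seq_file->private. A hedged sketch of the intended usage; the example_* iterator and the debugfs file name below are hypothetical:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Hypothetical single-record iterator, for illustration only. */
static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
	return *pos == 0 ? SEQ_START_TOKEN : NULL;
}

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void example_seq_stop(struct seq_file *m, void *v)
{
}

static int example_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "private=%p\n", m->private);
	return 0;
}

static const struct seq_operations example_sops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};

/* Generates example_open() and example_fops from example_sops. */
DEFINE_SEQ_ATTRIBUTE(example);

static void example_register(struct dentry *dir, void *data)
{
	/* data lands in inode->i_private and hence in seq_file->private */
	debugfs_create_file("example", 0444, dir, data, &example_fops);
}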
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 0491d963d47e..8b97204f35a7 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -37,9 +37,25 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/processor.h>
/*
+ * The seqlock interface does not prescribe a precise sequence of read
+ * begin/retry/end. For readers, typically there is a call to
+ * read_seqcount_begin() and read_seqcount_retry(), however, there are more
+ * esoteric cases which do not follow this pattern.
+ *
+ * As a consequence, we take the following best-effort approach for raw usage
+ * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * atomics; if there is a matching read_seqcount_retry() call, no following
+ * memory operations are considered atomic. Usage of seqlocks via seqlock_t
+ * interface is not affected.
+ */
+#define KCSAN_SEQLOCK_REGION_MAX 1000
+
+/*
* Version using sequence counter only.
* This can be used when code has its own mutex protecting the
* updating starting before the write_seqcountbeqin() and ending
@@ -115,6 +131,7 @@ repeat:
cpu_relax();
goto repeat;
}
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret;
}
@@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
return ret & ~1;
}
@@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*/
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
- return unlikely(s->sequence != start);
+ kcsan_atomic_next(0);
+ return unlikely(READ_ONCE(s->sequence) != start);
}
/**
@@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
}
@@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
/**
@@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
+ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
+ * atomically, avoiding compiler optimizations; b) to document which writes are
+ * meant to propagate to the reader critical section. This is necessary because
+ * neither the writes before nor the writes after the barrier are enclosed in
+ * a seq-writer critical section that would ensure readers are aware of
+ * ongoing writes.
+ *
* seqcount_t seq;
* bool X = true, Y = false;
*
@@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
*
* void write(void)
* {
- * Y = true;
+ * WRITE_ONCE(Y, true);
*
* raw_write_seqcount_barrier(seq);
*
- * X = false;
+ * WRITE_ONCE(X, false);
* }
*/
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
+ kcsan_nestable_atomic_begin();
s->sequence++;
smp_wmb();
s->sequence++;
+ kcsan_nestable_atomic_end();
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
@@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
+ kcsan_nestable_atomic_begin();
s->sequence+=2;
+ kcsan_nestable_atomic_end();
}
typedef struct {
@@ -430,11 +463,21 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- return read_seqcount_begin(&sl->seqcount);
+ unsigned ret = read_seqcount_begin(&sl->seqcount);
+
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
+ kcsan_flat_atomic_begin();
+ return ret;
}
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
+ /*
+ * Assume not nested: read_seqretry() may be called multiple times when
+ * completing a read critical section.
+ */
+ kcsan_flat_atomic_end();
+
return read_seqcount_retry(&sl->seqcount, start);
}
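For context, the read_seqbegin()/read_seqretry() pair above wraps the canonical lockless read loop; kcsan_flat_atomic_begin()/_end() simply bracket that loop for KCSAN. A minimal sketch with a hypothetical seqlock and protected fields:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);	/* hypothetical */
static int example_x, example_y;	/* hypothetical protected data */

static void example_read(int *x, int *y)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&example_lock);
		*x = example_x;
		*y = example_y;
	} while (read_seqretry(&example_lock, seq)); /* retry if a writer interfered */
}

static void example_write(int x, int y)
{
	write_seqlock(&example_lock);
	example_x = x;
	example_y = y;
	write_sequnlock(&example_lock);
}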
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 92f5eba86052..9fd550e7946a 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/console.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
#include <linux/spinlock.h>
@@ -251,6 +252,7 @@ struct uart_port {
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
@@ -472,5 +474,5 @@ extern int uart_handle_break(struct uart_port *port);
(cflag) & CRTSCTS || \
!((cflag) & CLOCAL))
-void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf);
+int uart_get_rs485_mode(struct uart_port *port);
#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
new file mode 100644
index 000000000000..7bab5d9a3d31
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef __MTK_MMSYS_H
+#define __MTK_MMSYS_H
+
+enum mtk_ddp_comp_id;
+struct device;
+
+void mtk_mmsys_ddp_connect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+void mtk_mmsys_ddp_disconnect(struct device *dev,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+#endif /* __MTK_MMSYS_H */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 00f5826092e3..9c27a32df9bb 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -291,8 +291,8 @@ struct sdw_dpn_audio_mode {
* implementation-defined interrupts
* @max_ch: Maximum channels supported
* @min_ch: Minimum channels supported
- * @num_ch: Number of discrete channels supported
- * @ch: Discrete channels supported
+ * @num_channels: Number of discrete channels supported
+ * @channels: Discrete channels supported
* @num_ch_combinations: Number of channel combinations supported
* @ch_combinations: Channel combinations supported
* @modes: SDW mode supported
@@ -316,8 +316,8 @@ struct sdw_dpn_prop {
u32 imp_def_interrupts;
u32 max_ch;
u32 min_ch;
- u32 num_ch;
- u32 *ch;
+ u32 num_channels;
+ u32 *channels;
u32 num_ch_combinations;
u32 *ch_combinations;
u32 modes;
@@ -632,6 +632,19 @@ struct sdw_slave {
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
+/**
+ * struct sdw_master_device - SoundWire 'Master Device' representation
+ * @dev: Linux device for this Master
+ * @bus: Bus handle shortcut
+ */
+struct sdw_master_device {
+ struct device dev;
+ struct sdw_bus *bus;
+};
+
+#define dev_to_sdw_master_device(d) \
+ container_of(d, struct sdw_master_device, dev)
+
struct sdw_driver {
const char *name;
@@ -787,8 +800,10 @@ struct sdw_master_ops {
/**
* struct sdw_bus - SoundWire bus
- * @dev: Master linux device
+ * @dev: Shortcut to &bus->md->dev so existing users need not change.
+ * @md: Master device
* @link_id: Link id number, can be 0 to N, unique for each Master
+ * @id: bus system-wide unique id
* @slaves: list of Slaves on this bus
* @assigned: Bitmap for Slave device numbers.
* Bit set implies used number, bit clear implies unused number.
@@ -812,7 +827,9 @@ struct sdw_master_ops {
*/
struct sdw_bus {
struct device *dev;
+ struct sdw_master_device *md;
unsigned int link_id;
+ int id;
struct list_head slaves;
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
@@ -832,8 +849,9 @@ struct sdw_bus {
bool multi_link;
};
-int sdw_add_bus_master(struct sdw_bus *bus);
-void sdw_delete_bus_master(struct sdw_bus *bus);
+int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
+ struct fwnode_handle *fwnode);
+void sdw_bus_master_delete(struct sdw_bus *bus);
/**
* sdw_port_config: Master or Slave Port configuration
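sdw_bus_master_add() above replaces sdw_add_bus_master(): the controller now hands the core its parent device and fwnode, and bus->md/bus->dev are expected to be set up by the core from those. A hedged sketch of the updated registration flow; the probe function and its platform device are hypothetical:

#include <linux/platform_device.h>
#include <linux/soundwire/sdw.h>

static int example_controller_probe(struct platform_device *pdev)
{
	struct sdw_bus *bus;
	int ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	bus->link_id = 0;
	/* bus->ops, bus->port_ops, properties, ... would be filled in here */

	ret = sdw_bus_master_add(bus, &pdev->dev, pdev->dev.fwnode);
	if (ret)
		return ret;

	/* bus->md and bus->dev are now valid; bus->id is system-wide unique */
	return 0;
}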
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index aaa7f4267c14..52eb66cd11bc 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -5,6 +5,13 @@
#define __SOUNDWIRE_TYPES_H
extern struct bus_type sdw_bus_type;
+extern struct device_type sdw_slave_type;
+extern struct device_type sdw_master_type;
+
+static inline int is_sdw_slave(const struct device *dev)
+{
+ return dev->type == &sdw_slave_type;
+}
#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
@@ -14,7 +21,7 @@ extern struct bus_type sdw_bus_type;
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
-int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
+int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env);
/**
* module_sdw_driver() - Helper macro for registering a Soundwire driver
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
deleted file mode 100644
index 831a5de7a0e2..000000000000
--- a/include/linux/spi/l4f00242t03.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD
- *
- * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
- * Based on Marek Vasut work in lms283gf05.h
-*/
-
-#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
-#define _INCLUDE_LINUX_SPI_L4F00242T03_H_
-
-struct l4f00242t03_pdata {
- unsigned int reset_gpio;
- unsigned int data_enable_gpio;
-};
-
-#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
deleted file mode 100644
index 738a45b435f2..000000000000
--- a/include/linux/spi/mcp23s08.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-struct mcp23s08_platform_data {
- /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI
- * chipselect, each providing 1 gpio_chip instance with 8 gpios.
- * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI
- * chipselect, each providing 1 gpio_chip (port A + port B) with
- * 16 gpios.
- */
- u32 spi_present_mask;
-
- /* "base" is the number of the first GPIO or -1 for dynamic
- * assignment. If there are gaps in chip addressing the GPIO
- * numbers are sequential .. so for example if only slaves 0
- * and 3 are present, their GPIOs range from base to base+15
- * (or base+31 for s17 variant).
- */
- unsigned base;
-};
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 83bd8cb475d7..b7af8cc13eda 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -64,7 +64,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
- int skip; /* input argument: How many entries to skip */
+ unsigned int skip; /* input argument: How many entries to skip */
};
extern void save_stack_trace(struct stack_trace *trace);
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 4f6b28487f28..98da816b5fc2 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -76,7 +76,7 @@ struct rpc_auth {
unsigned int au_verfsize; /* size of reply verifier */
unsigned int au_ralign; /* words before UL header */
- unsigned int au_flags;
+ unsigned long au_flags;
const struct rpc_authops *au_ops;
rpc_authflavor_t au_flavor; /* pseudoflavor (note may
* differ from the flavor in
@@ -89,7 +89,8 @@ struct rpc_auth {
};
/* rpc_auth au_flags */
-#define RPCAUTH_AUTH_DATATOUCH 0x00000002
+#define RPCAUTH_AUTH_DATATOUCH (1)
+#define RPCAUTH_AUTH_UPDATE_SLACK (2)
struct rpc_auth_create_args {
rpc_authflavor_t pseudoflavor;
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bc07e51f20d1..bf4ac8a0268c 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -84,6 +84,7 @@ struct pf_desc {
u32 service;
char *name;
char *auth_domain_name;
+ struct auth_domain *domain;
bool datatouch;
};
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index fd390894a584..386628b36bc7 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -254,6 +254,7 @@ struct svc_rqst {
struct page * *rq_page_end; /* one past the last page */
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+ struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -299,6 +300,7 @@ struct svc_rqst {
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cbcfbd0521e3..7ed82625dc0b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,7 +48,6 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
-#define SVCRDMA_DEBUG
/* Default and maximum inline threshold sizes */
enum {
@@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt {
};
/* svc_rdma_backchannel.c */
-extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
- __be32 *rdma_resp,
- struct xdr_buf *rcvbuf);
+extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *rctxt);
/* svc_rdma_recvfrom.c */
extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 9e1e046de176..aca35ab5cff2 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u
return 0;
}
+static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt)
+{
+ return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) ||
+ (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0);
+}
+
int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index ca39a388dc22..f09c82b0a7ae 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -20,7 +20,8 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int gss_svc_init_net(struct net *net);
void gss_svc_shutdown_net(struct net *net);
-int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor,
+ char *name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 771baadaee9d..b7ac7fe68306 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,7 +28,7 @@ struct svc_sock {
/* private TCP part */
/* On-the-wire fragment header: */
- __be32 sk_reclen;
+ __be32 sk_marker;
/* As we receive a record, this includes the length received so
* far (including the fragment header): */
u32 sk_tcplen;
@@ -41,12 +41,12 @@ struct svc_sock {
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+ return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK;
}
static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
{
- return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+ return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT;
}
/*
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index f2401e45a3c2..50bb7f383a1b 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -197,6 +197,7 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init(void);
+void do_sysctl_args(void);
extern int pwrsw_enabled;
extern int unaligned_enabled;
@@ -235,6 +236,9 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
{
}
+static inline void do_sysctl_args(void)
+{
+}
#endif /* CONFIG_SYSCTL */
int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 8e159e16850f..3a582ec7a2f1 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,10 +30,10 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (*handler)(int);
- char *help_msg;
- char *action_msg;
- int enable_mask;
+ void (* const handler)(int);
+ const char * const help_msg;
+ const char * const action_msg;
+ const int enable_mask;
};
#ifdef CONFIG_MAGIC_SYSRQ
@@ -45,9 +45,9 @@ struct sysrq_key_op {
void handle_sysrq(int key);
void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, struct sysrq_key_op *op);
-struct sysrq_key_op *__sysrq_get_key_op(int key);
+int register_sysrq_key(int key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
int sysrq_mask(void);
@@ -62,12 +62,12 @@ static inline void __handle_sysrq(int key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, struct sysrq_key_op *op)
+static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
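With the const-ification above, sysrq handlers are registered as const objects. A minimal sketch; the key choice, handler and messages below are hypothetical:

#include <linux/printk.h>
#include <linux/sysrq.h>

static void example_sysrq_handler(int key)
{
	pr_info("sysrq: example handler invoked for key %d\n", key);
}

static const struct sysrq_key_op example_sysrq_op = {
	.handler	= example_sysrq_handler,
	.help_msg	= "example(x)",
	.action_msg	= "Run example handler",
	.enable_mask	= SYSRQ_ENABLE_RTNICE,
};

static int __init example_sysrq_init(void)
{
	/* register_sysrq_key() now takes a const struct sysrq_key_op * */
	return register_sysrq_key('x', &example_sysrq_op);
}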
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 1412e9cc79ce..d074302989dd 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -26,6 +26,7 @@
#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */
#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */
#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */
+#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */
struct device;
struct tee_device;
@@ -166,6 +167,22 @@ int tee_device_register(struct tee_device *teedev);
void tee_device_unregister(struct tee_device *teedev);
/**
+ * tee_session_calc_client_uuid() - Calculates client UUID for session
+ * @uuid: Resulting UUID
+ * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*)
+ * @connection_data: Connection data for opening session
+ *
+ * Calculates a UUIDv5-based client UUID from the connection method.
+ *
+ * For group-based logins, verifies that the calling process has the
+ * specified credentials.
+ *
+ * @return < 0 on failure
+ */
+int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
+ const u8 connection_data[TEE_IOCTL_UUID_LEN]);
+
+/**
* struct tee_shm - shared memory object
* @ctx: context using the object
* @paddr: physical address of the shared memory
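A hedged sketch of how tee_session_calc_client_uuid() above might be called from a TEE driver's open-session path; the wrapper function and the origin of connection_method/connection_data are assumptions:

static int example_open_session(u32 connection_method,
				const u8 connection_data[TEE_IOCTL_UUID_LEN])
{
	uuid_t client_uuid;
	int rc;

	rc = tee_session_calc_client_uuid(&client_uuid, connection_method,
					  connection_data);
	if (rc < 0)
		return rc;	/* e.g. group credentials did not check out */

	/* hand client_uuid to the secure world in the open-session message */
	return 0;
}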
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index ece782ef5466..ff397c0d5c07 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -80,7 +80,7 @@ struct tb {
int index;
enum tb_security_level security_level;
size_t nboot_acl;
- unsigned long privdata[0];
+ unsigned long privdata[];
};
extern struct bus_type tb_bus_type;
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 9de5c10293f5..c6abb79501b3 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -3,33 +3,36 @@
#define _LINUX_U64_STATS_SYNC_H
/*
- * To properly implement 64bits network statistics on 32bit and 64bit hosts,
- * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ * Protect against 64-bit values tearing on 32-bit architectures. This is
+ * typically used for statistics read/update in different subsystems.
*
* Key points :
- * 1) Use a seqcount on SMP 32bits, with low overhead.
- * 2) Whole thing is a noop on 64bit arches or UP kernels.
- * 3) Write side must ensure mutual exclusion or one seqcount update could
+ *
+ * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
+ * - The whole thing is a no-op on 64-bit architectures.
+ *
+ * Usage constraints:
+ *
+ * 1) Write side must ensure mutual exclusion, or one seqcount update could
* be lost, thus blocking readers forever.
- * If this synchronization point is not a mutex, but a spinlock or
- * spinlock_bh() or disable_bh() :
- * 3.1) Write side should not sleep.
- * 3.2) Write side should not allow preemption.
- * 3.3) If applicable, interrupts should be disabled.
*
- * 4) If reader fetches several counters, there is no guarantee the whole values
- * are consistent (remember point 1) : this is a noop on 64bit arches anyway)
+ * 2) Write side must disable preemption, or a seqcount reader can preempt the
+ * writer and also spin forever.
+ *
+ * 3) Write side must use the _irqsave() variant if other writers, or a reader,
+ * can be invoked from an IRQ context.
*
- * 5) readers are allowed to sleep or be preempted/interrupted : They perform
- * pure reads. But if they have to fetch many values, it's better to not allow
- * preemptions/interruptions to avoid many retries.
+ * 4) If a reader fetches several counters, there is no guarantee the values
+ * are consistent w.r.t. each other (remember point #2: seqcounts are not
+ * used for 64bit architectures).
*
- * 6) If counter might be written by an interrupt, readers should block interrupts.
- * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
- * read partial values)
+ * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
+ * pure reads.
*
- * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
- * u64_stats_fetch_retry_irq() helpers
+ * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
+ * might be updated from a hardirq or softirq context (remember point #1:
+ * seqcounts are not used for UP kernels). 32-bit UP stat readers could read
+ * corrupted 64-bit values otherwise.
*
* Usage :
*
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 0e215e6d0534..454c2f6672d7 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -68,19 +68,21 @@ enum uacce_q_state {
* @uacce: pointer to uacce
* @priv: private pointer
* @wait: wait queue head
- * @list: index into uacce_mm
- * @uacce_mm: the corresponding mm
+ * @list: index into uacce queues list
* @qfrs: pointer of qfr regions
* @state: queue state machine
+ * @pasid: pasid associated with the mm
+ * @handle: iommu_sva handle returned by iommu_sva_bind_device()
*/
struct uacce_queue {
struct uacce_device *uacce;
void *priv;
wait_queue_head_t wait;
struct list_head list;
- struct uacce_mm *uacce_mm;
struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
enum uacce_q_state state;
+ int pasid;
+ struct iommu_sva *handle;
};
/**
@@ -96,8 +98,8 @@ struct uacce_queue {
* @cdev: cdev of the uacce
* @dev: dev of the uacce
* @priv: private pointer of the uacce
- * @mm_list: list head of uacce_mm->list
- * @mm_lock: lock for mm_list
+ * @queues: list of queues
+ * @queues_lock: lock for queues list
* @inode: core vfs
*/
struct uacce_device {
@@ -112,27 +114,9 @@ struct uacce_device {
struct cdev *cdev;
struct device dev;
void *priv;
- struct list_head mm_list;
- struct mutex mm_lock;
- struct inode *inode;
-};
-
-/**
- * struct uacce_mm - keep track of queues bound to a process
- * @list: index into uacce_device
- * @queues: list of queues
- * @mm: the mm struct
- * @lock: protects the list of queues
- * @pasid: pasid of the uacce_mm
- * @handle: iommu_sva handle return from iommu_sva_bind_device
- */
-struct uacce_mm {
- struct list_head list;
struct list_head queues;
- struct mm_struct *mm;
- struct mutex lock;
- int pasid;
- struct iommu_sva *handle;
+ struct mutex queues_lock;
+ struct inode *inode;
};
#if IS_ENABLED(CONFIG_UACCE)
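With struct uacce_mm gone, the queues hang directly off the device and are protected by queues_lock. A minimal sketch of walking that list; the helper itself is hypothetical:

static int example_count_queues(struct uacce_device *uacce)
{
	struct uacce_queue *q;
	int n = 0;

	mutex_lock(&uacce->queues_lock);
	list_for_each_entry(q, &uacce->queues, list)
		n++;	/* q->pasid and q->handle now live in the queue itself */
	mutex_unlock(&uacce->queues_lock);

	return n;
}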
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 9861c89f93be..7bcadca22100 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,9 +2,9 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/instrumented.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
@@ -58,7 +58,7 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
return raw_copy_from_user(to, from, n);
}
@@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
check_object_size(from, n, true);
return raw_copy_to_user(to, from, n);
}
@@ -109,7 +109,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -301,62 +301,20 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
return 0;
}
-/*
- * probe_kernel_read(): safely attempt to read from a location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
-extern long __probe_kernel_read(void *dst, const void *src, size_t size);
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size);
-/*
- * probe_user_read(): safely attempt to read from a location in user space
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long probe_user_read(void *dst, const void __user *src, size_t size);
-extern long __probe_user_read(void *dst, const void __user *src, size_t size);
-/*
- * probe_kernel_write(): safely attempt to write to a location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_write(): safely attempt to write to a location in user space
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
-extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
-
-extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
- long count);
-extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
- long count);
-extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
+ long count);
+
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
+ long count);
+long strnlen_user_nofault(const void __user *unsafe_addr, long count);
/**
* probe_kernel_address(): safely attempt to read from a location
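A hedged sketch of the surviving and renamed no-fault helpers from a caller's perspective; the probed addresses and the wrapper function are placeholders:

static int example_probe(const void *kernel_ptr, const char __user *user_ptr)
{
	unsigned long val;
	char buf[64];

	/* Read a possibly-invalid kernel address; -EFAULT instead of an oops. */
	if (probe_kernel_read(&val, kernel_ptr, sizeof(val)))
		return -EFAULT;

	/* Copy a user string without taking page faults (e.g. from tracing). */
	if (strncpy_from_user_nofault(buf, user_ptr, sizeof(buf)) < 0)
		return -EFAULT;

	return 0;
}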
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index edd89b7c8f18..54167a2d28ea 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -67,6 +67,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2
#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3
+#define CI_HDRC_CONTROLLER_VBUS_EVENT 4
int (*notify_event) (struct ci_hdrc *ci, unsigned event);
struct regulator *reg_vbus;
struct usb_otg_caps ci_otg_caps;
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 8675e145ea8b..2040696d75b6 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *);
int usb_interface_id(struct usb_configuration *, struct usb_function *);
+int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f,
+ struct usb_ep *_ep, u8 alt);
+
int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
struct usb_ep *_ep);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 9411c08a5c7e..6a178177e4c9 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -42,6 +42,8 @@ struct usb_ep;
* @num_mapped_sgs: number of SG entries mapped to DMA (internal)
* @length: Length of that data
* @stream_id: The stream id, when USB3.0 bulk streams are being used
+ * @is_last: Indicates if this is the last request of a stream_id before
+ * switching to a different stream (required for DWC3 controllers).
* @no_interrupt: If true, hints that no completion irq is needed.
* Helpful sometimes with deep request queues that are handled
* directly by DMA controllers.
@@ -104,6 +106,7 @@ struct usb_request {
unsigned num_mapped_sgs;
unsigned stream_id:16;
+ unsigned is_last:1;
unsigned no_interrupt:1;
unsigned zero:1;
unsigned short_not_ok:1;
@@ -373,6 +376,7 @@ struct usb_gadget_ops {
* @connected: True if gadget is connected.
* @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag
* indicates that it supports LPM as per the LPM ECN & errata.
+ * @irq: the interrupt number for device controller.
*
* Gadgets have a mostly-portable "gadget driver" implementing device
* functions, handling all usb configurations and interfaces. Gadget
@@ -427,6 +431,7 @@ struct usb_gadget {
unsigned deactivated:1;
unsigned connected:1;
unsigned lpm_capable:1;
+ int irq;
};
#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
@@ -773,6 +778,9 @@ struct usb_gadget_string_container {
/* put descriptor for string with that id into buf (buflen >= 256) */
int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf);
+/* check if the given language identifier is valid */
+bool usb_validate_langid(u16 langid);
+
/*-------------------------------------------------------------------------*/
/* utility to simplify managing config descriptors */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index e12105ed3834..3dbb42c637c1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -479,7 +479,8 @@ extern void usb_hcd_platform_shutdown(struct platform_device *dev);
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id);
+ const struct pci_device_id *id,
+ const struct hc_driver *driver);
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index b00a2642a9cd..5daa1c49761c 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -254,6 +254,7 @@ int typec_set_mode(struct typec_port *port, int mode);
void *typec_get_drvdata(struct typec_port *port);
+int typec_find_orientation(const char *name);
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 5453af87a33e..239db794357c 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -18,6 +18,16 @@ struct vdpa_callback {
};
/**
+ * vDPA notification area
+ * @addr: base address of the notification area
+ * @size: size of the notification area
+ */
+struct vdpa_notification_area {
+ resource_size_t addr;
+ resource_size_t size;
+};
+
+/**
* vDPA device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
@@ -73,6 +83,10 @@ struct vdpa_device {
* @vdev: vdpa device
* @idx: virtqueue index
* Returns virtqueue state (last_avail_idx)
+ * @get_vq_notification: Get the notification area for a virtqueue
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns the notification area
* @get_vq_align: Get the virtqueue align requirement
* for the device
* @vdev: vdpa device
@@ -162,6 +176,8 @@ struct vdpa_config_ops {
bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+ struct vdpa_notification_area
+ (*get_vq_notification)(struct vdpa_device *vdev, u16 idx);
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
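A hedged sketch of a parent driver implementing the new get_vq_notification op; the doorbell layout and all example_* names are hypothetical:

#define EXAMPLE_DOORBELL_BASE	0xfe000000UL	/* hypothetical MMIO base */

static struct vdpa_notification_area
example_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
	struct vdpa_notification_area area;

	/* hypothetical layout: one page-sized doorbell per virtqueue */
	area.addr = EXAMPLE_DOORBELL_BASE + idx * PAGE_SIZE;
	area.size = PAGE_SIZE;

	return area;
}

static const struct vdpa_config_ops example_config_ops = {
	/* mandatory ops omitted from this sketch */
	.get_vq_notification = example_get_vq_notification,
};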
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index 0e130b5077a5..2f9dd072f11f 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -10,38 +10,8 @@
#include <linux/device.h>
#include <linux/regmap.h>
-#define VEXPRESS_SITE_MB 0
-#define VEXPRESS_SITE_DB1 1
-#define VEXPRESS_SITE_DB2 2
-#define VEXPRESS_SITE_MASTER 0xf
-
-/* Config infrastructure */
-
-void vexpress_config_set_master(u32 site);
-u32 vexpress_config_get_master(void);
-
-void vexpress_config_lock(void *arg);
-void vexpress_config_unlock(void *arg);
-
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
- u32 *position, u32 *dcc);
-
-/* Config bridge API */
-
-struct vexpress_config_bridge_ops {
- struct regmap * (*regmap_init)(struct device *dev, void *context);
- void (*regmap_exit)(struct regmap *regmap, void *context);
-};
-
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context);
-
/* Config regmap API */
struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
-/* Platform control */
-
-void vexpress_flags_set(u32 data);
-
#endif
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 5d92ee15d098..38d3c6a8dc7e 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -76,7 +76,9 @@ struct vfio_iommu_driver_ops {
struct iommu_group *group);
void (*detach_group)(void *iommu_data,
struct iommu_group *group);
- int (*pin_pages)(void *iommu_data, unsigned long *user_pfn,
+ int (*pin_pages)(void *iommu_data,
+ struct iommu_group *group,
+ unsigned long *user_pfn,
int npage, int prot,
unsigned long *phys_pfn);
int (*unpin_pages)(void *iommu_data,
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 9e2763d7c159..59bd50f99291 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -105,9 +105,9 @@ struct vringh_kiov {
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
unsigned int num, bool weak_barriers,
- struct vring_desc __user *desc,
- struct vring_avail __user *avail,
- struct vring_used __user *used);
+ vring_desc_t __user *desc,
+ vring_avail_t __user *avail,
+ vring_used_t __user *used);
static inline void vringh_iov_init(struct vringh_iov *iov,
struct iovec *iovec, unsigned num)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8b505d22fc0e..26de0cae2a0a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -62,7 +62,7 @@ enum {
WORK_CPU_UNBOUND = NR_CPUS,
/*
- * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+ * Reserve 8 bits off of pwq pointer w/ debugobjects turned off.
* This makes pwqs aligned to 256 bytes and allows 15 workqueue
* flush colors.
*/
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index f8a7e1a850fb..8e5c5bb16e2d 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -197,6 +197,7 @@ void wakeup_flusher_threads(enum wb_reason reason);
void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
+void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 14c893433139..b4d70e7568b2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -576,7 +576,7 @@ void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
*
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
- * Return: The entry which used to be at this index.
+ * Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
@@ -602,7 +602,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
*
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts.
- * Return: The entry which used to be at this index.
+ * Return: The old entry at this index or xa_err() if an error happened.
*/
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
void *entry, gfp_t gfp)
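The corrected return-value documentation above matters to callers: the stored-over entry and an encoded error share the same return value. A minimal sketch of checking it, with a hypothetical xarray:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);	/* hypothetical */

static int example_store(unsigned long index, void *entry)
{
	void *old;

	/* takes xa_lock with softirqs disabled; returns old entry or xa_err() */
	old = xa_store_bh(&example_xa, index, entry, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */

	return 0;
}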
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 2f1f8c3efb26..e5b388f5fa20 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -292,7 +292,6 @@ static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
/* The below has to be done to allow calling inet_csk_destroy_sock */
sock_set_flag(sk, SOCK_DEAD);
percpu_counter_inc(sk->sk_prot->orphan_count);
- inet_sk(sk)->inet_num = 0;
}
void inet_csk_destroy_sock(struct sock *sk);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index e1476775769c..81ee17594c32 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -392,13 +392,12 @@ void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
int neigh_proc_dointvec(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
- void __user *buffer,
+ void *buffer,
size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
proc_handler *proc_handler);
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 640724b35273..9d19c15e8545 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -57,7 +57,7 @@ extern void seg6_iptunnel_exit(void);
extern int seg6_local_init(void);
extern void seg6_local_exit(void);
-extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len);
+extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
diff --git a/include/pcmcia/cistpl.h b/include/pcmcia/cistpl.h
index 59a011101e0e..749320cc9aba 100644
--- a/include/pcmcia/cistpl.h
+++ b/include/pcmcia/cistpl.h
@@ -161,7 +161,7 @@ typedef struct cistpl_funcid_t {
typedef struct cistpl_funce_t {
u_char type;
- u_char data[0];
+ u_char data[];
} cistpl_funce_t;
/*======================================================================
@@ -255,7 +255,7 @@ typedef struct cistpl_data_serv_t {
u_char escape;
u_char encrypt;
u_char misc_features;
- u_char ccitt_code[0];
+ u_char ccitt_code[];
} cistpl_data_serv_t;
typedef struct cistpl_fax_serv_t {
@@ -265,7 +265,7 @@ typedef struct cistpl_fax_serv_t {
u_char encrypt;
u_char features_0;
u_char features_1;
- u_char ccitt_code[0];
+ u_char ccitt_code[];
} cistpl_fax_serv_t;
typedef struct cistpl_voice_serv_t {
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 058cfbc2b37f..0f1ea5f2d01c 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -11,6 +11,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
+#include <rdma/rdma_cm.h>
/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
extern struct class cm_class;
@@ -115,6 +116,7 @@ struct ib_cm_req_event_param {
unsigned int retry_count:3;
unsigned int rnr_retry_count:3;
unsigned int srq:1;
+ struct rdma_ucm_ece ece;
};
struct ib_cm_rep_event_param {
@@ -129,6 +131,7 @@ struct ib_cm_rep_event_param {
unsigned int flow_control:1;
unsigned int rnr_retry_count:3;
unsigned int srq:1;
+ struct rdma_ucm_ece ece;
};
enum ib_cm_rej_reason {
@@ -164,7 +167,8 @@ enum ib_cm_rej_reason {
IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
IB_CM_REJ_INVALID_CLASS_VERSION = 31,
IB_CM_REJ_INVALID_FLOW_LABEL = 32,
- IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
+ IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33,
+ IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED = 35,
};
struct ib_cm_rej_event_param {
@@ -369,6 +373,7 @@ struct ib_cm_req_param {
u8 rnr_retry_count;
u8 max_cm_retries;
u8 srq;
+ struct rdma_ucm_ece ece;
};
/**
@@ -392,6 +397,7 @@ struct ib_cm_rep_param {
u8 flow_control;
u8 rnr_retry_count;
u8 srq;
+ struct rdma_ucm_ece ece;
};
/**
@@ -546,6 +552,7 @@ struct ib_cm_sidr_rep_param {
u8 info_length;
const void *private_data;
u8 private_data_len;
+ struct rdma_ucm_ece ece;
};
/**
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
deleted file mode 100644
index 2fd9bfb6d648..000000000000
--- a/include/rdma/ib_fmr_pool.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#if !defined(IB_FMR_POOL_H)
-#define IB_FMR_POOL_H
-
-#include <rdma/ib_verbs.h>
-
-struct ib_fmr_pool;
-
-/**
- * struct ib_fmr_pool_param - Parameters for creating FMR pool
- * @max_pages_per_fmr:Maximum number of pages per map request.
- * @page_shift: Log2 of sizeof "pages" mapped by this fmr
- * @access:Access flags for FMRs in pool.
- * @pool_size:Number of FMRs to allocate for pool.
- * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
- * FMRs are present.
- * @flush_function:Callback called when unmapped FMRs are flushed and
- * more FMRs are possibly available for mapping
- * @flush_arg:Context passed to user's flush function.
- * @cache:If set, FMRs may be reused after unmapping for identical map
- * requests.
- */
-struct ib_fmr_pool_param {
- int max_pages_per_fmr;
- int page_shift;
- enum ib_access_flags access;
- int pool_size;
- int dirty_watermark;
- void (*flush_function)(struct ib_fmr_pool *pool,
- void *arg);
- void *flush_arg;
- unsigned cache:1;
-};
-
-struct ib_pool_fmr {
- struct ib_fmr *fmr;
- struct ib_fmr_pool *pool;
- struct list_head list;
- struct hlist_node cache_node;
- int ref_count;
- int remap_count;
- u64 io_virtual_address;
- int page_list_len;
- u64 page_list[];
-};
-
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params);
-
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
-
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
-
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address);
-
-void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
-
-#endif /* IB_FMR_POOL_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 4e62650e2127..8c093fc1bb9f 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -559,20 +559,6 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc);
/**
- * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
- * @mad_agent: MAD agent that snooped the MAD.
- * @send_buf: send MAD data buffer.
- * @mad_send_wc: Work completion information on the sent MAD. Valid
- * only for snooping that occurs on a send completion.
- *
- * Clients snooping MADs should not modify data referenced by the @send_buf
- * or @mad_send_wc.
- */
-typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc);
-
-/**
* ib_mad_recv_handler - callback handler for a received MAD.
* @mad_agent: MAD agent requesting the received MAD.
* @send_buf: Send buffer if found, else NULL
@@ -581,8 +567,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
* MADs received in response to a send request operation will be handed to
* the user before the send operation completes. All data buffers given
* to registered agents through this routine are owned by the receiving
- * client, except for snooping agents. Clients snooping MADs should not
- * modify the data referenced by @mad_recv_wc.
+ * client.
*/
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -595,7 +580,6 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
* @mr: Memory region for system memory usable for DMA.
* @recv_handler: Callback handler for a received MAD.
* @send_handler: Callback handler for a sent MAD.
- * @snoop_handler: Callback handler for snooped sent MADs.
* @context: User-specified context associated with this registration.
* @hi_tid: Access layer assigned transaction ID for this client.
* Unsolicited MADs sent by this client will have the upper 32-bits
@@ -612,7 +596,6 @@ struct ib_mad_agent {
struct ib_qp *qp;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
- ib_mad_snoop_handler snoop_handler;
void *context;
u32 hi_tid;
u32 flags;
@@ -720,36 +703,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
ib_mad_recv_handler recv_handler,
void *context,
u32 registration_flags);
-
-enum ib_mad_snoop_flags {
- /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
- /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
- IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
- /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
- IB_MAD_SNOOP_RECVS = (1<<4)
- /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
- /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
-};
-
-/**
- * ib_register_mad_snoop - Register to snoop sent and received MADs.
- * @device: The device to register with.
- * @port_num: The port on the specified device to use.
- * @qp_type: Specifies which QP traffic to snoop. Must be either
- * IB_QPT_SMI or IB_QPT_GSI.
- * @mad_snoop_flags: Specifies information where snooping occurs.
- * @send_handler: The callback routine invoked for a snooped send.
- * @recv_handler: The callback routine invoked for a snooped receive.
- * @context: User specified context associated with the registration.
- */
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context);
-
/**
* ib_unregister_mad_agent - Unregisters a client from using MAD services.
* @mad_agent: Corresponding MAD registration request to deregister.
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index bbc5cfb57cd2..ef2f3986c493 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -100,7 +100,8 @@ void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...) \
dynamic_ibdev_dbg(__dev, format, ##args)
#else
@@ -133,7 +134,8 @@ do { \
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
do { \
@@ -305,7 +307,7 @@ enum ib_device_cap_flags {
IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
+ IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
/* The device supports padding incoming writes to cacheline. */
IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
@@ -430,8 +432,6 @@ struct ib_device_attr {
int max_mcast_qp_attach;
int max_total_mcast_qp_attach;
int max_ah;
- int max_fmr;
- int max_map_per_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
@@ -462,6 +462,11 @@ enum ib_mtu {
IB_MTU_4096 = 5
};
+enum opa_mtu {
+ OPA_MTU_8192 = 6,
+ OPA_MTU_10240 = 7
+};
+
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
switch (mtu) {
@@ -488,6 +493,28 @@ static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
return IB_MTU_256;
}
+static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
+{
+ switch (mtu) {
+ case OPA_MTU_8192:
+ return 8192;
+ case OPA_MTU_10240:
+ return 10240;
+ default:
+		return ib_mtu_enum_to_int((enum ib_mtu)mtu);
+ }
+}
+
+static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
+{
+ if (mtu >= 10240)
+ return OPA_MTU_10240;
+ else if (mtu >= 8192)
+ return OPA_MTU_8192;
+ else
+ return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
+}
+
enum ib_port_state {
IB_PORT_NOP = 0,
IB_PORT_DOWN = 1,
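
A quick standalone illustration of how the OPA MTU enum values added above extend the IB range (plain-C sketch; opa_mtu_from_int mirrors the cutoffs of opa_mtu_int_to_enum, and the IB fallback is stubbed out rather than calling the real ib_mtu_int_to_enum):

#include <stdio.h>

enum opa_mtu { OPA_MTU_8192 = 6, OPA_MTU_10240 = 7 };

/* same cutoffs as opa_mtu_int_to_enum(); anything below 8192 is left to IB */
static int opa_mtu_from_int(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	if (mtu >= 8192)
		return OPA_MTU_8192;
	return 5;	/* stub: the real helper falls back to ib_mtu_int_to_enum() */
}

int main(void)
{
	/* prints "7 6 5": 10240 and 9000 map to OPA enums, 4096 stays in the IB range */
	printf("%d %d %d\n", opa_mtu_from_int(10240), opa_mtu_from_int(9000),
	       opa_mtu_from_int(4096));
	return 0;
}
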
@@ -651,6 +678,7 @@ struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
enum ib_mtu active_mtu;
+ u32 phys_mtu;
int gid_tbl_len;
unsigned int ip_gids:1;
/* This is the value from PortInfo CapabilityMask, defined by IBA */
@@ -880,6 +908,12 @@ struct ib_mr_status {
*/
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
+struct rdma_ah_init_attr {
+ struct rdma_ah_attr *ah_attr;
+ u32 flags;
+ struct net_device *xmit_slave;
+};
+
enum rdma_ah_attr_type {
RDMA_AH_ATTR_TYPE_UNDEFINED,
RDMA_AH_ATTR_TYPE_IB,
@@ -1006,9 +1040,9 @@ enum ib_cq_notify_flags {
};
enum ib_srq_type {
- IB_SRQT_BASIC,
- IB_SRQT_XRC,
- IB_SRQT_TM,
+ IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
+ IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
+ IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};
static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
@@ -1077,16 +1111,16 @@ enum ib_qp_type {
IB_QPT_SMI,
IB_QPT_GSI,
- IB_QPT_RC,
- IB_QPT_UC,
- IB_QPT_UD,
+ IB_QPT_RC = IB_UVERBS_QPT_RC,
+ IB_QPT_UC = IB_UVERBS_QPT_UC,
+ IB_QPT_UD = IB_UVERBS_QPT_UD,
IB_QPT_RAW_IPV6,
IB_QPT_RAW_ETHERTYPE,
- IB_QPT_RAW_PACKET = 8,
- IB_QPT_XRC_INI = 9,
- IB_QPT_XRC_TGT,
+ IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
+ IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
+ IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
IB_QPT_MAX,
- IB_QPT_DRIVER = 0xFF,
+ IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1105,17 +1139,21 @@ enum ib_qp_type {
enum ib_qp_create_flags {
IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
IB_QP_CREATE_MANAGED_SEND = 1 << 3,
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
- /* FREE = 1 << 7, */
- IB_QP_CREATE_SCATTER_FCS = 1 << 8,
- IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
+ IB_QP_CREATE_NETDEV_USE = 1 << 7,
+ IB_QP_CREATE_SCATTER_FCS =
+ IB_UVERBS_QP_CREATE_SCATTER_FCS,
+ IB_QP_CREATE_CVLAN_STRIPPING =
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
- IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING =
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1267,6 +1305,7 @@ struct ib_qp_attr {
u8 alt_port_num;
u8 alt_timeout;
u32 rate_limit;
+ struct net_device *xmit_slave;
};
enum ib_wr_opcode {
@@ -1436,12 +1475,6 @@ enum ib_mr_rereg_flags {
IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};
-struct ib_fmr_attr {
- int max_pages;
- int max_maps;
- u8 page_shift;
-};
-
struct ib_umem;
enum rdma_remove_reason {
@@ -1456,6 +1489,11 @@ enum rdma_remove_reason {
RDMA_REMOVE_DRIVER_REMOVE,
/* uobj is being cleaned-up before being committed */
RDMA_REMOVE_ABORT,
+ /*
+ * uobj has been fully created, with the uobj->object set, but is being
+	 * cleaned up before being committed
+ */
+ RDMA_REMOVE_ABORT_HWOBJ,
};
struct ib_rdmacg_object {
@@ -1544,10 +1582,12 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
IB_POLL_SOFTIRQ, /* poll from softirq context */
IB_POLL_WORKQUEUE, /* poll from workqueue */
IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
+ IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
+
+ IB_POLL_DIRECT, /* caller context, no hw completions */
};
struct ib_cq {
@@ -1557,9 +1597,11 @@ struct ib_cq {
void (*event_handler)(struct ib_event *, void *);
void *cq_context;
int cqe;
+ unsigned int cqe_used;
atomic_t usecnt; /* count number of work queues */
enum ib_poll_context poll_ctx;
struct ib_wc *wc;
+ struct list_head pool_entry;
union {
struct irq_poll iop;
struct work_struct work;
@@ -1569,7 +1611,9 @@ struct ib_cq {
/* updated only by trace points */
ktime_t timestamp;
- bool interrupt;
+ u8 interrupt:1;
+ u8 shared:1;
+ unsigned int comp_vector;
/*
* Implementation details of the RDMA core, don't use in drivers:
@@ -1614,7 +1658,7 @@ enum ib_raw_packet_caps {
};
enum ib_wq_type {
- IB_WQT_RQ
+ IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};
enum ib_wq_state {
@@ -1637,10 +1681,11 @@ struct ib_wq {
};
enum ib_wq_flags {
- IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
- IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
- IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
- IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
+ IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
+ IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
+ IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
+ IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};
struct ib_wq_init_attr {
@@ -1804,14 +1849,6 @@ struct ib_mw {
enum ib_mw_type type;
};
-struct ib_fmr {
- struct ib_device *device;
- struct ib_pd *pd;
- struct list_head list;
- u32 lkey;
- u32 rkey;
-};
-
/* Supported steering options */
enum ib_flow_attr_type {
/* steering according to rule specifications */
@@ -2198,6 +2235,7 @@ struct rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
u8 port_num;
+ int mtu;
/*
* cleanup function must be specified.
@@ -2403,8 +2441,8 @@ struct ib_device_ops {
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
- int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+ int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
void (*destroy_ah)(struct ib_ah *ah, u32 flags);
@@ -2453,12 +2491,6 @@ struct ib_device_ops {
struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
- struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
- int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
- u64 iova);
- int (*unmap_fmr)(struct list_head *fmr_list);
- int (*dealloc_fmr)(struct ib_fmr *fmr);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -2687,6 +2719,10 @@ struct ib_device {
#endif
u32 index;
+
+ spinlock_t cq_pools_lock;
+ struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
+
struct rdma_restrack_root *res;
const struct uapi_definition *driver_def;
@@ -2709,12 +2745,13 @@ struct ib_device {
/* Used by iWarp CM */
char iw_ifname[IFNAMSIZ];
u32 iw_driver_flags;
+ u32 lag_flags;
};
struct ib_client_nl_info;
struct ib_client {
const char *name;
- void (*add) (struct ib_device *);
+ int (*add)(struct ib_device *ibdev);
void (*remove)(struct ib_device *, void *client_data);
void (*rename)(struct ib_device *dev, void *client_data);
int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
@@ -3355,6 +3392,55 @@ static inline unsigned int rdma_find_pg_bit(unsigned long addr,
return __fls(pgsz);
}
+/**
+ * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
+ * @device: Device
+ * @port_num: 1 based Port number
+ *
+ * Return true if the port is an Intel OPA port, false if not.
+ */
+static inline bool rdma_core_cap_opa_port(struct ib_device *device,
+ u32 port_num)
+{
+ return (device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
+}
+
+/**
+ * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
+ * @device: Device
+ * @port: Port number
+ * @mtu: enum value of MTU
+ *
+ * Return the MTU size supported by the port as an integer value, or -1 if the
+ * MTU enum value is not supported.
+ */
+static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
+ int mtu)
+{
+ if (rdma_core_cap_opa_port(device, port))
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
+ else
+ return ib_mtu_enum_to_int((enum ib_mtu)mtu);
+}
+
+/**
+ * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
+ * @device: Device
+ * @port: Port number
+ * @attr: port attribute
+ *
+ * Return the MTU size supported by the port as an integer value.
+ */
+static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
+ struct ib_port_attr *attr)
+{
+ if (rdma_core_cap_opa_port(device, port))
+ return attr->phys_mtu;
+ else
+ return ib_mtu_enum_to_int(attr->max_mtu);
+}
+
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
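
A hedged sketch of how a kernel consumer might use the helpers above to obtain a byte-size MTU that works on both OPA and IB ports (hypothetical driver snippet; ib_query_port() is the existing verbs call, example_port_mtu_bytes() is made up for illustration and is not compilable outside a kernel tree):

#include <rdma/ib_verbs.h>

/* Hypothetical helper: return the port MTU in bytes, or a negative errno. */
static int example_port_mtu_bytes(struct ib_device *ibdev, u8 port_num)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ibdev, port_num, &attr);
	if (ret)
		return ret;

	/*
	 * On OPA ports this returns attr.phys_mtu (which can exceed 4096);
	 * on IB ports it falls back to ib_mtu_enum_to_int(attr.max_mtu).
	 */
	return rdma_mtu_from_attr(ibdev, port_num, &attr);
}
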
@@ -3551,21 +3637,18 @@ static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
return rdma_destroy_ah_user(ah, flags, NULL);
}
-/**
- * ib_create_srq - Creates a SRQ associated with the specified protection
- * domain.
- * @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the
- * SRQ. If SRQ creation succeeds, then the attributes are updated to
- * the actual capabilities of the created SRQ.
- *
- * srq_attr->max_wr and srq_attr->max_sge are read the determine the
- * requested size of the SRQ, and set to the actual values allocated
- * on return. If ib_create_srq() succeeds, then max_wr and max_sge
- * will always be at least as large as the requested values.
- */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr);
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata);
+static inline struct ib_srq *
+ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
+{
+ if (!pd->device->ops.create_srq)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
+}
/**
* ib_modify_srq - Modifies the attributes for the specified SRQ.
@@ -3816,6 +3899,8 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
* ib_free_cq_user - Free kernel/user CQ
* @cq: The CQ to free
* @udata: Valid user data or NULL for kernel objects
+ *
+ * NOTE: This function shouldn't be called on shared CQs.
*/
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
@@ -3941,6 +4026,12 @@ static inline int ib_req_notify_cq(struct ib_cq *cq,
return cq->device->ops.req_notify_cq(cq, flags);
}
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx);
+
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
+
/**
* ib_req_ncomp_notif - Request completion notification when there are
* at least the specified number of unreaped completions on the CQ.
@@ -4209,45 +4300,6 @@ static inline u32 ib_inc_rkey(u32 rkey)
}
/**
- * ib_alloc_fmr - Allocates a unmapped fast memory region.
- * @pd: The protection domain associated with the unmapped region.
- * @mr_access_flags: Specifies the memory access rights.
- * @fmr_attr: Attributes of the unmapped region.
- *
- * A fast memory region must be mapped before it can be used as part of
- * a work request.
- */
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-/**
- * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
- * @fmr: The fast memory region to associate with the pages.
- * @page_list: An array of physical pages to map to the fast memory region.
- * @list_len: The number of pages in page_list.
- * @iova: The I/O virtual address to use with the mapped region.
- */
-static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list, int list_len,
- u64 iova)
-{
- return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
-}
-
-/**
- * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
- * @fmr_list: A linked list of fast memory regions to unmap.
- */
-int ib_unmap_fmr(struct list_head *fmr_list);
-
-/**
- * ib_dealloc_fmr - Deallocates a fast memory region.
- * @fmr: The fast memory region to deallocate.
- */
-int ib_dealloc_fmr(struct ib_fmr *fmr);
-
-/**
* ib_attach_mcast - Attaches the specified QP to a multicast group.
* @qp: QP to attach to the multicast group. The QP must be type
* IB_QPT_UD.
@@ -4701,4 +4753,48 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
+
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
+#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
+
+/**
+ * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
+ * on the flow_label
+ *
+ * This function converts the 20-bit flow_label input to a valid 14-bit RoCE v2
+ * UDP source port value. All RoCE v2 drivers should use this same convention.
+ */
+static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
+{
+ u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
+
+ fl_low ^= fl_high >> 14;
+ return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
+}
+
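
A standalone re-implementation of the folding described above, useful for checking the mapping by hand (illustration only; the constants are copied from the definitions above):

#include <stdint.h>
#include <stdio.h>

static uint16_t flow_label_to_udp_sport(uint32_t fl)
{
	uint32_t fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;

	fl_low ^= fl_high >> 14;		/* fold the top 6 bits into the low 14 */
	return (uint16_t)(fl_low | 0xC000);	/* force the valid RoCE v2 port range */
}

int main(void)
{
	/* 0xABCDE: low 14 bits 0x3CDE, high 6 bits 0x2A, folded 0x3CF4 -> 0xFCF4 */
	printf("0x%04x\n", flow_label_to_udp_sport(0xABCDE));
	return 0;
}
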
+/**
+ * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on
+ *                        the local and remote qpn values
+ *
+ * This function folds the product of the two QPNs, 24 bits each, into a
+ * 20-bit result.
+ *
+ * The resulting flow_label is symmetric in the local and remote qpn values,
+ * so the requester and the responder calculate the same flow_label for a
+ * given connection.
+ *
+ * This helper should be used by drivers when the upper layer provides a zero
+ * flow_label value, to improve the entropy of RDMA traffic in the network.
+ */
+static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
+{
+ u64 v = (u64)lqpn * rqpn;
+
+ v ^= v >> 20;
+ v ^= v >> 40;
+
+ return (u32)(v & IB_GRH_FLOWLABEL_MASK);
+}
#endif /* IB_VERBS_H */
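
The symmetry claim above is easy to demonstrate with a standalone copy of the folding (illustration only; multiplication commutes, so swapping the local and remote QPNs yields the same 20-bit label):

#include <stdint.h>
#include <stdio.h>

static uint32_t calc_flow_label(uint32_t lqpn, uint32_t rqpn)
{
	uint64_t v = (uint64_t)lqpn * rqpn;

	v ^= v >> 20;
	v ^= v >> 40;
	return (uint32_t)(v & 0x000FFFFF);	/* keep 20 bits, as IB_GRH_FLOWLABEL_MASK does */
}

int main(void)
{
	/* both printed values are identical */
	printf("0x%05x\n", calc_flow_label(0x000123, 0x00abcd));
	printf("0x%05x\n", calc_flow_label(0x00abcd, 0x000123));
	return 0;
}
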
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
index 269904425d3f..960c86bec76c 100644
--- a/include/rdma/ibta_vol1_c12.h
+++ b/include/rdma/ibta_vol1_c12.h
@@ -38,6 +38,7 @@
/* Table 106 REQ Message Contents */
#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_VENDOR_ID CM_FIELD32_LOC(struct cm_req_msg, 5, 24)
#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
@@ -119,8 +120,11 @@ CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_VENDOR_ID_H CM_FIELD8_LOC(struct cm_rep_msg, 15, 8)
#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_VENDOR_ID_M CM_FIELD8_LOC(struct cm_rep_msg, 19, 8)
#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_rep_msg, 23, 8)
#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
@@ -201,7 +205,9 @@ CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \
CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_VENDOR_ID_H CM_FIELD16_LOC(struct cm_sidr_rep_msg, 6, 16)
#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_sidr_rep_msg, 11, 8)
#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
#define CM_SIDR_REP_ADDITIONAL_INFORMATION \
diff --git a/include/rdma/lag.h b/include/rdma/lag.h
new file mode 100644
index 000000000000..7c06ec9b2eef
--- /dev/null
+++ b/include/rdma/lag.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_LAG_H_
+#define _RDMA_LAG_H_
+
+#include <net/lag.h>
+
+struct ib_device;
+struct rdma_ah_attr;
+
+enum rdma_lag_flags {
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES = 1 << 0
+};
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave);
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags);
+
+#endif /* _RDMA_LAG_H_ */
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index bdbfe25d3854..0d9e6d74c385 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014-2020 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -139,14 +139,6 @@
#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1)
#define OPA_CAP_MASK3_IsVLrSupported (1 << 0)
-/**
- * new MTU values
- */
-enum {
- OPA_MTU_8192 = 6,
- OPA_MTU_10240 = 7,
-};
-
enum {
OPA_PORT_PHYS_CONF_DISCONNECTED = 0,
OPA_PORT_PHYS_CONF_STANDARD = 1,
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index e90b149fe92a..6f244e759b4f 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -1,7 +1,7 @@
#ifndef _OPA_VNIC_H
#define _OPA_VNIC_H
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -132,7 +132,7 @@ struct opa_vnic_stats {
static inline bool rdma_cap_opa_vnic(struct ib_device *device)
{
return !!(device->attrs.device_cap_flags &
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC);
+ IB_DEVICE_RDMA_NETDEV_OPA);
}
#endif /* _OPA_VNIC_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 71f48cfdc24c..939d7abe026f 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -111,6 +111,7 @@ struct rdma_cm_event {
struct rdma_conn_param conn;
struct rdma_ud_param ud;
} param;
+ struct rdma_ucm_ece ece;
};
struct rdma_cm_id;
@@ -264,6 +265,9 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
*/
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece);
+
/**
* rdma_listen - This function is called by the passive side to
* listen for incoming connection requests.
@@ -276,6 +280,9 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
const char *caller);
+int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller, struct rdma_ucm_ece *ece);
+
/**
* rdma_accept - Called to accept a connection request or response.
* @id: Connection identifier associated with the request.
@@ -313,7 +320,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
* rdma_reject - Called to reject a connection request or response.
*/
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len);
+ u8 private_data_len, u8 reason);
/**
* rdma_disconnect - This function disconnects the associated QP and
@@ -390,14 +397,6 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
int reason);
/**
- * rdma_is_consumer_reject - return true if the consumer rejected the connect
- * request.
- * @id: Communication identifier that received the REJECT event.
- * @reason: Value returned in the REJECT event status field.
- */
-bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
-
-/**
* rdma_consumer_reject_data - return the consumer reject private data and
* length, if any.
* @id: Communication identifier that received the REJECT event.
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 5fc10108703a..c4369a6c2951 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
#define DEF_RDMAVT_INCQP_H
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,33 @@
#define RVT_R_COMM_EST 0x10
/*
+ * If a packet's QP[23:16] bits match this value, then it is
+ * a PSM packet and the hardware will expect a KDETH header
+ * following the BTH.
+ */
+#define RVT_KDETH_QP_PREFIX 0x80
+#define RVT_KDETH_QP_SUFFIX 0xffff
+#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
+#define RVT_KDETH_QP_PREFIX_SHIFT 16
+#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
+ RVT_KDETH_QP_PREFIX_SHIFT)
+#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
+
+/*
+ * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
+ * prefix value, then it is an AIP packet with a DETH containing the entropy
+ * value in byte 4 following the BTH.
+ */
+#define RVT_AIP_QP_PREFIX 0x81
+#define RVT_AIP_QP_SUFFIX 0xffff
+#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
+#define RVT_AIP_QP_PREFIX_SHIFT 16
+#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
+ RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
+
+/*
* Bit definitions for s_flags.
*
* RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
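
A small standalone check of the classification rule described in the new comments above (hypothetical helper, not part of the patch; the constant values mirror the RVT_KDETH_* definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KDETH_QP_PREFIX		0x80
#define KDETH_QP_PREFIX_MASK	0x00ff0000
#define KDETH_QP_PREFIX_SHIFT	16

/* true when destination QPN bits [23:16] select the KDETH/PSM range */
static bool qpn_is_kdeth(uint32_t qpn)
{
	return ((qpn & KDETH_QP_PREFIX_MASK) >> KDETH_QP_PREFIX_SHIFT) ==
	       KDETH_QP_PREFIX;
}

int main(void)
{
	printf("%d %d\n", qpn_is_kdeth(0x801234), qpn_is_kdeth(0x701234));	/* 1 0 */
	return 0;
}
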
@@ -440,7 +467,7 @@ struct rvt_qp {
/*
* This sge list MUST be last. Do not add anything below here.
*/
- struct rvt_sge r_sg_list[0] /* verified SGEs */
+ struct rvt_sge r_sg_list[] /* verified SGEs */
____cacheline_aligned_in_smp;
};
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9f3b1e004046..86de10ea30af 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -420,9 +420,9 @@ struct uapi_definition {
.scope = UAPI_SCOPE_OBJECT, \
.needs_fn_offset = \
offsetof(struct ib_device_ops, ibdev_fn) + \
- BUILD_BUG_ON_ZERO( \
- sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
- sizeof(void *)), \
+ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \
+ ibdev_fn) != \
+ sizeof(void *)), \
}
/*
@@ -435,9 +435,9 @@ struct uapi_definition {
.scope = UAPI_SCOPE_METHOD, \
.needs_fn_offset = \
offsetof(struct ib_device_ops, ibdev_fn) + \
- BUILD_BUG_ON_ZERO( \
- sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
- sizeof(void *)), \
+ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \
+ ibdev_fn) != \
+ sizeof(void *)), \
}
/* Call a function to determine if the entire object is supported or not */
@@ -491,8 +491,7 @@ struct uapi_definition {
*/
#define UVERBS_ATTR_STRUCT(_type, _last) \
.zero_trailing = 1, \
- UVERBS_ATTR_SIZE(((uintptr_t)(&((_type *)0)->_last + 1)), \
- sizeof(_type))
+ UVERBS_ATTR_SIZE(offsetofend(_type, _last), sizeof(_type))
/*
* Specifies at least min_len bytes must be passed in, but the amount can be
* larger, up to the protocol maximum size. No check for zeroing is done.
@@ -737,6 +736,9 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
return attr->ptr_attr.len;
}
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx);
+
/*
* uverbs_attr_ptr_get_array_size() - Get array size pointer by a ptr
* attribute.
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 325fdaa3bb66..bf0392ae15eb 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -107,7 +107,7 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
static inline void uobj_alloc_abort(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs)
{
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, false);
}
static inline struct ib_uobject *
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index f1cbdae67250..c15b298aa62f 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -139,7 +139,8 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid);
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index a5d8ae49198c..4726c1bbec65 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -324,7 +324,7 @@ struct ssp_response_iu {
__be32 response_data_len;
u8 resp_data[0];
- u8 sense_data[0];
+ u8 sense_data[];
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -346,7 +346,7 @@ struct ssp_command_iu {
u8 add_cdb_len:6;
u8 cdb[16];
- u8 add_cdb[0];
+ u8 add_cdb[];
} __attribute__ ((packed));
struct xfer_rdy_iu {
@@ -555,7 +555,7 @@ struct ssp_response_iu {
__be32 response_data_len;
u8 resp_data[0];
- u8 sense_data[0];
+ u8 sense_data[];
} __attribute__ ((packed));
struct ssp_command_iu {
@@ -577,7 +577,7 @@ struct ssp_command_iu {
u8 _r_c:2;
u8 cdb[16];
- u8 add_cdb[0];
+ u8 add_cdb[];
} __attribute__ ((packed));
struct xfer_rdy_iu {
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index f93c0b800790..e76bac4d14c5 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -68,7 +68,6 @@ struct scsi_pointer {
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
- struct list_head list; /* scsi_cmnd participates in queue lists */
struct list_head eh_entry; /* entry for the host eh_cmd_q */
struct delayed_work abort_work;
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 7800e12ee042..3025aca3c358 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -10,6 +10,7 @@
#include <linux/of_device.h>
struct rpi_firmware;
+struct pci_dev;
enum rpi_firmware_property_status {
RPI_FIRMWARE_STATUS_REQUEST = 0,
@@ -90,7 +91,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045,
RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049,
RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050,
-
+ RPI_FIRMWARE_NOTIFY_XHCI_RESET = 0x00030058,
/* Dispmanx TAGS */
RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001,
@@ -141,6 +142,7 @@ int rpi_firmware_property(struct rpi_firmware *fw,
int rpi_firmware_property_list(struct rpi_firmware *fw,
void *data, size_t tag_size);
struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+int rpi_firmware_init_vl805(struct pci_dev *pdev);
#else
static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
void *data, size_t len)
@@ -158,6 +160,11 @@ static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware
{
return NULL;
}
+
+static inline int rpi_firmware_init_vl805(struct pci_dev *pdev)
+{
+ return 0;
+}
#endif
#endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index e282ac01ec08..3feddfec9f87 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -307,7 +307,7 @@ struct qe_firmware {
u8 revision; /* The microcode version revision */
u8 padding; /* Reserved, for alignment */
u8 reserved[4]; /* Reserved, for future expansion */
- } __attribute__ ((packed)) microcode[1];
+ } __packed microcode[];
/* All microcode binaries should be located here */
/* CRC32 should be located here, after the microcode binaries */
} __attribute__ ((packed));
diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h
new file mode 100644
index 000000000000..42d6aeb951fa
--- /dev/null
+++ b/include/soc/imx/cpu.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __IMX_CPU_H__
+#define __IMX_CPU_H__
+
+#define MXC_CPU_MX1 1
+#define MXC_CPU_MX21 21
+#define MXC_CPU_MX25 25
+#define MXC_CPU_MX27 27
+#define MXC_CPU_MX31 31
+#define MXC_CPU_MX35 35
+#define MXC_CPU_MX51 51
+#define MXC_CPU_MX53 53
+#define MXC_CPU_IMX6SL 0x60
+#define MXC_CPU_IMX6DL 0x61
+#define MXC_CPU_IMX6SX 0x62
+#define MXC_CPU_IMX6Q 0x63
+#define MXC_CPU_IMX6UL 0x64
+#define MXC_CPU_IMX6ULL 0x65
+/* virtual cpu id for i.mx6ulz */
+#define MXC_CPU_IMX6ULZ 0x6b
+#define MXC_CPU_IMX6SLL 0x67
+#define MXC_CPU_IMX7D 0x72
+#define MXC_CPU_IMX7ULP 0xff
+
+#define MXC_CPU_VFx10 0x010
+#define MXC_CPU_VF500 0x500
+#define MXC_CPU_VF510 (MXC_CPU_VF500 | MXC_CPU_VFx10)
+#define MXC_CPU_VF600 0x600
+#define MXC_CPU_VF610 (MXC_CPU_VF600 | MXC_CPU_VFx10)
+
+#ifndef __ASSEMBLY__
+extern unsigned int __mxc_cpu_type;
+#endif
+
+#endif
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index af9722223925..c8bb56e6852a 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -4,6 +4,7 @@
#ifndef __QCOM_COMMAND_DB_H__
#define __QCOM_COMMAND_DB_H__
+#include <linux/err.h>
enum cmd_db_hw_type {
CMD_DB_HW_INVALID = 0,
diff --git a/include/sound/control.h b/include/sound/control.h
index 11feeee31e35..aeaed2a05bae 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -75,7 +75,7 @@ struct snd_kcontrol {
unsigned long private_value;
void *private_data;
void (*private_free)(struct snd_kcontrol *kcontrol);
- struct snd_kcontrol_volatile vd[0]; /* volatile data */
+ struct snd_kcontrol_volatile vd[]; /* volatile data */
};
#define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list)
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 225154a4f2ed..d16a4229209b 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -288,6 +288,10 @@ struct hda_codec {
#define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev)
#define hda_codec_dev(_dev) (&(_dev)->core.dev)
+#define hdac_to_hda_priv(_hdac) \
+ container_of(_hdac, struct hdac_hda_priv, codec.core)
+#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
+
#define list_for_each_codec(c, bus) \
list_for_each_entry(c, &(bus)->core.codec_list, core.list)
#define list_for_each_codec_safe(c, n, bus) \
@@ -362,13 +366,6 @@ struct hda_verb {
void snd_hda_sequence_write(struct hda_codec *codec,
const struct hda_verb *seq);
-/* unsolicited event */
-static inline void
-snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
-{
- snd_hdac_bus_queue_event(&bus->core, res, res_ex);
-}
-
/* cached write */
static inline int
snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid,
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index affedc2801c4..c1f78d9a6e47 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -207,8 +207,8 @@ static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0;
static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
static inline void snd_hdac_enter_pm(struct hdac_device *codec) {}
static inline void snd_hdac_leave_pm(struct hdac_device *codec) {}
-static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return 0; }
-static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return 1; }
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return false; }
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return true; }
#endif
/*
@@ -364,16 +364,16 @@ struct hdac_bus {
/* link management */
struct list_head hlink_list;
bool cmd_dma_state;
+
+ /* factor used to derive STRIPE control value */
+ unsigned int sdo_limit;
};
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_bus_ops *ops);
void snd_hdac_bus_exit(struct hdac_bus *bus);
-int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
- unsigned int cmd, unsigned int *res);
int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr,
unsigned int cmd, unsigned int *res);
-void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
{
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index f657fd8fc0ad..743c2f442280 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -50,7 +50,7 @@ enum nhlt_device_type {
struct nhlt_specific_cfg {
u32 size;
- u8 caps[0];
+ u8 caps[];
} __packed;
struct nhlt_fmt_cfg {
@@ -60,7 +60,7 @@ struct nhlt_fmt_cfg {
struct nhlt_fmt {
u8 fmt_count;
- struct nhlt_fmt_cfg fmt_config[0];
+ struct nhlt_fmt_cfg fmt_config[];
} __packed;
struct nhlt_endpoint {
@@ -80,7 +80,7 @@ struct nhlt_endpoint {
struct nhlt_acpi_table {
struct acpi_table_header header;
u8 endpoint_count;
- struct nhlt_endpoint desc[0];
+ struct nhlt_endpoint desc[];
} __packed;
struct nhlt_resource_desc {
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 392e953d561e..d2e9e3b4d7ea 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (C) 2013-15, Intel Corporation. All rights reserved.
*/
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
new file mode 100644
index 000000000000..4f2cc4fb56b7
--- /dev/null
+++ b/include/sound/soc-card.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * soc-card.h
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __SOC_CARD_H
+#define __SOC_CARD_H
+
+enum snd_soc_card_subclass {
+ SND_SOC_CARD_CLASS_INIT = 0,
+ SND_SOC_CARD_CLASS_RUNTIME = 1,
+};
+
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name);
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack,
+ struct snd_soc_jack_pin *pins, unsigned int num_pins);
+
+int snd_soc_card_suspend_pre(struct snd_soc_card *card);
+int snd_soc_card_suspend_post(struct snd_soc_card *card);
+int snd_soc_card_resume_pre(struct snd_soc_card *card);
+int snd_soc_card_resume_post(struct snd_soc_card *card);
+
+int snd_soc_card_probe(struct snd_soc_card *card);
+int snd_soc_card_late_probe(struct snd_soc_card *card);
+int snd_soc_card_remove(struct snd_soc_card *card);
+
+int snd_soc_card_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level);
+int snd_soc_card_set_bias_level_post(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level);
+
+int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+
+/* device driver data */
+static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
+ void *data)
+{
+ card->drvdata = data;
+}
+
+static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
+{
+ return card->drvdata;
+}
+
+static inline
+struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
+ const char *dai_name)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ for_each_card_rtds(card, rtd) {
+ if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name))
+ return asoc_rtd_to_codec(rtd, 0);
+ }
+
+ return NULL;
+}
+
+#endif /* __SOC_CARD_H */
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 154d02fbbfed..5663891148e3 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -25,6 +25,44 @@
order++)
/* component interface */
+struct snd_compress_ops {
+ int (*open)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream);
+ int (*free)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream);
+ int (*set_params)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_params *params);
+ int (*get_params)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_codec *params);
+ int (*set_metadata)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_metadata *metadata);
+ int (*get_metadata)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_metadata *metadata);
+ int (*trigger)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, int cmd);
+ int (*pointer)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_tstamp *tstamp);
+ int (*copy)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, char __user *buf,
+ size_t count);
+ int (*mmap)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct vm_area_struct *vma);
+ int (*ack)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, size_t bytes);
+ int (*get_caps)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_caps *caps);
+ int (*get_codec_caps)(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_codec_caps *codec);
+};
+
struct snd_soc_component_driver {
const char *name;
@@ -108,7 +146,7 @@ struct snd_soc_component_driver {
struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
- const struct snd_compr_ops *compr_ops;
+ const struct snd_compress_ops *compress_ops;
/* probe ordering - for components with runtime dependencies */
int probe_order;
@@ -351,10 +389,10 @@ static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
return dev_get_drvdata(c->dev);
}
-static inline bool snd_soc_component_is_active(
- struct snd_soc_component *component)
+static inline unsigned int
+snd_soc_component_active(struct snd_soc_component *component)
{
- return component->active != 0;
+ return component->active;
}
/* component pin */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index b33abe93b905..212257e84fac 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -154,21 +154,59 @@ int snd_soc_dai_startup(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream);
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream);
-int snd_soc_dai_prepare(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
-int snd_soc_dai_trigger(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream, int cmd);
-int snd_soc_dai_bespoke_trigger(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream, int cmd);
snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream);
void snd_soc_dai_suspend(struct snd_soc_dai *dai);
void snd_soc_dai_resume(struct snd_soc_dai *dai);
-int snd_soc_dai_probe(struct snd_soc_dai *dai);
-int snd_soc_dai_remove(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
struct snd_soc_pcm_runtime *rtd, int num);
bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream);
+void snd_soc_dai_action(struct snd_soc_dai *dai,
+ int stream, int action);
+static inline void snd_soc_dai_activate(struct snd_soc_dai *dai,
+ int stream)
+{
+ snd_soc_dai_action(dai, stream, 1);
+}
+static inline void snd_soc_dai_deactivate(struct snd_soc_dai *dai,
+ int stream)
+{
+ snd_soc_dai_action(dai, stream, -1);
+}
+int snd_soc_dai_active(struct snd_soc_dai *dai);
+
+int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order);
+int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order);
+int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd);
+int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream);
+int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd);
+int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
+ int cmd);
+
+int snd_soc_dai_compr_startup(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream);
+void snd_soc_dai_compr_shutdown(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream);
+int snd_soc_dai_compr_trigger(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream, int cmd);
+int snd_soc_dai_compr_set_params(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params);
+int snd_soc_dai_compr_get_params(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_codec *params);
+int snd_soc_dai_compr_ack(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ size_t bytes);
+int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp);
+int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
+int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata);
struct snd_soc_dai_ops {
/*
@@ -326,8 +364,6 @@ struct snd_soc_dai {
/* DAI runtime info */
unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */
- unsigned int active;
-
struct snd_soc_dapm_widget *playback_widget;
struct snd_soc_dapm_widget *capture_widget;
@@ -443,4 +479,10 @@ static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai,
return ERR_PTR(-ENOTSUPP);
}
+static inline unsigned int
+snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream)
+{
+ return dai->stream_active[stream];
+}
+
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 08495f8d86dc..cc3dcb815282 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -689,7 +689,7 @@ struct snd_soc_dapm_context {
/* A list of widgets associated with an object, typically a snd_kcontrol */
struct snd_soc_dapm_widget_list {
int num_widgets;
- struct snd_soc_dapm_widget *widgets[0];
+ struct snd_soc_dapm_widget *widgets[];
};
#define for_each_dapm_widgets(list, i, widget) \
diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h
new file mode 100644
index 000000000000..3dd6e33e94ec
--- /dev/null
+++ b/include/sound/soc-link.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * soc-link.h
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __SOC_LINK_H
+#define __SOC_LINK_H
+
+int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd);
+int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+
+int snd_soc_link_startup(struct snd_pcm_substream *substream);
+void snd_soc_link_shutdown(struct snd_pcm_substream *substream);
+int snd_soc_link_prepare(struct snd_pcm_substream *substream);
+int snd_soc_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+void snd_soc_link_hw_free(struct snd_pcm_substream *substream);
+int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd);
+
+int snd_soc_link_compr_startup(struct snd_compr_stream *cstream);
+void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream);
+int snd_soc_link_compr_set_params(struct snd_compr_stream *cstream);
+
+#endif /* __SOC_LINK_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 946f88a6c63d..ef5dd28e10a9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -414,11 +414,6 @@ enum snd_soc_pcm_subclass {
SND_SOC_PCM_CLASS_BE = 1,
};
-enum snd_soc_card_subclass {
- SND_SOC_CARD_CLASS_INIT = 0,
- SND_SOC_CARD_CLASS_RUNTIME = 1,
-};
-
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
@@ -468,8 +463,19 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd);
-void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
-void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
+
+void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
+ int stream, int action);
+static inline void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd,
+ int stream)
+{
+ snd_soc_runtime_action(rtd, stream, 1);
+}
+static inline void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd,
+ int stream)
+{
+ snd_soc_runtime_action(rtd, stream, -1);
+}
int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hardware *hw, int stream);
@@ -498,10 +504,6 @@ int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
const struct snd_pcm_hardware *hw);
/* Jack reporting */
-int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
- struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
- unsigned int num_pins);
-
void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask);
int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_pin *pins);
@@ -571,8 +573,6 @@ static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
void *data, const char *long_name,
const char *prefix);
-struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
- const char *name);
int snd_soc_add_component_controls(struct snd_soc_component *component,
const struct snd_kcontrol_new *controls, unsigned int num_controls);
int snd_soc_add_card_controls(struct snd_soc_card *soc_card,
@@ -790,9 +790,6 @@ struct snd_soc_dai_link {
const struct snd_soc_pcm_stream *params;
unsigned int num_params;
- struct snd_soc_dapm_widget *playback_widget;
- struct snd_soc_dapm_widget *capture_widget;
-
unsigned int dai_fmt; /* format to set on init */
enum snd_soc_dpcm_trigger trigger[2]; /* trigger type for DPCM */
@@ -809,7 +806,7 @@ struct snd_soc_dai_link {
const struct snd_soc_compr_ops *compr_ops;
/* Mark this pcm with non atomic ops */
- bool nonatomic;
+ unsigned int nonatomic:1;
/* For unidirectional dai links */
unsigned int playback_only:1;
@@ -1005,9 +1002,6 @@ struct snd_soc_card {
spinlock_t dpcm_lock;
- bool instantiated;
- bool topology_shortname_created;
-
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
int (*remove)(struct snd_soc_card *card);
@@ -1068,8 +1062,6 @@ struct snd_soc_card {
int num_of_dapm_widgets;
const struct snd_soc_dapm_route *of_dapm_routes;
int num_of_dapm_routes;
- bool fully_routed;
- bool disable_route_checks;
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
@@ -1096,6 +1088,13 @@ struct snd_soc_card {
#endif
u32 pop_time;
+ /* bit field */
+ unsigned int instantiated:1;
+ unsigned int topology_shortname_created:1;
+ unsigned int fully_routed:1;
+ unsigned int disable_route_checks:1;
+ unsigned int probed:1;
+
void *drvdata;
};
#define for_each_card_prelinks(card, i, link) \
@@ -1146,16 +1145,21 @@ struct snd_soc_pcm_runtime {
/* runtime devices */
struct snd_pcm *pcm;
struct snd_compr *compr;
- struct snd_soc_dai *codec_dai;
- struct snd_soc_dai *cpu_dai;
- struct snd_soc_dai **dais;
- struct snd_soc_dai **codec_dais;
+ /*
+ * dais = cpu_dai + codec_dai
+ * see
+ * soc_new_pcm_runtime()
+ * asoc_rtd_to_cpu()
+ * asoc_rtd_to_codec()
+ */
+ struct snd_soc_dai **dais;
unsigned int num_codecs;
-
- struct snd_soc_dai **cpu_dais;
unsigned int num_cpus;
+ struct snd_soc_dapm_widget *playback_widget;
+ struct snd_soc_dapm_widget *capture_widget;
+
struct delayed_work delayed_work;
void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
#ifdef CONFIG_DEBUG_FS
@@ -1170,28 +1174,28 @@ struct snd_soc_pcm_runtime {
unsigned int fe_compr:1; /* for Dynamic PCM */
int num_components;
- struct snd_soc_component *components[0]; /* CPU/Codec/Platform */
+ struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};
/* see soc_new_pcm_runtime() */
#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus]
#define for_each_rtd_components(rtd, i, component) \
- for ((i) = 0; \
+ for ((i) = 0, component = NULL; \
((i) < rtd->num_components) && ((component) = rtd->components[i]);\
(i)++)
#define for_each_rtd_cpu_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->num_cpus) && ((dai) = rtd->cpu_dais[i]); \
+ ((i) < rtd->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
(i)++)
#define for_each_rtd_cpu_dais_rollback(rtd, i, dai) \
- for (; (--(i) >= 0) && ((dai) = rtd->cpu_dais[i]);)
+ for (; (--(i) >= 0) && ((dai) = asoc_rtd_to_cpu(rtd, i));)
#define for_each_rtd_codec_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+ ((i) < rtd->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
(i)++)
#define for_each_rtd_codec_dais_rollback(rtd, i, dai) \
- for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; (--(i) >= 0) && ((dai) = asoc_rtd_to_codec(rtd, i));)
#define for_each_rtd_dais(rtd, i, dai) \
for ((i) = 0; \
((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \
@@ -1252,29 +1256,16 @@ struct soc_enum {
#endif
};
-/* device driver data */
-
-static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
- void *data)
-{
- card->drvdata = data;
-}
-
-static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
-{
- return card->drvdata;
-}
-
static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
{
if (mc->reg == mc->rreg && mc->shift == mc->rshift)
- return 0;
+ return false;
/*
* mc->reg == mc->rreg && mc->shift != mc->rshift, or
* mc->reg != mc->rreg means that the control is
* stereo (bits in one register or in two registers)
*/
- return 1;
+ return true;
}
static inline unsigned int snd_soc_enum_val_to_item(struct soc_enum *e,
@@ -1378,20 +1369,6 @@ struct snd_soc_dai *snd_soc_find_dai(
#include <sound/soc-dai.h>
static inline
-struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
- const char *dai_name)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strcmp(rtd->codec_dai->name, dai_name))
- return rtd->codec_dai;
- }
-
- return NULL;
-}
-
-static inline
int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card,
const char *platform_name)
{
@@ -1436,5 +1413,6 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
}
#include <sound/soc-component.h>
+#include <sound/soc-card.h>
#endif
diff --git a/include/sound/sof.h b/include/sound/sof.h
index a0cbca021230..f3e716c8ce1c 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -27,6 +27,9 @@ struct snd_sof_pdata {
struct device *dev;
+	/* number of initial bytes that should not be loaded into DSP memory. */
+ size_t fw_offset;
+
/*
* notification callback used if the hardware initialization
* can take time or is handled in a workqueue. This callback
diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h
index 21044eb5f377..fd3a30fcf756 100644
--- a/include/sound/sof/channel_map.h
+++ b/include/sound/sof/channel_map.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
index 6080ea0facd7..7379a33d7247 100644
--- a/include/sound/sof/control.h
+++ b/include/sound/sof/control.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
index ff9088dcc6f2..ca8325353d41 100644
--- a/include/sound/sof/dai-imx.h
+++ b/include/sound/sof/dai-imx.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* Copyright 2019 NXP
*
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 04e48227f542..136adf6686e2 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,9 @@
/* bclk idle */
#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_IDLE_HIGH BIT(5)
+/* DMIC max. four controllers for eight microphone channels */
+#define SOF_DAI_INTEL_DMIC_NUM_CTRL 4
+
/* SSP Configuration Request - SOF_IPC_DAI_SSP_CONFIG */
struct sof_ipc_dai_ssp_params {
struct sof_ipc_hdr hdr;
@@ -85,15 +88,19 @@ struct sof_ipc_dai_ssp_params {
struct sof_ipc_dai_hda_params {
struct sof_ipc_hdr hdr;
uint32_t link_dma_ch;
+ uint32_t rate;
+ uint32_t channels;
} __packed;
/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */
struct sof_ipc_dai_alh_params {
struct sof_ipc_hdr hdr;
uint32_t stream_id;
+ uint32_t rate;
+ uint32_t channels;
/* reserved for future use */
- uint32_t reserved[15];
+ uint32_t reserved[13];
} __packed;
/* DMIC Configuration Request - SOF_IPC_DAI_DMIC_CONFIG */
@@ -135,7 +142,7 @@ struct sof_ipc_dai_dmic_pdm_ctrl {
* version number used in configuration data is checked vs. version used by
* device driver src/drivers/dmic.c need to match. It is incremented from
* initial value 1 if updates done for the to driver would alter the operation
- * of the microhone.
+ * of the microphone.
*
* Note: The microphone clock (pdmclk_min, pdmclk_max, duty_min, duty_max)
* parameters need to be set as defined in microphone data sheet. E.g. clock
@@ -170,12 +177,13 @@ struct sof_ipc_dai_dmic_params {
uint32_t fifo_fs; /**< FIFO sample rate in Hz (8000..96000) */
uint32_t reserved_1; /**< Reserved */
uint16_t fifo_bits; /**< FIFO word length (16 or 32) */
- uint16_t reserved_2; /**< Reserved */
+ uint16_t fifo_bits_b; /**< Deprecated since firmware ABI 3.0.1 */
uint16_t duty_min; /**< Min. mic clock duty cycle in % (20..80) */
uint16_t duty_max; /**< Max. mic clock duty cycle in % (min..80) */
- uint32_t num_pdm_active; /**< Number of active pdm controllers */
+ uint32_t num_pdm_active; /**< Number of active pdm controllers. */
+ /**< Range is 1..SOF_DAI_INTEL_DMIC_NUM_CTRL */
uint32_t wake_up_time; /**< Time from clock start to data (us) */
uint32_t min_clock_on_time; /**< Min. time that clk is kept on (us) */
@@ -184,8 +192,8 @@ struct sof_ipc_dai_dmic_params {
/* reserved for future use */
uint32_t reserved[5];
- /**< variable number of pdm controller config */
- struct sof_ipc_dai_dmic_pdm_ctrl pdm[0];
+ /**< PDM controllers configuration */
+ struct sof_ipc_dai_dmic_pdm_ctrl pdm[SOF_DAI_INTEL_DMIC_NUM_CTRL];
} __packed;
#endif
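
The DMIC hunk above bounds the IPC payload by replacing the flexible pdm[0] tail with a fixed pdm[SOF_DAI_INTEL_DMIC_NUM_CTRL] array, so the configuration struct has a single, compile-time-known size on both host and firmware. A minimal standalone sketch of the size difference, using simplified stand-in structs (pdm_ctrl, dmic_params_*) rather than the full sof_ipc definitions:

#include <stdint.h>
#include <stdio.h>

#define NUM_CTRL 4	/* mirrors SOF_DAI_INTEL_DMIC_NUM_CTRL */

struct pdm_ctrl {	/* simplified stand-in for sof_ipc_dai_dmic_pdm_ctrl */
	uint32_t id;
	uint32_t enable_mic_a;
	uint32_t enable_mic_b;
} __attribute__((packed));

struct dmic_params_flex {	/* old layout: variable pdm[] tail */
	uint32_t fifo_fs;
	struct pdm_ctrl pdm[];
} __attribute__((packed));

struct dmic_params_fixed {	/* new layout: always NUM_CTRL entries */
	uint32_t fifo_fs;
	struct pdm_ctrl pdm[NUM_CTRL];
} __attribute__((packed));

int main(void)
{
	/* sizeof() of the flexible variant excludes the pdm[] payload ... */
	printf("flex:  %zu bytes\n", sizeof(struct dmic_params_flex));
	/* ... while the fixed variant always reserves all four controllers */
	printf("fixed: %zu bytes\n", sizeof(struct dmic_params_fixed));
	return 0;
}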
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 2565edd336f1..34f135adf8ec 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h
new file mode 100644
index 000000000000..04359cda92dc
--- /dev/null
+++ b/include/sound/sof/ext_manifest.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2020 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * The extended manifest is a place to store metadata about the firmware that
+ * is known at compilation time - for example the firmware version or the
+ * compiler used to build it.
+ * This information is read on the host side before firmware startup.
+ * This part of the output binary is not signed.
+ */
+
+#ifndef __SOF_FIRMWARE_EXT_MANIFEST_H__
+#define __SOF_FIRMWARE_EXT_MANIFEST_H__
+
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <sound/sof/info.h>
+
+/* In ASCII `XMan` */
+#define SOF_EXT_MAN_MAGIC_NUMBER 0x6e614d58
+
+/* Build u32 number in format MMmmmppp */
+#define SOF_EXT_MAN_BUILD_VERSION(MAJOR, MINOR, PATH) ((uint32_t)( \
+ ((MAJOR) << 24) | \
+ ((MINOR) << 12) | \
+ (PATH)))
+
+/* check extended manifest version consistency */
+#define SOF_EXT_MAN_VERSION_INCOMPATIBLE(host_ver, cli_ver) ( \
+ ((host_ver) & GENMASK(31, 24)) != \
+ ((cli_ver) & GENMASK(31, 24)))
+
+/* used extended manifest header version */
+#define SOF_EXT_MAN_VERSION SOF_EXT_MAN_BUILD_VERSION(1, 0, 0)
+
+/* extended manifest header, deleting any field breaks backward compatibility */
+struct sof_ext_man_header {
+ uint32_t magic; /*< identification number, */
+ /*< EXT_MAN_MAGIC_NUMBER */
+ uint32_t full_size; /*< [bytes] full size of ext_man, */
+ /*< (header + content + padding) */
+ uint32_t header_size; /*< [bytes] makes the header extensible; */
+ /*< new fields appended to the ext_man header */
+ /*< do not break backward compatibility */
+ uint32_t header_version; /*< value of EXT_MAN_VERSION */
+ /*< not related to the following content */
+
+ /* a list of ext_man_elem_* elements follows immediately after this header */
+} __packed;
+
+/* Now define extended manifest elements */
+
+/* Extended manifest elements types */
+enum sof_ext_man_elem_type {
+ SOF_EXT_MAN_ELEM_FW_VERSION = 0,
+ SOF_EXT_MAN_ELEM_WINDOW = SOF_IPC_EXT_WINDOW,
+ SOF_EXT_MAN_ELEM_CC_VERSION = SOF_IPC_EXT_CC_INFO,
+};
+
+/* extended manifest element header */
+struct sof_ext_man_elem_header {
+ uint32_t type; /*< SOF_EXT_MAN_ELEM_ */
+ uint32_t size; /*< in bytes, including header size */
+
+ /* type-dependent content follows immediately after this header */
+} __packed;
+
+/* FW version */
+struct sof_ext_man_fw_version {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_fw_version version;
+ uint32_t flags;
+} __packed;
+
+/* extended data memory windows for IPC, trace and debug */
+struct sof_ext_man_window {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_window ipc_window;
+} __packed;
+
+/* Used C compiler description */
+struct sof_ext_man_cc_version {
+ struct sof_ext_man_elem_header hdr;
+ /* use sof_ipc struct because of code re-use */
+ struct sof_ipc_cc_version cc_version;
+} __packed;
+
+#endif /* __SOF_FIRMWARE_EXT_MANIFEST_H__ */
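
The version macros in the new header pack major/minor/patch into the MMmmmppp layout and treat only a major-number mismatch as incompatible. A minimal standalone sketch of that packing and check; GENMASK() is a kernel helper, so an explicit 0xff000000 mask stands in for GENMASK(31, 24), and the EXT_MAN_* names below are local copies for illustration only:

#include <stdint.h>
#include <stdio.h>

#define EXT_MAN_BUILD_VERSION(MAJOR, MINOR, PATH) ((uint32_t)( \
	((MAJOR) << 24) | \
	((MINOR) << 12) | \
	(PATH)))

/* major number lives in bits 31..24, i.e. GENMASK(31, 24) */
#define EXT_MAN_MAJOR_MASK 0xff000000u

#define EXT_MAN_VERSION_INCOMPATIBLE(host_ver, cli_ver) ( \
	((host_ver) & EXT_MAN_MAJOR_MASK) != ((cli_ver) & EXT_MAN_MAJOR_MASK))

int main(void)
{
	uint32_t host  = EXT_MAN_BUILD_VERSION(1, 0, 0);	/* 0x01000000 */
	uint32_t same  = EXT_MAN_BUILD_VERSION(1, 2, 3);	/* 0x01002003 */
	uint32_t newer = EXT_MAN_BUILD_VERSION(2, 0, 0);	/* 0x02000000 */

	printf("host=0x%08x\n", host);
	/* only a major-number mismatch is treated as incompatible */
	printf("1.2.3 incompatible? %d\n", EXT_MAN_VERSION_INCOMPATIBLE(host, same));
	printf("2.0.0 incompatible? %d\n", EXT_MAN_VERSION_INCOMPATIBLE(host, newer));
	return 0;
}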
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index b79479575cc8..2d35997ace40 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 438a11fcf272..5a55ba8b7e56 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -31,6 +31,8 @@ enum sof_ipc_ext_data {
SOF_IPC_EXT_UNUSED = 0,
SOF_IPC_EXT_WINDOW = 1,
SOF_IPC_EXT_CC_INFO = 2,
+ SOF_IPC_EXT_PROBE_INFO = 3,
+ SOF_IPC_EXT_USER_ABI_INFO = 4,
};
/* FW version - SOF_IPC_GLB_VERSION */
@@ -109,9 +111,27 @@ struct sof_ipc_cc_version {
/* reserved for future use */
uint32_t reserved[4];
- char name[16]; /* null terminated compiler name */
- char optim[4]; /* null terminated compiler -O flag value */
- char desc[]; /* null terminated compiler description */
+ uint8_t name[16]; /* null terminated compiler name */
+ uint8_t optim[4]; /* null terminated compiler -O flag value */
+ uint8_t desc[32]; /* null terminated compiler description */
} __packed;
+/* extended data: Probe setup */
+struct sof_ipc_probe_support {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+
+ uint32_t probe_points_max;
+ uint32_t injection_dmas_max;
+
+ /* reserved for future use */
+ uint32_t reserved[2];
+} __packed;
+
+/* extended data: user abi version(s) */
+struct sof_ipc_user_abi_version {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+
+ uint32_t abi_dbg_version;
+} __packed;
+
#endif
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 3cf2e0f39d94..366aa6ec442b 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 7facefb541b3..58a0d49977d6 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 402e0250c508..f56e80d09b32 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -37,6 +37,8 @@ enum sof_comp_type {
SOF_COMP_SELECTOR, /**< channel selector component */
SOF_COMP_DEMUX,
SOF_COMP_ASRC, /**< Asynchronous sample rate converter */
+ SOF_COMP_DCBLOCK,
+ SOF_COMP_SMART_AMP, /**< smart amplifier component */
/* keep FILEREAD/FILEWRITE as the last ones */
SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
@@ -75,11 +77,23 @@ struct sof_ipc_comp {
#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */
#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */
+/*
+ * an overrun overwrites data in the ring buffer instead of raising an XRUN.
+ */
+#define SOF_BUF_OVERRUN_PERMITTED BIT(0)
+
+/*
+ * an underrun reads back zeros instead of raising an XRUN.
+ */
+#define SOF_BUF_UNDERRUN_PERMITTED BIT(1)
+
/* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */
struct sof_ipc_buffer {
struct sof_ipc_comp comp;
uint32_t size; /**< buffer size in bytes */
uint32_t caps; /**< SOF_MEM_CAPS_ */
+ uint32_t flags; /**< SOF_BUF_ flags defined above */
+ uint32_t reserved; /**< reserved for future use */
} __packed;
/* generic component config data - must always be after struct sof_ipc_comp */
@@ -206,6 +220,8 @@ enum sof_ipc_process_type {
SOF_PROCESS_CHAN_SELECTOR, /**< Channel Selector */
SOF_PROCESS_MUX,
SOF_PROCESS_DEMUX,
+ SOF_PROCESS_DCBLOCK,
+ SOF_PROCESS_SMART_AMP, /**< Smart Amplifier */
};
/* generic "effect", "codec" or proprietary processing component */
@@ -218,7 +234,7 @@ struct sof_ipc_comp_process {
/* reserved for future use */
uint32_t reserved[7];
- unsigned char data[0];
+ uint8_t data[0];
} __packed;
/* frees components, buffers and pipelines
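
The new SOF_BUF_OVERRUN_PERMITTED / SOF_BUF_UNDERRUN_PERMITTED flags above let a topology opt individual buffers out of XRUN reporting. A minimal sketch of what the underrun case implies for a reader - zero-fill instead of an error - using an illustrative ring model, not the firmware's actual buffer code:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BUF_UNDERRUN_PERMITTED (1u << 1)	/* mirrors SOF_BUF_UNDERRUN_PERMITTED */
#define ERR_XRUN -1

struct ring {
	uint32_t flags;
	size_t avail;		/* bytes currently queued */
	const uint8_t *data;
};

static int ring_read(struct ring *r, uint8_t *dst, size_t len)
{
	if (r->avail < len) {
		if (!(r->flags & BUF_UNDERRUN_PERMITTED))
			return ERR_XRUN;	/* classic behaviour */
		memset(dst, 0, len);		/* read back zeros */
		return 0;
	}
	memcpy(dst, r->data, len);
	r->avail -= len;
	return 0;
}

int main(void)
{
	uint8_t out[8];
	struct ring strict  = { .flags = 0, .avail = 0 };
	struct ring relaxed = { .flags = BUF_UNDERRUN_PERMITTED, .avail = 0 };

	printf("strict:  %d\n", ring_read(&strict, out, sizeof(out)));	/* -1 */
	printf("relaxed: %d\n", ring_read(&relaxed, out, sizeof(out)));	/*  0 */
	return 0;
}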
diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h
index fda6e8f6ead4..c31a94a13ce0 100644
--- a/include/sound/sof/trace.h
+++ b/include/sound/sof/trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ struct sof_ipc_dma_trace_posn {
struct sof_ipc_panic_info {
struct sof_ipc_hdr hdr;
uint32_t code; /* SOF_IPC_PANIC_ */
- char filename[SOF_TRACE_FILENAME_SIZE];
+ uint8_t filename[SOF_TRACE_FILENAME_SIZE];
uint32_t linenum;
} __packed;
diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h
index dd53d36b34e1..87a07e520415 100644
--- a/include/sound/sof/xtensa.h
+++ b/include/sound/sof/xtensa.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 591cd9e4692c..4fda324f4b35 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -301,16 +301,6 @@ struct iscsi_queue_req {
struct list_head qr_list;
};
-struct iscsi_data_count {
- int data_length;
- int sync_and_steering;
- enum data_count_type type;
- u32 iov_count;
- u32 ss_iov_count;
- u32 ss_marker_count;
- struct kvec *iov;
-};
-
struct iscsi_param_list {
bool iser;
struct list_head param_list;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 1b752d8ea529..f51452e3b984 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -23,7 +23,8 @@ struct target_backend_ops {
char inquiry_rev[4];
struct module *owner;
- u8 transport_flags;
+ u8 transport_flags_default;
+ u8 transport_flags_changeable;
int (*attach_hba)(struct se_hba *, u32);
void (*detach_hba)(struct se_hba *);
@@ -94,6 +95,7 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
extern struct configfs_attribute *sbc_attrib_attrs[];
extern struct configfs_attribute *passthrough_attrib_attrs[];
+extern struct configfs_attribute *passthrough_pr_attrib_attrs[];
/* core helpers also used by command snooping in pscsi */
void *transport_kmap_data_sg(struct se_cmd *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 6d4a694f6ea7..18c3f277b770 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -772,6 +772,7 @@ struct se_device {
#define DF_USING_UDEV_PATH 0x00000008
#define DF_USING_ALIAS 0x00000010
#define DF_READ_ONLY 0x00000020
+ u8 transport_flags;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index c612cabbc378..5f0c1cf1ea13 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -33,20 +33,40 @@ enum afs_server_trace {
afs_server_trace_destroy,
afs_server_trace_free,
afs_server_trace_gc,
+ afs_server_trace_get_by_addr,
afs_server_trace_get_by_uuid,
afs_server_trace_get_caps,
afs_server_trace_get_install,
afs_server_trace_get_new_cbi,
+ afs_server_trace_get_probe,
afs_server_trace_give_up_cb,
afs_server_trace_put_call,
afs_server_trace_put_cbi,
afs_server_trace_put_find_rsq,
+ afs_server_trace_put_probe,
afs_server_trace_put_slist,
afs_server_trace_put_slist_isort,
afs_server_trace_put_uuid_rsq,
afs_server_trace_update,
};
+enum afs_volume_trace {
+ afs_volume_trace_alloc,
+ afs_volume_trace_free,
+ afs_volume_trace_get_alloc_sbi,
+ afs_volume_trace_get_cell_insert,
+ afs_volume_trace_get_new_op,
+ afs_volume_trace_get_query_alias,
+ afs_volume_trace_put_cell_dup,
+ afs_volume_trace_put_cell_root,
+ afs_volume_trace_put_destroy_sbi,
+ afs_volume_trace_put_free_fc,
+ afs_volume_trace_put_put_op,
+ afs_volume_trace_put_query_alias,
+ afs_volume_trace_put_validate_fc,
+ afs_volume_trace_remove,
+};
+
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -108,6 +128,7 @@ enum afs_vl_operation {
afs_VL_GetEntryByNameU = 527, /* AFS Get Vol Entry By Name operation ID */
afs_VL_GetAddrsU = 533, /* AFS Get FS server addresses */
afs_YFSVL_GetEndpoints = 64002, /* YFS Get FS & Vol server addresses */
+ afs_YFSVL_GetCellName = 64014, /* YFS Get actual cell name */
afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */
};
@@ -140,6 +161,7 @@ enum afs_eproto_cause {
afs_eproto_bad_status,
afs_eproto_cb_count,
afs_eproto_cb_fid_count,
+ afs_eproto_cellname_len,
afs_eproto_file_type,
afs_eproto_ibulkst_cb_count,
afs_eproto_ibulkst_count,
@@ -241,19 +263,38 @@ enum afs_cb_break_reason {
EM(afs_server_trace_destroy, "DESTROY ") \
EM(afs_server_trace_free, "FREE ") \
EM(afs_server_trace_gc, "GC ") \
+ EM(afs_server_trace_get_by_addr, "GET addr ") \
EM(afs_server_trace_get_by_uuid, "GET uuid ") \
EM(afs_server_trace_get_caps, "GET caps ") \
EM(afs_server_trace_get_install, "GET inst ") \
EM(afs_server_trace_get_new_cbi, "GET cbi ") \
+ EM(afs_server_trace_get_probe, "GET probe") \
EM(afs_server_trace_give_up_cb, "giveup-cb") \
EM(afs_server_trace_put_call, "PUT call ") \
EM(afs_server_trace_put_cbi, "PUT cbi ") \
EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \
+ EM(afs_server_trace_put_probe, "PUT probe") \
EM(afs_server_trace_put_slist, "PUT slist") \
EM(afs_server_trace_put_slist_isort, "PUT isort") \
EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \
E_(afs_server_trace_update, "UPDATE")
+#define afs_volume_traces \
+ EM(afs_volume_trace_alloc, "ALLOC ") \
+ EM(afs_volume_trace_free, "FREE ") \
+ EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
+ EM(afs_volume_trace_get_new_op, "GET op-new ") \
+ EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
+ EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
+ EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
+ EM(afs_volume_trace_put_free_fc, "PUT fc-free ") \
+ EM(afs_volume_trace_put_put_op, "PUT op-put ") \
+ EM(afs_volume_trace_put_query_alias, "PUT cell-alias") \
+ EM(afs_volume_trace_put_validate_fc, "PUT fc-validat") \
+ E_(afs_volume_trace_remove, "REMOVE ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -310,6 +351,7 @@ enum afs_cb_break_reason {
EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \
EM(afs_VL_GetAddrsU, "VL.GetAddrsU") \
EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \
+ EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \
E_(afs_VL_GetCapabilities, "VL.GetCapabilities")
#define afs_edit_dir_ops \
@@ -339,6 +381,7 @@ enum afs_cb_break_reason {
EM(afs_eproto_bad_status, "BadStatus") \
EM(afs_eproto_cb_count, "CbCount") \
EM(afs_eproto_cb_fid_count, "CbFidCount") \
+ EM(afs_eproto_cellname_len, "CellNameLen") \
EM(afs_eproto_file_type, "FileTYpe") \
EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \
EM(afs_eproto_ibulkst_count, "IBS.FidCount") \
@@ -636,7 +679,7 @@ TRACE_EVENT(afs_make_fs_calli,
TRACE_EVENT(afs_make_fs_call1,
TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
- const char *name),
+ const struct qstr *name),
TP_ARGS(call, fid, name),
@@ -648,8 +691,7 @@ TRACE_EVENT(afs_make_fs_call1,
),
TP_fast_assign(
- int __len = strlen(name);
- __len = min(__len, 23);
+ unsigned int __len = min_t(unsigned int, name->len, 23);
__entry->call = call->debug_id;
__entry->op = call->operation_ID;
if (fid) {
@@ -659,7 +701,7 @@ TRACE_EVENT(afs_make_fs_call1,
__entry->fid.vnode = 0;
__entry->fid.unique = 0;
}
- memcpy(__entry->name, name, __len);
+ memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
),
@@ -674,7 +716,7 @@ TRACE_EVENT(afs_make_fs_call1,
TRACE_EVENT(afs_make_fs_call2,
TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
- const char *name, const char *name2),
+ const struct qstr *name, const struct qstr *name2),
TP_ARGS(call, fid, name, name2),
@@ -687,10 +729,8 @@ TRACE_EVENT(afs_make_fs_call2,
),
TP_fast_assign(
- int __len = strlen(name);
- int __len2 = strlen(name2);
- __len = min(__len, 23);
- __len2 = min(__len2, 23);
+ unsigned int __len = min_t(unsigned int, name->len, 23);
+ unsigned int __len2 = min_t(unsigned int, name2->len, 23);
__entry->call = call->debug_id;
__entry->op = call->operation_ID;
if (fid) {
@@ -700,9 +740,9 @@ TRACE_EVENT(afs_make_fs_call2,
__entry->fid.vnode = 0;
__entry->fid.unique = 0;
}
- memcpy(__entry->name, name, __len);
+ memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
- memcpy(__entry->name2, name2, __len2);
+ memcpy(__entry->name2, name2->name, __len2);
__entry->name2[__len2] = 0;
),
@@ -988,24 +1028,22 @@ TRACE_EVENT(afs_edit_dir,
);
TRACE_EVENT(afs_protocol_error,
- TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause),
+ TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause),
- TP_ARGS(call, error, cause),
+ TP_ARGS(call, cause),
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(int, error )
__field(enum afs_eproto_cause, cause )
),
TP_fast_assign(
__entry->call = call ? call->debug_id : 0;
- __entry->error = error;
__entry->cause = cause;
),
- TP_printk("c=%08x r=%d %s",
- __entry->call, __entry->error,
+ TP_printk("c=%08x %s",
+ __entry->call,
__print_symbolic(__entry->cause, afs_eproto_causes))
);
@@ -1271,26 +1309,53 @@ TRACE_EVENT(afs_cb_miss,
);
TRACE_EVENT(afs_server,
- TP_PROTO(struct afs_server *server, int usage, enum afs_server_trace reason),
+ TP_PROTO(struct afs_server *server, int ref, int active,
+ enum afs_server_trace reason),
- TP_ARGS(server, usage, reason),
+ TP_ARGS(server, ref, active, reason),
TP_STRUCT__entry(
__field(unsigned int, server )
- __field(int, usage )
+ __field(int, ref )
+ __field(int, active )
__field(int, reason )
),
TP_fast_assign(
__entry->server = server->debug_id;
- __entry->usage = usage;
+ __entry->ref = ref;
+ __entry->active = active;
__entry->reason = reason;
),
- TP_printk("s=%08x %s u=%d",
+ TP_printk("s=%08x %s u=%d a=%d",
__entry->server,
__print_symbolic(__entry->reason, afs_server_traces),
- __entry->usage)
+ __entry->ref,
+ __entry->active)
+ );
+
+TRACE_EVENT(afs_volume,
+ TP_PROTO(afs_volid_t vid, int ref, enum afs_volume_trace reason),
+
+ TP_ARGS(vid, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid )
+ __field(int, ref )
+ __field(enum afs_volume_trace, reason )
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("V=%llx %s u=%d",
+ __entry->vid,
+ __print_symbolic(__entry->reason, afs_volume_traces),
+ __entry->ref)
);
#endif /* _TRACE_AFS_H */
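
The afs_make_fs_call1/2 tracepoints above switch from strlen() on a bare char pointer to the (pointer, length) pair carried by struct qstr, clamped with min_t() to the 23 characters that fit in the trace entry (presumably a 24-byte name field). A minimal sketch of that bounded-copy-and-terminate pattern; struct qstr and copy_name() below are simplified stand-ins, not the kernel definitions:

#include <stdio.h>
#include <string.h>

struct qstr {
	const char *name;
	unsigned int len;
};

#define TRACE_NAME_MAX 23	/* matches the 23-character cap in the hunk */

static void copy_name(char dst[TRACE_NAME_MAX + 1], const struct qstr *name)
{
	unsigned int len = name->len < TRACE_NAME_MAX ? name->len : TRACE_NAME_MAX;

	/* bounded copy, then explicit NUL termination */
	memcpy(dst, name->name, len);
	dst[len] = '\0';
}

int main(void)
{
	char buf[TRACE_NAME_MAX + 1];
	struct qstr q = { .name = "a-rather-long-directory-entry-name", .len = 34 };

	copy_name(buf, &q);
	printf("%s\n", buf);	/* truncated to 23 characters */
	return 0;
}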
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 81b43f5bdf23..1257f26bb887 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -261,9 +261,9 @@ TRACE_EVENT(block_bio_bounce,
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio, int error),
+ TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio, error),
+ TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -277,7 +277,7 @@ TRACE_EVENT(block_bio_complete,
__entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio);
- __entry->error = error;
+ __entry->error = blk_status_to_errno(bio->bi_status);
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 19c87661eeec..cc41d692ae8e 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -35,7 +35,8 @@ struct partial_cluster;
{ EXT4_MB_DELALLOC_RESERVED, "DELALLOC_RESV" }, \
{ EXT4_MB_STREAM_ALLOC, "STREAM_ALLOC" }, \
{ EXT4_MB_USE_ROOT_BLOCKS, "USE_ROOT_BLKS" }, \
- { EXT4_MB_USE_RESERVED, "USE_RESV" })
+ { EXT4_MB_USE_RESERVED, "USE_RESV" }, \
+ { EXT4_MB_STRICT_CHECK, "STRICT_CHECK" })
#define show_map_flags(flags) __print_flags(flags, "|", \
{ EXT4_GET_BLOCKS_CREATE, "CREATE" }, \
@@ -45,8 +46,10 @@ struct partial_cluster;
{ EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
- { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
- { EXT4_GET_BLOCKS_ZERO, "ZERO" })
+ { EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, "CONVERT_UNWRITTEN" }, \
+ { EXT4_GET_BLOCKS_ZERO, "ZERO" }, \
+ { EXT4_GET_BLOCKS_IO_SUBMIT, "IO_SUBMIT" }, \
+ { EXT4_EX_NOCACHE, "EX_NOCACHE" })
/*
* __print_flags() requires that all enum values be wrapped in the
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 24c2557c37f0..8639ab962a71 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
TRACE_DEFINE_ENUM(CP_TRIMMED);
TRACE_DEFINE_ENUM(CP_PAUSE);
+TRACE_DEFINE_ENUM(CP_RESIZE);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -126,7 +127,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
{ CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" }, \
{ CP_PAUSE, "Pause" }, \
- { CP_TRIMMED, "Trimmed" })
+ { CP_TRIMMED, "Trimmed" }, \
+ { CP_RESIZE, "Resize" })
#define show_fsync_cpreason(type) \
__print_symbolic(type, \
@@ -154,7 +156,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
__print_symbolic(type, \
{ COMPRESS_LZO, "LZO" }, \
{ COMPRESS_LZ4, "LZ4" }, \
- { COMPRESS_ZSTD, "ZSTD" })
+ { COMPRESS_ZSTD, "ZSTD" }, \
+ { COMPRESS_LZORLE, "LZO-RLE" })
struct f2fs_sb_info;
struct f2fs_io_info;
@@ -1812,6 +1815,82 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
TP_ARGS(inode, cluster_idx, compressed_size, ret)
);
+TRACE_EVENT(f2fs_iostat,
+
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat),
+
+ TP_ARGS(sbi, iostat),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned long long, app_dio)
+ __field(unsigned long long, app_bio)
+ __field(unsigned long long, app_wio)
+ __field(unsigned long long, app_mio)
+ __field(unsigned long long, fs_dio)
+ __field(unsigned long long, fs_nio)
+ __field(unsigned long long, fs_mio)
+ __field(unsigned long long, fs_gc_dio)
+ __field(unsigned long long, fs_gc_nio)
+ __field(unsigned long long, fs_cp_dio)
+ __field(unsigned long long, fs_cp_nio)
+ __field(unsigned long long, fs_cp_mio)
+ __field(unsigned long long, app_drio)
+ __field(unsigned long long, app_brio)
+ __field(unsigned long long, app_rio)
+ __field(unsigned long long, app_mrio)
+ __field(unsigned long long, fs_drio)
+ __field(unsigned long long, fs_gdrio)
+ __field(unsigned long long, fs_cdrio)
+ __field(unsigned long long, fs_nrio)
+ __field(unsigned long long, fs_mrio)
+ __field(unsigned long long, fs_discard)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sbi->sb->s_dev;
+ __entry->app_dio = iostat[APP_DIRECT_IO];
+ __entry->app_bio = iostat[APP_BUFFERED_IO];
+ __entry->app_wio = iostat[APP_WRITE_IO];
+ __entry->app_mio = iostat[APP_MAPPED_IO];
+ __entry->fs_dio = iostat[FS_DATA_IO];
+ __entry->fs_nio = iostat[FS_NODE_IO];
+ __entry->fs_mio = iostat[FS_META_IO];
+ __entry->fs_gc_dio = iostat[FS_GC_DATA_IO];
+ __entry->fs_gc_nio = iostat[FS_GC_NODE_IO];
+ __entry->fs_cp_dio = iostat[FS_CP_DATA_IO];
+ __entry->fs_cp_nio = iostat[FS_CP_NODE_IO];
+ __entry->fs_cp_mio = iostat[FS_CP_META_IO];
+ __entry->app_drio = iostat[APP_DIRECT_READ_IO];
+ __entry->app_brio = iostat[APP_BUFFERED_READ_IO];
+ __entry->app_rio = iostat[APP_READ_IO];
+ __entry->app_mrio = iostat[APP_MAPPED_READ_IO];
+ __entry->fs_drio = iostat[FS_DATA_READ_IO];
+ __entry->fs_gdrio = iostat[FS_GDATA_READ_IO];
+ __entry->fs_cdrio = iostat[FS_CDATA_READ_IO];
+ __entry->fs_nrio = iostat[FS_NODE_READ_IO];
+ __entry->fs_mrio = iostat[FS_META_READ_IO];
+ __entry->fs_discard = iostat[FS_DISCARD];
+ ),
+
+ TP_printk("dev = (%d,%d), "
+ "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
+ "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "gc [data=%llu, node=%llu], "
+ "cp [data=%llu, node=%llu, meta=%llu], "
+ "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
+ "fs [data=%llu, (gc_data=%llu, compr_data=%llu), "
+ "node=%llu, meta=%llu]",
+ show_dev(__entry->dev), __entry->app_wio, __entry->app_dio,
+ __entry->app_bio, __entry->app_mio, __entry->fs_dio,
+ __entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
+ __entry->fs_cp_nio, __entry->fs_cp_mio,
+ __entry->app_rio, __entry->app_drio, __entry->app_brio,
+ __entry->app_mrio, __entry->fs_drio, __entry->fs_gdrio,
+ __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h
index b71f680968eb..5857cf682ee7 100644
--- a/include/trace/events/qla.h
+++ b/include/trace/events/qla.h
@@ -9,6 +9,11 @@
#define QLA_MSG_MAX 256
+#pragma GCC diagnostic push
+#ifndef __clang__
+#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
+#endif
+
DECLARE_EVENT_CLASS(qla_log_event,
TP_PROTO(const char *buf,
struct va_format *vaf),
@@ -27,6 +32,8 @@ DECLARE_EVENT_CLASS(qla_log_event,
TP_printk("%s %s", __get_str(buf), __get_str(msg))
);
+#pragma GCC diagnostic pop
+
DEFINE_EVENT(qla_log_event, ql_dbg_log,
TP_PROTO(const char *buf, struct va_format *vaf),
TP_ARGS(buf, vaf)
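
The qla hunk above wraps the event class in a GCC diagnostic push/ignored/pop so -Wsuggest-attribute=format is silenced only for the generated tracepoint code, and only when not building with clang (which does not know that option). A minimal standalone example of the same pattern; log_msg() is an illustrative function, not from the patch:

#include <stdarg.h>
#include <stdio.h>

#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* With -Wsuggest-attribute=format, GCC would suggest marking this function
 * with __attribute__((format(printf, 1, 2))); the pragma silences that here
 * without changing warning settings for the rest of the file. */
static void log_msg(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

#pragma GCC diagnostic pop

int main(void)
{
	log_msg("status=%d\n", 0);
	return 0;
}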
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 32d88c4fb063..b9b51a4b1db1 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -17,6 +17,16 @@
** GSS-API related trace events
**/
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_NONE);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_INTEGRITY);
+TRACE_DEFINE_ENUM(RPC_GSS_SVC_PRIVACY);
+
+#define show_gss_service(x) \
+ __print_symbolic(x, \
+ { RPC_GSS_SVC_NONE, "none" }, \
+ { RPC_GSS_SVC_INTEGRITY, "integrity" }, \
+ { RPC_GSS_SVC_PRIVACY, "privacy" })
+
TRACE_DEFINE_ENUM(GSS_S_BAD_MECH);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAME);
TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE);
@@ -126,6 +136,40 @@ DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);
+DECLARE_EVENT_CLASS(rpcgss_ctx_class,
+ TP_PROTO(
+ const struct gss_cred *gc
+ ),
+
+ TP_ARGS(gc),
+
+ TP_STRUCT__entry(
+ __field(const void *, cred)
+ __field(unsigned long, service)
+ __string(principal, gc->gc_principal)
+ ),
+
+ TP_fast_assign(
+ __entry->cred = gc;
+ __entry->service = gc->gc_service;
+ __assign_str(principal, gc->gc_principal)
+ ),
+
+ TP_printk("cred=%p service=%s principal='%s'",
+ __entry->cred, show_gss_service(__entry->service),
+ __get_str(principal))
+);
+
+#define DEFINE_CTX_EVENT(name) \
+ DEFINE_EVENT(rpcgss_ctx_class, rpcgss_ctx_##name, \
+ TP_PROTO( \
+ const struct gss_cred *gc \
+ ), \
+ TP_ARGS(gc))
+
+DEFINE_CTX_EVENT(init);
+DEFINE_CTX_EVENT(destroy);
+
TRACE_EVENT(rpcgss_svc_accept_upcall,
TP_PROTO(
__be32 xid,
@@ -291,6 +335,40 @@ TRACE_EVENT(rpcgss_need_reencode,
__entry->ret ? "" : "un")
);
+TRACE_EVENT(rpcgss_update_slack,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpc_auth *auth
+ ),
+
+ TP_ARGS(task, auth),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(const void *, auth)
+ __field(unsigned int, rslack)
+ __field(unsigned int, ralign)
+ __field(unsigned int, verfsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
+ __entry->auth = auth;
+ __entry->rslack = auth->au_rslack;
+ __entry->ralign = auth->au_ralign;
+ __entry->verfsize = auth->au_verfsize;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->auth, __entry->rslack, __entry->ralign,
+ __entry->verfsize)
+);
+
DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class,
TP_PROTO(
__be32 xid,
@@ -371,6 +449,7 @@ TRACE_EVENT(rpcgss_upcall_result,
TRACE_EVENT(rpcgss_context,
TP_PROTO(
+ u32 window_size,
unsigned long expiry,
unsigned long now,
unsigned int timeout,
@@ -378,12 +457,13 @@ TRACE_EVENT(rpcgss_context,
const u8 *data
),
- TP_ARGS(expiry, now, timeout, len, data),
+ TP_ARGS(window_size, expiry, now, timeout, len, data),
TP_STRUCT__entry(
__field(unsigned long, expiry)
__field(unsigned long, now)
__field(unsigned int, timeout)
+ __field(u32, window_size)
__field(int, len)
__string(acceptor, data)
),
@@ -392,13 +472,14 @@ TRACE_EVENT(rpcgss_context,
__entry->expiry = expiry;
__entry->now = now;
__entry->timeout = timeout;
+ __entry->window_size = window_size;
__entry->len = len;
strncpy(__get_str(acceptor), data, len);
),
- TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s",
- __entry->expiry, __entry->now, __entry->timeout,
- __entry->len, __get_str(acceptor))
+ TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
+ __entry->window_size, __entry->expiry, __entry->now,
+ __entry->timeout, __entry->len, __get_str(acceptor))
);
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 132c3c778a43..0f05a6e2b9cb 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -380,12 +380,8 @@ TRACE_EVENT(xprtrdma_inline_thresh,
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);
-DEFINE_CONN_EVENT(flush_dct);
-DEFINE_RXPRT_EVENT(xprtrdma_create);
-DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
-DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
TRACE_EVENT(xprtrdma_op_connect,
@@ -1279,38 +1275,42 @@ TRACE_EVENT(xprtrdma_leaked_rep,
** Server-side RPC/RDMA events
**/
-DECLARE_EVENT_CLASS(svcrdma_xprt_event,
+DECLARE_EVENT_CLASS(svcrdma_accept_class,
TP_PROTO(
- const struct svc_xprt *xprt
+ const struct svcxprt_rdma *rdma,
+ long status
),
- TP_ARGS(xprt),
+ TP_ARGS(rdma, status),
TP_STRUCT__entry(
- __field(const void *, xprt)
- __string(addr, xprt->xpt_remotebuf)
+ __field(long, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
- __assign_str(addr, xprt->xpt_remotebuf);
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s",
- __entry->xprt, __get_str(addr)
+ TP_printk("addr=%s status=%ld",
+ __get_str(addr), __entry->status
)
);
-#define DEFINE_XPRT_EVENT(name) \
- DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
- TP_PROTO( \
- const struct svc_xprt *xprt \
- ), \
- TP_ARGS(xprt))
+#define DEFINE_ACCEPT_EVENT(name) \
+ DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ long status \
+ ), \
+ TP_ARGS(rdma, status))
-DEFINE_XPRT_EVENT(accept);
-DEFINE_XPRT_EVENT(fail);
-DEFINE_XPRT_EVENT(free);
+DEFINE_ACCEPT_EVENT(pd);
+DEFINE_ACCEPT_EVENT(qp);
+DEFINE_ACCEPT_EVENT(fabric);
+DEFINE_ACCEPT_EVENT(initdepth);
+DEFINE_ACCEPT_EVENT(accept);
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
@@ -1355,7 +1355,7 @@ TRACE_EVENT(svcrdma_decode_rqst,
show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
-TRACE_EVENT(svcrdma_decode_short,
+TRACE_EVENT(svcrdma_decode_short_err,
TP_PROTO(
unsigned int hdrlen
),
@@ -1399,7 +1399,8 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event,
);
#define DEFINE_BADREQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
+ DEFINE_EVENT(svcrdma_badreq_event, \
+ svcrdma_decode_##name##_err, \
TP_PROTO( \
__be32 *p \
), \
@@ -1583,28 +1584,117 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
-TRACE_EVENT(svcrdma_dma_map_rwctx,
+TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ unsigned int nents,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, nents, status),
TP_STRUCT__entry(
__field(int, status)
+ __field(unsigned int, nents)
__string(device, rdma->sc_cm_id->device->name)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
__entry->status = status;
+ __entry->nents = nents;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s nents=%u status=%d",
+ __get_str(addr), __get_str(device), __entry->nents,
+ __entry->status
+ )
+);
+
+TRACE_EVENT(svcrdma_no_rwctx_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ unsigned int num_sges
+ ),
+
+ TP_ARGS(rdma, num_sges),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, num_sges)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->num_sges = num_sges;
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s num_sges=%d",
+ __get_str(addr), __get_str(device), __entry->num_sges
+ )
+);
+
+TRACE_EVENT(svcrdma_page_overrun_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ const struct svc_rqst *rqst,
+ unsigned int pageno
+ ),
+
+ TP_ARGS(rdma, rqst, pageno),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, pageno)
+ __field(u32, xid)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->pageno = pageno;
+ __entry->xid = __be32_to_cpu(rqst->rq_xid);
+ __assign_str(device, rdma->sc_cm_id->device->name);
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
+ __get_str(device), __entry->xid, __entry->pageno
+ )
+);
+
+TRACE_EVENT(svcrdma_small_wrch_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ unsigned int remaining,
+ unsigned int seg_no,
+ unsigned int num_segs
+ ),
+
+ TP_ARGS(rdma, remaining, seg_no, num_segs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, remaining)
+ __field(unsigned int, seg_no)
+ __field(unsigned int, num_segs)
+ __string(device, rdma->sc_cm_id->device->name)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->remaining = remaining;
+ __entry->seg_no = seg_no;
+ __entry->num_segs = num_segs;
__assign_str(device, rdma->sc_cm_id->device->name);
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s status=%d",
- __get_str(addr), __get_str(device), __entry->status
+ TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
+ __get_str(addr), __get_str(device), __entry->remaining,
+ __entry->seg_no, __entry->num_segs
)
);
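
The accept-path trace events above are reshaped into one DECLARE_EVENT_CLASS() body plus DEFINE_ACCEPT_EVENT() stamps that token-paste a per-site _err name. A minimal userspace sketch of that declare-class/define-instance shape using plain functions instead of the TRACE_EVENT machinery; accept_class_report() and the trace_svcrdma_*_err wrappers are illustrative names only:

#include <stdio.h>

/* one "class" carries the shared body ... */
#define DECLARE_ACCEPT_CLASS(body) \
	static void accept_class_report(const char *event, long status) body

/* ... and each DEFINE_* stamps out a per-name wrapper via token pasting */
#define DEFINE_ACCEPT_EVENT(name) \
	static void trace_svcrdma_##name##_err(long status) \
	{ \
		accept_class_report(#name, status); \
	}

DECLARE_ACCEPT_CLASS({
	printf("svcrdma_%s_err: status=%ld\n", event, status);
})

DEFINE_ACCEPT_EVENT(pd)
DEFINE_ACCEPT_EVENT(qp)

int main(void)
{
	trace_svcrdma_pd_err(-12);
	trace_svcrdma_qp_err(-22);
	return 0;
}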
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ffd2215950dc..6a12935b8b14 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,14 +14,50 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
-DECLARE_EVENT_CLASS(xdr_buf_class,
+TRACE_DEFINE_ENUM(SOCK_STREAM);
+TRACE_DEFINE_ENUM(SOCK_DGRAM);
+TRACE_DEFINE_ENUM(SOCK_RAW);
+TRACE_DEFINE_ENUM(SOCK_RDM);
+TRACE_DEFINE_ENUM(SOCK_SEQPACKET);
+TRACE_DEFINE_ENUM(SOCK_DCCP);
+TRACE_DEFINE_ENUM(SOCK_PACKET);
+
+#define show_socket_type(type) \
+ __print_symbolic(type, \
+ { SOCK_STREAM, "STREAM" }, \
+ { SOCK_DGRAM, "DGRAM" }, \
+ { SOCK_RAW, "RAW" }, \
+ { SOCK_RDM, "RDM" }, \
+ { SOCK_SEQPACKET, "SEQPACKET" }, \
+ { SOCK_DCCP, "DCCP" }, \
+ { SOCK_PACKET, "PACKET" })
+
+/* This list is known to be incomplete, add new enums as needed. */
+TRACE_DEFINE_ENUM(AF_UNSPEC);
+TRACE_DEFINE_ENUM(AF_UNIX);
+TRACE_DEFINE_ENUM(AF_LOCAL);
+TRACE_DEFINE_ENUM(AF_INET);
+TRACE_DEFINE_ENUM(AF_INET6);
+
+#define rpc_show_address_family(family) \
+ __print_symbolic(family, \
+ { AF_UNSPEC, "AF_UNSPEC" }, \
+ { AF_UNIX, "AF_UNIX" }, \
+ { AF_LOCAL, "AF_LOCAL" }, \
+ { AF_INET, "AF_INET" }, \
+ { AF_INET6, "AF_INET6" })
+
+DECLARE_EVENT_CLASS(rpc_xdr_buf_class,
TP_PROTO(
+ const struct rpc_task *task,
const struct xdr_buf *xdr
),
- TP_ARGS(xdr),
+ TP_ARGS(task, xdr),
TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
__field(const void *, head_base)
__field(size_t, head_len)
__field(const void *, tail_base)
@@ -31,6 +67,8 @@ DECLARE_EVENT_CLASS(xdr_buf_class,
),
TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
__entry->head_base = xdr->head[0].iov_base;
__entry->head_len = xdr->head[0].iov_len;
__entry->tail_base = xdr->tail[0].iov_base;
@@ -39,23 +77,137 @@ DECLARE_EVENT_CLASS(xdr_buf_class,
__entry->msg_len = xdr->len;
),
- TP_printk("head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ TP_printk("task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->task_id, __entry->client_id,
__entry->head_base, __entry->head_len, __entry->page_len,
__entry->tail_base, __entry->tail_len, __entry->msg_len
)
);
-#define DEFINE_XDRBUF_EVENT(name) \
- DEFINE_EVENT(xdr_buf_class, name, \
+#define DEFINE_RPCXDRBUF_EVENT(name) \
+ DEFINE_EVENT(rpc_xdr_buf_class, \
+ rpc_xdr_##name, \
TP_PROTO( \
+ const struct rpc_task *task, \
const struct xdr_buf *xdr \
), \
- TP_ARGS(xdr))
+ TP_ARGS(task, xdr))
+
+DEFINE_RPCXDRBUF_EVENT(sendto);
+DEFINE_RPCXDRBUF_EVENT(recvfrom);
+DEFINE_RPCXDRBUF_EVENT(reply_pages);
+
+
+DECLARE_EVENT_CLASS(rpc_clnt_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt
+ ),
+
+ TP_ARGS(clnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ ),
+
+ TP_printk("clid=%u", __entry->client_id)
+);
+
+#define DEFINE_RPC_CLNT_EVENT(name) \
+ DEFINE_EVENT(rpc_clnt_class, \
+ rpc_clnt_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt \
+ ), \
+ TP_ARGS(clnt))
+
+DEFINE_RPC_CLNT_EVENT(free);
+DEFINE_RPC_CLNT_EVENT(killall);
+DEFINE_RPC_CLNT_EVENT(shutdown);
+DEFINE_RPC_CLNT_EVENT(release);
+DEFINE_RPC_CLNT_EVENT(replace_xprt);
+DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+
+TRACE_EVENT(rpc_clnt_new,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt,
+ const char *program,
+ const char *server
+ ),
+
+ TP_ARGS(clnt, xprt, program, server),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ __string(program, program)
+ __string(server, server)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ __assign_str(program, program)
+ __assign_str(server, server)
+ ),
+
+ TP_printk("client=%u peer=[%s]:%s program=%s server=%s",
+ __entry->client_id, __get_str(addr), __get_str(port),
+ __get_str(program), __get_str(server))
+);
+
+TRACE_EVENT(rpc_clnt_new_err,
+ TP_PROTO(
+ const char *program,
+ const char *server,
+ int error
+ ),
+
+ TP_ARGS(program, server, error),
+
+ TP_STRUCT__entry(
+ __field(int, error)
+ __string(program, program)
+ __string(server, server)
+ ),
+
+ TP_fast_assign(
+ __entry->error = error;
+ __assign_str(program, program)
+ __assign_str(server, server)
+ ),
+
+ TP_printk("program=%s server=%s error=%d",
+ __get_str(program), __get_str(server), __entry->error)
+);
+
+TRACE_EVENT(rpc_clnt_clone_err,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ int error
+ ),
+
+ TP_ARGS(clnt, error),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, client_id)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = clnt->cl_clid;
+ __entry->error = error;
+ ),
+
+ TP_printk("client=%u error=%d", __entry->client_id, __entry->error)
+);
-DEFINE_XDRBUF_EVENT(xprt_sendto);
-DEFINE_XDRBUF_EVENT(xprt_recvfrom);
-DEFINE_XDRBUF_EVENT(svc_recvfrom);
-DEFINE_XDRBUF_EVENT(svc_sendto);
TRACE_DEFINE_ENUM(RPC_AUTH_OK);
TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
@@ -142,29 +294,35 @@ TRACE_EVENT(rpc_request,
TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
+TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS);
TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
+TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN);
TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
TRACE_DEFINE_ENUM(RPC_TASK_SENT);
TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
+TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
#define rpc_show_task_flags(flags) \
__print_flags(flags, "|", \
{ RPC_TASK_ASYNC, "ASYNC" }, \
{ RPC_TASK_SWAPPER, "SWAPPER" }, \
+ { RPC_TASK_NULLCREDS, "NULLCREDS" }, \
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
{ RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
+ { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
{ RPC_TASK_SOFTCONN, "SOFTCONN" }, \
{ RPC_TASK_SENT, "SENT" }, \
{ RPC_TASK_TIMEOUT, "TIMEOUT" }, \
{ RPC_TASK_NOCONNECT, "NOCONNECT" }, \
- { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" })
+ { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \
+ { RPC_TASK_CRED_NOREF, "CRED_NOREF" })
TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
@@ -359,6 +517,34 @@ DEFINE_RPC_REPLY_EVENT(stale_creds);
DEFINE_RPC_REPLY_EVENT(bad_creds);
DEFINE_RPC_REPLY_EVENT(auth_tooweak);
+TRACE_EVENT(rpc_call_rpcerror,
+ TP_PROTO(
+ const struct rpc_task *task,
+ int tk_status,
+ int rpc_status
+ ),
+
+ TP_ARGS(task, tk_status, rpc_status),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(int, tk_status)
+ __field(int, rpc_status)
+ ),
+
+ TP_fast_assign(
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->task_id = task->tk_pid;
+ __entry->tk_status = tk_status;
+ __entry->rpc_status = rpc_status;
+ ),
+
+ TP_printk("task:%u@%u tk_status=%d rpc_status=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->tk_status, __entry->rpc_status)
+);
+
TRACE_EVENT(rpc_stats_latency,
TP_PROTO(
@@ -526,43 +712,6 @@ TRACE_EVENT(rpc_xdr_alignment,
)
);
-TRACE_EVENT(rpc_reply_pages,
- TP_PROTO(
- const struct rpc_rqst *req
- ),
-
- TP_ARGS(req),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(const void *, head_base)
- __field(size_t, head_len)
- __field(const void *, tail_base)
- __field(size_t, tail_len)
- __field(unsigned int, page_len)
- ),
-
- TP_fast_assign(
- __entry->task_id = req->rq_task->tk_pid;
- __entry->client_id = req->rq_task->tk_client->cl_clid;
-
- __entry->head_base = req->rq_rcv_buf.head[0].iov_base;
- __entry->head_len = req->rq_rcv_buf.head[0].iov_len;
- __entry->page_len = req->rq_rcv_buf.page_len;
- __entry->tail_base = req->rq_rcv_buf.tail[0].iov_base;
- __entry->tail_len = req->rq_rcv_buf.tail[0].iov_len;
- ),
-
- TP_printk(
- "task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n",
- __entry->task_id, __entry->client_id,
- __entry->head_base, __entry->head_len,
- __entry->page_len,
- __entry->tail_base, __entry->tail_len
- )
-);
-
/*
* First define the enums in the below macros to be exported to userspace
* via TRACE_DEFINE_ENUM().
@@ -575,9 +724,9 @@ TRACE_EVENT(rpc_reply_pages,
#define RPC_SHOW_SOCKET \
EM( SS_FREE, "FREE" ) \
EM( SS_UNCONNECTED, "UNCONNECTED" ) \
- EM( SS_CONNECTING, "CONNECTING," ) \
- EM( SS_CONNECTED, "CONNECTED," ) \
- EMe(SS_DISCONNECTING, "DISCONNECTING" )
+ EM( SS_CONNECTING, "CONNECTING" ) \
+ EM( SS_CONNECTED, "CONNECTED" ) \
+ EMe( SS_DISCONNECTING, "DISCONNECTING" )
#define rpc_show_socket_state(state) \
__print_symbolic(state, RPC_SHOW_SOCKET)
@@ -719,6 +868,69 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
+TRACE_DEFINE_ENUM(XPRT_LOCKED);
+TRACE_DEFINE_ENUM(XPRT_CONNECTED);
+TRACE_DEFINE_ENUM(XPRT_CONNECTING);
+TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT);
+TRACE_DEFINE_ENUM(XPRT_BOUND);
+TRACE_DEFINE_ENUM(XPRT_BINDING);
+TRACE_DEFINE_ENUM(XPRT_CLOSING);
+TRACE_DEFINE_ENUM(XPRT_CONGESTED);
+TRACE_DEFINE_ENUM(XPRT_CWND_WAIT);
+TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE);
+
+#define rpc_show_xprt_state(x) \
+ __print_flags(x, "|", \
+ { (1UL << XPRT_LOCKED), "LOCKED"}, \
+ { (1UL << XPRT_CONNECTED), "CONNECTED"}, \
+ { (1UL << XPRT_CONNECTING), "CONNECTING"}, \
+ { (1UL << XPRT_CLOSE_WAIT), "CLOSE_WAIT"}, \
+ { (1UL << XPRT_BOUND), "BOUND"}, \
+ { (1UL << XPRT_BINDING), "BINDING"}, \
+ { (1UL << XPRT_CLOSING), "CLOSING"}, \
+ { (1UL << XPRT_CONGESTED), "CONGESTED"}, \
+ { (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \
+ { (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"})
+
+DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
+ TP_PROTO(
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
+ __string(port, xprt->address_strings[RPC_DISPLAY_PORT])
+ ),
+
+ TP_fast_assign(
+ __entry->state = xprt->state;
+ __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
+ ),
+
+ TP_printk("peer=[%s]:%s state=%s",
+ __get_str(addr), __get_str(port),
+ rpc_show_xprt_state(__entry->state))
+);
+
+#define DEFINE_RPC_XPRT_LIFETIME_EVENT(name) \
+ DEFINE_EVENT(rpc_xprt_lifetime_class, \
+ xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
+
+DEFINE_RPC_XPRT_LIFETIME_EVENT(create);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
+DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
+
DECLARE_EVENT_CLASS(rpc_xprt_event,
TP_PROTO(
const struct rpc_xprt *xprt,
@@ -990,6 +1202,54 @@ TRACE_EVENT(xs_stream_read_request,
__entry->copied, __entry->reclen, __entry->offset)
);
+
+DECLARE_EVENT_CLASS(svc_xdr_buf_class,
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ const struct xdr_buf *xdr
+ ),
+
+ TP_ARGS(rqst, xdr),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(const void *, head_base)
+ __field(size_t, head_len)
+ __field(const void *, tail_base)
+ __field(size_t, tail_len)
+ __field(unsigned int, page_len)
+ __field(unsigned int, msg_len)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->head_base = xdr->head[0].iov_base;
+ __entry->head_len = xdr->head[0].iov_len;
+ __entry->tail_base = xdr->tail[0].iov_base;
+ __entry->tail_len = xdr->tail[0].iov_len;
+ __entry->page_len = xdr->page_len;
+ __entry->msg_len = xdr->len;
+ ),
+
+ TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+ __entry->xid,
+ __entry->head_base, __entry->head_len, __entry->page_len,
+ __entry->tail_base, __entry->tail_len, __entry->msg_len
+ )
+);
+
+#define DEFINE_SVCXDRBUF_EVENT(name) \
+ DEFINE_EVENT(svc_xdr_buf_class, \
+ svc_xdr_##name, \
+ TP_PROTO( \
+ const struct svc_rqst *rqst, \
+ const struct xdr_buf *xdr \
+ ), \
+ TP_ARGS(rqst, xdr))
+
+DEFINE_SVCXDRBUF_EVENT(recvfrom);
+DEFINE_SVCXDRBUF_EVENT(sendto);
+
#define show_rqstp_flags(flags) \
__print_flags(flags, "|", \
{ (1UL << RQ_SECURE), "RQ_SECURE"}, \
@@ -1024,6 +1284,17 @@ TRACE_EVENT(svc_recv,
show_rqstp_flags(__entry->flags))
);
+TRACE_DEFINE_ENUM(SVC_GARBAGE);
+TRACE_DEFINE_ENUM(SVC_SYSERR);
+TRACE_DEFINE_ENUM(SVC_VALID);
+TRACE_DEFINE_ENUM(SVC_NEGATIVE);
+TRACE_DEFINE_ENUM(SVC_OK);
+TRACE_DEFINE_ENUM(SVC_DROP);
+TRACE_DEFINE_ENUM(SVC_CLOSE);
+TRACE_DEFINE_ENUM(SVC_DENIED);
+TRACE_DEFINE_ENUM(SVC_PENDING);
+TRACE_DEFINE_ENUM(SVC_COMPLETE);
+
#define svc_show_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
@@ -1167,28 +1438,54 @@ DEFINE_EVENT(svc_rqst_status, svc_send,
{ (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
{ (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
+TRACE_EVENT(svc_xprt_create_err,
+ TP_PROTO(
+ const char *program,
+ const char *protocol,
+ struct sockaddr *sap,
+ const struct svc_xprt *xprt
+ ),
+
+ TP_ARGS(program, protocol, sap, xprt),
+
+ TP_STRUCT__entry(
+ __field(long, error)
+ __string(program, program)
+ __string(protocol, protocol)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->error = PTR_ERR(xprt);
+ __assign_str(program, program);
+ __assign_str(protocol, protocol);
+ memcpy(__entry->addr, sap, sizeof(__entry->addr));
+ ),
+
+ TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
+ __entry->addr, __get_str(program), __get_str(protocol),
+ __entry->error)
+);
+
TRACE_EVENT(svc_xprt_do_enqueue,
TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
TP_ARGS(xprt, rqst),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(int, pid)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
__entry->pid = rqst? rqst->rq_task->pid : 0;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s pid=%d flags=%s",
- __entry->xprt, __get_str(addr),
- __entry->pid, show_svc_xprt_flags(__entry->flags))
+ TP_printk("addr=%s pid=%d flags=%s", __get_str(addr),
+ __entry->pid, show_svc_xprt_flags(__entry->flags))
);
DECLARE_EVENT_CLASS(svc_xprt_event,
@@ -1197,25 +1494,55 @@ DECLARE_EVENT_CLASS(svc_xprt_event,
TP_ARGS(xprt),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s flags=%s",
- __entry->xprt, __get_str(addr),
- show_svc_xprt_flags(__entry->flags))
+ TP_printk("addr=%s flags=%s", __get_str(addr),
+ show_svc_xprt_flags(__entry->flags))
);
-DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space,
- TP_PROTO(struct svc_xprt *xprt),
- TP_ARGS(xprt));
+#define DEFINE_SVC_XPRT_EVENT(name) \
+ DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \
+ TP_PROTO( \
+ struct svc_xprt *xprt \
+ ), \
+ TP_ARGS(xprt))
+
+DEFINE_SVC_XPRT_EVENT(no_write_space);
+DEFINE_SVC_XPRT_EVENT(close);
+DEFINE_SVC_XPRT_EVENT(detach);
+DEFINE_SVC_XPRT_EVENT(free);
+
+TRACE_EVENT(svc_xprt_accept,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const char *service
+ ),
+
+ TP_ARGS(xprt, service),
+
+ TP_STRUCT__entry(
+ __string(addr, xprt->xpt_remotebuf)
+ __string(protocol, xprt->xpt_class->xcl_name)
+ __string(service, service)
+ ),
+
+ TP_fast_assign(
+ __assign_str(addr, xprt->xpt_remotebuf);
+ __assign_str(protocol, xprt->xpt_class->xcl_name)
+ __assign_str(service, service);
+ ),
+
+ TP_printk("addr=%s protocol=%s service=%s",
+ __get_str(addr), __get_str(protocol), __get_str(service)
+ )
+);
TRACE_EVENT(svc_xprt_dequeue,
TP_PROTO(struct svc_rqst *rqst),
@@ -1223,24 +1550,20 @@ TRACE_EVENT(svc_xprt_dequeue,
TP_ARGS(rqst),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(unsigned long, flags)
__field(unsigned long, wakeup)
__string(addr, rqst->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = rqst->rq_xprt;
__entry->flags = rqst->rq_xprt->xpt_flags;
__entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
rqst->rq_qtime));
__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu",
- __entry->xprt, __get_str(addr),
- show_svc_xprt_flags(__entry->flags),
- __entry->wakeup)
+ TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr),
+ show_svc_xprt_flags(__entry->flags), __entry->wakeup)
);
TRACE_EVENT(svc_wake_up,
@@ -1265,21 +1588,18 @@ TRACE_EVENT(svc_handle_xprt,
TP_ARGS(xprt, len),
TP_STRUCT__entry(
- __field(struct svc_xprt *, xprt)
__field(int, len)
__field(unsigned long, flags)
__string(addr, xprt->xpt_remotebuf)
),
TP_fast_assign(
- __entry->xprt = xprt;
__entry->len = len;
__entry->flags = xprt->xpt_flags;
__assign_str(addr, xprt->xpt_remotebuf);
),
- TP_printk("xprt=%p addr=%s len=%d flags=%s",
- __entry->xprt, __get_str(addr),
+ TP_printk("addr=%s len=%d flags=%s", __get_str(addr),
__entry->len, show_svc_xprt_flags(__entry->flags))
);
@@ -1313,27 +1633,221 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_ARGS(dr),
TP_STRUCT__entry(
+ __field(const void *, dr)
__field(u32, xid)
__string(addr, dr->xprt->xpt_remotebuf)
),
TP_fast_assign(
+ __entry->dr = dr;
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
__assign_str(addr, dr->xprt->xpt_remotebuf);
),
- TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
+ TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+ __entry->xid)
);
+
#define DEFINE_SVC_DEFERRED_EVENT(name) \
- DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \
+ DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \
TP_PROTO( \
const struct svc_deferred_req *dr \
), \
TP_ARGS(dr))
DEFINE_SVC_DEFERRED_EVENT(drop);
-DEFINE_SVC_DEFERRED_EVENT(revisit);
+DEFINE_SVC_DEFERRED_EVENT(queue);
+DEFINE_SVC_DEFERRED_EVENT(recv);
+
+TRACE_EVENT(svcsock_new_socket,
+ TP_PROTO(
+ const struct socket *socket
+ ),
+
+ TP_ARGS(socket),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, type)
+ __field(unsigned long, family)
+ __field(bool, listener)
+ ),
+
+ TP_fast_assign(
+ __entry->type = socket->type;
+ __entry->family = socket->sk->sk_family;
+ __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+ ),
+
+ TP_printk("type=%s family=%s%s",
+ show_socket_type(__entry->type),
+ rpc_show_address_family(__entry->family),
+ __entry->listener ? " (listener)" : ""
+ )
+);
+
+TRACE_EVENT(svcsock_marker,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ __be32 marker
+ ),
+
+ TP_ARGS(xprt, marker),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, length)
+ __field(bool, last)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK;
+ __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s length=%u%s", __get_str(addr),
+ __entry->length, __entry->last ? " (last)" : "")
+);
+
+DECLARE_EVENT_CLASS(svcsock_class,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ ssize_t result
+ ),
+
+ TP_ARGS(xprt, result),
+
+ TP_STRUCT__entry(
+ __field(ssize_t, result)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s result=%zd flags=%s", __get_str(addr),
+ __entry->result, show_svc_xprt_flags(__entry->flags)
+ )
+);
+
+#define DEFINE_SVCSOCK_EVENT(name) \
+ DEFINE_EVENT(svcsock_class, svcsock_##name, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt, \
+ ssize_t result \
+ ), \
+ TP_ARGS(xprt, result))
+
+DEFINE_SVCSOCK_EVENT(udp_send);
+DEFINE_SVCSOCK_EVENT(udp_recv);
+DEFINE_SVCSOCK_EVENT(udp_recv_err);
+DEFINE_SVCSOCK_EVENT(tcp_send);
+DEFINE_SVCSOCK_EVENT(tcp_recv);
+DEFINE_SVCSOCK_EVENT(tcp_recv_eagain);
+DEFINE_SVCSOCK_EVENT(tcp_recv_err);
+DEFINE_SVCSOCK_EVENT(data_ready);
+DEFINE_SVCSOCK_EVENT(write_space);
+
+TRACE_EVENT(svcsock_tcp_recv_short,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ u32 expected,
+ u32 received
+ ),
+
+ TP_ARGS(xprt, expected, received),
+
+ TP_STRUCT__entry(
+ __field(u32, expected)
+ __field(u32, received)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->expected = expected;
+ __entry->received = received;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s flags=%s expected=%u received=%u",
+ __get_str(addr), show_svc_xprt_flags(__entry->flags),
+ __entry->expected, __entry->received
+ )
+);
+
+TRACE_EVENT(svcsock_tcp_state,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const struct socket *socket
+ ),
+
+ TP_ARGS(xprt, socket),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, socket_state)
+ __field(unsigned long, sock_state)
+ __field(unsigned long, flags)
+ __string(addr, xprt->xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->socket_state = socket->state;
+ __entry->sock_state = socket->sk->sk_state;
+ __entry->flags = xprt->xpt_flags;
+ __assign_str(addr, xprt->xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr),
+ rpc_show_socket_state(__entry->socket_state),
+ rpc_show_sock_state(__entry->sock_state),
+ show_svc_xprt_flags(__entry->flags)
+ )
+);
+
+DECLARE_EVENT_CLASS(svcsock_accept_class,
+ TP_PROTO(
+ const struct svc_xprt *xprt,
+ const char *service,
+ long status
+ ),
+
+ TP_ARGS(xprt, service, status),
+
+ TP_STRUCT__entry(
+ __field(long, status)
+ __string(service, service)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __assign_str(service, service);
+ memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr));
+ ),
+
+ TP_printk("listener=%pISpc service=%s status=%ld",
+ __entry->addr, __get_str(service), __entry->status
+ )
+);
+
+#define DEFINE_ACCEPT_EVENT(name) \
+ DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \
+ TP_PROTO( \
+ const struct svc_xprt *xprt, \
+ const char *service, \
+ long status \
+ ), \
+ TP_ARGS(xprt, service, status))
+
+DEFINE_ACCEPT_EVENT(accept);
+DEFINE_ACCEPT_EVENT(getpeername);
DECLARE_EVENT_CLASS(cache_event,
TP_PROTO(
@@ -1368,6 +1882,86 @@ DEFINE_CACHE_EVENT(cache_entry_update);
DEFINE_CACHE_EVENT(cache_entry_make_negative);
DEFINE_CACHE_EVENT(cache_entry_no_listener);
+DECLARE_EVENT_CLASS(register_class,
+ TP_PROTO(
+ const char *program,
+ const u32 version,
+ const int family,
+ const unsigned short protocol,
+ const unsigned short port,
+ int error
+ ),
+
+ TP_ARGS(program, version, family, protocol, port, error),
+
+ TP_STRUCT__entry(
+ __field(u32, version)
+ __field(unsigned long, family)
+ __field(unsigned short, protocol)
+ __field(unsigned short, port)
+ __field(int, error)
+ __string(program, program)
+ ),
+
+ TP_fast_assign(
+ __entry->version = version;
+ __entry->family = family;
+ __entry->protocol = protocol;
+ __entry->port = port;
+ __entry->error = error;
+ __assign_str(program, program);
+ ),
+
+ TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d",
+ __get_str(program), __entry->version,
+ __entry->protocol == IPPROTO_UDP ? "udp" : "tcp",
+ __entry->port, rpc_show_address_family(__entry->family),
+ __entry->error
+ )
+);
+
+#define DEFINE_REGISTER_EVENT(name) \
+ DEFINE_EVENT(register_class, svc_##name, \
+ TP_PROTO( \
+ const char *program, \
+ const u32 version, \
+ const int family, \
+ const unsigned short protocol, \
+ const unsigned short port, \
+ int error \
+ ), \
+ TP_ARGS(program, version, family, protocol, \
+ port, error))
+
+DEFINE_REGISTER_EVENT(register);
+DEFINE_REGISTER_EVENT(noregister);
+
+TRACE_EVENT(svc_unregister,
+ TP_PROTO(
+ const char *program,
+ const u32 version,
+ int error
+ ),
+
+ TP_ARGS(program, version, error),
+
+ TP_STRUCT__entry(
+ __field(u32, version)
+ __field(int, error)
+ __string(program, program)
+ ),
+
+ TP_fast_assign(
+ __entry->version = version;
+ __entry->error = error;
+ __assign_str(program, program);
+ ),
+
+ TP_printk("program=%sv%u error=%d",
+ __get_str(program), __entry->version, __entry->error
+ )
+);
+
#endif /* _TRACE_SUNRPC_H */
#include <trace/define_trace.h>
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 0b85ed6a3710..19806eb3a8e8 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -217,13 +217,28 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
+#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */
+#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
MSM_SUBMIT_SUDO | \
+ MSM_SUBMIT_SYNCOBJ_IN | \
+ MSM_SUBMIT_SYNCOBJ_OUT | \
0)
+#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
+ MSM_SUBMIT_SYNCOBJ_RESET | \
+ 0)
+
+struct drm_msm_gem_submit_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+ __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -236,7 +251,14 @@ struct drm_msm_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
- __u32 queueid; /* in, submitqueue id */
+ __u32 queueid; /* in, submitqueue id */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u32 nr_in_syncobjs; /* in, number of entries in in_syncobjs */
+ __u32 nr_out_syncobjs; /* in, number of entries in out_syncobjs */
+ __u32 syncobj_stride; /* in, stride of syncobj arrays */
+ __u32 pad; /* in, reserved for future use, always 0 */
+
};
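To illustrate how the new syncobj fields are meant to be consumed from user space, here is a minimal, hedged sketch of a submission that waits on one input syncobj and signals one output syncobj. The file descriptor, queue id, syncobj handles and the bos/cmds arrays are assumed to be set up elsewhere, and DRM_IOCTL_MSM_GEM_SUBMIT is the pre-existing submit ioctl, not something added in this hunk.

/* Hedged sketch: submit waiting on one input syncobj, signalling one output. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

static int submit_with_syncobjs(int fd, uint32_t queueid,
				uint32_t in_handle, uint32_t out_handle,
				uint64_t bos, uint32_t nr_bos,
				uint64_t cmds, uint32_t nr_cmds)
{
	struct drm_msm_gem_submit_syncobj in = {
		.handle = in_handle,
		.flags  = MSM_SUBMIT_SYNCOBJ_RESET,	/* consume the fence after waiting */
		.point  = 0,				/* binary (non-timeline) syncobj */
	};
	struct drm_msm_gem_submit_syncobj out = {
		.handle = out_handle,
	};
	struct drm_msm_gem_submit req = {
		.flags   = MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT,
		.queueid = queueid,
		.bos     = bos,
		.nr_bos  = nr_bos,
		.cmds    = cmds,
		.nr_cmds = nr_cmds,
		.in_syncobjs     = (uint64_t)(uintptr_t)&in,
		.nr_in_syncobjs  = 1,
		.out_syncobjs    = (uint64_t)(uintptr_t)&out,
		.nr_out_syncobjs = 1,
		.syncobj_stride  = sizeof(struct drm_msm_gem_submit_syncobj),
	};

	return ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
}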
/* The normal way to synchronize with the GPU is just to CPU_PREP on
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index 8c0bc24d5d95..07c1cdcb715e 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -9,8 +9,8 @@
* Andreas Dilger <adilger@sun.com>
*/
-#ifndef _LINUX_FIEMAP_H
-#define _LINUX_FIEMAP_H
+#ifndef _UAPI_LINUX_FIEMAP_H
+#define _UAPI_LINUX_FIEMAP_H
#include <linux/types.h>
@@ -67,4 +67,4 @@ struct fiemap {
#define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other
* files. */
-#endif /* _LINUX_FIEMAP_H */
+#endif /* _UAPI_LINUX_FIEMAP_H */
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 2dc10a034de1..07e508e6691b 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -171,6 +171,12 @@ struct gfs2_rindex {
#define GFS2_RGF_NOALLOC 0x00000008
#define GFS2_RGF_TRIMMED 0x00000010
+struct gfs2_inode_lvb {
+ __be32 ri_magic;
+ __be32 __pad;
+ __be64 ri_generation_deleted;
+};
+
struct gfs2_rgrp_lvb {
__be32 rl_magic;
__be32 rl_flags;
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index 4ad3496e5c43..e907b7091a46 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -285,6 +285,11 @@ struct iommu_gpasid_bind_data_vtd {
__u32 emt;
};
+#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \
+ IOMMU_SVA_VTD_GPASID_EMTE | \
+ IOMMU_SVA_VTD_GPASID_PCD | \
+ IOMMU_SVA_VTD_GPASID_PWT)
+
/**
* struct iommu_gpasid_bind_data - Information about device and guest PASID binding
* @version: Version of this data structure
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index d78064007b17..f3956fc11de6 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -94,6 +94,7 @@
#define BALLOON_KVM_MAGIC 0x13661366
#define ZSMALLOC_MAGIC 0x58295829
#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */
+#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */
#define Z3FOLD_MAGIC 0x33
#define PPC_CMM_MAGIC 0xc7571590
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index 83bba58d47f4..fa9aff91cbf2 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -99,6 +99,7 @@ struct rtc_pll_info {
#define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */
#define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */
#define RTC_VL_ACCURACY_LOW _BITUL(3) /* Voltage is low, RTC accuracy is reduced */
+#define RTC_VL_BACKUP_SWITCH _BITUL(4) /* Backup switchover happened */
#define RTC_VL_READ _IOR('p', 0x13, unsigned int) /* Voltage low detection */
#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 6596f3a09e54..b619f37ee03e 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -173,6 +173,15 @@ struct tee_ioctl_buf_data {
#define TEE_IOCTL_LOGIN_APPLICATION 4
#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
+/*
+ * Disallow user space from using the GP implementation-specific login
+ * method range (0x80000000 - 0xBFFFFFFF). This range is instead reserved
+ * for REE kernel clients and the TEE implementation.
+ */
+#define TEE_IOCTL_LOGIN_REE_KERNEL_MIN 0x80000000
+#define TEE_IOCTL_LOGIN_REE_KERNEL_MAX 0xBFFFFFFF
+/* Private login method for REE kernel clients */
+#define TEE_IOCTL_LOGIN_REE_KERNEL 0x80000000
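As a small, hedged illustration of the new reserved range, a user-space client could screen out REE-kernel login values before issuing TEE_IOC_OPEN_SESSION, mirroring what the kernel now enforces; the helper name is illustrative.

/* Hedged sketch: user space can reject reserved login values up front. */
#include <linux/tee.h>

static int tee_login_usable_from_userspace(__u32 login)
{
	return login < TEE_IOCTL_LOGIN_REE_KERNEL_MIN ||
	       login > TEE_IOCTL_LOGIN_REE_KERNEL_MAX;
}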
/**
* struct tee_ioctl_param - parameter
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 015516bcfaa3..eca6692667a3 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -305,6 +305,7 @@ struct vfio_region_info_cap_type {
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
#define VFIO_REGION_TYPE_GFX (1)
#define VFIO_REGION_TYPE_CCW (2)
+#define VFIO_REGION_TYPE_MIGRATION (3)
/* sub-types for VFIO_REGION_TYPE_PCI_* */
@@ -378,6 +379,235 @@ struct vfio_region_gfx_edid {
/* sub-types for VFIO_REGION_TYPE_CCW */
#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
+#define VFIO_REGION_SUBTYPE_CCW_SCHIB (2)
+#define VFIO_REGION_SUBTYPE_CCW_CRW (3)
+
+/* sub-types for VFIO_REGION_TYPE_MIGRATION */
+#define VFIO_REGION_SUBTYPE_MIGRATION (1)
+
+/*
+ * The structure vfio_device_migration_info is placed at the 0th offset of
+ * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related
+ * migration information. Field accesses from this structure are only supported
+ * at their native width and alignment. Otherwise, the result is undefined and
+ * vendor drivers should return an error.
+ *
+ * device_state: (read/write)
+ * - The user application writes to this field to inform the vendor driver
+ * about the device state to be transitioned to.
+ * - The vendor driver should take the necessary actions to change the
+ * device state. After successful transition to a given state, the
+ * vendor driver should return success on write(device_state, state)
+ * system call. If the device state transition fails, the vendor driver
+ * should return an appropriate -errno for the fault condition.
+ * - On the user application side, if the device state transition fails,
+ * that is, if write(device_state, state) returns an error, read
+ * device_state again to determine the current state of the device from
+ * the vendor driver.
+ * - The vendor driver should return previous state of the device unless
+ * the vendor driver has encountered an internal error, in which case
+ * the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR.
+ * - The user application must use the device reset ioctl to recover the
+ * device from VFIO_DEVICE_STATE_ERROR state. If the device is
+ * indicated to be in a valid device state by reading device_state, the
+ * user application may attempt to transition the device to any valid
+ * state reachable from the current state or terminate itself.
+ *
+ * device_state consists of 3 bits:
+ * - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear,
+ * it indicates the _STOP state. When the device state is changed to
+ * _STOP, driver should stop the device before write() returns.
+ * - If bit 1 is set, it indicates the _SAVING state, which means that the
+ * driver should start gathering device state information that will be
+ * provided to the VFIO user application to save the device's state.
+ * - If bit 2 is set, it indicates the _RESUMING state, which means that
+ * the driver should prepare to resume the device. Data provided through
+ * the migration region should be used to resume the device.
+ * Bits 3 - 31 are reserved for future use. To preserve them, the user
+ * application should perform a read-modify-write operation on this
+ * field when modifying the specified bits.
+ *
+ * +------- _RESUMING
+ * |+------ _SAVING
+ * ||+----- _RUNNING
+ * |||
+ * 000b => Device Stopped, not saving or resuming
+ * 001b => Device running, which is the default state
+ * 010b => Stop the device & save the device state, stop-and-copy state
+ * 011b => Device running and save the device state, pre-copy state
+ * 100b => Device stopped and the device state is resuming
+ * 101b => Invalid state
+ * 110b => Error state
+ * 111b => Invalid state
+ *
+ * State transitions:
+ *
+ * _RESUMING _RUNNING Pre-copy Stop-and-copy _STOP
+ * (100b) (001b) (011b) (010b) (000b)
+ * 0. Running or default state
+ * |
+ *
+ * 1. Normal Shutdown (optional)
+ * |------------------------------------->|
+ *
+ * 2. Save the state or suspend
+ * |------------------------->|---------->|
+ *
+ * 3. Save the state during live migration
+ * |----------->|------------>|---------->|
+ *
+ * 4. Resuming
+ * |<---------|
+ *
+ * 5. Resumed
+ * |--------->|
+ *
+ * 0. Default state of VFIO device is _RUNNING when the user application starts.
+ * 1. During normal shutdown of the user application, the user application may
+ * optionally change the VFIO device state from _RUNNING to _STOP. This
+ * transition is optional. The vendor driver must support this transition but
+ * must not require it.
+ * 2. When the user application saves state or suspends the application, the
+ * device state transitions from _RUNNING to stop-and-copy and then to _STOP.
+ * On state transition from _RUNNING to stop-and-copy, driver must stop the
+ * device, save the device state and send it to the application through the
+ * migration region. The sequence to be followed for such a transition is
+ * given below.
+ * 3. In live migration of user application, the state transitions from _RUNNING
+ * to pre-copy, to stop-and-copy, and to _STOP.
+ * On state transition from _RUNNING to pre-copy, the driver should start
+ * gathering the device state while the application is still running and send
+ * the device state data to application through the migration region.
+ * On state transition from pre-copy to stop-and-copy, the driver must stop
+ * the device, save the device state and send it to the user application
+ * through the migration region.
+ * Vendor drivers must support the pre-copy state even for implementations
+ * where no data is provided to the user before the stop-and-copy state. The
+ * user must not be required to consume all migration data before the device
+ * transitions to a new state, including the stop-and-copy state.
+ * The sequence to be followed for the above two transitions is given below.
+ * 4. To start the resuming phase, the device state should be transitioned from
+ * the _RUNNING to the _RESUMING state.
+ * In the _RESUMING state, the driver should use the device state data
+ * received through the migration region to resume the device.
+ * 5. After providing saved device data to the driver, the application should
+ * change the state from _RESUMING to _RUNNING.
+ *
+ * reserved:
+ * Reads on this field return zero and writes are ignored.
+ *
+ * pending_bytes: (read only)
+ * The number of pending bytes still to be migrated from the vendor driver.
+ *
+ * data_offset: (read only)
+ * The user application should read data_offset field from the migration
+ * region. The user application should read the device data from this
+ * offset within the migration region during the _SAVING state or write
+ * the device data during the _RESUMING state. See below for details of
+ * sequence to be followed.
+ *
+ * data_size: (read/write)
+ * The user application should read data_size to get the size in bytes of
+ * the data copied in the migration region during the _SAVING state and
+ * write the size in bytes of the data copied in the migration region
+ * during the _RESUMING state.
+ *
+ * The format of the migration region is as follows:
+ * ------------------------------------------------------------------
+ * |vfio_device_migration_info| data section |
+ * | | /////////////////////////////// |
+ * ------------------------------------------------------------------
+ * ^ ^
+ * offset 0-trapped part data_offset
+ *
+ * The structure vfio_device_migration_info is always followed by the data
+ * section in the region, so data_offset will always be nonzero. The offset
+ * from where the data is copied is decided by the kernel driver. The data
+ * section can be trapped, mmapped, or partitioned, depending on how the kernel
+ * driver defines the data section. The data section partition can be defined
+ * as mapped by the sparse mmap capability. If mmapped, data_offset must be
+ * page aligned, whereas the initial section, which contains the
+ * vfio_device_migration_info structure, might not end at a page-aligned
+ * offset. The user is not required to access the data through mmap
+ * regardless of the mmap capabilities of the region.
+ * The vendor driver should determine whether and how to partition the data
+ * section. The vendor driver should return data_offset accordingly.
+ *
+ * The sequence to be followed while in pre-copy state and stop-and-copy state
+ * is as follows:
+ * a. Read pending_bytes, indicating the start of a new iteration to get device
+ * data. Repeated read on pending_bytes at this stage should have no side
+ * effects.
+ * If pending_bytes == 0, the user application should not iterate to get data
+ * for that device.
+ * If pending_bytes > 0, perform the following steps.
+ * b. Read data_offset, indicating that the vendor driver should make data
+ * available through the data section. The vendor driver should return this
+ * read operation only after data is available from (region + data_offset)
+ * to (region + data_offset + data_size).
+ * c. Read data_size, which is the amount of data in bytes available through
+ * the migration region.
+ * Read on data_offset and data_size should return the offset and size of
+ * the current buffer if the user application reads data_offset and
+ * data_size more than once here.
+ * d. Read data_size bytes of data from (region + data_offset) from the
+ * migration region.
+ * e. Process the data.
+ * f. Read pending_bytes, which indicates that the data from the previous
+ * iteration has been read. If pending_bytes > 0, go to step b.
+ *
+ * The user application can transition from the _SAVING|_RUNNING
+ * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
+ * number of pending bytes. The user application should iterate in _SAVING
+ * (stop-and-copy) until pending_bytes is 0.
+ *
+ * The sequence to be followed while _RESUMING device state is as follows:
+ * While data for this device is available, repeat the following steps:
+ * a. Read data_offset from where the user application should write data.
+ * b. Write migration data starting at the migration region + data_offset for
+ * the length determined by data_size from the migration source.
+ * c. Write data_size, which indicates to the vendor driver that data is
+ * written in the migration region. The vendor driver must return this
+ * write operation only after consuming the data. The vendor driver should
+ * apply the user-provided migration region data to the device resume state.
+ *
+ * If an error occurs during the above sequences, the vendor driver can return
+ * an error code for next read() or write() operation, which will terminate the
+ * loop. The user application should then take the next necessary action, for
+ * example, failing migration or terminating the user application.
+ *
+ * For the user application, data is opaque. The user application should write
+ * data in the same order as the data is received, and the data should be of
+ * the same transaction size as at the source.
+ */
+
+struct vfio_device_migration_info {
+ __u32 device_state; /* VFIO device state */
+#define VFIO_DEVICE_STATE_STOP (0)
+#define VFIO_DEVICE_STATE_RUNNING (1 << 0)
+#define VFIO_DEVICE_STATE_SAVING (1 << 1)
+#define VFIO_DEVICE_STATE_RESUMING (1 << 2)
+#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \
+ VFIO_DEVICE_STATE_SAVING | \
+ VFIO_DEVICE_STATE_RESUMING)
+
+#define VFIO_DEVICE_STATE_VALID(state) \
+ (state & VFIO_DEVICE_STATE_RESUMING ? \
+ (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)
+
+#define VFIO_DEVICE_STATE_IS_ERROR(state) \
+ ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
+ VFIO_DEVICE_STATE_RESUMING))
+
+#define VFIO_DEVICE_STATE_SET_ERROR(state) \
+ ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_SAVING | \
+ VFIO_DEVICE_STATE_RESUMING)
+
+ __u32 reserved;
+ __u64 pending_bytes;
+ __u64 data_offset;
+ __u64 data_size;
+};
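A hedged user-space sketch of one iteration of the pre-copy/stop-and-copy sequence (steps a-f above). It assumes the migration region has already been located via VFIO_DEVICE_GET_REGION_INFO, that the region is accessed through read/write on the device fd at region_offset, and that process_data() is an illustrative placeholder; error handling is elided for brevity.

/* Hedged sketch: one _SAVING iteration against the migration region. */
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/vfio.h>

extern void process_data(const void *buf, uint64_t len);	/* placeholder */

static int save_one_iteration(int device_fd, off_t region_offset, void *buf)
{
	uint64_t pending, data_offset, data_size;

	/* a. Read pending_bytes to start a new iteration. */
	pread(device_fd, &pending, sizeof(pending), region_offset +
	      offsetof(struct vfio_device_migration_info, pending_bytes));
	if (!pending)
		return 0;	/* nothing left; caller may move to the next state */

	/* b. Read data_offset; the driver now exposes data there. */
	pread(device_fd, &data_offset, sizeof(data_offset), region_offset +
	      offsetof(struct vfio_device_migration_info, data_offset));

	/* c. Read data_size for the amount of data in this buffer. */
	pread(device_fd, &data_size, sizeof(data_size), region_offset +
	      offsetof(struct vfio_device_migration_info, data_size));

	/* d. Read data_size bytes from region + data_offset. */
	pread(device_fd, buf, data_size, region_offset + data_offset);

	/* e. Process the data; step f is the next pending_bytes read above. */
	process_data(buf, data_size);
	return 1;	/* caller loops while this returns 1 */
}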
/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
@@ -577,6 +807,7 @@ enum {
enum {
VFIO_CCW_IO_IRQ_INDEX,
+ VFIO_CCW_CRW_IRQ_INDEX,
VFIO_CCW_NUM_IRQS
};
@@ -785,6 +1016,29 @@ struct vfio_iommu_type1_info_cap_iova_range {
struct vfio_iova_range iova_ranges[];
};
+/*
+ * The migration capability allows reporting supported features for migration.
+ *
+ * The structures below define version 1 of this capability.
+ *
+ * The existence of this capability indicates that the IOMMU kernel driver
+ * supports dirty page logging.
+ *
+ * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty
+ * page logging.
+ * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap
+ * size in bytes that can be used by user applications when getting the dirty
+ * bitmap.
+ */
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 1
+
+struct vfio_iommu_type1_info_cap_migration {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+ __u64 pgsize_bitmap;
+ __u64 max_dirty_bitmap_size; /* in bytes */
+};
+
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
/**
@@ -805,6 +1059,12 @@ struct vfio_iommu_type1_dma_map {
#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
+struct vfio_bitmap {
+ __u64 pgsize; /* page size for bitmap in bytes */
+ __u64 size; /* in bytes */
+ __u64 __user *data; /* one bit per page */
+};
+
/**
* VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_dma_unmap)
@@ -814,12 +1074,23 @@ struct vfio_iommu_type1_dma_map {
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
+ * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap
+ * before unmapping IO virtual addresses. When this flag is set, the user must
+ * provide a struct vfio_bitmap in data[]. The user must provide zeroed
+ * memory via vfio_bitmap.data and its size in the vfio_bitmap.size field.
+ * One bit in the bitmap represents one page of the user-provided page size
+ * in the vfio_bitmap.pgsize field, consecutively starting from the iova
+ * offset. A set bit indicates that the page at that offset from iova is
+ * dirty. A bitmap of the pages in the range of the unmapped size is
+ * returned in the user-provided vfio_bitmap.data.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
+#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0)
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
+ __u8 data[];
};
#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
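A hedged sketch of an unmap that also collects the dirty bitmap, following the comment above: a struct vfio_bitmap is appended in data[], with zeroed memory sized at one bit per pgsize page of the unmapped range. The container fd, iova, size and pgsize are assumptions for illustration, and error handling is minimal.

/* Hedged sketch: unmap an IOVA range and fetch its dirty bitmap. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int unmap_and_get_dirty(int container_fd, uint64_t iova, uint64_t size,
			       uint64_t pgsize)
{
	uint64_t bitmap_bytes = (size / pgsize + 63) / 64 * 8;	/* one bit per page */
	struct vfio_iommu_type1_dma_unmap *unmap;
	struct vfio_bitmap *bitmap;
	int ret;

	unmap = calloc(1, sizeof(*unmap) + sizeof(*bitmap));
	if (!unmap)
		return -1;

	unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
	unmap->flags = VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
	unmap->iova = iova;
	unmap->size = size;

	bitmap = (struct vfio_bitmap *)unmap->data;
	bitmap->pgsize = pgsize;
	bitmap->size = bitmap_bytes;
	bitmap->data = calloc(1, bitmap_bytes);		/* zeroed, filled by the kernel */

	ret = ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, unmap);
	/* On success, set bits in bitmap->data mark the dirty pages. */
	free(bitmap->data);
	free(unmap);
	return ret;
}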
@@ -831,6 +1102,57 @@ struct vfio_iommu_type1_dma_unmap {
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)
+/**
+ * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17,
+ * struct vfio_iommu_type1_dirty_bitmap)
+ * IOCTL is used for dirty pages logging.
+ * Caller should set flag depending on which operation to perform, details as
+ * below:
+ *
+ * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set, instructs
+ * the IOMMU driver to log pages that are dirtied or potentially dirtied by
+ * the device; designed to be used when a migration is in progress. Dirty pages
+ * are logged until logging is disabled by user application by calling the IOCTL
+ * with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag.
+ *
+ * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set, instructs
+ * the IOMMU driver to stop logging dirtied pages.
+ *
+ * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set
+ * returns the dirty pages bitmap for IOMMU container for a given IOVA range.
+ * The user must specify the IOVA range and the pgsize through the structure
+ * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
+ * supports getting a bitmap of the smallest supported pgsize only and can be
+ * modified in future to get a bitmap of any specified supported pgsize. The
+ * user must provide a zeroed memory area for the bitmap memory and specify its
+ * size in bitmap.size. One bit is used to represent one page consecutively
+ * starting from iova offset. The user should provide page size in bitmap.pgsize
+ * field. A bit set in the bitmap indicates that the page at that offset from
+ * iova is dirty. The caller must set argsz to a value including the size of
+ * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the
+ * actual bitmap. If dirty pages logging is not enabled, an error will be
+ * returned.
+ *
+ * Only one of the flags _START, _STOP and _GET may be specified at a time.
+ *
+ */
+struct vfio_iommu_type1_dirty_bitmap {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
+ __u8 data[];
+};
+
+struct vfio_iommu_type1_dirty_bitmap_get {
+ __u64 iova; /* IO virtual address */
+ __u64 size; /* Size of iova range */
+ struct vfio_bitmap bitmap;
+};
+
+#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17)
+
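A hedged sketch of the _START / _GET_BITMAP flow just described: dirty page tracking is switched on once, then the bitmap for one IOVA range is fetched with a vfio_iommu_type1_dirty_bitmap_get embedded in data[]. The container fd, range, page size and the caller-provided zeroed buffer are illustrative assumptions.

/* Hedged sketch: start dirty tracking, then read the bitmap for one range. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/vfio.h>

static int get_dirty_bitmap(int container_fd, uint64_t iova, uint64_t size,
			    uint64_t pgsize, __u64 *bits)
{
	struct vfio_iommu_type1_dirty_bitmap start = {
		.argsz = sizeof(start),
		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
	};
	struct vfio_iommu_type1_dirty_bitmap *get;
	struct vfio_iommu_type1_dirty_bitmap_get *range;
	int ret;

	if (ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start))
		return -1;

	get = calloc(1, sizeof(*get) + sizeof(*range));
	if (!get)
		return -1;
	/* argsz covers the _get header but not the bitmap memory itself. */
	get->argsz = sizeof(*get) + sizeof(*range);
	get->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;

	range = (struct vfio_iommu_type1_dirty_bitmap_get *)get->data;
	range->iova = iova;
	range->size = size;
	range->bitmap.pgsize = pgsize;
	range->bitmap.size = (size / pgsize + 63) / 64 * 8;	/* in bytes */
	range->bitmap.data = bits;	/* caller-provided zeroed buffer */

	ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, get);
	free(get);
	return ret;
}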
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
index cbecbf0cd54f..aa04f3aa6db0 100644
--- a/include/uapi/linux/vfio_ccw.h
+++ b/include/uapi/linux/vfio_ccw.h
@@ -34,4 +34,23 @@ struct ccw_cmd_region {
__u32 ret_code;
} __packed;
+/*
+ * Used for processing commands that read the subchannel-information block.
+ * Reading this region triggers an stsch() to the hardware.
+ * Note: this is controlled by a capability.
+ */
+struct ccw_schib_region {
+#define SCHIB_AREA_SIZE 52
+ __u8 schib_area[SCHIB_AREA_SIZE];
+} __packed;
+
+/*
+ * Used for returning a Channel Report Word to userspace.
+ * Note: this is controlled by a capability
+ */
+struct ccw_crw_region {
+ __u32 crw;
+ __u32 pad;
+} __packed;
+
#endif
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 9fe72e4b1373..0c2349612e77 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define VHOST_FILE_UNBIND -1
+
/* ioctls */
#define VHOST_VIRTIO 0xAF
@@ -140,4 +142,6 @@
/* Get the max ring size. */
#define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16)
+/* Set event fd for config interrupt */
+#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)
#endif
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index ecc27a17401a..b052355ac7a3 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -44,6 +44,7 @@
#define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */
#define VIRTIO_ID_CRYPTO 20 /* virtio crypto */
#define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM 24 /* virtio mem */
#define VIRTIO_ID_FS 26 /* virtio filesystem */
#define VIRTIO_ID_PMEM 27 /* virtio pmem */
#define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h
new file mode 100644
index 000000000000..a9ffe041843c
--- /dev/null
+++ b/include/uapi/linux/virtio_mem.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Virtio Mem Device
+ *
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Authors:
+ * David Hildenbrand <david@redhat.com>
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MEM_H
+#define _LINUX_VIRTIO_MEM_H
+
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+/*
+ * Each virtio-mem device manages a dedicated region in physical address
+ * space. Each device can belong to a single NUMA node, multiple devices
+ * for a single NUMA node are possible. A virtio-mem device is like a
+ * "resizable DIMM" consisting of small memory blocks that can be plugged
+ * or unplugged. The device driver is responsible for (un)plugging memory
+ * blocks on demand.
+ *
+ * Virtio-mem devices can only operate on their assigned memory region in
+ * order to (un)plug memory. A device cannot (un)plug memory belonging to
+ * other devices.
+ *
+ * The "region_size" corresponds to the maximum amount of memory that can
+ * be provided by a device. The "size" corresponds to the amount of memory
+ * that is currently plugged. "requested_size" corresponds to a request
+ * from the device to the device driver to (un)plug blocks. The
+ * device driver should try to (un)plug blocks in order to reach the
+ * "requested_size". It is impossible to plug more memory than requested.
+ *
+ * The "usable_region_size" represents the memory region that can actually
+ * be used to (un)plug memory. It is always at least as big as the
+ * "requested_size" and will grow dynamically. It will only shrink when
+ * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
+ *
+ * There are no guarantees what will happen if unplugged memory is
+ * read/written. Such memory should, in general, not be touched. E.g.,
+ * even writing might succeed, but the values will simply be discarded at
+ * random points in time.
+ *
+ * It can happen that the device cannot process a request, because it is
+ * busy. The device driver has to retry later.
+ *
+ * Usually, during system resets all memory will get unplugged, so the
+ * device driver can start with a clean state. However, in specific
+ * scenarios (if the device is busy) it can happen that the device still
+ * has memory plugged. The device driver can request to unplug all memory
+ * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the
+ * device is busy.
+ */
+
+/* --- virtio-mem: feature bits --- */
+
+/* node_id is an ACPI PXM and is valid */
+#define VIRTIO_MEM_F_ACPI_PXM 0
+
+
+/* --- virtio-mem: guest -> host requests --- */
+
+/* request to plug memory blocks */
+#define VIRTIO_MEM_REQ_PLUG 0
+/* request to unplug memory blocks */
+#define VIRTIO_MEM_REQ_UNPLUG 1
+/* request to unplug all blocks and shrink the usable size */
+#define VIRTIO_MEM_REQ_UNPLUG_ALL 2
+/* request information about the plugged state of memory blocks */
+#define VIRTIO_MEM_REQ_STATE 3
+
+struct virtio_mem_req_plug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_unplug {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req_state {
+ __virtio64 addr;
+ __virtio16 nb_blocks;
+ __virtio16 padding[3];
+};
+
+struct virtio_mem_req {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_req_plug plug;
+ struct virtio_mem_req_unplug unplug;
+ struct virtio_mem_req_state state;
+ } u;
+};
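As a hedged illustration of the guest -> host request format, a driver-side helper might populate a plug request like this; byte-order conversion against the device (cpu_to_virtio16/64) and the actual virtqueue handling are deliberately left out, and the helper name is an assumption.

/* Hedged sketch: build a VIRTIO_MEM_REQ_PLUG request for nb_blocks at addr. */
#include <string.h>
#include <linux/virtio_mem.h>

static void virtio_mem_prepare_plug(struct virtio_mem_req *req,
				    __virtio64 addr, __virtio16 nb_blocks)
{
	memset(req, 0, sizeof(*req));
	req->type = VIRTIO_MEM_REQ_PLUG;
	req->u.plug.addr = addr;		/* start of the block range to plug */
	req->u.plug.nb_blocks = nb_blocks;	/* plugging beyond requested_size is denied */
}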
+
+
+/* --- virtio-mem: host -> guest response --- */
+
+/*
+ * Request processed successfully, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ACK 0
+/*
+ * Request denied - e.g. trying to plug more than requested, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ */
+#define VIRTIO_MEM_RESP_NACK 1
+/*
+ * Request cannot be processed right now, try again later, applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_UNPLUG_ALL
+ */
+#define VIRTIO_MEM_RESP_BUSY 2
+/*
+ * Error in request (e.g. addresses/alignment), applicable for
+ * - VIRTIO_MEM_REQ_PLUG
+ * - VIRTIO_MEM_REQ_UNPLUG
+ * - VIRTIO_MEM_REQ_STATE
+ */
+#define VIRTIO_MEM_RESP_ERROR 3
+
+
+/* State of memory blocks is "plugged" */
+#define VIRTIO_MEM_STATE_PLUGGED 0
+/* State of memory blocks is "unplugged" */
+#define VIRTIO_MEM_STATE_UNPLUGGED 1
+/* State of memory blocks is "mixed" */
+#define VIRTIO_MEM_STATE_MIXED 2
+
+struct virtio_mem_resp_state {
+ __virtio16 state;
+};
+
+struct virtio_mem_resp {
+ __virtio16 type;
+ __virtio16 padding[3];
+
+ union {
+ struct virtio_mem_resp_state state;
+ } u;
+};
+
+/* --- virtio-mem: configuration --- */
+
+struct virtio_mem_config {
+ /* Block size and alignment. Cannot change. */
+ __u64 block_size;
+ /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
+ __u16 node_id;
+ __u8 padding[6];
+ /* Start address of the memory region. Cannot change. */
+ __u64 addr;
+ /* Region size (maximum). Cannot change. */
+ __u64 region_size;
+ /*
+ * Currently usable region size. Can grow up to region_size. Can
+ * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
+ * update will be sent).
+ */
+ __u64 usable_region_size;
+ /*
+ * Currently used size. Changes due to plug/unplug requests, but no
+ * config updates will be sent.
+ */
+ __u64 plugged_size;
+ /* Requested size. New plug requests cannot exceed it. Can change. */
+ __u64 requested_size;
+};
+
+#endif /* _LINUX_VIRTIO_MEM_H */
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 559f42e73315..476d3e5c0fe7 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -86,6 +86,13 @@
* at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX 29
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
/* Address (guest-physical). */
@@ -112,28 +119,47 @@ struct vring_used_elem {
__virtio32 len;
};
+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_elem_t;
+
struct vring_used {
__virtio16 flags;
__virtio16 idx;
- struct vring_used_elem ring[];
+ vring_used_elem_t ring[];
};
+/*
+ * The ring element addresses are passed between components with different
+ * alignment assumptions. Thus, we might need to decrease the compiler-selected
+ * alignment, and so must use a typedef to make sure the aligned attribute
+ * actually takes hold:
+ *
+ * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
+ *
+ * When used on a struct, or struct member, the aligned attribute can only
+ * increase the alignment; in order to decrease it, the packed attribute must
+ * be specified as well. When used as part of a typedef, the aligned attribute
+ * can both increase and decrease alignment, and specifying the packed
+ * attribute generates a warning.
+ */
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+ vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+ vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+ vring_used_t;
+
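A hedged compile-time check of the claim above: with the typedef form, the aligned attribute pins the ring element types to exactly the declared alignments.

/* Hedged sketch: compile-time checks of the typedef'ed ring alignments. */
#include <linux/virtio_ring.h>

_Static_assert(_Alignof(vring_desc_t) == VRING_DESC_ALIGN_SIZE,
	       "descriptor ring elements must be 16-byte aligned");
_Static_assert(_Alignof(vring_avail_t) == VRING_AVAIL_ALIGN_SIZE,
	       "available ring must be 2-byte aligned");
_Static_assert(_Alignof(vring_used_t) == VRING_USED_ALIGN_SIZE,
	       "used ring must be 4-byte aligned");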
struct vring {
unsigned int num;
- struct vring_desc *desc;
+ vring_desc_t *desc;
- struct vring_avail *avail;
+ vring_avail_t *avail;
- struct vring_used *used;
+ vring_used_t *used;
};
-/* Alignment requirements for vring elements.
- * When using pre-virtio 1.0 layout, these fall out naturally.
- */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
+#ifndef VIRTIO_RING_NO_LEGACY
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
@@ -181,6 +207,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
+#endif /* VIRTIO_RING_NO_LEGACY */
+
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 4faa2c9767e5..f6267a8d7416 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -15,10 +15,13 @@
* Defines that are asic-specific but constitutes as ABI between kernel driver
* and userspace
*/
-#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */
+#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */
+#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 48
+#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 24
/*
- * Queue Numbering
+ * Goya queue numbering
*
* The external queues (PCI DMA channels) MUST be before the internal queues
* and each group (PCI DMA channels and internal) must be contiguous inside
@@ -46,6 +49,129 @@ enum goya_queue_id {
};
/*
+ * Gaudi queue numbering
+ * External queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*.
+ * Except for one CPU queue, all the rest are internal queues.
+ */
+
+enum gaudi_queue_id {
+ GAUDI_QUEUE_ID_DMA_0_0 = 0, /* external */
+ GAUDI_QUEUE_ID_DMA_0_1 = 1, /* external */
+ GAUDI_QUEUE_ID_DMA_0_2 = 2, /* external */
+ GAUDI_QUEUE_ID_DMA_0_3 = 3, /* external */
+ GAUDI_QUEUE_ID_DMA_1_0 = 4, /* external */
+ GAUDI_QUEUE_ID_DMA_1_1 = 5, /* external */
+ GAUDI_QUEUE_ID_DMA_1_2 = 6, /* external */
+ GAUDI_QUEUE_ID_DMA_1_3 = 7, /* external */
+ GAUDI_QUEUE_ID_CPU_PQ = 8, /* CPU */
+ GAUDI_QUEUE_ID_DMA_2_0 = 9, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_1 = 10, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_2 = 11, /* internal */
+ GAUDI_QUEUE_ID_DMA_2_3 = 12, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_0 = 13, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_1 = 14, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_2 = 15, /* internal */
+ GAUDI_QUEUE_ID_DMA_3_3 = 16, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_0 = 17, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_1 = 18, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_2 = 19, /* internal */
+ GAUDI_QUEUE_ID_DMA_4_3 = 20, /* internal */
+ GAUDI_QUEUE_ID_DMA_5_0 = 21, /* external */
+ GAUDI_QUEUE_ID_DMA_5_1 = 22, /* external */
+ GAUDI_QUEUE_ID_DMA_5_2 = 23, /* external */
+ GAUDI_QUEUE_ID_DMA_5_3 = 24, /* external */
+ GAUDI_QUEUE_ID_DMA_6_0 = 25, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_1 = 26, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_2 = 27, /* internal */
+ GAUDI_QUEUE_ID_DMA_6_3 = 28, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_0 = 29, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_1 = 30, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_2 = 31, /* internal */
+ GAUDI_QUEUE_ID_DMA_7_3 = 32, /* internal */
+ GAUDI_QUEUE_ID_MME_0_0 = 33, /* internal */
+ GAUDI_QUEUE_ID_MME_0_1 = 34, /* internal */
+ GAUDI_QUEUE_ID_MME_0_2 = 35, /* internal */
+ GAUDI_QUEUE_ID_MME_0_3 = 36, /* internal */
+ GAUDI_QUEUE_ID_MME_1_0 = 37, /* internal */
+ GAUDI_QUEUE_ID_MME_1_1 = 38, /* internal */
+ GAUDI_QUEUE_ID_MME_1_2 = 39, /* internal */
+ GAUDI_QUEUE_ID_MME_1_3 = 40, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_0 = 41, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_1 = 42, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_2 = 43, /* internal */
+ GAUDI_QUEUE_ID_TPC_0_3 = 44, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_0 = 45, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_1 = 46, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_2 = 47, /* internal */
+ GAUDI_QUEUE_ID_TPC_1_3 = 48, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_0 = 49, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_1 = 50, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_2 = 51, /* internal */
+ GAUDI_QUEUE_ID_TPC_2_3 = 52, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_0 = 53, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_1 = 54, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_2 = 55, /* internal */
+ GAUDI_QUEUE_ID_TPC_3_3 = 56, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_0 = 57, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_1 = 58, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_2 = 59, /* internal */
+ GAUDI_QUEUE_ID_TPC_4_3 = 60, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_0 = 61, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_1 = 62, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_2 = 63, /* internal */
+ GAUDI_QUEUE_ID_TPC_5_3 = 64, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_0 = 65, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_1 = 66, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_2 = 67, /* internal */
+ GAUDI_QUEUE_ID_TPC_6_3 = 68, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_0 = 69, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_1 = 70, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_2 = 71, /* internal */
+ GAUDI_QUEUE_ID_TPC_7_3 = 72, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_0 = 73, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_1 = 74, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_2 = 75, /* internal */
+ GAUDI_QUEUE_ID_NIC_0_3 = 76, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_0 = 77, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_1 = 78, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_2 = 79, /* internal */
+ GAUDI_QUEUE_ID_NIC_1_3 = 80, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_0 = 81, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_1 = 82, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_2 = 83, /* internal */
+ GAUDI_QUEUE_ID_NIC_2_3 = 84, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_0 = 85, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_1 = 86, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_2 = 87, /* internal */
+ GAUDI_QUEUE_ID_NIC_3_3 = 88, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_0 = 89, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_1 = 90, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_2 = 91, /* internal */
+ GAUDI_QUEUE_ID_NIC_4_3 = 92, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_0 = 93, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_1 = 94, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_2 = 95, /* internal */
+ GAUDI_QUEUE_ID_NIC_5_3 = 96, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_0 = 97, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_1 = 98, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_2 = 99, /* internal */
+ GAUDI_QUEUE_ID_NIC_6_3 = 100, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_0 = 101, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_1 = 102, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_2 = 103, /* internal */
+ GAUDI_QUEUE_ID_NIC_7_3 = 104, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_0 = 105, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_1 = 106, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_2 = 107, /* internal */
+ GAUDI_QUEUE_ID_NIC_8_3 = 108, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_0 = 109, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_1 = 110, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_2 = 111, /* internal */
+ GAUDI_QUEUE_ID_NIC_9_3 = 112, /* internal */
+ GAUDI_QUEUE_ID_SIZE
+};
+
+/*
* Engine Numbering
*
* Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
@@ -69,6 +195,40 @@ enum goya_engine_id {
GOYA_ENGINE_ID_SIZE
};
+enum gaudi_engine_id {
+ GAUDI_ENGINE_ID_DMA_0 = 0,
+ GAUDI_ENGINE_ID_DMA_1,
+ GAUDI_ENGINE_ID_DMA_2,
+ GAUDI_ENGINE_ID_DMA_3,
+ GAUDI_ENGINE_ID_DMA_4,
+ GAUDI_ENGINE_ID_DMA_5,
+ GAUDI_ENGINE_ID_DMA_6,
+ GAUDI_ENGINE_ID_DMA_7,
+ GAUDI_ENGINE_ID_MME_0,
+ GAUDI_ENGINE_ID_MME_1,
+ GAUDI_ENGINE_ID_MME_2,
+ GAUDI_ENGINE_ID_MME_3,
+ GAUDI_ENGINE_ID_TPC_0,
+ GAUDI_ENGINE_ID_TPC_1,
+ GAUDI_ENGINE_ID_TPC_2,
+ GAUDI_ENGINE_ID_TPC_3,
+ GAUDI_ENGINE_ID_TPC_4,
+ GAUDI_ENGINE_ID_TPC_5,
+ GAUDI_ENGINE_ID_TPC_6,
+ GAUDI_ENGINE_ID_TPC_7,
+ GAUDI_ENGINE_ID_NIC_0,
+ GAUDI_ENGINE_ID_NIC_1,
+ GAUDI_ENGINE_ID_NIC_2,
+ GAUDI_ENGINE_ID_NIC_3,
+ GAUDI_ENGINE_ID_NIC_4,
+ GAUDI_ENGINE_ID_NIC_5,
+ GAUDI_ENGINE_ID_NIC_6,
+ GAUDI_ENGINE_ID_NIC_7,
+ GAUDI_ENGINE_ID_NIC_8,
+ GAUDI_ENGINE_ID_NIC_9,
+ GAUDI_ENGINE_ID_SIZE
+};
+
enum hl_device_status {
HL_DEVICE_STATUS_OPERATIONAL,
HL_DEVICE_STATUS_IN_RESET,
@@ -101,6 +261,8 @@ enum hl_device_status {
* HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
* operations performed on the device since the last
* time the driver was loaded.
+ * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
+ * for synchronization.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -111,6 +273,7 @@ enum hl_device_status {
#define HL_INFO_HW_EVENTS_AGGREGATE 7
#define HL_INFO_CLK_RATE 8
#define HL_INFO_RESET_COUNT 9
+#define HL_INFO_TIME_SYNC 10
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -122,7 +285,8 @@ struct hl_info_hw_ip_info {
__u32 sram_size;
__u32 num_of_events;
__u32 device_id; /* PCI Device ID */
- __u32 reserved[3];
+ __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */
+ __u32 reserved[2];
__u32 armcp_cpld_version;
__u32 psoc_pci_pll_nr;
__u32 psoc_pci_pll_nf;
@@ -169,6 +333,11 @@ struct hl_info_reset_count {
__u32 soft_reset_cnt;
};
+struct hl_info_time_sync {
+ __u64 device_time;
+ __u64 host_time;
+};
+
struct hl_info_args {
/* Location of relevant struct in userspace */
__u64 return_pointer;
@@ -201,7 +370,8 @@ struct hl_info_args {
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY 1
-#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+/* 2MB minus 32 bytes for 2xMSG_PROT */
+#define HL_MAX_CB_SIZE (0x200000 - 32)
struct hl_cb_in {
/* Handle of CB or 0 if we want to create one */
@@ -232,52 +402,87 @@ union hl_cb_args {
* compatibility
*/
struct hl_cs_chunk {
- /*
- * For external queue, this represents a Handle of CB on the Host
- * For internal queue, this represents an SRAM or DRAM address of the
- * internal CB
- */
- __u64 cb_handle;
+ union {
+ /* For external queue, this represents a Handle of CB on the
+ * Host.
+ * For internal queue in Goya, this represents an SRAM or
+ * a DRAM address of the internal CB. In Gaudi, this might also
+ * represent a mapped host address of the CB.
+ *
+ * A mapped host address is in the device address space, after
+ * a host address was mapped by the device MMU.
+ */
+ __u64 cb_handle;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT is set.
+ * This holds address of array of u64 values that contain
+ * signal CS sequence numbers. The wait described by this job
+ * will listen on all those signals (wait event per signal)
+ */
+ __u64 signal_seq_arr;
+ };
+
/* Index of queue to put the CB on */
__u32 queue_index;
- /*
- * Size of command buffer with valid packets
- * Can be smaller then actual CB size
- */
- __u32 cb_size;
+
+ union {
+ /*
+ * Size of command buffer with valid packets
+ * Can be smaller than the actual CB size
+ */
+ __u32 cb_size;
+
+ /* Relevant only when HL_CS_FLAGS_WAIT is set.
+ * Number of entries in signal_seq_arr
+ */
+ __u32 num_signal_seq_arr;
+ };
+
/* HL_CS_CHUNK_FLAGS_* */
__u32 cs_chunk_flags;
+
/* Align structure to 64 bytes */
__u32 pad[11];
};
+/* SIGNAL and WAIT flags are mutually exclusive */
#define HL_CS_FLAGS_FORCE_RESTORE 0x1
+#define HL_CS_FLAGS_SIGNAL 0x2
+#define HL_CS_FLAGS_WAIT 0x4
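A hedged sketch of how the new union is meant to be filled for a wait CS: with HL_CS_FLAGS_WAIT set in hl_cs_in.cs_flags, the chunk carries an array of signal CS sequence numbers instead of a CB handle. The include path and the caller-provided sequence array are assumptions for illustration; the chunk would then presumably be placed in the chunks_execute array.

/* Hedged sketch: fill one hl_cs_chunk for a wait CS (HL_CS_FLAGS_WAIT). */
#include <stdint.h>
#include <string.h>
#include <misc/habanalabs.h>	/* assumed install path of this uapi header */

static void fill_wait_chunk(struct hl_cs_chunk *chunk, uint32_t queue_index,
			    const uint64_t *signal_seqs, uint32_t nr_seqs)
{
	memset(chunk, 0, sizeof(*chunk));
	chunk->queue_index = queue_index;
	/* With HL_CS_FLAGS_WAIT these union members replace cb_handle/cb_size. */
	chunk->signal_seq_arr = (uint64_t)(uintptr_t)signal_seqs;
	chunk->num_signal_seq_arr = nr_seqs;
}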
#define HL_CS_STATUS_SUCCESS 0
#define HL_MAX_JOBS_PER_CS 512
struct hl_cs_in {
+
/* this holds address of array of hl_cs_chunk for restore phase */
__u64 chunks_restore;
- /* this holds address of array of hl_cs_chunk for execution phase */
+
+ /* holds address of array of hl_cs_chunk for execution phase */
__u64 chunks_execute;
+
/* this holds address of array of hl_cs_chunk for store phase -
* Currently not in use
*/
__u64 chunks_store;
+
/* Number of chunks in restore phase array. Maximum number is
* HL_MAX_JOBS_PER_CS
*/
__u32 num_chunks_restore;
+
/* Number of chunks in execution array. Maximum number is
* HL_MAX_JOBS_PER_CS
*/
__u32 num_chunks_execute;
+
/* Number of chunks in restore phase array - Currently not in use */
__u32 num_chunks_store;
+
/* HL_CS_FLAGS_* */
__u32 cs_flags;
+
/* Context ID - Currently not in use */
__u32 ctx_id;
};
@@ -588,8 +793,8 @@ struct hl_debug_args {
* For jobs on external queues, the user needs to create command buffers
* through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
* internal queues, the user needs to prepare a "command buffer" with packets
- * on either the SRAM or DRAM, and give the device address of that buffer to
- * the CS ioctl.
+ * on either the device SRAM/DRAM or the host, and give the device address of
+ * that buffer to the CS ioctl.
*
* This IOCTL is asynchronous in regard to the actual execution of the CS. This
* means it returns immediately after ALL the JOBS were enqueued on their
@@ -601,7 +806,7 @@ struct hl_debug_args {
* external JOBS have been completed. Note that if the CS has internal JOBS
* which can execute AFTER the external JOBS have finished, the driver might
* report that the CS has finished executing BEFORE the internal JOBS have
- * actually finish executing.
+ * actually finished executing.
*
* Even though the sequence number increments per CS, the user can NOT
* automatically assume that if CS with sequence number N finished, then CS
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index 47ffe3208c27..4b48fbf7d343 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -104,6 +104,7 @@ struct mtd_write_req {
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
#define MTD_NO_ERASE 0x1000 /* No erase necessary */
#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
+#define MTD_SLC_ON_MLC_EMULATION 0x4000 /* Emulate SLC behavior on MLC NANDs */
/* Some common devices / combinations of capabilities */
#define MTD_CAP_ROM 0
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 01ac5853d9ac..d95ef9a2b032 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -6,7 +6,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 - 2018 Intel Corporation.
+ * Copyright(c) 2015 - 2020 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -109,6 +109,7 @@
#define HFI1_CAP_OPFN (1UL << 16) /* Enable the OPFN protocol */
#define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */
#define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */
+#define HFI1_CAP_AIP (1UL << 19) /* Enable accelerated IP */
#define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0)
#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index d4ddbe4e696c..4961d5e858eb 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -95,6 +95,7 @@ enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
UVERBS_ATTR_CREATE_CQ_FLAGS,
UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+ UVERBS_ATTR_CREATE_CQ_EVENT_FD,
};
enum uverbs_attrs_destroy_cq_cmd_attr_ids {
@@ -120,11 +121,91 @@ enum uverbs_attrs_destroy_flow_action_esp {
UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
};
+enum uverbs_attrs_create_qp_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_QP_HANDLE,
+ UVERBS_ATTR_CREATE_QP_XRCD_HANDLE,
+ UVERBS_ATTR_CREATE_QP_PD_HANDLE,
+ UVERBS_ATTR_CREATE_QP_SRQ_HANDLE,
+ UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE,
+ UVERBS_ATTR_CREATE_QP_USER_HANDLE,
+ UVERBS_ATTR_CREATE_QP_CAP,
+ UVERBS_ATTR_CREATE_QP_TYPE,
+ UVERBS_ATTR_CREATE_QP_FLAGS,
+ UVERBS_ATTR_CREATE_QP_SOURCE_QPN,
+ UVERBS_ATTR_CREATE_QP_EVENT_FD,
+ UVERBS_ATTR_CREATE_QP_RESP_CAP,
+ UVERBS_ATTR_CREATE_QP_RESP_QP_NUM,
+};
+
+enum uverbs_attrs_destroy_qp_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_QP_HANDLE,
+ UVERBS_ATTR_DESTROY_QP_RESP,
+};
+
+enum uverbs_methods_qp {
+ UVERBS_METHOD_QP_CREATE,
+ UVERBS_METHOD_QP_DESTROY,
+};
+
+enum uverbs_attrs_create_srq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_SRQ_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_PD_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_SRQ_MAX_WR,
+ UVERBS_ATTR_CREATE_SRQ_MAX_SGE,
+ UVERBS_ATTR_CREATE_SRQ_LIMIT,
+ UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS,
+ UVERBS_ATTR_CREATE_SRQ_TYPE,
+ UVERBS_ATTR_CREATE_SRQ_EVENT_FD,
+ UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR,
+ UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE,
+ UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM,
+};
+
+enum uverbs_attrs_destroy_srq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_SRQ_HANDLE,
+ UVERBS_ATTR_DESTROY_SRQ_RESP,
+};
+
+enum uverbs_methods_srq {
+ UVERBS_METHOD_SRQ_CREATE,
+ UVERBS_METHOD_SRQ_DESTROY,
+};
+
enum uverbs_methods_cq {
UVERBS_METHOD_CQ_CREATE,
UVERBS_METHOD_CQ_DESTROY,
};
+enum uverbs_attrs_create_wq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_WQ_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_PD_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_WQ_TYPE,
+ UVERBS_ATTR_CREATE_WQ_EVENT_FD,
+ UVERBS_ATTR_CREATE_WQ_MAX_WR,
+ UVERBS_ATTR_CREATE_WQ_MAX_SGE,
+ UVERBS_ATTR_CREATE_WQ_FLAGS,
+ UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR,
+ UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE,
+ UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM,
+};
+
+enum uverbs_attrs_destroy_wq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_WQ_HANDLE,
+ UVERBS_ATTR_DESTROY_WQ_RESP,
+};
+
+enum uverbs_methods_wq {
+ UVERBS_METHOD_WQ_CREATE,
+ UVERBS_METHOD_WQ_DESTROY,
+};
+
enum uverbs_methods_actions_flow_action_ops {
UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
UVERBS_METHOD_FLOW_ACTION_DESTROY,
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index a640bb814be0..5debab45ebcb 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -64,6 +64,41 @@ enum ib_uverbs_access_flags {
~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)
};
+enum ib_uverbs_srq_type {
+ IB_UVERBS_SRQT_BASIC,
+ IB_UVERBS_SRQT_XRC,
+ IB_UVERBS_SRQT_TM,
+};
+
+enum ib_uverbs_wq_type {
+ IB_UVERBS_WQT_RQ,
+};
+
+enum ib_uverbs_wq_flags {
+ IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
+ IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 1 << 1,
+ IB_UVERBS_WQ_FLAGS_DELAY_DROP = 1 << 2,
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
+};
+
+enum ib_uverbs_qp_type {
+ IB_UVERBS_QPT_RC = 2,
+ IB_UVERBS_QPT_UC,
+ IB_UVERBS_QPT_UD,
+ IB_UVERBS_QPT_RAW_PACKET = 8,
+ IB_UVERBS_QPT_XRC_INI,
+ IB_UVERBS_QPT_XRC_TGT,
+ IB_UVERBS_QPT_DRIVER = 0xFF,
+};
+
+enum ib_uverbs_qp_create_flags {
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+ IB_UVERBS_QP_CREATE_SCATTER_FCS = 1 << 8,
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
+ IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 1 << 12,
+};
+
enum ib_uverbs_query_port_cap_flags {
IB_UVERBS_PCF_SM = 1 << 1,
IB_UVERBS_PCF_NOTICE_SUP = 1 << 2,
@@ -185,6 +220,14 @@ struct ib_uverbs_query_port_resp_ex {
__u8 reserved[6];
};
+struct ib_uverbs_qp_cap {
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
+};
+
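
The new struct ib_uverbs_qp_cap above mirrors the kernel's internal QP capability limits and is what userspace hands in through the UVERBS_ATTR_CREATE_QP_CAP attribute added earlier in this series. A minimal, purely illustrative sketch of filling it follows; the installed UAPI header path and the numeric limits are assumptions for the example, not recommendations.

/* Illustrative only: populate the capability struct that accompanies
 * UVERBS_ATTR_CREATE_QP_CAP.  The limits below are arbitrary example
 * values; real applications size them for their workload and for the
 * device's reported maximums.
 */
#include <rdma/ib_user_ioctl_verbs.h>	/* installed copy of this UAPI header */
#include <string.h>

static void example_fill_qp_cap(struct ib_uverbs_qp_cap *cap)
{
	memset(cap, 0, sizeof(*cap));
	cap->max_send_wr     = 64;	/* outstanding send work requests */
	cap->max_recv_wr     = 64;	/* outstanding receive work requests */
	cap->max_send_sge    = 4;	/* scatter/gather entries per send WR */
	cap->max_recv_sge    = 4;	/* scatter/gather entries per recv WR */
	cap->max_inline_data = 0;	/* no inline data requested */
}
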
enum rdma_driver_id {
RDMA_DRIVER_UNKNOWN,
RDMA_DRIVER_MLX5,
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index df1cc3641bda..27905a0268c9 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -100,6 +100,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2,
};
enum mlx5_user_cmds_supp_uhw {
@@ -322,6 +323,8 @@ struct mlx5_ib_create_qp {
__aligned_u64 sq_buf_addr;
__aligned_u64 access_key;
};
+ __u32 ece_options;
+ __u32 reserved;
};
/* RX Hash function flags */
@@ -371,7 +374,7 @@ enum mlx5_ib_create_qp_resp_mask {
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
- __u32 reserved;
+ __u32 ece_options;
__u32 comp_mask;
__u32 tirn;
__u32 tisn;
@@ -420,12 +423,14 @@ struct mlx5_ib_burst_info {
struct mlx5_ib_modify_qp {
__u32 comp_mask;
struct mlx5_ib_burst_info burst_info;
- __u32 reserved;
+ __u32 ece_options;
};
struct mlx5_ib_modify_qp_resp {
__u32 response_length;
__u32 dctn;
+ __u32 ece_options;
+ __u32 reserved;
};
struct mlx5_ib_create_wq_resp {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 24f3388c3182..8e316ef896b5 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -241,6 +241,11 @@ enum mlx5_ib_flow_type {
MLX5_IB_FLOW_TYPE_MC_DEFAULT,
};
+enum mlx5_ib_create_flow_flags {
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS = 1 << 0,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP = 1 << 1,
+};
+
enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
@@ -251,6 +256,7 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_TAG,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
};
enum mlx5_ib_destoy_flow_attrs {
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index e42940a215a3..ed5a514305c1 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -164,6 +164,8 @@ struct rdma_ucm_query_route_resp {
__u32 num_paths;
__u8 port_num;
__u8 reserved[3];
+ __u32 ibdev_index;
+ __u32 reserved1;
};
struct rdma_ucm_query_addr_resp {
@@ -175,6 +177,8 @@ struct rdma_ucm_query_addr_resp {
__u16 dst_size;
struct __kernel_sockaddr_storage src_addr;
struct __kernel_sockaddr_storage dst_addr;
+ __u32 ibdev_index;
+ __u32 reserved1;
};
struct rdma_ucm_query_path_resp {
@@ -206,10 +210,16 @@ struct rdma_ucm_ud_param {
__u8 reserved[7];
};
+struct rdma_ucm_ece {
+ __u32 vendor_id;
+ __u32 attr_mod;
+};
+
struct rdma_ucm_connect {
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
+ struct rdma_ucm_ece ece;
};
struct rdma_ucm_listen {
@@ -222,12 +232,14 @@ struct rdma_ucm_accept {
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
+ struct rdma_ucm_ece ece;
};
struct rdma_ucm_reject {
__u32 id;
__u8 private_data_len;
- __u8 reserved[3];
+ __u8 reason;
+ __u8 reserved[2];
__u8 private_data[RDMA_MAX_PRIVATE_DATA];
};
@@ -287,6 +299,7 @@ struct rdma_ucm_event_resp {
struct rdma_ucm_ud_param ud;
} param;
__u32 reserved;
+ struct rdma_ucm_ece ece;
};
/* Option levels */
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index 9eee32f5e407..a93c0decfdd5 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -18,6 +18,8 @@
*/
#define SKL_CONTROL_TYPE_BYTE_TLV 0x100
#define SKL_CONTROL_TYPE_MIC_SELECT 0x102
+#define SKL_CONTROL_TYPE_MULTI_IO_SELECT 0x103
+#define SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC 0x104
#define HDA_SST_CFG_MAX 900 /* size of copier cfg*/
#define MAX_IN_QUEUE 8
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index 5995b79d6df1..d54be303090f 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,7 +26,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 13
+#define SOF_ABI_MINOR 16
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
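
The "MMmmmppp" comment describes how the three numbers are packed into one 32-bit word. A hedged worked example, assuming the conventional 8-bit major / 12-bit minor / 12-bit patch split (shifts of 24, 12 and 0) used by the header's SOF_ABI_VER() macro; under that assumption the bump to 3.16.0 above packs to 0x03010000.

#include <stdint.h>
#include <stdio.h>

/* Assumed field widths: 8-bit major, 12-bit minor, 12-bit patch. */
static uint32_t sof_abi_pack(uint32_t major, uint32_t minor, uint32_t patch)
{
	return (major << 24) | (minor << 12) | patch;
}

int main(void)
{
	/* SOF_ABI_MAJOR 3, SOF_ABI_MINOR 16, SOF_ABI_PATCH 0 */
	printf("SOF ABI 3.16.0 packs to 0x%08x\n", sof_abi_pack(3, 16, 0));
	return 0;
}
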
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 2a25cd8da503..5941e2eb1588 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -126,4 +126,12 @@
#define SOF_TKN_MUTE_LED_USE 1300
#define SOF_TKN_MUTE_LED_DIRECTION 1301
+/* ALH */
+#define SOF_TKN_INTEL_ALH_RATE 1400
+#define SOF_TKN_INTEL_ALH_CH 1401
+
+/* HDA */
+#define SOF_TKN_INTEL_HDA_RATE 1500
+#define SOF_TKN_INTEL_HDA_CH 1501
+
#endif
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
index f77dcbcba5a6..d7f6af50e200 100644
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -3,11 +3,11 @@
#define _ASM_ARM_XEN_PAGE_H
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <xen/xen.h>
#include <xen/interface/grant_table.h>
diff --git a/init/Kconfig b/init/Kconfig
index fdb4f52609c6..49eb7a3568ec 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -8,8 +8,25 @@ config DEFCONFIG_LIST
default "/boot/config-$(shell,uname -r)"
default "arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)"
+config CC_VERSION_TEXT
+ string
+ default "$(CC_VERSION_TEXT)"
+ help
+ This is used in unclear ways:
+
+ - Re-run Kconfig when the compiler is updated
+ The 'default' property references the environment variable
+ CC_VERSION_TEXT, so it is recorded in include/config/auto.conf.cmd.
+ When the compiler is updated, Kconfig will be invoked.
+
+ - Ensure full rebuild when the compiler is updated
+ include/linux/kconfig.h contains this option in the comment line so
+ fixdep adds include/config/cc/version/text.h into the auto-generated
+ dependency. When the compiler is updated, syncconfig will touch it
+ and then every file will be rebuilt.
+
config CC_IS_GCC
- def_bool $(success,$(CC) --version | head -n 1 | grep -q gcc)
+ def_bool $(success,echo "$(CC_VERSION_TEXT)" | grep -q gcc)
config GCC_VERSION
int
@@ -21,7 +38,7 @@ config LD_VERSION
default $(shell,$(LD) --version | $(srctree)/scripts/ld-version.sh)
config CC_IS_CLANG
- def_bool $(success,$(CC) --version | head -n 1 | grep -q clang)
+ def_bool $(success,echo "$(CC_VERSION_TEXT)" | grep -q clang)
config LD_IS_LLD
def_bool $(success,$(LD) -v | head -n 1 | grep -q LLD)
@@ -31,11 +48,22 @@ config CLANG_VERSION
default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
config CC_CAN_LINK
- def_bool $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
+ bool
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag))
+
+config CC_CAN_LINK_STATIC
+ bool
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT
+ default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag))
config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+config CC_HAS_ASM_GOTO_OUTPUT
+ depends on CC_HAS_ASM_GOTO
+ def_bool $(success,echo 'int foo(int x) { asm goto ("": "=r"(x) ::: bar); return x; bar: return 0; }' | $(CC) -x c - -c -o /dev/null)
+
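
CC_HAS_ASM_GOTO_OUTPUT probes whether the compiler accepts an asm goto statement that also carries output operands, a feature only newer compilers provide. The one-line probe above is hard to read inline; the same test program, expanded for readability (the comments are editorial):

/* Expanded form of the probe used by CC_HAS_ASM_GOTO_OUTPUT: an asm goto
 * with an output operand ("=r"(x)) and a C label it may branch to.
 * Compilers lacking the feature reject this at compile time, which is
 * all the probe needs to detect.
 */
int foo(int x)
{
	asm goto("" : "=r"(x) : : : bar);	/* output operand + possible jump */
	return x;
bar:
	return 0;
}
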
config TOOLS_SUPPORT_RELR
def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
@@ -260,6 +288,16 @@ config KERNEL_UNCOMPRESSED
endchoice
+config DEFAULT_INIT
+ string "Default init path"
+ default ""
+ help
+ This option determines the default init for the system if no init=
+ option is passed on the kernel command line. If the requested path is
+ not present, we will still then move on to attempting further
+ locations (e.g. /sbin/init, etc). If this is empty, we will just use
+ the fallback list when init= is not passed.
+
config DEFAULT_HOSTNAME
string "Default hostname"
default "(none)"
@@ -1255,7 +1293,6 @@ config LD_DEAD_CODE_DATA_ELIMINATION
bool "Dead code and data elimination (EXPERIMENTAL)"
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
depends on EXPERT
- depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
depends on $(cc-option,-ffunction-sections -fdata-sections)
depends on $(ld-option,--gc-sections)
help
diff --git a/init/Makefile b/init/Makefile
index d45e967483b2..57499b1ff471 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -35,4 +35,4 @@ include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" \
- "$(CONFIG_PREEMPT_RT)" "$(CC)" "$(LD)"
+ "$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
diff --git a/init/init_task.c b/init/init_task.c
index 15303d58d9db..15089d15010a 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -13,7 +13,6 @@
#include <linux/numa.h>
#include <linux/scs.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
static struct signal_struct init_signals = {
@@ -175,6 +174,16 @@ struct task_struct init_task
#ifdef CONFIG_KASAN
.kasan_depth = 1,
#endif
+#ifdef CONFIG_KCSAN
+ .kcsan_ctx = {
+ .disable_count = 0,
+ .atomic_next = 0,
+ .atomic_nest_count = 0,
+ .in_flat_atomic = false,
+ .access_mask = 0,
+ .scoped_accesses = {LIST_POISON1, NULL},
+ },
+#endif
#ifdef CONFIG_TRACE_IRQFLAGS
.softirqs_enabled = 1,
#endif
diff --git a/init/main.c b/init/main.c
index df32f67214d2..0ead83e86b5a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -95,6 +95,7 @@
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>
+#include <linux/kcsan.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -1036,6 +1037,7 @@ asmlinkage __visible void __init start_kernel(void)
acpi_subsystem_init();
arch_post_acpi_subsys_init();
sfi_init_late();
+ kcsan_init();
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
@@ -1412,6 +1414,8 @@ static int __ref kernel_init(void *unused)
rcu_end_inkernel_boot();
+ do_sysctl_args();
+
if (ramdisk_execute_command) {
ret = run_init_process(ramdisk_execute_command);
if (!ret)
@@ -1433,6 +1437,16 @@ static int __ref kernel_init(void *unused)
panic("Requested init %s failed (error %d).",
execute_command, ret);
}
+
+ if (CONFIG_DEFAULT_INIT[0] != '\0') {
+ ret = run_init_process(CONFIG_DEFAULT_INIT);
+ if (ret)
+ pr_err("Default init %s failed (error %d)\n",
+ CONFIG_DEFAULT_INIT, ret);
+ else
+ return 0;
+ }
+
if (!try_to_run_init_process("/sbin/init") ||
!try_to_run_init_process("/etc/init") ||
!try_to_run_init_process("/bin/init") ||
diff --git a/ipc/msg.c b/ipc/msg.c
index caca67368cb5..acd1bc7af55a 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -268,6 +268,8 @@ static void expunge_all(struct msg_queue *msq, int res,
* before freeque() is called. msg_ids.rwsem remains locked on exit.
*/
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
+ __releases(RCU)
+ __releases(&msq->q_perm)
{
struct msg_msg *msg, *t;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index fdc3b5f3f53a..24e7b45320f7 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -117,6 +117,10 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
static void free_ipc_ns(struct ipc_namespace *ns)
{
+ /* mq_put_mnt() waits for a grace period as kern_unmount()
+ * uses synchronize_rcu().
+ */
+ mq_put_mnt(ns);
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
@@ -127,6 +131,21 @@ static void free_ipc_ns(struct ipc_namespace *ns)
kfree(ns);
}
+static LLIST_HEAD(free_ipc_list);
+static void free_ipc(struct work_struct *unused)
+{
+ struct llist_node *node = llist_del_all(&free_ipc_list);
+ struct ipc_namespace *n, *t;
+
+ llist_for_each_entry_safe(n, t, node, mnt_llist)
+ free_ipc_ns(n);
+}
+
+/*
+ * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
+ */
+static DECLARE_WORK(free_ipc_work, free_ipc);
+
/*
* put_ipc_ns - drop a reference to an ipc namespace.
* @ns: the namespace to put
@@ -148,8 +167,9 @@ void put_ipc_ns(struct ipc_namespace *ns)
if (refcount_dec_and_lock(&ns->count, &mq_lock)) {
mq_clear_sbinfo(ns);
spin_unlock(&mq_lock);
- mq_put_mnt(ns);
- free_ipc_ns(ns);
+
+ if (llist_add(&ns->mnt_llist, &free_ipc_list))
+ schedule_work(&free_ipc_work);
}
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 0ba6add05b35..0a6dd94afa21 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1544,7 +1544,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (err)
goto out_fput;
- if (down_write_killable(&current->mm->mmap_sem)) {
+ if (mmap_write_lock_killable(current->mm)) {
err = -EINTR;
goto out_fput;
}
@@ -1564,7 +1564,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (IS_ERR_VALUE(addr))
err = (long)addr;
invalid:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (populate)
mm_populate(addr, populate);
@@ -1638,7 +1638,7 @@ long ksys_shmdt(char __user *shmaddr)
if (addr & ~PAGE_MASK)
return retval;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
/*
@@ -1726,7 +1726,7 @@ long ksys_shmdt(char __user *shmaddr)
#endif
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return retval;
}
diff --git a/kernel/Makefile b/kernel/Makefile
index c332eb9d4841..ce8716a04d0e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -23,6 +23,9 @@ endif
# Prevents flicker of uninteresting __do_softirq()/__local_bh_disable_ip()
# in coverage traces.
KCOV_INSTRUMENT_softirq.o := n
+# Avoid KCSAN instrumentation in softirq ("No shared variables, all the data
+# are CPU local" => assume no data races), to reduce overhead in interrupts.
+KCSAN_SANITIZE_softirq.o = n
# These are called from save_stack_trace() on slub debug path,
# and produce insane amounts of uninteresting coverage.
KCOV_INSTRUMENT_module.o := n
@@ -31,6 +34,7 @@ KCOV_INSTRUMENT_stacktrace.o := n
# Don't self-instrument.
KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
+KCSAN_SANITIZE_kcov.o := n
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
# cond_syscall is currently not LTO compatible
@@ -103,6 +107,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
+obj-$(CONFIG_KCSAN) += kcsan/
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_PERF_EVENTS) += events/
@@ -121,6 +126,7 @@ obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
KASAN_SANITIZE_stackleak.o := n
+KCSAN_SANITIZE_stackleak.o := n
KCOV_INSTRUMENT_stackleak.o := n
$(obj)/configs.o: $(obj)/config_data.gz
diff --git a/kernel/acct.c b/kernel/acct.c
index 11ff4a596d6b..b0c5b3a9f5af 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -40,7 +40,7 @@
* is one more bug... 10/11/98, AV.
*
* Oh, fsck... Oopsable SMP race in do_process_acct() - we must hold
- * ->mmap_sem to walk the vma list of current->mm. Nasty, since it leaks
+ * ->mmap_lock to walk the vma list of current->mm. Nasty, since it leaks
* a struct file opened for write. Fixed. 2/6/2000, AV.
*/
@@ -541,13 +541,13 @@ void acct_collect(long exitcode, int group_dead)
if (group_dead && current->mm) {
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = current->mm->mmap;
while (vma) {
vsize += vma->vm_end - vma->vm_start;
vma = vma->vm_next;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
spin_lock_irq(&current->sighand->siglock);
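
The acct_collect() hunk above is typical of the tree-wide conversion in this diff from open-coded rwsem operations on mm->mmap_sem to the new mmap_lock wrapper API. A minimal sketch of the resulting pattern, illustrative only and assuming <linux/mm.h> and <linux/mmap_lock.h>:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Sum VMA sizes under the read side of the mmap lock, mirroring the
 * converted acct_collect() loop above.
 */
static unsigned long sum_vma_sizes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long vsize = 0;

	mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		vsize += vma->vm_end - vma->vm_start;
	mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

	return vsize;
}
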
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 7b8381ce40a0..599488f25e40 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -33,7 +33,7 @@ struct bpf_stack_map {
/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
struct irq_work irq_work;
- struct rw_semaphore *sem;
+ struct mm_struct *mm;
};
static void do_up_read(struct irq_work *entry)
@@ -44,8 +44,7 @@ static void do_up_read(struct irq_work *entry)
return;
work = container_of(entry, struct stack_map_irq_work, irq_work);
- up_read_non_owner(work->sem);
- work->sem = NULL;
+ mmap_read_unlock_non_owner(work->mm);
}
static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
@@ -317,7 +316,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
* with build_id.
*/
if (!user || !current || !current->mm || irq_work_busy ||
- down_read_trylock(&current->mm->mmap_sem) == 0) {
+ !mmap_read_trylock_non_owner(current->mm)) {
/* cannot access current->mm, fall back to ips */
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
@@ -342,16 +341,10 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
if (!work) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock_non_owner(current->mm);
} else {
- work->sem = &current->mm->mmap_sem;
+ work->mm = current->mm;
irq_work_queue(&work->irq_work);
- /*
- * The irq_work will release the mmap_sem with
- * up_read_non_owner(). The rwsem_release() is called
- * here to release the lock from lockdep's perspective.
- */
- rwsem_release(&current->mm->mmap_sem.dep_map, _RET_IP_);
}
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4d530b1d5683..9693730833d2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -25,7 +25,7 @@
#include <linux/nospec.h>
#include <linux/audit.h>
#include <uapi/linux/btf.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
@@ -74,32 +74,19 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
size_t expected_size,
size_t actual_size)
{
- unsigned char __user *addr;
- unsigned char __user *end;
- unsigned char val;
- int err;
+ unsigned char __user *addr = uaddr + expected_size;
+ int res;
if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
return -E2BIG;
- if (unlikely(!access_ok(uaddr, actual_size)))
- return -EFAULT;
-
if (actual_size <= expected_size)
return 0;
- addr = uaddr + expected_size;
- end = uaddr + actual_size;
-
- for (; addr < end; addr++) {
- err = get_user(val, addr);
- if (err)
- return err;
- if (val)
- return -E2BIG;
- }
-
- return 0;
+ res = check_zeroed_user(addr, actual_size - expected_size);
+ if (res < 0)
+ return res;
+ return res ? 0 : -E2BIG;
}
const struct bpf_map_ops bpf_map_offload_ops = {
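
The rewritten bpf_check_uarg_tail_zero() above leans on check_zeroed_user(), whose documented tri-state return convention is: 1 if the user range is entirely zero, 0 if a non-zero byte is found, and a negative errno on fault. A short hedged sketch of mapping that convention onto an error code, matching what the new code does:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Assumes check_zeroed_user() follows the convention noted above:
 * 1 = all zero, 0 = non-zero byte found, <0 = fault.
 */
static int require_zeroed_tail(const void __user *tail, size_t len)
{
	int res = check_zeroed_user(tail, len);

	if (res < 0)			/* could not read userspace */
		return res;
	return res ? 0 : -E2BIG;	/* reject a non-zero tail */
}
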
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 557a9b9d2244..1ea181a58465 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -153,11 +153,7 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);
-/*
- * The default hierarchy, reserved for the subsystems that are otherwise
- * unattached - it never has more than a single cgroup, and all tasks are
- * part of that cgroup.
- */
+/* the default hierarchy */
struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);
@@ -251,9 +247,6 @@ bool cgroup_ssid_enabled(int ssid)
* cases where a subsystem should behave differently depending on the
* interface version.
*
- * The set of behaviors which change on the default hierarchy are still
- * being determined and the mount option is prefixed with __DEVEL__.
- *
* List of changed behaviors:
*
* - Mount options "noprefix", "xattr", "clone_children", "release_agent"
@@ -4881,7 +4874,6 @@ static struct cftype cgroup_base_files[] = {
},
{
.name = "cpu.stat",
- .flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_stat_show,
},
#ifdef CONFIG_PSI
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 729d3a5c772e..642415b8c3c9 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1655,7 +1655,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
guarantee_online_mems(cs, &newmems);
/*
- * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
+ * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
@@ -1760,7 +1760,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
*
* Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
- * lock each such tasks mm->mmap_sem, scan its vma's and rebind
+ * lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
*/
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 41ca996568df..b6397a186ce9 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -389,18 +389,62 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}
+/*
+ * compute the cputime for the root cgroup by getting the per cpu data
+ * at a global level, then categorizing the fields in a manner consistent
+ * with how it is done by __cgroup_account_cputime_field for each bit of
+ * cpu time attributed to a cgroup.
+ */
+static void root_cgroup_cputime(struct task_cputime *cputime)
+{
+ int i;
+
+ cputime->stime = 0;
+ cputime->utime = 0;
+ cputime->sum_exec_runtime = 0;
+ for_each_possible_cpu(i) {
+ struct kernel_cpustat kcpustat;
+ u64 *cpustat = kcpustat.cpustat;
+ u64 user = 0;
+ u64 sys = 0;
+
+ kcpustat_cpu_fetch(&kcpustat, i);
+
+ user += cpustat[CPUTIME_USER];
+ user += cpustat[CPUTIME_NICE];
+ cputime->utime += user;
+
+ sys += cpustat[CPUTIME_SYSTEM];
+ sys += cpustat[CPUTIME_IRQ];
+ sys += cpustat[CPUTIME_SOFTIRQ];
+ cputime->stime += sys;
+
+ cputime->sum_exec_runtime += user;
+ cputime->sum_exec_runtime += sys;
+ cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
+ cputime->sum_exec_runtime += cpustat[CPUTIME_GUEST];
+ cputime->sum_exec_runtime += cpustat[CPUTIME_GUEST_NICE];
+ }
+}
+
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
u64 usage, utime, stime;
-
- if (!cgroup_parent(cgrp))
- return;
-
- cgroup_rstat_flush_hold(cgrp);
- usage = cgrp->bstat.cputime.sum_exec_runtime;
- cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
- cgroup_rstat_flush_release();
+ struct task_cputime cputime;
+
+ if (cgroup_parent(cgrp)) {
+ cgroup_rstat_flush_hold(cgrp);
+ usage = cgrp->bstat.cputime.sum_exec_runtime;
+ cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
+ &utime, &stime);
+ cgroup_rstat_flush_release();
+ } else {
+ root_cgroup_cputime(&cputime);
+ usage = cputime.sum_exec_runtime;
+ utime = cputime.utime;
+ stime = cputime.stime;
+ }
do_div(usage, NSEC_PER_USEC);
do_div(utime, NSEC_PER_USEC);
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index cbca6879ab7d..44a259338e33 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
*/
int cpu_pm_enter(void)
{
- int nr_calls;
+ int nr_calls = 0;
int ret = 0;
ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
@@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
*/
int cpu_cluster_pm_enter(void)
{
- int nr_calls;
+ int nr_calls = 0;
int ret = 0;
ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
diff --git a/kernel/cred.c b/kernel/cred.c
index 71a792616917..421b1149c651 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -315,6 +315,9 @@ struct cred *prepare_exec_creds(void)
new->process_keyring = NULL;
#endif
+ new->suid = new->fsuid = new->euid;
+ new->sgid = new->fsgid = new->egid;
+
return new;
}
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index ef94e906f05a..ccc0f98abdd4 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -415,6 +415,18 @@ int kgdb_isremovedbreak(unsigned long addr)
return 0;
}
+int kgdb_has_hit_break(unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state == BP_ACTIVE &&
+ kgdb_break[i].bpt_addr == addr)
+ return 1;
+ }
+ return 0;
+}
+
int dbg_remove_all_break(void)
{
int error;
@@ -923,7 +935,7 @@ static void sysrq_handle_dbg(int key)
kgdb_breakpoint();
}
-static struct sysrq_key_op sysrq_dbg_op = {
+static const struct sysrq_key_op sysrq_dbg_op = {
.handler = sysrq_handle_dbg,
.help_msg = "debug(g)",
.action_msg = "DEBUG",
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 3de0cc780c16..18e03aba2cfc 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -21,17 +21,18 @@
static void kdb_show_stack(struct task_struct *p, void *addr)
{
- int old_lvl = console_loglevel;
-
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
kdb_trap_printk++;
- if (!addr && kdb_task_has_cpu(p))
+ if (!addr && kdb_task_has_cpu(p)) {
+ int old_lvl = console_loglevel;
+
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
kdb_dump_stack_on_cpu(kdb_process_cpu(p));
- else
- show_stack(p, addr);
+ console_loglevel = old_lvl;
+ } else {
+ show_stack(p, addr, KERN_EMERG);
+ }
- console_loglevel = old_lvl;
kdb_trap_printk--;
}
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 4c103a24e380..d006668c0027 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -79,10 +79,14 @@ config DMA_REMAP
select DMA_NONCOHERENT_MMAP
bool
-config DMA_DIRECT_REMAP
+config DMA_COHERENT_POOL
bool
select DMA_REMAP
+config DMA_DIRECT_REMAP
+ bool
+ select DMA_COHERENT_POOL
+
config DMA_CMA
bool "DMA Contiguous Memory Allocator"
depends on HAVE_DMA_CONTIGUOUS && CMA
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index d237cf3dc181..370f63344e9c 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
obj-$(CONFIG_DMA_REMAP) += remap.o
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 8bc6f2d670f9..15bc5026c485 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -222,8 +222,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
* @gfp: Allocation flags.
*
* This function allocates contiguous memory buffer for specified device. It
- * first tries to use device specific contiguous memory area if available or
- * the default global one, then tries a fallback allocation of normal pages.
+ * tries to use device specific contiguous memory area if available, or the
+ * default global one.
*
* Note that it bypasses one-page sized allocations from the global area as
* the addresses within one page are always contiguous, so there is no need
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 9e1777c81f55..36c962a86bf2 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -656,7 +656,7 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
return entry;
}
-void __dma_entry_alloc_check_leak(void)
+static void __dma_entry_alloc_check_leak(void)
{
u32 tmp = nr_total_entries % nr_prealloc_entries;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8f4bbdaf965e..0a4881e59aa7 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -45,8 +45,8 @@ u64 dma_direct_get_required_mask(struct device *dev)
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
-static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
- u64 *phys_limit)
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+ u64 *phys_limit)
{
u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
@@ -76,6 +76,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+ unsigned long attrs)
+{
+ if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+ return false;
+ if (gfpflags_allow_blocking(gfp))
+ return false;
+ if (force_dma_unencrypted(dev))
+ return true;
+ if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+ return false;
+ if (dma_alloc_need_uncached(dev, attrs))
+ return true;
+ return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+ unsigned long attrs)
+{
+ if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+ return true;
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+ !force_dma_unencrypted(dev))
+ return false;
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+ return true;
+ return false;
+}
+
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp, unsigned long attrs)
{
@@ -89,8 +122,8 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
/* we always manually zero the memory once we are done: */
gfp &= ~__GFP_ZERO;
- gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
- &phys_limit);
+ gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+ &phys_limit);
page = dma_alloc_contiguous(dev, alloc_size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, alloc_size);
@@ -125,10 +158,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
struct page *page;
void *ret;
- if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_alloc_need_uncached(dev, attrs) &&
- !gfpflags_allow_blocking(gfp)) {
- ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
+ ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
if (!ret)
return NULL;
goto done;
@@ -204,6 +235,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
{
unsigned int page_order = get_order(size);
+ /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+ if (dma_should_free_from_pool(dev, attrs) &&
+ dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+ return;
+
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
!force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
@@ -211,10 +247,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
return;
}
- if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
- return;
-
if (force_dma_unencrypted(dev))
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
new file mode 100644
index 000000000000..35bb51c31fff
--- /dev/null
+++ b/kernel/dma/pool.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2020 Google LLC
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/dma-contiguous.h>
+#include <linux/init.h>
+#include <linux/genalloc.h>
+#include <linux/set_memory.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+static struct gen_pool *atomic_pool_dma __ro_after_init;
+static unsigned long pool_size_dma;
+static struct gen_pool *atomic_pool_dma32 __ro_after_init;
+static unsigned long pool_size_dma32;
+static struct gen_pool *atomic_pool_kernel __ro_after_init;
+static unsigned long pool_size_kernel;
+
+/* Size can be defined by the coherent_pool command line */
+static size_t atomic_pool_size;
+
+/* Dynamic background expansion when the atomic pool is near capacity */
+static struct work_struct atomic_pool_work;
+
+static int __init early_coherent_pool(char *p)
+{
+ atomic_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void __init dma_atomic_pool_debugfs_init(void)
+{
+ struct dentry *root;
+
+ root = debugfs_create_dir("dma_pools", NULL);
+ if (IS_ERR_OR_NULL(root))
+ return;
+
+ debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
+ debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
+ debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
+}
+
+static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
+{
+ if (gfp & __GFP_DMA)
+ pool_size_dma += size;
+ else if (gfp & __GFP_DMA32)
+ pool_size_dma32 += size;
+ else
+ pool_size_kernel += size;
+}
+
+static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
+ gfp_t gfp)
+{
+ unsigned int order;
+ struct page *page;
+ void *addr;
+ int ret = -ENOMEM;
+
+ /* Cannot allocate larger than MAX_ORDER-1 */
+ order = min(get_order(pool_size), MAX_ORDER-1);
+
+ do {
+ pool_size = 1 << (PAGE_SHIFT + order);
+
+ if (dev_get_cma_area(NULL))
+ page = dma_alloc_from_contiguous(NULL, 1 << order,
+ order, false);
+ else
+ page = alloc_pages(gfp, order);
+ } while (!page && order-- > 0);
+ if (!page)
+ goto out;
+
+ arch_dma_prep_coherent(page, pool_size);
+
+#ifdef CONFIG_DMA_DIRECT_REMAP
+ addr = dma_common_contiguous_remap(page, pool_size,
+ pgprot_dmacoherent(PAGE_KERNEL),
+ __builtin_return_address(0));
+ if (!addr)
+ goto free_page;
+#else
+ addr = page_to_virt(page);
+#endif
+ /*
+ * Memory in the atomic DMA pools must be unencrypted; the pools do not
+ * shrink, so no re-encryption occurs in dma_direct_free_pages().
+ */
+ ret = set_memory_decrypted((unsigned long)page_to_virt(page),
+ 1 << order);
+ if (ret)
+ goto remove_mapping;
+ ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
+ pool_size, NUMA_NO_NODE);
+ if (ret)
+ goto encrypt_mapping;
+
+ dma_atomic_pool_size_add(gfp, pool_size);
+ return 0;
+
+encrypt_mapping:
+ ret = set_memory_encrypted((unsigned long)page_to_virt(page),
+ 1 << order);
+ if (WARN_ON_ONCE(ret)) {
+ /* Decrypt succeeded but encrypt failed, purposely leak */
+ goto out;
+ }
+remove_mapping:
+#ifdef CONFIG_DMA_DIRECT_REMAP
+ dma_common_free_remap(addr, pool_size);
+#endif
+free_page: __maybe_unused
+ if (!dma_release_from_contiguous(NULL, page, 1 << order))
+ __free_pages(page, order);
+out:
+ return ret;
+}
+
+static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
+{
+ if (pool && gen_pool_avail(pool) < atomic_pool_size)
+ atomic_pool_expand(pool, gen_pool_size(pool), gfp);
+}
+
+static void atomic_pool_work_fn(struct work_struct *work)
+{
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ atomic_pool_resize(atomic_pool_dma,
+ GFP_KERNEL | GFP_DMA);
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
+ atomic_pool_resize(atomic_pool_dma32,
+ GFP_KERNEL | GFP_DMA32);
+ atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
+}
+
+static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
+ gfp_t gfp)
+{
+ struct gen_pool *pool;
+ int ret;
+
+ pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
+ if (!pool)
+ return NULL;
+
+ gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
+
+ ret = atomic_pool_expand(pool, pool_size, gfp);
+ if (ret) {
+ gen_pool_destroy(pool);
+ pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
+ pool_size >> 10, &gfp);
+ return NULL;
+ }
+
+ pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
+ gen_pool_size(pool) >> 10, &gfp);
+ return pool;
+}
+
+static int __init dma_atomic_pool_init(void)
+{
+ int ret = 0;
+
+ /*
+ * If coherent_pool was not used on the command line, default the pool
+ * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
+ */
+ if (!atomic_pool_size) {
+ atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) *
+ SZ_128K;
+ atomic_pool_size = min_t(size_t, atomic_pool_size,
+ 1 << (PAGE_SHIFT + MAX_ORDER-1));
+ }
+ INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
+
+ atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+ GFP_KERNEL);
+ if (!atomic_pool_kernel)
+ ret = -ENOMEM;
+ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!atomic_pool_dma)
+ ret = -ENOMEM;
+ }
+ if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+ atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
+ GFP_KERNEL | GFP_DMA32);
+ if (!atomic_pool_dma32)
+ ret = -ENOMEM;
+ }
+
+ dma_atomic_pool_debugfs_init();
+ return ret;
+}
+postcore_initcall(dma_atomic_pool_init);
+
+static inline struct gen_pool *dev_to_pool(struct device *dev)
+{
+ u64 phys_mask;
+ gfp_t gfp;
+
+ gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+ &phys_mask);
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+ return atomic_pool_dma;
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
+ return atomic_pool_dma32;
+ return atomic_pool_kernel;
+}
+
+static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size)
+{
+ struct gen_pool *pool = dev_to_pool(dev);
+
+ if (unlikely(!pool))
+ return false;
+ return gen_pool_has_addr(pool, (unsigned long)start, size);
+}
+
+void *dma_alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, gfp_t flags)
+{
+ struct gen_pool *pool = dev_to_pool(dev);
+ unsigned long val;
+ void *ptr = NULL;
+
+ if (!pool) {
+ WARN(1, "%pGg atomic pool not initialised!\n", &flags);
+ return NULL;
+ }
+
+ val = gen_pool_alloc(pool, size);
+ if (val) {
+ phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
+
+ *ret_page = pfn_to_page(__phys_to_pfn(phys));
+ ptr = (void *)val;
+ memset(ptr, 0, size);
+ }
+ if (gen_pool_avail(pool) < atomic_pool_size)
+ schedule_work(&atomic_pool_work);
+
+ return ptr;
+}
+
+bool dma_free_from_pool(struct device *dev, void *start, size_t size)
+{
+ struct gen_pool *pool = dev_to_pool(dev);
+
+ if (!dma_in_atomic_pool(dev, start, size))
+ return false;
+ gen_pool_free(pool, (unsigned long)start, size);
+ return true;
+}
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 914ff5a58dd5..e739a6eea6e7 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -1,13 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (C) 2012 ARM Ltd.
* Copyright (c) 2014 The Linux Foundation
*/
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
-#include <linux/init.h>
-#include <linux/genalloc.h>
+#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -73,117 +68,3 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
vunmap(cpu_addr);
}
-
-#ifdef CONFIG_DMA_DIRECT_REMAP
-static struct gen_pool *atomic_pool __ro_after_init;
-
-#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
-
-static int __init early_coherent_pool(char *p)
-{
- atomic_pool_size = memparse(p, &p);
- return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
-static gfp_t dma_atomic_pool_gfp(void)
-{
- if (IS_ENABLED(CONFIG_ZONE_DMA))
- return GFP_DMA;
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- return GFP_DMA32;
- return GFP_KERNEL;
-}
-
-static int __init dma_atomic_pool_init(void)
-{
- unsigned int pool_size_order = get_order(atomic_pool_size);
- unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
- struct page *page;
- void *addr;
- int ret;
-
- if (dev_get_cma_area(NULL))
- page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, false);
- else
- page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
- if (!page)
- goto out;
-
- arch_dma_prep_coherent(page, atomic_pool_size);
-
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- goto free_page;
-
- addr = dma_common_contiguous_remap(page, atomic_pool_size,
- pgprot_dmacoherent(PAGE_KERNEL),
- __builtin_return_address(0));
- if (!addr)
- goto destroy_genpool;
-
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
- page_to_phys(page), atomic_pool_size, -1);
- if (ret)
- goto remove_mapping;
- gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
- pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
- atomic_pool_size / 1024);
- return 0;
-
-remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size);
-destroy_genpool:
- gen_pool_destroy(atomic_pool);
- atomic_pool = NULL;
-free_page:
- if (!dma_release_from_contiguous(NULL, page, nr_pages))
- __free_pages(page, pool_size_order);
-out:
- pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
- atomic_pool_size / 1024);
- return -ENOMEM;
-}
-postcore_initcall(dma_atomic_pool_init);
-
-bool dma_in_atomic_pool(void *start, size_t size)
-{
- if (unlikely(!atomic_pool))
- return false;
-
- return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
-}
-
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-{
- unsigned long val;
- void *ptr = NULL;
-
- if (!atomic_pool) {
- WARN(1, "coherent pool not initialised!\n");
- return NULL;
- }
-
- val = gen_pool_alloc(atomic_pool, size);
- if (val) {
- phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
-
- *ret_page = pfn_to_page(__phys_to_pfn(phys));
- ptr = (void *)val;
- memset(ptr, 0, size);
- }
-
- return ptr;
-}
-
-bool dma_free_from_pool(void *start, size_t size)
-{
- if (!dma_in_atomic_pool(start, size))
- return false;
- gen_pool_free(atomic_pool, (unsigned long)start, size);
- return true;
-}
-#endif /* CONFIG_DMA_DIRECT_REMAP */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2e330f330303..856d98c36f56 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1316,7 +1316,7 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event::child_mutex;
* perf_event_context::lock
* perf_event::mmap_mutex
- * mmap_sem
+ * mmap_lock
* perf_addr_filters_head::lock
*
* cpu_hotplug_lock
@@ -3080,7 +3080,7 @@ static int perf_event_stop(struct perf_event *event, int restart)
* pre-existing mappings, called once when new filters arrive via SET_FILTER
* ioctl;
* (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
- * registered mapping, called for every new mmap(), with mm::mmap_sem down
+ * registered mapping, called for every new mmap(), with mm::mmap_lock down
* for reading;
* (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
* of exec.
@@ -6934,12 +6934,12 @@ static u64 perf_virt_to_phys(u64 virt)
* Walking the pages tables for user address.
* Interrupts are disabled, so it prevents any tear down
* of the page tables.
- * Try IRQ-safe __get_user_pages_fast first.
+ * Try IRQ-safe get_user_page_fast_only first.
* If failed, leave phys_addr as 0.
*/
if (current->mm != NULL) {
pagefault_disable();
- if (__get_user_pages_fast(virt, 1, 0, &p) == 1)
+ if (get_user_page_fast_only(virt, 0, &p))
phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
pagefault_enable();
}
@@ -9742,7 +9742,7 @@ static void perf_addr_filters_splice(struct perf_event *event,
/*
* Scan through mm's vmas and see if one of them matches the
* @filter; if so, adjust filter's address range.
- * Called with mm::mmap_sem down for reading.
+ * Called with mm::mmap_lock down for reading.
*/
static void perf_addr_filter_apply(struct perf_addr_filter *filter,
struct mm_struct *mm,
@@ -9784,7 +9784,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
if (!mm)
goto restart;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
raw_spin_lock_irqsave(&ifh->lock, flags);
@@ -9810,7 +9810,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (ifh->nr_file_filters) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
@@ -12220,7 +12220,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
* When a child task exits, feed back event values to parent events.
*
* Can be called with exec_update_mutex held when called from
- * install_exec_creds().
+ * setup_new_exec().
*/
void perf_event_exit_task(struct task_struct *child)
{
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 3cc8416ec844..b48d7039a015 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -213,6 +213,15 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
list_del(&bp->hw.bp_list);
}
+__weak int arch_reserve_bp_slot(struct perf_event *bp)
+{
+ return 0;
+}
+
+__weak void arch_release_bp_slot(struct perf_event *bp)
+{
+}
+
/*
* Function to perform processor-specific cleanup during unregistration
*/
@@ -270,6 +279,7 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
struct bp_busy_slots slots = {0};
enum bp_type_idx type;
int weight;
+ int ret;
/* We couldn't initialize breakpoint constraints on boot */
if (!constraints_initialized)
@@ -294,6 +304,10 @@ static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
if (slots.pinned + (!!slots.flexible) > nr_slots[type])
return -ENOSPC;
+ ret = arch_reserve_bp_slot(bp);
+ if (ret)
+ return ret;
+
toggle_bp_slot(bp, true, type, weight);
return 0;
@@ -317,6 +331,8 @@ static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
enum bp_type_idx type;
int weight;
+ arch_release_bp_slot(bp);
+
type = find_slot_idx(bp_type);
weight = hw_breakpoint_weight(bp);
toggle_bp_slot(bp, false, type, weight);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index eddc8db96027..bb0862873dba 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -457,7 +457,7 @@ static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
* @vaddr: the virtual address to store the opcode.
* @opcode: opcode to be written at @vaddr.
*
- * Called with mm->mmap_sem held for write.
+ * Called with mm->mmap_lock held for write.
* Return 0 (success) or a negative errno.
*/
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
@@ -861,10 +861,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
if (ret)
goto out;
- /* uprobe_write_opcode() assumes we don't cross page boundary */
- BUG_ON((uprobe->offset & ~PAGE_MASK) +
- UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-
smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
@@ -1058,7 +1054,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
if (err && is_register)
goto free;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vma = find_vma(mm, info->vaddr);
if (!vma || !valid_vma(vma, is_register) ||
file_inode(vma->vm_file) != uprobe->inode)
@@ -1080,7 +1076,7 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
}
unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
free:
mmput(mm);
info = free_map_info(info);
@@ -1160,6 +1156,15 @@ static int __uprobe_register(struct inode *inode, loff_t offset,
if (offset > i_size_read(inode))
return -EINVAL;
+ /*
+ * This ensures that copy_from_page(), copy_to_page() and
+ * __update_ref_ctr() can't cross a page boundary.
+ */
+ if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
+ return -EINVAL;
+ if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
+ return -EINVAL;
+
retry:
uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
if (!uprobe)
@@ -1235,7 +1240,7 @@ static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
struct vm_area_struct *vma;
int err = 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long vaddr;
loff_t offset;
@@ -1252,7 +1257,7 @@ static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
vaddr = offset_to_vaddr(vma, uprobe->offset);
err |= remove_breakpoint(uprobe, mm, vaddr);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return err;
}
@@ -1349,7 +1354,7 @@ static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
}
/*
- * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
+ * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
*
* Currently we ignore all errors and always return 0, the callers
* can't handle the failure anyway.
@@ -1439,7 +1444,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
struct vm_area_struct *vma;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
if (mm->uprobes_state.xol_area) {
@@ -1469,7 +1474,7 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
/* pairs with get_xol_area() */
smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -1668,7 +1673,7 @@ void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
copy_to_page(page, vaddr, src, len);
/*
- * We probably need flush_icache_user_range() but it needs vma.
+ * We probably need flush_icache_user_page() but it needs vma.
* This should work on most of architectures by default. If
* architecture needs to do something different it can define
* its own version of the function.
@@ -2008,6 +2013,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
uprobe_opcode_t opcode;
int result;
+ if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
+ return -EINVAL;
+
pagefault_disable();
result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
pagefault_enable();
@@ -2039,7 +2047,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, bp_vaddr);
if (vma && vma->vm_start <= bp_vaddr) {
if (valid_vma(vma, false)) {
@@ -2057,7 +2065,7 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
mmf_recalc_uprobes(mm);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return uprobe;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index c300253a7b8e..727150f28103 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -66,7 +66,6 @@
#include <linux/uaccess.h>
#include <asm/unistd.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
static void __unhash_process(struct task_struct *p, bool group_dead)
@@ -441,17 +440,17 @@ static void exit_mm(void)
sync_mm_rss(mm);
/*
* Serialize with any possible pending coredump.
- * We must hold mmap_sem around checking core_state
+ * We must hold mmap_lock around checking core_state
* and clearing tsk->mm. The core-inducing thread
* will increment ->nr_threads for each thread in the
* group with ->mm != NULL.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
core_state = mm->core_state;
if (core_state) {
struct core_thread self;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
self.task = current;
self.next = xchg(&core_state->dumper.next, &self);
@@ -469,14 +468,14 @@ static void exit_mm(void)
freezable_schedule();
}
__set_current_state(TASK_RUNNING);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
mmgrab(mm);
BUG_ON(mm != current->active_mm);
/* more a memory barrier than a real lock */
task_lock(current);
current->mm = NULL;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
enter_lazy_tlb(mm, current);
task_unlock(current);
mm_update_next_owner(mm);
diff --git a/kernel/fork.c b/kernel/fork.c
index be98e94cb3cc..142b23645d82 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -96,7 +96,6 @@
#include <linux/kasan.h>
#include <linux/scs.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -493,7 +492,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
LIST_HEAD(uf);
uprobe_start_dup_mmap();
- if (down_write_killable(&oldmm->mmap_sem)) {
+ if (mmap_write_lock_killable(oldmm)) {
retval = -EINTR;
goto fail_uprobe_end;
}
@@ -502,7 +501,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
/*
* Not linked in yet - no deadlock potential:
*/
- down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
+ mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
/* No ordering required: file already has been exposed. */
RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
@@ -618,9 +617,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
/* a new mm has just been created */
retval = arch_dup_mmap(oldmm, mm);
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
flush_tlb_mm(oldmm);
- up_write(&oldmm->mmap_sem);
+ mmap_write_unlock(oldmm);
dup_userfaultfd_complete(&uf);
fail_uprobe_end:
uprobe_end_dup_mmap();
@@ -650,9 +649,9 @@ static inline void mm_free_pgd(struct mm_struct *mm)
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
- down_write(&oldmm->mmap_sem);
+ mmap_write_lock(oldmm);
RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
- up_write(&oldmm->mmap_sem);
+ mmap_write_unlock(oldmm);
return 0;
}
#define mm_alloc_pgd(mm) (0)
@@ -1023,7 +1022,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->vmacache_seqnum = 0;
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
- init_rwsem(&mm->mmap_sem);
+ mmap_init_lock(mm);
INIT_LIST_HEAD(&mm->mmlist);
mm->core_state = NULL;
mm_pgtables_bytes_init(mm);
@@ -1759,7 +1758,7 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
pid_t nr = -1;
if (likely(pid_has_task(pid, PIDTYPE_PID))) {
- ns = proc_pid_ns(file_inode(m->file));
+ ns = proc_pid_ns(file_inode(m->file)->i_sb);
nr = pid_nr_ns(pid, ns);
}
diff --git a/kernel/futex.c b/kernel/futex.c
index b4b9f960b610..e646661f6282 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -698,10 +698,10 @@ static int fault_in_user_writeable(u32 __user *uaddr)
struct mm_struct *mm = current->mm;
int ret;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE, NULL);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret < 0 ? ret : 0;
}
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 3941a9c48f83..feaad597b3f4 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -51,28 +51,4 @@ config GCOV_PROFILE_ALL
larger and run slower. Also be sure to exclude files from profiling
which are not linked to the kernel image to prevent linker errors.
-choice
- prompt "Specify GCOV format"
- depends on GCOV_KERNEL
- depends on CC_IS_GCC
- ---help---
- The gcov format is usually determined by the GCC version, and the
- default is chosen according to your GCC version. However, there are
- exceptions where format changes are integrated in lower-version GCCs.
- In such a case, change this option to adjust the format used in the
- kernel accordingly.
-
-config GCOV_FORMAT_3_4
- bool "GCC 3.4 format"
- depends on GCC_VERSION < 40700
- ---help---
- Select this option to use the format defined by GCC 3.4.
-
-config GCOV_FORMAT_4_7
- bool "GCC 4.7 format"
- ---help---
- Select this option to use the format defined by GCC 4.7.
-
-endchoice
-
endmenu
diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile
index d66a74b0f100..16f8ecc7d882 100644
--- a/kernel/gcov/Makefile
+++ b/kernel/gcov/Makefile
@@ -2,6 +2,5 @@
ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
obj-y := base.o fs.o
-obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_base.o gcc_3_4.o
-obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_base.o gcc_4_7.o
+obj-$(CONFIG_CC_IS_GCC) += gcc_base.o gcc_4_7.o
obj-$(CONFIG_CC_IS_CLANG) += clang.o
diff --git a/kernel/gcov/gcc_3_4.c b/kernel/gcov/gcc_3_4.c
deleted file mode 100644
index acb83558e5df..000000000000
--- a/kernel/gcov/gcc_3_4.c
+++ /dev/null
@@ -1,573 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This code provides functions to handle gcc's profiling data format
- * introduced with gcc 3.4. Future versions of gcc may change the gcov
- * format (as happened before), so all format-specific information needs
- * to be kept modular and easily exchangeable.
- *
- * This file is based on gcc-internal definitions. Functions and data
- * structures are defined to be compatible with gcc counterparts.
- * For a better understanding, refer to gcc source: gcc/gcov-io.h.
- *
- * Copyright IBM Corp. 2009
- * Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
- *
- * Uses gcc-internal data definitions.
- */
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/seq_file.h>
-#include <linux/vmalloc.h>
-#include "gcov.h"
-
-#define GCOV_COUNTERS 5
-
-static struct gcov_info *gcov_info_head;
-
-/**
- * struct gcov_fn_info - profiling meta data per function
- * @ident: object file-unique function identifier
- * @checksum: function checksum
- * @n_ctrs: number of values per counter type belonging to this function
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time.
- */
-struct gcov_fn_info {
- unsigned int ident;
- unsigned int checksum;
- unsigned int n_ctrs[];
-};
-
-/**
- * struct gcov_ctr_info - profiling data per counter type
- * @num: number of counter values for this type
- * @values: array of counter values for this type
- * @merge: merge function for counter values of this type (unused)
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time with the exception of the values array.
- */
-struct gcov_ctr_info {
- unsigned int num;
- gcov_type *values;
- void (*merge)(gcov_type *, unsigned int);
-};
-
-/**
- * struct gcov_info - profiling data per object file
- * @version: gcov version magic indicating the gcc version used for compilation
- * @next: list head for a singly-linked list
- * @stamp: time stamp
- * @filename: name of the associated gcov data file
- * @n_functions: number of instrumented functions
- * @functions: function data
- * @ctr_mask: mask specifying which counter types are active
- * @counts: counter data per counter type
- *
- * This data is generated by gcc during compilation and doesn't change
- * at run-time with the exception of the next pointer.
- */
-struct gcov_info {
- unsigned int version;
- struct gcov_info *next;
- unsigned int stamp;
- const char *filename;
- unsigned int n_functions;
- const struct gcov_fn_info *functions;
- unsigned int ctr_mask;
- struct gcov_ctr_info counts[];
-};
-
-/**
- * gcov_info_filename - return info filename
- * @info: profiling data set
- */
-const char *gcov_info_filename(struct gcov_info *info)
-{
- return info->filename;
-}
-
-/**
- * gcov_info_version - return info version
- * @info: profiling data set
- */
-unsigned int gcov_info_version(struct gcov_info *info)
-{
- return info->version;
-}
-
-/**
- * gcov_info_next - return next profiling data set
- * @info: profiling data set
- *
- * Returns next gcov_info following @info or first gcov_info in the chain if
- * @info is %NULL.
- */
-struct gcov_info *gcov_info_next(struct gcov_info *info)
-{
- if (!info)
- return gcov_info_head;
-
- return info->next;
-}
-
-/**
- * gcov_info_link - link/add profiling data set to the list
- * @info: profiling data set
- */
-void gcov_info_link(struct gcov_info *info)
-{
- info->next = gcov_info_head;
- gcov_info_head = info;
-}
-
-/**
- * gcov_info_unlink - unlink/remove profiling data set from the list
- * @prev: previous profiling data set
- * @info: profiling data set
- */
-void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
-{
- if (prev)
- prev->next = info->next;
- else
- gcov_info_head = info->next;
-}
-
-/**
- * gcov_info_within_module - check if a profiling data set belongs to a module
- * @info: profiling data set
- * @mod: module
- *
- * Returns true if profiling data belongs module, false otherwise.
- */
-bool gcov_info_within_module(struct gcov_info *info, struct module *mod)
-{
- return within_module((unsigned long)info, mod);
-}
-
-/* Symbolic links to be created for each profiling data file. */
-const struct gcov_link gcov_link[] = {
- { OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */
- { 0, NULL},
-};
-
-/*
- * Determine whether a counter is active. Based on gcc magic. Doesn't change
- * at run-time.
- */
-static int counter_active(struct gcov_info *info, unsigned int type)
-{
- return (1 << type) & info->ctr_mask;
-}
-
-/* Determine number of active counters. Based on gcc magic. */
-static unsigned int num_counter_active(struct gcov_info *info)
-{
- unsigned int i;
- unsigned int result = 0;
-
- for (i = 0; i < GCOV_COUNTERS; i++) {
- if (counter_active(info, i))
- result++;
- }
- return result;
-}
-
-/**
- * gcov_info_reset - reset profiling data to zero
- * @info: profiling data set
- */
-void gcov_info_reset(struct gcov_info *info)
-{
- unsigned int active = num_counter_active(info);
- unsigned int i;
-
- for (i = 0; i < active; i++) {
- memset(info->counts[i].values, 0,
- info->counts[i].num * sizeof(gcov_type));
- }
-}
-
-/**
- * gcov_info_is_compatible - check if profiling data can be added
- * @info1: first profiling data set
- * @info2: second profiling data set
- *
- * Returns non-zero if profiling data can be added, zero otherwise.
- */
-int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
-{
- return (info1->stamp == info2->stamp);
-}
-
-/**
- * gcov_info_add - add up profiling data
- * @dest: profiling data set to which data is added
- * @source: profiling data set which is added
- *
- * Adds profiling counts of @source to @dest.
- */
-void gcov_info_add(struct gcov_info *dest, struct gcov_info *source)
-{
- unsigned int i;
- unsigned int j;
-
- for (i = 0; i < num_counter_active(dest); i++) {
- for (j = 0; j < dest->counts[i].num; j++) {
- dest->counts[i].values[j] +=
- source->counts[i].values[j];
- }
- }
-}
-
-/* Get size of function info entry. Based on gcc magic. */
-static size_t get_fn_size(struct gcov_info *info)
-{
- size_t size;
-
- size = sizeof(struct gcov_fn_info) + num_counter_active(info) *
- sizeof(unsigned int);
- if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int))
- size = ALIGN(size, __alignof__(struct gcov_fn_info));
- return size;
-}
-
-/* Get address of function info entry. Based on gcc magic. */
-static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn)
-{
- return (struct gcov_fn_info *)
- ((char *) info->functions + fn * get_fn_size(info));
-}
-
-/**
- * gcov_info_dup - duplicate profiling data set
- * @info: profiling data set to duplicate
- *
- * Return newly allocated duplicate on success, %NULL on error.
- */
-struct gcov_info *gcov_info_dup(struct gcov_info *info)
-{
- struct gcov_info *dup;
- unsigned int i;
- unsigned int active;
-
- /* Duplicate gcov_info. */
- active = num_counter_active(info);
- dup = kzalloc(struct_size(dup, counts, active), GFP_KERNEL);
- if (!dup)
- return NULL;
- dup->version = info->version;
- dup->stamp = info->stamp;
- dup->n_functions = info->n_functions;
- dup->ctr_mask = info->ctr_mask;
- /* Duplicate filename. */
- dup->filename = kstrdup(info->filename, GFP_KERNEL);
- if (!dup->filename)
- goto err_free;
- /* Duplicate table of functions. */
- dup->functions = kmemdup(info->functions, info->n_functions *
- get_fn_size(info), GFP_KERNEL);
- if (!dup->functions)
- goto err_free;
- /* Duplicate counter arrays. */
- for (i = 0; i < active ; i++) {
- struct gcov_ctr_info *ctr = &info->counts[i];
- size_t size = ctr->num * sizeof(gcov_type);
-
- dup->counts[i].num = ctr->num;
- dup->counts[i].merge = ctr->merge;
- dup->counts[i].values = vmalloc(size);
- if (!dup->counts[i].values)
- goto err_free;
- memcpy(dup->counts[i].values, ctr->values, size);
- }
- return dup;
-
-err_free:
- gcov_info_free(dup);
- return NULL;
-}
-
-/**
- * gcov_info_free - release memory for profiling data set duplicate
- * @info: profiling data set duplicate to free
- */
-void gcov_info_free(struct gcov_info *info)
-{
- unsigned int active = num_counter_active(info);
- unsigned int i;
-
- for (i = 0; i < active ; i++)
- vfree(info->counts[i].values);
- kfree(info->functions);
- kfree(info->filename);
- kfree(info);
-}
-
-/**
- * struct type_info - iterator helper array
- * @ctr_type: counter type
- * @offset: index of the first value of the current function for this type
- *
- * This array is needed to convert the in-memory data format into the in-file
- * data format:
- *
- * In-memory:
- * for each counter type
- * for each function
- * values
- *
- * In-file:
- * for each function
- * for each counter type
- * values
- *
- * See gcc source gcc/gcov-io.h for more information on data organization.
- */
-struct type_info {
- int ctr_type;
- unsigned int offset;
-};
-
-/**
- * struct gcov_iterator - specifies current file position in logical records
- * @info: associated profiling data
- * @record: record type
- * @function: function number
- * @type: counter type
- * @count: index into values array
- * @num_types: number of counter types
- * @type_info: helper array to get values-array offset for current function
- */
-struct gcov_iterator {
- struct gcov_info *info;
-
- int record;
- unsigned int function;
- unsigned int type;
- unsigned int count;
-
- int num_types;
- struct type_info type_info[];
-};
-
-static struct gcov_fn_info *get_func(struct gcov_iterator *iter)
-{
- return get_fn_info(iter->info, iter->function);
-}
-
-static struct type_info *get_type(struct gcov_iterator *iter)
-{
- return &iter->type_info[iter->type];
-}
-
-/**
- * gcov_iter_new - allocate and initialize profiling data iterator
- * @info: profiling data set to be iterated
- *
- * Return file iterator on success, %NULL otherwise.
- */
-struct gcov_iterator *gcov_iter_new(struct gcov_info *info)
-{
- struct gcov_iterator *iter;
-
- iter = kzalloc(struct_size(iter, type_info, num_counter_active(info)),
- GFP_KERNEL);
- if (iter)
- iter->info = info;
-
- return iter;
-}
-
-/**
- * gcov_iter_free - release memory for iterator
- * @iter: file iterator to free
- */
-void gcov_iter_free(struct gcov_iterator *iter)
-{
- kfree(iter);
-}
-
-/**
- * gcov_iter_get_info - return profiling data set for given file iterator
- * @iter: file iterator
- */
-struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
-{
- return iter->info;
-}
-
-/**
- * gcov_iter_start - reset file iterator to starting position
- * @iter: file iterator
- */
-void gcov_iter_start(struct gcov_iterator *iter)
-{
- int i;
-
- iter->record = 0;
- iter->function = 0;
- iter->type = 0;
- iter->count = 0;
- iter->num_types = 0;
- for (i = 0; i < GCOV_COUNTERS; i++) {
- if (counter_active(iter->info, i)) {
- iter->type_info[iter->num_types].ctr_type = i;
- iter->type_info[iter->num_types++].offset = 0;
- }
- }
-}
-
-/* Mapping of logical record number to actual file content. */
-#define RECORD_FILE_MAGIC 0
-#define RECORD_GCOV_VERSION 1
-#define RECORD_TIME_STAMP 2
-#define RECORD_FUNCTION_TAG 3
-#define RECORD_FUNCTON_TAG_LEN 4
-#define RECORD_FUNCTION_IDENT 5
-#define RECORD_FUNCTION_CHECK 6
-#define RECORD_COUNT_TAG 7
-#define RECORD_COUNT_LEN 8
-#define RECORD_COUNT 9
-
-/**
- * gcov_iter_next - advance file iterator to next logical record
- * @iter: file iterator
- *
- * Return zero if new position is valid, non-zero if iterator has reached end.
- */
-int gcov_iter_next(struct gcov_iterator *iter)
-{
- switch (iter->record) {
- case RECORD_FILE_MAGIC:
- case RECORD_GCOV_VERSION:
- case RECORD_FUNCTION_TAG:
- case RECORD_FUNCTON_TAG_LEN:
- case RECORD_FUNCTION_IDENT:
- case RECORD_COUNT_TAG:
- /* Advance to next record */
- iter->record++;
- break;
- case RECORD_COUNT:
- /* Advance to next count */
- iter->count++;
- /* fall through */
- case RECORD_COUNT_LEN:
- if (iter->count < get_func(iter)->n_ctrs[iter->type]) {
- iter->record = 9;
- break;
- }
- /* Advance to next counter type */
- get_type(iter)->offset += iter->count;
- iter->count = 0;
- iter->type++;
- /* fall through */
- case RECORD_FUNCTION_CHECK:
- if (iter->type < iter->num_types) {
- iter->record = 7;
- break;
- }
- /* Advance to next function */
- iter->type = 0;
- iter->function++;
- /* fall through */
- case RECORD_TIME_STAMP:
- if (iter->function < iter->info->n_functions)
- iter->record = 3;
- else
- iter->record = -1;
- break;
- }
- /* Check for EOF. */
- if (iter->record == -1)
- return -EINVAL;
- else
- return 0;
-}
-
-/**
- * seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file
- * @seq: seq_file handle
- * @v: value to be stored
- *
- * Number format defined by gcc: numbers are recorded in the 32 bit
- * unsigned binary form of the endianness of the machine generating the
- * file.
- */
-static int seq_write_gcov_u32(struct seq_file *seq, u32 v)
-{
- return seq_write(seq, &v, sizeof(v));
-}
-
-/**
- * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file
- * @seq: seq_file handle
- * @v: value to be stored
- *
- * Number format defined by gcc: numbers are recorded in the 32 bit
- * unsigned binary form of the endianness of the machine generating the
- * file. 64 bit numbers are stored as two 32 bit numbers, the low part
- * first.
- */
-static int seq_write_gcov_u64(struct seq_file *seq, u64 v)
-{
- u32 data[2];
-
- data[0] = (v & 0xffffffffUL);
- data[1] = (v >> 32);
- return seq_write(seq, data, sizeof(data));
-}
-
-/**
- * gcov_iter_write - write data for current pos to seq_file
- * @iter: file iterator
- * @seq: seq_file handle
- *
- * Return zero on success, non-zero otherwise.
- */
-int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
-{
- int rc = -EINVAL;
-
- switch (iter->record) {
- case RECORD_FILE_MAGIC:
- rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC);
- break;
- case RECORD_GCOV_VERSION:
- rc = seq_write_gcov_u32(seq, iter->info->version);
- break;
- case RECORD_TIME_STAMP:
- rc = seq_write_gcov_u32(seq, iter->info->stamp);
- break;
- case RECORD_FUNCTION_TAG:
- rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION);
- break;
- case RECORD_FUNCTON_TAG_LEN:
- rc = seq_write_gcov_u32(seq, 2);
- break;
- case RECORD_FUNCTION_IDENT:
- rc = seq_write_gcov_u32(seq, get_func(iter)->ident);
- break;
- case RECORD_FUNCTION_CHECK:
- rc = seq_write_gcov_u32(seq, get_func(iter)->checksum);
- break;
- case RECORD_COUNT_TAG:
- rc = seq_write_gcov_u32(seq,
- GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type));
- break;
- case RECORD_COUNT_LEN:
- rc = seq_write_gcov_u32(seq,
- get_func(iter)->n_ctrs[iter->type] * 2);
- break;
- case RECORD_COUNT:
- rc = seq_write_gcov_u64(seq,
- iter->info->counts[iter->type].
- values[iter->count + get_type(iter)->offset]);
- break;
- }
- return rc;
-}
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index e13ca842eb7e..c1510f0ab3ea 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -88,7 +88,7 @@ find $cpio_dir -type f -print0 |
find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
--owner=0 --group=0 --numeric-owner --no-recursion \
- -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
+ -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
echo $headers_md5 > kernel/kheaders.md5
echo "$this_file_md5" >> kernel/kheaders.md5
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 14a625c16cb3..ce76f490126c 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -53,9 +53,18 @@ int __read_mostly sysctl_hung_task_warnings = 10;
static int __read_mostly did_panic;
static bool hung_task_show_lock;
static bool hung_task_call_panic;
+static bool hung_task_show_all_bt;
static struct task_struct *watchdog_task;
+#ifdef CONFIG_SMP
+/*
+ * Should we dump all CPUs backtraces in a hung task event?
+ * Defaults to 0, can be changed via sysctl.
+ */
+unsigned int __read_mostly sysctl_hung_task_all_cpu_backtrace;
+#endif /* CONFIG_SMP */
+
/*
* Should we panic (and reboot, if panic_timeout= is set) when a
* hung task is detected:
@@ -63,16 +72,6 @@ static struct task_struct *watchdog_task;
unsigned int __read_mostly sysctl_hung_task_panic =
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;
-static int __init hung_task_panic_setup(char *str)
-{
- int rc = kstrtouint(str, 0, &sysctl_hung_task_panic);
-
- if (rc)
- return rc;
- return 1;
-}
-__setup("hung_task_panic=", hung_task_panic_setup);
-
static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
@@ -137,6 +136,9 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
" disables this message.\n");
sched_show_task(t);
hung_task_show_lock = true;
+
+ if (sysctl_hung_task_all_cpu_backtrace)
+ hung_task_show_all_bt = true;
}
touch_nmi_watchdog();
@@ -201,10 +203,14 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
rcu_read_unlock();
if (hung_task_show_lock)
debug_show_all_locks();
- if (hung_task_call_panic) {
+
+ if (hung_task_show_all_bt) {
+ hung_task_show_all_bt = false;
trigger_all_cpu_backtrace();
- panic("hung_task: blocked tasks");
}
+
+ if (hung_task_call_panic)
+ panic("hung_task: blocked tasks");
}
static long hung_timeout_jiffies(unsigned long last_checked,
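The hung_task.c change above adds sysctl_hung_task_all_cpu_backtrace (default 0) and reorders the end of the scan so that, when both knobs are set, the all-CPU backtraces are printed before an optional panic instead of only as part of it. A condensed sketch of the resulting flow (the wrapper name end_of_scan() is a placeholder; the flags are the ones from the hunks above):

static void end_of_scan(void)
{
	if (hung_task_show_lock)
		debug_show_all_locks();

	/* Dump remote CPUs first, while the system is still usable. */
	if (hung_task_show_all_bt) {
		hung_task_show_all_bt = false;
		trigger_all_cpu_backtrace();
	}

	/* Only then honour hung_task_panic, if it was requested. */
	if (hung_task_call_panic)
		panic("hung_task: blocked tasks");
}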
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 8accc9722a81..6afae0bcbac4 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -86,6 +86,18 @@ static DEFINE_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
+struct kcov_percpu_data {
+ void *irq_area;
+
+ unsigned int saved_mode;
+ unsigned int saved_size;
+ void *saved_area;
+ struct kcov *saved_kcov;
+ int saved_sequence;
+};
+
+DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);
+
/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_find(u64 handle)
{
@@ -98,6 +110,7 @@ static struct kcov_remote *kcov_remote_find(u64 handle)
return NULL;
}
+/* Must be called with kcov_remote_lock locked. */
static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
{
struct kcov_remote *remote;
@@ -119,16 +132,13 @@ static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
struct kcov_remote_area *area;
struct list_head *pos;
- kcov_debug("size = %u\n", size);
list_for_each(pos, &kcov_remote_areas) {
area = list_entry(pos, struct kcov_remote_area, list);
if (area->size == size) {
list_del(&area->list);
- kcov_debug("rv = %px\n", area);
return area;
}
}
- kcov_debug("rv = NULL\n");
return NULL;
}
@@ -136,7 +146,6 @@ static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
static void kcov_remote_area_put(struct kcov_remote_area *area,
unsigned int size)
{
- kcov_debug("area = %px, size = %u\n", area, size);
INIT_LIST_HEAD(&area->list);
area->size = size;
list_add(&area->list, &kcov_remote_areas);
@@ -148,9 +157,10 @@ static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_stru
/*
* We are interested in code coverage as a function of a syscall inputs,
- * so we ignore code executed in interrupts.
+ * so we ignore code executed in interrupts, unless we are in a remote
+ * coverage collection section in a softirq.
*/
- if (!in_task())
+ if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
return false;
mode = READ_ONCE(t->kcov_mode);
/*
@@ -312,23 +322,26 @@ void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
-static void kcov_start(struct task_struct *t, unsigned int size,
- void *area, enum kcov_mode mode, int sequence)
+static void kcov_start(struct task_struct *t, struct kcov *kcov,
+ unsigned int size, void *area, enum kcov_mode mode,
+ int sequence)
{
kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
+ t->kcov = kcov;
/* Cache in task struct for performance. */
t->kcov_size = size;
t->kcov_area = area;
+ t->kcov_sequence = sequence;
/* See comment in check_kcov_mode(). */
barrier();
WRITE_ONCE(t->kcov_mode, mode);
- t->kcov_sequence = sequence;
}
static void kcov_stop(struct task_struct *t)
{
WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
barrier();
+ t->kcov = NULL;
t->kcov_size = 0;
t->kcov_area = NULL;
}
@@ -336,7 +349,6 @@ static void kcov_stop(struct task_struct *t)
static void kcov_task_reset(struct task_struct *t)
{
kcov_stop(t);
- t->kcov = NULL;
t->kcov_sequence = 0;
t->kcov_handle = 0;
}
@@ -361,18 +373,18 @@ static void kcov_remote_reset(struct kcov *kcov)
int bkt;
struct kcov_remote *remote;
struct hlist_node *tmp;
+ unsigned long flags;
- spin_lock(&kcov_remote_lock);
+ spin_lock_irqsave(&kcov_remote_lock, flags);
hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
if (remote->kcov != kcov)
continue;
- kcov_debug("removing handle %llx\n", remote->handle);
hash_del(&remote->hnode);
kfree(remote);
}
/* Do reset before unlock to prevent races with kcov_remote_start(). */
kcov_reset(kcov);
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock, flags);
}
static void kcov_disable(struct task_struct *t, struct kcov *kcov)
@@ -401,12 +413,13 @@ static void kcov_put(struct kcov *kcov)
void kcov_task_exit(struct task_struct *t)
{
struct kcov *kcov;
+ unsigned long flags;
kcov = t->kcov;
if (kcov == NULL)
return;
- spin_lock(&kcov->lock);
+ spin_lock_irqsave(&kcov->lock, flags);
kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
/*
* For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
@@ -414,7 +427,8 @@ void kcov_task_exit(struct task_struct *t)
* WARN_ON(!kcov->remote && kcov->t != t);
*
* For KCOV_REMOTE_ENABLE devices, the exiting task is either:
- * 2. A remote task between kcov_remote_start() and kcov_remote_stop().
+ *
+ * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
* In this case we should print a warning right away, since a task
* shouldn't be exiting when it's in a kcov coverage collection
* section. Here t points to the task that is collecting remote
@@ -424,18 +438,18 @@ void kcov_task_exit(struct task_struct *t)
* WARN_ON(kcov->remote && kcov->t != t);
*
* 2. The task that created kcov exiting without calling KCOV_DISABLE,
- * and then again we can make sure that t->kcov->t == t:
+ * and then again we make sure that t->kcov->t == t:
* WARN_ON(kcov->remote && kcov->t != t);
*
* By combining all three checks into one we get:
*/
if (WARN_ON(kcov->t != t)) {
- spin_unlock(&kcov->lock);
+ spin_unlock_irqrestore(&kcov->lock, flags);
return;
}
/* Just to not leave dangling references behind. */
kcov_disable(t, kcov);
- spin_unlock(&kcov->lock);
+ spin_unlock_irqrestore(&kcov->lock, flags);
kcov_put(kcov);
}
@@ -446,12 +460,13 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
struct kcov *kcov = vma->vm_file->private_data;
unsigned long size, off;
struct page *page;
+ unsigned long flags;
area = vmalloc_user(vma->vm_end - vma->vm_start);
if (!area)
return -ENOMEM;
- spin_lock(&kcov->lock);
+ spin_lock_irqsave(&kcov->lock, flags);
size = kcov->size * sizeof(unsigned long);
if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
vma->vm_end - vma->vm_start != size) {
@@ -461,7 +476,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
if (!kcov->area) {
kcov->area = area;
vma->vm_flags |= VM_DONTEXPAND;
- spin_unlock(&kcov->lock);
+ spin_unlock_irqrestore(&kcov->lock, flags);
for (off = 0; off < size; off += PAGE_SIZE) {
page = vmalloc_to_page(kcov->area + off);
if (vm_insert_page(vma, vma->vm_start + off, page))
@@ -470,7 +485,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
return 0;
}
exit:
- spin_unlock(&kcov->lock);
+ spin_unlock_irqrestore(&kcov->lock, flags);
vfree(area);
return res;
}
@@ -550,10 +565,10 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
int mode, i;
struct kcov_remote_arg *remote_arg;
struct kcov_remote *remote;
+ unsigned long flags;
switch (cmd) {
case KCOV_INIT_TRACE:
- kcov_debug("KCOV_INIT_TRACE\n");
/*
* Enable kcov in trace mode and setup buffer size.
* Must happen before anything else.
@@ -572,7 +587,6 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
kcov->mode = KCOV_MODE_INIT;
return 0;
case KCOV_ENABLE:
- kcov_debug("KCOV_ENABLE\n");
/*
* Enable coverage for the current task.
* At this point the user must have enabled trace mode,
@@ -590,15 +604,13 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
return mode;
kcov_fault_in_area(kcov);
kcov->mode = mode;
- kcov_start(t, kcov->size, kcov->area, kcov->mode,
+ kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
kcov->sequence);
- t->kcov = kcov;
kcov->t = t;
/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
kcov_get(kcov);
return 0;
case KCOV_DISABLE:
- kcov_debug("KCOV_DISABLE\n");
/* Disable coverage for the current task. */
unused = arg;
if (unused != 0 || current->kcov != kcov)
@@ -610,7 +622,6 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
kcov_put(kcov);
return 0;
case KCOV_REMOTE_ENABLE:
- kcov_debug("KCOV_REMOTE_ENABLE\n");
if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
return -EINVAL;
t = current;
@@ -627,41 +638,42 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
kcov->t = t;
kcov->remote = true;
kcov->remote_size = remote_arg->area_size;
- spin_lock(&kcov_remote_lock);
+ spin_lock_irqsave(&kcov_remote_lock, flags);
for (i = 0; i < remote_arg->num_handles; i++) {
- kcov_debug("handle %llx\n", remote_arg->handles[i]);
if (!kcov_check_handle(remote_arg->handles[i],
false, true, false)) {
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock,
+ flags);
kcov_disable(t, kcov);
return -EINVAL;
}
remote = kcov_remote_add(kcov, remote_arg->handles[i]);
if (IS_ERR(remote)) {
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock,
+ flags);
kcov_disable(t, kcov);
return PTR_ERR(remote);
}
}
if (remote_arg->common_handle) {
- kcov_debug("common handle %llx\n",
- remote_arg->common_handle);
if (!kcov_check_handle(remote_arg->common_handle,
true, false, false)) {
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock,
+ flags);
kcov_disable(t, kcov);
return -EINVAL;
}
remote = kcov_remote_add(kcov,
remote_arg->common_handle);
if (IS_ERR(remote)) {
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock,
+ flags);
kcov_disable(t, kcov);
return PTR_ERR(remote);
}
t->kcov_handle = remote_arg->common_handle;
}
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock, flags);
/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
kcov_get(kcov);
return 0;
@@ -677,6 +689,7 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
struct kcov_remote_arg *remote_arg = NULL;
unsigned int remote_num_handles;
unsigned long remote_arg_size;
+ unsigned long flags;
if (cmd == KCOV_REMOTE_ENABLE) {
if (get_user(remote_num_handles, (unsigned __user *)(arg +
@@ -697,9 +710,9 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
}
kcov = filep->private_data;
- spin_lock(&kcov->lock);
+ spin_lock_irqsave(&kcov->lock, flags);
res = kcov_ioctl_locked(kcov, cmd, arg);
- spin_unlock(&kcov->lock);
+ spin_unlock_irqrestore(&kcov->lock, flags);
kfree(remote_arg);
@@ -716,8 +729,8 @@ static const struct file_operations kcov_fops = {
/*
* kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
- * of code in a kernel background thread to allow kcov to be used to collect
- * coverage from that part of code.
+ * of code in a kernel background thread or in a softirq to allow kcov to be
+ * used to collect coverage from that part of code.
*
* The handle argument of kcov_remote_start() identifies a code section that is
* used for coverage collection. A userspace process passes this handle to
@@ -728,9 +741,9 @@ static const struct file_operations kcov_fops = {
* the type of the kernel thread whose code is being annotated.
*
* For global kernel threads that are spawned in a limited number of instances
- * (e.g. one USB hub_event() worker thread is spawned per USB HCD), each
- * instance must be assigned a unique 4-byte instance id. The instance id is
- * then combined with a 1-byte subsystem id to get a handle via
+ * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
+ * softirqs, each instance must be assigned a unique 4-byte instance id. The
+ * instance id is then combined with a 1-byte subsystem id to get a handle via
* kcov_remote_handle(subsystem_id, instance_id).
*
* For local kernel threads that are spawned from system calls handler when a
@@ -749,70 +762,136 @@ static const struct file_operations kcov_fops = {
*
* See Documentation/dev-tools/kcov.rst for more details.
*
- * Internally, this function looks up the kcov device associated with the
+ * Internally, kcov_remote_start() looks up the kcov device associated with the
* provided handle, allocates an area for coverage collection, and saves the
* pointers to kcov and area into the current task_struct to allow coverage to
- * be collected via __sanitizer_cov_trace_pc()
+ * be collected via __sanitizer_cov_trace_pc().
* In turn, kcov_remote_stop() clears those pointers from task_struct to stop
* collecting coverage and copies all collected coverage into the kcov area.
*/
+
+static inline bool kcov_mode_enabled(unsigned int mode)
+{
+ return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
+}
+
+void kcov_remote_softirq_start(struct task_struct *t)
+{
+ struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
+ unsigned int mode;
+
+ mode = READ_ONCE(t->kcov_mode);
+ barrier();
+ if (kcov_mode_enabled(mode)) {
+ data->saved_mode = mode;
+ data->saved_size = t->kcov_size;
+ data->saved_area = t->kcov_area;
+ data->saved_sequence = t->kcov_sequence;
+ data->saved_kcov = t->kcov;
+ kcov_stop(t);
+ }
+}
+
+void kcov_remote_softirq_stop(struct task_struct *t)
+{
+ struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
+
+ if (data->saved_kcov) {
+ kcov_start(t, data->saved_kcov, data->saved_size,
+ data->saved_area, data->saved_mode,
+ data->saved_sequence);
+ data->saved_mode = 0;
+ data->saved_size = 0;
+ data->saved_area = NULL;
+ data->saved_sequence = 0;
+ data->saved_kcov = NULL;
+ }
+}
+
void kcov_remote_start(u64 handle)
{
+ struct task_struct *t = current;
struct kcov_remote *remote;
+ struct kcov *kcov;
+ unsigned int mode;
void *area;
- struct task_struct *t;
unsigned int size;
- enum kcov_mode mode;
int sequence;
+ unsigned long flags;
if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
return;
- if (WARN_ON(!in_task()))
+ if (!in_task() && !in_serving_softirq())
return;
- t = current;
+
+ local_irq_save(flags);
+
/*
- * Check that kcov_remote_start is not called twice
- * nor called by user tasks (with enabled kcov).
+ * Check that kcov_remote_start() is not called twice in background
+ * threads nor called by user tasks (with enabled kcov).
*/
- if (WARN_ON(t->kcov))
+ mode = READ_ONCE(t->kcov_mode);
+ if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
+ local_irq_restore(flags);
return;
-
- kcov_debug("handle = %llx\n", handle);
+ }
+ /*
+ * Check that kcov_remote_start() is not called twice in softirqs.
+ * Note, that kcov_remote_start() can be called from a softirq that
+ * happened while collecting coverage from a background thread.
+ */
+ if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
+ local_irq_restore(flags);
+ return;
+ }
spin_lock(&kcov_remote_lock);
remote = kcov_remote_find(handle);
if (!remote) {
- kcov_debug("no remote found");
- spin_unlock(&kcov_remote_lock);
+ spin_unlock_irqrestore(&kcov_remote_lock, flags);
return;
}
+ kcov_debug("handle = %llx, context: %s\n", handle,
+ in_task() ? "task" : "softirq");
+ kcov = remote->kcov;
/* Put in kcov_remote_stop(). */
- kcov_get(remote->kcov);
- t->kcov = remote->kcov;
+ kcov_get(kcov);
/*
* Read kcov fields before unlock to prevent races with
* KCOV_DISABLE / kcov_remote_reset().
*/
- size = remote->kcov->remote_size;
- mode = remote->kcov->mode;
- sequence = remote->kcov->sequence;
- area = kcov_remote_area_get(size);
- spin_unlock(&kcov_remote_lock);
+ mode = kcov->mode;
+ sequence = kcov->sequence;
+ if (in_task()) {
+ size = kcov->remote_size;
+ area = kcov_remote_area_get(size);
+ } else {
+ size = CONFIG_KCOV_IRQ_AREA_SIZE;
+ area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
+ }
+ spin_unlock_irqrestore(&kcov_remote_lock, flags);
+ /* Can only happen when in_task(). */
if (!area) {
area = vmalloc(size * sizeof(unsigned long));
if (!area) {
- t->kcov = NULL;
- kcov_put(remote->kcov);
+ kcov_put(kcov);
return;
}
}
+
+ local_irq_save(flags);
+
/* Reset coverage size. */
*(u64 *)area = 0;
- kcov_debug("area = %px, size = %u", area, size);
+ if (in_serving_softirq()) {
+ kcov_remote_softirq_start(t);
+ t->kcov_softirq = 1;
+ }
+ kcov_start(t, kcov, size, area, mode, sequence);
- kcov_start(t, size, area, mode, sequence);
+ local_irq_restore(flags);
}
EXPORT_SYMBOL(kcov_remote_start);
@@ -876,34 +955,67 @@ static void kcov_move_area(enum kcov_mode mode, void *dst_area,
void kcov_remote_stop(void)
{
struct task_struct *t = current;
- struct kcov *kcov = t->kcov;
- void *area = t->kcov_area;
- unsigned int size = t->kcov_size;
- int sequence = t->kcov_sequence;
+ struct kcov *kcov;
+ unsigned int mode;
+ void *area;
+ unsigned int size;
+ int sequence;
+ unsigned long flags;
- if (!kcov) {
- kcov_debug("no kcov found\n");
+ if (!in_task() && !in_serving_softirq())
+ return;
+
+ local_irq_save(flags);
+
+ mode = READ_ONCE(t->kcov_mode);
+ barrier();
+ if (!kcov_mode_enabled(mode)) {
+ local_irq_restore(flags);
+ return;
+ }
+ /*
+ * When in softirq, check if the corresponding kcov_remote_start()
+ * actually found the remote handle and started collecting coverage.
+ */
+ if (in_serving_softirq() && !t->kcov_softirq) {
+ local_irq_restore(flags);
+ return;
+ }
+ /* Make sure that kcov_softirq is only set when in softirq. */
+ if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
+ local_irq_restore(flags);
return;
}
+ kcov = t->kcov;
+ area = t->kcov_area;
+ size = t->kcov_size;
+ sequence = t->kcov_sequence;
+
kcov_stop(t);
- t->kcov = NULL;
+ if (in_serving_softirq()) {
+ t->kcov_softirq = 0;
+ kcov_remote_softirq_stop(t);
+ }
spin_lock(&kcov->lock);
/*
* KCOV_DISABLE could have been called between kcov_remote_start()
- * and kcov_remote_stop(), hence the check.
+ * and kcov_remote_stop(), hence the sequence check.
*/
- kcov_debug("move if: %d == %d && %d\n",
- sequence, kcov->sequence, (int)kcov->remote);
if (sequence == kcov->sequence && kcov->remote)
kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
spin_unlock(&kcov->lock);
- spin_lock(&kcov_remote_lock);
- kcov_remote_area_put(area, size);
- spin_unlock(&kcov_remote_lock);
+ if (in_task()) {
+ spin_lock(&kcov_remote_lock);
+ kcov_remote_area_put(area, size);
+ spin_unlock(&kcov_remote_lock);
+ }
+
+ local_irq_restore(flags);
+ /* Get in kcov_remote_start(). */
kcov_put(kcov);
}
EXPORT_SYMBOL(kcov_remote_stop);
@@ -917,6 +1029,16 @@ EXPORT_SYMBOL(kcov_common_handle);
static int __init kcov_init(void)
{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE *
+ sizeof(unsigned long));
+ if (!area)
+ return -ENOMEM;
+ per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
+ }
+
/*
* The kcov debugfs file won't ever get removed and thus,
* there is no need to protect it against removal races. The
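With the kcov.c changes above, kcov_remote_start()/kcov_remote_stop() can annotate code running in softirq context as well as in background threads; softirq sections use a per-CPU area sized by CONFIG_KCOV_IRQ_AREA_SIZE, and the kcov locks are now taken with interrupts disabled. A hedged usage sketch for annotating such a section (the function and the instance id source are illustrative; kcov_remote_handle() and the subsystem constants are assumed to be the helpers from the kcov uapi header, as referenced in the comment above):

static void example_softirq_work(u32 instance_id)
{
	/* Handle = 1-byte subsystem id combined with a 4-byte instance id. */
	u64 handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, instance_id);

	kcov_remote_start(handle);
	/* ... code whose coverage is attributed to this handle ... */
	kcov_remote_stop();
}

Userspace passes the same handle in struct kcov_remote_arg to the KCOV_REMOTE_ENABLE ioctl to receive the coverage collected inside the section.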
diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
new file mode 100644
index 000000000000..d4999b38d1be
--- /dev/null
+++ b/kernel/kcsan/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+KCSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
+
+CFLAGS_REMOVE_core.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_debugfs.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
+
+CFLAGS_core.o := $(call cc-option,-fno-conserve-stack,) \
+ $(call cc-option,-fno-stack-protector,)
+
+obj-y := core.o debugfs.o report.o
+obj-$(CONFIG_KCSAN_SELFTEST) += test.o
diff --git a/kernel/kcsan/atomic.h b/kernel/kcsan/atomic.h
new file mode 100644
index 000000000000..be9e625227f3
--- /dev/null
+++ b/kernel/kcsan/atomic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _KERNEL_KCSAN_ATOMIC_H
+#define _KERNEL_KCSAN_ATOMIC_H
+
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+
+/*
+ * Special rules for certain memory where concurrent conflicting accesses are
+ * common, however, the current convention is to not mark them; returns true if
+ * access to @ptr should be considered atomic. Called from slow-path.
+ */
+static bool kcsan_is_atomic_special(const volatile void *ptr)
+{
+ /* volatile globals that have been observed in data races. */
+ return ptr == &jiffies || ptr == &current->state;
+}
+
+#endif /* _KERNEL_KCSAN_ATOMIC_H */
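kcsan_is_atomic_special() above is the slow-path hook for memory that is known to be accessed concurrently without being marked; extending the list is just another pointer comparison. A hedged sketch of such an extension (my_unmarked_flag is a made-up example, not something this series adds):

static bool kcsan_is_atomic_special(const volatile void *ptr)
{
	/* volatile globals that have been observed in data races. */
	return ptr == &jiffies || ptr == &current->state ||
	       ptr == &my_unmarked_flag;	/* hypothetical addition */
}

The core.c file that follows consumes this hook in kcsan_setup_watchpoint(): accesses matching it are not watched (unless they are ASSERT accesses), so they do not generate reports from the watching side.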
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
new file mode 100644
index 000000000000..15f67949d11e
--- /dev/null
+++ b/kernel/kcsan/core.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include "atomic.h"
+#include "encoding.h"
+#include "kcsan.h"
+
+static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
+unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
+unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
+static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
+static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "kcsan."
+module_param_named(early_enable, kcsan_early_enable, bool, 0);
+module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
+module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
+module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
+module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
+
+bool kcsan_enabled;
+
+/* Per-CPU kcsan_ctx for interrupts */
+static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
+ .disable_count = 0,
+ .atomic_next = 0,
+ .atomic_nest_count = 0,
+ .in_flat_atomic = false,
+ .access_mask = 0,
+ .scoped_accesses = {LIST_POISON1, NULL},
+};
+
+/*
+ * Helper macros to index into adjacent slots, starting from address slot
+ * itself, followed by the right and left slots.
+ *
+ * The purpose is 2-fold:
+ *
+ * 1. if during insertion the address slot is already occupied, check if
+ * any adjacent slots are free;
+ * 2. accesses that straddle a slot boundary due to size that exceeds a
+ * slot's range may check adjacent slots if any watchpoint matches.
+ *
+ * Note that accesses with very large size may still miss a watchpoint; however,
+ * given this should be rare, this is a reasonable trade-off to make, since this
+ * will avoid:
+ *
+ * 1. excessive contention between watchpoint checks and setup;
+ * 2. larger number of simultaneous watchpoints without sacrificing
+ * performance.
+ *
+ * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
+ *
+ * slot=0: [ 1, 2, 0]
+ * slot=9: [10, 11, 9]
+ * slot=63: [64, 65, 63]
+ */
+#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))
+
+/*
+ * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
+ * slot (middle) is fine if we assume that races occur rarely. The set of
+ * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
+ * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
+ */
+#define SLOT_IDX_FAST(slot, i) (slot + i)
+
+/*
+ * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
+ * able to safely update and access a watchpoint without introducing locking
+ * overhead, we encode each watchpoint as a single atomic long. The initial
+ * zero-initialized state matches INVALID_WATCHPOINT.
+ *
+ * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
+ * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
+ */
+static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
+
+/*
+ * Instructions to skip watching counter, used in should_watch(). We use a
+ * per-CPU counter to avoid excessive contention.
+ */
+static DEFINE_PER_CPU(long, kcsan_skip);
+
+static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
+ size_t size,
+ bool expect_write,
+ long *encoded_watchpoint)
+{
+ const int slot = watchpoint_slot(addr);
+ const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
+ atomic_long_t *watchpoint;
+ unsigned long wp_addr_masked;
+ size_t wp_size;
+ bool is_write;
+ int i;
+
+ BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);
+
+ for (i = 0; i < NUM_SLOTS; ++i) {
+ watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
+ *encoded_watchpoint = atomic_long_read(watchpoint);
+ if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
+ &wp_size, &is_write))
+ continue;
+
+ if (expect_write && !is_write)
+ continue;
+
+ /* Check if the watchpoint matches the access. */
+ if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
+ return watchpoint;
+ }
+
+ return NULL;
+}
+
+static inline atomic_long_t *
+insert_watchpoint(unsigned long addr, size_t size, bool is_write)
+{
+ const int slot = watchpoint_slot(addr);
+ const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
+ atomic_long_t *watchpoint;
+ int i;
+
+ /* Check slot index logic, ensuring we stay within array bounds. */
+ BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
+ BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);
+
+ for (i = 0; i < NUM_SLOTS; ++i) {
+ long expect_val = INVALID_WATCHPOINT;
+
+ /* Try to acquire this slot. */
+ watchpoint = &watchpoints[SLOT_IDX(slot, i)];
+ if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
+ return watchpoint;
+ }
+
+ return NULL;
+}
+
+/*
+ * Return true if watchpoint was successfully consumed, false otherwise.
+ *
+ * This may return false if:
+ *
+ * 1. another thread already consumed the watchpoint;
+ * 2. the thread that set up the watchpoint already removed it;
+ * 3. the watchpoint was removed and then re-used.
+ */
+static __always_inline bool
+try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
+{
+ return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
+}
+
+/* Return true if watchpoint was not touched, false if already consumed. */
+static inline bool consume_watchpoint(atomic_long_t *watchpoint)
+{
+ return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
+}
+
+/* Remove the watchpoint -- its slot may be reused after. */
+static inline void remove_watchpoint(atomic_long_t *watchpoint)
+{
+ atomic_long_set(watchpoint, INVALID_WATCHPOINT);
+}
+
+static __always_inline struct kcsan_ctx *get_ctx(void)
+{
+ /*
+ * In interrupts, use raw_cpu_ptr to avoid unnecessary checks that would
+ * also result in calls that generate warnings in uaccess regions.
+ */
+ return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
+}
+
+/* Check scoped accesses; never inline because this is a slow-path! */
+static noinline void kcsan_check_scoped_accesses(void)
+{
+ struct kcsan_ctx *ctx = get_ctx();
+ struct list_head *prev_save = ctx->scoped_accesses.prev;
+ struct kcsan_scoped_access *scoped_access;
+
+ ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
+ list_for_each_entry(scoped_access, &ctx->scoped_accesses, list)
+ __kcsan_check_access(scoped_access->ptr, scoped_access->size, scoped_access->type);
+ ctx->scoped_accesses.prev = prev_save;
+}
+
+/* Rules for generic atomic accesses. Called from fast-path. */
+static __always_inline bool
+is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+{
+ if (type & KCSAN_ACCESS_ATOMIC)
+ return true;
+
+ /*
+ * Unless explicitly declared atomic, never consider an assertion access
+ * as atomic. This allows using them also in atomic regions, such as
+ * seqlocks, without implicitly changing their semantics.
+ */
+ if (type & KCSAN_ACCESS_ASSERT)
+ return false;
+
+ if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
+ (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
+ IS_ALIGNED((unsigned long)ptr, size))
+ return true; /* Assume aligned writes up to word size are atomic. */
+
+ if (ctx->atomic_next > 0) {
+ /*
+ * Because we do not have separate contexts for nested
+ * interrupts, in case atomic_next is set, we simply assume that
+ * the outer interrupt set atomic_next. In the worst case, we
+ * will conservatively consider operations as atomic. This is a
+ * reasonable trade-off to make, since this case should be
+ * extremely rare; however, even if extremely rare, it could
+ * lead to false positives otherwise.
+ */
+ if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
+ --ctx->atomic_next; /* in task, or outer interrupt */
+ return true;
+ }
+
+ return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
+}
+
+static __always_inline bool
+should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx)
+{
+ /*
+ * Never set up watchpoints when memory operations are atomic.
+ *
+ * Need to check this first, before kcsan_skip check below: (1) atomics
+ * should not count towards skipped instructions, and (2) to actually
+ * decrement kcsan_atomic_next for consecutive instruction stream.
+ */
+ if (is_atomic(ptr, size, type, ctx))
+ return false;
+
+ if (this_cpu_dec_return(kcsan_skip) >= 0)
+ return false;
+
+ /*
+ * NOTE: If we get here, kcsan_skip must always be reset in slow path
+ * via reset_kcsan_skip() to avoid underflow.
+ */
+
+ /* this operation should be watched */
+ return true;
+}
+
+static inline void reset_kcsan_skip(void)
+{
+ long skip_count = kcsan_skip_watch -
+ (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
+ prandom_u32_max(kcsan_skip_watch) :
+ 0);
+ this_cpu_write(kcsan_skip, skip_count);
+}
+
+static __always_inline bool kcsan_is_enabled(void)
+{
+ return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
+}
+
+static inline unsigned int get_delay(void)
+{
+ unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
+ return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
+ prandom_u32_max(delay) :
+ 0);
+}
+
+/*
+ * Pull everything together: check_access() below contains the performance
+ * critical operations; the fast-path (including check_access) functions should
+ * all be inlinable by the instrumentation functions.
+ *
+ * The slow-path (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
+ * non-inlinable -- note that, we prefix these with "kcsan_" to ensure they can
+ * be filtered from the stacktrace, as well as give them unique names for the
+ * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
+ * since they do not access any user memory, but instrumentation is still
+ * emitted in UACCESS regions.
+ */
+
+static noinline void kcsan_found_watchpoint(const volatile void *ptr,
+ size_t size,
+ int type,
+ atomic_long_t *watchpoint,
+ long encoded_watchpoint)
+{
+ unsigned long flags;
+ bool consumed;
+
+ if (!kcsan_is_enabled())
+ return;
+
+ /*
+ * The access_mask check relies on value-change comparison. To avoid
+ * reporting a race where e.g. the writer set up the watchpoint, but the
+ * reader has access_mask!=0, we have to ignore the found watchpoint.
+ */
+ if (get_ctx()->access_mask != 0)
+ return;
+
+ /*
+ * Consume the watchpoint as soon as possible, to minimize the chances
+ * of !consumed. Consuming the watchpoint must always be guarded by
+ * kcsan_is_enabled() check, as otherwise we might erroneously
+ * trigger reports when disabled.
+ */
+ consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);
+
+ /* keep this after try_consume_watchpoint */
+ flags = user_access_save();
+
+ if (consumed) {
+ kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_MAYBE,
+ KCSAN_REPORT_CONSUMED_WATCHPOINT,
+ watchpoint - watchpoints);
+ } else {
+ /*
+ * The other thread may not print any diagnostics, as it has
+ * already removed the watchpoint, or another thread consumed
+ * the watchpoint before this thread.
+ */
+ kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
+ }
+
+ if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+ else
+ kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+
+ user_access_restore(flags);
+}
+
+static noinline void
+kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
+{
+ const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+ const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
+ atomic_long_t *watchpoint;
+ union {
+ u8 _1;
+ u16 _2;
+ u32 _4;
+ u64 _8;
+ } expect_value;
+ unsigned long access_mask;
+ enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
+ unsigned long ua_flags = user_access_save();
+ unsigned long irq_flags = 0;
+
+ /*
+ * Always reset kcsan_skip counter in slow-path to avoid underflow; see
+ * should_watch().
+ */
+ reset_kcsan_skip();
+
+ if (!kcsan_is_enabled())
+ goto out;
+
+ /*
+ * Special atomic rules: unlikely to be true, so we check them here in
+ * the slow-path, and not in the fast-path in is_atomic(). Call after
+ * kcsan_is_enabled(), as we may access memory that is not yet
+ * initialized during early boot.
+ */
+ if (!is_assert && kcsan_is_atomic_special(ptr))
+ goto out;
+
+ if (!check_encodable((unsigned long)ptr, size)) {
+ kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
+ goto out;
+ }
+
+ if (!kcsan_interrupt_watcher)
+ /* Use raw to avoid lockdep recursion via IRQ flags tracing. */
+ raw_local_irq_save(irq_flags);
+
+ watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
+ if (watchpoint == NULL) {
+ /*
+ * Out of capacity: the size of 'watchpoints', and the frequency
+ * with which should_watch() returns true should be tweaked so
+ * that this case happens very rarely.
+ */
+ kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
+ goto out_unlock;
+ }
+
+ kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
+ kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);
+
+ /*
+ * Read the current value, to later check and infer a race if the data
+ * was modified via a non-instrumented access, e.g. from a device.
+ */
+ expect_value._8 = 0;
+ switch (size) {
+ case 1:
+ expect_value._1 = READ_ONCE(*(const u8 *)ptr);
+ break;
+ case 2:
+ expect_value._2 = READ_ONCE(*(const u16 *)ptr);
+ break;
+ case 4:
+ expect_value._4 = READ_ONCE(*(const u32 *)ptr);
+ break;
+ case 8:
+ expect_value._8 = READ_ONCE(*(const u64 *)ptr);
+ break;
+ default:
+ break; /* ignore; we do not diff the values */
+ }
+
+ if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
+ kcsan_disable_current();
+ pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
+ is_write ? "write" : "read", size, ptr,
+ watchpoint_slot((unsigned long)ptr),
+ encode_watchpoint((unsigned long)ptr, size, is_write));
+ kcsan_enable_current();
+ }
+
+ /*
+ * Delay this thread, to increase probability of observing a racy
+ * conflicting access.
+ */
+ udelay(get_delay());
+
+ /*
+ * Re-read value, and check if it is as expected; if not, we infer a
+ * racy access.
+ */
+ access_mask = get_ctx()->access_mask;
+ switch (size) {
+ case 1:
+ expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
+ if (access_mask)
+ expect_value._1 &= (u8)access_mask;
+ break;
+ case 2:
+ expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
+ if (access_mask)
+ expect_value._2 &= (u16)access_mask;
+ break;
+ case 4:
+ expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
+ if (access_mask)
+ expect_value._4 &= (u32)access_mask;
+ break;
+ case 8:
+ expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
+ if (access_mask)
+ expect_value._8 &= (u64)access_mask;
+ break;
+ default:
+ break; /* ignore; we do not diff the values */
+ }
+
+ /* Were we able to observe a value-change? */
+ if (expect_value._8 != 0)
+ value_change = KCSAN_VALUE_CHANGE_TRUE;
+
+ /* Check if this access raced with another. */
+ if (!consume_watchpoint(watchpoint)) {
+ /*
+ * Depending on the access type, map a value_change of MAYBE to
+ * TRUE (always report) or FALSE (never report).
+ */
+ if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+ if (access_mask != 0) {
+ /*
+ * For access with access_mask, we require a
+ * value-change, as it is likely that races on
+ * ~access_mask bits are expected.
+ */
+ value_change = KCSAN_VALUE_CHANGE_FALSE;
+ } else if (size > 8 || is_assert) {
+ /* Always assume a value-change. */
+ value_change = KCSAN_VALUE_CHANGE_TRUE;
+ }
+ }
+
+ /*
+ * No need to increment 'data_races' counter, as the racing
+ * thread already did.
+ *
+ * Count 'assert_failures' for each failed ASSERT access,
+ * therefore both this thread and the racing thread may
+ * increment this counter.
+ */
+ if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+ kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
+ watchpoint - watchpoints);
+ } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
+ /* Inferring a race, since the value should not have changed. */
+
+ kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
+ if (is_assert)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
+ kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
+ KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
+ watchpoint - watchpoints);
+ }
+
+ /*
+ * Remove watchpoint; must be after reporting, since the slot may be
+ * reused after this point.
+ */
+ remove_watchpoint(watchpoint);
+ kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
+out_unlock:
+ if (!kcsan_interrupt_watcher)
+ raw_local_irq_restore(irq_flags);
+out:
+ user_access_restore(ua_flags);
+}
+
+static __always_inline void check_access(const volatile void *ptr, size_t size,
+ int type)
+{
+ const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+ atomic_long_t *watchpoint;
+ long encoded_watchpoint;
+
+ /*
+ * Do nothing for 0 sized check; this comparison will be optimized out
+ * for constant sized instrumentation (__tsan_{read,write}N).
+ */
+ if (unlikely(size == 0))
+ return;
+
+ /*
+ * Avoid user_access_save in fast-path: find_watchpoint is safe without
+ * user_access_save, as the address that ptr points to is only used to
+ * check if a watchpoint exists; ptr is never dereferenced.
+ */
+ watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
+ &encoded_watchpoint);
+ /*
+ * It is safe to check kcsan_is_enabled() after find_watchpoint in the
+ * slow-path, as long as no state changes that cause a race to be
+ * detected and reported have occurred until kcsan_is_enabled() is
+ * checked.
+ */
+
+ if (unlikely(watchpoint != NULL))
+ kcsan_found_watchpoint(ptr, size, type, watchpoint,
+ encoded_watchpoint);
+ else {
+ struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
+
+ if (unlikely(should_watch(ptr, size, type, ctx)))
+ kcsan_setup_watchpoint(ptr, size, type);
+ else if (unlikely(ctx->scoped_accesses.prev))
+ kcsan_check_scoped_accesses();
+ }
+}
+
+/* === Public interface ===================================================== */
+
+void __init kcsan_init(void)
+{
+ BUG_ON(!in_task());
+
+ kcsan_debugfs_init();
+
+ /*
+ * We are in the init task, and no other tasks should be running;
+ * WRITE_ONCE without memory barrier is sufficient.
+ */
+ if (kcsan_early_enable)
+ WRITE_ONCE(kcsan_enabled, true);
+}
+
+/* === Exported interface =================================================== */
+
+void kcsan_disable_current(void)
+{
+ ++get_ctx()->disable_count;
+}
+EXPORT_SYMBOL(kcsan_disable_current);
+
+void kcsan_enable_current(void)
+{
+ if (get_ctx()->disable_count-- == 0) {
+ /*
+ * Warn if kcsan_enable_current() calls are unbalanced with
+ * kcsan_disable_current() calls, which causes disable_count to
+ * become negative and should not happen.
+ */
+ kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
+ kcsan_disable_current(); /* disable to generate warning */
+ WARN(1, "Unbalanced %s()", __func__);
+ kcsan_enable_current();
+ }
+}
+EXPORT_SYMBOL(kcsan_enable_current);
+
+void kcsan_enable_current_nowarn(void)
+{
+ if (get_ctx()->disable_count-- == 0)
+ kcsan_disable_current();
+}
+EXPORT_SYMBOL(kcsan_enable_current_nowarn);
+
+void kcsan_nestable_atomic_begin(void)
+{
+ /*
+ * Do *not* check and warn if we are in a flat atomic region: nestable
+ * and flat atomic regions are independent from each other.
+ * See include/linux/kcsan.h: struct kcsan_ctx comments for more
+ * details.
+ */
+
+ ++get_ctx()->atomic_nest_count;
+}
+EXPORT_SYMBOL(kcsan_nestable_atomic_begin);
+
+void kcsan_nestable_atomic_end(void)
+{
+ if (get_ctx()->atomic_nest_count-- == 0) {
+ /*
+ * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
+ * kcsan_nestable_atomic_begin() calls, which causes
+ * atomic_nest_count to become negative and should not happen.
+ */
+ kcsan_nestable_atomic_begin(); /* restore to 0 */
+ kcsan_disable_current(); /* disable to generate warning */
+ WARN(1, "Unbalanced %s()", __func__);
+ kcsan_enable_current();
+ }
+}
+EXPORT_SYMBOL(kcsan_nestable_atomic_end);
+
+void kcsan_flat_atomic_begin(void)
+{
+ get_ctx()->in_flat_atomic = true;
+}
+EXPORT_SYMBOL(kcsan_flat_atomic_begin);
+
+void kcsan_flat_atomic_end(void)
+{
+ get_ctx()->in_flat_atomic = false;
+}
+EXPORT_SYMBOL(kcsan_flat_atomic_end);
+
+void kcsan_atomic_next(int n)
+{
+ get_ctx()->atomic_next = n;
+}
+EXPORT_SYMBOL(kcsan_atomic_next);
+
+void kcsan_set_access_mask(unsigned long mask)
+{
+ get_ctx()->access_mask = mask;
+}
+EXPORT_SYMBOL(kcsan_set_access_mask);
+
+struct kcsan_scoped_access *
+kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
+ struct kcsan_scoped_access *sa)
+{
+ struct kcsan_ctx *ctx = get_ctx();
+
+ __kcsan_check_access(ptr, size, type);
+
+ ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
+
+ INIT_LIST_HEAD(&sa->list);
+ sa->ptr = ptr;
+ sa->size = size;
+ sa->type = type;
+
+ if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
+ INIT_LIST_HEAD(&ctx->scoped_accesses);
+ list_add(&sa->list, &ctx->scoped_accesses);
+
+ ctx->disable_count--;
+ return sa;
+}
+EXPORT_SYMBOL(kcsan_begin_scoped_access);
+
+void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
+{
+ struct kcsan_ctx *ctx = get_ctx();
+
+ if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
+ return;
+
+ ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */
+
+ list_del(&sa->list);
+ if (list_empty(&ctx->scoped_accesses))
+ /*
+ * Ensure we do not enter kcsan_check_scoped_accesses()
+ * slow-path if unnecessary, and avoid requiring list_empty()
+ * in the fast-path (to avoid a READ_ONCE() and potential
+ * uaccess warning).
+ */
+ ctx->scoped_accesses.prev = NULL;
+
+ ctx->disable_count--;
+
+ __kcsan_check_access(sa->ptr, sa->size, sa->type);
+}
+EXPORT_SYMBOL(kcsan_end_scoped_access);
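+
+/*
+ * Illustrative usage (a sketch only): the scoped-access API above is what the
+ * ASSERT_EXCLUSIVE_*_SCOPED() macros are expected to build on, roughly:
+ *
+ *   struct kcsan_scoped_access sa;
+ *
+ *   kcsan_begin_scoped_access(&obj, sizeof(obj), KCSAN_ACCESS_ASSERT, &sa);
+ *   ... any concurrent access to obj while in scope is reported ...
+ *   kcsan_end_scoped_access(&sa);
+ */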
+
+void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
+{
+ check_access(ptr, size, type);
+}
+EXPORT_SYMBOL(__kcsan_check_access);
+
+/*
+ * KCSAN uses the same instrumentation that is emitted by supported compilers
+ * for ThreadSanitizer (TSAN).
+ *
+ * When enabled, the compiler emits instrumentation calls (the functions
+ * prefixed with "__tsan" below) for all loads and stores that it generated;
+ * inline asm is not instrumented.
+ *
+ * Note that not all supported compiler versions distinguish aligned/unaligned
+ * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
+ * version to the generic version, which can handle both.
+ */
+
+#define DEFINE_TSAN_READ_WRITE(size) \
+ void __tsan_read##size(void *ptr) \
+ { \
+ check_access(ptr, size, 0); \
+ } \
+ EXPORT_SYMBOL(__tsan_read##size); \
+ void __tsan_unaligned_read##size(void *ptr) \
+ __alias(__tsan_read##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_read##size); \
+ void __tsan_write##size(void *ptr) \
+ { \
+ check_access(ptr, size, KCSAN_ACCESS_WRITE); \
+ } \
+ EXPORT_SYMBOL(__tsan_write##size); \
+ void __tsan_unaligned_write##size(void *ptr) \
+ __alias(__tsan_write##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_write##size)
+
+DEFINE_TSAN_READ_WRITE(1);
+DEFINE_TSAN_READ_WRITE(2);
+DEFINE_TSAN_READ_WRITE(4);
+DEFINE_TSAN_READ_WRITE(8);
+DEFINE_TSAN_READ_WRITE(16);
+
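+/*
+ * For illustration (an assumption about typical compiler output): with
+ * instrumentation enabled, a plain 4-byte store such as "x = 1;" is compiled
+ * to roughly:
+ *
+ *   __tsan_write4(&x);
+ *   x = 1;
+ *
+ * which is how check_access() is reached for every plain memory access.
+ */
+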
+void __tsan_read_range(void *ptr, size_t size)
+{
+ check_access(ptr, size, 0);
+}
+EXPORT_SYMBOL(__tsan_read_range);
+
+void __tsan_write_range(void *ptr, size_t size)
+{
+ check_access(ptr, size, KCSAN_ACCESS_WRITE);
+}
+EXPORT_SYMBOL(__tsan_write_range);
+
+/*
+ * Use of explicit volatile is generally disallowed [1]; however, volatile is
+ * still used in various concurrent contexts, whether in low-level
+ * synchronization primitives or for legacy reasons.
+ * [1] https://lwn.net/Articles/233479/
+ *
+ * We only consider volatile accesses atomic if they are aligned and would pass
+ * the size-check of compiletime_assert_rwonce_type().
+ */
+#define DEFINE_TSAN_VOLATILE_READ_WRITE(size) \
+ void __tsan_volatile_read##size(void *ptr) \
+ { \
+ const bool is_atomic = size <= sizeof(long long) && \
+ IS_ALIGNED((unsigned long)ptr, size); \
+ if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
+ return; \
+ check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0); \
+ } \
+ EXPORT_SYMBOL(__tsan_volatile_read##size); \
+ void __tsan_unaligned_volatile_read##size(void *ptr) \
+ __alias(__tsan_volatile_read##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size); \
+ void __tsan_volatile_write##size(void *ptr) \
+ { \
+ const bool is_atomic = size <= sizeof(long long) && \
+ IS_ALIGNED((unsigned long)ptr, size); \
+ if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic) \
+ return; \
+ check_access(ptr, size, \
+ KCSAN_ACCESS_WRITE | \
+ (is_atomic ? KCSAN_ACCESS_ATOMIC : 0)); \
+ } \
+ EXPORT_SYMBOL(__tsan_volatile_write##size); \
+ void __tsan_unaligned_volatile_write##size(void *ptr) \
+ __alias(__tsan_volatile_write##size); \
+ EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)
+
+DEFINE_TSAN_VOLATILE_READ_WRITE(1);
+DEFINE_TSAN_VOLATILE_READ_WRITE(2);
+DEFINE_TSAN_VOLATILE_READ_WRITE(4);
+DEFINE_TSAN_VOLATILE_READ_WRITE(8);
+DEFINE_TSAN_VOLATILE_READ_WRITE(16);
+
+/*
+ * The below are not required by KCSAN, but can still be emitted by the
+ * compiler.
+ */
+void __tsan_func_entry(void *call_pc)
+{
+}
+EXPORT_SYMBOL(__tsan_func_entry);
+void __tsan_func_exit(void)
+{
+}
+EXPORT_SYMBOL(__tsan_func_exit);
+void __tsan_init(void)
+{
+}
+EXPORT_SYMBOL(__tsan_init);
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
new file mode 100644
index 000000000000..023e49c58d55
--- /dev/null
+++ b/kernel/kcsan/debugfs.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/atomic.h>
+#include <linux/bsearch.h>
+#include <linux/bug.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+#include "kcsan.h"
+
+/*
+ * Statistics counters.
+ */
+static atomic_long_t counters[KCSAN_COUNTER_COUNT];
+
+/*
+ * Addresses for filtering functions from reporting. This list can be used as a
+ * whitelist or blacklist.
+ */
+static struct {
+ unsigned long *addrs; /* array of addresses */
+ size_t size; /* current size */
+ int used; /* number of elements used */
+ bool sorted; /* if elements are sorted */
+ bool whitelist; /* true if whitelist, false if blacklist */
+} report_filterlist = {
+ .addrs = NULL,
+ .size = 8, /* small initial size */
+ .used = 0,
+ .sorted = false,
+ .whitelist = false, /* default is blacklist */
+};
+static DEFINE_SPINLOCK(report_filterlist_lock);
+
+static const char *counter_to_name(enum kcsan_counter_id id)
+{
+ switch (id) {
+ case KCSAN_COUNTER_USED_WATCHPOINTS: return "used_watchpoints";
+ case KCSAN_COUNTER_SETUP_WATCHPOINTS: return "setup_watchpoints";
+ case KCSAN_COUNTER_DATA_RACES: return "data_races";
+ case KCSAN_COUNTER_ASSERT_FAILURES: return "assert_failures";
+ case KCSAN_COUNTER_NO_CAPACITY: return "no_capacity";
+ case KCSAN_COUNTER_REPORT_RACES: return "report_races";
+ case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN: return "races_unknown_origin";
+ case KCSAN_COUNTER_UNENCODABLE_ACCESSES: return "unencodable_accesses";
+ case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
+ case KCSAN_COUNTER_COUNT:
+ BUG();
+ }
+ return NULL;
+}
+
+void kcsan_counter_inc(enum kcsan_counter_id id)
+{
+ atomic_long_inc(&counters[id]);
+}
+
+void kcsan_counter_dec(enum kcsan_counter_id id)
+{
+ atomic_long_dec(&counters[id]);
+}
+
+/*
+ * The microbenchmark allows benchmarking KCSAN core runtime only. To run
+ * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
+ * debugfs file. This will not generate any conflicts, and tests fast-path only.
+ */
+static noinline void microbenchmark(unsigned long iters)
+{
+ const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+ const bool was_enabled = READ_ONCE(kcsan_enabled);
+ cycles_t cycles;
+
+ /* We may have been called from an atomic region; reset context. */
+ memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+ /*
+ * Disable to benchmark fast-path for all accesses, and (expected
+ * negligible) call into slow-path, but never set up watchpoints.
+ */
+ WRITE_ONCE(kcsan_enabled, false);
+
+ pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+
+ cycles = get_cycles();
+ while (iters--) {
+ unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
+ int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
+ (!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
+ __kcsan_check_access((void *)addr, sizeof(long), type);
+ }
+ cycles = get_cycles() - cycles;
+
+ pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+
+ WRITE_ONCE(kcsan_enabled, was_enabled);
+ /* restore context */
+ current->kcsan_ctx = ctx_save;
+}
+
+/*
+ * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
+ * debugfs file from multiple tasks to generate real conflicts and show reports.
+ */
+static long test_dummy;
+static long test_flags;
+static long test_scoped;
+static noinline void test_thread(unsigned long iters)
+{
+ const long CHANGE_BITS = 0xff00ff00ff00ff00L;
+ const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+ cycles_t cycles;
+
+ /* We may have been called from an atomic region; reset context. */
+ memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+
+ pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+ pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
+ &test_dummy, &test_flags, &test_scoped);
+
+ cycles = get_cycles();
+ while (iters--) {
+ /* These all should generate reports. */
+ __kcsan_check_read(&test_dummy, sizeof(test_dummy));
+ ASSERT_EXCLUSIVE_WRITER(test_dummy);
+ ASSERT_EXCLUSIVE_ACCESS(test_dummy);
+
+ ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
+ __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+ ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
+ __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
+
+ /* not actually instrumented */
+ WRITE_ONCE(test_dummy, iters); /* to observe value-change */
+ __kcsan_check_write(&test_dummy, sizeof(test_dummy));
+
+ test_flags ^= CHANGE_BITS; /* generate value-change */
+ __kcsan_check_write(&test_flags, sizeof(test_flags));
+
+ BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
+ {
+ /* Should generate reports anywhere in this block. */
+ ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
+ ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
+ BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
+ /* Unrelated accesses. */
+ __kcsan_check_access(&cycles, sizeof(cycles), 0);
+ __kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
+ }
+ BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
+ }
+ cycles = get_cycles() - cycles;
+
+ pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+
+ /* restore context */
+ current->kcsan_ctx = ctx_save;
+}
+
+static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
+{
+ const unsigned long a = *(const unsigned long *)rhs;
+ const unsigned long b = *(const unsigned long *)lhs;
+
+ return a < b ? -1 : a == b ? 0 : 1;
+}
+
+bool kcsan_skip_report_debugfs(unsigned long func_addr)
+{
+ unsigned long symbolsize, offset;
+ unsigned long flags;
+ bool ret = false;
+
+ if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
+ return false;
+ func_addr -= offset; /* Get function start */
+
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+ if (report_filterlist.used == 0)
+ goto out;
+
+ /* Sort array if it is unsorted, and then do a binary search. */
+ if (!report_filterlist.sorted) {
+ sort(report_filterlist.addrs, report_filterlist.used,
+ sizeof(unsigned long), cmp_filterlist_addrs, NULL);
+ report_filterlist.sorted = true;
+ }
+ ret = !!bsearch(&func_addr, report_filterlist.addrs,
+ report_filterlist.used, sizeof(unsigned long),
+ cmp_filterlist_addrs);
+ if (report_filterlist.whitelist)
+ ret = !ret;
+
+out:
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+ return ret;
+}
+
+static void set_report_filterlist_whitelist(bool whitelist)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+ report_filterlist.whitelist = whitelist;
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+}
+
+/* Returns 0 on success, error-code otherwise. */
+static ssize_t insert_report_filterlist(const char *func)
+{
+ unsigned long flags;
+ unsigned long addr = kallsyms_lookup_name(func);
+ ssize_t ret = 0;
+
+ if (!addr) {
+ pr_err("KCSAN: could not find function: '%s'\n", func);
+ return -ENOENT;
+ }
+
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+
+ if (report_filterlist.addrs == NULL) {
+ /* initial allocation */
+ report_filterlist.addrs =
+ kmalloc_array(report_filterlist.size,
+ sizeof(unsigned long), GFP_ATOMIC);
+ if (report_filterlist.addrs == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ } else if (report_filterlist.used == report_filterlist.size) {
+ /* resize filterlist */
+ size_t new_size = report_filterlist.size * 2;
+ unsigned long *new_addrs =
+ krealloc(report_filterlist.addrs,
+ new_size * sizeof(unsigned long), GFP_ATOMIC);
+
+ if (new_addrs == NULL) {
+ /* leave filterlist itself untouched */
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ report_filterlist.size = new_size;
+ report_filterlist.addrs = new_addrs;
+ }
+
+ /* Note: deduplicating should be done in userspace. */
+ report_filterlist.addrs[report_filterlist.used++] =
+ kallsyms_lookup_name(func);
+ report_filterlist.sorted = false;
+
+out:
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+
+ return ret;
+}
+
+static int show_info(struct seq_file *file, void *v)
+{
+ int i;
+ unsigned long flags;
+
+ /* show stats */
+ seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
+ for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
+ seq_printf(file, "%s: %ld\n", counter_to_name(i),
+ atomic_long_read(&counters[i]));
+
+ /* show filter functions, and filter type */
+ spin_lock_irqsave(&report_filterlist_lock, flags);
+ seq_printf(file, "\n%s functions: %s\n",
+ report_filterlist.whitelist ? "whitelisted" : "blacklisted",
+ report_filterlist.used == 0 ? "none" : "");
+ for (i = 0; i < report_filterlist.used; ++i)
+ seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
+ spin_unlock_irqrestore(&report_filterlist_lock, flags);
+
+ return 0;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_info, NULL);
+}
+
+static ssize_t
+debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
+{
+ char kbuf[KSYM_NAME_LEN];
+ char *arg;
+ int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);
+
+ if (copy_from_user(kbuf, buf, read_len))
+ return -EFAULT;
+ kbuf[read_len] = '\0';
+ arg = strstrip(kbuf);
+
+ if (!strcmp(arg, "on")) {
+ WRITE_ONCE(kcsan_enabled, true);
+ } else if (!strcmp(arg, "off")) {
+ WRITE_ONCE(kcsan_enabled, false);
+ } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
+ unsigned long iters;
+
+ if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
+ return -EINVAL;
+ microbenchmark(iters);
+ } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
+ unsigned long iters;
+
+ if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
+ return -EINVAL;
+ test_thread(iters);
+ } else if (!strcmp(arg, "whitelist")) {
+ set_report_filterlist_whitelist(true);
+ } else if (!strcmp(arg, "blacklist")) {
+ set_report_filterlist_whitelist(false);
+ } else if (arg[0] == '!') {
+ ssize_t ret = insert_report_filterlist(&arg[1]);
+
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
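+/*
+ * Example interaction with the debugfs file created below (illustrative,
+ * assuming debugfs is mounted at /sys/kernel/debug; 'some_function' is a
+ * placeholder):
+ *
+ *   echo on > /sys/kernel/debug/kcsan                  # enable KCSAN
+ *   echo 'microbench=1000000' > /sys/kernel/debug/kcsan
+ *   echo 'test=10000' > /sys/kernel/debug/kcsan        # from several shells
+ *   echo '!some_function' > /sys/kernel/debug/kcsan    # add to filter list
+ *   cat /sys/kernel/debug/kcsan                        # stats and filters
+ */
+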
+static const struct file_operations debugfs_ops =
+{
+ .read = seq_read,
+ .open = debugfs_open,
+ .write = debugfs_write,
+ .release = single_release
+};
+
+void __init kcsan_debugfs_init(void)
+{
+ debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
+}
diff --git a/kernel/kcsan/encoding.h b/kernel/kcsan/encoding.h
new file mode 100644
index 000000000000..f03562aaf2eb
--- /dev/null
+++ b/kernel/kcsan/encoding.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _KERNEL_KCSAN_ENCODING_H
+#define _KERNEL_KCSAN_ENCODING_H
+
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+
+#include "kcsan.h"
+
+#define SLOT_RANGE PAGE_SIZE
+
+#define INVALID_WATCHPOINT 0
+#define CONSUMED_WATCHPOINT 1
+
+/*
+ * The maximum useful size of accesses for which we set up watchpoints is the
+ * max range of slots we check on an access.
+ */
+#define MAX_ENCODABLE_SIZE (SLOT_RANGE * (1 + KCSAN_CHECK_ADJACENT))
+
+/*
+ * Number of bits we use to store size info.
+ */
+#define WATCHPOINT_SIZE_BITS bits_per(MAX_ENCODABLE_SIZE)
+/*
+ * This encoding for addresses discards the upper bits (1 for is-write + SIZE_BITS);
+ * however, most 64-bit architectures do not use the full 64-bit address space.
+ * Also, in order for a false positive to be observable 2 things need to happen:
+ *
+ * 1. different addresses but with the same encoded address race;
+ * 2. and both map onto the same watchpoint slots;
+ *
+ * Both these are assumed to be very unlikely. However, in case it still
+ * happens, the report logic will filter out the false positive (see report.c).
+ */
+#define WATCHPOINT_ADDR_BITS (BITS_PER_LONG-1 - WATCHPOINT_SIZE_BITS)
+
+/*
+ * Masks to set/retrieve the encoded data.
+ */
+#define WATCHPOINT_WRITE_MASK BIT(BITS_PER_LONG-1)
+#define WATCHPOINT_SIZE_MASK \
+ GENMASK(BITS_PER_LONG-2, BITS_PER_LONG-2 - WATCHPOINT_SIZE_BITS)
+#define WATCHPOINT_ADDR_MASK \
+ GENMASK(BITS_PER_LONG-3 - WATCHPOINT_SIZE_BITS, 0)
+
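+/*
+ * Resulting layout, from most to least significant bits (illustration only,
+ * derived from the masks above):
+ *
+ *   | is-write | size | masked address |
+ */
+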
+static inline bool check_encodable(unsigned long addr, size_t size)
+{
+ return size <= MAX_ENCODABLE_SIZE;
+}
+
+static inline long
+encode_watchpoint(unsigned long addr, size_t size, bool is_write)
+{
+ return (long)((is_write ? WATCHPOINT_WRITE_MASK : 0) |
+ (size << WATCHPOINT_ADDR_BITS) |
+ (addr & WATCHPOINT_ADDR_MASK));
+}
+
+static __always_inline bool decode_watchpoint(long watchpoint,
+ unsigned long *addr_masked,
+ size_t *size,
+ bool *is_write)
+{
+ if (watchpoint == INVALID_WATCHPOINT ||
+ watchpoint == CONSUMED_WATCHPOINT)
+ return false;
+
+ *addr_masked = (unsigned long)watchpoint & WATCHPOINT_ADDR_MASK;
+ *size = ((unsigned long)watchpoint & WATCHPOINT_SIZE_MASK) >> WATCHPOINT_ADDR_BITS;
+ *is_write = !!((unsigned long)watchpoint & WATCHPOINT_WRITE_MASK);
+
+ return true;
+}
+
+/*
+ * Return watchpoint slot for an address.
+ */
+static __always_inline int watchpoint_slot(unsigned long addr)
+{
+ return (addr / PAGE_SIZE) % CONFIG_KCSAN_NUM_WATCHPOINTS;
+}
+
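+/*
+ * Example (illustrative, assuming 4 KiB pages): all addresses within the same
+ * page map to the same slot, e.g. 0x1000-0x1fff map to slot
+ * (1 % CONFIG_KCSAN_NUM_WATCHPOINTS); find_watchpoint() additionally checks
+ * KCSAN_CHECK_ADJACENT neighbouring slots.
+ */
+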
+static __always_inline bool matching_access(unsigned long addr1, size_t size1,
+ unsigned long addr2, size_t size2)
+{
+ unsigned long end_range1 = addr1 + size1 - 1;
+ unsigned long end_range2 = addr2 + size2 - 1;
+
+ return addr1 <= end_range2 && addr2 <= end_range1;
+}
+
+#endif /* _KERNEL_KCSAN_ENCODING_H */
diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
new file mode 100644
index 000000000000..763d6d08d94b
--- /dev/null
+++ b/kernel/kcsan/kcsan.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please
+ * see Documentation/dev-tools/kcsan.rst.
+ */
+
+#ifndef _KERNEL_KCSAN_KCSAN_H
+#define _KERNEL_KCSAN_KCSAN_H
+
+#include <linux/kcsan.h>
+
+/* The number of adjacent watchpoints to check. */
+#define KCSAN_CHECK_ADJACENT 1
+#define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
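+/*
+ * With KCSAN_CHECK_ADJACENT == 1 this yields NUM_SLOTS == 3: the slot for the
+ * address plus one neighbouring slot on each side.
+ */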
+
+extern unsigned int kcsan_udelay_task;
+extern unsigned int kcsan_udelay_interrupt;
+
+/*
+ * Globally enable and disable KCSAN.
+ */
+extern bool kcsan_enabled;
+
+/*
+ * Initialize debugfs file.
+ */
+void kcsan_debugfs_init(void);
+
+enum kcsan_counter_id {
+ /*
+ * Number of watchpoints currently in use.
+ */
+ KCSAN_COUNTER_USED_WATCHPOINTS,
+
+ /*
+ * Total number of watchpoints set up.
+ */
+ KCSAN_COUNTER_SETUP_WATCHPOINTS,
+
+ /*
+ * Total number of data races.
+ */
+ KCSAN_COUNTER_DATA_RACES,
+
+ /*
+ * Total number of ASSERT failures due to races. If the observed race is
+ * due to two conflicting ASSERT type accesses, then both will be
+ * counted.
+ */
+ KCSAN_COUNTER_ASSERT_FAILURES,
+
+ /*
+ * Number of times no watchpoints were available.
+ */
+ KCSAN_COUNTER_NO_CAPACITY,
+
+ /*
+ * A thread checking a watchpoint raced with another checking thread;
+ * only one will be reported.
+ */
+ KCSAN_COUNTER_REPORT_RACES,
+
+ /*
+ * Observed data value change, but writer thread unknown.
+ */
+ KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN,
+
+ /*
+ * The access cannot be encoded to a valid watchpoint.
+ */
+ KCSAN_COUNTER_UNENCODABLE_ACCESSES,
+
+ /*
+ * Watchpoint encoding caused a watchpoint to fire on mismatching
+ * accesses.
+ */
+ KCSAN_COUNTER_ENCODING_FALSE_POSITIVES,
+
+ KCSAN_COUNTER_COUNT, /* number of counters */
+};
+
+/*
+ * Increment/decrement counter with given id; avoid calling these in fast-path.
+ */
+extern void kcsan_counter_inc(enum kcsan_counter_id id);
+extern void kcsan_counter_dec(enum kcsan_counter_id id);
+
+/*
+ * Returns true if data races in the function symbol that maps to func_addr
+ * (offsets are ignored) should *not* be reported.
+ */
+extern bool kcsan_skip_report_debugfs(unsigned long func_addr);
+
+/*
+ * Value-change states.
+ */
+enum kcsan_value_change {
+ /*
+ * Did not observe a value-change, however, it is valid to report the
+ * race, depending on preferences.
+ */
+ KCSAN_VALUE_CHANGE_MAYBE,
+
+ /*
+ * Did not observe a value-change, and it is invalid to report the race.
+ */
+ KCSAN_VALUE_CHANGE_FALSE,
+
+ /*
+ * The value was observed to change, and the race should be reported.
+ */
+ KCSAN_VALUE_CHANGE_TRUE,
+};
+
+enum kcsan_report_type {
+ /*
+ * The thread that set up the watchpoint and briefly stalled was
+ * signalled that another thread triggered the watchpoint.
+ */
+ KCSAN_REPORT_RACE_SIGNAL,
+
+ /*
+ * A thread found and consumed a matching watchpoint.
+ */
+ KCSAN_REPORT_CONSUMED_WATCHPOINT,
+
+ /*
+ * No other thread was observed to race with the access, but the data
+ * value before and after the stall differs.
+ */
+ KCSAN_REPORT_RACE_UNKNOWN_ORIGIN,
+};
+
+/*
+ * Print a race report from thread that encountered the race.
+ */
+extern void kcsan_report(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change,
+ enum kcsan_report_type type, int watchpoint_idx);
+
+#endif /* _KERNEL_KCSAN_KCSAN_H */
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
new file mode 100644
index 000000000000..ac5f8345bae9
--- /dev/null
+++ b/kernel/kcsan/report.c
@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/debug_locks.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/preempt.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/stacktrace.h>
+
+#include "kcsan.h"
+#include "encoding.h"
+
+/*
+ * Max. number of stack entries to show in the report.
+ */
+#define NUM_STACK_ENTRIES 64
+
+/* Common access info. */
+struct access_info {
+ const volatile void *ptr;
+ size_t size;
+ int access_type;
+ int task_pid;
+ int cpu_id;
+};
+
+/*
+ * Other thread info: communicated from other racing thread to thread that set
+ * up the watchpoint, which then prints the complete report atomically.
+ */
+struct other_info {
+ struct access_info ai;
+ unsigned long stack_entries[NUM_STACK_ENTRIES];
+ int num_stack_entries;
+
+ /*
+ * Optionally pass @current. Typically we do not need to pass @current
+ * via @other_info since just @task_pid is sufficient. Passing @current
+ * has additional overhead.
+ *
+ * To safely pass @current, we must either use get_task_struct/
+ * put_task_struct, or stall the thread that populated @other_info.
+ *
+ * We cannot rely on get_task_struct/put_task_struct in case
+ * release_report() races with a task being released, and would have to
+ * free it in release_report(). This may result in deadlock if we want
+ * to use KCSAN on the allocators.
+ *
+ * Since we also want to reliably print held locks for
+ * CONFIG_KCSAN_VERBOSE, the current implementation stalls the thread
+ * that populated @other_info until it has been consumed.
+ */
+ struct task_struct *task;
+};
+
+/*
+ * To never block any producers of struct other_info, we need as many elements
+ * as we have watchpoints (upper bound on concurrent races to report).
+ */
+static struct other_info other_infos[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
+
+/*
+ * Information about reported races; used to rate limit reporting.
+ */
+struct report_time {
+ /*
+ * The last time the race was reported.
+ */
+ unsigned long time;
+
+ /*
+ * The frames of the 2 threads; if only 1 thread is known, one frame
+ * will be 0.
+ */
+ unsigned long frame1;
+ unsigned long frame2;
+};
+
+/*
+ * Since we also want to be able to debug allocators with KCSAN, to avoid
+ * deadlock, report_times cannot be dynamically resized with krealloc in
+ * rate_limit_report.
+ *
+ * Therefore, we use a fixed-size array, which at most will occupy a page. This
+ * still adequately rate limits reports, assuming that a) number of unique data
+ * races is not excessive, and b) occurrence of unique races within the
+ * same time window is limited.
+ */
+#define REPORT_TIMES_MAX (PAGE_SIZE / sizeof(struct report_time))
+#define REPORT_TIMES_SIZE \
+ (CONFIG_KCSAN_REPORT_ONCE_IN_MS > REPORT_TIMES_MAX ? \
+ REPORT_TIMES_MAX : \
+ CONFIG_KCSAN_REPORT_ONCE_IN_MS)
+static struct report_time report_times[REPORT_TIMES_SIZE];
+
+/*
+ * Spinlock serializing report generation, and access to @other_infos. Although
+ * it could make sense to have a finer-grained locking story for @other_infos,
+ * report generation needs to be serialized either way, so not much is gained.
+ */
+static DEFINE_RAW_SPINLOCK(report_lock);
+
+/*
+ * Checks if the race identified by thread frames frame1 and frame2 has
+ * been reported since (now - KCSAN_REPORT_ONCE_IN_MS).
+ */
+static bool rate_limit_report(unsigned long frame1, unsigned long frame2)
+{
+ struct report_time *use_entry = &report_times[0];
+ unsigned long invalid_before;
+ int i;
+
+ BUILD_BUG_ON(CONFIG_KCSAN_REPORT_ONCE_IN_MS != 0 && REPORT_TIMES_SIZE == 0);
+
+ if (CONFIG_KCSAN_REPORT_ONCE_IN_MS == 0)
+ return false;
+
+ invalid_before = jiffies - msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS);
+
+ /* Check if a matching race report exists. */
+ for (i = 0; i < REPORT_TIMES_SIZE; ++i) {
+ struct report_time *rt = &report_times[i];
+
+ /*
+ * Must always select an entry for use to store info as we
+ * cannot resize report_times; at the end of the scan, use_entry
+ * will be the oldest entry, which ideally also happened before
+ * KCSAN_REPORT_ONCE_IN_MS ago.
+ */
+ if (time_before(rt->time, use_entry->time))
+ use_entry = rt;
+
+ /*
+ * No need to check any further: neither this entry nor any
+ * following entry has been used yet.
+ */
+ if (rt->time == 0)
+ break;
+
+ /* Check if entry expired. */
+ if (time_before(rt->time, invalid_before))
+ continue; /* before KCSAN_REPORT_ONCE_IN_MS ago */
+
+ /* Reported recently, check if race matches. */
+ if ((rt->frame1 == frame1 && rt->frame2 == frame2) ||
+ (rt->frame1 == frame2 && rt->frame2 == frame1))
+ return true;
+ }
+
+ use_entry->time = jiffies;
+ use_entry->frame1 = frame1;
+ use_entry->frame2 = frame2;
+ return false;
+}
+
+/*
+ * Special rules to skip reporting.
+ */
+static bool
+skip_report(enum kcsan_value_change value_change, unsigned long top_frame)
+{
+ /* Should never get here if value_change==FALSE. */
+ WARN_ON_ONCE(value_change == KCSAN_VALUE_CHANGE_FALSE);
+
+ /*
+ * The first call to skip_report always has value_change==TRUE, since we
+ * cannot know the value written by an instrumented access. For the 2nd
+ * call there are 6 cases with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY:
+ *
+ * 1. read watchpoint, conflicting write (value_change==TRUE): report;
+ * 2. read watchpoint, conflicting write (value_change==MAYBE): skip;
+ * 3. write watchpoint, conflicting write (value_change==TRUE): report;
+ * 4. write watchpoint, conflicting write (value_change==MAYBE): skip;
+ * 5. write watchpoint, conflicting read (value_change==MAYBE): skip;
+ * 6. write watchpoint, conflicting read (value_change==TRUE): report;
+ *
+ * Cases 1-4 are intuitive and expected; case 5 ensures we do not report
+ * data races where the write may have rewritten the same value; case 6
+ * is possible either if the size is larger than what we check value
+ * changes for or the access type is KCSAN_ACCESS_ASSERT.
+ */
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) &&
+ value_change == KCSAN_VALUE_CHANGE_MAYBE) {
+ /*
+ * The access is a write, but the data value did not change.
+ *
+ * We opt-out of this filter for certain functions at request of
+ * maintainers.
+ */
+ char buf[64];
+ int len = scnprintf(buf, sizeof(buf), "%ps", (void *)top_frame);
+
+ if (!strnstr(buf, "rcu_", len) &&
+ !strnstr(buf, "_rcu", len) &&
+ !strnstr(buf, "_srcu", len))
+ return true;
+ }
+
+ return kcsan_skip_report_debugfs(top_frame);
+}
+
+static const char *get_access_type(int type)
+{
+ if (type & KCSAN_ACCESS_ASSERT) {
+ if (type & KCSAN_ACCESS_SCOPED) {
+ if (type & KCSAN_ACCESS_WRITE)
+ return "assert no accesses (scoped)";
+ else
+ return "assert no writes (scoped)";
+ } else {
+ if (type & KCSAN_ACCESS_WRITE)
+ return "assert no accesses";
+ else
+ return "assert no writes";
+ }
+ }
+
+ switch (type) {
+ case 0:
+ return "read";
+ case KCSAN_ACCESS_ATOMIC:
+ return "read (marked)";
+ case KCSAN_ACCESS_WRITE:
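+/*
+ * Illustrative note (a sketch of how these hooks compose, not authoritative):
+ * ASSERT_EXCLUSIVE_BITS() in <linux/kcsan-checks.h> is expected to expand to
+ * roughly:
+ *
+ *   kcsan_set_access_mask(mask);
+ *   __kcsan_check_access(&var, sizeof(var), KCSAN_ACCESS_ASSERT);
+ *   kcsan_set_access_mask(0);
+ *   kcsan_atomic_next(1);
+ */
+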
+ return "write";
+ case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "write (marked)";
+ case KCSAN_ACCESS_SCOPED:
+ return "read (scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
+ return "read (marked, scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
+ return "write (scoped)";
+ case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "write (marked, scoped)";
+ default:
+ BUG();
+ }
+}
+
+static const char *get_bug_type(int type)
+{
+ return (type & KCSAN_ACCESS_ASSERT) != 0 ? "assert: race" : "data-race";
+}
+
+/* Return thread description: in task or interrupt. */
+static const char *get_thread_desc(int task_id)
+{
+ if (task_id != -1) {
+ static char buf[32]; /* safe: protected by report_lock */
+
+ snprintf(buf, sizeof(buf), "task %i", task_id);
+ return buf;
+ }
+ return "interrupt";
+}
+
+/* Helper to skip KCSAN-related functions in stack-trace. */
+static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries)
+{
+ char buf[64];
+ char *cur;
+ int len, skip;
+
+ for (skip = 0; skip < num_entries; ++skip) {
+ len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skip]);
+
+ /* Never show tsan_* or {read,write}_once_size. */
+ if (strnstr(buf, "tsan_", len) ||
+ strnstr(buf, "_once_size", len))
+ continue;
+
+ cur = strnstr(buf, "kcsan_", len);
+ if (cur) {
+ cur += sizeof("kcsan_") - 1;
+ if (strncmp(cur, "test", sizeof("test") - 1))
+ continue; /* KCSAN runtime function. */
+ /* KCSAN related test. */
+ }
+
+ /*
+ * No match for runtime functions -- @skip entries to skip to
+ * get to first frame of interest.
+ */
+ break;
+ }
+
+ return skip;
+}
+
+/* Compares symbolized strings of addr1 and addr2. */
+static int sym_strcmp(void *addr1, void *addr2)
+{
+ char buf1[64];
+ char buf2[64];
+
+ snprintf(buf1, sizeof(buf1), "%pS", addr1);
+ snprintf(buf2, sizeof(buf2), "%pS", addr2);
+
+ return strncmp(buf1, buf2, sizeof(buf1));
+}
+
+static void print_verbose_info(struct task_struct *task)
+{
+ if (!task)
+ return;
+
+ pr_err("\n");
+ debug_show_held_locks(task);
+ print_irqtrace_events(task);
+}
+
+/*
+ * Returns true if a report was generated, false otherwise.
+ */
+static bool print_report(enum kcsan_value_change value_change,
+ enum kcsan_report_type type,
+ const struct access_info *ai,
+ const struct other_info *other_info)
+{
+ unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
+ int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
+ int skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
+ unsigned long this_frame = stack_entries[skipnr];
+ unsigned long other_frame = 0;
+ int other_skipnr = 0; /* silence uninit warnings */
+
+ /*
+ * Must check report filter rules before starting to print.
+ */
+ if (skip_report(KCSAN_VALUE_CHANGE_TRUE, stack_entries[skipnr]))
+ return false;
+
+ if (type == KCSAN_REPORT_RACE_SIGNAL) {
+ other_skipnr = get_stack_skipnr(other_info->stack_entries,
+ other_info->num_stack_entries);
+ other_frame = other_info->stack_entries[other_skipnr];
+
+ /* @value_change is only known for the other thread */
+ if (skip_report(value_change, other_frame))
+ return false;
+ }
+
+ if (rate_limit_report(this_frame, other_frame))
+ return false;
+
+ /* Print report header. */
+ pr_err("==================================================================\n");
+ switch (type) {
+ case KCSAN_REPORT_RACE_SIGNAL: {
+ int cmp;
+
+ /*
+ * Order functions lexicographically for consistent bug titles.
+ * Do not print offset of functions to keep title short.
+ */
+ cmp = sym_strcmp((void *)other_frame, (void *)this_frame);
+ pr_err("BUG: KCSAN: %s in %ps / %ps\n",
+ get_bug_type(ai->access_type | other_info->ai.access_type),
+ (void *)(cmp < 0 ? other_frame : this_frame),
+ (void *)(cmp < 0 ? this_frame : other_frame));
+ } break;
+
+ case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+ pr_err("BUG: KCSAN: %s in %pS\n", get_bug_type(ai->access_type),
+ (void *)this_frame);
+ break;
+
+ default:
+ BUG();
+ }
+
+ pr_err("\n");
+
+ /* Print information about the racing accesses. */
+ switch (type) {
+ case KCSAN_REPORT_RACE_SIGNAL:
+ pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
+ get_access_type(other_info->ai.access_type), other_info->ai.ptr,
+ other_info->ai.size, get_thread_desc(other_info->ai.task_pid),
+ other_info->ai.cpu_id);
+
+ /* Print the other thread's stack trace. */
+ stack_trace_print(other_info->stack_entries + other_skipnr,
+ other_info->num_stack_entries - other_skipnr,
+ 0);
+
+ if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+ print_verbose_info(other_info->task);
+
+ pr_err("\n");
+ pr_err("%s to 0x%px of %zu bytes by %s on cpu %i:\n",
+ get_access_type(ai->access_type), ai->ptr, ai->size,
+ get_thread_desc(ai->task_pid), ai->cpu_id);
+ break;
+
+ case KCSAN_REPORT_RACE_UNKNOWN_ORIGIN:
+ pr_err("race at unknown origin, with %s to 0x%px of %zu bytes by %s on cpu %i:\n",
+ get_access_type(ai->access_type), ai->ptr, ai->size,
+ get_thread_desc(ai->task_pid), ai->cpu_id);
+ break;
+
+ default:
+ BUG();
+ }
+ /* Print stack trace of this thread. */
+ stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
+ 0);
+
+ if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+ print_verbose_info(current);
+
+ /* Print report footer. */
+ pr_err("\n");
+ pr_err("Reported by Kernel Concurrency Sanitizer on:\n");
+ dump_stack_print_info(KERN_DEFAULT);
+ pr_err("==================================================================\n");
+
+ return true;
+}
+
+static void release_report(unsigned long *flags, struct other_info *other_info)
+{
+ if (other_info)
+ /*
+ * Use size to denote valid/invalid, since KCSAN entirely
+ * ignores 0-sized accesses.
+ */
+ other_info->ai.size = 0;
+
+ raw_spin_unlock_irqrestore(&report_lock, *flags);
+}
+
+/*
+ * Sets @other_info->task and awaits consumption of @other_info.
+ *
+ * Precondition: report_lock is held.
+ * Postcondition: report_lock is held.
+ */
+static void set_other_info_task_blocking(unsigned long *flags,
+ const struct access_info *ai,
+ struct other_info *other_info)
+{
+ /*
+ * We may be instrumenting a code-path where current->state is already
+ * something other than TASK_RUNNING.
+ */
+ const bool is_running = current->state == TASK_RUNNING;
+ /*
+ * To avoid deadlock in case we are in an interrupt here and this is a
+ * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a
+ * timeout to ensure this works in all contexts.
+ *
+ * Await approximately the worst case delay of the reporting thread (if
+ * we are not interrupted).
+ */
+ int timeout = max(kcsan_udelay_task, kcsan_udelay_interrupt);
+
+ other_info->task = current;
+ do {
+ if (is_running) {
+ /*
+ * Let lockdep know the real task is sleeping, to print
+ * the held locks (recall we turned lockdep off, so
+ * locking/unlocking @report_lock won't be recorded).
+ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ raw_spin_unlock_irqrestore(&report_lock, *flags);
+ /*
+ * We cannot call schedule() since we also cannot reliably
+ * determine if sleeping here is permitted -- see in_atomic().
+ */
+
+ udelay(1);
+ raw_spin_lock_irqsave(&report_lock, *flags);
+ if (timeout-- < 0) {
+ /*
+ * Abort. Reset @other_info->task to NULL, since it
+ * appears the other thread is still going to consume
+ * it. It will result in no verbose info printed for
+ * this task.
+ */
+ other_info->task = NULL;
+ break;
+ }
+ /*
+ * If invalid, or neither @ptr nor @current matches, then @other_info
+ * has been consumed and we may continue. If not, retry.
+ */
+ } while (other_info->ai.size && other_info->ai.ptr == ai->ptr &&
+ other_info->task == current);
+ if (is_running)
+ set_current_state(TASK_RUNNING);
+}
+
+/* Populate @other_info; requires that the provided @other_info is not in use. */
+static void prepare_report_producer(unsigned long *flags,
+ const struct access_info *ai,
+ struct other_info *other_info)
+{
+ raw_spin_lock_irqsave(&report_lock, *flags);
+
+ /*
+ * The same @other_infos entry cannot be used concurrently, because
+ * there is a one-to-one mapping to watchpoint slots (@watchpoints in
+ * core.c), and a watchpoint is only released for reuse after reporting
+ * is done by the consumer of @other_info. Therefore, it is impossible
+ * for another concurrent prepare_report_producer() to set the same
+ * @other_info, and we are guaranteed exclusivity for the @other_infos
+ * entry pointed to by @other_info.
+ *
+ * To check this property holds, size should never be non-zero here,
+ * because every consumer of struct other_info resets size to 0 in
+ * release_report().
+ */
+ WARN_ON(other_info->ai.size);
+
+ other_info->ai = *ai;
+ other_info->num_stack_entries = stack_trace_save(other_info->stack_entries, NUM_STACK_ENTRIES, 2);
+
+ if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
+ set_other_info_task_blocking(flags, ai, other_info);
+
+ raw_spin_unlock_irqrestore(&report_lock, *flags);
+}
+
+/* Awaits producer to fill @other_info and then returns. */
+static bool prepare_report_consumer(unsigned long *flags,
+ const struct access_info *ai,
+ struct other_info *other_info)
+{
+
+ raw_spin_lock_irqsave(&report_lock, *flags);
+ while (!other_info->ai.size) { /* Await valid @other_info. */
+ raw_spin_unlock_irqrestore(&report_lock, *flags);
+ cpu_relax();
+ raw_spin_lock_irqsave(&report_lock, *flags);
+ }
+
+ /* Should always have a matching access based on watchpoint encoding. */
+ if (WARN_ON(!matching_access((unsigned long)other_info->ai.ptr & WATCHPOINT_ADDR_MASK, other_info->ai.size,
+ (unsigned long)ai->ptr & WATCHPOINT_ADDR_MASK, ai->size)))
+ goto discard;
+
+ if (!matching_access((unsigned long)other_info->ai.ptr, other_info->ai.size,
+ (unsigned long)ai->ptr, ai->size)) {
+ /*
+ * If the actual accesses do not match, this was a false
+ * positive due to watchpoint encoding.
+ */
+ kcsan_counter_inc(KCSAN_COUNTER_ENCODING_FALSE_POSITIVES);
+ goto discard;
+ }
+
+ return true;
+
+discard:
+ release_report(flags, other_info);
+ return false;
+}
+
+/*
+ * Depending on the report type either sets @other_info and returns false, or
+ * awaits @other_info and returns true. If @other_info is not required for the
+ * report type, simply acquires @report_lock and returns true.
+ */
+static noinline bool prepare_report(unsigned long *flags,
+ enum kcsan_report_type type,
+ const struct access_info *ai,
+ struct other_info *other_info)
+{
+ switch (type) {
+ case KCSAN_REPORT_CONSUMED_WATCHPOINT:
+ prepare_report_producer(flags, ai, other_info);
+ return false;
+ case KCSAN_REPORT_RACE_SIGNAL:
+ return prepare_report_consumer(flags, ai, other_info);
+ default:
+ /* @other_info not required; just acquire @report_lock. */
+ raw_spin_lock_irqsave(&report_lock, *flags);
+ return true;
+ }
+}
+
+void kcsan_report(const volatile void *ptr, size_t size, int access_type,
+ enum kcsan_value_change value_change,
+ enum kcsan_report_type type, int watchpoint_idx)
+{
+ unsigned long flags = 0;
+ const struct access_info ai = {
+ .ptr = ptr,
+ .size = size,
+ .access_type = access_type,
+ .task_pid = in_task() ? task_pid_nr(current) : -1,
+ .cpu_id = raw_smp_processor_id()
+ };
+ struct other_info *other_info = type == KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
+ ? NULL : &other_infos[watchpoint_idx];
+
+ kcsan_disable_current();
+ if (WARN_ON(watchpoint_idx < 0 || watchpoint_idx >= ARRAY_SIZE(other_infos)))
+ goto out;
+
+ /*
+ * With TRACE_IRQFLAGS, lockdep's IRQ trace state becomes corrupted if
+ * we do not turn off lockdep here; this could happen due to recursion
+ * into lockdep via KCSAN if we detect a race in utilities used by
+ * lockdep.
+ */
+ lockdep_off();
+
+ if (prepare_report(&flags, type, &ai, other_info)) {
+ /*
+ * Never report if value_change is FALSE, only if it is
+ * either TRUE or MAYBE. In case of MAYBE, further filtering may
+ * be done once we know the full stack trace in print_report().
+ */
+ bool reported = value_change != KCSAN_VALUE_CHANGE_FALSE &&
+ print_report(value_change, type, &ai, other_info);
+
+ if (reported && panic_on_warn)
+ panic("panic_on_warn set ...\n");
+
+ release_report(&flags, other_info);
+ }
+
+ lockdep_on();
+out:
+ kcsan_enable_current();
+}
diff --git a/kernel/kcsan/test.c b/kernel/kcsan/test.c
new file mode 100644
index 000000000000..d26a052d3383
--- /dev/null
+++ b/kernel/kcsan/test.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/types.h>
+
+#include "encoding.h"
+
+#define ITERS_PER_TEST 2000
+
+/* Test requirements. */
+static bool test_requires(void)
+{
+ /* random should be initialized for the below tests */
+ return prandom_u32() + prandom_u32() != 0;
+}
+
+/*
+ * Test watchpoint encode and decode: check that encoding some access's info
+ * and then decoding it preserves that info.
+ */
+static bool test_encode_decode(void)
+{
+ int i;
+
+ for (i = 0; i < ITERS_PER_TEST; ++i) {
+ size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1;
+ bool is_write = !!prandom_u32_max(2);
+ unsigned long addr;
+
+ prandom_bytes(&addr, sizeof(addr));
+ if (WARN_ON(!check_encodable(addr, size)))
+ return false;
+
+ /* Encode and decode */
+ {
+ const long encoded_watchpoint =
+ encode_watchpoint(addr, size, is_write);
+ unsigned long verif_masked_addr;
+ size_t verif_size;
+ bool verif_is_write;
+
+ /* Check special watchpoints */
+ if (WARN_ON(decode_watchpoint(
+ INVALID_WATCHPOINT, &verif_masked_addr,
+ &verif_size, &verif_is_write)))
+ return false;
+ if (WARN_ON(decode_watchpoint(
+ CONSUMED_WATCHPOINT, &verif_masked_addr,
+ &verif_size, &verif_is_write)))
+ return false;
+
+ /* Check decoding watchpoint returns same data */
+ if (WARN_ON(!decode_watchpoint(
+ encoded_watchpoint, &verif_masked_addr,
+ &verif_size, &verif_is_write)))
+ return false;
+ if (WARN_ON(verif_masked_addr !=
+ (addr & WATCHPOINT_ADDR_MASK)))
+ goto fail;
+ if (WARN_ON(verif_size != size))
+ goto fail;
+ if (WARN_ON(is_write != verif_is_write))
+ goto fail;
+
+ continue;
+fail:
+ pr_err("%s fail: %s %zu bytes @ %lx -> encoded: %lx -> %s %zu bytes @ %lx\n",
+ __func__, is_write ? "write" : "read", size,
+ addr, encoded_watchpoint,
+ verif_is_write ? "write" : "read", verif_size,
+ verif_masked_addr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Test access matching function. */
+static bool test_matching_access(void)
+{
+ if (WARN_ON(!matching_access(10, 1, 10, 1)))
+ return false;
+ if (WARN_ON(!matching_access(10, 2, 11, 1)))
+ return false;
+ if (WARN_ON(!matching_access(10, 1, 9, 2)))
+ return false;
+ if (WARN_ON(matching_access(10, 1, 11, 1)))
+ return false;
+ if (WARN_ON(matching_access(9, 1, 10, 1)))
+ return false;
+
+ /*
+ * An access of size 0 could match another access, as demonstrated here.
+ * Rather than add more comparisons to 'matching_access()', which would
+ * end up in the fast-path for *all* checks, check_access() simply
+ * returns for all accesses of size 0.
+ */
+ if (WARN_ON(!matching_access(8, 8, 12, 0)))
+ return false;
+
+ return true;
+}
+
+static int __init kcsan_selftest(void)
+{
+ int passed = 0;
+ int total = 0;
+
+#define RUN_TEST(do_test) \
+ do { \
+ ++total; \
+ if (do_test()) \
+ ++passed; \
+ else \
+ pr_err("KCSAN selftest: " #do_test " failed"); \
+ } while (0)
+
+ RUN_TEST(test_requires);
+ RUN_TEST(test_encode_decode);
+ RUN_TEST(test_matching_access);
+
+ pr_info("KCSAN selftest: %d/%d tests passed\n", passed, total);
+ if (passed != total)
+ panic("KCSAN selftests failed");
+ return 0;
+}
+postcore_initcall(kcsan_selftest);
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index faa74d5f6941..bb05fd52de85 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -540,6 +540,11 @@ static int locate_mem_hole_callback(struct resource *res, void *arg)
unsigned long sz = end - start + 1;
/* Returning 0 will take to next memory range */
+
+ /* Don't use memory that will be detected and handled by a driver. */
+ if (res->flags & IORESOURCE_MEM_DRIVER_MANAGED)
+ return 0;
+
if (sz < kbuf->memsz)
return 0;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 0fbdee78266b..50cd84f53df0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2475,24 +2475,14 @@ static int show_kprobe_addr(struct seq_file *pi, void *v)
return 0;
}
-static const struct seq_operations kprobes_seq_ops = {
+static const struct seq_operations kprobes_sops = {
.start = kprobe_seq_start,
.next = kprobe_seq_next,
.stop = kprobe_seq_stop,
.show = show_kprobe_addr
};
-static int kprobes_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &kprobes_seq_ops);
-}
-
-static const struct file_operations debugfs_kprobes_operations = {
- .open = kprobes_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(kprobes);
/* kprobes/blacklist -- shows which functions can not be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
@@ -2529,24 +2519,13 @@ static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
mutex_unlock(&kprobe_mutex);
}
-static const struct seq_operations kprobe_blacklist_seq_ops = {
+static const struct seq_operations kprobe_blacklist_sops = {
.start = kprobe_blacklist_seq_start,
.next = kprobe_blacklist_seq_next,
.stop = kprobe_blacklist_seq_stop,
.show = kprobe_blacklist_seq_show,
};
-
-static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &kprobe_blacklist_seq_ops);
-}
-
-static const struct file_operations debugfs_kprobe_blacklist_ops = {
- .open = kprobe_blacklist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
static int arm_all_kprobes(void)
{
@@ -2705,13 +2684,12 @@ static int __init debugfs_kprobe_init(void)
dir = debugfs_create_dir("kprobes", NULL);
- debugfs_create_file("list", 0400, dir, NULL,
- &debugfs_kprobes_operations);
+ debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
debugfs_create_file("enabled", 0600, dir, &value, &fops_kp);
debugfs_create_file("blacklist", 0400, dir, NULL,
- &debugfs_kprobe_blacklist_ops);
+ &kprobe_blacklist_fops);
return 0;
}
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bfbfa481be3a..8e3d2d7fdf5e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1,13 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
* Copyright (C) 2004 IBM Corporation, Rusty Russell.
+ * Copyright (C) 2009 Red Hat, Inc.
*
* Creation is done via kthreadd, so that we get a clean environment
* even if we're invoked from userspace (think modprobe, hotplug cpu,
* etc.).
*/
#include <uapi/linux/sched/types.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
@@ -25,6 +29,7 @@
#include <linux/numa.h>
#include <trace/events/sched.h>
+
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
@@ -46,7 +51,9 @@ struct kthread_create_info
struct kthread {
unsigned long flags;
unsigned int cpu;
+ int (*threadfn)(void *);
void *data;
+ mm_segment_t oldfs;
struct completion parked;
struct completion exited;
#ifdef CONFIG_BLK_CGROUP
@@ -153,6 +160,20 @@ bool kthread_freezable_should_stop(bool *was_frozen)
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
/**
+ * kthread_func - return the function specified on kthread creation
+ * @task: kthread task in question
+ *
+ * Returns NULL if the task is not a kthread.
+ */
+void *kthread_func(struct task_struct *task)
+{
+ if (task->flags & PF_KTHREAD)
+ return to_kthread(task)->threadfn;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(kthread_func);
+
+/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
*
@@ -164,6 +185,7 @@ void *kthread_data(struct task_struct *task)
{
return to_kthread(task)->data;
}
+EXPORT_SYMBOL_GPL(kthread_data);
/**
* kthread_probe_data - speculative version of kthread_data()
@@ -244,6 +266,7 @@ static int kthread(void *_create)
do_exit(-ENOMEM);
}
+ self->threadfn = threadfn;
self->data = data;
init_completion(&self->exited);
init_completion(&self->parked);
@@ -1203,6 +1226,61 @@ void kthread_destroy_worker(struct kthread_worker *worker)
}
EXPORT_SYMBOL(kthread_destroy_worker);
+/**
+ * kthread_use_mm - make the calling kthread operate on an address space
+ * @mm: address space to operate on
+ */
+void kthread_use_mm(struct mm_struct *mm)
+{
+ struct mm_struct *active_mm;
+ struct task_struct *tsk = current;
+
+ WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+ WARN_ON_ONCE(tsk->mm);
+
+ task_lock(tsk);
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ mmgrab(mm);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
+ task_unlock(tsk);
+#ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+#endif
+
+ if (active_mm != mm)
+ mmdrop(active_mm);
+
+ to_kthread(tsk)->oldfs = get_fs();
+ set_fs(USER_DS);
+}
+EXPORT_SYMBOL_GPL(kthread_use_mm);
+
+/**
+ * kthread_unuse_mm - reverse the effect of kthread_use_mm()
+ * @mm: address space to operate on
+ */
+void kthread_unuse_mm(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+ WARN_ON_ONCE(!tsk->mm);
+
+ set_fs(to_kthread(tsk)->oldfs);
+
+ task_lock(tsk);
+ sync_mm_rss(mm);
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
+ task_unlock(tsk);
+}
+EXPORT_SYMBOL_GPL(kthread_unuse_mm);
+
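+/*
+ * Illustrative usage (sketch only): a kernel thread that needs to temporarily
+ * act on a user address space brackets its accesses:
+ *
+ *   kthread_use_mm(mm);
+ *   ... copy_to_user()/copy_from_user() against @mm ...
+ *   kthread_unuse_mm(mm);
+ */
+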
#ifdef CONFIG_BLK_CGROUP
/**
* kthread_associate_blkcg - associate blkcg to current kthread
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c3512e7e0801..f76fdb925532 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -191,18 +191,21 @@ static int klp_find_object_symbol(const char *objname, const char *name,
return -EINVAL;
}
-static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
+static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
+ unsigned int symndx, Elf_Shdr *relasec,
+ const char *sec_objname)
{
- int i, cnt, vmlinux, ret;
- char objname[MODULE_NAME_LEN];
- char symname[KSYM_NAME_LEN];
- char *strtab = pmod->core_kallsyms.strtab;
+ int i, cnt, ret;
+ char sym_objname[MODULE_NAME_LEN];
+ char sym_name[KSYM_NAME_LEN];
Elf_Rela *relas;
Elf_Sym *sym;
unsigned long sympos, addr;
+ bool sym_vmlinux;
+ bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
/*
- * Since the field widths for objname and symname in the sscanf()
+ * Since the field widths for sym_objname and sym_name in the sscanf()
* call are hard-coded and correspond to MODULE_NAME_LEN and
* KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
* and KSYM_NAME_LEN have the values we expect them to have.
@@ -216,27 +219,40 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
relas = (Elf_Rela *) relasec->sh_addr;
/* For each rela in this klp relocation section */
for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
- sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+ sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
if (sym->st_shndx != SHN_LIVEPATCH) {
pr_err("symbol %s is not marked as a livepatch symbol\n",
strtab + sym->st_name);
return -EINVAL;
}
- /* Format: .klp.sym.objname.symname,sympos */
+ /* Format: .klp.sym.sym_objname.sym_name,sympos */
cnt = sscanf(strtab + sym->st_name,
".klp.sym.%55[^.].%127[^,],%lu",
- objname, symname, &sympos);
+ sym_objname, sym_name, &sympos);
if (cnt != 3) {
pr_err("symbol %s has an incorrectly formatted name\n",
strtab + sym->st_name);
return -EINVAL;
}
+ sym_vmlinux = !strcmp(sym_objname, "vmlinux");
+
+ /*
+ * Prevent module-specific KLP rela sections from referencing
+ * vmlinux symbols. This helps prevent ordering issues with
+ * module special section initializations. Presumably such
+ * symbols are exported and normal relas can be used instead.
+ */
+ if (!sec_vmlinux && sym_vmlinux) {
+ pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
+ sym_name);
+ return -EINVAL;
+ }
+
/* klp_find_object_symbol() treats a NULL objname as vmlinux */
- vmlinux = !strcmp(objname, "vmlinux");
- ret = klp_find_object_symbol(vmlinux ? NULL : objname,
- symname, sympos, &addr);
+ ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
+ sym_name, sympos, &addr);
if (ret)
return ret;
@@ -246,54 +262,59 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
return 0;
}
-static int klp_write_object_relocations(struct module *pmod,
- struct klp_object *obj)
+/*
+ * At a high-level, there are two types of klp relocation sections: those which
+ * reference symbols which live in vmlinux; and those which reference symbols
+ * which live in other modules. This function is called for both types:
+ *
+ * 1) When a klp module itself loads, the module code calls this function to
+ * write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
+ * These relocations are written to the klp module text to allow the patched
+ * code/data to reference unexported vmlinux symbols. They're written as
+ * early as possible to ensure that other module init code (e.g.,
+ * jump_label_apply_nops) can access any unexported vmlinux symbols which
+ * might be referenced by the klp module's special sections.
+ *
+ * 2) When a to-be-patched module loads -- or is already loaded when a
+ * corresponding klp module loads -- klp code calls this function to write
+ * module-specific klp relocations (.klp.rela.{module}.* sections). These
+ * are written to the klp module text to allow the patched code/data to
+ * reference symbols which live in the to-be-patched module or one of its
+ * module dependencies. Exported symbols are supported, in addition to
+ * unexported symbols, in order to enable late module patching, which allows
+ * the to-be-patched module to be loaded and patched sometime *after* the
+ * klp module is loaded.
+ */
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+ const char *shstrtab, const char *strtab,
+ unsigned int symndx, unsigned int secndx,
+ const char *objname)
{
- int i, cnt, ret = 0;
- const char *objname, *secname;
+ int cnt, ret;
char sec_objname[MODULE_NAME_LEN];
- Elf_Shdr *sec;
+ Elf_Shdr *sec = sechdrs + secndx;
- if (WARN_ON(!klp_is_object_loaded(obj)))
+ /*
+ * Format: .klp.rela.sec_objname.section_name
+ * See comment in klp_resolve_symbols() for an explanation
+ * of the selected field width value.
+ */
+ cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
+ sec_objname);
+ if (cnt != 1) {
+ pr_err("section %s has an incorrectly formatted name\n",
+ shstrtab + sec->sh_name);
return -EINVAL;
+ }
- objname = klp_is_module(obj) ? obj->name : "vmlinux";
-
- /* For each klp relocation section */
- for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
- sec = pmod->klp_info->sechdrs + i;
- secname = pmod->klp_info->secstrings + sec->sh_name;
- if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
- continue;
-
- /*
- * Format: .klp.rela.sec_objname.section_name
- * See comment in klp_resolve_symbols() for an explanation
- * of the selected field width value.
- */
- cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
- if (cnt != 1) {
- pr_err("section %s has an incorrectly formatted name\n",
- secname);
- ret = -EINVAL;
- break;
- }
-
- if (strcmp(objname, sec_objname))
- continue;
-
- ret = klp_resolve_symbols(sec, pmod);
- if (ret)
- break;
+ if (strcmp(objname ? objname : "vmlinux", sec_objname))
+ return 0;
- ret = apply_relocate_add(pmod->klp_info->sechdrs,
- pmod->core_kallsyms.strtab,
- pmod->klp_info->symndx, i, pmod);
- if (ret)
- break;
- }
+ ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
+ if (ret)
+ return ret;
- return ret;
+ return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
}
/*
@@ -724,10 +745,27 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
func->old_sympos ? func->old_sympos : 1);
}
-/* Arches may override this to finish any remaining arch-specific tasks */
-void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
- struct klp_object *obj)
+static int klp_apply_object_relocs(struct klp_patch *patch,
+ struct klp_object *obj)
{
+ int i, ret;
+ struct klp_modinfo *info = patch->mod->klp_info;
+
+ for (i = 1; i < info->hdr.e_shnum; i++) {
+ Elf_Shdr *sec = info->sechdrs + i;
+
+ if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+ continue;
+
+ ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
+ info->secstrings,
+ patch->mod->core_kallsyms.strtab,
+ info->symndx, i, obj->name);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
/* parts of the initialization that is done only when the object is loaded */
@@ -737,21 +775,18 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_func *func;
int ret;
- mutex_lock(&text_mutex);
-
- module_disable_ro(patch->mod);
- ret = klp_write_object_relocations(patch->mod, obj);
- if (ret) {
- module_enable_ro(patch->mod, true);
- mutex_unlock(&text_mutex);
- return ret;
+ if (klp_is_module(obj)) {
+ /*
+ * Only write module-specific relocations here
+ * (.klp.rela.{module}.*). vmlinux-specific relocations were
+ * written earlier during the initialization of the klp module
+ * itself.
+ */
+ ret = klp_apply_object_relocs(patch, obj);
+ if (ret)
+ return ret;
}
- arch_klp_init_object_loaded(patch, obj);
- module_enable_ro(patch->mod, true);
-
- mutex_unlock(&text_mutex);
-
klp_for_each_func(obj, func) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
@@ -1139,6 +1174,11 @@ int klp_module_coming(struct module *mod)
if (WARN_ON(mod->state != MODULE_STATE_COMING))
return -EINVAL;
+ if (!strcmp(mod->name, "vmlinux")) {
+ pr_err("vmlinux.ko: invalid module name");
+ return -EINVAL;
+ }
+
mutex_lock(&klp_mutex);
/*
* Each module has to know that klp_module_coming()
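
The ".klp.rela.<objname>.<section>" parsing above relies on the %55 field width so the extracted object name always fits the 56-byte sec_objname buffer. Below is a minimal user-space sketch of that parsing, assuming MODULE_NAME_LEN is 56 as in the kernel; the section name used here is purely illustrative.

/* Stand-alone sketch of the ".klp.rela.<objname>.<section>" name parsing. */
#include <stdio.h>

#define MODULE_NAME_LEN 56	/* assumption: mirrors the kernel's module name limit */

static int parse_klp_secname(const char *secname, char *objname)
{
	/* %55[^.] copies at most 55 characters and stops at the next '.' */
	return sscanf(secname, ".klp.rela.%55[^.]", objname) == 1 ? 0 : -1;
}

int main(void)
{
	char obj[MODULE_NAME_LEN];

	if (!parse_klp_secname(".klp.rela.ext4.text.ext4_attr_show", obj))
		printf("object: %s\n", obj);	/* prints "object: ext4" */
	return 0;
}
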
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 45452facff3b..6d11cfb9b41f 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -5,6 +5,9 @@ KCOV_INSTRUMENT := n
obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+# Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+KCSAN_SANITIZE_lockdep.o := n
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4c057dd8e93b..38cce34d03dc 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4424,7 +4424,7 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
pr_cont(") at:\n");
- print_ip_sym(ip);
+ print_ip_sym(KERN_WARNING, ip);
pr_warn("but there are no more locks to release!\n");
pr_warn("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
@@ -5075,7 +5075,7 @@ static void print_lock_contention_bug(struct task_struct *curr,
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
pr_cont(") at:\n");
- print_ip_sym(ip);
+ print_ip_sym(KERN_WARNING, ip);
pr_warn("but there are no locks held!\n");
pr_warn("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index fd4fe1f5b458..36e69100e8e0 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -125,7 +125,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
printk("\n%s/%d's [blocked] stackdump:\n\n",
task->comm, task_pid_nr(task));
- show_stack(task, NULL);
+ show_stack(task, NULL, KERN_DEFAULT);
printk("\n%s/%d's [current] stackdump:\n\n",
current->comm, task_pid_nr(current));
dump_stack();
diff --git a/kernel/module.c b/kernel/module.c
index be5413903d20..e8a198588f26 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1946,7 +1946,6 @@ static void mod_sysfs_teardown(struct module *mod)
mod_sysfs_fini(mod);
}
-#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
/*
* LKM RO/NX protection: protect module's text/ro-data
* from modification and any data from execution.
@@ -1960,6 +1959,14 @@ static void mod_sysfs_teardown(struct module *mod)
*
* These values are always page-aligned (as is base)
*/
+
+/*
+ * Since some arches are moving towards PAGE_KERNEL module allocations instead
+ * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
+ * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
+ * whether we are strict.
+ */
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
static void frob_text(const struct module_layout *layout,
int (*set_memory)(unsigned long start, int num_pages))
{
@@ -1969,6 +1976,15 @@ static void frob_text(const struct module_layout *layout,
layout->text_size >> PAGE_SHIFT);
}
+static void module_enable_x(const struct module *mod)
+{
+ frob_text(&mod->core_layout, set_memory_x);
+ frob_text(&mod->init_layout, set_memory_x);
+}
+#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
+static void module_enable_x(const struct module *mod) { }
+#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
+
#ifdef CONFIG_STRICT_MODULE_RWX
static void frob_rodata(const struct module_layout *layout,
int (*set_memory)(unsigned long start, int num_pages))
@@ -2000,20 +2016,7 @@ static void frob_writable_data(const struct module_layout *layout,
(layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}
-/* livepatching wants to disable read-only so it can frob module. */
-void module_disable_ro(const struct module *mod)
-{
- if (!rodata_enabled)
- return;
-
- frob_text(&mod->core_layout, set_memory_rw);
- frob_rodata(&mod->core_layout, set_memory_rw);
- frob_ro_after_init(&mod->core_layout, set_memory_rw);
- frob_text(&mod->init_layout, set_memory_rw);
- frob_rodata(&mod->init_layout, set_memory_rw);
-}
-
-void module_enable_ro(const struct module *mod, bool after_init)
+static void module_enable_ro(const struct module *mod, bool after_init)
{
if (!rodata_enabled)
return;
@@ -2039,19 +2042,29 @@ static void module_enable_nx(const struct module *mod)
frob_writable_data(&mod->init_layout, set_memory_nx);
}
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
+ int i;
+
+ for (i = 0; i < hdr->e_shnum; i++) {
+ if ((sechdrs[i].sh_flags & shf_wx) == shf_wx)
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
+
#else /* !CONFIG_STRICT_MODULE_RWX */
static void module_enable_nx(const struct module *mod) { }
-#endif /* CONFIG_STRICT_MODULE_RWX */
-static void module_enable_x(const struct module *mod)
+static void module_enable_ro(const struct module *mod, bool after_init) {}
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
{
- frob_text(&mod->core_layout, set_memory_x);
- frob_text(&mod->init_layout, set_memory_x);
+ return 0;
}
-#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
-static void module_enable_nx(const struct module *mod) { }
-static void module_enable_x(const struct module *mod) { }
-#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
-
+#endif /* CONFIG_STRICT_MODULE_RWX */
#ifdef CONFIG_LIVEPATCH
/*
@@ -2337,11 +2350,13 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
continue;
- /* Livepatch relocation sections are applied by livepatch */
if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
- continue;
-
- if (info->sechdrs[i].sh_type == SHT_REL)
+ err = klp_apply_section_relocs(mod, info->sechdrs,
+ info->secstrings,
+ info->strtab,
+ info->index.sym, i,
+ NULL);
+ else if (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info->sechdrs, info->strtab,
info->index.sym, i, mod);
else if (info->sechdrs[i].sh_type == SHT_RELA)
@@ -3329,12 +3344,6 @@ static int check_module_license_and_versions(struct module *mod)
static void flush_module_icache(const struct module *mod)
{
- mm_segment_t old_fs;
-
- /* flush the icache in correct context */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
/*
* Flush the instruction cache, since we've played with text.
* Do it before processing of module parameters, so the module
@@ -3346,8 +3355,6 @@ static void flush_module_icache(const struct module *mod)
+ mod->init_layout.size);
flush_icache_range((unsigned long)mod->core_layout.base,
(unsigned long)mod->core_layout.base + mod->core_layout.size);
-
- set_fs(old_fs);
}
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
@@ -3395,6 +3402,11 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
if (err < 0)
return ERR_PTR(err);
+ err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
+ info->secstrings, info->mod);
+ if (err < 0)
+ return ERR_PTR(err);
+
/* We will do a special allocation for per-cpu sections later. */
info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
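
The W^X rule added by module_enforce_rwx_sections() above can be expressed outside the kernel as well; here is a compile-only sketch over Elf64 section headers (64-bit ELF assumed for simplicity):

#include <elf.h>
#include <stdbool.h>

/* Reject any section that is both writable and executable, mirroring the
 * check the kernel now applies before laying out a module. */
static bool sections_are_wx_free(const Elf64_Shdr *sechdrs, unsigned int shnum)
{
	const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR;
	unsigned int i;

	for (i = 0; i < shnum; i++) {
		if ((sechdrs[i].sh_flags & shf_wx) == shf_wx)
			return false;	/* the kernel returns -ENOEXEC here */
	}
	return true;
}
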
diff --git a/kernel/panic.c b/kernel/panic.c
index b69ee9e76cb2..85568bbfb12b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -36,6 +36,14 @@
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
+#ifdef CONFIG_SMP
+/*
+ * Should we dump the backtraces of all CPUs in an oops event?
+ * Defaults to 0, can be changed via sysctl.
+ */
+unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
+#endif /* CONFIG_SMP */
+
int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
@@ -44,6 +52,8 @@ static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
+unsigned long panic_on_taint;
+bool panic_on_taint_nousertaint = false;
int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);
@@ -434,6 +444,11 @@ void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
pr_warn("Disabling lock debugging due to kernel taint\n");
set_bit(flag, &tainted_mask);
+
+ if (tainted_mask & panic_on_taint) {
+ panic_on_taint = 0;
+ panic("panic_on_taint set ...");
+ }
}
EXPORT_SYMBOL(add_taint);
@@ -515,6 +530,9 @@ void oops_enter(void)
/* can't trust the integrity of the kernel anymore: */
debug_locks_off();
do_oops_enter_exit();
+
+ if (sysctl_oops_all_cpu_backtrace)
+ trigger_all_cpu_backtrace();
}
/*
@@ -686,3 +704,30 @@ static int __init oops_setup(char *s)
return 0;
}
early_param("oops", oops_setup);
+
+static int __init panic_on_taint_setup(char *s)
+{
+ char *taint_str;
+
+ if (!s)
+ return -EINVAL;
+
+ taint_str = strsep(&s, ",");
+ if (kstrtoul(taint_str, 16, &panic_on_taint))
+ return -EINVAL;
+
+ /* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
+ panic_on_taint &= TAINT_FLAGS_MAX;
+
+ if (!panic_on_taint)
+ return -EINVAL;
+
+ if (s && !strcmp(s, "nousertaint"))
+ panic_on_taint_nousertaint = true;
+
+ pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%sabled\n",
+ panic_on_taint, panic_on_taint_nousertaint ? "en" : "dis");
+
+ return 0;
+}
+early_param("panic_on_taint", panic_on_taint_setup);
diff --git a/kernel/pid.c b/kernel/pid.c
index c835b844aca7..f1496b757162 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -363,6 +363,25 @@ void change_pid(struct task_struct *task, enum pid_type type,
attach_pid(task, type);
}
+void exchange_tids(struct task_struct *left, struct task_struct *right)
+{
+ struct pid *pid1 = left->thread_pid;
+ struct pid *pid2 = right->thread_pid;
+ struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID];
+ struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID];
+
+ /* Swap the single entry tid lists */
+ hlists_swap_heads_rcu(head1, head2);
+
+ /* Swap the per task_struct pid */
+ rcu_assign_pointer(left->thread_pid, pid2);
+ rcu_assign_pointer(right->thread_pid, pid1);
+
+ /* Swap the cached value */
+ WRITE_ONCE(left->pid, pid_nr(pid2));
+ WRITE_ONCE(right->pid, pid_nr(pid1));
+}
+
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type type)
@@ -476,8 +495,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
rcu_read_lock();
if (!ns)
ns = task_active_pid_ns(current);
- if (likely(pid_alive(task)))
- nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
+ nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns);
rcu_read_unlock();
return nr;
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 6d475281c730..562aa0e450ed 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -29,7 +29,7 @@ static void handle_poweroff(int key)
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
}
-static struct sysrq_key_op sysrq_poweroff_op = {
+static const struct sysrq_key_op sysrq_poweroff_op = {
.handler = handle_poweroff,
.help_msg = "poweroff(o)",
.action_msg = "Power Off",
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 659800157b17..881128b9351e 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -34,7 +34,6 @@
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index ca0fcb5ced71..01e2858b5fe3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -1590,7 +1590,7 @@ int swsusp_unmark(void)
}
#endif
-static int swsusp_header_init(void)
+static int __init swsusp_header_init(void)
{
swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
if (!swsusp_header)
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index ae76bd329582..54a6dba0280d 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -807,7 +807,7 @@ static void sysrq_show_rcu(int key)
show_rcu_gp_kthreads();
}
-static struct sysrq_key_op sysrq_rcudump_op = {
+static const struct sysrq_key_op sysrq_rcudump_op = {
.handler = sysrq_show_rcu,
.help_msg = "show-rcu(y)",
.action_msg = "Show RCU tree",
diff --git a/kernel/relay.c b/kernel/relay.c
index 90c7a002436d..72fe443ea78f 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -91,7 +91,7 @@ static void relay_free_page_array(struct page **array)
*
* Returns 0 if ok, negative on error
*
- * Caller should already have grabbed mmap_sem.
+ * Caller should already have grabbed mmap_lock.
*/
static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
@@ -581,6 +581,11 @@ struct rchan *relay_open(const char *base_filename,
return NULL;
chan->buf = alloc_percpu(struct rchan_buf *);
+ if (!chan->buf) {
+ kfree(chan);
+ return NULL;
+ }
+
chan->version = RELAYFS_CHANNEL_VERSION;
chan->n_subbufs = n_subbufs;
chan->subbuf_size = subbuf_size;
@@ -991,14 +996,14 @@ static void relay_file_read_consume(struct rchan_buf *buf,
/*
* relay_file_read_avail - boolean, are there unconsumed bytes available?
*/
-static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
+static int relay_file_read_avail(struct rchan_buf *buf)
{
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t produced = buf->subbufs_produced;
size_t consumed = buf->subbufs_consumed;
- relay_file_read_consume(buf, read_pos, 0);
+ relay_file_read_consume(buf, 0, 0);
consumed = buf->subbufs_consumed;
@@ -1059,23 +1064,20 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
/**
* relay_file_read_start_pos - find the first available byte to read
- * @read_pos: file read position
* @buf: relay channel buffer
*
- * If the @read_pos is in the middle of padding, return the
+ * If the read_pos is in the middle of padding, return the
* position of the first actually available byte, otherwise
* return the original value.
*/
-static size_t relay_file_read_start_pos(size_t read_pos,
- struct rchan_buf *buf)
+static size_t relay_file_read_start_pos(struct rchan_buf *buf)
{
size_t read_subbuf, padding, padding_start, padding_end;
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
size_t consumed = buf->subbufs_consumed % n_subbufs;
+ size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
- if (!read_pos)
- read_pos = consumed * subbuf_size + buf->bytes_consumed;
read_subbuf = read_pos / subbuf_size;
padding = buf->padding[read_subbuf];
padding_start = (read_subbuf + 1) * subbuf_size - padding;
@@ -1131,10 +1133,10 @@ static ssize_t relay_file_read(struct file *filp,
do {
void *from;
- if (!relay_file_read_avail(buf, *ppos))
+ if (!relay_file_read_avail(buf))
break;
- read_start = relay_file_read_start_pos(*ppos, buf);
+ read_start = relay_file_read_start_pos(buf);
avail = relay_file_read_subbuf_avail(read_start, buf);
if (!avail)
break;
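
With *ppos gone from the relay read path above, the start position is derived purely from the consumed state of the channel buffer. A stand-alone sketch of that arithmetic follows, including the skip past sub-buffer padding; the padding-skip step follows the kernel's logic but is reconstructed here for illustration.

#include <stddef.h>

struct relay_pos {
	size_t subbuf_size;		/* bytes per sub-buffer */
	size_t n_subbufs;		/* sub-buffers per channel buffer */
	size_t subbufs_consumed;	/* sub-buffers consumed so far */
	size_t bytes_consumed;		/* bytes consumed in the current sub-buffer */
	const size_t *padding;		/* per-sub-buffer padding table */
};

static size_t read_start_pos(const struct relay_pos *s)
{
	size_t consumed = s->subbufs_consumed % s->n_subbufs;
	size_t read_pos = consumed * s->subbuf_size + s->bytes_consumed;
	size_t read_subbuf = read_pos / s->subbuf_size;
	size_t padding = s->padding[read_subbuf];
	size_t padding_start = (read_subbuf + 1) * s->subbuf_size - padding;
	size_t padding_end = (read_subbuf + 1) * s->subbuf_size;

	/* A position inside the padding moves to the start of the next sub-buffer. */
	if (read_pos >= padding_start && read_pos < padding_end)
		read_pos = ((read_subbuf + 1) % s->n_subbufs) * s->subbuf_size;

	return read_pos;
}
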
diff --git a/kernel/resource.c b/kernel/resource.c
index 76036a41143b..841737bbda9e 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1126,6 +1126,7 @@ struct resource * __request_region(struct resource *parent,
{
DECLARE_WAITQUEUE(wait, current);
struct resource *res = alloc_resource(GFP_KERNEL);
+ struct resource *orig_parent = parent;
if (!res)
return NULL;
@@ -1176,6 +1177,10 @@ struct resource * __request_region(struct resource *parent,
break;
}
write_unlock(&resource_lock);
+
+ if (res && orig_parent == &iomem_resource)
+ revoke_devmem(res);
+
return res;
}
EXPORT_SYMBOL(__request_region);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 21fb5a5662b5..5fc9c9b70862 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -7,6 +7,12 @@ endif
# that is not a function of syscall inputs. E.g. involuntary context switches.
KCOV_INSTRUMENT := n
+# There are numerous data races here, however, most of them are due to plain accesses.
+# This would make it even harder for syzbot to find reproducers, because these
+# bugs trigger without specific input. Disabled by default, but should be
+# re-enabled eventually.
+KCSAN_SANITIZE := n
+
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
# needed for x86 only. Why this used to be enabled for all architectures is beyond
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8298b2c240ce..8f360326861e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3922,8 +3922,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
&& in_atomic_preempt_off()) {
pr_err("Preemption disabled at:");
- print_ip_sym(preempt_disable_ip);
- pr_cont("\n");
+ print_ip_sym(KERN_ERR, preempt_disable_ip);
}
if (panic_on_warn)
panic("scheduling while atomic\n");
@@ -6026,7 +6025,7 @@ void sched_show_task(struct task_struct *p)
(unsigned long)task_thread_info(p)->flags);
print_worker_info(KERN_INFO, p);
- show_stack(p, NULL);
+ show_stack(p, NULL, KERN_INFO);
put_task_stack(p);
}
EXPORT_SYMBOL_GPL(sched_show_task);
@@ -6871,8 +6870,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
&& !preempt_count_equals(preempt_offset)) {
pr_err("Preemption disabled at:");
- print_ip_sym(preempt_disable_ip);
- pr_cont("\n");
+ print_ip_sym(KERN_ERR, preempt_disable_ip);
}
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 35f4cc024dcf..cbcb2f71599b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2770,7 +2770,7 @@ static void task_numa_work(struct callback_head *work)
return;
- if (!down_read_trylock(&mm->mmap_sem))
+ if (!mmap_read_trylock(mm))
return;
vma = find_vma(mm, start);
if (!vma) {
@@ -2838,7 +2838,7 @@ out:
mm->numa_scan_offset = start;
else
reset_ptenuma_scan(p);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Make sure tasks use at least 32x as much time to run other code
diff --git a/kernel/scs.c b/kernel/scs.c
index 222a7a9ad543..5d4d9bbdec36 100644
--- a/kernel/scs.c
+++ b/kernel/scs.c
@@ -74,7 +74,7 @@ static void scs_check_usage(struct task_struct *tsk)
for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
if (!READ_ONCE_NOCHECK(*p))
break;
- used++;
+ used += sizeof(*p);
}
while (used > curr) {
diff --git a/kernel/sys.c b/kernel/sys.c
index 891667a49bb7..fd46865b46ba 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1846,7 +1846,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
if (exe_file) {
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!vma->vm_file)
continue;
@@ -1855,7 +1855,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
goto exit_err;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
fput(exe_file);
}
@@ -1869,7 +1869,7 @@ exit:
fdput(exe);
return err;
exit_err:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
fput(exe_file);
goto exit;
}
@@ -2007,10 +2007,10 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
}
/*
- * arg_lock protects concurent updates but we still need mmap_sem for
+ * arg_lock protects concurrent updates but we still need mmap_lock for
* read to exclude races with sys_brk.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
/*
* We don't validate if these members are pointing to
@@ -2049,7 +2049,7 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
if (prctl_map.auxv_size)
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
@@ -2122,10 +2122,10 @@ static int prctl_set_mm(int opt, unsigned long addr,
/*
* arg_lock protects concurrent updates of arg boundaries, we need
- * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
+ * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr
* validation.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
spin_lock(&mm->arg_lock);
@@ -2217,7 +2217,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
error = 0;
out:
spin_unlock(&mm->arg_lock);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return error;
}
@@ -2442,13 +2442,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SET_THP_DISABLE:
if (arg3 || arg4 || arg5)
return -EINVAL;
- if (down_write_killable(&me->mm->mmap_sem))
+ if (mmap_write_lock_killable(me->mm))
return -EINTR;
if (arg2)
set_bit(MMF_DISABLE_THP, &me->mm->flags);
else
clear_bit(MMF_DISABLE_THP, &me->mm->flags);
- up_write(&me->mm->mmap_sem);
+ mmap_write_unlock(me->mm);
break;
case PR_MPX_ENABLE_MANAGEMENT:
case PR_MPX_DISABLE_MANAGEMENT:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 715774d8c55f..db1ce7af2563 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -866,15 +866,23 @@ static int proc_taint(struct ctl_table *table, int write,
return err;
if (write) {
+ int i;
+
+ /*
+ * If we are relying on panic_on_taint not producing
+ * false positives due to userspace input, bail out
+ * before setting the requested taint flags.
+ */
+ if (panic_on_taint_nousertaint && (tmptaint & panic_on_taint))
+ return -EINVAL;
+
/*
* Poor man's atomic or. Not worth adding a primitive
* to everyone's atomic.h for this
*/
- int i;
- for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
- if ((tmptaint >> i) & 1)
+ for (i = 0; i < TAINT_FLAGS_COUNT; i++)
+ if ((1UL << i) & tmptaint)
add_taint(i, LOCKDEP_STILL_OK);
- }
}
return err;
@@ -2141,6 +2149,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
+#ifdef CONFIG_SMP
+ {
+ .procname = "oops_all_cpu_backtrace",
+ .data = &sysctl_oops_all_cpu_backtrace,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif /* CONFIG_SMP */
{
.procname = "pid_max",
.data = &pid_max,
@@ -2428,6 +2447,17 @@ static struct ctl_table kern_table[] = {
},
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
+#ifdef CONFIG_SMP
+ {
+ .procname = "hung_task_all_cpu_backtrace",
+ .data = &sysctl_hung_task_all_cpu_backtrace,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif /* CONFIG_SMP */
{
.procname = "hung_task_panic",
.data = &sysctl_hung_task_panic,
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 7cb09c4cf21c..02441ead3c3b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -928,14 +928,12 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
clocksource_arch_init(cs);
-#ifdef CONFIG_GENERIC_VDSO_CLOCK_MODE
if (cs->vdso_clock_mode < 0 ||
cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
cs->name, cs->vdso_clock_mode);
cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
}
-#endif
/* Initialize mult/shift and max_idle_ns */
__clocksource_update_freq_scale(cs, scale, freq);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 2fd3b3fa68bf..165117996ea0 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -47,85 +47,65 @@ void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
/*
* Functions for validating access to tasks.
*/
-static struct task_struct *lookup_task(const pid_t pid, bool thread,
- bool gettime)
+static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{
- struct task_struct *p;
+ const bool thread = !!CPUCLOCK_PERTHREAD(clock);
+ const pid_t upid = CPUCLOCK_PID(clock);
+ struct pid *pid;
+
+ if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
+ return NULL;
/*
* If the encoded PID is 0, then the timer is targeted at current
* or the process to which current belongs.
*/
- if (!pid)
- return thread ? current : current->group_leader;
+ if (upid == 0)
+ return thread ? task_pid(current) : task_tgid(current);
- p = find_task_by_vpid(pid);
- if (!p)
- return p;
-
- if (thread)
- return same_thread_group(p, current) ? p : NULL;
+ pid = find_vpid(upid);
+ if (!pid)
+ return NULL;
- if (gettime) {
- /*
- * For clock_gettime(PROCESS) the task does not need to be
- * the actual group leader. tsk->sighand gives
- * access to the group's clock.
- *
- * Timers need the group leader because they take a
- * reference on it and store the task pointer until the
- * timer is destroyed.
- */
- return (p == current || thread_group_leader(p)) ? p : NULL;
+ if (thread) {
+ struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
+ return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
}
/*
- * For processes require that p is group leader.
+ * For clock_gettime(PROCESS) allow finding the process with
+ * the pid of the current task. The code needs the tgid
+ * of the process so that pid_task(pid, PIDTYPE_TGID) can be
+ * used to find the process.
*/
- return has_group_leader_pid(p) ? p : NULL;
+ if (gettime && (pid == task_pid(current)))
+ return task_tgid(current);
+
+ /*
+ * For process clocks, require that the pid identifies a process.
+ */
+ return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
}
-static struct task_struct *__get_task_for_clock(const clockid_t clock,
- bool getref, bool gettime)
+static inline int validate_clock_permissions(const clockid_t clock)
{
- const bool thread = !!CPUCLOCK_PERTHREAD(clock);
- const pid_t pid = CPUCLOCK_PID(clock);
- struct task_struct *p;
-
- if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
- return NULL;
+ int ret;
rcu_read_lock();
- p = lookup_task(pid, thread, gettime);
- if (p && getref)
- get_task_struct(p);
+ ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
rcu_read_unlock();
- return p;
-}
-
-static inline struct task_struct *get_task_for_clock(const clockid_t clock)
-{
- return __get_task_for_clock(clock, true, false);
-}
-static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
-{
- return __get_task_for_clock(clock, true, true);
-}
-
-static inline int validate_clock_permissions(const clockid_t clock)
-{
- return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
+ return ret;
}
-static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer)
+static inline enum pid_type clock_pid_type(const clockid_t clock)
{
- return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID;
+ return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}
static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
- return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer));
+ return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
}
/*
@@ -373,15 +353,18 @@ static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
struct task_struct *tsk;
u64 t;
- tsk = get_task_for_clock_get(clock);
- if (!tsk)
+ rcu_read_lock();
+ tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
+ if (!tsk) {
+ rcu_read_unlock();
return -EINVAL;
+ }
if (CPUCLOCK_PERTHREAD(clock))
t = cpu_clock_sample(clkid, tsk);
else
t = cpu_clock_sample_group(clkid, tsk, false);
- put_task_struct(tsk);
+ rcu_read_unlock();
*tp = ns_to_timespec64(t);
return 0;
@@ -394,19 +377,19 @@ static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
*/
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
- struct task_struct *p = get_task_for_clock(new_timer->it_clock);
+ struct pid *pid;
- if (!p)
+ rcu_read_lock();
+ pid = pid_for_clock(new_timer->it_clock, false);
+ if (!pid) {
+ rcu_read_unlock();
return -EINVAL;
+ }
new_timer->kclock = &clock_posix_cpu;
timerqueue_init(&new_timer->it.cpu.node);
- new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer));
- /*
- * get_task_for_clock() took a reference on @p. Drop it as the timer
- * holds a reference on the pid of @p.
- */
- put_task_struct(p);
+ new_timer->it.cpu.pid = get_pid(pid);
+ rcu_read_unlock();
return 0;
}
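
From user space, the path refactored above is reached through process and thread CPU clocks: clock_getcpuclockid() encodes the target pid into a clockid, and clock_gettime() on that clockid lands in posix_cpu_clock_get(), which now resolves the pid under RCU instead of taking a task reference. A small usage example:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	clockid_t clk;
	struct timespec ts;

	/* Build a per-process CPU clockid for our own pid, then sample it. */
	if (clock_getcpuclockid(getpid(), &clk) != 0)
		return 1;
	if (clock_gettime(clk, &ts) != 0)
		return 1;

	printf("process CPU time: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
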
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 24876faac753..a4020c0b4508 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -249,15 +249,6 @@ config TRACE_PREEMPT_TOGGLE
Enables hooks which will be called when preemption is first disabled,
and last enabled.
-config PREEMPTIRQ_EVENTS
- bool "Enable trace events for preempt and irq disable/enable"
- select TRACE_IRQFLAGS
- select TRACE_PREEMPT_TOGGLE if PREEMPTION
- select GENERIC_TRACER
- default n
- help
- Enable tracing of disable and enable events for preemption and irqs.
-
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
@@ -614,12 +605,30 @@ config TRACING_MAP
generally used outside of that context, and is normally
selected by tracers that use it.
+config SYNTH_EVENTS
+ bool "Synthetic trace events"
+ select TRACING
+ select DYNAMIC_EVENTS
+ default n
+ help
+ Synthetic events are user-defined trace events that can be
+ used to combine data from other trace events or in fact any
+ data source. Synthetic events can be generated indirectly
+ via the trace() action of histogram triggers or directly
+ by way of an in-kernel API.
+
+ See Documentation/trace/events.rst or
+ Documentation/trace/histogram.rst for details and examples.
+
+ If in doubt, say N.
+
config HIST_TRIGGERS
bool "Histogram triggers"
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
select TRACING_MAP
select TRACING
select DYNAMIC_EVENTS
+ select SYNTH_EVENTS
default n
help
Hist triggers allow one or more arbitrary trace event fields
@@ -815,7 +824,7 @@ config PREEMPTIRQ_DELAY_TEST
config SYNTH_EVENT_GEN_TEST
tristate "Test module for in-kernel synthetic event generation"
- depends on HIST_TRIGGERS
+ depends on SYNTH_EVENTS
help
This option creates a test module to check the base
functionality of in-kernel synthetic event definition and
@@ -838,6 +847,29 @@ config KPROBE_EVENT_GEN_TEST
If unsure, say N.
+config HIST_TRIGGERS_DEBUG
+ bool "Hist trigger debug support"
+ depends on HIST_TRIGGERS
+ help
+ Add "hist_debug" file for each event, which when read will
+ dump out a bunch of internal details about the hist triggers
+ defined on that event.
+
+ The hist_debug file serves a couple of purposes:
+
+ - Helps developers verify that nothing is broken.
+
+ - Provides educational information to support the details
+ of the hist trigger internals as described by
+ Documentation/trace/histogram-design.rst.
+
+ The hist_debug output only covers the data structures
+ related to the histogram definitions themselves and doesn't
+ display the internals of map buckets or variable values of
+ running histograms.
+
+ If unsure, say N.
+
endif # FTRACE
endif # TRACING_SUPPORT
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index f9dcd19165fa..6575bb0a0434 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -6,6 +6,9 @@ ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+# Avoid recursion due to instrumentation.
+KCSAN_SANITIZE := n
+
ifdef CONFIG_FTRACE_SELFTEST
# selftest needs instrumentation
CFLAGS_trace_selftest_dynamic.o = $(CC_FLAGS_FTRACE)
@@ -72,6 +75,7 @@ endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
obj-$(CONFIG_TRACE_EVENT_INJECT) += trace_events_inject.o
+obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o
obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index ea47f2084087..5773f0ba7e76 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -885,10 +885,10 @@ static void blk_add_trace_bio_bounce(void *ignore,
}
static void blk_add_trace_bio_complete(void *ignore,
- struct request_queue *q, struct bio *bio,
- int error)
+ struct request_queue *q, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
+ blk_add_trace_bio(q, bio, BLK_TA_COMPLETE,
+ blk_status_to_errno(bio->bi_status));
}
static void blk_add_trace_bio_backmerge(void *ignore,
@@ -995,8 +995,10 @@ static void blk_add_trace_split(void *ignore,
__blk_add_trace(bt, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
- BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
- &rpdu, blk_trace_bio_get_cgid(q, bio));
+ BLK_TA_SPLIT,
+ blk_status_to_errno(bio->bi_status),
+ sizeof(rpdu), &rpdu,
+ blk_trace_bio_get_cgid(q, bio));
}
rcu_read_unlock();
}
@@ -1033,7 +1035,8 @@ static void blk_add_trace_bio_remap(void *ignore,
r.sector_from = cpu_to_be64(from);
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
+ bio_op(bio), bio->bi_opf, BLK_TA_REMAP,
+ blk_status_to_errno(bio->bi_status),
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
rcu_read_unlock();
}
@@ -1253,21 +1256,10 @@ static inline __u16 t_error(const struct trace_entry *ent)
static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
- const __u64 *val = pdu_start(ent, has_cg);
+ const __be64 *val = pdu_start(ent, has_cg);
return be64_to_cpu(*val);
}
-static void get_pdu_remap(const struct trace_entry *ent,
- struct blk_io_trace_remap *r, bool has_cg)
-{
- const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
- __u64 sector_from = __r->sector_from;
-
- r->device_from = be32_to_cpu(__r->device_from);
- r->device_to = be32_to_cpu(__r->device_to);
- r->sector_from = be64_to_cpu(sector_from);
-}
-
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
bool has_cg);
@@ -1407,13 +1399,13 @@ static void blk_log_with_error(struct trace_seq *s,
static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
{
- struct blk_io_trace_remap r = { .device_from = 0, };
+ const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
- get_pdu_remap(ent, &r, has_cg);
trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
t_sector(ent), t_sec(ent),
- MAJOR(r.device_from), MINOR(r.device_from),
- (unsigned long long)r.sector_from);
+ MAJOR(be32_to_cpu(__r->device_from)),
+ MINOR(be32_to_cpu(__r->device_from)),
+ be64_to_cpu(__r->sector_from));
}
static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg)
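
blk_log_remap() above now converts the big-endian remap PDU fields in place rather than through a temporary struct. Below is a user-space sketch of decoding the same record with glibc's endian helpers; the struct layout mirrors blk_io_trace_remap, and the 20/12-bit major/minor split is an assumption based on the kernel's dev_t encoding.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct remap_pdu {		/* mirrors struct blk_io_trace_remap (big-endian on the wire) */
	uint32_t device_from;
	uint32_t device_to;
	uint64_t sector_from;
};

static void print_remap(const struct remap_pdu *r)
{
	uint32_t dev = be32toh(r->device_from);

	printf("<- (%u,%u) %llu\n",
	       (unsigned int)(dev >> 20), (unsigned int)(dev & 0xfffff),
	       (unsigned long long)be64toh(r->sector_from));
}
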
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3744372a24e2..e729c9e587a0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -136,17 +136,23 @@ static const struct bpf_func_proto bpf_override_return_proto = {
};
#endif
-BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
- const void __user *, unsafe_ptr)
+static __always_inline int
+bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
{
- int ret = probe_user_read(dst, unsafe_ptr, size);
+ int ret;
+ ret = probe_user_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
-
return ret;
}
+BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ return bpf_probe_read_user_common(dst, size, unsafe_ptr);
+}
+
const struct bpf_func_proto bpf_probe_read_user_proto = {
.func = bpf_probe_read_user,
.gpl_only = true,
@@ -156,17 +162,24 @@ const struct bpf_func_proto bpf_probe_read_user_proto = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
- const void __user *, unsafe_ptr)
+static __always_inline int
+bpf_probe_read_user_str_common(void *dst, u32 size,
+ const void __user *unsafe_ptr)
{
- int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
+ int ret;
+ ret = strncpy_from_user_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
-
return ret;
}
+BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ return bpf_probe_read_user_str_common(dst, size, unsafe_ptr);
+}
+
const struct bpf_func_proto bpf_probe_read_user_str_proto = {
.func = bpf_probe_read_user_str,
.gpl_only = true,
@@ -177,25 +190,25 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
};
static __always_inline int
-bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
- const bool compat)
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
int ret = security_locked_down(LOCKDOWN_BPF_READ);
if (unlikely(ret < 0))
- goto out;
- ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
- probe_kernel_read_strict(dst, unsafe_ptr, size);
+ goto fail;
+ ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
-out:
- memset(dst, 0, size);
+ goto fail;
+ return ret;
+fail:
+ memset(dst, 0, size);
return ret;
}
BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
const void *, unsafe_ptr)
{
- return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
}
const struct bpf_func_proto bpf_probe_read_kernel_proto = {
@@ -207,50 +220,37 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
- const void *, unsafe_ptr)
-{
- return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
-}
-
-static const struct bpf_func_proto bpf_probe_read_compat_proto = {
- .func = bpf_probe_read_compat,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_ANYTHING,
-};
-
static __always_inline int
-bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
- const bool compat)
+bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
{
int ret = security_locked_down(LOCKDOWN_BPF_READ);
if (unlikely(ret < 0))
- goto out;
+ goto fail;
+
/*
- * The strncpy_from_unsafe_*() call will likely not fill the entire
- * buffer, but that's okay in this circumstance as we're probing
+ * The strncpy_from_kernel_nofault() call will likely not fill the
+ * entire buffer, but that's okay in this circumstance as we're probing
* arbitrary memory anyway similar to bpf_probe_read_*() and might
* as well probe the stack. Thus, memory is explicitly cleared
* only in error case, so that improper users ignoring return
* code altogether don't copy garbage; otherwise length of string
* is returned that can be used for bpf_perf_event_output() et al.
*/
- ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
- strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
+ ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
-out:
- memset(dst, 0, size);
+ goto fail;
+
+ return 0;
+fail:
+ memset(dst, 0, size);
return ret;
}
BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
const void *, unsafe_ptr)
{
- return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}
const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
@@ -262,10 +262,34 @@ const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
.arg3_type = ARG_ANYTHING,
};
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ if ((unsigned long)unsafe_ptr < TASK_SIZE) {
+ return bpf_probe_read_user_common(dst, size,
+ (__force void __user *)unsafe_ptr);
+ }
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_proto = {
+ .func = bpf_probe_read_compat,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
const void *, unsafe_ptr)
{
- return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
+ if ((unsigned long)unsafe_ptr < TASK_SIZE) {
+ return bpf_probe_read_user_str_common(dst, size,
+ (__force void __user *)unsafe_ptr);
+ }
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr);
}
static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
@@ -276,6 +300,7 @@ static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
};
+#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
u32, size)
@@ -324,6 +349,31 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
return &bpf_probe_write_user_proto;
}
+static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
+ size_t bufsz)
+{
+ void __user *user_ptr = (__force void __user *)unsafe_ptr;
+
+ buf[0] = 0;
+
+ switch (fmt_ptype) {
+ case 's':
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)unsafe_ptr < TASK_SIZE) {
+ strncpy_from_user_nofault(buf, user_ptr, bufsz);
+ break;
+ }
+ fallthrough;
+#endif
+ case 'k':
+ strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
+ break;
+ case 'u':
+ strncpy_from_user_nofault(buf, user_ptr, bufsz);
+ break;
+ }
+}
+
/*
* Only limited trace_printk() conversion specifiers allowed:
* %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
@@ -406,24 +456,8 @@ fmt_str:
break;
}
- buf[0] = 0;
- switch (fmt_ptype) {
- case 's':
-#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
- strncpy_from_unsafe(buf, unsafe_ptr,
- sizeof(buf));
- break;
-#endif
- case 'k':
- strncpy_from_unsafe_strict(buf, unsafe_ptr,
- sizeof(buf));
- break;
- case 'u':
- strncpy_from_unsafe_user(buf,
- (__force void __user *)unsafe_ptr,
- sizeof(buf));
- break;
- }
+ bpf_trace_copy_string(buf, unsafe_ptr, fmt_ptype,
+ sizeof(buf));
goto fmt_next;
}
@@ -579,15 +613,17 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
}
if (fmt[i] == 's') {
+ void *unsafe_ptr;
+
/* try our best to copy */
if (memcpy_cnt >= MAX_SEQ_PRINTF_MAX_MEMCPY) {
err = -E2BIG;
goto out;
}
- err = strncpy_from_unsafe_strict(bufs->buf[memcpy_cnt],
- (void *) (long) args[fmt_cnt],
- MAX_SEQ_PRINTF_STR_LEN);
+ unsafe_ptr = (void *)(long)args[fmt_cnt];
+ err = strncpy_from_kernel_nofault(bufs->buf[memcpy_cnt],
+ unsafe_ptr, MAX_SEQ_PRINTF_STR_LEN);
if (err < 0)
bufs->buf[memcpy_cnt][0] = '\0';
params[fmt_cnt] = (u64)(long)bufs->buf[memcpy_cnt];
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b5765aeea698..c163c3531faf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2016,16 +2016,16 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
unsigned long ip = rec ? rec->ip : 0;
+ pr_info("------------[ ftrace bug ]------------\n");
+
switch (failed) {
case -EFAULT:
- FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace faulted on modifying ");
- print_ip_sym(ip);
+ print_ip_sym(KERN_INFO, ip);
break;
case -EINVAL:
- FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace failed to modify ");
- print_ip_sym(ip);
+ print_ip_sym(KERN_INFO, ip);
print_ip_ins(" actual: ", (unsigned char *)ip);
pr_cont("\n");
if (ftrace_expected) {
@@ -2034,14 +2034,12 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
}
break;
case -EPERM:
- FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace faulted on writing ");
- print_ip_sym(ip);
+ print_ip_sym(KERN_INFO, ip);
break;
default:
- FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace faulted on unknown error ");
- print_ip_sym(ip);
+ print_ip_sym(KERN_INFO, ip);
}
print_bug_type();
if (rec) {
@@ -2066,6 +2064,8 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
ip = ftrace_get_addr_curr(rec);
pr_cont("\n expected tramp: %lx\n", ip);
}
+
+ FTRACE_WARN_ON_ONCE(1);
}
static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ab27022c20f..ec44b0e2a19c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1299,8 +1299,11 @@ EXPORT_SYMBOL_GPL(tracing_off);
void disable_trace_on_warning(void)
{
- if (__disable_trace_on_warning)
+ if (__disable_trace_on_warning) {
+ trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
+ "Disabling tracing due to warning\n");
tracing_off();
+ }
}
/**
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4eb1d004d5f2..def769df5bf1 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1661,6 +1661,7 @@ extern struct list_head ftrace_events;
extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;
+extern const struct file_operations event_hist_debug_fops;
extern const struct file_operations event_inject_fops;
#ifdef CONFIG_HIST_TRIGGERS
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 242f59e7f17d..f6f55682d3e2 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2209,6 +2209,10 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
trace_create_file("hist", 0444, file->dir, file,
&event_hist_fops);
#endif
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
+ trace_create_file("hist_debug", 0444, file->dir, file,
+ &event_hist_debug_fops);
+#endif
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index fcab11cc6833..0b933546142e 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -19,13 +19,7 @@
#include <trace/events/mmflags.h>
#include "tracing_map.h"
-#include "trace.h"
-#include "trace_dynevent.h"
-
-#define SYNTH_SYSTEM "synthetic"
-#define SYNTH_FIELDS_MAX 32
-
-#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
+#include "trace_synth.h"
#define ERRORS \
C(NONE, "No error"), \
@@ -380,69 +374,6 @@ struct hist_trigger_data {
unsigned int n_save_var_str;
};
-static int create_synth_event(int argc, const char **argv);
-static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
-static int synth_event_release(struct dyn_event *ev);
-static bool synth_event_is_busy(struct dyn_event *ev);
-static bool synth_event_match(const char *system, const char *event,
- int argc, const char **argv, struct dyn_event *ev);
-
-static struct dyn_event_operations synth_event_ops = {
- .create = create_synth_event,
- .show = synth_event_show,
- .is_busy = synth_event_is_busy,
- .free = synth_event_release,
- .match = synth_event_match,
-};
-
-struct synth_field {
- char *type;
- char *name;
- size_t size;
- unsigned int offset;
- bool is_signed;
- bool is_string;
-};
-
-struct synth_event {
- struct dyn_event devent;
- int ref;
- char *name;
- struct synth_field **fields;
- unsigned int n_fields;
- unsigned int n_u64;
- struct trace_event_class class;
- struct trace_event_call call;
- struct tracepoint *tp;
- struct module *mod;
-};
-
-static bool is_synth_event(struct dyn_event *ev)
-{
- return ev->ops == &synth_event_ops;
-}
-
-static struct synth_event *to_synth_event(struct dyn_event *ev)
-{
- return container_of(ev, struct synth_event, devent);
-}
-
-static bool synth_event_is_busy(struct dyn_event *ev)
-{
- struct synth_event *event = to_synth_event(ev);
-
- return event->ref != 0;
-}
-
-static bool synth_event_match(const char *system, const char *event,
- int argc, const char **argv, struct dyn_event *ev)
-{
- struct synth_event *sev = to_synth_event(ev);
-
- return strcmp(sev->name, event) == 0 &&
- (!system || strcmp(system, SYNTH_SYSTEM) == 0);
-}
-
struct action_data;
typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
@@ -589,6 +520,7 @@ static struct track_data *track_data_alloc(unsigned int key_len,
track_data_free(data);
return ERR_PTR(-ENOMEM);
}
+
data->elt.private_data = elt_data;
elt_data->comm = kzalloc(TASK_COMM_LEN, GFP_KERNEL);
@@ -621,7 +553,6 @@ static void last_cmd_set(struct trace_event_file *file, char *str)
if (file) {
call = file->event_call;
-
system = call->class->system;
if (system) {
name = trace_event_name(call);
@@ -646,510 +577,6 @@ static void hist_err_clear(void)
last_cmd_loc[0] = '\0';
}
-struct synth_trace_event {
- struct trace_entry ent;
- u64 fields[];
-};
-
-static int synth_event_define_fields(struct trace_event_call *call)
-{
- struct synth_trace_event trace;
- int offset = offsetof(typeof(trace), fields);
- struct synth_event *event = call->data;
- unsigned int i, size, n_u64;
- char *name, *type;
- bool is_signed;
- int ret = 0;
-
- for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
- size = event->fields[i]->size;
- is_signed = event->fields[i]->is_signed;
- type = event->fields[i]->type;
- name = event->fields[i]->name;
- ret = trace_define_field(call, type, name, offset, size,
- is_signed, FILTER_OTHER);
- if (ret)
- break;
-
- event->fields[i]->offset = n_u64;
-
- if (event->fields[i]->is_string) {
- offset += STR_VAR_LEN_MAX;
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
- } else {
- offset += sizeof(u64);
- n_u64++;
- }
- }
-
- event->n_u64 = n_u64;
-
- return ret;
-}
-
-static bool synth_field_signed(char *type)
-{
- if (str_has_prefix(type, "u"))
- return false;
- if (strcmp(type, "gfp_t") == 0)
- return false;
-
- return true;
-}
-
-static int synth_field_is_string(char *type)
-{
- if (strstr(type, "char[") != NULL)
- return true;
-
- return false;
-}
-
-static int synth_field_string_size(char *type)
-{
- char buf[4], *end, *start;
- unsigned int len;
- int size, err;
-
- start = strstr(type, "char[");
- if (start == NULL)
- return -EINVAL;
- start += sizeof("char[") - 1;
-
- end = strchr(type, ']');
- if (!end || end < start)
- return -EINVAL;
-
- len = end - start;
- if (len > 3)
- return -EINVAL;
-
- strncpy(buf, start, len);
- buf[len] = '\0';
-
- err = kstrtouint(buf, 0, &size);
- if (err)
- return err;
-
- if (size > STR_VAR_LEN_MAX)
- return -EINVAL;
-
- return size;
-}
-
-static int synth_field_size(char *type)
-{
- int size = 0;
-
- if (strcmp(type, "s64") == 0)
- size = sizeof(s64);
- else if (strcmp(type, "u64") == 0)
- size = sizeof(u64);
- else if (strcmp(type, "s32") == 0)
- size = sizeof(s32);
- else if (strcmp(type, "u32") == 0)
- size = sizeof(u32);
- else if (strcmp(type, "s16") == 0)
- size = sizeof(s16);
- else if (strcmp(type, "u16") == 0)
- size = sizeof(u16);
- else if (strcmp(type, "s8") == 0)
- size = sizeof(s8);
- else if (strcmp(type, "u8") == 0)
- size = sizeof(u8);
- else if (strcmp(type, "char") == 0)
- size = sizeof(char);
- else if (strcmp(type, "unsigned char") == 0)
- size = sizeof(unsigned char);
- else if (strcmp(type, "int") == 0)
- size = sizeof(int);
- else if (strcmp(type, "unsigned int") == 0)
- size = sizeof(unsigned int);
- else if (strcmp(type, "long") == 0)
- size = sizeof(long);
- else if (strcmp(type, "unsigned long") == 0)
- size = sizeof(unsigned long);
- else if (strcmp(type, "pid_t") == 0)
- size = sizeof(pid_t);
- else if (strcmp(type, "gfp_t") == 0)
- size = sizeof(gfp_t);
- else if (synth_field_is_string(type))
- size = synth_field_string_size(type);
-
- return size;
-}
-
-static const char *synth_field_fmt(char *type)
-{
- const char *fmt = "%llu";
-
- if (strcmp(type, "s64") == 0)
- fmt = "%lld";
- else if (strcmp(type, "u64") == 0)
- fmt = "%llu";
- else if (strcmp(type, "s32") == 0)
- fmt = "%d";
- else if (strcmp(type, "u32") == 0)
- fmt = "%u";
- else if (strcmp(type, "s16") == 0)
- fmt = "%d";
- else if (strcmp(type, "u16") == 0)
- fmt = "%u";
- else if (strcmp(type, "s8") == 0)
- fmt = "%d";
- else if (strcmp(type, "u8") == 0)
- fmt = "%u";
- else if (strcmp(type, "char") == 0)
- fmt = "%d";
- else if (strcmp(type, "unsigned char") == 0)
- fmt = "%u";
- else if (strcmp(type, "int") == 0)
- fmt = "%d";
- else if (strcmp(type, "unsigned int") == 0)
- fmt = "%u";
- else if (strcmp(type, "long") == 0)
- fmt = "%ld";
- else if (strcmp(type, "unsigned long") == 0)
- fmt = "%lu";
- else if (strcmp(type, "pid_t") == 0)
- fmt = "%d";
- else if (strcmp(type, "gfp_t") == 0)
- fmt = "%x";
- else if (synth_field_is_string(type))
- fmt = "%s";
-
- return fmt;
-}
-
-static void print_synth_event_num_val(struct trace_seq *s,
- char *print_fmt, char *name,
- int size, u64 val, char *space)
-{
- switch (size) {
- case 1:
- trace_seq_printf(s, print_fmt, name, (u8)val, space);
- break;
-
- case 2:
- trace_seq_printf(s, print_fmt, name, (u16)val, space);
- break;
-
- case 4:
- trace_seq_printf(s, print_fmt, name, (u32)val, space);
- break;
-
- default:
- trace_seq_printf(s, print_fmt, name, val, space);
- break;
- }
-}
-
-static enum print_line_t print_synth_event(struct trace_iterator *iter,
- int flags,
- struct trace_event *event)
-{
- struct trace_array *tr = iter->tr;
- struct trace_seq *s = &iter->seq;
- struct synth_trace_event *entry;
- struct synth_event *se;
- unsigned int i, n_u64;
- char print_fmt[32];
- const char *fmt;
-
- entry = (struct synth_trace_event *)iter->ent;
- se = container_of(event, struct synth_event, call.event);
-
- trace_seq_printf(s, "%s: ", se->name);
-
- for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
- if (trace_seq_has_overflowed(s))
- goto end;
-
- fmt = synth_field_fmt(se->fields[i]->type);
-
- /* parameter types */
- if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
- trace_seq_printf(s, "%s ", fmt);
-
- snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
-
- /* parameter values */
- if (se->fields[i]->is_string) {
- trace_seq_printf(s, print_fmt, se->fields[i]->name,
- (char *)&entry->fields[n_u64],
- i == se->n_fields - 1 ? "" : " ");
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
- } else {
- struct trace_print_flags __flags[] = {
- __def_gfpflag_names, {-1, NULL} };
- char *space = (i == se->n_fields - 1 ? "" : " ");
-
- print_synth_event_num_val(s, print_fmt,
- se->fields[i]->name,
- se->fields[i]->size,
- entry->fields[n_u64],
- space);
-
- if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
- trace_seq_puts(s, " (");
- trace_print_flags_seq(s, "|",
- entry->fields[n_u64],
- __flags);
- trace_seq_putc(s, ')');
- }
- n_u64++;
- }
- }
-end:
- trace_seq_putc(s, '\n');
-
- return trace_handle_return(s);
-}
-
-static struct trace_event_functions synth_event_funcs = {
- .trace = print_synth_event
-};
-
-static notrace void trace_event_raw_event_synth(void *__data,
- u64 *var_ref_vals,
- unsigned int *var_ref_idx)
-{
- struct trace_event_file *trace_file = __data;
- struct synth_trace_event *entry;
- struct trace_event_buffer fbuffer;
- struct trace_buffer *buffer;
- struct synth_event *event;
- unsigned int i, n_u64, val_idx;
- int fields_size = 0;
-
- event = trace_file->event_call->data;
-
- if (trace_trigger_soft_disabled(trace_file))
- return;
-
- fields_size = event->n_u64 * sizeof(u64);
-
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- buffer = trace_file->tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
-
- entry = trace_event_buffer_reserve(&fbuffer, trace_file,
- sizeof(*entry) + fields_size);
- if (!entry)
- goto out;
-
- for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
- val_idx = var_ref_idx[i];
- if (event->fields[i]->is_string) {
- char *str_val = (char *)(long)var_ref_vals[val_idx];
- char *str_field = (char *)&entry->fields[n_u64];
-
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
- } else {
- struct synth_field *field = event->fields[i];
- u64 val = var_ref_vals[val_idx];
-
- switch (field->size) {
- case 1:
- *(u8 *)&entry->fields[n_u64] = (u8)val;
- break;
-
- case 2:
- *(u16 *)&entry->fields[n_u64] = (u16)val;
- break;
-
- case 4:
- *(u32 *)&entry->fields[n_u64] = (u32)val;
- break;
-
- default:
- entry->fields[n_u64] = val;
- break;
- }
- n_u64++;
- }
- }
-
- trace_event_buffer_commit(&fbuffer);
-out:
- ring_buffer_nest_end(buffer);
-}
-
-static void free_synth_event_print_fmt(struct trace_event_call *call)
-{
- if (call) {
- kfree(call->print_fmt);
- call->print_fmt = NULL;
- }
-}
-
-static int __set_synth_event_print_fmt(struct synth_event *event,
- char *buf, int len)
-{
- const char *fmt;
- int pos = 0;
- int i;
-
- /* When len=0, we just calculate the needed length */
-#define LEN_OR_ZERO (len ? len - pos : 0)
-
- pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
- for (i = 0; i < event->n_fields; i++) {
- fmt = synth_field_fmt(event->fields[i]->type);
- pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
- event->fields[i]->name, fmt,
- i == event->n_fields - 1 ? "" : ", ");
- }
- pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
-
- for (i = 0; i < event->n_fields; i++) {
- pos += snprintf(buf + pos, LEN_OR_ZERO,
- ", REC->%s", event->fields[i]->name);
- }
-
-#undef LEN_OR_ZERO
-
- /* return the length of print_fmt */
- return pos;
-}
-
-static int set_synth_event_print_fmt(struct trace_event_call *call)
-{
- struct synth_event *event = call->data;
- char *print_fmt;
- int len;
-
- /* First: called with 0 length to calculate the needed length */
- len = __set_synth_event_print_fmt(event, NULL, 0);
-
- print_fmt = kmalloc(len + 1, GFP_KERNEL);
- if (!print_fmt)
- return -ENOMEM;
-
- /* Second: actually write the @print_fmt */
- __set_synth_event_print_fmt(event, print_fmt, len + 1);
- call->print_fmt = print_fmt;
-
- return 0;
-}
-
-static void free_synth_field(struct synth_field *field)
-{
- kfree(field->type);
- kfree(field->name);
- kfree(field);
-}
-
-static struct synth_field *parse_synth_field(int argc, const char **argv,
- int *consumed)
-{
- struct synth_field *field;
- const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
- int len, ret = 0;
-
- if (field_type[0] == ';')
- field_type++;
-
- if (!strcmp(field_type, "unsigned")) {
- if (argc < 3)
- return ERR_PTR(-EINVAL);
- prefix = "unsigned ";
- field_type = argv[1];
- field_name = argv[2];
- *consumed = 3;
- } else {
- field_name = argv[1];
- *consumed = 2;
- }
-
- field = kzalloc(sizeof(*field), GFP_KERNEL);
- if (!field)
- return ERR_PTR(-ENOMEM);
-
- len = strlen(field_name);
- array = strchr(field_name, '[');
- if (array)
- len -= strlen(array);
- else if (field_name[len - 1] == ';')
- len--;
-
- field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
- if (!field->name) {
- ret = -ENOMEM;
- goto free;
- }
-
- if (field_type[0] == ';')
- field_type++;
- len = strlen(field_type) + 1;
- if (array)
- len += strlen(array);
- if (prefix)
- len += strlen(prefix);
-
- field->type = kzalloc(len, GFP_KERNEL);
- if (!field->type) {
- ret = -ENOMEM;
- goto free;
- }
- if (prefix)
- strcat(field->type, prefix);
- strcat(field->type, field_type);
- if (array) {
- strcat(field->type, array);
- if (field->type[len - 1] == ';')
- field->type[len - 1] = '\0';
- }
-
- field->size = synth_field_size(field->type);
- if (!field->size) {
- ret = -EINVAL;
- goto free;
- }
-
- if (synth_field_is_string(field->type))
- field->is_string = true;
-
- field->is_signed = synth_field_signed(field->type);
-
- out:
- return field;
- free:
- free_synth_field(field);
- field = ERR_PTR(ret);
- goto out;
-}
-
-static void free_synth_tracepoint(struct tracepoint *tp)
-{
- if (!tp)
- return;
-
- kfree(tp->name);
- kfree(tp);
-}
-
-static struct tracepoint *alloc_synth_tracepoint(char *name)
-{
- struct tracepoint *tp;
-
- tp = kzalloc(sizeof(*tp), GFP_KERNEL);
- if (!tp)
- return ERR_PTR(-ENOMEM);
-
- tp->name = kstrdup(name, GFP_KERNEL);
- if (!tp->name) {
- kfree(tp);
- return ERR_PTR(-ENOMEM);
- }
-
- return tp;
-}
-
typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
unsigned int *var_ref_idx);
@@ -1177,145 +604,6 @@ static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
}
}
-static struct synth_event *find_synth_event(const char *name)
-{
- struct dyn_event *pos;
- struct synth_event *event;
-
- for_each_dyn_event(pos) {
- if (!is_synth_event(pos))
- continue;
- event = to_synth_event(pos);
- if (strcmp(event->name, name) == 0)
- return event;
- }
-
- return NULL;
-}
-
-static struct trace_event_fields synth_event_fields_array[] = {
- { .type = TRACE_FUNCTION_TYPE,
- .define_fields = synth_event_define_fields },
- {}
-};
-
-static int register_synth_event(struct synth_event *event)
-{
- struct trace_event_call *call = &event->call;
- int ret = 0;
-
- event->call.class = &event->class;
- event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
- if (!event->class.system) {
- ret = -ENOMEM;
- goto out;
- }
-
- event->tp = alloc_synth_tracepoint(event->name);
- if (IS_ERR(event->tp)) {
- ret = PTR_ERR(event->tp);
- event->tp = NULL;
- goto out;
- }
-
- INIT_LIST_HEAD(&call->class->fields);
- call->event.funcs = &synth_event_funcs;
- call->class->fields_array = synth_event_fields_array;
-
- ret = register_trace_event(&call->event);
- if (!ret) {
- ret = -ENODEV;
- goto out;
- }
- call->flags = TRACE_EVENT_FL_TRACEPOINT;
- call->class->reg = trace_event_reg;
- call->class->probe = trace_event_raw_event_synth;
- call->data = event;
- call->tp = event->tp;
-
- ret = trace_add_event_call(call);
- if (ret) {
- pr_warn("Failed to register synthetic event: %s\n",
- trace_event_name(call));
- goto err;
- }
-
- ret = set_synth_event_print_fmt(call);
- if (ret < 0) {
- trace_remove_event_call(call);
- goto err;
- }
- out:
- return ret;
- err:
- unregister_trace_event(&call->event);
- goto out;
-}
-
-static int unregister_synth_event(struct synth_event *event)
-{
- struct trace_event_call *call = &event->call;
- int ret;
-
- ret = trace_remove_event_call(call);
-
- return ret;
-}
-
-static void free_synth_event(struct synth_event *event)
-{
- unsigned int i;
-
- if (!event)
- return;
-
- for (i = 0; i < event->n_fields; i++)
- free_synth_field(event->fields[i]);
-
- kfree(event->fields);
- kfree(event->name);
- kfree(event->class.system);
- free_synth_tracepoint(event->tp);
- free_synth_event_print_fmt(&event->call);
- kfree(event);
-}
-
-static struct synth_event *alloc_synth_event(const char *name, int n_fields,
- struct synth_field **fields)
-{
- struct synth_event *event;
- unsigned int i;
-
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event) {
- event = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- event->name = kstrdup(name, GFP_KERNEL);
- if (!event->name) {
- kfree(event);
- event = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
- if (!event->fields) {
- free_synth_event(event);
- event = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- dyn_event_init(&event->devent, &synth_event_ops);
-
- for (i = 0; i < n_fields; i++)
- event->fields[i] = fields[i];
-
- event->n_fields = n_fields;
- out:
- return event;
-}
-
static void action_trace(struct hist_trigger_data *hist_data,
struct tracing_map_elt *elt, void *rec,
struct ring_buffer_event *rbe, void *key,
@@ -1331,1056 +619,6 @@ struct hist_var_data {
struct hist_trigger_data *hist_data;
};
-static int synth_event_check_arg_fn(void *data)
-{
- struct dynevent_arg_pair *arg_pair = data;
- int size;
-
- size = synth_field_size((char *)arg_pair->lhs);
-
- return size ? 0 : -EINVAL;
-}
-
-/**
- * synth_event_add_field - Add a new field to a synthetic event cmd
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @type: The type of the new field to add
- * @name: The name of the new field to add
- *
- * Add a new field to a synthetic event cmd object. Field ordering is in
- * the same order the fields are added.
- *
- * See synth_field_size() for available types. If field_name contains
- * [n] the field is considered to be an array.
- *
- * Return: 0 if successful, error otherwise.
- */
-int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
- const char *name)
-{
- struct dynevent_arg_pair arg_pair;
- int ret;
-
- if (cmd->type != DYNEVENT_TYPE_SYNTH)
- return -EINVAL;
-
- if (!type || !name)
- return -EINVAL;
-
- dynevent_arg_pair_init(&arg_pair, 0, ';');
-
- arg_pair.lhs = type;
- arg_pair.rhs = name;
-
- ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
- if (ret)
- return ret;
-
- if (++cmd->n_fields > SYNTH_FIELDS_MAX)
- ret = -EINVAL;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_add_field);
-
-/**
- * synth_event_add_field_str - Add a new field to a synthetic event cmd
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @type_name: The type and name of the new field to add, as a single string
- *
- * Add a new field to a synthetic event cmd object, as a single
- * string. The @type_name string is expected to be of the form 'type
- * name', to which a ';' will be appended. No sanity checking is done -
- * what's passed in is assumed to already be well-formed. Field
- * ordering is in the same order the fields are added.
- *
- * See synth_field_size() for available types. If field_name contains
- * [n] the field is considered to be an array.
- *
- * Return: 0 if successful, error otherwise.
- */
-int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
-{
- struct dynevent_arg arg;
- int ret;
-
- if (cmd->type != DYNEVENT_TYPE_SYNTH)
- return -EINVAL;
-
- if (!type_name)
- return -EINVAL;
-
- dynevent_arg_init(&arg, ';');
-
- arg.str = type_name;
-
- ret = dynevent_arg_add(cmd, &arg, NULL);
- if (ret)
- return ret;
-
- if (++cmd->n_fields > SYNTH_FIELDS_MAX)
- ret = -EINVAL;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_add_field_str);
-
-/**
- * synth_event_add_fields - Add multiple fields to a synthetic event cmd
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @fields: An array of type/name field descriptions
- * @n_fields: The number of field descriptions contained in the fields array
- *
- * Add a new set of fields to a synthetic event cmd object. The event
- * fields that will be defined for the event should be passed in as an
- * array of struct synth_field_desc, and the number of elements in the
- * array passed in as n_fields. Field ordering will retain the
- * ordering given in the fields array.
- *
- * See synth_field_size() for available types. If field_name contains
- * [n] the field is considered to be an array.
- *
- * Return: 0 if successful, error otherwise.
- */
-int synth_event_add_fields(struct dynevent_cmd *cmd,
- struct synth_field_desc *fields,
- unsigned int n_fields)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < n_fields; i++) {
- if (fields[i].type == NULL || fields[i].name == NULL) {
- ret = -EINVAL;
- break;
- }
-
- ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
- if (ret)
- break;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_add_fields);
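
A minimal sketch of how the three field-adding helpers above might be combined to extend a command that has already been started with one of the command-start helpers documented next (the field names and the helper function are illustrative, not part of this patch):

    /* Assumes 'cmd' was already started elsewhere; names are made up. */
    static int add_example_fields(struct dynevent_cmd *cmd)
    {
            struct synth_field_desc extra[] = {
                    { .type = "unsigned int", .name = "cpu"   },
                    { .type = "u64",          .name = "ts_ns" },
            };
            int ret;

            /* One field at a time, type and name passed separately */
            ret = synth_event_add_field(cmd, "pid_t", "pid");
            if (ret)
                    return ret;

            /* Or as a single pre-formed "type name" string */
            ret = synth_event_add_field_str(cmd, "char[16] comm");
            if (ret)
                    return ret;

            /* Or several at once from a synth_field_desc array */
            return synth_event_add_fields(cmd, extra, ARRAY_SIZE(extra));
    }
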
-
-/**
- * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @name: The name of the synthetic event
- * @mod: The module creating the event, NULL if not created from a module
- * @args: Variable number of arg (pairs), one pair for each field
- *
- * NOTE: Users normally won't want to call this function directly, but
- * rather use the synth_event_gen_cmd_start() wrapper, which
- * automatically adds a NULL to the end of the arg list. If this
- * function is used directly, make sure the last arg in the variable
- * arg list is NULL.
- *
- * Generate a synthetic event command to be executed by
- * synth_event_gen_cmd_end(). This function can be used to generate
- * the complete command or only the first part of it; in the latter
- * case, synth_event_add_field(), synth_event_add_field_str(), or
- * synth_event_add_fields() can be used to add more fields following
- * this.
- *
- * There should be an even number of variable args, each pair consisting
- * of a type followed by a field name.
- *
- * See synth_field_size() for available types. If field_name contains
- * [n] the field is considered to be an array.
- *
- * Return: 0 if successful, error otherwise.
- */
-int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
- struct module *mod, ...)
-{
- struct dynevent_arg arg;
- va_list args;
- int ret;
-
- cmd->event_name = name;
- cmd->private_data = mod;
-
- if (cmd->type != DYNEVENT_TYPE_SYNTH)
- return -EINVAL;
-
- dynevent_arg_init(&arg, 0);
- arg.str = name;
- ret = dynevent_arg_add(cmd, &arg, NULL);
- if (ret)
- return ret;
-
- va_start(args, mod);
- for (;;) {
- const char *type, *name;
-
- type = va_arg(args, const char *);
- if (!type)
- break;
- name = va_arg(args, const char *);
- if (!name)
- break;
-
- if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
- ret = -EINVAL;
- break;
- }
-
- ret = synth_event_add_field(cmd, type, name);
- if (ret)
- break;
- }
- va_end(args);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
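
As a rough illustration of the command flow described above: synth_event_gen_cmd_start() is the NULL-terminating wrapper mentioned in the NOTE, and synth_event_gen_cmd_end() is the finalizer referenced in these comments but defined elsewhere; the event and field names are hypothetical.

    static int create_wake_lat_synth_event(void)
    {
            struct dynevent_cmd cmd;
            char *buf;
            int ret;

            /* Buffer the command string is built into */
            buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

            /* Start the command with type/name pairs (wrapper appends the NULL) */
            ret = synth_event_gen_cmd_start(&cmd, "wake_lat_synth", THIS_MODULE,
                                            "pid_t", "pid",
                                            "u64", "lat",
                                            "char[16]", "comm");
            if (ret)
                    goto out;

            /* Execute the accumulated command, registering the event */
            ret = synth_event_gen_cmd_end(&cmd);
    out:
            kfree(buf);
            return ret;
    }
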
-
-/**
- * synth_event_gen_cmd_array_start - Start synthetic event command from an array
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @name: The name of the synthetic event
- * @fields: An array of type/name field descriptions
- * @n_fields: The number of field descriptions contained in the fields array
- *
- * Generate a synthetic event command to be executed by
- * synth_event_gen_cmd_end(). This function can be used to generate
- * the complete command or only the first part of it; in the latter
- * case, synth_event_add_field(), synth_event_add_field_str(), or
- * synth_event_add_fields() can be used to add more fields following
- * this.
- *
- * The event fields that will be defined for the event should be
- * passed in as an array of struct synth_field_desc, and the number of
- * elements in the array passed in as n_fields. Field ordering will
- * retain the ordering given in the fields array.
- *
- * See synth_field_size() for available types. If field_name contains
- * [n] the field is considered to be an array.
- *
- * Return: 0 if successful, error otherwise.
- */
-int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
- struct module *mod,
- struct synth_field_desc *fields,
- unsigned int n_fields)
-{
- struct dynevent_arg arg;
- unsigned int i;
- int ret = 0;
-
- cmd->event_name = name;
- cmd->private_data = mod;
-
- if (cmd->type != DYNEVENT_TYPE_SYNTH)
- return -EINVAL;
-
- if (n_fields > SYNTH_FIELDS_MAX)
- return -EINVAL;
-
- dynevent_arg_init(&arg, 0);
- arg.str = name;
- ret = dynevent_arg_add(cmd, &arg, NULL);
- if (ret)
- return ret;
-
- for (i = 0; i < n_fields; i++) {
- if (fields[i].type == NULL || fields[i].name == NULL)
- return -EINVAL;
-
- ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
- if (ret)
- break;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
-
-static int __create_synth_event(int argc, const char *name, const char **argv)
-{
- struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
- struct synth_event *event = NULL;
- int i, consumed = 0, n_fields = 0, ret = 0;
-
- /*
- * Argument syntax:
- * - Add synthetic event: <event_name> field[;field] ...
- * - Remove synthetic event: !<event_name> field[;field] ...
- * where 'field' = type field_name
- */
-
- if (name[0] == '\0' || argc < 1)
- return -EINVAL;
-
- mutex_lock(&event_mutex);
-
- event = find_synth_event(name);
- if (event) {
- ret = -EEXIST;
- goto out;
- }
-
- for (i = 0; i < argc - 1; i++) {
- if (strcmp(argv[i], ";") == 0)
- continue;
- if (n_fields == SYNTH_FIELDS_MAX) {
- ret = -EINVAL;
- goto err;
- }
-
- field = parse_synth_field(argc - i, &argv[i], &consumed);
- if (IS_ERR(field)) {
- ret = PTR_ERR(field);
- goto err;
- }
- fields[n_fields++] = field;
- i += consumed - 1;
- }
-
- if (i < argc && strcmp(argv[i], ";") != 0) {
- ret = -EINVAL;
- goto err;
- }
-
- event = alloc_synth_event(name, n_fields, fields);
- if (IS_ERR(event)) {
- ret = PTR_ERR(event);
- event = NULL;
- goto err;
- }
- ret = register_synth_event(event);
- if (!ret)
- dyn_event_add(&event->devent);
- else
- free_synth_event(event);
- out:
- mutex_unlock(&event_mutex);
-
- return ret;
- err:
- for (i = 0; i < n_fields; i++)
- free_synth_field(fields[i]);
-
- goto out;
-}
-
-/**
- * synth_event_create - Create a new synthetic event
- * @name: The name of the new synthetic event
- * @fields: An array of type/name field descriptions
- * @n_fields: The number of field descriptions contained in the fields array
- * @mod: The module creating the event, NULL if not created from a module
- *
- * Create a new synthetic event with the given name under the
- * trace/events/synthetic/ directory. The event fields that will be
- * defined for the event should be passed in as an array of struct
- * synth_field_desc, and the number of elements in the array passed in as
- * n_fields. Field ordering will retain the ordering given in the
- * fields array.
- *
- * If the new synthetic event is being created from a module, the mod
- * param must be non-NULL. This will ensure that the trace buffer
- * won't contain unreadable events.
- *
- * The new synth event should be deleted using synth_event_delete()
- * function. The new synthetic event can be generated from modules or
- * other kernel code using synth_event_trace() and related functions.
- *
- * Return: 0 if successful, error otherwise.
- */
-int synth_event_create(const char *name, struct synth_field_desc *fields,
- unsigned int n_fields, struct module *mod)
-{
- struct dynevent_cmd cmd;
- char *buf;
- int ret;
-
- buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
-
- ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
- fields, n_fields);
- if (ret)
- goto out;
-
- ret = synth_event_gen_cmd_end(&cmd);
- out:
- kfree(buf);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_create);
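
A short sketch of the array-based creation path described above, paired with the matching teardown; the module hooks, event name, and field list are illustrative only:

    static struct synth_field_desc wake_fields[] = {
            { .type = "pid_t",    .name = "pid"  },
            { .type = "u64",      .name = "lat"  },
            { .type = "char[16]", .name = "comm" },
    };

    static int __init create_wake_event(void)
    {
            /* Creates the event under trace/events/synthetic/ */
            return synth_event_create("wake_lat_synth", wake_fields,
                                      ARRAY_SIZE(wake_fields), THIS_MODULE);
    }

    static void __exit remove_wake_event(void)
    {
            /* Tear the event down again when the module goes away */
            synth_event_delete("wake_lat_synth");
    }
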
-
-static int destroy_synth_event(struct synth_event *se)
-{
- int ret;
-
- if (se->ref)
- ret = -EBUSY;
- else {
- ret = unregister_synth_event(se);
- if (!ret) {
- dyn_event_remove(&se->devent);
- free_synth_event(se);
- }
- }
-
- return ret;
-}
-
-/**
- * synth_event_delete - Delete a synthetic event
- * @event_name: The name of the synthetic event to delete
- *
- * Delete a synthetic event that was created with synth_event_create().
- *
- * Return: 0 if successful, error otherwise.
- */
-int synth_event_delete(const char *event_name)
-{
- struct synth_event *se = NULL;
- struct module *mod = NULL;
- int ret = -ENOENT;
-
- mutex_lock(&event_mutex);
- se = find_synth_event(event_name);
- if (se) {
- mod = se->mod;
- ret = destroy_synth_event(se);
- }
- mutex_unlock(&event_mutex);
-
- if (mod) {
- mutex_lock(&trace_types_lock);
- /*
- * It is safest to reset the ring buffer if the module
- * being unloaded registered any events that were
- * used. The only worry is if a new module gets
- * loaded, and takes on the same id as the events of
- * this module. When printing out the buffer, traced
- * events left over from this module may be passed to
- * the new module events and unexpected results may
- * occur.
- */
- tracing_reset_all_online_cpus();
- mutex_unlock(&trace_types_lock);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_delete);
-
-static int create_or_delete_synth_event(int argc, char **argv)
-{
- const char *name = argv[0];
- int ret;
-
- /* trace_run_command() ensures argc != 0 */
- if (name[0] == '!') {
- ret = synth_event_delete(name + 1);
- return ret;
- }
-
- ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
- return ret == -ECANCELED ? -EINVAL : ret;
-}
-
-static int synth_event_run_command(struct dynevent_cmd *cmd)
-{
- struct synth_event *se;
- int ret;
-
- ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
- if (ret)
- return ret;
-
- se = find_synth_event(cmd->event_name);
- if (WARN_ON(!se))
- return -ENOENT;
-
- se->mod = cmd->private_data;
-
- return ret;
-}
-
-/**
- * synth_event_cmd_init - Initialize a synthetic event command object
- * @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @buf: A pointer to the buffer used to build the command
- * @maxlen: The length of the buffer passed in @buf
- *
- * Initialize a synthetic event command object. Use this before
- * calling any of the other dynevent_cmd functions.
- */
-void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
-{
- dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
- synth_event_run_command);
-}
-EXPORT_SYMBOL_GPL(synth_event_cmd_init);
-
-static inline int
-__synth_event_trace_start(struct trace_event_file *file,
- struct synth_event_trace_state *trace_state)
-{
- int entry_size, fields_size = 0;
- int ret = 0;
-
- memset(trace_state, '\0', sizeof(*trace_state));
-
- /*
- * Normal event tracing doesn't get called at all unless the
- * ENABLED bit is set (which attaches the probe thus allowing
- * this code to be called, etc). Because this is called
- * directly by the user, we don't have that but we still need
- * to honor not logging when disabled. For the iterated
- * trace case, we save the enabled state upon start and just
- * ignore the following data calls.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
- trace_trigger_soft_disabled(file)) {
- trace_state->disabled = true;
- ret = -ENOENT;
- goto out;
- }
-
- trace_state->event = file->event_call->data;
-
- fields_size = trace_state->event->n_u64 * sizeof(u64);
-
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- trace_state->buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(trace_state->buffer);
-
- entry_size = sizeof(*trace_state->entry) + fields_size;
- trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
- file,
- entry_size);
- if (!trace_state->entry) {
- ring_buffer_nest_end(trace_state->buffer);
- ret = -EINVAL;
- }
-out:
- return ret;
-}
-
-static inline void
-__synth_event_trace_end(struct synth_event_trace_state *trace_state)
-{
- trace_event_buffer_commit(&trace_state->fbuffer);
-
- ring_buffer_nest_end(trace_state->buffer);
-}
-
-/**
- * synth_event_trace - Trace a synthetic event
- * @file: The trace_event_file representing the synthetic event
- * @n_vals: The number of values in vals
- * @args: Variable number of args containing the event values
- *
- * Trace a synthetic event using the values passed in the variable
- * argument list.
- *
- * The argument list should be a list of 'n_vals' u64 values. The number
- * of vals must match the number of fields in the synthetic event, and
- * must be in the same order as the synthetic event fields.
- *
- * All vals should be cast to u64, and string vals are just pointers
- * to strings, cast to u64. Strings will be copied into space
- * reserved in the event for the string, using these pointers.
- *
- * Return: 0 on success, err otherwise.
- */
-int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
-{
- struct synth_event_trace_state state;
- unsigned int i, n_u64;
- va_list args;
- int ret;
-
- ret = __synth_event_trace_start(file, &state);
- if (ret) {
- if (ret == -ENOENT)
- ret = 0; /* just disabled, not really an error */
- return ret;
- }
-
- if (n_vals != state.event->n_fields) {
- ret = -EINVAL;
- goto out;
- }
-
- va_start(args, n_vals);
- for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
- u64 val;
-
- val = va_arg(args, u64);
-
- if (state.event->fields[i]->is_string) {
- char *str_val = (char *)(long)val;
- char *str_field = (char *)&state.entry->fields[n_u64];
-
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
- } else {
- struct synth_field *field = state.event->fields[i];
-
- switch (field->size) {
- case 1:
- *(u8 *)&state.entry->fields[n_u64] = (u8)val;
- break;
-
- case 2:
- *(u16 *)&state.entry->fields[n_u64] = (u16)val;
- break;
-
- case 4:
- *(u32 *)&state.entry->fields[n_u64] = (u32)val;
- break;
-
- default:
- state.entry->fields[n_u64] = val;
- break;
- }
- n_u64++;
- }
- }
- va_end(args);
-out:
- __synth_event_trace_end(&state);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_trace);
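
A hedged sketch of the varargs form described above. The trace_event_file lookup is not part of this patch (a caller might obtain it via trace_get_event_file(), for example), and the event/field layout matches the hypothetical wake_lat_synth used earlier:

    static int trace_wake_sample(struct trace_event_file *file,
                                 pid_t pid, u64 lat, const char *comm)
    {
            /*
             * Three values, in field order; the string is passed as a
             * pointer cast to u64 and copied into the event.
             */
            return synth_event_trace(file, 3,
                                     (u64)pid,
                                     lat,
                                     (u64)(long)comm);
    }
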
-
-/**
- * synth_event_trace_array - Trace a synthetic event from an array
- * @file: The trace_event_file representing the synthetic event
- * @vals: Array of values
- * @n_vals: The number of values in vals
- *
- * Trace a synthetic event using the values passed in as 'vals'.
- *
- * The 'vals' array is just an array of 'n_vals' u64 values. The number of
- * vals must match the number of fields in the synthetic event, and
- * must be in the same order as the synthetic event fields.
- *
- * All vals should be cast to u64, and string vals are just pointers
- * to strings, cast to u64. Strings will be copied into space
- * reserved in the event for the string, using these pointers.
- *
- * Return: 0 on success, err otherwise.
- */
-int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
- unsigned int n_vals)
-{
- struct synth_event_trace_state state;
- unsigned int i, n_u64;
- int ret;
-
- ret = __synth_event_trace_start(file, &state);
- if (ret) {
- if (ret == -ENOENT)
- ret = 0; /* just disabled, not really an error */
- return ret;
- }
-
- if (n_vals != state.event->n_fields) {
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
- if (state.event->fields[i]->is_string) {
- char *str_val = (char *)(long)vals[i];
- char *str_field = (char *)&state.entry->fields[n_u64];
-
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
- } else {
- struct synth_field *field = state.event->fields[i];
- u64 val = vals[i];
-
- switch (field->size) {
- case 1:
- *(u8 *)&state.entry->fields[n_u64] = (u8)val;
- break;
-
- case 2:
- *(u16 *)&state.entry->fields[n_u64] = (u16)val;
- break;
-
- case 4:
- *(u32 *)&state.entry->fields[n_u64] = (u32)val;
- break;
-
- default:
- state.entry->fields[n_u64] = val;
- break;
- }
- n_u64++;
- }
- }
-out:
- __synth_event_trace_end(&state);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_trace_array);
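
The same hypothetical event traced via the array form; again only a sketch, with the values laid out in field order as the comment above requires:

    static int trace_wake_sample_array(struct trace_event_file *file,
                                       pid_t pid, u64 lat, const char *comm)
    {
            u64 vals[3];

            vals[0] = (u64)pid;             /* pid_t pid */
            vals[1] = lat;                  /* u64 lat */
            vals[2] = (u64)(long)comm;      /* char[16] comm, pointer cast to u64 */

            return synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
    }
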
-
-/**
- * synth_event_trace_start - Start piecewise synthetic event trace
- * @file: The trace_event_file representing the synthetic event
- * @trace_state: A pointer to object tracking the piecewise trace state
- *
- * Start the trace of a synthetic event field-by-field rather than all
- * at once.
- *
- * This function 'opens' an event trace, which means space is reserved
- * for the event in the trace buffer, after which the event's
- * individual field values can be set through either
- * synth_event_add_next_val() or synth_event_add_val().
- *
- * A pointer to a trace_state object is passed in, which will keep
- * track of the current event trace state until the event trace is
- * closed (and the event finally traced) using
- * synth_event_trace_end().
- *
- * Note that synth_event_trace_end() must be called after all values
- * have been added for each event trace, regardless of whether adding
- * all field values succeeded or not.
- *
- * Note also that for a given event trace, all fields must be added
- * using either synth_event_add_next_val() or synth_event_add_val()
- * but not both together or interleaved.
- *
- * Return: 0 on success, err otherwise.
- */
-int synth_event_trace_start(struct trace_event_file *file,
- struct synth_event_trace_state *trace_state)
-{
- int ret;
-
- if (!trace_state)
- return -EINVAL;
-
- ret = __synth_event_trace_start(file, trace_state);
- if (ret == -ENOENT)
- ret = 0; /* just disabled, not really an error */
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(synth_event_trace_start);
-
-static int __synth_event_add_val(const char *field_name, u64 val,
- struct synth_event_trace_state *trace_state)
-{
- struct synth_field *field = NULL;
- struct synth_trace_event *entry;
- struct synth_event *event;
- int i, ret = 0;
-
- if (!trace_state) {
- ret = -EINVAL;
- goto out;
- }
-
- /* can't mix synth_event_add_next_val() with synth_event_add_val() */
- if (field_name) {
- if (trace_state->add_next) {
- ret = -EINVAL;
- goto out;
- }
- trace_state->add_name = true;
- } else {
- if (trace_state->add_name) {
- ret = -EINVAL;
- goto out;
- }
- trace_state->add_next = true;
- }
-
- if (trace_state->disabled)
- goto out;
-
- event = trace_state->event;
- if (trace_state->add_name) {
- for (i = 0; i < event->n_fields; i++) {
- field = event->fields[i];
- if (strcmp(field->name, field_name) == 0)
- break;
- }
- if (!field) {
- ret = -EINVAL;
- goto out;
- }
- } else {
- if (trace_state->cur_field >= event->n_fields) {
- ret = -EINVAL;
- goto out;
- }
- field = event->fields[trace_state->cur_field++];
- }
-
- entry = trace_state->entry;
- if (field->is_string) {
- char *str_val = (char *)(long)val;
- char *str_field;
-
- if (!str_val) {
- ret = -EINVAL;
- goto out;
- }
-
- str_field = (char *)&entry->fields[field->offset];
- strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- } else {
- switch (field->size) {
- case 1:
- *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
- break;
-
- case 2:
- *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
- break;
-
- case 4:
- *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
- break;
-
- default:
- trace_state->entry->fields[field->offset] = val;
- break;
- }
- }
- out:
- return ret;
-}
-
-/**
- * synth_event_add_next_val - Add the next field's value to an open synth trace
- * @val: The value to set the next field to
- * @trace_state: A pointer to object tracking the piecewise trace state
- *
- * Set the value of the next field in an event that's been opened by
- * synth_event_trace_start().
- *
- * The val param should be the value cast to u64. If the value points
- * to a string, the val param should be a char * cast to u64.
- *
- * This function assumes all the fields in an event are to be set one
- * after another - successive calls to this function are made, one for
- * each field, in the order of the fields in the event, until all
- * fields have been set. If you'd rather set each field individually
- * without regard to ordering, synth_event_add_val() can be used
- * instead.
- *
- * Note however that synth_event_add_next_val() and
- * synth_event_add_val() can't be intermixed for a given event trace -
- * one or the other but not both can be used at the same time.
- *
- * Note also that synth_event_trace_end() must be called after all
- * values have been added for each event trace, regardless of whether
- * adding all field values succeeded or not.
- *
- * Return: 0 on success, err otherwise.
- */
-int synth_event_add_next_val(u64 val,
- struct synth_event_trace_state *trace_state)
-{
- return __synth_event_add_val(NULL, val, trace_state);
-}
-EXPORT_SYMBOL_GPL(synth_event_add_next_val);
-
-/**
- * synth_event_add_val - Add a named field's value to an open synth trace
- * @field_name: The name of the synthetic event field value to set
- * @val: The value to set the next field to
- * @trace_state: A pointer to object tracking the piecewise trace state
- *
- * Set the value of the named field in an event that's been opened by
- * synth_event_trace_start().
- *
- * The val param should be the value cast to u64. If the value points
- * to a string, the val param should be a char * cast to u64.
- *
- * This function looks up the field name, and if found, sets the field
- * to the specified value. This lookup makes this function more
- * expensive than synth_event_add_next_val(), so use that or the
- * non-piecewise synth_event_trace() instead if efficiency is more
- * important.
- *
- * Note however that synth_event_add_next_val() and
- * synth_event_add_val() can't be intermixed for a given event trace -
- * one or the other but not both can be used at the same time.
- *
- * Note also that synth_event_trace_end() must be called after all
- * values have been added for each event trace, regardless of whether
- * adding all field values succeeded or not.
- *
- * Return: 0 on success, err otherwise.
- */
-int synth_event_add_val(const char *field_name, u64 val,
- struct synth_event_trace_state *trace_state)
-{
- return __synth_event_add_val(field_name, val, trace_state);
-}
-EXPORT_SYMBOL_GPL(synth_event_add_val);
-
-/**
- * synth_event_trace_end - End piecewise synthetic event trace
- * @trace_state: A pointer to object tracking the piecewise trace state
- *
- * End the trace of a synthetic event opened by
- * synth_event_trace_start().
- *
- * This function 'closes' an event trace, which basically means that
- * it commits the reserved event and cleans up other loose ends.
- *
- * A pointer to a trace_state object is passed in, which will keep
- * track of the current event trace state opened with
- * synth_event_trace_start().
- *
- * Note that this function must be called after all values have been
- * added for each event trace, regardless of whether adding all field
- * values succeeded or not.
- *
- * Return: 0 on success, err otherwise.
- */
-int synth_event_trace_end(struct synth_event_trace_state *trace_state)
-{
- if (!trace_state)
- return -EINVAL;
-
- __synth_event_trace_end(trace_state);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(synth_event_trace_end);
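
A piecewise sketch tying the start/add/end helpers together, using named adds only (the comments above forbid mixing synth_event_add_val() with synth_event_add_next_val() in one trace); names and the surrounding function are illustrative:

    static int trace_wake_sample_piecewise(struct trace_event_file *file,
                                           pid_t pid, u64 lat, const char *comm)
    {
            struct synth_event_trace_state state;
            int ret;

            ret = synth_event_trace_start(file, &state);
            if (ret)
                    return ret;

            /* Fields set by name, in any order */
            ret = synth_event_add_val("lat", lat, &state);
            if (ret)
                    goto out;
            ret = synth_event_add_val("pid", (u64)pid, &state);
            if (ret)
                    goto out;
            ret = synth_event_add_val("comm", (u64)(long)comm, &state);
    out:
            /* Must always be called, even if adding a value failed */
            synth_event_trace_end(&state);
            return ret;
    }
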
-
-static int create_synth_event(int argc, const char **argv)
-{
- const char *name = argv[0];
- int len;
-
- if (name[0] != 's' || name[1] != ':')
- return -ECANCELED;
- name += 2;
-
- /* This interface accepts a group name prefix */
- if (strchr(name, '/')) {
- len = str_has_prefix(name, SYNTH_SYSTEM "/");
- if (len == 0)
- return -EINVAL;
- name += len;
- }
- return __create_synth_event(argc - 1, name, argv + 1);
-}
-
-static int synth_event_release(struct dyn_event *ev)
-{
- struct synth_event *event = to_synth_event(ev);
- int ret;
-
- if (event->ref)
- return -EBUSY;
-
- ret = unregister_synth_event(event);
- if (ret)
- return ret;
-
- dyn_event_remove(ev);
- free_synth_event(event);
- return 0;
-}
-
-static int __synth_event_show(struct seq_file *m, struct synth_event *event)
-{
- struct synth_field *field;
- unsigned int i;
-
- seq_printf(m, "%s\t", event->name);
-
- for (i = 0; i < event->n_fields; i++) {
- field = event->fields[i];
-
- /* parameter values */
- seq_printf(m, "%s %s%s", field->type, field->name,
- i == event->n_fields - 1 ? "" : "; ");
- }
-
- seq_putc(m, '\n');
-
- return 0;
-}
-
-static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
-{
- struct synth_event *event = to_synth_event(ev);
-
- seq_printf(m, "s:%s/", event->class.system);
-
- return __synth_event_show(m, event);
-}
-
-static int synth_events_seq_show(struct seq_file *m, void *v)
-{
- struct dyn_event *ev = v;
-
- if (!is_synth_event(ev))
- return 0;
-
- return __synth_event_show(m, to_synth_event(ev));
-}
-
-static const struct seq_operations synth_events_seq_op = {
- .start = dyn_event_seq_start,
- .next = dyn_event_seq_next,
- .stop = dyn_event_seq_stop,
- .show = synth_events_seq_show,
-};
-
-static int synth_events_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- ret = security_locked_down(LOCKDOWN_TRACEFS);
- if (ret)
- return ret;
-
- if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
- ret = dyn_events_release_all(&synth_event_ops);
- if (ret < 0)
- return ret;
- }
-
- return seq_open(file, &synth_events_seq_op);
-}
-
-static ssize_t synth_events_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- return trace_parse_run_command(file, buffer, count, ppos,
- create_or_delete_synth_event);
-}
-
-static const struct file_operations synth_events_fops = {
- .open = synth_events_open,
- .write = synth_events_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
static u64 hist_field_timestamp(struct hist_field *hist_field,
struct tracing_map_elt *elt,
struct ring_buffer_event *rbe,
@@ -6491,6 +4729,279 @@ const struct file_operations event_hist_fops = {
.release = single_release,
};
+#ifdef CONFIG_HIST_TRIGGERS_DEBUG
+static void hist_field_debug_show_flags(struct seq_file *m,
+ unsigned long flags)
+{
+ seq_puts(m, " flags:\n");
+
+ if (flags & HIST_FIELD_FL_KEY)
+ seq_puts(m, " HIST_FIELD_FL_KEY\n");
+ else if (flags & HIST_FIELD_FL_HITCOUNT)
+ seq_puts(m, " VAL: HIST_FIELD_FL_HITCOUNT\n");
+ else if (flags & HIST_FIELD_FL_VAR)
+ seq_puts(m, " HIST_FIELD_FL_VAR\n");
+ else if (flags & HIST_FIELD_FL_VAR_REF)
+ seq_puts(m, " HIST_FIELD_FL_VAR_REF\n");
+ else
+ seq_puts(m, " VAL: normal u64 value\n");
+
+ if (flags & HIST_FIELD_FL_ALIAS)
+ seq_puts(m, " HIST_FIELD_FL_ALIAS\n");
+}
+
+static int hist_field_debug_show(struct seq_file *m,
+ struct hist_field *field, unsigned long flags)
+{
+ if ((field->flags & flags) != flags) {
+ seq_printf(m, "ERROR: bad flags - %lx\n", flags);
+ return -EINVAL;
+ }
+
+ hist_field_debug_show_flags(m, field->flags);
+ if (field->field)
+ seq_printf(m, " ftrace_event_field name: %s\n",
+ field->field->name);
+
+ if (field->flags & HIST_FIELD_FL_VAR) {
+ seq_printf(m, " var.name: %s\n", field->var.name);
+ seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
+ field->var.idx);
+ }
+
+ if (field->flags & HIST_FIELD_FL_ALIAS)
+ seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
+ field->var_ref_idx);
+
+ if (field->flags & HIST_FIELD_FL_VAR_REF) {
+ seq_printf(m, " name: %s\n", field->name);
+ seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
+ field->var.idx);
+ seq_printf(m, " var.hist_data: %p\n", field->var.hist_data);
+ seq_printf(m, " var_ref_idx (into hist_data->var_refs[]): %u\n",
+ field->var_ref_idx);
+ if (field->system)
+ seq_printf(m, " system: %s\n", field->system);
+ if (field->event_name)
+ seq_printf(m, " event_name: %s\n", field->event_name);
+ }
+
+ seq_printf(m, " type: %s\n", field->type);
+ seq_printf(m, " size: %u\n", field->size);
+ seq_printf(m, " is_signed: %u\n", field->is_signed);
+
+ return 0;
+}
+
+static int field_var_debug_show(struct seq_file *m,
+ struct field_var *field_var, unsigned int i,
+ bool save_vars)
+{
+ const char *vars_name = save_vars ? "save_vars" : "field_vars";
+ struct hist_field *field;
+ int ret = 0;
+
+ seq_printf(m, "\n hist_data->%s[%d]:\n", vars_name, i);
+
+ field = field_var->var;
+
+ seq_printf(m, "\n %s[%d].var:\n", vars_name, i);
+
+ hist_field_debug_show_flags(m, field->flags);
+ seq_printf(m, " var.name: %s\n", field->var.name);
+ seq_printf(m, " var.idx (into tracing_map_elt.vars[]): %u\n",
+ field->var.idx);
+
+ field = field_var->val;
+
+ seq_printf(m, "\n %s[%d].val:\n", vars_name, i);
+ if (field->field)
+ seq_printf(m, " ftrace_event_field name: %s\n",
+ field->field->name);
+ else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ seq_printf(m, " type: %s\n", field->type);
+ seq_printf(m, " size: %u\n", field->size);
+ seq_printf(m, " is_signed: %u\n", field->is_signed);
+out:
+ return ret;
+}
+
+static int hist_action_debug_show(struct seq_file *m,
+ struct action_data *data, int i)
+{
+ int ret = 0;
+
+ if (data->handler == HANDLER_ONMAX ||
+ data->handler == HANDLER_ONCHANGE) {
+ seq_printf(m, "\n hist_data->actions[%d].track_data.var_ref:\n", i);
+ ret = hist_field_debug_show(m, data->track_data.var_ref,
+ HIST_FIELD_FL_VAR_REF);
+ if (ret)
+ goto out;
+
+ seq_printf(m, "\n hist_data->actions[%d].track_data.track_var:\n", i);
+ ret = hist_field_debug_show(m, data->track_data.track_var,
+ HIST_FIELD_FL_VAR);
+ if (ret)
+ goto out;
+ }
+
+ if (data->handler == HANDLER_ONMATCH) {
+ seq_printf(m, "\n hist_data->actions[%d].match_data.event_system: %s\n",
+ i, data->match_data.event_system);
+ seq_printf(m, " hist_data->actions[%d].match_data.event: %s\n",
+ i, data->match_data.event);
+ }
+out:
+ return ret;
+}
+
+static int hist_actions_debug_show(struct seq_file *m,
+ struct hist_trigger_data *hist_data)
+{
+ int i, ret = 0;
+
+ if (hist_data->n_actions)
+ seq_puts(m, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
+
+ for (i = 0; i < hist_data->n_actions; i++) {
+ struct action_data *action = hist_data->actions[i];
+
+ ret = hist_action_debug_show(m, action, i);
+ if (ret)
+ goto out;
+ }
+
+ if (hist_data->n_save_vars)
+ seq_puts(m, "\n save action variables (save() params):\n");
+
+ for (i = 0; i < hist_data->n_save_vars; i++) {
+ ret = field_var_debug_show(m, hist_data->save_vars[i], i, true);
+ if (ret)
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static void hist_trigger_debug_show(struct seq_file *m,
+ struct event_trigger_data *data, int n)
+{
+ struct hist_trigger_data *hist_data;
+ int i, ret;
+
+ if (n > 0)
+ seq_puts(m, "\n\n");
+
+ seq_puts(m, "# event histogram\n#\n# trigger info: ");
+ data->ops->print(m, data->ops, data);
+ seq_puts(m, "#\n\n");
+
+ hist_data = data->private_data;
+
+ seq_printf(m, "hist_data: %p\n\n", hist_data);
+ seq_printf(m, " n_vals: %u\n", hist_data->n_vals);
+ seq_printf(m, " n_keys: %u\n", hist_data->n_keys);
+ seq_printf(m, " n_fields: %u\n", hist_data->n_fields);
+
+ seq_puts(m, "\n val fields:\n\n");
+
+ seq_puts(m, " hist_data->fields[0]:\n");
+ ret = hist_field_debug_show(m, hist_data->fields[0],
+ HIST_FIELD_FL_HITCOUNT);
+ if (ret)
+ return;
+
+ for (i = 1; i < hist_data->n_vals; i++) {
+ seq_printf(m, "\n hist_data->fields[%d]:\n", i);
+ ret = hist_field_debug_show(m, hist_data->fields[i], 0);
+ if (ret)
+ return;
+ }
+
+ seq_puts(m, "\n key fields:\n");
+
+ for (i = hist_data->n_vals; i < hist_data->n_fields; i++) {
+ seq_printf(m, "\n hist_data->fields[%d]:\n", i);
+ ret = hist_field_debug_show(m, hist_data->fields[i],
+ HIST_FIELD_FL_KEY);
+ if (ret)
+ return;
+ }
+
+ if (hist_data->n_var_refs)
+ seq_puts(m, "\n variable reference fields:\n");
+
+ for (i = 0; i < hist_data->n_var_refs; i++) {
+ seq_printf(m, "\n hist_data->var_refs[%d]:\n", i);
+ ret = hist_field_debug_show(m, hist_data->var_refs[i],
+ HIST_FIELD_FL_VAR_REF);
+ if (ret)
+ return;
+ }
+
+ if (hist_data->n_field_vars)
+ seq_puts(m, "\n field variables:\n");
+
+ for (i = 0; i < hist_data->n_field_vars; i++) {
+ ret = field_var_debug_show(m, hist_data->field_vars[i], i, false);
+ if (ret)
+ return;
+ }
+
+ ret = hist_actions_debug_show(m, hist_data);
+ if (ret)
+ return;
+}
+
+static int hist_debug_show(struct seq_file *m, void *v)
+{
+ struct event_trigger_data *data;
+ struct trace_event_file *event_file;
+ int n = 0, ret = 0;
+
+ mutex_lock(&event_mutex);
+
+ event_file = event_file_data(m->private);
+ if (unlikely(!event_file)) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ list_for_each_entry(data, &event_file->triggers, list) {
+ if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
+ hist_trigger_debug_show(m, data, n++);
+ }
+
+ out_unlock:
+ mutex_unlock(&event_mutex);
+
+ return ret;
+}
+
+static int event_hist_debug_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
+ return single_open(file, hist_debug_show, file);
+}
+
+const struct file_operations event_hist_debug_fops = {
+ .open = event_hist_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
{
const char *field_name = hist_field_name(hist_field, 0);
@@ -7393,37 +5904,3 @@ __init int register_trigger_hist_enable_disable_cmds(void)
return ret;
}
-
-static __init int trace_events_hist_init(void)
-{
- struct dentry *entry = NULL;
- struct dentry *d_tracer;
- int err = 0;
-
- err = dyn_event_register(&synth_event_ops);
- if (err) {
- pr_warn("Could not register synth_event_ops\n");
- return err;
- }
-
- d_tracer = tracing_init_dentry();
- if (IS_ERR(d_tracer)) {
- err = PTR_ERR(d_tracer);
- goto err;
- }
-
- entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
- NULL, &synth_events_fops);
- if (!entry) {
- err = -ENODEV;
- goto err;
- }
-
- return err;
- err:
- pr_warn("Could not create tracefs 'synthetic_events' entry\n");
-
- return err;
-}
-
-fs_initcall(trace_events_hist_init);
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
new file mode 100644
index 000000000000..c6cca0d1d584
--- /dev/null
+++ b/kernel/trace/trace_events_synth.c
@@ -0,0 +1,1789 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * trace_events_synth - synthetic trace events
+ *
+ * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/rculist.h>
+#include <linux/tracefs.h>
+
+/* for gfp flag names */
+#include <linux/trace_events.h>
+#include <trace/events/mmflags.h>
+
+#include "trace_synth.h"
+
+static int create_synth_event(int argc, const char **argv);
+static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
+static int synth_event_release(struct dyn_event *ev);
+static bool synth_event_is_busy(struct dyn_event *ev);
+static bool synth_event_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev);
+
+static struct dyn_event_operations synth_event_ops = {
+ .create = create_synth_event,
+ .show = synth_event_show,
+ .is_busy = synth_event_is_busy,
+ .free = synth_event_release,
+ .match = synth_event_match,
+};
+
+static bool is_synth_event(struct dyn_event *ev)
+{
+ return ev->ops == &synth_event_ops;
+}
+
+static struct synth_event *to_synth_event(struct dyn_event *ev)
+{
+ return container_of(ev, struct synth_event, devent);
+}
+
+static bool synth_event_is_busy(struct dyn_event *ev)
+{
+ struct synth_event *event = to_synth_event(ev);
+
+ return event->ref != 0;
+}
+
+static bool synth_event_match(const char *system, const char *event,
+ int argc, const char **argv, struct dyn_event *ev)
+{
+ struct synth_event *sev = to_synth_event(ev);
+
+ return strcmp(sev->name, event) == 0 &&
+ (!system || strcmp(system, SYNTH_SYSTEM) == 0);
+}
+
+struct synth_trace_event {
+ struct trace_entry ent;
+ u64 fields[];
+};
+
+static int synth_event_define_fields(struct trace_event_call *call)
+{
+ struct synth_trace_event trace;
+ int offset = offsetof(typeof(trace), fields);
+ struct synth_event *event = call->data;
+ unsigned int i, size, n_u64;
+ char *name, *type;
+ bool is_signed;
+ int ret = 0;
+
+ for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ size = event->fields[i]->size;
+ is_signed = event->fields[i]->is_signed;
+ type = event->fields[i]->type;
+ name = event->fields[i]->name;
+ ret = trace_define_field(call, type, name, offset, size,
+ is_signed, FILTER_OTHER);
+ if (ret)
+ break;
+
+ event->fields[i]->offset = n_u64;
+
+ if (event->fields[i]->is_string) {
+ offset += STR_VAR_LEN_MAX;
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ offset += sizeof(u64);
+ n_u64++;
+ }
+ }
+
+ event->n_u64 = n_u64;
+
+ return ret;
+}
+
+static bool synth_field_signed(char *type)
+{
+ if (str_has_prefix(type, "u"))
+ return false;
+ if (strcmp(type, "gfp_t") == 0)
+ return false;
+
+ return true;
+}
+
+static int synth_field_is_string(char *type)
+{
+ if (strstr(type, "char[") != NULL)
+ return true;
+
+ return false;
+}
+
+static int synth_field_string_size(char *type)
+{
+ char buf[4], *end, *start;
+ unsigned int len;
+ int size, err;
+
+ start = strstr(type, "char[");
+ if (start == NULL)
+ return -EINVAL;
+ start += sizeof("char[") - 1;
+
+ end = strchr(type, ']');
+ if (!end || end < start)
+ return -EINVAL;
+
+ len = end - start;
+ if (len > 3)
+ return -EINVAL;
+
+ strncpy(buf, start, len);
+ buf[len] = '\0';
+
+ err = kstrtouint(buf, 0, &size);
+ if (err)
+ return err;
+
+ if (size > STR_VAR_LEN_MAX)
+ return -EINVAL;
+
+ return size;
+}
+
+static int synth_field_size(char *type)
+{
+ int size = 0;
+
+ if (strcmp(type, "s64") == 0)
+ size = sizeof(s64);
+ else if (strcmp(type, "u64") == 0)
+ size = sizeof(u64);
+ else if (strcmp(type, "s32") == 0)
+ size = sizeof(s32);
+ else if (strcmp(type, "u32") == 0)
+ size = sizeof(u32);
+ else if (strcmp(type, "s16") == 0)
+ size = sizeof(s16);
+ else if (strcmp(type, "u16") == 0)
+ size = sizeof(u16);
+ else if (strcmp(type, "s8") == 0)
+ size = sizeof(s8);
+ else if (strcmp(type, "u8") == 0)
+ size = sizeof(u8);
+ else if (strcmp(type, "char") == 0)
+ size = sizeof(char);
+ else if (strcmp(type, "unsigned char") == 0)
+ size = sizeof(unsigned char);
+ else if (strcmp(type, "int") == 0)
+ size = sizeof(int);
+ else if (strcmp(type, "unsigned int") == 0)
+ size = sizeof(unsigned int);
+ else if (strcmp(type, "long") == 0)
+ size = sizeof(long);
+ else if (strcmp(type, "unsigned long") == 0)
+ size = sizeof(unsigned long);
+ else if (strcmp(type, "pid_t") == 0)
+ size = sizeof(pid_t);
+ else if (strcmp(type, "gfp_t") == 0)
+ size = sizeof(gfp_t);
+ else if (synth_field_is_string(type))
+ size = synth_field_string_size(type);
+
+ return size;
+}
+
+static const char *synth_field_fmt(char *type)
+{
+ const char *fmt = "%llu";
+
+ if (strcmp(type, "s64") == 0)
+ fmt = "%lld";
+ else if (strcmp(type, "u64") == 0)
+ fmt = "%llu";
+ else if (strcmp(type, "s32") == 0)
+ fmt = "%d";
+ else if (strcmp(type, "u32") == 0)
+ fmt = "%u";
+ else if (strcmp(type, "s16") == 0)
+ fmt = "%d";
+ else if (strcmp(type, "u16") == 0)
+ fmt = "%u";
+ else if (strcmp(type, "s8") == 0)
+ fmt = "%d";
+ else if (strcmp(type, "u8") == 0)
+ fmt = "%u";
+ else if (strcmp(type, "char") == 0)
+ fmt = "%d";
+ else if (strcmp(type, "unsigned char") == 0)
+ fmt = "%u";
+ else if (strcmp(type, "int") == 0)
+ fmt = "%d";
+ else if (strcmp(type, "unsigned int") == 0)
+ fmt = "%u";
+ else if (strcmp(type, "long") == 0)
+ fmt = "%ld";
+ else if (strcmp(type, "unsigned long") == 0)
+ fmt = "%lu";
+ else if (strcmp(type, "pid_t") == 0)
+ fmt = "%d";
+ else if (strcmp(type, "gfp_t") == 0)
+ fmt = "%x";
+ else if (synth_field_is_string(type))
+ fmt = "%s";
+
+ return fmt;
+}
+
+static void print_synth_event_num_val(struct trace_seq *s,
+ char *print_fmt, char *name,
+ int size, u64 val, char *space)
+{
+ switch (size) {
+ case 1:
+ trace_seq_printf(s, print_fmt, name, (u8)val, space);
+ break;
+
+ case 2:
+ trace_seq_printf(s, print_fmt, name, (u16)val, space);
+ break;
+
+ case 4:
+ trace_seq_printf(s, print_fmt, name, (u32)val, space);
+ break;
+
+ default:
+ trace_seq_printf(s, print_fmt, name, val, space);
+ break;
+ }
+}
+
+static enum print_line_t print_synth_event(struct trace_iterator *iter,
+ int flags,
+ struct trace_event *event)
+{
+ struct trace_array *tr = iter->tr;
+ struct trace_seq *s = &iter->seq;
+ struct synth_trace_event *entry;
+ struct synth_event *se;
+ unsigned int i, n_u64;
+ char print_fmt[32];
+ const char *fmt;
+
+ entry = (struct synth_trace_event *)iter->ent;
+ se = container_of(event, struct synth_event, call.event);
+
+ trace_seq_printf(s, "%s: ", se->name);
+
+ for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
+ if (trace_seq_has_overflowed(s))
+ goto end;
+
+ fmt = synth_field_fmt(se->fields[i]->type);
+
+ /* parameter types */
+ if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
+ trace_seq_printf(s, "%s ", fmt);
+
+ snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
+
+ /* parameter values */
+ if (se->fields[i]->is_string) {
+ trace_seq_printf(s, print_fmt, se->fields[i]->name,
+ (char *)&entry->fields[n_u64],
+ i == se->n_fields - 1 ? "" : " ");
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ struct trace_print_flags __flags[] = {
+ __def_gfpflag_names, {-1, NULL} };
+ char *space = (i == se->n_fields - 1 ? "" : " ");
+
+ print_synth_event_num_val(s, print_fmt,
+ se->fields[i]->name,
+ se->fields[i]->size,
+ entry->fields[n_u64],
+ space);
+
+ if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
+ trace_seq_puts(s, " (");
+ trace_print_flags_seq(s, "|",
+ entry->fields[n_u64],
+ __flags);
+ trace_seq_putc(s, ')');
+ }
+ n_u64++;
+ }
+ }
+end:
+ trace_seq_putc(s, '\n');
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions synth_event_funcs = {
+ .trace = print_synth_event
+};
+
+static notrace void trace_event_raw_event_synth(void *__data,
+ u64 *var_ref_vals,
+ unsigned int *var_ref_idx)
+{
+ struct trace_event_file *trace_file = __data;
+ struct synth_trace_event *entry;
+ struct trace_event_buffer fbuffer;
+ struct trace_buffer *buffer;
+ struct synth_event *event;
+ unsigned int i, n_u64, val_idx;
+ int fields_size = 0;
+
+ event = trace_file->event_call->data;
+
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+ fields_size = event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ buffer = trace_file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(buffer);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + fields_size);
+ if (!entry)
+ goto out;
+
+ for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ val_idx = var_ref_idx[i];
+ if (event->fields[i]->is_string) {
+ char *str_val = (char *)(long)var_ref_vals[val_idx];
+ char *str_field = (char *)&entry->fields[n_u64];
+
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ struct synth_field *field = event->fields[i];
+ u64 val = var_ref_vals[val_idx];
+
+ switch (field->size) {
+ case 1:
+ *(u8 *)&entry->fields[n_u64] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&entry->fields[n_u64] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&entry->fields[n_u64] = (u32)val;
+ break;
+
+ default:
+ entry->fields[n_u64] = val;
+ break;
+ }
+ n_u64++;
+ }
+ }
+
+ trace_event_buffer_commit(&fbuffer);
+out:
+ ring_buffer_nest_end(buffer);
+}
+
+static void free_synth_event_print_fmt(struct trace_event_call *call)
+{
+ if (call) {
+ kfree(call->print_fmt);
+ call->print_fmt = NULL;
+ }
+}
+
+static int __set_synth_event_print_fmt(struct synth_event *event,
+ char *buf, int len)
+{
+ const char *fmt;
+ int pos = 0;
+ int i;
+
+ /* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
+
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
+ for (i = 0; i < event->n_fields; i++) {
+ fmt = synth_field_fmt(event->fields[i]->type);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
+ event->fields[i]->name, fmt,
+ i == event->n_fields - 1 ? "" : ", ");
+ }
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
+
+ for (i = 0; i < event->n_fields; i++) {
+ pos += snprintf(buf + pos, LEN_OR_ZERO,
+ ", REC->%s", event->fields[i]->name);
+ }
+
+#undef LEN_OR_ZERO
+
+ /* return the length of print_fmt */
+ return pos;
+}
+
+static int set_synth_event_print_fmt(struct trace_event_call *call)
+{
+ struct synth_event *event = call->data;
+ char *print_fmt;
+ int len;
+
+ /* First: called with 0 length to calculate the needed length */
+ len = __set_synth_event_print_fmt(event, NULL, 0);
+
+ print_fmt = kmalloc(len + 1, GFP_KERNEL);
+ if (!print_fmt)
+ return -ENOMEM;
+
+ /* Second: actually write the @print_fmt */
+ __set_synth_event_print_fmt(event, print_fmt, len + 1);
+ call->print_fmt = print_fmt;
+
+ return 0;
+}
+
+static void free_synth_field(struct synth_field *field)
+{
+ kfree(field->type);
+ kfree(field->name);
+ kfree(field);
+}
+
+static struct synth_field *parse_synth_field(int argc, const char **argv,
+ int *consumed)
+{
+ struct synth_field *field;
+ const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
+ int len, ret = 0;
+
+ if (field_type[0] == ';')
+ field_type++;
+
+ if (!strcmp(field_type, "unsigned")) {
+ if (argc < 3)
+ return ERR_PTR(-EINVAL);
+ prefix = "unsigned ";
+ field_type = argv[1];
+ field_name = argv[2];
+ *consumed = 3;
+ } else {
+ field_name = argv[1];
+ *consumed = 2;
+ }
+
+ field = kzalloc(sizeof(*field), GFP_KERNEL);
+ if (!field)
+ return ERR_PTR(-ENOMEM);
+
+ len = strlen(field_name);
+ array = strchr(field_name, '[');
+ if (array)
+ len -= strlen(array);
+ else if (field_name[len - 1] == ';')
+ len--;
+
+ field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
+ if (!field->name) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if (field_type[0] == ';')
+ field_type++;
+ len = strlen(field_type) + 1;
+ if (array)
+ len += strlen(array);
+ if (prefix)
+ len += strlen(prefix);
+
+ field->type = kzalloc(len, GFP_KERNEL);
+ if (!field->type) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ if (prefix)
+ strcat(field->type, prefix);
+ strcat(field->type, field_type);
+ if (array) {
+ strcat(field->type, array);
+ if (field->type[len - 1] == ';')
+ field->type[len - 1] = '\0';
+ }
+
+ field->size = synth_field_size(field->type);
+ if (!field->size) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ if (synth_field_is_string(field->type))
+ field->is_string = true;
+
+ field->is_signed = synth_field_signed(field->type);
+
+ out:
+ return field;
+ free:
+ free_synth_field(field);
+ field = ERR_PTR(ret);
+ goto out;
+}
+
+static void free_synth_tracepoint(struct tracepoint *tp)
+{
+ if (!tp)
+ return;
+
+ kfree(tp->name);
+ kfree(tp);
+}
+
+static struct tracepoint *alloc_synth_tracepoint(char *name)
+{
+ struct tracepoint *tp;
+
+ tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+ if (!tp)
+ return ERR_PTR(-ENOMEM);
+
+ tp->name = kstrdup(name, GFP_KERNEL);
+ if (!tp->name) {
+ kfree(tp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return tp;
+}
+
+struct synth_event *find_synth_event(const char *name)
+{
+ struct dyn_event *pos;
+ struct synth_event *event;
+
+ for_each_dyn_event(pos) {
+ if (!is_synth_event(pos))
+ continue;
+ event = to_synth_event(pos);
+ if (strcmp(event->name, name) == 0)
+ return event;
+ }
+
+ return NULL;
+}
+
+static struct trace_event_fields synth_event_fields_array[] = {
+ { .type = TRACE_FUNCTION_TYPE,
+ .define_fields = synth_event_define_fields },
+ {}
+};
+
+static int register_synth_event(struct synth_event *event)
+{
+ struct trace_event_call *call = &event->call;
+ int ret = 0;
+
+ event->call.class = &event->class;
+ event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
+ if (!event->class.system) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ event->tp = alloc_synth_tracepoint(event->name);
+ if (IS_ERR(event->tp)) {
+ ret = PTR_ERR(event->tp);
+ event->tp = NULL;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&call->class->fields);
+ call->event.funcs = &synth_event_funcs;
+ call->class->fields_array = synth_event_fields_array;
+
+ ret = register_trace_event(&call->event);
+ if (!ret) {
+ ret = -ENODEV;
+ goto out;
+ }
+ call->flags = TRACE_EVENT_FL_TRACEPOINT;
+ call->class->reg = trace_event_reg;
+ call->class->probe = trace_event_raw_event_synth;
+ call->data = event;
+ call->tp = event->tp;
+
+ ret = trace_add_event_call(call);
+ if (ret) {
+ pr_warn("Failed to register synthetic event: %s\n",
+ trace_event_name(call));
+ goto err;
+ }
+
+ ret = set_synth_event_print_fmt(call);
+ if (ret < 0) {
+ trace_remove_event_call(call);
+ goto err;
+ }
+ out:
+ return ret;
+ err:
+ unregister_trace_event(&call->event);
+ goto out;
+}
+
+static int unregister_synth_event(struct synth_event *event)
+{
+ struct trace_event_call *call = &event->call;
+ int ret;
+
+ ret = trace_remove_event_call(call);
+
+ return ret;
+}
+
+static void free_synth_event(struct synth_event *event)
+{
+ unsigned int i;
+
+ if (!event)
+ return;
+
+ for (i = 0; i < event->n_fields; i++)
+ free_synth_field(event->fields[i]);
+
+ kfree(event->fields);
+ kfree(event->name);
+ kfree(event->class.system);
+ free_synth_tracepoint(event->tp);
+ free_synth_event_print_fmt(&event->call);
+ kfree(event);
+}
+
+static struct synth_event *alloc_synth_event(const char *name, int n_fields,
+ struct synth_field **fields)
+{
+ struct synth_event *event;
+ unsigned int i;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event) {
+ event = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ event->name = kstrdup(name, GFP_KERNEL);
+ if (!event->name) {
+ kfree(event);
+ event = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
+ if (!event->fields) {
+ free_synth_event(event);
+ event = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ dyn_event_init(&event->devent, &synth_event_ops);
+
+ for (i = 0; i < n_fields; i++)
+ event->fields[i] = fields[i];
+
+ event->n_fields = n_fields;
+ out:
+ return event;
+}
+
+static int synth_event_check_arg_fn(void *data)
+{
+ struct dynevent_arg_pair *arg_pair = data;
+ int size;
+
+ size = synth_field_size((char *)arg_pair->lhs);
+
+ return size ? 0 : -EINVAL;
+}
+
+/**
+ * synth_event_add_field - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type: The type of the new field to add
+ * @name: The name of the new field to add
+ *
+ * Add a new field to a synthetic event cmd object. Field ordering is in
+ * the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
+ const char *name)
+{
+ struct dynevent_arg_pair arg_pair;
+ int ret;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ if (!type || !name)
+ return -EINVAL;
+
+ dynevent_arg_pair_init(&arg_pair, 0, ';');
+
+ arg_pair.lhs = type;
+ arg_pair.rhs = name;
+
+ ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
+ if (ret)
+ return ret;
+
+ if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field);
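+
+/*
+ * A minimal usage sketch for the above: build up a synthetic event one
+ * field at a time, assuming a command initialized with
+ * synth_event_cmd_init() and started with the synth_event_gen_cmd_start()
+ * wrapper. The "test_latency" event and its field names are hypothetical
+ * and error handling is abbreviated:
+ *
+ *    struct dynevent_cmd cmd;
+ *    char *buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ *    int ret;
+ *
+ *    synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+ *    ret = synth_event_gen_cmd_start(&cmd, "test_latency", THIS_MODULE);
+ *    if (!ret)
+ *            ret = synth_event_add_field(&cmd, "pid_t", "pid");
+ *    if (!ret)
+ *            ret = synth_event_add_field(&cmd, "u64", "lat");
+ *    if (!ret)
+ *            ret = synth_event_gen_cmd_end(&cmd);
+ *    kfree(buf);
+ */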
+
+/**
+ * synth_event_add_field_str - Add a new field to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @type_name: The type and name of the new field to add, as a single string
+ *
+ * Add a new field to a synthetic event cmd object, as a single
+ * string. The @type_name string is expected to be of the form 'type
+ * name', to which a ';' will be appended. No sanity checking is done -
+ * what's passed in is assumed to already be well-formed. Field
+ * ordering is in the same order the fields are added.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
+{
+ struct dynevent_arg arg;
+ int ret;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ if (!type_name)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, ';');
+
+ arg.str = type_name;
+
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ if (++cmd->n_fields > SYNTH_FIELDS_MAX)
+ ret = -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_field_str);
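+
+/*
+ * A minimal sketch for synth_event_add_field_str(), continuing the
+ * hypothetical command above; the combined type/name strings are assumed
+ * to be well-formed, as noted above:
+ *
+ *    ret = synth_event_add_field_str(&cmd, "unsigned int cpu");
+ *    if (!ret)
+ *            ret = synth_event_add_field_str(&cmd, "s64 delta");
+ */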
+
+/**
+ * synth_event_add_fields - Add multiple fields to a synthetic event cmd
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Add a new set of fields to a synthetic event cmd object. The event
+ * fields that will be defined for the event should be passed in as an
+ * array of struct synth_field_desc, and the number of elements in the
+ * array passed in as n_fields. Field ordering will retain the
+ * ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_add_fields(struct dynevent_cmd *cmd,
+ struct synth_field_desc *fields,
+ unsigned int n_fields)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < n_fields; i++) {
+ if (fields[i].type == NULL || fields[i].name == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_add_fields);
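+
+/*
+ * A minimal sketch for synth_event_add_fields(), using a hypothetical
+ * field description array:
+ *
+ *    static struct synth_field_desc wake_fields[] = {
+ *            { .type = "pid_t", .name = "pid" },
+ *            { .type = "u64",   .name = "delay" },
+ *    };
+ *
+ *    ret = synth_event_add_fields(&cmd, wake_fields,
+ *                                 ARRAY_SIZE(wake_fields));
+ */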
+
+/**
+ * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @mod: The module creating the event, NULL if not created from a module
+ * @args: Variable number of arg (pairs), one pair for each field
+ *
+ * NOTE: Users normally won't want to call this function directly, but
+ * rather use the synth_event_gen_cmd_start() wrapper, which
+ * automatically adds a NULL to the end of the arg list. If this
+ * function is used directly, make sure the last arg in the variable
+ * arg list is NULL.
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end(). This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * There should be an even number of variable args, each pair consisting
+ * of a type followed by a field name.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
+ struct module *mod, ...)
+{
+ struct dynevent_arg arg;
+ va_list args;
+ int ret;
+
+ cmd->event_name = name;
+ cmd->private_data = mod;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, 0);
+ arg.str = name;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ va_start(args, mod);
+ for (;;) {
+ const char *type, *name;
+
+ type = va_arg(args, const char *);
+ if (!type)
+ break;
+ name = va_arg(args, const char *);
+ if (!name)
+ break;
+
+ if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = synth_event_add_field(cmd, type, name);
+ if (ret)
+ break;
+ }
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
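+
+/*
+ * A minimal sketch using the synth_event_gen_cmd_start() wrapper with
+ * inline type/name pairs (hypothetical event and field names, error
+ * handling abbreviated):
+ *
+ *    ret = synth_event_gen_cmd_start(&cmd, "block_lat", THIS_MODULE,
+ *                                    "pid_t", "pid",
+ *                                    "u64", "delta");
+ *    if (!ret)
+ *            ret = synth_event_gen_cmd_end(&cmd);
+ */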
+
+/**
+ * synth_event_gen_cmd_array_start - Start synthetic event command from an array
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @name: The name of the synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ *
+ * Generate a synthetic event command to be executed by
+ * synth_event_gen_cmd_end(). This function can be used to generate
+ * the complete command or only the first part of it; in the latter
+ * case, synth_event_add_field(), synth_event_add_field_str(), or
+ * synth_event_add_fields() can be used to add more fields following
+ * this.
+ *
+ * The event fields that will be defined for the event should be
+ * passed in as an array of struct synth_field_desc, and the number of
+ * elements in the array passed in as n_fields. Field ordering will
+ * retain the ordering given in the fields array.
+ *
+ * See synth_field_size() for available types. If field_name contains
+ * [n] the field is considered to be an array.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
+ struct module *mod,
+ struct synth_field_desc *fields,
+ unsigned int n_fields)
+{
+ struct dynevent_arg arg;
+ unsigned int i;
+ int ret = 0;
+
+ cmd->event_name = name;
+ cmd->private_data = mod;
+
+ if (cmd->type != DYNEVENT_TYPE_SYNTH)
+ return -EINVAL;
+
+ if (n_fields > SYNTH_FIELDS_MAX)
+ return -EINVAL;
+
+ dynevent_arg_init(&arg, 0);
+ arg.str = name;
+ ret = dynevent_arg_add(cmd, &arg, NULL);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < n_fields; i++) {
+ if (fields[i].type == NULL || fields[i].name == NULL)
+ return -EINVAL;
+
+ ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
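+
+/*
+ * A minimal sketch for synth_event_gen_cmd_array_start(), reusing the
+ * hypothetical wake_fields[] array shown earlier:
+ *
+ *    ret = synth_event_gen_cmd_array_start(&cmd, "wake_lat", THIS_MODULE,
+ *                                          wake_fields,
+ *                                          ARRAY_SIZE(wake_fields));
+ *    if (!ret)
+ *            ret = synth_event_gen_cmd_end(&cmd);
+ */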
+
+static int __create_synth_event(int argc, const char *name, const char **argv)
+{
+ struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
+ struct synth_event *event = NULL;
+ int i, consumed = 0, n_fields = 0, ret = 0;
+
+ /*
+ * Argument syntax:
+ * - Add synthetic event: <event_name> field[;field] ...
+ * - Remove synthetic event: !<event_name> field[;field] ...
+ * where 'field' = type field_name
+ */
+
+ if (name[0] == '\0' || argc < 1)
+ return -EINVAL;
+
+ mutex_lock(&event_mutex);
+
+ event = find_synth_event(name);
+ if (event) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ for (i = 0; i < argc - 1; i++) {
+ if (strcmp(argv[i], ";") == 0)
+ continue;
+ if (n_fields == SYNTH_FIELDS_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ field = parse_synth_field(argc - i, &argv[i], &consumed);
+ if (IS_ERR(field)) {
+ ret = PTR_ERR(field);
+ goto err;
+ }
+ fields[n_fields++] = field;
+ i += consumed - 1;
+ }
+
+ if (i < argc && strcmp(argv[i], ";") != 0) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ event = alloc_synth_event(name, n_fields, fields);
+ if (IS_ERR(event)) {
+ ret = PTR_ERR(event);
+ event = NULL;
+ goto err;
+ }
+ ret = register_synth_event(event);
+ if (!ret)
+ dyn_event_add(&event->devent);
+ else
+ free_synth_event(event);
+ out:
+ mutex_unlock(&event_mutex);
+
+ return ret;
+ err:
+ for (i = 0; i < n_fields; i++)
+ free_synth_field(fields[i]);
+
+ goto out;
+}
+
+/**
+ * synth_event_create - Create a new synthetic event
+ * @name: The name of the new synthetic event
+ * @fields: An array of type/name field descriptions
+ * @n_fields: The number of field descriptions contained in the fields array
+ * @mod: The module creating the event, NULL if not created from a module
+ *
+ * Create a new synthetic event with the given name under the
+ * trace/events/synthetic/ directory. The event fields that will be
+ * defined for the event should be passed in as an array of struct
+ * synth_field_desc, and the number of elements in the array passed in as
+ * n_fields. Field ordering will retain the ordering given in the
+ * fields array.
+ *
+ * If the new synthetic event is being created from a module, the mod
+ * param must be non-NULL. This will ensure that the trace buffer
+ * won't contain unreadable events.
+ *
+ * The new synth event should be deleted using the synth_event_delete()
+ * function. The new synthetic event can be generated from modules or
+ * other kernel code using synth_event_trace() and related functions.
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_create(const char *name, struct synth_field_desc *fields,
+ unsigned int n_fields, struct module *mod)
+{
+ struct dynevent_cmd cmd;
+ char *buf;
+ int ret;
+
+ buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
+
+ ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
+ fields, n_fields);
+ if (ret)
+ goto out;
+
+ ret = synth_event_gen_cmd_end(&cmd);
+ out:
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_create);
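+
+/*
+ * A minimal sketch for synth_event_create(); the event name and field
+ * descriptions are hypothetical:
+ *
+ *    static struct synth_field_desc sched_fields[] = {
+ *            { .type = "pid_t", .name = "pid" },
+ *            { .type = "u64",   .name = "runtime" },
+ *    };
+ *
+ *    ret = synth_event_create("sched_runtime", sched_fields,
+ *                             ARRAY_SIZE(sched_fields), THIS_MODULE);
+ */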
+
+static int destroy_synth_event(struct synth_event *se)
+{
+ int ret;
+
+ if (se->ref)
+ ret = -EBUSY;
+ else {
+ ret = unregister_synth_event(se);
+ if (!ret) {
+ dyn_event_remove(&se->devent);
+ free_synth_event(se);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * synth_event_delete - Delete a synthetic event
+ * @event_name: The name of the synthetic event to delete
+ *
+ * Delete a synthetic event that was created with synth_event_create().
+ *
+ * Return: 0 if successful, error otherwise.
+ */
+int synth_event_delete(const char *event_name)
+{
+ struct synth_event *se = NULL;
+ struct module *mod = NULL;
+ int ret = -ENOENT;
+
+ mutex_lock(&event_mutex);
+ se = find_synth_event(event_name);
+ if (se) {
+ mod = se->mod;
+ ret = destroy_synth_event(se);
+ }
+ mutex_unlock(&event_mutex);
+
+ if (mod) {
+ mutex_lock(&trace_types_lock);
+ /*
+ * It is safest to reset the ring buffer if the module
+ * being unloaded registered any events that were
+ * used. The only worry is if a new module gets
+ * loaded, and takes on the same id as the events of
+ * this module. When printing out the buffer, traced
+ * events left over from this module may be passed to
+ * the new module events and unexpected results may
+ * occur.
+ */
+ tracing_reset_all_online_cpus();
+ mutex_unlock(&trace_types_lock);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_delete);
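+
+/*
+ * A minimal sketch for synth_event_delete(), removing the hypothetical
+ * event created above; -EBUSY is returned while the event is still
+ * referenced:
+ *
+ *    ret = synth_event_delete("sched_runtime");
+ */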
+
+static int create_or_delete_synth_event(int argc, char **argv)
+{
+ const char *name = argv[0];
+ int ret;
+
+ /* trace_run_command() ensures argc != 0 */
+ if (name[0] == '!') {
+ ret = synth_event_delete(name + 1);
+ return ret;
+ }
+
+ ret = __create_synth_event(argc - 1, name, (const char **)argv + 1);
+ return ret == -ECANCELED ? -EINVAL : ret;
+}
+
+static int synth_event_run_command(struct dynevent_cmd *cmd)
+{
+ struct synth_event *se;
+ int ret;
+
+ ret = trace_run_command(cmd->seq.buffer, create_or_delete_synth_event);
+ if (ret)
+ return ret;
+
+ se = find_synth_event(cmd->event_name);
+ if (WARN_ON(!se))
+ return -ENOENT;
+
+ se->mod = cmd->private_data;
+
+ return ret;
+}
+
+/**
+ * synth_event_cmd_init - Initialize a synthetic event command object
+ * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @buf: A pointer to the buffer used to build the command
+ * @maxlen: The length of the buffer passed in @buf
+ *
+ * Initialize a synthetic event command object. Use this before
+ * calling any of the other dynevent_cmd functions.
+ */
+void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
+{
+ dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
+ synth_event_run_command);
+}
+EXPORT_SYMBOL_GPL(synth_event_cmd_init);
+
+static inline int
+__synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state)
+{
+ int entry_size, fields_size = 0;
+ int ret = 0;
+
+ memset(trace_state, '\0', sizeof(*trace_state));
+
+ /*
+ * Normal event tracing doesn't get called at all unless the
+ * ENABLED bit is set (which attaches the probe thus allowing
+ * this code to be called, etc). Because this is called
+ * directly by the user, we don't have that but we still need
+ * to honor not logging when disabled. For the iterated
+ * trace case, we save the enabled state upon start and just
+ * ignore the following data calls.
+ */
+ if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
+ trace_trigger_soft_disabled(file)) {
+ trace_state->disabled = true;
+ ret = -ENOENT;
+ goto out;
+ }
+
+ trace_state->event = file->event_call->data;
+
+ fields_size = trace_state->event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ trace_state->buffer = file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(trace_state->buffer);
+
+ entry_size = sizeof(*trace_state->entry) + fields_size;
+ trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
+ file,
+ entry_size);
+ if (!trace_state->entry) {
+ ring_buffer_nest_end(trace_state->buffer);
+ ret = -EINVAL;
+ }
+out:
+ return ret;
+}
+
+static inline void
+__synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+ trace_event_buffer_commit(&trace_state->fbuffer);
+
+ ring_buffer_nest_end(trace_state->buffer);
+}
+
+/**
+ * synth_event_trace - Trace a synthetic event
+ * @file: The trace_event_file representing the synthetic event
+ * @n_vals: The number of values that follow in the variable argument list
+ * @args: Variable number of args containing the event values
+ *
+ * Trace a synthetic event using the values passed in the variable
+ * argument list.
+ *
+ * The argument list should be a list of 'n_vals' u64 values. The number
+ * of vals must match the number of fields in the synthetic event, and
+ * must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64. Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
+{
+ struct synth_event_trace_state state;
+ unsigned int i, n_u64;
+ va_list args;
+ int ret;
+
+ ret = __synth_event_trace_start(file, &state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
+ }
+
+ if (n_vals != state.event->n_fields) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ va_start(args, n_vals);
+ for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
+ u64 val;
+
+ val = va_arg(args, u64);
+
+ if (state.event->fields[i]->is_string) {
+ char *str_val = (char *)(long)val;
+ char *str_field = (char *)&state.entry->fields[n_u64];
+
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ struct synth_field *field = state.event->fields[i];
+
+ switch (field->size) {
+ case 1:
+ *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ break;
+
+ default:
+ state.entry->fields[n_u64] = val;
+ break;
+ }
+ n_u64++;
+ }
+ }
+ va_end(args);
+out:
+ __synth_event_trace_end(&state);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace);
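+
+/*
+ * A minimal sketch for synth_event_trace(). The trace_event_file lookup
+ * via trace_get_event_file()/trace_put_event_file() is an assumption
+ * about the surrounding in-kernel API, and the event name and values
+ * are hypothetical:
+ *
+ *    struct trace_event_file *file;
+ *
+ *    file = trace_get_event_file(NULL, "synthetic", "sched_runtime");
+ *    if (!IS_ERR(file)) {
+ *            ret = synth_event_trace(file, 2,
+ *                                    (u64)current->pid,
+ *                                    (u64)123456);
+ *            trace_put_event_file(file);
+ *    }
+ */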
+
+/**
+ * synth_event_trace_array - Trace a synthetic event from an array
+ * @file: The trace_event_file representing the synthetic event
+ * @vals: Array of values
+ * @n_vals: The number of values in vals
+ *
+ * Trace a synthetic event using the values passed in as 'vals'.
+ *
+ * The 'vals' array is just an array of 'n_vals' u64. The number of
+ * vals must match the number of fields in the synthetic event, and
+ * must be in the same order as the synthetic event fields.
+ *
+ * All vals should be cast to u64, and string vals are just pointers
+ * to strings, cast to u64. Strings will be copied into space
+ * reserved in the event for the string, using these pointers.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+ unsigned int n_vals)
+{
+ struct synth_event_trace_state state;
+ unsigned int i, n_u64;
+ int ret;
+
+ ret = __synth_event_trace_start(file, &state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
+ }
+
+ if (n_vals != state.event->n_fields) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
+ if (state.event->fields[i]->is_string) {
+ char *str_val = (char *)(long)vals[i];
+ char *str_field = (char *)&state.entry->fields[n_u64];
+
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+ } else {
+ struct synth_field *field = state.event->fields[i];
+ u64 val = vals[i];
+
+ switch (field->size) {
+ case 1:
+ *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ break;
+
+ default:
+ state.entry->fields[n_u64] = val;
+ break;
+ }
+ n_u64++;
+ }
+ }
+out:
+ __synth_event_trace_end(&state);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_array);
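+
+/*
+ * A minimal sketch for synth_event_trace_array(), passing the same
+ * hypothetical values as an array instead of as varargs:
+ *
+ *    u64 vals[] = { (u64)current->pid, (u64)123456 };
+ *
+ *    ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
+ */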
+
+/**
+ * synth_event_trace_start - Start piecewise synthetic event trace
+ * @file: The trace_event_file representing the synthetic event
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Start the trace of a synthetic event field-by-field rather than all
+ * at once.
+ *
+ * This function 'opens' an event trace, which means space is reserved
+ * for the event in the trace buffer, after which the event's
+ * individual field values can be set through either
+ * synth_event_add_next_val() or synth_event_add_val().
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state until the event trace is
+ * closed (and the event finally traced) using
+ * synth_event_trace_end().
+ *
+ * Note that synth_event_trace_end() must be called after all values
+ * have been added for each event trace, regardless of whether adding
+ * all field values succeeded or not.
+ *
+ * Note also that for a given event trace, all fields must be added
+ * using either synth_event_add_next_val() or synth_event_add_val()
+ * but not both together or interleaved.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state)
+{
+ int ret;
+
+ if (!trace_state)
+ return -EINVAL;
+
+ ret = __synth_event_trace_start(file, trace_state);
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_start);
+
+static int __synth_event_add_val(const char *field_name, u64 val,
+ struct synth_event_trace_state *trace_state)
+{
+ struct synth_field *field = NULL;
+ struct synth_trace_event *entry;
+ struct synth_event *event;
+ int i, ret = 0;
+
+ if (!trace_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* can't mix synth_event_add_next_val() with synth_event_add_val() */
+ if (field_name) {
+ if (trace_state->add_next) {
+ ret = -EINVAL;
+ goto out;
+ }
+ trace_state->add_name = true;
+ } else {
+ if (trace_state->add_name) {
+ ret = -EINVAL;
+ goto out;
+ }
+ trace_state->add_next = true;
+ }
+
+ if (trace_state->disabled)
+ goto out;
+
+ event = trace_state->event;
+ if (trace_state->add_name) {
+ for (i = 0; i < event->n_fields; i++) {
+ field = event->fields[i];
+ if (strcmp(field->name, field_name) == 0)
+ break;
+ }
+ if (!field) {
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (trace_state->cur_field >= event->n_fields) {
+ ret = -EINVAL;
+ goto out;
+ }
+ field = event->fields[trace_state->cur_field++];
+ }
+
+ entry = trace_state->entry;
+ if (field->is_string) {
+ char *str_val = (char *)(long)val;
+ char *str_field;
+
+ if (!str_val) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ str_field = (char *)&entry->fields[field->offset];
+ strscpy(str_field, str_val, STR_VAR_LEN_MAX);
+ } else {
+ switch (field->size) {
+ case 1:
+ *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
+ break;
+
+ default:
+ trace_state->entry->fields[field->offset] = val;
+ break;
+ }
+ }
+ out:
+ return ret;
+}
+
+/**
+ * synth_event_add_next_val - Add the next field's value to an open synth trace
+ * @val: The value to set the next field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the next field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64. If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function assumes all the fields in an event are to be set one
+ * after another - successive calls to this function are made, one for
+ * each field, in the order of the fields in the event, until all
+ * fields have been set. If you'd rather set each field individually
+ * without regard to ordering, synth_event_add_val() can be used
+ * instead.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_next_val(u64 val,
+ struct synth_event_trace_state *trace_state)
+{
+ return __synth_event_add_val(NULL, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_next_val);
+
+/**
+ * synth_event_add_val - Add a named field's value to an open synth trace
+ * @field_name: The name of the synthetic event field value to set
+ * @val: The value to set the named field to
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * Set the value of the named field in an event that's been opened by
+ * synth_event_trace_start().
+ *
+ * The val param should be the value cast to u64. If the value points
+ * to a string, the val param should be a char * cast to u64.
+ *
+ * This function looks up the field name, and if found, sets the field
+ * to the specified value. This lookup makes this function more
+ * expensive than synth_event_add_next_val(), so use that or the
+ * non-piecewise synth_event_trace() instead if efficiency is more
+ * important.
+ *
+ * Note however that synth_event_add_next_val() and
+ * synth_event_add_val() can't be intermixed for a given event trace -
+ * one or the other but not both can be used at the same time.
+ *
+ * Note also that synth_event_trace_end() must be called after all
+ * values have been added for each event trace, regardless of whether
+ * adding all field values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_add_val(const char *field_name, u64 val,
+ struct synth_event_trace_state *trace_state)
+{
+ return __synth_event_add_val(field_name, val, trace_state);
+}
+EXPORT_SYMBOL_GPL(synth_event_add_val);
+
+/**
+ * synth_event_trace_end - End piecewise synthetic event trace
+ * @trace_state: A pointer to object tracking the piecewise trace state
+ *
+ * End the trace of a synthetic event opened by
+ * synth_event_trace_start().
+ *
+ * This function 'closes' an event trace, which basically means that
+ * it commits the reserved event and cleans up other loose ends.
+ *
+ * A pointer to a trace_state object is passed in, which will keep
+ * track of the current event trace state opened with
+ * synth_event_trace_start().
+ *
+ * Note that this function must be called after all values have been
+ * added for each event trace, regardless of whether adding all field
+ * values succeeded or not.
+ *
+ * Return: 0 on success, err otherwise.
+ */
+int synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+ if (!trace_state)
+ return -EINVAL;
+
+ __synth_event_trace_end(trace_state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(synth_event_trace_end);
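+
+/*
+ * A minimal sketch of a piecewise trace tying together
+ * synth_event_trace_start(), synth_event_add_next_val() and
+ * synth_event_trace_end(); the values are hypothetical, and note that
+ * synth_event_trace_end() is called even if adding a value fails:
+ *
+ *    struct synth_event_trace_state state;
+ *
+ *    ret = synth_event_trace_start(file, &state);
+ *    if (ret)
+ *            return ret;
+ *
+ *    ret = synth_event_add_next_val((u64)current->pid, &state);
+ *    if (!ret)
+ *            ret = synth_event_add_next_val((u64)123456, &state);
+ *
+ *    synth_event_trace_end(&state);
+ */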
+
+static int create_synth_event(int argc, const char **argv)
+{
+ const char *name = argv[0];
+ int len;
+
+ if (name[0] != 's' || name[1] != ':')
+ return -ECANCELED;
+ name += 2;
+
+ /* This interface accepts a group name prefix */
+ if (strchr(name, '/')) {
+ len = str_has_prefix(name, SYNTH_SYSTEM "/");
+ if (len == 0)
+ return -EINVAL;
+ name += len;
+ }
+ return __create_synth_event(argc - 1, name, argv + 1);
+}
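+
+/*
+ * For reference, the equivalent text interface accepted here (via the
+ * dynamic_events file) looks like, e.g.:
+ *
+ *    echo 's:wakeup_latency u64 lat; pid_t pid' >> dynamic_events
+ *
+ * where the "synthetic/" group prefix may optionally precede the event
+ * name.
+ */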
+
+static int synth_event_release(struct dyn_event *ev)
+{
+ struct synth_event *event = to_synth_event(ev);
+ int ret;
+
+ if (event->ref)
+ return -EBUSY;
+
+ ret = unregister_synth_event(event);
+ if (ret)
+ return ret;
+
+ dyn_event_remove(ev);
+ free_synth_event(event);
+ return 0;
+}
+
+static int __synth_event_show(struct seq_file *m, struct synth_event *event)
+{
+ struct synth_field *field;
+ unsigned int i;
+
+ seq_printf(m, "%s\t", event->name);
+
+ for (i = 0; i < event->n_fields; i++) {
+ field = event->fields[i];
+
+ /* parameter values */
+ seq_printf(m, "%s %s%s", field->type, field->name,
+ i == event->n_fields - 1 ? "" : "; ");
+ }
+
+ seq_putc(m, '\n');
+
+ return 0;
+}
+
+static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
+{
+ struct synth_event *event = to_synth_event(ev);
+
+ seq_printf(m, "s:%s/", event->class.system);
+
+ return __synth_event_show(m, event);
+}
+
+static int synth_events_seq_show(struct seq_file *m, void *v)
+{
+ struct dyn_event *ev = v;
+
+ if (!is_synth_event(ev))
+ return 0;
+
+ return __synth_event_show(m, to_synth_event(ev));
+}
+
+static const struct seq_operations synth_events_seq_op = {
+ .start = dyn_event_seq_start,
+ .next = dyn_event_seq_next,
+ .stop = dyn_event_seq_stop,
+ .show = synth_events_seq_show,
+};
+
+static int synth_events_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ ret = dyn_events_release_all(&synth_event_ops);
+ if (ret < 0)
+ return ret;
+ }
+
+ return seq_open(file, &synth_events_seq_op);
+}
+
+static ssize_t synth_events_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return trace_parse_run_command(file, buffer, count, ppos,
+ create_or_delete_synth_event);
+}
+
+static const struct file_operations synth_events_fops = {
+ .open = synth_events_open,
+ .write = synth_events_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static __init int trace_events_synth_init(void)
+{
+ struct dentry *entry = NULL;
+ struct dentry *d_tracer;
+ int err = 0;
+
+ err = dyn_event_register(&synth_event_ops);
+ if (err) {
+ pr_warn("Could not register synth_event_ops\n");
+ return err;
+ }
+
+ d_tracer = tracing_init_dentry();
+ if (IS_ERR(d_tracer)) {
+ err = PTR_ERR(d_tracer);
+ goto err;
+ }
+
+ entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
+ NULL, &synth_events_fops);
+ if (!entry) {
+ err = -ENODEV;
+ goto err;
+ }
+
+ return err;
+ err:
+ pr_warn("Could not create tracefs 'synthetic_events' entry\n");
+
+ return err;
+}
+
+fs_initcall(trace_events_synth_init);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 35989383ae11..ea8d0b094f1b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1202,11 +1202,25 @@ static const struct file_operations kprobe_profile_ops = {
/* Return the length of string -- including null terminal byte */
static nokprobe_inline int
+fetch_store_strlen_user(unsigned long addr)
+{
+ const void __user *uaddr = (__force const void __user *)addr;
+
+ return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
+}
+
+/* Return the length of string -- including null terminal byte */
+static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
int ret, len = 0;
u8 c;
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if (addr < TASK_SIZE)
+ return fetch_store_strlen_user(addr);
+#endif
+
do {
ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
len++;
@@ -1215,22 +1229,14 @@ fetch_store_strlen(unsigned long addr)
return (ret < 0) ? ret : len;
}
-/* Return the length of string -- including null terminal byte */
-static nokprobe_inline int
-fetch_store_strlen_user(unsigned long addr)
-{
- const void __user *uaddr = (__force const void __user *)addr;
-
- return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
-}
-
/*
- * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
- * length and relative data location.
+ * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
+ * with max length and relative data location.
*/
static nokprobe_inline int
-fetch_store_string(unsigned long addr, void *dest, void *base)
+fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
+ const void __user *uaddr = (__force const void __user *)addr;
int maxlen = get_loc_len(*(u32 *)dest);
void *__dest;
long ret;
@@ -1240,11 +1246,7 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
__dest = get_loc_data(dest, base);
- /*
- * Try to get string again, since the string can be changed while
- * probing.
- */
- ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
+ ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
if (ret >= 0)
*(u32 *)dest = make_data_loc(ret, __dest - base);
@@ -1252,23 +1254,31 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
}
/*
- * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
- * with max length and relative data location.
+ * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
+ * length and relative data location.
*/
static nokprobe_inline int
-fetch_store_string_user(unsigned long addr, void *dest, void *base)
+fetch_store_string(unsigned long addr, void *dest, void *base)
{
- const void __user *uaddr = (__force const void __user *)addr;
int maxlen = get_loc_len(*(u32 *)dest);
void *__dest;
long ret;
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)addr < TASK_SIZE)
+ return fetch_store_string_user(addr, dest, base);
+#endif
+
if (unlikely(!maxlen))
return -ENOMEM;
__dest = get_loc_data(dest, base);
- ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
+ /*
+ * Try to get string again, since the string can be changed while
+ * probing.
+ */
+ ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
if (ret >= 0)
*(u32 *)dest = make_data_loc(ret, __dest - base);
@@ -1276,12 +1286,6 @@ fetch_store_string_user(unsigned long addr, void *dest, void *base)
}
static nokprobe_inline int
-probe_mem_read(void *dest, void *src, size_t size)
-{
- return probe_kernel_read(dest, src, size);
-}
-
-static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
const void __user *uaddr = (__force const void __user *)src;
@@ -1289,6 +1293,16 @@ probe_mem_read_user(void *dest, void *src, size_t size)
return probe_user_read(dest, uaddr, size);
}
+static nokprobe_inline int
+probe_mem_read(void *dest, void *src, size_t size)
+{
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ if ((unsigned long)src < TASK_SIZE)
+ return probe_mem_read_user(dest, src, size);
+#endif
+ return probe_kernel_read(dest, src, size);
+}
+
/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 9a121e147102..73976de7f8cc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -393,7 +393,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
if (mm) {
const struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, ip);
if (vma) {
file = vma->vm_file;
@@ -405,7 +405,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
trace_seq_printf(s, "[+0x%lx]",
ip - vmstart);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
trace_seq_printf(s, " <" IP_FMT ">", ip);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index ab8b6436d53f..b8a928e925c7 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -1006,7 +1006,7 @@ int trace_probe_init(struct trace_probe *tp, const char *event,
INIT_LIST_HEAD(&tp->event->class.fields);
INIT_LIST_HEAD(&tp->event->probes);
INIT_LIST_HEAD(&tp->list);
- list_add(&tp->event->probes, &tp->list);
+ list_add(&tp->list, &tp->event->probes);
call = trace_probe_event_call(tp);
call->class = &tp->event->class;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index c557f42a9397..98bba4764c52 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -515,9 +515,8 @@ static const struct file_operations stack_trace_filter_fops = {
#endif /* CONFIG_DYNAMIC_FTRACE */
int
-stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
int was_enabled;
int ret;
diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h
new file mode 100644
index 000000000000..ac35c45207c4
--- /dev/null
+++ b/kernel/trace/trace_synth.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __TRACE_SYNTH_H
+#define __TRACE_SYNTH_H
+
+#include "trace_dynevent.h"
+
+#define SYNTH_SYSTEM "synthetic"
+#define SYNTH_FIELDS_MAX 32
+
+#define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
+
+struct synth_field {
+ char *type;
+ char *name;
+ size_t size;
+ unsigned int offset;
+ bool is_signed;
+ bool is_string;
+};
+
+struct synth_event {
+ struct dyn_event devent;
+ int ref;
+ char *name;
+ struct synth_field **fields;
+ unsigned int n_fields;
+ unsigned int n_u64;
+ struct trace_event_class class;
+ struct trace_event_call call;
+ struct tracepoint *tp;
+ struct module *mod;
+};
+
+extern struct synth_event *find_synth_event(const char *name);
+
+#endif /* __TRACE_SYNTH_H */
diff --git a/kernel/user.c b/kernel/user.c
index 5235d7f49982..b1635d94a1f2 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -82,7 +82,7 @@ EXPORT_SYMBOL_GPL(init_user_ns);
#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))
static struct kmem_cache *uid_cachep;
-struct hlist_head uidhash_table[UIDHASH_SZ];
+static struct hlist_head uidhash_table[UIDHASH_SZ];
/*
* The uidhash_lock is mostly taken from process context, but it is
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 53ff2c81b084..5abb5b22ad13 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -50,6 +50,11 @@ struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
+
+# ifdef CONFIG_SMP
+int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
+# endif /* CONFIG_SMP */
+
/*
* Should we panic when a soft-lockup or hard-lockup occurs:
*/
@@ -82,16 +87,6 @@ static int __init hardlockup_panic_setup(char *str)
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
-# ifdef CONFIG_SMP
-int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-
-static int __init hardlockup_all_cpu_backtrace_setup(char *str)
-{
- sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
- return 1;
-}
-__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
-# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/*
@@ -163,6 +158,10 @@ static void lockup_detector_update_enable(void)
#define SOFTLOCKUP_RESET ULONG_MAX
+#ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+#endif
+
/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
@@ -178,13 +177,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
-static int __init softlockup_panic_setup(char *str)
-{
- softlockup_panic = simple_strtoul(str, NULL, 0);
- return 1;
-}
-__setup("softlockup_panic=", softlockup_panic_setup);
-
static int __init nowatchdog_setup(char *str)
{
watchdog_user_enabled = 0;
@@ -206,17 +198,6 @@ static int __init watchdog_thresh_setup(char *str)
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
-#ifdef CONFIG_SMP
-int __read_mostly sysctl_softlockup_all_cpu_backtrace;
-
-static int __init softlockup_all_cpu_backtrace_setup(char *str)
-{
- sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
- return 1;
-}
-__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
-#endif
-
static void __lockup_detector_cleanup(void);
/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 891ccad5f271..9fbe1e237563 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -145,7 +145,7 @@ enum {
/* struct worker is defined in workqueue_internal.h */
struct worker_pool {
- spinlock_t lock; /* the pool lock */
+ raw_spinlock_t lock; /* the pool lock */
int cpu; /* I: the associated cpu */
int node; /* I: the associated node ID */
int id; /* I: pool ID */
@@ -300,8 +300,9 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+/* wait for manager to go away */
+static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -826,7 +827,7 @@ static struct worker *first_idle_worker(struct worker_pool *pool)
* Wake up the first idle worker of @pool.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -881,7 +882,7 @@ void wq_worker_sleeping(struct task_struct *task)
return;
worker->sleeping = 1;
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* The counterpart of the following dec_and_test, implied mb,
@@ -900,7 +901,7 @@ void wq_worker_sleeping(struct task_struct *task)
if (next)
wake_up_process(next->task);
}
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}
/**
@@ -911,7 +912,7 @@ void wq_worker_sleeping(struct task_struct *task)
* the scheduler to get a worker's last known identity.
*
* CONTEXT:
- * spin_lock_irq(rq->lock)
+ * raw_spin_lock_irq(rq->lock)
*
* This function is called during schedule() when a kworker is going
* to sleep. It's used by psi to identify aggregation workers during
@@ -942,7 +943,7 @@ work_func_t wq_worker_last_func(struct task_struct *task)
* Set @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
@@ -967,7 +968,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
* Clear @flags in @worker->flags and adjust nr_running accordingly.
*
* CONTEXT:
- * spin_lock_irq(pool->lock)
+ * raw_spin_lock_irq(pool->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
@@ -1015,7 +1016,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
* actually occurs, it should be easy to locate the culprit work function.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*
* Return:
* Pointer to worker which is executing @work if found, %NULL
@@ -1050,7 +1051,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
* nested inside outer list_for_each_entry_safe().
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void move_linked_works(struct work_struct *work, struct list_head *head,
struct work_struct **nextp)
@@ -1128,9 +1129,9 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
- spin_lock_irq(&pwq->pool->lock);
+ raw_spin_lock_irq(&pwq->pool->lock);
put_pwq(pwq);
- spin_unlock_irq(&pwq->pool->lock);
+ raw_spin_unlock_irq(&pwq->pool->lock);
}
}
@@ -1163,7 +1164,7 @@ static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
* decrement nr_in_flight of its pwq and handle workqueue flushing.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
@@ -1262,7 +1263,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
if (!pool)
goto fail;
- spin_lock(&pool->lock);
+ raw_spin_lock(&pool->lock);
/*
* work->data is guaranteed to point to pwq only while the work
* item is queued on pwq->wq, and both updating work->data to point
@@ -1291,11 +1292,11 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
/* work->data points to pwq iff queued, point to pool */
set_work_pool_and_keep_pending(work, pool->id);
- spin_unlock(&pool->lock);
+ raw_spin_unlock(&pool->lock);
rcu_read_unlock();
return 1;
}
- spin_unlock(&pool->lock);
+ raw_spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
local_irq_restore(*flags);
@@ -1316,7 +1317,7 @@ fail:
* work_struct flags.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
@@ -1433,7 +1434,7 @@ retry:
if (last_pool && last_pool != pwq->pool) {
struct worker *worker;
- spin_lock(&last_pool->lock);
+ raw_spin_lock(&last_pool->lock);
worker = find_worker_executing_work(last_pool, work);
@@ -1441,11 +1442,11 @@ retry:
pwq = worker->current_pwq;
} else {
/* meh... not running there, queue here */
- spin_unlock(&last_pool->lock);
- spin_lock(&pwq->pool->lock);
+ raw_spin_unlock(&last_pool->lock);
+ raw_spin_lock(&pwq->pool->lock);
}
} else {
- spin_lock(&pwq->pool->lock);
+ raw_spin_lock(&pwq->pool->lock);
}
/*
@@ -1458,7 +1459,7 @@ retry:
*/
if (unlikely(!pwq->refcnt)) {
if (wq->flags & WQ_UNBOUND) {
- spin_unlock(&pwq->pool->lock);
+ raw_spin_unlock(&pwq->pool->lock);
cpu_relax();
goto retry;
}
@@ -1490,7 +1491,7 @@ retry:
insert_work(pwq, work, worklist, work_flags);
out:
- spin_unlock(&pwq->pool->lock);
+ raw_spin_unlock(&pwq->pool->lock);
rcu_read_unlock();
}
@@ -1759,7 +1760,7 @@ EXPORT_SYMBOL(queue_rcu_work);
* necessary.
*
* LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void worker_enter_idle(struct worker *worker)
{
@@ -1799,7 +1800,7 @@ static void worker_enter_idle(struct worker *worker)
* @worker is leaving idle state. Update stats.
*
* LOCKING:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void worker_leave_idle(struct worker *worker)
{
@@ -1937,11 +1938,11 @@ static struct worker *create_worker(struct worker_pool *pool)
worker_attach_to_pool(worker, pool);
/* start the newly created worker */
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
worker->pool->nr_workers++;
worker_enter_idle(worker);
wake_up_process(worker->task);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
return worker;
@@ -1960,7 +1961,7 @@ fail:
* be idle.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void destroy_worker(struct worker *worker)
{
@@ -1986,7 +1987,7 @@ static void idle_worker_timeout(struct timer_list *t)
{
struct worker_pool *pool = from_timer(pool, t, idle_timer);
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
while (too_many_workers(pool)) {
struct worker *worker;
@@ -2004,7 +2005,7 @@ static void idle_worker_timeout(struct timer_list *t)
destroy_worker(worker);
}
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}
static void send_mayday(struct work_struct *work)
@@ -2035,8 +2036,8 @@ static void pool_mayday_timeout(struct timer_list *t)
struct worker_pool *pool = from_timer(pool, t, mayday_timer);
struct work_struct *work;
- spin_lock_irq(&pool->lock);
- spin_lock(&wq_mayday_lock); /* for wq->maydays */
+ raw_spin_lock_irq(&pool->lock);
+ raw_spin_lock(&wq_mayday_lock); /* for wq->maydays */
if (need_to_create_worker(pool)) {
/*
@@ -2049,8 +2050,8 @@ static void pool_mayday_timeout(struct timer_list *t)
send_mayday(work);
}
- spin_unlock(&wq_mayday_lock);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock(&wq_mayday_lock);
+ raw_spin_unlock_irq(&pool->lock);
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
@@ -2069,7 +2070,7 @@ static void pool_mayday_timeout(struct timer_list *t)
* may_start_working() %true.
*
* LOCKING:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*/
@@ -2078,7 +2079,7 @@ __releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2094,7 +2095,7 @@ restart:
}
del_timer_sync(&pool->mayday_timer);
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* This is necessary even after a new worker was just successfully
* created as @pool->lock was dropped and the new worker might have
@@ -2117,7 +2118,7 @@ restart:
* and may_start_working() is true.
*
* CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
@@ -2140,7 +2141,7 @@ static bool manage_workers(struct worker *worker)
pool->manager = NULL;
pool->flags &= ~POOL_MANAGER_ACTIVE;
- wake_up(&wq_manager_wait);
+ rcuwait_wake_up(&manager_wait);
return true;
}
@@ -2156,7 +2157,7 @@ static bool manage_workers(struct worker *worker)
* call this function to process a work.
*
* CONTEXT:
- * spin_lock_irq(pool->lock) which is released and regrabbed.
+ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&pool->lock)
@@ -2238,7 +2239,7 @@ __acquires(&pool->lock)
*/
set_work_pool_and_clear_pending(work, pool->id);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
@@ -2293,7 +2294,7 @@ __acquires(&pool->lock)
*/
cond_resched();
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/* clear cpu intensive status */
if (unlikely(cpu_intensive))
@@ -2319,7 +2320,7 @@ __acquires(&pool->lock)
* fetches a work from the top and executes it.
*
* CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times.
*/
static void process_scheduled_works(struct worker *worker)
@@ -2361,11 +2362,11 @@ static int worker_thread(void *__worker)
/* tell the scheduler that this is a workqueue worker */
set_pf_worker(true);
woke_up:
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/* am I supposed to die? */
if (unlikely(worker->flags & WORKER_DIE)) {
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
WARN_ON_ONCE(!list_empty(&worker->entry));
set_pf_worker(false);
@@ -2431,7 +2432,7 @@ sleep:
*/
worker_enter_idle(worker);
__set_current_state(TASK_IDLE);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
schedule();
goto woke_up;
}
@@ -2485,7 +2486,7 @@ repeat:
should_stop = kthread_should_stop();
/* see whether any pwq is asking for help */
- spin_lock_irq(&wq_mayday_lock);
+ raw_spin_lock_irq(&wq_mayday_lock);
while (!list_empty(&wq->maydays)) {
struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
@@ -2497,11 +2498,11 @@ repeat:
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
- spin_unlock_irq(&wq_mayday_lock);
+ raw_spin_unlock_irq(&wq_mayday_lock);
worker_attach_to_pool(rescuer, pool);
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* Slurp in all works issued via this workqueue and
@@ -2529,8 +2530,8 @@ repeat:
* being used to relieve memory pressure, don't
* incur MAYDAY_INTERVAL delay inbetween.
*/
- if (need_to_create_worker(pool)) {
- spin_lock(&wq_mayday_lock);
+ if (pwq->nr_active && need_to_create_worker(pool)) {
+ raw_spin_lock(&wq_mayday_lock);
/*
* Queue iff we aren't racing destruction
* and somebody else hasn't queued it already.
@@ -2539,7 +2540,7 @@ repeat:
get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
}
- spin_unlock(&wq_mayday_lock);
+ raw_spin_unlock(&wq_mayday_lock);
}
}
@@ -2557,14 +2558,14 @@ repeat:
if (need_more_worker(pool))
wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
worker_detach_from_pool(rescuer);
- spin_lock_irq(&wq_mayday_lock);
+ raw_spin_lock_irq(&wq_mayday_lock);
}
- spin_unlock_irq(&wq_mayday_lock);
+ raw_spin_unlock_irq(&wq_mayday_lock);
if (should_stop) {
__set_current_state(TASK_RUNNING);
@@ -2644,7 +2645,7 @@ static void wq_barrier_func(struct work_struct *work)
* underneath us, so we can't reliably determine pwq from @target.
*
* CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
*/
static void insert_wq_barrier(struct pool_workqueue *pwq,
struct wq_barrier *barr,
@@ -2731,7 +2732,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
for_each_pwq(pwq, wq) {
struct worker_pool *pool = pwq->pool;
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
if (flush_color >= 0) {
WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2748,7 +2749,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
pwq->work_color = work_color;
}
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}
if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
@@ -2948,9 +2949,9 @@ reflush:
for_each_pwq(pwq, wq) {
bool drained;
- spin_lock_irq(&pwq->pool->lock);
+ raw_spin_lock_irq(&pwq->pool->lock);
drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
- spin_unlock_irq(&pwq->pool->lock);
+ raw_spin_unlock_irq(&pwq->pool->lock);
if (drained)
continue;
@@ -2986,7 +2987,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
return false;
}
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
@@ -3002,7 +3003,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
check_flush_dependency(pwq->wq, work);
insert_wq_barrier(pwq, barr, work, worker);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
/*
* Force a lock recursion deadlock when using flush_work() inside a
@@ -3021,7 +3022,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
rcu_read_unlock();
return true;
already_gone:
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
rcu_read_unlock();
return false;
}
@@ -3414,7 +3415,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
*/
static int init_worker_pool(struct worker_pool *pool)
{
- spin_lock_init(&pool->lock);
+ raw_spin_lock_init(&pool->lock);
pool->id = -1;
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
@@ -3491,7 +3492,6 @@ static void rcu_free_wq(struct rcu_head *rcu)
else
free_workqueue_attrs(wq->unbound_attrs);
- kfree(wq->rescuer);
kfree(wq);
}
@@ -3504,6 +3504,18 @@ static void rcu_free_pool(struct rcu_head *rcu)
kfree(pool);
}
+/* This returns with the lock held on success (pool manager is inactive). */
+static bool wq_manager_inactive(struct worker_pool *pool)
+{
+ raw_spin_lock_irq(&pool->lock);
+
+ if (pool->flags & POOL_MANAGER_ACTIVE) {
+ raw_spin_unlock_irq(&pool->lock);
+ return false;
+ }
+ return true;
+}
+
/**
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
@@ -3539,16 +3551,17 @@ static void put_unbound_pool(struct worker_pool *pool)
* Become the manager and destroy all workers. This prevents
* @pool's workers from blocking on attach_mutex. We're the last
* manager and @pool gets freed with the flag set.
+ * Because of how wq_manager_inactive() works, we will hold the
+ * spinlock after a successful wait.
*/
- spin_lock_irq(&pool->lock);
- wait_event_lock_irq(wq_manager_wait,
- !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool),
+ TASK_UNINTERRUPTIBLE);
pool->flags |= POOL_MANAGER_ACTIVE;
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
mutex_lock(&wq_pool_attach_mutex);
if (!list_empty(&pool->workers))
@@ -3704,7 +3717,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
return;
/* this function can be called during early boot w/ irq disabled */
- spin_lock_irqsave(&pwq->pool->lock, flags);
+ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
/*
* During [un]freezing, the caller is responsible for ensuring that
@@ -3727,7 +3740,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
pwq->max_active = 0;
}
- spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
}
/* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -4129,9 +4142,9 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
use_dfl_pwq:
mutex_lock(&wq->mutex);
- spin_lock_irq(&wq->dfl_pwq->pool->lock);
+ raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
get_pwq(wq->dfl_pwq);
- spin_unlock_irq(&wq->dfl_pwq->pool->lock);
+ raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
@@ -4208,8 +4221,8 @@ static int init_rescuer(struct workqueue_struct *wq)
rescuer->rescue_wq = wq;
rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
- ret = PTR_ERR_OR_ZERO(rescuer->task);
- if (ret) {
+ if (IS_ERR(rescuer->task)) {
+ ret = PTR_ERR(rescuer->task);
kfree(rescuer);
return ret;
}
@@ -4360,9 +4373,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
struct worker *rescuer = wq->rescuer;
/* this prevents new queueing */
- spin_lock_irq(&wq_mayday_lock);
+ raw_spin_lock_irq(&wq_mayday_lock);
wq->rescuer = NULL;
- spin_unlock_irq(&wq_mayday_lock);
+ raw_spin_unlock_irq(&wq_mayday_lock);
/* rescuer will empty maydays list before exiting */
kthread_stop(rescuer->task);
@@ -4376,27 +4389,25 @@ void destroy_workqueue(struct workqueue_struct *wq)
mutex_lock(&wq_pool_mutex);
mutex_lock(&wq->mutex);
for_each_pwq(pwq, wq) {
- spin_lock_irq(&pwq->pool->lock);
+ raw_spin_lock_irq(&pwq->pool->lock);
if (WARN_ON(pwq_busy(pwq))) {
pr_warn("%s: %s has the following busy pwq\n",
__func__, wq->name);
show_pwq(pwq);
- spin_unlock_irq(&pwq->pool->lock);
+ raw_spin_unlock_irq(&pwq->pool->lock);
mutex_unlock(&wq->mutex);
mutex_unlock(&wq_pool_mutex);
show_workqueue_state();
return;
}
- spin_unlock_irq(&pwq->pool->lock);
+ raw_spin_unlock_irq(&pwq->pool->lock);
}
mutex_unlock(&wq->mutex);
- mutex_unlock(&wq_pool_mutex);
/*
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
- mutex_lock(&wq_pool_mutex);
list_del_rcu(&wq->list);
mutex_unlock(&wq_pool_mutex);
@@ -4558,10 +4569,10 @@ unsigned int work_busy(struct work_struct *work)
rcu_read_lock();
pool = get_work_pool(work);
if (pool) {
- spin_lock_irqsave(&pool->lock, flags);
+ raw_spin_lock_irqsave(&pool->lock, flags);
if (find_worker_executing_work(pool, work))
ret |= WORK_BUSY_RUNNING;
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
}
rcu_read_unlock();
@@ -4768,10 +4779,10 @@ void show_workqueue_state(void)
pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
for_each_pwq(pwq, wq) {
- spin_lock_irqsave(&pwq->pool->lock, flags);
+ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
if (pwq->nr_active || !list_empty(&pwq->delayed_works))
show_pwq(pwq);
- spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4785,7 +4796,7 @@ void show_workqueue_state(void)
struct worker *worker;
bool first = true;
- spin_lock_irqsave(&pool->lock, flags);
+ raw_spin_lock_irqsave(&pool->lock, flags);
if (pool->nr_workers == pool->nr_idle)
goto next_pool;
@@ -4804,7 +4815,7 @@ void show_workqueue_state(void)
}
pr_cont("\n");
next_pool:
- spin_unlock_irqrestore(&pool->lock, flags);
+ raw_spin_unlock_irqrestore(&pool->lock, flags);
/*
* We could be printing a lot from atomic context, e.g.
* sysrq-t -> show_workqueue_state(). Avoid triggering
@@ -4834,7 +4845,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
struct worker_pool *pool = worker->pool;
if (pool) {
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* ->desc tracks information (wq name or
* set_worker_desc()) for the latest execution. If
@@ -4848,7 +4859,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
scnprintf(buf + off, size - off, "-%s",
worker->desc);
}
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}
}
@@ -4879,7 +4890,7 @@ static void unbind_workers(int cpu)
for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&wq_pool_attach_mutex);
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
/*
* We've blocked all attach/detach operations. Make all workers
@@ -4893,7 +4904,7 @@ static void unbind_workers(int cpu)
pool->flags |= POOL_DISASSOCIATED;
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
mutex_unlock(&wq_pool_attach_mutex);
/*
@@ -4919,9 +4930,9 @@ static void unbind_workers(int cpu)
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}
}
@@ -4948,7 +4959,7 @@ static void rebind_workers(struct worker_pool *pool)
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
pool->attrs->cpumask) < 0);
- spin_lock_irq(&pool->lock);
+ raw_spin_lock_irq(&pool->lock);
pool->flags &= ~POOL_DISASSOCIATED;
@@ -4987,7 +4998,7 @@ static void rebind_workers(struct worker_pool *pool)
WRITE_ONCE(worker->flags, worker_flags);
}
- spin_unlock_irq(&pool->lock);
+ raw_spin_unlock_irq(&pool->lock);
}
/**
@@ -5906,7 +5917,7 @@ void __init workqueue_init_early(void)
int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
int i, cpu;
- WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+ BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
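The hunks above convert pool->lock and wq_mayday_lock from spinlock_t to raw_spinlock_t so they keep spinning with interrupts disabled on PREEMPT_RT, and put_unbound_pool() switches from wait_event_lock_irq() to an rcuwait whose condition helper returns with the lock already held. Below is a minimal sketch of that "condition helper takes the lock on success" pattern, using an invented foo_pool object rather than the real worker_pool:

	#include <linux/rcuwait.h>
	#include <linux/sched.h>
	#include <linux/spinlock.h>

	struct foo_pool {
		raw_spinlock_t	lock;		/* set up elsewhere with raw_spin_lock_init() */
		unsigned int	flags;
	#define FOO_MANAGER_ACTIVE	0x1
	};

	static struct rcuwait foo_manager_wait = __RCUWAIT_INITIALIZER(foo_manager_wait);

	/* Returns true with pool->lock held; otherwise drops the lock and returns false. */
	static bool foo_manager_inactive(struct foo_pool *pool)
	{
		raw_spin_lock_irq(&pool->lock);
		if (pool->flags & FOO_MANAGER_ACTIVE) {
			raw_spin_unlock_irq(&pool->lock);
			return false;
		}
		return true;
	}

	static void foo_become_manager(struct foo_pool *pool)
	{
		/* Sleep until the helper succeeds; on return the lock is held. */
		rcuwait_wait_event(&foo_manager_wait, foo_manager_inactive(pool),
				   TASK_UNINTERRUPTIBLE);
		pool->flags |= FOO_MANAGER_ACTIVE;
		/* ... tear the pool down under the lock ... */
		raw_spin_unlock_irq(&pool->lock);
	}

	/* The outgoing manager signals completion with rcuwait_wake_up(&foo_manager_wait). */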
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0217ed126f77..ef675beccab1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -99,6 +99,7 @@ config DYNAMIC_DEBUG
default n
depends on PRINTK
depends on (DEBUG_FS || PROC_FS)
+ select DYNAMIC_DEBUG_CORE
help
Compiles debug level messages into the kernel, which would not
@@ -165,6 +166,17 @@ config DYNAMIC_DEBUG
See Documentation/admin-guide/dynamic-debug-howto.rst for additional
information.
+config DYNAMIC_DEBUG_CORE
+ bool "Enable core function of dynamic debug support"
+ depends on PRINTK
+ depends on (DEBUG_FS || PROC_FS)
+ help
+ Enable the core support for dynamic debug. This is useful when you
+ want to tie dynamic debug to your kernel modules by defining
+ DYNAMIC_DEBUG_MODULE for each of them, especially on embedded
+ systems where kernel image size is a concern.
+
config SYMBOLIC_ERRNAME
bool "Support symbolic error names in printf"
default y if PRINTK
@@ -213,6 +225,23 @@ config DEBUG_INFO_REDUCED
DEBUG_INFO build and compile times are reduced too.
Only works with newer gcc versions.
+config DEBUG_INFO_COMPRESSED
+ bool "Compressed debugging information"
+ depends on DEBUG_INFO
+ depends on $(cc-option,-gz=zlib)
+ depends on $(as-option,-Wa$(comma)--compress-debug-sections=zlib)
+ depends on $(ld-option,--compress-debug-sections=zlib)
+ help
+ Compress the debug information using zlib. Requires GCC 5.0+ or Clang
+ 5.0+, binutils 2.26+, and zlib.
+
+ Users of dpkg-deb via scripts/package/builddeb may find an increase in
+ size of their debug .deb packages with this config set, due to the
+ debug info being compressed with zlib, then the object files being
+ recompressed with a different compression scheme. But this is still
+ preferable to setting $KDEB_COMPRESS to "none" which would be even
+ larger.
+
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
@@ -658,6 +687,12 @@ config SCHED_STACK_END_CHECK
data corruption or a sporadic crash at a later stage once the region
is examined. The runtime overhead introduced is minimal.
+config ARCH_HAS_DEBUG_VM_PGTABLE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run DEBUG_VM_PGTABLE.
+
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
@@ -693,6 +728,22 @@ config DEBUG_VM_PGFLAGS
If unsure, say N.
+config DEBUG_VM_PGTABLE
+ bool "Debug arch page table for semantics compliance"
+ depends on MMU
+ depends on ARCH_HAS_DEBUG_VM_PGTABLE
+ default y if DEBUG_VM
+ help
+ This option provides a debug method which can be used to test
+ architecture page table helper functions on various platforms in
+ verifying if they comply with expected generic MM semantics. This
+ will help architecture code in making sure that any changes or
+ new additions of these helpers still conform to expected
+ semantics of the generic MM. Platforms will have to opt in for
+ this through ARCH_HAS_DEBUG_VM_PGTABLE.
+
+ If unsure, say N.
+
config ARCH_HAS_DEBUG_VIRTUAL
bool
@@ -1519,6 +1570,8 @@ config PROVIDE_OHCI1394_DMA_INIT
source "samples/Kconfig"
+source "lib/Kconfig.kcsan"
+
config ARCH_HAS_DEVMEM_IS_ALLOWED
bool
@@ -1774,6 +1827,15 @@ config KCOV_INSTRUMENT_ALL
filesystem fuzzing with AFL) then you will want to enable coverage
for more specific subsets of files, and should say n here.
+config KCOV_IRQ_AREA_SIZE
+ hex "Size of interrupt coverage collection area in words"
+ depends on KCOV
+ default 0x40000
+ help
+ KCOV uses preallocated per-cpu areas to collect coverage from
+ soft interrupts. This specifies the size of those areas in the
+ number of unsigned long words.
+
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1991,6 +2053,19 @@ config TEST_LKM
If unsure, say N.
+config TEST_BITOPS
+ tristate "Test module for compilation of bitops operations"
+ depends on m
+ help
+ This builds the "test_bitops" module that is much like the
+ TEST_LKM module except that it does a basic exercise of the
+ set/clear_bit macros and get_count_order/long to make sure there are
+ no compiler warnings from the C=1 sparse checker or -Wextra
+ compilations. It has no dependencies and doesn't run or load unless
+ explicitly requested by name, for example: modprobe test_bitops.
+
+ If unsure, say N.
+
config TEST_VMALLOC
tristate "Test module for stress/performance analysis of vmalloc allocator"
default n
@@ -2069,8 +2144,9 @@ config TEST_SYSCTL
If unsure, say N.
config SYSCTL_KUNIT_TEST
- tristate "KUnit test for sysctl"
+ tristate "KUnit test for sysctl" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the proc sysctl unit test, which runs on boot.
Tests the API contract and implementation correctness of sysctl.
@@ -2080,8 +2156,9 @@ config SYSCTL_KUNIT_TEST
If unsure, say N.
config LIST_KUNIT_TEST
- tristate "KUnit Test for Kernel Linked-list structures"
+ tristate "KUnit Test for Kernel Linked-list structures" if !KUNIT_ALL_TESTS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the linked list KUnit test suite.
It tests that the API and basic functionality of the list_head type
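The DYNAMIC_DEBUG_CORE option added above compiles only the dynamic-debug engine; a module then opts in by building with DYNAMIC_DEBUG_MODULE defined, which keeps its pr_debug() call sites controllable without growing the rest of the image. A hedged sketch of such a module follows (the module name and message are made up; the define normally comes from the module Makefile via ccflags-y += -DDYNAMIC_DEBUG_MODULE):

	/* Illustrative only; not an in-tree module. */
	#define DYNAMIC_DEBUG_MODULE		/* usually set from the Makefile instead */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/module.h>
	#include <linux/printk.h>

	static int __init foo_init(void)
	{
		/* With CONFIG_DYNAMIC_DEBUG_CORE=y this call site is registered in
		 * the _ddebug table and can be toggled through the control file.
		 */
		pr_debug("loaded\n");
		return 0;
	}
	module_init(foo_init);

	static void __exit foo_exit(void)
	{
		pr_debug("unloading\n");
	}
	module_exit(foo_exit);

	MODULE_LICENSE("GPL");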
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
new file mode 100644
index 000000000000..5ee88e5119c2
--- /dev/null
+++ b/lib/Kconfig.kcsan
@@ -0,0 +1,199 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_ARCH_KCSAN
+ bool
+
+config HAVE_KCSAN_COMPILER
+ def_bool CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-distinguish-volatile=1)
+ help
+ For the list of compilers that support KCSAN, please see
+ <file:Documentation/dev-tools/kcsan.rst>.
+
+config KCSAN_KCOV_BROKEN
+ def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
+ depends on CC_IS_CLANG
+ depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=thread -fsanitize-coverage=trace-pc)
+ help
+ Some versions of clang support either KCSAN or KCOV but not the
+ combination of the two.
+ See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
+ in newer releases.
+
+menuconfig KCSAN
+ bool "KCSAN: dynamic data race detector"
+ depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
+ depends on DEBUG_KERNEL && !KASAN
+ depends on !KCSAN_KCOV_BROKEN
+ select STACKTRACE
+ help
+ The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
+ data-race detector that relies on compile-time instrumentation.
+ KCSAN uses a watchpoint-based sampling approach to detect races.
+
+ While KCSAN's primary purpose is to detect data races, it
+ also provides assertions to check data access constraints.
+ These assertions can expose bugs that do not manifest as
+ data races.
+
+ See <file:Documentation/dev-tools/kcsan.rst> for more details.
+
+if KCSAN
+
+config KCSAN_VERBOSE
+ bool "Show verbose reports with more information about system state"
+ depends on PROVE_LOCKING
+ help
+ If enabled, reports show more information about the system state that
+ may help better analyze and debug races. This includes held locks and
+ IRQ trace events.
+
+ While this option should generally be benign, we call into more
+ external functions on report generation; if a race report is
+ generated from any one of them, system stability may suffer due to
+ deadlocks or recursion. If in doubt, say N.
+
+config KCSAN_DEBUG
+ bool "Debugging of KCSAN internals"
+
+config KCSAN_SELFTEST
+ bool "Perform short selftests on boot"
+ default y
+ help
+ Run KCSAN selftests on boot. On test failure, the kernel panics.
+
+config KCSAN_EARLY_ENABLE
+ bool "Early enable during boot"
+ default y
+ help
+ If KCSAN should be enabled globally as soon as possible. KCSAN can
+ later be enabled/disabled via debugfs.
+
+config KCSAN_NUM_WATCHPOINTS
+ int "Number of available watchpoints"
+ default 64
+ help
+ Total number of available watchpoints. An address range maps into a
+ specific watchpoint slot as specified in kernel/kcsan/encoding.h.
+ Although a larger number of watchpoints may not be usable due to the
+ limited number of CPUs, a larger value helps to improve performance
+ by reducing cache-line contention. The chosen default is a
+ conservative value; we should almost never observe "no_capacity"
+ events (see /sys/kernel/debug/kcsan).
+
+config KCSAN_UDELAY_TASK
+ int "Delay in microseconds (for tasks)"
+ default 80
+ help
+ For tasks, the microsecond delay after setting up a watchpoint.
+
+config KCSAN_UDELAY_INTERRUPT
+ int "Delay in microseconds (for interrupts)"
+ default 20
+ help
+ For interrupts, the microsecond delay after setting up a watchpoint.
+ Interrupts have tighter latency requirements, and their delay should
+ be lower than for tasks.
+
+config KCSAN_DELAY_RANDOMIZE
+ bool "Randomize above delays"
+ default y
+ help
+ If delays should be randomized, where the maximum is KCSAN_UDELAY_*.
+ If false, the chosen delays are always the KCSAN_UDELAY_* values
+ as defined above.
+
+config KCSAN_SKIP_WATCH
+ int "Skip instructions before setting up watchpoint"
+ default 4000
+ help
+ The number of per-CPU memory operations to skip, before another
+ watchpoint is set up, i.e. one in KCSAN_WATCH_SKIP per-CPU
+ memory operations is used to set up a watchpoint. A smaller value
+ results in more aggressive race detection, whereas a larger value
+ improves system performance at the cost of missing some races.
+
+config KCSAN_SKIP_WATCH_RANDOMIZE
+ bool "Randomize watchpoint instruction skip count"
+ default y
+ help
+ If instruction skip count should be randomized, where the maximum is
+ KCSAN_WATCH_SKIP. If false, the chosen value is always
+ KCSAN_WATCH_SKIP.
+
+config KCSAN_INTERRUPT_WATCHER
+ bool "Interruptible watchers"
+ help
+ If enabled, a task that set up a watchpoint may be interrupted while
+ delayed. This option will allow KCSAN to detect races between
+ interrupted tasks and other threads of execution on the same CPU.
+
+ Currently disabled by default, because not all safe per-CPU access
+ primitives and patterns may be accounted for, which could result in
+ false positives.
+
+config KCSAN_REPORT_ONCE_IN_MS
+ int "Duration in milliseconds, in which any given race is only reported once"
+ default 3000
+ help
+ Any given race is only reported once in the defined time window.
+ Different races may still generate reports within a duration that is
+ smaller than the duration defined here. This allows rate limiting
+ reporting to avoid flooding the console with reports. Setting this
+ to 0 disables rate limiting.
+
+# The main purpose of the below options is to control reported data races (e.g.
+# in fuzzer configs), and are not expected to be switched frequently by other
+# users. We could turn some of them into boot parameters, but given they should
+# not be switched normally, let's keep them here to simplify configuration.
+#
+# The defaults below are chosen to be very conservative, and may miss certain
+# bugs.
+
+config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
+ bool "Report races of unknown origin"
+ default y
+ help
+ If KCSAN should report races where only one access is known, and the
+ conflicting access is of unknown origin. This type of race is
+ reported if it was only possible to infer a race due to a data value
+ change while an access is being delayed on a watchpoint.
+
+config KCSAN_REPORT_VALUE_CHANGE_ONLY
+ bool "Only report races where watcher observed a data value change"
+ default y
+ help
+ If enabled and a conflicting write is observed via a watchpoint, but
+ the data value of the memory location was observed to remain
+ unchanged, do not report the data race.
+
+config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
+ bool "Assume that plain aligned writes up to word size are atomic"
+ default y
+ help
+ Assume that plain aligned writes up to word size are atomic by
+ default, and also not subject to other unsafe compiler optimizations
+ resulting in data races. This will cause KCSAN to not report data
+ races due to conflicts where the only plain accesses are aligned
+ writes up to word size: conflicts between marked reads and plain
+ aligned writes up to word size will not be reported as data races;
+ notice that data races between two conflicting plain aligned writes
+ will also not be reported.
+
+config KCSAN_IGNORE_ATOMICS
+ bool "Do not instrument marked atomic accesses"
+ help
+ Never instrument marked atomic accesses. This option can be used for
+ additional filtering. Conflicting marked atomic reads and plain
+ writes will never be reported as a data race; however, this will cause
+ plain reads and marked writes to result in "unknown origin" reports.
+ If combined with CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n, data
+ races where at least one access is marked atomic will never be
+ reported.
+
+ Similar to KCSAN_ASSUME_PLAIN_WRITES_ATOMIC, but including unaligned
+ accesses, conflicting marked atomic reads and plain writes will not
+ be reported as data races; however, unlike that option, data races
+ due to two conflicting plain writes will be reported (aligned and
+ unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+
+endif # KCSAN
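The "assertions to check data access constraints" mentioned in the KCSAN help text are the ASSERT_EXCLUSIVE_*() family from <linux/kcsan-checks.h>, and intentionally racy reads can be wrapped in data_race(). A small hedged sketch (the counter and its lock are invented for illustration):

	#include <linux/compiler.h>
	#include <linux/kcsan-checks.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(stats_lock);
	static unsigned long nr_events;		/* writers must hold stats_lock */

	void stats_inc(void)
	{
		spin_lock(&stats_lock);
		/* Assert the locking rule: KCSAN reports any concurrent writer. */
		ASSERT_EXCLUSIVE_WRITER(nr_events);
		nr_events++;
		spin_unlock(&stats_lock);
	}

	unsigned long stats_peek(void)
	{
		/* Diagnostic read that tolerates racing writers; data_race() marks
		 * the access as intentional so KCSAN does not report it.
		 */
		return data_race(nr_events);
	}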
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 27bcc2568c95..774315de555a 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -26,9 +26,20 @@ config UBSAN_TRAP
the system. For some system builders this is an acceptable
trade-off.
+config UBSAN_KCOV_BROKEN
+ def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
+ depends on CC_IS_CLANG
+ depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=bounds -fsanitize-coverage=trace-pc)
+ help
+ Some versions of clang support either UBSAN or KCOV but not the
+ combination of the two.
+ See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
+ in newer releases.
+
config UBSAN_BOUNDS
bool "Perform array index bounds checking"
default UBSAN
+ depends on !UBSAN_KCOV_BROKEN
help
This option enables detection of directly indexed out of bounds
array accesses, where the array size is known at compile time.
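UBSAN_BOUNDS above covers directly indexed accesses to arrays whose size is known at compile time; the following is a contrived sketch of the kind of access it reports at runtime, not taken from any real driver:

	static int table[4];

	int read_entry(int idx)
	{
		/* With CONFIG_UBSAN_BOUNDS=y, calling read_entry(4) produces a
		 * UBSAN report, because the bound of 'table' is known statically.
		 */
		return table[idx];
	}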
diff --git a/lib/Makefile b/lib/Makefile
index 5adf8949a757..b1c42c10073b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,9 @@ KASAN_SANITIZE_string.o := n
CFLAGS_string.o := $(call cc-option, -fno-stack-protector)
endif
+# Used by KCSAN while enabled, avoid recursion.
+KCSAN_SANITIZE_random32.o := n
+
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o extable.o sha1.o irq_regs.o argv_split.o \
@@ -56,6 +59,8 @@ obj-y += kstrtox.o
obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
+CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
@@ -188,7 +193,7 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
-obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
+obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o
obj-$(CONFIG_NLATTR) += nlattr.o
@@ -294,6 +299,7 @@ endif
UBSAN_SANITIZE_ubsan.o := n
KASAN_SANITIZE_ubsan.o := n
+KCSAN_SANITIZE_ubsan.o := n
CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/bch.c b/lib/bch.c
index 052d3fb753a0..7c031ee8b93b 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -23,15 +23,15 @@
* This library provides runtime configurable encoding/decoding of binary
* Bose-Chaudhuri-Hocquenghem (BCH) codes.
*
- * Call init_bch to get a pointer to a newly allocated bch_control structure for
+ * Call bch_init to get a pointer to a newly allocated bch_control structure for
* the given m (Galois field order), t (error correction capability) and
* (optional) primitive polynomial parameters.
*
- * Call encode_bch to compute and store ecc parity bytes to a given buffer.
- * Call decode_bch to detect and locate errors in received data.
+ * Call bch_encode to compute and store ecc parity bytes to a given buffer.
+ * Call bch_decode to detect and locate errors in received data.
*
* On systems supporting hw BCH features, intermediate results may be provided
- * to decode_bch in order to skip certain steps. See decode_bch() documentation
+ * to bch_decode in order to skip certain steps. See bch_decode() documentation
* for details.
*
* Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of
@@ -114,10 +114,53 @@ struct gf_poly_deg1 {
unsigned int c[2];
};
+static u8 swap_bits_table[] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
+
+static u8 swap_bits(struct bch_control *bch, u8 in)
+{
+ if (!bch->swap_bits)
+ return in;
+
+ return swap_bits_table[in];
+}
+
/*
- * same as encode_bch(), but process input data one byte at a time
+ * same as bch_encode(), but process input data one byte at a time
*/
-static void encode_bch_unaligned(struct bch_control *bch,
+static void bch_encode_unaligned(struct bch_control *bch,
const unsigned char *data, unsigned int len,
uint32_t *ecc)
{
@@ -126,7 +169,9 @@ static void encode_bch_unaligned(struct bch_control *bch,
const int l = BCH_ECC_WORDS(bch)-1;
while (len--) {
- p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(*data++)) & 0xff);
+ u8 tmp = swap_bits(bch, *data++);
+
+ p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff);
for (i = 0; i < l; i++)
ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++);
@@ -145,10 +190,16 @@ static void load_ecc8(struct bch_control *bch, uint32_t *dst,
unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
for (i = 0; i < nwords; i++, src += 4)
- dst[i] = (src[0] << 24)|(src[1] << 16)|(src[2] << 8)|src[3];
+ dst[i] = ((u32)swap_bits(bch, src[0]) << 24) |
+ ((u32)swap_bits(bch, src[1]) << 16) |
+ ((u32)swap_bits(bch, src[2]) << 8) |
+ swap_bits(bch, src[3]);
memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords);
- dst[nwords] = (pad[0] << 24)|(pad[1] << 16)|(pad[2] << 8)|pad[3];
+ dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) |
+ ((u32)swap_bits(bch, pad[1]) << 16) |
+ ((u32)swap_bits(bch, pad[2]) << 8) |
+ swap_bits(bch, pad[3]);
}
/*
@@ -161,20 +212,20 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
unsigned int i, nwords = BCH_ECC_WORDS(bch)-1;
for (i = 0; i < nwords; i++) {
- *dst++ = (src[i] >> 24);
- *dst++ = (src[i] >> 16) & 0xff;
- *dst++ = (src[i] >> 8) & 0xff;
- *dst++ = (src[i] >> 0) & 0xff;
+ *dst++ = swap_bits(bch, src[i] >> 24);
+ *dst++ = swap_bits(bch, src[i] >> 16);
+ *dst++ = swap_bits(bch, src[i] >> 8);
+ *dst++ = swap_bits(bch, src[i]);
}
- pad[0] = (src[nwords] >> 24);
- pad[1] = (src[nwords] >> 16) & 0xff;
- pad[2] = (src[nwords] >> 8) & 0xff;
- pad[3] = (src[nwords] >> 0) & 0xff;
+ pad[0] = swap_bits(bch, src[nwords] >> 24);
+ pad[1] = swap_bits(bch, src[nwords] >> 16);
+ pad[2] = swap_bits(bch, src[nwords] >> 8);
+ pad[3] = swap_bits(bch, src[nwords]);
memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords);
}
/**
- * encode_bch - calculate BCH ecc parity of data
+ * bch_encode - calculate BCH ecc parity of data
* @bch: BCH control structure
* @data: data to encode
* @len: data length in bytes
@@ -187,7 +238,7 @@ static void store_ecc8(struct bch_control *bch, uint8_t *dst,
* The exact number of computed ecc parity bits is given by member @ecc_bits of
* @bch; it may be less than m*t for large values of t.
*/
-void encode_bch(struct bch_control *bch, const uint8_t *data,
+void bch_encode(struct bch_control *bch, const uint8_t *data,
unsigned int len, uint8_t *ecc)
{
const unsigned int l = BCH_ECC_WORDS(bch)-1;
@@ -215,7 +266,7 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
m = ((unsigned long)data) & 3;
if (m) {
mlen = (len < (4-m)) ? len : 4-m;
- encode_bch_unaligned(bch, data, mlen, bch->ecc_buf);
+ bch_encode_unaligned(bch, data, mlen, bch->ecc_buf);
data += mlen;
len -= mlen;
}
@@ -240,7 +291,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
*/
while (mlen--) {
/* input data is read in big-endian format */
- w = r[0]^cpu_to_be32(*pdata++);
+ w = cpu_to_be32(*pdata++);
+ if (bch->swap_bits)
+ w = (u32)swap_bits(bch, w) |
+ ((u32)swap_bits(bch, w >> 8) << 8) |
+ ((u32)swap_bits(bch, w >> 16) << 16) |
+ ((u32)swap_bits(bch, w >> 24) << 24);
+ w ^= r[0];
p0 = tab0 + (l+1)*((w >> 0) & 0xff);
p1 = tab1 + (l+1)*((w >> 8) & 0xff);
p2 = tab2 + (l+1)*((w >> 16) & 0xff);
@@ -255,13 +312,13 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
/* process last unaligned bytes */
if (len)
- encode_bch_unaligned(bch, data, len, bch->ecc_buf);
+ bch_encode_unaligned(bch, data, len, bch->ecc_buf);
/* store ecc parity bytes into original parity buffer */
if (ecc)
store_ecc8(bch, ecc, bch->ecc_buf);
}
-EXPORT_SYMBOL_GPL(encode_bch);
+EXPORT_SYMBOL_GPL(bch_encode);
static inline int modulo(struct bch_control *bch, unsigned int v)
{
@@ -952,7 +1009,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
#endif /* USE_CHIEN_SEARCH */
/**
- * decode_bch - decode received codeword and find bit error locations
+ * bch_decode - decode received codeword and find bit error locations
* @bch: BCH control structure
* @data: received data, ignored if @calc_ecc is provided
* @len: data length in bytes, must always be provided
@@ -966,22 +1023,22 @@ static int chien_search(struct bch_control *bch, unsigned int len,
* invalid parameters were provided
*
* Depending on the available hw BCH support and the need to compute @calc_ecc
- * separately (using encode_bch()), this function should be called with one of
+ * separately (using bch_encode()), this function should be called with one of
* the following parameter configurations -
*
* by providing @data and @recv_ecc only:
- * decode_bch(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
+ * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc)
*
* by providing @recv_ecc and @calc_ecc:
- * decode_bch(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
+ * bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc)
*
* by providing ecc = recv_ecc XOR calc_ecc:
- * decode_bch(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
+ * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc)
*
* by providing syndrome results @syn:
- * decode_bch(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
+ * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc)
*
- * Once decode_bch() has successfully returned with a positive value, error
+ * Once bch_decode() has successfully returned with a positive value, error
* locations returned in array @errloc should be interpreted as follows -
*
* if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for
@@ -993,7 +1050,7 @@ static int chien_search(struct bch_control *bch, unsigned int len,
* Note that this function does not perform any data correction by itself, it
* merely indicates error locations.
*/
-int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len,
const uint8_t *recv_ecc, const uint8_t *calc_ecc,
const unsigned int *syn, unsigned int *errloc)
{
@@ -1012,7 +1069,7 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
/* compute received data ecc into an internal buffer */
if (!data || !recv_ecc)
return -EINVAL;
- encode_bch(bch, data, len, NULL);
+ bch_encode(bch, data, len, NULL);
} else {
/* load provided calculated ecc */
load_ecc8(bch, bch->ecc_buf, calc_ecc);
@@ -1048,12 +1105,14 @@ int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
break;
}
errloc[i] = nbits-1-errloc[i];
- errloc[i] = (errloc[i] & ~7)|(7-(errloc[i] & 7));
+ if (!bch->swap_bits)
+ errloc[i] = (errloc[i] & ~7) |
+ (7-(errloc[i] & 7));
}
}
return (err >= 0) ? err : -EBADMSG;
}
-EXPORT_SYMBOL_GPL(decode_bch);
+EXPORT_SYMBOL_GPL(bch_decode);
/*
* generate Galois field lookup tables
@@ -1236,27 +1295,29 @@ finish:
}
/**
- * init_bch - initialize a BCH encoder/decoder
+ * bch_init - initialize a BCH encoder/decoder
* @m: Galois field order, should be in the range 5-15
* @t: maximum error correction capability, in bits
* @prim_poly: user-provided primitive polynomial (or 0 to use default)
+ * @swap_bits: swap bits within data and syndrome bytes
*
* Returns:
* a newly allocated BCH control structure if successful, NULL otherwise
*
* This initialization can take some time, as lookup tables are built for fast
* encoding/decoding; make sure not to call this function from a time critical
- * path. Usually, init_bch() should be called on module/driver init and
- * free_bch() should be called to release memory on exit.
+ * path. Usually, bch_init() should be called on module/driver init and
+ * bch_free() should be called to release memory on exit.
*
* You may provide your own primitive polynomial of degree @m in argument
- * @prim_poly, or let init_bch() use its default polynomial.
+ * @prim_poly, or let bch_init() use its default polynomial.
*
- * Once init_bch() has successfully returned a pointer to a newly allocated
+ * Once bch_init() has successfully returned a pointer to a newly allocated
* BCH control structure, ecc length in bytes is given by member @ecc_bytes of
* the structure.
*/
-struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
+struct bch_control *bch_init(int m, int t, unsigned int prim_poly,
+ bool swap_bits)
{
int err = 0;
unsigned int i, words;
@@ -1321,6 +1382,7 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err);
bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err);
bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err);
+ bch->swap_bits = swap_bits;
for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++)
bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err);
@@ -1347,16 +1409,16 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
return bch;
fail:
- free_bch(bch);
+ bch_free(bch);
return NULL;
}
-EXPORT_SYMBOL_GPL(init_bch);
+EXPORT_SYMBOL_GPL(bch_init);
/**
- * free_bch - free the BCH control structure
+ * bch_free - free the BCH control structure
* @bch: BCH control structure to release
*/
-void free_bch(struct bch_control *bch)
+void bch_free(struct bch_control *bch)
{
unsigned int i;
@@ -1377,7 +1439,7 @@ void free_bch(struct bch_control *bch)
kfree(bch);
}
}
-EXPORT_SYMBOL_GPL(free_bch);
+EXPORT_SYMBOL_GPL(bch_free);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 21a7640c5eed..0364452b1617 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -741,8 +741,9 @@ int bitmap_parse(const char *start, unsigned int buflen,
int chunks = BITS_TO_U32(nmaskbits);
u32 *bitmap = (u32 *)maskp;
int unset_bit;
+ int chunk;
- while (1) {
+ for (chunk = 0; ; chunk++) {
end = bitmap_find_region_reverse(start, end);
if (start > end)
break;
@@ -750,7 +751,11 @@ int bitmap_parse(const char *start, unsigned int buflen,
if (!chunks--)
return -EOVERFLOW;
- end = bitmap_get_x32_reverse(start, end, bitmap++);
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
+#else
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
+#endif
if (IS_ERR(end))
return PTR_ERR(end);
}
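The bitmap_parse() change above only affects 64-bit big-endian hosts: parsed 32-bit chunks can no longer be stored through a walking u32 pointer, because the two halves of each unsigned long sit in the opposite order in memory. A sketch of the index mapping the "chunk ^ 1" expression implements (the helper name is invented):

	/* Map a parse-order chunk number (least-significant chunk first) to the
	 * u32 slot it must land in when the bitmap is backed by unsigned longs.
	 */
	static inline unsigned int chunk_to_u32_index(unsigned int chunk)
	{
	#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		return chunk ^ 1;	/* swap the halves of each 64-bit word: 0<->1, 2<->3, ... */
	#else
		return chunk;		/* 32-bit or little-endian: halves already in parse order */
	#endif
	}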
diff --git a/lib/bug.c b/lib/bug.c
index 8c98af0bf585..7103440c0ee1 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,6 +47,7 @@
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
+#include <linux/ftrace.h>
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
@@ -153,6 +154,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
if (!bug)
return BUG_TRAP_TYPE_NONE;
+ disable_trace_on_warning();
+
file = NULL;
line = 0;
warning = 0;
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 33ffbf308853..a00ee6eedc7c 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -74,7 +74,7 @@ void show_regs_print_info(const char *log_lvl)
static void __dump_stack(void)
{
dump_stack_print_info(KERN_DEFAULT);
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_DEFAULT);
}
/**
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8f199f403ab5..321437bbf87d 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -1032,8 +1032,13 @@ static int __init dynamic_debug_init(void)
int verbose_bytes = 0;
if (&__start___verbose == &__stop___verbose) {
- pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
- return 1;
+ if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) {
+ pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n");
+ return 1;
+ }
+ pr_info("Ignore empty _ddebug table in a CONFIG_DYNAMIC_DEBUG_CORE build\n");
+ ddebug_init_success = 1;
+ return 0;
}
iter = __start___verbose;
modname = iter->modname;
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 8186ca84910b..ce12621b4275 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -106,7 +106,9 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
unsigned int fail_nth = READ_ONCE(current->fail_nth);
if (fail_nth) {
- if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+ fail_nth--;
+ WRITE_ONCE(current->fail_nth, fail_nth);
+ if (!fail_nth)
goto fail;
return false;
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 7852bfff50b1..451543937524 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -266,8 +266,7 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
if (numerator >
(((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
return;
- } else
- fprop_reflect_period_percpu(p, pl);
- percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&p->events, 1);
+ }
+
+ __fprop_inc_percpu(p, pl);
}
diff --git a/lib/ioremap.c b/lib/ioremap.c
index ad485f08173b..5ee3526f71b8 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -12,7 +12,6 @@
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 51595bf3af85..bf538c2bec77 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -8,6 +8,7 @@
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
+#include <linux/instrumented.h>
#define PIPE_PARANOIA /* for now */
@@ -138,7 +139,7 @@
static int copyout(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
@@ -147,7 +148,7 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
if (access_ok(from, n)) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
n = raw_copy_from_user(to, from, n);
}
return n;
@@ -639,7 +640,7 @@ EXPORT_SYMBOL(_copy_to_iter);
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = copy_to_user_mcsafe((__force void *) to, from, n);
}
return n;
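copyout(), copyin() and copyout_mcsafe() now report the kernel-side buffer through the instrumented.h helpers rather than calling KASAN directly, so every enabled sanitizer sees the access. A simplified sketch of what instrument_copy_to_user() amounts to (the suffix is added to make clear this is not the exact in-tree definition):

	#include <linux/compiler.h>
	#include <linux/kasan-checks.h>
	#include <linux/kcsan-checks.h>

	/* Before copying @n bytes from @from out to userspace at @to, tell each
	 * enabled backend about the read of the kernel buffer.
	 */
	static __always_inline void
	instrument_copy_to_user_sketch(void __user *to, const void *from, unsigned long n)
	{
		kasan_check_read(from, n);	/* KASAN: catches copies from invalid memory */
		kcsan_check_read(from, n);	/* KCSAN: catches writers racing with the copy */
	}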
diff --git a/lib/kobject.c b/lib/kobject.c
index 65fa7bf70c57..1e4b7382a88e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -620,6 +620,13 @@ void kobject_del(struct kobject *kobj)
if (ktype)
sysfs_remove_groups(kobj, ktype->default_groups);
+ /* send "remove" if the caller did not do it but sent "add" */
+ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+ pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
+ kobject_name(kobj), kobj);
+ kobject_uevent(kobj, KOBJ_REMOVE);
+ }
+
sysfs_remove_dir(kobj);
sysfs_put(sd);
@@ -673,13 +680,6 @@ static void kobject_cleanup(struct kobject *kobj)
pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
kobject_name(kobj), kobj);
- /* send "remove" if the caller did not do it but sent "add" */
- if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
- pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
- kobject_name(kobj), kobj);
- kobject_uevent(kobj, KOBJ_REMOVE);
- }
-
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 95d12e3d6d95..00909e6a2443 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -15,7 +15,8 @@ menuconfig KUNIT
if KUNIT
config KUNIT_DEBUGFS
- bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation"
+ bool "KUnit - Enable /sys/kernel/debug/kunit debugfs representation" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
help
Enable debugfs representation for kunit. Currently this consists
of /sys/kernel/debug/kunit/<test_suite>/results files for each
@@ -23,7 +24,8 @@ config KUNIT_DEBUGFS
run that occurred.
config KUNIT_TEST
- tristate "KUnit test for KUnit"
+ tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
help
Enables the unit tests for the KUnit test framework. These tests test
the KUnit test framework itself; the tests are both written using
@@ -32,7 +34,8 @@ config KUNIT_TEST
expected.
config KUNIT_EXAMPLE_TEST
- tristate "Example test for KUnit"
+ tristate "Example test for KUnit" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
help
Enables an example unit test that illustrates some of the basic
features of KUnit. This test only exists to help new users understand
@@ -41,4 +44,18 @@ config KUNIT_EXAMPLE_TEST
is intended for curious hackers who would like to understand how to
use KUnit for kernel development.
+config KUNIT_ALL_TESTS
+ tristate "All KUnit tests with satisfied dependencies"
+ help
+ Enables all KUnit tests, if they can be enabled.
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
endif # KUNIT
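KUNIT_ALL_TESTS above lets every suite with satisfied dependencies default on; the entries it covers are ordinary KUnit suites. A minimal hedged sketch of one such suite (names and the checked expression are invented):

	#include <kunit/test.h>

	static void math_sanity_test(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 4, 2 + 2);
	}

	static struct kunit_case sample_cases[] = {
		KUNIT_CASE(math_sanity_test),
		{}
	};

	static struct kunit_suite sample_suite = {
		.name = "sample-suite",
		.test_cases = sample_cases,
	};
	kunit_test_suite(sample_suite);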
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index f511a99bb389..f32fe481b492 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -229,13 +229,13 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
}
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
-#define BUILD_LOGIC_IO(bw, type) \
-type logic_in##bw(unsigned long addr) \
+#define BUILD_LOGIC_IO(bwl, type) \
+type logic_in##bwl(unsigned long addr) \
{ \
type ret = (type)~0; \
\
if (addr < MMIO_UPPER_LIMIT) { \
- ret = read##bw(PCI_IOBASE + addr); \
+ ret = _in##bwl(addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
@@ -248,10 +248,10 @@ type logic_in##bw(unsigned long addr) \
return ret; \
} \
\
-void logic_out##bw(type value, unsigned long addr) \
+void logic_out##bwl(type value, unsigned long addr) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- write##bw(value, PCI_IOBASE + addr); \
+ _out##bwl(value, addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
@@ -263,11 +263,11 @@ void logic_out##bw(type value, unsigned long addr) \
} \
} \
\
-void logic_ins##bw(unsigned long addr, void *buffer, \
- unsigned int count) \
+void logic_ins##bwl(unsigned long addr, void *buffer, \
+ unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- reads##bw(PCI_IOBASE + addr, buffer, count); \
+ reads##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
@@ -280,11 +280,11 @@ void logic_ins##bw(unsigned long addr, void *buffer, \
\
} \
\
-void logic_outs##bw(unsigned long addr, const void *buffer, \
- unsigned int count) \
+void logic_outs##bwl(unsigned long addr, const void *buffer, \
+ unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
- writes##bw(PCI_IOBASE + addr, buffer, count); \
+ writes##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 0c9d3ad17e0f..5371dab6b481 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -141,6 +141,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
* space in the output for those 18 bytes earlier, upon
* entering the shortcut (in other words, there is a
* combined check for both stages).
+ *
+ * The & in the likely() below is intentionally not && so that
+ * some compilers can produce better parallelized runtime code
*/
if ((endOnInput ? length != RUN_MASK : length <= 8)
/*
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 717c940112f9..8ad5ba2b86e2 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -268,6 +268,19 @@ m_len_done:
*op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
else {
+ if (unlikely(((m_off & 0x403f) == 0x403f)
+ && (m_len >= 261)
+ && (m_len <= 264))
+ && likely(bitstream_version)) {
+ // Under lzo-rle, block copies
+ // for 261 <= length <= 264 and
+ // (distance & 0x80f3) == 0x80f3
+ // can result in ambiguous
+ // output. Adjust length
+ // to 260 to prevent ambiguity.
+ ip -= m_len - 260;
+ m_len = 260;
+ }
m_len -= M4_MAX_LEN;
*op++ = (M4_MARKER | ((m_off >> 11) & 8));
while (unlikely(m_len > 255)) {
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
index 15bd50d92308..f19bc9734fa7 100644
--- a/lib/math/Kconfig
+++ b/lib/math/Kconfig
@@ -6,7 +6,12 @@ config CORDIC
calculations are in fixed point. Module will be called cordic.
config PRIME_NUMBERS
- tristate
+ tristate "Simple prime number generator for testing"
+ help
+ This option provides a simple prime number generator for test
+ modules.
+
+ If unsure, say N.
config RATIONAL
bool
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index 052f5b727be7..d42cebf7407f 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "prime numbers: " fmt "\n"
+#define pr_fmt(fmt) "prime numbers: " fmt
#include <linux/module.h>
#include <linux/mutex.h>
@@ -253,7 +253,7 @@ static void dump_primes(void)
if (buf)
bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
- pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
+ pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s\n",
p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
rcu_read_unlock();
@@ -273,7 +273,7 @@ static int selftest(unsigned long max)
bool fast = is_prime_number(x);
if (slow != fast) {
- pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
+ pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!\n",
x, slow ? "yes" : "no", fast ? "yes" : "no");
goto err;
}
@@ -282,14 +282,14 @@ static int selftest(unsigned long max)
continue;
if (next_prime_number(last) != x) {
- pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
+ pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu\n",
last, x, next_prime_number(last));
goto err;
}
last = x;
}
- pr_info("selftest(%lu) passed, last prime was %lu", x, last);
+ pr_info("%s(%lu) passed, last prime was %lu\n", __func__, x, last);
return 0;
err:
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 8d092609928e..0ba686b8fe57 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -141,8 +141,8 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(percpu_count, cpu);
- pr_debug("global %ld percpu %ld",
- atomic_long_read(&ref->count), (long)count);
+ pr_debug("global %lu percpu %lu\n",
+ atomic_long_read(&ref->count), count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index bdb7e4cadf05..9f6890aedd1a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
+static inline union nested_table *nested_table_top(
+ const struct bucket_table *tbl)
+{
+ /* The top-level bucket entry does not need RCU protection
+ * because it's set at the same time as tbl->nest.
+ */
+ return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
+}
+
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
const unsigned int len = 1 << shift;
unsigned int i;
- ntbl = rcu_dereference_raw(ntbl->table);
+ ntbl = rcu_dereference_protected(ntbl->table, 1);
if (!ntbl)
return;
@@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl)
union nested_table *ntbl;
unsigned int i;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
for (i = 0; i < len; i++)
nested_table_free(ntbl + i, size);
@@ -1173,7 +1182,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int subhash = hash;
union nested_table *ntbl;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
subhash >>= tbl->nest;
@@ -1213,7 +1222,7 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
unsigned int size = tbl->size >> tbl->nest;
union nested_table *ntbl;
- ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+ ntbl = nested_table_top(tbl);
hash >>= tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
size <= (1 << shift));
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index b90ec550183a..34696a348864 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -98,6 +98,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max_addr, src_addr;
+ might_fault();
if (unlikely(count <= 0))
return 0;
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
new file mode 100644
index 000000000000..ced25e3a779b
--- /dev/null
+++ b/lib/test_bitops.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+/* a tiny module only meant to test
+ *
+ * set/clear_bit
+ * get_count_order/long
+ */
+
+/* use an enum because that's the most common BITMAP usage */
+enum bitops_fun {
+ BITOPS_4 = 4,
+ BITOPS_7 = 7,
+ BITOPS_11 = 11,
+ BITOPS_31 = 31,
+ BITOPS_88 = 88,
+ BITOPS_LAST = 255,
+ BITOPS_LENGTH = 256
+};
+
+static DECLARE_BITMAP(g_bitmap, BITOPS_LENGTH);
+
+static unsigned int order_comb[][2] = {
+ {0x00000003, 2},
+ {0x00000004, 2},
+ {0x00001fff, 13},
+ {0x00002000, 13},
+ {0x50000000, 31},
+ {0x80000000, 31},
+ {0x80003000, 32},
+};
+
+#ifdef CONFIG_64BIT
+static unsigned long order_comb_long[][2] = {
+ {0x0000000300000000, 34},
+ {0x0000000400000000, 34},
+ {0x00001fff00000000, 45},
+ {0x0000200000000000, 45},
+ {0x5000000000000000, 63},
+ {0x8000000000000000, 63},
+ {0x8000300000000000, 64},
+};
+#endif
+
+static int __init test_bitops_startup(void)
+{
+ int i;
+
+ pr_warn("Loaded test module\n");
+ set_bit(BITOPS_4, g_bitmap);
+ set_bit(BITOPS_7, g_bitmap);
+ set_bit(BITOPS_11, g_bitmap);
+ set_bit(BITOPS_31, g_bitmap);
+ set_bit(BITOPS_88, g_bitmap);
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order(order_comb[i][0]))
+ pr_warn("get_count_order wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
+ if (order_comb[i][1] != get_count_order_long(order_comb[i][0]))
+ pr_warn("get_count_order_long wrong for %x\n",
+ order_comb[i][0]);
+ }
+
+#ifdef CONFIG_64BIT
+ for (i = 0; i < ARRAY_SIZE(order_comb_long); i++) {
+ if (order_comb_long[i][1] !=
+ get_count_order_long(order_comb_long[i][0]))
+ pr_warn("get_count_order_long wrong for %lx\n",
+ order_comb_long[i][0]);
+ }
+#endif
+ return 0;
+}
+
+static void __exit test_bitops_unstartup(void)
+{
+ int bit_set;
+
+ clear_bit(BITOPS_4, g_bitmap);
+ clear_bit(BITOPS_7, g_bitmap);
+ clear_bit(BITOPS_11, g_bitmap);
+ clear_bit(BITOPS_31, g_bitmap);
+ clear_bit(BITOPS_88, g_bitmap);
+
+ bit_set = find_first_bit(g_bitmap, BITOPS_LAST);
+ if (bit_set != BITOPS_LAST)
+ pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
+
+ pr_warn("Unloaded test module\n");
+}
+
+module_init(test_bitops_startup);
+module_exit(test_bitops_unstartup);
+
+MODULE_AUTHOR("Jesse Brandeburg <jesse.brandeburg@intel.com>, Wei Yang <richard.weiyang@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bit testing module");
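The order_comb tables in the new test pin down the semantics being exercised: get_count_order()/get_count_order_long() return the ceiling of log2, so 0x00000003 -> 2, 0x00002000 -> 13 and 0x80003000 -> 32. A hedged usage sketch (the wrapper name is invented):

	#include <linux/bitops.h>

	/* Round an allocation size up to a power-of-two order, e.g. 4096 -> 12
	 * and 4097 -> 13. get_count_order_long() returns -1 for a size of 0,
	 * so callers must pass a non-zero value.
	 */
	static inline int bytes_to_order(unsigned long bytes)
	{
		return get_count_order_long(bytes);
	}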
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 0c7fbcf07ac5..9fee2b93a8d1 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -310,27 +310,13 @@ static int test_dev_config_update_bool(const char *buf, size_t size,
return ret;
}
-static ssize_t
-test_dev_config_show_bool(char *buf,
- bool config)
+static ssize_t test_dev_config_show_bool(char *buf, bool val)
{
- bool val;
-
- mutex_lock(&test_fw_mutex);
- val = config;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static ssize_t test_dev_config_show_int(char *buf, int cfg)
+static ssize_t test_dev_config_show_int(char *buf, int val)
{
- int val;
-
- mutex_lock(&test_fw_mutex);
- val = cfg;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
@@ -354,14 +340,8 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
return size;
}
-static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
+static ssize_t test_dev_config_show_u8(char *buf, u8 val)
{
- u8 val;
-
- mutex_lock(&test_fw_mutex);
- val = cfg;
- mutex_unlock(&test_fw_mutex);
-
return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 5c1858e325ba..28528285942c 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -245,9 +245,9 @@ static int dmirror_range_fault(struct dmirror *dmirror,
}
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
@@ -686,7 +686,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
if (!mmget_not_zero(mm))
return -EINVAL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (addr = start; addr < end; addr = next) {
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start ||
@@ -713,7 +713,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
dmirror_migrate_finalize_and_map(&args, dmirror);
migrate_vma_finalize(&args);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
/* Return the migrated data for verification. */
@@ -733,7 +733,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
return ret;
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return ret;
}
@@ -825,9 +825,9 @@ static int dmirror_range_snapshot(struct dmirror *dmirror,
range->notifier_seq = mmu_interval_read_begin(range->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
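This file and several below convert open-coded mmap_sem rwsem calls to the mmap_lock API. The wrappers are thin; a sketch of what they amount to, assuming the helpers added by the mmap locking API conversion in include/linux/mmap_lock.h:

        #include <linux/mm_types.h>
        #include <linux/rwsem.h>

        /* sketch of the wrapper mapping; the real ones live in mmap_lock.h */
        static inline void mmap_read_lock(struct mm_struct *mm)
        {
                down_read(&mm->mmap_lock);
        }

        static inline void mmap_read_unlock(struct mm_struct *mm)
        {
                up_read(&mm->mmap_lock);
        }

        static inline void mmap_write_lock(struct mm_struct *mm)
        {
                down_write(&mm->mmap_lock);
        }

        static inline void mmap_write_unlock(struct mm_struct *mm)
        {
                up_write(&mm->mmap_lock);
        }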
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index ea09ca335b21..f258743a0d83 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls");
static bool lock_mmap_sem;
module_param(lock_mmap_sem, bool, 0400);
-MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_sem: block procfs interfaces");
+MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces");
static unsigned long lock_rwsem_ptr;
module_param_unsafe(lock_rwsem_ptr, ulong, 0400);
@@ -142,7 +142,7 @@ module_param(reallocate_pages, bool, 0400);
MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations");
struct file *test_file;
-struct inode *test_inode;
+static struct inode *test_inode;
static char test_file_path[256];
module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400);
MODULE_PARM_DESC(file_path, "file path to test");
@@ -191,11 +191,11 @@ static void test_lock(bool master, bool verbose)
if (lock_mmap_sem && master) {
if (verbose)
- pr_notice("lock mmap_sem pid=%d\n", main_task->pid);
+ pr_notice("lock mmap_lock pid=%d\n", main_task->pid);
if (lock_read)
- down_read(&main_task->mm->mmap_sem);
+ mmap_read_lock(main_task->mm);
else
- down_write(&main_task->mm->mmap_sem);
+ mmap_write_lock(main_task->mm);
}
if (test_disable_irq)
@@ -276,11 +276,11 @@ static void test_unlock(bool master, bool verbose)
if (lock_mmap_sem && master) {
if (lock_read)
- up_read(&main_task->mm->mmap_sem);
+ mmap_read_unlock(main_task->mm);
else
- up_write(&main_task->mm->mmap_sem);
+ mmap_write_unlock(main_task->mm);
if (verbose)
- pr_notice("unlock mmap_sem pid=%d\n", main_task->pid);
+ pr_notice("unlock mmap_lock pid=%d\n", main_task->pid);
}
if (lock_rwsem_ptr && master) {
@@ -505,7 +505,7 @@ static int __init test_lockup_init(void)
}
if (lock_mmap_sem && !main_task->mm) {
- pr_err("no mm to lock mmap_sem\n");
+ pr_err("no mm to lock mmap_lock\n");
return -EINVAL;
}
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 7d60f24240a4..7ac87f18a10f 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -644,7 +644,9 @@ static void __init fwnode_pointer(void)
test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
- software_node_unregister_nodes(softnodes);
+ software_node_unregister(&softnodes[2]);
+ software_node_unregister(&softnodes[1]);
+ software_node_unregister(&softnodes[0]);
}
static void __init
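The three explicit software_node_unregister() calls replace software_node_unregister_nodes() on the whole array and run in reverse index order, presumably so each child node is removed before its parent. The unrolled calls are equivalent to a reverse loop over the array:

        /* equivalent reverse-order teardown of the softnodes[] array */
        int i;

        for (i = ARRAY_SIZE(softnodes) - 1; i >= 0; i--)
                software_node_unregister(&softnodes[i]);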
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 566dad3f4196..98bc92a91662 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -44,6 +44,8 @@ struct test_sysctl_data {
int int_0002;
int int_0003[4];
+ int boot_int;
+
unsigned int uint_0001;
char string_0001[65];
@@ -61,6 +63,8 @@ static struct test_sysctl_data test_data = {
.int_0003[2] = 2,
.int_0003[3] = 3,
+ .boot_int = 0,
+
.uint_0001 = 314,
.string_0001 = "(none)",
@@ -92,6 +96,15 @@ static struct ctl_table test_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "boot_int",
+ .data = &test_data.boot_int,
+ .maxlen = sizeof(test_data.boot_int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "uint_0001",
.data = &test_data.uint_0001,
.maxlen = sizeof(unsigned int),
@@ -149,7 +162,7 @@ static int __init test_sysctl_init(void)
}
return 0;
}
-late_initcall(test_sysctl_init);
+module_init(test_sysctl_init);
static void __exit test_sysctl_exit(void)
{
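boot_int is a plain int sysctl whose intended range is 0..1, expressed through extra1/extra2. As a point of reference, those bounds are only enforced by the *_minmax family of proc handlers; a minimal sketch of a strictly clamped variant (hypothetical entry, illustration only):

        static int demo_val;

        static struct ctl_table demo_table[] = {
                {
                        .procname       = "demo_int",   /* hypothetical */
                        .data           = &demo_val,
                        .maxlen         = sizeof(demo_val),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec_minmax, /* rejects writes outside extra1..extra2 */
                        .extra1         = SYSCTL_ZERO,
                        .extra2         = SYSCTL_ONE,
                },
                { }
        };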
diff --git a/lib/ubsan.c b/lib/ubsan.c
index f8c0ccf35f29..cb9af3f6b77e 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -189,7 +189,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
ubsan_epilogue();
}
-void __ubsan_handle_add_overflow(struct overflow_data *data,
+void __ubsan_handle_add_overflow(void *data,
void *lhs, void *rhs)
{
@@ -197,23 +197,23 @@ void __ubsan_handle_add_overflow(struct overflow_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_add_overflow);
-void __ubsan_handle_sub_overflow(struct overflow_data *data,
+void __ubsan_handle_sub_overflow(void *data,
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '-');
}
EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
-void __ubsan_handle_mul_overflow(struct overflow_data *data,
+void __ubsan_handle_mul_overflow(void *data,
void *lhs, void *rhs)
{
handle_overflow(data, lhs, rhs, '*');
}
EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
-void __ubsan_handle_negate_overflow(struct overflow_data *data,
- void *old_val)
+void __ubsan_handle_negate_overflow(void *_data, void *old_val)
{
+ struct overflow_data *data = _data;
char old_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
@@ -231,9 +231,9 @@ void __ubsan_handle_negate_overflow(struct overflow_data *data,
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
-void __ubsan_handle_divrem_overflow(struct overflow_data *data,
- void *lhs, void *rhs)
+void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
+ struct overflow_data *data = _data;
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
@@ -326,10 +326,9 @@ void __ubsan_handle_type_mismatch(struct type_mismatch_data *data,
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch);
-void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
- void *ptr)
+void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr)
{
-
+ struct type_mismatch_data_v1 *data = _data;
struct type_mismatch_data_common common_data = {
.location = &data->location,
.type = data->type,
@@ -341,8 +340,9 @@ void __ubsan_handle_type_mismatch_v1(struct type_mismatch_data_v1 *data,
}
EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
-void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
+void __ubsan_handle_out_of_bounds(void *_data, void *index)
{
+ struct out_of_bounds_data *data = _data;
char index_str[VALUE_LENGTH];
if (suppress_report(&data->location))
@@ -357,9 +357,9 @@ void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
-void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
- void *lhs, void *rhs)
+void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs)
{
+ struct shift_out_of_bounds_data *data = _data;
struct type_descriptor *rhs_type = data->rhs_type;
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
@@ -399,8 +399,9 @@ out:
EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
-void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(void *_data)
{
+ struct unreachable_data *data = _data;
ubsan_prologue(&data->location, "unreachable");
pr_err("calling __builtin_unreachable()\n");
ubsan_epilogue();
@@ -408,9 +409,9 @@ void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
-void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
- void *val)
+void __ubsan_handle_load_invalid_value(void *_data, void *val)
{
+ struct invalid_value_data *data = _data;
char val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
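These handlers are emitted as calls by the compiler's UBSan instrumentation, so the kernel-side prototypes now take the data descriptor as an opaque void * and recover the typed pointer inside, rather than advertising struct types that the compiler's own builtin declarations may disagree with. The resulting idiom, in sketch form (all names hypothetical except suppress_report()):

        struct example_data {
                struct source_location location;
        };

        void __ubsan_handle_example(void *_data, void *ptr)
        {
                struct example_data *data = _data;      /* hypothetical descriptor */

                if (suppress_report(&data->location))
                        return;
                /* ... format the report from *data and ptr ... */
        }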
diff --git a/lib/usercopy.c b/lib/usercopy.c
index ca2a697a2061..b26509f112f9 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/uaccess.h>
#include <linux/bitops.h>
+#include <linux/instrumented.h>
+#include <linux/uaccess.h>
/* out-of-line parts */
@@ -10,7 +11,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (likely(access_ok(from, n))) {
- kasan_check_write(to, n);
+ instrument_copy_from_user(to, from, n);
res = raw_copy_from_user(to, from, n);
}
if (unlikely(res))
@@ -25,7 +26,7 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (likely(access_ok(to, n))) {
- kasan_check_read(from, n);
+ instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
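The bare kasan_check_{read,write}() calls become instrument_copy_{to,from}_user(), so every enabled sanitizer, not just KASAN, observes the kernel-side buffer access in a usercopy. A sketch of what such a wrapper amounts to, assuming it simply fans out to the individual checkers (the real definitions live in include/linux/instrumented.h):

        /* sketch: checks the kernel destination of a copy_from_user() */
        static __always_inline void
        instrument_copy_from_user(const void *to, const void __user *from,
                                  unsigned long n)
        {
                kasan_check_write(to, n);
                kcsan_check_write(to, n);
        }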
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index a2909af4b924..bcc9a98a0524 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -38,6 +38,13 @@ static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
}
#endif
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
@@ -62,6 +69,8 @@ static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
return -1;
cycles = __arch_get_hw_counter(vd->clock_mode);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
ns = vdso_ts->nsec;
last = vd->cycle_last;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
@@ -130,6 +139,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
return -1;
cycles = __arch_get_hw_counter(vd->clock_mode);
+ if (unlikely(!vdso_cycles_ok(cycles)))
+ return -1;
ns = vdso_ts->nsec;
last = vd->cycle_last;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
@@ -210,7 +221,7 @@ static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
return 0;
}
-static __maybe_unused int
+static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
struct __kernel_timespec *ts)
{
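vdso_cycles_ok() mirrors vdso_clocksource_ok() directly above it: a default that always accepts the counter value, overridable per architecture by defining the macro before this file is included. A hypothetical override, for illustration only (the exact arch-side check is not part of this hunk):

        /* hypothetical arch header: reject counter reads flagged as invalid */
        static __always_inline bool arch_vdso_cycles_ok(u64 cycles)
        {
                return (s64)cycles >= 0;
        }
        #define vdso_cycles_ok arch_vdso_cycles_ok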
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 2c13ecc5bb2c..ed1f3df27260 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -10,17 +10,6 @@
#ifndef ASMINF
-/* Allow machine dependent optimization for post-increment or pre-increment.
- Based on testing to date,
- Pre-increment preferred for:
- - PowerPC G3 (Adler)
- - MIPS R5000 (Randers-Pehrson)
- Post-increment preferred for:
- - none
- No measurable difference:
- - Pentium III (Anderson)
- - M68060 (Nikl)
- */
union uu {
unsigned short us;
unsigned char b[2];
@@ -38,16 +27,6 @@ get_unaligned16(const unsigned short *p)
return mm.us;
}
-#ifdef POSTINC
-# define OFF 0
-# define PUP(a) *(a)++
-# define UP_UNALIGNED(a) get_unaligned16((a)++)
-#else
-# define OFF 1
-# define PUP(a) *++(a)
-# define UP_UNALIGNED(a) get_unaligned16(++(a))
-#endif
-
/*
Decode literal, length, and distance codes and write out the resulting
literal and match bytes until either not enough input or output is
@@ -115,9 +94,9 @@ void inflate_fast(z_streamp strm, unsigned start)
/* copy state to local variables */
state = (struct inflate_state *)strm->state;
- in = strm->next_in - OFF;
+ in = strm->next_in;
last = in + (strm->avail_in - 5);
- out = strm->next_out - OFF;
+ out = strm->next_out;
beg = out - (start - strm->avail_out);
end = out + (strm->avail_out - 257);
#ifdef INFLATE_STRICT
@@ -138,9 +117,9 @@ void inflate_fast(z_streamp strm, unsigned start)
input data or output space */
do {
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = lcode[hold & lmask];
@@ -150,14 +129,14 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
op = (unsigned)(this.op);
if (op == 0) { /* literal */
- PUP(out) = (unsigned char)(this.val);
+ *out++ = (unsigned char)(this.val);
}
else if (op & 16) { /* length base */
len = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (op) {
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
len += (unsigned)hold & ((1U << op) - 1);
@@ -165,9 +144,9 @@ void inflate_fast(z_streamp strm, unsigned start)
bits -= op;
}
if (bits < 15) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
this = dcode[hold & dmask];
@@ -180,10 +159,10 @@ void inflate_fast(z_streamp strm, unsigned start)
dist = (unsigned)(this.val);
op &= 15; /* number of extra bits */
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
if (bits < op) {
- hold += (unsigned long)(PUP(in)) << bits;
+ hold += (unsigned long)(*in++) << bits;
bits += 8;
}
}
@@ -205,13 +184,13 @@ void inflate_fast(z_streamp strm, unsigned start)
state->mode = BAD;
break;
}
- from = window - OFF;
+ from = window;
if (write == 0) { /* very common case */
from += wsize - op;
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -222,14 +201,14 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from end of window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
- from = window - OFF;
+ from = window;
if (write < len) { /* some from start of window */
op = write;
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
@@ -240,21 +219,21 @@ void inflate_fast(z_streamp strm, unsigned start)
if (op < len) { /* some from window */
len -= op;
do {
- PUP(out) = PUP(from);
+ *out++ = *from++;
} while (--op);
from = out - dist; /* rest from output */
}
}
while (len > 2) {
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
- PUP(out) = PUP(from);
+ *out++ = *from++;
+ *out++ = *from++;
+ *out++ = *from++;
len -= 3;
}
if (len) {
- PUP(out) = PUP(from);
+ *out++ = *from++;
if (len > 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else {
@@ -264,29 +243,29 @@ void inflate_fast(z_streamp strm, unsigned start)
from = out - dist; /* copy direct from output */
/* minimum length is three */
/* Align out addr */
- if (!((long)(out - 1 + OFF) & 1)) {
- PUP(out) = PUP(from);
+ if (!((long)(out - 1) & 1)) {
+ *out++ = *from++;
len--;
}
- sout = (unsigned short *)(out - OFF);
+ sout = (unsigned short *)(out);
if (dist > 2) {
unsigned short *sfrom;
- sfrom = (unsigned short *)(from - OFF);
+ sfrom = (unsigned short *)(from);
loops = len >> 1;
do
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- PUP(sout) = PUP(sfrom);
+ *sout++ = *sfrom++;
#else
- PUP(sout) = UP_UNALIGNED(sfrom);
+ *sout++ = get_unaligned16(sfrom++);
#endif
while (--loops);
- out = (unsigned char *)sout + OFF;
- from = (unsigned char *)sfrom + OFF;
+ out = (unsigned char *)sout;
+ from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
unsigned short pat16;
- pat16 = *(sout-1+OFF);
+ pat16 = *(sout-1);
if (dist == 1) {
union uu mm;
/* copy one char pattern to both bytes */
@@ -296,12 +275,12 @@ void inflate_fast(z_streamp strm, unsigned start)
}
loops = len >> 1;
do
- PUP(sout) = pat16;
+ *sout++ = pat16;
while (--loops);
- out = (unsigned char *)sout + OFF;
+ out = (unsigned char *)sout;
}
if (len & 1)
- PUP(out) = PUP(from);
+ *out++ = *from++;
}
}
else if ((op & 64) == 0) { /* 2nd level distance code */
@@ -336,8 +315,8 @@ void inflate_fast(z_streamp strm, unsigned start)
hold &= (1U << bits) - 1;
/* update state and return */
- strm->next_in = in + OFF;
- strm->next_out = out + OFF;
+ strm->next_in = in;
+ strm->next_out = out;
strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last));
strm->avail_out = (unsigned)(out < end ?
257 + (end - out) : 257 - (out - end));
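With POSTINC/OFF/PUP gone, inflate_fast() uses ordinary post-increment copies throughout; the deleted comment only ever justified the pre-increment variant with measurements on PowerPC G3 and MIPS R5000 class hardware. The two spellings the macros used to select between are equivalent byte copies (sketch, for len >= 1):

        /* old form, OFF == 1: pointers trail the data by one element */
        static void copy_preinc(unsigned char *out, const unsigned char *from,
                                unsigned int len)
        {
                out--;
                from--;
                do {
                        *++out = *++from;
                } while (--len);
        }

        /* new form, OFF == 0: pointers point at the data directly */
        static void copy_postinc(unsigned char *out, const unsigned char *from,
                                 unsigned int len)
        {
                do {
                        *out++ = *from++;
                } while (--len);
        }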
diff --git a/mm/Kconfig b/mm/Kconfig
index e3490ecac839..f2104cc0d35c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -133,6 +133,9 @@ config HAVE_FAST_GUP
depends on MMU
bool
+# Don't discard allocated memory used to track "memory" and "reserved" memblocks
+# after early boot, so it can still be used to test for validity of memory.
+# Also, memblocks are updated with memory hot(un)plug.
config ARCH_KEEP_MEMBLOCK
bool
@@ -155,6 +158,7 @@ config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
depends on ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on 64BIT || BROKEN
select NUMA_KEEP_MEMINFO if NUMA
config MEMORY_HOTPLUG_SPARSE
@@ -189,6 +193,9 @@ config MEMORY_HOTREMOVE
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
+# SPARC32 allocates multiple pte tables within a single page, and therefore
+# a per-page lock leads to problems when multiple tables need to be locked
+# at the same time (e.g. copy_page_range()).
# DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
#
config SPLIT_PTLOCK_CPUS
@@ -196,6 +203,7 @@ config SPLIT_PTLOCK_CPUS
default "999999" if !MMU
default "999999" if ARM && !CPU_CACHE_VIPT
default "999999" if PARISC && !PA20
+ default "999999" if SPARC32
default "4"
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
diff --git a/mm/Makefile b/mm/Makefile
index fccd3756b25f..6e9d46b2efc9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,6 +8,14 @@ KASAN_SANITIZE_slab.o := n
KASAN_SANITIZE_slub.o := n
KCSAN_SANITIZE_kmemleak.o := n
+# These produce frequent data race reports: most of them are due to races on
+# the same word but accesses to different bits of that word. Re-enable KCSAN
+# for these when we have more consensus on what to do about them.
+KCSAN_SANITIZE_slab_common.o := n
+KCSAN_SANITIZE_slab.o := n
+KCSAN_SANITIZE_slub.o := n
+KCSAN_SANITIZE_page_alloc.o := n
+
# These files are disabled because they produce non-interesting and/or
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
# free pages, or a task is migrated between nodes.
@@ -41,7 +49,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
- mm_init.o mmu_context.o percpu.o slab_common.o \
+ mm_init.o percpu.o slab_common.o \
compaction.o vmacache.o \
interval_tree.o list_lru.o workingset.o \
debug.o gup.o $(mmu-y)
@@ -88,6 +96,7 @@ obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
+obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o
obj-$(CONFIG_PAGE_OWNER) += page_owner.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 14d2fe231ea4..fd988b7e5f2b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1401,7 +1401,7 @@ fast_isolate_freepages(struct compact_control *cc)
if (scan_start) {
/*
* Use the highest PFN found above min. If one was
- * not found, be pessemistic for direct compaction
+ * not found, be pessimistic for direct compaction
* and use the min mark.
*/
if (highest) {
diff --git a/mm/debug.c b/mm/debug.c
index f2ede2df585a..b5b1de8c71ac 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -120,9 +120,9 @@ void __dump_page(struct page *page, const char *reason)
* mapping can be invalid pointer and we don't want to crash
* accessing it, so probe everything depending on it carefully
*/
- if (probe_kernel_read_strict(&host, &mapping->host,
- sizeof(struct inode *)) ||
- probe_kernel_read_strict(&a_ops, &mapping->a_ops,
+ if (probe_kernel_read(&host, &mapping->host,
+ sizeof(struct inode *)) ||
+ probe_kernel_read(&a_ops, &mapping->a_ops,
sizeof(struct address_space_operations *))) {
pr_warn("failed to read mapping->host or a_ops, mapping not a valid kernel address?\n");
goto out_mapping;
@@ -133,7 +133,7 @@ void __dump_page(struct page *page, const char *reason)
goto out_mapping;
}
- if (probe_kernel_read_strict(&dentry_first,
+ if (probe_kernel_read(&dentry_first,
&host->i_dentry.first, sizeof(struct hlist_node *))) {
pr_warn("mapping->a_ops:%ps with invalid mapping->host inode address %px\n",
a_ops, host);
@@ -146,7 +146,7 @@ void __dump_page(struct page *page, const char *reason)
}
dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
- if (probe_kernel_read_strict(&dentry, dentry_ptr,
+ if (probe_kernel_read(&dentry, dentry_ptr,
sizeof(struct dentry))) {
pr_warn("mapping->aops:%ps with invalid mapping->host->i_dentry.first %px\n",
a_ops, dentry_ptr);
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
new file mode 100644
index 000000000000..e45623016aea
--- /dev/null
+++ b/mm/debug_vm_pgtable.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This kernel test validates architecture page table helpers and
+ * accessors and helps in verifying their continued compliance with
+ * expected generic MM semantics.
+ *
+ * Copyright (C) 2019 ARM Ltd.
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#define pr_fmt(fmt) "debug_vm_pgtable: %s: " fmt, __func__
+
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/hugetlb.h>
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include <linux/printk.h>
+#include <linux/random.h>
+#include <linux/spinlock.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/start_kernel.h>
+#include <linux/sched/mm.h>
+#include <asm/pgalloc.h>
+
+#define VMFLAGS (VM_READ|VM_WRITE|VM_EXEC)
+
+/*
+ * On s390 platform, the lower 4 bits are used to identify given page table
+ * entry type. But these bits might affect the ability to clear entries with
+ * pxx_clear() because of how dynamic page table folding works on s390. So
+ * while loading up the entries, do not change the lower 4 bits. It does not
+ * affect any other platform.
+ */
+#define S390_MASK_BITS 4
+#define RANDOM_ORVALUE GENMASK(BITS_PER_LONG - 1, S390_MASK_BITS)
+#define RANDOM_NZVALUE GENMASK(7, 0)
+
+static void __init pte_basic_tests(unsigned long pfn, pgprot_t prot)
+{
+ pte_t pte = pfn_pte(pfn, prot);
+
+ WARN_ON(!pte_same(pte, pte));
+ WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
+ WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
+ WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
+ WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
+ WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
+ WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot)
+{
+ pmd_t pmd = pfn_pmd(pfn, prot);
+
+ if (!has_transparent_hugepage())
+ return;
+
+ WARN_ON(!pmd_same(pmd, pmd));
+ WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
+ WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
+ WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
+ WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
+ WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
+ WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
+ /*
+ * A huge page does not point to next level page table
+ * entry. Hence this must qualify as pmd_bad().
+ */
+ WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot)
+{
+ pud_t pud = pfn_pud(pfn, prot);
+
+ if (!has_transparent_hugepage())
+ return;
+
+ WARN_ON(!pud_same(pud, pud));
+ WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
+ WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
+ WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
+ WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
+
+ if (mm_pmd_folded(mm))
+ return;
+
+ /*
+ * A huge page does not point to next level page table
+ * entry. Hence this must qualify as pud_bad().
+ */
+ WARN_ON(!pud_bad(pud_mkhuge(pud)));
+}
+#else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#else /* !CONFIG_TRANSPARENT_HUGEPAGE */
+static void __init pmd_basic_tests(unsigned long pfn, pgprot_t prot) { }
+static void __init pud_basic_tests(unsigned long pfn, pgprot_t prot) { }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static void __init p4d_basic_tests(unsigned long pfn, pgprot_t prot)
+{
+ p4d_t p4d;
+
+ memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
+ WARN_ON(!p4d_same(p4d, p4d));
+}
+
+static void __init pgd_basic_tests(unsigned long pfn, pgprot_t prot)
+{
+ pgd_t pgd;
+
+ memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
+ WARN_ON(!pgd_same(pgd, pgd));
+}
+
+#ifndef __PAGETABLE_PUD_FOLDED
+static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
+{
+ pud_t pud = READ_ONCE(*pudp);
+
+ if (mm_pmd_folded(mm))
+ return;
+
+ pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
+ WRITE_ONCE(*pudp, pud);
+ pud_clear(pudp);
+ pud = READ_ONCE(*pudp);
+ WARN_ON(!pud_none(pud));
+}
+
+static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
+ pmd_t *pmdp)
+{
+ pud_t pud;
+
+ if (mm_pmd_folded(mm))
+ return;
+ /*
+ * This entry points to next level page table page.
+ * Hence this must not qualify as pud_bad().
+ */
+ pmd_clear(pmdp);
+ pud_clear(pudp);
+ pud_populate(mm, pudp, pmdp);
+ pud = READ_ONCE(*pudp);
+ WARN_ON(pud_bad(pud));
+}
+#else /* !__PAGETABLE_PUD_FOLDED */
+static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
+static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
+ pmd_t *pmdp)
+{
+}
+#endif /* PAGETABLE_PUD_FOLDED */
+
+#ifndef __PAGETABLE_P4D_FOLDED
+static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
+{
+ p4d_t p4d = READ_ONCE(*p4dp);
+
+ if (mm_pud_folded(mm))
+ return;
+
+ p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
+ WRITE_ONCE(*p4dp, p4d);
+ p4d_clear(p4dp);
+ p4d = READ_ONCE(*p4dp);
+ WARN_ON(!p4d_none(p4d));
+}
+
+static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
+ pud_t *pudp)
+{
+ p4d_t p4d;
+
+ if (mm_pud_folded(mm))
+ return;
+
+ /*
+ * This entry points to next level page table page.
+ * Hence this must not qualify as p4d_bad().
+ */
+ pud_clear(pudp);
+ p4d_clear(p4dp);
+ p4d_populate(mm, p4dp, pudp);
+ p4d = READ_ONCE(*p4dp);
+ WARN_ON(p4d_bad(p4d));
+}
+
+static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
+{
+ pgd_t pgd = READ_ONCE(*pgdp);
+
+ if (mm_p4d_folded(mm))
+ return;
+
+ pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
+ WRITE_ONCE(*pgdp, pgd);
+ pgd_clear(pgdp);
+ pgd = READ_ONCE(*pgdp);
+ WARN_ON(!pgd_none(pgd));
+}
+
+static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
+ p4d_t *p4dp)
+{
+ pgd_t pgd;
+
+ if (mm_p4d_folded(mm))
+ return;
+
+ /*
+ * This entry points to next level page table page.
+ * Hence this must not qualify as pgd_bad().
+ */
+ p4d_clear(p4dp);
+ pgd_clear(pgdp);
+ pgd_populate(mm, pgdp, p4dp);
+ pgd = READ_ONCE(*pgdp);
+ WARN_ON(pgd_bad(pgd));
+}
+#else /* !__PAGETABLE_P4D_FOLDED */
+static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
+static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
+static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
+ pud_t *pudp)
+{
+}
+static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
+ p4d_t *p4dp)
+{
+}
+#endif /* PAGETABLE_P4D_FOLDED */
+
+static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
+ unsigned long vaddr)
+{
+ pte_t pte = READ_ONCE(*ptep);
+
+ pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
+ set_pte_at(mm, vaddr, ptep, pte);
+ barrier();
+ pte_clear(mm, vaddr, ptep);
+ pte = READ_ONCE(*ptep);
+ WARN_ON(!pte_none(pte));
+}
+
+static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
+{
+ pmd_t pmd = READ_ONCE(*pmdp);
+
+ pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
+ WRITE_ONCE(*pmdp, pmd);
+ pmd_clear(pmdp);
+ pmd = READ_ONCE(*pmdp);
+ WARN_ON(!pmd_none(pmd));
+}
+
+static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
+ pgtable_t pgtable)
+{
+ pmd_t pmd;
+
+ /*
+ * This entry points to next level page table page.
+ * Hence this must not qualify as pmd_bad().
+ */
+ pmd_clear(pmdp);
+ pmd_populate(mm, pmdp, pgtable);
+ pmd = READ_ONCE(*pmdp);
+ WARN_ON(pmd_bad(pmd));
+}
+
+static unsigned long __init get_random_vaddr(void)
+{
+ unsigned long random_vaddr, random_pages, total_user_pages;
+
+ total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;
+
+ random_pages = get_random_long() % total_user_pages;
+ random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;
+
+ return random_vaddr;
+}
+
+static int __init debug_vm_pgtable(void)
+{
+ struct mm_struct *mm;
+ pgd_t *pgdp;
+ p4d_t *p4dp, *saved_p4dp;
+ pud_t *pudp, *saved_pudp;
+ pmd_t *pmdp, *saved_pmdp, pmd;
+ pte_t *ptep;
+ pgtable_t saved_ptep;
+ pgprot_t prot;
+ phys_addr_t paddr;
+ unsigned long vaddr, pte_aligned, pmd_aligned;
+ unsigned long pud_aligned, p4d_aligned, pgd_aligned;
+ spinlock_t *uninitialized_var(ptl);
+
+ pr_info("Validating architecture page table helpers\n");
+ prot = vm_get_page_prot(VMFLAGS);
+ vaddr = get_random_vaddr();
+ mm = mm_alloc();
+ if (!mm) {
+ pr_err("mm_struct allocation failed\n");
+ return 1;
+ }
+
+ /*
+ * PFN for mapping at PTE level is determined from a standard kernel
+ * text symbol. But pfns for higher page table levels are derived by
+ * masking lower bits of this real pfn. These derived pfns might not
+ * exist on the platform but that does not really matter as pfn_pxx()
+ * helpers will still create appropriate entries for the test. This
+ * helps avoid large memory block allocations to be used for mapping
+ * at higher page table levels.
+ */
+ paddr = __pa_symbol(&start_kernel);
+
+ pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
+ pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
+ pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
+ p4d_aligned = (paddr & P4D_MASK) >> PAGE_SHIFT;
+ pgd_aligned = (paddr & PGDIR_MASK) >> PAGE_SHIFT;
+ WARN_ON(!pfn_valid(pte_aligned));
+
+ pgdp = pgd_offset(mm, vaddr);
+ p4dp = p4d_alloc(mm, pgdp, vaddr);
+ pudp = pud_alloc(mm, p4dp, vaddr);
+ pmdp = pmd_alloc(mm, pudp, vaddr);
+ ptep = pte_alloc_map_lock(mm, pmdp, vaddr, &ptl);
+
+ /*
+ * Save all the page table page addresses as the page table
+ * entries will be used for testing with random or garbage
+ * values. These saved addresses will be used for freeing
+ * page table pages.
+ */
+ pmd = READ_ONCE(*pmdp);
+ saved_p4dp = p4d_offset(pgdp, 0UL);
+ saved_pudp = pud_offset(p4dp, 0UL);
+ saved_pmdp = pmd_offset(pudp, 0UL);
+ saved_ptep = pmd_pgtable(pmd);
+
+ pte_basic_tests(pte_aligned, prot);
+ pmd_basic_tests(pmd_aligned, prot);
+ pud_basic_tests(pud_aligned, prot);
+ p4d_basic_tests(p4d_aligned, prot);
+ pgd_basic_tests(pgd_aligned, prot);
+
+ pte_clear_tests(mm, ptep, vaddr);
+ pmd_clear_tests(mm, pmdp);
+ pud_clear_tests(mm, pudp);
+ p4d_clear_tests(mm, p4dp);
+ pgd_clear_tests(mm, pgdp);
+
+ pte_unmap_unlock(ptep, ptl);
+
+ pmd_populate_tests(mm, pmdp, saved_ptep);
+ pud_populate_tests(mm, pudp, saved_pmdp);
+ p4d_populate_tests(mm, p4dp, saved_pudp);
+ pgd_populate_tests(mm, pgdp, saved_p4dp);
+
+ p4d_free(mm, saved_p4dp);
+ pud_free(mm, saved_pudp);
+ pmd_free(mm, saved_pmdp);
+ pte_free(mm, saved_ptep);
+
+ mm_dec_nr_puds(mm);
+ mm_dec_nr_pmds(mm);
+ mm_dec_nr_ptes(mm);
+ mmdrop(mm);
+ return 0;
+}
+late_initcall(debug_vm_pgtable);
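The *_aligned pfns above are all derived from one physical address by masking at each table level and shifting by PAGE_SHIFT. A worked example, assuming x86_64-style 4K pages and 2M PMDs and a made-up __pa_symbol() result:

        /* hypothetical paddr = 0x123456789a, PAGE_SHIFT = 12, PMD_SHIFT = 21 */
        pte_aligned = (0x123456789aUL & ~0xfffUL)    >> 12;  /* 0x1234567 */
        pmd_aligned = (0x123456789aUL & ~0x1fffffUL) >> 12;  /* 0x1234400 */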
diff --git a/mm/filemap.c b/mm/filemap.c
index 455990621989..f0ae9a6308cb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -76,16 +76,16 @@
* ->i_mutex
* ->i_mmap_rwsem (truncate->unmap_mapping_range)
*
- * ->mmap_sem
+ * ->mmap_lock
* ->i_mmap_rwsem
* ->page_table_lock or pte_lock (various, mainly in memory.c)
* ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
*
- * ->mmap_sem
+ * ->mmap_lock
* ->lock_page (access_process_vm)
*
* ->i_mutex (generic_perform_write)
- * ->mmap_sem (fault_in_pages_readable->do_page_fault)
+ * ->mmap_lock (fault_in_pages_readable->do_page_fault)
*
* bdi->wb.list_lock
* sb_lock (fs/fs-writeback.c)
@@ -1256,7 +1256,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
* instead.
*
* The read of PG_waiters has to be after (or concurrently with) PG_locked
- * being cleared, but a memory barrier should be unneccssary since it is
+ * being cleared, but a memory barrier should be unnecessary since it is
* in the same byte as PG_locked.
*/
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
@@ -1371,27 +1371,27 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
/*
* Return values:
- * 1 - page is locked; mmap_sem is still held.
+ * 1 - page is locked; mmap_lock is still held.
* 0 - page is not locked.
- * mmap_sem has been released (up_read()), unless flags had both
+ * mmap_lock has been released (mmap_read_unlock()), unless flags had both
* FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
- * which case mmap_sem is still held.
+ * which case mmap_lock is still held.
*
* If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
- * with the page locked and the mmap_sem unperturbed.
+ * with the page locked and the mmap_lock unperturbed.
*/
int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
unsigned int flags)
{
if (fault_flag_allow_retry_first(flags)) {
/*
- * CAUTION! In this case, mmap_sem is not released
+ * CAUTION! In this case, mmap_lock is not released
* even though return 0.
*/
if (flags & FAULT_FLAG_RETRY_NOWAIT)
return 0;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (flags & FAULT_FLAG_KILLABLE)
wait_on_page_locked_killable(page);
else
@@ -1403,7 +1403,7 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
ret = __lock_page_killable(page);
if (ret) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return 0;
}
} else
@@ -2313,14 +2313,14 @@ EXPORT_SYMBOL(generic_file_read_iter);
#ifdef CONFIG_MMU
#define MMAP_LOTSAMISS (100)
/*
- * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
+ * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
* @vmf - the vm_fault for this fault.
* @page - the page to lock.
* @fpin - the pointer to the file we may pin (or is already pinned).
*
- * This works similar to lock_page_or_retry in that it can drop the mmap_sem.
+ * This works similar to lock_page_or_retry in that it can drop the mmap_lock.
* It differs in that it actually returns the page locked if it returns 1 and 0
- * if it couldn't lock the page. If we did have to drop the mmap_sem then fpin
+ * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin
* will point to the pinned file and needs to be fput()'ed at a later point.
*/
static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
@@ -2331,7 +2331,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
/*
* NOTE! This will make us return with VM_FAULT_RETRY, but with
- * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT
+ * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
* is supposed to work. We have way too many special cases..
*/
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
@@ -2341,13 +2341,13 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
if (vmf->flags & FAULT_FLAG_KILLABLE) {
if (__lock_page_killable(page)) {
/*
- * We didn't have the right flags to drop the mmap_sem,
+ * We didn't have the right flags to drop the mmap_lock,
* but all fault_handlers only check for fatal signals
* if we return VM_FAULT_RETRY, so we need to drop the
- * mmap_sem here and return 0 if we don't have a fpin.
+ * mmap_lock here and return 0 if we don't have a fpin.
*/
if (*fpin == NULL)
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
return 0;
}
} else
@@ -2409,7 +2409,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
/*
* Asynchronous readahead happens when we find the page and PG_readahead,
* so we want to possibly extend the readahead further. We return the file that
- * was pinned if we have to drop the mmap_sem in order to do IO.
+ * was pinned if we have to drop the mmap_lock in order to do IO.
*/
static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
struct page *page)
@@ -2444,12 +2444,12 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
* it in the page cache, and handles the special cases reasonably without
* having a lot of duplicated code.
*
- * vma->vm_mm->mmap_sem must be held on entry.
+ * vma->vm_mm->mmap_lock must be held on entry.
*
- * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem
+ * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
* may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
*
- * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
+ * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
* has not been released.
*
* We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
@@ -2519,7 +2519,7 @@ retry_find:
goto page_not_uptodate;
/*
- * We've made it this far and we had to drop our mmap_sem, now is the
+ * We've made it this far and we had to drop our mmap_lock, now is the
* time to return to the upper layer and have it re-find the vma and
* redo the fault.
*/
@@ -2569,7 +2569,7 @@ page_not_uptodate:
out_retry:
/*
- * We dropped the mmap_sem, we need to return to the fault handler to
+ * We dropped the mmap_lock, we need to return to the fault handler to
* re-find the vma and come back and find our hopefully still populated
* page.
*/
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index c431ca81dad5..10f82d5643b6 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -29,7 +29,7 @@
* different type underlying the specified range of virtual addresses.
* When the function isn't able to map a single page, it returns error.
*
- * This function takes care of grabbing mmap_sem as necessary.
+ * This function takes care of grabbing mmap_lock as necessary.
*/
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
unsigned int gup_flags, struct frame_vector *vec)
@@ -48,7 +48,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
start = untagged_addr(start);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = 1;
vma = find_vma_intersection(mm, start, start + 1);
if (!vma) {
@@ -72,7 +72,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vec->got_ref = true;
vec->is_pfns = false;
- ret = get_user_pages_locked(start, nr_frames,
+ ret = pin_user_pages_locked(start, nr_frames,
gup_flags, (struct page **)(vec->ptrs), &locked);
goto out;
}
@@ -102,7 +102,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
} while (vma && vma->vm_flags & (VM_IO | VM_PFNMAP));
out:
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!ret)
ret = -EFAULT;
if (ret > 0)
@@ -122,7 +122,6 @@ EXPORT_SYMBOL(get_vaddr_frames);
*/
void put_vaddr_frames(struct frame_vector *vec)
{
- int i;
struct page **pages;
if (!vec->got_ref)
@@ -135,8 +134,8 @@ void put_vaddr_frames(struct frame_vector *vec)
*/
if (WARN_ON(IS_ERR(pages)))
goto out;
- for (i = 0; i < vec->nr_frames; i++)
- put_page(pages[i]);
+
+ unpin_user_pages(pages, vec->nr_frames);
vec->got_ref = false;
out:
vec->nr_frames = 0;
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 60bb20e8a951..bfa3a339253e 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -87,7 +87,7 @@ static inline void inc_frontswap_invalidates(void) { }
*
* This would not guards us against the user deciding to call swapoff right as
* we are calling the backend to initialize (so swapon is in action).
- * Fortunatly for us, the swapon_mutex has been taked by the callee so we are
+ * Fortunately for us, the swapon_mutex has been taken by the callee so we are
* OK. The other scenario where calls to frontswap_store (called via
* swap_writepage) is racing with frontswap_invalidate_area (called via
* swapoff) is again guarded by the swap subsystem.
@@ -413,8 +413,8 @@ static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
}
/*
- * Used to check if it's necessory and feasible to unuse pages.
- * Return 1 when nothing to do, 0 when need to shink pages,
+ * Used to check if it's necessary and feasible to unuse pages.
+ * Return 1 when nothing to do, 0 when need to shrink pages,
* error code when there is an error.
*/
static int __frontswap_shrink(unsigned long target_pages,
diff --git a/mm/gup.c b/mm/gup.c
index e19ff770eb4c..de9e36262ccb 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -19,7 +19,6 @@
#include <linux/sched/mm.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -593,7 +592,7 @@ retry:
pmdval = READ_ONCE(*pmd);
/*
* MADV_DONTNEED may convert the pmd to null because
- * mmap_sem is held in read mode
+ * mmap_lock is held in read mode
*/
if (pmd_none(pmdval))
return no_page_table(vma, flags);
@@ -856,8 +855,8 @@ unmap:
}
/*
- * mmap_sem must be held on entry. If @locked != NULL and *@flags
- * does not include FOLL_NOWAIT, the mmap_sem may be released. If it
+ * mmap_lock must be held on entry. If @locked != NULL and *@flags
+ * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
* is, *@locked will be set to 0 and -EBUSY returned.
*/
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
@@ -980,7 +979,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* only intends to ensure the pages are faulted in.
* @vmas: array of pointers to vmas corresponding to each page.
* Or NULL if the caller does not require them.
- * @locked: whether we're still with the mmap_sem held
+ * @locked: whether we're still with the mmap_lock held
*
* Returns either number of pages pinned (which may be less than the
* number requested), or an error. Details about the return value:
@@ -993,9 +992,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
*
* The caller is responsible for releasing returned @pages, via put_page().
*
- * @vmas are valid only as long as mmap_sem is held.
+ * @vmas are valid only as long as mmap_lock is held.
*
- * Must be called with mmap_sem held. It may be released. See below.
+ * Must be called with mmap_lock held. It may be released. See below.
*
* __get_user_pages walks a process's page tables and takes a reference to
* each struct page that each user address corresponds to at a given
@@ -1016,12 +1015,12 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* appropriate) must be called after the page is finished with, and
* before put_page is called.
*
- * If @locked != NULL, *@locked will be set to 0 when mmap_sem is
+ * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
* released by an up_read(). That can happen if @gup_flags does not
* have FOLL_NOWAIT.
*
* A caller using such a combination of @locked and @gup_flags
- * must therefore hold the mmap_sem for reading only, and recognize
+ * must therefore hold the mmap_lock for reading only, and recognize
* when it's been released. Otherwise, it must be held for either
* reading or writing and will not be released.
*
@@ -1084,7 +1083,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (locked && *locked == 0) {
/*
* We've got a VM_FAULT_RETRY
- * and we've lost mmap_sem.
+ * and we've lost mmap_lock.
* We must stop here.
*/
BUG_ON(gup_flags & FOLL_NOWAIT);
@@ -1191,7 +1190,7 @@ static bool vma_permits_fault(struct vm_area_struct *vma,
* @mm: mm_struct of target mm
* @address: user address
* @fault_flags:flags to pass down to handle_mm_fault()
- * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller
+ * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
* does not allow retry. If NULL, the caller must guarantee
* that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
*
@@ -1212,8 +1211,8 @@ static bool vma_permits_fault(struct vm_area_struct *vma,
* such architectures, gup() will not be enough to make a subsequent access
* succeed.
*
- * This function will not return with an unlocked mmap_sem. So it has not the
- * same semantics wrt the @mm->mmap_sem as does filemap_fault().
+ * This function will not return with an unlocked mmap_lock. So it has not the
+ * same semantics wrt the @mm->mmap_lock as does filemap_fault().
*/
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
@@ -1250,7 +1249,7 @@ retry:
}
if (ret & VM_FAULT_RETRY) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
*unlocked = true;
fault_flags |= FAULT_FLAG_TRIED;
goto retry;
@@ -1355,7 +1354,7 @@ retry:
break;
}
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret) {
BUG_ON(ret > 0);
if (!pages_done)
@@ -1390,7 +1389,7 @@ retry:
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
*locked = 0;
}
return pages_done;
@@ -1401,13 +1400,13 @@ retry:
* @vma: target vma
* @start: start address
* @end: end address
- * @locked: whether the mmap_sem is still held
+ * @locked: whether the mmap_lock is still held
*
* This takes care of mlocking the pages too if VM_LOCKED is set.
*
* return 0 on success, negative error code on error.
*
- * vma->vm_mm->mmap_sem must be held.
+ * vma->vm_mm->mmap_lock must be held.
*
* If @locked is NULL, it may be held for read or write and will
* be unperturbed.
@@ -1426,7 +1425,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
VM_BUG_ON(end & ~PAGE_MASK);
VM_BUG_ON_VMA(start < vma->vm_start, vma);
VM_BUG_ON_VMA(end > vma->vm_end, vma);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+ mmap_assert_locked(mm);
gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
if (vma->vm_flags & VM_LOCKONFAULT)
@@ -1459,7 +1458,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
*
* This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
* flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
+ * mmap_lock must not be held.
*/
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
@@ -1478,7 +1477,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
*/
if (!locked) {
locked = 1;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, nstart);
} else if (nstart >= vma->vm_end)
vma = vma->vm_next;
@@ -1510,7 +1509,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
ret = 0;
}
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret; /* 0 or negative error code */
}
@@ -1526,7 +1525,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
* NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
* allowing a hole to be left in the corefile to save diskspace.
*
- * Called without mmap_sem, but after all other threads have been killed.
+ * Called without mmap_lock, but after all other threads have been killed.
*/
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
@@ -1887,9 +1886,9 @@ static long __get_user_pages_remote(struct task_struct *tsk,
*
* The caller is responsible for releasing returned @pages, via put_page().
*
- * @vmas are valid only as long as mmap_sem is held.
+ * @vmas are valid only as long as mmap_lock is held.
*
- * Must be called with mmap_sem held for read or write.
+ * Must be called with mmap_lock held for read or write.
*
* get_user_pages_remote walks a process's page tables and takes a reference
* to each struct page that each user address corresponds to at a given
@@ -1994,19 +1993,19 @@ EXPORT_SYMBOL(get_user_pages);
/**
* get_user_pages_locked() is suitable to replace the form:
*
- * down_read(&mm->mmap_sem);
+ * mmap_read_lock(mm);
* do_something()
* get_user_pages(tsk, mm, ..., pages, NULL);
- * up_read(&mm->mmap_sem);
+ * mmap_read_unlock(mm);
*
* to:
*
* int locked = 1;
- * down_read(&mm->mmap_sem);
+ * mmap_read_lock(mm);
* do_something()
* get_user_pages_locked(tsk, mm, ..., pages, &locked);
* if (locked)
- * up_read(&mm->mmap_sem);
+ * mmap_read_unlock(mm);
*
* @start: starting user address
* @nr_pages: number of pages from start to pin
@@ -2035,6 +2034,12 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
*/
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
+ /*
+ * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
+ * never directly by the caller, so enforce that:
+ */
+ if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
+ return -EINVAL;
return __get_user_pages_locked(current, current->mm, start, nr_pages,
pages, NULL, locked,
@@ -2045,9 +2050,9 @@ EXPORT_SYMBOL(get_user_pages_locked);
/*
* get_user_pages_unlocked() is suitable to replace the form:
*
- * down_read(&mm->mmap_sem);
+ * mmap_read_lock(mm);
* get_user_pages(tsk, mm, ..., pages, NULL);
- * up_read(&mm->mmap_sem);
+ * mmap_read_unlock(mm);
*
* with:
*
@@ -2073,11 +2078,11 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
return -EINVAL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
&locked, gup_flags | FOLL_TOUCH);
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -2294,7 +2299,7 @@ pte_unmap:
* to be special.
*
* For a futex to be placed on a THP tail page, get_futex_key requires a
- * __get_user_pages_fast implementation that can pin pages. Thus it's still
+ * get_user_pages_fast_only implementation that can pin pages. Thus it's still
* useful to have gup_huge_pmd even if we can't operate on ptes.
*/
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
@@ -2699,7 +2704,7 @@ static inline void gup_pgd_range(unsigned long addr, unsigned long end,
#ifndef gup_fast_permitted
/*
- * Check if it's allowed to use __get_user_pages_fast() for the range, or
+ * Check if it's allowed to use get_user_pages_fast_only() for the range, or
* we need to fall back to the slow version:
*/
static bool gup_fast_permitted(unsigned long start, unsigned long end)
@@ -2718,11 +2723,11 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
* get_user_pages_unlocked() (see comments in that function)
*/
if (gup_flags & FOLL_LONGTERM) {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = __gup_longterm_locked(current, current->mm,
start, nr_pages,
pages, NULL, gup_flags);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
} else {
ret = get_user_pages_unlocked(start, nr_pages,
pages, gup_flags);
@@ -2745,7 +2750,7 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
return -EINVAL;
if (!(gup_flags & FOLL_FAST_ONLY))
- might_lock_read(&current->mm->mmap_sem);
+ might_lock_read(&current->mm->mmap_lock);
start = untagged_addr(start) & PAGE_MASK;
addr = start;
@@ -2811,8 +2816,14 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
return ret;
}
-
-/*
+/**
+ * get_user_pages_fast_only() - pin user pages in memory
+ * @start: starting user address
+ * @nr_pages: number of pages from start to pin
+ * @gup_flags: flags modifying pin behaviour
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_pages long.
+ *
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
@@ -2825,8 +2836,8 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
* access can get ambiguous page results. If you call this function without
* 'write' set, you'd better be sure that you're ok with that ambiguity.
*/
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
+int get_user_pages_fast_only(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages)
{
int nr_pinned;
/*
@@ -2836,10 +2847,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* FOLL_FAST_ONLY is required in order to match the API description of
* this routine: no fall back to regular ("slow") GUP.
*/
- unsigned int gup_flags = FOLL_GET | FOLL_FAST_ONLY;
-
- if (write)
- gup_flags |= FOLL_WRITE;
+ gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
pages);
@@ -2855,7 +2863,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
return nr_pinned;
}
-EXPORT_SYMBOL_GPL(__get_user_pages_fast);
+EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
/**
* get_user_pages_fast() - pin user pages in memory
@@ -2865,7 +2873,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
+ * Attempt to pin user pages in memory without taking mm->mmap_lock.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
@@ -2909,9 +2917,6 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast);
*
* FOLL_PIN means that the pages must be released via unpin_user_page(). Please
* see Documentation/core-api/pin_user_pages.rst for further details.
- *
- * This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
- * is NOT intended for Case 2 (RDMA: long-term pins).
*/
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages)
@@ -2926,8 +2931,8 @@ int pin_user_pages_fast(unsigned long start, int nr_pages,
EXPORT_SYMBOL_GPL(pin_user_pages_fast);
/*
- * This is the FOLL_PIN equivalent of __get_user_pages_fast(). Behavior is the
- * same, except that this one sets FOLL_PIN instead of FOLL_GET.
+ * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
+ * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
*
* The API rules are the same, too: no negative values may be returned.
*/
@@ -2985,9 +2990,6 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
*
* FOLL_PIN means that the pages must be released via unpin_user_page(). Please
* see Documentation/core-api/pin_user_pages.rst for details.
- *
- * This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
- * is NOT intended for Case 2 (RDMA: long-term pins).
*/
long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -3021,9 +3023,6 @@ EXPORT_SYMBOL(pin_user_pages_remote);
*
* FOLL_PIN means that the pages must be released via unpin_user_page(). Please
* see Documentation/core-api/pin_user_pages.rst for details.
- *
- * This is intended for Case 1 (DIO) in Documentation/core-api/pin_user_pages.rst. It
- * is NOT intended for Case 2 (RDMA: long-term pins).
*/
long pin_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
@@ -3055,3 +3054,32 @@ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
}
EXPORT_SYMBOL(pin_user_pages_unlocked);
+
+/*
+ * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
+ * Behavior is the same, except that this one sets FOLL_PIN and rejects
+ * FOLL_GET.
+ */
+long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ int *locked)
+{
+ /*
+ * FIXME: Current FOLL_LONGTERM behavior is incompatible with
+ * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
+ * vmas. As there are no users of this flag in this call we simply
+ * disallow this option for now.
+ */
+ if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
+ return -EINVAL;
+
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE(gup_flags & FOLL_GET))
+ return -EINVAL;
+
+ gup_flags |= FOLL_PIN;
+ return __get_user_pages_locked(current, current->mm, start, nr_pages,
+ pages, NULL, locked,
+ gup_flags | FOLL_TOUCH);
+}
+EXPORT_SYMBOL(pin_user_pages_locked);
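
To make the FOLL_PIN contract above concrete, here is a minimal usage sketch for a hypothetical in-kernel caller; the helper name, the fixed flag choice and the error handling are invented for illustration and are not part of this patch.

	/* Hedged sketch: pin a user buffer for short-term, DIO-style access. */
	#include <linux/mm.h>
	#include <linux/errno.h>

	static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages,
					struct page **pages)
	{
		int pinned;

		/* pin_user_pages_fast() sets FOLL_PIN internally; pass only modifiers. */
		pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;			/* -errno */

		/* ... operate on pages[0 .. pinned-1] ... */

		/* FOLL_PIN pages must be released via unpin_user_page()/unpin_user_pages(). */
		unpin_user_pages(pages, pinned);
		return pinned == nr_pages ? 0 : -EFAULT;
	}

pin_user_pages_locked(), added above, mirrors get_user_pages_locked(): the caller is expected to hold mmap_lock for read, pass locked = 1, and drop the lock itself only if *locked is still set on return.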
diff --git a/mm/hmm.c b/mm/hmm.c
index 41673a6d8d46..e9a545751108 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -563,7 +563,7 @@ int hmm_range_fault(struct hmm_range *range)
struct mm_struct *mm = range->notifier->mm;
int ret;
- lockdep_assert_held(&mm->mmap_sem);
+ mmap_assert_locked(mm);
do {
/* If range is no longer valid force retry. */
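
The remaining hunks in this series repeat the same mechanical substitution from open-coded mmap_sem rwsem calls to the mmap_lock wrapper API. A hedged before/after sketch, with an invented helper name, summarizes the mapping:

	/* Illustrative only: the conversion pattern applied throughout this patch. */
	#include <linux/mm.h>
	#include <linux/mmap_lock.h>

	static void demo_walk_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem)       */
		mmap_assert_locked(mm);		/* was: lockdep_assert_held(&...sem)   */

		for (vma = mm->mmap; vma; vma = vma->vm_next)
			;			/* read-side walk of the VMA list      */

		mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem)         */
	}

The write, trylock and killable variants follow the same naming scheme, as the khugepaged, madvise and memory.c hunks below show.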
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e8669885232f..78c84bee7e29 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -522,7 +522,7 @@ void prep_transhuge_page(struct page *page)
bool is_transparent_hugepage(struct page *page)
{
if (!PageCompound(page))
- return 0;
+ return false;
page = compound_head(page);
return is_huge_zero_page(page) ||
@@ -1647,8 +1647,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* pgtable_trans_huge_withdraw after finishing pmdp related
* operations.
*/
- orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
- tlb->fullmm);
+ orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
+ tlb->fullmm);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
@@ -1746,7 +1746,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
/*
* We don't have to worry about the ordering of src and dst
- * ptlocks because exclusive mmap_sem prevents deadlock.
+ * ptlocks because exclusive mmap_lock prevents deadlock.
*/
old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
if (old_ptl) {
@@ -1833,9 +1833,9 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
goto unlock;
/*
- * In case prot_numa, we are under down_read(mmap_sem). It's critical
+ * In case prot_numa, we are under mmap_read_lock(mm). It's critical
* to not clear pmd intermittently to avoid race with MADV_DONTNEED
- * which is also under down_read(mmap_sem):
+ * which is also under mmap_read_lock(mm):
*
* CPU0: CPU1:
* change_huge_pmd(prot_numa=1)
@@ -2618,7 +2618,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
if (PageAnon(head)) {
/*
- * The caller does not necessarily hold an mmap_sem that would
+ * The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
* reference to it and then lock the anon_vma for write. This
* is similar to page_lock_anon_vma_read except the write lock
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ac0d7bbc0692..57ece74e3aae 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -31,7 +31,6 @@
#include <linux/cma.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <linux/io.h>
@@ -85,7 +84,7 @@ static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
spin_unlock(&spool->lock);
/* If no pages are used, and no other handles to the subpool
- * remain, give up any reservations mased on minimum size and
+ * remain, give up any reservations based on minimum size and
* free the subpool */
if (free) {
if (spool->min_hpages != -1)
@@ -133,7 +132,7 @@ void hugepage_put_subpool(struct hugepage_subpool *spool)
* the request. Otherwise, return the number of pages by which the
* global pools must be adjusted (upward). The returned value may
* only be different than the passed value (delta) in the case where
- * a subpool minimum size must be manitained.
+ * a subpool minimum size must be maintained.
*/
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta)
@@ -473,7 +472,7 @@ out_of_memory:
*
* Return the number of new huge pages added to the map. This number is greater
* than or equal to zero. If file_region entries needed to be allocated for
- * this operation and we were not able to allocate, it ruturns -ENOMEM.
+ * this operation and we were not able to allocate, it returns -ENOMEM.
* region_add of regions of length 1 never allocate file_regions and cannot
* fail; region_chg will always allocate at least 1 entry and a region_add for
* 1 page will only require at most 1 entry.
@@ -988,7 +987,7 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
* We know VM_NORESERVE is not set. Therefore, there SHOULD
* be a region map for all pages. The only situation where
* there is no region map is if a hole was punched via
- * fallocate. In this case, there really are no reverves to
+ * fallocate. In this case, there really are no reserves to
* use. This situation is indicated if chg != 0.
*/
if (chg)
@@ -1519,7 +1518,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
* For gigantic hugepages allocated through bootmem at
* boot, it's safer to be consistent with the not-gigantic
* hugepages and clear the PG_reserved bit from all tail pages
- * too. Otherwse drivers using get_user_pages() to access tail
+ * too. Otherwise drivers using get_user_pages() to access tail
* pages may get the reference counting wrong if they see
* PG_reserved set on a tail page (despite the head page not
* having PG_reserved set). Enforcing this consistency between
@@ -4579,9 +4578,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/*
* entry could be a migration/hwpoison entry at this point, so this
* check prevents the kernel from going below assuming that we have
- * a active hugepage in pagecache. This goto expects the 2nd page fault,
- * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
- * handle it.
+ * an active hugepage in pagecache. This goto expects the 2nd page
+ * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
+ * properly handle it.
*/
if (!pte_present(entry))
goto out_mutex;
@@ -4696,7 +4695,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
(const void __user *) src_addr,
pages_per_huge_page(h), false);
- /* fallback to copy_from_user outside mmap_sem */
+ /* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
ret = -ENOENT;
*pagep = page;
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 19603302a77f..3a613c85f9ed 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -6,10 +6,10 @@
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/mman.h>
+#include <linux/pgtable.h>
#include <linux/atomic.h>
#include <linux/user_namespace.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#ifndef INIT_MM_CONTEXT
@@ -31,7 +31,7 @@ struct mm_struct init_mm = {
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+ MMAP_LOCK_INITIALIZER(init_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
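
For a statically defined mm the rwsem field initializer is now hidden behind MMAP_LOCK_INITIALIZER(). A hedged sketch of another hypothetical static mm following the same pattern; the name demo_mm is invented, the field list is abbreviated (a real mm also sets .pgd and friends), and the macro is assumed to supply the .mmap_lock initializer including its trailing comma, as the init_mm hunk above suggests:

	/* Hedged sketch: a hypothetical statically initialized mm, mirroring init_mm. */
	#include <linux/mm_types.h>
	#include <linux/mmap_lock.h>

	static struct mm_struct demo_mm = {
		.mm_users	 = ATOMIC_INIT(2),
		.mm_count	 = ATOMIC_INIT(1),
		MMAP_LOCK_INITIALIZER(demo_mm)
		.page_table_lock = __SPIN_LOCK_UNLOCKED(demo_mm.page_table_lock),
		.mmlist		 = LIST_HEAD_INIT(demo_mm.mmlist),
	};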
diff --git a/mm/internal.h b/mm/internal.h
index 9117bca90f4b..9886db20d94f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -132,7 +132,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
*
* zonelist, preferred_zone and highest_zoneidx are set first in
* __alloc_pages_nodemask() for the fast path, and might be later changed
- * in __alloc_pages_slowpath(). All other functions pass the whole strucure
+ * in __alloc_pages_slowpath(). All other functions pass the whole structure
* by a const pointer.
*/
struct alloc_context {
@@ -344,7 +344,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
}
/*
- * must be called with vma's mmap_sem held for read or write, and page locked.
+ * must be called with vma's mmap_lock held for read or write, and page locked.
*/
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);
@@ -413,13 +413,13 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
/*
* FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
- * anything, so we only pin the file and drop the mmap_sem if only
+ * anything, so we only pin the file and drop the mmap_lock if only
* FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
*/
if (fault_flag_allow_retry_first(flags) &&
!(flags & FAULT_FLAG_RETRY_NOWAIT)) {
fpin = get_file(vmf->vma->vm_file);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
}
return fpin;
}
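
maybe_unlock_mmap_for_io() only helps if its caller honours the retry contract: once mmap_lock has been dropped, the fault handler must report VM_FAULT_RETRY rather than keep touching the now-unstable vma. A hedged sketch of that caller-side shape, loosely modelled on filemap_fault(); the handler name is invented and the body is elided:

	/* Illustrative sketch of the drop-mmap_lock-for-I/O pattern (not from the tree). */
	#include <linux/mm.h>
	#include <linux/file.h>
	#include "internal.h"	/* maybe_unlock_mmap_for_io() is mm-internal */

	static vm_fault_t demo_fault(struct vm_fault *vmf)
	{
		struct file *fpin = NULL;
		vm_fault_t ret = 0;

		/* May pin the file and drop mmap_lock when retry is allowed. */
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);

		/* ... kick off readahead / wait for I/O here ... */

		if (fpin) {
			/* mmap_lock was released: ask the arch fault code to retry. */
			fput(fpin);
			return ret | VM_FAULT_RETRY;
		}
		return ret;
	}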
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index ce45c491ebcd..fe6be0be1f76 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -250,20 +250,9 @@ int __ref kasan_populate_early_shadow(const void *shadow_start,
* 3,2 - level page tables where we don't have
* puds,pmds, so pgd_populate(), pud_populate()
* is noops.
- *
- * The ifndef is required to avoid build breakage.
- *
- * With 5level-fixup.h, pgd_populate() is not nop and
- * we reference kasan_early_shadow_p4d. It's not defined
- * unless 5-level paging enabled.
- *
- * The ifndef can be dropped once all KASAN-enabled
- * architectures will switch to pgtable-nop4d.h.
*/
-#ifndef __ARCH_HAS_5LEVEL_HACK
pgd_populate(&init_mm, pgd,
lm_alias(kasan_early_shadow_p4d));
-#endif
p4d = p4d_offset(pgd, addr);
p4d_populate(&init_mm, p4d,
lm_alias(kasan_early_shadow_pud));
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3f032487825b..b043c40a21d4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -534,10 +534,10 @@ void __khugepaged_exit(struct mm_struct *mm)
* under mmap sem read mode). Stop here (after we
* return all pagetables will be destroyed) until
* khugepaged has finished working on the pagetables
- * under the mmap_sem.
+ * under the mmap_lock.
*/
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
+ mmap_write_unlock(mm);
}
}
@@ -933,8 +933,8 @@ khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
#endif
/*
- * If mmap_sem temporarily dropped, revalidate vma
- * before taking mmap_sem.
+ * If mmap_lock temporarily dropped, revalidate vma
+ * before taking mmap_lock.
* Return 0 if succeeds, otherwise return none-zero
* value (scan code).
*/
@@ -966,7 +966,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
* Only done if khugepaged_scan_pmd believes it is worthwhile.
*
* Called and returns without pte mapped or spinlocks held,
- * but with mmap_sem held to protect against vma changes.
+ * but with mmap_lock held to protect against vma changes.
*/
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
@@ -993,9 +993,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
swapped_in++;
ret = do_swap_page(&vmf);
- /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
+ /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
if (ret & VM_FAULT_RETRY) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
/* vma is no longer available, don't continue to swapin */
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
@@ -1047,12 +1047,12 @@ static void collapse_huge_page(struct mm_struct *mm,
gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
/*
- * Before allocating the hugepage, release the mmap_sem read lock.
+ * Before allocating the hugepage, release the mmap_lock read lock.
* The allocation can take potentially a long time if it involves
- * sync compaction, and we do not need to hold the mmap_sem during
+ * sync compaction, and we do not need to hold the mmap_lock during
* that. We will recheck the vma after taking it again in write mode.
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
new_page = khugepaged_alloc_page(hpage, gfp, node);
if (!new_page) {
result = SCAN_ALLOC_HUGE_PAGE_FAIL;
@@ -1065,38 +1065,38 @@ static void collapse_huge_page(struct mm_struct *mm,
}
count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
result = hugepage_vma_revalidate(mm, address, &vma);
if (result) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
goto out_nolock;
}
pmd = mm_find_pmd(mm, address);
if (!pmd) {
result = SCAN_PMD_NULL;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
goto out_nolock;
}
/*
- * __collapse_huge_page_swapin always returns with mmap_sem locked.
- * If it fails, we release mmap_sem and jump out_nolock.
+ * __collapse_huge_page_swapin always returns with mmap_lock locked.
+ * If it fails, we release mmap_lock and jump out_nolock.
* Continuing to collapse causes inconsistency.
*/
if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
pmd, referenced)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
goto out_nolock;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Prevent all access to pagetables with the exception of
* gup_fast later handled by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
*/
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
result = SCAN_ANY_PROCESS;
if (!mmget_still_valid(mm))
goto out;
@@ -1184,7 +1184,7 @@ static void collapse_huge_page(struct mm_struct *mm,
khugepaged_pages_collapsed++;
result = SCAN_SUCCEED;
out_up_write:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
out_nolock:
if (!IS_ERR_OR_NULL(*hpage))
mem_cgroup_uncharge(*hpage);
@@ -1345,7 +1345,7 @@ out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret) {
node = khugepaged_find_target_node();
- /* collapse_huge_page will return with the mmap_sem released */
+ /* collapse_huge_page will return with the mmap_lock released */
collapse_huge_page(mm, address, hpage, node,
referenced, unmapped);
}
@@ -1517,7 +1517,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
if (likely(mm_slot->nr_pte_mapped_thp == 0))
return 0;
- if (!down_write_trylock(&mm->mmap_sem))
+ if (!mmap_write_trylock(mm))
return -EBUSY;
if (unlikely(khugepaged_test_exit(mm)))
@@ -1528,7 +1528,7 @@ static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
out:
mm_slot->nr_pte_mapped_thp = 0;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
}
@@ -1543,11 +1543,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
/*
* Check vma->anon_vma to exclude MAP_PRIVATE mappings that
* got written to. These VMAs are likely not worth investing
- * down_write(mmap_sem) as PMD-mapping is likely to be split
+ * mmap_write_lock(mm) as PMD-mapping is likely to be split
* later.
*
* Not that vma->anon_vma check is racy: it can be set up after
- * the check but before we took mmap_sem by the fault path.
+ * the check but before we took mmap_lock by the fault path.
* But page lock would prevent establishing any new ptes of the
* page, so we are safe.
*
@@ -1567,18 +1567,18 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
if (!pmd)
continue;
/*
- * We need exclusive mmap_sem to retract page table.
+ * We need exclusive mmap_lock to retract page table.
*
* We use trylock due to lock inversion: we need to acquire
- * mmap_sem while holding page lock. Fault path does it in
+ * mmap_lock while holding page lock. Fault path does it in
* reverse order. Trylock is a way to avoid deadlock.
*/
- if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
+ if (mmap_write_trylock(vma->vm_mm)) {
spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
spin_unlock(ptl);
- up_write(&vma->vm_mm->mmap_sem);
+ mmap_write_unlock(vma->vm_mm);
mm_dec_nr_ptes(vma->vm_mm);
pte_free(vma->vm_mm, pmd_pgtable(_pmd));
} else {
@@ -2057,8 +2057,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
* the next mm on the list.
*/
vma = NULL;
- if (unlikely(!down_read_trylock(&mm->mmap_sem)))
- goto breakouterloop_mmap_sem;
+ if (unlikely(!mmap_read_trylock(mm)))
+ goto breakouterloop_mmap_lock;
if (likely(!khugepaged_test_exit(mm)))
vma = find_vma(mm, khugepaged_scan.address);
@@ -2102,7 +2102,7 @@ skip:
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
ret = 1;
khugepaged_scan_file(mm, file, pgoff, hpage);
fput(file);
@@ -2115,15 +2115,15 @@ skip:
khugepaged_scan.address += HPAGE_PMD_SIZE;
progress += HPAGE_PMD_NR;
if (ret)
- /* we released mmap_sem so break loop */
- goto breakouterloop_mmap_sem;
+ /* we released mmap_lock so break loop */
+ goto breakouterloop_mmap_lock;
if (progress >= pages)
goto breakouterloop;
}
}
breakouterloop:
- up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
-breakouterloop_mmap_sem:
+ mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
+breakouterloop_mmap_lock:
spin_lock(&khugepaged_mm_lock);
VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
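
retract_page_tables() above illustrates why the trylock wrapper exists: the collapse path wants mmap_lock while already holding the page lock, the reverse of the fault path's ordering, so it must not block. A hedged sketch of that shape with an invented name:

	/* Illustrative only: trylock to avoid an A/B vs B/A lock-ordering deadlock. */
	static void demo_retract(struct vm_area_struct *vma)
	{
		struct mm_struct *mm = vma->vm_mm;

		/*
		 * The fault path takes mmap_lock and then the page lock; this
		 * path already holds the page lock, so blocking on mmap_lock
		 * could deadlock.  Trylock and skip the work on contention.
		 */
		if (!mmap_write_trylock(mm))
			return;			/* khugepaged will revisit later */

		/* ... retract/collapse page tables under the exclusive lock ... */

		mmap_write_unlock(mm);
	}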
diff --git a/mm/ksm.c b/mm/ksm.c
index 281c00129a2e..4102034cd55a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -442,7 +442,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
/*
* ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
* page tables after it has passed through ksm_exit() - which, if necessary,
- * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
+ * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
* a special flag: they can just back out as soon as mm_users goes to zero.
* ksm_test_exit() is used throughout to make this test for exit: in some
* places for correctness, in some places just to avoid unnecessary work.
@@ -542,11 +542,11 @@ static void break_cow(struct rmap_item *rmap_item)
*/
put_anon_vma(rmap_item->anon_vma);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (vma)
break_ksm(vma, addr);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
static struct page *get_mergeable_page(struct rmap_item *rmap_item)
@@ -556,7 +556,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
struct vm_area_struct *vma;
struct page *page;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (!vma)
goto out;
@@ -572,7 +572,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
out:
page = NULL;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return page;
}
@@ -612,7 +612,7 @@ static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
* Move the old stable node to the second dimension
* queued in the hlist_dup. The invariant is that all
* dup stable_nodes in the chain->hlist point to pages
- * that are wrprotected and have the exact same
+ * that are write protected and have the exact same
* content.
*/
stable_node_chain_add_dup(dup, chain);
@@ -831,7 +831,7 @@ static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
* Though it's very tempting to unmerge rmap_items from stable tree rather
* than check every pte of a given vma, the locking doesn't quite work for
* that - an rmap_item is assigned to the stable tree after inserting ksm
- * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
+ * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
* rmap_items from parent to child at fork time (so as not to waste time
* if exit comes before the next scan reaches it).
*
@@ -976,7 +976,7 @@ static int unmerge_and_remove_all_rmap_items(void)
for (mm_slot = ksm_scan.mm_slot;
mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
mm = mm_slot->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (ksm_test_exit(mm))
break;
@@ -989,7 +989,7 @@ static int unmerge_and_remove_all_rmap_items(void)
}
remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -1012,7 +1012,7 @@ static int unmerge_and_remove_all_rmap_items(void)
return 0;
error:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
ksm_scan.mm_slot = &ksm_mm_head;
spin_unlock(&ksm_mmlist_lock);
@@ -1148,7 +1148,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
/*
* No need to check ksm_use_zero_pages here: we can only have a
- * zero_page here if ksm_use_zero_pages was enabled alreaady.
+ * zero_page here if ksm_use_zero_pages was enabled already.
*/
if (!is_zero_pfn(page_to_pfn(kpage))) {
get_page(kpage);
@@ -1280,7 +1280,7 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
struct vm_area_struct *vma;
int err = -EFAULT;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_mergeable_vma(mm, rmap_item->address);
if (!vma)
goto out;
@@ -1292,11 +1292,11 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
/* Unstable nid is in union with stable anon_vma: remove first */
remove_rmap_item_from_tree(rmap_item);
- /* Must get reference to anon_vma while still holding mmap_sem */
+ /* Must get reference to anon_vma while still holding mmap_lock */
rmap_item->anon_vma = vma->anon_vma;
get_anon_vma(vma->anon_vma);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return err;
}
@@ -1608,7 +1608,7 @@ again:
* continue. All KSM pages belonging to the
* stable_node dups in a stable_node chain
* have the same content and they're
- * wrprotected at all times. Any will work
+ * write protected at all times. Any will work
* fine to continue the walk.
*/
tree_page = get_ksm_page(stable_node_any,
@@ -1843,7 +1843,7 @@ again:
* continue. All KSM pages belonging to the
* stable_node dups in a stable_node chain
* have the same content and they're
- * wrprotected at all times. Any will work
+ * write protected at all times. Any will work
* fine to continue the walk.
*/
tree_page = get_ksm_page(stable_node_any,
@@ -2001,7 +2001,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
* duplicate. page_migration could break later if rmap breaks,
* so we can as well crash here. We really need to check for
* rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
- * for other negative values as an undeflow if detected here
+ * for other negative values as an underflow if detected here
* for the first time (and not when decreasing rmap_hlist_len)
* would be sign of memory corruption in the stable_node.
*/
@@ -2110,7 +2110,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
if (ksm_use_zero_pages && (checksum == zero_checksum)) {
struct vm_area_struct *vma;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_mergeable_vma(mm, rmap_item->address);
if (vma) {
err = try_to_merge_one_page(vma, page,
@@ -2122,7 +2122,7 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
*/
err = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* In case of failure, the page was not really empty, so we
* need to continue. Otherwise we're done.
@@ -2285,7 +2285,7 @@ next_mm:
}
mm = slot->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (ksm_test_exit(mm))
vma = NULL;
else
@@ -2319,7 +2319,7 @@ next_mm:
ksm_scan.address += PAGE_SIZE;
} else
put_page(*page);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return rmap_item;
}
put_page(*page);
@@ -2343,13 +2343,13 @@ next_mm:
struct mm_slot, mm_list);
if (ksm_scan.address == 0) {
/*
- * We've completed a full scan of all vmas, holding mmap_sem
+ * We've completed a full scan of all vmas, holding mmap_lock
* throughout, and found no VM_MERGEABLE: so do the same as
* __ksm_exit does to remove this mm from all our lists now.
* This applies either when cleaning up after __ksm_exit
* (but beware: we can reach here even before __ksm_exit),
* or when all VM_MERGEABLE areas have been unmapped (and
- * mmap_sem then protects against race with MADV_MERGEABLE).
+ * mmap_lock then protects against race with MADV_MERGEABLE).
*/
hash_del(&slot->link);
list_del(&slot->mm_list);
@@ -2357,12 +2357,12 @@ next_mm:
free_mm_slot(slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmdrop(mm);
} else {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
- * up_read(&mm->mmap_sem) first because after
+ * mmap_read_unlock(mm) first because after
* spin_unlock(&ksm_mmlist_lock) run, the "mm" may
* already have been freed under us by __ksm_exit()
* because the "mm_slot" is still hashed and
@@ -2536,7 +2536,7 @@ void __ksm_exit(struct mm_struct *mm)
* This process is exiting: if it's straightforward (as is the
* case when ksmd was never running), free mm_slot immediately.
* But if it's at the cursor or has rmap_items linked to it, use
- * mmap_sem to synchronize with any break_cows before pagetables
+ * mmap_lock to synchronize with any break_cows before pagetables
* are freed, and leave the mm_slot on the list for ksmd to free.
* Beware: ksm may already have noticed it exiting and freed the slot.
*/
@@ -2560,8 +2560,8 @@ void __ksm_exit(struct mm_struct *mm)
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmdrop(mm);
} else if (mm_slot) {
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
+ mmap_write_unlock(mm);
}
}
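
__ksm_exit() (like __khugepaged_exit() earlier) uses an empty write-lock/unlock pair purely as a barrier: taking mmap_lock exclusively cannot succeed until every current read-side holder, such as a break_cow() walker, has finished. A hedged sketch of the idiom with an invented name:

	/* Illustrative only: an empty lock/unlock pair used as a synchronization barrier. */
	static void demo_wait_for_readers(struct mm_struct *mm)
	{
		/*
		 * Nothing is modified here; acquiring the lock for write simply
		 * waits until all existing readers of mm->mmap_lock have dropped it.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);

		/* From this point on, no pre-existing reader can still be running. */
	}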
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 4d5294c39bba..9222910ab1cb 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -213,7 +213,7 @@ restart:
/*
* decrement nr_to_walk first so that we don't livelock if we
- * get stuck on large numbesr of LRU_RETRY items
+ * get stuck on large numbers of LRU_RETRY items
*/
if (!*nr_to_walk)
break;
diff --git a/mm/maccess.c b/mm/maccess.c
index 3ca8d97e5010..88845eda5047 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -1,103 +1,128 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Access kernel memory without faulting.
+ * Access kernel or user memory without faulting.
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
-static __always_inline long
-probe_read_common(void *dst, const void __user *src, size_t size)
+bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
- long ret;
+ return true;
+}
+
+#ifdef HAVE_GET_KERNEL_NOFAULT
+
+#define probe_kernel_read_loop(dst, src, len, type, err_label) \
+ while (len >= sizeof(type)) { \
+ __get_kernel_nofault(dst, src, type, err_label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+ if (!probe_kernel_read_allowed(src, size))
+ return -ERANGE;
pagefault_disable();
- ret = __copy_from_user_inatomic(dst, src, size);
+ probe_kernel_read_loop(dst, src, size, u64, Efault);
+ probe_kernel_read_loop(dst, src, size, u32, Efault);
+ probe_kernel_read_loop(dst, src, size, u16, Efault);
+ probe_kernel_read_loop(dst, src, size, u8, Efault);
pagefault_enable();
+ return 0;
+Efault:
+ pagefault_enable();
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(probe_kernel_read);
+
+#define probe_kernel_write_loop(dst, src, len, type, err_label) \
+ while (len >= sizeof(type)) { \
+ __put_kernel_nofault(dst, src, type, err_label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
- return ret ? -EFAULT : 0;
+long probe_kernel_write(void *dst, const void *src, size_t size)
+{
+ pagefault_disable();
+ probe_kernel_write_loop(dst, src, size, u64, Efault);
+ probe_kernel_write_loop(dst, src, size, u32, Efault);
+ probe_kernel_write_loop(dst, src, size, u16, Efault);
+ probe_kernel_write_loop(dst, src, size, u8, Efault);
+ pagefault_enable();
+ return 0;
+Efault:
+ pagefault_enable();
+ return -EFAULT;
}
-static __always_inline long
-probe_write_common(void __user *dst, const void *src, size_t size)
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
- long ret;
+ const void *src = unsafe_addr;
+
+ if (unlikely(count <= 0))
+ return 0;
+ if (!probe_kernel_read_allowed(unsafe_addr, count))
+ return -ERANGE;
pagefault_disable();
- ret = __copy_to_user_inatomic(dst, src, size);
+ do {
+ __get_kernel_nofault(dst, src, u8, Efault);
+ dst++;
+ src++;
+ } while (dst[-1] && src - unsafe_addr < count);
pagefault_enable();
- return ret ? -EFAULT : 0;
+ dst[-1] = '\0';
+ return src - unsafe_addr;
+Efault:
+ pagefault_enable();
+ dst[-1] = '\0';
+ return -EFAULT;
}
-
+#else /* HAVE_GET_KERNEL_NOFAULT */
/**
- * probe_kernel_read(): safely attempt to read from a kernel-space location
+ * probe_kernel_read(): safely attempt to read from kernel-space
* @dst: pointer to the buffer that shall take the data
* @src: address to read from
* @size: size of the data chunk
*
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
+ * Safely read from kernel address @src to the buffer at @dst. If a kernel
+ * fault happens, handle that and return -EFAULT. If @src is not a valid kernel
+ * address, return -ERANGE.
*
* We ensure that the copy_from_user is executed in atomic context so that
- * do_page_fault() doesn't attempt to take mmap_sem. This makes
+ * do_page_fault() doesn't attempt to take mmap_lock. This makes
* probe_kernel_read() suitable for use within regions where the caller
- * already holds mmap_sem, or other locks which nest inside mmap_sem.
- *
- * probe_kernel_read_strict() is the same as probe_kernel_read() except for
- * the case where architectures have non-overlapping user and kernel address
- * ranges: probe_kernel_read_strict() will additionally return -EFAULT for
- * probing memory on a user address range where probe_user_read() is supposed
- * to be used instead.
+ * already holds mmap_lock, or other locks which nest inside mmap_lock.
*/
-
-long __weak probe_kernel_read(void *dst, const void *src, size_t size)
- __attribute__((alias("__probe_kernel_read")));
-
-long __weak probe_kernel_read_strict(void *dst, const void *src, size_t size)
- __attribute__((alias("__probe_kernel_read")));
-
-long __probe_kernel_read(void *dst, const void *src, size_t size)
+long probe_kernel_read(void *dst, const void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
+ if (!probe_kernel_read_allowed(src, size))
+ return -ERANGE;
+
set_fs(KERNEL_DS);
- ret = probe_read_common(dst, (__force const void __user *)src, size);
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dst, (__force const void __user *)src,
+ size);
+ pagefault_enable();
set_fs(old_fs);
- return ret;
+ if (ret)
+ return -EFAULT;
+ return 0;
}
EXPORT_SYMBOL_GPL(probe_kernel_read);
/**
- * probe_user_read(): safely attempt to read from a user-space location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from. This must be a user address.
- * @size: size of the data chunk
- *
- * Safely read from user address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-
-long __weak probe_user_read(void *dst, const void __user *src, size_t size)
- __attribute__((alias("__probe_user_read")));
-
-long __probe_user_read(void *dst, const void __user *src, size_t size)
-{
- long ret = -EFAULT;
- mm_segment_t old_fs = get_fs();
-
- set_fs(USER_DS);
- if (access_ok(src, size))
- ret = probe_read_common(dst, src, size);
- set_fs(old_fs);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(probe_user_read);
-
-/**
* probe_kernel_write(): safely attempt to write to a location
* @dst: address to write to
* @src: pointer to the data that shall be written
@@ -106,52 +131,25 @@ EXPORT_SYMBOL_GPL(probe_user_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-
-long __weak probe_kernel_write(void *dst, const void *src, size_t size)
- __attribute__((alias("__probe_kernel_write")));
-
-long __probe_kernel_write(void *dst, const void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = probe_write_common((__force void __user *)dst, src, size);
- set_fs(old_fs);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(probe_kernel_write);
-
-/**
- * probe_user_write(): safely attempt to write to a user-space location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-
-long __weak probe_user_write(void __user *dst, const void *src, size_t size)
- __attribute__((alias("__probe_user_write")));
-
-long __probe_user_write(void __user *dst, const void *src, size_t size)
-{
- long ret = -EFAULT;
- mm_segment_t old_fs = get_fs();
-
- set_fs(USER_DS);
- if (access_ok(dst, size))
- ret = probe_write_common(dst, src, size);
+ pagefault_disable();
+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+ pagefault_enable();
set_fs(old_fs);
- return ret;
+ if (ret)
+ return -EFAULT;
+ return 0;
}
-EXPORT_SYMBOL_GPL(probe_user_write);
/**
- * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
+ * strncpy_from_kernel_nofault: - Copy a NUL terminated string from unsafe
+ * address.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @unsafe_addr: Unsafe address.
@@ -161,27 +159,14 @@ EXPORT_SYMBOL_GPL(probe_user_write);
*
* On success, returns the length of the string INCLUDING the trailing NUL.
*
- * If access fails, returns -EFAULT (some data may have been copied
- * and the trailing NUL added).
+ * If access fails, returns -EFAULT (some data may have been copied and the
+ * trailing NUL added). If @unsafe_addr is not a valid kernel address, return
+ * -ERANGE.
*
* If @count is smaller than the length of the string, copies @count-1 bytes,
* sets the last byte of @dst buffer to NUL and returns @count.
- *
- * strncpy_from_unsafe_strict() is the same as strncpy_from_unsafe() except
- * for the case where architectures have non-overlapping user and kernel address
- * ranges: strncpy_from_unsafe_strict() will additionally return -EFAULT for
- * probing memory on a user address range where strncpy_from_unsafe_user() is
- * supposed to be used instead.
*/
-
-long __weak strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
- __attribute__((alias("__strncpy_from_unsafe")));
-
-long __weak strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
- long count)
- __attribute__((alias("__strncpy_from_unsafe")));
-
-long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
{
mm_segment_t old_fs = get_fs();
const void *src = unsafe_addr;
@@ -189,6 +174,8 @@ long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
if (unlikely(count <= 0))
return 0;
+ if (!probe_kernel_read_allowed(unsafe_addr, count))
+ return -ERANGE;
set_fs(KERNEL_DS);
pagefault_disable();
@@ -203,9 +190,66 @@ long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
return ret ? -EFAULT : src - unsafe_addr;
}
+#endif /* HAVE_GET_KERNEL_NOFAULT */
+
+/**
+ * probe_user_read(): safely attempt to read from a user-space location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from. This must be a user address.
+ * @size: size of the data chunk
+ *
+ * Safely read from user address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long probe_user_read(void *dst, const void __user *src, size_t size)
+{
+ long ret = -EFAULT;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(USER_DS);
+ if (access_ok(src, size)) {
+ pagefault_disable();
+ ret = __copy_from_user_inatomic(dst, src, size);
+ pagefault_enable();
+ }
+ set_fs(old_fs);
+
+ if (ret)
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_user_read);
+
+/**
+ * probe_user_write(): safely attempt to write to a user-space location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+long probe_user_write(void __user *dst, const void *src, size_t size)
+{
+ long ret = -EFAULT;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(USER_DS);
+ if (access_ok(dst, size)) {
+ pagefault_disable();
+ ret = __copy_to_user_inatomic(dst, src, size);
+ pagefault_enable();
+ }
+ set_fs(old_fs);
+
+ if (ret)
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(probe_user_write);
/**
- * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user
+ * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
* address.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
@@ -222,7 +266,7 @@ long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
* If @count is smaller than the length of the string, copies @count-1 bytes,
* sets the last byte of @dst buffer to NUL and returns @count.
*/
-long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
long count)
{
mm_segment_t old_fs = get_fs();
@@ -248,7 +292,7 @@ long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
}
/**
- * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL.
+ * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL.
* @unsafe_addr: The string to measure.
* @count: Maximum count (including NUL)
*
@@ -263,7 +307,7 @@ long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
* Unlike strnlen_user, this can be used from IRQ handler etc. because
* it disables pagefaults.
*/
-long strnlen_unsafe_user(const void __user *unsafe_addr, long count)
+long strnlen_user_nofault(const void __user *unsafe_addr, long count)
{
mm_segment_t old_fs = get_fs();
int ret;
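
The reworked maccess helpers keep their external contract: 0 on success, -EFAULT when the access faults, and, for the kernel-side probes, -ERANGE when the address is rejected by probe_kernel_read_allowed(). A hedged usage sketch for a tracing-style context that must never fault; the function and variable names are invented:

	#include <linux/uaccess.h>
	#include <linux/printk.h>

	static void demo_probe(const void *kptr, const void __user *uptr)
	{
		unsigned long val;
		char name[16];

		/* Kernel address: 0 on success, -EFAULT on fault, -ERANGE if disallowed. */
		if (probe_kernel_read(&val, kptr, sizeof(val)))
			val = 0;

		/* NUL-terminated string at an untrusted kernel address; never faults. */
		if (strncpy_from_kernel_nofault(name, kptr, sizeof(name)) < 0)
			name[0] = '\0';

		/* User address: validated with access_ok() and copied without faulting. */
		if (probe_user_read(&val, uptr, sizeof(val)))
			val = 0;

		pr_debug("val=%lu name=%s\n", val, name);
	}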
diff --git a/mm/madvise.c b/mm/madvise.c
index 8cbd8c1bfe15..dd1d43cf026d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -40,7 +40,7 @@ struct madvise_walk_private {
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
- * take mmap_sem for writing. Others, which simply traverse vmas, need
+ * take mmap_lock for writing. Others, which simply traverse vmas, need
* to only take it for reading.
*/
static int madvise_need_mmap_write(int behavior)
@@ -165,7 +165,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
success:
/*
- * vm_flags is protected by the mmap_sem held in write mode.
+ * vm_flags is protected by the mmap_lock held in write mode.
*/
vma->vm_flags = new_flags;
@@ -285,16 +285,16 @@ static long madvise_willneed(struct vm_area_struct *vma,
* Filesystem's fadvise may need to take various locks. We need to
* explicitly grab a reference because the vma (and hence the
* vma's reference to the file) can go away as soon as we drop
- * mmap_sem.
+ * mmap_lock.
*/
- *prev = NULL; /* tell sys_madvise we drop mmap_sem */
+ *prev = NULL; /* tell sys_madvise we drop mmap_lock */
get_file(file);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
fput(file);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
return 0;
}
@@ -768,9 +768,9 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
return -EINVAL;
if (!userfaultfd_remove(vma, start, end)) {
- *prev = NULL; /* mmap_sem has been dropped, prev is stale */
+ *prev = NULL; /* mmap_lock has been dropped, prev is stale */
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, start);
if (!vma)
return -ENOMEM;
@@ -791,7 +791,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
if (end > vma->vm_end) {
/*
* Don't fail if end > vma->vm_end. If the old
- * vma was splitted while the mmap_sem was
+ * vma was split while the mmap_lock was
* released the effect of the concurrent
* operation may not cause madvise() to
* have an undefined result. There may be an
@@ -826,7 +826,7 @@ static long madvise_remove(struct vm_area_struct *vma,
int error;
struct file *f;
- *prev = NULL; /* tell sys_madvise we drop mmap_sem */
+ *prev = NULL; /* tell sys_madvise we drop mmap_lock */
if (vma->vm_flags & VM_LOCKED)
return -EINVAL;
@@ -847,18 +847,18 @@ static long madvise_remove(struct vm_area_struct *vma,
* Filesystem's fallocate may need to take i_mutex. We need to
* explicitly grab a reference because the vma (and hence the
* vma's reference to the file) can go away as soon as we drop
- * mmap_sem.
+ * mmap_lock.
*/
get_file(f);
if (userfaultfd_remove(vma, start, end)) {
- /* mmap_sem was not released by userfaultfd_remove() */
- up_read(&current->mm->mmap_sem);
+ /* mmap_lock was not released by userfaultfd_remove() */
+ mmap_read_unlock(current->mm);
}
error = vfs_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
fput(f);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
return error;
}
@@ -1089,7 +1089,7 @@ int do_madvise(unsigned long start, size_t len_in, int behavior)
write = madvise_need_mmap_write(behavior);
if (write) {
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
/*
@@ -1105,11 +1105,11 @@ int do_madvise(unsigned long start, size_t len_in, int behavior)
* model.
*/
if (!mmget_still_valid(current->mm)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return -EINTR;
}
} else {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
/*
@@ -1153,15 +1153,15 @@ int do_madvise(unsigned long start, size_t len_in, int behavior)
goto out;
if (prev)
vma = prev->vm_next;
- else /* madvise_remove dropped mmap_sem */
+ else /* madvise_remove dropped mmap_lock */
vma = find_vma(current->mm, start);
}
out:
blk_finish_plug(&plug);
if (write)
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
else
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return error;
}
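
Several madvise handlers above share one convention: before calling into a filesystem (fadvise, fallocate) they pin the file, drop mmap_lock, and set *prev = NULL so do_madvise() knows the vma chain must be looked up again. A hedged sketch of that shape with an invented handler name and the filesystem call elided:

	/* Illustrative only: drop mmap_lock around potentially blocking fs work. */
	#include <linux/mm.h>
	#include <linux/fs.h>
	#include <linux/file.h>
	#include <linux/sched.h>

	static long demo_madvise_handler(struct vm_area_struct *vma,
					 struct vm_area_struct **prev,
					 unsigned long start, unsigned long end)
	{
		struct file *file = vma->vm_file;

		*prev = NULL;			/* tell the caller we drop mmap_lock */
		get_file(file);			/* vma (and its file ref) may vanish  */
		mmap_read_unlock(current->mm);

		/* ... vfs_fadvise()/vfs_fallocate()-style work goes here ... */

		fput(file);
		mmap_read_lock(current->mm);	/* caller expects the lock re-taken   */
		return 0;
	}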
diff --git a/mm/memblock.c b/mm/memblock.c
index 743659d88fc4..39aceafc57f6 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -78,7 +78,7 @@
* * memblock_alloc*() - these functions return the **virtual** address
* of the allocated memory.
*
- * Note, that both API variants use implict assumptions about allowed
+ * Note, that both API variants use implicit assumptions about allowed
* memory ranges and the fallback methods. Consult the documentation
* of memblock_alloc_internal() and memblock_alloc_range_nid()
* functions for more elaborate description.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5381afb23d58..0b38b6ad547d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3186,7 +3186,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
* Test whether @memcg has children, dead or alive. Note that this
* function doesn't care whether @memcg has use_hierarchy enabled and
* returns %true if there are child csses according to the cgroup
- * hierarchy. Testing use_hierarchy is the caller's responsiblity.
+ * hierarchy. Testing use_hierarchy is the caller's responsibility.
*/
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
@@ -4838,7 +4838,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
* limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
* memory-controlled cgroups to 64k.
*
- * However, there usually are many references to the oflline CSS after
+ * However, there usually are many references to the offline CSS after
* the cgroup has been destroyed, such as page cache or reclaimable
* slab objects, that don't need to hang on to the ID. We want to keep
* those dead CSS from occupying IDs, or we might quickly exhaust the
@@ -5614,9 +5614,9 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
unsigned long precharge;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
precharge = mc.precharge;
mc.precharge = 0;
@@ -5899,9 +5899,9 @@ static void mem_cgroup_move_charge(void)
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mc.mm))) {
/*
- * Someone who are holding the mmap_sem might be waiting in
+ * Someone who is holding the mmap_lock might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
* and retry. Because we cancel precharges, we might not be able
* to move enough charges, but moving charge is a best-effort
@@ -5918,7 +5918,7 @@ retry:
walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
NULL);
- up_read(&mc.mm->mmap_sem);
+ mmap_read_unlock(mc.mm);
atomic_dec(&mc.from->moving_account);
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ababa368cb68..47b8ccb1fb9b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -212,15 +212,13 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
short addr_lsb = tk->size_shift;
int ret = 0;
- if ((t->mm == current->mm) || !(flags & MF_ACTION_REQUIRED))
- pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
+ pr_err("Memory failure: %#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
pfn, t->comm, t->pid);
if (flags & MF_ACTION_REQUIRED) {
- if (t->mm == current->mm)
- ret = force_sig_mceerr(BUS_MCEERR_AR,
+ WARN_ON_ONCE(t != current);
+ ret = force_sig_mceerr(BUS_MCEERR_AR,
(void __user *)tk->addr, addr_lsb);
- /* send no signal to non-current processes */
} else {
/*
* Don't use force here, it's convenient if the signal
@@ -402,9 +400,15 @@ static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
struct task_struct *t;
- for_each_thread(tsk, t)
- if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
- return t;
+ for_each_thread(tsk, t) {
+ if (t->flags & PF_MCE_PROCESS) {
+ if (t->flags & PF_MCE_EARLY)
+ return t;
+ } else {
+ if (sysctl_memory_failure_early_kill)
+ return t;
+ }
+ }
return NULL;
}
@@ -413,21 +417,26 @@ static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
* to be signaled when some page under the process is hwpoisoned.
* Return task_struct of the dedicated thread (main thread unless explicitly
* specified) if the process is "early kill," and otherwise returns NULL.
+ *
+ * Note that the above is true for the Action Optional case, but not for the
+ * Action Required case, where SIGBUS should be sent only to the current thread.
*/
static struct task_struct *task_early_kill(struct task_struct *tsk,
int force_early)
{
- struct task_struct *t;
if (!tsk->mm)
return NULL;
- if (force_early)
- return tsk;
- t = find_early_kill_thread(tsk);
- if (t)
- return t;
- if (sysctl_memory_failure_early_kill)
- return tsk;
- return NULL;
+ if (force_early) {
+ /*
+ * Comparing ->mm here because current task might represent
+ * a subthread, while tsk always points to the main thread.
+ */
+ if (tsk->mm == current->mm)
+ return current;
+ else
+ return NULL;
+ }
+ return find_early_kill_thread(tsk);
}
/*
diff --git a/mm/memory.c b/mm/memory.c
index 7b70398f76a0..dc7f3543b1fd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -80,7 +80,6 @@
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include "internal.h"
@@ -1186,7 +1185,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
* Here there can be other concurrent MADV_DONTNEED or
* trans huge page faults running, and if the pmd is
* none or trans huge it can change under us. This is
- * because MADV_DONTNEED holds the mmap_sem in read
+ * because MADV_DONTNEED holds the mmap_lock in read
* mode.
*/
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
@@ -1212,7 +1211,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
next = pud_addr_end(addr, end);
if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
if (next - addr != HPAGE_PUD_SIZE) {
- VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+ mmap_assert_locked(tlb->mm);
split_huge_pud(vma, pud, addr);
} else if (zap_huge_pud(tlb, vma, pud, addr))
goto next;
@@ -1593,7 +1592,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || end_addr >= vma->vm_end)
return -EFAULT;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
- BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+ BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
@@ -1637,7 +1636,7 @@ EXPORT_SYMBOL(vm_insert_pages);
* The page does not need to be reserved.
*
* Usually this function is called from f_op->mmap() handler
- * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
+ * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
* Caller must set VM_MIXEDMAP on vma if it wants to call this
* function from other places, for example from page-fault handler.
*
@@ -1651,7 +1650,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
- BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+ BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
@@ -2467,7 +2466,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
}
/*
- * The same page can be mapped back since last copy attampt.
+ * The same page can be mapped back since last copy attempt.
* Try to copy again under PTL.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
@@ -2574,7 +2573,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
* mapping may be NULL here because some device drivers do not
* set page.mapping but still dirty their pages
*
- * Drop the mmap_sem before waiting on IO, if we can. The file
+ * Drop the mmap_lock before waiting on IO, if we can. The file
* is pinning the mapping, as per above.
*/
if ((dirtied || page_mkwrite) && mapping) {
@@ -2624,7 +2623,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
/*
* Handle the case of a page which we actually need to copy to a new page.
*
- * Called with mmap_sem locked and the old page referenced, but
+ * Called with mmap_lock locked and the old page referenced, but
* without the ptl held.
*
* High level logic flow:
@@ -2888,9 +2887,9 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
* change only once the write actually happens. This avoids a few races,
* and potentially makes it more efficient.
*
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), with pte both mapped and locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
+ * We return with mmap_lock still held, but pte unmapped and unlocked.
*/
static vm_fault_t do_wp_page(struct vm_fault *vmf)
__releases(vmf->ptl)
@@ -3079,11 +3078,11 @@ void unmap_mapping_range(struct address_space *mapping,
EXPORT_SYMBOL(unmap_mapping_range);
/*
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
- * We return with the mmap_sem locked or unlocked in the same cases
+ * We return with the mmap_lock locked or unlocked in the same cases
* as does filemap_fault().
*/
vm_fault_t do_swap_page(struct vm_fault *vmf)
@@ -3304,9 +3303,9 @@ out_release:
}
/*
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
+ * We return with mmap_lock still held, but pte unmapped and unlocked.
*/
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
@@ -3324,10 +3323,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
* pte_offset_map() on pmds where a huge pmd might be created
* from a different thread.
*
- * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+ * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
* parallel threads are excluded by other means.
*
- * Here we only have down_read(mmap_sem).
+ * Here we only have mmap_read_lock(mm).
*/
if (pte_alloc(vma->vm_mm, vmf->pmd))
return VM_FAULT_OOM;
@@ -3420,7 +3419,7 @@ oom:
}
/*
- * The mmap_sem must have been held on entry, and may have been
+ * The mmap_lock must have been held on entry, and may have been
* released depending on flags and vma->vm_ops->fault() return value.
* See filemap_fault() and __lock_page_retry().
*/
@@ -3929,11 +3928,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
}
/*
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * We enter with non-exclusive mmap_lock (to exclude vma changes,
* but allow concurrent faults).
- * The mmap_sem may have been released depending on flags and our
+ * The mmap_lock may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
- * If mmap_sem is released, vma may become invalid (for example
+ * If mmap_lock is released, vma may become invalid (for example
* by other thread calling munmap()).
*/
static vm_fault_t do_fault(struct vm_fault *vmf)
@@ -4162,10 +4161,10 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*
- * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
+ * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
* concurrent faults).
*
- * The mmap_sem may have been released depending on flags and our return value.
+ * The mmap_lock may have been released depending on flags and our return value.
* See filemap_fault() and __lock_page_or_retry().
*/
static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
@@ -4187,7 +4186,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
/*
* A regular pmd is established and it can't morph into a huge
* pmd from under us anymore at this point because we hold the
- * mmap_sem read mode and khugepaged takes it in write mode.
+ * mmap_lock read mode and khugepaged takes it in write mode.
* So now it's safe to run pte_offset_map().
*/
vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
@@ -4255,7 +4254,7 @@ unlock:
/*
* By the time we get here, we already hold the mm semaphore
*
- * The mmap_sem may have been released depending on flags and our
+ * The mmap_lock may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
@@ -4350,7 +4349,7 @@ retry_pud:
/*
* By the time we get here, we already hold the mm semaphore
*
- * The mmap_sem may have been released depending on flags and our
+ * The mmap_lock may have been released depending on flags and our
* return value. See filemap_fault() and __lock_page_or_retry().
*/
vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
@@ -4436,19 +4435,11 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
smp_wmb(); /* See comment in __pte_alloc */
spin_lock(&mm->page_table_lock);
-#ifndef __ARCH_HAS_5LEVEL_HACK
if (!p4d_present(*p4d)) {
mm_inc_nr_puds(mm);
p4d_populate(mm, p4d, new);
} else /* Another has populated it */
pud_free(mm, new);
-#else
- if (!pgd_present(*p4d)) {
- mm_inc_nr_puds(mm);
- pgd_populate(mm, p4d, new);
- } else /* Another has populated it */
- pud_free(mm, new);
-#endif /* __ARCH_HAS_5LEVEL_HACK */
spin_unlock(&mm->page_table_lock);
return 0;
}
@@ -4667,7 +4658,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
void *old_buf = buf;
int write = gup_flags & FOLL_WRITE;
- if (down_read_killable(&mm->mmap_sem))
+ if (mmap_read_lock_killable(mm))
return 0;
/* ignore errors, just check how much was successfully transferred */
@@ -4718,7 +4709,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
buf += bytes;
addr += bytes;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return buf - old_buf;
}
@@ -4775,7 +4766,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
/*
* we might be running from an atomic context so we cannot sleep
*/
- if (!down_read_trylock(&mm->mmap_sem))
+ if (!mmap_read_trylock(mm))
return;
vma = find_vma(mm, ip);
@@ -4794,7 +4785,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
free_page((unsigned long)buf);
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
@@ -4802,7 +4793,7 @@ void __might_fault(const char *file, int line)
{
/*
* Some code (nfs/sunrpc) uses socket ops on kernel memory while
- * holding the mmap_sem, this is safe because kernel memory doesn't
+ * holding the mmap_lock, this is safe because kernel memory doesn't
* get paged out, therefore we'll never actually fault, and the
* below annotations will generate false positives.
*/
@@ -4813,7 +4804,7 @@ void __might_fault(const char *file, int line)
__might_sleep(file, line, 0);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
+ might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
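
The mm/memory.c hunks above replace open-coded rwsem calls on mm->mmap_sem with the new mmap_lock wrappers. As a rough illustration only (the function and variable names are made up; the wrapper names are the ones exercised in the hunks above), a caller that used to do down_read_killable(&mm->mmap_sem)/up_read(&mm->mmap_sem) now reads:

/*
 * Illustrative sketch, not part of the patch: take mmap_lock for read,
 * inspect a vma, drop the lock. Mirrors the conversions shown above.
 */
#include <linux/mm.h>
#include <linux/mmap_lock.h>

static int example_inspect_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (mmap_read_lock_killable(mm))	/* was: down_read_killable(&mm->mmap_sem) */
		return -EINTR;

	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)
		ret = 0;			/* vma is only stable while the lock is held */

	mmap_read_unlock(mm);			/* was: up_read(&mm->mmap_sem) */
	return ret;
}
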
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 926ec704e835..9b34e03e730a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -98,11 +98,14 @@ void mem_hotplug_done(void)
u64 max_mem_size = U64_MAX;
/* add this memory to iomem resource */
-static struct resource *register_memory_resource(u64 start, u64 size)
+static struct resource *register_memory_resource(u64 start, u64 size,
+ const char *resource_name)
{
struct resource *res;
unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- char *resource_name = "System RAM";
+
+ if (strcmp(resource_name, "System RAM"))
+ flags |= IORESOURCE_MEM_DRIVER_MANAGED;
/*
* Make sure value parsed from 'mem=' only restricts memory adding
@@ -862,10 +865,9 @@ static void reset_node_present_pages(pg_data_t *pgdat)
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
-static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
+static pg_data_t __ref *hotadd_new_pgdat(int nid)
{
struct pglist_data *pgdat;
- unsigned long start_pfn = PFN_DOWN(start);
pgdat = NODE_DATA(nid);
if (!pgdat) {
@@ -895,9 +897,8 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
}
/* we can use NODE_DATA(nid) from here */
-
pgdat->node_id = nid;
- pgdat->node_start_pfn = start_pfn;
+ pgdat->node_start_pfn = 0;
/* init node's zones as empty zones, we don't have any present pages.*/
free_area_init_core_hotplug(nid);
@@ -932,7 +933,6 @@ static void rollback_node_hotadd(int nid)
/**
* try_online_node - online a node if offlined
* @nid: the node ID
- * @start: start addr of the node
* @set_node_online: Whether we want to online the node
* called by cpu_up() to online a node without onlined memory.
*
@@ -941,7 +941,7 @@ static void rollback_node_hotadd(int nid)
* 0 -> the node is already online
* -ENOMEM -> the node could not be allocated
*/
-static int __try_online_node(int nid, u64 start, bool set_node_online)
+static int __try_online_node(int nid, bool set_node_online)
{
pg_data_t *pgdat;
int ret = 1;
@@ -949,7 +949,7 @@ static int __try_online_node(int nid, u64 start, bool set_node_online)
if (node_online(nid))
return 0;
- pgdat = hotadd_new_pgdat(nid, start);
+ pgdat = hotadd_new_pgdat(nid);
if (!pgdat) {
pr_err("Cannot online node %d due to NULL pgdat\n", nid);
ret = -ENOMEM;
@@ -973,7 +973,7 @@ int try_online_node(int nid)
int ret;
mem_hotplug_begin();
- ret = __try_online_node(nid, 0, true);
+ ret = __try_online_node(nid, true);
mem_hotplug_done();
return ret;
}
@@ -1017,17 +1017,17 @@ int __ref add_memory_resource(int nid, struct resource *res)
if (ret)
return ret;
+ if (!node_possible(nid)) {
+ WARN(1, "node %d was absent from the node_possible_map\n", nid);
+ return -EINVAL;
+ }
+
mem_hotplug_begin();
- /*
- * Add new range to memblock so that when hotadd_new_pgdat() is called
- * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
- * this new range and calculate total pages correctly. The range will
- * be removed at hot-remove time.
- */
- memblock_add_node(start, size, nid);
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+ memblock_add_node(start, size, nid);
- ret = __try_online_node(nid, start, false);
+ ret = __try_online_node(nid, false);
if (ret < 0)
goto error;
new_node = ret;
@@ -1060,7 +1060,8 @@ int __ref add_memory_resource(int nid, struct resource *res)
BUG_ON(ret);
/* create new memmap entry */
- firmware_map_add_hotplug(start, start + size, "System RAM");
+ if (!strcmp(res->name, "System RAM"))
+ firmware_map_add_hotplug(start, start + size, "System RAM");
/* device_online() will take the lock when calling online_pages() */
mem_hotplug_done();
@@ -1074,7 +1075,8 @@ error:
/* rollback pgdat allocation and others */
if (new_node)
rollback_node_hotadd(nid);
- memblock_remove(start, size);
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+ memblock_remove(start, size);
mem_hotplug_done();
return ret;
}
@@ -1085,7 +1087,7 @@ int __ref __add_memory(int nid, u64 start, u64 size)
struct resource *res;
int ret;
- res = register_memory_resource(start, size);
+ res = register_memory_resource(start, size, "System RAM");
if (IS_ERR(res))
return PTR_ERR(res);
@@ -1107,82 +1109,57 @@ int add_memory(int nid, u64 start, u64 size)
}
EXPORT_SYMBOL_GPL(add_memory);
-#ifdef CONFIG_MEMORY_HOTREMOVE
/*
- * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
- * set and the size of the free page is given by page_order(). Using this,
- * the function determines if the pageblock contains only free pages.
- * Due to buddy contraints, a free page at least the size of a pageblock will
- * be located at the start of the pageblock
+ * Add special, driver-managed memory to the system as system RAM. Such
+ * memory is not exposed via the raw firmware-provided memmap as system
+ * RAM, instead, it is detected and added by a driver - during cold boot,
+ * after a reboot, and after kexec.
+ *
+ * Reasons why this memory should not be used for the initial memmap of a
+ * kexec kernel or for placing kexec images:
+ * - The booting kernel is in charge of determining how this memory will be
+ * used (e.g., use persistent memory as system RAM)
+ * - Coordination with a hypervisor is required before this memory
+ * can be used (e.g., inaccessible parts).
+ *
+ * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
+ * memory map") are created. Also, the created memory resource is flagged
+ * with IORESOURCE_MEM_DRIVER_MANAGED, so in-kernel users can special-case
+ * this memory as well (esp., not place kexec images onto it).
+ *
+ * The resource_name (visible via /proc/iomem) has to have the format
+ * "System RAM ($DRIVER)".
*/
-static inline int pageblock_free(struct page *page)
+int add_memory_driver_managed(int nid, u64 start, u64 size,
+ const char *resource_name)
{
- return PageBuddy(page) && page_order(page) >= pageblock_order;
-}
+ struct resource *res;
+ int rc;
-/* Return the pfn of the start of the next active pageblock after a given pfn */
-static unsigned long next_active_pageblock(unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
+ if (!resource_name ||
+ strstr(resource_name, "System RAM (") != resource_name ||
+ resource_name[strlen(resource_name) - 1] != ')')
+ return -EINVAL;
- /* Ensure the starting page is pageblock-aligned */
- BUG_ON(pfn & (pageblock_nr_pages - 1));
+ lock_device_hotplug();
- /* If the entire pageblock is free, move to the end of free page */
- if (pageblock_free(page)) {
- int order;
- /* be careful. we don't have locks, page_order can be changed.*/
- order = page_order(page);
- if ((order < MAX_ORDER) && (order >= pageblock_order))
- return pfn + (1 << order);
+ res = register_memory_resource(start, size, resource_name);
+ if (IS_ERR(res)) {
+ rc = PTR_ERR(res);
+ goto out_unlock;
}
- return pfn + pageblock_nr_pages;
-}
-
-static bool is_pageblock_removable_nolock(unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
- struct zone *zone;
-
- /*
- * We have to be careful here because we are iterating over memory
- * sections which are not zone aware so we might end up outside of
- * the zone but still within the section.
- * We have to take care about the node as well. If the node is offline
- * its NODE_DATA will be NULL - see page_zone.
- */
- if (!node_online(page_to_nid(page)))
- return false;
-
- zone = page_zone(page);
- pfn = page_to_pfn(page);
- if (!zone_spans_pfn(zone, pfn))
- return false;
-
- return !has_unmovable_pages(zone, page, MIGRATE_MOVABLE,
- MEMORY_OFFLINE);
-}
-
-/* Checks if this range of memory is likely to be hot-removable. */
-bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
-{
- unsigned long end_pfn, pfn;
-
- end_pfn = min(start_pfn + nr_pages,
- zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
-
- /* Check the starting page of each pageblock within the range */
- for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
- if (!is_pageblock_removable_nolock(pfn))
- return false;
- cond_resched();
- }
+ rc = add_memory_resource(nid, res);
+ if (rc < 0)
+ release_memory_resource(res);
- /* All pageblocks in the memory block are likely to be hot-removable */
- return true;
+out_unlock:
+ unlock_device_hotplug();
+ return rc;
}
+EXPORT_SYMBOL_GPL(add_memory_driver_managed);
+#ifdef CONFIG_MEMORY_HOTREMOVE
/*
* Confirm all pages in a range [start, end) belong to the same zone (skipping
* memory holes). When true, return the zone.
@@ -1224,11 +1201,17 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,
/*
* Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
- * non-lru movable pages and hugepages). We scan pfn because it's much
- * easier than scanning over linked list. This function returns the pfn
- * of the first found movable page if it's found, otherwise 0.
+ * non-lru movable pages and hugepages). Will skip over most unmovable
+ * pages (esp., pages that can be skipped when offlining), but bail out on
+ * definitely unmovable pages.
+ *
+ * Returns:
+ * 0 in case a movable page is found and movable_pfn was updated.
+ * -ENOENT in case no movable page was found.
+ * -EBUSY in case a definitely unmovable page was found.
*/
-static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
+static int scan_movable_pages(unsigned long start, unsigned long end,
+ unsigned long *movable_pfn)
{
unsigned long pfn;
@@ -1240,18 +1223,30 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
continue;
page = pfn_to_page(pfn);
if (PageLRU(page))
- return pfn;
+ goto found;
if (__PageMovable(page))
- return pfn;
+ goto found;
+
+ /*
+ * PageOffline() pages that are not marked __PageMovable() and
+ * have a reference count > 0 (after MEM_GOING_OFFLINE) are
+ * definitely unmovable. If their reference count would be 0,
+ * they could at least be skipped when offlining memory.
+ */
+ if (PageOffline(page) && page_count(page))
+ return -EBUSY;
if (!PageHuge(page))
continue;
head = compound_head(page);
if (page_huge_active(head))
- return pfn;
+ goto found;
skip = compound_nr(head) - (page - head);
pfn += skip - 1;
}
+ return -ENOENT;
+found:
+ *movable_pfn = pfn;
return 0;
}
@@ -1360,7 +1355,7 @@ offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
}
/*
- * Check all pages in range, recoreded as memory resource, are isolated.
+ * Check that all pages in the range, recorded as a memory resource, are isolated.
*/
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
@@ -1514,7 +1509,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
}
do {
- for (pfn = start_pfn; pfn;) {
+ pfn = start_pfn;
+ do {
if (signal_pending(current)) {
ret = -EINTR;
reason = "signal backoff";
@@ -1524,14 +1520,19 @@ static int __ref __offline_pages(unsigned long start_pfn,
cond_resched();
lru_add_drain_all();
- pfn = scan_movable_pages(pfn, end_pfn);
- if (pfn) {
+ ret = scan_movable_pages(pfn, end_pfn, &pfn);
+ if (!ret) {
/*
* TODO: fatal migration failures should bail
* out
*/
do_migrate_range(pfn, end_pfn);
}
+ } while (!ret);
+
+ if (ret != -ENOENT) {
+ reason = "unmovable page";
+ goto failed_removal_isolated;
}
/*
@@ -1746,8 +1747,12 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
mem_hotplug_begin();
arch_remove_memory(nid, start, size, NULL);
- memblock_free(start, size);
- memblock_remove(start, size);
+
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+ memblock_free(start, size);
+ memblock_remove(start, size);
+ }
+
__release_memory_resource(start, size);
try_offline_node(nid);
@@ -1793,4 +1798,41 @@ int remove_memory(int nid, u64 start, u64 size)
return rc;
}
EXPORT_SYMBOL_GPL(remove_memory);
+
+/*
+ * Try to offline and remove a memory block. Might take a long time to
+ * finish in case memory is still in use. Primarily useful for memory devices
+ * that logically unplugged all memory (so it's no longer in use) and want to
+ * offline + remove the memory block.
+ */
+int offline_and_remove_memory(int nid, u64 start, u64 size)
+{
+ struct memory_block *mem;
+ int rc = -EINVAL;
+
+ if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ size != memory_block_size_bytes())
+ return rc;
+
+ lock_device_hotplug();
+ mem = find_memory_block(__pfn_to_section(PFN_DOWN(start)));
+ if (mem)
+ rc = device_offline(&mem->dev);
+ /* Ignore if the device is already offline. */
+ if (rc > 0)
+ rc = 0;
+
+ /*
+ * In case we succeeded in offlining the memory block, remove it.
+ * This cannot fail as it cannot get onlined in the meantime.
+ */
+ if (!rc) {
+ rc = try_remove_memory(nid, start, size);
+ WARN_ON_ONCE(rc);
+ }
+ unlock_device_hotplug();
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(offline_and_remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
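
Taken together, the two new exports above give a driver a complete add/remove path for driver-detected memory. A hedged usage sketch (the driver name, node id and range are hypothetical; the declarations are assumed to live in include/linux/memory_hotplug.h, which is not part of this section):

/*
 * Hypothetical driver snippet using the two helpers added above. The
 * resource name must match the "System RAM ($DRIVER)" format enforced
 * by add_memory_driver_managed().
 */
#include <linux/memory_hotplug.h>

static int example_plug_range(int nid, u64 start, u64 size)
{
	/* No /sys/firmware/memmap entry; resource gets IORESOURCE_MEM_DRIVER_MANAGED. */
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example_driver)");
}

static int example_unplug_range(int nid, u64 start, u64 size)
{
	/* start/size must describe exactly one aligned memory block. */
	return offline_and_remove_memory(nid, start, size);
}
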
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1965e2681877..381320671677 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -224,7 +224,7 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
* handle an empty nodemask with MPOL_PREFERRED here.
*
* Must be called holding task's alloc_lock to protect task's mems_allowed
- * and mempolicy. May also be called holding the mmap_semaphore for write.
+ * and mempolicy. May also be called holding the mmap_lock for write.
*/
static int mpol_set_nodemask(struct mempolicy *pol,
const nodemask_t *nodes, struct nodemask_scratch *nsc)
@@ -368,7 +368,7 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
/*
* mpol_rebind_policy - Migrate a policy to a different set of nodes
*
- * Per-vma policies are protected by mmap_sem. Allocations using per-task
+ * Per-vma policies are protected by mmap_lock. Allocations using per-task
* policies are protected by task->mems_allowed_seq to prevent a premature
* OOM/allocation failure due to parallel nodemask modification.
*/
@@ -398,17 +398,17 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
/*
* Rebind each vma in mm to new nodemask.
*
- * Call holding a reference to mm. Takes mm->mmap_sem during call.
+ * Call holding a reference to mm. Takes mm->mmap_lock during call.
*/
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next)
mpol_rebind_policy(vma->vm_policy, new);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
@@ -764,7 +764,7 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
/*
* Apply policy to a single VMA
- * This must be called with the mmap_sem held for writing.
+ * This must be called with the mmap_lock held for writing.
*/
static int vma_replace_policy(struct vm_area_struct *vma,
struct mempolicy *pol)
@@ -789,7 +789,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
}
old = vma->vm_policy;
- vma->vm_policy = new; /* protected by mmap_sem */
+ vma->vm_policy = new; /* protected by mmap_lock */
mpol_put(old);
return 0;
@@ -932,7 +932,7 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
put_page(p);
}
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return err;
}
@@ -965,10 +965,10 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
* vma/shared policy at addr is NULL. We
* want to return MPOL_DEFAULT in this case.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_intersection(mm, addr, addr+1);
if (!vma) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return -EFAULT;
}
if (vma->vm_ops && vma->vm_ops->get_policy)
@@ -985,7 +985,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
if (flags & MPOL_F_ADDR) {
/*
* Take a refcount on the mpol, lookup_node()
- * wil drop the mmap_sem, so after calling
+ * will drop the mmap_lock, so after calling
* lookup_node() only "pol" remains valid, "vma"
* is stale.
*/
@@ -1027,7 +1027,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
out:
mpol_cond_put(pol);
if (vma)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (pol_refcount)
mpol_put(pol_refcount);
return err;
@@ -1136,7 +1136,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
if (err)
return err;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
/*
* Find a 'source' bit set in 'tmp' whose corresponding 'dest'
@@ -1217,7 +1217,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
if (err < 0)
break;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (err < 0)
return err;
return busy;
@@ -1340,12 +1340,12 @@ static long do_mbind(unsigned long start, unsigned long len,
{
NODEMASK_SCRATCH(scratch);
if (scratch) {
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
task_lock(current);
err = mpol_set_nodemask(new, nmask, scratch);
task_unlock(current);
if (err)
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
} else
err = -ENOMEM;
NODEMASK_SCRATCH_FREE(scratch);
@@ -1382,7 +1382,7 @@ up_out:
putback_movable_pages(&pagelist);
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mpol_out:
mpol_put(new);
return err;
@@ -2185,7 +2185,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
- * When VMA is not NULL caller must hold down_read on the mmap_sem of the
+ * When VMA is not NULL caller must read-lock the mmap_lock of the
* mm_struct of the VMA to prevent it from going away. Should be used for
* all allocations for pages that will be mapped into user space. Returns
* NULL when no page can be allocated.
diff --git a/mm/migrate.c b/mm/migrate.c
index 7bfd0962149e..f37729673558 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1555,7 +1555,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
unsigned int follflags;
int err;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
err = -EFAULT;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start || !vma_migratable(vma))
@@ -1608,7 +1608,7 @@ out_putpage:
*/
put_page(page);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return err;
}
@@ -1733,7 +1733,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
{
unsigned long i;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
@@ -1760,7 +1760,7 @@ set_status:
status++;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
/*
@@ -2120,7 +2120,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
* pmd before doing set_pmd_at(), nor to flush the TLB after
* set_pmd_at(). Clearing the pmd here would introduce a race
* condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
- * mmap_sem for reading. If the pmd is set to NULL at any given time,
+ * mmap_lock for reading. If the pmd is set to NULL at any given time,
* MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
* pmd.
*/
@@ -2675,7 +2675,7 @@ restore:
* have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
*
* It is safe to update device page table after migrate_vma_pages() because
- * both destination and source page are still locked, and the mmap_sem is held
+ * both destination and source page are still locked, and the mmap_lock is held
* in read mode (hence no one can unmap the range being migrated).
*
* Once the caller is done cleaning up things and updating its page table (if it
@@ -2772,10 +2772,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
* pte_offset_map() on pmds where a huge pmd might be created
* from a different thread.
*
- * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
+ * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
* parallel threads are excluded by other means.
*
- * Here we only have down_read(mmap_sem).
+ * Here we only have mmap_read_lock(mm).
*/
if (pte_alloc(mm, pmdp))
goto abort;
diff --git a/mm/mincore.c b/mm/mincore.c
index 0e6dd9948f1a..453ff112470f 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -17,9 +17,9 @@
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
unsigned long end, struct mm_walk *walk)
@@ -284,9 +284,9 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
* Do at most PAGE_SIZE entries per iteration, due to
* the temporary buffer size.
*/
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (retval <= 0)
break;
diff --git a/mm/mlock.c b/mm/mlock.c
index a72c1eeded77..f8736136fad7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(can_do_mlock);
* When lazy mlocking via vmscan, it is important to ensure that the
* vma's VM_LOCKED status is not concurrently being modified, otherwise we
* may have mlocked a page that is being munlocked. So lazy mlock must take
- * the mmap_sem for read, and verify that the vma really is locked
+ * the mmap_lock for read, and verify that the vma really is locked
* (see mm/rmap.c).
*/
@@ -381,7 +381,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
/*
* Initialize pte walk starting at the already pinned page where we
* are sure that there is a pte, as it was pinned under the same
- * mmap_sem write op.
+ * mmap_lock write op.
*/
pte = get_locked_pte(vma->vm_mm, start, &ptl);
/* Make sure we do not cross the page table boundary */
@@ -565,7 +565,7 @@ success:
mm->locked_vm += nr_pages;
/*
- * vm_flags is protected by the mmap_sem held in write mode.
+ * vm_flags is protected by the mmap_lock held in write mode.
* It's okay if try_to_unmap_one unmaps a page just after we
* set VM_LOCKED, populate_vma_page_range will bring it back.
*/
@@ -686,7 +686,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
lock_limit >>= PAGE_SHIFT;
locked = len >> PAGE_SHIFT;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
locked += current->mm->locked_vm;
@@ -705,7 +705,7 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = apply_vma_lock_flags(start, len, flags);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (error)
return error;
@@ -742,10 +742,10 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
len = PAGE_ALIGN(len + (offset_in_page(start)));
start &= PAGE_MASK;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
ret = apply_vma_lock_flags(start, len, 0);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
@@ -811,14 +811,14 @@ SYSCALL_DEFINE1(mlockall, int, flags)
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
ret = -ENOMEM;
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = apply_mlockall_flags(flags);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (!ret && (flags & MCL_CURRENT))
mm_populate(0, TASK_SIZE);
@@ -829,10 +829,10 @@ SYSCALL_DEFINE0(munlockall)
{
int ret;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
ret = apply_mlockall_flags(0);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index f609e9ec4a25..59a4682ebf3f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -132,7 +132,7 @@ void vma_set_page_prot(struct vm_area_struct *vma)
vm_flags &= ~VM_SHARED;
vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
}
- /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
+ /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}
@@ -198,7 +198,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
bool downgraded = false;
LIST_HEAD(uf);
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
origbrk = mm->brk;
@@ -238,14 +238,14 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/*
* Always allow shrinking brk.
- * __do_munmap() may downgrade mmap_sem to read.
+ * __do_munmap() may downgrade mmap_lock to read.
*/
if (brk <= mm->brk) {
int ret;
/*
- * mm->brk must to be protected by write mmap_sem so update it
- * before downgrading mmap_sem. When __do_munmap() fails,
+ * mm->brk must be protected by write mmap_lock so update it
+ * before downgrading mmap_lock. When __do_munmap() fails,
* mm->brk will be restored from origbrk.
*/
mm->brk = brk;
@@ -272,9 +272,9 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
success:
populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
if (downgraded)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
else
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(oldbrk, newbrk - oldbrk);
@@ -282,7 +282,7 @@ success:
out:
retval = origbrk;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return retval;
}
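
The same write-then-downgrade flow recurs in __vm_munmap() and mremap() later in this series; condensed into one place here (a sketch restating __vm_munmap() from the hunks below, not new functionality):

/*
 * Sketch of the downgrade pattern spelled with the new wrappers: take
 * mmap_lock for write, let __do_munmap() downgrade it to read (return
 * value 1), then release whichever mode is still held.
 */
#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/userfaultfd_k.h>

static int example_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	LIST_HEAD(uf);
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __do_munmap(mm, start, len, &uf, true);
	if (ret == 1) {			/* mmap_lock was downgraded to read */
		mmap_read_unlock(mm);
		ret = 0;
	} else {
		mmap_write_unlock(mm);
	}
	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}
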
@@ -505,7 +505,7 @@ static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
* After the update, the vma will be reinserted using
* anon_vma_interval_tree_post_update_vma().
*
- * The entire update must be protected by exclusive mmap_sem and by
+ * The entire update must be protected by exclusive mmap_lock and by
* the root anon_vma's mutex.
*/
static inline void
@@ -1207,7 +1207,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
}
/*
- * Rough compatbility check to quickly see if it's even worth looking
+ * Rough compatibility check to quickly see if it's even worth looking
* at sharing an anon_vma.
*
* They need to have the same vm_file, and the flags can only differ
@@ -1361,7 +1361,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
}
/*
- * The caller must hold down_write(&current->mm->mmap_sem).
+ * The caller must write-lock current->mm->mmap_lock.
*/
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
@@ -2371,7 +2371,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
/*
* vma->vm_start/vm_end cannot change under us because the caller
- * is required to hold the mmap_sem in read mode. We need the
+ * is required to hold the mmap_lock in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
@@ -2389,7 +2389,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_sem
+ * updates, but we only hold a shared mmap_lock
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
@@ -2451,7 +2451,7 @@ int expand_downwards(struct vm_area_struct *vma,
/*
* vma->vm_start/vm_end cannot change under us because the caller
- * is required to hold the mmap_sem in read mode. We need the
+ * is required to hold the mmap_lock in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
anon_vma_lock_write(vma->anon_vma);
@@ -2469,7 +2469,7 @@ int expand_downwards(struct vm_area_struct *vma,
if (!error) {
/*
* vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_sem
+ * updates, but we only hold a shared mmap_lock
* lock here, so we need to protect against
* concurrent vma expansions.
* anon_vma_lock_write() doesn't help here, as
@@ -2828,7 +2828,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
detach_vmas_to_be_unmapped(mm, vma, prev, end);
if (downgrade)
- downgrade_write(&mm->mmap_sem);
+ mmap_write_downgrade(mm);
unmap_region(mm, vma, prev, start, end);
@@ -2850,20 +2850,20 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
struct mm_struct *mm = current->mm;
LIST_HEAD(uf);
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = __do_munmap(mm, start, len, &uf, downgrade);
/*
- * Returning 1 indicates mmap_sem is downgraded.
+ * Returning 1 indicates mmap_lock is downgraded.
* But 1 is not legal return value of vm_munmap() and munmap(), reset
* it to 0 before return.
*/
if (ret == 1) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
ret = 0;
} else
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
return ret;
@@ -2911,7 +2911,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
if (pgoff + (size >> PAGE_SHIFT) < pgoff)
return ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vma = find_vma(mm, start);
@@ -2974,7 +2974,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
prot, flags, pgoff, &populate, NULL);
fput(file);
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (populate)
mm_populate(ret, populate);
if (!IS_ERR_VALUE(ret))
@@ -3074,12 +3074,12 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (!len)
return 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = do_brk_flags(addr, len, flags, &uf);
populate = ((mm->def_flags & VM_LOCKED) != 0);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
@@ -3107,12 +3107,12 @@ void exit_mmap(struct mm_struct *mm)
/*
* Manually reap the mm to free as much memory as possible.
* Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
- * this mm from further consideration. Taking mm->mmap_sem for
+ * this mm from further consideration. Taking mm->mmap_lock for
* write after setting MMF_OOM_SKIP will guarantee that the oom
- * reaper will not run on this mm again after mmap_sem is
+ * reaper will not run on this mm again after mmap_lock is
* dropped.
*
- * Nothing can be holding mm->mmap_sem here and the above call
+ * Nothing can be holding mm->mmap_lock here and the above call
* to mmu_notifier_release(mm) ensures mmu notifier callbacks in
* __oom_reap_task_mm() will not block.
*
@@ -3123,8 +3123,8 @@ void exit_mmap(struct mm_struct *mm)
(void)__oom_reap_task_mm(mm);
set_bit(MMF_OOM_SKIP, &mm->flags);
- down_write(&mm->mmap_sem);
- up_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
+ mmap_write_unlock(mm);
}
if (mm->locked_vm) {
@@ -3437,7 +3437,7 @@ bool vma_is_special_mapping(const struct vm_area_struct *vma,
}
/*
- * Called with mm->mmap_sem held for writing.
+ * Called with mm->mmap_lock held for writing.
* Insert a new vma covering the given region, with the given flags.
* Its pages are supplied by the given array of struct page *.
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
@@ -3474,7 +3474,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
- down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
+ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
/*
* We can safely modify head.next after taking the
* anon_vma->root->rwsem. If some other vma in this mm shares
@@ -3504,7 +3504,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
*/
if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
BUG();
- down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
+ down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
}
}
@@ -3513,11 +3513,11 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
* operations that could ever happen on a certain mm. This includes
* vmtruncate, try_to_unmap, and all page faults.
*
- * The caller must take the mmap_sem in write mode before calling
+ * The caller must take the mmap_lock in write mode before calling
* mm_take_all_locks(). The caller isn't allowed to release the
- * mmap_sem until mm_drop_all_locks() returns.
+ * mmap_lock until mm_drop_all_locks() returns.
*
- * mmap_sem in write mode is required in order to block all operations
+ * mmap_lock in write mode is required in order to block all operations
* that could modify pagetables and free pages without need of
* altering the vma layout. It's also needed in write mode to avoid new
* anon_vmas to be associated with existing vmas.
@@ -3550,7 +3550,7 @@ int mm_take_all_locks(struct mm_struct *mm)
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(down_read_trylock(&mm->mmap_sem));
+ BUG_ON(mmap_read_trylock(mm));
mutex_lock(&mm_all_locks_mutex);
@@ -3622,7 +3622,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
}
/*
- * The mmap_sem cannot be released by the caller until
+ * The mmap_lock cannot be released by the caller until
* mm_drop_all_locks() returns.
*/
void mm_drop_all_locks(struct mm_struct *mm)
@@ -3630,7 +3630,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
- BUG_ON(down_read_trylock(&mm->mmap_sem));
+ BUG_ON(mmap_read_trylock(mm));
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
for (vma = mm->mmap; vma; vma = vma->vm_next) {
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
deleted file mode 100644
index 3e612ae748e9..000000000000
--- a/mm/mmu_context.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (C) 2009 Red Hat, Inc.
- *
- * See ../COPYING for licensing terms.
- */
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
-#include <linux/mmu_context.h>
-#include <linux/export.h>
-
-#include <asm/mmu_context.h>
-
-/*
- * use_mm
- * Makes the calling kernel thread take on the specified
- * mm context.
- * (Note: this routine is intended to be called only
- * from a kernel thread context)
- */
-void use_mm(struct mm_struct *mm)
-{
- struct mm_struct *active_mm;
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- active_mm = tsk->active_mm;
- if (active_mm != mm) {
- mmgrab(mm);
- tsk->active_mm = mm;
- }
- tsk->mm = mm;
- switch_mm(active_mm, mm, tsk);
- task_unlock(tsk);
-#ifdef finish_arch_post_lock_switch
- finish_arch_post_lock_switch();
-#endif
-
- if (active_mm != mm)
- mmdrop(active_mm);
-}
-EXPORT_SYMBOL_GPL(use_mm);
-
-/*
- * unuse_mm
- * Reverses the effect of use_mm, i.e. releases the
- * specified mm context which was earlier taken on
- * by the calling kernel thread
- * (Note: this routine is intended to be called only
- * from a kernel thread context)
- */
-void unuse_mm(struct mm_struct *mm)
-{
- struct task_struct *tsk = current;
-
- task_lock(tsk);
- sync_mm_rss(mm);
- tsk->mm = NULL;
- /* active_mm is still 'mm' */
- enter_lazy_tlb(mm, tsk);
- task_unlock(tsk);
-}
-EXPORT_SYMBOL_GPL(unuse_mm);
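
mm/mmu_context.c goes away entirely; the oom_kill.c hunk further down refers to kthread_use_mm(), so the helpers are assumed to survive under that name (moved out of mm/, not shown in this section). A minimal sketch of the renamed pair, under that assumption:

/*
 * Sketch, assuming use_mm()/unuse_mm() were renamed to
 * kthread_use_mm()/kthread_unuse_mm(). Only valid from a kernel thread.
 */
#include <linux/kthread.h>
#include <linux/sched/mm.h>

static void example_borrow_mm(struct mm_struct *mm)
{
	kthread_use_mm(mm);	/* adopt the user mm for this kthread */

	/* ... user mappings reachable via copy_{from,to}_user() here ... */

	kthread_unuse_mm(mm);	/* drop back to no user mm */
}
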
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index a3538cb2bcbe..03c33c93a582 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -301,7 +301,7 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
{
/*
 * If there are parallel threads doing PTE changes on the same range
- * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB
+ * under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB
* flush by batching, one thread may end up seeing inconsistent PTEs
* and result in having stale TLB entries. So flush TLB forcefully
* if we detect parallel PTE batching threads.
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 06852b896fa6..352bb9f3ecc0 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -599,7 +599,7 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
}
/*
- * Same as mmu_notifier_register but here the caller must hold the mmap_sem in
+ * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
* write mode. A NULL mn signals the notifier is being registered for itree
* mode.
*/
@@ -609,7 +609,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mmu_notifier_subscriptions *subscriptions = NULL;
int ret;
- lockdep_assert_held_write(&mm->mmap_sem);
+ mmap_assert_write_locked(mm);
BUG_ON(atomic_read(&mm->mm_users) <= 0);
if (IS_ENABLED(CONFIG_LOCKDEP)) {
@@ -623,7 +623,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
/*
* kmalloc cannot be called under mm_take_all_locks(), but we
* know that mm->notifier_subscriptions can't change while we
- * hold the write side of the mmap_sem.
+ * hold the write side of the mmap_lock.
*/
subscriptions = kzalloc(
sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
@@ -655,7 +655,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
* readers. acquire can only be used while holding the mmgrab or
* mmget, and is safe because once created the
* mmu_notifier_subscriptions is not freed until the mm is destroyed.
- * As above, users holding the mmap_sem or one of the
+ * As above, users holding the mmap_lock or one of the
* mm_take_all_locks() do not need to use acquire semantics.
*/
if (subscriptions)
@@ -689,7 +689,7 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_register);
* @mn: The notifier to attach
* @mm: The mm to attach the notifier to
*
- * Must not hold mmap_sem nor any other VM related lock when calling
+ * Must not hold mmap_lock nor any other VM related lock when calling
* this registration function. Must also ensure mm_users can't go down
* to zero while this runs to avoid races with mmu_notifier_release,
* so mm has to be current->mm or the mm should be pinned safely such
@@ -708,9 +708,9 @@ int mmu_notifier_register(struct mmu_notifier *subscription,
{
int ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = __mmu_notifier_register(subscription, mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
@@ -750,7 +750,7 @@ find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
* are the same.
*
* Each call to mmu_notifier_get() must be paired with a call to
- * mmu_notifier_put(). The caller must hold the write side of mm->mmap_sem.
+ * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock.
*
* While the caller has a mmu_notifier get the mm pointer will remain valid,
* and can be converted to an active mm pointer via mmget_not_zero().
@@ -761,7 +761,7 @@ struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
struct mmu_notifier *subscription;
int ret;
- lockdep_assert_held_write(&mm->mmap_sem);
+ mmap_assert_write_locked(mm);
if (mm->notifier_subscriptions) {
subscription = find_get_mmu_notifier(mm, ops);
@@ -983,7 +983,7 @@ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
struct mmu_notifier_subscriptions *subscriptions;
int ret;
- might_lock(&mm->mmap_sem);
+ might_lock(&mm->mmap_lock);
subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
if (!subscriptions || !subscriptions->has_itree) {
@@ -1006,7 +1006,7 @@ int mmu_interval_notifier_insert_locked(
mm->notifier_subscriptions;
int ret;
- lockdep_assert_held_write(&mm->mmap_sem);
+ mmap_assert_write_locked(mm);
if (!subscriptions || !subscriptions->has_itree) {
ret = __mmu_notifier_register(NULL, mm);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 494192ca954b..ce8b8a5eacbb 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -28,7 +28,7 @@
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -49,7 +49,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
/*
- * Can be called with only the mmap_sem for reading by
+ * Can be called with only the mmap_lock for reading by
* prot_numa so we must check the pmd isn't constantly
* changing from under us from pmd_none to pmd_trans_huge
* and/or the other way around.
@@ -59,7 +59,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
/*
* The pmd points to a regular pte so the pmd can't change
- * from under us even if the mmap_sem is only hold for
+ * from under us even if the mmap_lock is only held for
* reading.
*/
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
@@ -228,7 +228,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
next = pmd_addr_end(addr, end);
/*
- * Automatic NUMA balancing walks the tables with mmap_sem
+ * Automatic NUMA balancing walks the tables with mmap_lock
 * held for read. It's possible for a parallel update to occur
* between pmd_trans_huge() and a pmd_none_or_clear_bad()
* check leading to a false positive and clearing.
@@ -477,7 +477,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
success:
/*
- * vm_flags and vm_page_prot are protected by the mmap_sem
+ * vm_flags and vm_page_prot are protected by the mmap_lock
* held in write mode.
*/
vma->vm_flags = newflags;
@@ -538,7 +538,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
reqprot = prot;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
/*
@@ -628,7 +628,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
prot = reqprot;
}
out:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return error;
}
@@ -658,7 +658,7 @@ SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
if (init_val & ~PKEY_ACCESS_MASK)
return -EINVAL;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
pkey = mm_pkey_alloc(current->mm);
ret = -ENOSPC;
@@ -672,7 +672,7 @@ SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
}
ret = pkey;
out:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
@@ -680,9 +680,9 @@ SYSCALL_DEFINE1(pkey_free, int, pkey)
{
int ret;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
ret = mm_pkey_free(current->mm, pkey);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
/*
 * We could provide warnings or errors if any VMA still
diff --git a/mm/mremap.c b/mm/mremap.c
index 6aa6ea605068..5dd572d57ca9 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -146,7 +146,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
/*
* We don't have to worry about the ordering of src and dst
- * pte locks because exclusive mmap_sem prevents deadlock.
+ * pte locks because exclusive mmap_lock prevents deadlock.
*/
old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
new_pte = pte_offset_map(new_pmd, new_addr);
@@ -213,7 +213,7 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
/*
* We don't have to worry about the ordering of src and dst
- * ptlocks because exclusive mmap_sem prevents deadlock.
+ * ptlocks because exclusive mmap_lock prevents deadlock.
*/
old_ptl = pmd_lock(vma->vm_mm, old_pmd);
new_ptl = pmd_lockptr(mm, new_pmd);
@@ -266,7 +266,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
if (!new_pmd)
break;
- if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
+ if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
if (extent == HPAGE_PMD_SIZE) {
bool moved;
/* See comment in move_ptes() */
@@ -696,7 +696,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (!new_len)
return ret;
- if (down_write_killable(&current->mm->mmap_sem))
+ if (mmap_write_lock_killable(current->mm))
return -EINTR;
if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
@@ -710,7 +710,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
* __do_munmap does all the needed commit accounting, and
- * downgrades mmap_sem to read if so directed.
+ * downgrades mmap_lock to read if so directed.
*/
if (old_len >= new_len) {
int retval;
@@ -720,7 +720,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
if (retval < 0 && old_len != new_len) {
ret = retval;
goto out;
- /* Returning 1 indicates mmap_sem is downgraded to read. */
+ /* Returning 1 indicates mmap_lock is downgraded to read. */
} else if (retval == 1)
downgraded = true;
ret = addr;
@@ -785,12 +785,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
out:
if (offset_in_page(ret)) {
vm_unacct_memory(charged);
- locked = 0;
+ locked = false;
}
if (downgraded)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
else
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
userfaultfd_unmap_complete(mm, &uf_unmap_early);
diff --git a/mm/msync.c b/mm/msync.c
index c3bd3e75f687..69c6d2029531 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -57,7 +57,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
* If the interval [start,end) covers some unmapped address ranges,
* just ignore them, but return -ENOMEM at the end.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, start);
for (;;) {
struct file *file;
@@ -88,12 +88,12 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
if ((flags & MS_SYNC) && file &&
(vma->vm_flags & VM_SHARED)) {
get_file(file);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
error = vfs_fsync_range(file, fstart, fend, 1);
fput(file);
if (error || start >= end)
goto out;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, start);
} else {
if (start >= end) {
@@ -104,7 +104,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
}
}
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out:
return error ? : unmapped_error;
}
diff --git a/mm/nommu.c b/mm/nommu.c
index dfae55f41901..cdcad5d61dd1 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -172,11 +172,11 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
if (ret) {
struct vm_area_struct *vma;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)ret);
if (vma)
vma->vm_flags |= VM_USERMAP;
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
return ret;
@@ -433,7 +433,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/*
* Ok, looks good - let it rip.
*/
- flush_icache_range(mm->brk, brk);
+ flush_icache_user_range(mm->brk, brk);
return mm->brk = brk;
}
@@ -582,7 +582,7 @@ static void put_nommu_region(struct vm_region *region)
* add a VMA into a process's mm_struct in the appropriate place in the list
* and tree and add to the address space's page tree also if not an anonymous
* page
- * - should be called with mm->mmap_sem held writelocked
+ * - should be called with mm->mmap_lock held writelocked
*/
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
@@ -696,7 +696,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
/*
* look up the first VMA in which addr resides, NULL if none
- * - should be called with mm->mmap_sem at least held readlocked
+ * - should be called with mm->mmap_lock at least held readlocked
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
@@ -742,7 +742,7 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
/*
 * look up the first VMA that exactly matches addr
- * - should be called with mm->mmap_sem at least held readlocked
+ * - should be called with mm->mmap_lock at least held readlocked
*/
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
unsigned long addr,
@@ -1277,7 +1277,7 @@ share:
/* we flush the region from the icache only when the first executable
* mapping of it is made */
if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
- flush_icache_range(region->vm_start, region->vm_end);
+ flush_icache_user_range(region->vm_start, region->vm_end);
region->vm_icache_flushed = true;
}
@@ -1542,9 +1542,9 @@ int vm_munmap(unsigned long addr, size_t len)
struct mm_struct *mm = current->mm;
int ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = do_munmap(mm, addr, len, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(vm_munmap);
@@ -1631,9 +1631,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
{
unsigned long ret;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
return ret;
}
@@ -1705,7 +1705,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct *vma;
int write = gup_flags & FOLL_WRITE;
- if (down_read_killable(&mm->mmap_sem))
+ if (mmap_read_lock_killable(mm))
return 0;
/* the access must start within one of the target process's mappings */
@@ -1728,7 +1728,7 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
len = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return len;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4daedf7b91f6..6e94962893ee 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -126,7 +126,7 @@ static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
/*
* The process p may have detached its own ->mm while exiting or through
- * use_mm(), but one or more of its subthreads may still have a valid
+ * kthread_use_mm(), but one or more of its subthreads may still have a valid
* pointer. Return p, or any of its subthreads with a valid ->mm, with
* task_lock() held.
*/
@@ -569,7 +569,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
bool ret = true;
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
trace_skip_task_reaping(tsk->pid);
return false;
}
@@ -577,8 +577,8 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
/*
* MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
* work on the mm anymore. The check for MMF_OOM_SKIP must run
- * under mmap_sem for reading because it serializes against the
- * down_write();up_write() cycle in exit_mmap().
+ * under mmap_lock for reading because it serializes against the
+ * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
*/
if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
trace_skip_task_reaping(tsk->pid);
@@ -600,7 +600,7 @@ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
out_finish:
trace_finish_task_reaping(tsk->pid);
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
@@ -611,7 +611,7 @@ static void oom_reap_task(struct task_struct *tsk)
int attempts = 0;
struct mm_struct *mm = tsk->signal->oom_mm;
- /* Retry the down_read_trylock(mmap_sem) a few times */
+ /* Retry the mmap_read_trylock(mm) a few times */
while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
schedule_timeout_idle(HZ/10);
@@ -629,7 +629,7 @@ done:
/*
* Hide this mm from OOM killer because it has been either reaped or
- * somebody can't call up_write(mmap_sem).
+ * somebody can't call mmap_write_unlock(mm).
*/
set_bit(MMF_OOM_SKIP, &mm->flags);
@@ -898,7 +898,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
/*
* Kill all user processes sharing victim->mm in other thread groups, if
* any. They don't get access to memory reserves, though, to avoid
- * depletion of all memory. This prevents mm->mmap_sem livelock when an
+ * depletion of all memory. This prevents mm->mmap_lock livelock when an
* oom killed thread cannot exit because it requires the semaphore and
 * it's contended by another thread trying to allocate memory itself.
* That thread will now get access to memory reserves since it has a
@@ -919,8 +919,8 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
continue;
}
/*
- * No use_mm() user needs to read from the userspace so we are
- * ok to reap it.
+ * No kthread_use_mm() user needs to read from the userspace so
+ * we are ok to reap it.
*/
if (unlikely(p->flags & PF_KTHREAD))
continue;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d79ed1f88c7a..28b3e7a67565 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -257,7 +257,7 @@ static void wb_min_max_ratio(struct bdi_writeback *wb,
* requiring writeback.
*
* This number of dirtyable pages is the base value of which the
- * user-configurable dirty ratio is the effictive number of pages that
+ * user-configurable dirty ratio is the effective number of pages that
* are allowed to be actually dirtied. Per individual zone, or
* globally by using the sum of dirtyable pages over all zones.
*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07ae77d97952..48eb0f1410d4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5575,15 +5575,6 @@ static int __parse_numa_zonelist_order(char *s)
return 0;
}
-static __init int setup_numa_zonelist_order(char *s)
-{
- if (!s)
- return 0;
-
- return __parse_numa_zonelist_order(s);
-}
-early_param("numa_zonelist_order", setup_numa_zonelist_order);
-
char numa_zonelist_order[] = "Node";
/*
@@ -8294,6 +8285,19 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
continue;
+ /*
+ * We treat all PageOffline() pages as movable when offlining
+ * to give drivers a chance to decrement their reference count
+ * in MEM_GOING_OFFLINE in order to indicate that these pages
+ * can be offlined as there are no direct references anymore.
+ * For actually unmovable PageOffline() where the driver does
+ * not support this, we will fail later when trying to actually
+ * move these pages that still have a reference count > 0.
+ * (false negatives in this function only)
+ */
+ if ((flags & MEMORY_OFFLINE) && PageOffline(page))
+ continue;
+
if (__PageMovable(page) || PageLRU(page))
continue;
@@ -8525,6 +8529,7 @@ done:
pfn_max_align_up(end), migratetype);
return ret;
}
+EXPORT_SYMBOL(alloc_contig_range);
static int __alloc_contig_pages(unsigned long start_pfn,
unsigned long nr_pages, gfp_t gfp_mask)
@@ -8640,6 +8645,7 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
}
WARN(count != 0, "%d pages are still in use!\n", count);
}
+EXPORT_SYMBOL(free_contig_range);
/*
* The zone indicated has a new number of managed_pages; batch sizes and percpu
@@ -8712,6 +8718,17 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
offlined_pages++;
continue;
}
+ /*
+ * At this point all remaining PageOffline() pages have a
+ * reference count of 0 and can simply be skipped.
+ */
+ if (PageOffline(page)) {
+ BUG_ON(page_count(page));
+ BUG_ON(PageBuddy(page));
+ pfn++;
+ offlined_pages++;
+ continue;
+ }
BUG_ON(page_count(page));
BUG_ON(!PageBuddy(page));
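
The PageOffline() special-casing added above (has_unmovable_pages(), __offline_isolated_pages(), and the page_isolation.c hunk below) assumes the owning driver drops its page references from its MEM_GOING_OFFLINE notifier and re-takes them on MEM_CANCEL_OFFLINE. A hedged sketch of such a callback; example_owns_pfn() and the notifier wiring are hypothetical:

/*
 * Sketch of a driver-side memory notifier cooperating with the
 * PageOffline() handling above. Tracking which pfns belong to the
 * driver is left abstract (example_owns_pfn() is made up).
 */
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/mm.h>

static bool example_owns_pfn(unsigned long pfn);

static int example_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *arg)
{
	const struct memory_notify *mhp = arg;
	unsigned long pfn;

	for (pfn = mhp->start_pfn; pfn < mhp->start_pfn + mhp->nr_pages; pfn++) {
		struct page *page = pfn_to_page(pfn);

		if (!example_owns_pfn(pfn) || !PageOffline(page))
			continue;
		if (action == MEM_GOING_OFFLINE)
			page_ref_dec(page);	/* refcount 0: page can be skipped */
		else if (action == MEM_CANCEL_OFFLINE)
			page_ref_inc(page);	/* offlining aborted, take it back */
	}
	return NOTIFY_OK;
}
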
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 295512465065..057c61df12db 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -4,6 +4,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
+#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
@@ -30,13 +31,9 @@
*/
static struct page *page_idle_get_page(unsigned long pfn)
{
- struct page *page;
+ struct page *page = pfn_to_online_page(pfn);
pg_data_t *pgdat;
- if (!pfn_valid(pfn))
- return NULL;
-
- page = pfn_to_page(pfn);
if (!page || !PageLRU(page) ||
!get_page_unless_zero(page))
return NULL;
diff --git a/mm/page_io.c b/mm/page_io.c
index 76965be1d40e..e8726f3e3820 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -25,7 +25,6 @@
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
-#include <asm/pgtable.h>
static struct bio *get_swap_bio(gfp_t gfp_flags,
struct page *page, bio_end_io_t end_io)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 2c11a38d6e87..f6d07c5f0d34 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -151,6 +151,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* a bit mask)
* MEMORY_OFFLINE - isolate to offline (!allocate) memory
* e.g., skip over PageHWPoison() pages
+ * and PageOffline() pages.
* REPORT_FAILURE - report details about the failure to
* isolate the range
*
@@ -259,6 +260,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
/* A HWPoisoned page cannot be also PageBuddy */
pfn++;
+ else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
+ !page_count(page))
+ /*
+ * The responsible driver agreed to skip PageOffline()
+ * pages when offlining memory by dropping its
+ * reference in MEM_GOING_OFFLINE.
+ */
+ pfn++;
else
break;
}
diff --git a/mm/page_reporting.h b/mm/page_reporting.h
index aa6d37f4dc22..2c385dd4ddbd 100644
--- a/mm/page_reporting.h
+++ b/mm/page_reporting.h
@@ -7,7 +7,7 @@
#include <linux/page-isolation.h>
#include <linux/jump_label.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/scatterlist.h>
#define PAGE_REPORTING_MIN_ORDER pageblock_order
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 928df1638c30..e81640d9f177 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -373,7 +373,7 @@ static int __walk_page_range(unsigned long start, unsigned long end,
* caller-specific data to callbacks, @private should be helpful.
*
* Locking:
- * Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_sem,
+ * Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
* because these function traverse vma list and/or access to vma's data.
*/
int walk_page_range(struct mm_struct *mm, unsigned long start,
@@ -395,7 +395,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (!walk.mm)
return -EINVAL;
- lockdep_assert_held(&walk.mm->mmap_sem);
+ mmap_assert_locked(walk.mm);
vma = find_vma(walk.mm, start);
do {
@@ -453,7 +453,7 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
if (start >= end || !walk.mm)
return -EINVAL;
- lockdep_assert_held(&walk.mm->mmap_sem);
+ mmap_assert_locked(walk.mm);
return __walk_page_range(start, end, &walk);
}
@@ -472,7 +472,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
if (!walk.mm)
return -EINVAL;
- lockdep_assert_held(&walk.mm->mmap_sem);
+ mmap_assert_locked(walk.mm);
err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
if (err > 0)
@@ -498,11 +498,11 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
* Also see walk_page_range() for additional information.
*
* Locking:
- * This function can't require that the struct mm_struct::mmap_sem is held,
+ * This function can't require that the struct mm_struct::mmap_lock is held,
* since @mapping may be mapped by multiple processes. Instead
* @mapping->i_mmap_rwsem must be held. This might have implications in the
* callbacks, and it's up tho the caller to ensure that the
- * struct mm_struct::mmap_sem is not needed.
+ * struct mm_struct::mmap_lock is not needed.
*
* Also this means that a caller can't rely on the struct
* vm_area_struct::vm_flags to be constant across a call,
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index d18f0e1b6792..9578db83e312 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -2,15 +2,15 @@
/*
* mm/pgtable-generic.c
*
- * Generic pgtable methods declared in asm-generic/pgtable.h
+ * Generic pgtable methods declared in linux/pgtable.h
*
* Copyright (C) 2010 Linus Torvalds
*/
#include <linux/pagemap.h>
#include <linux/hugetlb.h>
+#include <linux/pgtable.h>
#include <asm/tlb.h>
-#include <asm-generic/pgtable.h>
/*
* If a p?d_bad entry is found while walking page tables, report
@@ -53,7 +53,7 @@ void pmd_clear_bad(pmd_t *pmd)
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
- * Only sets the access flags (dirty, accessed), as well as write
+ * Only sets the access flags (dirty, accessed), as well as write
* permission. Furthermore, we know it always gets set to a "more
* permissive" setting, which allows most architectures to optimize
* this. We return whether the PTE actually changed, which in turn
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 74e957e302fe..cc85ce81914a 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -104,12 +104,12 @@ static int process_vm_rw_single_vec(unsigned long addr,
* access remotely because task/mm might not
* current/current->mm
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
pinned_pages = pin_user_pages_remote(task, mm, pa, pinned_pages,
flags, process_pages,
NULL, &locked);
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (pinned_pages <= 0)
return -EFAULT;
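
The down_read()/up_read() pairs being replaced throughout these mm/ hunks all funnel into the new mmap locking API. For reference, the wrappers are thin shims over the same rwsem, which the same series renames from mmap_sem to mmap_lock; the sketch below is a paraphrase of the 5.8-era include/linux/mmap_lock.h, not part of this patch:

    /* Paraphrased mmap locking API wrappers (assumed shape of
     * include/linux/mmap_lock.h at the time of this conversion). */
    static inline void mmap_read_lock(struct mm_struct *mm)
    {
            down_read(&mm->mmap_lock);
    }

    static inline void mmap_read_unlock(struct mm_struct *mm)
    {
            up_read(&mm->mmap_lock);
    }

    static inline void mmap_write_lock(struct mm_struct *mm)
    {
            down_write(&mm->mmap_lock);
    }

    static inline void mmap_write_unlock(struct mm_struct *mm)
    {
            up_write(&mm->mmap_lock);
    }

mmap_assert_locked() and mmap_assert_write_locked() similarly stand in for the open-coded lockdep_assert_held*() calls seen in these hunks.
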
diff --git a/mm/ptdump.c b/mm/ptdump.c
index f4ce916f5602..ba88ec43ff21 100644
--- a/mm/ptdump.c
+++ b/mm/ptdump.c
@@ -141,13 +141,13 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
const struct ptdump_range *range = st->range;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
while (range->start != range->end) {
walk_page_range_novma(mm, range->start, range->end,
&ptdump_ops, pgd, st);
range++;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Flush out the last page */
st->note_page(st, 0, -1, 0);
diff --git a/mm/rmap.c b/mm/rmap.c
index ad4a0fdcc94c..5fe2dedce1fc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -21,7 +21,7 @@
* Lock ordering in mm:
*
* inode->i_mutex (while writing or truncating, not reading or faulting)
- * mm->mmap_sem
+ * mm->mmap_lock
* page->flags PG_locked (lock_page) * (see huegtlbfs below)
* hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
* mapping->i_mmap_rwsem
@@ -177,7 +177,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
* to do any locking for the common case of already having
* an anon_vma.
*
- * This must be called with the mmap_sem held for reading.
+ * This must be called with the mmap_lock held for reading.
*/
int __anon_vma_prepare(struct vm_area_struct *vma)
{
@@ -1444,7 +1444,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (!PageTransCompound(page)) {
/*
* Holding pte lock, we do *not* need
- * mmap_sem here
+ * mmap_lock here
*/
mlock_vma_page(page);
}
@@ -1817,7 +1817,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
/*
* Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
* because that depends on page_mapped(); but not all its usages
- * are holding mmap_sem. Users without mmap_sem are required to
+ * are holding mmap_lock. Users without mmap_lock are required to
* take a reference count to prevent the anon_vma disappearing
*/
anon_vma = page_anon_vma(page);
@@ -1837,7 +1837,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
*
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
@@ -1889,7 +1889,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
diff --git a/mm/shmem.c b/mm/shmem.c
index ea95a3e46fbb..a0dbe62f8042 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -82,7 +82,6 @@ static struct vfsmount *shm_mnt;
#include <linux/uuid.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include "internal.h"
@@ -2320,7 +2319,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
PAGE_SIZE);
kunmap_atomic(page_kaddr);
- /* fallback to copy_from_user outside mmap_sem */
+ /* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
*pagep = page;
shmem_inode_unacct_blocks(inode, 1);
@@ -4137,7 +4136,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
loff_t size = vma->vm_end - vma->vm_start;
/*
- * Cloning a new file under mmap_sem leads to a lock ordering conflict
+ * Cloning a new file under mmap_lock leads to a lock ordering conflict
* between XFS directory reading and selinux: since this file is only
* accessible to the user through its mapping, use S_PRIVATE flag to
* bypass file security, in the same way as shmem_kernel_file_setup().
diff --git a/mm/slub.c b/mm/slub.c
index d52487919278..b8f798b50d44 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2013,7 +2013,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
#ifdef CONFIG_PREEMPTION
/*
- * Calculate the next globally unique transaction for disambiguiation
+ * Calculate the next globally unique transaction for disambiguation
* during cmpxchg. The transactions start with the cpu number and are then
* incremented by CONFIG_NR_CPUS.
*/
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 200aef686722..0db7738d76e9 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -29,7 +29,6 @@
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
/*
* Allocate a block of memory to be used to back the virtual memory map
diff --git a/mm/sparse.c b/mm/sparse.c
index 1aee5a481571..b2b9a3e34696 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -17,7 +17,6 @@
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
/*
* Permanent SPARSEMEM data:
@@ -288,7 +287,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
/*
* Mark all memblocks as present using memory_present(). This is a
- * convienence function that is useful for a number of arches
+ * convenience function that is useful for a number of arches
* to mark all of the systems memory as present during initialization.
*/
void __init memblocks_present(void)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9d20b00627af..e98ff460e9e9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -22,7 +22,6 @@
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
-#include <asm/pgtable.h>
/*
* swapper_space is a fiction, retained to simplify the path through
@@ -553,7 +552,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
* This has been extended to use the NUMA policies from the mm triggering
* the readahead.
*
- * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ * Caller must hold read mmap_lock if vmf->vma is not NULL.
*/
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
@@ -735,7 +734,7 @@ static void swap_ra_info(struct vm_fault *vmf,
* Primitive swap readahead code. We simply read in a few pages whoes
* virtual addresses are around the fault address in the same vma.
*
- * Caller must hold read mmap_sem if vmf->vma is not NULL.
+ * Caller must hold read mmap_lock if vmf->vma is not NULL.
*
*/
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
diff --git a/mm/swapfile.c b/mm/swapfile.c
index a3d191e205f2..987276c557d1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -40,7 +40,6 @@
#include <linux/swap_slots.h>
#include <linux/sort.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
@@ -2101,7 +2100,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
struct vm_area_struct *vma;
int ret = 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->anon_vma) {
ret = unuse_vma(vma, type, frontswap,
@@ -2111,7 +2110,7 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type,
}
cond_resched();
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 7f5194046b01..b80419320c7d 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -76,7 +76,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
PAGE_SIZE);
kunmap_atomic(page_kaddr);
- /* fallback to copy_from_user outside mmap_sem */
+ /* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
ret = -ENOENT;
*pagep = page;
@@ -200,7 +200,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
#ifdef CONFIG_HUGETLB_PAGE
/*
* __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
- * called with mmap_sem held, it will release mmap_sem before returning.
+ * called with mmap_lock held, it will release mmap_lock before returning.
*/
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
struct vm_area_struct *dst_vma,
@@ -228,7 +228,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
* feature is not supported.
*/
if (zeropage) {
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
return -EINVAL;
}
@@ -247,7 +247,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
retry:
/*
- * On routine entry dst_vma is set. If we had to drop mmap_sem and
+ * On routine entry dst_vma is set. If we had to drop mmap_lock and
* retry, dst_vma will be set to NULL and we must lookup again.
*/
if (!dst_vma) {
@@ -315,7 +315,7 @@ retry:
cond_resched();
if (unlikely(err == -ENOENT)) {
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
BUG_ON(!page);
err = copy_huge_page_from_user(page,
@@ -326,7 +326,7 @@ retry:
err = -EFAULT;
goto out;
}
- down_read(&dst_mm->mmap_sem);
+ mmap_read_lock(dst_mm);
dst_vma = NULL;
goto retry;
@@ -346,7 +346,7 @@ retry:
}
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
out:
if (page) {
/*
@@ -357,7 +357,7 @@ out:
* private and shared mappings. See the routine
* restore_reserve_on_error for details. Unfortunately, we
* can not call restore_reserve_on_error now as it would
- * require holding mmap_sem.
+ * require holding mmap_lock.
*
* If a reservation for the page existed in the reservation
* map of a private mapping, the map was modified to indicate
@@ -485,7 +485,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
copied = 0;
page = NULL;
retry:
- down_read(&dst_mm->mmap_sem);
+ mmap_read_lock(dst_mm);
/*
* If memory mappings are changing because of non-cooperative
@@ -583,7 +583,7 @@ retry:
if (unlikely(err == -ENOENT)) {
void *page_kaddr;
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
BUG_ON(!page);
page_kaddr = kmap(page);
@@ -612,7 +612,7 @@ retry:
}
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
out:
if (page)
put_page(page);
@@ -652,7 +652,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
/* Does the address range wrap, or is the span zero-sized? */
BUG_ON(start + len <= start);
- down_read(&dst_mm->mmap_sem);
+ mmap_read_lock(dst_mm);
/*
* If memory mappings are changing because of non-cooperative
@@ -686,6 +686,6 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
err = 0;
out_unlock:
- up_read(&dst_mm->mmap_sem);
+ mmap_read_unlock(dst_mm);
return err;
}
diff --git a/mm/util.c b/mm/util.c
index 41b47d8cae09..c63c8e47be57 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -425,7 +425,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
* @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
*
* Assumes @task and @mm are valid (i.e. at least one reference on each), and
- * that mmap_sem is held as writer.
+ * that mmap_lock is held as writer.
*
* Return:
* * 0 on success
@@ -437,7 +437,7 @@ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
unsigned long locked_vm, limit;
int ret = 0;
- lockdep_assert_held_write(&mm->mmap_sem);
+ mmap_assert_write_locked(mm);
locked_vm = mm->locked_vm;
if (inc) {
@@ -481,10 +481,10 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
if (pages == 0 || !mm)
return 0;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
ret = __account_locked_vm(mm, pages, inc, current,
capable(CAP_IPC_LOCK));
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -501,11 +501,11 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
ret = security_mmap_file(file, prot, flag);
if (!ret) {
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
&populate, &uf);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate)
mm_populate(ret, populate);
@@ -604,6 +604,24 @@ void kvfree(const void *addr)
}
EXPORT_SYMBOL(kvfree);
+/**
+ * kvfree_sensitive - Free a data object containing sensitive information.
+ * @addr: address of the data object to be freed.
+ * @len: length of the data object.
+ *
+ * Use the special memzero_explicit() function to clear the content of a
+ * kvmalloc'ed object containing sensitive data to make sure that the
+ * compiler won't optimize out the data clearing.
+ */
+void kvfree_sensitive(const void *addr, size_t len)
+{
+ if (likely(!ZERO_OR_NULL_PTR(addr))) {
+ memzero_explicit((void *)addr, len);
+ kvfree(addr);
+ }
+}
+EXPORT_SYMBOL(kvfree_sensitive);
+
static inline void *__page_rmapping(struct page *page)
{
unsigned long mapping;
@@ -796,10 +814,6 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
long allowed;
- VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
- -(s64)vm_committed_as_batch * num_online_cpus(),
- "memory commitment underflow");
-
vm_acct_memory(pages);
/*
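
kvfree_sensitive(), added above, exists because an ordinary memset() before freeing is a dead store that the compiler may legally remove. A minimal userspace analogue of the same pattern (hypothetical helper name; glibc's explicit_bzero() stands in for memzero_explicit()) would be:

    #define _DEFAULT_SOURCE
    #include <stdlib.h>
    #include <string.h>

    /* Wipe a buffer of sensitive data before freeing it. explicit_bzero()
     * (glibc >= 2.25) may not be optimized away, which is the role
     * memzero_explicit() plays in the kernel helper above. */
    static void free_sensitive(void *addr, size_t len)
    {
            if (addr) {
                    explicit_bzero(addr, len);
                    free(addr);
            }
    }
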
diff --git a/mm/vmacache.c b/mm/vmacache.c
index cdc32a3b02fa..01a6e6688ec1 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,7 +6,6 @@
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
-#include <asm/pgtable.h>
/*
* Hash based on the pmd of addr if configured with MMU, which provides a good
@@ -25,8 +24,8 @@
* task's vmacache pertains to a different mm (ie, its own). There is
* nothing we can do here.
*
- * Also handle the case where a kernel thread has adopted this mm via use_mm().
- * That kernel thread's vmacache is not applicable to this mm.
+ * Also handle the case where a kernel thread has adopted this mm via
+ * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm.
*/
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1e94497b7388..3091c2ca60df 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2317,7 +2317,7 @@ static inline void __vfree_deferred(const void *addr)
* Use raw_cpu_ptr() because this can be called from preemptible
* context. Preemption is absolutely fine here, because the llist_add()
* implementation is lockless, so it works even if we are adding to
- * nother cpu's list. schedule_work() should be fine with this too.
+ * another cpu's list. schedule_work() should be fine with this too.
*/
struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3792dd19788c..b6d84326bdf2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -682,7 +682,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
freed += ret;
/*
* Bail out if someone want to register a new shrinker to
- * prevent the regsitration from being stalled for long periods
+ * prevent the registration from being stalled for long periods
* by parallel ongoing shrinking.
*/
if (rwsem_is_contended(&shrinker_rwsem)) {
@@ -1613,7 +1613,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
/*
* Update LRU sizes after isolating pages. The LRU size updates must
- * be complete before mem_cgroup_update_lru_size due to a santity check.
+ * be complete before mem_cgroup_update_lru_size due to a sanity check.
*/
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
enum lru_list lru, unsigned long *nr_zone_taken)
@@ -2371,7 +2371,7 @@ out:
/*
* Minimally target SWAP_CLUSTER_MAX pages to keep
- * reclaim moving forwards, avoiding decremeting
+ * reclaim moving forwards, avoiding decrementing
* sc->priority further than desirable.
*/
scan = max(scan, SWAP_CLUSTER_MAX);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index a7db29f7e5f7..3fb23a21f6dd 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2069,24 +2069,14 @@ static int unusable_show(struct seq_file *m, void *arg)
return 0;
}
-static const struct seq_operations unusable_op = {
+static const struct seq_operations unusable_sops = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = unusable_show,
};
-static int unusable_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &unusable_op);
-}
-
-static const struct file_operations unusable_file_ops = {
- .open = unusable_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(unusable);
static void extfrag_show_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
@@ -2121,24 +2111,14 @@ static int extfrag_show(struct seq_file *m, void *arg)
return 0;
}
-static const struct seq_operations extfrag_op = {
+static const struct seq_operations extfrag_sops = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = extfrag_show,
};
-static int extfrag_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &extfrag_op);
-}
-
-static const struct file_operations extfrag_file_ops = {
- .open = extfrag_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(extfrag);
static int __init extfrag_debug_init(void)
{
@@ -2147,10 +2127,10 @@ static int __init extfrag_debug_init(void)
extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
- &unusable_file_ops);
+ &unusable_fops);
debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
- &extfrag_file_ops);
+ &extfrag_fops);
return 0;
}
diff --git a/mm/zbud.c b/mm/zbud.c
index de5dd4ddaa82..bc93aa4e46fc 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -243,7 +243,7 @@ static struct zbud_header *init_zbud_page(struct page *page)
zhdr->last_chunks = 0;
INIT_LIST_HEAD(&zhdr->buddy);
INIT_LIST_HEAD(&zhdr->lru);
- zhdr->under_reclaim = 0;
+ zhdr->under_reclaim = false;
return zhdr;
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index f6dc0673e62c..952a01e45c6a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -39,8 +39,8 @@
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig
index fed9290e3b41..84015ef3ee27 100644
--- a/net/bpfilter/Kconfig
+++ b/net/bpfilter/Kconfig
@@ -9,8 +9,12 @@ menuconfig BPFILTER
if BPFILTER
config BPFILTER_UMH
tristate "bpfilter kernel module with user mode helper"
- depends on CC_CAN_LINK
+ depends on CC_CAN_LINK_STATIC
default m
help
This builds bpfilter kernel module with embedded user mode helper
+
+ Note: your toolchain must support building static binaries, since
+ rootfs isn't mounted at the time when __init functions are called
+ and do_execv won't be able to find the elf interpreter.
endif
diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile
index 36580301da70..f23b53294fba 100644
--- a/net/bpfilter/Makefile
+++ b/net/bpfilter/Makefile
@@ -3,17 +3,14 @@
# Makefile for the Linux BPFILTER layer.
#
-hostprogs := bpfilter_umh
+userprogs := bpfilter_umh
bpfilter_umh-objs := main.o
-KBUILD_HOSTCFLAGS += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi
-HOSTCC := $(CC)
+userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi
-ifeq ($(CONFIG_BPFILTER_UMH), y)
-# builtin bpfilter_umh should be compiled with -static
+# builtin bpfilter_umh should be linked with -static
# since rootfs isn't mounted at the time of __init
# function is called and do_execv won't find elf interpreter
-KBUILD_HOSTLDFLAGS += -static
-endif
+userldflags += -static
$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 66f22e8aa529..afe0e8184c23 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -176,6 +176,10 @@ int ceph_compare_options(struct ceph_options *new_opt,
}
}
+ ret = ceph_compare_crush_locs(&opt1->crush_locs, &opt2->crush_locs);
+ if (ret)
+ return ret;
+
/* any matching mon ip implies a match */
for (i = 0; i < opt1->num_mon; i++) {
if (ceph_monmap_contains(client->monc.monmap,
@@ -259,6 +263,8 @@ enum {
Opt_secret,
Opt_key,
Opt_ip,
+ Opt_crush_location,
+ Opt_read_from_replica,
/* string args above */
Opt_share,
Opt_crc,
@@ -268,11 +274,25 @@ enum {
Opt_abort_on_full,
};
+enum {
+ Opt_read_from_replica_no,
+ Opt_read_from_replica_balance,
+ Opt_read_from_replica_localize,
+};
+
+static const struct constant_table ceph_param_read_from_replica[] = {
+ {"no", Opt_read_from_replica_no},
+ {"balance", Opt_read_from_replica_balance},
+ {"localize", Opt_read_from_replica_localize},
+ {}
+};
+
static const struct fs_parameter_spec ceph_parameters[] = {
fsparam_flag ("abort_on_full", Opt_abort_on_full),
fsparam_flag_no ("cephx_require_signatures", Opt_cephx_require_signatures),
fsparam_flag_no ("cephx_sign_messages", Opt_cephx_sign_messages),
fsparam_flag_no ("crc", Opt_crc),
+ fsparam_string ("crush_location", Opt_crush_location),
fsparam_string ("fsid", Opt_fsid),
fsparam_string ("ip", Opt_ip),
fsparam_string ("key", Opt_key),
@@ -283,6 +303,8 @@ static const struct fs_parameter_spec ceph_parameters[] = {
fsparam_u32 ("osdkeepalive", Opt_osdkeepalivetimeout),
__fsparam (fs_param_is_s32, "osdtimeout", Opt_osdtimeout,
fs_param_deprecated, NULL),
+ fsparam_enum ("read_from_replica", Opt_read_from_replica,
+ ceph_param_read_from_replica),
fsparam_string ("secret", Opt_secret),
fsparam_flag_no ("share", Opt_share),
fsparam_flag_no ("tcp_nodelay", Opt_tcp_nodelay),
@@ -297,6 +319,7 @@ struct ceph_options *ceph_alloc_options(void)
if (!opt)
return NULL;
+ opt->crush_locs = RB_ROOT;
opt->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*opt->mon_addr),
GFP_KERNEL);
if (!opt->mon_addr) {
@@ -319,6 +342,7 @@ void ceph_destroy_options(struct ceph_options *opt)
if (!opt)
return;
+ ceph_clear_crush_locs(&opt->crush_locs);
kfree(opt->name);
if (opt->key) {
ceph_crypto_key_destroy(opt->key);
@@ -453,6 +477,34 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
if (!opt->key)
return -ENOMEM;
return get_secret(opt->key, param->string, &log);
+ case Opt_crush_location:
+ ceph_clear_crush_locs(&opt->crush_locs);
+ err = ceph_parse_crush_location(param->string,
+ &opt->crush_locs);
+ if (err) {
+ error_plog(&log, "Failed to parse CRUSH location: %d",
+ err);
+ return err;
+ }
+ break;
+ case Opt_read_from_replica:
+ switch (result.uint_32) {
+ case Opt_read_from_replica_no:
+ opt->osd_req_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
+ CEPH_OSD_FLAG_LOCALIZE_READS);
+ break;
+ case Opt_read_from_replica_balance:
+ opt->osd_req_flags |= CEPH_OSD_FLAG_BALANCE_READS;
+ opt->osd_req_flags &= ~CEPH_OSD_FLAG_LOCALIZE_READS;
+ break;
+ case Opt_read_from_replica_localize:
+ opt->osd_req_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
+ opt->osd_req_flags &= ~CEPH_OSD_FLAG_BALANCE_READS;
+ break;
+ default:
+ BUG();
+ }
+ break;
case Opt_osdtimeout:
warn_plog(&log, "Ignoring osdtimeout");
@@ -535,6 +587,7 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
{
struct ceph_options *opt = client->options;
size_t pos = m->count;
+ struct rb_node *n;
if (opt->name) {
seq_puts(m, "name=");
@@ -544,6 +597,28 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
if (opt->key)
seq_puts(m, "secret=<hidden>,");
+ if (!RB_EMPTY_ROOT(&opt->crush_locs)) {
+ seq_puts(m, "crush_location=");
+ for (n = rb_first(&opt->crush_locs); ; ) {
+ struct crush_loc_node *loc =
+ rb_entry(n, struct crush_loc_node, cl_node);
+
+ seq_printf(m, "%s:%s", loc->cl_loc.cl_type_name,
+ loc->cl_loc.cl_name);
+ n = rb_next(n);
+ if (!n)
+ break;
+
+ seq_putc(m, '|');
+ }
+ seq_putc(m, ',');
+ }
+ if (opt->osd_req_flags & CEPH_OSD_FLAG_BALANCE_READS) {
+ seq_puts(m, "read_from_replica=balance,");
+ } else if (opt->osd_req_flags & CEPH_OSD_FLAG_LOCALIZE_READS) {
+ seq_puts(m, "read_from_replica=localize,");
+ }
+
if (opt->flags & CEPH_OPT_FSID)
seq_printf(m, "fsid=%pU,", &opt->fsid);
if (opt->flags & CEPH_OPT_NOSHARE)
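
Putting the two new options together, a client options string accepted by this parser would look roughly as follows (host and rack names are purely illustrative, and where the string is supplied -- rbd, the cephfs mount helper, etc. -- depends on the consumer):

    # Prefer the closest replica for reads; the client sits on host
    # "node1" in rack "rack1":
    crush_location=host:node1|rack:rack1,read_from_replica=localize

    # Or spread reads randomly across the acting set:
    read_from_replica=balance
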
diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c
index 3d70244bc1b6..254ded0b05f6 100644
--- a/net/ceph/crush/crush.c
+++ b/net/ceph/crush/crush.c
@@ -2,7 +2,6 @@
#ifdef __KERNEL__
# include <linux/slab.h>
# include <linux/crush/crush.h>
-void clear_choose_args(struct crush_map *c);
#else
# include "crush_compat.h"
# include "crush.h"
@@ -130,6 +129,8 @@ void crush_destroy(struct crush_map *map)
#ifndef __KERNEL__
kfree(map->choose_tries);
#else
+ clear_crush_names(&map->type_names);
+ clear_crush_names(&map->names);
clear_choose_args(map);
#endif
kfree(map);
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 1344f232ecc5..409d505ff320 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -81,11 +81,13 @@ static int osdmap_show(struct seq_file *s, void *p)
u32 state = map->osd_state[i];
char sb[64];
- seq_printf(s, "osd%d\t%s\t%3d%%\t(%s)\t%3d%%\n",
+ seq_printf(s, "osd%d\t%s\t%3d%%\t(%s)\t%3d%%\t%2d\n",
i, ceph_pr_addr(addr),
((map->osd_weight[i]*100) >> 16),
ceph_osdmap_state_str(sb, sizeof(sb), state),
- ((ceph_get_primary_affinity(map, i)*100) >> 16));
+ ((ceph_get_primary_affinity(map, i)*100) >> 16),
+ ceph_get_crush_locality(map, i,
+ &client->options->crush_locs));
}
for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {
struct ceph_pg_mapping *pg =
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1d4973f8cd7a..4fea3c33af2a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -932,10 +932,14 @@ static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
op->watch.gen = 0;
}
+/*
+ * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
+ */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
- u64 expected_write_size)
+ u64 expected_write_size,
+ u32 flags)
{
struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
CEPH_OSD_OP_SETALLOCHINT,
@@ -943,6 +947,7 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
op->alloc_hint.expected_object_size = expected_object_size;
op->alloc_hint.expected_write_size = expected_write_size;
+ op->alloc_hint.flags = flags;
/*
* CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
@@ -1018,6 +1023,7 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
cpu_to_le64(src->alloc_hint.expected_object_size);
dst->alloc_hint.expected_write_size =
cpu_to_le64(src->alloc_hint.expected_write_size);
+ dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
break;
case CEPH_OSD_OP_SETXATTR:
case CEPH_OSD_OP_CMPXATTR:
@@ -1497,6 +1503,45 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
(osdc->osdmap->epoch < osdc->epoch_barrier);
}
+static int pick_random_replica(const struct ceph_osds *acting)
+{
+ int i = prandom_u32() % acting->size;
+
+ dout("%s picked osd%d, primary osd%d\n", __func__,
+ acting->osds[i], acting->primary);
+ return i;
+}
+
+/*
+ * Picks the closest replica based on client's location given by
+ * crush_location option. Prefers the primary if the locality is
+ * the same.
+ */
+static int pick_closest_replica(struct ceph_osd_client *osdc,
+ const struct ceph_osds *acting)
+{
+ struct ceph_options *opt = osdc->client->options;
+ int best_i, best_locality;
+ int i = 0, locality;
+
+ do {
+ locality = ceph_get_crush_locality(osdc->osdmap,
+ acting->osds[i],
+ &opt->crush_locs);
+ if (i == 0 ||
+ (locality >= 0 && best_locality < 0) ||
+ (locality >= 0 && best_locality >= 0 &&
+ locality < best_locality)) {
+ best_i = i;
+ best_locality = locality;
+ }
+ } while (++i < acting->size);
+
+ dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
+ acting->osds[best_i], best_locality, acting->primary);
+ return best_i;
+}
+
enum calc_target_result {
CALC_TARGET_NO_ACTION = 0,
CALC_TARGET_NEED_RESEND,
@@ -1510,6 +1555,8 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
struct ceph_pg_pool_info *pi;
struct ceph_pg pgid, last_pgid;
struct ceph_osds up, acting;
+ bool is_read = t->flags & CEPH_OSD_FLAG_READ;
+ bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
bool force_resend = false;
bool unpaused = false;
bool legacy_change = false;
@@ -1540,9 +1587,9 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
ceph_oid_copy(&t->target_oid, &t->base_oid);
ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
- if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
+ if (is_read && pi->read_tier >= 0)
t->target_oloc.pool = pi->read_tier;
- if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
+ if (is_write && pi->write_tier >= 0)
t->target_oloc.pool = pi->write_tier;
pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
@@ -1581,7 +1628,8 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
unpaused = true;
}
legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
- ceph_osds_changed(&t->acting, &acting, any_change);
+ ceph_osds_changed(&t->acting, &acting,
+ t->used_replica || any_change);
if (t->pg_num)
split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
@@ -1597,7 +1645,24 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
t->sort_bitwise = sort_bitwise;
t->recovery_deletes = recovery_deletes;
- t->osd = acting.primary;
+ if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
+ CEPH_OSD_FLAG_LOCALIZE_READS)) &&
+ !is_write && pi->type == CEPH_POOL_TYPE_REP &&
+ acting.size > 1) {
+ int pos;
+
+ WARN_ON(!is_read || acting.osds[0] != acting.primary);
+ if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
+ pos = pick_random_replica(&acting);
+ } else {
+ pos = pick_closest_replica(osdc, &acting);
+ }
+ t->osd = acting.osds[pos];
+ t->used_replica = pos > 0;
+ } else {
+ t->osd = acting.primary;
+ t->used_replica = false;
+ }
}
if (unpaused || legacy_change || force_resend || split)
@@ -2366,13 +2431,17 @@ promote:
static void account_request(struct ceph_osd_request *req)
{
+ struct ceph_osd_client *osdc = req->r_osdc;
+
WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
req->r_flags |= CEPH_OSD_FLAG_ONDISK;
- atomic_inc(&req->r_osdc->num_requests);
+ req->r_flags |= osdc->client->options->osd_req_flags;
+ atomic_inc(&osdc->num_requests);
req->r_start_stamp = jiffies;
+ req->r_start_latency = ktime_get();
}
static void submit_request(struct ceph_osd_request *req, bool wrlocked)
@@ -2389,6 +2458,8 @@ static void finish_request(struct ceph_osd_request *req)
WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+ req->r_end_latency = ktime_get();
+
if (req->r_osd)
unlink_request(req->r_osd, req);
atomic_dec(&osdc->num_requests);
@@ -3657,6 +3728,26 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
goto out_unlock_osdc;
}
+ if (m.result == -EAGAIN) {
+ dout("req %p tid %llu EAGAIN\n", req, req->r_tid);
+ unlink_request(osd, req);
+ mutex_unlock(&osd->lock);
+
+ /*
+ * The object is missing on the replica or not (yet)
+ * readable. Clear pgid to force a resend to the primary
+ * via legacy_change.
+ */
+ req->r_t.pgid.pool = 0;
+ req->r_t.pgid.seed = 0;
+ WARN_ON(!req->r_t.used_replica);
+ req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
+ CEPH_OSD_FLAG_LOCALIZE_READS);
+ req->r_tid = 0;
+ __submit_request(req, false);
+ goto out_unlock_osdc;
+ }
+
if (m.num_ops != req->r_num_ops) {
pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
req->r_num_ops, req->r_tid);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 2a6e63a8edbe..96c25f5e064a 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -138,6 +138,79 @@ bad:
return -EINVAL;
}
+struct crush_name_node {
+ struct rb_node cn_node;
+ int cn_id;
+ char cn_name[];
+};
+
+static struct crush_name_node *alloc_crush_name(size_t name_len)
+{
+ struct crush_name_node *cn;
+
+ cn = kmalloc(sizeof(*cn) + name_len + 1, GFP_NOIO);
+ if (!cn)
+ return NULL;
+
+ RB_CLEAR_NODE(&cn->cn_node);
+ return cn;
+}
+
+static void free_crush_name(struct crush_name_node *cn)
+{
+ WARN_ON(!RB_EMPTY_NODE(&cn->cn_node));
+
+ kfree(cn);
+}
+
+DEFINE_RB_FUNCS(crush_name, struct crush_name_node, cn_id, cn_node)
+
+static int decode_crush_names(void **p, void *end, struct rb_root *root)
+{
+ u32 n;
+
+ ceph_decode_32_safe(p, end, n, e_inval);
+ while (n--) {
+ struct crush_name_node *cn;
+ int id;
+ u32 name_len;
+
+ ceph_decode_32_safe(p, end, id, e_inval);
+ ceph_decode_32_safe(p, end, name_len, e_inval);
+ ceph_decode_need(p, end, name_len, e_inval);
+
+ cn = alloc_crush_name(name_len);
+ if (!cn)
+ return -ENOMEM;
+
+ cn->cn_id = id;
+ memcpy(cn->cn_name, *p, name_len);
+ cn->cn_name[name_len] = '\0';
+ *p += name_len;
+
+ if (!__insert_crush_name(root, cn)) {
+ free_crush_name(cn);
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+
+e_inval:
+ return -EINVAL;
+}
+
+void clear_crush_names(struct rb_root *root)
+{
+ while (!RB_EMPTY_ROOT(root)) {
+ struct crush_name_node *cn =
+ rb_entry(rb_first(root), struct crush_name_node, cn_node);
+
+ erase_crush_name(root, cn);
+ free_crush_name(cn);
+ }
+}
+
static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
struct crush_choose_arg_map *arg_map;
@@ -354,6 +427,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
if (c == NULL)
return ERR_PTR(-ENOMEM);
+ c->type_names = RB_ROOT;
+ c->names = RB_ROOT;
c->choose_args = RB_ROOT;
/* set tunables to default values */
@@ -510,8 +585,14 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
}
}
- ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
- ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
+ err = decode_crush_names(p, end, &c->type_names);
+ if (err)
+ goto fail;
+
+ err = decode_crush_names(p, end, &c->names);
+ if (err)
+ goto fail;
+
ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */
/* tunables */
@@ -636,48 +717,11 @@ DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
/*
* rbtree of pg pool info
*/
-static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
-{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct ceph_pg_pool_info *pi = NULL;
-
- while (*p) {
- parent = *p;
- pi = rb_entry(parent, struct ceph_pg_pool_info, node);
- if (new->id < pi->id)
- p = &(*p)->rb_left;
- else if (new->id > pi->id)
- p = &(*p)->rb_right;
- else
- return -EEXIST;
- }
-
- rb_link_node(&new->node, parent, p);
- rb_insert_color(&new->node, root);
- return 0;
-}
-
-static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
-{
- struct ceph_pg_pool_info *pi;
- struct rb_node *n = root->rb_node;
-
- while (n) {
- pi = rb_entry(n, struct ceph_pg_pool_info, node);
- if (id < pi->id)
- n = n->rb_left;
- else if (id > pi->id)
- n = n->rb_right;
- else
- return pi;
- }
- return NULL;
-}
+DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)
struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
- return __lookup_pg_pool(&map->pg_pools, id);
+ return lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
@@ -690,8 +734,7 @@ const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
if (WARN_ON_ONCE(id > (u64) INT_MAX))
return NULL;
- pi = __lookup_pg_pool(&map->pg_pools, (int) id);
-
+ pi = lookup_pg_pool(&map->pg_pools, id);
return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
@@ -714,14 +757,14 @@ u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
{
struct ceph_pg_pool_info *pi;
- pi = __lookup_pg_pool(&map->pg_pools, id);
+ pi = lookup_pg_pool(&map->pg_pools, id);
return pi ? pi->flags : 0;
}
EXPORT_SYMBOL(ceph_pg_pool_flags);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
- rb_erase(&pi->node, root);
+ erase_pg_pool(root, pi);
kfree(pi->name);
kfree(pi);
}
@@ -903,7 +946,7 @@ static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
ceph_decode_32_safe(p, end, len, bad);
dout(" pool %llu len %d\n", pool, len);
ceph_decode_need(p, end, len, bad);
- pi = __lookup_pg_pool(&map->pg_pools, pool);
+ pi = lookup_pg_pool(&map->pg_pools, pool);
if (pi) {
char *name = kstrndup(*p, len, GFP_NOFS);
@@ -1154,18 +1197,18 @@ static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
ceph_decode_64_safe(p, end, pool, e_inval);
- pi = __lookup_pg_pool(&map->pg_pools, pool);
+ pi = lookup_pg_pool(&map->pg_pools, pool);
if (!incremental || !pi) {
pi = kzalloc(sizeof(*pi), GFP_NOFS);
if (!pi)
return -ENOMEM;
+ RB_CLEAR_NODE(&pi->node);
pi->id = pool;
- ret = __insert_pg_pool(&map->pg_pools, pi);
- if (ret) {
+ if (!__insert_pg_pool(&map->pg_pools, pi)) {
kfree(pi);
- return ret;
+ return -EEXIST;
}
}
@@ -1829,7 +1872,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
struct ceph_pg_pool_info *pi;
ceph_decode_64_safe(p, end, pool, e_inval);
- pi = __lookup_pg_pool(&map->pg_pools, pool);
+ pi = lookup_pg_pool(&map->pg_pools, pool);
if (pi)
__remove_pg_pool(&map->pg_pools, pi);
}
@@ -2672,3 +2715,221 @@ int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);
+
+static struct crush_loc_node *alloc_crush_loc(size_t type_name_len,
+ size_t name_len)
+{
+ struct crush_loc_node *loc;
+
+ loc = kmalloc(sizeof(*loc) + type_name_len + name_len + 2, GFP_NOIO);
+ if (!loc)
+ return NULL;
+
+ RB_CLEAR_NODE(&loc->cl_node);
+ return loc;
+}
+
+static void free_crush_loc(struct crush_loc_node *loc)
+{
+ WARN_ON(!RB_EMPTY_NODE(&loc->cl_node));
+
+ kfree(loc);
+}
+
+static int crush_loc_compare(const struct crush_loc *loc1,
+ const struct crush_loc *loc2)
+{
+ return strcmp(loc1->cl_type_name, loc2->cl_type_name) ?:
+ strcmp(loc1->cl_name, loc2->cl_name);
+}
+
+DEFINE_RB_FUNCS2(crush_loc, struct crush_loc_node, cl_loc, crush_loc_compare,
+ RB_BYPTR, const struct crush_loc *, cl_node)
+
+/*
+ * Parses a set of <bucket type name>':'<bucket name> pairs separated
+ * by '|', e.g. "rack:foo1|rack:foo2|datacenter:bar".
+ *
+ * Note that @crush_location is modified by strsep().
+ */
+int ceph_parse_crush_location(char *crush_location, struct rb_root *locs)
+{
+ struct crush_loc_node *loc;
+ const char *type_name, *name, *colon;
+ size_t type_name_len, name_len;
+
+ dout("%s '%s'\n", __func__, crush_location);
+ while ((type_name = strsep(&crush_location, "|"))) {
+ colon = strchr(type_name, ':');
+ if (!colon)
+ return -EINVAL;
+
+ type_name_len = colon - type_name;
+ if (type_name_len == 0)
+ return -EINVAL;
+
+ name = colon + 1;
+ name_len = strlen(name);
+ if (name_len == 0)
+ return -EINVAL;
+
+ loc = alloc_crush_loc(type_name_len, name_len);
+ if (!loc)
+ return -ENOMEM;
+
+ loc->cl_loc.cl_type_name = loc->cl_data;
+ memcpy(loc->cl_loc.cl_type_name, type_name, type_name_len);
+ loc->cl_loc.cl_type_name[type_name_len] = '\0';
+
+ loc->cl_loc.cl_name = loc->cl_data + type_name_len + 1;
+ memcpy(loc->cl_loc.cl_name, name, name_len);
+ loc->cl_loc.cl_name[name_len] = '\0';
+
+ if (!__insert_crush_loc(locs, loc)) {
+ free_crush_loc(loc);
+ return -EEXIST;
+ }
+
+ dout("%s type_name '%s' name '%s'\n", __func__,
+ loc->cl_loc.cl_type_name, loc->cl_loc.cl_name);
+ }
+
+ return 0;
+}
+
+int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2)
+{
+ struct rb_node *n1 = rb_first(locs1);
+ struct rb_node *n2 = rb_first(locs2);
+ int ret;
+
+ for ( ; n1 && n2; n1 = rb_next(n1), n2 = rb_next(n2)) {
+ struct crush_loc_node *loc1 =
+ rb_entry(n1, struct crush_loc_node, cl_node);
+ struct crush_loc_node *loc2 =
+ rb_entry(n2, struct crush_loc_node, cl_node);
+
+ ret = crush_loc_compare(&loc1->cl_loc, &loc2->cl_loc);
+ if (ret)
+ return ret;
+ }
+
+ if (!n1 && n2)
+ return -1;
+ if (n1 && !n2)
+ return 1;
+ return 0;
+}
+
+void ceph_clear_crush_locs(struct rb_root *locs)
+{
+ while (!RB_EMPTY_ROOT(locs)) {
+ struct crush_loc_node *loc =
+ rb_entry(rb_first(locs), struct crush_loc_node, cl_node);
+
+ erase_crush_loc(locs, loc);
+ free_crush_loc(loc);
+ }
+}
+
+/*
+ * [a-zA-Z0-9-_.]+
+ */
+static bool is_valid_crush_name(const char *name)
+{
+ do {
+ if (!('a' <= *name && *name <= 'z') &&
+ !('A' <= *name && *name <= 'Z') &&
+ !('0' <= *name && *name <= '9') &&
+ *name != '-' && *name != '_' && *name != '.')
+ return false;
+ } while (*++name != '\0');
+
+ return true;
+}
+
+/*
+ * Gets the parent of an item. Returns its id (<0 because the
+ * parent is always a bucket), type id (>0 for the same reason,
+ * via @parent_type_id) and location (via @parent_loc). If no
+ * parent, returns 0.
+ *
+ * Does a linear search, as there are no parent pointers of any
+ * kind. Note that the result is ambiguous for items that occur
+ * multiple times in the map.
+ */
+static int get_immediate_parent(struct crush_map *c, int id,
+ u16 *parent_type_id,
+ struct crush_loc *parent_loc)
+{
+ struct crush_bucket *b;
+ struct crush_name_node *type_cn, *cn;
+ int i, j;
+
+ for (i = 0; i < c->max_buckets; i++) {
+ b = c->buckets[i];
+ if (!b)
+ continue;
+
+ /* ignore per-class shadow hierarchy */
+ cn = lookup_crush_name(&c->names, b->id);
+ if (!cn || !is_valid_crush_name(cn->cn_name))
+ continue;
+
+ for (j = 0; j < b->size; j++) {
+ if (b->items[j] != id)
+ continue;
+
+ *parent_type_id = b->type;
+ type_cn = lookup_crush_name(&c->type_names, b->type);
+ parent_loc->cl_type_name = type_cn->cn_name;
+ parent_loc->cl_name = cn->cn_name;
+ return b->id;
+ }
+ }
+
+ return 0; /* no parent */
+}
+
+/*
+ * Calculates the locality/distance from an item to a client
+ * location expressed in terms of CRUSH hierarchy as a set of
+ * (bucket type name, bucket name) pairs. Specifically, looks
+ * for the lowest-valued bucket type for which the location of
+ * @id matches one of the locations in @locs, so for standard
+ * bucket types (host = 1, rack = 3, datacenter = 8, zone = 9)
+ * a matching host is closer than a matching rack and a matching
+ * data center is closer than a matching zone.
+ *
+ * Specifying multiple locations (a "multipath" location) such
+ * as "rack=foo1 rack=foo2 datacenter=bar" is allowed -- @locs
+ * is a multimap. The locality will be:
+ *
+ * - 3 for OSDs in racks foo1 and foo2
+ * - 8 for OSDs in data center bar
+ * - -1 for all other OSDs
+ *
+ * The lowest possible bucket type is 1, so the best locality
+ * for an OSD is 1 (i.e. a matching host). Locality 0 would be
+ * the OSD itself.
+ */
+int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id,
+ struct rb_root *locs)
+{
+ struct crush_loc loc;
+ u16 type_id;
+
+ /*
+ * Instead of repeated get_immediate_parent() calls,
+ * the location of @id could be obtained with a single
+ * depth-first traversal.
+ */
+ for (;;) {
+ id = get_immediate_parent(osdmap->crush, id, &type_id, &loc);
+ if (id >= 0)
+ return -1; /* not local */
+
+ if (lookup_crush_loc(locs, &loc))
+ return type_id;
+ }
+}
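
ceph_parse_crush_location() above expects a '|'-separated list of <type>:<name> pairs. A standalone sketch of just that grammar (illustrative only -- no rbtree bookkeeping, userspace strsep()/strchr()) looks like this:

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <string.h>

    /* Split a crush_location-style string into (type, name) pairs, mirroring
     * the kernel loop: strsep() on '|', then strchr() for the ':' separator.
     * The buffer is modified in place. Returns -1 on a malformed pair. */
    static int parse_crush_location(char *s)
    {
            char *pair;

            while ((pair = strsep(&s, "|"))) {
                    char *colon = strchr(pair, ':');

                    if (!colon || colon == pair || colon[1] == '\0')
                            return -1;
                    *colon = '\0';
                    printf("type '%s' name '%s'\n", pair, colon + 1);
            }
            return 0;
    }

    int main(void)
    {
            char buf[] = "host:node1|rack:rack1|datacenter:dc1";

            return parse_crush_location(buf) ? 1 : 0;
    }
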
diff --git a/net/core/dev.c b/net/core/dev.c
index 10684833f864..061496a1f640 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -79,6 +79,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
+#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
@@ -194,7 +195,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
-static seqcount_t devnet_rename_seq;
+static DECLARE_RWSEM(devnet_rename_sem);
static inline void dev_base_seq_inc(struct net *net)
{
@@ -998,33 +999,28 @@ EXPORT_SYMBOL(dev_get_by_napi_id);
* @net: network namespace
* @name: a pointer to the buffer where the name will be stored.
* @ifindex: the ifindex of the interface to get the name from.
- *
- * The use of raw_seqcount_begin() and cond_resched() before
- * retrying is required as we want to give the writers a chance
- * to complete when CONFIG_PREEMPTION is not set.
*/
int netdev_get_name(struct net *net, char *name, int ifindex)
{
struct net_device *dev;
- unsigned int seq;
+ int ret;
-retry:
- seq = raw_seqcount_begin(&devnet_rename_seq);
+ down_read(&devnet_rename_sem);
rcu_read_lock();
+
dev = dev_get_by_index_rcu(net, ifindex);
if (!dev) {
- rcu_read_unlock();
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
strcpy(name, dev->name);
- rcu_read_unlock();
- if (read_seqcount_retry(&devnet_rename_seq, seq)) {
- cond_resched();
- goto retry;
- }
- return 0;
+ ret = 0;
+out:
+ rcu_read_unlock();
+ up_read(&devnet_rename_sem);
+ return ret;
}
/**
@@ -1296,10 +1292,10 @@ int dev_change_name(struct net_device *dev, const char *newname)
likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
return -EBUSY;
- write_seqcount_begin(&devnet_rename_seq);
+ down_write(&devnet_rename_sem);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
return 0;
}
@@ -1307,7 +1303,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
err = dev_get_valid_name(net, dev, newname);
if (err < 0) {
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
return err;
}
@@ -1322,11 +1318,11 @@ rollback:
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
return ret;
}
- write_seqcount_end(&devnet_rename_seq);
+ up_write(&devnet_rename_sem);
netdev_adjacent_rename_links(dev, oldname);
@@ -1347,7 +1343,7 @@ rollback:
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
- write_seqcount_begin(&devnet_rename_seq);
+ down_write(&devnet_rename_sem);
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
diff --git a/net/core/filter.c b/net/core/filter.c
index d01a244b5087..209482a4eaa2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5050,7 +5050,7 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len
int err;
struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
- if (!seg6_validate_srh(srh, len))
+ if (!seg6_validate_srh(srh, len, false))
return -EINVAL;
switch (type) {
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b109cc8a6dd8..f93f8ace6c56 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -128,7 +128,7 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
return -ENOMEM;
if (write) {
- ret = cpumask_parse_user(buffer, *lenp, mask);
+ ret = cpumask_parse(buffer, mask);
if (ret)
goto done;
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 677068deb68c..5eaf173eaaca 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -140,8 +140,7 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
ret = __ethtool_get_link_ksettings(dev, &ksettings);
if (ret < 0) {
- if (info)
- GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
+ GENL_SET_ERR_MSG(info, "failed to retrieve link settings");
goto out_ops;
}
lsettings = &ksettings.base;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f40b1b72f979..afaf582a5aa9 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -902,6 +902,7 @@ void inet_csk_prepare_forced_close(struct sock *sk)
bh_unlock_sock(sk);
sock_put(sk);
inet_csk_prepare_for_destroy_sock(sk);
+ inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 15d47d5e7951..27716e4932bc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1734,7 +1734,7 @@ int tcp_mmap(struct file *file, struct socket *sock,
return -EPERM;
vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
- /* Instruct vm_insert_page() to not down_read(mmap_sem) */
+ /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_ops = &tcp_vm_ops;
@@ -1762,11 +1762,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
sock_rps_record_flow(sk);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, address);
if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EINVAL;
}
zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
@@ -1827,7 +1827,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
frags++;
}
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (length) {
WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index d64b83e85642..ce4fbba4acce 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -779,7 +779,7 @@ static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
- state->pid_ns = proc_pid_ns(file_inode(seq->file));
+ state->pid_ns = proc_pid_ns(file_inode(seq->file)->i_sb);
rcu_read_lock_bh();
return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2c843ff5e3a9..20576e87a5f7 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -493,7 +493,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)
opt->srcrt;
- if (!seg6_validate_srh(srh, optlen))
+ if (!seg6_validate_srh(srh, optlen, false))
goto sticky_done;
break;
}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 37b434293bda..d2f8138e5a73 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -25,7 +25,7 @@
#include <net/seg6_hmac.h>
#endif
-bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
+bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
{
unsigned int tlv_offset;
int max_last_entry;
@@ -37,13 +37,17 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
if (((srh->hdrlen + 1) << 3) != len)
return false;
- max_last_entry = (srh->hdrlen / 2) - 1;
-
- if (srh->first_segment > max_last_entry)
+ if (!reduced && srh->segments_left > srh->first_segment) {
return false;
+ } else {
+ max_last_entry = (srh->hdrlen / 2) - 1;
- if (srh->segments_left > srh->first_segment + 1)
- return false;
+ if (srh->first_segment > max_last_entry)
+ return false;
+
+ if (srh->segments_left > srh->first_segment + 1)
+ return false;
+ }
tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index c7cbfeae94f5..e0e9f48ab14f 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -426,7 +426,7 @@ static int seg6_build_state(struct net *net, struct nlattr *nla,
}
/* verify that SRH is consistent */
- if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo)))
+ if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo), false))
return -EINVAL;
newts = lwtunnel_state_alloc(tuninfo_len + sizeof(*slwt));
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 52493423f329..eba23279912d 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -87,7 +87,7 @@ static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
*/
srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
- if (!seg6_validate_srh(srh, len))
+ if (!seg6_validate_srh(srh, len, true))
return NULL;
return srh;
@@ -495,7 +495,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
return false;
srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
- if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
+ if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3, true))
return false;
srh_state->valid = true;
@@ -670,7 +670,7 @@ static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
if (len < sizeof(*srh) + sizeof(struct in6_addr))
return -EINVAL;
- if (!seg6_validate_srh(srh, len))
+ if (!seg6_validate_srh(srh, len, false))
return -EINVAL;
slwt->srh = kmemdup(srh, len, GFP_KERNEL);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 78f046ec506f..3ac7c8c1548d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -376,7 +376,7 @@ static bool nf_remove_net_hook(struct nf_hook_entries *old,
if (orig_ops[i] != unreg)
continue;
WRITE_ONCE(old->hooks[i].hook, accept_all);
- WRITE_ONCE(orig_ops[i], &dummy_ops);
+ WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
return true;
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2f049692e012..6c19b91bbb86 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -513,15 +513,58 @@ static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
kfree(attrbuf);
}
-static int genl_lock_start(struct netlink_callback *cb)
+struct genl_start_context {
+ const struct genl_family *family;
+ struct nlmsghdr *nlh;
+ struct netlink_ext_ack *extack;
+ const struct genl_ops *ops;
+ int hdrlen;
+};
+
+static int genl_start(struct netlink_callback *cb)
{
- const struct genl_ops *ops = genl_dumpit_info(cb)->ops;
+ struct genl_start_context *ctx = cb->data;
+ const struct genl_ops *ops = ctx->ops;
+ struct genl_dumpit_info *info;
+ struct nlattr **attrs = NULL;
int rc = 0;
+ if (ops->validate & GENL_DONT_VALIDATE_DUMP)
+ goto no_attrs;
+
+ if (ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
+ return -EINVAL;
+
+ attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
+ ops, ctx->hdrlen,
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ true);
+ if (IS_ERR(attrs))
+ return PTR_ERR(attrs);
+
+no_attrs:
+ info = genl_dumpit_info_alloc();
+ if (!info) {
+ kfree(attrs);
+ return -ENOMEM;
+ }
+ info->family = ctx->family;
+ info->ops = ops;
+ info->attrs = attrs;
+
+ cb->data = info;
if (ops->start) {
- genl_lock();
+ if (!ctx->family->parallel_ops)
+ genl_lock();
rc = ops->start(cb);
- genl_unlock();
+ if (!ctx->family->parallel_ops)
+ genl_unlock();
+ }
+
+ if (rc) {
+ kfree(attrs);
+ genl_dumpit_info_free(info);
+ cb->data = NULL;
}
return rc;
}
@@ -548,7 +591,7 @@ static int genl_lock_done(struct netlink_callback *cb)
rc = ops->done(cb);
genl_unlock();
}
- genl_family_rcv_msg_attrs_free(info->family, info->attrs, true);
+ genl_family_rcv_msg_attrs_free(info->family, info->attrs, false);
genl_dumpit_info_free(info);
return rc;
}
@@ -573,43 +616,23 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
const struct genl_ops *ops,
int hdrlen, struct net *net)
{
- struct genl_dumpit_info *info;
- struct nlattr **attrs = NULL;
+ struct genl_start_context ctx;
int err;
if (!ops->dumpit)
return -EOPNOTSUPP;
- if (ops->validate & GENL_DONT_VALIDATE_DUMP)
- goto no_attrs;
-
- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
- return -EINVAL;
-
- attrs = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
- ops, hdrlen,
- GENL_DONT_VALIDATE_DUMP_STRICT,
- true);
- if (IS_ERR(attrs))
- return PTR_ERR(attrs);
-
-no_attrs:
- /* Allocate dumpit info. It is going to be freed by done() callback. */
- info = genl_dumpit_info_alloc();
- if (!info) {
- genl_family_rcv_msg_attrs_free(family, attrs, true);
- return -ENOMEM;
- }
-
- info->family = family;
- info->ops = ops;
- info->attrs = attrs;
+ ctx.family = family;
+ ctx.nlh = nlh;
+ ctx.extack = extack;
+ ctx.ops = ops;
+ ctx.hdrlen = hdrlen;
if (!family->parallel_ops) {
struct netlink_dump_control c = {
.module = family->module,
- .data = info,
- .start = genl_lock_start,
+ .data = &ctx,
+ .start = genl_start,
.dump = genl_lock_dumpit,
.done = genl_lock_done,
};
@@ -617,12 +640,11 @@ no_attrs:
genl_unlock();
err = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
genl_lock();
-
} else {
struct netlink_dump_control c = {
.module = family->module,
- .data = info,
- .start = ops->start,
+ .data = &ctx,
+ .start = genl_start,
.dump = ops->dumpit,
.done = genl_parallel_done,
};
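
Aside: the genetlink change above moves attribute parsing and dumpit-info allocation into a common start() callback, so locked and parallel families share the same per-dump setup and the matching done() callback owns the teardown. A minimal sketch of that allocate-in-start / free-in-done lifecycle, with purely illustrative names (toy_cb, toy_dump_info) and no real netlink API, could be:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-ins (assumption: illustrative only, not <net/genetlink.h>). */
    struct toy_cb {
            void *data;              /* per-dump state, owned by start()/done() */
    };

    struct toy_dump_info {
            char *attrs;             /* stands in for the parsed attributes */
    };

    /* start(): parse once, allocate per-dump state, hand ownership to cb->data */
    static int toy_start(struct toy_cb *cb, const char *request)
    {
            struct toy_dump_info *info = calloc(1, sizeof(*info));

            if (!info)
                    return -1;
            info->attrs = strdup(request);
            if (!info->attrs) {
                    free(info);
                    return -1;
            }
            cb->data = info;
            return 0;
    }

    /* dumpit(): may run many times; only reads the per-dump state */
    static int toy_dumpit(struct toy_cb *cb)
    {
            struct toy_dump_info *info = cb->data;

            printf("dumping with attrs: %s\n", info->attrs);
            return 0;                /* 0 = nothing more to dump */
    }

    /* done(): releases everything start() allocated, exactly once */
    static int toy_done(struct toy_cb *cb)
    {
            struct toy_dump_info *info = cb->data;

            free(info->attrs);
            free(info);
            cb->data = NULL;
            return 0;
    }

    int main(void)
    {
            struct toy_cb cb = { 0 };

            if (toy_start(&cb, "ifindex=2") == 0) {
                    toy_dumpit(&cb);
                    toy_done(&cb);
            }
            return 0;
    }
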
diff --git a/net/rds/Makefile b/net/rds/Makefile
index e647f9de104a..8fdc118e2927 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -7,7 +7,7 @@ rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
rds_rdma-y := rdma_transport.o \
ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
- ib_sysctl.o ib_rdma.o ib_fmr.o ib_frmr.o
+ ib_sysctl.o ib_rdma.o ib_frmr.o
obj-$(CONFIG_RDS_TCP) += rds_tcp.o
diff --git a/net/rds/ib.c b/net/rds/ib.c
index a792d8a3872a..deecbdcdae84 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -127,19 +127,23 @@ void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
queue_work(rds_wq, &rds_ibdev->free_work);
}
-static void rds_ib_add_one(struct ib_device *device)
+static int rds_ib_add_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
- bool has_fr, has_fmr;
+ int ret;
/* Only handle IB (no iWARP) devices */
if (device->node_type != RDMA_NODE_IB_CA)
- return;
+ return -EOPNOTSUPP;
+
+ /* Device must support FRWR */
+ if (!(device->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+ return -EOPNOTSUPP;
rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
ibdev_to_node(device));
if (!rds_ibdev)
- return;
+ return -ENOMEM;
spin_lock_init(&rds_ibdev->spinlock);
refcount_set(&rds_ibdev->refcount, 1);
@@ -151,11 +155,6 @@ static void rds_ib_add_one(struct ib_device *device)
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
- has_fr = (device->attrs.device_cap_flags &
- IB_DEVICE_MEM_MGT_EXTENSIONS);
- has_fmr = (device->ops.alloc_fmr && device->ops.dealloc_fmr &&
- device->ops.map_phys_fmr && device->ops.unmap_fmr);
- rds_ibdev->use_fastreg = (has_fr && !has_fmr);
rds_ibdev->odp_capable =
!!(device->attrs.device_cap_flags &
IB_DEVICE_ON_DEMAND_PAGING) &&
@@ -164,7 +163,6 @@ static void rds_ib_add_one(struct ib_device *device)
!!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
IB_ODP_SUPPORT_READ);
- rds_ibdev->fmr_max_remaps = device->attrs.max_map_per_fmr?: 32;
rds_ibdev->max_1m_mrs = device->attrs.max_mr ?
min_t(unsigned int, (device->attrs.max_mr / 2),
rds_ib_mr_1m_pool_size) : rds_ib_mr_1m_pool_size;
@@ -182,12 +180,14 @@ static void rds_ib_add_one(struct ib_device *device)
if (!rds_ibdev->vector_load) {
pr_err("RDS/IB: %s failed to allocate vector memory\n",
__func__);
+ ret = -ENOMEM;
goto put_dev;
}
rds_ibdev->dev = device;
rds_ibdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(rds_ibdev->pd)) {
+ ret = PTR_ERR(rds_ibdev->pd);
rds_ibdev->pd = NULL;
goto put_dev;
}
@@ -195,12 +195,15 @@ static void rds_ib_add_one(struct ib_device *device)
device->dma_device,
sizeof(struct rds_header),
L1_CACHE_BYTES, 0);
- if (!rds_ibdev->rid_hdrs_pool)
+ if (!rds_ibdev->rid_hdrs_pool) {
+ ret = -ENOMEM;
goto put_dev;
+ }
rds_ibdev->mr_1m_pool =
rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
if (IS_ERR(rds_ibdev->mr_1m_pool)) {
+ ret = PTR_ERR(rds_ibdev->mr_1m_pool);
rds_ibdev->mr_1m_pool = NULL;
goto put_dev;
}
@@ -208,18 +211,16 @@ static void rds_ib_add_one(struct ib_device *device)
rds_ibdev->mr_8k_pool =
rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
if (IS_ERR(rds_ibdev->mr_8k_pool)) {
+ ret = PTR_ERR(rds_ibdev->mr_8k_pool);
rds_ibdev->mr_8k_pool = NULL;
goto put_dev;
}
- rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
- device->attrs.max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
- rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_mrs,
- rds_ibdev->max_8k_mrs);
+ rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, max_1m_mrs = %d, max_8k_mrs = %d\n",
+ device->attrs.max_mr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
+ rds_ibdev->max_1m_mrs, rds_ibdev->max_8k_mrs);
- pr_info("RDS/IB: %s: %s supported and preferred\n",
- device->name,
- rds_ibdev->use_fastreg ? "FRMR" : "FMR");
+ pr_info("RDS/IB: %s: added\n", device->name);
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
@@ -227,12 +228,13 @@ static void rds_ib_add_one(struct ib_device *device)
refcount_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
- refcount_inc(&rds_ibdev->refcount);
rds_ib_nodev_connect();
+ return 0;
put_dev:
rds_ib_dev_put(rds_ibdev);
+ return ret;
}
/*
@@ -274,9 +276,6 @@ static void rds_ib_remove_one(struct ib_device *device, void *client_data)
{
struct rds_ib_device *rds_ibdev = client_data;
- if (!rds_ibdev)
- return;
-
rds_ib_dev_shutdown(rds_ibdev);
/* stop connection attempts from getting a reference to this device. */
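
Aside: with FMR support removed, the client add callback above now reports why a device was not taken instead of returning void: unsuitable hardware yields -EOPNOTSUPP and allocation failures propagate their errno. A minimal sketch of that pattern, using an illustrative toy_device rather than struct ib_device, might be:

    #include <errno.h>
    #include <stdio.h>

    /* Toy device descriptor (assumption: illustrative, not struct ib_device). */
    struct toy_device {
            unsigned long cap_flags;
    };

    #define TOY_CAP_MEM_MGT_EXTENSIONS (1UL << 0)  /* stands in for FRWR support */

    /* New-style add callback: tell the core why the client skipped a device
     * rather than silently ignoring it. */
    static int toy_add_one(const struct toy_device *dev)
    {
            if (!(dev->cap_flags & TOY_CAP_MEM_MGT_EXTENSIONS))
                    return -EOPNOTSUPP;

            /* ... allocate per-device state here, return -ENOMEM on failure ... */
            return 0;
    }

    int main(void)
    {
            struct toy_device frwr  = { .cap_flags = TOY_CAP_MEM_MGT_EXTENSIONS };
            struct toy_device plain = { .cap_flags = 0 };

            printf("frwr device:  %d\n", toy_add_one(&frwr));
            printf("plain device: %d\n", toy_add_one(&plain));
            return 0;
    }
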
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 0296f1f7acda..5ae069d39eab 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -247,13 +247,11 @@ struct rds_ib_device {
struct ib_device *dev;
struct ib_pd *pd;
struct dma_pool *rid_hdrs_pool; /* RDS headers DMA pool */
- u8 use_fastreg:1;
u8 odp_capable:1;
unsigned int max_mrs;
struct rds_ib_mr_pool *mr_1m_pool;
struct rds_ib_mr_pool *mr_8k_pool;
- unsigned int fmr_max_remaps;
unsigned int max_8k_mrs;
unsigned int max_1m_mrs;
int max_sge;
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index c71f4328d138..c3319ff3ee11 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -37,6 +37,7 @@
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include <net/addrconf.h>
+#include <rdma/ib_cm.h>
#include "rds_single_path.h"
#include "rds.h"
@@ -526,10 +527,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
return -EOPNOTSUPP;
/* The fr_queue_space is currently set to 512, to add extra space on
- * completion queue and send queue. This extra space is used for FRMR
+ * completion queue and send queue. This extra space is used for FRWR
* registration and invalidation work requests
*/
- fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
+ fr_queue_space = RDS_IB_DEFAULT_FR_WR;
/* add the conn now so that connection establishment has the dev */
rds_ib_add_conn(rds_ibdev, conn);
@@ -927,7 +928,8 @@ out:
if (conn)
mutex_unlock(&conn->c_cm_lock);
if (err)
- rdma_reject(cm_id, &err, sizeof(int));
+ rdma_reject(cm_id, &err, sizeof(int),
+ IB_CM_REJ_CONSUMER_DEFINED);
return destroy;
}
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
deleted file mode 100644
index 93c0437e6a5f..000000000000
--- a/net/rds/ib_fmr.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (c) 2016 Oracle. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ib_mr.h"
-
-struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
-{
- struct rds_ib_mr_pool *pool;
- struct rds_ib_mr *ibmr = NULL;
- struct rds_ib_fmr *fmr;
- int err = 0;
-
- if (npages <= RDS_MR_8K_MSG_SIZE)
- pool = rds_ibdev->mr_8k_pool;
- else
- pool = rds_ibdev->mr_1m_pool;
-
- if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
- /* Switch pools if one of the pool is reaching upper limit */
- if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
- if (pool->pool_type == RDS_IB_MR_8K_POOL)
- pool = rds_ibdev->mr_1m_pool;
- else
- pool = rds_ibdev->mr_8k_pool;
- }
-
- ibmr = rds_ib_try_reuse_ibmr(pool);
- if (ibmr)
- return ibmr;
-
- ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
- rdsibdev_to_node(rds_ibdev));
- if (!ibmr) {
- err = -ENOMEM;
- goto out_no_cigar;
- }
-
- fmr = &ibmr->u.fmr;
- fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
- (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_ATOMIC),
- &pool->fmr_attr);
- if (IS_ERR(fmr->fmr)) {
- err = PTR_ERR(fmr->fmr);
- fmr->fmr = NULL;
- pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
- goto out_no_cigar;
- }
-
- ibmr->pool = pool;
- if (pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
-
- return ibmr;
-
-out_no_cigar:
- kfree(ibmr);
- atomic_dec(&pool->item_count);
-
- return ERR_PTR(err);
-}
-
-static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev,
- struct rds_ib_mr *ibmr, struct scatterlist *sg,
- unsigned int nents)
-{
- struct ib_device *dev = rds_ibdev->dev;
- struct rds_ib_fmr *fmr = &ibmr->u.fmr;
- struct scatterlist *scat = sg;
- u64 io_addr = 0;
- u64 *dma_pages;
- u32 len;
- int page_cnt, sg_dma_len;
- int i, j;
- int ret;
-
- sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
- if (unlikely(!sg_dma_len)) {
- pr_warn("RDS/IB: %s failed!\n", __func__);
- return -EBUSY;
- }
-
- len = 0;
- page_cnt = 0;
-
- for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = sg_dma_len(&scat[i]);
- u64 dma_addr = sg_dma_address(&scat[i]);
-
- if (dma_addr & ~PAGE_MASK) {
- if (i > 0) {
- ib_dma_unmap_sg(dev, sg, nents,
- DMA_BIDIRECTIONAL);
- return -EINVAL;
- } else {
- ++page_cnt;
- }
- }
- if ((dma_addr + dma_len) & ~PAGE_MASK) {
- if (i < sg_dma_len - 1) {
- ib_dma_unmap_sg(dev, sg, nents,
- DMA_BIDIRECTIONAL);
- return -EINVAL;
- } else {
- ++page_cnt;
- }
- }
-
- len += dma_len;
- }
-
- page_cnt += len >> PAGE_SHIFT;
- if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
- ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
- return -EINVAL;
- }
-
- dma_pages = kmalloc_array_node(sizeof(u64), page_cnt, GFP_ATOMIC,
- rdsibdev_to_node(rds_ibdev));
- if (!dma_pages) {
- ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
- return -ENOMEM;
- }
-
- page_cnt = 0;
- for (i = 0; i < sg_dma_len; ++i) {
- unsigned int dma_len = sg_dma_len(&scat[i]);
- u64 dma_addr = sg_dma_address(&scat[i]);
-
- for (j = 0; j < dma_len; j += PAGE_SIZE)
- dma_pages[page_cnt++] =
- (dma_addr & PAGE_MASK) + j;
- }
-
- ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
- if (ret) {
- ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
- goto out;
- }
-
- /* Success - we successfully remapped the MR, so we can
- * safely tear down the old mapping.
- */
- rds_ib_teardown_mr(ibmr);
-
- ibmr->sg = scat;
- ibmr->sg_len = nents;
- ibmr->sg_dma_len = sg_dma_len;
- ibmr->remap_count++;
-
- if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
- ret = 0;
-
-out:
- kfree(dma_pages);
-
- return ret;
-}
-
-struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
- struct scatterlist *sg,
- unsigned long nents,
- u32 *key)
-{
- struct rds_ib_mr *ibmr = NULL;
- struct rds_ib_fmr *fmr;
- int ret;
-
- ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
- if (IS_ERR(ibmr))
- return ibmr;
-
- ibmr->device = rds_ibdev;
- fmr = &ibmr->u.fmr;
- ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
- if (ret == 0)
- *key = fmr->fmr->rkey;
- else
- rds_ib_free_mr(ibmr, 0);
-
- return ibmr;
-}
-
-void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
- unsigned long *unpinned, unsigned int goal)
-{
- struct rds_ib_mr *ibmr, *next;
- struct rds_ib_fmr *fmr;
- LIST_HEAD(fmr_list);
- int ret = 0;
- unsigned int freed = *nfreed;
-
- /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
- list_for_each_entry(ibmr, list, unmap_list) {
- fmr = &ibmr->u.fmr;
- list_add(&fmr->fmr->list, &fmr_list);
- }
-
- ret = ib_unmap_fmr(&fmr_list);
- if (ret)
- pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);
-
- /* Now we can destroy the DMA mapping and unpin any pages */
- list_for_each_entry_safe(ibmr, next, list, unmap_list) {
- fmr = &ibmr->u.fmr;
- *unpinned += ibmr->sg_len;
- __rds_ib_teardown_mr(ibmr);
- if (freed < goal ||
- ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
- if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
- rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
- else
- rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
- list_del(&ibmr->unmap_list);
- ib_dealloc_fmr(fmr->fmr);
- kfree(ibmr);
- freed++;
- }
- }
- *nfreed = freed;
-}
-
-void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
-{
- struct rds_ib_mr_pool *pool = ibmr->pool;
-
- if (ibmr->remap_count >= pool->fmr_attr.max_maps)
- llist_add(&ibmr->llnode, &pool->drop_list);
- else
- llist_add(&ibmr->llnode, &pool->free_list);
-}
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 06ecf9d2d4bf..9b6ffff72f2d 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -76,7 +76,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
frmr = &ibmr->u.frmr;
frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
- pool->fmr_attr.max_pages);
+ pool->max_pages);
if (IS_ERR(frmr->mr)) {
pr_warn("RDS/IB: %s failed to allocate MR", __func__);
err = PTR_ERR(frmr->mr);
@@ -240,7 +240,7 @@ static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
}
frmr->dma_npages += len >> PAGE_SHIFT;
- if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
+ if (frmr->dma_npages > ibmr->pool->max_pages) {
ret = -EMSGSIZE;
goto out_unmap;
}
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 0c8252d7fe2b..ea5e9aee4959 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -43,10 +43,6 @@
#define RDS_MR_8K_SCALE (256 / (RDS_MR_8K_MSG_SIZE + 1))
#define RDS_MR_8K_POOL_SIZE (RDS_MR_8K_SCALE * (8192 / 2))
-struct rds_ib_fmr {
- struct ib_fmr *fmr;
-};
-
enum rds_ib_fr_state {
FRMR_IS_FREE, /* mr invalidated & ready for use */
FRMR_IS_INUSE, /* mr is in use or used & can be invalidated */
@@ -84,7 +80,6 @@ struct rds_ib_mr {
u8 odp:1;
union {
- struct rds_ib_fmr fmr;
struct rds_ib_frmr frmr;
struct ib_mr *mr;
} u;
@@ -109,8 +104,7 @@ struct rds_ib_mr_pool {
unsigned long max_items;
unsigned long max_items_soft;
unsigned long max_free_pinned;
- struct ib_fmr_attr fmr_attr;
- bool use_fastreg;
+ unsigned int max_pages;
};
extern struct workqueue_struct *rds_ib_mr_wq;
@@ -136,15 +130,9 @@ u32 rds_ib_get_lkey(void *trans_private);
void __rds_ib_teardown_mr(struct rds_ib_mr *);
void rds_ib_teardown_mr(struct rds_ib_mr *);
-struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *, int);
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *);
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *, int, struct rds_ib_mr **);
-struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *, struct scatterlist *,
- unsigned long, u32 *);
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *);
-void rds_ib_unreg_fmr(struct list_head *, unsigned int *,
- unsigned long *, unsigned int);
-void rds_ib_free_fmr_list(struct rds_ib_mr *);
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
struct rds_ib_connection *ic,
struct scatterlist *sg,
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index b34b24e237f8..8f070ee7e742 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -181,7 +181,7 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
iinfo->rdma_mr_max = pool_1m->max_items;
- iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
+ iinfo->rdma_mr_size = pool_1m->max_pages;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -191,7 +191,7 @@ void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
iinfo6->rdma_mr_max = pool_1m->max_items;
- iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
+ iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif
@@ -406,10 +406,7 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
if (list_empty(&unmap_list))
goto out;
- if (pool->use_fastreg)
- rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
- else
- rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);
+ rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
if (!list_empty(&unmap_list)) {
unsigned long flags;
@@ -503,10 +500,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
}
/* Return it to the pool's free list */
- if (rds_ibdev->use_fastreg)
- rds_ib_free_frmr_list(ibmr);
- else
- rds_ib_free_fmr_list(ibmr);
+ rds_ib_free_frmr_list(ibmr);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
@@ -622,10 +616,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
goto out;
}
- if (rds_ibdev->use_fastreg)
- ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
- else
- ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
+ ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
if (IS_ERR(ibmr)) {
ret = PTR_ERR(ibmr);
pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
@@ -669,19 +660,16 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
if (pool_type == RDS_IB_MR_1M_POOL) {
/* +1 allows for unaligned MRs */
- pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
+ pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
pool->max_items = rds_ibdev->max_1m_mrs;
} else {
/* pool_type == RDS_IB_MR_8K_POOL */
- pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
+ pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
pool->max_items = rds_ibdev->max_8k_mrs;
}
- pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
- pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
- pool->fmr_attr.page_shift = PAGE_SHIFT;
+ pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
- pool->use_fastreg = rds_ibdev->use_fastreg;
return pool;
}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index b1449d971883..112e490ebbcd 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -271,6 +271,9 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
break;
case SO_EE_ORIGIN_ICMP6:
+ if (err == EACCES)
+ err = EHOSTUNREACH;
+ /* Fall through */
default:
_proto("Rx Received error report { orig=%u }", ee->ee_origin);
break;
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 8b179e3c802a..543afd9bd664 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -68,7 +68,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
"Proto Local "
" Remote "
" SvID ConnID CallID End Use State Abort "
- " UserID TxSeq TW RxSeq RW RxSerial RxTimo\n");
+ " DebugId TxSeq TW RxSeq RW RxSerial RxTimo\n");
return 0;
}
@@ -100,7 +100,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
rx_hard_ack = READ_ONCE(call->rx_hard_ack);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
- " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
+ " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
lbuff,
rbuff,
call->service_id,
@@ -110,7 +110,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
atomic_read(&call->usage),
rxrpc_call_states[call->state],
call->abort_code,
- call->user_call_ID,
+ call->debug_id,
tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
call->rx_serial,
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index f0a5064bf9bd..562a52d01ad1 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -548,18 +548,18 @@ static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
static struct ib_client smc_ib_client;
/* callback function for ib_register_client() */
-static void smc_ib_add_dev(struct ib_device *ibdev)
+static int smc_ib_add_dev(struct ib_device *ibdev)
{
struct smc_ib_device *smcibdev;
u8 port_cnt;
int i;
if (ibdev->node_type != RDMA_NODE_IB_CA)
- return;
+ return -EOPNOTSUPP;
smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
if (!smcibdev)
- return;
+ return -ENOMEM;
smcibdev->ibdev = ibdev;
INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
@@ -594,17 +594,14 @@ static void smc_ib_add_dev(struct ib_device *ibdev)
"");
}
schedule_work(&smcibdev->port_event_work);
+ return 0;
}
/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
- struct smc_ib_device *smcibdev;
+ struct smc_ib_device *smcibdev = client_data;
- smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
- if (!smcibdev || smcibdev->ibdev != ibdev)
- return;
- ib_set_client_data(ibdev, &smc_ib_client, NULL);
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
spin_unlock(&smc_ib_devices.lock);
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 8b4d72b1a066..010dcb876f9d 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -82,11 +82,11 @@ static size_t rpc_ntop6(const struct sockaddr *sap,
rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
- if (unlikely((size_t)rc > sizeof(scopebuf)))
+ if (unlikely((size_t)rc >= sizeof(scopebuf)))
return 0;
len += rc;
- if (unlikely(len > buflen))
+ if (unlikely(len >= buflen))
return 0;
strcat(buf, scopebuf);
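
Aside: the > to >= changes above matter because snprintf() returns the length the output would have had, not the length actually stored; a return value equal to the buffer size therefore already indicates truncation. A small standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            char buf[4];
            int rc = snprintf(buf, sizeof(buf), "%s", "abcd");

            /* rc is 4: the string needed 4 bytes plus a NUL, so it did not
             * fit and buf holds the truncated "abc". Treating
             * rc == sizeof(buf) as success would silently accept a clipped
             * result, hence the >= checks in the hunk above. */
            printf("rc=%d buf=\"%s\"\n", rc, buf);
            return 0;
    }
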
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 5748ad0ba1bd..a9f0d17fdb0d 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -81,7 +81,7 @@ static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp)
unsigned int nbits;
nbits = *(unsigned int *)kp->arg;
- return sprintf(buffer, "%u", 1U << nbits);
+ return sprintf(buffer, "%u\n", 1U << nbits);
}
#define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index ac5cac0dd24b..4ecc2a959567 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -254,7 +254,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct
if (IS_ERR(p))
goto err;
done:
- trace_rpcgss_context(ctx->gc_expiry, now, timeout,
+ trace_rpcgss_context(window_size, ctx->gc_expiry, now, timeout,
ctx->gc_acceptor.len, ctx->gc_acceptor.data);
err:
return p;
@@ -697,10 +697,12 @@ retry:
}
schedule();
}
- if (gss_msg->ctx)
+ if (gss_msg->ctx) {
+ trace_rpcgss_ctx_init(gss_cred);
gss_cred_set_ctx(cred, gss_msg->ctx);
- else
+ } else {
err = gss_msg->msg.errno;
+ }
spin_unlock(&pipe->lock);
out_intr:
finish_wait(&gss_msg->waitqueue, &wait);
@@ -1054,11 +1056,11 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
auth->au_verfsize = GSS_VERF_SLACK >> 2;
auth->au_ralign = GSS_VERF_SLACK >> 2;
- auth->au_flags = 0;
+ __set_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags);
auth->au_ops = &authgss_ops;
auth->au_flavor = flavor;
if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
- auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
+ __set_bit(RPCAUTH_AUTH_DATATOUCH, &auth->au_flags);
refcount_set(&auth->au_count, 1);
kref_init(&gss_auth->kref);
@@ -1284,8 +1286,9 @@ gss_send_destroy_context(struct rpc_cred *cred)
if (new) {
ctx->gc_proc = RPC_GSS_PROC_DESTROY;
+ trace_rpcgss_ctx_destroy(gss_cred);
task = rpc_call_null(gss_auth->client, &new->gc_base,
- RPC_TASK_ASYNC|RPC_TASK_SOFT);
+ RPC_TASK_ASYNC);
if (!IS_ERR(task))
rpc_put_task(task);
@@ -1349,7 +1352,6 @@ gss_destroy_nullcred(struct rpc_cred *cred)
static void
gss_destroy_cred(struct rpc_cred *cred)
{
-
if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
gss_send_destroy_context(cred);
gss_destroy_nullcred(cred);
@@ -1613,6 +1615,7 @@ static int gss_renew_cred(struct rpc_task *task)
new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
if (IS_ERR(new))
return PTR_ERR(new);
+
task->tk_rqstp->rq_cred = new;
put_rpccred(oldcred);
return 0;
@@ -1709,7 +1712,8 @@ gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
/* We leave it to unwrap to calculate au_rslack. For now we just
* calculate the length of the verifier: */
- cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
+ if (test_bit(RPCAUTH_AUTH_UPDATE_SLACK, &cred->cr_auth->au_flags))
+ cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
status = 0;
out:
gss_put_ctx(ctx);
@@ -1927,13 +1931,30 @@ out:
return status;
}
-static int
-gss_unwrap_resp_auth(struct rpc_cred *cred)
+/**
+ * gss_update_rslack - Possibly update RPC receive buffer size estimates
+ * @task: rpc_task for incoming RPC Reply being unwrapped
+ * @cred: controlling rpc_cred for @task
+ * @before: XDR words needed before each RPC Reply message
+ * @after: XDR words needed following each RPC Reply message
+ *
+ */
+static void gss_update_rslack(struct rpc_task *task, struct rpc_cred *cred,
+ unsigned int before, unsigned int after)
{
struct rpc_auth *auth = cred->cr_auth;
- auth->au_rslack = auth->au_verfsize;
- auth->au_ralign = auth->au_verfsize;
+ if (test_and_clear_bit(RPCAUTH_AUTH_UPDATE_SLACK, &auth->au_flags)) {
+ auth->au_ralign = auth->au_verfsize + before;
+ auth->au_rslack = auth->au_verfsize + after;
+ trace_rpcgss_update_slack(task, auth);
+ }
+}
+
+static int
+gss_unwrap_resp_auth(struct rpc_task *task, struct rpc_cred *cred)
+{
+ gss_update_rslack(task, cred, 0, 0);
return 0;
}
@@ -1956,7 +1977,6 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
struct xdr_stream *xdr)
{
struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
- struct rpc_auth *auth = cred->cr_auth;
u32 len, offset, seqno, maj_stat;
struct xdr_netobj mic;
int ret;
@@ -2005,8 +2025,7 @@ gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
if (maj_stat != GSS_S_COMPLETE)
goto bad_mic;
- auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
- auth->au_ralign = auth->au_verfsize + 2;
+ gss_update_rslack(task, cred, 2, 2 + 1 + XDR_QUADLEN(mic.len));
ret = 0;
out:
@@ -2031,7 +2050,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
{
struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
struct kvec *head = rqstp->rq_rcv_buf.head;
- struct rpc_auth *auth = cred->cr_auth;
u32 offset, opaque_len, maj_stat;
__be32 *p;
@@ -2058,8 +2076,8 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
*/
xdr_init_decode(xdr, rcv_buf, p, rqstp);
- auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
- auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
+ gss_update_rslack(task, cred, 2 + ctx->gc_gss_ctx->align,
+ 2 + ctx->gc_gss_ctx->slack);
return 0;
unwrap_failed:
@@ -2130,7 +2148,7 @@ gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
goto out_decode;
switch (gss_cred->gc_service) {
case RPC_GSS_SVC_NONE:
- status = gss_unwrap_resp_auth(cred);
+ status = gss_unwrap_resp_auth(task, cred);
break;
case RPC_GSS_SVC_INTEGRITY:
status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 69316ab1b9fa..fae632da1058 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -37,6 +37,8 @@ gss_mech_free(struct gss_api_mech *gm)
for (i = 0; i < gm->gm_pf_num; i++) {
pf = &gm->gm_pfs[i];
+ if (pf->domain)
+ auth_domain_put(pf->domain);
kfree(pf->auth_domain_name);
pf->auth_domain_name = NULL;
}
@@ -59,6 +61,7 @@ make_auth_domain_name(char *name)
static int
gss_mech_svc_setup(struct gss_api_mech *gm)
{
+ struct auth_domain *dom;
struct pf_desc *pf;
int i, status;
@@ -68,10 +71,13 @@ gss_mech_svc_setup(struct gss_api_mech *gm)
status = -ENOMEM;
if (pf->auth_domain_name == NULL)
goto out;
- status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor,
- pf->auth_domain_name);
- if (status)
+ dom = svcauth_gss_register_pseudoflavor(
+ pf->pseudoflavor, pf->auth_domain_name);
+ if (IS_ERR(dom)) {
+ status = PTR_ERR(dom);
goto out;
+ }
+ pf->domain = dom;
}
return 0;
out:
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index 0349f455a862..af9c7f43859c 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -223,7 +223,7 @@ static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
static char *gssp_stringify(struct xdr_netobj *netobj)
{
- return kstrndup(netobj->data, netobj->len, GFP_KERNEL);
+ return kmemdup_nul(netobj->data, netobj->len, GFP_KERNEL);
}
static void gssp_hostbased_service(char **principal)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 50d93c49ef1a..46027d0c903f 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -809,7 +809,7 @@ u32 svcauth_gss_flavor(struct auth_domain *dom)
EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
-int
+struct auth_domain *
svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
struct gss_domain *new;
@@ -826,21 +826,23 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
new->h.flavour = &svcauthops_gss;
new->pseudoflavor = pseudoflavor;
- stat = 0;
test = auth_domain_lookup(name, &new->h);
- if (test != &new->h) { /* Duplicate registration */
+ if (test != &new->h) {
+ pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
+ name);
+ stat = -EADDRINUSE;
auth_domain_put(test);
- kfree(new->h.name);
- goto out_free_dom;
+ goto out_free_name;
}
- return 0;
+ return test;
+out_free_name:
+ kfree(new->h.name);
out_free_dom:
kfree(new);
out:
- return stat;
+ return ERR_PTR(stat);
}
-
EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
static inline int
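
Aside: the hunk above converts svcauth_gss_register_pseudoflavor() from returning an int status to returning the registered auth_domain or an ERR_PTR()-encoded errno, so the caller can keep a reference to the domain it registered. A simplified userspace sketch of that return convention, with toy_* helpers standing in for the real <linux/err.h> macros, might be:

    #include <errno.h>
    #include <stdio.h>

    /* Simplified versions of ERR_PTR()/IS_ERR()/PTR_ERR() (assumption: the
     * real helpers live in <linux/err.h> and reserve only the top page of
     * the address space for error values). */
    #define TOY_MAX_ERRNO 4095

    static inline void *toy_err_ptr(long error)      { return (void *)error; }
    static inline long  toy_ptr_err(const void *ptr) { return (long)ptr; }
    static inline int   toy_is_err(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-TOY_MAX_ERRNO;
    }

    static char toy_domain[] = "toy-domain";   /* stands in for the new object */

    /* New-style registration: return the object on success, or an encoded
     * errno on failure, instead of an int status plus a side effect. */
    static void *toy_register(const char *name)
    {
            if (!name || !name[0])
                    return toy_err_ptr(-EINVAL);
            return toy_domain;
    }

    int main(void)
    {
            void *dom = toy_register("");

            if (toy_is_err(dom))
                    printf("registration failed: %ld\n", toy_ptr_err(dom));
            else
                    printf("registered %s\n", (char *)dom);
            return 0;
    }
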
diff --git a/net/sunrpc/auth_gss/trace.c b/net/sunrpc/auth_gss/trace.c
index 5576f1e66de9..49fa583d7f91 100644
--- a/net/sunrpc/auth_gss/trace.c
+++ b/net/sunrpc/auth_gss/trace.c
@@ -6,6 +6,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/gss_err.h>
+#include <linux/sunrpc/auth_gss.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rpcgss.h>
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 61b21dafd7c0..a91d1cdad9d7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -370,10 +370,6 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
const char *nodename = args->nodename;
int err;
- /* sanity check the name before trying to print it */
- dprintk("RPC: creating %s client for %s (xprt %p)\n",
- program->name, args->servername, xprt);
-
err = rpciod_up();
if (err)
goto out_no_rpciod;
@@ -436,6 +432,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
goto out_no_path;
if (parent)
atomic_inc(&parent->cl_count);
+
+ trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
return clnt;
out_no_path:
@@ -450,6 +448,7 @@ out_err:
out_no_rpciod:
xprt_switch_put(xps);
xprt_put(xprt);
+ trace_rpc_clnt_new_err(program->name, args->servername, err);
return ERR_PTR(err);
}
@@ -634,10 +633,8 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
args->nodename = clnt->cl_nodename;
new = rpc_new_client(args, xps, xprt, clnt);
- if (IS_ERR(new)) {
- err = PTR_ERR(new);
- goto out_err;
- }
+ if (IS_ERR(new))
+ return new;
/* Turn off autobind on clones */
new->cl_autobind = 0;
@@ -650,7 +647,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
return new;
out_err:
- dprintk("RPC: %s: returned error %d\n", __func__, err);
+ trace_rpc_clnt_clone_err(clnt, err);
return ERR_PTR(err);
}
@@ -723,11 +720,8 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt,
int err;
xprt = xprt_create_transport(args);
- if (IS_ERR(xprt)) {
- dprintk("RPC: failed to create new xprt for clnt %p\n",
- clnt);
+ if (IS_ERR(xprt))
return PTR_ERR(xprt);
- }
xps = xprt_switch_alloc(xprt, GFP_KERNEL);
if (xps == NULL) {
@@ -767,7 +761,7 @@ int rpc_switch_client_transport(struct rpc_clnt *clnt,
rpc_release_client(parent);
xprt_switch_put(oldxps);
xprt_put(old);
- dprintk("RPC: replaced xprt for clnt %p\n", clnt);
+ trace_rpc_clnt_replace_xprt(clnt);
return 0;
out_revert:
@@ -777,7 +771,7 @@ out_revert:
rpc_client_register(clnt, pseudoflavor, NULL);
xprt_switch_put(xps);
xprt_put(xprt);
- dprintk("RPC: failed to switch xprt for clnt %p\n", clnt);
+ trace_rpc_clnt_replace_xprt_err(clnt);
return err;
}
EXPORT_SYMBOL_GPL(rpc_switch_client_transport);
@@ -844,10 +838,11 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
if (list_empty(&clnt->cl_tasks))
return;
- dprintk("RPC: killing all tasks for client %p\n", clnt);
+
/*
* Spin lock all_tasks to prevent changes...
*/
+ trace_rpc_clnt_killall(clnt);
spin_lock(&clnt->cl_lock);
list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
rpc_signal_task(rovr);
@@ -863,9 +858,7 @@ void rpc_shutdown_client(struct rpc_clnt *clnt)
{
might_sleep();
- dprintk_rcu("RPC: shutting down %s client for %s\n",
- clnt->cl_program->name,
- rcu_dereference(clnt->cl_xprt)->servername);
+ trace_rpc_clnt_shutdown(clnt);
while (!list_empty(&clnt->cl_tasks)) {
rpc_killall_tasks(clnt);
@@ -884,6 +877,8 @@ static void rpc_free_client_work(struct work_struct *work)
{
struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
+ trace_rpc_clnt_free(clnt);
+
/* These might block on processes that might allocate memory,
* so they cannot be called in rpciod, so they are handled separately
* here.
@@ -901,9 +896,7 @@ rpc_free_client(struct rpc_clnt *clnt)
{
struct rpc_clnt *parent = NULL;
- dprintk_rcu("RPC: destroying %s client for %s\n",
- clnt->cl_program->name,
- rcu_dereference(clnt->cl_xprt)->servername);
+ trace_rpc_clnt_release(clnt);
if (clnt->cl_parent != clnt)
parent = clnt->cl_parent;
rpc_unregister_client(clnt);
@@ -945,8 +938,6 @@ rpc_free_auth(struct rpc_clnt *clnt)
void
rpc_release_client(struct rpc_clnt *clnt)
{
- dprintk("RPC: rpc_release_client(%p)\n", clnt);
-
do {
if (list_empty(&clnt->cl_tasks))
wake_up(&destroy_wait);
@@ -1270,7 +1261,7 @@ void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign - 1;
xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
- trace_rpc_reply_pages(req);
+ trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);
@@ -1624,6 +1615,7 @@ const char
static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
+ trace_rpc_call_rpcerror(task, tk_status, rpc_status);
task->tk_rpc_status = rpc_status;
rpc_exit(task, tk_status);
}
@@ -2531,7 +2523,7 @@ call_decode(struct rpc_task *task)
goto out;
req->rq_rcv_buf.len = req->rq_private_buf.len;
- trace_xprt_recvfrom(&req->rq_rcv_buf);
+ trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
/* Check that the softirq receive buffer is valid */
WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
@@ -2760,7 +2752,8 @@ struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
.rpc_op_cred = cred,
.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
.callback_data = data,
- .flags = flags | RPC_TASK_NULLCREDS,
+ .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
+ RPC_TASK_NULLCREDS,
};
return rpc_run_task(&task_setup_data);
@@ -2823,8 +2816,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
goto success;
}
- task = rpc_call_null_helper(clnt, xprt, NULL,
- RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
+ task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
&rpc_cb_add_xprt_call_ops, data);
rpc_put_task(task);
@@ -2867,9 +2859,7 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt,
goto out_err;
/* Test the connection */
- task = rpc_call_null_helper(clnt, xprt, NULL,
- RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS,
- NULL, NULL);
+ task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL);
if (IS_ERR(task)) {
status = PTR_ERR(task);
goto out_err;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 4a020b688860..c27123e6ba80 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -795,12 +795,6 @@ void rpcb_getport_async(struct rpc_task *task)
child = rpcb_call_async(rpcb_clnt, map, proc);
rpc_release_client(rpcb_clnt);
- if (IS_ERR(child)) {
- /* rpcb_map_release() has freed the arguments */
- dprintk("RPC: %5u %s: rpc_run_task failed\n",
- task->tk_pid, __func__);
- return;
- }
xprt->stat.bind_count++;
rpc_put_task(child);
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
index 47a756503d11..f6fe2e6cd65a 100644
--- a/net/sunrpc/sunrpc.h
+++ b/net/sunrpc/sunrpc.h
@@ -52,4 +52,5 @@ static inline int sock_is_loopback(struct sock *sk)
int rpc_clients_notifier_register(void);
void rpc_clients_notifier_unregister(void);
+void auth_domain_cleanup(void);
#endif /* _NET_SUNRPC_SUNRPC_H */
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index f9edaa9174a4..236fadc4a439 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -23,6 +23,7 @@
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/xprtsock.h>
+#include "sunrpc.h"
#include "netns.h"
unsigned int sunrpc_net_id;
@@ -131,6 +132,7 @@ cleanup_sunrpc(void)
unregister_rpc_pipefs();
rpc_destroy_mempool();
unregister_pernet_subsys(&sunrpc_net_ops);
+ auth_domain_cleanup();
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
rpc_unregister_sysctl();
#endif
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 9ed3126600ce..c211b607239e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -88,15 +88,15 @@ param_get_pool_mode(char *buf, const struct kernel_param *kp)
switch (*ip)
{
case SVC_POOL_AUTO:
- return strlcpy(buf, "auto", 20);
+ return strlcpy(buf, "auto\n", 20);
case SVC_POOL_GLOBAL:
- return strlcpy(buf, "global", 20);
+ return strlcpy(buf, "global\n", 20);
case SVC_POOL_PERCPU:
- return strlcpy(buf, "percpu", 20);
+ return strlcpy(buf, "percpu\n", 20);
case SVC_POOL_PERNODE:
- return strlcpy(buf, "pernode", 20);
+ return strlcpy(buf, "pernode\n", 20);
default:
- return sprintf(buf, "%d", *ip);
+ return sprintf(buf, "%d\n", *ip);
}
}
@@ -991,6 +991,7 @@ static int __svc_register(struct net *net, const char *progname,
#endif
}
+ trace_svc_register(progname, version, protocol, port, family, error);
return error;
}
@@ -1000,11 +1001,6 @@ int svc_rpcbind_set_version(struct net *net,
unsigned short proto,
unsigned short port)
{
- dprintk("svc: svc_register(%sv%d, %s, %u, %u)\n",
- progp->pg_name, version,
- proto == IPPROTO_UDP? "udp" : "tcp",
- port, family);
-
return __svc_register(net, progp->pg_name, progp->pg_prog,
version, family, proto, port);
@@ -1024,11 +1020,8 @@ int svc_generic_rpcbind_set(struct net *net,
return 0;
if (vers->vs_hidden) {
- dprintk("svc: svc_register(%sv%d, %s, %u, %u)"
- " (but not telling portmap)\n",
- progp->pg_name, version,
- proto == IPPROTO_UDP? "udp" : "tcp",
- port, family);
+ trace_svc_noregister(progp->pg_name, version, proto,
+ port, family, 0);
return 0;
}
@@ -1106,8 +1099,7 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi
if (error == -EPROTONOSUPPORT)
error = rpcb_register(net, program, version, 0, 0);
- dprintk("svc: %s(%sv%u), error %d\n",
- __func__, progname, version, error);
+ trace_svc_unregister(progname, version, error);
}
/*
@@ -1132,9 +1124,6 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
continue;
if (progp->pg_vers[i]->vs_hidden)
continue;
-
- dprintk("svc: attempting to unregister %sv%u\n",
- progp->pg_name, i);
__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
}
}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 2284ff038dad..43cf8dbde898 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -153,6 +153,7 @@ static void svc_xprt_free(struct kref *kref)
xprt_put(xprt->xpt_bc_xprt);
if (xprt->xpt_bc_xps)
xprt_switch_put(xprt->xpt_bc_xps);
+ trace_svc_xprt_free(xprt);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
@@ -206,6 +207,7 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
.sin6_port = htons(port),
};
#endif
+ struct svc_xprt *xprt;
struct sockaddr *sap;
size_t len;
@@ -224,7 +226,11 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
return ERR_PTR(-EAFNOSUPPORT);
}
- return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
+ xprt = xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
+ if (IS_ERR(xprt))
+ trace_svc_xprt_create_err(serv->sv_program->pg_name,
+ xcl->xcl_name, sap, xprt);
+ return xprt;
}
/*
@@ -304,15 +310,11 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
{
int err;
- dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
if (err == -EPROTONOSUPPORT) {
request_module("svc%s", xprt_name);
err = _svc_create_xprt(serv, xprt_name, net, family, port, flags, cred);
}
- if (err < 0)
- dprintk("svc: transport %s not found, err %d\n",
- xprt_name, -err);
return err;
}
EXPORT_SYMBOL_GPL(svc_create_xprt);
@@ -780,7 +782,6 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
int len = 0;
if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
- dprintk("svc_recv: found XPT_CLOSE\n");
if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
svc_delete_xprt(xprt);
@@ -799,6 +800,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
if (newxpt) {
newxpt->xpt_cred = get_cred(xprt->xpt_cred);
svc_add_new_temp_xprt(serv, newxpt);
+ trace_svc_xprt_accept(newxpt, serv->sv_name);
} else
module_put(xprt->xpt_class->xcl_owner);
} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
@@ -812,7 +814,7 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
else
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
if (len > 0)
- trace_svc_recvfrom(&rqstp->rq_arg);
+ trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
rqstp->rq_stime = ktime_get();
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
@@ -835,14 +837,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
struct svc_serv *serv = rqstp->rq_server;
int len, err;
- dprintk("svc: server %p waiting for data (to = %ld)\n",
- rqstp, timeout);
-
- if (rqstp->rq_xprt)
- printk(KERN_ERR
- "svc_recv: service %p, transport not NULL!\n",
- rqstp);
-
err = svc_alloc_arg(rqstp);
if (err)
goto out;
@@ -890,7 +884,6 @@ EXPORT_SYMBOL_GPL(svc_recv);
void svc_drop(struct svc_rqst *rqstp)
{
trace_svc_drop(rqstp);
- dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt);
svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
@@ -913,17 +906,11 @@ int svc_send(struct svc_rqst *rqstp)
xb->len = xb->head[0].iov_len +
xb->page_len +
xb->tail[0].iov_len;
- trace_svc_sendto(xb);
-
- /* Grab mutex to serialize outgoing data. */
- mutex_lock(&xprt->xpt_mutex);
+ trace_svc_xdr_sendto(rqstp, xb);
trace_svc_stats_latency(rqstp);
- if (test_bit(XPT_DEAD, &xprt->xpt_flags)
- || test_bit(XPT_CLOSE, &xprt->xpt_flags))
- len = -ENOTCONN;
- else
- len = xprt->xpt_ops->xpo_sendto(rqstp);
- mutex_unlock(&xprt->xpt_mutex);
+
+ len = xprt->xpt_ops->xpo_sendto(rqstp);
+
trace_svc_send(rqstp, len);
svc_xprt_release(rqstp);
@@ -1031,11 +1018,10 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
struct svc_serv *serv = xprt->xpt_server;
struct svc_deferred_req *dr;
- /* Only do this once */
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
- BUG();
+ return;
- dprintk("svc: svc_delete_xprt(%p)\n", xprt);
+ trace_svc_xprt_detach(xprt);
xprt->xpt_ops->xpo_detach(xprt);
if (xprt->xpt_bc_xprt)
xprt->xpt_bc_xprt->ops->close(xprt->xpt_bc_xprt);
@@ -1056,6 +1042,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
void svc_close_xprt(struct svc_xprt *xprt)
{
+ trace_svc_xprt_close(xprt);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
/* someone else will have to effect the close */
@@ -1158,16 +1145,15 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
set_bit(XPT_DEFERRED, &xprt->xpt_flags);
if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
spin_unlock(&xprt->xpt_lock);
- dprintk("revisit canceled\n");
+ trace_svc_defer_drop(dr);
svc_xprt_put(xprt);
- trace_svc_drop_deferred(dr);
kfree(dr);
return;
}
- dprintk("revisit queued\n");
dr->xprt = NULL;
list_add(&dr->handle.recent, &xprt->xpt_deferred);
spin_unlock(&xprt->xpt_lock);
+ trace_svc_defer_queue(dr);
svc_xprt_enqueue(xprt);
svc_xprt_put(xprt);
}
@@ -1213,22 +1199,24 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
dr->argslen << 2);
}
+ trace_svc_defer(rqstp);
svc_xprt_get(rqstp->rq_xprt);
dr->xprt = rqstp->rq_xprt;
set_bit(RQ_DROPME, &rqstp->rq_flags);
dr->handle.revisit = svc_revisit;
- trace_svc_defer(rqstp);
return &dr->handle;
}
/*
* recv data from a deferred request into an active one
*/
-static int svc_deferred_recv(struct svc_rqst *rqstp)
+static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
{
struct svc_deferred_req *dr = rqstp->rq_deferred;
+ trace_svc_defer_recv(dr);
+
/* setup iov_base past transport header */
rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2);
/* The iov_len does not include the transport header bytes */
@@ -1259,7 +1247,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt)
struct svc_deferred_req,
handle.recent);
list_del_init(&dr->handle.recent);
- trace_svc_revisit_deferred(dr);
} else
clear_bit(XPT_DEFERRED, &xprt->xpt_flags);
spin_unlock(&xprt->xpt_lock);
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 552617e3467b..998b196b6176 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -21,6 +21,8 @@
#include <trace/events/sunrpc.h>
+#include "sunrpc.h"
+
#define RPCDBG_FACILITY RPCDBG_AUTH
@@ -205,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
return NULL;
}
EXPORT_SYMBOL_GPL(auth_domain_find);
+
+/**
+ * auth_domain_cleanup - check that the auth_domain table is empty
+ *
+ * On module unload the auth_domain_table must be empty. To make it
+ * easier to catch bugs which don't clean up domains properly, we
+ * warn if anything remains in the table at cleanup time.
+ *
+ * Note that we cannot proactively remove the domains at this stage.
+ * The ->release() function might be in a module that has already been
+ * unloaded.
+ */
+
+void auth_domain_cleanup(void)
+{
+ int h;
+ struct auth_domain *hp;
+
+ for (h = 0; h < DN_HASHMAX; h++)
+ hlist_for_each_entry(hp, &auth_domain_table[h], hash)
+ pr_warn("svc: domain %s still present at module unload.\n",
+ hp->name);
+}
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 6c8f802c4261..97c0bddba7a3 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -332,15 +332,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
return 0;
}
-static inline int ip_map_update(struct net *net, struct ip_map *ipm,
- struct unix_domain *udom, time64_t expiry)
-{
- struct sunrpc_net *sn;
-
- sn = net_generic(net, sunrpc_net_id);
- return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
-}
-
void svcauth_unix_purge(struct net *net)
{
struct sunrpc_net *sn;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e7a0037d9b56..5c4ec9386f81 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -45,7 +45,6 @@
#include <net/tcp_states.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
-#include <trace/events/skb.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
@@ -55,6 +54,8 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xprt.h>
+#include <trace/events/sunrpc.h>
+
#include "socklib.h"
#include "sunrpc.h"
@@ -108,31 +109,35 @@ static void svc_reclassify_socket(struct socket *sock)
}
#endif
-/*
- * Release an skbuff after use
+/**
+ * svc_tcp_release_rqst - Release transport-related resources
+ * @rqstp: request structure with resources to be released
+ *
*/
-static void svc_release_skb(struct svc_rqst *rqstp)
+static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
{
struct sk_buff *skb = rqstp->rq_xprt_ctxt;
if (skb) {
struct svc_sock *svsk =
container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
- rqstp->rq_xprt_ctxt = NULL;
- dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
+ rqstp->rq_xprt_ctxt = NULL;
skb_free_datagram_locked(svsk->sk_sk, skb);
}
}
-static void svc_release_udp_skb(struct svc_rqst *rqstp)
+/**
+ * svc_udp_release_rqst - Release transport-related resources
+ * @rqstp: request structure with resources to be released
+ *
+ */
+static void svc_udp_release_rqst(struct svc_rqst *rqstp)
{
struct sk_buff *skb = rqstp->rq_xprt_ctxt;
if (skb) {
rqstp->rq_xprt_ctxt = NULL;
-
- dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
consume_skb(skb);
}
}
@@ -218,34 +223,68 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
return len;
}
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek)
+{
+ struct bvec_iter bi = {
+ .bi_size = size,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
+ for_each_bvec(bv, bvec, bi, bi)
+ flush_dcache_page(bv.bv_page);
+}
+#else
+static inline void svc_flush_bvec(const struct bio_vec *bvec, size_t size,
+ size_t seek)
+{
+}
+#endif
+
/*
- * Generic recvfrom routine.
+ * Read from @rqstp's transport socket. The incoming message fills whole
+ * pages in @rqstp's rq_pages array until the last page of the message
+ * has been received into a partial page.
*/
-static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
- unsigned int nr, size_t buflen, unsigned int base)
+static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
+ size_t seek)
{
struct svc_sock *svsk =
container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+ struct bio_vec *bvec = rqstp->rq_bvec;
struct msghdr msg = { NULL };
+ unsigned int i;
ssize_t len;
+ size_t t;
rqstp->rq_xprt_hlen = 0;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- iov_iter_kvec(&msg.msg_iter, READ, iov, nr, buflen);
- if (base != 0) {
- iov_iter_advance(&msg.msg_iter, base);
- buflen -= base;
+
+ for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE) {
+ bvec[i].bv_page = rqstp->rq_pages[i];
+ bvec[i].bv_len = PAGE_SIZE;
+ bvec[i].bv_offset = 0;
+ }
+ rqstp->rq_respages = &rqstp->rq_pages[i];
+ rqstp->rq_next_page = rqstp->rq_respages + 1;
+
+ iov_iter_bvec(&msg.msg_iter, READ, bvec, i, buflen);
+ if (seek) {
+ iov_iter_advance(&msg.msg_iter, seek);
+ buflen -= seek;
}
len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
+ if (len > 0)
+ svc_flush_bvec(bvec, len, seek);
+
/* If we read a full record, then assume there may be more
* data to read (stream based sockets only!)
*/
if (len == buflen)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- dprintk("svc: socket %p recvfrom(%p, %zu) = %zd\n",
- svsk, iov[0].iov_base, iov[0].iov_len, len);
return len;
}
@@ -282,13 +321,10 @@ static void svc_data_ready(struct sock *sk)
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
if (svsk) {
- dprintk("svc: socket %p(inet %p), busy=%d\n",
- svsk, sk,
- test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
-
/* Refer to svc_setup_socket() for details. */
rmb();
svsk->sk_odata(sk);
+ trace_svcsock_data_ready(&svsk->sk_xprt, 0);
if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
svc_xprt_enqueue(&svsk->sk_xprt);
}
@@ -302,11 +338,9 @@ static void svc_write_space(struct sock *sk)
struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);
if (svsk) {
- dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
- svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
-
/* Refer to svc_setup_socket() for details. */
rmb();
+ trace_svcsock_write_space(&svsk->sk_xprt, 0);
svsk->sk_owspace(sk);
svc_xprt_enqueue(&svsk->sk_xprt);
}
@@ -383,8 +417,15 @@ static int svc_udp_get_dest_address(struct svc_rqst *rqstp,
return 0;
}
-/*
- * Receive a datagram from a UDP socket.
+/**
+ * svc_udp_recvfrom - Receive a datagram from a UDP socket.
+ * @rqstp: request structure into which to receive an RPC Call
+ *
+ * Called in a loop when XPT_DATA has been set.
+ *
+ * Returns:
+ * On success, the number of bytes in a received RPC Call, or
+ * %0 if a complete RPC Call message was not ready to return
*/
static int svc_udp_recvfrom(struct svc_rqst *rqstp)
{
@@ -418,20 +459,14 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3);
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- skb = NULL;
err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
0, 0, MSG_PEEK | MSG_DONTWAIT);
- if (err >= 0)
- skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
-
- if (skb == NULL) {
- if (err != -EAGAIN) {
- /* possibly an icmp error */
- dprintk("svc: recvfrom returned error %d\n", -err);
- set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- }
- return 0;
- }
+ if (err < 0)
+ goto out_recv_err;
+ skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err);
+ if (!skb)
+ goto out_recv_err;
+
len = svc_addr_len(svc_addr(rqstp));
rqstp->rq_addrlen = len;
if (skb->tstamp == 0) {
@@ -442,26 +477,21 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
sock_write_timestamp(svsk->sk_sk, skb->tstamp);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
- len = skb->len;
+ len = skb->len;
rqstp->rq_arg.len = len;
+ trace_svcsock_udp_recv(&svsk->sk_xprt, len);
rqstp->rq_prot = IPPROTO_UDP;
- if (!svc_udp_get_dest_address(rqstp, cmh)) {
- net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
- cmh->cmsg_level, cmh->cmsg_type);
- goto out_free;
- }
+ if (!svc_udp_get_dest_address(rqstp, cmh))
+ goto out_cmsg_err;
rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
if (skb_is_nonlinear(skb)) {
/* we have to copy */
local_bh_disable();
- if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
- local_bh_enable();
- /* checksum error */
- goto out_free;
- }
+ if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb))
+ goto out_bh_enable;
local_bh_enable();
consume_skb(skb);
} else {
@@ -489,6 +519,20 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
serv->sv_stats->netudpcnt++;
return len;
+
+out_recv_err:
+ if (err != -EAGAIN) {
+ /* possibly an icmp error */
+ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ }
+ trace_svcsock_udp_recv_err(&svsk->sk_xprt, err);
+ return 0;
+out_cmsg_err:
+ net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
+ cmh->cmsg_level, cmh->cmsg_type);
+ goto out_free;
+out_bh_enable:
+ local_bh_enable();
out_free:
kfree_skb(skb);
return 0;
@@ -498,6 +542,9 @@ out_free:
* svc_udp_sendto - Send out a reply on a UDP socket
* @rqstp: completed svc_rqst
*
+ * xpt_mutex ensures @rqstp's whole message is written to the socket
+ * without interruption.
+ *
* Returns the number of bytes sent, or a negative errno.
*/
static int svc_udp_sendto(struct svc_rqst *rqstp)
@@ -519,10 +566,15 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
unsigned int uninitialized_var(sent);
int err;
- svc_release_udp_skb(rqstp);
+ svc_udp_release_rqst(rqstp);
svc_set_cmsg_data(rqstp, cmh);
+ mutex_lock(&xprt->xpt_mutex);
+
+ if (svc_xprt_is_dead(xprt))
+ goto out_notconn;
+
err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
xdr_free_bvec(xdr);
if (err == -ECONNREFUSED) {
@@ -530,9 +582,16 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
xdr_free_bvec(xdr);
}
+ trace_svcsock_udp_send(xprt, err);
+
+ mutex_unlock(&xprt->xpt_mutex);
if (err < 0)
return err;
return sent;
+
+out_notconn:
+ mutex_unlock(&xprt->xpt_mutex);
+ return -ENOTCONN;
}
static int svc_udp_has_wspace(struct svc_xprt *xprt)
@@ -576,7 +635,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
.xpo_recvfrom = svc_udp_recvfrom,
.xpo_sendto = svc_udp_sendto,
.xpo_read_payload = svc_sock_read_payload,
- .xpo_release_rqst = svc_release_udp_skb,
+ .xpo_release_rqst = svc_udp_release_rqst,
.xpo_detach = svc_sock_detach,
.xpo_free = svc_sock_free,
.xpo_has_wspace = svc_udp_has_wspace,
@@ -632,9 +691,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
- dprintk("svc: socket %p TCP (listen) state change %d\n",
- sk, sk->sk_state);
-
if (svsk) {
/* Refer to svc_setup_socket() for details. */
rmb();
@@ -655,8 +711,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
if (svsk) {
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
- } else
- printk("svc: socket %p: no user data\n", sk);
+ }
}
}
@@ -667,15 +722,11 @@ static void svc_tcp_state_change(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
- dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
- sk, sk->sk_state, sk->sk_user_data);
-
- if (!svsk)
- printk("svc: socket %p: no user data\n", sk);
- else {
+ if (svsk) {
/* Refer to svc_setup_socket() for details. */
rmb();
svsk->sk_ostate(sk);
+ trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock);
if (sk->sk_state != TCP_ESTABLISHED) {
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
@@ -696,9 +747,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
struct socket *newsock;
struct svc_sock *newsvsk;
int err, slen;
- RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
- dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
if (!sock)
return NULL;
@@ -711,30 +760,18 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
else if (err != -EAGAIN)
net_warn_ratelimited("%s: accept failed (err %d)!\n",
serv->sv_name, -err);
+ trace_svcsock_accept_err(xprt, serv->sv_name, err);
return NULL;
}
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
err = kernel_getpeername(newsock, sin);
if (err < 0) {
- net_warn_ratelimited("%s: peername failed (err %d)!\n",
- serv->sv_name, -err);
+ trace_svcsock_getpeername_err(xprt, serv->sv_name, err);
goto failed; /* aborted connection or whatever */
}
slen = err;
- /* Ideally, we would want to reject connections from unauthorized
- * hosts here, but when we get encryption, the IP of the host won't
- * tell us anything. For now just warn about unpriv connections.
- */
- if (!svc_port_is_privileged(sin)) {
- dprintk("%s: connect from unprivileged port: %s\n",
- serv->sv_name,
- __svc_print_addr(sin, buf, sizeof(buf)));
- }
- dprintk("%s: connect from %s\n", serv->sv_name,
- __svc_print_addr(sin, buf, sizeof(buf)));
-
/* Reset the inherited callbacks before calling svc_setup_socket */
newsock->sk->sk_state_change = svsk->sk_ostate;
newsock->sk->sk_data_ready = svsk->sk_odata;
@@ -752,10 +789,8 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
svc_xprt_set_remote(&newsvsk->sk_xprt, sin, slen);
err = kernel_getsockname(newsock, sin);
slen = err;
- if (unlikely(err < 0)) {
- dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
+ if (unlikely(err < 0))
slen = offsetof(struct sockaddr, sa_data);
- }
svc_xprt_set_local(&newsvsk->sk_xprt, sin, slen);
if (sock_is_loopback(newsock->sk))
@@ -772,13 +807,14 @@ failed:
return NULL;
}
-static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
+static size_t svc_tcp_restore_pages(struct svc_sock *svsk,
+ struct svc_rqst *rqstp)
{
- unsigned int i, len, npages;
+ size_t len = svsk->sk_datalen;
+ unsigned int i, npages;
- if (svsk->sk_datalen == 0)
+ if (!len)
return 0;
- len = svsk->sk_datalen;
npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
if (rqstp->rq_pages[i] != NULL)
@@ -827,47 +863,45 @@ out:
}
/*
- * Receive fragment record header.
- * If we haven't gotten the record length yet, get the next four bytes.
+ * Receive fragment record header into sk_marker.
*/
-static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
+static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
+ struct svc_rqst *rqstp)
{
- struct svc_serv *serv = svsk->sk_xprt.xpt_server;
- unsigned int want;
- int len;
+ ssize_t want, len;
+ /* If we haven't gotten the record length yet,
+ * get the next four bytes.
+ */
if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
+ struct msghdr msg = { NULL };
struct kvec iov;
want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
- iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
+ iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
- len = svc_recvfrom(rqstp, &iov, 1, want, 0);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, want);
+ len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT);
if (len < 0)
- goto error;
+ return len;
svsk->sk_tcplen += len;
-
if (len < want) {
- dprintk("svc: short recvfrom while reading record "
- "length (%d of %d)\n", len, want);
- return -EAGAIN;
+ /* call again to read the remaining bytes */
+ goto err_short;
}
-
- dprintk("svc: TCP record, %d bytes\n", svc_sock_reclen(svsk));
+ trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker);
if (svc_sock_reclen(svsk) + svsk->sk_datalen >
- serv->sv_max_mesg) {
- net_notice_ratelimited("RPC: fragment too large: %d\n",
- svc_sock_reclen(svsk));
- goto err_delete;
- }
+ svsk->sk_xprt.xpt_server->sv_max_mesg)
+ goto err_too_large;
}
-
return svc_sock_reclen(svsk);
-error:
- dprintk("RPC: TCP recv_record got %d\n", len);
- return len;
-err_delete:
+
+err_too_large:
+ net_notice_ratelimited("svc: %s %s RPC fragment too large: %d\n",
+ __func__, svsk->sk_xprt.xpt_server->sv_name,
+ svc_sock_reclen(svsk));
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+err_short:
return -EAGAIN;
}
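
For reference, a small standalone sketch (editorial illustration, not part of the patch) of the 4-byte record marker this helper accumulates in sk_marker: per RFC 5531 record marking, the top bit flags the final fragment of a record and the low 31 bits carry the fragment length, sent in network byte order. The example value is arbitrary.

/* Illustration only: decode an RPC-over-TCP record marker. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t marker = htonl(0x80000164);	/* final fragment, 356 bytes */
	uint32_t host = ntohl(marker);

	printf("last fragment: %s, length: %u bytes\n",
	       (host & 0x80000000) ? "yes" : "no", host & 0x7fffffff);
	return 0;
}
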
@@ -916,87 +950,58 @@ unlock_eagain:
return -EAGAIN;
}
-static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
-{
- int i = 0;
- int t = 0;
-
- while (t < len) {
- vec[i].iov_base = page_address(pages[i]);
- vec[i].iov_len = PAGE_SIZE;
- i++;
- t += PAGE_SIZE;
- }
- return i;
-}
-
static void svc_tcp_fragment_received(struct svc_sock *svsk)
{
/* If we have more data, signal svc_xprt_enqueue() to try again */
- dprintk("svc: TCP %s record (%d bytes)\n",
- svc_sock_final_rec(svsk) ? "final" : "nonfinal",
- svc_sock_reclen(svsk));
svsk->sk_tcplen = 0;
- svsk->sk_reclen = 0;
+ svsk->sk_marker = xdr_zero;
}
-/*
- * Receive data from a TCP socket.
+/**
+ * svc_tcp_recvfrom - Receive data from a TCP socket
+ * @rqstp: request structure into which to receive an RPC Call
+ *
+ * Called in a loop when XPT_DATA has been set.
+ *
+ * Read the 4-byte stream record marker, then use the record length
+ * in that marker to set up exactly the resources needed to receive
+ * the next RPC message into @rqstp.
+ *
+ * Returns:
+ * On success, the number of bytes in a received RPC Call, or
+ * %0 if a complete RPC Call message was not ready to return
+ *
+ * The zero return case handles partial receives and callback Replies.
+ * The state of a partial receive is preserved in the svc_sock for
+ * the next call to svc_tcp_recvfrom.
*/
static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
struct svc_sock *svsk =
container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
- int len;
- struct kvec *vec;
- unsigned int want, base;
+ size_t want, base;
+ ssize_t len;
__be32 *p;
__be32 calldir;
- int pnum;
-
- dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
- svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
- len = svc_tcp_recv_record(svsk, rqstp);
+ clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ len = svc_tcp_read_marker(svsk, rqstp);
if (len < 0)
goto error;
base = svc_tcp_restore_pages(svsk, rqstp);
- want = svc_sock_reclen(svsk) - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
-
- vec = rqstp->rq_vec;
-
- pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0], base + want);
-
- rqstp->rq_respages = &rqstp->rq_pages[pnum];
- rqstp->rq_next_page = rqstp->rq_respages + 1;
-
- /* Now receive data */
- len = svc_recvfrom(rqstp, vec, pnum, base + want, base);
+ want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr));
+ len = svc_tcp_read_msg(rqstp, base + want, base);
if (len >= 0) {
+ trace_svcsock_tcp_recv(&svsk->sk_xprt, len);
svsk->sk_tcplen += len;
svsk->sk_datalen += len;
}
- if (len != want || !svc_sock_final_rec(svsk)) {
- svc_tcp_save_pages(svsk, rqstp);
- if (len < 0 && len != -EAGAIN)
- goto err_delete;
- if (len == want)
- svc_tcp_fragment_received(svsk);
- else
- dprintk("svc: incomplete TCP record (%d of %d)\n",
- (int)(svsk->sk_tcplen - sizeof(rpc_fraghdr)),
- svc_sock_reclen(svsk));
- goto err_noclose;
- }
-
- if (svsk->sk_datalen < 8) {
- svsk->sk_datalen = 0;
- goto err_delete; /* client is nuts. */
- }
+ if (len != want || !svc_sock_final_rec(svsk))
+ goto err_incomplete;
+ if (svsk->sk_datalen < 8)
+ goto err_nuts;
rqstp->rq_arg.len = svsk->sk_datalen;
rqstp->rq_arg.page_base = 0;
@@ -1031,14 +1036,26 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
return rqstp->rq_arg.len;
+err_incomplete:
+ svc_tcp_save_pages(svsk, rqstp);
+ if (len < 0 && len != -EAGAIN)
+ goto err_delete;
+ if (len == want)
+ svc_tcp_fragment_received(svsk);
+ else
+ trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
+ svc_sock_reclen(svsk),
+ svsk->sk_tcplen - sizeof(rpc_fraghdr));
+ goto err_noclose;
error:
if (len != -EAGAIN)
goto err_delete;
- dprintk("RPC: TCP recvfrom got EAGAIN\n");
+ trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0);
return 0;
+err_nuts:
+ svsk->sk_datalen = 0;
err_delete:
- printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
- svsk->sk_xprt.xpt_server->sv_name, -len);
+ trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len);
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
err_noclose:
return 0; /* record not complete */
@@ -1048,6 +1065,9 @@ err_noclose:
* svc_tcp_sendto - Send out a reply on a TCP socket
* @rqstp: completed svc_rqst
*
+ * xpt_mutex ensures @rqstp's whole message is written to the socket
+ * without interruption.
+ *
* Returns the number of bytes sent, or a negative errno.
*/
static int svc_tcp_sendto(struct svc_rqst *rqstp)
@@ -1063,14 +1083,22 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
unsigned int uninitialized_var(sent);
int err;
- svc_release_skb(rqstp);
+ svc_tcp_release_rqst(rqstp);
+ mutex_lock(&xprt->xpt_mutex);
+ if (svc_xprt_is_dead(xprt))
+ goto out_notconn;
err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
xdr_free_bvec(xdr);
+ trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
if (err < 0 || sent != (xdr->len + sizeof(marker)))
goto out_close;
+ mutex_unlock(&xprt->xpt_mutex);
return sent;
+out_notconn:
+ mutex_unlock(&xprt->xpt_mutex);
+ return -ENOTCONN;
out_close:
pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
xprt->xpt_server->sv_name,
@@ -1078,6 +1106,7 @@ out_close:
(err < 0) ? err : sent, xdr->len);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
+ mutex_unlock(&xprt->xpt_mutex);
return -EAGAIN;
}
@@ -1094,7 +1123,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
.xpo_recvfrom = svc_tcp_recvfrom,
.xpo_sendto = svc_tcp_sendto,
.xpo_read_payload = svc_sock_read_payload,
- .xpo_release_rqst = svc_release_skb,
+ .xpo_release_rqst = svc_tcp_release_rqst,
.xpo_detach = svc_tcp_sock_detach,
.xpo_free = svc_sock_free,
.xpo_has_wspace = svc_tcp_has_wspace,
@@ -1132,18 +1161,16 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags);
if (sk->sk_state == TCP_LISTEN) {
- dprintk("setting up TCP socket for listening\n");
strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
sk->sk_data_ready = svc_tcp_listen_data_ready;
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
} else {
- dprintk("setting up TCP socket for reading\n");
sk->sk_state_change = svc_tcp_state_change;
sk->sk_data_ready = svc_data_ready;
sk->sk_write_space = svc_write_space;
- svsk->sk_reclen = 0;
+ svsk->sk_marker = xdr_zero;
svsk->sk_tcplen = 0;
svsk->sk_datalen = 0;
memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
@@ -1188,7 +1215,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
int err = 0;
- dprintk("svc: svc_setup_socket %p\n", sock);
svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
if (!svsk)
return ERR_PTR(-ENOMEM);
@@ -1225,12 +1251,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
else
svc_tcp_init(svsk, serv);
- dprintk("svc: svc_setup_socket created %p (inet %p), "
- "listen %d close %d\n",
- svsk, svsk->sk_sk,
- test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
-
+ trace_svcsock_new_socket(sock);
return svsk;
}
@@ -1322,11 +1343,6 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
struct sockaddr *newsin = (struct sockaddr *)&addr;
int newlen;
int family;
- RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-
- dprintk("svc: svc_create_socket(%s, %d, %s)\n",
- serv->sv_program->pg_name, protocol,
- __svc_print_addr(sin, buf, sizeof(buf)));
if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
printk(KERN_WARNING "svc: only UDP and TCP "
@@ -1383,7 +1399,6 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen);
return (struct svc_xprt *)svsk;
bummer:
- dprintk("svc: svc_create_socket error = %d\n", -error);
sock_release(sock);
return ERR_PTR(error);
}
@@ -1397,8 +1412,6 @@ static void svc_sock_detach(struct svc_xprt *xprt)
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
struct sock *sk = svsk->sk_sk;
- dprintk("svc: svc_sock_detach(%p)\n", svsk);
-
/* put back the old socket callbacks */
lock_sock(sk);
sk->sk_state_change = svsk->sk_ostate;
@@ -1415,8 +1428,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- dprintk("svc: svc_tcp_sock_detach(%p)\n", svsk);
-
svc_sock_detach(xprt);
if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
@@ -1431,7 +1442,6 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
static void svc_sock_free(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- dprintk("svc: svc_sock_free(%p)\n", svsk);
if (svsk->sk_sock->file)
sockfd_put(svsk->sk_sock);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 493a30a296fc..d5cc5db9dbf3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -663,6 +663,7 @@ static void xprt_autoclose(struct work_struct *work)
container_of(work, struct rpc_xprt, task_cleanup);
unsigned int pflags = memalloc_nofs_save();
+ trace_xprt_disconnect_auto(xprt);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
xprt->ops->close(xprt);
xprt_release_write(xprt, NULL);
@@ -677,7 +678,7 @@ static void xprt_autoclose(struct work_struct *work)
*/
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
- dprintk("RPC: disconnected transport %p\n", xprt);
+ trace_xprt_disconnect_done(xprt);
spin_lock(&xprt->transport_lock);
xprt_clear_connected(xprt);
xprt_clear_write_space_locked(xprt);
@@ -694,6 +695,8 @@ EXPORT_SYMBOL_GPL(xprt_disconnect_done);
*/
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
+ trace_xprt_disconnect_force(xprt);
+
/* Don't race with the test_bit() in xprt_clear_locked() */
spin_lock(&xprt->transport_lock);
set_bit(XPRT_CLOSE_WAIT, &xprt->state);
@@ -832,8 +835,10 @@ void xprt_connect(struct rpc_task *task)
if (!xprt_lock_write(xprt, task))
return;
- if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
+ trace_xprt_disconnect_cleanup(xprt);
xprt->ops->close(xprt);
+ }
if (!xprt_connected(xprt)) {
task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
@@ -1460,7 +1465,7 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
*/
req->rq_ntrans++;
- trace_xprt_sendto(&req->rq_snd_buf);
+ trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
connect_cookie = xprt->connect_cookie;
status = xprt->ops->send_request(req);
if (status != 0) {
@@ -1903,11 +1908,8 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
found:
xprt = t->setup(args);
- if (IS_ERR(xprt)) {
- dprintk("RPC: xprt_create_transport: failed, %ld\n",
- -PTR_ERR(xprt));
+ if (IS_ERR(xprt))
goto out;
- }
if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
xprt->idle_timeout = 0;
INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
@@ -1928,8 +1930,7 @@ found:
rpc_xprt_debugfs_register(xprt);
- dprintk("RPC: created transport %p with %u slots\n", xprt,
- xprt->max_reqs);
+ trace_xprt_create(xprt);
out:
return xprt;
}
@@ -1939,6 +1940,8 @@ static void xprt_destroy_cb(struct work_struct *work)
struct rpc_xprt *xprt =
container_of(work, struct rpc_xprt, task_cleanup);
+ trace_xprt_destroy(xprt);
+
rpc_xprt_debugfs_unregister(xprt);
rpc_destroy_wait_queue(&xprt->binding);
rpc_destroy_wait_queue(&xprt->pending);
@@ -1963,8 +1966,6 @@ static void xprt_destroy_cb(struct work_struct *work)
*/
static void xprt_destroy(struct rpc_xprt *xprt)
{
- dprintk("RPC: destroying transport %p\n", xprt);
-
/*
* Exclude transport connect/disconnect handlers and autoclose
*/
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 3c627dc685cc..2081c8fbfa48 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -892,8 +892,8 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
* or privacy, direct data placement of individual data items
* is not allowed.
*/
- ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
- RPCAUTH_AUTH_DATATOUCH);
+ ddp_allowed = !test_bit(RPCAUTH_AUTH_DATATOUCH,
+ &rqst->rq_cred->cr_auth->au_flags);
/*
* Chunks needed for results?
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index af7eb8d202ae..1ee73f7cf931 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -10,59 +10,34 @@
#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
-#define RPCDBG_FACILITY RPCDBG_SVCXPRT
-
-#undef SVCRDMA_BACKCHANNEL_DEBUG
-
/**
- * svc_rdma_handle_bc_reply - Process incoming backchannel reply
- * @xprt: controlling backchannel transport
- * @rdma_resp: pointer to incoming transport header
- * @rcvbuf: XDR buffer into which to decode the reply
+ * svc_rdma_handle_bc_reply - Process incoming backchannel Reply
+ * @rqstp: resources for handling the Reply
+ * @rctxt: Received message
*
- * Returns:
- * %0 if @rcvbuf is filled in, xprt_complete_rqst called,
- * %-EAGAIN if server should call ->recvfrom again.
*/
-int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
- struct xdr_buf *rcvbuf)
+void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp,
+ struct svc_rdma_recv_ctxt *rctxt)
{
+ struct svc_xprt *sxprt = rqstp->rq_xprt;
+ struct rpc_xprt *xprt = sxprt->xpt_bc_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+ struct xdr_buf *rcvbuf = &rqstp->rq_arg;
struct kvec *dst, *src = &rcvbuf->head[0];
+ __be32 *rdma_resp = rctxt->rc_recv_buf;
struct rpc_rqst *req;
u32 credits;
- size_t len;
- __be32 xid;
- __be32 *p;
- int ret;
-
- p = (__be32 *)src->iov_base;
- len = src->iov_len;
- xid = *rdma_resp;
-
-#ifdef SVCRDMA_BACKCHANNEL_DEBUG
- pr_info("%s: xid=%08x, length=%zu\n",
- __func__, be32_to_cpu(xid), len);
- pr_info("%s: RPC/RDMA: %*ph\n",
- __func__, (int)RPCRDMA_HDRLEN_MIN, rdma_resp);
- pr_info("%s: RPC: %*ph\n",
- __func__, (int)len, p);
-#endif
-
- ret = -EAGAIN;
- if (src->iov_len < 24)
- goto out_shortreply;
spin_lock(&xprt->queue_lock);
- req = xprt_lookup_rqst(xprt, xid);
+ req = xprt_lookup_rqst(xprt, *rdma_resp);
if (!req)
- goto out_notfound;
+ goto out_unlock;
dst = &req->rq_private_buf.head[0];
memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
- if (dst->iov_len < len)
+ if (dst->iov_len < src->iov_len)
goto out_unlock;
- memcpy(dst->iov_base, p, len);
+ memcpy(dst->iov_base, src->iov_base, src->iov_len);
xprt_pin_rqst(req);
spin_unlock(&xprt->queue_lock);
@@ -71,31 +46,17 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
credits = 1; /* don't deadlock */
else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
credits = r_xprt->rx_buf.rb_bc_max_requests;
-
spin_lock(&xprt->transport_lock);
xprt->cwnd = credits << RPC_CWNDSHIFT;
spin_unlock(&xprt->transport_lock);
spin_lock(&xprt->queue_lock);
- ret = 0;
xprt_complete_rqst(req->rq_task, rcvbuf->len);
xprt_unpin_rqst(req);
rcvbuf->len = 0;
out_unlock:
spin_unlock(&xprt->queue_lock);
-out:
- return ret;
-
-out_shortreply:
- dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
- xprt, src->iov_len);
- goto out;
-
-out_notfound:
- dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
- xprt, be32_to_cpu(xid));
- goto out_unlock;
}
/* Send a backwards direction RPC call.
@@ -192,10 +153,6 @@ rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
*p++ = xdr_zero;
*p = xdr_zero;
-#ifdef SVCRDMA_BACKCHANNEL_DEBUG
- pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
-#endif
-
rqst->rq_xtime = ktime_get();
rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);
if (rc)
@@ -206,45 +163,36 @@ put_ctxt:
svc_rdma_send_ctxt_put(rdma, ctxt);
drop_connection:
- dprintk("svcrdma: failed to send bc call\n");
return -ENOTCONN;
}
-/* Send an RPC call on the passive end of a transport
- * connection.
+/**
+ * xprt_rdma_bc_send_request - Send a reverse-direction Call
+ * @rqst: rpc_rqst containing Call message to be sent
+ *
+ * Return values:
+ * %0 if the message was sent successfully
+ * %ENOTCONN if the message was not sent
*/
-static int
-xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
+static int xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
{
struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
- struct svcxprt_rdma *rdma;
+ struct svcxprt_rdma *rdma =
+ container_of(sxprt, struct svcxprt_rdma, sc_xprt);
int ret;
- dprintk("svcrdma: sending bc call with xid: %08x\n",
- be32_to_cpu(rqst->rq_xid));
+ if (test_bit(XPT_DEAD, &sxprt->xpt_flags))
+ return -ENOTCONN;
- mutex_lock(&sxprt->xpt_mutex);
-
- ret = -ENOTCONN;
- rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
- if (!test_bit(XPT_DEAD, &sxprt->xpt_flags)) {
- ret = rpcrdma_bc_send_request(rdma, rqst);
- if (ret == -ENOTCONN)
- svc_close_xprt(sxprt);
- }
-
- mutex_unlock(&sxprt->xpt_mutex);
-
- if (ret < 0)
- return ret;
- return 0;
+ ret = rpcrdma_bc_send_request(rdma, rqst);
+ if (ret == -ENOTCONN)
+ svc_close_xprt(sxprt);
+ return ret;
}
static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
- dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
-
xprt_disconnect_done(xprt);
xprt->cwnd = RPC_CWNDSHIFT;
}
@@ -252,8 +200,6 @@ xprt_rdma_bc_close(struct rpc_xprt *xprt)
static void
xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
- dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
-
xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
}
@@ -288,19 +234,14 @@ xprt_setup_rdma_bc(struct xprt_create *args)
struct rpc_xprt *xprt;
struct rpcrdma_xprt *new_xprt;
- if (args->addrlen > sizeof(xprt->addr)) {
- dprintk("RPC: %s: address too large\n", __func__);
+ if (args->addrlen > sizeof(xprt->addr))
return ERR_PTR(-EBADF);
- }
xprt = xprt_alloc(args->net, sizeof(*new_xprt),
RPCRDMA_MAX_BC_REQUESTS,
RPCRDMA_MAX_BC_REQUESTS);
- if (!xprt) {
- dprintk("RPC: %s: couldn't allocate rpc_xprt\n",
- __func__);
+ if (!xprt)
return ERR_PTR(-ENOMEM);
- }
xprt->timeout = &xprt_rdma_bc_timeout;
xprt_set_bound(xprt);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index efa5fcb5793f..e426fedb9524 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -665,23 +665,23 @@ static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
return hdr_len;
out_short:
- trace_svcrdma_decode_short(rq_arg->len);
+ trace_svcrdma_decode_short_err(rq_arg->len);
return -EINVAL;
out_version:
- trace_svcrdma_decode_badvers(rdma_argp);
+ trace_svcrdma_decode_badvers_err(rdma_argp);
return -EPROTONOSUPPORT;
out_drop:
- trace_svcrdma_decode_drop(rdma_argp);
+ trace_svcrdma_decode_drop_err(rdma_argp);
return 0;
out_proc:
- trace_svcrdma_decode_badproc(rdma_argp);
+ trace_svcrdma_decode_badproc_err(rdma_argp);
return -EINVAL;
out_inval:
- trace_svcrdma_decode_parse(rdma_argp);
+ trace_svcrdma_decode_parse_err(rdma_argp);
return -EINVAL;
}
@@ -878,12 +878,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
goto out_drop;
rqstp->rq_xprt_hlen = ret;
- if (svc_rdma_is_backchannel_reply(xprt, p)) {
- ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
- &rqstp->rq_arg);
- svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
- return ret;
- }
+ if (svc_rdma_is_backchannel_reply(xprt, p))
+ goto out_backchannel;
+
svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
p += rpcrdma_fixed_maxsz;
@@ -913,6 +910,8 @@ out_postfail:
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return ret;
+out_backchannel:
+ svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
return 0;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 23c2d3ce0dc9..5eb35309ecef 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -9,13 +9,10 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
-#include <linux/sunrpc/debug.h>
#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
-#define RPCDBG_FACILITY RPCDBG_SVCXPRT
-
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
@@ -39,7 +36,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
struct svc_rdma_rw_ctxt {
struct list_head rw_list;
struct rdma_rw_ctx rw_ctx;
- int rw_nents;
+ unsigned int rw_nents;
struct sg_table rw_sg_table;
struct scatterlist rw_first_sgl[];
};
@@ -67,19 +64,22 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
GFP_KERNEL);
if (!ctxt)
- goto out;
+ goto out_noctx;
INIT_LIST_HEAD(&ctxt->rw_list);
}
ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
ctxt->rw_sg_table.sgl,
- SG_CHUNK_SIZE)) {
- kfree(ctxt);
- ctxt = NULL;
- }
-out:
+ SG_CHUNK_SIZE))
+ goto out_free;
return ctxt;
+
+out_free:
+ kfree(ctxt);
+out_noctx:
+ trace_svcrdma_no_rwctx_err(rdma, sges);
+ return NULL;
}
static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
@@ -107,6 +107,34 @@ void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
}
}
+/**
+ * svc_rdma_rw_ctx_init - Prepare a R/W context for I/O
+ * @rdma: controlling transport instance
+ * @ctxt: R/W context to prepare
+ * @offset: RDMA offset
+ * @handle: RDMA tag/handle
+ * @direction: I/O direction
+ *
+ * Returns, on success, the number of WQEs that will be needed
+ * on the workqueue, or a negative errno.
+ */
+static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
+ struct svc_rdma_rw_ctxt *ctxt,
+ u64 offset, u32 handle,
+ enum dma_data_direction direction)
+{
+ int ret;
+
+ ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num,
+ ctxt->rw_sg_table.sgl, ctxt->rw_nents,
+ 0, offset, handle, direction);
+ if (unlikely(ret < 0)) {
+ svc_rdma_put_rw_ctxt(rdma, ctxt);
+ trace_svcrdma_dma_map_rw_err(rdma, ctxt->rw_nents, ret);
+ }
+ return ret;
+}
+
/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
* for all segments of one chunk.
@@ -428,15 +456,13 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
ctxt = svc_rdma_get_rw_ctxt(rdma,
(write_len >> PAGE_SHIFT) + 2);
if (!ctxt)
- goto out_noctx;
+ return -ENOMEM;
constructor(info, write_len, ctxt);
- ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
- rdma->sc_port_num, ctxt->rw_sg_table.sgl,
- ctxt->rw_nents, 0, seg_offset,
- seg_handle, DMA_TO_DEVICE);
+ ret = svc_rdma_rw_ctx_init(rdma, ctxt, seg_offset, seg_handle,
+ DMA_TO_DEVICE);
if (ret < 0)
- goto out_initerr;
+ return -EIO;
trace_svcrdma_send_wseg(seg_handle, write_len, seg_offset);
@@ -455,18 +481,9 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
return 0;
out_overflow:
- dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
- info->wi_nsegs);
+ trace_svcrdma_small_wrch_err(rdma, remaining, info->wi_seg_no,
+ info->wi_nsegs);
return -E2BIG;
-
-out_noctx:
- dprintk("svcrdma: no R/W ctxs available\n");
- return -ENOMEM;
-
-out_initerr:
- svc_rdma_put_rw_ctxt(rdma, ctxt);
- trace_svcrdma_dma_map_rwctx(rdma, ret);
- return -EIO;
}
/* Send one of an xdr_buf's kvecs by itself. To send a Reply
@@ -616,7 +633,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
if (!ctxt)
- goto out_noctx;
+ return -ENOMEM;
ctxt->rw_nents = sge_no;
sg = ctxt->rw_sg_table.sgl;
@@ -646,29 +663,18 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
goto out_overrun;
}
- ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
- cc->cc_rdma->sc_port_num,
- ctxt->rw_sg_table.sgl, ctxt->rw_nents,
- 0, offset, rkey, DMA_FROM_DEVICE);
+ ret = svc_rdma_rw_ctx_init(cc->cc_rdma, ctxt, offset, rkey,
+ DMA_FROM_DEVICE);
if (ret < 0)
- goto out_initerr;
+ return -EIO;
list_add(&ctxt->rw_list, &cc->cc_rwctxts);
cc->cc_sqecount += ret;
return 0;
-out_noctx:
- dprintk("svcrdma: no R/W ctxs available\n");
- return -ENOMEM;
-
out_overrun:
- dprintk("svcrdma: request overruns rq_pages\n");
+ trace_svcrdma_page_overrun_err(cc->cc_rdma, rqstp, info->ri_pageno);
return -EINVAL;
-
-out_initerr:
- trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
- svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
- return -EIO;
}
/* Walk the segments in the Read chunk starting at @p and construct
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index b6c8643867f2..38e7c3c8c4a9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -868,12 +868,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
__be32 *p;
int ret;
- /* Create the RDMA response header. xprt->xpt_mutex,
- * acquired in svc_send(), serializes RPC replies. The
- * code path below that inserts the credit grant value
- * into each transport header runs only inside this
- * critical section.
- */
+ ret = -ENOTCONN;
+ if (svc_xprt_is_dead(xprt))
+ goto err0;
+
ret = -ENOMEM;
sctxt = svc_rdma_send_ctxt_get(rdma);
if (!sctxt)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index ea54785db4f8..d38be57b00ed 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -211,7 +211,12 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id,
newxprt->sc_ord = param->initiator_depth;
sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
- svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
+ newxprt->sc_xprt.xpt_remotelen = svc_addr_len(sa);
+ memcpy(&newxprt->sc_xprt.xpt_remote, sa,
+ newxprt->sc_xprt.xpt_remotelen);
+ snprintf(newxprt->sc_xprt.xpt_remotebuf,
+ sizeof(newxprt->sc_xprt.xpt_remotebuf) - 1, "%pISc", sa);
+
/* The remote port is arbitrary and not under the control of the
* client ULP. Set it to a fixed value so that the DRC continues
* to be effective after a reconnect.
@@ -309,11 +314,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct svcxprt_rdma *cma_xprt;
int ret;
- dprintk("svcrdma: Creating RDMA listener\n");
- if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
- dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
+ if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6)
return ERR_PTR(-EAFNOSUPPORT);
- }
cma_xprt = svc_rdma_create_xprt(serv, net);
if (!cma_xprt)
return ERR_PTR(-ENOMEM);
@@ -324,7 +326,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(listen_id)) {
ret = PTR_ERR(listen_id);
- dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
goto err0;
}
@@ -333,23 +334,17 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
*/
#if IS_ENABLED(CONFIG_IPV6)
ret = rdma_set_afonly(listen_id, 1);
- if (ret) {
- dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
+ if (ret)
goto err1;
- }
#endif
ret = rdma_bind_addr(listen_id, sa);
- if (ret) {
- dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
+ if (ret)
goto err1;
- }
cma_xprt->sc_cm_id = listen_id;
ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
- if (ret) {
- dprintk("svcrdma: rdma_listen failed = %d\n", ret);
+ if (ret)
goto err1;
- }
/*
* We need to use the address from the cm_id in case the
@@ -405,9 +400,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (!newxprt)
return NULL;
- dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
- newxprt, newxprt->sc_cm_id);
-
dev = newxprt->sc_cm_id->device;
newxprt->sc_port_num = newxprt->sc_cm_id->port_num;
@@ -443,21 +435,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
newxprt->sc_pd = ib_alloc_pd(dev, 0);
if (IS_ERR(newxprt->sc_pd)) {
- dprintk("svcrdma: error creating PD for connect request\n");
+ trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd));
goto errout;
}
newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
IB_POLL_WORKQUEUE);
- if (IS_ERR(newxprt->sc_sq_cq)) {
- dprintk("svcrdma: error creating SQ CQ for connect request\n");
+ if (IS_ERR(newxprt->sc_sq_cq))
goto errout;
- }
newxprt->sc_rq_cq =
ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
- if (IS_ERR(newxprt->sc_rq_cq)) {
- dprintk("svcrdma: error creating RQ CQ for connect request\n");
+ if (IS_ERR(newxprt->sc_rq_cq))
goto errout;
- }
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.event_handler = qp_event_handler;
@@ -481,7 +469,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
if (ret) {
- dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
+ trace_svcrdma_qp_err(newxprt, ret);
goto errout;
}
newxprt->sc_qp = newxprt->sc_cm_id->qp;
@@ -489,8 +477,10 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
newxprt->sc_snd_w_inv = false;
if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
- !rdma_ib_or_roce(dev, newxprt->sc_port_num))
+ !rdma_ib_or_roce(dev, newxprt->sc_port_num)) {
+ trace_svcrdma_fabric_err(newxprt, -EINVAL);
goto errout;
+ }
if (!svc_rdma_post_recvs(newxprt))
goto errout;
@@ -512,15 +502,17 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
dev->attrs.max_qp_init_rd_atom);
if (!conn_param.initiator_depth) {
- dprintk("svcrdma: invalid ORD setting\n");
ret = -EINVAL;
+ trace_svcrdma_initdepth_err(newxprt, ret);
goto errout;
}
conn_param.private_data = &pmsg;
conn_param.private_data_len = sizeof(pmsg);
ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
- if (ret)
+ if (ret) {
+ trace_svcrdma_accept_err(newxprt, ret);
goto errout;
+ }
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
dprintk("svcrdma: new connection %p accepted:\n", newxprt);
@@ -535,12 +527,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
dprintk(" ord : %d\n", conn_param.initiator_depth);
#endif
- trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
return &newxprt->sc_xprt;
errout:
- dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
- trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
/* Take a reference in case the DTO handler runs */
svc_xprt_get(&newxprt->sc_xprt);
if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
@@ -578,8 +567,6 @@ static void __svc_rdma_free(struct work_struct *work)
container_of(work, struct svcxprt_rdma, sc_work);
struct svc_xprt *xprt = &rdma->sc_xprt;
- trace_svcrdma_xprt_free(xprt);
-
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 659da37020a4..0c4af7f5e241 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -68,7 +68,7 @@
* tunables
*/
-unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
+static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
@@ -281,8 +281,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- trace_xprtrdma_op_destroy(r_xprt);
-
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
rpcrdma_xprt_disconnect(r_xprt);
@@ -365,10 +363,6 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->max_payload = RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
- dprintk("RPC: %s: %s:%s\n", __func__,
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PORT]);
- trace_xprtrdma_create(new_xprt);
return xprt;
}
@@ -385,8 +379,6 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- trace_xprtrdma_op_close(r_xprt);
-
rpcrdma_xprt_disconnect(r_xprt);
xprt->reestablish_timeout = 0;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 05c4d3a9cda2..2ae348377806 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -141,7 +141,6 @@ void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS &&
r_xprt->rx_ep->re_connect_status == 1) {
r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
- trace_xprtrdma_flush_dct(r_xprt, wc->status);
xprt_force_disconnect(xprt);
}
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3a143e250b9a..914508ea9b84 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2528,8 +2528,16 @@ static int bc_sendto(struct rpc_rqst *req)
return sent;
}
-/*
- * The send routine. Borrows from svc_send
+/**
+ * bc_send_request - Send a backchannel Call on a TCP socket
+ * @req: rpc_rqst containing Call message to be sent
+ *
+ * xpt_mutex ensures @req's whole message is written to the socket
+ * without interruption.
+ *
+ * Return values:
+ * %0 if the message was sent successfully
+ * %ENOTCONN if the message was not sent
*/
static int bc_send_request(struct rpc_rqst *req)
{
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index c0afcd627c5e..046e4cb3acea 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -221,7 +221,7 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
total = accounted;
- while (rem) {
+ do {
if (!skb || skb->len >= mss) {
skb = tipc_buf_acquire(mss, GFP_KERNEL);
if (unlikely(!skb))
@@ -245,7 +245,7 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
skb_put(skb, cpy);
rem -= cpy;
total += msg_blocks(hdr) - curr;
- }
+ } while (rem);
return total - accounted;
}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 0e989005bdc2..ec10041c6b7d 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -629,7 +629,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
- const struct proto *prot = READ_ONCE(sk->sk_prot);
+ struct proto *prot = READ_ONCE(sk->sk_prot);
/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
if (ip_ver == TLSV6 &&
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 4b8b1150a738..8b65323207db 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -2055,7 +2055,7 @@ static bool vmci_check_transport(struct vsock_sock *vsk)
return vsk->transport == &vmci_transport;
}
-void vmci_vsock_transport_cb(bool is_host)
+static void vmci_vsock_transport_cb(bool is_host)
{
int features;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 1bbaf1747e4f..e97db37354e4 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -254,10 +254,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
if (!umem->pgs)
return -ENOMEM;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
npgs = pin_user_pages(address, umem->npgs,
gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (npgs != umem->npgs) {
if (npgs >= 0) {
@@ -336,7 +336,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if ((addr + size) < addr)
return -EINVAL;
- npgs = div_u64(size, PAGE_SIZE);
+ npgs = size >> PAGE_SHIFT;
if (npgs > U32_MAX)
return -EINVAL;
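
A brief standalone sketch (editorial illustration, not part of the patch) of why the division can become a shift: PAGE_SIZE is 1 << PAGE_SHIFT, and for unsigned values dividing by a power of two and right-shifting by its exponent truncate identically, so div_u64(size, PAGE_SIZE) and size >> PAGE_SHIFT always agree. A 4 KiB page (shift of 12) is assumed for the worked numbers.

/* Illustration only: division by a power of two equals a right shift. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1ULL << EX_PAGE_SHIFT)

int main(void)
{
	uint64_t size = 10 * EX_PAGE_SIZE + 123;	/* deliberately unaligned */

	assert((size / EX_PAGE_SIZE) == (size >> EX_PAGE_SHIFT));
	printf("%llu bytes -> %llu pages either way\n",
	       (unsigned long long)size,
	       (unsigned long long)(size >> EX_PAGE_SHIFT));
	return 0;
}
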
diff --git a/samples/Kconfig b/samples/Kconfig
index 205076cf234e..0cbb6146f3cf 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -6,6 +6,10 @@ menuconfig SAMPLES
if SAMPLES
+config SAMPLE_AUXDISPLAY
+ bool "auxdisplay sample"
+ depends on CC_CAN_LINK
+
config SAMPLE_TRACE_EVENTS
tristate "Build trace_events examples -- loadable modules only"
depends on EVENT_TRACING && m
@@ -118,19 +122,29 @@ config SAMPLE_CONNECTOR
config SAMPLE_HIDRAW
bool "hidraw sample"
- depends on HEADERS_INSTALL
+ depends on CC_CAN_LINK && HEADERS_INSTALL
config SAMPLE_PIDFD
bool "pidfd sample"
- depends on HEADERS_INSTALL
+ depends on CC_CAN_LINK && HEADERS_INSTALL
config SAMPLE_SECCOMP
bool "Build seccomp sample code"
- depends on SECCOMP_FILTER && HEADERS_INSTALL
+ depends on SECCOMP_FILTER && CC_CAN_LINK && HEADERS_INSTALL
help
Build samples of seccomp filters using various methods of
BPF filter construction.
+config SAMPLE_TIMER
+ bool "Timer sample"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+
+config SAMPLE_UHID
+ bool "UHID sample"
+ depends on CC_CAN_LINK && HEADERS_INSTALL
+ help
+ Build UHID sample program.
+
config SAMPLE_VFIO_MDEV_MTTY
tristate "Build VFIO mtty example mediated device sample code -- loadable modules only"
depends on VFIO_MDEV_DEVICE && m
@@ -178,7 +192,7 @@ config SAMPLE_ANDROID_BINDERFS
config SAMPLE_VFS
bool "Build example programs that use new VFS system calls"
- depends on HEADERS_INSTALL
+ depends on CC_CAN_LINK && HEADERS_INSTALL
help
Build example userspace programs that use new VFS system calls such
as mount API and statx(). Note that this is restricted to the x86
@@ -187,8 +201,12 @@ config SAMPLE_VFS
config SAMPLE_INTEL_MEI
bool "Build example program working with intel mei driver"
depends on INTEL_MEI
+ depends on CC_CAN_LINK && HEADERS_INSTALL
help
Build a sample program to work with mei device.
+config SAMPLE_WATCHDOG
+ bool "watchdog sample"
+ depends on CC_CAN_LINK
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index f8f847b4f61f..29c66aadd954 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux samples code
-OBJECT_FILES_NON_STANDARD := y
+subdir-$(CONFIG_SAMPLE_AUXDISPLAY) += auxdisplay
obj-$(CONFIG_SAMPLE_ANDROID_BINDERFS) += binderfs/
obj-$(CONFIG_SAMPLE_CONFIGFS) += configfs/
obj-$(CONFIG_SAMPLE_CONNECTOR) += connector/
@@ -16,11 +16,14 @@ subdir-$(CONFIG_SAMPLE_PIDFD) += pidfd
obj-$(CONFIG_SAMPLE_QMI_CLIENT) += qmi/
obj-$(CONFIG_SAMPLE_RPMSG_CLIENT) += rpmsg/
subdir-$(CONFIG_SAMPLE_SECCOMP) += seccomp
+subdir-$(CONFIG_SAMPLE_TIMER) += timers
obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace_events/
obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/
obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/
+subdir-$(CONFIG_SAMPLE_UHID) += uhid
obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
obj-y += vfio-mdev/
subdir-$(CONFIG_SAMPLE_VFS) += vfs
obj-$(CONFIG_SAMPLE_INTEL_MEI) += mei/
+subdir-$(CONFIG_SAMPLE_WATCHDOG) += watchdog
diff --git a/samples/auxdisplay/Makefile b/samples/auxdisplay/Makefile
index 0273bab27233..dbdf939af94a 100644
--- a/samples/auxdisplay/Makefile
+++ b/samples/auxdisplay/Makefile
@@ -1,10 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-CC := $(CROSS_COMPILE)gcc
-CFLAGS := -I../../usr/include
-
-PROGS := cfag12864b-example
-
-all: $(PROGS)
-
-clean:
- rm -fr $(PROGS)
+userprogs := cfag12864b-example
+always-y := $(userprogs)
diff --git a/samples/connector/Makefile b/samples/connector/Makefile
index b785cbde5ffa..50cb40e09a7b 100644
--- a/samples/connector/Makefile
+++ b/samples/connector/Makefile
@@ -1,13 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SAMPLE_CONNECTOR) += cn_test.o
-# List of programs to build
-hostprogs := ucon
-always-y := $(hostprogs)
+userprogs := ucon
+always-$(CONFIG_CC_CAN_LINK) := $(userprogs)
-HOSTCFLAGS_ucon.o += -I$(objtree)/usr/include
-
-all: modules
-
-modules clean:
- $(MAKE) -C ../.. M=$(CURDIR) $@
+userccflags += -I usr/include
diff --git a/samples/hidraw/Makefile b/samples/hidraw/Makefile
index 8bd25f77671f..d2c77ed60b39 100644
--- a/samples/hidraw/Makefile
+++ b/samples/hidraw/Makefile
@@ -1,8 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# List of programs to build
-hostprogs := hid-example
-always-y := $(hostprogs)
+userprogs := hid-example
+always-y := $(userprogs)
-HOSTCFLAGS_hid-example.o += -I$(objtree)/usr/include
-
-all: hid-example
+userccflags += -I usr/include
diff --git a/samples/mei/Makefile b/samples/mei/Makefile
index f5b9d02be2cd..329411f82369 100644
--- a/samples/mei/Makefile
+++ b/samples/mei/Makefile
@@ -1,10 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
-hostprogs := mei-amt-version
+userprogs := mei-amt-version
+always-y := $(userprogs)
-HOSTCFLAGS_mei-amt-version.o += -I$(objtree)/usr/include
-
-always-y := $(hostprogs)
-
-all: mei-amt-version
+userccflags += -I usr/include
diff --git a/samples/pidfd/Makefile b/samples/pidfd/Makefile
index ee2979849d92..6e5b67e648c2 100644
--- a/samples/pidfd/Makefile
+++ b/samples/pidfd/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-hostprogs := pidfd-metadata
-always-y := $(hostprogs)
-HOSTCFLAGS_pidfd-metadata.o += -I$(objtree)/usr/include
-all: pidfd-metadata
+userprogs := pidfd-metadata
+always-y := $(userprogs)
+
+userccflags += -I usr/include
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index 89279e8b87df..75916c23e416 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -1,44 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-ifndef CROSS_COMPILE
-hostprogs := bpf-fancy dropper bpf-direct user-trap
+userprogs := bpf-fancy dropper bpf-direct user-trap
-HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
-HOSTCFLAGS_bpf-fancy.o += -idirafter $(objtree)/include
-HOSTCFLAGS_bpf-helper.o += -I$(objtree)/usr/include
-HOSTCFLAGS_bpf-helper.o += -idirafter $(objtree)/include
bpf-fancy-objs := bpf-fancy.o bpf-helper.o
-HOSTCFLAGS_dropper.o += -I$(objtree)/usr/include
-HOSTCFLAGS_dropper.o += -idirafter $(objtree)/include
-dropper-objs := dropper.o
+userccflags += -I usr/include
-HOSTCFLAGS_bpf-direct.o += -I$(objtree)/usr/include
-HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include
-bpf-direct-objs := bpf-direct.o
-
-HOSTCFLAGS_user-trap.o += -I$(objtree)/usr/include
-HOSTCFLAGS_user-trap.o += -idirafter $(objtree)/include
-user-trap-objs := user-trap.o
-
-# Try to match the kernel target.
-ifndef CONFIG_64BIT
-
-# s390 has -m31 flag to build 31 bit binaries
-ifndef CONFIG_S390
-MFLAG = -m32
-else
-MFLAG = -m31
-endif
-
-HOSTCFLAGS_bpf-direct.o += $(MFLAG)
-HOSTCFLAGS_dropper.o += $(MFLAG)
-HOSTCFLAGS_bpf-helper.o += $(MFLAG)
-HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
-HOSTCFLAGS_user-trap.o += $(MFLAG)
-HOSTLDLIBS_bpf-direct += $(MFLAG)
-HOSTLDLIBS_bpf-fancy += $(MFLAG)
-HOSTLDLIBS_dropper += $(MFLAG)
-HOSTLDLIBS_user-trap += $(MFLAG)
-endif
-always-y := $(hostprogs)
-endif
+always-y := $(userprogs)
diff --git a/samples/timers/Makefile b/samples/timers/Makefile
index f9fa07460802..15c7ddbc8c51 100644
--- a/samples/timers/Makefile
+++ b/samples/timers/Makefile
@@ -1,16 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ifndef CROSS_COMPILE
-uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+userprogs := hpet_example
+always-y := $(userprogs)
-ifeq ($(ARCH),x86)
-CC := $(CROSS_COMPILE)gcc
-PROGS := hpet_example
-
-all: $(PROGS)
-
-clean:
- rm -fr $(PROGS)
-
-endif
-endif
+userccflags += -I usr/include
diff --git a/samples/uhid/.gitignore b/samples/uhid/.gitignore
new file mode 100644
index 000000000000..0e0a5a929f5d
--- /dev/null
+++ b/samples/uhid/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/uhid-example
diff --git a/samples/uhid/Makefile b/samples/uhid/Makefile
index 5f44ea40d6d5..9e652fc34103 100644
--- a/samples/uhid/Makefile
+++ b/samples/uhid/Makefile
@@ -1,8 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-# List of programs to build
-hostprogs := uhid-example
+userprogs := uhid-example
+always-y := $(userprogs)
-# Tell kbuild to always build the programs
-always-y := $(hostprogs)
-
-HOSTCFLAGS_uhid-example.o += -I$(objtree)/usr/include
+userccflags += -I usr/include
diff --git a/samples/uhid/uhid-example.c b/samples/uhid/uhid-example.c
index b72d645ce828..015cb06a241e 100644
--- a/samples/uhid/uhid-example.c
+++ b/samples/uhid/uhid-example.c
@@ -165,7 +165,7 @@ static int uhid_write(int fd, const struct uhid_event *ev)
fprintf(stderr, "Cannot write to uhid: %m\n");
return -errno;
} else if (ret != sizeof(*ev)) {
- fprintf(stderr, "Wrong size written to uhid: %ld != %lu\n",
+ fprintf(stderr, "Wrong size written to uhid: %zd != %zu\n",
			ret, sizeof(*ev));
return -EFAULT;
} else {
@@ -236,7 +236,7 @@ static int event(int fd)
fprintf(stderr, "Cannot read uhid-cdev: %m\n");
return -errno;
} else if (ret != sizeof(ev)) {
- fprintf(stderr, "Invalid size read from uhid-dev: %ld != %lu\n",
+ fprintf(stderr, "Invalid size read from uhid-dev: %zd != %zu\n",
ret, sizeof(ev));
return -EFAULT;
}
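
One more standalone sketch (editorial illustration, not part of the patch) of the format-specifier fix in the two hunks above: read(2) and write(2) return ssize_t, and the 'z' length modifier matches size_t/ssize_t on every target, whereas %ld/%lu only happen to work where long and size_t share a width.

/* Illustration only: portable printing of size_t/ssize_t values. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t ret = read(STDIN_FILENO, buf, sizeof(buf));

	printf("read returned %zd of %zu requested bytes\n", ret, sizeof(buf));
	return 0;
}
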
diff --git a/samples/vfs/Makefile b/samples/vfs/Makefile
index 65acdde5c117..00b6824f9237 100644
--- a/samples/vfs/Makefile
+++ b/samples/vfs/Makefile
@@ -1,10 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-# List of programs to build
-hostprogs := \
- test-fsmount \
- test-statx
+userprogs := test-fsmount test-statx
+always-y := $(userprogs)
-always-y := $(hostprogs)
-
-HOSTCFLAGS_test-fsmount.o += -I$(objtree)/usr/include
-HOSTCFLAGS_test-statx.o += -I$(objtree)/usr/include
+userccflags += -I usr/include
diff --git a/samples/watchdog/Makefile b/samples/watchdog/Makefile
index a9430fa60253..17384cfb387e 100644
--- a/samples/watchdog/Makefile
+++ b/samples/watchdog/Makefile
@@ -1,9 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-CC := $(CROSS_COMPILE)gcc
-PROGS := watchdog-simple
-
-all: $(PROGS)
-
-clean:
- rm -fr $(PROGS)
-
+userprogs := watchdog-simple
+always-y := $(userprogs)
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 6cabf20ce66a..0c3dc983439b 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -16,7 +16,7 @@ pound := \#
dot-target = $(dir $@).$(notdir $@)
###
-# The temporary file to save gcc -MD generated dependencies must not
+# The temporary file to save gcc -MMD generated dependencies must not
# contain a comma
depfile = $(subst $(comma),_,$(dot-target).d)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 9fcbfac15d1d..2e8810b7e5ed 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -50,6 +50,12 @@ ifneq ($(hostprogs)$(hostcxxlibs-y)$(hostcxxlibs-m),)
include scripts/Makefile.host
endif
+# Do not include userprogs rules unless needed.
+userprogs := $(sort $(userprogs))
+ifneq ($(userprogs),)
+include scripts/Makefile.userprogs
+endif
+
ifndef obj
$(warning kbuild: Makefile.build is included improperly)
endif
@@ -63,19 +69,27 @@ endif
# ===========================================================================
+# subdir-builtin and subdir-modorder may contain duplications. Use $(sort ...)
+subdir-builtin := $(sort $(filter %/built-in.a, $(real-obj-y)))
+subdir-modorder := $(sort $(filter %/modules.order, $(obj-m)))
+
+targets-for-builtin := $(extra-y)
+
ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),)
-lib-target := $(obj)/lib.a
+targets-for-builtin += $(obj)/lib.a
endif
ifdef need-builtin
-builtin-target := $(obj)/built-in.a
+targets-for-builtin += $(obj)/built-in.a
endif
-ifeq ($(CONFIG_MODULES)$(need-modorder),y1)
-modorder-target := $(obj)/modules.order
+targets-for-modules := $(patsubst %.o, %.mod, $(filter %.o, $(obj-m)))
+
+ifdef need-modorder
+targets-for-modules += $(obj)/modules.order
endif
-mod-targets := $(patsubst %.o, %.mod, $(obj-m))
+targets += $(targets-for-builtin) $(targets-for-modules)
# Linus' kernel sanity checking tool
ifeq ($(KBUILD_CHECKSRC),1)
@@ -274,8 +288,6 @@ cmd_mod = { \
$(obj)/%.mod: $(obj)/%.o FORCE
$(call if_changed,mod)
-targets += $(mod-targets)
-
quiet_cmd_cc_lst_c = MKLST $@
cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \
$(CONFIG_SHELL) $(srctree)/scripts/makelst $*.o \
@@ -348,8 +360,9 @@ endif
$(obj)/%.o: $(src)/%.S $(objtool_dep) FORCE
$(call if_changed_rule,as_o_S)
-targets += $(filter-out $(subdir-obj-y), $(real-obj-y)) $(real-obj-m) $(lib-y)
-targets += $(extra-y) $(always-y) $(MAKECMDGOALS)
+targets += $(filter-out $(subdir-builtin), $(real-obj-y))
+targets += $(filter-out $(subdir-modorder), $(real-obj-m))
+targets += $(lib-y) $(always-y) $(MAKECMDGOALS)
# Linker scripts preprocessor (.lds.S -> .lds)
# ---------------------------------------------------------------------------
@@ -373,44 +386,40 @@ $(obj)/%.asn1.c $(obj)/%.asn1.h: $(src)/%.asn1 $(objtree)/scripts/asn1_compiler
# ---------------------------------------------------------------------------
# To build objects in subdirs, we need to descend into the directories
-$(obj)/%/built-in.a: $(obj)/% ;
+$(subdir-builtin): $(obj)/%/built-in.a: $(obj)/% ;
+$(subdir-modorder): $(obj)/%/modules.order: $(obj)/% ;
#
# Rule to compile a set of .o files into one .a file (without symbol table)
#
-ifdef builtin-target
quiet_cmd_ar_builtin = AR $@
cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs)
-$(builtin-target): $(real-obj-y) FORCE
+$(obj)/built-in.a: $(real-obj-y) FORCE
$(call if_changed,ar_builtin)
-targets += $(builtin-target)
-endif # builtin-target
-
#
# Rule to create modules.order file
#
# Create commands to either record .ko file or cat modules.order from
# a subdirectory
-$(modorder-target): $(subdir-ym) FORCE
- $(Q){ $(foreach m, $(modorder), \
- $(if $(filter %/modules.order, $m), cat $m, echo $m);) :; } \
+# Add $(obj-m) as the prerequisite to avoid updating the timestamp of
+# modules.order unless contained modules are updated.
+
+cmd_modules_order = { $(foreach m, $(real-prereqs), \
+ $(if $(filter %/modules.order, $m), cat $m, echo $(patsubst %.o,%.ko,$m));) :; } \
| $(AWK) '!x[$$0]++' - > $@
+$(obj)/modules.order: $(obj-m) FORCE
+ $(call if_changed,modules_order)
+
#
# Rule to compile a set of .o files into one .a file (with symbol table)
#
-ifdef lib-target
-
-$(lib-target): $(lib-y) FORCE
+$(obj)/lib.a: $(lib-y) FORCE
$(call if_changed,ar)
-targets += $(lib-target)
-
-endif
-
# NOTE:
# Do not replace $(filter %.o,^) with $(real-prereqs). When a single object
# module is turned into a multi object module, $^ will contain header file
@@ -473,8 +482,8 @@ endif
else
-__build: $(if $(KBUILD_BUILTIN),$(builtin-target) $(lib-target) $(extra-y)) \
- $(if $(KBUILD_MODULES),$(obj-m) $(mod-targets) $(modorder-target)) \
+__build: $(if $(KBUILD_BUILTIN), $(targets-for-builtin)) \
+ $(if $(KBUILD_MODULES), $(targets-for-modules)) \
$(subdir-ym) $(always-y)
@:
@@ -487,8 +496,8 @@ PHONY += $(subdir-ym)
$(subdir-ym):
$(Q)$(MAKE) $(build)=$@ \
$(if $(filter $@/, $(KBUILD_SINGLE_TARGETS)),single-build=) \
- need-builtin=$(if $(filter $@/built-in.a, $(subdir-obj-y)),1) \
- need-modorder=$(if $(need-modorder),$(if $(filter $@/modules.order, $(modorder)),1))
+ need-builtin=$(if $(filter $@/built-in.a, $(subdir-builtin)),1) \
+ need-modorder=$(if $(filter $@/modules.order, $(subdir-modorder)),1)
# Add FORCE to the prerequisites of a target to force it to be always rebuilt.
# ---------------------------------------------------------------------------
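cmd_modules_order above concatenates nested modules.order files, rewrites plain objects to the .ko names they will become, and lets awk drop duplicates. For hypothetical prerequisites drivers/foo/gadgets/modules.order and drivers/foo/widget.o, the generated command amounts to:

    { cat drivers/foo/gadgets/modules.order; echo drivers/foo/widget.ko; :; } \
        | awk '!x[$0]++' - > drivers/foo/modules.order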
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 075f0cc2d8d7..e2c76122319d 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -29,7 +29,7 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
__clean-files := $(extra-y) $(extra-m) $(extra-) \
$(always) $(always-y) $(always-m) $(always-) $(targets) $(clean-files) \
- $(hostprogs) $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
+ $(hostprogs) $(hostprogs-y) $(hostprogs-m) $(hostprogs-) $(userprogs) \
$(hostcxxlibs-y) $(hostcxxlibs-m)
__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 2045855d0b75..c8a4a033dc3e 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -88,8 +88,8 @@ _hostcxx_flags += -I $(objtree)/$(obj)
endif
endif
-hostc_flags = -Wp,-MD,$(depfile) $(_hostc_flags)
-hostcxx_flags = -Wp,-MD,$(depfile) $(_hostcxx_flags)
+hostc_flags = -Wp,-MMD,$(depfile) $(_hostc_flags)
+hostcxx_flags = -Wp,-MMD,$(depfile) $(_hostcxx_flags)
#####
# Compile programs on the host
diff --git a/scripts/Makefile.kcsan b/scripts/Makefile.kcsan
new file mode 100644
index 000000000000..bd4da1af5953
--- /dev/null
+++ b/scripts/Makefile.kcsan
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KCSAN
+
+# GCC and Clang accept backend options differently. Do not wrap in cc-option,
+# because Clang accepts "--param" even if it is unused.
+ifdef CONFIG_CC_IS_CLANG
+cc-param = -mllvm -$(1)
+else
+cc-param = --param -$(1)
+endif
+
+# Keep most options here optional, to allow enabling more compilers if absence
+# of some options does not break KCSAN nor causes false positive reports.
+CFLAGS_KCSAN := -fsanitize=thread \
+ $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
+ $(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1)) \
+ $(call cc-param,tsan-distinguish-volatile=1)
+
+endif # CONFIG_KCSAN
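CFLAGS_KCSAN keeps only -fsanitize=thread mandatory; the tuning parameters are wrapped in cc-option so older compilers still build. A quick, illustrative way to check that the local toolchain has a ThreadSanitizer backend at all (not part of the build, mirrors what a cc-option probe does):

    cc -Werror -fsanitize=thread -c -x c /dev/null -o /dev/null \
        && echo 'tsan backend available'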
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 4b799737722c..e3f047692aeb 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -4,8 +4,18 @@ asflags-y += $(EXTRA_AFLAGS)
ccflags-y += $(EXTRA_CFLAGS)
cppflags-y += $(EXTRA_CPPFLAGS)
ldflags-y += $(EXTRA_LDFLAGS)
+ifneq ($(always),)
+$(warning 'always' is deprecated. Please use 'always-y' instead)
always-y += $(always)
-hostprogs += $(hostprogs-y) $(hostprogs-m)
+endif
+ifneq ($(hostprogs-y),)
+$(warning 'hostprogs-y' is deprecated. Please use 'hostprogs' instead)
+hostprogs += $(hostprogs-y)
+endif
+ifneq ($(hostprogs-m),)
+$(warning 'hostprogs-m' is deprecated. Please use 'hostprogs' instead)
+hostprogs += $(hostprogs-m)
+endif
# flags that take effect in current and sub directories
KBUILD_AFLAGS += $(subdir-asflags-y)
@@ -22,40 +32,35 @@ obj-m := $(filter-out $(obj-y),$(obj-m))
# Filter out objects already built-in
lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
-# Determine modorder.
-# Unfortunately, we don't have information about ordering between -y
-# and -m subdirs. Just put -y's first.
-modorder := $(patsubst %/,%/modules.order, $(filter %/, $(obj-y)) $(obj-m:.o=.ko))
+# Subdirectories we need to descend into
+subdir-ym := $(sort $(subdir-y) $(subdir-m) \
+ $(patsubst %/,%, $(filter %/, $(obj-y) $(obj-m))))
+
+# Handle objects in subdirs:
+# - If we encounter foo/ in $(obj-y), replace it by foo/built-in.a and
+# foo/modules.order
+# - If we encounter foo/ in $(obj-m), replace it by foo/modules.order
+#
+# Generate modules.order to determine modorder. Unfortunately, we don't have
+# information about ordering between -y and -m subdirs. Just put -y's first.
+
+ifdef need-modorder
+obj-m := $(patsubst %/,%/modules.order, $(filter %/, $(obj-y)) $(obj-m))
+else
+obj-m := $(filter-out %/, $(obj-m))
+endif
-# Handle objects in subdirs
-# ---------------------------------------------------------------------------
-# o if we encounter foo/ in $(obj-y), replace it by foo/built-in.a
-# and add the directory to the list of dirs to descend into: $(subdir-y)
-# o if we encounter foo/ in $(obj-m), remove it from $(obj-m)
-# and add the directory to the list of dirs to descend into: $(subdir-m)
-__subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
-subdir-y += $(__subdir-y)
-__subdir-m := $(patsubst %/,%,$(filter %/, $(obj-m)))
-subdir-m += $(__subdir-m)
ifdef need-builtin
obj-y := $(patsubst %/, %/built-in.a, $(obj-y))
else
obj-y := $(filter-out %/, $(obj-y))
endif
-obj-m := $(filter-out %/, $(obj-m))
-
-# Subdirectories we need to descend into
-subdir-ym := $(sort $(subdir-y) $(subdir-m))
# If $(foo-objs), $(foo-y), $(foo-m), or $(foo-) exists, foo.o is a composite object
multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-))), $(m))))
multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)) $($(m:.o=-))), $(m))))
multi-used := $(multi-used-y) $(multi-used-m)
-# $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to
-# tell kbuild to descend
-subdir-obj-y := $(filter %/built-in.a, $(obj-y))
-
# Replace multi-part objects by their individual parts,
# including built-in.a from subdirectories
real-obj-y := $(foreach m, $(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m)))
@@ -78,10 +83,8 @@ endif
extra-y := $(addprefix $(obj)/,$(extra-y))
always-y := $(addprefix $(obj)/,$(always-y))
targets := $(addprefix $(obj)/,$(targets))
-modorder := $(addprefix $(obj)/,$(modorder))
obj-m := $(addprefix $(obj)/,$(obj-m))
lib-y := $(addprefix $(obj)/,$(lib-y))
-subdir-obj-y := $(addprefix $(obj)/,$(subdir-obj-y))
real-obj-y := $(addprefix $(obj)/,$(real-obj-y))
real-obj-m := $(addprefix $(obj)/,$(real-obj-m))
multi-used-m := $(addprefix $(obj)/,$(multi-used-m))
@@ -149,6 +152,16 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_KCOV))
endif
+#
+# Enable KCSAN flags except some files or directories we don't want to check
+# (depends on variables KCSAN_SANITIZE_obj.o, KCSAN_SANITIZE)
+#
+ifeq ($(CONFIG_KCSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+ $(KCSAN_SANITIZE_$(basetarget).o)$(KCSAN_SANITIZE)y), \
+ $(CFLAGS_KCSAN))
+endif
+
# $(srctree)/$(src) for including checkin headers from generated source files
# $(objtree)/$(obj) for including generated headers from checkin source files
ifeq ($(KBUILD_EXTMOD),)
@@ -171,22 +184,22 @@ modkern_aflags = $(if $(part-of-module), \
$(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE), \
$(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL))
-c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
+c_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
-include $(srctree)/include/linux/compiler_types.h \
$(_c_flags) $(modkern_cflags) \
$(basename_flags) $(modname_flags)
-a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
+a_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_a_flags) $(modkern_aflags)
-cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
+cpp_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_cpp_flags)
ld_flags = $(KBUILD_LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
DTC_INCLUDE := $(srctree)/scripts/dtc/include-prefixes
-dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
+dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc \
$(addprefix -I,$(DTC_INCLUDE)) \
-undef -D__DTS__
@@ -241,7 +254,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
# ---------------------------------------------------------------------------
quiet_cmd_gzip = GZIP $@
- cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
+ cmd_gzip = cat $(real-prereqs) | $(_GZIP) -n -f -9 > $@
# DTC
# ---------------------------------------------------------------------------
@@ -287,13 +300,13 @@ $(obj)/%.dtb.S: $(obj)/%.dtb FORCE
quiet_cmd_dtc = DTC $@
cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
$(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
- $(DTC) -O $(2) -o $@ -b 0 \
+ $(DTC) -O $(patsubst .%,%,$(suffix $@)) -o $@ -b 0 \
$(addprefix -i,$(dir $<) $(DTC_INCLUDE)) $(DTC_FLAGS) \
-d $(depfile).dtc.tmp $(dtc-tmp) ; \
cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
$(obj)/%.dtb: $(src)/%.dts $(DTC) FORCE
- $(call if_changed_dep,dtc,dtb)
+ $(call if_changed_dep,dtc)
DT_CHECKER ?= dt-validate
DT_BINDING_DIR := Documentation/devicetree/bindings
@@ -304,7 +317,7 @@ quiet_cmd_dtb_check = CHECK $@
cmd_dtb_check = $(DT_CHECKER) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@
define rule_dtc
- $(call cmd_and_fixdep,dtc,yaml)
+ $(call cmd_and_fixdep,dtc)
$(call cmd,dtb_check)
endef
@@ -334,19 +347,19 @@ printf "%08x\n" $$dec_size | \
)
quiet_cmd_bzip2 = BZIP2 $@
- cmd_bzip2 = { cat $(real-prereqs) | bzip2 -9; $(size_append); } > $@
+ cmd_bzip2 = { cat $(real-prereqs) | $(_BZIP2) -9; $(size_append); } > $@
# Lzma
# ---------------------------------------------------------------------------
quiet_cmd_lzma = LZMA $@
- cmd_lzma = { cat $(real-prereqs) | lzma -9; $(size_append); } > $@
+ cmd_lzma = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@
quiet_cmd_lzo = LZO $@
- cmd_lzo = { cat $(real-prereqs) | lzop -9; $(size_append); } > $@
+ cmd_lzo = { cat $(real-prereqs) | $(_LZOP) -9; $(size_append); } > $@
quiet_cmd_lz4 = LZ4 $@
- cmd_lz4 = { cat $(real-prereqs) | lz4c -l -c1 stdin stdout; \
+ cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
$(size_append); } > $@
# U-Boot mkimage
@@ -393,7 +406,7 @@ quiet_cmd_xzkern = XZKERN $@
$(size_append); } > $@
quiet_cmd_xzmisc = XZMISC $@
- cmd_xzmisc = cat $(real-prereqs) | xz --check=crc32 --lzma2=dict=1MiB > $@
+ cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@
# ASM offsets
# ---------------------------------------------------------------------------
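Two mechanical cleanups land in this hunk: the compression tools are now referenced through variables defined in one place, and the DTC rule derives its output format from the target's suffix, so the dtb and yaml rules share a single command. For a hypothetical foo.dtb the rule expands to roughly this sequence (paths illustrative, real rule also writes depfiles):

    gcc -E -nostdinc -I scripts/dtc/include-prefixes -undef -D__DTS__ \
        -x assembler-with-cpp -o .foo.dtb.dts.tmp foo.dts
    dtc -O dtb -o foo.dtb -b 0 -i scripts/dtc/include-prefixes .foo.dtb.dts.tmp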
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 957eed6a17a5..3651cbf6ad49 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -43,34 +43,30 @@ __modpost:
include include/config/auto.conf
include scripts/Kbuild.include
-kernelsymfile := $(objtree)/Module.symvers
-modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
-
MODPOST = scripts/mod/modpost \
$(if $(CONFIG_MODVERSIONS),-m) \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
- $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
- $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
- $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
- $(if $(CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS)$(KBUILD_NSDEPS),-N) \
- $(if $(KBUILD_MODPOST_WARN),-w)
+ $(if $(KBUILD_MODPOST_WARN),-w) \
+ -o $@
ifdef MODPOST_VMLINUX
-quiet_cmd_modpost = MODPOST vmlinux.o
- cmd_modpost = $(MODPOST) vmlinux.o
+quiet_cmd_modpost = MODPOST $@
+ cmd_modpost = $(MODPOST) $<
-__modpost:
+vmlinux.symvers: vmlinux.o
$(call cmd,modpost)
-else
+__modpost: vmlinux.symvers
-MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - \
- $(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS))
+else
ifeq ($(KBUILD_EXTMOD),)
-MODPOST += $(wildcard vmlinux)
+
+input-symdump := vmlinux.symvers
+output-symdump := Module.symvers
+
else
# set src + obj - they may be used in the modules's Makefile
@@ -80,22 +76,57 @@ src := $(obj)
# Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
$(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+
+# modpost option for external modules
+MODPOST += -e
+
+input-symdump := Module.symvers $(KBUILD_EXTRA_SYMBOLS)
+output-symdump := $(KBUILD_EXTMOD)/Module.symvers
+
+endif
+
+# modpost options for modules (both in-kernel and external)
+MODPOST += \
+ $(addprefix -i ,$(wildcard $(input-symdump))) \
+ $(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS)) \
+ $(if $(CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS)$(KBUILD_NSDEPS),-N)
+
+# 'make -i -k' ignores compile errors, and builds as many modules as possible.
+ifneq ($(findstring i,$(filter-out --%,$(MAKEFLAGS))),)
+MODPOST += -n
endif
-# find all modules listed in modules.order
-modules := $(sort $(shell cat $(MODORDER)))
+# Clear VPATH to not search for *.symvers in $(srctree). Check only $(objtree).
+VPATH :=
+$(input-symdump):
+ @echo >&2 'WARNING: Symbol version dump "$@" is missing.'
+ @echo >&2 ' Modules may not have dependencies or modversions.'
-# Read out modules.order instead of expanding $(modules) to pass in modpost.
+# Read out modules.order to pass in modpost.
# Otherwise, allmodconfig would fail with "Argument list too long".
-quiet_cmd_modpost = MODPOST $(words $(modules)) modules
- cmd_modpost = sed 's/ko$$/o/' $(MODORDER) | $(MODPOST)
+quiet_cmd_modpost = MODPOST $@
+ cmd_modpost = sed 's/ko$$/o/' $< | $(MODPOST) -T -
-__modpost:
- $(call cmd,modpost)
+$(output-symdump): $(MODORDER) $(input-symdump) FORCE
+ $(call if_changed,modpost)
+
+targets += $(output-symdump)
+
+__modpost: $(output-symdump)
ifneq ($(KBUILD_MODPOST_NOFINAL),1)
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal
endif
+PHONY += FORCE
+FORCE:
+
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
endif
.PHONY: $(PHONY)
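The net effect of the rewrite is that symbol version data now flows through explicit files: vmlinux.o is postprocessed into vmlinux.symvers, in-tree modules consume that to produce Module.symvers, and an external module build reads the kernel's Module.symvers (plus KBUILD_EXTRA_SYMBOLS) with -i and writes its own dump with -o. The usual out-of-tree invocation is unchanged (path assumed):

    make -C /lib/modules/"$(uname -r)"/build M="$PWD" modules
    # on success the module directory now also contains its own Module.symvers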
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 02135d2671a6..b2b6153af63a 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -45,7 +45,7 @@ if test "$(objtree)" != "$(srctree)"; then \
false; \
fi ; \
$(srctree)/scripts/setlocalversion --save-scmversion; \
-tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
+tar -I $(_GZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
--transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
rm -f $(objtree)/.scmversion
@@ -127,9 +127,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \
tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \
rm -r $(perf-tar); \
$(if $(findstring tar-src,$@),, \
-$(if $(findstring bz2,$@),bzip2, \
-$(if $(findstring gz,$@),gzip, \
-$(if $(findstring xz,$@),xz, \
+$(if $(findstring bz2,$@),$(_BZIP2), \
+$(if $(findstring gz,$@),$(_GZIP), \
+$(if $(findstring xz,$@),$(XZ), \
$(error unknown target $@)))) \
-f -9 $(perf-tar).tar)
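tar's -I option names the compression program explicitly, so pointing _GZIP at a parallel implementation can speed up packaging without touching these rules. A hedged example, assuming pigz is installed and that the variable may be overridden on the make command line:

    make _GZIP=pigz perf-targz-src-pkg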
diff --git a/scripts/Makefile.userprogs b/scripts/Makefile.userprogs
new file mode 100644
index 000000000000..fb415297337a
--- /dev/null
+++ b/scripts/Makefile.userprogs
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Build userspace programs for the target system
+#
+
+# Executables compiled from a single .c file
+user-csingle := $(foreach m, $(userprogs), $(if $($(m)-objs),,$(m)))
+
+# Executables linked based on several .o files
+user-cmulti := $(foreach m, $(userprogs), $(if $($(m)-objs),$(m)))
+
+# Objects compiled from .c files
+user-cobjs := $(sort $(foreach m, $(userprogs), $($(m)-objs)))
+
+user-csingle := $(addprefix $(obj)/, $(user-csingle))
+user-cmulti := $(addprefix $(obj)/, $(user-cmulti))
+user-cobjs := $(addprefix $(obj)/, $(user-cobjs))
+
+user_ccflags = -Wp,-MMD,$(depfile) $(KBUILD_USERCFLAGS) $(userccflags) \
+ $($(target-stem)-userccflags)
+user_ldflags = $(KBUILD_USERLDFLAGS) $(userldflags) $($(target-stem)-userldflags)
+
+# Create an executable from a single .c file
+quiet_cmd_user_cc_c = CC [U] $@
+ cmd_user_cc_c = $(CC) $(user_ccflags) $(user_ldflags) -o $@ $< \
+ $($(target-stem)-userldlibs)
+$(user-csingle): $(obj)/%: $(src)/%.c FORCE
+ $(call if_changed_dep,user_cc_c)
+
+# Link an executable based on list of .o files
+quiet_cmd_user_ld = LD [U] $@
+ cmd_user_ld = $(CC) $(user_ldflags) -o $@ \
+ $(addprefix $(obj)/, $($(target-stem)-objs)) \
+ $($(target-stem)-userldlibs)
+$(user-cmulti): FORCE
+ $(call if_changed,user_ld)
+$(call multi_depend, $(user-cmulti), , -objs)
+
+# Create .o file from a .c file
+quiet_cmd_user_cc_o_c = CC [U] $@
+ cmd_user_cc_o_c = $(CC) $(user_ccflags) -c -o $@ $<
+$(user-cobjs): $(obj)/%.o: $(src)/%.c FORCE
+ $(call if_changed_dep,user_cc_o_c)
+
+targets += $(user-csingle) $(user-cmulti) $(user-cobjs)
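As a usage sketch, a hypothetical Kbuild makefile exercising both the single-file and the multi-object forms handled above might look like this (all names invented):

    mkdir -p samples/hypothetical
    cat <<'EOF' > samples/hypothetical/Makefile
    # SPDX-License-Identifier: GPL-2.0
    userprogs := simple demo
    demo-objs := demo-main.o demo-util.o
    always-y  := $(userprogs)

    userccflags     += -I usr/include
    demo-userldlibs := -lpthread
    EOF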
diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
index e38871e64db6..59c00529dc7c 100755
--- a/scripts/atomic/fallbacks/acquire
+++ b/scripts/atomic/fallbacks/acquire
@@ -1,8 +1,8 @@
cat <<EOF
-static inline ${ret}
-${atomic}_${pfx}${name}${sfx}_acquire(${params})
+static __always_inline ${ret}
+${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
- ${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+ ${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_acquire_fence();
return ret;
}
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative
index e6f4815637de..a66635bceefb 100755
--- a/scripts/atomic/fallbacks/add_negative
+++ b/scripts/atomic/fallbacks/add_negative
@@ -1,6 +1,6 @@
cat <<EOF
/**
- * ${atomic}_add_negative - add and test if negative
+ * ${arch}${atomic}_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type ${atomic}_t
*
@@ -8,9 +8,9 @@ cat <<EOF
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool
-${atomic}_add_negative(${int} i, ${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
- return ${atomic}_add_return(i, v) < 0;
+ return ${arch}${atomic}_add_return(i, v) < 0;
}
EOF
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
index 792533885fbf..2ff598a3f9ec 100755
--- a/scripts/atomic/fallbacks/add_unless
+++ b/scripts/atomic/fallbacks/add_unless
@@ -1,6 +1,6 @@
cat << EOF
/**
- * ${atomic}_add_unless - add unless the number is already a given value
+ * ${arch}${atomic}_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -8,9 +8,9 @@ cat << EOF
* Atomically adds @a to @v, if @v was not already @u.
* Returns true if the addition was done.
*/
-static inline bool
-${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+static __always_inline bool
+${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
- return ${atomic}_fetch_add_unless(v, a, u) != u;
+ return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot
index 9f3a3216b5e3..3f18663dcefb 100755
--- a/scripts/atomic/fallbacks/andnot
+++ b/scripts/atomic/fallbacks/andnot
@@ -1,7 +1,7 @@
cat <<EOF
-static inline ${ret}
-${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+static __always_inline ${ret}
+${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
- ${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
+ ${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec
index 10bbc82be31d..e2e01f0574bb 100755
--- a/scripts/atomic/fallbacks/dec
+++ b/scripts/atomic/fallbacks/dec
@@ -1,7 +1,7 @@
cat <<EOF
-static inline ${ret}
-${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+static __always_inline ${ret}
+${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
- ${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
+ ${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test
index 0ce7103b3df2..e8a5e492eb5f 100755
--- a/scripts/atomic/fallbacks/dec_and_test
+++ b/scripts/atomic/fallbacks/dec_and_test
@@ -1,15 +1,15 @@
cat <<EOF
/**
- * ${atomic}_dec_and_test - decrement and test
+ * ${arch}${atomic}_dec_and_test - decrement and test
* @v: pointer of type ${atomic}_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool
-${atomic}_dec_and_test(${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_dec_and_test(${atomic}_t *v)
{
- return ${atomic}_dec_return(v) == 0;
+ return ${arch}${atomic}_dec_return(v) == 0;
}
EOF
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
index c52eacec43c8..527adec89c37 100755
--- a/scripts/atomic/fallbacks/dec_if_positive
+++ b/scripts/atomic/fallbacks/dec_if_positive
@@ -1,14 +1,14 @@
cat <<EOF
-static inline ${ret}
-${atomic}_dec_if_positive(${atomic}_t *v)
+static __always_inline ${ret}
+${arch}${atomic}_dec_if_positive(${atomic}_t *v)
{
- ${int} dec, c = ${atomic}_read(v);
+ ${int} dec, c = ${arch}${atomic}_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- } while (!${atomic}_try_cmpxchg(v, &c, dec));
+ } while (!${arch}${atomic}_try_cmpxchg(v, &c, dec));
return dec;
}
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
index 8a2578f14268..dcab6848ca1e 100755
--- a/scripts/atomic/fallbacks/dec_unless_positive
+++ b/scripts/atomic/fallbacks/dec_unless_positive
@@ -1,13 +1,13 @@
cat <<EOF
-static inline bool
-${atomic}_dec_unless_positive(${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_dec_unless_positive(${atomic}_t *v)
{
- ${int} c = ${atomic}_read(v);
+ ${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c > 0))
return false;
- } while (!${atomic}_try_cmpxchg(v, &c, c - 1));
+ } while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1));
return true;
}
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
index 82f68fa6931a..3764fc8ce945 100755
--- a/scripts/atomic/fallbacks/fence
+++ b/scripts/atomic/fallbacks/fence
@@ -1,10 +1,10 @@
cat <<EOF
-static inline ${ret}
-${atomic}_${pfx}${name}${sfx}(${params})
+static __always_inline ${ret}
+${arch}${atomic}_${pfx}${name}${sfx}(${params})
{
${ret} ret;
__atomic_pre_full_fence();
- ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+ ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
__atomic_post_full_fence();
return ret;
}
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
index d2c091db7eae..0e0b9aef1515 100755
--- a/scripts/atomic/fallbacks/fetch_add_unless
+++ b/scripts/atomic/fallbacks/fetch_add_unless
@@ -1,6 +1,6 @@
cat << EOF
/**
- * ${atomic}_fetch_add_unless - add unless the number is already a given value
+ * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value
* @v: pointer of type ${atomic}_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -8,15 +8,15 @@ cat << EOF
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns original value of @v
*/
-static inline ${int}
-${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+static __always_inline ${int}
+${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
- ${int} c = ${atomic}_read(v);
+ ${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c == u))
break;
- } while (!${atomic}_try_cmpxchg(v, &c, c + a));
+ } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a));
return c;
}
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc
index f866b3ad2353..15ec62946e8c 100755
--- a/scripts/atomic/fallbacks/inc
+++ b/scripts/atomic/fallbacks/inc
@@ -1,7 +1,7 @@
cat <<EOF
-static inline ${ret}
-${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+static __always_inline ${ret}
+${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
- ${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
+ ${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test
index 4e2068869f7e..cecc8322a21f 100755
--- a/scripts/atomic/fallbacks/inc_and_test
+++ b/scripts/atomic/fallbacks/inc_and_test
@@ -1,15 +1,15 @@
cat <<EOF
/**
- * ${atomic}_inc_and_test - increment and test
+ * ${arch}${atomic}_inc_and_test - increment and test
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool
-${atomic}_inc_and_test(${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_inc_and_test(${atomic}_t *v)
{
- return ${atomic}_inc_return(v) == 0;
+ return ${arch}${atomic}_inc_return(v) == 0;
}
EOF
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero
index a7c45c8d107c..50f2d4d48279 100755
--- a/scripts/atomic/fallbacks/inc_not_zero
+++ b/scripts/atomic/fallbacks/inc_not_zero
@@ -1,14 +1,14 @@
cat <<EOF
/**
- * ${atomic}_inc_not_zero - increment unless the number is zero
+ * ${arch}${atomic}_inc_not_zero - increment unless the number is zero
* @v: pointer of type ${atomic}_t
*
* Atomically increments @v by 1, if @v is non-zero.
* Returns true if the increment was done.
*/
-static inline bool
-${atomic}_inc_not_zero(${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_inc_not_zero(${atomic}_t *v)
{
- return ${atomic}_add_unless(v, 1, 0);
+ return ${arch}${atomic}_add_unless(v, 1, 0);
}
EOF
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
index 0c266e71dbd4..87629e0d4a80 100755
--- a/scripts/atomic/fallbacks/inc_unless_negative
+++ b/scripts/atomic/fallbacks/inc_unless_negative
@@ -1,13 +1,13 @@
cat <<EOF
-static inline bool
-${atomic}_inc_unless_negative(${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_inc_unless_negative(${atomic}_t *v)
{
- ${int} c = ${atomic}_read(v);
+ ${int} c = ${arch}${atomic}_read(v);
do {
if (unlikely(c < 0))
return false;
- } while (!${atomic}_try_cmpxchg(v, &c, c + 1));
+ } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1));
return true;
}
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
index 75863b5203f7..341a88dccaa7 100755
--- a/scripts/atomic/fallbacks/read_acquire
+++ b/scripts/atomic/fallbacks/read_acquire
@@ -1,6 +1,6 @@
cat <<EOF
-static inline ${ret}
-${atomic}_read_acquire(const ${atomic}_t *v)
+static __always_inline ${ret}
+${arch}${atomic}_read_acquire(const ${atomic}_t *v)
{
return smp_load_acquire(&(v)->counter);
}
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release
index 3f628a3802d9..f8906d537c0f 100755
--- a/scripts/atomic/fallbacks/release
+++ b/scripts/atomic/fallbacks/release
@@ -1,8 +1,8 @@
cat <<EOF
-static inline ${ret}
-${atomic}_${pfx}${name}${sfx}_release(${params})
+static __always_inline ${ret}
+${arch}${atomic}_${pfx}${name}${sfx}_release(${params})
{
__atomic_release_fence();
- ${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+ ${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
index 45bb5e0cfc08..76068272d5f5 100755
--- a/scripts/atomic/fallbacks/set_release
+++ b/scripts/atomic/fallbacks/set_release
@@ -1,6 +1,6 @@
cat <<EOF
-static inline void
-${atomic}_set_release(${atomic}_t *v, ${int} i)
+static __always_inline void
+${arch}${atomic}_set_release(${atomic}_t *v, ${int} i)
{
smp_store_release(&(v)->counter, i);
}
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test
index 289ef17a2d7a..c580f4c2136e 100755
--- a/scripts/atomic/fallbacks/sub_and_test
+++ b/scripts/atomic/fallbacks/sub_and_test
@@ -1,6 +1,6 @@
cat <<EOF
/**
- * ${atomic}_sub_and_test - subtract value from variable and test result
+ * ${arch}${atomic}_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type ${atomic}_t
*
@@ -8,9 +8,9 @@ cat <<EOF
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool
-${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+static __always_inline bool
+${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
- return ${atomic}_sub_return(i, v) == 0;
+ return ${arch}${atomic}_sub_return(i, v) == 0;
}
EOF
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg
index 4ed85e2f5378..06db0f738e45 100755
--- a/scripts/atomic/fallbacks/try_cmpxchg
+++ b/scripts/atomic/fallbacks/try_cmpxchg
@@ -1,9 +1,9 @@
cat <<EOF
-static inline bool
-${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+static __always_inline bool
+${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
${int} r, o = *old;
- r = ${atomic}_cmpxchg${order}(v, o, new);
+ r = ${arch}${atomic}_cmpxchg${order}(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index 1bd7c1707633..0fd1cf0c2b94 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -2,10 +2,11 @@
# SPDX-License-Identifier: GPL-2.0
ATOMICDIR=$(dirname $0)
+ARCH=$2
. ${ATOMICDIR}/atomic-tbl.sh
-#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
+#gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_template_fallback()
{
local template="$1"; shift
@@ -14,10 +15,11 @@ gen_template_fallback()
local name="$1"; shift
local sfx="$1"; shift
local order="$1"; shift
+ local arch="$1"; shift
local atomic="$1"; shift
local int="$1"; shift
- local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+ local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}"
local ret="$(gen_ret_type "${meta}" "${int}")"
local retstmt="$(gen_ret_stmt "${meta}")"
@@ -32,7 +34,7 @@ gen_template_fallback()
fi
}
-#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
+#gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...)
gen_proto_fallback()
{
local meta="$1"; shift
@@ -56,16 +58,17 @@ cat << EOF
EOF
}
-#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
+#gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
gen_proto_order_variants()
{
local meta="$1"; shift
local pfx="$1"; shift
local name="$1"; shift
local sfx="$1"; shift
- local atomic="$1"
+ local arch="$1"
+ local atomic="$2"
- local basename="${atomic}_${pfx}${name}${sfx}"
+ local basename="${arch}${atomic}_${pfx}${name}${sfx}"
local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
@@ -94,7 +97,7 @@ gen_proto_order_variants()
gen_basic_fallbacks "${basename}"
if [ ! -z "${template}" ]; then
- printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
+ printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
@@ -149,20 +152,19 @@ cat << EOF
#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H
+#include <linux/compiler.h>
+
EOF
-for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
+for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do
gen_xchg_fallbacks "${xchg}"
done
grep '^[a-z]' "$1" | while read name meta args; do
- gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+ gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args}
done
cat <<EOF
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
@@ -170,12 +172,9 @@ cat <<EOF
EOF
grep '^[a-z]' "$1" | while read name meta args; do
- gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+ gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args}
done
cat <<EOF
-#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-
#endif /* _LINUX_ATOMIC_FALLBACK_H */
EOF
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index e09812372b17..6afadf73da17 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -20,7 +20,7 @@ gen_param_check()
# We don't write to constant parameters
[ ${type#c} != ${type} ] && rw="read"
- printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n"
+ printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n"
}
#gen_param_check(arg...)
@@ -84,7 +84,7 @@ gen_proto_order_variant()
[ ! -z "${guard}" ] && printf "#if ${guard}\n"
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
${atomicname}(${params})
{
${checks}
@@ -107,7 +107,7 @@ cat <<EOF
#define ${xchg}(ptr, ...) \\
({ \\
typeof(ptr) __ai_ptr = (ptr); \\
- kasan_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
+ instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
arch_${xchg}(__ai_ptr, __VA_ARGS__); \\
})
EOF
@@ -147,7 +147,8 @@ cat << EOF
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
EOF
diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh
index c240a7231b2e..e318d3f92e53 100755
--- a/scripts/atomic/gen-atomic-long.sh
+++ b/scripts/atomic/gen-atomic-long.sh
@@ -46,7 +46,7 @@ gen_proto_order_variant()
local retstmt="$(gen_ret_stmt "${meta}")"
cat <<EOF
-static inline ${ret}
+static __always_inline ${ret}
atomic_long_${name}(${params})
{
${retstmt}${atomic}_${name}(${argscast});
@@ -64,6 +64,7 @@ cat << EOF
#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H
+#include <linux/compiler.h>
#include <asm/types.h>
#ifdef CONFIG_64BIT
diff --git a/scripts/atomic/gen-atomics.sh b/scripts/atomic/gen-atomics.sh
index 000dc6437893..d29e159ef489 100644
--- a/scripts/atomic/gen-atomics.sh
+++ b/scripts/atomic/gen-atomics.sh
@@ -10,10 +10,11 @@ LINUXDIR=${ATOMICDIR}/../..
cat <<EOF |
gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h
gen-atomic-long.sh asm-generic/atomic-long.h
+gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_
gen-atomic-fallback.sh linux/atomic-fallback.h
EOF
-while read script header; do
- /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header}
+while read script header args; do
+ /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}
HASH="$(sha1sum ${LINUXDIR}/include/${header})"
HASH="${HASH%% *}"
printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 877ca2c88246..d98540552941 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -160,7 +160,7 @@ struct item {
struct item *next;
unsigned int len;
unsigned int hash;
- char name[0];
+ char name[];
};
#define HASHSZ 256
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index bf9e0e87a6ef..4c820607540b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -246,6 +246,8 @@ list_types(0) if ($list_types);
$fix = 1 if ($fix_inplace);
$check_orig = $check;
+die "$P: --git cannot be used with --file or --fix\n" if ($git && ($file || $fix));
+
my $exit = 0;
my $perl_version_ok = 1;
@@ -269,11 +271,11 @@ if ($color =~ /^[01]$/) {
} elsif ($color =~ /^auto$/i) {
$color = (-t STDOUT);
} else {
- die "Invalid color mode: $color\n";
+ die "$P: Invalid color mode: $color\n";
}
# skip TAB size 1 to avoid additional checks on $tabsize - 1
-die "Invalid TAB size: $tabsize\n" if ($tabsize < 2);
+die "$P: Invalid TAB size: $tabsize\n" if ($tabsize < 2);
sub hash_save_array_words {
my ($hashRef, $arrayRef) = @_;
@@ -1060,6 +1062,7 @@ for my $filename (@ARGV) {
while (<$FILE>) {
chomp;
push(@rawlines, $_);
+ $vname = qq("$1") if ($filename eq '-' && $_ =~ m/^Subject:\s+(.+)/i);
}
close($FILE);
@@ -1676,8 +1679,16 @@ sub ctx_statement_level {
sub ctx_locate_comment {
my ($first_line, $end_line) = @_;
+ # If c99 comment on the current line, or the line before or after
+ my ($current_comment) = ($rawlines[$end_line - 1] =~ m@^\+.*(//.*$)@);
+ return $current_comment if (defined $current_comment);
+ ($current_comment) = ($rawlines[$end_line - 2] =~ m@^[\+ ].*(//.*$)@);
+ return $current_comment if (defined $current_comment);
+ ($current_comment) = ($rawlines[$end_line] =~ m@^[\+ ].*(//.*$)@);
+ return $current_comment if (defined $current_comment);
+
# Catch a comment on the end of the line itself.
- my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
+ ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
return $current_comment if (defined $current_comment);
# Look through the context and try and figure out if there is a
@@ -2396,7 +2407,7 @@ sub process {
if ($rawline=~/^\+\+\+\s+(\S+)/) {
$setup_docs = 0;
- if ($1 =~ m@Documentation/admin-guide/kernel-parameters.rst$@) {
+ if ($1 =~ m@Documentation/admin-guide/kernel-parameters.txt$@) {
$setup_docs = 1;
}
#next;
@@ -2596,7 +2607,7 @@ sub process {
if (($last_binding_patch != -1) &&
($last_binding_patch ^ $is_binding_patch)) {
WARN("DT_SPLIT_BINDING_PATCH",
- "DT binding docs and includes should be a separate patch. See: Documentation/devicetree/bindings/submitting-patches.txt\n");
+ "DT binding docs and includes should be a separate patch. See: Documentation/devicetree/bindings/submitting-patches.rst\n");
}
}
@@ -3062,14 +3073,43 @@ sub process {
#print "is_start<$is_start> is_end<$is_end> length<$length>\n";
}
-# check for MAINTAINERS entries that don't have the right form
- if ($realfile =~ /^MAINTAINERS$/ &&
- $rawline =~ /^\+[A-Z]:/ &&
- $rawline !~ /^\+[A-Z]:\t\S/) {
- if (WARN("MAINTAINERS_STYLE",
- "MAINTAINERS entries use one tab after TYPE:\n" . $herecurr) &&
- $fix) {
- $fixed[$fixlinenr] =~ s/^(\+[A-Z]):\s*/$1:\t/;
+# check MAINTAINERS entries
+ if ($realfile =~ /^MAINTAINERS$/) {
+# check MAINTAINERS entries for the right form
+ if ($rawline =~ /^\+[A-Z]:/ &&
+ $rawline !~ /^\+[A-Z]:\t\S/) {
+ if (WARN("MAINTAINERS_STYLE",
+ "MAINTAINERS entries use one tab after TYPE:\n" . $herecurr) &&
+ $fix) {
+ $fixed[$fixlinenr] =~ s/^(\+[A-Z]):\s*/$1:\t/;
+ }
+ }
+# check MAINTAINERS entries for the right ordering too
+ my $preferred_order = 'MRLSWQBCPTFXNK';
+ if ($rawline =~ /^\+[A-Z]:/ &&
+ $prevrawline =~ /^[\+ ][A-Z]:/) {
+ $rawline =~ /^\+([A-Z]):\s*(.*)/;
+ my $cur = $1;
+ my $curval = $2;
+ $prevrawline =~ /^[\+ ]([A-Z]):\s*(.*)/;
+ my $prev = $1;
+ my $prevval = $2;
+ my $curindex = index($preferred_order, $cur);
+ my $previndex = index($preferred_order, $prev);
+ if ($curindex < 0) {
+ WARN("MAINTAINERS_STYLE",
+ "Unknown MAINTAINERS entry type: '$cur'\n" . $herecurr);
+ } else {
+ if ($previndex >= 0 && $curindex < $previndex) {
+ WARN("MAINTAINERS_STYLE",
+ "Misordered MAINTAINERS entry - list '$cur:' before '$prev:'\n" . $hereprev);
+ } elsif ((($prev eq 'F' && $cur eq 'F') ||
+ ($prev eq 'X' && $cur eq 'X')) &&
+ ($prevval cmp $curval) > 0) {
+ WARN("MAINTAINERS_STYLE",
+ "Misordered MAINTAINERS entry - list file patterns in alphabetic order\n" . $hereprev);
+ }
+ }
}
}
@@ -5905,6 +5945,14 @@ sub process {
}
}
+# check for data_race without a comment.
+ if ($line =~ /\bdata_race\s*\(/) {
+ if (!ctx_has_comment($first_line, $linenr)) {
+ WARN("DATA_RACE",
+ "data_race without comment\n" . $herecurr);
+ }
+ }
+
# check for smp_read_barrier_depends and read_barrier_depends
if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) {
WARN("READ_BARRIER_DEPENDS",
@@ -6348,7 +6396,7 @@ sub process {
if (!grep(/$name/, @setup_docs)) {
CHK("UNDOCUMENTED_SETUP",
- "__setup appears un-documented -- check Documentation/admin-guide/kernel-parameters.rst\n" . $herecurr);
+ "__setup appears un-documented -- check Documentation/admin-guide/kernel-parameters.txt\n" . $herecurr);
}
}
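Two behavioural changes stand out in this checkpatch update: --git is now mutually exclusive with --file and --fix, and a data_race() with no nearby comment draws a warning. Exercising the latter on a work-in-progress patch (file name hypothetical):

    ./scripts/checkpatch.pl --strict my-data-race-fix.patch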
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 371bd17a4983..d2c38584ece6 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -34,8 +34,10 @@ use strict;
# $& (whole re) matches the complete objdump line with the stack growth
# $1 (first bracket) matches the dynamic amount of the stack growth
#
+# $sub: subroutine for special handling to check stack usage.
+#
# use anything else and feel the pain ;)
-my (@stack, $re, $dre, $x, $xs, $funcre);
+my (@stack, $re, $dre, $sub, $x, $xs, $funcre, $min_stack);
{
my $arch = shift;
if ($arch eq "") {
@@ -43,6 +45,11 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
chomp($arch);
}
+ $min_stack = shift;
+ if ($min_stack eq "" || $min_stack !~ /^\d+$/) {
+ $min_stack = 100;
+ }
+
$x = "[0-9a-f]"; # hex character
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
@@ -53,7 +60,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$dre = qr/^.*sub.*sp, sp, #(0x$x{1,8})/o;
} elsif ($arch eq 'arm') {
#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
- $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
+ $re = qr/.*sub.*sp, sp, #([0-9]{1,4})/o;
+ $sub = \&arm_push_handling;
} elsif ($arch =~ /^x86(_64)?$/ || $arch =~ /^i[3456]86$/) {
#c0105234: 81 ec ac 05 00 00 sub $0x5ac,%esp
# or
@@ -107,13 +115,50 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
}
#
+# To count stack usage of push {*, fp, ip, lr, pc} instruction in ARM,
+# if FRAME POINTER is enabled.
+# e.g. c01f0d48: e92ddff0 push {r4, r5, r6, r7, r8, r9, sl, fp, ip, lr, pc}
+#
+sub arm_push_handling {
+ my $regex = qr/.*push.*fp, ip, lr, pc}/o;
+ my $size = 0;
+ my $line_arg = shift;
+
+ if ($line_arg =~ m/$regex/) {
+ $size = $line_arg =~ tr/,//;
+ $size = ($size + 1) * 4;
+ }
+
+ return $size;
+}
+
+#
# main()
#
-my ($func, $file, $lastslash);
+my ($func, $file, $lastslash, $total_size, $addr, $intro);
+
+$total_size = 0;
while (my $line = <STDIN>) {
if ($line =~ m/$funcre/) {
$func = $1;
+ next if $line !~ m/^($xs*)/;
+ if ($total_size > $min_stack) {
+ push @stack, "$intro$total_size\n";
+ }
+
+ $addr = $1;
+ $addr =~ s/ /0/g;
+ $addr = "0x$addr";
+
+ $intro = "$addr $func [$file]:";
+ my $padlen = 56 - length($intro);
+ while ($padlen > 0) {
+ $intro .= ' ';
+ $padlen -= 8;
+ }
+
+ $total_size = 0;
}
elsif ($line =~ m/(.*):\s*file format/) {
$file = $1;
@@ -134,37 +179,23 @@ while (my $line = <STDIN>) {
}
next if ($size > 0x10000000);
- next if $line !~ m/^($xs*)/;
- my $addr = $1;
- $addr =~ s/ /0/g;
- $addr = "0x$addr";
-
- my $intro = "$addr $func [$file]:";
- my $padlen = 56 - length($intro);
- while ($padlen > 0) {
- $intro .= ' ';
- $padlen -= 8;
- }
- next if ($size < 100);
- push @stack, "$intro$size\n";
+ $total_size += $size;
}
elsif (defined $dre && $line =~ m/$dre/) {
- my $size = "Dynamic ($1)";
+ my $size = $1;
- next if $line !~ m/^($xs*)/;
- my $addr = $1;
- $addr =~ s/ /0/g;
- $addr = "0x$addr";
+ $size = hex($size) if ($size =~ /^0x/);
+ $total_size += $size;
+ }
+ elsif (defined $sub) {
+ my $size = &$sub($line);
- my $intro = "$addr $func [$file]:";
- my $padlen = 56 - length($intro);
- while ($padlen > 0) {
- $intro .= ' ';
- $padlen -= 8;
- }
- push @stack, "$intro$size\n";
+ $total_size += $size;
}
}
+if ($total_size > $min_stack) {
+ push @stack, "$intro$total_size\n";
+}
# Sort output by size (last field)
print sort { ($b =~ /:\t*(\d+)$/)[0] <=> ($a =~ /:\t*(\d+)$/)[0] } @stack;
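checkstack.pl now sums every stack-growing instruction per function instead of reporting each one, accepts an optional reporting threshold (default 100 bytes) as a second argument, and on ARM with frame pointers also counts the push of fp/ip/lr/pc. Typical invocation (architecture name assumed):

    objdump -d vmlinux | ./scripts/checkstack.pl x86_64 200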
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index 013ba3a57669..ce0b99fb5847 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -8,7 +8,7 @@ config HAVE_GCC_PLUGINS
menuconfig GCC_PLUGINS
bool "GCC plugins"
depends on HAVE_GCC_PLUGINS
- depends on CC_IS_GCC && GCC_VERSION >= 40800
+ depends on CC_IS_GCC
depends on $(success,$(srctree)/scripts/gcc-plugin.sh $(CC))
default y
help
diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
index 80f354289eeb..4014ba7e2fbd 100644
--- a/scripts/gcc-plugins/Makefile
+++ b/scripts/gcc-plugins/Makefile
@@ -14,7 +14,7 @@ $(objtree)/$(obj)/randomize_layout_seed.h: FORCE
$(call if_changed,create_randomize_layout_seed)
targets = randomize_layout_seed.h randomize_layout_hash.h
-hostcxxlibs-y := $(foreach p,$(GCC_PLUGIN),$(if $(findstring /,$(p)),,$(p)))
+hostcxxlibs-y := $(GCC_PLUGIN)
always-y := $(hostcxxlibs-y)
$(foreach p,$(hostcxxlibs-y:%.so=%),$(eval $(p)-objs := $(p).o))
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 6cbcd1a3e113..484d2fbf5921 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -19,6 +19,7 @@ my $V = '0.26';
use Getopt::Long qw(:config no_auto_abbrev);
use Cwd;
use File::Find;
+use File::Spec::Functions;
my $cur_path = fastgetcwd() . '/';
my $lk_path = "./";
@@ -57,7 +58,7 @@ my $status = 0;
my $letters = "";
my $keywords = 1;
my $sections = 0;
-my $file_emails = 0;
+my $email_file_emails = 0;
my $from_filename = 0;
my $pattern_depth = 0;
my $self_test = undef;
@@ -69,6 +70,12 @@ my $vcs_used = 0;
my $exit = 0;
+my @files = ();
+my @fixes = (); # If a patch description includes Fixes: lines
+my @range = ();
+my @keyword_tvi = ();
+my @file_emails = ();
+
my %commit_author_hash;
my %commit_signer_hash;
@@ -266,7 +273,7 @@ if (!GetOptions(
'pattern-depth=i' => \$pattern_depth,
'k|keywords!' => \$keywords,
'sections!' => \$sections,
- 'fe|file-emails!' => \$file_emails,
+ 'fe|file-emails!' => \$email_file_emails,
'f|file' => \$from_filename,
'find-maintainer-files' => \$find_maintainer_files,
'mpath|maintainer-path=s' => \$maintainer_path,
@@ -424,6 +431,22 @@ sub read_all_maintainer_files {
}
}
+sub maintainers_in_file {
+ my ($file) = @_;
+
+ return if ($file =~ m@\bMAINTAINERS$@);
+
+ if (-f $file && ($email_file_emails || $file =~ /\.yaml$/)) {
+ open(my $f, '<', $file)
+ or die "$P: Can't open $file: $!\n";
+ my $text = do { local($/) ; <$f> };
+ close($f);
+
+ my @poss_addr = $text =~ m$[A-Za-zÀ-ÿ\"\' \,\.\+-]*\s*[\,]*\s*[\(\<\{]{0,1}[A-Za-z0-9_\.\+-]+\@[A-Za-z0-9\.-]+\.[A-Za-z0-9]+[\)\>\}]{0,1}$g;
+ push(@file_emails, clean_file_emails(@poss_addr));
+ }
+}
+
#
# Read mail address map
#
@@ -504,18 +527,13 @@ sub read_mailmap {
## use the filenames on the command line or find the filenames in the patchfiles
-my @files = ();
-my @fixes = (); # If a patch description includes Fixes: lines
-my @range = ();
-my @keyword_tvi = ();
-my @file_emails = ();
-
if (!@ARGV) {
push(@ARGV, "&STDIN");
}
foreach my $file (@ARGV) {
if ($file ne "&STDIN") {
+ $file = canonpath($file);
##if $file is a directory and it lacks a trailing slash, add one
if ((-d $file)) {
$file =~ s@([^/])$@$1/@;
@@ -527,7 +545,7 @@ foreach my $file (@ARGV) {
$file =~ s/^\Q${cur_path}\E//; #strip any absolute path
$file =~ s/^\Q${lk_path}\E//; #or the path to the lk tree
push(@files, $file);
- if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) {
+ if ($file ne "MAINTAINERS" && -f $file && $keywords) {
open(my $f, '<', $file)
or die "$P: Can't open $file: $!\n";
my $text = do { local($/) ; <$f> };
@@ -539,10 +557,6 @@ foreach my $file (@ARGV) {
}
}
}
- if ($file_emails) {
- my @poss_addr = $text =~ m$[A-Za-zÀ-ÿ\"\' \,\.\+-]*\s*[\,]*\s*[\(\<\{]{0,1}[A-Za-z0-9_\.\+-]+\@[A-Za-z0-9\.-]+\.[A-Za-z0-9]+[\)\>\}]{0,1}$g;
- push(@file_emails, clean_file_emails(@poss_addr));
- }
}
} else {
my $file_cnt = @files;
@@ -923,6 +937,8 @@ sub get_maintainers {
print("\n");
}
}
+
+ maintainers_in_file($file);
}
if ($keywords) {
@@ -1835,7 +1851,7 @@ tm toggle maintainers
tg toggle git entries
tl toggle open list entries
ts toggle subscriber list entries
-f emails in file [$file_emails]
+f emails in file [$email_file_emails]
k keywords in file [$keywords]
r remove duplicates [$email_remove_duplicates]
p# pattern match depth [$pattern_depth]
@@ -1960,7 +1976,7 @@ EOT
bool_invert(\$email_git_all_signature_types);
$rerun = 1;
} elsif ($sel eq "f") {
- bool_invert(\$file_emails);
+ bool_invert(\$email_file_emails);
$rerun = 1;
} elsif ($sel eq "r") {
bool_invert(\$email_remove_duplicates);
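Address harvesting from file contents now lives in maintainers_in_file(), which always scans .yaml binding files and honours --file-emails for everything else. For example (paths illustrative):

    ./scripts/get_maintainer.pl -f Documentation/devicetree/bindings/vendor,example.yaml
    ./scripts/get_maintainer.pl --file-emails -f drivers/char/hypothetical.c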
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index a07668a5c36b..955cf3aedf21 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -64,7 +64,7 @@ configs=$(sed -e '
d
' $OUTFILE)
-# The entries in the following list are not warned.
+# The entries in the following list do not result in an error.
# Please do not add a new entry. This list is only for existing ones.
# The list will be reduced gradually, and deleted eventually. (hopefully)
#
@@ -81,9 +81,6 @@ arch/ia64/include/uapi/asm/cmpxchg.h:CONFIG_IA64_DEBUG_CMPXCHG
arch/m68k/include/uapi/asm/ptrace.h:CONFIG_COLDFIRE
arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_NO
arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_SUPPORT
-arch/sh/include/uapi/asm/ptrace.h:CONFIG_CPU_SH5
-arch/sh/include/uapi/asm/sigcontext.h:CONFIG_CPU_SH5
-arch/sh/include/uapi/asm/stat.h:CONFIG_CPU_SH5
arch/x86/include/uapi/asm/auxvec.h:CONFIG_IA32_EMULATION
arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64
arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -98,18 +95,19 @@ include/uapi/linux/raw.h:CONFIG_MAX_RAW_DEVS
for c in $configs
do
- warn=1
+ leak_error=1
for ignore in $config_leak_ignores
do
if echo "$INFILE:$c" | grep -q "$ignore$"; then
- warn=
+ leak_error=
break
fi
done
- if [ "$warn" = 1 ]; then
- echo "warning: $INFILE: leak $c to user-space" >&2
+ if [ "$leak_error" = 1 ]; then
+ echo "error: $INFILE: leak $c to user-space" >&2
+ exit 1
fi
done
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index c9d0a4a8efb3..426881ea954f 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -96,11 +96,13 @@ configfiles=$(wildcard $(srctree)/kernel/configs/$@ $(srctree)/arch/$(SRCARCH)/c
PHONY += kvmconfig
kvmconfig: kvm_guest.config
- @:
+ @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
+ @echo >&2 " Please use 'make $<' instead."
PHONY += xenconfig
xenconfig: xen.config
- @:
+ @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
+ @echo >&2 " Please use 'make $<' instead."
PHONY += tinyconfig
tinyconfig:
@@ -123,7 +125,9 @@ help:
@echo ' gconfig - Update current config utilising a GTK+ based front-end'
@echo ' oldconfig - Update current config utilising a provided .config as base'
@echo ' localmodconfig - Update current config disabling modules not loaded'
+ @echo ' except those preserved by LMC_KEEP environment variable'
@echo ' localyesconfig - Update current config converting local mods to core'
+ @echo ' except those preserved by LMC_KEEP environment variable'
@echo ' defconfig - New config with default from ARCH supplied defconfig'
@echo ' savedefconfig - Save current config as ./defconfig (minimal config)'
@echo ' allnoconfig - New config where all options are answered with no'
@@ -137,9 +141,6 @@ help:
@echo ' helpnewconfig - List new options and help text'
@echo ' olddefconfig - Same as oldconfig but sets new symbols to their'
@echo ' default value without prompting'
- @echo ' kvmconfig - Enable additional options for kvm guest kernel support'
- @echo ' xenconfig - Enable additional options for xen dom0 and guest kernel'
- @echo ' support'
@echo ' tinyconfig - Configure the tiniest possible kernel'
@echo ' testconfig - Run Kconfig unit tests (requires python3 and pytest)'
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index e436ba44c9c5..a5fbd6ccc006 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -65,7 +65,8 @@ void menu_add_entry(struct symbol *sym)
struct menu *menu_add_menu(void)
{
last_entry_ptr = &current_entry->list;
- return current_menu = current_entry;
+ current_menu = current_entry;
+ return current_menu;
}
void menu_end_menu(void)
diff --git a/scripts/kconfig/parser.y b/scripts/kconfig/parser.y
index 708b6c4b13ca..190f1117f35a 100644
--- a/scripts/kconfig/parser.y
+++ b/scripts/kconfig/parser.y
@@ -119,20 +119,24 @@ mainmenu_stmt: T_MAINMENU T_WORD_QUOTE T_EOL
stmt_list:
/* empty */
- | stmt_list common_stmt
+ | stmt_list assignment_stmt
| stmt_list choice_stmt
+ | stmt_list comment_stmt
+ | stmt_list config_stmt
+ | stmt_list if_stmt
| stmt_list menu_stmt
+ | stmt_list menuconfig_stmt
+ | stmt_list source_stmt
| stmt_list T_WORD error T_EOL { zconf_error("unknown statement \"%s\"", $2); }
| stmt_list error T_EOL { zconf_error("invalid statement"); }
;
-common_stmt:
- if_stmt
- | comment_stmt
- | config_stmt
- | menuconfig_stmt
- | source_stmt
- | assignment_stmt
+stmt_list_in_choice:
+ /* empty */
+ | stmt_list_in_choice comment_stmt
+ | stmt_list_in_choice config_stmt
+ | stmt_list_in_choice if_stmt_in_choice
+ | stmt_list_in_choice error T_EOL { zconf_error("invalid statement"); }
;
/* config/menuconfig entry */
@@ -254,7 +258,7 @@ choice_end: end
}
};
-choice_stmt: choice_entry choice_block choice_end
+choice_stmt: choice_entry stmt_list_in_choice choice_end
;
choice_option_list:
@@ -305,11 +309,6 @@ default:
| T_DEF_BOOL { $$ = S_BOOLEAN; }
| T_DEF_TRISTATE { $$ = S_TRISTATE; }
-choice_block:
- /* empty */
- | choice_block common_stmt
-;
-
/* if entry */
if_entry: T_IF expr T_EOL
@@ -331,6 +330,9 @@ if_end: end
if_stmt: if_entry stmt_list if_end
;
+if_stmt_in_choice: if_entry stmt_list_in_choice if_end
+;
+
/* menu entry */
menu: T_MENU T_WORD_QUOTE T_EOL
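The grammar now spells out what may appear directly inside a choice block: only comment, config and if statements, so menus, sources and variable assignments there are rejected at parse time rather than being accepted and mis-handled. A fragment that remains valid under the tightened rules (hypothetical symbols):

    cat <<'EOF' > Kconfig.example
    choice
            prompt "example choice"

    config FOO
            bool "foo"

    if FOO
    config BAR
            bool "bar"
    endif

    endchoice
    EOF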
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index e2f8504f5a2d..19857d18d814 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -143,6 +143,7 @@ my %depends;
my %selects;
my %prompts;
my %objects;
+my %config2kfile;
my $var;
my $iflevel = 0;
my @ifdeps;
@@ -201,6 +202,7 @@ sub read_kconfig {
if (/^\s*(menu)?config\s+(\S+)\s*$/) {
$state = "NEW";
$config = $2;
+ $config2kfile{"CONFIG_$config"} = $kconfig;
# Add depends for 'if' nesting
for (my $i = 0; $i < $iflevel; $i++) {
@@ -591,6 +593,20 @@ while ($repeat) {
}
my %setconfigs;
+my @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
+
+sub in_preserved_kconfigs {
+ my $kconfig = $config2kfile{$_[0]};
+ if (!defined($kconfig)) {
+ return 0;
+ }
+ foreach my $excl (@preserved_kconfigs) {
+ if($kconfig =~ /^$excl/) {
+ return 1;
+ }
+ }
+ return 0;
+}
# Finally, read the .config file and turn off any module enabled that
# we could not find a reason to keep enabled.
@@ -644,6 +660,11 @@ foreach my $line (@config_file) {
}
if (/^(CONFIG.*)=(m|y)/) {
+ if (in_preserved_kconfigs($1)) {
+ dprint "Preserve config $1";
+ print;
+ next;
+ }
if (defined($configs{$1})) {
if ($localyesconfig) {
$setconfigs{$1} = 'y';
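streamline_config.pl now consults LMC_KEEP, a colon-separated list of path prefixes matched against the Kconfig file that defines each symbol; matching options are preserved even when the corresponding module is not currently loaded. Typical use:

    make LMC_KEEP="drivers/usb:drivers/gpu:fs" localmodconfig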
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 3dc81397d003..9363e37b8870 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -831,7 +831,7 @@ struct symbol *sym_lookup(const char *name, int flags)
memset(symbol, 0, sizeof(*symbol));
symbol->name = new_name;
symbol->type = S_UNKNOWN;
- symbol->flags |= flags;
+ symbol->flags = flags;
symbol->next = symbol_hash[hash];
symbol_hash[hash] = symbol;
diff --git a/scripts/kconfig/tests/rand_nested_choice/Kconfig b/scripts/kconfig/tests/rand_nested_choice/Kconfig
deleted file mode 100644
index 8350de7f732b..000000000000
--- a/scripts/kconfig/tests/rand_nested_choice/Kconfig
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-choice
- prompt "choice"
-
-config A
- bool "A"
-
-config B
- bool "B"
-
-if B
-choice
- prompt "sub choice"
-
-config C
- bool "C"
-
-config D
- bool "D"
-
-if D
-choice
- prompt "subsub choice"
-
-config E
- bool "E"
-
-endchoice
-endif # D
-
-endchoice
-endif # B
-
-endchoice
diff --git a/scripts/kconfig/tests/rand_nested_choice/__init__.py b/scripts/kconfig/tests/rand_nested_choice/__init__.py
deleted file mode 100644
index 9e4b2db53581..000000000000
--- a/scripts/kconfig/tests/rand_nested_choice/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-"""
-Set random values recursively in nested choices.
-
-Kconfig can create a choice-in-choice structure by using 'if' statement.
-randconfig should correctly set random choice values.
-
-Related Linux commit: 3b9a19e08960e5cdad5253998637653e592a3c29
-"""
-
-
-def test(conf):
- for i in range(20):
- assert conf.randconfig() == 0
- assert (conf.config_contains('expected_stdout0') or
- conf.config_contains('expected_stdout1') or
- conf.config_contains('expected_stdout2'))
diff --git a/scripts/kconfig/tests/rand_nested_choice/expected_stdout0 b/scripts/kconfig/tests/rand_nested_choice/expected_stdout0
deleted file mode 100644
index 05450f3d4eb5..000000000000
--- a/scripts/kconfig/tests/rand_nested_choice/expected_stdout0
+++ /dev/null
@@ -1,2 +0,0 @@
-CONFIG_A=y
-# CONFIG_B is not set
diff --git a/scripts/kconfig/tests/rand_nested_choice/expected_stdout1 b/scripts/kconfig/tests/rand_nested_choice/expected_stdout1
deleted file mode 100644
index 37ab29584157..000000000000
--- a/scripts/kconfig/tests/rand_nested_choice/expected_stdout1
+++ /dev/null
@@ -1,4 +0,0 @@
-# CONFIG_A is not set
-CONFIG_B=y
-CONFIG_C=y
-# CONFIG_D is not set
diff --git a/scripts/kconfig/tests/rand_nested_choice/expected_stdout2 b/scripts/kconfig/tests/rand_nested_choice/expected_stdout2
deleted file mode 100644
index 849ff47e9848..000000000000
--- a/scripts/kconfig/tests/rand_nested_choice/expected_stdout2
+++ /dev/null
@@ -1,5 +0,0 @@
-# CONFIG_A is not set
-CONFIG_B=y
-# CONFIG_C is not set
-CONFIG_D=y
-CONFIG_E=y
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index f68d76dd97ba..b4c963f8364e 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -321,7 +321,7 @@ if (defined($ENV{'KBUILD_VERBOSE'})) {
# Generated docbook code is inserted in a template at a point where
# docbook v3.1 requires a non-zero sequence of RefEntry's; see:
-# http://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
+# https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
# We keep track of number of generated entries and generate a dummy
# if needs be to ensure the expanded template can be postprocessed
# into html.
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 3adef49250af..57cb14bd8925 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -241,8 +241,6 @@ on_signals()
}
trap on_signals HUP INT QUIT TERM
-#
-#
# Use "make V=1" to debug this script
case "${KBUILD_VERBOSE}" in
*1*)
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 5b80a4699740..baf3ab8d9d49 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -6,7 +6,7 @@ ARCH=$2
SMP=$3
PREEMPT=$4
PREEMPT_RT=$5
-CC=$6
+CC_VERSION="$6"
LD=$7
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
@@ -62,7 +62,6 @@ UTS_VERSION="$(echo $UTS_VERSION $CONFIG_FLAGS $TIMESTAMP | cut -b -$UTS_LEN)"
printf '#define LINUX_COMPILE_BY "%s"\n' "$LINUX_COMPILE_BY"
echo \#define LINUX_COMPILE_HOST \"$LINUX_COMPILE_HOST\"
- CC_VERSION=$($CC -v 2>&1 | grep ' version ' | sed 's/[[:space:]]*$//')
LD_VERSION=$($LD -v | head -n1 | sed 's/(compatible with [^)]*)//' \
| sed 's/[[:space:]]*$//')
printf '#define LINUX_COMPILER "%s"\n' "$CC_VERSION, $LD_VERSION"
diff --git a/scripts/mksysmap b/scripts/mksysmap
index a35acc0d0b82..9aa23d15862a 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -41,4 +41,4 @@
# so we just ignore them to let readprofile continue to work.
# (At least sparc64 has __crc_ in the middle).
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( .L\)' > $2
+$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 02d5d79da284..9599e2a3f1e6 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -954,6 +954,8 @@ static const struct dmifield {
{ "bvn", DMI_BIOS_VENDOR },
{ "bvr", DMI_BIOS_VERSION },
{ "bd", DMI_BIOS_DATE },
+ { "br", DMI_BIOS_RELEASE },
+ { "efr", DMI_EC_FIRMWARE_RELEASE },
{ "svn", DMI_SYS_VENDOR },
{ "pn", DMI_PRODUCT_NAME },
{ "pvr", DMI_PRODUCT_VERSION },
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 9a98af90e625..6aea65c65745 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -30,8 +30,6 @@ static int have_vmlinux = 0;
static int all_versions = 0;
/* If we are modposting external module set to 1 */
static int external_module = 0;
-/* Warn about section mismatch in vmlinux if set to 1 */
-static int vmlinux_section_warnings = 1;
/* Only warn about unresolved symbols */
static int warn_unresolved = 0;
/* How a symbol is exported */
@@ -90,26 +88,61 @@ static inline bool strends(const char *str, const char *postfix)
return strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
}
-static int is_vmlinux(const char *modname)
+void *do_nofail(void *ptr, const char *expr)
{
- const char *myname;
+ if (!ptr)
+ fatal("Memory allocation failure: %s.\n", expr);
- myname = strrchr(modname, '/');
- if (myname)
- myname++;
- else
- myname = modname;
+ return ptr;
+}
+
+char *read_text_file(const char *filename)
+{
+ struct stat st;
+ size_t nbytes;
+ int fd;
+ char *buf;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0) {
+ perror(filename);
+ exit(1);
+ }
- return (strcmp(myname, "vmlinux") == 0) ||
- (strcmp(myname, "vmlinux.o") == 0);
+ if (fstat(fd, &st) < 0) {
+ perror(filename);
+ exit(1);
+ }
+
+ buf = NOFAIL(malloc(st.st_size + 1));
+
+ nbytes = st.st_size;
+
+ while (nbytes) {
+ ssize_t bytes_read;
+
+ bytes_read = read(fd, buf, nbytes);
+ if (bytes_read < 0) {
+ perror(filename);
+ exit(1);
+ }
+
+ nbytes -= bytes_read;
+ }
+ buf[st.st_size] = '\0';
+
+ close(fd);
+
+ return buf;
}
-void *do_nofail(void *ptr, const char *expr)
+char *get_line(char **stringp)
{
- if (!ptr)
- fatal("Memory allocation failure: %s.\n", expr);
+ /* do not return the unwanted extra line at EOF */
+ if (*stringp && **stringp == '\0')
+ return NULL;
- return ptr;
+ return strsep(stringp, "\n");
}
/* A list of all modules we processed */
@@ -128,24 +161,20 @@ static struct module *find_module(const char *modname)
static struct module *new_module(const char *modname)
{
struct module *mod;
- char *p;
- mod = NOFAIL(malloc(sizeof(*mod)));
+ mod = NOFAIL(malloc(sizeof(*mod) + strlen(modname) + 1));
memset(mod, 0, sizeof(*mod));
- p = NOFAIL(strdup(modname));
-
- /* strip trailing .o */
- if (strends(p, ".o")) {
- p[strlen(p) - 2] = '\0';
- mod->is_dot_o = 1;
- }
/* add to list */
- mod->name = p;
+ strcpy(mod->name, modname);
+ mod->is_vmlinux = (strcmp(modname, "vmlinux") == 0);
mod->gpl_compatible = -1;
mod->next = modules;
modules = mod;
+ if (mod->is_vmlinux)
+ have_vmlinux = 1;
+
return mod;
}
@@ -161,12 +190,9 @@ struct symbol {
int crc_valid;
char *namespace;
unsigned int weak:1;
- unsigned int vmlinux:1; /* 1 if symbol is defined in vmlinux */
- unsigned int kernel:1; /* 1 if symbol is from kernel
- * (only for external modules) **/
unsigned int is_static:1; /* 1 if symbol is not global */
enum export export; /* Type of export */
- char name[0];
+ char name[];
};
static struct symbol *symbolhash[SYMBOL_HASH_SIZE];
@@ -288,29 +314,32 @@ static enum export export_no(const char *s)
return export_unknown;
}
-static const char *sech_name(struct elf_info *elf, Elf_Shdr *sechdr)
+static void *sym_get_data_by_offset(const struct elf_info *info,
+ unsigned int secindex, unsigned long offset)
{
- return (void *)elf->hdr +
- elf->sechdrs[elf->secindex_strings].sh_offset +
- sechdr->sh_name;
-}
+ Elf_Shdr *sechdr = &info->sechdrs[secindex];
-static const char *sec_name(struct elf_info *elf, int secindex)
-{
- return sech_name(elf, &elf->sechdrs[secindex]);
+ if (info->hdr->e_type != ET_REL)
+ offset -= sechdr->sh_addr;
+
+ return (void *)info->hdr + sechdr->sh_offset + offset;
}
static void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym)
{
- unsigned int secindex = get_secindex(info, sym);
- Elf_Shdr *sechdr = &info->sechdrs[secindex];
- unsigned long offset;
+ return sym_get_data_by_offset(info, get_secindex(info, sym),
+ sym->st_value);
+}
- offset = sym->st_value;
- if (info->hdr->e_type != ET_REL)
- offset -= sechdr->sh_addr;
+static const char *sech_name(const struct elf_info *info, Elf_Shdr *sechdr)
+{
+ return sym_get_data_by_offset(info, info->secindex_strings,
+ sechdr->sh_name);
+}
- return (void *)info->hdr + sechdr->sh_offset + offset;
+static const char *sec_name(const struct elf_info *info, int secindex)
+{
+ return sech_name(info, &info->sechdrs[secindex]);
}
#define strstarts(str, prefix) (strncmp(str, prefix, strlen(prefix)) == 0)
@@ -386,17 +415,15 @@ static struct symbol *sym_add_exported(const char *name, struct module *mod,
if (!s) {
s = new_symbol(name, mod, export);
- } else if (!external_module || is_vmlinux(s->module->name) ||
+ } else if (!external_module || s->module->is_vmlinux ||
s->module == mod) {
warn("%s: '%s' exported twice. Previous export was in %s%s\n",
mod->name, name, s->module->name,
- is_vmlinux(s->module->name) ? "" : ".ko");
+ s->module->is_vmlinux ? "" : ".ko");
return s;
}
s->module = mod;
- s->vmlinux = is_vmlinux(mod->name);
- s->kernel = 0;
s->export = export;
return s;
}
@@ -416,7 +443,7 @@ static void sym_set_crc(const char *name, unsigned int crc)
s->crc_valid = 1;
}
-void *grab_file(const char *filename, unsigned long *size)
+static void *grab_file(const char *filename, size_t *size)
{
struct stat st;
void *map = MAP_FAILED;
@@ -438,41 +465,7 @@ failed:
return map;
}
-/**
- * Return a copy of the next line in a mmap'ed file.
- * spaces in the beginning of the line is trimmed away.
- * Return a pointer to a static buffer.
- **/
-char *get_next_line(unsigned long *pos, void *file, unsigned long size)
-{
- static char line[4096];
- int skip = 1;
- size_t len = 0;
- signed char *p = (signed char *)file + *pos;
- char *s = line;
-
- for (; *pos < size ; (*pos)++) {
- if (skip && isspace(*p)) {
- p++;
- continue;
- }
- skip = 0;
- if (*p != '\n' && (*pos < size)) {
- len++;
- *s++ = *p++;
- if (len > 4095)
- break; /* Too long, stop */
- } else {
- /* End of string */
- *s = '\0';
- return line;
- }
- }
- /* End of buffer */
- return NULL;
-}
-
-void release_file(void *file, unsigned long size)
+static void release_file(void *file, size_t size)
{
munmap(file, size);
}
@@ -528,9 +521,8 @@ static int parse_elf(struct elf_info *info, const char *filename)
/* Check if file offset is correct */
if (hdr->e_shoff > info->size) {
- fatal("section header offset=%lu in file '%s' is bigger than "
- "filesize=%lu\n", (unsigned long)hdr->e_shoff,
- filename, info->size);
+ fatal("section header offset=%lu in file '%s' is bigger than filesize=%zu\n",
+ (unsigned long)hdr->e_shoff, filename, info->size);
return 0;
}
@@ -683,7 +675,7 @@ static void handle_modversion(const struct module *mod,
if (sym->st_shndx == SHN_UNDEF) {
warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n",
- symname, mod->name, is_vmlinux(mod->name) ? "":".ko");
+ symname, mod->name, mod->is_vmlinux ? "" : ".ko");
return;
}
@@ -705,8 +697,7 @@ static void handle_symbol(struct module *mod, struct elf_info *info,
enum export export;
const char *name;
- if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
- strstarts(symname, "__ksymtab"))
+ if (strstarts(symname, "__ksymtab"))
export = export_from_secname(info, get_secindex(info, sym));
else
export = export_from_sec(info, get_secindex(info, sym));
@@ -1752,11 +1743,7 @@ static void check_section_mismatch(const char *modname, struct elf_info *elf,
static unsigned int *reloc_location(struct elf_info *elf,
Elf_Shdr *sechdr, Elf_Rela *r)
{
- Elf_Shdr *sechdrs = elf->sechdrs;
- int section = sechdr->sh_info;
-
- return (void *)elf->hdr + sechdrs[section].sh_offset +
- r->r_offset;
+ return sym_get_data_by_offset(elf, sechdr->sh_info, r->r_offset);
}
static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
@@ -2005,34 +1992,36 @@ static void read_symbols(const char *modname)
if (!parse_elf(&info, modname))
return;
- mod = new_module(modname);
-
- /* When there's no vmlinux, don't print warnings about
- * unresolved symbols (since there'll be too many ;) */
- if (is_vmlinux(modname)) {
- have_vmlinux = 1;
- mod->skip = 1;
- }
-
- license = get_modinfo(&info, "license");
- if (!license && !is_vmlinux(modname))
- warn("missing MODULE_LICENSE() in %s\n"
- "see include/linux/module.h for "
- "more information\n", modname);
- while (license) {
- if (license_is_gpl_compatible(license))
- mod->gpl_compatible = 1;
- else {
- mod->gpl_compatible = 0;
- break;
+ {
+ char *tmp;
+
+ /* strip trailing .o */
+ tmp = NOFAIL(strdup(modname));
+ tmp[strlen(tmp) - 2] = '\0';
+ mod = new_module(tmp);
+ free(tmp);
+ }
+
+ if (!mod->is_vmlinux) {
+ license = get_modinfo(&info, "license");
+ if (!license)
+ warn("missing MODULE_LICENSE() in %s\n", modname);
+ while (license) {
+ if (license_is_gpl_compatible(license))
+ mod->gpl_compatible = 1;
+ else {
+ mod->gpl_compatible = 0;
+ break;
+ }
+ license = get_next_modinfo(&info, "license", license);
}
- license = get_next_modinfo(&info, "license", license);
- }
- namespace = get_modinfo(&info, "import_ns");
- while (namespace) {
- add_namespace(&mod->imported_namespaces, namespace);
- namespace = get_next_modinfo(&info, "import_ns", namespace);
+ namespace = get_modinfo(&info, "import_ns");
+ while (namespace) {
+ add_namespace(&mod->imported_namespaces, namespace);
+ namespace = get_next_modinfo(&info, "import_ns",
+ namespace);
+ }
}
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
@@ -2070,16 +2059,14 @@ static void read_symbols(const char *modname)
}
}
- if (!is_vmlinux(modname) || vmlinux_section_warnings)
- check_sec_ref(mod, modname, &info);
+ check_sec_ref(mod, modname, &info);
- version = get_modinfo(&info, "version");
- if (version)
- maybe_frob_rcs_version(modname, version, info.modinfo,
- version - (char *)info.hdr);
- if (version || (all_versions && !is_vmlinux(modname)))
- get_src_version(modname, mod->srcversion,
- sizeof(mod->srcversion)-1);
+ if (!mod->is_vmlinux) {
+ version = get_modinfo(&info, "version");
+ if (version || all_versions)
+ get_src_version(modname, mod->srcversion,
+ sizeof(mod->srcversion) - 1);
+ }
parse_elf_finish(&info);
@@ -2143,20 +2130,18 @@ void buf_write(struct buffer *buf, const char *s, int len)
static void check_for_gpl_usage(enum export exp, const char *m, const char *s)
{
- const char *e = is_vmlinux(m) ?"":".ko";
-
switch (exp) {
case export_gpl:
- fatal("GPL-incompatible module %s%s "
- "uses GPL-only symbol '%s'\n", m, e, s);
+ fatal("GPL-incompatible module %s.ko uses GPL-only symbol '%s'\n",
+ m, s);
break;
case export_unused_gpl:
- fatal("GPL-incompatible module %s%s "
- "uses GPL-only symbol marked UNUSED '%s'\n", m, e, s);
+ fatal("GPL-incompatible module %s.ko uses GPL-only symbol marked UNUSED '%s'\n",
+ m, s);
break;
case export_gpl_future:
- warn("GPL-incompatible module %s%s "
- "uses future GPL-only symbol '%s'\n", m, e, s);
+ warn("GPL-incompatible module %s.ko uses future GPL-only symbol '%s'\n",
+ m, s);
break;
case export_plain:
case export_unused:
@@ -2168,13 +2153,11 @@ static void check_for_gpl_usage(enum export exp, const char *m, const char *s)
static void check_for_unused(enum export exp, const char *m, const char *s)
{
- const char *e = is_vmlinux(m) ?"":".ko";
-
switch (exp) {
case export_unused:
case export_unused_gpl:
- warn("module %s%s "
- "uses symbol '%s' marked UNUSED\n", m, e, s);
+ warn("module %s.ko uses symbol '%s' marked UNUSED\n",
+ m, s);
break;
default:
/* ignore */
@@ -2349,7 +2332,7 @@ static void add_depends(struct buffer *b, struct module *mod)
/* Clear ->seen flag of modules that own symbols needed by this. */
for (s = mod->unres; s; s = s->next)
if (s->module)
- s->module->seen = is_vmlinux(s->module->name);
+ s->module->seen = s->module->is_vmlinux;
buf_printf(b, "\n");
buf_printf(b, "MODULE_INFO(depends, \"");
@@ -2382,6 +2365,25 @@ static void add_srcversion(struct buffer *b, struct module *mod)
}
}
+static void write_buf(struct buffer *b, const char *fname)
+{
+ FILE *file;
+
+ file = fopen(fname, "w");
+ if (!file) {
+ perror(fname);
+ exit(1);
+ }
+ if (fwrite(b->p, 1, b->pos, file) != b->pos) {
+ perror(fname);
+ exit(1);
+ }
+ if (fclose(file) != 0) {
+ perror(fname);
+ exit(1);
+ }
+}
+
static void write_if_changed(struct buffer *b, const char *fname)
{
char *tmp;
@@ -2414,32 +2416,24 @@ static void write_if_changed(struct buffer *b, const char *fname)
close_write:
fclose(file);
write:
- file = fopen(fname, "w");
- if (!file) {
- perror(fname);
- exit(1);
- }
- if (fwrite(b->p, 1, b->pos, file) != b->pos) {
- perror(fname);
- exit(1);
- }
- fclose(file);
+ write_buf(b, fname);
}
/* parse Module.symvers file. line format:
* 0x12345678<tab>symbol<tab>module<tab>export<tab>namespace
**/
-static void read_dump(const char *fname, unsigned int kernel)
+static void read_dump(const char *fname)
{
- unsigned long size, pos = 0;
- void *file = grab_file(fname, &size);
- char *line;
+ char *buf, *pos, *line;
- if (!file)
+ buf = read_text_file(fname);
+ if (!buf)
/* No symbol versions, silently ignore */
return;
- while ((line = get_next_line(&pos, file, size))) {
+ pos = buf;
+
+ while ((line = get_line(&pos))) {
char *symname, *namespace, *modname, *d, *export;
unsigned int crc;
struct module *mod;
@@ -2463,21 +2457,18 @@ static void read_dump(const char *fname, unsigned int kernel)
goto fail;
mod = find_module(modname);
if (!mod) {
- if (is_vmlinux(modname))
- have_vmlinux = 1;
mod = new_module(modname);
- mod->skip = 1;
+ mod->from_dump = 1;
}
s = sym_add_exported(symname, mod, export_no(export));
- s->kernel = kernel;
s->is_static = 0;
sym_set_crc(symname, crc);
sym_update_namespace(symname, namespace);
}
- release_file(file, size);
+ free(buf);
return;
fail:
- release_file(file, size);
+ free(buf);
fatal("parse error in symbol dump file\n");
}
@@ -2489,7 +2480,7 @@ static int dump_sym(struct symbol *sym)
{
if (!external_module)
return 1;
- if (sym->vmlinux || sym->kernel)
+ if (sym->module->from_dump)
return 0;
return 1;
}
@@ -2515,7 +2506,7 @@ static void write_dump(const char *fname)
symbol = symbol->next;
}
}
- write_if_changed(&buf, fname);
+ write_buf(&buf, fname);
free(buf.p);
}
@@ -2527,7 +2518,7 @@ static void write_namespace_deps_files(const char *fname)
for (mod = modules; mod; mod = mod->next) {
- if (mod->skip || !mod->missing_namespaces)
+ if (mod->from_dump || !mod->missing_namespaces)
continue;
buf_printf(&ns_deps_buf, "%s.ko:", mod->name);
@@ -2542,8 +2533,8 @@ static void write_namespace_deps_files(const char *fname)
free(ns_deps_buf.p);
}
-struct ext_sym_list {
- struct ext_sym_list *next;
+struct dump_list {
+ struct dump_list *next;
const char *file;
};
@@ -2551,28 +2542,24 @@ int main(int argc, char **argv)
{
struct module *mod;
struct buffer buf = { };
- char *kernel_read = NULL;
char *missing_namespace_deps = NULL;
char *dump_write = NULL, *files_source = NULL;
int opt;
int err;
int n;
- struct ext_sym_list *extsym_iter;
- struct ext_sym_list *extsym_start = NULL;
+ struct dump_list *dump_read_start = NULL;
+ struct dump_list **dump_read_iter = &dump_read_start;
- while ((opt = getopt(argc, argv, "i:e:mnsT:o:awENd:")) != -1) {
+ while ((opt = getopt(argc, argv, "ei:mnT:o:awENd:")) != -1) {
switch (opt) {
- case 'i':
- kernel_read = optarg;
- external_module = 1;
- break;
case 'e':
external_module = 1;
- extsym_iter =
- NOFAIL(malloc(sizeof(*extsym_iter)));
- extsym_iter->next = extsym_start;
- extsym_iter->file = optarg;
- extsym_start = extsym_iter;
+ break;
+ case 'i':
+ *dump_read_iter =
+ NOFAIL(calloc(1, sizeof(**dump_read_iter)));
+ (*dump_read_iter)->file = optarg;
+ dump_read_iter = &(*dump_read_iter)->next;
break;
case 'm':
modversions = 1;
@@ -2586,9 +2573,6 @@ int main(int argc, char **argv)
case 'a':
all_versions = 1;
break;
- case 's':
- vmlinux_section_warnings = 0;
- break;
case 'T':
files_source = optarg;
break;
@@ -2609,13 +2593,13 @@ int main(int argc, char **argv)
}
}
- if (kernel_read)
- read_dump(kernel_read, 1);
- while (extsym_start) {
- read_dump(extsym_start->file, 0);
- extsym_iter = extsym_start->next;
- free(extsym_start);
- extsym_start = extsym_iter;
+ while (dump_read_start) {
+ struct dump_list *tmp;
+
+ read_dump(dump_read_start->file);
+ tmp = dump_read_start->next;
+ free(dump_read_start);
+ dump_read_start = tmp;
}
while (optind < argc)
@@ -2624,12 +2608,19 @@ int main(int argc, char **argv)
if (files_source)
read_symbols_from_files(files_source);
+ /*
+ * When there's no vmlinux, don't print warnings about
+ * unresolved symbols (since there'll be too many ;)
+ */
+ if (!have_vmlinux)
+ warn("Symbol info of vmlinux is missing. Unresolved symbol check will be entirely skipped.\n");
+
err = 0;
for (mod = modules; mod; mod = mod->next) {
char fname[PATH_MAX];
- if (mod->skip)
+ if (mod->is_vmlinux || mod->from_dump)
continue;
buf.pos = 0;
@@ -2662,13 +2653,6 @@ int main(int argc, char **argv)
struct symbol *s;
for (s = symbolhash[n]; s; s = s->next) {
- /*
- * Do not check "vmlinux". This avoids the same warnings
- * shown twice, and false-positives for ARCH=um.
- */
- if (is_vmlinux(s->module->name) && !s->module->is_dot_o)
- continue;
-
if (s->is_static)
warn("\"%s\" [%s] is a static %s\n",
s->name, s->module->name,
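
The new read_text_file()/get_line() helpers introduced above replace the old grab_file()/get_next_line()/release_file() trio: the whole file is pulled into one NUL-terminated heap buffer and lines are handed out via strsep() until the buffer is exhausted. Below is a minimal userspace sketch of that usage pattern; it assumes simplified error handling (a single read(), plain malloc()) instead of the in-tree NOFAIL()/fatal() helpers, and the main() driver is purely illustrative.

#define _GNU_SOURCE		/* for strsep() on glibc */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* simplified read_text_file(): slurp the whole file into one NUL-terminated buffer */
static char *read_text_file(const char *filename)
{
	struct stat st;
	char *buf;
	int fd = open(filename, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror(filename);
		exit(1);
	}
	buf = malloc(st.st_size + 1);
	if (!buf || read(fd, buf, st.st_size) != st.st_size) {
		perror(filename);
		exit(1);
	}
	buf[st.st_size] = '\0';
	close(fd);
	return buf;
}

/* simplified get_line(): hand out one line per call, NULL after the last one */
static char *get_line(char **stringp)
{
	if (*stringp && **stringp == '\0')	/* skip the empty tail after the final newline */
		return NULL;
	return strsep(stringp, "\n");
}

int main(int argc, char **argv)
{
	char *buf, *pos, *line;

	if (argc < 2)
		return 1;
	buf = read_text_file(argv[1]);
	pos = buf;
	while ((line = get_line(&pos)))
		puts(line);
	free(buf);	/* a single free() replaces release_file() */
	return 0;
}

The same buf/pos/line pattern appears in read_dump() above and in sumversion.c further below.
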
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 60dca9b7106b..3aa052722233 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -111,29 +111,29 @@ buf_write(struct buffer *buf, const char *s, int len);
struct namespace_list {
struct namespace_list *next;
- char namespace[0];
+ char namespace[];
};
struct module {
struct module *next;
- const char *name;
int gpl_compatible;
struct symbol *unres;
+ int from_dump; /* 1 if module was loaded from *.symvers */
+ int is_vmlinux;
int seen;
- int skip;
int has_init;
int has_cleanup;
struct buffer dev_table_buf;
char srcversion[25];
- int is_dot_o;
// Missing namespace dependencies
struct namespace_list *missing_namespaces;
// Actual imported namespaces
struct namespace_list *imported_namespaces;
+ char name[];
};
struct elf_info {
- unsigned long size;
+ size_t size;
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
Elf_Sym *symtab_start;
@@ -187,16 +187,11 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
void add_moddevtable(struct buffer *buf, struct module *mod);
/* sumversion.c */
-void maybe_frob_rcs_version(const char *modfilename,
- char *version,
- void *modinfo,
- unsigned long modinfo_offset);
void get_src_version(const char *modname, char sum[], unsigned sumlen);
/* from modpost.c */
-void *grab_file(const char *filename, unsigned long *size);
-char* get_next_line(unsigned long *pos, void *file, unsigned long size);
-void release_file(void *file, unsigned long size);
+char *read_text_file(const char *filename);
+char *get_line(char **stringp);
enum loglevel {
LOG_WARN,
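
struct module now carries its name as a C99 flexible array member (char name[]), so new_module() allocates the struct and the trailing string in a single allocation instead of a separate strdup(). A minimal sketch of that pattern follows, using a hypothetical struct thing rather than the in-tree definition:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct thing {			/* hypothetical stand-in for struct module */
	struct thing *next;
	int is_vmlinux;
	char name[];		/* flexible array member, replaces a separate pointer */
};

static struct thing *new_thing(const char *name)
{
	/* one allocation covers the struct and the trailing name string */
	struct thing *t = calloc(1, sizeof(*t) + strlen(name) + 1);

	if (!t)
		exit(1);
	strcpy(t->name, name);
	t->is_vmlinux = strcmp(name, "vmlinux") == 0;
	return t;
}

int main(void)
{
	struct thing *t = new_thing("vmlinux");

	printf("%s is_vmlinux=%d\n", t->name, t->is_vmlinux);
	free(t);		/* one free() releases struct and name together */
	return 0;
}
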
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index 63062024ce0e..d587f40f1117 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -258,9 +258,8 @@ static int parse_file(const char *fname, struct md4_ctx *md)
char *file;
unsigned long i, len;
- file = grab_file(fname, &len);
- if (!file)
- return 0;
+ file = read_text_file(fname);
+ len = strlen(file);
for (i = 0; i < len; i++) {
/* Collapse and ignore \ and CR. */
@@ -287,7 +286,7 @@ static int parse_file(const char *fname, struct md4_ctx *md)
add_char(file[i], md);
}
- release_file(file, len);
+ free(file);
return 1;
}
/* Check whether the file is a static library or not */
@@ -304,9 +303,8 @@ static int is_static_library(const char *objfile)
* to figure out source files. */
static int parse_source_files(const char *objfile, struct md4_ctx *md)
{
- char *cmd, *file, *line, *dir;
+ char *cmd, *file, *line, *dir, *pos;
const char *base;
- unsigned long flen, pos = 0;
int dirlen, ret = 0, check_files = 0;
cmd = NOFAIL(malloc(strlen(objfile) + sizeof("..cmd")));
@@ -324,14 +322,12 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
strncpy(dir, objfile, dirlen);
dir[dirlen] = '\0';
- file = grab_file(cmd, &flen);
- if (!file) {
- warn("could not find %s for %s\n", cmd, objfile);
- goto out;
- }
+ file = read_text_file(cmd);
+
+ pos = file;
/* Sum all files in the same dir or subdirs. */
- while ((line = get_next_line(&pos, file, flen)) != NULL) {
+ while ((line = get_line(&pos))) {
char* p = line;
if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
@@ -382,8 +378,7 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
/* Everyone parsed OK */
ret = 1;
out_file:
- release_file(file, flen);
-out:
+ free(file);
free(dir);
free(cmd);
return ret;
@@ -392,106 +387,34 @@ out:
/* Calc and record src checksum. */
void get_src_version(const char *modname, char sum[], unsigned sumlen)
{
- void *file;
- unsigned long len;
+ char *buf, *pos, *firstline;
struct md4_ctx md;
- char *sources, *end, *fname;
+ char *fname;
char filelist[PATH_MAX + 1];
/* objects for a module are listed in the first line of *.mod file. */
snprintf(filelist, sizeof(filelist), "%.*smod",
(int)strlen(modname) - 1, modname);
- file = grab_file(filelist, &len);
- if (!file)
- /* not a module or .mod file missing - ignore */
- return;
+ buf = read_text_file(filelist);
- sources = file;
-
- end = strchr(sources, '\n');
- if (!end) {
+ pos = buf;
+ firstline = get_line(&pos);
+ if (!firstline) {
warn("bad ending versions file for %s\n", modname);
- goto release;
+ goto free;
}
- *end = '\0';
md4_init(&md);
- while ((fname = strsep(&sources, " ")) != NULL) {
+ while ((fname = strsep(&firstline, " "))) {
if (!*fname)
continue;
if (!(is_static_library(fname)) &&
!parse_source_files(fname, &md))
- goto release;
+ goto free;
}
md4_final_ascii(&md, sum, sumlen);
-release:
- release_file(file, len);
-}
-
-static void write_version(const char *filename, const char *sum,
- unsigned long offset)
-{
- int fd;
-
- fd = open(filename, O_RDWR);
- if (fd < 0) {
- warn("changing sum in %s failed: %s\n",
- filename, strerror(errno));
- return;
- }
-
- if (lseek(fd, offset, SEEK_SET) == (off_t)-1) {
- warn("changing sum in %s:%lu failed: %s\n",
- filename, offset, strerror(errno));
- goto out;
- }
-
- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
- warn("writing sum in %s failed: %s\n",
- filename, strerror(errno));
- goto out;
- }
-out:
- close(fd);
-}
-
-static int strip_rcs_crap(char *version)
-{
- unsigned int len, full_len;
-
- if (strncmp(version, "$Revision", strlen("$Revision")) != 0)
- return 0;
-
- /* Space for version string follows. */
- full_len = strlen(version) + strlen(version + strlen(version) + 1) + 2;
-
- /* Move string to start with version number: prefix will be
- * $Revision$ or $Revision: */
- len = strlen("$Revision");
- if (version[len] == ':' || version[len] == '$')
- len++;
- while (isspace(version[len]))
- len++;
- memmove(version, version+len, full_len-len);
- full_len -= len;
-
- /* Preserve up to next whitespace. */
- len = 0;
- while (version[len] && !isspace(version[len]))
- len++;
- memmove(version + len, version + strlen(version),
- full_len - strlen(version));
- return 1;
-}
-
-/* Clean up RCS-style version numbers. */
-void maybe_frob_rcs_version(const char *modfilename,
- char *version,
- void *modinfo,
- unsigned long version_offset)
-{
- if (strip_rcs_crap(version))
- write_version(modfilename, version, version_offset);
+free:
+ free(buf);
}
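
With the change above, get_src_version() reads only the first line of the *.mod file via get_line() and splits it on spaces with strsep(), skipping the empty tokens strsep() produces for consecutive separators. A standalone sketch of that tokenizing loop, with a made-up object list standing in for real *.mod contents:

#define _GNU_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical first line of a *.mod file: object names separated by spaces */
	char firstline[] = "drivers/foo/foo.o  drivers/foo/foo-util.o";
	char *p = firstline, *fname;

	while ((fname = strsep(&p, " "))) {
		if (!*fname)	/* strsep() yields "" for consecutive separators */
			continue;
		printf("object: %s\n", fname);
	}
	return 0;
}
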
diff --git a/scripts/modules-check.sh b/scripts/modules-check.sh
index f51f446707b8..43de226071ae 100755
--- a/scripts/modules-check.sh
+++ b/scripts/modules-check.sh
@@ -3,14 +3,24 @@
set -e
+if [ $# != 1 ]; then
+ echo "Usage: $0 <modules.order>" >& 2
+ exit 1
+fi
+
+exit_code=0
+
# Check uniqueness of module names
check_same_name_modules()
{
- for m in $(sed 's:.*/::' modules.order | sort | uniq -d)
+ for m in $(sed 's:.*/::' $1 | sort | uniq -d)
do
- echo "warning: same module names found:" >&2
+ echo "error: the following would cause module name conflict:" >&2
sed -n "/\/$m/s:^: :p" modules.order >&2
+ exit_code=1
done
}
-check_same_name_modules
+check_same_name_modules "$1"
+
+exit $exit_code
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 77c7caefede1..ad62c6879622 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -28,15 +28,15 @@ case "${1}" in
opts=
;;
targz-pkg)
- opts=--gzip
+ opts="-I ${_GZIP}"
tarball=${tarball}.gz
;;
tarbz2-pkg)
- opts=--bzip2
+ opts="-I ${_BZIP2}"
tarball=${tarball}.bz2
;;
tarxz-pkg)
- opts=--xz
+ opts="-I ${XZ}"
tarball=${tarball}.xz
;;
*)
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index d9cd24cf0d40..c45e9afaab2d 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -59,6 +59,7 @@ actualy||actually
acumulating||accumulating
acumulative||accumulative
acumulator||accumulator
+acutally||actually
adapater||adapter
addional||additional
additionaly||additionally
@@ -249,6 +250,7 @@ calescing||coalescing
calle||called
callibration||calibration
callled||called
+callser||caller
calucate||calculate
calulate||calculate
cancelation||cancellation
@@ -671,6 +673,7 @@ hanlde||handle
hanled||handled
happend||happened
harware||hardware
+havind||having
heirarchically||hierarchically
helpfull||helpful
hexdecimal||hexadecimal
@@ -845,6 +848,7 @@ logile||logfile
loobpack||loopback
loosing||losing
losted||lost
+maangement||management
machinary||machinery
maibox||mailbox
maintainance||maintenance
@@ -905,6 +909,7 @@ modfiy||modify
modulues||modules
momery||memory
memomry||memory
+monitring||monitoring
monochorome||monochrome
monochromo||monochrome
monocrome||monochrome
@@ -1010,6 +1015,7 @@ partiton||partition
pased||passed
passin||passing
pathes||paths
+pattrns||patterns
pecularities||peculiarities
peformance||performance
peforming||performing
@@ -1256,6 +1262,7 @@ shoule||should
shrinked||shrunk
siginificantly||significantly
signabl||signal
+significanly||significantly
similary||similarly
similiar||similar
simlar||similar
@@ -1371,6 +1378,7 @@ thead||thread
therfore||therefore
thier||their
threds||threads
+threee||three
threshhold||threshold
thresold||threshold
throught||through
@@ -1410,6 +1418,7 @@ tyep||type
udpate||update
uesd||used
uknown||unknown
+usccess||success
usupported||unsupported
uncommited||uncommitted
unconditionaly||unconditionally
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
index 7a2d372f4885..76e9cbcfbeab 100755
--- a/scripts/xz_wrap.sh
+++ b/scripts/xz_wrap.sh
@@ -20,4 +20,4 @@ case $SRCARCH in
sparc) BCJ=--sparc ;;
esac
-exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
+exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index 0fe336860773..03fae1bd48a6 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -70,8 +70,9 @@ config SECURITY_APPARMOR_DEBUG_MESSAGES
the kernel message buffer.
config SECURITY_APPARMOR_KUNIT_TEST
- bool "Build KUnit tests for policy_unpack.c"
+ bool "Build KUnit tests for policy_unpack.c" if !KUNIT_ALL_TESTS
depends on KUNIT=y && SECURITY_APPARMOR
+ default KUNIT_ALL_TESTS
help
This builds the AppArmor KUnit tests.
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index f6a3ecfadf80..5fd4a64e431f 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -341,38 +341,6 @@ static struct dentry *aafs_create_dir(const char *name, struct dentry *parent)
}
/**
- * aafs_create_symlink - create a symlink in the apparmorfs filesystem
- * @name: name of dentry to create
- * @parent: parent directory for this dentry
- * @target: if symlink, symlink target string
- * @private: private data
- * @iops: struct of inode_operations that should be used
- *
- * If @target parameter is %NULL, then the @iops parameter needs to be
- * setup to handle .readlink and .get_link inode_operations.
- */
-static struct dentry *aafs_create_symlink(const char *name,
- struct dentry *parent,
- const char *target,
- void *private,
- const struct inode_operations *iops)
-{
- struct dentry *dent;
- char *link = NULL;
-
- if (target) {
- if (!link)
- return ERR_PTR(-ENOMEM);
- }
- dent = aafs_create(name, S_IFLNK | 0444, parent, private, link, NULL,
- iops);
- if (IS_ERR(dent))
- kfree(link);
-
- return dent;
-}
-
-/**
* aafs_remove - removes a file or directory from the apparmorfs filesystem
*
* @dentry: dentry of the file/directory/symlink to removed.
@@ -624,7 +592,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
void __aa_bump_ns_revision(struct aa_ns *ns)
{
- WRITE_ONCE(ns->revision, ns->revision + 1);
+ WRITE_ONCE(ns->revision, READ_ONCE(ns->revision) + 1);
wake_up_interruptible(&ns->wait);
}
@@ -840,7 +808,7 @@ static ssize_t query_label(char *buf, size_t buf_len,
struct multi_transaction {
struct kref count;
ssize_t size;
- char data[0];
+ char data[];
};
#define MULTI_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct multi_transaction))
@@ -1763,25 +1731,25 @@ int __aafs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
}
if (profile->rawdata) {
- dent = aafs_create_symlink("raw_sha1", dir, NULL,
- profile->label.proxy,
- &rawdata_link_sha1_iops);
+ dent = aafs_create("raw_sha1", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_sha1_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_HASH] = dent;
- dent = aafs_create_symlink("raw_abi", dir, NULL,
- profile->label.proxy,
- &rawdata_link_abi_iops);
+ dent = aafs_create("raw_abi", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_abi_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
profile->dents[AAFS_PROF_RAW_ABI] = dent;
- dent = aafs_create_symlink("raw_data", dir, NULL,
- profile->label.proxy,
- &rawdata_link_data_iops);
+ dent = aafs_create("raw_data", S_IFLNK | 0444, dir,
+ profile->label.proxy, NULL, NULL,
+ &rawdata_link_data_iops);
if (IS_ERR(dent))
goto fail;
aa_get_proxy(profile->label.proxy);
@@ -2364,6 +2332,8 @@ static struct aa_sfs_entry aa_sfs_entry_versions[] = {
static struct aa_sfs_entry aa_sfs_entry_policy[] = {
AA_SFS_DIR("versions", aa_sfs_entry_versions),
AA_SFS_FILE_BOOLEAN("set_load", 1),
+ /* number of out of band transitions supported */
+ AA_SFS_FILE_U64("outofband", MAX_OOB_SUPPORTED),
{ }
};
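
In the __aa_bump_ns_revision() hunk above, the read of ns->revision is now marked alongside the write, documenting that lockless readers may observe the counter concurrently and keeping the compiler from tearing or re-loading the value. A userspace analogue, assuming C11 relaxed atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE() (an illustration, not the apparmorfs code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int revision;

/* analogue of __aa_bump_ns_revision(): both the load and the store are
 * marked; the increment itself is still not atomic, which is acceptable
 * when writers are serialized elsewhere and only readers are lockless. */
static void bump_revision(void)
{
	unsigned int v = atomic_load_explicit(&revision, memory_order_relaxed);

	atomic_store_explicit(&revision, v + 1, memory_order_relaxed);
}

int main(void)
{
	bump_revision();
	printf("revision=%u\n",
	       atomic_load_explicit(&revision, memory_order_relaxed));
	return 0;
}
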
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index a84ef030fbd7..1c898055a476 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -320,8 +320,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
might_sleep();
/* transition from exec match to xattr set */
- state = aa_dfa_null_transition(profile->xmatch, state);
-
+ state = aa_dfa_outofband_transition(profile->xmatch, state);
d = bprm->file->f_path.dentry;
for (i = 0; i < profile->xattr_count; i++) {
@@ -330,7 +329,13 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
if (size >= 0) {
u32 perm;
- /* Check the xattr value, not just presence */
+ /*
+ * Check the xattr presence before the value. This ensures
+ * that a not-present xattr can be distinguished from a
+ * 0-length value or a rule that matches any value
+ */
+ state = aa_dfa_null_transition(profile->xmatch, state);
+ /* Check xattr value */
state = aa_dfa_match_len(profile->xmatch, state, value,
size);
perm = dfa_user_allow(profile->xmatch, state);
@@ -340,7 +345,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
}
}
/* transition to next element */
- state = aa_dfa_null_transition(profile->xmatch, state);
+ state = aa_dfa_outofband_transition(profile->xmatch, state);
if (size < 0) {
/*
* No xattr match, so verify if transition to
@@ -620,8 +625,6 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
bool *secure_exec)
{
struct aa_label *new = NULL;
- struct aa_profile *component;
- struct label_it i;
const char *info = NULL, *name = NULL, *target = NULL;
unsigned int state = profile->file.start;
struct aa_perms perms = {};
@@ -670,21 +673,6 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
info = "profile transition not found";
/* remove MAY_EXEC to audit as failure */
perms.allow &= ~MAY_EXEC;
- } else {
- /* verify that each component's xattr requirements are
- * met, and fail execution otherwise
- */
- label_for_each(i, new, component) {
- if (aa_xattrs_match(bprm, component, state) <
- 0) {
- error = -EACCES;
- info = "required xattrs not present";
- perms.allow &= ~MAY_EXEC;
- aa_put_label(new);
- new = NULL;
- goto audit;
- }
- }
}
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
@@ -854,14 +842,14 @@ static struct aa_label *handle_onexec(struct aa_label *label,
}
/**
- * apparmor_bprm_set_creds - set the new creds on the bprm struct
+ * apparmor_bprm_creds_for_exec - Update the new creds on the bprm struct
* @bprm: binprm for the exec (NOT NULL)
*
* Returns: %0 or error on failure
*
* TODO: once the other paths are done see if we can't refactor into a fn
*/
-int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm)
{
struct aa_task_ctx *ctx;
struct aa_label *label, *new = NULL;
@@ -875,9 +863,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
file_inode(bprm->file)->i_mode
};
- if (bprm->called_set_creds)
- return 0;
-
ctx = task_ctx(current);
AA_BUG(!cred_label(bprm->cred));
AA_BUG(!ctx);
@@ -929,7 +914,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
* aways results in a further reduction of permissions.
*/
if ((bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) &&
- !unconfined(label) && !aa_label_is_subset(new, ctx->nnp)) {
+ !unconfined(label) &&
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
error = -EPERM;
info = "no new privs";
goto audit;
@@ -1207,7 +1193,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
- !aa_label_is_subset(new, ctx->nnp)) {
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
@@ -1228,7 +1214,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, int flags)
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
- !aa_label_is_subset(previous, ctx->nnp)) {
+ !aa_label_is_unconfined_subset(previous, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
@@ -1423,7 +1409,7 @@ check:
* reduce restrictions.
*/
if (task_no_new_privs(current) && !unconfined(label) &&
- !aa_label_is_subset(new, ctx->nnp)) {
+ !aa_label_is_unconfined_subset(new, ctx->nnp)) {
/* not an apparmor denial per se, so don't log it */
AA_DEBUG("no_new_privs - change_hat denied");
error = -EPERM;
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index f1caf3674e1c..9a2d14b7c9f8 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -154,13 +154,13 @@ int aa_audit_file(struct aa_profile *profile, struct aa_perms *perms,
* is_deleted - test if a file has been completely unlinked
* @dentry: dentry of file to test for deletion (NOT NULL)
*
- * Returns: %1 if deleted else %0
+ * Returns: true if deleted else false
*/
static inline bool is_deleted(struct dentry *dentry)
{
if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0)
- return 1;
- return 0;
+ return true;
+ return false;
}
static int path_name(const char *op, struct aa_label *label,
@@ -353,15 +353,15 @@ int aa_path_perm(const char *op, struct aa_label *label,
* this is done as part of the subset test, where a hardlink must have
* a subset of permissions that the target has.
*
- * Returns: %1 if subset else %0
+ * Returns: true if subset else false
*/
static inline bool xindex_is_subset(u32 link, u32 target)
{
if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
- return 0;
+ return false;
- return 1;
+ return true;
}
static int profile_path_link(struct aa_profile *profile,
diff --git a/security/apparmor/include/domain.h b/security/apparmor/include/domain.h
index 21b875fe2d37..d14928fe1c6f 100644
--- a/security/apparmor/include/domain.h
+++ b/security/apparmor/include/domain.h
@@ -30,7 +30,7 @@ struct aa_domain {
struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
const char **name);
-int apparmor_bprm_set_creds(struct linux_binprm *bprm);
+int apparmor_bprm_creds_for_exec(struct linux_binprm *bprm);
void aa_free_domain_entries(struct aa_domain *domain);
int aa_change_hat(const char *hats[], int count, u64 token, int flags);
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
index 47942c4ba7ca..1e90384b1523 100644
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -275,12 +275,14 @@ void aa_labelset_destroy(struct aa_labelset *ls);
void aa_labelset_init(struct aa_labelset *ls);
void __aa_labelset_update_subtree(struct aa_ns *ns);
+void aa_label_destroy(struct aa_label *label);
void aa_label_free(struct aa_label *label);
void aa_label_kref(struct kref *kref);
bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub);
struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
struct aa_label *set,
struct aa_label *sub);
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index e23f4aadc1ff..884489590588 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -37,6 +37,10 @@
#define YYTH_MAGIC 0x1B5E783D
#define YYTH_FLAG_DIFF_ENCODE 1
+#define YYTH_FLAG_OOB_TRANS 2
+#define YYTH_FLAGS (YYTH_FLAG_DIFF_ENCODE | YYTH_FLAG_OOB_TRANS)
+
+#define MAX_OOB_SUPPORTED 1
struct table_set_header {
u32 th_magic; /* YYTH_MAGIC */
@@ -94,6 +98,7 @@ struct table_header {
struct aa_dfa {
struct kref count;
u16 flags;
+ u32 max_oob;
struct table_header *tables[YYTD_ID_TSIZE];
};
@@ -127,6 +132,8 @@ unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
const char *str);
unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
const char c);
+unsigned int aa_dfa_outofband_transition(struct aa_dfa *dfa,
+ unsigned int state);
unsigned int aa_dfa_match_until(struct aa_dfa *dfa, unsigned int start,
const char *str, const char **retpos);
unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
@@ -181,5 +188,9 @@ static inline void aa_put_dfa(struct aa_dfa *dfa)
#define MATCH_FLAG_DIFF_ENCODE 0x80000000
#define MARK_DIFF_ENCODE 0x40000000
+#define MATCH_FLAG_OOB_TRANSITION 0x20000000
+#define MATCH_FLAGS_MASK 0xff000000
+#define MATCH_FLAGS_VALID (MATCH_FLAG_DIFF_ENCODE | MATCH_FLAG_OOB_TRANSITION)
+#define MATCH_FLAGS_INVALID (MATCH_FLAGS_MASK & ~MATCH_FLAGS_VALID)
#endif /* __AA_MATCH_H */
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 470693239e64..e68bcedca976 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -309,10 +309,8 @@ out:
}
-static void label_destroy(struct aa_label *label)
+void aa_label_destroy(struct aa_label *label)
{
- struct aa_label *tmp;
-
AA_BUG(!label);
if (!label_isprofile(label)) {
@@ -328,16 +326,13 @@ static void label_destroy(struct aa_label *label)
}
}
- if (rcu_dereference_protected(label->proxy->label, true) == label)
- rcu_assign_pointer(label->proxy->label, NULL);
-
+ if (label->proxy) {
+ if (rcu_dereference_protected(label->proxy->label, true) == label)
+ rcu_assign_pointer(label->proxy->label, NULL);
+ aa_put_proxy(label->proxy);
+ }
aa_free_secid(label->secid);
- tmp = rcu_dereference_protected(label->proxy->label, true);
- if (tmp == label)
- rcu_assign_pointer(label->proxy->label, NULL);
-
- aa_put_proxy(label->proxy);
label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
}
@@ -346,7 +341,7 @@ void aa_label_free(struct aa_label *label)
if (!label)
return;
- label_destroy(label);
+ aa_label_destroy(label);
kfree(label);
}
@@ -550,6 +545,39 @@ bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
return __aa_label_next_not_in_set(&i, set, sub) == NULL;
}
+/**
+ * aa_label_is_unconfined_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * This checks for subset while taking unconfined into account. If
+ * @sub contains an unconfined profile that does not have a matching
+ * unconfined in @set, this will not cause the test to fail.
+ * Conversely, we don't care about an unconfined in @set that is not in
+ * @sub.
+ *
+ * Returns: true if @sub is a special subset of @set
+ * else false
+ */
+bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub)
+{
+ struct label_it i = { };
+ struct aa_profile *p;
+
+ AA_BUG(!set);
+ AA_BUG(!sub);
+
+ if (sub == set)
+ return true;
+
+ do {
+ p = __aa_label_next_not_in_set(&i, set, sub);
+ if (p && !profile_unconfined(p))
+ break;
+ } while (p);
+
+ return p == NULL;
+}
/**
@@ -1531,13 +1559,13 @@ static const char *label_modename(struct aa_ns *ns, struct aa_label *label,
label_for_each(i, label, profile) {
if (aa_ns_visible(ns, profile->ns, flags & FLAG_VIEW_SUBNS)) {
- if (profile->mode == APPARMOR_UNCONFINED)
+ count++;
+ if (profile == profile->ns->unconfined)
/* special case unconfined so stacks with
* unconfined don't report as mixed. ie.
* profile_foo//&:ns1:unconfined (mixed)
*/
continue;
- count++;
if (mode == -1)
mode = profile->mode;
else if (mode != profile->mode)
@@ -1749,13 +1777,13 @@ void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
AA_DEBUG("label print error");
return;
}
- seq_printf(f, "%s", str);
+ seq_puts(f, str);
kfree(str);
} else if (display_mode(ns, label, flags))
seq_printf(f, "%s (%s)", label->hname,
label_modename(ns, label, flags));
else
- seq_printf(f, "%s", label->hname);
+ seq_puts(f, label->hname);
}
void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 27e371b44dad..ffeaee5ed968 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -804,7 +804,12 @@ static void apparmor_sk_clone_security(const struct sock *sk,
struct aa_sk_ctx *ctx = SK_CTX(sk);
struct aa_sk_ctx *new = SK_CTX(newsk);
+ if (new->label)
+ aa_put_label(new->label);
new->label = aa_get_label(ctx->label);
+
+ if (new->peer)
+ aa_put_label(new->peer);
new->peer = aa_get_label(ctx->peer);
}
@@ -1232,7 +1237,7 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
LSM_HOOK_INIT(cred_transfer, apparmor_cred_transfer),
- LSM_HOOK_INIT(bprm_set_creds, apparmor_bprm_set_creds),
+ LSM_HOOK_INIT(bprm_creds_for_exec, apparmor_bprm_creds_for_exec),
LSM_HOOK_INIT(bprm_committing_creds, apparmor_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, apparmor_bprm_committed_creds),
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 525ce22dc0e9..3e9e1eaf990e 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -97,6 +97,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
th.td_flags == YYTD_DATA8))
goto out;
+ /* if we have a table it must have some entries */
+ if (th.td_lolen == 0)
+ goto out;
tsize = table_size(th.td_lolen, th.td_flags);
if (bsize < tsize)
goto out;
@@ -198,10 +201,32 @@ static int verify_dfa(struct aa_dfa *dfa)
state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
+ if (state_count == 0)
+ goto out;
for (i = 0; i < state_count; i++) {
if (!(BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE) &&
(DEFAULT_TABLE(dfa)[i] >= state_count))
goto out;
+ if (BASE_TABLE(dfa)[i] & MATCH_FLAGS_INVALID) {
+ pr_err("AppArmor DFA state with invalid match flags");
+ goto out;
+ }
+ if ((BASE_TABLE(dfa)[i] & MATCH_FLAG_DIFF_ENCODE)) {
+ if (!(dfa->flags & YYTH_FLAG_DIFF_ENCODE)) {
+ pr_err("AppArmor DFA diff encoded transition state without header flag");
+ goto out;
+ }
+ }
+ if ((BASE_TABLE(dfa)[i] & MATCH_FLAG_OOB_TRANSITION)) {
+ if (base_idx(BASE_TABLE(dfa)[i]) < dfa->max_oob) {
+ pr_err("AppArmor DFA out of bad transition out of range");
+ goto out;
+ }
+ if (!(dfa->flags & YYTH_FLAG_OOB_TRANS)) {
+ pr_err("AppArmor DFA out of bad transition state without header flag");
+ goto out;
+ }
+ }
if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
pr_err("AppArmor DFA next/check upper bounds error\n");
goto out;
@@ -304,9 +329,23 @@ struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
goto fail;
dfa->flags = ntohs(*(__be16 *) (data + 12));
- if (dfa->flags != 0 && dfa->flags != YYTH_FLAG_DIFF_ENCODE)
+ if (dfa->flags & ~(YYTH_FLAGS))
goto fail;
+ /*
+ * TODO: needed for dfa to support more than 1 oob
+ * if (dfa->flags & YYTH_FLAGS_OOB_TRANS) {
+ * if (hsize < 16 + 4)
+ * goto fail;
+ * dfa->max_oob = ntol(*(__be32 *) (data + 16));
+ * if (dfa->max <= MAX_OOB_SUPPORTED) {
+ * pr_err("AppArmor DFA OOB greater than supported\n");
+ * goto fail;
+ * }
+ * }
+ */
+ dfa->max_oob = 1;
+
data += hsize;
size -= hsize;
@@ -495,6 +534,23 @@ unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
return state;
}
+unsigned int aa_dfa_outofband_transition(struct aa_dfa *dfa, unsigned int state)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ u32 b = (base)[(state)];
+
+ if (!(b & MATCH_FLAG_OOB_TRANSITION))
+ return DFA_NOMATCH;
+
+ /* No Equivalence class remapping for outofband transitions */
+ match_char(state, def, base, next, check, -1);
+
+ return state;
+}
+
/**
* aa_dfa_match_until - traverse @dfa until accept state or end of input
* @dfa: the dfa to match @str against (NOT NULL)
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index c6da542de27b..b02dfdbff7cd 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -142,7 +142,7 @@ static int d_namespace_path(const struct path *path, char *buf, char **name,
error = PTR_ERR(res);
*name = buf;
goto out;
- };
+ }
} else if (!our_mnt(path->mnt))
connected = 0;
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 269f2f53c0b1..af4f50fda9e3 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -242,6 +242,7 @@ void aa_free_profile(struct aa_profile *profile)
kzfree(profile->hash);
aa_put_loaddata(profile->rawdata);
+ aa_label_destroy(&profile->label);
kzfree(profile);
}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 2d743c004bc4..b67322abcc33 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -243,11 +243,11 @@ fail:
static bool unpack_X(struct aa_ext *e, enum aa_code code)
{
if (!inbounds(e, 1))
- return 0;
+ return false;
if (*(u8 *) e->pos != code)
- return 0;
+ return false;
e->pos++;
- return 1;
+ return true;
}
/**
@@ -261,10 +261,10 @@ static bool unpack_X(struct aa_ext *e, enum aa_code code)
* name element in the stream. If @name is NULL any name element will be
* skipped and only the typecode will be tested.
*
- * Returns 1 on success (both type code and name tests match) and the read
+ * Returns true on success (both type code and name tests match) and the read
* head is advanced past the headers
*
- * Returns: 0 if either match fails, the read head does not move
+ * Returns: false if either match fails, the read head does not move
*/
static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
@@ -289,11 +289,11 @@ static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
/* now check if type code matches */
if (unpack_X(e, code))
- return 1;
+ return true;
fail:
e->pos = pos;
- return 0;
+ return false;
}
static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
@@ -306,12 +306,12 @@ static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
if (data)
*data = get_unaligned((u8 *)e->pos);
e->pos += sizeof(u8);
- return 1;
+ return true;
}
fail:
e->pos = pos;
- return 0;
+ return false;
}
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
@@ -324,12 +324,12 @@ static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
if (data)
*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
e->pos += sizeof(u32);
- return 1;
+ return true;
}
fail:
e->pos = pos;
- return 0;
+ return false;
}
static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
@@ -342,12 +342,12 @@ static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
if (data)
*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
e->pos += sizeof(u64);
- return 1;
+ return true;
}
fail:
e->pos = pos;
- return 0;
+ return false;
}
static size_t unpack_array(struct aa_ext *e, const char *name)
@@ -472,7 +472,7 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e)
* @e: serialized data extent information (NOT NULL)
* @profile: profile to add the accept table to (NOT NULL)
*
- * Returns: 1 if table successfully unpacked
+ * Returns: true if table successfully unpacked
*/
static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
{
@@ -535,12 +535,12 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
if (!unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
- return 1;
+ return true;
fail:
aa_free_domain_entries(&profile->file.trans);
e->pos = saved_pos;
- return 0;
+ return false;
}
static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
@@ -565,11 +565,11 @@ static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
goto fail;
}
- return 1;
+ return true;
fail:
e->pos = pos;
- return 0;
+ return false;
}
static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
@@ -601,7 +601,7 @@ static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
goto fail;
}
- return 1;
+ return true;
fail:
if (profile->secmark) {
@@ -613,7 +613,7 @@ fail:
}
e->pos = pos;
- return 0;
+ return false;
}
static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
@@ -643,11 +643,11 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
if (!unpack_nameX(e, AA_STRUCTEND, NULL))
goto fail;
}
- return 1;
+ return true;
fail:
e->pos = pos;
- return 0;
+ return false;
}
static u32 strhash(const void *data, u32 len, u32 seed)
@@ -748,10 +748,14 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
profile->mode = APPARMOR_COMPLAIN;
+ else if (tmp == PACKED_MODE_ENFORCE)
+ profile->mode = APPARMOR_ENFORCE;
else if (tmp == PACKED_MODE_KILL)
profile->mode = APPARMOR_KILL;
else if (tmp == PACKED_MODE_UNCONFINED)
profile->mode = APPARMOR_UNCONFINED;
+ else
+ goto fail;
if (!unpack_u32(e, &tmp, NULL))
goto fail;
if (tmp)
@@ -990,8 +994,8 @@ static bool verify_xindex(int xindex, int table_size)
xtype = xindex & AA_X_TYPE_MASK;
index = xindex & AA_X_INDEX_MASK;
if (xtype == AA_X_TABLE && index >= table_size)
- return 0;
- return 1;
+ return false;
+ return true;
}
/* verify dfa xindexes are in range of transition tables */
@@ -1000,11 +1004,11 @@ static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
int i;
for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
- return 0;
+ return false;
if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
- return 0;
+ return false;
}
- return 1;
+ return true;
}
/**
diff --git a/security/commoncap.c b/security/commoncap.c
index 0ca31c8bc0b1..59bf3c1674c8 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -647,7 +647,8 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_fcap)
+static int get_file_caps(struct linux_binprm *bprm, struct file *file,
+ bool *effective, bool *has_fcap)
{
int rc = 0;
struct cpu_vfs_cap_data vcaps;
@@ -657,7 +658,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_f
if (!file_caps_enabled)
return 0;
- if (!mnt_may_suid(bprm->file->f_path.mnt))
+ if (!mnt_may_suid(file->f_path.mnt))
return 0;
/*
@@ -665,10 +666,10 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_f
* explicit that capability bits are limited to s_user_ns and its
* descendants.
*/
- if (!current_in_userns(bprm->file->f_path.mnt->mnt_sb->s_user_ns))
+ if (!current_in_userns(file->f_path.mnt->mnt_sb->s_user_ns))
return 0;
- rc = get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps);
+ rc = get_vfs_caps_from_disk(file->f_path.dentry, &vcaps);
if (rc < 0) {
if (rc == -EINVAL)
printk(KERN_NOTICE "Invalid argument reading file caps for %s\n",
@@ -797,26 +798,27 @@ static inline bool nonroot_raised_pE(struct cred *new, const struct cred *old,
}
/**
- * cap_bprm_set_creds - Set up the proposed credentials for execve().
+ * cap_bprm_creds_from_file - Set up the proposed credentials for execve().
* @bprm: The execution parameters, including the proposed creds
+ * @file: The file to pull the credentials from
*
* Set up the proposed credentials for a new execution context being
* constructed by execve(). The proposed creds in @bprm->cred is altered,
* which won't take effect immediately. Returns 0 if successful, -ve on error.
*/
-int cap_bprm_set_creds(struct linux_binprm *bprm)
+int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
{
+ /* Process setpcap binaries and capabilities for uid 0 */
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
bool effective = false, has_fcap = false, is_setid;
int ret;
kuid_t root_uid;
- new->cap_ambient = old->cap_ambient;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
- ret = get_file_caps(bprm, &effective, &has_fcap);
+ ret = get_file_caps(bprm, file, &effective, &has_fcap);
if (ret < 0)
return ret;
@@ -885,12 +887,11 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
return -EPERM;
/* Check for privilege-elevated exec. */
- bprm->cap_elevated = 0;
if (is_setid ||
(!__is_real(root_uid, new) &&
(effective ||
__cap_grew(permitted, ambient, new))))
- bprm->cap_elevated = 1;
+ bprm->secureexec = 1;
return 0;
}
@@ -1347,7 +1348,7 @@ static struct security_hook_list capability_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_traceme, cap_ptrace_traceme),
LSM_HOOK_INIT(capget, cap_capget),
LSM_HOOK_INIT(capset, cap_capset),
- LSM_HOOK_INIT(bprm_set_creds, cap_bprm_set_creds),
+ LSM_HOOK_INIT(bprm_creds_from_file, cap_bprm_creds_from_file),
LSM_HOOK_INIT(inode_need_killpriv, cap_inode_need_killpriv),
LSM_HOOK_INIT(inode_killpriv, cap_inode_killpriv),
LSM_HOOK_INIT(inode_getsecurity, cap_inode_getsecurity),
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 764b896cd628..168c3b78ac47 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -241,7 +241,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
/* Portable EVM signatures must include an IMA hash */
if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
- return -EPERM;
+ error = -EPERM;
out:
kfree(xattr_value);
kfree(desc);
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 64317d95363e..df93ac258e01 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -36,7 +36,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
-#define IMA_HASH_BITS 9
+#define IMA_HASH_BITS 10
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
@@ -45,13 +45,19 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_TEMPLATE_IMA_NAME "ima"
#define IMA_TEMPLATE_IMA_FMT "d|n"
+#define NR_BANKS(chip) ((chip != NULL) ? chip->nr_allocated_banks : 0)
+
/* current content of the policy */
extern int ima_policy_flag;
/* set during initialization */
extern int ima_hash_algo;
+extern int ima_sha1_idx __ro_after_init;
+extern int ima_hash_algo_idx __ro_after_init;
+extern int ima_extra_slots __ro_after_init;
extern int ima_appraise;
extern struct tpm_chip *ima_tpm_chip;
+extern const char boot_aggregate_name[];
/* IMA event related data */
struct ima_event_data {
@@ -92,7 +98,7 @@ struct ima_template_desc {
struct ima_template_entry {
int pcr;
- u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
+ struct tpm_digest *digests;
struct ima_template_desc *template_desc; /* template descriptor */
u32 template_data_len;
struct ima_field_data template_data[0]; /* template related data */
@@ -138,9 +144,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
int ima_calc_buffer_hash(const void *buf, loff_t len,
struct ima_digest_data *hash);
int ima_calc_field_array_hash(struct ima_field_data *field_data,
- struct ima_template_desc *desc, int num_fields,
- struct ima_digest_data *hash);
-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+ struct ima_template_entry *entry);
+int ima_calc_boot_aggregate(struct ima_digest_data *hash);
void ima_add_violation(struct file *file, const unsigned char *filename,
struct integrity_iint_cache *iint,
const char *op, const char *cause);
@@ -175,9 +180,10 @@ struct ima_h_table {
};
extern struct ima_h_table ima_htable;
-static inline unsigned long ima_hash_key(u8 *digest)
+static inline unsigned int ima_hash_key(u8 *digest)
{
- return hash_long(*digest, IMA_HASH_BITS);
+ /* there is no point in taking a hash of part of a digest */
+ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}
#define __ima_hooks(hook) \
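The ima.h hunk above widens the measurement hash table (IMA_HASH_BITS 9 -> 10) and replaces hash_long() over a single digest byte with a two-byte fold. A minimal userspace sketch of the new key computation, with the constants copied from the hunk and main() added purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define IMA_HASH_BITS 10
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)

/* same computation as the new ima_hash_key(): fold the first two digest
 * bytes little-endian and reduce modulo the table size */
static unsigned int ima_hash_key(const uint8_t *digest)
{
	return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}

int main(void)
{
	uint8_t digest[20] = { 0xde, 0xad };	/* stand-in for a SHA1 value */

	printf("htable slot = %u\n", ima_hash_key(digest));
	return 0;
}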
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index f6bc00914aa5..bf22de8b7ce0 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -27,6 +27,7 @@ void ima_free_template_entry(struct ima_template_entry *entry)
for (i = 0; i < entry->template_desc->num_fields; i++)
kfree(entry->template_data[i].data);
+ kfree(entry->digests);
kfree(entry);
}
@@ -38,6 +39,7 @@ int ima_alloc_init_template(struct ima_event_data *event_data,
struct ima_template_desc *desc)
{
struct ima_template_desc *template_desc;
+ struct tpm_digest *digests;
int i, result = 0;
if (desc)
@@ -50,6 +52,15 @@ int ima_alloc_init_template(struct ima_event_data *event_data,
if (!*entry)
return -ENOMEM;
+ digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*digests), GFP_NOFS);
+ if (!digests) {
+ kfree(*entry);
+ *entry = NULL;
+ return -ENOMEM;
+ }
+
+ (*entry)->digests = digests;
(*entry)->template_desc = template_desc;
for (i = 0; i < template_desc->num_fields; i++) {
const struct ima_template_field *field =
@@ -96,26 +107,16 @@ int ima_store_template(struct ima_template_entry *entry,
static const char audit_cause[] = "hashing_error";
char *template_name = entry->template_desc->name;
int result;
- struct {
- struct ima_digest_data hdr;
- char digest[TPM_DIGEST_SIZE];
- } hash;
if (!violation) {
- int num_fields = entry->template_desc->num_fields;
-
- /* this function uses default algo */
- hash.hdr.algo = HASH_ALGO_SHA1;
result = ima_calc_field_array_hash(&entry->template_data[0],
- entry->template_desc,
- num_fields, &hash.hdr);
+ entry);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
template_name, op,
audit_cause, result, 0);
return result;
}
- memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
}
entry->pcr = pcr;
result = ima_add_template_entry(entry, violation, op, inode, filename);
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 88b5e288f241..220b14920c37 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -57,7 +57,22 @@ MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;
-int __init ima_init_crypto(void)
+struct ima_algo_desc {
+ struct crypto_shash *tfm;
+ enum hash_algo algo;
+};
+
+int ima_sha1_idx __ro_after_init;
+int ima_hash_algo_idx __ro_after_init;
+/*
+ * Additional number of slots reserved, as needed, for SHA1
+ * and IMA default algo.
+ */
+int ima_extra_slots __ro_after_init;
+
+static struct ima_algo_desc *ima_algo_array;
+
+static int __init ima_init_ima_crypto(void)
{
long rc;
@@ -76,26 +91,137 @@ int __init ima_init_crypto(void)
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
struct crypto_shash *tfm = ima_shash_tfm;
- int rc;
+ int rc, i;
if (algo < 0 || algo >= HASH_ALGO__LAST)
algo = ima_hash_algo;
- if (algo != ima_hash_algo) {
- tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
- if (IS_ERR(tfm)) {
- rc = PTR_ERR(tfm);
- pr_err("Can not allocate %s (reason: %d)\n",
- hash_algo_name[algo], rc);
- }
+ if (algo == ima_hash_algo)
+ return tfm;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
+ return ima_algo_array[i].tfm;
+
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
}
return tfm;
}
+int __init ima_init_crypto(void)
+{
+ enum hash_algo algo;
+ long rc;
+ int i;
+
+ rc = ima_init_ima_crypto();
+ if (rc)
+ return rc;
+
+ ima_sha1_idx = -1;
+ ima_hash_algo_idx = -1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (algo == HASH_ALGO_SHA1)
+ ima_sha1_idx = i;
+
+ if (algo == ima_hash_algo)
+ ima_hash_algo_idx = i;
+ }
+
+ if (ima_sha1_idx < 0) {
+ ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+ if (ima_hash_algo == HASH_ALGO_SHA1)
+ ima_hash_algo_idx = ima_sha1_idx;
+ }
+
+ if (ima_hash_algo_idx < 0)
+ ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+
+ ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*ima_algo_array), GFP_KERNEL);
+ if (!ima_algo_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ ima_algo_array[i].algo = algo;
+
+ /* unknown TPM algorithm */
+ if (algo == HASH_ALGO__LAST)
+ continue;
+
+ if (algo == ima_hash_algo) {
+ ima_algo_array[i].tfm = ima_shash_tfm;
+ continue;
+ }
+
+ ima_algo_array[i].tfm = ima_alloc_tfm(algo);
+ if (IS_ERR(ima_algo_array[i].tfm)) {
+ if (algo == HASH_ALGO_SHA1) {
+ rc = PTR_ERR(ima_algo_array[i].tfm);
+ ima_algo_array[i].tfm = NULL;
+ goto out_array;
+ }
+
+ ima_algo_array[i].tfm = NULL;
+ }
+ }
+
+ if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
+ if (ima_hash_algo == HASH_ALGO_SHA1) {
+ ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
+ } else {
+ ima_algo_array[ima_sha1_idx].tfm =
+ ima_alloc_tfm(HASH_ALGO_SHA1);
+ if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
+ rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
+ goto out_array;
+ }
+ }
+
+ ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
+ }
+
+ if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
+ ima_hash_algo_idx != ima_sha1_idx) {
+ ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
+ ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
+ }
+
+ return 0;
+out_array:
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (!ima_algo_array[i].tfm ||
+ ima_algo_array[i].tfm == ima_shash_tfm)
+ continue;
+
+ crypto_free_shash(ima_algo_array[i].tfm);
+ }
+out:
+ crypto_free_shash(ima_shash_tfm);
+ return rc;
+}
+
static void ima_free_tfm(struct crypto_shash *tfm)
{
- if (tfm != ima_shash_tfm)
- crypto_free_shash(tfm);
+ int i;
+
+ if (tfm == ima_shash_tfm)
+ return;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm == tfm)
+ return;
+
+ crypto_free_shash(tfm);
}
/**
@@ -464,17 +590,15 @@ out:
* Calculate the hash of template data
*/
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
- struct ima_template_desc *td,
- int num_fields,
- struct ima_digest_data *hash,
- struct crypto_shash *tfm)
+ struct ima_template_entry *entry,
+ int tfm_idx)
{
- SHASH_DESC_ON_STACK(shash, tfm);
+ SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
+ struct ima_template_desc *td = entry->template_desc;
+ int num_fields = entry->template_desc->num_fields;
int rc, i;
- shash->tfm = tfm;
-
- hash->length = crypto_shash_digestsize(tfm);
+ shash->tfm = ima_algo_array[tfm_idx].tfm;
rc = crypto_shash_init(shash);
if (rc != 0)
@@ -504,27 +628,44 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
}
if (!rc)
- rc = crypto_shash_final(shash, hash->digest);
+ rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);
return rc;
}
int ima_calc_field_array_hash(struct ima_field_data *field_data,
- struct ima_template_desc *desc, int num_fields,
- struct ima_digest_data *hash)
+ struct ima_template_entry *entry)
{
- struct crypto_shash *tfm;
- int rc;
+ u16 alg_id;
+ int rc, i;
- tfm = ima_alloc_tfm(hash->algo);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
+ if (rc)
+ return rc;
- rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
- hash, tfm);
+ entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;
- ima_free_tfm(tfm);
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (i == ima_sha1_idx)
+ continue;
+
+ if (i < NR_BANKS(ima_tpm_chip)) {
+ alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ entry->digests[i].alg_id = alg_id;
+ }
+
+ /* for unmapped TPM algorithms digest is still a padded SHA1 */
+ if (!ima_algo_array[i].tfm) {
+ memcpy(entry->digests[i].digest,
+ entry->digests[ima_sha1_idx].digest,
+ TPM_DIGEST_SIZE);
+ continue;
+ }
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
+ if (rc)
+ return rc;
+ }
return rc;
}
@@ -645,7 +786,7 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
return calc_buffer_shash(buf, len, hash);
}
-static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
+static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
if (!ima_tpm_chip)
return;
@@ -655,18 +796,29 @@ static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
}
/*
- * Calculate the boot aggregate hash
+ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
+ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
+ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
+ * allowing firmware to configure and enable different banks.
+ *
+ * Knowing which TPM bank is read to calculate the boot_aggregate digest
+ * needs to be conveyed to a verifier. For this reason, use the same
+ * hash algorithm for reading the TPM PCRs as for calculating the boot
+ * aggregate digest as stored in the measurement list.
*/
-static int __init ima_calc_boot_aggregate_tfm(char *digest,
- struct crypto_shash *tfm)
+static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ struct crypto_shash *tfm)
{
- struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
+ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
int rc;
u32 i;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
+ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
+ d.alg_id);
+
rc = crypto_shash_init(shash);
if (rc != 0)
return rc;
@@ -675,24 +827,48 @@ static int __init ima_calc_boot_aggregate_tfm(char *digest,
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, &d);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
}
if (!rc)
crypto_shash_final(shash, digest);
return rc;
}
-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
struct crypto_shash *tfm;
- int rc;
+ u16 crypto_id, alg_id;
+ int rc, i, bank_idx = -1;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (crypto_id == hash->algo) {
+ bank_idx = i;
+ break;
+ }
+
+ if (crypto_id == HASH_ALGO_SHA256)
+ bank_idx = i;
+
+ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
+ bank_idx = i;
+ }
+
+ if (bank_idx == -1) {
+ pr_err("No suitable TPM algorithm for boot aggregate\n");
+ return 0;
+ }
+
+ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
tfm = ima_alloc_tfm(hash->algo);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
hash->length = crypto_shash_digestsize(tfm);
- rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
ima_free_tfm(tfm);
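The reworked ima_calc_boot_aggregate() above chooses which PCR bank to read by preference: the configured IMA hash algorithm, else SHA256, else SHA1. A hedged userspace sketch of that selection loop; pick_bank() and the simplified bank array are stand-ins for illustration, not kernel code:

#include <stdio.h>

enum hash_algo { HASH_ALGO_SHA1, HASH_ALGO_SHA256, HASH_ALGO_SHA512 };

/* mirrors the loop in ima_calc_boot_aggregate(): an exact match wins,
 * otherwise prefer SHA256 (TCG-required for TPM 2.0), then SHA1 */
static int pick_bank(const enum hash_algo *banks, int nr,
		     enum hash_algo preferred)
{
	int i, bank_idx = -1;

	for (i = 0; i < nr; i++) {
		if (banks[i] == preferred)
			return i;
		if (banks[i] == HASH_ALGO_SHA256)
			bank_idx = i;
		if (bank_idx == -1 && banks[i] == HASH_ALGO_SHA1)
			bank_idx = i;
	}
	return bank_idx;	/* -1 means no usable bank was found */
}

int main(void)
{
	enum hash_algo banks[] = { HASH_ALGO_SHA1, HASH_ALGO_SHA256 };

	/* preferred algorithm absent: the SHA256 bank (index 1) is chosen */
	printf("bank = %d\n", pick_bank(banks, 2, HASH_ALGO_SHA512));
	return 0;
}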
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 3efc8308ad26..e3fcad871861 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -150,7 +150,7 @@ int ima_measurements_show(struct seq_file *m, void *v)
ima_putc(m, &pcr, sizeof(e->pcr));
/* 2nd: template digest */
- ima_putc(m, e->digest, TPM_DIGEST_SIZE);
+ ima_putc(m, e->digests[ima_sha1_idx].digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
namelen = !ima_canonical_fmt ? strlen(template_name) :
@@ -233,7 +233,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
seq_printf(m, "%2d ", e->pcr);
/* 2nd: SHA1 template hash */
- ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
+ ima_print_digest(m, e->digests[ima_sha1_idx].digest, TPM_DIGEST_SIZE);
/* 3rd: template name */
seq_printf(m, " %s", template_name);
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 567468188a61..4902fe7bd570 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -19,13 +19,13 @@
#include "ima.h"
/* name for boot aggregate entry */
-static const char boot_aggregate_name[] = "boot_aggregate";
+const char boot_aggregate_name[] = "boot_aggregate";
struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
* the PCR register.
*
- * Calculate the boot aggregate, a SHA1 over tpm registers 0-7,
+ * Calculate the boot aggregate, a hash over tpm registers 0-7,
* assuming a TPM chip exists, and zeroes if the TPM chip does not
* exist. Add the boot aggregate measurement to the measurement
* list and extend the PCR register.
@@ -49,15 +49,27 @@ static int __init ima_add_boot_aggregate(void)
int violation = 0;
struct {
struct ima_digest_data hdr;
- char digest[TPM_DIGEST_SIZE];
+ char digest[TPM_MAX_DIGEST_SIZE];
} hash;
memset(iint, 0, sizeof(*iint));
memset(&hash, 0, sizeof(hash));
iint->ima_hash = &hash.hdr;
- iint->ima_hash->algo = HASH_ALGO_SHA1;
- iint->ima_hash->length = SHA1_DIGEST_SIZE;
-
+ iint->ima_hash->algo = ima_hash_algo;
+ iint->ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ /*
+ * With TPM 2.0 hash agility, TPM chips could support multiple TPM
+ * PCR banks, allowing firmware to configure and enable different
+ * banks. The SHA1 bank is not necessarily enabled.
+ *
+ * Use the same hash algorithm for reading the TPM PCRs as for
+ * calculating the boot aggregate digest. Preference is given to
+ * the configured IMA default hash algorithm. Otherwise, use the
+ * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2.
+ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank
+ * is not found.
+ */
if (ima_tpm_chip) {
result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 9d0abedeae77..800fb3bba418 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -394,6 +394,57 @@ int ima_file_mmap(struct file *file, unsigned long prot)
}
/**
+ * ima_file_mprotect - based on policy, limit mprotect change
+ * @vma: the memory area whose protection is being changed
+ * @prot: contains the protection that will be applied by the kernel.
+ *
+ * Files can be mmap'ed read/write and later changed to execute to circumvent
+ * IMA's mmap appraisal policy rules. Due to locking issues (mmap semaphore
+ * would be taken before i_mutex), files cannot be measured or appraised at
+ * this point. Eliminate this integrity gap by denying the mprotect
+ * PROT_EXEC change, if an mmap appraise policy rule exists.
+ *
+ * On mprotect change success, return 0. On failure, return -EPERM.
+ */
+int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot)
+{
+ struct ima_template_desc *template;
+ struct file *file = vma->vm_file;
+ char filename[NAME_MAX];
+ char *pathbuf = NULL;
+ const char *pathname = NULL;
+ struct inode *inode;
+ int result = 0;
+ int action;
+ u32 secid;
+ int pcr;
+
+ /* Is mprotect making an mmap'ed file executable? */
+ if (!vma->vm_file || !(prot & PROT_EXEC) || (vma->vm_flags & VM_EXEC))
+ return 0;
+
+ security_task_getsecid(current, &secid);
+ inode = file_inode(vma->vm_file);
+ action = ima_get_action(inode, current_cred(), secid, MAY_EXEC,
+ MMAP_CHECK, &pcr, &template, 0);
+
+ /* Is the mmap'ed file in policy? */
+ if (!(action & (IMA_MEASURE | IMA_APPRAISE_SUBMASK)))
+ return 0;
+
+ if (action & IMA_APPRAISE_SUBMASK)
+ result = -EPERM;
+
+ file = vma->vm_file;
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, pathname,
+ "collect_data", "failed-mprotect", result, 0);
+ if (pathbuf)
+ __putname(pathbuf);
+
+ return result;
+}
+
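ima_file_mprotect() above closes the read/write-then-exec gap. A small userspace illustration of the sequence it intercepts; whether the mprotect() actually fails depends on the loaded IMA policy (an appraise rule with func=MMAP_CHECK), so this only demonstrates the call pattern:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/bin/true";
	int fd = open(path, O_RDONLY);
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* map the file without execute permission first */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* the PROT_EXEC transition is what the new hook can deny (-EPERM) */
	if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
		perror("mprotect");
	munmap(p, 4096);
	close(fd);
	return 0;
}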
+/**
* ima_bprm_check - based on policy, collect/store measurement.
* @bprm: contains the linux_binprm structure
*
@@ -792,6 +843,9 @@ static int __init init_ima(void)
error = ima_init();
}
+ if (error)
+ return error;
+
error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
if (error)
pr_warn("Couldn't register LSM notifier, error %d\n", error);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c334e0dc6083..e493063a3c34 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -204,7 +204,7 @@ static struct ima_rule_entry *arch_policy_entry __ro_after_init;
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
static LIST_HEAD(ima_temp_rules);
-static struct list_head *ima_rules;
+static struct list_head *ima_rules = &ima_default_rules;
/* Pre-allocated buffer used for matching keyrings. */
static char *ima_keyrings;
@@ -644,9 +644,12 @@ static void add_rules(struct ima_rule_entry *entries, int count,
list_add_tail(&entry->list, &ima_policy_rules);
}
if (entries[i].action == APPRAISE) {
- temp_ima_appraise |= ima_appraise_flag(entries[i].func);
- if (entries[i].func == POLICY_CHECK)
- temp_ima_appraise |= IMA_APPRAISE_POLICY;
+ if (entries != build_appraise_rules)
+ temp_ima_appraise |=
+ ima_appraise_flag(entries[i].func);
+ else
+ build_ima_appraise |=
+ ima_appraise_flag(entries[i].func);
}
}
}
@@ -765,7 +768,6 @@ void __init ima_init_policy(void)
ARRAY_SIZE(default_appraise_rules),
IMA_DEFAULT_POLICY);
- ima_rules = &ima_default_rules;
ima_update_policy_flag();
}
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 8753212ddb18..fb4ec270f620 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -55,7 +55,8 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
key = ima_hash_key(digest_value);
rcu_read_lock();
hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
- rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
+ rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
+ digest_value, hash_digest_size[ima_hash_algo]);
if ((rc == 0) && (qe->entry->pcr == pcr)) {
ret = qe;
break;
@@ -75,7 +76,7 @@ static int get_binary_runtime_size(struct ima_template_entry *entry)
int size = 0;
size += sizeof(u32); /* pcr */
- size += sizeof(entry->digest);
+ size += TPM_DIGEST_SIZE;
size += sizeof(int); /* template name size field */
size += strlen(entry->template_desc->name);
size += sizeof(entry->template_data_len);
@@ -107,7 +108,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry,
atomic_long_inc(&ima_htable.len);
if (update_htable) {
- key = ima_hash_key(entry->digest);
+ key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
}
@@ -134,18 +135,14 @@ unsigned long ima_get_binary_runtime_size(void)
return binary_runtime_size + sizeof(struct ima_kexec_hdr);
};
-static int ima_pcr_extend(const u8 *hash, int pcr)
+static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
int result = 0;
- int i;
if (!ima_tpm_chip)
return result;
- for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
- memcpy(digests[i].digest, hash, TPM_DIGEST_SIZE);
-
- result = tpm_pcr_extend(ima_tpm_chip, pcr, digests);
+ result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
if (result != 0)
pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
@@ -163,7 +160,8 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode,
const unsigned char *filename)
{
- u8 digest[TPM_DIGEST_SIZE];
+ u8 *digest = entry->digests[ima_hash_algo_idx].digest;
+ struct tpm_digest *digests_arg = entry->digests;
const char *audit_cause = "hash_added";
char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
int audit_info = 1;
@@ -171,7 +169,6 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
mutex_lock(&ima_extend_list_mutex);
if (!violation) {
- memcpy(digest, entry->digest, sizeof(digest));
if (ima_lookup_digest_entry(digest, entry->pcr)) {
audit_cause = "hash_exists";
result = -EEXIST;
@@ -187,9 +184,9 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
if (violation) /* invalidate pcr */
- memset(digest, 0xff, sizeof(digest));
+ digests_arg = digests;
- tpmresult = ima_pcr_extend(digest, entry->pcr);
+ tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
if (tpmresult != 0) {
snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
tpmresult);
@@ -215,6 +212,8 @@ int ima_restore_measurement_entry(struct ima_template_entry *entry)
int __init ima_init_digests(void)
{
+ u16 digest_size;
+ u16 crypto_id;
int i;
if (!ima_tpm_chip)
@@ -225,8 +224,17 @@ int __init ima_init_digests(void)
if (!digests)
return -ENOMEM;
- for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+
+ /* for unmapped TPM algorithms digest is still a padded SHA1 */
+ if (crypto_id == HASH_ALGO__LAST)
+ digest_size = SHA1_DIGEST_SIZE;
+
+ memset(digests[i].digest, 0xff, digest_size);
+ }
return 0;
}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 062d9ad49afb..5a2def40a733 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -301,6 +301,7 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
int template_data_size,
struct ima_template_entry **entry)
{
+ struct tpm_digest *digests;
int ret = 0;
int i;
@@ -309,11 +310,21 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
if (!*entry)
return -ENOMEM;
+ digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*digests), GFP_NOFS);
+ if (!digests) {
+ kfree(*entry);
+ return -ENOMEM;
+ }
+
+ (*entry)->digests = digests;
+
ret = ima_parse_buf(template_data, template_data + template_data_size,
NULL, template_desc->num_fields,
(*entry)->template_data, NULL, NULL,
ENFORCE_FIELDS | ENFORCE_BUFEND, "template data");
if (ret < 0) {
+ kfree((*entry)->digests);
kfree(*entry);
return ret;
}
@@ -346,6 +357,7 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
int ima_restore_measurement_list(loff_t size, void *buf)
{
char template_name[MAX_TEMPLATE_NAME_LEN];
+ unsigned char zero[TPM_DIGEST_SIZE] = { 0 };
struct ima_kexec_hdr *khdr = buf;
struct ima_field_data hdr[HDR__LAST] = {
@@ -445,8 +457,17 @@ int ima_restore_measurement_list(loff_t size, void *buf)
if (ret < 0)
break;
- memcpy(entry->digest, hdr[HDR_DIGEST].data,
- hdr[HDR_DIGEST].len);
+ if (memcmp(hdr[HDR_DIGEST].data, zero, sizeof(zero))) {
+ ret = ima_calc_field_array_hash(
+ &entry->template_data[0],
+ entry);
+ if (ret < 0) {
+ pr_err("cannot calculate template digest\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
le32_to_cpu(*(hdr[HDR_PCR].data));
ret = ima_restore_measurement_entry(entry);
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 9cd1e50f3ccc..635c6ac05050 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -286,6 +286,24 @@ int ima_eventdigest_init(struct ima_event_data *event_data,
goto out;
}
+ if ((const char *)event_data->filename == boot_aggregate_name) {
+ if (ima_tpm_chip) {
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_boot_aggregate(&hash.hdr);
+
+ /* algo can change depending on available PCR banks */
+ if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
+ result = -EINVAL;
+
+ if (result < 0)
+ memset(&hash, 0, sizeof(hash));
+ }
+
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
+ goto out;
+ }
+
if (!event_data->file) /* missing info to re-calculate the digest */
return -EINVAL;
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 47c041563d41..8153ea01d7bb 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -60,9 +60,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- select CRYPTO
- select CRYPTO_AES
- select CRYPTO_GCM
+ depends on CRYPTO_LIB_CHACHA20POLY1305 = y
help
This option provides support for holding large keys within the kernel
(for example Kerberos ticket caches). The data may be stored out to
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 82008f900930..dd708e8f13c0 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Large capacity key type
*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2017-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -12,20 +12,10 @@
#include <linux/file.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
-#include <linux/scatterlist.h>
#include <linux/random.h>
-#include <linux/vmalloc.h>
#include <keys/user-type.h>
#include <keys/big_key-type.h>
-#include <crypto/aead.h>
-#include <crypto/gcm.h>
-
-struct big_key_buf {
- unsigned int nr_pages;
- void *virt;
- struct scatterlist *sg;
- struct page *pages[];
-};
+#include <crypto/chacha20poly1305.h>
/*
* Layout of key payload words.
@@ -38,14 +28,6 @@ enum {
};
/*
- * Crypto operation with big_key data
- */
-enum big_key_op {
- BIG_KEY_ENC,
- BIG_KEY_DEC,
-};
-
-/*
* If the data is under this limit, there's no point creating a shm file to
* hold it as the permanently resident metadata for the shmem fs will be at
* least as large as the data.
@@ -53,16 +35,6 @@ enum big_key_op {
#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
/*
- * Key size for big_key data encryption
- */
-#define ENC_KEY_SIZE 32
-
-/*
- * Authentication tag length
- */
-#define ENC_AUTHTAG_SIZE 16
-
-/*
* big_key defined keys take an arbitrary string as the description and an
* arbitrary blob of data as the payload
*/
@@ -75,136 +47,20 @@ struct key_type key_type_big_key = {
.destroy = big_key_destroy,
.describe = big_key_describe,
.read = big_key_read,
- /* no ->update(); don't add it without changing big_key_crypt() nonce */
+ .update = big_key_update,
};
/*
- * Crypto names for big_key data authenticated encryption
- */
-static const char big_key_alg_name[] = "gcm(aes)";
-#define BIG_KEY_IV_SIZE GCM_AES_IV_SIZE
-
-/*
- * Crypto algorithms for big_key data authenticated encryption
- */
-static struct crypto_aead *big_key_aead;
-
-/*
- * Since changing the key affects the entire object, we need a mutex.
- */
-static DEFINE_MUTEX(big_key_aead_lock);
-
-/*
- * Encrypt/decrypt big_key data
- */
-static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
-{
- int ret;
- struct aead_request *aead_req;
- /* We always use a zero nonce. The reason we can get away with this is
- * because we're using a different randomly generated key for every
- * different encryption. Notably, too, key_type_big_key doesn't define
- * an .update function, so there's no chance we'll wind up reusing the
- * key to encrypt updated data. Simply put: one key, one encryption.
- */
- u8 zero_nonce[BIG_KEY_IV_SIZE];
-
- aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
- if (!aead_req)
- return -ENOMEM;
-
- memset(zero_nonce, 0, sizeof(zero_nonce));
- aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
- aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
- aead_request_set_ad(aead_req, 0);
-
- mutex_lock(&big_key_aead_lock);
- if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
- ret = -EAGAIN;
- goto error;
- }
- if (op == BIG_KEY_ENC)
- ret = crypto_aead_encrypt(aead_req);
- else
- ret = crypto_aead_decrypt(aead_req);
-error:
- mutex_unlock(&big_key_aead_lock);
- aead_request_free(aead_req);
- return ret;
-}
-
-/*
- * Free up the buffer.
- */
-static void big_key_free_buffer(struct big_key_buf *buf)
-{
- unsigned int i;
-
- if (buf->virt) {
- memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
- vunmap(buf->virt);
- }
-
- for (i = 0; i < buf->nr_pages; i++)
- if (buf->pages[i])
- __free_page(buf->pages[i]);
-
- kfree(buf);
-}
-
-/*
- * Allocate a buffer consisting of a set of pages with a virtual mapping
- * applied over them.
- */
-static void *big_key_alloc_buffer(size_t len)
-{
- struct big_key_buf *buf;
- unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned int i, l;
-
- buf = kzalloc(sizeof(struct big_key_buf) +
- sizeof(struct page) * npg +
- sizeof(struct scatterlist) * npg,
- GFP_KERNEL);
- if (!buf)
- return NULL;
-
- buf->nr_pages = npg;
- buf->sg = (void *)(buf->pages + npg);
- sg_init_table(buf->sg, npg);
-
- for (i = 0; i < buf->nr_pages; i++) {
- buf->pages[i] = alloc_page(GFP_KERNEL);
- if (!buf->pages[i])
- goto nomem;
-
- l = min_t(size_t, len, PAGE_SIZE);
- sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
- len -= l;
- }
-
- buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
- if (!buf->virt)
- goto nomem;
-
- return buf;
-
-nomem:
- big_key_free_buffer(buf);
- return NULL;
-}
-
-/*
* Preparse a big key
*/
int big_key_preparse(struct key_preparsed_payload *prep)
{
- struct big_key_buf *buf;
struct path *path = (struct path *)&prep->payload.data[big_key_path];
struct file *file;
- u8 *enckey;
+ u8 *buf, *enckey;
ssize_t written;
- size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
+ size_t datalen = prep->datalen;
+ size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
int ret;
if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
@@ -220,28 +76,28 @@ int big_key_preparse(struct key_preparsed_payload *prep)
* to be swapped out if needed.
*
* File content is stored encrypted with randomly generated key.
+ * Since a fresh random key is generated for every encryption (an
+ * ->update() goes back through ->preparse() and so picks a new key),
+ * a zero nonce is never reused with the same key.
*/
loff_t pos = 0;
- buf = big_key_alloc_buffer(enclen);
+ buf = kvmalloc(enclen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf->virt, prep->data, datalen);
/* generate random key */
- enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
+ enckey = kmalloc(CHACHA20POLY1305_KEY_SIZE, GFP_KERNEL);
if (!enckey) {
ret = -ENOMEM;
goto error;
}
- ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
+ ret = get_random_bytes_wait(enckey, CHACHA20POLY1305_KEY_SIZE);
if (unlikely(ret))
goto err_enckey;
- /* encrypt aligned data */
- ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
- if (ret)
- goto err_enckey;
+ /* encrypt data */
+ chacha20poly1305_encrypt(buf, prep->data, datalen, NULL, 0,
+ 0, enckey);
/* save aligned data to file */
file = shmem_kernel_file_setup("", enclen, 0);
@@ -250,11 +106,11 @@ int big_key_preparse(struct key_preparsed_payload *prep)
goto err_enckey;
}
- written = kernel_write(file, buf->virt, enclen, &pos);
+ written = kernel_write(file, buf, enclen, &pos);
if (written != enclen) {
ret = written;
if (written >= 0)
- ret = -ENOMEM;
+ ret = -EIO;
goto err_fput;
}
@@ -265,7 +121,8 @@ int big_key_preparse(struct key_preparsed_payload *prep)
*path = file->f_path;
path_get(path);
fput(file);
- big_key_free_buffer(buf);
+ memzero_explicit(buf, enclen);
+ kvfree(buf);
} else {
/* Just store the data in a buffer */
void *data = kmalloc(datalen, GFP_KERNEL);
@@ -283,7 +140,8 @@ err_fput:
err_enckey:
kzfree(enckey);
error:
- big_key_free_buffer(buf);
+ memzero_explicit(buf, enclen);
+ kvfree(buf);
return ret;
}
@@ -334,6 +192,23 @@ void big_key_destroy(struct key *key)
}
/*
+ * Update a big key
+ */
+int big_key_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ int ret;
+
+ ret = key_payload_reserve(key, prep->datalen);
+ if (ret < 0)
+ return ret;
+
+ if (key_is_positive(key))
+ big_key_destroy(key);
+
+ return generic_key_instantiate(key, prep);
+}
+
+/*
* describe the big_key key
*/
void big_key_describe(const struct key *key, struct seq_file *m)
@@ -361,14 +236,13 @@ long big_key_read(const struct key *key, char *buffer, size_t buflen)
return datalen;
if (datalen > BIG_KEY_FILE_THRESHOLD) {
- struct big_key_buf *buf;
struct path *path = (struct path *)&key->payload.data[big_key_path];
struct file *file;
- u8 *enckey = (u8 *)key->payload.data[big_key_data];
- size_t enclen = datalen + ENC_AUTHTAG_SIZE;
+ u8 *buf, *enckey = (u8 *)key->payload.data[big_key_data];
+ size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
loff_t pos = 0;
- buf = big_key_alloc_buffer(enclen);
+ buf = kvmalloc(enclen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -379,25 +253,28 @@ long big_key_read(const struct key *key, char *buffer, size_t buflen)
}
/* read file to kernel and decrypt */
- ret = kernel_read(file, buf->virt, enclen, &pos);
- if (ret >= 0 && ret != enclen) {
- ret = -EIO;
+ ret = kernel_read(file, buf, enclen, &pos);
+ if (ret != enclen) {
+ if (ret >= 0)
+ ret = -EIO;
goto err_fput;
}
- ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
- if (ret)
+ ret = chacha20poly1305_decrypt(buf, buf, enclen, NULL, 0, 0,
+ enckey) ? 0 : -EBADMSG;
+ if (unlikely(ret))
goto err_fput;
ret = datalen;
/* copy out decrypted data */
- memcpy(buffer, buf->virt, datalen);
+ memcpy(buffer, buf, datalen);
err_fput:
fput(file);
error:
- big_key_free_buffer(buf);
+ memzero_explicit(buf, enclen);
+ kvfree(buf);
} else {
ret = datalen;
memcpy(buffer, key->payload.data[big_key_data], datalen);
@@ -411,39 +288,7 @@ error:
*/
static int __init big_key_init(void)
{
- int ret;
-
- /* init block cipher */
- big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(big_key_aead)) {
- ret = PTR_ERR(big_key_aead);
- pr_err("Can't alloc crypto: %d\n", ret);
- return ret;
- }
-
- if (unlikely(crypto_aead_ivsize(big_key_aead) != BIG_KEY_IV_SIZE)) {
- WARN(1, "big key algorithm changed?");
- ret = -EINVAL;
- goto free_aead;
- }
-
- ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
- if (ret < 0) {
- pr_err("Can't set crypto auth tag len: %d\n", ret);
- goto free_aead;
- }
-
- ret = register_key_type(&key_type_big_key);
- if (ret < 0) {
- pr_err("Can't register type: %d\n", ret);
- goto free_aead;
- }
-
- return 0;
-
-free_aead:
- crypto_free_aead(big_key_aead);
- return ret;
+ return register_key_type(&key_type_big_key);
}
late_initcall(big_key_init);
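The big_key rewrite above drops the AEAD request machinery in favour of the synchronous ChaCha20-Poly1305 library calls. A hedged kernel-context sketch of the resulting pattern, one random key per object and a fixed zero nonce; demo_roundtrip() is an illustrative helper, not part of big_key:

#include <crypto/chacha20poly1305.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_roundtrip(const u8 *data, size_t datalen)
{
	size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
	u8 key[CHACHA20POLY1305_KEY_SIZE];
	u8 *buf;
	int ret = 0;

	buf = kvmalloc(enclen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	get_random_bytes(key, sizeof(key));	/* fresh key: zero nonce is safe */
	chacha20poly1305_encrypt(buf, data, datalen, NULL, 0, 0, key);

	/* decrypt in place; a false return means the authentication tag failed */
	if (!chacha20poly1305_decrypt(buf, buf, enclen, NULL, 0, 0, key))
		ret = -EBADMSG;

	memzero_explicit(buf, enclen);
	kvfree(buf);
	return ret;
}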
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 6d0ca48ae9a5..153d35c20d3d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -350,15 +350,4 @@ static inline void key_check(const struct key *key)
#define key_check(key) do {} while(0)
#endif
-
-/*
- * Helper function to clear and free a kvmalloc'ed memory object.
- */
-static inline void __kvzfree(const void *addr, size_t len)
-{
- if (addr) {
- memset((void *)addr, 0, len);
- kvfree(addr);
- }
-}
#endif /* _INTERNAL_H */
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 5e01192e222a..e5ef20a0d05e 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -142,10 +142,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
key_ref_put(keyring_ref);
error3:
- if (payload) {
- memzero_explicit(payload, plen);
- kvfree(payload);
- }
+ kvfree_sensitive(payload, plen);
error2:
kfree(description);
error:
@@ -360,7 +357,7 @@ long keyctl_update_key(key_serial_t id,
key_ref_put(key_ref);
error2:
- __kvzfree(payload, plen);
+ kvfree_sensitive(payload, plen);
error:
return ret;
}
@@ -878,7 +875,7 @@ can_read_key:
*
* Allocating a temporary buffer to hold the keys before
* transferring them to user buffer to avoid potential
- * deadlock involving page fault and mmap_sem.
+ * deadlock involving page fault and mmap_lock.
*
* key_data_len = (buflen <= PAGE_SIZE)
* ? buflen : actual length of key data
@@ -914,7 +911,7 @@ can_read_key:
*/
if (ret > key_data_len) {
if (unlikely(key_data))
- __kvzfree(key_data, key_data_len);
+ kvfree_sensitive(key_data, key_data_len);
key_data_len = ret;
continue; /* Allocate buffer */
}
@@ -923,7 +920,7 @@ can_read_key:
ret = -EFAULT;
break;
}
- __kvzfree(key_data, key_data_len);
+ kvfree_sensitive(key_data, key_data_len);
key_put_out:
key_put(key);
@@ -1225,10 +1222,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
keyctl_change_reqkey_auth(NULL);
error2:
- if (payload) {
- memzero_explicit(payload, plen);
- kvfree(payload);
- }
+ kvfree_sensitive(payload, plen);
error:
return ret;
}
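The keyctl changes above replace the local __kvzfree() helper with the mm-provided kvfree_sensitive(), which zeroes the buffer before freeing and accepts NULL, so the surrounding if (payload) guards can go away. A minimal sketch of the call pattern (demo_use() is illustrative only):

#include <linux/errno.h>
#include <linux/mm.h>

static int demo_use(size_t plen)
{
	void *payload = kvmalloc(plen, GFP_KERNEL);

	if (!payload)
		return -ENOMEM;

	/* ... copy secret material into payload and use it ... */

	kvfree_sensitive(payload, plen);	/* memzero_explicit() + kvfree() */
	return 0;
}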
diff --git a/security/security.c b/security/security.c
index 51de970fbb1e..e0290b7e6a08 100644
--- a/security/security.c
+++ b/security/security.c
@@ -823,9 +823,14 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
return __vm_enough_memory(mm, pages, cap_sys_admin);
}
-int security_bprm_set_creds(struct linux_binprm *bprm)
+int security_bprm_creds_for_exec(struct linux_binprm *bprm)
{
- return call_int_hook(bprm_set_creds, 0, bprm);
+ return call_int_hook(bprm_creds_for_exec, 0, bprm);
+}
+
+int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file)
+{
+ return call_int_hook(bprm_creds_from_file, 0, bprm, file);
}
int security_bprm_check(struct linux_binprm *bprm)
@@ -1459,6 +1464,7 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return call_int_hook(file_ioctl, 0, file, cmd, arg);
}
+EXPORT_SYMBOL_GPL(security_file_ioctl);
static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
@@ -1512,7 +1518,12 @@ int security_mmap_addr(unsigned long addr)
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot)
{
- return call_int_hook(file_mprotect, 0, vma, reqprot, prot);
+ int ret;
+
+ ret = call_int_hook(file_mprotect, 0, vma, reqprot, prot);
+ if (ret)
+ return ret;
+ return ima_file_mprotect(vma, prot);
}
int security_file_lock(struct file *file, unsigned int cmd)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4c037c2545c1..7e954b555be6 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2286,7 +2286,7 @@ static int check_nnp_nosuid(const struct linux_binprm *bprm,
return -EACCES;
}
-static int selinux_bprm_set_creds(struct linux_binprm *bprm)
+static int selinux_bprm_creds_for_exec(struct linux_binprm *bprm)
{
const struct task_security_struct *old_tsec;
struct task_security_struct *new_tsec;
@@ -2297,8 +2297,6 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
/* SELinux context only depends on initial program or script and not
* the script interpreter */
- if (bprm->called_set_creds)
- return 0;
old_tsec = selinux_cred(current_cred());
new_tsec = selinux_cred(bprm->cred);
@@ -6405,7 +6403,7 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
/* Permission checking based on the specified context is
performed during the actual operation (execve,
open/mkdir/...), when we know the full context of the
- operation. See selinux_bprm_set_creds for the execve
+ operation. See selinux_bprm_creds_for_exec for the execve
checks and may_create for the file creation checks. The
operation will then fail if the context is not permitted. */
tsec = selinux_cred(new);
@@ -6934,7 +6932,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(netlink_send, selinux_netlink_send),
- LSM_HOOK_INIT(bprm_set_creds, selinux_bprm_set_creds),
+ LSM_HOOK_INIT(bprm_creds_for_exec, selinux_bprm_creds_for_exec),
LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 62529f382942..e9e817d09785 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -109,9 +109,7 @@ struct inode_smack {
struct smack_known *smk_inode; /* label of the fso */
struct smack_known *smk_task; /* label of the task */
struct smack_known *smk_mmap; /* label of the mmap domain */
- struct mutex smk_lock; /* initialization lock */
int smk_flags; /* smack inode flags */
- struct rcu_head smk_rcu; /* for freeing inode_smack */
};
struct task_smack {
@@ -148,7 +146,6 @@ struct smk_net4addr {
struct smack_known *smk_label; /* label */
};
-#if IS_ENABLED(CONFIG_IPV6)
/*
* An entry in the table identifying IPv6 hosts.
*/
@@ -159,9 +156,7 @@ struct smk_net6addr {
int smk_masks; /* mask size */
struct smack_known *smk_label; /* label */
};
-#endif /* CONFIG_IPV6 */
-#ifdef SMACK_IPV6_PORT_LABELING
/*
* An entry in the table identifying ports.
*/
@@ -174,7 +169,6 @@ struct smk_port_label {
short smk_sock_type; /* Socket type */
short smk_can_reuse;
};
-#endif /* SMACK_IPV6_PORT_LABELING */
struct smack_known_list_elem {
struct list_head list;
@@ -335,9 +329,7 @@ extern struct smack_known smack_known_web;
extern struct mutex smack_known_lock;
extern struct list_head smack_known_list;
extern struct list_head smk_net4addr_list;
-#if IS_ENABLED(CONFIG_IPV6)
extern struct list_head smk_net6addr_list;
-#endif /* CONFIG_IPV6 */
extern struct mutex smack_onlycap_lock;
extern struct list_head smack_onlycap_list;
@@ -505,10 +497,6 @@ static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
struct dentry *d)
{
}
-static inline void smk_ad_setfield_u_fs_path_mnt(struct smk_audit_info *a,
- struct vfsmount *m)
-{
-}
static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
struct inode *i)
{
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 8c61d175e195..cd44b79bf1f5 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -50,11 +50,8 @@
#define SMK_RECEIVING 1
#define SMK_SENDING 2
-#ifdef SMACK_IPV6_PORT_LABELING
-DEFINE_MUTEX(smack_ipv6_lock);
+static DEFINE_MUTEX(smack_ipv6_lock);
static LIST_HEAD(smk_ipv6_port_list);
-#endif
-static struct kmem_cache *smack_inode_cache;
struct kmem_cache *smack_rule_cache;
int smack_enabled;
@@ -316,7 +313,6 @@ static void init_inode_smack(struct inode *inode, struct smack_known *skp)
isp->smk_inode = skp;
isp->smk_flags = 0;
- mutex_init(&isp->smk_lock);
}
/**
@@ -891,12 +887,12 @@ static int smack_sb_statfs(struct dentry *dentry)
*/
/**
- * smack_bprm_set_creds - set creds for exec
+ * smack_bprm_creds_for_exec - Update bprm->cred if needed for exec
* @bprm: the exec information
*
* Returns 0 if it gets a blob, -EPERM if exec forbidden and -ENOMEM otherwise
*/
-static int smack_bprm_set_creds(struct linux_binprm *bprm)
+static int smack_bprm_creds_for_exec(struct linux_binprm *bprm)
{
struct inode *inode = file_inode(bprm->file);
struct task_smack *bsp = smack_cred(bprm->cred);
@@ -904,9 +900,6 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
struct superblock_smack *sbsp;
int rc;
- if (bprm->called_set_creds)
- return 0;
-
isp = smack_inode(inode);
if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
@@ -2320,7 +2313,6 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip)
return NULL;
}
-#if IS_ENABLED(CONFIG_IPV6)
/*
* smk_ipv6_localhost - Check for local ipv6 host address
* @sip: the address
@@ -2388,7 +2380,6 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
return NULL;
}
-#endif /* CONFIG_IPV6 */
/**
* smack_netlabel - Set the secattr on a socket
@@ -2477,7 +2468,6 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
return smack_netlabel(sk, sk_lbl);
}
-#if IS_ENABLED(CONFIG_IPV6)
/**
* smk_ipv6_check - check Smack access
* @subject: subject Smack label
@@ -2510,7 +2500,6 @@ static int smk_ipv6_check(struct smack_known *subject,
rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc);
return rc;
}
-#endif /* CONFIG_IPV6 */
#ifdef SMACK_IPV6_PORT_LABELING
/**
@@ -2599,6 +2588,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
mutex_unlock(&smack_ipv6_lock);
return;
}
+#endif
/**
* smk_ipv6_port_check - check Smack port access
@@ -2661,7 +2651,6 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
return smk_ipv6_check(skp, object, address, act);
}
-#endif /* SMACK_IPV6_PORT_LABELING */
/**
* smack_inode_setsecurity - set smack xattrs
@@ -2836,24 +2825,21 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
return 0;
if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) {
struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
-#ifdef SMACK_IPV6_SECMARK_LABELING
- struct smack_known *rsp;
-#endif
+ struct smack_known *rsp = NULL;
if (addrlen < SIN6_LEN_RFC2133)
return 0;
-#ifdef SMACK_IPV6_SECMARK_LABELING
- rsp = smack_ipv6host_label(sip);
+ if (__is_defined(SMACK_IPV6_SECMARK_LABELING))
+ rsp = smack_ipv6host_label(sip);
if (rsp != NULL) {
struct socket_smack *ssp = sock->sk->sk_security;
rc = smk_ipv6_check(ssp->smk_out, rsp, sip,
SMK_CONNECTING);
}
-#endif
-#ifdef SMACK_IPV6_PORT_LABELING
- rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
-#endif
+ if (__is_defined(SMACK_IPV6_PORT_LABELING))
+ rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
+
return rc;
}
if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in))
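The #ifdef blocks above are folded into ordinary branches with __is_defined() from <linux/kconfig.h>: the code stays visible to the compiler (and therefore type-checked) but the dead branch is eliminated as a compile-time constant. A hedged sketch with a made-up option name:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* DEMO_PORT_LABELING is a stand-in; define it to 1 to take the branch */
static void demo(void)
{
	if (__is_defined(DEMO_PORT_LABELING))
		pr_info("port labeling path compiled in\n");
	else
		pr_info("branch folded away as constant false\n");
}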
@@ -3273,13 +3259,12 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
isp = smack_inode(inode);
- mutex_lock(&isp->smk_lock);
/*
* If the inode is already instantiated
* take the quick way out
*/
if (isp->smk_flags & SMK_INODE_INSTANT)
- goto unlockandout;
+ return;
sbp = inode->i_sb;
sbsp = sbp->s_security;
@@ -3330,7 +3315,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
break;
}
isp->smk_flags |= SMK_INODE_INSTANT;
- goto unlockandout;
+ return;
}
/*
@@ -3465,8 +3450,6 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
isp->smk_flags |= (SMK_INODE_INSTANT | transflag);
-unlockandout:
- mutex_unlock(&isp->smk_lock);
return;
}
@@ -4598,7 +4581,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(sb_statfs, smack_sb_statfs),
LSM_HOOK_INIT(sb_set_mnt_opts, smack_set_mnt_opts),
- LSM_HOOK_INIT(bprm_set_creds, smack_bprm_set_creds),
+ LSM_HOOK_INIT(bprm_creds_for_exec, smack_bprm_creds_for_exec),
LSM_HOOK_INIT(inode_alloc_security, smack_inode_alloc_security),
LSM_HOOK_INIT(inode_init_security, smack_inode_init_security),
@@ -4760,15 +4743,9 @@ static __init int smack_init(void)
struct cred *cred = (struct cred *) current->cred;
struct task_smack *tsp;
- smack_inode_cache = KMEM_CACHE(inode_smack, 0);
- if (!smack_inode_cache)
- return -ENOMEM;
-
smack_rule_cache = KMEM_CACHE(smack_rule, 0);
- if (!smack_rule_cache) {
- kmem_cache_destroy(smack_inode_cache);
+ if (!smack_rule_cache)
return -ENOMEM;
- }
/*
* Set the security state for the initial task.
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index e3e05c04dbd1..c21b656b3263 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -878,11 +878,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
else
rule += strlen(skp->smk_known) + 1;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+
ret = sscanf(rule, "%d", &maplevel);
if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
goto out;
rule += SMK_DIGITLEN;
+ if (rule > data + count) {
+ rc = -EOVERFLOW;
+ goto out;
+ }
+
ret = sscanf(rule, "%d", &catlen);
if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
goto out;
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index bf38fc1b59b2..df4798980416 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -7,6 +7,7 @@
#include "common.h"
#include <linux/magic.h>
+#include <linux/proc_fs.h>
/**
* tomoyo_encode2 - Encode binary string to ascii string.
@@ -161,9 +162,10 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') {
char *ep;
const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10);
+ struct pid_namespace *proc_pidns = proc_pid_ns(sb);
if (*ep == '/' && pid && pid ==
- task_tgid_nr_ns(current, sb->s_fs_info)) {
+ task_tgid_nr_ns(current, proc_pidns)) {
pos = ep - 5;
if (pos < buffer)
goto out;
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 716c92ec941a..f9adddc42ac8 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -63,21 +63,15 @@ static void tomoyo_bprm_committed_creds(struct linux_binprm *bprm)
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
/**
- * tomoyo_bprm_set_creds - Target for security_bprm_set_creds().
+ * tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec().
*
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0.
*/
-static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
+static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm)
{
/*
- * Do only if this function is called for the first time of an execve
- * operation.
- */
- if (bprm->called_set_creds)
- return 0;
- /*
* Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
* for the first time.
*/
@@ -539,7 +533,7 @@ static struct security_hook_list tomoyo_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
LSM_HOOK_INIT(task_free, tomoyo_task_free),
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
- LSM_HOOK_INIT(bprm_set_creds, tomoyo_bprm_set_creds),
+ LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
#endif
LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 930def8201f4..68630244b00f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2876,7 +2876,7 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
if (runtime->oss.params) {
/* use mutex_trylock() for params_lock for avoiding a deadlock
- * between mmap_sem and params_lock taken by
+ * between mmap_lock and params_lock taken by
* copy_from/to_user() in snd_pcm_oss_write/read()
*/
err = snd_pcm_oss_change_params(substream, true);
diff --git a/sound/core/oss/pcm_plugin.h b/sound/core/oss/pcm_plugin.h
index 8d2f7a4e3ab6..46e273bd4a78 100644
--- a/sound/core/oss/pcm_plugin.h
+++ b/sound/core/oss/pcm_plugin.h
@@ -64,7 +64,7 @@ struct snd_pcm_plugin {
char *buf;
snd_pcm_uframes_t buf_frames;
struct snd_pcm_plugin_channel *buf_channels;
- char extra_data[0];
+ char extra_data[];
};
int snd_pcm_plugin_build(struct snd_pcm_substream *handle,
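The extra_data[0] member above becomes a C99 flexible array; allocations for such structs typically go through struct_size() so the trailing storage is accounted for without overflow. A hedged sketch, with demo_blob/demo_alloc as illustrative names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_blob {
	size_t len;
	char data[];			/* flexible array member, was data[0] */
};

static struct demo_blob *demo_alloc(size_t len)
{
	struct demo_blob *b;

	b = kzalloc(struct_size(b, data, len), GFP_KERNEL);
	if (b)
		b->len = len;
	return b;
}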
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 47838f57a647..9630d2523948 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -138,6 +138,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
+static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_group *group = &substream->self_group;
+
+ if (substream->pcm->nonatomic)
+ mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING);
+ else
+ spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
+}
+
/**
* snd_pcm_stream_unlock_irq - Unlock the PCM stream
* @substream: PCM substream
@@ -2166,6 +2176,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
}
pcm_file = f.file->private_data;
substream1 = pcm_file->substream;
+
+ if (substream == substream1) {
+ res = -EINVAL;
+ goto _badf;
+ }
+
group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group) {
res = -ENOMEM;
@@ -2194,7 +2210,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
snd_pcm_stream_unlock_irq(substream);
snd_pcm_group_lock_irq(target_group, nonatomic);
- snd_pcm_stream_lock(substream1);
+ snd_pcm_stream_lock_nested(substream1);
snd_pcm_group_assign(substream1, target_group);
refcount_inc(&target_group->refs);
snd_pcm_stream_unlock(substream1);
@@ -2210,7 +2226,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
static void relink_to_local(struct snd_pcm_substream *substream)
{
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_nested(substream);
snd_pcm_group_assign(substream, &substream->self_group);
snd_pcm_stream_unlock(substream);
}
diff --git a/sound/core/seq/oss/seq_oss_timer.h b/sound/core/seq/oss/seq_oss_timer.h
index 2d86125b5d0f..dee190b4ec6b 100644
--- a/sound/core/seq/oss/seq_oss_timer.h
+++ b/sound/core/seq/oss/seq_oss_timer.h
@@ -44,14 +44,4 @@ snd_seq_oss_timer_cur_tick(struct seq_oss_timer *timer)
return timer->cur_tick;
}
-
-/*
- * is realtime event?
- */
-static inline int
-snd_seq_oss_timer_is_realtime(struct seq_oss_timer *timer)
-{
- return timer->realtime;
-}
-
#endif
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index feefdfc3bcca..c42217e2dd19 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
-#include <asm/pgtable.h>
#include <sound/memalloc.h>
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index 577c8e03ec4d..7141f73cddd3 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -186,18 +186,6 @@ config SND_PORTMAN2X4
To compile this driver as a module, choose M here: the module
will be called snd-portman2x4.
-config SND_ML403_AC97CR
- tristate "Xilinx ML403 AC97 Controller Reference"
- depends on XILINX_VIRTEX
- select SND_AC97_CODEC
- help
- Say Y here to include support for the
- opb_ac97_controller_ref_v1_00_a ip core found in Xilinx's ML403
- reference design.
-
- To compile this driver as a module, choose M here: the module
- will be called snd-ml403_ac97cr.
-
config SND_AC97_POWER_SAVE
bool "AC97 Power-Saving Mode"
depends on SND_AC97_CODEC
diff --git a/sound/drivers/Makefile b/sound/drivers/Makefile
index 615558a281c8..c0fe4eccdaef 100644
--- a/sound/drivers/Makefile
+++ b/sound/drivers/Makefile
@@ -11,7 +11,6 @@ snd-mts64-objs := mts64.o
snd-portman2x4-objs := portman2x4.o
snd-serial-u16550-objs := serial-u16550.o
snd-virmidi-objs := virmidi.o
-snd-ml403-ac97cr-objs := ml403-ac97cr.o pcm-indirect2.o
# Toplevel Module Dependency
obj-$(CONFIG_SND_DUMMY) += snd-dummy.o
@@ -21,6 +20,5 @@ obj-$(CONFIG_SND_SERIAL_U16550) += snd-serial-u16550.o
obj-$(CONFIG_SND_MTPAV) += snd-mtpav.o
obj-$(CONFIG_SND_MTS64) += snd-mts64.o
obj-$(CONFIG_SND_PORTMAN2X4) += snd-portman2x4.o
-obj-$(CONFIG_SND_ML403_AC97CR) += snd-ml403-ac97cr.o
obj-$(CONFIG_SND) += opl3/ opl4/ mpu401/ vx/ pcsp/
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
deleted file mode 100644
index 0710707da8c1..000000000000
--- a/sound/drivers/ml403-ac97cr.c
+++ /dev/null
@@ -1,1298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ALSA driver for Xilinx ML403 AC97 Controller Reference
- * IP: opb_ac97_controller_ref_v1_00_a (EDK 8.1i)
- * IP: opb_ac97_controller_ref_v1_00_a (EDK 9.1i)
- *
- * Copyright (c) by 2007 Joachim Foerster <JOFT@gmx.de>
- */
-
-/* Some notes / status of this driver:
- *
- * - Don't wonder about some strange implementations of things - especially the
- * (heavy) shadowing of codec registers, with which I tried to reduce read
- * accesses to a minimum, because after a variable amount of accesses, the AC97
- * controller doesn't raise the register access finished bit anymore ...
- *
- * - Playback support seems to be pretty stable - no issues here.
- * - Capture support "works" now, too. Overruns don't happen any longer so often.
- * But there might still be some ...
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <linux/platform_device.h>
-
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-
-/* HZ */
-#include <linux/param.h>
-/* jiffies, time_*() */
-#include <linux/jiffies.h>
-/* schedule_timeout*() */
-#include <linux/sched.h>
-/* spin_lock*() */
-#include <linux/spinlock.h>
-/* struct mutex, mutex_init(), mutex_*lock() */
-#include <linux/mutex.h>
-
-/* snd_printk(), snd_printd() */
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <sound/ac97_codec.h>
-
-#include "pcm-indirect2.h"
-
-
-#define SND_ML403_AC97CR_DRIVER "ml403-ac97cr"
-
-MODULE_AUTHOR("Joachim Foerster <JOFT@gmx.de>");
-MODULE_DESCRIPTION("Xilinx ML403 AC97 Controller Reference");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Xilinx,ML403 AC97 Controller Reference}}");
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE;
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for ML403 AC97 Controller Reference.");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for ML403 AC97 Controller Reference.");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable this ML403 AC97 Controller Reference.");
-
-/* Special feature options */
-/*#define CODEC_WRITE_CHECK_RAF*/ /* don't return after a write to a codec
- * register, while RAF bit is not set
- */
-/* Debug options for code which may be removed completely in a final version */
-#ifdef CONFIG_SND_DEBUG
-/*#define CODEC_STAT*/ /* turn on some minimal "statistics"
- * about codec register usage
- */
-#define SND_PCM_INDIRECT2_STAT /* turn on some "statistics" about the
- * process of copying bytes from the
- * intermediate buffer to the hardware
- * fifo and the other way round
- */
-#endif
-
-/* Definition of a "level/facility dependent" printk(); may be removed
- * completely in a final version
- */
-#undef PDEBUG
-#ifdef CONFIG_SND_DEBUG
-/* "facilities" for PDEBUG */
-#define UNKNOWN (1<<0)
-#define CODEC_SUCCESS (1<<1)
-#define CODEC_FAKE (1<<2)
-#define INIT_INFO (1<<3)
-#define INIT_FAILURE (1<<4)
-#define WORK_INFO (1<<5)
-#define WORK_FAILURE (1<<6)
-
-#define PDEBUG_FACILITIES (UNKNOWN | INIT_FAILURE | WORK_FAILURE)
-
-#define PDEBUG(fac, fmt, args...) do { \
- if (fac & PDEBUG_FACILITIES) \
- snd_printd(KERN_DEBUG SND_ML403_AC97CR_DRIVER ": " \
- fmt, ##args); \
- } while (0)
-#else
-#define PDEBUG(fac, fmt, args...) /* nothing */
-#endif
-
-
-
-/* Defines for "waits"/timeouts (portions of HZ=250 on arch/ppc by default) */
-#define CODEC_TIMEOUT_ON_INIT 5 /* timeout for checking for codec
- * readiness (after insmod)
- */
-#ifndef CODEC_WRITE_CHECK_RAF
-#define CODEC_WAIT_AFTER_WRITE 100 /* general, static wait after a write
- * access to a codec register, may be
- * 0 to completely remove wait
- */
-#else
-#define CODEC_TIMEOUT_AFTER_WRITE 5 /* timeout after a write access to a
- * codec register, if RAF bit is used
- */
-#endif
-#define CODEC_TIMEOUT_AFTER_READ 5 /* timeout after a read access to a
- * codec register (checking RAF bit)
- */
-
-/* Infrastructure for codec register shadowing */
-#define LM4550_REG_OK (1<<0) /* register exists */
-#define LM4550_REG_DONEREAD (1<<1) /* read register once, value should be
- * the same currently in the register
- */
-#define LM4550_REG_NOSAVE (1<<2) /* values written to this register will
- * not be saved in the register
- */
-#define LM4550_REG_NOSHADOW (1<<3) /* don't do register shadowing, use plain
- * hardware access
- */
-#define LM4550_REG_READONLY (1<<4) /* register is read only */
-#define LM4550_REG_FAKEPROBE (1<<5) /* fake write _and_ read actions during
- * probe() correctly
- */
-#define LM4550_REG_FAKEREAD (1<<6) /* fake read access, always return
- * default value
- */
-#define LM4550_REG_ALLFAKE (LM4550_REG_FAKEREAD | LM4550_REG_FAKEPROBE)
-
-struct lm4550_reg {
- u16 value;
- u16 flag;
- u16 wmask;
- u16 def;
-};
-
-struct lm4550_reg lm4550_regfile[64] = {
- [AC97_RESET / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_NOSAVE \
- | LM4550_REG_FAKEREAD,
- .def = 0x0D50},
- [AC97_MASTER / 2] = {.flag = LM4550_REG_OK
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8000},
- [AC97_HEADPHONE / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8000},
- [AC97_MASTER_MONO / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x801F,
- .def = 0x8000},
- [AC97_PC_BEEP / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x801E,
- .def = 0x0},
- [AC97_PHONE / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x801F,
- .def = 0x8008},
- [AC97_MIC / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x805F,
- .def = 0x8008},
- [AC97_LINE / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8808},
- [AC97_CD / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8808},
- [AC97_VIDEO / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8808},
- [AC97_AUX / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8808},
- [AC97_PCM / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x9F1F,
- .def = 0x8008},
- [AC97_REC_SEL / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x707,
- .def = 0x0},
- [AC97_REC_GAIN / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .wmask = 0x8F0F,
- .def = 0x8000},
- [AC97_GENERAL_PURPOSE / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .def = 0x0,
- .wmask = 0xA380},
- [AC97_3D_CONTROL / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEREAD \
- | LM4550_REG_READONLY,
- .def = 0x0101},
- [AC97_POWERDOWN / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_NOSHADOW \
- | LM4550_REG_NOSAVE,
- .wmask = 0xFF00},
- /* may not write ones to
- * REF/ANL/DAC/ADC bits
- * FIXME: Is this ok?
- */
- [AC97_EXTENDED_ID / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEREAD \
- | LM4550_REG_READONLY,
- .def = 0x0201}, /* primary codec */
- [AC97_EXTENDED_STATUS / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_NOSHADOW \
- | LM4550_REG_NOSAVE,
- .wmask = 0x1},
- [AC97_PCM_FRONT_DAC_RATE / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .def = 0xBB80,
- .wmask = 0xFFFF},
- [AC97_PCM_LR_ADC_RATE / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_FAKEPROBE,
- .def = 0xBB80,
- .wmask = 0xFFFF},
- [AC97_VENDOR_ID1 / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_READONLY \
- | LM4550_REG_FAKEREAD,
- .def = 0x4E53},
- [AC97_VENDOR_ID2 / 2] = {.flag = LM4550_REG_OK \
- | LM4550_REG_READONLY \
- | LM4550_REG_FAKEREAD,
- .def = 0x4350}
-};
-
-#define LM4550_RF_OK(reg) (lm4550_regfile[reg / 2].flag & LM4550_REG_OK)
-
-static void lm4550_regfile_init(void)
-{
- int i;
- for (i = 0; i < 64; i++)
- if (lm4550_regfile[i].flag & LM4550_REG_FAKEPROBE)
- lm4550_regfile[i].value = lm4550_regfile[i].def;
-}
-
-static void lm4550_regfile_write_values_after_init(struct snd_ac97 *ac97)
-{
- int i;
- for (i = 0; i < 64; i++)
- if ((lm4550_regfile[i].flag & LM4550_REG_FAKEPROBE) &&
- (lm4550_regfile[i].value != lm4550_regfile[i].def)) {
- PDEBUG(CODEC_FAKE, "lm4550_regfile_write_values_after_"
- "init(): reg=0x%x value=0x%x / %d is different "
- "from def=0x%x / %d\n",
- i, lm4550_regfile[i].value,
- lm4550_regfile[i].value, lm4550_regfile[i].def,
- lm4550_regfile[i].def);
- snd_ac97_write(ac97, i * 2, lm4550_regfile[i].value);
- lm4550_regfile[i].flag |= LM4550_REG_DONEREAD;
- }
-}
-
-
-/* direct registers */
-#define CR_REG(ml403_ac97cr, x) ((ml403_ac97cr)->port + CR_REG_##x)
-
-#define CR_REG_PLAYFIFO 0x00
-#define CR_PLAYDATA(a) ((a) & 0xFFFF)
-
-#define CR_REG_RECFIFO 0x04
-#define CR_RECDATA(a) ((a) & 0xFFFF)
-
-#define CR_REG_STATUS 0x08
-#define CR_RECOVER (1<<7)
-#define CR_PLAYUNDER (1<<6)
-#define CR_CODECREADY (1<<5)
-#define CR_RAF (1<<4)
-#define CR_RECEMPTY (1<<3)
-#define CR_RECFULL (1<<2)
-#define CR_PLAYHALF (1<<1)
-#define CR_PLAYFULL (1<<0)
-
-#define CR_REG_RESETFIFO 0x0C
-#define CR_RECRESET (1<<1)
-#define CR_PLAYRESET (1<<0)
-
-#define CR_REG_CODEC_ADDR 0x10
-/* UG082 says:
- * #define CR_CODEC_ADDR(a) ((a) << 1)
- * #define CR_CODEC_READ (1<<0)
- * #define CR_CODEC_WRITE (0<<0)
- */
-/* RefDesign example says: */
-#define CR_CODEC_ADDR(a) ((a) << 0)
-#define CR_CODEC_READ (1<<7)
-#define CR_CODEC_WRITE (0<<7)
-
-#define CR_REG_CODEC_DATAREAD 0x14
-#define CR_CODEC_DATAREAD(v) ((v) & 0xFFFF)
-
-#define CR_REG_CODEC_DATAWRITE 0x18
-#define CR_CODEC_DATAWRITE(v) ((v) & 0xFFFF)
-
-#define CR_FIFO_SIZE 32
-
-struct snd_ml403_ac97cr {
- /* lock for access to (controller) registers */
- spinlock_t reg_lock;
- /* mutex for the whole sequence of accesses to (controller) registers
- * which affect codec registers
- */
- struct mutex cdc_mutex;
-
- int irq; /* for playback */
- int enable_irq; /* for playback */
-
- int capture_irq;
- int enable_capture_irq;
-
- struct resource *res_port;
- void *port;
-
- struct snd_ac97 *ac97;
- int ac97_fake;
-#ifdef CODEC_STAT
- int ac97_read;
- int ac97_write;
-#endif
-
- struct platform_device *pfdev;
- struct snd_card *card;
- struct snd_pcm *pcm;
- struct snd_pcm_substream *playback_substream;
- struct snd_pcm_substream *capture_substream;
-
- struct snd_pcm_indirect2 ind_rec; /* for playback */
- struct snd_pcm_indirect2 capture_ind2_rec;
-};
-
-static const struct snd_pcm_hardware snd_ml403_ac97cr_playback = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_S16_BE,
- .rates = (SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_8000_48000),
- .rate_min = 4000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = (128*1024),
- .period_bytes_min = CR_FIFO_SIZE/2,
- .period_bytes_max = (64*1024),
- .periods_min = 2,
- .periods_max = (128*1024)/(CR_FIFO_SIZE/2),
- .fifo_size = 0,
-};
-
-static const struct snd_pcm_hardware snd_ml403_ac97cr_capture = {
- .info = (SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_MMAP_VALID),
- .formats = SNDRV_PCM_FMTBIT_S16_BE,
- .rates = (SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_8000_48000),
- .rate_min = 4000,
- .rate_max = 48000,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = (128*1024),
- .period_bytes_min = CR_FIFO_SIZE/2,
- .period_bytes_max = (64*1024),
- .periods_min = 2,
- .periods_max = (128*1024)/(CR_FIFO_SIZE/2),
- .fifo_size = 0,
-};
-
-static size_t
-snd_ml403_ac97cr_playback_ind2_zero(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- int copied_words = 0;
- u32 full = 0;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- spin_lock(&ml403_ac97cr->reg_lock);
- while ((full = (in_be32(CR_REG(ml403_ac97cr, STATUS)) &
- CR_PLAYFULL)) != CR_PLAYFULL) {
- out_be32(CR_REG(ml403_ac97cr, PLAYFIFO), 0);
- copied_words++;
- }
- rec->hw_ready = 0;
- spin_unlock(&ml403_ac97cr->reg_lock);
-
- return (size_t) (copied_words * 2);
-}
-
-static size_t
-snd_ml403_ac97cr_playback_ind2_copy(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- size_t bytes)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- u16 *src;
- int copied_words = 0;
- u32 full = 0;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
- src = (u16 *)(substream->runtime->dma_area + rec->sw_data);
-
- spin_lock(&ml403_ac97cr->reg_lock);
- while (((full = (in_be32(CR_REG(ml403_ac97cr, STATUS)) &
- CR_PLAYFULL)) != CR_PLAYFULL) && (bytes > 1)) {
- out_be32(CR_REG(ml403_ac97cr, PLAYFIFO),
- CR_PLAYDATA(src[copied_words]));
- copied_words++;
- bytes = bytes - 2;
- }
- if (full != CR_PLAYFULL)
- rec->hw_ready = 1;
- else
- rec->hw_ready = 0;
- spin_unlock(&ml403_ac97cr->reg_lock);
-
- return (size_t) (copied_words * 2);
-}
-
-static size_t
-snd_ml403_ac97cr_capture_ind2_null(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- int copied_words = 0;
- u32 empty = 0;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- spin_lock(&ml403_ac97cr->reg_lock);
- while ((empty = (in_be32(CR_REG(ml403_ac97cr, STATUS)) &
- CR_RECEMPTY)) != CR_RECEMPTY) {
- volatile u32 trash;
-
- trash = CR_RECDATA(in_be32(CR_REG(ml403_ac97cr, RECFIFO)));
- /* Hmmmm, really necessary? Don't want call to in_be32()
- * to be optimised away!
- */
- trash++;
- copied_words++;
- }
- rec->hw_ready = 0;
- spin_unlock(&ml403_ac97cr->reg_lock);
-
- return (size_t) (copied_words * 2);
-}
-
-static size_t
-snd_ml403_ac97cr_capture_ind2_copy(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec, size_t bytes)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- u16 *dst;
- int copied_words = 0;
- u32 empty = 0;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
- dst = (u16 *)(substream->runtime->dma_area + rec->sw_data);
-
- spin_lock(&ml403_ac97cr->reg_lock);
- while (((empty = (in_be32(CR_REG(ml403_ac97cr, STATUS)) &
- CR_RECEMPTY)) != CR_RECEMPTY) && (bytes > 1)) {
- dst[copied_words] = CR_RECDATA(in_be32(CR_REG(ml403_ac97cr,
- RECFIFO)));
- copied_words++;
- bytes = bytes - 2;
- }
- if (empty != CR_RECEMPTY)
- rec->hw_ready = 1;
- else
- rec->hw_ready = 0;
- spin_unlock(&ml403_ac97cr->reg_lock);
-
- return (size_t) (copied_words * 2);
-}
-
-static snd_pcm_uframes_t
-snd_ml403_ac97cr_pcm_pointer(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- struct snd_pcm_indirect2 *ind2_rec = NULL;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- if (substream == ml403_ac97cr->playback_substream)
- ind2_rec = &ml403_ac97cr->ind_rec;
- if (substream == ml403_ac97cr->capture_substream)
- ind2_rec = &ml403_ac97cr->capture_ind2_rec;
-
- if (ind2_rec != NULL)
- return snd_pcm_indirect2_pointer(substream, ind2_rec);
- return (snd_pcm_uframes_t) 0;
-}
-
-static int
-snd_ml403_ac97cr_pcm_playback_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- int err = 0;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- PDEBUG(WORK_INFO, "trigger(playback): START\n");
- ml403_ac97cr->ind_rec.hw_ready = 1;
-
- /* clear play FIFO */
- out_be32(CR_REG(ml403_ac97cr, RESETFIFO), CR_PLAYRESET);
-
- /* enable play irq */
- ml403_ac97cr->enable_irq = 1;
- enable_irq(ml403_ac97cr->irq);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- PDEBUG(WORK_INFO, "trigger(playback): STOP\n");
- ml403_ac97cr->ind_rec.hw_ready = 0;
-#ifdef SND_PCM_INDIRECT2_STAT
- snd_pcm_indirect2_stat(substream, &ml403_ac97cr->ind_rec);
-#endif
- /* disable play irq */
- disable_irq_nosync(ml403_ac97cr->irq);
- ml403_ac97cr->enable_irq = 0;
- break;
- default:
- err = -EINVAL;
- break;
- }
- PDEBUG(WORK_INFO, "trigger(playback): (done)\n");
- return err;
-}
-
-static int
-snd_ml403_ac97cr_pcm_capture_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- int err = 0;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- PDEBUG(WORK_INFO, "trigger(capture): START\n");
- ml403_ac97cr->capture_ind2_rec.hw_ready = 0;
-
- /* clear record FIFO */
- out_be32(CR_REG(ml403_ac97cr, RESETFIFO), CR_RECRESET);
-
- /* enable record irq */
- ml403_ac97cr->enable_capture_irq = 1;
- enable_irq(ml403_ac97cr->capture_irq);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- PDEBUG(WORK_INFO, "trigger(capture): STOP\n");
- ml403_ac97cr->capture_ind2_rec.hw_ready = 0;
-#ifdef SND_PCM_INDIRECT2_STAT
- snd_pcm_indirect2_stat(substream,
- &ml403_ac97cr->capture_ind2_rec);
-#endif
- /* disable capture irq */
- disable_irq_nosync(ml403_ac97cr->capture_irq);
- ml403_ac97cr->enable_capture_irq = 0;
- break;
- default:
- err = -EINVAL;
- break;
- }
- PDEBUG(WORK_INFO, "trigger(capture): (done)\n");
- return err;
-}
-
-static int
-snd_ml403_ac97cr_pcm_playback_prepare(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- struct snd_pcm_runtime *runtime;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
-
- PDEBUG(WORK_INFO,
- "prepare(): period_bytes=%d, minperiod_bytes=%d\n",
- snd_pcm_lib_period_bytes(substream), CR_FIFO_SIZE / 2);
-
- /* set sampling rate */
- snd_ac97_set_rate(ml403_ac97cr->ac97, AC97_PCM_FRONT_DAC_RATE,
- runtime->rate);
- PDEBUG(WORK_INFO, "prepare(): rate=%d\n", runtime->rate);
-
- /* init struct for intermediate buffer */
- memset(&ml403_ac97cr->ind_rec, 0,
- sizeof(struct snd_pcm_indirect2));
- ml403_ac97cr->ind_rec.hw_buffer_size = CR_FIFO_SIZE;
- ml403_ac97cr->ind_rec.sw_buffer_size =
- snd_pcm_lib_buffer_bytes(substream);
- ml403_ac97cr->ind_rec.min_periods = -1;
- ml403_ac97cr->ind_rec.min_multiple =
- snd_pcm_lib_period_bytes(substream) / (CR_FIFO_SIZE / 2);
- PDEBUG(WORK_INFO, "prepare(): hw_buffer_size=%d, "
- "sw_buffer_size=%d, min_multiple=%d\n",
- CR_FIFO_SIZE, ml403_ac97cr->ind_rec.sw_buffer_size,
- ml403_ac97cr->ind_rec.min_multiple);
- return 0;
-}
-
-static int
-snd_ml403_ac97cr_pcm_capture_prepare(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- struct snd_pcm_runtime *runtime;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
-
- PDEBUG(WORK_INFO,
- "prepare(capture): period_bytes=%d, minperiod_bytes=%d\n",
- snd_pcm_lib_period_bytes(substream), CR_FIFO_SIZE / 2);
-
- /* set sampling rate */
- snd_ac97_set_rate(ml403_ac97cr->ac97, AC97_PCM_LR_ADC_RATE,
- runtime->rate);
- PDEBUG(WORK_INFO, "prepare(capture): rate=%d\n", runtime->rate);
-
- /* init struct for intermediate buffer */
- memset(&ml403_ac97cr->capture_ind2_rec, 0,
- sizeof(struct snd_pcm_indirect2));
- ml403_ac97cr->capture_ind2_rec.hw_buffer_size = CR_FIFO_SIZE;
- ml403_ac97cr->capture_ind2_rec.sw_buffer_size =
- snd_pcm_lib_buffer_bytes(substream);
- ml403_ac97cr->capture_ind2_rec.min_multiple =
- snd_pcm_lib_period_bytes(substream) / (CR_FIFO_SIZE / 2);
- PDEBUG(WORK_INFO, "prepare(capture): hw_buffer_size=%d, "
- "sw_buffer_size=%d, min_multiple=%d\n", CR_FIFO_SIZE,
- ml403_ac97cr->capture_ind2_rec.sw_buffer_size,
- ml403_ac97cr->capture_ind2_rec.min_multiple);
- return 0;
-}
-
-static int snd_ml403_ac97cr_playback_open(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- struct snd_pcm_runtime *runtime;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
-
- PDEBUG(WORK_INFO, "open(playback)\n");
- ml403_ac97cr->playback_substream = substream;
- runtime->hw = snd_ml403_ac97cr_playback;
-
- snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
- CR_FIFO_SIZE / 2);
- return 0;
-}
-
-static int snd_ml403_ac97cr_capture_open(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- struct snd_pcm_runtime *runtime;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
-
- PDEBUG(WORK_INFO, "open(capture)\n");
- ml403_ac97cr->capture_substream = substream;
- runtime->hw = snd_ml403_ac97cr_capture;
-
- snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
- CR_FIFO_SIZE / 2);
- return 0;
-}
-
-static int snd_ml403_ac97cr_playback_close(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- PDEBUG(WORK_INFO, "close(playback)\n");
- ml403_ac97cr->playback_substream = NULL;
- return 0;
-}
-
-static int snd_ml403_ac97cr_capture_close(struct snd_pcm_substream *substream)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
-
- ml403_ac97cr = snd_pcm_substream_chip(substream);
-
- PDEBUG(WORK_INFO, "close(capture)\n");
- ml403_ac97cr->capture_substream = NULL;
- return 0;
-}
-
-static const struct snd_pcm_ops snd_ml403_ac97cr_playback_ops = {
- .open = snd_ml403_ac97cr_playback_open,
- .close = snd_ml403_ac97cr_playback_close,
- .prepare = snd_ml403_ac97cr_pcm_playback_prepare,
- .trigger = snd_ml403_ac97cr_pcm_playback_trigger,
- .pointer = snd_ml403_ac97cr_pcm_pointer,
-};
-
-static const struct snd_pcm_ops snd_ml403_ac97cr_capture_ops = {
- .open = snd_ml403_ac97cr_capture_open,
- .close = snd_ml403_ac97cr_capture_close,
- .prepare = snd_ml403_ac97cr_pcm_capture_prepare,
- .trigger = snd_ml403_ac97cr_pcm_capture_trigger,
- .pointer = snd_ml403_ac97cr_pcm_pointer,
-};
-
-static irqreturn_t snd_ml403_ac97cr_irq(int irq, void *dev_id)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- struct platform_device *pfdev;
- int cmp_irq;
-
- ml403_ac97cr = (struct snd_ml403_ac97cr *)dev_id;
- if (ml403_ac97cr == NULL)
- return IRQ_NONE;
-
- pfdev = ml403_ac97cr->pfdev;
-
- /* playback interrupt */
- cmp_irq = platform_get_irq(pfdev, 0);
- if (irq == cmp_irq) {
- if (ml403_ac97cr->enable_irq)
- snd_pcm_indirect2_playback_interrupt(
- ml403_ac97cr->playback_substream,
- &ml403_ac97cr->ind_rec,
- snd_ml403_ac97cr_playback_ind2_copy,
- snd_ml403_ac97cr_playback_ind2_zero);
- else
- goto __disable_irq;
- } else {
- /* record interrupt */
- cmp_irq = platform_get_irq(pfdev, 1);
- if (irq == cmp_irq) {
- if (ml403_ac97cr->enable_capture_irq)
- snd_pcm_indirect2_capture_interrupt(
- ml403_ac97cr->capture_substream,
- &ml403_ac97cr->capture_ind2_rec,
- snd_ml403_ac97cr_capture_ind2_copy,
- snd_ml403_ac97cr_capture_ind2_null);
- else
- goto __disable_irq;
- } else
- return IRQ_NONE;
- }
- return IRQ_HANDLED;
-
-__disable_irq:
- PDEBUG(INIT_INFO, "irq(): irq %d is meant to be disabled! So, now try "
- "to disable it _really_!\n", irq);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static unsigned short
-snd_ml403_ac97cr_codec_read(struct snd_ac97 *ac97, unsigned short reg)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr = ac97->private_data;
-#ifdef CODEC_STAT
- u32 stat;
- u32 rafaccess = 0;
-#endif
- unsigned long end_time;
- u16 value = 0;
-
- if (!LM4550_RF_OK(reg)) {
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "access to unknown/unused codec register 0x%x "
- "ignored!\n", reg);
- return 0;
- }
- /* check if we can fake/answer this access from our shadow register */
- if ((lm4550_regfile[reg / 2].flag &
- (LM4550_REG_DONEREAD | LM4550_REG_ALLFAKE)) &&
- !(lm4550_regfile[reg / 2].flag & LM4550_REG_NOSHADOW)) {
- if (lm4550_regfile[reg / 2].flag & LM4550_REG_FAKEREAD) {
- PDEBUG(CODEC_FAKE, "codec_read(): faking read from "
- "reg=0x%x, val=0x%x / %d\n",
- reg, lm4550_regfile[reg / 2].def,
- lm4550_regfile[reg / 2].def);
- return lm4550_regfile[reg / 2].def;
- } else if ((lm4550_regfile[reg / 2].flag &
- LM4550_REG_FAKEPROBE) &&
- ml403_ac97cr->ac97_fake) {
- PDEBUG(CODEC_FAKE, "codec_read(): faking read from "
- "reg=0x%x, val=0x%x / %d (probe)\n",
- reg, lm4550_regfile[reg / 2].value,
- lm4550_regfile[reg / 2].value);
- return lm4550_regfile[reg / 2].value;
- } else {
-#ifdef CODEC_STAT
- PDEBUG(CODEC_FAKE, "codec_read(): read access "
- "answered by shadow register 0x%x (value=0x%x "
- "/ %d) (cw=%d cr=%d)\n",
- reg, lm4550_regfile[reg / 2].value,
- lm4550_regfile[reg / 2].value,
- ml403_ac97cr->ac97_write,
- ml403_ac97cr->ac97_read);
-#else
- PDEBUG(CODEC_FAKE, "codec_read(): read access "
- "answered by shadow register 0x%x (value=0x%x "
- "/ %d)\n",
- reg, lm4550_regfile[reg / 2].value,
- lm4550_regfile[reg / 2].value);
-#endif
- return lm4550_regfile[reg / 2].value;
- }
- }
- /* if we are here, we _have_ to access the codec really, no faking */
- if (mutex_lock_interruptible(&ml403_ac97cr->cdc_mutex) != 0)
- return 0;
-#ifdef CODEC_STAT
- ml403_ac97cr->ac97_read++;
-#endif
- spin_lock(&ml403_ac97cr->reg_lock);
- out_be32(CR_REG(ml403_ac97cr, CODEC_ADDR),
- CR_CODEC_ADDR(reg) | CR_CODEC_READ);
- spin_unlock(&ml403_ac97cr->reg_lock);
- end_time = jiffies + (HZ / CODEC_TIMEOUT_AFTER_READ);
- do {
- spin_lock(&ml403_ac97cr->reg_lock);
-#ifdef CODEC_STAT
- rafaccess++;
- stat = in_be32(CR_REG(ml403_ac97cr, STATUS));
- if ((stat & CR_RAF) == CR_RAF) {
- value = CR_CODEC_DATAREAD(
- in_be32(CR_REG(ml403_ac97cr, CODEC_DATAREAD)));
- PDEBUG(CODEC_SUCCESS, "codec_read(): (done) reg=0x%x, "
- "value=0x%x / %d (STATUS=0x%x)\n",
- reg, value, value, stat);
-#else
- if ((in_be32(CR_REG(ml403_ac97cr, STATUS)) &
- CR_RAF) == CR_RAF) {
- value = CR_CODEC_DATAREAD(
- in_be32(CR_REG(ml403_ac97cr, CODEC_DATAREAD)));
- PDEBUG(CODEC_SUCCESS, "codec_read(): (done) "
- "reg=0x%x, value=0x%x / %d\n",
- reg, value, value);
-#endif
- lm4550_regfile[reg / 2].value = value;
- lm4550_regfile[reg / 2].flag |= LM4550_REG_DONEREAD;
- spin_unlock(&ml403_ac97cr->reg_lock);
- mutex_unlock(&ml403_ac97cr->cdc_mutex);
- return value;
- }
- spin_unlock(&ml403_ac97cr->reg_lock);
- schedule_timeout_uninterruptible(1);
- } while (time_after(end_time, jiffies));
- /* read the DATAREAD register anyway, see comment below */
- spin_lock(&ml403_ac97cr->reg_lock);
- value =
- CR_CODEC_DATAREAD(in_be32(CR_REG(ml403_ac97cr, CODEC_DATAREAD)));
- spin_unlock(&ml403_ac97cr->reg_lock);
-#ifdef CODEC_STAT
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "timeout while codec read! "
- "(reg=0x%x, last STATUS=0x%x, DATAREAD=0x%x / %d, %d) "
- "(cw=%d, cr=%d)\n",
- reg, stat, value, value, rafaccess,
- ml403_ac97cr->ac97_write, ml403_ac97cr->ac97_read);
-#else
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "timeout while codec read! "
- "(reg=0x%x, DATAREAD=0x%x / %d)\n",
- reg, value, value);
-#endif
- /* BUG: This is PURE speculation! But after _most_ read timeouts the
- * value in the register is ok!
- */
- lm4550_regfile[reg / 2].value = value;
- lm4550_regfile[reg / 2].flag |= LM4550_REG_DONEREAD;
- mutex_unlock(&ml403_ac97cr->cdc_mutex);
- return value;
-}
-
-static void
-snd_ml403_ac97cr_codec_write(struct snd_ac97 *ac97, unsigned short reg,
- unsigned short val)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr = ac97->private_data;
-
-#ifdef CODEC_STAT
- u32 stat;
- u32 rafaccess = 0;
-#endif
-#ifdef CODEC_WRITE_CHECK_RAF
- unsigned long end_time;
-#endif
-
- if (!LM4550_RF_OK(reg)) {
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "access to unknown/unused codec register 0x%x "
- "ignored!\n", reg);
- return;
- }
- if (lm4550_regfile[reg / 2].flag & LM4550_REG_READONLY) {
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "write access to read only codec register 0x%x "
- "ignored!\n", reg);
- return;
- }
- if ((val & lm4550_regfile[reg / 2].wmask) != val) {
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "write access to codec register 0x%x "
- "with bad value 0x%x / %d!\n",
- reg, val, val);
- val = val & lm4550_regfile[reg / 2].wmask;
- }
- if (((lm4550_regfile[reg / 2].flag & LM4550_REG_FAKEPROBE) &&
- ml403_ac97cr->ac97_fake) &&
- !(lm4550_regfile[reg / 2].flag & LM4550_REG_NOSHADOW)) {
- PDEBUG(CODEC_FAKE, "codec_write(): faking write to reg=0x%x, "
- "val=0x%x / %d\n", reg, val, val);
- lm4550_regfile[reg / 2].value = (val &
- lm4550_regfile[reg / 2].wmask);
- return;
- }
- if (mutex_lock_interruptible(&ml403_ac97cr->cdc_mutex) != 0)
- return;
-#ifdef CODEC_STAT
- ml403_ac97cr->ac97_write++;
-#endif
- spin_lock(&ml403_ac97cr->reg_lock);
- out_be32(CR_REG(ml403_ac97cr, CODEC_DATAWRITE),
- CR_CODEC_DATAWRITE(val));
- out_be32(CR_REG(ml403_ac97cr, CODEC_ADDR),
- CR_CODEC_ADDR(reg) | CR_CODEC_WRITE);
- spin_unlock(&ml403_ac97cr->reg_lock);
-#ifdef CODEC_WRITE_CHECK_RAF
- /* check CR_CODEC_RAF bit to see if write access to register is done;
- * loop until bit is set or timeout happens
- */
- end_time = jiffies + HZ / CODEC_TIMEOUT_AFTER_WRITE;
- do {
- spin_lock(&ml403_ac97cr->reg_lock);
-#ifdef CODEC_STAT
- rafaccess++;
- stat = in_be32(CR_REG(ml403_ac97cr, STATUS));
- if ((stat & CR_RAF) == CR_RAF) {
-#else
- if ((in_be32(CR_REG(ml403_ac97cr, STATUS)) &
- CR_RAF) == CR_RAF) {
-#endif
- PDEBUG(CODEC_SUCCESS, "codec_write(): (done) "
- "reg=0x%x, value=%d / 0x%x\n",
- reg, val, val);
- if (!(lm4550_regfile[reg / 2].flag &
- LM4550_REG_NOSHADOW) &&
- !(lm4550_regfile[reg / 2].flag &
- LM4550_REG_NOSAVE))
- lm4550_regfile[reg / 2].value = val;
- lm4550_regfile[reg / 2].flag |= LM4550_REG_DONEREAD;
- spin_unlock(&ml403_ac97cr->reg_lock);
- mutex_unlock(&ml403_ac97cr->cdc_mutex);
- return;
- }
- spin_unlock(&ml403_ac97cr->reg_lock);
- schedule_timeout_uninterruptible(1);
- } while (time_after(end_time, jiffies));
-#ifdef CODEC_STAT
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "timeout while codec write "
- "(reg=0x%x, val=0x%x / %d, last STATUS=0x%x, %d) "
- "(cw=%d, cr=%d)\n",
- reg, val, val, stat, rafaccess, ml403_ac97cr->ac97_write,
- ml403_ac97cr->ac97_read);
-#else
- snd_printk(KERN_WARNING SND_ML403_AC97CR_DRIVER ": "
- "timeout while codec write (reg=0x%x, val=0x%x / %d)\n",
- reg, val, val);
-#endif
-#else /* CODEC_WRITE_CHECK_RAF */
-#if CODEC_WAIT_AFTER_WRITE > 0
- /* officially, in AC97 spec there is no possibility for a AC97
- * controller to determine, if write access is done or not - so: How
- * is Xilinx able to provide a RAF bit for write access?
- * => very strange, thus just don't check RAF bit (compare with
- * Xilinx's example app in EDK 8.1i) and wait
- */
- schedule_timeout_uninterruptible(HZ / CODEC_WAIT_AFTER_WRITE);
-#endif
- PDEBUG(CODEC_SUCCESS, "codec_write(): (done) "
- "reg=0x%x, value=%d / 0x%x (no RAF check)\n",
- reg, val, val);
-#endif
- mutex_unlock(&ml403_ac97cr->cdc_mutex);
- return;
-}
-
-static int
-snd_ml403_ac97cr_chip_init(struct snd_ml403_ac97cr *ml403_ac97cr)
-{
- unsigned long end_time;
- PDEBUG(INIT_INFO, "chip_init():\n");
- end_time = jiffies + HZ / CODEC_TIMEOUT_ON_INIT;
- do {
- if (in_be32(CR_REG(ml403_ac97cr, STATUS)) & CR_CODECREADY) {
- /* clear both hardware FIFOs */
- out_be32(CR_REG(ml403_ac97cr, RESETFIFO),
- CR_RECRESET | CR_PLAYRESET);
- PDEBUG(INIT_INFO, "chip_init(): (done)\n");
- return 0;
- }
- schedule_timeout_uninterruptible(1);
- } while (time_after(end_time, jiffies));
- snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
- "timeout while waiting for codec, "
- "not ready!\n");
- return -EBUSY;
-}
-
-static int snd_ml403_ac97cr_free(struct snd_ml403_ac97cr *ml403_ac97cr)
-{
- PDEBUG(INIT_INFO, "free():\n");
- /* irq release */
- if (ml403_ac97cr->irq >= 0)
- free_irq(ml403_ac97cr->irq, ml403_ac97cr);
- if (ml403_ac97cr->capture_irq >= 0)
- free_irq(ml403_ac97cr->capture_irq, ml403_ac97cr);
- /* give back "port" */
- iounmap(ml403_ac97cr->port);
- kfree(ml403_ac97cr);
- PDEBUG(INIT_INFO, "free(): (done)\n");
- return 0;
-}
-
-static int snd_ml403_ac97cr_dev_free(struct snd_device *snddev)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr = snddev->device_data;
- PDEBUG(INIT_INFO, "dev_free():\n");
- return snd_ml403_ac97cr_free(ml403_ac97cr);
-}
-
-static int
-snd_ml403_ac97cr_create(struct snd_card *card, struct platform_device *pfdev,
- struct snd_ml403_ac97cr **rml403_ac97cr)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr;
- int err;
- static const struct snd_device_ops ops = {
- .dev_free = snd_ml403_ac97cr_dev_free,
- };
- struct resource *resource;
- int irq;
-
- *rml403_ac97cr = NULL;
- ml403_ac97cr = kzalloc(sizeof(*ml403_ac97cr), GFP_KERNEL);
- if (ml403_ac97cr == NULL)
- return -ENOMEM;
- spin_lock_init(&ml403_ac97cr->reg_lock);
- mutex_init(&ml403_ac97cr->cdc_mutex);
- ml403_ac97cr->card = card;
- ml403_ac97cr->pfdev = pfdev;
- ml403_ac97cr->irq = -1;
- ml403_ac97cr->enable_irq = 0;
- ml403_ac97cr->capture_irq = -1;
- ml403_ac97cr->enable_capture_irq = 0;
- ml403_ac97cr->port = NULL;
- ml403_ac97cr->res_port = NULL;
-
- PDEBUG(INIT_INFO, "Trying to reserve resources now ...\n");
- resource = platform_get_resource(pfdev, IORESOURCE_MEM, 0);
- /* get "port" */
- ml403_ac97cr->port = ioremap(resource->start,
- (resource->end) -
- (resource->start) + 1);
- if (ml403_ac97cr->port == NULL) {
- snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
- "unable to remap memory region (%pR)\n",
- resource);
- snd_ml403_ac97cr_free(ml403_ac97cr);
- return -EBUSY;
- }
- snd_printk(KERN_INFO SND_ML403_AC97CR_DRIVER ": "
- "remap controller memory region to "
- "0x%x done\n", (unsigned int)ml403_ac97cr->port);
- /* get irq */
- irq = platform_get_irq(pfdev, 0);
- if (request_irq(irq, snd_ml403_ac97cr_irq, 0,
- dev_name(&pfdev->dev), (void *)ml403_ac97cr)) {
- snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
- "unable to grab IRQ %d\n",
- irq);
- snd_ml403_ac97cr_free(ml403_ac97cr);
- return -EBUSY;
- }
- ml403_ac97cr->irq = irq;
- snd_printk(KERN_INFO SND_ML403_AC97CR_DRIVER ": "
- "request (playback) irq %d done\n",
- ml403_ac97cr->irq);
- irq = platform_get_irq(pfdev, 1);
- if (request_irq(irq, snd_ml403_ac97cr_irq, 0,
- dev_name(&pfdev->dev), (void *)ml403_ac97cr)) {
- snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
- "unable to grab IRQ %d\n",
- irq);
- snd_ml403_ac97cr_free(ml403_ac97cr);
- return -EBUSY;
- }
- ml403_ac97cr->capture_irq = irq;
- snd_printk(KERN_INFO SND_ML403_AC97CR_DRIVER ": "
- "request (capture) irq %d done\n",
- ml403_ac97cr->capture_irq);
-
- err = snd_ml403_ac97cr_chip_init(ml403_ac97cr);
- if (err < 0) {
- snd_ml403_ac97cr_free(ml403_ac97cr);
- return err;
- }
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ml403_ac97cr, &ops);
- if (err < 0) {
- PDEBUG(INIT_FAILURE, "probe(): snd_device_new() failed!\n");
- snd_ml403_ac97cr_free(ml403_ac97cr);
- return err;
- }
-
- *rml403_ac97cr = ml403_ac97cr;
- return 0;
-}
-
-static void snd_ml403_ac97cr_mixer_free(struct snd_ac97 *ac97)
-{
- struct snd_ml403_ac97cr *ml403_ac97cr = ac97->private_data;
- PDEBUG(INIT_INFO, "mixer_free():\n");
- ml403_ac97cr->ac97 = NULL;
- PDEBUG(INIT_INFO, "mixer_free(): (done)\n");
-}
-
-static int
-snd_ml403_ac97cr_mixer(struct snd_ml403_ac97cr *ml403_ac97cr)
-{
- struct snd_ac97_bus *bus;
- struct snd_ac97_template ac97;
- int err;
- static const struct snd_ac97_bus_ops ops = {
- .write = snd_ml403_ac97cr_codec_write,
- .read = snd_ml403_ac97cr_codec_read,
- };
- PDEBUG(INIT_INFO, "mixer():\n");
- err = snd_ac97_bus(ml403_ac97cr->card, 0, &ops, NULL, &bus);
- if (err < 0)
- return err;
-
- memset(&ac97, 0, sizeof(ac97));
- ml403_ac97cr->ac97_fake = 1;
- lm4550_regfile_init();
-#ifdef CODEC_STAT
- ml403_ac97cr->ac97_read = 0;
- ml403_ac97cr->ac97_write = 0;
-#endif
- ac97.private_data = ml403_ac97cr;
- ac97.private_free = snd_ml403_ac97cr_mixer_free;
- ac97.scaps = AC97_SCAP_AUDIO | AC97_SCAP_SKIP_MODEM |
- AC97_SCAP_NO_SPDIF;
- err = snd_ac97_mixer(bus, &ac97, &ml403_ac97cr->ac97);
- ml403_ac97cr->ac97_fake = 0;
- lm4550_regfile_write_values_after_init(ml403_ac97cr->ac97);
- PDEBUG(INIT_INFO, "mixer(): (done) snd_ac97_mixer()=%d\n", err);
- return err;
-}
-
-static int
-snd_ml403_ac97cr_pcm(struct snd_ml403_ac97cr *ml403_ac97cr, int device)
-{
- struct snd_pcm *pcm;
- int err;
-
- err = snd_pcm_new(ml403_ac97cr->card, "ML403AC97CR/1", device, 1, 1,
- &pcm);
- if (err < 0)
- return err;
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &snd_ml403_ac97cr_playback_ops);
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
- &snd_ml403_ac97cr_capture_ops);
- pcm->private_data = ml403_ac97cr;
- pcm->info_flags = 0;
- strcpy(pcm->name, "ML403AC97CR DAC/ADC");
- ml403_ac97cr->pcm = pcm;
-
- snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
- NULL,
- 64 * 1024,
- 128 * 1024);
- return 0;
-}
-
-static int snd_ml403_ac97cr_probe(struct platform_device *pfdev)
-{
- struct snd_card *card;
- struct snd_ml403_ac97cr *ml403_ac97cr = NULL;
- int err;
- int dev = pfdev->id;
-
- if (dev >= SNDRV_CARDS)
- return -ENODEV;
- if (!enable[dev])
- return -ENOENT;
-
- err = snd_card_new(&pfdev->dev, index[dev], id[dev], THIS_MODULE,
- 0, &card);
- if (err < 0)
- return err;
- err = snd_ml403_ac97cr_create(card, pfdev, &ml403_ac97cr);
- if (err < 0) {
- PDEBUG(INIT_FAILURE, "probe(): create failed!\n");
- snd_card_free(card);
- return err;
- }
- PDEBUG(INIT_INFO, "probe(): create done\n");
- card->private_data = ml403_ac97cr;
- err = snd_ml403_ac97cr_mixer(ml403_ac97cr);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
- PDEBUG(INIT_INFO, "probe(): mixer done\n");
- err = snd_ml403_ac97cr_pcm(ml403_ac97cr, 0);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
- PDEBUG(INIT_INFO, "probe(): PCM done\n");
- strcpy(card->driver, SND_ML403_AC97CR_DRIVER);
- strcpy(card->shortname, "ML403 AC97 Controller Reference");
- sprintf(card->longname, "%s %s at 0x%lx, irq %i & %i, device %i",
- card->shortname, card->driver,
- (unsigned long)ml403_ac97cr->port, ml403_ac97cr->irq,
- ml403_ac97cr->capture_irq, dev + 1);
-
- err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
- platform_set_drvdata(pfdev, card);
- PDEBUG(INIT_INFO, "probe(): (done)\n");
- return 0;
-}
-
-static int snd_ml403_ac97cr_remove(struct platform_device *pfdev)
-{
- snd_card_free(platform_get_drvdata(pfdev));
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" SND_ML403_AC97CR_DRIVER);
-
-static struct platform_driver snd_ml403_ac97cr_driver = {
- .probe = snd_ml403_ac97cr_probe,
- .remove = snd_ml403_ac97cr_remove,
- .driver = {
- .name = SND_ML403_AC97CR_DRIVER,
- },
-};
-
-module_platform_driver(snd_ml403_ac97cr_driver);
diff --git a/sound/drivers/pcm-indirect2.c b/sound/drivers/pcm-indirect2.c
deleted file mode 100644
index 4c491d0ff071..000000000000
--- a/sound/drivers/pcm-indirect2.c
+++ /dev/null
@@ -1,560 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Helper functions for indirect PCM data transfer to a simple FIFO in
- * hardware (small, no possibility to read "hardware io position",
- * updating position done by interrupt, ...)
- *
- * Copyright (c) by 2007 Joachim Foerster <JOFT@gmx.de>
- *
- * Based on "pcm-indirect.h" (alsa-driver-1.0.13) by
- *
- * Copyright (c) by Takashi Iwai <tiwai@suse.de>
- * Jaroslav Kysela <perex@suse.cz>
- */
-
-/* snd_printk/d() */
-#include <sound/core.h>
-/* struct snd_pcm_substream, struct snd_pcm_runtime, snd_pcm_uframes_t
- * snd_pcm_period_elapsed() */
-#include <sound/pcm.h>
-
-#include "pcm-indirect2.h"
-
-#ifdef SND_PCM_INDIRECT2_STAT
-/* jiffies */
-#include <linux/jiffies.h>
-
-void snd_pcm_indirect2_stat(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- int i;
- int j;
- int k;
- int seconds = (rec->lastbytetime - rec->firstbytetime) / HZ;
-
- snd_printk(KERN_DEBUG "STAT: mul_elapsed: %u, mul_elapsed_real: %d, "
- "irq_occurred: %d\n",
- rec->mul_elapsed, rec->mul_elapsed_real, rec->irq_occured);
- snd_printk(KERN_DEBUG "STAT: min_multiple: %d (irqs/period)\n",
- rec->min_multiple);
- snd_printk(KERN_DEBUG "STAT: firstbytetime: %lu, lastbytetime: %lu, "
- "firstzerotime: %lu\n",
- rec->firstbytetime, rec->lastbytetime, rec->firstzerotime);
- snd_printk(KERN_DEBUG "STAT: bytes2hw: %u Bytes => (by runtime->rate) "
- "length: %d s\n",
- rec->bytes2hw, rec->bytes2hw / 2 / 2 / runtime->rate);
- snd_printk(KERN_DEBUG "STAT: (by measurement) length: %d => "
- "rate: %d Bytes/s = %d Frames/s|Hz\n",
- seconds, rec->bytes2hw / seconds,
- rec->bytes2hw / 2 / 2 / seconds);
- snd_printk(KERN_DEBUG
- "STAT: zeros2hw: %u = %d ms ~ %d * %d zero copies\n",
- rec->zeros2hw, ((rec->zeros2hw / 2 / 2) * 1000) /
- runtime->rate,
- rec->zeros2hw / (rec->hw_buffer_size / 2),
- (rec->hw_buffer_size / 2));
- snd_printk(KERN_DEBUG "STAT: pointer_calls: %u, lastdifftime: %u\n",
- rec->pointer_calls, rec->lastdifftime);
- snd_printk(KERN_DEBUG "STAT: sw_io: %d, sw_data: %d\n", rec->sw_io,
- rec->sw_data);
- snd_printk(KERN_DEBUG "STAT: byte_sizes[]:\n");
- k = 0;
- for (j = 0; j < 8; j++) {
- for (i = j * 8; i < (j + 1) * 8; i++)
- if (rec->byte_sizes[i] != 0) {
- snd_printk(KERN_DEBUG "%u: %u",
- i, rec->byte_sizes[i]);
- k++;
- }
- if (((k % 8) == 0) && (k != 0)) {
- snd_printk(KERN_DEBUG "\n");
- k = 0;
- }
- }
- snd_printk(KERN_DEBUG "\n");
- snd_printk(KERN_DEBUG "STAT: zero_sizes[]:\n");
- for (j = 0; j < 8; j++) {
- k = 0;
- for (i = j * 8; i < (j + 1) * 8; i++)
- if (rec->zero_sizes[i] != 0)
- snd_printk(KERN_DEBUG "%u: %u",
- i, rec->zero_sizes[i]);
- else
- k++;
- if (!k)
- snd_printk(KERN_DEBUG "\n");
- }
- snd_printk(KERN_DEBUG "\n");
- snd_printk(KERN_DEBUG "STAT: min_adds[]:\n");
- for (j = 0; j < 8; j++) {
- if (rec->min_adds[j] != 0)
- snd_printk(KERN_DEBUG "%u: %u", j, rec->min_adds[j]);
- }
- snd_printk(KERN_DEBUG "\n");
- snd_printk(KERN_DEBUG "STAT: mul_adds[]:\n");
- for (j = 0; j < 8; j++) {
- if (rec->mul_adds[j] != 0)
- snd_printk(KERN_DEBUG "%u: %u", j, rec->mul_adds[j]);
- }
- snd_printk(KERN_DEBUG "\n");
- snd_printk(KERN_DEBUG
- "STAT: zero_times_saved: %d, zero_times_notsaved: %d\n",
- rec->zero_times_saved, rec->zero_times_notsaved);
- /* snd_printk(KERN_DEBUG "STAT: zero_times[]\n");
- i = 0;
- for (j = 0; j < 3750; j++) {
- if (rec->zero_times[j] != 0) {
- snd_printk(KERN_DEBUG "%u: %u", j, rec->zero_times[j]);
- i++;
- }
- if (((i % 8) == 0) && (i != 0))
- snd_printk(KERN_DEBUG "\n");
- }
- snd_printk(KERN_DEBUG "\n"); */
- return;
-}
-#endif
-
-/*
- * _internal_ helper function for playback/capture transfer function
- */
-static void
-snd_pcm_indirect2_increase_min_periods(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- int isplay, int iscopy,
- unsigned int bytes)
-{
- if (rec->min_periods >= 0) {
- if (iscopy) {
- rec->sw_io += bytes;
- if (rec->sw_io >= rec->sw_buffer_size)
- rec->sw_io -= rec->sw_buffer_size;
- } else if (isplay) {
- /* If application does not write data in multiples of
- * a period, move sw_data to the next correctly aligned
- * position, so that sw_io can converge to it (in the
- * next step).
- */
- if (!rec->check_alignment) {
- if (rec->bytes2hw %
- snd_pcm_lib_period_bytes(substream)) {
- unsigned bytes2hw_aligned =
- (1 +
- (rec->bytes2hw /
- snd_pcm_lib_period_bytes
- (substream))) *
- snd_pcm_lib_period_bytes
- (substream);
- rec->sw_data =
- bytes2hw_aligned %
- rec->sw_buffer_size;
-#ifdef SND_PCM_INDIRECT2_STAT
- snd_printk(KERN_DEBUG
- "STAT: @re-align: aligned "
- "bytes2hw to next period "
- "size boundary: %d "
- "(instead of %d)\n",
- bytes2hw_aligned,
- rec->bytes2hw);
- snd_printk(KERN_DEBUG
- "STAT: @re-align: sw_data "
- "moves to: %d\n",
- rec->sw_data);
-#endif
- }
- rec->check_alignment = 1;
- }
- /* We are at the end and are copying zeros into the
- * fifo.
- * Now, we have to make sure that sw_io is increased
- * until the position of sw_data: Filling the fifo with
- * the first zeros means, the last bytes were played.
- */
- if (rec->sw_io != rec->sw_data) {
- unsigned int diff;
- if (rec->sw_data > rec->sw_io)
- diff = rec->sw_data - rec->sw_io;
- else
- diff = (rec->sw_buffer_size -
- rec->sw_io) +
- rec->sw_data;
- if (bytes >= diff)
- rec->sw_io = rec->sw_data;
- else {
- rec->sw_io += bytes;
- if (rec->sw_io >= rec->sw_buffer_size)
- rec->sw_io -=
- rec->sw_buffer_size;
- }
- }
- }
- rec->min_period_count += bytes;
- if (rec->min_period_count >= (rec->hw_buffer_size / 2)) {
- rec->min_periods += (rec->min_period_count /
- (rec->hw_buffer_size / 2));
-#ifdef SND_PCM_INDIRECT2_STAT
- if ((rec->min_period_count /
- (rec->hw_buffer_size / 2)) > 7)
- snd_printk(KERN_DEBUG
- "STAT: more than 7 (%d) min_adds "
- "at once - too big to save!\n",
- (rec->min_period_count /
- (rec->hw_buffer_size / 2)));
- else
- rec->min_adds[(rec->min_period_count /
- (rec->hw_buffer_size / 2))]++;
-#endif
- rec->min_period_count = (rec->min_period_count %
- (rec->hw_buffer_size / 2));
- }
- } else if (isplay && iscopy)
- rec->min_periods = 0;
-}
-
-/*
- * helper function for playback/capture pointer callback
- */
-snd_pcm_uframes_t
-snd_pcm_indirect2_pointer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec)
-{
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->pointer_calls++;
-#endif
- return bytes_to_frames(substream->runtime, rec->sw_io);
-}
-
-/*
- * _internal_ helper function for playback interrupt callback
- */
-static void
-snd_pcm_indirect2_playback_transfer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- snd_pcm_indirect2_copy_t copy,
- snd_pcm_indirect2_zero_t zero)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t appl_ptr = runtime->control->appl_ptr;
-
- /* runtime->control->appl_ptr: position where ALSA will write next time
- * rec->appl_ptr: position where ALSA was last time
- * diff: obviously ALSA wrote that much bytes into the intermediate
- * buffer since we checked last time
- */
- snd_pcm_sframes_t diff = appl_ptr - rec->appl_ptr;
-
- if (diff) {
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->lastdifftime = jiffies;
-#endif
- if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2))
- diff += runtime->boundary;
- /* number of bytes "added" by ALSA increases the number of
- * bytes which are ready to "be transferred to HW"/"played"
- * Then, set rec->appl_ptr to not count bytes twice next time.
- */
- rec->sw_ready += (int)frames_to_bytes(runtime, diff);
- rec->appl_ptr = appl_ptr;
- }
- if (rec->hw_ready && (rec->sw_ready <= 0)) {
- unsigned int bytes;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if (rec->firstzerotime == 0) {
- rec->firstzerotime = jiffies;
- snd_printk(KERN_DEBUG
- "STAT: @firstzerotime: mul_elapsed: %d, "
- "min_period_count: %d\n",
- rec->mul_elapsed, rec->min_period_count);
- snd_printk(KERN_DEBUG
- "STAT: @firstzerotime: sw_io: %d, "
- "sw_data: %d, appl_ptr: %u\n",
- rec->sw_io, rec->sw_data,
- (unsigned int)appl_ptr);
- }
- if ((jiffies - rec->firstzerotime) < 3750) {
- rec->zero_times[(jiffies - rec->firstzerotime)]++;
- rec->zero_times_saved++;
- } else
- rec->zero_times_notsaved++;
-#endif
- bytes = zero(substream, rec);
-
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->zeros2hw += bytes;
- if (bytes < 64)
- rec->zero_sizes[bytes]++;
- else
- snd_printk(KERN_DEBUG
- "STAT: %d zero Bytes copied to hardware at "
- "once - too big to save!\n",
- bytes);
-#endif
- snd_pcm_indirect2_increase_min_periods(substream, rec, 1, 0,
- bytes);
- return;
- }
- while (rec->hw_ready && (rec->sw_ready > 0)) {
- /* sw_to_end: max. number of bytes that can be read/take from
- * the current position (sw_data) in _one_ step
- */
- unsigned int sw_to_end = rec->sw_buffer_size - rec->sw_data;
-
- /* bytes: number of bytes we have available (for reading) */
- unsigned int bytes = rec->sw_ready;
-
- if (sw_to_end < bytes)
- bytes = sw_to_end;
- if (!bytes)
- break;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if (rec->firstbytetime == 0)
- rec->firstbytetime = jiffies;
- rec->lastbytetime = jiffies;
-#endif
- /* copy bytes from intermediate buffer position sw_data to the
- * HW and return number of bytes actually written
- * Furthermore, set hw_ready to 0, if the fifo isn't empty
- * now => more could be transferred to fifo
- */
- bytes = copy(substream, rec, bytes);
- rec->bytes2hw += bytes;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if (bytes < 64)
- rec->byte_sizes[bytes]++;
- else
- snd_printk(KERN_DEBUG
- "STAT: %d Bytes copied to hardware at once "
- "- too big to save!\n",
- bytes);
-#endif
- /* increase sw_data by the number of actually written bytes
- * (= number of taken bytes from intermediate buffer)
- */
- rec->sw_data += bytes;
- if (rec->sw_data == rec->sw_buffer_size)
- rec->sw_data = 0;
- /* now sw_data is the position where ALSA is going to write
- * in the intermediate buffer next time = position we are going
- * to read from next time
- */
-
- snd_pcm_indirect2_increase_min_periods(substream, rec, 1, 1,
- bytes);
-
- /* we read bytes from intermediate buffer, so we need to say
- * that the number of bytes ready for transfer are decreased
- * now
- */
- rec->sw_ready -= bytes;
- }
- return;
-}
-
-/*
- * helper function for playback interrupt routine
- */
-void
-snd_pcm_indirect2_playback_interrupt(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- snd_pcm_indirect2_copy_t copy,
- snd_pcm_indirect2_zero_t zero)
-{
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->irq_occured++;
-#endif
- /* hardware played some bytes, so there is room again (in fifo) */
- rec->hw_ready = 1;
-
- /* don't call ack() now, instead call transfer() function directly
- * (normally called by ack() )
- */
- snd_pcm_indirect2_playback_transfer(substream, rec, copy, zero);
-
- if (rec->min_periods >= rec->min_multiple) {
-#ifdef SND_PCM_INDIRECT2_STAT
- if ((rec->min_periods / rec->min_multiple) > 7)
- snd_printk(KERN_DEBUG
- "STAT: more than 7 (%d) mul_adds - too big "
- "to save!\n",
- (rec->min_periods / rec->min_multiple));
- else
- rec->mul_adds[(rec->min_periods /
- rec->min_multiple)]++;
- rec->mul_elapsed_real += (rec->min_periods /
- rec->min_multiple);
- rec->mul_elapsed++;
-#endif
- rec->min_periods = (rec->min_periods % rec->min_multiple);
- snd_pcm_period_elapsed(substream);
- }
-}
-
-/*
- * _internal_ helper function for capture interrupt callback
- */
-static void
-snd_pcm_indirect2_capture_transfer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- snd_pcm_indirect2_copy_t copy,
- snd_pcm_indirect2_zero_t null)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t appl_ptr = runtime->control->appl_ptr;
- snd_pcm_sframes_t diff = appl_ptr - rec->appl_ptr;
-
- if (diff) {
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->lastdifftime = jiffies;
-#endif
- if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2))
- diff += runtime->boundary;
- rec->sw_ready -= frames_to_bytes(runtime, diff);
- rec->appl_ptr = appl_ptr;
- }
- /* if hardware has something, but the intermediate buffer is full
- * => skip contents of buffer
- */
- if (rec->hw_ready && (rec->sw_ready >= (int)rec->sw_buffer_size)) {
- unsigned int bytes;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if (rec->firstzerotime == 0) {
- rec->firstzerotime = jiffies;
- snd_printk(KERN_DEBUG "STAT: (capture) "
- "@firstzerotime: mul_elapsed: %d, "
- "min_period_count: %d\n",
- rec->mul_elapsed, rec->min_period_count);
- snd_printk(KERN_DEBUG "STAT: (capture) "
- "@firstzerotime: sw_io: %d, sw_data: %d, "
- "appl_ptr: %u\n",
- rec->sw_io, rec->sw_data,
- (unsigned int)appl_ptr);
- }
- if ((jiffies - rec->firstzerotime) < 3750) {
- rec->zero_times[(jiffies - rec->firstzerotime)]++;
- rec->zero_times_saved++;
- } else
- rec->zero_times_notsaved++;
-#endif
- bytes = null(substream, rec);
-
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->zeros2hw += bytes;
- if (bytes < 64)
- rec->zero_sizes[bytes]++;
- else
- snd_printk(KERN_DEBUG
- "STAT: (capture) %d zero Bytes copied to "
- "hardware at once - too big to save!\n",
- bytes);
-#endif
- snd_pcm_indirect2_increase_min_periods(substream, rec, 0, 0,
- bytes);
- /* report an overrun */
- rec->sw_io = SNDRV_PCM_POS_XRUN;
- return;
- }
- while (rec->hw_ready && (rec->sw_ready < (int)rec->sw_buffer_size)) {
- /* sw_to_end: max. number of bytes that we can write to the
- * intermediate buffer (until it's end)
- */
- size_t sw_to_end = rec->sw_buffer_size - rec->sw_data;
-
- /* bytes: max. number of bytes, which may be copied to the
- * intermediate buffer without overflow (in _one_ step)
- */
- size_t bytes = rec->sw_buffer_size - rec->sw_ready;
-
- /* limit number of bytes (for transfer) by available room in
- * the intermediate buffer
- */
- if (sw_to_end < bytes)
- bytes = sw_to_end;
- if (!bytes)
- break;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if (rec->firstbytetime == 0)
- rec->firstbytetime = jiffies;
- rec->lastbytetime = jiffies;
-#endif
- /* copy bytes from the intermediate buffer (position sw_data)
- * to the HW at most and return number of bytes actually copied
- * from HW
- * Furthermore, set hw_ready to 0, if the fifo is empty now.
- */
- bytes = copy(substream, rec, bytes);
- rec->bytes2hw += bytes;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if (bytes < 64)
- rec->byte_sizes[bytes]++;
- else
- snd_printk(KERN_DEBUG
- "STAT: (capture) %d Bytes copied to "
- "hardware at once - too big to save!\n",
- bytes);
-#endif
- /* increase sw_data by the number of actually copied bytes from
- * HW
- */
- rec->sw_data += bytes;
- if (rec->sw_data == rec->sw_buffer_size)
- rec->sw_data = 0;
-
- snd_pcm_indirect2_increase_min_periods(substream, rec, 0, 1,
- bytes);
-
- /* number of bytes in the intermediate buffer, which haven't
- * been fetched by ALSA yet.
- */
- rec->sw_ready += bytes;
- }
- return;
-}
-
-/*
- * helper function for capture interrupt routine
- */
-void
-snd_pcm_indirect2_capture_interrupt(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- snd_pcm_indirect2_copy_t copy,
- snd_pcm_indirect2_zero_t null)
-{
-#ifdef SND_PCM_INDIRECT2_STAT
- rec->irq_occured++;
-#endif
- /* hardware recorded some bytes, so there is something to read from the
- * record fifo:
- */
- rec->hw_ready = 1;
-
- /* don't call ack() now, instead call transfer() function directly
- * (normally called by ack() )
- */
- snd_pcm_indirect2_capture_transfer(substream, rec, copy, null);
-
- if (rec->min_periods >= rec->min_multiple) {
-
-#ifdef SND_PCM_INDIRECT2_STAT
- if ((rec->min_periods / rec->min_multiple) > 7)
- snd_printk(KERN_DEBUG
- "STAT: more than 7 (%d) mul_adds - "
- "too big to save!\n",
- (rec->min_periods / rec->min_multiple));
- else
- rec->mul_adds[(rec->min_periods /
- rec->min_multiple)]++;
- rec->mul_elapsed_real += (rec->min_periods /
- rec->min_multiple);
- rec->mul_elapsed++;
-#endif
- rec->min_periods = (rec->min_periods % rec->min_multiple);
- snd_pcm_period_elapsed(substream);
- }
-}
diff --git a/sound/drivers/pcm-indirect2.h b/sound/drivers/pcm-indirect2.h
deleted file mode 100644
index 355ce76d2403..000000000000
--- a/sound/drivers/pcm-indirect2.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Helper functions for indirect PCM data transfer to a simple FIFO in
- * hardware (small, no possibility to read "hardware io position",
- * updating position done by interrupt, ...)
- *
- * Copyright (c) by 2007 Joachim Foerster <JOFT@gmx.de>
- *
- * Based on "pcm-indirect.h" (alsa-driver-1.0.13) by
- *
- * Copyright (c) by Takashi Iwai <tiwai@suse.de>
- * Jaroslav Kysela <perex@suse.cz>
- */
-
-#ifndef __SOUND_PCM_INDIRECT2_H
-#define __SOUND_PCM_INDIRECT2_H
-
-/* struct snd_pcm_substream, struct snd_pcm_runtime, snd_pcm_uframes_t */
-#include <sound/pcm.h>
-
-/* Debug options for code which may be removed completely in a final version */
-#ifdef CONFIG_SND_DEBUG
-#define SND_PCM_INDIRECT2_STAT /* turn on some "statistics" about the
- * process of copying bytes from the
- * intermediate buffer to the hardware
- * fifo and the other way round
- */
-#endif
-
-struct snd_pcm_indirect2 {
- unsigned int hw_buffer_size; /* Byte size of hardware buffer */
- int hw_ready; /* playback: 1 = hw fifo has room left,
- * 0 = hw fifo is full
- */
- unsigned int min_multiple;
- int min_periods; /* counts number of min. periods until
- * min_multiple is reached
- */
- int min_period_count; /* counts bytes to count number of
- * min. periods
- */
-
- unsigned int sw_buffer_size; /* Byte size of software buffer */
-
- /* sw_data: position in intermediate buffer, where we will read (or
- * write) from/to next time (to transfer data to/from HW)
- */
- unsigned int sw_data; /* Offset to next dst (or src) in sw
- * ring buffer
- */
- /* easiest case (playback):
- * sw_data is nearly the same as ~ runtime->control->appl_ptr, with the
- * exception that sw_data is "behind" by the number of bytes ALSA wrote
- * to the intermediate buffer last time.
- * A call to ack() callback synchronizes both indirectly.
- */
-
- /* We have no real sw_io pointer here. Usually sw_io is pointing to the
- * current playback/capture position _inside_ the hardware. Devices
- * with plain FIFOs often have no possibility to publish this position.
- * So we say: if sw_data is updated, bytes were copied to the hardware,
- * and we increase sw_io by that amount, because that many bytes must
- * have been played. So sw_io will stay behind
- * sw_data all the time and has to converge to sw_data at the end of
- * playback.
- */
- unsigned int sw_io; /* Current software pointer in bytes */
-
- /* sw_ready: number of bytes ALSA copied to the intermediate buffer, so
- * it represents the number of bytes which wait for transfer to the HW
- */
- int sw_ready; /* Bytes ready to be transferred to/from hw */
-
- /* appl_ptr: last known position of ALSA (where ALSA is going to write
- * next time into the intermediate buffer)
- */
- snd_pcm_uframes_t appl_ptr; /* Last seen appl_ptr */
-
- unsigned int bytes2hw;
- int check_alignment;
-
-#ifdef SND_PCM_INDIRECT2_STAT
- unsigned int zeros2hw;
- unsigned int mul_elapsed;
- unsigned int mul_elapsed_real;
- unsigned long firstbytetime;
- unsigned long lastbytetime;
- unsigned long firstzerotime;
- unsigned int byte_sizes[64];
- unsigned int zero_sizes[64];
- unsigned int min_adds[8];
- unsigned int mul_adds[8];
- unsigned int zero_times[3750]; /* = 15s */
- unsigned int zero_times_saved;
- unsigned int zero_times_notsaved;
- unsigned int irq_occured;
- unsigned int pointer_calls;
- unsigned int lastdifftime;
-#endif
-};
-
-typedef size_t (*snd_pcm_indirect2_copy_t) (struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- size_t bytes);
-typedef size_t (*snd_pcm_indirect2_zero_t) (struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec);
-
-#ifdef SND_PCM_INDIRECT2_STAT
-void snd_pcm_indirect2_stat(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec);
-#endif
-
-snd_pcm_uframes_t
-snd_pcm_indirect2_pointer(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec);
-void
-snd_pcm_indirect2_playback_interrupt(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- snd_pcm_indirect2_copy_t copy,
- snd_pcm_indirect2_zero_t zero);
-void
-snd_pcm_indirect2_capture_interrupt(struct snd_pcm_substream *substream,
- struct snd_pcm_indirect2 *rec,
- snd_pcm_indirect2_copy_t copy,
- snd_pcm_indirect2_zero_t null);
-
-#endif /* __SOUND_PCM_INDIRECT2_H */
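For reference, the bookkeeping the removed pcm-indirect2 helpers performed around their intermediate ring buffer reduces to a few lines. The standalone C sketch below mirrors the capture transfer loop (sw_data as the write offset, sw_ready as the fill level); the buffer size and the fifo_read() stand-in are invented for illustration and are not part of the ALSA API.

/* Illustrative sketch of the capture-side bookkeeping of pcm-indirect2.
 * Standalone demo: fifo_read() and SW_BUFFER_SIZE are made up.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define SW_BUFFER_SIZE 4096

struct indirect_rec {
	unsigned char buf[SW_BUFFER_SIZE];
	unsigned int sw_data;	/* next write offset in the ring buffer */
	int sw_ready;		/* bytes waiting to be fetched by the reader */
};

/* Stand-in for the hardware FIFO: returns up to 128 bytes per call. */
static size_t fifo_read(unsigned char *dst, size_t max)
{
	size_t n = max < 128 ? max : 128;

	memset(dst, 0xaa, n);
	return n;
}

/* Mirrors the capture transfer loop: fill the ring buffer until it is
 * full or the FIFO runs dry, limiting each copy to the contiguous room
 * up to the buffer end (sw_to_end) and to the free space (bytes).
 */
static void capture_transfer(struct indirect_rec *rec)
{
	while (rec->sw_ready < SW_BUFFER_SIZE) {
		size_t sw_to_end = SW_BUFFER_SIZE - rec->sw_data;
		size_t bytes = SW_BUFFER_SIZE - rec->sw_ready;

		if (sw_to_end < bytes)
			bytes = sw_to_end;
		bytes = fifo_read(rec->buf + rec->sw_data, bytes);
		if (!bytes)
			break;

		rec->sw_data = (rec->sw_data + bytes) % SW_BUFFER_SIZE;
		rec->sw_ready += bytes;
	}
}

int main(void)
{
	struct indirect_rec rec = { .sw_data = 0, .sw_ready = 0 };

	capture_transfer(&rec);
	printf("sw_data=%u sw_ready=%d\n", rec.sw_data, rec.sw_ready);
	return 0;
}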
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
index ecefa7c83134..38603cb2bd5b 100644
--- a/sound/drivers/portman2x4.c
+++ b/sound/drivers/portman2x4.c
@@ -457,7 +457,7 @@ static int portman_probe(struct parport *p)
/* Set for RXDATA0 where no damage will be done. */
/* 5 */
- parport_write_control(p, RXDATA0 + STROBE); /* Write Strobe=1 to command reg. */
+ parport_write_control(p, RXDATA0 | STROBE); /* Write Strobe=1 to command reg. */
/* 6 */
if ((parport_read_status(p) & ESTB) != ESTB)
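The '+' to '|' change above is about composing bit flags safely: OR is idempotent when a bit is already set, whereas addition would carry into an unrelated bit. A tiny standalone illustration (the values are made up, not the portman2x4 register definitions):

#include <stdio.h>

int main(void)
{
	unsigned int strobe = 0x01;
	unsigned int cmd = 0x03;	/* suppose bit 0 is already set */

	printf("cmd + strobe = 0x%02x\n", cmd + strobe);	/* 0x04: carried into bit 2 */
	printf("cmd | strobe = 0x%02x\n", cmd | strobe);	/* 0x03: intended bit pattern */
	return 0;
}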
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 995c2cefc222..25778765cbfe 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -150,8 +150,12 @@ config SND_FIREWIRE_MOTU
Say Y here to enable support for FireWire devices which MOTU produced:
* 828mk2
* Traveler
- * 828mk3
+ * Ultralite
+ * 8pre
+ * 828mk3 (FireWire only)
+ * 828mk3 (Hybrid)
* Audio Express
+ * 4pre
To compile this driver as a module, choose M here: the module
will be called snd-firewire-motu.
@@ -164,6 +168,8 @@ config SND_FIREFACE
Say Y here to include support for RME fireface series.
* Fireface 400
* Fireface 800
+ * Fireface UFX
* Fireface UCX
+ * Fireface 802
endif # SND_FIREWIRE
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index 67d735e9a6a4..fea92e148790 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -82,7 +82,8 @@ int amdtp_am824_set_parameters(struct amdtp_stream *s, unsigned int rate,
if (err < 0)
return err;
- s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc;
+ if (s->direction == AMDTP_OUT_STREAM)
+ s->ctx_data.rx.fdf = AMDTP_FDF_AM824 | s->sfc;
p->pcm_channels = pcm_channels;
p->midi_ports = midi_ports;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 37d38efb4c87..f8586f75441d 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -20,6 +20,8 @@
#define CYCLES_PER_SECOND 8000
#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
+#define OHCI_MAX_SECOND 8
+
/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"
@@ -337,25 +339,26 @@ void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
-static unsigned int calculate_data_blocks(struct amdtp_stream *s,
- unsigned int syt)
+static unsigned int calculate_data_blocks(unsigned int *data_block_state,
+ bool is_blocking, bool is_no_info,
+ unsigned int syt_interval, enum cip_sfc sfc)
{
- unsigned int phase, data_blocks;
+ unsigned int data_blocks;
/* Blocking mode. */
- if (s->flags & CIP_BLOCKING) {
+ if (is_blocking) {
/* This module generate empty packet for 'no data'. */
- if (syt == CIP_SYT_NO_INFO)
+ if (is_no_info)
data_blocks = 0;
else
- data_blocks = s->syt_interval;
+ data_blocks = syt_interval;
/* Non-blocking mode. */
} else {
- if (!cip_sfc_is_base_44100(s->sfc)) {
+ if (!cip_sfc_is_base_44100(sfc)) {
// Sample_rate / 8000 is an integer, and precomputed.
- data_blocks = s->ctx_data.rx.data_block_state;
+ data_blocks = *data_block_state;
} else {
- phase = s->ctx_data.rx.data_block_state;
+ unsigned int phase = *data_block_state;
/*
* This calculates the number of data blocks per packet so that
@@ -365,31 +368,30 @@ static unsigned int calculate_data_blocks(struct amdtp_stream *s,
* as possible in the sequence (to prevent underruns of the
* device's buffer).
*/
- if (s->sfc == CIP_SFC_44100)
+ if (sfc == CIP_SFC_44100)
/* 6 6 5 6 5 6 5 ... */
data_blocks = 5 + ((phase & 1) ^
(phase == 0 || phase >= 40));
else
/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
- data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
- if (++phase >= (80 >> (s->sfc >> 1)))
+ data_blocks = 11 * (sfc >> 1) + (phase == 0);
+ if (++phase >= (80 >> (sfc >> 1)))
phase = 0;
- s->ctx_data.rx.data_block_state = phase;
+ *data_block_state = phase;
}
}
return data_blocks;
}
-static unsigned int calculate_syt(struct amdtp_stream *s,
- unsigned int cycle)
+static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
+ unsigned int *syt_offset_state, enum cip_sfc sfc)
{
- unsigned int syt_offset, phase, index, syt;
+ unsigned int syt_offset;
- if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) {
- if (!cip_sfc_is_base_44100(s->sfc))
- syt_offset = s->ctx_data.rx.last_syt_offset +
- s->ctx_data.rx.syt_offset_state;
+ if (*last_syt_offset < TICKS_PER_CYCLE) {
+ if (!cip_sfc_is_base_44100(sfc))
+ syt_offset = *last_syt_offset + *syt_offset_state;
else {
/*
* The time, in ticks, of the n'th SYT_INTERVAL sample is:
@@ -401,28 +403,24 @@ static unsigned int calculate_syt(struct amdtp_stream *s,
* 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
* This code generates _exactly_ the same sequence.
*/
- phase = s->ctx_data.rx.syt_offset_state;
- index = phase % 13;
- syt_offset = s->ctx_data.rx.last_syt_offset;
+ unsigned int phase = *syt_offset_state;
+ unsigned int index = phase % 13;
+
+ syt_offset = *last_syt_offset;
syt_offset += 1386 + ((index && !(index & 3)) ||
phase == 146);
if (++phase >= 147)
phase = 0;
- s->ctx_data.rx.syt_offset_state = phase;
+ *syt_offset_state = phase;
}
} else
- syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE;
- s->ctx_data.rx.last_syt_offset = syt_offset;
+ syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
+ *last_syt_offset = syt_offset;
- if (syt_offset < TICKS_PER_CYCLE) {
- syt_offset += s->ctx_data.rx.transfer_delay;
- syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
- syt += syt_offset % TICKS_PER_CYCLE;
+ if (syt_offset >= TICKS_PER_CYCLE)
+ syt_offset = CIP_SYT_NO_INFO;
- return syt & CIP_SYT_MASK;
- } else {
- return CIP_SYT_NO_INFO;
- }
+ return syt_offset;
}
static void update_pcm_pointers(struct amdtp_stream *s,
@@ -680,8 +678,8 @@ static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
cycle += addend;
- if (cycle >= 8 * CYCLES_PER_SECOND)
- cycle -= 8 * CYCLES_PER_SECOND;
+ if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
+ cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
return cycle;
}
@@ -738,21 +736,41 @@ static int generate_device_pkt_descs(struct amdtp_stream *s,
return 0;
}
-static void generate_ideal_pkt_descs(struct amdtp_stream *s,
- struct pkt_desc *descs,
- const __be32 *ctx_header,
- unsigned int packets)
+static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
+ unsigned int transfer_delay)
+{
+ unsigned int syt;
+
+ syt_offset += transfer_delay;
+ syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
+ (syt_offset % TICKS_PER_CYCLE);
+ return syt & CIP_SYT_MASK;
+}
+
+static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
+ const __be32 *ctx_header, unsigned int packets,
+ const struct seq_desc *seq_descs,
+ unsigned int seq_size)
{
unsigned int dbc = s->data_block_counter;
+ unsigned int seq_index = s->ctx_data.rx.seq_index;
int i;
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
unsigned int index = (s->packet_index + i) % s->queue_size;
+ const struct seq_desc *seq = seq_descs + seq_index;
+ unsigned int syt;
desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
- desc->syt = calculate_syt(s, desc->cycle);
- desc->data_blocks = calculate_data_blocks(s, desc->syt);
+
+ syt = seq->syt_offset;
+ if (syt != CIP_SYT_NO_INFO) {
+ syt = compute_syt(syt, desc->cycle,
+ s->ctx_data.rx.transfer_delay);
+ }
+ desc->syt = syt;
+ desc->data_blocks = seq->data_blocks;
if (s->flags & CIP_DBC_IS_END_EVENT)
dbc = (dbc + desc->data_blocks) & 0xff;
@@ -764,10 +782,13 @@ static void generate_ideal_pkt_descs(struct amdtp_stream *s,
desc->ctx_payload = s->buffer.packets[index].buffer;
+ seq_index = (seq_index + 1) % seq_size;
+
++ctx_header;
}
s->data_block_counter = dbc;
+ s->ctx_data.rx.seq_index = seq_index;
}
static inline void cancel_stream(struct amdtp_stream *s)
@@ -791,24 +812,16 @@ static void process_ctx_payloads(struct amdtp_stream *s,
update_pcm_pointers(s, pcm, pcm_frames);
}
-static void amdtp_stream_master_callback(struct fw_iso_context *context,
- u32 tstamp, size_t header_length,
- void *header, void *private_data);
-
-static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
- u32 tstamp, size_t header_length,
- void *header, void *private_data);
-
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
size_t header_length, void *header,
void *private_data)
{
struct amdtp_stream *s = private_data;
+ const struct amdtp_domain *d = s->domain;
const __be32 *ctx_header = header;
unsigned int events_per_period = s->ctx_data.rx.events_per_period;
unsigned int event_count = s->ctx_data.rx.event_count;
unsigned int packets;
- bool is_irq_target;
int i;
if (s->packet_index < 0)
@@ -817,14 +830,11 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
// Calculate the number of packets in buffer and check XRUN.
packets = header_length / sizeof(*ctx_header);
- generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);
+ generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
+ d->seq_size);
process_ctx_payloads(s, s->pkt_descs, packets);
- is_irq_target =
- !!(context->callback.sc == amdtp_stream_master_callback ||
- context->callback.sc == amdtp_stream_master_first_callback);
-
for (i = 0; i < packets; ++i) {
const struct pkt_desc *desc = s->pkt_descs + i;
unsigned int syt;
@@ -843,7 +853,7 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
desc->data_blocks, desc->data_block_counter,
syt, i);
- if (is_irq_target) {
+ if (s == s->domain->irq_target) {
event_count += desc->data_blocks;
if (event_count >= events_per_period) {
event_count -= events_per_period;
@@ -896,14 +906,63 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
}
}
-static void amdtp_stream_master_callback(struct fw_iso_context *context,
- u32 tstamp, size_t header_length,
- void *header, void *private_data)
+static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
{
- struct amdtp_domain *d = private_data;
struct amdtp_stream *irq_target = d->irq_target;
+ unsigned int seq_tail = d->seq_tail;
+ unsigned int seq_size = d->seq_size;
+ unsigned int min_avail;
struct amdtp_stream *s;
+ min_avail = d->seq_size;
+ list_for_each_entry(s, &d->streams, list) {
+ unsigned int seq_index;
+ unsigned int avail;
+
+ if (s->direction == AMDTP_IN_STREAM)
+ continue;
+
+ seq_index = s->ctx_data.rx.seq_index;
+ avail = d->seq_tail;
+ if (seq_index > avail)
+ avail += d->seq_size;
+ avail -= seq_index;
+
+ if (avail < min_avail)
+ min_avail = avail;
+ }
+
+ while (min_avail < packets) {
+ struct seq_desc *desc = d->seq_descs + seq_tail;
+
+ desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
+ &d->syt_offset_state, irq_target->sfc);
+ desc->data_blocks = calculate_data_blocks(&d->data_block_state,
+ !!(irq_target->flags & CIP_BLOCKING),
+ desc->syt_offset == CIP_SYT_NO_INFO,
+ irq_target->syt_interval, irq_target->sfc);
+
+ ++seq_tail;
+ seq_tail %= seq_size;
+
+ ++min_avail;
+ }
+
+ d->seq_tail = seq_tail;
+}
+
+static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
+ size_t header_length, void *header,
+ void *private_data)
+{
+ struct amdtp_stream *irq_target = private_data;
+ struct amdtp_domain *d = irq_target->domain;
+ unsigned int packets = header_length / sizeof(__be32);
+ struct amdtp_stream *s;
+
+ // Pool enough sequence entries, with at least 3 extra cycles of margin.
+ pool_ideal_seq_descs(d, packets + 3);
+
out_stream_callback(context, tstamp, header_length, header, irq_target);
if (amdtp_streaming_error(irq_target))
goto error;
@@ -950,7 +1009,10 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
} else {
cycle = compute_it_cycle(*ctx_header, s->queue_size);
- context->callback.sc = out_stream_callback;
+ if (s == s->domain->irq_target)
+ context->callback.sc = irq_target_callback;
+ else
+ context->callback.sc = out_stream_callback;
}
s->start_cycle = cycle;
@@ -958,64 +1020,29 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
context->callback.sc(context, tstamp, header_length, header, s);
}
-static void amdtp_stream_master_first_callback(struct fw_iso_context *context,
- u32 tstamp, size_t header_length,
- void *header, void *private_data)
-{
- struct amdtp_domain *d = private_data;
- struct amdtp_stream *s = d->irq_target;
- const __be32 *ctx_header = header;
-
- s->callbacked = true;
- wake_up(&s->callback_wait);
-
- s->start_cycle = compute_it_cycle(*ctx_header, s->queue_size);
-
- context->callback.sc = amdtp_stream_master_callback;
-
- context->callback.sc(context, tstamp, header_length, header, d);
-}
-
/**
* amdtp_stream_start - start transferring packets
* @s: the AMDTP stream to start
* @channel: the isochronous channel on the bus
* @speed: firewire speed code
- * @d: the AMDTP domain to which the AMDTP stream belongs
- * @is_irq_target: whether isoc context for the AMDTP stream is used to generate
- * hardware IRQ.
* @start_cycle: the isochronous cycle to start the context. Start immediately
* if negative value is given.
+ * @queue_size: the number of packets in the queue.
+ * @idle_irq_interval: the interval at which to queue packets in the initial state.
*
* The stream cannot be started until it has been configured with
* amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
* device can be started.
*/
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
- struct amdtp_domain *d, bool is_irq_target,
- int start_cycle)
+ int start_cycle, unsigned int queue_size,
+ unsigned int idle_irq_interval)
{
- static const struct {
- unsigned int data_block;
- unsigned int syt_offset;
- } *entry, initial_state[] = {
- [CIP_SFC_32000] = { 4, 3072 },
- [CIP_SFC_48000] = { 6, 1024 },
- [CIP_SFC_96000] = { 12, 1024 },
- [CIP_SFC_192000] = { 24, 1024 },
- [CIP_SFC_44100] = { 0, 67 },
- [CIP_SFC_88200] = { 0, 67 },
- [CIP_SFC_176400] = { 0, 67 },
- };
- unsigned int events_per_buffer = d->events_per_buffer;
- unsigned int events_per_period = d->events_per_period;
- unsigned int idle_irq_interval;
+ bool is_irq_target = (s == s->domain->irq_target);
unsigned int ctx_header_size;
unsigned int max_ctx_payload_size;
enum dma_data_direction dir;
int type, tag, err;
- fw_iso_callback_t ctx_cb;
- void *ctx_data;
mutex_lock(&s->mutex);
@@ -1034,12 +1061,7 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
s->data_block_counter = UINT_MAX;
} else {
- entry = &initial_state[s->sfc];
-
s->data_block_counter = 0;
- s->ctx_data.rx.data_block_state = entry->data_block;
- s->ctx_data.rx.syt_offset_state = entry->syt_offset;
- s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
}
/* initialize packet buffer */
@@ -1063,37 +1085,15 @@ static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
- // This is a case that AMDTP streams in domain run just for MIDI
- // substream. Use the number of events equivalent to 10 msec as
- // interval of hardware IRQ.
- if (events_per_period == 0)
- events_per_period = amdtp_rate_table[s->sfc] / 100;
- if (events_per_buffer == 0)
- events_per_buffer = events_per_period * 3;
-
- idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
- amdtp_rate_table[s->sfc]);
- s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
- amdtp_rate_table[s->sfc]);
-
- err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
+ err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
-
- if (is_irq_target) {
- s->ctx_data.rx.events_per_period = events_per_period;
- s->ctx_data.rx.event_count = 0;
- ctx_cb = amdtp_stream_master_first_callback;
- ctx_data = d;
- } else {
- ctx_cb = amdtp_stream_first_callback;
- ctx_data = s;
- }
+ s->queue_size = queue_size;
s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
type, channel, speed, ctx_header_size,
- ctx_cb, ctx_data);
+ amdtp_stream_first_callback, s);
if (IS_ERR(s->context)) {
err = PTR_ERR(s->context);
if (err == -EBUSY)
@@ -1302,6 +1302,8 @@ int amdtp_domain_init(struct amdtp_domain *d)
d->events_per_period = 0;
+ d->seq_descs = NULL;
+
return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);
@@ -1338,6 +1340,7 @@ int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
s->channel = channel;
s->speed = speed;
+ s->domain = d;
return 0;
}
@@ -1374,6 +1377,22 @@ static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
*/
int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
+ static const struct {
+ unsigned int data_block;
+ unsigned int syt_offset;
+ } *entry, initial_state[] = {
+ [CIP_SFC_32000] = { 4, 3072 },
+ [CIP_SFC_48000] = { 6, 1024 },
+ [CIP_SFC_96000] = { 12, 1024 },
+ [CIP_SFC_192000] = { 24, 1024 },
+ [CIP_SFC_44100] = { 0, 67 },
+ [CIP_SFC_88200] = { 0, 67 },
+ [CIP_SFC_176400] = { 0, 67 },
+ };
+ unsigned int events_per_buffer = d->events_per_buffer;
+ unsigned int events_per_period = d->events_per_period;
+ unsigned int idle_irq_interval;
+ unsigned int queue_size;
struct amdtp_stream *s;
int cycle;
int err;
@@ -1387,12 +1406,34 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
return -ENXIO;
d->irq_target = s;
+ // This is the case where AMDTP streams in the domain run just for a
+ // MIDI substream. Use the number of events equivalent to 10 msec as
+ // the interval of hardware IRQ.
+ if (events_per_period == 0)
+ events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
+ if (events_per_buffer == 0)
+ events_per_buffer = events_per_period * 3;
+
+ queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
+ amdtp_rate_table[d->irq_target->sfc]);
+
+ d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
+ if (!d->seq_descs)
+ return -ENOMEM;
+ d->seq_size = queue_size;
+ d->seq_tail = 0;
+
+ entry = &initial_state[s->sfc];
+ d->data_block_state = entry->data_block;
+ d->syt_offset_state = entry->syt_offset;
+ d->last_syt_offset = TICKS_PER_CYCLE;
+
if (ir_delay_cycle > 0) {
struct fw_card *fw_card = fw_parent_device(s->unit)->card;
err = get_current_cycle_time(fw_card, &cycle);
if (err < 0)
- return err;
+ goto error;
// No need to care about overflow in the cycle field because of its
// sufficient width.
@@ -1423,18 +1464,26 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
} else {
// IT context starts immediately.
cycle_match = -1;
+ s->ctx_data.rx.seq_index = 0;
}
if (s != d->irq_target) {
- err = amdtp_stream_start(s, s->channel, s->speed, d,
- false, cycle_match);
+ err = amdtp_stream_start(s, s->channel, s->speed,
+ cycle_match, queue_size, 0);
if (err < 0)
goto error;
}
}
s = d->irq_target;
- err = amdtp_stream_start(s, s->channel, s->speed, d, true, -1);
+ s->ctx_data.rx.events_per_period = events_per_period;
+ s->ctx_data.rx.event_count = 0;
+ s->ctx_data.rx.seq_index = 0;
+
+ idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
+ amdtp_rate_table[d->irq_target->sfc]);
+ err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
+ idle_irq_interval);
if (err < 0)
goto error;
@@ -1442,6 +1491,8 @@ int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
error:
list_for_each_entry(s, &d->streams, list)
amdtp_stream_stop(s);
+ kfree(d->seq_descs);
+ d->seq_descs = NULL;
return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
@@ -1466,5 +1517,8 @@ void amdtp_domain_stop(struct amdtp_domain *d)
d->events_per_period = 0;
d->irq_target = NULL;
+
+ kfree(d->seq_descs);
+ d->seq_descs = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
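The 44.1 kHz sequences that calculate_syt_offset() and calculate_data_blocks() generate (the 1386/1387-tick increments and the 6 6 5 6 5 ... data-block pattern) can be checked in isolation. The sketch below reuses the same recurrences; the harness, the helper names next_syt_increment()/next_data_blocks(), and the assumption that SYT_INTERVAL is 8 at 44.1 kHz are illustrative, not driver code.

/* Standalone check of the 44.1 kHz sequences generated above. */
#include <stdio.h>

/* Same recurrence as calculate_syt_offset() for the 44.1 kHz family:
 * +1386 ticks per cycle, with the extra ticks spread over 147 steps.
 */
static unsigned int next_syt_increment(unsigned int *phase)
{
	unsigned int index = *phase % 13;
	unsigned int inc = 1386 + ((index && !(index & 3)) || *phase == 146);

	if (++(*phase) >= 147)
		*phase = 0;
	return inc;
}

/* Same recurrence as calculate_data_blocks() for CIP_SFC_44100:
 * the 6 6 5 6 5 ... pattern over an 80-cycle period.
 */
static unsigned int next_data_blocks(unsigned int *phase)
{
	unsigned int blocks = 5 + ((*phase & 1) ^ (*phase == 0 || *phase >= 40));

	if (++(*phase) >= 80)
		*phase = 0;
	return blocks;
}

int main(void)
{
	unsigned long long ticks = 0, blocks = 0;
	unsigned int phase = 0;
	unsigned int i;

	/* One period of 147 steps accumulates exactly 147 * 1386 + 34 ticks,
	 * i.e. 147 * (SYT_INTERVAL * 24576000 / 44100 - 3072) with
	 * SYT_INTERVAL = 8 at 44.1 kHz.
	 */
	for (i = 0; i < 147; ++i)
		ticks += next_syt_increment(&phase);
	printf("syt ticks per period: %llu (expected %llu)\n",
	       ticks, 147ULL * 1386 + 34);

	/* One period of 80 cycles carries 441 data blocks: 44100 / 8000 * 80. */
	phase = 0;
	for (i = 0; i < 80; ++i)
		blocks += next_data_blocks(&phase);
	printf("data blocks per period: %llu (expected 441)\n", blocks);

	return 0;
}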
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index f2d44e2dc3c8..703b710aaf7f 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -108,6 +108,8 @@ typedef unsigned int (*amdtp_stream_process_ctx_payloads_t)(
const struct pkt_desc *desc,
unsigned int packets,
struct snd_pcm_substream *pcm);
+
+struct amdtp_domain;
struct amdtp_stream {
struct fw_unit *unit;
enum cip_flags flags;
@@ -136,9 +138,7 @@ struct amdtp_stream {
struct {
// To calculate CIP data blocks and tstamp.
unsigned int transfer_delay;
- unsigned int data_block_state;
- unsigned int last_syt_offset;
- unsigned int syt_offset_state;
+ unsigned int seq_index;
// To generate CIP header.
unsigned int fdf;
@@ -180,6 +180,7 @@ struct amdtp_stream {
int channel;
int speed;
struct list_head list;
+ struct amdtp_domain *domain;
};
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
@@ -273,6 +274,11 @@ static inline bool amdtp_stream_wait_callback(struct amdtp_stream *s,
msecs_to_jiffies(timeout)) > 0;
}
+struct seq_desc {
+ unsigned int syt_offset;
+ unsigned int data_blocks;
+};
+
struct amdtp_domain {
struct list_head streams;
@@ -280,6 +286,14 @@ struct amdtp_domain {
unsigned int events_per_buffer;
struct amdtp_stream *irq_target;
+
+ struct seq_desc *seq_descs;
+ unsigned int seq_size;
+ unsigned int seq_tail;
+
+ unsigned int data_block_state;
+ unsigned int syt_offset_state;
+ unsigned int last_syt_offset;
};
int amdtp_domain_init(struct amdtp_domain *d);
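The availability computation in pool_ideal_seq_descs() is plain modular arithmetic over the new seq_descs ring: how far a stream's consumer index lags the producer tail. A standalone sketch, with seq_avail() as an invented helper name:

#include <stdio.h>

/* Entries pooled at seq_tail but not yet consumed at seq_index, on a
 * ring of seq_size entries.
 */
static unsigned int seq_avail(unsigned int seq_tail, unsigned int seq_index,
			      unsigned int seq_size)
{
	unsigned int avail = seq_tail;

	if (seq_index > avail)
		avail += seq_size;
	return avail - seq_index;
}

int main(void)
{
	/* Consumer behind the producer, and consumer past a wrap-around. */
	printf("%u\n", seq_avail(10, 4, 32));	/* 6 */
	printf("%u\n", seq_avail(3, 28, 32));	/* 7 */
	return 0;
}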
diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c
index 0e4c3a9ed5e4..8d3b23778eb2 100644
--- a/sound/firewire/fireface/ff-protocol-latter.c
+++ b/sound/firewire/fireface/ff-protocol-latter.c
@@ -16,7 +16,8 @@
#define LATTER_SYNC_STATUS 0x0000801c0000ULL
static int parse_clock_bits(u32 data, unsigned int *rate,
- enum snd_ff_clock_src *src)
+ enum snd_ff_clock_src *src,
+ enum snd_ff_unit_version unit_version)
{
static const struct {
unsigned int rate;
@@ -43,6 +44,11 @@ static int parse_clock_bits(u32 data, unsigned int *rate,
};
int i;
+ if (unit_version != SND_FF_UNIT_VERSION_UCX) {
+ // e.g. 0x00fe0f20 but expected 0x00eff002.
+ data = ((data & 0xf0f0f0f0) >> 4) | ((data & 0x0f0f0f0f) << 4);
+ }
+
for (i = 0; i < ARRAY_SIZE(rate_entries); ++i) {
rate_entry = rate_entries + i;
if ((data & 0x0f000000) == rate_entry->flag) {
@@ -79,7 +85,7 @@ static int latter_get_clock(struct snd_ff *ff, unsigned int *rate,
return err;
data = le32_to_cpu(reg);
- return parse_clock_bits(data, rate, src);
+ return parse_clock_bits(data, rate, src, ff->unit_version);
}
static int latter_switch_fetching_mode(struct snd_ff *ff, bool enable)
@@ -107,18 +113,18 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate)
int err;
// Set the number of data blocks transferred in a second.
- if (rate % 32000 == 0)
- code = 0x00;
+ if (rate % 48000 == 0)
+ code = 0x04;
else if (rate % 44100 == 0)
code = 0x02;
- else if (rate % 48000 == 0)
- code = 0x04;
+ else if (rate % 32000 == 0)
+ code = 0x00;
else
return -EINVAL;
if (rate >= 64000 && rate < 128000)
code |= 0x08;
- else if (rate >= 128000 && rate < 192000)
+ else if (rate >= 128000)
code |= 0x10;
reg = cpu_to_le32(code);
@@ -140,7 +146,7 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate)
if (curr_rate == rate)
break;
}
- if (count == 10)
+ if (count > 10)
return -ETIMEDOUT;
for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); ++i) {
@@ -181,14 +187,30 @@ static int latter_begin_session(struct snd_ff *ff, unsigned int rate)
__le32 reg;
int err;
- if (rate >= 32000 && rate <= 48000)
- flag = 0x92;
- else if (rate >= 64000 && rate <= 96000)
- flag = 0x8e;
- else if (rate >= 128000 && rate <= 192000)
- flag = 0x8c;
- else
- return -EINVAL;
+ if (ff->unit_version == SND_FF_UNIT_VERSION_UCX) {
+ // For Fireface UCX. Always use the maximum number of data
+ // channels in data block of packet.
+ if (rate >= 32000 && rate <= 48000)
+ flag = 0x92;
+ else if (rate >= 64000 && rate <= 96000)
+ flag = 0x8e;
+ else if (rate >= 128000 && rate <= 192000)
+ flag = 0x8c;
+ else
+ return -EINVAL;
+ } else {
+ // For Fireface UFX and 802. Due to the bandwidth limitation of
+ // IEEE 1394a (400 Mbps), Analog 1-12 and AES are available
+ // without any ADAT channels at quadruple speed.
+ if (rate >= 32000 && rate <= 48000)
+ flag = 0x9e;
+ else if (rate >= 64000 && rate <= 96000)
+ flag = 0x96;
+ else if (rate >= 128000 && rate <= 192000)
+ flag = 0x8e;
+ else
+ return -EINVAL;
+ }
if (generation != fw_parent_device(ff->unit)->card->generation) {
err = fw_iso_resources_update(&ff->tx_resources);
@@ -207,8 +229,6 @@ static int latter_begin_session(struct snd_ff *ff, unsigned int rate)
if (err < 0)
return err;
- // Always use the maximum number of data channels in data block of
- // packet.
reg = cpu_to_le32(flag);
return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
LATTER_ISOC_START, &reg, sizeof(reg), 0);
@@ -263,7 +283,7 @@ static void latter_dump_status(struct snd_ff *ff, struct snd_info_buffer *buffer
}
}
- err = parse_clock_bits(data, &rate, &src);
+ err = parse_clock_bits(data, &rate, &src, ff->unit_version);
if (err < 0)
return;
label = snd_ff_proc_get_clk_label(src);
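The nibble swap added to parse_clock_bits() for non-UCX units exchanges the high and low nibbles within each byte, which is how the observed 0x00fe0f20 becomes the expected 0x00eff002. A minimal standalone check (swap_nibbles() is an invented name):

#include <stdint.h>
#include <stdio.h>

/* Swap the two nibbles of every byte in the 32-bit register value. */
static uint32_t swap_nibbles(uint32_t data)
{
	return ((data & 0xf0f0f0f0u) >> 4) | ((data & 0x0f0f0f0fu) << 4);
}

int main(void)
{
	uint32_t raw = 0x00fe0f20u;

	printf("0x%08x -> 0x%08x\n", raw, swap_nibbles(raw));	/* 0x00eff002 */
	return 0;
}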
diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
index 63b79c4a5405..5452115c0ef9 100644
--- a/sound/firewire/fireface/ff-stream.c
+++ b/sound/firewire/fireface/ff-stream.c
@@ -184,7 +184,6 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
*/
if (!amdtp_stream_running(&ff->rx_stream)) {
int spd = fw_parent_device(ff->unit)->max_speed;
- unsigned int ir_delay_cycle;
err = ff->spec->protocol->begin_session(ff, rate);
if (err < 0)
@@ -200,14 +199,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (err < 0)
goto error;
- // The device postpones start of transmission mostly for several
- // cycles after receiving packets firstly.
- if (ff->spec->protocol == &snd_ff_protocol_ff800)
- ir_delay_cycle = 800; // = 100 msec
- else
- ir_delay_cycle = 16; // = 2 msec
-
- err = amdtp_domain_start(&ff->domain, ir_delay_cycle);
+ err = amdtp_domain_start(&ff->domain, 0);
if (err < 0)
goto error;
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index b62a4fd22407..bc39269415d2 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -16,12 +16,22 @@ MODULE_LICENSE("GPL v2");
static void name_card(struct snd_ff *ff)
{
struct fw_device *fw_dev = fw_parent_device(ff->unit);
+ const char *const names[] = {
+ [SND_FF_UNIT_VERSION_FF800] = "Fireface800",
+ [SND_FF_UNIT_VERSION_FF400] = "Fireface400",
+ [SND_FF_UNIT_VERSION_UFX] = "FirefaceUFX",
+ [SND_FF_UNIT_VERSION_UCX] = "FirefaceUCX",
+ [SND_FF_UNIT_VERSION_802] = "Fireface802",
+ };
+ const char *name;
+
+ name = names[ff->unit_version];
strcpy(ff->card->driver, "Fireface");
- strcpy(ff->card->shortname, ff->spec->name);
- strcpy(ff->card->mixername, ff->spec->name);
+ strcpy(ff->card->shortname, name);
+ strcpy(ff->card->mixername, name);
snprintf(ff->card->longname, sizeof(ff->card->longname),
- "RME %s, GUID %08x%08x at %s, S%d", ff->spec->name,
+ "RME %s, GUID %08x%08x at %s, S%d", name,
fw_dev->config_rom[3], fw_dev->config_rom[4],
dev_name(&ff->unit->device), 100 << fw_dev->max_speed);
}
@@ -101,6 +111,7 @@ static int snd_ff_probe(struct fw_unit *unit,
spin_lock_init(&ff->lock);
init_waitqueue_head(&ff->hwdep_wait);
+ ff->unit_version = entry->version;
ff->spec = (const struct snd_ff_spec *)entry->driver_data;
/* Register this sound card later. */
@@ -145,7 +156,6 @@ static void snd_ff_remove(struct fw_unit *unit)
}
static const struct snd_ff_spec spec_ff800 = {
- .name = "Fireface800",
.pcm_capture_channels = {28, 20, 12},
.pcm_playback_channels = {28, 20, 12},
.midi_in_ports = 1,
@@ -157,7 +167,6 @@ static const struct snd_ff_spec spec_ff800 = {
};
static const struct snd_ff_spec spec_ff400 = {
- .name = "Fireface400",
.pcm_capture_channels = {18, 14, 10},
.pcm_playback_channels = {18, 14, 10},
.midi_in_ports = 2,
@@ -169,7 +178,6 @@ static const struct snd_ff_spec spec_ff400 = {
};
static const struct snd_ff_spec spec_ucx = {
- .name = "FirefaceUCX",
.pcm_capture_channels = {18, 14, 12},
.pcm_playback_channels = {18, 14, 12},
.midi_in_ports = 2,
@@ -180,6 +188,17 @@ static const struct snd_ff_spec spec_ucx = {
.midi_rx_addrs = {0xffff00000030ull, 0xffff00000030ull},
};
+static const struct snd_ff_spec spec_ufx_802 = {
+ .pcm_capture_channels = {30, 22, 14},
+ .pcm_playback_channels = {30, 22, 14},
+ .midi_in_ports = 1,
+ .midi_out_ports = 1,
+ .protocol = &snd_ff_protocol_latter,
+ .midi_high_addr = 0xffff00000034ull,
+ .midi_addr_range = 0x80,
+ .midi_rx_addrs = {0xffff00000030ull, 0xffff00000030ull},
+};
+
static const struct ieee1394_device_id snd_ff_id_table[] = {
/* Fireface 800 */
{
@@ -189,7 +208,7 @@ static const struct ieee1394_device_id snd_ff_id_table[] = {
IEEE1394_MATCH_MODEL_ID,
.vendor_id = OUI_RME,
.specifier_id = OUI_RME,
- .version = 0x000001,
+ .version = SND_FF_UNIT_VERSION_FF800,
.model_id = 0x101800,
.driver_data = (kernel_ulong_t)&spec_ff800,
},
@@ -201,10 +220,22 @@ static const struct ieee1394_device_id snd_ff_id_table[] = {
IEEE1394_MATCH_MODEL_ID,
.vendor_id = OUI_RME,
.specifier_id = OUI_RME,
- .version = 0x000002,
+ .version = SND_FF_UNIT_VERSION_FF400,
.model_id = 0x101800,
.driver_data = (kernel_ulong_t)&spec_ff400,
},
+ // Fireface UFX.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_RME,
+ .specifier_id = OUI_RME,
+ .version = SND_FF_UNIT_VERSION_UFX,
+ .model_id = 0x101800,
+ .driver_data = (kernel_ulong_t)&spec_ufx_802,
+ },
// Fireface UCX.
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
@@ -213,10 +244,22 @@ static const struct ieee1394_device_id snd_ff_id_table[] = {
IEEE1394_MATCH_MODEL_ID,
.vendor_id = OUI_RME,
.specifier_id = OUI_RME,
- .version = 0x000004,
+ .version = SND_FF_UNIT_VERSION_UCX,
.model_id = 0x101800,
.driver_data = (kernel_ulong_t)&spec_ucx,
},
+ // Fireface 802.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_RME,
+ .specifier_id = OUI_RME,
+ .version = SND_FF_UNIT_VERSION_802,
+ .model_id = 0x101800,
+ .driver_data = (kernel_ulong_t)&spec_ufx_802,
+ },
{}
};
MODULE_DEVICE_TABLE(ieee1394, snd_ff_id_table);
diff --git a/sound/firewire/fireface/ff.h b/sound/firewire/fireface/ff.h
index dc7a20f75983..705e7df4f929 100644
--- a/sound/firewire/fireface/ff.h
+++ b/sound/firewire/fireface/ff.h
@@ -34,6 +34,14 @@
#define SND_FF_IN_MIDI_PORTS 2
#define SND_FF_OUT_MIDI_PORTS 2
+enum snd_ff_unit_version {
+ SND_FF_UNIT_VERSION_FF800 = 0x000001,
+ SND_FF_UNIT_VERSION_FF400 = 0x000002,
+ SND_FF_UNIT_VERSION_UFX = 0x000003,
+ SND_FF_UNIT_VERSION_UCX = 0x000004,
+ SND_FF_UNIT_VERSION_802 = 0x000005,
+};
+
enum snd_ff_stream_mode {
SND_FF_STREAM_MODE_LOW = 0,
SND_FF_STREAM_MODE_MID,
@@ -43,8 +51,6 @@ enum snd_ff_stream_mode {
struct snd_ff_protocol;
struct snd_ff_spec {
- const char *const name;
-
const unsigned int pcm_capture_channels[SND_FF_STREAM_MODE_COUNT];
const unsigned int pcm_playback_channels[SND_FF_STREAM_MODE_COUNT];
@@ -66,6 +72,7 @@ struct snd_ff {
bool registered;
struct delayed_work dwork;
+ enum snd_ff_unit_version unit_version;
const struct snd_ff_spec *spec;
/* To handle MIDI tx. */
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index dda797209a27..654e28a6669f 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -177,7 +177,7 @@ struct snd_efw_phys_meters {
u32 in_meters;
u32 reserved4;
u32 reserved5;
- u32 values[0];
+ u32 values[];
} __packed;
enum snd_efw_clock_source {
SND_EFW_CLOCK_SOURCE_INTERNAL = 0,
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index 0fd36e469ad0..edb31ac26868 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -76,15 +76,11 @@ int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
if (i == ARRAY_SIZE(snd_motu_clock_rates))
return -EINVAL;
- pcm_chunks = formats->fixed_part_pcm_chunks[mode] +
- formats->differed_part_pcm_chunks[mode];
+ // Each data block includes an SPH at its head. Data chunks follow
+ // with 3-byte alignment. Zero padding follows to conform to quadlet
+ // alignment.
+ pcm_chunks = formats->pcm_chunks[mode];
data_chunks = formats->msg_chunks + pcm_chunks;
-
- /*
- * Each data block includes SPH in its head. Data chunks follow with
- * 3 byte alignment. Padding follows with zero to conform to quadlet
- * alignment.
- */
data_block_quadlets = 1 + DIV_ROUND_UP(data_chunks * 3, 4);
err = amdtp_stream_set_parameters(s, rate, data_block_quadlets);
@@ -440,7 +436,7 @@ static unsigned int process_it_ctx_payloads(struct amdtp_stream *s,
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
- const struct snd_motu_protocol *const protocol)
+ const struct snd_motu_spec *spec)
{
amdtp_stream_process_ctx_payloads_t process_ctx_payloads;
int fmt = CIP_FMT_MOTU;
@@ -454,14 +450,15 @@ int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
* Units of version 3 transmit packets whose CIP headers do not conform
* to IEC 61883-1.
*/
- if (protocol == &snd_motu_protocol_v3) {
+ if (spec->protocol_version == SND_MOTU_PROTOCOL_V3) {
flags |= CIP_WRONG_DBS |
CIP_SKIP_DBC_ZERO_CHECK |
CIP_HEADER_WITHOUT_EOH;
fmt = CIP_FMT_MOTU_TX_V3;
}
- if (protocol == &snd_motu_protocol_v2) {
+ if (spec == &snd_motu_spec_8pre ||
+ spec == &snd_motu_spec_ultralite) {
// 8pre has some quirks.
flags |= CIP_WRONG_DBS |
CIP_SKIP_DBC_ZERO_CHECK;
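The quadlet sizing in amdtp_motu_set_parameters() packs each data chunk into 3 bytes after a one-quadlet SPH and rounds up to quadlet alignment. The sketch below applies that formula to the Traveler at 1x rate with ADAT on the optical input (14 fixed PCM chunks + 8 ADAT chunks + 2 message chunks, per the tables in this change); motu_data_block_quadlets() is an invented helper, not a driver function.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* One quadlet of SPH, then the data chunks at 3 bytes each, padded up
 * to the next quadlet boundary.
 */
static unsigned int motu_data_block_quadlets(unsigned int pcm_chunks,
					     unsigned int msg_chunks)
{
	unsigned int data_chunks = msg_chunks + pcm_chunks;

	return 1 + DIV_ROUND_UP(data_chunks * 3, 4);
}

int main(void)
{
	/* 14 fixed PCM chunks + 8 for ADAT on the optical interface. */
	unsigned int pcm_chunks = 14 + 8;

	printf("data_block_quadlets = %u\n",
	       motu_data_block_quadlets(pcm_chunks, 2));	/* 19 */
	return 0;
}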
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index 2d41a1a4052c..8e1437371263 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -26,8 +26,7 @@ static int motu_rate_constraint(struct snd_pcm_hw_params *params,
rate = snd_motu_clock_rates[i];
mode = i / 2;
- pcm_channels = formats->fixed_part_pcm_chunks[mode] +
- formats->differed_part_pcm_chunks[mode];
+ pcm_channels = formats->pcm_chunks[mode];
if (!snd_interval_test(c, pcm_channels))
continue;
@@ -59,8 +58,7 @@ static int motu_channels_constraint(struct snd_pcm_hw_params *params,
if (!snd_interval_test(r, rate))
continue;
- pcm_channels = formats->fixed_part_pcm_chunks[mode] +
- formats->differed_part_pcm_chunks[mode];
+ pcm_channels = formats->pcm_chunks[mode];
channels.min = min(channels.min, pcm_channels);
channels.max = max(channels.max, pcm_channels);
}
@@ -82,8 +80,7 @@ static void limit_channels_and_rates(struct snd_motu *motu,
rate = snd_motu_clock_rates[i];
mode = i / 2;
- pcm_channels = formats->fixed_part_pcm_chunks[mode] +
- formats->differed_part_pcm_chunks[mode];
+ pcm_channels = formats->pcm_chunks[mode];
if (pcm_channels == 0)
continue;
@@ -133,7 +130,6 @@ static int init_hw_info(struct snd_motu *motu,
static int pcm_open(struct snd_pcm_substream *substream)
{
struct snd_motu *motu = substream->private_data;
- const struct snd_motu_protocol *const protocol = motu->spec->protocol;
struct amdtp_domain *d = &motu->domain;
enum snd_motu_clock_source src;
int err;
@@ -152,7 +148,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto err_locked;
- err = protocol->get_clock_source(motu, &src);
+ err = snd_motu_protocol_get_clock_source(motu, &src);
if (err < 0)
goto err_locked;
@@ -166,7 +162,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
unsigned int frames_per_buffer = d->events_per_buffer;
unsigned int rate;
- err = protocol->get_clock_rate(motu, &rate);
+ err = snd_motu_protocol_get_clock_rate(motu, &rate);
if (err < 0)
goto err_locked;
diff --git a/sound/firewire/motu/motu-proc.c b/sound/firewire/motu/motu-proc.c
index 187f6abd878c..f009cf7aa074 100644
--- a/sound/firewire/motu/motu-proc.c
+++ b/sound/firewire/motu/motu-proc.c
@@ -28,13 +28,12 @@ static void proc_read_clock(struct snd_info_entry *entry,
{
struct snd_motu *motu = entry->private_data;
- const struct snd_motu_protocol *const protocol = motu->spec->protocol;
unsigned int rate;
enum snd_motu_clock_source source;
- if (protocol->get_clock_rate(motu, &rate) < 0)
+ if (snd_motu_protocol_get_clock_rate(motu, &rate) < 0)
return;
- if (protocol->get_clock_source(motu, &source) < 0)
+ if (snd_motu_protocol_get_clock_source(motu, &source) < 0)
return;
snd_iprintf(buffer, "Rate:\t%d\n", rate);
@@ -45,15 +44,14 @@ static void proc_read_format(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_motu *motu = entry->private_data;
- const struct snd_motu_protocol *const protocol = motu->spec->protocol;
unsigned int mode;
struct snd_motu_packet_format *formats;
int i;
- if (protocol->cache_packet_formats(motu) < 0)
+ if (snd_motu_protocol_cache_packet_formats(motu) < 0)
return;
- snd_iprintf(buffer, "tx:\tmsg\tfixed\tdiffered\n");
+ snd_iprintf(buffer, "tx:\tmsg\tfixed\ttotal\n");
for (i = 0; i < SND_MOTU_CLOCK_RATE_COUNT; ++i) {
mode = i >> 1;
@@ -62,11 +60,11 @@ static void proc_read_format(struct snd_info_entry *entry,
"%u:\t%u\t%u\t%u\n",
snd_motu_clock_rates[i],
formats->msg_chunks,
- formats->fixed_part_pcm_chunks[mode],
- formats->differed_part_pcm_chunks[mode]);
+ motu->spec->tx_fixed_pcm_chunks[mode],
+ formats->pcm_chunks[mode]);
}
- snd_iprintf(buffer, "rx:\tmsg\tfixed\tdiffered\n");
+ snd_iprintf(buffer, "rx:\tmsg\tfixed\ttotal\n");
for (i = 0; i < SND_MOTU_CLOCK_RATE_COUNT; ++i) {
mode = i >> 1;
@@ -75,8 +73,8 @@ static void proc_read_format(struct snd_info_entry *entry,
"%u:\t%u\t%u\t%u\n",
snd_motu_clock_rates[i],
formats->msg_chunks,
- formats->fixed_part_pcm_chunks[mode],
- formats->differed_part_pcm_chunks[mode]);
+ motu->spec->rx_fixed_pcm_chunks[mode],
+ formats->pcm_chunks[mode]);
}
}
diff --git a/sound/firewire/motu/motu-protocol-v2.c b/sound/firewire/motu/motu-protocol-v2.c
index 619b6ae73f62..e59e69ab1538 100644
--- a/sound/firewire/motu/motu-protocol-v2.c
+++ b/sound/firewire/motu/motu-protocol-v2.c
@@ -35,7 +35,8 @@ static int get_clock_rate(u32 data, unsigned int *rate)
return 0;
}
-static int v2_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
+int snd_motu_protocol_v2_get_clock_rate(struct snd_motu *motu,
+ unsigned int *rate)
{
__be32 reg;
int err;
@@ -48,7 +49,8 @@ static int v2_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
return get_clock_rate(be32_to_cpu(reg), rate);
}
-static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
+int snd_motu_protocol_v2_set_clock_rate(struct snd_motu *motu,
+ unsigned int rate)
{
__be32 reg;
u32 data;
@@ -76,14 +78,10 @@ static int v2_set_clock_rate(struct snd_motu *motu, unsigned int rate)
sizeof(reg));
}
-static int get_clock_source(struct snd_motu *motu, u32 data,
- enum snd_motu_clock_source *src)
+static int detect_clock_source_optical_model(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
{
- unsigned int index = data & V2_CLOCK_SRC_MASK;
- if (index > 5)
- return -EIO;
-
- switch (index) {
+ switch (data) {
case 0:
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
break;
@@ -116,14 +114,50 @@ static int get_clock_source(struct snd_motu *motu, u32 data,
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_DSUB;
break;
default:
- return -EIO;
+ *src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
+ break;
}
return 0;
}
-static int v2_get_clock_source(struct snd_motu *motu,
- enum snd_motu_clock_source *src)
+static int v2_detect_clock_source(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
+{
+ switch (data) {
+ case 0:
+ *src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
+ break;
+ case 2:
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+ break;
+ case 3:
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
+ break;
+ case 4:
+ *src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
+ break;
+ default:
+ *src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int get_clock_source(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
+{
+ data &= V2_CLOCK_SRC_MASK;
+ if (motu->spec == &snd_motu_spec_828mk2 ||
+ motu->spec == &snd_motu_spec_traveler)
+ return detect_clock_source_optical_model(motu, data, src);
+ else
+ return v2_detect_clock_source(motu, data, src);
+}
+
+int snd_motu_protocol_v2_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src)
{
__be32 reg;
int err;
@@ -136,167 +170,189 @@ static int v2_get_clock_source(struct snd_motu *motu,
return get_clock_source(motu, be32_to_cpu(reg), src);
}
-static int v2_switch_fetching_mode(struct snd_motu *motu, bool enable)
+// Expected for Traveler and 896HD, which implements Altera Cyclone EP1C3.
+static int switch_fetching_mode_cyclone(struct snd_motu *motu, u32 *data,
+ bool enable)
{
- enum snd_motu_clock_source src;
- __be32 reg;
- u32 data;
- int err = 0;
+ *data |= V2_CLOCK_MODEL_SPECIFIC;
- // 828mkII implements Altera ACEX 1K EP1K30. Nothing to do.
- if (motu->spec == &snd_motu_spec_828mk2)
- return 0;
+ return 0;
+}
- err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET, &reg,
- sizeof(reg));
+// For UltraLite and 8pre, which implements Xilinx Spartan XC3S200.
+static int switch_fetching_mode_spartan(struct snd_motu *motu, u32 *data,
+ bool enable)
+{
+ unsigned int rate;
+ enum snd_motu_clock_source src;
+ int err;
+
+ err = get_clock_source(motu, *data, &src);
if (err < 0)
return err;
- data = be32_to_cpu(reg);
- err = get_clock_source(motu, data, &src);
+ err = get_clock_rate(*data, &rate);
if (err < 0)
return err;
- data &= ~(V2_CLOCK_FETCH_ENABLE | V2_CLOCK_MODEL_SPECIFIC);
- if (enable)
- data |= V2_CLOCK_FETCH_ENABLE;
+ if (src == SND_MOTU_CLOCK_SOURCE_SPH && rate > 48000)
+ *data |= V2_CLOCK_MODEL_SPECIFIC;
+
+ return 0;
+}
- if (motu->spec->flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4) {
- // Expected for Traveler and 896HD, which implements Altera
- // Cyclone EP1C3.
- data |= V2_CLOCK_MODEL_SPECIFIC;
+int snd_motu_protocol_v2_switch_fetching_mode(struct snd_motu *motu,
+ bool enable)
+{
+ if (motu->spec == &snd_motu_spec_828mk2) {
+ // 828mkII implements Altera ACEX 1K EP1K30. Nothing to do.
+ return 0;
} else {
- // For UltraLite and 8pre, which implements Xilinx Spartan
- // XC3S200.
- unsigned int rate;
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, V2_CLOCK_STATUS_OFFSET,
+ &reg, sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg);
- err = get_clock_rate(data, &rate);
+ data &= ~(V2_CLOCK_FETCH_ENABLE | V2_CLOCK_MODEL_SPECIFIC);
+ if (enable)
+ data |= V2_CLOCK_FETCH_ENABLE;
+
+ if (motu->spec == &snd_motu_spec_traveler)
+ err = switch_fetching_mode_cyclone(motu, &data, enable);
+ else
+ err = switch_fetching_mode_spartan(motu, &data, enable);
if (err < 0)
return err;
- if (src == SND_MOTU_CLOCK_SOURCE_SPH && rate > 48000)
- data |= V2_CLOCK_MODEL_SPECIFIC;
+ reg = cpu_to_be32(data);
+ return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET,
+ &reg, sizeof(reg));
}
+}
- reg = cpu_to_be32(data);
- return snd_motu_transaction_write(motu, V2_CLOCK_STATUS_OFFSET, &reg,
- sizeof(reg));
+static int detect_packet_formats_828mk2(struct snd_motu *motu, u32 data)
+{
+ if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_ADAT) {
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
+ }
+
+ if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_ADAT) {
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
+ }
+
+ return 0;
}
-static void calculate_fixed_part(struct snd_motu_packet_format *formats,
- enum amdtp_stream_direction dir,
- enum snd_motu_spec_flags flags,
- unsigned char analog_ports)
+static int detect_packet_formats_traveler(struct snd_motu *motu, u32 data)
{
- unsigned char pcm_chunks[3] = {0, 0, 0};
-
- formats->msg_chunks = 2;
-
- pcm_chunks[0] = analog_ports;
- pcm_chunks[1] = analog_ports;
- if (flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4)
- pcm_chunks[2] = analog_ports;
-
- if (dir == AMDTP_IN_STREAM) {
- if (flags & SND_MOTU_SPEC_TX_MICINST_CHUNK) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
- }
- if (flags & SND_MOTU_SPEC_TX_RETURN_CHUNK) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
- }
- } else {
- if (flags & SND_MOTU_SPEC_RX_SEPARATED_MAIN) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
- }
-
- // Packets to v2 units include 2 chunks for phone 1/2, except
- // for 176.4/192.0 kHz.
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
+ if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_ADAT) {
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
}
- if (flags & SND_MOTU_SPEC_HAS_AESEBU_IFACE) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
+ if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_ADAT) {
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
}
- /*
- * All of v2 models have a pair of coaxial interfaces for digital in/out
- * port. At 44.1/48.0/88.2/96.0 kHz, packets includes PCM from these
- * ports.
- */
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
-
- formats->fixed_part_pcm_chunks[0] = pcm_chunks[0];
- formats->fixed_part_pcm_chunks[1] = pcm_chunks[1];
- formats->fixed_part_pcm_chunks[2] = pcm_chunks[2];
+ return 0;
}
-static void calculate_differed_part(struct snd_motu_packet_format *formats,
- enum snd_motu_spec_flags flags,
- u32 data, u32 mask, u32 shift)
+static int detect_packet_formats_8pre(struct snd_motu *motu, u32 data)
{
- unsigned char pcm_chunks[2] = {0, 0};
-
- /*
- * When optical interfaces are configured for S/PDIF (TOSLINK),
- * the above PCM frames come from them, instead of coaxial
- * interfaces.
- */
- data = (data & mask) >> shift;
- if (data == V2_OPT_IFACE_MODE_ADAT) {
- if (flags & SND_MOTU_SPEC_HAS_OPT_IFACE_A) {
- pcm_chunks[0] += 8;
- pcm_chunks[1] += 4;
- }
- // 8pre has two sets of optical interface and doesn't reduce
- // chunks for ADAT signals.
- if (flags & SND_MOTU_SPEC_HAS_OPT_IFACE_B) {
- pcm_chunks[1] += 4;
- }
+ if (((data & V2_OPT_IN_IFACE_MASK) >> V2_OPT_IN_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_ADAT) {
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+ motu->tx_packet_formats.pcm_chunks[1] += 8;
}
- /* At mode x4, no data chunks are supported in this part. */
- formats->differed_part_pcm_chunks[0] = pcm_chunks[0];
- formats->differed_part_pcm_chunks[1] = pcm_chunks[1];
+ if (((data & V2_OPT_OUT_IFACE_MASK) >> V2_OPT_OUT_IFACE_SHIFT) ==
+ V2_OPT_IFACE_MODE_ADAT) {
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+ motu->rx_packet_formats.pcm_chunks[1] += 8;
+ }
+
+ return 0;
}
-static int v2_cache_packet_formats(struct snd_motu *motu)
+int snd_motu_protocol_v2_cache_packet_formats(struct snd_motu *motu)
{
__be32 reg;
u32 data;
int err;
+ motu->tx_packet_formats.pcm_byte_offset = 10;
+ motu->rx_packet_formats.pcm_byte_offset = 10;
+
+ motu->tx_packet_formats.msg_chunks = 2;
+ motu->rx_packet_formats.msg_chunks = 2;
+
err = snd_motu_transaction_read(motu, V2_IN_OUT_CONF_OFFSET, &reg,
sizeof(reg));
if (err < 0)
return err;
data = be32_to_cpu(reg);
- calculate_fixed_part(&motu->tx_packet_formats, AMDTP_IN_STREAM,
- motu->spec->flags, motu->spec->analog_in_ports);
- calculate_differed_part(&motu->tx_packet_formats, motu->spec->flags,
- data, V2_OPT_IN_IFACE_MASK, V2_OPT_IN_IFACE_SHIFT);
+ memcpy(motu->tx_packet_formats.pcm_chunks,
+ motu->spec->tx_fixed_pcm_chunks,
+ sizeof(motu->tx_packet_formats.pcm_chunks));
+ memcpy(motu->rx_packet_formats.pcm_chunks,
+ motu->spec->rx_fixed_pcm_chunks,
+ sizeof(motu->rx_packet_formats.pcm_chunks));
- calculate_fixed_part(&motu->rx_packet_formats, AMDTP_OUT_STREAM,
- motu->spec->flags, motu->spec->analog_out_ports);
- calculate_differed_part(&motu->rx_packet_formats, motu->spec->flags,
- data, V2_OPT_OUT_IFACE_MASK, V2_OPT_OUT_IFACE_SHIFT);
+ if (motu->spec == &snd_motu_spec_828mk2)
+ return detect_packet_formats_828mk2(motu, data);
+ else if (motu->spec == &snd_motu_spec_traveler)
+ return detect_packet_formats_traveler(motu, data);
+ else if (motu->spec == &snd_motu_spec_8pre)
+ return detect_packet_formats_8pre(motu, data);
+ else
+ return 0;
+}
- motu->tx_packet_formats.pcm_byte_offset = 10;
- motu->rx_packet_formats.pcm_byte_offset = 10;
+const struct snd_motu_spec snd_motu_spec_828mk2 = {
+ .name = "828mk2",
+ .protocol_version = SND_MOTU_PROTOCOL_V2,
+ .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q,
+ .tx_fixed_pcm_chunks = {14, 14, 0},
+ .rx_fixed_pcm_chunks = {14, 14, 0},
+};
- return 0;
-}
+const struct snd_motu_spec snd_motu_spec_traveler = {
+ .name = "Traveler",
+ .protocol_version = SND_MOTU_PROTOCOL_V2,
+ .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q,
+ .tx_fixed_pcm_chunks = {14, 14, 8},
+ .rx_fixed_pcm_chunks = {14, 14, 8},
+};
+
+const struct snd_motu_spec snd_motu_spec_ultralite = {
+ .name = "UltraLite",
+ .protocol_version = SND_MOTU_PROTOCOL_V2,
+ .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q,
+ .tx_fixed_pcm_chunks = {14, 14, 0},
+ .rx_fixed_pcm_chunks = {14, 14, 0},
+};
-const struct snd_motu_protocol snd_motu_protocol_v2 = {
- .get_clock_rate = v2_get_clock_rate,
- .set_clock_rate = v2_set_clock_rate,
- .get_clock_source = v2_get_clock_source,
- .switch_fetching_mode = v2_switch_fetching_mode,
- .cache_packet_formats = v2_cache_packet_formats,
+const struct snd_motu_spec snd_motu_spec_8pre = {
+ .name = "8pre",
+ .protocol_version = SND_MOTU_PROTOCOL_V2,
+ .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q,
+ .tx_fixed_pcm_chunks = {10, 6, 0},
+ .rx_fixed_pcm_chunks = {10, 6, 0},
};
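With the per-model fixed chunk tables, the runtime detection only has to add the optical ADAT chunks on top. A standalone sketch of the 828mk2 case, assuming the optical input is switched to ADAT (the opt_in_is_adat flag is invented for illustration):

#include <stdio.h>

int main(void)
{
	/* snd_motu_spec_828mk2: tx_fixed_pcm_chunks = {14, 14, 0} */
	unsigned int pcm_chunks[3] = {14, 14, 0};
	int opt_in_is_adat = 1;		/* assumed front-panel setting */

	if (opt_in_is_adat) {
		pcm_chunks[0] += 8;	/* 1x: 8 ADAT channels */
		pcm_chunks[1] += 4;	/* 2x: ADAT carries half the channels (S/MUX) */
	}

	printf("tx pcm chunks: %u %u %u\n",
	       pcm_chunks[0], pcm_chunks[1], pcm_chunks[2]);	/* 22 18 0 */
	return 0;
}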
diff --git a/sound/firewire/motu/motu-protocol-v3.c b/sound/firewire/motu/motu-protocol-v3.c
index d1545e2b5caa..01a47ac7bb2d 100644
--- a/sound/firewire/motu/motu-protocol-v3.c
+++ b/sound/firewire/motu/motu-protocol-v3.c
@@ -24,7 +24,8 @@
#define V3_NO_ADAT_OPT_OUT_IFACE_A 0x00040000
#define V3_NO_ADAT_OPT_OUT_IFACE_B 0x00400000
-static int v3_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
+int snd_motu_protocol_v3_get_clock_rate(struct snd_motu *motu,
+ unsigned int *rate)
{
__be32 reg;
u32 data;
@@ -45,7 +46,8 @@ static int v3_get_clock_rate(struct snd_motu *motu, unsigned int *rate)
return 0;
}
-static int v3_set_clock_rate(struct snd_motu *motu, unsigned int rate)
+int snd_motu_protocol_v3_set_clock_rate(struct snd_motu *motu,
+ unsigned int rate)
{
__be32 reg;
u32 data;
@@ -85,55 +87,102 @@ static int v3_set_clock_rate(struct snd_motu *motu, unsigned int rate)
return 0;
}
-static int v3_get_clock_source(struct snd_motu *motu,
- enum snd_motu_clock_source *src)
+static int detect_clock_source_828mk3(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
{
- __be32 reg;
- u32 data;
- unsigned int val;
- int err;
-
- err = snd_motu_transaction_read(motu, V3_CLOCK_STATUS_OFFSET, &reg,
- sizeof(reg));
- if (err < 0)
- return err;
- data = be32_to_cpu(reg);
-
- val = data & V3_CLOCK_SOURCE_MASK;
- if (val == 0x00) {
+ switch (data) {
+ case 0x00:
*src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
- } else if (val == 0x01) {
+ break;
+ case 0x01:
*src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
- } else if (val == 0x02) {
+ break;
+ case 0x02:
*src = SND_MOTU_CLOCK_SOURCE_SPH;
- } else if (val == 0x10) {
+ break;
+ case 0x10:
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
- } else if (val == 0x18 || val == 0x19) {
- err = snd_motu_transaction_read(motu, V3_OPT_IFACE_MODE_OFFSET,
- &reg, sizeof(reg));
+ break;
+ case 0x18:
+ case 0x19:
+ {
+ __be32 reg;
+ u32 options;
+ int err;
+
+ err = snd_motu_transaction_read(motu,
+ V3_OPT_IFACE_MODE_OFFSET, &reg, sizeof(reg));
if (err < 0)
return err;
- data = be32_to_cpu(reg);
+ options = be32_to_cpu(reg);
- if (val == 0x18) {
- if (data & V3_NO_ADAT_OPT_IN_IFACE_A)
+ if (data == 0x18) {
+ if (options & V3_NO_ADAT_OPT_IN_IFACE_A)
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_A;
else
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT_A;
} else {
- if (data & V3_NO_ADAT_OPT_IN_IFACE_B)
+ if (options & V3_NO_ADAT_OPT_IN_IFACE_B)
*src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_OPT_B;
else
*src = SND_MOTU_CLOCK_SOURCE_ADAT_ON_OPT_B;
}
- } else {
+
+ break;
+ }
+ default:
*src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
+ break;
}
return 0;
}
-static int v3_switch_fetching_mode(struct snd_motu *motu, bool enable)
+static int v3_detect_clock_source(struct snd_motu *motu, u32 data,
+ enum snd_motu_clock_source *src)
+{
+ switch (data) {
+ case 0x00:
+ *src = SND_MOTU_CLOCK_SOURCE_INTERNAL;
+ break;
+ case 0x01:
+ *src = SND_MOTU_CLOCK_SOURCE_WORD_ON_BNC;
+ break;
+ case 0x02:
+ *src = SND_MOTU_CLOCK_SOURCE_SPH;
+ break;
+ case 0x10:
+ *src = SND_MOTU_CLOCK_SOURCE_SPDIF_ON_COAX;
+ break;
+ default:
+ *src = SND_MOTU_CLOCK_SOURCE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+int snd_motu_protocol_v3_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src)
+{
+ __be32 reg;
+ u32 data;
+ int err;
+
+ err = snd_motu_transaction_read(motu, V3_CLOCK_STATUS_OFFSET, &reg,
+ sizeof(reg));
+ if (err < 0)
+ return err;
+ data = be32_to_cpu(reg) & V3_CLOCK_SOURCE_MASK;
+
+ if (motu->spec == &snd_motu_spec_828mk3)
+ return detect_clock_source_828mk3(motu, data, src);
+ else
+ return v3_detect_clock_source(motu, data, src);
+}
+
+int snd_motu_protocol_v3_switch_fetching_mode(struct snd_motu *motu,
+ bool enable)
{
__be32 reg;
u32 data;
@@ -155,162 +204,113 @@ static int v3_switch_fetching_mode(struct snd_motu *motu, bool enable)
sizeof(reg));
}
-static void calculate_fixed_part(struct snd_motu_packet_format *formats,
- enum amdtp_stream_direction dir,
- enum snd_motu_spec_flags flags,
- unsigned char analog_ports)
+static int detect_packet_formats_828mk3(struct snd_motu *motu, u32 data)
{
- unsigned char pcm_chunks[3] = {0, 0, 0};
-
- formats->msg_chunks = 2;
-
- pcm_chunks[0] = analog_ports;
- pcm_chunks[1] = analog_ports;
- if (flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4)
- pcm_chunks[2] = analog_ports;
-
- if (dir == AMDTP_IN_STREAM) {
- if (flags & SND_MOTU_SPEC_TX_MICINST_CHUNK) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
- if (flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4)
- pcm_chunks[2] += 2;
- }
-
- if (flags & SND_MOTU_SPEC_TX_RETURN_CHUNK) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
- if (flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4)
- pcm_chunks[2] += 2;
- }
-
- if (flags & SND_MOTU_SPEC_TX_REVERB_CHUNK) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
- }
- } else {
- if (flags & SND_MOTU_SPEC_RX_SEPARATED_MAIN) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
+ if (data & V3_ENABLE_OPT_IN_IFACE_A) {
+ if (data & V3_NO_ADAT_OPT_IN_IFACE_A) {
+ motu->tx_packet_formats.pcm_chunks[0] += 4;
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
+ } else {
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
}
-
- // Packets to v3 units include 2 chunks for phone 1/2, except
- // for 176.4/192.0 kHz.
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
}
- if (flags & SND_MOTU_SPEC_HAS_AESEBU_IFACE) {
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
+ if (data & V3_ENABLE_OPT_IN_IFACE_B) {
+ if (data & V3_NO_ADAT_OPT_IN_IFACE_B) {
+ motu->tx_packet_formats.pcm_chunks[0] += 4;
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
+ } else {
+ motu->tx_packet_formats.pcm_chunks[0] += 8;
+ motu->tx_packet_formats.pcm_chunks[1] += 4;
+ }
}
- /*
- * At least, packets have two data chunks for S/PDIF on coaxial
- * interface.
- */
- pcm_chunks[0] += 2;
- pcm_chunks[1] += 2;
-
- /*
- * Fixed part consists of PCM chunks multiple of 4, with msg chunks. As
- * a result, this part can includes empty data chunks.
- */
- formats->fixed_part_pcm_chunks[0] = round_up(2 + pcm_chunks[0], 4) - 2;
- formats->fixed_part_pcm_chunks[1] = round_up(2 + pcm_chunks[1], 4) - 2;
- if (flags & SND_MOTU_SPEC_SUPPORT_CLOCK_X4)
- formats->fixed_part_pcm_chunks[2] =
- round_up(2 + pcm_chunks[2], 4) - 2;
-}
-
-static void calculate_differed_part(struct snd_motu_packet_format *formats,
- enum snd_motu_spec_flags flags, u32 data,
- u32 a_enable_mask, u32 a_no_adat_mask,
- u32 b_enable_mask, u32 b_no_adat_mask)
-{
- unsigned char pcm_chunks[3] = {0, 0, 0};
- int i;
-
- if ((flags & SND_MOTU_SPEC_HAS_OPT_IFACE_A) && (data & a_enable_mask)) {
- if (data & a_no_adat_mask) {
- /*
- * Additional two data chunks for S/PDIF on optical
- * interface A. This includes empty data chunks.
- */
- pcm_chunks[0] += 4;
- pcm_chunks[1] += 4;
+ if (data & V3_ENABLE_OPT_OUT_IFACE_A) {
+ if (data & V3_NO_ADAT_OPT_OUT_IFACE_A) {
+ motu->rx_packet_formats.pcm_chunks[0] += 4;
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
} else {
- /*
- * Additional data chunks for ADAT on optical interface
- * A.
- */
- pcm_chunks[0] += 8;
- pcm_chunks[1] += 4;
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
}
}
- if ((flags & SND_MOTU_SPEC_HAS_OPT_IFACE_B) && (data & b_enable_mask)) {
- if (data & b_no_adat_mask) {
- /*
- * Additional two data chunks for S/PDIF on optical
- * interface B. This includes empty data chunks.
- */
- pcm_chunks[0] += 4;
- pcm_chunks[1] += 4;
+ if (data & V3_ENABLE_OPT_OUT_IFACE_B) {
+ if (data & V3_NO_ADAT_OPT_OUT_IFACE_B) {
+ motu->rx_packet_formats.pcm_chunks[0] += 4;
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
} else {
- /*
- * Additional data chunks for ADAT on optical interface
- * B.
- */
- pcm_chunks[0] += 8;
- pcm_chunks[1] += 4;
+ motu->rx_packet_formats.pcm_chunks[0] += 8;
+ motu->rx_packet_formats.pcm_chunks[1] += 4;
}
}
- for (i = 0; i < 3; ++i) {
- if (pcm_chunks[i] > 0)
- pcm_chunks[i] = round_up(pcm_chunks[i], 4);
-
- formats->differed_part_pcm_chunks[i] = pcm_chunks[i];
- }
+ return 0;
}
-static int v3_cache_packet_formats(struct snd_motu *motu)
+int snd_motu_protocol_v3_cache_packet_formats(struct snd_motu *motu)
{
__be32 reg;
u32 data;
int err;
+ motu->tx_packet_formats.pcm_byte_offset = 10;
+ motu->rx_packet_formats.pcm_byte_offset = 10;
+
+ motu->tx_packet_formats.msg_chunks = 2;
+ motu->rx_packet_formats.msg_chunks = 2;
+
err = snd_motu_transaction_read(motu, V3_OPT_IFACE_MODE_OFFSET, &reg,
sizeof(reg));
if (err < 0)
return err;
data = be32_to_cpu(reg);
- calculate_fixed_part(&motu->tx_packet_formats, AMDTP_IN_STREAM,
- motu->spec->flags, motu->spec->analog_in_ports);
- calculate_differed_part(&motu->tx_packet_formats,
- motu->spec->flags, data,
- V3_ENABLE_OPT_IN_IFACE_A, V3_NO_ADAT_OPT_IN_IFACE_A,
- V3_ENABLE_OPT_IN_IFACE_B, V3_NO_ADAT_OPT_IN_IFACE_B);
+ memcpy(motu->tx_packet_formats.pcm_chunks,
+ motu->spec->tx_fixed_pcm_chunks,
+ sizeof(motu->tx_packet_formats.pcm_chunks));
+ memcpy(motu->rx_packet_formats.pcm_chunks,
+ motu->spec->rx_fixed_pcm_chunks,
+ sizeof(motu->rx_packet_formats.pcm_chunks));
- calculate_fixed_part(&motu->rx_packet_formats, AMDTP_OUT_STREAM,
- motu->spec->flags, motu->spec->analog_out_ports);
- calculate_differed_part(&motu->rx_packet_formats,
- motu->spec->flags, data,
- V3_ENABLE_OPT_OUT_IFACE_A, V3_NO_ADAT_OPT_OUT_IFACE_A,
- V3_ENABLE_OPT_OUT_IFACE_B, V3_NO_ADAT_OPT_OUT_IFACE_B);
+ if (motu->spec == &snd_motu_spec_828mk3)
+ return detect_packet_formats_828mk3(motu, data);
+ else
+ return 0;
+}
- motu->tx_packet_formats.pcm_byte_offset = 10;
- motu->rx_packet_formats.pcm_byte_offset = 10;
- return 0;
-}
+const struct snd_motu_spec snd_motu_spec_828mk3 = {
+ .name = "828mk3",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .flags = SND_MOTU_SPEC_RX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q,
+ .tx_fixed_pcm_chunks = {18, 18, 14},
+ .rx_fixed_pcm_chunks = {14, 14, 10},
+};
+
+const struct snd_motu_spec snd_motu_spec_ultralite_mk3 = {
+ .name = "UltraLiteMk3",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .flags = SND_MOTU_SPEC_RX_MIDI_3RD_Q |
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q,
+ .tx_fixed_pcm_chunks = {18, 14, 10},
+ .rx_fixed_pcm_chunks = {14, 14, 14},
+};
+
+const struct snd_motu_spec snd_motu_spec_audio_express = {
+ .name = "AudioExpress",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .flags = SND_MOTU_SPEC_RX_MIDI_2ND_Q |
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q,
+ .tx_fixed_pcm_chunks = {10, 10, 0},
+ .rx_fixed_pcm_chunks = {10, 10, 0},
+};
-const struct snd_motu_protocol snd_motu_protocol_v3 = {
- .get_clock_rate = v3_get_clock_rate,
- .set_clock_rate = v3_set_clock_rate,
- .get_clock_source = v3_get_clock_source,
- .switch_fetching_mode = v3_switch_fetching_mode,
- .cache_packet_formats = v3_cache_packet_formats,
+const struct snd_motu_spec snd_motu_spec_4pre = {
+ .name = "4pre",
+ .protocol_version = SND_MOTU_PROTOCOL_V3,
+ .tx_fixed_pcm_chunks = {10, 10, 0},
+ .rx_fixed_pcm_chunks = {10, 10, 0},
};
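
detect_packet_formats_828mk3() above layers optical-interface chunks on top of the fixed table: 4 data chunks per interface carrying S/PDIF, 8 (at 1x) and 4 (at 2x) per interface carrying ADAT. A standalone sketch of that arithmetic, reusing the 828mk3 rx_fixed_pcm_chunks values from this hunk:

	#include <stdbool.h>
	#include <stdio.h>

	static void add_opt_iface(unsigned char chunks[3], bool enabled, bool spdif)
	{
		if (!enabled)
			return;
		if (spdif) {
			chunks[0] += 4;
			chunks[1] += 4;
		} else {			/* ADAT: 8 chunks at 1x, 4 at 2x */
			chunks[0] += 8;
			chunks[1] += 4;
		}
	}

	int main(void)
	{
		unsigned char rx[3] = {14, 14, 10};	/* 828mk3 rx_fixed_pcm_chunks */

		add_opt_iface(rx, true, false);		/* optical out A: ADAT */
		add_opt_iface(rx, false, false);	/* optical out B: unused */
		printf("rx chunks: 1x=%u 2x=%u 4x=%u\n", rx[0], rx[1], rx[2]);
		/* prints: rx chunks: 1x=22 2x=18 4x=10 */
		return 0;
	}
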
diff --git a/sound/firewire/motu/motu-stream.c b/sound/firewire/motu/motu-stream.c
index a17ddceb1bec..2028c5419f6f 100644
--- a/sound/firewire/motu/motu-stream.c
+++ b/sound/firewire/motu/motu-stream.c
@@ -88,7 +88,7 @@ static void finish_session(struct snd_motu *motu)
u32 data;
int err;
- err = motu->spec->protocol->switch_fetching_mode(motu, false);
+ err = snd_motu_protocol_switch_fetching_mode(motu, false);
if (err < 0)
return;
@@ -110,7 +110,7 @@ int snd_motu_stream_cache_packet_formats(struct snd_motu *motu)
{
int err;
- err = motu->spec->protocol->cache_packet_formats(motu);
+ err = snd_motu_protocol_cache_packet_formats(motu);
if (err < 0)
return err;
@@ -140,7 +140,7 @@ int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
unsigned int curr_rate;
int err;
- err = motu->spec->protocol->get_clock_rate(motu, &curr_rate);
+ err = snd_motu_protocol_get_clock_rate(motu, &curr_rate);
if (err < 0)
return err;
if (rate == 0)
@@ -153,7 +153,7 @@ int snd_motu_stream_reserve_duplex(struct snd_motu *motu, unsigned int rate,
fw_iso_resources_free(&motu->tx_resources);
fw_iso_resources_free(&motu->rx_resources);
- err = motu->spec->protocol->set_clock_rate(motu, rate);
+ err = snd_motu_protocol_set_clock_rate(motu, rate);
if (err < 0) {
dev_err(&motu->unit->device,
"fail to set sampling rate: %d\n", err);
@@ -201,9 +201,9 @@ static int ensure_packet_formats(struct snd_motu *motu)
data &= ~(TX_PACKET_EXCLUDE_DIFFERED_DATA_CHUNKS |
RX_PACKET_EXCLUDE_DIFFERED_DATA_CHUNKS|
TX_PACKET_TRANSMISSION_SPEED_MASK);
- if (motu->tx_packet_formats.differed_part_pcm_chunks[0] == 0)
+ if (motu->spec->tx_fixed_pcm_chunks[0] == motu->tx_packet_formats.pcm_chunks[0])
data |= TX_PACKET_EXCLUDE_DIFFERED_DATA_CHUNKS;
- if (motu->rx_packet_formats.differed_part_pcm_chunks[0] == 0)
+ if (motu->spec->rx_fixed_pcm_chunks[0] == motu->rx_packet_formats.pcm_chunks[0])
data |= RX_PACKET_EXCLUDE_DIFFERED_DATA_CHUNKS;
data |= fw_parent_device(motu->unit)->max_speed;
@@ -272,7 +272,7 @@ int snd_motu_stream_start_duplex(struct snd_motu *motu)
goto stop_streams;
}
- err = motu->spec->protocol->switch_fetching_mode(motu, true);
+ err = snd_motu_protocol_switch_fetching_mode(motu, true);
if (err < 0) {
dev_err(&motu->unit->device,
"fail to enable frame fetching: %d\n", err);
@@ -317,7 +317,7 @@ static int init_stream(struct snd_motu *motu, struct amdtp_stream *s)
if (err < 0)
return err;
- err = amdtp_motu_init(s, motu->unit, dir, motu->spec->protocol);
+ err = amdtp_motu_init(s, motu->unit, dir, motu->spec);
if (err < 0)
fw_iso_resources_destroy(resources);
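
In ensure_packet_formats() above, the old differed_part_pcm_chunks test reduces to asking whether the cached chunk count still equals the fixed table. A trivial standalone model of that comparison:

	#include <stdbool.h>
	#include <stdio.h>

	static bool exclude_differed_chunks(unsigned char fixed, unsigned char cached)
	{
		return fixed == cached;
	}

	int main(void)
	{
		/* 828mk3 rx at 1x: fixed table says 14, 22 once ADAT is cached */
		printf("%d %d\n", exclude_differed_chunks(14, 14),
		       exclude_differed_chunks(14, 22));
		return 0;
	}
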
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index f2080d720aa9..a4929c1302dc 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -172,105 +172,6 @@ static void motu_bus_update(struct fw_unit *unit)
snd_motu_transaction_reregister(motu);
}
-const struct snd_motu_spec snd_motu_spec_828mk2 = {
- .name = "828mk2",
- .protocol = &snd_motu_protocol_v2,
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_TX_MICINST_CHUNK |
- SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARATED_MAIN |
- SND_MOTU_SPEC_HAS_OPT_IFACE_A |
- SND_MOTU_SPEC_RX_MIDI_2ND_Q |
- SND_MOTU_SPEC_TX_MIDI_2ND_Q,
-
- .analog_in_ports = 8,
- .analog_out_ports = 8,
-};
-
-static const struct snd_motu_spec motu_traveler = {
- .name = "Traveler",
- .protocol = &snd_motu_protocol_v2,
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_SUPPORT_CLOCK_X4 |
- SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_HAS_AESEBU_IFACE |
- SND_MOTU_SPEC_HAS_OPT_IFACE_A |
- SND_MOTU_SPEC_RX_MIDI_2ND_Q |
- SND_MOTU_SPEC_TX_MIDI_2ND_Q,
-
- .analog_in_ports = 8,
- .analog_out_ports = 8,
-};
-
-static const struct snd_motu_spec motu_ultralite = {
- .name = "UltraLite",
- .protocol = &snd_motu_protocol_v2,
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_TX_MICINST_CHUNK | // padding.
- SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_MIDI_2ND_Q |
- SND_MOTU_SPEC_TX_MIDI_2ND_Q |
- SND_MOTU_SPEC_RX_SEPARATED_MAIN,
- .analog_in_ports = 8,
- .analog_out_ports = 8,
-};
-
-static const struct snd_motu_spec motu_8pre = {
- .name = "8pre",
- .protocol = &snd_motu_protocol_v2,
- // In tx, use coax chunks for mix-return 1/2. In rx, use coax chunks for
- // dummy 1/2.
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_HAS_OPT_IFACE_A |
- SND_MOTU_SPEC_HAS_OPT_IFACE_B |
- SND_MOTU_SPEC_RX_MIDI_2ND_Q |
- SND_MOTU_SPEC_TX_MIDI_2ND_Q,
- .analog_in_ports = 8,
- .analog_out_ports = 2,
-};
-
-static const struct snd_motu_spec motu_828mk3 = {
- .name = "828mk3",
- .protocol = &snd_motu_protocol_v3,
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_SUPPORT_CLOCK_X4 |
- SND_MOTU_SPEC_TX_MICINST_CHUNK |
- SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_TX_REVERB_CHUNK |
- SND_MOTU_SPEC_RX_SEPARATED_MAIN |
- SND_MOTU_SPEC_HAS_OPT_IFACE_A |
- SND_MOTU_SPEC_HAS_OPT_IFACE_B |
- SND_MOTU_SPEC_RX_MIDI_3RD_Q |
- SND_MOTU_SPEC_TX_MIDI_3RD_Q,
-
- .analog_in_ports = 8,
- .analog_out_ports = 8,
-};
-
-static const struct snd_motu_spec motu_audio_express = {
- .name = "AudioExpress",
- .protocol = &snd_motu_protocol_v3,
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_TX_MICINST_CHUNK |
- SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARATED_MAIN |
- SND_MOTU_SPEC_RX_MIDI_2ND_Q |
- SND_MOTU_SPEC_TX_MIDI_3RD_Q,
- .analog_in_ports = 2,
- .analog_out_ports = 4,
-};
-
-static const struct snd_motu_spec motu_4pre = {
- .name = "4pre",
- .protocol = &snd_motu_protocol_v3,
- .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
- SND_MOTU_SPEC_TX_MICINST_CHUNK |
- SND_MOTU_SPEC_TX_RETURN_CHUNK |
- SND_MOTU_SPEC_RX_SEPARATED_MAIN,
- .analog_in_ports = 2,
- .analog_out_ports = 2,
-};
-
#define SND_MOTU_DEV_ENTRY(model, data) \
{ \
.match_flags = IEEE1394_MATCH_VENDOR_ID | \
@@ -284,13 +185,14 @@ static const struct snd_motu_spec motu_4pre = {
static const struct ieee1394_device_id motu_id_table[] = {
SND_MOTU_DEV_ENTRY(0x000003, &snd_motu_spec_828mk2),
- SND_MOTU_DEV_ENTRY(0x000009, &motu_traveler),
- SND_MOTU_DEV_ENTRY(0x00000d, &motu_ultralite),
- SND_MOTU_DEV_ENTRY(0x00000f, &motu_8pre),
- SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
- SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
- SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
- SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
+ SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
+ SND_MOTU_DEV_ENTRY(0x00000d, &snd_motu_spec_ultralite),
+ SND_MOTU_DEV_ENTRY(0x00000f, &snd_motu_spec_8pre),
+ SND_MOTU_DEV_ENTRY(0x000015, &snd_motu_spec_828mk3), // FireWire only.
+ SND_MOTU_DEV_ENTRY(0x000019, &snd_motu_spec_ultralite_mk3), // FireWire only.
+ SND_MOTU_DEV_ENTRY(0x000035, &snd_motu_spec_828mk3), // Hybrid.
+ SND_MOTU_DEV_ENTRY(0x000033, &snd_motu_spec_audio_express),
+ SND_MOTU_DEV_ENTRY(0x000045, &snd_motu_spec_4pre),
{ }
};
MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
diff --git a/sound/firewire/motu/motu.h b/sound/firewire/motu/motu.h
index 6efbde405a0d..3d0236ee6716 100644
--- a/sound/firewire/motu/motu.h
+++ b/sound/firewire/motu/motu.h
@@ -36,8 +36,7 @@ struct snd_motu_packet_format {
unsigned char pcm_byte_offset;
unsigned char msg_chunks;
- unsigned char fixed_part_pcm_chunks[3];
- unsigned char differed_part_pcm_chunks[3];
+ unsigned char pcm_chunks[3];
};
struct snd_motu {
@@ -74,19 +73,10 @@ struct snd_motu {
};
enum snd_motu_spec_flags {
- SND_MOTU_SPEC_SUPPORT_CLOCK_X2 = 0x0001,
- SND_MOTU_SPEC_SUPPORT_CLOCK_X4 = 0x0002,
- SND_MOTU_SPEC_TX_MICINST_CHUNK = 0x0004,
- SND_MOTU_SPEC_TX_RETURN_CHUNK = 0x0008,
- SND_MOTU_SPEC_TX_REVERB_CHUNK = 0x0010,
- SND_MOTU_SPEC_HAS_AESEBU_IFACE = 0x0020,
- SND_MOTU_SPEC_HAS_OPT_IFACE_A = 0x0040,
- SND_MOTU_SPEC_HAS_OPT_IFACE_B = 0x0080,
- SND_MOTU_SPEC_RX_MIDI_2ND_Q = 0x0100,
- SND_MOTU_SPEC_RX_MIDI_3RD_Q = 0x0200,
- SND_MOTU_SPEC_TX_MIDI_2ND_Q = 0x0400,
- SND_MOTU_SPEC_TX_MIDI_3RD_Q = 0x0800,
- SND_MOTU_SPEC_RX_SEPARATED_MAIN = 0x1000,
+ SND_MOTU_SPEC_RX_MIDI_2ND_Q = 0x0001,
+ SND_MOTU_SPEC_RX_MIDI_3RD_Q = 0x0002,
+ SND_MOTU_SPEC_TX_MIDI_2ND_Q = 0x0004,
+ SND_MOTU_SPEC_TX_MIDI_3RD_Q = 0x0008,
};
#define SND_MOTU_CLOCK_RATE_COUNT 6
@@ -108,33 +98,33 @@ enum snd_motu_clock_source {
SND_MOTU_CLOCK_SOURCE_UNKNOWN,
};
-struct snd_motu_protocol {
- int (*get_clock_rate)(struct snd_motu *motu, unsigned int *rate);
- int (*set_clock_rate)(struct snd_motu *motu, unsigned int rate);
- int (*get_clock_source)(struct snd_motu *motu,
- enum snd_motu_clock_source *source);
- int (*switch_fetching_mode)(struct snd_motu *motu, bool enable);
- int (*cache_packet_formats)(struct snd_motu *motu);
+enum snd_motu_protocol_version {
+ SND_MOTU_PROTOCOL_V2,
+ SND_MOTU_PROTOCOL_V3,
};
struct snd_motu_spec {
const char *const name;
+ enum snd_motu_protocol_version protocol_version;
enum snd_motu_spec_flags flags;
- unsigned char analog_in_ports;
- unsigned char analog_out_ports;
-
- const struct snd_motu_protocol *const protocol;
+ unsigned char tx_fixed_pcm_chunks[3];
+ unsigned char rx_fixed_pcm_chunks[3];
};
-extern const struct snd_motu_protocol snd_motu_protocol_v2;
-extern const struct snd_motu_protocol snd_motu_protocol_v3;
-
extern const struct snd_motu_spec snd_motu_spec_828mk2;
+extern const struct snd_motu_spec snd_motu_spec_traveler;
+extern const struct snd_motu_spec snd_motu_spec_ultralite;
+extern const struct snd_motu_spec snd_motu_spec_8pre;
+
+extern const struct snd_motu_spec snd_motu_spec_828mk3;
+extern const struct snd_motu_spec snd_motu_spec_ultralite_mk3;
+extern const struct snd_motu_spec snd_motu_spec_audio_express;
+extern const struct snd_motu_spec snd_motu_spec_4pre;
int amdtp_motu_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir,
- const struct snd_motu_protocol *const protocol);
+ const struct snd_motu_spec *spec);
int amdtp_motu_set_parameters(struct amdtp_stream *s, unsigned int rate,
unsigned int midi_ports,
struct snd_motu_packet_format *formats);
@@ -169,4 +159,79 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu);
int snd_motu_create_midi_devices(struct snd_motu *motu);
int snd_motu_create_hwdep_device(struct snd_motu *motu);
+
+int snd_motu_protocol_v2_get_clock_rate(struct snd_motu *motu,
+ unsigned int *rate);
+int snd_motu_protocol_v2_set_clock_rate(struct snd_motu *motu,
+ unsigned int rate);
+int snd_motu_protocol_v2_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src);
+int snd_motu_protocol_v2_switch_fetching_mode(struct snd_motu *motu,
+ bool enable);
+int snd_motu_protocol_v2_cache_packet_formats(struct snd_motu *motu);
+
+int snd_motu_protocol_v3_get_clock_rate(struct snd_motu *motu,
+ unsigned int *rate);
+int snd_motu_protocol_v3_set_clock_rate(struct snd_motu *motu,
+ unsigned int rate);
+int snd_motu_protocol_v3_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *src);
+int snd_motu_protocol_v3_switch_fetching_mode(struct snd_motu *motu,
+ bool enable);
+int snd_motu_protocol_v3_cache_packet_formats(struct snd_motu *motu);
+
+static inline int snd_motu_protocol_get_clock_rate(struct snd_motu *motu,
+ unsigned int *rate)
+{
+ if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V2)
+ return snd_motu_protocol_v2_get_clock_rate(motu, rate);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
+ return snd_motu_protocol_v3_get_clock_rate(motu, rate);
+ else
+ return -ENXIO;
+}
+
+static inline int snd_motu_protocol_set_clock_rate(struct snd_motu *motu,
+ unsigned int rate)
+{
+ if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V2)
+ return snd_motu_protocol_v2_set_clock_rate(motu, rate);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
+ return snd_motu_protocol_v3_set_clock_rate(motu, rate);
+ else
+ return -ENXIO;
+}
+
+static inline int snd_motu_protocol_get_clock_source(struct snd_motu *motu,
+ enum snd_motu_clock_source *source)
+{
+ if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V2)
+ return snd_motu_protocol_v2_get_clock_source(motu, source);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
+ return snd_motu_protocol_v3_get_clock_source(motu, source);
+ else
+ return -ENXIO;
+}
+
+static inline int snd_motu_protocol_switch_fetching_mode(struct snd_motu *motu,
+ bool enable)
+{
+ if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V2)
+ return snd_motu_protocol_v2_switch_fetching_mode(motu, enable);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
+ return snd_motu_protocol_v3_switch_fetching_mode(motu, enable);
+ else
+ return -ENXIO;
+}
+
+static inline int snd_motu_protocol_cache_packet_formats(struct snd_motu *motu)
+{
+ if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V2)
+ return snd_motu_protocol_v2_cache_packet_formats(motu);
+ else if (motu->spec->protocol_version == SND_MOTU_PROTOCOL_V3)
+ return snd_motu_protocol_v3_cache_packet_formats(motu);
+ else
+ return -ENXIO;
+}
+
#endif
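
The header above replaces the snd_motu_protocol callback struct with a protocol_version enum and static inline dispatchers. A standalone model of the same pattern, with shortened names and made-up rate values; only the dispatch shape and the -ENXIO fallback mirror the real header.

	#include <errno.h>
	#include <stdio.h>

	enum proto_version { PROTO_V2, PROTO_V3 };

	static int v2_get_clock_rate(unsigned int *rate) { *rate = 44100; return 0; }
	static int v3_get_clock_rate(unsigned int *rate) { *rate = 48000; return 0; }

	static inline int get_clock_rate(enum proto_version v, unsigned int *rate)
	{
		if (v == PROTO_V2)
			return v2_get_clock_rate(rate);
		else if (v == PROTO_V3)
			return v3_get_clock_rate(rate);
		else
			return -ENXIO;	/* unknown version, as in the header above */
	}

	int main(void)
	{
		unsigned int rate;

		if (get_clock_rate(PROTO_V3, &rate) == 0)
			printf("rate: %u\n", rate);
		return 0;
	}
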
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 73bfa71845f6..d0a604c939df 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_exit);
static void default_release(struct device *dev)
{
- snd_hdac_ext_bus_device_exit(container_of(dev, struct hdac_device, dev));
+ snd_hdac_ext_bus_device_exit(dev_to_hdac_dev(dev));
}
/**
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 3fe62be1cbcc..09ddab5f5cae 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -81,7 +81,6 @@ int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
mutex_unlock(&bus->cmd_mutex);
return err;
}
-EXPORT_SYMBOL_GPL(snd_hdac_bus_exec_verb);
/**
* snd_hdac_bus_exec_verb_unlocked - unlocked version
@@ -150,7 +149,6 @@ void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex)
schedule_work(&bus->unsol_work);
}
-EXPORT_SYMBOL_GPL(snd_hdac_bus_queue_event);
/*
* process queued unsolicited events
@@ -162,6 +160,7 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
struct hdac_driver *drv;
unsigned int rp, caddr, res;
+ spin_lock_irq(&bus->reg_lock);
while (bus->unsol_rp != bus->unsol_wp) {
rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
bus->unsol_rp = rp;
@@ -173,10 +172,13 @@ static void snd_hdac_bus_process_unsol_events(struct work_struct *work)
codec = bus->caddr_tbl[caddr & 0x0f];
if (!codec || !codec->dev.driver)
continue;
+ spin_unlock_irq(&bus->reg_lock);
drv = drv_to_hdac_driver(codec->dev.driver);
if (drv->unsol_event)
drv->unsol_event(codec, res);
+ spin_lock_irq(&bus->reg_lock);
}
+ spin_unlock_irq(&bus->reg_lock);
}
/**
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index bc4a8b606020..011b17cc1efa 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -9,6 +9,7 @@
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_register.h>
+#include "local.h"
/* clear CORB read pointer properly */
static void azx_clear_corbrp(struct hdac_bus *bus)
@@ -527,6 +528,18 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
}
bus->chip_init = true;
+
+ /*
+ * Default value of '8' is as per the HD audio specification (Rev 1.0a).
+ * Following relation is used to derive STRIPE control value.
+ * For sample rate <= 48K:
+ * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
+ * For sample rate > 48K:
+ * { ((num_channels * bits_per_sample * rate/48000) /
+ * number of SDOs) >= 8 }
+ */
+ bus->sdo_limit = 8;
+
return true;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_init_chip);
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index e3119f5cb0d5..333220f0f8af 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -20,7 +20,7 @@ static int get_codec_vendor_name(struct hdac_device *codec);
static void default_release(struct device *dev)
{
- snd_hdac_device_exit(container_of(dev, struct hdac_device, dev));
+ snd_hdac_device_exit(dev_to_hdac_dev(dev));
}
/**
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index a314b03b4a4c..a38a2af1654f 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -38,7 +38,7 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
else
value = (channels * bits_per_sample) / sdo_line;
- if (value >= 8)
+ if (value >= bus->sdo_limit)
break;
}
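
The comparison against bus->sdo_limit above implements the striping rule quoted in snd_hdac_bus_init_chip(). A simplified standalone model of that rule (not the kernel helper itself), showing why the spec default of 8 keeps 4 SDO lines for 2-channel/16-bit/48 kHz playback while a limit of 16 falls back to 2 lines:

	#include <stdio.h>

	static unsigned int pick_sdo_lines(unsigned int channels,
					   unsigned int bits_per_sample,
					   unsigned int rate,
					   unsigned int max_sdo,
					   unsigned int sdo_limit)
	{
		unsigned int sdo, value;

		for (sdo = max_sdo; sdo > 1; sdo >>= 1) {
			if (rate > 48000)
				value = (channels * bits_per_sample * rate / 48000) / sdo;
			else
				value = (channels * bits_per_sample) / sdo;
			if (value >= sdo_limit)
				break;
		}
		return sdo;
	}

	int main(void)
	{
		printf("limit 8  -> %u SDO lines\n", pick_sdo_lines(2, 16, 48000, 4, 8));
		printf("limit 16 -> %u SDO lines\n", pick_sdo_lines(2, 16, 48000, 4, 16));
		return 0;
	}
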
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index be1df80ed013..20b8f6cb3ff8 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Jaroslav Kysela <perex@perex.cz>
+#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_intel.h>
#include <sound/core.h>
#include <sound/intel-dsp-config.h>
#include <sound/intel-nhlt.h>
@@ -14,9 +17,14 @@ static int dsp_driver;
module_param(dsp_driver, int, 0444);
MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
-#define FLAG_SST BIT(0)
-#define FLAG_SOF BIT(1)
-#define FLAG_SOF_ONLY_IF_DMIC BIT(16)
+#define FLAG_SST BIT(0)
+#define FLAG_SOF BIT(1)
+#define FLAG_SST_ONLY_IF_DMIC BIT(15)
+#define FLAG_SOF_ONLY_IF_DMIC BIT(16)
+#define FLAG_SOF_ONLY_IF_SOUNDWIRE BIT(17)
+
+#define FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE (FLAG_SOF_ONLY_IF_DMIC | \
+ FLAG_SOF_ONLY_IF_SOUNDWIRE)
struct config_entry {
u32 flags;
@@ -100,6 +108,10 @@ static const struct config_entry config_table[] = {
{}
}
},
+ {
+ .flags = FLAG_SST | FLAG_SST_ONLY_IF_DMIC,
+ .device = 0x9d70,
+ },
#endif
/* Kabylake-LP */
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL)
@@ -116,6 +128,10 @@ static const struct config_entry config_table[] = {
{}
}
},
+ {
+ .flags = FLAG_SST | FLAG_SST_ONLY_IF_DMIC,
+ .device = 0x9d71,
+ },
#endif
/*
@@ -166,7 +182,7 @@ static const struct config_entry config_table[] = {
}
},
{
- .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x9dc8,
},
#endif
@@ -187,7 +203,7 @@ static const struct config_entry config_table[] = {
}
},
{
- .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0xa348,
},
#endif
@@ -204,18 +220,50 @@ static const struct config_entry config_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
}
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "09C6")
+ },
+ },
+ {
+ /* early version of SKU 09C6 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0983")
+ },
+ },
{}
}
},
{
- .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x02c8,
},
#endif
/* Cometlake-H */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
{
- .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .flags = FLAG_SOF,
+ .device = 0x06c8,
+ .dmi_table = (const struct dmi_system_id []) {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "098F"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0990"),
+ },
+ },
+ {}
+ }
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x06c8,
},
#endif
@@ -236,7 +284,7 @@ static const struct config_entry config_table[] = {
}
},
{
- .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x34c8,
},
#endif
@@ -256,9 +304,8 @@ static const struct config_entry config_table[] = {
{}
}
},
-
{
- .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC,
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0xa0c8,
},
#endif
@@ -303,6 +350,28 @@ static int snd_intel_dsp_check_dmic(struct pci_dev *pci)
return ret;
}
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE)
+static int snd_intel_dsp_check_soundwire(struct pci_dev *pci)
+{
+ struct sdw_intel_acpi_info info;
+ acpi_handle handle;
+ int ret;
+
+ handle = ACPI_HANDLE(&pci->dev);
+
+ ret = sdw_intel_acpi_scan(handle, &info);
+ if (ret < 0)
+ return ret;
+
+ return info.link_mask;
+}
+#else
+static int snd_intel_dsp_check_soundwire(struct pci_dev *pci)
+{
+ return 0;
+}
+#endif
+
int snd_intel_dsp_driver_probe(struct pci_dev *pci)
{
const struct config_entry *cfg;
@@ -336,22 +405,36 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
return SND_INTEL_DSP_DRIVER_ANY;
if (cfg->flags & FLAG_SOF) {
- if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC) {
+ if (cfg->flags & FLAG_SOF_ONLY_IF_SOUNDWIRE &&
+ snd_intel_dsp_check_soundwire(pci) > 0) {
+ dev_info(&pci->dev, "SoundWire enabled on CannonLake+ platform, using SOF driver\n");
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+ if (cfg->flags & FLAG_SOF_ONLY_IF_DMIC &&
+ snd_intel_dsp_check_dmic(pci)) {
+ dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+ if (!(cfg->flags & FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE))
+ return SND_INTEL_DSP_DRIVER_SOF;
+ }
+
+
+ if (cfg->flags & FLAG_SST) {
+ if (cfg->flags & FLAG_SST_ONLY_IF_DMIC) {
if (snd_intel_dsp_check_dmic(pci)) {
- dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SOF driver\n");
- return SND_INTEL_DSP_DRIVER_SOF;
+ dev_info(&pci->dev, "Digital mics found on Skylake+ platform, using SST driver\n");
+ return SND_INTEL_DSP_DRIVER_SST;
}
} else {
- return SND_INTEL_DSP_DRIVER_SOF;
+ return SND_INTEL_DSP_DRIVER_SST;
}
}
- if (cfg->flags & FLAG_SST)
- return SND_INTEL_DSP_DRIVER_SST;
-
return SND_INTEL_DSP_DRIVER_LEGACY;
}
EXPORT_SYMBOL_GPL(snd_intel_dsp_driver_probe);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel DSP config driver");
+MODULE_IMPORT_NS(SOUNDWIRE_INTEL_INIT);
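
The probe logic above orders the SoundWire and DMIC checks ahead of the unconditional SOF and SST cases. A standalone model of that decision, with the flag bits copied from this hunk and the hardware probes replaced by booleans:

	#include <stdbool.h>
	#include <stdio.h>

	#define FLAG_SST			(1u << 0)
	#define FLAG_SOF			(1u << 1)
	#define FLAG_SST_ONLY_IF_DMIC		(1u << 15)
	#define FLAG_SOF_ONLY_IF_DMIC		(1u << 16)
	#define FLAG_SOF_ONLY_IF_SOUNDWIRE	(1u << 17)
	#define FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE \
		(FLAG_SOF_ONLY_IF_DMIC | FLAG_SOF_ONLY_IF_SOUNDWIRE)

	enum driver { DRIVER_LEGACY, DRIVER_SST, DRIVER_SOF };

	static enum driver pick(unsigned int flags, bool has_dmic, bool has_sdw)
	{
		if (flags & FLAG_SOF) {
			if ((flags & FLAG_SOF_ONLY_IF_SOUNDWIRE) && has_sdw)
				return DRIVER_SOF;
			if ((flags & FLAG_SOF_ONLY_IF_DMIC) && has_dmic)
				return DRIVER_SOF;
			if (!(flags & FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE))
				return DRIVER_SOF;
		}
		if (flags & FLAG_SST) {
			if (flags & FLAG_SST_ONLY_IF_DMIC) {
				if (has_dmic)
					return DRIVER_SST;
			} else {
				return DRIVER_SST;
			}
		}
		return DRIVER_LEGACY;
	}

	int main(void)
	{
		/* e.g. the 0x02c8 entry: SOF only with DMIC or SoundWire present */
		unsigned int flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE;

		printf("bare -> %d, dmic -> %d\n",	/* 0 = legacy, 2 = SOF */
		       pick(flags, false, false), pick(flags, true, false));
		return 0;
	}
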
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index 99a23fe7fab9..059aaf04f536 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -1,61 +1,28 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2015-2019 Intel Corporation
#include <linux/acpi.h>
#include <sound/intel-nhlt.h>
-#define NHLT_ACPI_HEADER_SIG "NHLT"
-
-/* Unique identification for getting NHLT blobs */
-static const guid_t osc_guid =
- GUID_INIT(0xA69F886E, 0x6CEB, 0x4594,
- 0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53);
-
struct nhlt_acpi_table *intel_nhlt_init(struct device *dev)
{
- acpi_handle handle;
- union acpi_object *obj;
- struct nhlt_resource_desc *nhlt_ptr;
- struct nhlt_acpi_table *nhlt_table = NULL;
-
- handle = ACPI_HANDLE(dev);
- if (!handle) {
- dev_err(dev, "Didn't find ACPI_HANDLE\n");
- return NULL;
- }
+ struct nhlt_acpi_table *nhlt;
+ acpi_status status;
- obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
-
- if (!obj)
- return NULL;
-
- if (obj->type != ACPI_TYPE_BUFFER) {
- dev_dbg(dev, "No NHLT table found\n");
- ACPI_FREE(obj);
+ status = acpi_get_table(ACPI_SIG_NHLT, 0,
+ (struct acpi_table_header **)&nhlt);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "NHLT table not found\n");
return NULL;
}
- nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
- if (nhlt_ptr->length)
- nhlt_table = (struct nhlt_acpi_table *)
- memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
- MEMREMAP_WB);
- ACPI_FREE(obj);
- if (nhlt_table &&
- (strncmp(nhlt_table->header.signature,
- NHLT_ACPI_HEADER_SIG,
- strlen(NHLT_ACPI_HEADER_SIG)) != 0)) {
- memunmap(nhlt_table);
- dev_err(dev, "NHLT ACPI header signature incorrect\n");
- return NULL;
- }
- return nhlt_table;
+ return nhlt;
}
EXPORT_SYMBOL_GPL(intel_nhlt_init);
void intel_nhlt_free(struct nhlt_acpi_table *nhlt)
{
- memunmap((void *)nhlt);
+ acpi_put_table((struct acpi_table_header *)nhlt);
}
EXPORT_SYMBOL_GPL(intel_nhlt_free);
diff --git a/sound/hda/local.h b/sound/hda/local.h
index 5b935219352f..896ba142e8bc 100644
--- a/sound/hda/local.h
+++ b/sound/hda/local.h
@@ -36,6 +36,9 @@ void hda_widget_sysfs_exit(struct hdac_device *codec);
int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
void snd_hdac_bus_remove_device(struct hdac_bus *bus,
struct hdac_device *codec);
+void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
+int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr,
+ unsigned int cmd, unsigned int *res);
int snd_hdac_exec_verb(struct hdac_device *codec, unsigned int cmd,
unsigned int flags, unsigned int *res);
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index ce4c8ba2fa98..ca18fe3ff8a5 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(clockfreq, "Clock frequency for ad1816a driver (default = 0).")
static const struct pnp_card_device_id snd_ad1816a_pnpids[] = {
/* Analog Devices AD1815 */
{ .id = "ADS7150", .devs = { { .id = "ADS7150" }, { .id = "ADS7151" } } },
- /* Analog Device AD1816? */
+ /* Analog Devices AD1816? */
{ .id = "ADS7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
/* Analog Devices AD1816A - added by Kenneth Platz <kxp@atl.hp.com> */
{ .id = "ADS7181", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c
index ff3a05ad99c0..64610571a5e1 100644
--- a/sound/isa/es1688/es1688.c
+++ b/sound/isa/es1688/es1688.c
@@ -267,8 +267,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard,
return error;
}
error = snd_es1688_probe(card, dev);
- if (error < 0)
+ if (error < 0) {
+ snd_card_free(card);
return error;
+ }
pnp_set_card_drvdata(pcard, card);
snd_es968_pnp_is_probed = 1;
return 0;
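
The es1688 fix above frees the card when the late probe step fails. The generic shape of that pattern as a standalone sketch (not ALSA code): once a resource exists, every later error path has to release it.

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct card { int dummy; };

	static struct card *card_new(void)	{ return calloc(1, sizeof(struct card)); }
	static void card_free(struct card *c)	{ free(c); }
	static int hw_probe(struct card *c)	{ (void)c; return -ENODEV; /* pretend failure */ }

	static int pnp_detect(void)
	{
		struct card *card = card_new();
		int error;

		if (!card)
			return -ENOMEM;
		error = hw_probe(card);
		if (error < 0) {
			card_free(card);	/* the release the patch adds */
			return error;
		}
		return 0;
	}

	int main(void)
	{
		printf("pnp_detect() = %d\n", pnp_detect());
		return 0;
	}
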
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index c5b1d5900eed..d6420d224d09 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -1171,7 +1171,10 @@ wavefront_send_alias (snd_wavefront_t *dev, wavefront_patch_info *header)
"alias for %d\n",
header->number,
header->hdr.a.OriginalSample);
-
+
+ if (header->number >= WF_MAX_SAMPLE)
+ return -EINVAL;
+
munge_int32 (header->number, &alias_hdr[0], 2);
munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2);
munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset),
@@ -1202,6 +1205,9 @@ wavefront_send_multisample (snd_wavefront_t *dev, wavefront_patch_info *header)
int num_samples;
unsigned char *msample_hdr;
+ if (header->number >= WF_MAX_SAMPLE)
+ return -EINVAL;
+
msample_hdr = kmalloc(WF_MSAMPLE_BYTES, GFP_KERNEL);
if (! msample_hdr)
return -ENOMEM;
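
The wavefront change above rejects an out-of-range sample number before it is used as an array index, in both the alias and multisample paths. A standalone sketch of the same check; the 512 here is only a stand-in for WF_MAX_SAMPLE.

	#include <errno.h>
	#include <stdio.h>

	#define MAX_SAMPLE 512	/* stand-in; the real bound is WF_MAX_SAMPLE */

	static int sample_table[MAX_SAMPLE];

	static int lookup_sample(unsigned int number)
	{
		if (number >= MAX_SAMPLE)
			return -EINVAL;	/* validate before indexing, as both hunks now do */
		return sample_table[number];
	}

	int main(void)
	{
		printf("%d %d\n", lookup_sample(10), lookup_sample(1000));
		return 0;
	}
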
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index ebf926728c5f..45ef0f52ec55 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1356,7 +1356,7 @@ static int patch_cx20551(struct snd_ac97 *ac97)
}
/*
- * Analog Device AD18xx, AD19xx codecs
+ * Analog Devices AD18xx, AD19xx codecs
*/
#ifdef CONFIG_PM
static void ad18xx_resume(struct snd_ac97 *ac97)
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index ddb7c2ce3f7c..def8161cde4c 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1040,7 +1040,7 @@ static void snd_emu10k1x_proc_reg_write(struct snd_info_entry *entry,
if (sscanf(line, "%x %x %x", &reg, &channel_id, &val) != 3)
continue;
- if (reg < 0x49 && val <= 0xffffffff && channel_id <= 2)
+ if (reg < 0x49 && channel_id <= 2)
snd_emu10k1x_ptr_write(emu, reg, channel_id, val);
}
}
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index e1d3082a4fe9..7ba542e45a3d 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -99,10 +99,10 @@ comment "Set to Y if you want auto-loading the codec driver"
depends on SND_HDA=y && SND_HDA_CODEC_REALTEK=m
config SND_HDA_CODEC_ANALOG
- tristate "Build Analog Device HD-audio codec support"
+ tristate "Build Analog Devices HD-audio codec support"
select SND_HDA_GENERIC
help
- Say Y or M here to include Analog Device HD-audio codec support in
+ Say Y or M here to include Analog Devices HD-audio codec support in
snd-hda-intel driver, such as AD1986A.
comment "Set to Y if you want auto-loading the codec driver"
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 0310193ea1bd..d20aedd103c6 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -36,10 +36,10 @@
#include <linux/time.h>
#include <linux/completion.h>
#include <linux/acpi.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_X86
/* for snoop control */
-#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/cpufeature.h>
#endif
@@ -2662,6 +2662,9 @@ static const struct pci_device_id azx_ids[] = {
{ PCI_DEVICE(0x1002, 0xab20),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
AZX_DCAPS_PM_RUNTIME },
+ { PCI_DEVICE(0x1002, 0xab28),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+ AZX_DCAPS_PM_RUNTIME },
{ PCI_DEVICE(0x1002, 0xab38),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
AZX_DCAPS_PM_RUNTIME },
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 773992a07efa..0cc5fad1af8a 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -52,10 +52,21 @@
#define HDA_IPFS_INTR_MASK 0x188
#define HDA_IPFS_EN_INTR (1 << 16)
+/* FPCI */
+#define FPCI_DBG_CFG_2 0x10F4
+#define FPCI_GCAP_NSDO_SHIFT 18
+#define FPCI_GCAP_NSDO_MASK (0x3 << FPCI_GCAP_NSDO_SHIFT)
+
/* max number of SDs */
#define NUM_CAPTURE_SD 1
#define NUM_PLAYBACK_SD 1
+/*
+ * Tegra194 does not reflect correct number of SDO lines. Below macro
+ * is used to update the GCAP register to workaround the issue.
+ */
+#define TEGRA194_NUM_SDO_LINES 4
+
struct hda_tegra {
struct azx chip;
struct device *dev;
@@ -275,6 +286,7 @@ static int hda_tegra_init_clk(struct hda_tegra *hda)
static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
{
+ struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
struct hdac_bus *bus = azx_bus(chip);
struct snd_card *card = chip->card;
int err;
@@ -298,6 +310,26 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
bus->irq = irq_id;
card->sync_irq = bus->irq;
+ /*
+ * Tegra194 has 4 SDO lines and the STRIPE can be used to
+ * indicate how many of the SDO lines the stream should be
+ * striped. But GCAP register does not reflect the true
+ * capability of HW. Below workaround helps to fix this.
+ *
+ * GCAP_NSDO is bits 19:18 in T_AZA_DBG_CFG_2,
+ * 0 for 1 SDO, 1 for 2 SDO, 2 for 4 SDO lines.
+ */
+ if (of_device_is_compatible(np, "nvidia,tegra194-hda")) {
+ u32 val;
+
+ dev_info(card->dev, "Override SDO lines to %u\n",
+ TEGRA194_NUM_SDO_LINES);
+
+ val = readl(hda->regs + FPCI_DBG_CFG_2) & ~FPCI_GCAP_NSDO_MASK;
+ val |= (TEGRA194_NUM_SDO_LINES >> 1) << FPCI_GCAP_NSDO_SHIFT;
+ writel(val, hda->regs + FPCI_DBG_CFG_2);
+ }
+
gcap = azx_readw(chip, GCAP);
dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap);
@@ -332,6 +364,23 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
/* initialize chip */
azx_init_chip(chip, 1);
+ /*
+ * Playback (for 44.1K/48K, 2-channel, 16-bps) fails with
+ * 4 SDO lines due to legacy design limitation. Following
+ * is, from HD Audio Specification (Revision 1.0a), used to
+ * control striping of the stream across multiple SDO lines
+ * for sample rates <= 48K.
+ *
+ * { ((num_channels * bits_per_sample) / number of SDOs) >= 8 }
+ *
+ * Due to legacy design issue it is recommended that above
+ * ratio must be greater than 8. Since number of SDO lines is
+ * in powers of 2, next available ratio is 16 which can be
+ * used as a limiting factor here.
+ */
+ if (of_device_is_compatible(np, "nvidia,tegra194-hda"))
+ chip->bus.core.sdo_limit = 16;
+
/* codec detection */
if (!bus->codec_mask) {
dev_err(card->dev, "no codecs found!\n");
@@ -408,6 +457,7 @@ static int hda_tegra_create(struct snd_card *card,
static const struct of_device_id hda_tegra_match[] = {
{ .compatible = "nvidia,tegra30-hda" },
+ { .compatible = "nvidia,tegra194-hda" },
{},
};
MODULE_DEVICE_TABLE(of, hda_tegra_match);
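
The Tegra194 override above programs the NSDO field as TEGRA194_NUM_SDO_LINES >> 1. A quick standalone check that this shift reproduces the 0/1/2 encoding for 1/2/4 SDO lines described in the comment:

	#include <stdio.h>

	int main(void)
	{
		static const unsigned int lines[] = { 1, 2, 4 };

		for (unsigned int i = 0; i < 3; i++)
			printf("%u SDO line(s) -> NSDO field %u\n",
			       lines[i], lines[i] >> 1);
		return 0;
	}
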
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 93760a3564cf..fbd7cc6026d8 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2024,7 +2024,7 @@ static const struct hda_pcm_ops generic_ops = {
static int hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
{
- struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+ struct hda_codec *codec = hdac_to_hda_codec(hdac);
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
@@ -2037,7 +2037,7 @@ static int hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
static void hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
unsigned char *chmap)
{
- struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+ struct hda_codec *codec = hdac_to_hda_codec(hdac);
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
@@ -2051,7 +2051,7 @@ static void hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
unsigned char *chmap, int prepared)
{
- struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+ struct hda_codec *codec = hdac_to_hda_codec(hdac);
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
@@ -2067,7 +2067,7 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
static bool is_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
{
- struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+ struct hda_codec *codec = hdac_to_hda_codec(hdac);
struct hdmi_spec *spec = codec->spec;
struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
@@ -3787,7 +3787,7 @@ static int atihdmi_paired_chmap_validate(struct hdac_chmap *chmap,
static int atihdmi_pin_set_slot_channel(struct hdac_device *hdac,
hda_nid_t pin_nid, int hdmi_slot, int stream_channel)
{
- struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+ struct hda_codec *codec = hdac_to_hda_codec(hdac);
int verb;
int ati_channel_setup = 0;
@@ -3823,7 +3823,7 @@ static int atihdmi_pin_set_slot_channel(struct hdac_device *hdac,
static int atihdmi_pin_get_slot_channel(struct hdac_device *hdac,
hda_nid_t pin_nid, int asp_slot)
{
- struct hda_codec *codec = container_of(hdac, struct hda_codec, core);
+ struct hda_codec *codec = hdac_to_hda_codec(hdac);
bool was_odd = false;
int ati_asp_slot = asp_slot;
int verb;
@@ -4169,6 +4169,7 @@ HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e62d58872b6e..6d73f8beadb6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -17,6 +17,7 @@
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/hda_codec.h>
@@ -81,6 +82,7 @@ struct alc_spec {
/* mute LED for HP laptops, see alc269_fixup_mic_mute_hook() */
int mute_led_polarity;
+ int micmute_led_polarity;
hda_nid_t mute_led_nid;
hda_nid_t cap_mute_led_nid;
@@ -4080,11 +4082,9 @@ static void alc269_fixup_hp_mute_led_mic3(struct hda_codec *codec,
/* update LED status via GPIO */
static void alc_update_gpio_led(struct hda_codec *codec, unsigned int mask,
- bool enabled)
+ int polarity, bool enabled)
{
- struct alc_spec *spec = codec->spec;
-
- if (spec->mute_led_polarity)
+ if (polarity)
enabled = !enabled;
alc_update_gpio_data(codec, mask, !enabled); /* muted -> LED on */
}
@@ -4095,7 +4095,8 @@ static void alc_fixup_gpio_mute_hook(void *private_data, int enabled)
struct hda_codec *codec = private_data;
struct alc_spec *spec = codec->spec;
- alc_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
+ alc_update_gpio_led(codec, spec->gpio_mute_led_mask,
+ spec->mute_led_polarity, enabled);
}
/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -4104,9 +4105,30 @@ static void alc_gpio_micmute_update(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
+ spec->micmute_led_polarity,
spec->gen.micmute_led.led_value);
}
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct hda_codec *codec = dev_to_hda_codec(led_cdev->dev->parent);
+ struct alc_spec *spec = codec->spec;
+
+ alc_update_gpio_led(codec, spec->gpio_mic_led_mask,
+ spec->micmute_led_polarity, !!brightness);
+ return 0;
+}
+
+static struct led_classdev micmute_led_cdev = {
+ .name = "hda::micmute",
+ .max_brightness = 1,
+ .brightness_set_blocking = micmute_led_set,
+ .default_trigger = "audio-micmute",
+};
+#endif
+
/* setup mute and mic-mute GPIO bits, add hooks appropriately */
static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
int action,
@@ -4114,6 +4136,9 @@ static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
unsigned int micmute_mask)
{
struct alc_spec *spec = codec->spec;
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+ int err;
+#endif
alc_fixup_gpio(codec, action, mute_mask | micmute_mask);
@@ -4126,6 +4151,13 @@ static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
if (micmute_mask) {
spec->gpio_mic_led_mask = micmute_mask;
snd_hda_gen_add_micmute_led(codec, alc_gpio_micmute_update);
+
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+ micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ err = devm_led_classdev_register(&codec->core.dev, &micmute_led_cdev);
+ if (err)
+ codec_warn(codec, "failed to register micmute LED\n");
+#endif
}
}
@@ -4138,7 +4170,11 @@ static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
static void alc285_fixup_hp_gpio_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
- alc_fixup_hp_gpio_led(codec, action, 0x04, 0x00);
+ struct alc_spec *spec = codec->spec;
+
+ spec->micmute_led_polarity = 1;
+
+ alc_fixup_hp_gpio_led(codec, action, 0x04, 0x01);
}
static void alc286_fixup_hp_gpio_led(struct hda_codec *codec,
@@ -5808,7 +5844,8 @@ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
snd_hda_gen_hp_automute(codec, jack);
/* mute_led_polarity is set to 0, so we pass inverted value here */
- alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
+ alc_update_gpio_led(codec, 0x10, spec->mute_led_polarity,
+ !spec->gen.hp_jack_present);
}
/* Manage GPIOs for HP EliteBook Folio 9480m.
@@ -8124,6 +8161,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x17, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC,
+ {0x14, 0x01014010},
+ {0x17, 0x90170120},
+ {0x18, 0x02a11030},
+ {0x19, 0x02a1103f},
+ {0x21, 0x0221101f}),
{}
};
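
The Realtek changes above thread a per-LED polarity through alc_update_gpio_led() so the mute and mic-mute LEDs can differ, and additionally expose the mic-mute LED as a led_classdev with the "audio-micmute" trigger. A standalone model of just the polarity handling; the masks follow the alc285 fixup, and the GPIO word is a plain variable here.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int gpio_data;

	static void update_gpio_led(unsigned int mask, int polarity, bool enabled)
	{
		if (polarity)
			enabled = !enabled;
		if (!enabled)			/* muted -> data bit set (LED on) */
			gpio_data |= mask;
		else
			gpio_data &= ~mask;
	}

	int main(void)
	{
		update_gpio_led(0x04, 0, false);	/* mute LED, normal polarity */
		update_gpio_led(0x01, 1, false);	/* mic-mute LED, inverted */
		printf("gpio_data = 0x%02x\n", gpio_data);	/* prints 0x04 */
		return 0;
	}
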
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index 6a0520c4fb5a..cf801a235df9 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -460,7 +460,7 @@ static void xonar_st_init(struct oxygen *chip)
data->generic.anti_pop_delay = 100;
data->h6 = chip->model.dac_channels_mixer > 2;
- data->has_cs2000 = 1;
+ data->has_cs2000 = true;
data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1;
data->broken_i2c = true;
@@ -502,7 +502,7 @@ static void xonar_xense_init(struct oxygen *chip)
xonar_init_ext_power(chip);
data->generic.anti_pop_delay = 100;
- data->has_cs2000 = 1;
+ data->has_cs2000 = true;
data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1;
oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 592532c09a82..2e750b317be1 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -226,7 +226,7 @@ static int snd_pmac_pcm_prepare(struct snd_pmac *chip, struct pmac_stream *rec,
offset += rec->period_size;
}
/* make loop */
- cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
+ cp->command = cpu_to_le16(DBDMA_NOP | BR_ALWAYS);
cp->cmd_dep = cpu_to_le32(rec->cmd.addr);
snd_pmac_dma_stop(rec);
@@ -726,7 +726,7 @@ void snd_pmac_beep_dma_start(struct snd_pmac *chip, int bytes, unsigned long add
chip->extra_dma.cmds->xfer_status = cpu_to_le16(0);
chip->extra_dma.cmds->cmd_dep = cpu_to_le32(chip->extra_dma.addr);
chip->extra_dma.cmds->phy_addr = cpu_to_le32(addr);
- chip->extra_dma.cmds->command = cpu_to_le16(OUTPUT_MORE + BR_ALWAYS);
+ chip->extra_dma.cmds->command = cpu_to_le16(OUTPUT_MORE | BR_ALWAYS);
out_le32(&chip->awacs->control,
(in_le32(&chip->awacs->control) & ~0x1f00)
| (speed << 8));
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 6d2a33b8faa0..b8161a08f2ca 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -926,7 +926,7 @@ static int snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
PAGE_SHIFT, /* use system page size */
0, /* dma type; not used */
NULL,
- _ALIGN_UP(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE));
+ ALIGN(SND_PS3_DMA_REGION_SIZE, PAGE_SIZE));
dev->d_region->ioid = PS3_AUDIO_IOID;
ret = ps3_dma_region_create(dev->d_region);
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 861a21b79484..7f1747518e79 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-utils.o soc-dai.o soc-component.o
-snd-soc-core-objs += soc-pcm.o soc-io.o soc-devres.o soc-ops.o
+snd-soc-core-objs += soc-pcm.o soc-io.o soc-devres.o soc-ops.o soc-link.o soc-card.o
snd-soc-core-$(CONFIG_SND_SOC_COMPRESS) += soc-compress.o
ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index bce4cee5cb54..e37cf72f2bab 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -29,10 +29,23 @@ config SND_SOC_AMD_ACP3x
config SND_SOC_AMD_RV_RT5682_MACH
tristate "AMD RV support for RT5682"
- select SND_SOC_RT5682
+ select SND_SOC_RT5682_I2C
select SND_SOC_MAX98357A
select SND_SOC_CROS_EC_CODEC
select I2C_CROS_EC_TUNNEL
depends on SND_SOC_AMD_ACP3x && I2C && CROS_EC
help
This option enables machine driver for RT5682 and MAX9835.
+
+config SND_SOC_AMD_RENOIR
+ tristate "AMD Audio Coprocessor - Renoir support"
+ depends on X86 && PCI
+ help
+ This option enables ACP support for Renoir platform
+
+config SND_SOC_AMD_RENOIR_MACH
+ tristate "AMD Renoir support for DMIC"
+ select SND_SOC_DMIC
+ depends on SND_SOC_AMD_RENOIR
+ help
+ This option enables machine driver for DMIC
diff --git a/sound/soc/amd/Makefile b/sound/soc/amd/Makefile
index e6f3d9b469f3..e6df2f72a2a1 100644
--- a/sound/soc/amd/Makefile
+++ b/sound/soc/amd/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH) += snd-soc-acp-da7219mx98357-mac
obj-$(CONFIG_SND_SOC_AMD_CZ_RT5645_MACH) += snd-soc-acp-rt5645-mach.o
obj-$(CONFIG_SND_SOC_AMD_ACP3x) += raven/
obj-$(CONFIG_SND_SOC_AMD_RV_RT5682_MACH) += snd-soc-acp-rt5682-mach.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR) += renoir/
diff --git a/sound/soc/amd/raven/acp3x-i2s.c b/sound/soc/amd/raven/acp3x-i2s.c
index f160d35a6832..a532e01a2622 100644
--- a/sound/soc/amd/raven/acp3x-i2s.c
+++ b/sound/soc/amd/raven/acp3x-i2s.c
@@ -15,7 +15,7 @@
#include "acp3x.h"
-#define DRV_NAME "acp3x-i2s"
+#define DRV_NAME "acp3x_i2s_playcap"
static int acp3x_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
@@ -269,7 +269,7 @@ static struct snd_soc_dai_ops acp3x_i2s_dai_ops = {
};
static const struct snd_soc_component_driver acp3x_dai_component = {
- .name = "acp3x-i2s",
+ .name = DRV_NAME,
};
static struct snd_soc_dai_driver acp3x_i2s_dai = {
@@ -348,4 +348,4 @@ module_platform_driver(acp3x_dai_driver);
MODULE_AUTHOR("Vishnuvardhanrao.Ravulapati@amd.com");
MODULE_DESCRIPTION("AMD ACP 3.x PCM Driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index e362f0bc9e46..d8f554f369a8 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -15,7 +15,7 @@
#include "acp3x.h"
-#define DRV_NAME "acp3x-i2s-audio"
+#define DRV_NAME "acp3x_rv_i2s_dma"
static const struct snd_pcm_hardware acp3x_pcm_hardware_playback = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
@@ -241,14 +241,6 @@ static int acp3x_dma_open(struct snd_soc_component *component,
adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- adata->play_stream = substream;
- adata->i2ssp_play_stream = substream;
- } else {
- adata->capture_stream = substream;
- adata->i2ssp_capture_stream = substream;
- }
-
i2s_data->acp3x_base = adata->acp3x_base;
runtime->private_data = i2s_data;
return ret;
@@ -263,23 +255,42 @@ static int acp3x_dma_hw_params(struct snd_soc_component *component,
struct snd_soc_pcm_runtime *prtd;
struct snd_soc_card *card;
struct acp3x_platform_info *pinfo;
+ struct i2s_dev_data *adata;
u64 size;
prtd = substream->private_data;
card = prtd->card;
pinfo = snd_soc_card_get_drvdata(card);
+ adata = dev_get_drvdata(component->dev);
rtd = substream->runtime->private_data;
if (!rtd)
return -EINVAL;
- if (pinfo)
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ if (pinfo) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
rtd->i2s_instance = pinfo->play_i2s_instance;
- else
+ switch (rtd->i2s_instance) {
+ case I2S_BT_INSTANCE:
+ adata->play_stream = substream;
+ break;
+ case I2S_SP_INSTANCE:
+ default:
+ adata->i2ssp_play_stream = substream;
+ }
+ } else {
rtd->i2s_instance = pinfo->cap_i2s_instance;
- else
+ switch (rtd->i2s_instance) {
+ case I2S_BT_INSTANCE:
+ adata->capture_stream = substream;
+ break;
+ case I2S_SP_INSTANCE:
+ default:
+ adata->i2ssp_capture_stream = substream;
+ }
+ }
+ } else {
pr_err("pinfo failed\n");
-
+ }
size = params_buffer_bytes(params);
rtd->dma_addr = substream->dma_buffer.addr;
rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
@@ -292,7 +303,6 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_soc_component *component,
{
struct snd_soc_pcm_runtime *prtd;
struct snd_soc_card *card;
- struct acp3x_platform_info *pinfo;
struct i2s_stream_instance *rtd;
u32 pos;
u32 buffersize;
@@ -301,13 +311,6 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_soc_component *component,
prtd = substream->private_data;
card = prtd->card;
rtd = substream->runtime->private_data;
- pinfo = snd_soc_card_get_drvdata(card);
- if (pinfo) {
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- rtd->i2s_instance = pinfo->play_i2s_instance;
- else
- rtd->i2s_instance = pinfo->cap_i2s_instance;
- }
buffersize = frames_to_bytes(substream->runtime,
substream->runtime->buffer_size);
@@ -531,4 +534,4 @@ MODULE_AUTHOR("Maruthi.Bayyavarapu@amd.com");
MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
MODULE_DESCRIPTION("AMD ACP 3.x PCM Driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/sound/soc/amd/renoir/Makefile b/sound/soc/amd/renoir/Makefile
new file mode 100644
index 000000000000..e4371932a55a
--- /dev/null
+++ b/sound/soc/amd/renoir/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Renoir platform Support
+snd-rn-pci-acp3x-objs := rn-pci-acp3x.o
+snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += acp3x-rn.o
diff --git a/sound/soc/amd/renoir/acp3x-pdm-dma.c b/sound/soc/amd/renoir/acp3x-pdm-dma.c
new file mode 100644
index 000000000000..623dfd3ea705
--- /dev/null
+++ b/sound/soc/amd/renoir/acp3x-pdm-dma.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// AMD ALSA SoC PDM Driver
+//
+// Copyright 2020 Advanced Micro Devices, Inc.
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "rn_acp3x.h"
+
+#define DRV_NAME "acp_rn_pdm_dma"
+
+static const struct snd_pcm_hardware acp_pdm_hardware_capture = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
+ .periods_min = CAPTURE_MIN_NUM_PERIODS,
+ .periods_max = CAPTURE_MAX_NUM_PERIODS,
+};
+
+static irqreturn_t pdm_irq_handler(int irq, void *dev_id)
+{
+ struct pdm_dev_data *rn_pdm_data;
+ u16 cap_flag;
+ u32 val;
+
+ rn_pdm_data = dev_id;
+ if (!rn_pdm_data)
+ return IRQ_NONE;
+
+ cap_flag = 0;
+ val = rn_readl(rn_pdm_data->acp_base + ACP_EXTERNAL_INTR_STAT);
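+ /* Clear the PDM DMA interrupt and signal that a period has elapsed. */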
+ if ((val & BIT(PDM_DMA_STAT)) && rn_pdm_data->capture_stream) {
+ rn_writel(BIT(PDM_DMA_STAT), rn_pdm_data->acp_base +
+ ACP_EXTERNAL_INTR_STAT);
+ snd_pcm_period_elapsed(rn_pdm_data->capture_stream);
+ cap_flag = 1;
+ }
+
+ if (cap_flag)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static void init_pdm_ring_buffer(u32 physical_addr,
+ u32 buffer_size,
+ u32 watermark_size,
+ void __iomem *acp_base)
+{
+ rn_writel(physical_addr, acp_base + ACP_WOV_RX_RINGBUFADDR);
+ rn_writel(buffer_size, acp_base + ACP_WOV_RX_RINGBUFSIZE);
+ rn_writel(watermark_size, acp_base + ACP_WOV_RX_INTR_WATERMARK_SIZE);
+ rn_writel(0x01, acp_base + ACPAXI2AXI_ATU_CTRL);
+}
+
+static void enable_pdm_clock(void __iomem *acp_base)
+{
+ u32 pdm_clk_enable, pdm_ctrl;
+
+ pdm_clk_enable = ACP_PDM_CLK_FREQ_MASK;
+ pdm_ctrl = 0x00;
+
+ rn_writel(pdm_clk_enable, acp_base + ACP_WOV_CLK_CTRL);
+ pdm_ctrl = rn_readl(acp_base + ACP_WOV_MISC_CTRL);
+ pdm_ctrl |= ACP_WOV_MISC_CTRL_MASK;
+ rn_writel(pdm_ctrl, acp_base + ACP_WOV_MISC_CTRL);
+}
+
+static void enable_pdm_interrupts(void __iomem *acp_base)
+{
+ u32 ext_int_ctrl;
+
+ ext_int_ctrl = rn_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ ext_int_ctrl |= PDM_DMA_INTR_MASK;
+ rn_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+}
+
+static void disable_pdm_interrupts(void __iomem *acp_base)
+{
+ u32 ext_int_ctrl;
+
+ ext_int_ctrl = rn_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ ext_int_ctrl &= ~PDM_DMA_INTR_MASK;
+ rn_writel(ext_int_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+}
+
+static bool check_pdm_dma_status(void __iomem *acp_base)
+{
+ bool pdm_dma_status;
+ u32 pdm_enable, pdm_dma_enable;
+
+ pdm_dma_status = false;
+ pdm_enable = rn_readl(acp_base + ACP_WOV_PDM_ENABLE);
+ pdm_dma_enable = rn_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ if ((pdm_enable & ACP_PDM_ENABLE) && (pdm_dma_enable &
+ ACP_PDM_DMA_EN_STATUS))
+ pdm_dma_status = true;
+ return pdm_dma_status;
+}
+
+static int start_pdm_dma(void __iomem *acp_base)
+{
+ u32 pdm_enable;
+ u32 pdm_dma_enable;
+ int timeout;
+
+ pdm_enable = 0x01;
+ pdm_dma_enable = 0x01;
+
+ enable_pdm_clock(acp_base);
+ rn_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
+ rn_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ pdm_dma_enable = 0x00;
+ timeout = 0;
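+ /* Poll until hardware sets the DMA-enable status bit (bit 1). */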
+ while (++timeout < ACP_COUNTER) {
+ pdm_dma_enable = rn_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ if ((pdm_dma_enable & 0x02) == ACP_PDM_DMA_EN_STATUS)
+ return 0;
+ udelay(DELAY_US);
+ }
+ return -ETIMEDOUT;
+}
+
+static int stop_pdm_dma(void __iomem *acp_base)
+{
+ u32 pdm_enable, pdm_dma_enable;
+ int timeout;
+
+ pdm_enable = 0x00;
+ pdm_dma_enable = 0x00;
+
+ pdm_enable = rn_readl(acp_base + ACP_WOV_PDM_ENABLE);
+ pdm_dma_enable = rn_readl(acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ if (pdm_dma_enable & 0x01) {
+ pdm_dma_enable = 0x02;
+ rn_writel(pdm_dma_enable, acp_base + ACP_WOV_PDM_DMA_ENABLE);
+ pdm_dma_enable = 0x00;
+ timeout = 0;
+ while (++timeout < ACP_COUNTER) {
+ pdm_dma_enable = rn_readl(acp_base +
+ ACP_WOV_PDM_DMA_ENABLE);
+ if ((pdm_dma_enable & 0x02) == 0x00)
+ break;
+ udelay(DELAY_US);
+ }
+ if (timeout == ACP_COUNTER)
+ return -ETIMEDOUT;
+ }
+ if (pdm_enable == ACP_PDM_ENABLE) {
+ pdm_enable = ACP_PDM_DISABLE;
+ rn_writel(pdm_enable, acp_base + ACP_WOV_PDM_ENABLE);
+ }
+ rn_writel(0x01, acp_base + ACP_WOV_PDM_FIFO_FLUSH);
+ return 0;
+}
+
+static void config_acp_dma(struct pdm_stream_instance *rtd, int direction)
+{
+ u16 page_idx;
+ u32 low, high, val;
+ dma_addr_t addr;
+
+ addr = rtd->dma_addr;
+ val = 0;
+
+ /* Group Enable */
+ rn_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp_base +
+ ACPAXI2AXI_ATU_BASE_ADDR_GRP_1);
+ rn_writel(PAGE_SIZE_4K_ENABLE, rtd->acp_base +
+ ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1);
+
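+ /*
+ * Program the ATU page table: one low/high physical-address pair per
+ * DMA page is written into ACP SRAM through the scratch registers.
+ */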
+ for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
+ /* Load the low address of the page into ACP SRAM through SRBM */
+ low = lower_32_bits(addr);
+ high = upper_32_bits(addr);
+
+ rn_writel(low, rtd->acp_base + ACP_SCRATCH_REG_0 + val);
+ high |= BIT(31);
+ rn_writel(high, rtd->acp_base + ACP_SCRATCH_REG_0 + val + 4);
+ val += 8;
+ addr += PAGE_SIZE;
+ }
+}
+
+static int acp_pdm_dma_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime;
+ struct pdm_dev_data *adata;
+ struct pdm_stream_instance *pdm_data;
+ int ret;
+
+ runtime = substream->runtime;
+ adata = dev_get_drvdata(component->dev);
+ pdm_data = kzalloc(sizeof(*pdm_data), GFP_KERNEL);
+ if (!pdm_data)
+ return -ENOMEM;
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ runtime->hw = acp_pdm_hardware_capture;
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(component->dev, "set integer constraint failed\n");
+ kfree(pdm_data);
+ return ret;
+ }
+
+ enable_pdm_interrupts(adata->acp_base);
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ adata->capture_stream = substream;
+
+ pdm_data->acp_base = adata->acp_base;
+ runtime->private_data = pdm_data;
+ return ret;
+}
+
+static int acp_pdm_dma_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct pdm_stream_instance *rtd;
+ size_t size, period_bytes;
+
+ rtd = substream->runtime->private_data;
+ if (!rtd)
+ return -EINVAL;
+ size = params_buffer_bytes(params);
+ period_bytes = params_period_bytes(params);
+ rtd->dma_addr = substream->dma_buffer.addr;
+ rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
+ config_acp_dma(rtd, substream->stream);
+ init_pdm_ring_buffer(MEM_WINDOW_START, size, period_bytes,
+ rtd->acp_base);
+ return 0;
+}
+
+static u64 acp_pdm_get_byte_count(struct pdm_stream_instance *rtd,
+ int direction)
+{
+ union acp_pdm_dma_count byte_count;
+
+ byte_count.bcount.high =
+ rn_readl(rtd->acp_base +
+ ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH);
+ byte_count.bcount.low =
+ rn_readl(rtd->acp_base +
+ ACP_WOV_RX_LINEARPOSITIONCNTR_LOW);
+ return byte_count.bytescount;
+}
+
+static snd_pcm_uframes_t acp_pdm_dma_pointer(struct snd_soc_component *comp,
+ struct snd_pcm_substream *stream)
+{
+ struct pdm_stream_instance *rtd;
+ u32 pos, buffersize;
+ u64 bytescount;
+
+ rtd = stream->runtime->private_data;
+ buffersize = frames_to_bytes(stream->runtime,
+ stream->runtime->buffer_size);
+ bytescount = acp_pdm_get_byte_count(rtd, stream->stream);
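+ /*
+ * The linear position counter runs continuously; subtract the value
+ * latched at trigger start and wrap to the ring-buffer size to get
+ * the current position.
+ */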
+ if (bytescount > rtd->bytescount)
+ bytescount -= rtd->bytescount;
+ pos = do_div(bytescount, buffersize);
+ return bytes_to_frames(stream->runtime, pos);
+}
+
+static int acp_pdm_dma_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd)
+{
+ struct device *parent = component->dev->parent;
+
+ snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
+ parent, MIN_BUFFER, MAX_BUFFER);
+ return 0;
+}
+
+static int acp_pdm_dma_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ return snd_pcm_lib_default_mmap(substream, vma);
+}
+
+static int acp_pdm_dma_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct pdm_dev_data *adata = dev_get_drvdata(component->dev);
+
+ disable_pdm_interrupts(adata->acp_base);
+ adata->capture_stream = NULL;
+ return 0;
+}
+
+static int acp_pdm_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct pdm_stream_instance *rtd;
+ unsigned int ch_mask;
+
+ rtd = substream->runtime->private_data;
+ switch (params_channels(params)) {
+ case TWO_CH:
+ ch_mask = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+ rn_writel(ch_mask, rtd->acp_base + ACP_WOV_PDM_NO_OF_CHANNELS);
+ rn_writel(PDM_DECIMATION_FACTOR, rtd->acp_base +
+ ACP_WOV_PDM_DECIMATION_FACTOR);
+ return 0;
+}
+
+static int acp_pdm_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct pdm_stream_instance *rtd;
+ int ret;
+ bool pdm_status;
+
+ rtd = substream->runtime->private_data;
+ ret = 0;
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ rtd->bytescount = acp_pdm_get_byte_count(rtd,
+ substream->stream);
+ pdm_status = check_pdm_dma_status(rtd->acp_base);
+ if (!pdm_status)
+ ret = start_pdm_dma(rtd->acp_base);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ pdm_status = check_pdm_dma_status(rtd->acp_base);
+ if (pdm_status)
+ ret = stop_pdm_dma(rtd->acp_base);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static struct snd_soc_dai_ops acp_pdm_dai_ops = {
+ .hw_params = acp_pdm_dai_hw_params,
+ .trigger = acp_pdm_dai_trigger,
+};
+
+static struct snd_soc_dai_driver acp_pdm_dai_driver = {
+ .capture = {
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ },
+ .ops = &acp_pdm_dai_ops,
+};
+
+static const struct snd_soc_component_driver acp_pdm_component = {
+ .name = DRV_NAME,
+ .open = acp_pdm_dma_open,
+ .close = acp_pdm_dma_close,
+ .hw_params = acp_pdm_dma_hw_params,
+ .pointer = acp_pdm_dma_pointer,
+ .mmap = acp_pdm_dma_mmap,
+ .pcm_construct = acp_pdm_dma_new,
+};
+
+static int acp_pdm_audio_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct pdm_dev_data *adata;
+ unsigned int irqflags;
+ int status;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "platform_data not retrieved\n");
+ return -ENODEV;
+ }
+ irqflags = *((unsigned int *)(pdev->dev.platform_data));
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n");
+ return -ENODEV;
+ }
+
+ adata = devm_kzalloc(&pdev->dev, sizeof(*adata), GFP_KERNEL);
+ if (!adata)
+ return -ENOMEM;
+
+ adata->acp_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!adata->acp_base)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
+ return -ENODEV;
+ }
+
+ adata->pdm_irq = res->start;
+ adata->capture_stream = NULL;
+
+ dev_set_drvdata(&pdev->dev, adata);
+ status = devm_snd_soc_register_component(&pdev->dev,
+ &acp_pdm_component,
+ &acp_pdm_dai_driver, 1);
+ if (status) {
+ dev_err(&pdev->dev, "Fail to register acp pdm dai\n");
+
+ return -ENODEV;
+ }
+ status = devm_request_irq(&pdev->dev, adata->pdm_irq, pdm_irq_handler,
+ irqflags, "ACP_PDM_IRQ", adata);
+ if (status) {
+ dev_err(&pdev->dev, "ACP PDM IRQ request failed\n");
+ return -ENODEV;
+ }
+ pm_runtime_set_autosuspend_delay(&pdev->dev, ACP_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+ return 0;
+}
+
+static int acp_pdm_audio_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int acp_pdm_resume(struct device *dev)
+{
+ struct pdm_dev_data *adata;
+ struct snd_pcm_runtime *runtime;
+ struct pdm_stream_instance *rtd;
+ u32 period_bytes, buffer_len;
+
+ adata = dev_get_drvdata(dev);
+ if (adata->capture_stream && adata->capture_stream->runtime) {
+ runtime = adata->capture_stream->runtime;
+ rtd = runtime->private_data;
+ period_bytes = frames_to_bytes(runtime, runtime->period_size);
+ buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
+ config_acp_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
+ init_pdm_ring_buffer(MEM_WINDOW_START, buffer_len, period_bytes,
+ adata->acp_base);
+ }
+ enable_pdm_interrupts(adata->acp_base);
+ return 0;
+}
+
+static int acp_pdm_runtime_suspend(struct device *dev)
+{
+ struct pdm_dev_data *adata;
+
+ adata = dev_get_drvdata(dev);
+ disable_pdm_interrupts(adata->acp_base);
+
+ return 0;
+}
+
+static int acp_pdm_runtime_resume(struct device *dev)
+{
+ struct pdm_dev_data *adata;
+
+ adata = dev_get_drvdata(dev);
+ enable_pdm_interrupts(adata->acp_base);
+ return 0;
+}
+
+static const struct dev_pm_ops acp_pdm_pm_ops = {
+ .runtime_suspend = acp_pdm_runtime_suspend,
+ .runtime_resume = acp_pdm_runtime_resume,
+ .resume = acp_pdm_resume,
+};
+
+static struct platform_driver acp_pdm_dma_driver = {
+ .probe = acp_pdm_audio_probe,
+ .remove = acp_pdm_audio_remove,
+ .driver = {
+ .name = "acp_rn_pdm_dma",
+ .pm = &acp_pdm_pm_ops,
+ },
+};
+
+module_platform_driver(acp_pdm_dma_driver);
+
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_DESCRIPTION("AMD ACP3x Renior PDM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/amd/renoir/acp3x-rn.c b/sound/soc/amd/renoir/acp3x-rn.c
new file mode 100644
index 000000000000..306134b89a82
--- /dev/null
+++ b/sound/soc/amd/renoir/acp3x-rn.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Machine driver for AMD Renoir platform using DMIC
+//
+// Copyright 2020 Advanced Micro Devices, Inc.
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <linux/io.h>
+
+#include "rn_acp3x.h"
+
+#define DRV_NAME "acp_pdm_mach"
+
+SND_SOC_DAILINK_DEF(acp_pdm,
+ DAILINK_COMP_ARRAY(COMP_CPU("acp_rn_pdm_dma.0")));
+
+SND_SOC_DAILINK_DEF(dmic_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec.0",
+ "dmic-hifi")));
+
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("acp_rn_pdm_dma.0")));
+
+static struct snd_soc_dai_link acp_dai_pdm[] = {
+ {
+ .name = "acp3x-dmic-capture",
+ .stream_name = "DMIC capture",
+ .capture_only = 1,
+ SND_SOC_DAILINK_REG(acp_pdm, dmic_codec, platform),
+ },
+};
+
+static struct snd_soc_card acp_card = {
+ .name = "acp",
+ .owner = THIS_MODULE,
+ .dai_link = acp_dai_pdm,
+ .num_links = 1,
+};
+
+static int acp_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct acp_pdm *machine = NULL;
+ struct snd_soc_card *card;
+
+ card = &acp_card;
+ acp_card.dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "snd_soc_register_card(%s) failed: %d\n",
+ acp_card.name, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static struct platform_driver acp_mach_driver = {
+ .driver = {
+ .name = "acp_pdm_mach",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = acp_probe,
+};
+
+module_platform_driver(acp_mach_driver);
+
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/amd/renoir/rn-pci-acp3x.c b/sound/soc/amd/renoir/rn-pci-acp3x.c
new file mode 100644
index 000000000000..859ed67b93cf
--- /dev/null
+++ b/sound/soc/amd/renoir/rn-pci-acp3x.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// AMD Renoir ACP PCI Driver
+//
+// Copyright 2020 Advanced Micro Devices, Inc.
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+
+#include "rn_acp3x.h"
+
+static int acp_power_gating;
+module_param(acp_power_gating, int, 0644);
+MODULE_PARM_DESC(acp_power_gating, "Enable ACP power gating");
+
+struct acp_dev_data {
+ void __iomem *acp_base;
+ struct resource *res;
+ struct platform_device *pdev[ACP_DEVS];
+};
+
+static int rn_acp_power_on(void __iomem *acp_base)
+{
+ u32 val;
+ int timeout;
+
+ val = rn_readl(acp_base + ACP_PGFSM_STATUS);
+
+ if (val == 0)
+ return val;
+
+ if ((val & ACP_PGFSM_STATUS_MASK) !=
+ ACP_POWER_ON_IN_PROGRESS)
+ rn_writel(ACP_PGFSM_CNTL_POWER_ON_MASK,
+ acp_base + ACP_PGFSM_CONTROL);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rn_readl(acp_base + ACP_PGFSM_STATUS);
+ if (!val)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int rn_acp_power_off(void __iomem *acp_base)
+{
+ u32 val;
+ int timeout;
+
+ rn_writel(ACP_PGFSM_CNTL_POWER_OFF_MASK,
+ acp_base + ACP_PGFSM_CONTROL);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rn_readl(acp_base + ACP_PGFSM_STATUS);
+ if ((val & ACP_PGFSM_STATUS_MASK) == ACP_POWERED_OFF)
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int rn_acp_reset(void __iomem *acp_base)
+{
+ u32 val;
+ int timeout;
+
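+ /*
+ * Assert soft reset and wait for the audio-done flag, then de-assert
+ * it and wait for the register to read back as zero.
+ */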
+ rn_writel(1, acp_base + ACP_SOFT_RESET);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rn_readl(acp_base + ACP_SOFT_RESET);
+ if (val & ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK)
+ break;
+ cpu_relax();
+ }
+ rn_writel(0, acp_base + ACP_SOFT_RESET);
+ timeout = 0;
+ while (++timeout < 500) {
+ val = rn_readl(acp_base + ACP_SOFT_RESET);
+ if (!val)
+ return 0;
+ cpu_relax();
+ }
+ return -ETIMEDOUT;
+}
+
+static void rn_acp_enable_interrupts(void __iomem *acp_base)
+{
+ u32 ext_intr_ctrl;
+
+ rn_writel(0x01, acp_base + ACP_EXTERNAL_INTR_ENB);
+ ext_intr_ctrl = rn_readl(acp_base + ACP_EXTERNAL_INTR_CNTL);
+ ext_intr_ctrl |= ACP_ERROR_MASK;
+ rn_writel(ext_intr_ctrl, acp_base + ACP_EXTERNAL_INTR_CNTL);
+}
+
+static void rn_acp_disable_interrupts(void __iomem *acp_base)
+{
+ rn_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base +
+ ACP_EXTERNAL_INTR_STAT);
+ rn_writel(0x00, acp_base + ACP_EXTERNAL_INTR_ENB);
+}
+
+static int rn_acp_init(void __iomem *acp_base)
+{
+ int ret;
+
+ /* power on */
+ ret = rn_acp_power_on(acp_base);
+ if (ret) {
+ pr_err("ACP power on failed\n");
+ return ret;
+ }
+ rn_writel(0x01, acp_base + ACP_CONTROL);
+ /* Reset */
+ ret = rn_acp_reset(acp_base);
+ if (ret) {
+ pr_err("ACP reset failed\n");
+ return ret;
+ }
+ rn_writel(0x03, acp_base + ACP_CLKMUX_SEL);
+ rn_acp_enable_interrupts(acp_base);
+ return 0;
+}
+
+static int rn_acp_deinit(void __iomem *acp_base)
+{
+ int ret;
+
+ rn_acp_disable_interrupts(acp_base);
+ /* Reset */
+ ret = rn_acp_reset(acp_base);
+ if (ret) {
+ pr_err("ACP reset failed\n");
+ return ret;
+ }
+ rn_writel(0x00, acp_base + ACP_CLKMUX_SEL);
+ rn_writel(0x00, acp_base + ACP_CONTROL);
+ /* power off */
+ if (acp_power_gating) {
+ ret = rn_acp_power_off(acp_base);
+ if (ret) {
+ pr_err("ACP power off failed\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int snd_rn_acp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct acp_dev_data *adata;
+ struct platform_device_info pdevinfo[ACP_DEVS];
+ unsigned int irqflags;
+ int ret, index;
+ u32 addr;
+
+ if (pci_enable_device(pci)) {
+ dev_err(&pci->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+
+ ret = pci_request_regions(pci, "AMD ACP3x audio");
+ if (ret < 0) {
+ dev_err(&pci->dev, "pci_request_regions failed\n");
+ goto disable_pci;
+ }
+
+ adata = devm_kzalloc(&pci->dev, sizeof(struct acp_dev_data),
+ GFP_KERNEL);
+ if (!adata) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+
+ /* check for msi interrupt support */
+ ret = pci_enable_msi(pci);
+ if (ret)
+ /* msi is not enabled */
+ irqflags = IRQF_SHARED;
+ else
+ /* msi is enabled */
+ irqflags = 0;
+
+ addr = pci_resource_start(pci, 0);
+ adata->acp_base = devm_ioremap(&pci->dev, addr,
+ pci_resource_len(pci, 0));
+ if (!adata->acp_base) {
+ ret = -ENOMEM;
+ goto disable_msi;
+ }
+ pci_set_master(pci);
+ pci_set_drvdata(pci, adata);
+ ret = rn_acp_init(adata->acp_base);
+ if (ret)
+ goto disable_msi;
+
+ adata->res = devm_kzalloc(&pci->dev,
+ sizeof(struct resource) * 2,
+ GFP_KERNEL);
+ if (!adata->res) {
+ ret = -ENOMEM;
+ goto de_init;
+ }
+
+ adata->res[0].name = "acp_pdm_iomem";
+ adata->res[0].flags = IORESOURCE_MEM;
+ adata->res[0].start = addr;
+ adata->res[0].end = addr + (ACP_REG_END - ACP_REG_START);
+ adata->res[1].name = "acp_pdm_irq";
+ adata->res[1].flags = IORESOURCE_IRQ;
+ adata->res[1].start = pci->irq;
+ adata->res[1].end = pci->irq;
+
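+ /*
+ * Populate info for the three ACP child devices: the PDM DMA/DAI
+ * driver, the generic dmic-codec and the machine driver that ties
+ * them together.
+ */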
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo[0].name = "acp_rn_pdm_dma";
+ pdevinfo[0].id = 0;
+ pdevinfo[0].parent = &pci->dev;
+ pdevinfo[0].num_res = 2;
+ pdevinfo[0].res = adata->res;
+ pdevinfo[0].data = &irqflags;
+ pdevinfo[0].size_data = sizeof(irqflags);
+
+ pdevinfo[1].name = "dmic-codec";
+ pdevinfo[1].id = 0;
+ pdevinfo[1].parent = &pci->dev;
+ pdevinfo[2].name = "acp_pdm_mach";
+ pdevinfo[2].id = 0;
+ pdevinfo[2].parent = &pci->dev;
+ for (index = 0; index < ACP_DEVS; index++) {
+ adata->pdev[index] =
+ platform_device_register_full(&pdevinfo[index]);
+ if (IS_ERR(adata->pdev[index])) {
+ dev_err(&pci->dev, "cannot register %s device\n",
+ pdevinfo[index].name);
+ ret = PTR_ERR(adata->pdev[index]);
+ goto unregister_devs;
+ }
+ }
+ pm_runtime_set_autosuspend_delay(&pci->dev, ACP_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pci->dev);
+ pm_runtime_put_noidle(&pci->dev);
+ pm_runtime_allow(&pci->dev);
+ return 0;
+
+unregister_devs:
+ for (index = 0; index < ACP_DEVS; index++)
+ platform_device_unregister(adata->pdev[index]);
+de_init:
+ if (rn_acp_deinit(adata->acp_base))
+ dev_err(&pci->dev, "ACP de-init failed\n");
+disable_msi:
+ pci_disable_msi(pci);
+release_regions:
+ pci_release_regions(pci);
+disable_pci:
+ pci_disable_device(pci);
+
+ return ret;
+}
+
+static int snd_rn_acp_suspend(struct device *dev)
+{
+ int ret;
+ struct acp_dev_data *adata;
+
+ adata = dev_get_drvdata(dev);
+ ret = rn_acp_deinit(adata->acp_base);
+ if (ret)
+ dev_err(dev, "ACP de-init failed\n");
+ else
+ dev_dbg(dev, "ACP de-initialized\n");
+
+ return ret;
+}
+
+static int snd_rn_acp_resume(struct device *dev)
+{
+ int ret;
+ struct acp_dev_data *adata;
+
+ adata = dev_get_drvdata(dev);
+ ret = rn_acp_init(adata->acp_base);
+ if (ret) {
+ dev_err(dev, "ACP init failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops rn_acp_pm = {
+ .runtime_suspend = snd_rn_acp_suspend,
+ .runtime_resume = snd_rn_acp_resume,
+ .suspend = snd_rn_acp_suspend,
+ .resume = snd_rn_acp_resume,
+};
+
+static void snd_rn_acp_remove(struct pci_dev *pci)
+{
+ struct acp_dev_data *adata;
+ int ret, index;
+
+ adata = pci_get_drvdata(pci);
+ for (index = 0; index < ACP_DEVS; index++)
+ platform_device_unregister(adata->pdev[index]);
+ ret = rn_acp_deinit(adata->acp_base);
+ if (ret)
+ dev_err(&pci->dev, "ACP de-init failed\n");
+ pm_runtime_forbid(&pci->dev);
+ pm_runtime_get_noresume(&pci->dev);
+ pci_disable_msi(pci);
+ pci_release_regions(pci);
+ pci_disable_device(pci);
+}
+
+static const struct pci_device_id snd_rn_acp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, ACP_DEVICE_ID),
+ .class = PCI_CLASS_MULTIMEDIA_OTHER << 8,
+ .class_mask = 0xffffff },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, snd_rn_acp_ids);
+
+static struct pci_driver rn_acp_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = snd_rn_acp_ids,
+ .probe = snd_rn_acp_probe,
+ .remove = snd_rn_acp_remove,
+ .driver = {
+ .pm = &rn_acp_pm,
+ }
+};
+
+module_pci_driver(rn_acp_driver);
+
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_DESCRIPTION("AMD ACP Renoir PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/amd/renoir/rn_acp3x.h b/sound/soc/amd/renoir/rn_acp3x.h
new file mode 100644
index 000000000000..75228e306e0b
--- /dev/null
+++ b/sound/soc/amd/renoir/rn_acp3x.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ALSA SoC PDM Driver
+ *
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ */
+
+#include "rn_chip_offset_byte.h"
+
+#define ACP_DEVS 3
+#define ACP_PHY_BASE_ADDRESS 0x1240000
+#define ACP_REG_START 0x1240000
+#define ACP_REG_END 0x1250200
+
+#define ACP_DEVICE_ID 0x15E2
+#define ACP_POWER_ON 0x00
+#define ACP_POWER_ON_IN_PROGRESS 0x01
+#define ACP_POWER_OFF 0x02
+#define ACP_POWER_OFF_IN_PROGRESS 0x03
+#define ACP_SOFT_RESET_SOFTRESET_AUDDONE_MASK 0x00010001
+
+#define ACP_PGFSM_CNTL_POWER_ON_MASK 0x01
+#define ACP_PGFSM_CNTL_POWER_OFF_MASK 0x00
+#define ACP_PGFSM_STATUS_MASK 0x03
+#define ACP_POWERED_ON 0x00
+#define ACP_POWER_ON_IN_PROGRESS 0x01
+#define ACP_POWERED_OFF 0x02
+#define ACP_POWER_OFF_IN_PROGRESS 0x03
+
+#define ACP_ERROR_MASK 0x20000000
+#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF
+#define PDM_DMA_STAT 0x10
+#define PDM_DMA_INTR_MASK 0x10000
+#define ACP_ERROR_STAT 29
+#define PDM_DECIMATION_FACTOR 0x2
+#define ACP_PDM_CLK_FREQ_MASK 0x07
+#define ACP_WOV_MISC_CTRL_MASK 0x10
+#define ACP_PDM_ENABLE 0x01
+#define ACP_PDM_DISABLE 0x00
+#define ACP_PDM_DMA_EN_STATUS 0x02
+#define TWO_CH 0x02
+#define DELAY_US 5
+#define ACP_COUNTER 20000
+/* time in ms for runtime suspend delay */
+#define ACP_SUSPEND_DELAY_MS 2000
+
+#define ACP_SRAM_PTE_OFFSET 0x02050000
+#define PAGE_SIZE_4K_ENABLE 0x2
+#define MEM_WINDOW_START 0x4000000
+
+#define CAPTURE_MIN_NUM_PERIODS 4
+#define CAPTURE_MAX_NUM_PERIODS 4
+#define CAPTURE_MAX_PERIOD_SIZE 8192
+#define CAPTURE_MIN_PERIOD_SIZE 4096
+
+#define MAX_BUFFER (CAPTURE_MAX_PERIOD_SIZE * CAPTURE_MAX_NUM_PERIODS)
+#define MIN_BUFFER MAX_BUFFER
+struct pdm_dev_data {
+ u32 pdm_irq;
+ void __iomem *acp_base;
+ struct snd_pcm_substream *capture_stream;
+};
+
+struct pdm_stream_instance {
+ u16 num_pages;
+ u16 channels;
+ dma_addr_t dma_addr;
+ u64 bytescount;
+ void __iomem *acp_base;
+};
+
+union acp_pdm_dma_count {
+ struct {
+ u32 low;
+ u32 high;
+ } bcount;
+ u64 bytescount;
+};
+
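+/*
+ * The register macros in rn_chip_offset_byte.h are ACP physical register
+ * addresses; callers pass "acp_base + <register macro>", so subtracting
+ * ACP_PHY_BASE_ADDRESS converts that back into an offset from the
+ * ioremapped MMIO base.
+ */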
+static inline u32 rn_readl(void __iomem *base_addr)
+{
+ return readl(base_addr - ACP_PHY_BASE_ADDRESS);
+}
+
+static inline void rn_writel(u32 val, void __iomem *base_addr)
+{
+ writel(val, base_addr - ACP_PHY_BASE_ADDRESS);
+}
diff --git a/sound/soc/amd/renoir/rn_chip_offset_byte.h b/sound/soc/amd/renoir/rn_chip_offset_byte.h
new file mode 100644
index 000000000000..d20d967b5ff9
--- /dev/null
+++ b/sound/soc/amd/renoir/rn_chip_offset_byte.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 3.1 Register Documentation
+ *
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _rn_OFFSET_HEADER
+#define _rn_OFFSET_HEADER
+// Registers from ACP_DMA block
+
+#define ACP_DMA_CNTL_0 0x1240000
+#define ACP_DMA_CNTL_1 0x1240004
+#define ACP_DMA_CNTL_2 0x1240008
+#define ACP_DMA_CNTL_3 0x124000C
+#define ACP_DMA_CNTL_4 0x1240010
+#define ACP_DMA_CNTL_5 0x1240014
+#define ACP_DMA_CNTL_6 0x1240018
+#define ACP_DMA_CNTL_7 0x124001C
+#define ACP_DMA_DSCR_STRT_IDX_0 0x1240020
+#define ACP_DMA_DSCR_STRT_IDX_1 0x1240024
+#define ACP_DMA_DSCR_STRT_IDX_2 0x1240028
+#define ACP_DMA_DSCR_STRT_IDX_3 0x124002C
+#define ACP_DMA_DSCR_STRT_IDX_4 0x1240030
+#define ACP_DMA_DSCR_STRT_IDX_5 0x1240034
+#define ACP_DMA_DSCR_STRT_IDX_6 0x1240038
+#define ACP_DMA_DSCR_STRT_IDX_7 0x124003C
+#define ACP_DMA_DSCR_CNT_0 0x1240040
+#define ACP_DMA_DSCR_CNT_1 0x1240044
+#define ACP_DMA_DSCR_CNT_2 0x1240048
+#define ACP_DMA_DSCR_CNT_3 0x124004C
+#define ACP_DMA_DSCR_CNT_4 0x1240050
+#define ACP_DMA_DSCR_CNT_5 0x1240054
+#define ACP_DMA_DSCR_CNT_6 0x1240058
+#define ACP_DMA_DSCR_CNT_7 0x124005C
+#define ACP_DMA_PRIO_0 0x1240060
+#define ACP_DMA_PRIO_1 0x1240064
+#define ACP_DMA_PRIO_2 0x1240068
+#define ACP_DMA_PRIO_3 0x124006C
+#define ACP_DMA_PRIO_4 0x1240070
+#define ACP_DMA_PRIO_5 0x1240074
+#define ACP_DMA_PRIO_6 0x1240078
+#define ACP_DMA_PRIO_7 0x124007C
+#define ACP_DMA_CUR_DSCR_0 0x1240080
+#define ACP_DMA_CUR_DSCR_1 0x1240084
+#define ACP_DMA_CUR_DSCR_2 0x1240088
+#define ACP_DMA_CUR_DSCR_3 0x124008C
+#define ACP_DMA_CUR_DSCR_4 0x1240090
+#define ACP_DMA_CUR_DSCR_5 0x1240094
+#define ACP_DMA_CUR_DSCR_6 0x1240098
+#define ACP_DMA_CUR_DSCR_7 0x124009C
+#define ACP_DMA_CUR_TRANS_CNT_0 0x12400A0
+#define ACP_DMA_CUR_TRANS_CNT_1 0x12400A4
+#define ACP_DMA_CUR_TRANS_CNT_2 0x12400A8
+#define ACP_DMA_CUR_TRANS_CNT_3 0x12400AC
+#define ACP_DMA_CUR_TRANS_CNT_4 0x12400B0
+#define ACP_DMA_CUR_TRANS_CNT_5 0x12400B4
+#define ACP_DMA_CUR_TRANS_CNT_6 0x12400B8
+#define ACP_DMA_CUR_TRANS_CNT_7 0x12400BC
+#define ACP_DMA_ERR_STS_0 0x12400C0
+#define ACP_DMA_ERR_STS_1 0x12400C4
+#define ACP_DMA_ERR_STS_2 0x12400C8
+#define ACP_DMA_ERR_STS_3 0x12400CC
+#define ACP_DMA_ERR_STS_4 0x12400D0
+#define ACP_DMA_ERR_STS_5 0x12400D4
+#define ACP_DMA_ERR_STS_6 0x12400D8
+#define ACP_DMA_ERR_STS_7 0x12400DC
+#define ACP_DMA_DESC_BASE_ADDR 0x12400E0
+#define ACP_DMA_DESC_MAX_NUM_DSCR 0x12400E4
+#define ACP_DMA_CH_STS 0x12400E8
+#define ACP_DMA_CH_GROUP 0x12400EC
+#define ACP_DMA_CH_RST_STS 0x12400F0
+
+// Registers from ACP_AXI2AXIATU block
+
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x1240C00
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x1240C04
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x1240C08
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x1240C0C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x1240C10
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x1240C14
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x1240C18
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x1240C1C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x1240C20
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x1240C24
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x1240C28
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x1240C2C
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x1240C30
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x1240C34
+#define ACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x1240C38
+#define ACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x1240C3C
+#define ACPAXI2AXI_ATU_CTRL 0x1240C40
+
+// Registers from ACP_CLKRST block
+
+#define ACP_SOFT_RESET 0x1241000
+#define ACP_CONTROL 0x1241004
+#define ACP_STATUS 0x1241008
+#define ACP_DYNAMIC_CG_MASTER_CONTROL 0x1241010
+
+// Registers from ACP_MISC block
+
+#define ACP_EXTERNAL_INTR_ENB 0x1241800
+#define ACP_EXTERNAL_INTR_CNTL 0x1241804
+#define ACP_EXTERNAL_INTR_STAT 0x1241808
+#define ACP_PGMEM_CTRL 0x12418C0
+#define ACP_ERROR_STATUS 0x12418C4
+#define ACP_SW_I2S_ERROR_REASON 0x12418C8
+#define ACP_MEM_PG_STS 0x12418CC
+
+// Registers from ACP_PGFSM block
+
+#define ACP_I2S_PIN_CONFIG 0x1241400
+#define ACP_PAD_PULLUP_PULLDOWN_CTRL 0x1241404
+#define ACP_PAD_DRIVE_STRENGTH_CTRL 0x1241408
+#define ACP_SW_PAD_KEEPER_EN 0x124140C
+#define ACP_PGFSM_CONTROL 0x124141C
+#define ACP_PGFSM_STATUS 0x1241420
+#define ACP_CLKMUX_SEL 0x1241424
+#define ACP_DEVICE_STATE 0x1241428
+#define AZ_DEVICE_STATE 0x124142C
+#define ACP_INTR_URGENCY_TIMER 0x1241430
+#define AZ_INTR_URGENCY_TIMER 0x1241434
+
+// Registers from ACP_SCRATCH block
+
+#define ACP_SCRATCH_REG_0 0x1250000
+#define ACP_SCRATCH_REG_1 0x1250004
+#define ACP_SCRATCH_REG_2 0x1250008
+#define ACP_SCRATCH_REG_3 0x125000C
+#define ACP_SCRATCH_REG_4 0x1250010
+#define ACP_SCRATCH_REG_5 0x1250014
+#define ACP_SCRATCH_REG_6 0x1250018
+#define ACP_SCRATCH_REG_7 0x125001C
+#define ACP_SCRATCH_REG_8 0x1250020
+#define ACP_SCRATCH_REG_9 0x1250024
+#define ACP_SCRATCH_REG_10 0x1250028
+#define ACP_SCRATCH_REG_11 0x125002C
+#define ACP_SCRATCH_REG_12 0x1250030
+#define ACP_SCRATCH_REG_13 0x1250034
+#define ACP_SCRATCH_REG_14 0x1250038
+#define ACP_SCRATCH_REG_15 0x125003C
+#define ACP_SCRATCH_REG_16 0x1250040
+#define ACP_SCRATCH_REG_17 0x1250044
+#define ACP_SCRATCH_REG_18 0x1250048
+#define ACP_SCRATCH_REG_19 0x125004C
+#define ACP_SCRATCH_REG_20 0x1250050
+#define ACP_SCRATCH_REG_21 0x1250054
+#define ACP_SCRATCH_REG_22 0x1250058
+#define ACP_SCRATCH_REG_23 0x125005C
+#define ACP_SCRATCH_REG_24 0x1250060
+#define ACP_SCRATCH_REG_25 0x1250064
+#define ACP_SCRATCH_REG_26 0x1250068
+#define ACP_SCRATCH_REG_27 0x125006C
+#define ACP_SCRATCH_REG_28 0x1250070
+#define ACP_SCRATCH_REG_29 0x1250074
+#define ACP_SCRATCH_REG_30 0x1250078
+#define ACP_SCRATCH_REG_31 0x125007C
+#define ACP_SCRATCH_REG_32 0x1250080
+#define ACP_SCRATCH_REG_33 0x1250084
+#define ACP_SCRATCH_REG_34 0x1250088
+#define ACP_SCRATCH_REG_35 0x125008C
+#define ACP_SCRATCH_REG_36 0x1250090
+#define ACP_SCRATCH_REG_37 0x1250094
+#define ACP_SCRATCH_REG_38 0x1250098
+#define ACP_SCRATCH_REG_39 0x125009C
+#define ACP_SCRATCH_REG_40 0x12500A0
+#define ACP_SCRATCH_REG_41 0x12500A4
+#define ACP_SCRATCH_REG_42 0x12500A8
+#define ACP_SCRATCH_REG_43 0x12500AC
+#define ACP_SCRATCH_REG_44 0x12500B0
+#define ACP_SCRATCH_REG_45 0x12500B4
+#define ACP_SCRATCH_REG_46 0x12500B8
+#define ACP_SCRATCH_REG_47 0x12500BC
+#define ACP_SCRATCH_REG_48 0x12500C0
+#define ACP_SCRATCH_REG_49 0x12500C4
+#define ACP_SCRATCH_REG_50 0x12500C8
+#define ACP_SCRATCH_REG_51 0x12500CC
+#define ACP_SCRATCH_REG_52 0x12500D0
+#define ACP_SCRATCH_REG_53 0x12500D4
+#define ACP_SCRATCH_REG_54 0x12500D8
+#define ACP_SCRATCH_REG_55 0x12500DC
+#define ACP_SCRATCH_REG_56 0x12500E0
+#define ACP_SCRATCH_REG_57 0x12500E4
+#define ACP_SCRATCH_REG_58 0x12500E8
+#define ACP_SCRATCH_REG_59 0x12500EC
+#define ACP_SCRATCH_REG_60 0x12500F0
+#define ACP_SCRATCH_REG_61 0x12500F4
+#define ACP_SCRATCH_REG_62 0x12500F8
+#define ACP_SCRATCH_REG_63 0x12500FC
+#define ACP_SCRATCH_REG_64 0x1250100
+#define ACP_SCRATCH_REG_65 0x1250104
+#define ACP_SCRATCH_REG_66 0x1250108
+#define ACP_SCRATCH_REG_67 0x125010C
+#define ACP_SCRATCH_REG_68 0x1250110
+#define ACP_SCRATCH_REG_69 0x1250114
+#define ACP_SCRATCH_REG_70 0x1250118
+#define ACP_SCRATCH_REG_71 0x125011C
+#define ACP_SCRATCH_REG_72 0x1250120
+#define ACP_SCRATCH_REG_73 0x1250124
+#define ACP_SCRATCH_REG_74 0x1250128
+#define ACP_SCRATCH_REG_75 0x125012C
+#define ACP_SCRATCH_REG_76 0x1250130
+#define ACP_SCRATCH_REG_77 0x1250134
+#define ACP_SCRATCH_REG_78 0x1250138
+#define ACP_SCRATCH_REG_79 0x125013C
+#define ACP_SCRATCH_REG_80 0x1250140
+#define ACP_SCRATCH_REG_81 0x1250144
+#define ACP_SCRATCH_REG_82 0x1250148
+#define ACP_SCRATCH_REG_83 0x125014C
+#define ACP_SCRATCH_REG_84 0x1250150
+#define ACP_SCRATCH_REG_85 0x1250154
+#define ACP_SCRATCH_REG_86 0x1250158
+#define ACP_SCRATCH_REG_87 0x125015C
+#define ACP_SCRATCH_REG_88 0x1250160
+#define ACP_SCRATCH_REG_89 0x1250164
+#define ACP_SCRATCH_REG_90 0x1250168
+#define ACP_SCRATCH_REG_91 0x125016C
+#define ACP_SCRATCH_REG_92 0x1250170
+#define ACP_SCRATCH_REG_93 0x1250174
+#define ACP_SCRATCH_REG_94 0x1250178
+#define ACP_SCRATCH_REG_95 0x125017C
+#define ACP_SCRATCH_REG_96 0x1250180
+#define ACP_SCRATCH_REG_97 0x1250184
+#define ACP_SCRATCH_REG_98 0x1250188
+#define ACP_SCRATCH_REG_99 0x125018C
+#define ACP_SCRATCH_REG_100 0x1250190
+#define ACP_SCRATCH_REG_101 0x1250194
+#define ACP_SCRATCH_REG_102 0x1250198
+#define ACP_SCRATCH_REG_103 0x125019C
+#define ACP_SCRATCH_REG_104 0x12501A0
+#define ACP_SCRATCH_REG_105 0x12501A4
+#define ACP_SCRATCH_REG_106 0x12501A8
+#define ACP_SCRATCH_REG_107 0x12501AC
+#define ACP_SCRATCH_REG_108 0x12501B0
+#define ACP_SCRATCH_REG_109 0x12501B4
+#define ACP_SCRATCH_REG_110 0x12501B8
+#define ACP_SCRATCH_REG_111 0x12501BC
+#define ACP_SCRATCH_REG_112 0x12501C0
+#define ACP_SCRATCH_REG_113 0x12501C4
+#define ACP_SCRATCH_REG_114 0x12501C8
+#define ACP_SCRATCH_REG_115 0x12501CC
+#define ACP_SCRATCH_REG_116 0x12501D0
+#define ACP_SCRATCH_REG_117 0x12501D4
+#define ACP_SCRATCH_REG_118 0x12501D8
+#define ACP_SCRATCH_REG_119 0x12501DC
+#define ACP_SCRATCH_REG_120 0x12501E0
+#define ACP_SCRATCH_REG_121 0x12501E4
+#define ACP_SCRATCH_REG_122 0x12501E8
+#define ACP_SCRATCH_REG_123 0x12501EC
+#define ACP_SCRATCH_REG_124 0x12501F0
+#define ACP_SCRATCH_REG_125 0x12501F4
+#define ACP_SCRATCH_REG_126 0x12501F8
+#define ACP_SCRATCH_REG_127 0x12501FC
+#define ACP_SCRATCH_REG_128 0x1250200
+
+// Registers from ACP_AUDIO_BUFFERS block
+
+#define ACP_I2S_RX_RINGBUFADDR 0x1242000
+#define ACP_I2S_RX_RINGBUFSIZE 0x1242004
+#define ACP_I2S_RX_LINKPOSITIONCNTR 0x1242008
+#define ACP_I2S_RX_FIFOADDR 0x124200C
+#define ACP_I2S_RX_FIFOSIZE 0x1242010
+#define ACP_I2S_RX_DMA_SIZE 0x1242014
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x1242018
+#define ACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x124201C
+#define ACP_I2S_RX_INTR_WATERMARK_SIZE 0x1242020
+#define ACP_I2S_TX_RINGBUFADDR 0x1242024
+#define ACP_I2S_TX_RINGBUFSIZE 0x1242028
+#define ACP_I2S_TX_LINKPOSITIONCNTR 0x124202C
+#define ACP_I2S_TX_FIFOADDR 0x1242030
+#define ACP_I2S_TX_FIFOSIZE 0x1242034
+#define ACP_I2S_TX_DMA_SIZE 0x1242038
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x124203C
+#define ACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x1242040
+#define ACP_I2S_TX_INTR_WATERMARK_SIZE 0x1242044
+#define ACP_BT_RX_RINGBUFADDR 0x1242048
+#define ACP_BT_RX_RINGBUFSIZE 0x124204C
+#define ACP_BT_RX_LINKPOSITIONCNTR 0x1242050
+#define ACP_BT_RX_FIFOADDR 0x1242054
+#define ACP_BT_RX_FIFOSIZE 0x1242058
+#define ACP_BT_RX_DMA_SIZE 0x124205C
+#define ACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x1242060
+#define ACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x1242064
+#define ACP_BT_RX_INTR_WATERMARK_SIZE 0x1242068
+#define ACP_BT_TX_RINGBUFADDR 0x124206C
+#define ACP_BT_TX_RINGBUFSIZE 0x1242070
+#define ACP_BT_TX_LINKPOSITIONCNTR 0x1242074
+#define ACP_BT_TX_FIFOADDR 0x1242078
+#define ACP_BT_TX_FIFOSIZE 0x124207C
+#define ACP_BT_TX_DMA_SIZE 0x1242080
+#define ACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x1242084
+#define ACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x1242088
+#define ACP_BT_TX_INTR_WATERMARK_SIZE 0x124208C
+#define ACP_HS_RX_RINGBUFADDR 0x1242090
+#define ACP_HS_RX_RINGBUFSIZE 0x1242094
+#define ACP_HS_RX_LINKPOSITIONCNTR 0x1242098
+#define ACP_HS_RX_FIFOADDR 0x124209C
+#define ACP_HS_RX_FIFOSIZE 0x12420A0
+#define ACP_HS_RX_DMA_SIZE 0x12420A4
+#define ACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x12420A8
+#define ACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x12420AC
+#define ACP_HS_RX_INTR_WATERMARK_SIZE 0x12420B0
+#define ACP_HS_TX_RINGBUFADDR 0x12420B4
+#define ACP_HS_TX_RINGBUFSIZE 0x12420B8
+#define ACP_HS_TX_LINKPOSITIONCNTR 0x12420BC
+#define ACP_HS_TX_FIFOADDR 0x12420C0
+#define ACP_HS_TX_FIFOSIZE 0x12420C4
+#define ACP_HS_TX_DMA_SIZE 0x12420C8
+#define ACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x12420CC
+#define ACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x12420D0
+#define ACP_HS_TX_INTR_WATERMARK_SIZE 0x12420D4
+
+// Registers from ACP_I2S_TDM block
+
+#define ACP_I2STDM_IER 0x1242400
+#define ACP_I2STDM_IRER 0x1242404
+#define ACP_I2STDM_RXFRMT 0x1242408
+#define ACP_I2STDM_ITER 0x124240C
+#define ACP_I2STDM_TXFRMT 0x1242410
+
+// Registers from ACP_BT_TDM block
+
+#define ACP_BTTDM_IER 0x1242800
+#define ACP_BTTDM_IRER 0x1242804
+#define ACP_BTTDM_RXFRMT 0x1242808
+#define ACP_BTTDM_ITER 0x124280C
+#define ACP_BTTDM_TXFRMT 0x1242810
+
+// Registers from ACP_WOV block
+
+#define ACP_WOV_PDM_ENABLE 0x1242C04
+#define ACP_WOV_PDM_DMA_ENABLE 0x1242C08
+#define ACP_WOV_RX_RINGBUFADDR 0x1242C0C
+#define ACP_WOV_RX_RINGBUFSIZE 0x1242C10
+#define ACP_WOV_RX_LINKPOSITIONCNTR 0x1242C14
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_HIGH 0x1242C18
+#define ACP_WOV_RX_LINEARPOSITIONCNTR_LOW 0x1242C1C
+#define ACP_WOV_RX_INTR_WATERMARK_SIZE 0x1242C20
+#define ACP_WOV_PDM_FIFO_FLUSH 0x1242C24
+#define ACP_WOV_PDM_NO_OF_CHANNELS 0x1242C28
+#define ACP_WOV_PDM_DECIMATION_FACTOR 0x1242C2C
+#define ACP_WOV_PDM_VAD_CTRL 0x1242C30
+#define ACP_WOV_BUFFER_STATUS 0x1242C58
+#define ACP_WOV_MISC_CTRL 0x1242C5C
+#define ACP_WOV_CLK_CTRL 0x1242C60
+#define ACP_PDM_VAD_DYNAMIC_CLK_GATING_EN 0x1242C64
+#define ACP_WOV_ERROR_STATUS_REGISTER 0x1242C68
+#endif
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 1073f468f21f..0f18dfb85bfe 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -765,7 +765,7 @@ static int atmel_ssc_suspend(struct snd_soc_component *component)
struct atmel_ssc_info *ssc_p;
struct platform_device *pdev = to_platform_device(component->dev);
- if (!component->active)
+ if (!snd_soc_component_active(component))
return 0;
ssc_p = &ssc_info[pdev->id];
@@ -793,7 +793,7 @@ static int atmel_ssc_resume(struct snd_soc_component *component)
struct platform_device *pdev = to_platform_device(component->dev);
u32 cr;
- if (!component->active)
+ if (!snd_soc_component_active(component))
return 0;
ssc_p = &ssc_info[pdev->id];
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index e6a12e271b07..d80b570e950e 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -653,7 +653,7 @@ static void bcm2835_i2s_stop(struct bcm2835_i2s_dev *dev,
BCM2835_I2S_CS_A_REG, mask, 0);
/* Stop also the clock when not SND_SOC_DAIFMT_CONT */
- if (!dai->active && !(dev->fmt & SND_SOC_DAIFMT_CONT))
+ if (!snd_soc_dai_active(dai) && !(dev->fmt & SND_SOC_DAIFMT_CONT))
bcm2835_i2s_stop_clock(dev);
}
@@ -695,7 +695,7 @@ static int bcm2835_i2s_startup(struct snd_pcm_substream *substream,
{
struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return 0;
/* Should this still be running stop it */
@@ -723,7 +723,7 @@ static void bcm2835_i2s_shutdown(struct snd_pcm_substream *substream,
bcm2835_i2s_stop(dev, substream, dai);
/* If both streams are stopped, disable module and clock */
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return;
/* Disable the module */
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index 257f5048061e..6e634b448293 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -1056,7 +1056,7 @@ static int __cygnus_ssp_suspend(struct snd_soc_dai *cpu_dai)
{
struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
- if (!cpu_dai->active)
+ if (!snd_soc_dai_active(cpu_dai))
return 0;
if (!aio->is_slave) {
@@ -1097,7 +1097,7 @@ static int __cygnus_ssp_resume(struct snd_soc_dai *cpu_dai)
struct cygnus_aio_port *aio = cygnus_dai_get_portinfo(cpu_dai);
int error;
- if (!cpu_dai->active)
+ if (!snd_soc_dai_active(cpu_dai))
return 0;
if (!aio->is_slave) {
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 723f4cf19467..371708b17c09 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -368,7 +368,7 @@ static int ep93xx_i2s_suspend(struct snd_soc_component *component)
{
struct ep93xx_i2s_info *info = snd_soc_component_get_drvdata(component);
- if (!component->active)
+ if (!snd_soc_component_active(component))
return 0;
ep93xx_i2s_disable(info, SNDRV_PCM_STREAM_PLAYBACK);
@@ -381,7 +381,7 @@ static int ep93xx_i2s_resume(struct snd_soc_component *component)
{
struct ep93xx_i2s_info *info = snd_soc_component_get_drvdata(component);
- if (!component->active)
+ if (!snd_soc_component_active(component))
return 0;
ep93xx_i2s_enable(info, SNDRV_PCM_STREAM_PLAYBACK);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index e60e0b6a689c..986a6308818b 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -116,6 +116,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_MAX98926
imply SND_SOC_MAX98927
imply SND_SOC_MAX98373
+ imply SND_SOC_MAX98390
imply SND_SOC_MAX9850
imply SND_SOC_MAX9860
imply SND_SOC_MAX9759
@@ -167,7 +168,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_RT5668
imply SND_SOC_RT5670
imply SND_SOC_RT5677
- imply SND_SOC_RT5682
+ imply SND_SOC_RT5682_I2C
imply SND_SOC_RT5682_SDW
imply SND_SOC_RT700_SDW
imply SND_SOC_RT711_SDW
@@ -272,6 +273,7 @@ config SND_SOC_ALL_CODECS
imply SND_SOC_WM9712
imply SND_SOC_WM9713
imply SND_SOC_WSA881X
+ imply SND_SOC_ZL38060
help
Normally ASoC codec drivers are only built if a machine driver which
uses them is also built since they are only usable with a machine
@@ -537,8 +539,7 @@ config SND_SOC_CQ0093VC
config SND_SOC_CROS_EC_CODEC
tristate "codec driver for ChromeOS EC"
depends on CROS_EC
- select CRYPTO
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
help
If you say yes here you will get support for the
ChromeOS Embedded Controller's Audio Codec.
@@ -681,6 +682,7 @@ config SND_SOC_CX2072X
config SND_SOC_JZ4740_CODEC
depends on MIPS || COMPILE_TEST
+ depends on OF
select REGMAP_MMIO
tristate "Ingenic JZ4740 internal CODEC"
help
@@ -692,6 +694,7 @@ config SND_SOC_JZ4740_CODEC
config SND_SOC_JZ4725B_CODEC
depends on MIPS || COMPILE_TEST
+ depends on OF
select REGMAP
tristate "Ingenic JZ4725B internal CODEC"
help
@@ -703,6 +706,7 @@ config SND_SOC_JZ4725B_CODEC
config SND_SOC_JZ4770_CODEC
depends on MIPS || COMPILE_TEST
+ depends on OF
select REGMAP
tristate "Ingenic JZ4770 internal CODEC"
help
@@ -717,7 +721,7 @@ config SND_SOC_L3
config SND_SOC_DA7210
tristate
- depends on I2C
+ depends on SND_SOC_I2C_AND_SPI
config SND_SOC_DA7213
tristate "Dialog DA7213 CODEC"
@@ -867,6 +871,10 @@ config SND_SOC_MAX98373
tristate "Maxim Integrated MAX98373 Speaker Amplifier"
depends on I2C
+config SND_SOC_MAX98390
+ tristate "Maxim Integrated MAX98390 Speaker Amplifier"
+ depends on I2C
+
config SND_SOC_MAX9850
tristate
depends on I2C
@@ -1135,7 +1143,11 @@ config SND_SOC_RT5677_SPI
config SND_SOC_RT5682
tristate
- depends on I2C || SOUNDWIRE
+
+config SND_SOC_RT5682_I2C
+ tristate
+ depends on I2C
+ select SND_SOC_RT5682
config SND_SOC_RT5682_SDW
tristate "Realtek RT5682 Codec - SDW"
@@ -1569,7 +1581,7 @@ config SND_SOC_WM8978
config SND_SOC_WM8983
tristate
- depends on I2C
+ depends on SND_SOC_I2C_AND_SPI
config SND_SOC_WM8985
tristate "Wolfson Microelectronics WM8985 and WM8758 codec driver"
@@ -1620,19 +1632,19 @@ config SND_SOC_WM9090
config SND_SOC_WM9705
tristate
- depends on SND_SOC_AC97_BUS
+ depends on SND_SOC_AC97_BUS || AC97_BUS_NEW
select REGMAP_AC97
select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_WM9712
tristate
- depends on SND_SOC_AC97_BUS
+ depends on SND_SOC_AC97_BUS || AC97_BUS_NEW
select REGMAP_AC97
select AC97_BUS_COMPAT if AC97_BUS_NEW
config SND_SOC_WM9713
tristate
- depends on SND_SOC_AC97_BUS
+ depends on SND_SOC_AC97_BUS || AC97_BUS_NEW
select REGMAP_AC97
select AC97_BUS_COMPAT if AC97_BUS_NEW
@@ -1645,6 +1657,16 @@ config SND_SOC_WSA881X
This enables support for Qualcomm WSA8810/WSA8815 Class-D
Smart Speaker Amplifier.
+config SND_SOC_ZL38060
+ tristate "Microsemi ZL38060 Connected Home Audio Processor"
+ depends on SPI_MASTER
+ select GPIOLIB
+ select REGMAP
+ help
+ Support for ZL38060 Connected Home Audio Processor from Microsemi,
+ which consists of a Digital Signal Processor (DSP), several Digital
+ Audio Interfaces (DAIs), analog outputs, and a block of 14 GPIOs.
+
config SND_SOC_ZX_AUD96P22
tristate "ZTE ZX AUD96P22 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 03533157cda6..47ae3cebb61e 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -115,6 +115,7 @@ snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
snd-soc-max98927-objs := max98927.o
snd-soc-max98373-objs := max98373.o
+snd-soc-max98390-objs := max98390.o
snd-soc-max9850-objs := max9850.o
snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
@@ -178,6 +179,7 @@ snd-soc-rt5677-objs := rt5677.o
snd-soc-rt5677-spi-objs := rt5677-spi.o
snd-soc-rt5682-objs := rt5682.o
snd-soc-rt5682-sdw-objs := rt5682-sdw.o
+snd-soc-rt5682-i2c-objs := rt5682-i2c.o
snd-soc-rt700-objs := rt700.o rt700-sdw.o
snd-soc-rt711-objs := rt711.o rt711-sdw.o
snd-soc-rt715-objs := rt715.o rt715-sdw.o
@@ -288,6 +290,7 @@ snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
snd-soc-wsa881x-objs := wsa881x.o
+snd-soc-zl38060-objs := zl38060.o
snd-soc-zx-aud96p22-objs := zx_aud96p22.o
# Amp
snd-soc-max9877-objs := max9877.o
@@ -415,6 +418,7 @@ obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
obj-$(CONFIG_SND_SOC_MAX98927) += snd-soc-max98927.o
obj-$(CONFIG_SND_SOC_MAX98373) += snd-soc-max98373.o
+obj-$(CONFIG_SND_SOC_MAX98390) += snd-soc-max98390.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
@@ -478,6 +482,7 @@ obj-$(CONFIG_SND_SOC_RT5670) += snd-soc-rt5670.o
obj-$(CONFIG_SND_SOC_RT5677) += snd-soc-rt5677.o
obj-$(CONFIG_SND_SOC_RT5677_SPI) += snd-soc-rt5677-spi.o
obj-$(CONFIG_SND_SOC_RT5682) += snd-soc-rt5682.o
+obj-$(CONFIG_SND_SOC_RT5682_I2C) += snd-soc-rt5682-i2c.o
obj-$(CONFIG_SND_SOC_RT5682_SDW) += snd-soc-rt5682-sdw.o
obj-$(CONFIG_SND_SOC_RT700) += snd-soc-rt700.o
obj-$(CONFIG_SND_SOC_RT711) += snd-soc-rt711.o
@@ -588,6 +593,7 @@ obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o
obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
obj-$(CONFIG_SND_SOC_WSA881X) += snd-soc-wsa881x.o
+obj-$(CONFIG_SND_SOC_ZL38060) += snd-soc-zl38060.o
obj-$(CONFIG_SND_SOC_ZX_AUD96P22) += snd-soc-zx-aud96p22.o
# Amp
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index c4414c725c1f..43b1337bac37 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -2,7 +2,7 @@
/*
* ad1980.c -- ALSA Soc AD1980 codec support
*
- * Copyright: Analog Device Inc.
+ * Copyright: Analog Devices Inc.
* Author: Roy Huang <roy.huang@analog.com>
* Cliff Cai <cliff.cai@analog.com>
*/
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index 10daf61f0294..b98bf19f594e 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -2,7 +2,7 @@
/*
* ad73311.c -- ALSA Soc AD73311 codec support
*
- * Copyright: Analog Device Inc.
+ * Copyright: Analog Devices Inc.
* Author: Cliff Cai <cliff.cai@analog.com>
*/
diff --git a/sound/soc/codecs/adau7118-i2c.c b/sound/soc/codecs/adau7118-i2c.c
index a8211362fe82..aa7afb3b826d 100644
--- a/sound/soc/codecs/adau7118-i2c.c
+++ b/sound/soc/codecs/adau7118-i2c.c
@@ -32,6 +32,12 @@ static const struct reg_default adau7118_reg_defaults[] = {
{ ADAU7118_REG_RESET, 0x00 },
};
+static bool adau7118_volatile(struct device *dev, unsigned int reg)
+{
+ return (reg == ADAU7118_REG_RESET);
+}
+
+
static const struct regmap_config adau7118_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -39,6 +45,7 @@ static const struct regmap_config adau7118_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(adau7118_reg_defaults),
.cache_type = REGCACHE_RBTREE,
.max_register = ADAU7118_REG_RESET,
+ .volatile_reg = adau7118_volatile,
};
static int adau7118_probe_i2c(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 7cea398ec392..c4b9722c3d8f 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -725,7 +725,7 @@ static int adav80x_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct adav80x *adav80x = snd_soc_component_get_drvdata(component);
- if (!snd_soc_component_is_active(component) || !adav80x->rate)
+ if (!snd_soc_component_active(component) || !adav80x->rate)
return 0;
return snd_pcm_hw_constraint_single(substream->runtime,
@@ -738,7 +738,7 @@ static void adav80x_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
struct adav80x *adav80x = snd_soc_component_get_drvdata(component);
- if (!snd_soc_component_is_active(component))
+ if (!snd_soc_component_active(component))
adav80x->rate = 0;
}
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 70341b30f567..9716c9624a89 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1926,7 +1926,7 @@ static int arizona_dai_set_sysclk(struct snd_soc_dai *dai,
if (clk_id == dai_priv->clk)
return 0;
- if (dai->active) {
+ if (snd_soc_dai_active(dai)) {
dev_err(component->dev, "Can't change clock on active DAI %d\n",
dai->id);
return -EBUSY;
diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c
index d3dc42aa6825..8d45c628e988 100644
--- a/sound/soc/codecs/cros_ec_codec.c
+++ b/sound/soc/codecs/cros_ec_codec.c
@@ -8,7 +8,6 @@
* EC for audio function.
*/
-#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -107,24 +106,11 @@ error:
static int calculate_sha256(struct cros_ec_codec_priv *priv,
uint8_t *buf, uint32_t size, uint8_t *digest)
{
- struct crypto_shash *tfm;
+ struct sha256_state sctx;
- tfm = crypto_alloc_shash("sha256", CRYPTO_ALG_TYPE_SHASH, 0);
- if (IS_ERR(tfm)) {
- dev_err(priv->dev, "can't alloc shash\n");
- return PTR_ERR(tfm);
- }
-
- {
- SHASH_DESC_ON_STACK(desc, tfm);
-
- desc->tfm = tfm;
-
- crypto_shash_digest(desc, buf, size, digest);
- shash_desc_zero(desc);
- }
-
- crypto_free_shash(tfm);
+ sha256_init(&sctx);
+ sha256_update(&sctx, buf, size);
+ sha256_final(&sctx, digest);
#ifdef DEBUG
{
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 62f412d6f9f2..d43762ae8f3d 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -356,9 +356,9 @@ static int cs4271_hw_params(struct snd_pcm_substream *substream,
*/
if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- !dai->stream_active[SNDRV_PCM_STREAM_CAPTURE]) ||
+ !snd_soc_dai_stream_active(dai, SNDRV_PCM_STREAM_CAPTURE)) ||
(substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
- !dai->stream_active[SNDRV_PCM_STREAM_PLAYBACK])) {
+ !snd_soc_dai_stream_active(dai, SNDRV_PCM_STREAM_PLAYBACK))) {
ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
CS4271_MODE2_PDN,
CS4271_MODE2_PDN);
diff --git a/sound/soc/codecs/cs47l15.c b/sound/soc/codecs/cs47l15.c
index 8d1869bf7f9c..402c6b7c7014 100644
--- a/sound/soc/codecs/cs47l15.c
+++ b/sound/soc/codecs/cs47l15.c
@@ -1229,11 +1229,10 @@ static struct snd_soc_dai_driver cs47l15_dai[] = {
},
};
-static int cs47l15_open(struct snd_compr_stream *stream)
+static int cs47l15_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct cs47l15 *cs47l15 = snd_soc_component_get_drvdata(component);
struct madera_priv *priv = &cs47l15->core;
struct madera *madera = priv->madera;
@@ -1329,7 +1328,7 @@ static unsigned int cs47l15_digital_vu[] = {
MADERA_DAC_DIGITAL_VOLUME_5R,
};
-static const struct snd_compr_ops cs47l15_compr_ops = {
+static const struct snd_compress_ops cs47l15_compress_ops = {
.open = &cs47l15_open,
.free = &wm_adsp_compr_free,
.set_params = &wm_adsp_compr_set_params,
@@ -1345,7 +1344,7 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l15 = {
.set_sysclk = &madera_set_sysclk,
.set_pll = &cs47l15_set_fll,
.name = DRV_NAME,
- .compr_ops = &cs47l15_compr_ops,
+ .compress_ops = &cs47l15_compress_ops,
.controls = cs47l15_snd_controls,
.num_controls = ARRAY_SIZE(cs47l15_snd_controls),
.dapm_widgets = cs47l15_dapm_widgets,
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 6b0570f59630..f6d173d0120e 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1068,10 +1068,10 @@ static struct snd_soc_dai_driver cs47l24_dai[] = {
},
};
-static int cs47l24_open(struct snd_compr_stream *stream)
+static int cs47l24_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct cs47l24_priv *priv = snd_soc_component_get_drvdata(component);
struct arizona *arizona = priv->core.arizona;
int n_adsp;
@@ -1178,7 +1178,7 @@ static unsigned int cs47l24_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_4L,
};
-static struct snd_compr_ops cs47l24_compr_ops = {
+static struct snd_compress_ops cs47l24_compress_ops = {
.open = cs47l24_open,
.free = wm_adsp_compr_free,
.set_params = wm_adsp_compr_set_params,
@@ -1194,7 +1194,7 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l24 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = cs47l24_set_fll,
.name = DRV_NAME,
- .compr_ops = &cs47l24_compr_ops,
+ .compress_ops = &cs47l24_compress_ops,
.controls = cs47l24_snd_controls,
.num_controls = ARRAY_SIZE(cs47l24_snd_controls),
.dapm_widgets = cs47l24_dapm_widgets,
diff --git a/sound/soc/codecs/cs47l35.c b/sound/soc/codecs/cs47l35.c
index 18839807c9d1..d7538d50bbd3 100644
--- a/sound/soc/codecs/cs47l35.c
+++ b/sound/soc/codecs/cs47l35.c
@@ -1504,11 +1504,10 @@ static struct snd_soc_dai_driver cs47l35_dai[] = {
},
};
-static int cs47l35_open(struct snd_compr_stream *stream)
+static int cs47l35_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct cs47l35 *cs47l35 = snd_soc_component_get_drvdata(component);
struct madera_priv *priv = &cs47l35->core;
struct madera *madera = priv->madera;
@@ -1622,7 +1621,7 @@ static unsigned int cs47l35_digital_vu[] = {
MADERA_DAC_DIGITAL_VOLUME_5R,
};
-static const struct snd_compr_ops cs47l35_compr_ops = {
+static const struct snd_compress_ops cs47l35_compress_ops = {
.open = &cs47l35_open,
.free = &wm_adsp_compr_free,
.set_params = &wm_adsp_compr_set_params,
@@ -1638,7 +1637,7 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l35 = {
.set_sysclk = &madera_set_sysclk,
.set_pll = &cs47l35_set_fll,
.name = DRV_NAME,
- .compr_ops = &cs47l35_compr_ops,
+ .compress_ops = &cs47l35_compress_ops,
.controls = cs47l35_snd_controls,
.num_controls = ARRAY_SIZE(cs47l35_snd_controls),
.dapm_widgets = cs47l35_dapm_widgets,
diff --git a/sound/soc/codecs/cs47l85.c b/sound/soc/codecs/cs47l85.c
index a575113207f0..9de991adad74 100644
--- a/sound/soc/codecs/cs47l85.c
+++ b/sound/soc/codecs/cs47l85.c
@@ -2447,11 +2447,10 @@ static struct snd_soc_dai_driver cs47l85_dai[] = {
},
};
-static int cs47l85_open(struct snd_compr_stream *stream)
+static int cs47l85_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct cs47l85 *cs47l85 = snd_soc_component_get_drvdata(component);
struct madera_priv *priv = &cs47l85->core;
struct madera *madera = priv->madera;
@@ -2566,7 +2565,7 @@ static const unsigned int cs47l85_digital_vu[] = {
MADERA_DAC_DIGITAL_VOLUME_6R,
};
-static const struct snd_compr_ops cs47l85_compr_ops = {
+static const struct snd_compress_ops cs47l85_compress_ops = {
.open = &cs47l85_open,
.free = &wm_adsp_compr_free,
.set_params = &wm_adsp_compr_set_params,
@@ -2582,7 +2581,7 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l85 = {
.set_sysclk = &madera_set_sysclk,
.set_pll = &cs47l85_set_fll,
.name = DRV_NAME,
- .compr_ops = &cs47l85_compr_ops,
+ .compress_ops = &cs47l85_compress_ops,
.controls = cs47l85_snd_controls,
.num_controls = ARRAY_SIZE(cs47l85_snd_controls),
.dapm_widgets = cs47l85_dapm_widgets,
diff --git a/sound/soc/codecs/cs47l90.c b/sound/soc/codecs/cs47l90.c
index 81a1311b14e6..2715b5da0415 100644
--- a/sound/soc/codecs/cs47l90.c
+++ b/sound/soc/codecs/cs47l90.c
@@ -2358,11 +2358,10 @@ static struct snd_soc_dai_driver cs47l90_dai[] = {
},
};
-static int cs47l90_open(struct snd_compr_stream *stream)
+static int cs47l90_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct cs47l90 *cs47l90 = snd_soc_component_get_drvdata(component);
struct madera_priv *priv = &cs47l90->core;
struct madera *madera = priv->madera;
@@ -2473,7 +2472,7 @@ static unsigned int cs47l90_digital_vu[] = {
MADERA_DAC_DIGITAL_VOLUME_5R,
};
-static const struct snd_compr_ops cs47l90_compr_ops = {
+static const struct snd_compress_ops cs47l90_compress_ops = {
.open = &cs47l90_open,
.free = &wm_adsp_compr_free,
.set_params = &wm_adsp_compr_set_params,
@@ -2489,7 +2488,7 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l90 = {
.set_sysclk = &madera_set_sysclk,
.set_pll = &cs47l90_set_fll,
.name = DRV_NAME,
- .compr_ops = &cs47l90_compr_ops,
+ .compress_ops = &cs47l90_compress_ops,
.controls = cs47l90_snd_controls,
.num_controls = ARRAY_SIZE(cs47l90_snd_controls),
.dapm_widgets = cs47l90_dapm_widgets,
diff --git a/sound/soc/codecs/cs47l92.c b/sound/soc/codecs/cs47l92.c
index 15fc213d178d..108d28007185 100644
--- a/sound/soc/codecs/cs47l92.c
+++ b/sound/soc/codecs/cs47l92.c
@@ -1830,11 +1830,10 @@ static struct snd_soc_dai_driver cs47l92_dai[] = {
},
};
-static int cs47l92_open(struct snd_compr_stream *stream)
+static int cs47l92_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct cs47l92 *cs47l92 = snd_soc_component_get_drvdata(component);
struct madera_priv *priv = &cs47l92->core;
struct madera *madera = priv->madera;
@@ -1933,7 +1932,7 @@ static unsigned int cs47l92_digital_vu[] = {
MADERA_DAC_DIGITAL_VOLUME_5R,
};
-static const struct snd_compr_ops cs47l92_compr_ops = {
+static const struct snd_compress_ops cs47l92_compress_ops = {
.open = &cs47l92_open,
.free = &wm_adsp_compr_free,
.set_params = &wm_adsp_compr_set_params,
@@ -1949,7 +1948,7 @@ static const struct snd_soc_component_driver soc_component_dev_cs47l92 = {
.set_sysclk = &madera_set_sysclk,
.set_pll = &cs47l92_set_fll,
.name = DRV_NAME,
- .compr_ops = &cs47l92_compr_ops,
+ .compress_ops = &cs47l92_compress_ops,
.controls = cs47l92_snd_controls,
.num_controls = ARRAY_SIZE(cs47l92_snd_controls),
.dapm_widgets = cs47l92_dapm_widgets,
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 925a03996db4..3e6ad996741b 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
+#include <linux/pm_runtime.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
@@ -807,6 +808,11 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
static const struct snd_soc_dapm_widget da7213_dapm_widgets[] = {
/*
+ * Power Supply
+ */
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VDDMIC", 0, 0),
+
+ /*
* Input & Output
*/
@@ -932,6 +938,9 @@ static const struct snd_soc_dapm_route da7213_audio_map[] = {
/* Dest Connecting Widget source */
/* Input path */
+ {"Mic Bias 1", NULL, "VDDMIC"},
+ {"Mic Bias 2", NULL, "VDDMIC"},
+
{"MIC1", NULL, "Mic Bias 1"},
{"MIC2", NULL, "Mic Bias 2"},
@@ -1334,10 +1343,10 @@ static int da7213_mute(struct snd_soc_dai *dai, int mute)
#define DA7213_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
-static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
- int clk_id, unsigned int freq, int dir)
+static int da7213_set_component_sysclk(struct snd_soc_component *component,
+ int clk_id, int source,
+ unsigned int freq, int dir)
{
- struct snd_soc_component *component = codec_dai->component;
struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
int ret = 0;
@@ -1345,7 +1354,7 @@ static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
return 0;
if (((freq < 5000000) && (freq != 32768)) || (freq > 54000000)) {
- dev_err(codec_dai->dev, "Unsupported MCLK value %d\n",
+ dev_err(component->dev, "Unsupported MCLK value %d\n",
freq);
return -EINVAL;
}
@@ -1361,7 +1370,7 @@ static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
DA7213_PLL_MCLK_SQR_EN);
break;
default:
- dev_err(codec_dai->dev, "Unknown clock source %d\n", clk_id);
+ dev_err(component->dev, "Unknown clock source %d\n", clk_id);
return -EINVAL;
}
@@ -1371,7 +1380,7 @@ static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
freq = clk_round_rate(da7213->mclk, freq);
ret = clk_set_rate(da7213->mclk, freq);
if (ret) {
- dev_err(codec_dai->dev, "Failed to set clock rate %d\n",
+ dev_err(component->dev, "Failed to set clock rate %d\n",
freq);
return ret;
}
@@ -1383,10 +1392,10 @@ static int da7213_set_dai_sysclk(struct snd_soc_dai *codec_dai,
}
/* Supported PLL input frequencies are 32KHz, 5MHz - 54MHz. */
-static int da7213_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
- int source, unsigned int fref, unsigned int fout)
+static int da7213_set_component_pll(struct snd_soc_component *component,
+ int pll_id, int source,
+ unsigned int fref, unsigned int fout)
{
- struct snd_soc_component *component = codec_dai->component;
struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
u8 pll_ctrl, indiv_bits, indiv;
@@ -1498,8 +1507,6 @@ static int da7213_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
static const struct snd_soc_dai_ops da7213_dai_ops = {
.hw_params = da7213_hw_params,
.set_fmt = da7213_set_dai_fmt,
- .set_sysclk = da7213_set_dai_sysclk,
- .set_pll = da7213_set_dai_pll,
.digital_mute = da7213_mute,
};
@@ -1571,6 +1578,7 @@ static int da7213_set_bias_level(struct snd_soc_component *component,
#if defined(CONFIG_OF)
/* DT */
static const struct of_device_id da7213_of_match[] = {
+ { .compatible = "dlg,da7212", },
{ .compatible = "dlg,da7213", },
{ }
};
@@ -1690,6 +1698,8 @@ static int da7213_probe(struct snd_soc_component *component)
{
struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
+ pm_runtime_get_sync(component->dev);
+
/* Default to using ALC auto offset calibration mode. */
snd_soc_component_update_bits(component, DA7213_ALC_CTRL1,
DA7213_ALC_CALIB_MODE_MAN, 0);
@@ -1810,6 +1820,8 @@ static int da7213_probe(struct snd_soc_component *component)
DA7213_DMIC_CLK_RATE_MASK, dmic_cfg);
}
+ pm_runtime_put_sync(component->dev);
+
/* Check if MCLK provided */
da7213->mclk = devm_clk_get(component->dev, "mclk");
if (IS_ERR(da7213->mclk)) {
@@ -1831,6 +1843,8 @@ static const struct snd_soc_component_driver soc_component_dev_da7213 = {
.num_dapm_widgets = ARRAY_SIZE(da7213_dapm_widgets),
.dapm_routes = da7213_audio_map,
.num_dapm_routes = ARRAY_SIZE(da7213_audio_map),
+ .set_sysclk = da7213_set_component_sysclk,
+ .set_pll = da7213_set_component_pll,
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
@@ -1847,11 +1861,22 @@ static const struct regmap_config da7213_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static void da7213_power_off(void *data)
+{
+ struct da7213_priv *da7213 = data;
+ regulator_bulk_disable(DA7213_NUM_SUPPLIES, da7213->supplies);
+}
+
+static const char *da7213_supply_names[DA7213_NUM_SUPPLIES] = {
+ [DA7213_SUPPLY_VDDA] = "VDDA",
+ [DA7213_SUPPLY_VDDIO] = "VDDIO",
+};
+
static int da7213_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct da7213_priv *da7213;
- int ret;
+ int i, ret;
da7213 = devm_kzalloc(&i2c->dev, sizeof(*da7213), GFP_KERNEL);
if (!da7213)
@@ -1859,6 +1884,25 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, da7213);
+ /* Get required supplies */
+ for (i = 0; i < DA7213_NUM_SUPPLIES; ++i)
+ da7213->supplies[i].supply = da7213_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, DA7213_NUM_SUPPLIES,
+ da7213->supplies);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to get supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(DA7213_NUM_SUPPLIES, da7213->supplies);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&i2c->dev, da7213_power_off, da7213);
+ if (ret < 0)
+ return ret;
+
da7213->regmap = devm_regmap_init_i2c(i2c, &da7213_regmap_config);
if (IS_ERR(da7213->regmap)) {
ret = PTR_ERR(da7213->regmap);
@@ -1866,6 +1910,11 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ pm_runtime_set_autosuspend_delay(&i2c->dev, 100);
+ pm_runtime_use_autosuspend(&i2c->dev);
+ pm_runtime_set_active(&i2c->dev);
+ pm_runtime_enable(&i2c->dev);
+
ret = devm_snd_soc_register_component(&i2c->dev,
&soc_component_dev_da7213, &da7213_dai, 1);
if (ret < 0) {
@@ -1875,6 +1924,34 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
return ret;
}
+static int __maybe_unused da7213_runtime_suspend(struct device *dev)
+{
+ struct da7213_priv *da7213 = dev_get_drvdata(dev);
+
+ regcache_cache_only(da7213->regmap, true);
+ regcache_mark_dirty(da7213->regmap);
+ regulator_bulk_disable(DA7213_NUM_SUPPLIES, da7213->supplies);
+
+ return 0;
+}
+
+static int __maybe_unused da7213_runtime_resume(struct device *dev)
+{
+ struct da7213_priv *da7213 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(DA7213_NUM_SUPPLIES, da7213->supplies);
+ if (ret < 0)
+ return ret;
+ regcache_cache_only(da7213->regmap, false);
+ regcache_sync(da7213->regmap);
+ return 0;
+}
+
+static const struct dev_pm_ops da7213_pm = {
+ SET_RUNTIME_PM_OPS(da7213_runtime_suspend, da7213_runtime_resume, NULL)
+};
+
static const struct i2c_device_id da7213_i2c_id[] = {
{ "da7213", 0 },
{ }
@@ -1887,6 +1964,7 @@ static struct i2c_driver da7213_i2c_driver = {
.name = "da7213",
.of_match_table = of_match_ptr(da7213_of_match),
.acpi_match_table = ACPI_PTR(da7213_acpi_match),
+ .pm = &da7213_pm,
},
.probe = da7213_i2c_probe,
.id_table = da7213_i2c_id,
diff --git a/sound/soc/codecs/da7213.h b/sound/soc/codecs/da7213.h
index 3250a3821fcc..3890829dfb6e 100644
--- a/sound/soc/codecs/da7213.h
+++ b/sound/soc/codecs/da7213.h
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <sound/da7213.h>
/*
@@ -521,9 +522,17 @@ enum da7213_sys_clk {
DA7213_SYSCLK_PLL_32KHZ
};
+/* Regulators */
+enum da7213_supplies {
+ DA7213_SUPPLY_VDDA = 0,
+ DA7213_SUPPLY_VDDIO,
+ DA7213_NUM_SUPPLIES,
+};
+
/* Codec private data */
struct da7213_priv {
struct regmap *regmap;
+ struct regulator_bulk_data supplies[DA7213_NUM_SUPPLIES];
struct clk *mclk;
unsigned int mclk_rate;
int clk_src;
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index f5560a49b9e5..5d079d90fd3b 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -59,14 +59,14 @@ static int dmic_aif_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
if (dmic->gpio_en)
- gpiod_set_value(dmic->gpio_en, 1);
+ gpiod_set_value_cansleep(dmic->gpio_en, 1);
if (dmic->wakeup_delay)
msleep(dmic->wakeup_delay);
break;
case SND_SOC_DAPM_POST_PMD:
if (dmic->gpio_en)
- gpiod_set_value(dmic->gpio_en, 0);
+ gpiod_set_value_cansleep(dmic->gpio_en, 0);
break;
}
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
index 598b07d9b6fe..d0efc5e254ae 100644
--- a/sound/soc/codecs/hdac_hda.h
+++ b/sound/soc/codecs/hdac_hda.h
@@ -28,10 +28,6 @@ struct hdac_hda_priv {
bool need_display_power;
};
-#define hdac_to_hda_priv(_hdac) \
- container_of(_hdac, struct hdac_hda_priv, codec.core)
-#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
-
struct hdac_ext_bus_ops *snd_soc_hdac_hda_get_ops(void);
#endif /* __HDAC_HDA_H__ */
diff --git a/sound/soc/codecs/jz4725b.c b/sound/soc/codecs/jz4725b.c
index 2567a5d15b55..e49374c72e70 100644
--- a/sound/soc/codecs/jz4725b.c
+++ b/sound/soc/codecs/jz4725b.c
@@ -574,19 +574,17 @@ static int jz4725b_codec_probe(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_OF
static const struct of_device_id jz4725b_codec_of_matches[] = {
{ .compatible = "ingenic,jz4725b-codec", },
{ }
};
MODULE_DEVICE_TABLE(of, jz4725b_codec_of_matches);
-#endif
static struct platform_driver jz4725b_codec_driver = {
.probe = jz4725b_codec_probe,
.driver = {
.name = "jz4725b-codec",
- .of_match_table = of_match_ptr(jz4725b_codec_of_matches),
+ .of_match_table = jz4725b_codec_of_matches,
},
};
module_platform_driver(jz4725b_codec_driver);
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 460aa1fd1efe..c9900d1cd5c2 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -344,19 +344,17 @@ static int jz4740_codec_probe(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_OF
static const struct of_device_id jz4740_codec_of_matches[] = {
{ .compatible = "ingenic,jz4740-codec", },
{ }
};
MODULE_DEVICE_TABLE(of, jz4740_codec_of_matches);
-#endif
static struct platform_driver jz4740_codec_driver = {
.probe = jz4740_codec_probe,
.driver = {
.name = "jz4740-codec",
- .of_match_table = of_match_ptr(jz4740_codec_of_matches),
+ .of_match_table = jz4740_codec_of_matches,
},
};
diff --git a/sound/soc/codecs/jz4770.c b/sound/soc/codecs/jz4770.c
index e7cf2c107607..34775aa62402 100644
--- a/sound/soc/codecs/jz4770.c
+++ b/sound/soc/codecs/jz4770.c
@@ -937,7 +937,7 @@ static struct platform_driver jz4770_codec_driver = {
.probe = jz4770_codec_probe,
.driver = {
.name = "jz4770-codec",
- .of_match_table = of_match_ptr(jz4770_codec_of_matches),
+ .of_match_table = jz4770_codec_of_matches,
},
};
module_platform_driver(jz4770_codec_driver);
diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c
index a448d2a2918a..ec380b0b2d4e 100644
--- a/sound/soc/codecs/madera.c
+++ b/sound/soc/codecs/madera.c
@@ -3279,7 +3279,7 @@ static int madera_dai_set_sysclk(struct snd_soc_dai *dai,
if (is_sync == madera_is_syncclk(dai_priv->clk))
return 0;
- if (dai->active) {
+ if (snd_soc_dai_active(dai)) {
dev_err(component->dev, "Can't change clock on active DAI %d\n",
dai->id);
return -EBUSY;
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index d0737db5868a..39dda1b03b3d 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -220,6 +220,6 @@ static struct i2c_driver max9768_i2c_driver = {
};
module_i2c_driver(max9768_i2c_driver);
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("ASoC MAX9768 amplifier driver");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 032adc14562d..e2cc1ad8cb0a 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2039,7 +2039,7 @@ static int max98090_dai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!max98090->master && dai->active == 1)
+ if (!max98090->master && snd_soc_dai_active(dai) == 1)
queue_delayed_work(system_power_efficient_wq,
&max98090->pll_det_enable_work,
msecs_to_jiffies(10));
@@ -2047,7 +2047,7 @@ static int max98090_dai_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (!max98090->master && dai->active == 1)
+ if (!max98090->master && snd_soc_dai_active(dai) == 1)
schedule_work(&max98090->pll_det_disable_work);
break;
default:
@@ -2109,7 +2109,7 @@ static void max98090_pll_work(struct max98090_priv *max98090)
unsigned int pll;
int i;
- if (!snd_soc_component_is_active(component))
+ if (!snd_soc_component_active(component))
return;
dev_info_ratelimited(component->dev, "PLL unlocked\n");
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index cae1def8902d..96718e3a1ad0 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -850,8 +850,8 @@ static int max98373_resume(struct device *dev)
{
struct max98373_priv *max98373 = dev_get_drvdata(dev);
- max98373_reset(max98373, dev);
regcache_cache_only(max98373->regmap, false);
+ max98373_reset(max98373, dev);
regcache_sync(max98373->regmap);
return 0;
}
diff --git a/sound/soc/codecs/max98390.c b/sound/soc/codecs/max98390.c
new file mode 100644
index 000000000000..0d63ebfbff2f
--- /dev/null
+++ b/sound/soc/codecs/max98390.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * max98390.c -- MAX98390 ALSA SoC Audio driver
+ *
+ * Copyright (C) 2020 Maxim Integrated Products
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/cdev.h>
+#include <linux/dmi.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "max98390.h"
+
+static struct reg_default max98390_reg_defaults[] = {
+ {MAX98390_INT_EN1, 0xf0},
+ {MAX98390_INT_EN2, 0x00},
+ {MAX98390_INT_EN3, 0x00},
+ {MAX98390_INT_FLAG_CLR1, 0x00},
+ {MAX98390_INT_FLAG_CLR2, 0x00},
+ {MAX98390_INT_FLAG_CLR3, 0x00},
+ {MAX98390_IRQ_CTRL, 0x01},
+ {MAX98390_CLK_MON, 0x6d},
+ {MAX98390_DAT_MON, 0x03},
+ {MAX98390_WDOG_CTRL, 0x00},
+ {MAX98390_WDOG_RST, 0x00},
+ {MAX98390_MEAS_ADC_THERM_WARN_THRESH, 0x75},
+ {MAX98390_MEAS_ADC_THERM_SHDN_THRESH, 0x8c},
+ {MAX98390_MEAS_ADC_THERM_HYSTERESIS, 0x08},
+ {MAX98390_PIN_CFG, 0x55},
+ {MAX98390_PCM_RX_EN_A, 0x00},
+ {MAX98390_PCM_RX_EN_B, 0x00},
+ {MAX98390_PCM_TX_EN_A, 0x00},
+ {MAX98390_PCM_TX_EN_B, 0x00},
+ {MAX98390_PCM_TX_HIZ_CTRL_A, 0xff},
+ {MAX98390_PCM_TX_HIZ_CTRL_B, 0xff},
+ {MAX98390_PCM_CH_SRC_1, 0x00},
+ {MAX98390_PCM_CH_SRC_2, 0x00},
+ {MAX98390_PCM_CH_SRC_3, 0x00},
+ {MAX98390_PCM_MODE_CFG, 0xc0},
+ {MAX98390_PCM_MASTER_MODE, 0x1c},
+ {MAX98390_PCM_CLK_SETUP, 0x44},
+ {MAX98390_PCM_SR_SETUP, 0x08},
+ {MAX98390_ICC_RX_EN_A, 0x00},
+ {MAX98390_ICC_RX_EN_B, 0x00},
+ {MAX98390_ICC_TX_EN_A, 0x00},
+ {MAX98390_ICC_TX_EN_B, 0x00},
+ {MAX98390_ICC_HIZ_MANUAL_MODE, 0x00},
+ {MAX98390_ICC_TX_HIZ_EN_A, 0x00},
+ {MAX98390_ICC_TX_HIZ_EN_B, 0x00},
+ {MAX98390_ICC_LNK_EN, 0x00},
+ {MAX98390_R2039_AMP_DSP_CFG, 0x0f},
+ {MAX98390_R203A_AMP_EN, 0x81},
+ {MAX98390_TONE_GEN_DC_CFG, 0x00},
+ {MAX98390_SPK_SRC_SEL, 0x00},
+ {MAX98390_SSM_CFG, 0x85},
+ {MAX98390_MEAS_EN, 0x03},
+ {MAX98390_MEAS_DSP_CFG, 0x0f},
+ {MAX98390_BOOST_CTRL0, 0x1c},
+ {MAX98390_BOOST_CTRL3, 0x01},
+ {MAX98390_BOOST_CTRL1, 0x40},
+ {MAX98390_MEAS_ADC_CFG, 0x07},
+ {MAX98390_MEAS_ADC_BASE_MSB, 0x00},
+ {MAX98390_MEAS_ADC_BASE_LSB, 0x23},
+ {MAX98390_ADC_CH0_DIVIDE, 0x00},
+ {MAX98390_ADC_CH1_DIVIDE, 0x00},
+ {MAX98390_ADC_CH2_DIVIDE, 0x00},
+ {MAX98390_ADC_CH0_FILT_CFG, 0x00},
+ {MAX98390_ADC_CH1_FILT_CFG, 0x00},
+ {MAX98390_ADC_CH2_FILT_CFG, 0x00},
+ {MAX98390_PWR_GATE_CTL, 0x2c},
+ {MAX98390_BROWNOUT_EN, 0x00},
+ {MAX98390_BROWNOUT_INFINITE_HOLD, 0x00},
+ {MAX98390_BROWNOUT_INFINITE_HOLD_CLR, 0x00},
+ {MAX98390_BROWNOUT_LVL_HOLD, 0x00},
+ {MAX98390_BROWNOUT_LVL1_THRESH, 0x00},
+ {MAX98390_BROWNOUT_LVL2_THRESH, 0x00},
+ {MAX98390_BROWNOUT_LVL3_THRESH, 0x00},
+ {MAX98390_BROWNOUT_LVL4_THRESH, 0x00},
+ {MAX98390_BROWNOUT_THRESH_HYSTERYSIS, 0x00},
+ {MAX98390_BROWNOUT_AMP_LIMITER_ATK_REL, 0x1f},
+ {MAX98390_BROWNOUT_AMP_GAIN_ATK_REL, 0x00},
+ {MAX98390_BROWNOUT_AMP1_CLIP_MODE, 0x00},
+ {MAX98390_BROWNOUT_LVL1_CUR_LIMIT, 0x00},
+ {MAX98390_BROWNOUT_LVL1_AMP1_CTRL1, 0x00},
+ {MAX98390_BROWNOUT_LVL1_AMP1_CTRL2, 0x00},
+ {MAX98390_BROWNOUT_LVL1_AMP1_CTRL3, 0x00},
+ {MAX98390_BROWNOUT_LVL2_CUR_LIMIT, 0x00},
+ {MAX98390_BROWNOUT_LVL2_AMP1_CTRL1, 0x00},
+ {MAX98390_BROWNOUT_LVL2_AMP1_CTRL2, 0x00},
+ {MAX98390_BROWNOUT_LVL2_AMP1_CTRL3, 0x00},
+ {MAX98390_BROWNOUT_LVL3_CUR_LIMIT, 0x00},
+ {MAX98390_BROWNOUT_LVL3_AMP1_CTRL1, 0x00},
+ {MAX98390_BROWNOUT_LVL3_AMP1_CTRL2, 0x00},
+ {MAX98390_BROWNOUT_LVL3_AMP1_CTRL3, 0x00},
+ {MAX98390_BROWNOUT_LVL4_CUR_LIMIT, 0x00},
+ {MAX98390_BROWNOUT_LVL4_AMP1_CTRL1, 0x00},
+ {MAX98390_BROWNOUT_LVL4_AMP1_CTRL2, 0x00},
+ {MAX98390_BROWNOUT_LVL4_AMP1_CTRL3, 0x00},
+ {MAX98390_BROWNOUT_ILIM_HLD, 0x00},
+ {MAX98390_BROWNOUT_LIM_HLD, 0x00},
+ {MAX98390_BROWNOUT_CLIP_HLD, 0x00},
+ {MAX98390_BROWNOUT_GAIN_HLD, 0x00},
+ {MAX98390_ENV_TRACK_VOUT_HEADROOM, 0x0f},
+ {MAX98390_ENV_TRACK_BOOST_VOUT_DELAY, 0x80},
+ {MAX98390_ENV_TRACK_REL_RATE, 0x07},
+ {MAX98390_ENV_TRACK_HOLD_RATE, 0x07},
+ {MAX98390_ENV_TRACK_CTRL, 0x01},
+ {MAX98390_BOOST_BYPASS1, 0x49},
+ {MAX98390_BOOST_BYPASS2, 0x2b},
+ {MAX98390_BOOST_BYPASS3, 0x08},
+ {MAX98390_FET_SCALING1, 0x00},
+ {MAX98390_FET_SCALING2, 0x03},
+ {MAX98390_FET_SCALING3, 0x00},
+ {MAX98390_FET_SCALING4, 0x07},
+ {MAX98390_SPK_SPEEDUP, 0x00},
+ {DSMIG_WB_DRC_RELEASE_TIME_1, 0x00},
+ {DSMIG_WB_DRC_RELEASE_TIME_2, 0x00},
+ {DSMIG_WB_DRC_ATTACK_TIME_1, 0x00},
+ {DSMIG_WB_DRC_ATTACK_TIME_2, 0x00},
+ {DSMIG_WB_DRC_COMPRESSION_RATIO, 0x00},
+ {DSMIG_WB_DRC_COMPRESSION_THRESHOLD, 0x00},
+ {DSMIG_WB_DRC_MAKEUPGAIN, 0x00},
+ {DSMIG_WB_DRC_NOISE_GATE_THRESHOLD, 0x00},
+ {DSMIG_WBDRC_HPF_ENABLE, 0x00},
+ {DSMIG_WB_DRC_TEST_SMOOTHER_OUT_EN, 0x00},
+ {DSMIG_PPR_THRESHOLD, 0x00},
+ {DSM_STEREO_BASS_CHANNEL_SELECT, 0x00},
+ {DSM_TPROT_THRESHOLD_BYTE0, 0x00},
+ {DSM_TPROT_THRESHOLD_BYTE1, 0x00},
+ {DSM_TPROT_ROOM_TEMPERATURE_BYTE0, 0x00},
+ {DSM_TPROT_ROOM_TEMPERATURE_BYTE1, 0x00},
+ {DSM_TPROT_RECIP_RDC_ROOM_BYTE0, 0x00},
+ {DSM_TPROT_RECIP_RDC_ROOM_BYTE1, 0x00},
+ {DSM_TPROT_RECIP_RDC_ROOM_BYTE2, 0x00},
+ {DSM_TPROT_RECIP_TCONST_BYTE0, 0x00},
+ {DSM_TPROT_RECIP_TCONST_BYTE1, 0x00},
+ {DSM_TPROT_RECIP_TCONST_BYTE2, 0x00},
+ {DSM_THERMAL_ATTENUATION_SETTINGS, 0x00},
+ {DSM_THERMAL_PILOT_TONE_ATTENUATION, 0x00},
+ {DSM_TPROT_PG_TEMP_THRESH_BYTE0, 0x00},
+ {DSM_TPROT_PG_TEMP_THRESH_BYTE1, 0x00},
+ {DSMIG_DEBUZZER_THRESHOLD, 0x00},
+ {DSMIG_DEBUZZER_ALPHA_COEF_TEST_ONLY, 0x08},
+ {DSM_VOL_ENA, 0x20},
+ {DSM_VOL_CTRL, 0xa0},
+ {DSMIG_EN, 0x00},
+ {MAX98390_R23E1_DSP_GLOBAL_EN, 0x00},
+ {MAX98390_R23FF_GLOBAL_EN, 0x00},
+};
+
+static int max98390_dsm_calibrate(struct snd_soc_component *component);
+
+static int max98390_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = codec_dai->component;
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+ unsigned int mode;
+ unsigned int format;
+ unsigned int invert = 0;
+
+ dev_dbg(component->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ mode = MAX98390_PCM_MASTER_MODE_SLAVE;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ max98390->master = true;
+ mode = MAX98390_PCM_MASTER_MODE_MASTER;
+ break;
+ default:
+ dev_err(component->dev, "DAI clock mode unsupported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_MASTER_MODE,
+ MAX98390_PCM_MASTER_MODE_MASK,
+ mode);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert = MAX98390_PCM_MODE_CFG_PCM_BCLKEDGE;
+ break;
+ default:
+ dev_err(component->dev, "DAI invert mode unsupported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_MODE_CFG,
+ MAX98390_PCM_MODE_CFG_PCM_BCLKEDGE,
+ invert);
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ format = MAX98390_PCM_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ format = MAX98390_PCM_FORMAT_LJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ format = MAX98390_PCM_FORMAT_TDM_MODE1;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ format = MAX98390_PCM_FORMAT_TDM_MODE0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_MODE_CFG,
+ MAX98390_PCM_MODE_CFG_FORMAT_MASK,
+ format << MAX98390_PCM_MODE_CFG_FORMAT_SHIFT);
+
+ return 0;
+}
+
+static int max98390_get_bclk_sel(int bclk)
+{
+ int i;
+ /* BCLKs per LRCLK */
+ static int bclk_sel_table[] = {
+ 32, 48, 64, 96, 128, 192, 256, 320, 384, 512,
+ };
+ /* match BCLKs per LRCLK */
+ for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+ if (bclk_sel_table[i] == bclk)
+ return i + 2;
+ }
+ return 0;
+}
+
+static int max98390_set_clock(struct snd_soc_component *component,
+ struct snd_pcm_hw_params *params)
+{
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+ /* codec MCLK rate in master mode */
+ static int rate_table[] = {
+ 5644800, 6000000, 6144000, 6500000,
+ 9600000, 11289600, 12000000, 12288000,
+ 13000000, 19200000,
+ };
+ /* BCLK/LRCLK ratio calculation */
+ int blr_clk_ratio = params_channels(params)
+ * snd_pcm_format_width(params_format(params));
+ int value;
+
+ if (max98390->master) {
+ int i;
+ /* match rate to closest value */
+ for (i = 0; i < ARRAY_SIZE(rate_table); i++) {
+ if (rate_table[i] >= max98390->sysclk)
+ break;
+ }
+ if (i == ARRAY_SIZE(rate_table)) {
+ dev_err(component->dev, "failed to find proper clock rate.\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_MASTER_MODE,
+ MAX98390_PCM_MASTER_MODE_MCLK_MASK,
+ i << MAX98390_PCM_MASTER_MODE_MCLK_RATE_SHIFT);
+ }
+
+ if (!max98390->tdm_mode) {
+ /* BCLK configuration */
+ value = max98390_get_bclk_sel(blr_clk_ratio);
+ if (!value) {
+ dev_err(component->dev, "format unsupported %d\n",
+ params_format(params));
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_CLK_SETUP,
+ MAX98390_PCM_CLK_SETUP_BSEL_MASK,
+ value);
+ }
+ return 0;
+}
+
+static int max98390_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component =
+ dai->component;
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ unsigned int sampling_rate;
+ unsigned int chan_sz;
+
+ /* pcm mode configuration */
+ switch (snd_pcm_format_width(params_format(params))) {
+ case 16:
+ chan_sz = MAX98390_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98390_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98390_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(component->dev, "format unsupported %d\n",
+ params_format(params));
+ goto err;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_MODE_CFG,
+ MAX98390_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ dev_dbg(component->dev, "format supported %d",
+ params_format(params));
+
+ /* sampling rate configuration */
+ switch (params_rate(params)) {
+ case 8000:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_8000;
+ break;
+ case 11025:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_11025;
+ break;
+ case 12000:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_12000;
+ break;
+ case 16000:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_16000;
+ break;
+ case 22050:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_22050;
+ break;
+ case 24000:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_24000;
+ break;
+ case 32000:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_32000;
+ break;
+ case 44100:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_44100;
+ break;
+ case 48000:
+ sampling_rate = MAX98390_PCM_SR_SET1_SR_48000;
+ break;
+ default:
+ dev_err(component->dev, "rate %d not supported\n",
+ params_rate(params));
+ goto err;
+ }
+
+ /* set DAI_SR to correct LRCLK frequency */
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_SR_SETUP,
+ MAX98390_PCM_SR_SET1_SR_MASK,
+ sampling_rate);
+
+ return max98390_set_clock(component, params);
+err:
+ return -EINVAL;
+}
+
+static int max98390_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ int bsel;
+ unsigned int chan_sz;
+
+ if (!tx_mask && !rx_mask && !slots && !slot_width)
+ max98390->tdm_mode = false;
+ else
+ max98390->tdm_mode = true;
+
+ dev_dbg(component->dev,
+ "Tdm mode : %d\n", max98390->tdm_mode);
+
+ /* BCLK configuration */
+ bsel = max98390_get_bclk_sel(slots * slot_width);
+ if (!bsel) {
+ dev_err(component->dev, "BCLK %d not supported\n",
+ slots * slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_CLK_SETUP,
+ MAX98390_PCM_CLK_SETUP_BSEL_MASK,
+ bsel);
+
+ /* Channel size configuration */
+ switch (slot_width) {
+ case 16:
+ chan_sz = MAX98390_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98390_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98390_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(component->dev, "format unsupported %d\n",
+ slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98390->regmap,
+ MAX98390_PCM_MODE_CFG,
+ MAX98390_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ /* Rx slot configuration */
+ regmap_write(max98390->regmap,
+ MAX98390_PCM_RX_EN_A,
+ rx_mask & 0xFF);
+ regmap_write(max98390->regmap,
+ MAX98390_PCM_RX_EN_B,
+ (rx_mask & 0xFF00) >> 8);
+
+ /* Tx slot Hi-Z configuration */
+ regmap_write(max98390->regmap,
+ MAX98390_PCM_TX_HIZ_CTRL_A,
+ ~tx_mask & 0xFF);
+ regmap_write(max98390->regmap,
+ MAX98390_PCM_TX_HIZ_CTRL_B,
+ (~tx_mask & 0xFF00) >> 8);
+
+ return 0;
+}
+
+static int max98390_dai_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ max98390->sysclk = freq;
+ return 0;
+}
+
+static const struct snd_soc_dai_ops max98390_dai_ops = {
+ .set_sysclk = max98390_dai_set_sysclk,
+ .set_fmt = max98390_dai_set_fmt,
+ .hw_params = max98390_dai_hw_params,
+ .set_tdm_slot = max98390_dai_tdm_slot,
+};
+
+static int max98390_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(max98390->regmap,
+ MAX98390_R203A_AMP_EN,
+ MAX98390_AMP_EN_MASK, 1);
+ regmap_update_bits(max98390->regmap,
+ MAX98390_R23FF_GLOBAL_EN,
+ MAX98390_GLOBAL_EN_MASK, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(max98390->regmap,
+ MAX98390_R23FF_GLOBAL_EN,
+ MAX98390_GLOBAL_EN_MASK, 0);
+ regmap_update_bits(max98390->regmap,
+ MAX98390_R203A_AMP_EN,
+ MAX98390_AMP_EN_MASK, 0);
+ break;
+ }
+ return 0;
+}
+
+static const char * const max98390_switch_text[] = {
+ "Left", "Right", "LeftRight"};
+
+static const char * const max98390_boost_voltage_text[] = {
+ "6.5V", "6.625V", "6.75V", "6.875V", "7V", "7.125V", "7.25V", "7.375V",
+ "7.5V", "7.625V", "7.75V", "7.875V", "8V", "8.125V", "8.25V", "8.375V",
+ "8.5V", "8.625V", "8.75V", "8.875V", "9V", "9.125V", "9.25V", "9.375V",
+ "9.5V", "9.625V", "9.75V", "9.875V", "10V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98390_boost_voltage,
+ MAX98390_BOOST_CTRL0, 0,
+ max98390_boost_voltage_text);
+
+static DECLARE_TLV_DB_SCALE(max98390_spk_tlv, 300, 300, 0);
+static DECLARE_TLV_DB_SCALE(max98390_digital_tlv, -8000, 50, 0);
+
+static const char * const max98390_current_limit_text[] = {
+ "0.00A", "0.50A", "1.00A", "1.05A", "1.10A", "1.15A", "1.20A", "1.25A",
+ "1.30A", "1.35A", "1.40A", "1.45A", "1.50A", "1.55A", "1.60A", "1.65A",
+ "1.70A", "1.75A", "1.80A", "1.85A", "1.90A", "1.95A", "2.00A", "2.05A",
+ "2.10A", "2.15A", "2.20A", "2.25A", "2.30A", "2.35A", "2.40A", "2.45A",
+ "2.50A", "2.55A", "2.60A", "2.65A", "2.70A", "2.75A", "2.80A", "2.85A",
+ "2.90A", "2.95A", "3.00A", "3.05A", "3.10A", "3.15A", "3.20A", "3.25A",
+ "3.30A", "3.35A", "3.40A", "3.45A", "3.50A", "3.55A", "3.60A", "3.65A",
+ "3.70A", "3.75A", "3.80A", "3.85A", "3.90A", "3.95A", "4.00A", "4.05A",
+ "4.10A"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98390_current_limit,
+ MAX98390_BOOST_CTRL1, 0,
+ max98390_current_limit_text);
+
+static int max98390_ref_rdc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ max98390->ref_rdc_value = ucontrol->value.integer.value[0];
+
+ regmap_write(max98390->regmap, DSM_TPROT_RECIP_RDC_ROOM_BYTE0,
+ max98390->ref_rdc_value & 0x000000ff);
+ regmap_write(max98390->regmap, DSM_TPROT_RECIP_RDC_ROOM_BYTE1,
+ (max98390->ref_rdc_value >> 8) & 0x000000ff);
+ regmap_write(max98390->regmap, DSM_TPROT_RECIP_RDC_ROOM_BYTE2,
+ (max98390->ref_rdc_value >> 16) & 0x000000ff);
+
+ return 0;
+}
+
+static int max98390_ref_rdc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = max98390->ref_rdc_value;
+
+ return 0;
+}
+
+static int max98390_ambient_temp_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ max98390->ambient_temp_value = ucontrol->value.integer.value[0];
+
+ regmap_write(max98390->regmap, DSM_TPROT_ROOM_TEMPERATURE_BYTE1,
+ (max98390->ambient_temp_value >> 8) & 0x000000ff);
+ regmap_write(max98390->regmap, DSM_TPROT_ROOM_TEMPERATURE_BYTE0,
+ (max98390->ambient_temp_value) & 0x000000ff);
+
+ return 0;
+}
+
+static int max98390_ambient_temp_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.integer.value[0] = max98390->ambient_temp_value;
+
+ return 0;
+}
+
+static int max98390_adaptive_rdc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+
+ dev_warn(component->dev, "Put adaptive rdc not supported\n");
+
+ return 0;
+}
+
+static int max98390_adaptive_rdc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int rdc, rdc0;
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ regmap_read(max98390->regmap, THERMAL_RDC_RD_BACK_BYTE1, &rdc);
+ regmap_read(max98390->regmap, THERMAL_RDC_RD_BACK_BYTE0, &rdc0);
+ ucontrol->value.integer.value[0] = rdc0 | rdc << 8;
+
+ return 0;
+}
+
+static int max98390_dsm_calib_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ /* Do nothing */
+ return 0;
+}
+
+static int max98390_dsm_calib_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+
+ max98390_dsm_calibrate(component);
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new max98390_snd_controls[] = {
+ SOC_SINGLE_TLV("Digital Volume", DSM_VOL_CTRL,
+ 0, 184, 0,
+ max98390_digital_tlv),
+ SOC_SINGLE_TLV("Speaker Volume", MAX98390_R203D_SPK_GAIN,
+ 0, 6, 0,
+ max98390_spk_tlv),
+ SOC_SINGLE("Ramp Up Bypass Switch", MAX98390_R2039_AMP_DSP_CFG,
+ MAX98390_AMP_DSP_CFG_RMP_UP_SHIFT, 1, 0),
+ SOC_SINGLE("Ramp Down Bypass Switch", MAX98390_R2039_AMP_DSP_CFG,
+ MAX98390_AMP_DSP_CFG_RMP_DN_SHIFT, 1, 0),
+ SOC_SINGLE("Boost Clock Phase", MAX98390_BOOST_CTRL3,
+ MAX98390_BOOST_CLK_PHASE_CFG_SHIFT, 3, 0),
+ SOC_ENUM("Boost Output Voltage", max98390_boost_voltage),
+ SOC_ENUM("Current Limit", max98390_current_limit),
+ SOC_SINGLE_EXT("DSM Rdc", SND_SOC_NOPM, 0, 0xffffff, 0,
+ max98390_ref_rdc_get, max98390_ref_rdc_put),
+ SOC_SINGLE_EXT("DSM Ambient Temp", SND_SOC_NOPM, 0, 0xffff, 0,
+ max98390_ambient_temp_get, max98390_ambient_temp_put),
+ SOC_SINGLE_EXT("DSM Adaptive Rdc", SND_SOC_NOPM, 0, 0xffff, 0,
+ max98390_adaptive_rdc_get, max98390_adaptive_rdc_put),
+ SOC_SINGLE_EXT("DSM Calibration", SND_SOC_NOPM, 0, 1, 0,
+ max98390_dsm_calib_get, max98390_dsm_calib_put),
+};
+
+static const struct soc_enum dai_sel_enum =
+ SOC_ENUM_SINGLE(MAX98390_PCM_CH_SRC_1,
+ MAX98390_PCM_RX_CH_SRC_SHIFT,
+ 3, max98390_switch_text);
+
+static const struct snd_kcontrol_new max98390_dai_controls =
+ SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
+
+static const struct snd_soc_dapm_widget max98390_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
+ MAX98390_R203A_AMP_EN, 0, 0, max98390_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
+ &max98390_dai_controls),
+ SND_SOC_DAPM_OUTPUT("BE_OUT"),
+};
+
+static const struct snd_soc_dapm_route max98390_audio_map[] = {
+ /* Playback */
+ {"DAI Sel Mux", "Left", "Amp Enable"},
+ {"DAI Sel Mux", "Right", "Amp Enable"},
+ {"DAI Sel Mux", "LeftRight", "Amp Enable"},
+ {"BE_OUT", NULL, "DAI Sel Mux"},
+};
+
+static bool max98390_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98390_SOFTWARE_RESET ... MAX98390_INT_EN3:
+ case MAX98390_IRQ_CTRL ... MAX98390_WDOG_CTRL:
+ case MAX98390_MEAS_ADC_THERM_WARN_THRESH
+ ... MAX98390_BROWNOUT_INFINITE_HOLD:
+ case MAX98390_BROWNOUT_LVL_HOLD ... THERMAL_COILTEMP_RD_BACK_BYTE0:
+ case DSMIG_DEBUZZER_THRESHOLD ... MAX98390_R24FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+};
+
+static bool max98390_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98390_SOFTWARE_RESET ... MAX98390_INT_EN3:
+ case MAX98390_MEAS_ADC_CH0_READ ... MAX98390_MEAS_ADC_CH2_READ:
+ case MAX98390_PWR_GATE_STATUS ... MAX98390_BROWNOUT_STATUS:
+ case MAX98390_BROWNOUT_LOWEST_STATUS:
+ case MAX98390_ENV_TRACK_BOOST_VOUT_READ:
+ case DSM_STBASS_HPF_B0_BYTE0 ... DSM_DEBUZZER_ATTACK_TIME_BYTE2:
+ case THERMAL_RDC_RD_BACK_BYTE1 ... THERMAL_COILTEMP_RD_BACK_BYTE0:
+ case DSM_THERMAL_GAIN ... DSM_WBDRC_GAIN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#define MAX98390_RATES SNDRV_PCM_RATE_8000_48000
+
+#define MAX98390_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver max98390_dai[] = {
+ {
+ .name = "max98390-aif1",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98390_RATES,
+ .formats = MAX98390_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98390_RATES,
+ .formats = MAX98390_FORMATS,
+ },
+ .ops = &max98390_dai_ops,
+ }
+};
+
+static int max98390_dsm_init(struct snd_soc_component *component)
+{
+ int ret;
+ int param_size, param_start_addr;
+ char filename[128];
+ const char *vendor, *product;
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+ const struct firmware *fw;
+ char *dsm_param;
+
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+
+ if (vendor && product) {
+ snprintf(filename, sizeof(filename), "dsm_param_%s_%s.bin",
+ vendor, product);
+ } else {
+ sprintf(filename, "dsm_param.bin");
+ }
+ ret = request_firmware(&fw, filename, component->dev);
+ if (ret) {
+ ret = request_firmware(&fw, "dsm_param.bin", component->dev);
+ if (ret)
+ goto err;
+ }
+
+ dev_dbg(component->dev,
+ "max98390: param fw size %zd\n",
+ fw->size);
+ if (fw->size < MAX98390_DSM_PARAM_MIN_SIZE) {
+ dev_err(component->dev,
+ "param fw is invalid.\n");
+ goto err_alloc;
+ }
+ dsm_param = (char *)fw->data;
+ param_start_addr = (dsm_param[0] & 0xff) | (dsm_param[1] & 0xff) << 8;
+ param_size = (dsm_param[2] & 0xff) | (dsm_param[3] & 0xff) << 8;
+ if (param_size > MAX98390_DSM_PARAM_MAX_SIZE ||
+ param_start_addr < DSM_STBASS_HPF_B0_BYTE0 ||
+ fw->size < param_size + MAX98390_DSM_PAYLOAD_OFFSET) {
+ dev_err(component->dev,
+ "param fw is invalid.\n");
+ goto err_alloc;
+ }
+ regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x80);
+ dsm_param += MAX98390_DSM_PAYLOAD_OFFSET;
+ regmap_bulk_write(max98390->regmap, param_start_addr,
+ dsm_param, param_size);
+ regmap_write(max98390->regmap, MAX98390_R23E1_DSP_GLOBAL_EN, 0x01);
+
+err_alloc:
+ release_firmware(fw);
+err:
+ return ret;
+}
+
+static int max98390_dsm_calibrate(struct snd_soc_component *component)
+{
+ unsigned int rdc, rdc_cal_result, temp;
+ unsigned int rdc_integer, rdc_factor;
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x81);
+ regmap_write(max98390->regmap, MAX98390_R23FF_GLOBAL_EN, 0x01);
+
+ regmap_read(max98390->regmap,
+ THERMAL_RDC_RD_BACK_BYTE1, &rdc);
+ regmap_read(max98390->regmap,
+ THERMAL_RDC_RD_BACK_BYTE0, &rdc_cal_result);
+ rdc_cal_result |= (rdc << 8) & 0x0000FFFF;
+ if (rdc_cal_result)
+ max98390->ref_rdc_value = 268435456U / rdc_cal_result;
+
+ regmap_read(max98390->regmap, MAX98390_MEAS_ADC_CH2_READ, &temp);
+ max98390->ambient_temp_value = temp * 52 - 1188;
+
+ rdc_integer = rdc_cal_result * 937 / 65536;
+ rdc_factor = ((rdc_cal_result * 937 * 100) / 65536)
+ - (rdc_integer * 100);
+
+ dev_info(component->dev, "rdc resistance about %d.%02d ohm, reg=0x%X temp reg=0x%X\n",
+ rdc_integer, rdc_factor, rdc_cal_result, temp);
+
+ regmap_write(max98390->regmap, MAX98390_R23FF_GLOBAL_EN, 0x00);
+ regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x80);
+
+ return 0;
+}
+
+static int max98390_probe(struct snd_soc_component *component)
+{
+ struct max98390_priv *max98390 =
+ snd_soc_component_get_drvdata(component);
+
+ regmap_write(max98390->regmap, MAX98390_SOFTWARE_RESET, 0x01);
+ /* Sleep reset settle time */
+ msleep(20);
+ /* Update dsm bin param */
+ max98390_dsm_init(component);
+
+ /* Amp Setting */
+ regmap_write(max98390->regmap, MAX98390_CLK_MON, 0x6f);
+ regmap_write(max98390->regmap, MAX98390_PCM_RX_EN_A, 0x03);
+ regmap_write(max98390->regmap, MAX98390_PWR_GATE_CTL, 0x2d);
+ regmap_write(max98390->regmap, MAX98390_ENV_TRACK_VOUT_HEADROOM, 0x0e);
+ regmap_write(max98390->regmap, MAX98390_BOOST_BYPASS1, 0x46);
+ regmap_write(max98390->regmap, MAX98390_FET_SCALING3, 0x03);
+
+ /* Dsm Setting */
+ regmap_write(max98390->regmap, DSM_VOL_CTRL, 0x94);
+ regmap_write(max98390->regmap, DSMIG_EN, 0x19);
+ regmap_write(max98390->regmap, MAX98390_R203A_AMP_EN, 0x80);
+ if (max98390->ref_rdc_value) {
+ regmap_write(max98390->regmap, DSM_TPROT_RECIP_RDC_ROOM_BYTE0,
+ max98390->ref_rdc_value & 0x000000ff);
+ regmap_write(max98390->regmap, DSM_TPROT_RECIP_RDC_ROOM_BYTE1,
+ (max98390->ref_rdc_value >> 8) & 0x000000ff);
+ regmap_write(max98390->regmap, DSM_TPROT_RECIP_RDC_ROOM_BYTE2,
+ (max98390->ref_rdc_value >> 16) & 0x000000ff);
+ }
+ if (max98390->ambient_temp_value) {
+ regmap_write(max98390->regmap, DSM_TPROT_ROOM_TEMPERATURE_BYTE1,
+ (max98390->ambient_temp_value >> 8) & 0x000000ff);
+ regmap_write(max98390->regmap, DSM_TPROT_ROOM_TEMPERATURE_BYTE0,
+ (max98390->ambient_temp_value) & 0x000000ff);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max98390_suspend(struct device *dev)
+{
+ struct max98390_priv *max98390 = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s:Enter\n", __func__);
+
+ regcache_cache_only(max98390->regmap, true);
+ regcache_mark_dirty(max98390->regmap);
+
+ return 0;
+}
+
+static int max98390_resume(struct device *dev)
+{
+ struct max98390_priv *max98390 = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s:Enter\n", __func__);
+
+ regcache_cache_only(max98390->regmap, false);
+ regcache_sync(max98390->regmap);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max98390_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(max98390_suspend, max98390_resume)
+};
+
+static const struct snd_soc_component_driver soc_codec_dev_max98390 = {
+ .probe = max98390_probe,
+ .controls = max98390_snd_controls,
+ .num_controls = ARRAY_SIZE(max98390_snd_controls),
+ .dapm_widgets = max98390_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98390_dapm_widgets),
+ .dapm_routes = max98390_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98390_audio_map),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config max98390_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MAX98390_R24FF_REV_ID,
+ .reg_defaults = max98390_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max98390_reg_defaults),
+ .readable_reg = max98390_readable_register,
+ .volatile_reg = max98390_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id max98390_dt_ids[] = {
+ { .compatible = "maxim,max98390", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98390_dt_ids);
+#endif
+
+static int max98390_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ int reg = 0;
+
+ struct max98390_priv *max98390 = NULL;
+ struct i2c_adapter *adapter = to_i2c_adapter(i2c->dev.parent);
+
+ ret = i2c_check_functionality(adapter,
+ I2C_FUNC_SMBUS_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA);
+ if (!ret) {
+ dev_err(&i2c->dev, "I2C check functionality failed\n");
+ return -ENXIO;
+ }
+
+ max98390 = devm_kzalloc(&i2c->dev, sizeof(*max98390), GFP_KERNEL);
+ if (!max98390) {
+ ret = -ENOMEM;
+ return ret;
+ }
+ i2c_set_clientdata(i2c, max98390);
+
+ ret = device_property_read_u32(&i2c->dev, "maxim,temperature_calib",
+ &max98390->ambient_temp_value);
+ if (ret) {
+ dev_info(&i2c->dev,
+ "no optional property 'temperature_calib' found, default:\n");
+ }
+ ret = device_property_read_u32(&i2c->dev, "maxim,r0_calib",
+ &max98390->ref_rdc_value);
+ if (ret) {
+ dev_info(&i2c->dev,
+ "no optional property 'r0_calib' found, default:\n");
+ }
+
+ dev_info(&i2c->dev,
+ "%s: r0_calib: 0x%x,temperature_calib: 0x%x",
+ __func__, max98390->ref_rdc_value,
+ max98390->ambient_temp_value);
+
+ /* regmap initialization */
+ max98390->regmap = devm_regmap_init_i2c(i2c, &max98390_regmap);
+ if (IS_ERR(max98390->regmap)) {
+ ret = PTR_ERR(max98390->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ /* Check Revision ID */
+ ret = regmap_read(max98390->regmap,
+ MAX98390_R24FF_REV_ID, &reg);
+ if (ret) {
+ dev_err(&i2c->dev,
+ "ret=%d, Failed to read: 0x%02X\n",
+ ret, MAX98390_R24FF_REV_ID);
+ return ret;
+ }
+ dev_info(&i2c->dev, "MAX98390 revisionID: 0x%02X\n", reg);
+
+ ret = devm_snd_soc_register_component(&i2c->dev,
+ &soc_codec_dev_max98390,
+ max98390_dai, ARRAY_SIZE(max98390_dai));
+
+ return ret;
+}
+
+static const struct i2c_device_id max98390_i2c_id[] = {
+ { "max98390", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, max98390_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id max98390_of_match[] = {
+ { .compatible = "maxim,max98390", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max98390_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max98390_acpi_match[] = {
+ { "MX98390", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, max98390_acpi_match);
+#endif
+
+static struct i2c_driver max98390_i2c_driver = {
+ .driver = {
+ .name = "max98390",
+ .of_match_table = of_match_ptr(max98390_of_match),
+ .acpi_match_table = ACPI_PTR(max98390_acpi_match),
+ .pm = &max98390_pm,
+ },
+ .probe = max98390_i2c_probe,
+ .id_table = max98390_i2c_id,
+};
+
+module_i2c_driver(max98390_i2c_driver)
+
+MODULE_DESCRIPTION("ALSA SoC MAX98390 driver");
+MODULE_AUTHOR("Steve Lee <steves.lee@maximintegrated.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98390.h b/sound/soc/codecs/max98390.h
new file mode 100644
index 000000000000..5f444e7779b0
--- /dev/null
+++ b/sound/soc/codecs/max98390.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Maxim Integrated.
+ */
+
+#ifndef _MAX98390_H
+#define _MAX98390_H
+
+/* MAX98390 Register Address */
+#define MAX98390_SOFTWARE_RESET 0x2000
+#define MAX98390_INT_RAW1 0x2002
+#define MAX98390_INT_RAW2 0x2003
+#define MAX98390_INT_RAW3 0x2004
+#define MAX98390_INT_STATE1 0x2005
+#define MAX98390_INT_STATE2 0x2006
+#define MAX98390_INT_STATE3 0x2007
+#define MAX98390_INT_FLAG1 0x2008
+#define MAX98390_INT_FLAG2 0x2009
+#define MAX98390_INT_FLAG3 0x200a
+#define MAX98390_INT_EN1 0x200b
+#define MAX98390_INT_EN2 0x200c
+#define MAX98390_INT_EN3 0x200d
+#define MAX98390_INT_FLAG_CLR1 0x200e
+#define MAX98390_INT_FLAG_CLR2 0x200f
+#define MAX98390_INT_FLAG_CLR3 0x2010
+#define MAX98390_IRQ_CTRL 0x2011
+#define MAX98390_CLK_MON 0x2012
+#define MAX98390_DAT_MON 0x2014
+#define MAX98390_WDOG_CTRL 0x2015
+#define MAX98390_WDOG_RST 0x2016
+#define MAX98390_MEAS_ADC_THERM_WARN_THRESH 0x2017
+#define MAX98390_MEAS_ADC_THERM_SHDN_THRESH 0x2018
+#define MAX98390_MEAS_ADC_THERM_HYSTERESIS 0x2019
+#define MAX98390_PIN_CFG 0x201a
+#define MAX98390_PCM_RX_EN_A 0x201b
+#define MAX98390_PCM_RX_EN_B 0x201c
+#define MAX98390_PCM_TX_EN_A 0x201d
+#define MAX98390_PCM_TX_EN_B 0x201e
+#define MAX98390_PCM_TX_HIZ_CTRL_A 0x201f
+#define MAX98390_PCM_TX_HIZ_CTRL_B 0x2020
+#define MAX98390_PCM_CH_SRC_1 0x2021
+#define MAX98390_PCM_CH_SRC_2 0x2022
+#define MAX98390_PCM_CH_SRC_3 0x2023
+#define MAX98390_PCM_MODE_CFG 0x2024
+#define MAX98390_PCM_MASTER_MODE 0x2025
+#define MAX98390_PCM_CLK_SETUP 0x2026
+#define MAX98390_PCM_SR_SETUP 0x2027
+#define MAX98390_ICC_RX_EN_A 0x202c
+#define MAX98390_ICC_RX_EN_B 0x202d
+#define MAX98390_ICC_TX_EN_A 0x202e
+#define MAX98390_ICC_TX_EN_B 0x202f
+#define MAX98390_ICC_HIZ_MANUAL_MODE 0x2030
+#define MAX98390_ICC_TX_HIZ_EN_A 0x2031
+#define MAX98390_ICC_TX_HIZ_EN_B 0x2032
+#define MAX98390_ICC_LNK_EN 0x2033
+#define MAX98390_R2039_AMP_DSP_CFG 0x2039
+#define MAX98390_R203A_AMP_EN 0x203a
+#define MAX98390_TONE_GEN_DC_CFG 0x203b
+#define MAX98390_SPK_SRC_SEL 0x203c
+#define MAX98390_R203D_SPK_GAIN 0x203d
+#define MAX98390_SSM_CFG 0x203e
+#define MAX98390_MEAS_EN 0x203f
+#define MAX98390_MEAS_DSP_CFG 0x2040
+#define MAX98390_BOOST_CTRL0 0x2041
+#define MAX98390_BOOST_CTRL3 0x2042
+#define MAX98390_BOOST_CTRL1 0x2043
+#define MAX98390_MEAS_ADC_CFG 0x2044
+#define MAX98390_MEAS_ADC_BASE_MSB 0x2045
+#define MAX98390_MEAS_ADC_BASE_LSB 0x2046
+#define MAX98390_ADC_CH0_DIVIDE 0x2047
+#define MAX98390_ADC_CH1_DIVIDE 0x2048
+#define MAX98390_ADC_CH2_DIVIDE 0x2049
+#define MAX98390_ADC_CH0_FILT_CFG 0x204a
+#define MAX98390_ADC_CH1_FILT_CFG 0x204b
+#define MAX98390_ADC_CH2_FILT_CFG 0x204c
+#define MAX98390_MEAS_ADC_CH0_READ 0x204d
+#define MAX98390_MEAS_ADC_CH1_READ 0x204e
+#define MAX98390_MEAS_ADC_CH2_READ 0x204f
+#define MAX98390_PWR_GATE_CTL 0x2050
+#define MAX98390_PWR_GATE_STATUS 0x2051
+#define MAX98390_VBAT_LOW_STATUS 0x2052
+#define MAX98390_PVDD_LOW_STATUS 0x2053
+#define MAX98390_BROWNOUT_STATUS 0x2054
+#define MAX98390_BROWNOUT_EN 0x2055
+#define MAX98390_BROWNOUT_INFINITE_HOLD 0x2056
+#define MAX98390_BROWNOUT_INFINITE_HOLD_CLR 0x2057
+#define MAX98390_BROWNOUT_LVL_HOLD 0x2058
+#define MAX98390_BROWNOUT_LVL1_THRESH 0x2059
+#define MAX98390_BROWNOUT_LVL2_THRESH 0x205a
+#define MAX98390_BROWNOUT_LVL3_THRESH 0x205b
+#define MAX98390_BROWNOUT_LVL4_THRESH 0x205c
+#define MAX98390_BROWNOUT_THRESH_HYSTERYSIS 0x205d
+#define MAX98390_BROWNOUT_AMP_LIMITER_ATK_REL 0x205e
+#define MAX98390_BROWNOUT_AMP_GAIN_ATK_REL 0x205f
+#define MAX98390_BROWNOUT_AMP1_CLIP_MODE 0x2060
+#define MAX98390_BROWNOUT_LVL1_CUR_LIMIT 0x2061
+#define MAX98390_BROWNOUT_LVL1_AMP1_CTRL1 0x2062
+#define MAX98390_BROWNOUT_LVL1_AMP1_CTRL2 0x2063
+#define MAX98390_BROWNOUT_LVL1_AMP1_CTRL3 0x2064
+#define MAX98390_BROWNOUT_LVL2_CUR_LIMIT 0x2065
+#define MAX98390_BROWNOUT_LVL2_AMP1_CTRL1 0x2066
+#define MAX98390_BROWNOUT_LVL2_AMP1_CTRL2 0x2067
+#define MAX98390_BROWNOUT_LVL2_AMP1_CTRL3 0x2068
+#define MAX98390_BROWNOUT_LVL3_CUR_LIMIT 0x2069
+#define MAX98390_BROWNOUT_LVL3_AMP1_CTRL1 0x206a
+#define MAX98390_BROWNOUT_LVL3_AMP1_CTRL2 0x206b
+#define MAX98390_BROWNOUT_LVL3_AMP1_CTRL3 0x206c
+#define MAX98390_BROWNOUT_LVL4_CUR_LIMIT 0x206d
+#define MAX98390_BROWNOUT_LVL4_AMP1_CTRL1 0x206e
+#define MAX98390_BROWNOUT_LVL4_AMP1_CTRL2 0x206f
+#define MAX98390_BROWNOUT_LVL4_AMP1_CTRL3 0x2070
+#define MAX98390_BROWNOUT_LOWEST_STATUS 0x2071
+#define MAX98390_BROWNOUT_ILIM_HLD 0x2072
+#define MAX98390_BROWNOUT_LIM_HLD 0x2073
+#define MAX98390_BROWNOUT_CLIP_HLD 0x2074
+#define MAX98390_BROWNOUT_GAIN_HLD 0x2075
+#define MAX98390_ENV_TRACK_VOUT_HEADROOM 0x2076
+#define MAX98390_ENV_TRACK_BOOST_VOUT_DELAY 0x2077
+#define MAX98390_ENV_TRACK_REL_RATE 0x2078
+#define MAX98390_ENV_TRACK_HOLD_RATE 0x2079
+#define MAX98390_ENV_TRACK_CTRL 0x207a
+#define MAX98390_ENV_TRACK_BOOST_VOUT_READ 0x207b
+#define MAX98390_BOOST_BYPASS1 0x207c
+#define MAX98390_BOOST_BYPASS2 0x207d
+#define MAX98390_BOOST_BYPASS3 0x207e
+#define MAX98390_FET_SCALING1 0x207f
+#define MAX98390_FET_SCALING2 0x2080
+#define MAX98390_FET_SCALING3 0x2081
+#define MAX98390_FET_SCALING4 0x2082
+#define MAX98390_SPK_SPEEDUP 0x2084
+
+#define DSM_STBASS_HPF_B0_BYTE0 0x2101
+#define DSM_STBASS_HPF_B0_BYTE1 0x2102
+#define DSM_STBASS_HPF_B0_BYTE2 0x2103
+#define DSM_STBASS_HPF_B1_BYTE0 0x2105
+#define DSM_STBASS_HPF_B1_BYTE1 0x2106
+#define DSM_STBASS_HPF_B1_BYTE2 0x2107
+#define DSM_STBASS_HPF_B2_BYTE0 0x2109
+#define DSM_STBASS_HPF_B2_BYTE1 0x210a
+#define DSM_STBASS_HPF_B2_BYTE2 0x210b
+#define DSM_STBASS_HPF_A1_BYTE0 0x210d
+#define DSM_STBASS_HPF_A1_BYTE1 0x210e
+#define DSM_STBASS_HPF_A1_BYTE2 0x210f
+#define DSM_STBASS_HPF_A2_BYTE0 0x2111
+#define DSM_STBASS_HPF_A2_BYTE1 0x2112
+#define DSM_STBASS_HPF_A2_BYTE2 0x2113
+#define DSM_STBASS_LPF_B0_BYTE0 0x2115
+#define DSM_STBASS_LPF_B0_BYTE1 0x2116
+#define DSM_STBASS_LPF_B0_BYTE2 0x2117
+#define DSM_STBASS_LPF_B1_BYTE0 0x2119
+#define DSM_STBASS_LPF_B1_BYTE1 0x211a
+#define DSM_STBASS_LPF_B1_BYTE2 0x211b
+#define DSM_STBASS_LPF_B2_BYTE0 0x211d
+#define DSM_STBASS_LPF_B2_BYTE1 0x211e
+#define DSM_STBASS_LPF_B2_BYTE2 0x211f
+#define DSM_STBASS_LPF_A1_BYTE0 0x2121
+#define DSM_STBASS_LPF_A1_BYTE1 0x2122
+#define DSM_STBASS_LPF_A1_BYTE2 0x2123
+#define DSM_STBASS_LPF_A2_BYTE0 0x2125
+#define DSM_STBASS_LPF_A2_BYTE1 0x2126
+#define DSM_STBASS_LPF_A2_BYTE2 0x2127
+#define DSM_EQ_BQ1_B0_BYTE0 0x2129
+#define DSM_EQ_BQ1_B0_BYTE1 0x212a
+#define DSM_EQ_BQ1_B0_BYTE2 0x212b
+#define DSM_EQ_BQ1_B1_BYTE0 0x212d
+#define DSM_EQ_BQ1_B1_BYTE1 0x212e
+#define DSM_EQ_BQ1_B1_BYTE2 0x212f
+#define DSM_EQ_BQ1_B2_BYTE0 0x2131
+#define DSM_EQ_BQ1_B2_BYTE1 0x2132
+#define DSM_EQ_BQ1_B2_BYTE2 0x2133
+#define DSM_EQ_BQ1_A1_BYTE0 0x2135
+#define DSM_EQ_BQ1_A1_BYTE1 0x2136
+#define DSM_EQ_BQ1_A1_BYTE2 0x2137
+#define DSM_EQ_BQ1_A2_BYTE0 0x2139
+#define DSM_EQ_BQ1_A2_BYTE1 0x213a
+#define DSM_EQ_BQ1_A2_BYTE2 0x213b
+#define DSM_EQ_BQ2_B0_BYTE0 0x213d
+#define DSM_EQ_BQ2_B0_BYTE1 0x213e
+#define DSM_EQ_BQ2_B0_BYTE2 0x213f
+#define DSM_EQ_BQ2_B1_BYTE0 0x2141
+#define DSM_EQ_BQ2_B1_BYTE1 0x2142
+#define DSM_EQ_BQ2_B1_BYTE2 0x2143
+#define DSM_EQ_BQ2_B2_BYTE0 0x2145
+#define DSM_EQ_BQ2_B2_BYTE1 0x2146
+#define DSM_EQ_BQ2_B2_BYTE2 0x2147
+#define DSM_EQ_BQ2_A1_BYTE0 0x2149
+#define DSM_EQ_BQ2_A1_BYTE1 0x214a
+#define DSM_EQ_BQ2_A1_BYTE2 0x214b
+#define DSM_EQ_BQ2_A2_BYTE0 0x214d
+#define DSM_EQ_BQ2_A2_BYTE1 0x214e
+#define DSM_EQ_BQ2_A2_BYTE2 0x214f
+#define DSM_EQ_BQ3_B0_BYTE0 0x2151
+#define DSM_EQ_BQ3_B0_BYTE1 0x2152
+#define DSM_EQ_BQ3_B0_BYTE2 0x2153
+#define DSM_EQ_BQ3_B1_BYTE0 0x2155
+#define DSM_EQ_BQ3_B1_BYTE1 0x2156
+#define DSM_EQ_BQ3_B1_BYTE2 0x2157
+#define DSM_EQ_BQ3_B2_BYTE0 0x2159
+#define DSM_EQ_BQ3_B2_BYTE1 0x215a
+#define DSM_EQ_BQ3_B2_BYTE2 0x215b
+#define DSM_EQ_BQ3_A1_BYTE0 0x215d
+#define DSM_EQ_BQ3_A1_BYTE1 0x215e
+#define DSM_EQ_BQ3_A1_BYTE2 0x215f
+#define DSM_EQ_BQ3_A2_BYTE0 0x2161
+#define DSM_EQ_BQ3_A2_BYTE1 0x2162
+#define DSM_EQ_BQ3_A2_BYTE2 0x2163
+#define DSM_EQ_BQ4_B0_BYTE0 0x2165
+#define DSM_EQ_BQ4_B0_BYTE1 0x2166
+#define DSM_EQ_BQ4_B0_BYTE2 0x2167
+#define DSM_EQ_BQ4_B1_BYTE0 0x2169
+#define DSM_EQ_BQ4_B1_BYTE1 0x216a
+#define DSM_EQ_BQ4_B1_BYTE2 0x216b
+#define DSM_EQ_BQ4_B2_BYTE0 0x216d
+#define DSM_EQ_BQ4_B2_BYTE1 0x216e
+#define DSM_EQ_BQ4_B2_BYTE2 0x216f
+#define DSM_EQ_BQ4_A1_BYTE0 0x2171
+#define DSM_EQ_BQ4_A1_BYTE1 0x2172
+#define DSM_EQ_BQ4_A1_BYTE2 0x2173
+#define DSM_EQ_BQ4_A2_BYTE0 0x2175
+#define DSM_EQ_BQ4_A2_BYTE1 0x2176
+#define DSM_EQ_BQ4_A2_BYTE2 0x2177
+#define DSM_EQ_BQ5_B0_BYTE0 0x2179
+#define DSM_EQ_BQ5_B0_BYTE1 0x217a
+#define DSM_EQ_BQ5_B0_BYTE2 0x217b
+#define DSM_EQ_BQ5_B1_BYTE0 0x217d
+#define DSM_EQ_BQ5_B1_BYTE1 0x217e
+#define DSM_EQ_BQ5_B1_BYTE2 0x217f
+#define DSM_EQ_BQ5_B2_BYTE0 0x2181
+#define DSM_EQ_BQ5_B2_BYTE1 0x2182
+#define DSM_EQ_BQ5_B2_BYTE2 0x2183
+#define DSM_EQ_BQ5_A1_BYTE0 0x2185
+#define DSM_EQ_BQ5_A1_BYTE1 0x2186
+#define DSM_EQ_BQ5_A1_BYTE2 0x2187
+#define DSM_EQ_BQ5_A2_BYTE0 0x2189
+#define DSM_EQ_BQ5_A2_BYTE1 0x218a
+#define DSM_EQ_BQ5_A2_BYTE2 0x218b
+#define DSM_EQ_BQ6_B0_BYTE0 0x218d
+#define DSM_EQ_BQ6_B0_BYTE1 0x218e
+#define DSM_EQ_BQ6_B0_BYTE2 0x218f
+#define DSM_EQ_BQ6_B1_BYTE0 0x2191
+#define DSM_EQ_BQ6_B1_BYTE1 0x2192
+#define DSM_EQ_BQ6_B1_BYTE2 0x2193
+#define DSM_EQ_BQ6_B2_BYTE0 0x2195
+#define DSM_EQ_BQ6_B2_BYTE1 0x2196
+#define DSM_EQ_BQ6_B2_BYTE2 0x2197
+#define DSM_EQ_BQ6_A1_BYTE0 0x2199
+#define DSM_EQ_BQ6_A1_BYTE1 0x219a
+#define DSM_EQ_BQ6_A1_BYTE2 0x219b
+#define DSM_EQ_BQ6_A2_BYTE0 0x219d
+#define DSM_EQ_BQ6_A2_BYTE1 0x219e
+#define DSM_EQ_BQ6_A2_BYTE2 0x219f
+#define DSM_EQ_BQ7_B0_BYTE0 0x21a1
+#define DSM_EQ_BQ7_B0_BYTE1 0x21a2
+#define DSM_EQ_BQ7_B0_BYTE2 0x21a3
+#define DSM_EQ_BQ7_B1_BYTE0 0x21a5
+#define DSM_EQ_BQ7_B1_BYTE1 0x21a6
+#define DSM_EQ_BQ7_B1_BYTE2 0x21a7
+#define DSM_EQ_BQ7_B2_BYTE0 0x21a9
+#define DSM_EQ_BQ7_B2_BYTE1 0x21aa
+#define DSM_EQ_BQ7_B2_BYTE2 0x21ab
+#define DSM_EQ_BQ7_A1_BYTE0 0x21ad
+#define DSM_EQ_BQ7_A1_BYTE1 0x21ae
+#define DSM_EQ_BQ7_A1_BYTE2 0x21af
+#define DSM_EQ_BQ7_A2_BYTE0 0x21b1
+#define DSM_EQ_BQ7_A2_BYTE1 0x21b2
+#define DSM_EQ_BQ7_A2_BYTE2 0x21b3
+#define DSM_EQ_BQ8_B0_BYTE0 0x21b5
+#define DSM_EQ_BQ8_B0_BYTE1 0x21b6
+#define DSM_EQ_BQ8_B0_BYTE2 0x21b7
+#define DSM_EQ_BQ8_B1_BYTE0 0x21b9
+#define DSM_EQ_BQ8_B1_BYTE1 0x21ba
+#define DSM_EQ_BQ8_B1_BYTE2 0x21bb
+#define DSM_EQ_BQ8_B2_BYTE0 0x21bd
+#define DSM_EQ_BQ8_B2_BYTE1 0x21be
+#define DSM_EQ_BQ8_B2_BYTE2 0x21bf
+#define DSM_EQ_BQ8_A1_BYTE0 0x21c1
+#define DSM_EQ_BQ8_A1_BYTE1 0x21c2
+#define DSM_EQ_BQ8_A1_BYTE2 0x21c3
+#define DSM_EQ_BQ8_A2_BYTE0 0x21c5
+#define DSM_EQ_BQ8_A2_BYTE1 0x21c6
+#define DSM_EQ_BQ8_A2_BYTE2 0x21c7
+#define DSM_LFX_BQ_B0_BYTE0 0x21c9
+#define DSM_LFX_BQ_B0_BYTE1 0x21ca
+#define DSM_LFX_BQ_B0_BYTE2 0x21cb
+#define DSM_LFX_BQ_B1_BYTE0 0x21cd
+#define DSM_LFX_BQ_B1_BYTE1 0x21ce
+#define DSM_LFX_BQ_B1_BYTE2 0x21cf
+#define DSM_LFX_BQ_B2_BYTE0 0x21d1
+#define DSM_LFX_BQ_B2_BYTE1 0x21d2
+#define DSM_LFX_BQ_B2_BYTE2 0x21d3
+#define DSM_LFX_BQ_A1_BYTE0 0x21d5
+#define DSM_LFX_BQ_A1_BYTE1 0x21d6
+#define DSM_LFX_BQ_A1_BYTE2 0x21d7
+#define DSM_LFX_BQ_A2_BYTE0 0x21d9
+#define DSM_LFX_BQ_A2_BYTE1 0x21da
+#define DSM_LFX_BQ_A2_BYTE2 0x21db
+#define DSM_PPR_HPF_B0_BYTE0 0x21dd
+#define DSM_PPR_HPF_B0_BYTE1 0x21de
+#define DSM_PPR_HPF_B0_BYTE2 0x21df
+#define DSM_PPR_HPF_B1_BYTE0 0x21e1
+#define DSM_PPR_HPF_B1_BYTE1 0x21e2
+#define DSM_PPR_HPF_B1_BYTE2 0x21e3
+#define DSM_PPR_HPF_B2_BYTE0 0x21e5
+#define DSM_PPR_HPF_B2_BYTE1 0x21e6
+#define DSM_PPR_HPF_B2_BYTE2 0x21e7
+#define DSM_PPR_HPF_A1_BYTE0 0x21e9
+#define DSM_PPR_HPF_A1_BYTE1 0x21ea
+#define DSM_PPR_HPF_A1_BYTE2 0x21eb
+#define DSM_PPR_HPF_A2_BYTE0 0x21ed
+#define DSM_PPR_HPF_A2_BYTE1 0x21ee
+#define DSM_PPR_HPF_A2_BYTE2 0x21ef
+#define DSM_PPR_LPF_B0_BYTE0 0x21f1
+#define DSM_PPR_LPF_B0_BYTE1 0x21f2
+#define DSM_PPR_LPF_B0_BYTE2 0x21f3
+#define DSM_PPR_LPF_B1_BYTE0 0x21f5
+#define DSM_PPR_LPF_B1_BYTE1 0x21f6
+#define DSM_PPR_LPF_B1_BYTE2 0x21f7
+#define DSM_PPR_LPF_B2_BYTE0 0x21f9
+#define DSM_PPR_LPF_B2_BYTE1 0x21fa
+#define DSM_PPR_LPF_B2_BYTE2 0x21fb
+#define DSM_PPR_LPF_A1_BYTE0 0x21fd
+#define DSM_PPR_LPF_A1_BYTE1 0x21fe
+#define DSM_PPR_LPF_A1_BYTE2 0x21ff
+#define DSM_PPR_LPF_A2_BYTE0 0x2201
+#define DSM_PPR_LPF_A2_BYTE1 0x2202
+#define DSM_PPR_LPF_A2_BYTE2 0x2203
+#define DSM_SPL_BQ_B0_BYTE0 0x2205
+#define DSM_SPL_BQ_B0_BYTE1 0x2206
+#define DSM_SPL_BQ_B0_BYTE2 0x2207
+#define DSM_SPL_BQ_B1_BYTE0 0x2209
+#define DSM_SPL_BQ_B1_BYTE1 0x220a
+#define DSM_SPL_BQ_B1_BYTE2 0x220b
+#define DSM_SPL_BQ_B2_BYTE0 0x220d
+#define DSM_SPL_BQ_B2_BYTE1 0x220e
+#define DSM_SPL_BQ_B2_BYTE2 0x220f
+#define DSM_SPL_BQ_A1_BYTE0 0x2211
+#define DSM_SPL_BQ_A1_BYTE1 0x2212
+#define DSM_SPL_BQ_A1_BYTE2 0x2213
+#define DSM_SPL_BQ_A2_BYTE0 0x2215
+#define DSM_SPL_BQ_A2_BYTE1 0x2216
+#define DSM_SPL_BQ_A2_BYTE2 0x2217
+#define DSM_EXCUR_BQ_B0_BYTE0 0x2219
+#define DSM_EXCUR_BQ_B0_BYTE1 0x221a
+#define DSM_EXCUR_BQ_B0_BYTE2 0x221b
+#define DSM_EXCUR_BQ_B1_BYTE0 0x221d
+#define DSM_EXCUR_BQ_B1_BYTE1 0x221e
+#define DSM_EXCUR_BQ_B1_BYTE2 0x221f
+#define DSM_EXCUR_BQ_B2_BYTE0 0x2221
+#define DSM_EXCUR_BQ_B2_BYTE1 0x2222
+#define DSM_EXCUR_BQ_B2_BYTE2 0x2223
+#define DSM_EXCUR_BQ_A1_BYTE0 0x2225
+#define DSM_EXCUR_BQ_A1_BYTE1 0x2226
+#define DSM_EXCUR_BQ_A1_BYTE2 0x2227
+#define DSM_EXCUR_BQ_A2_BYTE0 0x2229
+#define DSM_EXCUR_BQ_A2_BYTE1 0x222a
+#define DSM_EXCUR_BQ_A2_BYTE2 0x222b
+#define DSM_EXCPROT_HPF1_B0_BYTE0 0x222d
+#define DSM_EXCPROT_HPF1_B0_BYTE1 0x222e
+#define DSM_EXCPROT_HPF1_B0_BYTE2 0x222f
+#define DSM_EXCPROT_HPF1_B1_BYTE0 0x2231
+#define DSM_EXCPROT_HPF1_B1_BYTE1 0x2232
+#define DSM_EXCPROT_HPF1_B1_BYTE2 0x2233
+#define DSM_EXCPROT_HPF1_B2_BYTE0 0x2235
+#define DSM_EXCPROT_HPF1_B2_BYTE1 0x2236
+#define DSM_EXCPROT_HPF1_B2_BYTE2 0x2237
+#define DSM_EXCPROT_HPF1_A1_BYTE0 0x2239
+#define DSM_EXCPROT_HPF1_A1_BYTE1 0x223a
+#define DSM_EXCPROT_HPF1_A1_BYTE2 0x223b
+#define DSM_EXCPROT_HPF1_A2_BYTE0 0x223d
+#define DSM_EXCPROT_HPF1_A2_BYTE1 0x223e
+#define DSM_EXCPROT_HPF1_A2_BYTE2 0x223f
+#define DSM_EXCPROT_HPF2_B0_BYTE0 0x2241
+#define DSM_EXCPROT_HPF2_B0_BYTE1 0x2242
+#define DSM_EXCPROT_HPF2_B0_BYTE2 0x2243
+#define DSM_EXCPROT_HPF2_B1_BYTE0 0x2245
+#define DSM_EXCPROT_HPF2_B1_BYTE1 0x2246
+#define DSM_EXCPROT_HPF2_B1_BYTE2 0x2247
+#define DSM_EXCPROT_HPF2_B2_BYTE0 0x2249
+#define DSM_EXCPROT_HPF2_B2_BYTE1 0x224a
+#define DSM_EXCPROT_HPF2_B2_BYTE2 0x224b
+#define DSM_EXCPROT_HPF2_A1_BYTE0 0x224d
+#define DSM_EXCPROT_HPF2_A1_BYTE1 0x224e
+#define DSM_EXCPROT_HPF2_A1_BYTE2 0x224f
+#define DSM_EXCPROT_HPF2_A2_BYTE0 0x2251
+#define DSM_EXCPROT_HPF2_A2_BYTE1 0x2252
+#define DSM_EXCPROT_HPF2_A2_BYTE2 0x2253
+#define DSM_EXCPROT_HPF3_B0_BYTE0 0x2255
+#define DSM_EXCPROT_HPF3_B0_BYTE1 0x2256
+#define DSM_EXCPROT_HPF3_B0_BYTE2 0x2257
+#define DSM_EXCPROT_HPF3_B1_BYTE0 0x2259
+#define DSM_EXCPROT_HPF3_B1_BYTE1 0x225a
+#define DSM_EXCPROT_HPF3_B1_BYTE2 0x225b
+#define DSM_EXCPROT_HPF3_B2_BYTE0 0x225d
+#define DSM_EXCPROT_HPF3_B2_BYTE1 0x225e
+#define DSM_EXCPROT_HPF3_B2_BYTE2 0x225f
+#define DSM_EXCPROT_HPF3_A1_BYTE0 0x2261
+#define DSM_EXCPROT_HPF3_A1_BYTE1 0x2262
+#define DSM_EXCPROT_HPF3_A1_BYTE2 0x2263
+#define DSM_EXCPROT_HPF3_A2_BYTE0 0x2265
+#define DSM_EXCPROT_HPF3_A2_BYTE1 0x2266
+#define DSM_EXCPROT_HPF3_A2_BYTE2 0x2267
+#define DSM_EXCPROT_HPF4_B0_BYTE0 0x2269
+#define DSM_EXCPROT_HPF4_B0_BYTE1 0x226a
+#define DSM_EXCPROT_HPF4_B0_BYTE2 0x226b
+#define DSM_EXCPROT_HPF4_B1_BYTE0 0x226d
+#define DSM_EXCPROT_HPF4_B1_BYTE1 0x226e
+#define DSM_EXCPROT_HPF4_B1_BYTE2 0x226f
+#define DSM_EXCPROT_HPF4_B2_BYTE0 0x2271
+#define DSM_EXCPROT_HPF4_B2_BYTE1 0x2272
+#define DSM_EXCPROT_HPF4_B2_BYTE2 0x2273
+#define DSM_EXCPROT_HPF4_A1_BYTE0 0x2275
+#define DSM_EXCPROT_HPF4_A1_BYTE1 0x2276
+#define DSM_EXCPROT_HPF4_A1_BYTE2 0x2277
+#define DSM_EXCPROT_HPF4_A2_BYTE0 0x2279
+#define DSM_EXCPROT_HPF4_A2_BYTE1 0x227a
+#define DSM_EXCPROT_HPF4_A2_BYTE2 0x227b
+#define DSM_EXCPROT_HPF5_B0_BYTE0 0x227d
+#define DSM_EXCPROT_HPF5_B0_BYTE1 0x227e
+#define DSM_EXCPROT_HPF5_B0_BYTE2 0x227f
+#define DSM_EXCPROT_HPF5_B1_BYTE0 0x2281
+#define DSM_EXCPROT_HPF5_B1_BYTE1 0x2282
+#define DSM_EXCPROT_HPF5_B1_BYTE2 0x2283
+#define DSM_EXCPROT_HPF5_B2_BYTE0 0x2285
+#define DSM_EXCPROT_HPF5_B2_BYTE1 0x2286
+#define DSM_EXCPROT_HPF5_B2_BYTE2 0x2287
+#define DSM_EXCPROT_HPF5_A1_BYTE0 0x2289
+#define DSM_EXCPROT_HPF5_A1_BYTE1 0x228a
+#define DSM_EXCPROT_HPF5_A1_BYTE2 0x228b
+#define DSM_EXCPROT_HPF5_A2_BYTE0 0x228d
+#define DSM_EXCPROT_HPF5_A2_BYTE1 0x228e
+#define DSM_EXCPROT_HPF5_A2_BYTE2 0x228f
+#define DSM_DEBUZZ_BPF_B0_BYTE0 0x2291
+#define DSM_DEBUZZ_BPF_B0_BYTE1 0x2292
+#define DSM_DEBUZZ_BPF_B0_BYTE2 0x2293
+#define DSM_DEBUZZ_BPF_B1_BYTE0 0x2295
+#define DSM_DEBUZZ_BPF_B1_BYTE1 0x2296
+#define DSM_DEBUZZ_BPF_B1_BYTE2 0x2297
+#define DSM_DEBUZZ_BPF_B2_BYTE0 0x2299
+#define DSM_DEBUZZ_BPF_B2_BYTE1 0x229a
+#define DSM_DEBUZZ_BPF_B2_BYTE2 0x229b
+#define DSM_DEBUZZ_BPF_A1_BYTE0 0x229d
+#define DSM_DEBUZZ_BPF_A1_BYTE1 0x229e
+#define DSM_DEBUZZ_BPF_A1_BYTE2 0x229f
+#define DSM_DEBUZZ_BPF_A2_BYTE0 0x22a1
+#define DSM_DEBUZZ_BPF_A2_BYTE1 0x22a2
+#define DSM_DEBUZZ_BPF_A2_BYTE2 0x22a3
+#define DSM_DEBUZZ_PORT_B0_BYTE0 0x22a5
+#define DSM_DEBUZZ_PORT_B0_BYTE1 0x22a6
+#define DSM_DEBUZZ_PORT_B0_BYTE2 0x22a7
+#define DSM_DEBUZZ_PORT_B1_BYTE0 0x22a9
+#define DSM_DEBUZZ_PORT_B1_BYTE1 0x22aa
+#define DSM_DEBUZZ_PORT_B1_BYTE2 0x22ab
+#define DSM_DEBUZZ_PORT_B2_BYTE0 0x22ad
+#define DSM_DEBUZZ_PORT_B2_BYTE1 0x22ae
+#define DSM_DEBUZZ_PORT_B2_BYTE2 0x22af
+#define DSM_DEBUZZ_PORT_A1_BYTE0 0x22b1
+#define DSM_DEBUZZ_PORT_A1_BYTE1 0x22b2
+#define DSM_DEBUZZ_PORT_A1_BYTE2 0x22b3
+#define DSM_DEBUZZ_PORT_A2_BYTE0 0x22b5
+#define DSM_DEBUZZ_PORT_A2_BYTE1 0x22b6
+#define DSM_DEBUZZ_PORT_A2_BYTE2 0x22b7
+#define DSM_DEBUZZ_NOTCH_B0_BYTE0 0x22b9
+#define DSM_DEBUZZ_NOTCH_B0_BYTE1 0x22ba
+#define DSM_DEBUZZ_NOTCH_B0_BYTE2 0x22bb
+#define DSM_DEBUZZ_NOTCH_B1_BYTE0 0x22bd
+#define DSM_DEBUZZ_NOTCH_B1_BYTE1 0x22be
+#define DSM_DEBUZZ_NOTCH_B1_BYTE2 0x22bf
+#define DSM_DEBUZZ_NOTCH_B2_BYTE0 0x22c1
+#define DSM_DEBUZZ_NOTCH_B2_BYTE1 0x22c2
+#define DSM_DEBUZZ_NOTCH_B2_BYTE2 0x22c3
+#define DSM_DEBUZZ_NOTCH_A1_BYTE0 0x22c5
+#define DSM_DEBUZZ_NOTCH_A1_BYTE1 0x22c6
+#define DSM_DEBUZZ_NOTCH_A1_BYTE2 0x22c7
+#define DSM_DEBUZZ_NOTCH_A2_BYTE0 0x22c9
+#define DSM_DEBUZZ_NOTCH_A2_BYTE1 0x22ca
+#define DSM_DEBUZZ_NOTCH_A2_BYTE2 0x22cb
+#define DSM_THERMAL_BQ_B0_BYTE0 0x22cd
+#define DSM_THERMAL_BQ_B0_BYTE1 0x22ce
+#define DSM_THERMAL_BQ_B0_BYTE2 0x22cf
+#define DSM_THERMAL_BQ_B1_BYTE0 0x22d1
+#define DSM_THERMAL_BQ_B1_BYTE1 0x22d2
+#define DSM_THERMAL_BQ_B1_BYTE2 0x22d3
+#define DSM_THERMAL_BQ_B2_BYTE0 0x22d5
+#define DSM_THERMAL_BQ_B2_BYTE1 0x22d6
+#define DSM_THERMAL_BQ_B2_BYTE2 0x22d7
+#define DSM_THERMAL_BQ_A1_BYTE0 0x22d9
+#define DSM_THERMAL_BQ_A1_BYTE1 0x22da
+#define DSM_THERMAL_BQ_A1_BYTE2 0x22db
+#define DSM_THERMAL_BQ_A2_BYTE0 0x22dd
+#define DSM_THERMAL_BQ_A2_BYTE1 0x22de
+#define DSM_THERMAL_BQ_A2_BYTE2 0x22df
+#define DSM_WBDRC_FILT1_B0_BYTE0 0x22e1
+#define DSM_WBDRC_FILT1_B0_BYTE1 0x22e2
+#define DSM_WBDRC_FILT1_B0_BYTE2 0x22e3
+#define DSM_WBDRC_FILT1_B1_BYTE0 0x22e5
+#define DSM_WBDRC_FILT1_B1_BYTE1 0x22e6
+#define DSM_WBDRC_FILT1_B1_BYTE2 0x22e7
+#define DSM_WBDRC_FILT1_B2_BYTE0 0x22e9
+#define DSM_WBDRC_FILT1_B2_BYTE1 0x22ea
+#define DSM_WBDRC_FILT1_B2_BYTE2 0x22eb
+#define DSM_WBDRC_FILT1_A1_BYTE0 0x22ed
+#define DSM_WBDRC_FILT1_A1_BYTE1 0x22ee
+#define DSM_WBDRC_FILT1_A1_BYTE2 0x22ef
+#define DSM_WBDRC_FILT1_A2_BYTE0 0x22f1
+#define DSM_WBDRC_FILT1_A2_BYTE1 0x22f2
+#define DSM_WBDRC_FILT1_A2_BYTE2 0x22f3
+#define DSM_WBDRC_FILT2_B0_BYTE0 0x22f5
+#define DSM_WBDRC_FILT2_B0_BYTE1 0x22f6
+#define DSM_WBDRC_FILT2_B0_BYTE2 0x22f7
+#define DSM_WBDRC_FILT2_B1_BYTE0 0x22f9
+#define DSM_WBDRC_FILT2_B1_BYTE1 0x22fa
+#define DSM_WBDRC_FILT2_B1_BYTE2 0x22fb
+#define DSM_WBDRC_FILT2_B2_BYTE0 0x22fd
+#define DSM_WBDRC_FILT2_B2_BYTE1 0x22fe
+#define DSM_WBDRC_FILT2_B2_BYTE2 0x22ff
+#define DSM_WBDRC_FILT2_A1_BYTE0 0x2301
+#define DSM_WBDRC_FILT2_A1_BYTE1 0x2302
+#define DSM_WBDRC_FILT2_A1_BYTE2 0x2303
+#define DSM_WBDRC_FILT2_A2_BYTE0 0x2305
+#define DSM_WBDRC_FILT2_A2_BYTE1 0x2306
+#define DSM_WBDRC_FILT2_A2_BYTE2 0x2307
+#define DSM_PPR_RELEASE_TIME_BYTE0 0x2309
+#define DSM_PPR_RELEASE_TIME_BYTE1 0x230a
+#define DSM_PPR_RELEASE_TIME_BYTE2 0x230b
+#define DSM_PPR_ATTACK_TIME_BYTE0 0x230d
+#define DSM_PPR_ATTACK_TIME_BYTE1 0x230e
+#define DSM_PPR_ATTACK_TIME_BYTE2 0x230f
+#define DSM_DEBUZZER_RELEASE_TIME_BYTE0 0x2311
+#define DSM_DEBUZZER_RELEASE_TIME_BYTE1 0x2312
+#define DSM_DEBUZZER_RELEASE_TIME_BYTE2 0x2313
+#define DSM_DEBUZZER_ATTACK_TIME_BYTE0 0x2315
+#define DSM_DEBUZZER_ATTACK_TIME_BYTE1 0x2316
+#define DSM_DEBUZZER_ATTACK_TIME_BYTE2 0x2317
+
+#define DSMIG_WB_DRC_RELEASE_TIME_1 0x2380
+#define DSMIG_WB_DRC_RELEASE_TIME_2 0x2381
+#define DSMIG_WB_DRC_ATTACK_TIME_1 0x2382
+#define DSMIG_WB_DRC_ATTACK_TIME_2 0x2383
+#define DSMIG_WB_DRC_COMPRESSION_RATIO 0x2384
+#define DSMIG_WB_DRC_COMPRESSION_THRESHOLD 0x2385
+#define DSMIG_WB_DRC_MAKEUPGAIN 0x2386
+#define DSMIG_WB_DRC_NOISE_GATE_THRESHOLD 0x2387
+#define DSMIG_WBDRC_HPF_ENABLE 0x2388
+#define DSMIG_WB_DRC_TEST_SMOOTHER_OUT_EN 0x2389
+#define DSMIG_PPR_THRESHOLD 0x238b
+#define DSM_STEREO_BASS_CHANNEL_SELECT 0x238d
+#define DSM_TPROT_THRESHOLD_BYTE0 0x238e
+#define DSM_TPROT_THRESHOLD_BYTE1 0x238f
+#define DSM_TPROT_ROOM_TEMPERATURE_BYTE0 0x2390
+#define DSM_TPROT_ROOM_TEMPERATURE_BYTE1 0x2391
+#define DSM_TPROT_RECIP_RDC_ROOM_BYTE0 0x2392
+#define DSM_TPROT_RECIP_RDC_ROOM_BYTE1 0x2393
+#define DSM_TPROT_RECIP_RDC_ROOM_BYTE2 0x2394
+#define DSM_TPROT_RECIP_TCONST_BYTE0 0x2395
+#define DSM_TPROT_RECIP_TCONST_BYTE1 0x2396
+#define DSM_TPROT_RECIP_TCONST_BYTE2 0x2397
+#define DSM_THERMAL_ATTENUATION_SETTINGS 0x2398
+#define DSM_THERMAL_PILOT_TONE_ATTENUATION 0x2399
+#define DSM_TPROT_PG_TEMP_THRESH_BYTE0 0x239a
+#define DSM_TPROT_PG_TEMP_THRESH_BYTE1 0x239b
+
+#define THERMAL_RDC_RD_BACK_BYTE1 0x239c
+#define THERMAL_RDC_RD_BACK_BYTE0 0x239d
+#define THERMAL_COILTEMP_RD_BACK_BYTE1 0x239e
+#define THERMAL_COILTEMP_RD_BACK_BYTE0 0x239f
+
+#define DSMIG_DEBUZZER_THRESHOLD 0x23b5
+#define DSMIG_DEBUZZER_ALPHA_COEF_TEST_ONLY 0x23b6
+#define DSM_VOL_ENA 0x23b9
+#define DSM_VOL_CTRL 0x23ba
+
+#define DSMIG_EN 0x23e0
+#define MAX98390_R23E1_DSP_GLOBAL_EN 0x23e1
+
+#define DSM_THERMAL_GAIN 0x23f0
+#define DSM_PPR_GAIN 0x23f1
+#define DSM_DBZ_GAIN 0x23f2
+#define DSM_WBDRC_GAIN 0x23f3
+
+#define MAX98390_R23FF_GLOBAL_EN 0x23FF
+#define MAX98390_R24FF_REV_ID 0x24FF
+
+/* MAX98390_R2021_PCM_RX_SRC_1 */
+#define MAX98390_PCM_RX_CH_SRC_SHIFT (0)
+#define MAX98390_PCM_RX_CH_SRC_BASS_SHIFT (4)
+
+/* MAX98390_R2022_PCM_TX_SRC_1 */
+#define MAX98390_PCM_TX_CH_SRC_A_V_SHIFT (0)
+#define MAX98390_PCM_TX_CH_SRC_A_I_SHIFT (4)
+
+/* MAX98390_R2024_PCM_DATA_FMT_CFG */
+#define MAX98390_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
+#define MAX98390_PCM_MODE_CFG_FORMAT_SHIFT (3)
+#define MAX98390_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 2)
+#define MAX98390_PCM_FORMAT_I2S (0x0 << 0)
+#define MAX98390_PCM_FORMAT_LJ (0x1 << 0)
+#define MAX98390_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98390_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98390_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
+#define MAX98390_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
+#define MAX98390_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
+#define MAX98390_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
+#define MAX98390_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
+
+/* MAX98390_R2039_AMP_DSP_CFG */
+#define MAX98390_AMP_DSP_CFG_RMP_UP_SHIFT (4)
+#define MAX98390_AMP_DSP_CFG_RMP_DN_SHIFT (5)
+
+/* MAX98390_R203A_AMP_EN */
+#define MAX98390_R203A_AMP_EN_SHIFT (0)
+
+/* MAX98390_PCM_MASTER_MODE */
+#define MAX98390_PCM_MASTER_MODE_MASK (0x3 << 0)
+#define MAX98390_PCM_MASTER_MODE_SLAVE (0x0 << 0)
+#define MAX98390_PCM_MASTER_MODE_MASTER (0x3 << 0)
+
+#define MAX98390_PCM_MASTER_MODE_MCLK_MASK (0xF << 2)
+#define MAX98390_PCM_MASTER_MODE_MCLK_RATE_SHIFT (2)
+
+/* PCM_CLK_SETUP */
+#define MAX98390_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 2)
+#define MAX98390_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
+
+/* PCM_SR_SETUP */
+#define MAX98390_PCM_SR_SET1_SR_MASK (0xF << 0)
+#define MAX98390_PCM_SR_SET1_SR_8000 (0x0 << 0)
+#define MAX98390_PCM_SR_SET1_SR_11025 (0x1 << 0)
+#define MAX98390_PCM_SR_SET1_SR_12000 (0x2 << 0)
+#define MAX98390_PCM_SR_SET1_SR_16000 (0x3 << 0)
+#define MAX98390_PCM_SR_SET1_SR_22050 (0x4 << 0)
+#define MAX98390_PCM_SR_SET1_SR_24000 (0x5 << 0)
+#define MAX98390_PCM_SR_SET1_SR_32000 (0x6 << 0)
+#define MAX98390_PCM_SR_SET1_SR_44100 (0x7 << 0)
+#define MAX98390_PCM_SR_SET1_SR_48000 (0x8 << 0)
+
+/* PCM_TO_SPK_MONO_MIX_1 */
+#define MAX98390_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
+#define MAX98390_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
+#define MAX98390_PCM_TO_SPK_CH0_SRC_MASK (0xF << 0)
+#define MAX98390_PCM_TO_SPK_CH1_SRC_MASK (0xF << 4)
+
+/* MAX98390_BOOST_CTRL3 */
+#define MAX98390_BOOST_CLK_PHASE_CFG_SHIFT (2)
+
+/* SOFT_RESET */
+#define MAX98390_SOFT_RESET_MASK (0x1 << 0)
+
+#define MAX98390_GLOBAL_EN_MASK (0x1 << 0)
+#define MAX98390_AMP_EN_MASK (0x1 << 0)
+
+/* DSM register offset */
+#define MAX98390_DSM_PAYLOAD_OFFSET 16
+#define MAX98390_DSM_PARAM_MAX_SIZE 770
+#define MAX98390_DSM_PARAM_MIN_SIZE 670
+
+struct max98390_priv {
+ struct regmap *regmap;
+ unsigned int sysclk;
+ unsigned int master;
+ unsigned int tdm_mode;
+ unsigned int ref_rdc_value;
+ unsigned int ambient_temp_value;
+};
+#endif
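
The DSM blocks above expose each 24-bit filter coefficient as three consecutive 8-bit registers (..._BYTE0/_BYTE1/_BYTE2, e.g. DSM_STBASS_HPF_B0_BYTE0..BYTE2 at 0x2101..0x2103). A minimal sketch of how one such coefficient could be pushed over regmap; the helper name and the BYTE0-is-LSB ordering are assumptions, not taken from the driver, which appears to load these values as a parameter blob (see MAX98390_DSM_PAYLOAD_OFFSET above).

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical helper: write one signed 24-bit DSM coefficient into its
 * three consecutive BYTE0/BYTE1/BYTE2 registers (byte order assumed). */
static int max98390_write_dsm_coeff(struct regmap *regmap,
				    unsigned int byte0_reg, s32 coeff)
{
	u32 val = (u32)coeff & 0xffffff;	/* 24-bit two's complement */
	u8 bytes[3] = {
		val & 0xff,		/* BYTE0: bits 7..0 (assumed LSB) */
		(val >> 8) & 0xff,	/* BYTE1: bits 15..8 */
		(val >> 16) & 0xff,	/* BYTE2: bits 23..16 */
	};

	return regmap_bulk_write(regmap, byte0_reg, bytes, ARRAY_SIZE(bytes));
}
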
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index 8600c5439e1e..c72cb2888c21 100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
@@ -23,8 +23,21 @@ static const char *const max9867_spmode[] = {
};
static const char *const max9867_filter_text[] = {"IIR", "FIR"};
+static const char *const max9867_adc_dac_filter_text[] = {
+ "Disabled",
+ "Elliptical/16/256",
+ "Butterworth/16/500",
+ "Elliptical/8/256",
+ "Butterworth/8/500",
+ "Butterworth/8-24"
+};
+
static SOC_ENUM_SINGLE_DECL(max9867_filter, MAX9867_CODECFLTR, 7,
max9867_filter_text);
+static SOC_ENUM_SINGLE_DECL(max9867_dac_filter, MAX9867_CODECFLTR, 0,
+ max9867_adc_dac_filter_text);
+static SOC_ENUM_SINGLE_DECL(max9867_adc_filter, MAX9867_CODECFLTR, 4,
+ max9867_adc_dac_filter_text);
static SOC_ENUM_SINGLE_DECL(max9867_spkmode, MAX9867_MODECONFIG, 0,
max9867_spmode);
static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_master_tlv,
@@ -46,24 +59,27 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_micboost_tlv,
static const struct snd_kcontrol_new max9867_snd_controls[] = {
SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL,
- MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv),
+ MAX9867_RIGHTVOL, 0, 40, 1, max9867_master_tlv),
SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL,
MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv),
SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN,
MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv),
SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN,
- MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv),
+ MAX9867_RIGHTMICGAIN, 5, 3, 0, max9867_micboost_tlv),
SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1),
SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1,
max9867_dac_tlv),
SOC_SINGLE_TLV("Digital Boost Playback Volume", MAX9867_DACLEVEL, 4, 3, 0,
max9867_dacboost_tlv),
- SOC_DOUBLE_TLV("Digital Capture Volume", MAX9867_ADCLEVEL, 0, 4, 15, 1,
+ SOC_DOUBLE_TLV("Digital Capture Volume", MAX9867_ADCLEVEL, 4, 0, 15, 1,
max9867_adc_tlv),
SOC_ENUM("Speaker Mode", max9867_spkmode),
SOC_SINGLE("Volume Smoothing Switch", MAX9867_MODECONFIG, 6, 1, 0),
SOC_SINGLE("Line ZC Switch", MAX9867_MODECONFIG, 5, 1, 0),
SOC_ENUM("DSP Filter", max9867_filter),
+ SOC_ENUM("ADC Filter", max9867_adc_filter),
+ SOC_ENUM("DAC Filter", max9867_dac_filter),
+ SOC_SINGLE("Mono Playback Switch", MAX9867_IFC1B, 3, 1, 0),
};
/* Input mixer */
@@ -88,20 +104,38 @@ static const struct snd_kcontrol_new max9867_line_out_control =
SOC_DAPM_DOUBLE_R("Switch",
MAX9867_LEFTVOL, MAX9867_RIGHTVOL, 6, 1, 1);
+/* DMIC mux */
+static const char *const dmic_mux_text[] = {
+ "ADC", "DMIC"
+};
+static SOC_ENUM_SINGLE_DECL(left_dmic_mux_enum,
+ MAX9867_MICCONFIG, 5, dmic_mux_text);
+static SOC_ENUM_SINGLE_DECL(right_dmic_mux_enum,
+ MAX9867_MICCONFIG, 4, dmic_mux_text);
+static const struct snd_kcontrol_new max9867_left_dmic_mux =
+ SOC_DAPM_ENUM("DMICL Mux", left_dmic_mux_enum);
+static const struct snd_kcontrol_new max9867_right_dmic_mux =
+ SOC_DAPM_ENUM("DMICR Mux", right_dmic_mux_enum);
static const struct snd_soc_dapm_widget max9867_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("MICL"),
SND_SOC_DAPM_INPUT("MICR"),
+ SND_SOC_DAPM_INPUT("DMICL"),
+ SND_SOC_DAPM_INPUT("DMICR"),
SND_SOC_DAPM_INPUT("LINL"),
SND_SOC_DAPM_INPUT("LINR"),
- SND_SOC_DAPM_PGA("Left Line Input", MAX9867_PWRMAN, 6, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Right Line Input", MAX9867_PWRMAN, 5, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Left Line Input", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Line Input", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER_NAMED_CTL("Input Mixer", SND_SOC_NOPM, 0, 0,
max9867_input_mixer_controls,
ARRAY_SIZE(max9867_input_mixer_controls)),
- SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", MAX9867_PWRMAN, 1, 0),
- SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", MAX9867_PWRMAN, 0, 0),
+ SND_SOC_DAPM_MUX("DMICL Mux", SND_SOC_NOPM, 0, 0,
+ &max9867_left_dmic_mux),
+ SND_SOC_DAPM_MUX("DMICR Mux", SND_SOC_NOPM, 0, 0,
+ &max9867_right_dmic_mux),
+ SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_MIXER("Digital", SND_SOC_NOPM, 0, 0,
max9867_sidetone_mixer_controls,
@@ -109,8 +143,8 @@ static const struct snd_soc_dapm_widget max9867_dapm_widgets[] = {
SND_SOC_DAPM_MIXER_NAMED_CTL("Output Mixer", SND_SOC_NOPM, 0, 0,
max9867_output_mixer_controls,
ARRAY_SIZE(max9867_output_mixer_controls)),
- SND_SOC_DAPM_DAC("DACL", "HiFi Playback", MAX9867_PWRMAN, 3, 0),
- SND_SOC_DAPM_DAC("DACR", "HiFi Playback", MAX9867_PWRMAN, 2, 0),
+ SND_SOC_DAPM_DAC("DACL", "HiFi Playback", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DACR", "HiFi Playback", SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_SWITCH("Master Playback", SND_SOC_NOPM, 0, 0,
&max9867_line_out_control),
SND_SOC_DAPM_OUTPUT("LOUT"),
@@ -124,8 +158,12 @@ static const struct snd_soc_dapm_route max9867_audio_map[] = {
{"Input Mixer", "Mic Capture Switch", "MICR"},
{"Input Mixer", "Line Capture Switch", "Left Line Input"},
{"Input Mixer", "Line Capture Switch", "Right Line Input"},
- {"ADCL", NULL, "Input Mixer"},
- {"ADCR", NULL, "Input Mixer"},
+ {"DMICL Mux", "DMIC", "DMICL"},
+ {"DMICR Mux", "DMIC", "DMICR"},
+ {"DMICL Mux", "ADC", "Input Mixer"},
+ {"DMICR Mux", "ADC", "Input Mixer"},
+ {"ADCL", NULL, "DMICL Mux"},
+ {"ADCR", NULL, "DMICR Mux"},
{"Digital", "Sidetone Switch", "ADCL"},
{"Digital", "Sidetone Switch", "ADCR"},
@@ -346,7 +384,8 @@ static int max9867_dai_set_fmt(struct snd_soc_dai *codec_dai,
}
regmap_write(max9867->regmap, MAX9867_IFC1A, iface1A);
- regmap_write(max9867->regmap, MAX9867_IFC1B, iface1B);
+ regmap_update_bits(max9867->regmap, MAX9867_IFC1B,
+ MAX9867_IFC1B_BCLK_MASK, iface1B);
return 0;
}
@@ -413,15 +452,14 @@ static int max9867_set_bias_level(struct snd_soc_component *component,
if (err)
return err;
- err = regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
- MAX9867_SHTDOWN, MAX9867_SHTDOWN);
+ err = regmap_write(max9867->regmap,
+ MAX9867_PWRMAN, 0xff);
if (err)
return err;
}
break;
case SND_SOC_BIAS_OFF:
- err = regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
- MAX9867_SHTDOWN, 0);
+ err = regmap_write(max9867->regmap, MAX9867_PWRMAN, 0);
if (err)
return err;
@@ -463,35 +501,10 @@ static bool max9867_volatile_register(struct device *dev, unsigned int reg)
}
}
-static const struct reg_default max9867_reg[] = {
- { 0x04, 0x00 },
- { 0x05, 0x00 },
- { 0x06, 0x00 },
- { 0x07, 0x00 },
- { 0x08, 0x00 },
- { 0x09, 0x00 },
- { 0x0A, 0x00 },
- { 0x0B, 0x00 },
- { 0x0C, 0x00 },
- { 0x0D, 0x00 },
- { 0x0E, 0x40 },
- { 0x0F, 0x40 },
- { 0x10, 0x00 },
- { 0x11, 0x00 },
- { 0x12, 0x00 },
- { 0x13, 0x00 },
- { 0x14, 0x00 },
- { 0x15, 0x00 },
- { 0x16, 0x00 },
- { 0x17, 0x00 },
-};
-
static const struct regmap_config max9867_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = MAX9867_REVISION,
- .reg_defaults = max9867_reg,
- .num_reg_defaults = ARRAY_SIZE(max9867_reg),
.volatile_reg = max9867_volatile_register,
.cache_type = REGCACHE_RBTREE,
};
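
With the line-input, ADC and DAC widgets moved to SND_SOC_NOPM and the MAX9867_SHTDOWN bit twiddling dropped, power sequencing is now concentrated in the bias-level callback, which writes the whole MAX9867_PWRMAN register. A minimal sketch of that scheme (not the driver code itself); dropping the max9867_reg[] defaults simply means the regcache is filled lazily from hardware reads instead of a static table.

#include <linux/regmap.h>

#include "max9867.h"

/* Sketch only: 0xff powers every block on the way out of standby, 0x00
 * shuts the codec down at BIAS_OFF, mirroring the hunk above. */
static int max9867_power_example(struct regmap *regmap, bool on)
{
	return regmap_write(regmap, MAX9867_PWRMAN, on ? 0xff : 0x00);
}
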
diff --git a/sound/soc/codecs/max9867.h b/sound/soc/codecs/max9867.h
index d459d49449cb..3092c3b99075 100644
--- a/sound/soc/codecs/max9867.h
+++ b/sound/soc/codecs/max9867.h
@@ -58,7 +58,6 @@
#define MAX9867_MICCONFIG 0x15
#define MAX9867_MODECONFIG 0x16
#define MAX9867_PWRMAN 0x17
-#define MAX9867_SHTDOWN 0x80
#define MAX9867_REVISION 0xff
#define MAX9867_CACHEREGNUM 10
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index de26758c30a8..33ebc6398426 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -355,6 +355,8 @@ static const struct snd_kcontrol_new nau8810_snd_controls[] = {
/* Speaker Output Mixer */
static const struct snd_kcontrol_new nau8810_speaker_mixer_controls[] = {
+ SOC_DAPM_SINGLE("AUX Bypass Switch", NAU8810_REG_SPKMIX,
+ NAU8810_AUXSPK_SFT, 1, 0),
SOC_DAPM_SINGLE("Line Bypass Switch", NAU8810_REG_SPKMIX,
NAU8810_BYPSPK_SFT, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", NAU8810_REG_SPKMIX,
@@ -363,6 +365,8 @@ static const struct snd_kcontrol_new nau8810_speaker_mixer_controls[] = {
/* Mono Output Mixer */
static const struct snd_kcontrol_new nau8810_mono_mixer_controls[] = {
+ SOC_DAPM_SINGLE("AUX Bypass Switch", NAU8810_REG_MONOMIX,
+ NAU8810_AUXMOUT_SFT, 1, 0),
SOC_DAPM_SINGLE("Line Bypass Switch", NAU8810_REG_MONOMIX,
NAU8810_BYPMOUT_SFT, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", NAU8810_REG_MONOMIX,
@@ -371,6 +375,8 @@ static const struct snd_kcontrol_new nau8810_mono_mixer_controls[] = {
/* PGA Mute */
static const struct snd_kcontrol_new nau8810_pgaboost_mixer_controls[] = {
+ SOC_DAPM_SINGLE("AUX PGA Switch", NAU8810_REG_ADCBOOST,
+ NAU8810_AUXBSTGAIN_SFT, 0x7, 0),
SOC_DAPM_SINGLE("PGA Mute Switch", NAU8810_REG_PGAGAIN,
NAU8810_PGAMT_SFT, 1, 1),
SOC_DAPM_SINGLE("PMIC PGA Switch", NAU8810_REG_ADCBOOST,
@@ -379,6 +385,8 @@ static const struct snd_kcontrol_new nau8810_pgaboost_mixer_controls[] = {
/* Input PGA */
static const struct snd_kcontrol_new nau8810_inpga[] = {
+ SOC_DAPM_SINGLE("AUX Switch", NAU8810_REG_INPUT_SIGNAL,
+ NAU8810_AUXPGA_SFT, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", NAU8810_REG_INPUT_SIGNAL,
NAU8810_NMICPGA_SFT, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", NAU8810_REG_INPUT_SIGNAL,
@@ -401,6 +409,23 @@ static int check_mclk_select_pll(struct snd_soc_dapm_widget *source,
return (value & NAU8810_CLKM_MASK);
}
+static int check_mic_enabled(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(source->dapm);
+ struct nau8810 *nau8810 = snd_soc_component_get_drvdata(component);
+ unsigned int value;
+
+ regmap_read(nau8810->regmap, NAU8810_REG_INPUT_SIGNAL, &value);
+ if (value & NAU8810_PMICPGA_EN || value & NAU8810_NMICPGA_EN)
+ return 1;
+ regmap_read(nau8810->regmap, NAU8810_REG_ADCBOOST, &value);
+ if (value & NAU8810_PMICBSTGAIN_MASK)
+ return 1;
+ return 0;
+}
+
static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Speaker Mixer", NAU8810_REG_POWER3,
NAU8810_SPKMX_EN_SFT, 0, &nau8810_speaker_mixer_controls[0],
@@ -425,6 +450,8 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Input Boost Stage", NAU8810_REG_POWER2,
NAU8810_BST_EN_SFT, 0, nau8810_pgaboost_mixer_controls,
ARRAY_SIZE(nau8810_pgaboost_mixer_controls)),
+ SND_SOC_DAPM_PGA("AUX Input", NAU8810_REG_POWER1,
+ NAU8810_AUX_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("Mic Bias", NAU8810_REG_POWER1,
NAU8810_MICBIAS_EN_SFT, 0, NULL, 0),
@@ -434,6 +461,7 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
SND_SOC_DAPM_SWITCH("Digital Loopback", SND_SOC_NOPM, 0, 0,
&nau8810_loopback),
+ SND_SOC_DAPM_INPUT("AUX"),
SND_SOC_DAPM_INPUT("MICN"),
SND_SOC_DAPM_INPUT("MICP"),
SND_SOC_DAPM_OUTPUT("MONOOUT"),
@@ -445,10 +473,12 @@ static const struct snd_soc_dapm_route nau8810_dapm_routes[] = {
{"DAC", NULL, "PLL", check_mclk_select_pll},
/* Mono output mixer */
+ {"Mono Mixer", "AUX Bypass Switch", "AUX Input"},
{"Mono Mixer", "PCM Playback Switch", "DAC"},
{"Mono Mixer", "Line Bypass Switch", "Input Boost Stage"},
/* Speaker output mixer */
+ {"Speaker Mixer", "AUX Bypass Switch", "AUX Input"},
{"Speaker Mixer", "PCM Playback Switch", "DAC"},
{"Speaker Mixer", "Line Bypass Switch", "Input Boost Stage"},
@@ -463,13 +493,16 @@ static const struct snd_soc_dapm_route nau8810_dapm_routes[] = {
/* Input Boost Stage */
{"ADC", NULL, "Input Boost Stage"},
{"ADC", NULL, "PLL", check_mclk_select_pll},
+ {"Input Boost Stage", "AUX PGA Switch", "AUX Input"},
{"Input Boost Stage", "PGA Mute Switch", "Input PGA"},
{"Input Boost Stage", "PMIC PGA Switch", "MICP"},
/* Input PGA */
- {"Input PGA", NULL, "Mic Bias"},
+ {"Input PGA", NULL, "Mic Bias", check_mic_enabled},
+ {"Input PGA", "AUX Switch", "AUX Input"},
{"Input PGA", "MicN Switch", "MICN"},
{"Input PGA", "MicP Switch", "MICP"},
+ {"AUX Input", NULL, "AUX"},
 /* Digital Loopback */
{"Digital Loopback", "Switch", "ADC"},
@@ -862,6 +895,8 @@ static int nau8810_i2c_probe(struct i2c_client *i2c,
static const struct i2c_device_id nau8810_i2c_id[] = {
{ "nau8810", 0 },
+ { "nau8812", 0 },
+ { "nau8814", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, nau8810_i2c_id);
@@ -869,6 +904,8 @@ MODULE_DEVICE_TABLE(i2c, nau8810_i2c_id);
#ifdef CONFIG_OF
static const struct of_device_id nau8810_of_match[] = {
{ .compatible = "nuvoton,nau8810", },
+ { .compatible = "nuvoton,nau8812", },
+ { .compatible = "nuvoton,nau8814", },
{ }
};
MODULE_DEVICE_TABLE(of, nau8810_of_match);
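
The new "nau8812"/"nau8814" I2C and OF IDs let boards use those variants with this driver unchanged. A hypothetical board-file sketch (bus number and the 0x1a address are made up; devicetree users would instead add a node with the new "nuvoton,nau8814" compatible):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/init.h>

static const struct i2c_board_info nau8814_info = {
	I2C_BOARD_INFO("nau8814", 0x1a),	/* address assumed */
};

static int __init board_add_nau8814(void)
{
	struct i2c_adapter *adap = i2c_get_adapter(1);	/* bus 1 assumed */
	struct i2c_client *client;

	if (!adap)
		return -ENODEV;

	client = i2c_new_client_device(adap, &nau8814_info);
	i2c_put_adapter(adap);

	return PTR_ERR_OR_ZERO(client);
}
device_initcall(board_add_nau8814);
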
diff --git a/sound/soc/codecs/nau8810.h b/sound/soc/codecs/nau8810.h
index 1ada31883dc6..6a7cacbe044a 100644
--- a/sound/soc/codecs/nau8810.h
+++ b/sound/soc/codecs/nau8810.h
@@ -69,6 +69,7 @@
/* NAU8810_REG_POWER1 (0x1) */
#define NAU8810_DCBUF_EN (0x1 << 8)
+#define NAU8810_AUX_EN_SFT 6
#define NAU8810_PLL_EN_SFT 5
#define NAU8810_MICBIAS_EN_SFT 4
#define NAU8810_ABIAS_EN (0x1 << 3)
@@ -228,7 +229,10 @@
/* NAU8810_REG_INPUT_SIGNAL (0x2C) */
#define NAU8810_PMICPGA_SFT 0
+#define NAU8810_PMICPGA_EN (0x1 << NAU8810_PMICPGA_SFT)
#define NAU8810_NMICPGA_SFT 1
+#define NAU8810_NMICPGA_EN (0x1 << NAU8810_NMICPGA_SFT)
+#define NAU8810_AUXPGA_SFT 2
/* NAU8810_REG_PGAGAIN (0x2D) */
#define NAU8810_PGAGAIN_SFT 0
@@ -236,12 +240,15 @@
#define NAU8810_PGAZC_SFT 7
/* NAU8810_REG_ADCBOOST (0x2F) */
+#define NAU8810_AUXBSTGAIN_SFT 0
#define NAU8810_PMICBSTGAIN_SFT 4
+#define NAU8810_PMICBSTGAIN_MASK (0x7 << NAU8810_PMICBSTGAIN_SFT)
#define NAU8810_PGABST_SFT 8
/* NAU8810_REG_SPKMIX (0x32) */
#define NAU8810_DACSPK_SFT 0
#define NAU8810_BYPSPK_SFT 1
+#define NAU8810_AUXSPK_SFT 5
/* NAU8810_REG_SPKGAIN (0x36) */
#define NAU8810_SPKGAIN_SFT 0
@@ -251,6 +258,7 @@
/* NAU8810_REG_MONOMIX (0x38) */
#define NAU8810_DACMOUT_SFT 0
#define NAU8810_BYPMOUT_SFT 1
+#define NAU8810_AUXMOUT_SFT 2
#define NAU8810_MOUTMXMT_SFT 6
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index d181c217d835..8c9daf32bab8 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -80,8 +80,8 @@ int rl6231_calc_dmic_clk(int rate)
for (i = 0; i < ARRAY_SIZE(div); i++) {
if ((div[i] % 3) == 0)
continue;
- /* find divider that gives DMIC frequency below 3.072MHz */
- if (3072000 * div[i] >= rate)
+ /* find divider that gives DMIC frequency below 1.536MHz */
+ if (1536000 * div[i] >= rate)
return i;
}
@@ -97,12 +97,13 @@ struct pll_calc_map {
int n;
int m;
bool m_bp;
+ bool k_bp;
};
static const struct pll_calc_map pll_preset_table[] = {
- {19200000, 4096000, 23, 14, 1, false},
- {19200000, 24576000, 3, 30, 3, false},
- {3840000, 24576000, 3, 30, 0, true},
+ {19200000, 4096000, 23, 14, 1, false, false},
+ {19200000, 24576000, 3, 30, 3, false, false},
+ {3840000, 24576000, 3, 30, 0, true, false},
};
static unsigned int find_best_div(unsigned int in,
@@ -128,7 +129,7 @@ static unsigned int find_best_div(unsigned int in,
 * rl6231_pll_calc - Calculate PLL M/N/K code.
* @freq_in: external clock provided to codec.
* @freq_out: target clock which codec works on.
- * @pll_code: Pointer to structure with M, N, K and bypass flag.
+ * @pll_code: Pointer to structure with M, N, K, m_bypass and k_bypass flags.
*
 * Calculate M/N/K code to configure PLL for codec.
*
@@ -143,7 +144,7 @@ int rl6231_pll_calc(const unsigned int freq_in,
unsigned int red, pll_out, in_t, out_t, div, div_t;
unsigned int red_t = abs(freq_out - freq_in);
unsigned int f_in, f_out, f_max;
- bool bypass = false;
+ bool m_bypass = false, k_bypass = false;
if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
return -EINVAL;
@@ -154,7 +155,8 @@ int rl6231_pll_calc(const unsigned int freq_in,
k = pll_preset_table[i].k;
m = pll_preset_table[i].m;
n = pll_preset_table[i].n;
- bypass = pll_preset_table[i].m_bp;
+ m_bypass = pll_preset_table[i].m_bp;
+ k_bypass = pll_preset_table[i].k_bp;
pr_debug("Use preset PLL parameter table\n");
goto code_find;
}
@@ -172,12 +174,14 @@ int rl6231_pll_calc(const unsigned int freq_in,
f_in = freq_in / div;
f_out = freq_out / div;
k = min_k;
+ if (min_k < -1)
+ min_k = -1;
for (k_t = min_k; k_t <= max_k; k_t++) {
for (n_t = 0; n_t <= max_n; n_t++) {
in_t = f_in * (n_t + 2);
pll_out = f_out * (k_t + 2);
if (in_t == pll_out) {
- bypass = true;
+ m_bypass = true;
n = n_t;
k = k_t;
goto code_find;
@@ -185,7 +189,7 @@ int rl6231_pll_calc(const unsigned int freq_in,
out_t = in_t / (k_t + 2);
red = abs(f_out - out_t);
if (red < red_t) {
- bypass = true;
+ m_bypass = true;
n = n_t;
m = 0;
k = k_t;
@@ -197,7 +201,7 @@ int rl6231_pll_calc(const unsigned int freq_in,
out_t = in_t / ((m_t + 2) * (k_t + 2));
red = abs(f_out - out_t);
if (red < red_t) {
- bypass = false;
+ m_bypass = false;
n = n_t;
m = m_t;
k = k_t;
@@ -211,8 +215,13 @@ int rl6231_pll_calc(const unsigned int freq_in,
pr_debug("Only get approximation about PLL\n");
code_find:
+ if (k == -1) {
+ k_bypass = true;
+ k = 0;
+ }
- pll_code->m_bp = bypass;
+ pll_code->m_bp = m_bypass;
+ pll_code->k_bp = k_bypass;
pll_code->m_code = m;
pll_code->n_code = n;
pll_code->k_code = k;
diff --git a/sound/soc/codecs/rl6231.h b/sound/soc/codecs/rl6231.h
index 6d8ed0377296..928082750860 100644
--- a/sound/soc/codecs/rl6231.h
+++ b/sound/soc/codecs/rl6231.h
@@ -18,6 +18,7 @@
struct rl6231_pll_code {
bool m_bp; /* Indicates bypass m code or not. */
+ bool k_bp; /* Indicates bypass k code or not. */
int m_code;
int n_code;
int k_code;
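
The k_bp handling above lets a caller distinguish "K code of 0" from "K divider bypassed". Reading the preset table columns as {in, out, k, n, m, ...}, the relation the calculator implements is Fout = Fin * (N + 2) / ((M + 2) * (K + 2)), with the (M + 2) and (K + 2) factors dropping to 1 when the matching bypass flag is set; the first entry works out as 19200000 * 16 / (3 * 25) = 4096000 Hz. A usage sketch, assuming it is built next to rl6231.h:

#include <linux/kernel.h>
#include <linux/printk.h>

#include "rl6231.h"

/* Sketch only: ask for 4.096 MHz from a 19.2 MHz reference (the first
 * preset above) and report the divider codes that come back. */
static int example_pll_codes(void)
{
	struct rl6231_pll_code code;
	int ret;

	ret = rl6231_pll_calc(19200000, 4096000, &code);
	if (ret < 0)
		return ret;

	/* k_bp set means the K divider is bypassed (divide by 1). */
	pr_debug("m_bp=%d k_bp=%d m=%d n=%d k=%d\n",
		 code.m_bp, code.k_bp, code.m_code, code.n_code, code.k_code);

	return 0;
}
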
diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
index bb310bc7febd..67e2e944d21b 100644
--- a/sound/soc/codecs/rt1015.c
+++ b/sound/soc/codecs/rt1015.c
@@ -475,7 +475,7 @@ static int rt1015_bypass_boost_put(struct snd_kcontrol *kcontrol,
snd_soc_component_write(component,
RT1015_CLSD_INTERNAL9, 0x0140);
snd_soc_component_write(component,
- RT1015_GAT_BOOST, 0x00fe);
+ RT1015_GAT_BOOST, 0x0efe);
snd_soc_component_write(component,
RT1015_PWR_STATE_CTRL, 0x000d);
msleep(500);
@@ -780,6 +780,14 @@ static int rt1015_set_component_pll(struct snd_soc_component *component,
freq_out == rt1015->pll_out)
return 0;
+ if (source == RT1015_PLL_S_BCLK) {
+ if (rt1015->bclk_ratio == 0) {
+ dev_err(component->dev,
+ "Can not support bclk ratio as 0.\n");
+ return -EINVAL;
+ }
+ }
+
switch (source) {
case RT1015_PLL_S_MCLK:
snd_soc_component_update_bits(component, RT1015_CLK2,
@@ -819,12 +827,30 @@ static int rt1015_set_component_pll(struct snd_soc_component *component,
return 0;
}
+static int rt1015_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt1015_priv *rt1015 = snd_soc_component_get_drvdata(component);
+
+ dev_dbg(component->dev, "%s ratio=%d\n", __func__, ratio);
+
+ rt1015->bclk_ratio = ratio;
+
+ if (ratio == 50) {
+ dev_dbg(component->dev, "Unsupport bclk ratio\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rt1015_probe(struct snd_soc_component *component)
{
struct rt1015_priv *rt1015 =
snd_soc_component_get_drvdata(component);
rt1015->component = component;
+ rt1015->bclk_ratio = 0;
snd_soc_component_write(component, RT1015_BAT_RPO_STEP1, 0x061c);
return 0;
@@ -844,6 +870,7 @@ static void rt1015_remove(struct snd_soc_component *component)
static struct snd_soc_dai_ops rt1015_aif_dai_ops = {
.hw_params = rt1015_hw_params,
.set_fmt = rt1015_set_dai_fmt,
+ .set_bclk_ratio = rt1015_set_bclk_ratio,
};
static struct snd_soc_dai_driver rt1015_dai[] = {
diff --git a/sound/soc/codecs/rt1015.h b/sound/soc/codecs/rt1015.h
index ef3745a4faae..6fbe802082c4 100644
--- a/sound/soc/codecs/rt1015.h
+++ b/sound/soc/codecs/rt1015.h
@@ -362,6 +362,7 @@ struct rt1015_priv {
int sysclk_src;
int lrck;
int bclk;
+ int bclk_ratio;
int id;
int pll_src;
int pll_in;
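
Because rt1015_set_component_pll() now refuses to run from BCLK while bclk_ratio is still 0, a machine driver has to program the ratio through the new set_bclk_ratio op before selecting the BCLK PLL source. A hedged sketch; the 64-cycle frame and the 24.576 MHz target are assumptions for a 48 kHz link:

#include <sound/soc.h>

#include "rt1015.h"

/* Sketch only: set the BCLK ratio first (50 is rejected by the new op),
 * then source the PLL from BCLK. */
static int example_rt1015_clocking(struct snd_soc_dai *codec_dai)
{
	int ret;

	ret = snd_soc_dai_set_bclk_ratio(codec_dai, 64);
	if (ret < 0)
		return ret;

	return snd_soc_component_set_pll(codec_dai->component, 0,
					 RT1015_PLL_S_BCLK,
					 48000 * 64, 24576000);
}
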
diff --git a/sound/soc/codecs/rt1016.c b/sound/soc/codecs/rt1016.c
new file mode 100644
index 000000000000..a23d368ab4da
--- /dev/null
+++ b/sound/soc/codecs/rt1016.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// rt1016.c -- RT1016 ALSA SoC audio amplifier driver
+//
+// Copyright 2020 Realtek Semiconductor Corp.
+// Author: Oder Chiou <oder_chiou@realtek.com>
+//
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rl6231.h"
+#include "rt1016.h"
+
+static const struct reg_sequence rt1016_patch[] = {
+ {RT1016_VOL_CTRL_3, 0x8900},
+ {RT1016_ANA_CTRL_1, 0xa002},
+ {RT1016_ANA_CTRL_2, 0x0002},
+ {RT1016_CLOCK_4, 0x6700},
+ {RT1016_CLASSD_3, 0xdc55},
+ {RT1016_CLASSD_4, 0x376a},
+ {RT1016_CLASSD_5, 0x009f},
+};
+
+static const struct reg_default rt1016_reg[] = {
+ {0x00, 0x0000},
+ {0x01, 0x5400},
+ {0x02, 0x5506},
+ {0x03, 0xf800},
+ {0x04, 0x0000},
+ {0x05, 0xbfbf},
+ {0x06, 0x8900},
+ {0x07, 0xa002},
+ {0x08, 0x0000},
+ {0x09, 0x0000},
+ {0x0a, 0x0000},
+ {0x0c, 0x0000},
+ {0x0d, 0x0000},
+ {0x0e, 0x10ec},
+ {0x0f, 0x6595},
+ {0x11, 0x0002},
+ {0x1c, 0x0000},
+ {0x1d, 0x0000},
+ {0x1e, 0x0000},
+ {0x1f, 0xf000},
+ {0x20, 0x0000},
+ {0x21, 0x6000},
+ {0x22, 0x0000},
+ {0x23, 0x6700},
+ {0x24, 0x0000},
+ {0x25, 0x0000},
+ {0x26, 0x0000},
+ {0x40, 0x0018},
+ {0x60, 0x00a5},
+ {0x80, 0x0010},
+ {0x81, 0x0009},
+ {0x82, 0x0000},
+ {0x83, 0x0000},
+ {0xa0, 0x0700},
+ {0xc0, 0x0080},
+ {0xc1, 0x02a0},
+ {0xc2, 0x1400},
+ {0xc3, 0x0a4a},
+ {0xc4, 0x552a},
+ {0xc5, 0x087e},
+ {0xc6, 0x0020},
+ {0xc7, 0xa833},
+ {0xc8, 0x0433},
+ {0xc9, 0x8040},
+ {0xca, 0xdc55},
+ {0xcb, 0x376a},
+ {0xcc, 0x009f},
+ {0xcf, 0x0020},
+};
+
+static bool rt1016_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT1016_ANA_FLAG:
+ case RT1016_VERSION2_ID:
+ case RT1016_VERSION1_ID:
+ case RT1016_VENDER_ID:
+ case RT1016_DEVICE_ID:
+ case RT1016_TEST_SIGNAL:
+ case RT1016_SC_CTRL_1:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool rt1016_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT1016_RESET:
+ case RT1016_PADS_CTRL_1:
+ case RT1016_PADS_CTRL_2:
+ case RT1016_I2C_CTRL:
+ case RT1016_VOL_CTRL_1:
+ case RT1016_VOL_CTRL_2:
+ case RT1016_VOL_CTRL_3:
+ case RT1016_ANA_CTRL_1:
+ case RT1016_MUX_SEL:
+ case RT1016_RX_I2S_CTRL:
+ case RT1016_ANA_FLAG:
+ case RT1016_VERSION2_ID:
+ case RT1016_VERSION1_ID:
+ case RT1016_VENDER_ID:
+ case RT1016_DEVICE_ID:
+ case RT1016_ANA_CTRL_2:
+ case RT1016_TEST_SIGNAL:
+ case RT1016_TEST_CTRL_1:
+ case RT1016_TEST_CTRL_2:
+ case RT1016_TEST_CTRL_3:
+ case RT1016_CLOCK_1:
+ case RT1016_CLOCK_2:
+ case RT1016_CLOCK_3:
+ case RT1016_CLOCK_4:
+ case RT1016_CLOCK_5:
+ case RT1016_CLOCK_6:
+ case RT1016_CLOCK_7:
+ case RT1016_I2S_CTRL:
+ case RT1016_DAC_CTRL_1:
+ case RT1016_SC_CTRL_1:
+ case RT1016_SC_CTRL_2:
+ case RT1016_SC_CTRL_3:
+ case RT1016_SC_CTRL_4:
+ case RT1016_SIL_DET:
+ case RT1016_SYS_CLK:
+ case RT1016_BIAS_CUR:
+ case RT1016_DAC_CTRL_2:
+ case RT1016_LDO_CTRL:
+ case RT1016_CLASSD_1:
+ case RT1016_PLL1:
+ case RT1016_PLL2:
+ case RT1016_PLL3:
+ case RT1016_CLASSD_2:
+ case RT1016_CLASSD_OUT:
+ case RT1016_CLASSD_3:
+ case RT1016_CLASSD_4:
+ case RT1016_CLASSD_5:
+ case RT1016_PWR_CTRL:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -9550, 50, 0);
+
+static const struct snd_kcontrol_new rt1016_snd_controls[] = {
+ SOC_DOUBLE_TLV("DAC Playback Volume", RT1016_VOL_CTRL_2,
+ RT1016_L_VOL_SFT, RT1016_R_VOL_SFT, 191, 0, dac_vol_tlv),
+ SOC_DOUBLE("DAC Playback Switch", RT1016_VOL_CTRL_1,
+ RT1016_DA_MUTE_L_SFT, RT1016_DA_MUTE_R_SFT, 1, 1),
+};
+
+static int rt1016_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(source->dapm);
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+
+ if (rt1016->sysclk_src == RT1016_SCLK_S_PLL)
+ return 1;
+ else
+ return 0;
+}
+
+/* Interface data select */
+static const char * const rt1016_data_select[] = {
+ "L/R", "R/L", "L/L", "R/R"
+};
+
+static SOC_ENUM_SINGLE_DECL(rt1016_if_data_swap_enum,
+ RT1016_I2S_CTRL, RT1016_I2S_DATA_SWAP_SFT, rt1016_data_select);
+
+static const struct snd_kcontrol_new rt1016_if_data_swap_mux =
+ SOC_DAPM_ENUM("Data Swap Mux", rt1016_if_data_swap_enum);
+
+static const struct snd_soc_dapm_widget rt1016_dapm_widgets[] = {
+ SND_SOC_DAPM_MUX("Data Swap Mux", SND_SOC_NOPM, 0, 0,
+ &rt1016_if_data_swap_mux),
+
+ SND_SOC_DAPM_SUPPLY("DAC Filter", RT1016_CLOCK_3,
+ RT1016_PWR_DAC_FILTER_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAMOD", RT1016_CLOCK_3, RT1016_PWR_DACMOD_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("FIFO", RT1016_CLOCK_3, RT1016_PWR_CLK_FIFO_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Pure DC", RT1016_CLOCK_3,
+ RT1016_PWR_CLK_PUREDC_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLK Silence Det", RT1016_CLOCK_3,
+ RT1016_PWR_SIL_DET_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RC 25M", RT1016_CLOCK_3, RT1016_PWR_RC_25M_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL1", RT1016_CLOCK_3, RT1016_PWR_PLL1_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ANA CTRL", RT1016_CLOCK_3, RT1016_PWR_ANA_CTRL_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("CLK SYS", RT1016_CLOCK_3, RT1016_PWR_CLK_SYS_BIT,
+ 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("LRCK Det", RT1016_CLOCK_4, RT1016_PWR_LRCK_DET_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("BCLK Det", RT1016_CLOCK_4, RT1016_PWR_BCLK_DET_BIT,
+ 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("CKGEN DAC", RT1016_DAC_CTRL_2,
+ RT1016_CKGEN_DAC_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("VCM SLOW", RT1016_CLASSD_1, RT1016_VCM_SLOW_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Silence Det", RT1016_SIL_DET,
+ RT1016_SIL_DET_EN_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL2", RT1016_PLL2, RT1016_PLL2_EN_BIT, 0, NULL,
+ 0),
+
+ SND_SOC_DAPM_SUPPLY_S("BG1 BG2", 1, RT1016_PWR_CTRL,
+ RT1016_PWR_BG_1_2_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("MBIAS BG", 1, RT1016_PWR_CTRL,
+ RT1016_PWR_MBIAS_BG_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("PLL", 1, RT1016_PWR_CTRL, RT1016_PWR_PLL_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("BASIC", 1, RT1016_PWR_CTRL, RT1016_PWR_BASIC_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("CLASS D", 1, RT1016_PWR_CTRL,
+ RT1016_PWR_CLSD_BIT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("25M", 1, RT1016_PWR_CTRL, RT1016_PWR_25M_BIT, 0,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DACL", 1, RT1016_PWR_CTRL, RT1016_PWR_DACL_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("DACR", 1, RT1016_PWR_CTRL, RT1016_PWR_DACR_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("LDO2", 1, RT1016_PWR_CTRL, RT1016_PWR_LDO2_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("VREF", 1, RT1016_PWR_CTRL, RT1016_PWR_VREF_BIT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("MBIAS", 1, RT1016_PWR_CTRL, RT1016_PWR_MBIAS_BIT,
+ 0, NULL, 0),
+
+ SND_SOC_DAPM_AIF_IN("AIFRX", "AIF Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_OUTPUT("SPO"),
+};
+
+static const struct snd_soc_dapm_route rt1016_dapm_routes[] = {
+ { "Data Swap Mux", "L/R", "AIFRX" },
+ { "Data Swap Mux", "R/L", "AIFRX" },
+ { "Data Swap Mux", "L/L", "AIFRX" },
+ { "Data Swap Mux", "R/R", "AIFRX" },
+
+ { "DAC", NULL, "DAC Filter" },
+ { "DAC", NULL, "DAMOD" },
+ { "DAC", NULL, "FIFO" },
+ { "DAC", NULL, "Pure DC" },
+ { "DAC", NULL, "Silence Det" },
+ { "DAC", NULL, "ANA CTRL" },
+ { "DAC", NULL, "CLK SYS" },
+ { "DAC", NULL, "LRCK Det" },
+ { "DAC", NULL, "BCLK Det" },
+ { "DAC", NULL, "CKGEN DAC" },
+ { "DAC", NULL, "VCM SLOW" },
+
+ { "PLL", NULL, "PLL1" },
+ { "PLL", NULL, "PLL2" },
+ { "25M", NULL, "RC 25M" },
+ { "Silence Det", NULL, "CLK Silence Det" },
+
+ { "DAC", NULL, "Data Swap Mux" },
+ { "DAC", NULL, "BG1 BG2" },
+ { "DAC", NULL, "MBIAS BG" },
+ { "DAC", NULL, "PLL", rt1016_is_sys_clk_from_pll},
+ { "DAC", NULL, "BASIC" },
+ { "DAC", NULL, "CLASS D" },
+ { "DAC", NULL, "25M" },
+ { "DAC", NULL, "DACL" },
+ { "DAC", NULL, "DACR" },
+ { "DAC", NULL, "LDO2" },
+ { "DAC", NULL, "VREF" },
+ { "DAC", NULL, "MBIAS" },
+
+ { "SPO", NULL, "DAC" },
+};
+
+static int rt1016_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+ int pre_div, bclk_ms, frame_size;
+ unsigned int val_len = 0;
+
+ rt1016->lrck = params_rate(params);
+ pre_div = rl6231_get_clk_info(rt1016->sysclk, rt1016->lrck);
+ if (pre_div < 0) {
+ dev_err(component->dev, "Unsupported clock rate\n");
+ return -EINVAL;
+ }
+
+ frame_size = snd_soc_params_to_frame_size(params);
+ if (frame_size < 0) {
+ dev_err(component->dev, "Unsupported frame size: %d\n",
+ frame_size);
+ return -EINVAL;
+ }
+
+ bclk_ms = frame_size > 32;
+ rt1016->bclk = rt1016->lrck * (32 << bclk_ms);
+
+ if (bclk_ms && rt1016->master)
+ snd_soc_component_update_bits(component, RT1016_I2S_CTRL,
+ RT1016_I2S_BCLK_MS_MASK, RT1016_I2S_BCLK_MS_64);
+
+ dev_dbg(component->dev, "lrck is %dHz and pre_div is %d for iis %d\n",
+ rt1016->lrck, pre_div, dai->id);
+
+ switch (params_width(params)) {
+ case 16:
+ val_len = RT1016_I2S_DL_16;
+ break;
+ case 20:
+ val_len = RT1016_I2S_DL_20;
+ break;
+ case 24:
+ val_len = RT1016_I2S_DL_24;
+ break;
+ case 32:
+ val_len = RT1016_I2S_DL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, RT1016_I2S_CTRL,
+ RT1016_I2S_DL_MASK, val_len);
+ snd_soc_component_update_bits(component, RT1016_CLOCK_2,
+ RT1016_FS_PD_MASK | RT1016_OSR_PD_MASK,
+ ((pre_div + 3) << RT1016_FS_PD_SFT) |
+ (pre_div << RT1016_OSR_PD_SFT));
+
+ return 0;
+}
+
+static int rt1016_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+ unsigned int reg_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ reg_val |= RT1016_I2S_MS_M;
+ rt1016->master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ reg_val |= RT1016_I2S_MS_S;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ reg_val |= RT1016_I2S_BCLK_POL_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ reg_val |= RT1016_I2S_DF_LEFT;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A:
+ reg_val |= RT1016_I2S_DF_PCM_A;
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B:
+ reg_val |= RT1016_I2S_DF_PCM_B;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component, RT1016_I2S_CTRL,
+ RT1016_I2S_MS_MASK | RT1016_I2S_BCLK_POL_MASK |
+ RT1016_I2S_DF_MASK, reg_val);
+
+ return 0;
+}
+
+static int rt1016_set_component_sysclk(struct snd_soc_component *component,
+ int clk_id, int source, unsigned int freq, int dir)
+{
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+ unsigned int reg_val = 0;
+
+ if (freq == rt1016->sysclk && clk_id == rt1016->sysclk_src)
+ return 0;
+
+ switch (clk_id) {
+ case RT1016_SCLK_S_MCLK:
+ reg_val |= RT1016_CLK_SYS_SEL_MCLK;
+ break;
+
+ case RT1016_SCLK_S_PLL:
+ reg_val |= RT1016_CLK_SYS_SEL_PLL;
+ break;
+
+ default:
+ dev_err(component->dev, "Invalid clock id (%d)\n", clk_id);
+ return -EINVAL;
+ }
+
+ rt1016->sysclk = freq;
+ rt1016->sysclk_src = clk_id;
+
+ dev_dbg(component->dev, "Sysclk is %dHz and clock id is %d\n",
+ freq, clk_id);
+
+ snd_soc_component_update_bits(component, RT1016_CLOCK_1,
+ RT1016_CLK_SYS_SEL_MASK, reg_val);
+
+ return 0;
+}
+
+static int rt1016_set_component_pll(struct snd_soc_component *component,
+ int pll_id, int source, unsigned int freq_in,
+ unsigned int freq_out)
+{
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+ struct rl6231_pll_code pll_code;
+ int ret;
+
+ if (!freq_in || !freq_out) {
+ dev_dbg(component->dev, "PLL disabled\n");
+
+ rt1016->pll_in = 0;
+ rt1016->pll_out = 0;
+
+ return 0;
+ }
+
+ if (source == rt1016->pll_src && freq_in == rt1016->pll_in &&
+ freq_out == rt1016->pll_out)
+ return 0;
+
+ switch (source) {
+ case RT1016_PLL_S_MCLK:
+ snd_soc_component_update_bits(component, RT1016_CLOCK_1,
+ RT1016_PLL_SEL_MASK, RT1016_PLL_SEL_MCLK);
+ break;
+
+ case RT1016_PLL_S_BCLK:
+ snd_soc_component_update_bits(component, RT1016_CLOCK_1,
+ RT1016_PLL_SEL_MASK, RT1016_PLL_SEL_BCLK);
+ break;
+
+ default:
+ dev_err(component->dev, "Unknown PLL Source %d\n", source);
+ return -EINVAL;
+ }
+
+ ret = rl6231_pll_calc(freq_in, freq_out * 4, &pll_code);
+ if (ret < 0) {
+ dev_err(component->dev, "Unsupport input clock %d\n", freq_in);
+ return ret;
+ }
+
+ dev_dbg(component->dev, "mbypass=%d m=%d n=%d kbypass=%d k=%d\n",
+ pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+ pll_code.n_code, pll_code.k_bp,
+ (pll_code.k_bp ? 0 : pll_code.k_code));
+
+ snd_soc_component_write(component, RT1016_PLL1,
+ (pll_code.m_bp ? 0 : pll_code.m_code) << RT1016_PLL_M_SFT |
+ pll_code.m_bp << RT1016_PLL_M_BP_SFT | pll_code.n_code);
+ snd_soc_component_write(component, RT1016_PLL2,
+ pll_code.k_bp << RT1016_PLL_K_BP_SFT |
+ (pll_code.k_bp ? 0 : pll_code.k_code));
+
+ rt1016->pll_in = freq_in;
+ rt1016->pll_out = freq_out;
+ rt1016->pll_src = source;
+
+ return 0;
+}
+
+static int rt1016_probe(struct snd_soc_component *component)
+{
+ struct rt1016_priv *rt1016 =
+ snd_soc_component_get_drvdata(component);
+
+ rt1016->component = component;
+
+ return 0;
+}
+
+static void rt1016_remove(struct snd_soc_component *component)
+{
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+
+ regmap_write(rt1016->regmap, RT1016_RESET, 0);
+}
+
+#define RT1016_STEREO_RATES SNDRV_PCM_RATE_8000_48000
+#define RT1016_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static struct snd_soc_dai_ops rt1016_aif_dai_ops = {
+ .hw_params = rt1016_hw_params,
+ .set_fmt = rt1016_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver rt1016_dai[] = {
+ {
+ .name = "rt1016-aif",
+ .id = 0,
+ .playback = {
+ .stream_name = "AIF Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT1016_STEREO_RATES,
+ .formats = RT1016_FORMATS,
+ },
+ .ops = &rt1016_aif_dai_ops,
+ }
+};
+
+#ifdef CONFIG_PM
+static int rt1016_suspend(struct snd_soc_component *component)
+{
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+
+ regcache_cache_only(rt1016->regmap, true);
+ regcache_mark_dirty(rt1016->regmap);
+
+ return 0;
+}
+
+static int rt1016_resume(struct snd_soc_component *component)
+{
+ struct rt1016_priv *rt1016 = snd_soc_component_get_drvdata(component);
+
+ regcache_cache_only(rt1016->regmap, false);
+ regcache_sync(rt1016->regmap);
+
+ return 0;
+}
+#else
+#define rt1016_suspend NULL
+#define rt1016_resume NULL
+#endif
+
+static const struct snd_soc_component_driver soc_component_dev_rt1016 = {
+ .probe = rt1016_probe,
+ .remove = rt1016_remove,
+ .suspend = rt1016_suspend,
+ .resume = rt1016_resume,
+ .controls = rt1016_snd_controls,
+ .num_controls = ARRAY_SIZE(rt1016_snd_controls),
+ .dapm_widgets = rt1016_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt1016_dapm_widgets),
+ .dapm_routes = rt1016_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(rt1016_dapm_routes),
+ .set_sysclk = rt1016_set_component_sysclk,
+ .set_pll = rt1016_set_component_pll,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config rt1016_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = RT1016_PWR_CTRL,
+ .volatile_reg = rt1016_volatile_register,
+ .readable_reg = rt1016_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt1016_reg,
+ .num_reg_defaults = ARRAY_SIZE(rt1016_reg),
+};
+
+static const struct i2c_device_id rt1016_i2c_id[] = {
+ { "rt1016", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rt1016_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id rt1016_of_match[] = {
+ { .compatible = "realtek,rt1016", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt1016_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt1016_acpi_match[] = {
+ {"10EC1016", 0,},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt1016_acpi_match);
+#endif
+
+static int rt1016_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt1016_priv *rt1016;
+ int ret;
+ unsigned int val;
+
+ rt1016 = devm_kzalloc(&i2c->dev, sizeof(struct rt1016_priv),
+ GFP_KERNEL);
+ if (rt1016 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rt1016);
+
+ rt1016->regmap = devm_regmap_init_i2c(i2c, &rt1016_regmap);
+ if (IS_ERR(rt1016->regmap)) {
+ ret = PTR_ERR(rt1016->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ regmap_read(rt1016->regmap, RT1016_DEVICE_ID, &val);
+ if (val != RT1016_DEVICE_ID_VAL) {
+ dev_err(&i2c->dev,
+ "Device with ID register %x is not rt1016\n", val);
+ return -ENODEV;
+ }
+
+ regmap_write(rt1016->regmap, RT1016_RESET, 0);
+
+ ret = regmap_register_patch(rt1016->regmap, rt1016_patch,
+ ARRAY_SIZE(rt1016_patch));
+ if (ret != 0)
+ dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+
+ return devm_snd_soc_register_component(&i2c->dev,
+ &soc_component_dev_rt1016,
+ rt1016_dai, ARRAY_SIZE(rt1016_dai));
+}
+
+static void rt1016_i2c_shutdown(struct i2c_client *client)
+{
+ struct rt1016_priv *rt1016 = i2c_get_clientdata(client);
+
+ regmap_write(rt1016->regmap, RT1016_RESET, 0);
+}
+
+static struct i2c_driver rt1016_i2c_driver = {
+ .driver = {
+ .name = "rt1016",
+ .of_match_table = of_match_ptr(rt1016_of_match),
+ .acpi_match_table = ACPI_PTR(rt1016_acpi_match),
+ },
+ .probe = rt1016_i2c_probe,
+ .shutdown = rt1016_i2c_shutdown,
+ .id_table = rt1016_i2c_id,
+};
+module_i2c_driver(rt1016_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT1016 driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt1016.h b/sound/soc/codecs/rt1016.h
new file mode 100644
index 000000000000..041d6a5a6f46
--- /dev/null
+++ b/sound/soc/codecs/rt1016.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * rt1016.h -- RT1016 ALSA SoC audio amplifier driver
+ *
+ * Copyright 2020 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT1016_H__
+#define __RT1016_H__
+
+#define RT1016_DEVICE_ID_VAL 0x6595
+
+#define RT1016_RESET 0x00
+#define RT1016_PADS_CTRL_1 0x01
+#define RT1016_PADS_CTRL_2 0x02
+#define RT1016_I2C_CTRL 0x03
+#define RT1016_VOL_CTRL_1 0x04
+#define RT1016_VOL_CTRL_2 0x05
+#define RT1016_VOL_CTRL_3 0x06
+#define RT1016_ANA_CTRL_1 0x07
+#define RT1016_MUX_SEL 0x08
+#define RT1016_RX_I2S_CTRL 0x09
+#define RT1016_ANA_FLAG 0x0a
+#define RT1016_VERSION2_ID 0x0c
+#define RT1016_VERSION1_ID 0x0d
+#define RT1016_VENDER_ID 0x0e
+#define RT1016_DEVICE_ID 0x0f
+#define RT1016_ANA_CTRL_2 0x11
+#define RT1016_TEST_SIGNAL 0x1c
+#define RT1016_TEST_CTRL_1 0x1d
+#define RT1016_TEST_CTRL_2 0x1e
+#define RT1016_TEST_CTRL_3 0x1f
+#define RT1016_CLOCK_1 0x20
+#define RT1016_CLOCK_2 0x21
+#define RT1016_CLOCK_3 0x22
+#define RT1016_CLOCK_4 0x23
+#define RT1016_CLOCK_5 0x24
+#define RT1016_CLOCK_6 0x25
+#define RT1016_CLOCK_7 0x26
+#define RT1016_I2S_CTRL 0x40
+#define RT1016_DAC_CTRL_1 0x60
+#define RT1016_SC_CTRL_1 0x80
+#define RT1016_SC_CTRL_2 0x81
+#define RT1016_SC_CTRL_3 0x82
+#define RT1016_SC_CTRL_4 0x83
+#define RT1016_SIL_DET 0xa0
+#define RT1016_SYS_CLK 0xc0
+#define RT1016_BIAS_CUR 0xc1
+#define RT1016_DAC_CTRL_2 0xc2
+#define RT1016_LDO_CTRL 0xc3
+#define RT1016_CLASSD_1 0xc4
+#define RT1016_PLL1 0xc5
+#define RT1016_PLL2 0xc6
+#define RT1016_PLL3 0xc7
+#define RT1016_CLASSD_2 0xc8
+#define RT1016_CLASSD_OUT 0xc9
+#define RT1016_CLASSD_3 0xca
+#define RT1016_CLASSD_4 0xcb
+#define RT1016_CLASSD_5 0xcc
+#define RT1016_PWR_CTRL 0xcf
+
+/* global definition */
+#define RT1016_L_VOL_MASK (0xff << 8)
+#define RT1016_L_VOL_SFT 8
+#define RT1016_R_VOL_MASK (0xff)
+#define RT1016_R_VOL_SFT 0
+
+/* 0x04 */
+#define RT1016_DA_MUTE_L_SFT 7
+#define RT1016_DA_MUTE_R_SFT 6
+
+/* 0x20 */
+#define RT1016_CLK_SYS_SEL_MASK (0x1 << 15)
+#define RT1016_CLK_SYS_SEL_SFT 15
+#define RT1016_CLK_SYS_SEL_MCLK (0x0 << 15)
+#define RT1016_CLK_SYS_SEL_PLL (0x1 << 15)
+#define RT1016_PLL_SEL_MASK (0x1 << 13)
+#define RT1016_PLL_SEL_SFT 13
+#define RT1016_PLL_SEL_MCLK (0x0 << 13)
+#define RT1016_PLL_SEL_BCLK (0x1 << 13)
+
+/* 0x21 */
+#define RT1016_FS_PD_MASK (0x7 << 13)
+#define RT1016_FS_PD_SFT 13
+#define RT1016_OSR_PD_MASK (0x3 << 10)
+#define RT1016_OSR_PD_SFT 10
+
+/* 0x22 */
+#define RT1016_PWR_DAC_FILTER (0x1 << 11)
+#define RT1016_PWR_DAC_FILTER_BIT 11
+#define RT1016_PWR_DACMOD (0x1 << 10)
+#define RT1016_PWR_DACMOD_BIT 10
+#define RT1016_PWR_CLK_FIFO (0x1 << 9)
+#define RT1016_PWR_CLK_FIFO_BIT 9
+#define RT1016_PWR_CLK_PUREDC (0x1 << 8)
+#define RT1016_PWR_CLK_PUREDC_BIT 8
+#define RT1016_PWR_SIL_DET (0x1 << 7)
+#define RT1016_PWR_SIL_DET_BIT 7
+#define RT1016_PWR_RC_25M (0x1 << 6)
+#define RT1016_PWR_RC_25M_BIT 6
+#define RT1016_PWR_PLL1 (0x1 << 5)
+#define RT1016_PWR_PLL1_BIT 5
+#define RT1016_PWR_ANA_CTRL (0x1 << 4)
+#define RT1016_PWR_ANA_CTRL_BIT 4
+#define RT1016_PWR_CLK_SYS (0x1 << 3)
+#define RT1016_PWR_CLK_SYS_BIT 3
+
+/* 0x23 */
+#define RT1016_PWR_LRCK_DET (0x1 << 15)
+#define RT1016_PWR_LRCK_DET_BIT 15
+#define RT1016_PWR_BCLK_DET (0x1 << 11)
+#define RT1016_PWR_BCLK_DET_BIT 11
+
+/* 0x40 */
+#define RT1016_I2S_BCLK_MS_MASK (0x1 << 15)
+#define RT1016_I2S_BCLK_MS_SFT 15
+#define RT1016_I2S_BCLK_MS_32 (0x0 << 15)
+#define RT1016_I2S_BCLK_MS_64 (0x1 << 15)
+#define RT1016_I2S_BCLK_POL_MASK (0x1 << 13)
+#define RT1016_I2S_BCLK_POL_SFT 13
+#define RT1016_I2S_BCLK_POL_NOR (0x0 << 13)
+#define RT1016_I2S_BCLK_POL_INV (0x1 << 13)
+#define RT1016_I2S_DATA_SWAP_MASK (0x1 << 10)
+#define RT1016_I2S_DATA_SWAP_SFT 10
+#define RT1016_I2S_DL_MASK (0x7 << 4)
+#define RT1016_I2S_DL_SFT 4
+#define RT1016_I2S_DL_16 (0x1 << 4)
+#define RT1016_I2S_DL_20 (0x2 << 4)
+#define RT1016_I2S_DL_24 (0x3 << 4)
+#define RT1016_I2S_DL_32 (0x4 << 4)
+#define RT1016_I2S_MS_MASK (0x1 << 3)
+#define RT1016_I2S_MS_SFT 3
+#define RT1016_I2S_MS_M (0x0 << 3)
+#define RT1016_I2S_MS_S (0x1 << 3)
+#define RT1016_I2S_DF_MASK (0x7 << 0)
+#define RT1016_I2S_DF_SFT 0
+#define RT1016_I2S_DF_I2S (0x0)
+#define RT1016_I2S_DF_LEFT (0x1)
+#define RT1016_I2S_DF_PCM_A (0x2)
+#define RT1016_I2S_DF_PCM_B (0x3)
+
+/* 0xa0 */
+#define RT1016_SIL_DET_EN (0x1 << 15)
+#define RT1016_SIL_DET_EN_BIT 15
+
+/* 0xc2 */
+#define RT1016_CKGEN_DAC (0x1 << 13)
+#define RT1016_CKGEN_DAC_BIT 13
+
+/* 0xc4 */
+#define RT1016_VCM_SLOW (0x1 << 6)
+#define RT1016_VCM_SLOW_BIT 6
+
+/* 0xc5 */
+#define RT1016_PLL_M_MAX 0xf
+#define RT1016_PLL_M_MASK (RT1016_PLL_M_MAX << 12)
+#define RT1016_PLL_M_SFT 12
+#define RT1016_PLL_M_BP (0x1 << 11)
+#define RT1016_PLL_M_BP_SFT 11
+#define RT1016_PLL_N_MAX 0x1ff
+#define RT1016_PLL_N_MASK (RT1016_PLL_N_MAX << 0)
+#define RT1016_PLL_N_SFT 0
+
+/* 0xc6 */
+#define RT1016_PLL2_EN (0x1 << 15)
+#define RT1016_PLL2_EN_BIT 15
+#define RT1016_PLL_K_BP (0x1 << 5)
+#define RT1016_PLL_K_BP_SFT 5
+#define RT1016_PLL_K_MAX 0x1f
+#define RT1016_PLL_K_MASK (RT1016_PLL_K_MAX)
+#define RT1016_PLL_K_SFT 0
+
+/* 0xcf */
+#define RT1016_PWR_BG_1_2 (0x1 << 12)
+#define RT1016_PWR_BG_1_2_BIT 12
+#define RT1016_PWR_MBIAS_BG (0x1 << 11)
+#define RT1016_PWR_MBIAS_BG_BIT 11
+#define RT1016_PWR_PLL (0x1 << 9)
+#define RT1016_PWR_PLL_BIT 9
+#define RT1016_PWR_BASIC (0x1 << 8)
+#define RT1016_PWR_BASIC_BIT 8
+#define RT1016_PWR_CLSD (0x1 << 7)
+#define RT1016_PWR_CLSD_BIT 7
+#define RT1016_PWR_25M (0x1 << 6)
+#define RT1016_PWR_25M_BIT 6
+#define RT1016_PWR_DACL (0x1 << 4)
+#define RT1016_PWR_DACL_BIT 4
+#define RT1016_PWR_DACR (0x1 << 3)
+#define RT1016_PWR_DACR_BIT 3
+#define RT1016_PWR_LDO2 (0x1 << 2)
+#define RT1016_PWR_LDO2_BIT 2
+#define RT1016_PWR_VREF (0x1 << 1)
+#define RT1016_PWR_VREF_BIT 1
+#define RT1016_PWR_MBIAS (0x1 << 0)
+#define RT1016_PWR_MBIAS_BIT 0
+
+/* System Clock Source */
+enum {
+ RT1016_SCLK_S_MCLK,
+ RT1016_SCLK_S_PLL,
+};
+
+/* PLL1 Source */
+enum {
+ RT1016_PLL_S_MCLK,
+ RT1016_PLL_S_BCLK,
+};
+
+enum {
+ RT1016_AIF1,
+ RT1016_AIFS,
+};
+
+struct rt1016_priv {
+ struct snd_soc_component *component;
+ struct regmap *regmap;
+ int sysclk;
+ int sysclk_src;
+ int lrck;
+ int bclk;
+ int master;
+ int pll_src;
+ int pll_in;
+ int pll_out;
+};
+
+#endif /* __RT1016_H__ */
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index a5a7e46de246..b0ba0d2acbdd 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -178,10 +178,6 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
if (rt1308->hw_init)
return 0;
- ret = rt1308_read_prop(slave);
- if (ret < 0)
- goto _io_init_err_;
-
if (rt1308->first_hw_init) {
regcache_cache_only(rt1308->regmap, false);
regcache_cache_bypass(rt1308->regmap, true);
@@ -235,9 +231,9 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
efuse_c_btl_r = tmp;
regmap_read(rt1308->regmap, 0xc872, &tmp);
efuse_c_btl_r = efuse_c_btl_r | (tmp << 8);
- dev_info(&slave->dev, "%s m_btl_l=0x%x, m_btl_r=0x%x\n", __func__,
+ dev_dbg(&slave->dev, "%s m_btl_l=0x%x, m_btl_r=0x%x\n", __func__,
efuse_m_btl_l, efuse_m_btl_r);
- dev_info(&slave->dev, "%s c_btl_l=0x%x, c_btl_r=0x%x\n", __func__,
+ dev_dbg(&slave->dev, "%s c_btl_l=0x%x, c_btl_r=0x%x\n", __func__,
efuse_c_btl_l, efuse_c_btl_r);
/* initial settings */
@@ -282,7 +278,6 @@ static int rt1308_io_init(struct device *dev, struct sdw_slave *slave)
dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
-_io_init_err_:
return ret;
}
@@ -482,6 +477,9 @@ static int rt1308_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
{
struct sdw_stream_data *stream;
+ if (!sdw_stream)
+ return 0;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
@@ -684,9 +682,6 @@ static int rt1308_sdw_probe(struct sdw_slave *slave,
{
struct regmap *regmap;
- /* Assign ops */
- slave->ops = &rt1308_slave_ops;
-
/* Regmap Initialization */
regmap = devm_regmap_init_sdw(slave, &rt1308_sdw_regmap);
if (!regmap)
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 6ba1849a77b0..e2e1d5b03b38 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3625,6 +3625,12 @@ static const struct rt5645_platform_data asus_t100ha_platform_data = {
.inv_jd1_1 = true,
};
+static const struct rt5645_platform_data asus_t101ha_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+ .dmic2_data_pin = RT5645_DMIC2_DISABLE,
+ .jd_mode = 3,
+};
+
static const struct rt5645_platform_data lenovo_ideapad_miix_310_pdata = {
.jd_mode = 3,
.in2_diff = true,
@@ -3709,6 +3715,14 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&asus_t100ha_platform_data,
},
{
+ .ident = "ASUS T101HA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&asus_t101ha_platform_data,
+ },
+ {
.ident = "MINIX Z83-4",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 3f40d2751833..7bfade8b3d6e 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -605,20 +605,15 @@ static int rt5677_spi_probe(struct spi_device *spi)
g_spi = spi;
- ret = snd_soc_register_component(&spi->dev, &rt5677_spi_dai_component,
- &rt5677_spi_dai, 1);
+ ret = devm_snd_soc_register_component(&spi->dev,
+ &rt5677_spi_dai_component,
+ &rt5677_spi_dai, 1);
if (ret < 0)
dev_err(&spi->dev, "Failed to register component.\n");
return ret;
}
-static int rt5677_spi_remove(struct spi_device *spi)
-{
- snd_soc_unregister_component(&spi->dev);
- return 0;
-}
-
static const struct acpi_device_id rt5677_spi_acpi_id[] = {
{ "RT5677AA", 0 },
{ }
@@ -631,7 +626,6 @@ static struct spi_driver rt5677_spi_driver = {
.acpi_match_table = ACPI_PTR(rt5677_spi_acpi_id),
},
.probe = rt5677_spi_probe,
- .remove = rt5677_spi_remove,
};
module_spi_driver(rt5677_spi_driver);
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
new file mode 100644
index 000000000000..e28d08b1cd65
--- /dev/null
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// rt5682-i2c.c -- RT5682 ALSA SoC audio component driver
+//
+// Copyright 2018 Realtek Semiconductor Corp.
+// Author: Bard Liao <bardliao@realtek.com>
+//
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/rt5682.h>
+
+#include "rl6231.h"
+#include "rt5682.h"
+
+static const struct rt5682_platform_data i2s_default_platform_data = {
+ .dmic1_data_pin = RT5682_DMIC1_DATA_GPIO2,
+ .dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO3,
+ .jd_src = RT5682_JD1,
+ .btndet_delay = 16,
+ .dai_clk_names[RT5682_DAI_WCLK_IDX] = "rt5682-dai-wclk",
+ .dai_clk_names[RT5682_DAI_BCLK_IDX] = "rt5682-dai-bclk",
+};
+
+static const struct regmap_config rt5682_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = RT5682_I2C_MODE,
+ .volatile_reg = rt5682_volatile_register,
+ .readable_reg = rt5682_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5682_reg,
+ .num_reg_defaults = RT5682_REG_NUM,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static void rt5682_jd_check_handler(struct work_struct *work)
+{
+ struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
+ jd_check_work.work);
+
+ if (snd_soc_component_read32(rt5682->component, RT5682_AJD1_CTRL)
+ & RT5682_JDH_RS_MASK) {
+ /* jack out */
+ rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0);
+
+ snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
+ } else {
+ schedule_delayed_work(&rt5682->jd_check_work, 500);
+ }
+}
+
+static irqreturn_t rt5682_irq(int irq, void *data)
+{
+ struct rt5682_priv *rt5682 = data;
+
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+static struct snd_soc_dai_driver rt5682_dai[] = {
+ {
+ .name = "rt5682-aif1",
+ .id = RT5682_AIF1,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_aif1_dai_ops,
+ },
+ {
+ .name = "rt5682-aif2",
+ .id = RT5682_AIF2,
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_aif2_dai_ops,
+ },
+};
+
+static int rt5682_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct rt5682_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ struct rt5682_priv *rt5682;
+ int i, ret;
+ unsigned int val;
+
+ rt5682 = devm_kzalloc(&i2c->dev, sizeof(struct rt5682_priv),
+ GFP_KERNEL);
+ if (!rt5682)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, rt5682);
+
+ rt5682->pdata = i2s_default_platform_data;
+
+ if (pdata)
+ rt5682->pdata = *pdata;
+ else
+ rt5682_parse_dt(rt5682, &i2c->dev);
+
+ rt5682->regmap = devm_regmap_init_i2c(i2c, &rt5682_regmap);
+ if (IS_ERR(rt5682->regmap)) {
+ ret = PTR_ERR(rt5682->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rt5682->supplies); i++)
+ rt5682->supplies[i].supply = rt5682_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5682->supplies),
+ rt5682->supplies);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
+ rt5682->supplies);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ if (gpio_is_valid(rt5682->pdata.ldo1_en)) {
+ if (devm_gpio_request_one(&i2c->dev, rt5682->pdata.ldo1_en,
+ GPIOF_OUT_INIT_HIGH, "rt5682"))
+ dev_err(&i2c->dev, "Fail gpio_request gpio_ldo\n");
+ }
+
+ /* Sleep for 300 ms minimum */
+ usleep_range(300000, 350000);
+
+ regmap_write(rt5682->regmap, RT5682_I2C_MODE, 0x1);
+ usleep_range(10000, 15000);
+
+ regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
+ if (val != DEVICE_ID) {
+ dev_err(&i2c->dev,
+ "Device with ID register %x is not rt5682\n", val);
+ return -ENODEV;
+ }
+
+ mutex_init(&rt5682->calibrate_mutex);
+ rt5682_calibrate(rt5682);
+
+ rt5682_apply_patch_list(rt5682, &i2c->dev);
+
+ regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0000);
+
+ /* DMIC pin */
+ if (rt5682->pdata.dmic1_data_pin != RT5682_DMIC1_NULL) {
+ switch (rt5682->pdata.dmic1_data_pin) {
+ case RT5682_DMIC1_DATA_GPIO2: /* share with LRCK2 */
+ regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
+ RT5682_DMIC_1_DP_MASK, RT5682_DMIC_1_DP_GPIO2);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP2_PIN_MASK, RT5682_GP2_PIN_DMIC_SDA);
+ break;
+
+ case RT5682_DMIC1_DATA_GPIO5: /* share with DACDAT1 */
+ regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
+ RT5682_DMIC_1_DP_MASK, RT5682_DMIC_1_DP_GPIO5);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP5_PIN_MASK, RT5682_GP5_PIN_DMIC_SDA);
+ break;
+
+ default:
+ dev_warn(&i2c->dev, "invalid DMIC_DAT pin\n");
+ break;
+ }
+
+ switch (rt5682->pdata.dmic1_clk_pin) {
+ case RT5682_DMIC1_CLK_GPIO1: /* share with IRQ */
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP1_PIN_MASK, RT5682_GP1_PIN_DMIC_CLK);
+ break;
+
+ case RT5682_DMIC1_CLK_GPIO3: /* share with BCLK2 */
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP3_PIN_MASK, RT5682_GP3_PIN_DMIC_CLK);
+ break;
+
+ default:
+ dev_warn(&i2c->dev, "invalid DMIC_CLK pin\n");
+ break;
+ }
+ }
+
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_LDO1_DVO_MASK | RT5682_HP_DRIVER_MASK,
+ RT5682_LDO1_DVO_12 | RT5682_HP_DRIVER_5X);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
+ regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
+ RT5682_GP4_PIN_MASK | RT5682_GP5_PIN_MASK,
+ RT5682_GP4_PIN_ADCDAT1 | RT5682_GP5_PIN_DACDAT1);
+ regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+ regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
+ RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
+ regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
+ RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
+ regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
+ regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
+ RT5682_FIFO_CLK_DIV_MASK, RT5682_FIFO_CLK_DIV_2);
+
+ INIT_DELAYED_WORK(&rt5682->jack_detect_work,
+ rt5682_jack_detect_handler);
+ INIT_DELAYED_WORK(&rt5682->jd_check_work,
+ rt5682_jd_check_handler);
+
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5682_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT, "rt5682", rt5682);
+ if (ret)
+ dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+ }
+
+ return devm_snd_soc_register_component(&i2c->dev,
+ &rt5682_soc_component_dev,
+ rt5682_dai, ARRAY_SIZE(rt5682_dai));
+}
+
+static void rt5682_i2c_shutdown(struct i2c_client *client)
+{
+ struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
+
+ rt5682_reset(rt5682);
+}
+
+static const struct of_device_id rt5682_of_match[] = {
+ {.compatible = "realtek,rt5682i"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, rt5682_of_match);
+
+static const struct acpi_device_id rt5682_acpi_match[] = {
+ {"10EC5682", 0,},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5682_acpi_match);
+
+static const struct i2c_device_id rt5682_i2c_id[] = {
+ {"rt5682", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, rt5682_i2c_id);
+
+static struct i2c_driver rt5682_i2c_driver = {
+ .driver = {
+ .name = "rt5682",
+ .of_match_table = rt5682_of_match,
+ .acpi_match_table = rt5682_acpi_match,
+ },
+ .probe = rt5682_i2c_probe,
+ .shutdown = rt5682_i2c_shutdown,
+ .id_table = rt5682_i2c_id,
+};
+module_i2c_driver(rt5682_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5682 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
index a2d1d3ae1e31..4cecc5ce545c 100644
--- a/sound/soc/codecs/rt5682-sdw.c
+++ b/sound/soc/codecs/rt5682-sdw.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <linux/soundwire/sdw.h>
@@ -28,7 +29,461 @@
#include <sound/tlv.h>
#include "rt5682.h"
-#include "rt5682-sdw.h"
+
+#define RT5682_SDW_ADDR_L 0x3000
+#define RT5682_SDW_ADDR_H 0x3001
+#define RT5682_SDW_DATA_L 0x3004
+#define RT5682_SDW_DATA_H 0x3005
+#define RT5682_SDW_CMD 0x3008
+
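+/*
+ * The RT5682 register map is reached indirectly over SoundWire: the 16-bit
+ * register address goes into RT5682_SDW_ADDR_H/L, the 16-bit value is moved
+ * through RT5682_SDW_DATA_H/L, and RT5682_SDW_CMD selects the direction
+ * (0 = read, 1 = write).
+ */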
+static int rt5682_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ unsigned int data_l, data_h;
+
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_CMD, 0);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_H, (reg >> 8) & 0xff);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_L, (reg & 0xff));
+ regmap_read(rt5682->sdw_regmap, RT5682_SDW_DATA_H, &data_h);
+ regmap_read(rt5682->sdw_regmap, RT5682_SDW_DATA_L, &data_l);
+
+ *val = (data_h << 8) | data_l;
+
+ dev_vdbg(dev, "[%s] %04x => %04x\n", __func__, reg, *val);
+
+ return 0;
+}
+
+static int rt5682_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_CMD, 1);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_H, (reg >> 8) & 0xff);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_L, (reg & 0xff));
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_DATA_H, (val >> 8) & 0xff);
+ regmap_write(rt5682->sdw_regmap, RT5682_SDW_DATA_L, (val & 0xff));
+
+ dev_vdbg(dev, "[%s] %04x <= %04x\n", __func__, reg, val);
+
+ return 0;
+}
+
+static const struct regmap_config rt5682_sdw_indirect_regmap = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = RT5682_I2C_MODE,
+ .volatile_reg = rt5682_volatile_register,
+ .readable_reg = rt5682_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = rt5682_reg,
+ .num_reg_defaults = RT5682_REG_NUM,
+ .use_single_read = true,
+ .use_single_write = true,
+ .reg_read = rt5682_sdw_read,
+ .reg_write = rt5682_sdw_write,
+};
+
+struct sdw_stream_data {
+ struct sdw_stream_runtime *sdw_stream;
+};
+
+static int rt5682_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
+ int direction)
+{
+ struct sdw_stream_data *stream;
+
+ if (!sdw_stream)
+ return 0;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->sdw_stream = (struct sdw_stream_runtime *)sdw_stream;
+
+ /* Use tx_mask or rx_mask to configure stream tag and set dma_data */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ dai->playback_dma_data = stream;
+ else
+ dai->capture_dma_data = stream;
+
+ return 0;
+}
+
+static void rt5682_sdw_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_stream_data *stream;
+
+ stream = snd_soc_dai_get_dma_data(dai, substream);
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+ kfree(stream);
+}
+
+static int rt5682_sdw_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct sdw_stream_config stream_config;
+ struct sdw_port_config port_config;
+ enum sdw_data_direction direction;
+ struct sdw_stream_data *stream;
+ int retval, port, num_channels;
+ unsigned int val_p = 0, val_c = 0, osr_p = 0, osr_c = 0;
+
+ dev_dbg(dai->dev, "%s %s", __func__, dai->name);
+
+ stream = snd_soc_dai_get_dma_data(dai, substream);
+ if (!stream)
+ return -ENOMEM;
+
+ if (!rt5682->slave)
+ return -EINVAL;
+
+ /* SoundWire specific configuration */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ direction = SDW_DATA_DIR_RX;
+ port = 1;
+ } else {
+ direction = SDW_DATA_DIR_TX;
+ port = 2;
+ }
+
+ stream_config.frame_rate = params_rate(params);
+ stream_config.ch_count = params_channels(params);
+ stream_config.bps = snd_pcm_format_width(params_format(params));
+ stream_config.direction = direction;
+
+ num_channels = params_channels(params);
+ port_config.ch_mask = (1 << (num_channels)) - 1;
+ port_config.num = port;
+
+ retval = sdw_stream_add_slave(rt5682->slave, &stream_config,
+ &port_config, 1, stream->sdw_stream);
+ if (retval) {
+ dev_err(dai->dev, "Unable to configure port\n");
+ return retval;
+ }
+
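+ /*
+ * Pick the SoundWire reference clock setting for the playback (val_p) or
+ * capture (val_c) path and the matching DAC/ADC oversampling ratio for
+ * the requested sample rate.
+ */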
+ switch (params_rate(params)) {
+ case 48000:
+ val_p = RT5682_SDW_REF_1_48K;
+ val_c = RT5682_SDW_REF_2_48K;
+ break;
+ case 96000:
+ val_p = RT5682_SDW_REF_1_96K;
+ val_c = RT5682_SDW_REF_2_96K;
+ break;
+ case 192000:
+ val_p = RT5682_SDW_REF_1_192K;
+ val_c = RT5682_SDW_REF_2_192K;
+ break;
+ case 32000:
+ val_p = RT5682_SDW_REF_1_32K;
+ val_c = RT5682_SDW_REF_2_32K;
+ break;
+ case 24000:
+ val_p = RT5682_SDW_REF_1_24K;
+ val_c = RT5682_SDW_REF_2_24K;
+ break;
+ case 16000:
+ val_p = RT5682_SDW_REF_1_16K;
+ val_c = RT5682_SDW_REF_2_16K;
+ break;
+ case 12000:
+ val_p = RT5682_SDW_REF_1_12K;
+ val_c = RT5682_SDW_REF_2_12K;
+ break;
+ case 8000:
+ val_p = RT5682_SDW_REF_1_8K;
+ val_c = RT5682_SDW_REF_2_8K;
+ break;
+ case 44100:
+ val_p = RT5682_SDW_REF_1_44K;
+ val_c = RT5682_SDW_REF_2_44K;
+ break;
+ case 88200:
+ val_p = RT5682_SDW_REF_1_88K;
+ val_c = RT5682_SDW_REF_2_88K;
+ break;
+ case 176400:
+ val_p = RT5682_SDW_REF_1_176K;
+ val_c = RT5682_SDW_REF_2_176K;
+ break;
+ case 22050:
+ val_p = RT5682_SDW_REF_1_22K;
+ val_c = RT5682_SDW_REF_2_22K;
+ break;
+ case 11025:
+ val_p = RT5682_SDW_REF_1_11K;
+ val_c = RT5682_SDW_REF_2_11K;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (params_rate(params) <= 48000) {
+ osr_p = RT5682_DAC_OSR_D_8;
+ osr_c = RT5682_ADC_OSR_D_8;
+ } else if (params_rate(params) <= 96000) {
+ osr_p = RT5682_DAC_OSR_D_4;
+ osr_c = RT5682_ADC_OSR_D_4;
+ } else {
+ osr_p = RT5682_DAC_OSR_D_2;
+ osr_c = RT5682_ADC_OSR_D_2;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_update_bits(rt5682->regmap, RT5682_SDW_REF_CLK,
+ RT5682_SDW_REF_1_MASK, val_p);
+ regmap_update_bits(rt5682->regmap, RT5682_ADDA_CLK_1,
+ RT5682_DAC_OSR_MASK, osr_p);
+ } else {
+ regmap_update_bits(rt5682->regmap, RT5682_SDW_REF_CLK,
+ RT5682_SDW_REF_2_MASK, val_c);
+ regmap_update_bits(rt5682->regmap, RT5682_ADDA_CLK_1,
+ RT5682_ADC_OSR_MASK, osr_c);
+ }
+
+ return retval;
+}
+
+static int rt5682_sdw_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
+ struct sdw_stream_data *stream =
+ snd_soc_dai_get_dma_data(dai, substream);
+
+ if (!rt5682->slave)
+ return -EINVAL;
+
+ sdw_stream_remove_slave(rt5682->slave, stream->sdw_stream);
+ return 0;
+}
+
+static struct snd_soc_dai_ops rt5682_sdw_ops = {
+ .hw_params = rt5682_sdw_hw_params,
+ .hw_free = rt5682_sdw_hw_free,
+ .set_sdw_stream = rt5682_set_sdw_stream,
+ .shutdown = rt5682_sdw_shutdown,
+};
+
+static struct snd_soc_dai_driver rt5682_dai[] = {
+ {
+ .name = "rt5682-aif1",
+ .id = RT5682_AIF1,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_aif1_dai_ops,
+ },
+ {
+ .name = "rt5682-aif2",
+ .id = RT5682_AIF2,
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_aif2_dai_ops,
+ },
+ {
+ .name = "rt5682-sdw",
+ .id = RT5682_SDW,
+ .playback = {
+ .stream_name = "SDW Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .capture = {
+ .stream_name = "SDW Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = RT5682_STEREO_RATES,
+ .formats = RT5682_FORMATS,
+ },
+ .ops = &rt5682_sdw_ops,
+ },
+};
+
+static int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
+ struct sdw_slave *slave)
+{
+ struct rt5682_priv *rt5682;
+ int ret;
+
+ rt5682 = devm_kzalloc(dev, sizeof(*rt5682), GFP_KERNEL);
+ if (!rt5682)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, rt5682);
+ rt5682->slave = slave;
+ rt5682->sdw_regmap = regmap;
+ rt5682->is_sdw = true;
+
+ rt5682->regmap = devm_regmap_init(dev, NULL, dev,
+ &rt5682_sdw_indirect_regmap);
+ if (IS_ERR(rt5682->regmap)) {
+ ret = PTR_ERR(rt5682->regmap);
+ dev_err(dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Mark hw_init to false
+ * HW init will be performed when device reports present
+ */
+ rt5682->hw_init = false;
+ rt5682->first_hw_init = false;
+
+ mutex_init(&rt5682->calibrate_mutex);
+ INIT_DELAYED_WORK(&rt5682->jack_detect_work,
+ rt5682_jack_detect_handler);
+
+ ret = devm_snd_soc_register_component(dev,
+ &rt5682_soc_component_dev,
+ rt5682_dai, ARRAY_SIZE(rt5682_dai));
+ dev_dbg(&slave->dev, "%s\n", __func__);
+
+ return ret;
+}
+
+static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
+{
+ struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
+ int ret = 0;
+ unsigned int val;
+
+ if (rt5682->hw_init)
+ return 0;
+
+ regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
+ if (val != DEVICE_ID) {
+ dev_err(dev, "Device with ID register %x is not rt5682\n", val);
+ return -ENODEV;
+ }
+
+ /*
+ * PM runtime is only enabled when a Slave reports as Attached
+ */
+ if (!rt5682->first_hw_init) {
+ /* set autosuspend parameters */
+ pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
+ pm_runtime_use_autosuspend(&slave->dev);
+
+ /* update count of parent 'active' children */
+ pm_runtime_set_active(&slave->dev);
+
+ /* make sure the device does not suspend immediately */
+ pm_runtime_mark_last_busy(&slave->dev);
+
+ pm_runtime_enable(&slave->dev);
+ }
+
+ pm_runtime_get_noresume(&slave->dev);
+
+ if (rt5682->first_hw_init) {
+ regcache_cache_only(rt5682->regmap, false);
+ regcache_cache_bypass(rt5682->regmap, true);
+ }
+
+ rt5682_calibrate(rt5682);
+
+ if (rt5682->first_hw_init) {
+ regcache_cache_bypass(rt5682->regmap, false);
+ regcache_mark_dirty(rt5682->regmap);
+ regcache_sync(rt5682->regmap);
+
+ /* volatile registers */
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
+ RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+
+ goto reinit;
+ }
+
+ rt5682_apply_patch_list(rt5682, dev);
+
+ regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0000);
+
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
+ RT5682_LDO1_DVO_MASK | RT5682_HP_DRIVER_MASK,
+ RT5682_LDO1_DVO_12 | RT5682_HP_DRIVER_5X);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
+ regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+ regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
+ RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
+ regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
+ RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
+ regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
+
+ /* Soundwire */
+ regmap_write(rt5682->regmap, RT5682_PLL2_INTERNAL, 0xa266);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_1, 0x1700);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_2, 0x0006);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_3, 0x2600);
+ regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_4, 0x0c8f);
+ regmap_write(rt5682->regmap, RT5682_PLL_TRACK_2, 0x3000);
+ regmap_write(rt5682->regmap, RT5682_PLL_TRACK_3, 0x4000);
+ regmap_update_bits(rt5682->regmap, RT5682_GLB_CLK,
+ RT5682_SCLK_SRC_MASK | RT5682_PLL2_SRC_MASK,
+ RT5682_SCLK_SRC_PLL2 | RT5682_PLL2_SRC_SDW);
+
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
+ RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
+ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
+ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
+ RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
+ regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
+ RT5682_SAR_POW_MASK, RT5682_SAR_POW_EN);
+ regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+ RT5682_POW_IRQ | RT5682_POW_JDH |
+ RT5682_POW_ANA, RT5682_POW_IRQ |
+ RT5682_POW_JDH | RT5682_POW_ANA);
+ regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_2,
+ RT5682_PWR_JDH, RT5682_PWR_JDH);
+ regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+ RT5682_JD1_EN_MASK | RT5682_JD1_IRQ_MASK,
+ RT5682_JD1_EN | RT5682_JD1_IRQ_PUL);
+
+reinit:
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
+
+ /* Mark Slave initialization complete */
+ rt5682->hw_init = true;
+ rt5682->first_hw_init = true;
+
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put_autosuspend(&slave->dev);
+
+ dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
+
+ return ret;
+}
static bool rt5682_sdw_readable_register(struct device *dev, unsigned int reg)
{
@@ -46,7 +501,7 @@ static bool rt5682_sdw_readable_register(struct device *dev, unsigned int reg)
}
}
-const struct regmap_config rt5682_sdw_regmap = {
+static const struct regmap_config rt5682_sdw_regmap = {
.name = "sdw",
.reg_bits = 32,
.val_bits = 8,
@@ -241,9 +696,6 @@ static int rt5682_sdw_probe(struct sdw_slave *slave,
{
struct regmap *regmap;
- /* Assign ops */
- slave->ops = &rt5682_slave_ops;
-
/* Regmap Initialization */
regmap = devm_regmap_init_sdw(slave, &rt5682_sdw_regmap);
if (IS_ERR(regmap))
diff --git a/sound/soc/codecs/rt5682-sdw.h b/sound/soc/codecs/rt5682-sdw.h
deleted file mode 100644
index 76e6f607066e..000000000000
--- a/sound/soc/codecs/rt5682-sdw.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only
- *
- * rt5682-sdw.h -- RT5682 SDW ALSA SoC audio driver
- *
- * Copyright 2019 Realtek Semiconductor Corp.
- * Author: Oder Chiou <oder_chiou@realtek.com>
- */
-
-#ifndef __RT5682_SDW_H__
-#define __RT5682_SDW_H__
-
-#define RT5682_SDW_ADDR_L 0x3000
-#define RT5682_SDW_ADDR_H 0x3001
-#define RT5682_SDW_DATA_L 0x3004
-#define RT5682_SDW_DATA_H 0x3005
-#define RT5682_SDW_CMD 0x3008
-
-#define RT5682_PROBE_TIMEOUT 2000
-
-#endif /* __RT5682_SDW_H__ */
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index d36f560ad7a8..d3245123101d 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rt5682.c -- RT5682 ALSA SoC audio component driver
- *
- * Copyright 2018 Realtek Semiconductor Corp.
- * Author: Bard Liao <bardliao@realtek.com>
- */
+//
+// rt5682.c -- RT5682 ALSA SoC audio component driver
+//
+// Copyright 2018 Realtek Semiconductor Corp.
+// Author: Bard Liao <bardliao@realtek.com>
+//
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
@@ -31,22 +30,13 @@
#include "rl6231.h"
#include "rt5682.h"
-#include "rt5682-sdw.h"
-static const char *rt5682_supply_names[RT5682_NUM_SUPPLIES] = {
+const char *rt5682_supply_names[RT5682_NUM_SUPPLIES] = {
"AVDD",
"MICVDD",
"VBAT",
};
-
-static const struct rt5682_platform_data i2s_default_platform_data = {
- .dmic1_data_pin = RT5682_DMIC1_DATA_GPIO2,
- .dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO3,
- .jd_src = RT5682_JD1,
- .btndet_delay = 16,
- .dai_clk_names[RT5682_DAI_WCLK_IDX] = "rt5682-dai-wclk",
- .dai_clk_names[RT5682_DAI_BCLK_IDX] = "rt5682-dai-bclk",
-};
+EXPORT_SYMBOL_GPL(rt5682_supply_names);
static const struct reg_sequence patch_list[] = {
{RT5682_HP_IMP_SENS_CTRL_19, 0x1000},
@@ -55,7 +45,18 @@ static const struct reg_sequence patch_list[] = {
{RT5682_PLL2_INTERNAL, 0x8266},
};
-static const struct reg_default rt5682_reg[] = {
+void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
+{
+ int ret;
+
+ ret = regmap_multi_reg_write(rt5682->regmap, patch_list,
+ ARRAY_SIZE(patch_list));
+ if (ret)
+ dev_warn(dev, "Failed to apply regmap patch: %d\n", ret);
+}
+EXPORT_SYMBOL_GPL(rt5682_apply_patch_list);
+
+const struct reg_default rt5682_reg[RT5682_REG_NUM] = {
{0x0002, 0x8080},
{0x0003, 0x8000},
{0x0005, 0x0000},
@@ -375,8 +376,9 @@ static const struct reg_default rt5682_reg[] = {
{0x03f2, 0x0800},
{0x03f3, 0x0800},
};
+EXPORT_SYMBOL_GPL(rt5682_reg);
-static bool rt5682_volatile_register(struct device *dev, unsigned int reg)
+bool rt5682_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case RT5682_RESET:
@@ -403,8 +405,9 @@ static bool rt5682_volatile_register(struct device *dev, unsigned int reg)
return false;
}
}
+EXPORT_SYMBOL_GPL(rt5682_volatile_register);
-static bool rt5682_readable_register(struct device *dev, unsigned int reg)
+bool rt5682_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case RT5682_RESET:
@@ -733,6 +736,7 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
return false;
}
}
+EXPORT_SYMBOL_GPL(rt5682_readable_register);
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
@@ -800,12 +804,14 @@ static SOC_ENUM_SINGLE_DECL(rt5682_dacr_enum,
static const struct snd_kcontrol_new rt5682_dac_r_mux =
SOC_DAPM_ENUM("DAC R Mux", rt5682_dacr_enum);
-static void rt5682_reset(struct rt5682_priv *rt5682)
+void rt5682_reset(struct rt5682_priv *rt5682)
{
regmap_write(rt5682->regmap, RT5682_RESET, 0);
if (!rt5682->is_sdw)
regmap_write(rt5682->regmap, RT5682_I2C_MODE, 1);
}
+EXPORT_SYMBOL_GPL(rt5682_reset);
+
/**
* rt5682_sel_asrc_clk_src - select ASRC clock source for a set of filters
* @component: SoC audio component device.
@@ -823,7 +829,6 @@ static void rt5682_reset(struct rt5682_priv *rt5682)
int rt5682_sel_asrc_clk_src(struct snd_soc_component *component,
unsigned int filter_mask, unsigned int clk_src)
{
-
switch (clk_src) {
case RT5682_CLK_SEL_SYS:
case RT5682_CLK_SEL_I2S1_ASRC:
@@ -857,7 +862,7 @@ static int rt5682_button_detect(struct snd_soc_component *component)
val = snd_soc_component_read32(component, RT5682_4BTN_IL_CMD_1);
btn_type = val & 0xfff0;
snd_soc_component_write(component, RT5682_4BTN_IL_CMD_1, val);
- pr_debug("%s btn_type=%x\n", __func__, btn_type);
+ dev_dbg(component->dev, "%s btn_type=%x\n", __func__, btn_type);
snd_soc_component_update_bits(component,
RT5682_SAR_IL_CMD_2, 0x10, 0x10);
@@ -910,15 +915,13 @@ static void rt5682_enable_push_button_irq(struct snd_soc_component *component,
*
* Returns detect status.
*/
-static int rt5682_headset_detect(struct snd_soc_component *component,
- int jack_insert)
+int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
struct snd_soc_dapm_context *dapm = &component->dapm;
unsigned int val, count;
if (jack_insert) {
-
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_MB,
RT5682_PWR_VREF2 | RT5682_PWR_MB);
@@ -951,8 +954,8 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
break;
default:
rt5682->jack_type = SND_JACK_HEADPHONE;
+ break;
}
-
} else {
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -973,38 +976,10 @@ static int rt5682_headset_detect(struct snd_soc_component *component,
dev_dbg(component->dev, "jack_type = %d\n", rt5682->jack_type);
return rt5682->jack_type;
}
-
-static irqreturn_t rt5682_irq(int irq, void *data)
-{
- struct rt5682_priv *rt5682 = data;
-
- mod_delayed_work(system_power_efficient_wq,
- &rt5682->jack_detect_work, msecs_to_jiffies(250));
-
- return IRQ_HANDLED;
-}
-
-static void rt5682_jd_check_handler(struct work_struct *work)
-{
- struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
- jd_check_work.work);
-
- if (snd_soc_component_read32(rt5682->component, RT5682_AJD1_CTRL)
- & RT5682_JDH_RS_MASK) {
- /* jack out */
- rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0);
-
- snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
- SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3);
- } else {
- schedule_delayed_work(&rt5682->jd_check_work, 500);
- }
-}
+EXPORT_SYMBOL_GPL(rt5682_headset_detect);
static int rt5682_set_jack_detect(struct snd_soc_component *component,
- struct snd_soc_jack *hs_jack, void *data)
+ struct snd_soc_jack *hs_jack, void *data)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
@@ -1013,9 +988,9 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
if (!rt5682->is_sdw) {
if (!hs_jack) {
regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
- RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+ RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
- RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
cancel_delayed_work_sync(&rt5682->jack_detect_work);
return 0;
}
@@ -1058,15 +1033,15 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
0x7f7f, (rt5682->pdata.btndet_delay << 8 |
rt5682->pdata.btndet_delay));
mod_delayed_work(system_power_efficient_wq,
- &rt5682->jack_detect_work,
- msecs_to_jiffies(250));
+ &rt5682->jack_detect_work,
+ msecs_to_jiffies(250));
break;
case RT5682_JD_NULL:
regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
- RT5682_POW_JDH | RT5682_POW_JDL, 0);
+ RT5682_POW_JDH | RT5682_POW_JDL, 0);
break;
default:
@@ -1078,7 +1053,7 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
return 0;
}
-static void rt5682_jack_detect_handler(struct work_struct *work)
+void rt5682_jack_detect_handler(struct work_struct *work)
{
struct rt5682_priv *rt5682 =
container_of(work, struct rt5682_priv, jack_detect_work.work);
@@ -1135,7 +1110,6 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
case 0x0000: /* unpressed */
break;
default:
- btn_type = 0;
dev_err(rt5682->component->dev,
"Unexpected button code 0x%04x\n",
btn_type);
@@ -1148,9 +1122,9 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
}
snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
- SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3);
+ SND_JACK_HEADSET |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
if (!rt5682->is_sdw) {
if (rt5682->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
@@ -1162,6 +1136,7 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
mutex_unlock(&rt5682->calibrate_mutex);
}
+EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler);
static const struct snd_kcontrol_new rt5682_snd_controls[] = {
/* DAC Digital Volume */
@@ -1184,15 +1159,14 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
3, 0, adc_bst_tlv),
};
-
static int rt5682_div_sel(struct rt5682_priv *rt5682,
- int target, const int div[], int size)
+ int target, const int div[], int size)
{
int i;
if (rt5682->sysclk < target) {
- pr_err("sysclk rate %d is too low\n",
- rt5682->sysclk);
+ dev_err(rt5682->component->dev,
+ "sysclk rate %d is too low\n", rt5682->sysclk);
return 0;
}
@@ -1201,18 +1175,18 @@ static int rt5682_div_sel(struct rt5682_priv *rt5682,
if (target * div[i] == rt5682->sysclk)
return i;
if (target * div[i + 1] > rt5682->sysclk) {
- dev_dbg(rt5682->component->dev, "can't find div for sysclk %d\n",
+ dev_dbg(rt5682->component->dev,
+ "can't find div for sysclk %d\n",
rt5682->sysclk);
return i;
}
}
if (target * div[i] < rt5682->sysclk)
- pr_err("sysclk rate %d is too high\n",
- rt5682->sysclk);
+ dev_err(rt5682->component->dev,
+ "sysclk rate %d is too high\n", rt5682->sysclk);
return size - 1;
-
}
/**
@@ -1226,7 +1200,7 @@ static int rt5682_div_sel(struct rt5682_priv *rt5682,
* It is better for clock to approximate 3MHz.
*/
static int set_dmic_clk(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+ struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
@@ -1246,7 +1220,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
}
static int set_filter_clk(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+ struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
@@ -1290,7 +1264,7 @@ static int set_filter_clk(struct snd_soc_dapm_widget *w,
}
static int is_sys_clk_from_pll1(struct snd_soc_dapm_widget *w,
- struct snd_soc_dapm_widget *sink)
+ struct snd_soc_dapm_widget *sink)
{
unsigned int val;
struct snd_soc_component *component =
@@ -1305,7 +1279,7 @@ static int is_sys_clk_from_pll1(struct snd_soc_dapm_widget *w,
}
static int is_sys_clk_from_pll2(struct snd_soc_dapm_widget *w,
- struct snd_soc_dapm_widget *sink)
+ struct snd_soc_dapm_widget *sink)
{
unsigned int val;
struct snd_soc_component *component =
@@ -1320,7 +1294,7 @@ static int is_sys_clk_from_pll2(struct snd_soc_dapm_widget *w,
}
static int is_using_asrc(struct snd_soc_dapm_widget *w,
- struct snd_soc_dapm_widget *sink)
+ struct snd_soc_dapm_widget *sink)
{
unsigned int reg, shift, val;
struct snd_soc_component *component =
@@ -1347,7 +1321,6 @@ static int is_using_asrc(struct snd_soc_dapm_widget *w,
default:
return 0;
}
-
}
/* Digital Mixer */
@@ -1501,13 +1474,13 @@ static const struct snd_kcontrol_new rt5682_alg_dac_r1_mux =
/* Out Switch */
static const struct snd_kcontrol_new hpol_switch =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
- RT5682_L_MUTE_SFT, 1, 1);
+ RT5682_L_MUTE_SFT, 1, 1);
static const struct snd_kcontrol_new hpor_switch =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
- RT5682_R_MUTE_SFT, 1, 1);
+ RT5682_R_MUTE_SFT, 1, 1);
static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+ struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
@@ -1532,17 +1505,13 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
snd_soc_component_update_bits(component,
RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0000);
break;
-
- default:
- return 0;
}
return 0;
-
}
static int set_dmic_power(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+ struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
@@ -1557,16 +1526,13 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
/*Add delay to avoid pop noise*/
msleep(delay);
break;
-
- default:
- return 0;
}
return 0;
}
static int rt5682_set_verf(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
+ struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
@@ -1583,9 +1549,6 @@ static int rt5682_set_verf(struct snd_soc_dapm_widget *w,
snd_soc_component_update_bits(component,
RT5682_PWR_ANLG_1, RT5682_PWR_FV2, 0);
break;
-
- default:
- break;
}
break;
@@ -1603,14 +1566,8 @@ static int rt5682_set_verf(struct snd_soc_dapm_widget *w,
RT5682_PWR_ANLG_1, RT5682_PWR_FV2,
RT5682_PWR_FV2);
break;
-
- default:
- break;
}
break;
-
- default:
- return 0;
}
return 0;
@@ -1743,23 +1700,23 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
/* Digital Interface Select */
SND_SOC_DAPM_MUX("IF1 01 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_if1_01_adc_swap_mux),
+ &rt5682_if1_01_adc_swap_mux),
SND_SOC_DAPM_MUX("IF1 23 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_if1_23_adc_swap_mux),
+ &rt5682_if1_23_adc_swap_mux),
SND_SOC_DAPM_MUX("IF1 45 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_if1_45_adc_swap_mux),
+ &rt5682_if1_45_adc_swap_mux),
SND_SOC_DAPM_MUX("IF1 67 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_if1_67_adc_swap_mux),
+ &rt5682_if1_67_adc_swap_mux),
SND_SOC_DAPM_MUX("IF2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_if2_adc_swap_mux),
+ &rt5682_if2_adc_swap_mux),
SND_SOC_DAPM_MUX("ADCDAT Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_adcdat_pin_ctrl),
+ &rt5682_adcdat_pin_ctrl),
SND_SOC_DAPM_MUX("DAC L Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_dac_l_mux),
+ &rt5682_dac_l_mux),
SND_SOC_DAPM_MUX("DAC R Mux", SND_SOC_NOPM, 0, 0,
- &rt5682_dac_r_mux),
+ &rt5682_dac_r_mux),
/* Audio Interface */
SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0,
@@ -1831,7 +1788,6 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
/* Output Lines */
SND_SOC_DAPM_OUTPUT("HPOL"),
SND_SOC_DAPM_OUTPUT("HPOR"),
-
};
static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
@@ -1997,7 +1953,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
};
static int rt5682_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
- unsigned int rx_mask, int slots, int slot_width)
+ unsigned int rx_mask, int slots, int slot_width)
{
struct snd_soc_component *component = dai->component;
unsigned int cl, val = 0;
@@ -2065,9 +2021,8 @@ static int rt5682_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
return 0;
}
-
static int rt5682_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
struct snd_soc_component *component = dai->component;
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
@@ -2085,7 +2040,7 @@ static int rt5682_hw_params(struct snd_pcm_substream *substream,
}
dev_dbg(dai->dev, "lrck is %dHz and pre_div is %d for iis %d\n",
- rt5682->lrck[dai->id], pre_div, dai->id);
+ rt5682->lrck[dai->id], pre_div, dai->id);
switch (params_width(params)) {
case 16:
@@ -2469,7 +2424,7 @@ static int rt5682_set_bclk2_ratio(struct snd_soc_dai *dai, unsigned int ratio)
}
static int rt5682_set_bias_level(struct snd_soc_component *component,
- enum snd_soc_bias_level level)
+ enum snd_soc_bias_level level)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
@@ -2492,8 +2447,7 @@ static int rt5682_set_bias_level(struct snd_soc_component *component,
regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
RT5682_PWR_BG, 0);
break;
-
- default:
+ case SND_SOC_BIAS_ON:
break;
}
@@ -2667,7 +2621,7 @@ static unsigned long rt5682_bclk_recalc_rate(struct clk_hw *hw,
unsigned int bclks_per_wclk;
snd_soc_component_read(component, RT5682_TDM_TCON_CTRL,
- &bclks_per_wclk);
+ &bclks_per_wclk);
switch (bclks_per_wclk & RT5682_TDM_BCLK_MS1_MASK) {
case RT5682_TDM_BCLK_MS1_256:
@@ -2922,7 +2876,8 @@ static int rt5682_resume(struct snd_soc_component *component)
regcache_cache_only(rt5682->regmap, false);
regcache_sync(rt5682->regmap);
- rt5682_irq(0, rt5682);
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(250));
return 0;
}
@@ -2931,267 +2886,22 @@ static int rt5682_resume(struct snd_soc_component *component)
#define rt5682_resume NULL
#endif
-#define RT5682_STEREO_RATES SNDRV_PCM_RATE_8000_192000
-#define RT5682_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
-
-static const struct snd_soc_dai_ops rt5682_aif1_dai_ops = {
+const struct snd_soc_dai_ops rt5682_aif1_dai_ops = {
.hw_params = rt5682_hw_params,
.set_fmt = rt5682_set_dai_fmt,
.set_tdm_slot = rt5682_set_tdm_slot,
.set_bclk_ratio = rt5682_set_bclk1_ratio,
};
+EXPORT_SYMBOL_GPL(rt5682_aif1_dai_ops);
-static const struct snd_soc_dai_ops rt5682_aif2_dai_ops = {
+const struct snd_soc_dai_ops rt5682_aif2_dai_ops = {
.hw_params = rt5682_hw_params,
.set_fmt = rt5682_set_dai_fmt,
.set_bclk_ratio = rt5682_set_bclk2_ratio,
};
+EXPORT_SYMBOL_GPL(rt5682_aif2_dai_ops);
-#if IS_ENABLED(CONFIG_SND_SOC_RT5682_SDW)
-struct sdw_stream_data {
- struct sdw_stream_runtime *sdw_stream;
-};
-
-static int rt5682_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
- int direction)
-{
- struct sdw_stream_data *stream;
-
- stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
-
- stream->sdw_stream = (struct sdw_stream_runtime *)sdw_stream;
-
- /* Use tx_mask or rx_mask to configure stream tag and set dma_data */
- if (direction == SNDRV_PCM_STREAM_PLAYBACK)
- dai->playback_dma_data = stream;
- else
- dai->capture_dma_data = stream;
-
- return 0;
-}
-
-static void rt5682_sdw_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sdw_stream_data *stream;
-
- stream = snd_soc_dai_get_dma_data(dai, substream);
- snd_soc_dai_set_dma_data(dai, substream, NULL);
- kfree(stream);
-}
-
-static int rt5682_sdw_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
- struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- struct sdw_stream_config stream_config;
- struct sdw_port_config port_config;
- enum sdw_data_direction direction;
- struct sdw_stream_data *stream;
- int retval, port, num_channels;
- unsigned int val_p = 0, val_c = 0, osr_p = 0, osr_c = 0;
-
- dev_dbg(dai->dev, "%s %s", __func__, dai->name);
- stream = snd_soc_dai_get_dma_data(dai, substream);
-
- if (!stream)
- return -ENOMEM;
-
- if (!rt5682->slave)
- return -EINVAL;
-
- /* SoundWire specific configuration */
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- direction = SDW_DATA_DIR_RX;
- port = 1;
- } else {
- direction = SDW_DATA_DIR_TX;
- port = 2;
- }
-
- stream_config.frame_rate = params_rate(params);
- stream_config.ch_count = params_channels(params);
- stream_config.bps = snd_pcm_format_width(params_format(params));
- stream_config.direction = direction;
-
- num_channels = params_channels(params);
- port_config.ch_mask = (1 << (num_channels)) - 1;
- port_config.num = port;
-
- retval = sdw_stream_add_slave(rt5682->slave, &stream_config,
- &port_config, 1, stream->sdw_stream);
- if (retval) {
- dev_err(dai->dev, "Unable to configure port\n");
- return retval;
- }
-
- switch (params_rate(params)) {
- case 48000:
- val_p = RT5682_SDW_REF_1_48K;
- val_c = RT5682_SDW_REF_2_48K;
- break;
- case 96000:
- val_p = RT5682_SDW_REF_1_96K;
- val_c = RT5682_SDW_REF_2_96K;
- break;
- case 192000:
- val_p = RT5682_SDW_REF_1_192K;
- val_c = RT5682_SDW_REF_2_192K;
- break;
- case 32000:
- val_p = RT5682_SDW_REF_1_32K;
- val_c = RT5682_SDW_REF_2_32K;
- break;
- case 24000:
- val_p = RT5682_SDW_REF_1_24K;
- val_c = RT5682_SDW_REF_2_24K;
- break;
- case 16000:
- val_p = RT5682_SDW_REF_1_16K;
- val_c = RT5682_SDW_REF_2_16K;
- break;
- case 12000:
- val_p = RT5682_SDW_REF_1_12K;
- val_c = RT5682_SDW_REF_2_12K;
- break;
- case 8000:
- val_p = RT5682_SDW_REF_1_8K;
- val_c = RT5682_SDW_REF_2_8K;
- break;
- case 44100:
- val_p = RT5682_SDW_REF_1_44K;
- val_c = RT5682_SDW_REF_2_44K;
- break;
- case 88200:
- val_p = RT5682_SDW_REF_1_88K;
- val_c = RT5682_SDW_REF_2_88K;
- break;
- case 176400:
- val_p = RT5682_SDW_REF_1_176K;
- val_c = RT5682_SDW_REF_2_176K;
- break;
- case 22050:
- val_p = RT5682_SDW_REF_1_22K;
- val_c = RT5682_SDW_REF_2_22K;
- break;
- case 11025:
- val_p = RT5682_SDW_REF_1_11K;
- val_c = RT5682_SDW_REF_2_11K;
- break;
- default:
- return -EINVAL;
- }
-
- if (params_rate(params) <= 48000) {
- osr_p = RT5682_DAC_OSR_D_8;
- osr_c = RT5682_ADC_OSR_D_8;
- } else if (params_rate(params) <= 96000) {
- osr_p = RT5682_DAC_OSR_D_4;
- osr_c = RT5682_ADC_OSR_D_4;
- } else {
- osr_p = RT5682_DAC_OSR_D_2;
- osr_c = RT5682_ADC_OSR_D_2;
- }
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- regmap_update_bits(rt5682->regmap, RT5682_SDW_REF_CLK,
- RT5682_SDW_REF_1_MASK, val_p);
- regmap_update_bits(rt5682->regmap, RT5682_ADDA_CLK_1,
- RT5682_DAC_OSR_MASK, osr_p);
- } else {
- regmap_update_bits(rt5682->regmap, RT5682_SDW_REF_CLK,
- RT5682_SDW_REF_2_MASK, val_c);
- regmap_update_bits(rt5682->regmap, RT5682_ADDA_CLK_1,
- RT5682_ADC_OSR_MASK, osr_c);
- }
-
- return retval;
-}
-
-static int rt5682_sdw_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
- struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
- struct sdw_stream_data *stream =
- snd_soc_dai_get_dma_data(dai, substream);
-
- if (!rt5682->slave)
- return -EINVAL;
-
- sdw_stream_remove_slave(rt5682->slave, stream->sdw_stream);
- return 0;
-}
-
-static struct snd_soc_dai_ops rt5682_sdw_ops = {
- .hw_params = rt5682_sdw_hw_params,
- .hw_free = rt5682_sdw_hw_free,
- .set_sdw_stream = rt5682_set_sdw_stream,
- .shutdown = rt5682_sdw_shutdown,
-};
-#endif
-
-static struct snd_soc_dai_driver rt5682_dai[] = {
- {
- .name = "rt5682-aif1",
- .id = RT5682_AIF1,
- .playback = {
- .stream_name = "AIF1 Playback",
- .channels_min = 1,
- .channels_max = 2,
- .rates = RT5682_STEREO_RATES,
- .formats = RT5682_FORMATS,
- },
- .capture = {
- .stream_name = "AIF1 Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = RT5682_STEREO_RATES,
- .formats = RT5682_FORMATS,
- },
- .ops = &rt5682_aif1_dai_ops,
- },
- {
- .name = "rt5682-aif2",
- .id = RT5682_AIF2,
- .capture = {
- .stream_name = "AIF2 Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = RT5682_STEREO_RATES,
- .formats = RT5682_FORMATS,
- },
- .ops = &rt5682_aif2_dai_ops,
- },
-#if IS_ENABLED(CONFIG_SND_SOC_RT5682_SDW)
- {
- .name = "rt5682-sdw",
- .id = RT5682_SDW,
- .playback = {
- .stream_name = "SDW Playback",
- .channels_min = 1,
- .channels_max = 2,
- .rates = RT5682_STEREO_RATES,
- .formats = RT5682_FORMATS,
- },
- .capture = {
- .stream_name = "SDW Capture",
- .channels_min = 1,
- .channels_max = 2,
- .rates = RT5682_STEREO_RATES,
- .formats = RT5682_FORMATS,
- },
- .ops = &rt5682_sdw_ops,
- },
-#endif
-};
-
-static const struct snd_soc_component_driver soc_component_dev_rt5682 = {
+const struct snd_soc_component_driver rt5682_soc_component_dev = {
.probe = rt5682_probe,
.remove = rt5682_remove,
.suspend = rt5682_suspend,
@@ -3210,27 +2920,9 @@ static const struct snd_soc_component_driver soc_component_dev_rt5682 = {
.endianness = 1,
.non_legacy_dai_naming = 1,
};
+EXPORT_SYMBOL_GPL(rt5682_soc_component_dev);
-static const struct regmap_config rt5682_regmap = {
- .reg_bits = 16,
- .val_bits = 16,
- .max_register = RT5682_I2C_MODE,
- .volatile_reg = rt5682_volatile_register,
- .readable_reg = rt5682_readable_register,
- .cache_type = REGCACHE_RBTREE,
- .reg_defaults = rt5682_reg,
- .num_reg_defaults = ARRAY_SIZE(rt5682_reg),
- .use_single_read = true,
- .use_single_write = true,
-};
-
-static const struct i2c_device_id rt5682_i2c_id[] = {
- {"rt5682", 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, rt5682_i2c_id);
-
-static int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
+int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
{
device_property_read_u32(dev, "realtek,dmic1-data-pin",
@@ -3258,8 +2950,9 @@ static int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(rt5682_parse_dt);
-static void rt5682_calibrate(struct rt5682_priv *rt5682)
+void rt5682_calibrate(struct rt5682_priv *rt5682)
{
int value, count;
@@ -3296,7 +2989,7 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
}
if (count >= 60)
- pr_err("HP Calibration Failure\n");
+ dev_err(rt5682->component->dev, "HP Calibration Failure\n");
/* restore settings */
regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0x02af);
@@ -3308,415 +3001,8 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
mutex_unlock(&rt5682->calibrate_mutex);
-
-}
-
-#if IS_ENABLED(CONFIG_SND_SOC_RT5682_SDW)
-static int rt5682_sdw_read(void *context, unsigned int reg, unsigned int *val)
-{
- struct device *dev = context;
- struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
- unsigned int data_l, data_h;
-
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_CMD, 0);
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_H, (reg >> 8) & 0xff);
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_L, (reg & 0xff));
- regmap_read(rt5682->sdw_regmap, RT5682_SDW_DATA_H, &data_h);
- regmap_read(rt5682->sdw_regmap, RT5682_SDW_DATA_L, &data_l);
-
- *val = (data_h << 8) | data_l;
-
- dev_vdbg(dev, "[%s] %04x => %04x\n", __func__, reg, *val);
-
- return 0;
-}
-
-static int rt5682_sdw_write(void *context, unsigned int reg, unsigned int val)
-{
- struct device *dev = context;
- struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
-
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_CMD, 1);
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_H, (reg >> 8) & 0xff);
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_ADDR_L, (reg & 0xff));
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_DATA_H, (val >> 8) & 0xff);
- regmap_write(rt5682->sdw_regmap, RT5682_SDW_DATA_L, (val & 0xff));
-
- dev_vdbg(dev, "[%s] %04x <= %04x\n", __func__, reg, val);
-
- return 0;
-}
-
-static const struct regmap_config rt5682_sdw_regmap = {
- .reg_bits = 16,
- .val_bits = 16,
- .max_register = RT5682_I2C_MODE,
- .volatile_reg = rt5682_volatile_register,
- .readable_reg = rt5682_readable_register,
- .cache_type = REGCACHE_RBTREE,
- .reg_defaults = rt5682_reg,
- .num_reg_defaults = ARRAY_SIZE(rt5682_reg),
- .use_single_read = true,
- .use_single_write = true,
- .reg_read = rt5682_sdw_read,
- .reg_write = rt5682_sdw_write,
-};
-
-int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
- struct sdw_slave *slave)
-{
- struct rt5682_priv *rt5682;
- int ret;
-
- rt5682 = devm_kzalloc(dev, sizeof(*rt5682), GFP_KERNEL);
- if (!rt5682)
- return -ENOMEM;
-
- dev_set_drvdata(dev, rt5682);
- rt5682->slave = slave;
- rt5682->sdw_regmap = regmap;
- rt5682->is_sdw = true;
-
- rt5682->regmap = devm_regmap_init(dev, NULL, dev, &rt5682_sdw_regmap);
- if (IS_ERR(rt5682->regmap)) {
- ret = PTR_ERR(rt5682->regmap);
- dev_err(dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
-
- /*
- * Mark hw_init to false
- * HW init will be performed when device reports present
- */
- rt5682->hw_init = false;
- rt5682->first_hw_init = false;
-
- mutex_init(&rt5682->calibrate_mutex);
- INIT_DELAYED_WORK(&rt5682->jack_detect_work,
- rt5682_jack_detect_handler);
-
- ret = devm_snd_soc_register_component(dev, &soc_component_dev_rt5682,
- rt5682_dai, ARRAY_SIZE(rt5682_dai));
-
- dev_dbg(&slave->dev, "%s\n", __func__);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(rt5682_sdw_init);
-
-int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
-{
- struct rt5682_priv *rt5682 = dev_get_drvdata(dev);
- int ret = 0;
- unsigned int val;
-
- if (rt5682->hw_init)
- return 0;
-
- regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
- if (val != DEVICE_ID) {
- pr_err("Device with ID register %x is not rt5682\n", val);
- return -ENODEV;
- }
-
- /*
- * PM runtime is only enabled when a Slave reports as Attached
- */
- if (!rt5682->first_hw_init) {
- /* set autosuspend parameters */
- pm_runtime_set_autosuspend_delay(&slave->dev, 3000);
- pm_runtime_use_autosuspend(&slave->dev);
-
- /* update count of parent 'active' children */
- pm_runtime_set_active(&slave->dev);
-
- /* make sure the device does not suspend immediately */
- pm_runtime_mark_last_busy(&slave->dev);
-
- pm_runtime_enable(&slave->dev);
- }
-
- pm_runtime_get_noresume(&slave->dev);
-
- rt5682_reset(rt5682);
-
- if (rt5682->first_hw_init) {
- regcache_cache_only(rt5682->regmap, false);
- regcache_cache_bypass(rt5682->regmap, true);
- }
-
- rt5682_calibrate(rt5682);
-
- if (rt5682->first_hw_init) {
- regcache_cache_bypass(rt5682->regmap, false);
- regcache_mark_dirty(rt5682->regmap);
- regcache_sync(rt5682->regmap);
-
- /* volatile registers */
- regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
- RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
-
- goto reinit;
- }
-
- ret = regmap_multi_reg_write(rt5682->regmap, patch_list,
- ARRAY_SIZE(patch_list));
- if (ret != 0)
- dev_warn(dev, "Failed to apply regmap patch: %d\n", ret);
-
- regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0000);
-
- regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
- RT5682_LDO1_DVO_MASK | RT5682_HP_DRIVER_MASK,
- RT5682_LDO1_DVO_12 | RT5682_HP_DRIVER_5X);
- regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
- regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
- regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
- RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
- regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
- RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
- regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
- RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
-
- /* Soundwire */
- regmap_write(rt5682->regmap, RT5682_PLL2_INTERNAL, 0xa266);
- regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_1, 0x1700);
- regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_2, 0x0006);
- regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_3, 0x2600);
- regmap_write(rt5682->regmap, RT5682_PLL2_CTRL_4, 0x0c8f);
- regmap_write(rt5682->regmap, RT5682_PLL_TRACK_2, 0x3000);
- regmap_write(rt5682->regmap, RT5682_PLL_TRACK_3, 0x4000);
- regmap_update_bits(rt5682->regmap, RT5682_GLB_CLK,
- RT5682_SCLK_SRC_MASK | RT5682_PLL2_SRC_MASK,
- RT5682_SCLK_SRC_PLL2 | RT5682_PLL2_SRC_SDW);
-
- regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
- RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
- regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
- regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
- RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
- regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
- RT5682_SAR_POW_MASK, RT5682_SAR_POW_EN);
- regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
- RT5682_POW_IRQ | RT5682_POW_JDH |
- RT5682_POW_ANA, RT5682_POW_IRQ |
- RT5682_POW_JDH | RT5682_POW_ANA);
- regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_2,
- RT5682_PWR_JDH, RT5682_PWR_JDH);
- regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
- RT5682_JD1_EN_MASK | RT5682_JD1_IRQ_MASK,
- RT5682_JD1_EN | RT5682_JD1_IRQ_PUL);
-
-reinit:
- mod_delayed_work(system_power_efficient_wq,
- &rt5682->jack_detect_work, msecs_to_jiffies(250));
-
- /* Mark Slave initialization complete */
- rt5682->hw_init = true;
- rt5682->first_hw_init = true;
-
- pm_runtime_mark_last_busy(&slave->dev);
- pm_runtime_put_autosuspend(&slave->dev);
-
- dev_dbg(&slave->dev, "%s hw_init complete\n", __func__);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(rt5682_io_init);
-#endif
-
-static int rt5682_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
- struct rt5682_platform_data *pdata = dev_get_platdata(&i2c->dev);
- struct rt5682_priv *rt5682;
- int i, ret;
- unsigned int val;
-
- rt5682 = devm_kzalloc(&i2c->dev, sizeof(struct rt5682_priv),
- GFP_KERNEL);
-
- if (rt5682 == NULL)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, rt5682);
-
- rt5682->pdata = i2s_default_platform_data;
-
- if (pdata)
- rt5682->pdata = *pdata;
- else
- rt5682_parse_dt(rt5682, &i2c->dev);
-
- rt5682->regmap = devm_regmap_init_i2c(i2c, &rt5682_regmap);
- if (IS_ERR(rt5682->regmap)) {
- ret = PTR_ERR(rt5682->regmap);
- dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(rt5682->supplies); i++)
- rt5682->supplies[i].supply = rt5682_supply_names[i];
-
- ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5682->supplies),
- rt5682->supplies);
- if (ret != 0) {
- dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
- return ret;
- }
-
- ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
- rt5682->supplies);
- if (ret != 0) {
- dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
- return ret;
- }
-
- if (gpio_is_valid(rt5682->pdata.ldo1_en)) {
- if (devm_gpio_request_one(&i2c->dev, rt5682->pdata.ldo1_en,
- GPIOF_OUT_INIT_HIGH, "rt5682"))
- dev_err(&i2c->dev, "Fail gpio_request gpio_ldo\n");
- }
-
-	/* Sleep for 300 ms minimum */
- usleep_range(300000, 350000);
-
- regmap_write(rt5682->regmap, RT5682_I2C_MODE, 0x1);
- usleep_range(10000, 15000);
-
- regmap_read(rt5682->regmap, RT5682_DEVICE_ID, &val);
- if (val != DEVICE_ID) {
- pr_err("Device with ID register %x is not rt5682\n", val);
- return -ENODEV;
- }
-
- rt5682_reset(rt5682);
-
- mutex_init(&rt5682->calibrate_mutex);
- rt5682_calibrate(rt5682);
-
- ret = regmap_multi_reg_write(rt5682->regmap, patch_list,
- ARRAY_SIZE(patch_list));
- if (ret != 0)
- dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
-
- regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0000);
-
- /* DMIC pin*/
- if (rt5682->pdata.dmic1_data_pin != RT5682_DMIC1_NULL) {
- switch (rt5682->pdata.dmic1_data_pin) {
- case RT5682_DMIC1_DATA_GPIO2: /* share with LRCK2 */
- regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
- RT5682_DMIC_1_DP_MASK, RT5682_DMIC_1_DP_GPIO2);
- regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
- RT5682_GP2_PIN_MASK, RT5682_GP2_PIN_DMIC_SDA);
- break;
-
- case RT5682_DMIC1_DATA_GPIO5: /* share with DACDAT1 */
- regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
- RT5682_DMIC_1_DP_MASK, RT5682_DMIC_1_DP_GPIO5);
- regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
- RT5682_GP5_PIN_MASK, RT5682_GP5_PIN_DMIC_SDA);
- break;
-
- default:
- dev_warn(&i2c->dev, "invalid DMIC_DAT pin\n");
- break;
- }
-
- switch (rt5682->pdata.dmic1_clk_pin) {
- case RT5682_DMIC1_CLK_GPIO1: /* share with IRQ */
- regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
- RT5682_GP1_PIN_MASK, RT5682_GP1_PIN_DMIC_CLK);
- break;
-
- case RT5682_DMIC1_CLK_GPIO3: /* share with BCLK2 */
- regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
- RT5682_GP3_PIN_MASK, RT5682_GP3_PIN_DMIC_CLK);
- break;
-
- default:
- dev_warn(&i2c->dev, "invalid DMIC_CLK pin\n");
- break;
- }
- }
-
- regmap_update_bits(rt5682->regmap, RT5682_PWR_ANLG_1,
- RT5682_LDO1_DVO_MASK | RT5682_HP_DRIVER_MASK,
- RT5682_LDO1_DVO_12 | RT5682_HP_DRIVER_5X);
- regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
- regmap_update_bits(rt5682->regmap, RT5682_GPIO_CTRL_1,
- RT5682_GP4_PIN_MASK | RT5682_GP5_PIN_MASK,
- RT5682_GP4_PIN_ADCDAT1 | RT5682_GP5_PIN_DACDAT1);
- regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
- regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
- RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
- regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
- RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
- regmap_update_bits(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1,
- RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
- regmap_update_bits(rt5682->regmap, RT5682_DMIC_CTRL_1,
- RT5682_FIFO_CLK_DIV_MASK, RT5682_FIFO_CLK_DIV_2);
-
- INIT_DELAYED_WORK(&rt5682->jack_detect_work,
- rt5682_jack_detect_handler);
- INIT_DELAYED_WORK(&rt5682->jd_check_work,
- rt5682_jd_check_handler);
-
-
- if (i2c->irq) {
- ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
- rt5682_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
- | IRQF_ONESHOT, "rt5682", rt5682);
- if (ret)
-			dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
-
- }
-
- return devm_snd_soc_register_component(&i2c->dev,
- &soc_component_dev_rt5682,
- rt5682_dai, ARRAY_SIZE(rt5682_dai));
}
-
-static void rt5682_i2c_shutdown(struct i2c_client *client)
-{
- struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
-
- rt5682_reset(rt5682);
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id rt5682_of_match[] = {
- {.compatible = "realtek,rt5682i"},
- {},
-};
-MODULE_DEVICE_TABLE(of, rt5682_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id rt5682_acpi_match[] = {
- {"10EC5682", 0,},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, rt5682_acpi_match);
-#endif
-
-static struct i2c_driver __maybe_unused rt5682_i2c_driver = {
- .driver = {
- .name = "rt5682",
- .of_match_table = of_match_ptr(rt5682_of_match),
- .acpi_match_table = ACPI_PTR(rt5682_acpi_match),
- },
- .probe = rt5682_i2c_probe,
- .shutdown = rt5682_i2c_shutdown,
- .id_table = rt5682_i2c_id,
-};
-
-#ifdef CONFIG_I2C
-module_i2c_driver(rt5682_i2c_driver);
-#endif
+EXPORT_SYMBOL_GPL(rt5682_calibrate);
MODULE_DESCRIPTION("ASoC RT5682 driver");
MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 0baeece84ec4..f172c9ebd227 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1337,6 +1337,13 @@
#define RT5682_SAR_SOUR_BTN (0x3f)
#define RT5682_SAR_SOUR_TYPE (0x0)
+/* soundwire timeout */
+#define RT5682_PROBE_TIMEOUT 2000
+
+
+#define RT5682_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5682_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
/* System Clock Source */
enum {
@@ -1418,10 +1425,29 @@ struct rt5682_priv {
int jack_type;
};
+extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES];
+
int rt5682_sel_asrc_clk_src(struct snd_soc_component *component,
unsigned int filter_mask, unsigned int clk_src);
-int rt5682_sdw_init(struct device *dev, struct regmap *regmap,
- struct sdw_slave *slave);
-int rt5682_io_init(struct device *dev, struct sdw_slave *slave);
+
+void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev);
+
+int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert);
+void rt5682_jack_detect_handler(struct work_struct *work);
+
+bool rt5682_volatile_register(struct device *dev, unsigned int reg);
+bool rt5682_readable_register(struct device *dev, unsigned int reg);
+
+int rt5682_register_component(struct device *dev);
+void rt5682_calibrate(struct rt5682_priv *rt5682);
+void rt5682_reset(struct rt5682_priv *rt5682);
+int rt5682_parse_dt(struct rt5682_priv *rt5682, struct device *dev);
+
+#define RT5682_REG_NUM 318
+extern const struct reg_default rt5682_reg[RT5682_REG_NUM];
+
+extern const struct snd_soc_dai_ops rt5682_aif1_dai_ops;
+extern const struct snd_soc_dai_ops rt5682_aif2_dai_ops;
+extern const struct snd_soc_component_driver rt5682_soc_component_dev;
#endif /* __RT5682_H__ */
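The header changes above complete the rt5682 refactor: the common code now exports its component driver, register defaults, DAI ops and helpers so that separate bus front-ends (I2C, SoundWire) can reuse them. As a rough illustration only, a bus probe built on these exports could look like the sketch below; the probe name and the regmap argument are hypothetical, everything else comes from the declarations above.

/* Illustrative sketch, not part of the patch. */
static int rt5682_example_bus_probe(struct device *dev, struct regmap *regmap)
{
	struct rt5682_priv *rt5682;

	rt5682 = devm_kzalloc(dev, sizeof(*rt5682), GFP_KERNEL);
	if (!rt5682)
		return -ENOMEM;

	dev_set_drvdata(dev, rt5682);
	rt5682->regmap = regmap;

	mutex_init(&rt5682->calibrate_mutex);
	rt5682_parse_dt(rt5682, dev);		/* exported helper */
	rt5682_calibrate(rt5682);		/* exported helper */

	/* registers rt5682_soc_component_dev and the shared DAIs */
	return rt5682_register_component(dev);
}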
diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
index d4e0f953bcce..4d14048d1197 100644
--- a/sound/soc/codecs/rt700-sdw.c
+++ b/sound/soc/codecs/rt700-sdw.c
@@ -450,9 +450,6 @@ static int rt700_sdw_probe(struct sdw_slave *slave,
{
struct regmap *sdw_regmap, *regmap;
- /* Assign ops */
- slave->ops = &rt700_slave_ops;
-
/* Regmap Initialization */
sdw_regmap = devm_regmap_init_sdw(slave, &rt700_sdw_regmap);
if (!sdw_regmap)
diff --git a/sound/soc/codecs/rt700.c b/sound/soc/codecs/rt700.c
index ff68f0e4f629..687ac2153666 100644
--- a/sound/soc/codecs/rt700.c
+++ b/sound/soc/codecs/rt700.c
@@ -860,6 +860,9 @@ static int rt700_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
{
struct sdw_stream_data *stream;
+ if (!sdw_stream)
+ return 0;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
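The same guard is added to rt711 and rt715 below: when the machine driver clears the stream by passing a NULL sdw_stream, the callback now returns early instead of allocating a wrapper for nothing. A minimal sketch of the resulting pattern (how the wrapper is attached to the DAI is driver-specific and omitted here):

/* Illustrative sketch of the guarded set_sdw_stream callback. */
static int example_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
				  int direction)
{
	struct sdw_stream_data *stream;

	if (!sdw_stream)	/* stream is being cleared: nothing to do */
		return 0;

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream)
		return -ENOMEM;

	stream->sdw_stream = sdw_stream;
	/* attach 'stream' to the DAI's dma data as the drivers above do */

	return 0;
}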
diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
index fc3a3fa3d51b..45b928954b58 100644
--- a/sound/soc/codecs/rt711-sdw.c
+++ b/sound/soc/codecs/rt711-sdw.c
@@ -450,9 +450,6 @@ static int rt711_sdw_probe(struct sdw_slave *slave,
{
struct regmap *sdw_regmap, *regmap;
- /* Assign ops */
- slave->ops = &rt711_slave_ops;
-
/* Regmap Initialization */
sdw_regmap = devm_regmap_init_sdw(slave, &rt711_sdw_regmap);
if (!sdw_regmap)
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 2daed7692a3b..65b59dbfb43c 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -906,6 +906,9 @@ static int rt711_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
{
struct sdw_stream_data *stream;
+ if (!sdw_stream)
+ return 0;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index 64ef56ef0318..d11b23d6b240 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -525,9 +525,6 @@ static int rt715_sdw_probe(struct sdw_slave *slave,
{
struct regmap *sdw_regmap, *regmap;
- /* Assign ops */
- slave->ops = &rt715_slave_ops;
-
/* Regmap Initialization */
sdw_regmap = devm_regmap_init_sdw(slave, &rt715_sdw_regmap);
if (!sdw_regmap)
diff --git a/sound/soc/codecs/rt715.c b/sound/soc/codecs/rt715.c
index 2cbc57b16b13..099c8bd20006 100644
--- a/sound/soc/codecs/rt715.c
+++ b/sound/soc/codecs/rt715.c
@@ -530,6 +530,9 @@ static int rt715_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
struct sdw_stream_data *stream;
+ if (!sdw_stream)
+ return 0;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream)
return -ENOMEM;
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index db4b3ec55311..e9ccebbc31e4 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -893,13 +893,13 @@ static int sta32x_probe(struct snd_soc_component *component)
sta32x->supplies);
if (ret != 0) {
dev_err(component->dev, "Failed to enable supplies: %d\n", ret);
- return ret;
+ goto err_clk_disable_unprepare;
}
ret = sta32x_startup_sequence(sta32x);
if (ret < 0) {
dev_err(component->dev, "Failed to startup device\n");
- return ret;
+ goto err_regulator_bulk_disable;
}
/* CONFA */
@@ -983,6 +983,13 @@ static int sta32x_probe(struct snd_soc_component *component)
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
return 0;
+
+err_regulator_bulk_disable:
+ regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
+err_clk_disable_unprepare:
+ if (sta32x->xti_clk)
+ clk_disable_unprepare(sta32x->xti_clk);
+ return ret;
}
static void sta32x_remove(struct snd_soc_component *component)
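The sta32x hunk converts the early returns in the probe path into goto-based unwinding, so the clock prepared earlier and any enabled regulators are released when a later step fails. A generic sketch of the pattern with hypothetical names (example_priv, example_startup_sequence), not the sta32x code itself:

/* Illustrative sketch of the error-unwind pattern. */
static int example_component_probe(struct snd_soc_component *component)
{
	struct example_priv *priv = snd_soc_component_get_drvdata(component);
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
	if (ret)
		goto err_clk_disable_unprepare;	/* clock was prepared earlier */

	ret = example_startup_sequence(priv);	/* hypothetical */
	if (ret < 0)
		goto err_regulator_bulk_disable;

	return 0;

err_regulator_bulk_disable:
	regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
err_clk_disable_unprepare:
	if (priv->xti_clk)
		clk_disable_unprepare(priv->xti_clk);
	return ret;
}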
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 56671f21cfe5..d90e5f2b6f27 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -602,6 +602,7 @@ static int tas2552_component_probe(struct snd_soc_component *component)
return 0;
probe_fail:
+ pm_runtime_put_noidle(component->dev);
gpiod_set_value(tas2552->enable_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies),
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
index 38897568ee96..35fe8ee5bce9 100644
--- a/sound/soc/codecs/tlv320adcx140.c
+++ b/sound/soc/codecs/tlv320adcx140.c
@@ -180,6 +180,17 @@ static const struct snd_kcontrol_new decimation_filter_controls[] = {
SOC_DAPM_ENUM("Decimation Filter", decimation_filter_enum),
};
+static const char * const pdmclk_text[] = {
+ "2.8224 MHz", "1.4112 MHz", "705.6 kHz", "5.6448 MHz"
+};
+
+static SOC_ENUM_SINGLE_DECL(pdmclk_select_enum, ADCX140_PDMCLK_CFG, 0,
+ pdmclk_text);
+
+static const struct snd_kcontrol_new pdmclk_div_controls[] = {
+ SOC_DAPM_ENUM("PDM Clk Divider Select", pdmclk_select_enum),
+};
+
static const char * const resistor_text[] = {
"2.5 kOhm", "10 kOhm", "20 kOhm"
};
@@ -416,6 +427,9 @@ static const struct snd_soc_dapm_widget adcx140_dapm_widgets[] = {
SND_SOC_DAPM_MUX("IN4 Analog Mic Resistor", SND_SOC_NOPM, 0, 0,
in4_resistor_controls),
+ SND_SOC_DAPM_MUX("PDM Clk Div Select", SND_SOC_NOPM, 0, 0,
+ pdmclk_div_controls),
+
SND_SOC_DAPM_MUX("Decimation Filter", SND_SOC_NOPM, 0, 0,
decimation_filter_controls),
};
@@ -493,6 +507,11 @@ static const struct snd_soc_dapm_route adcx140_audio_map[] = {
{"IN4 Analog Mic Resistor", "10 kOhm", "MIC4M Input Mux"},
{"IN4 Analog Mic Resistor", "20 kOhm", "MIC4M Input Mux"},
+ {"PDM Clk Div Select", "2.8224 MHz", "MIC1P Input Mux"},
+ {"PDM Clk Div Select", "1.4112 MHz", "MIC1P Input Mux"},
+ {"PDM Clk Div Select", "705.6 kHz", "MIC1P Input Mux"},
+ {"PDM Clk Div Select", "5.6448 MHz", "MIC1P Input Mux"},
+
{"MIC1 Analog Mux", "Line In", "MIC1P"},
{"MIC2 Analog Mux", "Line In", "MIC2P"},
{"MIC3 Analog Mux", "Line In", "MIC3P"},
@@ -511,11 +530,11 @@ static const struct snd_soc_dapm_route adcx140_audio_map[] = {
static const struct snd_kcontrol_new adcx140_snd_controls[] = {
SOC_SINGLE_TLV("Analog CH1 Mic Gain Volume", ADCX140_CH1_CFG1, 2, 42, 0,
adc_tlv),
- SOC_SINGLE_TLV("Analog CH2 Mic Gain Volume", ADCX140_CH1_CFG2, 2, 42, 0,
+ SOC_SINGLE_TLV("Analog CH2 Mic Gain Volume", ADCX140_CH2_CFG1, 2, 42, 0,
adc_tlv),
- SOC_SINGLE_TLV("Analog CH3 Mic Gain Volume", ADCX140_CH1_CFG3, 2, 42, 0,
+ SOC_SINGLE_TLV("Analog CH3 Mic Gain Volume", ADCX140_CH3_CFG1, 2, 42, 0,
adc_tlv),
- SOC_SINGLE_TLV("Analog CH4 Mic Gain Volume", ADCX140_CH1_CFG4, 2, 42, 0,
+ SOC_SINGLE_TLV("Analog CH4 Mic Gain Volume", ADCX140_CH4_CFG1, 2, 42, 0,
adc_tlv),
SOC_SINGLE_TLV("DRE Threshold", ADCX140_DRE_CFG0, 4, 9, 0,
@@ -563,7 +582,7 @@ static int adcx140_reset(struct adcx140_priv *adcx140)
/* 8.4.2: wait >= 10 ms after entering sleep mode. */
usleep_range(10000, 100000);
- return 0;
+ return ret;
}
static int adcx140_hw_params(struct snd_pcm_substream *substream,
@@ -739,33 +758,82 @@ static int adcx140_codec_probe(struct snd_soc_component *component)
{
struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component);
int sleep_cfg_val = ADCX140_WAKE_DEV;
- u8 bias_source;
- u8 vref_source;
+ u32 bias_source;
+ u32 vref_source;
+ u8 bias_cfg;
+ int pdm_count;
+ u32 pdm_edges[ADCX140_NUM_PDM_EDGES];
+ u32 pdm_edge_val = 0;
+ int gpi_count;
+ u32 gpi_inputs[ADCX140_NUM_GPI_PINS];
+ u32 gpi_input_val = 0;
+ int i;
int ret;
- ret = device_property_read_u8(adcx140->dev, "ti,mic-bias-source",
+ ret = device_property_read_u32(adcx140->dev, "ti,mic-bias-source",
&bias_source);
if (ret)
bias_source = ADCX140_MIC_BIAS_VAL_VREF;
- if (bias_source < ADCX140_MIC_BIAS_VAL_VREF ||
- bias_source > ADCX140_MIC_BIAS_VAL_AVDD) {
+ if (bias_source > ADCX140_MIC_BIAS_VAL_AVDD) {
dev_err(adcx140->dev, "Mic Bias source value is invalid\n");
return -EINVAL;
}
- ret = device_property_read_u8(adcx140->dev, "ti,vref-source",
+ ret = device_property_read_u32(adcx140->dev, "ti,vref-source",
&vref_source);
if (ret)
vref_source = ADCX140_MIC_BIAS_VREF_275V;
- if (vref_source < ADCX140_MIC_BIAS_VREF_275V ||
- vref_source > ADCX140_MIC_BIAS_VREF_1375V) {
+ if (vref_source > ADCX140_MIC_BIAS_VREF_1375V) {
dev_err(adcx140->dev, "Mic Bias source value is invalid\n");
return -EINVAL;
}
- bias_source |= vref_source;
+ bias_cfg = bias_source << ADCX140_MIC_BIAS_SHIFT | vref_source;
+
+ pdm_count = device_property_count_u32(adcx140->dev,
+ "ti,pdm-edge-select");
+ if (pdm_count <= ADCX140_NUM_PDM_EDGES && pdm_count > 0) {
+ ret = device_property_read_u32_array(adcx140->dev,
+ "ti,pdm-edge-select",
+ pdm_edges, pdm_count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pdm_count; i++)
+ pdm_edge_val |= pdm_edges[i] << (ADCX140_PDM_EDGE_SHIFT - i);
+
+ ret = regmap_write(adcx140->regmap, ADCX140_PDM_CFG,
+ pdm_edge_val);
+ if (ret)
+ return ret;
+ }
+
+ gpi_count = device_property_count_u32(adcx140->dev, "ti,gpi-config");
+ if (gpi_count <= ADCX140_NUM_GPI_PINS && gpi_count > 0) {
+ ret = device_property_read_u32_array(adcx140->dev,
+ "ti,gpi-config",
+ gpi_inputs, gpi_count);
+ if (ret)
+ return ret;
+
+ gpi_input_val = gpi_inputs[ADCX140_GPI1_INDEX] << ADCX140_GPI_SHIFT |
+ gpi_inputs[ADCX140_GPI2_INDEX];
+
+ ret = regmap_write(adcx140->regmap, ADCX140_GPI_CFG0,
+ gpi_input_val);
+ if (ret)
+ return ret;
+
+ gpi_input_val = gpi_inputs[ADCX140_GPI3_INDEX] << ADCX140_GPI_SHIFT |
+ gpi_inputs[ADCX140_GPI4_INDEX];
+
+ ret = regmap_write(adcx140->regmap, ADCX140_GPI_CFG1,
+ gpi_input_val);
+ if (ret)
+ return ret;
+ }
ret = adcx140_reset(adcx140);
if (ret)
@@ -785,7 +853,7 @@ static int adcx140_codec_probe(struct snd_soc_component *component)
ret = regmap_update_bits(adcx140->regmap, ADCX140_BIAS_CFG,
ADCX140_MIC_BIAS_VAL_MSK |
- ADCX140_MIC_BIAS_VREF_MSK, bias_source);
+ ADCX140_MIC_BIAS_VREF_MSK, bias_cfg);
if (ret)
dev_err(adcx140->dev, "setting MIC bias failed %d\n", ret);
out:
diff --git a/sound/soc/codecs/tlv320adcx140.h b/sound/soc/codecs/tlv320adcx140.h
index 6d055e55909e..39206bf1af12 100644
--- a/sound/soc/codecs/tlv320adcx140.h
+++ b/sound/soc/codecs/tlv320adcx140.h
@@ -116,6 +116,7 @@
#define ADCX140_MIC_BIAS_VAL_VREF_1096 1
#define ADCX140_MIC_BIAS_VAL_AVDD 6
#define ADCX140_MIC_BIAS_VAL_MSK GENMASK(6, 4)
+#define ADCX140_MIC_BIAS_SHIFT 4
#define ADCX140_MIC_BIAS_VREF_275V 0
#define ADCX140_MIC_BIAS_VREF_25V 1
@@ -128,4 +129,14 @@
#define ADCX140_TX_OFFSET_MASK GENMASK(4, 0)
+#define ADCX140_NUM_PDM_EDGES 4
+#define ADCX140_PDM_EDGE_SHIFT 7
+
+#define ADCX140_NUM_GPI_PINS 4
+#define ADCX140_GPI_SHIFT 4
+#define ADCX140_GPI1_INDEX 0
+#define ADCX140_GPI2_INDEX 1
+#define ADCX140_GPI3_INDEX 2
+#define ADCX140_GPI4_INDEX 3
+
#endif /* _TLV320ADCX140_ */
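The new ADCX140_NUM_* and *_SHIFT definitions back the probe changes above, where ti,pdm-edge-select and ti,gpi-config are read as u32 arrays and packed into single register values. A condensed sketch of the count-then-read-then-pack step for the PDM edges; dev and regmap stand in for the driver's adcx140->dev and adcx140->regmap:

/* Illustrative sketch of the property-array handling added above. */
static int example_cfg_pdm_edges(struct device *dev, struct regmap *regmap)
{
	u32 pdm_edges[ADCX140_NUM_PDM_EDGES];
	u32 pdm_edge_val = 0;
	int pdm_count, i, ret;

	pdm_count = device_property_count_u32(dev, "ti,pdm-edge-select");
	if (pdm_count <= 0 || pdm_count > ADCX140_NUM_PDM_EDGES)
		return 0;	/* property absent or out of range: keep defaults */

	ret = device_property_read_u32_array(dev, "ti,pdm-edge-select",
					     pdm_edges, pdm_count);
	if (ret)
		return ret;

	/* entry 0 lands in bit 7, entry 1 in bit 6, and so on */
	for (i = 0; i < pdm_count; i++)
		pdm_edge_val |= pdm_edges[i] << (ADCX140_PDM_EDGE_SHIFT - i);

	return regmap_write(regmap, ADCX140_PDM_CFG, pdm_edge_val);
}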
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index f8e2f4b74db3..9868fb22323c 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -394,7 +394,7 @@ static void tlv320aic23_shutdown(struct snd_pcm_substream *substream,
struct aic23 *aic23 = snd_soc_component_get_drvdata(component);
/* deactivate */
- if (!snd_soc_component_is_active(component)) {
+ if (!snd_soc_component_active(component)) {
udelay(50);
snd_soc_component_write(component, TLV320AIC23_ACTIVE, 0x0);
}
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 808654b10deb..d905e03aaec7 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -449,7 +449,7 @@ static int dac33_set_fifo_mode(struct snd_kcontrol *kcontrol,
if (dac33->fifo_mode == ucontrol->value.enumerated.item[0])
return 0;
/* Do not allow changes while stream is running*/
- if (snd_soc_component_is_active(component))
+ if (snd_soc_component_active(component))
return -EPERM;
if (ucontrol->value.enumerated.item[0] >= DAC33_FIFO_LAST_MODE)
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 26b2ee428aee..89f2bfeeb70e 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -110,7 +110,7 @@ static int uda1380_write(struct snd_soc_component *component, unsigned int reg,
/* the interpolator & decimator regs must only be written when the
* codec DAI is active.
*/
- if (!snd_soc_component_is_active(component) && (reg >= UDA1380_MVOL))
+ if (!snd_soc_component_active(component) && (reg >= UDA1380_MVOL))
return 0;
pr_debug("uda1380: hw write %x val %x\n", reg, value);
if (i2c_master_send(uda1380->i2c, data, 3) == 3) {
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 700cc1212770..fb073f4dc7ed 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -1919,7 +1919,7 @@ static int wcd9335_hw_params(struct snd_pcm_substream *substream,
__func__, params_rate(params));
return -EINVAL;
- };
+ }
ret = wcd9335_set_decimator_rate(dai, tx_fs_rate,
params_rate(params));
@@ -1935,13 +1935,13 @@ static int wcd9335_hw_params(struct snd_pcm_substream *substream,
dev_err(wcd->dev, "%s: Invalid format 0x%x\n",
__func__, params_width(params));
return -EINVAL;
- };
+ }
break;
default:
dev_err(wcd->dev, "Invalid stream type %d\n",
substream->stream);
return -EINVAL;
- };
+ }
wcd->dai[dai->id].sconfig.rate = params_rate(params);
wcd9335_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
@@ -2216,7 +2216,7 @@ static int wcd9335_set_compander(struct snd_kcontrol *kc,
break;
default:
break;
- };
+ }
return 0;
}
@@ -2565,7 +2565,7 @@ static int wcd9335_micbias_control(struct snd_soc_component *component,
0xC0, 0x00);
}
break;
- };
+ }
return 0;
}
@@ -2603,7 +2603,7 @@ static int __wcd9335_codec_enable_micbias(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
wcd9335_micbias_control(comp, micb_num, MICB_DISABLE, true);
break;
- };
+ }
return 0;
}
@@ -2846,7 +2846,7 @@ static int wcd9335_codec_enable_dec(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMD:
snd_soc_component_update_bits(comp, tx_vol_ctl_reg, 0x10, 0x00);
break;
- };
+ }
out:
kfree(wname);
return ret;
@@ -2952,7 +2952,7 @@ static int wcd9335_codec_enable_dmic(struct snd_soc_dapm_widget *w,
dev_err(comp->dev, "%s: Invalid DMIC Selection\n",
__func__);
return -EINVAL;
- };
+ }
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -2985,7 +2985,7 @@ static int wcd9335_codec_enable_dmic(struct snd_soc_dapm_widget *w,
dmic_rate_val << dmic_rate_shift);
}
break;
- };
+ }
return 0;
}
@@ -3076,7 +3076,7 @@ static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
dev_err(comp->dev, "%s: No gain register avail for %s\n",
__func__, w->name);
return 0;
- };
+ }
switch (event) {
case SND_SOC_DAPM_POST_PMU:
@@ -3086,7 +3086,7 @@ static int wcd9335_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_POST_PMD:
break;
- };
+ }
return 0;
}
@@ -3141,7 +3141,7 @@ static u16 wcd9335_interp_get_primary_reg(u16 reg, u16 *ind)
prim_int_reg = WCD9335_CDC_RX8_RX_PATH_CTL;
*ind = 8;
break;
- };
+ }
return prim_int_reg;
}
@@ -3229,7 +3229,7 @@ static int wcd9335_codec_enable_prim_interpolator(
wcd9335_codec_hd2_control(comp, prim_int_reg, event);
}
break;
- };
+ }
return 0;
}
@@ -3352,7 +3352,7 @@ static int wcd9335_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
wcd9335_config_compander(comp, w->shift, event);
wcd9335_codec_enable_prim_interpolator(comp, reg, event);
break;
- };
+ }
return 0;
}
@@ -3575,7 +3575,7 @@ static int wcd9335_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
((hph_mode == CLS_H_LOHIFI) ?
CLS_H_HIFI : hph_mode));
break;
- };
+ }
return 0;
}
@@ -3616,7 +3616,7 @@ static int wcd9335_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_POST_PA,
WCD_CLSH_STATE_EAR, CLS_H_NORMAL);
break;
- };
+ }
return 0;
}
@@ -3725,7 +3725,7 @@ static int wcd9335_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD_CLSH_STATE_HPHR, ((hph_mode == CLS_H_LOHIFI) ?
CLS_H_HIFI : hph_mode));
break;
- };
+ }
return 0;
}
@@ -3773,7 +3773,7 @@ static int wcd9335_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
*/
usleep_range(5000, 5500);
break;
- };
+ }
return 0;
}
@@ -3829,7 +3829,7 @@ static int wcd9335_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
*/
usleep_range(5000, 5500);
break;
- };
+ }
return 0;
}
@@ -3875,7 +3875,7 @@ static int wcd9335_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
WCD9335_ANA_RX_BIAS_ENABLE_MASK,
WCD9335_ANA_RX_BIAS_DISABLE);
break;
- };
+ }
return 0;
}
@@ -3921,7 +3921,7 @@ static int wcd9335_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
*/
usleep_range(5000, 5500);
break;
- };
+ }
return 0;
}
@@ -3957,7 +3957,7 @@ static int wcd9335_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
usleep_range(5000, 5500);
break;
- };
+ }
return 0;
}
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 5269857e2746..531b8b79e55f 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -1787,7 +1787,7 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
params_rate(params));
return -EINVAL;
- };
+ }
ret = wcd934x_set_decimator_rate(dai, tx_fs_rate,
params_rate(params));
@@ -1803,13 +1803,13 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
dev_err(wcd->dev, "Invalid format 0x%x\n",
params_width(params));
return -EINVAL;
- };
+ }
break;
default:
dev_err(wcd->dev, "Invalid stream type %d\n",
substream->stream);
return -EINVAL;
- };
+ }
wcd->dai[dai->id].sconfig.rate = params_rate(params);
wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
@@ -2489,7 +2489,7 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
break;
default:
break;
- };
+ }
return 0;
}
@@ -3539,7 +3539,7 @@ static int wcd934x_codec_enable_mix_path(struct snd_soc_dapm_widget *w,
val += offset_val;
snd_soc_component_write(comp, gain_reg, val);
break;
- };
+ }
return 0;
}
@@ -3593,7 +3593,7 @@ static int wcd934x_codec_enable_main_path(struct snd_soc_dapm_widget *w,
snd_soc_component_write(comp, gain_reg,
snd_soc_component_read32(comp, gain_reg));
break;
- };
+ }
return 0;
}
@@ -3618,7 +3618,7 @@ static int wcd934x_codec_ear_dac_event(struct snd_soc_dapm_widget *w,
wcd_clsh_ctrl_set_state(wcd->clsh_ctrl, WCD_CLSH_EVENT_POST_PA,
WCD_CLSH_STATE_EAR, CLS_H_NORMAL);
break;
- };
+ }
return 0;
}
@@ -3670,7 +3670,7 @@ static int wcd934x_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
break;
default:
break;
- };
+ }
return 0;
}
@@ -3720,7 +3720,7 @@ static int wcd934x_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
break;
default:
break;
- };
+ }
return 0;
}
@@ -3801,7 +3801,7 @@ static int wcd934x_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
*/
usleep_range(20000, 20100);
break;
- };
+ }
return 0;
}
@@ -3863,7 +3863,7 @@ static int wcd934x_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
*/
usleep_range(20000, 20100);
break;
- };
+ }
return 0;
}
@@ -3878,7 +3878,7 @@ static u32 wcd934x_get_dmic_sample_rate(struct snd_soc_component *comp,
u16 adc_mux_ctl_reg, tx_fs_reg;
u32 dmic_fs;
- while (dec_found == 0 && adc_mux_index < WCD934X_MAX_VALID_ADC_MUX) {
+ while (!dec_found && adc_mux_index < WCD934X_MAX_VALID_ADC_MUX) {
if (adc_mux_index < 4) {
adc_mux_ctl_reg = WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 +
(adc_mux_index * 2);
@@ -4015,7 +4015,7 @@ static int wcd934x_codec_enable_dmic(struct snd_soc_dapm_widget *w,
dev_err(comp->dev, "%s: Invalid DMIC Selection\n",
__func__);
return -EINVAL;
- };
+ }
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -4040,7 +4040,7 @@ static int wcd934x_codec_enable_dmic(struct snd_soc_dapm_widget *w,
snd_soc_component_update_bits(comp, dmic_clk_reg,
dmic_clk_en, 0);
break;
- };
+ }
return 0;
}
@@ -4267,7 +4267,7 @@ static int wcd934x_codec_enable_dec(struct snd_soc_dapm_widget *w,
WCD934X_DEC_PWR_LVL_MASK,
WCD934X_DEC_PWR_LVL_DF);
break;
- };
+ }
out:
kfree(wname);
return ret;
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index b30bfcd6a125..c56b9329240f 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -183,7 +183,7 @@ static int snd_wl1273_set_audio_route(struct snd_kcontrol *kcontrol,
return 0;
/* Do not allow changes while stream is running */
- if (snd_soc_component_is_active(component))
+ if (snd_soc_component_active(component))
return -EPERM;
if (ucontrol->value.enumerated.item[0] >= ARRAY_SIZE(wl1273_audio_route))
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index d6d4b4121369..2ed3fa67027d 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1909,10 +1909,9 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
},
};
-static int wm5102_open(struct snd_compr_stream *stream)
+static int wm5102_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
- struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct wm5102_priv *priv = snd_soc_component_get_drvdata(component);
return wm_adsp_compr_open(&priv->core.adsp[0], stream);
@@ -1992,7 +1991,7 @@ static unsigned int wm5102_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct snd_compr_ops wm5102_compr_ops = {
+static struct snd_compress_ops wm5102_compress_ops = {
.open = wm5102_open,
.free = wm_adsp_compr_free,
.set_params = wm_adsp_compr_set_params,
@@ -2008,7 +2007,7 @@ static const struct snd_soc_component_driver soc_component_dev_wm5102 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = wm5102_set_fll,
.name = DRV_NAME,
- .compr_ops = &wm5102_compr_ops,
+ .compress_ops = &wm5102_compress_ops,
.controls = wm5102_snd_controls,
.num_controls = ARRAY_SIZE(wm5102_snd_controls),
.dapm_widgets = wm5102_dapm_widgets,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 499e87d1dfcc..44de44bff423 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2237,10 +2237,10 @@ static struct snd_soc_dai_driver wm5110_dai[] = {
},
};
-static int wm5110_open(struct snd_compr_stream *stream)
+static int wm5110_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct wm5110_priv *priv = snd_soc_component_get_drvdata(component);
struct arizona *arizona = priv->core.arizona;
int n_adsp;
@@ -2355,7 +2355,7 @@ static unsigned int wm5110_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_6R,
};
-static struct snd_compr_ops wm5110_compr_ops = {
+static struct snd_compress_ops wm5110_compress_ops = {
.open = wm5110_open,
.free = wm_adsp_compr_free,
.set_params = wm_adsp_compr_set_params,
@@ -2371,7 +2371,7 @@ static const struct snd_soc_component_driver soc_component_dev_wm5110 = {
.set_sysclk = arizona_set_sysclk,
.set_pll = wm5110_set_fll,
.name = DRV_NAME,
- .compr_ops = &wm5110_compr_ops,
+ .compress_ops = &wm5110_compress_ops,
.controls = wm5110_snd_controls,
.num_controls = ARRAY_SIZE(wm5110_snd_controls),
.dapm_widgets = wm5110_dapm_widgets,
diff --git a/sound/soc/codecs/wm8524.c b/sound/soc/codecs/wm8524.c
index 91e3d1570c45..4e9ab542f648 100644
--- a/sound/soc/codecs/wm8524.c
+++ b/sound/soc/codecs/wm8524.c
@@ -159,7 +159,9 @@ static int wm8524_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
#define WM8524_RATES SNDRV_PCM_RATE_8000_192000
-#define WM8524_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+#define WM8524_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops wm8524_dai_ops = {
.startup = wm8524_startup,
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 8036b18fdeb9..5ad905dd78b7 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -198,7 +198,7 @@ static void wm8711_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_component *component = dai->component;
/* deactivate */
- if (!snd_soc_component_is_active(component)) {
+ if (!snd_soc_component_active(component)) {
udelay(50);
snd_soc_component_write(component, WM8711_ACTIVE, 0x0);
}
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 95a12718f3af..8753c55c73fa 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -241,7 +241,7 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
if (wm8753->dai_func == ucontrol->value.enumerated.item[0])
return 0;
- if (snd_soc_component_is_active(component))
+ if (snd_soc_component_active(component))
return -EBUSY;
ioctl = snd_soc_component_read32(component, WM8753_IOCTL);
@@ -1304,7 +1304,7 @@ static int wm8753_mute(struct snd_soc_dai *dai, int mute)
/* the digital mute covers the HiFi and Voice DAC's on the WM8753.
* make sure we check if they are not both active when we mute */
if (mute && wm8753->dai_func == 1) {
- if (!snd_soc_component_is_active(component))
+ if (!snd_soc_component_active(component))
snd_soc_component_write(component, WM8753_DAC, mute_reg | 0x8);
} else {
if (mute)
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index aa5577e364d0..f89855c616eb 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -7,7 +7,7 @@
* Author: Johannes Stezenbach <js@sig21.net>
*
* based on ad73311.c
- * Copyright: Analog Device Inc.
+ * Copyright: Analog Devices Inc.
* Author: Cliff Cai <cliff.cai@analog.com>
*/
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 271235a69c01..3e239fa9bc8d 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -443,12 +443,6 @@ SOC_SINGLE("LINEOUT2 LP -12dB", WM8900_REG_LOUTMIXCTL1,
};
-static const struct snd_kcontrol_new wm8900_dapm_loutput2_control =
-SOC_DAPM_SINGLE("LINEOUT2L Switch", WM8900_REG_POWER3, 6, 1, 0);
-
-static const struct snd_kcontrol_new wm8900_dapm_routput2_control =
-SOC_DAPM_SINGLE("LINEOUT2R Switch", WM8900_REG_POWER3, 5, 1, 0);
-
static const struct snd_kcontrol_new wm8900_loutmix_controls[] = {
SOC_DAPM_SINGLE("LINPUT3 Bypass Switch", WM8900_REG_LOUTMIXCTL1, 7, 1, 0),
SOC_DAPM_SINGLE("AUX Bypass Switch", WM8900_REG_AUXOUT_CTL, 7, 1, 0),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index d9d59f45833f..1cc23a05ffe4 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -118,7 +118,7 @@ static const struct reg_default wm8962_reg[] = {
{ 5, 0x0018 }, /* R5 - ADC & DAC Control 1 */
{ 6, 0x2008 }, /* R6 - ADC & DAC Control 2 */
{ 7, 0x000A }, /* R7 - Audio Interface 0 */
-
+ { 8, 0x01E4 }, /* R8 - Clocking2 */
{ 9, 0x0300 }, /* R9 - Audio Interface 1 */
{ 10, 0x00C0 }, /* R10 - Left DAC volume */
{ 11, 0x00C0 }, /* R11 - Right DAC volume */
@@ -788,7 +788,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case WM8962_CLOCKING1:
- case WM8962_CLOCKING2:
case WM8962_SOFTWARE_RESET:
case WM8962_THERMAL_SHUTDOWN_STATUS:
case WM8962_ADDITIONAL_CONTROL_4:
@@ -2881,6 +2880,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
ret = pm_runtime_get_sync(component->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(component->dev);
dev_err(component->dev, "Failed to resume device: %d\n", ret);
return ret;
}
@@ -3013,6 +3013,7 @@ static irqreturn_t wm8962_irq(int irq, void *data)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
+ pm_runtime_put_noidle(dev);
dev_err(dev, "Failed to resume: %d\n", ret);
return IRQ_NONE;
}
@@ -3799,8 +3800,8 @@ static int wm8962_runtime_resume(struct device *dev)
/* SYSCLK defaults to on; make sure it is off so we can safely
* write to registers if the device is declocked.
*/
- regmap_update_bits(wm8962->regmap, WM8962_CLOCKING2,
- WM8962_SYSCLK_ENA, 0);
+ regmap_write_bits(wm8962->regmap, WM8962_CLOCKING2,
+ WM8962_SYSCLK_ENA, 0);
/* Ensure we have soft control over all registers */
regmap_update_bits(wm8962->regmap, WM8962_CLOCKING2,
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index cfe7892696dd..499a29b47d5e 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -32,93 +32,14 @@ struct wm8990_priv {
unsigned int pcmclk;
};
-static bool wm8990_volatile_register(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case WM8990_RESET:
- return true;
- default:
- return false;
- }
-}
-
-static const struct reg_default wm8990_reg_defaults[] = {
- { 1, 0x0000 }, /* R1 - Power Management (1) */
- { 2, 0x6000 }, /* R2 - Power Management (2) */
- { 3, 0x0000 }, /* R3 - Power Management (3) */
- { 4, 0x4050 }, /* R4 - Audio Interface (1) */
- { 5, 0x4000 }, /* R5 - Audio Interface (2) */
- { 6, 0x01C8 }, /* R6 - Clocking (1) */
- { 7, 0x0000 }, /* R7 - Clocking (2) */
- { 8, 0x0040 }, /* R8 - Audio Interface (3) */
- { 9, 0x0040 }, /* R9 - Audio Interface (4) */
- { 10, 0x0004 }, /* R10 - DAC CTRL */
- { 11, 0x00C0 }, /* R11 - Left DAC Digital Volume */
- { 12, 0x00C0 }, /* R12 - Right DAC Digital Volume */
- { 13, 0x0000 }, /* R13 - Digital Side Tone */
- { 14, 0x0100 }, /* R14 - ADC CTRL */
- { 15, 0x00C0 }, /* R15 - Left ADC Digital Volume */
- { 16, 0x00C0 }, /* R16 - Right ADC Digital Volume */
-
- { 18, 0x0000 }, /* R18 - GPIO CTRL 1 */
- { 19, 0x1000 }, /* R19 - GPIO1 & GPIO2 */
- { 20, 0x1010 }, /* R20 - GPIO3 & GPIO4 */
- { 21, 0x1010 }, /* R21 - GPIO5 & GPIO6 */
- { 22, 0x8000 }, /* R22 - GPIOCTRL 2 */
- { 23, 0x0800 }, /* R23 - GPIO_POL */
- { 24, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
- { 25, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
- { 26, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
- { 27, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
- { 28, 0x0000 }, /* R28 - Left Output Volume */
- { 29, 0x0000 }, /* R29 - Right Output Volume */
- { 30, 0x0066 }, /* R30 - Line Outputs Volume */
- { 31, 0x0022 }, /* R31 - Out3/4 Volume */
- { 32, 0x0079 }, /* R32 - Left OPGA Volume */
- { 33, 0x0079 }, /* R33 - Right OPGA Volume */
- { 34, 0x0003 }, /* R34 - Speaker Volume */
- { 35, 0x0003 }, /* R35 - ClassD1 */
-
- { 37, 0x0100 }, /* R37 - ClassD3 */
- { 38, 0x0079 }, /* R38 - ClassD4 */
- { 39, 0x0000 }, /* R39 - Input Mixer1 */
- { 40, 0x0000 }, /* R40 - Input Mixer2 */
- { 41, 0x0000 }, /* R41 - Input Mixer3 */
- { 42, 0x0000 }, /* R42 - Input Mixer4 */
- { 43, 0x0000 }, /* R43 - Input Mixer5 */
- { 44, 0x0000 }, /* R44 - Input Mixer6 */
- { 45, 0x0000 }, /* R45 - Output Mixer1 */
- { 46, 0x0000 }, /* R46 - Output Mixer2 */
- { 47, 0x0000 }, /* R47 - Output Mixer3 */
- { 48, 0x0000 }, /* R48 - Output Mixer4 */
- { 49, 0x0000 }, /* R49 - Output Mixer5 */
- { 50, 0x0000 }, /* R50 - Output Mixer6 */
- { 51, 0x0180 }, /* R51 - Out3/4 Mixer */
- { 52, 0x0000 }, /* R52 - Line Mixer1 */
- { 53, 0x0000 }, /* R53 - Line Mixer2 */
- { 54, 0x0000 }, /* R54 - Speaker Mixer */
- { 55, 0x0000 }, /* R55 - Additional Control */
- { 56, 0x0000 }, /* R56 - AntiPOP1 */
- { 57, 0x0000 }, /* R57 - AntiPOP2 */
- { 58, 0x0000 }, /* R58 - MICBIAS */
-
- { 60, 0x0008 }, /* R60 - PLL1 */
- { 61, 0x0031 }, /* R61 - PLL2 */
- { 62, 0x0026 }, /* R62 - PLL3 */
-};
-
#define wm8990_reset(c) snd_soc_component_write(c, WM8990_RESET, 0)
-static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
-
static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
-static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
-
static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
@@ -486,14 +407,6 @@ static SOC_ENUM_SINGLE_DECL(wm8990_ainrmux_enum,
static const struct snd_kcontrol_new wm8990_dapm_ainrmux_controls =
SOC_DAPM_ENUM("Route", wm8990_ainrmux_enum);
-/* RXVOICE */
-static const struct snd_kcontrol_new wm8990_dapm_rxvoice_controls[] = {
-SOC_DAPM_SINGLE_TLV("LIN4/RXN", WM8990_INPUT_MIXER5, WM8990_LR4BVOL_SHIFT,
- WM8990_LR4BVOL_MASK, 0, in_mix_tlv),
-SOC_DAPM_SINGLE_TLV("RIN4/RXP", WM8990_INPUT_MIXER6, WM8990_RL4BVOL_SHIFT,
- WM8990_RL4BVOL_MASK, 0, in_mix_tlv),
-};
-
/* LOMIX */
static const struct snd_kcontrol_new wm8990_dapm_lomix_controls[] = {
SOC_DAPM_SINGLE("LOMIX Right ADC Bypass Switch", WM8990_OUTPUT_MIXER1,
@@ -1306,17 +1219,6 @@ static const struct snd_soc_component_driver soc_component_dev_wm8990 = {
.non_legacy_dai_naming = 1,
};
-static const struct regmap_config wm8990_regmap = {
- .reg_bits = 8,
- .val_bits = 16,
-
- .max_register = WM8990_PLL3,
- .volatile_reg = wm8990_volatile_register,
- .reg_defaults = wm8990_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(wm8990_reg_defaults),
- .cache_type = REGCACHE_RBTREE,
-};
-
static int wm8990_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 93c156782d59..f8375d67e901 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -476,14 +476,6 @@ static SOC_ENUM_SINGLE_DECL(wm8991_ainrmux_enum,
static const struct snd_kcontrol_new wm8991_dapm_ainrmux_controls =
SOC_DAPM_ENUM("Route", wm8991_ainrmux_enum);
-/* RXVOICE */
-static const struct snd_kcontrol_new wm8991_dapm_rxvoice_controls[] = {
- SOC_DAPM_SINGLE_TLV("LIN4RXN", WM8991_INPUT_MIXER5, WM8991_LR4BVOL_SHIFT,
- WM8991_LR4BVOL_MASK, 0, in_mix_tlv),
- SOC_DAPM_SINGLE_TLV("RIN4RXP", WM8991_INPUT_MIXER6, WM8991_RL4BVOL_SHIFT,
- WM8991_RL4BVOL_MASK, 0, in_mix_tlv),
-};
-
/* LOMIX */
static const struct snd_kcontrol_new wm8991_dapm_lomix_controls[] = {
SOC_DAPM_SINGLE("LOMIX Right ADC Bypass Switch", WM8991_OUTPUT_MIXER1,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 15ce64a48a87..55d0b9be6ff0 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -285,7 +285,6 @@ static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
static const DECLARE_TLV_DB_SCALE(wm8994_3d_tlv, -1600, 183, 0);
static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
-static const DECLARE_TLV_DB_SCALE(mixin_boost_tlv, 0, 900, 0);
#define WM8994_DRC_SWITCH(xname, reg, shift) \
SOC_SINGLE_EXT(xname, reg, shift, 1, 0, \
@@ -733,13 +732,6 @@ SOC_SINGLE_TLV("AIF2DAC Noise Gate Threshold Volume",
7, 1, ng_tlv),
};
-static const struct snd_kcontrol_new wm1811_snd_controls[] = {
-SOC_SINGLE_TLV("MIXINL IN1LP Boost Volume", WM8994_INPUT_MIXER_1, 7, 1, 0,
- mixin_boost_tlv),
-SOC_SINGLE_TLV("MIXINL IN1RP Boost Volume", WM8994_INPUT_MIXER_1, 8, 1, 0,
- mixin_boost_tlv),
-};
-
/* We run all mode setting through a function to enforce audio mode */
static void wm1811_jackdet_set_mode(struct snd_soc_component *component, u16 mode)
{
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 1ef69409ccd1..519ca2e69637 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -3509,7 +3509,8 @@ out:
}
EXPORT_SYMBOL_GPL(wm_adsp_compr_open);
-int wm_adsp_compr_free(struct snd_compr_stream *stream)
+int wm_adsp_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct wm_adsp_compr *compr = stream->runtime->private_data;
struct wm_adsp *dsp = compr->dsp;
@@ -3583,7 +3584,8 @@ static inline unsigned int wm_adsp_compr_frag_words(struct wm_adsp_compr *compr)
return compr->size.fragment_size / WM_ADSP_DATA_WORD_SIZE;
}
-int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
+int wm_adsp_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_params *params)
{
struct wm_adsp_compr *compr = stream->runtime->private_data;
@@ -3610,7 +3612,8 @@ int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
}
EXPORT_SYMBOL_GPL(wm_adsp_compr_set_params);
-int wm_adsp_compr_get_caps(struct snd_compr_stream *stream,
+int wm_adsp_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_caps *caps)
{
struct wm_adsp_compr *compr = stream->runtime->private_data;
@@ -3976,7 +3979,8 @@ static int wm_adsp_buffer_get_error(struct wm_adsp_compr_buf *buf)
return 0;
}
-int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd)
+int wm_adsp_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, int cmd)
{
struct wm_adsp_compr *compr = stream->runtime->private_data;
struct wm_adsp *dsp = compr->dsp;
@@ -4139,7 +4143,8 @@ static int wm_adsp_buffer_reenable_irq(struct wm_adsp_compr_buf *buf)
buf->irq_count);
}
-int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
+int wm_adsp_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp)
{
struct wm_adsp_compr *compr = stream->runtime->private_data;
@@ -4297,7 +4302,8 @@ static int wm_adsp_compr_read(struct wm_adsp_compr *compr,
return ntotal;
}
-int wm_adsp_compr_copy(struct snd_compr_stream *stream, char __user *buf,
+int wm_adsp_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, char __user *buf,
size_t count)
{
struct wm_adsp_compr *compr = stream->runtime->private_data;
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 4c481cf20275..1996350b817e 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -190,16 +190,22 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream);
-int wm_adsp_compr_free(struct snd_compr_stream *stream);
-int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
+int wm_adsp_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *stream);
+int wm_adsp_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_params *params);
-int wm_adsp_compr_get_caps(struct snd_compr_stream *stream,
+int wm_adsp_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_caps *caps);
-int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd);
+int wm_adsp_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, int cmd);
int wm_adsp_compr_handle_irq(struct wm_adsp *dsp);
-int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
+int wm_adsp_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp);
-int wm_adsp_compr_copy(struct snd_compr_stream *stream,
+int wm_adsp_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
char __user *buf, size_t count);
int wm_adsp_write_ctl(struct wm_adsp *dsp, const char *name, int type,
unsigned int alg, void *buf, size_t len);
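These prototype changes track the move from snd_compr_ops to snd_compress_ops, whose callbacks receive the component directly instead of looking it up from the stream's private data (see the wm5102/wm5110 hunks above). A sketch of how a codec built on wm_adsp wires the new ops table; the open wrapper and the priv layout are illustrative:

/* Illustrative sketch; member names follow the wm5102 hunk above. */
static int example_compr_open(struct snd_soc_component *component,
			      struct snd_compr_stream *stream)
{
	struct example_priv *priv = snd_soc_component_get_drvdata(component);

	return wm_adsp_compr_open(&priv->dsp, stream);	/* hypothetical priv */
}

static struct snd_compress_ops example_compress_ops = {
	.open		= example_compr_open,
	.free		= wm_adsp_compr_free,
	.set_params	= wm_adsp_compr_set_params,
	.get_caps	= wm_adsp_compr_get_caps,
	.trigger	= wm_adsp_compr_trigger,
	.pointer	= wm_adsp_compr_pointer,
	.copy		= wm_adsp_compr_copy,
};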
diff --git a/sound/soc/codecs/zl38060.c b/sound/soc/codecs/zl38060.c
new file mode 100644
index 000000000000..42726dc0ba39
--- /dev/null
+++ b/sound/soc/codecs/zl38060.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Codec driver for Microsemi ZL38060 Connected Home Audio Processor.
+//
+// Copyright(c) 2020 Sven Van Asbroeck
+
+// The ZL38060 is very flexible and configurable. This driver implements only a
+// tiny subset of the chip's possible configurations:
+//
+// - DSP block bypassed: DAI routed straight to DACs
+// microphone routed straight to DAI
+// - chip's internal clock is driven by a 12 MHz external crystal
+// - chip's DAI connected to CPU is I2S, and bit + frame clock master
+// - chip must be strapped for "host boot": in this mode, firmware will be
+// provided by this driver.
+
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/ihex.h>
+
+#include <sound/pcm_params.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define DRV_NAME "zl38060"
+
+#define ZL38_RATES (SNDRV_PCM_RATE_8000 |\
+ SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_48000)
+#define ZL38_FORMATS SNDRV_PCM_FMTBIT_S16_LE
+
+#define HBI_FIRMWARE_PAGE 0xFF
+#define ZL38_MAX_RAW_XFER 0x100
+
+#define REG_TDMA_CFG_CLK 0x0262
+#define CFG_CLK_PCLK_SHIFT 4
+#define CFG_CLK_PCLK_MASK (0x7ff << CFG_CLK_PCLK_SHIFT)
+#define CFG_CLK_PCLK(bits) ((bits - 1) << CFG_CLK_PCLK_SHIFT)
+#define CFG_CLK_MASTER BIT(15)
+#define CFG_CLK_FSRATE_MASK 0x7
+#define CFG_CLK_FSRATE_8KHZ 0x1
+#define CFG_CLK_FSRATE_16KHZ 0x2
+#define CFG_CLK_FSRATE_48KHZ 0x6
+
+#define REG_CLK_CFG 0x0016
+#define CLK_CFG_SOURCE_XTAL BIT(15)
+
+#define REG_CLK_STATUS 0x0014
+#define CLK_STATUS_HWRST BIT(0)
+
+#define REG_PARAM_RESULT 0x0034
+#define PARAM_RESULT_READY 0xD3D3
+
+#define REG_PG255_BASE_HI 0x000C
+#define REG_PG255_OFFS(addr) ((HBI_FIRMWARE_PAGE << 8) | (addr & 0xFF))
+#define REG_FWR_EXEC 0x012C
+
+#define REG_CMD 0x0032
+#define REG_HW_REV 0x0020
+#define REG_FW_PROD 0x0022
+#define REG_FW_REV 0x0024
+
+#define REG_SEMA_FLAGS 0x0006
+#define SEMA_FLAGS_BOOT_CMD BIT(0)
+#define SEMA_FLAGS_APP_REBOOT BIT(1)
+
+#define REG_HW_REV 0x0020
+#define REG_FW_PROD 0x0022
+#define REG_FW_REV 0x0024
+#define REG_GPIO_DIR 0x02DC
+#define REG_GPIO_DAT 0x02DA
+
+#define BOOTCMD_LOAD_COMPLETE 0x000D
+#define BOOTCMD_FW_GO 0x0008
+
+#define FIRMWARE_MAJOR 2
+#define FIRMWARE_MINOR 2
+
+struct zl38_codec_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ bool is_stream_in_use[2];
+ struct gpio_chip *gpio_chip;
+};
+
+static int zl38_fw_issue_command(struct regmap *regmap, u16 cmd)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_read_poll_timeout(regmap, REG_SEMA_FLAGS, val,
+ !(val & SEMA_FLAGS_BOOT_CMD), 10000,
+ 10000 * 100);
+ if (err)
+ return err;
+ err = regmap_write(regmap, REG_CMD, cmd);
+ if (err)
+ return err;
+ err = regmap_update_bits(regmap, REG_SEMA_FLAGS, SEMA_FLAGS_BOOT_CMD,
+ SEMA_FLAGS_BOOT_CMD);
+ if (err)
+ return err;
+
+ return regmap_read_poll_timeout(regmap, REG_CMD, val, !val, 10000,
+ 10000 * 100);
+}
+
+static int zl38_fw_go(struct regmap *regmap)
+{
+ int err;
+
+ err = zl38_fw_issue_command(regmap, BOOTCMD_LOAD_COMPLETE);
+ if (err)
+ return err;
+
+ return zl38_fw_issue_command(regmap, BOOTCMD_FW_GO);
+}
+
+static int zl38_fw_enter_boot_mode(struct regmap *regmap)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_update_bits(regmap, REG_CLK_STATUS, CLK_STATUS_HWRST,
+ CLK_STATUS_HWRST);
+ if (err)
+ return err;
+
+ return regmap_read_poll_timeout(regmap, REG_PARAM_RESULT, val,
+ val == PARAM_RESULT_READY, 1000, 50000);
+}
+
+static int
+zl38_fw_send_data(struct regmap *regmap, u32 addr, const void *data, u16 len)
+{
+ __be32 addr_base = cpu_to_be32(addr & ~0xFF);
+ int err;
+
+ err = regmap_raw_write(regmap, REG_PG255_BASE_HI, &addr_base,
+ sizeof(addr_base));
+ if (err)
+ return err;
+ return regmap_raw_write(regmap, REG_PG255_OFFS(addr), data, len);
+}
+
+static int zl38_fw_send_xaddr(struct regmap *regmap, const void *data)
+{
+ /* execution address from ihex: 32-bit little endian.
+ * device register expects 32-bit big endian.
+ */
+ u32 addr = le32_to_cpup(data);
+ __be32 baddr = cpu_to_be32(addr);
+
+ return regmap_raw_write(regmap, REG_FWR_EXEC, &baddr, sizeof(baddr));
+}
+
+static int zl38_load_firmware(struct device *dev, struct regmap *regmap)
+{
+ const struct ihex_binrec *rec;
+ const struct firmware *fw;
+ u32 addr;
+ u16 len;
+ int err;
+
+ /* how to get this firmware:
+ * 1. request and download chip firmware from Microsemi
+ * (provided by Microsemi in srec format)
+ * 2. convert downloaded firmware from srec to ihex. Simple tool:
+ * https://gitlab.com/TheSven73/s3-to-irec
+ * 3. convert ihex to binary (.fw) using ihex2fw tool which is included
+ * with the Linux kernel sources
+ */
+ err = request_ihex_firmware(&fw, "zl38060.fw", dev);
+ if (err)
+ return err;
+ err = zl38_fw_enter_boot_mode(regmap);
+ if (err)
+ goto out;
+ rec = (const struct ihex_binrec *)fw->data;
+ while (rec) {
+ addr = be32_to_cpu(rec->addr);
+ len = be16_to_cpu(rec->len);
+ if (addr) {
+ /* regular data ihex record */
+ err = zl38_fw_send_data(regmap, addr, rec->data, len);
+ } else if (len == 4) {
+ /* execution address ihex record */
+ err = zl38_fw_send_xaddr(regmap, rec->data);
+ } else {
+ err = -EINVAL;
+ }
+ if (err)
+ goto out;
+ /* next ! */
+ rec = ihex_next_binrec(rec);
+ }
+ err = zl38_fw_go(regmap);
+
+out:
+ release_firmware(fw);
+ return err;
+}
+
+
+static int zl38_software_reset(struct regmap *regmap)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_update_bits(regmap, REG_SEMA_FLAGS, SEMA_FLAGS_APP_REBOOT,
+ SEMA_FLAGS_APP_REBOOT);
+ if (err)
+ return err;
+
+ /* wait for host bus interface to settle.
+ * Not sure if this is required: Microsemi's vendor driver does this,
+ * but the firmware manual does not mention it. Leave it in, there's
+ * little downside, apart from a slower reset.
+ */
+ msleep(50);
+
+ return regmap_read_poll_timeout(regmap, REG_SEMA_FLAGS, val,
+ !(val & SEMA_FLAGS_APP_REBOOT), 10000,
+ 10000 * 100);
+}
+
+static int zl38_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct zl38_codec_priv *priv = snd_soc_dai_get_drvdata(dai);
+ int err;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ /* firmware default is normal i2s */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ /* firmware default is normal bitclock and frame */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* always 32 bits per frame (= 16 bits/channel, 2 channels) */
+ err = regmap_update_bits(priv->regmap, REG_TDMA_CFG_CLK,
+ CFG_CLK_MASTER | CFG_CLK_PCLK_MASK,
+ CFG_CLK_MASTER | CFG_CLK_PCLK(32));
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int zl38_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct zl38_codec_priv *priv = snd_soc_dai_get_drvdata(dai);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int fsrate;
+ int err;
+
+ /* We cannot change hw_params while the dai is already in use - the
+ * software reset will corrupt the audio. However, reconfiguring here is not
+ * required, as the chip's TDM buses are fully symmetric, which mandates
+ * identical rates, channels, and samplebits for record and playback.
+ */
+ if (priv->is_stream_in_use[!tx])
+ goto skip_setup;
+
+ switch (params_rate(params)) {
+ case 8000:
+ fsrate = CFG_CLK_FSRATE_8KHZ;
+ break;
+ case 16000:
+ fsrate = CFG_CLK_FSRATE_16KHZ;
+ break;
+ case 48000:
+ fsrate = CFG_CLK_FSRATE_48KHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = regmap_update_bits(priv->regmap, REG_TDMA_CFG_CLK,
+ CFG_CLK_FSRATE_MASK, fsrate);
+ if (err)
+ return err;
+
+ /* chip requires a software reset to apply audio register changes */
+ err = zl38_software_reset(priv->regmap);
+ if (err)
+ return err;
+
+skip_setup:
+ priv->is_stream_in_use[tx] = true;
+
+ return 0;
+}
+
+static int zl38_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct zl38_codec_priv *priv = snd_soc_dai_get_drvdata(dai);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
+ priv->is_stream_in_use[tx] = false;
+
+ return 0;
+}
+
+/* stereo bypass with no AEC */
+static const struct reg_sequence cp_config_stereo_bypass[] = {
+ /* interconnects must be programmed first */
+ { 0x0210, 0x0005 }, /* DAC1 in <= I2S1-L */
+ { 0x0212, 0x0006 }, /* DAC2 in <= I2S1-R */
+ { 0x0214, 0x0001 }, /* I2S1-L in <= MIC1 */
+ { 0x0216, 0x0001 }, /* I2S1-R in <= MIC1 */
+ { 0x0224, 0x0000 }, /* AEC-S in <= n/a */
+ { 0x0226, 0x0000 }, /* AEC-R in <= n/a */
+ /* output enables must be programmed next */
+ { 0x0202, 0x000F }, /* enable I2S1 + DAC */
+};
+
+static const struct snd_soc_dai_ops zl38_dai_ops = {
+ .set_fmt = zl38_set_fmt,
+ .hw_params = zl38_hw_params,
+ .hw_free = zl38_hw_free,
+};
+
+static struct snd_soc_dai_driver zl38_dai = {
+ .name = "zl38060-tdma",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ZL38_RATES,
+ .formats = ZL38_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ZL38_RATES,
+ .formats = ZL38_FORMATS,
+ },
+ .ops = &zl38_dai_ops,
+ .symmetric_rates = 1,
+ .symmetric_samplebits = 1,
+ .symmetric_channels = 1,
+};
+
+static const struct snd_soc_dapm_widget zl38_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("DAC1"),
+ SND_SOC_DAPM_OUTPUT("DAC2"),
+
+ SND_SOC_DAPM_INPUT("DMICL"),
+};
+
+static const struct snd_soc_dapm_route zl38_dapm_routes[] = {
+ { "DAC1", NULL, "Playback" },
+ { "DAC2", NULL, "Playback" },
+
+ { "Capture", NULL, "DMICL" },
+};
+
+static const struct snd_soc_component_driver zl38_component_dev = {
+ .dapm_widgets = zl38_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(zl38_dapm_widgets),
+ .dapm_routes = zl38_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(zl38_dapm_routes),
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static void chip_gpio_set(struct gpio_chip *c, unsigned int offset, int val)
+{
+ struct regmap *regmap = gpiochip_get_data(c);
+ unsigned int mask = BIT(offset);
+
+ regmap_update_bits(regmap, REG_GPIO_DAT, mask, val ? mask : 0);
+}
+
+static int chip_gpio_get(struct gpio_chip *c, unsigned int offset)
+{
+ struct regmap *regmap = gpiochip_get_data(c);
+ unsigned int mask = BIT(offset);
+ unsigned int val;
+ int err;
+
+ err = regmap_read(regmap, REG_GPIO_DAT, &val);
+ if (err)
+ return err;
+
+ return !!(val & mask);
+}
+
+static int chip_direction_input(struct gpio_chip *c, unsigned int offset)
+{
+ struct regmap *regmap = gpiochip_get_data(c);
+ unsigned int mask = BIT(offset);
+
+ return regmap_update_bits(regmap, REG_GPIO_DIR, mask, 0);
+}
+
+static int
+chip_direction_output(struct gpio_chip *c, unsigned int offset, int val)
+{
+ struct regmap *regmap = gpiochip_get_data(c);
+ unsigned int mask = BIT(offset);
+
+ chip_gpio_set(c, offset, val);
+ return regmap_update_bits(regmap, REG_GPIO_DIR, mask, mask);
+}
+
+static const struct gpio_chip template_chip = {
+ .owner = THIS_MODULE,
+ .label = DRV_NAME,
+
+ .base = -1,
+ .ngpio = 14,
+ .direction_input = chip_direction_input,
+ .direction_output = chip_direction_output,
+ .get = chip_gpio_get,
+ .set = chip_gpio_set,
+
+ .can_sleep = true,
+};
+
+static int zl38_check_revision(struct device *dev, struct regmap *regmap)
+{
+ unsigned int hwrev, fwprod, fwrev;
+ int fw_major, fw_minor, fw_micro;
+ int err;
+
+ err = regmap_read(regmap, REG_HW_REV, &hwrev);
+ if (err)
+ return err;
+ err = regmap_read(regmap, REG_FW_PROD, &fwprod);
+ if (err)
+ return err;
+ err = regmap_read(regmap, REG_FW_REV, &fwrev);
+ if (err)
+ return err;
+
+ fw_major = (fwrev >> 12) & 0xF;
+ fw_minor = (fwrev >> 8) & 0xF;
+ fw_micro = fwrev & 0xFF;
+ dev_info(dev, "hw rev 0x%x, fw product code %d, firmware rev %d.%d.%d",
+ hwrev & 0x1F, fwprod, fw_major, fw_minor, fw_micro);
+
+ if (fw_major != FIRMWARE_MAJOR || fw_minor < FIRMWARE_MINOR) {
+ dev_err(dev, "unsupported firmware. driver supports %d.%d",
+ FIRMWARE_MAJOR, FIRMWARE_MINOR);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int zl38_bus_read(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size)
+{
+ struct spi_device *spi = context;
+ const u8 *reg_buf8 = reg_buf;
+ size_t len = 0;
+ u8 offs, page;
+ u8 txbuf[4];
+
+ if (reg_size != 2 || val_size > ZL38_MAX_RAW_XFER)
+ return -EINVAL;
+
+ offs = reg_buf8[1] >> 1;
+ page = reg_buf8[0];
+
+ if (page) {
+ txbuf[len++] = 0xFE;
+ txbuf[len++] = page == HBI_FIRMWARE_PAGE ? 0xFF : page - 1;
+ txbuf[len++] = offs;
+ txbuf[len++] = val_size / 2 - 1;
+ } else {
+ txbuf[len++] = offs | 0x80;
+ txbuf[len++] = val_size / 2 - 1;
+ }
+
+ return spi_write_then_read(spi, txbuf, len, val_buf, val_size);
+}
+
+static int zl38_bus_write(void *context, const void *data, size_t count)
+{
+ struct spi_device *spi = context;
+ u8 buf[4 + ZL38_MAX_RAW_XFER];
+ size_t val_len, len = 0;
+ const u8 *data8 = data;
+ u8 offs, page;
+
+ if (count > (2 + ZL38_MAX_RAW_XFER) || count < 4)
+ return -EINVAL;
+ val_len = count - 2;
+ offs = data8[1] >> 1;
+ page = data8[0];
+
+ if (page) {
+ buf[len++] = 0xFE;
+ buf[len++] = page == HBI_FIRMWARE_PAGE ? 0xFF : page - 1;
+ buf[len++] = offs;
+ buf[len++] = (val_len / 2 - 1) | 0x80;
+ } else {
+ buf[len++] = offs | 0x80;
+ buf[len++] = (val_len / 2 - 1) | 0x80;
+ }
+ memcpy(buf + len, data8 + 2, val_len);
+ len += val_len;
+
+ return spi_write(spi, buf, len);
+}
+
+static const struct regmap_bus zl38_regmap_bus = {
+ .read = zl38_bus_read,
+ .write = zl38_bus_write,
+ .max_raw_write = ZL38_MAX_RAW_XFER,
+ .max_raw_read = ZL38_MAX_RAW_XFER,
+};
+
+static const struct regmap_config zl38_regmap_conf = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_stride = 2,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int zl38_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct zl38_codec_priv *priv;
+ struct gpio_desc *reset_gpio;
+ int err;
+
+ /* get the chip to a known state by putting it in reset */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
+ if (reset_gpio) {
+ /* datasheet: need > 10us for a digital + analog reset */
+ usleep_range(15, 50);
+ /* take the chip out of reset */
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ /* datasheet: need > 3ms for digital section to become stable */
+ usleep_range(3000, 10000);
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ dev_set_drvdata(dev, priv);
+ priv->regmap = devm_regmap_init(dev, &zl38_regmap_bus, spi,
+ &zl38_regmap_conf);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ err = zl38_load_firmware(dev, priv->regmap);
+ if (err)
+ return err;
+
+ err = zl38_check_revision(dev, priv->regmap);
+ if (err)
+ return err;
+
+ priv->gpio_chip = devm_kmemdup(dev, &template_chip,
+ sizeof(template_chip), GFP_KERNEL);
+ if (!priv->gpio_chip)
+ return -ENOMEM;
+#ifdef CONFIG_OF_GPIO
+ priv->gpio_chip->of_node = dev->of_node;
+#endif
+ err = devm_gpiochip_add_data(dev, priv->gpio_chip, priv->regmap);
+ if (err)
+ return err;
+
+ /* setup the cross-point switch for stereo bypass */
+ err = regmap_multi_reg_write(priv->regmap, cp_config_stereo_bypass,
+ ARRAY_SIZE(cp_config_stereo_bypass));
+ if (err)
+ return err;
+ /* setup for 12MHz crystal connected to the chip */
+ err = regmap_update_bits(priv->regmap, REG_CLK_CFG, CLK_CFG_SOURCE_XTAL,
+ CLK_CFG_SOURCE_XTAL);
+ if (err)
+ return err;
+
+ return devm_snd_soc_register_component(dev, &zl38_component_dev,
+ &zl38_dai, 1);
+}
+
+static const struct of_device_id zl38_dt_ids[] = {
+ { .compatible = "mscc,zl38060", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zl38_dt_ids);
+
+static const struct spi_device_id zl38_spi_ids[] = {
+ { "zl38060", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, zl38_spi_ids);
+
+static struct spi_driver zl38060_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(zl38_dt_ids),
+ },
+ .probe = zl38_spi_probe,
+ .id_table = zl38_spi_ids,
+};
+module_spi_driver(zl38060_spi_driver);
+
+MODULE_DESCRIPTION("ASoC ZL38060 driver");
+MODULE_AUTHOR("Sven Van Asbroeck <TheSven73@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 515f88456dbd..fd4160289fac 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -429,7 +429,7 @@ static int dw_i2s_resume(struct snd_soc_component *component)
for_each_component_dais(component, dai) {
for_each_pcm_streams(stream)
- if (dai->stream_active[stream])
+ if (snd_soc_dai_stream_active(dai, stream))
dw_i2s_config(dev, stream);
}
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 65e8cd4be930..ea7b4787a8af 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -84,6 +84,17 @@ config SND_SOC_FSL_MICFIL
Say Y if you want to add Pulse Density Modulation microphone
interface (MICFIL) support for NXP.
+config SND_SOC_FSL_EASRC
+ tristate "Enhanced Asynchronous Sample Rate Converter (EASRC) module support"
+ depends on SND_SOC_FSL_ASRC
+ select REGMAP_MMIO
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Say Y if you want to add Enhanced ASRC support for NXP. The ASRC is
+ a digital module that converts audio from a source sample rate to a
+ destination sample rate. It is a newly designed module compared with
+ the old ASRC.
+
config SND_SOC_FSL_UTILS
tristate
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index 8cde88c72d93..b835eebf8825 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -24,6 +24,7 @@ snd-soc-fsl-micfil-objs := fsl_micfil.o
snd-soc-fsl-utils-objs := fsl_utils.o
snd-soc-fsl-dma-objs := fsl_dma.o
snd-soc-fsl-mqs-objs := fsl_mqs.o
+snd-soc-fsl-easrc-objs := fsl_easrc.o
obj-$(CONFIG_SND_SOC_FSL_AUDMIX) += snd-soc-fsl-audmix.o
obj-$(CONFIG_SND_SOC_FSL_ASOC_CARD) += snd-soc-fsl-asoc-card.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
obj-$(CONFIG_SND_SOC_FSL_MICFIL) += snd-soc-fsl-micfil.o
obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
obj-$(CONFIG_SND_SOC_FSL_MQS) += snd-soc-fsl-mqs.o
+obj-$(CONFIG_SND_SOC_FSL_EASRC) += snd-soc-fsl-easrc.o
obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
# MPC5200 Platform Support
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index bb33601fab84..00be73900888 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -581,7 +581,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
if (!fsl_asoc_card_is_ac97(priv) && !codec_dev) {
dev_err(&pdev->dev, "failed to find codec device\n");
- ret = -EINVAL;
+ ret = -EPROBE_DEFER;
goto asrc_fail;
}
@@ -680,17 +680,23 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
goto asrc_fail;
}
- ret = of_property_read_u32(asrc_np, "fsl,asrc-width", &width);
+ ret = of_property_read_u32(asrc_np, "fsl,asrc-format",
+ &priv->asrc_format);
if (ret) {
- dev_err(&pdev->dev, "failed to get output rate\n");
- ret = -EINVAL;
- goto asrc_fail;
+ /* Fallback to old binding; translate to asrc_format */
+ ret = of_property_read_u32(asrc_np, "fsl,asrc-width",
+ &width);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to decide output format\n");
+ goto asrc_fail;
+ }
+
+ if (width == 24)
+ priv->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
+ else
+ priv->asrc_format = SNDRV_PCM_FORMAT_S16_LE;
}
-
- if (width == 24)
- priv->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
- else
- priv->asrc_format = SNDRV_PCM_FORMAT_S16_LE;
}
/* Finish card registering */
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 0dcebc24c312..95f6a9617b0b 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -21,10 +21,10 @@
#define IDEAL_RATIO_DECIMAL_DEPTH 26
#define pair_err(fmt, ...) \
- dev_err(&asrc_priv->pdev->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
+ dev_err(&asrc->pdev->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
#define pair_dbg(fmt, ...) \
- dev_dbg(&asrc_priv->pdev->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
+ dev_dbg(&asrc->pdev->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
/* Corresponding to process_option */
static unsigned int supported_asrc_rate[] = {
@@ -154,18 +154,18 @@ static void fsl_asrc_sel_proc(int inrate, int outrate,
* within range [ANCA, ANCA+ANCB-1], depends on the channels of pair A
* while pair A and pair C are comparatively independent.
*/
-int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
+static int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
{
enum asrc_pair_index index = ASRC_INVALID_PAIR;
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
- struct device *dev = &asrc_priv->pdev->dev;
+ struct fsl_asrc *asrc = pair->asrc;
+ struct device *dev = &asrc->pdev->dev;
unsigned long lock_flags;
int i, ret = 0;
- spin_lock_irqsave(&asrc_priv->lock, lock_flags);
+ spin_lock_irqsave(&asrc->lock, lock_flags);
for (i = ASRC_PAIR_A; i < ASRC_PAIR_MAX_NUM; i++) {
- if (asrc_priv->pair[i] != NULL)
+ if (asrc->pair[i] != NULL)
continue;
index = i;
@@ -177,17 +177,17 @@ int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
if (index == ASRC_INVALID_PAIR) {
dev_err(dev, "all pairs are busy now\n");
ret = -EBUSY;
- } else if (asrc_priv->channel_avail < channels) {
+ } else if (asrc->channel_avail < channels) {
dev_err(dev, "can't afford required channels: %d\n", channels);
ret = -EINVAL;
} else {
- asrc_priv->channel_avail -= channels;
- asrc_priv->pair[index] = pair;
+ asrc->channel_avail -= channels;
+ asrc->pair[index] = pair;
pair->channels = channels;
pair->index = index;
}
- spin_unlock_irqrestore(&asrc_priv->lock, lock_flags);
+ spin_unlock_irqrestore(&asrc->lock, lock_flags);
return ret;
}
@@ -195,25 +195,25 @@ int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair)
/**
* Release ASRC pair
*
- * It clears the resource from asrc_priv and releases the occupied channels.
+ * It clears the resource from asrc and releases the occupied channels.
*/
-void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
+static void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
{
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
enum asrc_pair_index index = pair->index;
unsigned long lock_flags;
/* Make sure the pair is disabled */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_ASRCEi_MASK(index), 0);
- spin_lock_irqsave(&asrc_priv->lock, lock_flags);
+ spin_lock_irqsave(&asrc->lock, lock_flags);
- asrc_priv->channel_avail += pair->channels;
- asrc_priv->pair[index] = NULL;
+ asrc->channel_avail += pair->channels;
+ asrc->pair[index] = NULL;
pair->error = 0;
- spin_unlock_irqrestore(&asrc_priv->lock, lock_flags);
+ spin_unlock_irqrestore(&asrc->lock, lock_flags);
}
/**
@@ -221,10 +221,10 @@ void fsl_asrc_release_pair(struct fsl_asrc_pair *pair)
*/
static void fsl_asrc_set_watermarks(struct fsl_asrc_pair *pair, u32 in, u32 out)
{
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
enum asrc_pair_index index = pair->index;
- regmap_update_bits(asrc_priv->regmap, REG_ASRMCR(index),
+ regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
ASRMCRi_EXTTHRSHi_MASK |
ASRMCRi_INFIFO_THRESHOLD_MASK |
ASRMCRi_OUTFIFO_THRESHOLD_MASK,
@@ -257,7 +257,7 @@ static u32 fsl_asrc_cal_asrck_divisor(struct fsl_asrc_pair *pair, u32 div)
static int fsl_asrc_set_ideal_ratio(struct fsl_asrc_pair *pair,
int inrate, int outrate)
{
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
enum asrc_pair_index index = pair->index;
unsigned long ratio;
int i;
@@ -286,8 +286,8 @@ static int fsl_asrc_set_ideal_ratio(struct fsl_asrc_pair *pair,
break;
}
- regmap_write(asrc_priv->regmap, REG_ASRIDRL(index), ratio);
- regmap_write(asrc_priv->regmap, REG_ASRIDRH(index), ratio >> 24);
+ regmap_write(asrc->regmap, REG_ASRIDRL(index), ratio);
+ regmap_write(asrc->regmap, REG_ASRIDRH(index), ratio >> 24);
return 0;
}
@@ -308,8 +308,10 @@ static int fsl_asrc_set_ideal_ratio(struct fsl_asrc_pair *pair,
*/
static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
{
- struct asrc_config *config = pair->config;
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc_pair_priv *pair_priv = pair->private;
+ struct asrc_config *config = pair_priv->config;
+ struct fsl_asrc *asrc = pair->asrc;
+ struct fsl_asrc_priv *asrc_priv = asrc->private;
enum asrc_pair_index index = pair->index;
enum asrc_word_width input_word_width;
enum asrc_word_width output_word_width;
@@ -441,18 +443,18 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
channels /= 2;
/* Update channels for current pair */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCNCR,
+ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
ASRCNCR_ANCi_MASK(index, asrc_priv->soc->channel_bits),
ASRCNCR_ANCi(index, channels, asrc_priv->soc->channel_bits));
/* Default setting: Automatic selection for processing mode */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_ATSi_MASK(index), ASRCTR_ATS(index));
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_USRi_MASK(index), 0);
/* Set the input and output clock sources */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCSR,
+ regmap_update_bits(asrc->regmap, REG_ASRCSR,
ASRCSR_AICSi_MASK(index) | ASRCSR_AOCSi_MASK(index),
ASRCSR_AICS(index, clk_index[IN]) |
ASRCSR_AOCS(index, clk_index[OUT]));
@@ -462,19 +464,19 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
outdiv = fsl_asrc_cal_asrck_divisor(pair, div[OUT]);
/* Suppose indiv and outdiv includes prescaler, so add its MASK too */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCDR(index),
+ regmap_update_bits(asrc->regmap, REG_ASRCDR(index),
ASRCDRi_AOCPi_MASK(index) | ASRCDRi_AICPi_MASK(index) |
ASRCDRi_AOCDi_MASK(index) | ASRCDRi_AICDi_MASK(index),
ASRCDRi_AOCP(index, outdiv) | ASRCDRi_AICP(index, indiv));
/* Implement word_width configurations */
- regmap_update_bits(asrc_priv->regmap, REG_ASRMCR1(index),
+ regmap_update_bits(asrc->regmap, REG_ASRMCR1(index),
ASRMCR1i_OW16_MASK | ASRMCR1i_IWD_MASK,
ASRMCR1i_OW16(output_word_width) |
ASRMCR1i_IWD(input_word_width));
/* Enable BUFFER STALL */
- regmap_update_bits(asrc_priv->regmap, REG_ASRMCR(index),
+ regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
ASRMCRi_BUFSTALLi_MASK, ASRMCRi_BUFSTALLi);
/* Set default thresholds for input and output FIFO */
@@ -486,18 +488,18 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
return 0;
/* Clear ASTSx bit to use Ideal Ratio mode */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_ATSi_MASK(index), 0);
/* Enable Ideal Ratio mode */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_IDRi_MASK(index) | ASRCTR_USRi_MASK(index),
ASRCTR_IDR(index) | ASRCTR_USR(index));
fsl_asrc_sel_proc(inrate, outrate, &pre_proc, &post_proc);
/* Apply configurations for pre- and post-processing */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCFG,
+ regmap_update_bits(asrc->regmap, REG_ASRCFG,
ASRCFG_PREMODi_MASK(index) | ASRCFG_POSTMODi_MASK(index),
ASRCFG_PREMOD(index, pre_proc) |
ASRCFG_POSTMOD(index, post_proc));
@@ -512,28 +514,28 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate)
*/
static void fsl_asrc_start_pair(struct fsl_asrc_pair *pair)
{
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
enum asrc_pair_index index = pair->index;
int reg, retry = 10, i;
/* Enable the current pair */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_ASRCEi_MASK(index), ASRCTR_ASRCE(index));
/* Wait for status of initialization */
do {
udelay(5);
- regmap_read(asrc_priv->regmap, REG_ASRCFG, &reg);
+ regmap_read(asrc->regmap, REG_ASRCFG, &reg);
reg &= ASRCFG_INIRQi_MASK(index);
} while (!reg && --retry);
/* Make the input fifo to ASRC STALL level */
- regmap_read(asrc_priv->regmap, REG_ASRCNCR, &reg);
+ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
for (i = 0; i < pair->channels * 4; i++)
- regmap_write(asrc_priv->regmap, REG_ASRDI(index), 0);
+ regmap_write(asrc->regmap, REG_ASRDI(index), 0);
/* Enable overload interrupt */
- regmap_write(asrc_priv->regmap, REG_ASRIER, ASRIER_AOLIE);
+ regmap_write(asrc->regmap, REG_ASRIER, ASRIER_AOLIE);
}
/**
@@ -541,33 +543,34 @@ static void fsl_asrc_start_pair(struct fsl_asrc_pair *pair)
*/
static void fsl_asrc_stop_pair(struct fsl_asrc_pair *pair)
{
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
enum asrc_pair_index index = pair->index;
/* Stop the current pair */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
ASRCTR_ASRCEi_MASK(index), 0);
}
/**
* Get DMA channel according to the pair and direction.
*/
-struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir)
+static struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair,
+ bool dir)
{
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
enum asrc_pair_index index = pair->index;
char name[4];
sprintf(name, "%cx%c", dir == IN ? 'r' : 't', index + 'a');
- return dma_request_slave_channel(&asrc_priv->pdev->dev, name);
+ return dma_request_slave_channel(&asrc->pdev->dev, name);
}
-EXPORT_SYMBOL_GPL(fsl_asrc_get_dma_channel);
static int fsl_asrc_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+ struct fsl_asrc *asrc = snd_soc_dai_get_drvdata(dai);
+ struct fsl_asrc_priv *asrc_priv = asrc->private;
/* Odd channel number is not valid for older ASRC (channel_bits==3) */
if (asrc_priv->soc->channel_bits == 3)
@@ -583,13 +586,13 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+ struct fsl_asrc *asrc = snd_soc_dai_get_drvdata(dai);
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
+ struct fsl_asrc_pair_priv *pair_priv = pair->private;
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
struct asrc_config config;
- snd_pcm_format_t format;
int ret;
ret = fsl_asrc_request_pair(channels, pair);
@@ -598,12 +601,7 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- pair->config = &config;
-
- if (asrc_priv->asrc_width == 16)
- format = SNDRV_PCM_FORMAT_S16_LE;
- else
- format = SNDRV_PCM_FORMAT_S24_LE;
+ pair_priv->config = &config;
config.pair = pair->index;
config.channel_num = channels;
@@ -612,13 +610,13 @@ static int fsl_asrc_dai_hw_params(struct snd_pcm_substream *substream,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
config.input_format = params_format(params);
- config.output_format = format;
+ config.output_format = asrc->asrc_format;
config.input_sample_rate = rate;
- config.output_sample_rate = asrc_priv->asrc_rate;
+ config.output_sample_rate = asrc->asrc_rate;
} else {
- config.input_format = format;
+ config.input_format = asrc->asrc_format;
config.output_format = params_format(params);
- config.input_sample_rate = asrc_priv->asrc_rate;
+ config.input_sample_rate = asrc->asrc_rate;
config.output_sample_rate = rate;
}
@@ -676,10 +674,10 @@ static const struct snd_soc_dai_ops fsl_asrc_dai_ops = {
static int fsl_asrc_dai_probe(struct snd_soc_dai *dai)
{
- struct fsl_asrc *asrc_priv = snd_soc_dai_get_drvdata(dai);
+ struct fsl_asrc *asrc = snd_soc_dai_get_drvdata(dai);
- snd_soc_dai_init_dma_data(dai, &asrc_priv->dma_params_tx,
- &asrc_priv->dma_params_rx);
+ snd_soc_dai_init_dma_data(dai, &asrc->dma_params_tx,
+ &asrc->dma_params_rx);
return 0;
}
@@ -858,30 +856,35 @@ static const struct regmap_config fsl_asrc_regmap_config = {
/**
* Initialize ASRC registers with a default configurations
*/
-static int fsl_asrc_init(struct fsl_asrc *asrc_priv)
+static int fsl_asrc_init(struct fsl_asrc *asrc)
{
+ unsigned long ipg_rate;
+
/* Halt ASRC internal FP when input FIFO needs data for pair A, B, C */
- regmap_write(asrc_priv->regmap, REG_ASRCTR, ASRCTR_ASRCEN);
+ regmap_write(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEN);
/* Disable interrupt by default */
- regmap_write(asrc_priv->regmap, REG_ASRIER, 0x0);
+ regmap_write(asrc->regmap, REG_ASRIER, 0x0);
/* Apply recommended settings for parameters from Reference Manual */
- regmap_write(asrc_priv->regmap, REG_ASRPM1, 0x7fffff);
- regmap_write(asrc_priv->regmap, REG_ASRPM2, 0x255555);
- regmap_write(asrc_priv->regmap, REG_ASRPM3, 0xff7280);
- regmap_write(asrc_priv->regmap, REG_ASRPM4, 0xff7280);
- regmap_write(asrc_priv->regmap, REG_ASRPM5, 0xff7280);
+ regmap_write(asrc->regmap, REG_ASRPM1, 0x7fffff);
+ regmap_write(asrc->regmap, REG_ASRPM2, 0x255555);
+ regmap_write(asrc->regmap, REG_ASRPM3, 0xff7280);
+ regmap_write(asrc->regmap, REG_ASRPM4, 0xff7280);
+ regmap_write(asrc->regmap, REG_ASRPM5, 0xff7280);
/* Base address for task queue FIFO. Set to 0x7C */
- regmap_update_bits(asrc_priv->regmap, REG_ASRTFR1,
+ regmap_update_bits(asrc->regmap, REG_ASRTFR1,
ASRTFR1_TF_BASE_MASK, ASRTFR1_TF_BASE(0xfc));
- /* Set the processing clock for 76KHz to 133M */
- regmap_write(asrc_priv->regmap, REG_ASR76K, 0x06D6);
-
- /* Set the processing clock for 56KHz to 133M */
- return regmap_write(asrc_priv->regmap, REG_ASR56K, 0x0947);
+ /*
+ * Set the period of the 76KHz and 56KHz sampling clocks based on
+ * the ASRC processing clock.
+ * On iMX6, ipg_clk = 133MHz, REG_ASR76K = 0x06D6, REG_ASR56K = 0x0947
+ */
+ ipg_rate = clk_get_rate(asrc->ipg_clk);
+ regmap_write(asrc->regmap, REG_ASR76K, ipg_rate / 76000);
+ return regmap_write(asrc->regmap, REG_ASR56K, ipg_rate / 56000);
}
/**
@@ -889,15 +892,15 @@ static int fsl_asrc_init(struct fsl_asrc *asrc_priv)
*/
static irqreturn_t fsl_asrc_isr(int irq, void *dev_id)
{
- struct fsl_asrc *asrc_priv = (struct fsl_asrc *)dev_id;
- struct device *dev = &asrc_priv->pdev->dev;
+ struct fsl_asrc *asrc = (struct fsl_asrc *)dev_id;
+ struct device *dev = &asrc->pdev->dev;
enum asrc_pair_index index;
u32 status;
- regmap_read(asrc_priv->regmap, REG_ASRSTR, &status);
+ regmap_read(asrc->regmap, REG_ASRSTR, &status);
/* Clean overload error */
- regmap_write(asrc_priv->regmap, REG_ASRSTR, ASRSTR_AOLE);
+ regmap_write(asrc->regmap, REG_ASRSTR, ASRSTR_AOLE);
/*
* We here use dev_dbg() for all exceptions because ASRC itself does
@@ -905,31 +908,31 @@ static irqreturn_t fsl_asrc_isr(int irq, void *dev_id)
* interrupt would result a ridged conversion.
*/
for (index = ASRC_PAIR_A; index < ASRC_PAIR_MAX_NUM; index++) {
- if (!asrc_priv->pair[index])
+ if (!asrc->pair[index])
continue;
if (status & ASRSTR_ATQOL) {
- asrc_priv->pair[index]->error |= ASRC_TASK_Q_OVERLOAD;
+ asrc->pair[index]->error |= ASRC_TASK_Q_OVERLOAD;
dev_dbg(dev, "ASRC Task Queue FIFO overload\n");
}
if (status & ASRSTR_AOOL(index)) {
- asrc_priv->pair[index]->error |= ASRC_OUTPUT_TASK_OVERLOAD;
+ asrc->pair[index]->error |= ASRC_OUTPUT_TASK_OVERLOAD;
pair_dbg("Output Task Overload\n");
}
if (status & ASRSTR_AIOL(index)) {
- asrc_priv->pair[index]->error |= ASRC_INPUT_TASK_OVERLOAD;
+ asrc->pair[index]->error |= ASRC_INPUT_TASK_OVERLOAD;
pair_dbg("Input Task Overload\n");
}
if (status & ASRSTR_AODO(index)) {
- asrc_priv->pair[index]->error |= ASRC_OUTPUT_BUFFER_OVERFLOW;
+ asrc->pair[index]->error |= ASRC_OUTPUT_BUFFER_OVERFLOW;
pair_dbg("Output Data Buffer has overflowed\n");
}
if (status & ASRSTR_AIDU(index)) {
- asrc_priv->pair[index]->error |= ASRC_INPUT_BUFFER_UNDERRUN;
+ asrc->pair[index]->error |= ASRC_INPUT_BUFFER_UNDERRUN;
pair_dbg("Input Data Buffer has underflowed\n");
}
}
@@ -937,21 +940,33 @@ static irqreturn_t fsl_asrc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int fsl_asrc_get_fifo_addr(u8 dir, enum asrc_pair_index index)
+{
+ return REG_ASRDx(dir, index);
+}
+
static int fsl_asrc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct fsl_asrc *asrc_priv;
+ struct fsl_asrc_priv *asrc_priv;
+ struct fsl_asrc *asrc;
struct resource *res;
void __iomem *regs;
int irq, ret, i;
u32 map_idx;
char tmp[16];
+ u32 width;
+
+ asrc = devm_kzalloc(&pdev->dev, sizeof(*asrc), GFP_KERNEL);
+ if (!asrc)
+ return -ENOMEM;
asrc_priv = devm_kzalloc(&pdev->dev, sizeof(*asrc_priv), GFP_KERNEL);
if (!asrc_priv)
return -ENOMEM;
- asrc_priv->pdev = pdev;
+ asrc->pdev = pdev;
+ asrc->private = asrc_priv;
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -959,13 +974,13 @@ static int fsl_asrc_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- asrc_priv->paddr = res->start;
+ asrc->paddr = res->start;
- asrc_priv->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "mem", regs,
- &fsl_asrc_regmap_config);
- if (IS_ERR(asrc_priv->regmap)) {
+ asrc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "mem", regs,
+ &fsl_asrc_regmap_config);
+ if (IS_ERR(asrc->regmap)) {
dev_err(&pdev->dev, "failed to init regmap\n");
- return PTR_ERR(asrc_priv->regmap);
+ return PTR_ERR(asrc->regmap);
}
irq = platform_get_irq(pdev, 0);
@@ -973,26 +988,26 @@ static int fsl_asrc_probe(struct platform_device *pdev)
return irq;
ret = devm_request_irq(&pdev->dev, irq, fsl_asrc_isr, 0,
- dev_name(&pdev->dev), asrc_priv);
+ dev_name(&pdev->dev), asrc);
if (ret) {
dev_err(&pdev->dev, "failed to claim irq %u: %d\n", irq, ret);
return ret;
}
- asrc_priv->mem_clk = devm_clk_get(&pdev->dev, "mem");
- if (IS_ERR(asrc_priv->mem_clk)) {
+ asrc->mem_clk = devm_clk_get(&pdev->dev, "mem");
+ if (IS_ERR(asrc->mem_clk)) {
dev_err(&pdev->dev, "failed to get mem clock\n");
- return PTR_ERR(asrc_priv->mem_clk);
+ return PTR_ERR(asrc->mem_clk);
}
- asrc_priv->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(asrc_priv->ipg_clk)) {
+ asrc->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(asrc->ipg_clk)) {
dev_err(&pdev->dev, "failed to get ipg clock\n");
- return PTR_ERR(asrc_priv->ipg_clk);
+ return PTR_ERR(asrc->ipg_clk);
}
- asrc_priv->spba_clk = devm_clk_get(&pdev->dev, "spba");
- if (IS_ERR(asrc_priv->spba_clk))
+ asrc->spba_clk = devm_clk_get(&pdev->dev, "spba");
+ if (IS_ERR(asrc->spba_clk))
dev_warn(&pdev->dev, "failed to get spba clock\n");
for (i = 0; i < ASRC_CLK_MAX_NUM; i++) {
@@ -1010,6 +1025,13 @@ static int fsl_asrc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ asrc->use_edma = asrc_priv->soc->use_edma;
+ asrc->get_dma_channel = fsl_asrc_get_dma_channel;
+ asrc->request_pair = fsl_asrc_request_pair;
+ asrc->release_pair = fsl_asrc_release_pair;
+ asrc->get_fifo_addr = fsl_asrc_get_fifo_addr;
+ asrc->pair_priv_size = sizeof(struct fsl_asrc_pair_priv);
+
if (of_device_is_compatible(np, "fsl,imx35-asrc")) {
asrc_priv->clk_map[IN] = input_clk_map_imx35;
asrc_priv->clk_map[OUT] = output_clk_map_imx35;
@@ -1037,36 +1059,53 @@ static int fsl_asrc_probe(struct platform_device *pdev)
}
}
- ret = fsl_asrc_init(asrc_priv);
+ ret = fsl_asrc_init(asrc);
if (ret) {
dev_err(&pdev->dev, "failed to init asrc %d\n", ret);
return ret;
}
- asrc_priv->channel_avail = 10;
+ asrc->channel_avail = 10;
ret = of_property_read_u32(np, "fsl,asrc-rate",
- &asrc_priv->asrc_rate);
+ &asrc->asrc_rate);
if (ret) {
dev_err(&pdev->dev, "failed to get output rate\n");
return ret;
}
- ret = of_property_read_u32(np, "fsl,asrc-width",
- &asrc_priv->asrc_width);
+ ret = of_property_read_u32(np, "fsl,asrc-format", &asrc->asrc_format);
if (ret) {
- dev_err(&pdev->dev, "failed to get output width\n");
- return ret;
+ ret = of_property_read_u32(np, "fsl,asrc-width", &width);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to decide output format\n");
+ return ret;
+ }
+
+ switch (width) {
+ case 16:
+ asrc->asrc_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 24:
+ asrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "unsupported width, use default S24_LE\n");
+ asrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ }
}
- if (asrc_priv->asrc_width != 16 && asrc_priv->asrc_width != 24) {
- dev_warn(&pdev->dev, "unsupported width, switching to 24bit\n");
- asrc_priv->asrc_width = 24;
+ if (!(FSL_ASRC_FORMATS & (1ULL << asrc->asrc_format))) {
+ dev_warn(&pdev->dev, "unsupported width, use default S24_LE\n");
+ asrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
}
- platform_set_drvdata(pdev, asrc_priv);
+ platform_set_drvdata(pdev, asrc);
pm_runtime_enable(&pdev->dev);
- spin_lock_init(&asrc_priv->lock);
+ spin_lock_init(&asrc->lock);
+ regcache_cache_only(asrc->regmap, true);
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_asrc_component,
&fsl_asrc_dai, 1);
@@ -1081,17 +1120,19 @@ static int fsl_asrc_probe(struct platform_device *pdev)
#ifdef CONFIG_PM
static int fsl_asrc_runtime_resume(struct device *dev)
{
- struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
+ struct fsl_asrc_priv *asrc_priv = asrc->private;
int i, ret;
+ u32 asrctr;
- ret = clk_prepare_enable(asrc_priv->mem_clk);
+ ret = clk_prepare_enable(asrc->mem_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(asrc_priv->ipg_clk);
+ ret = clk_prepare_enable(asrc->ipg_clk);
if (ret)
goto disable_mem_clk;
- if (!IS_ERR(asrc_priv->spba_clk)) {
- ret = clk_prepare_enable(asrc_priv->spba_clk);
+ if (!IS_ERR(asrc->spba_clk)) {
+ ret = clk_prepare_enable(asrc->spba_clk);
if (ret)
goto disable_ipg_clk;
}
@@ -1101,79 +1142,64 @@ static int fsl_asrc_runtime_resume(struct device *dev)
goto disable_asrck_clk;
}
+ /* Stop all pairs provisionally */
+ regmap_read(asrc->regmap, REG_ASRCTR, &asrctr);
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
+ ASRCTR_ASRCEi_ALL_MASK, 0);
+
+ /* Restore all registers */
+ regcache_cache_only(asrc->regmap, false);
+ regcache_mark_dirty(asrc->regmap);
+ regcache_sync(asrc->regmap);
+
+ regmap_update_bits(asrc->regmap, REG_ASRCFG,
+ ASRCFG_NDPRi_ALL_MASK | ASRCFG_POSTMODi_ALL_MASK |
+ ASRCFG_PREMODi_ALL_MASK, asrc_priv->regcache_cfg);
+
+ /* Restart enabled pairs */
+ regmap_update_bits(asrc->regmap, REG_ASRCTR,
+ ASRCTR_ASRCEi_ALL_MASK, asrctr);
+
return 0;
disable_asrck_clk:
for (i--; i >= 0; i--)
clk_disable_unprepare(asrc_priv->asrck_clk[i]);
- if (!IS_ERR(asrc_priv->spba_clk))
- clk_disable_unprepare(asrc_priv->spba_clk);
+ if (!IS_ERR(asrc->spba_clk))
+ clk_disable_unprepare(asrc->spba_clk);
disable_ipg_clk:
- clk_disable_unprepare(asrc_priv->ipg_clk);
+ clk_disable_unprepare(asrc->ipg_clk);
disable_mem_clk:
- clk_disable_unprepare(asrc_priv->mem_clk);
+ clk_disable_unprepare(asrc->mem_clk);
return ret;
}
static int fsl_asrc_runtime_suspend(struct device *dev)
{
- struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
+ struct fsl_asrc_priv *asrc_priv = asrc->private;
int i;
- for (i = 0; i < ASRC_CLK_MAX_NUM; i++)
- clk_disable_unprepare(asrc_priv->asrck_clk[i]);
- if (!IS_ERR(asrc_priv->spba_clk))
- clk_disable_unprepare(asrc_priv->spba_clk);
- clk_disable_unprepare(asrc_priv->ipg_clk);
- clk_disable_unprepare(asrc_priv->mem_clk);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_SLEEP
-static int fsl_asrc_suspend(struct device *dev)
-{
- struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
-
- regmap_read(asrc_priv->regmap, REG_ASRCFG,
+ regmap_read(asrc->regmap, REG_ASRCFG,
&asrc_priv->regcache_cfg);
- regcache_cache_only(asrc_priv->regmap, true);
- regcache_mark_dirty(asrc_priv->regmap);
-
- return 0;
-}
-
-static int fsl_asrc_resume(struct device *dev)
-{
- struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
- u32 asrctr;
-
- /* Stop all pairs provisionally */
- regmap_read(asrc_priv->regmap, REG_ASRCTR, &asrctr);
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
- ASRCTR_ASRCEi_ALL_MASK, 0);
-
- /* Restore all registers */
- regcache_cache_only(asrc_priv->regmap, false);
- regcache_sync(asrc_priv->regmap);
+ regcache_cache_only(asrc->regmap, true);
- regmap_update_bits(asrc_priv->regmap, REG_ASRCFG,
- ASRCFG_NDPRi_ALL_MASK | ASRCFG_POSTMODi_ALL_MASK |
- ASRCFG_PREMODi_ALL_MASK, asrc_priv->regcache_cfg);
-
- /* Restart enabled pairs */
- regmap_update_bits(asrc_priv->regmap, REG_ASRCTR,
- ASRCTR_ASRCEi_ALL_MASK, asrctr);
+ for (i = 0; i < ASRC_CLK_MAX_NUM; i++)
+ clk_disable_unprepare(asrc_priv->asrck_clk[i]);
+ if (!IS_ERR(asrc->spba_clk))
+ clk_disable_unprepare(asrc->spba_clk);
+ clk_disable_unprepare(asrc->ipg_clk);
+ clk_disable_unprepare(asrc->mem_clk);
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
static const struct dev_pm_ops fsl_asrc_pm = {
SET_RUNTIME_PM_OPS(fsl_asrc_runtime_suspend, fsl_asrc_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(fsl_asrc_suspend, fsl_asrc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct fsl_asrc_soc_data fsl_asrc_imx35_data = {
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 8a821132d9d0..86d2422ad606 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -10,8 +10,7 @@
#ifndef _FSL_ASRC_H
#define _FSL_ASRC_H
-#define IN 0
-#define OUT 1
+#include "fsl_asrc_common.h"
#define ASRC_DMA_BUFFER_NUM 2
#define ASRC_INPUTFIFO_THRESHOLD 32
@@ -283,14 +282,6 @@
#define ASRMCR1i_OW16_MASK (1 << ASRMCR1i_OW16_SHIFT)
#define ASRMCR1i_OW16(v) ((v) << ASRMCR1i_OW16_SHIFT)
-
-enum asrc_pair_index {
- ASRC_INVALID_PAIR = -1,
- ASRC_PAIR_A = 0,
- ASRC_PAIR_B = 1,
- ASRC_PAIR_C = 2,
-};
-
#define ASRC_PAIR_MAX_NUM (ASRC_PAIR_C + 1)
enum asrc_inclk {
@@ -446,83 +437,28 @@ struct fsl_asrc_soc_data {
};
/**
- * fsl_asrc_pair: ASRC Pair private data
+ * fsl_asrc_pair_priv: ASRC Pair private data
*
- * @asrc_priv: pointer to its parent module
* @config: configuration profile
- * @error: error record
- * @index: pair index (ASRC_PAIR_A, ASRC_PAIR_B, ASRC_PAIR_C)
- * @channels: occupied channel number
- * @desc: input and output dma descriptors
- * @dma_chan: inputer and output DMA channels
- * @dma_data: private dma data
- * @pos: hardware pointer position
- * @private: pair private area
*/
-struct fsl_asrc_pair {
- struct fsl_asrc *asrc_priv;
+struct fsl_asrc_pair_priv {
struct asrc_config *config;
- unsigned int error;
-
- enum asrc_pair_index index;
- unsigned int channels;
-
- struct dma_async_tx_descriptor *desc[2];
- struct dma_chan *dma_chan[2];
- struct imx_dma_data dma_data;
- unsigned int pos;
-
- void *private;
};
/**
- * fsl_asrc_pair: ASRC private data
+ * fsl_asrc_priv: ASRC private data
*
- * @dma_params_rx: DMA parameters for receive channel
- * @dma_params_tx: DMA parameters for transmit channel
- * @pdev: platform device pointer
- * @regmap: regmap handler
- * @paddr: physical address to the base address of registers
- * @mem_clk: clock source to access register
- * @ipg_clk: clock source to drive peripheral
- * @spba_clk: SPBA clock (optional, depending on SoC design)
* @asrck_clk: clock sources to driver ASRC internal logic
- * @lock: spin lock for resource protection
- * @pair: pair pointers
* @soc: soc specific data
- * @channel_avail: non-occupied channel numbers
* @clk_map: clock map for input/output clock
- * @asrc_rate: default sample rate for ASoC Back-Ends
- * @asrc_width: default sample width for ASoC Back-Ends
* @regcache_cfg: store register value of REG_ASRCFG
*/
-struct fsl_asrc {
- struct snd_dmaengine_dai_dma_data dma_params_rx;
- struct snd_dmaengine_dai_dma_data dma_params_tx;
- struct platform_device *pdev;
- struct regmap *regmap;
- unsigned long paddr;
- struct clk *mem_clk;
- struct clk *ipg_clk;
- struct clk *spba_clk;
+struct fsl_asrc_priv {
struct clk *asrck_clk[ASRC_CLK_MAX_NUM];
- spinlock_t lock;
-
- struct fsl_asrc_pair *pair[ASRC_PAIR_MAX_NUM];
const struct fsl_asrc_soc_data *soc;
- unsigned int channel_avail;
unsigned char *clk_map[2];
- int asrc_rate;
- int asrc_width;
-
u32 regcache_cfg;
};
-#define DRV_NAME "fsl-asrc-dai"
-extern struct snd_soc_component_driver fsl_asrc_component;
-struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir);
-int fsl_asrc_request_pair(int channels, struct fsl_asrc_pair *pair);
-void fsl_asrc_release_pair(struct fsl_asrc_pair *pair);
-
#endif /* _FSL_ASRC_H */
diff --git a/sound/soc/fsl/fsl_asrc_common.h b/sound/soc/fsl/fsl_asrc_common.h
new file mode 100644
index 000000000000..77665b15c8db
--- /dev/null
+++ b/sound/soc/fsl/fsl_asrc_common.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP
+ *
+ */
+
+#ifndef _FSL_ASRC_COMMON_H
+#define _FSL_ASRC_COMMON_H
+
+/* directions */
+#define IN 0
+#define OUT 1
+
+enum asrc_pair_index {
+ ASRC_INVALID_PAIR = -1,
+ ASRC_PAIR_A = 0,
+ ASRC_PAIR_B = 1,
+ ASRC_PAIR_C = 2,
+ ASRC_PAIR_D = 3,
+};
+
+#define PAIR_CTX_NUM 0x4
+
+/**
+ * fsl_asrc_pair: ASRC Pair common data
+ *
+ * @asrc: pointer to its parent module
+ * @error: error record
+ * @index: pair index (ASRC_PAIR_A, ASRC_PAIR_B, ASRC_PAIR_C)
+ * @channels: occupied channel number
+ * @desc: input and output dma descriptors
+ * @dma_chan: input and output DMA channels
+ * @dma_data: private dma data
+ * @pos: hardware pointer position
+ * @private: pair private area
+ */
+struct fsl_asrc_pair {
+ struct fsl_asrc *asrc;
+ unsigned int error;
+
+ enum asrc_pair_index index;
+ unsigned int channels;
+
+ struct dma_async_tx_descriptor *desc[2];
+ struct dma_chan *dma_chan[2];
+ struct imx_dma_data dma_data;
+ unsigned int pos;
+
+ void *private;
+};
+
+/**
+ * fsl_asrc: ASRC common data
+ *
+ * @dma_params_rx: DMA parameters for receive channel
+ * @dma_params_tx: DMA parameters for transmit channel
+ * @pdev: platform device pointer
+ * @regmap: regmap handler
+ * @paddr: physical address to the base address of registers
+ * @mem_clk: clock source to access register
+ * @ipg_clk: clock source to drive peripheral
+ * @spba_clk: SPBA clock (optional, depending on SoC design)
+ * @lock: spin lock for resource protection
+ * @pair: pair pointers
+ * @channel_avail: non-occupied channel numbers
+ * @asrc_rate: default sample rate for ASoC Back-Ends
+ * @asrc_format: default sample format for ASoC Back-Ends
+ * @use_edma: whether eDMA is used on this SoC
+ * @get_dma_channel: function pointer to request a DMA channel for a pair
+ * @request_pair: function pointer to request and occupy an ASRC pair
+ * @release_pair: function pointer to release an ASRC pair
+ * @get_fifo_addr: function pointer to get a pair's FIFO register offset
+ * @pair_priv_size: size of pair private struct.
+ * @private: private data structure
+ */
+struct fsl_asrc {
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ unsigned long paddr;
+ struct clk *mem_clk;
+ struct clk *ipg_clk;
+ struct clk *spba_clk;
+ spinlock_t lock; /* spin lock for resource protection */
+
+ struct fsl_asrc_pair *pair[PAIR_CTX_NUM];
+ unsigned int channel_avail;
+
+ int asrc_rate;
+ snd_pcm_format_t asrc_format;
+ bool use_edma;
+
+ struct dma_chan *(*get_dma_channel)(struct fsl_asrc_pair *pair, bool dir);
+ int (*request_pair)(int channels, struct fsl_asrc_pair *pair);
+ void (*release_pair)(struct fsl_asrc_pair *pair);
+ int (*get_fifo_addr)(u8 dir, enum asrc_pair_index index);
+ size_t pair_priv_size;
+
+ void *private;
+};
+
+#define DRV_NAME "fsl-asrc-dai"
+extern struct snd_soc_component_driver fsl_asrc_component;
+
+#endif /* _FSL_ASRC_COMMON_H */
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index e7178817d7a7..d6a3fc5f87e5 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -12,7 +12,7 @@
#include <sound/dmaengine_pcm.h>
#include <sound/pcm_params.h>
-#include "fsl_asrc.h"
+#include "fsl_asrc_common.h"
#define FSL_ASRC_DMABUF_SIZE (256 * 1024)
@@ -135,7 +135,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
- struct fsl_asrc *asrc_priv = pair->asrc_priv;
+ struct fsl_asrc *asrc = pair->asrc;
struct dma_slave_config config_fe, config_be;
enum asrc_pair_index index = pair->index;
struct device *dev = component->dev;
@@ -146,7 +146,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
struct device *dev_be;
u8 dir = tx ? OUT : IN;
dma_cap_mask_t mask;
- int ret;
+ int ret, width;
/* Fetch the Back-End dma_data from DPCM */
for_each_dpcm_be(rtd, stream, dpcm) {
@@ -170,10 +170,10 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
/* Override dma_data of the Front-End and config its dmaengine */
dma_params_fe = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
- dma_params_fe->addr = asrc_priv->paddr + REG_ASRDx(!dir, index);
+ dma_params_fe->addr = asrc->paddr + asrc->get_fifo_addr(!dir, index);
dma_params_fe->maxburst = dma_params_be->maxburst;
- pair->dma_chan[!dir] = fsl_asrc_get_dma_channel(pair, !dir);
+ pair->dma_chan[!dir] = asrc->get_dma_channel(pair, !dir);
if (!pair->dma_chan[!dir]) {
dev_err(dev, "failed to request DMA channel\n");
return -EINVAL;
@@ -203,7 +203,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
* need to configure dma_request and dma_request2, but get dma_chan via
* dma_request_slave_channel directly with dma name of Front-End device
*/
- if (!asrc_priv->soc->use_edma) {
+ if (!asrc->use_edma) {
/* Get DMA request of Back-End */
tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
tmp_data = tmp_chan->private;
@@ -211,7 +211,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
dma_release_channel(tmp_chan);
/* Get DMA request of Front-End */
- tmp_chan = fsl_asrc_get_dma_channel(pair, dir);
+ tmp_chan = asrc->get_dma_channel(pair, dir);
tmp_data = tmp_chan->private;
pair->dma_data.dma_request2 = tmp_data->dma_request;
pair->dma_data.peripheral_type = tmp_data->peripheral_type;
@@ -222,7 +222,7 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
dma_request_channel(mask, filter, &pair->dma_data);
} else {
pair->dma_chan[dir] =
- fsl_asrc_get_dma_channel(pair, dir);
+ asrc->get_dma_channel(pair, dir);
}
if (!pair->dma_chan[dir]) {
@@ -230,10 +230,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
return -EINVAL;
}
- if (asrc_priv->asrc_width == 16)
+ width = snd_pcm_format_physical_width(asrc->asrc_format);
+ if (width < 8 || width > 64)
+ return -EINVAL;
+ else if (width == 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (width == 16)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
- else
+ else if (width == 24)
+ buswidth = DMA_SLAVE_BUSWIDTH_3_BYTES;
+ else if (width <= 32)
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
config_be.direction = DMA_DEV_TO_DEV;
config_be.src_addr_width = buswidth;
@@ -242,16 +251,17 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
config_be.dst_maxburst = dma_params_be->maxburst;
if (tx) {
- config_be.src_addr = asrc_priv->paddr + REG_ASRDO(index);
+ config_be.src_addr = asrc->paddr + asrc->get_fifo_addr(OUT, index);
config_be.dst_addr = dma_params_be->addr;
} else {
- config_be.dst_addr = asrc_priv->paddr + REG_ASRDI(index);
+ config_be.dst_addr = asrc->paddr + asrc->get_fifo_addr(IN, index);
config_be.src_addr = dma_params_be->addr;
}
ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
if (ret) {
dev_err(dev, "failed to config DMA channel for Back-End\n");
+ dma_release_channel(pair->dma_chan[dir]);
return ret;
}
@@ -288,7 +298,7 @@ static int fsl_asrc_dma_startup(struct snd_soc_component *component,
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_dmaengine_dai_dma_data *dma_data;
struct device *dev = component->dev;
- struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
+ struct fsl_asrc *asrc = dev_get_drvdata(dev);
struct fsl_asrc_pair *pair;
struct dma_chan *tmp_chan = NULL;
u8 dir = tx ? OUT : IN;
@@ -302,11 +312,12 @@ static int fsl_asrc_dma_startup(struct snd_soc_component *component,
return ret;
}
- pair = kzalloc(sizeof(struct fsl_asrc_pair), GFP_KERNEL);
+ pair = kzalloc(sizeof(*pair) + asrc->pair_priv_size, GFP_KERNEL);
if (!pair)
return -ENOMEM;
- pair->asrc_priv = asrc_priv;
+ pair->asrc = asrc;
+ pair->private = (void *)pair + sizeof(struct fsl_asrc_pair);
runtime->private_data = pair;
@@ -314,14 +325,14 @@ static int fsl_asrc_dma_startup(struct snd_soc_component *component,
* Request pair function needs channel num as input, for this
* dummy pair, we just request "1" channel temporarily.
*/
- ret = fsl_asrc_request_pair(1, pair);
+ ret = asrc->request_pair(1, pair);
if (ret < 0) {
dev_err(dev, "failed to request asrc pair\n");
goto req_pair_err;
}
/* Request a dummy dma channel, which will be released later. */
- tmp_chan = fsl_asrc_get_dma_channel(pair, dir);
+ tmp_chan = asrc->get_dma_channel(pair, dir);
if (!tmp_chan) {
dev_err(dev, "failed to get dma channel\n");
ret = -EINVAL;
@@ -347,7 +358,7 @@ out:
dma_release_channel(tmp_chan);
dma_chan_err:
- fsl_asrc_release_pair(pair);
+ asrc->release_pair(pair);
req_pair_err:
if (release_pair)
@@ -361,15 +372,15 @@ static int fsl_asrc_dma_shutdown(struct snd_soc_component *component,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct fsl_asrc_pair *pair = runtime->private_data;
- struct fsl_asrc *asrc_priv;
+ struct fsl_asrc *asrc;
if (!pair)
return 0;
- asrc_priv = pair->asrc_priv;
+ asrc = pair->asrc;
- if (asrc_priv->pair[pair->index] == pair)
- asrc_priv->pair[pair->index] = NULL;
+ if (asrc->pair[pair->index] == pair)
+ asrc->pair[pair->index] = NULL;
kfree(pair);
diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c
index 5faecbeb5497..8b9027f76d8a 100644
--- a/sound/soc/fsl/fsl_audmix.c
+++ b/sound/soc/fsl/fsl_audmix.c
@@ -116,7 +116,7 @@ static int fsl_audmix_put_mix_clk_src(struct snd_kcontrol *kcontrol,
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int *item = ucontrol->value.enumerated.item;
unsigned int reg_val, val, mix_clk;
- int ret = 0;
+ int ret;
/* Get current state */
ret = snd_soc_component_read(comp, FSL_AUDMIX_CTR, &reg_val);
@@ -159,7 +159,7 @@ static int fsl_audmix_put_out_src(struct snd_kcontrol *kcontrol,
unsigned int *item = ucontrol->value.enumerated.item;
u32 out_src, mix_clk;
unsigned int reg_val, val, mask = 0, ctr = 0;
- int ret = 0;
+ int ret;
/* Get current state */
ret = snd_soc_component_read(comp, FSL_AUDMIX_CTR, &reg_val);
diff --git a/sound/soc/fsl/fsl_easrc.c b/sound/soc/fsl/fsl_easrc.c
new file mode 100644
index 000000000000..c6b5eb2d2af7
--- /dev/null
+++ b/sound/soc/fsl/fsl_easrc.c
@@ -0,0 +1,2117 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/sched/signal.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/gcd.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <sound/core.h>
+
+#include "fsl_easrc.h"
+#include "imx-pcm.h"
+
+#define FSL_EASRC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_U16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_U24_LE | \
+ SNDRV_PCM_FMTBIT_U24_3LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | \
+ SNDRV_PCM_FMTBIT_U32_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_U20_3LE | \
+ SNDRV_PCM_FMTBIT_FLOAT_LE)
+
+static int fsl_easrc_iec958_put_bits(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct fsl_asrc *easrc = snd_soc_component_get_drvdata(comp);
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+ unsigned int regval = ucontrol->value.integer.value[0];
+
+ easrc_priv->bps_iec958[mc->regbase] = regval;
+
+ return 0;
+}
+
+static int fsl_easrc_iec958_get_bits(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ struct fsl_asrc *easrc = snd_soc_component_get_drvdata(comp);
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+
+ ucontrol->value.enumerated.item[0] = easrc_priv->bps_iec958[mc->regbase];
+
+ return 0;
+}
+
+static int fsl_easrc_get_reg(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+ unsigned int regval;
+ int ret;
+
+ ret = snd_soc_component_read(component, mc->regbase, &regval);
+ if (ret < 0)
+ return ret;
+
+ ucontrol->value.integer.value[0] = regval;
+
+ return 0;
+}
+
+static int fsl_easrc_set_reg(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct soc_mreg_control *mc =
+ (struct soc_mreg_control *)kcontrol->private_value;
+ unsigned int regval = ucontrol->value.integer.value[0];
+ int ret;
+
+ ret = snd_soc_component_write(component, mc->regbase, regval);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+#define SOC_SINGLE_REG_RW(xname, xreg) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_xr_sx, .get = fsl_easrc_get_reg, \
+ .put = fsl_easrc_set_reg, \
+ .private_value = (unsigned long)&(struct soc_mreg_control) \
+ { .regbase = xreg, .regcount = 1, .nbits = 32, \
+ .invert = 0, .min = 0, .max = 0xffffffff, } }
+
+#define SOC_SINGLE_VAL_RW(xname, xreg) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = (xname), \
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ .info = snd_soc_info_xr_sx, .get = fsl_easrc_iec958_get_bits, \
+ .put = fsl_easrc_iec958_put_bits, \
+ .private_value = (unsigned long)&(struct soc_mreg_control) \
+ { .regbase = xreg, .regcount = 1, .nbits = 32, \
+ .invert = 0, .min = 0, .max = 2, } }
+
+static const struct snd_kcontrol_new fsl_easrc_snd_controls[] = {
+ SOC_SINGLE("Context 0 Dither Switch", REG_EASRC_COC(0), 0, 1, 0),
+ SOC_SINGLE("Context 1 Dither Switch", REG_EASRC_COC(1), 0, 1, 0),
+ SOC_SINGLE("Context 2 Dither Switch", REG_EASRC_COC(2), 0, 1, 0),
+ SOC_SINGLE("Context 3 Dither Switch", REG_EASRC_COC(3), 0, 1, 0),
+
+ SOC_SINGLE("Context 0 IEC958 Validity", REG_EASRC_COC(0), 2, 1, 0),
+ SOC_SINGLE("Context 1 IEC958 Validity", REG_EASRC_COC(1), 2, 1, 0),
+ SOC_SINGLE("Context 2 IEC958 Validity", REG_EASRC_COC(2), 2, 1, 0),
+ SOC_SINGLE("Context 3 IEC958 Validity", REG_EASRC_COC(3), 2, 1, 0),
+
+ SOC_SINGLE_VAL_RW("Context 0 IEC958 Bits Per Sample", 0),
+ SOC_SINGLE_VAL_RW("Context 1 IEC958 Bits Per Sample", 1),
+ SOC_SINGLE_VAL_RW("Context 2 IEC958 Bits Per Sample", 2),
+ SOC_SINGLE_VAL_RW("Context 3 IEC958 Bits Per Sample", 3),
+
+ SOC_SINGLE_REG_RW("Context 0 IEC958 CS0", REG_EASRC_CS0(0)),
+ SOC_SINGLE_REG_RW("Context 1 IEC958 CS0", REG_EASRC_CS0(1)),
+ SOC_SINGLE_REG_RW("Context 2 IEC958 CS0", REG_EASRC_CS0(2)),
+ SOC_SINGLE_REG_RW("Context 3 IEC958 CS0", REG_EASRC_CS0(3)),
+ SOC_SINGLE_REG_RW("Context 0 IEC958 CS1", REG_EASRC_CS1(0)),
+ SOC_SINGLE_REG_RW("Context 1 IEC958 CS1", REG_EASRC_CS1(1)),
+ SOC_SINGLE_REG_RW("Context 2 IEC958 CS1", REG_EASRC_CS1(2)),
+ SOC_SINGLE_REG_RW("Context 3 IEC958 CS1", REG_EASRC_CS1(3)),
+ SOC_SINGLE_REG_RW("Context 0 IEC958 CS2", REG_EASRC_CS2(0)),
+ SOC_SINGLE_REG_RW("Context 1 IEC958 CS2", REG_EASRC_CS2(1)),
+ SOC_SINGLE_REG_RW("Context 2 IEC958 CS2", REG_EASRC_CS2(2)),
+ SOC_SINGLE_REG_RW("Context 3 IEC958 CS2", REG_EASRC_CS2(3)),
+ SOC_SINGLE_REG_RW("Context 0 IEC958 CS3", REG_EASRC_CS3(0)),
+ SOC_SINGLE_REG_RW("Context 1 IEC958 CS3", REG_EASRC_CS3(1)),
+ SOC_SINGLE_REG_RW("Context 2 IEC958 CS3", REG_EASRC_CS3(2)),
+ SOC_SINGLE_REG_RW("Context 3 IEC958 CS3", REG_EASRC_CS3(3)),
+ SOC_SINGLE_REG_RW("Context 0 IEC958 CS4", REG_EASRC_CS4(0)),
+ SOC_SINGLE_REG_RW("Context 1 IEC958 CS4", REG_EASRC_CS4(1)),
+ SOC_SINGLE_REG_RW("Context 2 IEC958 CS4", REG_EASRC_CS4(2)),
+ SOC_SINGLE_REG_RW("Context 3 IEC958 CS4", REG_EASRC_CS4(3)),
+ SOC_SINGLE_REG_RW("Context 0 IEC958 CS5", REG_EASRC_CS5(0)),
+ SOC_SINGLE_REG_RW("Context 1 IEC958 CS5", REG_EASRC_CS5(1)),
+ SOC_SINGLE_REG_RW("Context 2 IEC958 CS5", REG_EASRC_CS5(2)),
+ SOC_SINGLE_REG_RW("Context 3 IEC958 CS5", REG_EASRC_CS5(3)),
+};
+
+/*
+ * fsl_easrc_set_rs_ratio
+ *
+ * According to the number of resampling taps, calculate the
+ * fixed-point resampling ratio: ratio = in_rate / out_rate
+ */
+static int fsl_easrc_set_rs_ratio(struct fsl_asrc_pair *ctx)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct fsl_easrc_ctx_priv *ctx_priv = ctx->private;
+ unsigned int in_rate = ctx_priv->in_params.norm_rate;
+ unsigned int out_rate = ctx_priv->out_params.norm_rate;
+ unsigned int int_bits;
+ unsigned int frac_bits;
+ u64 val;
+ u32 *r;
+
+ switch (easrc_priv->rs_num_taps) {
+ case EASRC_RS_32_TAPS:
+ int_bits = 5;
+ frac_bits = 39;
+ break;
+ case EASRC_RS_64_TAPS:
+ int_bits = 6;
+ frac_bits = 38;
+ break;
+ case EASRC_RS_128_TAPS:
+ int_bits = 7;
+ frac_bits = 37;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = (u64)in_rate << frac_bits;
+ do_div(val, out_rate);
+ r = (uint32_t *)&val;
+
+ if (r[1] & 0xFFFFF000) {
+ dev_err(&easrc->pdev->dev, "ratio exceed range\n");
+ return -EINVAL;
+ }
+
+ regmap_write(easrc->regmap, REG_EASRC_RRL(ctx->index),
+ EASRC_RRL_RS_RL(r[0]));
+ regmap_write(easrc->regmap, REG_EASRC_RRH(ctx->index),
+ EASRC_RRH_RS_RH(r[1]));
+
+ return 0;
+}
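
As a rough user-space model of the fixed-point packing done above (the driver reinterprets the 64-bit result through a u32 pointer, which assumes a little-endian layout; this sketch uses explicit shifts instead), with hypothetical, already-normalized rates:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                unsigned int in_rate = 147, out_rate = 160;  /* 44100 -> 48000, normalized */
                unsigned int frac_bits = 39;                 /* EASRC_RS_32_TAPS: 5.39 format */
                uint64_t val = ((uint64_t)in_rate << frac_bits) / out_rate;
                uint32_t lo = (uint32_t)val;                 /* written to REG_EASRC_RRL */
                uint32_t hi = (uint32_t)(val >> 32);         /* written to REG_EASRC_RRH */

                /* the driver rejects ratios that do not fit the 5 + 39 = 44-bit field */
                if (hi & 0xFFFFF000)
                        printf("ratio exceeds range\n");
                else
                        printf("RRL=0x%08x RRH=0x%03x\n", lo, hi);
                return 0;
        }
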
+
+/* Normalize input and output sample rates */
+static void fsl_easrc_normalize_rates(struct fsl_asrc_pair *ctx)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv;
+ int a, b;
+
+ if (!ctx)
+ return;
+
+ ctx_priv = ctx->private;
+
+ a = ctx_priv->in_params.sample_rate;
+ b = ctx_priv->out_params.sample_rate;
+
+ a = gcd(a, b);
+
+ /* Divide by gcd to normalize the rate */
+ ctx_priv->in_params.norm_rate = ctx_priv->in_params.sample_rate / a;
+ ctx_priv->out_params.norm_rate = ctx_priv->out_params.sample_rate / a;
+}
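
A quick user-space check of the normalization step, assuming a 44.1 kHz to 48 kHz conversion:

        #include <stdio.h>

        static unsigned int gcd_u(unsigned int a, unsigned int b)
        {
                while (b) {
                        unsigned int t = a % b;

                        a = b;
                        b = t;
                }
                return a;
        }

        int main(void)
        {
                unsigned int in = 44100, out = 48000;
                unsigned int g = gcd_u(in, out);

                /* prints: norm_rate in=147 out=160 (gcd=300) */
                printf("norm_rate in=%u out=%u (gcd=%u)\n", in / g, out / g, g);
                return 0;
        }
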
+
+/* Reset the coefficient memory write pointer */
+static int fsl_easrc_coeff_mem_ptr_reset(struct fsl_asrc *easrc,
+ unsigned int ctx_id, int mem_type)
+{
+ struct device *dev;
+ u32 reg, mask, val;
+
+ if (!easrc)
+ return -ENODEV;
+
+ dev = &easrc->pdev->dev;
+
+ switch (mem_type) {
+ case EASRC_PF_COEFF_MEM:
+ /* This resets the prefilter memory pointer addr */
+ if (ctx_id >= EASRC_CTX_MAX_NUM) {
+ dev_err(dev, "Invalid context id[%d]\n", ctx_id);
+ return -EINVAL;
+ }
+
+ reg = REG_EASRC_CCE1(ctx_id);
+ mask = EASRC_CCE1_COEF_MEM_RST_MASK;
+ val = EASRC_CCE1_COEF_MEM_RST;
+ break;
+ case EASRC_RS_COEFF_MEM:
+ /* This resets the resampling memory pointer addr */
+ reg = REG_EASRC_CRCC;
+ mask = EASRC_CRCC_RS_CPR_MASK;
+ val = EASRC_CRCC_RS_CPR;
+ break;
+ default:
+ dev_err(dev, "Unknown memory type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * To reset the write pointer back to zero, the register field
+ * ASRC_CTX_CTRL_EXT1x[PF_COEFF_MEM_RST] can be toggled from
+ * 0x0 to 0x1 to 0x0.
+ */
+ regmap_update_bits(easrc->regmap, reg, mask, 0);
+ regmap_update_bits(easrc->regmap, reg, mask, val);
+ regmap_update_bits(easrc->regmap, reg, mask, 0);
+
+ return 0;
+}
+
+static inline uint32_t bits_taps_to_val(unsigned int t)
+{
+ switch (t) {
+ case EASRC_RS_32_TAPS:
+ return 32;
+ case EASRC_RS_64_TAPS:
+ return 64;
+ case EASRC_RS_128_TAPS:
+ return 128;
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_resampler_config(struct fsl_asrc *easrc)
+{
+ struct device *dev = &easrc->pdev->dev;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct asrc_firmware_hdr *hdr = easrc_priv->firmware_hdr;
+ struct interp_params *interp = easrc_priv->interp;
+ struct interp_params *selected_interp = NULL;
+ unsigned int num_coeff;
+ unsigned int i;
+ u64 *coef;
+ u32 *r;
+ int ret;
+
+ if (!hdr) {
+ dev_err(dev, "firmware not loaded!\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < hdr->interp_scen; i++) {
+ if ((interp[i].num_taps - 1) !=
+ bits_taps_to_val(easrc_priv->rs_num_taps))
+ continue;
+
+ coef = interp[i].coeff;
+ selected_interp = &interp[i];
+ dev_dbg(dev, "Selected interp_filter: %u taps - %u phases\n",
+ selected_interp->num_taps,
+ selected_interp->num_phases);
+ break;
+ }
+
+ if (!selected_interp) {
+ dev_err(dev, "failed to get interpreter configuration\n");
+ return -EINVAL;
+ }
+
+ /*
+ * RS_LOW - first half of center tap of the sinc function
+ * RS_HIGH - second half of center tap of the sinc function
+ * This is because the resampling function must be
+ * symmetrical - i.e. have an odd number of taps
+ */
+ r = (uint32_t *)&selected_interp->center_tap;
+ regmap_write(easrc->regmap, REG_EASRC_RCTCL, EASRC_RCTCL_RS_CL(r[0]));
+ regmap_write(easrc->regmap, REG_EASRC_RCTCH, EASRC_RCTCH_RS_CH(r[1]));
+
+ /*
+ * Write Number of Resampling Coefficient Taps
+ * 00b - 32-Tap Resampling Filter
+ * 01b - 64-Tap Resampling Filter
+ * 10b - 128-Tap Resampling Filter
+ * 11b - N/A
+ */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CRCC,
+ EASRC_CRCC_RS_TAPS_MASK,
+ EASRC_CRCC_RS_TAPS(easrc_priv->rs_num_taps));
+
+ /* Reset prefilter coefficient pointer back to 0 */
+ ret = fsl_easrc_coeff_mem_ptr_reset(easrc, 0, EASRC_RS_COEFF_MEM);
+ if (ret)
+ return ret;
+
+ /*
+ * When the filter is programmed to run in:
+ * 32-tap mode, 16-taps, 128-phases 4-coefficients per phase
+ * 64-tap mode, 32-taps, 64-phases 4-coefficients per phase
+ * 128-tap mode, 64-taps, 32-phases 4-coefficients per phase
+ * This means the number of writes is constant no matter
+ * the mode we are using
+ */
+ num_coeff = 16 * 128 * 4;
+
+ for (i = 0; i < num_coeff; i++) {
+ r = (uint32_t *)&coef[i];
+ regmap_write(easrc->regmap, REG_EASRC_CRCM,
+ EASRC_CRCM_RS_CWD(r[0]));
+ regmap_write(easrc->regmap, REG_EASRC_CRCM,
+ EASRC_CRCM_RS_CWD(r[1]));
+ }
+
+ return 0;
+}
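
A small sanity check of the arithmetic in the comment above, showing why num_coeff can be fixed at 16 * 128 * 4 for every tap mode (this only verifies the counts, it is not driver code):

        #include <stdio.h>

        int main(void)
        {
                /* taps/2 x phases x 4 coefficients per phase, per the comment above */
                static const struct { int taps, phases; } mode[] = {
                        { 32, 128 }, { 64, 64 }, { 128, 32 },
                };
                int i;

                for (i = 0; i < 3; i++)
                        printf("%3d-tap mode: %d coefficient words\n",
                               mode[i].taps, (mode[i].taps / 2) * mode[i].phases * 4);
                /* all three modes give 8192 words, each taking two 32-bit CRCM writes */
                return 0;
        }
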
+
+/*
+ * Scale filter coefficients (64-bit float)
+ * For input float32 in the normalized range (-1.0, 1.0) -> output int[16,24,32]:
+ * scale by multiplying the filter coefficients by 2^31
+ * For input int[16, 24, 32] -> output float32:
+ * scale by multiplying the filter coefficients by 2^-15, 2^-23, 2^-31
+ * input:
+ * easrc: pointer to the fsl_asrc structure
+ * infilter: pointer to the non-scaled input filter
+ * shift: the power-of-two scale factor applied to the exponent
+ * output:
+ * outfilter: the scaled filter
+ */
+static int fsl_easrc_normalize_filter(struct fsl_asrc *easrc,
+ u64 *infilter,
+ u64 *outfilter,
+ int shift)
+{
+ struct device *dev = &easrc->pdev->dev;
+ u64 coef = *infilter;
+ s64 exp = (coef & 0x7ff0000000000000ll) >> 52;
+ u64 outcoef;
+
+ /*
+ * If the exponent is zero (value == 0) or 0x7ff (NaN/Inf),
+ * don't touch the content
+ */
+ if (exp == 0 || exp == 0x7ff) {
+ *outfilter = coef;
+ return 0;
+ }
+
+ /* coef * 2^shift ==> exp + shift */
+ exp += shift;
+
+ if ((shift > 0 && exp >= 0x7ff) || (shift < 0 && exp <= 0)) {
+ dev_err(dev, "coef out of range\n");
+ return -EINVAL;
+ }
+
+ outcoef = (u64)(coef & 0x800FFFFFFFFFFFFFll) + ((u64)exp << 52);
+ *outfilter = outcoef;
+
+ return 0;
+}
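
A minimal user-space sketch of the same exponent-field trick, checked against ldexp(); the driver's range checks are omitted and 0.8125 is just an arbitrary example coefficient:

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>
        #include <math.h>

        /* Scale a binary64 value by 2^shift by adjusting its biased exponent field,
         * mirroring what fsl_easrc_normalize_filter() does (range checks omitted). */
        static double scale_by_exponent(double in, int shift)
        {
                uint64_t bits;
                int64_t exp;

                memcpy(&bits, &in, sizeof(bits));
                exp = (bits & 0x7ff0000000000000ULL) >> 52;
                if (exp == 0 || exp == 0x7ff)     /* zero/denormal or inf/NaN: untouched */
                        return in;
                bits = (bits & 0x800FFFFFFFFFFFFFULL) | ((uint64_t)(exp + shift) << 52);
                memcpy(&in, &bits, sizeof(in));
                return in;
        }

        int main(void)
        {
                double coef = 0.8125;             /* arbitrary example coefficient */

                /* both lines print 1744830464.000000, i.e. 0.8125 * 2^31 */
                printf("%f\n", scale_by_exponent(coef, 31));
                printf("%f\n", ldexp(coef, 31));
                return 0;
        }
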
+
+static int fsl_easrc_write_pf_coeff_mem(struct fsl_asrc *easrc, int ctx_id,
+ u64 *coef, int n_taps, int shift)
+{
+ struct device *dev = &easrc->pdev->dev;
+ int ret = 0;
+ int i;
+ u32 *r;
+ u64 tmp;
+
+ /* If STx_NUM_TAPS is set to 0x0 then return */
+ if (!n_taps)
+ return 0;
+
+ if (!coef) {
+ dev_err(dev, "coef table is NULL\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When switching between stages, the address pointer
+ * should be reset back to 0x0 before performing a write
+ */
+ ret = fsl_easrc_coeff_mem_ptr_reset(easrc, ctx_id, EASRC_PF_COEFF_MEM);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < (n_taps + 1) / 2; i++) {
+ ret = fsl_easrc_normalize_filter(easrc, &coef[i], &tmp, shift);
+ if (ret)
+ return ret;
+
+ r = (uint32_t *)&tmp;
+ regmap_write(easrc->regmap, REG_EASRC_PCF(ctx_id),
+ EASRC_PCF_CD(r[0]));
+ regmap_write(easrc->regmap, REG_EASRC_PCF(ctx_id),
+ EASRC_PCF_CD(r[1]));
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_prefilter_config(struct fsl_asrc *easrc,
+ unsigned int ctx_id)
+{
+ struct prefil_params *prefil, *selected_prefil = NULL;
+ struct fsl_easrc_ctx_priv *ctx_priv;
+ struct fsl_easrc_priv *easrc_priv;
+ struct asrc_firmware_hdr *hdr;
+ struct fsl_asrc_pair *ctx;
+ struct device *dev;
+ u32 inrate, outrate, offset = 0;
+ u32 in_s_rate, out_s_rate, in_s_fmt, out_s_fmt;
+ int ret, i;
+
+ if (!easrc)
+ return -ENODEV;
+
+ dev = &easrc->pdev->dev;
+
+ if (ctx_id >= EASRC_CTX_MAX_NUM) {
+ dev_err(dev, "Invalid context id[%d]\n", ctx_id);
+ return -EINVAL;
+ }
+
+ easrc_priv = easrc->private;
+
+ ctx = easrc->pair[ctx_id];
+ ctx_priv = ctx->private;
+
+ in_s_rate = ctx_priv->in_params.sample_rate;
+ out_s_rate = ctx_priv->out_params.sample_rate;
+ in_s_fmt = ctx_priv->in_params.sample_format;
+ out_s_fmt = ctx_priv->out_params.sample_format;
+
+ ctx_priv->in_filled_sample = bits_taps_to_val(easrc_priv->rs_num_taps) / 2;
+ ctx_priv->out_missed_sample = ctx_priv->in_filled_sample * out_s_rate / in_s_rate;
+
+ ctx_priv->st1_num_taps = 0;
+ ctx_priv->st2_num_taps = 0;
+
+ regmap_write(easrc->regmap, REG_EASRC_CCE1(ctx_id), 0);
+ regmap_write(easrc->regmap, REG_EASRC_CCE2(ctx_id), 0);
+
+ /*
+ * The audio floating point data range is (-1, 1). For floating point
+ * input and integer output the ASRC would output all zeros, because
+ * dropping the fractional part of the data directly discards the
+ * whole sample.
+ *
+ * In order to support float to int conversion or int to float
+ * conversion we need to do a special operation on the coefficients to
+ * enlarge/reduce the data to the expected range.
+ *
+ * For the float to int case:
+ * Up sampling:
+ * 1. Create a 1 tap filter with center tap (only tap) of 2^31
+ * in 64 bits floating point.
+ * double value = (double)(((uint64_t)1) << 31)
+ * 2. Program a 1 tap prefilter with the center tap above.
+ *
+ * Down sampling:
+ * 1. If the filter is a single stage filter, add "shift" to the
+ * exponent of the stage 1 coefficients.
+ * 2. If the filter is a two stage filter, add "shift" to the exponent
+ * of the stage 2 coefficients.
+ *
+ * The "shift" is 31, the same for the int16, int24 and int32 cases.
+ *
+ * For the int to float case:
+ * Up sampling:
+ * 1. Create a 1 tap filter with center tap (only tap) of 2^-31
+ * in 64 bits floating point.
+ * 2. Program a 1 tap prefilter with the center tap above.
+ *
+ * Down sampling:
+ * 1. If the filter is a single stage filter, subtract "shift" from
+ * the exponent of the stage 1 coefficients.
+ * 2. If the filter is a two stage filter, subtract "shift" from the
+ * exponent of the stage 2 coefficients.
+ *
+ * The "shift" is 15, 23 or 31 for the int16, int24 and int32 cases
+ * respectively.
+ */
+ if (out_s_rate >= in_s_rate) {
+ if (out_s_rate == in_s_rate)
+ regmap_update_bits(easrc->regmap,
+ REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_RS_BYPASS_MASK,
+ EASRC_CCE1_RS_BYPASS);
+
+ ctx_priv->st1_num_taps = 1;
+ ctx_priv->st1_coeff = &easrc_priv->const_coeff;
+ ctx_priv->st1_num_exp = 1;
+ ctx_priv->st2_num_taps = 0;
+
+ if (in_s_fmt == SNDRV_PCM_FORMAT_FLOAT_LE &&
+ out_s_fmt != SNDRV_PCM_FORMAT_FLOAT_LE)
+ ctx_priv->st1_addexp = 31;
+ else if (in_s_fmt != SNDRV_PCM_FORMAT_FLOAT_LE &&
+ out_s_fmt == SNDRV_PCM_FORMAT_FLOAT_LE)
+ ctx_priv->st1_addexp -= ctx_priv->in_params.fmt.addexp;
+ } else {
+ inrate = ctx_priv->in_params.norm_rate;
+ outrate = ctx_priv->out_params.norm_rate;
+
+ hdr = easrc_priv->firmware_hdr;
+ prefil = easrc_priv->prefil;
+
+ for (i = 0; i < hdr->prefil_scen; i++) {
+ if (inrate == prefil[i].insr &&
+ outrate == prefil[i].outsr) {
+ selected_prefil = &prefil[i];
+ dev_dbg(dev, "Selected prefilter: %u insr, %u outsr, %u st1_taps, %u st2_taps\n",
+ selected_prefil->insr,
+ selected_prefil->outsr,
+ selected_prefil->st1_taps,
+ selected_prefil->st2_taps);
+ break;
+ }
+ }
+
+ if (!selected_prefil) {
+ dev_err(dev, "Conversion from in ratio %u(%u) to out ratio %u(%u) is not supported\n",
+ in_s_rate, inrate,
+ out_s_rate, outrate);
+ return -EINVAL;
+ }
+
+ /*
+ * In the prefilter coeff array, the first st1_num_taps entries are
+ * the stage 1 prefilter coefficients, followed by st2_num_taps
+ * entries for the stage 2 coefficients
+ */
+ ctx_priv->st1_num_taps = selected_prefil->st1_taps;
+ ctx_priv->st1_coeff = selected_prefil->coeff;
+ ctx_priv->st1_num_exp = selected_prefil->st1_exp;
+
+ offset = ((selected_prefil->st1_taps + 1) / 2);
+ ctx_priv->st2_num_taps = selected_prefil->st2_taps;
+ ctx_priv->st2_coeff = selected_prefil->coeff + offset;
+
+ if (in_s_fmt == SNDRV_PCM_FORMAT_FLOAT_LE &&
+ out_s_fmt != SNDRV_PCM_FORMAT_FLOAT_LE) {
+ /* only change stage2 coefficient for 2 stage case */
+ if (ctx_priv->st2_num_taps > 0)
+ ctx_priv->st2_addexp = 31;
+ else
+ ctx_priv->st1_addexp = 31;
+ } else if (in_s_fmt != SNDRV_PCM_FORMAT_FLOAT_LE &&
+ out_s_fmt == SNDRV_PCM_FORMAT_FLOAT_LE) {
+ if (ctx_priv->st2_num_taps > 0)
+ ctx_priv->st2_addexp -= ctx_priv->in_params.fmt.addexp;
+ else
+ ctx_priv->st1_addexp -= ctx_priv->in_params.fmt.addexp;
+ }
+ }
+
+ ctx_priv->in_filled_sample += (ctx_priv->st1_num_taps / 2) * ctx_priv->st1_num_exp +
+ ctx_priv->st2_num_taps / 2;
+ ctx_priv->out_missed_sample = ctx_priv->in_filled_sample * out_s_rate / in_s_rate;
+
+ if (ctx_priv->in_filled_sample * out_s_rate % in_s_rate != 0)
+ ctx_priv->out_missed_sample += 1;
+ /*
+ * To modify the value of a prefilter coefficient, the user must
+ * perform a write to the register ASRC_PRE_COEFF_FIFOn[COEFF_DATA]
+ * while the respective context RUN_EN bit is set to 0b0
+ */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx_id),
+ EASRC_CC_EN_MASK, 0);
+
+ if (ctx_priv->st1_num_taps > EASRC_MAX_PF_TAPS) {
+ dev_err(dev, "ST1 taps [%d] mus be lower than %d\n",
+ ctx_priv->st1_num_taps, EASRC_MAX_PF_TAPS);
+ ret = -EINVAL;
+ goto ctx_error;
+ }
+
+ /* Update ctx ST1_NUM_TAPS in Context Control Extended 2 register */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE2(ctx_id),
+ EASRC_CCE2_ST1_TAPS_MASK,
+ EASRC_CCE2_ST1_TAPS(ctx_priv->st1_num_taps - 1));
+
+ /* Prefilter Coefficient Write Select to write in ST1 coeff */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_COEF_WS_MASK,
+ EASRC_PF_ST1_COEFF_WR << EASRC_CCE1_COEF_WS_SHIFT);
+
+ ret = fsl_easrc_write_pf_coeff_mem(easrc, ctx_id,
+ ctx_priv->st1_coeff,
+ ctx_priv->st1_num_taps,
+ ctx_priv->st1_addexp);
+ if (ret)
+ goto ctx_error;
+
+ if (ctx_priv->st2_num_taps > 0) {
+ if (ctx_priv->st2_num_taps + ctx_priv->st1_num_taps > EASRC_MAX_PF_TAPS) {
+ dev_err(dev, "ST2 taps [%d] mus be lower than %d\n",
+ ctx_priv->st2_num_taps, EASRC_MAX_PF_TAPS);
+ ret = -EINVAL;
+ goto ctx_error;
+ }
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_PF_TSEN_MASK,
+ EASRC_CCE1_PF_TSEN);
+ /*
+ * Enable prefilter stage1 writeback floating point
+ * which is used for FLOAT_LE case
+ */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_PF_ST1_WBFP_MASK,
+ EASRC_CCE1_PF_ST1_WBFP);
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_PF_EXP_MASK,
+ EASRC_CCE1_PF_EXP(ctx_priv->st1_num_exp - 1));
+
+ /* Update ctx ST2_NUM_TAPS in Context Control Extended 2 reg */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE2(ctx_id),
+ EASRC_CCE2_ST2_TAPS_MASK,
+ EASRC_CCE2_ST2_TAPS(ctx_priv->st2_num_taps - 1));
+
+ /* Prefilter Coefficient Write Select to write in ST2 coeff */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_COEF_WS_MASK,
+ EASRC_PF_ST2_COEFF_WR << EASRC_CCE1_COEF_WS_SHIFT);
+
+ ret = fsl_easrc_write_pf_coeff_mem(easrc, ctx_id,
+ ctx_priv->st2_coeff,
+ ctx_priv->st2_num_taps,
+ ctx_priv->st2_addexp);
+ if (ret)
+ goto ctx_error;
+ }
+
+ return 0;
+
+ctx_error:
+ return ret;
+}
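
To illustrate the 1-tap case described in the comment inside this function: const_coeff (set to 0x3FF0000000000000 in the probe path below) decodes to 1.0, and applying st1_addexp = 31 on write turns it into the 2^31 center tap mentioned above. A small stand-alone check, not driver code:

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        int main(void)
        {
                uint64_t bits = 0x3FF0000000000000ULL;   /* easrc_priv->const_coeff */
                double coef;

                memcpy(&coef, &bits, sizeof(coef));
                printf("const_coeff = %.1f\n", coef);    /* prints 1.0 */

                /* st1_addexp = 31 adds 31 to the exponent field when the coefficient
                 * is written, turning the single tap into 2^31 for float -> int */
                bits += (uint64_t)31 << 52;
                memcpy(&coef, &bits, sizeof(coef));
                printf("scaled tap  = %.1f\n", coef);    /* prints 2147483648.0 */
                return 0;
        }
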
+
+static int fsl_easrc_max_ch_for_slot(struct fsl_asrc_pair *ctx,
+ struct fsl_easrc_slot *slot)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv = ctx->private;
+ int st1_mem_alloc = 0, st2_mem_alloc = 0;
+ int pf_mem_alloc = 0;
+ int max_channels = 8 - slot->num_channel;
+ int channels = 0;
+
+ if (ctx_priv->st1_num_taps > 0) {
+ if (ctx_priv->st2_num_taps > 0)
+ st1_mem_alloc =
+ (ctx_priv->st1_num_taps - 1) * ctx_priv->st1_num_exp + 1;
+ else
+ st1_mem_alloc = ctx_priv->st1_num_taps;
+ }
+
+ if (ctx_priv->st2_num_taps > 0)
+ st2_mem_alloc = ctx_priv->st2_num_taps;
+
+ pf_mem_alloc = st1_mem_alloc + st2_mem_alloc;
+
+ if (pf_mem_alloc != 0)
+ channels = (6144 - slot->pf_mem_used) / pf_mem_alloc;
+ else
+ channels = 8;
+
+ if (channels < max_channels)
+ max_channels = channels;
+
+ return max_channels;
+}
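
A rough model of the memory budget computed above, using a hypothetical two-stage prefilter (the tap and exponent counts are made up for illustration; the real values come from the loaded firmware):

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical two-stage prefilter scenario (values are made up) */
                int st1_taps = 33, st1_exp = 2, st2_taps = 31;
                int pf_mem_used = 0;              /* nothing used by the other slot yet */
                int per_ch = (st1_taps - 1) * st1_exp + 1 + st2_taps;
                int by_mem = (6144 - pf_mem_used) / per_ch;
                int by_slot = 8;                  /* hard limit per processing pipe */

                /* prints: words/channel=96 by_mem=64 usable=8 */
                printf("words/channel=%d by_mem=%d usable=%d\n",
                       per_ch, by_mem, by_mem < by_slot ? by_mem : by_slot);
                return 0;
        }
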
+
+static int fsl_easrc_config_one_slot(struct fsl_asrc_pair *ctx,
+ struct fsl_easrc_slot *slot,
+ unsigned int slot_ctx_idx,
+ unsigned int *req_channels,
+ unsigned int *start_channel,
+ unsigned int *avail_channel)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+ struct fsl_easrc_ctx_priv *ctx_priv = ctx->private;
+ int st1_chanxexp, st1_mem_alloc = 0, st2_mem_alloc = 0;
+ unsigned int reg0, reg1, reg2, reg3;
+ unsigned int addr;
+
+ if (slot->slot_index == 0) {
+ reg0 = REG_EASRC_DPCS0R0(slot_ctx_idx);
+ reg1 = REG_EASRC_DPCS0R1(slot_ctx_idx);
+ reg2 = REG_EASRC_DPCS0R2(slot_ctx_idx);
+ reg3 = REG_EASRC_DPCS0R3(slot_ctx_idx);
+ } else {
+ reg0 = REG_EASRC_DPCS1R0(slot_ctx_idx);
+ reg1 = REG_EASRC_DPCS1R1(slot_ctx_idx);
+ reg2 = REG_EASRC_DPCS1R2(slot_ctx_idx);
+ reg3 = REG_EASRC_DPCS1R3(slot_ctx_idx);
+ }
+
+ if (*req_channels <= *avail_channel) {
+ slot->num_channel = *req_channels;
+ *req_channels = 0;
+ } else {
+ slot->num_channel = *avail_channel;
+ *req_channels -= *avail_channel;
+ }
+
+ slot->min_channel = *start_channel;
+ slot->max_channel = *start_channel + slot->num_channel - 1;
+ slot->ctx_index = ctx->index;
+ slot->busy = true;
+ *start_channel += slot->num_channel;
+
+ regmap_update_bits(easrc->regmap, reg0,
+ EASRC_DPCS0R0_MAXCH_MASK,
+ EASRC_DPCS0R0_MAXCH(slot->max_channel));
+
+ regmap_update_bits(easrc->regmap, reg0,
+ EASRC_DPCS0R0_MINCH_MASK,
+ EASRC_DPCS0R0_MINCH(slot->min_channel));
+
+ regmap_update_bits(easrc->regmap, reg0,
+ EASRC_DPCS0R0_NUMCH_MASK,
+ EASRC_DPCS0R0_NUMCH(slot->num_channel - 1));
+
+ regmap_update_bits(easrc->regmap, reg0,
+ EASRC_DPCS0R0_CTXNUM_MASK,
+ EASRC_DPCS0R0_CTXNUM(slot->ctx_index));
+
+ if (ctx_priv->st1_num_taps > 0) {
+ if (ctx_priv->st2_num_taps > 0)
+ st1_mem_alloc =
+ (ctx_priv->st1_num_taps - 1) * slot->num_channel *
+ ctx_priv->st1_num_exp + slot->num_channel;
+ else
+ st1_mem_alloc = ctx_priv->st1_num_taps * slot->num_channel;
+
+ slot->pf_mem_used = st1_mem_alloc;
+ regmap_update_bits(easrc->regmap, reg2,
+ EASRC_DPCS0R2_ST1_MA_MASK,
+ EASRC_DPCS0R2_ST1_MA(st1_mem_alloc));
+
+ if (slot->slot_index == 1)
+ addr = PREFILTER_MEM_LEN - st1_mem_alloc;
+ else
+ addr = 0;
+
+ regmap_update_bits(easrc->regmap, reg2,
+ EASRC_DPCS0R2_ST1_SA_MASK,
+ EASRC_DPCS0R2_ST1_SA(addr));
+ }
+
+ if (ctx_priv->st2_num_taps > 0) {
+ st1_chanxexp = slot->num_channel * (ctx_priv->st1_num_exp - 1);
+
+ regmap_update_bits(easrc->regmap, reg1,
+ EASRC_DPCS0R1_ST1_EXP_MASK,
+ EASRC_DPCS0R1_ST1_EXP(st1_chanxexp));
+
+ st2_mem_alloc = slot->num_channel * ctx_priv->st2_num_taps;
+ slot->pf_mem_used += st2_mem_alloc;
+ regmap_update_bits(easrc->regmap, reg3,
+ EASRC_DPCS0R3_ST2_MA_MASK,
+ EASRC_DPCS0R3_ST2_MA(st2_mem_alloc));
+
+ if (slot->slot_index == 1)
+ addr = PREFILTER_MEM_LEN - st1_mem_alloc - st2_mem_alloc;
+ else
+ addr = st1_mem_alloc;
+
+ regmap_update_bits(easrc->regmap, reg3,
+ EASRC_DPCS0R3_ST2_SA_MASK,
+ EASRC_DPCS0R3_ST2_SA(addr));
+ }
+
+ regmap_update_bits(easrc->regmap, reg0,
+ EASRC_DPCS0R0_EN_MASK, EASRC_DPCS0R0_EN);
+
+ return 0;
+}
+
+/*
+ * fsl_easrc_config_slot
+ *
+ * A single context can be split amongst any of the 4 context processing pipes
+ * in the design.
+ * The total number of channels consumed within the context processor must be
+ * less than or equal to 8. If a single context is configured to contain more
+ * than 8 channels then it must be distributed across multiple context
+ * processing pipe slots.
+ *
+ */
+static int fsl_easrc_config_slot(struct fsl_asrc *easrc, unsigned int ctx_id)
+{
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct fsl_asrc_pair *ctx = easrc->pair[ctx_id];
+ int req_channels = ctx->channels;
+ int start_channel = 0, avail_channel;
+ struct fsl_easrc_slot *slot0, *slot1;
+ struct fsl_easrc_slot *slota, *slotb;
+ int i, ret;
+
+ if (req_channels <= 0)
+ return -EINVAL;
+
+ for (i = 0; i < EASRC_CTX_MAX_NUM; i++) {
+ slot0 = &easrc_priv->slot[i][0];
+ slot1 = &easrc_priv->slot[i][1];
+
+ if (slot0->busy && slot1->busy) {
+ continue;
+ } else if ((slot0->busy && slot0->ctx_index == ctx->index) ||
+ (slot1->busy && slot1->ctx_index == ctx->index)) {
+ continue;
+ } else if (!slot0->busy) {
+ slota = slot0;
+ slotb = slot1;
+ slota->slot_index = 0;
+ } else if (!slot1->busy) {
+ slota = slot1;
+ slotb = slot0;
+ slota->slot_index = 1;
+ }
+
+ if (!slota || !slotb)
+ continue;
+
+ avail_channel = fsl_easrc_max_ch_for_slot(ctx, slotb);
+ if (avail_channel <= 0)
+ continue;
+
+ ret = fsl_easrc_config_one_slot(ctx, slota, i, &req_channels,
+ &start_channel, &avail_channel);
+ if (ret)
+ return ret;
+
+ if (req_channels > 0)
+ continue;
+ else
+ break;
+ }
+
+ if (req_channels > 0) {
+ dev_err(&easrc->pdev->dev, "no avail slot.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
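
A simplified walk-through of the splitting rule described in the comment above, assuming a hypothetical 10-channel context and empty slots that can each take 8 channels:

        #include <stdio.h>

        int main(void)
        {
                /* hypothetical 10-channel context; each empty slot can take 8 channels */
                int req = 10, avail = 8, start = 0, pipe = 0;

                while (req > 0 && pipe < 4) {
                        int take = req < avail ? req : avail;

                        printf("pipe %d: channels %d..%d\n", pipe, start, start + take - 1);
                        start += take;
                        req -= take;
                        pipe++;
                }
                if (req > 0)
                        printf("no available slot\n");
                return 0;
        }
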
+
+/*
+ * fsl_easrc_release_slot
+ *
+ * Clear the slot configuration
+ */
+static int fsl_easrc_release_slot(struct fsl_asrc *easrc, unsigned int ctx_id)
+{
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct fsl_asrc_pair *ctx = easrc->pair[ctx_id];
+ int i;
+
+ for (i = 0; i < EASRC_CTX_MAX_NUM; i++) {
+ if (easrc_priv->slot[i][0].busy &&
+ easrc_priv->slot[i][0].ctx_index == ctx->index) {
+ easrc_priv->slot[i][0].busy = false;
+ easrc_priv->slot[i][0].num_channel = 0;
+ easrc_priv->slot[i][0].pf_mem_used = 0;
+ /* set registers */
+ regmap_write(easrc->regmap, REG_EASRC_DPCS0R0(i), 0);
+ regmap_write(easrc->regmap, REG_EASRC_DPCS0R1(i), 0);
+ regmap_write(easrc->regmap, REG_EASRC_DPCS0R2(i), 0);
+ regmap_write(easrc->regmap, REG_EASRC_DPCS0R3(i), 0);
+ }
+
+ if (easrc_priv->slot[i][1].busy &&
+ easrc_priv->slot[i][1].ctx_index == ctx->index) {
+ easrc_priv->slot[i][1].busy = false;
+ easrc_priv->slot[i][1].num_channel = 0;
+ easrc_priv->slot[i][1].pf_mem_used = 0;
+ /* set registers */
+ regmap_write(easrc->regmap, REG_EASRC_DPCS1R0(i), 0);
+ regmap_write(easrc->regmap, REG_EASRC_DPCS1R1(i), 0);
+ regmap_write(easrc->regmap, REG_EASRC_DPCS1R2(i), 0);
+ regmap_write(easrc->regmap, REG_EASRC_DPCS1R3(i), 0);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * fsl_easrc_config_context
+ *
+ * Configure the registers related to the context.
+ */
+int fsl_easrc_config_context(struct fsl_asrc *easrc, unsigned int ctx_id)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv;
+ struct fsl_asrc_pair *ctx;
+ struct device *dev;
+ unsigned long lock_flags;
+ int ret;
+
+ if (!easrc)
+ return -ENODEV;
+
+ dev = &easrc->pdev->dev;
+
+ if (ctx_id >= EASRC_CTX_MAX_NUM) {
+ dev_err(dev, "Invalid context id[%d]\n", ctx_id);
+ return -EINVAL;
+ }
+
+ ctx = easrc->pair[ctx_id];
+
+ ctx_priv = ctx->private;
+
+ fsl_easrc_normalize_rates(ctx);
+
+ ret = fsl_easrc_set_rs_ratio(ctx);
+ if (ret)
+ return ret;
+
+ /* Initialize the context coefficients */
+ ret = fsl_easrc_prefilter_config(easrc, ctx->index);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&easrc->lock, lock_flags);
+ ret = fsl_easrc_config_slot(easrc, ctx->index);
+ spin_unlock_irqrestore(&easrc->lock, lock_flags);
+ if (ret)
+ return ret;
+
+ /*
+ * Both prefilter and resampling filters can use following
+ * initialization modes:
+ * 2 - zero-fill mode
+ * 1 - replication mode
+ * 0 - software control
+ */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_RS_INIT_MASK,
+ EASRC_CCE1_RS_INIT(ctx_priv->rs_init_mode));
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_CCE1(ctx_id),
+ EASRC_CCE1_PF_INIT_MASK,
+ EASRC_CCE1_PF_INIT(ctx_priv->pf_init_mode));
+
+ /*
+ * Context Input FIFO Watermark
+ * DMA request is generated when input FIFO < FIFO_WTMK
+ */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx_id),
+ EASRC_CC_FIFO_WTMK_MASK,
+ EASRC_CC_FIFO_WTMK(ctx_priv->in_params.fifo_wtmk));
+
+ /*
+ * Context Output FIFO Watermark
+ * DMA request is generated when output FIFO > FIFO_WTMK
+ * So we write fifo_wtmk - 1 to the register.
+ */
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx_id),
+ EASRC_COC_FIFO_WTMK_MASK,
+ EASRC_COC_FIFO_WTMK(ctx_priv->out_params.fifo_wtmk - 1));
+
+ /* Number of channels */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx_id),
+ EASRC_CC_CHEN_MASK,
+ EASRC_CC_CHEN(ctx->channels - 1));
+ return 0;
+}
+
+static int fsl_easrc_process_format(struct fsl_asrc_pair *ctx,
+ struct fsl_easrc_data_fmt *fmt,
+ snd_pcm_format_t raw_fmt)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ int ret;
+
+ if (!fmt)
+ return -EINVAL;
+
+ /*
+ * Context Input Floating Point Format
+ * 0 - Integer Format
+ * 1 - Single Precision FP Format
+ */
+ fmt->floating_point = !snd_pcm_format_linear(raw_fmt);
+ fmt->sample_pos = 0;
+ fmt->iec958 = 0;
+
+ /* Get the data width */
+ switch (snd_pcm_format_width(raw_fmt)) {
+ case 16:
+ fmt->width = EASRC_WIDTH_16_BIT;
+ fmt->addexp = 15;
+ break;
+ case 20:
+ fmt->width = EASRC_WIDTH_20_BIT;
+ fmt->addexp = 19;
+ break;
+ case 24:
+ fmt->width = EASRC_WIDTH_24_BIT;
+ fmt->addexp = 23;
+ break;
+ case 32:
+ fmt->width = EASRC_WIDTH_32_BIT;
+ fmt->addexp = 31;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (raw_fmt) {
+ case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+ fmt->width = easrc_priv->bps_iec958[ctx->index];
+ fmt->iec958 = 1;
+ fmt->floating_point = 0;
+ if (fmt->width == EASRC_WIDTH_16_BIT) {
+ fmt->sample_pos = 12;
+ fmt->addexp = 15;
+ } else if (fmt->width == EASRC_WIDTH_20_BIT) {
+ fmt->sample_pos = 8;
+ fmt->addexp = 19;
+ } else if (fmt->width == EASRC_WIDTH_24_BIT) {
+ fmt->sample_pos = 4;
+ fmt->addexp = 23;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Data Endianness
+ * 0 - Little-Endian
+ * 1 - Big-Endian
+ */
+ ret = snd_pcm_format_big_endian(raw_fmt);
+ if (ret < 0)
+ return ret;
+
+ fmt->endianness = ret;
+
+ /*
+ * Input Data sign
+ * 0b - Signed Format
+ * 1b - Unsigned Format
+ */
+ fmt->unsign = snd_pcm_format_unsigned(raw_fmt) > 0 ? 1 : 0;
+
+ return 0;
+}
+
+int fsl_easrc_set_ctx_format(struct fsl_asrc_pair *ctx,
+ snd_pcm_format_t *in_raw_format,
+ snd_pcm_format_t *out_raw_format)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+ struct fsl_easrc_ctx_priv *ctx_priv = ctx->private;
+ struct fsl_easrc_data_fmt *in_fmt = &ctx_priv->in_params.fmt;
+ struct fsl_easrc_data_fmt *out_fmt = &ctx_priv->out_params.fmt;
+ int ret;
+
+ /* Get the bitfield values for input data format */
+ if (in_raw_format && out_raw_format) {
+ ret = fsl_easrc_process_format(ctx, in_fmt, *in_raw_format);
+ if (ret)
+ return ret;
+ }
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_BPS_MASK,
+ EASRC_CC_BPS(in_fmt->width));
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_ENDIANNESS_MASK,
+ in_fmt->endianness << EASRC_CC_ENDIANNESS_SHIFT);
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_FMT_MASK,
+ in_fmt->floating_point << EASRC_CC_FMT_SHIFT);
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_INSIGN_MASK,
+ in_fmt->unsign << EASRC_CC_INSIGN_SHIFT);
+
+ /* In Sample Position */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_SAMPLE_POS_MASK,
+ EASRC_CC_SAMPLE_POS(in_fmt->sample_pos));
+
+ /* Get the bitfield values for output data format */
+ if (in_raw_format && out_raw_format) {
+ ret = fsl_easrc_process_format(ctx, out_fmt, *out_raw_format);
+ if (ret)
+ return ret;
+ }
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_BPS_MASK,
+ EASRC_COC_BPS(out_fmt->width));
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_ENDIANNESS_MASK,
+ out_fmt->endianness << EASRC_COC_ENDIANNESS_SHIFT);
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_FMT_MASK,
+ out_fmt->floating_point << EASRC_COC_FMT_SHIFT);
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_OUTSIGN_MASK,
+ out_fmt->unsign << EASRC_COC_OUTSIGN_SHIFT);
+
+ /* Out Sample Position */
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_SAMPLE_POS_MASK,
+ EASRC_COC_SAMPLE_POS(out_fmt->sample_pos));
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_IEC_EN_MASK,
+ out_fmt->iec958 << EASRC_COC_IEC_EN_SHIFT);
+
+ return 0;
+}
+
+/*
+ * The ASRC provides interleaving support in hardware to ensure that a
+ * variety of sample sources can be internally combined to conform with
+ * the context's data format. Interleaving parameters are accessed
+ * through the ASRC_CTRL_IN_ACCESSa and ASRC_CTRL_OUT_ACCESSa registers
+ */
+int fsl_easrc_set_ctx_organziation(struct fsl_asrc_pair *ctx)
+{
+ struct fsl_easrc_ctx_priv *ctx_priv;
+ struct device *dev;
+ struct fsl_asrc *easrc;
+
+ if (!ctx)
+ return -ENODEV;
+
+ easrc = ctx->asrc;
+ ctx_priv = ctx->private;
+ dev = &easrc->pdev->dev;
+
+ /* input interleaving parameters */
+ regmap_update_bits(easrc->regmap, REG_EASRC_CIA(ctx->index),
+ EASRC_CIA_ITER_MASK,
+ EASRC_CIA_ITER(ctx_priv->in_params.iterations));
+ regmap_update_bits(easrc->regmap, REG_EASRC_CIA(ctx->index),
+ EASRC_CIA_GRLEN_MASK,
+ EASRC_CIA_GRLEN(ctx_priv->in_params.group_len));
+ regmap_update_bits(easrc->regmap, REG_EASRC_CIA(ctx->index),
+ EASRC_CIA_ACCLEN_MASK,
+ EASRC_CIA_ACCLEN(ctx_priv->in_params.access_len));
+
+ /* output interleaving parameters */
+ regmap_update_bits(easrc->regmap, REG_EASRC_COA(ctx->index),
+ EASRC_COA_ITER_MASK,
+ EASRC_COA_ITER(ctx_priv->out_params.iterations));
+ regmap_update_bits(easrc->regmap, REG_EASRC_COA(ctx->index),
+ EASRC_COA_GRLEN_MASK,
+ EASRC_COA_GRLEN(ctx_priv->out_params.group_len));
+ regmap_update_bits(easrc->regmap, REG_EASRC_COA(ctx->index),
+ EASRC_COA_ACCLEN_MASK,
+ EASRC_COA_ACCLEN(ctx_priv->out_params.access_len));
+
+ return 0;
+}
+
+/*
+ * Request one of the available contexts
+ *
+ * Returns a negative number on error and zero on success; the id of
+ * the allocated context is stored in ctx->index
+ */
+int fsl_easrc_request_context(int channels, struct fsl_asrc_pair *ctx)
+{
+ enum asrc_pair_index index = ASRC_INVALID_PAIR;
+ struct fsl_asrc *easrc = ctx->asrc;
+ struct device *dev;
+ unsigned long lock_flags;
+ int ret = 0;
+ int i;
+
+ dev = &easrc->pdev->dev;
+
+ spin_lock_irqsave(&easrc->lock, lock_flags);
+
+ for (i = ASRC_PAIR_A; i < EASRC_CTX_MAX_NUM; i++) {
+ if (easrc->pair[i])
+ continue;
+
+ index = i;
+ break;
+ }
+
+ if (index == ASRC_INVALID_PAIR) {
+ dev_err(dev, "all contexts are busy\n");
+ ret = -EBUSY;
+ } else if (channels > easrc->channel_avail) {
+ dev_err(dev, "can't give the required channels: %d\n",
+ channels);
+ ret = -EINVAL;
+ } else {
+ ctx->index = index;
+ ctx->channels = channels;
+ easrc->pair[index] = ctx;
+ easrc->channel_avail -= channels;
+ }
+
+ spin_unlock_irqrestore(&easrc->lock, lock_flags);
+
+ return ret;
+}
+
+/*
+ * Release the context
+ *
+ * This function mainly reverts what fsl_easrc_request_context() did
+ */
+void fsl_easrc_release_context(struct fsl_asrc_pair *ctx)
+{
+ unsigned long lock_flags;
+ struct fsl_asrc *easrc;
+ struct device *dev;
+
+ if (!ctx)
+ return;
+
+ easrc = ctx->asrc;
+ dev = &easrc->pdev->dev;
+
+ spin_lock_irqsave(&easrc->lock, lock_flags);
+
+ fsl_easrc_release_slot(easrc, ctx->index);
+
+ easrc->channel_avail += ctx->channels;
+ easrc->pair[ctx->index] = NULL;
+
+ spin_unlock_irqrestore(&easrc->lock, lock_flags);
+}
+
+/*
+ * Start the context
+ *
+ * Enable the DMA request and context
+ */
+int fsl_easrc_start_context(struct fsl_asrc_pair *ctx)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_FWMDE_MASK, EASRC_CC_FWMDE);
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_FWMDE_MASK, EASRC_COC_FWMDE);
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_EN_MASK, EASRC_CC_EN);
+ return 0;
+}
+
+/*
+ * Stop the context
+ *
+ * Disable the DMA request and context
+ */
+int fsl_easrc_stop_context(struct fsl_asrc_pair *ctx)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+ int val, i;
+ int size = 0;
+ int retry = 200;
+
+ regmap_read(easrc->regmap, REG_EASRC_CC(ctx->index), &val);
+
+ if (val & EASRC_CC_EN_MASK) {
+ regmap_update_bits(easrc->regmap,
+ REG_EASRC_CC(ctx->index),
+ EASRC_CC_STOP_MASK, EASRC_CC_STOP);
+ do {
+ regmap_read(easrc->regmap, REG_EASRC_SFS(ctx->index), &val);
+ val &= EASRC_SFS_NSGO_MASK;
+ size = val >> EASRC_SFS_NSGO_SHIFT;
+
+ /* Read FIFO, drop the data */
+ for (i = 0; i < size * ctx->channels; i++)
+ regmap_read(easrc->regmap, REG_EASRC_RDFIFO(ctx->index), &val);
+ /* Check RUN_STOP_DONE */
+ regmap_read(easrc->regmap, REG_EASRC_IRQF, &val);
+ if (val & EASRC_IRQF_RSD(1 << ctx->index)) {
+ /* Clear RUN_STOP_DONE */
+ regmap_write_bits(easrc->regmap,
+ REG_EASRC_IRQF,
+ EASRC_IRQF_RSD(1 << ctx->index),
+ EASRC_IRQF_RSD(1 << ctx->index));
+ break;
+ }
+ udelay(100);
+ } while (--retry);
+
+ if (retry == 0)
+ dev_warn(&easrc->pdev->dev, "RUN STOP fail\n");
+ }
+
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_EN_MASK | EASRC_CC_STOP_MASK, 0);
+ regmap_update_bits(easrc->regmap, REG_EASRC_CC(ctx->index),
+ EASRC_CC_FWMDE_MASK, 0);
+ regmap_update_bits(easrc->regmap, REG_EASRC_COC(ctx->index),
+ EASRC_COC_FWMDE_MASK, 0);
+ return 0;
+}
+
+struct dma_chan *fsl_easrc_get_dma_channel(struct fsl_asrc_pair *ctx,
+ bool dir)
+{
+ struct fsl_asrc *easrc = ctx->asrc;
+ enum asrc_pair_index index = ctx->index;
+ char name[8];
+
+ /* Example of dma name: ctx0_rx */
+ sprintf(name, "ctx%c_%cx", index + '0', dir == IN ? 'r' : 't');
+
+ return dma_request_slave_channel(&easrc->pdev->dev, name);
+};
+EXPORT_SYMBOL_GPL(fsl_easrc_get_dma_channel);
+
+static const unsigned int easrc_rates[] = {
+ 8000, 11025, 12000, 16000,
+ 22050, 24000, 32000, 44100,
+ 48000, 64000, 88200, 96000,
+ 128000, 176400, 192000, 256000,
+ 352800, 384000, 705600, 768000,
+};
+
+static const struct snd_pcm_hw_constraint_list easrc_rate_constraints = {
+ .count = ARRAY_SIZE(easrc_rates),
+ .list = easrc_rates,
+};
+
+static int fsl_easrc_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &easrc_rate_constraints);
+}
+
+static int fsl_easrc_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct fsl_asrc_pair *ctx = runtime->private_data;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = fsl_easrc_start_context(ctx);
+ if (ret)
+ return ret;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = fsl_easrc_stop_context(ctx);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct fsl_asrc *easrc = snd_soc_dai_get_drvdata(dai);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct device *dev = &easrc->pdev->dev;
+ struct fsl_asrc_pair *ctx = runtime->private_data;
+ struct fsl_easrc_ctx_priv *ctx_priv = ctx->private;
+ unsigned int channels = params_channels(params);
+ unsigned int rate = params_rate(params);
+ snd_pcm_format_t format = params_format(params);
+ int ret;
+
+ ret = fsl_easrc_request_context(channels, ctx);
+ if (ret) {
+ dev_err(dev, "failed to request context\n");
+ return ret;
+ }
+
+ ctx_priv->ctx_streams |= BIT(substream->stream);
+
+ /*
+ * Set the input and output sample rates so we can compute
+ * the resampling ratio for RS_LOW/HIGH
+ */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ctx_priv->in_params.sample_rate = rate;
+ ctx_priv->in_params.sample_format = format;
+ ctx_priv->out_params.sample_rate = easrc->asrc_rate;
+ ctx_priv->out_params.sample_format = easrc->asrc_format;
+ } else {
+ ctx_priv->out_params.sample_rate = rate;
+ ctx_priv->out_params.sample_format = format;
+ ctx_priv->in_params.sample_rate = easrc->asrc_rate;
+ ctx_priv->in_params.sample_format = easrc->asrc_format;
+ }
+
+ ctx->channels = channels;
+ ctx_priv->in_params.fifo_wtmk = 0x20;
+ ctx_priv->out_params.fifo_wtmk = 0x20;
+
+ /*
+ * Do only rate conversion and keep the same format for input
+ * and output data
+ */
+ ret = fsl_easrc_set_ctx_format(ctx,
+ &ctx_priv->in_params.sample_format,
+ &ctx_priv->out_params.sample_format);
+ if (ret) {
+ dev_err(dev, "failed to set format %d", ret);
+ return ret;
+ }
+
+ ret = fsl_easrc_config_context(easrc, ctx->index);
+ if (ret) {
+ dev_err(dev, "failed to config context\n");
+ return ret;
+ }
+
+ ctx_priv->in_params.iterations = 1;
+ ctx_priv->in_params.group_len = ctx->channels;
+ ctx_priv->in_params.access_len = ctx->channels;
+ ctx_priv->out_params.iterations = 1;
+ ctx_priv->out_params.group_len = ctx->channels;
+ ctx_priv->out_params.access_len = ctx->channels;
+
+ ret = fsl_easrc_set_ctx_organziation(ctx);
+ if (ret) {
+ dev_err(dev, "failed to set fifo organization\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct fsl_asrc_pair *ctx = runtime->private_data;
+ struct fsl_easrc_ctx_priv *ctx_priv;
+
+ if (!ctx)
+ return -EINVAL;
+
+ ctx_priv = ctx->private;
+
+ if (ctx_priv->ctx_streams & BIT(substream->stream)) {
+ ctx_priv->ctx_streams &= ~BIT(substream->stream);
+ fsl_easrc_release_context(ctx);
+ }
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops fsl_easrc_dai_ops = {
+ .startup = fsl_easrc_startup,
+ .trigger = fsl_easrc_trigger,
+ .hw_params = fsl_easrc_hw_params,
+ .hw_free = fsl_easrc_hw_free,
+};
+
+static int fsl_easrc_dai_probe(struct snd_soc_dai *cpu_dai)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(cpu_dai->dev);
+
+ snd_soc_dai_init_dma_data(cpu_dai,
+ &easrc->dma_params_tx,
+ &easrc->dma_params_rx);
+ return 0;
+}
+
+static struct snd_soc_dai_driver fsl_easrc_dai = {
+ .probe = fsl_easrc_dai_probe,
+ .playback = {
+ .stream_name = "ASRC-Playback",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rate_min = 8000,
+ .rate_max = 768000,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .formats = FSL_EASRC_FORMATS,
+ },
+ .capture = {
+ .stream_name = "ASRC-Capture",
+ .channels_min = 1,
+ .channels_max = 32,
+ .rate_min = 8000,
+ .rate_max = 768000,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .formats = FSL_EASRC_FORMATS |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
+ },
+ .ops = &fsl_easrc_dai_ops,
+};
+
+static const struct snd_soc_component_driver fsl_easrc_component = {
+ .name = "fsl-easrc-dai",
+ .controls = fsl_easrc_snd_controls,
+ .num_controls = ARRAY_SIZE(fsl_easrc_snd_controls),
+};
+
+static const struct reg_default fsl_easrc_reg_defaults[] = {
+ {REG_EASRC_WRFIFO(0), 0x00000000},
+ {REG_EASRC_WRFIFO(1), 0x00000000},
+ {REG_EASRC_WRFIFO(2), 0x00000000},
+ {REG_EASRC_WRFIFO(3), 0x00000000},
+ {REG_EASRC_RDFIFO(0), 0x00000000},
+ {REG_EASRC_RDFIFO(1), 0x00000000},
+ {REG_EASRC_RDFIFO(2), 0x00000000},
+ {REG_EASRC_RDFIFO(3), 0x00000000},
+ {REG_EASRC_CC(0), 0x00000000},
+ {REG_EASRC_CC(1), 0x00000000},
+ {REG_EASRC_CC(2), 0x00000000},
+ {REG_EASRC_CC(3), 0x00000000},
+ {REG_EASRC_CCE1(0), 0x00000000},
+ {REG_EASRC_CCE1(1), 0x00000000},
+ {REG_EASRC_CCE1(2), 0x00000000},
+ {REG_EASRC_CCE1(3), 0x00000000},
+ {REG_EASRC_CCE2(0), 0x00000000},
+ {REG_EASRC_CCE2(1), 0x00000000},
+ {REG_EASRC_CCE2(2), 0x00000000},
+ {REG_EASRC_CCE2(3), 0x00000000},
+ {REG_EASRC_CIA(0), 0x00000000},
+ {REG_EASRC_CIA(1), 0x00000000},
+ {REG_EASRC_CIA(2), 0x00000000},
+ {REG_EASRC_CIA(3), 0x00000000},
+ {REG_EASRC_DPCS0R0(0), 0x00000000},
+ {REG_EASRC_DPCS0R0(1), 0x00000000},
+ {REG_EASRC_DPCS0R0(2), 0x00000000},
+ {REG_EASRC_DPCS0R0(3), 0x00000000},
+ {REG_EASRC_DPCS0R1(0), 0x00000000},
+ {REG_EASRC_DPCS0R1(1), 0x00000000},
+ {REG_EASRC_DPCS0R1(2), 0x00000000},
+ {REG_EASRC_DPCS0R1(3), 0x00000000},
+ {REG_EASRC_DPCS0R2(0), 0x00000000},
+ {REG_EASRC_DPCS0R2(1), 0x00000000},
+ {REG_EASRC_DPCS0R2(2), 0x00000000},
+ {REG_EASRC_DPCS0R2(3), 0x00000000},
+ {REG_EASRC_DPCS0R3(0), 0x00000000},
+ {REG_EASRC_DPCS0R3(1), 0x00000000},
+ {REG_EASRC_DPCS0R3(2), 0x00000000},
+ {REG_EASRC_DPCS0R3(3), 0x00000000},
+ {REG_EASRC_DPCS1R0(0), 0x00000000},
+ {REG_EASRC_DPCS1R0(1), 0x00000000},
+ {REG_EASRC_DPCS1R0(2), 0x00000000},
+ {REG_EASRC_DPCS1R0(3), 0x00000000},
+ {REG_EASRC_DPCS1R1(0), 0x00000000},
+ {REG_EASRC_DPCS1R1(1), 0x00000000},
+ {REG_EASRC_DPCS1R1(2), 0x00000000},
+ {REG_EASRC_DPCS1R1(3), 0x00000000},
+ {REG_EASRC_DPCS1R2(0), 0x00000000},
+ {REG_EASRC_DPCS1R2(1), 0x00000000},
+ {REG_EASRC_DPCS1R2(2), 0x00000000},
+ {REG_EASRC_DPCS1R2(3), 0x00000000},
+ {REG_EASRC_DPCS1R3(0), 0x00000000},
+ {REG_EASRC_DPCS1R3(1), 0x00000000},
+ {REG_EASRC_DPCS1R3(2), 0x00000000},
+ {REG_EASRC_DPCS1R3(3), 0x00000000},
+ {REG_EASRC_COC(0), 0x00000000},
+ {REG_EASRC_COC(1), 0x00000000},
+ {REG_EASRC_COC(2), 0x00000000},
+ {REG_EASRC_COC(3), 0x00000000},
+ {REG_EASRC_COA(0), 0x00000000},
+ {REG_EASRC_COA(1), 0x00000000},
+ {REG_EASRC_COA(2), 0x00000000},
+ {REG_EASRC_COA(3), 0x00000000},
+ {REG_EASRC_SFS(0), 0x00000000},
+ {REG_EASRC_SFS(1), 0x00000000},
+ {REG_EASRC_SFS(2), 0x00000000},
+ {REG_EASRC_SFS(3), 0x00000000},
+ {REG_EASRC_RRL(0), 0x00000000},
+ {REG_EASRC_RRL(1), 0x00000000},
+ {REG_EASRC_RRL(2), 0x00000000},
+ {REG_EASRC_RRL(3), 0x00000000},
+ {REG_EASRC_RRH(0), 0x00000000},
+ {REG_EASRC_RRH(1), 0x00000000},
+ {REG_EASRC_RRH(2), 0x00000000},
+ {REG_EASRC_RRH(3), 0x00000000},
+ {REG_EASRC_RUC(0), 0x00000000},
+ {REG_EASRC_RUC(1), 0x00000000},
+ {REG_EASRC_RUC(2), 0x00000000},
+ {REG_EASRC_RUC(3), 0x00000000},
+ {REG_EASRC_RUR(0), 0x7FFFFFFF},
+ {REG_EASRC_RUR(1), 0x7FFFFFFF},
+ {REG_EASRC_RUR(2), 0x7FFFFFFF},
+ {REG_EASRC_RUR(3), 0x7FFFFFFF},
+ {REG_EASRC_RCTCL, 0x00000000},
+ {REG_EASRC_RCTCH, 0x00000000},
+ {REG_EASRC_PCF(0), 0x00000000},
+ {REG_EASRC_PCF(1), 0x00000000},
+ {REG_EASRC_PCF(2), 0x00000000},
+ {REG_EASRC_PCF(3), 0x00000000},
+ {REG_EASRC_CRCM, 0x00000000},
+ {REG_EASRC_CRCC, 0x00000000},
+ {REG_EASRC_IRQC, 0x00000FFF},
+ {REG_EASRC_IRQF, 0x00000000},
+ {REG_EASRC_CS0(0), 0x00000000},
+ {REG_EASRC_CS0(1), 0x00000000},
+ {REG_EASRC_CS0(2), 0x00000000},
+ {REG_EASRC_CS0(3), 0x00000000},
+ {REG_EASRC_CS1(0), 0x00000000},
+ {REG_EASRC_CS1(1), 0x00000000},
+ {REG_EASRC_CS1(2), 0x00000000},
+ {REG_EASRC_CS1(3), 0x00000000},
+ {REG_EASRC_CS2(0), 0x00000000},
+ {REG_EASRC_CS2(1), 0x00000000},
+ {REG_EASRC_CS2(2), 0x00000000},
+ {REG_EASRC_CS2(3), 0x00000000},
+ {REG_EASRC_CS3(0), 0x00000000},
+ {REG_EASRC_CS3(1), 0x00000000},
+ {REG_EASRC_CS3(2), 0x00000000},
+ {REG_EASRC_CS3(3), 0x00000000},
+ {REG_EASRC_CS4(0), 0x00000000},
+ {REG_EASRC_CS4(1), 0x00000000},
+ {REG_EASRC_CS4(2), 0x00000000},
+ {REG_EASRC_CS4(3), 0x00000000},
+ {REG_EASRC_CS5(0), 0x00000000},
+ {REG_EASRC_CS5(1), 0x00000000},
+ {REG_EASRC_CS5(2), 0x00000000},
+ {REG_EASRC_CS5(3), 0x00000000},
+ {REG_EASRC_DBGC, 0x00000000},
+ {REG_EASRC_DBGS, 0x00000000},
+};
+
+static const struct regmap_range fsl_easrc_readable_ranges[] = {
+ regmap_reg_range(REG_EASRC_RDFIFO(0), REG_EASRC_RCTCH),
+ regmap_reg_range(REG_EASRC_PCF(0), REG_EASRC_PCF(3)),
+ regmap_reg_range(REG_EASRC_CRCC, REG_EASRC_DBGS),
+};
+
+static const struct regmap_access_table fsl_easrc_readable_table = {
+ .yes_ranges = fsl_easrc_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(fsl_easrc_readable_ranges),
+};
+
+static const struct regmap_range fsl_easrc_writeable_ranges[] = {
+ regmap_reg_range(REG_EASRC_WRFIFO(0), REG_EASRC_WRFIFO(3)),
+ regmap_reg_range(REG_EASRC_CC(0), REG_EASRC_COA(3)),
+ regmap_reg_range(REG_EASRC_RRL(0), REG_EASRC_RCTCH),
+ regmap_reg_range(REG_EASRC_PCF(0), REG_EASRC_DBGC),
+};
+
+static const struct regmap_access_table fsl_easrc_writeable_table = {
+ .yes_ranges = fsl_easrc_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(fsl_easrc_writeable_ranges),
+};
+
+static const struct regmap_range fsl_easrc_volatileable_ranges[] = {
+ regmap_reg_range(REG_EASRC_RDFIFO(0), REG_EASRC_RDFIFO(3)),
+ regmap_reg_range(REG_EASRC_SFS(0), REG_EASRC_SFS(3)),
+ regmap_reg_range(REG_EASRC_IRQF, REG_EASRC_IRQF),
+ regmap_reg_range(REG_EASRC_DBGS, REG_EASRC_DBGS),
+};
+
+static const struct regmap_access_table fsl_easrc_volatileable_table = {
+ .yes_ranges = fsl_easrc_volatileable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(fsl_easrc_volatileable_ranges),
+};
+
+static const struct regmap_config fsl_easrc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .max_register = REG_EASRC_DBGS,
+ .reg_defaults = fsl_easrc_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(fsl_easrc_reg_defaults),
+ .rd_table = &fsl_easrc_readable_table,
+ .wr_table = &fsl_easrc_writeable_table,
+ .volatile_table = &fsl_easrc_volatileable_table,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#ifdef DEBUG
+static void fsl_easrc_dump_firmware(struct fsl_asrc *easrc)
+{
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct asrc_firmware_hdr *firm = easrc_priv->firmware_hdr;
+ struct interp_params *interp = easrc_priv->interp;
+ struct prefil_params *prefil = easrc_priv->prefil;
+ struct device *dev = &easrc->pdev->dev;
+ int i;
+
+ if (firm->magic != FIRMWARE_MAGIC) {
+ dev_err(dev, "Wrong magic. Something went wrong!");
+ return;
+ }
+
+ dev_dbg(dev, "Firmware v%u dump:\n", firm->firmware_version);
+ dev_dbg(dev, "Num prefilter scenarios: %u\n", firm->prefil_scen);
+ dev_dbg(dev, "Num interpolation scenarios: %u\n", firm->interp_scen);
+ dev_dbg(dev, "\nInterpolation scenarios:\n");
+
+ for (i = 0; i < firm->interp_scen; i++) {
+ if (interp[i].magic != FIRMWARE_MAGIC) {
+ dev_dbg(dev, "%d. wrong interp magic: %x\n",
+ i, interp[i].magic);
+ continue;
+ }
+ dev_dbg(dev, "%d. taps: %u, phases: %u, center: %llu\n", i,
+ interp[i].num_taps, interp[i].num_phases,
+ interp[i].center_tap);
+ }
+
+ for (i = 0; i < firm->prefil_scen; i++) {
+ if (prefil[i].magic != FIRMWARE_MAGIC) {
+ dev_dbg(dev, "%d. wrong prefil magic: %x\n",
+ i, prefil[i].magic);
+ continue;
+ }
+ dev_dbg(dev, "%d. insr: %u, outsr: %u, st1: %u, st2: %u\n", i,
+ prefil[i].insr, prefil[i].outsr,
+ prefil[i].st1_taps, prefil[i].st2_taps);
+ }
+
+ dev_dbg(dev, "end of firmware dump\n");
+}
+#endif
+
+static int fsl_easrc_get_firmware(struct fsl_asrc *easrc)
+{
+ struct fsl_easrc_priv *easrc_priv;
+ const struct firmware **fw_p;
+ u32 pnum, inum, offset;
+ const u8 *data;
+ int ret;
+
+ if (!easrc)
+ return -EINVAL;
+
+ easrc_priv = easrc->private;
+ fw_p = &easrc_priv->fw;
+
+ ret = request_firmware(fw_p, easrc_priv->fw_name, &easrc->pdev->dev);
+ if (ret)
+ return ret;
+
+ data = easrc_priv->fw->data;
+
+ easrc_priv->firmware_hdr = (struct asrc_firmware_hdr *)data;
+ pnum = easrc_priv->firmware_hdr->prefil_scen;
+ inum = easrc_priv->firmware_hdr->interp_scen;
+
+ if (inum) {
+ offset = sizeof(struct asrc_firmware_hdr);
+ easrc_priv->interp = (struct interp_params *)(data + offset);
+ }
+
+ if (pnum) {
+ offset = sizeof(struct asrc_firmware_hdr) +
+ inum * sizeof(struct interp_params);
+ easrc_priv->prefil = (struct prefil_params *)(data + offset);
+ }
+
+#ifdef DEBUG
+ fsl_easrc_dump_firmware(easrc);
+#endif
+
+ return 0;
+}
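
The firmware blob is laid out as a header followed by interp_scen interpolation tables and then prefil_scen prefilter tables; the offsets above are computed accordingly. A minimal sketch with simplified stand-in structures (the real ones in fsl_easrc.h also carry the magic fields and coefficient arrays):

        #include <stdio.h>
        #include <stddef.h>

        /* simplified stand-ins; not the driver's asrc_firmware_hdr/interp_params/
         * prefil_params definitions */
        struct hdr_model    { unsigned int magic, interp_scen, prefil_scen; };
        struct interp_model { unsigned int num_taps, num_phases; };
        struct prefil_model { unsigned int insr, outsr; };

        int main(void)
        {
                unsigned int inum = 3, pnum = 20;   /* hypothetical scenario counts */
                size_t interp_off = sizeof(struct hdr_model);
                size_t prefil_off = interp_off + inum * sizeof(struct interp_model);

                /* the driver casts data + offset in the same order: header first,
                 * then the interpolation tables, then the prefilter tables */
                printf("interp tables at offset %zu, prefil tables at offset %zu\n",
                       interp_off, prefil_off);
                (void)pnum;
                return 0;
        }
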
+
+static irqreturn_t fsl_easrc_isr(int irq, void *dev_id)
+{
+ struct fsl_asrc *easrc = (struct fsl_asrc *)dev_id;
+ struct device *dev = &easrc->pdev->dev;
+ int val;
+
+ regmap_read(easrc->regmap, REG_EASRC_IRQF, &val);
+
+ if (val & EASRC_IRQF_OER_MASK)
+ dev_dbg(dev, "output FIFO underflow\n");
+
+ if (val & EASRC_IRQF_IFO_MASK)
+ dev_dbg(dev, "input FIFO overflow\n");
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_easrc_get_fifo_addr(u8 dir, enum asrc_pair_index index)
+{
+ return REG_EASRC_FIFO(dir, index);
+}
+
+static const struct of_device_id fsl_easrc_dt_ids[] = {
+ { .compatible = "fsl,imx8mn-easrc",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_easrc_dt_ids);
+
+static int fsl_easrc_probe(struct platform_device *pdev)
+{
+ struct fsl_easrc_priv *easrc_priv;
+ struct device *dev = &pdev->dev;
+ struct fsl_asrc *easrc;
+ struct resource *res;
+ struct device_node *np;
+ void __iomem *regs;
+ int ret, irq;
+
+ easrc = devm_kzalloc(dev, sizeof(*easrc), GFP_KERNEL);
+ if (!easrc)
+ return -ENOMEM;
+
+ easrc_priv = devm_kzalloc(dev, sizeof(*easrc_priv), GFP_KERNEL);
+ if (!easrc_priv)
+ return -ENOMEM;
+
+ easrc->pdev = pdev;
+ easrc->private = easrc_priv;
+ np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ dev_err(&pdev->dev, "failed ioremap\n");
+ return PTR_ERR(regs);
+ }
+
+ easrc->paddr = res->start;
+
+ easrc->regmap = devm_regmap_init_mmio_clk(dev, "mem", regs,
+ &fsl_easrc_regmap_config);
+ if (IS_ERR(easrc->regmap)) {
+ dev_err(dev, "failed to init regmap");
+ return PTR_ERR(easrc->regmap);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq for node %pOF\n", np);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, fsl_easrc_isr, 0,
+ dev_name(dev), easrc);
+ if (ret) {
+ dev_err(dev, "failed to claim irq %u: %d\n", irq, ret);
+ return ret;
+ }
+
+ easrc->mem_clk = devm_clk_get(dev, "mem");
+ if (IS_ERR(easrc->mem_clk)) {
+ dev_err(dev, "failed to get mem clock\n");
+ return PTR_ERR(easrc->mem_clk);
+ }
+
+ /* Set default value */
+ easrc->channel_avail = 32;
+ easrc->get_dma_channel = fsl_easrc_get_dma_channel;
+ easrc->request_pair = fsl_easrc_request_context;
+ easrc->release_pair = fsl_easrc_release_context;
+ easrc->get_fifo_addr = fsl_easrc_get_fifo_addr;
+ easrc->pair_priv_size = sizeof(struct fsl_easrc_ctx_priv);
+
+ easrc_priv->rs_num_taps = EASRC_RS_32_TAPS;
+ easrc_priv->const_coeff = 0x3FF0000000000000;
+
+ ret = of_property_read_u32(np, "fsl,asrc-rate", &easrc->asrc_rate);
+ if (ret) {
+ dev_err(dev, "failed to asrc rate\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "fsl,asrc-format", &easrc->asrc_format);
+ if (ret) {
+ dev_err(dev, "failed to asrc format\n");
+ return ret;
+ }
+
+ if (!(FSL_EASRC_FORMATS & (1ULL << easrc->asrc_format))) {
+ dev_warn(dev, "unsupported format, switching to S24_LE\n");
+ easrc->asrc_format = SNDRV_PCM_FORMAT_S24_LE;
+ }
+
+ ret = of_property_read_string(np, "firmware-name",
+ &easrc_priv->fw_name);
+ if (ret) {
+ dev_err(dev, "failed to get firmware name\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, easrc);
+ pm_runtime_enable(dev);
+
+ spin_lock_init(&easrc->lock);
+
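+ /*
+ * Leave the register map in cache-only mode for now; runtime resume
+ * enables the memory clock and syncs the cache before the registers
+ * are first used (see fsl_easrc_runtime_resume()).
+ */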
+ regcache_cache_only(easrc->regmap, true);
+
+ ret = devm_snd_soc_register_component(dev, &fsl_easrc_component,
+ &fsl_easrc_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register ASoC DAI\n");
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &fsl_asrc_component,
+ NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ASoC platform\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_easrc_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static __maybe_unused int fsl_easrc_runtime_suspend(struct device *dev)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(dev);
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ unsigned long lock_flags;
+
+ regcache_cache_only(easrc->regmap, true);
+
+ clk_disable_unprepare(easrc->mem_clk);
+
+ spin_lock_irqsave(&easrc->lock, lock_flags);
+ easrc_priv->firmware_loaded = 0;
+ spin_unlock_irqrestore(&easrc->lock, lock_flags);
+
+ return 0;
+}
+
+static __maybe_unused int fsl_easrc_runtime_resume(struct device *dev)
+{
+ struct fsl_asrc *easrc = dev_get_drvdata(dev);
+ struct fsl_easrc_priv *easrc_priv = easrc->private;
+ struct fsl_easrc_ctx_priv *ctx_priv;
+ struct fsl_asrc_pair *ctx;
+ unsigned long lock_flags;
+ int ret;
+ int i;
+
+ ret = clk_prepare_enable(easrc->mem_clk);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(easrc->regmap, false);
+ regcache_mark_dirty(easrc->regmap);
+ regcache_sync(easrc->regmap);
+
+ spin_lock_irqsave(&easrc->lock, lock_flags);
+ if (easrc_priv->firmware_loaded) {
+ spin_unlock_irqrestore(&easrc->lock, lock_flags);
+ goto skip_load;
+ }
+ easrc_priv->firmware_loaded = 1;
+ spin_unlock_irqrestore(&easrc->lock, lock_flags);
+
+ ret = fsl_easrc_get_firmware(easrc);
+ if (ret) {
+ dev_err(dev, "failed to get firmware\n");
+ goto disable_mem_clk;
+ }
+
+ /*
+ * Write Resampling Coefficients
+ * The coefficient RAM must be configured before the start of
+ * any context processing within the ASRC.
+ */
+ ret = fsl_easrc_resampler_config(easrc);
+ if (ret) {
+ dev_err(dev, "resampler config failed\n");
+ goto disable_mem_clk;
+ }
+
+ for (i = ASRC_PAIR_A; i < EASRC_CTX_MAX_NUM; i++) {
+ ctx = easrc->pair[i];
+ if (!ctx)
+ continue;
+
+ ctx_priv = ctx->private;
+ fsl_easrc_set_rs_ratio(ctx);
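+ /*
+ * The statement below is a ceiling division: it converts the count
+ * of already-filled input samples to the equivalent number of output
+ * samples, in_filled_sample * out_rate / in_rate, rounding up
+ * whenever the division leaves a remainder.
+ */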
+ ctx_priv->out_missed_sample = ctx_priv->in_filled_sample *
+ ctx_priv->out_params.sample_rate /
+ ctx_priv->in_params.sample_rate;
+ if (ctx_priv->in_filled_sample * ctx_priv->out_params.sample_rate
+ % ctx_priv->in_params.sample_rate != 0)
+ ctx_priv->out_missed_sample += 1;
+
+ ret = fsl_easrc_write_pf_coeff_mem(easrc, i,
+ ctx_priv->st1_coeff,
+ ctx_priv->st1_num_taps,
+ ctx_priv->st1_addexp);
+ if (ret)
+ goto disable_mem_clk;
+
+ ret = fsl_easrc_write_pf_coeff_mem(easrc, i,
+ ctx_priv->st2_coeff,
+ ctx_priv->st2_num_taps,
+ ctx_priv->st2_addexp);
+ if (ret)
+ goto disable_mem_clk;
+ }
+
+skip_load:
+ return 0;
+
+disable_mem_clk:
+ clk_disable_unprepare(easrc->mem_clk);
+ return ret;
+}
+
+static const struct dev_pm_ops fsl_easrc_pm_ops = {
+ SET_RUNTIME_PM_OPS(fsl_easrc_runtime_suspend,
+ fsl_easrc_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver fsl_easrc_driver = {
+ .probe = fsl_easrc_probe,
+ .remove = fsl_easrc_remove,
+ .driver = {
+ .name = "fsl-easrc",
+ .pm = &fsl_easrc_pm_ops,
+ .of_match_table = fsl_easrc_dt_ids,
+ },
+};
+module_platform_driver(fsl_easrc_driver);
+
+MODULE_DESCRIPTION("NXP Enhanced Asynchronous Sample Rate (eASRC) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/fsl_easrc.h b/sound/soc/fsl/fsl_easrc.h
new file mode 100644
index 000000000000..30620d56252c
--- /dev/null
+++ b/sound/soc/fsl/fsl_easrc.h
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 NXP
+ */
+
+#ifndef _FSL_EASRC_H
+#define _FSL_EASRC_H
+
+#include <sound/asound.h>
+#include <linux/platform_data/dma-imx.h>
+
+#include "fsl_asrc_common.h"
+
+/* EASRC Register Map */
+
+/* ASRC Input Write FIFO */
+#define REG_EASRC_WRFIFO(ctx) (0x000 + 4 * (ctx))
+/* ASRC Output Read FIFO */
+#define REG_EASRC_RDFIFO(ctx) (0x010 + 4 * (ctx))
+/* ASRC Context Control */
+#define REG_EASRC_CC(ctx) (0x020 + 4 * (ctx))
+/* ASRC Context Control Extended 1 */
+#define REG_EASRC_CCE1(ctx) (0x030 + 4 * (ctx))
+/* ASRC Context Control Extended 2 */
+#define REG_EASRC_CCE2(ctx) (0x040 + 4 * (ctx))
+/* ASRC Control Input Access */
+#define REG_EASRC_CIA(ctx) (0x050 + 4 * (ctx))
+/* ASRC Datapath Processor Control Slot0 */
+#define REG_EASRC_DPCS0R0(ctx) (0x060 + 4 * (ctx))
+#define REG_EASRC_DPCS0R1(ctx) (0x070 + 4 * (ctx))
+#define REG_EASRC_DPCS0R2(ctx) (0x080 + 4 * (ctx))
+#define REG_EASRC_DPCS0R3(ctx) (0x090 + 4 * (ctx))
+/* ASRC Datapath Processor Control Slot1 */
+#define REG_EASRC_DPCS1R0(ctx) (0x0A0 + 4 * (ctx))
+#define REG_EASRC_DPCS1R1(ctx) (0x0B0 + 4 * (ctx))
+#define REG_EASRC_DPCS1R2(ctx) (0x0C0 + 4 * (ctx))
+#define REG_EASRC_DPCS1R3(ctx) (0x0D0 + 4 * (ctx))
+/* ASRC Context Output Control */
+#define REG_EASRC_COC(ctx) (0x0E0 + 4 * (ctx))
+/* ASRC Control Output Access */
+#define REG_EASRC_COA(ctx) (0x0F0 + 4 * (ctx))
+/* ASRC Sample FIFO Status */
+#define REG_EASRC_SFS(ctx) (0x100 + 4 * (ctx))
+/* ASRC Resampling Ratio Low */
+#define REG_EASRC_RRL(ctx) (0x110 + 8 * (ctx))
+/* ASRC Resampling Ratio High */
+#define REG_EASRC_RRH(ctx) (0x114 + 8 * (ctx))
+/* ASRC Resampling Ratio Update Control */
+#define REG_EASRC_RUC(ctx) (0x130 + 4 * (ctx))
+/* ASRC Resampling Ratio Update Rate */
+#define REG_EASRC_RUR(ctx) (0x140 + 4 * (ctx))
+/* ASRC Resampling Center Tap Coefficient Low */
+#define REG_EASRC_RCTCL (0x150)
+/* ASRC Resampling Center Tap Coefficient High */
+#define REG_EASRC_RCTCH (0x154)
+/* ASRC Prefilter Coefficient FIFO */
+#define REG_EASRC_PCF(ctx) (0x160 + 4 * (ctx))
+/* ASRC Context Resampling Coefficient Memory */
+#define REG_EASRC_CRCM 0x170
+/* ASRC Context Resampling Coefficient Control*/
+#define REG_EASRC_CRCC 0x174
+/* ASRC Interrupt Control */
+#define REG_EASRC_IRQC 0x178
+/* ASRC Interrupt Status Flags */
+#define REG_EASRC_IRQF 0x17C
+/* ASRC Channel Status 0 */
+#define REG_EASRC_CS0(ctx) (0x180 + 4 * (ctx))
+/* ASRC Channel Status 1 */
+#define REG_EASRC_CS1(ctx) (0x190 + 4 * (ctx))
+/* ASRC Channel Status 2 */
+#define REG_EASRC_CS2(ctx) (0x1A0 + 4 * (ctx))
+/* ASRC Channel Status 3 */
+#define REG_EASRC_CS3(ctx) (0x1B0 + 4 * (ctx))
+/* ASRC Channel Status 4 */
+#define REG_EASRC_CS4(ctx) (0x1C0 + 4 * (ctx))
+/* ASRC Channel Status 5 */
+#define REG_EASRC_CS5(ctx) (0x1D0 + 4 * (ctx))
+/* ASRC Debug Control Register */
+#define REG_EASRC_DBGC 0x1E0
+/* ASRC Debug Status Register */
+#define REG_EASRC_DBGS 0x1E4
+
+#define REG_EASRC_FIFO(x, ctx) (x == IN ? REG_EASRC_WRFIFO(ctx) \
+ : REG_EASRC_RDFIFO(ctx))
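+
+/*
+ * REG_EASRC_FIFO() resolves to the context's input (write) FIFO for the
+ * IN direction and to its output (read) FIFO otherwise; it backs
+ * fsl_easrc_get_fifo_addr() in fsl_easrc.c.
+ */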
+
+/* ASRC Context Control (CC) */
+#define EASRC_CC_EN_SHIFT 31
+#define EASRC_CC_EN_MASK BIT(EASRC_CC_EN_SHIFT)
+#define EASRC_CC_EN BIT(EASRC_CC_EN_SHIFT)
+#define EASRC_CC_STOP_SHIFT 29
+#define EASRC_CC_STOP_MASK BIT(EASRC_CC_STOP_SHIFT)
+#define EASRC_CC_STOP BIT(EASRC_CC_STOP_SHIFT)
+#define EASRC_CC_FWMDE_SHIFT 28
+#define EASRC_CC_FWMDE_MASK BIT(EASRC_CC_FWMDE_SHIFT)
+#define EASRC_CC_FWMDE BIT(EASRC_CC_FWMDE_SHIFT)
+#define EASRC_CC_FIFO_WTMK_SHIFT 16
+#define EASRC_CC_FIFO_WTMK_WIDTH 7
+#define EASRC_CC_FIFO_WTMK_MASK ((BIT(EASRC_CC_FIFO_WTMK_WIDTH) - 1) \
+ << EASRC_CC_FIFO_WTMK_SHIFT)
+#define EASRC_CC_FIFO_WTMK(v) (((v) << EASRC_CC_FIFO_WTMK_SHIFT) \
+ & EASRC_CC_FIFO_WTMK_MASK)
+#define EASRC_CC_SAMPLE_POS_SHIFT 11
+#define EASRC_CC_SAMPLE_POS_WIDTH 5
+#define EASRC_CC_SAMPLE_POS_MASK ((BIT(EASRC_CC_SAMPLE_POS_WIDTH) - 1) \
+ << EASRC_CC_SAMPLE_POS_SHIFT)
+#define EASRC_CC_SAMPLE_POS(v) (((v) << EASRC_CC_SAMPLE_POS_SHIFT) \
+ & EASRC_CC_SAMPLE_POS_MASK)
+#define EASRC_CC_ENDIANNESS_SHIFT 10
+#define EASRC_CC_ENDIANNESS_MASK BIT(EASRC_CC_ENDIANNESS_SHIFT)
+#define EASRC_CC_ENDIANNESS BIT(EASRC_CC_ENDIANNESS_SHIFT)
+#define EASRC_CC_BPS_SHIFT 8
+#define EASRC_CC_BPS_WIDTH 2
+#define EASRC_CC_BPS_MASK ((BIT(EASRC_CC_BPS_WIDTH) - 1) \
+ << EASRC_CC_BPS_SHIFT)
+#define EASRC_CC_BPS(v) (((v) << EASRC_CC_BPS_SHIFT) \
+ & EASRC_CC_BPS_MASK)
+#define EASRC_CC_FMT_SHIFT 7
+#define EASRC_CC_FMT_MASK BIT(EASRC_CC_FMT_SHIFT)
+#define EASRC_CC_FMT BIT(EASRC_CC_FMT_SHIFT)
+#define EASRC_CC_INSIGN_SHIFT 6
+#define EASRC_CC_INSIGN_MASK BIT(EASRC_CC_INSIGN_SHIFT)
+#define EASRC_CC_INSIGN BIT(EASRC_CC_INSIGN_SHIFT)
+#define EASRC_CC_CHEN_SHIFT 0
+#define EASRC_CC_CHEN_WIDTH 5
+#define EASRC_CC_CHEN_MASK ((BIT(EASRC_CC_CHEN_WIDTH) - 1) \
+ << EASRC_CC_CHEN_SHIFT)
+#define EASRC_CC_CHEN(v) (((v) << EASRC_CC_CHEN_SHIFT) \
+ & EASRC_CC_CHEN_MASK)
+
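+/*
+ * The register field macros in this header follow a common pattern: a
+ * _SHIFT, an optional _WIDTH, a _MASK covering the whole field, and a
+ * value macro that shifts and masks its argument.  A hypothetical
+ * update of the FIFO watermark field, for example, could look like:
+ *
+ *	regmap_update_bits(regmap, REG_EASRC_CC(ctx),
+ *			   EASRC_CC_FIFO_WTMK_MASK,
+ *			   EASRC_CC_FIFO_WTMK(0x20));
+ *
+ * Several fields can be combined by OR'ing their masks and values.
+ */
+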
+/* ASRC Context Control Extended 1 (CCE1) */
+#define EASRC_CCE1_COEF_WS_SHIFT 25
+#define EASRC_CCE1_COEF_WS_MASK BIT(EASRC_CCE1_COEF_WS_SHIFT)
+#define EASRC_CCE1_COEF_WS BIT(EASRC_CCE1_COEF_WS_SHIFT)
+#define EASRC_CCE1_COEF_MEM_RST_SHIFT 24
+#define EASRC_CCE1_COEF_MEM_RST_MASK BIT(EASRC_CCE1_COEF_MEM_RST_SHIFT)
+#define EASRC_CCE1_COEF_MEM_RST BIT(EASRC_CCE1_COEF_MEM_RST_SHIFT)
+#define EASRC_CCE1_PF_EXP_SHIFT 16
+#define EASRC_CCE1_PF_EXP_WIDTH 8
+#define EASRC_CCE1_PF_EXP_MASK ((BIT(EASRC_CCE1_PF_EXP_WIDTH) - 1) \
+ << EASRC_CCE1_PF_EXP_SHIFT)
+#define EASRC_CCE1_PF_EXP(v) (((v) << EASRC_CCE1_PF_EXP_SHIFT) \
+ & EASRC_CCE1_PF_EXP_MASK)
+#define EASRC_CCE1_PF_ST1_WBFP_SHIFT 9
+#define EASRC_CCE1_PF_ST1_WBFP_MASK BIT(EASRC_CCE1_PF_ST1_WBFP_SHIFT)
+#define EASRC_CCE1_PF_ST1_WBFP BIT(EASRC_CCE1_PF_ST1_WBFP_SHIFT)
+#define EASRC_CCE1_PF_TSEN_SHIFT 8
+#define EASRC_CCE1_PF_TSEN_MASK BIT(EASRC_CCE1_PF_TSEN_SHIFT)
+#define EASRC_CCE1_PF_TSEN BIT(EASRC_CCE1_PF_TSEN_SHIFT)
+#define EASRC_CCE1_RS_BYPASS_SHIFT 7
+#define EASRC_CCE1_RS_BYPASS_MASK BIT(EASRC_CCE1_RS_BYPASS_SHIFT)
+#define EASRC_CCE1_RS_BYPASS BIT(EASRC_CCE1_RS_BYPASS_SHIFT)
+#define EASRC_CCE1_PF_BYPASS_SHIFT 6
+#define EASRC_CCE1_PF_BYPASS_MASK BIT(EASRC_CCE1_PF_BYPASS_SHIFT)
+#define EASRC_CCE1_PF_BYPASS BIT(EASRC_CCE1_PF_BYPASS_SHIFT)
+#define EASRC_CCE1_RS_STOP_SHIFT 5
+#define EASRC_CCE1_RS_STOP_MASK BIT(EASRC_CCE1_RS_STOP_SHIFT)
+#define EASRC_CCE1_RS_STOP BIT(EASRC_CCE1_RS_STOP_SHIFT)
+#define EASRC_CCE1_PF_STOP_SHIFT 4
+#define EASRC_CCE1_PF_STOP_MASK BIT(EASRC_CCE1_PF_STOP_SHIFT)
+#define EASRC_CCE1_PF_STOP BIT(EASRC_CCE1_PF_STOP_SHIFT)
+#define EASRC_CCE1_RS_INIT_SHIFT 2
+#define EASRC_CCE1_RS_INIT_WIDTH 2
+#define EASRC_CCE1_RS_INIT_MASK ((BIT(EASRC_CCE1_RS_INIT_WIDTH) - 1) \
+ << EASRC_CCE1_RS_INIT_SHIFT)
+#define EASRC_CCE1_RS_INIT(v) (((v) << EASRC_CCE1_RS_INIT_SHIFT) \
+ & EASRC_CCE1_RS_INIT_MASK)
+#define EASRC_CCE1_PF_INIT_SHIFT 0
+#define EASRC_CCE1_PF_INIT_WIDTH 2
+#define EASRC_CCE1_PF_INIT_MASK ((BIT(EASRC_CCE1_PF_INIT_WIDTH) - 1) \
+ << EASRC_CCE1_PF_INIT_SHIFT)
+#define EASRC_CCE1_PF_INIT(v) (((v) << EASRC_CCE1_PF_INIT_SHIFT) \
+ & EASRC_CCE1_PF_INIT_MASK)
+
+/* ASRC Context Control Extended 2 (CCE2) */
+#define EASRC_CCE2_ST2_TAPS_SHIFT 16
+#define EASRC_CCE2_ST2_TAPS_WIDTH 9
+#define EASRC_CCE2_ST2_TAPS_MASK ((BIT(EASRC_CCE2_ST2_TAPS_WIDTH) - 1) \
+ << EASRC_CCE2_ST2_TAPS_SHIFT)
+#define EASRC_CCE2_ST2_TAPS(v) (((v) << EASRC_CCE2_ST2_TAPS_SHIFT) \
+ & EASRC_CCE2_ST2_TAPS_MASK)
+#define EASRC_CCE2_ST1_TAPS_SHIFT 0
+#define EASRC_CCE2_ST1_TAPS_WIDTH 9
+#define EASRC_CCE2_ST1_TAPS_MASK ((BIT(EASRC_CCE2_ST1_TAPS_WIDTH) - 1) \
+ << EASRC_CCE2_ST1_TAPS_SHIFT)
+#define EASRC_CCE2_ST1_TAPS(v) (((v) << EASRC_CCE2_ST1_TAPS_SHIFT) \
+ & EASRC_CCE2_ST1_TAPS_MASK)
+
+/* ASRC Control Input Access (CIA) */
+#define EASRC_CIA_ITER_SHIFT 16
+#define EASRC_CIA_ITER_WIDTH 6
+#define EASRC_CIA_ITER_MASK ((BIT(EASRC_CIA_ITER_WIDTH) - 1) \
+ << EASRC_CIA_ITER_SHIFT)
+#define EASRC_CIA_ITER(v) (((v) << EASRC_CIA_ITER_SHIFT) \
+ & EASRC_CIA_ITER_MASK)
+#define EASRC_CIA_GRLEN_SHIFT 8
+#define EASRC_CIA_GRLEN_WIDTH 6
+#define EASRC_CIA_GRLEN_MASK ((BIT(EASRC_CIA_GRLEN_WIDTH) - 1) \
+ << EASRC_CIA_GRLEN_SHIFT)
+#define EASRC_CIA_GRLEN(v) (((v) << EASRC_CIA_GRLEN_SHIFT) \
+ & EASRC_CIA_GRLEN_MASK)
+#define EASRC_CIA_ACCLEN_SHIFT 0
+#define EASRC_CIA_ACCLEN_WIDTH 6
+#define EASRC_CIA_ACCLEN_MASK ((BIT(EASRC_CIA_ACCLEN_WIDTH) - 1) \
+ << EASRC_CIA_ACCLEN_SHIFT)
+#define EASRC_CIA_ACCLEN(v) (((v) << EASRC_CIA_ACCLEN_SHIFT) \
+ & EASRC_CIA_ACCLEN_MASK)
+
+/* ASRC Datapath Processor Control Slot0 Register0 (DPCS0R0) */
+#define EASRC_DPCS0R0_MAXCH_SHIFT 24
+#define EASRC_DPCS0R0_MAXCH_WIDTH 5
+#define EASRC_DPCS0R0_MAXCH_MASK ((BIT(EASRC_DPCS0R0_MAXCH_WIDTH) - 1) \
+ << EASRC_DPCS0R0_MAXCH_SHIFT)
+#define EASRC_DPCS0R0_MAXCH(v) (((v) << EASRC_DPCS0R0_MAXCH_SHIFT) \
+ & EASRC_DPCS0R0_MAXCH_MASK)
+#define EASRC_DPCS0R0_MINCH_SHIFT 16
+#define EASRC_DPCS0R0_MINCH_WIDTH 5
+#define EASRC_DPCS0R0_MINCH_MASK ((BIT(EASRC_DPCS0R0_MINCH_WIDTH) - 1) \
+ << EASRC_DPCS0R0_MINCH_SHIFT)
+#define EASRC_DPCS0R0_MINCH(v) (((v) << EASRC_DPCS0R0_MINCH_SHIFT) \
+ & EASRC_DPCS0R0_MINCH_MASK)
+#define EASRC_DPCS0R0_NUMCH_SHIFT 8
+#define EASRC_DPCS0R0_NUMCH_WIDTH 5
+#define EASRC_DPCS0R0_NUMCH_MASK ((BIT(EASRC_DPCS0R0_NUMCH_WIDTH) - 1) \
+ << EASRC_DPCS0R0_NUMCH_SHIFT)
+#define EASRC_DPCS0R0_NUMCH(v) (((v) << EASRC_DPCS0R0_NUMCH_SHIFT) \
+ & EASRC_DPCS0R0_NUMCH_MASK)
+#define EASRC_DPCS0R0_CTXNUM_SHIFT 1
+#define EASRC_DPCS0R0_CTXNUM_WIDTH 2
+#define EASRC_DPCS0R0_CTXNUM_MASK ((BIT(EASRC_DPCS0R0_CTXNUM_WIDTH) - 1) \
+ << EASRC_DPCS0R0_CTXNUM_SHIFT)
+#define EASRC_DPCS0R0_CTXNUM(v) (((v) << EASRC_DPCS0R0_CTXNUM_SHIFT) \
+ & EASRC_DPCS0R0_CTXNUM_MASK)
+#define EASRC_DPCS0R0_EN_SHIFT 0
+#define EASRC_DPCS0R0_EN_MASK BIT(EASRC_DPCS0R0_EN_SHIFT)
+#define EASRC_DPCS0R0_EN BIT(EASRC_DPCS0R0_EN_SHIFT)
+
+/* ASRC Datapath Processor Control Slot0 Register1 (DPCS0R1) */
+#define EASRC_DPCS0R1_ST1_EXP_SHIFT 0
+#define EASRC_DPCS0R1_ST1_EXP_WIDTH 13
+#define EASRC_DPCS0R1_ST1_EXP_MASK ((BIT(EASRC_DPCS0R1_ST1_EXP_WIDTH) - 1) \
+ << EASRC_DPCS0R1_ST1_EXP_SHIFT)
+#define EASRC_DPCS0R1_ST1_EXP(v) (((v) << EASRC_DPCS0R1_ST1_EXP_SHIFT) \
+ & EASRC_DPCS0R1_ST1_EXP_MASK)
+
+/* ASRC Datapath Processor Control Slot0 Register2 (DPCS0R2) */
+#define EASRC_DPCS0R2_ST1_MA_SHIFT 16
+#define EASRC_DPCS0R2_ST1_MA_WIDTH 13
+#define EASRC_DPCS0R2_ST1_MA_MASK ((BIT(EASRC_DPCS0R2_ST1_MA_WIDTH) - 1) \
+ << EASRC_DPCS0R2_ST1_MA_SHIFT)
+#define EASRC_DPCS0R2_ST1_MA(v) (((v) << EASRC_DPCS0R2_ST1_MA_SHIFT) \
+ & EASRC_DPCS0R2_ST1_MA_MASK)
+#define EASRC_DPCS0R2_ST1_SA_SHIFT 0
+#define EASRC_DPCS0R2_ST1_SA_WIDTH 13
+#define EASRC_DPCS0R2_ST1_SA_MASK ((BIT(EASRC_DPCS0R2_ST1_SA_WIDTH) - 1) \
+ << EASRC_DPCS0R2_ST1_SA_SHIFT)
+#define EASRC_DPCS0R2_ST1_SA(v) (((v) << EASRC_DPCS0R2_ST1_SA_SHIFT) \
+ & EASRC_DPCS0R2_ST1_SA_MASK)
+
+/* ASRC Datapath Processor Control Slot0 Register3 (DPCS0R3) */
+#define EASRC_DPCS0R3_ST2_MA_SHIFT 16
+#define EASRC_DPCS0R3_ST2_MA_WIDTH 13
+#define EASRC_DPCS0R3_ST2_MA_MASK ((BIT(EASRC_DPCS0R3_ST2_MA_WIDTH) - 1) \
+ << EASRC_DPCS0R3_ST2_MA_SHIFT)
+#define EASRC_DPCS0R3_ST2_MA(v) (((v) << EASRC_DPCS0R3_ST2_MA_SHIFT) \
+ & EASRC_DPCS0R3_ST2_MA_MASK)
+#define EASRC_DPCS0R3_ST2_SA_SHIFT 0
+#define EASRC_DPCS0R3_ST2_SA_WIDTH 13
+#define EASRC_DPCS0R3_ST2_SA_MASK ((BIT(EASRC_DPCS0R3_ST2_SA_WIDTH) - 1) \
+ << EASRC_DPCS0R3_ST2_SA_SHIFT)
+#define EASRC_DPCS0R3_ST2_SA(v) (((v) << EASRC_DPCS0R3_ST2_SA_SHIFT) \
+ & EASRC_DPCS0R3_ST2_SA_MASK)
+
+/* ASRC Context Output Control (COC) */
+#define EASRC_COC_FWMDE_SHIFT 28
+#define EASRC_COC_FWMDE_MASK BIT(EASRC_COC_FWMDE_SHIFT)
+#define EASRC_COC_FWMDE BIT(EASRC_COC_FWMDE_SHIFT)
+#define EASRC_COC_FIFO_WTMK_SHIFT 16
+#define EASRC_COC_FIFO_WTMK_WIDTH 7
+#define EASRC_COC_FIFO_WTMK_MASK ((BIT(EASRC_COC_FIFO_WTMK_WIDTH) - 1) \
+ << EASRC_COC_FIFO_WTMK_SHIFT)
+#define EASRC_COC_FIFO_WTMK(v) (((v) << EASRC_COC_FIFO_WTMK_SHIFT) \
+ & EASRC_COC_FIFO_WTMK_MASK)
+#define EASRC_COC_SAMPLE_POS_SHIFT 11
+#define EASRC_COC_SAMPLE_POS_WIDTH 5
+#define EASRC_COC_SAMPLE_POS_MASK ((BIT(EASRC_COC_SAMPLE_POS_WIDTH) - 1) \
+ << EASRC_COC_SAMPLE_POS_SHIFT)
+#define EASRC_COC_SAMPLE_POS(v) (((v) << EASRC_COC_SAMPLE_POS_SHIFT) \
+ & EASRC_COC_SAMPLE_POS_MASK)
+#define EASRC_COC_ENDIANNESS_SHIFT 10
+#define EASRC_COC_ENDIANNESS_MASK BIT(EASRC_COC_ENDIANNESS_SHIFT)
+#define EASRC_COC_ENDIANNESS BIT(EASRC_COC_ENDIANNESS_SHIFT)
+#define EASRC_COC_BPS_SHIFT 8
+#define EASRC_COC_BPS_WIDTH 2
+#define EASRC_COC_BPS_MASK ((BIT(EASRC_COC_BPS_WIDTH) - 1) \
+ << EASRC_COC_BPS_SHIFT)
+#define EASRC_COC_BPS(v) (((v) << EASRC_COC_BPS_SHIFT) \
+ & EASRC_COC_BPS_MASK)
+#define EASRC_COC_FMT_SHIFT 7
+#define EASRC_COC_FMT_MASK BIT(EASRC_COC_FMT_SHIFT)
+#define EASRC_COC_FMT BIT(EASRC_COC_FMT_SHIFT)
+#define EASRC_COC_OUTSIGN_SHIFT 6
+#define EASRC_COC_OUTSIGN_MASK BIT(EASRC_COC_OUTSIGN_SHIFT)
+#define EASRC_COC_OUTSIGN_OUT BIT(EASRC_COC_OUTSIGN_SHIFT)
+#define EASRC_COC_IEC_VDATA_SHIFT 2
+#define EASRC_COC_IEC_VDATA_MASK BIT(EASRC_COC_IEC_VDATA_SHIFT)
+#define EASRC_COC_IEC_VDATA BIT(EASRC_COC_IEC_VDATA_SHIFT)
+#define EASRC_COC_IEC_EN_SHIFT 1
+#define EASRC_COC_IEC_EN_MASK BIT(EASRC_COC_IEC_EN_SHIFT)
+#define EASRC_COC_IEC_EN BIT(EASRC_COC_IEC_EN_SHIFT)
+#define EASRC_COC_DITHER_EN_SHIFT 0
+#define EASRC_COC_DITHER_EN_MASK BIT(EASRC_COC_DITHER_EN_SHIFT)
+#define EASRC_COC_DITHER_EN BIT(EASRC_COC_DITHER_EN_SHIFT)
+
+/* ASRC Control Output Access (COA) */
+#define EASRC_COA_ITER_SHIFT 16
+#define EASRC_COA_ITER_WIDTH 6
+#define EASRC_COA_ITER_MASK ((BIT(EASRC_COA_ITER_WIDTH) - 1) \
+ << EASRC_COA_ITER_SHIFT)
+#define EASRC_COA_ITER(v) (((v) << EASRC_COA_ITER_SHIFT) \
+ & EASRC_COA_ITER_MASK)
+#define EASRC_COA_GRLEN_SHIFT 8
+#define EASRC_COA_GRLEN_WIDTH 6
+#define EASRC_COA_GRLEN_MASK ((BIT(EASRC_COA_GRLEN_WIDTH) - 1) \
+ << EASRC_COA_GRLEN_SHIFT)
+#define EASRC_COA_GRLEN(v) (((v) << EASRC_COA_GRLEN_SHIFT) \
+ & EASRC_COA_GRLEN_MASK)
+#define EASRC_COA_ACCLEN_SHIFT 0
+#define EASRC_COA_ACCLEN_WIDTH 6
+#define EASRC_COA_ACCLEN_MASK ((BIT(EASRC_COA_ACCLEN_WIDTH) - 1) \
+ << EASRC_COA_ACCLEN_SHIFT)
+#define EASRC_COA_ACCLEN(v) (((v) << EASRC_COA_ACCLEN_SHIFT) \
+ & EASRC_COA_ACCLEN_MASK)
+
+/* ASRC Sample FIFO Status (SFS) */
+#define EASRC_SFS_IWTMK_SHIFT 23
+#define EASRC_SFS_IWTMK_MASK BIT(EASRC_SFS_IWTMK_SHIFT)
+#define EASRC_SFS_IWTMK BIT(EASRC_SFS_IWTMK_SHIFT)
+#define EASRC_SFS_NSGI_SHIFT 16
+#define EASRC_SFS_NSGI_WIDTH 7
+#define EASRC_SFS_NSGI_MASK ((BIT(EASRC_SFS_NSGI_WIDTH) - 1) \
+ << EASRC_SFS_NSGI_SHIFT)
+#define EASRC_SFS_NSGI(v) (((v) << EASRC_SFS_NSGI_SHIFT) \
+ & EASRC_SFS_NSGI_MASK)
+#define EASRC_SFS_OWTMK_SHIFT 7
+#define EASRC_SFS_OWTMK_MASK BIT(EASRC_SFS_OWTMK_SHIFT)
+#define EASRC_SFS_OWTMK BIT(EASRC_SFS_OWTMK_SHIFT)
+#define EASRC_SFS_NSGO_SHIFT 0
+#define EASRC_SFS_NSGO_WIDTH 7
+#define EASRC_SFS_NSGO_MASK ((BIT(EASRC_SFS_NSGO_WIDTH) - 1) \
+ << EASRC_SFS_NSGO_SHIFT)
+#define EASRC_SFS_NSGO(v) (((v) << EASRC_SFS_NSGO_SHIFT) \
+ & EASRC_SFS_NSGO_MASK)
+
+/* ASRC Resampling Ratio Low (RRL) */
+#define EASRC_RRL_RS_RL_SHIFT 0
+#define EASRC_RRL_RS_RL_WIDTH 32
+#define EASRC_RRL_RS_RL(v) ((v) << EASRC_RRL_RS_RL_SHIFT)
+
+/* ASRC Resampling Ratio High (RRH) */
+#define EASRC_RRH_RS_VLD_SHIFT 31
+#define EASRC_RRH_RS_VLD_MASK BIT(EASRC_RRH_RS_VLD_SHIFT)
+#define EASRC_RRH_RS_VLD BIT(EASRC_RRH_RS_VLD_SHIFT)
+#define EASRC_RRH_RS_RH_SHIFT 0
+#define EASRC_RRH_RS_RH_WIDTH 12
+#define EASRC_RRH_RS_RH_MASK ((BIT(EASRC_RRH_RS_RH_WIDTH) - 1) \
+ << EASRC_RRH_RS_RH_SHIFT)
+#define EASRC_RRH_RS_RH(v) (((v) << EASRC_RRH_RS_RH_SHIFT) \
+ & EASRC_RRH_RS_RH_MASK)
+
+/* ASRC Resampling Ratio Update Control (RSUC) */
+#define EASRC_RSUC_RS_RM_SHIFT 0
+#define EASRC_RSUC_RS_RM_WIDTH 32
+#define EASRC_RSUC_RS_RM(v) ((v) << EASRC_RSUC_RS_RM_SHIFT)
+
+/* ASRC Resampling Ratio Update Rate (RRUR) */
+#define EASRC_RRUR_RRR_SHIFT 0
+#define EASRC_RRUR_RRR_WIDTH 31
+#define EASRC_RRUR_RRR_MASK ((BIT(EASRC_RRUR_RRR_WIDTH) - 1) \
+ << EASRC_RRUR_RRR_SHIFT)
+#define EASRC_RRUR_RRR(v) (((v) << EASRC_RRUR_RRR_SHIFT) \
+ & EASRC_RRUR_RRR_MASK)
+
+/* ASRC Resampling Center Tap Coefficient Low (RCTCL) */
+#define EASRC_RCTCL_RS_CL_SHIFT 0
+#define EASRC_RCTCL_RS_CL_WIDTH 32
+#define EASRC_RCTCL_RS_CL(v) ((v) << EASRC_RCTCL_RS_CL_SHIFT)
+
+/* ASRC Resampling Center Tap Coefficient High (RCTCH) */
+#define EASRC_RCTCH_RS_CH_SHIFT 0
+#define EASRC_RCTCH_RS_CH_WIDTH 32
+#define EASRC_RCTCH_RS_CH(v) ((v) << EASRC_RCTCH_RS_CH_SHIFT)
+
+/* ASRC Prefilter Coefficient FIFO (PCF) */
+#define EASRC_PCF_CD_SHIFT 0
+#define EASRC_PCF_CD_WIDTH 32
+#define EASRC_PCF_CD(v) ((v) << EASRC_PCF_CD_SHIFT)
+
+/* ASRC Context Resampling Coefficient Memory (CRCM) */
+#define EASRC_CRCM_RS_CWD_SHIFT 0
+#define EASRC_CRCM_RS_CWD_WIDTH 32
+#define EASRC_CRCM_RS_CWD(v) ((v) << EASRC_CRCM_RS_CWD_SHIFT)
+
+/* ASRC Context Resampling Coefficient Control (CRCC) */
+#define EASRC_CRCC_RS_CA_SHIFT 16
+#define EASRC_CRCC_RS_CA_WIDTH 11
+#define EASRC_CRCC_RS_CA_MASK ((BIT(EASRC_CRCC_RS_CA_WIDTH) - 1) \
+ << EASRC_CRCC_RS_CA_SHIFT)
+#define EASRC_CRCC_RS_CA(v) (((v) << EASRC_CRCC_RS_CA_SHIFT) \
+ & EASRC_CRCC_RS_CA_MASK)
+#define EASRC_CRCC_RS_TAPS_SHIFT 1
+#define EASRC_CRCC_RS_TAPS_WIDTH 2
+#define EASRC_CRCC_RS_TAPS_MASK ((BIT(EASRC_CRCC_RS_TAPS_WIDTH) - 1) \
+ << EASRC_CRCC_RS_TAPS_SHIFT)
+#define EASRC_CRCC_RS_TAPS(v) (((v) << EASRC_CRCC_RS_TAPS_SHIFT) \
+ & EASRC_CRCC_RS_TAPS_MASK)
+#define EASRC_CRCC_RS_CPR_SHIFT 0
+#define EASRC_CRCC_RS_CPR_MASK BIT(EASRC_CRCC_RS_CPR_SHIFT)
+#define EASRC_CRCC_RS_CPR BIT(EASRC_CRCC_RS_CPR_SHIFT)
+
+/* ASRC Interrupt Control (IC) */
+#define EASRC_IRQC_RSDM_SHIFT 8
+#define EASRC_IRQC_RSDM_WIDTH 4
+#define EASRC_IRQC_RSDM_MASK ((BIT(EASRC_IRQC_RSDM_WIDTH) - 1) \
+ << EASRC_IRQC_RSDM_SHIFT)
+#define EASRC_IRQC_RSDM(v) (((v) << EASRC_IRQC_RSDM_SHIFT) \
+ & EASRC_IRQC_RSDM_MASK)
+#define EASRC_IRQC_OERM_SHIFT 4
+#define EASRC_IRQC_OERM_WIDTH 4
+#define EASRC_IRQC_OERM_MASK ((BIT(EASRC_IRQC_OERM_WIDTH) - 1) \
+ << EASRC_IRQC_OERM_SHIFT)
+#define EASRC_IRQC_OERM(v) (((v) << EASRC_IRQC_OERM_SHIFT) \
+ & EASRC_IRQC_OERM_MASK)
+#define EASRC_IRQC_IOM_SHIFT 0
+#define EASRC_IRQC_IOM_WIDTH 4
+#define EASRC_IRQC_IOM_MASK ((BIT(EASRC_IRQC_IOM_WIDTH) - 1) \
+ << EASRC_IRQC_IOM_SHIFT)
+#define EASRC_IRQC_IOM(v) (((v) << EASRC_IRQC_IOM_SHIFT) \
+ & EASRC_IRQC_IOM_MASK)
+
+/* ASRC Interrupt Status Flags (ISF) */
+#define EASRC_IRQF_RSD_SHIFT 8
+#define EASRC_IRQF_RSD_WIDTH 4
+#define EASRC_IRQF_RSD_MASK ((BIT(EASRC_IRQF_RSD_WIDTH) - 1) \
+ << EASRC_IRQF_RSD_SHIFT)
+#define EASRC_IRQF_RSD(v) (((v) << EASRC_IRQF_RSD_SHIFT) \
+ & EASRC_IRQF_RSD_MASK)
+#define EASRC_IRQF_OER_SHIFT 4
+#define EASRC_IRQF_OER_WIDTH 4
+#define EASRC_IRQF_OER_MASK ((BIT(EASRC_IRQF_OER_WIDTH) - 1) \
+ << EASRC_IRQF_OER_SHIFT)
+#define EASRC_IRQF_OER(v) (((v) << EASRC_IRQF_OER_SHIFT) \
+ & EASRC_IRQF_OER_MASK)
+#define EASRC_IRQF_IFO_SHIFT 0
+#define EASRC_IRQF_IFO_WIDTH 4
+#define EASRC_IRQF_IFO_MASK ((BIT(EASRC_IRQF_IFO_WIDTH) - 1) \
+ << EASRC_IRQF_IFO_SHIFT)
+#define EASRC_IRQF_IFO(v) (((v) << EASRC_IRQF_IFO_SHIFT) \
+ & EASRC_IRQF_IFO_MASK)
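+
+/*
+ * The interrupt flag groups above are each four bits wide, presumably
+ * one bit per context (EASRC_CTX_MAX_NUM below is 4), which is why
+ * fsl_easrc_isr() only tests the group masks rather than individual
+ * bits.
+ */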
+
+/* ASRC Context Channel STAT */
+#define EASRC_CSx_CSx_SHIFT 0
+#define EASRC_CSx_CSx_WIDTH 32
+#define EASRC_CSx_CSx(v) ((v) << EASRC_CSx_CSx_SHIFT)
+
+/* ASRC Debug Control Register */
+#define EASRC_DBGC_DMS_SHIFT 0
+#define EASRC_DBGC_DMS_WIDTH 6
+#define EASRC_DBGC_DMS_MASK ((BIT(EASRC_DBGC_DMS_WIDTH) - 1) \
+ << EASRC_DBGC_DMS_SHIFT)
+#define EASRC_DBGC_DMS(v) (((v) << EASRC_DBGC_DMS_SHIFT) \
+ & EASRC_DBGC_DMS_MASK)
+
+/* ASRC Debug Status Register */
+#define EASRC_DBGS_DS_SHIFT 0
+#define EASRC_DBGS_DS_WIDTH 32
+#define EASRC_DBGS_DS(v) ((v) << EASRC_DBGS_DS_SHIFT)
+
+/* General Constants */
+#define EASRC_CTX_MAX_NUM 4
+#define EASRC_RS_COEFF_MEM 0
+#define EASRC_PF_COEFF_MEM 1
+
+/* Prefilter constants */
+#define EASRC_PF_ST1_ONLY 0
+#define EASRC_PF_TWO_STAGE_MODE 1
+#define EASRC_PF_ST1_COEFF_WR 0
+#define EASRC_PF_ST2_COEFF_WR 1
+#define EASRC_MAX_PF_TAPS 384
+
+/* Resampling constants */
+#define EASRC_RS_32_TAPS 0
+#define EASRC_RS_64_TAPS 1
+#define EASRC_RS_128_TAPS 2
+
+/* Initialization mode */
+#define EASRC_INIT_MODE_SW_CONTROL 0
+#define EASRC_INIT_MODE_REPLICATE 1
+#define EASRC_INIT_MODE_ZERO_FILL 2
+
+/* FIFO watermarks */
+#define FSL_EASRC_INPUTFIFO_WML 0x4
+#define FSL_EASRC_OUTPUTFIFO_WML 0x1
+
+#define EASRC_INPUTFIFO_THRESHOLD_MIN 0
+#define EASRC_INPUTFIFO_THRESHOLD_MAX 127
+#define EASRC_OUTPUTFIFO_THRESHOLD_MIN 0
+#define EASRC_OUTPUTFIFO_THRESHOLD_MAX 63
+
+#define EASRC_DMA_BUFFER_SIZE (1024 * 48 * 9)
+#define EASRC_MAX_BUFFER_SIZE (1024 * 48)
+
+#define FIRMWARE_MAGIC 0xDEAD
+#define FIRMWARE_VERSION 1
+
+#define PREFILTER_MEM_LEN 0x1800
+
+enum easrc_word_width {
+ EASRC_WIDTH_16_BIT = 0,
+ EASRC_WIDTH_20_BIT = 1,
+ EASRC_WIDTH_24_BIT = 2,
+ EASRC_WIDTH_32_BIT = 3,
+};
+
+struct __attribute__((__packed__)) asrc_firmware_hdr {
+ u32 magic;
+ u32 interp_scen;
+ u32 prefil_scen;
+ u32 firmware_version;
+};
+
+struct __attribute__((__packed__)) interp_params {
+ u32 magic;
+ u32 num_taps;
+ u32 num_phases;
+ u64 center_tap;
+ u64 coeff[8192];
+};
+
+struct __attribute__((__packed__)) prefil_params {
+ u32 magic;
+ u32 insr;
+ u32 outsr;
+ u32 st1_taps;
+ u32 st2_taps;
+ u32 st1_exp;
+ u64 coeff[256];
+};
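+
+/*
+ * Expected layout of the coefficient firmware blob, as parsed in
+ * fsl_easrc.c: a packed struct asrc_firmware_hdr, followed by
+ * interp_scen instances of struct interp_params, followed by
+ * prefil_scen instances of struct prefil_params; the parser derives
+ * the table offsets from the packed structure sizes above.
+ */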
+
+struct dma_block {
+ void *dma_vaddr;
+ unsigned int length;
+ unsigned int max_buf_size;
+};
+
+struct fsl_easrc_data_fmt {
+ unsigned int width : 2;
+ unsigned int endianness : 1;
+ unsigned int unsign : 1;
+ unsigned int floating_point : 1;
+ unsigned int iec958: 1;
+ unsigned int sample_pos: 5;
+ unsigned int addexp;
+};
+
+struct fsl_easrc_io_params {
+ struct fsl_easrc_data_fmt fmt;
+ unsigned int group_len;
+ unsigned int iterations;
+ unsigned int access_len;
+ unsigned int fifo_wtmk;
+ unsigned int sample_rate;
+ unsigned int sample_format;
+ unsigned int norm_rate;
+};
+
+struct fsl_easrc_slot {
+ bool busy;
+ int ctx_index;
+ int slot_index;
+ int num_channel; /* maximum is 8 */
+ int min_channel;
+ int max_channel;
+ int pf_mem_used;
+};
+
+/**
+ * fsl_easrc_ctx_priv: EASRC context private data
+ *
+ * @in_params: input parameters
+ * @out_params: output parameters
+ * @st1_num_taps: tap number of stage 1
+ * @st2_num_taps: tap number of stage 2
+ * @st1_num_exp: exponent number of stage 1
+ * @pf_init_mode: prefilter init mode
+ * @rs_init_mode: resample filter init mode
+ * @ctx_streams: stream flag of ctx
+ * @rs_ratio: resampler ratio
+ * @st1_coeff: pointer of stage 1 coeff
+ * @st2_coeff: pointer of stage 2 coeff
+ * @in_filled_sample: input filled sample
+ * @out_missed_sample: sample missed in output
+ * @st1_addexp: exponent added for stage1
+ * @st2_addexp: exponent added for stage2
+ */
+struct fsl_easrc_ctx_priv {
+ struct fsl_easrc_io_params in_params;
+ struct fsl_easrc_io_params out_params;
+ unsigned int st1_num_taps;
+ unsigned int st2_num_taps;
+ unsigned int st1_num_exp;
+ unsigned int pf_init_mode;
+ unsigned int rs_init_mode;
+ unsigned int ctx_streams;
+ u64 rs_ratio;
+ u64 *st1_coeff;
+ u64 *st2_coeff;
+ int in_filled_sample;
+ int out_missed_sample;
+ int st1_addexp;
+ int st2_addexp;
+};
+
+/**
+ * fsl_easrc_priv: EASRC private data
+ *
+ * @slot: slot setting
+ * @firmware_hdr: the header of firmware
+ * @interp: pointer to interpolation filter coeff
+ * @prefil: pointer to prefilter coeff
+ * @fw: firmware of coeff table
+ * @fw_name: firmware name
+ * @rs_num_taps: resample filter taps, 32, 64, or 128
+ * @bps_iec958: bits per sample of iec958
+ * @rs_coeff: resampler coefficient
+ * @const_coeff: one tap prefilter coefficient
+ * @firmware_loaded: firmware is loaded
+ */
+struct fsl_easrc_priv {
+ struct fsl_easrc_slot slot[EASRC_CTX_MAX_NUM][2];
+ struct asrc_firmware_hdr *firmware_hdr;
+ struct interp_params *interp;
+ struct prefil_params *prefil;
+ const struct firmware *fw;
+ const char *fw_name;
+ unsigned int rs_num_taps;
+ unsigned int bps_iec958[EASRC_CTX_MAX_NUM];
+ u64 *rs_coeff;
+ u64 const_coeff;
+ int firmware_loaded;
+};
+#endif /* _FSL_EASRC_H */
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index c7a49d03463a..cbcb70d6f8c8 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -22,6 +22,17 @@
SNDRV_PCM_FMTBIT_S24_LE)
/**
+ * fsl_esai_soc_data: SoC specific data
+ *
+ * @imx: true for i.MX platforms
+ * @reset_at_xrun: flag to enable the reset operation on xrun
+ */
+struct fsl_esai_soc_data {
+ bool imx;
+ bool reset_at_xrun;
+};
+
+/**
* fsl_esai: ESAI private data
*
* @dma_params_rx: DMA parameters for receive channel
@@ -33,6 +44,7 @@
* @fsysclk: system clock source to derive HCK, SCK and FS
* @spbaclk: SPBA clock (optional, depending on SoC design)
* @task: tasklet to handle the reset operation
+ * @soc: soc specific data
* @lock: spin lock between hw_reset() and trigger()
* @fifo_depth: depth of tx/rx FIFO
* @slot_width: width of each DAI slot
@@ -44,7 +56,6 @@
* @sck_div: if using PSR/PM dividers for SCKx clock
* @slave_mode: if fully using DAI slave mode
* @synchronous: if using tx/rx synchronous mode
- * @reset_at_xrun: flags for enable reset operaton
* @name: driver name
*/
struct fsl_esai {
@@ -57,6 +68,7 @@ struct fsl_esai {
struct clk *fsysclk;
struct clk *spbaclk;
struct tasklet_struct task;
+ const struct fsl_esai_soc_data *soc;
spinlock_t lock; /* Protect hw_reset and trigger */
u32 fifo_depth;
u32 slot_width;
@@ -70,10 +82,24 @@ struct fsl_esai {
bool sck_div[2];
bool slave_mode;
bool synchronous;
- bool reset_at_xrun;
char name[32];
};
+static struct fsl_esai_soc_data fsl_esai_vf610 = {
+ .imx = false,
+ .reset_at_xrun = true,
+};
+
+static struct fsl_esai_soc_data fsl_esai_imx35 = {
+ .imx = true,
+ .reset_at_xrun = true,
+};
+
+static struct fsl_esai_soc_data fsl_esai_imx6ull = {
+ .imx = true,
+ .reset_at_xrun = false,
+};
+
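+/*
+ * The per-SoC data above is selected at probe time via
+ * of_device_get_match_data(), using the .data pointers added to
+ * fsl_esai_dt_ids[] further down in this diff; it replaces the
+ * per-compatible checks that previously set reset_at_xrun directly.
+ */
+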
static irqreturn_t esai_isr(int irq, void *devid)
{
struct fsl_esai *esai_priv = (struct fsl_esai *)devid;
@@ -85,8 +111,12 @@ static irqreturn_t esai_isr(int irq, void *devid)
regmap_read(esai_priv->regmap, REG_ESAI_SAISR, &saisr);
if ((saisr & (ESAI_SAISR_TUE | ESAI_SAISR_ROE)) &&
- esai_priv->reset_at_xrun) {
+ esai_priv->soc->reset_at_xrun) {
dev_dbg(&pdev->dev, "reset module for xrun\n");
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR,
+ ESAI_xCR_xEIE_MASK, 0);
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR,
+ ESAI_xCR_xEIE_MASK, 0);
tasklet_schedule(&esai_priv->task);
}
@@ -484,7 +514,7 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
{
struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai);
- if (!dai->active) {
+ if (!snd_soc_dai_active(dai)) {
/* Set synchronous mode */
regmap_update_bits(esai_priv->regmap, REG_ESAI_SAICR,
ESAI_SAICR_SYNC, esai_priv->synchronous ?
@@ -932,9 +962,11 @@ static int fsl_esai_probe(struct platform_device *pdev)
esai_priv->pdev = pdev;
snprintf(esai_priv->name, sizeof(esai_priv->name), "%pOFn", np);
- if (of_device_is_compatible(np, "fsl,vf610-esai") ||
- of_device_is_compatible(np, "fsl,imx35-esai"))
- esai_priv->reset_at_xrun = true;
+ esai_priv->soc = of_device_get_match_data(&pdev->dev);
+ if (!esai_priv->soc) {
+ dev_err(&pdev->dev, "failed to get soc data\n");
+ return -ENODEV;
+ }
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1059,9 +1091,9 @@ static int fsl_esai_remove(struct platform_device *pdev)
}
static const struct of_device_id fsl_esai_dt_ids[] = {
- { .compatible = "fsl,imx35-esai", },
- { .compatible = "fsl,vf610-esai", },
- { .compatible = "fsl,imx6ull-esai", },
+ { .compatible = "fsl,imx35-esai", .data = &fsl_esai_imx35 },
+ { .compatible = "fsl,vf610-esai", .data = &fsl_esai_vf610 },
+ { .compatible = "fsl,imx6ull-esai", .data = &fsl_esai_imx6ull },
{}
};
MODULE_DEVICE_TABLE(of, fsl_esai_dt_ids);
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index f7f2d29f1bfe..efc5daf53bba 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -217,8 +217,7 @@ static int fsl_micfil_startup(struct snd_pcm_substream *substream,
struct fsl_micfil *micfil = snd_soc_dai_get_drvdata(dai);
if (!micfil) {
- dev_err(dai->dev,
- "micfil dai priv_data not set\n");
+ dev_err(dai->dev, "micfil dai priv_data not set\n");
return -EINVAL;
}
@@ -296,7 +295,7 @@ static int fsl_set_clock_params(struct device *dev, unsigned int rate)
{
struct fsl_micfil *micfil = dev_get_drvdata(dev);
int clk_div;
- int ret = 0;
+ int ret;
ret = fsl_micfil_set_mclk_rate(micfil, rate);
if (ret < 0)
@@ -702,16 +701,14 @@ static int fsl_micfil_probe(struct platform_device *pdev)
for (i = 0; i < MICFIL_IRQ_LINES; i++) {
micfil->irq[i] = platform_get_irq(pdev, i);
dev_err(&pdev->dev, "GET IRQ: %d\n", micfil->irq[i]);
- if (micfil->irq[i] < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
+ if (micfil->irq[i] < 0)
return micfil->irq[i];
- }
}
if (of_property_read_bool(np, "fsl,shared-interrupt"))
irqflag = IRQF_SHARED;
- /* Digital Microphone interface interrupt - IRQ 109 */
+ /* Digital Microphone interface interrupt */
ret = devm_request_irq(&pdev->dev, micfil->irq[0],
micfil_isr, irqflag,
micfil->name, micfil);
@@ -721,7 +718,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
return ret;
}
- /* Digital Microphone interface error interrupt - IRQ 110 */
+ /* Digital Microphone interface error interrupt */
ret = devm_request_irq(&pdev->dev, micfil->irq[1],
micfil_err_isr, irqflag,
micfil->name, micfil);
@@ -755,7 +752,6 @@ static int fsl_micfil_probe(struct platform_device *pdev)
return ret;
}
-#ifdef CONFIG_PM
static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
{
struct fsl_micfil *micfil = dev_get_drvdata(dev);
@@ -782,9 +778,7 @@ static int __maybe_unused fsl_micfil_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM*/
-#ifdef CONFIG_PM_SLEEP
static int __maybe_unused fsl_micfil_suspend(struct device *dev)
{
pm_runtime_force_suspend(dev);
@@ -798,7 +792,6 @@ static int __maybe_unused fsl_micfil_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops fsl_micfil_pm_ops = {
SET_RUNTIME_PM_OPS(fsl_micfil_runtime_suspend,
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index c711d2d93280..1b2e516f9162 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -466,7 +466,7 @@ static int fsl_spdif_startup(struct snd_pcm_substream *substream,
int ret;
/* Reset module and interrupts only for first initialization */
- if (!cpu_dai->active) {
+ if (!snd_soc_dai_active(cpu_dai)) {
ret = clk_prepare_enable(spdif_priv->coreclk);
if (ret) {
dev_err(&pdev->dev, "failed to enable core clock\n");
@@ -554,7 +554,7 @@ static void fsl_spdif_shutdown(struct snd_pcm_substream *substream,
regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
/* Power down SPDIF module only if tx&rx are both inactive */
- if (!cpu_dai->active) {
+ if (!snd_soc_dai_active(cpu_dai)) {
spdif_intr_status_clear(spdif_priv);
regmap_update_bits(regmap, REG_SPDIF_SCR,
SCR_LOW_POWER, SCR_LOW_POWER);
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index ab3b76d298b3..fd5dcd6b9f85 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -547,7 +547,7 @@ static int hi6210_i2s_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ i2s = devm_kzalloc(dev, sizeof(*i2s), GFP_KERNEL);
if (!i2s)
return -ENOMEM;
@@ -562,28 +562,28 @@ static int hi6210_i2s_probe(struct platform_device *pdev)
i2s->base_phys = (phys_addr_t)res->start;
i2s->dai = hi6210_i2s_dai_init;
- dev_set_drvdata(&pdev->dev, i2s);
+ dev_set_drvdata(dev, i2s);
i2s->sysctrl = syscon_regmap_lookup_by_phandle(node,
"hisilicon,sysctrl-syscon");
if (IS_ERR(i2s->sysctrl))
return PTR_ERR(i2s->sysctrl);
- i2s->clk[CLK_DACODEC] = devm_clk_get(&pdev->dev, "dacodec");
- if (IS_ERR_OR_NULL(i2s->clk[CLK_DACODEC]))
+ i2s->clk[CLK_DACODEC] = devm_clk_get(dev, "dacodec");
+ if (IS_ERR(i2s->clk[CLK_DACODEC]))
return PTR_ERR(i2s->clk[CLK_DACODEC]);
i2s->clocks++;
- i2s->clk[CLK_I2S_BASE] = devm_clk_get(&pdev->dev, "i2s-base");
- if (IS_ERR_OR_NULL(i2s->clk[CLK_I2S_BASE]))
+ i2s->clk[CLK_I2S_BASE] = devm_clk_get(dev, "i2s-base");
+ if (IS_ERR(i2s->clk[CLK_I2S_BASE]))
return PTR_ERR(i2s->clk[CLK_I2S_BASE]);
i2s->clocks++;
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
if (ret)
return ret;
- ret = devm_snd_soc_register_component(&pdev->dev, &hi6210_i2s_i2s_comp,
+ ret = devm_snd_soc_register_component(dev, &hi6210_i2s_i2s_comp,
&i2s->dai, 1);
return ret;
}
diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
index a495d1050d49..e30b66b94bf6 100644
--- a/sound/soc/img/img-i2s-in.c
+++ b/sound/soc/img/img-i2s-in.c
@@ -482,6 +482,7 @@ static int img_i2s_in_probe(struct platform_device *pdev)
if (IS_ERR(rst)) {
if (PTR_ERR(rst) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
+ pm_runtime_put(&pdev->dev);
goto err_suspend;
}
diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
index db052ec17d5d..b56a18e7f3ac 100644
--- a/sound/soc/img/img-i2s-out.c
+++ b/sound/soc/img/img-i2s-out.c
@@ -347,8 +347,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
ret = pm_runtime_get_sync(i2s->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(i2s->dev);
return ret;
+ }
img_i2s_out_disable(i2s);
@@ -488,8 +490,10 @@ static int img_i2s_out_probe(struct platform_device *pdev)
goto err_pm_disable;
}
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto err_suspend;
+ }
reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
diff --git a/sound/soc/img/img-spdif-in.c b/sound/soc/img/img-spdif-in.c
index fd639f4d082b..46ff8a3621d5 100644
--- a/sound/soc/img/img-spdif-in.c
+++ b/sound/soc/img/img-spdif-in.c
@@ -753,8 +753,10 @@ static int img_spdif_in_probe(struct platform_device *pdev)
goto err_pm_disable;
}
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto err_suspend;
+ }
rst = devm_reset_control_get_exclusive(&pdev->dev, "rst");
if (IS_ERR(rst)) {
diff --git a/sound/soc/img/img-spdif-out.c b/sound/soc/img/img-spdif-out.c
index 456c462d52fb..b1d8e4535726 100644
--- a/sound/soc/img/img-spdif-out.c
+++ b/sound/soc/img/img-spdif-out.c
@@ -370,8 +370,10 @@ static int img_spdif_out_probe(struct platform_device *pdev)
goto err_pm_disable;
}
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto err_suspend;
+ }
img_spdif_out_writel(spdif, IMG_SPDIF_OUT_CTL_FS_MASK,
IMG_SPDIF_OUT_CTL);
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index c8de0bb5bed9..36f547939f0a 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -209,12 +209,8 @@ config SND_SOC_INTEL_SKYLAKE_SSP_CLK
config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
bool "HDAudio codec support"
help
- This option broke audio on Linus' Skylake laptop in December 2018
- and the race conditions during the probe were not fixed since.
- This option is DEPRECATED, all HDaudio codec support needs
- to be handled by the SOF driver.
- Distributions should not enable this option and there are no known
- users of this capability.
+ If you have Intel Skylake or Kabylake with HDaudio codec
+ and DMIC present then enable this option by saying Y.
config SND_SOC_INTEL_SKYLAKE_COMMON
tristate
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 8160520fd74c..e16d6dc4d4e6 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
# Core support
obj-$(CONFIG_SND_SOC) += common/
diff --git a/sound/soc/intel/atom/Makefile b/sound/soc/intel/atom/Makefile
index 1dc60471b399..a9326d5ec44c 100644
--- a/sound/soc/intel/atom/Makefile
+++ b/sound/soc/intel/atom/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
snd-soc-sst-atom-hifi2-platform-objs := sst-mfld-platform-pcm.o \
sst-mfld-platform-compress.o \
sst-atom-controls.o
diff --git a/sound/soc/intel/atom/sst-atom-controls.h b/sound/soc/intel/atom/sst-atom-controls.h
index 5356e954a732..620b48d2a064 100644
--- a/sound/soc/intel/atom/sst-atom-controls.h
+++ b/sound/soc/intel/atom/sst-atom-controls.h
@@ -410,7 +410,7 @@ struct sst_cmd_set_gain_dual {
struct sst_cmd_set_params {
struct sst_destination_id dst;
u16 command_id;
- char params[0];
+ char params[];
} __packed;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c
index 4a7a9426a3b9..1595e01a7e12 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c
@@ -39,7 +39,8 @@ static void sst_drain_notify(void *arg)
snd_compr_drain_notify(cstream);
}
-static int sst_platform_compr_open(struct snd_compr_stream *cstream)
+static int sst_platform_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
int ret_val = 0;
@@ -72,7 +73,8 @@ out_ops:
return ret_val;
}
-static int sst_platform_compr_free(struct snd_compr_stream *cstream)
+static int sst_platform_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct sst_runtime_stream *stream;
int ret_val = 0, str_id;
@@ -91,15 +93,14 @@ static int sst_platform_compr_free(struct snd_compr_stream *cstream)
return 0;
}
-static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
- struct snd_compr_params *params)
+static int sst_platform_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params)
{
struct sst_runtime_stream *stream;
int retval;
struct snd_sst_params str_params;
struct sst_compress_cb cb;
- struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct sst_data *ctx = snd_soc_component_get_drvdata(component);
stream = cstream->runtime->private_data;
@@ -166,7 +167,8 @@ static int sst_platform_compr_set_params(struct snd_compr_stream *cstream,
return 0;
}
-static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
+static int sst_platform_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream, int cmd)
{
struct sst_runtime_stream *stream = cstream->runtime->private_data;
@@ -199,8 +201,9 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
return -EINVAL;
}
-static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
- struct snd_compr_tstamp *tstamp)
+static int sst_platform_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp)
{
struct sst_runtime_stream *stream;
@@ -212,8 +215,9 @@ static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
return 0;
}
-static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
- size_t bytes)
+static int sst_platform_compr_ack(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ size_t bytes)
{
struct sst_runtime_stream *stream;
@@ -224,8 +228,9 @@ static int sst_platform_compr_ack(struct snd_compr_stream *cstream,
return 0;
}
-static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
- struct snd_compr_caps *caps)
+static int sst_platform_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_caps *caps)
{
struct sst_runtime_stream *stream =
cstream->runtime->private_data;
@@ -233,8 +238,9 @@ static int sst_platform_compr_get_caps(struct snd_compr_stream *cstream,
return stream->compr_ops->get_caps(caps);
}
-static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
- struct snd_compr_codec_caps *codec)
+static int sst_platform_compr_get_codec_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_codec_caps *codec)
{
struct sst_runtime_stream *stream =
cstream->runtime->private_data;
@@ -242,8 +248,9 @@ static int sst_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
return stream->compr_ops->get_codec_caps(codec);
}
-static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
- struct snd_compr_metadata *metadata)
+static int sst_platform_compr_set_metadata(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata)
{
struct sst_runtime_stream *stream =
cstream->runtime->private_data;
@@ -251,7 +258,7 @@ static int sst_platform_compr_set_metadata(struct snd_compr_stream *cstream,
return stream->compr_ops->set_metadata(sst->dev, stream->id, metadata);
}
-const struct snd_compr_ops sst_platform_compr_ops = {
+const struct snd_compress_ops sst_platform_compress_ops = {
.open = sst_platform_compr_open,
.free = sst_platform_compr_free,
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 82f2b6357778..8817eaae6bb7 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -392,7 +392,7 @@ static int sst_enable_ssp(struct snd_pcm_substream *substream,
{
int ret = 0;
- if (!dai->active) {
+ if (!snd_soc_dai_active(dai)) {
ret = sst_handle_vb_timer(dai, true);
sst_fill_ssp_defaults(dai);
}
@@ -405,7 +405,7 @@ static int sst_be_hw_params(struct snd_pcm_substream *substream,
{
int ret = 0;
- if (dai->active == 1)
+ if (snd_soc_dai_active(dai) == 1)
ret = send_ssp_cmd(dai, dai->name, 1);
return ret;
}
@@ -414,7 +414,7 @@ static int sst_set_format(struct snd_soc_dai *dai, unsigned int fmt)
{
int ret = 0;
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
return 0;
ret = sst_fill_ssp_config(dai, fmt);
@@ -429,7 +429,7 @@ static int sst_platform_set_ssp_slot(struct snd_soc_dai *dai,
int slots, int slot_width) {
int ret = 0;
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
return ret;
ret = sst_fill_ssp_slot(dai, tx_mask, rx_mask, slots, slot_width);
@@ -442,7 +442,7 @@ static int sst_platform_set_ssp_slot(struct snd_soc_dai *dai,
static void sst_disable_ssp(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- if (!dai->active) {
+ if (!snd_soc_dai_active(dai)) {
send_ssp_cmd(dai, dai->name, 0);
sst_handle_vb_timer(dai, false);
}
@@ -684,7 +684,7 @@ static const struct snd_soc_component_driver sst_soc_platform_drv = {
.open = sst_soc_open,
.trigger = sst_soc_trigger,
.pointer = sst_soc_pointer,
- .compr_ops = &sst_platform_compr_ops,
+ .compress_ops = &sst_platform_compress_ops,
.pcm_construct = sst_soc_pcm_new,
};
@@ -743,7 +743,7 @@ static int sst_soc_prepare(struct device *dev)
for_each_card_rtds(drv->soc_card, rtd) {
struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
- if (dai->active) {
+ if (snd_soc_dai_active(dai)) {
send_ssp_cmd(dai, dai->name, 0);
sst_handle_vb_timer(dai, false);
}
@@ -764,7 +764,7 @@ static void sst_soc_complete(struct device *dev)
for_each_card_rtds(drv->soc_card, rtd) {
struct snd_soc_dai *dai = asoc_rtd_to_cpu(rtd, 0);
- if (dai->active) {
+ if (snd_soc_dai_active(dai)) {
sst_handle_vb_timer(dai, true);
send_ssp_cmd(dai, dai->name, 1);
}
diff --git a/sound/soc/intel/atom/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index fe4749cfa4f5..10c9ecfa7038 100644
--- a/sound/soc/intel/atom/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -17,7 +17,7 @@
#include "sst-atom-controls.h"
extern struct sst_device *sst;
-extern const struct snd_compr_ops sst_platform_compr_ops;
+extern const struct snd_compress_ops sst_platform_compress_ops;
#define DRV_NAME "sst"
diff --git a/sound/soc/intel/atom/sst/Makefile b/sound/soc/intel/atom/sst/Makefile
index 795d1cf8f386..f17c905df3e2 100644
--- a/sound/soc/intel/atom/sst/Makefile
+++ b/sound/soc/intel/atom/sst/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
snd-intel-sst-core-objs := sst.o sst_ipc.o sst_stream.o sst_drv_interface.o sst_loader.o sst_pvt.o
snd-intel-sst-pci-objs += sst_pci.o
snd-intel-sst-acpi-objs += sst_acpi.o
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 74274bd38f7a..34746fd871b0 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -666,8 +666,8 @@ static bool byt_is_dsp_busy(struct sst_dsp *dsp)
{
u64 ipcx;
- ipcx = sst_dsp_shim_read_unlocked(dsp, SST_IPCX);
- return (ipcx & (SST_IPCX_BUSY | SST_IPCX_DONE));
+ ipcx = sst_dsp_shim_read64_unlocked(dsp, SST_IPCX);
+ return (ipcx & (SST_BYT_IPCX_BUSY | SST_BYT_IPCX_DONE));
}
int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 556c3104e641..a2a5798c9139 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -243,7 +243,7 @@ if SND_SOC_INTEL_SKL
config SND_SOC_INTEL_SKL_RT286_MACH
tristate "SKL with RT286 I2S mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_RT286
select SND_SOC_DMIC
@@ -256,7 +256,7 @@ config SND_SOC_INTEL_SKL_RT286_MACH
config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
tristate "SKL with NAU88L25 and SSM4567 in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_NAU8825
select SND_SOC_SSM4567
@@ -270,7 +270,7 @@ config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
tristate "SKL with NAU88L25 and MAX98357A in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_NAU8825
select SND_SOC_MAX98357A
@@ -299,7 +299,7 @@ if SND_SOC_INTEL_APL
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
depends on SND_HDA_CODEC_HDMI
select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
@@ -311,7 +311,7 @@ config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
config SND_SOC_INTEL_BXT_RT298_MACH
tristate "Broxton with RT298 I2S mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_RT298
select SND_SOC_DMIC
@@ -324,11 +324,26 @@ config SND_SOC_INTEL_BXT_RT298_MACH
endif ## SND_SOC_INTEL_APL
+if SND_SOC_SOF_APOLLOLAKE
+
+config SND_SOC_INTEL_SOF_WM8804_MACH
+ tristate "SOF with Wolfson/Cirrus WM8804 codec"
+ depends on I2C && ACPI
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ select SND_SOC_WM8804_I2C
+ help
+ This adds support for ASoC machine driver for Intel platforms
+ with the Wolfson/Cirrus WM8804 I2S audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+endif ## SND_SOC_SOF_APOLLOLAKE
+
if SND_SOC_INTEL_KBL
config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
tristate "KBL with RT5663 and MAX98927 in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_RT5663
select SND_SOC_MAX98927
@@ -370,7 +385,7 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
tristate "KBL with DA7219 and MAX98927 in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_DA7219
select SND_SOC_MAX98927
@@ -396,13 +411,13 @@ config SND_SOC_INTEL_KBL_RT5660_MACH
endif ## SND_SOC_INTEL_KBL
-if SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK
+if SND_SOC_SOF_GEMINILAKE
config SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH
tristate "GLK with DA7219 and MAX98357A in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
- depends on SND_HDA_CODEC_HDMI
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
help
This adds support for ASoC machine driver for Geminilake platforms
@@ -412,10 +427,10 @@ config SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH
config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
tristate "GLK with RT5682 and MAX98357A in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
- depends on SND_HDA_CODEC_HDMI
- select SND_SOC_RT5682
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
+ select SND_SOC_RT5682_I2C
select SND_SOC_MAX98357A
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
@@ -425,13 +440,14 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_SOF_GEMINILAKE && SND_SOC_SOF_HDA_LINK
+endif ## SND_SOC_SOF_GEMINILAKE
if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
tristate "SKL/KBL/BXT/APL with HDA Codecs"
depends on SND_HDA_CODEC_HDMI
+ depends on GPIOLIB
select SND_SOC_HDAC_HDMI
select SND_SOC_DMIC
# SND_SOC_HDAC_HDA is already selected
@@ -446,13 +462,13 @@ endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC || SND_SOC_SOF_HDA_AUDIO_CODEC
if SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
config SND_SOC_INTEL_SOF_RT5682_MACH
tristate "SOF with rt5682 codec in I2S Mode"
- depends on I2C && ACPI
- depends on (SND_SOC_SOF_HDA_LINK && (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
+ depends on I2C && ACPI && GPIOLIB
+ depends on ((SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC) &&\
+ (MFD_INTEL_LPSS || COMPILE_TEST)) ||\
(SND_SOC_SOF_BAYTRAIL && (X86_INTEL_LPSS || COMPILE_TEST))
- depends on SND_HDA_CODEC_HDMI
select SND_SOC_MAX98373
select SND_SOC_RT1015
- select SND_SOC_RT5682
+ select SND_SOC_RT5682_I2C
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
help
@@ -480,7 +496,7 @@ if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
select SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON
help
@@ -491,11 +507,11 @@ config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
tristate "CML with RT1011 and RT5682 in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
- depends on SND_HDA_CODEC_HDMI
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_RT1011
- select SND_SOC_RT5682
+ select SND_SOC_RT5682_I2C
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
help
@@ -510,9 +526,9 @@ if SND_SOC_SOF_JASPERLAKE
config SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH
tristate "SOF with DA7219 and MAX98373/MAX98360A in I2S Mode"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
- depends on SND_HDA_CODEC_HDMI
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_DA7219
select SND_SOC_MAX98373
select SND_SOC_DMIC
@@ -524,15 +540,30 @@ config SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH
endif ## SND_SOC_SOF_JASPERLAKE
+if SND_SOC_SOF_ELKHARTLAKE
+
+config SND_SOC_INTEL_EHL_RT5660_MACH
+ tristate "EHL with RT5660 in I2S mode"
+ depends on I2C && ACPI && GPIOLIB
+ depends on MFD_INTEL_LPSS || COMPILE_TEST
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
+ select SND_SOC_RT5660
+ select SND_SOC_DMIC
+ help
+ This adds support for ASoC machine driver for Elkhart Lake
+ platform with RT5660 I2S audio codec.
+
+endif ## SND_SOC_SOF_ELKHARTLAKE
+
if SND_SOC_SOF_INTEL_SOUNDWIRE
config SND_SOC_INTEL_SOUNDWIRE_SOF_MACH
tristate "SoundWire generic machine driver"
- depends on I2C && ACPI
+ depends on I2C && ACPI && GPIOLIB
depends on MFD_INTEL_LPSS || COMPILE_TEST
depends on SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES || COMPILE_TEST
depends on SOUNDWIRE
- depends on SND_HDA_CODEC_HDMI
+ depends on SND_HDA_CODEC_HDMI && SND_SOC_SOF_HDA_AUDIO_CODEC
select SND_SOC_RT700_SDW
select SND_SOC_RT711_SDW
select SND_SOC_RT1308_SDW
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 1ef6e60bc2a0..15684610f8c6 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
snd-soc-sst-haswell-objs := haswell.o
snd-soc-sst-byt-rt5640-mach-objs := byt-rt5640.o
snd-soc-sst-byt-max98090-mach-objs := byt-max98090.o
@@ -8,6 +8,7 @@ snd-soc-sst-broadwell-objs := broadwell.o
snd-soc-sst-bxt-da7219_max98357a-objs := bxt_da7219_max98357a.o hda_dsp_common.o
snd-soc-sst-bxt-rt298-objs := bxt_rt298.o hda_dsp_common.o
snd-soc-sst-sof-pcm512x-objs := sof_pcm512x.o hda_dsp_common.o
+snd-soc-sst-sof-wm8804-objs := sof_wm8804.o
snd-soc-sst-glk-rt5682_max98357a-objs := glk_rt5682_max98357a.o hda_dsp_common.o
snd-soc-sst-bytcr-rt5640-objs := bytcr_rt5640.o
snd-soc-sst-bytcr-rt5651-objs := bytcr_rt5651.o
@@ -31,6 +32,7 @@ snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o hda_dsp_c
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
snd-soc-sof_da7219_max98373-objs := sof_da7219_max98373.o hda_dsp_common.o
+snd-soc-ehl-rt5660-objs := ehl_rt5660.o hda_dsp_common.o
snd-soc-sof-sdw-objs += sof_sdw.o \
sof_sdw_rt711.o sof_sdw_rt700.o \
sof_sdw_rt1308.o sof_sdw_rt715.o \
@@ -43,6 +45,7 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_MAX98090_MACH) += snd-soc-sst-byt-max98090-mach.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON) += snd-soc-sst-bxt-da7219_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BXT_RT298_MACH) += snd-soc-sst-bxt-rt298.o
obj-$(CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH) += snd-soc-sst-sof-pcm512x.o
+obj-$(CONFIG_SND_SOC_INTEL_SOF_WM8804_MACH) += snd-soc-sst-sof-wm8804.o
obj-$(CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH) += snd-soc-sst-glk-rt5682_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_BROADWELL_MACH) += snd-soc-sst-broadwell.o
obj-$(CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH) += snd-soc-sst-bdw-rt5650-mach.o
@@ -68,4 +71,5 @@ obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max9
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH) += snd-soc-skl_hda_dsp.o
obj-$(CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH) += snd-soc-sof_da7219_max98373.o
+obj-$(CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH) += snd-soc-ehl-rt5660.o
obj-$(CONFIG_SND_SOC_INTEL_SOUNDWIRE_SOF_MACH) += snd-soc-sof-sdw.o
diff --git a/sound/soc/intel/boards/bdw-rt5650.c b/sound/soc/intel/boards/bdw-rt5650.c
index af2f50293208..a97e912adf4b 100644
--- a/sound/soc/intel/boards/bdw-rt5650.c
+++ b/sound/soc/intel/boards/bdw-rt5650.c
@@ -162,6 +162,34 @@ static int bdw_rt5650_rtd_init(struct snd_soc_pcm_runtime *rtd)
}
#endif
+static const unsigned int channels[] = {
+ 2, 4,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int bdw_rt5650_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /* Board supports stereo and quad configurations for capture */
+ if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
+ return 0;
+
+ runtime->hw.channels_max = 4;
+ return snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+}
+
+static const struct snd_soc_ops bdw_rt5650_fe_ops = {
+ .startup = bdw_rt5650_fe_startup,
+};
+
static int bdw_rt5650_init(struct snd_soc_pcm_runtime *rtd)
{
struct bdw_rt5650_priv *bdw_rt5650 =
@@ -234,6 +262,7 @@ static struct snd_soc_dai_link bdw_rt5650_dais[] = {
.name = "System PCM",
.stream_name = "System Playback",
.dynamic = 1,
+ .ops = &bdw_rt5650_fe_ops,
#if !IS_ENABLED(CONFIG_SND_SOC_SOF_BROADWELL)
.init = bdw_rt5650_rtd_init,
#endif
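The bdw-rt5650 hunk above (and the matching bdw-rt5677 and broadwell hunks below) adds the same front-end startup constraint. A minimal, self-contained sketch of that pattern for reference; the example_* names are hypothetical and not part of the patch:

#include <linux/kernel.h>
#include <sound/pcm.h>
#include <sound/soc.h>

/* channel counts the front end is allowed to negotiate */
static const unsigned int example_channels[] = { 2, 4 };

static const struct snd_pcm_hw_constraint_list example_channel_list = {
	.count = ARRAY_SIZE(example_channels),
	.list  = example_channels,
	.mask  = 0,
};

static int example_fe_startup(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* raise the ceiling, then restrict the runtime to the listed values */
	runtime->hw.channels_max = 4;
	return snd_pcm_hw_constraint_list(runtime, 0,
					  SNDRV_PCM_HW_PARAM_CHANNELS,
					  &example_channel_list);
}

static const struct snd_soc_ops example_fe_ops = {
	.startup = example_fe_startup,
};

/* hooked up on the front-end dai_link with: .ops = &example_fe_ops */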
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index cc41a348295e..5f96d7ac0a22 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -222,6 +222,31 @@ static int bdw_rt5677_rtd_init(struct snd_soc_pcm_runtime *rtd)
}
#endif
+static const unsigned int channels[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int bdw_rt5677_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /* Board supports stereo configuration only */
+ runtime->hw.channels_max = 2;
+ return snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+}
+
+static const struct snd_soc_ops bdw_rt5677_fe_ops = {
+ .startup = bdw_rt5677_fe_startup,
+};
+
static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
{
struct bdw_rt5677_priv *bdw_rt5677 =
@@ -321,6 +346,7 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = {
},
.dpcm_capture = 1,
.dpcm_playback = 1,
+ .ops = &bdw_rt5677_fe_ops,
SND_SOC_DAILINK_REG(fe, dummy, platform),
},
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index f9a8336a0541..42f8723beef2 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -143,6 +143,31 @@ static int broadwell_rtd_init(struct snd_soc_pcm_runtime *rtd)
}
#endif
+static const unsigned int channels[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int broadwell_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /* Board supports stereo configuration only */
+ runtime->hw.channels_max = 2;
+ return snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+}
+
+static const struct snd_soc_ops broadwell_fe_ops = {
+ .startup = broadwell_fe_startup,
+};
+
SND_SOC_DAILINK_DEF(system,
DAILINK_COMP_ARRAY(COMP_CPU("System Pin")));
@@ -180,6 +205,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.init = broadwell_rtd_init,
#endif
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &broadwell_fe_ops,
.dpcm_playback = 1,
.dpcm_capture = 1,
SND_SOC_DAILINK_REG(system, dummy, platform),
@@ -230,7 +256,8 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
},
};
-static int broadwell_suspend(struct snd_soc_card *card){
+static int broadwell_disable_jack(struct snd_soc_card *card)
+{
struct snd_soc_component *component;
for_each_card_components(card, component) {
@@ -241,9 +268,15 @@ static int broadwell_suspend(struct snd_soc_card *card){
break;
}
}
+
return 0;
}
+static int broadwell_suspend(struct snd_soc_card *card)
+{
+ return broadwell_disable_jack(card);
+}
+
static int broadwell_resume(struct snd_soc_card *card){
struct snd_soc_component *component;
@@ -292,8 +325,16 @@ static int broadwell_audio_probe(struct platform_device *pdev)
return devm_snd_soc_register_card(&pdev->dev, &broadwell_rt286);
}
+static int broadwell_audio_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ return broadwell_disable_jack(card);
+}
+
static struct platform_driver broadwell_audio = {
.probe = broadwell_audio_probe,
+ .remove = broadwell_audio_remove,
.driver = {
.name = "broadwell-audio",
},
diff --git a/sound/soc/intel/boards/bytcht_cx2072x.c b/sound/soc/intel/boards/bytcht_cx2072x.c
index 3b3df7c9008c..fad937610494 100644
--- a/sound/soc/intel/boards/bytcht_cx2072x.c
+++ b/sound/soc/intel/boards/bytcht_cx2072x.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
//
// ASoC DPCM Machine driver for Baytrail / Cherrytrail platforms with
// CX2072X codec
@@ -261,6 +261,9 @@ static int snd_byt_cht_cx2072x_probe(struct platform_device *pdev)
static struct platform_driver snd_byt_cht_cx2072x_driver = {
.driver = {
.name = "bytcht_cx2072x",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_byt_cht_cx2072x_probe,
};
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index 5e96e7d02733..f3791ff2bad1 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -272,6 +272,9 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
static struct platform_driver bytcht_da7213_driver = {
.driver = {
.name = "bytcht_da7213",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = bytcht_da7213_probe,
};
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index ddcd070100ef..9e5fc9430628 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -605,6 +605,9 @@ static int snd_byt_cht_es8316_mc_remove(struct platform_device *pdev)
static struct platform_driver snd_byt_cht_es8316_mc_driver = {
.driver = {
.name = "bytcht_es8316",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_byt_cht_es8316_mc_probe,
.remove = snd_byt_cht_es8316_mc_remove,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 08f4ae964b02..1fdb70b9e478 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -742,6 +742,30 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Toshiba Encore WT8-A */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT8-A"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_MCLK_EN),
+ },
+ { /* Toshiba Encore WT10-A */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A-103"),
+ },
+ .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+ BYT_RT5640_JD_SRC_JD1_IN4P |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_SSP0_AIF2 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Catch-all for generic Insyde tablets, must be last */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
@@ -898,9 +922,6 @@ static int byt_rt5640_init(struct snd_soc_pcm_runtime *runtime)
if (ret)
return ret;
- snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone");
- snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
-
if (byt_rt5640_quirk & BYT_RT5640_MCLK_EN) {
/*
* The firmware might enable the clock at
@@ -1053,7 +1074,6 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_rt5640_codec_fixup,
- .ignore_suspend = 1,
.nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
@@ -1311,6 +1331,9 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_byt_rt5640_mc_driver = {
.driver = {
.name = "bytcr_rt5640",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_byt_rt5640_mc_probe,
};
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 214ef41e23e6..520e916e329c 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -601,8 +601,6 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
dev_err(card->dev, "unable to add card controls\n");
return ret;
}
- snd_soc_dapm_ignore_suspend(&card->dapm, "Headphone");
- snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN) {
/*
@@ -775,7 +773,6 @@ static struct snd_soc_dai_link byt_rt5651_dais[] = {
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_rt5651_codec_fixup,
- .ignore_suspend = 1,
.nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
@@ -1100,6 +1097,9 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_byt_rt5651_mc_driver = {
.driver = {
.name = "bytcr_rt5651",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_byt_rt5651_mc_probe,
};
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 135701738a44..767ac2ae03e2 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -616,6 +616,9 @@ static int snd_cht_mc_remove(struct platform_device *pdev)
static struct platform_driver snd_cht_mc_driver = {
.driver = {
.name = "cht-bsw-max98090",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_cht_mc_probe,
.remove = snd_cht_mc_remove,
diff --git a/sound/soc/intel/boards/cht_bsw_nau8824.c b/sound/soc/intel/boards/cht_bsw_nau8824.c
index f456150f89c2..2f7c94d335c1 100644
--- a/sound/soc/intel/boards/cht_bsw_nau8824.c
+++ b/sound/soc/intel/boards/cht_bsw_nau8824.c
@@ -108,7 +108,7 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
}
/* NAU88L24 supports 4-button headset detection
- * KEY_MEDIA
+ * KEY_PLAYPAUSE
* KEY_VOICECOMMAND
* KEY_VOLUMEUP
* KEY_VOLUMEDOWN
@@ -122,7 +122,7 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
"Headset Jack creation failed %d\n", ret);
return ret;
}
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
@@ -282,6 +282,9 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_cht_mc_driver = {
.driver = {
.name = "cht-bsw-nau8824",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_cht_mc_probe,
};
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index e64eca56e426..22de138ffa33 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -680,6 +680,9 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_cht_mc_driver = {
.driver = {
.name = "cht-bsw-rt5645",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_cht_mc_probe,
};
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 097023a3ec14..7a43c70a1378 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -459,6 +459,9 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
static struct platform_driver snd_cht_mc_driver = {
.driver = {
.name = "cht-bsw-rt5672",
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+ .pm = &snd_soc_pm_ops,
+#endif
},
.probe = snd_cht_mc_probe,
};
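The bytcht_* and cht_bsw_* hunks above all add the same conditional PM hook. A consolidated sketch of the pattern, with a hypothetical driver name and the probe body elided:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static int example_mc_probe(struct platform_device *pdev)
{
	/* card setup elided; see the machine drivers above for real probes */
	return 0;
}

static struct platform_driver example_mc_driver = {
	.driver = {
		.name = "example-byt-cht-machine",	/* hypothetical */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
		/* with the SOF Baytrail DSP driver, use the generic ASoC PM handlers */
		.pm = &snd_soc_pm_ops,
#endif
	},
	.probe = example_mc_probe,
};
module_platform_driver(example_mc_driver);

MODULE_LICENSE("GPL v2");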
diff --git a/sound/soc/intel/boards/cml_rt1011_rt5682.c b/sound/soc/intel/boards/cml_rt1011_rt5682.c
index 8167b2977e1d..68eff29daf8f 100644
--- a/sound/soc/intel/boards/cml_rt1011_rt5682.c
+++ b/sound/soc/intel/boards/cml_rt1011_rt5682.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2019 Intel Corporation.
/*
@@ -30,6 +30,36 @@
#define CML_RT5682_CODEC_DAI "rt5682-aif1"
#define NAME_SIZE 32
+#define SOF_RT1011_SPEAKER_WL BIT(0)
+#define SOF_RT1011_SPEAKER_WR BIT(1)
+#define SOF_RT1011_SPEAKER_TL BIT(2)
+#define SOF_RT1011_SPEAKER_TR BIT(3)
+#define SPK_CH 4
+
+/* Default: Woofer speakers */
+static unsigned long sof_rt1011_quirk = SOF_RT1011_SPEAKER_WL |
+ SOF_RT1011_SPEAKER_WR;
+
+static int sof_rt1011_quirk_cb(const struct dmi_system_id *id)
+{
+ sof_rt1011_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id sof_rt1011_quirk_table[] = {
+ {
+ .callback = sof_rt1011_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Helios"),
+ },
+ .driver_data = (void *)(SOF_RT1011_SPEAKER_WL | SOF_RT1011_SPEAKER_WR |
+ SOF_RT1011_SPEAKER_TL | SOF_RT1011_SPEAKER_TR),
+ },
+ {
+ }
+};
+
static struct snd_soc_jack hdmi_jack[3];
struct hdmi_pcm {
@@ -48,15 +78,16 @@ struct card_private {
static const struct snd_kcontrol_new cml_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone Jack"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
- SOC_DAPM_PIN_SWITCH("TL Ext Spk"),
- SOC_DAPM_PIN_SWITCH("TR Ext Spk"),
SOC_DAPM_PIN_SWITCH("WL Ext Spk"),
SOC_DAPM_PIN_SWITCH("WR Ext Spk"),
};
+static const struct snd_kcontrol_new cml_rt1011_tt_controls[] = {
+ SOC_DAPM_PIN_SWITCH("TL Ext Spk"),
+ SOC_DAPM_PIN_SWITCH("TR Ext Spk"),
+};
+
static const struct snd_soc_dapm_widget cml_rt1011_rt5682_widgets[] = {
- SND_SOC_DAPM_SPK("TL Ext Spk", NULL),
- SND_SOC_DAPM_SPK("TR Ext Spk", NULL),
SND_SOC_DAPM_SPK("WL Ext Spk", NULL),
SND_SOC_DAPM_SPK("WR Ext Spk", NULL),
SND_SOC_DAPM_HP("Headphone Jack", NULL),
@@ -64,10 +95,13 @@ static const struct snd_soc_dapm_widget cml_rt1011_rt5682_widgets[] = {
SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};
+static const struct snd_soc_dapm_widget cml_rt1011_tt_widgets[] = {
+ SND_SOC_DAPM_SPK("TL Ext Spk", NULL),
+ SND_SOC_DAPM_SPK("TR Ext Spk", NULL),
+};
+
static const struct snd_soc_dapm_route cml_rt1011_rt5682_map[] = {
- /*speaker*/
- {"TL Ext Spk", NULL, "TL SPO"},
- {"TR Ext Spk", NULL, "TR SPO"},
+ /*WL/WR speaker*/
{"WL Ext Spk", NULL, "WL SPO"},
{"WR Ext Spk", NULL, "WR SPO"},
@@ -82,6 +116,12 @@ static const struct snd_soc_dapm_route cml_rt1011_rt5682_map[] = {
{"DMic", NULL, "SoC DMIC"},
};
+static const struct snd_soc_dapm_route cml_rt1011_tt_map[] = {
+ /*TL/TR speaker*/
+ {"TL Ext Spk", NULL, "TL SPO" },
+ {"TR Ext Spk", NULL, "TR SPO" },
+};
+
static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
@@ -121,6 +161,35 @@ static int cml_rt5682_codec_init(struct snd_soc_pcm_runtime *rtd)
return ret;
};
+static int cml_rt1011_spk_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret = 0;
+ struct snd_soc_card *card = rtd->card;
+
+ if (sof_rt1011_quirk & (SOF_RT1011_SPEAKER_TL |
+ SOF_RT1011_SPEAKER_TR)) {
+
+ ret = snd_soc_add_card_controls(card, cml_rt1011_tt_controls,
+ ARRAY_SIZE(cml_rt1011_tt_controls));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_new_controls(&card->dapm,
+ cml_rt1011_tt_widgets,
+ ARRAY_SIZE(cml_rt1011_tt_widgets));
+ if (ret)
+ return ret;
+
+ ret = snd_soc_dapm_add_routes(&card->dapm, cml_rt1011_tt_map,
+ ARRAY_SIZE(cml_rt1011_tt_map));
+
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int cml_rt5682_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -191,30 +260,38 @@ static int cml_rt1011_hw_params(struct snd_pcm_substream *substream,
* The feedback is captured for each codec individually.
* Hence all 4 codecs use 1 Tx slot each for feedback.
*/
- if (!strcmp(codec_dai->component->name, "i2c-10EC1011:00")) {
- ret = snd_soc_dai_set_tdm_slot(codec_dai,
- 0x4, 0x1, 4, 24);
- if (ret < 0)
- break;
- }
- if (!strcmp(codec_dai->component->name, "i2c-10EC1011:02")) {
- ret = snd_soc_dai_set_tdm_slot(codec_dai,
- 0x1, 0x1, 4, 24);
- if (ret < 0)
- break;
+ if (sof_rt1011_quirk & (SOF_RT1011_SPEAKER_WL |
+ SOF_RT1011_SPEAKER_WR)) {
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:00")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x4, 0x1, 4, 24);
+ if (ret < 0)
+ break;
+ }
+
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:01")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x8, 0x2, 4, 24);
+ if (ret < 0)
+ break;
+ }
}
- /* TDM Rx slot 2 is used for Right Woofer & Tweeters pair */
- if (!strcmp(codec_dai->component->name, "i2c-10EC1011:01")) {
- ret = snd_soc_dai_set_tdm_slot(codec_dai,
- 0x8, 0x2, 4, 24);
- if (ret < 0)
- break;
- }
- if (!strcmp(codec_dai->component->name, "i2c-10EC1011:03")) {
- ret = snd_soc_dai_set_tdm_slot(codec_dai,
- 0x2, 0x2, 4, 24);
- if (ret < 0)
- break;
+
+ if (sof_rt1011_quirk & (SOF_RT1011_SPEAKER_TL |
+ SOF_RT1011_SPEAKER_TR)) {
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:02")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x1, 0x1, 4, 24);
+ if (ret < 0)
+ break;
+ }
+
+ if (!strcmp(codec_dai->component->name, "i2c-10EC1011:03")) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai,
+ 0x2, 0x2, 4, 24);
+ if (ret < 0)
+ break;
+ }
}
}
if (ret < 0)
@@ -302,9 +379,7 @@ SND_SOC_DAILINK_DEF(ssp1_pin,
SND_SOC_DAILINK_DEF(ssp1_codec,
DAILINK_COMP_ARRAY(
/* WL */ COMP_CODEC("i2c-10EC1011:00", CML_RT1011_CODEC_DAI),
- /* WR */ COMP_CODEC("i2c-10EC1011:01", CML_RT1011_CODEC_DAI),
- /* TL */ COMP_CODEC("i2c-10EC1011:02", CML_RT1011_CODEC_DAI),
- /* TR */ COMP_CODEC("i2c-10EC1011:03", CML_RT1011_CODEC_DAI)));
+ /* WR */ COMP_CODEC("i2c-10EC1011:01", CML_RT1011_CODEC_DAI)));
SND_SOC_DAILINK_DEF(dmic_pin,
DAILINK_COMP_ARRAY(COMP_CPU("DMIC01 Pin")));
@@ -398,6 +473,7 @@ static struct snd_soc_dai_link cml_rt1011_rt5682_dailink[] = {
.dpcm_playback = 1,
.dpcm_capture = 1, /* Capture stream provides Feedback */
.no_pcm = 1,
+ .init = cml_rt1011_spk_init,
.ops = &cml_rt1011_ops,
SND_SOC_DAILINK_REG(ssp1_pin, ssp1_codec, platform),
},
@@ -412,14 +488,6 @@ static struct snd_soc_codec_conf rt1011_conf[] = {
.dlc = COMP_CODEC_CONF("i2c-10EC1011:01"),
.name_prefix = "WR",
},
- {
- .dlc = COMP_CODEC_CONF("i2c-10EC1011:02"),
- .name_prefix = "TL",
- },
- {
- .dlc = COMP_CODEC_CONF("i2c-10EC1011:03"),
- .name_prefix = "TR",
- },
};
/* Cometlake audio machine driver for RT1011 and RT5682 */
@@ -441,10 +509,12 @@ static struct snd_soc_card snd_soc_card_cml = {
static int snd_cml_rt1011_probe(struct platform_device *pdev)
{
+ struct snd_soc_dai_link_component *rt1011_dais_components;
+ struct snd_soc_codec_conf *rt1011_dais_confs;
struct card_private *ctx;
struct snd_soc_acpi_mach *mach;
const char *platform_name;
- int ret;
+ int ret, i;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -455,6 +525,73 @@ static int snd_cml_rt1011_probe(struct platform_device *pdev)
snd_soc_card_cml.dev = &pdev->dev;
platform_name = mach->mach_params.platform;
+ dmi_check_system(sof_rt1011_quirk_table);
+
+ dev_info(&pdev->dev, "sof_rt1011_quirk = %lx\n", sof_rt1011_quirk);
+
+ if (sof_rt1011_quirk & (SOF_RT1011_SPEAKER_TL |
+ SOF_RT1011_SPEAKER_TR)) {
+ rt1011_dais_confs = devm_kzalloc(&pdev->dev,
+ sizeof(struct snd_soc_codec_conf) *
+ SPK_CH, GFP_KERNEL);
+
+ if (!rt1011_dais_confs)
+ return -ENOMEM;
+
+ rt1011_dais_components = devm_kzalloc(&pdev->dev,
+ sizeof(struct snd_soc_dai_link_component) *
+ SPK_CH, GFP_KERNEL);
+
+ if (!rt1011_dais_components)
+ return -ENOMEM;
+
+ for (i = 0; i < SPK_CH; i++) {
+ rt1011_dais_confs[i].dlc.name = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL,
+ "i2c-10EC1011:0%d",
+ i);
+
+ if (!rt1011_dais_confs[i].dlc.name)
+ return -ENOMEM;
+
+ switch (i) {
+ case 0:
+ rt1011_dais_confs[i].name_prefix = "WL";
+ break;
+ case 1:
+ rt1011_dais_confs[i].name_prefix = "WR";
+ break;
+ case 2:
+ rt1011_dais_confs[i].name_prefix = "TL";
+ break;
+ case 3:
+ rt1011_dais_confs[i].name_prefix = "TR";
+ break;
+ default:
+ return -EINVAL;
+ }
+ rt1011_dais_components[i].name = devm_kasprintf(&pdev->dev,
+ GFP_KERNEL,
+ "i2c-10EC1011:0%d",
+ i);
+ if (!rt1011_dais_components[i].name)
+ return -ENOMEM;
+
+ rt1011_dais_components[i].dai_name = CML_RT1011_CODEC_DAI;
+ }
+
+ snd_soc_card_cml.codec_conf = rt1011_dais_confs;
+ snd_soc_card_cml.num_configs = SPK_CH;
+
+ for (i = 0; i < ARRAY_SIZE(cml_rt1011_rt5682_dailink); i++) {
+ if (!strcmp(cml_rt1011_rt5682_dailink[i].codecs->dai_name,
+ CML_RT1011_CODEC_DAI)) {
+ cml_rt1011_rt5682_dailink[i].codecs = rt1011_dais_components;
+ cml_rt1011_rt5682_dailink[i].num_codecs = SPK_CH;
+ }
+ }
+ }
+
/* set platform name for each dailink */
ret = snd_soc_fixup_dai_links_platform_name(&snd_soc_card_cml,
platform_name);
@@ -482,5 +619,6 @@ MODULE_DESCRIPTION("Cometlake Audio Machine driver - RT1011 and RT5682 in I2S mo
MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
MODULE_AUTHOR("Shuming Fan <shumingf@realtek.com>");
+MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cml_rt1011_rt5682");
diff --git a/sound/soc/intel/boards/ehl_rt5660.c b/sound/soc/intel/boards/ehl_rt5660.c
new file mode 100644
index 000000000000..78160e3b1615
--- /dev/null
+++ b/sound/soc/intel/boards/ehl_rt5660.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020 Intel Corporation
+
+/*
+ * ehl_rt5660 - ASoC machine driver for Elkhart Lake platforms
+ * with rt5660 codec
+ */
+
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <sound/jack.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+
+#include "hda_dsp_common.h"
+#include "../../codecs/rt5660.h"
+
+#define DUAL_CHANNEL 2
+#define HDMI_LINK_START 3
+#define HDMI_LINE_END 6
+#define NAME_SIZE 32
+#define IDISP_CODEC_MASK 0x4
+
+struct sof_card_private {
+ struct list_head hdmi_pcm_list;
+ bool idisp_codec;
+};
+
+static const struct snd_kcontrol_new rt5660_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Speaker"),
+ /* There are two MICBIAS in rt5660, each for one MIC */
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic2"),
+ SOC_DAPM_PIN_SWITCH("Line Out"),
+};
+
+static const struct snd_soc_dapm_widget rt5660_widgets[] = {
+ SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic2", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_LINE("Line Out", NULL),
+};
+
+static const struct snd_soc_dapm_route rt5660_map[] = {
+ {"Speaker", NULL, "SPO"},
+
+ {"Headset Mic", NULL, "MICBIAS1"},
+ {"Headset Mic2", NULL, "MICBIAS2"},
+
+ {"IN1P", NULL, "Headset Mic"},
+ {"IN2P", NULL, "Headset Mic2"},
+
+ {"Line Out", NULL, "LOUTL"},
+ {"Line Out", NULL, "LOUTR"},
+
+ {"DMic", NULL, "SoC DMIC"},
+};
+
+struct sof_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+static int hdmi_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
+ struct sof_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ /* dai_link id is 1:1 mapped to the PCM device */
+ pcm->device = rtd->dai_link->id;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int card_late_probe(struct snd_soc_card *card)
+{
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(card);
+ struct sof_hdmi_pcm *pcm;
+
+ if (list_empty(&ctx->hdmi_pcm_list))
+ return -ENOENT;
+
+ if (!ctx->idisp_codec)
+ return 0;
+
+ pcm = list_first_entry(&ctx->hdmi_pcm_list, struct sof_hdmi_pcm, head);
+
+ return hda_dsp_hdmi_build_controls(card, pcm->codec_dai->component);
+}
+
+static int rt5660_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT5660_SCLK_S_PLL1,
+ params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5660_PLL1_S_BCLK,
+ params_rate(params) * 50,
+ params_rate(params) * 512);
+ if (ret < 0)
+ dev_err(codec_dai->dev, "can't set codec pll: %d\n", ret);
+
+ return ret;
+}
+
+static struct snd_soc_ops rt5660_ops = {
+ .hw_params = rt5660_hw_params,
+};
+
+SND_SOC_DAILINK_DEF(ssp0_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("SSP0 Pin")));
+
+SND_SOC_DAILINK_DEF(rt5660_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-10EC5660:00", "rt5660-aif1")));
+
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("0000:00:1f.3")));
+
+SND_SOC_DAILINK_DEF(dmic_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC01 Pin")));
+SND_SOC_DAILINK_DEF(dmic_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
+SND_SOC_DAILINK_DEF(dmic16k,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC16k Pin")));
+
+SND_SOC_DAILINK_DEF(idisp1_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp1 Pin")));
+SND_SOC_DAILINK_DEF(idisp1_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi1")));
+
+SND_SOC_DAILINK_DEF(idisp2_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp2 Pin")));
+SND_SOC_DAILINK_DEF(idisp2_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi2")));
+
+SND_SOC_DAILINK_DEF(idisp3_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp3 Pin")));
+SND_SOC_DAILINK_DEF(idisp3_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi3")));
+
+SND_SOC_DAILINK_DEF(idisp4_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("iDisp4 Pin")));
+SND_SOC_DAILINK_DEF(idisp4_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("ehdaudio0D2", "intel-hdmi-hifi4")));
+
+static struct snd_soc_dai_link ehl_rt5660_dailink[] = {
+ /* back ends */
+ {
+ .name = "SSP0-Codec",
+ .id = 0,
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &rt5660_ops,
+ .nonatomic = true,
+ SND_SOC_DAILINK_REG(ssp0_pin, rt5660_codec, platform),
+ },
+ {
+ .name = "dmic48k",
+ .id = 1,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic_pin, dmic_codec, platform),
+ },
+ {
+ .name = "dmic16k",
+ .id = 2,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic16k, dmic_codec, platform),
+ },
+ {
+ .name = "iDisp1",
+ .id = 5,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp1_pin, idisp1_codec, platform),
+ },
+ {
+ .name = "iDisp2",
+ .id = 6,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp2_pin, idisp2_codec, platform),
+ },
+ {
+ .name = "iDisp3",
+ .id = 7,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp3_pin, idisp3_codec, platform),
+ },
+ {
+ .name = "iDisp4",
+ .id = 8,
+ .init = hdmi_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(idisp4_pin, idisp4_codec, platform),
+ },
+};
+
+/* SoC card */
+static struct snd_soc_card snd_soc_card_ehl_rt5660 = {
+ .name = "ehl-rt5660",
+ .owner = THIS_MODULE,
+ .dai_link = ehl_rt5660_dailink,
+ .num_links = ARRAY_SIZE(ehl_rt5660_dailink),
+ .dapm_widgets = rt5660_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(rt5660_widgets),
+ .dapm_routes = rt5660_map,
+ .num_dapm_routes = ARRAY_SIZE(rt5660_map),
+ .controls = rt5660_controls,
+ .num_controls = ARRAY_SIZE(rt5660_controls),
+ .fully_routed = true,
+ .late_probe = card_late_probe,
+};
+
+/* If the HDMI codec is not supported, fall back to the dummy codec */
+static void hdmi_link_init(struct snd_soc_card *card,
+ struct sof_card_private *ctx,
+ struct snd_soc_acpi_mach *mach)
+{
+ struct snd_soc_dai_link *link;
+ int i;
+
+ if (mach->mach_params.common_hdmi_codec_drv &&
+ (mach->mach_params.codec_mask & IDISP_CODEC_MASK)) {
+ ctx->idisp_codec = true;
+ return;
+ }
+
+ /*
+ * if HDMI is not enabled in the kernel config, or the
+ * HDMI codec is not supported
+ */
+ for (i = HDMI_LINK_START; i <= HDMI_LINE_END; i++) {
+ link = &card->dai_link[i];
+ link->codecs[0].name = "snd-soc-dummy";
+ link->codecs[0].dai_name = "snd-soc-dummy-dai";
+ }
+}
+
+static int snd_ehl_rt5660_probe(struct platform_device *pdev)
+{
+ struct snd_soc_acpi_mach *mach;
+ struct snd_soc_card *card = &snd_soc_card_ehl_rt5660;
+ struct sof_card_private *ctx;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+ snd_soc_card_set_drvdata(card, ctx);
+
+ mach = pdev->dev.platform_data;
+ ret = snd_soc_fixup_dai_links_platform_name(card,
+ mach->mach_params.platform);
+ if (ret)
+ return ret;
+
+ hdmi_link_init(card, ctx, mach);
+
+ return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static const struct platform_device_id ehl_board_ids[] = {
+ { .name = "ehl_rt5660" },
+ { }
+};
+
+static struct platform_driver snd_ehl_rt5660_driver = {
+ .driver = {
+ .name = "ehl_rt5660",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = snd_ehl_rt5660_probe,
+ .id_table = ehl_board_ids,
+};
+
+module_platform_driver(snd_ehl_rt5660_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) Elkhartlake + rt5660 Machine driver");
+MODULE_AUTHOR("libin.yang@intel.com");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ehl_rt5660");
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index f13158e4a1fc..954ab01f695b 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2018 Intel Corporation.
/*
@@ -407,7 +407,7 @@ static struct snd_soc_dai_link geminilake_dais[] = {
.name = "Glk Audio Echo Reference cap",
.stream_name = "Echoreference Capture",
.init = NULL,
- .capture_only = 1,
+ .dpcm_capture = 1,
.nonatomic = 1,
.dynamic = 1,
SND_SOC_DAILINK_REG(echoref, dummy, platform),
diff --git a/sound/soc/intel/boards/hda_dsp_common.c b/sound/soc/intel/boards/hda_dsp_common.c
index 9179f07f9ee4..244b57fba64c 100644
--- a/sound/soc/intel/boards/hda_dsp_common.c
+++ b/sound/soc/intel/boards/hda_dsp_common.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2019 Intel Corporation. All rights reserved.
diff --git a/sound/soc/intel/boards/hda_dsp_common.h b/sound/soc/intel/boards/hda_dsp_common.h
index 431f7f09dccb..727edd256962 100644
--- a/sound/soc/intel/boards/hda_dsp_common.h
+++ b/sound/soc/intel/boards/hda_dsp_common.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2019 Intel Corporation.
*/
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index 32cd90b8d4c4..dc3d897ad280 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2017-18 Intel Corporation.
/*
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index abd4e3839678..e29c31ffd241 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2018 Intel Corporation.
/*
@@ -692,7 +692,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.name = "Kbl Audio Echo Reference cap",
.stream_name = "Echoreference Capture",
.init = NULL,
- .capture_only = 1,
+ .dpcm_capture = 1,
.nonatomic = 1,
SND_SOC_DAILINK_REG(echoref, dummy, platform),
},
@@ -858,7 +858,7 @@ static struct snd_soc_dai_link kabylake_max98_927_373_dais[] = {
.name = "Kbl Audio Echo Reference cap",
.stream_name = "Echoreference Capture",
.init = NULL,
- .capture_only = 1,
+ .dpcm_capture = 1,
.nonatomic = 1,
SND_SOC_DAILINK_REG(echoref, dummy, platform),
},
diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
index 6460e3f0c974..d2a078454784 100644
--- a/sound/soc/intel/boards/kbl_rt5660.c
+++ b/sound/soc/intel/boards/kbl_rt5660.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2018-19 Canonical Corporation.
/*
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 658a9da3a40f..09ba55fc36d5 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -672,7 +672,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.name = "Kbl Audio Echo Reference cap",
.stream_name = "Echoreference Capture",
.init = NULL,
- .capture_only = 1,
+ .dpcm_capture = 1,
.nonatomic = 1,
SND_SOC_DAILINK_REG(echoref, dummy, platform),
},
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 1b1f8d7a4ea3..b34cf6cf1139 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -566,7 +566,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.name = "Kbl Audio Echo Reference cap",
.stream_name = "Echoreference Capture",
.init = NULL,
- .capture_only = 1,
+ .dpcm_capture = 1,
.nonatomic = 1,
SND_SOC_DAILINK_REG(echoref, dummy, platform),
},
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.c b/sound/soc/intel/boards/skl_hda_dsp_common.c
index 78ff5f24c40e..07bfb2e64b3b 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2015-18 Intel Corporation.
/*
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
index e8545d13062f..507750ef67f3 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_common.h
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2015-18 Intel Corporation.
*/
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 3be764299ab0..79c8947f840b 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2015-18 Intel Corporation.
/*
@@ -113,6 +113,8 @@ static char hda_soc_components[30];
#define IDISP_ROUTE_COUNT (IDISP_DAI_COUNT * 2)
#define IDISP_CODEC_MASK 0x4
+#define HDA_CODEC_AUTOSUSPEND_DELAY_MS 1000
+
static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
{
struct snd_soc_card *card = &hda_soc_card;
@@ -168,6 +170,29 @@ static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
return 0;
}
+static void skl_set_hda_codec_autosuspend_delay(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ struct hdac_hda_priv *hda_pvt;
+ struct snd_soc_dai *dai;
+
+ for_each_card_rtds(card, rtd) {
+ if (!strstr(rtd->dai_link->codecs->name, "ehdaudio"))
+ continue;
+ dai = asoc_rtd_to_codec(rtd, 0);
+ hda_pvt = snd_soc_component_get_drvdata(dai->component);
+ if (hda_pvt) {
+ /*
+ * all codecs are on the same bus, so it's sufficient
+ * to look up only the first one
+ */
+ snd_hda_set_power_save(hda_pvt->codec.bus,
+ HDA_CODEC_AUTOSUSPEND_DELAY_MS);
+ break;
+ }
+ }
+}
+
static int skl_hda_audio_probe(struct platform_device *pdev)
{
struct snd_soc_acpi_mach *mach;
@@ -206,7 +231,11 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
hda_soc_card.components = hda_soc_components;
}
- return devm_snd_soc_register_card(&pdev->dev, &hda_soc_card);
+ ret = devm_snd_soc_register_card(&pdev->dev, &hda_soc_card);
+ if (!ret)
+ skl_set_hda_codec_autosuspend_delay(&hda_soc_card);
+
+ return ret;
}
static struct platform_driver skl_hda_audio = {
diff --git a/sound/soc/intel/boards/sof_da7219_max98373.c b/sound/soc/intel/boards/sof_da7219_max98373.c
index b707dd3b5625..703703858595 100644
--- a/sound/soc/intel/boards/sof_da7219_max98373.c
+++ b/sound/soc/intel/boards/sof_da7219_max98373.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2019 Intel Corporation.
/*
@@ -86,6 +86,8 @@ static const struct snd_soc_dapm_widget widgets[] = {
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_POST_PMD |
SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
@@ -99,6 +101,9 @@ static const struct snd_soc_dapm_route audio_map[] = {
{ "Left Spk", NULL, "Left BE_OUT" },
{ "Right Spk", NULL, "Right BE_OUT" },
+
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
};
/* For MAX98360A amp */
@@ -111,6 +116,8 @@ static const struct snd_soc_dapm_widget max98360a_widgets[] = {
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_POST_PMD |
SND_SOC_DAPM_PRE_PMU),
+
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
};
static const struct snd_soc_dapm_route max98360a_map[] = {
@@ -123,6 +130,9 @@ static const struct snd_soc_dapm_route max98360a_map[] = {
{ "Headset Mic", NULL, "Platform Clock" },
{"Spk", NULL, "Speaker"},
+
+ /* digital mics */
+ {"DMic", NULL, "SoC DMIC"},
};
static struct snd_soc_jack headset;
@@ -265,6 +275,9 @@ SND_SOC_DAILINK_DEF(dmic_pin,
SND_SOC_DAILINK_DEF(dmic_codec,
DAILINK_COMP_ARRAY(COMP_CODEC("dmic-codec", "dmic-hifi")));
+SND_SOC_DAILINK_DEF(dmic16k_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("DMIC16k Pin")));
+
SND_SOC_DAILINK_DEF(idisp1_pin,
DAILINK_COMP_ARRAY(COMP_CPU("iDisp1 Pin")));
SND_SOC_DAILINK_DEF(idisp1_codec,
@@ -337,6 +350,14 @@ static struct snd_soc_dai_link dais[] = {
.no_pcm = 1,
SND_SOC_DAILINK_REG(idisp3_pin, idisp3_codec, platform),
},
+ {
+ .name = "dmic16k",
+ .id = 6,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ SND_SOC_DAILINK_REG(dmic16k_pin, dmic_codec, platform),
+ }
};
static struct snd_soc_card card_da7219_m98373 = {
diff --git a/sound/soc/intel/boards/sof_maxim_common.c b/sound/soc/intel/boards/sof_maxim_common.c
index 463b39a7ccfd..1a549b32d1c9 100644
--- a/sound/soc/intel/boards/sof_maxim_common.c
+++ b/sound/soc/intel/boards/sof_maxim_common.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2020 Intel Corporation. All rights reserved.
#include <linux/string.h>
diff --git a/sound/soc/intel/boards/sof_maxim_common.h b/sound/soc/intel/boards/sof_maxim_common.h
index 406bf0e81155..785b34335368 100644
--- a/sound/soc/intel/boards/sof_maxim_common.h
+++ b/sound/soc/intel/boards/sof_maxim_common.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright(c) 2020 Intel Corporation.
*/
diff --git a/sound/soc/intel/boards/sof_pcm512x.c b/sound/soc/intel/boards/sof_pcm512x.c
index fb7811899999..9fa8a4911276 100644
--- a/sound/soc/intel/boards/sof_pcm512x.c
+++ b/sound/soc/intel/boards/sof_pcm512x.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2018-2020 Intel Corporation.
/*
@@ -126,7 +126,6 @@ static struct snd_soc_dai_link_component platform_component[] = {
}
};
-#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
static int sof_card_late_probe(struct snd_soc_card *card)
{
struct sof_card_private *ctx = snd_soc_card_get_drvdata(card);
@@ -146,12 +145,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)
return hda_dsp_hdmi_build_controls(card, pcm->codec_dai->component);
}
-#else
-static int sof_card_late_probe(struct snd_soc_card *card)
-{
- return 0;
-}
-#endif
static const struct snd_kcontrol_new sof_controls[] = {
SOC_DAPM_PIN_SWITCH("Ext Spk"),
@@ -374,14 +367,12 @@ static int sof_audio_probe(struct platform_device *pdev)
sof_pcm512x_quirk = SOF_PCM512X_SSP_CODEC(2);
} else {
dmic_be_num = 2;
-#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
if (mach->mach_params.common_hdmi_codec_drv &&
(mach->mach_params.codec_mask & IDISP_CODEC_MASK))
ctx->idisp_codec = true;
/* links are always present in topology */
hdmi_num = 3;
-#endif
}
dmi_check_system(sof_pcm512x_quirk_table);
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index 8c29431b5847..13a48b0c35ae 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2019-2020 Intel Corporation.
/*
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index a64dc563b47e..e1c1a8ba78e6 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
@@ -411,25 +411,36 @@ static int create_codec_dai_name(struct device *dev,
static int set_codec_init_func(const struct snd_soc_acpi_link_adr *link,
struct snd_soc_dai_link *dai_links,
- bool playback)
+ bool playback, int group_id)
{
int i;
- for (i = 0; i < link->num_adr; i++) {
- unsigned int part_id;
- int codec_index;
-
- part_id = SDW_PART_ID(link->adr_d[i].adr);
- codec_index = find_codec_info_part(part_id);
+ do {
+ /*
+ * Initialize the codec. If the codec is part of an aggregated
+ * group (group_id > 0), initialize all codecs belonging to
+ * the same group.
+ */
+ for (i = 0; i < link->num_adr; i++) {
+ unsigned int part_id;
+ int codec_index;
- if (codec_index < 0)
- return codec_index;
+ part_id = SDW_PART_ID(link->adr_d[i].adr);
+ codec_index = find_codec_info_part(part_id);
- if (codec_info_list[codec_index].init)
- codec_info_list[codec_index].init(link, dai_links,
- &codec_info_list[codec_index],
- playback);
- }
+ if (codec_index < 0)
+ return codec_index;
+ /* The group_id is > 0 iff the codec is aggregated */
+ if (link->adr_d[i].endpoints->group_id != group_id)
+ continue;
+ if (codec_info_list[codec_index].init)
+ codec_info_list[codec_index].init(link,
+ dai_links,
+ &codec_info_list[codec_index],
+ playback);
+ }
+ link++;
+ } while (link->mask && group_id);
return 0;
}
@@ -623,7 +634,7 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
NULL, &sdw_ops);
ret = set_codec_init_func(link, dai_links + (*be_index)++,
- playback);
+ playback, group_id);
if (ret < 0) {
dev_err(dev, "failed to init codec %d", codec_index);
return ret;
@@ -652,9 +663,7 @@ static int sof_card_dai_links_create(struct device *dev,
struct snd_soc_card *card)
{
int ssp_num, sdw_be_num = 0, hdmi_num = 0, dmic_num;
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
struct snd_soc_dai_link_component *idisp_components;
-#endif
struct snd_soc_dai_link_component *ssp_components;
struct snd_soc_acpi_mach_params *mach_params;
const struct snd_soc_acpi_link_adr *adr_link;
@@ -675,10 +684,8 @@ static int sof_card_dai_links_create(struct device *dev,
for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
codec_info_list[i].amp_num = 0;
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
hdmi_num = sof_sdw_quirk & SOF_SDW_TGL_HDMI ?
SOF_TGL_HDMI_COUNT : SOF_PRE_TGL_HDMI_COUNT;
-#endif
ssp_mask = SOF_SSP_GET_PORT(sof_sdw_quirk);
/*
@@ -838,7 +845,6 @@ DMIC:
INC_ID(be_id, cpu_id, link_id);
}
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
/* HDMI */
if (hdmi_num > 0) {
idisp_components = devm_kcalloc(dev, hdmi_num,
@@ -875,7 +881,6 @@ DMIC:
sof_sdw_hdmi_init, NULL);
INC_ID(be_id, cpu_id, link_id);
}
-#endif
card->dai_link = links;
card->num_links = num_links;
@@ -898,6 +903,7 @@ static int mc_probe(struct platform_device *pdev)
struct snd_soc_card *card = &card_sof_sdw;
struct snd_soc_acpi_mach *mach;
struct mc_private *ctx;
+ int amp_num = 0, i;
int ret;
dev_dbg(&pdev->dev, "Entry %s\n", __func__);
@@ -908,9 +914,7 @@ static int mc_probe(struct platform_device *pdev)
dmi_check_system(sof_sdw_quirk_table);
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
-#endif
card->dev = &pdev->dev;
@@ -924,9 +928,18 @@ static int mc_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, ctx);
+ /*
+ * amp_num defaults to zero for each codec and is only
+ * increased for the amp codecs that are active on the
+ * platform in use
+ */
+ for (i = 0; i < ARRAY_SIZE(codec_info_list); i++)
+ amp_num += codec_info_list[i].amp_num;
+
card->components = devm_kasprintf(card->dev, GFP_KERNEL,
- "cfg-spk:%d",
- (sof_sdw_quirk & SOF_SDW_FOUR_SPK) ? 4 : 2);
+ "cfg-spk:%d cfg-amp:%d",
+ (sof_sdw_quirk & SOF_SDW_FOUR_SPK)
+ ? 4 : 2, amp_num);
if (!card->components)
return -ENOMEM;
diff --git a/sound/soc/intel/boards/sof_sdw_common.h b/sound/soc/intel/boards/sof_sdw_common.h
index dd593ff3575b..69b363b8a686 100644
--- a/sound/soc/intel/boards/sof_sdw_common.h
+++ b/sound/soc/intel/boards/sof_sdw_common.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-only
* Copyright (c) 2020 Intel Corporation
*/
diff --git a/sound/soc/intel/boards/sof_sdw_dmic.c b/sound/soc/intel/boards/sof_sdw_dmic.c
index e92176bf0ad4..89b0824b2381 100644
--- a/sound/soc/intel/boards/sof_sdw_dmic.c
+++ b/sound/soc/intel/boards/sof_sdw_dmic.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
diff --git a/sound/soc/intel/boards/sof_sdw_hdmi.c b/sound/soc/intel/boards/sof_sdw_hdmi.c
index c7b5612a39e6..0654b38a7e0d 100644
--- a/sound/soc/intel/boards/sof_sdw_hdmi.c
+++ b/sound/soc/intel/boards/sof_sdw_hdmi.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
@@ -16,7 +16,6 @@
#include "../../codecs/hdac_hdmi.h"
#include "hda_dsp_common.h"
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
static struct snd_soc_jack hdmi[MAX_HDMI_NUM];
struct hdmi_pcm {
@@ -28,7 +27,7 @@ struct hdmi_pcm {
int sof_sdw_hdmi_init(struct snd_soc_pcm_runtime *rtd)
{
struct mc_private *ctx = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai = rtd->codec_dai;
+ struct snd_soc_dai *dai = asoc_rtd_to_codec(rtd, 0);
struct hdmi_pcm *pcm;
pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
@@ -89,9 +88,3 @@ int sof_sdw_hdmi_card_late_probe(struct snd_soc_card *card)
return hdac_hdmi_jack_port_init(component, &card->dapm);
}
-#else
-int hdmi_card_late_probe(struct snd_soc_card *card)
-{
- return 0;
-}
-#endif
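The SoundWire helper hunks below repeat one mechanical conversion: direct use of rtd->codec_dai is replaced by the asoc_rtd_to_codec() accessor. A tiny sketch of the new accessor in isolation, with a hypothetical helper name:

#include <sound/soc.h>

/* fetch the component behind the first codec DAI of a runtime */
static struct snd_soc_component *
example_first_codec_component(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);

	return codec_dai->component;
}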
diff --git a/sound/soc/intel/boards/sof_sdw_rt1308.c b/sound/soc/intel/boards/sof_sdw_rt1308.c
index 321768e54d08..177cc781ada6 100644
--- a/sound/soc/intel/boards/sof_sdw_rt1308.c
+++ b/sound/soc/intel/boards/sof_sdw_rt1308.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
@@ -93,7 +93,7 @@ static int rt1308_i2s_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int clk_id, clk_freq, pll_out;
int err;
diff --git a/sound/soc/intel/boards/sof_sdw_rt5682.c b/sound/soc/intel/boards/sof_sdw_rt5682.c
index 5aa6211a1ed9..da354ba83939 100644
--- a/sound/soc/intel/boards/sof_sdw_rt5682.c
+++ b/sound/soc/intel/boards/sof_sdw_rt5682.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
@@ -47,7 +47,8 @@ static int rt5682_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
struct snd_soc_jack *jack;
int ret;
diff --git a/sound/soc/intel/boards/sof_sdw_rt700.c b/sound/soc/intel/boards/sof_sdw_rt700.c
index 2ee4e6910d7f..5f50491ba5ee 100644
--- a/sound/soc/intel/boards/sof_sdw_rt700.c
+++ b/sound/soc/intel/boards/sof_sdw_rt700.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
@@ -47,7 +47,8 @@ static int rt700_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
struct snd_soc_jack *jack;
int ret;
@@ -94,10 +95,10 @@ static int rt700_rtd_init(struct snd_soc_pcm_runtime *rtd)
jack = &ctx->sdw_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_PLAYPAUSE);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
ret = snd_soc_component_set_jack(component, jack, NULL);
if (ret)
diff --git a/sound/soc/intel/boards/sof_sdw_rt711.c b/sound/soc/intel/boards/sof_sdw_rt711.c
index 2a4917e3d561..d4d75c8dc6b7 100644
--- a/sound/soc/intel/boards/sof_sdw_rt711.c
+++ b/sound/soc/intel/boards/sof_sdw_rt711.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
@@ -71,7 +71,8 @@ static int rt711_rtd_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct mc_private *ctx = snd_soc_card_get_drvdata(card);
- struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *component = codec_dai->component;
struct snd_soc_jack *jack;
int ret;
@@ -118,10 +119,10 @@ static int rt711_rtd_init(struct snd_soc_pcm_runtime *rtd)
jack = &ctx->sdw_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_PLAYPAUSE);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
ret = snd_soc_component_set_jack(component, jack, NULL);
diff --git a/sound/soc/intel/boards/sof_sdw_rt715.c b/sound/soc/intel/boards/sof_sdw_rt715.c
index 321e1cbc03ed..9b298f79e784 100644
--- a/sound/soc/intel/boards/sof_sdw_rt715.c
+++ b/sound/soc/intel/boards/sof_sdw_rt715.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Intel Corporation
/*
diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
new file mode 100644
index 000000000000..c13fd20da559
--- /dev/null
+++ b/sound/soc/intel/boards/sof_wm8804.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2018-2020, Intel Corporation
+//
+// sof-wm8804.c - ASoC machine driver for the Up and Up2 boards
+// based on the WM8804/HiFiBerry Digi+
+
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+#include "../../codecs/wm8804.h"
+
+struct sof_card_private {
+ struct gpio_desc *gpio_44;
+ struct gpio_desc *gpio_48;
+ int sample_rate;
+};
+
+#define SOF_WM8804_UP2_QUIRK BIT(0)
+
+static unsigned long sof_wm8804_quirk;
+
+static int sof_wm8804_quirk_cb(const struct dmi_system_id *id)
+{
+ sof_wm8804_quirk = (unsigned long)id->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id sof_wm8804_quirk_table[] = {
+ {
+ .callback = sof_wm8804_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UP-APL01"),
+ },
+ .driver_data = (void *)SOF_WM8804_UP2_QUIRK,
+ },
+ {}
+};
+
+static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct sof_card_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_component *codec = codec_dai->component;
+ const int sysclk = 27000000; /* This is fixed on this board */
+ int samplerate;
+ long mclk_freq;
+ int mclk_div;
+ int sampling_freq;
+ bool clk_44;
+ int ret;
+
+ samplerate = params_rate(params);
+ if (samplerate == ctx->sample_rate)
+ return 0;
+
+ ctx->sample_rate = 0;
+
+ if (samplerate <= 96000) {
+ mclk_freq = samplerate * 256;
+ mclk_div = WM8804_MCLKDIV_256FS;
+ } else {
+ mclk_freq = samplerate * 128;
+ mclk_div = WM8804_MCLKDIV_128FS;
+ }
+
+ switch (samplerate) {
+ case 32000:
+ sampling_freq = 0x03;
+ break;
+ case 44100:
+ sampling_freq = 0x00;
+ break;
+ case 48000:
+ sampling_freq = 0x02;
+ break;
+ case 88200:
+ sampling_freq = 0x08;
+ break;
+ case 96000:
+ sampling_freq = 0x0a;
+ break;
+ case 176400:
+ sampling_freq = 0x0c;
+ break;
+ case 192000:
+ sampling_freq = 0x0e;
+ break;
+ default:
+ dev_err(rtd->card->dev,
+ "unsupported samplerate %d\n", samplerate);
+ return -EINVAL;
+ }
+
+ if (samplerate % 16000)
+ clk_44 = true; /* use 44.1 kHz root frequency */
+ else
+ clk_44 = false;
+
+ if (!(IS_ERR_OR_NULL(ctx->gpio_44) ||
+ IS_ERR_OR_NULL(ctx->gpio_48))) {
+ /*
+ * ensure both GPIOs are LOW first, then drive the
+ * relevant one to HIGH
+ */
+ if (clk_44) {
+ gpiod_set_value_cansleep(ctx->gpio_48, !clk_44);
+ gpiod_set_value_cansleep(ctx->gpio_44, clk_44);
+ } else {
+ gpiod_set_value_cansleep(ctx->gpio_44, clk_44);
+ gpiod_set_value_cansleep(ctx->gpio_48, !clk_44);
+ }
+ }
+
+ snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
+ snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
+
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
+ sysclk, SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ dev_err(rtd->card->dev,
+ "Failed to set WM8804 SYSCLK: %d\n", ret);
+ return ret;
+ }
+
+ /* set sampling frequency status bits */
+ snd_soc_component_update_bits(codec, WM8804_SPDTX4, 0x0f,
+ sampling_freq);
+
+ ctx->sample_rate = samplerate;
+
+ return 0;
+}
+
+/* machine stream operations */
+static struct snd_soc_ops sof_wm8804_ops = {
+ .hw_params = sof_wm8804_hw_params,
+};
+
+SND_SOC_DAILINK_DEF(ssp5_pin,
+ DAILINK_COMP_ARRAY(COMP_CPU("SSP5 Pin")));
+
+SND_SOC_DAILINK_DEF(ssp5_codec,
+ DAILINK_COMP_ARRAY(COMP_CODEC("i2c-1AEC8804:00", "wm8804-spdif")));
+
+SND_SOC_DAILINK_DEF(platform,
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("0000:00:0e.0")));
+
+static struct snd_soc_dai_link dailink[] = {
+ /* back ends */
+ {
+ .name = "SSP5-Codec",
+ .id = 0,
+ .no_pcm = 1,
+ .nonatomic = true,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &sof_wm8804_ops,
+ SND_SOC_DAILINK_REG(ssp5_pin, ssp5_codec, platform),
+ },
+};
+
+/* SoC card */
+static struct snd_soc_card sof_wm8804_card = {
+ .name = "wm8804", /* sof- prefix added automatically */
+ .owner = THIS_MODULE,
+ .dai_link = dailink,
+ .num_links = ARRAY_SIZE(dailink),
+};
+
+ /* i2c-<HID>:00 with HID being 8 chars */
+static char codec_name[SND_ACPI_I2C_ID_LEN];
+
+/*
+ * To control the HiFiBerry Digi+ PRO, two GPIOs have to be toggled to
+ * select the clock source. On the Up2 board, these are
+ * Pin 29/BCM5/Linux GPIO 430 and Pin 31/BCM6/Linux GPIO 404.
+ *
+ * Using the ACPI device name is not very nice, but since we only use
+ * the value for the Up2 board there is no risk of conflict with other
+ * platforms.
+ */
+
+static struct gpiod_lookup_table up2_gpios_table = {
+ /* .dev_id is set during probe */
+ .table = {
+ GPIO_LOOKUP("INT3452:01", 73, "BCM-GPIO5", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT3452:01", 74, "BCM-GPIO6", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
+static int sof_wm8804_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card;
+ struct snd_soc_acpi_mach *mach;
+ struct sof_card_private *ctx;
+ struct acpi_device *adev;
+ int dai_index = 0;
+ int ret;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mach = pdev->dev.platform_data;
+ card = &sof_wm8804_card;
+ card->dev = &pdev->dev;
+
+ dmi_check_system(sof_wm8804_quirk_table);
+
+ if (sof_wm8804_quirk & SOF_WM8804_UP2_QUIRK) {
+ up2_gpios_table.dev_id = dev_name(&pdev->dev);
+ gpiod_add_lookup_table(&up2_gpios_table);
+
+ /*
+ * The GPIOs are required for specific boards with
+ * local oscillators and optional in other cases.
+ * Since we can't identify when they are needed,
+ * request them as non-optional.
+ */
+
+ ctx->gpio_44 = devm_gpiod_get(&pdev->dev, "BCM-GPIO5",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpio_44)) {
+ ret = PTR_ERR(ctx->gpio_44);
+ dev_err(&pdev->dev,
+ "could not get BCM-GPIO5: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->gpio_48 = devm_gpiod_get(&pdev->dev, "BCM-GPIO6",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpio_48)) {
+ ret = PTR_ERR(ctx->gpio_48);
+ dev_err(&pdev->dev,
+ "could not get BCM-GPIO6: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /* fix index of codec dai */
+ for (i = 0; i < ARRAY_SIZE(dailink); i++) {
+ if (!strcmp(dailink[i].codecs->name, "i2c-1AEC8804:00")) {
+ dai_index = i;
+ break;
+ }
+ }
+
+ /* fixup codec name based on HID */
+ adev = acpi_dev_get_first_match_dev(mach->id, NULL, -1);
+ if (adev) {
+ snprintf(codec_name, sizeof(codec_name),
+ "%s%s", "i2c-", acpi_dev_name(adev));
+ put_device(&adev->dev);
+ dailink[dai_index].codecs->name = codec_name;
+ }
+
+ snd_soc_card_set_drvdata(card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, card);
+}
+
+static int sof_wm8804_remove(struct platform_device *pdev)
+{
+ if (sof_wm8804_quirk & SOF_WM8804_UP2_QUIRK)
+ gpiod_remove_lookup_table(&up2_gpios_table);
+ return 0;
+}
+
+static struct platform_driver sof_wm8804_driver = {
+ .driver = {
+ .name = "sof-wm8804",
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = sof_wm8804_probe,
+ .remove = sof_wm8804_remove,
+};
+module_platform_driver(sof_wm8804_driver);
+
+MODULE_DESCRIPTION("ASoC Intel(R) SOF + WM8804 Machine driver");
+MODULE_AUTHOR("Pierre-Louis Bossart");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sof-wm8804");
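As a side note on the hw_params() handler above: the WM8804 MCLK is derived as 256*fs for rates up to 96 kHz and 128*fs above that, so the divider and the PLL output always track the stream rate. A standalone sketch of that relationship (helper name is hypothetical, not part of the patch):

    /* hypothetical helper mirroring the 256fs/128fs split used above */
    static void wm8804_mclk_for_rate(unsigned int rate,
                                     long *mclk_freq, int *mclk_div)
    {
            if (rate <= 96000) {
                    *mclk_freq = rate * 256;        /* e.g. 48000 -> 12.288 MHz */
                    *mclk_div = WM8804_MCLKDIV_256FS;
            } else {
                    *mclk_freq = rate * 128;        /* e.g. 192000 -> 24.576 MHz */
                    *mclk_div = WM8804_MCLKDIV_128FS;
            }
    }
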
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index bd352878f89a..2674c9790fa1 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
snd-soc-sst-dsp-objs := sst-dsp.o
snd-soc-sst-acpi-objs := sst-acpi.o
snd-soc-sst-ipc-objs := sst-ipc.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
index f5092bc48364..32f77e29c2ff 100644
--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-bxt-match.c - tables and support for BXT ACPI enumeration.
*
@@ -71,7 +71,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
},
{
.id = "1AEC8804",
- .drv_name = "bxt-wm8804",
+ .drv_name = "sof-wm8804",
.sof_fw_filename = "sof-apl.ri",
.sof_tplg_filename = "sof-apl-wm8804.tplg",
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-cfl-match.c b/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
index ff9d6938b9f6..27b4b73d94d4 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cfl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-apci-intel-cfl-match.c - tables and support for CFL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
index 7d85bd5aff9f..cdea0c09fe0a 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-cml-match.c - tables and support for CML ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
index 828980d5630d..6a0bcc1a8429 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cnl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-cnl-match.c - tables and support for CNL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-ehl-match.c b/sound/soc/intel/common/soc-acpi-intel-ehl-match.c
index a1290c3fa99f..45e07d886013 100644
--- a/sound/soc/intel/common/soc-acpi-intel-ehl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-ehl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-apci-intel-ehl-match.c - tables and support for EHL ACPI enumeration.
*
@@ -8,8 +8,15 @@
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[] = {
+ {
+ .id = "INTC1027",
+ .drv_name = "ehl_rt5660",
+ .sof_fw_filename = "sof-ehl.ri",
+ .sof_tplg_filename = "sof-ehl-rt5660.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_ehl_machines);
diff --git a/sound/soc/intel/common/soc-acpi-intel-glk-match.c b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
index 60dea358fa04..26cb3b16cdd3 100644
--- a/sound/soc/intel/common/soc-acpi-intel-glk-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-glk-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-glk-match.c - tables and support for GLK ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
index cc972d2ac691..aa9cb522aac9 100644
--- a/sound/soc/intel/common/soc-acpi-intel-hda-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2018, Intel Corporation.
/*
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
index 16ec9f382b0f..6927bbbc66fc 100644
--- a/sound/soc/intel/common/soc-acpi-intel-icl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-icl-match.c - tables and support for ICL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
index 4388a32718d8..859f8a1bd914 100644
--- a/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-jsl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-apci-intel-jsl-match.c - tables and support for JSL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index e200baa11011..a4fbe6707ca7 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-kbl-match.c - tables and support for KBL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-skl-match.c b/sound/soc/intel/common/soc-acpi-intel-skl-match.c
index 42fa40a8d932..26f9ce146523 100644
--- a/sound/soc/intel/common/soc-acpi-intel-skl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-skl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-acpi-intel-skl-match.c - tables and support for SKL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
index 449d9d2286ae..5a56f4359479 100644
--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* soc-apci-intel-tgl-match.c - tables and support for ICL ACPI enumeration.
*
diff --git a/sound/soc/intel/common/soc-intel-quirks.h b/sound/soc/intel/common/soc-intel-quirks.h
index a9176150c6ed..b07df3059926 100644
--- a/sound/soc/intel/common/soc-intel-quirks.h
+++ b/sound/soc/intel/common/soc-intel-quirks.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* soc-intel-quirks.h - prototypes for quirk autodetection
*
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index ec66be269b69..36c077aa386e 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -10,7 +10,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/delay.h>
#include "sst-dsp.h"
@@ -34,16 +34,13 @@ EXPORT_SYMBOL_GPL(sst_shim32_read);
void sst_shim32_write64(void __iomem *addr, u32 offset, u64 value)
{
- memcpy_toio(addr + offset, &value, sizeof(value));
+ writeq(value, addr + offset);
}
EXPORT_SYMBOL_GPL(sst_shim32_write64);
u64 sst_shim32_read64(void __iomem *addr, u32 offset)
{
- u64 val;
-
- memcpy_fromio(&val, addr + offset, sizeof(val));
- return val;
+ return readq(addr + offset);
}
EXPORT_SYMBOL_GPL(sst_shim32_read64);
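The switch from memcpy_toio()/memcpy_fromio() to writeq()/readq() relies on <linux/io-64-nonatomic-lo-hi.h>, which provides a fallback for 32-bit platforms without native 64-bit MMIO accessors. Roughly, the lo-hi variant splits the access into two 32-bit halves, low word first; a simplified sketch of those semantics (not the actual header):

    /* simplified illustration of the lo-hi fallback semantics */
    static inline void example_lo_hi_writeq(u64 val, void __iomem *addr)
    {
            writel((u32)val, addr);                 /* low 32 bits first */
            writel((u32)(val >> 32), addr + 4);     /* then high 32 bits */
    }

    static inline u64 example_lo_hi_readq(const void __iomem *addr)
    {
            u64 low = readl(addr);
            u64 high = readl(addr + 4);

            return low | (high << 32);
    }
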
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index d27947aeb079..0594f89ea7f2 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -16,12 +16,12 @@
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/pgtable.h>
/* supported DMA engine drivers */
#include <linux/dma/dw.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "sst-dsp.h"
#include "sst-dsp-priv.h"
diff --git a/sound/soc/intel/haswell/sst-haswell-dsp.c b/sound/soc/intel/haswell/sst-haswell-dsp.c
index 88c3f63bded9..de80e19454c1 100644
--- a/sound/soc/intel/haswell/sst-haswell-dsp.c
+++ b/sound/soc/intel/haswell/sst-haswell-dsp.c
@@ -243,45 +243,92 @@ static irqreturn_t hsw_irq(int irq, void *context)
return ret;
}
+#define CSR_DEFAULT_VALUE 0x8480040E
+#define ISC_DEFAULT_VALUE 0x0
+#define ISD_DEFAULT_VALUE 0x0
+#define IMC_DEFAULT_VALUE 0x7FFF0003
+#define IMD_DEFAULT_VALUE 0x7FFF0003
+#define IPCC_DEFAULT_VALUE 0x0
+#define IPCD_DEFAULT_VALUE 0x0
+#define CLKCTL_DEFAULT_VALUE 0x7FF
+#define CSR2_DEFAULT_VALUE 0x0
+#define LTR_CTRL_DEFAULT_VALUE 0x0
+#define HMD_CTRL_DEFAULT_VALUE 0x0
+
+static void hsw_set_shim_defaults(struct sst_dsp *sst)
+{
+ sst_dsp_shim_write_unlocked(sst, SST_CSR, CSR_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_ISRX, ISC_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_ISRD, ISD_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_IMRX, IMC_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_IMRD, IMD_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_IPCX, IPCC_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_IPCD, IPCD_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_CLKCTL, CLKCTL_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_CSR2, CSR2_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_LTRC, LTR_CTRL_DEFAULT_VALUE);
+ sst_dsp_shim_write_unlocked(sst, SST_HMDC, HMD_CTRL_DEFAULT_VALUE);
+}
+
+/* all clock-gating minus DCLCGE and DTCGE */
+#define SST_VDRTCL2_CG_OTHER 0xB7D
+
static void hsw_set_dsp_D3(struct sst_dsp *sst)
{
- u32 val;
u32 reg;
- /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ /* disable clock core gating */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
+ reg &= ~(SST_VDRTCL2_DCLCGE);
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* enable power gating and switch off DRAM & IRAM blocks */
- val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
- val |= SST_VDRTCL0_DSRAMPGE_MASK |
- SST_VDRTCL0_ISRAMPGE_MASK;
- val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
- writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);
+ /* stall, reset and set 24MHz XOSC */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+ SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST,
+ SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST);
- /* switch off audio PLL */
- val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- val |= SST_VDRTCL2_APLLSE_MASK;
- writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+ /* DRAM power gating all */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ reg |= SST_VDRTCL0_ISRAMPGE_MASK |
+ SST_VDRTCL0_DSRAMPGE_MASK;
+ reg &= ~(SST_VDRTCL0_D3SRAMPGD);
+ reg |= SST_VDRTCL0_D3PGD;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+ udelay(50);
- /* disable MCLK(clkctl.smos = 0) */
- sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
- SST_CLKCTL_MASK, 0);
+ /* PLL shutdown enable */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg |= SST_VDRTCL2_APLLSE_MASK;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* Set D3 state, delay 50 us */
- val = readl(sst->addr.pci_cfg + SST_PMCS);
- val |= SST_PMCS_PS_MASK;
- writel(val, sst->addr.pci_cfg + SST_PMCS);
- udelay(50);
+ /* disable MCLK */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
+ SST_CLKCTL_MASK, 0);
- /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ /* switch clock gating */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg |= SST_VDRTCL2_CG_OTHER;
+ reg &= ~(SST_VDRTCL2_DTCGE);
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ /* enable DTCGE separately */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
+ reg |= SST_VDRTCL2_DTCGE;
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ /* set shim defaults */
+ hsw_set_shim_defaults(sst);
+
+ /* set D3 */
+ reg = readl(sst->addr.pci_cfg + SST_PMCS);
+ reg |= SST_PMCS_PS_MASK;
+ writel(reg, sst->addr.pci_cfg + SST_PMCS);
udelay(50);
+ /* enable clock core gating */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg |= SST_VDRTCL2_DCLCGE;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ udelay(50);
}
static void hsw_reset(struct sst_dsp *sst)
@@ -299,75 +346,62 @@ static void hsw_reset(struct sst_dsp *sst)
SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
}
+/* recommended CSR state for power-up */
+#define SST_CSR_D0_MASK (0x18A09C0C | SST_CSR_DCS_MASK)
+
static int hsw_set_dsp_D0(struct sst_dsp *sst)
{
- int tries = 10;
- u32 reg, fw_dump_bit;
+ u32 reg;
- /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ /* disable clock core gating */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
+ reg &= ~(SST_VDRTCL2_DCLCGE);
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
- reg |= SST_VDRTCL0_D3PGD;
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+ /* switch clock gating */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ reg |= SST_VDRTCL2_CG_OTHER;
+ reg &= ~(SST_VDRTCL2_DTCGE);
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* Set D0 state */
+ /* set D0 */
reg = readl(sst->addr.pci_cfg + SST_PMCS);
- reg &= ~SST_PMCS_PS_MASK;
+ reg &= ~(SST_PMCS_PS_MASK);
writel(reg, sst->addr.pci_cfg + SST_PMCS);
- /* check that ADSP shim is enabled */
- while (tries--) {
- reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
- if (reg == 0)
- goto finish;
-
- msleep(1);
- }
-
- return -ENODEV;
-
-finish:
- /* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
- sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
- SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);
+ /* DRAM power gating none */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ reg &= ~(SST_VDRTCL0_ISRAMPGE_MASK |
+ SST_VDRTCL0_DSRAMPGE_MASK);
+ reg |= SST_VDRTCL0_D3SRAMPGD;
+ reg |= SST_VDRTCL0_D3PGD;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+ mdelay(10);
- /* stall DSP core, set clk to 192/96Mhz */
- sst_dsp_shim_update_bits_unlocked(sst,
- SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
- SST_CSR_STALL | SST_CSR_DCS(4));
+ /* set shim defaults */
+ hsw_set_shim_defaults(sst);
- /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
+ /* restore MCLK */
sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
- SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
- SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);
+ SST_CLKCTL_MASK, SST_CLKCTL_MASK);
- /* Stall and reset core, set CSR */
- hsw_reset(sst);
-
- /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ /* PLL shutdown disable */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
+ reg &= ~(SST_VDRTCL2_APLLSE_MASK);
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+ SST_CSR_D0_MASK, SST_CSR_SBCS0 | SST_CSR_SBCS1 |
+ SST_CSR_STALL | SST_CSR_DCS(4));
udelay(50);
- /* switch on audio PLL */
+ /* enable clock core gating */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg &= ~SST_VDRTCL2_APLLSE_MASK;
+ reg |= SST_VDRTCL2_DCLCGE;
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* set default power gating control, enable power gating control for all blocks. that is,
- can't be accessed, please enable each block before accessing. */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
- reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
- /* for D0, always enable the block(DSRAM[0]) used for FW dump */
- fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
- writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);
-
+ /* clear reset */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_RST, 0);
/* disable DMA finish function for SSP0 & SSP1 */
sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
@@ -384,12 +418,6 @@ finish:
sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);
- /* clear IPC registers */
- sst_dsp_shim_write(sst, SST_IPCX, 0x0);
- sst_dsp_shim_write(sst, SST_IPCD, 0x0);
- sst_dsp_shim_write(sst, 0x80, 0x6);
- sst_dsp_shim_write(sst, 0xe0, 0x300a);
-
return 0;
}
@@ -415,11 +443,6 @@ static void hsw_sleep(struct sst_dsp *sst)
{
dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");
- /* put DSP into reset and stall */
- sst_dsp_shim_update_bits(sst, SST_CSR,
- SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
- SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
-
hsw_set_dsp_D3(sst);
dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
}
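Both hsw_set_dsp_D0() and hsw_set_dsp_D3() above repeat the same read-modify-write pattern on the PCI-config-mapped VDRTCTL/PMCS registers. A hypothetical helper (not part of the patch) that captures the pattern:

    /* hypothetical convenience wrapper for the readl/writel sequences above */
    static void hsw_pci_cfg_update(struct sst_dsp *sst, u32 offset,
                                   u32 clear, u32 set)
    {
            u32 reg;

            reg = readl(sst->addr.pci_cfg + offset);
            reg &= ~clear;
            reg |= set;
            writel(reg, sst->addr.pci_cfg + offset);
    }

    /* e.g. disabling core clock gating would become:
     *      hsw_pci_cfg_update(sst, SST_VDRTCTL2, SST_VDRTCL2_DCLCGE, 0);
     */
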
diff --git a/sound/soc/intel/haswell/sst-haswell-pcm.c b/sound/soc/intel/haswell/sst-haswell-pcm.c
index c183f8e94ee4..16ac16f5a641 100644
--- a/sound/soc/intel/haswell/sst-haswell-pcm.c
+++ b/sound/soc/intel/haswell/sst-haswell-pcm.c
@@ -10,8 +10,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
index 48544ff1a3e6..dd39149b89b1 100644
--- a/sound/soc/intel/skylake/Makefile
+++ b/sound/soc/intel/skylake/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-only
snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o skl-topology.o \
skl-sst-ipc.o skl-sst-dsp.o cnl-sst-dsp.o skl-sst-cldma.o \
skl-sst.o bxt-sst.o cnl-sst.o skl-sst-utils.o
diff --git a/sound/soc/intel/skylake/skl-i2s.h b/sound/soc/intel/skylake/skl-i2s.h
index d7c15873c0d4..dfce91e11be1 100644
--- a/sound/soc/intel/skylake/skl-i2s.h
+++ b/sound/soc/intel/skylake/skl-i2s.h
@@ -46,7 +46,7 @@ struct skl_i2s_config_mclk {
struct skl_i2s_config_mclk_ext {
u32 mdivctrl;
u32 mdivr_count;
- u32 mdivr[0];
+ u32 mdivr[];
} __packed;
struct skl_i2s_config_blob_signature {
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c
index bd43885f3805..a3a73c26f9aa 100644
--- a/sound/soc/intel/skylake/skl-ssp-clk.c
+++ b/sound/soc/intel/skylake/skl-ssp-clk.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2015-17 Intel Corporation
/*
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index d43cbf4a71ef..b233f89517c1 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -290,7 +290,7 @@ int snd_skl_parse_uuids(struct sst_dsp *ctx, const struct firmware *fw,
goto free_uuid_list;
}
- guid_copy(&module->uuid, (guid_t *)&mod_entry->uuid);
+ import_guid(&module->uuid, mod_entry->uuid);
module->id = (i | (index << 12));
module->is_loadable = mod_entry->type.load_type;
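The previous guid_copy() call took a guid_t pointer, which forced a cast from the raw byte array in the firmware module entry; import_guid() from <linux/uuid.h> accepts the raw bytes directly. A minimal sketch of the idiom (function name is illustrative):

    #include <linux/uuid.h>

    /* illustrative: mod_uuid points at 16 raw bytes from the firmware image */
    static void example_copy_uuid(guid_t *dst, const u8 *mod_uuid)
    {
            import_guid(dst, mod_uuid);     /* no (guid_t *) cast needed */
    }
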
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 69cd7a81bf2a..b9aab47d1202 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -14,6 +14,7 @@
#include <linux/uuid.h>
#include <sound/intel-nhlt.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
@@ -578,6 +579,38 @@ static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
return ret;
}
+static bool skl_tplg_is_multi_fmt(struct skl_dev *skl, struct skl_pipe *pipe)
+{
+ struct skl_pipe_fmt *cur_fmt;
+ struct skl_pipe_fmt *next_fmt;
+ int i;
+
+ if (pipe->nr_cfgs <= 1)
+ return false;
+
+ if (pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
+ return true;
+
+ for (i = 0; i < pipe->nr_cfgs - 1; i++) {
+ if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ cur_fmt = &pipe->configs[i].out_fmt;
+ next_fmt = &pipe->configs[i + 1].out_fmt;
+ } else {
+ cur_fmt = &pipe->configs[i].in_fmt;
+ next_fmt = &pipe->configs[i + 1].in_fmt;
+ }
+
+ if (!CHECK_HW_PARAMS(cur_fmt->channels, cur_fmt->freq,
+ cur_fmt->bps,
+ next_fmt->channels,
+ next_fmt->freq,
+ next_fmt->bps))
+ return true;
+ }
+
+ return false;
+}
+
/*
* Here, we select pipe format based on the pipe type and pipe
* direction to determine the current config index for the pipeline.
@@ -600,6 +633,14 @@ skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
return 0;
}
+ if (skl_tplg_is_multi_fmt(skl, pipe)) {
+ pipe->cur_config_idx = pipe->pipe_config_idx;
+ pipe->memory_pages = pconfig->mem_pages;
+ dev_dbg(skl->dev, "found pipe config idx:%d\n",
+ pipe->cur_config_idx);
+ return 0;
+ }
+
if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE) {
dev_dbg(skl->dev, "No conn_type detected, take 0th config\n");
pipe->cur_config_idx = 0;
@@ -1314,6 +1355,68 @@ static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol,
+ bool is_set)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
+ struct skl_dev *skl = bus_to_skl(bus);
+ struct skl_pipeline *ppl;
+ struct skl_pipe *pipe = NULL;
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ u32 *pipe_id;
+
+ if (!ec)
+ return -EINVAL;
+
+ if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
+ return -EINVAL;
+
+ pipe_id = ec->dobj.private;
+
+ list_for_each_entry(ppl, &skl->ppl_list, node) {
+ if (ppl->pipe->ppl_id == *pipe_id) {
+ pipe = ppl->pipe;
+ break;
+ }
+ }
+ if (!pipe)
+ return -EIO;
+
+ if (is_set)
+ pipe->pipe_config_idx = ucontrol->value.enumerated.item[0];
+ else
+ ucontrol->value.enumerated.item[0] = pipe->pipe_config_idx;
+
+ return 0;
+}
+
+static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
+}
+
+static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
+}
+
+static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
+}
+
+static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
+}
+
static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
unsigned int __user *data, unsigned int size)
{
@@ -1853,6 +1956,16 @@ static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
.get = skl_tplg_mic_control_get,
.put = skl_tplg_mic_control_set,
},
+ {
+ .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
+ .get = skl_tplg_multi_config_get,
+ .put = skl_tplg_multi_config_set,
+ },
+ {
+ .id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
+ .get = skl_tplg_multi_config_get_dmic,
+ .put = skl_tplg_multi_config_set_dmic,
+ }
};
static int skl_tplg_fill_pipe_cfg(struct device *dev,
@@ -1989,7 +2102,7 @@ static int skl_tplg_get_uuid(struct device *dev, guid_t *guid,
struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn)
{
if (uuid_tkn->token == SKL_TKN_UUID) {
- guid_copy(guid, (guid_t *)&uuid_tkn->uuid);
+ import_guid(guid, uuid_tkn->uuid);
return 0;
}
@@ -3013,12 +3126,21 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
case SND_SOC_TPLG_CTL_ENUM:
tplg_ec = container_of(hdr,
struct snd_soc_tplg_enum_control, hdr);
- if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
+ if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) {
se = (struct soc_enum *)kctl->private_value;
if (tplg_ec->priv.size)
- return skl_init_enum_data(bus->dev, se,
- tplg_ec);
+ skl_init_enum_data(bus->dev, se, tplg_ec);
}
+
+ /*
+ * now that the control initializations are done, remove
+ * write permission for the DMIC configuration enums to
+ * avoid conflicts between NHLT settings and user interaction
+ */
+
+ if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC)
+ kctl->access = SNDRV_CTL_ELEM_ACCESS_READ;
+
break;
default:
@@ -3376,8 +3498,8 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
dev_err(dev, "Too many UUID tokens\n");
return -EINVAL;
}
- guid_copy(&skl->modules[uuid_index++]->uuid,
- (guid_t *)&array->uuid->uuid);
+ import_guid(&skl->modules[uuid_index++]->uuid,
+ array->uuid->uuid);
tuple_size += sizeof(*array->uuid);
continue;
@@ -3488,6 +3610,37 @@ static int skl_manifest_load(struct snd_soc_component *cmpnt, int index,
return 0;
}
+static void skl_tplg_complete(struct snd_soc_component *component)
+{
+ struct snd_soc_dobj *dobj;
+ struct snd_soc_acpi_mach *mach =
+ dev_get_platdata(component->card->dev);
+ int i;
+
+ list_for_each_entry(dobj, &component->dobj_list, list) {
+ struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
+ struct soc_enum *se =
+ (struct soc_enum *)kcontrol->private_value;
+ char **texts = dobj->control.dtexts;
+ char chan_text[4];
+
+ if (dobj->type != SND_SOC_DOBJ_ENUM ||
+ dobj->control.kcontrol->put !=
+ skl_tplg_multi_config_set_dmic)
+ continue;
+ sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
+
+ for (i = 0; i < se->items; i++) {
+ struct snd_ctl_elem_value val;
+
+ if (strstr(texts[i], chan_text)) {
+ val.value.enumerated.item[0] = i;
+ kcontrol->put(kcontrol, &val);
+ }
+ }
+ }
+}
+
static struct snd_soc_tplg_ops skl_tplg_ops = {
.widget_load = skl_tplg_widget_load,
.control_load = skl_tplg_control_load,
@@ -3497,6 +3650,7 @@ static struct snd_soc_tplg_ops skl_tplg_ops = {
.io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
.manifest = skl_manifest_load,
.dai_load = skl_dai_load,
+ .complete = skl_tplg_complete,
};
/*
@@ -3565,8 +3719,20 @@ int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
ret = request_firmware(&fw, skl->tplg_name, bus->dev);
if (ret < 0) {
- dev_info(bus->dev, "tplg fw %s load failed with %d, falling back to dfw_sst.bin",
- skl->tplg_name, ret);
+ char alt_tplg_name[64];
+
+ snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin",
+ skl->mach->drv_name);
+ dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s",
+ skl->tplg_name, ret, alt_tplg_name);
+
+ ret = request_firmware(&fw, alt_tplg_name, bus->dev);
+ if (!ret)
+ goto component_load;
+
+ dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin",
+ alt_tplg_name, ret);
+
ret = request_firmware(&fw, "dfw_sst.bin", bus->dev);
if (ret < 0) {
dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n",
@@ -3575,6 +3741,8 @@ int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus)
}
}
+component_load:
+
/*
* The complete tplg for SKL is loaded as index 0, we don't use
* any other index
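The topology loading above now tries three names in order: the ACPI-derived tplg_name, then <drv_name>-tplg.bin, then dfw_sst.bin. The same fallback chain can be expressed as a loop over candidate names (hypothetical helper, not part of the patch):

    /* hypothetical: try each candidate topology name until one loads */
    static int example_request_tplg(struct device *dev,
                                    const struct firmware **fw,
                                    const char * const names[], int count)
    {
            int i, ret = -ENOENT;

            for (i = 0; i < count; i++) {
                    ret = request_firmware(fw, names[i], dev);
                    if (!ret)
                            return 0;
                    dev_info(dev, "tplg %s failed with %d, trying next\n",
                             names[i], ret);
            }
            return ret;
    }
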
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index e967800dbb62..9889f728752c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -119,7 +119,7 @@ struct skl_cpr_gtw_cfg {
struct skl_dma_control {
u32 node_id;
u32 config_length;
- u32 config_data[0];
+ u32 config_data[];
} __packed;
struct skl_cpr_cfg {
@@ -152,7 +152,7 @@ struct skl_up_down_mixer_cfg {
struct skl_algo_cfg {
struct skl_base_cfg base_cfg;
- char params[0];
+ char params[];
} __packed;
struct skl_base_outfmt_cfg {
@@ -306,6 +306,7 @@ struct skl_pipe {
struct skl_path_config configs[SKL_MAX_PATH_CONFIGS];
struct list_head w_list;
bool passthru;
+ u32 pipe_config_idx;
};
enum skl_module_state {
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 2bfbf59277c4..26057f38a014 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -49,7 +49,7 @@ struct skl_astate_param {
struct skl_astate_config {
u32 count;
- struct skl_astate_param astate_table[0];
+ struct skl_astate_param astate_table[];
};
struct skl_fw_config {
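The [0] to [] conversions above turn zero-length arrays into C99 flexible array members; the allocation size is then typically computed with struct_size() from <linux/overflow.h>. A minimal sketch using the skl_astate_config layout shown above (helper name is illustrative):

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* illustrative allocation of a table with 'count' A-state entries */
    static struct skl_astate_config *example_alloc_astate(u32 count)
    {
            struct skl_astate_config *cfg;

            cfg = kzalloc(struct_size(cfg, astate_table, count), GFP_KERNEL);
            if (cfg)
                    cfg->count = count;
            return cfg;
    }
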
diff --git a/sound/soc/jz4740/Kconfig b/sound/soc/jz4740/Kconfig
index e72f826062e9..29144720cb62 100644
--- a/sound/soc/jz4740/Kconfig
+++ b/sound/soc/jz4740/Kconfig
@@ -2,7 +2,7 @@
config SND_JZ4740_SOC_I2S
tristate "SoC Audio (I2S protocol) for Ingenic JZ4740 SoC"
depends on MIPS || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on OF && HAS_IOMEM
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y if you want to use I2S protocol and I2S codec on Ingenic JZ4740
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index 6f6f8dad0356..c7bd20104b20 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -129,7 +129,7 @@ static int jz4740_i2s_startup(struct snd_pcm_substream *substream,
uint32_t conf, ctrl;
int ret;
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return 0;
ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
@@ -153,7 +153,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
uint32_t conf;
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return;
conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
@@ -332,7 +332,7 @@ static int jz4740_i2s_suspend(struct snd_soc_component *component)
struct jz4740_i2s *i2s = snd_soc_component_get_drvdata(component);
uint32_t conf;
- if (component->active) {
+ if (snd_soc_component_active(component)) {
conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
conf &= ~JZ_AIC_CONF_ENABLE;
jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
@@ -355,7 +355,7 @@ static int jz4740_i2s_resume(struct snd_soc_component *component)
if (ret)
return ret;
- if (component->active) {
+ if (snd_soc_component_active(component)) {
ret = clk_prepare_enable(i2s->clk_i2s);
if (ret) {
clk_disable_unprepare(i2s->clk_aic);
@@ -504,7 +504,6 @@ static const struct snd_soc_component_driver jz4740_i2s_component = {
.resume = jz4740_i2s_resume,
};
-#ifdef CONFIG_OF
static const struct of_device_id jz4740_of_matches[] = {
{ .compatible = "ingenic,jz4740-i2s", .data = &jz4740_i2s_soc_info },
{ .compatible = "ingenic,jz4760-i2s", .data = &jz4760_i2s_soc_info },
@@ -513,7 +512,6 @@ static const struct of_device_id jz4740_of_matches[] = {
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jz4740_of_matches);
-#endif
static int jz4740_i2s_dev_probe(struct platform_device *pdev)
{
@@ -558,7 +556,7 @@ static struct platform_driver jz4740_i2s_driver = {
.probe = jz4740_i2s_dev_probe,
.driver = {
.name = "jz4740-i2s",
- .of_match_table = of_match_ptr(jz4740_of_matches)
+ .of_match_table = jz4740_of_matches,
},
};
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index 7f930556d961..7f3ac04b9425 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -807,10 +807,9 @@ static int mt6797_afe_pcm_dev_probe(struct platform_device *pdev)
/* request irq */
irq_id = platform_get_irq(pdev, 0);
- if (!irq_id) {
- dev_err(dev, "%pOFn no irq found\n", dev->of_node);
- return -ENXIO;
- }
+ if (irq_id < 0)
+ return irq_id;
+
ret = devm_request_irq(dev, irq_id, mt6797_afe_irq_handler,
IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
if (ret) {
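The probe fix above (repeated below for mt8183) follows the usual convention: platform_get_irq() returns a negative errno on failure, so the caller can simply propagate it without a bespoke error message. A condensed sketch of the pattern, with example_irq_handler as a hypothetical handler:

    /* illustrative probe fragment */
    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;     /* the negative errno already describes the failure */

    ret = devm_request_irq(dev, irq, example_irq_handler,
                           IRQF_TRIGGER_NONE, "asys-isr", afe);
    if (ret)
            return ret;
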
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 1e3f2d786066..1cc044425a9e 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -297,7 +297,7 @@ static int mt8173_afe_i2s_startup(struct snd_pcm_substream *substream,
{
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return 0;
regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
@@ -310,7 +310,7 @@ static void mt8173_afe_i2s_shutdown(struct snd_pcm_substream *substream,
{
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return;
mt8173_afe_set_i2s_enable(afe, false);
@@ -347,7 +347,7 @@ static int mt8173_afe_hdmi_startup(struct snd_pcm_substream *substream,
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
struct mt8173_afe_private *afe_priv = afe->platform_priv;
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return 0;
mt8173_afe_dais_enable_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
@@ -361,7 +361,7 @@ static void mt8173_afe_hdmi_shutdown(struct snd_pcm_substream *substream,
struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
struct mt8173_afe_private *afe_priv = afe->platform_priv;
- if (dai->active)
+ if (snd_soc_dai_active(dai))
return;
mt8173_afe_dais_disable_clks(afe, afe_priv->clocks[MT8173_CLK_I2S3_M],
diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
index c8ded53bde1d..e0c4714da92c 100644
--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
+++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
@@ -1186,10 +1186,9 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
/* request irq */
irq_id = platform_get_irq(pdev, 0);
- if (!irq_id) {
- dev_err(dev, "%pOFn no irq found\n", dev->of_node);
- return -ENXIO;
- }
+ if (irq_id < 0)
+ return irq_id;
+
ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler,
IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
if (ret) {
diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
index 5b3dfa79b4ae..ffd7c931e7bb 100644
--- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
+++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
@@ -6,11 +6,12 @@
// Copyright (c) 2018 MediaTek Inc.
// Author: Shunli Wang <shunli.wang@mediatek.com>
+#include <linux/input.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <sound/jack.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
-#include <sound/jack.h>
-#include <linux/pinctrl/consumer.h>
#include "mt8183-afe-common.h"
#include "../../codecs/da7219-aad.h"
@@ -471,9 +472,18 @@ mt8183_da7219_max98357_headset_init(struct snd_soc_component *component)
if (ret)
return ret;
+ snd_jack_set_key(
+ priv->headset_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(
+ priv->headset_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(
+ priv->headset_jack.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(
+ priv->headset_jack.jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
da7219_aad_jack_det(component, &priv->headset_jack);
- return ret;
+ return 0;
}
static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 2e9b56b29d31..b2e867113226 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -249,7 +249,7 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
/* Enable pclk to access registers and clock the fifo ip */
ret = clk_prepare_enable(fifo->pclk);
if (ret)
- return ret;
+ goto free_irq;
/* Setup status2 so it reports the memory pointer */
regmap_update_bits(fifo->map, FIFO_CTRL1,
@@ -269,8 +269,14 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
/* Take memory arbitror out of reset */
ret = reset_control_deassert(fifo->arb);
if (ret)
- clk_disable_unprepare(fifo->pclk);
+ goto free_clk;
+
+ return 0;
+free_clk:
+ clk_disable_unprepare(fifo->pclk);
+free_irq:
+ free_irq(fifo->irq, ss);
return ret;
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_open);
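The axg_fifo_pcm_open() fix above is the standard acquire/unwind shape: resources are taken in order (IRQ, pclk, arbiter reset) and released in reverse order via goto labels, so every failure path frees exactly what was acquired. A generic sketch of the pattern (names hypothetical):

    /* generic error-unwind sketch mirroring the fix above */
    ret = request_irq(irq, handler, 0, "example", ctx);
    if (ret)
            return ret;

    ret = clk_prepare_enable(clk);
    if (ret)
            goto free_irq;

    ret = reset_control_deassert(rst);
    if (ret)
            goto free_clk;

    return 0;

    free_clk:
            clk_disable_unprepare(clk);
    free_irq:
            free_irq(irq, ctx);
            return ret;
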
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index d51f3344be7c..6de27238e9df 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -149,7 +149,7 @@ static int axg_tdm_iface_startup(struct snd_pcm_substream *substream,
}
/* Apply component wide rate symmetry */
- if (dai->component->active) {
+ if (snd_soc_component_active(dai->component)) {
ret = snd_pcm_hw_constraint_single(substream->runtime,
SNDRV_PCM_HW_PARAM_RATE,
iface->rate);
diff --git a/sound/soc/meson/meson-card-utils.c b/sound/soc/meson/meson-card-utils.c
index 2ca8c98e204f..5a4a91c88734 100644
--- a/sound/soc/meson/meson-card-utils.c
+++ b/sound/soc/meson/meson-card-utils.c
@@ -49,19 +49,26 @@ int meson_card_reallocate_links(struct snd_soc_card *card,
links = krealloc(priv->card.dai_link,
num_links * sizeof(*priv->card.dai_link),
GFP_KERNEL | __GFP_ZERO);
+ if (!links)
+ goto err_links;
+
ldata = krealloc(priv->link_data,
num_links * sizeof(*priv->link_data),
GFP_KERNEL | __GFP_ZERO);
-
- if (!links || !ldata) {
- dev_err(priv->card.dev, "failed to allocate links\n");
- return -ENOMEM;
- }
+ if (!ldata)
+ goto err_ldata;
priv->card.dai_link = links;
priv->link_data = ldata;
priv->card.num_links = num_links;
return 0;
+
+err_ldata:
+ kfree(links);
+err_links:
+ dev_err(priv->card.dev, "failed to allocate links\n");
+ return -ENOMEM;
+
}
EXPORT_SYMBOL_GPL(meson_card_reallocate_links);
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 1e38ce858326..07f8cf9980e3 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -733,12 +733,9 @@ static int mxs_saif_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxs_saif *saif;
- int irq, ret = 0;
+ int irq, ret;
struct device_node *master;
- if (!np)
- return -EINVAL;
-
saif = devm_kzalloc(&pdev->dev, sizeof(*saif), GFP_KERNEL);
if (!saif)
return -ENOMEM;
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index d4c0f580a565..0ac85eada75c 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -9,14 +9,8 @@ config SND_PXA2XX_SOC
to select the audio interfaces to support below.
config SND_MMP_SOC
- bool "Soc Audio for Marvell MMP chips"
- depends on ARCH_MMP
+ bool
select MMP_SRAM
- select SND_SOC_GENERIC_DMAENGINE_PCM
- select SND_ARM
- help
- Say Y if you want to add support for codecs attached to
- the MMP SSPA interface.
config SND_PXA2XX_AC97
tristate
@@ -39,7 +33,13 @@ config SND_PXA_SOC_SSP
select SND_PXA2XX_LIB
config SND_MMP_SOC_SSPA
- tristate
+ tristate "SoC Audio via MMP SSPA ports"
+ depends on ARCH_MMP
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_ARM
+ help
+ Say Y if you want to add support for codecs attached to
+ the MMP SSPA interface.
config SND_PXA2XX_SOC_CORGI
tristate "SoC Audio support for Sharp Zaurus SL-C7x0"
@@ -128,9 +128,8 @@ config SND_PXA2XX_SOC_E800
Toshiba e800 PDA
config SND_PXA2XX_SOC_EM_X270
- tristate "SoC Audio support for CompuLab EM-x270, eXeda and CM-X300"
- depends on SND_PXA2XX_SOC && (MACH_EM_X270 || MACH_EXEDA || \
- MACH_CM_X300)
+ tristate "SoC Audio support for CompuLab CM-X300"
+ depends on SND_PXA2XX_SOC && MACH_CM_X300
depends on AC97_BUS=n
select REGMAP
select AC97_BUS_NEW
@@ -232,8 +231,8 @@ config SND_PXA2XX_SOC_IMOTE2
config SND_MMP_SOC_BROWNSTONE
tristate "SoC Audio support for Marvell Brownstone"
- depends on SND_MMP_SOC && MACH_BROWNSTONE && I2C
- select SND_MMP_SOC_SSPA
+ depends on SND_MMP_SOC_SSPA && MACH_BROWNSTONE && I2C
+ select SND_MMP_SOC
select MFD_WM8994
select SND_SOC_WM8994
help
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index 3548a2634a63..4255851c71c1 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -11,9 +11,9 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/slab.h>
-#include <linux/pxa2xx_ssp.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -28,71 +28,65 @@
* SSPA audio private data
*/
struct sspa_priv {
- struct ssp_device *sspa;
- struct snd_dmaengine_dai_dma_data *dma_params;
+ void __iomem *tx_base;
+ void __iomem *rx_base;
+
+ struct snd_dmaengine_dai_dma_data playback_dma_data;
+ struct snd_dmaengine_dai_dma_data capture_dma_data;
+ struct clk *clk;
struct clk *audio_clk;
struct clk *sysclk;
- int dai_fmt;
+
int running_cnt;
+ u32 sp;
+ u32 ctrl;
};
-static void mmp_sspa_write_reg(struct ssp_device *sspa, u32 reg, u32 val)
-{
- __raw_writel(val, sspa->mmio_base + reg);
-}
-
-static u32 mmp_sspa_read_reg(struct ssp_device *sspa, u32 reg)
-{
- return __raw_readl(sspa->mmio_base + reg);
-}
-
-static void mmp_sspa_tx_enable(struct ssp_device *sspa)
+static void mmp_sspa_tx_enable(struct sspa_priv *sspa)
{
- unsigned int sspa_sp;
+ unsigned int sspa_sp = sspa->sp;
- sspa_sp = mmp_sspa_read_reg(sspa, SSPA_TXSP);
+ sspa_sp &= ~SSPA_SP_MSL;
sspa_sp |= SSPA_SP_S_EN;
sspa_sp |= SSPA_SP_WEN;
- mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+ __raw_writel(sspa_sp, sspa->tx_base + SSPA_SP);
}
-static void mmp_sspa_tx_disable(struct ssp_device *sspa)
+static void mmp_sspa_tx_disable(struct sspa_priv *sspa)
{
- unsigned int sspa_sp;
+ unsigned int sspa_sp = sspa->sp;
- sspa_sp = mmp_sspa_read_reg(sspa, SSPA_TXSP);
+ sspa_sp &= ~SSPA_SP_MSL;
sspa_sp &= ~SSPA_SP_S_EN;
sspa_sp |= SSPA_SP_WEN;
- mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+ __raw_writel(sspa_sp, sspa->tx_base + SSPA_SP);
}
-static void mmp_sspa_rx_enable(struct ssp_device *sspa)
+static void mmp_sspa_rx_enable(struct sspa_priv *sspa)
{
- unsigned int sspa_sp;
+ unsigned int sspa_sp = sspa->sp;
- sspa_sp = mmp_sspa_read_reg(sspa, SSPA_RXSP);
sspa_sp |= SSPA_SP_S_EN;
sspa_sp |= SSPA_SP_WEN;
- mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
+ __raw_writel(sspa_sp, sspa->rx_base + SSPA_SP);
}
-static void mmp_sspa_rx_disable(struct ssp_device *sspa)
+static void mmp_sspa_rx_disable(struct sspa_priv *sspa)
{
- unsigned int sspa_sp;
+ unsigned int sspa_sp = sspa->sp;
- sspa_sp = mmp_sspa_read_reg(sspa, SSPA_RXSP);
sspa_sp &= ~SSPA_SP_S_EN;
sspa_sp |= SSPA_SP_WEN;
- mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
+ __raw_writel(sspa_sp, sspa->rx_base + SSPA_SP);
}
static int mmp_sspa_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct sspa_priv *priv = snd_soc_dai_get_drvdata(dai);
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(dai);
- clk_enable(priv->sysclk);
- clk_enable(priv->sspa->clk);
+ clk_prepare_enable(sspa->sysclk);
+ clk_prepare_enable(sspa->clk);
return 0;
}
@@ -100,11 +94,10 @@ static int mmp_sspa_startup(struct snd_pcm_substream *substream,
static void mmp_sspa_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct sspa_priv *priv = snd_soc_dai_get_drvdata(dai);
-
- clk_disable(priv->sspa->clk);
- clk_disable(priv->sysclk);
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(dai);
+ clk_disable_unprepare(sspa->clk);
+ clk_disable_unprepare(sspa->sysclk);
}
/*
@@ -113,12 +106,16 @@ static void mmp_sspa_shutdown(struct snd_pcm_substream *substream,
static int mmp_sspa_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq, int dir)
{
- struct sspa_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(cpu_dai);
+ struct device *dev = cpu_dai->component->dev;
int ret = 0;
+ if (dev->of_node)
+ return -ENOTSUPP;
+
switch (clk_id) {
case MMP_SSPA_CLK_AUDIO:
- ret = clk_set_rate(priv->audio_clk, freq);
+ ret = clk_set_rate(sspa->audio_clk, freq);
if (ret)
return ret;
break;
@@ -137,17 +134,21 @@ static int mmp_sspa_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
int source, unsigned int freq_in,
unsigned int freq_out)
{
- struct sspa_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(cpu_dai);
+ struct device *dev = cpu_dai->component->dev;
int ret = 0;
+ if (dev->of_node)
+ return -ENOTSUPP;
+
switch (pll_id) {
case MMP_SYSCLK:
- ret = clk_set_rate(priv->sysclk, freq_out);
+ ret = clk_set_rate(sspa->sysclk, freq_out);
if (ret)
return ret;
break;
case MMP_SSPA_CLK:
- ret = clk_set_rate(priv->sspa->clk, freq_out);
+ ret = clk_set_rate(sspa->clk, freq_out);
if (ret)
return ret;
break;
@@ -159,36 +160,20 @@ static int mmp_sspa_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
}
/*
- * Set up the sspa dai format. The sspa port must be inactive
- * before calling this function as the physical
- * interface format is changed.
+ * Set up the sspa dai format.
*/
static int mmp_sspa_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
- struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssp_device *sspa = sspa_priv->sspa;
- u32 sspa_sp, sspa_ctrl;
-
- /* check if we need to change anything at all */
- if (sspa_priv->dai_fmt == fmt)
- return 0;
-
- /* we can only change the settings if the port is not in use */
- if ((mmp_sspa_read_reg(sspa, SSPA_TXSP) & SSPA_SP_S_EN) ||
- (mmp_sspa_read_reg(sspa, SSPA_RXSP) & SSPA_SP_S_EN)) {
- dev_err(sspa->dev,
- "can't change hardware dai format: stream is in use\n");
- return -EINVAL;
- }
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(cpu_dai);
/* reset port settings */
- sspa_sp = SSPA_SP_WEN | SSPA_SP_S_RST | SSPA_SP_FFLUSH;
- sspa_ctrl = 0;
+ sspa->sp = SSPA_SP_WEN | SSPA_SP_S_RST | SSPA_SP_FFLUSH;
+ sspa->ctrl = 0;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- sspa_sp |= SSPA_SP_MSL;
+ sspa->sp |= SSPA_SP_MSL;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
@@ -198,7 +183,7 @@ static int mmp_sspa_set_dai_fmt(struct snd_soc_dai *cpu_dai,
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
- sspa_sp |= SSPA_SP_FSP;
+ sspa->sp |= SSPA_SP_FSP;
break;
default:
return -EINVAL;
@@ -206,39 +191,16 @@ static int mmp_sspa_set_dai_fmt(struct snd_soc_dai *cpu_dai,
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- sspa_sp |= SSPA_TXSP_FPER(63);
- sspa_sp |= SSPA_SP_FWID(31);
- sspa_ctrl |= SSPA_CTL_XDATDLY(1);
+ sspa->ctrl |= SSPA_CTL_XDATDLY(1);
break;
default:
return -EINVAL;
}
- mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
- mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
-
- sspa_sp &= ~(SSPA_SP_S_RST | SSPA_SP_FFLUSH);
- mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
- mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
-
- /*
- * FIXME: hw issue, for the tx serial port,
- * can not config the master/slave mode;
- * so must clean this bit.
- * The master/slave mode has been set in the
- * rx port.
- */
- sspa_sp &= ~SSPA_SP_MSL;
- mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
-
- mmp_sspa_write_reg(sspa, SSPA_TXCTL, sspa_ctrl);
- mmp_sspa_write_reg(sspa, SSPA_RXCTL, sspa_ctrl);
-
/* Since we are configuring the timings for the format by hand
* we have to defer some things until hw_params() where we
* know parameters like the sample size.
*/
- sspa_priv->dai_fmt = fmt;
return 0;
}
@@ -250,65 +212,71 @@ static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
- struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
- struct ssp_device *sspa = sspa_priv->sspa;
- struct snd_dmaengine_dai_dma_data *dma_params;
- u32 sspa_ctrl;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- sspa_ctrl = mmp_sspa_read_reg(sspa, SSPA_TXCTL);
- else
- sspa_ctrl = mmp_sspa_read_reg(sspa, SSPA_RXCTL);
-
- sspa_ctrl &= ~SSPA_CTL_XFRLEN1_MASK;
- sspa_ctrl |= SSPA_CTL_XFRLEN1(params_channels(params) - 1);
- sspa_ctrl &= ~SSPA_CTL_XWDLEN1_MASK;
- sspa_ctrl |= SSPA_CTL_XWDLEN1(SSPA_CTL_32_BITS);
- sspa_ctrl &= ~SSPA_CTL_XSSZ1_MASK;
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(dai);
+ struct device *dev = dai->component->dev;
+ u32 sspa_ctrl = sspa->ctrl;
+ int bits;
+ int bitval;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S8:
- sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_8_BITS);
+ bits = 8;
+ bitval = SSPA_CTL_8_BITS;
break;
case SNDRV_PCM_FORMAT_S16_LE:
- sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_16_BITS);
- break;
- case SNDRV_PCM_FORMAT_S20_3LE:
- sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_20_BITS);
+ bits = 16;
+ bitval = SSPA_CTL_16_BITS;
break;
case SNDRV_PCM_FORMAT_S24_3LE:
- sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_24_BITS);
+ bits = 24;
+ bitval = SSPA_CTL_24_BITS;
break;
case SNDRV_PCM_FORMAT_S32_LE:
- sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_32_BITS);
+ bits = 32;
+ bitval = SSPA_CTL_32_BITS;
break;
default:
return -EINVAL;
}
+ if (dev->of_node || params_channels(params) == 2)
+ sspa_ctrl |= SSPA_CTL_XPH;
+
+ sspa_ctrl &= ~SSPA_CTL_XWDLEN1_MASK;
+ sspa_ctrl |= SSPA_CTL_XWDLEN1(bitval);
+
+ sspa_ctrl &= ~SSPA_CTL_XSSZ1_MASK;
+ sspa_ctrl |= SSPA_CTL_XSSZ1(bitval);
+
+ sspa_ctrl &= ~SSPA_CTL_XSSZ2_MASK;
+ sspa_ctrl |= SSPA_CTL_XSSZ2(bitval);
+
+ sspa->sp &= ~SSPA_SP_FWID_MASK;
+ sspa->sp |= SSPA_SP_FWID(bits - 1);
+
+ sspa->sp &= ~SSPA_TXSP_FPER_MASK;
+ sspa->sp |= SSPA_TXSP_FPER(bits * 2 - 1);
+
+ if (dev->of_node) {
+ clk_set_rate(sspa->clk, params_rate(params) *
+ params_channels(params) * bits);
+ }
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- mmp_sspa_write_reg(sspa, SSPA_TXCTL, sspa_ctrl);
- mmp_sspa_write_reg(sspa, SSPA_TXFIFO_LL, 0x1);
+ __raw_writel(sspa_ctrl, sspa->tx_base + SSPA_CTL);
+ __raw_writel(0x1, sspa->tx_base + SSPA_FIFO_UL);
} else {
- mmp_sspa_write_reg(sspa, SSPA_RXCTL, sspa_ctrl);
- mmp_sspa_write_reg(sspa, SSPA_RXFIFO_UL, 0x0);
+ __raw_writel(sspa_ctrl, sspa->rx_base + SSPA_CTL);
+ __raw_writel(0x0, sspa->rx_base + SSPA_FIFO_UL);
}
- dma_params = &sspa_priv->dma_params[substream->stream];
- dma_params->addr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
- (sspa->phys_base + SSPA_TXD) :
- (sspa->phys_base + SSPA_RXD);
- snd_soc_dai_set_dma_data(cpu_dai, substream, dma_params);
return 0;
}
static int mmp_sspa_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
- struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
- struct ssp_device *sspa = sspa_priv->sspa;
+ struct sspa_priv *sspa = snd_soc_dai_get_drvdata(dai);
int ret = 0;
switch (cmd) {
@@ -321,25 +289,25 @@ static int mmp_sspa_trigger(struct snd_pcm_substream *substream, int cmd,
* enabled or not; if has been enabled by another
* stream, do not enable again.
*/
- if (!sspa_priv->running_cnt)
+ if (!sspa->running_cnt)
mmp_sspa_rx_enable(sspa);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
mmp_sspa_tx_enable(sspa);
- sspa_priv->running_cnt++;
+ sspa->running_cnt++;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- sspa_priv->running_cnt--;
+ sspa->running_cnt--;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
mmp_sspa_tx_disable(sspa);
/* have no capture stream, disable rx port */
- if (!sspa_priv->running_cnt)
+ if (!sspa->running_cnt)
mmp_sspa_rx_disable(sspa);
break;
@@ -352,17 +320,20 @@ static int mmp_sspa_trigger(struct snd_pcm_substream *substream, int cmd,
static int mmp_sspa_probe(struct snd_soc_dai *dai)
{
- struct sspa_priv *priv = dev_get_drvdata(dai->dev);
+ struct sspa_priv *sspa = dev_get_drvdata(dai->dev);
- snd_soc_dai_set_drvdata(dai, priv);
- return 0;
+ snd_soc_dai_init_dma_data(dai,
+ &sspa->playback_dma_data,
+ &sspa->capture_dma_data);
+ snd_soc_dai_set_drvdata(dai, sspa);
+ return 0;
}
#define MMP_SSPA_RATES SNDRV_PCM_RATE_8000_192000
#define MMP_SSPA_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dai_ops mmp_sspa_dai_ops = {
@@ -392,68 +363,212 @@ static struct snd_soc_dai_driver mmp_sspa_dai = {
.ops = &mmp_sspa_dai_ops,
};
+#define MMP_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
+ SNDRV_PCM_INFO_MMAP_VALID | \
+ SNDRV_PCM_INFO_INTERLEAVED | \
+ SNDRV_PCM_INFO_PAUSE | \
+ SNDRV_PCM_INFO_RESUME | \
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP)
+
+static const struct snd_pcm_hardware mmp_pcm_hardware[] = {
+ {
+ .info = MMP_PCM_INFO,
+ .period_bytes_min = 1024,
+ .period_bytes_max = 2048,
+ .periods_min = 2,
+ .periods_max = 32,
+ .buffer_bytes_max = 4096,
+ .fifo_size = 32,
+ },
+ {
+ .info = MMP_PCM_INFO,
+ .period_bytes_min = 1024,
+ .period_bytes_max = 2048,
+ .periods_min = 2,
+ .periods_max = 32,
+ .buffer_bytes_max = 4096,
+ .fifo_size = 32,
+ },
+};
+
+static const struct snd_dmaengine_pcm_config mmp_pcm_config = {
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+ .pcm_hardware = mmp_pcm_hardware,
+ .prealloc_buffer_size = 4096,
+};
+
+static int mmp_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return remap_pfn_range(vma, vma->vm_start,
+ substream->dma_buffer.addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+static int mmp_sspa_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ struct sspa_priv *sspa = snd_soc_component_get_drvdata(component);
+
+ pm_runtime_get_sync(component->dev);
+
+ /* we can only change the settings if the port is not in use */
+ if ((__raw_readl(sspa->tx_base + SSPA_SP) & SSPA_SP_S_EN) ||
+ (__raw_readl(sspa->rx_base + SSPA_SP) & SSPA_SP_S_EN)) {
+ dev_err(component->dev,
+ "can't change hardware dai format: stream is in use\n");
+ return -EBUSY;
+ }
+
+ __raw_writel(sspa->sp, sspa->tx_base + SSPA_SP);
+ __raw_writel(sspa->sp, sspa->rx_base + SSPA_SP);
+
+ sspa->sp &= ~(SSPA_SP_S_RST | SSPA_SP_FFLUSH);
+ __raw_writel(sspa->sp, sspa->tx_base + SSPA_SP);
+ __raw_writel(sspa->sp, sspa->rx_base + SSPA_SP);
+
+ /*
+ * FIXME: hw issue, the master/slave mode cannot be
+ * configured on the tx serial port, so this bit must
+ * be cleared here. The master/slave mode has already
+ * been set via the rx port.
+ */
+ __raw_writel(sspa->sp & ~SSPA_SP_MSL, sspa->tx_base + SSPA_SP);
+
+ __raw_writel(sspa->ctrl, sspa->tx_base + SSPA_CTL);
+ __raw_writel(sspa->ctrl, sspa->rx_base + SSPA_CTL);
+
+ return 0;
+}
+
+static int mmp_sspa_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream)
+{
+ pm_runtime_put_sync(component->dev);
+ return 0;
+}
+
static const struct snd_soc_component_driver mmp_sspa_component = {
.name = "mmp-sspa",
+ .mmap = mmp_pcm_mmap,
+ .open = mmp_sspa_open,
+ .close = mmp_sspa_close,
};
static int asoc_mmp_sspa_probe(struct platform_device *pdev)
{
- struct sspa_priv *priv;
+ struct sspa_priv *sspa;
+ int ret;
- priv = devm_kzalloc(&pdev->dev,
+ sspa = devm_kzalloc(&pdev->dev,
sizeof(struct sspa_priv), GFP_KERNEL);
- if (!priv)
+ if (!sspa)
return -ENOMEM;
- priv->sspa = devm_kzalloc(&pdev->dev,
- sizeof(struct ssp_device), GFP_KERNEL);
- if (priv->sspa == NULL)
- return -ENOMEM;
+ if (pdev->dev.of_node) {
+ sspa->rx_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sspa->rx_base))
+ return PTR_ERR(sspa->rx_base);
- priv->dma_params = devm_kcalloc(&pdev->dev,
- 2, sizeof(struct snd_dmaengine_dai_dma_data),
- GFP_KERNEL);
- if (priv->dma_params == NULL)
- return -ENOMEM;
+ sspa->tx_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(sspa->tx_base))
+ return PTR_ERR(sspa->tx_base);
- priv->sspa->mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->sspa->mmio_base))
- return PTR_ERR(priv->sspa->mmio_base);
+ sspa->clk = devm_clk_get(&pdev->dev, "bitclk");
+ if (IS_ERR(sspa->clk))
+ return PTR_ERR(sspa->clk);
- priv->sspa->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->sspa->clk))
- return PTR_ERR(priv->sspa->clk);
+ sspa->audio_clk = devm_clk_get(&pdev->dev, "audio");
+ if (IS_ERR(sspa->audio_clk))
+ return PTR_ERR(sspa->audio_clk);
+ } else {
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL)
+ return -ENODEV;
+
+ sspa->rx_base = devm_ioremap(&pdev->dev, res->start, 0x30);
+ if (!sspa->rx_base)
+ return -ENOMEM;
+
+ sspa->tx_base = devm_ioremap(&pdev->dev,
+ res->start + 0x80, 0x30);
+ if (!sspa->tx_base)
+ return -ENOMEM;
+
+ sspa->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sspa->clk))
+ return PTR_ERR(sspa->clk);
+
+ sspa->audio_clk = clk_get(NULL, "mmp-audio");
+ if (IS_ERR(sspa->audio_clk))
+ return PTR_ERR(sspa->audio_clk);
+
+ sspa->sysclk = clk_get(NULL, "mmp-sysclk");
+ if (IS_ERR(sspa->sysclk)) {
+ clk_put(sspa->audio_clk);
+ return PTR_ERR(sspa->sysclk);
+ }
+ }
+ platform_set_drvdata(pdev, sspa);
- priv->audio_clk = clk_get(NULL, "mmp-audio");
- if (IS_ERR(priv->audio_clk))
- return PTR_ERR(priv->audio_clk);
+ sspa->playback_dma_data.maxburst = 4;
+ sspa->capture_dma_data.maxburst = 4;
+ /* You know, these addresses are actually ignored. */
+ sspa->capture_dma_data.addr = SSPA_D;
+ sspa->playback_dma_data.addr = 0x80 + SSPA_D;
- priv->sysclk = clk_get(NULL, "mmp-sysclk");
- if (IS_ERR(priv->sysclk)) {
- clk_put(priv->audio_clk);
- return PTR_ERR(priv->sysclk);
+ if (pdev->dev.of_node) {
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
+ &mmp_pcm_config, 0);
+ if (ret)
+ return ret;
}
- clk_enable(priv->audio_clk);
- priv->dai_fmt = (unsigned int) -1;
- platform_set_drvdata(pdev, priv);
- return devm_snd_soc_register_component(&pdev->dev, &mmp_sspa_component,
- &mmp_sspa_dai, 1);
+ ret = devm_snd_soc_register_component(&pdev->dev, &mmp_sspa_component,
+ &mmp_sspa_dai, 1);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ clk_prepare_enable(sspa->audio_clk);
+
+ return 0;
}
static int asoc_mmp_sspa_remove(struct platform_device *pdev)
{
- struct sspa_priv *priv = platform_get_drvdata(pdev);
+ struct sspa_priv *sspa = platform_get_drvdata(pdev);
- clk_disable(priv->audio_clk);
- clk_put(priv->audio_clk);
- clk_put(priv->sysclk);
+ clk_disable_unprepare(sspa->audio_clk);
+ pm_runtime_disable(&pdev->dev);
+
+ if (pdev->dev.of_node)
+ return 0;
+
+ clk_put(sspa->audio_clk);
+ clk_put(sspa->sysclk);
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id mmp_sspa_of_match[] = {
+ { .compatible = "marvell,mmp-sspa" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mmp_sspa_of_match);
+#endif
+
static struct platform_driver asoc_mmp_sspa_driver = {
.driver = {
.name = "mmp-sspa-dai",
+ .of_match_table = of_match_ptr(mmp_sspa_of_match),
},
.probe = asoc_mmp_sspa_probe,
.remove = asoc_mmp_sspa_remove,
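For readers following the mmp-sspa conversion above: the driver now describes its DMA parameters once and hands them to the generic dmaengine PCM, instead of carrying per-stream snd_dmaengine_dai_dma_data plumbing of its own. A minimal sketch of that pattern (foo_* names are hypothetical, not the driver's; component/DAI registration is omitted):

/*
 * Illustrative only: DMA bursts/addresses are described once and the
 * generic dmaengine PCM is registered, as the converted probe above does.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

struct foo_priv {
	struct snd_dmaengine_dai_dma_data playback_dma_data;
	struct snd_dmaengine_dai_dma_data capture_dma_data;
};

static const struct snd_dmaengine_pcm_config foo_pcm_config = {
	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
	.prealloc_buffer_size = 4096,
};

static int foo_dai_probe(struct snd_soc_dai *dai)
{
	struct foo_priv *priv = dev_get_drvdata(dai->dev);

	/* the generic dmaengine PCM picks these up per stream */
	snd_soc_dai_init_dma_data(dai, &priv->playback_dma_data,
				  &priv->capture_dma_data);
	return 0;
}

static struct snd_soc_dai_driver foo_dai = {
	.probe = foo_dai_probe,
	/* playback/capture stream descriptions omitted for brevity */
};

static int foo_platform_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->playback_dma_data.maxburst = 4;
	priv->capture_dma_data.maxburst = 4;
	platform_set_drvdata(pdev, priv);

	/* registration of a component using &foo_dai omitted */
	return devm_snd_dmaengine_pcm_register(&pdev->dev, &foo_pcm_config, 0);
}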
diff --git a/sound/soc/pxa/mmp-sspa.h b/sound/soc/pxa/mmp-sspa.h
index 7d1b7c7325df..938ef2f667e3 100644
--- a/sound/soc/pxa/mmp-sspa.h
+++ b/sound/soc/pxa/mmp-sspa.h
@@ -10,25 +10,15 @@
/*
* SSPA Registers
*/
-#define SSPA_RXD (0x00)
-#define SSPA_RXID (0x04)
-#define SSPA_RXCTL (0x08)
-#define SSPA_RXSP (0x0c)
-#define SSPA_RXFIFO_UL (0x10)
-#define SSPA_RXINT_MASK (0x14)
-#define SSPA_RXC (0x18)
-#define SSPA_RXFIFO_NOFS (0x1c)
-#define SSPA_RXFIFO_SIZE (0x20)
-
-#define SSPA_TXD (0x80)
-#define SSPA_TXID (0x84)
-#define SSPA_TXCTL (0x88)
-#define SSPA_TXSP (0x8c)
-#define SSPA_TXFIFO_LL (0x90)
-#define SSPA_TXINT_MASK (0x94)
-#define SSPA_TXC (0x98)
-#define SSPA_TXFIFO_NOFS (0x9c)
-#define SSPA_TXFIFO_SIZE (0xa0)
+#define SSPA_D (0x00)
+#define SSPA_ID (0x04)
+#define SSPA_CTL (0x08)
+#define SSPA_SP (0x0c)
+#define SSPA_FIFO_UL (0x10)
+#define SSPA_INT_MASK (0x14)
+#define SSPA_C (0x18)
+#define SSPA_FIFO_NOFS (0x1c)
+#define SSPA_FIFO_SIZE (0x20)
/* SSPA Control Register */
#define SSPA_CTL_XPH (1 << 31) /* Read Phase */
@@ -38,7 +28,7 @@
#define SSPA_CTL_XFRLEN2(x) ((x) << 24) /* Transmit Frame Length in Phase 2 */
#define SSPA_CTL_XWDLEN2_MASK (7 << 21)
#define SSPA_CTL_XWDLEN2(x) ((x) << 21) /* Transmit Word Length in Phase 2 */
-#define SSPA_CTL_XDATDLY(x) ((x) << 19) /* Tansmit Data Delay */
+#define SSPA_CTL_XDATDLY(x) ((x) << 19) /* Transmit Data Delay */
#define SSPA_CTL_XSSZ2_MASK (7 << 16)
#define SSPA_CTL_XSSZ2(x) ((x) << 16) /* Transmit Sample Audio Size */
#define SSPA_CTL_XFRLEN1_MASK (7 << 8)
@@ -63,7 +53,9 @@
#define SSPA_SP_FFLUSH (1 << 2) /* FIFO Flush */
#define SSPA_SP_S_RST (1 << 1) /* Active High Reset Signal */
#define SSPA_SP_S_EN (1 << 0) /* Serial Clock Domain Enable */
+#define SSPA_SP_FWID_MASK (0x3f << 20)
#define SSPA_SP_FWID(x) ((x) << 20) /* Frame-Sync Width */
+#define SSPA_TXSP_FPER_MASK (0x3f << 4)
#define SSPA_TXSP_FPER(x) ((x) << 4) /* Frame-Sync Active */
/* sspa clock sources */
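The register rename above collapses the separate RX/TX offset lists into one generic set applied to a per-direction base: in the legacy (non-DT) probe path the TX block is mapped 0x80 above the RX block, so the old SSPA_TXCTL (0x88) is now reached as tx_base + SSPA_CTL (0x80 + 0x08). A hypothetical helper pair (the driver itself open-codes these accesses) shows the pattern:

#include <linux/io.h>
#include <linux/types.h>

/* foo_* helpers are illustrative only */
static inline void foo_sspa_tx_write(void __iomem *tx_base, u32 reg, u32 val)
{
	__raw_writel(val, tx_base + reg);	/* reg: SSPA_CTL, SSPA_SP, ... */
}

static inline u32 foo_sspa_rx_read(void __iomem *rx_base, u32 reg)
{
	return __raw_readl(rx_base + reg);
}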
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index e615acaa0199..6a72cc1665b7 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -94,7 +94,7 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
struct snd_dmaengine_dai_dma_data *dma;
int ret = 0;
- if (!cpu_dai->active) {
+ if (!snd_soc_dai_active(cpu_dai)) {
clk_prepare_enable(ssp->clk);
pxa_ssp_disable(ssp);
}
@@ -119,7 +119,7 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream,
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
- if (!cpu_dai->active) {
+ if (!snd_soc_dai_active(cpu_dai)) {
pxa_ssp_disable(ssp);
clk_disable_unprepare(ssp->clk);
}
@@ -138,7 +138,7 @@ static int pxa_ssp_suspend(struct snd_soc_component *component)
struct ssp_priv *priv = snd_soc_component_get_drvdata(component);
struct ssp_device *ssp = priv->ssp;
- if (!component->active)
+ if (!snd_soc_component_active(component))
clk_prepare_enable(ssp->clk);
priv->cr0 = __raw_readl(ssp->mmio_base + SSCR0);
@@ -165,7 +165,7 @@ static int pxa_ssp_resume(struct snd_soc_component *component)
__raw_writel(priv->to, ssp->mmio_base + SSTO);
__raw_writel(priv->psp, ssp->mmio_base + SSPSP);
- if (component->active)
+ if (snd_soc_component_active(component))
pxa_ssp_enable(ssp);
else
clk_disable_unprepare(ssp->clk);
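The pxa-ssp hunks above (and the pxa2xx-i2s change that follows) stop reading dai->active and component->active directly and use the snd_soc_dai_active()/snd_soc_component_active() accessors instead. A minimal sketch of the idiom, with hypothetical foo_* names: the first active stream enables the clock and the last one releases it.

#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/soc.h>

struct foo_priv {
	struct clk *clk;
};

static int foo_startup(struct snd_pcm_substream *substream,
		       struct snd_soc_dai *cpu_dai)
{
	struct foo_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);

	/* only the first active stream turns the clock on */
	if (!snd_soc_dai_active(cpu_dai))
		clk_prepare_enable(priv->clk);
	return 0;
}

static void foo_shutdown(struct snd_pcm_substream *substream,
			 struct snd_soc_dai *cpu_dai)
{
	struct foo_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);

	/* last stream gone: release the clock */
	if (!snd_soc_dai_active(cpu_dai))
		clk_disable_unprepare(priv->clk);
}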
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 9a32bf72127a..03102e938ba1 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -101,7 +101,7 @@ static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
if (IS_ERR(clk_i2s))
return PTR_ERR(clk_i2s);
- if (!cpu_dai->active)
+ if (!snd_soc_dai_active(cpu_dai))
SACR0 = 0;
return 0;
diff --git a/sound/soc/qcom/lpass-apq8016.c b/sound/soc/qcom/lpass-apq8016.c
index 6575da549237..b3610d05b651 100644
--- a/sound/soc/qcom/lpass-apq8016.c
+++ b/sound/soc/qcom/lpass-apq8016.c
@@ -166,28 +166,27 @@ static int apq8016_lpass_init(struct platform_device *pdev)
drvdata->pcnoc_mport_clk = devm_clk_get(dev, "pcnoc-mport-clk");
if (IS_ERR(drvdata->pcnoc_mport_clk)) {
- dev_err(&pdev->dev, "error getting pcnoc-mport-clk: %ld\n",
+ dev_err(dev, "error getting pcnoc-mport-clk: %ld\n",
PTR_ERR(drvdata->pcnoc_mport_clk));
return PTR_ERR(drvdata->pcnoc_mport_clk);
}
ret = clk_prepare_enable(drvdata->pcnoc_mport_clk);
if (ret) {
- dev_err(&pdev->dev, "Error enabling pcnoc-mport-clk: %d\n",
- ret);
+ dev_err(dev, "Error enabling pcnoc-mport-clk: %d\n", ret);
return ret;
}
drvdata->pcnoc_sway_clk = devm_clk_get(dev, "pcnoc-sway-clk");
if (IS_ERR(drvdata->pcnoc_sway_clk)) {
- dev_err(&pdev->dev, "error getting pcnoc-sway-clk: %ld\n",
+ dev_err(dev, "error getting pcnoc-sway-clk: %ld\n",
PTR_ERR(drvdata->pcnoc_sway_clk));
return PTR_ERR(drvdata->pcnoc_sway_clk);
}
ret = clk_prepare_enable(drvdata->pcnoc_sway_clk);
if (ret) {
- dev_err(&pdev->dev, "Error enabling pcnoc_sway_clk: %d\n", ret);
+ dev_err(dev, "Error enabling pcnoc_sway_clk: %d\n", ret);
return ret;
}
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index dbce7e92baf3..e00a4af29c13 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -19,6 +19,16 @@
#include "lpass-lpaif-reg.h"
#include "lpass.h"
+#define LPASS_CPU_MAX_MI2S_LINES 4
+#define LPASS_CPU_I2S_SD0_MASK BIT(0)
+#define LPASS_CPU_I2S_SD1_MASK BIT(1)
+#define LPASS_CPU_I2S_SD2_MASK BIT(2)
+#define LPASS_CPU_I2S_SD3_MASK BIT(3)
+#define LPASS_CPU_I2S_SD0_1_MASK GENMASK(1, 0)
+#define LPASS_CPU_I2S_SD2_3_MASK GENMASK(3, 2)
+#define LPASS_CPU_I2S_SD0_1_2_MASK GENMASK(2, 0)
+#define LPASS_CPU_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
+
static int lpass_cpu_daiops_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
@@ -72,6 +82,7 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
snd_pcm_format_t format = params_format(params);
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
+ unsigned int mode;
unsigned int regval;
int bitwidth, ret;
@@ -99,60 +110,84 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- switch (channels) {
- case 1:
- regval |= LPAIF_I2SCTL_SPKMODE_SD0;
- regval |= LPAIF_I2SCTL_SPKMONO_MONO;
- break;
- case 2:
- regval |= LPAIF_I2SCTL_SPKMODE_SD0;
- regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
- break;
- case 4:
- regval |= LPAIF_I2SCTL_SPKMODE_QUAD01;
- regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
- break;
- case 6:
- regval |= LPAIF_I2SCTL_SPKMODE_6CH;
- regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mode = drvdata->mi2s_playback_sd_mode[dai->driver->id];
+ else
+ mode = drvdata->mi2s_capture_sd_mode[dai->driver->id];
+
+ if (!mode) {
+ dev_err(dai->dev, "no line is assigned\n");
+ return -EINVAL;
+ }
+
+ switch (channels) {
+ case 1:
+ case 2:
+ switch (mode) {
+ case LPAIF_I2SCTL_MODE_QUAD01:
+ case LPAIF_I2SCTL_MODE_6CH:
+ case LPAIF_I2SCTL_MODE_8CH:
+ mode = LPAIF_I2SCTL_MODE_SD0;
break;
- case 8:
- regval |= LPAIF_I2SCTL_SPKMODE_8CH;
- regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ case LPAIF_I2SCTL_MODE_QUAD23:
+ mode = LPAIF_I2SCTL_MODE_SD2;
break;
- default:
- dev_err(dai->dev, "invalid channels given: %u\n",
- channels);
+ }
+
+ break;
+ case 4:
+ if (mode < LPAIF_I2SCTL_MODE_QUAD01) {
+ dev_err(dai->dev, "cannot configure 4 channels with mode %d\n",
+ mode);
return -EINVAL;
}
- } else {
- switch (channels) {
- case 1:
- regval |= LPAIF_I2SCTL_MICMODE_SD0;
- regval |= LPAIF_I2SCTL_MICMONO_MONO;
- break;
- case 2:
- regval |= LPAIF_I2SCTL_MICMODE_SD0;
- regval |= LPAIF_I2SCTL_MICMONO_STEREO;
- break;
- case 4:
- regval |= LPAIF_I2SCTL_MICMODE_QUAD01;
- regval |= LPAIF_I2SCTL_MICMONO_STEREO;
- break;
- case 6:
- regval |= LPAIF_I2SCTL_MICMODE_6CH;
- regval |= LPAIF_I2SCTL_MICMONO_STEREO;
+
+ switch (mode) {
+ case LPAIF_I2SCTL_MODE_6CH:
+ case LPAIF_I2SCTL_MODE_8CH:
+ mode = LPAIF_I2SCTL_MODE_QUAD01;
break;
- case 8:
- regval |= LPAIF_I2SCTL_MICMODE_8CH;
- regval |= LPAIF_I2SCTL_MICMONO_STEREO;
+ }
+ break;
+ case 6:
+ if (mode < LPAIF_I2SCTL_MODE_6CH) {
+ dev_err(dai->dev, "cannot configure 6 channels with mode %d\n",
+ mode);
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case LPAIF_I2SCTL_MODE_8CH:
+ mode = LPAIF_I2SCTL_MODE_6CH;
break;
- default:
- dev_err(dai->dev, "invalid channels given: %u\n",
- channels);
+ }
+ break;
+ case 8:
+ if (mode < LPAIF_I2SCTL_MODE_8CH) {
+ dev_err(dai->dev, "cannot configure 8 channels with mode %d\n",
+ mode);
return -EINVAL;
}
+ break;
+ default:
+ dev_err(dai->dev, "invalid channels given: %u\n", channels);
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regval |= LPAIF_I2SCTL_SPKMODE(mode);
+
+ if (channels >= 2)
+ regval |= LPAIF_I2SCTL_SPKMONO_STEREO;
+ else
+ regval |= LPAIF_I2SCTL_SPKMONO_MONO;
+ } else {
+ regval |= LPAIF_I2SCTL_MICMODE(mode);
+
+ if (channels >= 2)
+ regval |= LPAIF_I2SCTL_MICMONO_STEREO;
+ else
+ regval |= LPAIF_I2SCTL_MICMONO_MONO;
}
ret = regmap_write(drvdata->lpaif_map,
@@ -413,6 +448,73 @@ static struct regmap_config lpass_cpu_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
+ struct device_node *node,
+ const char *name)
+{
+ unsigned int lines[LPASS_CPU_MAX_MI2S_LINES];
+ unsigned int sd_line_mask = 0;
+ int num_lines, i;
+
+ num_lines = of_property_read_variable_u32_array(node, name, lines, 0,
+ LPASS_CPU_MAX_MI2S_LINES);
+ if (num_lines < 0)
+ return LPAIF_I2SCTL_MODE_NONE;
+
+ for (i = 0; i < num_lines; i++)
+ sd_line_mask |= BIT(lines[i]);
+
+ switch (sd_line_mask) {
+ case LPASS_CPU_I2S_SD0_MASK:
+ return LPAIF_I2SCTL_MODE_SD0;
+ case LPASS_CPU_I2S_SD1_MASK:
+ return LPAIF_I2SCTL_MODE_SD1;
+ case LPASS_CPU_I2S_SD2_MASK:
+ return LPAIF_I2SCTL_MODE_SD2;
+ case LPASS_CPU_I2S_SD3_MASK:
+ return LPAIF_I2SCTL_MODE_SD3;
+ case LPASS_CPU_I2S_SD0_1_MASK:
+ return LPAIF_I2SCTL_MODE_QUAD01;
+ case LPASS_CPU_I2S_SD2_3_MASK:
+ return LPAIF_I2SCTL_MODE_QUAD23;
+ case LPASS_CPU_I2S_SD0_1_2_MASK:
+ return LPAIF_I2SCTL_MODE_6CH;
+ case LPASS_CPU_I2S_SD0_1_2_3_MASK:
+ return LPAIF_I2SCTL_MODE_8CH;
+ default:
+ dev_err(dev, "Unsupported SD line mask: %#x\n", sd_line_mask);
+ return LPAIF_I2SCTL_MODE_NONE;
+ }
+}
+
+static void of_lpass_cpu_parse_dai_data(struct device *dev,
+ struct lpass_data *data)
+{
+ struct device_node *node;
+ int ret, id;
+
+ /* Allow all channels by default for backwards compatibility */
+ for (id = 0; id < data->variant->num_dai; id++) {
+ data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
+ data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
+ }
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id < 0 || id >= data->variant->num_dai) {
+ dev_err(dev, "valid dai id not found: %d\n", ret);
+ continue;
+ }
+
+ data->mi2s_playback_sd_mode[id] =
+ of_lpass_cpu_parse_sd_lines(dev, node,
+ "qcom,playback-sd-lines");
+ data->mi2s_capture_sd_mode[id] =
+ of_lpass_cpu_parse_sd_lines(dev, node,
+ "qcom,capture-sd-lines");
+ }
+}
+
int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
{
struct lpass_data *drvdata;
@@ -425,12 +527,11 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
dsp_of_node = of_parse_phandle(pdev->dev.of_node, "qcom,adsp", 0);
if (dsp_of_node) {
- dev_err(&pdev->dev, "DSP exists and holds audio resources\n");
+ dev_err(dev, "DSP exists and holds audio resources\n");
return -EBUSY;
}
- drvdata = devm_kzalloc(&pdev->dev, sizeof(struct lpass_data),
- GFP_KERNEL);
+ drvdata = devm_kzalloc(dev, sizeof(struct lpass_data), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
platform_set_drvdata(pdev, drvdata);
@@ -442,11 +543,13 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
drvdata->variant = (struct lpass_variant *)match->data;
variant = drvdata->variant;
+ of_lpass_cpu_parse_dai_data(dev, drvdata);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lpass-lpaif");
- drvdata->lpaif = devm_ioremap_resource(&pdev->dev, res);
+ drvdata->lpaif = devm_ioremap_resource(dev, res);
if (IS_ERR((void const __force *)drvdata->lpaif)) {
- dev_err(&pdev->dev, "error mapping reg resource: %ld\n",
+ dev_err(dev, "error mapping reg resource: %ld\n",
PTR_ERR((void const __force *)drvdata->lpaif));
return PTR_ERR((void const __force *)drvdata->lpaif);
}
@@ -455,10 +558,10 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
variant->wrdma_channels +
variant->wrdma_channel_start);
- drvdata->lpaif_map = devm_regmap_init_mmio(&pdev->dev, drvdata->lpaif,
+ drvdata->lpaif_map = devm_regmap_init_mmio(dev, drvdata->lpaif,
&lpass_cpu_regmap_config);
if (IS_ERR(drvdata->lpaif_map)) {
- dev_err(&pdev->dev, "error initializing regmap: %ld\n",
+ dev_err(dev, "error initializing regmap: %ld\n",
PTR_ERR(drvdata->lpaif_map));
return PTR_ERR(drvdata->lpaif_map);
}
@@ -468,10 +571,10 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
for (i = 0; i < variant->num_dai; i++) {
dai_id = variant->dai_driver[i].id;
- drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(&pdev->dev,
+ drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev,
variant->dai_osr_clk_names[i]);
if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) {
- dev_warn(&pdev->dev,
+ dev_warn(dev,
"%s() error getting optional %s: %ld\n",
__func__,
variant->dai_osr_clk_names[i],
@@ -480,10 +583,10 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
drvdata->mi2s_osr_clk[dai_id] = NULL;
}
- drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(&pdev->dev,
+ drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev,
variant->dai_bit_clk_names[i]);
if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"error getting %s: %ld\n",
variant->dai_bit_clk_names[i],
PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
@@ -491,41 +594,39 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
}
}
- drvdata->ahbix_clk = devm_clk_get(&pdev->dev, "ahbix-clk");
+ drvdata->ahbix_clk = devm_clk_get(dev, "ahbix-clk");
if (IS_ERR(drvdata->ahbix_clk)) {
- dev_err(&pdev->dev, "error getting ahbix-clk: %ld\n",
+ dev_err(dev, "error getting ahbix-clk: %ld\n",
PTR_ERR(drvdata->ahbix_clk));
return PTR_ERR(drvdata->ahbix_clk);
}
ret = clk_set_rate(drvdata->ahbix_clk, LPASS_AHBIX_CLOCK_FREQUENCY);
if (ret) {
- dev_err(&pdev->dev, "error setting rate on ahbix_clk: %d\n",
- ret);
+ dev_err(dev, "error setting rate on ahbix_clk: %d\n", ret);
return ret;
}
- dev_dbg(&pdev->dev, "set ahbix_clk rate to %lu\n",
+ dev_dbg(dev, "set ahbix_clk rate to %lu\n",
clk_get_rate(drvdata->ahbix_clk));
ret = clk_prepare_enable(drvdata->ahbix_clk);
if (ret) {
- dev_err(&pdev->dev, "error enabling ahbix_clk: %d\n", ret);
+ dev_err(dev, "error enabling ahbix_clk: %d\n", ret);
return ret;
}
- ret = devm_snd_soc_register_component(&pdev->dev,
+ ret = devm_snd_soc_register_component(dev,
&lpass_cpu_comp_driver,
variant->dai_driver,
variant->num_dai);
if (ret) {
- dev_err(&pdev->dev, "error registering cpu driver: %d\n", ret);
+ dev_err(dev, "error registering cpu driver: %d\n", ret);
goto err_clk;
}
ret = asoc_qcom_lpass_platform_register(pdev);
if (ret) {
- dev_err(&pdev->dev, "error registering platform driver: %d\n",
- ret);
+ dev_err(dev, "error registering platform driver: %d\n", ret);
goto err_clk;
}
diff --git a/sound/soc/qcom/lpass-lpaif-reg.h b/sound/soc/qcom/lpass-lpaif-reg.h
index 3d74ae123e9d..72a3e2f69572 100644
--- a/sound/soc/qcom/lpass-lpaif-reg.h
+++ b/sound/soc/qcom/lpass-lpaif-reg.h
@@ -22,17 +22,19 @@
#define LPAIF_I2SCTL_SPKEN_DISABLE (0 << LPAIF_I2SCTL_SPKEN_SHIFT)
#define LPAIF_I2SCTL_SPKEN_ENABLE (1 << LPAIF_I2SCTL_SPKEN_SHIFT)
+#define LPAIF_I2SCTL_MODE_NONE 0
+#define LPAIF_I2SCTL_MODE_SD0 1
+#define LPAIF_I2SCTL_MODE_SD1 2
+#define LPAIF_I2SCTL_MODE_SD2 3
+#define LPAIF_I2SCTL_MODE_SD3 4
+#define LPAIF_I2SCTL_MODE_QUAD01 5
+#define LPAIF_I2SCTL_MODE_QUAD23 6
+#define LPAIF_I2SCTL_MODE_6CH 7
+#define LPAIF_I2SCTL_MODE_8CH 8
+
#define LPAIF_I2SCTL_SPKMODE_MASK 0x3C00
#define LPAIF_I2SCTL_SPKMODE_SHIFT 10
-#define LPAIF_I2SCTL_SPKMODE_NONE (0 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_SD0 (1 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_SD1 (2 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_SD2 (3 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_SD3 (4 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_QUAD01 (5 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_QUAD23 (6 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_6CH (7 << LPAIF_I2SCTL_SPKMODE_SHIFT)
-#define LPAIF_I2SCTL_SPKMODE_8CH (8 << LPAIF_I2SCTL_SPKMODE_SHIFT)
+#define LPAIF_I2SCTL_SPKMODE(mode) ((mode) << LPAIF_I2SCTL_SPKMODE_SHIFT)
#define LPAIF_I2SCTL_SPKMONO_MASK 0x0200
#define LPAIF_I2SCTL_SPKMONO_SHIFT 9
@@ -46,15 +48,7 @@
#define LPAIF_I2SCTL_MICMODE_MASK GENMASK(7, 4)
#define LPAIF_I2SCTL_MICMODE_SHIFT 4
-#define LPAIF_I2SCTL_MICMODE_NONE (0 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_SD0 (1 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_SD1 (2 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_SD2 (3 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_SD3 (4 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_QUAD01 (5 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_QUAD23 (6 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_6CH (7 << LPAIF_I2SCTL_MICMODE_SHIFT)
-#define LPAIF_I2SCTL_MICMODE_8CH (8 << LPAIF_I2SCTL_MICMODE_SHIFT)
+#define LPAIF_I2SCTL_MICMODE(mode) ((mode) << LPAIF_I2SCTL_MICMODE_SHIFT)
#define LPAIF_I2SCTL_MIMONO_MASK GENMASK(3, 3)
#define LPAIF_I2SCTL_MICMONO_SHIFT 3
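With the per-value SPKMODE_*/MICMODE_* constants replaced by the parameterised macros above, a mode computed at runtime can be written with a single masked update. A hypothetical regmap-based helper, in the spirit of lpass-cpu.c:

#include <linux/regmap.h>

static int foo_set_spk_mode(struct regmap *map, unsigned int i2sctl_reg,
			    unsigned int mode)
{
	/* one masked write replaces the old per-mode constants */
	return regmap_update_bits(map, i2sctl_reg, LPAIF_I2SCTL_SPKMODE_MASK,
				  LPAIF_I2SCTL_SPKMODE(mode));
}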
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index 17113d380dcc..bd19ec57c73d 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -29,6 +29,10 @@ struct lpass_data {
/* MI2S bit clock (derived from system clock by a divider */
struct clk *mi2s_bit_clk[LPASS_MAX_MI2S_PORTS];
+ /* MI2S SD lines to use for playback/capture */
+ unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
+ unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
+
/* low-power audio interface (LPAIF) registers */
void __iomem *lpaif;
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 125af00bba53..aff57052a735 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -176,7 +176,7 @@ static const struct snd_compr_codec_caps q6asm_compr_caps = {
};
static void event_handler(uint32_t opcode, uint32_t token,
- uint32_t *payload, void *priv)
+ void *payload, void *priv)
{
struct q6asm_dai_rtd *prtd = priv;
struct snd_pcm_substream *substream = prtd->substream;
@@ -490,7 +490,7 @@ static int q6asm_dai_hw_params(struct snd_soc_component *component,
}
static void compress_event_handler(uint32_t opcode, uint32_t token,
- uint32_t *payload, void *priv)
+ void *payload, void *priv)
{
struct q6asm_dai_rtd *prtd = priv;
struct snd_compr_stream *substream = prtd->cstream;
@@ -540,19 +540,19 @@ static void compress_event_handler(uint32_t opcode, uint32_t token,
}
}
-static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
+static int q6asm_dai_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct snd_compr_runtime *runtime = stream->runtime;
struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct q6asm_dai_data *pdata;
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
struct q6asm_dai_rtd *prtd;
int stream_id, size, ret;
stream_id = cpu_dai->driver->id;
- pdata = snd_soc_component_get_drvdata(c);
+ pdata = snd_soc_component_get_drvdata(component);
if (!pdata) {
dev_err(dev, "Drv data not found ..\n");
return -EINVAL;
@@ -600,7 +600,8 @@ free_prtd:
return ret;
}
-static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
+static int q6asm_dai_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
@@ -622,13 +623,13 @@ static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
return 0;
}
-static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
+static int q6asm_dai_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_params *params)
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
int dir = stream->direction;
struct q6asm_dai_data *pdata;
struct q6asm_flac_cfg flac_cfg;
@@ -636,7 +637,7 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
struct q6asm_alac_cfg alac_cfg;
struct q6asm_ape_cfg ape_cfg;
unsigned int wma_v9 = 0;
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
int ret;
union snd_codec_options *codec_options;
struct snd_dec_flac *flac;
@@ -649,7 +650,7 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
memcpy(&prtd->codec_param, params, sizeof(*params));
- pdata = snd_soc_component_get_drvdata(c);
+ pdata = snd_soc_component_get_drvdata(component);
if (!pdata)
return -EINVAL;
@@ -842,7 +843,8 @@ static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
return 0;
}
-static int q6asm_dai_compr_trigger(struct snd_compr_stream *stream, int cmd)
+static int q6asm_dai_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *stream, int cmd)
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
@@ -870,8 +872,9 @@ static int q6asm_dai_compr_trigger(struct snd_compr_stream *stream, int cmd)
return ret;
}
-static int q6asm_dai_compr_pointer(struct snd_compr_stream *stream,
- struct snd_compr_tstamp *tstamp)
+static int q6asm_dai_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct snd_compr_tstamp *tstamp)
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
@@ -887,8 +890,9 @@ static int q6asm_dai_compr_pointer(struct snd_compr_stream *stream,
return 0;
}
-static int q6asm_dai_compr_ack(struct snd_compr_stream *stream,
- size_t count)
+static int q6asm_dai_compr_ack(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ size_t count)
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
@@ -901,21 +905,21 @@ static int q6asm_dai_compr_ack(struct snd_compr_stream *stream,
return count;
}
-static int q6asm_dai_compr_mmap(struct snd_compr_stream *stream,
- struct vm_area_struct *vma)
+static int q6asm_dai_compr_mmap(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
+ struct vm_area_struct *vma)
{
struct snd_compr_runtime *runtime = stream->runtime;
struct q6asm_dai_rtd *prtd = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = stream->private_data;
- struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
- struct device *dev = c->dev;
+ struct device *dev = component->dev;
return dma_mmap_coherent(dev, vma,
prtd->dma_buffer.area, prtd->dma_buffer.addr,
prtd->dma_buffer.bytes);
}
-static int q6asm_dai_compr_get_caps(struct snd_compr_stream *stream,
+static int q6asm_dai_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_caps *caps)
{
caps->direction = SND_COMPRESS_PLAYBACK;
@@ -933,7 +937,8 @@ static int q6asm_dai_compr_get_caps(struct snd_compr_stream *stream,
return 0;
}
-static int q6asm_dai_compr_get_codec_caps(struct snd_compr_stream *stream,
+static int q6asm_dai_compr_get_codec_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_codec_caps *codec)
{
switch (codec->codec) {
@@ -947,7 +952,7 @@ static int q6asm_dai_compr_get_codec_caps(struct snd_compr_stream *stream,
return 0;
}
-static struct snd_compr_ops q6asm_dai_compr_ops = {
+static struct snd_compress_ops q6asm_dai_compress_ops = {
.open = q6asm_dai_compr_open,
.free = q6asm_dai_compr_free,
.set_params = q6asm_dai_compr_set_params,
@@ -1021,7 +1026,7 @@ static const struct snd_soc_component_driver q6asm_fe_dai_component = {
.mmap = q6asm_dai_mmap,
.pcm_construct = q6asm_dai_pcm_new,
.pcm_destruct = q6asm_dai_pcm_free,
- .compr_ops = &q6asm_dai_compr_ops,
+ .compress_ops = &q6asm_dai_compress_ops,
};
static struct snd_soc_dai_driver q6asm_fe_dais_template[] = {
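The q6asm changes above follow the component-level compress API: snd_compr_ops on the component driver becomes snd_compress_ops, and every callback receives the component directly, so snd_soc_rtdcom_lookup() is no longer needed. A sketch of the new callback shape (foo_* names are hypothetical):

#include <sound/compress_driver.h>
#include <sound/soc.h>

struct foo_data {
	int session;
};

static int foo_compr_open(struct snd_soc_component *component,
			  struct snd_compr_stream *stream)
{
	struct foo_data *pdata = snd_soc_component_get_drvdata(component);

	dev_dbg(component->dev, "compress open, direction %d\n",
		stream->direction);
	return pdata ? 0 : -EINVAL;
}

static const struct snd_compress_ops foo_compress_ops = {
	.open = foo_compr_open,
};

static const struct snd_soc_component_driver foo_component_driver = {
	.name = "foo",
	.compress_ops = &foo_compress_ops,
};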
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index af19010b9d88..8bd49c8a9517 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -224,6 +224,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_S_REG(SSI_SYS_STATUS5, 0x884),
RSND_GEN_S_REG(SSI_SYS_STATUS6, 0x888),
RSND_GEN_S_REG(SSI_SYS_STATUS7, 0x88c),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE0, 0x850),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE1, 0x854),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE2, 0x858),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE3, 0x85c),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE4, 0x890),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE5, 0x894),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE6, 0x898),
+ RSND_GEN_S_REG(SSI_SYS_INT_ENABLE7, 0x89c),
RSND_GEN_S_REG(HDMI0_SEL, 0x9e0),
RSND_GEN_S_REG(HDMI1_SEL, 0x9e4),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index ea6cbaa9743e..d47608ff5fac 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -189,6 +189,14 @@ enum rsnd_reg {
SSI_SYS_STATUS5,
SSI_SYS_STATUS6,
SSI_SYS_STATUS7,
+ SSI_SYS_INT_ENABLE0,
+ SSI_SYS_INT_ENABLE1,
+ SSI_SYS_INT_ENABLE2,
+ SSI_SYS_INT_ENABLE3,
+ SSI_SYS_INT_ENABLE4,
+ SSI_SYS_INT_ENABLE5,
+ SSI_SYS_INT_ENABLE6,
+ SSI_SYS_INT_ENABLE7,
HDMI0_SEL,
HDMI1_SEL,
SSI9_BUSIF0_MODE,
@@ -237,6 +245,7 @@ enum rsnd_reg {
#define SSI9_BUSIF_ADINR(i) (SSI9_BUSIF0_ADINR + (i))
#define SSI9_BUSIF_DALIGN(i) (SSI9_BUSIF0_DALIGN + (i))
#define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i))
+#define SSI_SYS_INT_ENABLE(i) (SSI_SYS_INT_ENABLE0 + (i))
struct rsnd_priv;
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 4a7d3413917f..47d5ddb526f2 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -372,6 +372,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
u32 wsr = ssi->wsr;
int width;
int is_tdm, is_tdm_split;
+ int id = rsnd_mod_id(mod);
+ int i;
+ u32 sys_int_enable = 0;
is_tdm = rsnd_runtime_is_tdm(io);
is_tdm_split = rsnd_runtime_is_tdm_split(io);
@@ -447,6 +450,38 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_mode = DIEN; /* PIO : enable Data interrupt */
}
+ /* enable busif buffer over/under run interrupt. */
+ if (is_tdm || is_tdm_split) {
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ for (i = 0; i < 4; i++) {
+ sys_int_enable = rsnd_mod_read(mod,
+ SSI_SYS_INT_ENABLE(i * 2));
+ sys_int_enable |= 0xf << (id * 4);
+ rsnd_mod_write(mod,
+ SSI_SYS_INT_ENABLE(i * 2),
+ sys_int_enable);
+ }
+
+ break;
+ case 9:
+ for (i = 0; i < 4; i++) {
+ sys_int_enable = rsnd_mod_read(mod,
+ SSI_SYS_INT_ENABLE((i * 2) + 1));
+ sys_int_enable |= 0xf << 4;
+ rsnd_mod_write(mod,
+ SSI_SYS_INT_ENABLE((i * 2) + 1),
+ sys_int_enable);
+ }
+
+ break;
+ }
+ }
+
init_end:
ssi->cr_own = cr_own;
ssi->cr_mode = cr_mode;
@@ -496,6 +531,13 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct device *dev = rsnd_priv_to_dev(priv);
+ int is_tdm, is_tdm_split;
+ int id = rsnd_mod_id(mod);
+ int i;
+ u32 sys_int_enable = 0;
+
+ is_tdm = rsnd_runtime_is_tdm(io);
+ is_tdm_split = rsnd_runtime_is_tdm_split(io);
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
@@ -517,6 +559,38 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
ssi->wsr = 0;
}
+ /* disable busif buffer over/under run interrupt. */
+ if (is_tdm || is_tdm_split) {
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ for (i = 0; i < 4; i++) {
+ sys_int_enable = rsnd_mod_read(mod,
+ SSI_SYS_INT_ENABLE(i * 2));
+ sys_int_enable &= ~(0xf << (id * 4));
+ rsnd_mod_write(mod,
+ SSI_SYS_INT_ENABLE(i * 2),
+ sys_int_enable);
+ }
+
+ break;
+ case 9:
+ for (i = 0; i < 4; i++) {
+ sys_int_enable = rsnd_mod_read(mod,
+ SSI_SYS_INT_ENABLE((i * 2) + 1));
+ sys_int_enable &= ~(0xf << 4);
+ rsnd_mod_write(mod,
+ SSI_SYS_INT_ENABLE((i * 2) + 1),
+ sys_int_enable);
+ }
+
+ break;
+ }
+ }
+
return 0;
}
@@ -622,6 +696,11 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod,
int enable)
{
u32 val = 0;
+ int is_tdm, is_tdm_split;
+ int id = rsnd_mod_id(mod);
+
+ is_tdm = rsnd_runtime_is_tdm(io);
+ is_tdm_split = rsnd_runtime_is_tdm_split(io);
if (rsnd_is_gen1(priv))
return 0;
@@ -635,6 +714,19 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod,
if (enable)
val = rsnd_ssi_is_dma_mode(mod) ? 0x0e000000 : 0x0f000000;
+ if (is_tdm || is_tdm_split) {
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 9:
+ val |= 0x0000ff00;
+ break;
+ }
+ }
+
rsnd_mod_write(mod, SSI_INT_ENABLE, val);
return 0;
@@ -651,6 +743,12 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
u32 status;
bool elapsed = false;
bool stop = false;
+ int id = rsnd_mod_id(mod);
+ int i;
+ int is_tdm, is_tdm_split;
+
+ is_tdm = rsnd_runtime_is_tdm(io);
+ is_tdm_split = rsnd_runtime_is_tdm_split(io);
spin_lock(&priv->lock);
@@ -672,6 +770,53 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
stop = true;
}
+ status = 0;
+
+ if (is_tdm || is_tdm_split) {
+ switch (id) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ for (i = 0; i < 4; i++) {
+ status = rsnd_mod_read(mod,
+ SSI_SYS_STATUS(i * 2));
+ status &= 0xf << (id * 4);
+
+ if (status) {
+ rsnd_dbg_irq_status(dev,
+ "%s err status : 0x%08x\n",
+ rsnd_mod_name(mod), status);
+ rsnd_mod_write(mod,
+ SSI_SYS_STATUS(i * 2),
+ 0xf << (id * 4));
+ stop = true;
+ break;
+ }
+ }
+ break;
+ case 9:
+ for (i = 0; i < 4; i++) {
+ status = rsnd_mod_read(mod,
+ SSI_SYS_STATUS((i * 2) + 1));
+ status &= 0xf << 4;
+
+ if (status) {
+ rsnd_dbg_irq_status(dev,
+ "%s err status : 0x%08x\n",
+ rsnd_mod_name(mod), status);
+ rsnd_mod_write(mod,
+ SSI_SYS_STATUS((i * 2) + 1),
+ 0xf << 4);
+ stop = true;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
rsnd_ssi_status_clear(mod);
rsnd_ssi_interrupt_out:
spin_unlock(&priv->lock);
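The repeated switch statements above encode one layout: SSI0..SSI4 keep their BUSIF over/under-run bits in the even SSI_SYS_INT_ENABLE/SSI_SYS_STATUS registers, one 4-bit field per SSI, while SSI9 uses the odd registers with its field at bits 4..7. A hypothetical helper summarising that selection (not part of the driver):

#include <linux/types.h>

static void foo_busif_int_field(int ssi_id, int i, int *reg_idx, u32 *mask)
{
	if (ssi_id == 9) {
		*reg_idx = (i * 2) + 1;		/* SSI_SYS_INT_ENABLE1/3/5/7 */
		*mask = 0xf << 4;
	} else {
		*reg_idx = i * 2;		/* SSI_SYS_INT_ENABLE0/2/4/6 */
		*mask = 0xf << (ssi_id * 4);	/* valid for SSI0..SSI4 */
	}
}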
diff --git a/sound/soc/soc-card.c b/sound/soc/soc-card.c
new file mode 100644
index 000000000000..41c586b86dc3
--- /dev/null
+++ b/sound/soc/soc-card.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// soc-card.c
+//
+// Copyright (C) 2019 Renesas Electronics Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#define soc_card_ret(dai, ret) _soc_card_ret(dai, __func__, ret)
+static inline int _soc_card_ret(struct snd_soc_card *card,
+ const char *func, int ret)
+{
+ switch (ret) {
+ case -EPROBE_DEFER:
+ case -ENOTSUPP:
+ case 0:
+ break;
+ default:
+ dev_err(card->dev,
+ "ASoC: error at %s on %s: %d\n",
+ func, card->name, ret);
+ }
+
+ return ret;
+}
+
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name)
+{
+ struct snd_card *card = soc_card->snd_card;
+ struct snd_kcontrol *kctl;
+
+ if (unlikely(!name))
+ return NULL;
+
+ list_for_each_entry(kctl, &card->controls, list)
+ if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
+ return kctl;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);
+
+/**
+ * snd_soc_card_jack_new - Create a new jack
+ * @card: ASoC card
+ * @id: an identifying string for this jack
+ * @type: a bitmask of enum snd_jack_type values that can be detected by
+ * this jack
+ * @jack: structure to use for the jack
+ * @pins: Array of jack pins to be added to the jack or NULL
+ * @num_pins: Number of elements in the @pins array
+ *
+ * Creates a new jack object.
+ *
+ * Returns zero if successful, or a negative error code on failure.
+ * On success jack will be initialised.
+ */
+int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
+ struct snd_soc_jack *jack,
+ struct snd_soc_jack_pin *pins, unsigned int num_pins)
+{
+ int ret;
+
+ mutex_init(&jack->mutex);
+ jack->card = card;
+ INIT_LIST_HEAD(&jack->pins);
+ INIT_LIST_HEAD(&jack->jack_zones);
+ BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);
+
+ ret = snd_jack_new(card->snd_card, id, type, &jack->jack, false, false);
+ if (ret)
+ goto end;
+
+ if (num_pins)
+ ret = snd_soc_jack_add_pins(jack, num_pins, pins);
+end:
+ return soc_card_ret(card, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_card_jack_new);
+
+int snd_soc_card_suspend_pre(struct snd_soc_card *card)
+{
+ int ret = 0;
+
+ if (card->suspend_pre)
+ ret = card->suspend_pre(card);
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_suspend_post(struct snd_soc_card *card)
+{
+ int ret = 0;
+
+ if (card->suspend_post)
+ ret = card->suspend_post(card);
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_resume_pre(struct snd_soc_card *card)
+{
+ int ret = 0;
+
+ if (card->resume_pre)
+ ret = card->resume_pre(card);
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_resume_post(struct snd_soc_card *card)
+{
+ int ret = 0;
+
+ if (card->resume_post)
+ ret = card->resume_post(card);
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_probe(struct snd_soc_card *card)
+{
+ if (card->probe) {
+ int ret = card->probe(card);
+
+ if (ret < 0)
+ return soc_card_ret(card, ret);
+
+ /*
+ * It has "card->probe" and "card->late_probe" callbacks.
+ * So, set the "probed" flag here, because "late_probe"
+ * also has to be taken into account.
+ *
+ * see
+ * snd_soc_bind_card()
+ * snd_soc_card_late_probe()
+ */
+ card->probed = 1;
+ }
+
+ return 0;
+}
+
+int snd_soc_card_late_probe(struct snd_soc_card *card)
+{
+ if (card->late_probe) {
+ int ret = card->late_probe(card);
+
+ if (ret < 0)
+ return soc_card_ret(card, ret);
+ }
+
+ /*
+ * It has "card->probe" and "card->late_probe" callbacks,
+ * and the "late_probe" callback is called after "probe".
+ * This means we can set the "card->probed" flag after
+ * "late_probe" in all cases.
+ *
+ * see
+ * snd_soc_bind_card()
+ * snd_soc_card_probe()
+ */
+ card->probed = 1;
+
+ return 0;
+}
+
+int snd_soc_card_remove(struct snd_soc_card *card)
+{
+ int ret = 0;
+
+ if (card->probed &&
+ card->remove)
+ ret = card->remove(card);
+
+ card->probed = 0;
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ int ret = 0;
+
+ if (card && card->set_bias_level)
+ ret = card->set_bias_level(card, dapm, level);
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_set_bias_level_post(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ int ret = 0;
+
+ if (card && card->set_bias_level_post)
+ ret = card->set_bias_level_post(card, dapm, level);
+
+ return soc_card_ret(card, ret);
+}
+
+int snd_soc_card_add_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
+{
+ int ret = 0;
+
+ if (card->add_dai_link)
+ ret = card->add_dai_link(card, dai_link);
+
+ return soc_card_ret(card, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_card_add_dai_link);
+
+void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
+{
+ if (card->remove_dai_link)
+ card->remove_dai_link(card, dai_link);
+}
+EXPORT_SYMBOL_GPL(snd_soc_card_remove_dai_link);
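Typical machine-driver use of the jack helper factored out in the new soc-card.c above, matching the prototype shown there (the foo_* jack and pin objects are hypothetical):

#include <linux/kernel.h>
#include <sound/jack.h>
#include <sound/soc.h>

static struct snd_soc_jack foo_hp_jack;

static struct snd_soc_jack_pin foo_hp_pins[] = {
	{ .pin = "Headphone", .mask = SND_JACK_HEADPHONE },
};

static int foo_card_late_probe(struct snd_soc_card *card)
{
	return snd_soc_card_jack_new(card, "Headphone Jack",
				     SND_JACK_HEADPHONE, &foo_hp_jack,
				     foo_hp_pins, ARRAY_SIZE(foo_hp_pins));
}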
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 50062eb79adb..4984b6a2c370 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -19,6 +19,7 @@
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/soc-dpcm.h>
+#include <sound/soc-link.h>
#include <linux/pm_runtime.h>
static int soc_compr_components_open(struct snd_compr_stream *cstream,
@@ -29,11 +30,11 @@ static int soc_compr_components_open(struct snd_compr_stream *cstream,
int i, ret;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->open)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->open)
continue;
- ret = component->driver->compr_ops->open(cstream);
+ ret = component->driver->compress_ops->open(component, cstream);
if (ret < 0) {
dev_err(component->dev,
"Compress ASoC: can't open platform %s: %d\n",
@@ -59,11 +60,11 @@ static int soc_compr_components_free(struct snd_compr_stream *cstream,
if (component == last)
break;
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->free)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->free)
continue;
- component->driver->compr_ops->free(cstream);
+ component->driver->compress_ops->free(component, cstream);
}
return 0;
@@ -72,8 +73,8 @@ static int soc_compr_components_free(struct snd_compr_stream *cstream,
static int soc_compr_open(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component, *save = NULL;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_component *component = NULL, *save = NULL;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret, i;
for_each_rtd_components(rtd, i, component) {
@@ -87,29 +88,17 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
- ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
- if (ret < 0) {
- dev_err(cpu_dai->dev,
- "Compress ASoC: can't open interface %s: %d\n",
- cpu_dai->name, ret);
- goto out;
- }
- }
+ ret = snd_soc_dai_compr_startup(cpu_dai, cstream);
+ if (ret < 0)
+ goto out;
ret = soc_compr_components_open(cstream, &component);
if (ret < 0)
goto machine_err;
- if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->startup) {
- ret = rtd->dai_link->compr_ops->startup(cstream);
- if (ret < 0) {
- dev_err(rtd->dev,
- "Compress ASoC: %s startup failed: %d\n",
- rtd->dai_link->name, ret);
- goto machine_err;
- }
- }
+ ret = snd_soc_link_compr_startup(cstream);
+ if (ret < 0)
+ goto machine_err;
snd_soc_runtime_activate(rtd, cstream->direction);
@@ -120,8 +109,7 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
machine_err:
soc_compr_components_free(cstream, component);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
- cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+ snd_soc_dai_compr_shutdown(cpu_dai, cstream);
out:
mutex_unlock(&rtd->card->pcm_mutex);
pm_err:
@@ -141,7 +129,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
struct snd_pcm_substream *fe_substream =
fe->pcm->streams[cstream->direction].substream;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
struct snd_soc_dpcm *dpcm;
struct snd_soc_dapm_widget_list *list;
int stream;
@@ -178,28 +166,17 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
goto out;
}
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
- ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
- if (ret < 0) {
- dev_err(cpu_dai->dev,
- "Compress ASoC: can't open interface %s: %d\n",
- cpu_dai->name, ret);
- goto out;
- }
- }
+ ret = snd_soc_dai_compr_startup(cpu_dai, cstream);
+ if (ret < 0)
+ goto out;
ret = soc_compr_components_open(cstream, &component);
if (ret < 0)
goto open_err;
- if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->startup) {
- ret = fe->dai_link->compr_ops->startup(cstream);
- if (ret < 0) {
- pr_err("Compress ASoC: %s startup failed: %d\n",
- fe->dai_link->name, ret);
- goto machine_err;
- }
- }
+ ret = snd_soc_link_compr_startup(cstream);
+ if (ret < 0)
+ goto machine_err;
dpcm_clear_pending_state(fe, stream);
dpcm_path_put(&list);
@@ -216,8 +193,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
machine_err:
soc_compr_components_free(cstream, component);
open_err:
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
- cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+ snd_soc_dai_compr_shutdown(cpu_dai, cstream);
out:
dpcm_path_put(&list);
be_err:
@@ -230,8 +206,8 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int stream, i;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -245,19 +221,17 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
snd_soc_dai_digital_mute(codec_dai, 1, cstream->direction);
- if (!cpu_dai->active)
+ if (!snd_soc_dai_active(cpu_dai))
cpu_dai->rate = 0;
- if (!codec_dai->active)
+ if (!snd_soc_dai_active(codec_dai))
codec_dai->rate = 0;
- if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->shutdown)
- rtd->dai_link->compr_ops->shutdown(cstream);
+ snd_soc_link_compr_shutdown(cstream);
soc_compr_components_free(cstream, NULL);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
- cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+ snd_soc_dai_compr_shutdown(cpu_dai, cstream);
snd_soc_dapm_stream_stop(rtd, stream);
@@ -274,7 +248,7 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
static int soc_compr_free_fe(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
- struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
struct snd_soc_dpcm *dpcm;
int stream, ret;
@@ -308,13 +282,11 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
fe->dpcm[stream].runtime = NULL;
- if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->shutdown)
- fe->dai_link->compr_ops->shutdown(cstream);
+ snd_soc_link_compr_shutdown(cstream);
soc_compr_components_free(cstream, NULL);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
- cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+ snd_soc_dai_compr_shutdown(cpu_dai, cstream);
mutex_unlock(&fe->card->mutex);
return 0;
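The open-coded "cops && cops->callback" checks removed throughout this file now live behind snd_soc_dai_compr_*() and snd_soc_link_compr_*() wrappers. Roughly, each wrapper reduces to the same pattern the deleted lines show; a sketch under that assumption (not the verbatim implementation, and without the centralised error reporting the real helpers add):

static int example_dai_compr_startup(struct snd_soc_dai *dai,
				     struct snd_compr_stream *cstream)
{
	int ret = 0;

	/* same guard the callers used to open-code */
	if (dai->driver->cops && dai->driver->cops->startup)
		ret = dai->driver->cops->startup(cstream, dai);

	return ret;
}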
@@ -328,11 +300,12 @@ static int soc_compr_components_trigger(struct snd_compr_stream *cstream,
int i, ret;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->trigger)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->trigger)
continue;
- ret = component->driver->compr_ops->trigger(cstream, cmd);
+ ret = component->driver->compress_ops->trigger(
+ component, cstream, cmd);
if (ret < 0)
return ret;
}
@@ -343,8 +316,8 @@ static int soc_compr_components_trigger(struct snd_compr_stream *cstream,
static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -353,8 +326,9 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
if (ret < 0)
goto out;
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger)
- cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
+ ret = snd_soc_dai_compr_trigger(cpu_dai, cstream, cmd);
+ if (ret < 0)
+ goto out;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -373,7 +347,7 @@ out:
static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
- struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
int ret, stream;
if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
@@ -387,11 +361,9 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger) {
- ret = cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
- if (ret < 0)
- goto out;
- }
+ ret = snd_soc_dai_compr_trigger(cpu_dai, cstream, cmd);
+ if (ret < 0)
+ goto out;
ret = soc_compr_components_trigger(cstream, cmd);
if (ret < 0)
@@ -430,11 +402,12 @@ static int soc_compr_components_set_params(struct snd_compr_stream *cstream,
int i, ret;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->set_params)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->set_params)
continue;
- ret = component->driver->compr_ops->set_params(cstream, params);
+ ret = component->driver->compress_ops->set_params(
+ component, cstream, params);
if (ret < 0)
return ret;
}
@@ -446,7 +419,7 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int ret;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -458,21 +431,17 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
* that these callbacks will configure everything for this compress
* path, like configuring a PCM port for a CODEC.
*/
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
- ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
- if (ret < 0)
- goto err;
- }
+ ret = snd_soc_dai_compr_set_params(cpu_dai, cstream, params);
+ if (ret < 0)
+ goto err;
ret = soc_compr_components_set_params(cstream, params);
if (ret < 0)
goto err;
- if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) {
- ret = rtd->dai_link->compr_ops->set_params(cstream);
- if (ret < 0)
- goto err;
- }
+ ret = snd_soc_link_compr_set_params(cstream);
+ if (ret < 0)
+ goto err;
if (cstream->direction == SND_COMPRESS_PLAYBACK)
snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK,
@@ -500,7 +469,7 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *fe = cstream->private_data;
struct snd_pcm_substream *fe_substream =
fe->pcm->streams[cstream->direction].substream;
- struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
int ret, stream;
if (cstream->direction == SND_COMPRESS_PLAYBACK)
@@ -528,21 +497,17 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
if (ret < 0)
goto out;
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
- ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
- if (ret < 0)
- goto out;
- }
+ ret = snd_soc_dai_compr_set_params(cpu_dai, cstream, params);
+ if (ret < 0)
+ goto out;
ret = soc_compr_components_set_params(cstream, params);
if (ret < 0)
goto out;
- if (fe->dai_link->compr_ops && fe->dai_link->compr_ops->set_params) {
- ret = fe->dai_link->compr_ops->set_params(cstream);
- if (ret < 0)
- goto out;
- }
+ ret = snd_soc_link_compr_set_params(cstream);
+ if (ret < 0)
+ goto out;
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
@@ -558,23 +523,22 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int i, ret = 0;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_params) {
- ret = cpu_dai->driver->cops->get_params(cstream, params, cpu_dai);
- if (ret < 0)
- goto err;
- }
+ ret = snd_soc_dai_compr_get_params(cpu_dai, cstream, params);
+ if (ret < 0)
+ goto err;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->get_params)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->get_params)
continue;
- ret = component->driver->compr_ops->get_params(cstream, params);
+ ret = component->driver->compress_ops->get_params(
+ component, cstream, params);
break;
}
@@ -593,11 +557,12 @@ static int soc_compr_get_caps(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->get_caps)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->get_caps)
continue;
- ret = component->driver->compr_ops->get_caps(cstream, caps);
+ ret = component->driver->compress_ops->get_caps(
+ component, cstream, caps);
break;
}
@@ -615,12 +580,12 @@ static int soc_compr_get_codec_caps(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->get_codec_caps)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->get_codec_caps)
continue;
- ret = component->driver->compr_ops->get_codec_caps(cstream,
- codec);
+ ret = component->driver->compress_ops->get_codec_caps(
+ component, cstream, codec);
break;
}
@@ -632,23 +597,22 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int i, ret = 0;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->ack) {
- ret = cpu_dai->driver->cops->ack(cstream, bytes, cpu_dai);
- if (ret < 0)
- goto err;
- }
+ ret = snd_soc_dai_compr_ack(cpu_dai, cstream, bytes);
+ if (ret < 0)
+ goto err;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->ack)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->ack)
continue;
- ret = component->driver->compr_ops->ack(cstream, bytes);
+ ret = component->driver->compress_ops->ack(
+ component, cstream, bytes);
if (ret < 0)
goto err;
}
@@ -664,22 +628,24 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
int i, ret = 0;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->pointer)
- cpu_dai->driver->cops->pointer(cstream, tstamp, cpu_dai);
+ ret = snd_soc_dai_compr_pointer(cpu_dai, cstream, tstamp);
+ if (ret < 0)
+ goto out;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->pointer)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->pointer)
continue;
- ret = component->driver->compr_ops->pointer(cstream, tstamp);
+ ret = component->driver->compress_ops->pointer(
+ component, cstream, tstamp);
break;
}
-
+out:
mutex_unlock(&rtd->card->pcm_mutex);
return ret;
}
@@ -694,11 +660,12 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->copy)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->copy)
continue;
- ret = component->driver->compr_ops->copy(cstream, buf, count);
+ ret = component->driver->compress_ops->copy(
+ component, cstream, buf, count);
break;
}
@@ -711,22 +678,20 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int i, ret;
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_metadata) {
- ret = cpu_dai->driver->cops->set_metadata(cstream, metadata, cpu_dai);
- if (ret < 0)
- return ret;
- }
+ ret = snd_soc_dai_compr_set_metadata(cpu_dai, cstream, metadata);
+ if (ret < 0)
+ return ret;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->set_metadata)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->set_metadata)
continue;
- ret = component->driver->compr_ops->set_metadata(cstream,
- metadata);
+ ret = component->driver->compress_ops->set_metadata(
+ component, cstream, metadata);
if (ret < 0)
return ret;
}
@@ -739,22 +704,20 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
int i, ret;
- if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_metadata) {
- ret = cpu_dai->driver->cops->get_metadata(cstream, metadata, cpu_dai);
- if (ret < 0)
- return ret;
- }
+ ret = snd_soc_dai_compr_get_metadata(cpu_dai, cstream, metadata);
+ if (ret < 0)
+ return ret;
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->get_metadata)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->get_metadata)
continue;
- return component->driver->compr_ops->get_metadata(cstream,
- metadata);
+ return component->driver->compress_ops->get_metadata(
+ component, cstream, metadata);
}
return 0;
@@ -801,8 +764,8 @@ static struct snd_compr_ops soc_compr_dyn_ops = {
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
struct snd_soc_component *component;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_compr *compr;
struct snd_pcm *be_pcm;
char new_name[64];
@@ -879,8 +842,8 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
}
for_each_rtd_components(rtd, i, component) {
- if (!component->driver->compr_ops ||
- !component->driver->compr_ops->copy)
+ if (!component->driver->compress_ops ||
+ !component->driver->compress_ops->copy)
continue;
compr->ops->copy = soc_compr_copy;
@@ -891,7 +854,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
ret = snd_compress_new(rtd->card->snd_card, num, direction,
new_name, compr);
if (ret < 0) {
- component = rtd->codec_dai->component;
+ component = asoc_rtd_to_codec(rtd, 0)->component;
dev_err(component->dev,
"Compress ASoC: can't create compress for codec %s: %d\n",
component->name, ret);
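[editor's note on the soc-compress.c hunks above] Two conversions happen at the same call sites: CPU DAI cops are now reached through the snd_soc_dai_compr_*() helpers (defined further down in soc-dai.c), and component callbacks move from compr_ops to compress_ops, gaining the component pointer as their first argument. Below is a hedged sketch of a component driver wired up to the new table; the driver, the callback name and the ops-struct name (assumed here to be struct snd_compress_ops) are illustrative, and only the component-first argument convention is taken from the calls above.

#include <sound/soc.h>
#include <sound/compress_driver.h>

/* hypothetical component callback using the new component-first signature */
static int foo_compr_get_params(struct snd_soc_component *component,
				struct snd_compr_stream *cstream,
				struct snd_codec *params)
{
	/* fill *params from component-local state */
	return 0;
}

static const struct snd_compress_ops foo_compress_ops = {
	.get_params	= foo_compr_get_params,
};

static const struct snd_soc_component_driver foo_component_driver = {
	.name		= "foo",
	.compress_ops	= &foo_compress_ops,
};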
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 843b8b1c89d4..7b387202c5db 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -38,6 +38,7 @@
#include <sound/soc.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-topology.h>
+#include <sound/soc-link.h>
#include <sound/initval.h>
#define CREATE_TRACE_POINTS
@@ -364,7 +365,7 @@ EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
*/
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
int playback = SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
@@ -372,7 +373,8 @@ void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
dev_dbg(rtd->dev,
"ASoC: pop wq checking: %s status: %s waiting: %s\n",
codec_dai->driver->playback.stream_name,
- codec_dai->stream_active[playback] ? "active" : "inactive",
+ snd_soc_dai_stream_active(codec_dai, playback) ?
+ "active" : "inactive",
rtd->pop_wait ? "yes" : "no");
/* are we waiting on this codec DAI stream */
@@ -487,20 +489,18 @@ static struct snd_soc_pcm_runtime *soc_new_pcm_runtime(
* dais = [][][][][][][][][][][][][][][][][][]
* ^cpu_dais ^codec_dais
* |--- num_cpus ---|--- num_codecs --|
+ * see
+ * asoc_rtd_to_cpu()
+ * asoc_rtd_to_codec()
*/
- rtd->cpu_dais = &rtd->dais[0];
- rtd->codec_dais = &rtd->dais[dai_link->num_cpus];
-
- /*
- * rtd remaining settings
- */
- rtd->card = card;
- rtd->dai_link = dai_link;
+ rtd->num_cpus = dai_link->num_cpus;
+ rtd->num_codecs = dai_link->num_codecs;
+ rtd->card = card;
+ rtd->dai_link = dai_link;
+ rtd->num = card->num_rtd++;
/* see for_each_card_rtds */
list_add_tail(&rtd->list, &card->rtd_list);
- rtd->num = card->num_rtd;
- card->num_rtd++;
return rtd;
@@ -548,7 +548,7 @@ int snd_soc_suspend(struct device *dev)
continue;
for_each_rtd_codec_dais(rtd, i, dai) {
- if (dai->stream_active[playback])
+ if (snd_soc_dai_stream_active(dai, playback))
snd_soc_dai_digital_mute(dai, 1, playback);
}
}
@@ -561,8 +561,7 @@ int snd_soc_suspend(struct device *dev)
snd_pcm_suspend_all(rtd->pcm);
}
- if (card->suspend_pre)
- card->suspend_pre(card);
+ snd_soc_card_suspend_pre(card);
/* close any waiting streams */
snd_soc_flush_all_delayed_work(card);
@@ -632,8 +631,7 @@ int snd_soc_suspend(struct device *dev)
}
}
- if (card->suspend_post)
- card->suspend_post(card);
+ snd_soc_card_suspend_post(card);
return 0;
}
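[editor's note] The suspend path above stops open-coding the optional card callbacks and goes through snd_soc_card_suspend_pre()/snd_soc_card_suspend_post() instead. Those helpers are added elsewhere in this series; judging from the removed lines they are assumed to be thin wrappers of roughly this shape (illustrative sketch only, not the actual implementation):

/* sketch only: mirrors the removed "if (card->suspend_pre)" check */
static inline int snd_soc_card_suspend_pre_sketch(struct snd_soc_card *card)
{
	if (card->suspend_pre)
		return card->suspend_pre(card);

	return 0;
}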
@@ -662,8 +660,7 @@ static void soc_resume_deferred(struct work_struct *work)
/* Bring us up into D2 so that DAPM starts enabling things */
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D2);
- if (card->resume_pre)
- card->resume_pre(card);
+ snd_soc_card_resume_pre(card);
for_each_card_components(card, component) {
if (snd_soc_component_is_suspended(component))
@@ -690,13 +687,12 @@ static void soc_resume_deferred(struct work_struct *work)
continue;
for_each_rtd_codec_dais(rtd, i, dai) {
- if (dai->stream_active[playback])
+ if (snd_soc_dai_stream_active(dai, playback))
snd_soc_dai_digital_mute(dai, 0, playback);
}
}
- if (card->resume_post)
- card->resume_post(card);
+ snd_soc_card_resume_post(card);
dev_dbg(card->dev, "ASoC: resume work completed\n");
@@ -720,7 +716,7 @@ int snd_soc_resume(struct device *dev)
/* activate pins from sleep state */
for_each_card_components(card, component)
- if (component->active)
+ if (snd_soc_component_active(component))
pinctrl_pm_select_default_state(component->dev);
dev_dbg(dev, "ASoC: Scheduling resume work\n");
@@ -744,9 +740,6 @@ static inline void soc_resume_init(struct snd_soc_card *card)
}
#endif
-static const struct snd_soc_dai_ops null_dai_ops = {
-};
-
static struct device_node
*soc_component_to_node(struct snd_soc_component *component)
{
@@ -865,8 +858,12 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
* Defer card registration if codec component is not added to
* component list.
*/
- if (!soc_find_component(codec))
+ if (!soc_find_component(codec)) {
+ dev_dbg(card->dev,
+ "ASoC: codec component %s not found for link %s\n",
+ codec->name, link->name);
return -EPROBE_DEFER;
+ }
}
for_each_link_platforms(link, i, platform) {
@@ -886,8 +883,12 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
* Defer card registration if platform component is not added to
* component list.
*/
- if (!soc_find_component(platform))
+ if (!soc_find_component(platform)) {
+ dev_dbg(card->dev,
+ "ASoC: platform component %s not found for link %s\n",
+ platform->name, link->name);
return -EPROBE_DEFER;
+ }
}
for_each_link_cpus(link, i, cpu) {
@@ -908,8 +909,12 @@ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
* component list.
*/
if ((cpu->of_node || cpu->name) &&
- !soc_find_component(cpu))
+ !soc_find_component(cpu)) {
+ dev_dbg(card->dev,
+ "ASoC: cpu component %s not found for link %s\n",
+ cpu->name, link->name);
return -EPROBE_DEFER;
+ }
/*
* At least one of CPU DAI name or CPU device name/node must be
@@ -942,8 +947,7 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
/*
* Notify the machine driver for extra destruction
*/
- if (card->remove_dai_link)
- card->remove_dai_link(card, rtd->dai_link);
+ snd_soc_card_remove_dai_link(card, rtd->dai_link);
soc_free_pcm_runtime(rtd);
}
@@ -973,8 +977,9 @@ int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
/*
* Notify the machine driver for extra initialization
*/
- if (card->add_dai_link)
- card->add_dai_link(card, dai_link);
+ ret = snd_soc_card_add_dai_link(card, dai_link);
+ if (ret < 0)
+ return ret;
if (dai_link->ignore)
return 0;
@@ -989,36 +994,28 @@ int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
if (!rtd)
return -ENOMEM;
- rtd->num_cpus = dai_link->num_cpus;
for_each_link_cpus(dai_link, i, cpu) {
- rtd->cpu_dais[i] = snd_soc_find_dai(cpu);
- if (!rtd->cpu_dais[i]) {
+ asoc_rtd_to_cpu(rtd, i) = snd_soc_find_dai(cpu);
+ if (!asoc_rtd_to_cpu(rtd, i)) {
dev_info(card->dev, "ASoC: CPU DAI %s not registered\n",
cpu->dai_name);
goto _err_defer;
}
- snd_soc_rtd_add_component(rtd, rtd->cpu_dais[i]->component);
+ snd_soc_rtd_add_component(rtd, asoc_rtd_to_cpu(rtd, i)->component);
}
- /* Single cpu links expect cpu and cpu_dai in runtime data */
- rtd->cpu_dai = rtd->cpu_dais[0];
-
/* Find CODEC from registered CODECs */
- rtd->num_codecs = dai_link->num_codecs;
for_each_link_codecs(dai_link, i, codec) {
- rtd->codec_dais[i] = snd_soc_find_dai(codec);
- if (!rtd->codec_dais[i]) {
+ asoc_rtd_to_codec(rtd, i) = snd_soc_find_dai(codec);
+ if (!asoc_rtd_to_codec(rtd, i)) {
dev_info(card->dev, "ASoC: CODEC DAI %s not registered\n",
codec->dai_name);
goto _err_defer;
}
- snd_soc_rtd_add_component(rtd, rtd->codec_dais[i]->component);
+ snd_soc_rtd_add_component(rtd, asoc_rtd_to_codec(rtd, i)->component);
}
- /* Single codec links expect codec and codec_dai in runtime data */
- rtd->codec_dai = rtd->codec_dais[0];
-
/* Find PLATFORM from registered PLATFORMs */
for_each_link_platforms(dai_link, i, platform) {
for_each_component(component) {
@@ -1037,32 +1034,11 @@ _err_defer:
}
EXPORT_SYMBOL_GPL(snd_soc_add_pcm_runtime);
-static int soc_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_dai *dai;
- int i, ret = 0;
-
- for_each_rtd_dais(rtd, i, dai) {
- struct snd_soc_dai_driver *drv = dai->driver;
-
- if (drv->pcm_new)
- ret = drv->pcm_new(rtd, dai);
- if (ret < 0) {
- dev_err(dai->dev,
- "ASoC: Failed to bind %s with pcm device\n",
- dai->name);
- return ret;
- }
- }
-
- return 0;
-}
-
static int soc_init_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai_link *dai_link = rtd->dai_link;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
struct snd_soc_component *component;
int ret, num, i;
@@ -1070,14 +1046,9 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
rtd->pmdown_time = pmdown_time;
/* do machine specific initialization */
- if (dai_link->init) {
- ret = dai_link->init(rtd);
- if (ret < 0) {
- dev_err(card->dev, "ASoC: failed to init %s: %d\n",
- dai_link->name, ret);
- return ret;
- }
- }
+ ret = snd_soc_link_init(rtd);
+ if (ret < 0)
+ return ret;
if (dai_link->dai_fmt) {
ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
@@ -1122,7 +1093,7 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
return ret;
}
- return soc_dai_pcm_new(rtd);
+ return snd_soc_pcm_dai_new(rtd);
}
static void soc_set_name_prefix(struct snd_soc_card *card,
@@ -1278,64 +1249,23 @@ err_probe:
return ret;
}
-static void soc_remove_dai(struct snd_soc_dai *dai, int order)
-{
- int err;
-
- if (!dai || !dai->probed || !dai->driver ||
- dai->driver->remove_order != order)
- return;
-
- err = snd_soc_dai_remove(dai);
- if (err < 0)
- dev_err(dai->dev,
- "ASoC: failed to remove %s: %d\n",
- dai->name, err);
-
- dai->probed = 0;
-}
-
-static int soc_probe_dai(struct snd_soc_dai *dai, int order)
-{
- int ret;
-
- if (dai->probed ||
- dai->driver->probe_order != order)
- return 0;
-
- ret = snd_soc_dai_probe(dai);
- if (ret < 0) {
- dev_err(dai->dev, "ASoC: failed to probe DAI %s: %d\n",
- dai->name, ret);
- return ret;
- }
-
- dai->probed = 1;
-
- return 0;
-}
-
static void soc_remove_link_dais(struct snd_soc_card *card)
{
- int i;
- struct snd_soc_dai *dai;
struct snd_soc_pcm_runtime *rtd;
int order;
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
- /* remove DAIs */
- for_each_rtd_dais(rtd, i, dai)
- soc_remove_dai(dai, order);
+ /* remove all rtd connected DAIs in good order */
+ snd_soc_pcm_dai_remove(rtd, order);
}
}
}
static int soc_probe_link_dais(struct snd_soc_card *card)
{
- struct snd_soc_dai *dai;
struct snd_soc_pcm_runtime *rtd;
- int i, order, ret;
+ int order, ret;
for_each_comp_order(order) {
for_each_card_rtds(card, rtd) {
@@ -1344,12 +1274,10 @@ static int soc_probe_link_dais(struct snd_soc_card *card)
"ASoC: probe %s dai link %d late %d\n",
card->name, rtd->num, order);
- /* probe the CPU DAI */
- for_each_rtd_dais(rtd, i, dai) {
- ret = soc_probe_dai(dai, order);
- if (ret)
- return ret;
- }
+ /* probe all rtd connected DAIs in good order */
+ ret = snd_soc_pcm_dai_probe(rtd, order);
+ if (ret)
+ return ret;
}
}
@@ -1720,11 +1648,31 @@ match:
dai_link->platforms->name = component->name;
/* convert non BE into BE */
- dai_link->no_pcm = 1;
- dai_link->dpcm_playback = 1;
- dai_link->dpcm_capture = 1;
+ if (!dai_link->no_pcm) {
+ dai_link->no_pcm = 1;
+
+ if (dai_link->dpcm_playback)
+ dev_warn(card->dev,
+ "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_playback=1\n",
+ dai_link->name);
+ if (dai_link->dpcm_capture)
+ dev_warn(card->dev,
+ "invalid configuration, dailink %s has flags no_pcm=0 and dpcm_capture=1\n",
+ dai_link->name);
+
+ /* convert normal link into DPCM one */
+ if (!(dai_link->dpcm_playback ||
+ dai_link->dpcm_capture)) {
+ dai_link->dpcm_playback = !dai_link->capture_only;
+ dai_link->dpcm_capture = !dai_link->playback_only;
+ }
+ }
- /* override any BE fixups */
+ /*
+ * override any BE fixups
+ * see
+ * snd_soc_link_be_hw_params_fixup()
+ */
dai_link->be_hw_params_fixup =
component->driver->be_hw_params_fixup;
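[editor's note] The conversion block above is worth unpacking: a link that is already a DPCM back end (no_pcm = 1) is left untouched, while a normal link being converted now derives dpcm_playback/dpcm_capture from playback_only/capture_only instead of forcing both on unconditionally as before. A hedged illustration with a hypothetical link:

/* hypothetical playback-only normal link handed to the code above */
static struct snd_soc_dai_link foo_link = {
	.name		= "foo-playback",
	.playback_only	= 1,
	/* .no_pcm, .dpcm_playback and .dpcm_capture left at 0 */
};

/*
 * after the conversion above runs on this link:
 *	foo_link.no_pcm        == 1
 *	foo_link.dpcm_playback == 1	(!capture_only)
 *	foo_link.dpcm_capture  == 0	(!playback_only)
 */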
@@ -1791,8 +1739,7 @@ static void __soc_setup_card_name(char *name, int len,
}
}
-static void soc_cleanup_card_resources(struct snd_soc_card *card,
- int card_probed)
+static void soc_cleanup_card_resources(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd, *n;
@@ -1816,8 +1763,7 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card,
soc_cleanup_card_debugfs(card);
/* remove the card */
- if (card_probed && card->remove)
- card->remove(card);
+ snd_soc_card_remove(card);
if (card->snd_card) {
snd_card_free(card->snd_card);
@@ -1828,12 +1774,10 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card,
static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
{
if (card->instantiated) {
- int card_probed = 1;
-
card->instantiated = false;
snd_soc_flush_all_delayed_work(card);
- soc_cleanup_card_resources(card, card_probed);
+ soc_cleanup_card_resources(card);
if (!unregister)
list_add(&card->list, &unbind_card_list);
} else {
@@ -1847,7 +1791,7 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_component *component;
struct snd_soc_dai_link *dai_link;
- int ret, i, card_probed = 0;
+ int ret, i;
mutex_lock(&client_mutex);
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_INIT);
@@ -1895,12 +1839,9 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
goto probe_end;
/* initialise the sound card only once */
- if (card->probe) {
- ret = card->probe(card);
- if (ret < 0)
- goto probe_end;
- card_probed = 1;
- }
+ ret = snd_soc_card_probe(card);
+ if (ret < 0)
+ goto probe_end;
/* probe all components used by DAI links on this card */
ret = soc_probe_link_components(card);
@@ -1983,15 +1924,9 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
}
}
- if (card->late_probe) {
- ret = card->late_probe(card);
- if (ret < 0) {
- dev_err(card->dev, "ASoC: %s late_probe() failed: %d\n",
- card->name, ret);
- goto probe_end;
- }
- }
- card_probed = 1;
+ ret = snd_soc_card_late_probe(card);
+ if (ret < 0)
+ goto probe_end;
snd_soc_dapm_new_widgets(card);
@@ -2008,12 +1943,12 @@ static int snd_soc_bind_card(struct snd_soc_card *card)
/* deactivate pins to sleep state */
for_each_card_components(card, component)
- if (!component->active)
+ if (!snd_soc_component_active(component))
pinctrl_pm_select_sleep_state(component->dev);
probe_end:
if (ret < 0)
- soc_cleanup_card_resources(card, card_probed);
+ soc_cleanup_card_resources(card);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
@@ -2160,22 +2095,6 @@ static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
return 0;
}
-struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
- const char *name)
-{
- struct snd_card *card = soc_card->snd_card;
- struct snd_kcontrol *kctl;
-
- if (unlikely(!name))
- return NULL;
-
- list_for_each_entry(kctl, &card->controls, list)
- if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
- return kctl;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);
-
/**
* snd_soc_add_component_controls - Add an array of controls to a component.
*
@@ -2404,8 +2323,6 @@ struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
dai->component = component;
dai->dev = dev;
dai->driver = dai_drv;
- if (!dai->driver->ops)
- dai->driver->ops = &null_dai_ops;
/* see for_each_component_dais */
list_add_tail(&dai->list, &component->dai_list);
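[editor's note] With cpu_dai/codec_dai and the cpu_dais/codec_dais pointers gone from the runtime, every access in this patch goes through asoc_rtd_to_cpu() and asoc_rtd_to_codec(). Based on the rtd->dais layout comment earlier in this file (CPU DAIs first, then codec DAIs), the accessors are assumed to expand roughly as below; the real definitions live in include/sound/soc.h and may differ in detail.

/* sketch: index into the single rtd->dais[] array */
#define asoc_rtd_to_cpu(rtd, n)	  ((rtd)->dais[n])
#define asoc_rtd_to_codec(rtd, n) ((rtd)->dais[(n) + (rtd)->num_cpus])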
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 31c41559034b..b05e18b63a1c 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -8,6 +8,29 @@
#include <sound/soc.h>
#include <sound/soc-dai.h>
+#include <sound/soc-link.h>
+
+#define soc_dai_ret(dai, ret) _soc_dai_ret(dai, __func__, ret)
+static inline int _soc_dai_ret(struct snd_soc_dai *dai,
+ const char *func, int ret)
+{
+ /* Positive, Zero values are not errors */
+ if (ret >= 0)
+ return ret;
+
+ /* Negative values might be errors */
+ switch (ret) {
+ case -EPROBE_DEFER:
+ case -ENOTSUPP:
+ break;
+ default:
+ dev_err(dai->dev,
+ "ASoC: error at %s on %s: %d\n",
+ func, dai->name, ret);
+ }
+
+ return ret;
+}
/**
* snd_soc_dai_set_sysclk - configure DAI system or master clock.
@@ -21,11 +44,16 @@
int snd_soc_dai_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
- if (dai->driver->ops->set_sysclk)
- return dai->driver->ops->set_sysclk(dai, clk_id, freq, dir);
+ int ret;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->set_sysclk)
+ ret = dai->driver->ops->set_sysclk(dai, clk_id, freq, dir);
+ else
+ ret = snd_soc_component_set_sysclk(dai->component, clk_id, 0,
+ freq, dir);
- return snd_soc_component_set_sysclk(dai->component, clk_id, 0,
- freq, dir);
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);
@@ -42,10 +70,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);
int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
int div_id, int div)
{
- if (dai->driver->ops->set_clkdiv)
- return dai->driver->ops->set_clkdiv(dai, div_id, div);
- else
- return -EINVAL;
+ int ret = -EINVAL;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->set_clkdiv)
+ ret = dai->driver->ops->set_clkdiv(dai, div_id, div);
+
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);
@@ -62,12 +93,17 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_clkdiv);
int snd_soc_dai_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
unsigned int freq_in, unsigned int freq_out)
{
- if (dai->driver->ops->set_pll)
- return dai->driver->ops->set_pll(dai, pll_id, source,
- freq_in, freq_out);
+ int ret;
- return snd_soc_component_set_pll(dai->component, pll_id, source,
- freq_in, freq_out);
+ if (dai->driver->ops &&
+ dai->driver->ops->set_pll)
+ ret = dai->driver->ops->set_pll(dai, pll_id, source,
+ freq_in, freq_out);
+ else
+ ret = snd_soc_component_set_pll(dai->component, pll_id, source,
+ freq_in, freq_out);
+
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);
@@ -80,10 +116,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_pll);
*/
int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
- if (dai->driver->ops->set_bclk_ratio)
- return dai->driver->ops->set_bclk_ratio(dai, ratio);
- else
- return -EINVAL;
+ int ret = -EINVAL;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->set_bclk_ratio)
+ ret = dai->driver->ops->set_bclk_ratio(dai, ratio);
+
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
@@ -96,9 +135,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
*/
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
- if (dai->driver->ops->set_fmt == NULL)
- return -ENOTSUPP;
- return dai->driver->ops->set_fmt(dai, fmt);
+ int ret = -ENOTSUPP;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->set_fmt)
+ ret = dai->driver->ops->set_fmt(dai, fmt);
+
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
@@ -153,7 +196,10 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
unsigned int tx_mask, unsigned int rx_mask,
int slots, int slot_width)
{
- if (dai->driver->ops->xlate_tdm_slot_mask)
+ int ret = -ENOTSUPP;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->xlate_tdm_slot_mask)
dai->driver->ops->xlate_tdm_slot_mask(slots,
&tx_mask, &rx_mask);
else
@@ -162,11 +208,11 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
dai->tx_mask = tx_mask;
dai->rx_mask = rx_mask;
- if (dai->driver->ops->set_tdm_slot)
- return dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
+ if (dai->driver->ops &&
+ dai->driver->ops->set_tdm_slot)
+ ret = dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask,
slots, slot_width);
- else
- return -ENOTSUPP;
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot);
@@ -186,11 +232,13 @@ int snd_soc_dai_set_channel_map(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot)
{
- if (dai->driver->ops->set_channel_map)
- return dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
- rx_num, rx_slot);
- else
- return -ENOTSUPP;
+ int ret = -ENOTSUPP;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->set_channel_map)
+ ret = dai->driver->ops->set_channel_map(dai, tx_num, tx_slot,
+ rx_num, rx_slot);
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_channel_map);
@@ -208,11 +256,13 @@ int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
unsigned int *tx_num, unsigned int *tx_slot,
unsigned int *rx_num, unsigned int *rx_slot)
{
- if (dai->driver->ops->get_channel_map)
- return dai->driver->ops->get_channel_map(dai, tx_num, tx_slot,
- rx_num, rx_slot);
- else
- return -ENOTSUPP;
+ int ret = -ENOTSUPP;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->get_channel_map)
+ ret = dai->driver->ops->get_channel_map(dai, tx_num, tx_slot,
+ rx_num, rx_slot);
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_get_channel_map);
@@ -225,10 +275,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_get_channel_map);
*/
int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate)
{
- if (dai->driver->ops->set_tristate)
- return dai->driver->ops->set_tristate(dai, tristate);
- else
- return -EINVAL;
+ int ret = -EINVAL;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->set_tristate)
+ ret = dai->driver->ops->set_tristate(dai, tristate);
+
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
@@ -243,13 +296,17 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction)
{
- if (dai->driver->ops->mute_stream)
- return dai->driver->ops->mute_stream(dai, mute, direction);
+ int ret = -ENOTSUPP;
+
+ if (dai->driver->ops &&
+ dai->driver->ops->mute_stream)
+ ret = dai->driver->ops->mute_stream(dai, mute, direction);
else if (direction == SNDRV_PCM_STREAM_PLAYBACK &&
+ dai->driver->ops &&
dai->driver->ops->digital_mute)
- return dai->driver->ops->digital_mute(dai, mute);
- else
- return -ENOTSUPP;
+ ret = dai->driver->ops->digital_mute(dai, mute);
+
+ return soc_dai_ret(dai, ret);
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
@@ -258,35 +315,25 @@ int snd_soc_dai_hw_params(struct snd_soc_dai *dai,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int ret;
+ int ret = 0;
/* perform any topology hw_params fixups before DAI */
- if (rtd->dai_link->be_hw_params_fixup) {
- ret = rtd->dai_link->be_hw_params_fixup(rtd, params);
- if (ret < 0) {
- dev_err(rtd->dev,
- "ASoC: hw_params topology fixup failed %d\n",
- ret);
- return ret;
- }
- }
+ ret = snd_soc_link_be_hw_params_fixup(rtd, params);
+ if (ret < 0)
+ goto end;
- if (dai->driver->ops->hw_params) {
+ if (dai->driver->ops &&
+ dai->driver->ops->hw_params)
ret = dai->driver->ops->hw_params(substream, params, dai);
- if (ret < 0) {
- dev_err(dai->dev, "ASoC: can't set %s hw params: %d\n",
- dai->name, ret);
- return ret;
- }
- }
-
- return 0;
+end:
+ return soc_dai_ret(dai, ret);
}
void snd_soc_dai_hw_free(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream)
{
- if (dai->driver->ops->hw_free)
+ if (dai->driver->ops &&
+ dai->driver->ops->hw_free)
dai->driver->ops->hw_free(substream, dai);
}
@@ -295,96 +342,310 @@ int snd_soc_dai_startup(struct snd_soc_dai *dai,
{
int ret = 0;
- if (dai->driver->ops->startup)
+ if (dai->driver->ops &&
+ dai->driver->ops->startup)
ret = dai->driver->ops->startup(substream, dai);
- return ret;
+ return soc_dai_ret(dai, ret);
}
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream)
{
- if (dai->driver->ops->shutdown)
+ if (dai->driver->ops &&
+ dai->driver->ops->shutdown)
dai->driver->ops->shutdown(substream, dai);
}
-int snd_soc_dai_prepare(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream)
+snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai,
+ struct snd_pcm_substream *substream)
{
- int ret = 0;
+ int delay = 0;
- if (dai->driver->ops->prepare)
- ret = dai->driver->ops->prepare(substream, dai);
+ if (dai->driver->ops &&
+ dai->driver->ops->delay)
+ delay = dai->driver->ops->delay(substream, dai);
- return ret;
+ return delay;
}
-int snd_soc_dai_trigger(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream,
- int cmd)
+int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
+ struct snd_soc_pcm_runtime *rtd, int num)
{
- int ret = 0;
+ int ret = -ENOTSUPP;
+ if (dai->driver->compress_new)
+ ret = dai->driver->compress_new(rtd, num);
+ return soc_dai_ret(dai, ret);
+}
+
+/*
+ * snd_soc_dai_stream_valid() - check if a DAI supports the given stream
+ *
+ * Returns true if the DAI supports the indicated stream type.
+ */
+bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int dir)
+{
+ struct snd_soc_pcm_stream *stream = snd_soc_dai_get_pcm_stream(dai, dir);
- if (dai->driver->ops->trigger)
- ret = dai->driver->ops->trigger(substream, cmd, dai);
+ /* If the codec specifies any channels at all, it supports the stream */
+ return stream->channels_min;
+}
- return ret;
+void snd_soc_dai_action(struct snd_soc_dai *dai,
+ int stream, int action)
+{
+ /* see snd_soc_dai_stream_active() */
+ dai->stream_active[stream] += action;
+
+ /* see snd_soc_component_active() */
+ dai->component->active += action;
}
+EXPORT_SYMBOL_GPL(snd_soc_dai_action);
-int snd_soc_dai_bespoke_trigger(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream,
- int cmd)
+int snd_soc_dai_active(struct snd_soc_dai *dai)
{
- int ret = 0;
+ int stream, active;
+
+ active = 0;
+ for_each_pcm_streams(stream)
+ active += dai->stream_active[stream];
+
+ return active;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_active);
+
+int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order)
+{
+ struct snd_soc_dai *dai;
+ int i;
+
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->probe_order != order)
+ continue;
+
+ if (dai->driver->probe) {
+ int ret = dai->driver->probe(dai);
+
+ if (ret < 0)
+ return soc_dai_ret(dai, ret);
+ }
+
+ dai->probed = 1;
+ }
+
+ return 0;
+}
+
+int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order)
+{
+ struct snd_soc_dai *dai;
+ int i, r, ret = 0;
+
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->remove_order != order)
+ continue;
+
+ if (dai->probed &&
+ dai->driver->remove) {
+ r = dai->driver->remove(dai);
+ if (r < 0)
+ ret = r; /* use last error */
+ }
- if (dai->driver->ops->bespoke_trigger)
- ret = dai->driver->ops->bespoke_trigger(substream, cmd, dai);
+ dai->probed = 0;
+ }
return ret;
}
-snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream)
+int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd)
{
- int delay = 0;
+ struct snd_soc_dai *dai;
+ int i, ret = 0;
+
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->pcm_new) {
+ ret = dai->driver->pcm_new(rtd, dai);
+ if (ret < 0)
+ return soc_dai_ret(dai, ret);
+ }
+ }
- if (dai->driver->ops->delay)
- delay = dai->driver->ops->delay(substream, dai);
+ return 0;
+}
- return delay;
+int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai;
+ int i, ret;
+
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->ops &&
+ dai->driver->ops->prepare) {
+ ret = dai->driver->ops->prepare(substream, dai);
+ if (ret < 0)
+ return soc_dai_ret(dai, ret);
+ }
+ }
+
+ return 0;
}
-int snd_soc_dai_probe(struct snd_soc_dai *dai)
+int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream,
+ int cmd)
{
- if (dai->driver->probe)
- return dai->driver->probe(dai);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai;
+ int i, ret;
+
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->ops &&
+ dai->driver->ops->trigger) {
+ ret = dai->driver->ops->trigger(substream, cmd, dai);
+ if (ret < 0)
+ return soc_dai_ret(dai, ret);
+ }
+ }
+
return 0;
}
-int snd_soc_dai_remove(struct snd_soc_dai *dai)
+int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
+ int cmd)
{
- if (dai->driver->remove)
- return dai->driver->remove(dai);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai;
+ int i, ret;
+
+ for_each_rtd_dais(rtd, i, dai) {
+ if (dai->driver->ops &&
+ dai->driver->ops->bespoke_trigger) {
+ ret = dai->driver->ops->bespoke_trigger(substream,
+ cmd, dai);
+ if (ret < 0)
+ return soc_dai_ret(dai, ret);
+ }
+ }
+
return 0;
}
-int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
- struct snd_soc_pcm_runtime *rtd, int num)
+int snd_soc_dai_compr_startup(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream)
{
- if (dai->driver->compress_new)
- return dai->driver->compress_new(rtd, num);
- return -ENOTSUPP;
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->startup)
+ ret = dai->driver->cops->startup(cstream, dai);
+
+ return soc_dai_ret(dai, ret);
}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_startup);
-/*
- * snd_soc_dai_stream_valid() - check if a DAI supports the given stream
- *
- * Returns true if the DAI supports the indicated stream type.
- */
-bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int dir)
+void snd_soc_dai_compr_shutdown(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream)
{
- struct snd_soc_pcm_stream *stream = snd_soc_dai_get_pcm_stream(dai, dir);
+ if (dai->driver->cops &&
+ dai->driver->cops->shutdown)
+ dai->driver->cops->shutdown(cstream, dai);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_shutdown);
- /* If the codec specifies any channels at all, it supports the stream */
- return stream->channels_min;
+int snd_soc_dai_compr_trigger(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream, int cmd)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->trigger)
+ ret = dai->driver->cops->trigger(cstream, cmd, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_trigger);
+
+int snd_soc_dai_compr_set_params(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_params *params)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->set_params)
+ ret = dai->driver->cops->set_params(cstream, params, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_set_params);
+
+int snd_soc_dai_compr_get_params(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_codec *params)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->get_params)
+ ret = dai->driver->cops->get_params(cstream, params, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_get_params);
+
+int snd_soc_dai_compr_ack(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ size_t bytes)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->ack)
+ ret = dai->driver->cops->ack(cstream, bytes, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_ack);
+
+int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_tstamp *tstamp)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->pointer)
+ ret = dai->driver->cops->pointer(cstream, tstamp, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_pointer);
+
+int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->set_metadata)
+ ret = dai->driver->cops->set_metadata(cstream, metadata, dai);
+
+ return soc_dai_ret(dai, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_set_metadata);
+
+int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
+ struct snd_compr_stream *cstream,
+ struct snd_compr_metadata *metadata)
+{
+ int ret = 0;
+
+ if (dai->driver->cops &&
+ dai->driver->cops->get_metadata)
+ ret = dai->driver->cops->get_metadata(cstream, metadata, dai);
+
+ return soc_dai_ret(dai, ret);
}
+EXPORT_SYMBOL_GPL(snd_soc_dai_compr_get_metadata);
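[editor's note] Two patterns recur through the new soc-dai.c helpers above: every wrapper now tolerates dai->driver->ops being NULL (the old null_dai_ops placeholder is gone), and every return value is funnelled through soc_dai_ret(), which logs real errors once and stays quiet for -EPROBE_DEFER and -ENOTSUPP. A hedged sketch of what that buys a driver: a minimal DAI with no ops table at all is now legal (hypothetical driver below), and calls such as snd_soc_dai_set_fmt() on it simply return -ENOTSUPP or -EINVAL instead of dereferencing a NULL ops pointer.

/* hypothetical DAI driver registering no ops table at all */
static struct snd_soc_dai_driver foo_dai_driver = {
	.name = "foo-dai",
	.playback = {
		.stream_name	= "foo Playback",
		.channels_min	= 1,
		.channels_max	= 2,
		.rates		= SNDRV_PCM_RATE_48000,
		.formats	= SNDRV_PCM_FMTBIT_S16_LE,
	},
	/* no .ops: the NULL checks in the wrappers above cover this */
};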
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index e2632841b321..2491e1ce16d3 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -725,8 +725,7 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
trace_snd_soc_bias_level_start(card, level);
- if (card && card->set_bias_level)
- ret = card->set_bias_level(card, dapm, level);
+ ret = snd_soc_card_set_bias_level(card, dapm, level);
if (ret != 0)
goto out;
@@ -736,8 +735,7 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
if (ret != 0)
goto out;
- if (card && card->set_bias_level_post)
- ret = card->set_bias_level_post(card, dapm, level);
+ ret = snd_soc_card_set_bias_level_post(card, dapm, level);
out:
trace_snd_soc_bias_level_done(card, level);
@@ -3835,7 +3833,7 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
"ASoC: startup() failed: %d\n", ret);
goto out;
}
- source->active++;
+ snd_soc_dai_activate(source, substream->stream);
}
substream->stream = SNDRV_PCM_STREAM_PLAYBACK;
@@ -3848,7 +3846,7 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
"ASoC: startup() failed: %d\n", ret);
goto out;
}
- sink->active++;
+ snd_soc_dai_activate(sink, substream->stream);
}
substream->hw_opened = 1;
@@ -3978,14 +3976,14 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
substream->stream = SNDRV_PCM_STREAM_CAPTURE;
snd_soc_dapm_widget_for_each_source_path(w, path) {
source = path->source->priv;
- source->active--;
+ snd_soc_dai_deactivate(source, substream->stream);
snd_soc_dai_shutdown(source, substream);
}
substream->stream = SNDRV_PCM_STREAM_PLAYBACK;
snd_soc_dapm_widget_for_each_sink_path(w, path) {
sink = path->sink->priv;
- sink->active--;
+ snd_soc_dai_deactivate(sink, substream->stream);
snd_soc_dai_shutdown(sink, substream);
}
break;
@@ -4340,16 +4338,16 @@ static void dapm_connect_dai_pair(struct snd_soc_card *card,
codec = codec_dai->playback_widget;
if (playback_cpu && codec) {
- if (dai_link->params && !dai_link->playback_widget) {
+ if (dai_link->params && !rtd->playback_widget) {
substream = streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
dai = snd_soc_dapm_new_dai(card, substream, "playback");
if (IS_ERR(dai))
goto capture;
- dai_link->playback_widget = dai;
+ rtd->playback_widget = dai;
}
dapm_connect_dai_routes(&card->dapm, cpu_dai, playback_cpu,
- dai_link->playback_widget,
+ rtd->playback_widget,
codec_dai, codec);
}
@@ -4358,16 +4356,16 @@ capture:
codec = codec_dai->capture_widget;
if (codec && capture_cpu) {
- if (dai_link->params && !dai_link->capture_widget) {
+ if (dai_link->params && !rtd->capture_widget) {
substream = streams[SNDRV_PCM_STREAM_CAPTURE].substream;
dai = snd_soc_dapm_new_dai(card, substream, "capture");
if (IS_ERR(dai))
return;
- dai_link->capture_widget = dai;
+ rtd->capture_widget = dai;
}
dapm_connect_dai_routes(&card->dapm, codec_dai, codec,
- dai_link->capture_widget,
+ rtd->capture_widget,
cpu_dai, capture_cpu);
}
}
@@ -4427,11 +4425,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
if (rtd->num_cpus == 1) {
for_each_rtd_codec_dais(rtd, i, codec_dai)
dapm_connect_dai_pair(card, rtd, codec_dai,
- rtd->cpu_dais[0]);
+ asoc_rtd_to_cpu(rtd, 0));
} else if (rtd->num_codecs == rtd->num_cpus) {
for_each_rtd_codec_dais(rtd, i, codec_dai)
dapm_connect_dai_pair(card, rtd, codec_dai,
- rtd->cpu_dais[i]);
+ asoc_rtd_to_cpu(rtd, i));
} else {
dev_err(card->dev,
"N cpus to M codecs link is not supported yet\n");
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index facf1922a714..f728309a0833 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -68,7 +68,7 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
return -EINVAL;
}
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
if (ret)
@@ -134,7 +134,7 @@ dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
return snd_soc_set_runtime_hwparams(substream,
pcm->config->pcm_hardware);
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
memset(&hw, 0, sizeof(hw));
hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
@@ -203,7 +203,7 @@ static struct dma_chan *dmaengine_pcm_compat_request_channel(
return NULL;
}
- dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
return pcm->chan[0];
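[editor's note] The dmaengine PCM changes above are mechanical (rtd->cpu_dai becomes asoc_rtd_to_cpu(rtd, 0)), but they show where the fetched dma_data comes from: the CPU DAI stashes it per substream. A hedged sketch of that producer side, with a hypothetical driver and private struct:

#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

struct foo_priv {
	struct snd_dmaengine_dai_dma_data dma_data[2];	/* playback/capture */
};

static int foo_dai_startup(struct snd_pcm_substream *substream,
			   struct snd_soc_dai *dai)
{
	struct foo_priv *priv = snd_soc_dai_get_drvdata(dai);

	/* later retrieved via snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), ...) */
	snd_soc_dai_set_dma_data(dai, substream,
				 &priv->dma_data[substream->stream]);

	return 0;
}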
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index b5748dcd490f..0f1820f36b4d 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -24,44 +24,6 @@ struct jack_gpio_tbl {
};
/**
- * snd_soc_card_jack_new - Create a new jack
- * @card: ASoC card
- * @id: an identifying string for this jack
- * @type: a bitmask of enum snd_jack_type values that can be detected by
- * this jack
- * @jack: structure to use for the jack
- * @pins: Array of jack pins to be added to the jack or NULL
- * @num_pins: Number of elements in the @pins array
- *
- * Creates a new jack object.
- *
- * Returns zero if successful, or a negative error code on failure.
- * On success jack will be initialised.
- */
-int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
- struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins,
- unsigned int num_pins)
-{
- int ret;
-
- mutex_init(&jack->mutex);
- jack->card = card;
- INIT_LIST_HEAD(&jack->pins);
- INIT_LIST_HEAD(&jack->jack_zones);
- BLOCKING_INIT_NOTIFIER_HEAD(&jack->notifier);
-
- ret = snd_jack_new(card->snd_card, id, type, &jack->jack, false, false);
- if (ret)
- return ret;
-
- if (num_pins)
- return snd_soc_jack_add_pins(jack, num_pins, pins);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_card_jack_new);
-
-/**
* snd_soc_jack_report - Report the current status for a jack
*
* @jack: the jack
diff --git a/sound/soc/soc-link.c b/sound/soc/soc-link.c
new file mode 100644
index 000000000000..f849278beba0
--- /dev/null
+++ b/sound/soc/soc-link.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// soc-link.c
+//
+// Copyright (C) 2019 Renesas Electronics Corp.
+// Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+//
+#include <sound/soc.h>
+#include <sound/soc-link.h>
+
+#define soc_link_ret(rtd, ret) _soc_link_ret(rtd, __func__, ret)
+static inline int _soc_link_ret(struct snd_soc_pcm_runtime *rtd,
+ const char *func, int ret)
+{
+ /* Positive, Zero values are not errors */
+ if (ret >= 0)
+ return ret;
+
+ /* Negative values might be errors */
+ switch (ret) {
+ case -EPROBE_DEFER:
+ case -ENOTSUPP:
+ break;
+ default:
+ dev_err(rtd->dev,
+ "ASoC: error at %s on %s: %d\n",
+ func, rtd->dai_link->name, ret);
+ }
+
+ return ret;
+}
+
+int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret = 0;
+
+ if (rtd->dai_link->init)
+ ret = rtd->dai_link->init(rtd);
+
+ return soc_link_ret(rtd, ret);
+}
+
+int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ int ret = 0;
+
+ if (rtd->dai_link->be_hw_params_fixup)
+ ret = rtd->dai_link->be_hw_params_fixup(rtd, params);
+
+ return soc_link_ret(rtd, ret);
+}
+
+int snd_soc_link_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ if (rtd->dai_link->ops &&
+ rtd->dai_link->ops->startup)
+ ret = rtd->dai_link->ops->startup(substream);
+
+ return soc_link_ret(rtd, ret);
+}
+
+void snd_soc_link_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ if (rtd->dai_link->ops &&
+ rtd->dai_link->ops->shutdown)
+ rtd->dai_link->ops->shutdown(substream);
+}
+
+int snd_soc_link_prepare(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ if (rtd->dai_link->ops &&
+ rtd->dai_link->ops->prepare)
+ ret = rtd->dai_link->ops->prepare(substream);
+
+ return soc_link_ret(rtd, ret);
+}
+
+int snd_soc_link_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ if (rtd->dai_link->ops &&
+ rtd->dai_link->ops->hw_params)
+ ret = rtd->dai_link->ops->hw_params(substream, params);
+
+ return soc_link_ret(rtd, ret);
+}
+
+void snd_soc_link_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ if (rtd->dai_link->ops &&
+ rtd->dai_link->ops->hw_free)
+ rtd->dai_link->ops->hw_free(substream);
+}
+
+int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+
+ if (rtd->dai_link->ops &&
+ rtd->dai_link->ops->trigger)
+ ret = rtd->dai_link->ops->trigger(substream, cmd);
+
+ return soc_link_ret(rtd, ret);
+}
+
+int snd_soc_link_compr_startup(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ int ret = 0;
+
+ if (rtd->dai_link->compr_ops &&
+ rtd->dai_link->compr_ops->startup)
+ ret = rtd->dai_link->compr_ops->startup(cstream);
+
+ return soc_link_ret(rtd, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_link_compr_startup);
+
+void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+
+ if (rtd->dai_link->compr_ops &&
+ rtd->dai_link->compr_ops->shutdown)
+ rtd->dai_link->compr_ops->shutdown(cstream);
+}
+EXPORT_SYMBOL_GPL(snd_soc_link_compr_shutdown);
+
+int snd_soc_link_compr_set_params(struct snd_compr_stream *cstream)
+{
+ struct snd_soc_pcm_runtime *rtd = cstream->private_data;
+ int ret = 0;
+
+ if (rtd->dai_link->compr_ops &&
+ rtd->dai_link->compr_ops->set_params)
+ ret = rtd->dai_link->compr_ops->set_params(cstream);
+
+ return soc_link_ret(rtd, ret);
+}
+EXPORT_SYMBOL_GPL(snd_soc_link_compr_set_params);
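[editor's note] The new soc-link.c above centralises every dai_link callback invocation and its error reporting (soc_link_ret() mirrors soc_dai_ret() from soc-dai.c). Machine drivers keep providing the same hooks as before; a hedged sketch of the consumer side these wrappers end up calling, with hypothetical names:

static int foo_link_startup(struct snd_pcm_substream *substream)
{
	/* machine-specific clock/pin setup would go here */
	return 0;
}

static const struct snd_soc_ops foo_link_ops = {
	.startup = foo_link_startup,
};

/* snd_soc_link_startup() above reaches foo_link_startup() through ->ops */
static struct snd_soc_dai_link foo_dai_link = {
	.name	= "foo",
	.ops	= &foo_link_ops,
};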
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 1f302de44052..2c114b4542ce 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -24,6 +24,7 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dpcm.h>
+#include <sound/soc-link.h>
#include <sound/initval.h>
#define DPCM_MAX_BE_USERS 8
@@ -136,7 +137,7 @@ static ssize_t dpcm_state_read_file(struct file *file, char __user *user_buf,
return -ENOMEM;
for_each_pcm_streams(stream)
- if (snd_soc_dai_stream_valid(fe->cpu_dai, stream))
+ if (snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream))
offset += dpcm_show_state(fe, stream,
buf + offset,
out_count - offset);
@@ -202,106 +203,30 @@ static inline void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm)
}
#endif
-static int soc_rtd_startup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_substream *substream)
-{
- if (rtd->dai_link->ops &&
- rtd->dai_link->ops->startup)
- return rtd->dai_link->ops->startup(substream);
- return 0;
-}
-
-static void soc_rtd_shutdown(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_substream *substream)
-{
- if (rtd->dai_link->ops &&
- rtd->dai_link->ops->shutdown)
- rtd->dai_link->ops->shutdown(substream);
-}
-
-static int soc_rtd_prepare(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_substream *substream)
-{
- if (rtd->dai_link->ops &&
- rtd->dai_link->ops->prepare)
- return rtd->dai_link->ops->prepare(substream);
- return 0;
-}
-
-static int soc_rtd_hw_params(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- if (rtd->dai_link->ops &&
- rtd->dai_link->ops->hw_params)
- return rtd->dai_link->ops->hw_params(substream, params);
- return 0;
-}
-
-static void soc_rtd_hw_free(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_substream *substream)
-{
- if (rtd->dai_link->ops &&
- rtd->dai_link->ops->hw_free)
- rtd->dai_link->ops->hw_free(substream);
-}
-
-static int soc_rtd_trigger(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_substream *substream,
- int cmd)
-{
- if (rtd->dai_link->ops &&
- rtd->dai_link->ops->trigger)
- return rtd->dai_link->ops->trigger(substream, cmd);
- return 0;
-}
-
-static void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
- int stream, int action)
-{
- struct snd_soc_dai *dai;
- int i;
-
- lockdep_assert_held(&rtd->card->pcm_mutex);
-
- for_each_rtd_dais(rtd, i, dai) {
- dai->stream_active[stream] += action;
- dai->active += action;
- dai->component->active += action;
- }
-}
-
/**
- * snd_soc_runtime_activate() - Increment active count for PCM runtime components
+ * snd_soc_runtime_action() - Increment/Decrement active count for
+ * PCM runtime components
* @rtd: ASoC PCM runtime that is activated
* @stream: Direction of the PCM stream
*
- * Increments the active count for all the DAIs and components attached to a PCM
- * runtime. Should typically be called when a stream is opened.
+ * Increments/Decrements the active count for all the DAIs and components
+ * attached to a PCM runtime.
+ * Should typically be called when a stream is opened.
*
* Must be called with the rtd->card->pcm_mutex being held
*/
-void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
+void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
+ int stream, int action)
{
- snd_soc_runtime_action(rtd, stream, 1);
-}
-EXPORT_SYMBOL_GPL(snd_soc_runtime_activate);
+ struct snd_soc_dai *dai;
+ int i;
-/**
- * snd_soc_runtime_deactivate() - Decrement active count for PCM runtime components
- * @rtd: ASoC PCM runtime that is deactivated
- * @stream: Direction of the PCM stream
- *
- * Decrements the active count for all the DAIs and components attached to a PCM
- * runtime. Should typically be called when a stream is closed.
- *
- * Must be called with the rtd->card->pcm_mutex being held
- */
-void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream)
-{
- snd_soc_runtime_action(rtd, stream, -1);
+ lockdep_assert_held(&rtd->card->pcm_mutex);
+
+ for_each_rtd_dais(rtd, i, dai)
+ snd_soc_dai_action(dai, stream, action);
}
-EXPORT_SYMBOL_GPL(snd_soc_runtime_deactivate);
+EXPORT_SYMBOL_GPL(snd_soc_runtime_action);
/**
* snd_soc_runtime_ignore_pmdown_time() - Check whether to ignore the power down delay
@@ -758,7 +683,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
for_each_rtd_dais(rtd, i, dai)
snd_soc_dai_shutdown(dai, substream);
- soc_rtd_shutdown(rtd, substream);
+ snd_soc_link_shutdown(substream);
soc_pcm_components_close(substream);
@@ -772,7 +697,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
}
for_each_rtd_components(rtd, i, component)
- if (!component->active)
+ if (!snd_soc_component_active(component))
pinctrl_pm_select_sleep_state(component->dev);
return 0;
@@ -805,12 +730,9 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
if (ret < 0)
goto component_err;
- ret = soc_rtd_startup(rtd, substream);
- if (ret < 0) {
- pr_err("ASoC: %s startup failed: %d\n",
- rtd->dai_link->name, ret);
+ ret = snd_soc_link_startup(substream);
+ if (ret < 0)
goto rtd_startup_err;
- }
/* startup the audio subsystem */
for_each_rtd_dais(rtd, i, dai) {
@@ -836,10 +758,10 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
soc_pcm_init_runtime_hw(substream);
if (rtd->num_codecs == 1)
- codec_dai_name = rtd->codec_dai->name;
+ codec_dai_name = asoc_rtd_to_codec(rtd, 0)->name;
if (rtd->num_cpus == 1)
- cpu_dai_name = rtd->cpu_dai->name;
+ cpu_dai_name = asoc_rtd_to_cpu(rtd, 0)->name;
if (soc_pcm_has_symmetry(substream))
runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
@@ -866,7 +788,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
/* Symmetry only applies if we've already got an active stream. */
for_each_rtd_dais(rtd, i, dai) {
- if (dai->active) {
+ if (snd_soc_dai_active(dai)) {
ret = soc_pcm_apply_symmetry(substream, dai);
if (ret != 0)
goto config_err;
@@ -892,7 +814,7 @@ config_err:
for_each_rtd_dais(rtd, i, dai)
snd_soc_dai_shutdown(dai, substream);
- soc_rtd_shutdown(rtd, substream);
+ snd_soc_link_shutdown(substream);
rtd_startup_err:
soc_pcm_components_close(substream);
component_err:
@@ -904,7 +826,7 @@ component_err:
}
for_each_rtd_components(rtd, i, component)
- if (!component->active)
+ if (!snd_soc_component_active(component))
pinctrl_pm_select_sleep_state(component->dev);
return ret;
@@ -934,12 +856,9 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
- ret = soc_rtd_prepare(rtd, substream);
- if (ret < 0) {
- dev_err(rtd->card->dev,
- "ASoC: machine prepare error: %d\n", ret);
+ ret = snd_soc_link_prepare(substream);
+ if (ret < 0)
goto out;
- }
for_each_rtd_components(rtd, i, component) {
ret = snd_soc_component_prepare(component, substream);
@@ -950,13 +869,10 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- for_each_rtd_dais(rtd, i, dai) {
- ret = snd_soc_dai_prepare(dai, substream);
- if (ret < 0) {
- dev_err(dai->dev,
- "ASoC: DAI prepare error: %d\n", ret);
- goto out;
- }
+ ret = snd_soc_pcm_dai_prepare(substream);
+ if (ret < 0) {
+ dev_err(rtd->dev, "ASoC: DAI prepare error: %d\n", ret);
+ goto out;
}
/* cancel any delayed stream shutdown that is pending */
@@ -1027,12 +943,9 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
if (ret)
goto out;
- ret = soc_rtd_hw_params(rtd, substream, params);
- if (ret < 0) {
- dev_err(rtd->card->dev,
- "ASoC: machine hw_params failed: %d\n", ret);
+ ret = snd_soc_link_hw_params(substream, params);
+ if (ret < 0)
goto out;
- }
for_each_rtd_codec_dais(rtd, i, codec_dai) {
struct snd_pcm_hw_params codec_params;
@@ -1142,7 +1055,7 @@ codec_err:
codec_dai->rate = 0;
}
- soc_rtd_hw_free(rtd, substream);
+ snd_soc_link_hw_free(substream);
mutex_unlock(&rtd->card->pcm_mutex);
return ret;
@@ -1161,9 +1074,9 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
/* clear the corresponding DAIs parameters when going to be inactive */
for_each_rtd_dais(rtd, i, dai) {
- int active = dai->stream_active[substream->stream];
+ int active = snd_soc_dai_stream_active(dai, substream->stream);
- if (dai->active == 1) {
+ if (snd_soc_dai_active(dai) == 1) {
dai->rate = 0;
dai->channels = 0;
dai->sample_bits = 0;
@@ -1174,7 +1087,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
}
/* free any machine hw params */
- soc_rtd_hw_free(rtd, substream);
+ snd_soc_link_hw_free(substream);
/* free any component resources */
soc_pcm_components_hw_free(substream, NULL);
@@ -1195,10 +1108,9 @@ static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *dai;
int i, ret;
- ret = soc_rtd_trigger(rtd, substream, cmd);
+ ret = snd_soc_link_trigger(substream, cmd);
if (ret < 0)
return ret;
@@ -1208,27 +1120,18 @@ static int soc_pcm_trigger_start(struct snd_pcm_substream *substream, int cmd)
return ret;
}
- for_each_rtd_dais(rtd, i, dai) {
- ret = snd_soc_dai_trigger(dai, substream, cmd);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return snd_soc_pcm_dai_trigger(substream, cmd);
}
static int soc_pcm_trigger_stop(struct snd_pcm_substream *substream, int cmd)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_component *component;
- struct snd_soc_dai *dai;
int i, ret;
- for_each_rtd_dais(rtd, i, dai) {
- ret = snd_soc_dai_trigger(dai, substream, cmd);
- if (ret < 0)
- return ret;
- }
+ ret = snd_soc_pcm_dai_trigger(substream, cmd);
+ if (ret < 0)
+ return ret;
for_each_rtd_components(rtd, i, component) {
ret = snd_soc_component_trigger(component, substream, cmd);
@@ -1236,7 +1139,7 @@ static int soc_pcm_trigger_stop(struct snd_pcm_substream *substream, int cmd)
return ret;
}
- ret = soc_rtd_trigger(rtd, substream, cmd);
+ ret = snd_soc_link_trigger(substream, cmd);
if (ret < 0)
return ret;
@@ -1265,21 +1168,6 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *dai;
- int i, ret;
-
- for_each_rtd_dais(rtd, i, dai) {
- ret = snd_soc_dai_bespoke_trigger(dai, substream, cmd);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
/*
* soc level wrapper for pointer callback
* If cpu_dai, codec_dai, component driver has the delay callback, then
@@ -1483,7 +1371,7 @@ static bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget,
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
int stream, struct snd_soc_dapm_widget_list **list)
{
- struct snd_soc_dai *cpu_dai = fe->cpu_dai;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
int paths;
if (fe->num_cpus > 1) {
@@ -1842,7 +1730,7 @@ static void dpcm_runtime_merge_chan(struct snd_pcm_substream *substream,
* DAIs connected to a single CPU DAI, use CPU DAI's directly
*/
if (be->num_codecs == 1) {
- codec_stream = snd_soc_dai_get_pcm_stream(be->codec_dais[0], stream);
+ codec_stream = snd_soc_dai_get_pcm_stream(asoc_rtd_to_codec(be, 0), stream);
*channels_min = max(*channels_min,
codec_stream->channels_min);
@@ -1957,7 +1845,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
for_each_rtd_cpu_dais (fe, i, fe_cpu_dai) {
/* Symmetry only applies if we've got an active stream. */
- if (fe_cpu_dai->active) {
+ if (snd_soc_dai_active(fe_cpu_dai)) {
err = soc_pcm_apply_symmetry(fe_substream, fe_cpu_dai);
if (err < 0)
return err;
@@ -1986,7 +1874,7 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
/* Symmetry only applies if we've got an active stream. */
for_each_rtd_dais(rtd, i, dai) {
- if (dai->active) {
+ if (snd_soc_dai_active(dai)) {
err = soc_pcm_apply_symmetry(fe_substream, dai);
if (err < 0)
return err;
@@ -2191,16 +2079,9 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
sizeof(struct snd_pcm_hw_params));
/* perform any hw_params fixups */
- if (be->dai_link->be_hw_params_fixup) {
- ret = be->dai_link->be_hw_params_fixup(be,
- &dpcm->hw_params);
- if (ret < 0) {
- dev_err(be->dev,
- "ASoC: hw_params BE fixup failed %d\n",
- ret);
- goto unwind;
- }
- }
+ ret = snd_soc_link_be_hw_params_fixup(be, &dpcm->hw_params);
+ if (ret < 0)
+ goto unwind;
/* copy the fixed-up hw params for BE dai */
memcpy(&be->dpcm[stream].hw_params, &dpcm->hw_params,
@@ -2483,7 +2364,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd %d\n",
fe->dai_link->name, cmd);
- ret = soc_pcm_bespoke_trigger(substream, cmd);
+ ret = snd_soc_pcm_dai_bespoke_trigger(substream, cmd);
break;
default:
dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
@@ -2628,7 +2509,7 @@ static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd stop\n",
fe->dai_link->name);
- err = soc_pcm_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_STOP);
+ err = snd_soc_pcm_dai_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_STOP);
if (err < 0)
dev_err(fe->dev,"ASoC: trigger FE failed %d\n", err);
} else {
@@ -2706,7 +2587,7 @@ static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd start\n",
fe->dai_link->name);
- ret = soc_pcm_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_START);
+ ret = snd_soc_pcm_dai_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_START);
if (ret < 0) {
dev_err(fe->dev,"ASoC: bespoke trigger FE failed %d\n", ret);
goto hw_free;
@@ -2730,12 +2611,12 @@ hw_free:
close:
dpcm_be_dai_shutdown(fe, stream);
disconnect:
- /* disconnect any non started BEs */
+ /* disconnect any closed BEs */
spin_lock_irqsave(&fe->card->dpcm_lock, flags);
for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
- dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
+ if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_CLOSE)
+ dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
}
spin_unlock_irqrestore(&fe->card->dpcm_lock, flags);
@@ -2759,7 +2640,7 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
return 0;
/* only check active links */
- if (!fe->cpu_dai->active)
+ if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0)))
return 0;
/* DAPM sync will call this to update DSP paths */
@@ -2769,13 +2650,13 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
for_each_pcm_streams(stream) {
/* skip if FE doesn't have playback/capture capability */
- if (!snd_soc_dai_stream_valid(fe->cpu_dai, stream) ||
- !snd_soc_dai_stream_valid(fe->codec_dai, stream))
+ if (!snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream) ||
+ !snd_soc_dai_stream_valid(asoc_rtd_to_codec(fe, 0), stream))
continue;
/* skip if FE isn't currently playing/capturing */
- if (!fe->cpu_dai->stream_active[stream] ||
- !fe->codec_dai->stream_active[stream])
+ if (!snd_soc_dai_stream_active(asoc_rtd_to_cpu(fe, 0), stream) ||
+ !snd_soc_dai_stream_active(asoc_rtd_to_codec(fe, 0), stream))
continue;
paths = dpcm_path_get(fe, stream, &list);
@@ -2908,20 +2789,44 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
struct snd_pcm *pcm;
char new_name[64];
int ret = 0, playback = 0, capture = 0;
+ int stream;
int i;
+ if (rtd->dai_link->dynamic && rtd->num_cpus > 1) {
+ dev_err(rtd->dev,
+ "DPCM doesn't support Multi CPU for Front-Ends yet\n");
+ return -EINVAL;
+ }
+
if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) {
- cpu_dai = asoc_rtd_to_cpu(rtd, 0);
- if (rtd->num_cpus > 1) {
- dev_err(rtd->dev,
- "DPCM doesn't support Multi CPU yet\n");
- return -EINVAL;
+ if (rtd->dai_link->dpcm_playback) {
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+ if (!snd_soc_dai_stream_valid(cpu_dai,
+ stream)) {
+ dev_err(rtd->card->dev,
+ "CPU DAI %s for rtd %s does not support playback\n",
+ cpu_dai->name,
+ rtd->dai_link->stream_name);
+ return -EINVAL;
+ }
+ playback = 1;
+ }
+ if (rtd->dai_link->dpcm_capture) {
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai)
+ if (!snd_soc_dai_stream_valid(cpu_dai,
+ stream)) {
+ dev_err(rtd->card->dev,
+ "CPU DAI %s for rtd %s does not support capture\n",
+ cpu_dai->name,
+ rtd->dai_link->stream_name);
+ return -EINVAL;
+ }
+ capture = 1;
}
-
- playback = rtd->dai_link->dpcm_playback &&
- snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK);
- capture = rtd->dai_link->dpcm_capture &&
- snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_CAPTURE);
} else {
/* Adapt stream for codec2codec links */
int cpu_capture = rtd->dai_link->params ?
@@ -2931,9 +2836,9 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
for_each_rtd_codec_dais(rtd, i, codec_dai) {
if (rtd->num_cpus == 1) {
- cpu_dai = rtd->cpu_dais[0];
+ cpu_dai = asoc_rtd_to_cpu(rtd, 0);
} else if (rtd->num_cpus == rtd->num_codecs) {
- cpu_dai = rtd->cpu_dais[i];
+ cpu_dai = asoc_rtd_to_cpu(rtd, i);
} else {
dev_err(rtd->card->dev,
"N cpus to M codecs link is not supported yet\n");
@@ -2980,7 +2885,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
snprintf(new_name, sizeof(new_name), "%s %s-%d",
rtd->dai_link->stream_name,
(rtd->num_codecs > 1) ?
- "multicodec" : rtd->codec_dai->name, num);
+ "multicodec" : asoc_rtd_to_codec(rtd, 0)->name, num);
ret = snd_pcm_new(rtd->card->snd_card, new_name, num, playback,
capture, &pcm);
@@ -3059,8 +2964,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
pcm->no_device_suspend = true;
out:
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n",
- (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name,
- (rtd->num_cpus > 1) ? "multicpu" : rtd->cpu_dai->name);
+ (rtd->num_codecs > 1) ? "multicodec" : asoc_rtd_to_codec(rtd, 0)->name,
+ (rtd->num_cpus > 1) ? "multicpu" : asoc_rtd_to_cpu(rtd, 0)->name);
return ret;
}
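
The soc-pcm.c hunks above fold the open-coded per-DAI loops (trigger, bespoke trigger, hw_free) into single helper calls such as snd_soc_pcm_dai_trigger() and snd_soc_link_trigger(). A minimal standalone sketch of the "walk every DAI, stop at the first error" loop those helpers are assumed to encapsulate follows; every identifier in it is invented for illustration and it is not kernel code.

/*
 * Illustrative model only -- not kernel code. It sketches the
 * per-DAI loop that helpers such as snd_soc_pcm_dai_trigger()
 * are assumed to wrap; names and types are invented.
 */
#include <stdio.h>

struct fake_dai {
        const char *name;
        int (*trigger)(const struct fake_dai *dai, int cmd);
};

static int ok_trigger(const struct fake_dai *dai, int cmd)
{
        printf("%s: cmd %d\n", dai->name, cmd);
        return 0;
}

/* stand-in for the consolidated per-DAI trigger helper */
static int model_pcm_dai_trigger(const struct fake_dai *dais, int n, int cmd)
{
        int i, ret;

        for (i = 0; i < n; i++) {
                ret = dais[i].trigger(&dais[i], cmd);
                if (ret < 0)
                        return ret;     /* first failure wins */
        }
        return 0;
}

int main(void)
{
        const struct fake_dai dais[] = {
                { "cpu-dai",   ok_trigger },
                { "codec-dai", ok_trigger },
        };

        return model_pcm_dai_trigger(dais, 2, 1 /* e.g. START */);
}
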
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 6df3b0d12d87..9e89633676b7 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -246,8 +246,8 @@ static inline void soc_control_err(struct soc_tplg *tplg,
}
/* pass vendor data to component driver for processing */
-static int soc_tplg_vendor_load_(struct soc_tplg *tplg,
- struct snd_soc_tplg_hdr *hdr)
+static int soc_tplg_vendor_load(struct soc_tplg *tplg,
+ struct snd_soc_tplg_hdr *hdr)
{
int ret = 0;
@@ -268,16 +268,6 @@ static int soc_tplg_vendor_load_(struct soc_tplg *tplg,
return ret;
}
-/* pass vendor data to component driver for processing */
-static int soc_tplg_vendor_load(struct soc_tplg *tplg,
- struct snd_soc_tplg_hdr *hdr)
-{
- if (tplg->pass != SOC_TPLG_PASS_VENDOR)
- return 0;
-
- return soc_tplg_vendor_load_(tplg, hdr);
-}
-
/* optionally pass new dynamic widget to component driver. This is mainly for
* external widgets where we can assign private data/ops */
static int soc_tplg_widget_load(struct soc_tplg *tplg,
@@ -1127,12 +1117,6 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg,
int ret;
int i;
- if (tplg->pass != SOC_TPLG_PASS_MIXER) {
- tplg->pos += le32_to_cpu(hdr->size) +
- le32_to_cpu(hdr->payload_size);
- return 0;
- }
-
dev_dbg(tplg->dev, "ASoC: adding %d kcontrols at 0x%lx\n", hdr->count,
soc_tplg_get_offset(tplg));
@@ -1204,14 +1188,6 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
count = le32_to_cpu(hdr->count);
- if (tplg->pass != SOC_TPLG_PASS_GRAPH) {
- tplg->pos +=
- le32_to_cpu(hdr->size) +
- le32_to_cpu(hdr->payload_size);
-
- return 0;
- }
-
if (soc_tplg_check_elem_count(tplg,
sizeof(struct snd_soc_tplg_dapm_graph_elem),
count, le32_to_cpu(hdr->payload_size), "graph")) {
@@ -1741,9 +1717,6 @@ static int soc_tplg_dapm_widget_elems_load(struct soc_tplg *tplg,
count = le32_to_cpu(hdr->count);
- if (tplg->pass != SOC_TPLG_PASS_WIDGET)
- return 0;
-
dev_dbg(tplg->dev, "ASoC: adding %d DAPM widgets\n", count);
for (i = 0; i < count; i++) {
@@ -2101,9 +2074,6 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
count = le32_to_cpu(hdr->count);
- if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
- return 0;
-
/* check the element size and count */
pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
size = le32_to_cpu(pcm->size);
@@ -2386,12 +2356,6 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
count = le32_to_cpu(hdr->count);
- if (tplg->pass != SOC_TPLG_PASS_LINK) {
- tplg->pos += le32_to_cpu(hdr->size) +
- le32_to_cpu(hdr->payload_size);
- return 0;
- };
-
/* check the element size and count */
link = (struct snd_soc_tplg_link_config *)tplg->pos;
size = le32_to_cpu(link->size);
@@ -2528,9 +2492,6 @@ static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
count = le32_to_cpu(hdr->count);
- if (tplg->pass != SOC_TPLG_PASS_BE_DAI)
- return 0;
-
/* config the existing BE DAIs */
for (i = 0; i < count; i++) {
dai = (struct snd_soc_tplg_dai *)tplg->pos;
@@ -2610,9 +2571,6 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
bool abi_match;
int ret = 0;
- if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
- return 0;
-
manifest = (struct snd_soc_tplg_manifest *)tplg->pos;
/* check ABI version by size, create a new manifest if abi not match */
@@ -2685,12 +2643,6 @@ static int soc_valid_header(struct soc_tplg *tplg,
return -EINVAL;
}
- if (tplg->pass == le32_to_cpu(hdr->type))
- dev_dbg(tplg->dev,
- "ASoC: Got 0x%x bytes of type %d version %d vendor %d at pass %d\n",
- hdr->payload_size, hdr->type, hdr->version,
- hdr->vendor_type, tplg->pass);
-
return 1;
}
@@ -2698,6 +2650,10 @@ static int soc_valid_header(struct soc_tplg *tplg,
static int soc_tplg_load_header(struct soc_tplg *tplg,
struct snd_soc_tplg_hdr *hdr)
{
+ int (*elem_load)(struct soc_tplg *tplg,
+ struct snd_soc_tplg_hdr *hdr);
+ unsigned int hdr_pass;
+
tplg->pos = tplg->hdr_pos + sizeof(struct snd_soc_tplg_hdr);
/* check for matching ID */
@@ -2711,24 +2667,48 @@ static int soc_tplg_load_header(struct soc_tplg *tplg,
case SND_SOC_TPLG_TYPE_MIXER:
case SND_SOC_TPLG_TYPE_ENUM:
case SND_SOC_TPLG_TYPE_BYTES:
- return soc_tplg_kcontrol_elems_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_MIXER;
+ elem_load = soc_tplg_kcontrol_elems_load;
+ break;
case SND_SOC_TPLG_TYPE_DAPM_GRAPH:
- return soc_tplg_dapm_graph_elems_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_GRAPH;
+ elem_load = soc_tplg_dapm_graph_elems_load;
+ break;
case SND_SOC_TPLG_TYPE_DAPM_WIDGET:
- return soc_tplg_dapm_widget_elems_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_WIDGET;
+ elem_load = soc_tplg_dapm_widget_elems_load;
+ break;
case SND_SOC_TPLG_TYPE_PCM:
- return soc_tplg_pcm_elems_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_PCM_DAI;
+ elem_load = soc_tplg_pcm_elems_load;
+ break;
case SND_SOC_TPLG_TYPE_DAI:
- return soc_tplg_dai_elems_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_BE_DAI;
+ elem_load = soc_tplg_dai_elems_load;
+ break;
case SND_SOC_TPLG_TYPE_DAI_LINK:
case SND_SOC_TPLG_TYPE_BACKEND_LINK:
/* physical link configurations */
- return soc_tplg_link_elems_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_LINK;
+ elem_load = soc_tplg_link_elems_load;
+ break;
case SND_SOC_TPLG_TYPE_MANIFEST:
- return soc_tplg_manifest_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_MANIFEST;
+ elem_load = soc_tplg_manifest_load;
+ break;
default:
/* bespoke vendor data object */
- return soc_tplg_vendor_load(tplg, hdr);
+ hdr_pass = SOC_TPLG_PASS_VENDOR;
+ elem_load = soc_tplg_vendor_load;
+ break;
+ }
+
+ if (tplg->pass == hdr_pass) {
+ dev_dbg(tplg->dev,
+ "ASoC: Got 0x%x bytes of type %d version %d vendor %d at pass %d\n",
+ hdr->payload_size, hdr->type, hdr->version,
+ hdr->vendor_type, tplg->pass);
+ return elem_load(tplg, hdr);
}
return 0;
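
The soc_tplg_load_header() rewrite above replaces the per-loader pass checks with one switch that pairs each header type with its pass and element loader, then invokes the loader only when the current pass matches. A standalone sketch of that table-dispatch idea, using invented names throughout:

/*
 * Illustrative model only. Map a header type to a (pass, handler)
 * pair and run the handler only on the matching pass; headers for
 * other passes are skipped. All identifiers are invented.
 */
#include <stdio.h>

enum { PASS_MANIFEST, PASS_MIXER, PASS_GRAPH };
enum { TYPE_MANIFEST, TYPE_MIXER, TYPE_GRAPH };

static int load_manifest(int type) { printf("manifest %d\n", type); return 0; }
static int load_mixer(int type)    { printf("mixer %d\n", type);    return 0; }
static int load_graph(int type)    { printf("graph %d\n", type);    return 0; }

static int load_header(int cur_pass, int type)
{
        int (*elem_load)(int type);
        int hdr_pass;

        switch (type) {
        case TYPE_MIXER:
                hdr_pass = PASS_MIXER;
                elem_load = load_mixer;
                break;
        case TYPE_GRAPH:
                hdr_pass = PASS_GRAPH;
                elem_load = load_graph;
                break;
        default:
                hdr_pass = PASS_MANIFEST;
                elem_load = load_manifest;
                break;
        }

        if (cur_pass == hdr_pass)
                return elem_load(type);
        return 0;       /* not this pass: silently skip */
}

int main(void)
{
        int pass, ret = 0;

        for (pass = PASS_MANIFEST; pass <= PASS_GRAPH && !ret; pass++) {
                ret = load_header(pass, TYPE_MIXER);
                if (!ret)
                        ret = load_header(pass, TYPE_GRAPH);
        }
        return ret;
}
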
diff --git a/sound/soc/sof/Makefile b/sound/soc/sof/Makefile
index 8eca2f85c90e..05718dfe6cd2 100644
--- a/sound/soc/sof/Makefile
+++ b/sound/soc/sof/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
snd-sof-objs := core.o ops.o loader.o ipc.o pcm.o pm.o debug.o topology.o\
control.o trace.o utils.o sof-audio.o
diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c
index 7354dc6a49cf..2d4969c705a4 100644
--- a/sound/soc/sof/compress.c
+++ b/sound/soc/sof/compress.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -13,7 +13,7 @@
#include "ops.h"
#include "probe.h"
-struct snd_compr_ops sof_probe_compressed_ops = {
+struct snd_compress_ops sof_probe_compressed_ops = {
.copy = sof_probe_compr_copy,
};
EXPORT_SYMBOL(sof_probe_compressed_ops);
@@ -117,8 +117,9 @@ int sof_probe_compr_pointer(struct snd_compr_stream *cstream,
}
EXPORT_SYMBOL(sof_probe_compr_pointer);
-int sof_probe_compr_copy(struct snd_compr_stream *cstream,
- char __user *buf, size_t count)
+int sof_probe_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count)
{
struct snd_compr_runtime *rtd = cstream->runtime;
unsigned int offset, n;
diff --git a/sound/soc/sof/compress.h b/sound/soc/sof/compress.h
index 800f163603e1..ca8790bd4b13 100644
--- a/sound/soc/sof/compress.h
+++ b/sound/soc/sof/compress.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -13,7 +13,7 @@
#include <sound/compress_driver.h>
-extern struct snd_compr_ops sof_probe_compressed_ops;
+extern struct snd_compress_ops sof_probe_compressed_ops;
int sof_probe_compr_open(struct snd_compr_stream *cstream,
struct snd_soc_dai *dai);
@@ -25,7 +25,8 @@ int sof_probe_compr_trigger(struct snd_compr_stream *cstream, int cmd,
struct snd_soc_dai *dai);
int sof_probe_compr_pointer(struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp, struct snd_soc_dai *dai);
-int sof_probe_compr_copy(struct snd_compr_stream *cstream,
- char __user *buf, size_t count);
+int sof_probe_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
+ char __user *buf, size_t count);
#endif
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
index dfc412e2d956..186eea105bb1 100644
--- a/sound/soc/sof/control.c
+++ b/sound/soc/sof/control.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -19,8 +19,8 @@ static void update_mute_led(struct snd_sof_control *scontrol,
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- unsigned int temp = 0;
- unsigned int mask;
+ int temp = 0;
+ int mask;
int i;
mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 91acfae7935c..339c4930b0c0 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -176,6 +176,7 @@ static int sof_probe_continue(struct snd_sof_dev *sdev)
/* init the IPC */
sdev->ipc = snd_sof_ipc_init(sdev);
if (!sdev->ipc) {
+ ret = -ENOMEM;
dev_err(sdev->dev, "error: failed to init DSP IPC %d\n", ret);
goto ipc_err;
}
@@ -342,6 +343,12 @@ int snd_sof_device_remove(struct device *dev)
{
struct snd_sof_dev *sdev = dev_get_drvdata(dev);
struct snd_sof_pdata *pdata = sdev->pdata;
+ int ret;
+
+ ret = snd_sof_dsp_power_down_notify(sdev);
+ if (ret < 0)
+ dev_warn(dev, "error: %d failed to prepare DSP for device removal",
+ ret);
if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
cancel_work_sync(&sdev->probe_work);
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index b5c0d6cf72cc..8e15f105d1d5 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/imx/Kconfig b/sound/soc/sof/imx/Kconfig
index bae4f7bf5f75..8230285baa43 100644
--- a/sound/soc/sof/imx/Kconfig
+++ b/sound/soc/sof/imx/Kconfig
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
config SND_SOC_SOF_IMX_TOPLEVEL
bool "SOF support for NXP i.MX audio DSPs"
@@ -11,17 +11,41 @@ config SND_SOC_SOF_IMX_TOPLEVEL
if SND_SOC_SOF_IMX_TOPLEVEL
+config SND_SOC_SOF_IMX_OF
+ def_tristate SND_SOC_SOF_OF
+ select SND_SOC_SOF_IMX8 if SND_SOC_SOF_IMX8_SUPPORT
+ select SND_SOC_SOF_IMX8M if SND_SOC_SOF_IMX8M_SUPPORT
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
config SND_SOC_SOF_IMX8_SUPPORT
bool "SOF support for i.MX8"
- depends on IMX_SCU
- depends on IMX_DSP
+ depends on IMX_SCU=y || IMX_SCU=SND_SOC_SOF_IMX_OF
+ depends on IMX_DSP=y || IMX_DSP=SND_SOC_SOF_IMX_OF
help
This adds support for Sound Open Firmware for NXP i.MX8 platforms
Say Y if you have such a device.
If unsure select "N".
config SND_SOC_SOF_IMX8
- def_tristate SND_SOC_SOF_OF
- depends on SND_SOC_SOF_IMX8_SUPPORT
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
+
+config SND_SOC_SOF_IMX8M_SUPPORT
+ bool "SOF support for i.MX8M"
+ depends on IMX_DSP=y || IMX_DSP=SND_SOC_SOF_OF
+ help
+ This adds support for Sound Open Firmware for NXP i.MX8M platforms
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_SOF_IMX8M
+ tristate
+ help
+ This option is not user-selectable but automagically handled by
+ 'select' statements at a higher level
endif ## SND_SOC_SOF_IMX_IMX_TOPLEVEL
diff --git a/sound/soc/sof/imx/Makefile b/sound/soc/sof/imx/Makefile
index 6ef908e8c807..2b933b02bbac 100644
--- a/sound/soc/sof/imx/Makefile
+++ b/sound/soc/sof/imx/Makefile
@@ -1,4 +1,6 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
snd-sof-imx8-objs := imx8.o
+snd-sof-imx8m-objs := imx8m.o
obj-$(CONFIG_SND_SOC_SOF_IMX8) += snd-sof-imx8.o
+obj-$(CONFIG_SND_SOC_SOF_IMX8M) += snd-sof-imx8m.o
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index b692752b2178..63f9c20a1bac 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright 2019 NXP
//
@@ -119,7 +119,7 @@ static void imx8_dsp_handle_request(struct imx_dsp_ipc *ipc)
snd_sof_ipc_msgs_rx(priv->sdev);
}
-struct imx_dsp_ops dsp_ops = {
+static struct imx_dsp_ops dsp_ops = {
.handle_reply = imx8_dsp_handle_reply,
.handle_request = imx8_dsp_handle_request,
};
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
new file mode 100644
index 000000000000..fa86a9e2990f
--- /dev/null
+++ b/sound/soc/sof/imx/imx8m.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+//
+// Copyright 2020 NXP
+//
+// Author: Daniel Baluta <daniel.baluta@nxp.com>
+//
+// Hardware interface for audio DSP on i.MX8M
+
+#include <linux/firmware.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/module.h>
+#include <sound/sof.h>
+#include <sound/sof/xtensa.h>
+#include <linux/firmware/imx/dsp.h>
+
+#include "../ops.h"
+
+#define MBOX_OFFSET 0x800000
+#define MBOX_SIZE 0x1000
+
+struct imx8m_priv {
+ struct device *dev;
+ struct snd_sof_dev *sdev;
+
+ /* DSP IPC handler */
+ struct imx_dsp_ipc *dsp_ipc;
+ struct platform_device *ipc_dev;
+};
+
+static void imx8m_get_reply(struct snd_sof_dev *sdev)
+{
+ struct snd_sof_ipc_msg *msg = sdev->msg;
+ struct sof_ipc_reply reply;
+ int ret = 0;
+
+ if (!msg) {
+ dev_warn(sdev->dev, "unexpected ipc interrupt\n");
+ return;
+ }
+
+ /* get reply */
+ sof_mailbox_read(sdev, sdev->host_box.offset, &reply, sizeof(reply));
+
+ if (reply.error < 0) {
+ memcpy(msg->reply_data, &reply, sizeof(reply));
+ ret = reply.error;
+ } else {
+ /* reply has correct size? */
+ if (reply.hdr.size != msg->reply_size) {
+ dev_err(sdev->dev, "error: reply expected %zu got %u bytes\n",
+ msg->reply_size, reply.hdr.size);
+ ret = -EINVAL;
+ }
+
+ /* read the message */
+ if (msg->reply_size > 0)
+ sof_mailbox_read(sdev, sdev->host_box.offset,
+ msg->reply_data, msg->reply_size);
+ }
+
+ msg->reply_error = ret;
+}
+
+static int imx8m_get_mailbox_offset(struct snd_sof_dev *sdev)
+{
+ return MBOX_OFFSET;
+}
+
+static int imx8m_get_window_offset(struct snd_sof_dev *sdev, u32 id)
+{
+ return MBOX_OFFSET;
+}
+
+static void imx8m_dsp_handle_reply(struct imx_dsp_ipc *ipc)
+{
+ struct imx8m_priv *priv = imx_dsp_get_data(ipc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sdev->ipc_lock, flags);
+ imx8m_get_reply(priv->sdev);
+ snd_sof_ipc_reply(priv->sdev, 0);
+ spin_unlock_irqrestore(&priv->sdev->ipc_lock, flags);
+}
+
+static void imx8m_dsp_handle_request(struct imx_dsp_ipc *ipc)
+{
+ struct imx8m_priv *priv = imx_dsp_get_data(ipc);
+
+ snd_sof_ipc_msgs_rx(priv->sdev);
+}
+
+static struct imx_dsp_ops imx8m_dsp_ops = {
+ .handle_reply = imx8m_dsp_handle_reply,
+ .handle_request = imx8m_dsp_handle_request,
+};
+
+static int imx8m_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
+{
+ struct imx8m_priv *priv = (struct imx8m_priv *)sdev->private;
+
+ sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
+ msg->msg_size);
+ imx_dsp_ring_doorbell(priv->dsp_ipc, 0);
+
+ return 0;
+}
+
+/*
+ * DSP control.
+ */
+static int imx8m_run(struct snd_sof_dev *sdev)
+{
+ /* TODO: start DSP using Audio MIX bits */
+ return 0;
+}
+
+static int imx8m_probe(struct snd_sof_dev *sdev)
+{
+ struct platform_device *pdev =
+ container_of(sdev->dev, struct platform_device, dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *res_node;
+ struct resource *mmio;
+ struct imx8m_priv *priv;
+ struct resource res;
+ u32 base, size;
+ int ret = 0;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ sdev->private = priv;
+ priv->dev = sdev->dev;
+ priv->sdev = sdev;
+
+ priv->ipc_dev = platform_device_register_data(sdev->dev, "imx-dsp",
+ PLATFORM_DEVID_NONE,
+ pdev, sizeof(*pdev));
+ if (IS_ERR(priv->ipc_dev))
+ return PTR_ERR(priv->ipc_dev);
+
+ priv->dsp_ipc = dev_get_drvdata(&priv->ipc_dev->dev);
+ if (!priv->dsp_ipc) {
+ /* DSP IPC driver not probed yet, try later */
+ ret = -EPROBE_DEFER;
+ dev_err(sdev->dev, "Failed to get drvdata\n");
+ goto exit_pdev_unregister;
+ }
+
+ imx_dsp_set_data(priv->dsp_ipc, priv);
+ priv->dsp_ipc->ops = &imx8m_dsp_ops;
+
+ /* DSP base */
+ mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mmio) {
+ base = mmio->start;
+ size = resource_size(mmio);
+ } else {
+ dev_err(sdev->dev, "error: failed to get DSP base at idx 0\n");
+ ret = -EINVAL;
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_IRAM] = devm_ioremap(sdev->dev, base, size);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_IRAM]) {
+ dev_err(sdev->dev, "failed to ioremap base 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+ sdev->mmio_bar = SOF_FW_BLK_TYPE_IRAM;
+
+ res_node = of_parse_phandle(np, "memory-region", 0);
+ if (!res_node) {
+ dev_err(&pdev->dev, "failed to get memory region node\n");
+ ret = -ENODEV;
+ goto exit_pdev_unregister;
+ }
+
+ ret = of_address_to_resource(res_node, 0, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get reserved region address\n");
+ goto exit_pdev_unregister;
+ }
+
+ sdev->bar[SOF_FW_BLK_TYPE_SRAM] = devm_ioremap_wc(sdev->dev, res.start,
+ res.end - res.start +
+ 1);
+ if (!sdev->bar[SOF_FW_BLK_TYPE_SRAM]) {
+ dev_err(sdev->dev, "failed to ioremap mem 0x%x size 0x%x\n",
+ base, size);
+ ret = -ENOMEM;
+ goto exit_pdev_unregister;
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
+ /* set default mailbox offset for FW ready message */
+ sdev->dsp_box.offset = MBOX_OFFSET;
+
+ return 0;
+
+exit_pdev_unregister:
+ platform_device_unregister(priv->ipc_dev);
+ return ret;
+}
+
+static int imx8m_remove(struct snd_sof_dev *sdev)
+{
+ struct imx8m_priv *priv = (struct imx8m_priv *)sdev->private;
+
+ platform_device_unregister(priv->ipc_dev);
+
+ return 0;
+}
+
+/* on i.MX8 there is 1 to 1 match between type and BAR idx */
+static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
+{
+ return type;
+}
+
+static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ void *p, size_t sz)
+{
+ sof_mailbox_read(sdev, sdev->dsp_box.offset, p, sz);
+}
+
+static int imx8m_ipc_pcm_params(struct snd_sof_dev *sdev,
+ struct snd_pcm_substream *substream,
+ const struct sof_ipc_pcm_params_reply *reply)
+{
+ return 0;
+}
+
+static struct snd_soc_dai_driver imx8m_dai[] = {
+{
+ .name = "sai-port",
+},
+};
+
+/* i.MX8 ops */
+struct snd_sof_dsp_ops sof_imx8m_ops = {
+ /* probe and remove */
+ .probe = imx8m_probe,
+ .remove = imx8m_remove,
+ /* DSP core boot */
+ .run = imx8m_run,
+
+ /* Block IO */
+ .block_read = sof_block_read,
+ .block_write = sof_block_write,
+
+ /* ipc */
+ .send_msg = imx8m_send_msg,
+ .fw_ready = sof_fw_ready,
+ .get_mailbox_offset = imx8m_get_mailbox_offset,
+ .get_window_offset = imx8m_get_window_offset,
+
+ .ipc_msg_data = imx8m_ipc_msg_data,
+ .ipc_pcm_params = imx8m_ipc_pcm_params,
+
+ /* module loading */
+ .load_module = snd_sof_parse_module_memcpy,
+ .get_bar_index = imx8m_get_bar_index,
+ /* firmware loading */
+ .load_firmware = snd_sof_load_firmware_memcpy,
+
+ /* DAI drivers */
+ .drv = imx8m_dai,
+ .num_drv = 1, /* we have only 1 SAI interface on i.MX8M */
+
+ .hw_info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+};
+EXPORT_SYMBOL(sof_imx8m_ops);
+
+MODULE_LICENSE("Dual BSD/GPL");
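
The new imx8m.c file exports sof_imx8m_ops, but the glue that selects it is not part of this hunk; presumably an OF device descriptor elsewhere points at the ops and is matched by compatible string. The standalone sketch below only models that lookup idea; the descriptor layout and compatible strings are assumptions, not taken from the diff.

/*
 * Illustrative model only, and an assumption: the descriptor
 * structure and compatible strings below are invented to show how
 * an exported ops table might be selected at probe time.
 */
#include <stdio.h>
#include <string.h>

struct model_dsp_ops { const char *name; };

static const struct model_dsp_ops imx8_ops  = { "imx8"  };
static const struct model_dsp_ops imx8m_ops = { "imx8m" };

struct model_desc {
        const char *compatible;
        const struct model_dsp_ops *ops;
};

static const struct model_desc descs[] = {
        { "vendor,imx8-dsp",  &imx8_ops  },   /* hypothetical */
        { "vendor,imx8m-dsp", &imx8m_ops },   /* hypothetical */
};

static const struct model_dsp_ops *lookup(const char *compatible)
{
        size_t i;

        for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
                if (!strcmp(descs[i].compatible, compatible))
                        return descs[i].ops;
        return NULL;
}

int main(void)
{
        const struct model_dsp_ops *ops = lookup("vendor,imx8m-dsp");

        printf("selected ops: %s\n", ops ? ops->name : "none");
        return 0;
}
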
diff --git a/sound/soc/sof/intel/Makefile b/sound/soc/sof/intel/Makefile
index cee02a2e00f4..f7e9358f1f06 100644
--- a/sound/soc/sof/intel/Makefile
+++ b/sound/soc/sof/intel/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
snd-sof-intel-byt-objs := byt.o
snd-sof-intel-bdw-objs := bdw.o
diff --git a/sound/soc/sof/intel/apl.c b/sound/soc/sof/intel/apl.c
index 02218d22e51f..9e29d4fd393a 100644
--- a/sound/soc/sof/intel/apl.c
+++ b/sound/soc/sof/intel/apl.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/bdw.c b/sound/soc/sof/intel/bdw.c
index a32a3ef78ec5..99fd0bd7276e 100644
--- a/sound/soc/sof/intel/bdw.c
+++ b/sound/soc/sof/intel/bdw.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/byt.c b/sound/soc/sof/intel/byt.c
index 29fd1d86156c..49f67f1b94e0 100644
--- a/sound/soc/sof/intel/byt.c
+++ b/sound/soc/sof/intel/byt.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -82,33 +82,6 @@ static const struct snd_sof_debugfs_map byt_debugfs[] = {
SOF_DEBUGFS_ACCESS_ALWAYS},
};
-static const struct snd_sof_debugfs_map cht_debugfs[] = {
- {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"dmac2", BYT_DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp3", BYT_DSP_BAR, SSP3_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp4", BYT_DSP_BAR, SSP4_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"ssp5", BYT_DSP_BAR, SSP5_OFFSET, SSP_SIZE,
- SOF_DEBUGFS_ACCESS_ALWAYS},
- {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
- SOF_DEBUGFS_ACCESS_D0_ONLY},
- {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
- SOF_DEBUGFS_ACCESS_D0_ONLY},
- {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
- SOF_DEBUGFS_ACCESS_ALWAYS},
-};
-
static void byt_host_done(struct snd_sof_dev *sdev);
static void byt_dsp_done(struct snd_sof_dev *sdev);
static void byt_get_reply(struct snd_sof_dev *sdev);
@@ -187,13 +160,31 @@ static void byt_dump(struct snd_sof_dev *sdev, u32 flags)
static irqreturn_t byt_irq_handler(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
- u64 isr;
+ u64 ipcx, ipcd;
int ret = IRQ_NONE;
- /* Interrupt arrived, check src */
- isr = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_ISRX);
- if (isr & (SHIM_ISRX_DONE | SHIM_ISRX_BUSY))
+ ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
+
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
+
+ /* reply message from DSP, Mask Done interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
+
+ /* new message from DSP, Mask Busy interrupt first */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
+ SHIM_IMRX,
+ SHIM_IMRX_BUSY,
+ SHIM_IMRX_BUSY);
ret = IRQ_WAKE_THREAD;
+ }
return ret;
}
@@ -202,19 +193,12 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
u64 ipcx, ipcd;
- u64 imrx;
- imrx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IMRX);
ipcx = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCX);
+ ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
/* reply message from DSP */
- if (ipcx & SHIM_BYT_IPCX_DONE &&
- !(imrx & SHIM_IMRX_DONE)) {
- /* Mask Done interrupt before first */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
- SHIM_IMRX,
- SHIM_IMRX_DONE,
- SHIM_IMRX_DONE);
+ if (ipcx & SHIM_BYT_IPCX_DONE) {
spin_lock_irq(&sdev->ipc_lock);
@@ -234,14 +218,7 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
}
/* new message from DSP */
- ipcd = snd_sof_dsp_read64(sdev, BYT_DSP_BAR, SHIM_IPCD);
- if (ipcd & SHIM_BYT_IPCD_BUSY &&
- !(imrx & SHIM_IMRX_BUSY)) {
- /* Mask Busy interrupt before return */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR,
- SHIM_IMRX,
- SHIM_IMRX_BUSY,
- SHIM_IMRX_BUSY);
+ if (ipcd & SHIM_BYT_IPCD_BUSY) {
/* Handle messages from DSP Core */
if ((ipcd & SOF_IPC_PANIC_MAGIC_MASK) == SOF_IPC_PANIC_MAGIC) {
@@ -259,6 +236,10 @@ static irqreturn_t byt_irq_thread(int irq, void *context)
static int byt_send_msg(struct snd_sof_dev *sdev, struct snd_sof_ipc_msg *msg)
{
+ /* unmask and prepare to receive Done interrupt */
+ snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_DONE, 0);
+
/* send the message */
sof_mailbox_write(sdev, sdev->host_box.offset, msg->msg_data,
msg->msg_size);
@@ -324,7 +305,7 @@ static void byt_host_done(struct snd_sof_dev *sdev)
SHIM_BYT_IPCD_DONE,
SHIM_BYT_IPCD_DONE);
- /* unmask busy interrupt */
+ /* unmask and prepare to receive next new message */
snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
SHIM_IMRX_BUSY, 0);
}
@@ -334,10 +315,6 @@ static void byt_dsp_done(struct snd_sof_dev *sdev)
/* clear DONE bit - tell DSP we have completed */
snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IPCX,
SHIM_BYT_IPCX_DONE, 0);
-
- /* unmask Done interrupt */
- snd_sof_dsp_update_bits64_unlocked(sdev, BYT_DSP_BAR, SHIM_IMRX,
- SHIM_IMRX_DONE, 0);
}
/*
@@ -594,9 +571,10 @@ irq:
return ret;
}
- /* enable Interrupt from both sides */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x0);
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x0);
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = MBOX_OFFSET;
@@ -681,6 +659,69 @@ EXPORT_SYMBOL_NS(tng_chip_info, SND_SOC_SOF_MERRIFIELD);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL)
+static void byt_reset_dsp_disable_int(struct snd_sof_dev *sdev)
+{
+ /* Disable Interrupt from both sides */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x3);
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x3);
+
+ /* Put DSP into reset, set reset vector */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_CSR,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL,
+ SHIM_BYT_CSR_RST | SHIM_BYT_CSR_VECTOR_SEL);
+}
+
+static int byt_suspend(struct snd_sof_dev *sdev, u32 target_state)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static int byt_resume(struct snd_sof_dev *sdev)
+{
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
+
+ return 0;
+}
+
+static int byt_remove(struct snd_sof_dev *sdev)
+{
+ byt_reset_dsp_disable_int(sdev);
+
+ return 0;
+}
+
+static const struct snd_sof_debugfs_map cht_debugfs[] = {
+ {"dmac0", BYT_DSP_BAR, DMAC0_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac1", BYT_DSP_BAR, DMAC1_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"dmac2", BYT_DSP_BAR, DMAC2_OFFSET, DMAC_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp0", BYT_DSP_BAR, SSP0_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp1", BYT_DSP_BAR, SSP1_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp2", BYT_DSP_BAR, SSP2_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp3", BYT_DSP_BAR, SSP3_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp4", BYT_DSP_BAR, SSP4_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"ssp5", BYT_DSP_BAR, SSP5_OFFSET, SSP_SIZE,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+ {"iram", BYT_DSP_BAR, IRAM_OFFSET, IRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"dram", BYT_DSP_BAR, DRAM_OFFSET, DRAM_SIZE,
+ SOF_DEBUGFS_ACCESS_D0_ONLY},
+ {"shim", BYT_DSP_BAR, SHIM_OFFSET, SHIM_SIZE_CHT,
+ SOF_DEBUGFS_ACCESS_ALWAYS},
+};
+
static int byt_acpi_probe(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *pdata = sdev->pdata;
@@ -769,9 +810,10 @@ irq:
return ret;
}
- /* enable Interrupt from both sides */
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX, 0x3, 0x0);
- snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRD, 0x3, 0x0);
+ /* enable BUSY and disable DONE Interrupt by default */
+ snd_sof_dsp_update_bits64(sdev, BYT_DSP_BAR, SHIM_IMRX,
+ SHIM_IMRX_BUSY | SHIM_IMRX_DONE,
+ SHIM_IMRX_DONE);
/* set default mailbox offset for FW ready message */
sdev->dsp_box.offset = MBOX_OFFSET;
@@ -783,6 +825,7 @@ irq:
const struct snd_sof_dsp_ops sof_byt_ops = {
/* device init */
.probe = byt_acpi_probe,
+ .remove = byt_remove,
/* DSP core boot / reset */
.run = byt_run,
@@ -832,6 +875,10 @@ const struct snd_sof_dsp_ops sof_byt_ops = {
/*Firmware loading */
.load_firmware = snd_sof_load_firmware_memcpy,
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
/* DAI drivers */
.drv = byt_dai,
.num_drv = 3, /* we have only 3 SSPs on byt*/
@@ -857,6 +904,7 @@ EXPORT_SYMBOL_NS(byt_chip_info, SND_SOC_SOF_BAYTRAIL);
const struct snd_sof_dsp_ops sof_cht_ops = {
/* device init */
.probe = byt_acpi_probe,
+ .remove = byt_remove,
/* DSP core boot / reset */
.run = byt_run,
@@ -906,6 +954,10 @@ const struct snd_sof_dsp_ops sof_cht_ops = {
/*Firmware loading */
.load_firmware = snd_sof_load_firmware_memcpy,
+ /* PM */
+ .suspend = byt_suspend,
+ .resume = byt_resume,
+
/* DAI drivers */
.drv = byt_dai,
/* all 6 SSPs may be available for cherrytrail */
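
The byt.c changes above rework the IPC interrupt handshake: the hard IRQ handler masks the DONE or BUSY interrupt it just observed, byt_send_msg() unmasks DONE before each outgoing message, and byt_host_done() re-arms BUSY once a request has been handled. A standalone model of that mask/unmask sequencing, with invented register bits:

/*
 * Illustrative model only. Mask bits and function names are
 * invented; the sequencing mirrors the byt.c rework above.
 */
#include <stdio.h>
#include <stdint.h>

#define IMR_DONE  (1u << 0)     /* bit set = interrupt masked */
#define IMR_BUSY  (1u << 1)

static uint32_t imr = IMR_DONE; /* default: BUSY enabled, DONE disabled */

static void irq_handler(int done, int busy)
{
        if (done)
                imr |= IMR_DONE;        /* mask DONE until the reply is read */
        if (busy)
                imr |= IMR_BUSY;        /* mask BUSY until the request is handled */
}

static void send_msg(void)
{
        imr &= ~IMR_DONE;               /* expect a DONE for this message */
        printf("msg sent, imr=0x%x\n", imr);
}

static void host_done(void)
{
        imr &= ~IMR_BUSY;               /* re-arm for the next request */
        printf("request handled, imr=0x%x\n", imr);
}

int main(void)
{
        send_msg();                     /* unmask DONE */
        irq_handler(1, 0);              /* DONE fires, gets masked */
        irq_handler(0, 1);              /* BUSY fires, gets masked */
        host_done();                    /* unmask BUSY again */
        printf("final imr=0x%x\n", imr);
        return 0;
}
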
diff --git a/sound/soc/sof/intel/cnl.c b/sound/soc/sof/intel/cnl.c
index e427d00eca71..16db0f50d139 100644
--- a/sound/soc/sof/intel/cnl.c
+++ b/sound/soc/sof/intel/cnl.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
index 1d2babdda9dd..789148e5584b 100644
--- a/sound/soc/sof/intel/hda-bus.c
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 3041fbbb010a..2c5c451fa19d 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -24,19 +24,44 @@
#define IDISP_VID_INTEL 0x80860000
/* load the legacy HDA codec driver */
-static int hda_codec_load_module(struct hda_codec *codec)
+static int request_codec_module(struct hda_codec *codec)
{
#ifdef MODULE
char alias[MODULE_NAME_LEN];
- const char *module = alias;
+ const char *mod = NULL;
- snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
- dev_dbg(&codec->core.dev, "loading codec module: %s\n", module);
- request_module(module);
+ switch (codec->probe_id) {
+ case HDA_CODEC_ID_GENERIC:
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
+ mod = "snd-hda-codec-generic";
#endif
+ break;
+ default:
+ snd_hdac_codec_modalias(&codec->core, alias, sizeof(alias));
+ mod = alias;
+ break;
+ }
+
+ if (mod) {
+ dev_dbg(&codec->core.dev, "loading codec module: %s\n", mod);
+ request_module(mod);
+ }
+#endif /* MODULE */
return device_attach(hda_codec_dev(codec));
}
+static int hda_codec_load_module(struct hda_codec *codec)
+{
+ int ret = request_codec_module(codec);
+
+ if (ret <= 0) {
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ ret = request_codec_module(codec);
+ }
+
+ return ret;
+}
+
/* enable controller wake up event for all codecs with jack connectors */
void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
{
@@ -78,6 +103,13 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
EXPORT_SYMBOL_NS(hda_codec_jack_check, SND_SOC_SOF_HDA_AUDIO_CODEC);
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
+#define is_generic_config(bus) \
+ ((bus)->modelname && !strcmp((bus)->modelname, "generic"))
+#else
+#define is_generic_config(x) 0
+#endif
+
/* probe individual codec */
static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
bool hda_codec_use_common_hdmi)
@@ -87,6 +119,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
#endif
struct hda_bus *hbus = sof_to_hbus(sdev);
struct hdac_device *hdev;
+ struct hda_codec *codec;
u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
u32 resp = -1;
@@ -108,6 +141,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
hda_priv->codec.bus = hbus;
hdev = &hda_priv->codec.core;
+ codec = &hda_priv->codec;
ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev);
if (ret < 0)
@@ -122,6 +156,11 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
hda_priv->need_display_power = true;
}
+ if (is_generic_config(hbus))
+ codec->probe_id = HDA_CODEC_ID_GENERIC;
+ else
+ codec->probe_id = 0;
+
/*
* if common HDMI codec driver is not used, codec load
* is skipped here and hdac_hdmi is used instead
@@ -129,7 +168,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
if (hda_codec_use_common_hdmi ||
(resp & 0xFFFF0000) != IDISP_VID_INTEL) {
hdev->type = HDA_DEV_LEGACY;
- ret = hda_codec_load_module(&hda_priv->codec);
+ ret = hda_codec_load_module(codec);
/*
* handle ret==0 (no driver bound) as an error, but pass
* other return codes without modification
@@ -207,7 +246,6 @@ EXPORT_SYMBOL_NS(hda_codec_i915_init, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
int hda_codec_i915_exit(struct snd_sof_dev *sdev)
{
struct hdac_bus *bus = sof_to_bus(sdev);
- int ret;
if (!bus->audio_component)
return 0;
@@ -215,9 +253,7 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
/* power down unconditionally */
snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
- ret = snd_hdac_i915_exit(bus);
-
- return ret;
+ return snd_hdac_i915_exit(bus);
}
EXPORT_SYMBOL_NS(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
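
hda_codec_load_module() above now retries with the generic HDA codec driver when no vendor-specific driver binds. A standalone sketch of that try-specific-then-fall-back pattern, with invented names:

/*
 * Illustrative model only. Request the vendor-specific module
 * first; if no driver binds, flip to the generic ID and retry.
 * Everything here is invented for the example.
 */
#include <stdio.h>
#include <string.h>

#define ID_GENERIC 1

struct model_codec {
        const char *alias;
        int probe_id;
};

/* pretend only the generic driver is available on this system */
static int request_and_attach(const struct model_codec *c)
{
        const char *mod = c->probe_id == ID_GENERIC ? "generic" : c->alias;

        printf("requesting module: %s\n", mod);
        return strcmp(mod, "generic") == 0;     /* 1 = a driver bound */
}

static int load_codec_driver(struct model_codec *c)
{
        int ret = request_and_attach(c);

        if (ret <= 0) {
                c->probe_id = ID_GENERIC;       /* fall back */
                ret = request_and_attach(c);
        }
        return ret;
}

int main(void)
{
        struct model_codec codec = { .alias = "vendor-xyz", .probe_id = 0 };

        return load_codec_driver(&codec) > 0 ? 0 : 1;
}
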
diff --git a/sound/soc/sof/intel/hda-compress.c b/sound/soc/sof/intel/hda-compress.c
index 38a1ebec8478..53c08034fa22 100644
--- a/sound/soc/sof/intel/hda-compress.c
+++ b/sound/soc/sof/intel/hda-compress.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
index 6288b2f99540..fa5f0a718901 100644
--- a/sound/soc/sof/intel/hda-ctrl.c
+++ b/sound/soc/sof/intel/hda-ctrl.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index 833dc303b394..3934cd6bf87a 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 99087b6afb67..9e5ff8c18f99 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -226,10 +226,10 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
- is_enable = ((val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
- (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
- !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
- !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)));
+ is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
+ (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+ !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
is_enable, core_mask);
diff --git a/sound/soc/sof/intel/hda-ipc.c b/sound/soc/sof/intel/hda-ipc.c
index 6062bb6011fb..c91aa951df22 100644
--- a/sound/soc/sof/intel/hda-ipc.c
+++ b/sound/soc/sof/intel/hda-ipc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-ipc.h b/sound/soc/sof/intel/hda-ipc.h
index aef0ceac9803..ade4c3191a39 100644
--- a/sound/soc/sof/intel/hda-ipc.h
+++ b/sound/soc/sof/intel/hda-ipc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c
index e1550ccd0a49..441d05cda604 100644
--- a/sound/soc/sof/intel/hda-loader.c
+++ b/sound/soc/sof/intel/hda-loader.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -293,8 +293,13 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
chip_info = desc->chip_info;
- stripped_firmware.data = plat_data->fw->data;
- stripped_firmware.size = plat_data->fw->size;
+ if (plat_data->fw->size <= plat_data->fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
+
+ stripped_firmware.data = plat_data->fw->data + plat_data->fw_offset;
+ stripped_firmware.size = plat_data->fw->size - plat_data->fw_offset;
/* init for booting wait */
init_waitqueue_head(&sdev->boot_wait);
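
The hda-loader.c hunk validates that the firmware image is larger than the platform's fw_offset before stripping it; the same guard reappears in loader.c's check_header() further below. A tiny standalone model of the check, with invented types:

/*
 * Illustrative model only: bounds-check fw_offset before
 * stripping it from the firmware image. Types are invented.
 */
#include <stdio.h>
#include <stddef.h>

struct model_fw { const unsigned char *data; size_t size; };

static int strip_offset(const struct model_fw *fw, size_t fw_offset,
                        struct model_fw *out)
{
        if (fw->size <= fw_offset) {
                fprintf(stderr, "firmware size must exceed firmware offset\n");
                return -1;
        }
        out->data = fw->data + fw_offset;
        out->size = fw->size - fw_offset;
        return 0;
}

int main(void)
{
        static const unsigned char img[16];
        struct model_fw fw = { img, sizeof(img) }, stripped;

        if (strip_offset(&fw, 4, &stripped))
                return 1;
        printf("stripped size: %zu\n", stripped.size);
        return 0;
}
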
diff --git a/sound/soc/sof/intel/hda-pcm.c b/sound/soc/sof/intel/hda-pcm.c
index a46a6baa1c3f..53a875ac52d6 100644
--- a/sound/soc/sof/intel/hda-pcm.c
+++ b/sound/soc/sof/intel/hda-pcm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-stream.c b/sound/soc/sof/intel/hda-stream.c
index 5d386956906f..7f65dcc95811 100644
--- a/sound/soc/sof/intel/hda-stream.c
+++ b/sound/soc/sof/intel/hda-stream.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda-trace.c b/sound/soc/sof/intel/hda-trace.c
index 33b23bd6a01e..1eb746d5adeb 100644
--- a/sound/soc/sof/intel/hda-trace.c
+++ b/sound/soc/sof/intel/hda-trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 211e91e79eae..63ca920c8e6e 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -135,10 +135,8 @@ static int hda_sdw_acpi_scan(struct snd_sof_dev *sdev)
hdev = sdev->pdata->hw_pdata;
ret = sdw_intel_acpi_scan(handle, &hdev->info);
- if (ret < 0) {
- dev_err(sdev->dev, "%s failed\n", __func__);
+ if (ret < 0)
return -EINVAL;
- }
return 0;
}
@@ -282,6 +280,10 @@ module_param_named(use_msi, hda_use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, "SOF HDA use PCI MSI mode");
#endif
+static char *hda_model;
+module_param(hda_model, charp, 0444);
+MODULE_PARM_DESC(hda_model, "Use the given HDA board model.");
+
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
static int hda_dmic_num = -1;
module_param_named(dmic_num, hda_dmic_num, int, 0444);
@@ -503,7 +505,7 @@ static int hda_init(struct snd_sof_dev *sdev)
mutex_init(&hbus->prepare_mutex);
hbus->pci = pci;
hbus->mixer_assigned = -1;
- hbus->modelname = "sofbus";
+ hbus->modelname = hda_model;
/* initialise hdac bus */
bus->addr = pci_resource_start(pci, 0);
@@ -604,7 +606,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
/* scan SoundWire capabilities exposed by DSDT */
ret = hda_sdw_acpi_scan(sdev);
if (ret < 0) {
- dev_dbg(sdev->dev, "skipping SoundWire, ACPI scan error\n");
+ dev_dbg(sdev->dev, "skipping SoundWire, not detected with ACPI scan\n");
goto skip_soundwire;
}
@@ -1008,6 +1010,10 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev)
if (!tplg_filename)
return -EINVAL;
+ dev_info(bus->dev,
+ "DMICs detected in NHLT tables: %d\n",
+ dmic_num);
+
pdata->machine = hda_mach;
pdata->tplg_filename = tplg_filename;
}
@@ -1101,7 +1107,15 @@ static int hda_sdw_machine_select(struct snd_sof_dev *sdev)
if (link_mask && !pdata->machine) {
for (mach = pdata->desc->alt_machines;
mach && mach->link_mask; mach++) {
- if (mach->link_mask != link_mask)
+ /*
+ * On some platforms such as Up Extreme all links
+ * are enabled but only one link can be used by
+ * external codec. Instead of exact match of two masks,
+ * first check whether link_mask of mach is subset of
+ * link_mask supported by hw and then go on searching
+ * link_adr
+ */
+ if (~link_mask & mach->link_mask)
continue;
/* No need to match adr if there is no links defined */
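
The SoundWire machine match in hda.c now uses a subset test rather than an exact comparison, so a machine whose links are a subset of the enabled hardware links still matches. A tiny standalone model of that check:

/*
 * Illustrative model only: a machine matches when every link it
 * needs is present in the hardware link mask.
 */
#include <stdio.h>

static int mach_links_supported(unsigned int hw_mask, unsigned int mach_mask)
{
        /* any bit set in mach_mask but clear in hw_mask rules it out */
        return (~hw_mask & mach_mask) == 0;
}

int main(void)
{
        unsigned int hw = 0xF;          /* e.g. four links enabled */

        printf("needs link0 only: %d\n", mach_links_supported(hw, 0x1));
        printf("needs link4 too:  %d\n", mach_links_supported(hw, 0x11));
        return 0;
}
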
diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
index e9825798de77..fe452f0d0ec7 100644
--- a/sound/soc/sof/intel/hda.h
+++ b/sound/soc/sof/intel/hda.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/intel-ipc.c b/sound/soc/sof/intel/intel-ipc.c
index e935f70d611b..310f9168c124 100644
--- a/sound/soc/sof/intel/intel-ipc.c
+++ b/sound/soc/sof/intel/intel-ipc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/intel/shim.h b/sound/soc/sof/intel/shim.h
index daaf3364c177..6fe8b004b50e 100644
--- a/sound/soc/sof/intel/shim.h
+++ b/sound/soc/sof/intel/shim.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index 1c6794918cbb..36e2d4d43da4 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -335,21 +335,20 @@ int sof_ipc_tx_message_no_pm(struct snd_sof_ipc *ipc, u32 header,
EXPORT_SYMBOL(sof_ipc_tx_message_no_pm);
/* handle reply message from DSP */
-int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
+void snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id)
{
struct snd_sof_ipc_msg *msg = &sdev->ipc->msg;
if (msg->ipc_complete) {
- dev_err(sdev->dev, "error: no reply expected, received 0x%x",
+ dev_dbg(sdev->dev,
+ "no reply expected, received 0x%x, will be ignored",
msg_id);
- return -EINVAL;
+ return;
}
/* wake up and return the error if we have waiters on this message ? */
msg->ipc_complete = true;
wake_up(&msg->waitq);
-
- return 0;
}
EXPORT_SYMBOL(snd_sof_ipc_reply);
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
index 64af08293daa..b94fa5f5d480 100644
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -12,20 +12,29 @@
#include <linux/firmware.h>
#include <sound/sof.h>
+#include <sound/sof/ext_manifest.h>
#include "ops.h"
static int get_ext_windows(struct snd_sof_dev *sdev,
- struct sof_ipc_ext_data_hdr *ext_hdr)
+ const struct sof_ipc_ext_data_hdr *ext_hdr)
{
- struct sof_ipc_window *w =
+ const struct sof_ipc_window *w =
container_of(ext_hdr, struct sof_ipc_window, ext_hdr);
+ size_t w_size = struct_size(w, window, w->num_windows);
if (w->num_windows == 0 || w->num_windows > SOF_IPC_MAX_ELEMS)
return -EINVAL;
+ if (sdev->info_window) {
+ if (memcmp(sdev->info_window, w, w_size)) {
+ dev_err(sdev->dev, "error: mismatch between window descriptor from extended manifest and mailbox");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
/* keep a local copy of the data */
- sdev->info_window = kmemdup(w, struct_size(w, window, w->num_windows),
- GFP_KERNEL);
+ sdev->info_window = kmemdup(w, w_size, GFP_KERNEL);
if (!sdev->info_window)
return -ENOMEM;
@@ -33,13 +42,21 @@ static int get_ext_windows(struct snd_sof_dev *sdev,
}
static int get_cc_info(struct snd_sof_dev *sdev,
- struct sof_ipc_ext_data_hdr *ext_hdr)
+ const struct sof_ipc_ext_data_hdr *ext_hdr)
{
int ret;
- struct sof_ipc_cc_version *cc =
+ const struct sof_ipc_cc_version *cc =
container_of(ext_hdr, struct sof_ipc_cc_version, ext_hdr);
+ if (sdev->cc_version) {
+ if (memcmp(sdev->cc_version, cc, cc->ext_hdr.hdr.size)) {
+ dev_err(sdev->dev, "error: receive diverged cc_version descriptions");
+ return -EINVAL;
+ }
+ return 0;
+ }
+
dev_dbg(sdev->dev, "Firmware info: used compiler %s %d:%d:%d%s used optimization flags %s\n",
cc->name, cc->major, cc->minor, cc->micro, cc->desc,
cc->optim);
@@ -126,6 +143,142 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset)
}
EXPORT_SYMBOL(snd_sof_fw_parse_ext_data);
+static int ext_man_get_fw_version(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_fw_version *v =
+ container_of(hdr, struct sof_ext_man_fw_version, hdr);
+
+ memcpy(&sdev->fw_ready.version, &v->version, sizeof(v->version));
+ sdev->fw_ready.flags = v->flags;
+
+ /* log ABI versions and check FW compatibility */
+ return snd_sof_ipc_valid(sdev);
+}
+
+static int ext_man_get_windows(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_window *w;
+
+ w = container_of(hdr, struct sof_ext_man_window, hdr);
+
+ return get_ext_windows(sdev, &w->ipc_window.ext_hdr);
+}
+
+static int ext_man_get_cc_info(struct snd_sof_dev *sdev,
+ const struct sof_ext_man_elem_header *hdr)
+{
+ const struct sof_ext_man_cc_version *cc;
+
+ cc = container_of(hdr, struct sof_ext_man_cc_version, hdr);
+
+ return get_cc_info(sdev, &cc->cc_version.ext_hdr);
+}
+
+static ssize_t snd_sof_ext_man_size(const struct firmware *fw)
+{
+ const struct sof_ext_man_header *head;
+
+ head = (struct sof_ext_man_header *)fw->data;
+
+ /*
+ * assert fw size is big enough to contain extended manifest header,
+ * it prevents from reading unallocated memory from `head` in following
+ * step.
+ */
+ if (fw->size < sizeof(*head))
+ return -EINVAL;
+
+ /*
+ * When fw points to an extended manifest,
+ * the first u32 must equal SOF_EXT_MAN_MAGIC_NUMBER.
+ */
+ if (head->magic == SOF_EXT_MAN_MAGIC_NUMBER)
+ return head->full_size;
+
+ /* otherwise the given fw does not have an extended manifest */
+ return 0;
+}
+
+/* parse extended FW manifest data structures */
+static int snd_sof_fw_ext_man_parse(struct snd_sof_dev *sdev,
+ const struct firmware *fw)
+{
+ const struct sof_ext_man_elem_header *elem_hdr;
+ const struct sof_ext_man_header *head;
+ ssize_t ext_man_size;
+ ssize_t remaining;
+ uintptr_t iptr;
+ int ret = 0;
+
+ head = (struct sof_ext_man_header *)fw->data;
+ remaining = head->full_size - head->header_size;
+ ext_man_size = snd_sof_ext_man_size(fw);
+
+ /* Assert firmware starts with extended manifest */
+ if (ext_man_size <= 0)
+ return ext_man_size;
+
+ /* incompatible version */
+ if (SOF_EXT_MAN_VERSION_INCOMPATIBLE(SOF_EXT_MAN_VERSION,
+ head->header_version)) {
+ dev_err(sdev->dev, "error: extended manifest version 0x%X differ from used 0x%X\n",
+ head->header_version, SOF_EXT_MAN_VERSION);
+ return -EINVAL;
+ }
+
+ /* get first extended manifest element header */
+ iptr = (uintptr_t)fw->data + head->header_size;
+
+ while (remaining > sizeof(*elem_hdr)) {
+ elem_hdr = (struct sof_ext_man_elem_header *)iptr;
+
+ dev_dbg(sdev->dev, "found sof_ext_man header type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+
+ if (elem_hdr->size < sizeof(*elem_hdr) ||
+ elem_hdr->size > remaining) {
+ dev_err(sdev->dev, "error: invalid sof_ext_man header size, type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+ return -EINVAL;
+ }
+
+ /* process structure data */
+ switch (elem_hdr->type) {
+ case SOF_EXT_MAN_ELEM_FW_VERSION:
+ ret = ext_man_get_fw_version(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_WINDOW:
+ ret = ext_man_get_windows(sdev, elem_hdr);
+ break;
+ case SOF_EXT_MAN_ELEM_CC_VERSION:
+ ret = ext_man_get_cc_info(sdev, elem_hdr);
+ break;
+ default:
+ dev_warn(sdev->dev, "warning: unknown sof_ext_man header type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+ break;
+ }
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to parse sof_ext_man header type %d size 0x%X\n",
+ elem_hdr->type, elem_hdr->size);
+ return ret;
+ }
+
+ remaining -= elem_hdr->size;
+ iptr += elem_hdr->size;
+ }
+
+ if (remaining) {
+ dev_err(sdev->dev, "error: sof_ext_man header is inconsistent\n");
+ return -EINVAL;
+ }
+
+ return ext_man_size;
+}
+
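snd_sof_fw_ext_man_parse() above walks the extended manifest as a sequence of variable-size elements, each starting with a {type, size} header, and rejects elements that are truncated or overrun the container. As an illustrative aside, not part of the patch, a minimal userspace sketch of the same walking pattern (names are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct elem_hdr {
	uint32_t type;
	uint32_t size;		/* total element size, header included */
};

/* Walk 'remaining' bytes of consecutive elements starting at 'buf'. */
static int walk_elements(const uint8_t *buf, size_t remaining)
{
	struct elem_hdr hdr;

	while (remaining > sizeof(hdr)) {
		memcpy(&hdr, buf, sizeof(hdr));	/* avoid unaligned reads */

		/* reject truncated elements and elements past the end */
		if (hdr.size < sizeof(hdr) || hdr.size > remaining)
			return -1;

		printf("element type %u size %u\n",
		       (unsigned int)hdr.type, (unsigned int)hdr.size);

		buf += hdr.size;
		remaining -= hdr.size;
	}

	/* leftover bytes mean the container length was inconsistent */
	return remaining ? -1 : 0;
}

int main(void)
{
	/* two 12-byte elements packed back to back: 8-byte header + 4 bytes */
	uint8_t buf[24] = { 0 };
	struct elem_hdr hdr = { .type = 1, .size = 12 };

	memcpy(buf, &hdr, sizeof(hdr));
	hdr.type = 2;
	memcpy(buf + 12, &hdr, sizeof(hdr));

	return walk_elements(buf, sizeof(buf)) ? 1 : 0;
}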
/*
* IPC Firmware ready.
*/
@@ -379,12 +532,19 @@ int snd_sof_parse_module_memcpy(struct snd_sof_dev *sdev,
}
EXPORT_SYMBOL(snd_sof_parse_module_memcpy);
-static int check_header(struct snd_sof_dev *sdev, const struct firmware *fw)
+static int check_header(struct snd_sof_dev *sdev, const struct firmware *fw,
+ size_t fw_offset)
{
struct snd_sof_fw_header *header;
+ size_t fw_size = fw->size - fw_offset;
+
+ if (fw->size <= fw_offset) {
+ dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
+ return -EINVAL;
+ }
/* Read the header information from the data pointer */
- header = (struct snd_sof_fw_header *)fw->data;
+ header = (struct snd_sof_fw_header *)(fw->data + fw_offset);
/* verify FW sig */
if (strncmp(header->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE) != 0) {
@@ -393,9 +553,9 @@ static int check_header(struct snd_sof_dev *sdev, const struct firmware *fw)
}
/* check size is valid */
- if (fw->size != header->file_size + sizeof(*header)) {
+ if (fw_size != header->file_size + sizeof(*header)) {
dev_err(sdev->dev, "error: invalid filesize mismatch got 0x%zx expected 0x%zx\n",
- fw->size, header->file_size + sizeof(*header));
+ fw_size, header->file_size + sizeof(*header));
return -EINVAL;
}
@@ -406,7 +566,8 @@ static int check_header(struct snd_sof_dev *sdev, const struct firmware *fw)
return 0;
}
-static int load_modules(struct snd_sof_dev *sdev, const struct firmware *fw)
+static int load_modules(struct snd_sof_dev *sdev, const struct firmware *fw,
+ size_t fw_offset)
{
struct snd_sof_fw_header *header;
struct snd_sof_mod_hdr *module;
@@ -415,14 +576,15 @@ static int load_modules(struct snd_sof_dev *sdev, const struct firmware *fw)
int ret, count;
size_t remaining;
- header = (struct snd_sof_fw_header *)fw->data;
+ header = (struct snd_sof_fw_header *)(fw->data + fw_offset);
load_module = sof_ops(sdev)->load_module;
if (!load_module)
return -EINVAL;
/* parse each module */
- module = (struct snd_sof_mod_hdr *)((u8 *)(fw->data) + sizeof(*header));
- remaining = fw->size - sizeof(*header);
+ module = (struct snd_sof_mod_hdr *)(fw->data + fw_offset +
+ sizeof(*header));
+ remaining = fw->size - sizeof(*header) - fw_offset;
/* check for wrap */
if (remaining > fw->size) {
dev_err(sdev->dev, "error: fw size smaller than header size\n");
@@ -464,6 +626,7 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
{
struct snd_sof_pdata *plat_data = sdev->pdata;
const char *fw_filename;
+ ssize_t ext_man_size;
int ret;
/* Don't request firmware again if firmware is already requested */
@@ -481,11 +644,27 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
if (ret < 0) {
dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
fw_filename, ret);
+ goto err;
} else {
dev_dbg(sdev->dev, "request_firmware %s successful\n",
fw_filename);
}
+ /* check for extended manifest */
+ ext_man_size = snd_sof_fw_ext_man_parse(sdev, plat_data->fw);
+ if (ext_man_size > 0) {
+ /* on success, skip the extended manifest during FW load */
+ plat_data->fw_offset = ext_man_size;
+ } else if (!ext_man_size) {
+ /* No extended manifest, so nothing to skip during FW load */
+ dev_dbg(sdev->dev, "firmware doesn't contain extended manifest\n");
+ } else {
+ ret = ext_man_size;
+ dev_err(sdev->dev, "error: firmware %s contains unsupported or invalid extended manifest: %d\n",
+ fw_filename, ret);
+ }
+
+err:
kfree(fw_filename);
return ret;
@@ -502,7 +681,7 @@ int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
return ret;
/* make sure the FW header and file is valid */
- ret = check_header(sdev, plat_data->fw);
+ ret = check_header(sdev, plat_data->fw, plat_data->fw_offset);
if (ret < 0) {
dev_err(sdev->dev, "error: invalid FW header\n");
goto error;
@@ -516,7 +695,7 @@ int snd_sof_load_firmware_memcpy(struct snd_sof_dev *sdev)
}
/* parse and load firmware modules to DSP */
- ret = load_modules(sdev, plat_data->fw);
+ ret = load_modules(sdev, plat_data->fw, plat_data->fw_offset);
if (ret < 0) {
dev_err(sdev->dev, "error: invalid FW modules\n");
goto error;
diff --git a/sound/soc/sof/nocodec.c b/sound/soc/sof/nocodec.c
index 2233146386cc..d03b5be31255 100644
--- a/sound/soc/sof/nocodec.c
+++ b/sound/soc/sof/nocodec.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -52,8 +52,10 @@ static int sof_nocodec_bes_setup(struct device *dev,
links[i].platforms->name = dev_name(dev);
links[i].codecs->dai_name = "snd-soc-dummy-dai";
links[i].codecs->name = "snd-soc-dummy";
- links[i].dpcm_playback = 1;
- links[i].dpcm_capture = 1;
+ if (ops->drv[i].playback.channels_min)
+ links[i].dpcm_playback = 1;
+ if (ops->drv[i].capture.channels_min)
+ links[i].dpcm_capture = 1;
}
card->dai_link = links;
@@ -66,7 +68,6 @@ int sof_nocodec_setup(struct device *dev,
const struct snd_sof_dsp_ops *ops)
{
struct snd_soc_dai_link *links;
- int ret;
/* create dummy BE dai_links */
links = devm_kzalloc(dev, sizeof(struct snd_soc_dai_link) *
@@ -74,9 +75,8 @@ int sof_nocodec_setup(struct device *dev,
if (!links)
return -ENOMEM;
- ret = sof_nocodec_bes_setup(dev, ops, links, ops->num_drv,
- &sof_nocodec_card);
- return ret;
+ return sof_nocodec_bes_setup(dev, ops, links, ops->num_drv,
+ &sof_nocodec_card);
}
EXPORT_SYMBOL(sof_nocodec_setup);
diff --git a/sound/soc/sof/ops.c b/sound/soc/sof/ops.c
index 7a27c3b719e7..1a394b4c6a2f 100644
--- a/sound/soc/sof/ops.c
+++ b/sound/soc/sof/ops.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/ops.h b/sound/soc/sof/ops.h
index a771500ac442..b21632f5511a 100644
--- a/sound/soc/sof/ops.h
+++ b/sound/soc/sof/ops.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/pcm.c b/sound/soc/sof/pcm.c
index 47cd741f2a8c..22fe9d5e932b 100644
--- a/sound/soc/sof/pcm.c
+++ b/sound/soc/sof/pcm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -639,6 +639,7 @@ static int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME);
struct snd_sof_dai *dai =
snd_sof_find_dai(component, (char *)rtd->dai_link->name);
+ struct snd_soc_dpcm *dpcm;
/* no topology exists for this BE, try a common configuration */
if (!dai) {
@@ -702,7 +703,16 @@ static int sof_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
}
break;
case SOF_DAI_INTEL_HDA:
- /* do nothing for HDA dai_link */
+ /*
+ * HDaudio does not follow the default trigger
+ * sequence due to firmware implementation
+ */
+ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
+ struct snd_soc_pcm_runtime *fe = dpcm->fe;
+
+ fe->dai_link->trigger[SNDRV_PCM_STREAM_PLAYBACK] =
+ SND_SOC_DPCM_TRIGGER_POST;
+ }
break;
case SOF_DAI_INTEL_ALH:
/* do nothing for ALH dai_link */
@@ -785,11 +795,11 @@ void snd_sof_new_platform_drv(struct snd_sof_dev *sdev)
pd->pointer = sof_pcm_pointer;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMPRESS)
- pd->compr_ops = &sof_compressed_ops;
+ pd->compress_ops = &sof_compressed_ops;
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_PROBES)
/* override cops when probe support is enabled */
- pd->compr_ops = &sof_probe_compressed_ops;
+ pd->compress_ops = &sof_probe_compressed_ops;
#endif
pd->pcm_construct = sof_pcm_new;
pd->ignore_machine = drv_name;
diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
index c410822d9920..92e5f9b15f3a 100644
--- a/sound/soc/sof/pm.c
+++ b/sound/soc/sof/pm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -90,7 +90,10 @@ static int sof_resume(struct device *dev, bool runtime_resume)
int ret;
/* do nothing if dsp resume callbacks are not set */
- if (!sof_ops(sdev)->resume || !sof_ops(sdev)->runtime_resume)
+ if (!runtime_resume && !sof_ops(sdev)->resume)
+ return 0;
+
+ if (runtime_resume && !sof_ops(sdev)->runtime_resume)
return 0;
/* DSP was never successfully started, nothing to resume */
@@ -111,8 +114,12 @@ static int sof_resume(struct device *dev, bool runtime_resume)
return ret;
}
- /* Nothing further to do if resuming from a low-power D0 substate */
- if (!runtime_resume && old_state == SOF_DSP_PM_D0)
+ /*
+ * Nothing further to be done for platforms that support the low power
+ * D0 substate.
+ */
+ if (!runtime_resume && sof_ops(sdev)->set_power_state &&
+ old_state == SOF_DSP_PM_D0)
return 0;
sdev->fw_state = SOF_FW_BOOT_PREPARE;
@@ -175,7 +182,10 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
int ret;
/* do nothing if dsp suspend callback is not set */
- if (!sof_ops(sdev)->suspend)
+ if (!runtime_suspend && !sof_ops(sdev)->suspend)
+ return 0;
+
+ if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
return 0;
if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
@@ -250,6 +260,15 @@ suspend:
return ret;
}
+int snd_sof_dsp_power_down_notify(struct snd_sof_dev *sdev)
+{
+ /* Notify DSP of upcoming power down */
+ if (sof_ops(sdev)->remove)
+ return sof_send_pm_ctx_ipc(sdev, SOF_IPC_PM_CTX_SAVE);
+
+ return 0;
+}
+
int snd_sof_runtime_suspend(struct device *dev)
{
return sof_suspend(dev, true);
diff --git a/sound/soc/sof/probe.c b/sound/soc/sof/probe.c
index c38169fe00c5..14509f4d3f86 100644
--- a/sound/soc/sof/probe.c
+++ b/sound/soc/sof/probe.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/probe.h b/sound/soc/sof/probe.h
index 45daa5552834..b04b728c7224 100644
--- a/sound/soc/sof/probe.h
+++ b/sound/soc/sof/probe.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/sof-acpi-dev.c b/sound/soc/sof/sof-acpi-dev.c
index 1278aa95effa..c5eaaa978054 100644
--- a/sound/soc/sof/sof-acpi-dev.c
+++ b/sound/soc/sof/sof-acpi-dev.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index fc4ed2a8a914..1c7698f8edd6 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/sof-audio.h b/sound/soc/sof/sof-audio.h
index bf65f31af858..9629994fe463 100644
--- a/sound/soc/sof/sof-audio.h
+++ b/sound/soc/sof/sof-audio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -56,7 +56,7 @@ struct snd_sof_pcm {
struct snd_sof_led_control {
unsigned int use_led;
unsigned int direction;
- unsigned int led_value;
+ int led_value;
};
/* ALSA SOF Kcontrol device */
diff --git a/sound/soc/sof/sof-of-dev.c b/sound/soc/sof/sof-of-dev.c
index 16e49f2ee629..f492c5dfa659 100644
--- a/sound/soc/sof/sof-of-dev.c
+++ b/sound/soc/sof/sof-of-dev.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// Copyright 2019 NXP
//
@@ -14,6 +14,7 @@
extern struct snd_sof_dsp_ops sof_imx8_ops;
extern struct snd_sof_dsp_ops sof_imx8x_ops;
+extern struct snd_sof_dsp_ops sof_imx8m_ops;
/* platform specific devices */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8)
@@ -34,6 +35,16 @@ static struct sof_dev_desc sof_of_imx8qm_desc = {
};
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8M)
+static struct sof_dev_desc sof_of_imx8mp_desc = {
+ .default_fw_path = "imx/sof",
+ .default_tplg_path = "imx/sof-tplg",
+ .default_fw_filename = "sof-imx8m.ri",
+ .nocodec_tplg_filename = "sof-imx8-nocodec.tplg",
+ .ops = &sof_imx8m_ops,
+};
+#endif
+
static const struct dev_pm_ops sof_of_pm = {
SET_SYSTEM_SLEEP_PM_OPS(snd_sof_suspend, snd_sof_resume)
SET_RUNTIME_PM_OPS(snd_sof_runtime_suspend, snd_sof_runtime_resume,
@@ -114,6 +125,9 @@ static const struct of_device_id sof_of_ids[] = {
{ .compatible = "fsl,imx8qxp-dsp", .data = &sof_of_imx8qxp_desc},
{ .compatible = "fsl,imx8qm-dsp", .data = &sof_of_imx8qm_desc},
#endif
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_IMX8M)
+ { .compatible = "fsl,imx8mp-dsp", .data = &sof_of_imx8mp_desc},
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, sof_of_ids);
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index cec631a1389b..b13697dab7c0 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -435,6 +435,8 @@ static const struct pci_device_id sof_pci_ids[] = {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
{ PCI_DEVICE(0x8086, 0x4b55),
.driver_data = (unsigned long)&ehl_desc},
+ { PCI_DEVICE(0x8086, 0x4b58),
+ .driver_data = (unsigned long)&ehl_desc},
#endif
{ 0, }
};
diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h
index a4b297c842df..64f28e082049 100644
--- a/sound/soc/sof/sof-priv.h
+++ b/sound/soc/sof/sof-priv.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -453,6 +453,7 @@ int snd_sof_runtime_resume(struct device *dev);
int snd_sof_runtime_idle(struct device *dev);
int snd_sof_resume(struct device *dev);
int snd_sof_suspend(struct device *dev);
+int snd_sof_dsp_power_down_notify(struct snd_sof_dev *sdev);
int snd_sof_prepare(struct device *dev);
void snd_sof_complete(struct device *dev);
@@ -479,7 +480,7 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset);
*/
struct snd_sof_ipc *snd_sof_ipc_init(struct snd_sof_dev *sdev);
void snd_sof_ipc_free(struct snd_sof_dev *sdev);
-int snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id);
+void snd_sof_ipc_reply(struct snd_sof_dev *sdev, u32 msg_id);
void snd_sof_ipc_msgs_rx(struct snd_sof_dev *sdev);
int snd_sof_ipc_stream_pcm_params(struct snd_sof_dev *sdev,
struct sof_ipc_pcm_params *params);
@@ -522,7 +523,7 @@ void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev);
/*
* Platform specific ops.
*/
-extern struct snd_compr_ops sof_compressed_ops;
+extern struct snd_compress_ops sof_compressed_ops;
/*
* DSP Architectures.
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index fe8ba3e05e08..6a9703e5ff60 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
@@ -430,6 +430,8 @@ static const struct sof_process_types sof_process[] = {
{"CHAN_SELECTOR", SOF_PROCESS_CHAN_SELECTOR, SOF_COMP_SELECTOR},
{"MUX", SOF_PROCESS_MUX, SOF_COMP_MUX},
{"DEMUX", SOF_PROCESS_DEMUX, SOF_COMP_DEMUX},
+ {"DCBLOCK", SOF_PROCESS_DCBLOCK, SOF_COMP_DCBLOCK},
+ {"SMART_AMP", SOF_PROCESS_SMART_AMP, SOF_COMP_SMART_AMP},
};
static enum sof_ipc_process_type find_process(const char *name)
@@ -655,6 +657,16 @@ static const struct sof_topology_token ssp_tokens[] = {
};
+/* ALH */
+static const struct sof_topology_token alh_tokens[] = {
+ {SOF_TKN_INTEL_ALH_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_alh_params, rate), 0},
+ {SOF_TKN_INTEL_ALH_CH,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_alh_params, channels), 0},
+};
+
/* DMIC */
static const struct sof_topology_token dmic_tokens[] = {
{SOF_TKN_INTEL_DMIC_DRIVER_VERSION,
@@ -742,6 +754,12 @@ static const struct sof_topology_token dmic_pdm_tokens[] = {
/* HDA */
static const struct sof_topology_token hda_tokens[] = {
+ {SOF_TKN_INTEL_HDA_RATE,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_hda_params, rate), 0},
+ {SOF_TKN_INTEL_HDA_CH,
+ SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+ offsetof(struct sof_ipc_dai_hda_params, channels), 0},
};
/* Leds */
@@ -752,13 +770,15 @@ static const struct sof_topology_token led_tokens[] = {
get_token_u32, offsetof(struct snd_sof_led_control, direction), 0},
};
-static void sof_parse_uuid_tokens(struct snd_soc_component *scomp,
- void *object,
- const struct sof_topology_token *tokens,
- int count,
- struct snd_soc_tplg_vendor_array *array)
+static int sof_parse_uuid_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ size_t offset)
{
struct snd_soc_tplg_vendor_uuid_elem *elem;
+ int found = 0;
int i, j;
/* parse element by element */
@@ -776,19 +796,26 @@ static void sof_parse_uuid_tokens(struct snd_soc_component *scomp,
continue;
/* matched - now load token */
- tokens[j].get_token(elem, object, tokens[j].offset,
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset,
tokens[j].size);
+
+ found++;
}
}
+
+ return found;
}
-static void sof_parse_string_tokens(struct snd_soc_component *scomp,
- void *object,
- const struct sof_topology_token *tokens,
- int count,
- struct snd_soc_tplg_vendor_array *array)
+static int sof_parse_string_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ size_t offset)
{
struct snd_soc_tplg_vendor_string_elem *elem;
+ int found = 0;
int i, j;
/* parse element by element */
@@ -806,24 +833,27 @@ static void sof_parse_string_tokens(struct snd_soc_component *scomp,
continue;
/* matched - now load token */
- tokens[j].get_token(elem, object, tokens[j].offset,
+ tokens[j].get_token(elem, object,
+ offset + tokens[j].offset,
tokens[j].size);
+
+ found++;
}
}
+
+ return found;
}
-static void sof_parse_word_tokens(struct snd_soc_component *scomp,
- void *object,
- const struct sof_topology_token *tokens,
- int count,
- struct snd_soc_tplg_vendor_array *array)
+static int sof_parse_word_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ size_t offset)
{
- struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_vendor_value_elem *elem;
- size_t size = sizeof(struct sof_ipc_dai_dmic_pdm_ctrl);
+ int found = 0;
int i, j;
- u32 offset;
- u32 *index = NULL;
/* parse element by element */
for (i = 0; i < le32_to_cpu(array->num_elems); i++) {
@@ -842,58 +872,45 @@ static void sof_parse_word_tokens(struct snd_soc_component *scomp,
if (tokens[j].token != le32_to_cpu(elem->token))
continue;
- /* pdm config array index */
- if (sdev->private)
- index = sdev->private;
-
- /* matched - determine offset */
- switch (tokens[j].token) {
- case SOF_TKN_INTEL_DMIC_PDM_CTRL_ID:
-
- /* inc number of pdm array index */
- if (index)
- (*index)++;
- /* fallthrough */
- case SOF_TKN_INTEL_DMIC_PDM_MIC_A_Enable:
- case SOF_TKN_INTEL_DMIC_PDM_MIC_B_Enable:
- case SOF_TKN_INTEL_DMIC_PDM_POLARITY_A:
- case SOF_TKN_INTEL_DMIC_PDM_POLARITY_B:
- case SOF_TKN_INTEL_DMIC_PDM_CLK_EDGE:
- case SOF_TKN_INTEL_DMIC_PDM_SKEW:
-
- /* check if array index is valid */
- if (!index || *index == 0) {
- dev_err(scomp->dev,
- "error: invalid array offset\n");
- continue;
- } else {
- /* offset within the pdm config array */
- offset = size * (*index - 1);
- }
- break;
- default:
- offset = 0;
- break;
- }
-
/* load token */
tokens[j].get_token(elem, object,
offset + tokens[j].offset,
tokens[j].size);
+
+ found++;
}
}
+
+ return found;
}
-static int sof_parse_tokens(struct snd_soc_component *scomp,
- void *object,
- const struct sof_topology_token *tokens,
- int count,
- struct snd_soc_tplg_vendor_array *array,
- int priv_size)
-{
+/**
+ * sof_parse_token_sets - Parse multiple sets of tokens
+ * @scomp: pointer to soc component
+ * @object: target ipc struct for parsed values
+ * @tokens: token definition array describing what tokens to parse
+ * @count: number of tokens in definition array
+ * @array: source pointer to consecutive vendor arrays to be parsed
+ * @priv_size: total size of the consecutive source arrays
+ * @sets: number of similar token sets to be parsed; each set has @count elements
+ * @object_size: size of each target ipc struct, used as the stride between sets
+ *
+ * This function parses multiple sets of tokens in vendor arrays into
+ * consecutive ipc structs.
+ */
+static int sof_parse_token_sets(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ int priv_size, int sets, size_t object_size)
+{
+ size_t offset = 0;
+ int found = 0;
+ int total = 0;
int asize;
- while (priv_size > 0) {
+ while (priv_size > 0 && total < count * sets) {
asize = le32_to_cpu(array->size);
/* validate asize */
@@ -914,19 +931,19 @@ static int sof_parse_tokens(struct snd_soc_component *scomp,
/* call correct parser depending on type */
switch (le32_to_cpu(array->type)) {
case SND_SOC_TPLG_TUPLE_TYPE_UUID:
- sof_parse_uuid_tokens(scomp, object, tokens, count,
- array);
+ found += sof_parse_uuid_tokens(scomp, object, tokens,
+ count, array, offset);
break;
case SND_SOC_TPLG_TUPLE_TYPE_STRING:
- sof_parse_string_tokens(scomp, object, tokens, count,
- array);
+ found += sof_parse_string_tokens(scomp, object, tokens,
+ count, array, offset);
break;
case SND_SOC_TPLG_TUPLE_TYPE_BOOL:
case SND_SOC_TPLG_TUPLE_TYPE_BYTE:
case SND_SOC_TPLG_TUPLE_TYPE_WORD:
case SND_SOC_TPLG_TUPLE_TYPE_SHORT:
- sof_parse_word_tokens(scomp, object, tokens, count,
- array);
+ found += sof_parse_word_tokens(scomp, object, tokens,
+ count, array, offset);
break;
default:
dev_err(scomp->dev, "error: unknown token type %d\n",
@@ -937,10 +954,35 @@ static int sof_parse_tokens(struct snd_soc_component *scomp,
/* next array */
array = (struct snd_soc_tplg_vendor_array *)((u8 *)array
+ asize);
+
+ /* move to next target struct */
+ if (found >= count) {
+ offset += object_size;
+ total += found;
+ found = 0;
+ }
}
+
return 0;
}
+static int sof_parse_tokens(struct snd_soc_component *scomp,
+ void *object,
+ const struct sof_topology_token *tokens,
+ int count,
+ struct snd_soc_tplg_vendor_array *array,
+ int priv_size)
+{
+ /*
+ * sof_parse_tokens is used when the topology contains only a single
+ * set of identical tuple arrays. The additional parameters to
+ * sof_parse_token_sets are sets = 1 (only 1 set) and
+ * object_size = 0 (irrelevant).
+ */
+ return sof_parse_token_sets(scomp, object, tokens, count, array,
+ priv_size, 1, 0);
+}
+
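sof_parse_token_sets() above fills several consecutive copies of the target struct: once a full set of count tokens has been found, the destination offset advances by object_size so the next set lands in the next struct. As an illustrative aside, not part of the patch, a userspace sketch of that bookkeeping with hypothetical names (it assumes the target struct is laid out as consecutive unsigned ints):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct pdm_cfg {		/* stand-in for one ipc struct in the array */
	unsigned int id;
	unsigned int clk_edge;
};

/*
 * Copy 'sets' groups of 'count' values into consecutive structs, advancing
 * the destination offset by 'object_size' after each complete set.
 */
static void fill_token_sets(void *object, size_t object_size, int sets,
			    int count, const unsigned int *values)
{
	size_t offset = 0;
	int found = 0;
	int total = 0;

	while (total < count * sets) {
		unsigned int *dst = (unsigned int *)((char *)object + offset);

		dst[found] = values[total];
		found++;
		total++;

		if (found >= count) {		/* one set complete */
			offset += object_size;	/* move to the next struct */
			found = 0;
		}
	}
}

int main(void)
{
	struct pdm_cfg cfg[2];
	const unsigned int values[] = { 0, 1, 1, 0 };	/* two sets of two */

	memset(cfg, 0, sizeof(cfg));
	fill_token_sets(cfg, sizeof(cfg[0]), 2, 2, values);

	printf("pdm0: id %u clk_edge %u, pdm1: id %u clk_edge %u\n",
	       cfg[0].id, cfg[0].clk_edge, cfg[1].id, cfg[1].clk_edge);
	return 0;
}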
static void sof_dbg_comp_config(struct snd_soc_component *scomp,
struct sof_ipc_comp_config *config)
{
@@ -1203,6 +1245,8 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
return ret;
}
+ scontrol->led_ctl.led_value = -1;
+
dobj->private = scontrol;
list_add(&scontrol->list, &sdev->kcontrol_list);
return ret;
@@ -1257,15 +1301,45 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
switch (w->id) {
case snd_soc_dapm_dai_out:
- for_each_rtd_cpu_dais(rtd, i, cpu_dai)
- cpu_dai->capture_widget = w;
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ /*
+ * DAI widgets must be created in the right order
+ * to ensure the BE connects to the correct DAI
+ * widget.
+ */
+ if (!cpu_dai->capture_widget) {
+ cpu_dai->capture_widget = w;
+ break;
+ }
+ }
+ if (i == rtd->num_cpus) {
+ dev_err(scomp->dev, "error: can't find BE for DAI %s\n",
+ w->name);
+
+ return -EINVAL;
+ }
dai->name = rtd->dai_link->name;
dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
w->name, rtd->dai_link->name);
break;
case snd_soc_dapm_dai_in:
- for_each_rtd_cpu_dais(rtd, i, cpu_dai)
- cpu_dai->playback_widget = w;
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ /*
+ * DAI widgets must be created in the right order
+ * to ensure the BE connects to the correct DAI
+ * widget.
+ */
+ if (!cpu_dai->playback_widget) {
+ cpu_dai->playback_widget = w;
+ break;
+ }
+ }
+ if (i == rtd->num_cpus) {
+ dev_err(scomp->dev, "error: can't find BE for DAI %s\n",
+ w->name);
+
+ return -EINVAL;
+ }
dai->name = rtd->dai_link->name;
dev_dbg(scomp->dev, "tplg: connected widget %s -> DAI link %s\n",
w->name, rtd->dai_link->name);
@@ -2602,7 +2676,11 @@ static void sof_dai_set_format(struct snd_soc_tplg_hw_config *hw_config,
}
}
-/* set config for all DAI's with name matching the link name */
+/*
+ * Send IPC and set the same config for all DAIs with name matching the link
+ * name. Note that, for now, the function can only be used when all
+ * DAIs share a common DAI config.
+ */
static int sof_set_dai_config(struct snd_sof_dev *sdev, u32 size,
struct snd_soc_dai_link *link,
struct sof_ipc_dai_config *config)
@@ -2615,6 +2693,27 @@ static int sof_set_dai_config(struct snd_sof_dev *sdev, u32 size,
continue;
if (strcmp(link->name, dai->name) == 0) {
+ struct sof_ipc_reply reply;
+ int ret;
+
+ /*
+ * the same dai config will be applied to all DAIs in
+ * the same dai link. We have to ensure that the ipc
+ * dai config's dai_index matches the component's
+ * dai_index.
+ */
+ config->dai_index = dai->comp_dai.dai_index;
+
+ /* send message to DSP */
+ ret = sof_ipc_tx_message(sdev->ipc,
+ config->hdr.cmd, config, size,
+ &reply, sizeof(reply));
+
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: failed to set DAI config for %s index %d\n",
+ dai->name, config->dai_index);
+ return ret;
+ }
dai->dai_config = kmemdup(config, size, GFP_KERNEL);
if (!dai->dai_config)
return -ENOMEM;
@@ -2647,7 +2746,6 @@ static int sof_link_ssp_load(struct snd_soc_component *scomp, int index,
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_private *private = &cfg->priv;
- struct sof_ipc_reply reply;
u32 size = sizeof(*config);
int ret;
@@ -2696,17 +2794,6 @@ static int sof_link_ssp_load(struct snd_soc_component *scomp, int index,
return -EINVAL;
}
- /* send message to DSP */
- ret = sof_ipc_tx_message(sdev->ipc,
- config->hdr.cmd, config, size, &reply,
- sizeof(reply));
-
- if (ret < 0) {
- dev_err(scomp->dev, "error: failed to set DAI config for SSP%d\n",
- config->dai_index);
- return ret;
- }
-
/* set config for all DAI's with name matching the link name */
ret = sof_set_dai_config(sdev, size, link, config);
if (ret < 0)
@@ -2724,7 +2811,6 @@ static int sof_link_sai_load(struct snd_soc_component *scomp, int index,
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_private *private = &cfg->priv;
- struct sof_ipc_reply reply;
u32 size = sizeof(*config);
int ret;
@@ -2764,17 +2850,6 @@ static int sof_link_sai_load(struct snd_soc_component *scomp, int index,
return -EINVAL;
}
- /* send message to DSP */
- ret = sof_ipc_tx_message(sdev->ipc,
- config->hdr.cmd, config, size, &reply,
- sizeof(reply));
-
- if (ret < 0) {
- dev_err(scomp->dev, "error: failed to set DAI config for SAI%d\n",
- config->dai_index);
- return ret;
- }
-
/* set config for all DAI's with name matching the link name */
ret = sof_set_dai_config(sdev, size, link, config);
if (ret < 0)
@@ -2792,7 +2867,6 @@ static int sof_link_esai_load(struct snd_soc_component *scomp, int index,
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_private *private = &cfg->priv;
- struct sof_ipc_reply reply;
u32 size = sizeof(*config);
int ret;
@@ -2833,16 +2907,6 @@ static int sof_link_esai_load(struct snd_soc_component *scomp, int index,
return -EINVAL;
}
- /* send message to DSP */
- ret = sof_ipc_tx_message(sdev->ipc,
- config->hdr.cmd, config, size, &reply,
- sizeof(reply));
- if (ret < 0) {
- dev_err(scomp->dev, "error: failed to set DAI config for ESAI%d\n",
- config->dai_index);
- return ret;
- }
-
/* set config for all DAI's with name matching the link name */
ret = sof_set_dai_config(sdev, size, link, config);
if (ret < 0)
@@ -2860,18 +2924,12 @@ static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
struct snd_soc_tplg_private *private = &cfg->priv;
- struct sof_ipc_dai_config *ipc_config;
- struct sof_ipc_reply reply;
struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
struct sof_ipc_fw_version *v = &ready->version;
- u32 size;
+ size_t size = sizeof(*config);
int ret, j;
- /*
- * config is only used for the common params in dmic_params structure
- * that does not include the PDM controller config array
- * Set the common params to 0.
- */
+ /* Ensure the entire DMIC config struct is zeroed */
memset(&config->dmic, 0, sizeof(struct sof_ipc_dai_dmic_params));
/* get DMIC tokens */
@@ -2885,34 +2943,20 @@ static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
}
/*
- * allocate memory for dmic dai config accounting for the
- * variable number of active pdm controllers
- * This will be the ipc payload for setting dai config
- */
- size = sizeof(*config) + sizeof(struct sof_ipc_dai_dmic_pdm_ctrl) *
- config->dmic.num_pdm_active;
-
- ipc_config = kzalloc(size, GFP_KERNEL);
- if (!ipc_config)
- return -ENOMEM;
-
- /* copy the common dai config and dmic params */
- memcpy(ipc_config, config, sizeof(*config));
-
- /*
* alloc memory for private member
* Used to track the pdm config array index currently being parsed
*/
sdev->private = kzalloc(sizeof(u32), GFP_KERNEL);
- if (!sdev->private) {
- kfree(ipc_config);
+ if (!sdev->private)
return -ENOMEM;
- }
/* get DMIC PDM tokens */
- ret = sof_parse_tokens(scomp, &ipc_config->dmic.pdm[0], dmic_pdm_tokens,
+ ret = sof_parse_token_sets(scomp, &config->dmic.pdm[0], dmic_pdm_tokens,
ARRAY_SIZE(dmic_pdm_tokens), private->array,
- le32_to_cpu(private->size));
+ le32_to_cpu(private->size),
+ config->dmic.num_pdm_active,
+ sizeof(struct sof_ipc_dai_dmic_pdm_ctrl));
+
if (ret != 0) {
dev_err(scomp->dev, "error: parse dmic pdm tokens failed %d\n",
le32_to_cpu(private->size));
@@ -2920,125 +2964,53 @@ static int sof_link_dmic_load(struct snd_soc_component *scomp, int index,
}
/* set IPC header size */
- ipc_config->hdr.size = size;
+ config->hdr.size = size;
/* debug messages */
dev_dbg(scomp->dev, "tplg: config DMIC%d driver version %d\n",
- ipc_config->dai_index, ipc_config->dmic.driver_ipc_version);
+ config->dai_index, config->dmic.driver_ipc_version);
dev_dbg(scomp->dev, "pdmclk_min %d pdm_clkmax %d duty_min %hd\n",
- ipc_config->dmic.pdmclk_min, ipc_config->dmic.pdmclk_max,
- ipc_config->dmic.duty_min);
+ config->dmic.pdmclk_min, config->dmic.pdmclk_max,
+ config->dmic.duty_min);
dev_dbg(scomp->dev, "duty_max %hd fifo_fs %d num_pdms active %d\n",
- ipc_config->dmic.duty_max, ipc_config->dmic.fifo_fs,
- ipc_config->dmic.num_pdm_active);
- dev_dbg(scomp->dev, "fifo word length %hd\n",
- ipc_config->dmic.fifo_bits);
+ config->dmic.duty_max, config->dmic.fifo_fs,
+ config->dmic.num_pdm_active);
+ dev_dbg(scomp->dev, "fifo word length %hd\n", config->dmic.fifo_bits);
- for (j = 0; j < ipc_config->dmic.num_pdm_active; j++) {
+ for (j = 0; j < config->dmic.num_pdm_active; j++) {
dev_dbg(scomp->dev, "pdm %hd mic a %hd mic b %hd\n",
- ipc_config->dmic.pdm[j].id,
- ipc_config->dmic.pdm[j].enable_mic_a,
- ipc_config->dmic.pdm[j].enable_mic_b);
+ config->dmic.pdm[j].id,
+ config->dmic.pdm[j].enable_mic_a,
+ config->dmic.pdm[j].enable_mic_b);
dev_dbg(scomp->dev, "pdm %hd polarity a %hd polarity b %hd\n",
- ipc_config->dmic.pdm[j].id,
- ipc_config->dmic.pdm[j].polarity_mic_a,
- ipc_config->dmic.pdm[j].polarity_mic_b);
+ config->dmic.pdm[j].id,
+ config->dmic.pdm[j].polarity_mic_a,
+ config->dmic.pdm[j].polarity_mic_b);
dev_dbg(scomp->dev, "pdm %hd clk_edge %hd skew %hd\n",
- ipc_config->dmic.pdm[j].id,
- ipc_config->dmic.pdm[j].clk_edge,
- ipc_config->dmic.pdm[j].skew);
- }
-
- if (SOF_ABI_VER(v->major, v->minor, v->micro) < SOF_ABI_VER(3, 0, 1)) {
- /* this takes care of backwards compatible handling of fifo_bits_b */
- ipc_config->dmic.reserved_2 = ipc_config->dmic.fifo_bits;
+ config->dmic.pdm[j].id,
+ config->dmic.pdm[j].clk_edge,
+ config->dmic.pdm[j].skew);
}
- /* send message to DSP */
- ret = sof_ipc_tx_message(sdev->ipc,
- ipc_config->hdr.cmd, ipc_config, size, &reply,
- sizeof(reply));
-
- if (ret < 0) {
- dev_err(scomp->dev,
- "error: failed to set DAI config for DMIC%d\n",
- config->dai_index);
- goto err;
- }
+ /*
+ * This takes care of backwards-compatible handling of fifo_bits_b,
+ * which has been deprecated since firmware ABI version 3.0.1.
+ */
+ if (SOF_ABI_VER(v->major, v->minor, v->micro) < SOF_ABI_VER(3, 0, 1))
+ config->dmic.fifo_bits_b = config->dmic.fifo_bits;
/* set config for all DAI's with name matching the link name */
- ret = sof_set_dai_config(sdev, size, link, ipc_config);
+ ret = sof_set_dai_config(sdev, size, link, config);
if (ret < 0)
dev_err(scomp->dev, "error: failed to save DAI config for DMIC%d\n",
config->dai_index);
err:
kfree(sdev->private);
- kfree(ipc_config);
return ret;
}
-/*
- * for hda link, playback and capture are supported by different dai
- * in FW. Here get the dai_index, set dma channel of each dai
- * and send config to FW. In FW, each dai sets config by dai_index
- */
-static int sof_link_hda_process(struct snd_sof_dev *sdev,
- struct snd_soc_dai_link *link,
- struct sof_ipc_dai_config *config)
-{
- struct sof_ipc_reply reply;
- u32 size = sizeof(*config);
- struct snd_sof_dai *sof_dai;
- int found = 0;
- int ret;
-
- list_for_each_entry(sof_dai, &sdev->dai_list, list) {
- if (!sof_dai->name)
- continue;
-
- if (strcmp(link->name, sof_dai->name) == 0) {
- config->dai_index = sof_dai->comp_dai.dai_index;
- found = 1;
-
- config->hda.link_dma_ch = DMA_CHAN_INVALID;
-
- /* save config in dai component */
- sof_dai->dai_config = kmemdup(config, size, GFP_KERNEL);
- if (!sof_dai->dai_config)
- return -ENOMEM;
-
- sof_dai->cpu_dai_name = link->cpus->dai_name;
-
- /* send message to DSP */
- ret = sof_ipc_tx_message(sdev->ipc,
- config->hdr.cmd, config, size,
- &reply, sizeof(reply));
-
- if (ret < 0) {
- dev_err(sdev->dev, "error: failed to set DAI config for direction:%d of HDA dai %d\n",
- sof_dai->comp_dai.direction,
- config->dai_index);
-
- return ret;
- }
- }
- }
-
- /*
- * machine driver may define a dai link with playback and capture
- * dai enabled, but the dai link in topology would support both, one
- * or none of them. Here print a warning message to notify user
- */
- if (!found) {
- dev_warn(sdev->dev, "warning: failed to find dai for dai link %s",
- link->name);
- }
-
- return 0;
-}
-
static int sof_link_hda_load(struct snd_soc_component *scomp, int index,
struct snd_soc_dai_link *link,
struct snd_soc_tplg_link_config *cfg,
@@ -3056,7 +3028,7 @@ static int sof_link_hda_load(struct snd_soc_component *scomp, int index,
config->hdr.size = size;
/* get any bespoke DAI tokens */
- ret = sof_parse_tokens(scomp, config, hda_tokens,
+ ret = sof_parse_tokens(scomp, &config->hda, hda_tokens,
ARRAY_SIZE(hda_tokens), private->array,
le32_to_cpu(private->size));
if (ret != 0) {
@@ -3065,6 +3037,9 @@ static int sof_link_hda_load(struct snd_soc_component *scomp, int index,
return ret;
}
+ dev_dbg(scomp->dev, "HDA config rate %d channels %d\n",
+ config->hda.rate, config->hda.channels);
+
dai = snd_soc_find_dai(link->cpus);
if (!dai) {
dev_err(scomp->dev, "error: failed to find dai %s in %s",
@@ -3072,7 +3047,9 @@ static int sof_link_hda_load(struct snd_soc_component *scomp, int index,
return -EINVAL;
}
- ret = sof_link_hda_process(sdev, link, config);
+ config->hda.link_dma_ch = DMA_CHAN_INVALID;
+
+ ret = sof_set_dai_config(sdev, size, link, config);
if (ret < 0)
dev_err(scomp->dev, "error: failed to process hda dai link %s",
link->name);
@@ -3087,24 +3064,22 @@ static int sof_link_alh_load(struct snd_soc_component *scomp, int index,
struct sof_ipc_dai_config *config)
{
struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(scomp);
- struct sof_ipc_reply reply;
+ struct snd_soc_tplg_private *private = &cfg->priv;
u32 size = sizeof(*config);
int ret;
- /* init IPC */
- config->hdr.size = size;
-
- /* send message to DSP */
- ret = sof_ipc_tx_message(sdev->ipc,
- config->hdr.cmd, config, size, &reply,
- sizeof(reply));
-
- if (ret < 0) {
- dev_err(scomp->dev, "error: failed to set DAI config for ALH %d\n",
- config->dai_index);
+ ret = sof_parse_tokens(scomp, &config->alh, alh_tokens,
+ ARRAY_SIZE(alh_tokens), private->array,
+ le32_to_cpu(private->size));
+ if (ret != 0) {
+ dev_err(scomp->dev, "error: parse alh tokens failed %d\n",
+ le32_to_cpu(private->size));
return ret;
}
+ /* init IPC */
+ config->hdr.size = size;
+
/* set config for all DAI's with name matching the link name */
ret = sof_set_dai_config(sdev, size, link, config);
if (ret < 0)
@@ -3139,9 +3114,17 @@ static int sof_link_load(struct snd_soc_component *scomp, int index,
if (!link->no_pcm) {
link->nonatomic = true;
- /* set trigger order */
- link->trigger[0] = SND_SOC_DPCM_TRIGGER_POST;
- link->trigger[1] = SND_SOC_DPCM_TRIGGER_POST;
+ /*
+ * Set the default trigger order for all links; exceptions to
+ * the rule are handled in sof_pcm_dai_link_fixup().
+ * For playback, the sequence is: start FE, start BE,
+ * stop BE, stop FE. For capture, the sequence is inverted:
+ * start BE, start FE, stop FE, stop BE.
+ */
+ link->trigger[SNDRV_PCM_STREAM_PLAYBACK] =
+ SND_SOC_DPCM_TRIGGER_PRE;
+ link->trigger[SNDRV_PCM_STREAM_CAPTURE] =
+ SND_SOC_DPCM_TRIGGER_POST;
/* nothing more to do for FE dai links */
return 0;
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
index d815090252f8..69889241a092 100644
--- a/sound/soc/sof/trace.c
+++ b/sound/soc/sof/trace.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/utils.c b/sound/soc/sof/utils.c
index 9831eb57df6c..5539d3afbe8f 100644
--- a/sound/soc/sof/utils.c
+++ b/sound/soc/sof/utils.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sof/xtensa/Makefile b/sound/soc/sof/xtensa/Makefile
index cc89c7472a38..b8376ea04bcf 100644
--- a/sound/soc/sof/xtensa/Makefile
+++ b/sound/soc/sof/xtensa/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
snd-sof-xtensa-dsp-objs := core.o
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
index ea08651f0bb3..bbb9a2282ed9 100644
--- a/sound/soc/sof/xtensa/core.c
+++ b/sound/soc/sof/xtensa/core.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
diff --git a/sound/soc/sprd/sprd-pcm-compress.c b/sound/soc/sprd/sprd-pcm-compress.c
index 74d48340cade..749dcb7b993b 100644
--- a/sound/soc/sprd/sprd-pcm-compress.c
+++ b/sound/soc/sprd/sprd-pcm-compress.c
@@ -96,7 +96,8 @@ struct sprd_compr_stream {
int stage1_pointer;
};
-static int sprd_platform_compr_trigger(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
int cmd);
static void sprd_platform_compr_drain_notify(void *arg)
@@ -125,15 +126,14 @@ static void sprd_platform_compr_dma_complete(void *data)
snd_compr_fragment_elapsed(cstream);
}
-static int sprd_platform_compr_dma_config(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_dma_config(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_params *params,
int channel)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct sprd_pcm_dma_params *dma_params = data->dma_params;
@@ -261,14 +261,12 @@ sg_err:
return ret;
}
-static int sprd_platform_compr_set_params(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_compr_params compr_params = { };
int ret;
@@ -279,13 +277,13 @@ static int sprd_platform_compr_set_params(struct snd_compr_stream *cstream,
* means once the source channel's transaction is done, it will trigger
* the destination channel's transaction automatically.
*/
- ret = sprd_platform_compr_dma_config(cstream, params, 1);
+ ret = sprd_platform_compr_dma_config(component, cstream, params, 1);
if (ret) {
dev_err(dev, "failed to config stage 1 DMA: %d\n", ret);
return ret;
}
- ret = sprd_platform_compr_dma_config(cstream, params, 0);
+ ret = sprd_platform_compr_dma_config(component, cstream, params, 0);
if (ret) {
dev_err(dev, "failed to config stage 0 DMA: %d\n", ret);
goto config_err;
@@ -314,12 +312,11 @@ config_err:
return ret;
}
-static int sprd_platform_compr_open(struct snd_compr_stream *cstream)
+static int sprd_platform_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
struct sprd_compr_data *data = snd_soc_dai_get_drvdata(asoc_rtd_to_cpu(rtd, 0));
struct sprd_compr_stream *stream;
@@ -392,13 +389,11 @@ err_iram:
return ret;
}
-static int sprd_platform_compr_free(struct snd_compr_stream *cstream)
+static int sprd_platform_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
int stream_id = cstream->direction, i;
@@ -420,14 +415,12 @@ static int sprd_platform_compr_free(struct snd_compr_stream *cstream)
return 0;
}
-static int sprd_platform_compr_trigger(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
int cmd)
{
struct snd_compr_runtime *runtime = cstream->runtime;
struct sprd_compr_stream *stream = runtime->private_data;
- struct snd_soc_pcm_runtime *rtd = cstream->private_data;
- struct snd_soc_component *component =
- snd_soc_rtdcom_lookup(rtd, DRV_NAME);
struct device *dev = component->dev;
int channels = stream->num_channels, ret = 0, i;
int stream_id = cstream->direction;
@@ -518,7 +511,8 @@ static int sprd_platform_compr_trigger(struct snd_compr_stream *cstream,
return ret;
}
-static int sprd_platform_compr_pointer(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp)
{
struct snd_compr_runtime *runtime = cstream->runtime;
@@ -532,7 +526,8 @@ static int sprd_platform_compr_pointer(struct snd_compr_stream *cstream,
return 0;
}
-static int sprd_platform_compr_copy(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
char __user *buf, size_t count)
{
struct snd_compr_runtime *runtime = cstream->runtime;
@@ -609,7 +604,8 @@ copy_done:
return count;
}
-static int sprd_platform_compr_get_caps(struct snd_compr_stream *cstream,
+static int sprd_platform_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_caps *caps)
{
caps->direction = cstream->direction;
@@ -625,7 +621,8 @@ static int sprd_platform_compr_get_caps(struct snd_compr_stream *cstream,
}
static int
-sprd_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
+sprd_platform_compr_get_codec_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_codec_caps *codec)
{
switch (codec->codec) {
@@ -658,7 +655,7 @@ sprd_platform_compr_get_codec_caps(struct snd_compr_stream *cstream,
return 0;
}
-const struct snd_compr_ops sprd_platform_compr_ops = {
+const struct snd_compress_ops sprd_platform_compress_ops = {
.open = sprd_platform_compr_open,
.free = sprd_platform_compr_free,
.set_params = sprd_platform_compr_set_params,
diff --git a/sound/soc/sprd/sprd-pcm-dma.c b/sound/soc/sprd/sprd-pcm-dma.c
index d12d3cad8cbd..5074123f8855 100644
--- a/sound/soc/sprd/sprd-pcm-dma.c
+++ b/sound/soc/sprd/sprd-pcm-dma.c
@@ -515,7 +515,7 @@ static const struct snd_soc_component_driver sprd_soc_component = {
.mmap = sprd_pcm_mmap,
.pcm_construct = sprd_pcm_new,
.pcm_destruct = sprd_pcm_free,
- .compr_ops = &sprd_platform_compr_ops,
+ .compress_ops = &sprd_platform_compress_ops,
};
static int sprd_soc_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/sprd/sprd-pcm-dma.h b/sound/soc/sprd/sprd-pcm-dma.h
index 08e9fdba82f1..be5e385f5e42 100644
--- a/sound/soc/sprd/sprd-pcm-dma.h
+++ b/sound/soc/sprd/sprd-pcm-dma.h
@@ -6,7 +6,7 @@
#define DRV_NAME "sprd_pcm_dma"
#define SPRD_PCM_CHANNEL_MAX 2
-extern const struct snd_compr_ops sprd_platform_compr_ops;
+extern const struct snd_compress_ops sprd_platform_compress_ops;
struct sprd_pcm_dma_params {
dma_addr_t dev_phys[SPRD_PCM_CHANNEL_MAX];
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index ec39ecba1e8b..2839c6cb8c38 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -205,13 +205,11 @@ static int tegra_alc5632_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ goto err_put_cpu_of_node;
}
return 0;
-err_fini_utils:
- tegra_asoc_utils_fini(&alc5632->util_data);
err_put_cpu_of_node:
of_node_put(tegra_alc5632_dai.cpus->of_node);
tegra_alc5632_dai.cpus->of_node = NULL;
@@ -226,12 +224,9 @@ err:
static int tegra_alc5632_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
of_node_put(tegra_alc5632_dai.cpus->of_node);
tegra_alc5632_dai.cpus->of_node = NULL;
tegra_alc5632_dai.platforms->of_node = NULL;
diff --git a/sound/soc/tegra/tegra_asoc_utils.c b/sound/soc/tegra/tegra_asoc_utils.c
index 536a578e9512..587f62a288d1 100644
--- a/sound/soc/tegra/tegra_asoc_utils.c
+++ b/sound/soc/tegra/tegra_asoc_utils.c
@@ -60,8 +60,6 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
data->set_mclk = 0;
clk_disable_unprepare(data->clk_cdev1);
- clk_disable_unprepare(data->clk_pll_a_out0);
- clk_disable_unprepare(data->clk_pll_a);
err = clk_set_rate(data->clk_pll_a, new_baseclock);
if (err) {
@@ -77,18 +75,6 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
/* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
- err = clk_prepare_enable(data->clk_pll_a);
- if (err) {
- dev_err(data->dev, "Can't enable pll_a: %d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(data->clk_pll_a_out0);
- if (err) {
- dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err);
- return err;
- }
-
err = clk_prepare_enable(data->clk_cdev1);
if (err) {
dev_err(data->dev, "Can't enable cdev1: %d\n", err);
@@ -109,8 +95,6 @@ int tegra_asoc_utils_set_ac97_rate(struct tegra_asoc_utils_data *data)
int err;
clk_disable_unprepare(data->clk_cdev1);
- clk_disable_unprepare(data->clk_pll_a_out0);
- clk_disable_unprepare(data->clk_pll_a);
/*
* AC97 rate is fixed at 24.576MHz and is used for both the host
@@ -130,18 +114,6 @@ int tegra_asoc_utils_set_ac97_rate(struct tegra_asoc_utils_data *data)
/* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
- err = clk_prepare_enable(data->clk_pll_a);
- if (err) {
- dev_err(data->dev, "Can't enable pll_a: %d\n", err);
- return err;
- }
-
- err = clk_prepare_enable(data->clk_pll_a_out0);
- if (err) {
- dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err);
- return err;
- }
-
err = clk_prepare_enable(data->clk_cdev1);
if (err) {
dev_err(data->dev, "Can't enable cdev1: %d\n", err);
@@ -158,6 +130,7 @@ EXPORT_SYMBOL_GPL(tegra_asoc_utils_set_ac97_rate);
int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
struct device *dev)
{
+ struct clk *clk_out_1, *clk_extern1;
int ret;
data->dev = dev;
@@ -175,52 +148,78 @@ int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
return -EINVAL;
}
- data->clk_pll_a = clk_get(dev, "pll_a");
+ data->clk_pll_a = devm_clk_get(dev, "pll_a");
if (IS_ERR(data->clk_pll_a)) {
dev_err(data->dev, "Can't retrieve clk pll_a\n");
- ret = PTR_ERR(data->clk_pll_a);
- goto err;
+ return PTR_ERR(data->clk_pll_a);
}
- data->clk_pll_a_out0 = clk_get(dev, "pll_a_out0");
+ data->clk_pll_a_out0 = devm_clk_get(dev, "pll_a_out0");
if (IS_ERR(data->clk_pll_a_out0)) {
dev_err(data->dev, "Can't retrieve clk pll_a_out0\n");
- ret = PTR_ERR(data->clk_pll_a_out0);
- goto err_put_pll_a;
+ return PTR_ERR(data->clk_pll_a_out0);
}
- data->clk_cdev1 = clk_get(dev, "mclk");
+ data->clk_cdev1 = devm_clk_get(dev, "mclk");
if (IS_ERR(data->clk_cdev1)) {
dev_err(data->dev, "Can't retrieve clk cdev1\n");
- ret = PTR_ERR(data->clk_cdev1);
- goto err_put_pll_a_out0;
+ return PTR_ERR(data->clk_cdev1);
}
- ret = tegra_asoc_utils_set_rate(data, 44100, 256 * 44100);
- if (ret)
- goto err_put_cdev1;
+ /*
+ * If clock parents are not set in DT, configure here to use clk_out_1
+ * as mclk and extern1 as parent for Tegra30 and higher.
+ */
+ if (!of_find_property(dev->of_node, "assigned-clock-parents", NULL) &&
+ data->soc > TEGRA_ASOC_UTILS_SOC_TEGRA20) {
+ dev_warn(data->dev,
+ "Configuring clocks for a legacy device-tree\n");
+ dev_warn(data->dev,
+ "Please update DT to use assigned-clock-parents\n");
+ clk_extern1 = devm_clk_get(dev, "extern1");
+ if (IS_ERR(clk_extern1)) {
+ dev_err(data->dev, "Can't retrieve clk extern1\n");
+ return PTR_ERR(clk_extern1);
+ }
+
+ ret = clk_set_parent(clk_extern1, data->clk_pll_a_out0);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "Set parent failed for clk extern1\n");
+ return ret;
+ }
+
+ clk_out_1 = devm_clk_get(dev, "pmc_clk_out_1");
+ if (IS_ERR(clk_out_1)) {
+ dev_err(data->dev, "Can't retrieve pmc_clk_out_1\n");
+ return PTR_ERR(clk_out_1);
+ }
+
+ ret = clk_set_parent(clk_out_1, clk_extern1);
+ if (ret < 0) {
+ dev_err(data->dev,
+ "Set parent failed for pmc_clk_out_1\n");
+ return ret;
+ }
+
+ data->clk_cdev1 = clk_out_1;
+ }
- return 0;
+ /*
+ * FIXME: There is some unknown dependency between audio mclk disable
+ * and suspend-resume functionality on Tegra30, although audio mclk is
+ * only needed for audio.
+ */
+ ret = clk_prepare_enable(data->clk_cdev1);
+ if (ret) {
+ dev_err(data->dev, "Can't enable cdev1: %d\n", ret);
+ return ret;
+ }
-err_put_cdev1:
- clk_put(data->clk_cdev1);
-err_put_pll_a_out0:
- clk_put(data->clk_pll_a_out0);
-err_put_pll_a:
- clk_put(data->clk_pll_a);
-err:
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(tegra_asoc_utils_init);
-void tegra_asoc_utils_fini(struct tegra_asoc_utils_data *data)
-{
- clk_put(data->clk_cdev1);
- clk_put(data->clk_pll_a_out0);
- clk_put(data->clk_pll_a);
-}
-EXPORT_SYMBOL_GPL(tegra_asoc_utils_fini);
-
MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
MODULE_DESCRIPTION("Tegra ASoC utility code");
MODULE_LICENSE("GPL");
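The conversion to devm_clk_get() is what allows the unwinding labels and tegra_asoc_utils_fini() to go away: the clock references are released automatically when the device is unbound, and only the prepare/enable state still needs explicit management. As a hedged sketch, not part of the patch, of the pattern with a hypothetical consumer (the matching clk_disable_unprepare() would then live in the remove/unbind path):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* hypothetical helper showing the devm_clk_get() pattern */
static int example_get_and_enable_mclk(struct device *dev, struct clk **mclk)
{
	struct clk *clk;
	int err;

	/* no matching clk_put() needed: devres releases it on unbind */
	clk = devm_clk_get(dev, "mclk");
	if (IS_ERR(clk)) {
		dev_err(dev, "Can't retrieve clk mclk\n");
		return PTR_ERR(clk);
	}

	/* the enable count is not devres-managed, so it still needs a disable */
	err = clk_prepare_enable(clk);
	if (err) {
		dev_err(dev, "Can't enable mclk: %d\n", err);
		return err;
	}

	*mclk = clk;
	return 0;
}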
diff --git a/sound/soc/tegra/tegra_asoc_utils.h b/sound/soc/tegra/tegra_asoc_utils.h
index 0c13818dee75..a34439587d59 100644
--- a/sound/soc/tegra/tegra_asoc_utils.h
+++ b/sound/soc/tegra/tegra_asoc_utils.h
@@ -34,6 +34,5 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
int tegra_asoc_utils_set_ac97_rate(struct tegra_asoc_utils_data *data);
int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
struct device *dev);
-void tegra_asoc_utils_fini(struct tegra_asoc_utils_data *data);
#endif
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index d800b62b36f8..ec9050516cd7 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -218,19 +218,18 @@ static int tegra_max98090_probe(struct platform_device *pdev)
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
- goto err;
+ return ret;
ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
if (ret)
- goto err;
+ return ret;
tegra_max98090_dai.codecs->of_node = of_parse_phandle(np,
"nvidia,audio-codec", 0);
if (!tegra_max98090_dai.codecs->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_max98090_dai.cpus->of_node = of_parse_phandle(np,
@@ -238,40 +237,31 @@ static int tegra_max98090_probe(struct platform_device *pdev)
if (!tegra_max98090_dai.cpus->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_max98090_dai.platforms->of_node = tegra_max98090_dai.cpus->of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ return ret;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ return ret;
}
return 0;
-
-err_fini_utils:
- tegra_asoc_utils_fini(&machine->util_data);
-err:
- return ret;
}
static int tegra_max98090_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_max98090 *machine = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
return 0;
}
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index 9878bc3eb89e..201d132731f9 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -164,19 +164,18 @@ static int tegra_rt5640_probe(struct platform_device *pdev)
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
- goto err;
+ return ret;
ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
if (ret)
- goto err;
+ return ret;
tegra_rt5640_dai.codecs->of_node = of_parse_phandle(np,
"nvidia,audio-codec", 0);
if (!tegra_rt5640_dai.codecs->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_rt5640_dai.cpus->of_node = of_parse_phandle(np,
@@ -184,40 +183,31 @@ static int tegra_rt5640_probe(struct platform_device *pdev)
if (!tegra_rt5640_dai.cpus->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_rt5640_dai.platforms->of_node = tegra_rt5640_dai.cpus->of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ return ret;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ return ret;
}
return 0;
-
-err_fini_utils:
- tegra_asoc_utils_fini(&machine->util_data);
-err:
- return ret;
}
static int tegra_rt5640_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_rt5640 *machine = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
return 0;
}
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index 5821313db977..8f71e21f6ee9 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -270,13 +270,11 @@ static int tegra_rt5677_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ goto err_put_cpu_of_node;
}
return 0;
-err_fini_utils:
- tegra_asoc_utils_fini(&machine->util_data);
err_put_cpu_of_node:
of_node_put(tegra_rt5677_dai.cpus->of_node);
tegra_rt5677_dai.cpus->of_node = NULL;
@@ -291,12 +289,9 @@ err:
static int tegra_rt5677_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_rt5677 *machine = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
tegra_rt5677_dai.platforms->of_node = NULL;
of_node_put(tegra_rt5677_dai.codecs->of_node);
tegra_rt5677_dai.codecs->of_node = NULL;
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index dc411ba2e36d..692fcc3d7d6e 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -156,13 +156,11 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ goto err_put_cpu_of_node;
}
return 0;
-err_fini_utils:
- tegra_asoc_utils_fini(&machine->util_data);
err_put_cpu_of_node:
of_node_put(tegra_sgtl5000_dai.cpus->of_node);
tegra_sgtl5000_dai.cpus->of_node = NULL;
@@ -177,13 +175,10 @@ err:
static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
int ret;
ret = snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
of_node_put(tegra_sgtl5000_dai.cpus->of_node);
tegra_sgtl5000_dai.cpus->of_node = NULL;
tegra_sgtl5000_dai.platforms->of_node = NULL;
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index 0d653a605358..2ee2ed190872 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -127,19 +127,18 @@ static int tegra_wm8753_driver_probe(struct platform_device *pdev)
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
- goto err;
+ return ret;
ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
if (ret)
- goto err;
+ return ret;
tegra_wm8753_dai.codecs->of_node = of_parse_phandle(np,
"nvidia,audio-codec", 0);
if (!tegra_wm8753_dai.codecs->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_wm8753_dai.cpus->of_node = of_parse_phandle(np,
@@ -147,40 +146,31 @@ static int tegra_wm8753_driver_probe(struct platform_device *pdev)
if (!tegra_wm8753_dai.cpus->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_wm8753_dai.platforms->of_node = tegra_wm8753_dai.cpus->of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ return ret;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ return ret;
}
return 0;
-
-err_fini_utils:
- tegra_asoc_utils_fini(&machine->util_data);
-err:
- return ret;
}
static int tegra_wm8753_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
return 0;
}
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 9b5651502f12..d3ead0213cef 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -177,6 +177,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_component *component = codec_dai->component;
struct snd_soc_card *card = rtd->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
+ int shrt = 0;
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_wm8903_hp_jack_gpio.gpio = machine->gpio_hp_det;
@@ -189,12 +190,15 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
&tegra_wm8903_hp_jack_gpio);
}
+ if (of_property_read_bool(card->dev->of_node, "nvidia,headset"))
+ shrt = SND_JACK_MICROPHONE;
+
snd_soc_card_jack_new(rtd->card, "Mic Jack", SND_JACK_MICROPHONE,
&tegra_wm8903_mic_jack,
tegra_wm8903_mic_jack_pins,
ARRAY_SIZE(tegra_wm8903_mic_jack_pins));
wm8903_mic_detect(component, &tegra_wm8903_mic_jack, SND_JACK_MICROPHONE,
- 0);
+ shrt);
snd_soc_dapm_force_enable_pin(&card->dapm, "MICBIAS");
@@ -319,19 +323,18 @@ static int tegra_wm8903_driver_probe(struct platform_device *pdev)
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
- goto err;
+ return ret;
ret = snd_soc_of_parse_audio_routing(card, "nvidia,audio-routing");
if (ret)
- goto err;
+ return ret;
tegra_wm8903_dai.codecs->of_node = of_parse_phandle(np,
"nvidia,audio-codec", 0);
if (!tegra_wm8903_dai.codecs->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_wm8903_dai.cpus->of_node = of_parse_phandle(np,
@@ -339,41 +342,23 @@ static int tegra_wm8903_driver_probe(struct platform_device *pdev)
if (!tegra_wm8903_dai.cpus->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
tegra_wm8903_dai.platforms->of_node = tegra_wm8903_dai.cpus->of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ return ret;
- ret = snd_soc_register_card(card);
+ ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ dev_err(&pdev->dev, "devm_snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ return ret;
}
return 0;
-
-err_fini_utils:
- tegra_asoc_utils_fini(&machine->util_data);
-err:
- return ret;
-}
-
-static int tegra_wm8903_driver_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
-
- snd_soc_unregister_card(card);
-
- tegra_asoc_utils_fini(&machine->util_data);
-
- return 0;
}
static const struct of_device_id tegra_wm8903_of_match[] = {
@@ -388,7 +373,6 @@ static struct platform_driver tegra_wm8903_driver = {
.of_match_table = tegra_wm8903_of_match,
},
.probe = tegra_wm8903_driver_probe,
- .remove = tegra_wm8903_driver_remove,
};
module_platform_driver(tegra_wm8903_driver);
diff --git a/sound/soc/tegra/tegra_wm9712.c b/sound/soc/tegra/tegra_wm9712.c
index b85bd9f89073..726edfa21a29 100644
--- a/sound/soc/tegra/tegra_wm9712.c
+++ b/sound/soc/tegra/tegra_wm9712.c
@@ -113,19 +113,17 @@ static int tegra_wm9712_driver_probe(struct platform_device *pdev)
ret = tegra_asoc_utils_set_ac97_rate(&machine->util_data);
if (ret)
- goto asoc_utils_fini;
+ goto codec_unregister;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto asoc_utils_fini;
+ goto codec_unregister;
}
return 0;
-asoc_utils_fini:
- tegra_asoc_utils_fini(&machine->util_data);
codec_unregister:
platform_device_del(machine->codec);
codec_put:
@@ -140,8 +138,6 @@ static int tegra_wm9712_driver_remove(struct platform_device *pdev)
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&machine->util_data);
-
platform_device_unregister(machine->codec);
return 0;
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index f9834afaa2e8..6dca6836aa04 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -125,8 +125,7 @@ static int tegra_snd_trimslice_probe(struct platform_device *pdev)
if (!trimslice_tlv320aic23_dai.codecs->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,audio-codec' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
trimslice_tlv320aic23_dai.cpus->of_node = of_parse_phandle(np,
@@ -134,8 +133,7 @@ static int tegra_snd_trimslice_probe(struct platform_device *pdev)
if (!trimslice_tlv320aic23_dai.cpus->of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
trimslice_tlv320aic23_dai.platforms->of_node =
@@ -143,32 +141,24 @@ static int tegra_snd_trimslice_probe(struct platform_device *pdev)
ret = tegra_asoc_utils_init(&trimslice->util_data, &pdev->dev);
if (ret)
- goto err;
+ return ret;
ret = snd_soc_register_card(card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
ret);
- goto err_fini_utils;
+ return ret;
}
return 0;
-
-err_fini_utils:
- tegra_asoc_utils_fini(&trimslice->util_data);
-err:
- return ret;
}
static int tegra_snd_trimslice_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card);
snd_soc_unregister_card(card);
- tegra_asoc_utils_fini(&trimslice->util_data);
-
return 0;
}
diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 734ffe925c4d..b93c1ee302c0 100644
--- a/sound/soc/ti/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -1577,7 +1577,7 @@ static void davinci_mcasp_shutdown(struct snd_pcm_substream *substream,
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
return;
- if (!cpu_dai->active) {
+ if (!snd_soc_dai_active(cpu_dai)) {
mcasp->channels = 0;
mcasp->max_format_width = 0;
}
@@ -1896,8 +1896,10 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
PTR_ERR(chan));
return PTR_ERR(chan);
}
- if (WARN_ON(!chan->device || !chan->device->dev))
+ if (WARN_ON(!chan->device || !chan->device->dev)) {
+ dma_release_channel(chan);
return -EINVAL;
+ }
if (chan->device->dev->of_node)
ret = of_property_read_string(chan->device->dev->of_node,
diff --git a/sound/soc/ti/omap-dmic.c b/sound/soc/ti/omap-dmic.c
index 913579c43e9d..01abf1be5d78 100644
--- a/sound/soc/ti/omap-dmic.c
+++ b/sound/soc/ti/omap-dmic.c
@@ -95,7 +95,7 @@ static int omap_dmic_dai_startup(struct snd_pcm_substream *substream,
mutex_lock(&dmic->mutex);
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
dmic->active = 1;
else
ret = -EBUSY;
@@ -114,7 +114,7 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
cpu_latency_qos_remove_request(&dmic->pm_qos_req);
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
dmic->active = 0;
mutex_unlock(&dmic->mutex);
diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
index 3d41ca2238d4..32e3ccdbb7a2 100644
--- a/sound/soc/ti/omap-mcbsp.c
+++ b/sound/soc/ti/omap-mcbsp.c
@@ -77,18 +77,15 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
pm_runtime_put_sync(mcbsp->dev);
r = clk_set_parent(mcbsp->fclk, fck_src);
- if (r) {
+ if (r)
dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
src);
- clk_put(fck_src);
- return r;
- }
pm_runtime_get_sync(mcbsp->dev);
clk_put(fck_src);
- return 0;
+ return r;
}
static irqreturn_t omap_mcbsp_irq_handler(int irq, void *data)
@@ -686,7 +683,7 @@ static int omap_mcbsp_init(struct platform_device *pdev)
mcbsp->dma_data[1].addr = omap_mcbsp_dma_reg_params(mcbsp,
SNDRV_PCM_STREAM_CAPTURE);
- mcbsp->fclk = clk_get(&pdev->dev, "fck");
+ mcbsp->fclk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(mcbsp->fclk)) {
ret = PTR_ERR(mcbsp->fclk);
dev_err(mcbsp->dev, "unable to get fck: %d\n", ret);
@@ -711,7 +708,7 @@ static int omap_mcbsp_init(struct platform_device *pdev)
if (ret) {
dev_err(mcbsp->dev,
"Unable to create additional controls\n");
- goto err_thres;
+ return ret;
}
}
@@ -724,8 +721,6 @@ static int omap_mcbsp_init(struct platform_device *pdev)
err_st:
if (mcbsp->pdata->buffer_size)
sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
-err_thres:
- clk_put(mcbsp->fclk);
return ret;
}
@@ -788,7 +783,7 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int err = 0;
- if (!cpu_dai->active)
+ if (!snd_soc_dai_active(cpu_dai))
err = omap_mcbsp_request(mcbsp);
/*
@@ -843,7 +838,7 @@ static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream,
mcbsp->latency[stream1] = 0;
- if (!cpu_dai->active) {
+ if (!snd_soc_dai_active(cpu_dai)) {
omap_mcbsp_free(mcbsp);
mcbsp->configured = 0;
}
@@ -1185,7 +1180,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
default:
return -EINVAL;
}
- if (inv_fs == true)
+ if (inv_fs)
regs->pcr0 ^= FSXP | FSRP;
return 0;
@@ -1442,8 +1437,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)
omap_mcbsp_st_cleanup(pdev);
- clk_put(mcbsp->fclk);
-
return 0;
}
diff --git a/sound/soc/ti/omap-mcpdm.c b/sound/soc/ti/omap-mcpdm.c
index f2dbadea33bb..d482b62f314a 100644
--- a/sound/soc/ti/omap-mcpdm.c
+++ b/sound/soc/ti/omap-mcpdm.c
@@ -253,7 +253,7 @@ static int omap_mcpdm_dai_startup(struct snd_pcm_substream *substream,
mutex_lock(&mcpdm->mutex);
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
omap_mcpdm_open_streams(mcpdm);
mutex_unlock(&mcpdm->mutex);
@@ -271,7 +271,7 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
mutex_lock(&mcpdm->mutex);
- if (!dai->active) {
+ if (!snd_soc_dai_active(dai)) {
if (omap_mcpdm_active(mcpdm)) {
omap_mcpdm_stop(mcpdm);
omap_mcpdm_close_streams(mcpdm);
@@ -462,7 +462,7 @@ static int omap_mcpdm_suspend(struct snd_soc_component *component)
{
struct omap_mcpdm *mcpdm = snd_soc_component_get_drvdata(component);
- if (component->active) {
+ if (snd_soc_component_active(component)) {
omap_mcpdm_stop(mcpdm);
omap_mcpdm_close_streams(mcpdm);
}
@@ -484,7 +484,7 @@ static int omap_mcpdm_resume(struct snd_soc_component *component)
while (mcpdm->pm_active_count--)
pm_runtime_get_sync(mcpdm->dev);
- if (component->active) {
+ if (snd_soc_component_active(component)) {
omap_mcpdm_open_streams(mcpdm);
omap_mcpdm_start(mcpdm);
}
diff --git a/sound/soc/uniphier/aio-compress.c b/sound/soc/uniphier/aio-compress.c
index 232d3cc5bce0..0f76bc601ca9 100644
--- a/sound/soc/uniphier/aio-compress.c
+++ b/sound/soc/uniphier/aio-compress.c
@@ -16,8 +16,10 @@
#include "aio.h"
-static int uniphier_aio_compr_prepare(struct snd_compr_stream *cstream);
-static int uniphier_aio_compr_hw_free(struct snd_compr_stream *cstream);
+static int uniphier_aio_compr_prepare(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream);
+static int uniphier_aio_compr_hw_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream);
static int uniphier_aio_comprdma_new(struct snd_soc_pcm_runtime *rtd)
{
@@ -70,7 +72,8 @@ static int uniphier_aio_comprdma_free(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static int uniphier_aio_compr_open(struct snd_compr_stream *cstream)
+static int uniphier_aio_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
@@ -95,14 +98,15 @@ static int uniphier_aio_compr_open(struct snd_compr_stream *cstream)
return 0;
}
-static int uniphier_aio_compr_free(struct snd_compr_stream *cstream)
+static int uniphier_aio_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
struct uniphier_aio_sub *sub = &aio->sub[cstream->direction];
int ret;
- ret = uniphier_aio_compr_hw_free(cstream);
+ ret = uniphier_aio_compr_hw_free(component, cstream);
if (ret)
return ret;
ret = uniphier_aio_comprdma_free(rtd);
@@ -114,7 +118,8 @@ static int uniphier_aio_compr_free(struct snd_compr_stream *cstream)
return 0;
}
-static int uniphier_aio_compr_get_params(struct snd_compr_stream *cstream,
+static int uniphier_aio_compr_get_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_codec *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -126,7 +131,8 @@ static int uniphier_aio_compr_get_params(struct snd_compr_stream *cstream,
return 0;
}
-static int uniphier_aio_compr_set_params(struct snd_compr_stream *cstream,
+static int uniphier_aio_compr_set_params(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -155,14 +161,15 @@ static int uniphier_aio_compr_set_params(struct snd_compr_stream *cstream,
aio_port_reset(sub);
aio_src_reset(sub);
- ret = uniphier_aio_compr_prepare(cstream);
+ ret = uniphier_aio_compr_prepare(component, cstream);
if (ret)
return ret;
return 0;
}
-static int uniphier_aio_compr_hw_free(struct snd_compr_stream *cstream)
+static int uniphier_aio_compr_hw_free(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct uniphier_aio *aio = uniphier_priv(asoc_rtd_to_cpu(rtd, 0));
@@ -173,7 +180,8 @@ static int uniphier_aio_compr_hw_free(struct snd_compr_stream *cstream)
return 0;
}
-static int uniphier_aio_compr_prepare(struct snd_compr_stream *cstream)
+static int uniphier_aio_compr_prepare(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
struct snd_compr_runtime *runtime = cstream->runtime;
@@ -210,7 +218,8 @@ static int uniphier_aio_compr_prepare(struct snd_compr_stream *cstream)
return 0;
}
-static int uniphier_aio_compr_trigger(struct snd_compr_stream *cstream,
+static int uniphier_aio_compr_trigger(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
int cmd)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -243,7 +252,8 @@ static int uniphier_aio_compr_trigger(struct snd_compr_stream *cstream,
return ret;
}
-static int uniphier_aio_compr_pointer(struct snd_compr_stream *cstream,
+static int uniphier_aio_compr_pointer(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_tstamp *tstamp)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -316,7 +326,8 @@ static int aio_compr_send_to_hw(struct uniphier_aio_sub *sub,
return 0;
}
-static int uniphier_aio_compr_copy(struct snd_compr_stream *cstream,
+static int uniphier_aio_compr_copy(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
char __user *buf, size_t count)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -375,7 +386,8 @@ static int uniphier_aio_compr_copy(struct snd_compr_stream *cstream,
return cnt;
}
-static int uniphier_aio_compr_get_caps(struct snd_compr_stream *cstream,
+static int uniphier_aio_compr_get_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *cstream,
struct snd_compr_caps *caps)
{
caps->num_codecs = 1;
@@ -401,7 +413,8 @@ static const struct snd_compr_codec_caps caps_iec = {
.descriptor[0].formats = 0,
};
-static int uniphier_aio_compr_get_codec_caps(struct snd_compr_stream *stream,
+static int uniphier_aio_compr_get_codec_caps(struct snd_soc_component *component,
+ struct snd_compr_stream *stream,
struct snd_compr_codec_caps *codec)
{
if (codec->codec == SND_AUDIOCODEC_IEC61937)
@@ -412,7 +425,7 @@ static int uniphier_aio_compr_get_codec_caps(struct snd_compr_stream *stream,
return 0;
}
-const struct snd_compr_ops uniphier_aio_compr_ops = {
+const struct snd_compress_ops uniphier_aio_compress_ops = {
.open = uniphier_aio_compr_open,
.free = uniphier_aio_compr_free,
.get_params = uniphier_aio_compr_get_params,
diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
index fdaa6522720f..25c40c28eba4 100644
--- a/sound/soc/uniphier/aio-cpu.c
+++ b/sound/soc/uniphier/aio-cpu.c
@@ -424,7 +424,7 @@ static void uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
{
struct uniphier_aio *aio = uniphier_priv(dai);
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
return;
aio->chip->num_wup_aios--;
@@ -448,7 +448,7 @@ static int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
struct uniphier_aio *aio = uniphier_priv(dai);
int ret, i;
- if (!dai->active)
+ if (!snd_soc_dai_active(dai))
return 0;
if (!aio->chip->active)
diff --git a/sound/soc/uniphier/aio-dma.c b/sound/soc/uniphier/aio-dma.c
index 4bbcb007df41..d6bcd476df12 100644
--- a/sound/soc/uniphier/aio-dma.c
+++ b/sound/soc/uniphier/aio-dma.c
@@ -227,7 +227,7 @@ static const struct snd_soc_component_driver uniphier_soc_platform = {
.pointer = uniphier_aiodma_pointer,
.mmap = uniphier_aiodma_mmap,
.pcm_construct = uniphier_aiodma_new,
- .compr_ops = &uniphier_aio_compr_ops,
+ .compress_ops = &uniphier_aio_compress_ops,
};
static const struct regmap_config aiodma_regmap_config = {
diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
index 694ac030950e..0b03571aa9f0 100644
--- a/sound/soc/uniphier/aio.h
+++ b/sound/soc/uniphier/aio.h
@@ -304,7 +304,7 @@ static inline struct uniphier_aio *uniphier_priv(struct snd_soc_dai *dai)
}
int uniphier_aiodma_soc_register_platform(struct platform_device *pdev);
-extern const struct snd_compr_ops uniphier_aio_compr_ops;
+extern const struct snd_compress_ops uniphier_aio_compress_ops;
int uniphier_aio_dai_probe(struct snd_soc_dai *dai);
int uniphier_aio_dai_remove(struct snd_soc_dai *dai);
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 2873e8e6f02b..cdae1190b930 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -63,10 +63,11 @@ static void mop500_of_node_put(void)
{
int i;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 2; i++)
of_node_put(mop500_dai_links[i].cpus->of_node);
- of_node_put(mop500_dai_links[i].codecs->of_node);
- }
+
+ /* Both links use the same codec, which is refcounted only once */
+ of_node_put(mop500_dai_links[0].codecs->of_node);
}
static int mop500_of_probe(struct platform_device *pdev,
@@ -81,7 +82,9 @@ static int mop500_of_probe(struct platform_device *pdev,
if (!(msp_np[0] && msp_np[1] && codec_np)) {
dev_err(&pdev->dev, "Phandle missing or invalid\n");
- mop500_of_node_put();
+ for (i = 0; i < 2; i++)
+ of_node_put(msp_np[i]);
+ of_node_put(codec_np);
return -EINVAL;
}
diff --git a/sound/usb/card.c b/sound/usb/card.c
index fd6fd1726ea0..162bdd6eb4d4 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -634,7 +634,6 @@ static int usb_audio_probe(struct usb_interface *intf,
id, &chip);
if (err < 0)
goto __error;
- chip->pm_intf = intf;
break;
} else if (vid[i] != -1 || pid[i] != -1) {
dev_info(&dev->dev,
@@ -651,6 +650,13 @@ static int usb_audio_probe(struct usb_interface *intf,
goto __error;
}
}
+
+ if (chip->num_interfaces >= MAX_CARD_INTERFACES) {
+ dev_info(&dev->dev, "Too many interfaces assigned to the single USB-audio card\n");
+ err = -EINVAL;
+ goto __error;
+ }
+
dev_set_drvdata(&dev->dev, chip);
/*
@@ -703,6 +709,7 @@ static int usb_audio_probe(struct usb_interface *intf,
}
usb_chip[chip->index] = chip;
+ chip->intf[chip->num_interfaces] = intf;
chip->num_interfaces++;
usb_set_intfdata(intf, chip);
atomic_dec(&chip->active);
@@ -818,19 +825,37 @@ void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
int snd_usb_autoresume(struct snd_usb_audio *chip)
{
+ int i, err;
+
if (atomic_read(&chip->shutdown))
return -EIO;
- if (atomic_inc_return(&chip->active) == 1)
- return usb_autopm_get_interface(chip->pm_intf);
+ if (atomic_inc_return(&chip->active) != 1)
+ return 0;
+
+ for (i = 0; i < chip->num_interfaces; i++) {
+ err = usb_autopm_get_interface(chip->intf[i]);
+ if (err < 0) {
+ /* rollback */
+ while (--i >= 0)
+ usb_autopm_put_interface(chip->intf[i]);
+ atomic_dec(&chip->active);
+ return err;
+ }
+ }
return 0;
}
void snd_usb_autosuspend(struct snd_usb_audio *chip)
{
+ int i;
+
if (atomic_read(&chip->shutdown))
return;
- if (atomic_dec_and_test(&chip->active))
- usb_autopm_put_interface(chip->pm_intf);
+ if (!atomic_dec_and_test(&chip->active))
+ return;
+
+ for (i = 0; i < chip->num_interfaces; i++)
+ usb_autopm_put_interface(chip->intf[i]);
}
static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
@@ -843,9 +868,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
if (chip == (void *)-1L)
return 0;
- chip->autosuspended = !!PMSG_IS_AUTO(message);
- if (!chip->autosuspended)
- snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
if (!chip->num_suspended_intf++) {
list_for_each_entry(as, &chip->pcm_list, list) {
snd_usb_pcm_suspend(as);
@@ -858,6 +880,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
snd_usb_mixer_suspend(mixer);
}
+ if (!PMSG_IS_AUTO(message) && !chip->system_suspend) {
+ snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
+ chip->system_suspend = chip->num_suspended_intf;
+ }
+
return 0;
}
@@ -871,10 +898,10 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
if (chip == (void *)-1L)
return 0;
- if (--chip->num_suspended_intf)
- return 0;
atomic_inc(&chip->active); /* avoid autopm */
+ if (chip->num_suspended_intf > 1)
+ goto out;
list_for_each_entry(as, &chip->pcm_list, list) {
err = snd_usb_pcm_resume(as);
@@ -896,9 +923,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
snd_usbmidi_resume(p);
}
- if (!chip->autosuspended)
+ out:
+ if (chip->num_suspended_intf == chip->system_suspend) {
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0);
- chip->autosuspended = 0;
+ chip->system_suspend = 0;
+ }
+ chip->num_suspended_intf--;
err_out:
atomic_dec(&chip->active); /* allow autopm after this point */
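The snd_usb_autoresume() change above moves from resuming a single pm_intf to resuming every interface bound to the card, undoing the already-acquired ones if a later usb_autopm_get_interface() call fails. Below is a minimal stand-alone sketch of that acquire-all-or-roll-back loop; it is illustration only, and the fake_get()/fake_put() helpers are invented for the example, not part of the patch or of the USB API.

#include <stdio.h>

#define NUM_IFACES 3

/* stand-in for usb_autopm_get_interface(); pretend interface 2 fails */
static int fake_get(int i)
{
	return i == 2 ? -1 : 0;
}

/* stand-in for usb_autopm_put_interface() */
static void fake_put(int i)
{
	printf("rolled back interface %d\n", i);
}

static int get_all(void)
{
	int i, err;

	for (i = 0; i < NUM_IFACES; i++) {
		err = fake_get(i);
		if (err < 0) {
			/* undo the interfaces acquired so far, newest first */
			while (--i >= 0)
				fake_put(i);
			return err;
		}
	}
	return 0;
}

int main(void)
{
	printf("get_all() = %d\n", get_all());
	return 0;
}

Either every interface ends up resumed or none does, which is why the failure path also drops the chip->active reference before returning.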
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 395403a2d33f..d6219fba9699 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -84,6 +84,10 @@ struct snd_usb_endpoint {
dma_addr_t sync_dma; /* DMA address of syncbuf */
unsigned int pipe; /* the data i/o pipe */
+ unsigned int framesize[2]; /* small/large frame sizes in samples */
+ unsigned int sample_rem; /* remainder from division fs/fps */
+ unsigned int sample_accum; /* sample accumulator */
+ unsigned int fps; /* frames per second */
unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */
unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */
int freqshift; /* how much to shift the feedback value to get Q16.16 */
@@ -104,6 +108,7 @@ struct snd_usb_endpoint {
int iface, altsetting;
int skip_packets; /* quirks for devices to ignore the first n packets
in a stream */
+ bool is_implicit_feedback; /* This endpoint is used as implicit feedback */
spinlock_t lock;
struct list_head list;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 4a9a2f6ef5a4..9bea7d3f99f8 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -124,12 +124,12 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
/*
* For streaming based on information derived from sync endpoints,
- * prepare_outbound_urb_sizes() will call next_packet_size() to
+ * prepare_outbound_urb_sizes() will call slave_next_packet_size() to
* determine the number of samples to be sent in the next packet.
*
- * For implicit feedback, next_packet_size() is unused.
+ * For implicit feedback, slave_next_packet_size() is unused.
*/
-int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep)
{
unsigned long flags;
int ret;
@@ -146,6 +146,29 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
return ret;
}
+/*
+ * For adaptive and synchronous endpoints, prepare_outbound_urb_sizes()
+ * will call next_packet_size() to determine the number of samples to be
+ * sent in the next packet.
+ */
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
+{
+ int ret;
+
+ if (ep->fill_max)
+ return ep->maxframesize;
+
+ ep->sample_accum += ep->sample_rem;
+ if (ep->sample_accum >= ep->fps) {
+ ep->sample_accum -= ep->fps;
+ ret = ep->framesize[1];
+ } else {
+ ret = ep->framesize[0];
+ }
+
+ return ret;
+}
+
static void retire_outbound_urb(struct snd_usb_endpoint *ep,
struct snd_urb_ctx *urb_ctx)
{
@@ -190,6 +213,8 @@ static void prepare_silent_urb(struct snd_usb_endpoint *ep,
if (ctx->packet_size[i])
counts = ctx->packet_size[i];
+ else if (ep->sync_master)
+ counts = snd_usb_endpoint_slave_next_packet_size(ep);
else
counts = snd_usb_endpoint_next_packet_size(ep);
@@ -321,17 +346,17 @@ static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
ep->next_packet_read_pos %= MAX_URBS;
/* take URB out of FIFO */
- if (!list_empty(&ep->ready_playback_urbs))
+ if (!list_empty(&ep->ready_playback_urbs)) {
ctx = list_first_entry(&ep->ready_playback_urbs,
struct snd_urb_ctx, ready_list);
+ list_del_init(&ctx->ready_list);
+ }
}
spin_unlock_irqrestore(&ep->lock, flags);
if (ctx == NULL)
return;
- list_del_init(&ctx->ready_list);
-
/* copy over the length information */
for (i = 0; i < packet->packets; i++)
ctx->packet_size[i] = packet->packet_size[i];
@@ -497,6 +522,8 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
list_add_tail(&ep->list, &chip->ep_list);
+ ep->is_implicit_feedback = 0;
+
__exit_unlock:
mutex_unlock(&chip->mutex);
@@ -597,6 +624,178 @@ static void release_urbs(struct snd_usb_endpoint *ep, int force)
}
/*
+ * Check data endpoint for format differences
+ */
+static bool check_ep_params(struct snd_usb_endpoint *ep,
+ snd_pcm_format_t pcm_format,
+ unsigned int channels,
+ unsigned int period_bytes,
+ unsigned int frames_per_period,
+ unsigned int periods_per_buffer,
+ struct audioformat *fmt,
+ struct snd_usb_endpoint *sync_ep)
+{
+ unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
+ unsigned int max_packs_per_period, urbs_per_period, urb_packs;
+ unsigned int max_urbs;
+ int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;
+ int tx_length_quirk = (ep->chip->tx_length_quirk &&
+ usb_pipeout(ep->pipe));
+ bool ret = 1;
+
+ if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
+ /*
+ * When operating in DSD DOP mode, the size of a sample frame
+ * in hardware differs from the actual physical format width
+ * because we need to make room for the DOP markers.
+ */
+ frame_bits += channels << 3;
+ }
+
+ ret = ret && (ep->datainterval == fmt->datainterval);
+ ret = ret && (ep->stride == frame_bits >> 3);
+
+ switch (pcm_format) {
+ case SNDRV_PCM_FORMAT_U8:
+ ret = ret && (ep->silence_value == 0x80);
+ break;
+ case SNDRV_PCM_FORMAT_DSD_U8:
+ case SNDRV_PCM_FORMAT_DSD_U16_LE:
+ case SNDRV_PCM_FORMAT_DSD_U32_LE:
+ case SNDRV_PCM_FORMAT_DSD_U16_BE:
+ case SNDRV_PCM_FORMAT_DSD_U32_BE:
+ ret = ret && (ep->silence_value == 0x69);
+ break;
+ default:
+ ret = ret && (ep->silence_value == 0);
+ }
+
+ /* assume max. frequency is 50% higher than nominal */
+ ret = ret && (ep->freqmax == ep->freqn + (ep->freqn >> 1));
+ /* Round up freqmax to nearest integer in order to calculate maximum
+ * packet size, which must represent a whole number of frames.
+ * This is accomplished by adding 0x0.ffff before converting the
+ * Q16.16 format into integer.
+ * In order to accurately calculate the maximum packet size when
+ * the data interval is more than 1 (i.e. ep->datainterval > 0),
+ * multiply by the data interval prior to rounding. For instance,
+ * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
+ * frames with a data interval of 1, but 11 (10.25) frames with a
+ * data interval of 2.
+ * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
+ * maximum datainterval value of 3, at USB full speed, higher for
+ * USB high speed, noting that ep->freqmax is in units of
+ * frames per packet in Q16.16 format.)
+ */
+ maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
+ (frame_bits >> 3);
+ if (tx_length_quirk)
+ maxsize += sizeof(__le32); /* Space for length descriptor */
+ /* but wMaxPacketSize might reduce this */
+ if (ep->maxpacksize && ep->maxpacksize < maxsize) {
+ /* whatever fits into a max. size packet */
+ unsigned int data_maxsize = maxsize = ep->maxpacksize;
+
+ if (tx_length_quirk)
+ /* Need to remove the length descriptor to calc freq */
+ data_maxsize -= sizeof(__le32);
+ ret = ret && (ep->freqmax == (data_maxsize / (frame_bits >> 3))
+ << (16 - ep->datainterval));
+ }
+
+ if (ep->fill_max)
+ ret = ret && (ep->curpacksize == ep->maxpacksize);
+ else
+ ret = ret && (ep->curpacksize == maxsize);
+
+ if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
+ packs_per_ms = 8 >> ep->datainterval;
+ max_packs_per_urb = MAX_PACKS_HS;
+ } else {
+ packs_per_ms = 1;
+ max_packs_per_urb = MAX_PACKS;
+ }
+ if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
+ max_packs_per_urb = min(max_packs_per_urb,
+ 1U << sync_ep->syncinterval);
+ max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);
+
+ /*
+ * Capture endpoints need to use small URBs because there's no way
+ * to tell in advance where the next period will end, and we don't
+ * want the next URB to complete much after the period ends.
+ *
+ * Playback endpoints with implicit sync must use the same parameters
+ * as their corresponding capture endpoint.
+ */
+ if (usb_pipein(ep->pipe) ||
+ snd_usb_endpoint_implicit_feedback_sink(ep)) {
+
+ urb_packs = packs_per_ms;
+ /*
+ * Wireless devices can poll at a max rate of once per 4ms.
+ * For dataintervals less than 5, increase the packet count to
+ * allow the host controller to use bursting to fill in the
+ * gaps.
+ */
+ if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
+ int interval = ep->datainterval;
+
+ while (interval < 5) {
+ urb_packs <<= 1;
+ ++interval;
+ }
+ }
+ /* make capture URBs <= 1 ms and smaller than a period */
+ urb_packs = min(max_packs_per_urb, urb_packs);
+ while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
+ urb_packs >>= 1;
+ ret = ret && (ep->nurbs == MAX_URBS);
+
+ /*
+ * Playback endpoints without implicit sync are adjusted so that
+ * a period fits as evenly as possible in the smallest number of
+ * URBs. The total number of URBs is adjusted to the size of the
+ * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
+ */
+ } else {
+ /* determine how small a packet can be */
+ minsize = (ep->freqn >> (16 - ep->datainterval)) *
+ (frame_bits >> 3);
+ /* with sync from device, assume it can be 12% lower */
+ if (sync_ep)
+ minsize -= minsize >> 3;
+ minsize = max(minsize, 1u);
+
+ /* how many packets will contain an entire ALSA period? */
+ max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);
+
+ /* how many URBs will contain a period? */
+ urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
+ max_packs_per_urb);
+ /* how many packets are needed in each URB? */
+ urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);
+
+ /* limit the number of frames in a single URB */
+ ret = ret && (ep->max_urb_frames ==
+ DIV_ROUND_UP(frames_per_period, urbs_per_period));
+
+ /* try to use enough URBs to contain an entire ALSA buffer */
+ max_urbs = min((unsigned) MAX_URBS,
+ MAX_QUEUE * packs_per_ms / urb_packs);
+ ret = ret && (ep->nurbs == min(max_urbs,
+ urbs_per_period * periods_per_buffer));
+ }
+
+ ret = ret && (ep->datainterval == fmt->datainterval);
+ ret = ret && (ep->maxpacksize == fmt->maxpacksize);
+ ret = ret &&
+ (ep->fill_max == !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX));
+
+ return ret;
+}
+
+/*
* configure a data endpoint
*/
static int data_ep_set_params(struct snd_usb_endpoint *ep,
@@ -861,10 +1060,23 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
int err;
if (ep->use_count != 0) {
- usb_audio_warn(ep->chip,
- "Unable to change format on ep #%x: already in use\n",
- ep->ep_num);
- return -EBUSY;
+ bool check = ep->is_implicit_feedback &&
+ check_ep_params(ep, pcm_format,
+ channels, period_bytes,
+ period_frames, buffer_periods,
+ fmt, sync_ep);
+
+ if (!check) {
+ usb_audio_warn(ep->chip,
+ "Unable to change format on ep #%x: already in use\n",
+ ep->ep_num);
+ return -EBUSY;
+ }
+
+ usb_audio_dbg(ep->chip,
+ "Ep #%x already in use as implicit feedback but format not changed\n",
+ ep->ep_num);
+ return 0;
}
/* release old buffers, if any */
@@ -874,10 +1086,17 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
ep->maxpacksize = fmt->maxpacksize;
ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);
- if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL)
+ if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
ep->freqn = get_usb_full_speed_rate(rate);
- else
+ ep->fps = 1000;
+ } else {
ep->freqn = get_usb_high_speed_rate(rate);
+ ep->fps = 8000;
+ }
+
+ ep->sample_rem = rate % ep->fps;
+ ep->framesize[0] = rate / ep->fps;
+ ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps;
/* calculate the frequency in 16.16 format */
ep->freqm = ep->freqn;
@@ -936,6 +1155,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
ep->active_mask = 0;
ep->unlink_mask = 0;
ep->phase = 0;
+ ep->sample_accum = 0;
snd_usb_endpoint_start_quirk(ep);
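The new snd_usb_endpoint_next_packet_size() above spreads a non-integer frames-per-packet rate across packets with a remainder accumulator: sample_rem = rate % fps is added on every packet, and a packet of framesize[1] frames is emitted whenever the accumulator wraps past fps. The stand-alone sketch below (assuming 44.1 kHz on high-speed USB, i.e. 8000 packets per second) shows the accumulator producing a mix of 5- and 6-frame packets that sums back to exactly 44100 frames per second; all variable names are local to the example, not driver fields.

#include <stdio.h>

int main(void)
{
	unsigned int rate = 44100, fps = 8000;       /* high-speed USB: 8000 packets/s */
	unsigned int small = rate / fps;             /* 5 frames, like framesize[0] */
	unsigned int large = (rate + fps - 1) / fps; /* 6 frames, like framesize[1] */
	unsigned int rem = rate % fps;               /* 4100, like sample_rem */
	unsigned int accum = 0, total = 0, i;

	for (i = 0; i < fps; i++) {                  /* one second worth of packets */
		accum += rem;
		if (accum >= fps) {
			accum -= fps;
			total += large;
		} else {
			total += small;
		}
	}
	printf("frames sent in 1 s: %u\n", total);   /* prints 44100 */
	return 0;
}

For rates that divide evenly, such as 48 kHz, sample_rem is zero and the accumulator never wraps, so every packet carries framesize[0] frames.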
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 63a39d4fa8d8..d23fa0a8c11b 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -28,6 +28,7 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep);
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 4f096685ed65..7629116f570e 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -550,6 +550,7 @@ static int line6_hwdep_open(struct snd_hwdep *hw, struct file *file)
/* NOTE: hwdep layer provides atomicity here */
line6->messages.active = 1;
+ line6->messages.nonblock = file->f_flags & O_NONBLOCK ? 1 : 0;
return 0;
}
@@ -579,6 +580,9 @@ line6_hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
while (kfifo_len(&line6->messages.fifo) == 0) {
mutex_unlock(&line6->messages.read_lock);
+ if (line6->messages.nonblock)
+ return -EAGAIN;
+
rv = wait_event_interruptible(
line6->messages.wait_queue,
kfifo_len(&line6->messages.fifo) != 0);
@@ -626,11 +630,27 @@ line6_hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count,
return rv;
}
+static __poll_t
+line6_hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
+{
+ __poll_t rv;
+ struct usb_line6 *line6 = hwdep->private_data;
+
+ poll_wait(file, &line6->messages.wait_queue, wait);
+
+ mutex_lock(&line6->messages.read_lock);
+ rv = kfifo_len(&line6->messages.fifo) == 0 ? 0 : EPOLLIN | EPOLLRDNORM;
+ mutex_unlock(&line6->messages.read_lock);
+
+ return rv;
+}
+
static const struct snd_hwdep_ops hwdep_ops = {
.open = line6_hwdep_open,
.release = line6_hwdep_release,
.read = line6_hwdep_read,
.write = line6_hwdep_write,
+ .poll = line6_hwdep_poll,
};
/* Insert into circular buffer */
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index e5e572ed5f30..1a4e3700c80c 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -163,6 +163,7 @@ struct usb_line6 {
struct mutex read_lock;
wait_queue_head_t wait_queue;
unsigned int active:1;
+ unsigned int nonblock:1;
STRUCT_KFIFO_REC_2(LINE6_BUFSIZE_LISTEN * LINE6_RAW_MESSAGES_MAXCOUNT)
fifo;
} messages;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index a5f65a9a0254..b6bcf2f92383 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -2185,6 +2185,421 @@ static int snd_rme_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+/*
+ * RME Babyface Pro (FS)
+ *
+ * These devices expose a couple of DSP functions via requests to EP0.
+ * Switches are available via control registers, while routing is controlled
+ * by setting the volume on each possible crossing point.
+ * Volume control is linear, from -inf (dec. 0) to +6dB (dec. 65536) with
+ * 0dB being at dec. 32768.
+ */
+enum {
+ SND_BBFPRO_CTL_REG1 = 0,
+ SND_BBFPRO_CTL_REG2
+};
+
+#define SND_BBFPRO_CTL_REG_MASK 1
+#define SND_BBFPRO_CTL_IDX_MASK 0xff
+#define SND_BBFPRO_CTL_IDX_SHIFT 1
+#define SND_BBFPRO_CTL_VAL_MASK 1
+#define SND_BBFPRO_CTL_VAL_SHIFT 9
+#define SND_BBFPRO_CTL_REG1_CLK_MASTER 0
+#define SND_BBFPRO_CTL_REG1_CLK_OPTICAL 1
+#define SND_BBFPRO_CTL_REG1_SPDIF_PRO 7
+#define SND_BBFPRO_CTL_REG1_SPDIF_EMPH 8
+#define SND_BBFPRO_CTL_REG1_SPDIF_OPTICAL 10
+#define SND_BBFPRO_CTL_REG2_48V_AN1 0
+#define SND_BBFPRO_CTL_REG2_48V_AN2 1
+#define SND_BBFPRO_CTL_REG2_SENS_IN3 2
+#define SND_BBFPRO_CTL_REG2_SENS_IN4 3
+#define SND_BBFPRO_CTL_REG2_PAD_AN1 4
+#define SND_BBFPRO_CTL_REG2_PAD_AN2 5
+
+#define SND_BBFPRO_MIXER_IDX_MASK 0x1ff
+#define SND_BBFPRO_MIXER_VAL_MASK 0x3ffff
+#define SND_BBFPRO_MIXER_VAL_SHIFT 9
+#define SND_BBFPRO_MIXER_VAL_MIN 0 // -inf
+#define SND_BBFPRO_MIXER_VAL_MAX 65536 // +6dB
+
+#define SND_BBFPRO_USBREQ_CTL_REG1 0x10
+#define SND_BBFPRO_USBREQ_CTL_REG2 0x17
+#define SND_BBFPRO_USBREQ_MIXER 0x12
+
+static int snd_bbfpro_ctl_update(struct usb_mixer_interface *mixer, u8 reg,
+ u8 index, u8 value)
+{
+ int err;
+ u16 usb_req, usb_idx, usb_val;
+ struct snd_usb_audio *chip = mixer->chip;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+
+ if (reg == SND_BBFPRO_CTL_REG1) {
+ usb_req = SND_BBFPRO_USBREQ_CTL_REG1;
+ if (index == SND_BBFPRO_CTL_REG1_CLK_OPTICAL) {
+ usb_idx = 3;
+ usb_val = value ? 3 : 0;
+ } else {
+ usb_idx = 1 << index;
+ usb_val = value ? usb_idx : 0;
+ }
+ } else {
+ usb_req = SND_BBFPRO_USBREQ_CTL_REG2;
+ usb_idx = 1 << index;
+ usb_val = value ? usb_idx : 0;
+ }
+
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), usb_req,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ usb_val, usb_idx, NULL, 0);
+
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_bbfpro_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u8 reg, idx, val;
+ int pv;
+
+ pv = kcontrol->private_value;
+ reg = pv & SND_BBFPRO_CTL_REG_MASK;
+ idx = (pv >> SND_BBFPRO_CTL_IDX_SHIFT) & SND_BBFPRO_CTL_IDX_MASK;
+ val = kcontrol->private_value >> SND_BBFPRO_CTL_VAL_SHIFT;
+
+ if ((reg == SND_BBFPRO_CTL_REG1 &&
+ idx == SND_BBFPRO_CTL_REG1_CLK_OPTICAL) ||
+ (reg == SND_BBFPRO_CTL_REG2 &&
+ (idx == SND_BBFPRO_CTL_REG2_SENS_IN3 ||
+ idx == SND_BBFPRO_CTL_REG2_SENS_IN4))) {
+ ucontrol->value.enumerated.item[0] = val;
+ } else {
+ ucontrol->value.integer.value[0] = val;
+ }
+ return 0;
+}
+
+static int snd_bbfpro_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u8 reg, idx;
+ int pv;
+
+ pv = kcontrol->private_value;
+ reg = pv & SND_BBFPRO_CTL_REG_MASK;
+ idx = (pv >> SND_BBFPRO_CTL_IDX_SHIFT) & SND_BBFPRO_CTL_IDX_MASK;
+
+ if (reg == SND_BBFPRO_CTL_REG1 &&
+ idx == SND_BBFPRO_CTL_REG1_CLK_OPTICAL) {
+ static const char * const texts[2] = {
+ "AutoSync",
+ "Internal"
+ };
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
+ } else if (reg == SND_BBFPRO_CTL_REG2 &&
+ (idx == SND_BBFPRO_CTL_REG2_SENS_IN3 ||
+ idx == SND_BBFPRO_CTL_REG2_SENS_IN4)) {
+ static const char * const texts[2] = {
+ "-10dBV",
+ "+4dBu"
+ };
+ return snd_ctl_enum_info(uinfo, 1, 2, texts);
+ }
+
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ return 0;
+}
+
+static int snd_bbfpro_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int err;
+ u8 reg, idx;
+ int old_value, pv, val;
+
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct usb_mixer_interface *mixer = list->mixer;
+
+ pv = kcontrol->private_value;
+ reg = pv & SND_BBFPRO_CTL_REG_MASK;
+ idx = (pv >> SND_BBFPRO_CTL_IDX_SHIFT) & SND_BBFPRO_CTL_IDX_MASK;
+ old_value = (pv >> SND_BBFPRO_CTL_VAL_SHIFT) & SND_BBFPRO_CTL_VAL_MASK;
+
+ if ((reg == SND_BBFPRO_CTL_REG1 &&
+ idx == SND_BBFPRO_CTL_REG1_CLK_OPTICAL) ||
+ (reg == SND_BBFPRO_CTL_REG2 &&
+ (idx == SND_BBFPRO_CTL_REG2_SENS_IN3 ||
+ idx == SND_BBFPRO_CTL_REG2_SENS_IN4))) {
+ val = ucontrol->value.enumerated.item[0];
+ } else {
+ val = ucontrol->value.integer.value[0];
+ }
+
+ if (val > 1)
+ return -EINVAL;
+
+ if (val == old_value)
+ return 0;
+
+ kcontrol->private_value = reg
+ | ((idx & SND_BBFPRO_CTL_IDX_MASK) << SND_BBFPRO_CTL_IDX_SHIFT)
+ | ((val & SND_BBFPRO_CTL_VAL_MASK) << SND_BBFPRO_CTL_VAL_SHIFT);
+
+ err = snd_bbfpro_ctl_update(mixer, reg, idx, val);
+ return err < 0 ? err : 1;
+}
+
+static int snd_bbfpro_ctl_resume(struct usb_mixer_elem_list *list)
+{
+ u8 reg, idx;
+ int value, pv;
+
+ pv = list->kctl->private_value;
+ reg = pv & SND_BBFPRO_CTL_REG_MASK;
+ idx = (pv >> SND_BBFPRO_CTL_IDX_SHIFT) & SND_BBFPRO_CTL_IDX_MASK;
+ value = (pv >> SND_BBFPRO_CTL_VAL_SHIFT) & SND_BBFPRO_CTL_VAL_MASK;
+
+ return snd_bbfpro_ctl_update(list->mixer, reg, idx, value);
+}
+
+static int snd_bbfpro_vol_update(struct usb_mixer_interface *mixer, u16 index,
+ u32 value)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
+ u16 idx;
+ u16 usb_idx, usb_val;
+ u32 v;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+
+ idx = index & SND_BBFPRO_MIXER_IDX_MASK;
+ // 18 bit linear volume, split so 2 bits end up in index.
+ v = value & SND_BBFPRO_MIXER_VAL_MASK;
+ usb_idx = idx | (v & 0x3) << 14;
+ usb_val = (v >> 2) & 0xffff;
+
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0),
+ SND_BBFPRO_USBREQ_MIXER,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ usb_val, usb_idx, NULL, 0);
+
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_bbfpro_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] =
+ kcontrol->private_value >> SND_BBFPRO_MIXER_VAL_SHIFT;
+ return 0;
+}
+
+static int snd_bbfpro_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = SND_BBFPRO_MIXER_VAL_MIN;
+ uinfo->value.integer.max = SND_BBFPRO_MIXER_VAL_MAX;
+ return 0;
+}
+
+static int snd_bbfpro_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int err;
+ u16 idx;
+ u32 new_val, old_value, uvalue;
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct usb_mixer_interface *mixer = list->mixer;
+
+ uvalue = ucontrol->value.integer.value[0];
+ idx = kcontrol->private_value & SND_BBFPRO_MIXER_IDX_MASK;
+ old_value = kcontrol->private_value >> SND_BBFPRO_MIXER_VAL_SHIFT;
+
+ if (uvalue > SND_BBFPRO_MIXER_VAL_MAX)
+ return -EINVAL;
+
+ if (uvalue == old_value)
+ return 0;
+
+ new_val = uvalue & SND_BBFPRO_MIXER_VAL_MASK;
+
+ kcontrol->private_value = idx
+ | (new_val << SND_BBFPRO_MIXER_VAL_SHIFT);
+
+ err = snd_bbfpro_vol_update(mixer, idx, new_val);
+ return err < 0 ? err : 1;
+}
+
+static int snd_bbfpro_vol_resume(struct usb_mixer_elem_list *list)
+{
+ int pv = list->kctl->private_value;
+ u16 idx = pv & SND_BBFPRO_MIXER_IDX_MASK;
+ u32 val = (pv >> SND_BBFPRO_MIXER_VAL_SHIFT)
+ & SND_BBFPRO_MIXER_VAL_MASK;
+ return snd_bbfpro_vol_update(list->mixer, idx, val);
+}
+
+// Predefined elements
+static const struct snd_kcontrol_new snd_bbfpro_ctl_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .index = 0,
+ .info = snd_bbfpro_ctl_info,
+ .get = snd_bbfpro_ctl_get,
+ .put = snd_bbfpro_ctl_put
+};
+
+static const struct snd_kcontrol_new snd_bbfpro_vol_control = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .index = 0,
+ .info = snd_bbfpro_vol_info,
+ .get = snd_bbfpro_vol_get,
+ .put = snd_bbfpro_vol_put
+};
+
+static int snd_bbfpro_ctl_add(struct usb_mixer_interface *mixer, u8 reg,
+ u8 index, char *name)
+{
+ struct snd_kcontrol_new knew = snd_bbfpro_ctl_control;
+
+ knew.name = name;
+ knew.private_value = (reg & SND_BBFPRO_CTL_REG_MASK)
+ | ((index & SND_BBFPRO_CTL_IDX_MASK)
+ << SND_BBFPRO_CTL_IDX_SHIFT);
+
+ return add_single_ctl_with_resume(mixer, 0, snd_bbfpro_ctl_resume,
+ &knew, NULL);
+}
+
+static int snd_bbfpro_vol_add(struct usb_mixer_interface *mixer, u16 index,
+ char *name)
+{
+ struct snd_kcontrol_new knew = snd_bbfpro_vol_control;
+
+ knew.name = name;
+ knew.private_value = index & SND_BBFPRO_MIXER_IDX_MASK;
+
+ return add_single_ctl_with_resume(mixer, 0, snd_bbfpro_vol_resume,
+ &knew, NULL);
+}
+
+static int snd_bbfpro_controls_create(struct usb_mixer_interface *mixer)
+{
+ int err, i, o;
+ char name[48];
+
+ static const char * const input[] = {
+ "AN1", "AN2", "IN3", "IN4", "AS1", "AS2", "ADAT3",
+ "ADAT4", "ADAT5", "ADAT6", "ADAT7", "ADAT8"};
+
+ static const char * const output[] = {
+ "AN1", "AN2", "PH3", "PH4", "AS1", "AS2", "ADAT3", "ADAT4",
+ "ADAT5", "ADAT6", "ADAT7", "ADAT8"};
+
+ for (o = 0 ; o < 12 ; ++o) {
+ for (i = 0 ; i < 12 ; ++i) {
+ // Line routing
+ snprintf(name, sizeof(name),
+ "%s-%s-%s Playback Volume",
+ (i < 2 ? "Mic" : "Line"),
+ input[i], output[o]);
+ err = snd_bbfpro_vol_add(mixer, (26 * o + i), name);
+ if (err < 0)
+ return err;
+
+ // PCM routing... yes, it is output remapping
+ snprintf(name, sizeof(name),
+ "PCM-%s-%s Playback Volume",
+ output[i], output[o]);
+ err = snd_bbfpro_vol_add(mixer, (26 * o + 12 + i),
+ name);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ // Control Reg 1
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG1,
+ SND_BBFPRO_CTL_REG1_CLK_OPTICAL,
+ "Sample Clock Source");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG1,
+ SND_BBFPRO_CTL_REG1_SPDIF_PRO,
+ "IEC958 Pro Mask");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG1,
+ SND_BBFPRO_CTL_REG1_SPDIF_EMPH,
+ "IEC958 Emphasis");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG1,
+ SND_BBFPRO_CTL_REG1_SPDIF_OPTICAL,
+ "IEC958 Switch");
+ if (err < 0)
+ return err;
+
+ // Control Reg 2
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG2,
+ SND_BBFPRO_CTL_REG2_48V_AN1,
+ "Mic-AN1 48V");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG2,
+ SND_BBFPRO_CTL_REG2_48V_AN2,
+ "Mic-AN2 48V");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG2,
+ SND_BBFPRO_CTL_REG2_SENS_IN3,
+ "Line-IN3 Sens.");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG2,
+ SND_BBFPRO_CTL_REG2_SENS_IN4,
+ "Line-IN4 Sens.");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG2,
+ SND_BBFPRO_CTL_REG2_PAD_AN1,
+ "Mic-AN1 PAD");
+ if (err < 0)
+ return err;
+
+ err = snd_bbfpro_ctl_add(mixer, SND_BBFPRO_CTL_REG2,
+ SND_BBFPRO_CTL_REG2_PAD_AN2,
+ "Mic-AN2 PAD");
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -2286,6 +2701,9 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x0194f, 0x010c): /* Presonus Studio 1810c */
err = snd_sc1810_init_mixer(mixer);
break;
+ case USB_ID(0x2a39, 0x3fb0): /* RME Babyface Pro FS */
+ err = snd_bbfpro_controls_create(mixer);
+ break;
}
return err;
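snd_bbfpro_vol_update() above packs the 18-bit linear volume into the vendor request by sending the upper 16 bits as wValue and folding the two low bits into bits 14-15 of wIndex, next to the 9-bit crossing-point index. The stand-alone sketch below mirrors the SND_BBFPRO_MIXER_* masks from the patch purely for illustration (it is not driver code) and shows the split for the 0 dB point (dec. 32768) and for a value whose low bits land in the index word.

#include <stdio.h>

#define MIXER_IDX_MASK 0x1ff   /* mirrors SND_BBFPRO_MIXER_IDX_MASK */
#define MIXER_VAL_MASK 0x3ffff /* mirrors SND_BBFPRO_MIXER_VAL_MASK */

static void split(unsigned int idx, unsigned int value)
{
	unsigned int v = value & MIXER_VAL_MASK;
	/* low two bits of the volume ride in bits 14-15 of the index word */
	unsigned int usb_idx = (idx & MIXER_IDX_MASK) | (v & 0x3) << 14;
	unsigned int usb_val = (v >> 2) & 0xffff;

	printf("volume %6u -> wValue 0x%04x, wIndex 0x%04x\n",
	       value, usb_val, usb_idx);
}

int main(void)
{
	split(0, 32768); /* 0 dB on crossing point 0: wValue 0x2000, wIndex 0x0000 */
	split(5, 32770); /* low bits 0x2 fold into bits 14-15: wIndex 0x8005 */
	return 0;
}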
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index a4e4064f9aee..8a05dcb1344f 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -370,6 +370,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
+ case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
+ ep = 0x82;
+ ifnum = 0;
+ goto add_sync_ep_from_ifnum;
case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
/* BOSS Katana amplifiers do not need quirks */
return 0;
@@ -404,6 +408,8 @@ add_sync_ep:
if (!subs->sync_endpoint)
return -EINVAL;
+ subs->sync_endpoint->is_implicit_feedback = 1;
+
subs->data_endpoint->sync_master = subs->sync_endpoint;
return 1;
@@ -502,12 +508,15 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
implicit_fb ?
SND_USB_ENDPOINT_TYPE_DATA :
SND_USB_ENDPOINT_TYPE_SYNC);
+
if (!subs->sync_endpoint) {
if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
return 0;
return -EINVAL;
}
+ subs->sync_endpoint->is_implicit_feedback = implicit_fb;
+
subs->data_endpoint->sync_master = subs->sync_endpoint;
return 0;
@@ -1579,6 +1588,8 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
for (i = 0; i < ctx->packets; i++) {
if (ctx->packet_size[i])
counts = ctx->packet_size[i];
+ else if (ep->sync_master)
+ counts = snd_usb_endpoint_slave_next_packet_size(ep);
else
counts = snd_usb_endpoint_next_packet_size(ep);
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 4174ad11fca6..889c550c9f29 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -54,6 +54,38 @@ void snd_usb_audio_create_proc(struct snd_usb_audio *chip)
proc_audio_usbid_read);
}
+static const char * const channel_labels[] = {
+ [SNDRV_CHMAP_NA] = "N/A",
+ [SNDRV_CHMAP_MONO] = "MONO",
+ [SNDRV_CHMAP_FL] = "FL",
+ [SNDRV_CHMAP_FR] = "FR",
+ [SNDRV_CHMAP_FC] = "FC",
+ [SNDRV_CHMAP_LFE] = "LFE",
+ [SNDRV_CHMAP_RL] = "RL",
+ [SNDRV_CHMAP_RR] = "RR",
+ [SNDRV_CHMAP_FLC] = "FLC",
+ [SNDRV_CHMAP_FRC] = "FRC",
+ [SNDRV_CHMAP_RC] = "RC",
+ [SNDRV_CHMAP_SL] = "SL",
+ [SNDRV_CHMAP_SR] = "SR",
+ [SNDRV_CHMAP_TC] = "TC",
+ [SNDRV_CHMAP_TFL] = "TFL",
+ [SNDRV_CHMAP_TFC] = "TFC",
+ [SNDRV_CHMAP_TFR] = "TFR",
+ [SNDRV_CHMAP_TRL] = "TRL",
+ [SNDRV_CHMAP_TRC] = "TRC",
+ [SNDRV_CHMAP_TRR] = "TRR",
+ [SNDRV_CHMAP_TFLC] = "TFLC",
+ [SNDRV_CHMAP_TFRC] = "TFRC",
+ [SNDRV_CHMAP_LLFE] = "LLFE",
+ [SNDRV_CHMAP_RLFE] = "RLFE",
+ [SNDRV_CHMAP_TSL] = "TSL",
+ [SNDRV_CHMAP_TSR] = "TSR",
+ [SNDRV_CHMAP_BC] = "BC",
+ [SNDRV_CHMAP_RLC] = "RLC",
+ [SNDRV_CHMAP_RRC] = "RRC",
+};
+
/*
* proc interface for list the supported pcm formats
*/
@@ -97,6 +129,27 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
snd_iprintf(buffer, " Data packet interval: %d us\n",
125 * (1 << fp->datainterval));
snd_iprintf(buffer, " Bits: %d\n", fp->fmt_bits);
+
+ if (fp->dsd_raw)
+ snd_iprintf(buffer, " DSD raw: DOP=%d, bitrev=%d\n",
+ fp->dsd_dop, fp->dsd_bitrev);
+
+ if (fp->chmap) {
+ const struct snd_pcm_chmap_elem *map = fp->chmap;
+ int c;
+
+ snd_iprintf(buffer, " Channel map:");
+ for (c = 0; c < map->channels; c++) {
+ if (map->map[c] >= ARRAY_SIZE(channel_labels) ||
+ !channel_labels[map->map[c]])
+ snd_iprintf(buffer, " --");
+ else
+ snd_iprintf(buffer, " %s",
+ channel_labels[map->map[c]]);
+ }
+ snd_iprintf(buffer, "\n");
+ }
+
// snd_iprintf(buffer, " Max Packet Size = %d\n", fp->maxpacksize);
// snd_iprintf(buffer, " EP Attribute = %#x\n", fp->attributes);
}
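
The channel-map dump above prints "--" for any position whose value falls outside channel_labels[] or has no label. A small standalone sketch of that guard, using a trimmed stand-in table (the indices mirror the ALSA chmap enum order but are otherwise illustrative):

    #include <stdio.h>

    static const char * const labels[] = { [2] = "MONO", [3] = "FL", [4] = "FR" };

    static const char *label_or_dash(unsigned int v)
    {
            /* out-of-range or unnamed positions fall back to "--" */
            if (v >= sizeof(labels) / sizeof(labels[0]) || !labels[v])
                    return "--";
            return labels[v];
    }

    int main(void)
    {
            unsigned int map[] = { 3, 4, 99 };

            for (unsigned int c = 0; c < 3; c++)
                    printf(" %s", label_or_dash(map[c]));
            printf("\n");   /* prints " FL FR --" */
            return 0;
    }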
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index eb89902a83be..4ec491011b19 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -25,6 +25,33 @@
.idProduct = prod, \
.bInterfaceClass = USB_CLASS_VENDOR_SPEC
+#define QUIRK_RENAME_DEVICE(_vendor, _device) \
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
+ .vendor_name = _vendor, \
+ .product_name = _device, \
+ .ifnum = QUIRK_NO_INTERFACE \
+ }
+
+#define QUIRK_DEVICE_PROFILE(_vendor, _device, _profile) \
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
+ .vendor_name = _vendor, \
+ .product_name = _device, \
+ .profile_name = _profile, \
+ .ifnum = QUIRK_NO_INTERFACE \
+ }
+
+/* HP Thunderbolt Dock Audio Headset */
+{
+ USB_DEVICE(0x03f0, 0x0269),
+ QUIRK_DEVICE_PROFILE("HP", "Thunderbolt Dock Audio Headset",
+ "HP-Thunderbolt-Dock-Audio-Headset"),
+},
+/* HP Thunderbolt Dock Audio Module */
+{
+ USB_DEVICE(0x03f0, 0x0567),
+ QUIRK_DEVICE_PROFILE("HP", "Thunderbolt Dock Audio Module",
+ "HP-Thunderbolt-Dock-Audio-Module"),
+},
/* FTDI devices */
{
USB_DEVICE(0x0403, 0xb8d8),
@@ -61,20 +88,12 @@
/* Creative/E-Mu devices */
{
USB_DEVICE(0x041e, 0x3010),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Creative Labs",
- .product_name = "Sound Blaster MP3+",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("Creative Labs", "Sound Blaster MP3+")
},
/* Creative/Toshiba Multimedia Center SB-0500 */
{
USB_DEVICE(0x041e, 0x3048),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Toshiba",
- .product_name = "SB-0500",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("Toshiba", "SB-0500")
},
{
/* E-Mu 0202 USB */
@@ -207,11 +226,7 @@
.idProduct = 0x0990,
.bInterfaceClass = USB_CLASS_AUDIO,
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Logitech, Inc.",
- .product_name = "QuickCam Pro 9000",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("Logitech, Inc.", "QuickCam Pro 9000")
},
/*
@@ -2596,11 +2611,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
},
{
USB_DEVICE(0x0ccd, 0x0028),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "TerraTec",
- .product_name = "Aureon5.1MkII",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("TerraTec", "Aureon5.1MkII")
},
{
USB_DEVICE(0x0ccd, 0x0035),
@@ -2615,19 +2626,11 @@ YAMAHA_DEVICE(0x7010, "UB99"),
/* Stanton/N2IT Final Scratch v1 device ('Scratchamp') */
{
USB_DEVICE(0x103d, 0x0100),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Stanton",
- .product_name = "ScratchAmp",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("Stanton", "ScratchAmp")
},
{
USB_DEVICE(0x103d, 0x0101),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Stanton",
- .product_name = "ScratchAmp",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("Stanton", "ScratchAmp")
},
/* Novation EMS devices */
@@ -2788,11 +2791,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
{
/* aka. Serato Scratch Live DJ Box */
USB_DEVICE(0x13e5, 0x0001),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Rane",
- .product_name = "SL-1",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE("Rane", "SL-1")
},
/* Native Instruments MK2 series */
@@ -3259,10 +3258,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
* is also used by the CM106 based cards, so make it unique.
*/
USB_DEVICE(0x0d8c, 0x0103),
- .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
- .product_name = "Audio Advantage MicroII",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_RENAME_DEVICE(NULL, "Audio Advantage MicroII")
},
/* disabled due to regression for other devices;
@@ -3368,12 +3364,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
/* Dell WD15 Dock */
{
USB_DEVICE(0x0bda, 0x4014),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Dell",
- .product_name = "WD15 Dock",
- .profile_name = "Dell-WD15-Dock",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_DEVICE_PROFILE("Dell", "WD15 Dock", "Dell-WD15-Dock")
},
/* Dell WD19 Dock */
{
@@ -3553,12 +3544,8 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
#define ALC1220_VB_DESKTOP(vend, prod) { \
USB_DEVICE(vend, prod), \
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { \
- .vendor_name = "Realtek", \
- .product_name = "ALC1220-VB-DT", \
- .profile_name = "Realtek-ALC1220-VB-Desktop", \
- .ifnum = QUIRK_NO_INTERFACE \
- } \
+ QUIRK_DEVICE_PROFILE("Realtek", "ALC1220-VB-DT", \
+ "Realtek-ALC1220-VB-Desktop") \
}
ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */
ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */
@@ -3574,20 +3561,75 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
*/
{
USB_DEVICE(0x0414, 0xa000),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Gigabyte",
- .product_name = "Aorus Master Front Headphone",
- .profile_name = "Gigabyte-Aorus-Master-Front-Headphone",
- .ifnum = QUIRK_NO_INTERFACE
- }
+ QUIRK_DEVICE_PROFILE("Gigabyte", "Aorus Master Front Headphone",
+ "Gigabyte-Aorus-Master-Front-Headphone")
},
{
USB_DEVICE(0x0414, 0xa001),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Gigabyte",
- .product_name = "Aorus Master Main Audio",
- .profile_name = "Gigabyte-Aorus-Master-Main-Audio",
- .ifnum = QUIRK_NO_INTERFACE
+ QUIRK_DEVICE_PROFILE("Gigabyte", "Aorus Master Main Audio",
+ "Gigabyte-Aorus-Master-Main-Audio")
+},
+{
+ /*
+ * Pioneer DJ DJM-900NXS2
+ * 10 channels playback & 12 channels capture @ 44.1/48/96kHz S24LE
+ */
+ USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000a),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 10,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x01,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC,
+ .rates = SNDRV_PCM_RATE_44100|
+ SNDRV_PCM_RATE_48000|
+ SNDRV_PCM_RATE_96000,
+ .rate_min = 44100,
+ .rate_max = 96000,
+ .nr_rates = 3,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000, 96000
+ }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 12,
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC|
+ USB_ENDPOINT_USAGE_IMPLICIT_FB,
+ .rates = SNDRV_PCM_RATE_44100|
+ SNDRV_PCM_RATE_48000|
+ SNDRV_PCM_RATE_96000,
+ .rate_min = 44100,
+ .rate_max = 96000,
+ .nr_rates = 3,
+ .rate_table = (unsigned int[]) {
+ 44100, 48000, 96000
+ }
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
}
},
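
QUIRK_RENAME_DEVICE() and QUIRK_DEVICE_PROFILE() only fold the driver_info initializer that every renamed entry used to spell out. A sketch of the expansion, using a trimmed stand-in for struct snd_usb_audio_quirk and the Dell WD15 strings from the table above:

    #include <stdio.h>

    struct quirk { const char *vendor_name, *product_name, *profile_name; int ifnum; };

    #define QUIRK_NO_INTERFACE (-1)
    #define DEVICE_PROFILE(_vendor, _device, _profile) \
            { .vendor_name = _vendor, .product_name = _device, \
              .profile_name = _profile, .ifnum = QUIRK_NO_INTERFACE }

    int main(void)
    {
            static const struct quirk q =
                    DEVICE_PROFILE("Dell", "WD15 Dock", "Dell-WD15-Dock");

            printf("%s %s -> %s (ifnum=%d)\n",
                   q.vendor_name, q.product_name, q.profile_name, q.ifnum);
            return 0;
    }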
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d8a765be5dfe..bca0179a0ef8 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1458,6 +1458,30 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
}
+
+/*
+ * Pioneer DJ DJM-900NXS2
+ * Device needs to know the sample rate each time substream is started
+ */
+static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs)
+{
+
+ /* Convert sample rate value to little endian */
+ u8 sr[3];
+
+ sr[0] = subs->cur_rate & 0xff;
+ sr[1] = (subs->cur_rate >> 8) & 0xff;
+ sr[2] = (subs->cur_rate >> 16) & 0xff;
+
+ /* Configure device */
+ usb_set_interface(subs->dev, 0, 1);
+ snd_usb_ctl_msg(subs->stream->chip->dev,
+ usb_rcvctrlpipe(subs->stream->chip->dev, 0),
+ 0x01, 0x22, 0x0100, 0x0082, &sr, 0x0003);
+
+ return 0;
+}
+
void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
struct audioformat *fmt)
{
@@ -1468,6 +1492,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
set_format_emu_quirk(subs, fmt);
break;
+ case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
+ pioneer_djm_set_format_quirk(subs);
+ break;
}
}
@@ -1807,20 +1834,6 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
*/
fp->attributes &= ~UAC_EP_CS_ATTR_FILL_MAX;
break;
- case USB_ID(0x1235, 0x8200): /* Focusrite Scarlett 2i4 2nd gen */
- case USB_ID(0x1235, 0x8202): /* Focusrite Scarlett 2i2 2nd gen */
- case USB_ID(0x1235, 0x8205): /* Focusrite Scarlett Solo 2nd gen */
- /*
- * Reports that playback should use Synch: Synchronous
- * while still providing a feedback endpoint.
- * Synchronous causes snapping on some sample rates.
- * Force it to use Synch: Asynchronous.
- */
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- fp->ep_attr &= ~USB_ENDPOINT_SYNCTYPE;
- fp->ep_attr |= USB_ENDPOINT_SYNC_ASYNC;
- }
- break;
}
}
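
pioneer_djm_set_format_quirk() sends the current rate as three little-endian bytes in a vendor control message. A standalone sketch of just the packing step (the USB transfer itself is omitted):

    #include <stdio.h>

    static void pack_rate(unsigned int rate, unsigned char sr[3])
    {
            sr[0] = rate & 0xff;
            sr[1] = (rate >> 8) & 0xff;
            sr[2] = (rate >> 16) & 0xff;
    }

    int main(void)
    {
            unsigned char sr[3];

            pack_rate(96000, sr);   /* 96000 == 0x017700 */
            printf("%02x %02x %02x\n", sr[0], sr[1], sr[2]);   /* 00 77 01 */
            return 0;
    }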
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 1c892c7f14d7..b91c4c0807ec 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -19,14 +19,16 @@
struct media_device;
struct media_intf_devnode;
+#define MAX_CARD_INTERFACES 16
+
struct snd_usb_audio {
int index;
struct usb_device *dev;
struct snd_card *card;
- struct usb_interface *pm_intf;
+ struct usb_interface *intf[MAX_CARD_INTERFACES];
u32 usb_id;
struct mutex mutex;
- unsigned int autosuspended:1;
+ unsigned int system_suspend;
atomic_t active;
atomic_t shutdown;
atomic_t usage_count;
diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h
index e0f77172ce8f..144b85f57bd2 100644
--- a/sound/usb/usx2y/usbusx2y.h
+++ b/sound/usb/usx2y/usbusx2y.h
@@ -18,7 +18,7 @@ struct snd_usX2Y_AsyncSeq {
struct snd_usX2Y_urbSeq {
int submitted;
int len;
- struct urb *urb[0];
+ struct urb *urb[];
};
#include "usx2yhwdeppcm.h"
diff --git a/tools/arch/sh/include/asm/barrier.h b/tools/arch/sh/include/asm/barrier.h
index bde5221af282..7eaea27cdd67 100644
--- a/tools/arch/sh/include/asm/barrier.h
+++ b/tools/arch/sh/include/asm/barrier.h
@@ -22,7 +22,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
-#if defined(__SH4A__) || defined(__SH5__)
+#if defined(__SH4A__)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 12c9684d59ba..ef452b817f44 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -301,6 +301,9 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
+#define MSR_AMD_PKG_ENERGY_STATUS 0xc001029b
+#define MSR_AMD_RAPL_POWER_UNIT 0xc0010299
+
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
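
The two new MSRs expose the AMD package energy counter and its scaling unit. A sketch of converting the counter to joules, assuming the usual RAPL encoding (energy status unit in bits 12:8 of the power-unit MSR, counter in multiples of 1/2^ESU joule) and the /dev/cpu/N/msr interface; it needs the msr module loaded and sufficient privilege:

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>

    #define MSR_AMD_RAPL_POWER_UNIT   0xc0010299
    #define MSR_AMD_PKG_ENERGY_STATUS 0xc001029b

    static int rdmsr(int fd, uint32_t reg, uint64_t *val)
    {
            /* /dev/cpu/N/msr uses the MSR number as the file offset */
            return pread(fd, val, sizeof(*val), reg) == sizeof(*val) ? 0 : -1;
    }

    int main(void)
    {
            uint64_t unit, energy;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            if (fd < 0 || rdmsr(fd, MSR_AMD_RAPL_POWER_UNIT, &unit) ||
                rdmsr(fd, MSR_AMD_PKG_ENERGY_STATUS, &energy)) {
                    perror("msr");
                    return 1;
            }
            printf("package energy: %.3f J\n",
                   (double)(uint32_t)energy / (1u << ((unit >> 8) & 0x1f)));
            close(fd);
            return 0;
    }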
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index 81b350ffd03f..eff16b77d5eb 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -124,9 +124,16 @@ for i in samples/good-* ; do
xpass $BOOTCONF -a $i $INITRD
done
+
+echo
+echo "=== Summary ==="
+echo "# of Passed: $(expr $NO - $NG - 1)"
+echo "# of Failed: $NG"
+
echo
if [ $NG -eq 0 ]; then
echo "All tests passed"
else
echo "$NG tests failed"
+ exit 1
fi
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 3abd4316cd4f..cb152370fdef 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -40,7 +40,6 @@ FEATURE_TESTS_BASIC := \
glibc \
gtk2 \
gtk2-infobar \
- libaudit \
libbfd \
libcap \
libelf \
@@ -112,7 +111,6 @@ FEATURE_DISPLAY ?= \
dwarf_getlocations \
glibc \
gtk2 \
- libaudit \
libbfd \
libcap \
libelf \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 84f845b9627d..b1f0321180f5 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -91,7 +91,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
###############################
$(OUTPUT)test-all.bin:
- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
+ $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
$(OUTPUT)test-hello.bin:
$(BUILD)
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 88145e8cde1a..5479e543b194 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -74,10 +74,6 @@
# include "test-libunwind.c"
#undef main
-#define main main_test_libaudit
-# include "test-libaudit.c"
-#undef main
-
#define main main_test_libslang
# include "test-libslang.c"
#undef main
@@ -208,7 +204,6 @@ int main(int argc, char *argv[])
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
main_test_libunwind();
- main_test_libaudit();
main_test_libslang();
main_test_gtk2(argc, argv);
main_test_gtk2_infobar(argc, argv);
diff --git a/tools/gpio/lsgpio.c b/tools/gpio/lsgpio.c
index e1430f504c13..8a71ad36f83b 100644
--- a/tools/gpio/lsgpio.c
+++ b/tools/gpio/lsgpio.c
@@ -49,6 +49,18 @@ struct gpio_flag flagnames[] = {
.name = "open-source",
.mask = GPIOLINE_FLAG_OPEN_SOURCE,
},
+ {
+ .name = "pull-up",
+ .mask = GPIOLINE_FLAG_BIAS_PULL_UP,
+ },
+ {
+ .name = "pull-down",
+ .mask = GPIOLINE_FLAG_BIAS_PULL_DOWN,
+ },
+ {
+ .name = "bias-disabled",
+ .mask = GPIOLINE_FLAG_BIAS_DISABLE,
+ },
};
void print_flags(unsigned long flags)
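
The new table entries let lsgpio name the line bias flags added in v5.5. A sketch of decoding a flags word with the same table approach; it only assumes kernel headers new enough to define the GPIOLINE_FLAG_BIAS_* bits:

    #include <stdio.h>
    #include <linux/gpio.h>

    static const struct { const char *name; unsigned long mask; } bias[] = {
            { "pull-up",       GPIOLINE_FLAG_BIAS_PULL_UP },
            { "pull-down",     GPIOLINE_FLAG_BIAS_PULL_DOWN },
            { "bias-disabled", GPIOLINE_FLAG_BIAS_DISABLE },
    };

    int main(void)
    {
            unsigned long flags = GPIOLINE_FLAG_BIAS_PULL_UP;   /* example value */

            for (unsigned int i = 0; i < sizeof(bias) / sizeof(bias[0]); i++)
                    if (flags & bias[i].mask)
                            printf("%s\n", bias[i].name);
            return 0;
    }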
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
index 95c072b70d0e..b9d4322e1e65 100644
--- a/tools/include/linux/compiler-gcc.h
+++ b/tools/include/linux/compiler-gcc.h
@@ -27,6 +27,18 @@
#define __pure __attribute__((pure))
#endif
#define noinline __attribute__((noinline))
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define __no_tail_call __attribute__((disable_tail_calls))
+#endif
+#endif
+#ifndef __no_tail_call
+#if GCC_VERSION > 40201
+#define __no_tail_call __attribute__((optimize("no-optimize-sibling-calls")))
+#else
+#define __no_tail_call
+#endif
+#endif
#ifndef __packed
#define __packed __attribute__((packed))
#endif
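
__no_tail_call keeps the trailing call a real call, so the annotated function stays on the stack in backtraces instead of being replaced by a sibling jump. A small sketch of how it would be used; the attribute detection mirrors the header above, and on compilers without the attribute the empty fallback means the guarantee then depends on optimization settings:

    #include <stdio.h>

    #ifdef __has_attribute
    #if __has_attribute(disable_tail_calls)
    #define __no_tail_call __attribute__((disable_tail_calls))
    #endif
    #endif
    #ifndef __no_tail_call
    #define __no_tail_call   /* fallback: no guarantee */
    #endif

    static void leaf(void)
    {
            printf("leaf\n");
    }

    static __no_tail_call void helper(void)
    {
            leaf();   /* must stay a real call, not a tail jump */
    }

    int main(void)
    {
            helper();
            return 0;
    }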
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 180f7714a5f1..9f9002734e19 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -47,6 +47,9 @@
#ifndef noinline
#define noinline
#endif
+#ifndef __no_tail_call
+#define __no_tail_call
+#endif
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index 89ca6fe257cc..efb6c3f5f2a9 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -20,7 +20,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
#include <execinfo.h>
#include <stdlib.h>
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
char **name;
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 63d65a702900..5fbb90a80d23 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -505,6 +505,28 @@ static const char *uaccess_safe_builtin[] = {
"__asan_report_store4_noabort",
"__asan_report_store8_noabort",
"__asan_report_store16_noabort",
+ /* KCSAN */
+ "__kcsan_check_access",
+ "kcsan_found_watchpoint",
+ "kcsan_setup_watchpoint",
+ "kcsan_check_scoped_accesses",
+ "kcsan_disable_current",
+ "kcsan_enable_current_nowarn",
+ /* KCSAN/TSAN */
+ "__tsan_func_entry",
+ "__tsan_func_exit",
+ "__tsan_read_range",
+ "__tsan_write_range",
+ "__tsan_read1",
+ "__tsan_read2",
+ "__tsan_read4",
+ "__tsan_read8",
+ "__tsan_read16",
+ "__tsan_write1",
+ "__tsan_write2",
+ "__tsan_write4",
+ "__tsan_write8",
+ "__tsan_write16",
/* KCOV */
"write_comp_data",
"check_kcov_mode",
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 271484754fee..e817179c5027 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -1,5 +1,5 @@
i synthesize instructions events
- b synthesize branches events
+ b synthesize branches events (branch misses for Arm SPE)
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
@@ -9,6 +9,10 @@
of aux-output (refer to perf record)
e synthesize error events
d create a debug log
+ f synthesize first level cache events
+ m synthesize last level cache events
+ t synthesize TLB events
+ a synthesize remote access events
g synthesize a call chain (use with i or x)
G synthesize a call chain on existing event records
l synthesize last branch entries (use with i or x)
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 2133eb320cb0..98efdab5fbd4 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -40,7 +40,7 @@ RECORD OPTIONS
--------------
-e::
--event=::
- Select the PMU event. Use 'perf mem record -e list'
+ Select the PMU event. Use 'perf c2c record -e list'
to list available events.
-v::
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index f16d8a71d3f5..c7d3df5798e2 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -667,6 +667,11 @@ convert.*::
Limit the size of ordered_events queue, so we could control
allocation size of perf data files without proper finished
round events.
+stat.*::
+
+ stat.big-num::
+ (boolean) Change the default for "--big-num". To make
+ "--no-big-num" the default, set "stat.big-num=false".
intel-pt.*::
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index eb8b7d42591a..f4cd49a7fcdb 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -687,7 +687,7 @@ The v4.2 kernel introduced support for a context switch metadata event,
PERF_RECORD_SWITCH, which allows unprivileged users to see when their processes
are scheduled out and in, just not by whom, which is left for the
PERF_RECORD_SWITCH_CPU_WIDE, that is only accessible in system wide context,
-which in turn requires CAP_SYS_ADMIN.
+which in turn requires CAP_PERFMON or CAP_SYS_ADMIN.
Please see the 45ac1403f564 ("perf: Add PERF_RECORD_SWITCH to indicate context
switches") commit, that introduces these metadata events for further info.
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 561ef55743e2..fa8a5fcd27ab 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -458,7 +458,9 @@ This option sets the time out limit. The default value is 500 ms.
--switch-events::
Record context switch events i.e. events of type PERF_RECORD_SWITCH or
-PERF_RECORD_SWITCH_CPU_WIDE.
+PERF_RECORD_SWITCH_CPU_WIDE. In some cases (e.g. Intel PT or CoreSight)
+switch events will be enabled automatically, which can be suppressed by
+the option --no-switch-events.
--clang-path=PATH::
Path to clang binary to use for compiling BPF scriptlets.
@@ -613,6 +615,17 @@ appended unit character - B/K/M/G
The number of threads to run when synthesizing events for existing processes.
By default, the number of threads equals 1.
+ifdef::HAVE_LIBPFM[]
+--pfm-events events::
+Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
+including support for event filters. For example '--pfm-events
+inst_retired:any_p:u:c=1:i'. More than one event can be passed to the
+option using the comma separator. Hardware events and generic hardware
+events cannot be mixed together. The latter must be used with the -e
+option. The -e option and this one can be mixed and matched. Events
+can be grouped using the {} notation.
+endif::HAVE_LIBPFM[]
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 3fb5028aef08..b029ee728a0b 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -71,6 +71,16 @@ report::
--tid=<tid>::
stat events on existing thread id (comma separated list)
+ifdef::HAVE_LIBPFM[]
+--pfm-events events::
+Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
+including support for event filters. For example '--pfm-events
+inst_retired:any_p:u:c=1:i'. More than one event can be passed to the
+option using the comma separator. Hardware events and generic hardware
+events cannot be mixed together. The latter must be used with the -e
+option. The -e option and this one can be mixed and matched. Events
+can be grouped using the {} notation.
+endif::HAVE_LIBPFM[]
-a::
--all-cpus::
@@ -93,7 +103,9 @@ report::
-B::
--big-num::
- print large numbers with thousands' separators according to locale
+ print large numbers with thousands' separators according to locale.
+ Enabled by default. Use "--no-big-num" to disable.
+ Default setting can be changed with "perf config stat.big-num=false".
-C::
--cpu=::
@@ -234,6 +246,25 @@ filter out the startup phase of the program, which is often very different.
Print statistics of transactional execution if supported.
+--metric-no-group::
+By default, events to compute a metric are placed in weak groups. The
+group tries to enforce scheduling all or none of the events. The
+--metric-no-group option places events outside of groups and may
+increase the chance of an event being scheduled, which can improve
+accuracy. However, because the events may then not be scheduled
+together, accuracy for metrics such as instructions per cycle can be
+lower, as the constituent events may no longer be measured at the
+same time.
+
+--metric-no-merge::
+By default, metric events in different weak groups can be shared if one
+group contains all the events needed by another. In such cases one
+group is eliminated, reducing event multiplexing and making it so
+that certain groups of metrics sum to 100%. A downside of sharing a
+group is that the combined group may require multiplexing, which lowers
+accuracy for a small group that would not otherwise need it. This option
+forbids the event merging logic from sharing events between groups and
+may be used to increase accuracy in that case.
+
STAT RECORD
-----------
Stores stat data into perf data file.
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 20227dabc208..ee2024691d46 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -329,6 +329,17 @@ Default is to monitor all CPUS.
The known limitations include exception handing such as
setjmp/longjmp will have calls/returns not match.
+ifdef::HAVE_LIBPFM[]
+--pfm-events events::
+Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
+including support for event filters. For example '--pfm-events
+inst_retired:any_p:u:c=1:i'. More than one event can be passed to the
+option using the comma separator. Hardware events and generic hardware
+events cannot be mixed together. The latter must be used with the -e
+option. The -e option and this one can be mixed and matched. Events
+can be grouped using the {} notation.
+endif::HAVE_LIBPFM[]
+
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/Documentation/security.txt b/tools/perf/Documentation/security.txt
new file mode 100644
index 000000000000..4fe3b8b1958f
--- /dev/null
+++ b/tools/perf/Documentation/security.txt
@@ -0,0 +1,237 @@
+Overview
+========
+
+For general security related questions about perf_event_open() syscall usage
+and performance monitoring and observability operations by Perf, see:
+https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html
+
+Enabling LSM based mandatory access control (MAC) to perf_event_open() syscall
+==============================================================================
+
+LSM hooks for mandatory access control for perf_event_open() syscall can be
+used starting from Linux v5.3. Below are the steps to extend the Fedora (v31)
+Targeted policy with perf_event_open() access control capabilities:
+
+1. Download selinux-policy SRPM package (e.g. selinux-policy-3.14.4-48.fc31.src.rpm on FC31)
+ and install it so that the rpmbuild directory exists in the current working directory:
+
+ # rpm -Uhv selinux-policy-3.14.4-48.fc31.src.rpm
+
+2. Get into rpmbuild/SPECS directory and unpack the source code:
+
+ # rpmbuild -bp selinux-policy.spec
+
+3. Place patch below at rpmbuild/BUILD/selinux-policy-b86eaaf4dbcf2d51dd4432df7185c0eaf3cbcc02
+ directory and apply it:
+
+ # patch -p1 < selinux-policy-perf-events-perfmon.patch
+ patching file policy/flask/access_vectors
+ patching file policy/flask/security_classes
+ # cat selinux-policy-perf-events-perfmon.patch
+diff -Nura a/policy/flask/access_vectors b/policy/flask/access_vectors
+--- a/policy/flask/access_vectors 2020-02-04 18:19:53.000000000 +0300
++++ b/policy/flask/access_vectors 2020-02-28 23:37:25.000000000 +0300
+@@ -174,6 +174,7 @@
+ wake_alarm
+ block_suspend
+ audit_read
++ perfmon
+ }
+
+ #
+@@ -1099,3 +1100,15 @@
+
+ class xdp_socket
+ inherits socket
++
++class perf_event
++{
++ open
++ cpu
++ kernel
++ tracepoint
++ read
++ write
++}
++
++
+diff -Nura a/policy/flask/security_classes b/policy/flask/security_classes
+--- a/policy/flask/security_classes 2020-02-04 18:19:53.000000000 +0300
++++ b/policy/flask/security_classes 2020-02-28 21:35:17.000000000 +0300
+@@ -200,4 +200,6 @@
+
+ class xdp_socket
+
++class perf_event
++
+ # FLASK
+
+4. Get into rpmbuild/SPECS directory and build policy packages from patched sources:
+
+ # rpmbuild --noclean --noprep -ba selinux-policy.spec
+
+ so you have this:
+
+ # ls -alh rpmbuild/RPMS/noarch/
+ total 33M
+ drwxr-xr-x. 2 root root 4.0K Mar 20 12:16 .
+ drwxr-xr-x. 3 root root 4.0K Mar 20 12:16 ..
+ -rw-r--r--. 1 root root 112K Mar 20 12:16 selinux-policy-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 1.2M Mar 20 12:17 selinux-policy-devel-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 2.3M Mar 20 12:17 selinux-policy-doc-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 12M Mar 20 12:17 selinux-policy-minimum-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 4.5M Mar 20 12:16 selinux-policy-mls-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 111K Mar 20 12:16 selinux-policy-sandbox-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 14M Mar 20 12:17 selinux-policy-targeted-3.14.4-48.fc31.noarch.rpm
+
+5. Install SELinux packages from the Fedora repo, if not already done, and
+ update with the patched rpms above:
+
+ # rpm -Uhv rpmbuild/RPMS/noarch/selinux-policy-*
+
+6. Enable SELinux Permissive mode for the Targeted policy, if not already done:
+
+ # cat /etc/selinux/config
+
+ # This file controls the state of SELinux on the system.
+ # SELINUX= can take one of these three values:
+ # enforcing - SELinux security policy is enforced.
+ # permissive - SELinux prints warnings instead of enforcing.
+ # disabled - No SELinux policy is loaded.
+ SELINUX=permissive
+ # SELINUXTYPE= can take one of these three values:
+ # targeted - Targeted processes are protected,
+ # minimum - Modification of targeted policy. Only selected processes are protected.
+ # mls - Multi Level Security protection.
+ SELINUXTYPE=targeted
+
+7. Enable filesystem SELinux labeling at the next reboot:
+
+ # touch /.autorelabel
+
+8. Reboot the machine; it will relabel the filesystems and load the Targeted policy into the kernel;
+
+9. Log in and check that the dmesg output doesn't report the perf_event class as unknown to the SELinux subsystem;
+
+10. Check that SELinux is enabled and in Permissive mode
+
+ # getenforce
+ Permissive
+
+11. Turn SELinux into Enforcing mode:
+
+ # setenforce 1
+ # getenforce
+ Enforcing
+
+Opening access to perf_event_open() syscall on Fedora with SELinux
+==================================================================
+
+Access to performance monitoring and observability operations by Perf
+can be limited for superuser or CAP_PERFMON or CAP_SYS_ADMIN privileged
+processes. MAC policy settings (e.g. SELinux) can be loaded into the kernel
+and prevent unauthorized access to the perf_event_open() syscall. In that
+case the Perf tool prints a message similar to the one below:
+
+ # perf stat
+ Error:
+ Access to performance monitoring and observability operations is limited.
+ Enforced MAC policy settings (SELinux) can limit access to performance
+ monitoring and observability operations. Inspect system audit records for
+ more perf_event access control information and adjusting the policy.
+ Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open
+ access to performance monitoring and observability operations for users
+ without CAP_PERFMON or CAP_SYS_ADMIN Linux capability.
+ perf_event_paranoid setting is -1:
+ -1: Allow use of (almost) all events by all users
+ Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
+ >= 0: Disallow raw and ftrace function tracepoint access
+ >= 1: Disallow CPU event access
+ >= 2: Disallow kernel profiling
+ To make the adjusted perf_event_paranoid setting permanent preserve it
+ in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)
+
+To confirm that access is limited by MAC policy settings, inspect the system
+audit records using the journalctl command or /var/log/audit/audit.log; the
+output should contain AVC denied records related to perf_event:
+
+ # journalctl --reverse --no-pager | grep perf_event
+
+ python3[1318099]: SELinux is preventing perf from open access on the perf_event labeled unconfined_t.
+ If you believe that perf should be allowed open access on perf_event labeled unconfined_t by default.
+ setroubleshoot[1318099]: SELinux is preventing perf from open access on the perf_event labeled unconfined_t. For complete SELinux messages run: sealert -l 4595ce5b-e58f-462c-9d86-3bc2074935de
+ audit[1318098]: AVC avc: denied { open } for pid=1318098 comm="perf" scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tclass=perf_event permissive=0
+
+In order to open access to the perf_event_open() syscall, the MAC policy
+settings may need to be extended. On an SELinux system this can be done by
+loading a special policy module that extends the base policy settings. A Perf
+related policy module can be generated from the system audit records about
+blocked perf_event access. Run the command below to generate a my-perf.te
+policy extension file with perf_event related rules:
+
+ # ausearch -c 'perf' --raw | audit2allow -M my-perf && cat my-perf.te
+
+ module my-perf 1.0;
+
+ require {
+ type unconfined_t;
+ class perf_event { cpu kernel open read tracepoint write };
+ }
+
+ #============= unconfined_t ==============
+ allow unconfined_t self:perf_event { cpu kernel open read tracepoint write };
+
+Now compile, pack and load my-perf.pp extension module into the kernel:
+
+ # checkmodule -M -m -o my-perf.mod my-perf.te
+ # semodule_package -o my-perf.pp -m my-perf.mod
+ # semodule -X 300 -i my-perf.pp
+
+After the steps above, access to the perf_event_open() syscall should
+now be allowed by the policy settings. Check access by running Perf like this:
+
+ # perf stat
+ ^C
+ Performance counter stats for 'system wide':
+
+ 36,387.41 msec cpu-clock # 7.999 CPUs utilized
+ 2,629 context-switches # 0.072 K/sec
+ 57 cpu-migrations # 0.002 K/sec
+ 1 page-faults # 0.000 K/sec
+ 263,721,559 cycles # 0.007 GHz
+ 175,746,713 instructions # 0.67 insn per cycle
+ 19,628,798 branches # 0.539 M/sec
+ 1,259,201 branch-misses # 6.42% of all branches
+
+ 4.549061439 seconds time elapsed
+
+The generated my-perf.pp policy extension module can be removed
+from the kernel using this command:
+
+ # semodule -X 300 -r my-perf
+
+Alternatively, the module can be temporarily disabled and re-enabled using
+these two commands:
+
+ # semodule -d my-perf
+ # semodule -e my-perf
+
+If something went wrong
+=======================
+
+To turn SELinux into Permissive mode:
+ # setenforce 0
+
+To fully disable SELinux during kernel boot [3], set the kernel command line parameter selinux=0
+
+To remove SELinux labeling from local filesystems:
+ # find / -mount -print0 | xargs -0 setfattr -h -x security.selinux
+
+To fully turn off SELinux on a machine, set SELINUX=disabled in the /etc/selinux/config file and reboot;
+
+Links
+=====
+
+[1] https://download-ib01.fedoraproject.org/pub/fedora/linux/updates/31/Everything/SRPMS/Packages/s/selinux-policy-3.14.4-49.fc31.src.rpm
+[2] https://docs.fedoraproject.org/en-US/Fedora/11/html/Security-Enhanced_Linux/sect-Security-Enhanced_Linux-Working_with_SELinux-Enabling_and_Disabling_SELinux.html
+[3] https://danwalsh.livejournal.com/10972.html
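
A minimal standalone check of whether perf_event_open() is permitted for the calling process: it opens a CPU-cycles counter on itself and reports errno (e.g. EACCES when an LSM policy or perf_event_paranoid denies access):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;

            /* measure the calling process (pid 0) on any CPU (-1) */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
                    return 1;
            }
            puts("perf_event_open() allowed");
            close(fd);
            return 0;
    }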
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 12a8204d63c6..877ca6be0ed0 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -23,12 +23,28 @@ include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
NO_PERF_REGS := 1
-NO_SYSCALL_TABLE := 1
+
+ifneq ($(NO_SYSCALL_TABLE),1)
+ NO_SYSCALL_TABLE := 1
+
+ ifeq ($(SRCARCH),x86)
+ ifeq (${IS_64_BIT}, 1)
+ NO_SYSCALL_TABLE := 0
+ endif
+ else
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),powerpc arm64 s390))
+ NO_SYSCALL_TABLE := 0
+ endif
+ endif
+
+ ifneq ($(NO_SYSCALL_TABLE),1)
+ CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
+ endif
+endif
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
NO_PERF_REGS := 0
- NO_SYSCALL_TABLE := 0
CFLAGS += -I$(OUTPUT)arch/powerpc/include/generated
LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
endif
@@ -37,7 +53,6 @@ endif
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
- NO_SYSCALL_TABLE := 0
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -I$(OUTPUT)arch/x86/include/generated
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
@@ -55,7 +70,6 @@ endif
ifeq ($(SRCARCH),arm64)
NO_PERF_REGS := 0
- NO_SYSCALL_TABLE := 0
CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
@@ -70,7 +84,6 @@ endif
ifeq ($(ARCH),s390)
NO_PERF_REGS := 0
- NO_SYSCALL_TABLE := 0
CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
endif
@@ -78,10 +91,6 @@ ifeq ($(NO_PERF_REGS),0)
$(call detected,CONFIG_PERF_REGS)
endif
-ifneq ($(NO_SYSCALL_TABLE),1)
- CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
-endif
-
# So far there's only x86 and arm libdw unwind support merged in perf.
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
@@ -346,7 +355,7 @@ ifndef NO_BIONIC
endif
ifeq ($(feature-eventfd), 1)
- CFLAGS += -DHAVE_EVENTFD
+ CFLAGS += -DHAVE_EVENTFD_SUPPORT
endif
ifeq ($(feature-get_current_dir_name), 1)
@@ -651,6 +660,7 @@ ifeq ($(NO_SYSCALL_TABLE),0)
$(call detected,CONFIG_TRACE)
else
ifndef NO_LIBAUDIT
+ $(call feature_check,libaudit)
ifneq ($(feature-libaudit), 1)
msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
NO_LIBAUDIT := 1
@@ -1012,6 +1022,19 @@ ifdef LIBCLANGLLVM
endif
endif
+ifdef LIBPFM4
+ $(call feature_check,libpfm4)
+ ifeq ($(feature-libpfm4), 1)
+ CFLAGS += -DHAVE_LIBPFM
+ EXTLIBS += -lpfm
+ ASCIIDOC_EXTRA = -aHAVE_LIBPFM=1
+ $(call detected,CONFIG_LIBPFM4)
+ else
+ msg := $(warning libpfm4 not found, disables libpfm4 support. Please install libpfm4-dev);
+ NO_LIBPFM4 := 1
+ endif
+endif
+
# Among the variables below, these:
# perfexecdir
# perf_include_dir
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 94a495594e99..86dbb51bb272 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -118,6 +118,12 @@ include ../scripts/utilities.mak
#
# Define LIBBPF_DYNAMIC to enable libbpf dynamic linking.
#
+# Define NO_SYSCALL_TABLE=1 to disable the use of syscall id to/from name tables
+# generated from the kernel .tbl or unistd.h files and instead use libaudit,
+# if available, for the string/id conversions.
+#
+# Define LIBPFM4 to enable libpfm4 events extension.
+#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -278,6 +284,7 @@ strip-libs = $(filter-out -l%,$(1))
ifneq ($(OUTPUT),)
TE_PATH=$(OUTPUT)
+ PLUGINS_PATH=$(OUTPUT)
BPF_PATH=$(OUTPUT)
SUBCMD_PATH=$(OUTPUT)
LIBPERF_PATH=$(OUTPUT)
@@ -288,6 +295,7 @@ else
endif
else
TE_PATH=$(TRACE_EVENT_DIR)
+ PLUGINS_PATH=$(TRACE_EVENT_DIR)plugins/
API_PATH=$(LIB_DIR)
BPF_PATH=$(BPF_DIR)
SUBCMD_PATH=$(SUBCMD_DIR)
@@ -297,7 +305,7 @@ endif
LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
export LIBTRACEEVENT
-LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)plugins/libtraceevent-dynamic-list
+LIBTRACEEVENT_DYNAMIC_LIST = $(PLUGINS_PATH)libtraceevent-dynamic-list
#
# The static build has no dynsym table, so this does not work for
@@ -756,10 +764,10 @@ $(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
libtraceevent_plugins: FORCE
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
+ $(Q)$(MAKE) -C $(TRACE_EVENT_DIR)plugins $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)plugins/libtraceevent-dynamic-list
+ $(Q)$(MAKE) -C $(TRACE_EVENT_DIR)plugins $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent-dynamic-list
$(LIBTRACEEVENT)-clean:
$(call QUIET_CLEAN, libtraceevent)
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 97aa02c4491d..cea5e33d61d2 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -216,7 +216,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
struct evsel *evsel)
{
char msg[BUFSIZ], path[PATH_MAX], *sink;
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
int ret = -EINVAL;
u32 hash;
@@ -224,7 +224,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
return 0;
list_for_each_entry(term, &evsel->config_terms, list) {
- if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
+ if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
continue;
sink = term->val.str;
@@ -265,7 +265,8 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
ptr->evlist = evlist;
ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
- if (perf_can_record_switch_events())
+ if (!record_opts__no_switch_events(opts) &&
+ perf_can_record_switch_events())
opts->record_switch_events = true;
evlist__for_each_entry(evlist, evsel) {
diff --git a/tools/perf/arch/arm64/util/unwind-libdw.c b/tools/perf/arch/arm64/util/unwind-libdw.c
index 7623d85e77f3..a50941629649 100644
--- a/tools/perf/arch/arm64/util/unwind-libdw.c
+++ b/tools/perf/arch/arm64/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index e5c9504f8586..e86e210bf514 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -2,6 +2,7 @@ perf-y += header.o
perf-y += kvm-stat.o
perf-y += perf_regs.o
perf-y += mem-events.o
+perf-y += sym-handling.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/unwind-libdw.c b/tools/perf/arch/powerpc/util/unwind-libdw.c
index abf2dbc7f829..7b2d96ec28e3 100644
--- a/tools/perf/arch/powerpc/util/unwind-libdw.c
+++ b/tools/perf/arch/powerpc/util/unwind-libdw.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
#include <linux/kernel.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
/* See backends/ppc_initreg.c and backends/ppc_regs.c in elfutils. */
static const int special_regs[3][2] = {
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index ef43be9b6ec2..4e40402a4f81 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -55,6 +55,14 @@ int test__arch_unwind_sample(struct perf_sample *sample,
return -1;
}
+#ifdef MEMORY_SANITIZER
+ /*
+ * Assignments to buf in the assembly function perf_regs_load aren't
+ * seen by memory sanitizer. Zero the memory to convince memory
+ * sanitizer the memory is initialized.
+ */
+ memset(buf, 0, sizeof(u64) * PERF_REGS_MAX);
+#endif
perf_regs_load(buf);
regs->abi = PERF_SAMPLE_REGS_ABI;
regs->regs = buf;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 3f7c20cc7b79..839ef52c1ac2 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -59,7 +59,8 @@ struct intel_pt_recording {
size_t priv_size;
};
-static int intel_pt_parse_terms_with_default(struct list_head *formats,
+static int intel_pt_parse_terms_with_default(const char *pmu_name,
+ struct list_head *formats,
const char *str,
u64 *config)
{
@@ -78,7 +79,8 @@ static int intel_pt_parse_terms_with_default(struct list_head *formats,
goto out_free;
attr.config = *config;
- err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
+ err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true,
+ NULL);
if (err)
goto out_free;
@@ -88,11 +90,12 @@ out_free:
return err;
}
-static int intel_pt_parse_terms(struct list_head *formats, const char *str,
- u64 *config)
+static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats,
+ const char *str, u64 *config)
{
*config = 0;
- return intel_pt_parse_terms_with_default(formats, str, config);
+ return intel_pt_parse_terms_with_default(pmu_name, formats, str,
+ config);
}
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
@@ -229,7 +232,8 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
- intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf,
+ &config);
return config;
}
@@ -337,13 +341,16 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
if (priv_size != ptr->priv_size)
return -EINVAL;
- intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
- intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
- &noretcomp_bit);
- intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "tsc", &tsc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "noretcomp", &noretcomp_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "mtc", &mtc_bit);
mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
"mtc_period");
- intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "cyc", &cyc_bit);
intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
@@ -556,10 +563,9 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
struct evsel *evsel)
{
- struct perf_evsel_config_term *term;
u64 user_bits = 0, bits;
+ struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);
- term = perf_evsel__get_config_term(evsel, CFG_CHG);
if (term)
user_bits = term->val.cfg_chg;
@@ -769,7 +775,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
}
}
- intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "tsc", &tsc_bit);
if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
have_timing_info = true;
@@ -780,7 +787,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* Per-cpu recording needs sched_switch events to distinguish different
* threads.
*/
- if (have_timing_info && !perf_cpu_map__empty(cpus)) {
+ if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+ !record_opts__no_switch_events(opts)) {
if (perf_can_record_switch_events()) {
bool cpu_wide = !target__none(&opts->target) &&
!target__has_task(&opts->target);
@@ -875,7 +883,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* per-cpu with no sched_switch (except workload-only).
*/
if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
- !target__none(&opts->target))
+ !target__none(&opts->target) &&
+ !intel_pt_evsel->core.attr.exclude_user)
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
return 0;
diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c
index fda8f4206ee4..eea2bf87232b 100644
--- a/tools/perf/arch/x86/util/unwind-libdw.c
+++ b/tools/perf/arch/x86/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index cadc18d42aa4..ca2d591aad8a 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -5,7 +5,7 @@
* Benchmark the various operations allowed for epoll_ctl(2).
* The idea is to concurrently stress a single epoll instance
*/
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
@@ -412,4 +412,4 @@ int bench_epoll_ctl(int argc, const char **argv)
errmem:
err(EXIT_FAILURE, "calloc");
}
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index cf797362675b..75dca9773186 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
/*
* Copyright (C) 2018 Davidlohr Bueso.
*
@@ -540,4 +540,4 @@ int bench_epoll_wait(int argc, const char **argv)
errmem:
err(EXIT_FAILURE, "calloc");
}
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 97e4a4fb3362..71d830d7b923 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -40,7 +40,7 @@ struct sender_context {
unsigned int num_fds;
int ready_out;
int wakefd;
- int out_fds[0];
+ int out_fds[];
};
struct receiver_context {
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index d3e5a84f87a2..4940d10074c3 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -432,7 +432,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
hists__collapse_resort(hists, NULL);
/* Don't sort callchain */
evsel__reset_sample_bit(pos, CALLCHAIN);
- perf_evsel__output_resort(pos, NULL);
+ evsel__output_resort(pos, NULL);
if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 083273209c88..cad31b1d3438 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -67,14 +67,14 @@ static struct bench futex_benchmarks[] = {
{ NULL, NULL, NULL }
};
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
static struct bench epoll_benchmarks[] = {
{ "wait", "Benchmark epoll concurrent epoll_waits", bench_epoll_wait },
{ "ctl", "Benchmark epoll concurrent epoll_ctls", bench_epoll_ctl },
{ "all", "Run all futex benchmarks", NULL },
{ NULL, NULL, NULL }
};
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
static struct bench internals_benchmarks[] = {
{ "synthesize", "Benchmark perf event synthesis", bench_synthesize },
@@ -95,7 +95,7 @@ static struct collection collections[] = {
{ "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
{"futex", "Futex stressing benchmarks", futex_benchmarks },
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
{"epoll", "Epoll stressing benchmarks", epoll_benchmarks },
#endif
{ "internals", "Perf-internals benchmarks", internals_benchmarks },
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 1baf4cae086f..d617d5682c68 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -2887,8 +2887,15 @@ static int parse_record_events(const struct option *opt,
{
bool *event_set = (bool *) opt->value;
+ if (!strcmp(str, "list")) {
+ perf_mem_events__list();
+ exit(0);
+ }
+ if (perf_mem_events__parse(str))
+ exit(-1);
+
*event_set = true;
- return perf_mem_events__parse(str);
+ return 0;
}
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 440501994931..98e992801251 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -34,7 +34,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
return PTR_ERR(session);
evlist__for_each_entry(session->evlist, pos) {
- perf_evsel__fprintf(pos, details, stdout);
+ evsel__fprintf(pos, details, stdout);
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT)
has_tracepoint = true;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 55eda54240fb..2bfc1b0db536 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -45,6 +45,7 @@ struct filter_entry {
char name[];
};
+static volatile int workload_exec_errno;
static bool done;
static void sig_handler(int sig __maybe_unused)
@@ -63,7 +64,7 @@ static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
siginfo_t *info __maybe_unused,
void *ucontext __maybe_unused)
{
- /* workload_exec_errno = info->si_value.sival_int; */
+ workload_exec_errno = info->si_value.sival_int;
done = true;
}
@@ -383,6 +384,14 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
write_tracing_file("tracing_on", "0");
+ if (workload_exec_errno) {
+ const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
+ /* flush stdout first so below error msg appears at the end. */
+ fflush(stdout);
+ pr_err("workload failed: %s\n", emsg);
+ goto out_close_fd;
+ }
+
/* read remaining buffer contents */
while (true) {
int n = read(trace_fd, buf, sizeof(buf));
@@ -397,7 +406,7 @@ out_close_fd:
out_reset:
reset_tracing_files(ftrace);
out:
- return done ? 0 : -1;
+ return (done && !workload_exec_errno) ? 0 : -1;
}
static int perf_ftrace_config(const char *var, const char *value, void *cb)
@@ -494,7 +503,7 @@ int cmd_ftrace(int argc, const char **argv)
argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc && target__none(&ftrace.target))
- usage_with_options(ftrace_usage, ftrace_options);
+ ftrace.target.system_wide = true;
ret = target__validate(&ftrace.target);
if (ret) {
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 53932db97a79..4a6de4b03ac0 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -51,7 +51,7 @@ struct perf_inject {
struct event_entry {
struct list_head node;
u32 tid;
- union perf_event event[0];
+ union perf_event event[];
};
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 965ef017496f..0a7fe4cb5555 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -42,7 +42,7 @@ int cmd_list(int argc, const char **argv)
OPT_END()
};
const char * const list_usage[] = {
- "perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|event_glob]",
+ "perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|metric|metricgroup|event_glob]",
NULL
};
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 68a7eb84561a..3523279af6af 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -38,26 +38,16 @@ static int parse_record_events(const struct option *opt,
const char *str, int unset __maybe_unused)
{
struct perf_mem *mem = *(struct perf_mem **)opt->value;
- int j;
- if (strcmp(str, "list")) {
- if (!perf_mem_events__parse(str)) {
- mem->operation = 0;
- return 0;
- }
- exit(-1);
+ if (!strcmp(str, "list")) {
+ perf_mem_events__list();
+ exit(0);
}
+ if (perf_mem_events__parse(str))
+ exit(-1);
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- struct perf_mem_event *e = &perf_mem_events[j];
-
- fprintf(stderr, "%-13s%-*s%s\n",
- e->tag,
- verbose > 0 ? 25 : 0,
- verbose > 0 ? perf_mem_events__name(j) : "",
- e->supported ? ": available" : "");
- }
- exit(0);
+ mem->operation = 0;
+ return 0;
}
static const char * const __usage[] = {
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 70548df2abb9..6b1507566770 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -364,6 +364,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
for (k = 0; k < pev->ntevs; k++) {
struct probe_trace_event *tev = &pev->tevs[k];
+ /* Skipped events have no event name */
+ if (!tev->event)
+ continue;
/* We use tev's name for showing new events */
show_perf_probe_event(tev->group, tev->event, pev,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index e4efdbf1a81e..e108d90ae2ed 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -45,6 +45,7 @@
#include "util/units.h"
#include "util/bpf-event.h"
#include "util/util.h"
+#include "util/pfm.h"
#include "asm/bug.h"
#include "perf.h"
@@ -56,6 +57,9 @@
#include <unistd.h>
#include <sched.h>
#include <signal.h>
+#ifdef HAVE_EVENTFD_SUPPORT
+#include <sys/eventfd.h>
+#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
@@ -538,6 +542,9 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
static volatile int signr = -1;
static volatile int child_finished;
+#ifdef HAVE_EVENTFD_SUPPORT
+static int done_fd = -1;
+#endif
static void sig_handler(int sig)
{
@@ -547,6 +554,21 @@ static void sig_handler(int sig)
signr = sig;
done = 1;
+#ifdef HAVE_EVENTFD_SUPPORT
+{
+ u64 tmp = 1;
+ /*
+ * It is possible for this signal handler to run after done is checked
+ * in the main loop, but before the perf counter fds are polled. If this
+ * happens, the poll() will continue to wait even though done is set,
+ * and will only break out if either another signal is received, or the
+ * counters are ready for read. To ensure the poll() doesn't sleep when
+ * done is set, use an eventfd (done_fd) to wake up the poll().
+ */
+ if (write(done_fd, &tmp, sizeof(tmp)) < 0)
+ pr_err("failed to signal wakeup fd, error: %m\n");
+}
+#endif // HAVE_EVENTFD_SUPPORT
}
static void sigsegv_handler(int sig)
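
Note: a self-contained sketch (assumed names, not the perf code) of the eventfd wake-up pattern the comment above describes. The handler writes to an eventfd that sits in the poll set, so poll() returns even if the signal lands between the done check and the sleep:

#include <sys/eventfd.h>
#include <poll.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>

static volatile sig_atomic_t done;
static int wake_fd = -1;

static void on_sigint(int sig __attribute__((unused)))
{
	uint64_t one = 1;
	ssize_t n;

	done = 1;
	n = write(wake_fd, &one, sizeof(one));	/* wakes up the poll() below */
	(void)n;
}

int main(void)
{
	struct pollfd pfd;

	wake_fd = eventfd(0, EFD_NONBLOCK);
	signal(SIGINT, on_sigint);

	pfd.fd = wake_fd;
	pfd.events = POLLIN;

	while (!done)
		poll(&pfd, 1, -1);	/* returns on the eventfd write or EINTR */

	close(wake_fd);
	return 0;
}
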
@@ -825,19 +847,28 @@ static int record__open(struct record *rec)
int rc = 0;
/*
- * For initial_delay we need to add a dummy event so that we can track
- * PERF_RECORD_MMAP while we wait for the initial delay to enable the
- * real events, the ones asked by the user.
+ * For initial_delay or system wide, we need to add a dummy event so
+ * that we can track PERF_RECORD_MMAP to cover the delay of waiting or
+ * event synthesis.
*/
- if (opts->initial_delay) {
+ if (opts->initial_delay || target__has_cpu(&opts->target)) {
if (perf_evlist__add_dummy(evlist))
return -ENOMEM;
+ /* Disable tracking of mmaps on lead event. */
pos = evlist__first(evlist);
pos->tracking = 0;
+ /* Set up dummy event. */
pos = evlist__last(evlist);
pos->tracking = 1;
- pos->core.attr.enable_on_exec = 1;
+ /*
+ * Enable the dummy event when the process is forked for
+ * initial_delay, immediately for system wide.
+ */
+ if (opts->initial_delay)
+ pos->core.attr.enable_on_exec = 1;
+ else
+ pos->immediate = 1;
}
perf_evlist__config(evlist, opts, &callchain_param);
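
Note: a hedged sketch (field names are from the perf_event_open(2) ABI, not lifted from this patch) of what the side-band "dummy" tracking event amounts to at the attr level, and how enable_on_exec separates the forked-workload case from the system-wide case where the event is enabled right away:

#include <linux/perf_event.h>
#include <string.h>

static void init_dummy_tracking_attr(struct perf_event_attr *attr,
				     int wait_for_exec)
{
	memset(attr, 0, sizeof(*attr));
	attr->size   = sizeof(*attr);
	attr->type   = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_DUMMY;	/* generates no samples */
	attr->mmap   = 1;			/* PERF_RECORD_MMAP */
	attr->comm   = 1;			/* PERF_RECORD_COMM */
	attr->task   = 1;			/* PERF_RECORD_FORK/EXIT */
	attr->disabled       = 1;
	attr->enable_on_exec = wait_for_exec ? 1 : 0;
}
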
@@ -1538,6 +1569,20 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
pr_err("Compression initialization failed.\n");
return -1;
}
+#ifdef HAVE_EVENTFD_SUPPORT
+ done_fd = eventfd(0, EFD_NONBLOCK);
+ if (done_fd < 0) {
+ pr_err("Failed to create wakeup eventfd, error: %m\n");
+ status = -1;
+ goto out_delete_session;
+ }
+ err = evlist__add_pollfd(rec->evlist, done_fd);
+ if (err < 0) {
+ pr_err("Failed to add wakeup eventfd to poll list\n");
+ status = err;
+ goto out_delete_session;
+ }
+#endif // HAVE_EVENTFD_SUPPORT
session->header.env.comp_type = PERF_COMP_ZSTD;
session->header.env.comp_level = rec->opts.comp_level;
@@ -1896,6 +1941,10 @@ out_child:
}
out_delete_session:
+#ifdef HAVE_EVENTFD_SUPPORT
+ if (done_fd >= 0)
+ close(done_fd);
+#endif
zstd_fini(&session->zstd_data);
perf_session__delete(session);
@@ -2453,8 +2502,9 @@ static struct option __record_options[] = {
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
"Record cgroup events"),
- OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
- "Record context switch events"),
+ OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
+ &record.opts.record_switch_events_set,
+ "Record context switch events"),
OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
"Configure all used events to run in kernel space.",
PARSE_OPT_EXCLUSIVE),
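
Note: a small sketch (hypothetical struct, not the real record_opts) of why the option becomes OPT_BOOLEAN_SET above. The extra *_set flag records whether the user expressed a choice at all, so a tool-chosen default can be applied only when they did not:

struct opts {
	int record_switch_events;
	int record_switch_events_set;	/* 1 iff --switch-events or --no-switch-events was given */
};

static void apply_default(struct opts *o, int wanted_by_default)
{
	if (!o->record_switch_events_set)
		o->record_switch_events = wanted_by_default;
}
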
@@ -2506,6 +2556,11 @@ static struct option __record_options[] = {
OPT_UINTEGER(0, "num-thread-synthesize",
&record.opts.nr_threads_synthesize,
"number of threads to run for event synthesis"),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
OPT_END()
};
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index ba63390246c2..b63b3fb2de70 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -47,7 +47,6 @@
#include "util/time-utils.h"
#include "util/auxtrace.h"
#include "util/units.h"
-#include "util/branch.h"
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
@@ -402,16 +401,7 @@ static int report__setup_sample_type(struct report *rep)
}
}
- if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
- if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER)) {
- callchain_param.record_mode = CALLCHAIN_DWARF;
- dwarf_callchain_users = true;
- } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
- callchain_param.record_mode = CALLCHAIN_LBR;
- else
- callchain_param.record_mode = CALLCHAIN_FP;
- }
+ callchain_param_setup(sample_type);
if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
@@ -716,8 +706,7 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
evlist__for_each_entry(rep->session->evlist, pos) {
- perf_evsel__output_resort_cb(pos, &prog,
- hists__resort_cb, rep);
+ evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
}
ui_progress__finish();
@@ -1090,6 +1079,26 @@ parse_percent_limit(const struct option *opt, const char *str,
return 0;
}
+static int process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct evlist **pevlist)
+{
+ u64 sample_type;
+ int err;
+
+ err = perf_event__process_attr(tool, event, pevlist);
+ if (err)
+ return err;
+
+ /*
+ * Check if we need to enable callchains based
+ * on events sample_type.
+ */
+ sample_type = perf_evlist__combined_sample_type(*pevlist);
+ callchain_param_setup(sample_type);
+ return 0;
+}
+
int cmd_report(int argc, const char **argv)
{
struct perf_session *session;
@@ -1120,7 +1129,7 @@ int cmd_report(int argc, const char **argv)
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.read = process_read_event,
- .attr = perf_event__process_attr,
+ .attr = process_attr,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
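
Note: a hedged sketch of the selection the shared callchain_param_setup() helper is assumed to centralize, mirroring the per-tool logic removed from report__setup_sample_type() above: DWARF unwinding when user registers and stack are sampled, LBR when branch stacks are, frame pointers otherwise.

#include <linux/types.h>
#include <linux/perf_event.h>

enum cc_mode { CC_FP, CC_DWARF, CC_LBR };

static enum cc_mode pick_callchain_mode(__u64 sample_type)
{
	if ((sample_type & PERF_SAMPLE_REGS_USER) &&
	    (sample_type & PERF_SAMPLE_STACK_USER))
		return CC_DWARF;
	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		return CC_LBR;
	return CC_FP;
}
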
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 56d7bcd12671..5da243676f12 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -167,6 +167,7 @@ static struct {
u64 fields;
u64 invalid_fields;
u64 user_set_fields;
+ u64 user_unset_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
@@ -2085,6 +2086,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct evlist *evlist;
struct evsel *evsel, *pos;
+ u64 sample_type;
int err;
static struct evsel_script *es;
@@ -2117,12 +2119,34 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return 0;
}
- set_print_ip_opts(&evsel->core.attr);
-
- if (evsel->core.attr.sample_type)
+ if (evsel->core.attr.sample_type) {
err = perf_evsel__check_attr(evsel, scr->session);
+ if (err)
+ return err;
+ }
- return err;
+ /*
+ * Check if we need to enable callchains based
+ * on events sample_type.
+ */
+ sample_type = perf_evlist__combined_sample_type(evlist);
+ callchain_param_setup(sample_type);
+
+ /* Enable fields for callchain entries */
+ if (symbol_conf.use_callchain &&
+ (sample_type & PERF_SAMPLE_CALLCHAIN ||
+ sample_type & PERF_SAMPLE_BRANCH_STACK ||
+ (sample_type & PERF_SAMPLE_REGS_USER &&
+ sample_type & PERF_SAMPLE_STACK_USER))) {
+ int type = output_type(evsel->core.attr.type);
+
+ if (!(output[type].user_unset_fields & PERF_OUTPUT_IP))
+ output[type].fields |= PERF_OUTPUT_IP;
+ if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM))
+ output[type].fields |= PERF_OUTPUT_SYM;
+ }
+ set_print_ip_opts(&evsel->core.attr);
+ return 0;
}
static int print_event_with_time(struct perf_tool *tool,
@@ -2434,7 +2458,7 @@ static int __cmd_script(struct perf_script *script)
struct script_spec {
struct list_head node;
struct scripting_ops *ops;
- char spec[0];
+ char spec[];
};
static LIST_HEAD(script_specs);
@@ -2672,9 +2696,11 @@ parse:
if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
output[j].user_set_fields &= ~all_output_options[i].field;
+ output[j].user_unset_fields |= all_output_options[i].field;
} else {
output[j].fields |= all_output_options[i].field;
output[j].user_set_fields |= all_output_options[i].field;
+ output[j].user_unset_fields &= ~all_output_options[i].field;
}
output[j].user_set = true;
output[j].wildcard_set = true;
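
Note: a minimal sketch (hypothetical bit values) of the bookkeeping the new user_unset_fields mask enables. An explicit removal such as "-F -ip" is remembered, so the attr-processing path above only force-enables IP/SYM for callchains when the user never vetoed them:

#define FIELD_IP	(1u << 0)
#define FIELD_SYM	(1u << 1)

struct output_fields {
	unsigned int fields;		/* currently selected */
	unsigned int user_set_fields;	/* explicitly added by the user */
	unsigned int user_unset_fields;	/* explicitly removed by the user */
};

static void maybe_enable_for_callchains(struct output_fields *of)
{
	if (!(of->user_unset_fields & FIELD_IP))
		of->fields |= FIELD_IP;
	if (!(of->user_unset_fields & FIELD_SYM))
		of->fields |= FIELD_SYM;
}
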
@@ -3286,7 +3312,10 @@ static int parse_xed(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
- force_pager("xed -F insn: -A -64 | less");
+ if (isatty(1))
+ force_pager("xed -F insn: -A -64 | less");
+ else
+ force_pager("xed -F insn: -A -64");
return 0;
}
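
Note: a tiny sketch of the check added to parse_xed() above: only append an interactive pager when standard output is a terminal, so redirected or piped xed output is not forced through less.

#include <unistd.h>

static const char *xed_cmd(void)	/* hypothetical helper */
{
	return isatty(STDOUT_FILENO) ? "xed -F insn: -A -64 | less"
				     : "xed -F insn: -A -64";
}
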
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e0c1ad23c768..9be020e0098a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -66,6 +66,7 @@
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
+#include "util/pfm.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -189,6 +190,59 @@ static struct perf_stat_config stat_config = {
.big_num = true,
};
+static bool cpus_map_matched(struct evsel *a, struct evsel *b)
+{
+ if (!a->core.cpus && !b->core.cpus)
+ return true;
+
+ if (!a->core.cpus || !b->core.cpus)
+ return false;
+
+ if (a->core.cpus->nr != b->core.cpus->nr)
+ return false;
+
+ for (int i = 0; i < a->core.cpus->nr; i++) {
+ if (a->core.cpus->map[i] != b->core.cpus->map[i])
+ return false;
+ }
+
+ return true;
+}
+
+static void evlist__check_cpu_maps(struct evlist *evlist)
+{
+ struct evsel *evsel, *pos, *leader;
+ char buf[1024];
+
+ evlist__for_each_entry(evlist, evsel) {
+ leader = evsel->leader;
+
+ /* Check that leader matches cpus with each member. */
+ if (leader == evsel)
+ continue;
+ if (cpus_map_matched(leader, evsel))
+ continue;
+
+ /* If there's mismatch disable the group and warn user. */
+ WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n");
+ evsel__group_desc(leader, buf, sizeof(buf));
+ pr_warning(" %s\n", buf);
+
+ if (verbose) {
+ cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
+ pr_warning(" %s: %s\n", leader->name, buf);
+ cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
+ pr_warning(" %s: %s\n", evsel->name, buf);
+ }
+
+ for_each_group_evsel(pos, leader) {
+ pos->leader = pos;
+ pos->core.nr_members = 0;
+ }
+ evsel->leader->core.nr_members = 0;
+ }
+}
+
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
@@ -314,14 +368,14 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
return 0;
}
-static void read_counters(struct timespec *rs)
+static int read_affinity_counters(struct timespec *rs)
{
struct evsel *counter;
struct affinity affinity;
int i, ncpus, cpu;
if (affinity__setup(&affinity) < 0)
- return;
+ return -1;
ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
if (!target__has_cpu(&target) || target__has_per_thread(&target))
@@ -341,6 +395,15 @@ static void read_counters(struct timespec *rs)
}
}
affinity__cleanup(&affinity);
+ return 0;
+}
+
+static void read_counters(struct timespec *rs)
+{
+ struct evsel *counter;
+
+ if (!stat_config.summary && (read_affinity_counters(rs) < 0))
+ return;
evlist__for_each_entry(evsel_list, counter) {
if (counter->err)
@@ -351,6 +414,46 @@ static void read_counters(struct timespec *rs)
}
}
+static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
+{
+ int i;
+
+ config->stats = calloc(nthreads, sizeof(struct runtime_stat));
+ if (!config->stats)
+ return -1;
+
+ config->stats_num = nthreads;
+
+ for (i = 0; i < nthreads; i++)
+ runtime_stat__init(&config->stats[i]);
+
+ return 0;
+}
+
+static void runtime_stat_delete(struct perf_stat_config *config)
+{
+ int i;
+
+ if (!config->stats)
+ return;
+
+ for (i = 0; i < config->stats_num; i++)
+ runtime_stat__exit(&config->stats[i]);
+
+ zfree(&config->stats);
+}
+
+static void runtime_stat_reset(struct perf_stat_config *config)
+{
+ int i;
+
+ if (!config->stats)
+ return;
+
+ for (i = 0; i < config->stats_num; i++)
+ perf_stat__reset_shadow_per_stat(&config->stats[i]);
+}
+
static void process_interval(void)
{
struct timespec ts, rs;
@@ -359,6 +462,7 @@ static void process_interval(void)
diff_timespec(&rs, &ts, &ref_time);
perf_stat__reset_shadow_per_stat(&rt_stat);
+ runtime_stat_reset(&stat_config);
read_counters(&rs);
if (STAT_RECORD) {
@@ -367,7 +471,7 @@ static void process_interval(void)
}
init_stats(&walltime_nsecs_stats);
- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
+ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
print_counters(&rs, 0, NULL);
}
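
Note: a standalone arithmetic check (not from the patch) of why the ULL suffix above matters. The interval is in milliseconds, and multiplying it by 1000000 in 32-bit arithmetic wraps once the product exceeds 32 bits, i.e. for intervals of a few seconds:

#include <stdio.h>

int main(void)
{
	unsigned int interval_ms = 5000;			/* e.g. -I 5000 */
	unsigned long long wraps = interval_ms * 1000000;	/* 32-bit multiply */
	unsigned long long exact = interval_ms * 1000000ULL;	/* 64-bit multiply */

	printf("wraps=%llu exact=%llu\n", wraps, exact);	/* 705032704 vs 5000000000 */
	return 0;
}
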
@@ -722,7 +826,21 @@ try_again_reset:
if (stat_config.walltime_run_table)
stat_config.walltime_run[run_idx] = t1 - t0;
- update_stats(&walltime_nsecs_stats, t1 - t0);
+ if (interval) {
+ stat_config.interval = 0;
+ stat_config.summary = true;
+ init_stats(&walltime_nsecs_stats);
+ update_stats(&walltime_nsecs_stats, t1 - t0);
+
+ if (stat_config.aggr_mode == AGGR_GLOBAL)
+ perf_evlist__save_aggr_prev_raw_counts(evsel_list);
+
+ perf_evlist__copy_prev_raw_counts(evsel_list);
+ perf_evlist__reset_prev_raw_counts(evsel_list);
+ runtime_stat_reset(&stat_config);
+ perf_stat__reset_shadow_per_stat(&rt_stat);
+ } else
+ update_stats(&walltime_nsecs_stats, t1 - t0);
/*
* Closing a group leader splits the group, and as we only disable
@@ -821,10 +939,16 @@ static void sig_atexit(void)
kill(getpid(), signr);
}
+void perf_stat__set_big_num(int set)
+{
+ stat_config.big_num = (set != 0);
+}
+
static int stat__set_big_num(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
big_num_opt = unset ? 0 : 1;
+ perf_stat__set_big_num(!unset);
return 0;
}
@@ -840,7 +964,10 @@ static int parse_metric_groups(const struct option *opt,
const char *str,
int unset __maybe_unused)
{
- return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
+ return metricgroup__parse_groups(opt, str,
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
+ &stat_config.metric_events);
}
static struct option stat_options[] = {
@@ -918,6 +1045,10 @@ static struct option stat_options[] = {
"ms to wait before starting measurement after program start"),
OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
"Only print computed metrics. No raw values", enable_metric_only),
+ OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
+ "don't group metric events, impacts multiplexing"),
+ OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
+ "don't try to share events between metrics in a group"),
OPT_BOOLEAN(0, "topdown", &topdown_run,
"measure topdown level 1 statistics"),
OPT_BOOLEAN(0, "smi-cost", &smi_cost,
@@ -935,6 +1066,11 @@ static struct option stat_options[] = {
"Use with 'percore' event qualifier to show the event "
"counts of one hardware thread by sum up total hardware "
"threads of same physical core"),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
OPT_END()
};
@@ -1442,6 +1578,8 @@ static int add_default_attributes(void)
struct option opt = { .value = &evsel_list };
return metricgroup__parse_groups(&opt, "transaction",
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
&stat_config.metric_events);
}
@@ -1737,35 +1875,6 @@ int process_cpu_map_event(struct perf_session *session,
return set_maps(st);
}
-static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
-{
- int i;
-
- config->stats = calloc(nthreads, sizeof(struct runtime_stat));
- if (!config->stats)
- return -1;
-
- config->stats_num = nthreads;
-
- for (i = 0; i < nthreads; i++)
- runtime_stat__init(&config->stats[i]);
-
- return 0;
-}
-
-static void runtime_stat_delete(struct perf_stat_config *config)
-{
- int i;
-
- if (!config->stats)
- return;
-
- for (i = 0; i < config->stats_num; i++)
- runtime_stat__exit(&config->stats[i]);
-
- zfree(&config->stats);
-}
-
static const char * const stat_report_usage[] = {
"perf stat report [<options>]",
NULL,
@@ -2057,6 +2166,8 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
+ evlist__check_cpu_maps(evsel_list);
+
/*
* Initialize thread_map with comm names,
* so we could print it out on output.
@@ -2147,7 +2258,7 @@ int cmd_stat(int argc, const char **argv)
}
}
- if (!forever && status != -1 && !interval)
+ if (!forever && status != -1 && (!interval || stat_config.summary))
print_counters(NULL, argc, argv);
if (STAT_RECORD) {
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index c76f84b174c4..4e380e7b5230 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -128,7 +128,7 @@ struct sample_wrapper {
struct sample_wrapper *next;
u64 timestamp;
- unsigned char data[0];
+ unsigned char data[];
};
#define TYPE_NONE 0
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 372c38254654..13889d73f8dd 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -53,6 +53,7 @@
#include "util/debug.h"
#include "util/ordered-events.h"
+#include "util/pfm.h"
#include <assert.h>
#include <elf.h>
@@ -307,7 +308,7 @@ static void perf_top__resort_hists(struct perf_top *t)
}
evlist__for_each_entry(evlist, pos) {
- perf_evsel__output_resort(pos, NULL);
+ evsel__output_resort(pos, NULL);
}
}
@@ -949,7 +950,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
struct list_head *config_terms;
struct evsel *evsel;
int set, overwrite = -1;
@@ -958,7 +959,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
set = -1;
config_terms = &evsel->config_terms;
list_for_each_entry(term, config_terms, list) {
- if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
+ if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
set = term->val.overwrite ? 1 : 0;
}
@@ -1575,6 +1576,11 @@ int cmd_top(int argc, const char **argv)
"WARNING: should be used on grouped events."),
OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
"Enable LBR callgraph stitching approach"),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index a46efb907bd4..4cbb64edc998 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -461,11 +461,11 @@ static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
- struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
+ struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
if (IS_ERR(evsel))
- evsel = perf_evsel__newtp("syscalls", direction);
+ evsel = evsel__newtp("syscalls", direction);
if (IS_ERR(evsel))
return NULL;
@@ -1748,12 +1748,26 @@ static int trace__read_syscall_info(struct trace *trace, int id)
struct syscall *sc;
const char *name = syscalltbl__name(trace->sctbl, id);
+#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (trace->syscalls.table == NULL) {
trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
if (trace->syscalls.table == NULL)
return -ENOMEM;
}
+#else
+ if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
+ // When using libaudit we don't know beforehand what is the max syscall id
+ struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ memset(table + trace->sctbl->syscalls.max_id, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
+ trace->syscalls.table = table;
+ trace->sctbl->syscalls.max_id = id;
+ }
+#endif
sc = trace->syscalls.table + id;
if (sc->nonexistent)
return 0;
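
Note: a generic sketch (hypothetical helper) of the grow-and-zero pattern used above for the libaudit build, where the maximum syscall id is not known up front: realloc to the new size, then clear only the freshly added tail so untouched slots read as unused.

#include <stdlib.h>
#include <string.h>

static int grow_table(void **tab, size_t *nr, size_t new_nr, size_t elem_sz)
{
	void *p;

	if (new_nr <= *nr)
		return 0;
	p = realloc(*tab, new_nr * elem_sz);
	if (!p)
		return -1;
	memset((char *)p + *nr * elem_sz, 0, (new_nr - *nr) * elem_sz);
	*tab = p;
	*nr = new_nr;
	return 0;
}
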
@@ -2077,8 +2091,20 @@ static struct syscall *trace__syscall_info(struct trace *trace,
err = -EINVAL;
- if (id > trace->sctbl->syscalls.max_id)
+#ifdef HAVE_SYSCALL_TABLE_SUPPORT
+ if (id > trace->sctbl->syscalls.max_id) {
+#else
+ if (id >= trace->sctbl->syscalls.max_id) {
+ /*
+ * With libaudit we don't know beforehand what is the max_id,
+ * so we let trace__read_syscall_info() figure that out as we
+ * go on reading syscalls.
+ */
+ err = trace__read_syscall_info(trace, id);
+ if (err)
+#endif
goto out_cant_read;
+ }
if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
(err = trace__read_syscall_info(trace, id)) != 0)
@@ -3045,7 +3071,7 @@ static bool evlist__add_vfs_getname(struct evlist *evlist)
return found;
}
-static struct evsel *perf_evsel__new_pgfault(u64 config)
+static struct evsel *evsel__new_pgfault(u64 config)
{
struct evsel *evsel;
struct perf_event_attr attr = {
@@ -3174,6 +3200,26 @@ out_enomem:
}
#ifdef HAVE_LIBBPF_SUPPORT
+static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
+{
+ if (trace->bpf_obj == NULL)
+ return NULL;
+
+ return bpf_object__find_map_by_name(trace->bpf_obj, name);
+}
+
+static void trace__set_bpf_map_filtered_pids(struct trace *trace)
+{
+ trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
+}
+
+static void trace__set_bpf_map_syscalls(struct trace *trace)
+{
+ trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
+ trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
+ trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
+}
+
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
if (trace->bpf_obj == NULL)
@@ -3512,6 +3558,20 @@ static void trace__delete_augmented_syscalls(struct trace *trace)
trace->bpf_obj = NULL;
}
#else // HAVE_LIBBPF_SUPPORT
+static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
+ const char *name __maybe_unused)
+{
+ return NULL;
+}
+
+static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
+{
+}
+
+static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
+{
+}
+
static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
{
return 0;
@@ -3841,7 +3901,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
}
if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
- pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
+ pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
@@ -3849,7 +3909,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
}
if ((trace->trace_pgfaults & TRACE_PFMIN)) {
- pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
+ pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
@@ -4600,26 +4660,6 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
return 0;
}
-static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
-{
- if (trace->bpf_obj == NULL)
- return NULL;
-
- return bpf_object__find_map_by_name(trace->bpf_obj, name);
-}
-
-static void trace__set_bpf_map_filtered_pids(struct trace *trace)
-{
- trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
-}
-
-static void trace__set_bpf_map_syscalls(struct trace *trace)
-{
- trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
- trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
- trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
-}
-
static int trace__config(const char *var, const char *value, void *arg)
{
struct trace *trace = arg;
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index cf147db4e5ca..94c2bc22c2bb 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -128,4 +128,8 @@ check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in
# diff non-symmetric files
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
+# check duplicated library files
+check_2 tools/perf/util/hashmap.h tools/lib/bpf/hashmap.h
+check_2 tools/perf/util/hashmap.c tools/lib/bpf/hashmap.c
+
cd tools/perf
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c441a34cb1c0..fcca275e5bf9 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -32,34 +32,41 @@ static void print_error(jvmtiEnv *jvmti, const char *msg, jvmtiError ret)
#ifdef HAVE_JVMTI_CMLR
static jvmtiError
-do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
- jvmti_line_info_t *tab, jint *nr)
+do_get_line_number(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
+ jvmti_line_info_t *tab)
{
- jint i, lines = 0;
- jint nr_lines = 0;
+ jint i, nr_lines = 0;
jvmtiLineNumberEntry *loc_tab = NULL;
jvmtiError ret;
+ jint src_line = -1;
ret = (*jvmti)->GetLineNumberTable(jvmti, m, &nr_lines, &loc_tab);
- if (ret != JVMTI_ERROR_NONE) {
+ if (ret == JVMTI_ERROR_ABSENT_INFORMATION || ret == JVMTI_ERROR_NATIVE_METHOD) {
+ /* No debug information for this method */
+ return ret;
+ } else if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetLineNumberTable", ret);
return ret;
}
- for (i = 0; i < nr_lines; i++) {
- if (loc_tab[i].start_location < bci) {
- tab[lines].pc = (unsigned long)pc;
- tab[lines].line_number = loc_tab[i].line_number;
- tab[lines].discrim = 0; /* not yet used */
- tab[lines].methodID = m;
- lines++;
- } else {
- break;
- }
+ for (i = 0; i < nr_lines && loc_tab[i].start_location <= bci; i++) {
+ src_line = i;
+ }
+
+ if (src_line != -1) {
+ tab->pc = (unsigned long)pc;
+ tab->line_number = loc_tab[src_line].line_number;
+ tab->discrim = 0; /* not yet used */
+ tab->methodID = m;
+
+ ret = JVMTI_ERROR_NONE;
+ } else {
+ ret = JVMTI_ERROR_ABSENT_INFORMATION;
}
+
(*jvmti)->Deallocate(jvmti, (unsigned char *)loc_tab);
- *nr = lines;
- return JVMTI_ERROR_NONE;
+
+ return ret;
}
static jvmtiError
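
Note: a standalone sketch of the lookup the rewritten do_get_line_number() performs above: the source line for a bytecode index is the last line-table entry whose start_location does not exceed that bci (entries assumed in ascending start_location order, as the loop above also assumes).

struct line_entry {
	long long start_location;	/* stands in for jlocation */
	int line_number;
};

static int line_for_bci(const struct line_entry *tab, int nr, long long bci)
{
	int i, best = -1;

	for (i = 0; i < nr && tab[i].start_location <= bci; i++)
		best = i;

	return best < 0 ? -1 : tab[best].line_number;
}
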
@@ -67,9 +74,8 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
{
const jvmtiCompiledMethodLoadRecordHeader *hdr;
jvmtiCompiledMethodLoadInlineRecord *rec;
- jvmtiLineNumberEntry *lne = NULL;
PCStackInfo *c;
- jint nr, ret;
+ jint ret;
int nr_total = 0;
int i, lines_total = 0;
@@ -82,21 +88,7 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
- for (i = 0; i < rec->numpcs; i++) {
- c = rec->pcinfo + i;
- nr = 0;
- /*
- * unfortunately, need a tab to get the number of lines!
- */
- ret = (*jvmti)->GetLineNumberTable(jvmti, c->methods[0], &nr, &lne);
- if (ret == JVMTI_ERROR_NONE) {
- /* free what was allocated for nothing */
- (*jvmti)->Deallocate(jvmti, (unsigned char *)lne);
- nr_total += (int)nr;
- } else {
- print_error(jvmti, "GetLineNumberTable", ret);
- }
- }
+ nr_total += rec->numpcs;
}
}
@@ -115,14 +107,17 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
for (i = 0; i < rec->numpcs; i++) {
c = rec->pcinfo + i;
- nr = 0;
- ret = do_get_line_numbers(jvmti, c->pc,
- c->methods[0],
- c->bcis[0],
- *tab + lines_total,
- &nr);
+ /*
+ * c->methods is the stack of inlined method calls
+ * at c->pc. [0] is the leaf method. Caller frames
+ * are ignored at the moment.
+ */
+ ret = do_get_line_number(jvmti, c->pc,
+ c->methods[0],
+ c->bcis[0],
+ *tab + lines_total);
if (ret == JVMTI_ERROR_NONE)
- lines_total += nr;
+ lines_total++;
}
}
}
@@ -246,8 +241,6 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
char *class_sign = NULL;
char *func_name = NULL;
char *func_sign = NULL;
- char *file_name = NULL;
- char fn[PATH_MAX];
uint64_t addr = (uint64_t)(uintptr_t)code_addr;
jvmtiError ret;
int nr_lines = 0; /* in line_tab[] */
@@ -264,7 +257,9 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
if (has_line_numbers && map && map_length) {
ret = get_line_numbers(jvmti, compile_info, &line_tab, &nr_lines);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: cannot get line table for method");
+ if (ret != JVMTI_ERROR_NOT_FOUND) {
+ warnx("jvmti: cannot get line table for method");
+ }
nr_lines = 0;
} else if (nr_lines > 0) {
line_file_names = malloc(sizeof(char*) * nr_lines);
@@ -282,12 +277,6 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
}
}
- ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
- if (ret != JVMTI_ERROR_NONE) {
- print_error(jvmti, "GetSourceFileName", ret);
- goto error;
- }
-
ret = (*jvmti)->GetClassSignature(jvmti, decl_class,
&class_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
@@ -302,8 +291,6 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
goto error;
}
- copy_class_filename(class_sign, file_name, fn, PATH_MAX);
-
/*
* write source line info record if we have it
*/
@@ -323,7 +310,6 @@ error:
(*jvmti)->Deallocate(jvmti, (unsigned char *)func_name);
(*jvmti)->Deallocate(jvmti, (unsigned char *)func_sign);
(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
- (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
free(line_tab);
while (line_file_names && (nr_lines > 0)) {
if (line_file_names[nr_lines - 1]) {
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
index bffb2d4a6420..fc4aa6c2ddc9 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -169,7 +169,7 @@
},
{
"BriefDescription": "Cycles GCT empty where dispatch was held",
- "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL)",
+ "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "gct_empty_disp_held_cpi"
},
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
index 811c2a8c1c9e..80816d6402e9 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -208,6 +208,84 @@
"MetricName": "fxu_stall_cpi"
},
{
+ "BriefDescription": "Instruction Completion Table empty for this thread due to branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to Icache Miss and branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table other stalls",
+ "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_cyc_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_hb_full_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_sync_cpi"
+ },
+ {
+ "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_tbegin_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
+ "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_l2_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to icache misses that were sourced from the local L3",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_l3_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to Icache Miss",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_miss_cpi"
+ },
+ {
"MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "issue_hold_cpi"
@@ -313,7 +391,7 @@
"MetricName": "nested_tend_stall_cpi"
},
{
- "BriefDescription": "Number of cycles the ICT has no itags assigned to this thread",
+ "BriefDescription": "Number of cycles the Instruction Completion Table has no itags assigned to this thread",
"MetricExpr": "PM_ICT_NOSLOT_CYC/PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "nothing_dispatched_cpi"
@@ -362,7 +440,7 @@
},
{
"BriefDescription": "Completion stall for other reasons",
- "MetricExpr": "PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+ "MetricExpr": "(PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "other_stall_cpi"
},
@@ -425,7 +503,7 @@
"MetricName": "st_fwd_stall_cpi"
},
{
- "BriefDescription": "Nothing completed and ICT not empty",
+ "BriefDescription": "Nothing completed and Instruction Completion Table not empty",
"MetricExpr": "PM_CMPLU_STALL/PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "stall_cpi"
@@ -1820,71 +1898,6 @@
"MetricName": "fxu_all_idle"
},
{
- "BriefDescription": "Ict empty for this thread due to branch mispred",
- "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_br_mpred_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred",
- "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
- },
- {
- "BriefDescription": "ICT other stalls",
- "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_cyc_other_cpi"
- },
- {
- "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_hb_full_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_issq_cpi"
- },
- {
- "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
- "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_other_cpi"
- },
- {
- "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_sync_cpi"
- },
- {
- "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_tbegin_cpi"
- },
- {
- "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
- "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_l2_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3",
- "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_l3_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
- "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_l3miss_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to Icache Miss",
- "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_miss_cpi"
- },
- {
"BriefDescription": "Rate of IERAT reloads from L2",
"MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
"MetricName": "ipteg_from_l2_rate_percent"
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 7fde0d2943cd..d25eebce34c9 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -328,31 +328,31 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\,umask\\=0x21@ / cha@event\\=0x35\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+ "MetricExpr": "cha@event\\=0x36\\,umask\\=0x21@ / cha@event\\=0x36\\,umask\\=0x21\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
{
"BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
- "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
+ "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ )",
"MetricGroup": "Memory_Lat",
"MetricName": "MEM_PMM_Read_Latency"
},
{
"BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
- "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+ "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time )",
"MetricGroup": "Memory_BW",
"MetricName": "PMM_Read_BW"
},
{
"BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
- "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
+ "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time )",
"MetricGroup": "Memory_BW",
"MetricName": "PMM_Write_BW"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index b4f91137f40c..390bdab1be9d 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -328,13 +328,13 @@
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
+ "MetricExpr": "1000000000 * ( cha@event\\=0x36\\,umask\\=0x21@ / cha@event\\=0x35\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
"MetricGroup": "Memory_Lat",
"MetricName": "DRAM_Read_Latency"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
+ "MetricExpr": "cha@event\\=0x36\\,umask\\=0x21@ / cha@event\\=0x36\\,umask\\=0x21\\,thresh\\=1@",
"MetricGroup": "Memory_BW",
"MetricName": "DRAM_Parallel_Reads"
},
diff --git a/tools/perf/pmu-events/jsmn.h b/tools/perf/pmu-events/jsmn.h
index c7b0f6ea2a31..1bdfd55fff30 100644
--- a/tools/perf/pmu-events/jsmn.h
+++ b/tools/perf/pmu-events/jsmn.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef __JSMN_H_
#define __JSMN_H_
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index c75557aeef0e..cd00498a5dce 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -57,6 +57,8 @@ perf-y += maps.o
perf-y += time-utils-test.o
perf-y += genelf.o
perf-y += api-io.o
+perf-y += demangle-java-test.o
+perf-y += pfm.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)
diff --git a/tools/perf/tests/attr/system-wide-dummy b/tools/perf/tests/attr/system-wide-dummy
new file mode 100644
index 000000000000..eba723cc0d38
--- /dev/null
+++ b/tools/perf/tests/attr/system-wide-dummy
@@ -0,0 +1,50 @@
+# Event added by system-wide or CPU perf-record to handle the race of
+# processes starting while /proc is processed.
+[event]
+fd=1
+group_fd=-1
+cpu=*
+pid=-1
+flags=8
+type=1
+size=120
+config=9
+sample_period=4000
+sample_type=455
+read_format=4
+# Event will be enabled right away.
+disabled=0
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0
+exclude_hv=0
+exclude_idle=0
+mmap=1
+comm=1
+freq=1
+inherit_stat=0
+enable_on_exec=0
+task=1
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=1
+exclude_host=0
+exclude_guest=0
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+mmap2=1
+comm_exec=1
+context_switch=0
+write_backward=0
+namespaces=0
+use_clockid=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
index 93818054ae20..317730b906dd 100644
--- a/tools/perf/tests/attr/test-record-C0
+++ b/tools/perf/tests/attr/test-record-C0
@@ -9,6 +9,14 @@ cpu=0
# no enable on exec for CPU attached
enable_on_exec=0
-# PERF_SAMPLE_IP | PERF_SAMPLE_TID PERF_SAMPLE_TIME | # PERF_SAMPLE_PERIOD
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_ID | PERF_SAMPLE_PERIOD
# + PERF_SAMPLE_CPU added by -C 0
-sample_type=391
+sample_type=455
+
+# Dummy event handles mmaps, comm and task.
+mmap=0
+comm=0
+task=0
+
+[event:system-wide-dummy]
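
Note: a quick standalone check (not part of the test) of the sample_type values above: the old 391 was IP|TID|TIME|CPU|PERIOD, and adding PERF_SAMPLE_ID (64) gives 455, matching both this file and the system-wide-dummy event.

#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
	unsigned long long old_st = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
				    PERF_SAMPLE_TIME | PERF_SAMPLE_CPU |
				    PERF_SAMPLE_PERIOD;
	unsigned long long new_st = old_st | PERF_SAMPLE_ID;

	printf("%llu %llu\n", old_st, new_st);	/* prints 391 455 */
	return 0;
}
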
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 3471ec52ea11..da5b6cc23f25 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -75,6 +75,13 @@ static struct test generic_tests[] = {
{
.desc = "PMU events",
.func = test__pmu_events,
+ .subtest = {
+ .skip_if_fail = false,
+ .get_nr = test__pmu_events_subtest_get_nr,
+ .get_desc = test__pmu_events_subtest_get_desc,
+ .skip_reason = test__pmu_events_subtest_skip_reason,
+ },
+
},
{
.desc = "DSO data read",
@@ -310,6 +317,15 @@ static struct test generic_tests[] = {
.func = test__jit_write_elf,
},
{
+ .desc = "Test libpfm4 support",
+ .func = test__pfm,
+ .subtest = {
+ .skip_if_fail = true,
+ .get_nr = test__pfm_subtest_get_nr,
+ .get_desc = test__pfm_subtest_get_desc,
+ }
+ },
+ {
.desc = "Test api io",
.func = test__api_io,
},
@@ -318,6 +334,10 @@ static struct test generic_tests[] = {
.func = test__maps__merge_in,
},
{
+ .desc = "Demangle Java",
+ .func = test__demangle_java,
+ },
+ {
.func = NULL,
},
};
@@ -327,7 +347,7 @@ static struct test *tests[] = {
arch_tests,
};
-static bool perf_test__matches(struct test *test, int curr, int argc, const char *argv[])
+static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
int i;
@@ -344,7 +364,7 @@ static bool perf_test__matches(struct test *test, int curr, int argc, const char
continue;
}
- if (strcasestr(test->desc, argv[i]))
+ if (strcasestr(desc, argv[i]))
return true;
}
@@ -429,8 +449,15 @@ static int test_and_print(struct test *t, bool force_skip, int subtest)
case TEST_OK:
pr_info(" Ok\n");
break;
- case TEST_SKIP:
- color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
+ case TEST_SKIP: {
+ const char *skip_reason = NULL;
+ if (t->subtest.skip_reason)
+ skip_reason = t->subtest.skip_reason(subtest);
+ if (skip_reason)
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", skip_reason);
+ else
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
+ }
break;
case TEST_FAIL:
default:
@@ -566,7 +593,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
.priv = &st,
};
- if (!perf_test__matches(&test, curr, argc, argv))
+ if (!perf_test__matches(test.desc, curr, argc, argv))
continue;
st.file = ent->d_name;
@@ -594,9 +621,25 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
for_each_test(j, t) {
int curr = i++, err;
+ int subi;
- if (!perf_test__matches(t, curr, argc, argv))
- continue;
+ if (!perf_test__matches(t->desc, curr, argc, argv)) {
+ bool skip = true;
+ int subn;
+
+ if (!t->subtest.get_nr)
+ continue;
+
+ subn = t->subtest.get_nr();
+
+ for (subi = 0; subi < subn; subi++) {
+ if (perf_test__matches(t->subtest.get_desc(subi), curr, argc, argv))
+ skip = false;
+ }
+
+ if (skip)
+ continue;
+ }
if (t->is_supported && !t->is_supported()) {
pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
@@ -624,7 +667,6 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
*/
int subw = width > 2 ? width - 2 : width;
bool skip = false;
- int subi;
if (subn <= 0) {
color_fprintf(stderr, PERF_COLOR_YELLOW,
@@ -641,6 +683,9 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
}
for (subi = 0; subi < subn; subi++) {
+ if (!perf_test__matches(t->subtest.get_desc(subi), curr, argc, argv))
+ continue;
+
pr_info("%2d.%1d: %-*s:", i, subi + 1, subw,
t->subtest.get_desc(subi));
err = test_and_print(t, skip, subi);
@@ -674,7 +719,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
.desc = shell_test__description(bf, sizeof(bf), path, ent->d_name),
};
- if (!perf_test__matches(&t, curr, argc, argv))
+ if (!perf_test__matches(t.desc, curr, argc, argv))
continue;
pr_info("%2d: %s\n", i, t.desc);
@@ -693,7 +738,7 @@ static int perf_test__list(int argc, const char **argv)
for_each_test(j, t) {
int curr = i++;
- if (!perf_test__matches(t, curr, argc, argv) ||
+ if (!perf_test__matches(t->desc, curr, argc, argv) ||
(t->is_supported && !t->is_supported()))
continue;
diff --git a/tools/perf/tests/demangle-java-test.c b/tools/perf/tests/demangle-java-test.c
new file mode 100644
index 000000000000..8f3b90832fb0
--- /dev/null
+++ b/tools/perf/tests/demangle-java-test.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "tests.h"
+#include "session.h"
+#include "debug.h"
+#include "demangle-java.h"
+
+int test__demangle_java(struct test *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = TEST_OK;
+ char *buf = NULL;
+ size_t i;
+
+ struct {
+ const char *mangled, *demangled;
+ } test_cases[] = {
+ { "Ljava/lang/StringLatin1;equals([B[B)Z",
+ "boolean java.lang.StringLatin1.equals(byte[], byte[])" },
+ { "Ljava/util/zip/ZipUtils;CENSIZ([BI)J",
+ "long java.util.zip.ZipUtils.CENSIZ(byte[], int)" },
+ { "Ljava/util/regex/Pattern$BmpCharProperty;match(Ljava/util/regex/Matcher;ILjava/lang/CharSequence;)Z",
+ "boolean java.util.regex.Pattern$BmpCharProperty.match(java.util.regex.Matcher, int, java.lang.CharSequence)" },
+ { "Ljava/lang/AbstractStringBuilder;appendChars(Ljava/lang/String;II)V",
+ "void java.lang.AbstractStringBuilder.appendChars(java.lang.String, int, int)" },
+ { "Ljava/lang/Object;<init>()V",
+ "void java.lang.Object<init>()" },
+ };
+
+ for (i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) {
+ buf = java_demangle_sym(test_cases[i].mangled, 0);
+ if (strcmp(buf, test_cases[i].demangled)) {
+ pr_debug("FAILED: %s: %s != %s\n", test_cases[i].mangled,
+ buf, test_cases[i].demangled);
+ ret = TEST_FAIL;
+ }
+ free(buf);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 779ce280a0e9..2491d167bf76 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -37,6 +37,7 @@ static int init_live_machine(struct machine *machine)
union perf_event event;
pid_t pid = getpid();
+ memset(&event, 0, sizeof(event));
return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
mmap_handler, machine, true);
}
@@ -94,7 +95,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
return strcmp((const char *) symbol, funcs[idx]);
}
-noinline int test_dwarf_unwind__thread(struct thread *thread)
+__no_tail_call noinline int test_dwarf_unwind__thread(struct thread *thread)
{
struct perf_sample sample;
unsigned long cnt = 0;
@@ -125,7 +126,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
static int global_unwind_retval = -INT_MAX;
-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+__no_tail_call noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
/* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1;
@@ -144,7 +145,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
return p1 - p2;
}
-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+__no_tail_call noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
struct thread *array[2] = {thread, thread};
void *fp = &bsearch;
@@ -163,12 +164,12 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
return global_unwind_retval;
}
-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+__no_tail_call noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
return test_dwarf_unwind__krava_3(thread);
}
-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+__no_tail_call noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
return test_dwarf_unwind__krava_2(thread);
}
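
Note: a hedged illustration of why the test frames gain __no_tail_call above; the macro below is one plausible definition, not necessarily the one in the tools headers. With sibling-call optimization the compiler reuses the caller's stack frame, so the intermediate test_dwarf_unwind__krava_* functions could vanish from the unwound call chain the test verifies.

#include <stdio.h>

#if defined(__GNUC__) && !defined(__clang__)
#define no_tail_call __attribute__((optimize("no-optimize-sibling-calls")))
#else
#define no_tail_call
#endif

static no_tail_call int leaf(int x)
{
	return x + 1;
}

static no_tail_call int middle(int x)
{
	return leaf(x);	/* a tail call the attribute keeps as a real frame */
}

int main(void)
{
	printf("%d\n", middle(41));
	return 0;
}
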
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index 61ecd8e33a01..f7f3e5b4c180 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -100,12 +100,11 @@ int test__perf_evsel__roundtrip_name_test(struct test *test __maybe_unused, int
{
int err = 0, ret = 0;
- err = perf_evsel__name_array_test(perf_evsel__hw_names);
+ err = perf_evsel__name_array_test(evsel__hw_names);
if (err)
ret = err;
- err = __perf_evsel__name_array_test(perf_evsel__sw_names,
- PERF_COUNT_SW_DUMMY + 1);
+ err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1);
if (err)
ret = err;
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index ce8aa32bc3ee..0e224a0a55d9 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -35,11 +35,11 @@ static int perf_evsel__test_field(struct evsel *evsel, const char *name,
int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtest __maybe_unused)
{
- struct evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
+ struct evsel *evsel = evsel__newtp("sched", "sched_switch");
int ret = 0;
if (IS_ERR(evsel)) {
- pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
+ pr_debug("evsel__newtp failed with %ld\n", PTR_ERR(evsel));
return -1;
}
@@ -66,10 +66,10 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
evsel__delete(evsel);
- evsel = perf_evsel__newtp("sched", "sched_wakeup");
+ evsel = evsel__newtp("sched", "sched_wakeup");
if (IS_ERR(evsel)) {
- pr_debug("perf_evsel__newtp failed with %ld\n", PTR_ERR(evsel));
+ pr_debug("evsel__newtp failed with %ld\n", PTR_ERR(evsel));
return -1;
}
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index f9e8e5628836..1cb02ca2b15f 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -19,15 +19,13 @@ static int test(struct expr_parse_ctx *ctx, const char *e, double val2)
int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
{
const char *p;
- const char **other;
- double val;
- int i, ret;
+ double val, *val_ptr;
+ int ret;
struct expr_parse_ctx ctx;
- int num_other;
expr__ctx_init(&ctx);
- expr__add_id(&ctx, "FOO", 1);
- expr__add_id(&ctx, "BAR", 2);
+ expr__add_id(&ctx, strdup("FOO"), 1);
+ expr__add_id(&ctx, strdup("BAR"), 2);
ret = test(&ctx, "1+1", 2);
ret |= test(&ctx, "FOO+BAR", 3);
@@ -39,6 +37,8 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
ret |= test(&ctx, "min(1,2) + 1", 2);
ret |= test(&ctx, "max(1,2) + 1", 3);
ret |= test(&ctx, "1+1 if 3*4 else 0", 2);
+ ret |= test(&ctx, "1.1 + 2.1", 3.2);
+ ret |= test(&ctx, ".1 + 2.", 2.1);
if (ret)
return ret;
@@ -51,25 +51,29 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
ret = expr__parse(&val, &ctx, p, 1);
TEST_ASSERT_VAL("missing operand", ret == -1);
+ expr__ctx_clear(&ctx);
TEST_ASSERT_VAL("find other",
- expr__find_other("FOO + BAR + BAZ + BOZO", "FOO", &other, &num_other, 1) == 0);
- TEST_ASSERT_VAL("find other", num_other == 3);
- TEST_ASSERT_VAL("find other", !strcmp(other[0], "BAR"));
- TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
- TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
- TEST_ASSERT_VAL("find other", other[3] == NULL);
+ expr__find_other("FOO + BAR + BAZ + BOZO", "FOO",
+ &ctx, 1) == 0);
+ TEST_ASSERT_VAL("find other", hashmap__size(&ctx.ids) == 3);
+ TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "BAR",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "BAZ",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "BOZO",
+ (void **)&val_ptr));
+ expr__ctx_clear(&ctx);
TEST_ASSERT_VAL("find other",
- expr__find_other("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@", NULL,
- &other, &num_other, 3) == 0);
- TEST_ASSERT_VAL("find other", num_other == 2);
- TEST_ASSERT_VAL("find other", !strcmp(other[0], "EVENT1,param=3/"));
- TEST_ASSERT_VAL("find other", !strcmp(other[1], "EVENT2,param=3/"));
- TEST_ASSERT_VAL("find other", other[2] == NULL);
+ expr__find_other("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@",
+ NULL, &ctx, 3) == 0);
+ TEST_ASSERT_VAL("find other", hashmap__size(&ctx.ids) == 2);
+ TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "EVENT1,param=3/",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find other", hashmap__find(&ctx.ids, "EVENT2,param=3/",
+ (void **)&val_ptr));
- for (i = 0; i < num_other; i++)
- zfree(&other[i]);
- free((void *)other);
+ expr__ctx_clear(&ctx);
return 0;
}
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 7a542f1c1c78..3f2e1a581247 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -190,7 +190,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
* function since TEST_ASSERT_VAL() returns in case of failure.
*/
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(hists_to_evsel(hists), NULL);
+ evsel__output_resort(hists_to_evsel(hists), NULL);
if (verbose > 2) {
pr_info("use callchain: %d, cumulate callchain: %d\n",
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 618b51ffcc01..123e07d35b55 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -142,7 +142,7 @@ int test__hists_filter(struct test *test __maybe_unused, int subtest __maybe_unu
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(evsel, NULL);
+ evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("Normal histogram\n");
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index 38f804ff6452..8973f35df604 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -155,7 +155,7 @@ static int test1(struct evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(evsel, NULL);
+ evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -255,7 +255,7 @@ static int test2(struct evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(evsel, NULL);
+ evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -309,7 +309,7 @@ static int test3(struct evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(evsel, NULL);
+ evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -387,7 +387,7 @@ static int test4(struct evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(evsel, NULL);
+ evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -490,7 +490,7 @@ static int test5(struct evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- perf_evsel__output_resort(evsel, NULL);
+ evsel__output_resort(evsel, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 5d0c3a9c47a1..9b651dfe0a6b 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -84,10 +84,13 @@ make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
make_no_auxtrace := NO_AUXTRACE=1
make_no_libbpf := NO_LIBBPF=1
+make_no_libbpf_DEBUG := NO_LIBBPF=1 DEBUG=1
make_no_libcrypto := NO_LIBCRYPTO=1
make_with_babeltrace:= LIBBABELTRACE=1
make_no_sdt := NO_SDT=1
+make_no_syscall_tbl := NO_SYSCALL_TABLE=1
make_with_clangllvm := LIBCLANGLLVM=1
+make_with_libpfm4 := LIBPFM4=1
make_tags := tags
make_cscope := cscope
make_help := help
@@ -112,7 +115,7 @@ make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
-make_minimal += NO_LIBCAP=1
+make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1
# $(run) contains all available tests
run := make_pure
@@ -144,8 +147,13 @@ run += make_no_libaudit
run += make_no_libbionic
run += make_no_auxtrace
run += make_no_libbpf
+run += make_no_libbpf_DEBUG
+run += make_no_libcrypto
+run += make_no_sdt
+run += make_no_syscall_tbl
run += make_with_babeltrace
run += make_with_clangllvm
+run += make_with_libpfm4
run += make_help
run += make_doc
run += make_perf_o
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index d4b8eb6e337a..7b0dbfc0e17d 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -79,9 +79,9 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
char name[64];
snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
- evsels[i] = perf_evsel__newtp("syscalls", name);
+ evsels[i] = evsel__newtp("syscalls", name);
if (IS_ERR(evsels[i])) {
- pr_debug("perf_evsel__new(%s)\n", name);
+ pr_debug("evsel__new(%s)\n", name);
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 900934be22d2..71f85e2cc127 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -44,7 +44,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
CPU_ZERO(&cpu_set);
- evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
+ evsel = evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
@@ -90,8 +90,8 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
* we use the auto allocation it will allocate just for 1 cpu,
* as we start by cpu 0.
*/
- if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
- pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+ if (evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
+ pr_debug("evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
goto out_close_fd;
}
@@ -117,7 +117,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
}
}
- perf_evsel__free_counts(evsel);
+ evsel__free_counts(evsel);
out_close_fd:
perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index 1dc2897d2df9..1f5f5e79ae25 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -46,9 +46,9 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out;
}
- evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
+ evsel = evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
- pr_debug("%s: perf_evsel__newtp\n", __func__);
+ pr_debug("%s: evsel__newtp\n", __func__);
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index db5d8bb8cd06..85a8f0fe7aea 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -27,7 +27,7 @@ int test__openat_syscall_event(struct test *test __maybe_unused, int subtest __m
return -1;
}
- evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
+ evsel = evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
new file mode 100644
index 000000000000..76a53126efdf
--- /dev/null
+++ b/tools/perf/tests/pfm.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test support for libpfm4 event encodings.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#include "tests.h"
+#include "util/debug.h"
+#include "util/evlist.h"
+#include "util/pfm.h"
+
+#include <linux/kernel.h>
+
+#ifdef HAVE_LIBPFM
+static int test__pfm_events(void);
+static int test__pfm_group(void);
+#endif
+
+static const struct {
+ int (*func)(void);
+ const char *desc;
+} pfm_testcase_table[] = {
+#ifdef HAVE_LIBPFM
+ {
+ .func = test__pfm_events,
+ .desc = "test of individual --pfm-events",
+ },
+ {
+ .func = test__pfm_group,
+ .desc = "test groups of --pfm-events",
+ },
+#endif
+};
+
+#ifdef HAVE_LIBPFM
+static int count_pfm_events(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int count = 0;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ count++;
+ }
+ return count;
+}
+
+static int test__pfm_events(void)
+{
+ struct evlist *evlist;
+ struct option opt;
+ size_t i;
+ const struct {
+ const char *events;
+ int nr_events;
+ } table[] = {
+ {
+ .events = "",
+ .nr_events = 0,
+ },
+ {
+ .events = "instructions",
+ .nr_events = 1,
+ },
+ {
+ .events = "instructions,cycles",
+ .nr_events = 2,
+ },
+ {
+ .events = "stereolab",
+ .nr_events = 0,
+ },
+ {
+ .events = "instructions,instructions",
+ .nr_events = 2,
+ },
+ {
+ .events = "stereolab,instructions",
+ .nr_events = 0,
+ },
+ {
+ .events = "instructions,stereolab",
+ .nr_events = 1,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(table); i++) {
+ evlist = evlist__new();
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ opt.value = evlist;
+ parse_libpfm_events_option(&opt,
+ table[i].events,
+ 0);
+ TEST_ASSERT_EQUAL(table[i].events,
+ count_pfm_events(&evlist->core),
+ table[i].nr_events);
+ TEST_ASSERT_EQUAL(table[i].events,
+ evlist->nr_groups,
+ 0);
+
+ evlist__delete(evlist);
+ }
+ return 0;
+}
+
+static int test__pfm_group(void)
+{
+ struct evlist *evlist;
+ struct option opt;
+ size_t i;
+ const struct {
+ const char *events;
+ int nr_events;
+ int nr_groups;
+ } table[] = {
+ {
+ .events = "{},",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
+ {
+ .events = "{instructions}",
+ .nr_events = 1,
+ .nr_groups = 1,
+ },
+ {
+ .events = "{instructions},{}",
+ .nr_events = 1,
+ .nr_groups = 1,
+ },
+ {
+ .events = "{},{instructions}",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
+ {
+ .events = "{instructions},{instructions}",
+ .nr_events = 2,
+ .nr_groups = 2,
+ },
+ {
+ .events = "{instructions,cycles},{instructions,cycles}",
+ .nr_events = 4,
+ .nr_groups = 2,
+ },
+ {
+ .events = "{stereolab}",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
+ {
+ .events =
+ "{instructions,cycles},{instructions,stereolab}",
+ .nr_events = 3,
+ .nr_groups = 1,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(table); i++) {
+ evlist = evlist__new();
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ opt.value = evlist;
+ parse_libpfm_events_option(&opt,
+ table[i].events,
+ 0);
+ TEST_ASSERT_EQUAL(table[i].events,
+ count_pfm_events(&evlist->core),
+ table[i].nr_events);
+ TEST_ASSERT_EQUAL(table[i].events,
+ evlist->nr_groups,
+ table[i].nr_groups);
+
+ evlist__delete(evlist);
+ }
+ return 0;
+}
+#endif
+
+const char *test__pfm_subtest_get_desc(int i)
+{
+ if (i < 0 || i >= (int)ARRAY_SIZE(pfm_testcase_table))
+ return NULL;
+ return pfm_testcase_table[i].desc;
+}
+
+int test__pfm_subtest_get_nr(void)
+{
+ return (int)ARRAY_SIZE(pfm_testcase_table);
+}
+
+int test__pfm(struct test *test __maybe_unused, int i __maybe_unused)
+{
+#ifdef HAVE_LIBPFM
+ if (i < 0 || i >= (int)ARRAY_SIZE(pfm_testcase_table))
+ return TEST_FAIL;
+ return pfm_testcase_table[i].func();
+#else
+ return TEST_SKIP;
+#endif
+}
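
For readers unfamiliar with the perf test harness, the new pfm.c above follows the usual table-driven subtest pattern: an array of { func, desc } entries plus *_get_nr()/*_get_desc() helpers that the test driver queries. Below is a minimal standalone sketch of that pattern; the subtest names and the main() driver are made up for illustration and are not part of the kernel tree.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int subtest_a(void) { return 0; }	/* 0 == pass */
static int subtest_b(void) { return 0; }

static const struct {
	int (*func)(void);
	const char *desc;
} testcase_table[] = {
	{ .func = subtest_a, .desc = "first subtest"  },
	{ .func = subtest_b, .desc = "second subtest" },
};

static int subtest_get_nr(void)
{
	return (int)ARRAY_SIZE(testcase_table);
}

static const char *subtest_get_desc(int i)
{
	if (i < 0 || i >= (int)ARRAY_SIZE(testcase_table))
		return NULL;
	return testcase_table[i].desc;
}

int main(void)
{
	/* Iterate the table the same way the harness would. */
	for (int i = 0; i < subtest_get_nr(); i++)
		printf("%s: %s\n", subtest_get_desc(i),
		       testcase_table[i].func() ? "FAIL" : "OK");
	return 0;
}
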
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index d64261da8bf7..ab64b4a4e284 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "math.h"
#include "parse-events.h"
#include "pmu.h"
#include "tests.h"
@@ -8,6 +9,9 @@
#include <linux/zalloc.h>
#include "debug.h"
#include "../pmu-events/pmu-events.h"
+#include "util/evlist.h"
+#include "util/expr.h"
+#include "util/parse-events.h"
struct perf_pmu_test_event {
struct pmu_event event;
@@ -144,7 +148,7 @@ static struct pmu_events_map *__test_pmu_get_events_map(void)
}
/* Verify generated events from pmu-events.c is as expected */
-static int __test_pmu_event_table(void)
+static int test_pmu_event_table(void)
{
struct pmu_events_map *map = __test_pmu_get_events_map();
struct pmu_event *table;
@@ -347,14 +351,11 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
return res;
}
-int test__pmu_events(struct test *test __maybe_unused,
- int subtest __maybe_unused)
+
+static int test_aliases(void)
{
struct perf_pmu *pmu = NULL;
- if (__test_pmu_event_table())
- return -1;
-
while ((pmu = perf_pmu__scan(pmu)) != NULL) {
int count = 0;
@@ -377,3 +378,163 @@ int test__pmu_events(struct test *test __maybe_unused,
return 0;
}
+
+static bool is_number(const char *str)
+{
+ char *end_ptr;
+ double v;
+
+ errno = 0;
+ v = strtod(str, &end_ptr);
+	(void)v; // We're not interested in the value itself, only in whether the string parsed as a number
+ return errno == 0 && end_ptr != str;
+}
+
+static int check_parse_id(const char *id, bool same_cpu, struct pmu_event *pe)
+{
+ struct parse_events_error error;
+ struct evlist *evlist;
+ int ret;
+
+ /* Numbers are always valid. */
+ if (is_number(id))
+ return 0;
+
+ evlist = evlist__new();
+ memset(&error, 0, sizeof(error));
+ ret = parse_events(evlist, id, &error);
+ if (ret && same_cpu) {
+ pr_warning("Parse event failed metric '%s' id '%s' expr '%s'\n",
+ pe->metric_name, id, pe->metric_expr);
+ pr_warning("Error string '%s' help '%s'\n", error.str,
+ error.help);
+ } else if (ret) {
+ pr_debug3("Parse event failed, but for an event that may not be supported by this CPU.\nid '%s' metric '%s' expr '%s'\n",
+ id, pe->metric_name, pe->metric_expr);
+ ret = 0;
+ }
+ evlist__delete(evlist);
+ free(error.str);
+ free(error.help);
+ free(error.first_str);
+ free(error.first_help);
+ return ret;
+}
+
+static void expr_failure(const char *msg,
+ const struct pmu_events_map *map,
+ const struct pmu_event *pe)
+{
+ pr_debug("%s for map %s %s %s\n",
+ msg, map->cpuid, map->version, map->type);
+ pr_debug("On metric %s\n", pe->metric_name);
+ pr_debug("On expression %s\n", pe->metric_expr);
+}
+
+static int test_parsing(void)
+{
+ struct pmu_events_map *cpus_map = perf_pmu__find_map(NULL);
+ struct pmu_events_map *map;
+ struct pmu_event *pe;
+ int i, j, k;
+ int ret = 0;
+ struct expr_parse_ctx ctx;
+ double result;
+
+ i = 0;
+ for (;;) {
+ map = &pmu_events_map[i++];
+ if (!map->table)
+ break;
+ j = 0;
+ for (;;) {
+ struct hashmap_entry *cur;
+ size_t bkt;
+
+ pe = &map->table[j++];
+ if (!pe->name && !pe->metric_group && !pe->metric_name)
+ break;
+ if (!pe->metric_expr)
+ continue;
+ expr__ctx_init(&ctx);
+ if (expr__find_other(pe->metric_expr, NULL, &ctx, 0)
+ < 0) {
+ expr_failure("Parse other failed", map, pe);
+ ret++;
+ continue;
+ }
+
+ /*
+		 * Add all ids with a made-up value. Using the same value for
+		 * every id could trigger a divide by zero when two ids are
+		 * subtracted, so make the values unique.
+ */
+ k = 1;
+ hashmap__for_each_entry((&ctx.ids), cur, bkt)
+ expr__add_id(&ctx, strdup(cur->key), k++);
+
+ hashmap__for_each_entry((&ctx.ids), cur, bkt) {
+ if (check_parse_id(cur->key, map == cpus_map,
+ pe))
+ ret++;
+ }
+
+ if (expr__parse(&result, &ctx, pe->metric_expr, 0)) {
+ expr_failure("Parse failed", map, pe);
+ ret++;
+ }
+ expr__ctx_clear(&ctx);
+ }
+ }
+ /* TODO: fail when not ok */
+ return ret == 0 ? TEST_OK : TEST_SKIP;
+}
+
+static const struct {
+ int (*func)(void);
+ const char *desc;
+} pmu_events_testcase_table[] = {
+ {
+ .func = test_pmu_event_table,
+ .desc = "PMU event table sanity",
+ },
+ {
+ .func = test_aliases,
+ .desc = "PMU event map aliases",
+ },
+ {
+ .func = test_parsing,
+ .desc = "Parsing of PMU event table metrics",
+ },
+};
+
+const char *test__pmu_events_subtest_get_desc(int subtest)
+{
+ if (subtest < 0 ||
+ subtest >= (int)ARRAY_SIZE(pmu_events_testcase_table))
+ return NULL;
+ return pmu_events_testcase_table[subtest].desc;
+}
+
+const char *test__pmu_events_subtest_skip_reason(int subtest)
+{
+ if (subtest < 0 ||
+ subtest >= (int)ARRAY_SIZE(pmu_events_testcase_table))
+ return NULL;
+ if (pmu_events_testcase_table[subtest].func != test_parsing)
+ return NULL;
+ return "some metrics failed";
+}
+
+int test__pmu_events_subtest_get_nr(void)
+{
+ return (int)ARRAY_SIZE(pmu_events_testcase_table);
+}
+
+int test__pmu_events(struct test *test __maybe_unused, int subtest)
+{
+ if (subtest < 0 ||
+ subtest >= (int)ARRAY_SIZE(pmu_events_testcase_table))
+ return TEST_FAIL;
+ return pmu_events_testcase_table[subtest].func();
+}
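
As a side note, the is_number() helper added above relies on the standard strtod() contract: end_ptr is left pointing at the first unparsed character and errno reports range errors. A standalone sketch of the same idiom follows; the sample strings are made up, and, like the helper above, it accepts strings that merely start with a number.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int is_number(const char *str)
{
	char *end_ptr;

	errno = 0;
	strtod(str, &end_ptr);		/* the parsed value itself is not needed */
	/* valid if no range error occurred and at least one character was consumed */
	return errno == 0 && end_ptr != str;
}

int main(void)
{
	const char *samples[] = { "4.2", "1e9", "cycles", "" };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-8s -> %s\n", samples[i],
		       is_number(samples[i]) ? "number" : "not a number");
	return 0;
}
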
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 74379ff1f7fa..5c11fe2b3040 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -156,8 +156,8 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
if (ret)
break;
- ret = perf_pmu__config_terms(&formats, &attr, terms,
- false, NULL);
+ ret = perf_pmu__config_terms("perf-pmu-test", &formats, &attr,
+ terms, false, NULL);
if (ret)
break;
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index bfb9986093d8..4b9b731977c8 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -56,7 +56,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
evsel = evsel__new(&attr);
if (evsel == NULL) {
- pr_debug("perf_evsel__new\n");
+ pr_debug("evsel__new\n");
goto out_delete_evlist;
}
evlist__add(evlist, evsel);
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index d6d4ac34eeb7..76a4e352eaaf 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -34,6 +34,7 @@ struct test {
bool skip_if_fail;
int (*get_nr)(void);
const char *(*get_desc)(int subtest);
+ const char *(*skip_reason)(int subtest);
} subtest;
bool (*is_supported)(void);
void *priv;
@@ -50,6 +51,9 @@ int test__perf_evsel__tp_sched_test(struct test *test, int subtest);
int test__syscall_openat_tp_fields(struct test *test, int subtest);
int test__pmu(struct test *test, int subtest);
int test__pmu_events(struct test *test, int subtest);
+const char *test__pmu_events_subtest_get_desc(int subtest);
+const char *test__pmu_events_subtest_skip_reason(int subtest);
+int test__pmu_events_subtest_get_nr(void);
int test__attr(struct test *test, int subtest);
int test__dso_data(struct test *test, int subtest);
int test__dso_data_cache(struct test *test, int subtest);
@@ -113,6 +117,10 @@ int test__maps__merge_in(struct test *t, int subtest);
int test__time_utils(struct test *t, int subtest);
int test__jit_write_elf(struct test *test, int subtest);
int test__api_io(struct test *test, int subtest);
+int test__demangle_java(struct test *test, int subtest);
+int test__pfm(struct test *test, int subtest);
+const char *test__pfm_subtest_get_desc(int subtest);
+int test__pfm_subtest_get_nr(void);
bool test__bp_signal_is_supported(void);
bool test__bp_account_is_supported(void);
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
index 22c9fc900c84..9f9ea45cddc4 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -57,7 +57,7 @@ process_arch()
local arch="$1"
local asm_errno=$(asm_errno_file "$arch")
- $gcc $include_path -E -dM -x c $asm_errno \
+ $gcc $CFLAGS $include_path -E -dM -x c $asm_errno \
|grep -hE '^#define[[:blank:]]+(E[^[:blank:]]+)[[:blank:]]+([[:digit:]]+).*' \
|awk '{ print $2","$3; }' \
|sort -t, -k2 -nu \
@@ -91,7 +91,7 @@ EoHEADER
# in tools/perf/arch
archlist=""
for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
- test -d arch/$arch && archlist="$archlist $arch"
+ test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
done
for arch in x86 $archlist generic; do
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index ca07a162d602..8d18380ecd10 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -106,7 +106,7 @@ perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
perf-$(CONFIG_AUXTRACE) += intel-pt.o
perf-$(CONFIG_AUXTRACE) += intel-bts.o
perf-$(CONFIG_AUXTRACE) += arm-spe.o
-perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+perf-$(CONFIG_AUXTRACE) += arm-spe-decoder/
perf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
ifdef CONFIG_LIBOPENCSD
@@ -136,6 +136,10 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
perf-$(CONFIG_LIBELF) += probe-event.o
+ifndef CONFIG_LIBBPF
+perf-y += hashmap.o
+endif
+
ifndef CONFIG_LIBELF
perf-y += symbol-minimal.o
endif
@@ -179,6 +183,8 @@ perf-$(CONFIG_LIBBPF) += bpf-event.o
perf-$(CONFIG_CXX) += c++/
+perf-$(CONFIG_LIBPFM4) += pfm.o
+
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
CFLAGS_llvm-utils.o += -DPERF_INCLUDE_DIR="BUILD_STR($(perf_include_dir_SQ))"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index d828c2d2edee..76bfb4a9d94e 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -41,7 +41,6 @@
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <bpf/libbpf.h>
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 2d88069d6428..0a0cd4f32175 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -144,7 +144,7 @@ struct annotation_line {
u32 idx;
int idx_asm;
int data_nr;
- struct annotation_data data[0];
+ struct annotation_data data[];
};
struct disasm_line {
@@ -227,7 +227,7 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel);
struct sym_hist {
u64 nr_samples;
u64 period;
- struct sym_hist_entry addr[0];
+ struct sym_hist_entry addr[];
};
struct cyc_hist {
diff --git a/tools/perf/util/arm-spe-decoder/Build b/tools/perf/util/arm-spe-decoder/Build
new file mode 100644
index 000000000000..f8dae13fc876
--- /dev/null
+++ b/tools/perf/util/arm-spe-decoder/Build
@@ -0,0 +1 @@
+perf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o arm-spe-decoder.o
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
new file mode 100644
index 000000000000..302a14d0aca9
--- /dev/null
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arm_spe_decoder.c: ARM SPE support
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+#include <linux/zalloc.h>
+
+#include "../auxtrace.h"
+#include "../debug.h"
+#include "../util.h"
+
+#include "arm-spe-decoder.h"
+
+#ifndef BIT
+#define BIT(n) (1UL << (n))
+#endif
+
+static u64 arm_spe_calc_ip(int index, u64 payload)
+{
+ u8 *addr = (u8 *)&payload;
+ int ns, el;
+
+ /* Instruction virtual address or Branch target address */
+ if (index == SPE_ADDR_PKT_HDR_INDEX_INS ||
+ index == SPE_ADDR_PKT_HDR_INDEX_BRANCH) {
+ ns = addr[7] & SPE_ADDR_PKT_NS;
+ el = (addr[7] & SPE_ADDR_PKT_EL_MASK) >> SPE_ADDR_PKT_EL_OFFSET;
+
+ /* Fill highest byte for EL1 or EL2 (VHE) mode */
+ if (ns && (el == SPE_ADDR_PKT_EL1 || el == SPE_ADDR_PKT_EL2))
+ addr[7] = 0xff;
+ /* Clean highest byte for other cases */
+ else
+ addr[7] = 0x0;
+
+ /* Data access virtual address */
+ } else if (index == SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT) {
+
+		/* Fill highest byte if bits [48..55] are 0xff */
+ if (addr[6] == 0xff)
+ addr[7] = 0xff;
+ /* Otherwise, cleanup tags */
+ else
+ addr[7] = 0x0;
+
+ /* Data access physical address */
+ } else if (index == SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS) {
+ /* Cleanup byte 7 */
+ addr[7] = 0x0;
+ } else {
+ pr_err("unsupported address packet index: 0x%x\n", index);
+ }
+
+ return payload;
+}
+
+struct arm_spe_decoder *arm_spe_decoder_new(struct arm_spe_params *params)
+{
+ struct arm_spe_decoder *decoder;
+
+ if (!params->get_trace)
+ return NULL;
+
+ decoder = zalloc(sizeof(struct arm_spe_decoder));
+ if (!decoder)
+ return NULL;
+
+ decoder->get_trace = params->get_trace;
+ decoder->data = params->data;
+
+ return decoder;
+}
+
+void arm_spe_decoder_free(struct arm_spe_decoder *decoder)
+{
+ free(decoder);
+}
+
+static int arm_spe_get_data(struct arm_spe_decoder *decoder)
+{
+ struct arm_spe_buffer buffer = { .buf = 0, };
+ int ret;
+
+ pr_debug("Getting more data\n");
+ ret = decoder->get_trace(&buffer, decoder->data);
+ if (ret < 0)
+ return ret;
+
+ decoder->buf = buffer.buf;
+ decoder->len = buffer.len;
+
+ if (!decoder->len)
+ pr_debug("No more data\n");
+
+ return decoder->len;
+}
+
+static int arm_spe_get_next_packet(struct arm_spe_decoder *decoder)
+{
+ int ret;
+
+ do {
+ if (!decoder->len) {
+ ret = arm_spe_get_data(decoder);
+
+ /* Failed to read out trace data */
+ if (ret <= 0)
+ return ret;
+ }
+
+ ret = arm_spe_get_packet(decoder->buf, decoder->len,
+ &decoder->packet);
+ if (ret <= 0) {
+ /* Move forward for 1 byte */
+ decoder->buf += 1;
+ decoder->len -= 1;
+ return -EBADMSG;
+ }
+
+ decoder->buf += ret;
+ decoder->len -= ret;
+ } while (decoder->packet.type == ARM_SPE_PAD);
+
+ return 1;
+}
+
+static int arm_spe_read_record(struct arm_spe_decoder *decoder)
+{
+ int err;
+ int idx;
+ u64 payload, ip;
+
+ memset(&decoder->record, 0x0, sizeof(decoder->record));
+
+ while (1) {
+ err = arm_spe_get_next_packet(decoder);
+ if (err <= 0)
+ return err;
+
+ idx = decoder->packet.index;
+ payload = decoder->packet.payload;
+
+ switch (decoder->packet.type) {
+ case ARM_SPE_TIMESTAMP:
+ decoder->record.timestamp = payload;
+ return 1;
+ case ARM_SPE_END:
+ return 1;
+ case ARM_SPE_ADDRESS:
+ ip = arm_spe_calc_ip(idx, payload);
+ if (idx == SPE_ADDR_PKT_HDR_INDEX_INS)
+ decoder->record.from_ip = ip;
+ else if (idx == SPE_ADDR_PKT_HDR_INDEX_BRANCH)
+ decoder->record.to_ip = ip;
+ break;
+ case ARM_SPE_COUNTER:
+ break;
+ case ARM_SPE_CONTEXT:
+ break;
+ case ARM_SPE_OP_TYPE:
+ break;
+ case ARM_SPE_EVENTS:
+ if (payload & BIT(EV_L1D_REFILL))
+ decoder->record.type |= ARM_SPE_L1D_MISS;
+
+ if (payload & BIT(EV_L1D_ACCESS))
+ decoder->record.type |= ARM_SPE_L1D_ACCESS;
+
+ if (payload & BIT(EV_TLB_WALK))
+ decoder->record.type |= ARM_SPE_TLB_MISS;
+
+ if (payload & BIT(EV_TLB_ACCESS))
+ decoder->record.type |= ARM_SPE_TLB_ACCESS;
+
+ if ((idx == 1 || idx == 2 || idx == 3) &&
+ (payload & BIT(EV_LLC_MISS)))
+ decoder->record.type |= ARM_SPE_LLC_MISS;
+
+ if ((idx == 1 || idx == 2 || idx == 3) &&
+ (payload & BIT(EV_LLC_ACCESS)))
+ decoder->record.type |= ARM_SPE_LLC_ACCESS;
+
+ if ((idx == 1 || idx == 2 || idx == 3) &&
+ (payload & BIT(EV_REMOTE_ACCESS)))
+ decoder->record.type |= ARM_SPE_REMOTE_ACCESS;
+
+ if (payload & BIT(EV_MISPRED))
+ decoder->record.type |= ARM_SPE_BRANCH_MISS;
+
+ break;
+ case ARM_SPE_DATA_SOURCE:
+ break;
+ case ARM_SPE_BAD:
+ break;
+ case ARM_SPE_PAD:
+ break;
+ default:
+ pr_err("Get packet error!\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int arm_spe_decode(struct arm_spe_decoder *decoder)
+{
+ return arm_spe_read_record(decoder);
+}
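
The ARM_SPE_EVENTS case above is essentially a bitmask-to-flags translation. Here is a self-contained sketch of that mapping, using a subset of the event bits and sample-type flags defined in arm-spe-decoder.h; the payload value in main() is made up.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1UL << (n))

/* Bit positions taken from enum arm_spe_events. */
enum { EV_L1D_ACCESS = 2, EV_L1D_REFILL = 3, EV_TLB_ACCESS = 4,
       EV_TLB_WALK = 5, EV_MISPRED = 7 };

/* Sample-type flags as in enum arm_spe_sample_type. */
enum { SPE_L1D_ACCESS = 1 << 0, SPE_L1D_MISS = 1 << 1,
       SPE_TLB_ACCESS = 1 << 4, SPE_TLB_MISS = 1 << 5,
       SPE_BRANCH_MISS = 1 << 6 };

static unsigned int decode_events(uint64_t payload)
{
	unsigned int type = 0;

	/* Same bit-to-flag mapping as the ARM_SPE_EVENTS case. */
	if (payload & BIT(EV_L1D_REFILL))
		type |= SPE_L1D_MISS;
	if (payload & BIT(EV_L1D_ACCESS))
		type |= SPE_L1D_ACCESS;
	if (payload & BIT(EV_TLB_WALK))
		type |= SPE_TLB_MISS;
	if (payload & BIT(EV_TLB_ACCESS))
		type |= SPE_TLB_ACCESS;
	if (payload & BIT(EV_MISPRED))
		type |= SPE_BRANCH_MISS;
	return type;
}

int main(void)
{
	/* Made-up payload: L1D access + refill + branch mispredict. */
	uint64_t payload = BIT(EV_L1D_ACCESS) | BIT(EV_L1D_REFILL) |
			   BIT(EV_MISPRED);

	printf("record type flags: 0x%x\n", decode_events(payload));
	return 0;
}
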
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
new file mode 100644
index 000000000000..a5111a8d4360
--- /dev/null
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arm_spe_decoder.h: Arm Statistical Profiling Extensions support
+ * Copyright (c) 2019-2020, Arm Ltd.
+ */
+
+#ifndef INCLUDE__ARM_SPE_DECODER_H__
+#define INCLUDE__ARM_SPE_DECODER_H__
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "arm-spe-pkt-decoder.h"
+
+enum arm_spe_events {
+ EV_EXCEPTION_GEN = 0,
+ EV_RETIRED = 1,
+ EV_L1D_ACCESS = 2,
+ EV_L1D_REFILL = 3,
+ EV_TLB_ACCESS = 4,
+ EV_TLB_WALK = 5,
+ EV_NOT_TAKEN = 6,
+ EV_MISPRED = 7,
+ EV_LLC_ACCESS = 8,
+ EV_LLC_MISS = 9,
+ EV_REMOTE_ACCESS = 10,
+ EV_ALIGNMENT = 11,
+ EV_PARTIAL_PREDICATE = 17,
+ EV_EMPTY_PREDICATE = 18,
+};
+
+enum arm_spe_sample_type {
+ ARM_SPE_L1D_ACCESS = 1 << 0,
+ ARM_SPE_L1D_MISS = 1 << 1,
+ ARM_SPE_LLC_ACCESS = 1 << 2,
+ ARM_SPE_LLC_MISS = 1 << 3,
+ ARM_SPE_TLB_ACCESS = 1 << 4,
+ ARM_SPE_TLB_MISS = 1 << 5,
+ ARM_SPE_BRANCH_MISS = 1 << 6,
+ ARM_SPE_REMOTE_ACCESS = 1 << 7,
+};
+
+struct arm_spe_record {
+ enum arm_spe_sample_type type;
+ int err;
+ u64 from_ip;
+ u64 to_ip;
+ u64 timestamp;
+};
+
+struct arm_spe_insn;
+
+struct arm_spe_buffer {
+ const unsigned char *buf;
+ size_t len;
+ u64 offset;
+ u64 trace_nr;
+};
+
+struct arm_spe_params {
+ int (*get_trace)(struct arm_spe_buffer *buffer, void *data);
+ void *data;
+};
+
+struct arm_spe_decoder {
+ int (*get_trace)(struct arm_spe_buffer *buffer, void *data);
+ void *data;
+ struct arm_spe_record record;
+
+ const unsigned char *buf;
+ size_t len;
+
+ struct arm_spe_pkt packet;
+};
+
+struct arm_spe_decoder *arm_spe_decoder_new(struct arm_spe_params *params);
+void arm_spe_decoder_free(struct arm_spe_decoder *decoder);
+
+int arm_spe_decode(struct arm_spe_decoder *decoder);
+
+#endif
diff --git a/tools/perf/util/arm-spe-pkt-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
index b94001b756c7..b94001b756c7 100644
--- a/tools/perf/util/arm-spe-pkt-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h
new file mode 100644
index 000000000000..4c870521b8eb
--- /dev/null
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__ARM_SPE_PKT_DECODER_H__
+#define INCLUDE__ARM_SPE_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define ARM_SPE_PKT_DESC_MAX 256
+
+#define ARM_SPE_NEED_MORE_BYTES -1
+#define ARM_SPE_BAD_PACKET -2
+
+#define ARM_SPE_PKT_MAX_SZ 16
+
+enum arm_spe_pkt_type {
+ ARM_SPE_BAD,
+ ARM_SPE_PAD,
+ ARM_SPE_END,
+ ARM_SPE_TIMESTAMP,
+ ARM_SPE_ADDRESS,
+ ARM_SPE_COUNTER,
+ ARM_SPE_CONTEXT,
+ ARM_SPE_OP_TYPE,
+ ARM_SPE_EVENTS,
+ ARM_SPE_DATA_SOURCE,
+};
+
+struct arm_spe_pkt {
+ enum arm_spe_pkt_type type;
+ unsigned char index;
+ uint64_t payload;
+};
+
+#define SPE_ADDR_PKT_HDR_INDEX_INS (0x0)
+#define SPE_ADDR_PKT_HDR_INDEX_BRANCH (0x1)
+#define SPE_ADDR_PKT_HDR_INDEX_DATA_VIRT (0x2)
+#define SPE_ADDR_PKT_HDR_INDEX_DATA_PHYS (0x3)
+
+#define SPE_ADDR_PKT_NS BIT(7)
+#define SPE_ADDR_PKT_CH BIT(6)
+#define SPE_ADDR_PKT_EL_OFFSET (5)
+#define SPE_ADDR_PKT_EL_MASK (0x3 << SPE_ADDR_PKT_EL_OFFSET)
+#define SPE_ADDR_PKT_EL0 (0)
+#define SPE_ADDR_PKT_EL1 (1)
+#define SPE_ADDR_PKT_EL2 (2)
+#define SPE_ADDR_PKT_EL3 (3)
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type);
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet);
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf, size_t len);
+#endif
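
The SPE_ADDR_PKT_* masks above describe byte 7 of an address packet payload, which carries the non-secure (NS) bit and the exception level (EL) field that arm_spe_calc_ip() inspects. A standalone sketch of extracting those fields; the payload value is made up and a little-endian host is assumed.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BIT(n)				(1UL << (n))
#define SPE_ADDR_PKT_NS			BIT(7)
#define SPE_ADDR_PKT_EL_OFFSET		(5)
#define SPE_ADDR_PKT_EL_MASK		(0x3 << SPE_ADDR_PKT_EL_OFFSET)

int main(void)
{
	/* Made-up 8-byte address payload; byte 7 carries the NS/EL bits. */
	uint64_t payload = 0xa0ffffc000123456ULL;
	uint8_t addr[8];
	int ns, el;

	memcpy(addr, &payload, sizeof(addr));	/* little-endian byte view */
	ns = !!(addr[7] & SPE_ADDR_PKT_NS);
	el = (addr[7] & SPE_ADDR_PKT_EL_MASK) >> SPE_ADDR_PKT_EL_OFFSET;

	printf("NS=%d EL%d\n", ns, el);
	return 0;
}
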
diff --git a/tools/perf/util/arm-spe-pkt-decoder.h b/tools/perf/util/arm-spe-pkt-decoder.h
deleted file mode 100644
index d786ef65113f..000000000000
--- a/tools/perf/util/arm-spe-pkt-decoder.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Arm Statistical Profiling Extensions (SPE) support
- * Copyright (c) 2017-2018, Arm Ltd.
- */
-
-#ifndef INCLUDE__ARM_SPE_PKT_DECODER_H__
-#define INCLUDE__ARM_SPE_PKT_DECODER_H__
-
-#include <stddef.h>
-#include <stdint.h>
-
-#define ARM_SPE_PKT_DESC_MAX 256
-
-#define ARM_SPE_NEED_MORE_BYTES -1
-#define ARM_SPE_BAD_PACKET -2
-
-enum arm_spe_pkt_type {
- ARM_SPE_BAD,
- ARM_SPE_PAD,
- ARM_SPE_END,
- ARM_SPE_TIMESTAMP,
- ARM_SPE_ADDRESS,
- ARM_SPE_COUNTER,
- ARM_SPE_CONTEXT,
- ARM_SPE_OP_TYPE,
- ARM_SPE_EVENTS,
- ARM_SPE_DATA_SOURCE,
-};
-
-struct arm_spe_pkt {
- enum arm_spe_pkt_type type;
- unsigned char index;
- uint64_t payload;
-};
-
-const char *arm_spe_pkt_name(enum arm_spe_pkt_type);
-
-int arm_spe_get_packet(const unsigned char *buf, size_t len,
- struct arm_spe_pkt *packet);
-
-int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf, size_t len);
-#endif
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 875a0dd540e5..3882a5360ada 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -4,46 +4,85 @@
* Copyright (c) 2017-2018, Arm Ltd.
*/
+#include <byteswap.h>
#include <endian.h>
#include <errno.h>
-#include <byteswap.h>
#include <inttypes.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/types.h>
#include <linux/zalloc.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "auxtrace.h"
#include "color.h"
+#include "debug.h"
+#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "session.h"
-#include "debug.h"
-#include "auxtrace.h"
+#include "symbol.h"
+#include "thread.h"
+#include "thread-stack.h"
+#include "tool.h"
+#include "util/synthetic-events.h"
+
#include "arm-spe.h"
-#include "arm-spe-pkt-decoder.h"
+#include "arm-spe-decoder/arm-spe-decoder.h"
+#include "arm-spe-decoder/arm-spe-pkt-decoder.h"
+
+#define MAX_TIMESTAMP (~0ULL)
struct arm_spe {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
struct auxtrace_heap heap;
+ struct itrace_synth_opts synth_opts;
u32 auxtrace_type;
struct perf_session *session;
struct machine *machine;
u32 pmu_type;
+
+ u8 timeless_decoding;
+ u8 data_queued;
+
+ u8 sample_flc;
+ u8 sample_llc;
+ u8 sample_tlb;
+ u8 sample_branch;
+ u8 sample_remote_access;
+
+ u64 l1d_miss_id;
+ u64 l1d_access_id;
+ u64 llc_miss_id;
+ u64 llc_access_id;
+ u64 tlb_miss_id;
+ u64 tlb_access_id;
+ u64 branch_miss_id;
+ u64 remote_access_id;
+
+ u64 kernel_start;
+
+ unsigned long num_events;
};
struct arm_spe_queue {
- struct arm_spe *spe;
- unsigned int queue_nr;
- struct auxtrace_buffer *buffer;
- bool on_heap;
- bool done;
- pid_t pid;
- pid_t tid;
- int cpu;
+ struct arm_spe *spe;
+ unsigned int queue_nr;
+ struct auxtrace_buffer *buffer;
+ struct auxtrace_buffer *old_buffer;
+ union perf_event *event_buf;
+ bool on_heap;
+ bool done;
+ pid_t pid;
+ pid_t tid;
+ int cpu;
+ struct arm_spe_decoder *decoder;
+ u64 time;
+ u64 timestamp;
+ struct thread *thread;
};
static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
@@ -92,44 +131,520 @@ static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
arm_spe_dump(spe, buf, len);
}
-static int arm_spe_process_event(struct perf_session *session __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_sample *sample __maybe_unused,
- struct perf_tool *tool __maybe_unused)
+static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
+{
+ struct arm_spe_queue *speq = data;
+ struct auxtrace_buffer *buffer = speq->buffer;
+ struct auxtrace_buffer *old_buffer = speq->old_buffer;
+ struct auxtrace_queue *queue;
+
+ queue = &speq->spe->queues.queue_array[speq->queue_nr];
+
+ buffer = auxtrace_buffer__next(queue, buffer);
+ /* If no more data, drop the previous auxtrace_buffer and return */
+ if (!buffer) {
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+ b->len = 0;
+ return 0;
+ }
+
+ speq->buffer = buffer;
+
+ /* If the aux_buffer doesn't have data associated, try to load it */
+ if (!buffer->data) {
+ /* get the file desc associated with the perf data file */
+ int fd = perf_data__fd(speq->spe->session->data);
+
+ buffer->data = auxtrace_buffer__get_data(buffer, fd);
+ if (!buffer->data)
+ return -ENOMEM;
+ }
+
+ b->len = buffer->size;
+ b->buf = buffer->data;
+
+ if (b->len) {
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+ speq->old_buffer = buffer;
+ } else {
+ auxtrace_buffer__drop_data(buffer);
+ return arm_spe_get_trace(b, data);
+ }
+
+ return 0;
+}
+
+static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
+ unsigned int queue_nr)
+{
+ struct arm_spe_params params = { .get_trace = 0, };
+ struct arm_spe_queue *speq;
+
+ speq = zalloc(sizeof(*speq));
+ if (!speq)
+ return NULL;
+
+ speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
+ if (!speq->event_buf)
+ goto out_free;
+
+ speq->spe = spe;
+ speq->queue_nr = queue_nr;
+ speq->pid = -1;
+ speq->tid = -1;
+ speq->cpu = -1;
+
+ /* params set */
+ params.get_trace = arm_spe_get_trace;
+ params.data = speq;
+
+ /* create new decoder */
+ speq->decoder = arm_spe_decoder_new(&params);
+ if (!speq->decoder)
+ goto out_free;
+
+ return speq;
+
+out_free:
+ zfree(&speq->event_buf);
+ free(speq);
+
+ return NULL;
+}
+
+static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
+{
+ return ip >= spe->kernel_start ?
+ PERF_RECORD_MISC_KERNEL :
+ PERF_RECORD_MISC_USER;
+}
+
+static void arm_spe_prep_sample(struct arm_spe *spe,
+ struct arm_spe_queue *speq,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ struct arm_spe_record *record = &speq->decoder->record;
+
+ if (!spe->timeless_decoding)
+ sample->time = speq->timestamp;
+
+ sample->ip = record->from_ip;
+ sample->cpumode = arm_spe_cpumode(spe, sample->ip);
+ sample->pid = speq->pid;
+ sample->tid = speq->tid;
+ sample->addr = record->to_ip;
+ sample->period = 1;
+ sample->cpu = speq->cpu;
+
+ event->sample.header.type = PERF_RECORD_SAMPLE;
+ event->sample.header.misc = sample->cpumode;
+ event->sample.header.size = sizeof(struct perf_event_header);
+}
+
+static inline int
+arm_spe_deliver_synth_event(struct arm_spe *spe,
+ struct arm_spe_queue *speq __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample)
+{
+ int ret;
+
+ ret = perf_session__deliver_synth_event(spe->session, event, sample);
+ if (ret)
+ pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
+
+ return ret;
+}
+
+static int
+arm_spe_synth_spe_events_sample(struct arm_spe_queue *speq,
+ u64 spe_events_id)
+{
+ struct arm_spe *spe = speq->spe;
+ union perf_event *event = speq->event_buf;
+ struct perf_sample sample = { .ip = 0, };
+
+ arm_spe_prep_sample(spe, speq, event, &sample);
+
+ sample.id = spe_events_id;
+ sample.stream_id = spe_events_id;
+
+ return arm_spe_deliver_synth_event(spe, speq, event, &sample);
+}
+
+static int arm_spe_sample(struct arm_spe_queue *speq)
+{
+ const struct arm_spe_record *record = &speq->decoder->record;
+ struct arm_spe *spe = speq->spe;
+ int err;
+
+ if (spe->sample_flc) {
+ if (record->type & ARM_SPE_L1D_MISS) {
+ err = arm_spe_synth_spe_events_sample(
+ speq, spe->l1d_miss_id);
+ if (err)
+ return err;
+ }
+
+ if (record->type & ARM_SPE_L1D_ACCESS) {
+ err = arm_spe_synth_spe_events_sample(
+ speq, spe->l1d_access_id);
+ if (err)
+ return err;
+ }
+ }
+
+ if (spe->sample_llc) {
+ if (record->type & ARM_SPE_LLC_MISS) {
+ err = arm_spe_synth_spe_events_sample(
+ speq, spe->llc_miss_id);
+ if (err)
+ return err;
+ }
+
+ if (record->type & ARM_SPE_LLC_ACCESS) {
+ err = arm_spe_synth_spe_events_sample(
+ speq, spe->llc_access_id);
+ if (err)
+ return err;
+ }
+ }
+
+ if (spe->sample_tlb) {
+ if (record->type & ARM_SPE_TLB_MISS) {
+ err = arm_spe_synth_spe_events_sample(
+ speq, spe->tlb_miss_id);
+ if (err)
+ return err;
+ }
+
+ if (record->type & ARM_SPE_TLB_ACCESS) {
+ err = arm_spe_synth_spe_events_sample(
+ speq, spe->tlb_access_id);
+ if (err)
+ return err;
+ }
+ }
+
+ if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) {
+ err = arm_spe_synth_spe_events_sample(speq,
+ spe->branch_miss_id);
+ if (err)
+ return err;
+ }
+
+ if (spe->sample_remote_access &&
+ (record->type & ARM_SPE_REMOTE_ACCESS)) {
+ err = arm_spe_synth_spe_events_sample(speq,
+ spe->remote_access_id);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
+{
+ struct arm_spe *spe = speq->spe;
+ int ret;
+
+ if (!spe->kernel_start)
+ spe->kernel_start = machine__kernel_start(spe->machine);
+
+ while (1) {
+ ret = arm_spe_decode(speq->decoder);
+ if (!ret) {
+ pr_debug("No data or all data has been processed.\n");
+ return 1;
+ }
+
+ /*
+		 * If an error is detected while decoding the SPE trace data,
+		 * continue to the next trace data and try to find more records.
+ */
+ if (ret < 0)
+ continue;
+
+ ret = arm_spe_sample(speq);
+ if (ret)
+ return ret;
+
+ if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
+ *timestamp = speq->timestamp;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int arm_spe__setup_queue(struct arm_spe *spe,
+ struct auxtrace_queue *queue,
+ unsigned int queue_nr)
+{
+ struct arm_spe_queue *speq = queue->priv;
+ struct arm_spe_record *record;
+
+ if (list_empty(&queue->head) || speq)
+ return 0;
+
+ speq = arm_spe__alloc_queue(spe, queue_nr);
+
+ if (!speq)
+ return -ENOMEM;
+
+ queue->priv = speq;
+
+ if (queue->cpu != -1)
+ speq->cpu = queue->cpu;
+
+ if (!speq->on_heap) {
+ int ret;
+
+ if (spe->timeless_decoding)
+ return 0;
+
+retry:
+ ret = arm_spe_decode(speq->decoder);
+
+ if (!ret)
+ return 0;
+
+ if (ret < 0)
+ goto retry;
+
+ record = &speq->decoder->record;
+
+ speq->timestamp = record->timestamp;
+ ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
+ if (ret)
+ return ret;
+ speq->on_heap = true;
+ }
+
+ return 0;
+}
+
+static int arm_spe__setup_queues(struct arm_spe *spe)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < spe->queues.nr_queues; i++) {
+ ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int arm_spe__update_queues(struct arm_spe *spe)
{
+ if (spe->queues.new_data) {
+ spe->queues.new_data = false;
+ return arm_spe__setup_queues(spe);
+ }
+
return 0;
}
+static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
+{
+ struct evsel *evsel;
+ struct evlist *evlist = spe->session->evlist;
+ bool timeless_decoding = true;
+
+ /*
+	 * Cycle through the list of events; if any of them has the time bit
+	 * set, timestamped (non-timeless) decoding must be used.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
+ timeless_decoding = false;
+ }
+
+ return timeless_decoding;
+}
+
+static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
+ struct auxtrace_queue *queue)
+{
+ struct arm_spe_queue *speq = queue->priv;
+ pid_t tid;
+
+ tid = machine__get_current_tid(spe->machine, speq->cpu);
+ if (tid != -1) {
+ speq->tid = tid;
+ thread__zput(speq->thread);
+ } else
+ speq->tid = queue->tid;
+
+ if ((!speq->thread) && (speq->tid != -1)) {
+ speq->thread = machine__find_thread(spe->machine, -1,
+ speq->tid);
+ }
+
+ if (speq->thread) {
+ speq->pid = speq->thread->pid_;
+ if (queue->cpu == -1)
+ speq->cpu = speq->thread->cpu;
+ }
+}
+
+static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
+{
+ unsigned int queue_nr;
+ u64 ts;
+ int ret;
+
+ while (1) {
+ struct auxtrace_queue *queue;
+ struct arm_spe_queue *speq;
+
+ if (!spe->heap.heap_cnt)
+ return 0;
+
+ if (spe->heap.heap_array[0].ordinal >= timestamp)
+ return 0;
+
+ queue_nr = spe->heap.heap_array[0].queue_nr;
+ queue = &spe->queues.queue_array[queue_nr];
+ speq = queue->priv;
+
+ auxtrace_heap__pop(&spe->heap);
+
+ if (spe->heap.heap_cnt) {
+ ts = spe->heap.heap_array[0].ordinal + 1;
+ if (ts > timestamp)
+ ts = timestamp;
+ } else {
+ ts = timestamp;
+ }
+
+ arm_spe_set_pid_tid_cpu(spe, queue);
+
+ ret = arm_spe_run_decoder(speq, &ts);
+ if (ret < 0) {
+ auxtrace_heap__add(&spe->heap, queue_nr, ts);
+ return ret;
+ }
+
+ if (!ret) {
+ ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
+ if (ret < 0)
+ return ret;
+ } else {
+ speq->on_heap = false;
+ }
+ }
+
+ return 0;
+}
+
+static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
+ u64 time_)
+{
+ struct auxtrace_queues *queues = &spe->queues;
+ unsigned int i;
+ u64 ts = 0;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ struct auxtrace_queue *queue = &spe->queues.queue_array[i];
+ struct arm_spe_queue *speq = queue->priv;
+
+ if (speq && (tid == -1 || speq->tid == tid)) {
+ speq->time = time_;
+ arm_spe_set_pid_tid_cpu(spe, queue);
+ arm_spe_run_decoder(speq, &ts);
+ }
+ }
+ return 0;
+}
+
+static int arm_spe_process_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool)
+{
+ int err = 0;
+ u64 timestamp;
+ struct arm_spe *spe = container_of(session->auxtrace,
+ struct arm_spe, auxtrace);
+
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events) {
+ pr_err("SPE trace requires ordered events\n");
+ return -EINVAL;
+ }
+
+ if (sample->time && (sample->time != (u64) -1))
+ timestamp = sample->time;
+ else
+ timestamp = 0;
+
+ if (timestamp || spe->timeless_decoding) {
+ err = arm_spe__update_queues(spe);
+ if (err)
+ return err;
+ }
+
+ if (spe->timeless_decoding) {
+ if (event->header.type == PERF_RECORD_EXIT) {
+ err = arm_spe_process_timeless_queues(spe,
+ event->fork.tid,
+ sample->time);
+ }
+ } else if (timestamp) {
+ if (event->header.type == PERF_RECORD_EXIT) {
+ err = arm_spe_process_queues(spe, timestamp);
+ if (err)
+ return err;
+ }
+ }
+
+ return err;
+}
+
static int arm_spe_process_auxtrace_event(struct perf_session *session,
union perf_event *event,
struct perf_tool *tool __maybe_unused)
{
struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
auxtrace);
- struct auxtrace_buffer *buffer;
- off_t data_offset;
- int fd = perf_data__fd(session->data);
- int err;
- if (perf_data__is_pipe(session->data)) {
- data_offset = 0;
- } else {
- data_offset = lseek(fd, 0, SEEK_CUR);
- if (data_offset == -1)
- return -errno;
- }
+ if (!spe->data_queued) {
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ int err;
- err = auxtrace_queues__add_event(&spe->queues, session, event,
- data_offset, &buffer);
- if (err)
- return err;
+ if (perf_data__is_pipe(session->data)) {
+ data_offset = 0;
+ } else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
- /* Dump here now we have copied a piped trace out of the pipe */
- if (dump_trace) {
- if (auxtrace_buffer__get_data(buffer, fd)) {
- arm_spe_dump_event(spe, buffer->data,
- buffer->size);
- auxtrace_buffer__put_data(buffer);
+ err = auxtrace_queues__add_event(&spe->queues, session, event,
+ data_offset, &buffer);
+ if (err)
+ return err;
+
+ /* Dump here now we have copied a piped trace out of the pipe */
+ if (dump_trace) {
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ arm_spe_dump_event(spe, buffer->data,
+ buffer->size);
+ auxtrace_buffer__put_data(buffer);
+ }
}
}
@@ -139,7 +654,25 @@ static int arm_spe_process_auxtrace_event(struct perf_session *session,
static int arm_spe_flush(struct perf_session *session __maybe_unused,
struct perf_tool *tool __maybe_unused)
{
- return 0;
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ int ret;
+
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events)
+ return -EINVAL;
+
+ ret = arm_spe__update_queues(spe);
+ if (ret < 0)
+ return ret;
+
+ if (spe->timeless_decoding)
+ return arm_spe_process_timeless_queues(spe, -1,
+ MAX_TIMESTAMP - 1);
+
+ return arm_spe_process_queues(spe, MAX_TIMESTAMP);
}
static void arm_spe_free_queue(void *priv)
@@ -148,6 +681,9 @@ static void arm_spe_free_queue(void *priv)
if (!speq)
return;
+ thread__zput(speq->thread);
+ arm_spe_decoder_free(speq->decoder);
+ zfree(&speq->event_buf);
free(speq);
}
@@ -196,11 +732,189 @@ static void arm_spe_print_info(__u64 *arr)
fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
}
+struct arm_spe_synth {
+ struct perf_tool dummy_tool;
+ struct perf_session *session;
+};
+
+static int arm_spe_event_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct arm_spe_synth *arm_spe_synth =
+ container_of(tool, struct arm_spe_synth, dummy_tool);
+
+ return perf_session__deliver_synth_event(arm_spe_synth->session,
+ event, NULL);
+}
+
+static int arm_spe_synth_event(struct perf_session *session,
+ struct perf_event_attr *attr, u64 id)
+{
+ struct arm_spe_synth arm_spe_synth;
+
+ memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth));
+ arm_spe_synth.session = session;
+
+ return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1,
+ &id, arm_spe_event_synth);
+}
+
+static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
+ const char *name)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.id && evsel->core.id[0] == id) {
+ if (evsel->name)
+ zfree(&evsel->name);
+ evsel->name = strdup(name);
+ break;
+ }
+ }
+}
+
+static int
+arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
+{
+ struct evlist *evlist = session->evlist;
+ struct evsel *evsel;
+ struct perf_event_attr attr;
+ bool found = false;
+ u64 id;
+ int err;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.type == spe->pmu_type) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("No selected events with SPE trace data\n");
+ return 0;
+ }
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.size = sizeof(struct perf_event_attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
+ attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+ PERF_SAMPLE_PERIOD;
+ if (spe->timeless_decoding)
+ attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
+ else
+ attr.sample_type |= PERF_SAMPLE_TIME;
+
+ attr.exclude_user = evsel->core.attr.exclude_user;
+ attr.exclude_kernel = evsel->core.attr.exclude_kernel;
+ attr.exclude_hv = evsel->core.attr.exclude_hv;
+ attr.exclude_host = evsel->core.attr.exclude_host;
+ attr.exclude_guest = evsel->core.attr.exclude_guest;
+ attr.sample_id_all = evsel->core.attr.sample_id_all;
+ attr.read_format = evsel->core.attr.read_format;
+
+ /* create new id val to be a fixed offset from evsel id */
+ id = evsel->core.id[0] + 1000000000;
+
+ if (!id)
+ id = 1;
+
+ if (spe->synth_opts.flc) {
+ spe->sample_flc = true;
+
+ /* Level 1 data cache miss */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->l1d_miss_id = id;
+ arm_spe_set_event_name(evlist, id, "l1d-miss");
+ id += 1;
+
+ /* Level 1 data cache access */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->l1d_access_id = id;
+ arm_spe_set_event_name(evlist, id, "l1d-access");
+ id += 1;
+ }
+
+ if (spe->synth_opts.llc) {
+ spe->sample_llc = true;
+
+ /* Last level cache miss */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->llc_miss_id = id;
+ arm_spe_set_event_name(evlist, id, "llc-miss");
+ id += 1;
+
+ /* Last level cache access */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->llc_access_id = id;
+ arm_spe_set_event_name(evlist, id, "llc-access");
+ id += 1;
+ }
+
+ if (spe->synth_opts.tlb) {
+ spe->sample_tlb = true;
+
+ /* TLB miss */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->tlb_miss_id = id;
+ arm_spe_set_event_name(evlist, id, "tlb-miss");
+ id += 1;
+
+ /* TLB access */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->tlb_access_id = id;
+ arm_spe_set_event_name(evlist, id, "tlb-access");
+ id += 1;
+ }
+
+ if (spe->synth_opts.branches) {
+ spe->sample_branch = true;
+
+ /* Branch miss */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->branch_miss_id = id;
+ arm_spe_set_event_name(evlist, id, "branch-miss");
+ id += 1;
+ }
+
+ if (spe->synth_opts.remote_access) {
+ spe->sample_remote_access = true;
+
+ /* Remote access */
+ err = arm_spe_synth_event(session, &attr, id);
+ if (err)
+ return err;
+ spe->remote_access_id = id;
+ arm_spe_set_event_name(evlist, id, "remote-access");
+ id += 1;
+ }
+
+ return 0;
+}
+
int arm_spe_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
- size_t min_sz = sizeof(u64) * ARM_SPE_PMU_TYPE;
+ size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX;
struct arm_spe *spe;
int err;
@@ -221,6 +935,7 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
spe->auxtrace_type = auxtrace_info->type;
spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+ spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
spe->auxtrace.process_event = arm_spe_process_event;
spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
spe->auxtrace.flush_events = arm_spe_flush;
@@ -231,8 +946,30 @@ int arm_spe_process_auxtrace_info(union perf_event *event,
arm_spe_print_info(&auxtrace_info->priv[0]);
+ if (dump_trace)
+ return 0;
+
+ if (session->itrace_synth_opts && session->itrace_synth_opts->set)
+ spe->synth_opts = *session->itrace_synth_opts;
+ else
+ itrace_synth_opts__set_default(&spe->synth_opts, false);
+
+ err = arm_spe_synth_events(spe, session);
+ if (err)
+ goto err_free_queues;
+
+ err = auxtrace_queues__process_index(&spe->queues, session);
+ if (err)
+ goto err_free_queues;
+
+ if (spe->queues.populated)
+ spe->data_queued = true;
+
return 0;
+err_free_queues:
+ auxtrace_queues__free(&spe->queues);
+ session->auxtrace = NULL;
err_free:
free(spe);
return err;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 749487a41cc7..25c639ac4ad4 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -55,7 +55,6 @@
#include "util/mmap.h"
#include <linux/ctype.h>
-#include <linux/kernel.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
@@ -729,7 +728,7 @@ int auxtrace_parse_sample_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts, const char *str)
{
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
struct evsel *aux_evsel;
bool has_aux_sample_size = false;
bool has_aux_leader = false;
@@ -771,7 +770,7 @@ no_opt:
evlist__for_each_entry(evlist, evsel) {
if (evsel__is_aux_event(evsel))
aux_evsel = evsel;
- term = perf_evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
+ term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
if (term) {
has_aux_sample_size = true;
evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
@@ -1331,6 +1330,11 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
synth_opts->pwr_events = true;
synth_opts->other_events = true;
synth_opts->errors = true;
+ synth_opts->flc = true;
+ synth_opts->llc = true;
+ synth_opts->tlb = true;
+ synth_opts->remote_access = true;
+
if (no_sample) {
synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
synth_opts->period = 1;
@@ -1491,6 +1495,18 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
goto out_err;
p = endptr;
break;
+ case 'f':
+ synth_opts->flc = true;
+ break;
+ case 'm':
+ synth_opts->llc = true;
+ break;
+ case 't':
+ synth_opts->tlb = true;
+ break;
+ case 'a':
+ synth_opts->remote_access = true;
+ break;
case ' ':
case ',':
break;
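
The new 'f'/'m'/'t'/'a' cases above extend the per-character --itrace option parser. Below is a minimal standalone sketch of how such single-character flags toggle the corresponding synthesis options; the struct and function names are made up, and the real parser in itrace_parse_synth_opts() handles many more flags and arguments.

#include <stdbool.h>
#include <stdio.h>

/* Subset of the synth options toggled by the new flags. */
struct synth_opts {
	bool flc, llc, tlb, remote_access;
};

static void parse_itrace_flags(const char *str, struct synth_opts *opts)
{
	for (const char *p = str; *p; p++) {
		switch (*p) {
		case 'f': opts->flc = true; break;
		case 'm': opts->llc = true; break;
		case 't': opts->tlb = true; break;
		case 'a': opts->remote_access = true; break;
		case ' ':
		case ',': break;
		default:  fprintf(stderr, "unknown flag '%c'\n", *p);
		}
	}
}

int main(void)
{
	struct synth_opts opts = { 0 };

	parse_itrace_flags("fmta", &opts);	/* e.g. --itrace=fmta */
	printf("flc=%d llc=%d tlb=%d remote=%d\n",
	       opts.flc, opts.llc, opts.tlb, opts.remote_access);
	return 0;
}
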
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 0220a2e86c16..142ccf7d34df 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -63,6 +63,7 @@ enum itrace_period_type {
* because 'perf inject' will write it out
* @instructions: whether to synthesize 'instructions' events
* @branches: whether to synthesize 'branches' events
+ * (branch misses only for Arm SPE)
* @transactions: whether to synthesize events for transactions
* @ptwrites: whether to synthesize events for ptwrites
* @pwr_events: whether to synthesize power events
@@ -78,6 +79,10 @@ enum itrace_period_type {
* @thread_stack: feed branches to the thread_stack
* @last_branch: add branch context to 'instruction' events
* @add_last_branch: add branch context to existing event records
+ * @flc: whether to synthesize first level cache events
+ * @llc: whether to synthesize last level cache events
+ * @tlb: whether to synthesize TLB events
+ * @remote_access: whether to synthesize remote access events
* @callchain_sz: maximum callchain size
* @last_branch_sz: branch context size
* @period: 'instructions' events period
@@ -107,6 +112,10 @@ struct itrace_synth_opts {
bool thread_stack;
bool last_branch;
bool add_last_branch;
+ bool flc;
+ bool llc;
+ bool tlb;
+ bool remote_access;
unsigned int callchain_sz;
unsigned int last_branch_sz;
unsigned long long period;
@@ -596,7 +605,7 @@ bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
#define ITRACE_HELP \
" i: synthesize instructions events\n" \
-" b: synthesize branches events\n" \
+" b: synthesize branches events (branch misses for Arm SPE)\n" \
" c: synthesize branches events (calls only)\n" \
" r: synthesize branches events (returns only)\n" \
" x: synthesize transactions events\n" \
@@ -604,6 +613,10 @@ bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
" p: synthesize power events\n" \
" e: synthesize error events\n" \
" d: create a debug log\n" \
+" f: synthesize first level cache events\n" \
+" m: synthesize last level cache events\n" \
+" t: synthesize TLB events\n" \
+" a: synthesize remote access events\n" \
" g[len]: synthesize a call chain (use with i or x)\n" \
" l[len]: synthesize last branch entries (use with i or x)\n" \
" sNUMBER: skip initial number of events\n" \
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 83bfb8768235..2feb751516ab 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1225,7 +1225,7 @@ bpf__obj_config_map(struct bpf_object *obj,
out:
free(map_name);
if (!err)
- key_scan_pos += strlen(map_opt);
+ *key_scan_pos += strlen(map_opt);
return err;
}
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 4d3f02fa223d..17b2ccc61094 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -46,7 +46,7 @@ struct branch_entry {
struct branch_stack {
u64 nr;
u64 hw_idx;
- struct branch_entry entries[0];
+ struct branch_entry entries[];
};
/*
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 818aa4efd386..2775b752f2fa 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1599,3 +1599,17 @@ void callchain_cursor_reset(struct callchain_cursor *cursor)
for (node = cursor->first; node != NULL; node = node->next)
map__zput(node->ms.map);
}
+
+void callchain_param_setup(u64 sample_type)
+{
+ if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
+ if ((sample_type & PERF_SAMPLE_REGS_USER) &&
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
+ callchain_param.record_mode = CALLCHAIN_DWARF;
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ callchain_param.record_mode = CALLCHAIN_LBR;
+ else
+ callchain_param.record_mode = CALLCHAIN_FP;
+ }
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 8f668ee29f25..fe36a9e5ccd1 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -297,4 +297,5 @@ int callchain_branch_counts(struct callchain_root *root,
u64 *branch_count, u64 *predicted_count,
u64 *abort_count, u64 *cycles_count);
+void callchain_param_setup(u64 sample_type);
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 6b3988a7aba8..fa8248aadb59 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -65,7 +65,7 @@ static int perf_flag_probe(void)
return 1;
}
- WARN_ONCE(err != EINVAL && err != EBUSY,
+ WARN_ONCE(err != EINVAL && err != EBUSY && err != EACCES,
"perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
err, str_error_r(err, sbuf, sizeof(sbuf)));
@@ -83,7 +83,7 @@ static int perf_flag_probe(void)
if (fd >= 0)
close(fd);
- if (WARN_ONCE(fd < 0 && err != EBUSY,
+ if (WARN_ONCE(fd < 0 && err != EBUSY && err != EACCES,
"perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
err, str_error_r(err, sbuf, sizeof(sbuf))))
return -1;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index ef38eba56ed0..20be0504fb95 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -17,10 +17,10 @@
#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
#include "util/llvm-utils.h" /* perf_llvm_config */
+#include "util/stat.h" /* perf_stat__set_big_num */
#include "build-id.h"
#include "debug.h"
#include "config.h"
-#include "debug.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <stdlib.h>
@@ -452,6 +452,15 @@ static int perf_ui_config(const char *var, const char *value)
return 0;
}
+static int perf_stat_config(const char *var, const char *value)
+{
+ if (!strcmp(var, "stat.big-num"))
+ perf_stat__set_big_num(perf_config_bool(var, value));
+
+ /* Add other config variables here. */
+ return 0;
+}
+
int perf_default_config(const char *var, const char *value,
void *dummy __maybe_unused)
{
@@ -473,6 +482,9 @@ int perf_default_config(const char *var, const char *value,
if (strstarts(var, "buildid."))
return perf_buildid_config(var, value);
+ if (strstarts(var, "stat."))
+ return perf_stat_config(var, value);
+
/* Add other config variables here. */
return 0;
}
diff --git a/tools/perf/util/counts.c b/tools/perf/util/counts.c
index f94e1a23dad6..582f3aeaf5e4 100644
--- a/tools/perf/util/counts.c
+++ b/tools/perf/util/counts.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
+#include <string.h>
#include "evsel.h"
#include "counts.h"
#include <linux/zalloc.h>
@@ -42,24 +43,25 @@ void perf_counts__delete(struct perf_counts *counts)
}
}
-static void perf_counts__reset(struct perf_counts *counts)
+void perf_counts__reset(struct perf_counts *counts)
{
xyarray__reset(counts->loaded);
xyarray__reset(counts->values);
+ memset(&counts->aggr, 0, sizeof(struct perf_counts_values));
}
-void perf_evsel__reset_counts(struct evsel *evsel)
+void evsel__reset_counts(struct evsel *evsel)
{
perf_counts__reset(evsel->counts);
}
-int perf_evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads)
+int evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads)
{
evsel->counts = perf_counts__new(ncpus, nthreads);
return evsel->counts != NULL ? 0 : -ENOMEM;
}
-void perf_evsel__free_counts(struct evsel *evsel)
+void evsel__free_counts(struct evsel *evsel)
{
perf_counts__delete(evsel->counts);
evsel->counts = NULL;
diff --git a/tools/perf/util/counts.h b/tools/perf/util/counts.h
index 92196df4945f..7ff36bf6d644 100644
--- a/tools/perf/util/counts.h
+++ b/tools/perf/util/counts.h
@@ -37,9 +37,10 @@ perf_counts__set_loaded(struct perf_counts *counts, int cpu, int thread, bool lo
struct perf_counts *perf_counts__new(int ncpus, int nthreads);
void perf_counts__delete(struct perf_counts *counts);
+void perf_counts__reset(struct perf_counts *counts);
-void perf_evsel__reset_counts(struct evsel *evsel);
-int perf_evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads);
-void perf_evsel__free_counts(struct evsel *evsel);
+void evsel__reset_counts(struct evsel *evsel);
+int evsel__alloc_counts(struct evsel *evsel, int ncpus, int nthreads);
+void evsel__free_counts(struct evsel *evsel);
#endif /* __PERF_COUNTS_H */
diff --git a/tools/perf/util/cputopo.h b/tools/perf/util/cputopo.h
index 7bf6b811f715..6201c3790d86 100644
--- a/tools/perf/util/cputopo.h
+++ b/tools/perf/util/cputopo.h
@@ -22,7 +22,7 @@ struct numa_topology_node {
struct numa_topology {
u32 nr;
- struct numa_topology_node nodes[0];
+ struct numa_topology_node nodes[];
};
struct cpu_topology *cpu_topology__new(void);
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
index 6fb7f34c0814..39c05200ed65 100644
--- a/tools/perf/util/demangle-java.c
+++ b/tools/perf/util/demangle-java.c
@@ -15,7 +15,7 @@ enum {
MODE_CLASS = 1,
MODE_FUNC = 2,
MODE_TYPE = 3,
- MODE_CTYPE = 3, /* class arg */
+ MODE_CTYPE = 4, /* class arg */
};
#define BASE_ENT(c, n) [c - 'A']=n
@@ -27,7 +27,7 @@ static const char *base_types['Z' - 'A' + 1] = {
BASE_ENT('I', "int" ),
BASE_ENT('J', "long" ),
BASE_ENT('S', "short" ),
- BASE_ENT('Z', "bool" ),
+ BASE_ENT('Z', "boolean" ),
};
/*
@@ -59,15 +59,16 @@ __demangle_java_sym(const char *str, const char *end, char *buf, int maxlen, int
switch (*q) {
case 'L':
- if (mode == MODE_PREFIX || mode == MODE_CTYPE) {
- if (mode == MODE_CTYPE) {
+ if (mode == MODE_PREFIX || mode == MODE_TYPE) {
+ if (mode == MODE_TYPE) {
if (narg)
rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
narg++;
}
- rlen += scnprintf(buf + rlen, maxlen - rlen, "class ");
if (mode == MODE_PREFIX)
mode = MODE_CLASS;
+ else
+ mode = MODE_CTYPE;
} else
buf[rlen++] = *q;
break;
@@ -120,7 +121,7 @@ __demangle_java_sym(const char *str, const char *end, char *buf, int maxlen, int
if (mode != MODE_CLASS && mode != MODE_CTYPE)
goto error;
/* safe because at least one other char to process */
- if (isalpha(*(q + 1)))
+ if (isalpha(*(q + 1)) && mode == MODE_CLASS)
rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
if (mode == MODE_CLASS)
mode = MODE_FUNC;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index f338990e0fe6..99f0a39c3c59 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -47,6 +47,7 @@ char dso__symtab_origin(const struct dso *dso)
[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D',
[DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
+ [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x',
[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o',
[DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
[DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
@@ -129,6 +130,21 @@ int dso__read_binary_type_filename(const struct dso *dso,
snprintf(filename + len, size - len, "%s", dso->long_name);
break;
+ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
+ /*
+ * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
+ * /usr/lib/debug/lib when it is expected to be in
+ * /usr/lib/debug/usr/lib
+ */
+ if (strlen(dso->long_name) < 9 ||
+ strncmp(dso->long_name, "/usr/lib/", 9)) {
+ ret = -1;
+ break;
+ }
+ len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
+ snprintf(filename + len, size - len, "%s", dso->long_name + 4);
+ break;
+
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
{
const char *last_slash;
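To make the new fallback concrete, a worked example (the library path below is purely illustrative) of what the DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO case computes:

	/*
	 * Illustration only:
	 *   dso->long_name          = "/usr/lib/x86_64-linux-gnu/libfoo.so"
	 *   __symbol__join_symfs() -> "/usr/lib/debug"
	 *   dso->long_name + 4     -> "/lib/x86_64-linux-gnu/libfoo.so"
	 *   resulting filename      = "/usr/lib/debug/lib/x86_64-linux-gnu/libfoo.so"
	 * i.e. the misplaced Ubuntu location, tried in addition to the canonical
	 * /usr/lib/debug/usr/lib/... path handled by DSO_BINARY_TYPE__UBUNTU_DEBUGINFO.
	 */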
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 9553a1fd9e8a..d3d03274b0d1 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -30,6 +30,7 @@ enum dso_binary_type {
DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
@@ -137,7 +138,7 @@ struct dso_cache {
struct rb_node rb_node;
u64 offset;
u64 size;
- char data[0];
+ char data[];
};
struct auxtrace_cache;
@@ -209,7 +210,7 @@ struct dso {
struct nsinfo *nsinfo;
struct dso_id id;
refcount_t refcnt;
- char name[0];
+ char name[];
};
/* dso__for_each_symbol - iterate over the symbols of given type
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index b8289f160f07..6ae01c3c2ffa 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -79,7 +79,7 @@ struct sample_read {
struct ip_callchain {
u64 nr;
- u64 ips[0];
+ u64 ips[];
};
struct branch_stack;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 0a0b760d6948..173b4f0e0e6e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -233,7 +233,7 @@ void perf_evlist__set_leader(struct evlist *evlist)
int __perf_evlist__add_default(struct evlist *evlist, bool precise)
{
- struct evsel *evsel = perf_evsel__new_cycles(precise);
+ struct evsel *evsel = evsel__new_cycles(precise);
if (evsel == NULL)
return -ENOMEM;
@@ -249,7 +249,7 @@ int perf_evlist__add_dummy(struct evlist *evlist)
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
- struct evsel *evsel = perf_evsel__new_idx(&attr, evlist->core.nr_entries);
+ struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
if (evsel == NULL)
return -ENOMEM;
@@ -266,7 +266,7 @@ static int evlist__add_attrs(struct evlist *evlist,
size_t i;
for (i = 0; i < nr_attrs; i++) {
- evsel = perf_evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
+ evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
if (evsel == NULL)
goto out_delete_partial_list;
list_add_tail(&evsel->core.node, &head);
@@ -325,7 +325,7 @@ perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
int perf_evlist__add_newtp(struct evlist *evlist,
const char *sys, const char *name, void *handler)
{
- struct evsel *evsel = perf_evsel__newtp(sys, name);
+ struct evsel *evsel = evsel__newtp(sys, name);
if (IS_ERR(evsel))
return -1;
@@ -380,22 +380,33 @@ void evlist__disable(struct evlist *evlist)
{
struct evsel *pos;
struct affinity affinity;
- int cpu, i;
+ int cpu, i, imm = 0;
+ bool has_imm = false;
if (affinity__setup(&affinity) < 0)
return;
- evlist__for_each_cpu(evlist, i, cpu) {
- affinity__set(&affinity, cpu);
-
- evlist__for_each_entry(evlist, pos) {
- if (evsel__cpu_iter_skip(pos, cpu))
- continue;
- if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
- continue;
- evsel__disable_cpu(pos, pos->cpu_iter - 1);
+ /* Disable 'immediate' events last */
+ for (imm = 0; imm <= 1; imm++) {
+ evlist__for_each_cpu(evlist, i, cpu) {
+ affinity__set(&affinity, cpu);
+
+ evlist__for_each_entry(evlist, pos) {
+ if (evsel__cpu_iter_skip(pos, cpu))
+ continue;
+ if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
+ continue;
+ if (pos->immediate)
+ has_imm = true;
+ if (pos->immediate != imm)
+ continue;
+ evsel__disable_cpu(pos, pos->cpu_iter - 1);
+ }
}
+ if (!has_imm)
+ break;
}
+
affinity__cleanup(&affinity);
evlist__for_each_entry(evlist, pos) {
if (!evsel__is_group_leader(pos) || !pos->core.fd)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f3e60c45d59a..96e5171dce41 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -56,14 +56,14 @@ struct perf_missing_features perf_missing_features;
static clockid_t clockid;
-static int perf_evsel__no_extra_init(struct evsel *evsel __maybe_unused)
+static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
return 0;
}
void __weak test_attr__ready(void) { }
-static void perf_evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
+static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}
@@ -73,13 +73,12 @@ static struct {
void (*fini)(struct evsel *evsel);
} perf_evsel__object = {
.size = sizeof(struct evsel),
- .init = perf_evsel__no_extra_init,
- .fini = perf_evsel__no_extra_fini,
+ .init = evsel__no_extra_init,
+ .fini = evsel__no_extra_fini,
};
-int perf_evsel__object_config(size_t object_size,
- int (*init)(struct evsel *evsel),
- void (*fini)(struct evsel *evsel))
+int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
+ void (*fini)(struct evsel *evsel))
{
if (object_size == 0)
@@ -255,11 +254,12 @@ void evsel__init(struct evsel *evsel,
evsel->metric_expr = NULL;
evsel->metric_name = NULL;
evsel->metric_events = NULL;
+ evsel->per_pkg_mask = NULL;
evsel->collect_stat = false;
evsel->pmu_name = NULL;
}
-struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
+struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
struct evsel *evsel = zalloc(perf_evsel__object.size);
@@ -292,7 +292,7 @@ static bool perf_event_can_profile_kernel(void)
return perf_event_paranoid_check(1);
}
-struct evsel *perf_evsel__new_cycles(bool precise)
+struct evsel *evsel__new_cycles(bool precise)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
@@ -334,7 +334,7 @@ error_free:
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
{
struct evsel *evsel = zalloc(perf_evsel__object.size);
int err = -ENOMEM;
@@ -372,7 +372,7 @@ out_err:
return ERR_PTR(err);
}
-const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
+const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
"cycles",
"instructions",
"cache-references",
@@ -387,8 +387,8 @@ const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
static const char *__evsel__hw_name(u64 config)
{
- if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
- return perf_evsel__hw_names[config];
+ if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
+ return evsel__hw_names[config];
return "unknown-hardware";
}
@@ -435,7 +435,7 @@ static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
-const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+const char *evsel__sw_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
"page-faults",
@@ -450,8 +450,8 @@ const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
static const char *__evsel__sw_name(u64 config)
{
- if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
- return perf_evsel__sw_names[config];
+ if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
+ return evsel__sw_names[config];
return "unknown-software";
}
@@ -486,8 +486,7 @@ static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}
-const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
- [PERF_EVSEL__MAX_ALIASES] = {
+const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
{ "L1-dcache", "l1-d", "l1d", "L1-data", },
{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
{ "LLC", "L2", },
@@ -497,15 +496,13 @@ const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
{ "node", },
};
-const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_EVSEL__MAX_ALIASES] = {
+const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
{ "load", "loads", "read", },
{ "store", "stores", "write", },
{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
- [PERF_EVSEL__MAX_ALIASES] = {
+const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
{ "refs", "Reference", "ops", "access", },
{ "misses", "miss", },
};
@@ -521,7 +518,7 @@ const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
* L1I : Read and prefetch only
* ITLB and BPU : Read-only
*/
-static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+static unsigned long evsel__hw_cache_stat[C(MAX)] = {
[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
@@ -533,7 +530,7 @@ static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
bool evsel__is_cache_op_valid(u8 type, u8 op)
{
- if (perf_evsel__hw_cache_stat[type] & COP(op))
+ if (evsel__hw_cache_stat[type] & COP(op))
return true; /* valid */
else
return false; /* invalid */
@@ -542,13 +539,13 @@ bool evsel__is_cache_op_valid(u8 type, u8 op)
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
{
if (result) {
- return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
- perf_evsel__hw_cache_op[op][0],
- perf_evsel__hw_cache_result[result][0]);
+ return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
+ evsel__hw_cache_op[op][0],
+ evsel__hw_cache_result[result][0]);
}
- return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
- perf_evsel__hw_cache_op[op][1]);
+ return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
+ evsel__hw_cache_op[op][1]);
}
static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
@@ -768,10 +765,10 @@ perf_evsel__reset_callgraph(struct evsel *evsel,
}
}
-static void apply_config_terms(struct evsel *evsel,
- struct record_opts *opts, bool track)
+static void evsel__apply_config_terms(struct evsel *evsel,
+ struct record_opts *opts, bool track)
{
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
struct perf_event_attr *attr = &evsel->core.attr;
/* callgraph default */
@@ -784,30 +781,30 @@ static void apply_config_terms(struct evsel *evsel,
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
- case PERF_EVSEL__CONFIG_TERM_PERIOD:
+ case EVSEL__CONFIG_TERM_PERIOD:
if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
attr->sample_period = term->val.period;
attr->freq = 0;
evsel__reset_sample_bit(evsel, PERIOD);
}
break;
- case PERF_EVSEL__CONFIG_TERM_FREQ:
+ case EVSEL__CONFIG_TERM_FREQ:
if (!(term->weak && opts->user_freq != UINT_MAX)) {
attr->sample_freq = term->val.freq;
attr->freq = 1;
evsel__set_sample_bit(evsel, PERIOD);
}
break;
- case PERF_EVSEL__CONFIG_TERM_TIME:
+ case EVSEL__CONFIG_TERM_TIME:
if (term->val.time)
evsel__set_sample_bit(evsel, TIME);
else
evsel__reset_sample_bit(evsel, TIME);
break;
- case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
+ case EVSEL__CONFIG_TERM_CALLGRAPH:
callgraph_buf = term->val.str;
break;
- case PERF_EVSEL__CONFIG_TERM_BRANCH:
+ case EVSEL__CONFIG_TERM_BRANCH:
if (term->val.str && strcmp(term->val.str, "no")) {
evsel__set_sample_bit(evsel, BRANCH_STACK);
parse_branch_str(term->val.str,
@@ -815,16 +812,16 @@ static void apply_config_terms(struct evsel *evsel,
} else
evsel__reset_sample_bit(evsel, BRANCH_STACK);
break;
- case PERF_EVSEL__CONFIG_TERM_STACK_USER:
+ case EVSEL__CONFIG_TERM_STACK_USER:
dump_size = term->val.stack_user;
break;
- case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
+ case EVSEL__CONFIG_TERM_MAX_STACK:
max_stack = term->val.max_stack;
break;
- case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
+ case EVSEL__CONFIG_TERM_MAX_EVENTS:
evsel->max_events = term->val.max_events;
break;
- case PERF_EVSEL__CONFIG_TERM_INHERIT:
+ case EVSEL__CONFIG_TERM_INHERIT:
/*
* attr->inherit should have already been set by
* evsel__config. If user explicitly set
@@ -833,20 +830,20 @@ static void apply_config_terms(struct evsel *evsel,
*/
attr->inherit = term->val.inherit ? 1 : 0;
break;
- case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
+ case EVSEL__CONFIG_TERM_OVERWRITE:
attr->write_backward = term->val.overwrite ? 1 : 0;
break;
- case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
+ case EVSEL__CONFIG_TERM_DRV_CFG:
break;
- case PERF_EVSEL__CONFIG_TERM_PERCORE:
+ case EVSEL__CONFIG_TERM_PERCORE:
break;
- case PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT:
+ case EVSEL__CONFIG_TERM_AUX_OUTPUT:
attr->aux_output = term->val.aux_output ? 1 : 0;
break;
- case PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
+ case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
/* Already applied by auxtrace */
break;
- case PERF_EVSEL__CONFIG_TERM_CFG_CHG:
+ case EVSEL__CONFIG_TERM_CFG_CHG:
break;
default:
break;
@@ -907,10 +904,9 @@ static bool is_dummy_event(struct evsel *evsel)
(evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
}
-struct perf_evsel_config_term *__perf_evsel__get_config_term(struct evsel *evsel,
- enum evsel_term_type type)
+struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
{
- struct perf_evsel_config_term *term, *found_term = NULL;
+ struct evsel_config_term *term, *found_term = NULL;
list_for_each_entry(term, &evsel->config_terms, list) {
if (term->type == type)
@@ -1145,7 +1141,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
* Apply event specific term settings,
* it overloads any global configuration.
*/
- apply_config_terms(evsel, opts, track);
+ evsel__apply_config_terms(evsel, opts, track);
evsel->ignore_missing_thread = opts->ignore_missing_thread;
@@ -1158,11 +1154,14 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
}
/*
+ * A dummy event never triggers any actual counter and therefore
+ * cannot be used with branch_stack.
+ *
* For initial_delay, a dummy event is added implicitly.
* The software event will error out with -EOPNOTSUPP
* if the BRANCH_STACK bit is set.
*/
- if (opts->initial_delay && is_dummy_event(evsel))
+ if (is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
}
@@ -1241,9 +1240,9 @@ int evsel__disable(struct evsel *evsel)
return err;
}
-static void perf_evsel__free_config_terms(struct evsel *evsel)
+static void evsel__free_config_terms(struct evsel *evsel)
{
- struct perf_evsel_config_term *term, *h;
+ struct evsel_config_term *term, *h;
list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
list_del_init(&term->list);
@@ -1257,10 +1256,10 @@ void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
- perf_evsel__free_counts(evsel);
+ evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
perf_evsel__free_id(&evsel->core);
- perf_evsel__free_config_terms(evsel);
+ evsel__free_config_terms(evsel);
cgroup__put(evsel->cgrp);
perf_cpu_map__put(evsel->core.cpus);
perf_cpu_map__put(evsel->core.own_cpus);
@@ -1268,6 +1267,8 @@ void evsel__exit(struct evsel *evsel)
zfree(&evsel->group_name);
zfree(&evsel->name);
zfree(&evsel->pmu_name);
+ zfree(&evsel->per_pkg_mask);
+ zfree(&evsel->metric_events);
perf_evsel__object.fini(evsel);
}
@@ -1425,7 +1426,7 @@ int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale)
if (FD(evsel, cpu, thread) < 0)
return -EINVAL;
- if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
+ if (evsel->counts == NULL && evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
return -ENOMEM;
if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
@@ -2416,7 +2417,7 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
/* Is there already the separator in the name. */
if (strchr(name, '/') ||
- strchr(name, ':'))
+ (strchr(name, ':') && !evsel->is_libpfm_event))
sep = "";
if (asprintf(&new_name, "%s%su", name, sep) < 0)
@@ -2477,31 +2478,40 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
char sbuf[STRERR_BUFSIZE];
- int printed = 0;
+ int printed = 0, enforced = 0;
switch (err) {
case EPERM:
case EACCES:
+ printed += scnprintf(msg + printed, size - printed,
+ "Access to performance monitoring and observability operations is limited.\n");
+
+ if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
+ if (enforced) {
+ printed += scnprintf(msg + printed, size - printed,
+ "Enforced MAC policy settings (SELinux) can limit access to performance\n"
+ "monitoring and observability operations. Inspect system audit records for\n"
+ "more perf_event access control information and adjusting the policy.\n");
+ }
+ }
+
if (err == EPERM)
- printed = scnprintf(msg, size,
+ printed += scnprintf(msg, size,
"No permission to enable %s event.\n\n", evsel__name(evsel));
return scnprintf(msg + printed, size - printed,
- "You may not have permission to collect %sstats.\n\n"
- "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
- "which controls use of the performance events system by\n"
- "unprivileged users (without CAP_PERFMON or CAP_SYS_ADMIN).\n\n"
- "The current value is %d:\n\n"
+ "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
+ "access to performance monitoring and observability operations for users\n"
+ "without CAP_PERFMON or CAP_SYS_ADMIN Linux capability.\n"
+ "perf_event_paranoid setting is %d:\n"
" -1: Allow use of (almost) all events by all users\n"
" Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
- ">= 0: Disallow ftrace function tracepoint by users without CAP_PERFMON or CAP_SYS_ADMIN\n"
- " Disallow raw tracepoint access by users without CAP_SYS_PERFMON or CAP_SYS_ADMIN\n"
- ">= 1: Disallow CPU event access by users without CAP_PERFMON or CAP_SYS_ADMIN\n"
- ">= 2: Disallow kernel profiling by users without CAP_PERFMON or CAP_SYS_ADMIN\n\n"
- "To make this setting permanent, edit /etc/sysctl.conf too, e.g.:\n\n"
- " kernel.perf_event_paranoid = -1\n" ,
- target->system_wide ? "system-wide " : "",
- perf_event_paranoid());
+ ">= 0: Disallow raw and ftrace function tracepoint access\n"
+ ">= 1: Disallow CPU event access\n"
+ ">= 2: Disallow kernel profiling\n"
+ "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
+ "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
+ perf_event_paranoid());
case ENOENT:
return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
case EMFILE:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 351c0aaf2a11..0f963c2a88a5 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -76,6 +76,7 @@ struct evsel {
bool ignore_missing_thread;
bool forced_leader;
bool use_uncore_alias;
+ bool is_libpfm_event;
/* parse modifier helper */
int exclude_GH;
int sample_read;
@@ -154,31 +155,31 @@ void perf_counts_values__scale(struct perf_counts_values *count,
void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
struct perf_counts_values *count);
-int perf_evsel__object_config(size_t object_size,
- int (*init)(struct evsel *evsel),
- void (*fini)(struct evsel *evsel));
+int evsel__object_config(size_t object_size,
+ int (*init)(struct evsel *evsel),
+ void (*fini)(struct evsel *evsel));
struct perf_pmu *evsel__find_pmu(struct evsel *evsel);
bool evsel__is_aux_event(struct evsel *evsel);
-struct evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);
+struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx);
static inline struct evsel *evsel__new(struct perf_event_attr *attr)
{
- return perf_evsel__new_idx(attr, 0);
+ return evsel__new_idx(attr, 0);
}
-struct evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-static inline struct evsel *perf_evsel__newtp(const char *sys, const char *name)
+static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
- return perf_evsel__newtp_idx(sys, name, 0);
+ return evsel__newtp_idx(sys, name, 0);
}
-struct evsel *perf_evsel__new_cycles(bool precise);
+struct evsel *evsel__new_cycles(bool precise);
struct tep_event *event_format__new(const char *sys, const char *name);
@@ -198,16 +199,13 @@ void evsel__calc_id_pos(struct evsel *evsel);
bool evsel__is_cache_op_valid(u8 type, u8 op);
-#define PERF_EVSEL__MAX_ALIASES 8
+#define EVSEL__MAX_ALIASES 8
-extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
- [PERF_EVSEL__MAX_ALIASES];
-extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_EVSEL__MAX_ALIASES];
-extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
- [PERF_EVSEL__MAX_ALIASES];
-extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
-extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
+extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
+extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
+extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
+extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
+extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
diff --git a/tools/perf/util/evsel_config.h b/tools/perf/util/evsel_config.h
index f8938916577c..aee6f808b512 100644
--- a/tools/perf/util/evsel_config.h
+++ b/tools/perf/util/evsel_config.h
@@ -6,30 +6,30 @@
#include <stdbool.h>
/*
- * The 'struct perf_evsel_config_term' is used to pass event
+ * The 'struct evsel_config_term' is used to pass event
* specific configuration data to evsel__config routine.
* It is allocated within event parsing and attached to
- * perf_evsel::config_terms list head.
+ * evsel::config_terms list head.
*/
enum evsel_term_type {
- PERF_EVSEL__CONFIG_TERM_PERIOD,
- PERF_EVSEL__CONFIG_TERM_FREQ,
- PERF_EVSEL__CONFIG_TERM_TIME,
- PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
- PERF_EVSEL__CONFIG_TERM_STACK_USER,
- PERF_EVSEL__CONFIG_TERM_INHERIT,
- PERF_EVSEL__CONFIG_TERM_MAX_STACK,
- PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
- PERF_EVSEL__CONFIG_TERM_OVERWRITE,
- PERF_EVSEL__CONFIG_TERM_DRV_CFG,
- PERF_EVSEL__CONFIG_TERM_BRANCH,
- PERF_EVSEL__CONFIG_TERM_PERCORE,
- PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT,
- PERF_EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE,
- PERF_EVSEL__CONFIG_TERM_CFG_CHG,
+ EVSEL__CONFIG_TERM_PERIOD,
+ EVSEL__CONFIG_TERM_FREQ,
+ EVSEL__CONFIG_TERM_TIME,
+ EVSEL__CONFIG_TERM_CALLGRAPH,
+ EVSEL__CONFIG_TERM_STACK_USER,
+ EVSEL__CONFIG_TERM_INHERIT,
+ EVSEL__CONFIG_TERM_MAX_STACK,
+ EVSEL__CONFIG_TERM_MAX_EVENTS,
+ EVSEL__CONFIG_TERM_OVERWRITE,
+ EVSEL__CONFIG_TERM_DRV_CFG,
+ EVSEL__CONFIG_TERM_BRANCH,
+ EVSEL__CONFIG_TERM_PERCORE,
+ EVSEL__CONFIG_TERM_AUX_OUTPUT,
+ EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE,
+ EVSEL__CONFIG_TERM_CFG_CHG,
};
-struct perf_evsel_config_term {
+struct evsel_config_term {
struct list_head list;
enum evsel_term_type type;
bool free_str;
@@ -53,10 +53,9 @@ struct perf_evsel_config_term {
struct evsel;
-struct perf_evsel_config_term *__perf_evsel__get_config_term(struct evsel *evsel,
- enum evsel_term_type type);
+struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type);
-#define perf_evsel__get_config_term(evsel, type) \
- __perf_evsel__get_config_term(evsel, PERF_EVSEL__CONFIG_TERM_ ## type)
+#define evsel__get_config_term(evsel, type) \
+ __evsel__get_config_term(evsel, EVSEL__CONFIG_TERM_ ## type)
#endif // __PERF_EVSEL_CONFIG_H
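The renamed lookup keeps its convenience-macro form; a minimal usage sketch, mirroring the auxtrace.c hunk earlier in this series (error handling trimmed):

	struct evsel_config_term *term;

	/* evsel__get_config_term(evsel, AUX_SAMPLE_SIZE) expands to
	 * __evsel__get_config_term(evsel, EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE)
	 */
	term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
	if (term)
		evsel->core.attr.aux_sample_size = term->val.aux_sample_size;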
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 99aed708bd5a..fb498a723a00 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -35,8 +35,7 @@ static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, vo
return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}
-int perf_evsel__fprintf(struct evsel *evsel,
- struct perf_attr_details *details, FILE *fp)
+int evsel__fprintf(struct evsel *evsel, struct perf_attr_details *details, FILE *fp)
{
bool first = true;
int printed = 0;
diff --git a/tools/perf/util/evsel_fprintf.h b/tools/perf/util/evsel_fprintf.h
index 47e6c8456bb1..3093d096c29f 100644
--- a/tools/perf/util/evsel_fprintf.h
+++ b/tools/perf/util/evsel_fprintf.h
@@ -15,8 +15,7 @@ struct perf_attr_details {
bool trace_fields;
};
-int perf_evsel__fprintf(struct evsel *evsel,
- struct perf_attr_details *details, FILE *fp);
+int evsel__fprintf(struct evsel *evsel, struct perf_attr_details *details, FILE *fp);
#define EVSEL__PRINT_IP (1<<0)
#define EVSEL__PRINT_SYM (1<<1)
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index aa631e37ad1e..f64ab91c432b 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -4,25 +4,76 @@
#include "expr.h"
#include "expr-bison.h"
#include "expr-flex.h"
+#include <linux/kernel.h>
#ifdef PARSER_DEBUG
extern int expr_debug;
#endif
+static size_t key_hash(const void *key, void *ctx __maybe_unused)
+{
+ const char *str = (const char *)key;
+ size_t hash = 0;
+
+ while (*str != '\0') {
+ hash *= 31;
+ hash += *str;
+ str++;
+ }
+ return hash;
+}
+
+static bool key_equal(const void *key1, const void *key2,
+ void *ctx __maybe_unused)
+{
+ return !strcmp((const char *)key1, (const char *)key2);
+}
+
/* Caller must make sure id is allocated */
-void expr__add_id(struct expr_parse_ctx *ctx, const char *name, double val)
+int expr__add_id(struct expr_parse_ctx *ctx, const char *name, double val)
{
- int idx;
+ double *val_ptr = NULL, *old_val = NULL;
+ char *old_key = NULL;
+ int ret;
+
+ if (val != 0.0) {
+ val_ptr = malloc(sizeof(double));
+ if (!val_ptr)
+ return -ENOMEM;
+ *val_ptr = val;
+ }
+ ret = hashmap__set(&ctx->ids, name, val_ptr,
+ (const void **)&old_key, (void **)&old_val);
+ free(old_key);
+ free(old_val);
+ return ret;
+}
+
+int expr__get_id(struct expr_parse_ctx *ctx, const char *id, double *val_ptr)
+{
+ double *data;
- assert(ctx->num_ids < MAX_PARSE_ID);
- idx = ctx->num_ids++;
- ctx->ids[idx].name = name;
- ctx->ids[idx].val = val;
+ if (!hashmap__find(&ctx->ids, id, (void **)&data))
+ return -1;
+ *val_ptr = (data == NULL) ? 0.0 : *data;
+ return 0;
}
void expr__ctx_init(struct expr_parse_ctx *ctx)
{
- ctx->num_ids = 0;
+ hashmap__init(&ctx->ids, key_hash, key_equal, NULL);
+}
+
+void expr__ctx_clear(struct expr_parse_ctx *ctx)
+{
+ struct hashmap_entry *cur;
+ size_t bkt;
+
+ hashmap__for_each_entry((&ctx->ids), cur, bkt) {
+ free((char *)cur->key);
+ free(cur->value);
+ }
+ hashmap__clear(&ctx->ids);
}
static int
@@ -45,6 +96,7 @@ __expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
#ifdef PARSER_DEBUG
expr_debug = 1;
+ expr_set_debug(1, scanner);
#endif
ret = expr_parse(val, ctx, scanner);
@@ -55,61 +107,25 @@ __expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
return ret;
}
-int expr__parse(double *final_val, struct expr_parse_ctx *ctx, const char *expr, int runtime)
+int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
+ const char *expr, int runtime)
{
return __expr__parse(final_val, ctx, expr, EXPR_PARSE, runtime) ? -1 : 0;
}
-static bool
-already_seen(const char *val, const char *one, const char **other,
- int num_other)
-{
- int i;
-
- if (one && !strcasecmp(one, val))
- return true;
- for (i = 0; i < num_other; i++)
- if (!strcasecmp(other[i], val))
- return true;
- return false;
-}
-
-int expr__find_other(const char *expr, const char *one, const char ***other,
- int *num_other, int runtime)
+int expr__find_other(const char *expr, const char *one,
+ struct expr_parse_ctx *ctx, int runtime)
{
- int err, i = 0, j = 0;
- struct expr_parse_ctx ctx;
-
- expr__ctx_init(&ctx);
- err = __expr__parse(NULL, &ctx, expr, EXPR_OTHER, runtime);
- if (err)
- return -1;
-
- *other = malloc((ctx.num_ids + 1) * sizeof(char *));
- if (!*other)
- return -ENOMEM;
-
- for (i = 0, j = 0; i < ctx.num_ids; i++) {
- const char *str = ctx.ids[i].name;
-
- if (already_seen(str, one, *other, j))
- continue;
-
- str = strdup(str);
- if (!str)
- goto out;
- (*other)[j++] = str;
- }
- (*other)[j] = NULL;
-
-out:
- if (i != ctx.num_ids) {
- while (--j)
- free((char *) (*other)[i]);
- free(*other);
- err = -1;
+ double *old_val = NULL;
+ char *old_key = NULL;
+ int ret = __expr__parse(NULL, ctx, expr, EXPR_OTHER, runtime);
+
+ if (one) {
+ hashmap__delete(&ctx->ids, one,
+ (const void **)&old_key, (void **)&old_val);
+ free(old_key);
+ free(old_val);
}
- *num_other = j;
- return err;
+ return ret;
}
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index 87d627bb699b..8a2c1074f90f 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -2,17 +2,17 @@
#ifndef PARSE_CTX_H
#define PARSE_CTX_H 1
-#define EXPR_MAX_OTHER 20
-#define MAX_PARSE_ID EXPR_MAX_OTHER
-
-struct expr_parse_id {
- const char *name;
- double val;
-};
+// There are fixes that need to land upstream before we can use libbpf's headers,
+// for now use our copy unconditionally, since the data structures at this point
+// are exactly the same, no problem.
+//#ifdef HAVE_LIBBPF_SUPPORT
+//#include <bpf/hashmap.h>
+//#else
+#include "util/hashmap.h"
+//#endif
struct expr_parse_ctx {
- int num_ids;
- struct expr_parse_id ids[MAX_PARSE_ID];
+ struct hashmap ids;
};
struct expr_scanner_ctx {
@@ -21,9 +21,12 @@ struct expr_scanner_ctx {
};
void expr__ctx_init(struct expr_parse_ctx *ctx);
-void expr__add_id(struct expr_parse_ctx *ctx, const char *id, double val);
-int expr__parse(double *final_val, struct expr_parse_ctx *ctx, const char *expr, int runtime);
-int expr__find_other(const char *expr, const char *one, const char ***other,
- int *num_other, int runtime);
+void expr__ctx_clear(struct expr_parse_ctx *ctx);
+int expr__add_id(struct expr_parse_ctx *ctx, const char *id, double val);
+int expr__get_id(struct expr_parse_ctx *ctx, const char *id, double *val_ptr);
+int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
+ const char *expr, int runtime);
+int expr__find_other(const char *expr, const char *one,
+ struct expr_parse_ctx *ids, int runtime);
#endif
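A minimal sketch of how the reworked, hashmap-backed expr API above is meant to be driven; the metric name and value are illustrative and error handling is omitted:

	struct expr_parse_ctx ctx;
	double result;

	expr__ctx_init(&ctx);					/* hashmap__init() underneath */
	expr__add_id(&ctx, strdup("inst_retired.any"), 42.0);	/* id must be caller-allocated */
	if (expr__parse(&result, &ctx, "inst_retired.any * 2", 0) == 0)
		/* result == 84.0 */;
	expr__ctx_clear(&ctx);					/* frees ids, values and buckets */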
diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l
index 74b9b59b1aa5..f397bf8b1a48 100644
--- a/tools/perf/util/expr.l
+++ b/tools/perf/util/expr.l
@@ -10,12 +10,12 @@
char *expr_get_text(yyscan_t yyscanner);
YYSTYPE *expr_get_lval(yyscan_t yyscanner);
-static int __value(YYSTYPE *yylval, char *str, int base, int token)
+static double __value(YYSTYPE *yylval, char *str, int token)
{
- u64 num;
+ double num;
errno = 0;
- num = strtoull(str, NULL, base);
+ num = strtod(str, NULL);
if (errno)
return EXPR_ERROR;
@@ -23,12 +23,12 @@ static int __value(YYSTYPE *yylval, char *str, int base, int token)
return token;
}
-static int value(yyscan_t scanner, int base)
+static int value(yyscan_t scanner)
{
YYSTYPE *yylval = expr_get_lval(scanner);
char *text = expr_get_text(scanner);
- return __value(yylval, text, base, NUMBER);
+ return __value(yylval, text, NUMBER);
}
/*
@@ -81,12 +81,12 @@ static int str(yyscan_t scanner, int token, int runtime)
}
%}
-number [0-9]+
+number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)
sch [-,=]
spec \\{sch}
sym [0-9a-zA-Z_\.:@?]+
-symbol {spec}*{sym}*{spec}*{sym}*{spec}*{sym}
+symbol ({spec}|{sym})+
%%
struct expr_scanner_ctx *sctx = expr_get_extra(yyscanner);
@@ -105,7 +105,7 @@ min { return MIN; }
if { return IF; }
else { return ELSE; }
#smt_on { return SMT_ON; }
-{number} { return value(yyscanner, 10); }
+{number} { return value(yyscanner); }
{symbol} { return str(yyscanner, ID, sctx->runtime); }
"|" { return '|'; }
"^" { return '^'; }
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index cd17486c1c5d..bf3e898e3055 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -27,6 +27,7 @@
%token EXPR_PARSE EXPR_OTHER EXPR_ERROR
%token <num> NUMBER
%token <str> ID
+%destructor { free ($$); } <str>
%token MIN MAX IF ELSE SMT_ON
%left MIN MAX IF
%left '|'
@@ -46,19 +47,6 @@ static void expr_error(double *final_val __maybe_unused,
pr_debug("%s\n", s);
}
-static int lookup_id(struct expr_parse_ctx *ctx, char *id, double *val)
-{
- int i;
-
- for (i = 0; i < ctx->num_ids; i++) {
- if (!strcasecmp(ctx->ids[i].name, id)) {
- *val = ctx->ids[i].val;
- return 0;
- }
- }
- return -1;
-}
-
%}
%%
@@ -72,15 +60,10 @@ all_other: all_other other
other: ID
{
- if (ctx->num_ids + 1 >= EXPR_MAX_OTHER) {
- pr_err("failed: way too many variables");
- YYABORT;
- }
-
- ctx->ids[ctx->num_ids++].name = $1;
+ expr__add_id(ctx, $1, 0.0);
}
|
-MIN | MAX | IF | ELSE | SMT_ON | NUMBER | '|' | '^' | '&' | '-' | '+' | '*' | '/' | '%' | '(' | ')'
+MIN | MAX | IF | ELSE | SMT_ON | NUMBER | '|' | '^' | '&' | '-' | '+' | '*' | '/' | '%' | '(' | ')' | ','
all_expr: if_expr { *final_val = $1; }
@@ -92,10 +75,12 @@ if_expr:
;
expr: NUMBER
- | ID { if (lookup_id(ctx, $1, &$$) < 0) {
+ | ID { if (expr__get_id(ctx, $1, &$$)) {
pr_debug("%s not found\n", $1);
+ free($1);
YYABORT;
}
+ free($1);
}
| expr '|' expr { $$ = (long)$1 | (long)$3; }
| expr '&' expr { $$ = (long)$1 & (long)$3; }
@@ -103,8 +88,18 @@ expr: NUMBER
| expr '+' expr { $$ = $1 + $3; }
| expr '-' expr { $$ = $1 - $3; }
| expr '*' expr { $$ = $1 * $3; }
- | expr '/' expr { if ($3 == 0) YYABORT; $$ = $1 / $3; }
- | expr '%' expr { if ((long)$3 == 0) YYABORT; $$ = (long)$1 % (long)$3; }
+ | expr '/' expr { if ($3 == 0) {
+ pr_debug("division by zero\n");
+ YYABORT;
+ }
+ $$ = $1 / $3;
+ }
+ | expr '%' expr { if ((long)$3 == 0) {
+ pr_debug("division by zero\n");
+ YYABORT;
+ }
+ $$ = (long)$1 % (long)$3;
+ }
| '-' expr %prec NEG { $$ = -$2; }
| '(' if_expr ')' { $$ = $2; }
| MIN '(' expr ',' expr ')' { $$ = $3 < $5 ? $3 : $5; }
diff --git a/tools/perf/util/genelf_debug.c b/tools/perf/util/genelf_debug.c
index 30e9f618f6cd..dd40683bd4c0 100644
--- a/tools/perf/util/genelf_debug.c
+++ b/tools/perf/util/genelf_debug.c
@@ -342,7 +342,7 @@ static void emit_lineno_info(struct buffer_ext *be,
*/
/* start state of the state machine we take care of */
- unsigned long last_vma = code_addr;
+ unsigned long last_vma = 0;
char const *cur_filename = NULL;
unsigned long cur_file_idx = 0;
int last_line = 1;
@@ -473,7 +473,7 @@ jit_process_debug_info(uint64_t code_addr,
ent = debug_entry_next(ent);
}
add_compilation_unit(di, buffer_ext_size(dl));
- add_debug_line(dl, debug, nr_debug_entries, 0);
+ add_debug_line(dl, debug, nr_debug_entries, GEN_ELF_TEXT_OFFSET);
add_debug_abbrev(da);
if (0) buffer_ext_dump(da, "abbrev");
diff --git a/tools/perf/util/hashmap.c b/tools/perf/util/hashmap.c
new file mode 100644
index 000000000000..a405dad068f5
--- /dev/null
+++ b/tools/perf/util/hashmap.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * Generic non-thread safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "hashmap.h"
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+/* start with 4 buckets */
+#define HASHMAP_MIN_CAP_BITS 2
+
+static void hashmap_add_entry(struct hashmap_entry **pprev,
+ struct hashmap_entry *entry)
+{
+ entry->next = *pprev;
+ *pprev = entry;
+}
+
+static void hashmap_del_entry(struct hashmap_entry **pprev,
+ struct hashmap_entry *entry)
+{
+ *pprev = entry->next;
+ entry->next = NULL;
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn, void *ctx)
+{
+ map->hash_fn = hash_fn;
+ map->equal_fn = equal_fn;
+ map->ctx = ctx;
+
+ map->buckets = NULL;
+ map->cap = 0;
+ map->cap_bits = 0;
+ map->sz = 0;
+}
+
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn,
+ void *ctx)
+{
+ struct hashmap *map = malloc(sizeof(struct hashmap));
+
+ if (!map)
+ return ERR_PTR(-ENOMEM);
+ hashmap__init(map, hash_fn, equal_fn, ctx);
+ return map;
+}
+
+void hashmap__clear(struct hashmap *map)
+{
+ struct hashmap_entry *cur, *tmp;
+ size_t bkt;
+
+ hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+ free(cur);
+ }
+ free(map->buckets);
+ map->buckets = NULL;
+ map->cap = map->cap_bits = map->sz = 0;
+}
+
+void hashmap__free(struct hashmap *map)
+{
+ if (!map)
+ return;
+
+ hashmap__clear(map);
+ free(map);
+}
+
+size_t hashmap__size(const struct hashmap *map)
+{
+ return map->sz;
+}
+
+size_t hashmap__capacity(const struct hashmap *map)
+{
+ return map->cap;
+}
+
+static bool hashmap_needs_to_grow(struct hashmap *map)
+{
+ /* grow if empty or more than 75% filled */
+ return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
+}
+
+static int hashmap_grow(struct hashmap *map)
+{
+ struct hashmap_entry **new_buckets;
+ struct hashmap_entry *cur, *tmp;
+ size_t new_cap_bits, new_cap;
+ size_t h, bkt;
+
+ new_cap_bits = map->cap_bits + 1;
+ if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
+ new_cap_bits = HASHMAP_MIN_CAP_BITS;
+
+ new_cap = 1UL << new_cap_bits;
+ new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
+ if (!new_buckets)
+ return -ENOMEM;
+
+ hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+ h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
+ hashmap_add_entry(&new_buckets[h], cur);
+ }
+
+ map->cap = new_cap;
+ map->cap_bits = new_cap_bits;
+ free(map->buckets);
+ map->buckets = new_buckets;
+
+ return 0;
+}
+
+static bool hashmap_find_entry(const struct hashmap *map,
+ const void *key, size_t hash,
+ struct hashmap_entry ***pprev,
+ struct hashmap_entry **entry)
+{
+ struct hashmap_entry *cur, **prev_ptr;
+
+ if (!map->buckets)
+ return false;
+
+ for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
+ cur;
+ prev_ptr = &cur->next, cur = cur->next) {
+ if (map->equal_fn(cur->key, key, map->ctx)) {
+ if (pprev)
+ *pprev = prev_ptr;
+ *entry = cur;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+ enum hashmap_insert_strategy strategy,
+ const void **old_key, void **old_value)
+{
+ struct hashmap_entry *entry;
+ size_t h;
+ int err;
+
+ if (old_key)
+ *old_key = NULL;
+ if (old_value)
+ *old_value = NULL;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (strategy != HASHMAP_APPEND &&
+ hashmap_find_entry(map, key, h, NULL, &entry)) {
+ if (old_key)
+ *old_key = entry->key;
+ if (old_value)
+ *old_value = entry->value;
+
+ if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
+ entry->key = key;
+ entry->value = value;
+ return 0;
+ } else if (strategy == HASHMAP_ADD) {
+ return -EEXIST;
+ }
+ }
+
+ if (strategy == HASHMAP_UPDATE)
+ return -ENOENT;
+
+ if (hashmap_needs_to_grow(map)) {
+ err = hashmap_grow(map);
+ if (err)
+ return err;
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ }
+
+ entry = malloc(sizeof(struct hashmap_entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key = key;
+ entry->value = value;
+ hashmap_add_entry(&map->buckets[h], entry);
+ map->sz++;
+
+ return 0;
+}
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+{
+ struct hashmap_entry *entry;
+ size_t h;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (!hashmap_find_entry(map, key, h, NULL, &entry))
+ return false;
+
+ if (value)
+ *value = entry->value;
+ return true;
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+ const void **old_key, void **old_value)
+{
+ struct hashmap_entry **pprev, *entry;
+ size_t h;
+
+ h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+ if (!hashmap_find_entry(map, key, h, &pprev, &entry))
+ return false;
+
+ if (old_key)
+ *old_key = entry->key;
+ if (old_value)
+ *old_value = entry->value;
+
+ hashmap_del_entry(pprev, entry);
+ free(entry);
+ map->sz--;
+
+ return true;
+}
+
diff --git a/tools/perf/util/hashmap.h b/tools/perf/util/hashmap.h
new file mode 100644
index 000000000000..df59fd4fc95b
--- /dev/null
+++ b/tools/perf/util/hashmap.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Generic non-thread safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#ifndef __LIBBPF_HASHMAP_H
+#define __LIBBPF_HASHMAP_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <limits.h>
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
+static inline size_t hash_bits(size_t h, int bits)
+{
+ /* shuffle bits and return requested number of upper bits */
+ return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+}
+
+typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
+typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
+
+struct hashmap_entry {
+ const void *key;
+ void *value;
+ struct hashmap_entry *next;
+};
+
+struct hashmap {
+ hashmap_hash_fn hash_fn;
+ hashmap_equal_fn equal_fn;
+ void *ctx;
+
+ struct hashmap_entry **buckets;
+ size_t cap;
+ size_t cap_bits;
+ size_t sz;
+};
+
+#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \
+ .hash_fn = (hash_fn), \
+ .equal_fn = (equal_fn), \
+ .ctx = (ctx), \
+ .buckets = NULL, \
+ .cap = 0, \
+ .cap_bits = 0, \
+ .sz = 0, \
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn, void *ctx);
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+ hashmap_equal_fn equal_fn,
+ void *ctx);
+void hashmap__clear(struct hashmap *map);
+void hashmap__free(struct hashmap *map);
+
+size_t hashmap__size(const struct hashmap *map);
+size_t hashmap__capacity(const struct hashmap *map);
+
+/*
+ * Hashmap insertion strategy:
+ * - HASHMAP_ADD - only add key/value if key doesn't exist yet;
+ * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise,
+ * update value;
+ * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do
+ * nothing and return -ENOENT;
+ * - HASHMAP_APPEND - always add key/value pair, even if key already exists.
+ * This turns hashmap into a multimap by allowing multiple values to be
+ * associated with the same key. Most useful read API for such hashmap is
+ * hashmap__for_each_key_entry() iteration. If hashmap__find() is still
+ * used, it will return last inserted key/value entry (first in a bucket
+ * chain).
+ */
+enum hashmap_insert_strategy {
+ HASHMAP_ADD,
+ HASHMAP_SET,
+ HASHMAP_UPDATE,
+ HASHMAP_APPEND,
+};
+
+/*
+ * hashmap__insert() adds key/value entry w/ various semantics, depending on
+ * the provided strategy value. If a given key/value pair replaces an already
+ * existing key/value pair, both the old key and old value will be returned
+ * through old_key and old_value so the calling code can do proper memory
+ * management.
+ */
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+ enum hashmap_insert_strategy strategy,
+ const void **old_key, void **old_value);
+
+static inline int hashmap__add(struct hashmap *map,
+ const void *key, void *value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
+}
+
+static inline int hashmap__set(struct hashmap *map,
+ const void *key, void *value,
+ const void **old_key, void **old_value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_SET,
+ old_key, old_value);
+}
+
+static inline int hashmap__update(struct hashmap *map,
+ const void *key, void *value,
+ const void **old_key, void **old_value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_UPDATE,
+ old_key, old_value);
+}
+
+static inline int hashmap__append(struct hashmap *map,
+ const void *key, void *value)
+{
+ return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+ const void **old_key, void **old_value);
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+
+/*
+ * hashmap__for_each_entry - iterate over all entries in hashmap
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @bkt: integer used as a bucket loop cursor
+ */
+#define hashmap__for_each_entry(map, cur, bkt) \
+ for (bkt = 0; bkt < map->cap; bkt++) \
+ for (cur = map->buckets[bkt]; cur; cur = cur->next)
+
+/*
+ * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
+ * against removals
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @tmp: struct hashmap_entry * used as a temporary next cursor storage
+ * @bkt: integer used as a bucket loop cursor
+ */
+#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
+ for (bkt = 0; bkt < map->cap; bkt++) \
+ for (cur = map->buckets[bkt]; \
+ cur && ({tmp = cur->next; true; }); \
+ cur = tmp)
+
+/*
+ * hashmap__for_each_key_entry - iterate over entries associated with given key
+ * @map: hashmap to iterate
+ * @cur: struct hashmap_entry * used as a loop cursor
+ * @key: key to iterate entries for
+ */
+#define hashmap__for_each_key_entry(map, cur, _key) \
+ for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+ map->cap_bits); \
+ map->buckets ? map->buckets[bkt] : NULL; }); \
+ cur; \
+ cur = cur->next) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
+ for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
+ map->cap_bits); \
+ cur = map->buckets ? map->buckets[bkt] : NULL; }); \
+ cur && ({ tmp = cur->next; true; }); \
+ cur = tmp) \
+ if (map->equal_fn(cur->key, (_key), map->ctx))
+
+#endif /* __LIBBPF_HASHMAP_H */
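For orientation, a minimal sketch of how this copied libbpf hashmap is driven (string keys, as in expr.c above; the hash/equal callbacks and key names are illustrative, and IS_ERR() checking of hashmap__new() plus the string.h include are omitted for brevity):

	static size_t str_hash(const void *key, void *ctx __maybe_unused)
	{
		size_t h = 0;
		const char *s;

		for (s = key; *s; s++)
			h = h * 31 + *s;
		return h;
	}

	static bool str_equal(const void *a, const void *b, void *ctx __maybe_unused)
	{
		return !strcmp(a, b);
	}

	static void hashmap_usage_example(void)
	{
		struct hashmap *map = hashmap__new(str_hash, str_equal, NULL);
		static int count = 1;
		void *val;

		hashmap__add(map, "cycles", &count);	/* returns -EEXIST if the key already exists */
		if (hashmap__find(map, "cycles", &val))
			printf("cycles -> %d\n", *(int *)val);
		hashmap__free(map);			/* frees entries and buckets, not keys/values */
	}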
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 0ce47283a8a1..7a67d017d72c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3574,7 +3574,7 @@ static int perf_header__read_pipe(struct perf_session *session)
return -EINVAL;
}
- return 0;
+ return f_header.size == sizeof(f_header) ? 0 : -1;
}
static int read_attr(int fd, struct perf_header *ph,
@@ -3676,7 +3676,7 @@ int perf_session__read_header(struct perf_session *session)
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
- int nr_attrs, nr_ids, i, j;
+ int nr_attrs, nr_ids, i, j, err;
int fd = perf_data__fd(data);
session->evlist = evlist__new();
@@ -3685,8 +3685,16 @@ int perf_session__read_header(struct perf_session *session)
session->evlist->env = &header->env;
session->machines.host.env = &header->env;
- if (perf_data__is_pipe(data))
- return perf_header__read_pipe(session);
+
+ /*
+ * We can read 'pipe' data events from a regular file, so
+ * check for the pipe header regardless of the data source.
+ */
+ err = perf_header__read_pipe(session);
+ if (!err || (err && perf_data__is_pipe(data))) {
+ data->is_pipe = true;
+ return err;
+ }
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
@@ -3947,12 +3955,22 @@ int perf_event__process_tracing_data(struct perf_session *session,
{
ssize_t size_read, padding, size = event->tracing_data.size;
int fd = perf_data__fd(session->data);
- off_t offset = lseek(fd, 0, SEEK_CUR);
char buf[BUFSIZ];
- /* setup for reading amidst mmap */
- lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
- SEEK_SET);
+ /*
+ * The pipe fd is already in the proper place and in any case
+ * we can't move it; doing so would break the case where we read
+ * 'pipe' data from a regular file. trace_report() reads
+ * data from 'fd', so we need to position it directly behind the
+ * event, where the tracing data starts.
+ */
+ if (!perf_data__is_pipe(session->data)) {
+ off_t offset = lseek(fd, 0, SEEK_CUR);
+
+ /* setup for reading amidst mmap */
+ lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
+ SEEK_SET);
+ }
size_read = trace_report(fd, &session->tevent,
session->repipe);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 12b65d00cf65..8a793e4c9400 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1930,8 +1930,8 @@ static void output_resort(struct hists *hists, struct ui_progress *prog,
}
}
-void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
- hists__resort_cb_t cb, void *cb_arg)
+void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg)
{
bool use_callchain;
@@ -1945,9 +1945,9 @@ void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}
-void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
+void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
- return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
+ return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
@@ -2845,9 +2845,8 @@ static int hists_evsel__init(struct evsel *evsel)
int hists__init(void)
{
- int err = perf_evsel__object_config(sizeof(struct hists_evsel),
- hists_evsel__init,
- hists_evsel__exit);
+ int err = evsel__object_config(sizeof(struct hists_evsel),
+ hists_evsel__init, hists_evsel__exit);
if (err)
fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 4141295a66fa..96b1c13bbccc 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -173,9 +173,9 @@ void hist_entry__delete(struct hist_entry *he);
typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);
-void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
- hists__resort_cb_t cb, void *cb_arg);
-void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog);
+void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
+ hists__resort_cb_t cb, void *cb_arg);
+void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
hists__resort_cb_t cb);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index f17b1e769ae4..e4dd8bf610ce 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -913,11 +913,11 @@ static void intel_pt_add_callchain(struct intel_pt *pt,
sample->callchain = pt->chain;
}
-static struct branch_stack *intel_pt_alloc_br_stack(struct intel_pt *pt)
+static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
{
size_t sz = sizeof(struct branch_stack);
- sz += pt->br_stack_sz * sizeof(struct branch_entry);
+ sz += entry_cnt * sizeof(struct branch_entry);
return zalloc(sz);
}
@@ -930,7 +930,7 @@ static int intel_pt_br_stack_init(struct intel_pt *pt)
evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
}
- pt->br_stack = intel_pt_alloc_br_stack(pt);
+ pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
if (!pt->br_stack)
return -ENOMEM;
@@ -951,6 +951,9 @@ static void intel_pt_add_br_stack(struct intel_pt *pt,
sample->branch_stack = pt->br_stack;
}
+/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
+#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
+
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
unsigned int queue_nr)
{
@@ -968,8 +971,10 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
goto out_free;
}
- if (pt->synth_opts.last_branch) {
- ptq->last_branch = intel_pt_alloc_br_stack(pt);
+ if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
+ unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);
+
+ ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
if (!ptq->last_branch)
goto out_free;
}
@@ -1720,9 +1725,6 @@ static void intel_pt_add_lbrs(struct branch_stack *br_stack,
}
}
-/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
-#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3)
-
static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_blk_items *items = &ptq->state->items;
@@ -1798,25 +1800,18 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
- struct {
- struct branch_stack br_stack;
- struct branch_entry entries[LBRS_MAX];
- } br;
-
if (items->mask[INTEL_PT_LBR_0_POS] ||
items->mask[INTEL_PT_LBR_1_POS] ||
items->mask[INTEL_PT_LBR_2_POS]) {
- intel_pt_add_lbrs(&br.br_stack, items);
- sample.branch_stack = &br.br_stack;
+ intel_pt_add_lbrs(ptq->last_branch, items);
} else if (pt->synth_opts.last_branch) {
thread_stack__br_sample(ptq->thread, ptq->cpu,
ptq->last_branch,
pt->br_stack_sz);
- sample.branch_stack = ptq->last_branch;
} else {
- br.br_stack.nr = 0;
- sample.branch_stack = &br.br_stack;
+ ptq->last_branch->nr = 0;
}
+ sample.branch_stack = ptq->last_branch;
}
if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index e3ccb0ce1938..32bb05e03fb2 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -57,7 +57,7 @@ struct debug_line_info {
unsigned long vma;
unsigned int lineno;
/* The filename format is unspecified, absolute path, relative etc. */
- char const filename[0];
+ char const filename[];
};
struct jit_tool {
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
index f2c3823cc81a..ab2842def83d 100644
--- a/tools/perf/util/jitdump.h
+++ b/tools/perf/util/jitdump.h
@@ -93,7 +93,7 @@ struct debug_entry {
uint64_t addr;
int lineno; /* source line number starting at 1 */
int discrim; /* column discriminator, 0 is default */
- const char name[0]; /* null terminated filename, \xff\0 if same as previous entry */
+ const char name[]; /* null terminated filename, \xff\0 if same as previous entry */
};
struct jr_code_debug_info {
@@ -101,7 +101,7 @@ struct jr_code_debug_info {
uint64_t code_addr;
uint64_t nr_entry;
- struct debug_entry entries[0];
+ struct debug_entry entries[];
};
struct jr_code_unwinding_info {
@@ -110,7 +110,7 @@ struct jr_code_unwinding_info {
uint64_t unwinding_size;
uint64_t eh_frame_hdr_size;
uint64_t mapped_size;
- const char unwinding_data[0];
+ const char unwinding_data[];
};
union jr_entry {
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8ed2135893bb..d5384807372b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -738,8 +738,8 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
static int is_bpf_image(const char *name)
{
- return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) ||
- strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1);
+ return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
+ strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
}
static int machine__process_ksymbol_register(struct machine *machine,
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index aa29589f6904..ea0af0bc4314 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -103,6 +103,21 @@ int perf_mem_events__init(void)
return found ? 0 : -ENOENT;
}
+void perf_mem_events__list(void)
+{
+ int j;
+
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ struct perf_mem_event *e = &perf_mem_events[j];
+
+ fprintf(stderr, "%-13s%-*s%s\n",
+ e->tag,
+ verbose > 0 ? 25 : 0,
+ verbose > 0 ? perf_mem_events__name(j) : "",
+ e->supported ? ": available" : "");
+ }
+}
+
static const char * const tlb_access[] = {
"N/A",
"HIT",
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index f1389bdae7bf..904dad34f7f7 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -39,6 +39,8 @@ int perf_mem_events__init(void);
char *perf_mem_events__name(int i);
+void perf_mem_events__list(void);
+
struct mem_info;
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index b071df373f8b..9e21aa767e41 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -85,50 +85,103 @@ static void metricgroup__rblist_init(struct rblist *metric_events)
struct egroup {
struct list_head nd;
- int idnum;
- const char **ids;
+ struct expr_parse_ctx pctx;
const char *metric_name;
const char *metric_expr;
const char *metric_unit;
int runtime;
+ bool has_constraint;
};
+/**
+ * Find a group of events in perf_evlist that correspond to those from a parsed
+ * metric expression. Note, as find_evsel_group is called in the same order as
+ * perf_evlist was constructed, metric_no_merge doesn't need to test for
+ * underfilling a group.
+ * @perf_evlist: a list of events something like: {metric1 leader, metric1
+ * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
+ * metric2 sibling}:W,duration_time
+ * @pctx: the parse context for the metric expression.
+ * @metric_no_merge: don't attempt to share events for the metric with other
+ * metrics.
+ * @has_constraint: is there a constraint on the group of events? If so,
+ * the events won't be grouped.
+ * @metric_events: out argument, NULL-terminated array of evsels associated
+ * with the metric.
+ * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
+ * @return the first metric event or NULL on failure.
+ */
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
- const char **ids,
- int idnum,
+ struct expr_parse_ctx *pctx,
+ bool metric_no_merge,
+ bool has_constraint,
struct evsel **metric_events,
- bool *evlist_used)
+ unsigned long *evlist_used)
{
- struct evsel *ev;
- int i = 0, j = 0;
- bool leader_found;
+ struct evsel *ev, *current_leader = NULL;
+ double *val_ptr;
+ int i = 0, matched_events = 0, events_to_match;
+ const int idnum = (int)hashmap__size(&pctx->ids);
+
+ /* duration_time is grouped separately. */
+ if (!has_constraint &&
+ hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
+ events_to_match = idnum - 1;
+ else
+ events_to_match = idnum;
evlist__for_each_entry (perf_evlist, ev) {
- if (evlist_used[j++])
+ /*
+ * Events with a constraint aren't grouped and match the first
+ * events available.
+ */
+ if (has_constraint && ev->weak_group)
continue;
- if (!strcmp(ev->name, ids[i])) {
- if (!metric_events[i])
- metric_events[i] = ev;
- i++;
- if (i == idnum)
- break;
- } else {
- /* Discard the whole match and start again */
- i = 0;
+ /* Ignore event if already used and merging is disabled. */
+ if (metric_no_merge && test_bit(ev->idx, evlist_used))
+ continue;
+ if (!has_constraint && ev->leader != current_leader) {
+ /*
+ * Start of a new group, discard the whole match and
+ * start again.
+ */
+ matched_events = 0;
memset(metric_events, 0,
sizeof(struct evsel *) * idnum);
+ current_leader = ev->leader;
+ }
+ if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr)) {
+ if (has_constraint) {
+ /*
+ * Events aren't grouped, ensure the same event
+ * isn't matched from two groups.
+ */
+ for (i = 0; i < matched_events; i++) {
+ if (!strcmp(ev->name,
+ metric_events[i]->name)) {
+ break;
+ }
+ }
+ if (i != matched_events)
+ continue;
+ }
+ metric_events[matched_events++] = ev;
+ }
+ if (matched_events == events_to_match)
+ break;
+ }
- if (!strcmp(ev->name, ids[i])) {
- if (!metric_events[i])
- metric_events[i] = ev;
- i++;
- if (i == idnum)
- break;
+ if (events_to_match != idnum) {
+ /* Add the first duration_time. */
+ evlist__for_each_entry(perf_evlist, ev) {
+ if (!strcmp(ev->name, "duration_time")) {
+ metric_events[matched_events++] = ev;
+ break;
}
}
}
- if (i != idnum) {
+ if (matched_events != idnum) {
/* Not whole match */
return NULL;
}
@@ -136,25 +189,16 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
metric_events[idnum] = NULL;
for (i = 0; i < idnum; i++) {
- leader_found = false;
- evlist__for_each_entry(perf_evlist, ev) {
- if (!leader_found && (ev == metric_events[i]))
- leader_found = true;
-
- if (leader_found &&
- !strcmp(ev->name, metric_events[i]->name)) {
- ev->metric_leader = metric_events[i];
- }
- j++;
- }
ev = metric_events[i];
- evlist_used[ev->idx] = true;
+ ev->metric_leader = ev;
+ set_bit(ev->idx, evlist_used);
}
return metric_events[0];
}
static int metricgroup__setup_events(struct list_head *groups,
+ bool metric_no_merge,
struct evlist *perf_evlist,
struct rblist *metric_events_list)
{
@@ -163,40 +207,44 @@ static int metricgroup__setup_events(struct list_head *groups,
int i = 0;
int ret = 0;
struct egroup *eg;
- struct evsel *evsel;
- bool *evlist_used;
+ struct evsel *evsel, *tmp;
+ unsigned long *evlist_used;
- evlist_used = calloc(perf_evlist->core.nr_entries, sizeof(bool));
- if (!evlist_used) {
- ret = -ENOMEM;
- return ret;
- }
+ evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
+ if (!evlist_used)
+ return -ENOMEM;
list_for_each_entry (eg, groups, nd) {
struct evsel **metric_events;
- metric_events = calloc(sizeof(void *), eg->idnum + 1);
+ metric_events = calloc(sizeof(void *),
+ hashmap__size(&eg->pctx.ids) + 1);
if (!metric_events) {
ret = -ENOMEM;
break;
}
- evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
- metric_events, evlist_used);
+ evsel = find_evsel_group(perf_evlist, &eg->pctx,
+ metric_no_merge,
+ eg->has_constraint, metric_events,
+ evlist_used);
if (!evsel) {
pr_debug("Cannot resolve %s: %s\n",
eg->metric_name, eg->metric_expr);
+ free(metric_events);
continue;
}
- for (i = 0; i < eg->idnum; i++)
+ for (i = 0; metric_events[i]; i++)
metric_events[i]->collect_stat = true;
me = metricgroup__lookup(metric_events_list, evsel, true);
if (!me) {
ret = -ENOMEM;
+ free(metric_events);
break;
}
expr = malloc(sizeof(struct metric_expr));
if (!expr) {
ret = -ENOMEM;
+ free(metric_events);
break;
}
expr->metric_expr = eg->metric_expr;
@@ -207,7 +255,13 @@ static int metricgroup__setup_events(struct list_head *groups,
list_add(&expr->nd, &me->head);
}
- free(evlist_used);
+ evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
+ if (!test_bit(evsel->idx, evlist_used)) {
+ evlist__remove(perf_evlist, evsel);
+ evsel__delete(evsel);
+ }
+ }
+ bitmap_free(evlist_used);
return ret;
}
@@ -415,43 +469,49 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
}
static void metricgroup__add_metric_weak_group(struct strbuf *events,
- const char **ids,
- int idnum)
+ struct expr_parse_ctx *ctx)
{
- bool no_group = false;
- int i;
+ struct hashmap_entry *cur;
+ size_t bkt;
+ bool no_group = true, has_duration = false;
- for (i = 0; i < idnum; i++) {
- pr_debug("found event %s\n", ids[i]);
+ hashmap__for_each_entry((&ctx->ids), cur, bkt) {
+ pr_debug("found event %s\n", (const char *)cur->key);
/*
* Duration time maps to a software event and can make
* groups not count. Always use it outside a
* group.
*/
- if (!strcmp(ids[i], "duration_time")) {
- if (i > 0)
- strbuf_addf(events, "}:W,");
- strbuf_addf(events, "duration_time");
- no_group = true;
+ if (!strcmp(cur->key, "duration_time")) {
+ has_duration = true;
continue;
}
strbuf_addf(events, "%s%s",
- i == 0 || no_group ? "{" : ",",
- ids[i]);
+ no_group ? "{" : ",",
+ (const char *)cur->key);
no_group = false;
}
- if (!no_group)
+ if (!no_group) {
strbuf_addf(events, "}:W");
+ if (has_duration)
+ strbuf_addf(events, ",duration_time");
+ } else if (has_duration)
+ strbuf_addf(events, "duration_time");
}
static void metricgroup__add_metric_non_group(struct strbuf *events,
- const char **ids,
- int idnum)
+ struct expr_parse_ctx *ctx)
{
- int i;
-
- for (i = 0; i < idnum; i++)
- strbuf_addf(events, ",%s", ids[i]);
+ struct hashmap_entry *cur;
+ size_t bkt;
+ bool first = true;
+
+ hashmap__for_each_entry((&ctx->ids), cur, bkt) {
+ if (!first)
+ strbuf_addf(events, ",");
+ strbuf_addf(events, "%s", (const char *)cur->key);
+ first = false;
+ }
}
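
A standalone distillation of the formatting rule the two emitters above implement, assuming hypothetical event ids and a caller-supplied buffer; real callers build the string through strbuf and the metric's parse context instead:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Everything except duration_time goes into one "{...}:W" weak group;
 * duration_time is always emitted outside the braces.
 */
static void format_weak_group(char *out, size_t sz,
			      const char **ids, int nr_ids)
{
	bool no_group = true, has_duration = false;
	size_t len = 0;
	int i;

	out[0] = '\0';
	for (i = 0; i < nr_ids && len < sz; i++) {
		if (!strcmp(ids[i], "duration_time")) {
			has_duration = true;
			continue;
		}
		len += snprintf(out + len, sz - len, "%s%s",
				no_group ? "{" : ",", ids[i]);
		no_group = false;
	}
	if (len >= sz)
		return;
	if (!no_group) {
		len += snprintf(out + len, sz - len, "}:W");
		if (has_duration && len < sz)
			snprintf(out + len, sz - len, ",duration_time");
	} else if (has_duration) {
		snprintf(out + len, sz - len, "duration_time");
	}
}

int main(void)
{
	const char *ids[] = { "inst_retired.any", "duration_time", "cycles" };
	char buf[128];

	format_weak_group(buf, sizeof(buf), ids, 3);
	printf("%s\n", buf);	/* {inst_retired.any,cycles}:W,duration_time */
	return 0;
}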
static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
@@ -492,46 +552,58 @@ int __weak arch_get_runtimeparam(void)
return 1;
}
-static int __metricgroup__add_metric(struct strbuf *events,
- struct list_head *group_list, struct pmu_event *pe, int runtime)
+static int __metricgroup__add_metric(struct list_head *group_list,
+ struct pmu_event *pe,
+ bool metric_no_group,
+ int runtime)
{
-
- const char **ids;
- int idnum;
struct egroup *eg;
- if (expr__find_other(pe->metric_expr, NULL, &ids, &idnum, runtime) < 0)
- return -EINVAL;
-
- if (events->len > 0)
- strbuf_addf(events, ",");
-
- if (metricgroup__has_constraint(pe))
- metricgroup__add_metric_non_group(events, ids, idnum);
- else
- metricgroup__add_metric_weak_group(events, ids, idnum);
-
eg = malloc(sizeof(*eg));
if (!eg)
return -ENOMEM;
- eg->ids = ids;
- eg->idnum = idnum;
+ expr__ctx_init(&eg->pctx);
eg->metric_name = pe->metric_name;
eg->metric_expr = pe->metric_expr;
eg->metric_unit = pe->unit;
eg->runtime = runtime;
- list_add_tail(&eg->nd, group_list);
+ eg->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
+
+ if (expr__find_other(pe->metric_expr, NULL, &eg->pctx, runtime) < 0) {
+ expr__ctx_clear(&eg->pctx);
+ free(eg);
+ return -EINVAL;
+ }
+
+ if (list_empty(group_list))
+ list_add(&eg->nd, group_list);
+ else {
+ struct list_head *pos;
+
+ /* Place the largest groups at the front. */
+ list_for_each_prev(pos, group_list) {
+ struct egroup *old = list_entry(pos, struct egroup, nd);
+
+ if (hashmap__size(&eg->pctx.ids) <=
+ hashmap__size(&old->pctx.ids))
+ break;
+ }
+ list_add(&eg->nd, pos);
+ }
return 0;
}
-static int metricgroup__add_metric(const char *metric, struct strbuf *events,
+static int metricgroup__add_metric(const char *metric, bool metric_no_group,
+ struct strbuf *events,
struct list_head *group_list)
{
struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
- int i, ret = -EINVAL;
+ struct egroup *eg;
+ int i, ret;
+ bool has_match = false;
if (!map)
return 0;
@@ -539,17 +611,26 @@ static int metricgroup__add_metric(const char *metric, struct strbuf *events,
for (i = 0; ; i++) {
pe = &map->table[i];
- if (!pe->name && !pe->metric_group && !pe->metric_name)
+ if (!pe->name && !pe->metric_group && !pe->metric_name) {
+ /* End of pmu events. */
+ if (!has_match)
+ return -EINVAL;
break;
+ }
if (!pe->metric_expr)
continue;
if (match_metric(pe->metric_group, metric) ||
match_metric(pe->metric_name, metric)) {
-
+ has_match = true;
pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
if (!strstr(pe->metric_expr, "?")) {
- ret = __metricgroup__add_metric(events, group_list, pe, 1);
+ ret = __metricgroup__add_metric(group_list,
+ pe,
+ metric_no_group,
+ 1);
+ if (ret)
+ return ret;
} else {
int j, count;
@@ -560,17 +641,33 @@ static int metricgroup__add_metric(const char *metric, struct strbuf *events,
* those events to group_list.
*/
- for (j = 0; j < count; j++)
- ret = __metricgroup__add_metric(events, group_list, pe, j);
+ for (j = 0; j < count; j++) {
+ ret = __metricgroup__add_metric(
+ group_list, pe,
+ metric_no_group, j);
+ if (ret)
+ return ret;
+ }
}
- if (ret == -ENOMEM)
- break;
}
}
- return ret;
+ list_for_each_entry(eg, group_list, nd) {
+ if (events->len > 0)
+ strbuf_addf(events, ",");
+
+ if (eg->has_constraint) {
+ metricgroup__add_metric_non_group(events,
+ &eg->pctx);
+ } else {
+ metricgroup__add_metric_weak_group(events,
+ &eg->pctx);
+ }
+ }
+ return 0;
}
-static int metricgroup__add_metric_list(const char *list, struct strbuf *events,
+static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
+ struct strbuf *events,
struct list_head *group_list)
{
char *llist, *nlist, *p;
@@ -585,7 +682,8 @@ static int metricgroup__add_metric_list(const char *list, struct strbuf *events,
strbuf_addf(events, "%s", "");
while ((p = strsep(&llist, ",")) != NULL) {
- ret = metricgroup__add_metric(p, events, group_list);
+ ret = metricgroup__add_metric(p, metric_no_group, events,
+ group_list);
if (ret == -EINVAL) {
fprintf(stderr, "Cannot find metric or group `%s'\n",
p);
@@ -603,20 +701,19 @@ static int metricgroup__add_metric_list(const char *list, struct strbuf *events,
static void metricgroup__free_egroups(struct list_head *group_list)
{
struct egroup *eg, *egtmp;
- int i;
list_for_each_entry_safe (eg, egtmp, group_list, nd) {
- for (i = 0; i < eg->idnum; i++)
- zfree(&eg->ids[i]);
- zfree(&eg->ids);
+ expr__ctx_clear(&eg->pctx);
list_del_init(&eg->nd);
free(eg);
}
}
int metricgroup__parse_groups(const struct option *opt,
- const char *str,
- struct rblist *metric_events)
+ const char *str,
+ bool metric_no_group,
+ bool metric_no_merge,
+ struct rblist *metric_events)
{
struct parse_events_error parse_error;
struct evlist *perf_evlist = *(struct evlist **)opt->value;
@@ -626,7 +723,8 @@ int metricgroup__parse_groups(const struct option *opt,
if (metric_events->nr_entries == 0)
metricgroup__rblist_init(metric_events);
- ret = metricgroup__add_metric_list(str, &extra_events, &group_list);
+ ret = metricgroup__add_metric_list(str, metric_no_group,
+ &extra_events, &group_list);
if (ret)
return ret;
pr_debug("adding %s\n", extra_events.buf);
@@ -637,8 +735,8 @@ int metricgroup__parse_groups(const struct option *opt,
goto out;
}
strbuf_release(&extra_events);
- ret = metricgroup__setup_events(&group_list, perf_evlist,
- metric_events);
+ ret = metricgroup__setup_events(&group_list, metric_no_merge,
+ perf_evlist, metric_events);
out:
metricgroup__free_egroups(&group_list);
return ret;
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index 6b09eb30b4ec..287850bcdeca 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -29,8 +29,10 @@ struct metric_event *metricgroup__lookup(struct rblist *metric_events,
struct evsel *evsel,
bool create);
int metricgroup__parse_groups(const struct option *opt,
- const char *str,
- struct rblist *metric_events);
+ const char *str,
+ bool metric_no_group,
+ bool metric_no_merge,
+ struct rblist *metric_events);
void metricgroup__print(bool metrics, bool groups, char *filter,
bool raw, bool details);
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 0920fb0ec6cc..75345946c4b9 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -29,7 +29,7 @@ typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
struct ordered_events_buffer {
struct list_head list;
- struct ordered_event event[0];
+ struct ordered_event event[];
};
struct ordered_events {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index b7a0518d607d..3decbb203846 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -26,7 +26,7 @@
#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include "parse-events-bison.h"
-#define YY_EXTRA_TYPE int
+#define YY_EXTRA_TYPE void*
#include "parse-events-flex.h"
#include "pmu.h"
#include "thread_map.h"
@@ -36,6 +36,7 @@
#include "metricgroup.h"
#include "util/evsel_config.h"
#include "util/event.h"
+#include "util/pfm.h"
#define MAX_NAME_LEN 100
@@ -204,7 +205,8 @@ void parse_events__handle_error(struct parse_events_error *err, int idx,
err->help = help;
break;
default:
- WARN_ONCE(1, "WARNING: multiple event parsing errors\n");
+ pr_debug("Multiple errors dropping message: %s (%s)\n",
+ err->str, err->help);
free(err->str);
err->str = str;
free(err->help);
@@ -344,6 +346,7 @@ static char *get_config_name(struct list_head *head_terms)
static struct evsel *
__add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
+ bool init_attr,
char *name, struct perf_pmu *pmu,
struct list_head *config_terms, bool auto_merge_stats,
const char *cpu_list)
@@ -352,9 +355,10 @@ __add_event(struct list_head *list, int *idx,
struct perf_cpu_map *cpus = pmu ? pmu->cpus :
cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
- event_attr_init(attr);
+ if (init_attr)
+ event_attr_init(attr);
- evsel = perf_evsel__new_idx(attr, *idx);
+ evsel = evsel__new_idx(attr, *idx);
if (!evsel)
return NULL;
@@ -370,15 +374,25 @@ __add_event(struct list_head *list, int *idx,
if (config_terms)
list_splice(config_terms, &evsel->config_terms);
- list_add_tail(&evsel->core.node, list);
+ if (list)
+ list_add_tail(&evsel->core.node, list);
+
return evsel;
}
+struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
+ char *name, struct perf_pmu *pmu)
+{
+ return __add_event(NULL, &idx, attr, false, name, pmu, NULL, false,
+ NULL);
+}
+
static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name,
struct list_head *config_terms)
{
- return __add_event(list, idx, attr, name, NULL, config_terms, false, NULL) ? 0 : -ENOMEM;
+ return __add_event(list, idx, attr, true, name, NULL, config_terms,
+ false, NULL) ? 0 : -ENOMEM;
}
static int add_event_tool(struct list_head *list, int *idx,
@@ -390,7 +404,8 @@ static int add_event_tool(struct list_head *list, int *idx,
.config = PERF_COUNT_SW_DUMMY,
};
- evsel = __add_event(list, idx, &attr, NULL, NULL, NULL, false, "0");
+ evsel = __add_event(list, idx, &attr, true, NULL, NULL, NULL, false,
+ "0");
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
@@ -399,13 +414,13 @@ static int add_event_tool(struct list_head *list, int *idx,
return 0;
}
-static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
+static int parse_aliases(char *str, const char *names[][EVSEL__MAX_ALIASES], int size)
{
int i, j;
int n, longest = -1;
for (i = 0; i < size; i++) {
- for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
+ for (j = 0; j < EVSEL__MAX_ALIASES && names[i][j]; j++) {
n = strlen(names[i][j]);
if (n > longest && !strncasecmp(str, names[i][j], n))
longest = n;
@@ -444,8 +459,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
* No fallback - if we cannot get a clear cache type
* then bail out:
*/
- cache_type = parse_aliases(type, perf_evsel__hw_cache,
- PERF_COUNT_HW_CACHE_MAX);
+ cache_type = parse_aliases(type, evsel__hw_cache, PERF_COUNT_HW_CACHE_MAX);
if (cache_type == -1)
return -EINVAL;
@@ -458,7 +472,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
if (cache_op == -1) {
- cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
+ cache_op = parse_aliases(str, evsel__hw_cache_op,
PERF_COUNT_HW_CACHE_OP_MAX);
if (cache_op >= 0) {
if (!evsel__is_cache_op_valid(cache_type, cache_op))
@@ -468,7 +482,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
}
if (cache_result == -1) {
- cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
+ cache_result = parse_aliases(str, evsel__hw_cache_result,
PERF_COUNT_HW_CACHE_RESULT_MAX);
if (cache_result >= 0)
continue;
@@ -538,9 +552,8 @@ static int add_tracepoint(struct list_head *list, int *idx,
struct parse_events_error *err,
struct list_head *head_config)
{
- struct evsel *evsel;
+ struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
- evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
if (IS_ERR(evsel)) {
tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name);
return PTR_ERR(evsel);
@@ -1214,14 +1227,14 @@ static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused)
{
#define ADD_CONFIG_TERM(__type, __weak) \
- struct perf_evsel_config_term *__t; \
+ struct evsel_config_term *__t; \
\
__t = zalloc(sizeof(*__t)); \
if (!__t) \
return -ENOMEM; \
\
INIT_LIST_HEAD(&__t->list); \
- __t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
+ __t->type = EVSEL__CONFIG_TERM_ ## __type; \
__t->weak = __weak; \
list_add_tail(&__t->list, head_terms)
@@ -1312,7 +1325,7 @@ do { \
}
/*
- * Add PERF_EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
+ * Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
* each bit of attr->config that the user has changed.
*/
static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
@@ -1400,10 +1413,10 @@ int parse_events_add_tool(struct parse_events_state *parse_state,
static bool config_term_percore(struct list_head *config_terms)
{
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
list_for_each_entry(term, config_terms, list) {
- if (term->type == PERF_EVSEL__CONFIG_TERM_PERCORE)
+ if (term->type == EVSEL__CONFIG_TERM_PERCORE)
return term->val.percore;
}
@@ -1424,6 +1437,19 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
bool use_uncore_alias;
LIST_HEAD(config_terms);
+ if (verbose > 1) {
+ fprintf(stderr, "Attempting to add event pmu '%s' with '",
+ name);
+ if (head_config) {
+ struct parse_events_term *term;
+
+ list_for_each_entry(term, head_config, list) {
+ fprintf(stderr, "%s,", term->config);
+ }
+ }
+ fprintf(stderr, "' that may result in non-fatal errors\n");
+ }
+
pmu = perf_pmu__find(name);
if (!pmu) {
char *err_str;
@@ -1446,8 +1472,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (!head_config) {
attr.type = pmu->type;
- evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
- auto_merge_stats, NULL);
+ evsel = __add_event(list, &parse_state->idx, &attr, true, NULL,
+ pmu, NULL, auto_merge_stats, NULL);
if (evsel) {
evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
@@ -1460,6 +1486,19 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (perf_pmu__check_alias(pmu, head_config, &info))
return -EINVAL;
+ if (verbose > 1) {
+ fprintf(stderr, "After aliases, add event pmu '%s' with '",
+ name);
+ if (head_config) {
+ struct parse_events_term *term;
+
+ list_for_each_entry(term, head_config, list) {
+ fprintf(stderr, "%s,", term->config);
+ }
+ }
+ fprintf(stderr, "' that may result in non-fatal errors\n");
+ }
+
/*
* Configure hardcoded terms first, no need to check
* return value when called with fail == 0 ;)
@@ -1478,17 +1517,18 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return -ENOMEM;
if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
- struct perf_evsel_config_term *pos, *tmp;
+ struct evsel_config_term *pos, *tmp;
list_for_each_entry_safe(pos, tmp, &config_terms, list) {
list_del_init(&pos->list);
- zfree(&pos->val.str);
+ if (pos->free_str)
+ zfree(&pos->val.str);
free(pos);
}
return -EINVAL;
}
- evsel = __add_event(list, &parse_state->idx, &attr,
+ evsel = __add_event(list, &parse_state->idx, &attr, true,
get_config_name(head_config), pmu,
&config_terms, auto_merge_stats, NULL);
if (evsel) {
@@ -1630,12 +1670,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
* event. That can be used to distinguish the leader from
* other members, even if they have the same event name.
*/
- if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
+ if ((leader != evsel) &&
+ !strcmp(leader->pmu_name, evsel->pmu_name)) {
is_leader = false;
continue;
}
- /* The name is always alias name */
- WARN_ON(strcmp(leader->name, evsel->name));
/* Store the leader event for each PMU */
leaders[nr_pmu++] = (uintptr_t) evsel;
@@ -2002,13 +2041,14 @@ perf_pmu__parse_check(const char *name)
return r ? r->type : PMU_EVENT_SYMBOL_ERR;
}
-static int parse_events__scanner(const char *str, void *parse_state, int start_token)
+static int parse_events__scanner(const char *str,
+ struct parse_events_state *parse_state)
{
YY_BUFFER_STATE buffer;
void *scanner;
int ret;
- ret = parse_events_lex_init_extra(start_token, &scanner);
+ ret = parse_events_lex_init_extra(parse_state, &scanner);
if (ret)
return ret;
@@ -2016,6 +2056,7 @@ static int parse_events__scanner(const char *str, void *parse_state, int start_t
#ifdef PARSER_DEBUG
parse_events_debug = 1;
+ parse_events_set_debug(1, scanner);
#endif
ret = parse_events_parse(parse_state, scanner);
@@ -2031,11 +2072,12 @@ static int parse_events__scanner(const char *str, void *parse_state, int start_t
int parse_events_terms(struct list_head *terms, const char *str)
{
struct parse_events_state parse_state = {
- .terms = NULL,
+ .terms = NULL,
+ .stoken = PE_START_TERMS,
};
int ret;
- ret = parse_events__scanner(str, &parse_state, PE_START_TERMS);
+ ret = parse_events__scanner(str, &parse_state);
if (!ret) {
list_splice(parse_state.terms, terms);
zfree(&parse_state.terms);
@@ -2054,10 +2096,11 @@ int parse_events(struct evlist *evlist, const char *str,
.idx = evlist->core.nr_entries,
.error = err,
.evlist = evlist,
+ .stoken = PE_START_EVENTS,
};
int ret;
- ret = parse_events__scanner(str, &parse_state, PE_START_EVENTS);
+ ret = parse_events__scanner(str, &parse_state);
perf_pmu__parse_cleanup();
if (!ret && list_empty(&parse_state.list)) {
@@ -2817,6 +2860,8 @@ void print_events(const char *event_glob, bool name_only, bool quiet_flag,
print_sdt_events(NULL, NULL, name_only);
metricgroup__print(true, true, NULL, name_only, details_flag);
+
+ print_libpfm_events(name_only, long_desc);
}
int parse_events__is_hardcoded_term(struct parse_events_term *term)
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 6ead9661238c..1fe23a2f9b36 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -17,6 +17,7 @@ struct evlist;
struct parse_events_error;
struct option;
+struct perf_pmu;
struct tracepoint_path {
char *system;
@@ -128,6 +129,7 @@ struct parse_events_state {
struct parse_events_error *error;
struct evlist *evlist;
struct list_head *terms;
+ int stoken;
};
void parse_events__handle_error(struct parse_events_error *err, int idx,
@@ -187,6 +189,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
bool auto_merge_stats,
bool use_alias);
+struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
+ char *name, struct perf_pmu *pmu);
+
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
char *str,
struct list_head **listp);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index c589fc42f058..002802e17059 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -209,10 +209,10 @@ modifier_bp [rwx]{1,3}
%%
%{
- {
- int start_token;
+ struct parse_events_state *_parse_state = parse_events_get_extra(yyscanner);
- start_token = parse_events_get_extra(yyscanner);
+ {
+ int start_token = _parse_state->stoken;
if (start_token == PE_START_TERMS)
BEGIN(config);
@@ -220,7 +220,7 @@ modifier_bp [rwx]{1,3}
BEGIN(event);
if (start_token) {
- parse_events_set_extra(NULL, yyscanner);
+ _parse_state->stoken = 0;
/*
* The flex parser does not init locations variable
* via the scan_string interface, so we need do the
@@ -252,7 +252,9 @@ modifier_bp [rwx]{1,3}
BEGIN(INITIAL);
REWIND(0);
}
-
+, {
+ return ',';
+ }
}
<array>{
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
new file mode 100644
index 000000000000..d735acb6c29c
--- /dev/null
+++ b/tools/perf/util/pfm.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for libpfm4 event encoding.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#include "util/cpumap.h"
+#include "util/debug.h"
+#include "util/event.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
+#include "util/parse-events.h"
+#include "util/pmu.h"
+#include "util/pfm.h"
+
+#include <string.h>
+#include <linux/kernel.h>
+#include <perfmon/pfmlib_perf_event.h>
+
+static void libpfm_initialize(void)
+{
+ int ret;
+
+ ret = pfm_initialize();
+ if (ret != PFM_SUCCESS) {
+ ui__warning("libpfm failed to initialize: %s\n",
+ pfm_strerror(ret));
+ }
+}
+
+int parse_libpfm_events_option(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct evlist *evlist = *(struct evlist **)opt->value;
+ struct perf_event_attr attr;
+ struct perf_pmu *pmu;
+ struct evsel *evsel, *grp_leader = NULL;
+ char *p, *q, *p_orig;
+ const char *sep;
+ int grp_evt = -1;
+ int ret;
+
+ libpfm_initialize();
+
+ p_orig = p = strdup(str);
+ if (!p)
+ return -1;
+ /*
+ * force loading of the PMU list
+ */
+ perf_pmu__scan(NULL);
+
+ for (q = p; strsep(&p, ",{}"); q = p) {
+ sep = p ? str + (p - p_orig - 1) : "";
+ if (*sep == '{') {
+ if (grp_evt > -1) {
+ ui__error(
+ "nested event groups not supported\n");
+ goto error;
+ }
+ grp_evt++;
+ }
+
+ /* no event */
+ if (*q == '\0')
+ continue;
+
+ memset(&attr, 0, sizeof(attr));
+ event_attr_init(&attr);
+
+ ret = pfm_get_perf_event_encoding(q, PFM_PLM0|PFM_PLM3,
+ &attr, NULL, NULL);
+
+ if (ret != PFM_SUCCESS) {
+ ui__error("failed to parse event %s : %s\n", str,
+ pfm_strerror(ret));
+ goto error;
+ }
+
+ pmu = perf_pmu__find_by_type((unsigned int)attr.type);
+ evsel = parse_events__add_event(evlist->core.nr_entries,
+ &attr, q, pmu);
+ if (evsel == NULL)
+ goto error;
+
+ evsel->is_libpfm_event = true;
+
+ evlist__add(evlist, evsel);
+
+ if (grp_evt == 0)
+ grp_leader = evsel;
+
+ if (grp_evt > -1) {
+ evsel->leader = grp_leader;
+ grp_leader->core.nr_members++;
+ grp_evt++;
+ }
+
+ if (*sep == '}') {
+ if (grp_evt < 0) {
+ ui__error(
+ "cannot close a non-existing event group\n");
+ goto error;
+ }
+ evlist->nr_groups++;
+ grp_leader = NULL;
+ grp_evt = -1;
+ }
+ }
+ return 0;
+error:
+ free(p_orig);
+ return -1;
+}
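
The parser above backs the --pfm-events option; a usage sketch, with event names that are illustrative and depend on what libpfm4 exposes for the host PMU:

/*
 * Example invocations, matching the ",{}"-separated syntax parsed above:
 *
 *   perf stat --pfm-events cycles,instructions -- sleep 1
 *   perf record --pfm-events '{cycles,instructions},cycles' -- sleep 1
 *
 * Braces delimit an event group: the first member becomes the group
 * leader and evlist->nr_groups is bumped when the group is closed;
 * nested groups are rejected.
 */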
+
+static const char *srcs[PFM_ATTR_CTRL_MAX] = {
+ [PFM_ATTR_CTRL_UNKNOWN] = "???",
+ [PFM_ATTR_CTRL_PMU] = "PMU",
+ [PFM_ATTR_CTRL_PERF_EVENT] = "perf_event",
+};
+
+static void
+print_attr_flags(pfm_event_attr_info_t *info)
+{
+ int n = 0;
+
+ if (info->is_dfl) {
+ printf("[default] ");
+ n++;
+ }
+
+ if (info->is_precise) {
+ printf("[precise] ");
+ n++;
+ }
+
+ if (!n)
+ printf("- ");
+}
+
+static void
+print_libpfm_events_detailed(pfm_event_info_t *info, bool long_desc)
+{
+ pfm_event_attr_info_t ainfo;
+ const char *src;
+ int j, ret;
+
+ ainfo.size = sizeof(ainfo);
+
+ printf(" %s\n", info->name);
+ printf(" [%s]\n", info->desc);
+ if (long_desc) {
+ if (info->equiv)
+ printf(" Equiv: %s\n", info->equiv);
+
+ printf(" Code : 0x%"PRIx64"\n", info->code);
+ }
+ pfm_for_each_event_attr(j, info) {
+ ret = pfm_get_event_attr_info(info->idx, j,
+ PFM_OS_PERF_EVENT_EXT, &ainfo);
+ if (ret != PFM_SUCCESS)
+ continue;
+
+ if (ainfo.type == PFM_ATTR_UMASK) {
+ printf(" %s:%s\n", info->name, ainfo.name);
+ printf(" [%s]\n", ainfo.desc);
+ }
+
+ if (!long_desc)
+ continue;
+
+ if (ainfo.ctrl >= PFM_ATTR_CTRL_MAX)
+ ainfo.ctrl = PFM_ATTR_CTRL_UNKNOWN;
+
+ src = srcs[ainfo.ctrl];
+ switch (ainfo.type) {
+ case PFM_ATTR_UMASK:
+ printf(" Umask : 0x%02"PRIx64" : %s: ",
+ ainfo.code, src);
+ print_attr_flags(&ainfo);
+ putchar('\n');
+ break;
+ case PFM_ATTR_MOD_BOOL:
+ printf(" Modif : %s: [%s] : %s (boolean)\n", src,
+ ainfo.name, ainfo.desc);
+ break;
+ case PFM_ATTR_MOD_INTEGER:
+ printf(" Modif : %s: [%s] : %s (integer)\n", src,
+ ainfo.name, ainfo.desc);
+ break;
+ case PFM_ATTR_NONE:
+ case PFM_ATTR_RAW_UMASK:
+ case PFM_ATTR_MAX:
+ default:
+ printf(" Attr : %s: [%s] : %s\n", src,
+ ainfo.name, ainfo.desc);
+ }
+ }
+}
+
+/*
+ * list all pmu::event:umask, pmu::event
+ * printed events may not be all valid combinations of umask for an event
+ */
+static void
+print_libpfm_events_raw(pfm_pmu_info_t *pinfo, pfm_event_info_t *info)
+{
+ pfm_event_attr_info_t ainfo;
+ int j, ret;
+ bool has_umask = false;
+
+ ainfo.size = sizeof(ainfo);
+
+ pfm_for_each_event_attr(j, info) {
+ ret = pfm_get_event_attr_info(info->idx, j,
+ PFM_OS_PERF_EVENT_EXT, &ainfo);
+ if (ret != PFM_SUCCESS)
+ continue;
+
+ if (ainfo.type != PFM_ATTR_UMASK)
+ continue;
+
+ printf("%s::%s:%s\n", pinfo->name, info->name, ainfo.name);
+ has_umask = true;
+ }
+ if (!has_umask)
+ printf("%s::%s\n", pinfo->name, info->name);
+}
+
+void print_libpfm_events(bool name_only, bool long_desc)
+{
+ pfm_event_info_t info;
+ pfm_pmu_info_t pinfo;
+ int i, p, ret;
+
+ libpfm_initialize();
+
+ /* initialize to zero to indicate ABI version */
+ info.size = sizeof(info);
+ pinfo.size = sizeof(pinfo);
+
+ if (!name_only)
+ puts("\nList of pre-defined events (to be used in --pfm-events):\n");
+
+ pfm_for_all_pmus(p) {
+ bool printed_pmu = false;
+
+ ret = pfm_get_pmu_info(p, &pinfo);
+ if (ret != PFM_SUCCESS)
+ continue;
+
+ /* only print events that are supported by host HW */
+ if (!pinfo.is_present)
+ continue;
+
+ /* handled by perf directly */
+ if (pinfo.pmu == PFM_PMU_PERF_EVENT)
+ continue;
+
+ for (i = pinfo.first_event; i != -1;
+ i = pfm_get_event_next(i)) {
+
+ ret = pfm_get_event_info(i, PFM_OS_PERF_EVENT_EXT,
+ &info);
+ if (ret != PFM_SUCCESS)
+ continue;
+
+ if (!name_only && !printed_pmu) {
+ printf("%s:\n", pinfo.name);
+ printed_pmu = true;
+ }
+
+ if (!name_only)
+ print_libpfm_events_detailed(&info, long_desc);
+ else
+ print_libpfm_events_raw(&pinfo, &info);
+ }
+ if (!name_only && printed_pmu)
+ putchar('\n');
+ }
+}
diff --git a/tools/perf/util/pfm.h b/tools/perf/util/pfm.h
new file mode 100644
index 000000000000..7d70dda87012
--- /dev/null
+++ b/tools/perf/util/pfm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for libpfm4 event encoding.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#ifndef __PERF_PFM_H
+#define __PERF_PFM_H
+
+#include <subcmd/parse-options.h>
+
+#ifdef HAVE_LIBPFM
+int parse_libpfm_events_option(const struct option *opt, const char *str,
+ int unset);
+
+void print_libpfm_events(bool name_only, bool long_desc);
+
+#else
+#include <linux/compiler.h>
+
+static inline int parse_libpfm_events_option(
+ const struct option *opt __maybe_unused,
+ const char *str __maybe_unused,
+ int unset __maybe_unused)
+{
+ return 0;
+}
+
+static inline void print_libpfm_events(bool name_only __maybe_unused,
+ bool long_desc __maybe_unused)
+{
+}
+
+#endif
+
+
+#endif /* __PERF_PFM_H */
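
For context, a sketch of how a perf builtin could wire these entry points into its option table via subcmd/parse-options.h; the option array and help text here are illustrative, not taken from the patch:

#include <subcmd/parse-options.h>
#include "util/evlist.h"
#include "util/pfm.h"

static struct evlist *evsel_list;

static struct option example_options[] = {
#ifdef HAVE_LIBPFM
	/* opt->value is the evlist the callback adds parsed events to */
	OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
		     "libpfm4 event selector. Use 'perf list' to list available events",
		     parse_libpfm_events_option),
#endif
	OPT_END()
};

When libpfm4 is not available, the static inline stubs above keep such a table compiling, but the option would silently do nothing, which is why the entry is guarded by HAVE_LIBPFM.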
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 92bd7fafcce6..93fe72a9dc0b 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1056,7 +1056,8 @@ error:
* Setup one of config[12] attr members based on the
* user input data - term parameter.
*/
-static int pmu_config_term(struct list_head *formats,
+static int pmu_config_term(const char *pmu_name,
+ struct list_head *formats,
struct perf_event_attr *attr,
struct parse_events_term *term,
struct list_head *head_terms,
@@ -1082,16 +1083,24 @@ static int pmu_config_term(struct list_head *formats,
format = pmu_find_format(formats, term->config);
if (!format) {
- if (verbose > 0)
- printf("Invalid event/parameter '%s'\n", term->config);
+ char *pmu_term = pmu_formats_string(formats);
+ char *unknown_term;
+ char *help_msg;
+
+ if (asprintf(&unknown_term,
+ "unknown term '%s' for pmu '%s'",
+ term->config, pmu_name) < 0)
+ unknown_term = NULL;
+ help_msg = parse_events_formats_error_string(pmu_term);
if (err) {
- char *pmu_term = pmu_formats_string(formats);
-
parse_events__handle_error(err, term->err_term,
- strdup("unknown term"),
- parse_events_formats_error_string(pmu_term));
- free(pmu_term);
+ unknown_term,
+ help_msg);
+ } else {
+ pr_debug("%s (%s)\n", unknown_term, help_msg);
+ free(unknown_term);
}
+ free(pmu_term);
return -EINVAL;
}
@@ -1168,7 +1177,7 @@ static int pmu_config_term(struct list_head *formats,
return 0;
}
-int perf_pmu__config_terms(struct list_head *formats,
+int perf_pmu__config_terms(const char *pmu_name, struct list_head *formats,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero, struct parse_events_error *err)
@@ -1176,7 +1185,7 @@ int perf_pmu__config_terms(struct list_head *formats,
struct parse_events_term *term;
list_for_each_entry(term, head_terms, list) {
- if (pmu_config_term(formats, attr, term, head_terms,
+ if (pmu_config_term(pmu_name, formats, attr, term, head_terms,
zero, err))
return -EINVAL;
}
@@ -1196,8 +1205,8 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
bool zero = !!pmu->default_config;
attr->type = pmu->type;
- return perf_pmu__config_terms(&pmu->format, attr, head_terms,
- zero, err);
+ return perf_pmu__config_terms(pmu->name, &pmu->format, attr,
+ head_terms, zero, err);
}
static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index cb6fbec50313..85e0c7f2515c 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -9,7 +9,7 @@
#include "parse-events.h"
#include "pmu-events/pmu-events.h"
-struct perf_evsel_config_term;
+struct evsel_config_term;
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
@@ -76,7 +76,7 @@ struct perf_pmu *perf_pmu__find_by_type(unsigned int type);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms,
struct parse_events_error *error);
-int perf_pmu__config_terms(struct list_head *formats,
+int perf_pmu__config_terms(const char *pmu_name, struct list_head *formats,
struct perf_event_attr *attr,
struct list_head *head_terms,
bool zero, struct parse_events_error *error);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index eea132f512b0..a08f373d3305 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -102,7 +102,7 @@ void exit_probe_symbol_maps(void)
symbol__exit();
}
-static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
+static struct ref_reloc_sym *kernel_get_ref_reloc_sym(struct map **pmap)
{
/* kmap->ref_reloc_sym should be set if host_machine is initialized */
struct kmap *kmap;
@@ -114,6 +114,10 @@ static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
kmap = map__kmap(map);
if (!kmap)
return NULL;
+
+ if (pmap)
+ *pmap = map;
+
return kmap->ref_reloc_sym;
}
@@ -125,7 +129,7 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr,
struct map *map;
/* ref_reloc_sym is just a label. Need a special fix */
- reloc_sym = kernel_get_ref_reloc_sym();
+ reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
*addr = (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
else {
@@ -232,21 +236,22 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
static bool kprobe_blacklist__listed(unsigned long address);
static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
{
- u64 etext_addr = 0;
- int ret;
-
- /* Get the address of _etext for checking non-probable text symbol */
- ret = kernel_get_symbol_address_by_name("_etext", &etext_addr,
- false, false);
+ struct map *map;
+ bool ret = false;
- if (ret == 0 && etext_addr < address)
- pr_warning("%s is out of .text, skip it.\n", symbol);
- else if (kprobe_blacklist__listed(address))
+ map = kernel_get_module_map(NULL);
+ if (map) {
+ ret = address <= map->start || map->end < address;
+ if (ret)
+ pr_warning("%s is out of .text, skip it.\n", symbol);
+ map__put(map);
+ }
+ if (!ret && kprobe_blacklist__listed(address)) {
pr_warning("%s is blacklisted function, skip it.\n", symbol);
- else
- return false;
+ ret = true;
+ }
- return true;
+ return ret;
}
/*
@@ -745,6 +750,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
int ntevs)
{
struct ref_reloc_sym *reloc_sym;
+ struct map *map;
char *tmp;
int i, skipped = 0;
@@ -753,7 +759,7 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
return post_process_offline_probe_trace_events(tevs, ntevs,
symbol_conf.vmlinux_name);
- reloc_sym = kernel_get_ref_reloc_sym();
+ reloc_sym = kernel_get_ref_reloc_sym(&map);
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found!\n");
return -EINVAL;
@@ -764,9 +770,13 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
continue;
if (tevs[i].point.retprobe && !kretprobe_offset_is_supported())
continue;
- /* If we found a wrong one, mark it by NULL symbol */
+ /*
+ * If we found a wrong one, mark it with a NULL symbol.
+ * Since addresses in debuginfo are the same as in objdump, we
+ * need to convert them to addresses in memory.
+ */
if (kprobe_warn_out_range(tevs[i].point.symbol,
- tevs[i].point.address)) {
+ map__objdump_2mem(map, tevs[i].point.address))) {
tmp = NULL;
skipped++;
} else {
@@ -1765,8 +1775,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
fmt1_str = strtok_r(argv0_str, ":", &fmt);
fmt2_str = strtok_r(NULL, "/", &fmt);
fmt3_str = strtok_r(NULL, " \t", &fmt);
- if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL
- || fmt3_str == NULL) {
+ if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) {
semantic_error("Failed to parse event name: %s\n", argv[0]);
ret = -EINVAL;
goto out;
@@ -2936,7 +2945,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
/* Note that the symbols in the kmodule are not relocated */
if (!pev->uprobes && !pev->target &&
(!pp->retprobe || kretprobe_offset_is_supported())) {
- reloc_sym = kernel_get_ref_reloc_sym();
+ reloc_sym = kernel_get_ref_reloc_sym(NULL);
if (!reloc_sym) {
pr_warning("Relocated base symbol is not found!\n");
ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index e4cff49384f4..55924255c535 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -101,6 +101,7 @@ enum dso_binary_type distro_dwarf_types[] = {
DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
diff --git a/tools/perf/util/pstack.c b/tools/perf/util/pstack.c
index 80ff41fc45be..a1d1e4ef6257 100644
--- a/tools/perf/util/pstack.c
+++ b/tools/perf/util/pstack.c
@@ -15,7 +15,7 @@
struct pstack {
unsigned short top;
unsigned short max_nr_entries;
- void *entries[0];
+ void *entries[];
};
struct pstack *pstack__new(unsigned short max_nr_entries)
diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
index 923565c3b155..39d1de4b2a36 100644
--- a/tools/perf/util/record.h
+++ b/tools/perf/util/record.h
@@ -36,6 +36,7 @@ struct record_opts {
bool record_namespaces;
bool record_cgroup;
bool record_switch_events;
+ bool record_switch_events_set;
bool all_kernel;
bool all_user;
bool kernel_callchains;
@@ -76,4 +77,9 @@ extern struct option *record_options;
int record__parse_freq(const struct option *opt, const char *str, int unset);
+static inline bool record_opts__no_switch_events(const struct record_opts *opts)
+{
+ return opts->record_switch_events_set && !opts->record_switch_events;
+}
+
#endif // _PERF_RECORD_H
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c11d89e0ee55..1a157e84a04a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -33,7 +33,6 @@
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>
-#include <linux/err.h>
#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
@@ -1104,7 +1103,7 @@ static void regs_dump__printf(u64 mask, u64 *regs)
for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs[i++];
- printf(".... %-5s 0x%" PRIx64 "\n",
+ printf(".... %-5s 0x%016" PRIx64 "\n",
perf_reg_name(rid), val);
}
}
@@ -1542,8 +1541,13 @@ static s64 perf_session__process_user_event(struct perf_session *session,
*/
return 0;
case PERF_RECORD_HEADER_TRACING_DATA:
- /* setup for reading amidst mmap */
- lseek(fd, file_offset, SEEK_SET);
+ /*
+ * Setup for reading amidst mmap, but only when we
+ * are in 'file' mode. The 'pipe' fd is already in
+ * the proper place.
+ */
+ if (!perf_data__is_pipe(session->data))
+ lseek(fd, file_offset, SEEK_SET);
return tool->tracing_data(session, event);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(session, event);
diff --git a/tools/perf/util/sideband_evlist.c b/tools/perf/util/sideband_evlist.c
index 1580a3cbec2d..ded9ced02797 100644
--- a/tools/perf/util/sideband_evlist.c
+++ b/tools/perf/util/sideband_evlist.c
@@ -22,7 +22,7 @@ int perf_evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *att
attr->sample_id_all = 1;
}
- evsel = perf_evsel__new_idx(attr, evlist->core.nr_entries);
+ evsel = evsel__new_idx(attr, evlist->core.nr_entries);
if (!evsel)
return -1;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index c1f8879f92cc..d42339df20f8 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -2817,7 +2817,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
return str;
if (asprintf(&n, "%s,%s", pre, str) < 0)
- return NULL;
+ n = NULL;
free(str);
return n;
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 129b8c5f2538..a7c13a88ecb9 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -323,35 +323,46 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
struct evsel *counter, *leader, **metric_events, *oc;
bool found;
- const char **metric_names;
+ struct expr_parse_ctx ctx;
+ struct hashmap_entry *cur;
+ size_t bkt;
int i;
- int num_metric_names;
+ expr__ctx_init(&ctx);
evlist__for_each_entry(evsel_list, counter) {
bool invalid = false;
leader = counter->leader;
if (!counter->metric_expr)
continue;
+
+ expr__ctx_clear(&ctx);
metric_events = counter->metric_events;
if (!metric_events) {
- if (expr__find_other(counter->metric_expr, counter->name,
- &metric_names, &num_metric_names, 1) < 0)
+ if (expr__find_other(counter->metric_expr,
+ counter->name,
+ &ctx, 1) < 0)
continue;
metric_events = calloc(sizeof(struct evsel *),
- num_metric_names + 1);
- if (!metric_events)
+ hashmap__size(&ctx.ids) + 1);
+ if (!metric_events) {
+ expr__ctx_clear(&ctx);
return;
+ }
counter->metric_events = metric_events;
}
- for (i = 0; i < num_metric_names; i++) {
+ i = 0;
+ hashmap__for_each_entry((&ctx.ids), cur, bkt) {
+ const char *metric_name = (const char *)cur->key;
+
found = false;
if (leader) {
/* Search in group */
for_each_group_member (oc, leader) {
- if (!strcasecmp(oc->name, metric_names[i]) &&
+ if (!strcasecmp(oc->name,
+ metric_name) &&
!oc->collect_stat) {
found = true;
break;
@@ -360,7 +371,8 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
}
if (!found) {
/* Search ignoring groups */
- oc = perf_stat__find_event(evsel_list, metric_names[i]);
+ oc = perf_stat__find_event(evsel_list,
+ metric_name);
}
if (!oc) {
/* Deduping one is good enough to handle duplicated PMUs. */
@@ -373,27 +385,28 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
* of events. So we ask the user instead to add the missing
* events.
*/
- if (!printed || strcasecmp(printed, metric_names[i])) {
+ if (!printed ||
+ strcasecmp(printed, metric_name)) {
fprintf(stderr,
"Add %s event to groups to get metric expression for %s\n",
- metric_names[i],
+ metric_name,
counter->name);
- printed = strdup(metric_names[i]);
+ printed = strdup(metric_name);
}
invalid = true;
continue;
}
- metric_events[i] = oc;
+ metric_events[i++] = oc;
oc->collect_stat = true;
}
metric_events[i] = NULL;
- free(metric_names);
if (invalid) {
free(metric_events);
counter->metric_events = NULL;
counter->metric_expr = NULL;
}
}
+ expr__ctx_clear(&ctx);
}
static double runtime_stat_avg(struct runtime_stat *st,
@@ -724,7 +737,6 @@ static void generic_metric(struct perf_stat_config *config,
const char *metric_name,
const char *metric_unit,
int runtime,
- double avg,
int cpu,
struct perf_stat_output_ctx *out,
struct runtime_stat *st)
@@ -737,8 +749,6 @@ static void generic_metric(struct perf_stat_config *config,
char *n, *pn;
expr__ctx_init(&pctx);
- /* Must be first id entry */
- expr__add_id(&pctx, name, avg);
for (i = 0; metric_events[i]; i++) {
struct saved_value *v;
struct stats *stats;
@@ -797,7 +807,7 @@ static void generic_metric(struct perf_stat_config *config,
print_metric(config, ctxp, NULL, "%8.1f",
metric_bf, ratio);
} else {
- print_metric(config, ctxp, NULL, "%8.1f",
+ print_metric(config, ctxp, NULL, "%8.2f",
metric_name ?
metric_name :
out->force_header ? name : "",
@@ -814,8 +824,7 @@ static void generic_metric(struct perf_stat_config *config,
(metric_name ? metric_name : name) : "", 0);
}
- for (i = 1; i < pctx.num_ids; i++)
- zfree(&pctx.ids[i].name);
+ expr__ctx_clear(&pctx);
}
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
@@ -1027,7 +1036,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
print_metric(config, ctxp, NULL, NULL, name, 0);
} else if (evsel->metric_expr) {
generic_metric(config, evsel->metric_expr, evsel->metric_events, evsel->name,
- evsel->metric_name, NULL, 1, avg, cpu, out, st);
+ evsel->metric_name, NULL, 1, cpu, out, st);
} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
char unit = 'M';
char unit_buf[10];
@@ -1056,7 +1065,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
out->new_line(config, ctxp);
generic_metric(config, mexp->metric_expr, mexp->metric_events,
evsel->name, mexp->metric_name,
- mexp->metric_unit, mexp->runtime, avg, cpu, out, st);
+ mexp->metric_unit, mexp->runtime, cpu, out, st);
}
}
if (num == 0)
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 774468341851..cdb154381a87 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -115,7 +115,7 @@ static void perf_stat_evsel_id_init(struct evsel *evsel)
}
}
-static void perf_evsel__reset_stat_priv(struct evsel *evsel)
+static void evsel__reset_stat_priv(struct evsel *evsel)
{
int i;
struct perf_stat_evsel *ps = evsel->stats;
@@ -126,16 +126,16 @@ static void perf_evsel__reset_stat_priv(struct evsel *evsel)
perf_stat_evsel_id_init(evsel);
}
-static int perf_evsel__alloc_stat_priv(struct evsel *evsel)
+static int evsel__alloc_stat_priv(struct evsel *evsel)
{
evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
if (evsel->stats == NULL)
return -ENOMEM;
- perf_evsel__reset_stat_priv(evsel);
+ evsel__reset_stat_priv(evsel);
return 0;
}
-static void perf_evsel__free_stat_priv(struct evsel *evsel)
+static void evsel__free_stat_priv(struct evsel *evsel)
{
struct perf_stat_evsel *ps = evsel->stats;
@@ -144,8 +144,7 @@ static void perf_evsel__free_stat_priv(struct evsel *evsel)
zfree(&evsel->stats);
}
-static int perf_evsel__alloc_prev_raw_counts(struct evsel *evsel,
- int ncpus, int nthreads)
+static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
{
struct perf_counts *counts;
@@ -156,29 +155,26 @@ static int perf_evsel__alloc_prev_raw_counts(struct evsel *evsel,
return counts ? 0 : -ENOMEM;
}
-static void perf_evsel__free_prev_raw_counts(struct evsel *evsel)
+static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
perf_counts__delete(evsel->prev_raw_counts);
evsel->prev_raw_counts = NULL;
}
-static void perf_evsel__reset_prev_raw_counts(struct evsel *evsel)
+static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
- if (evsel->prev_raw_counts) {
- evsel->prev_raw_counts->aggr.val = 0;
- evsel->prev_raw_counts->aggr.ena = 0;
- evsel->prev_raw_counts->aggr.run = 0;
- }
+ if (evsel->prev_raw_counts)
+ perf_counts__reset(evsel->prev_raw_counts);
}
-static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
+static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
int ncpus = evsel__nr_cpus(evsel);
int nthreads = perf_thread_map__nr(evsel->core.threads);
- if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
- perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
- (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
+ if (evsel__alloc_stat_priv(evsel) < 0 ||
+ evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
+ (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
return -ENOMEM;
return 0;
@@ -189,7 +185,7 @@ int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- if (perf_evsel__alloc_stats(evsel, alloc_raw))
+ if (evsel__alloc_stats(evsel, alloc_raw))
goto out_free;
}
@@ -205,9 +201,9 @@ void perf_evlist__free_stats(struct evlist *evlist)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- perf_evsel__free_stat_priv(evsel);
- perf_evsel__free_counts(evsel);
- perf_evsel__free_prev_raw_counts(evsel);
+ evsel__free_stat_priv(evsel);
+ evsel__free_counts(evsel);
+ evsel__free_prev_raw_counts(evsel);
}
}
@@ -216,8 +212,8 @@ void perf_evlist__reset_stats(struct evlist *evlist)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
- perf_evsel__reset_stat_priv(evsel);
- perf_evsel__reset_counts(evsel);
+ evsel__reset_stat_priv(evsel);
+ evsel__reset_counts(evsel);
}
}
@@ -226,7 +222,51 @@ void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel)
- perf_evsel__reset_prev_raw_counts(evsel);
+ evsel__reset_prev_raw_counts(evsel);
+}
+
+static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
+{
+ int ncpus = evsel__nr_cpus(evsel);
+ int nthreads = perf_thread_map__nr(evsel->core.threads);
+
+ for (int thread = 0; thread < nthreads; thread++) {
+ for (int cpu = 0; cpu < ncpus; cpu++) {
+ *perf_counts(evsel->counts, cpu, thread) =
+ *perf_counts(evsel->prev_raw_counts, cpu,
+ thread);
+ }
+ }
+
+ evsel->counts->aggr = evsel->prev_raw_counts->aggr;
+}
+
+void perf_evlist__copy_prev_raw_counts(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel)
+ perf_evsel__copy_prev_raw_counts(evsel);
+}
+
+void perf_evlist__save_aggr_prev_raw_counts(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ /*
+ * To collect the overall statistics for interval mode,
+ * we copy the counts from evsel->prev_raw_counts to
+ * evsel->counts. The perf_stat_process_counter creates
+ * aggr values from per cpu values, but the per cpu values
+ * are 0 for AGGR_GLOBAL. So we use a trick that saves the
+ * previous aggr value to the first member of perf_counts,
+ * so that the aggr calculation in process_counter_values
+ * works correctly.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ *perf_counts(evsel->prev_raw_counts, 0, 0) =
+ evsel->prev_raw_counts->aggr;
+ }
}
static void zero_per_pkg(struct evsel *counter)
@@ -368,7 +408,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
* interval mode, otherwise overall avg running
* averages will be shown for each interval.
*/
- if (config->interval) {
+ if (config->interval || config->summary) {
for (i = 0; i < 3; i++)
init_stats(&ps->res_stats[i]);
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index b4fdfaa7f2c0..f75ae679eb28 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -110,6 +110,9 @@ struct perf_stat_config {
bool all_kernel;
bool all_user;
bool percore_show_thread;
+ bool summary;
+ bool metric_no_group;
+ bool metric_no_merge;
FILE *output;
unsigned int interval;
unsigned int timeout;
@@ -132,6 +135,8 @@ struct perf_stat_config {
struct rblist metric_events;
};
+void perf_stat__set_big_num(int set);
+
void update_stats(struct stats *stats, u64 val);
double avg_stats(struct stats *stats);
double stddev_stats(struct stats *stats);
@@ -198,6 +203,8 @@ int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct evlist *evlist);
void perf_evlist__reset_stats(struct evlist *evlist);
void perf_evlist__reset_prev_raw_counts(struct evlist *evlist);
+void perf_evlist__copy_prev_raw_counts(struct evlist *evlist);
+void perf_evlist__save_aggr_prev_raw_counts(struct evlist *evlist);
int perf_stat_process_counter(struct perf_stat_config *config,
struct evsel *counter);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index be5b493f8284..5e43054bffea 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1458,6 +1458,7 @@ struct kcore_copy_info {
u64 first_symbol;
u64 last_symbol;
u64 first_module;
+ u64 first_module_symbol;
u64 last_module_symbol;
size_t phnum;
struct list_head phdrs;
@@ -1534,6 +1535,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
return 0;
if (strchr(name, '[')) {
+ if (!kci->first_module_symbol || start < kci->first_module_symbol)
+ kci->first_module_symbol = start;
if (start > kci->last_module_symbol)
kci->last_module_symbol = start;
return 0;
@@ -1731,6 +1734,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
kci->etext += page_size;
}
+ if (kci->first_module_symbol &&
+ (!kci->first_module || kci->first_module_symbol < kci->first_module))
+ kci->first_module = kci->first_module_symbol;
+
kci->first_module = round_down(kci->first_module, page_size);
if (kci->last_module_symbol) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 381da6b39f89..5ddf84dcbae7 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -79,6 +79,7 @@ static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+ DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -1223,6 +1224,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
m->end = old_map->start;
list_add_tail(&m->node, &merged);
+ new_map->pgoff += old_map->end - new_map->start;
new_map->start = old_map->end;
}
} else {
@@ -1243,6 +1245,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
* |new......| -> |new...|
* |old....| -> |old....|
*/
+ new_map->pgoff += old_map->end - new_map->start;
new_map->start = old_map->end;
}
}
@@ -1529,6 +1532,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
return !kmod && dso->kernel == DSO_TYPE_USER;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 93fc43db1be3..ff4f4c47e148 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -55,7 +55,7 @@ struct symbol {
u8 inlined:1;
u8 arch_sym;
bool annotate2;
- char name[0];
+ char name[];
};
void symbol__delete(struct symbol *sym);
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 820fceeb19a9..03bd99d3be16 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -8,9 +8,9 @@
#include "syscalltbl.h"
#include <stdlib.h>
#include <linux/compiler.h>
+#include <linux/zalloc.h>
#ifdef HAVE_SYSCALL_TABLE_SUPPORT
-#include <linux/zalloc.h>
#include <string.h>
#include "string2.h"
@@ -142,7 +142,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g
struct syscalltbl *syscalltbl__new(void)
{
- struct syscalltbl *tbl = malloc(sizeof(*tbl));
+ struct syscalltbl *tbl = zalloc(sizeof(*tbl));
if (tbl)
tbl->audit_machine = audit_detect_machine();
return tbl;
diff --git a/tools/perf/util/syscalltbl.h b/tools/perf/util/syscalltbl.h
index 9172613028d0..a41d2ca9e4ae 100644
--- a/tools/perf/util/syscalltbl.h
+++ b/tools/perf/util/syscalltbl.h
@@ -3,14 +3,12 @@
#define __PERF_SYSCALLTBL_H
struct syscalltbl {
- union {
- int audit_machine;
- struct {
- int max_id;
- int nr_entries;
- void *entries;
- } syscalls;
- };
+ int audit_machine;
+ struct {
+ int max_id;
+ int nr_entries;
+ void *entries;
+ } syscalls;
};
struct syscalltbl *syscalltbl__new(void);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 086e98ff42a3..0e5c4786f296 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -428,7 +428,7 @@ try_id:
if (!ppath->next) {
error:
pr_debug("No memory to alloc tracepoints list\n");
- put_tracepoints_path(&path);
+ put_tracepoints_path(path.next);
return NULL;
}
next:
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index b4649f5a0c2f..9aededc0bc06 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -243,7 +243,7 @@ struct eh_frame_hdr {
* encoded_t fde_addr;
* } binary_search_table[fde_count];
*/
- char data[0];
+ char data[];
} __packed;
static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 5aaddc79adf7..dd38c2b2e1b4 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -35,7 +35,7 @@ static acpi_status osl_add_table_to_list(char *signature, u32 instance);
static acpi_status
osl_read_table_from_file(char *filename,
acpi_size file_offset,
- char *signature, struct acpi_table_header **table);
+ struct acpi_table_header **table);
static acpi_status
osl_map_table(acpi_size address,
@@ -1184,8 +1184,6 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
*
* PARAMETERS: filename - File that contains the desired table
* file_offset - Offset of the table in file
- * signature - Optional ACPI Signature for desired table.
- * A null terminated 4-character string.
* table - Where a pointer to the table is returned
*
* RETURN: Status; Table buffer is returned if AE_OK.
@@ -1197,7 +1195,7 @@ osl_table_name_from_file(char *filename, char *signature, u32 *instance)
static acpi_status
osl_read_table_from_file(char *filename,
acpi_size file_offset,
- char *signature, struct acpi_table_header **table)
+ struct acpi_table_header **table)
{
FILE *table_file;
struct acpi_table_header header;
@@ -1225,6 +1223,8 @@ osl_read_table_from_file(char *filename,
goto exit;
}
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+
/* If signature is specified, it must match the table */
if (signature) {
@@ -1244,6 +1244,7 @@ osl_read_table_from_file(char *filename,
goto exit;
}
}
+#endif
table_length = ap_get_table_length(&header);
if (table_length == 0) {
@@ -1366,7 +1367,7 @@ osl_get_customized_table(char *pathname,
/* There is no physical address saved for customized tables, use zero */
*address = 0;
- status = osl_read_table_from_file(table_filename, 0, NULL, table);
+ status = osl_read_table_from_file(table_filename, 0, table);
return (status);
}
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 7dca74774dd2..787b6d4ad716 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -20,11 +20,20 @@ import kunit_config
import kunit_kernel
import kunit_parser
-KunitResult = namedtuple('KunitResult', ['status','result'])
+KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time'])
+KunitConfigRequest = namedtuple('KunitConfigRequest',
+ ['build_dir', 'make_options'])
+KunitBuildRequest = namedtuple('KunitBuildRequest',
+ ['jobs', 'build_dir', 'alltests',
+ 'make_options'])
+KunitExecRequest = namedtuple('KunitExecRequest',
+ ['timeout', 'build_dir', 'alltests'])
+KunitParseRequest = namedtuple('KunitParseRequest',
+ ['raw_output', 'input_data'])
KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
- 'build_dir', 'defconfig',
- 'alltests', 'make_options'])
+ 'build_dir', 'alltests',
+ 'make_options'])
KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
@@ -46,14 +55,24 @@ def get_kernel_root_path():
sys.exit(1)
return parts[0]
-def run_tests(linux: kunit_kernel.LinuxSourceTree,
- request: KunitRequest) -> KunitResult:
+def config_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitConfigRequest) -> KunitResult:
+ kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...')
+
config_start = time.time()
+ create_default_kunitconfig()
success = linux.build_reconfig(request.build_dir, request.make_options)
config_end = time.time()
if not success:
- return KunitResult(KunitStatus.CONFIG_FAILURE, 'could not configure kernel')
+ return KunitResult(KunitStatus.CONFIG_FAILURE,
+ 'could not configure kernel',
+ config_end - config_start)
+ return KunitResult(KunitStatus.SUCCESS,
+ 'configured kernel successfully',
+ config_end - config_start)
+def build_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitBuildRequest) -> KunitResult:
kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
build_start = time.time()
@@ -64,86 +83,166 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
build_end = time.time()
if not success:
- return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
+ if not success:
+ return KunitResult(KunitStatus.BUILD_FAILURE,
+ 'could not build kernel',
+ build_end - build_start)
+ return KunitResult(KunitStatus.SUCCESS,
+ 'built kernel successfully',
+ build_end - build_start)
+def exec_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitExecRequest) -> KunitResult:
kunit_parser.print_with_timestamp('Starting KUnit Kernel ...')
test_start = time.time()
- kunit_output = linux.run_kernel(
+ result = linux.run_kernel(
timeout=None if request.alltests else request.timeout,
build_dir=request.build_dir)
+
+ test_end = time.time()
+
+ return KunitResult(KunitStatus.SUCCESS,
+ result,
+ test_end - test_start)
+
+def parse_tests(request: KunitParseRequest) -> KunitResult:
+ parse_start = time.time()
+
+ test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
+ [],
+ 'Tests not Parsed.')
if request.raw_output:
- raw_output = kunit_parser.raw_output(kunit_output)
- isolated = list(kunit_parser.isolate_kunit_output(raw_output))
- test_result = kunit_parser.parse_test_result(isolated)
+ kunit_parser.raw_output(request.input_data)
else:
- test_result = kunit_parser.parse_run_tests(kunit_output)
- test_end = time.time()
+ test_result = kunit_parser.parse_run_tests(request.input_data)
+ parse_end = time.time()
+
+ if test_result.status != kunit_parser.TestStatus.SUCCESS:
+ return KunitResult(KunitStatus.TEST_FAILURE, test_result,
+ parse_end - parse_start)
+
+ return KunitResult(KunitStatus.SUCCESS, test_result,
+ parse_end - parse_start)
+
+
+def run_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitRequest) -> KunitResult:
+ run_start = time.time()
+
+ config_request = KunitConfigRequest(request.build_dir,
+ request.make_options)
+ config_result = config_tests(linux, config_request)
+ if config_result.status != KunitStatus.SUCCESS:
+ return config_result
+
+ build_request = KunitBuildRequest(request.jobs, request.build_dir,
+ request.alltests,
+ request.make_options)
+ build_result = build_tests(linux, build_request)
+ if build_result.status != KunitStatus.SUCCESS:
+ return build_result
+
+ exec_request = KunitExecRequest(request.timeout, request.build_dir,
+ request.alltests)
+ exec_result = exec_tests(linux, exec_request)
+ if exec_result.status != KunitStatus.SUCCESS:
+ return exec_result
+
+ parse_request = KunitParseRequest(request.raw_output,
+ exec_result.result)
+ parse_result = parse_tests(parse_request)
+
+ run_end = time.time()
kunit_parser.print_with_timestamp((
'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
'building, %.3fs running\n') % (
- test_end - config_start,
- config_end - config_start,
- build_end - build_start,
- test_end - test_start))
+ run_end - run_start,
+ config_result.elapsed_time,
+ build_result.elapsed_time,
+ exec_result.elapsed_time))
+ return parse_result
+
+def add_common_opts(parser):
+ parser.add_argument('--build_dir',
+ help='As in the make command, it specifies the build '
+ 'directory.',
+ type=str, default='.kunit', metavar='build_dir')
+ parser.add_argument('--make_options',
+ help='X=Y make option, can be repeated.',
+ action='append')
+ parser.add_argument('--alltests',
+ help='Run all KUnit tests through allyesconfig',
+ action='store_true')
+
+def add_build_opts(parser):
+ parser.add_argument('--jobs',
+ help='As in the make command, "Specifies the number of '
+ 'jobs (commands) to run simultaneously."',
+ type=int, default=8, metavar='jobs')
+
+def add_exec_opts(parser):
+ parser.add_argument('--timeout',
+ help='maximum number of seconds to allow for all tests '
+ 'to run. This does not include time taken to build the '
+ 'tests.',
+ type=int,
+ default=300,
+ metavar='timeout')
+
+def add_parse_opts(parser):
+ parser.add_argument('--raw_output', help='don\'t format output from kernel',
+ action='store_true')
- if test_result.status != kunit_parser.TestStatus.SUCCESS:
- return KunitResult(KunitStatus.TEST_FAILURE, test_result)
- else:
- return KunitResult(KunitStatus.SUCCESS, test_result)
def main(argv, linux=None):
parser = argparse.ArgumentParser(
description='Helps writing and running KUnit tests.')
subparser = parser.add_subparsers(dest='subcommand')
+ # The 'run' command will config, build, exec, and parse in one go.
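+ # (For instance, "./tools/testing/kunit/kunit.py run --jobs=8 --timeout=300"
+ # behaves roughly like running "config", "build", and "exec" in turn and
+ # then parsing the exec output.)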
run_parser = subparser.add_parser('run', help='Runs KUnit tests.')
- run_parser.add_argument('--raw_output', help='don\'t format output from kernel',
- action='store_true')
-
- run_parser.add_argument('--timeout',
- help='maximum number of seconds to allow for all tests '
- 'to run. This does not include time taken to build the '
- 'tests.',
- type=int,
- default=300,
- metavar='timeout')
-
- run_parser.add_argument('--jobs',
- help='As in the make command, "Specifies the number of '
- 'jobs (commands) to run simultaneously."',
- type=int, default=8, metavar='jobs')
-
- run_parser.add_argument('--build_dir',
- help='As in the make command, it specifies the build '
- 'directory.',
- type=str, default='', metavar='build_dir')
-
- run_parser.add_argument('--defconfig',
- help='Uses a default .kunitconfig.',
- action='store_true')
-
- run_parser.add_argument('--alltests',
- help='Run all KUnit tests through allyesconfig',
- action='store_true')
-
- run_parser.add_argument('--make_options',
- help='X=Y make option, can be repeated.',
- action='append')
+ add_common_opts(run_parser)
+ add_build_opts(run_parser)
+ add_exec_opts(run_parser)
+ add_parse_opts(run_parser)
+
+ config_parser = subparser.add_parser('config',
+ help='Ensures that .config contains all of '
+ 'the options in .kunitconfig')
+ add_common_opts(config_parser)
+
+ build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests')
+ add_common_opts(build_parser)
+ add_build_opts(build_parser)
+
+ exec_parser = subparser.add_parser('exec', help='Run a kernel with KUnit tests')
+ add_common_opts(exec_parser)
+ add_exec_opts(exec_parser)
+ add_parse_opts(exec_parser)
+
+ # The 'parse' option is special, as it doesn't need the kernel source
+ # (therefore there is no need for a build_dir, hence no add_common_opts)
+ # and the '--file' argument is not relevant to 'run', so isn't in
+ # add_parse_opts()
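+ # (For example, "dmesg | ./tools/testing/kunit/kunit.py parse" or
+ # "./tools/testing/kunit/kunit.py parse /path/to/saved-kunit-log" should
+ # both work; stdin is read when the optional file argument is omitted.)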
+ parse_parser = subparser.add_parser('parse',
+ help='Parses KUnit results from a file, '
+ 'and prints the formatted results.')
+ add_parse_opts(parse_parser)
+ parse_parser.add_argument('file',
+ help='Specifies the file to read results from.',
+ type=str, nargs='?', metavar='input_file')
cli_args = parser.parse_args(argv)
if cli_args.subcommand == 'run':
- if get_kernel_root_path():
- os.chdir(get_kernel_root_path())
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+ kunit_kernel.kunitconfig_path = os.path.join(
+ cli_args.build_dir,
+ kunit_kernel.kunitconfig_path)
- if cli_args.build_dir:
- if not os.path.exists(cli_args.build_dir):
- os.mkdir(cli_args.build_dir)
- kunit_kernel.kunitconfig_path = os.path.join(
- cli_args.build_dir,
- kunit_kernel.kunitconfig_path)
-
- if cli_args.defconfig:
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
create_default_kunitconfig()
if not linux:
@@ -153,12 +252,94 @@ def main(argv, linux=None):
cli_args.timeout,
cli_args.jobs,
cli_args.build_dir,
- cli_args.defconfig,
cli_args.alltests,
cli_args.make_options)
result = run_tests(linux, request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
+ elif cli_args.subcommand == 'config':
+ if cli_args.build_dir:
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+ kunit_kernel.kunitconfig_path = os.path.join(
+ cli_args.build_dir,
+ kunit_kernel.kunitconfig_path)
+
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
+ create_default_kunitconfig()
+
+ if not linux:
+ linux = kunit_kernel.LinuxSourceTree()
+
+ request = KunitConfigRequest(cli_args.build_dir,
+ cli_args.make_options)
+ result = config_tests(linux, request)
+ kunit_parser.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (
+ result.elapsed_time))
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+ elif cli_args.subcommand == 'build':
+ if cli_args.build_dir:
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+ kunit_kernel.kunitconfig_path = os.path.join(
+ cli_args.build_dir,
+ kunit_kernel.kunitconfig_path)
+
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
+ create_default_kunitconfig()
+
+ if not linux:
+ linux = kunit_kernel.LinuxSourceTree()
+
+ request = KunitBuildRequest(cli_args.jobs,
+ cli_args.build_dir,
+ cli_args.alltests,
+ cli_args.make_options)
+ result = build_tests(linux, request)
+ kunit_parser.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (
+ result.elapsed_time))
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+ elif cli_args.subcommand == 'exec':
+ if cli_args.build_dir:
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+ kunit_kernel.kunitconfig_path = os.path.join(
+ cli_args.build_dir,
+ kunit_kernel.kunitconfig_path)
+
+ if not os.path.exists(kunit_kernel.kunitconfig_path):
+ create_default_kunitconfig()
+
+ if not linux:
+ linux = kunit_kernel.LinuxSourceTree()
+
+ exec_request = KunitExecRequest(cli_args.timeout,
+ cli_args.build_dir,
+ cli_args.alltests)
+ exec_result = exec_tests(linux, exec_request)
+ parse_request = KunitParseRequest(cli_args.raw_output,
+ exec_result.result)
+ result = parse_tests(parse_request)
+ kunit_parser.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (
+ exec_result.elapsed_time))
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+ elif cli_args.subcommand == 'parse':
+ if cli_args.file == None:
+ kunit_output = sys.stdin
+ else:
+ with open(cli_args.file, 'r') as f:
+ kunit_output = f.read().splitlines()
+ request = KunitParseRequest(cli_args.raw_output,
+ kunit_output)
+ result = parse_tests(request)
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
else:
parser.print_help()
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 984588d6ba95..5bb7b118ebd9 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -239,6 +239,24 @@ class KUnitMainTest(unittest.TestCase):
self.print_patch.stop()
pass
+ def test_config_passes_args_pass(self):
+ kunit.main(['config'], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+ assert self.linux_source_mock.run_kernel.call_count == 0
+
+ def test_build_passes_args_pass(self):
+ kunit.main(['build'], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 0
+ self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '', None)
+ assert self.linux_source_mock.run_kernel.call_count == 0
+
+ def test_exec_passes_args_pass(self):
+ kunit.main(['exec'], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 0
+ assert self.linux_source_mock.run_kernel.call_count == 1
+ self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
def test_run_passes_args_pass(self):
kunit.main(['run'], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
@@ -247,6 +265,13 @@ class KUnitMainTest(unittest.TestCase):
build_dir='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
+ def test_exec_passes_args_fail(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['exec'], self.linux_source_mock)
+ assert type(e.exception) == SystemExit
+ assert e.exception.code == 1
+
def test_run_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
@@ -257,14 +282,28 @@ class KUnitMainTest(unittest.TestCase):
assert self.linux_source_mock.run_kernel.call_count == 1
self.print_mock.assert_any_call(StrContains(' 0 tests run'))
+ def test_exec_raw_output(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['exec', '--raw_output'], self.linux_source_mock)
+ assert self.linux_source_mock.run_kernel.call_count == 1
+ for kall in self.print_mock.call_args_list:
+ assert kall != mock.call(StrContains('Testing complete.'))
+ assert kall != mock.call(StrContains(' 0 tests run'))
+
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- with self.assertRaises(SystemExit) as e:
- kunit.main(['run', '--raw_output'], self.linux_source_mock)
- assert type(e.exception) == SystemExit
- assert e.exception.code == 1
+ kunit.main(['run', '--raw_output'], self.linux_source_mock)
assert self.linux_source_mock.build_reconfig.call_count == 1
assert self.linux_source_mock.run_kernel.call_count == 1
+ for kall in self.print_mock.call_args_list:
+ assert kall != mock.call(StrContains('Testing complete.'))
+ assert kall != mock.call(StrContains(' 0 tests run'))
+
+ def test_exec_timeout(self):
+ timeout = 3453
+ kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='', timeout=timeout)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
timeout = 3453
@@ -282,5 +321,21 @@ class KUnitMainTest(unittest.TestCase):
build_dir=build_dir, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
+ def test_config_builddir(self):
+ build_dir = '.kunit'
+ kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock)
+ assert self.linux_source_mock.build_reconfig.call_count == 1
+
+ def test_build_builddir(self):
+ build_dir = '.kunit'
+ kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock)
+ self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, build_dir, None)
+
+ def test_exec_builddir(self):
+ build_dir = '.kunit'
+ kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock)
+ self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
if __name__ == '__main__':
unittest.main()
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 2ff68702fd41..1195bd85af38 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -249,10 +249,17 @@ else
$(error Error: set INSTALL_PATH to use install)
endif
+FORMAT ?= .gz
+TAR_PATH = $(abspath ${INSTALL_PATH}/kselftest-packages/kselftest.tar${FORMAT})
+gen_tar: install
+ @mkdir -p ${INSTALL_PATH}/kselftest-packages/
+ @tar caf ${TAR_PATH} --exclude=kselftest-packages -C ${INSTALL_PATH} .
+ @echo "Created ${TAR_PATH}"
+
clean:
@for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
-.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean
+.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean gen_tar
diff --git a/tools/testing/selftests/exec/.gitignore b/tools/testing/selftests/exec/.gitignore
index c078ece12ff0..94b02a18f230 100644
--- a/tools/testing/selftests/exec/.gitignore
+++ b/tools/testing/selftests/exec/.gitignore
@@ -9,3 +9,4 @@ execveat.ephemeral
execveat.denatured
/recursion-depth
xxxxxxxx*
+pipe
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index 33339e31e365..4453b8f8def3 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -3,8 +3,9 @@ CFLAGS = -Wall
CFLAGS += -Wno-nonnull
CFLAGS += -D_GNU_SOURCE
+TEST_PROGS := binfmt_script
TEST_GEN_PROGS := execveat
-TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
+TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
diff --git a/tools/testing/selftests/exec/binfmt_script b/tools/testing/selftests/exec/binfmt_script
new file mode 100755
index 000000000000..05f94a741c7a
--- /dev/null
+++ b/tools/testing/selftests/exec/binfmt_script
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that truncation of bprm->buf doesn't cause unexpected exec paths, along
+# with various other pathological cases.
+import os, subprocess
+
+# Relevant commits
+#
+# b5372fe5dc84 ("exec: load_script: Do not exec truncated interpreter path")
+# 6eb3c3d0a52d ("exec: increase BINPRM_BUF_SIZE to 256")
+
+# BINPRM_BUF_SIZE
+SIZE=256
+
+NAME_MAX=int(subprocess.check_output(["getconf", "NAME_MAX", "."]))
+
+test_num=0
+
+code='''#!/usr/bin/perl
+print "Executed interpreter! Args:\n";
+print "0 : '$0'\n";
+$counter = 1;
+foreach my $a (@ARGV) {
+ print "$counter : '$a'\n";
+ $counter++;
+}
+'''
+
+##
+# test - produce a binfmt_script hashbang line for testing
+#
+# @size: bytes for bprm->buf line, including hashbang but not newline
+# @good: whether this script is expected to execute correctly
+# @hashbang: the special 2 bytes for running binfmt_script
+# @leading: any leading whitespace before the executable path
+# @root: start of executable pathname
+# @target: end of executable pathname
+# @arg: bytes following the executable pathname
+# @fill: character to fill between @root and @target to reach @size bytes
+# @newline: character to use as newline, not counted towards @size
+# ...
+def test(name, size, good=True, leading="", root="./", target="/perl",
+ fill="A", arg="", newline="\n", hashbang="#!"):
+ global test_num, tests, NAME_MAX
+ test_num += 1
+ if test_num > tests:
+ raise ValueError("more binfmt_script tests than expected! (want %d, expected %d)"
+ % (test_num, tests))
+
+ middle = ""
+ remaining = size - len(hashbang) - len(leading) - len(root) - len(target) - len(arg)
+ # The middle of the pathname must not exceed NAME_MAX
+ while remaining >= NAME_MAX:
+ middle += fill * (NAME_MAX - 1)
+ middle += '/'
+ remaining -= NAME_MAX
+ middle += fill * remaining
+
+ dirpath = root + middle
+ binary = dirpath + target
+ if len(target):
+ os.makedirs(dirpath, mode=0o755, exist_ok=True)
+ open(binary, "w").write(code)
+ os.chmod(binary, 0o755)
+
+ buf=hashbang + leading + root + middle + target + arg + newline
+ if len(newline) > 0:
+ buf += 'echo this is not really perl\n'
+
+ script = "binfmt_script-%s" % (name)
+ open(script, "w").write(buf)
+ os.chmod(script, 0o755)
+
+ proc = subprocess.Popen(["./%s" % (script)], shell=True,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ stdout = proc.communicate()[0]
+
+ if proc.returncode == 0 and b'Executed interpreter' in stdout:
+ if good:
+ print("ok %d - binfmt_script %s (successful good exec)"
+ % (test_num, name))
+ else:
+ print("not ok %d - binfmt_script %s succeeded when it should have failed"
+ % (test_num, name))
+ else:
+ if good:
+ print("not ok %d - binfmt_script %s failed when it should have succeeded (rc:%d)"
+ % (test_num, name, proc.returncode))
+ else:
+ print("ok %d - binfmt_script %s (correctly failed bad exec)"
+ % (test_num, name))
+
+ # Clean up crazy binaries
+ os.unlink(script)
+ if len(target):
+ elements = binary.split('/')
+ os.unlink(binary)
+ elements.pop()
+ while len(elements) > 1:
+ os.rmdir("/".join(elements))
+ elements.pop()
+
+tests=27
+print("TAP version 1.3")
+print("1..%d" % (tests))
+
+### FAIL (8 tests)
+
+# Entire path is well past the BINPRM_BUF_SIZE.
+test(name="too-big", size=SIZE+80, good=False)
+# Path is right at max size, making it impossible to tell if it was truncated.
+test(name="exact", size=SIZE, good=False)
+# Same as above, but with leading whitespace.
+test(name="exact-space", size=SIZE, good=False, leading=" ")
+# Huge buffer of only whitespace.
+test(name="whitespace-too-big", size=SIZE+71, good=False, root="",
+ fill=" ", target="")
+# A good path, but it gets truncated due to leading whitespace.
+test(name="truncated", size=SIZE+17, good=False, leading=" " * 19)
+# Entirely empty except for #!
+test(name="empty", size=2, good=False, root="",
+ fill="", target="", newline="")
+# Within size, but entirely spaces
+test(name="spaces", size=SIZE-1, good=False, root="", fill=" ",
+ target="", newline="")
+# Newline before binary.
+test(name="newline-prefix", size=SIZE-1, good=False, leading="\n",
+ root="", fill=" ", target="")
+
+### ok (19 tests)
+
+# The original test case that was broken by commit:
+# 8099b047ecc4 ("exec: load_script: don't blindly truncate shebang string")
+test(name="test.pl", size=439, leading=" ",
+ root="./nix/store/bwav8kz8b3y471wjsybgzw84mrh4js9-perl-5.28.1/bin",
+ arg=" -I/nix/store/x6yyav38jgr924nkna62q3pkp0dgmzlx-perl5.28.1-File-Slurp-9999.25/lib/perl5/site_perl -I/nix/store/ha8v67sl8dac92r9z07vzr4gv1y9nwqz-perl5.28.1-Net-DBus-1.1.0/lib/perl5/site_perl -I/nix/store/dcrkvnjmwh69ljsvpbdjjdnqgwx90a9d-perl5.28.1-XML-Parser-2.44/lib/perl5/site_perl -I/nix/store/rmji88k2zz7h4zg97385bygcydrf2q8h-perl5.28.1-XML-Twig-3.52/lib/perl5/site_perl")
+# One byte under size, leaving newline visible.
+test(name="one-under", size=SIZE-1)
+# Two bytes under size, leaving newline visible.
+test(name="two-under", size=SIZE-2)
+# Exact size, but trailing whitespace visible instead of newline
+test(name="exact-trunc-whitespace", size=SIZE, arg=" ")
+# Exact size, but trailing space and first arg char visible instead of newline.
+test(name="exact-trunc-arg", size=SIZE, arg=" f")
+# One byte under, with confirmed non-truncated arg since newline now visible.
+test(name="one-under-full-arg", size=SIZE-1, arg=" f")
+# Short read buffer by one byte.
+test(name="one-under-no-nl", size=SIZE-1, newline="")
+# Short read buffer by half buffer size.
+test(name="half-under-no-nl", size=int(SIZE/2), newline="")
+# One byte under with whitespace arg, leaving newline visible.
+test(name="one-under-trunc-arg", size=SIZE-1, arg=" ")
+# One byte under with whitespace leading, leaving newline visible.
+test(name="one-under-leading", size=SIZE-1, leading=" ")
+# One byte under with whitespace leading and as arg, leaving newline visible.
+test(name="one-under-leading-trunc-arg", size=SIZE-1, leading=" ", arg=" ")
+# Same as above, but with 2 bytes under
+test(name="two-under-no-nl", size=SIZE-2, newline="")
+test(name="two-under-trunc-arg", size=SIZE-2, arg=" ")
+test(name="two-under-leading", size=SIZE-2, leading=" ")
+test(name="two-under-leading-trunc-arg", size=SIZE-2, leading=" ", arg=" ")
+# Same as above, but with buffer half filled
+test(name="two-under-no-nl", size=int(SIZE/2), newline="")
+test(name="two-under-trunc-arg", size=int(SIZE/2), arg=" ")
+test(name="two-under-leading", size=int(SIZE/2), leading=" ")
+test(name="two-under-lead-trunc-arg", size=int(SIZE/2), leading=" ", arg=" ")
+
+if test_num != tests:
+ raise ValueError("fewer binfmt_script tests than expected! (ran %d, expected %d"
+ % (test_num, tests))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index cbb6efbdb786..67bf7254a48f 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -5,7 +5,9 @@
* Selftests for execveat(2).
*/
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* to get O_PATH, AT_EMPTY_PATH */
+#endif
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <sys/syscall.h>
@@ -311,6 +313,10 @@ static int run_tests(void)
fail += check_execveat_fail(AT_FDCWD, fullname_symlink,
AT_SYMLINK_NOFOLLOW, ELOOP);
+ /* Non-regular file failure */
+ fail += check_execveat_fail(dot_dfd, "pipe", 0, EACCES);
+ unlink("pipe");
+
/* Shell script wrapping executable file: */
/* dfd + path */
fail += check_execveat(subdir_dfd, "../script", 0);
@@ -384,6 +390,8 @@ static void prerequisites(void)
fd = open("subdir.ephemeral/script", O_RDWR|O_CREAT|O_TRUNC, 0755);
write(fd, script, strlen(script));
close(fd);
+
+ mkfifo("pipe", 0755);
}
int main(int argc, char **argv)
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
index 021c03fd885d..23465823532b 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
@@ -14,6 +14,8 @@ if [ ! -f set_event ]; then
exit_unsupported
fi
+[ -f error_log ] || exit_unsupported
+
ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter'
exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index 61a3c7e2634d..697c77ef2e2b 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -119,12 +119,14 @@ yield() {
ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1
}
+# Since the probe event command may include a backslash, explicitly use
+# printf "%s" so that it is NOT interpreted.
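+# (For example, with a command containing "\n", some shells' builtin echo
+# would expand it to a real newline, whereas printf "%s" passes the
+# backslash sequence through unchanged.)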
ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file
- pos=$(echo -n "${2%^*}" | wc -c) # error position
- command=$(echo "$2" | tr -d ^)
+ pos=$(printf "%s" "${2%^*}" | wc -c) # error position
+ command=$(printf "%s" "$2" | tr -d ^)
echo "Test command: $command"
echo > error_log
- (! echo "$command" >> "$3" ) 2> /dev/null
+ (! printf "%s" "$command" >> "$3" ) 2> /dev/null
grep "$1: error:" -A 3 error_log
N=$(tail -n 1 error_log | wc -c)
# " Command: " and "^\n" => 13
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
index ef1e9bafb098..eb0f4ab4e070 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -91,7 +91,9 @@ esac
if grep -q "Create/append/" README && grep -q "imm-value" README; then
echo 'p:kprobes/testevent _do_fork' > kprobe_events
check_error '^r:kprobes/testevent do_exit' # DIFF_PROBE_TYPE
-echo 'p:kprobes/testevent _do_fork abcd=\1' > kprobe_events
+
+# Explicitly use printf "%s" to not interpret \1
+printf "%s" 'p:kprobes/testevent _do_fork abcd=\1' > kprobe_events
check_error 'p:kprobes/testevent _do_fork ^bcd=\1' # DIFF_ARG_TYPE
check_error 'p:kprobes/testevent _do_fork ^abcd=\1:u8' # DIFF_ARG_TYPE
check_error 'p:kprobes/testevent _do_fork ^abcd=\"foo"' # DIFF_ARG_TYPE
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
index 77be6e1f6e7b..e232059a8ab2 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
echo "Test field variable support"
echo 'wakeup_latency u64 lat; pid_t pid; int prio; char comm[16]' > synthetic_events
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
index f3eb8aacec0e..07cfcb8157b6 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
echo "Test create synthetic event"
echo 'waking_latency u64 lat pid_t pid' > synthetic_events
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
index d281f056f980..73e413c2ca26 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
echo "Test multiple actions on hist trigger"
echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
TRIGGER1=events/sched/sched_wakeup/trigger
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
index a708f0e7858a..ebe0ad827f9f 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
echo "Test create synthetic event"
echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
index dfce6932d8be..2a2ef767249e 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
echo "Test create synthetic event"
echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
index 0035995c2194..98d73bfb0296 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
echo "Test create synthetic event"
echo 'wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
index f546c1b66a9b..01b01b9c4e07 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
@@ -12,6 +12,11 @@ if [ ! -f set_event ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
if [ ! -f snapshot ]; then
echo "snapshot is not supported"
exit_unsupported
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
index 8021d60aafec..c3baa486aeb4 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
@@ -17,6 +17,11 @@ if [ ! -f synthetic_events ]; then
exit_unsupported
fi
+if [ ! -f events/sched/sched_process_fork/hist ]; then
+ echo "hist trigger is not supported"
+ exit_unsupported
+fi
+
grep -q "trace(<synthetic_event>" README || exit_unsupported # version issue
echo "Test create synthetic event"
diff --git a/tools/testing/selftests/gen_kselftest_tar.sh b/tools/testing/selftests/gen_kselftest_tar.sh
index 8b2b6088540d..4a974bc03385 100755
--- a/tools/testing/selftests/gen_kselftest_tar.sh
+++ b/tools/testing/selftests/gen_kselftest_tar.sh
@@ -49,6 +49,11 @@ main()
# directory
./kselftest_install.sh "$install_dir"
(cd "$install_work"; tar $copts "$dest"/kselftest${ext} $install_name)
+
+ # Don't put the message at the actual end as people may be parsing the
+ # "archive created" line in their scripts.
+ echo -e "\nConsider using 'make gen_tar' instead of this script\n"
+
echo "Kselftest archive kselftest${ext} created!"
# clean up top-level install work directory
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index 14a77ea4a8da..b80ee3f6e265 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -2,3 +2,4 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_BITMAP=m
CONFIG_PRIME_NUMBERS=m
CONFIG_TEST_STRSCPY=m
+CONFIG_TEST_BITOPS=m
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 9c60337317c6..020137b61407 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -241,7 +241,7 @@ function get_files_count()
split_remote $LOC
if [[ "$REMOTE" == "" ]]; then
- echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l)
+ echo $(ls -1 "$VPATH"/${NAME}* 2>/dev/null | wc -l)
else
echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \
wc -l" 2> /dev/null)
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 644770c3b754..0830e63818c1 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -19,6 +19,7 @@ SUB_DIRS = alignment \
copyloops \
dscr \
mm \
+ nx-gzip \
pmu \
signal \
primitives \
diff --git a/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules b/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
new file mode 100644
index 000000000000..5a7118495cb3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules
@@ -0,0 +1 @@
+SUBSYSTEM=="nxgzip", KERNEL=="nx-gzip", MODE="0666"
diff --git a/tools/testing/selftests/powerpc/nx-gzip/Makefile b/tools/testing/selftests/powerpc/nx-gzip/Makefile
new file mode 100644
index 000000000000..640fad6cc2c7
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/Makefile
@@ -0,0 +1,8 @@
+CFLAGS = -O3 -m64 -I./include
+
+TEST_GEN_FILES := gzfht_test gunz_test
+TEST_PROGS := nx-gzip-test.sh
+
+include ../../lib.mk
+
+$(TEST_GEN_FILES): gzip_vas.c
diff --git a/tools/testing/selftests/powerpc/nx-gzip/README b/tools/testing/selftests/powerpc/nx-gzip/README
new file mode 100644
index 000000000000..9809dbaa1905
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/README
@@ -0,0 +1,45 @@
+Test the nx-gzip function:
+=========================
+
+Verify that the following device exists:
+ /dev/crypto/nx-gzip
+If you get a permission error run as sudo or set the device permissions:
+ sudo chmod go+rw /dev/crypto/nx-gzip
+However, chmod may not survive across boots. You may create a udev file such
+as:
+ /etc/udev/rules.d/99-nx-gzip.rules
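+containing a rule such as the one added alongside these tests:
+ SUBSYSTEM=="nxgzip", KERNEL=="nx-gzip", MODE="0666"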
+
+
+To manually build and run:
+$ gcc -O3 -I./include -o gzfht_test gzfht_test.c gzip_vas.c
+$ gcc -O3 -I./include -o gunz_test gunz_test.c gzip_vas.c
+
+
+Compress any file using Fixed Huffman mode. Output will have a .nx.gz suffix:
+$ ./gzfht_test gzip_vas.c
+file gzip_vas.c read, 6413 bytes
+compressed 6413 to 3124 bytes total, crc32 checksum = abd15e8a
+
+
+Uncompress the previous output. Output will have a .nx.gunzip suffix:
+$ ./gunz_test gzip_vas.c.nx.gz
+gzHeader FLG 0
+00 00 00 00 04 03
+gzHeader MTIME, XFL, OS ignored
+computed checksum abd15e8a isize 0000190d
+stored checksum abd15e8a isize 0000190d
+decomp is complete: fclose
+
+
+Compare two files:
+$ sha1sum gzip_vas.c.nx.gz.nx.gunzip gzip_vas.c
+bf43e3c0c3651f5f22b6f9784cd9b1eeab4120b6 gzip_vas.c.nx.gz.nx.gunzip
+bf43e3c0c3651f5f22b6f9784cd9b1eeab4120b6 gzip_vas.c
+
+
+Note that the code here is intended for testing the nx-gzip hardware function,
+not for demonstrating performance or compression ratio.
+Because they are simplistic, these selftests expect to allocate the entire set
+of source and target pages in memory, so enough free memory is needed.
+For more information and source code, consider using:
+https://github.com/libnxz/power-gzip
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
new file mode 100644
index 000000000000..6ee0fded0391
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c
@@ -0,0 +1,1028 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* P9 gunzip sample code for demonstrating the P9 NX hardware
+ * interface. Not intended for production use or for performance or
+ * compression ratio measurements. Note also that /dev/crypto/nx-gzip,
+ * VAS and skiboot support are required.
+ *
+ * Copyright 2020 IBM Corp.
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ * https://github.com/libnxz/power-gzip for zlib api and other utils
+ * Definitions of acronyms used here. See
+ * P9 NX Gzip Accelerator User's Manual for details:
+ * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
+ *
+ * adler/crc: 32 bit checksums appended to stream tail
+ * ce: completion extension
+ * cpb: coprocessor parameter block (metadata)
+ * crb: coprocessor request block (command)
+ * csb: coprocessor status block (status)
+ * dht: dynamic huffman table
+ * dde: data descriptor element (address, length)
+ * ddl: list of ddes
+ * dh/fh: dynamic and fixed huffman types
+ * fc: coprocessor function code
+ * histlen: history/dictionary length
+ * history: sliding window of up to 32KB of data
+ * lzcount: Deflate LZ symbol counts
+ * rembytecnt: remaining byte count
+ * sfbt: source final block type; last block's type during decomp
+ * spbc: source processed byte count
+ * subc: source unprocessed bit count
+ * tebc: target ending bit count; valid bits in the last byte
+ * tpbc: target processed byte count
+ * vas: virtual accelerator switch; the user mode interface
+ */
+
+#define _ISOC11_SOURCE // For aligned_alloc()
+#define _DEFAULT_SOURCE // For endian.h
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <endian.h>
+#include <bits/endian.h>
+#include <sys/ioctl.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include "nxu.h"
+#include "nx.h"
+#include "crb.h"
+
+int nx_dbg;
+FILE *nx_gzip_log;
+
+#define NX_MIN(X, Y) (((X) < (Y))?(X):(Y))
+#define NX_MAX(X, Y) (((X) > (Y))?(X):(Y))
+
+#define GETINPC(X) fgetc(X)
+#define FNAME_MAX 1024
+
+/* fifo queue management */
+#define fifo_used_bytes(used) (used)
+#define fifo_free_bytes(used, len) ((len)-(used))
+/* amount of free bytes in the first and last parts */
+#define fifo_free_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? (len)-((cur)+(used)) : 0)
+#define fifo_free_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? (cur) : (len)-(used))
+/* amount of used bytes in the first and last parts */
+#define fifo_used_first_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? (used) : (len)-(cur))
+#define fifo_used_last_bytes(cur, used, len) ((((cur)+(used)) <= (len)) \
+ ? 0 : ((used)+(cur))-(len))
+/* first and last free parts start here */
+#define fifo_free_first_offset(cur, used) ((cur)+(used))
+#define fifo_free_last_offset(cur, used, len) \
+ fifo_used_last_bytes(cur, used, len)
+/* first and last used parts start here */
+#define fifo_used_first_offset(cur) (cur)
+#define fifo_used_last_offset(cur) (0)
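+/*
+ * Example (illustrative values): with len = 8, cur = 6, used = 4 the used
+ * region wraps around: the "first" used part is bytes 6..7 (2 bytes at
+ * offset 6) and the "last" used part is bytes 0..1 (2 bytes at offset 0),
+ * which is what the macros above return.
+ */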
+
+const int fifo_in_len = 1<<24;
+const int fifo_out_len = 1<<24;
+const int page_sz = 1<<16;
+const int line_sz = 1<<7;
+const int window_max = 1<<15;
+
+/*
+ * Adds an (address, len) pair to the list of ddes (ddl) and updates
+ * the base dde. ddl[0] is the only dde in a direct dde which
+ * contains a single (addr,len) pair. For more pairs, ddl[0] becomes
+ * the indirect (base) dde that points to a list of direct ddes.
+ * See Section 6.4 of the NX-gzip user manual for DDE description.
+ * Addr=NULL, len=0 clears the ddl[0]. Returns the total number of
+ * bytes in ddl. Caller is responsible for allocating the array of
+ * nx_dde_t *ddl. If N addresses are required in the scatter-gather
+ * list, the ddl array must have N+1 entries minimum.
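+ * A typical calling sequence is nx_append_dde(ddl, NULL, 0) to clear
+ * ddl[0], followed by one nx_append_dde(ddl, addr, len) call per buffer
+ * to be chained into the scatter-gather list.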
+ */
+static inline uint32_t nx_append_dde(struct nx_dde_t *ddl, void *addr,
+ uint32_t len)
+{
+ uint32_t ddecnt;
+ uint32_t bytes;
+
+ if (addr == NULL && len == 0) {
+ clearp_dde(ddl);
+ return 0;
+ }
+
+ NXPRT(fprintf(stderr, "%d: %s addr %p len %x\n", __LINE__, addr,
+ __func__, len));
+
+ /* Number of ddes in the dde list ; == 0 when it is a direct dde */
+ ddecnt = getpnn(ddl, dde_count);
+ bytes = getp32(ddl, ddebc);
+
+ if (ddecnt == 0 && bytes == 0) {
+ /* First dde is unused; make it a direct dde */
+ bytes = len;
+ putp32(ddl, ddebc, bytes);
+ putp64(ddl, ddead, (uint64_t) addr);
+ } else if (ddecnt == 0) {
+ /* Converting direct to indirect dde
+ * ddl[0] becomes head dde of ddl
+ * copy direct to indirect first.
+ */
+ ddl[1] = ddl[0];
+
+ /* Add the new dde next */
+ clear_dde(ddl[2]);
+ put32(ddl[2], ddebc, len);
+ put64(ddl[2], ddead, (uint64_t) addr);
+
+ /* Ddl head points to 2 direct ddes */
+ ddecnt = 2;
+ putpnn(ddl, dde_count, ddecnt);
+ bytes = bytes + len;
+ putp32(ddl, ddebc, bytes);
+ /* Pointer to the first direct dde */
+ putp64(ddl, ddead, (uint64_t) &ddl[1]);
+ } else {
+ /* Append a dde to an existing indirect ddl */
+ ++ddecnt;
+ clear_dde(ddl[ddecnt]);
+ put64(ddl[ddecnt], ddead, (uint64_t) addr);
+ put32(ddl[ddecnt], ddebc, len);
+
+ putpnn(ddl, dde_count, ddecnt);
+ bytes = bytes + len;
+ putp32(ddl, ddebc, bytes); /* byte sum of all dde */
+ }
+ return bytes;
+}
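+
+/* Usage sketch for nx_append_dde(), with placeholder buffer names
+ * (illustrative only, not part of this test). For N buffers the ddl
+ * array needs at least N + 1 entries, because ddl[0] turns into the
+ * indirect head dde as soon as a second buffer is appended:
+ *
+ *   struct nx_dde_t ddl[3] __aligned(128);
+ *
+ *   nx_append_dde(ddl, NULL, 0);       clears ddl[0]
+ *   nx_append_dde(ddl, buf_a, len_a);  ddl[0] is a direct dde
+ *   nx_append_dde(ddl, buf_b, len_b);  ddl[0] becomes the indirect
+ *                                      head pointing at ddl[1..2]
+ */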
+
+/*
+ * Touch the pages covering buf_sz bytes, beginning from the first
+ * buffer in a dde list. Do not touch pages past the page containing
+ * the buf_sz-th byte.
+ *
+ * Set buf_sz = 0 to touch all pages described by the ddep.
+ */
+static int nx_touch_pages_dde(struct nx_dde_t *ddep, long buf_sz, long page_sz,
+ int wr)
+{
+ uint32_t indirect_count;
+ uint32_t buf_len;
+ long total;
+ uint64_t buf_addr;
+ struct nx_dde_t *dde_list;
+ int i;
+
+ assert(!!ddep);
+
+ indirect_count = getpnn(ddep, dde_count);
+
+ NXPRT(fprintf(stderr, "%s dde_count %d request len ", __func__,
+ indirect_count));
+ NXPRT(fprintf(stderr, "0x%lx\n", buf_sz));
+
+ if (indirect_count == 0) {
+ /* Direct dde */
+ buf_len = getp32(ddep, ddebc);
+ buf_addr = getp64(ddep, ddead);
+
+ NXPRT(fprintf(stderr, "touch direct ddebc 0x%x ddead %p\n",
+ buf_len, (void *)buf_addr));
+
+ if (buf_sz == 0)
+ nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+ else
+ nxu_touch_pages((void *)buf_addr, NX_MIN(buf_len,
+ buf_sz), page_sz, wr);
+
+ return ERR_NX_OK;
+ }
+
+ /* Indirect dde */
+ if (indirect_count > MAX_DDE_COUNT)
+ return ERR_NX_EXCESSIVE_DDE;
+
+ /* First address of the list */
+ dde_list = (struct nx_dde_t *) getp64(ddep, ddead);
+
+ if (buf_sz == 0)
+ buf_sz = getp32(ddep, ddebc);
+
+ total = 0;
+ for (i = 0; i < indirect_count; i++) {
+ buf_len = get32(dde_list[i], ddebc);
+ buf_addr = get64(dde_list[i], ddead);
+ total += buf_len;
+
+ NXPRT(fprintf(stderr, "touch loop len 0x%x ddead %p total ",
+ buf_len, (void *)buf_addr));
+ NXPRT(fprintf(stderr, "0x%lx\n", total));
+
+ /* Touching fewer pages than encoded in the ddebc */
+ if (total > buf_sz) {
+ buf_len = NX_MIN(buf_len, total - buf_sz);
+ nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+ NXPRT(fprintf(stderr, "touch loop break len 0x%x ",
+ buf_len));
+ NXPRT(fprintf(stderr, "ddead %p\n", (void *)buf_addr));
+ break;
+ }
+ nxu_touch_pages((void *)buf_addr, buf_len, page_sz, wr);
+ }
+ return ERR_NX_OK;
+}
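+
+/* How the touch helpers are used below (see the restart_nx label):
+ * input ddl pages are touched read-only over the whole list, while
+ * output ddl pages are touched writable only up to the estimated
+ * target size to save cycles:
+ *
+ *   nx_touch_pages_dde(ddl_in, 0, page_sz, 0);
+ *   nx_touch_pages_dde(ddl_out, target_sz_estimate, page_sz, 1);
+ */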
+
+/*
+ * Src and dst buffers are supplied in scatter gather lists.
+ * NX function code and other parameters supplied in cmdp.
+ */
+static int nx_submit_job(struct nx_dde_t *src, struct nx_dde_t *dst,
+ struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ uint64_t csbaddr;
+
+ memset((void *)&cmdp->crb.csb, 0, sizeof(cmdp->crb.csb));
+
+ cmdp->crb.source_dde = *src;
+ cmdp->crb.target_dde = *dst;
+
+ /* Status, output byte count in tpbc */
+ csbaddr = ((uint64_t) &cmdp->crb.csb) & csb_address_mask;
+ put64(cmdp->crb, csb_address, csbaddr);
+
+ /* NX reports input bytes in spbc; cleared */
+ cmdp->cpb.out_spbc_comp_wrap = 0;
+ cmdp->cpb.out_spbc_comp_with_count = 0;
+ cmdp->cpb.out_spbc_decomp = 0;
+
+ /* Clear output */
+ put32(cmdp->cpb, out_crc, INIT_CRC);
+ put32(cmdp->cpb, out_adler, INIT_ADLER);
+
+ /* Submit the crb, the job descriptor, to the accelerator. */
+ return nxu_submit_job(cmdp, handle);
+}
+
+int decompress_file(int argc, char **argv, void *devhandle)
+{
+ FILE *inpf = NULL;
+ FILE *outf = NULL;
+
+ int c, expect, i, cc, rc = 0;
+ char gzfname[FNAME_MAX];
+
+ /* Queuing, file ops, byte counting */
+ char *fifo_in, *fifo_out;
+ int used_in, cur_in, used_out, cur_out, read_sz, n;
+ int first_free, last_free, first_used, last_used;
+ int first_offset, last_offset;
+ int write_sz, free_space, source_sz;
+ int source_sz_estimate, target_sz_estimate;
+ uint64_t last_comp_ratio = 0; /* 1000 max */
+ uint64_t total_out = 0;
+ int is_final, is_eof;
+
+ /* nx hardware */
+ int sfbt, subc, spbc, tpbc, nx_ce, fc, resuming = 0;
+ int history_len = 0;
+ struct nx_gzip_crb_cpb_t cmd, *cmdp;
+ struct nx_dde_t *ddl_in;
+ struct nx_dde_t dde_in[6] __aligned(128);
+ struct nx_dde_t *ddl_out;
+ struct nx_dde_t dde_out[6] __aligned(128);
+ int pgfault_retries;
+
+ /* when using mmap'ed files */
+	off_t input_file_offset = 0;
+
+ if (argc > 2) {
+ fprintf(stderr, "usage: %s <fname> or stdin\n", argv[0]);
+ fprintf(stderr, " writes to stdout or <fname>.nx.gunzip\n");
+ return -1;
+ }
+
+ if (argc == 1) {
+ inpf = stdin;
+ outf = stdout;
+ } else if (argc == 2) {
+ char w[1024];
+ char *wp;
+
+ inpf = fopen(argv[1], "r");
+ if (inpf == NULL) {
+ perror(argv[1]);
+ return -1;
+ }
+
+		/* Make a new file name to write to; '.gz' is not stripped. */
+ wp = (NULL != (wp = strrchr(argv[1], '/'))) ? (wp+1) : argv[1];
+ strcpy(w, wp);
+ strcat(w, ".nx.gunzip");
+
+ outf = fopen(w, "w");
+ if (outf == NULL) {
+ perror(w);
+ return -1;
+ }
+ }
+
+ /* Decode the gzip header */
+ c = GETINPC(inpf); expect = 0x1f; /* ID1 */
+ if (c != expect)
+ goto err1;
+
+ c = GETINPC(inpf); expect = 0x8b; /* ID2 */
+ if (c != expect)
+ goto err1;
+
+ c = GETINPC(inpf); expect = 0x08; /* CM */
+ if (c != expect)
+ goto err1;
+
+ int flg = GETINPC(inpf); /* FLG */
+
+ if (flg & 0xE0 || flg & 0x4 || flg == EOF)
+ goto err2;
+
+ fprintf(stderr, "gzHeader FLG %x\n", flg);
+
+ /* Read 6 bytes; ignoring the MTIME, XFL, OS fields in this
+ * sample code.
+ */
+ for (i = 0; i < 6; i++) {
+ char tmp[10];
+
+ tmp[i] = GETINPC(inpf);
+ if (tmp[i] == EOF)
+ goto err3;
+ fprintf(stderr, "%02x ", tmp[i]);
+ if (i == 5)
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "gzHeader MTIME, XFL, OS ignored\n");
+
+ /* FNAME */
+ if (flg & 0x8) {
+ int k = 0;
+
+ do {
+ c = GETINPC(inpf);
+ if (c == EOF || k >= FNAME_MAX)
+ goto err3;
+ gzfname[k++] = c;
+ } while (c);
+ fprintf(stderr, "gzHeader FNAME: %s\n", gzfname);
+ }
+
+ /* FHCRC */
+ if (flg & 0x2) {
+ c = GETINPC(inpf);
+ if (c == EOF)
+ goto err3;
+ c = GETINPC(inpf);
+ if (c == EOF)
+ goto err3;
+ fprintf(stderr, "gzHeader FHCRC: ignored\n");
+ }
+
+ used_in = cur_in = used_out = cur_out = 0;
+ is_final = is_eof = 0;
+
+ /* Allocate one page larger to prevent page faults due to NX
+ * overfetching.
+	 * Either cast the result as (char *)(uintptr_t)aligned_alloc()
+	 * or use the -std=c11 flag to make the int-to-pointer warning
+	 * go away.
+ */
+ assert((fifo_in = (char *)(uintptr_t)aligned_alloc(line_sz,
+ fifo_in_len + page_sz)) != NULL);
+ assert((fifo_out = (char *)(uintptr_t)aligned_alloc(line_sz,
+ fifo_out_len + page_sz + line_sz)) != NULL);
+ /* Leave unused space due to history rounding rules */
+ fifo_out = fifo_out + line_sz;
+ nxu_touch_pages(fifo_out, fifo_out_len, page_sz, 1);
+
+ ddl_in = &dde_in[0];
+ ddl_out = &dde_out[0];
+ cmdp = &cmd;
+ memset(&cmdp->crb, 0, sizeof(cmdp->crb));
+
+read_state:
+
+ /* Read from .gz file */
+
+ NXPRT(fprintf(stderr, "read_state:\n"));
+
+ if (is_eof != 0)
+ goto write_state;
+
+	/* We read in to fifo_in in two steps: first, read from cur_in
+	 * to the end of the buffer; last, if the free space wrapped
+	 * around, read from fifo_in offset 0 to offset cur_in.
+ */
+
+ /* Reset fifo head to reduce unnecessary wrap arounds */
+ cur_in = (used_in == 0) ? 0 : cur_in;
+
+ /* Free space total is reduced by a gap */
+ free_space = NX_MAX(0, fifo_free_bytes(used_in, fifo_in_len)
+ - line_sz);
+
+ /* Free space may wrap around as first and last */
+ first_free = fifo_free_first_bytes(cur_in, used_in, fifo_in_len);
+ last_free = fifo_free_last_bytes(cur_in, used_in, fifo_in_len);
+
+ /* Start offsets of the free memory */
+ first_offset = fifo_free_first_offset(cur_in, used_in);
+ last_offset = fifo_free_last_offset(cur_in, used_in, fifo_in_len);
+
+ /* Reduce read_sz because of the line_sz gap */
+ read_sz = NX_MIN(free_space, first_free);
+ n = 0;
+ if (read_sz > 0) {
+ /* Read in to offset cur_in + used_in */
+ n = fread(fifo_in + first_offset, 1, read_sz, inpf);
+ used_in = used_in + n;
+ free_space = free_space - n;
+ assert(n <= read_sz);
+ if (n != read_sz) {
+ /* Either EOF or error; exit the read loop */
+ is_eof = 1;
+ goto write_state;
+ }
+ }
+
+ /* If free space wrapped around */
+ if (last_free > 0) {
+ /* Reduce read_sz because of the line_sz gap */
+ read_sz = NX_MIN(free_space, last_free);
+ n = 0;
+ if (read_sz > 0) {
+ n = fread(fifo_in + last_offset, 1, read_sz, inpf);
+ used_in = used_in + n; /* Increase used space */
+ free_space = free_space - n; /* Decrease free space */
+ assert(n <= read_sz);
+ if (n != read_sz) {
+ /* Either EOF or error; exit the read loop */
+ is_eof = 1;
+ goto write_state;
+ }
+ }
+ }
+
+ /* At this point we have used_in bytes in fifo_in with the
+ * data head starting at cur_in and possibly wrapping around.
+ */
+
+write_state:
+
+ /* Write decompressed data to output file */
+
+ NXPRT(fprintf(stderr, "write_state:\n"));
+
+ if (used_out == 0)
+ goto decomp_state;
+
+ /* If fifo_out has data waiting, write it out to the file to
+ * make free target space for the accelerator used bytes in
+ * the first and last parts of fifo_out.
+ */
+
+ first_used = fifo_used_first_bytes(cur_out, used_out, fifo_out_len);
+ last_used = fifo_used_last_bytes(cur_out, used_out, fifo_out_len);
+
+ write_sz = first_used;
+
+ n = 0;
+ if (write_sz > 0) {
+ n = fwrite(fifo_out + cur_out, 1, write_sz, outf);
+ used_out = used_out - n;
+ /* Move head of the fifo */
+ cur_out = (cur_out + n) % fifo_out_len;
+ assert(n <= write_sz);
+ if (n != write_sz) {
+ fprintf(stderr, "error: write\n");
+ rc = -1;
+ goto err5;
+ }
+ }
+
+ if (last_used > 0) { /* If more data available in the last part */
+ write_sz = last_used; /* Keep it here for later */
+ n = 0;
+ if (write_sz > 0) {
+ n = fwrite(fifo_out, 1, write_sz, outf);
+ used_out = used_out - n;
+ cur_out = (cur_out + n) % fifo_out_len;
+ assert(n <= write_sz);
+ if (n != write_sz) {
+ fprintf(stderr, "error: write\n");
+ rc = -1;
+ goto err5;
+ }
+ }
+ }
+
+decomp_state:
+
+ /* NX decompresses input data */
+
+ NXPRT(fprintf(stderr, "decomp_state:\n"));
+
+ if (is_final)
+ goto finish_state;
+
+ /* Address/len lists */
+ clearp_dde(ddl_in);
+ clearp_dde(ddl_out);
+
+ /* FC, CRC, HistLen, Table 6-6 */
+ if (resuming) {
+ /* Resuming a partially decompressed input.
+ * The key to resume is supplying the 32KB
+ * dictionary (history) to NX, which is basically
+ * the last 32KB of output produced.
+ */
+ fc = GZIP_FC_DECOMPRESS_RESUME;
+
+ cmdp->cpb.in_crc = cmdp->cpb.out_crc;
+ cmdp->cpb.in_adler = cmdp->cpb.out_adler;
+
+ /* Round up the history size to quadword. Section 2.10 */
+ history_len = (history_len + 15) / 16;
+ putnn(cmdp->cpb, in_histlen, history_len);
+ history_len = history_len * 16; /* bytes */
+
+ if (history_len > 0) {
+ /* Chain in the history buffer to the DDE list */
+ if (cur_out >= history_len) {
+ nx_append_dde(ddl_in, fifo_out
+ + (cur_out - history_len),
+ history_len);
+ } else {
+ nx_append_dde(ddl_in, fifo_out
+ + ((fifo_out_len + cur_out)
+ - history_len),
+ history_len - cur_out);
+ /* Up to 32KB history wraps around fifo_out */
+ nx_append_dde(ddl_in, fifo_out, cur_out);
+ }
+
+ }
+ } else {
+ /* First decompress job */
+ fc = GZIP_FC_DECOMPRESS;
+
+ history_len = 0;
+ /* Writing 0 clears out subc as well */
+ cmdp->cpb.in_histlen = 0;
+ total_out = 0;
+
+ put32(cmdp->cpb, in_crc, INIT_CRC);
+ put32(cmdp->cpb, in_adler, INIT_ADLER);
+ put32(cmdp->cpb, out_crc, INIT_CRC);
+ put32(cmdp->cpb, out_adler, INIT_ADLER);
+
+ /* Assuming 10% compression ratio initially; use the
+ * most recently measured compression ratio as a
+ * heuristic to estimate the input and output
+ * sizes. If we give too much input, the target buffer
+ * overflows and NX cycles are wasted, and then we
+ * must retry with smaller input size. 1000 is 100%.
+ */
+ last_comp_ratio = 100UL;
+ }
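+
+	/* Worked example of the history chaining above, with
+	 * illustrative numbers: if cur_out is 10 KB and history_len is
+	 * 32 KB, the dictionary wraps around fifo_out, so two ddes are
+	 * appended: the 22 KB ending at fifo_out + fifo_out_len,
+	 * followed by the first 10 KB of fifo_out.
+	 */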
+ cmdp->crb.gzip_fc = 0;
+ putnn(cmdp->crb, gzip_fc, fc);
+
+ /*
+ * NX source buffers
+ */
+ first_used = fifo_used_first_bytes(cur_in, used_in, fifo_in_len);
+ last_used = fifo_used_last_bytes(cur_in, used_in, fifo_in_len);
+
+ if (first_used > 0)
+ nx_append_dde(ddl_in, fifo_in + cur_in, first_used);
+
+ if (last_used > 0)
+ nx_append_dde(ddl_in, fifo_in, last_used);
+
+ /*
+ * NX target buffers
+ */
+ first_free = fifo_free_first_bytes(cur_out, used_out, fifo_out_len);
+ last_free = fifo_free_last_bytes(cur_out, used_out, fifo_out_len);
+
+ /* Reduce output free space amount not to overwrite the history */
+ int target_max = NX_MAX(0, fifo_free_bytes(used_out, fifo_out_len)
+ - (1<<16));
+
+ NXPRT(fprintf(stderr, "target_max %d (0x%x)\n", target_max,
+ target_max));
+
+ first_free = NX_MIN(target_max, first_free);
+ if (first_free > 0) {
+ first_offset = fifo_free_first_offset(cur_out, used_out);
+ nx_append_dde(ddl_out, fifo_out + first_offset, first_free);
+ }
+
+ if (last_free > 0) {
+ last_free = NX_MIN(target_max - first_free, last_free);
+ if (last_free > 0) {
+ last_offset = fifo_free_last_offset(cur_out, used_out,
+ fifo_out_len);
+ nx_append_dde(ddl_out, fifo_out + last_offset,
+ last_free);
+ }
+ }
+
+ /* Target buffer size is used to limit the source data size
+ * based on previous measurements of compression ratio.
+ */
+
+ /* source_sz includes history */
+ source_sz = getp32(ddl_in, ddebc);
+ assert(source_sz > history_len);
+ source_sz = source_sz - history_len;
+
+ /* Estimating how much source is needed to 3/4 fill a
+ * target_max size target buffer. If we overshoot, then NX
+ * must repeat the job with smaller input and we waste
+ * bandwidth. If we undershoot then we use more NX calls than
+ * necessary.
+ */
+
+ source_sz_estimate = ((uint64_t)target_max * last_comp_ratio * 3UL)
+ / 4000;
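+
+	/* Worked example with illustrative numbers: for a target_max of
+	 * 1 MB and last_comp_ratio of 100 (the compressed input was
+	 * about 10% of the produced output), the estimate is
+	 * 1 MB * 100 * 3 / 4000, i.e. roughly 75 KB of source to 3/4
+	 * fill the target.
+	 */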
+
+ if (source_sz_estimate < source_sz) {
+ /* Target might be small, therefore limiting the
+ * source data.
+ */
+ source_sz = source_sz_estimate;
+ target_sz_estimate = target_max;
+ } else {
+ /* Source file might be small, therefore limiting target
+ * touch pages to a smaller value to save processor cycles.
+ */
+ target_sz_estimate = ((uint64_t)source_sz * 1000UL)
+ / (last_comp_ratio + 1);
+ target_sz_estimate = NX_MIN(2 * target_sz_estimate,
+ target_max);
+ }
+
+ source_sz = source_sz + history_len;
+
+ /* Some NX condition codes require submitting the NX job again.
+	 * The kernel doesn't handle NX page faults; it expects user
+	 * code to touch the pages.
+ */
+ pgfault_retries = NX_MAX_FAULTS;
+
+restart_nx:
+
+ putp32(ddl_in, ddebc, source_sz);
+
+ /* Fault in pages */
+ nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), page_sz, 1);
+ nx_touch_pages_dde(ddl_in, 0, page_sz, 0);
+ nx_touch_pages_dde(ddl_out, target_sz_estimate, page_sz, 1);
+
+ /* Send job to NX */
+ cc = nx_submit_job(ddl_in, ddl_out, cmdp, devhandle);
+
+ switch (cc) {
+
+ case ERR_NX_TRANSLATION:
+
+ /* We touched the pages ahead of time. In the most common case
+	 * we shouldn't be here. But maybe some pages were paged out.
+	 * The kernel should have placed the faulting address in fsaddr.
+ */
+ NXPRT(fprintf(stderr, "ERR_NX_TRANSLATION %p\n",
+ (void *)cmdp->crb.csb.fsaddr));
+
+ if (pgfault_retries == NX_MAX_FAULTS) {
+ /* Try once with exact number of pages */
+ --pgfault_retries;
+ goto restart_nx;
+ } else if (pgfault_retries > 0) {
+ /* If still faulting try fewer input pages
+ * assuming memory outage
+ */
+ if (source_sz > page_sz)
+ source_sz = NX_MAX(source_sz / 2, page_sz);
+ --pgfault_retries;
+ goto restart_nx;
+ } else {
+ fprintf(stderr, "cannot make progress; too many ");
+ fprintf(stderr, "page fault retries cc= %d\n", cc);
+ rc = -1;
+ goto err5;
+ }
+
+ case ERR_NX_DATA_LENGTH:
+
+ NXPRT(fprintf(stderr, "ERR_NX_DATA_LENGTH; "));
+ NXPRT(fprintf(stderr, "stream may have trailing data\n"));
+
+ /* Not an error in the most common case; it just says
+ * there is trailing data that we must examine.
+ *
+ * CC=3 CE(1)=0 CE(0)=1 indicates partial completion
+ * Fig.6-7 and Table 6-8.
+ */
+ nx_ce = get_csb_ce_ms3b(cmdp->crb.csb);
+
+ if (!csb_ce_termination(nx_ce) &&
+ csb_ce_partial_completion(nx_ce)) {
+ /* Check CPB for more information
+ * spbc and tpbc are valid
+ */
+ sfbt = getnn(cmdp->cpb, out_sfbt); /* Table 6-4 */
+ subc = getnn(cmdp->cpb, out_subc); /* Table 6-4 */
+ spbc = get32(cmdp->cpb, out_spbc_decomp);
+ tpbc = get32(cmdp->crb.csb, tpbc);
+ assert(target_max >= tpbc);
+
+ goto ok_cc3; /* not an error */
+ } else {
+ /* History length error when CE(1)=1 CE(0)=0. */
+ rc = -1;
+ fprintf(stderr, "history length error cc= %d\n", cc);
+ goto err5;
+ }
+
+ case ERR_NX_TARGET_SPACE:
+
+ /* Target buffer not large enough; retry smaller input
+ * data; give at least 1 byte. SPBC/TPBC are not valid.
+ */
+ assert(source_sz > history_len);
+ source_sz = ((source_sz - history_len + 2) / 2) + history_len;
+ NXPRT(fprintf(stderr, "ERR_NX_TARGET_SPACE; retry with "));
+ NXPRT(fprintf(stderr, "smaller input data src %d hist %d\n",
+ source_sz, history_len));
+ goto restart_nx;
+
+ case ERR_NX_OK:
+
+ /* This should not happen for gzip formatted data;
+ * we need trailing crc and isize
+ */
+ fprintf(stderr, "ERR_NX_OK\n");
+ spbc = get32(cmdp->cpb, out_spbc_decomp);
+ tpbc = get32(cmdp->crb.csb, tpbc);
+ assert(target_max >= tpbc);
+ assert(spbc >= history_len);
+ source_sz = spbc - history_len;
+ goto offsets_state;
+
+ default:
+ fprintf(stderr, "error: cc= %d\n", cc);
+ rc = -1;
+ goto err5;
+ }
+
+ok_cc3:
+
+ NXPRT(fprintf(stderr, "cc3: sfbt: %x\n", sfbt));
+
+ assert(spbc > history_len);
+ source_sz = spbc - history_len;
+
+ /* Table 6-4: Source Final Block Type (SFBT) describes the
+ * last processed deflate block and clues the software how to
+ * resume the next job. SUBC indicates how many input bits NX
+ * consumed but did not process. SPBC indicates how many
+ * bytes of source were given to the accelerator including
+ * history bytes.
+ */
+
+ switch (sfbt) {
+ int dhtlen;
+
+ case 0x0: /* Deflate final EOB received */
+
+ /* Calculating the checksum start position. */
+
+ source_sz = source_sz - subc / 8;
+ is_final = 1;
+ break;
+
+ /* Resume decompression cases are below. Basically
+ * indicates where NX has suspended and how to resume
+ * the input stream.
+ */
+
+ case 0x8: /* Within a literal block; use rembytecount */
+ case 0x9: /* Within a literal block; use rembytecount; bfinal=1 */
+
+ /* Supply the partially processed source byte again */
+ source_sz = source_sz - ((subc + 7) / 8);
+
+		/* SUBC LS 3bits: number of bits in the first source byte that
+		 * need to be processed.
+ * 000 means all 8 bits; Table 6-3
+ * Clear subc, histlen, sfbt, rembytecnt, dhtlen
+ */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+ putnn(cmdp->cpb, in_rembytecnt, getnn(cmdp->cpb,
+ out_rembytecnt));
+ break;
+
+ case 0xA: /* Within a FH block; */
+ case 0xB: /* Within a FH block; bfinal=1 */
+
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+ break;
+
+ case 0xC: /* Within a DH block; */
+ case 0xD: /* Within a DH block; bfinal=1 */
+
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+
+ dhtlen = getnn(cmdp->cpb, out_dhtlen);
+ putnn(cmdp->cpb, in_dhtlen, dhtlen);
+ assert(dhtlen >= 42);
+
+		/* Round up the dhtlen bit count to 128-bit quadwords */
+ dhtlen = (dhtlen + 127) / 128;
+
+ while (dhtlen > 0) { /* Copy dht from cpb.out to cpb.in */
+ --dhtlen;
+ cmdp->cpb.in_dht[dhtlen] = cmdp->cpb.out_dht[dhtlen];
+ }
+ break;
+
+ case 0xE: /* Within a block header; bfinal=0; */
+ /* Also given if source data exactly ends (SUBC=0) with
+ * EOB code with BFINAL=0. Means the next byte will
+ * contain a block header.
+ */
+ case 0xF: /* within a block header with BFINAL=1. */
+
+ source_sz = source_sz - ((subc + 7) / 8);
+
+ /* Clear subc, histlen, sfbt, rembytecnt, dhtlen */
+ cmdp->cpb.in_subc = 0;
+ cmdp->cpb.in_sfbt = 0;
+ putnn(cmdp->cpb, in_subc, subc % 8);
+ putnn(cmdp->cpb, in_sfbt, sfbt);
+
+ /* Engine did not process any data */
+ if (is_eof && (source_sz == 0))
+ is_final = 1;
+ }
+
+offsets_state:
+
+ /* Adjust the source and target buffer offsets and lengths */
+
+ NXPRT(fprintf(stderr, "offsets_state:\n"));
+
+ /* Delete input data from fifo_in */
+ used_in = used_in - source_sz;
+ cur_in = (cur_in + source_sz) % fifo_in_len;
+ input_file_offset = input_file_offset + source_sz;
+
+ /* Add output data to fifo_out */
+ used_out = used_out + tpbc;
+
+ assert(used_out <= fifo_out_len);
+
+ total_out = total_out + tpbc;
+
+ /* Deflate history is 32KB max. No need to supply more
+ * than 32KB on a resume.
+ */
+ history_len = (total_out > window_max) ? window_max : total_out;
+
+ /* To estimate expected expansion in the next NX job; 500 means 50%.
+ * Deflate best case is around 1 to 1000.
+ */
+ last_comp_ratio = (1000UL * ((uint64_t)source_sz + 1))
+ / ((uint64_t)tpbc + 1);
+ last_comp_ratio = NX_MAX(NX_MIN(1000UL, last_comp_ratio), 1);
+ NXPRT(fprintf(stderr, "comp_ratio %ld source_sz %d spbc %d tpbc %d\n",
+ last_comp_ratio, source_sz, spbc, tpbc));
+
+ resuming = 1;
+
+finish_state:
+
+ NXPRT(fprintf(stderr, "finish_state:\n"));
+
+ if (is_final) {
+ if (used_out)
+ goto write_state; /* More data to write out */
+ else if (used_in < 8) {
+ /* Need at least 8 more bytes containing gzip crc
+ * and isize.
+ */
+ rc = -1;
+ goto err4;
+ } else {
+ /* Compare checksums and exit */
+ int i;
+ unsigned char tail[8];
+ uint32_t cksum, isize;
+
+ for (i = 0; i < 8; i++)
+ tail[i] = fifo_in[(cur_in + i) % fifo_in_len];
+ fprintf(stderr, "computed checksum %08x isize %08x\n",
+ cmdp->cpb.out_crc, (uint32_t) (total_out
+ % (1ULL<<32)));
+ cksum = ((uint32_t) tail[0] | (uint32_t) tail[1]<<8
+ | (uint32_t) tail[2]<<16
+ | (uint32_t) tail[3]<<24);
+ isize = ((uint32_t) tail[4] | (uint32_t) tail[5]<<8
+ | (uint32_t) tail[6]<<16
+ | (uint32_t) tail[7]<<24);
+ fprintf(stderr, "stored checksum %08x isize %08x\n",
+ cksum, isize);
+
+ if (cksum == cmdp->cpb.out_crc && isize == (uint32_t)
+ (total_out % (1ULL<<32))) {
+ rc = 0; goto ok1;
+ } else {
+ rc = -1; goto err4;
+ }
+ }
+ } else
+ goto read_state;
+
+ return -1;
+
+err1:
+ fprintf(stderr, "error: not a gzip file, expect %x, read %x\n",
+ expect, c);
+ return -1;
+
+err2:
+ fprintf(stderr, "error: the FLG byte is wrong or not being handled\n");
+ return -1;
+
+err3:
+ fprintf(stderr, "error: gzip header\n");
+ return -1;
+
+err4:
+ fprintf(stderr, "error: checksum missing or mismatch\n");
+
+err5:
+ok1:
+ fprintf(stderr, "decomp is complete: fclose\n");
+ fclose(outf);
+
+ return rc;
+}
+
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct sigaction act;
+ void *handle;
+
+ nx_dbg = 0;
+ nx_gzip_log = NULL;
+ act.sa_handler = 0;
+ act.sa_sigaction = nxu_sigsegv_handler;
+ act.sa_flags = SA_SIGINFO;
+ act.sa_restorer = 0;
+ sigemptyset(&act.sa_mask);
+ sigaction(SIGSEGV, &act, NULL);
+
+ handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
+ if (!handle) {
+ fprintf(stderr, "Unable to init NX, errno %d\n", errno);
+ exit(-1);
+ }
+
+ rc = decompress_file(argc, argv, handle);
+
+ nx_function_end(handle);
+
+ return rc;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
new file mode 100644
index 000000000000..7496a83f9c9d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* P9 gzip sample code for demonstrating the P9 NX hardware interface.
+ * Not intended for production use or for performance or compression
+ * ratio measurements. For simplicity of demonstration, this sample
+ * code compresses in to fixed Huffman blocks only (Deflate btype=1)
+ * and has very simple memory management. Dynamic Huffman blocks
+ * (Deflate btype=2) are more involved as detailed in the user guide.
+ * Note also that /dev/crypto/nx-gzip, VAS and skiboot support are
+ * required.
+ *
+ * Copyright 2020 IBM Corp.
+ *
+ * https://github.com/libnxz/power-gzip for zlib api and other utils
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ * Definitions of acronyms used here. See
+ * P9 NX Gzip Accelerator User's Manual for details:
+ * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
+ *
+ * adler/crc: 32 bit checksums appended to stream tail
+ * ce: completion extension
+ * cpb: coprocessor parameter block (metadata)
+ * crb: coprocessor request block (command)
+ * csb: coprocessor status block (status)
+ * dht: dynamic huffman table
+ * dde: data descriptor element (address, length)
+ * ddl: list of ddes
+ * dh/fh: dynamic and fixed huffman types
+ * fc: coprocessor function code
+ * histlen: history/dictionary length
+ * history: sliding window of up to 32KB of data
+ * lzcount: Deflate LZ symbol counts
+ * rembytecnt: remaining byte count
+ * sfbt: source final block type; last block's type during decomp
+ * spbc: source processed byte count
+ * subc: source unprocessed bit count
+ * tebc: target ending bit count; valid bits in the last byte
+ * tpbc: target processed byte count
+ * vas: virtual accelerator switch; the user mode interface
+ */
+
+#define _ISOC11_SOURCE // For aligned_alloc()
+#define _DEFAULT_SOURCE // For endian.h
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <endian.h>
+#include <bits/endian.h>
+#include <sys/ioctl.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include "nxu.h"
+#include "nx.h"
+
+int nx_dbg;
+FILE *nx_gzip_log;
+
+#define NX_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
+#define FNAME_MAX 1024
+#define FEXT ".nx.gz"
+
+/*
+ * LZ counts returned in the user supplied nx_gzip_crb_cpb_t structure.
+ */
+static int compress_fht_sample(char *src, uint32_t srclen, char *dst,
+ uint32_t dstlen, int with_count,
+ struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ uint32_t fc;
+
+ assert(!!cmdp);
+
+ put32(cmdp->crb, gzip_fc, 0); /* clear */
+ fc = (with_count) ? GZIP_FC_COMPRESS_RESUME_FHT_COUNT :
+ GZIP_FC_COMPRESS_RESUME_FHT;
+ putnn(cmdp->crb, gzip_fc, fc);
+ putnn(cmdp->cpb, in_histlen, 0); /* resuming with no history */
+ memset((void *) &cmdp->crb.csb, 0, sizeof(cmdp->crb.csb));
+
+ /* Section 6.6 programming notes; spbc may be in two different
+ * places depending on FC.
+ */
+ if (!with_count)
+ put32(cmdp->cpb, out_spbc_comp, 0);
+ else
+ put32(cmdp->cpb, out_spbc_comp_with_count, 0);
+
+ /* Figure 6-3 6-4; CSB location */
+ put64(cmdp->crb, csb_address, 0);
+ put64(cmdp->crb, csb_address,
+ (uint64_t) &cmdp->crb.csb & csb_address_mask);
+
+ /* Source direct dde (scatter-gather list) */
+ clear_dde(cmdp->crb.source_dde);
+ putnn(cmdp->crb.source_dde, dde_count, 0);
+ put32(cmdp->crb.source_dde, ddebc, srclen);
+ put64(cmdp->crb.source_dde, ddead, (uint64_t) src);
+
+ /* Target direct dde (scatter-gather list) */
+ clear_dde(cmdp->crb.target_dde);
+ putnn(cmdp->crb.target_dde, dde_count, 0);
+ put32(cmdp->crb.target_dde, ddebc, dstlen);
+ put64(cmdp->crb.target_dde, ddead, (uint64_t) dst);
+
+ /* Submit the crb, the job descriptor, to the accelerator */
+ return nxu_submit_job(cmdp, handle);
+}
+
+/*
+ * Prepares a blank no filename no timestamp gzip header and returns
+ * the number of bytes written to buf.
+ * Gzip specification at https://tools.ietf.org/html/rfc1952
+ */
+int gzip_header_blank(char *buf)
+{
+ int i = 0;
+
+ buf[i++] = 0x1f; /* ID1 */
+ buf[i++] = 0x8b; /* ID2 */
+ buf[i++] = 0x08; /* CM */
+ buf[i++] = 0x00; /* FLG */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x00; /* MTIME */
+ buf[i++] = 0x04; /* XFL 4=fastest */
+ buf[i++] = 0x03; /* OS UNIX */
+
+ return i;
+}
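+
+/* The header built above is always the same 10 bytes:
+ *
+ *   1f 8b 08 00 00 00 00 00 04 03
+ *
+ * i.e. the gzip magic, CM=8 (deflate), no flags, zero MTIME, XFL=4
+ * and OS=3 (Unix).
+ */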
+
+/* Caller must free the allocated buffer; returns nonzero on error. */
+int read_alloc_input_file(char *fname, char **buf, size_t *bufsize)
+{
+ struct stat statbuf;
+ FILE *fp;
+ char *p;
+ size_t num_bytes;
+
+ if (stat(fname, &statbuf)) {
+ perror(fname);
+ return(-1);
+ }
+ fp = fopen(fname, "r");
+ if (fp == NULL) {
+ perror(fname);
+ return(-1);
+ }
+ assert(NULL != (p = (char *) malloc(statbuf.st_size)));
+ num_bytes = fread(p, 1, statbuf.st_size, fp);
+ if (ferror(fp) || (num_bytes != statbuf.st_size)) {
+ perror(fname);
+ return(-1);
+ }
+ *buf = p;
+ *bufsize = num_bytes;
+ return 0;
+}
+
+/* Returns nonzero on error */
+int write_output_file(char *fname, char *buf, size_t bufsize)
+{
+ FILE *fp;
+ size_t num_bytes;
+
+ fp = fopen(fname, "w");
+ if (fp == NULL) {
+ perror(fname);
+ return(-1);
+ }
+ num_bytes = fwrite(buf, 1, bufsize, fp);
+ if (ferror(fp) || (num_bytes != bufsize)) {
+ perror(fname);
+ return(-1);
+ }
+ fclose(fp);
+ return 0;
+}
+
+/*
+ * Z_SYNC_FLUSH as described in zlib.h.
+ * Returns number of appended bytes
+ */
+int append_sync_flush(char *buf, int tebc, int final)
+{
+ uint64_t flush;
+ int shift = (tebc & 0x7);
+
+ if (tebc > 0) {
+ /* Last byte is partially full */
+ buf = buf - 1;
+ *buf = *buf & (unsigned char) ((1<<tebc)-1);
+ } else
+ *buf = 0;
+ flush = ((0x1ULL & final) << shift) | *buf;
+ shift = shift + 3; /* BFINAL and BTYPE written */
+ shift = (shift <= 8) ? 8 : 16;
+ flush |= (0xFFFF0000ULL) << shift; /* Zero length block */
+ shift = shift + 32;
+ while (shift > 0) {
+ *buf++ = (unsigned char) (flush & 0xffULL);
+ flush = flush >> 8;
+ shift = shift - 8;
+ }
+ return(((tebc > 5) || (tebc == 0)) ? 5 : 4);
+}
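+
+/* Example of the bytes appended above: called at a byte-aligned
+ * position (tebc = 0) with final = 0, the function emits the five
+ * bytes
+ *
+ *   00 00 00 ff ff
+ *
+ * i.e. an empty stored block (BFINAL=0, BTYPE=00, padding to the
+ * byte boundary, LEN=0x0000, NLEN=0xffff) and returns 5.
+ */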
+
+/*
+ * Final deflate block bit. This call assumes the block
+ * beginning is byte aligned.
+ */
+static void set_bfinal(void *buf, int bfinal)
+{
+ char *b = buf;
+
+ if (bfinal)
+ *b = *b | (unsigned char) 0x01;
+ else
+ *b = *b & (unsigned char) 0xfe;
+}
+
+int compress_file(int argc, char **argv, void *handle)
+{
+ char *inbuf, *outbuf, *srcbuf, *dstbuf;
+ char outname[FNAME_MAX];
+ uint32_t srclen, dstlen;
+ uint32_t flushlen, chunk;
+ size_t inlen, outlen, dsttotlen, srctotlen;
+ uint32_t crc, spbc, tpbc, tebc;
+ int lzcounts = 0;
+ int cc;
+ int num_hdr_bytes;
+ struct nx_gzip_crb_cpb_t *cmdp;
+ uint32_t pagelen = 65536;
+ int fault_tries = NX_MAX_FAULTS;
+
+ cmdp = (void *)(uintptr_t)
+ aligned_alloc(sizeof(struct nx_gzip_crb_cpb_t),
+ sizeof(struct nx_gzip_crb_cpb_t));
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: %s <fname>\n", argv[0]);
+ exit(-1);
+ }
+ if (read_alloc_input_file(argv[1], &inbuf, &inlen))
+ exit(-1);
+ fprintf(stderr, "file %s read, %ld bytes\n", argv[1], inlen);
+
+ /* Generous output buffer for header/trailer */
+ outlen = 2 * inlen + 1024;
+
+ assert(NULL != (outbuf = (char *)malloc(outlen)));
+ nxu_touch_pages(outbuf, outlen, pagelen, 1);
+
+	/* Compress piecemeal in smallish chunks (1<<22 = 4 MB) */
+ chunk = 1<<22;
+
+ /* Write the gzip header to the stream */
+ num_hdr_bytes = gzip_header_blank(outbuf);
+ dstbuf = outbuf + num_hdr_bytes;
+ outlen = outlen - num_hdr_bytes;
+ dsttotlen = num_hdr_bytes;
+
+ srcbuf = inbuf;
+ srctotlen = 0;
+
+ /* Init the CRB, the coprocessor request block */
+ memset(&cmdp->crb, 0, sizeof(cmdp->crb));
+
+ /* Initial gzip crc32 */
+ put32(cmdp->cpb, in_crc, 0);
+
+ while (inlen > 0) {
+
+ /* Submit chunk size source data per job */
+ srclen = NX_MIN(chunk, inlen);
+ /* Supply large target in case data expands */
+ dstlen = NX_MIN(2*srclen, outlen);
+
+ /* Page faults are handled by the user code */
+
+		/* Fault-in pages; an improved implementation wouldn't touch so
+ * many pages but would try to estimate the
+ * compression ratio and adjust both the src and dst
+ * touch amounts.
+ */
+ nxu_touch_pages(cmdp, sizeof(struct nx_gzip_crb_cpb_t), pagelen,
+ 1);
+ nxu_touch_pages(srcbuf, srclen, pagelen, 0);
+ nxu_touch_pages(dstbuf, dstlen, pagelen, 1);
+
+ cc = compress_fht_sample(
+ srcbuf, srclen,
+ dstbuf, dstlen,
+ lzcounts, cmdp, handle);
+
+ if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC &&
+ cc != ERR_NX_TRANSLATION) {
+ fprintf(stderr, "nx error: cc= %d\n", cc);
+ exit(-1);
+ }
+
+ /* Page faults are handled by the user code */
+ if (cc == ERR_NX_TRANSLATION) {
+ NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc));
+ NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n",
+ fault_tries,
+ (unsigned long long) cmdp->crb.csb.fsaddr));
+ fault_tries--;
+ if (fault_tries > 0) {
+ continue;
+ } else {
+ fprintf(stderr, "error: cannot progress; ");
+ fprintf(stderr, "too many faults\n");
+ exit(-1);
+ };
+ }
+
+ fault_tries = NX_MAX_FAULTS; /* Reset for the next chunk */
+
+ inlen = inlen - srclen;
+ srcbuf = srcbuf + srclen;
+ srctotlen = srctotlen + srclen;
+
+ /* Two possible locations for spbc depending on the function
+ * code.
+ */
+ spbc = (!lzcounts) ? get32(cmdp->cpb, out_spbc_comp) :
+ get32(cmdp->cpb, out_spbc_comp_with_count);
+ assert(spbc == srclen);
+
+ /* Target byte count */
+ tpbc = get32(cmdp->crb.csb, tpbc);
+ /* Target ending bit count */
+ tebc = getnn(cmdp->cpb, out_tebc);
+ NXPRT(fprintf(stderr, "compressed chunk %d ", spbc));
+ NXPRT(fprintf(stderr, "to %d bytes, tebc= %d\n", tpbc, tebc));
+
+ if (inlen > 0) { /* More chunks to go */
+ set_bfinal(dstbuf, 0);
+ dstbuf = dstbuf + tpbc;
+ dsttotlen = dsttotlen + tpbc;
+ outlen = outlen - tpbc;
+ /* Round up to the next byte with a flush
+			 * block; do not set the BFINAL bit.
+ */
+ flushlen = append_sync_flush(dstbuf, tebc, 0);
+ dsttotlen = dsttotlen + flushlen;
+ outlen = outlen - flushlen;
+ dstbuf = dstbuf + flushlen;
+ NXPRT(fprintf(stderr, "added sync_flush %d bytes\n",
+ flushlen));
+ } else { /* Done */
+ /* Set the BFINAL bit of the last block per Deflate
+ * specification.
+ */
+ set_bfinal(dstbuf, 1);
+ dstbuf = dstbuf + tpbc;
+ dsttotlen = dsttotlen + tpbc;
+ outlen = outlen - tpbc;
+ }
+
+ /* Resuming crc32 for the next chunk */
+ crc = get32(cmdp->cpb, out_crc);
+ put32(cmdp->cpb, in_crc, crc);
+ crc = be32toh(crc);
+ }
+
+ /* Append crc32 and ISIZE to the end */
+ memcpy(dstbuf, &crc, 4);
+ memcpy(dstbuf+4, &srctotlen, 4);
+ dsttotlen = dsttotlen + 8;
+ outlen = outlen - 8;
+
+ assert(FNAME_MAX > (strlen(argv[1]) + strlen(FEXT)));
+ strcpy(outname, argv[1]);
+ strcat(outname, FEXT);
+ if (write_output_file(outname, outbuf, dsttotlen)) {
+ fprintf(stderr, "write error: %s\n", outname);
+ exit(-1);
+ }
+
+ fprintf(stderr, "compressed %ld to %ld bytes total, ", srctotlen,
+ dsttotlen);
+ fprintf(stderr, "crc32 checksum = %08x\n", crc);
+
+ if (inbuf != NULL)
+ free(inbuf);
+
+ if (outbuf != NULL)
+ free(outbuf);
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int rc;
+ struct sigaction act;
+ void *handle;
+
+ nx_dbg = 0;
+ nx_gzip_log = NULL;
+ act.sa_handler = 0;
+ act.sa_sigaction = nxu_sigsegv_handler;
+ act.sa_flags = SA_SIGINFO;
+ act.sa_restorer = 0;
+ sigemptyset(&act.sa_mask);
+ sigaction(SIGSEGV, &act, NULL);
+
+ handle = nx_function_begin(NX_FUNC_COMP_GZIP, 0);
+ if (!handle) {
+ fprintf(stderr, "Unable to init NX, errno %d\n", errno);
+ exit(-1);
+ }
+
+ rc = compress_file(argc, argv, handle);
+
+ nx_function_end(handle);
+
+ return rc;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c b/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c
new file mode 100644
index 000000000000..c055885da40a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Copyright 2020 IBM Corp.
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/fcntl.h>
+#include <sys/mman.h>
+#include <endian.h>
+#include <bits/endian.h>
+#include <sys/ioctl.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include "vas-api.h"
+#include "nx.h"
+#include "copy-paste.h"
+#include "nxu.h"
+#include "nx_dbg.h"
+#include <sys/platform/ppc.h>
+
+#define barrier()
+#define hwsync() ({ asm volatile("sync" ::: "memory"); })
+
+#ifndef NX_NO_CPU_PRI
+#define cpu_pri_default() ({ asm volatile ("or 2, 2, 2"); })
+#define cpu_pri_low() ({ asm volatile ("or 31, 31, 31"); })
+#else
+#define cpu_pri_default()
+#define cpu_pri_low()
+#endif
+
+void *nx_fault_storage_address;
+
+struct nx_handle {
+ int fd;
+ int function;
+ void *paste_addr;
+};
+
+static int open_device_nodes(char *devname, int pri, struct nx_handle *handle)
+{
+ int rc, fd;
+ void *addr;
+ struct vas_tx_win_open_attr txattr;
+
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, " open device name %s\n", devname);
+ return -errno;
+ }
+
+ memset(&txattr, 0, sizeof(txattr));
+ txattr.version = 1;
+ txattr.vas_id = pri;
+ rc = ioctl(fd, VAS_TX_WIN_OPEN, (unsigned long)&txattr);
+ if (rc < 0) {
+ fprintf(stderr, "ioctl() n %d, error %d\n", rc, errno);
+ rc = -errno;
+ goto out;
+ }
+
+ addr = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0ULL);
+ if (addr == MAP_FAILED) {
+ fprintf(stderr, "mmap() failed, errno %d\n", errno);
+ rc = -errno;
+ goto out;
+ }
+ handle->fd = fd;
+ handle->paste_addr = (void *)((char *)addr + 0x400);
+
+ rc = 0;
+out:
+ close(fd);
+ return rc;
+}
+
+void *nx_function_begin(int function, int pri)
+{
+ int rc;
+ char *devname = "/dev/crypto/nx-gzip";
+ struct nx_handle *nxhandle;
+
+ if (function != NX_FUNC_COMP_GZIP) {
+ errno = EINVAL;
+ fprintf(stderr, " NX_FUNC_COMP_GZIP not found\n");
+ return NULL;
+ }
+
+
+ nxhandle = malloc(sizeof(*nxhandle));
+ if (!nxhandle) {
+ errno = ENOMEM;
+ fprintf(stderr, " No memory\n");
+ return NULL;
+ }
+
+ nxhandle->function = function;
+ rc = open_device_nodes(devname, pri, nxhandle);
+ if (rc < 0) {
+ errno = -rc;
+ fprintf(stderr, " open_device_nodes failed\n");
+ return NULL;
+ }
+
+ return nxhandle;
+}
+
+int nx_function_end(void *handle)
+{
+ int rc = 0;
+ struct nx_handle *nxhandle = handle;
+
+ rc = munmap(nxhandle->paste_addr - 0x400, 4096);
+ if (rc < 0) {
+ fprintf(stderr, "munmap() failed, errno %d\n", errno);
+ return rc;
+ }
+ close(nxhandle->fd);
+ free(nxhandle);
+
+ return rc;
+}
+
+static int nx_wait_for_csb(struct nx_gzip_crb_cpb_t *cmdp)
+{
+ long poll = 0;
+ uint64_t t;
+
+ /* Save power and let other threads use the h/w. top may show
+	 * 100% but only because the OS doesn't know we slowed this
+ * h/w thread while polling. We're letting other threads have
+ * higher throughput on the core.
+ */
+ cpu_pri_low();
+
+#define CSB_MAX_POLL 200000000UL
+#define USLEEP_TH 300000UL
+
+ t = __ppc_get_timebase();
+
+ while (getnn(cmdp->crb.csb, csb_v) == 0) {
+ ++poll;
+ hwsync();
+
+ cpu_pri_low();
+
+		/* usleep(0) takes around 29000 ticks (~60 us).
+		 * A threshold of 300000 ticks means spinning for about
+		 * 600 us before starting to sleep.
+ */
+ if ((__ppc_get_timebase() - t) > USLEEP_TH) {
+ cpu_pri_default();
+ usleep(1);
+ }
+
+ if (poll > CSB_MAX_POLL)
+ break;
+
+ /* Fault address from signal handler */
+ if (nx_fault_storage_address) {
+ cpu_pri_default();
+ return -EAGAIN;
+ }
+
+ }
+
+ cpu_pri_default();
+
+ /* hw has updated csb and output buffer */
+ hwsync();
+
+ /* Check CSB flags. */
+ if (getnn(cmdp->crb.csb, csb_v) == 0) {
+ fprintf(stderr, "CSB still not valid after %d polls.\n",
+ (int) poll);
+ prt_err("CSB still not valid after %d polls, giving up.\n",
+ (int) poll);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int nxu_run_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ int i, ret, retries;
+ struct nx_handle *nxhandle = handle;
+
+ assert(handle != NULL);
+ i = 0;
+ retries = 5000;
+ while (i++ < retries) {
+ hwsync();
+ vas_copy(&cmdp->crb, 0);
+ ret = vas_paste(nxhandle->paste_addr, 0);
+ hwsync();
+
+ NXPRT(fprintf(stderr, "Paste attempt %d/%d returns 0x%x\n",
+ i, retries, ret));
+
+ if ((ret == 2) || (ret == 3)) {
+
+ ret = nx_wait_for_csb(cmdp);
+ if (!ret) {
+ goto out;
+ } else if (ret == -EAGAIN) {
+ long x;
+
+ prt_err("Touching address %p, 0x%lx\n",
+ nx_fault_storage_address,
+ *(long *) nx_fault_storage_address);
+ x = *(long *) nx_fault_storage_address;
+ *(long *) nx_fault_storage_address = x;
+ nx_fault_storage_address = 0;
+ continue;
+ } else {
+ prt_err("wait_for_csb() returns %d\n", ret);
+ break;
+ }
+ } else {
+ if (i < 10) {
+ /* spin for few ticks */
+#define SPIN_TH 500UL
+ uint64_t fail_spin;
+
+ fail_spin = __ppc_get_timebase();
+ while ((__ppc_get_timebase() - fail_spin) <
+ SPIN_TH)
+ ;
+ } else {
+ /* sleep */
+				static unsigned int pr;
+
+ if (pr++ % 100 == 0) {
+ prt_err("Paste attempt %d/", i);
+ prt_err("%d, failed pid= %d\n", retries,
+ getpid());
+ }
+ usleep(1);
+ }
+ continue;
+ }
+ }
+
+out:
+ cpu_pri_default();
+
+ return ret;
+}
+
+int nxu_submit_job(struct nx_gzip_crb_cpb_t *cmdp, void *handle)
+{
+ int cc;
+
+ cc = nxu_run_job(cmdp, handle);
+
+ if (!cc)
+ cc = getnn(cmdp->crb.csb, csb_cc); /* CC Table 6-8 */
+
+ return cc;
+}
+
+
+void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx)
+{
+ fprintf(stderr, "%d: Got signal %d si_code %d, si_addr %p\n", getpid(),
+ sig, info->si_code, info->si_addr);
+
+ nx_fault_storage_address = info->si_addr;
+}
+
+/*
+ * Fault in pages prior to NX job submission. wr=1 may be required to
+ * touch writable pages: reading a system zero page does not fault
+ * in a real page as intended. Typically set wr=1 for NX target
+ * pages and wr=0 for NX source pages.
+ */
+int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr)
+{
+ char *begin = buf;
+ char *end = (char *) buf + buf_len - 1;
+ volatile char t;
+
+ assert(buf_len >= 0 && !!buf);
+
+ NXPRT(fprintf(stderr, "touch %p %p len 0x%lx wr=%d\n", buf,
+ (buf + buf_len), buf_len, wr));
+
+ if (buf_len <= 0 || buf == NULL)
+ return -1;
+
+ do {
+ t = *begin;
+ if (wr)
+ *begin = t;
+ begin = begin + page_len;
+ } while (begin < end);
+
+	/* When buf_len is small or the buf tail is in another page */
+ t = *end;
+ if (wr)
+ *end = t;
+
+ return 0;
+}
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h b/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
new file mode 100644
index 000000000000..0db2d6485037
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* From asm-compat.h */
+#define __stringify_in_c(...) #__VA_ARGS__
+#define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+
+/*
+ * Macros taken from arch/powerpc/include/asm/ppc-opcode.h and other
+ * header files.
+ */
+#define ___PPC_RA(a) (((a) & 0x1f) << 16)
+#define ___PPC_RB(b) (((b) & 0x1f) << 11)
+
+#define PPC_INST_COPY 0x7c20060c
+#define PPC_INST_PASTE 0x7c20070d
+
+#define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define CR0_SHIFT 28
+#define CR0_MASK 0xF
+/*
+ * Copy/paste instructions:
+ *
+ * copy RA,RB
+ * Copy contents of address (RA) + effective_address(RB)
+ * to internal copy-buffer.
+ *
+ * paste RA,RB
+ * Paste contents of internal copy-buffer to the address
+ * (RA) + effective_address(RB)
+ */
+static inline int vas_copy(void *crb, int offset)
+{
+ asm volatile(PPC_COPY(%0, %1)";"
+ :
+ : "b" (offset), "b" (crb)
+ : "memory");
+
+ return 0;
+}
+
+static inline int vas_paste(void *paste_address, int offset)
+{
+ __u32 cr;
+
+ cr = 0;
+ asm volatile(PPC_PASTE(%1, %2)";"
+ "mfocrf %0, 0x80;"
+ : "=r" (cr)
+ : "b" (offset), "b" (paste_address)
+ : "memory", "cr0");
+
+ return (cr >> CR0_SHIFT) & CR0_MASK;
+}
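+
+/*
+ * Typical submission sequence, as used by nxu_run_job() in
+ * gzip_vas.c:
+ *
+ *   vas_copy(&cmdp->crb, 0);
+ *   ret = vas_paste(nxhandle->paste_addr, 0);
+ *
+ * vas_paste() returns the CR0 field written by the paste
+ * instruction; the caller treats 2 or 3 as "paste accepted" and then
+ * polls the CSB for completion.
+ */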
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/crb.h b/tools/testing/selftests/powerpc/nx-gzip/include/crb.h
new file mode 100644
index 000000000000..ab101085fa7e
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/crb.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __CRB_H
+#define __CRB_H
+#include <linux/types.h>
+#include "nx.h"
+
+/* CCW 842 CI/FC masks
+ * NX P8 workbook, section 4.3.1, figure 4-6
+ * "CI/FC Boundary by NX CT type"
+ */
+#define CCW_CI_842 (0x00003ff8)
+#define CCW_FC_842 (0x00000007)
+
+/* Chapter 6.5.8 Coprocessor-Completion Block (CCB) */
+
+#define CCB_VALUE (0x3fffffffffffffff)
+#define CCB_ADDRESS (0xfffffffffffffff8)
+#define CCB_CM (0x0000000000000007)
+#define CCB_CM0 (0x0000000000000004)
+#define CCB_CM12 (0x0000000000000003)
+
+#define CCB_CM0_ALL_COMPLETIONS (0x0)
+#define CCB_CM0_LAST_IN_CHAIN (0x4)
+#define CCB_CM12_STORE (0x0)
+#define CCB_CM12_INTERRUPT (0x1)
+
+#define CCB_SIZE (0x10)
+#define CCB_ALIGN CCB_SIZE
+
+struct coprocessor_completion_block {
+ __be64 value;
+ __be64 address;
+} __aligned(CCB_ALIGN);
+
+
+/* Chapter 6.5.7 Coprocessor-Status Block (CSB) */
+
+#define CSB_V (0x80)
+#define CSB_F (0x04)
+#define CSB_CH (0x03)
+#define CSB_CE_INCOMPLETE (0x80)
+#define CSB_CE_TERMINATION (0x40)
+#define CSB_CE_TPBC (0x20)
+
+#define CSB_CC_SUCCESS (0)
+#define CSB_CC_INVALID_ALIGN (1)
+#define CSB_CC_OPERAND_OVERLAP (2)
+#define CSB_CC_DATA_LENGTH (3)
+#define CSB_CC_TRANSLATION (5)
+#define CSB_CC_PROTECTION (6)
+#define CSB_CC_RD_EXTERNAL (7)
+#define CSB_CC_INVALID_OPERAND (8)
+#define CSB_CC_PRIVILEGE (9)
+#define CSB_CC_INTERNAL (10)
+#define CSB_CC_WR_EXTERNAL (12)
+#define CSB_CC_NOSPC (13)
+#define CSB_CC_EXCESSIVE_DDE (14)
+#define CSB_CC_WR_TRANSLATION (15)
+#define CSB_CC_WR_PROTECTION (16)
+#define CSB_CC_UNKNOWN_CODE (17)
+#define CSB_CC_ABORT (18)
+#define CSB_CC_TRANSPORT (20)
+#define CSB_CC_SEGMENTED_DDL (31)
+#define CSB_CC_PROGRESS_POINT (32)
+#define CSB_CC_DDE_OVERFLOW (33)
+#define CSB_CC_SESSION (34)
+#define CSB_CC_PROVISION (36)
+#define CSB_CC_CHAIN (37)
+#define CSB_CC_SEQUENCE (38)
+#define CSB_CC_HW (39)
+
+#define CSB_SIZE (0x10)
+#define CSB_ALIGN CSB_SIZE
+
+struct coprocessor_status_block {
+ __u8 flags;
+ __u8 cs;
+ __u8 cc;
+ __u8 ce;
+ __be32 count;
+ __be64 address;
+} __aligned(CSB_ALIGN);
+
+
+/* Chapter 6.5.10 Data-Descriptor List (DDL)
+ * each list contains one or more Data-Descriptor Entries (DDE)
+ */
+
+#define DDE_P (0x8000)
+
+#define DDE_SIZE (0x10)
+#define DDE_ALIGN DDE_SIZE
+
+struct data_descriptor_entry {
+ __be16 flags;
+ __u8 count;
+ __u8 index;
+ __be32 length;
+ __be64 address;
+} __aligned(DDE_ALIGN);
+
+
+/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
+
+#define CRB_SIZE (0x80)
+#define CRB_ALIGN (0x100) /* Errata: requires 256 alignment */
+
+
+/* Coprocessor Status Block field
+ * ADDRESS address of CSB
+ * C CCB is valid
+ * AT 0 = addrs are virtual, 1 = addrs are phys
+ * M enable perf monitor
+ */
+#define CRB_CSB_ADDRESS (0xfffffffffffffff0)
+#define CRB_CSB_C (0x0000000000000008)
+#define CRB_CSB_AT (0x0000000000000002)
+#define CRB_CSB_M (0x0000000000000001)
+
+struct coprocessor_request_block {
+ __be32 ccw;
+ __be32 flags;
+ __be64 csb_addr;
+
+ struct data_descriptor_entry source;
+ struct data_descriptor_entry target;
+
+ struct coprocessor_completion_block ccb;
+
+ __u8 reserved[48];
+
+ struct coprocessor_status_block csb;
+} __aligned(CRB_ALIGN);
+
+#define crb_csb_addr(c) __be64_to_cpu(c->csb_addr)
+#define crb_nx_fault_addr(c) __be64_to_cpu(c->stamp.nx.fault_storage_addr)
+#define crb_nx_flags(c) c->stamp.nx.flags
+#define crb_nx_fault_status(c) c->stamp.nx.fault_status
+#define crb_nx_pswid(c) c->stamp.nx.pswid
+
+
+/* RFC02167 Initiate Coprocessor Instructions document
+ * Chapter 8.2.1.1.1 RS
+ * Chapter 8.2.3 Coprocessor Directive
+ * Chapter 8.2.4 Execution
+ *
+ * The CCW must be converted to BE before passing to icswx()
+ */
+
+#define CCW_PS (0xff000000)
+#define CCW_CT (0x00ff0000)
+#define CCW_CD (0x0000ffff)
+#define CCW_CL (0x0000c000)
+
+#endif
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nx.h b/tools/testing/selftests/powerpc/nx-gzip/include/nx.h
new file mode 100644
index 000000000000..1abe23fc29e8
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nx.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020 IBM Corp.
+ *
+ */
+#ifndef _NX_H
+#define _NX_H
+
+#include <stdbool.h>
+
+#define NX_FUNC_COMP_842 1
+#define NX_FUNC_COMP_GZIP 2
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+struct nx842_func_args {
+ bool use_crc;
+ bool decompress; /* true decompress; false compress */
+ bool move_data;
+ int timeout; /* seconds */
+};
+
+struct nxbuf_t {
+ int len;
+ char *buf;
+};
+
+/* @function should be EFT (aka 842), GZIP etc */
+void *nx_function_begin(int function, int pri);
+
+int nx_function(void *handle, struct nxbuf_t *in, struct nxbuf_t *out,
+ void *arg);
+
+int nx_function_end(void *handle);
+
+#endif /* _NX_H */
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h b/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
new file mode 100644
index 000000000000..16464e19c47f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020 IBM Corporation
+ *
+ */
+
+#ifndef _NXU_DBG_H_
+#define _NXU_DBG_H_
+
+#include <sys/file.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <time.h>
+#include <pthread.h>
+
+extern FILE * nx_gzip_log;
+extern int nx_gzip_trace;
+extern unsigned int nx_gzip_inflate_impl;
+extern unsigned int nx_gzip_deflate_impl;
+extern unsigned int nx_gzip_inflate_flags;
+extern unsigned int nx_gzip_deflate_flags;
+
+extern int nx_dbg;
+pthread_mutex_t mutex_log;
+
+#define nx_gzip_trace_enabled() (nx_gzip_trace & 0x1)
+#define nx_gzip_hw_trace_enabled() (nx_gzip_trace & 0x2)
+#define nx_gzip_sw_trace_enabled() (nx_gzip_trace & 0x4)
+#define nx_gzip_gather_statistics() (nx_gzip_trace & 0x8)
+#define nx_gzip_per_stream_stat() (nx_gzip_trace & 0x10)
+
+#define prt(fmt, ...) do { \
+ pthread_mutex_lock(&mutex_log); \
+ flock(nx_gzip_log->_fileno, LOCK_EX); \
+ time_t t; struct tm *m; time(&t); m = localtime(&t); \
+ fprintf(nx_gzip_log, "[%04d/%02d/%02d %02d:%02d:%02d] " \
+ "pid %d: " fmt, \
+ (int)m->tm_year + 1900, (int)m->tm_mon+1, (int)m->tm_mday, \
+ (int)m->tm_hour, (int)m->tm_min, (int)m->tm_sec, \
+ (int)getpid(), ## __VA_ARGS__); \
+ fflush(nx_gzip_log); \
+ flock(nx_gzip_log->_fileno, LOCK_UN); \
+ pthread_mutex_unlock(&mutex_log); \
+} while (0)
+
+/* Use in case of an error */
+#define prt_err(fmt, ...) do { if (nx_dbg >= 0) { \
+ prt("%s:%u: Error: "fmt, \
+ __FILE__, __LINE__, ## __VA_ARGS__); \
+}} while (0)
+
+/* Use in case of a warning */
+#define prt_warn(fmt, ...) do { if (nx_dbg >= 1) { \
+ prt("%s:%u: Warning: "fmt, \
+ __FILE__, __LINE__, ## __VA_ARGS__); \
+}} while (0)
+
+/* Informational printouts */
+#define prt_info(fmt, ...) do { if (nx_dbg >= 2) { \
+ prt("Info: "fmt, ## __VA_ARGS__); \
+}} while (0)
+
+/* Trace zlib wrapper code */
+#define prt_trace(fmt, ...) do { if (nx_gzip_trace_enabled()) { \
+ prt("### "fmt, ## __VA_ARGS__); \
+}} while (0)
+
+/* Trace statistics */
+#define prt_stat(fmt, ...) do { if (nx_gzip_gather_statistics()) { \
+ prt("### "fmt, ## __VA_ARGS__); \
+}} while (0)
+
+/* Trace zlib hardware implementation */
+#define hw_trace(fmt, ...) do { \
+ if (nx_gzip_hw_trace_enabled()) \
+ fprintf(nx_gzip_log, "hhh " fmt, ## __VA_ARGS__); \
+ } while (0)
+
+/* Trace zlib software implementation */
+#define sw_trace(fmt, ...) do { \
+ if (nx_gzip_sw_trace_enabled()) \
+ fprintf(nx_gzip_log, "sss " fmt, ## __VA_ARGS__); \
+ } while (0)
+
+
+/**
+ * str_to_num - Convert a string into a number, handling suffixes like
+ * KiB for kilobyte
+ * MiB for megabyte
+ * GiB for gigabyte
+ */
+uint64_t str_to_num(char *str);
+void nx_lib_debug(int onoff);
+
+#endif /* _NXU_DBG_H_ */
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h b/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h
new file mode 100644
index 000000000000..20a4e883e0d3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/nxu.h
@@ -0,0 +1,650 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Hardware interface of the NX-GZIP compression accelerator
+ *
+ * Copyright (C) IBM Corporation, 2020
+ *
+ * Author: Bulent Abali <abali@us.ibm.com>
+ *
+ */
+
+#ifndef _NXU_H
+#define _NXU_H
+
+#include <stdint.h>
+#include <endian.h>
+#include "nx.h"
+
+/* deflate */
+#define LLSZ 286
+#define DSZ 30
+
+/* nx */
+#define DHTSZ 18
+#define DHT_MAXSZ 288
+#define MAX_DDE_COUNT 256
+
+/* util */
+#ifdef NXDBG
+#define NXPRT(X) X
+#else
+#define NXPRT(X)
+#endif
+
+#ifdef NXTIMER
+#include <sys/platform/ppc.h>
+#define NX_CLK(X) X
+#define nx_get_time() __ppc_get_timebase()
+#define nx_get_freq() __ppc_get_timebase_freq()
+#else
+#define NX_CLK(X)
+#define nx_get_time() (-1)
+#define nx_get_freq() (-1)
+#endif
+
+#define NX_MAX_FAULTS 500
+
+/*
+ * Definitions of acronyms used here. See
+ * P9 NX Gzip Accelerator User's Manual for details:
+ * https://github.com/libnxz/power-gzip/blob/develop/doc/power_nx_gzip_um.pdf
+ *
+ * adler/crc: 32 bit checksums appended to stream tail
+ * ce: completion extension
+ * cpb: coprocessor parameter block (metadata)
+ * crb: coprocessor request block (command)
+ * csb: coprocessor status block (status)
+ * dht: dynamic huffman table
+ * dde: data descriptor element (address, length)
+ * ddl: list of ddes
+ * dh/fh: dynamic and fixed huffman types
+ * fc: coprocessor function code
+ * histlen: history/dictionary length
+ * history: sliding window of up to 32KB of data
+ * lzcount: Deflate LZ symbol counts
+ * rembytecnt: remaining byte count
+ * sfbt: source final block type; last block's type during decomp
+ * spbc: source processed byte count
+ * subc: source unprocessed bit count
+ * tebc: target ending bit count; valid bits in the last byte
+ * tpbc: target processed byte count
+ * vas: virtual accelerator switch; the user mode interface
+ */
+
+union nx_qw_t {
+ uint32_t word[4];
+ uint64_t dword[2];
+} __aligned(16);
+
+/*
+ * Note: NX registers with fewer than 32 bits are declared by
+ * convention as uint32_t variables in unions. If *_offset and *_mask
+ * are defined for a variable, then use get_ put_ macros to
+ * conveniently access the register fields for endian conversions.
+ */
+
+struct nx_dde_t {
+ /* Data Descriptor Element, Section 6.4 */
+ union {
+ uint32_t dde_count;
+ /* When dde_count == 0 ddead is a pointer to a data buffer;
+ * ddebc is the buffer length bytes.
+ * When dde_count > 0 dde is an indirect dde; ddead is a
+ * pointer to a contiguous list of direct ddes; ddebc is the
+ * total length of all data pointed to by the list of direct
+ * ddes. Note that only one level of indirection is permitted.
+ * See Section 6.4 of the user manual for additional details.
+ */
+ };
+ uint32_t ddebc; /* dde byte count */
+ uint64_t ddead; /* dde address */
+} __aligned(16);
+
+struct nx_csb_t {
+ /* Coprocessor Status Block, Section 6.6 */
+ union {
+ uint32_t csb_v;
+ /* Valid bit. v must be set to 0 by the program
+ * before submitting the coprocessor command.
+ * Software can poll for the v bit
+ */
+
+ uint32_t csb_f;
+ /* 16B CSB size. Written to 0 by DMA when it writes the CPB */
+
+ uint32_t csb_cs;
+ /* cs completion sequence; unused */
+
+ uint32_t csb_cc;
+ /* cc completion code; cc != 0 exception occurred */
+
+ uint32_t csb_ce;
+ /* ce completion extension */
+
+ };
+ uint32_t tpbc;
+ /* target processed byte count TPBC */
+
+ uint64_t fsaddr;
+ /* Section 6.12.1 CSB NonZero error summary. FSA Failing storage
+ * address. Address where error occurred. When available, written
+ * to A field of CSB
+ */
+} __aligned(16);
+
+struct nx_ccb_t {
+ /* Coprocessor Completion Block, Section 6.7 */
+
+ uint32_t reserved[3];
+ union {
+ /* When crb.c==0 (no ccb defined) it is reserved;
+ * When crb.c==1 (ccb defined) it is cm
+ */
+
+ uint32_t ccb_cm;
+		/* Signal interrupt if crb.c==1 and cm==1 */
+
+ uint32_t word;
+ /* generic access to the 32bit word */
+ };
+} __aligned(16);
+
+struct vas_stamped_crb_t {
+ /*
+ * CRB operand of the paste coprocessor instruction is stamped
+	 * in quadword 4 with the information shown here as it is
+	 * written in to the receive FIFO of the coprocessor.
+ */
+
+ union {
+ uint32_t vas_buf_num;
+ /* Verification only vas buffer number which correlates to
+ * the low order bits of the atag in the paste command
+ */
+
+ uint32_t send_wc_id;
+ /* Pointer to Send Window Context that provides for NX address
+ * translation information, such as MSR and LPCR bits, job
+ * completion interrupt RA, PSWID, and job utilization counter.
+ */
+
+ };
+ union {
+ uint32_t recv_wc_id;
+ /* Pointer to Receive Window Context. NX uses this to return
+ * credits to a Receive FIFO as entries are dequeued.
+ */
+
+ };
+ uint32_t reserved2;
+ union {
+ uint32_t vas_invalid;
+ /* Invalid bit. If this bit is 1, the CRB is discarded by
+ * NX upon fetching it from the receive FIFO. If this bit is 0,
+ * the CRB is processed normally. The bit is stamped to 0
+ * by VAS and may be written to 1 by the hypervisor while
+ * the CRB is in the receive FIFO (in memory).
+ */
+
+ };
+};
+
+struct nx_stamped_fault_crb_t {
+ /*
+ * A CRB that has a translation fault is stamped by NX in quadword 4
+ * and pasted to the Fault Send Window in VAS.
+ */
+ uint64_t fsa;
+ union {
+ uint32_t nxsf_t;
+ uint32_t nxsf_fs;
+ };
+ uint32_t pswid;
+};
+
+union stamped_crb_t {
+ struct vas_stamped_crb_t vas;
+ struct nx_stamped_fault_crb_t nx;
+};
+
+struct nx_gzip_cpb_t {
+ /*
+ * Coprocessor Parameter Block In/Out are used to pass metadata
+ * to/from accelerator. Tables 6.5 and 6.6 of the user manual.
+ */
+
+ /* CPBInput */
+
+ struct {
+ union {
+ union nx_qw_t qw0;
+ struct {
+ uint32_t in_adler; /* bits 0:31 */
+ uint32_t in_crc; /* bits 32:63 */
+ union {
+ uint32_t in_histlen; /* bits 64:75 */
+ uint32_t in_subc; /* bits 93:95 */
+ };
+ union {
+ /* bits 108:111 */
+ uint32_t in_sfbt;
+ /* bits 112:127 */
+ uint32_t in_rembytecnt;
+ /* bits 116:127 */
+ uint32_t in_dhtlen;
+ };
+ };
+ };
+ union {
+ union nx_qw_t in_dht[DHTSZ]; /* qw[1:18] */
+ char in_dht_char[DHT_MAXSZ]; /* byte access */
+ };
+ union nx_qw_t reserved[5]; /* qw[19:23] */
+ };
+
+ /* CPBOutput */
+
+ volatile struct {
+ union {
+ union nx_qw_t qw24;
+ struct {
+ uint32_t out_adler; /* bits 0:31 qw[24] */
+ uint32_t out_crc; /* bits 32:63 qw[24] */
+ union {
+ /* bits 77:79 qw[24] */
+ uint32_t out_tebc;
+ /* bits 80:95 qw[24] */
+ uint32_t out_subc;
+ };
+ union {
+ /* bits 108:111 qw[24] */
+ uint32_t out_sfbt;
+ /* bits 112:127 qw[24] */
+ uint32_t out_rembytecnt;
+ /* bits 116:127 qw[24] */
+ uint32_t out_dhtlen;
+ };
+ };
+ };
+ union {
+ union nx_qw_t qw25[79]; /* qw[25:103] */
+ /* qw[25] compress no lzcounts or wrap */
+ uint32_t out_spbc_comp_wrap;
+ uint32_t out_spbc_wrap; /* qw[25] wrap */
+ /* qw[25] compress no lzcounts */
+ uint32_t out_spbc_comp;
+ /* 286 LL and 30 D symbol counts */
+ uint32_t out_lzcount[LLSZ+DSZ];
+ struct {
+ union nx_qw_t out_dht[DHTSZ]; /* qw[25:42] */
+ /* qw[43] decompress */
+ uint32_t out_spbc_decomp;
+ };
+ };
+ /* qw[104] compress with lzcounts */
+ uint32_t out_spbc_comp_with_count;
+ };
+} __aligned(128);
+
+struct nx_gzip_crb_t {
+ union { /* byte[0:3] */
+ uint32_t gzip_fc; /* bits[24-31] */
+ };
+ uint32_t reserved1; /* byte[4:7] */
+ union {
+ uint64_t csb_address; /* byte[8:15] */
+ struct {
+ uint32_t reserved2;
+ union {
+ uint32_t crb_c;
+ /* c==0 no ccb defined */
+
+ uint32_t crb_at;
+ /* at==0: the address type is ignored and
+ * all addresses are assumed to be effective addresses.
+ */
+
+ };
+ };
+ };
+ struct nx_dde_t source_dde; /* byte[16:31] */
+ struct nx_dde_t target_dde; /* byte[32:47] */
+ volatile struct nx_ccb_t ccb; /* byte[48:63] */
+ volatile union {
+ /* byte[64:239] shift csb by 128 bytes out of the crb; csb was
+ * in crb earlier; JReilly says csb written with partial inject
+ */
+ union nx_qw_t reserved64[11];
+ union stamped_crb_t stamp; /* byte[64:79] */
+ };
+ volatile struct nx_csb_t csb;
+} __aligned(128);
+
+struct nx_gzip_crb_cpb_t {
+ struct nx_gzip_crb_t crb;
+ struct nx_gzip_cpb_t cpb;
+} __aligned(2048);
+
+
+/*
+ * NX hardware convention numbers the msb, on the left, as bit 0.
+ * The defines below give *_offset as the rightmost bit position of
+ * a field; x in size_mask(x) is the field width in bits.
+ */
+
+#define size_mask(x) ((1U<<(x))-1)
+
+/*
+ * Offsets and Widths within the containing 32 bits of the various NX
+ * gzip hardware registers. Use the getnn/putnn macros to access
+ * these regs
+ */
+
+#define dde_count_mask size_mask(8)
+#define dde_count_offset 23
+
+/* CSB */
+
+#define csb_v_mask size_mask(1)
+#define csb_v_offset 0
+#define csb_f_mask size_mask(1)
+#define csb_f_offset 6
+#define csb_cs_mask size_mask(8)
+#define csb_cs_offset 15
+#define csb_cc_mask size_mask(8)
+#define csb_cc_offset 23
+#define csb_ce_mask size_mask(8)
+#define csb_ce_offset 31
+
+/* CCB */
+
+#define ccb_cm_mask size_mask(3)
+#define ccb_cm_offset 31
+
+/* VAS stamped CRB fields */
+
+#define vas_buf_num_mask size_mask(6)
+#define vas_buf_num_offset 5
+#define send_wc_id_mask size_mask(16)
+#define send_wc_id_offset 31
+#define recv_wc_id_mask size_mask(16)
+#define recv_wc_id_offset 31
+#define vas_invalid_mask size_mask(1)
+#define vas_invalid_offset 31
+
+/* NX stamped fault CRB fields */
+
+#define nxsf_t_mask size_mask(1)
+#define nxsf_t_offset 23
+#define nxsf_fs_mask size_mask(8)
+#define nxsf_fs_offset 31
+
+/* CPB input */
+
+#define in_histlen_mask size_mask(12)
+#define in_histlen_offset 11
+#define in_dhtlen_mask size_mask(12)
+#define in_dhtlen_offset 31
+#define in_subc_mask size_mask(3)
+#define in_subc_offset 31
+#define in_sfbt_mask size_mask(4)
+#define in_sfbt_offset 15
+#define in_rembytecnt_mask size_mask(16)
+#define in_rembytecnt_offset 31
+
+/* CPB output */
+
+#define out_tebc_mask size_mask(3)
+#define out_tebc_offset 15
+#define out_subc_mask size_mask(16)
+#define out_subc_offset 31
+#define out_sfbt_mask size_mask(4)
+#define out_sfbt_offset 15
+#define out_rembytecnt_mask size_mask(16)
+#define out_rembytecnt_offset 31
+#define out_dhtlen_mask size_mask(12)
+#define out_dhtlen_offset 31
+
+/* CRB */
+
+#define gzip_fc_mask size_mask(8)
+#define gzip_fc_offset 31
+#define crb_c_mask size_mask(1)
+#define crb_c_offset 28
+#define crb_at_mask size_mask(1)
+#define crb_at_offset 30
+#define csb_address_mask ~(15UL) /* mask off bottom 4b */
+
+/*
+ * Access macros for the registers. Do not access the registers
+ * directly, because of the endian conversion: the P9 processor may
+ * run either little- or big-endian, but the NX coprocessor registers
+ * are always big-endian.
+ * Use the 32- and 64-bit macros for full-width registers and the nn
+ * forms for register fields shorter than 32 bits.
+ */
+
+#define getnn(ST, REG) ((be32toh(ST.REG) >> (31-REG##_offset)) \
+ & REG##_mask)
+#define getpnn(ST, REG) ((be32toh((ST)->REG) >> (31-REG##_offset)) \
+ & REG##_mask)
+#define get32(ST, REG) (be32toh(ST.REG))
+#define getp32(ST, REG) (be32toh((ST)->REG))
+#define get64(ST, REG) (be64toh(ST.REG))
+#define getp64(ST, REG) (be64toh((ST)->REG))
+
+#define unget32(ST, REG) (get32(ST, REG) & ~((REG##_mask) \
+ << (31-REG##_offset)))
+/* get 32bits less the REG field */
+
+#define ungetp32(ST, REG) (getp32(ST, REG) & ~((REG##_mask) \
+ << (31-REG##_offset)))
+/* get 32bits less the REG field */
+
+#define clear_regs(ST) memset((void *)(&(ST)), 0, sizeof(ST))
+#define clear_dde(ST) do { ST.dde_count = ST.ddebc = 0; ST.ddead = 0; \
+ } while (0)
+#define clearp_dde(ST) do { (ST)->dde_count = (ST)->ddebc = 0; \
+ (ST)->ddead = 0; \
+ } while (0)
+#define clear_struct(ST) memset((void *)(&(ST)), 0, sizeof(ST))
+#define putnn(ST, REG, X) (ST.REG = htobe32(unget32(ST, REG) | (((X) \
+ & REG##_mask) << (31-REG##_offset))))
+#define putpnn(ST, REG, X) ((ST)->REG = htobe32(ungetp32(ST, REG) \
+ | (((X) & REG##_mask) << (31-REG##_offset))))
+
+#define put32(ST, REG, X) (ST.REG = htobe32(X))
+#define putp32(ST, REG, X) ((ST)->REG = htobe32(X))
+#define put64(ST, REG, X) (ST.REG = htobe64(X))
+#define putp64(ST, REG, X) ((ST)->REG = htobe64(X))
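
As a minimal illustration of the convention (not part of the patch, and assuming <assert.h> is available), the nn accessors apply the *_offset/*_mask pair on top of the byte swap, while the plain 32/64-bit accessors only swap:

	static void dde_field_demo(void)
	{
		struct nx_dde_t dde;

		clear_dde(dde);
		putnn(dde, dde_count, 2);	/* shift to dde_count_offset, mask, store big-endian */
		assert(getnn(dde, dde_count) == 2);	/* getnn undoes mask, shift and byte swap */

		put32(dde, ddebc, 4096);	/* full 32-bit field: byte swap only */
		assert(get32(dde, ddebc) == 4096);
	}
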
+
+/*
+ * Completion extension ce(0) ce(1) ce(2). Bits ce(3-7)
+ * unused. Section 6.6 Figure 6.7.
+ */
+
+#define get_csb_ce(ST) ((uint32_t)getnn(ST, csb_ce))
+#define get_csb_ce_ms3b(ST) (get_csb_ce(ST) >> 5)
+#define put_csb_ce_ms3b(ST, X) putnn(ST, csb_ce, ((uint32_t)(X) << 5))
+
+#define CSB_CE_PARTIAL 0x4
+#define CSB_CE_TERMINATE 0x2
+#define CSB_CE_TPBC_VALID 0x1
+
+#define csb_ce_termination(X) (!!((X) & CSB_CE_TERMINATE))
+/* termination, output buffers may be modified, SPBC/TPBC invalid Fig.6-7 */
+
+#define csb_ce_check_completion(X) (!csb_ce_termination(X))
+/* if not terminated then check full or partial completion */
+
+#define csb_ce_partial_completion(X) (!!((X) & CSB_CE_PARTIAL))
+#define csb_ce_full_completion(X) (!csb_ce_partial_completion(X))
+#define csb_ce_tpbc_valid(X) (!!((X) & CSB_CE_TPBC_VALID))
+/* TPBC indicates successfully stored data count */
+
+#define csb_ce_default_err(X) csb_ce_termination(X)
+/* most error CEs have CE(0)=0 and CE(1)=1 */
+
+#define csb_ce_cc3_partial(X) csb_ce_partial_completion(X)
+/* some CC=3 are partially completed, Table 6-8 */
+
+#define csb_ce_cc64(X) (((X) & (CSB_CE_PARTIAL \
+				| CSB_CE_TERMINATE)) == 0)
+/* Compression: when TPBC>SPBC then CC=64 Table 6-8; target didn't
+ * compress smaller than source.
+ */
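
A minimal sketch of how the valid-bit, CC and CE helpers above are meant to compose once a job finishes; it is not part of the patch, `cmd` stands for a previously submitted command, and the CC codes it defers to (ERR_NX_*) are listed a little further below:

	static uint32_t wait_and_check(struct nx_gzip_crb_cpb_t *cmd)
	{
		uint32_t cc, ce;

		while (getnn(cmd->crb.csb, csb_v) == 0)
			;	/* poll: NX sets V when it writes the CSB */

		cc = getnn(cmd->crb.csb, csb_cc);
		ce = get_csb_ce_ms3b(cmd->crb.csb);

		if (cc != 0 || !csb_ce_tpbc_valid(ce))
			return 0;	/* caller maps cc via nx_strerror(), declared below */

		return get32(cmd->crb.csb, tpbc);	/* bytes stored in the target */
	}
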
+
+/* Decompress SFBT combinations Tables 5-3, 6-4, 6-6 */
+
+#define SFBT_BFINAL 0x1
+#define SFBT_LIT 0x4
+#define SFBT_FHT 0x5
+#define SFBT_DHT 0x6
+#define SFBT_HDR 0x7
+
+/*
+ * NX gzip function codes. Table 6.2.
+ * Bits 0:4 are the FC. Bit 5 is used by the DMA controller to
+ * select one of the two Byte Count Limits.
+ */
+
+#define GZIP_FC_LIMIT_MASK 0x01
+#define GZIP_FC_COMPRESS_FHT 0x00
+#define GZIP_FC_COMPRESS_DHT 0x02
+#define GZIP_FC_COMPRESS_FHT_COUNT 0x04
+#define GZIP_FC_COMPRESS_DHT_COUNT 0x06
+#define GZIP_FC_COMPRESS_RESUME_FHT 0x08
+#define GZIP_FC_COMPRESS_RESUME_DHT 0x0a
+#define GZIP_FC_COMPRESS_RESUME_FHT_COUNT 0x0c
+#define GZIP_FC_COMPRESS_RESUME_DHT_COUNT 0x0e
+#define GZIP_FC_DECOMPRESS 0x10
+#define GZIP_FC_DECOMPRESS_SINGLE_BLK_N_SUSPEND 0x12
+#define GZIP_FC_DECOMPRESS_RESUME 0x14
+#define GZIP_FC_DECOMPRESS_RESUME_SINGLE_BLK_N_SUSPEND 0x16
+#define GZIP_FC_WRAP 0x1e
+
+#define fc_is_compress(fc) (((fc) & 0x10) == 0)
+#define fc_has_count(fc) (fc_is_compress(fc) && (((fc) & 0x4) != 0))
+
+/* CSB.CC Error codes */
+
+#define ERR_NX_OK 0
+#define ERR_NX_ALIGNMENT 1
+#define ERR_NX_OPOVERLAP 2
+#define ERR_NX_DATA_LENGTH 3
+#define ERR_NX_TRANSLATION 5
+#define ERR_NX_PROTECTION 6
+#define ERR_NX_EXTERNAL_UE7 7
+#define ERR_NX_INVALID_OP 8
+#define ERR_NX_PRIVILEGE 9
+#define ERR_NX_INTERNAL_UE 10
+#define ERR_NX_EXTERN_UE_WR 12
+#define ERR_NX_TARGET_SPACE 13
+#define ERR_NX_EXCESSIVE_DDE 14
+#define ERR_NX_TRANSL_WR 15
+#define ERR_NX_PROTECT_WR 16
+#define ERR_NX_SUBFUNCTION 17
+#define ERR_NX_FUNC_ABORT 18
+#define ERR_NX_BYTE_MAX 19
+#define ERR_NX_CORRUPT_CRB 20
+#define ERR_NX_INVALID_CRB 21
+#define ERR_NX_INVALID_DDE 30
+#define ERR_NX_SEGMENTED_DDL 31
+#define ERR_NX_DDE_OVERFLOW 33
+#define ERR_NX_TPBC_GT_SPBC 64
+#define ERR_NX_MISSING_CODE 66
+#define ERR_NX_INVALID_DIST 67
+#define ERR_NX_INVALID_DHT 68
+#define ERR_NX_EXTERNAL_UE90 90
+#define ERR_NX_WDOG_TIMER 224
+#define ERR_NX_AT_FAULT 250
+#define ERR_NX_INTR_SERVER 252
+#define ERR_NX_UE253 253
+#define ERR_NX_NO_HW 254
+#define ERR_NX_HUNG_OP 255
+#define ERR_NX_END 256
+
+/* initial values for non-resume operations */
+#define INIT_CRC 0 /* crc32(0L, Z_NULL, 0) */
+#define INIT_ADLER 1 /* adler32(0L, Z_NULL, 0) adler is initialized to 1 */
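
Putting the pieces together, a hedged sketch (not part of the patch; the function name and buffers are hypothetical) of how a single fixed-Huffman compress request could be assembled with the structures and macros above before being handed to nxu_submit_job(), declared just below:

	static void setup_fht_compress(struct nx_gzip_crb_cpb_t *cmd,
				       void *src, uint32_t srclen,
				       void *dst, uint32_t dstlen)
	{
		/* Zeroing also clears the CSB valid bit, as required before submit */
		clear_struct(cmd->crb);
		clear_struct(cmd->cpb);

		/* Fixed-Huffman compress function code, Table 6.2 */
		putnn(cmd->crb, gzip_fc, GZIP_FC_COMPRESS_FHT);

		/* Point the hardware at the CSB embedded in this command */
		put64(cmd->crb, csb_address,
		      ((uint64_t)(uintptr_t)&cmd->crb.csb) & csb_address_mask);

		/* Direct DDEs: dde_count stays 0, only address and byte count */
		clear_dde(cmd->crb.source_dde);
		put64(cmd->crb.source_dde, ddead, (uint64_t)(uintptr_t)src);
		put32(cmd->crb.source_dde, ddebc, srclen);

		clear_dde(cmd->crb.target_dde);
		put64(cmd->crb.target_dde, ddead, (uint64_t)(uintptr_t)dst);
		put32(cmd->crb.target_dde, ddebc, dstlen);

		/* Non-resume operation: checksums start from their seeds */
		put32(cmd->cpb, in_crc, INIT_CRC);
		put32(cmd->cpb, in_adler, INIT_ADLER);
	}
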
+
+/* prototypes */
+int nxu_submit_job(struct nx_gzip_crb_cpb_t *c, void *handle);
+
+extern void nxu_sigsegv_handler(int sig, siginfo_t *info, void *ctx);
+extern int nxu_touch_pages(void *buf, long buf_len, long page_len, int wr);
+
+/* caller supplies a print buffer 4*sizeof(crb) */
+
+char *nx_crb_str(struct nx_gzip_crb_t *crb, char *prbuf);
+char *nx_cpb_str(struct nx_gzip_cpb_t *cpb, char *prbuf);
+char *nx_prt_hex(void *cp, int sz, char *prbuf);
+char *nx_lzcount_str(struct nx_gzip_cpb_t *cpb, char *prbuf);
+char *nx_strerror(int e);
+
+#ifdef NX_SIM
+#include <stdio.h>
+int nx_sim_init(void *ctx);
+int nx_sim_end(void *ctx);
+int nxu_run_sim_job(struct nx_gzip_crb_cpb_t *c, void *ctx);
+#endif /* NX_SIM */
+
+/* Deflate stream manipulation */
+
+#define set_final_bit(x) (x |= (unsigned char)1)
+#define clr_final_bit(x) (x &= ~(unsigned char)1)
+
+#define append_empty_fh_blk(p, b) do { *(p) = (2 | (1&(b))); *((p)+1) = 0; \
+ } while (0)
+/* append 10 bits 0000001b 00...... ;
+ * assumes appending starts on a byte boundary; b is the final bit.
+ */
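
For illustration only (not part of the patch; `out` and `used` are hypothetical output-buffer cursor variables), closing a stream that ends on a byte boundary looks like:

	/* Append the 10-bit empty fixed-Huffman block with BFINAL = 1;
	 * it occupies two bytes in the output buffer.
	 */
	append_empty_fh_blk(out + used, 1);
	used += 2;
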
+
+
+#ifdef NX_842
+
+/* 842 Engine */
+
+struct nx_eft_crb_t {
+ union { /* byte[0:3] */
+ uint32_t eft_fc; /* bits[29-31] */
+ };
+ uint32_t reserved1; /* byte[4:7] */
+ union {
+ uint64_t csb_address; /* byte[8:15] */
+ struct {
+ uint32_t reserved2;
+ union {
+ uint32_t crb_c;
+ /* c==0 no ccb defined */
+
+ uint32_t crb_at;
+ /* at==0: the address type is ignored and
+ * all addresses are assumed to be effective addresses.
+ */
+
+ };
+ };
+ };
+ struct nx_dde_t source_dde; /* byte[16:31] */
+ struct nx_dde_t target_dde; /* byte[32:47] */
+ struct nx_ccb_t ccb; /* byte[48:63] */
+ union {
+ union nx_qw_t reserved64[3]; /* byte[64:96] */
+ };
+ struct nx_csb_t csb;
+} __aligned(128);
+
+/* 842 CRB */
+
+#define EFT_FC_MASK size_mask(3)
+#define EFT_FC_OFFSET 31
+#define EFT_FC_COMPRESS 0x0
+#define EFT_FC_COMPRESS_WITH_CRC 0x1
+#define EFT_FC_DECOMPRESS 0x2
+#define EFT_FC_DECOMPRESS_WITH_CRC 0x3
+#define EFT_FC_BLK_DATA_MOVE 0x4
+#endif /* NX_842 */
+
+#endif /* _NXU_H */
diff --git a/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h b/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h
new file mode 120000
index 000000000000..77fb4c7236d0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h
@@ -0,0 +1 @@
+../../../../../../arch/powerpc/include/uapi/asm/vas-api.h \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh
new file mode 100755
index 000000000000..c7b46c5fd7b3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+if [[ ! -w /dev/crypto/nx-gzip ]]; then
+ echo "Can't access /dev/crypto/nx-gzip, skipping"
+ echo "skip: $0"
+ exit 4
+fi
+
+set -e
+
+function cleanup
+{
+ rm -f nx-tempfile*
+}
+
+trap cleanup EXIT
+
+function test_sizes
+{
+ local n=$1
+ local fname="nx-tempfile.$n"
+
+ for size in 4K 64K 1M 64M
+ do
+ echo "Testing $size ($n) ..."
+ dd if=/dev/urandom of=$fname bs=$size count=1
+ ./gzfht_test $fname
+ ./gunz_test ${fname}.nx.gz
+ done
+}
+
+echo "Doing basic test of different sizes ..."
+test_sizes 0
+
+echo "Running tests in parallel ..."
+for i in {1..16}
+do
+ test_sizes $i &
+done
+
+wait
+
+echo "OK"
+
+exit 0
diff --git a/tools/testing/selftests/powerpc/pmu/.gitignore b/tools/testing/selftests/powerpc/pmu/.gitignore
index ff7896903d7b..f69b1e2641a1 100644
--- a/tools/testing/selftests/powerpc/pmu/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/.gitignore
@@ -2,3 +2,4 @@
count_instructions
l3_bank_test
per_event_excludes
+count_stcx_fail
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 19046db995fe..904672fb78dd 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -2,7 +2,7 @@
noarg:
$(MAKE) -C ../
-TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
+TEST_GEN_PROGS := count_instructions count_stcx_fail l3_bank_test per_event_excludes
EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
top_srcdir = ../../../../..
@@ -13,8 +13,12 @@ all: $(TEST_GEN_PROGS) ebb
$(TEST_GEN_PROGS): $(EXTRA_SOURCES)
# loop.S can only be built 64-bit
+$(OUTPUT)/count_instructions: CFLAGS += -m64
$(OUTPUT)/count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
- $(CC) $(CFLAGS) -m64 -o $@ $^
+
+$(OUTPUT)/count_stcx_fail: CFLAGS += -m64
+$(OUTPUT)/count_stcx_fail: loop.S $(EXTRA_SOURCES)
+
$(OUTPUT)/per_event_excludes: ../utils.c
diff --git a/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c
new file mode 100644
index 000000000000..7b4ac4537702
--- /dev/null
+++ b/tools/testing/selftests/powerpc/pmu/count_stcx_fail.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/prctl.h>
+
+#include "event.h"
+#include "utils.h"
+#include "lib.h"
+
+extern void thirty_two_instruction_loop_with_ll_sc(u64 loops, u64 *ll_sc_target);
+
+static void setup_event(struct event *e, u64 config, int type, char *name)
+{
+ event_init_opts(e, config, type, name);
+
+ e->attr.disabled = 1;
+ e->attr.exclude_kernel = 1;
+ e->attr.exclude_hv = 1;
+ e->attr.exclude_idle = 1;
+}
+
+static int do_count_loop(struct event *events, u64 instructions,
+ u64 overhead, bool report)
+{
+ s64 difference, expected;
+ double percentage;
+ u64 dummy;
+
+ prctl(PR_TASK_PERF_EVENTS_ENABLE);
+
+ /* Run for the requested number of instructions */
+ thirty_two_instruction_loop_with_ll_sc(instructions >> 5, &dummy);
+
+ prctl(PR_TASK_PERF_EVENTS_DISABLE);
+
+ event_read(&events[0]);
+ event_read(&events[1]);
+ event_read(&events[2]);
+
+ expected = instructions + overhead + (events[2].result.value * 10);
+ difference = events[0].result.value - expected;
+ percentage = (double)difference / events[0].result.value * 100;
+
+ if (report) {
+ printf("-----\n");
+ event_report(&events[0]);
+ event_report(&events[1]);
+ event_report(&events[2]);
+
+ printf("Looped for %llu instructions, overhead %llu\n", instructions, overhead);
+ printf("Expected %llu\n", expected);
+ printf("Actual %llu\n", events[0].result.value);
+ printf("Delta %lld, %f%%\n", difference, percentage);
+ }
+
+ event_reset(&events[0]);
+ event_reset(&events[1]);
+ event_reset(&events[2]);
+
+ if (difference < 0)
+ difference = -difference;
+
+ /*
+ * Tolerate a relative difference below 0.0001 %: the scale factor of
+ * 10^6 makes the integer division non-zero only when the difference
+ * is at least 0.0001 % of the measured instruction count.
+ */
+ difference *= 10000 * 100;
+ if (difference / events[0].result.value)
+ return -1;
+
+ return 0;
+}
+
+/* Count how many instructions it takes to do a null loop */
+static u64 determine_overhead(struct event *events)
+{
+ u64 current, overhead;
+ int i;
+
+ do_count_loop(events, 0, 0, false);
+ overhead = events[0].result.value;
+
+ for (i = 0; i < 100; i++) {
+ do_count_loop(events, 0, 0, false);
+ current = events[0].result.value;
+ if (current < overhead) {
+ printf("Replacing overhead %llu with %llu\n", overhead, current);
+ overhead = current;
+ }
+ }
+
+ return overhead;
+}
+
+#define PM_MRK_STCX_FAIL 0x03e158
+#define PM_STCX_FAIL 0x01e058
+
+static int test_body(void)
+{
+ struct event events[3];
+ u64 overhead;
+
+ setup_event(&events[0], PERF_COUNT_HW_INSTRUCTIONS, PERF_TYPE_HARDWARE, "instructions");
+ setup_event(&events[1], PERF_COUNT_HW_CPU_CYCLES, PERF_TYPE_HARDWARE, "cycles");
+ setup_event(&events[2], PM_STCX_FAIL, PERF_TYPE_RAW, "stcx_fail");
+
+ if (event_open(&events[0])) {
+ perror("perf_event_open");
+ return -1;
+ }
+
+ if (event_open_with_group(&events[1], events[0].fd)) {
+ perror("perf_event_open");
+ return -1;
+ }
+
+ if (event_open_with_group(&events[2], events[0].fd)) {
+ perror("perf_event_open");
+ return -1;
+ }
+
+ overhead = determine_overhead(events);
+ printf("Overhead of null loop: %llu instructions\n", overhead);
+
+ /* Run for 1Mi instructions */
+ FAIL_IF(do_count_loop(events, 1000000, overhead, true));
+
+ /* Run for 10Mi instructions */
+ FAIL_IF(do_count_loop(events, 10000000, overhead, true));
+
+ /* Run for 100Mi instructions */
+ FAIL_IF(do_count_loop(events, 100000000, overhead, true));
+
+ /* Run for 1Bi instructions */
+ FAIL_IF(do_count_loop(events, 1000000000, overhead, true));
+
+ /* Run for 16Bi instructions */
+ FAIL_IF(do_count_loop(events, 16000000000, overhead, true));
+
+ /* Run for 64Bi instructions */
+ FAIL_IF(do_count_loop(events, 64000000000, overhead, true));
+
+ event_close(&events[0]);
+ event_close(&events[1]);
+
+ return 0;
+}
+
+static int count_ll_sc(void)
+{
+ return eat_cpu(test_body);
+}
+
+int main(void)
+{
+ return test_harness(count_ll_sc, "count_ll_sc");
+}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/trace.h b/tools/testing/selftests/powerpc/pmu/ebb/trace.h
index 7c0fb5d2bdb1..da2a3be5441f 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/trace.h
+++ b/tools/testing/selftests/powerpc/pmu/ebb/trace.h
@@ -18,7 +18,7 @@ struct trace_entry
{
u8 type;
u8 length;
- u8 data[0];
+ u8 data[];
};
struct trace_buffer
@@ -26,7 +26,7 @@ struct trace_buffer
u64 size;
bool overflow;
void *tail;
- u8 data[0];
+ u8 data[];
};
struct trace_buffer *trace_buffer_allocate(u64 size);
diff --git a/tools/testing/selftests/powerpc/pmu/loop.S b/tools/testing/selftests/powerpc/pmu/loop.S
index 8cc9b5e2c9de..c52ba09b6fed 100644
--- a/tools/testing/selftests/powerpc/pmu/loop.S
+++ b/tools/testing/selftests/powerpc/pmu/loop.S
@@ -41,3 +41,38 @@ FUNC_START(thirty_two_instruction_loop)
subi r3,r3,1
b FUNC_NAME(thirty_two_instruction_loop)
FUNC_END(thirty_two_instruction_loop)
+
+FUNC_START(thirty_two_instruction_loop_with_ll_sc)
+ cmpdi r3,0
+ beqlr
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 5
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+1: ldarx r6,0,r4 # 10
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 15
+ addi r5,r5,1
+ addi r5,r5,1
+ stdcx. r6,0,r4
+ bne- 1b
+ addi r5,r5,1 # 20
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 25
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1
+ addi r5,r5,1 # 30
+ subi r3,r3,1
+ b FUNC_NAME(thirty_two_instruction_loop_with_ll_sc)
+FUNC_END(thirty_two_instruction_loop_with_ll_sc)
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index 932a032bf036..d6ae54663aed 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := signal signal_tm sigfuz sigreturn_vdso
+TEST_GEN_PROGS := signal signal_tm sigfuz sigreturn_vdso sig_sc_double_restart
CFLAGS += -maltivec
$(OUTPUT)/signal_tm: CFLAGS += -mhtm
diff --git a/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c b/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c
new file mode 100644
index 000000000000..e3972264615b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test that a syscall does not get restarted twice, handled by trap_norestart()
+ *
+ * Based on Al's description, and a test for the bug fixed in this commit:
+ *
+ * commit 9a81c16b527528ad307843be5571111aa8d35a80
+ * Author: Al Viro <viro@zeniv.linux.org.uk>
+ * Date: Mon Sep 20 21:48:57 2010 +0100
+ *
+ * powerpc: fix double syscall restarts
+ *
+ * Make sigreturn zero regs->trap, make do_signal() do the same on all
+ * paths. As it is, signal interrupting e.g. read() from fd 512 (==
+ * ERESTARTSYS) with another signal getting unblocked when the first
+ * handler finishes will lead to restart one insn earlier than it ought
+ * to. Same for multiple signals with in-kernel handlers interrupting
+ * that sucker at the same time. Same for multiple signals of any kind
+ * interrupting that sucker on 64bit...
+ */
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "utils.h"
+
+static void SIGUSR1_handler(int sig)
+{
+ kill(getpid(), SIGUSR2);
+ /*
+ * SIGUSR2 is blocked until this handler exits, at which point it is
+ * raised again and the kernel thinks there is a restart to be done
+ * because the pending restarted syscall has 512 (ERESTARTSYS) in r3.
+ * The buggy second restart would retreat the NIP another 4 bytes,
+ * onto the fail-case branch.
+ */
+}
+
+static void SIGUSR2_handler(int sig)
+{
+}
+
+static ssize_t raw_read(int fd, void *buf, size_t count)
+{
+ register long nr asm("r0") = __NR_read;
+ register long _fd asm("r3") = fd;
+ register void *_buf asm("r4") = buf;
+ register size_t _count asm("r5") = count;
+
+ asm volatile(
+" b 0f \n"
+" b 1f \n"
+" 0: sc 0 \n"
+" bns 2f \n"
+" neg %0,%0 \n"
+" b 2f \n"
+" 1: \n"
+" li %0,%4 \n"
+" 2: \n"
+ : "+r"(_fd), "+r"(nr), "+r"(_buf), "+r"(_count)
+ : "i"(-ENOANO)
+ : "memory", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "ctr", "cr0");
+
+ if (_fd < 0) {
+ errno = -_fd;
+ _fd = -1;
+ }
+
+ return _fd;
+}
+
+#define DATA "test 123"
+#define DLEN (strlen(DATA)+1)
+
+int test_restart(void)
+{
+ int pipefd[2];
+ pid_t pid;
+ char buf[512];
+
+ if (pipe(pipefd) == -1) {
+ perror("pipe");
+ exit(EXIT_FAILURE);
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ perror("fork");
+ exit(EXIT_FAILURE);
+ }
+
+ if (pid == 0) { /* Child reads from pipe */
+ struct sigaction act;
+ int fd;
+
+ memset(&act, 0, sizeof(act));
+ sigaddset(&act.sa_mask, SIGUSR2);
+ act.sa_handler = SIGUSR1_handler;
+ act.sa_flags = SA_RESTART;
+ if (sigaction(SIGUSR1, &act, NULL) == -1) {
+ perror("sigaction");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(&act, 0, sizeof(act));
+ act.sa_handler = SIGUSR2_handler;
+ act.sa_flags = SA_RESTART;
+ if (sigaction(SIGUSR2, &act, NULL) == -1) {
+ perror("sigaction");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Let's get ERESTARTSYS into r3 */
+ while ((fd = dup(pipefd[0])) != 512) {
+ if (fd == -1) {
+ perror("dup");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (raw_read(fd, buf, 512) == -1) {
+ if (errno == ENOANO) {
+ fprintf(stderr, "Double restart moved restart before sc instruction.\n");
+ _exit(EXIT_FAILURE);
+ }
+ perror("read");
+ exit(EXIT_FAILURE);
+ }
+
+ if (strncmp(buf, DATA, DLEN)) {
+ fprintf(stderr, "bad test string %s\n", buf);
+ exit(EXIT_FAILURE);
+ }
+
+ return 0;
+
+ } else {
+ int wstatus;
+
+ usleep(100000); /* Hack to get reader waiting */
+ kill(pid, SIGUSR1);
+ usleep(100000);
+ if (write(pipefd[1], DATA, DLEN) != DLEN) {
+ perror("write");
+ exit(EXIT_FAILURE);
+ }
+ close(pipefd[0]);
+ close(pipefd[1]);
+ if (wait(&wstatus) == -1) {
+ perror("wait");
+ exit(EXIT_FAILURE);
+ }
+ if (!WIFEXITED(wstatus)) {
+ fprintf(stderr, "child exited abnormally\n");
+ exit(EXIT_FAILURE);
+ }
+
+ FAIL_IF(WEXITSTATUS(wstatus) != EXIT_SUCCESS);
+
+ return 0;
+ }
+}
+
+int main(void)
+{
+ test_harness_set_timeout(10);
+ return test_harness(test_restart, "sig sys restart");
+}
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 4bca5a9327a4..bed4b5318a86 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -2,7 +2,9 @@
/fd-001-lookup
/fd-002-posix-eq
/fd-003-kthread
+/proc-fsconfig-hidepid
/proc-loadavg-001
+/proc-multiple-procfs
/proc-pid-vm
/proc-self-map-files-001
/proc-self-map-files-002
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index a8ed0f684829..8be8a03d2973 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -19,5 +19,7 @@ TEST_GEN_PROGS += self
TEST_GEN_PROGS += setns-dcache
TEST_GEN_PROGS += setns-sysvipc
TEST_GEN_PROGS += thread-self
+TEST_GEN_PROGS += proc-multiple-procfs
+TEST_GEN_PROGS += proc-fsconfig-hidepid
include ../lib.mk
diff --git a/tools/testing/selftests/proc/proc-fsconfig-hidepid.c b/tools/testing/selftests/proc/proc-fsconfig-hidepid.c
new file mode 100644
index 000000000000..b9af8f537185
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-fsconfig-hidepid.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright © 2020 Alexey Gladkov <gladkov.alexey@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <linux/mount.h>
+#include <linux/unistd.h>
+
+static inline int fsopen(const char *fsname, unsigned int flags)
+{
+ return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int fsconfig(int fd, unsigned int cmd, const char *key, const void *val, int aux)
+{
+ return syscall(__NR_fsconfig, fd, cmd, key, val, aux);
+}
+
+int main(void)
+{
+ int fsfd, ret;
+ int hidepid = 2;
+
+ assert((fsfd = fsopen("proc", 0)) != -1);
+
+ ret = fsconfig(fsfd, FSCONFIG_SET_BINARY, "hidepid", &hidepid, 0);
+ assert(ret == -1);
+ assert(errno == EINVAL);
+
+ assert(!fsconfig(fsfd, FSCONFIG_SET_STRING, "hidepid", "2", 0));
+ assert(!fsconfig(fsfd, FSCONFIG_SET_STRING, "hidepid", "invisible", 0));
+
+ assert(!close(fsfd));
+
+ return 0;
+}
diff --git a/tools/testing/selftests/proc/proc-multiple-procfs.c b/tools/testing/selftests/proc/proc-multiple-procfs.c
new file mode 100644
index 000000000000..ab912ad95dab
--- /dev/null
+++ b/tools/testing/selftests/proc/proc-multiple-procfs.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2020 Alexey Gladkov <gladkov.alexey@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+int main(void)
+{
+ struct stat proc_st1, proc_st2;
+ char procbuff[] = "/tmp/proc.XXXXXX/meminfo";
+ char procdir1[] = "/tmp/proc.XXXXXX";
+ char procdir2[] = "/tmp/proc.XXXXXX";
+
+ assert(mkdtemp(procdir1) != NULL);
+ assert(mkdtemp(procdir2) != NULL);
+
+ assert(!mount("proc", procdir1, "proc", 0, "hidepid=1"));
+ assert(!mount("proc", procdir2, "proc", 0, "hidepid=2"));
+
+ snprintf(procbuff, sizeof(procbuff), "%s/meminfo", procdir1);
+ assert(!stat(procbuff, &proc_st1));
+
+ snprintf(procbuff, sizeof(procbuff), "%s/meminfo", procdir2);
+ assert(!stat(procbuff, &proc_st2));
+
+ umount(procdir1);
+ umount(procdir2);
+
+ assert(proc_st1.st_dev != proc_st2.st_dev);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/sysctl/config b/tools/testing/selftests/sysctl/config
index 6ca14800d755..fc263efd1fad 100644
--- a/tools/testing/selftests/sysctl/config
+++ b/tools/testing/selftests/sysctl/config
@@ -1 +1 @@
-CONFIG_TEST_SYSCTL=y
+CONFIG_TEST_SYSCTL=m
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index 6a970b127c9b..19515dcb7d04 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -39,16 +39,7 @@ ALL_TESTS="$ALL_TESTS 0003:1:1:int_0002"
ALL_TESTS="$ALL_TESTS 0004:1:1:uint_0001"
ALL_TESTS="$ALL_TESTS 0005:3:1:int_0003"
ALL_TESTS="$ALL_TESTS 0006:50:1:bitmap_0001"
-
-test_modprobe()
-{
- if [ ! -d $DIR ]; then
- echo "$0: $DIR not present" >&2
- echo "You must have the following enabled in your kernel:" >&2
- cat $TEST_DIR/config >&2
- exit $ksft_skip
- fi
-}
+ALL_TESTS="$ALL_TESTS 0007:1:1:boot_int"
function allow_user_defaults()
{
@@ -122,13 +113,15 @@ test_reqs()
function load_req_mod()
{
- if [ ! -d $DIR ]; then
+ if [ ! -d $SYSCTL ]; then
if ! modprobe -q -n $TEST_DRIVER; then
echo "$0: module $TEST_DRIVER not found [SKIP]"
+ echo "You must set CONFIG_TEST_SYSCTL=m in your kernel" >&2
exit $ksft_skip
fi
modprobe $TEST_DRIVER
if [ $? -ne 0 ]; then
+ echo "$0: modprobe $TEST_DRIVER failed."
exit
fi
fi
@@ -752,6 +745,46 @@ sysctl_test_0006()
run_bitmaptest
}
+sysctl_test_0007()
+{
+ TARGET="${SYSCTL}/boot_int"
+ if [ ! -f $TARGET ]; then
+ echo "Skipping test for $TARGET as it is not present ..."
+ return $ksft_skip
+ fi
+
+ if [ -d $DIR ]; then
+ echo "Boot param test only possible sysctl_test is built-in, not module:"
+ cat $TEST_DIR/config >&2
+ return $ksft_skip
+ fi
+
+ echo -n "Testing if $TARGET is set to 1 ..."
+ ORIG=$(cat "${TARGET}")
+
+ if [ x$ORIG = "x1" ]; then
+ echo "ok"
+ return 0
+ fi
+ echo "FAIL"
+ echo "Checking if /proc/cmdline contains setting of the expected parameter ..."
+ if [ ! -f /proc/cmdline ]; then
+ echo "/proc/cmdline does not exist, test inconclusive"
+ return 0
+ fi
+
+ FOUND=$(grep -c "sysctl[./]debug[./]test_sysctl[./]boot_int=1" /proc/cmdline)
+ if [ $FOUND = "1" ]; then
+ echo "Kernel param found but $TARGET is not 1, TEST FAILED"
+ rc=1
+ test_rc
+ fi
+
+ echo "Skipping test, expected kernel parameter missing."
+ echo "To perform this test, make sure kernel is booted with parameter: sysctl.debug.test_sysctl.boot_int=1"
+ return $ksft_skip
+}
+
list_tests()
{
echo "Test ID list:"
@@ -766,6 +799,7 @@ list_tests()
echo "0004 x $(get_test_count 0004) - tests proc_douintvec()"
echo "0005 x $(get_test_count 0005) - tests proc_douintvec() array"
echo "0006 x $(get_test_count 0006) - tests proc_do_large_bitmap()"
+ echo "0007 x $(get_test_count 0007) - tests setting sysctl from kernel boot param"
}
usage()
@@ -929,7 +963,6 @@ test_reqs
allow_user_defaults
check_production_sysctl_writes_strict
load_req_mod
-test_modprobe
trap "test_finish" EXIT
diff --git a/tools/testing/selftests/timens/clock_nanosleep.c b/tools/testing/selftests/timens/clock_nanosleep.c
index 8e7b7c72ef65..72d41b955fb2 100644
--- a/tools/testing/selftests/timens/clock_nanosleep.c
+++ b/tools/testing/selftests/timens/clock_nanosleep.c
@@ -119,7 +119,7 @@ int main(int argc, char *argv[])
ksft_set_plan(4);
- check_config_posix_timers();
+ check_supported_timers();
if (unshare_timens())
return 1;
diff --git a/tools/testing/selftests/timens/timens.c b/tools/testing/selftests/timens/timens.c
index 098be7c83be3..52b6a1185f52 100644
--- a/tools/testing/selftests/timens/timens.c
+++ b/tools/testing/selftests/timens/timens.c
@@ -155,7 +155,7 @@ int main(int argc, char *argv[])
nscheck();
- check_config_posix_timers();
+ check_supported_timers();
ksft_set_plan(ARRAY_SIZE(clocks) * 2);
diff --git a/tools/testing/selftests/timens/timens.h b/tools/testing/selftests/timens/timens.h
index e09e7e39bc52..d4fc52d47146 100644
--- a/tools/testing/selftests/timens/timens.h
+++ b/tools/testing/selftests/timens/timens.h
@@ -14,15 +14,26 @@
#endif
static int config_posix_timers = true;
+static int config_alarm_timers = true;
-static inline void check_config_posix_timers(void)
+static inline void check_supported_timers(void)
{
+ struct timespec ts;
+
if (timer_create(-1, 0, 0) == -1 && errno == ENOSYS)
config_posix_timers = false;
+
+ if (clock_gettime(CLOCK_BOOTTIME_ALARM, &ts) == -1 && errno == EINVAL)
+ config_alarm_timers = false;
}
static inline bool check_skip(int clockid)
{
+ if (!config_alarm_timers && clockid == CLOCK_BOOTTIME_ALARM) {
+ ksft_test_result_skip("CLOCK_BOOTTIME_ALARM isn't supported\n");
+ return true;
+ }
+
if (config_posix_timers)
return false;
diff --git a/tools/testing/selftests/timens/timer.c b/tools/testing/selftests/timens/timer.c
index 96dba11ebe44..5e7f0051bd7b 100644
--- a/tools/testing/selftests/timens/timer.c
+++ b/tools/testing/selftests/timens/timer.c
@@ -22,6 +22,9 @@ int run_test(int clockid, struct timespec now)
timer_t fd;
int i;
+ if (check_skip(clockid))
+ return 0;
+
for (i = 0; i < 2; i++) {
struct sigevent sevp = {.sigev_notify = SIGEV_NONE};
int flags = 0;
@@ -74,6 +77,8 @@ int main(int argc, char *argv[])
nscheck();
+ check_supported_timers();
+
ksft_set_plan(3);
clock_gettime(CLOCK_MONOTONIC, &mtime_now);
diff --git a/tools/testing/selftests/timens/timerfd.c b/tools/testing/selftests/timens/timerfd.c
index eff1ec5ff215..9edd43d6b2c1 100644
--- a/tools/testing/selftests/timens/timerfd.c
+++ b/tools/testing/selftests/timens/timerfd.c
@@ -28,6 +28,9 @@ int run_test(int clockid, struct timespec now)
long long elapsed;
int fd, i;
+ if (check_skip(clockid))
+ return 0;
+
if (tclock_gettime(clockid, &now))
return pr_perror("clock_gettime(%d)", clockid);
@@ -81,6 +84,8 @@ int main(int argc, char *argv[])
nscheck();
+ check_supported_timers();
+
ksft_set_plan(3);
clock_gettime(CLOCK_MONOTONIC, &mtime_now);
diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
index 8155c2ea7ccb..663062701d5a 100755
--- a/tools/testing/selftests/tpm2/test_smoke.sh
+++ b/tools/testing/selftests/tpm2/test_smoke.sh
@@ -1,6 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+[ -e /dev/tpm0 ] || exit $ksft_skip
+
python -m unittest -v tpm2_tests.SmokeTest
python -m unittest -v tpm2_tests.AsyncTest
diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
index a6f5e346635e..36c9d030a1c6 100755
--- a/tools/testing/selftests/tpm2/test_space.sh
+++ b/tools/testing/selftests/tpm2/test_space.sh
@@ -1,4 +1,9 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+[ -e /dev/tpmrm0 ] || exit $ksft_skip
+
python -m unittest -v tpm2_tests.SpaceTest
diff --git a/tools/testing/selftests/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore
index 382cfb39a1a3..5eb64d41e541 100644
--- a/tools/testing/selftests/vDSO/.gitignore
+++ b/tools/testing/selftests/vDSO/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
vdso_test
+vdso_test_gettimeofday
+vdso_test_getcpu
vdso_standalone_test_x86
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
index 9e03d61f52fd..0069f2f83f86 100644
--- a/tools/testing/selftests/vDSO/Makefile
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -4,7 +4,7 @@ include ../lib.mk
uname_M := $(shell uname -m 2>/dev/null || echo not)
ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
-TEST_GEN_PROGS := $(OUTPUT)/vdso_test
+TEST_GEN_PROGS := $(OUTPUT)/vdso_test_gettimeofday $(OUTPUT)/vdso_test_getcpu
ifeq ($(ARCH),x86)
TEST_GEN_PROGS += $(OUTPUT)/vdso_standalone_test_x86
endif
@@ -17,7 +17,8 @@ LDLIBS += -lgcc_s
endif
all: $(TEST_GEN_PROGS)
-$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c
+$(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c
+$(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c
$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
$(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
vdso_standalone_test_x86.c parse_vdso.c \
diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index 1dbb4b87268f..413f75620a35 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
@@ -21,29 +21,7 @@
#include <limits.h>
#include <elf.h>
-/*
- * To use this vDSO parser, first call one of the vdso_init_* functions.
- * If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR
- * to vdso_init_from_sysinfo_ehdr. Otherwise pass auxv to vdso_init_from_auxv.
- * Then call vdso_sym for each symbol you want. For example, to look up
- * gettimeofday on x86_64, use:
- *
- * <some pointer> = vdso_sym("LINUX_2.6", "gettimeofday");
- * or
- * <some pointer> = vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
- *
- * vdso_sym will return 0 if the symbol doesn't exist or if the init function
- * failed or was not called. vdso_sym is a little slow, so its return value
- * should be cached.
- *
- * vdso_sym is threadsafe; the init functions are not.
- *
- * These are the prototypes:
- */
-extern void vdso_init_from_auxv(void *auxv);
-extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
-extern void *vdso_sym(const char *version, const char *name);
-
+#include "parse_vdso.h"
/* And here's the code. */
#ifndef ELF_BITS
diff --git a/tools/testing/selftests/vDSO/parse_vdso.h b/tools/testing/selftests/vDSO/parse_vdso.h
new file mode 100644
index 000000000000..de0453067d7c
--- /dev/null
+++ b/tools/testing/selftests/vDSO/parse_vdso.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef PARSE_VDSO_H
+#define PARSE_VDSO_H
+
+#include <stdint.h>
+
+/*
+ * To use this vDSO parser, first call one of the vdso_init_* functions.
+ * If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR
+ * to vdso_init_from_sysinfo_ehdr. Otherwise pass auxv to vdso_init_from_auxv.
+ * Then call vdso_sym for each symbol you want. For example, to look up
+ * gettimeofday on x86_64, use:
+ *
+ * <some pointer> = vdso_sym("LINUX_2.6", "gettimeofday");
+ * or
+ * <some pointer> = vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
+ *
+ * vdso_sym will return 0 if the symbol doesn't exist or if the init function
+ * failed or was not called. vdso_sym is a little slow, so its return value
+ * should be cached.
+ *
+ * vdso_sym is threadsafe; the init functions are not.
+ *
+ * These are the prototypes:
+ */
+void *vdso_sym(const char *version, const char *name);
+void vdso_init_from_sysinfo_ehdr(uintptr_t base);
+void vdso_init_from_auxv(void *auxv);
+
+#endif
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 5ac4b00acfbc..8a44ff973ee1 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -16,9 +16,7 @@
#include <unistd.h>
#include <stdint.h>
-extern void *vdso_sym(const char *version, const char *name);
-extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
-extern void vdso_init_from_auxv(void *auxv);
+#include "parse_vdso.h"
/* We need a libc functions... */
int strcmp(const char *a, const char *b)
diff --git a/tools/testing/selftests/vDSO/vdso_test.c b/tools/testing/selftests/vDSO/vdso_test.c
deleted file mode 100644
index 719d5a6bd664..000000000000
--- a/tools/testing/selftests/vDSO/vdso_test.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * vdso_test.c: Sample code to test parse_vdso.c
- * Copyright (c) 2014 Andy Lutomirski
- *
- * Compile with:
- * gcc -std=gnu99 vdso_test.c parse_vdso.c
- *
- * Tested on x86, 32-bit and 64-bit. It may work on other architectures, too.
- */
-
-#include <stdint.h>
-#include <elf.h>
-#include <stdio.h>
-#include <sys/auxv.h>
-#include <sys/time.h>
-
-#include "../kselftest.h"
-
-extern void *vdso_sym(const char *version, const char *name);
-extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
-extern void vdso_init_from_auxv(void *auxv);
-
-/*
- * ARM64's vDSO exports its gettimeofday() implementation with a different
- * name and version from other architectures, so we need to handle it as
- * a special case.
- */
-#if defined(__aarch64__)
-const char *version = "LINUX_2.6.39";
-const char *name = "__kernel_gettimeofday";
-#else
-const char *version = "LINUX_2.6";
-const char *name = "__vdso_gettimeofday";
-#endif
-
-int main(int argc, char **argv)
-{
- unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
- if (!sysinfo_ehdr) {
- printf("AT_SYSINFO_EHDR is not present!\n");
- return KSFT_SKIP;
- }
-
- vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
-
- /* Find gettimeofday. */
- typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
- gtod_t gtod = (gtod_t)vdso_sym(version, name);
-
- if (!gtod) {
- printf("Could not find %s\n", name);
- return KSFT_SKIP;
- }
-
- struct timeval tv;
- long ret = gtod(&tv, 0);
-
- if (ret == 0) {
- printf("The time is %lld.%06lld\n",
- (long long)tv.tv_sec, (long long)tv.tv_usec);
- } else {
- printf("%s failed\n", name);
- return KSFT_FAIL;
- }
-
- return 0;
-}
diff --git a/tools/testing/selftests/vDSO/vdso_test_getcpu.c b/tools/testing/selftests/vDSO/vdso_test_getcpu.c
new file mode 100644
index 000000000000..fc25ede131b8
--- /dev/null
+++ b/tools/testing/selftests/vDSO/vdso_test_getcpu.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vdso_test_getcpu.c: Sample code to test parse_vdso.c and vDSO getcpu()
+ *
+ * Copyright (c) 2020 Arm Ltd
+ */
+
+#include <stdint.h>
+#include <elf.h>
+#include <stdio.h>
+#include <sys/auxv.h>
+#include <sys/time.h>
+
+#include "../kselftest.h"
+#include "parse_vdso.h"
+
+const char *version = "LINUX_2.6";
+const char *name = "__vdso_getcpu";
+
+struct getcpu_cache;
+typedef long (*getcpu_t)(unsigned int *, unsigned int *,
+ struct getcpu_cache *);
+
+int main(int argc, char **argv)
+{
+ unsigned long sysinfo_ehdr;
+ unsigned int cpu, node;
+ getcpu_t get_cpu;
+ long ret;
+
+ sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
+ if (!sysinfo_ehdr) {
+ printf("AT_SYSINFO_EHDR is not present!\n");
+ return KSFT_SKIP;
+ }
+
+ vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
+
+ get_cpu = (getcpu_t)vdso_sym(version, name);
+ if (!get_cpu) {
+ printf("Could not find %s\n", name);
+ return KSFT_SKIP;
+ }
+
+ ret = get_cpu(&cpu, &node, 0);
+ if (ret == 0) {
+ printf("Running on CPU %u node %u\n", cpu, node);
+ } else {
+ printf("%s failed\n", name);
+ return KSFT_FAIL;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c
new file mode 100644
index 000000000000..8ccc73ed8240
--- /dev/null
+++ b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vdso_test_gettimeofday.c: Sample code to test parse_vdso.c and
+ * vDSO gettimeofday()
+ * Copyright (c) 2014 Andy Lutomirski
+ *
+ * Compile with:
+ * gcc -std=gnu99 vdso_test_gettimeofday.c parse_vdso.c
+ *
+ * Tested on x86, 32-bit and 64-bit. It may work on other architectures, too.
+ */
+
+#include <stdint.h>
+#include <elf.h>
+#include <stdio.h>
+#include <sys/auxv.h>
+#include <sys/time.h>
+
+#include "../kselftest.h"
+#include "parse_vdso.h"
+
+/*
+ * ARM64's vDSO exports its gettimeofday() implementation with a different
+ * name and version from other architectures, so we need to handle it as
+ * a special case.
+ */
+#if defined(__aarch64__)
+const char *version = "LINUX_2.6.39";
+const char *name = "__kernel_gettimeofday";
+#else
+const char *version = "LINUX_2.6";
+const char *name = "__vdso_gettimeofday";
+#endif
+
+int main(int argc, char **argv)
+{
+ unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
+ if (!sysinfo_ehdr) {
+ printf("AT_SYSINFO_EHDR is not present!\n");
+ return KSFT_SKIP;
+ }
+
+ vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
+
+ /* Find gettimeofday. */
+ typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
+ gtod_t gtod = (gtod_t)vdso_sym(version, name);
+
+ if (!gtod) {
+ printf("Could not find %s\n", name);
+ return KSFT_SKIP;
+ }
+
+ struct timeval tv;
+ long ret = gtod(&tv, 0);
+
+ if (ret == 0) {
+ printf("The time is %lld.%06lld\n",
+ (long long)tv.tv_sec, (long long)tv.tv_usec);
+ } else {
+ printf("%s failed\n", name);
+ return KSFT_FAIL;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 8df6a074e370..849e8226395a 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -10,6 +10,7 @@ mlock2-tests
mremap_dontunmap
on-fault-limit
transhuge-stress
+protection_keys
userfaultfd
mlock-intersect-test
mlock-random-test
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 9f18440080ef..a9026706d597 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -22,6 +22,30 @@ TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
TEST_GEN_FILES += khugepaged
+ifeq ($(ARCH),x86_64)
+CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c)
+CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie)
+
+TARGETS := protection_keys
+BINARIES_32 := $(TARGETS:%=%_32)
+BINARIES_64 := $(TARGETS:%=%_64)
+
+ifeq ($(CAN_BUILD_WITH_NOPIE),1)
+CFLAGS += -no-pie
+endif
+
+ifeq ($(CAN_BUILD_I386),1)
+TEST_GEN_FILES += $(BINARIES_32)
+endif
+
+ifeq ($(CAN_BUILD_X86_64),1)
+TEST_GEN_FILES += $(BINARIES_64)
+endif
+else
+TEST_GEN_FILES += protection_keys
+endif
+
ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64))
TEST_GEN_FILES += va_128TBswitch
TEST_GEN_FILES += virtual_address_range
@@ -37,6 +61,55 @@ include ../lib.mk
$(OUTPUT)/hmm-tests: LDLIBS += -lhugetlbfs -lpthread
+ifeq ($(ARCH),x86_64)
+BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
+BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
+
+define gen-target-rule-32
+$(1) $(1)_32: $(OUTPUT)/$(1)_32
+.PHONY: $(1) $(1)_32
+endef
+
+define gen-target-rule-64
+$(1) $(1)_64: $(OUTPUT)/$(1)_64
+.PHONY: $(1) $(1)_64
+endef
+
+ifeq ($(CAN_BUILD_I386),1)
+$(BINARIES_32): CFLAGS += -m32
+$(BINARIES_32): LDLIBS += -lrt -ldl -lm
+$(BINARIES_32): %_32: %.c
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
+$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
+endif
+
+ifeq ($(CAN_BUILD_X86_64),1)
+$(BINARIES_64): CFLAGS += -m64
+$(BINARIES_64): LDLIBS += -lrt -ldl
+$(BINARIES_64): %_64: %.c
+ $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
+$(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
+endif
+
+# x86_64 users should be encouraged to install 32-bit libraries
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),01)
+all: warn_32bit_failure
+
+warn_32bit_failure:
+ @echo "Warning: you seem to have a broken 32-bit build" 2>&1; \
+ echo "environment. This will reduce test coverage of 64-bit" 2>&1; \
+ echo "kernels. If you are using a Debian-like distribution," 2>&1; \
+ echo "try:"; 2>&1; \
+ echo ""; \
+ echo " apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \
+ echo ""; \
+ echo "If you are using a Fedora-like distribution, try:"; \
+ echo ""; \
+ echo " yum install glibc-devel.*i686"; \
+ exit 0;
+endif
+endif
+
$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
$(OUTPUT)/mlock-random-test: LDLIBS += -lcap
diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c
index 51b89cedd09d..8b75821302a7 100644
--- a/tools/testing/selftests/vm/khugepaged.c
+++ b/tools/testing/selftests/vm/khugepaged.c
@@ -502,7 +502,7 @@ static bool wait_for_scan(const char *msg, char *p)
madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
- return !timeout;
+ return timeout == -1;
}
static void alloc_at_fault(void)
diff --git a/tools/testing/selftests/vm/mremap_dontunmap.c b/tools/testing/selftests/vm/mremap_dontunmap.c
index ee06cb0b9efb..3a7b5ef0b0c6 100644
--- a/tools/testing/selftests/vm/mremap_dontunmap.c
+++ b/tools/testing/selftests/vm/mremap_dontunmap.c
@@ -11,7 +11,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <stdlib.h>
#include <unistd.h>
#include "../kselftest.h"
diff --git a/tools/testing/selftests/vm/pkey-helpers.h b/tools/testing/selftests/vm/pkey-helpers.h
new file mode 100644
index 000000000000..622a85848f61
--- /dev/null
+++ b/tools/testing/selftests/vm/pkey-helpers.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PKEYS_HELPER_H
+#define _PKEYS_HELPER_H
+#define _GNU_SOURCE
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+
+/* Define some kernel-like types */
+#define u8 __u8
+#define u16 __u16
+#define u32 __u32
+#define u64 __u64
+
+#define PTR_ERR_ENOTSUP ((void *)-ENOTSUP)
+
+#ifndef DEBUG_LEVEL
+#define DEBUG_LEVEL 0
+#endif
+#define DPRINT_IN_SIGNAL_BUF_SIZE 4096
+extern int dprint_in_signal;
+extern char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
+
+extern int test_nr;
+extern int iteration_nr;
+
+#ifdef __GNUC__
+__attribute__((format(printf, 1, 2)))
+#endif
+static inline void sigsafe_printf(const char *format, ...)
+{
+ va_list ap;
+
+ if (!dprint_in_signal) {
+ va_start(ap, format);
+ vprintf(format, ap);
+ va_end(ap);
+ } else {
+ int ret;
+ /*
+ * No printf() functions are signal-safe.
+ * They deadlock easily. Write the format
+ * string to get some output, even if
+ * incomplete.
+ */
+ ret = write(1, format, strlen(format));
+ if (ret < 0)
+ exit(1);
+ }
+}
+#define dprintf_level(level, args...) do { \
+ if (level <= DEBUG_LEVEL) \
+ sigsafe_printf(args); \
+} while (0)
+#define dprintf0(args...) dprintf_level(0, args)
+#define dprintf1(args...) dprintf_level(1, args)
+#define dprintf2(args...) dprintf_level(2, args)
+#define dprintf3(args...) dprintf_level(3, args)
+#define dprintf4(args...) dprintf_level(4, args)
+
+extern void abort_hooks(void);
+#define pkey_assert(condition) do { \
+ if (!(condition)) { \
+ dprintf0("assert() at %s::%d test_nr: %d iteration: %d\n", \
+ __FILE__, __LINE__, \
+ test_nr, iteration_nr); \
+ dprintf0("errno at assert: %d", errno); \
+ abort_hooks(); \
+ exit(__LINE__); \
+ } \
+} while (0)
+
+__attribute__((noinline)) int read_ptr(int *ptr);
+void expected_pkey_fault(int pkey);
+int sys_pkey_alloc(unsigned long flags, unsigned long init_val);
+int sys_pkey_free(unsigned long pkey);
+int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
+ unsigned long pkey);
+void record_pkey_malloc(void *ptr, long size, int prot);
+
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+#include "pkey-x86.h"
+#elif defined(__powerpc64__) /* arch */
+#include "pkey-powerpc.h"
+#else /* arch */
+#error Architecture not supported
+#endif /* arch */
+
+#define PKEY_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)
+
+static inline u64 set_pkey_bits(u64 reg, int pkey, u64 flags)
+{
+ u32 shift = pkey_bit_position(pkey);
+ /* mask out bits from pkey in old value */
+ reg &= ~((u64)PKEY_MASK << shift);
+ /* OR in new bits for pkey */
+ reg |= (flags & PKEY_MASK) << shift;
+ return reg;
+}
+
+static inline u64 get_pkey_bits(u64 reg, int pkey)
+{
+ u32 shift = pkey_bit_position(pkey);
+ /*
+ * shift down the relevant bits to the lowest two, then
+ * mask off all the other higher bits
+ */
+ return ((reg >> shift) & PKEY_MASK);
+}
+
+extern u64 shadow_pkey_reg;
+
+static inline u64 _read_pkey_reg(int line)
+{
+ u64 pkey_reg = __read_pkey_reg();
+
+ dprintf4("read_pkey_reg(line=%d) pkey_reg: %016llx"
+ " shadow: %016llx\n",
+ line, pkey_reg, shadow_pkey_reg);
+ assert(pkey_reg == shadow_pkey_reg);
+
+ return pkey_reg;
+}
+
+#define read_pkey_reg() _read_pkey_reg(__LINE__)
+
+static inline void write_pkey_reg(u64 pkey_reg)
+{
+ dprintf4("%s() changing %016llx to %016llx\n", __func__,
+ __read_pkey_reg(), pkey_reg);
+ /* will do the shadow check for us: */
+ read_pkey_reg();
+ __write_pkey_reg(pkey_reg);
+ shadow_pkey_reg = pkey_reg;
+ dprintf4("%s(%016llx) pkey_reg: %016llx\n", __func__,
+ pkey_reg, __read_pkey_reg());
+}
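
As a small illustration (not part of the test itself; PKEY_DISABLE_WRITE comes from the arch header included above), the shadow-tracking helpers compose like this:

	static inline void deny_writes_demo(int pkey)
	{
		u64 reg = read_pkey_reg();	/* also asserts HW and shadow agree */

		reg = set_pkey_bits(reg, pkey, PKEY_DISABLE_WRITE);
		write_pkey_reg(reg);		/* updates shadow_pkey_reg */
		assert(get_pkey_bits(read_pkey_reg(), pkey) == PKEY_DISABLE_WRITE);
	}
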
+
+/*
+ * These are technically racy, since something could
+ * change the PKEY register between the read and the write.
+ */
+static inline void __pkey_access_allow(int pkey, int do_allow)
+{
+ u64 pkey_reg = read_pkey_reg();
+ int bit = pkey * 2;
+
+ if (do_allow)
+ pkey_reg &= ~(1UL << bit); /* clear the disable bit to allow access */
+ else
+ pkey_reg |= (1UL << bit);
+
+ dprintf4("pkey_reg now: %016llx\n", read_pkey_reg());
+ write_pkey_reg(pkey_reg);
+}
+
+static inline void __pkey_write_allow(int pkey, int do_allow_write)
+{
+ u64 pkey_reg = read_pkey_reg();
+ int bit = pkey * 2 + 1;
+
+ if (do_allow_write)
+ pkey_reg &= ~(1UL << bit); /* clear the disable bit to allow writes */
+ else
+ pkey_reg |= (1UL << bit);
+
+ write_pkey_reg(pkey_reg);
+ dprintf4("pkey_reg now: %016llx\n", read_pkey_reg());
+}
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
+#define ALIGN_DOWN(x, align_to) ((x) & ~((align_to)-1))
+#define ALIGN_PTR_UP(p, ptr_align_to) \
+ ((typeof(p))ALIGN_UP((unsigned long)(p), ptr_align_to))
+#define ALIGN_PTR_DOWN(p, ptr_align_to) \
+ ((typeof(p))ALIGN_DOWN((unsigned long)(p), ptr_align_to))
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+
+static inline u32 *siginfo_get_pkey_ptr(siginfo_t *si)
+{
+#ifdef si_pkey
+ return &si->si_pkey;
+#else
+ return (u32 *)(((u8 *)si) + si_pkey_offset);
+#endif
+}
+
+static inline int kernel_has_pkeys(void)
+{
+ /* try allocating a key and see if it succeeds */
+ int ret = sys_pkey_alloc(0, 0);
+ if (ret <= 0) {
+ return 0;
+ }
+ sys_pkey_free(ret);
+ return 1;
+}
+
+static inline int is_pkeys_supported(void)
+{
+ /* check if the cpu supports pkeys */
+ if (!cpu_has_pkeys()) {
+ dprintf1("SKIP: %s: no CPU support\n", __func__);
+ return 0;
+ }
+
+ /* check if the kernel supports pkeys */
+ if (!kernel_has_pkeys()) {
+ dprintf1("SKIP: %s: no kernel support\n", __func__);
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif /* _PKEYS_HELPER_H */
diff --git a/tools/testing/selftests/vm/pkey-powerpc.h b/tools/testing/selftests/vm/pkey-powerpc.h
new file mode 100644
index 000000000000..1ebb586b2fbc
--- /dev/null
+++ b/tools/testing/selftests/vm/pkey-powerpc.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _PKEYS_POWERPC_H
+#define _PKEYS_POWERPC_H
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 386
+#endif
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 384
+# define SYS_pkey_free 385
+#endif
+#define REG_IP_IDX PT_NIP
+#define REG_TRAPNO PT_TRAP
+#define gregs gp_regs
+#define fpregs fp_regs
+#define si_pkey_offset 0x20
+
+#undef PKEY_DISABLE_ACCESS
+#define PKEY_DISABLE_ACCESS 0x3 /* disable read and write */
+
+#undef PKEY_DISABLE_WRITE
+#define PKEY_DISABLE_WRITE 0x2
+
+#define NR_PKEYS 32
+#define NR_RESERVED_PKEYS_4K 27 /* pkey-0, pkey-1, exec-only-pkey
+ and 24 other keys that cannot be
+ represented in the PTE */
+#define NR_RESERVED_PKEYS_64K_3KEYS 3 /* PowerNV and KVM: pkey-0,
+ pkey-1 and exec-only key */
+#define NR_RESERVED_PKEYS_64K_4KEYS 4 /* PowerVM: pkey-0, pkey-1,
+ pkey-31 and exec-only key */
+#define PKEY_BITS_PER_PKEY 2
+#define HPAGE_SIZE (1UL << 24)
+#define PAGE_SIZE sysconf(_SC_PAGESIZE)
+
+static inline u32 pkey_bit_position(int pkey)
+{
+ return (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY;
+}
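+
+/*
+ * For illustration: the AMR packs keys starting from the most-significant
+ * end, so with NR_PKEYS == 32 pkey 2 maps to shift (32 - 2 - 1) * 2 = 58,
+ * i.e. bits 58-59 of the register.
+ */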
+
+static inline u64 __read_pkey_reg(void)
+{
+ u64 pkey_reg;
+
+ asm volatile("mfspr %0, 0xd" : "=r" (pkey_reg));
+
+ return pkey_reg;
+}
+
+static inline void __write_pkey_reg(u64 pkey_reg)
+{
+ u64 amr = pkey_reg;
+
+ dprintf4("%s() changing %016llx to %016llx\n",
+ __func__, __read_pkey_reg(), pkey_reg);
+
+ asm volatile("isync; mtspr 0xd, %0; isync"
+ : : "r" ((unsigned long)(amr)) : "memory");
+
+ dprintf4("%s() pkey register after changing %016llx to %016llx\n",
+ __func__, __read_pkey_reg(), pkey_reg);
+}
+
+static inline int cpu_has_pkeys(void)
+{
+ /* No simple way to determine this */
+ return 1;
+}
+
+static inline bool arch_is_powervm()
+{
+ struct stat buf;
+
+ if ((stat("/sys/firmware/devicetree/base/ibm,partition-name", &buf) == 0) &&
+ (stat("/sys/firmware/devicetree/base/hmc-managed?", &buf) == 0) &&
+ (stat("/sys/firmware/devicetree/base/chosen/qemu,graphic-width", &buf) == -1) )
+ return true;
+
+ return false;
+}
+
+static inline int get_arch_reserved_keys(void)
+{
+ if (sysconf(_SC_PAGESIZE) == 4096)
+ return NR_RESERVED_PKEYS_4K;
+ else
+ if (arch_is_powervm())
+ return NR_RESERVED_PKEYS_64K_4KEYS;
+ else
+ return NR_RESERVED_PKEYS_64K_3KEYS;
+}
+
+void expect_fault_on_read_execonly_key(void *p1, int pkey)
+{
+ /*
+ * powerpc does not allow userspace to change permissions of exec-only
+ * keys since those keys are not allocated by userspace. The signal
+ * handler won't be able to reset the permissions, so the code would
+ * keep segfaulting here indefinitely.
+ */
+ return;
+}
+
+/* 4-byte instructions * 16384 = 64K page */
+#define __page_o_noops() asm(".rept 16384 ; nop; .endr")
+
+void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey)
+{
+ void *ptr;
+ int ret;
+
+ dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+ size, prot, pkey);
+ pkey_assert(pkey < NR_PKEYS);
+ ptr = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ pkey_assert(ptr != (void *)-1);
+
+ ret = syscall(__NR_subpage_prot, ptr, size, NULL);
+ if (ret) {
+ perror("subpage_perm");
+ return PTR_ERR_ENOTSUP;
+ }
+
+ ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
+ pkey_assert(!ret);
+ record_pkey_malloc(ptr, size, prot);
+
+ dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
+ return ptr;
+}
+
+#endif /* _PKEYS_POWERPC_H */
diff --git a/tools/testing/selftests/vm/pkey-x86.h b/tools/testing/selftests/vm/pkey-x86.h
new file mode 100644
index 000000000000..3be20f5d5275
--- /dev/null
+++ b/tools/testing/selftests/vm/pkey-x86.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _PKEYS_X86_H
+#define _PKEYS_X86_H
+
+#ifdef __i386__
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 380
+#endif
+
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 381
+# define SYS_pkey_free 382
+#endif
+
+#define REG_IP_IDX REG_EIP
+#define si_pkey_offset 0x14
+
+#else
+
+#ifndef SYS_mprotect_key
+# define SYS_mprotect_key 329
+#endif
+
+#ifndef SYS_pkey_alloc
+# define SYS_pkey_alloc 330
+# define SYS_pkey_free 331
+#endif
+
+#define REG_IP_IDX REG_RIP
+#define si_pkey_offset 0x20
+
+#endif
+
+#ifndef PKEY_DISABLE_ACCESS
+# define PKEY_DISABLE_ACCESS 0x1
+#endif
+
+#ifndef PKEY_DISABLE_WRITE
+# define PKEY_DISABLE_WRITE 0x2
+#endif
+
+#define NR_PKEYS 16
+#define NR_RESERVED_PKEYS 2 /* pkey-0 and exec-only-pkey */
+#define PKEY_BITS_PER_PKEY 2
+#define HPAGE_SIZE (1UL<<21)
+#define PAGE_SIZE 4096
+#define MB (1<<20)
+
+static inline void __page_o_noops(void)
+{
+ /* 8-byte instructions * 512 = 1 page (4096 bytes) */
+ asm(".rept 512 ; nopl 0x7eeeeeee(%eax) ; .endr");
+}
+
+static inline u64 __read_pkey_reg(void)
+{
+ unsigned int eax, edx;
+ unsigned int ecx = 0;
+ unsigned pkey_reg;
+
+ asm volatile(".byte 0x0f,0x01,0xee\n\t"
+ : "=a" (eax), "=d" (edx)
+ : "c" (ecx));
+ pkey_reg = eax;
+ return pkey_reg;
+}
+
+static inline void __write_pkey_reg(u64 pkey_reg)
+{
+ unsigned int eax = pkey_reg;
+ unsigned int ecx = 0;
+ unsigned int edx = 0;
+
+ dprintf4("%s() changing %016llx to %016llx\n", __func__,
+ __read_pkey_reg(), pkey_reg);
+ asm volatile(".byte 0x0f,0x01,0xef\n\t"
+ : : "a" (eax), "c" (ecx), "d" (edx));
+ assert(pkey_reg == __read_pkey_reg());
+}
+
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ /* ecx is often an input as well as an output. */
+ asm volatile(
+ "cpuid;"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
+}
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx) */
+#define X86_FEATURE_PKU (1<<3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE (1<<4) /* OS Protection Keys Enable */
+
+static inline int cpu_has_pkeys(void)
+{
+ unsigned int eax;
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+
+ eax = 0x7;
+ ecx = 0x0;
+ __cpuid(&eax, &ebx, &ecx, &edx);
+
+ if (!(ecx & X86_FEATURE_PKU)) {
+ dprintf2("cpu does not have PKU\n");
+ return 0;
+ }
+ if (!(ecx & X86_FEATURE_OSPKE)) {
+ dprintf2("cpu does not have OSPKE\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline u32 pkey_bit_position(int pkey)
+{
+ return pkey * PKEY_BITS_PER_PKEY;
+}
+
+#define XSTATE_PKEY_BIT (9)
+#define XSTATE_PKEY 0x200
+
+int pkey_reg_xstate_offset(void)
+{
+ unsigned int eax;
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+ int xstate_offset;
+ int xstate_size;
+ unsigned long XSTATE_CPUID = 0xd;
+ int leaf;
+
+ /* assume that XSTATE_PKEY is set in XCR0 */
+ leaf = XSTATE_PKEY_BIT;
+ {
+ eax = XSTATE_CPUID;
+ ecx = leaf;
+ __cpuid(&eax, &ebx, &ecx, &edx);
+
+ if (leaf == XSTATE_PKEY_BIT) {
+ xstate_offset = ebx;
+ xstate_size = eax;
+ }
+ }
+
+ if (xstate_size == 0) {
+ printf("could not find size/offset of PKEY in xsave state\n");
+ return 0;
+ }
+
+ return xstate_offset;
+}
+
+static inline int get_arch_reserved_keys(void)
+{
+ return NR_RESERVED_PKEYS;
+}
+
+void expect_fault_on_read_execonly_key(void *p1, int pkey)
+{
+ int ptr_contents;
+
+ ptr_contents = read_ptr(p1);
+ dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+ expected_pkey_fault(pkey);
+}
+
+void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey)
+{
+ return PTR_ERR_ENOTSUP;
+}
+
+#endif /* _PKEYS_X86_H */
diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
new file mode 100644
index 000000000000..fc19addcb5c8
--- /dev/null
+++ b/tools/testing/selftests/vm/protection_keys.c
@@ -0,0 +1,1580 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tests Memory Protection Keys (see Documentation/vm/protection-keys.txt)
+ *
+ * There are examples in here of:
+ * * how to set protection keys on memory
+ * * how to set/clear bits in pkey registers (the rights register)
+ * * how to handle SEGV_PKUERR signals and extract pkey-relevant
+ * information from the siginfo
+ *
+ * Things to add:
+ * make sure KSM and KSM COW breaking works
+ * prefault pages in at malloc, or not
+ * protect MPX bounds tables with protection keys?
+ * make sure VMA splitting/merging is working correctly
+ * OOMs can destroy mm->mmap (see exit_mmap()), so make sure it is immune to pkeys
+ * look for pkey "leaks" where it is still set on a VMA but "freed" back to the kernel
+ * do a plain mprotect() to a mprotect_pkey() area and make sure the pkey sticks
+ *
+ * Compile like this:
+ * gcc -o protection_keys -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+ * gcc -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
+ */
+#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__
+#include <errno.h>
+#include <linux/futex.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ptrace.h>
+#include <setjmp.h>
+
+#include "pkey-helpers.h"
+
+int iteration_nr = 1;
+int test_nr;
+
+u64 shadow_pkey_reg;
+int dprint_in_signal;
+char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
+
+void cat_into_file(char *str, char *file)
+{
+ int fd = open(file, O_RDWR);
+ int ret;
+
+ dprintf2("%s(): writing '%s' to '%s'\n", __func__, str, file);
+ /*
+ * these need to be raw because they are called under
+ * pkey_assert()
+ */
+ if (fd < 0) {
+ fprintf(stderr, "error opening '%s'\n", str);
+ perror("error: ");
+ exit(__LINE__);
+ }
+
+ ret = write(fd, str, strlen(str));
+ if (ret != strlen(str)) {
+ perror("write to file failed");
+ fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
+ exit(__LINE__);
+ }
+ close(fd);
+}
+
+#if CONTROL_TRACING > 0
+static int warned_tracing;
+int tracing_root_ok(void)
+{
+ if (geteuid() != 0) {
+ if (!warned_tracing)
+ fprintf(stderr, "WARNING: not run as root, "
+ "can not do tracing control\n");
+ warned_tracing = 1;
+ return 0;
+ }
+ return 1;
+}
+#endif
+
+void tracing_on(void)
+{
+#if CONTROL_TRACING > 0
+#define TRACEDIR "/sys/kernel/debug/tracing"
+ char pidstr[32];
+
+ if (!tracing_root_ok())
+ return;
+
+ sprintf(pidstr, "%d", getpid());
+ cat_into_file("0", TRACEDIR "/tracing_on");
+ cat_into_file("\n", TRACEDIR "/trace");
+ if (1) {
+ cat_into_file("function_graph", TRACEDIR "/current_tracer");
+ cat_into_file("1", TRACEDIR "/options/funcgraph-proc");
+ } else {
+ cat_into_file("nop", TRACEDIR "/current_tracer");
+ }
+ cat_into_file(pidstr, TRACEDIR "/set_ftrace_pid");
+ cat_into_file("1", TRACEDIR "/tracing_on");
+ dprintf1("enabled tracing\n");
+#endif
+}
+
+void tracing_off(void)
+{
+#if CONTROL_TRACING > 0
+ if (!tracing_root_ok())
+ return;
+ cat_into_file("0", "/sys/kernel/debug/tracing/tracing_on");
+#endif
+}
+
+void abort_hooks(void)
+{
+ fprintf(stderr, "running %s()...\n", __func__);
+ tracing_off();
+#ifdef SLEEP_ON_ABORT
+ sleep(SLEEP_ON_ABORT);
+#endif
+}
+
+/*
+ * This attempts to have roughly a page of instructions followed by a few
+ * instructions that do a write, and another page of instructions. That
+ * way, we are pretty sure that the write is in the second page of
+ * instructions and has at least a page of padding behind it.
+ *
+ * *That* lets us be sure to madvise() away the write instruction, which
+ * will then fault, which makes sure that the fault code handles
+ * execute-only memory properly.
+ */
+#ifdef __powerpc64__
+/* This way, both 4K and 64K alignment are maintained */
+__attribute__((__aligned__(65536)))
+#else
+__attribute__((__aligned__(PAGE_SIZE)))
+#endif
+void lots_o_noops_around_write(int *write_to_me)
+{
+ dprintf3("running %s()\n", __func__);
+ __page_o_noops();
+ /* Assume this happens in the second page of instructions: */
+ *write_to_me = __LINE__;
+ /* pad out by another page: */
+ __page_o_noops();
+ dprintf3("%s() done\n", __func__);
+}
+
+void dump_mem(void *dumpme, int len_bytes)
+{
+ char *c = (void *)dumpme;
+ int i;
+
+ for (i = 0; i < len_bytes; i += sizeof(u64)) {
+ u64 *ptr = (u64 *)(c + i);
+ dprintf1("dump[%03d][@%p]: %016llx\n", i, ptr, *ptr);
+ }
+}
+
+static u32 hw_pkey_get(int pkey, unsigned long flags)
+{
+ u64 pkey_reg = __read_pkey_reg();
+
+ dprintf1("%s(pkey=%d, flags=%lx) = %x / %d\n",
+ __func__, pkey, flags, 0, 0);
+ dprintf2("%s() raw pkey_reg: %016llx\n", __func__, pkey_reg);
+
+ return (u32) get_pkey_bits(pkey_reg, pkey);
+}
+
+static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags)
+{
+ u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
+ u64 old_pkey_reg = __read_pkey_reg();
+ u64 new_pkey_reg;
+
+ /* make sure that 'rights' only contains the bits we expect: */
+ assert(!(rights & ~mask));
+
+ /* modify bits accordingly in old pkey_reg and assign it */
+ new_pkey_reg = set_pkey_bits(old_pkey_reg, pkey, rights);
+
+ __write_pkey_reg(new_pkey_reg);
+
+ dprintf3("%s(pkey=%d, rights=%lx, flags=%lx) = %x"
+ " pkey_reg now: %016llx old_pkey_reg: %016llx\n",
+ __func__, pkey, rights, flags, 0, __read_pkey_reg(),
+ old_pkey_reg);
+ return 0;
+}
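+
+/*
+ * Typical use (illustrative): hw_pkey_set(pkey, PKEY_DISABLE_WRITE, 0)
+ * leaves the key readable but write-protected, while passing 0 for
+ * 'rights' restores full access. Note that the shadow register is
+ * updated by the callers below, not here.
+ */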
+
+void pkey_disable_set(int pkey, int flags)
+{
+ unsigned long syscall_flags = 0;
+ int ret;
+ int pkey_rights;
+ u64 orig_pkey_reg = read_pkey_reg();
+
+ dprintf1("START->%s(%d, 0x%x)\n", __func__,
+ pkey, flags);
+ pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
+
+ pkey_rights = hw_pkey_get(pkey, syscall_flags);
+
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ pkey, pkey, pkey_rights);
+
+ pkey_assert(pkey_rights >= 0);
+
+ pkey_rights |= flags;
+
+ ret = hw_pkey_set(pkey, pkey_rights, syscall_flags);
+ assert(!ret);
+ /* pkey_reg and flags have the same format */
+ shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, pkey, pkey_rights);
+ dprintf1("%s(%d) shadow: 0x%016llx\n",
+ __func__, pkey, shadow_pkey_reg);
+
+ pkey_assert(ret >= 0);
+
+ pkey_rights = hw_pkey_get(pkey, syscall_flags);
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ pkey, pkey, pkey_rights);
+
+ dprintf1("%s(%d) pkey_reg: 0x%016llx\n",
+ __func__, pkey, read_pkey_reg());
+ if (flags)
+ pkey_assert(read_pkey_reg() >= orig_pkey_reg);
+ dprintf1("END<---%s(%d, 0x%x)\n", __func__,
+ pkey, flags);
+}
+
+void pkey_disable_clear(int pkey, int flags)
+{
+ unsigned long syscall_flags = 0;
+ int ret;
+ int pkey_rights = hw_pkey_get(pkey, syscall_flags);
+ u64 orig_pkey_reg = read_pkey_reg();
+
+ pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
+
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ pkey, pkey, pkey_rights);
+ pkey_assert(pkey_rights >= 0);
+
+ pkey_rights &= ~flags;
+
+ ret = hw_pkey_set(pkey, pkey_rights, 0);
+ shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, pkey, pkey_rights);
+ pkey_assert(ret >= 0);
+
+ pkey_rights = hw_pkey_get(pkey, syscall_flags);
+ dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ pkey, pkey, pkey_rights);
+
+ dprintf1("%s(%d) pkey_reg: 0x%016llx\n", __func__,
+ pkey, read_pkey_reg());
+ if (flags)
+ assert(read_pkey_reg() <= orig_pkey_reg);
+}
+
+void pkey_write_allow(int pkey)
+{
+ pkey_disable_clear(pkey, PKEY_DISABLE_WRITE);
+}
+void pkey_write_deny(int pkey)
+{
+ pkey_disable_set(pkey, PKEY_DISABLE_WRITE);
+}
+void pkey_access_allow(int pkey)
+{
+ pkey_disable_clear(pkey, PKEY_DISABLE_ACCESS);
+}
+void pkey_access_deny(int pkey)
+{
+ pkey_disable_set(pkey, PKEY_DISABLE_ACCESS);
+}
+
+/* Failed address bound checks: */
+#ifndef SEGV_BNDERR
+# define SEGV_BNDERR 3
+#endif
+
+#ifndef SEGV_PKUERR
+# define SEGV_PKUERR 4
+#endif
+
+static char *si_code_str(int si_code)
+{
+ if (si_code == SEGV_MAPERR)
+ return "SEGV_MAPERR";
+ if (si_code == SEGV_ACCERR)
+ return "SEGV_ACCERR";
+ if (si_code == SEGV_BNDERR)
+ return "SEGV_BNDERR";
+ if (si_code == SEGV_PKUERR)
+ return "SEGV_PKUERR";
+ return "UNKNOWN";
+}
+
+int pkey_faults;
+int last_si_pkey = -1;
+void signal_handler(int signum, siginfo_t *si, void *vucontext)
+{
+ ucontext_t *uctxt = vucontext;
+ int trapno;
+ unsigned long ip;
+ char *fpregs;
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+ u32 *pkey_reg_ptr;
+ int pkey_reg_offset;
+#endif /* arch */
+ u64 siginfo_pkey;
+ u32 *si_pkey_ptr;
+
+ dprint_in_signal = 1;
+ dprintf1(">>>>===============SIGSEGV============================\n");
+ dprintf1("%s()::%d, pkey_reg: 0x%016llx shadow: %016llx\n",
+ __func__, __LINE__,
+ __read_pkey_reg(), shadow_pkey_reg);
+
+ trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO];
+ ip = uctxt->uc_mcontext.gregs[REG_IP_IDX];
+ fpregs = (char *) uctxt->uc_mcontext.fpregs;
+
+ dprintf2("%s() trapno: %d ip: 0x%016lx info->si_code: %s/%d\n",
+ __func__, trapno, ip, si_code_str(si->si_code),
+ si->si_code);
+
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+#ifdef __i386__
+ /*
+ * 32-bit has some extra padding so that userspace can tell whether
+ * the XSTATE header is present in addition to the "legacy" FPU
+ * state. We just assume that it is here.
+ */
+ fpregs += 0x70;
+#endif /* i386 */
+ pkey_reg_offset = pkey_reg_xstate_offset();
+ pkey_reg_ptr = (void *)(&fpregs[pkey_reg_offset]);
+
+ /*
+ * If we got a PKEY fault, we *HAVE* to have at least one bit set in
+ * here.
+ */
+ dprintf1("pkey_reg_xstate_offset: %d\n", pkey_reg_xstate_offset());
+ if (DEBUG_LEVEL > 4)
+ dump_mem(pkey_reg_ptr - 128, 256);
+ pkey_assert(*pkey_reg_ptr);
+#endif /* arch */
+
+ dprintf1("siginfo: %p\n", si);
+ dprintf1(" fpregs: %p\n", fpregs);
+
+ if ((si->si_code == SEGV_MAPERR) ||
+ (si->si_code == SEGV_ACCERR) ||
+ (si->si_code == SEGV_BNDERR)) {
+ printf("non-PK si_code, exiting...\n");
+ exit(4);
+ }
+
+ si_pkey_ptr = siginfo_get_pkey_ptr(si);
+ dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
+ dump_mem((u8 *)si_pkey_ptr - 8, 24);
+ siginfo_pkey = *si_pkey_ptr;
+ pkey_assert(siginfo_pkey < NR_PKEYS);
+ last_si_pkey = siginfo_pkey;
+
+ /*
+ * need __read_pkey_reg() version so we do not do shadow_pkey_reg
+ * checking
+ */
+ dprintf1("signal pkey_reg from pkey_reg: %016llx\n",
+ __read_pkey_reg());
+ dprintf1("pkey from siginfo: %016llx\n", siginfo_pkey);
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+ dprintf1("signal pkey_reg from xsave: %08x\n", *pkey_reg_ptr);
+ *(u64 *)pkey_reg_ptr = 0x00000000;
+ dprintf1("WARNING: set PKEY_REG=0 to allow faulting instruction to continue\n");
+#elif defined(__powerpc64__) /* arch */
+ /* restore access and let the faulting instruction continue */
+ pkey_access_allow(siginfo_pkey);
+#endif /* arch */
+ pkey_faults++;
+ dprintf1("<<<<==================================================\n");
+ dprint_in_signal = 0;
+}
+
+int wait_all_children(void)
+{
+ int status;
+ return waitpid(-1, &status, 0);
+}
+
+void sig_chld(int x)
+{
+ dprint_in_signal = 1;
+ dprintf2("[%d] SIGCHLD: %d\n", getpid(), x);
+ dprint_in_signal = 0;
+}
+
+void setup_sigsegv_handler(void)
+{
+ int r, rs;
+ struct sigaction newact;
+ struct sigaction oldact;
+
+ /* #PF is mapped to sigsegv */
+ int signum = SIGSEGV;
+
+ newact.sa_handler = 0;
+ newact.sa_sigaction = signal_handler;
+
+ /* sigset_t - signals to block while in the handler */
+ /* get the old signal mask. */
+ rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask);
+ pkey_assert(rs == 0);
+
+ /* call sa_sigaction, not sa_handler */
+ newact.sa_flags = SA_SIGINFO;
+
+ newact.sa_restorer = 0; /* void(*)(), obsolete */
+ r = sigaction(signum, &newact, &oldact);
+ r = sigaction(SIGALRM, &newact, &oldact);
+ pkey_assert(r == 0);
+}
+
+void setup_handlers(void)
+{
+ signal(SIGCHLD, &sig_chld);
+ setup_sigsegv_handler();
+}
+
+pid_t fork_lazy_child(void)
+{
+ pid_t forkret;
+
+ forkret = fork();
+ pkey_assert(forkret >= 0);
+ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+ if (!forkret) {
+ /* in the child */
+ while (1) {
+ dprintf1("child sleeping...\n");
+ sleep(30);
+ }
+ }
+ return forkret;
+}
+
+int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
+ unsigned long pkey)
+{
+ int sret;
+
+ dprintf2("%s(0x%p, %zx, prot=%lx, pkey=%lx)\n", __func__,
+ ptr, size, orig_prot, pkey);
+
+ errno = 0;
+ sret = syscall(SYS_mprotect_key, ptr, size, orig_prot, pkey);
+ if (errno) {
+ dprintf2("SYS_mprotect_key sret: %d\n", sret);
+ dprintf2("SYS_mprotect_key prot: 0x%lx\n", orig_prot);
+ dprintf2("SYS_mprotect_key failed, errno: %d\n", errno);
+ if (DEBUG_LEVEL >= 2)
+ perror("SYS_mprotect_pkey");
+ }
+ return sret;
+}
+
+int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
+{
+ int ret = syscall(SYS_pkey_alloc, flags, init_val);
+ dprintf1("%s(flags=%lx, init_val=%lx) syscall ret: %d errno: %d\n",
+ __func__, flags, init_val, ret, errno);
+ return ret;
+}
+
+int alloc_pkey(void)
+{
+ int ret;
+ unsigned long init_val = 0x0;
+
+ dprintf1("%s()::%d, pkey_reg: 0x%016llx shadow: %016llx\n",
+ __func__, __LINE__, __read_pkey_reg(), shadow_pkey_reg);
+ ret = sys_pkey_alloc(0, init_val);
+ /*
+ * pkey_alloc() sets PKEY register, so we need to reflect it in
+ * shadow_pkey_reg:
+ */
+ dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__, __LINE__, ret, __read_pkey_reg(),
+ shadow_pkey_reg);
+ if (ret) {
+ /* clear both the bits: */
+ shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
+ ~PKEY_MASK);
+ dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__,
+ __LINE__, ret, __read_pkey_reg(),
+ shadow_pkey_reg);
+ /*
+ * move the new state in from init_val
+ * (remember, we cheated and init_val == pkey_reg format)
+ */
+ shadow_pkey_reg = set_pkey_bits(shadow_pkey_reg, ret,
+ init_val);
+ }
+ dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__, __LINE__, ret, __read_pkey_reg(),
+ shadow_pkey_reg);
+ dprintf1("%s()::%d errno: %d\n", __func__, __LINE__, errno);
+ /* for shadow checking: */
+ read_pkey_reg();
+ dprintf4("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__, __LINE__, ret, __read_pkey_reg(),
+ shadow_pkey_reg);
+ return ret;
+}
+
+int sys_pkey_free(unsigned long pkey)
+{
+ int ret = syscall(SYS_pkey_free, pkey);
+ dprintf1("%s(pkey=%ld) syscall ret: %d\n", __func__, pkey, ret);
+ return ret;
+}
+
+/*
+ * I had a bug where pkey bits could be set by mprotect() but
+ * not cleared. This ensures we get lots of random bit sets
+ * and clears on the vma and pte pkey bits.
+ */
+int alloc_random_pkey(void)
+{
+ int max_nr_pkey_allocs;
+ int ret;
+ int i;
+ int alloced_pkeys[NR_PKEYS];
+ int nr_alloced = 0;
+ int random_index;
+ memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
+ srand((unsigned int)time(NULL));
+
+ /* allocate every possible key and make a note of which ones we got */
+ max_nr_pkey_allocs = NR_PKEYS;
+ for (i = 0; i < max_nr_pkey_allocs; i++) {
+ int new_pkey = alloc_pkey();
+ if (new_pkey < 0)
+ break;
+ alloced_pkeys[nr_alloced++] = new_pkey;
+ }
+
+ pkey_assert(nr_alloced > 0);
+ /* select a random one out of the allocated ones */
+ random_index = rand() % nr_alloced;
+ ret = alloced_pkeys[random_index];
+ /* now zero it out so we don't free it next */
+ alloced_pkeys[random_index] = 0;
+
+ /* go through the allocated ones that we did not want and free them */
+ for (i = 0; i < nr_alloced; i++) {
+ int free_ret;
+ if (!alloced_pkeys[i])
+ continue;
+ free_ret = sys_pkey_free(alloced_pkeys[i]);
+ pkey_assert(!free_ret);
+ }
+ dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n", __func__,
+ __LINE__, ret, __read_pkey_reg(), shadow_pkey_reg);
+ return ret;
+}
+
+int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
+ unsigned long pkey)
+{
+ int nr_iterations = random() % 100;
+ int ret;
+
+ while (0) {
+ int rpkey = alloc_random_pkey();
+ ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
+ dprintf1("sys_mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
+ ptr, size, orig_prot, pkey, ret);
+ if (nr_iterations-- < 0)
+ break;
+
+ dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__, __LINE__, ret, __read_pkey_reg(),
+ shadow_pkey_reg);
+ sys_pkey_free(rpkey);
+ dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__, __LINE__, ret, __read_pkey_reg(),
+ shadow_pkey_reg);
+ }
+ pkey_assert(pkey < NR_PKEYS);
+
+ ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
+ dprintf1("mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
+ ptr, size, orig_prot, pkey, ret);
+ pkey_assert(!ret);
+ dprintf1("%s()::%d, ret: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n", __func__,
+ __LINE__, ret, __read_pkey_reg(), shadow_pkey_reg);
+ return ret;
+}
+
+struct pkey_malloc_record {
+ void *ptr;
+ long size;
+ int prot;
+};
+struct pkey_malloc_record *pkey_malloc_records;
+struct pkey_malloc_record *pkey_last_malloc_record;
+long nr_pkey_malloc_records;
+void record_pkey_malloc(void *ptr, long size, int prot)
+{
+ long i;
+ struct pkey_malloc_record *rec = NULL;
+
+ for (i = 0; i < nr_pkey_malloc_records; i++) {
+ rec = &pkey_malloc_records[i];
+ /* find a free record */
+ if (rec)
+ break;
+ }
+ if (!rec) {
+ /* every record is full */
+ size_t old_nr_records = nr_pkey_malloc_records;
+ size_t new_nr_records = (nr_pkey_malloc_records * 2 + 1);
+ size_t new_size = new_nr_records * sizeof(struct pkey_malloc_record);
+ dprintf2("new_nr_records: %zd\n", new_nr_records);
+ dprintf2("new_size: %zd\n", new_size);
+ pkey_malloc_records = realloc(pkey_malloc_records, new_size);
+ pkey_assert(pkey_malloc_records != NULL);
+ rec = &pkey_malloc_records[nr_pkey_malloc_records];
+ /*
+ * realloc() does not initialize memory, so zero it from
+ * the first new record all the way to the end.
+ */
+ for (i = 0; i < new_nr_records - old_nr_records; i++)
+ memset(rec + i, 0, sizeof(*rec));
+ }
+ dprintf3("filling malloc record[%d/%p]: {%p, %ld}\n",
+ (int)(rec - pkey_malloc_records), rec, ptr, size);
+ rec->ptr = ptr;
+ rec->size = size;
+ rec->prot = prot;
+ pkey_last_malloc_record = rec;
+ nr_pkey_malloc_records++;
+}
+
+void free_pkey_malloc(void *ptr)
+{
+ long i;
+ int ret;
+ dprintf3("%s(%p)\n", __func__, ptr);
+ for (i = 0; i < nr_pkey_malloc_records; i++) {
+ struct pkey_malloc_record *rec = &pkey_malloc_records[i];
+ dprintf4("looking for ptr %p at record[%ld/%p]: {%p, %ld}\n",
+ ptr, i, rec, rec->ptr, rec->size);
+ if ((ptr < rec->ptr) ||
+ (ptr >= rec->ptr + rec->size))
+ continue;
+
+ dprintf3("found ptr %p at record[%ld/%p]: {%p, %ld}\n",
+ ptr, i, rec, rec->ptr, rec->size);
+ nr_pkey_malloc_records--;
+ ret = munmap(rec->ptr, rec->size);
+ dprintf3("munmap ret: %d\n", ret);
+ pkey_assert(!ret);
+ dprintf3("clearing rec->ptr, rec: %p\n", rec);
+ rec->ptr = NULL;
+ dprintf3("done clearing rec->ptr, rec: %p\n", rec);
+ return;
+ }
+ pkey_assert(false);
+}
+
+
+void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
+{
+ void *ptr;
+ int ret;
+
+ read_pkey_reg();
+ dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+ size, prot, pkey);
+ pkey_assert(pkey < NR_PKEYS);
+ ptr = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ pkey_assert(ptr != (void *)-1);
+ ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
+ pkey_assert(!ret);
+ record_pkey_malloc(ptr, size, prot);
+ read_pkey_reg();
+
+ dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
+ return ptr;
+}
+
+void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
+{
+ int ret;
+ void *ptr;
+
+ dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+ size, prot, pkey);
+ /*
+ * Guarantee we can fit at least one huge page in the resulting
+ * allocation by allocating space for 2:
+ */
+ size = ALIGN_UP(size, HPAGE_SIZE * 2);
+ ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ pkey_assert(ptr != (void *)-1);
+ record_pkey_malloc(ptr, size, prot);
+ mprotect_pkey(ptr, size, prot, pkey);
+
+ dprintf1("unaligned ptr: %p\n", ptr);
+ ptr = ALIGN_PTR_UP(ptr, HPAGE_SIZE);
+ dprintf1(" aligned ptr: %p\n", ptr);
+ ret = madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE);
+ dprintf1("MADV_HUGEPAGE ret: %d\n", ret);
+ ret = madvise(ptr, HPAGE_SIZE, MADV_WILLNEED);
+ dprintf1("MADV_WILLNEED ret: %d\n", ret);
+ memset(ptr, 0, HPAGE_SIZE);
+
+ dprintf1("mmap()'d thp for pkey %d @ %p\n", pkey, ptr);
+ return ptr;
+}
+
+int hugetlb_setup_ok;
+#define SYSFS_FMT_NR_HUGE_PAGES "/sys/kernel/mm/hugepages/hugepages-%ldkB/nr_hugepages"
+#define GET_NR_HUGE_PAGES 10
+void setup_hugetlbfs(void)
+{
+ int err;
+ int fd;
+ char buf[256];
+ long hpagesz_kb;
+ long hpagesz_mb;
+
+ if (geteuid() != 0) {
+ fprintf(stderr, "WARNING: not run as root, can not do hugetlb test\n");
+ return;
+ }
+
+ cat_into_file(__stringify(GET_NR_HUGE_PAGES), "/proc/sys/vm/nr_hugepages");
+
+ /*
+ * Now go make sure that we got the pages and that they
+ * are PMD-level pages. Someone might have made PUD-level
+ * pages the default.
+ */
+ hpagesz_kb = HPAGE_SIZE / 1024;
+ hpagesz_mb = hpagesz_kb / 1024;
+ sprintf(buf, SYSFS_FMT_NR_HUGE_PAGES, hpagesz_kb);
+ fd = open(buf, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "opening sysfs %ldM hugetlb config: %s\n",
+ hpagesz_mb, strerror(errno));
+ return;
+ }
+
+ /* -1 to guarantee leaving the trailing \0 */
+ err = read(fd, buf, sizeof(buf)-1);
+ close(fd);
+ if (err <= 0) {
+ fprintf(stderr, "reading sysfs %ldM hugetlb config: %s\n",
+ hpagesz_mb, strerror(errno));
+ return;
+ }
+
+ if (atoi(buf) != GET_NR_HUGE_PAGES) {
+ fprintf(stderr, "could not confirm %ldM pages, got: '%s' expected %d\n",
+ hpagesz_mb, buf, GET_NR_HUGE_PAGES);
+ return;
+ }
+
+ hugetlb_setup_ok = 1;
+}
+
+void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
+{
+ void *ptr;
+ int flags = MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB;
+
+ if (!hugetlb_setup_ok)
+ return PTR_ERR_ENOTSUP;
+
+ dprintf1("doing %s(%ld, %x, %x)\n", __func__, size, prot, pkey);
+ size = ALIGN_UP(size, HPAGE_SIZE * 2);
+ pkey_assert(pkey < NR_PKEYS);
+ ptr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
+ pkey_assert(ptr != (void *)-1);
+ mprotect_pkey(ptr, size, prot, pkey);
+
+ record_pkey_malloc(ptr, size, prot);
+
+ dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
+ return ptr;
+}
+
+void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
+{
+ void *ptr;
+ int fd;
+
+ dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
+ size, prot, pkey);
+ pkey_assert(pkey < NR_PKEYS);
+ fd = open("/dax/foo", O_RDWR);
+ pkey_assert(fd >= 0);
+
+ ptr = mmap(0, size, prot, MAP_SHARED, fd, 0);
+ pkey_assert(ptr != (void *)-1);
+
+ mprotect_pkey(ptr, size, prot, pkey);
+
+ record_pkey_malloc(ptr, size, prot);
+
+ dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
+ close(fd);
+ return ptr;
+}
+
+void *(*pkey_malloc[])(long size, int prot, u16 pkey) = {
+
+ malloc_pkey_with_mprotect,
+ malloc_pkey_with_mprotect_subpage,
+ malloc_pkey_anon_huge,
+ malloc_pkey_hugetlb
+/* can not do direct with the pkey_mprotect() API:
+ malloc_pkey_mmap_direct,
+ malloc_pkey_mmap_dax,
+*/
+};
+
+void *malloc_pkey(long size, int prot, u16 pkey)
+{
+ void *ret;
+ static int malloc_type;
+ int nr_malloc_types = ARRAY_SIZE(pkey_malloc);
+
+ pkey_assert(pkey < NR_PKEYS);
+
+ while (1) {
+ pkey_assert(malloc_type < nr_malloc_types);
+
+ ret = pkey_malloc[malloc_type](size, prot, pkey);
+ pkey_assert(ret != (void *)-1);
+
+ malloc_type++;
+ if (malloc_type >= nr_malloc_types)
+ malloc_type = (random()%nr_malloc_types);
+
+ /* try again if the malloc_type we tried is unsupported */
+ if (ret == PTR_ERR_ENOTSUP)
+ continue;
+
+ break;
+ }
+
+ dprintf3("%s(%ld, prot=%x, pkey=%x) returning: %p\n", __func__,
+ size, prot, pkey, ret);
+ return ret;
+}
+
+int last_pkey_faults;
+#define UNKNOWN_PKEY -2
+void expected_pkey_fault(int pkey)
+{
+ dprintf2("%s(): last_pkey_faults: %d pkey_faults: %d\n",
+ __func__, last_pkey_faults, pkey_faults);
+ dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
+ pkey_assert(last_pkey_faults + 1 == pkey_faults);
+
+ /*
+ * For exec-only memory, we do not know the pkey in
+ * advance, so skip this check.
+ */
+ if (pkey != UNKNOWN_PKEY)
+ pkey_assert(last_si_pkey == pkey);
+
+#if defined(__i386__) || defined(__x86_64__) /* arch */
+ /*
+ * The signal handler should have cleared out the PKEY register to let
+ * the test program continue. We now have to restore it.
+ */
+ if (__read_pkey_reg() != 0)
+#else /* arch */
+ if (__read_pkey_reg() != shadow_pkey_reg)
+#endif /* arch */
+ pkey_assert(0);
+
+ __write_pkey_reg(shadow_pkey_reg);
+ dprintf1("%s() set pkey_reg=%016llx to restore state after signal "
+ "nuked it\n", __func__, shadow_pkey_reg);
+ last_pkey_faults = pkey_faults;
+ last_si_pkey = -1;
+}
+
+#define do_not_expect_pkey_fault(msg) do { \
+ if (last_pkey_faults != pkey_faults) \
+ dprintf0("unexpected PKey fault: %s\n", msg); \
+ pkey_assert(last_pkey_faults == pkey_faults); \
+} while (0)
+
+int test_fds[10] = { -1 };
+int nr_test_fds;
+void __save_test_fd(int fd)
+{
+ pkey_assert(fd >= 0);
+ pkey_assert(nr_test_fds < ARRAY_SIZE(test_fds));
+ test_fds[nr_test_fds] = fd;
+ nr_test_fds++;
+}
+
+int get_test_read_fd(void)
+{
+ int test_fd = open("/etc/passwd", O_RDONLY);
+ __save_test_fd(test_fd);
+ return test_fd;
+}
+
+void close_test_fds(void)
+{
+ int i;
+
+ for (i = 0; i < nr_test_fds; i++) {
+ if (test_fds[i] < 0)
+ continue;
+ close(test_fds[i]);
+ test_fds[i] = -1;
+ }
+ nr_test_fds = 0;
+}
+
+#define barrier() __asm__ __volatile__("": : :"memory")
+__attribute__((noinline)) int read_ptr(int *ptr)
+{
+ /*
+ * Keep GCC from optimizing this away somehow
+ */
+ barrier();
+ return *ptr;
+}
+
+void test_pkey_alloc_free_attach_pkey0(int *ptr, u16 pkey)
+{
+ int i, err;
+ int max_nr_pkey_allocs;
+ int alloced_pkeys[NR_PKEYS];
+ int nr_alloced = 0;
+ long size;
+
+ pkey_assert(pkey_last_malloc_record);
+ size = pkey_last_malloc_record->size;
+ /*
+ * This is a bit of a hack, but mprotect() requires
+ * huge-page-aligned sizes when operating on hugetlbfs.
+ * So, make sure that we use something that's a multiple
+ * of a huge page when we can.
+ */
+ if (size >= HPAGE_SIZE)
+ size = HPAGE_SIZE;
+
+ /* allocate every possible key and make sure key-0 never got allocated */
+ max_nr_pkey_allocs = NR_PKEYS;
+ for (i = 0; i < max_nr_pkey_allocs; i++) {
+ int new_pkey = alloc_pkey();
+ pkey_assert(new_pkey != 0);
+
+ if (new_pkey < 0)
+ break;
+ alloced_pkeys[nr_alloced++] = new_pkey;
+ }
+ /* free all the allocated keys */
+ for (i = 0; i < nr_alloced; i++) {
+ int free_ret;
+
+ if (!alloced_pkeys[i])
+ continue;
+ free_ret = sys_pkey_free(alloced_pkeys[i]);
+ pkey_assert(!free_ret);
+ }
+
+ /* attach key-0 in various modes */
+ err = sys_mprotect_pkey(ptr, size, PROT_READ, 0);
+ pkey_assert(!err);
+ err = sys_mprotect_pkey(ptr, size, PROT_WRITE, 0);
+ pkey_assert(!err);
+ err = sys_mprotect_pkey(ptr, size, PROT_EXEC, 0);
+ pkey_assert(!err);
+ err = sys_mprotect_pkey(ptr, size, PROT_READ|PROT_WRITE, 0);
+ pkey_assert(!err);
+ err = sys_mprotect_pkey(ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, 0);
+ pkey_assert(!err);
+}
+
+void test_read_of_write_disabled_region(int *ptr, u16 pkey)
+{
+ int ptr_contents;
+
+ dprintf1("disabling write access to PKEY[1], doing read\n");
+ pkey_write_deny(pkey);
+ ptr_contents = read_ptr(ptr);
+ dprintf1("*ptr: %d\n", ptr_contents);
+ dprintf1("\n");
+}
+void test_read_of_access_disabled_region(int *ptr, u16 pkey)
+{
+ int ptr_contents;
+
+ dprintf1("disabling access to PKEY[%02d], doing read @ %p\n", pkey, ptr);
+ read_pkey_reg();
+ pkey_access_deny(pkey);
+ ptr_contents = read_ptr(ptr);
+ dprintf1("*ptr: %d\n", ptr_contents);
+ expected_pkey_fault(pkey);
+}
+
+void test_read_of_access_disabled_region_with_page_already_mapped(int *ptr,
+ u16 pkey)
+{
+ int ptr_contents;
+
+ dprintf1("disabling access to PKEY[%02d], doing read @ %p\n",
+ pkey, ptr);
+ ptr_contents = read_ptr(ptr);
+ dprintf1("reading ptr before disabling the read : %d\n",
+ ptr_contents);
+ read_pkey_reg();
+ pkey_access_deny(pkey);
+ ptr_contents = read_ptr(ptr);
+ dprintf1("*ptr: %d\n", ptr_contents);
+ expected_pkey_fault(pkey);
+}
+
+void test_write_of_write_disabled_region_with_page_already_mapped(int *ptr,
+ u16 pkey)
+{
+ *ptr = __LINE__;
+ dprintf1("disabling write access; after accessing the page, "
+ "to PKEY[%02d], doing write\n", pkey);
+ pkey_write_deny(pkey);
+ *ptr = __LINE__;
+ expected_pkey_fault(pkey);
+}
+
+void test_write_of_write_disabled_region(int *ptr, u16 pkey)
+{
+ dprintf1("disabling write access to PKEY[%02d], doing write\n", pkey);
+ pkey_write_deny(pkey);
+ *ptr = __LINE__;
+ expected_pkey_fault(pkey);
+}
+void test_write_of_access_disabled_region(int *ptr, u16 pkey)
+{
+ dprintf1("disabling access to PKEY[%02d], doing write\n", pkey);
+ pkey_access_deny(pkey);
+ *ptr = __LINE__;
+ expected_pkey_fault(pkey);
+}
+
+void test_write_of_access_disabled_region_with_page_already_mapped(int *ptr,
+ u16 pkey)
+{
+ *ptr = __LINE__;
+ dprintf1("disabling access; after accessing the page, "
+ " to PKEY[%02d], doing write\n", pkey);
+ pkey_access_deny(pkey);
+ *ptr = __LINE__;
+ expected_pkey_fault(pkey);
+}
+
+void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey)
+{
+ int ret;
+ int test_fd = get_test_read_fd();
+
+ dprintf1("disabling access to PKEY[%02d], "
+ "having kernel read() to buffer\n", pkey);
+ pkey_access_deny(pkey);
+ ret = read(test_fd, ptr, 1);
+ dprintf1("read ret: %d\n", ret);
+ pkey_assert(ret);
+}
+void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey)
+{
+ int ret;
+ int test_fd = get_test_read_fd();
+
+ pkey_write_deny(pkey);
+ ret = read(test_fd, ptr, 100);
+ dprintf1("read ret: %d\n", ret);
+ if (ret < 0 && (DEBUG_LEVEL > 0))
+ perror("verbose read result (OK for this to be bad)");
+ pkey_assert(ret);
+}
+
+void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey)
+{
+ int pipe_ret, vmsplice_ret;
+ struct iovec iov;
+ int pipe_fds[2];
+
+ pipe_ret = pipe(pipe_fds);
+
+ pkey_assert(pipe_ret == 0);
+ dprintf1("disabling access to PKEY[%02d], "
+ "having kernel vmsplice from buffer\n", pkey);
+ pkey_access_deny(pkey);
+ iov.iov_base = ptr;
+ iov.iov_len = PAGE_SIZE;
+ vmsplice_ret = vmsplice(pipe_fds[1], &iov, 1, SPLICE_F_GIFT);
+ dprintf1("vmsplice() ret: %d\n", vmsplice_ret);
+ pkey_assert(vmsplice_ret == -1);
+
+ close(pipe_fds[0]);
+ close(pipe_fds[1]);
+}
+
+void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey)
+{
+ int ignored = 0xdada;
+ int futex_ret;
+ int some_int = __LINE__;
+
+ dprintf1("disabling write to PKEY[%02d], "
+ "doing futex gunk in buffer\n", pkey);
+ *ptr = some_int;
+ pkey_write_deny(pkey);
+ futex_ret = syscall(SYS_futex, ptr, FUTEX_WAIT, some_int-1, NULL,
+ &ignored, ignored);
+ if (DEBUG_LEVEL > 0)
+ perror("futex");
+ dprintf1("futex() ret: %d\n", futex_ret);
+}
+
+/* Assumes that all pkeys other than 'pkey' are unallocated */
+void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey)
+{
+ int err;
+ int i;
+
+ /* Note: 0 is the default pkey, so don't mess with it */
+ for (i = 1; i < NR_PKEYS; i++) {
+ if (pkey == i)
+ continue;
+
+ dprintf1("trying get/set/free to non-allocated pkey: %2d\n", i);
+ err = sys_pkey_free(i);
+ pkey_assert(err);
+
+ err = sys_pkey_free(i);
+ pkey_assert(err);
+
+ err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, i);
+ pkey_assert(err);
+ }
+}
+
+/* Assumes that all pkeys other than 'pkey' are unallocated */
+void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
+{
+ int err;
+ int bad_pkey = NR_PKEYS+99;
+
+ /* pass a known-invalid pkey in: */
+ err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, bad_pkey);
+ pkey_assert(err);
+}
+
+void become_child(void)
+{
+ pid_t forkret;
+
+ forkret = fork();
+ pkey_assert(forkret >= 0);
+ dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+ if (!forkret) {
+ /* in the child */
+ return;
+ }
+ exit(0);
+}
+
+/* Assumes that all pkeys other than 'pkey' are unallocated */
+void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+{
+ int err;
+ int allocated_pkeys[NR_PKEYS] = {0};
+ int nr_allocated_pkeys = 0;
+ int i;
+
+ for (i = 0; i < NR_PKEYS*3; i++) {
+ int new_pkey;
+ dprintf1("%s() alloc loop: %d\n", __func__, i);
+ new_pkey = alloc_pkey();
+ dprintf4("%s()::%d, err: %d pkey_reg: 0x%016llx"
+ " shadow: 0x%016llx\n",
+ __func__, __LINE__, err, __read_pkey_reg(),
+ shadow_pkey_reg);
+ read_pkey_reg(); /* for shadow checking */
+ dprintf2("%s() errno: %d ENOSPC: %d\n", __func__, errno, ENOSPC);
+ if ((new_pkey == -1) && (errno == ENOSPC)) {
+ dprintf2("%s() failed to allocate pkey after %d tries\n",
+ __func__, nr_allocated_pkeys);
+ } else {
+ /*
+ * Ensure the number of successes never
+ * exceeds the number of keys supported
+ * in the hardware.
+ */
+ pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+ allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+ }
+
+ /*
+ * Make sure that allocation state is properly
+ * preserved across fork().
+ */
+ if (i == NR_PKEYS*2)
+ become_child();
+ }
+
+ dprintf3("%s()::%d\n", __func__, __LINE__);
+
+ /*
+ * On x86:
+ * There are 16 pkeys supported in hardware. Three are
+ * allocated by the time we get here:
+ * 1. The default key (0)
+ * 2. One possibly consumed by an execute-only mapping.
+ * 3. One allocated by the test code and passed in via
+ * 'pkey' to this function.
+ * Ensure that we can allocate at least another 13 (16-3).
+ *
+ * On powerpc:
+ * There are either 5, 28, 29 or 32 pkeys supported in
+ * hardware depending on the page size (4K or 64K) and
+ * platform (powernv or powervm). Four are allocated by
+ * the time we get here. These include pkey-0, pkey-1,
+ * exec-only pkey and the one allocated by the test code.
+ * Ensure that we can allocate the remaining.
+ */
+ pkey_assert(i >= (NR_PKEYS - get_arch_reserved_keys() - 1));
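+ /*
+ * Plugging in x86 numbers for illustration: NR_PKEYS == 16 and
+ * get_arch_reserved_keys() == 2, so the threshold above works out
+ * to 16 - 2 - 1 = 13.
+ */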
+
+ for (i = 0; i < nr_allocated_pkeys; i++) {
+ err = sys_pkey_free(allocated_pkeys[i]);
+ pkey_assert(!err);
+ read_pkey_reg(); /* for shadow checking */
+ }
+}
+
+/*
+ * pkey 0 is special. It is allocated by default, so you do not
+ * have to call pkey_alloc() to use it first. Make sure that it
+ * is usable.
+ */
+void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
+{
+ long size;
+ int prot;
+
+ assert(pkey_last_malloc_record);
+ size = pkey_last_malloc_record->size;
+ /*
+ * This is a bit of a hack, but mprotect() requires
+ * huge-page-aligned sizes when operating on hugetlbfs.
+ * So, make sure that we use something that's a multiple
+ * of a huge page when we can.
+ */
+ if (size >= HPAGE_SIZE)
+ size = HPAGE_SIZE;
+ prot = pkey_last_malloc_record->prot;
+
+ /* Use pkey 0 */
+ mprotect_pkey(ptr, size, prot, 0);
+
+ /* Make sure that we can set it back to the original pkey. */
+ mprotect_pkey(ptr, size, prot, pkey);
+}
+
+void test_ptrace_of_child(int *ptr, u16 pkey)
+{
+ __attribute__((__unused__)) int peek_result;
+ pid_t child_pid;
+ void *ignored = 0;
+ long ret;
+ int status;
+ /*
+ * This is the "control" for our little experiment. Make sure
+ * we can always access it when ptracing.
+ */
+ int *plain_ptr_unaligned = malloc(HPAGE_SIZE);
+ int *plain_ptr = ALIGN_PTR_UP(plain_ptr_unaligned, PAGE_SIZE);
+
+ /*
+ * Fork a child which is an exact copy of this process, of course.
+ * That means we can do all of our tests via ptrace() and then plain
+ * memory access and ensure they work differently.
+ */
+ child_pid = fork_lazy_child();
+ dprintf1("[%d] child pid: %d\n", getpid(), child_pid);
+
+ ret = ptrace(PTRACE_ATTACH, child_pid, ignored, ignored);
+ if (ret)
+ perror("attach");
+ dprintf1("[%d] attach ret: %ld %d\n", getpid(), ret, __LINE__);
+ pkey_assert(ret != -1);
+ ret = waitpid(child_pid, &status, WUNTRACED);
+ if ((ret != child_pid) || !(WIFSTOPPED(status))) {
+ fprintf(stderr, "weird waitpid result %ld stat %x\n",
+ ret, status);
+ pkey_assert(0);
+ }
+ dprintf2("waitpid ret: %ld\n", ret);
+ dprintf2("waitpid status: %d\n", status);
+
+ pkey_access_deny(pkey);
+ pkey_write_deny(pkey);
+
+ /* Write access, untested for now:
+ ret = ptrace(PTRACE_POKEDATA, child_pid, peek_at, data);
+ pkey_assert(ret != -1);
+ dprintf1("poke at %p: %ld\n", peek_at, ret);
+ */
+
+ /*
+ * Try to access the pkey-protected "ptr" via ptrace:
+ */
+ ret = ptrace(PTRACE_PEEKDATA, child_pid, ptr, ignored);
+ /* expect it to work, without an error: */
+ pkey_assert(ret != -1);
+ /* Now access from the current task, and expect an exception: */
+ peek_result = read_ptr(ptr);
+ expected_pkey_fault(pkey);
+
+ /*
+ * Try to access the NON-pkey-protected "plain_ptr" via ptrace:
+ */
+ ret = ptrace(PTRACE_PEEKDATA, child_pid, plain_ptr, ignored);
+ /* expect it to work, without an error: */
+ pkey_assert(ret != -1);
+ /* Now access from the current task, and expect NO exception: */
+ peek_result = read_ptr(plain_ptr);
+ do_not_expect_pkey_fault("read plain pointer after ptrace");
+
+ ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
+ pkey_assert(ret != -1);
+
+ ret = kill(child_pid, SIGKILL);
+ pkey_assert(ret != -1);
+
+ wait(&status);
+
+ free(plain_ptr_unaligned);
+}
+
+void *get_pointer_to_instructions(void)
+{
+ void *p1;
+
+ p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
+ dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
+ /* lots_o_noops_around_write should be page-aligned already */
+ assert(p1 == &lots_o_noops_around_write);
+
+ /* Point 'p1' at the *second* page of the function: */
+ p1 += PAGE_SIZE;
+
+ /*
+ * Try to make sure we fault this in on the next touch so that
+ * we get an instruction fault as opposed to a data one.
+ */
+ madvise(p1, PAGE_SIZE, MADV_DONTNEED);
+
+ return p1;
+}
+
+void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
+{
+ void *p1;
+ int scratch;
+ int ptr_contents;
+ int ret;
+
+ p1 = get_pointer_to_instructions();
+ lots_o_noops_around_write(&scratch);
+ ptr_contents = read_ptr(p1);
+ dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+
+ ret = mprotect_pkey(p1, PAGE_SIZE, PROT_EXEC, (u64)pkey);
+ pkey_assert(!ret);
+ pkey_access_deny(pkey);
+
+ dprintf2("pkey_reg: %016llx\n", read_pkey_reg());
+
+ /*
+ * Make sure this is an *instruction* fault
+ */
+ madvise(p1, PAGE_SIZE, MADV_DONTNEED);
+ lots_o_noops_around_write(&scratch);
+ do_not_expect_pkey_fault("executing on PROT_EXEC memory");
+ expect_fault_on_read_execonly_key(p1, pkey);
+}
+
+void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
+{
+ void *p1;
+ int scratch;
+ int ptr_contents;
+ int ret;
+
+ dprintf1("%s() start\n", __func__);
+
+ p1 = get_pointer_to_instructions();
+ lots_o_noops_around_write(&scratch);
+ ptr_contents = read_ptr(p1);
+ dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+
+ /* Use a *normal* mprotect(), not mprotect_pkey(): */
+ ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
+ pkey_assert(!ret);
+
+ dprintf2("pkey_reg: %016llx\n", read_pkey_reg());
+
+ /* Make sure this is an *instruction* fault */
+ madvise(p1, PAGE_SIZE, MADV_DONTNEED);
+ lots_o_noops_around_write(&scratch);
+ do_not_expect_pkey_fault("executing on PROT_EXEC memory");
+ expect_fault_on_read_execonly_key(p1, UNKNOWN_PKEY);
+
+ /*
+ * Put the memory back to non-PROT_EXEC. Should clear the
+ * exec-only pkey off the VMA and allow it to be readable
+ * again. Go to PROT_NONE first to check for a kernel bug
+ * that did not clear the pkey when doing PROT_NONE.
+ */
+ ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
+ pkey_assert(!ret);
+
+ ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
+ pkey_assert(!ret);
+ ptr_contents = read_ptr(p1);
+ do_not_expect_pkey_fault("plain read on recently PROT_EXEC area");
+}
+
+void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
+{
+ int size = PAGE_SIZE;
+ int sret;
+
+ if (cpu_has_pkeys()) {
+ dprintf1("SKIP: %s: no CPU support\n", __func__);
+ return;
+ }
+
+ sret = syscall(SYS_mprotect_key, ptr, size, PROT_READ, pkey);
+ pkey_assert(sret < 0);
+}
+
+void (*pkey_tests[])(int *ptr, u16 pkey) = {
+ test_read_of_write_disabled_region,
+ test_read_of_access_disabled_region,
+ test_read_of_access_disabled_region_with_page_already_mapped,
+ test_write_of_write_disabled_region,
+ test_write_of_write_disabled_region_with_page_already_mapped,
+ test_write_of_access_disabled_region,
+ test_write_of_access_disabled_region_with_page_already_mapped,
+ test_kernel_write_of_access_disabled_region,
+ test_kernel_write_of_write_disabled_region,
+ test_kernel_gup_of_access_disabled_region,
+ test_kernel_gup_write_to_write_disabled_region,
+ test_executing_on_unreadable_memory,
+ test_implicit_mprotect_exec_only_memory,
+ test_mprotect_with_pkey_0,
+ test_ptrace_of_child,
+ test_pkey_syscalls_on_non_allocated_pkey,
+ test_pkey_syscalls_bad_args,
+ test_pkey_alloc_exhaust,
+ test_pkey_alloc_free_attach_pkey0,
+};
+
+void run_tests_once(void)
+{
+ int *ptr;
+ int prot = PROT_READ|PROT_WRITE;
+
+ for (test_nr = 0; test_nr < ARRAY_SIZE(pkey_tests); test_nr++) {
+ int pkey;
+ int orig_pkey_faults = pkey_faults;
+
+ dprintf1("======================\n");
+ dprintf1("test %d preparing...\n", test_nr);
+
+ tracing_on();
+ pkey = alloc_random_pkey();
+ dprintf1("test %d starting with pkey: %d\n", test_nr, pkey);
+ ptr = malloc_pkey(PAGE_SIZE, prot, pkey);
+ dprintf1("test %d starting...\n", test_nr);
+ pkey_tests[test_nr](ptr, pkey);
+ dprintf1("freeing test memory: %p\n", ptr);
+ free_pkey_malloc(ptr);
+ sys_pkey_free(pkey);
+
+ dprintf1("pkey_faults: %d\n", pkey_faults);
+ dprintf1("orig_pkey_faults: %d\n", orig_pkey_faults);
+
+ tracing_off();
+ close_test_fds();
+
+ printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
+ dprintf1("======================\n\n");
+ }
+ iteration_nr++;
+}
+
+void pkey_setup_shadow(void)
+{
+ shadow_pkey_reg = __read_pkey_reg();
+}
+
+int main(void)
+{
+ int nr_iterations = 22;
+ int pkeys_supported = is_pkeys_supported();
+
+ setup_handlers();
+
+ printf("has pkeys: %d\n", pkeys_supported);
+
+ if (!pkeys_supported) {
+ int size = PAGE_SIZE;
+ int *ptr;
+
+ printf("running PKEY tests for unsupported CPU/OS\n");
+
+ ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ assert(ptr != (void *)-1);
+ test_mprotect_pkey_on_unsupported_cpu(ptr, 1);
+ exit(0);
+ }
+
+ pkey_setup_shadow();
+ printf("startup pkey_reg: %016llx\n", read_pkey_reg());
+ setup_hugetlbfs();
+
+ while (nr_iterations-- > 0)
+ run_tests_once();
+
+ printf("done (all tests OK)\n");
+ return 0;
+}
diff --git a/tools/testing/selftests/x86/.gitignore b/tools/testing/selftests/x86/.gitignore
index 022a1f3b64ef..1aaef5bf119a 100644
--- a/tools/testing/selftests/x86/.gitignore
+++ b/tools/testing/selftests/x86/.gitignore
@@ -12,5 +12,4 @@ ldt_gdt
iopl
mpx-mini-test
ioperm
-protection_keys
test_vdso
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 5d49bfec1e9a..5f16821c7f63 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -12,7 +12,7 @@ CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie)
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
check_initial_reg_state sigreturn iopl ioperm \
- protection_keys test_vdso test_vsyscall mov_ss_trap \
+ test_vdso test_vsyscall mov_ss_trap \
syscall_arg_fault
TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h
deleted file mode 100644
index 254e5436bdd9..000000000000
--- a/tools/testing/selftests/x86/pkey-helpers.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PKEYS_HELPER_H
-#define _PKEYS_HELPER_H
-#define _GNU_SOURCE
-#include <string.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <stdbool.h>
-#include <signal.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <ucontext.h>
-#include <sys/mman.h>
-
-#define NR_PKEYS 16
-#define PKRU_BITS_PER_PKEY 2
-
-#ifndef DEBUG_LEVEL
-#define DEBUG_LEVEL 0
-#endif
-#define DPRINT_IN_SIGNAL_BUF_SIZE 4096
-extern int dprint_in_signal;
-extern char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
-static inline void sigsafe_printf(const char *format, ...)
-{
- va_list ap;
-
- if (!dprint_in_signal) {
- va_start(ap, format);
- vprintf(format, ap);
- va_end(ap);
- } else {
- int ret;
- /*
- * No printf() functions are signal-safe.
- * They deadlock easily. Write the format
- * string to get some output, even if
- * incomplete.
- */
- ret = write(1, format, strlen(format));
- if (ret < 0)
- exit(1);
- }
-}
-#define dprintf_level(level, args...) do { \
- if (level <= DEBUG_LEVEL) \
- sigsafe_printf(args); \
-} while (0)
-#define dprintf0(args...) dprintf_level(0, args)
-#define dprintf1(args...) dprintf_level(1, args)
-#define dprintf2(args...) dprintf_level(2, args)
-#define dprintf3(args...) dprintf_level(3, args)
-#define dprintf4(args...) dprintf_level(4, args)
-
-extern unsigned int shadow_pkru;
-static inline unsigned int __rdpkru(void)
-{
- unsigned int eax, edx;
- unsigned int ecx = 0;
- unsigned int pkru;
-
- asm volatile(".byte 0x0f,0x01,0xee\n\t"
- : "=a" (eax), "=d" (edx)
- : "c" (ecx));
- pkru = eax;
- return pkru;
-}
-
-static inline unsigned int _rdpkru(int line)
-{
- unsigned int pkru = __rdpkru();
-
- dprintf4("rdpkru(line=%d) pkru: %x shadow: %x\n",
- line, pkru, shadow_pkru);
- assert(pkru == shadow_pkru);
-
- return pkru;
-}
-
-#define rdpkru() _rdpkru(__LINE__)
-
-static inline void __wrpkru(unsigned int pkru)
-{
- unsigned int eax = pkru;
- unsigned int ecx = 0;
- unsigned int edx = 0;
-
- dprintf4("%s() changing %08x to %08x\n", __func__, __rdpkru(), pkru);
- asm volatile(".byte 0x0f,0x01,0xef\n\t"
- : : "a" (eax), "c" (ecx), "d" (edx));
- assert(pkru == __rdpkru());
-}
-
-static inline void wrpkru(unsigned int pkru)
-{
- dprintf4("%s() changing %08x to %08x\n", __func__, __rdpkru(), pkru);
- /* will do the shadow check for us: */
- rdpkru();
- __wrpkru(pkru);
- shadow_pkru = pkru;
- dprintf4("%s(%08x) pkru: %08x\n", __func__, pkru, __rdpkru());
-}
-
-/*
- * These are technically racy. since something could
- * change PKRU between the read and the write.
- */
-static inline void __pkey_access_allow(int pkey, int do_allow)
-{
- unsigned int pkru = rdpkru();
- int bit = pkey * 2;
-
- if (do_allow)
- pkru &= (1<<bit);
- else
- pkru |= (1<<bit);
-
- dprintf4("pkru now: %08x\n", rdpkru());
- wrpkru(pkru);
-}
-
-static inline void __pkey_write_allow(int pkey, int do_allow_write)
-{
- long pkru = rdpkru();
- int bit = pkey * 2 + 1;
-
- if (do_allow_write)
- pkru &= (1<<bit);
- else
- pkru |= (1<<bit);
-
- wrpkru(pkru);
- dprintf4("pkru now: %08x\n", rdpkru());
-}
-
-#define PROT_PKEY0 0x10 /* protection key value (bit 0) */
-#define PROT_PKEY1 0x20 /* protection key value (bit 1) */
-#define PROT_PKEY2 0x40 /* protection key value (bit 2) */
-#define PROT_PKEY3 0x80 /* protection key value (bit 3) */
-
-#define PAGE_SIZE 4096
-#define MB (1<<20)
-
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- /* ecx is often an input as well as an output. */
- asm volatile(
- "cpuid;"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (*eax), "2" (*ecx));
-}
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx) */
-#define X86_FEATURE_PKU (1<<3) /* Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE (1<<4) /* OS Protection Keys Enable */
-
-static inline int cpu_has_pku(void)
-{
- unsigned int eax;
- unsigned int ebx;
- unsigned int ecx;
- unsigned int edx;
-
- eax = 0x7;
- ecx = 0x0;
- __cpuid(&eax, &ebx, &ecx, &edx);
-
- if (!(ecx & X86_FEATURE_PKU)) {
- dprintf2("cpu does not have PKU\n");
- return 0;
- }
- if (!(ecx & X86_FEATURE_OSPKE)) {
- dprintf2("cpu does not have OSPKE\n");
- return 0;
- }
- return 1;
-}
-
-#define XSTATE_PKRU_BIT (9)
-#define XSTATE_PKRU 0x200
-
-int pkru_xstate_offset(void)
-{
- unsigned int eax;
- unsigned int ebx;
- unsigned int ecx;
- unsigned int edx;
- int xstate_offset;
- int xstate_size;
- unsigned long XSTATE_CPUID = 0xd;
- int leaf;
-
- /* assume that XSTATE_PKRU is set in XCR0 */
- leaf = XSTATE_PKRU_BIT;
- {
- eax = XSTATE_CPUID;
- ecx = leaf;
- __cpuid(&eax, &ebx, &ecx, &edx);
-
- if (leaf == XSTATE_PKRU_BIT) {
- xstate_offset = ebx;
- xstate_size = eax;
- }
- }
-
- if (xstate_size == 0) {
- printf("could not find size/offset of PKRU in xsave state\n");
- return 0;
- }
-
- return xstate_offset;
-}
-
-#endif /* _PKEYS_HELPER_H */
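
For reference, a minimal standalone sketch of the read path these helpers implement, under the same assumptions the header makes: PKRU carries two bits per pkey (the access-disable bit, with the write-disable bit one position above it), and RDPKRU is issued via its raw byte encoding. The example pkey number is arbitrary, and the program is only safe to run on a CPU whose CPUID reports OSPKE.

	#include <stdio.h>

	static unsigned int rdpkru_raw(void)
	{
		unsigned int eax, edx;
		unsigned int ecx = 0;

		/* RDPKRU (0f 01 ee): ECX must be 0, PKRU is returned in EAX */
		asm volatile(".byte 0x0f,0x01,0xee\n\t"
			     : "=a" (eax), "=d" (edx)
			     : "c" (ecx));
		return eax;
	}

	int main(void)
	{
		unsigned int pkru = rdpkru_raw();
		int pkey = 1;	/* arbitrary example key */
		unsigned int bits = (pkru >> (pkey * 2)) & 0x3;

		printf("pkru: %08x pkey %d access-disable: %u write-disable: %u\n",
		       pkru, pkey, bits & 0x1, (bits >> 1) & 0x1);
		return 0;
	}
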
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
deleted file mode 100644
index 480995bceefa..000000000000
--- a/tools/testing/selftests/x86/protection_keys.c
+++ /dev/null
@@ -1,1506 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Tests x86 Memory Protection Keys (see Documentation/core-api/protection-keys.rst)
- *
- * There are examples in here of:
- * * how to set protection keys on memory
- * * how to set/clear bits in PKRU (the rights register)
- * * how to handle SEGV_PKRU signals and extract pkey-relevant
- * information from the siginfo
- *
- * Things to add:
- * make sure KSM and KSM COW breaking works
- * prefault pages in at malloc, or not
- * protect MPX bounds tables with protection keys?
- * make sure VMA splitting/merging is working correctly
- * OOMs can destroy mm->mmap (see exit_mmap()), so make sure it is immune to pkeys
- * look for pkey "leaks" where it is still set on a VMA but "freed" back to the kernel
- * do a plain mprotect() to a mprotect_pkey() area and make sure the pkey sticks
- *
- * Compile like this:
- * gcc -o protection_keys -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
- * gcc -m32 -o protection_keys_32 -O2 -g -std=gnu99 -pthread -Wall protection_keys.c -lrt -ldl -lm
- */
-#define _GNU_SOURCE
-#include <errno.h>
-#include <linux/futex.h>
-#include <sys/time.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <stdbool.h>
-#include <signal.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <ucontext.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/ptrace.h>
-#include <setjmp.h>
-
-#include "pkey-helpers.h"
-
-int iteration_nr = 1;
-int test_nr;
-
-unsigned int shadow_pkru;
-
-#define HPAGE_SIZE (1UL<<21)
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
-#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
-#define ALIGN_DOWN(x, align_to) ((x) & ~((align_to)-1))
-#define ALIGN_PTR_UP(p, ptr_align_to) ((typeof(p))ALIGN_UP((unsigned long)(p), ptr_align_to))
-#define ALIGN_PTR_DOWN(p, ptr_align_to) ((typeof(p))ALIGN_DOWN((unsigned long)(p), ptr_align_to))
-#define __stringify_1(x...) #x
-#define __stringify(x...) __stringify_1(x)
-
-#define PTR_ERR_ENOTSUP ((void *)-ENOTSUP)
-
-int dprint_in_signal;
-char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE];
-
-extern void abort_hooks(void);
-#define pkey_assert(condition) do { \
- if (!(condition)) { \
- dprintf0("assert() at %s::%d test_nr: %d iteration: %d\n", \
- __FILE__, __LINE__, \
- test_nr, iteration_nr); \
- dprintf0("errno at assert: %d", errno); \
- abort_hooks(); \
- exit(__LINE__); \
- } \
-} while (0)
-
-void cat_into_file(char *str, char *file)
-{
- int fd = open(file, O_RDWR);
- int ret;
-
- dprintf2("%s(): writing '%s' to '%s'\n", __func__, str, file);
- /*
- * these need to be raw because they are called under
- * pkey_assert()
- */
- if (fd < 0) {
- fprintf(stderr, "error opening '%s'\n", str);
- perror("error: ");
- exit(__LINE__);
- }
-
- ret = write(fd, str, strlen(str));
- if (ret != strlen(str)) {
- perror("write to file failed");
- fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
- exit(__LINE__);
- }
- close(fd);
-}
-
-#if CONTROL_TRACING > 0
-static int warned_tracing;
-int tracing_root_ok(void)
-{
- if (geteuid() != 0) {
- if (!warned_tracing)
- fprintf(stderr, "WARNING: not run as root, "
- "can not do tracing control\n");
- warned_tracing = 1;
- return 0;
- }
- return 1;
-}
-#endif
-
-void tracing_on(void)
-{
-#if CONTROL_TRACING > 0
-#define TRACEDIR "/sys/kernel/debug/tracing"
- char pidstr[32];
-
- if (!tracing_root_ok())
- return;
-
- sprintf(pidstr, "%d", getpid());
- cat_into_file("0", TRACEDIR "/tracing_on");
- cat_into_file("\n", TRACEDIR "/trace");
- if (1) {
- cat_into_file("function_graph", TRACEDIR "/current_tracer");
- cat_into_file("1", TRACEDIR "/options/funcgraph-proc");
- } else {
- cat_into_file("nop", TRACEDIR "/current_tracer");
- }
- cat_into_file(pidstr, TRACEDIR "/set_ftrace_pid");
- cat_into_file("1", TRACEDIR "/tracing_on");
- dprintf1("enabled tracing\n");
-#endif
-}
-
-void tracing_off(void)
-{
-#if CONTROL_TRACING > 0
- if (!tracing_root_ok())
- return;
- cat_into_file("0", "/sys/kernel/debug/tracing/tracing_on");
-#endif
-}
-
-void abort_hooks(void)
-{
- fprintf(stderr, "running %s()...\n", __func__);
- tracing_off();
-#ifdef SLEEP_ON_ABORT
- sleep(SLEEP_ON_ABORT);
-#endif
-}
-
-static inline void __page_o_noops(void)
-{
- /* 8 bytes of instruction * 512 = 4096 bytes = 1 page */
- asm(".rept 512 ; nopl 0x7eeeeeee(%eax) ; .endr");
-}
-
-/*
- * This attempts to have roughly a page of instructions followed by a few
- * instructions that do a write, and another page of instructions. That
- * way, we are pretty sure that the write is in the second page of
- * instructions and has at least a page of padding behind it.
- *
- * *That* lets us be sure to madvise() away the write instruction, which
- * will then fault, which makes sure that the fault code handles
- * execute-only memory properly.
- */
-__attribute__((__aligned__(PAGE_SIZE)))
-void lots_o_noops_around_write(int *write_to_me)
-{
- dprintf3("running %s()\n", __func__);
- __page_o_noops();
- /* Assume this happens in the second page of instructions: */
- *write_to_me = __LINE__;
- /* pad out by another page: */
- __page_o_noops();
- dprintf3("%s() done\n", __func__);
-}
-
-/* Define some kernel-like types */
-#define u8 uint8_t
-#define u16 uint16_t
-#define u32 uint32_t
-#define u64 uint64_t
-
-#ifdef __i386__
-
-#ifndef SYS_mprotect_key
-# define SYS_mprotect_key 380
-#endif
-
-#ifndef SYS_pkey_alloc
-# define SYS_pkey_alloc 381
-# define SYS_pkey_free 382
-#endif
-
-#define REG_IP_IDX REG_EIP
-#define si_pkey_offset 0x14
-
-#else
-
-#ifndef SYS_mprotect_key
-# define SYS_mprotect_key 329
-#endif
-
-#ifndef SYS_pkey_alloc
-# define SYS_pkey_alloc 330
-# define SYS_pkey_free 331
-#endif
-
-#define REG_IP_IDX REG_RIP
-#define si_pkey_offset 0x20
-
-#endif
-
-void dump_mem(void *dumpme, int len_bytes)
-{
- char *c = (void *)dumpme;
- int i;
-
- for (i = 0; i < len_bytes; i += sizeof(u64)) {
- u64 *ptr = (u64 *)(c + i);
- dprintf1("dump[%03d][@%p]: %016jx\n", i, ptr, *ptr);
- }
-}
-
-/* Failed address bound checks: */
-#ifndef SEGV_BNDERR
-# define SEGV_BNDERR 3
-#endif
-
-#ifndef SEGV_PKUERR
-# define SEGV_PKUERR 4
-#endif
-
-static char *si_code_str(int si_code)
-{
- if (si_code == SEGV_MAPERR)
- return "SEGV_MAPERR";
- if (si_code == SEGV_ACCERR)
- return "SEGV_ACCERR";
- if (si_code == SEGV_BNDERR)
- return "SEGV_BNDERR";
- if (si_code == SEGV_PKUERR)
- return "SEGV_PKUERR";
- return "UNKNOWN";
-}
-
-int pkru_faults;
-int last_si_pkey = -1;
-void signal_handler(int signum, siginfo_t *si, void *vucontext)
-{
- ucontext_t *uctxt = vucontext;
- int trapno;
- unsigned long ip;
- char *fpregs;
- u32 *pkru_ptr;
- u64 siginfo_pkey;
- u32 *si_pkey_ptr;
- int pkru_offset;
- fpregset_t fpregset;
-
- dprint_in_signal = 1;
- dprintf1(">>>>===============SIGSEGV============================\n");
- dprintf1("%s()::%d, pkru: 0x%x shadow: %x\n", __func__, __LINE__,
- __rdpkru(), shadow_pkru);
-
- trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO];
- ip = uctxt->uc_mcontext.gregs[REG_IP_IDX];
- fpregset = uctxt->uc_mcontext.fpregs;
- fpregs = (void *)fpregset;
-
- dprintf2("%s() trapno: %d ip: 0x%lx info->si_code: %s/%d\n", __func__,
- trapno, ip, si_code_str(si->si_code), si->si_code);
-#ifdef __i386__
- /*
- * 32-bit has some extra padding so that userspace can tell whether
- * the XSTATE header is present in addition to the "legacy" FPU
- * state. We just assume that it is here.
- */
- fpregs += 0x70;
-#endif
- pkru_offset = pkru_xstate_offset();
- pkru_ptr = (void *)(&fpregs[pkru_offset]);
-
- dprintf1("siginfo: %p\n", si);
- dprintf1(" fpregs: %p\n", fpregs);
- /*
- * If we got a PKRU fault, we *HAVE* to have at least one bit set in
- * here.
- */
- dprintf1("pkru_xstate_offset: %d\n", pkru_xstate_offset());
- if (DEBUG_LEVEL > 4)
- dump_mem(pkru_ptr - 128, 256);
- pkey_assert(*pkru_ptr);
-
- if ((si->si_code == SEGV_MAPERR) ||
- (si->si_code == SEGV_ACCERR) ||
- (si->si_code == SEGV_BNDERR)) {
- printf("non-PK si_code, exiting...\n");
- exit(4);
- }
-
- si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
- dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
- dump_mem((u8 *)si_pkey_ptr - 8, 24);
- siginfo_pkey = *si_pkey_ptr;
- pkey_assert(siginfo_pkey < NR_PKEYS);
- last_si_pkey = siginfo_pkey;
-
- dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
- /* need __rdpkru() version so we do not do shadow_pkru checking */
- dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
- dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
- *(u64 *)pkru_ptr = 0x00000000;
- dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
- pkru_faults++;
- dprintf1("<<<<==================================================\n");
- dprint_in_signal = 0;
-}
-
-int wait_all_children(void)
-{
- int status;
- return waitpid(-1, &status, 0);
-}
-
-void sig_chld(int x)
-{
- dprint_in_signal = 1;
- dprintf2("[%d] SIGCHLD: %d\n", getpid(), x);
- dprint_in_signal = 0;
-}
-
-void setup_sigsegv_handler(void)
-{
- int r, rs;
- struct sigaction newact;
- struct sigaction oldact;
-
- /* #PF is mapped to sigsegv */
- int signum = SIGSEGV;
-
- newact.sa_handler = 0;
- newact.sa_sigaction = signal_handler;
-
- /* sigset_t - signals to block while in the handler */
- /* get the old signal mask. */
- rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask);
- pkey_assert(rs == 0);
-
- /* call sa_sigaction, not sa_handler */
- newact.sa_flags = SA_SIGINFO;
-
- newact.sa_restorer = 0; /* void(*)(), obsolete */
- r = sigaction(signum, &newact, &oldact);
- r = sigaction(SIGALRM, &newact, &oldact);
- pkey_assert(r == 0);
-}
-
-void setup_handlers(void)
-{
- signal(SIGCHLD, &sig_chld);
- setup_sigsegv_handler();
-}
-
-pid_t fork_lazy_child(void)
-{
- pid_t forkret;
-
- forkret = fork();
- pkey_assert(forkret >= 0);
- dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
-
- if (!forkret) {
- /* in the child */
- while (1) {
- dprintf1("child sleeping...\n");
- sleep(30);
- }
- }
- return forkret;
-}
-
-#ifndef PKEY_DISABLE_ACCESS
-# define PKEY_DISABLE_ACCESS 0x1
-#endif
-
-#ifndef PKEY_DISABLE_WRITE
-# define PKEY_DISABLE_WRITE 0x2
-#endif
-
-static u32 hw_pkey_get(int pkey, unsigned long flags)
-{
- u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
- u32 pkru = __rdpkru();
- u32 shifted_pkru;
- u32 masked_pkru;
-
- dprintf1("%s(pkey=%d, flags=%lx) = %x / %d\n",
- __func__, pkey, flags, 0, 0);
- dprintf2("%s() raw pkru: %x\n", __func__, pkru);
-
- shifted_pkru = (pkru >> (pkey * PKRU_BITS_PER_PKEY));
- dprintf2("%s() shifted_pkru: %x\n", __func__, shifted_pkru);
- masked_pkru = shifted_pkru & mask;
- dprintf2("%s() masked pkru: %x\n", __func__, masked_pkru);
- /*
- * shift down the relevant bits to the lowest two, then
- * mask off all the other high bits.
- */
- return masked_pkru;
-}
-
-static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags)
-{
- u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
- u32 old_pkru = __rdpkru();
- u32 new_pkru;
-
- /* make sure that 'rights' only contains the bits we expect: */
- assert(!(rights & ~mask));
-
- /* copy old pkru */
- new_pkru = old_pkru;
- /* mask out bits from pkey in old value: */
- new_pkru &= ~(mask << (pkey * PKRU_BITS_PER_PKEY));
- /* OR in new bits for pkey: */
- new_pkru |= (rights << (pkey * PKRU_BITS_PER_PKEY));
-
- __wrpkru(new_pkru);
-
- dprintf3("%s(pkey=%d, rights=%lx, flags=%lx) = %x pkru now: %x old_pkru: %x\n",
- __func__, pkey, rights, flags, 0, __rdpkru(), old_pkru);
- return 0;
-}
-
-void pkey_disable_set(int pkey, int flags)
-{
- unsigned long syscall_flags = 0;
- int ret;
- int pkey_rights;
- u32 orig_pkru = rdpkru();
-
- dprintf1("START->%s(%d, 0x%x)\n", __func__,
- pkey, flags);
- pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
-
- pkey_rights = hw_pkey_get(pkey, syscall_flags);
-
- dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
- pkey, pkey, pkey_rights);
- pkey_assert(pkey_rights >= 0);
-
- pkey_rights |= flags;
-
- ret = hw_pkey_set(pkey, pkey_rights, syscall_flags);
- assert(!ret);
- /* pkru and flags have the same format */
- shadow_pkru |= flags << (pkey * 2);
- dprintf1("%s(%d) shadow: 0x%x\n", __func__, pkey, shadow_pkru);
-
- pkey_assert(ret >= 0);
-
- pkey_rights = hw_pkey_get(pkey, syscall_flags);
- dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
- pkey, pkey, pkey_rights);
-
- dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
- if (flags)
- pkey_assert(rdpkru() > orig_pkru);
- dprintf1("END<---%s(%d, 0x%x)\n", __func__,
- pkey, flags);
-}
-
-void pkey_disable_clear(int pkey, int flags)
-{
- unsigned long syscall_flags = 0;
- int ret;
- int pkey_rights = hw_pkey_get(pkey, syscall_flags);
- u32 orig_pkru = rdpkru();
-
- pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
-
- dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
- pkey, pkey, pkey_rights);
- pkey_assert(pkey_rights >= 0);
-
- pkey_rights &= ~flags;
-
- ret = hw_pkey_set(pkey, pkey_rights, 0);
- /* pkru and flags have the same format */
- shadow_pkru &= ~(flags << (pkey * 2));
- pkey_assert(ret >= 0);
-
- pkey_rights = hw_pkey_get(pkey, syscall_flags);
- dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
- pkey, pkey, pkey_rights);
-
- dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
- if (flags)
- assert(rdpkru() < orig_pkru);
-}
-
-void pkey_write_allow(int pkey)
-{
- pkey_disable_clear(pkey, PKEY_DISABLE_WRITE);
-}
-void pkey_write_deny(int pkey)
-{
- pkey_disable_set(pkey, PKEY_DISABLE_WRITE);
-}
-void pkey_access_allow(int pkey)
-{
- pkey_disable_clear(pkey, PKEY_DISABLE_ACCESS);
-}
-void pkey_access_deny(int pkey)
-{
- pkey_disable_set(pkey, PKEY_DISABLE_ACCESS);
-}
-
-int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
- unsigned long pkey)
-{
- int sret;
-
- dprintf2("%s(0x%p, %zx, prot=%lx, pkey=%lx)\n", __func__,
- ptr, size, orig_prot, pkey);
-
- errno = 0;
- sret = syscall(SYS_mprotect_key, ptr, size, orig_prot, pkey);
- if (errno) {
- dprintf2("SYS_mprotect_key sret: %d\n", sret);
- dprintf2("SYS_mprotect_key prot: 0x%lx\n", orig_prot);
- dprintf2("SYS_mprotect_key failed, errno: %d\n", errno);
- if (DEBUG_LEVEL >= 2)
- perror("SYS_mprotect_pkey");
- }
- return sret;
-}
-
-int sys_pkey_alloc(unsigned long flags, unsigned long init_val)
-{
- int ret = syscall(SYS_pkey_alloc, flags, init_val);
- dprintf1("%s(flags=%lx, init_val=%lx) syscall ret: %d errno: %d\n",
- __func__, flags, init_val, ret, errno);
- return ret;
-}
-
-int alloc_pkey(void)
-{
- int ret;
- unsigned long init_val = 0x0;
-
- dprintf1("alloc_pkey()::%d, pkru: 0x%x shadow: %x\n",
- __LINE__, __rdpkru(), shadow_pkru);
- ret = sys_pkey_alloc(0, init_val);
- /*
- * pkey_alloc() sets PKRU, so we need to reflect it in
- * shadow_pkru:
- */
- dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
- __LINE__, ret, __rdpkru(), shadow_pkru);
- if (ret) {
- /* clear both the bits: */
- shadow_pkru &= ~(0x3 << (ret * 2));
- dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
- __LINE__, ret, __rdpkru(), shadow_pkru);
- /*
- * move the new state in from init_val
- * (remember, we cheated and init_val == pkru format)
- */
- shadow_pkru |= (init_val << (ret * 2));
- }
- dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
- __LINE__, ret, __rdpkru(), shadow_pkru);
- dprintf1("alloc_pkey()::%d errno: %d\n", __LINE__, errno);
- /* for shadow checking: */
- rdpkru();
- dprintf4("alloc_pkey()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n",
- __LINE__, ret, __rdpkru(), shadow_pkru);
- return ret;
-}
-
-int sys_pkey_free(unsigned long pkey)
-{
- int ret = syscall(SYS_pkey_free, pkey);
- dprintf1("%s(pkey=%ld) syscall ret: %d\n", __func__, pkey, ret);
- return ret;
-}
-
-/*
- * I had a bug where pkey bits could be set by mprotect() but
- * not cleared. This ensures we get lots of random bit sets
- * and clears on the vma and pte pkey bits.
- */
-int alloc_random_pkey(void)
-{
- int max_nr_pkey_allocs;
- int ret;
- int i;
- int alloced_pkeys[NR_PKEYS];
- int nr_alloced = 0;
- int random_index;
- memset(alloced_pkeys, 0, sizeof(alloced_pkeys));
-
- /* allocate every possible key and make a note of which ones we got */
- max_nr_pkey_allocs = NR_PKEYS;
- max_nr_pkey_allocs = 1;
- for (i = 0; i < max_nr_pkey_allocs; i++) {
- int new_pkey = alloc_pkey();
- if (new_pkey < 0)
- break;
- alloced_pkeys[nr_alloced++] = new_pkey;
- }
-
- pkey_assert(nr_alloced > 0);
- /* select a random one out of the allocated ones */
- random_index = rand() % nr_alloced;
- ret = alloced_pkeys[random_index];
- /* now zero it out so we don't free it next */
- alloced_pkeys[random_index] = 0;
-
- /* go through the allocated ones that we did not want and free them */
- for (i = 0; i < nr_alloced; i++) {
- int free_ret;
- if (!alloced_pkeys[i])
- continue;
- free_ret = sys_pkey_free(alloced_pkeys[i]);
- pkey_assert(!free_ret);
- }
- dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
- __LINE__, ret, __rdpkru(), shadow_pkru);
- return ret;
-}
-
-int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
- unsigned long pkey)
-{
- int nr_iterations = random() % 100;
- int ret;
-
- while (0) {
- int rpkey = alloc_random_pkey();
- ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
- dprintf1("sys_mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
- ptr, size, orig_prot, pkey, ret);
- if (nr_iterations-- < 0)
- break;
-
- dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
- __LINE__, ret, __rdpkru(), shadow_pkru);
- sys_pkey_free(rpkey);
- dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
- __LINE__, ret, __rdpkru(), shadow_pkru);
- }
- pkey_assert(pkey < NR_PKEYS);
-
- ret = sys_mprotect_pkey(ptr, size, orig_prot, pkey);
- dprintf1("mprotect_pkey(%p, %zx, prot=0x%lx, pkey=%ld) ret: %d\n",
- ptr, size, orig_prot, pkey, ret);
- pkey_assert(!ret);
- dprintf1("%s()::%d, ret: %d pkru: 0x%x shadow: 0x%x\n", __func__,
- __LINE__, ret, __rdpkru(), shadow_pkru);
- return ret;
-}
-
-struct pkey_malloc_record {
- void *ptr;
- long size;
- int prot;
-};
-struct pkey_malloc_record *pkey_malloc_records;
-struct pkey_malloc_record *pkey_last_malloc_record;
-long nr_pkey_malloc_records;
-void record_pkey_malloc(void *ptr, long size, int prot)
-{
- long i;
- struct pkey_malloc_record *rec = NULL;
-
- for (i = 0; i < nr_pkey_malloc_records; i++) {
- rec = &pkey_malloc_records[i];
- /* find a free record */
- if (rec)
- break;
- }
- if (!rec) {
- /* every record is full */
- size_t old_nr_records = nr_pkey_malloc_records;
- size_t new_nr_records = (nr_pkey_malloc_records * 2 + 1);
- size_t new_size = new_nr_records * sizeof(struct pkey_malloc_record);
- dprintf2("new_nr_records: %zd\n", new_nr_records);
- dprintf2("new_size: %zd\n", new_size);
- pkey_malloc_records = realloc(pkey_malloc_records, new_size);
- pkey_assert(pkey_malloc_records != NULL);
- rec = &pkey_malloc_records[nr_pkey_malloc_records];
- /*
- * realloc() does not initialize memory, so zero it from
- * the first new record all the way to the end.
- */
- for (i = 0; i < new_nr_records - old_nr_records; i++)
- memset(rec + i, 0, sizeof(*rec));
- }
- dprintf3("filling malloc record[%d/%p]: {%p, %ld}\n",
- (int)(rec - pkey_malloc_records), rec, ptr, size);
- rec->ptr = ptr;
- rec->size = size;
- rec->prot = prot;
- pkey_last_malloc_record = rec;
- nr_pkey_malloc_records++;
-}
-
-void free_pkey_malloc(void *ptr)
-{
- long i;
- int ret;
- dprintf3("%s(%p)\n", __func__, ptr);
- for (i = 0; i < nr_pkey_malloc_records; i++) {
- struct pkey_malloc_record *rec = &pkey_malloc_records[i];
- dprintf4("looking for ptr %p at record[%ld/%p]: {%p, %ld}\n",
- ptr, i, rec, rec->ptr, rec->size);
- if ((ptr < rec->ptr) ||
- (ptr >= rec->ptr + rec->size))
- continue;
-
- dprintf3("found ptr %p at record[%ld/%p]: {%p, %ld}\n",
- ptr, i, rec, rec->ptr, rec->size);
- nr_pkey_malloc_records--;
- ret = munmap(rec->ptr, rec->size);
- dprintf3("munmap ret: %d\n", ret);
- pkey_assert(!ret);
- dprintf3("clearing rec->ptr, rec: %p\n", rec);
- rec->ptr = NULL;
- dprintf3("done clearing rec->ptr, rec: %p\n", rec);
- return;
- }
- pkey_assert(false);
-}
-
-
-void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
-{
- void *ptr;
- int ret;
-
- rdpkru();
- dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
- size, prot, pkey);
- pkey_assert(pkey < NR_PKEYS);
- ptr = mmap(NULL, size, prot, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
- pkey_assert(ptr != (void *)-1);
- ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
- pkey_assert(!ret);
- record_pkey_malloc(ptr, size, prot);
- rdpkru();
-
- dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
- return ptr;
-}
-
-void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
-{
- int ret;
- void *ptr;
-
- dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
- size, prot, pkey);
- /*
- * Guarantee we can fit at least one huge page in the resulting
- * allocation by allocating space for 2:
- */
- size = ALIGN_UP(size, HPAGE_SIZE * 2);
- ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
- pkey_assert(ptr != (void *)-1);
- record_pkey_malloc(ptr, size, prot);
- mprotect_pkey(ptr, size, prot, pkey);
-
- dprintf1("unaligned ptr: %p\n", ptr);
- ptr = ALIGN_PTR_UP(ptr, HPAGE_SIZE);
- dprintf1(" aligned ptr: %p\n", ptr);
- ret = madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE);
- dprintf1("MADV_HUGEPAGE ret: %d\n", ret);
- ret = madvise(ptr, HPAGE_SIZE, MADV_WILLNEED);
- dprintf1("MADV_WILLNEED ret: %d\n", ret);
- memset(ptr, 0, HPAGE_SIZE);
-
- dprintf1("mmap()'d thp for pkey %d @ %p\n", pkey, ptr);
- return ptr;
-}
-
-int hugetlb_setup_ok;
-#define GET_NR_HUGE_PAGES 10
-void setup_hugetlbfs(void)
-{
- int err;
- int fd;
- char buf[] = "123";
-
- if (geteuid() != 0) {
- fprintf(stderr, "WARNING: not run as root, can not do hugetlb test\n");
- return;
- }
-
- cat_into_file(__stringify(GET_NR_HUGE_PAGES), "/proc/sys/vm/nr_hugepages");
-
- /*
- * Now go make sure that we got the pages and that they
- * are 2M pages. Someone might have made 1G the default.
- */
- fd = open("/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages", O_RDONLY);
- if (fd < 0) {
- perror("opening sysfs 2M hugetlb config");
- return;
- }
-
- /* -1 to guarantee leaving the trailing \0 */
- err = read(fd, buf, sizeof(buf)-1);
- close(fd);
- if (err <= 0) {
- perror("reading sysfs 2M hugetlb config");
- return;
- }
-
- if (atoi(buf) != GET_NR_HUGE_PAGES) {
- fprintf(stderr, "could not confirm 2M pages, got: '%s' expected %d\n",
- buf, GET_NR_HUGE_PAGES);
- return;
- }
-
- hugetlb_setup_ok = 1;
-}
-
-void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
-{
- void *ptr;
- int flags = MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB;
-
- if (!hugetlb_setup_ok)
- return PTR_ERR_ENOTSUP;
-
- dprintf1("doing %s(%ld, %x, %x)\n", __func__, size, prot, pkey);
- size = ALIGN_UP(size, HPAGE_SIZE * 2);
- pkey_assert(pkey < NR_PKEYS);
- ptr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
- pkey_assert(ptr != (void *)-1);
- mprotect_pkey(ptr, size, prot, pkey);
-
- record_pkey_malloc(ptr, size, prot);
-
- dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
- return ptr;
-}
-
-void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
-{
- void *ptr;
- int fd;
-
- dprintf1("doing %s(size=%ld, prot=0x%x, pkey=%d)\n", __func__,
- size, prot, pkey);
- pkey_assert(pkey < NR_PKEYS);
- fd = open("/dax/foo", O_RDWR);
- pkey_assert(fd >= 0);
-
- ptr = mmap(0, size, prot, MAP_SHARED, fd, 0);
- pkey_assert(ptr != (void *)-1);
-
- mprotect_pkey(ptr, size, prot, pkey);
-
- record_pkey_malloc(ptr, size, prot);
-
- dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
- close(fd);
- return ptr;
-}
-
-void *(*pkey_malloc[])(long size, int prot, u16 pkey) = {
-
- malloc_pkey_with_mprotect,
- malloc_pkey_anon_huge,
- malloc_pkey_hugetlb
-/* can not do direct with the pkey_mprotect() API:
- malloc_pkey_mmap_direct,
- malloc_pkey_mmap_dax,
-*/
-};
-
-void *malloc_pkey(long size, int prot, u16 pkey)
-{
- void *ret;
- static int malloc_type;
- int nr_malloc_types = ARRAY_SIZE(pkey_malloc);
-
- pkey_assert(pkey < NR_PKEYS);
-
- while (1) {
- pkey_assert(malloc_type < nr_malloc_types);
-
- ret = pkey_malloc[malloc_type](size, prot, pkey);
- pkey_assert(ret != (void *)-1);
-
- malloc_type++;
- if (malloc_type >= nr_malloc_types)
- malloc_type = (random()%nr_malloc_types);
-
- /* try again if the malloc_type we tried is unsupported */
- if (ret == PTR_ERR_ENOTSUP)
- continue;
-
- break;
- }
-
- dprintf3("%s(%ld, prot=%x, pkey=%x) returning: %p\n", __func__,
- size, prot, pkey, ret);
- return ret;
-}
-
-int last_pkru_faults;
-#define UNKNOWN_PKEY -2
-void expected_pk_fault(int pkey)
-{
- dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n",
- __func__, last_pkru_faults, pkru_faults);
- dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
- pkey_assert(last_pkru_faults + 1 == pkru_faults);
-
- /*
- * For exec-only memory, we do not know the pkey in
- * advance, so skip this check.
- */
- if (pkey != UNKNOWN_PKEY)
- pkey_assert(last_si_pkey == pkey);
-
- /*
- * The signal handler should have cleared out PKRU to let the
- * test program continue. We now have to restore it.
- */
- if (__rdpkru() != 0)
- pkey_assert(0);
-
- __wrpkru(shadow_pkru);
- dprintf1("%s() set PKRU=%x to restore state after signal nuked it\n",
- __func__, shadow_pkru);
- last_pkru_faults = pkru_faults;
- last_si_pkey = -1;
-}
-
-#define do_not_expect_pk_fault(msg) do { \
- if (last_pkru_faults != pkru_faults) \
- dprintf0("unexpected PK fault: %s\n", msg); \
- pkey_assert(last_pkru_faults == pkru_faults); \
-} while (0)
-
-int test_fds[10] = { -1 };
-int nr_test_fds;
-void __save_test_fd(int fd)
-{
- pkey_assert(fd >= 0);
- pkey_assert(nr_test_fds < ARRAY_SIZE(test_fds));
- test_fds[nr_test_fds] = fd;
- nr_test_fds++;
-}
-
-int get_test_read_fd(void)
-{
- int test_fd = open("/etc/passwd", O_RDONLY);
- __save_test_fd(test_fd);
- return test_fd;
-}
-
-void close_test_fds(void)
-{
- int i;
-
- for (i = 0; i < nr_test_fds; i++) {
- if (test_fds[i] < 0)
- continue;
- close(test_fds[i]);
- test_fds[i] = -1;
- }
- nr_test_fds = 0;
-}
-
-#define barrier() __asm__ __volatile__("": : :"memory")
-__attribute__((noinline)) int read_ptr(int *ptr)
-{
- /*
- * Keep GCC from optimizing this away somehow
- */
- barrier();
- return *ptr;
-}
-
-void test_read_of_write_disabled_region(int *ptr, u16 pkey)
-{
- int ptr_contents;
-
- dprintf1("disabling write access to PKEY[1], doing read\n");
- pkey_write_deny(pkey);
- ptr_contents = read_ptr(ptr);
- dprintf1("*ptr: %d\n", ptr_contents);
- dprintf1("\n");
-}
-void test_read_of_access_disabled_region(int *ptr, u16 pkey)
-{
- int ptr_contents;
-
- dprintf1("disabling access to PKEY[%02d], doing read @ %p\n", pkey, ptr);
- rdpkru();
- pkey_access_deny(pkey);
- ptr_contents = read_ptr(ptr);
- dprintf1("*ptr: %d\n", ptr_contents);
- expected_pk_fault(pkey);
-}
-void test_write_of_write_disabled_region(int *ptr, u16 pkey)
-{
- dprintf1("disabling write access to PKEY[%02d], doing write\n", pkey);
- pkey_write_deny(pkey);
- *ptr = __LINE__;
- expected_pk_fault(pkey);
-}
-void test_write_of_access_disabled_region(int *ptr, u16 pkey)
-{
- dprintf1("disabling access to PKEY[%02d], doing write\n", pkey);
- pkey_access_deny(pkey);
- *ptr = __LINE__;
- expected_pk_fault(pkey);
-}
-void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey)
-{
- int ret;
- int test_fd = get_test_read_fd();
-
- dprintf1("disabling access to PKEY[%02d], "
- "having kernel read() to buffer\n", pkey);
- pkey_access_deny(pkey);
- ret = read(test_fd, ptr, 1);
- dprintf1("read ret: %d\n", ret);
- pkey_assert(ret);
-}
-void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey)
-{
- int ret;
- int test_fd = get_test_read_fd();
-
- pkey_write_deny(pkey);
- ret = read(test_fd, ptr, 100);
- dprintf1("read ret: %d\n", ret);
- if (ret < 0 && (DEBUG_LEVEL > 0))
- perror("verbose read result (OK for this to be bad)");
- pkey_assert(ret);
-}
-
-void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey)
-{
- int pipe_ret, vmsplice_ret;
- struct iovec iov;
- int pipe_fds[2];
-
- pipe_ret = pipe(pipe_fds);
-
- pkey_assert(pipe_ret == 0);
- dprintf1("disabling access to PKEY[%02d], "
- "having kernel vmsplice from buffer\n", pkey);
- pkey_access_deny(pkey);
- iov.iov_base = ptr;
- iov.iov_len = PAGE_SIZE;
- vmsplice_ret = vmsplice(pipe_fds[1], &iov, 1, SPLICE_F_GIFT);
- dprintf1("vmsplice() ret: %d\n", vmsplice_ret);
- pkey_assert(vmsplice_ret == -1);
-
- close(pipe_fds[0]);
- close(pipe_fds[1]);
-}
-
-void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey)
-{
- int ignored = 0xdada;
- int futex_ret;
- int some_int = __LINE__;
-
- dprintf1("disabling write to PKEY[%02d], "
- "doing futex gunk in buffer\n", pkey);
- *ptr = some_int;
- pkey_write_deny(pkey);
- futex_ret = syscall(SYS_futex, ptr, FUTEX_WAIT, some_int-1, NULL,
- &ignored, ignored);
- if (DEBUG_LEVEL > 0)
- perror("futex");
- dprintf1("futex() ret: %d\n", futex_ret);
-}
-
-/* Assumes that all pkeys other than 'pkey' are unallocated */
-void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey)
-{
- int err;
- int i;
-
- /* Note: 0 is the default pkey, so don't mess with it */
- for (i = 1; i < NR_PKEYS; i++) {
- if (pkey == i)
- continue;
-
- dprintf1("trying get/set/free to non-allocated pkey: %2d\n", i);
- err = sys_pkey_free(i);
- pkey_assert(err);
-
- err = sys_pkey_free(i);
- pkey_assert(err);
-
- err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, i);
- pkey_assert(err);
- }
-}
-
-/* Assumes that all pkeys other than 'pkey' are unallocated */
-void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
-{
- int err;
- int bad_pkey = NR_PKEYS+99;
-
- /* pass a known-invalid pkey in: */
- err = sys_mprotect_pkey(ptr, PAGE_SIZE, PROT_READ, bad_pkey);
- pkey_assert(err);
-}
-
-void become_child(void)
-{
- pid_t forkret;
-
- forkret = fork();
- pkey_assert(forkret >= 0);
- dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
-
- if (!forkret) {
- /* in the child */
- return;
- }
- exit(0);
-}
-
-/* Assumes that all pkeys other than 'pkey' are unallocated */
-void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
-{
- int err;
- int allocated_pkeys[NR_PKEYS] = {0};
- int nr_allocated_pkeys = 0;
- int i;
-
- for (i = 0; i < NR_PKEYS*3; i++) {
- int new_pkey;
- dprintf1("%s() alloc loop: %d\n", __func__, i);
- new_pkey = alloc_pkey();
- dprintf4("%s()::%d, err: %d pkru: 0x%x shadow: 0x%x\n", __func__,
- __LINE__, err, __rdpkru(), shadow_pkru);
- rdpkru(); /* for shadow checking */
- dprintf2("%s() errno: %d ENOSPC: %d\n", __func__, errno, ENOSPC);
- if ((new_pkey == -1) && (errno == ENOSPC)) {
- dprintf2("%s() failed to allocate pkey after %d tries\n",
- __func__, nr_allocated_pkeys);
- } else {
- /*
- * Ensure the number of successes never
- * exceeds the number of keys supported
- * in the hardware.
- */
- pkey_assert(nr_allocated_pkeys < NR_PKEYS);
- allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
- }
-
- /*
- * Make sure that allocation state is properly
- * preserved across fork().
- */
- if (i == NR_PKEYS*2)
- become_child();
- }
-
- dprintf3("%s()::%d\n", __func__, __LINE__);
-
- /*
- * There are 16 pkeys supported in hardware. Three are
- * allocated by the time we get here:
- * 1. The default key (0)
- * 2. One possibly consumed by an execute-only mapping.
- * 3. One allocated by the test code and passed in via
- * 'pkey' to this function.
- * Ensure that we can allocate at least another 13 (16-3).
- */
- pkey_assert(nr_allocated_pkeys >= NR_PKEYS-3);
-
- for (i = 0; i < nr_allocated_pkeys; i++) {
- err = sys_pkey_free(allocated_pkeys[i]);
- pkey_assert(!err);
- rdpkru(); /* for shadow checking */
- }
-}
-
-/*
- * pkey 0 is special. It is allocated by default, so you do not
- * have to call pkey_alloc() to use it first. Make sure that it
- * is usable.
- */
-void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
-{
- long size;
- int prot;
-
- assert(pkey_last_malloc_record);
- size = pkey_last_malloc_record->size;
- /*
- * This is a bit of a hack. But mprotect() requires
- * huge-page-aligned sizes when operating on hugetlbfs.
- * So, make sure that we use something that's a multiple
- * of a huge page when we can.
- */
- if (size >= HPAGE_SIZE)
- size = HPAGE_SIZE;
- prot = pkey_last_malloc_record->prot;
-
- /* Use pkey 0 */
- mprotect_pkey(ptr, size, prot, 0);
-
- /* Make sure that we can set it back to the original pkey. */
- mprotect_pkey(ptr, size, prot, pkey);
-}
-
-void test_ptrace_of_child(int *ptr, u16 pkey)
-{
- __attribute__((__unused__)) int peek_result;
- pid_t child_pid;
- void *ignored = 0;
- long ret;
- int status;
- /*
- * This is the "control" for our little expermient. Make sure
- * we can always access it when ptracing.
- */
- int *plain_ptr_unaligned = malloc(HPAGE_SIZE);
- int *plain_ptr = ALIGN_PTR_UP(plain_ptr_unaligned, PAGE_SIZE);
-
- /*
- * Fork a child which is an exact copy of this process, of course.
- * That means we can do all of our tests via ptrace() and then plain
- * memory access and ensure they work differently.
- */
- child_pid = fork_lazy_child();
- dprintf1("[%d] child pid: %d\n", getpid(), child_pid);
-
- ret = ptrace(PTRACE_ATTACH, child_pid, ignored, ignored);
- if (ret)
- perror("attach");
- dprintf1("[%d] attach ret: %ld %d\n", getpid(), ret, __LINE__);
- pkey_assert(ret != -1);
- ret = waitpid(child_pid, &status, WUNTRACED);
- if ((ret != child_pid) || !(WIFSTOPPED(status))) {
- fprintf(stderr, "weird waitpid result %ld stat %x\n",
- ret, status);
- pkey_assert(0);
- }
- dprintf2("waitpid ret: %ld\n", ret);
- dprintf2("waitpid status: %d\n", status);
-
- pkey_access_deny(pkey);
- pkey_write_deny(pkey);
-
- /* Write access, untested for now:
- ret = ptrace(PTRACE_POKEDATA, child_pid, peek_at, data);
- pkey_assert(ret != -1);
- dprintf1("poke at %p: %ld\n", peek_at, ret);
- */
-
- /*
- * Try to access the pkey-protected "ptr" via ptrace:
- */
- ret = ptrace(PTRACE_PEEKDATA, child_pid, ptr, ignored);
- /* expect it to work, without an error: */
- pkey_assert(ret != -1);
- /* Now access from the current task, and expect an exception: */
- peek_result = read_ptr(ptr);
- expected_pk_fault(pkey);
-
- /*
- * Try to access the NON-pkey-protected "plain_ptr" via ptrace:
- */
- ret = ptrace(PTRACE_PEEKDATA, child_pid, plain_ptr, ignored);
- /* expect it to work, without an error: */
- pkey_assert(ret != -1);
- /* Now access from the current task, and expect NO exception: */
- peek_result = read_ptr(plain_ptr);
- do_not_expect_pk_fault("read plain pointer after ptrace");
-
- ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
- pkey_assert(ret != -1);
-
- ret = kill(child_pid, SIGKILL);
- pkey_assert(ret != -1);
-
- wait(&status);
-
- free(plain_ptr_unaligned);
-}
-
-void *get_pointer_to_instructions(void)
-{
- void *p1;
-
- p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
- dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
- /* lots_o_noops_around_write should be page-aligned already */
- assert(p1 == &lots_o_noops_around_write);
-
- /* Point 'p1' at the *second* page of the function: */
- p1 += PAGE_SIZE;
-
- /*
- * Try to ensure we fault this in on next touch to ensure
- * we get an instruction fault as opposed to a data one
- */
- madvise(p1, PAGE_SIZE, MADV_DONTNEED);
-
- return p1;
-}
-
-void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
-{
- void *p1;
- int scratch;
- int ptr_contents;
- int ret;
-
- p1 = get_pointer_to_instructions();
- lots_o_noops_around_write(&scratch);
- ptr_contents = read_ptr(p1);
- dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
-
- ret = mprotect_pkey(p1, PAGE_SIZE, PROT_EXEC, (u64)pkey);
- pkey_assert(!ret);
- pkey_access_deny(pkey);
-
- dprintf2("pkru: %x\n", rdpkru());
-
- /*
- * Make sure this is an *instruction* fault
- */
- madvise(p1, PAGE_SIZE, MADV_DONTNEED);
- lots_o_noops_around_write(&scratch);
- do_not_expect_pk_fault("executing on PROT_EXEC memory");
- ptr_contents = read_ptr(p1);
- dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
- expected_pk_fault(pkey);
-}
-
-void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
-{
- void *p1;
- int scratch;
- int ptr_contents;
- int ret;
-
- dprintf1("%s() start\n", __func__);
-
- p1 = get_pointer_to_instructions();
- lots_o_noops_around_write(&scratch);
- ptr_contents = read_ptr(p1);
- dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
-
- /* Use a *normal* mprotect(), not mprotect_pkey(): */
- ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
- pkey_assert(!ret);
-
- dprintf2("pkru: %x\n", rdpkru());
-
- /* Make sure this is an *instruction* fault */
- madvise(p1, PAGE_SIZE, MADV_DONTNEED);
- lots_o_noops_around_write(&scratch);
- do_not_expect_pk_fault("executing on PROT_EXEC memory");
- ptr_contents = read_ptr(p1);
- dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
- expected_pk_fault(UNKNOWN_PKEY);
-
- /*
- * Put the memory back to non-PROT_EXEC. Should clear the
- * exec-only pkey off the VMA and allow it to be readable
- * again. Go to PROT_NONE first to check for a kernel bug
- * that did not clear the pkey when doing PROT_NONE.
- */
- ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
- pkey_assert(!ret);
-
- ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
- pkey_assert(!ret);
- ptr_contents = read_ptr(p1);
- do_not_expect_pk_fault("plain read on recently PROT_EXEC area");
-}
-
-void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
-{
- int size = PAGE_SIZE;
- int sret;
-
- if (cpu_has_pku()) {
- dprintf1("SKIP: %s: no CPU support\n", __func__);
- return;
- }
-
- sret = syscall(SYS_mprotect_key, ptr, size, PROT_READ, pkey);
- pkey_assert(sret < 0);
-}
-
-void (*pkey_tests[])(int *ptr, u16 pkey) = {
- test_read_of_write_disabled_region,
- test_read_of_access_disabled_region,
- test_write_of_write_disabled_region,
- test_write_of_access_disabled_region,
- test_kernel_write_of_access_disabled_region,
- test_kernel_write_of_write_disabled_region,
- test_kernel_gup_of_access_disabled_region,
- test_kernel_gup_write_to_write_disabled_region,
- test_executing_on_unreadable_memory,
- test_implicit_mprotect_exec_only_memory,
- test_mprotect_with_pkey_0,
- test_ptrace_of_child,
- test_pkey_syscalls_on_non_allocated_pkey,
- test_pkey_syscalls_bad_args,
- test_pkey_alloc_exhaust,
-};
-
-void run_tests_once(void)
-{
- int *ptr;
- int prot = PROT_READ|PROT_WRITE;
-
- for (test_nr = 0; test_nr < ARRAY_SIZE(pkey_tests); test_nr++) {
- int pkey;
- int orig_pkru_faults = pkru_faults;
-
- dprintf1("======================\n");
- dprintf1("test %d preparing...\n", test_nr);
-
- tracing_on();
- pkey = alloc_random_pkey();
- dprintf1("test %d starting with pkey: %d\n", test_nr, pkey);
- ptr = malloc_pkey(PAGE_SIZE, prot, pkey);
- dprintf1("test %d starting...\n", test_nr);
- pkey_tests[test_nr](ptr, pkey);
- dprintf1("freeing test memory: %p\n", ptr);
- free_pkey_malloc(ptr);
- sys_pkey_free(pkey);
-
- dprintf1("pkru_faults: %d\n", pkru_faults);
- dprintf1("orig_pkru_faults: %d\n", orig_pkru_faults);
-
- tracing_off();
- close_test_fds();
-
- printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
- dprintf1("======================\n\n");
- }
- iteration_nr++;
-}
-
-void pkey_setup_shadow(void)
-{
- shadow_pkru = __rdpkru();
-}
-
-int main(void)
-{
- int nr_iterations = 22;
-
- setup_handlers();
-
- printf("has pku: %d\n", cpu_has_pku());
-
- if (!cpu_has_pku()) {
- int size = PAGE_SIZE;
- int *ptr;
-
- printf("running PKEY tests for unsupported CPU/OS\n");
-
- ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
- assert(ptr != (void *)-1);
- test_mprotect_pkey_on_unsupported_cpu(ptr, 1);
- exit(0);
- }
-
- pkey_setup_shadow();
- printf("startup pkru: %x\n", rdpkru());
- setup_hugetlbfs();
-
- while (nr_iterations-- > 0)
- run_tests_once();
-
- printf("done (all tests OK)\n");
- return 0;
-}
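
The deleted test above wraps the raw pkey system calls itself (sys_pkey_alloc(), sys_mprotect_pkey(), sys_pkey_free()). For reference, a minimal sketch of that basic flow as a standalone program, reusing the x86_64 syscall numbers defined in the file; it only tags a mapping with a freshly allocated key and leaves the PKRU bits at their permissive default, so no fault is expected.

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* x86_64 numbers, as in the deleted test above */
	#define SYS_mprotect_key 329
	#define SYS_pkey_alloc   330
	#define SYS_pkey_free    331

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
				 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		long pkey = syscall(SYS_pkey_alloc, 0, 0);

		if (buf == MAP_FAILED || pkey < 0) {
			perror("setup");
			return 1;
		}

		/* tag the mapping with the new protection key */
		if (syscall(SYS_mprotect_key, buf, page, PROT_READ | PROT_WRITE, pkey)) {
			perror("pkey_mprotect");
			return 1;
		}

		strcpy(buf, "hello");	/* still allowed: PKRU bits for this key are 0 */
		printf("wrote through pkey %ld: %s\n", pkey, buf);

		syscall(SYS_pkey_free, pkey);
		return 0;
	}
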
diff --git a/usr/include/Makefile b/usr/include/Makefile
index b568a95d1f62..55362f3ab393 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -8,7 +8,11 @@
# We cannot go as far as adding -Wpedantic since it emits too many warnings.
UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
-override c_flags = $(UAPI_CFLAGS) -Wp,-MD,$(depfile) -I$(objtree)/usr/include
+# In theory, we do not care about -m32 or -m64 for header compile tests.
+# It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
+UAPI_CFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+
+override c_flags = $(UAPI_CFLAGS) -Wp,-MMD,$(depfile) -I$(objtree)/usr/include
# The following are excluded for now because they fail to build.
#
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index a36828fbf40a..45799606bb3e 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -60,11 +60,11 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must
* access remotely.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
&locked);
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (IS_ENABLED(CONFIG_KVM_ASYNC_PF_SYNC))
kvm_arch_async_page_present(vcpu, apf);
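
The two hunks above (and the matching ones in kvm_main.c below) convert direct use of mm->mmap_sem to the mmap locking API; in sketch form, the pattern applied here is simply:

	/* before */
	down_read(&mm->mmap_sem);
	/* ... access the remote address space ... */
	up_read(&mm->mmap_sem);

	/* after */
	mmap_read_lock(mm);
	/* ... access the remote address space ... */
	mmap_read_unlock(mm);
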
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7b6013f2ba19..a852af5c3214 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -55,7 +55,6 @@
#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include "coalesced_mmio.h"
#include "async_pf.h"
@@ -1638,7 +1637,7 @@ unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
if (kvm_is_error_hva(addr))
return PAGE_SIZE;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, addr);
if (!vma)
goto out;
@@ -1646,7 +1645,7 @@ unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
size = vma_kernel_pagesize(vma);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return size;
}
@@ -1746,7 +1745,6 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
struct page *page[1];
- int npages;
/*
* Fast pin a writable pfn only if it is a write fault request
@@ -1756,8 +1754,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
if (!(write_fault || writable))
return false;
- npages = __get_user_pages_fast(addr, 1, 1, page);
- if (npages == 1) {
+ if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
*pfn = page_to_pfn(page[0]);
if (writable)
@@ -1797,7 +1794,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (unlikely(!write_fault) && writable) {
struct page *wpage;
- if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
+ if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
*writable = true;
put_page(page);
page = wpage;
@@ -1901,7 +1898,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
if (npages == 1)
return pfn;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
if (npages == -EHWPOISON ||
(!async && check_user_page_hwpoison(addr))) {
pfn = KVM_PFN_ERR_HWPOISON;
@@ -1925,7 +1922,7 @@ retry:
pfn = KVM_PFN_ERR_FAULT;
}
exit:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return pfn;
}
@@ -2009,7 +2006,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
if (entry < nr_pages)
return 0;
- return __get_user_pages_fast(addr, nr_pages, 1, pages);
+ return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
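
Alongside the mmap lock conversion, the kvm_main.c hunks switch the fast-GUP calls from __get_user_pages_fast() to the "fast only" helpers, which take explicit FOLL_* flags instead of a bare write integer. In sketch form, with identifiers as used in the hva_to_pfn_fast() and gfn_to_page_many_atomic() hunks above:

	struct page *page;
	bool ok;

	/* before: write permission requested via a bare "1", success == 1 page */
	ok = (__get_user_pages_fast(addr, 1, 1, &page) == 1);

	/* after: explicit FOLL_WRITE flag, boolean result for the one-page helper */
	ok = get_user_page_fast_only(addr, FOLL_WRITE, &page);

	/* the multi-page variant keeps the "number of pages pinned" return value */
	npages = get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
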